author     Ingo Molnar <mingo@kernel.org>    2019-12-08 14:37:10 +0100
committer  Ingo Molnar <mingo@kernel.org>    2019-12-08 14:37:10 +0100
commit     4f797f56c3786e2c6bc542b3f80e9a599b073976 (patch)
tree       12f311ac2a06329d58d43437243a9b777c7822f4
parent     c5105d764e0214bcc4c6d40d7ba231d01b2e9dda (diff)
parent     63de37476ebd1e9bab6a9e17186dc5aa1da9ea99 (diff)
download   linux-stable-4f797f56c3786e2c6bc542b3f80e9a599b073976.tar.gz
           linux-stable-4f797f56c3786e2c6bc542b3f80e9a599b073976.tar.bz2
           linux-stable-4f797f56c3786e2c6bc542b3f80e9a599b073976.zip
Merge branch 'linus' into sched/urgent, to pick up the latest before merging new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
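This is a back-merge: Linus' tree is merged into the sched/urgent branch so that
pending scheduler fixes apply on top of the latest upstream code. As a rough
sketch of how such a merge commit and its diffstat are produced (the branch
names are taken from the subject line above; the local branch setup is an
assumption, not something this page records):

    git checkout sched/urgent     # the -tip branch being updated
    git merge linus               # records the two parent commits listed above
    git show --stat 4f797f56c378  # prints a diffstat like the one below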
Diffstat:
-rw-r--r--  .gitignore | 2
-rw-r--r--  .mailmap | 1
-rw-r--r--  CREDITS | 3
-rw-r--r--  Documentation/ABI/stable/sysfs-class-infiniband | 19
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-aspeed-vuart | 11
-rw-r--r--  Documentation/ABI/stable/sysfs-driver-ib_srp | 2
-rw-r--r--  Documentation/ABI/testing/debugfs-hyperv | 23
-rw-r--r--  Documentation/ABI/testing/ima_policy | 4
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x | 183
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-fsi | 16
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-iio | 11
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192 | 39
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-mei | 23
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci | 13
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-thunderbolt | 36
-rw-r--r--  Documentation/ABI/testing/sysfs-class-led-driver-el15203000 | 139
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mei | 10
-rw-r--r--  Documentation/ABI/testing/sysfs-class-watchdog | 9
-rw-r--r--  Documentation/ABI/testing/sysfs-fs-f2fs | 6
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-dfl-fme | 132
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-mellanox-bootctl | 58
-rw-r--r--  Documentation/ABI/testing/sysfs-platform-wilco-ec | 17
-rw-r--r--  Documentation/ABI/testing/sysfs-secvar | 46
-rw-r--r--  Documentation/DMA-attributes.txt | 18
-rw-r--r--  Documentation/Makefile | 12
-rw-r--r--  Documentation/admin-guide/LSM/SafeSetID.rst | 4
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 14
-rw-r--r--  Documentation/admin-guide/dell_rbu.rst (renamed from Documentation/driver-api/dell_rbu.rst) | 14
-rw-r--r--  Documentation/admin-guide/device-mapper/dm-dust.rst (renamed from Documentation/admin-guide/device-mapper/dm-dust.txt) | 243
-rw-r--r--  Documentation/admin-guide/device-mapper/index.rst | 1
-rw-r--r--  Documentation/admin-guide/index.rst | 65
-rw-r--r--  Documentation/admin-guide/iostats.rst | 47
-rw-r--r--  Documentation/admin-guide/kernel-parameters.rst | 1
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 72
-rw-r--r--  Documentation/admin-guide/perf/imx-ddr.rst | 33
-rw-r--r--  Documentation/admin-guide/perf/index.rst | 1
-rw-r--r--  Documentation/admin-guide/sysctl/kernel.rst | 12
-rw-r--r--  Documentation/conf.py | 3
-rw-r--r--  Documentation/core-api/genalloc.rst | 26
-rw-r--r--  Documentation/core-api/genericirq.rst | 52
-rw-r--r--  Documentation/core-api/memory-allocation.rst | 50
-rw-r--r--  Documentation/core-api/mm-api.rst | 2
-rw-r--r--  Documentation/core-api/printk-formats.rst | 48
-rw-r--r--  Documentation/core-api/refcount-vs-atomic.rst | 36
-rw-r--r--  Documentation/core-api/symbol-namespaces.rst | 3
-rw-r--r--  Documentation/dev-tools/kasan.rst | 63
-rw-r--r--  Documentation/dev-tools/kmemleak.rst | 2
-rw-r--r--  Documentation/devicetree/bindings/Makefile | 5
-rw-r--r--  Documentation/devicetree/bindings/arm/amlogic/smp-sram.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scmi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scpi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/axentia.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/arm/coresight.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/arm/omap/omap.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/exynos-chipid.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml | 39
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/pmu.txt | 72
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/pmu.yaml | 105
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt | 83
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml | 181
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml | 31
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/sysreg.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/arm/samsung/sysreg.yaml | 45
-rw-r--r--  Documentation/devicetree/bindings/arm/sprd.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/arm/sprd.yaml | 33
-rw-r--r--  Documentation/devicetree/bindings/arm/stm32/stm32.yaml | 27
-rw-r--r--  Documentation/devicetree/bindings/arm/sunxi/smp-sram.txt | 44
-rw-r--r--  Documentation/devicetree/bindings/ata/sata_rcar.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/board/fsl-board.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/bus/renesas,bsc.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/bus/renesas,bsc.yaml | 60
-rw-r--r--  Documentation/devicetree/bindings/bus/simple-pm-bus.txt | 44
-rw-r--r--  Documentation/devicetree/bindings/bus/simple-pm-bus.yaml | 75
-rw-r--r--  Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/clock/imx7ulp-clock.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/clock/ingenic,cgu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,gcc.txt | 94
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,gcc.yaml | 188
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,q6sstopcc.yaml | 43
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml | 49
-rw-r--r--  Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt | 60
-rw-r--r--  Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/ti/davinci/psc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/counter/ti-eqep.yaml | 50
-rw-r--r--  Documentation/devicetree/bindings/cpu/cpu-topology.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/crypto/samsung-slimsss.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml | 47
-rw-r--r--  Documentation/devicetree/bindings/crypto/samsung-sss.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/crypto/samsung-sss.yaml | 58
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-crc.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml | 38
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml | 51
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-hash.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml | 69
-rw-r--r--  Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/devfreq/exynos-bus.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/arm,malidp.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/anx7814.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/imx/fsl,imx-fb.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.yaml | 30
-rw-r--r--  Documentation/devicetree/bindings/display/renesas,du.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/display/st,stm32-dsi.yaml | 150
-rw-r--r--  Documentation/devicetree/bindings/display/st,stm32-ltdc.txt | 144
-rw-r--r--  Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml | 81
-rw-r--r--  Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/dma/dma-common.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/dma/jz4780-dma.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml | 55
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-edma.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/eeprom/at24.txt | 90
-rw-r--r--  Documentation/devicetree/bindings/eeprom/at24.yaml | 188
-rw-r--r--  Documentation/devicetree/bindings/example-schema.yaml | 81
-rw-r--r--  Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml | 70
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-rda.yaml | 50
-rw-r--r--  Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml | 22
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/gpu/samsung-g2d.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/gpu/samsung-g2d.yaml | 75
-rw-r--r--  Documentation/devicetree/bindings/gpu/samsung-rotator.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/gpu/samsung-rotator.yaml | 48
-rw-r--r--  Documentation/devicetree/bindings/gpu/samsung-scaler.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/gpu/samsung-scaler.yaml | 81
-rw-r--r--  Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml | 50
-rw-r--r--  Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml | 104
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml | 93
-rw-r--r--  Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml | 53
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-aspeed.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-at91.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-meson.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-stm32.txt | 65
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/i2c/renesas,i2c.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/renesas,iic.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml | 141
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml | 104
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/max1027-adc.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/mcp3911.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml | 71
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt | 107
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml | 151
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml | 49
-rw-r--r--  Documentation/devicetree/bindings/iio/dac/ltc1660.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/iio/iio-bindings.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/iio/light/adux1020.yaml | 47
-rw-r--r--  Documentation/devicetree/bindings/iio/light/bh1750.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/iio/light/bh1750.yaml | 43
-rw-r--r--  Documentation/devicetree/bindings/iio/light/veml6030.yaml | 62
-rw-r--r--  Documentation/devicetree/bindings/iio/pressure/bmp085.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml | 60
-rw-r--r--  Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml | 480
-rw-r--r--  Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml | 89
-rw-r--r--  Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/input/input.yaml | 36
-rw-r--r--  Documentation/devicetree/bindings/input/keys.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/input/max77650-onkey.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/input/max77650-onkey.yaml | 35
-rw-r--r--  Documentation/devicetree/bindings/input/mpr121-touchkey.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/input/mtk-pmic-keys.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/ad7879.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml | 62
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt | 45
-rw-r--r--  Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml | 77
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.txt | 49
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/interrupts.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml | 87
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml | 98
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt | 77
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml | 95
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu.txt | 182
-rw-r--r--  Documentation/devicetree/bindings/iommu/arm,smmu.yaml | 230
-rw-r--r--  Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt | 67
-rw-r--r--  Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml | 108
-rw-r--r--  Documentation/devicetree/bindings/leds/backlight/led-backlight.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/leds/backlight/pm8941-wled.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt | 154
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-el15203000.txt | 69
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-max77650.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-max77650.yaml | 51
-rw-r--r--  Documentation/devicetree/bindings/mailbox/fsl,mu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml | 84
-rw-r--r--  Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt | 47
-rw-r--r--  Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml | 91
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ad5820.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/imx290.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/nokia,smia.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/media/i2c/ov2659.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/media/meson-ao-cec.txt | 37
-rw-r--r--  Documentation/devicetree/bindings/media/rc.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/media/renesas,csi2.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/media/renesas,vin.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/media/sh_mobile_ceu.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/media/st,stm32-cec.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/media/st,stm32-cec.yaml | 54
-rw-r--r--  Documentation/devicetree/bindings/media/st,stm32-dcmi.txt | 45
-rw-r--r--  Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml | 86
-rw-r--r--  Documentation/devicetree/bindings/media/ti,vpe.yaml | 64
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt | 79
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml | 128
-rw-r--r--  Documentation/devicetree/bindings/mfd/ab8500.txt | 119
-rw-r--r--  Documentation/devicetree/bindings/mfd/madera.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77650.txt | 46
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77650.yaml | 149
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77693.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml | 120
-rw-r--r--  Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml | 162
-rw-r--r--  Documentation/devicetree/bindings/mfd/stm32-lptimer.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/mfd/stm32-timers.txt | 73
-rw-r--r--  Documentation/devicetree/bindings/mfd/syscon.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/mfd/syscon.yaml | 84
-rw-r--r--  Documentation/devicetree/bindings/misc/allwinner,syscon.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/mmc/arasan,sdhci.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/mmc/jz4740.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc-controller.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/mmc/owl-mmc.yaml | 59
-rw-r--r--  Documentation/devicetree/bindings/mmc/renesas,sdhi.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-atmel.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml | 98
-rw-r--r--  Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml | 51
-rw-r--r--  Documentation/devicetree/bindings/net/can/sun4i_can.txt | 36
-rw-r--r--  Documentation/devicetree/bindings/net/davinci-mdio.txt | 36
-rw-r--r--  Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml | 71
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/nvmem/rockchip-otp.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/nvmem/sprd-efuse.txt | 39
-rw-r--r--  Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/pci/layerscape-pci.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pci/rcar-pci.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml | 47
-rw-r--r--  Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml | 75
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml | 243
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt | 164
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml | 116
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml | 140
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt | 192
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml | 132
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt | 183
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 7
-rw-r--r--  Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/fsl,imx-gpc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/pd-samsung.txt | 45
-rw-r--r--  Documentation/devicetree/bindings/power/pd-samsung.yaml | 66
-rw-r--r--  Documentation/devicetree/bindings/power/power-domain.yaml | 133
-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt | 95
-rw-r--r--  Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/reset/syscon-poweroff.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml | 60
-rw-r--r--  Documentation/devicetree/bindings/power/reset/syscon-reboot.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml | 60
-rw-r--r--  Documentation/devicetree/bindings/power/supply/cpcap-charger.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/power/supply/max77650-charger.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/power/supply/max77650-charger.yaml | 34
-rw-r--r--  Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/atmel-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/img-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/imx-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/mxs-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-berlin.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-hibvt.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-lp3943.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-mediatek.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-meson.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-rockchip.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-samsung.txt | 51
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-samsung.yaml | 109
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-sifive.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-sprd.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-stm32.txt | 38
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-tiecap.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-zx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm.yaml | 29
-rw-r--r--  Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt | 40
-rw-r--r--  Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml | 78
-rw-r--r--  Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml | 69
-rw-r--r--  Documentation/devicetree/bindings/pwm/spear-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pwm/vt8500-pwm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/fixed-regulator.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/regulator/max77650-regulator.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/regulator/max77650-regulator.yaml | 31
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml | 128
-rw-r--r--  Documentation/devicetree/bindings/remoteproc/stm32-rproc.txt | 63
-rw-r--r--  Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/rng/samsung,exynos4-rng.yaml | 45
-rw-r--r--  Documentation/devicetree/bindings/rng/st,stm32-rng.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/rng/st,stm32-rng.yaml | 48
-rw-r--r--  Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml | 70
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-mt6397.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/rtc/rtc-sh.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/rtc/s3c-rtc.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/rtc/s3c-rtc.yaml | 89
-rw-r--r--  Documentation/devicetree/bindings/serial/8250.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/serial/fsl-lpuart.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/serial/renesas,sci-serial.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/serial/samsung_uart.txt | 58
-rw-r--r--  Documentation/devicetree/bindings/serial/samsung_uart.yaml | 118
-rw-r--r--  Documentation/devicetree/bindings/serial/sprd-uart.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/serial/sprd-uart.yaml | 72
-rw-r--r--  Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml | 51
-rw-r--r--  Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt | 33
-rw-r--r--  Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml | 49
-rw-r--r--  Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/mediatek/scpsys.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/adi,adau7118.yaml | 85
-rw-r--r--  Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml | 267
-rw-r--r--  Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml | 38
-rw-r--r--  Documentation/devicetree/bindings/sound/arndale.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/fsl,mqs.txt | 36
-rw-r--r--  Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,fsi.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,fsi.yaml | 76
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,rsnd.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/sound/rockchip-max98090.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/sound/rt1011.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/sound/rt5682.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung,odroid.txt | 54
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung,odroid.yaml | 91
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung-i2s.txt | 84
-rw-r--r--  Documentation/devicetree/bindings/sound/samsung-i2s.yaml | 138
-rw-r--r--  Documentation/devicetree/bindings/sound/sun4i-codec.txt | 94
-rw-r--r--  Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/sound/tas2562.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/sound/tas2770.txt | 37
-rw-r--r--  Documentation/devicetree/bindings/sound/ti,pcm3168a.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/sound/tlv320aic31xx.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt | 24
-rw-r--r--  Documentation/devicetree/bindings/sram/renesas,smp-sram.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/sram/rockchip-smp-sram.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/sram/samsung-sram.txt | 38
-rw-r--r--  Documentation/devicetree/bindings/sram/sram.txt | 80
-rw-r--r--  Documentation/devicetree/bindings/sram/sram.yaml | 257
-rw-r--r--  Documentation/devicetree/bindings/submitting-patches.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml | 79
-rw-r--r--  Documentation/devicetree/bindings/thermal/stm32-thermal.txt | 61
-rw-r--r--  Documentation/devicetree/bindings/timer/ingenic,tcu.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.txt | 88
-rw-r--r--  Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml | 124
-rw-r--r--  Documentation/devicetree/bindings/timer/st,stm32-timer.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/timer/st,stm32-timer.yaml | 47
-rw-r--r--  Documentation/devicetree/bindings/trivial-devices.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml | 68
-rw-r--r--  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml | 100
-rw-r--r--  Documentation/devicetree/bindings/usb/amlogic,dwc3.txt | 88
-rw-r--r--  Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml | 127
-rw-r--r--  Documentation/devicetree/bindings/usb/generic-ehci.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt | 41
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml | 86
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usbhs.txt | 57
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,usbhs.yaml | 126
-rw-r--r--  Documentation/devicetree/bindings/usb/richtek,rt1711h.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt | 38
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml | 86
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-xhci.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/usb251xb.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml | 3
-rw-r--r--  Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/watchdog/renesas,wdt.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/watchdog/samsung-wdt.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml | 74
-rw-r--r--  Documentation/devicetree/writing-schema.rst | 9
-rw-r--r--  Documentation/doc-guide/kernel-doc.rst | 29
-rw-r--r--  Documentation/dontdiff | 1
-rw-r--r--  Documentation/driver-api/devfreq.rst | 30
-rw-r--r--  Documentation/driver-api/device_link.rst | 3
-rw-r--r--  Documentation/driver-api/dma-buf.rst | 6
-rw-r--r--  Documentation/driver-api/driver-model/devres.rst | 5
-rw-r--r--  Documentation/driver-api/driver-model/driver.rst | 43
-rw-r--r--  Documentation/driver-api/generic-counter.rst | 162
-rw-r--r--  Documentation/driver-api/gpio/bt8xxgpio.rst (renamed from Documentation/driver-api/bt8xxgpio.rst) | 2
-rw-r--r--  Documentation/driver-api/gpio/driver.rst | 27
-rw-r--r--  Documentation/driver-api/gpio/index.rst | 1
-rw-r--r--  Documentation/driver-api/index.rst | 5
-rw-r--r--  Documentation/driver-api/infiniband.rst | 127
-rw-r--r--  Documentation/driver-api/infrastructure.rst | 3
-rw-r--r--  Documentation/driver-api/interconnect.rst | 2
-rw-r--r--  Documentation/driver-api/pti_intel_mid.rst | 4
-rw-r--r--  Documentation/features/core/tracehook/arch-support.txt | 2
-rw-r--r--  Documentation/filesystems/autofs.rst (renamed from Documentation/filesystems/autofs.txt) | 263
-rw-r--r--  Documentation/filesystems/debugfs.txt | 50
-rw-r--r--  Documentation/filesystems/f2fs.txt | 5
-rw-r--r--  Documentation/filesystems/fscrypt.rst | 4
-rw-r--r--  Documentation/filesystems/index.rst | 1
-rw-r--r--  Documentation/filesystems/locking.rst | 2
-rw-r--r--  Documentation/firmware-guide/acpi/namespace.rst | 2
-rw-r--r--  Documentation/fpga/dfl.rst | 10
-rw-r--r--  Documentation/gpu/amdgpu.rst | 65
-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 3
-rw-r--r--  Documentation/gpu/drm-mm.rst | 11
-rw-r--r--  Documentation/gpu/i915.rst | 82
-rw-r--r--  Documentation/gpu/mcde.rst | 2
-rw-r--r--  Documentation/gpu/todo.rst | 135
-rw-r--r--  Documentation/hwmon/bel-pfe.rst | 112
-rw-r--r--  Documentation/hwmon/dell-smm-hwmon.rst | 164
-rw-r--r--  Documentation/hwmon/ina3221.rst | 12
-rw-r--r--  Documentation/hwmon/index.rst | 4
-rw-r--r--  Documentation/hwmon/inspur-ipsps1.rst | 2
-rw-r--r--  Documentation/hwmon/ltc2947.rst | 100
-rw-r--r--  Documentation/hwmon/tmp513.rst | 103
-rw-r--r--  Documentation/i2c/busses/i2c-i801.rst | 1
-rw-r--r--  Documentation/i2c/busses/index.rst | 2
-rw-r--r--  Documentation/i2c/index.rst | 2
-rw-r--r--  Documentation/i2c/instantiating-devices.rst | 10
-rw-r--r--  Documentation/i2c/writing-clients.rst | 8
-rw-r--r--  Documentation/index.rst | 1
-rw-r--r--  Documentation/kbuild/makefiles.rst | 17
-rw-r--r--  Documentation/kbuild/modules.rst | 13
-rw-r--r--  Documentation/maintainer/configure-git.rst | 30
-rw-r--r--  Documentation/maintainer/index.rst | 1
-rw-r--r--  Documentation/maintainer/maintainer-entry-profile.rst | 102
-rw-r--r--  Documentation/media/cec.h.rst.exceptions | 89
-rw-r--r--  Documentation/media/kapi/v4l2-controls.rst | 9
-rw-r--r--  Documentation/media/uapi/cec/cec-funcs.rst | 1
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst | 6
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst | 105
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 8
-rw-r--r--  Documentation/media/uapi/mediactl/request-api.rst | 4
-rw-r--r--  Documentation/media/uapi/v4l/biblio.rst | 9
-rw-r--r--  Documentation/media/uapi/v4l/buffer.rst | 13
-rw-r--r--  Documentation/media/uapi/v4l/dev-mem2mem.rst | 1
-rw-r--r--  Documentation/media/uapi/v4l/dev-stateless-decoder.rst | 424
-rw-r--r--  Documentation/media/uapi/v4l/ext-ctrls-codec.rst | 569
-rw-r--r--  Documentation/media/uapi/v4l/ext-ctrls-flash.rst | 2
-rw-r--r--  Documentation/media/uapi/v4l/ext-ctrls-image-source.rst | 10
-rw-r--r--  Documentation/media/uapi/v4l/meta-formats.rst | 1
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-compressed.rst | 35
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst | 60
-rw-r--r--  Documentation/media/uapi/v4l/v4l2-selection-targets.rst | 4
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst | 10
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst | 5
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-g-fbuf.rst | 2
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-queryctrl.rst | 24
-rw-r--r--  Documentation/media/uapi/v4l/vidioc-reqbufs.rst | 6
-rw-r--r--  Documentation/media/v4l-drivers/imx.rst | 75
-rw-r--r--  Documentation/media/v4l-drivers/ipu3.rst | 53
-rw-r--r--  Documentation/media/v4l-drivers/ipu3_rcb.svg | 331
-rw-r--r--  Documentation/media/v4l-drivers/vimc.rst | 16
-rw-r--r--  Documentation/media/videodev2.h.rst.exceptions | 5
-rw-r--r--  Documentation/memory-barriers.txt | 11
-rw-r--r--  Documentation/mips/ingenic-tcu.rst | 2
-rw-r--r--  Documentation/misc-devices/xilinx_sdfec.rst | 291
-rw-r--r--  Documentation/networking/device_drivers/mellanox/mlx5.rst | 2
-rw-r--r--  Documentation/networking/devlink-trap.rst | 2
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 9
-rw-r--r--  Documentation/networking/phy.rst | 2
-rw-r--r--  Documentation/networking/ppp_generic.txt | 2
-rw-r--r--  Documentation/nvdimm/maintainer-entry-profile.rst | 59
-rw-r--r--  Documentation/power/drivers-testing.rst | 7
-rw-r--r--  Documentation/power/freezing-of-tasks.rst | 37
-rw-r--r--  Documentation/power/opp.rst | 32
-rw-r--r--  Documentation/power/pci.rst | 50
-rw-r--r--  Documentation/power/pm_qos_interface.rst | 26
-rw-r--r--  Documentation/power/runtime_pm.rst | 4
-rw-r--r--  Documentation/power/suspend-and-cpuhotplug.rst | 7
-rw-r--r--  Documentation/power/swsusp.rst | 14
-rw-r--r--  Documentation/powerpc/index.rst | 1
-rw-r--r--  Documentation/powerpc/kaslr-booke32.rst | 42
-rw-r--r--  Documentation/process/botching-up-ioctls.rst (renamed from Documentation/ioctl/botching-up-ioctls.rst) | 2
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst | 2
-rw-r--r--  Documentation/process/index.rst | 2
-rw-r--r--  Documentation/process/magic-number.rst | 1
-rw-r--r--  Documentation/process/maintainers.rst | 1
-rw-r--r--  Documentation/process/submitting-patches.rst | 53
-rw-r--r--  Documentation/riscv/boot-image-header.rst | 2
-rw-r--r--  Documentation/scheduler/sched-stats.rst | 4
-rw-r--r--  Documentation/scsi/scsi_mid_low_api.txt | 3
-rw-r--r--  Documentation/security/keys/core.rst | 2
-rw-r--r--  Documentation/security/lsm.rst | 2
-rw-r--r--  Documentation/sound/kernel-api/writing-an-alsa-driver.rst | 222
-rw-r--r--  Documentation/sphinx-static/theme_overrides.css | 10
-rw-r--r--  Documentation/sphinx/kerneldoc.py | 17
-rwxr-xr-x  Documentation/sphinx/maintainers_include.py | 197
-rw-r--r--  Documentation/sphinx/parallel-wrapper.sh | 33
-rw-r--r--  Documentation/trace/coresight/coresight-cpu-debug.rst (renamed from Documentation/trace/coresight-cpu-debug.rst) | 0
-rw-r--r--  Documentation/trace/coresight/coresight-etm4x-reference.rst | 798
-rw-r--r--  Documentation/trace/coresight/coresight.rst (renamed from Documentation/trace/coresight.rst) | 2
-rw-r--r--  Documentation/trace/coresight/index.rst | 9
-rw-r--r--  Documentation/trace/ftrace-uses.rst | 10
-rw-r--r--  Documentation/trace/ftrace.rst | 4
-rw-r--r--  Documentation/trace/index.rst | 3
-rw-r--r--  Documentation/trace/intel_th.rst | 28
-rw-r--r--  Documentation/translations/it_IT/process/magic-number.rst | 1
-rw-r--r--  Documentation/translations/it_IT/process/maintainer-pgp-guide.rst | 2
-rw-r--r--  Documentation/translations/ko_KR/howto.rst | 56
-rw-r--r--  Documentation/translations/ko_KR/index.rst | 4
-rw-r--r--  Documentation/translations/ko_KR/memory-barriers.txt | 227
-rw-r--r--  Documentation/translations/zh_CN/process/magic-number.rst | 1
-rw-r--r--  Documentation/userspace-api/index.rst | 1
-rw-r--r--  Documentation/userspace-api/ioctl/cdrom.rst (renamed from Documentation/ioctl/cdrom.rst) | 0
-rw-r--r--  Documentation/userspace-api/ioctl/hdio.rst (renamed from Documentation/ioctl/hdio.rst) | 0
-rw-r--r--  Documentation/userspace-api/ioctl/index.rst (renamed from Documentation/ioctl/index.rst) | 1
-rw-r--r--  Documentation/userspace-api/ioctl/ioctl-decoding.rst (renamed from Documentation/ioctl/ioctl-decoding.rst) | 0
-rw-r--r--  Documentation/userspace-api/ioctl/ioctl-number.rst (renamed from Documentation/ioctl/ioctl-number.rst) | 0
-rw-r--r--  Documentation/vm/hmm.rst | 105
-rw-r--r--  Documentation/w1/index.rst | 2
-rw-r--r--  MAINTAINERS | 313
-rw-r--r--  Makefile | 121
-rw-r--r--  arch/Kconfig | 28
-rw-r--r--  arch/alpha/include/asm/io.h | 6
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 67
-rw-r--r--  arch/alpha/kernel/pci-sysfs.c | 8
-rw-r--r--  arch/alpha/kernel/syscalls/syscall.tbl | 4
-rw-r--r--  arch/arc/Kconfig | 1
-rw-r--r--  arch/arc/configs/nps_defconfig | 1
-rw-r--r--  arch/arc/configs/tb10x_defconfig | 1
-rw-r--r--  arch/arc/include/asm/Kbuild | 1
-rw-r--r--  arch/arc/include/asm/io.h | 4
-rw-r--r--  arch/arc/include/asm/pgtable.h | 1
-rw-r--r--  arch/arc/mm/dma.c | 8
-rw-r--r--  arch/arc/mm/fault.c | 10
-rw-r--r--  arch/arc/mm/highmem.c | 4
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/boot/bootp/init.S | 2
-rw-r--r--  arch/arm/boot/compressed/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/atags_to_fdt.c | 12
-rw-r--r--  arch/arm/boot/compressed/big-endian.S | 2
-rw-r--r--  arch/arm/boot/compressed/head.S | 4
-rw-r--r--  arch/arm/boot/compressed/libfdt_env.h | 4
-rw-r--r--  arch/arm/boot/compressed/piggy.S | 2
-rw-r--r--  arch/arm/boot/dts/am3517.dtsi | 31
-rw-r--r--  arch/arm/boot/dts/am3517_mt_ventoux.dts | 2
-rw-r--r--  arch/arm/boot/dts/aspeed-g5.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts | 2
-rw-r--r--  arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts | 2
-rw-r--r--  arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/omap3-beagle-xm.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-cm-t3530.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-cm-t3730.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-devkit8000-lcd43.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-devkit8000-lcd70.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-devkit8000.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-gta04.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-ha-lcd.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-ha.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020-rev-f.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0030-rev-g.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0030.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-ldp.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-lilly-a83x.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-lilly-dbb056.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-n9.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-n950-n9.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/omap3-n950.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-alto35.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-gallop43.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-palo35.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-palo43.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-summit.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-tobi.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-pandora-1ghz.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-pandora-common.dtsi | 36
-rw-r--r--  arch/arm/boot/dts/omap3-sbc-t3530.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-sbc-t3730.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-sniper.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-thunder.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-zoom3.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3430-sdp.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap34xx.dtsi | 66
-rw-r--r--  arch/arm/boot/dts/omap36xx-clocks.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/omap36xx.dtsi | 65
-rw-r--r--  arch/arm/boot/dts/omap3xxx-clocks.dtsi | 2
-rw-r--r--  arch/arm/configs/axm55xx_defconfig | 1
-rw-r--r--  arch/arm/configs/keystone_defconfig | 1
-rw-r--r--  arch/arm/configs/lpc32xx_defconfig | 1
-rw-r--r--  arch/arm/configs/moxart_defconfig | 1
-rw-r--r--  arch/arm/configs/qcom_defconfig | 1
-rw-r--r--  arch/arm/configs/zx_defconfig | 1
-rw-r--r--  arch/arm/crypto/chacha-glue.c | 26
-rw-r--r--  arch/arm/crypto/curve25519-glue.c | 5
-rw-r--r--  arch/arm/crypto/poly1305-glue.c | 9
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h | 2
-rw-r--r--  arch/arm/include/asm/dma-direct.h | 19
-rw-r--r--  arch/arm/include/asm/ftrace.h | 4
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h | 3
-rw-r--r--  arch/arm/include/asm/io.h | 7
-rw-r--r--  arch/arm/include/asm/pci.h | 2
-rw-r--r--  arch/arm/include/asm/vdso/gettimeofday.h | 94
-rw-r--r--  arch/arm/include/asm/vdso/vsyscall.h | 71
-rw-r--r--  arch/arm/include/asm/vdso_datapage.h | 29
-rw-r--r--  arch/arm/kernel/Makefile | 6
-rw-r--r--  arch/arm/kernel/arch_timer.c | 1
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 3
-rw-r--r--  arch/arm/kernel/module-plts.c | 1
-rw-r--r--  arch/arm/kernel/process.c | 2
-rw-r--r--  arch/arm/kernel/psci_smp.c | 6
-rw-r--r--  arch/arm/kernel/ptrace.c | 2
-rw-r--r--  arch/arm/kernel/return_address.c | 4
-rw-r--r--  arch/arm/kernel/signal.h | 2
-rw-r--r--  arch/arm/kernel/tcm.c | 5
-rw-r--r--  arch/arm/kernel/time.c | 2
-rw-r--r--  arch/arm/kernel/topology.c | 2
-rw-r--r--  arch/arm/kernel/vdso.c | 87
-rw-r--r--  arch/arm/mach-footbridge/dc21285.c | 1
-rw-r--r--  arch/arm/mach-imx/cpuidle-imx6q.c | 4
-rw-r--r--  arch/arm/mach-omap2/Makefile | 3
-rw-r--r--  arch/arm/mach-omap2/common.h | 1
-rw-r--r--  arch/arm/mach-omap2/hsmmc.c | 171
-rw-r--r--  arch/arm/mach-omap2/hsmmc.h | 32
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 105
-rw-r--r--  arch/arm/mach-pxa/include/mach/tosa.h | 15
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 22
-rw-r--r--  arch/arm/mach-tegra/cpuidle-tegra20.c | 2
-rw-r--r--  arch/arm/mach-ux500/cpu-db8500.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 3
-rw-r--r--  arch/arm/mm/dma-mapping-nommu.c | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 48
-rw-r--r--  arch/arm/mm/init.c | 7
-rw-r--r--  arch/arm/mm/iomap.c | 2
-rw-r--r--  arch/arm/mm/ioremap.c | 4
-rw-r--r--  arch/arm/mm/mmu.c | 2
-rw-r--r--  arch/arm/mm/nommu.c | 4
-rw-r--r--  arch/arm/mm/proc-arm1020.S | 2
-rw-r--r--  arch/arm/mm/proc-arm1020e.S | 2
-rw-r--r--  arch/arm/mm/proc-arm1022.S | 2
-rw-r--r--  arch/arm/mm/proc-arm1026.S | 6
-rw-r--r--  arch/arm/mm/proc-arm720.S | 2
-rw-r--r--  arch/arm/mm/proc-arm740.S | 2
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S | 2
-rw-r--r--  arch/arm/mm/proc-arm920.S | 2
-rw-r--r--  arch/arm/mm/proc-arm922.S | 2
-rw-r--r--  arch/arm/mm/proc-arm925.S | 2
-rw-r--r--  arch/arm/mm/proc-arm926.S | 6
-rw-r--r--  arch/arm/mm/proc-arm940.S | 2
-rw-r--r--  arch/arm/mm/proc-arm946.S | 2
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S | 2
-rw-r--r--  arch/arm/mm/proc-fa526.S | 2
-rw-r--r--  arch/arm/mm/proc-feroceon.S | 2
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 2
-rw-r--r--  arch/arm/mm/proc-sa110.S | 2
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 2
-rw-r--r--  arch/arm/mm/proc-v6.S | 2
-rw-r--r--  arch/arm/mm/proc-v7-bugs.c | 3
-rw-r--r--  arch/arm/mm/proc-v7.S | 2
-rw-r--r--  arch/arm/mm/proc-v7m.S | 4
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 2
-rw-r--r--  arch/arm/mm/proc-xscale.S | 2
-rw-r--r--  arch/arm/vdso/Makefile | 18
-rw-r--r--  arch/arm/vdso/note.c (renamed from arch/s390/kernel/vdso32/note.S) | 10
-rw-r--r--  arch/arm/vdso/vdso.lds.S | 2
-rw-r--r--  arch/arm/vdso/vgettimeofday.c | 255
-rw-r--r--  arch/arm/xen/mm.c | 12
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts | 1
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi | 33
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dts | 25
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b-s922x-khadas-vim3.dts | 25
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts | 25
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1.dtsi | 4
-rw-r--r--  arch/arm64/crypto/chacha-neon-glue.c | 5
-rw-r--r--  arch/arm64/crypto/poly1305-glue.c | 5
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/io.h | 2
-rw-r--r--  arch/arm64/kernel/ptrace.c | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 8
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/c6x/mm/dma-coherent.c | 14
-rw-r--r--  arch/csky/Kconfig | 1
-rw-r--r--  arch/csky/include/asm/io.h | 11
-rw-r--r--  arch/csky/include/asm/pgtable.h | 4
-rw-r--r--  arch/csky/mm/dma-mapping.c | 8
-rw-r--r--  arch/csky/mm/ioremap.c | 52
-rw-r--r--  arch/hexagon/include/asm/io.h | 18
-rw-r--r--  arch/hexagon/include/uapi/asm/bitsperlong.h | 27
-rw-r--r--  arch/hexagon/kernel/dma.c | 4
-rw-r--r--  arch/hexagon/kernel/hexagon_ksyms.c | 4
-rw-r--r--  arch/hexagon/mm/ioremap.c | 4
-rw-r--r--  arch/ia64/Kconfig | 2
-rw-r--r--  arch/ia64/include/asm/io.h | 5
-rw-r--r--  arch/ia64/include/asm/iommu.h | 5
-rw-r--r--  arch/ia64/include/uapi/asm/errno.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/ioctl.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/ioctls.h | 7
-rw-r--r--  arch/ia64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/ia64/kernel/dma-mapping.c | 6
-rw-r--r--  arch/ia64/mm/init.c | 4
-rw-r--r--  arch/ia64/mm/ioremap.c | 4
-rw-r--r--  arch/m68k/configs/m5475evb_defconfig | 1
-rw-r--r--  arch/m68k/include/asm/kmap.h | 1
-rw-r--r--  arch/m68k/kernel/dma.c | 4
-rw-r--r--  arch/m68k/mm/kmap.c | 100
-rw-r--r--  arch/microblaze/Kconfig | 2
-rw-r--r--  arch/microblaze/configs/mmu_defconfig | 3
-rw-r--r--  arch/microblaze/include/asm/io.h | 3
-rw-r--r--  arch/microblaze/include/asm/irq.h | 1
-rw-r--r--  arch/microblaze/kernel/dma.c | 14
-rw-r--r--  arch/microblaze/kernel/entry.S | 5
-rw-r--r--  arch/microblaze/kernel/head.S | 2
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/mips/Kconfig | 5
-rw-r--r--  arch/mips/bmips/dma.c | 2
-rw-r--r--  arch/mips/configs/ci20_defconfig | 1
-rw-r--r--  arch/mips/configs/loongson3_defconfig | 1
-rw-r--r--  arch/mips/configs/malta_qemu_32r6_defconfig | 1
-rw-r--r--  arch/mips/configs/maltaaprp_defconfig | 1
-rw-r--r--  arch/mips/configs/maltasmvp_defconfig | 1
-rw-r--r--  arch/mips/configs/maltasmvp_eva_defconfig | 1
-rw-r--r--  arch/mips/configs/maltaup_defconfig | 1
-rw-r--r--  arch/mips/configs/omega2p_defconfig | 1
-rw-r--r--  arch/mips/configs/qi_lb60_defconfig | 1
-rw-r--r--  arch/mips/configs/vocore2_defconfig | 1
-rw-r--r--  arch/mips/crypto/chacha-glue.c | 6
-rw-r--r--  arch/mips/crypto/poly1305-glue.c | 6
-rw-r--r--  arch/mips/include/asm/Kbuild | 1
-rw-r--r--  arch/mips/include/asm/dma-direct.h | 8
-rw-r--r--  arch/mips/include/uapi/asm/msgbuf.h | 6
-rw-r--r--  arch/mips/include/uapi/asm/sembuf.h | 4
-rw-r--r--  arch/mips/include/uapi/asm/shmbuf.h | 6
-rw-r--r--  arch/mips/include/uapi/asm/stat.h | 16
-rw-r--r--  arch/mips/jazz/jazzdma.c | 17
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c | 4
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 4
-rw-r--r--  arch/mips/mm/dma-noncoherent.c | 18
-rw-r--r--  arch/mips/pci/fixup-sb1250.c | 16
-rw-r--r--  arch/mips/ralink/Kconfig | 1
-rw-r--r--  arch/mips/sgi-ip32/ip32-platform.c | 2
-rw-r--r--  arch/nds32/Kconfig | 1
-rw-r--r--  arch/nds32/Kconfig.cpu | 8
-rw-r--r--  arch/nds32/boot/dts/Makefile | 2
-rw-r--r--  arch/nds32/include/asm/io.h | 3
-rw-r--r--  arch/nds32/include/asm/pgtable.h | 4
-rw-r--r--  arch/nds32/kernel/dma.c | 8
-rw-r--r--  arch/nds32/kernel/perf_event_cpu.c | 2
-rw-r--r--  arch/nds32/kernel/vdso/gettimeofday.c | 61
-rw-r--r--  arch/nds32/mm/Makefile | 3
-rw-r--r--  arch/nds32/mm/ioremap.c | 62
-rw-r--r--  arch/nios2/configs/10m50_defconfig | 1
-rw-r--r--  arch/nios2/configs/3c120_defconfig | 1
-rw-r--r--  arch/nios2/include/asm/io.h | 25
-rw-r--r--  arch/nios2/mm/dma-mapping.c | 8
-rw-r--r--  arch/nios2/mm/ioremap.c | 23
-rw-r--r--  arch/openrisc/Kconfig | 26
-rw-r--r--  arch/openrisc/include/asm/io.h | 1
-rw-r--r--  arch/openrisc/kernel/dma.c | 2
-rw-r--r--  arch/parisc/configs/c8000_defconfig | 1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 1
-rw-r--r--  arch/parisc/include/asm/checksum.h | 101
-rw-r--r--  arch/parisc/include/asm/io.h | 11
-rw-r--r--  arch/parisc/include/uapi/asm/msgbuf.h | 6
-rw-r--r--  arch/parisc/include/uapi/asm/sembuf.h | 4
-rw-r--r--  arch/parisc/include/uapi/asm/shmbuf.h | 6
-rw-r--r--  arch/parisc/kernel/cache.c | 2
-rw-r--r--  arch/parisc/kernel/pci-dma.c | 8
-rw-r--r--  arch/parisc/kernel/ptrace.c | 2
-rw-r--r--  arch/parisc/mm/ioremap.c | 10
-rw-r--r--  arch/powerpc/Kbuild | 1
-rw-r--r--  arch/powerpc/Kconfig | 54
-rw-r--r--  arch/powerpc/Kconfig.debug | 18
-rw-r--r--  arch/powerpc/Makefile | 16
-rw-r--r--  arch/powerpc/boot/dts/fsl/kmcent2.dts | 52
-rw-r--r--  arch/powerpc/boot/libfdt_env.h | 2
-rw-r--r--  arch/powerpc/configs/40x/acadia_defconfig | 3
-rw-r--r--  arch/powerpc/configs/40x/ep405_defconfig | 3
-rw-r--r--  arch/powerpc/configs/40x/kilauea_defconfig | 3
-rw-r--r--  arch/powerpc/configs/40x/klondike_defconfig | 1
-rw-r--r--  arch/powerpc/configs/40x/makalu_defconfig | 3
-rw-r--r--  arch/powerpc/configs/40x/obs600_defconfig | 3
-rw-r--r--  arch/powerpc/configs/40x/walnut_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/akebono_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/arches_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/bamboo_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/canyonlands_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/currituck_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/ebony_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/eiger_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/fsp2_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/icon_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/iss476-smp_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/katmai_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/rainier_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/redwood_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/sam440ep_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/sequoia_defconfig | 3
-rw-r--r--  arch/powerpc/configs/44x/taishan_defconfig | 3
-rw-r--r--  arch/powerpc/configs/52xx/pcm030_defconfig | 3
-rw-r--r--  arch/powerpc/configs/83xx/kmeter1_defconfig | 3
-rw-r--r--  arch/powerpc/configs/83xx/mpc837x_rdb_defconfig | 3
-rw-r--r--  arch/powerpc/configs/85xx/ge_imp3a_defconfig | 1
-rw-r--r--  arch/powerpc/configs/adder875_defconfig | 3
-rw-r--r--  arch/powerpc/configs/amigaone_defconfig | 3
-rw-r--r--  arch/powerpc/configs/cell_defconfig | 2
-rw-r--r--  arch/powerpc/configs/chrp32_defconfig | 3
-rw-r--r--  arch/powerpc/configs/corenet_base.config (renamed from arch/powerpc/configs/corenet_basic_defconfig) | 0
-rw-r--r--  arch/powerpc/configs/debug.config | 1
-rw-r--r--  arch/powerpc/configs/ep88xc_defconfig | 3
-rw-r--r--  arch/powerpc/configs/gamecube_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc512x_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc5200_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_base.config (renamed from arch/powerpc/configs/mpc85xx_basic_defconfig) | 0
-rw-r--r--  arch/powerpc/configs/mpc86xx_base.config (renamed from arch/powerpc/configs/mpc86xx_basic_defconfig) | 0
-rw-r--r--  arch/powerpc/configs/mpc885_ads_defconfig | 3
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig | 2
-rw-r--r--  arch/powerpc/configs/powernv_defconfig | 3
-rw-r--r--  arch/powerpc/configs/ppc44x_defconfig | 3
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 4
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 3
-rw-r--r--  arch/powerpc/configs/skiroot_defconfig | 4
-rw-r--r--  arch/powerpc/configs/storcenter_defconfig | 3
-rw-r--r--  arch/powerpc/configs/tqm8xx_defconfig | 3
-rw-r--r--  arch/powerpc/configs/wii_defconfig | 3
-rw-r--r--  arch/powerpc/crypto/crc-vpmsum_test.c | 1
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 3
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h | 6
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgalloc.h | 15
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 3
-rw-r--r--  arch/powerpc/include/asm/book3s/64/tlbflush.h | 16
-rw-r--r--  arch/powerpc/include/asm/bug.h | 41
-rw-r--r--  arch/powerpc/include/asm/cache.h | 55
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h | 36
-rw-r--r--  arch/powerpc/include/asm/dma-direct.h | 13
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 18
-rw-r--r--  arch/powerpc/include/asm/fixmap.h | 26
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h | 9
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 57
-rw-r--r--  arch/powerpc/include/asm/nohash/32/kup-8xx.h | 1
-rw-r--r--  arch/powerpc/include/asm/nohash/mmu-book3e.h | 11
-rw-r--r--  arch/powerpc/include/asm/opal-api.h | 5
-rw-r--r--  arch/powerpc/include/asm/opal.h | 7
-rw-r--r--  arch/powerpc/include/asm/page.h | 7
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg.h | 26
-rw-r--r--  arch/powerpc/include/asm/reg_8xx.h | 4
-rw-r--r--  arch/powerpc/include/asm/sections.h | 14
-rw-r--r--  arch/powerpc/include/asm/secure_boot.h | 29
-rw-r--r--  arch/powerpc/include/asm/security_features.h | 11
-rw-r--r--  arch/powerpc/include/asm/secvar.h | 35
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/msgbuf.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/sembuf.h | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/shmbuf.h | 6
-rw-r--r--  arch/powerpc/include/uapi/asm/spu_info.h | 14
-rw-r--r--  arch/powerpc/include/uapi/asm/stat.h | 2
-rw-r--r--  arch/powerpc/kernel/Makefile | 28
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 18
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S | 2
-rw-r--r--  arch/powerpc/kernel/dawr.c | 6
-rw-r--r--  arch/powerpc/kernel/early_32.c | 9
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 22
-rw-r--r--  arch/powerpc/kernel/eeh_sysfs.c | 18
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 6
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 12
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 6
-rw-r--r--  arch/powerpc/kernel/fadump.c | 15
-rw-r--r--  arch/powerpc/kernel/fsl_booke_entry_mapping.S | 25
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 61
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 119
-rw-r--r--  arch/powerpc/kernel/ima_arch.c | 78
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 611
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 109
-rw-r--r--  arch/powerpc/kernel/process.c | 3
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 38
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 85
-rw-r--r--  arch/powerpc/kernel/secure_boot.c | 50
-rw-r--r--  arch/powerpc/kernel/security.c | 106
-rw-r--r--  arch/powerpc/kernel/secvar-ops.c | 17
-rw-r--r--  arch/powerpc/kernel/secvar-sysfs.c | 248
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 20
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 3
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 29
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 7
-rw-r--r--  arch/powerpc/kernel/traps.c | 15
-rw-r--r--  arch/powerpc/kernel/udbg.c | 14
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 6
-rw-r--r--  arch/powerpc/kernel/vdso64/cacheflush.S | 4
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 8
-rw-r--r--  arch/powerpc/kexec/Makefile | 25
-rw-r--r--  arch/powerpc/kexec/core.c (renamed from arch/powerpc/kernel/machine_kexec.c) | 1
-rw-r--r--  arch/powerpc/kexec/core_32.c (renamed from arch/powerpc/kernel/machine_kexec_32.c) | 0
-rw-r--r--  arch/powerpc/kexec/core_64.c (renamed from arch/powerpc/kernel/machine_kexec_64.c) | 0
-rw-r--r--  arch/powerpc/kexec/crash.c (renamed from arch/powerpc/kernel/crash.c) | 0
-rw-r--r--  arch/powerpc/kexec/elf_64.c (renamed from arch/powerpc/kernel/kexec_elf_64.c) | 0
-rw-r--r--  arch/powerpc/kexec/file_load.c (renamed from arch/powerpc/kernel/machine_kexec_file_64.c) | 0
-rw-r--r--  arch/powerpc/kexec/ima.c (renamed from arch/powerpc/kernel/ima_kexec.c) | 0
-rw-r--r--  arch/powerpc/kexec/relocate_32.S | 500
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 30
-rw-r--r--  arch/powerpc/mm/book3s32/mmu.c | 11
-rw-r--r--  arch/powerpc/mm/book3s64/hash_native.c | 38
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c | 19
-rw-r--r--  arch/powerpc/mm/book3s64/pkeys.c | 10
-rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c | 1
-rw-r--r--  arch/powerpc/mm/book3s64/radix_tlb.c | 80
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c | 8
-rw-r--r--  arch/powerpc/mm/fault.c | 6
-rw-r--r--  arch/powerpc/mm/init-common.c | 7
-rw-r--r--  arch/powerpc/mm/init_32.c | 5
-rw-r--r--  arch/powerpc/mm/init_64.c | 59
-rw-r--r--  arch/powerpc/mm/ioremap_32.c | 1
-rw-r--r--  arch/powerpc/mm/ioremap_64.c | 2
-rw-r--r--  arch/powerpc/mm/mem.c | 183
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 11
-rw-r--r--  arch/powerpc/mm/nohash/8xx.c | 52
-rw-r--r--  arch/powerpc/mm/nohash/Makefile | 1
-rw-r--r--  arch/powerpc/mm/nohash/fsl_booke.c | 8
-rw-r--r--  arch/powerpc/mm/nohash/kaslr_booke.c | 401
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 5
-rw-r--r--  arch/powerpc/perf/callchain.c | 17
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 1
-rw-r--r--  arch/powerpc/platforms/83xx/misc.c | 11
-rw-r--r--  arch/powerpc/platforms/83xx/mpc836x_mds.c | 7
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 23
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx.h | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 7
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/twr_p102x.c | 1
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 4
-rw-r--r--  arch/powerpc/platforms/8xx/cpm1.c | 18
-rw-r--r--  arch/powerpc/platforms/8xx/pic.c | 2
-rw-r--r--  arch/powerpc/platforms/Kconfig | 10
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 11
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/powernv/opal-call.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-powercap.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-psr.c | 4
-rw-r--r--arch/powerpc/platforms/powernv/opal-secvar.c140
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor-groups.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c89
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c10
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c8
-rw-r--r--arch/powerpc/platforms/powernv/pci.c17
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c431
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c38
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c244
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c6
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c12
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c33
-rw-r--r--arch/powerpc/platforms/pseries/of_helpers.c8
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c30
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c18
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c23
-rw-r--r--arch/powerpc/platforms/pseries/ras.c2
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c6
-rw-r--r--arch/powerpc/sysdev/simple_gpio.c143
-rw-r--r--arch/powerpc/sysdev/simple_gpio.h13
-rw-r--r--arch/powerpc/sysdev/xive/common.c9
-rwxr-xr-xarch/powerpc/tools/relocs_check.sh2
-rwxr-xr-xarch/powerpc/tools/unrel_branch_check.sh4
-rw-r--r--arch/powerpc/xmon/Makefile4
-rw-r--r--arch/powerpc/xmon/xmon.c121
-rw-r--r--arch/riscv/Kconfig51
-rw-r--r--arch/riscv/Makefile13
-rw-r--r--arch/riscv/boot/Makefile19
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi7
-rw-r--r--arch/riscv/boot/loader.S8
-rw-r--r--arch/riscv/boot/loader.lds.S16
-rw-r--r--arch/riscv/configs/nommu_virt_defconfig78
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h1
-rw-r--r--arch/riscv/include/asm/cache.h8
-rw-r--r--arch/riscv/include/asm/clint.h39
-rw-r--r--arch/riscv/include/asm/csr.h74
-rw-r--r--arch/riscv/include/asm/current.h6
-rw-r--r--arch/riscv/include/asm/elf.h4
-rw-r--r--arch/riscv/include/asm/fixmap.h2
-rw-r--r--arch/riscv/include/asm/ftrace.h5
-rw-r--r--arch/riscv/include/asm/futex.h12
-rw-r--r--arch/riscv/include/asm/hwcap.h7
-rw-r--r--arch/riscv/include/asm/image.h6
-rw-r--r--arch/riscv/include/asm/io.h149
-rw-r--r--arch/riscv/include/asm/irqflags.h12
-rw-r--r--arch/riscv/include/asm/kprobes.h6
-rw-r--r--arch/riscv/include/asm/mmio.h155
-rw-r--r--arch/riscv/include/asm/mmiowb.h2
-rw-r--r--arch/riscv/include/asm/mmu.h3
-rw-r--r--arch/riscv/include/asm/page.h10
-rw-r--r--arch/riscv/include/asm/pci.h6
-rw-r--r--arch/riscv/include/asm/pgalloc.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h100
-rw-r--r--arch/riscv/include/asm/processor.h2
-rw-r--r--arch/riscv/include/asm/ptrace.h16
-rw-r--r--arch/riscv/include/asm/sbi.h11
-rw-r--r--arch/riscv/include/asm/seccomp.h10
-rw-r--r--arch/riscv/include/asm/sparsemem.h6
-rw-r--r--arch/riscv/include/asm/spinlock_types.h2
-rw-r--r--arch/riscv/include/asm/switch_to.h10
-rw-r--r--arch/riscv/include/asm/thread_info.h5
-rw-r--r--arch/riscv/include/asm/timex.h19
-rw-r--r--arch/riscv/include/asm/tlbflush.h12
-rw-r--r--arch/riscv/include/asm/uaccess.h4
-rw-r--r--arch/riscv/include/uapi/asm/elf.h6
-rw-r--r--arch/riscv/include/uapi/asm/hwcap.h6
-rw-r--r--arch/riscv/include/uapi/asm/ucontext.h6
-rw-r--r--arch/riscv/kernel/Makefile5
-rw-r--r--arch/riscv/kernel/asm-offsets.c8
-rw-r--r--arch/riscv/kernel/clint.c44
-rw-r--r--arch/riscv/kernel/cpu.c45
-rw-r--r--arch/riscv/kernel/entry.S112
-rw-r--r--arch/riscv/kernel/fpu.S8
-rw-r--r--arch/riscv/kernel/head.S112
-rw-r--r--arch/riscv/kernel/irq.c17
-rw-r--r--arch/riscv/kernel/perf_callchain.c2
-rw-r--r--arch/riscv/kernel/process.c17
-rw-r--r--arch/riscv/kernel/ptrace.c10
-rw-r--r--arch/riscv/kernel/reset.c5
-rw-r--r--arch/riscv/kernel/sbi.c17
-rw-r--r--arch/riscv/kernel/setup.c2
-rw-r--r--arch/riscv/kernel/signal.c38
-rw-r--r--arch/riscv/kernel/smp.c16
-rw-r--r--arch/riscv/kernel/smpboot.c4
-rw-r--r--arch/riscv/kernel/traps.c16
-rw-r--r--arch/riscv/lib/Makefile11
-rw-r--r--arch/riscv/lib/uaccess.S12
-rw-r--r--arch/riscv/mm/Makefile3
-rw-r--r--arch/riscv/mm/cacheflush.c26
-rw-r--r--arch/riscv/mm/context.c2
-rw-r--r--arch/riscv/mm/extable.c4
-rw-r--r--arch/riscv/mm/fault.c6
-rw-r--r--arch/riscv/mm/init.c28
-rw-r--r--arch/riscv/mm/ioremap.c84
-rw-r--r--arch/riscv/mm/tlbflush.c25
-rw-r--r--arch/s390/Kconfig18
-rw-r--r--arch/s390/Makefile1
-rw-r--r--arch/s390/boot/startup.c5
-rw-r--r--arch/s390/include/asm/cpu_mf.h2
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/pci.h5
-rw-r--r--arch/s390/include/asm/pci_clp.h6
-rw-r--r--arch/s390/include/asm/perf_event.h7
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/stacktrace.h36
-rw-r--r--arch/s390/include/asm/unwind.h8
-rw-r--r--arch/s390/include/asm/vdso.h13
-rw-r--r--arch/s390/kernel/Makefile1
-rw-r--r--arch/s390/kernel/asm-offsets.c3
-rw-r--r--arch/s390/kernel/dumpstack.c7
-rw-r--r--arch/s390/kernel/head64.S2
-rw-r--r--arch/s390/kernel/machine_kexec.c4
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c136
-rw-r--r--arch/s390/kernel/ptrace.c2
-rw-r--r--arch/s390/kernel/setup.c9
-rw-r--r--arch/s390/kernel/smp.c7
-rw-r--r--arch/s390/kernel/stacktrace.c43
-rw-r--r--arch/s390/kernel/unwind_bc.c80
-rw-r--r--arch/s390/kernel/vdso.c42
-rw-r--r--arch/s390/kernel/vdso32/.gitignore1
-rw-r--r--arch/s390/kernel/vdso32/Makefile66
-rw-r--r--arch/s390/kernel/vdso32/clock_getres.S44
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S179
-rw-r--r--arch/s390/kernel/vdso32/getcpu.S33
-rw-r--r--arch/s390/kernel/vdso32/gettimeofday.S103
-rw-r--r--arch/s390/kernel/vdso32/vdso32.lds.S142
-rw-r--r--arch/s390/kernel/vdso32/vdso32_wrapper.S15
-rw-r--r--arch/s390/kernel/vdso64/getcpu.S4
-rw-r--r--arch/s390/lib/Makefile3
-rw-r--r--arch/s390/lib/test_unwind.c347
-rw-r--r--arch/s390/mm/maccess.c12
-rw-r--r--arch/s390/pci/pci.c21
-rw-r--r--arch/s390/pci/pci_clp.c6
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c33
-rw-r--r--arch/sh/boot/compressed/misc.c5
-rw-r--r--arch/sh/configs/rsk7264_defconfig1
-rw-r--r--arch/sh/drivers/Makefile2
-rw-r--r--arch/sh/drivers/platform_early.c347
-rw-r--r--arch/sh/include/asm/io.h9
-rw-r--r--arch/sh/include/asm/platform_early.h61
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7734.h2
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c3
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-mxg.c3
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7201.c3
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7203.c3
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7206.c3
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7264.c3
-rw-r--r--arch/sh/kernel/cpu/sh2a/setup-sh7269.c3
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh3.c1
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7705.c3
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh770x.c3
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7710.c3
-rw-r--r--arch/sh/kernel/cpu/sh3/setup-sh7720.c3
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh4-202.c3
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c9
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7760.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7734.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7763.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7770.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7780.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7785.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-shx3.c3
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c3
-rw-r--r--arch/sh/kernel/dma-coherent.c6
-rw-r--r--arch/sh/kernel/setup.c3
-rw-r--r--arch/sh/kernel/time.c5
-rw-r--r--arch/sh/mm/ioremap.c4
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/io_32.h1
-rw-r--r--arch/sparc/include/asm/io_64.h1
-rw-r--r--arch/sparc/include/uapi/asm/msgbuf.h6
-rw-r--r--arch/sparc/include/uapi/asm/sembuf.h4
-rw-r--r--arch/sparc/include/uapi/asm/shmbuf.h6
-rw-r--r--arch/sparc/include/uapi/asm/stat.h24
-rw-r--r--arch/sparc/kernel/ioport.c4
-rw-r--r--arch/sparc/vdso/vclock_gettime.c36
-rw-r--r--arch/um/Kconfig2
-rw-r--r--arch/um/drivers/Kconfig2
-rw-r--r--arch/um/drivers/harddog_kern.c1
-rw-r--r--arch/um/drivers/hostaudio_kern.c1
-rw-r--r--arch/um/drivers/vector_kern.c113
-rw-r--r--arch/um/drivers/vector_kern.h8
-rw-r--r--arch/um/drivers/vector_user.c94
-rw-r--r--arch/um/drivers/vector_user.h8
-rw-r--r--arch/um/drivers/virtio_uml.c76
-rw-r--r--arch/um/kernel/skas/syscall.c2
-rw-r--r--arch/um/os-Linux/main.c2
-rw-r--r--arch/unicore32/include/asm/io.h4
-rw-r--r--arch/unicore32/mm/ioremap.c8
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/Kconfig.debug2
-rw-r--r--arch/x86/boot/compressed/eboot.c6
-rw-r--r--arch/x86/boot/compressed/kaslr.c46
-rw-r--r--arch/x86/crypto/blake2s-glue.c6
-rw-r--r--arch/x86/crypto/chacha_glue.c5
-rw-r--r--arch/x86/crypto/curve25519-x86_64.c7
-rw-r--r--arch/x86/crypto/poly1305_glue.c5
-rw-r--r--arch/x86/entry/entry_32.S43
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c6
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c6
-rw-r--r--arch/x86/events/core.c18
-rw-r--r--arch/x86/hyperv/hv_init.c15
-rw-r--r--arch/x86/include/asm/cpu_entry_area.h12
-rw-r--r--arch/x86/include/asm/device.h3
-rw-r--r--arch/x86/include/asm/dma-direct.h9
-rw-r--r--arch/x86/include/asm/doublefault.h13
-rw-r--r--arch/x86/include/asm/e820/types.h8
-rw-r--r--arch/x86/include/asm/efi.h17
-rw-r--r--arch/x86/include/asm/fpu/internal.h2
-rw-r--r--arch/x86/include/asm/ftrace.h13
-rw-r--r--arch/x86/include/asm/io.h7
-rw-r--r--arch/x86/include/asm/iommu.h18
-rw-r--r--arch/x86/include/asm/mmu_context.h4
-rw-r--r--arch/x86/include/asm/mshyperv.h1
-rw-r--r--arch/x86/include/asm/pgtable_32_types.h7
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/traps.h3
-rw-r--r--arch/x86/include/asm/unwind_hints.h8
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h6
-rw-r--r--arch/x86/include/uapi/asm/sembuf.h4
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h6
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/amd_gart_64.c4
-rw-r--r--arch/x86/kernel/cpu/common.c12
-rw-r--r--arch/x86/kernel/cpu/mce/therm_throt.c17
-rw-r--r--arch/x86/kernel/doublefault.c86
-rw-r--r--arch/x86/kernel/doublefault_32.c136
-rw-r--r--arch/x86/kernel/dumpstack_32.c30
-rw-r--r--arch/x86/kernel/e820.c12
-rw-r--r--arch/x86/kernel/ftrace.c14
-rw-r--r--arch/x86/kernel/ftrace_64.S42
-rw-r--r--arch/x86/kernel/pci-dma.c2
-rw-r--r--arch/x86/kernel/process.c52
-rw-r--r--arch/x86/kernel/ptrace.c36
-rw-r--r--arch/x86/kernel/setup.c18
-rw-r--r--arch/x86/kernel/traps.c31
-rw-r--r--arch/x86/lib/x86-opcode-map.txt44
-rw-r--r--arch/x86/mm/cpu_entry_area.c14
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/ioremap.c8
-rw-r--r--arch/x86/mm/kasan_init_64.c61
-rw-r--r--arch/x86/mm/mem_encrypt.c2
-rw-r--r--arch/x86/mm/pageattr.c4
-rw-r--r--arch/x86/mm/pat_interval.c12
-rw-r--r--arch/x86/pci/Makefile4
-rw-r--r--arch/x86/pci/common.c2
-rw-r--r--arch/x86/pci/fixup.c11
-rw-r--r--arch/x86/pci/intel_mid_pci.c2
-rw-r--r--arch/x86/pci/numachip.c5
-rw-r--r--arch/x86/pci/sta2x11-fixup.c135
-rw-r--r--arch/x86/platform/efi/efi.c54
-rw-r--r--arch/x86/platform/efi/quirks.c3
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-pm.c8
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c6
-rw-r--r--arch/x86/um/vdso/um_vdso.c12
-rw-r--r--arch/xtensa/Kconfig402
-rw-r--r--arch/xtensa/Kconfig.debug7
-rw-r--r--arch/xtensa/Makefile3
-rw-r--r--arch/xtensa/boot/Makefile5
-rw-r--r--arch/xtensa/configs/audio_kc705_defconfig1
-rw-r--r--arch/xtensa/configs/cadence_csp_defconfig1
-rw-r--r--arch/xtensa/configs/generic_kc705_defconfig1
-rw-r--r--arch/xtensa/configs/iss_defconfig1
-rw-r--r--arch/xtensa/configs/nommu_kc705_defconfig1
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig1
-rw-r--r--arch/xtensa/configs/virt_defconfig1
-rw-r--r--arch/xtensa/configs/xip_kc705_defconfig119
-rw-r--r--arch/xtensa/include/asm/Kbuild2
-rw-r--r--arch/xtensa/include/asm/atomic.h124
-rw-r--r--arch/xtensa/include/asm/bitops.h323
-rw-r--r--arch/xtensa/include/asm/cache.h6
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h71
-rw-r--r--arch/xtensa/include/asm/fixmap.h8
-rw-r--r--arch/xtensa/include/asm/futex.h10
-rw-r--r--arch/xtensa/include/asm/hw_irq.h14
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h3
-rw-r--r--arch/xtensa/include/asm/io.h12
-rw-r--r--arch/xtensa/include/asm/kmem_layout.h29
-rw-r--r--arch/xtensa/include/asm/page.h11
-rw-r--r--arch/xtensa/include/asm/pgtable.h4
-rw-r--r--arch/xtensa/include/asm/platform.h27
-rw-r--r--arch/xtensa/include/asm/processor.h3
-rw-r--r--arch/xtensa/include/asm/syscall.h4
-rw-r--r--arch/xtensa/include/asm/uaccess.h16
-rw-r--r--arch/xtensa/include/asm/user.h20
-rw-r--r--arch/xtensa/include/asm/vectors.h44
-rw-r--r--arch/xtensa/kernel/Makefile3
-rw-r--r--arch/xtensa/kernel/coprocessor.S10
-rw-r--r--arch/xtensa/kernel/entry.S22
-rw-r--r--arch/xtensa/kernel/head.S13
-rw-r--r--arch/xtensa/kernel/pci-dma.c129
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/ptrace.c18
-rw-r--r--arch/xtensa/kernel/setup.c7
-rw-r--r--arch/xtensa/kernel/signal.c4
-rw-r--r--arch/xtensa/kernel/traps.c27
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S58
-rw-r--r--arch/xtensa/mm/fault.c16
-rw-r--r--arch/xtensa/mm/init.c4
-rw-r--r--arch/xtensa/mm/kasan_init.c12
-rw-r--r--arch/xtensa/mm/mmu.c4
-rw-r--r--arch/xtensa/mm/tlb.c14
-rw-r--r--block/scsi_ioctl.c132
-rw-r--r--certs/blacklist.c9
-rw-r--r--drivers/acpi/Kconfig23
-rw-r--r--drivers/acpi/Makefile8
-rw-r--r--drivers/acpi/acpi_configfs.c4
-rw-r--r--drivers/acpi/acpi_lpss.c48
-rw-r--r--drivers/acpi/acpi_platform.c43
-rw-r--r--drivers/acpi/acpi_video.c8
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h10
-rw-r--r--drivers/acpi/acpica/acutils.h9
-rw-r--r--drivers/acpi/acpica/dbconvert.c4
-rw-r--r--drivers/acpi/acpica/dbdisply.c2
-rw-r--r--drivers/acpi/acpica/dbfileio.c2
-rw-r--r--drivers/acpi/acpica/dbinput.c36
-rw-r--r--drivers/acpi/acpica/dbmethod.c4
-rw-r--r--drivers/acpi/acpica/dbnames.c114
-rw-r--r--drivers/acpi/acpica/dbobject.c1
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c12
-rw-r--r--drivers/acpi/acpica/evgpeblk.c11
-rw-r--r--drivers/acpi/acpica/evgpeinit.c3
-rw-r--r--drivers/acpi/acpica/evmisc.c12
-rw-r--r--drivers/acpi/acpica/evregion.c4
-rw-r--r--drivers/acpi/acpica/evrgnini.c1
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c3
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c6
-rw-r--r--drivers/acpi/acpica/nsxfname.c4
-rw-r--r--drivers/acpi/acpica/psobject.c7
-rw-r--r--drivers/acpi/acpica/rscreate.c3
-rw-r--r--drivers/acpi/acpica/tbdata.c3
-rw-r--r--drivers/acpi/acpica/tbxfload.c40
-rw-r--r--drivers/acpi/acpica/utbuffer.c52
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/arm64/iort.c20
-rw-r--r--drivers/acpi/button.c139
-rw-r--r--drivers/acpi/ec.c195
-rw-r--r--drivers/acpi/hmat/Makefile2
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/nfit/core.c7
-rw-r--r--drivers/acpi/numa/Kconfig (renamed from drivers/acpi/hmat/Kconfig)7
-rw-r--r--drivers/acpi/numa/Makefile3
-rw-r--r--drivers/acpi/numa/hmat.c (renamed from drivers/acpi/hmat/hmat.c)158
-rw-r--r--drivers/acpi/numa/srat.c (renamed from drivers/acpi/numa.c)0
-rw-r--r--drivers/acpi/osi.c6
-rw-r--r--drivers/acpi/pmic/intel_pmic.c20
-rw-r--r--drivers/acpi/pmic/intel_pmic_bytcrc.c (renamed from drivers/acpi/pmic/intel_pmic_crc.c)4
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtcrc.c44
-rw-r--r--drivers/acpi/processor_idle.c21
-rw-r--r--drivers/acpi/property.c48
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/utils.c32
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/android/binder_alloc.c42
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci_imx.c25
-rw-r--r--drivers/ata/pata_arasan_cf.c1
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/base/core.c308
-rw-r--r--drivers/base/firmware_loader/Kconfig14
-rw-r--r--drivers/base/firmware_loader/builtin/Makefile3
-rw-r--r--drivers/base/firmware_loader/main.c9
-rw-r--r--drivers/base/memory.c40
-rw-r--r--drivers/base/platform.c393
-rw-r--r--drivers/base/power/common.c20
-rw-r--r--drivers/base/power/domain.c40
-rw-r--r--drivers/base/power/power.h30
-rw-r--r--drivers/base/power/wakeirq.c4
-rw-r--r--drivers/base/property.c83
-rw-r--r--drivers/base/soc.c30
-rw-r--r--drivers/base/swnode.c258
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/pktcdvd.c25
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/bus/Kconfig9
-rw-r--r--drivers/bus/Makefile1
-rw-r--r--drivers/bus/ti-pwmss.c (renamed from drivers/pwm/pwm-tipwmss.c)0
-rw-r--r--drivers/cdrom/cdrom.c12
-rw-r--r--drivers/char/Kconfig6
-rw-r--r--drivers/char/agp/Kconfig2
-rw-r--r--drivers/char/hw_random/Kconfig18
-rw-r--r--drivers/char/ipmi/Kconfig98
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c1
-rw-r--r--drivers/char/lp.c4
-rw-r--r--drivers/char/ppdev.c28
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c12
-rw-r--r--drivers/char/virtio_console.c16
-rw-r--r--drivers/char/xillybus/xillybus_of.c5
-rw-r--r--drivers/clk/Kconfig7
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/sckc.c3
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c4
-rw-r--r--drivers/clk/axs10x/pll_clock.c7
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c4
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c4
-rw-r--r--drivers/clk/clk-aspeed.c27
-rw-r--r--drivers/clk/clk-ast2600.c49
-rw-r--r--drivers/clk/clk-bd718x7.c1
-rw-r--r--drivers/clk/clk-bm1880.c969
-rw-r--r--drivers/clk/clk-composite.c13
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-fixed-rate.c2
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-mux.c2
-rw-r--r--drivers/clk/clk.c27
-rw-r--r--drivers/clk/davinci/pll.c4
-rw-r--r--drivers/clk/davinci/psc.c4
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c60
-rw-r--r--drivers/clk/hisilicon/clk-hi3670.c152
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c3
-rw-r--r--drivers/clk/hisilicon/reset.c4
-rw-r--r--drivers/clk/imgtec/clk-boston.c3
-rw-r--r--drivers/clk/imx/clk-imx6sll.c8
-rw-r--r--drivers/clk/imx/clk-imx6sx.c12
-rw-r--r--drivers/clk/imx/clk-imx6ul.c8
-rw-r--r--drivers/clk/imx/clk-imx7d.c4
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c9
-rw-r--r--drivers/clk/imx/clk-imx8mm.c150
-rw-r--r--drivers/clk/imx/clk-imx8mn.c166
-rw-r--r--drivers/clk/imx/clk-imx8mq.c77
-rw-r--r--drivers/clk/imx/clk-pll14xx.c72
-rw-r--r--drivers/clk/imx/clk.h3
-rw-r--r--drivers/clk/ingenic/Kconfig12
-rw-r--r--drivers/clk/ingenic/Makefile1
-rw-r--r--drivers/clk/ingenic/tcu.c3
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c274
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c6
-rw-r--r--drivers/clk/mediatek/clk-mt6779.c3
-rw-r--r--drivers/clk/mediatek/clk-mt6797.c3
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c6
-rw-r--r--drivers/clk/mediatek/clk-mt7629.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c6
-rw-r--r--drivers/clk/meson/axg-audio.c2025
-rw-r--r--drivers/clk/meson/axg-audio.h21
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c4
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c6
-rw-r--r--drivers/clk/mvebu/armada-xp.c26
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c4
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c1
-rw-r--r--drivers/clk/qcom/Kconfig26
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-rcg.h2
-rw-r--r--drivers/clk/qcom/clk-rcg2.c6
-rw-r--r--drivers/clk/qcom/clk-rpmh.c53
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c3
-rw-r--r--drivers/clk/qcom/common.c5
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c72
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c2450
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c96
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c338
-rw-r--r--drivers/clk/qcom/q6sstop-qcs404.c223
-rw-r--r--drivers/clk/renesas/Kconfig34
-rw-r--r--drivers/clk/renesas/Makefile5
-rw-r--r--drivers/clk/renesas/clk-mstp.c4
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c457
-rw-r--r--drivers/clk/renesas/r8a774b1-cpg-mssr.c327
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c24
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c25
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c64
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c14
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h1
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c3
-rw-r--r--drivers/clk/rockchip/clk-px30.c70
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c34
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c4
-rw-r--r--drivers/clk/samsung/clk.c3
-rw-r--r--drivers/clk/sprd/common.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c23
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.h4
-rw-r--r--drivers/clk/tegra/Makefile2
-rw-r--r--drivers/clk/tegra/clk-dfll.c56
-rw-r--r--drivers/clk/tegra/clk-dfll.h2
-rw-r--r--drivers/clk/tegra/clk-divider.c11
-rw-r--r--drivers/clk/tegra/clk-emc.c12
-rw-r--r--drivers/clk/tegra/clk-id.h4
-rw-r--r--drivers/clk/tegra/clk-periph.c21
-rw-r--r--drivers/clk/tegra/clk-pll-out.c9
-rw-r--r--drivers/clk/tegra/clk-pll.c86
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c16
-rw-r--r--drivers/clk/tegra/clk-super.c41
-rw-r--r--drivers/clk/tegra/clk-tegra-fixed.c15
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c8
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c7
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c1
-rw-r--r--drivers/clk/tegra/clk-tegra124.c55
-rw-r--r--drivers/clk/tegra/clk-tegra20-emc.c293
-rw-r--r--drivers/clk/tegra/clk-tegra20.c80
-rw-r--r--drivers/clk/tegra/clk-tegra210.c181
-rw-r--r--drivers/clk/tegra/clk-tegra30.c63
-rw-r--r--drivers/clk/tegra/clk.c112
-rw-r--r--drivers/clk/tegra/clk.h70
-rw-r--r--drivers/clk/ti/adpll.c11
-rw-r--r--drivers/clk/ti/clk-33xx.c4
-rw-r--r--drivers/clk/ti/clk-43xx.c4
-rw-r--r--drivers/clk/ti/clk-44xx.c4
-rw-r--r--drivers/clk/ti/clk-54xx.c11
-rw-r--r--drivers/clk/ti/clk-7xx.c8
-rw-r--r--drivers/clk/ti/clkctrl.c45
-rw-r--r--drivers/clk/ti/clock.h7
-rw-r--r--drivers/clk/ti/divider.c282
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c3
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/asm9260_timer.c4
-rw-r--r--drivers/clocksource/renesas-ostm.c189
-rw-r--r--drivers/clocksource/sh_cmt.c13
-rw-r--r--drivers/clocksource/sh_mtu2.c13
-rw-r--r--drivers/clocksource/sh_tmu.c14
-rw-r--r--drivers/clocksource/timer-of.c6
-rw-r--r--drivers/clocksource/timer-riscv.c31
-rw-r--r--drivers/counter/104-quad-8.c33
-rw-r--r--drivers/counter/Kconfig11
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/counter.c101
-rw-r--r--drivers/counter/ftm-quaddec.c14
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c7
-rw-r--r--drivers/counter/stm32-timer-cnt.c23
-rw-r--r--drivers/counter/ti-eqep.c466
-rw-r--r--drivers/cpufreq/Kconfig.arm12
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/arm_big_little.c658
-rw-r--r--drivers/cpufreq/arm_big_little.h43
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/cpufreq/cpufreq.c18
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c20
-rw-r--r--drivers/cpufreq/intel_pstate.c30
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c17
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c7
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c25
-rw-r--r--drivers/cpufreq/ti-cpufreq.c119
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c584
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c7
-rw-r--r--drivers/cpuidle/cpuidle.c72
-rw-r--r--drivers/cpuidle/driver.c72
-rw-r--r--drivers/cpuidle/governor.c7
-rw-r--r--drivers/cpuidle/governors/haltpoll.c7
-rw-r--r--drivers/cpuidle/governors/ladder.c29
-rw-r--r--drivers/cpuidle/governors/menu.c131
-rw-r--r--drivers/cpuidle/governors/teo.c182
-rw-r--r--drivers/cpuidle/poll_state.c2
-rw-r--r--drivers/cpuidle/sysfs.c71
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c2
-rw-r--r--drivers/dax/Kconfig27
-rw-r--r--drivers/dax/Makefile2
-rw-r--r--drivers/dax/bus.c24
-rw-r--r--drivers/dax/bus.h2
-rw-r--r--drivers/dax/dax-private.h2
-rw-r--r--drivers/dax/hmem.c56
-rw-r--r--drivers/dax/pmem/core.c6
-rw-r--r--drivers/devfreq/devfreq.c33
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c1
-rw-r--r--drivers/devfreq/governor.h3
-rw-r--r--drivers/devfreq/tegra30-devfreq.c417
-rw-r--r--drivers/dma-buf/dma-buf.c124
-rw-r--r--drivers/dma-buf/dma-fence.c78
-rw-r--r--drivers/dma-buf/sw_sync.c2
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/dma/Kconfig88
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/at_xdmac.c7
-rw-r--r--drivers/dma/dma-jz4780.c16
-rw-r--r--drivers/dma/dw/platform.c2
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/Kconfig9
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/Makefile3
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c825
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h153
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.c376
-rw-r--r--drivers/dma/fsl-dpaa2-qdma/dpdmai.h177
-rw-r--r--drivers/dma/fsl-qdma.c3
-rw-r--r--drivers/dma/iop-adma.c10
-rw-r--r--drivers/dma/k3dma.c7
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c10
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c9
-rw-r--r--drivers/dma/milbeaut-hdmac.c578
-rw-r--r--drivers/dma/milbeaut-xdmac.c415
-rw-r--r--drivers/dma/mmp_pdma.c2
-rw-r--r--drivers/dma/mmp_tdma.c3
-rw-r--r--drivers/dma/owl-dma.c7
-rw-r--r--drivers/dma/sf-pdma/Kconfig6
-rw-r--r--drivers/dma/sf-pdma/Makefile1
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c620
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.h124
-rw-r--r--drivers/dma/sh/rcar-dmac.c47
-rw-r--r--drivers/dma/sprd-dma.c17
-rw-r--r--drivers/dma/ti/edma.c77
-rw-r--r--drivers/dma/uniphier-mdmac.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c649
-rw-r--r--drivers/dma/zx_dma.c8
-rw-r--r--drivers/extcon/extcon-axp288.c38
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c16
-rw-r--r--drivers/extcon/extcon-sm5502.c6
-rw-r--r--drivers/extcon/extcon-sm5502.h2
-rw-r--r--drivers/firewire/core-cdev.c15
-rw-r--r--drivers/firewire/core-iso.c7
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/dmi_scan.c41
-rw-r--r--drivers/firmware/efi/Kconfig21
-rw-r--r--drivers/firmware/efi/Makefile5
-rw-r--r--drivers/firmware/efi/apple-properties.c18
-rw-r--r--drivers/firmware/efi/arm-init.c9
-rw-r--r--drivers/firmware/efi/arm-runtime.c24
-rw-r--r--drivers/firmware/efi/efi.c15
-rw-r--r--drivers/firmware/efi/esrt.c3
-rw-r--r--drivers/firmware/efi/fake_mem.c26
-rw-r--r--drivers/firmware/efi/fake_mem.h10
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c5
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c19
-rw-r--r--drivers/firmware/efi/libstub/random.c4
-rw-r--r--drivers/firmware/efi/x86_fake_mem.c69
-rw-r--r--drivers/firmware/qcom_scm-32.c5
-rw-r--r--drivers/firmware/qcom_scm-64.c153
-rw-r--r--drivers/firmware/qcom_scm.c6
-rw-r--r--drivers/firmware/qcom_scm.h5
-rw-r--r--drivers/firmware/stratix10-rsu.c42
-rw-r--r--drivers/firmware/stratix10-svc.c18
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/dfl-fme-main.c385
-rw-r--r--drivers/fpga/zynq-fpga.c4
-rw-r--r--drivers/fsi/Kconfig8
-rw-r--r--drivers/fsi/Makefile1
-rw-r--r--drivers/fsi/fsi-core.c67
-rw-r--r--drivers/fsi/fsi-master-aspeed.c544
-rw-r--r--drivers/fsi/fsi-master-hub.c46
-rw-r--r--drivers/fsi/fsi-master.h71
-rw-r--r--drivers/gpio/Kconfig29
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/TODO4
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c5
-rw-r--r--drivers/gpio/gpio-104-idi-48.c4
-rw-r--r--drivers/gpio/gpio-104-idio-16.c4
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c5
-rw-r--r--drivers/gpio/gpio-amd-fch.c2
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c (renamed from drivers/gpio/sgpio-aspeed.c)0
-rw-r--r--drivers/gpio/gpio-aspeed.c7
-rw-r--r--drivers/gpio/gpio-ath79.c10
-rw-r--r--drivers/gpio/gpio-bcm-kona.c6
-rw-r--r--drivers/gpio/gpio-bd70528.c9
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c4
-rw-r--r--drivers/gpio/gpio-dln2.c6
-rw-r--r--drivers/gpio/gpio-em.c39
-rw-r--r--drivers/gpio/gpio-exar.c5
-rw-r--r--drivers/gpio/gpio-f7188x.c5
-rw-r--r--drivers/gpio/gpio-gpio-mm.c5
-rw-r--r--drivers/gpio/gpio-htc-egpio.c42
-rw-r--r--drivers/gpio/gpio-ich.c5
-rw-r--r--drivers/gpio/gpio-kempld.c5
-rw-r--r--drivers/gpio/gpio-lp873x.c2
-rw-r--r--drivers/gpio/gpio-lp87565.c5
-rw-r--r--drivers/gpio/gpio-lynxpoint.c6
-rw-r--r--drivers/gpio/gpio-madera.c5
-rw-r--r--drivers/gpio/gpio-max3191x.c2
-rw-r--r--drivers/gpio/gpio-max77620.c231
-rw-r--r--drivers/gpio/gpio-menz127.c1
-rw-r--r--drivers/gpio/gpio-merrifield.c79
-rw-r--r--drivers/gpio/gpio-mmio.c22
-rw-r--r--drivers/gpio/gpio-mockup.c105
-rw-r--r--drivers/gpio/gpio-moxtet.c4
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c36
-rw-r--r--drivers/gpio/gpio-mvebu.c24
-rw-r--r--drivers/gpio/gpio-mxc.c13
-rw-r--r--drivers/gpio/gpio-mxs.c5
-rw-r--r--drivers/gpio/gpio-omap.c6
-rw-r--r--drivers/gpio/gpio-pca953x.c5
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c4
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c9
-rw-r--r--drivers/gpio/gpio-pisosr.c2
-rw-r--r--drivers/gpio/gpio-pl061.c5
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c5
-rw-r--r--drivers/gpio/gpio-rcar.c7
-rw-r--r--drivers/gpio/gpio-rda.c294
-rw-r--r--drivers/gpio/gpio-reg.c3
-rw-r--r--drivers/gpio/gpio-sa1100.c5
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c7
-rw-r--r--drivers/gpio/gpio-sch.c5
-rw-r--r--drivers/gpio/gpio-sch311x.c5
-rw-r--r--drivers/gpio/gpio-siox.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c5
-rw-r--r--drivers/gpio/gpio-tc3589x.c5
-rw-r--r--drivers/gpio/gpio-tegra.c5
-rw-r--r--drivers/gpio/gpio-tegra186.c384
-rw-r--r--drivers/gpio/gpio-thunderx.c5
-rw-r--r--drivers/gpio/gpio-tpic2810.c2
-rw-r--r--drivers/gpio/gpio-tps65086.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c4
-rw-r--r--drivers/gpio/gpio-tps68470.c6
-rw-r--r--drivers/gpio/gpio-tqmx86.c5
-rw-r--r--drivers/gpio/gpio-ts4900.c5
-rw-r--r--drivers/gpio/gpio-twl4030.c10
-rw-r--r--drivers/gpio/gpio-twl6040.c3
-rw-r--r--drivers/gpio/gpio-uniphier.c5
-rw-r--r--drivers/gpio/gpio-wcove.c7
-rw-r--r--drivers/gpio/gpio-ws16c48.c5
-rw-r--r--drivers/gpio/gpio-xgene.c32
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c320
-rw-r--r--drivers/gpio/gpio-xra1403.c5
-rw-r--r--drivers/gpio/gpio-xtensa.c4
-rw-r--r--drivers/gpio/gpio-zynq.c7
-rw-r--r--drivers/gpio/gpiolib-acpi.c17
-rw-r--r--drivers/gpio/gpiolib-of.c18
-rw-r--r--drivers/gpio/gpiolib.c282
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/drm/Kconfig36
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c147
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c289
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c274
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c307
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c443
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c459
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c659
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c209
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c375
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c158
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c318
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/arct_reg_init.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c1389
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c474
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c380
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c258
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h139
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c272
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c108
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c25
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h3
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/display/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c381
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c346
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h66
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c59
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c153
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c186
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c281
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c345
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h187
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c640
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c349
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c116
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c380
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c324
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h19
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c51
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c53
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/Makefile32
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c426
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h442
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c531
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c307
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c305
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c163
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h139
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c328
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h272
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h289
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c98
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c93
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h10
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h18
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h12
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h49
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h92
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h176
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h12
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h27
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h42
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h10
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h34
-rw-r--r--drivers/gpu/drm/amd/include/vega10_enum.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c45
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1142
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c523
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c19
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c196
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c222
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c91
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c231
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c68
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c41
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h370
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h51
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h134
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h41
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c543
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c483
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h204
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c370
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c153
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c134
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c1
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c221
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c41
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c105
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c77
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h20
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c30
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_event.c140
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h17
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c76
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c5
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c16
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c9
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h10
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c6
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h43
-rw-r--r--drivers/gpu/drm/ast/ast_main.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c266
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c3
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c5
-rw-r--r--drivers/gpu/drm/bochs/Kconfig2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c7
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c26
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c3
-rw-r--r--drivers/gpu/drm/bridge/Kconfig3
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c110
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.h17
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c1
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c3
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c1
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c3
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c1
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c1
-rw-r--r--drivers/gpu/drm/bridge/panel.c70
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c1
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c1
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c37
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c11
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c21
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c155
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.h39
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c10
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c66
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c1
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h247
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c18
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c2
-rw-r--r--drivers/gpu/drm/drm_blend.c7
-rw-r--r--drivers/gpu/drm/drm_cache.c14
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c3
-rw-r--r--drivers/gpu/drm/drm_connector.c142
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c23
-rw-r--r--drivers/gpu/drm/drm_crtc_helper_internal.h3
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c8
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c8
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c29
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c177
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1807
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology_internal.h24
-rw-r--r--drivers/gpu/drm/drm_drv.c17
-rw-r--r--drivers/gpu/drm/drm_dsc.c23
-rw-r--r--drivers/gpu/drm/drm_edid.c222
-rw-r--r--drivers/gpu/drm/drm_edid_load.c2
-rw-r--r--drivers/gpu/drm/drm_encoder.c1
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c62
-rw-r--r--drivers/gpu/drm/drm_gem.c40
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c31
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c84
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c735
-rw-r--r--drivers/gpu/drm/drm_memory.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c11
-rw-r--r--drivers/gpu/drm/drm_mm.c36
-rw-r--r--drivers/gpu/drm/drm_mode_config.c2
-rw-r--r--drivers/gpu/drm/drm_of.c5
-rw-r--r--drivers/gpu/drm/drm_panel.c14
-rw-r--r--drivers/gpu/drm/drm_prime.c9
-rw-r--r--drivers/gpu/drm/drm_print.c60
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c4
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c3
-rw-r--r--drivers/gpu/drm/drm_syncobj.c38
-rw-r--r--drivers/gpu/drm/drm_trace.h14
-rw-r--r--drivers/gpu/drm/drm_vblank.c60
-rw-r--r--drivers/gpu/drm/drm_vram_helper_common.c8
-rw-r--r--drivers/gpu/drm/drm_vram_mm_helper.c297
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig3
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c14
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c6
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c3
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c12
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig18
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug147
-rw-r--r--drivers/gpu/drm/i915/Kconfig.profile49
-rw-r--r--drivers/gpu/drm/i915/Kconfig.unstable29
-rw-r--r--drivers/gpu/drm/i915/Makefile25
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c58
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c81
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c1220
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c550
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c839
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2380
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h66
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c554
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h43
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h63
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c509
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c75
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c412
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c332
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c216
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c297
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c441
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c549
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c87
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h55
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c160
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h18
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_client_blt.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c614
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h61
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h22
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c89
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c20
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c99
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h37
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c84
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c38
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h52
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_blt.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h34
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c48
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c165
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c174
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c82
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c124
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c130
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_throttle.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c42
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c33
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c579
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c30
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c214
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c704
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c306
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c354
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c33
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h13
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c25
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h231
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c246
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c234
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pool.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h91
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c160
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c209
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c137
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hangcheck.c360
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c161
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c1500
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc_reg.h66
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c277
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c787
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.h28
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6_types.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c172
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c323
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.h131
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c (renamed from drivers/gpu/drm/i915/gt/intel_ringbuffer.c)404
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_types.h51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1872
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h38
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline_types.h10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c67
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c7
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_context.c71
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c350
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_gt_pm.c60
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c207
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.c80
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_llc.h14
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c1895
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c138
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c270
-rw-r--r--drivers/gpu/drm/i915/gt/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c185
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c56
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c41
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c38
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c76
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c46
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c23
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c48
-rw-r--r--drivers/gpu/drm/i915/i915_active.c389
-rw-r--r--drivers/gpu/drm/i915/i915_active.h330
-rw-r--r--drivers/gpu/drm/i915/i915_active_types.h34
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c522
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c289
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h622
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c406
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c104
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c413
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h77
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c150
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h8
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c839
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h16
-rw-r--r--drivers/gpu/drm/i915/i915_params.c12
-rw-r--r--drivers/gpu/drm/i915/i915_params.h5
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c80
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1860
-rw-r--r--drivers/gpu/drm/i915/i915_perf.h32
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h435
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c309
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h8
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h7
-rw-r--r--drivers/gpu/drm/i915/i915_query.c306
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h876
-rw-r--r--drivers/gpu/drm/i915/i915_request.c235
-rw-r--r--drivers/gpu/drm/i915/i915_request.h40
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.h8
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c5
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h18
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h9
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c11
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c67
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.h14
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c162
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h40
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c43
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h34
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c639
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h134
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c4
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c230
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h8
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c272
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h129
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3167
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h30
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.c132
-rw-r--r--drivers/gpu/drm/i915/intel_region_lmem.h16
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c94
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h20
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.c121
-rw-r--r--drivers/gpu/drm/i915/oa/i915_oa_tgl.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c90
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c46
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c143
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c404
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c217
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c502
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c23
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c33
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_live_test.c19
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c624
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c56
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c53
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c60
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.h16
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c1
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c1
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c5
-rw-r--r--drivers/gpu/drm/lima/Kconfig1
-rw-r--r--drivers/gpu/drm/lima/Makefile4
-rw-r--r--drivers/gpu/drm/lima/lima_device.c5
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c22
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c195
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h32
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c46
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c1
-rw-r--r--drivers/gpu/drm/lima/lima_object.c119
-rw-r--r--drivers/gpu/drm/lima/lima_object.h35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c87
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c4
-rw-r--r--drivers/gpu/drm/mediatek/Makefile2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c111
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c136
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c128
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c67
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h43
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c234
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c338
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.h49
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c288
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c149
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c32
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c115
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c9
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c327
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c7
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h23
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c20
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c17
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c24
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h1
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c70
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h2
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c20
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c46
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.h4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c230
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c37
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c129
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c137
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c5
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c5
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c5
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c5
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c26
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c5
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c5
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c5
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c5
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c5
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c5
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c5
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c5
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c29
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c5
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c5
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c5
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c5
-rw-r--r--drivers/gpu/drm/panfrost/TODO2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c124
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c17
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c4
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c20
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c32
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c62
-rw-r--r--drivers/gpu/drm/radeon/cik.c106
-rw-r--r--drivers/gpu/drm/radeon/r600.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c218
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c1
-rw-r--r--drivers/gpu/drm/radeon/si.c101
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c29
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c12
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h3
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c19
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c169
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c48
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c66
-rw-r--r--drivers/gpu/drm/selftests/Makefile2
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c238
-rw-r--r--drivers/gpu/drm/selftests/test-drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c14
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c3
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c26
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c10
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c5
-rw-r--r--drivers/gpu/drm/stm/ltdc.c39
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c35
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h1
-rw-r--r--drivers/gpu/drm/tegra/Kconfig2
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/dc.c30
-rw-r--r--drivers/gpu/drm/tegra/dc.h2
-rw-r--r--drivers/gpu/drm/tegra/dp.c876
-rw-r--r--drivers/gpu/drm/tegra/dp.h177
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c208
-rw-r--r--drivers/gpu/drm/tegra/drm.c417
-rw-r--r--drivers/gpu/drm/tegra/drm.h13
-rw-r--r--drivers/gpu/drm/tegra/falcon.c64
-rw-r--r--drivers/gpu/drm/tegra/falcon.h16
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c81
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c12
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c12
-rw-r--r--drivers/gpu/drm/tegra/hub.c6
-rw-r--r--drivers/gpu/drm/tegra/output.c28
-rw-r--r--drivers/gpu/drm/tegra/plane.c104
-rw-r--r--drivers/gpu/drm/tegra/plane.h8
-rw-r--r--drivers/gpu/drm/tegra/sor.c1704
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tegra/vic.c138
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c5
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c190
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c27
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c243
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c57
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c7
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c5
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c55
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig2
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c19
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h27
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c149
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c119
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c138
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c4
-rw-r--r--drivers/gpu/drm/virtio/Kconfig2
-rw-r--r--drivers/gpu/drm/virtio/Makefile2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h135
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c183
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c228
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c24
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c270
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c61
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c305
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c227
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c15
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h6
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h233
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h48
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c488
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c196
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c397
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c77
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h18
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c7
-rw-r--r--drivers/gpu/host1x/Kconfig2
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/cdma.c6
-rw-r--r--drivers/gpu/host1x/channel.c13
-rw-r--r--drivers/gpu/host1x/channel.h1
-rw-r--r--drivers/gpu/host1x/dev.c236
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/intr.c1
-rw-r--r--drivers/gpu/host1x/job.c91
-rw-r--r--drivers/gpu/host1x/job.h4
-rw-r--r--drivers/greybus/connection.c3
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-core.c55
-rw-r--r--drivers/hid/hid-google-hammer.c146
-rw-r--r--drivers/hid/hid-hyperv.c34
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-lg-g15.c899
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-quirks.c8
-rw-r--r--drivers/hid/hid-rmi.c3
-rw-r--r--drivers/hid/hidraw.c14
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c16
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c2
-rw-r--r--drivers/hid/usbhid/hiddev.c11
-rw-r--r--drivers/hv/Makefile1
-rw-r--r--drivers/hv/connection.c87
-rw-r--r--drivers/hv/hv_balloon.c116
-rw-r--r--drivers/hv/hv_debugfs.c178
-rw-r--r--drivers/hv/hv_fcopy.c3
-rw-r--r--drivers/hv/hv_kvp.c3
-rw-r--r--drivers/hv/hv_snapshot.c3
-rw-r--r--drivers/hv/hv_util.c13
-rw-r--r--drivers/hv/hyperv_vmbus.h31
-rw-r--r--drivers/hv/ring_buffer.c2
-rw-r--r--drivers/hv/vmbus_drv.c27
-rw-r--r--drivers/hwmon/Kconfig41
-rw-r--r--drivers/hwmon/Makefile4
-rw-r--r--drivers/hwmon/ab8500.c65
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/applesmc.c38
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c7
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c115
-rw-r--r--drivers/hwmon/fschmd.c1
-rw-r--r--drivers/hwmon/ina3221.c163
-rw-r--r--drivers/hwmon/ltc2947-core.c1183
-rw-r--r--drivers/hwmon/ltc2947-i2c.c49
-rw-r--r--drivers/hwmon/ltc2947-spi.c50
-rw-r--r--drivers/hwmon/ltc2947.h12
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c131
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c74
-rw-r--r--drivers/hwmon/tmp421.c3
-rw-r--r--drivers/hwmon/tmp513.c772
-rw-r--r--drivers/hwmon/w83793.c3
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c16
-rw-r--r--drivers/hwspinlock/sprd_hwspinlock.c48
-rw-r--r--drivers/hwspinlock/u8500_hsem.c53
-rw-r--r--drivers/hwtracing/coresight/Kconfig1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c312
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c351
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h81
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c36
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c26
-rw-r--r--drivers/hwtracing/coresight/coresight.c51
-rw-r--r--drivers/hwtracing/intel_th/core.c8
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/core.c12
-rw-r--r--drivers/hwtracing/stm/policy.c4
-rw-r--r--drivers/i2c/busses/Kconfig24
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c4
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c38
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c53
-rw-r--r--drivers/i2c/busses/i2c-at91.h13
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c63
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c15
-rw-r--r--drivers/i2c/busses/i2c-i801.c8
-rw-r--r--drivers/i2c/busses/i2c-icy.c9
-rw-r--r--drivers/i2c/busses/i2c-pxa.c75
-rw-r--r--drivers/i2c/busses/i2c-qup.c4
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32.c16
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c32
-rw-r--r--drivers/i2c/busses/i2c-tegra.c4
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/i2c-core-base.c63
-rw-r--r--drivers/i2c/i2c-core-of.c1
-rw-r--r--drivers/i2c/i2c-smbus.c7
-rw-r--r--drivers/i2c/muxes/Kconfig18
-rw-r--r--drivers/ide/ide-tape.c27
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c6
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/adc/Kconfig35
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c1218
-rw-r--r--drivers/iio/adc/ad7292.c350
-rw-r--r--drivers/iio/adc/ad7949.c33
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c3
-rw-r--r--drivers/iio/adc/aspeed_adc.c4
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c4
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c2
-rw-r--r--drivers/iio/adc/cc10001_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c20
-rw-r--r--drivers/iio/adc/exynos_adc.c6
-rw-r--r--drivers/iio/adc/hx711.c22
-rw-r--r--drivers/iio/adc/ingenic-adc.c153
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c262
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c4
-rw-r--r--drivers/iio/adc/max1027.c180
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/adc/meson_saradc.c4
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c4
-rw-r--r--drivers/iio/adc/npcm_adc.c4
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c4
-rw-r--r--drivers/iio/adc/sc27xx_adc.c16
-rw-r--r--drivers/iio/adc/spear_adc.c4
-rw-r--r--drivers/iio/adc/stm32-adc-core.c27
-rw-r--r--drivers/iio/adc/stm32-adc.c21
-rw-r--r--drivers/iio/adc/stmpe-adc.c2
-rw-r--r--drivers/iio/adc/twl4030-madc.c18
-rw-r--r--drivers/iio/adc/vf610_adc.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c8
-rw-r--r--drivers/iio/chemical/sgp30.c2
-rw-r--r--drivers/iio/chemical/sps30.c2
-rw-r--r--drivers/iio/common/cros_ec_sensors/Kconfig2
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c6
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c4
-rw-r--r--drivers/iio/dac/Kconfig4
-rw-r--r--drivers/iio/dac/ad5446.c6
-rw-r--r--drivers/iio/dac/ad7303.c13
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c4
-rw-r--r--drivers/iio/dac/stm32-dac-core.c138
-rw-r--r--drivers/iio/dac/stm32-dac.c94
-rw-r--r--drivers/iio/dac/vf610_dac.c4
-rw-r--r--drivers/iio/gyro/adis16080.c8
-rw-r--r--drivers/iio/gyro/adis16130.c2
-rw-r--r--drivers/iio/gyro/adis16136.c24
-rw-r--r--drivers/iio/gyro/itg3200_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c1
-rw-r--r--drivers/iio/humidity/hdc100x.c19
-rw-r--r--drivers/iio/imu/Kconfig27
-rw-r--r--drivers/iio/imu/Makefile5
-rw-r--r--drivers/iio/imu/adis.c5
-rw-r--r--drivers/iio/imu/adis16400.c22
-rw-r--r--drivers/iio/imu/adis16460.c8
-rw-r--r--drivers/iio/imu/adis16480.c116
-rw-r--r--drivers/iio/imu/fxos8700.h10
-rw-r--r--drivers/iio/imu/fxos8700_core.c649
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c71
-rw-r--r--drivers/iio/imu/fxos8700_spi.c59
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile7
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c204
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h19
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c195
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c60
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h74
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c356
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h36
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c86
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h87
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c109
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c1056
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c45
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c10
-rw-r--r--drivers/iio/industrialio-core.c19
-rw-r--r--drivers/iio/light/Kconfig22
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adux1020.c849
-rw-r--r--drivers/iio/light/bh1750.c4
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c6
-rw-r--r--drivers/iio/light/tcs3414.c30
-rw-r--r--drivers/iio/light/veml6030.c908
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c130
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c6
-rw-r--r--drivers/iio/pressure/bmp280-spi.c6
-rw-r--r--drivers/iio/pressure/bmp280.h1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c3
-rw-r--r--drivers/iio/pressure/st_pressure_core.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c16
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c5
-rw-r--r--drivers/iio/proximity/sx9500.c16
-rw-r--r--drivers/iio/temperature/Kconfig11
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/ltc2983.c1557
-rw-r--r--drivers/iio/temperature/max31856.c2
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/cache.c8
-rw-r--r--drivers/infiniband/core/cm.c66
-rw-r--r--drivers/infiniband/core/cm_msgs.h32
-rw-r--r--drivers/infiniband/core/cma.c107
-rw-r--r--drivers/infiniband/core/core_priv.h11
-rw-r--r--drivers/infiniband/core/counters.c40
-rw-r--r--drivers/infiniband/core/device.c51
-rw-r--r--drivers/infiniband/core/ib_core_uverbs.c335
-rw-r--r--drivers/infiniband/core/iwpm_util.h5
-rw-r--r--drivers/infiniband/core/mad.c31
-rw-r--r--drivers/infiniband/core/nldev.c141
-rw-r--r--drivers/infiniband/core/rdma_core.c1
-rw-r--r--drivers/infiniband/core/restrack.c20
-rw-r--r--drivers/infiniband/core/restrack.h1
-rw-r--r--drivers/infiniband/core/rw.c25
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c12
-rw-r--r--drivers/infiniband/core/umem.c12
-rw-r--r--drivers/infiniband/core/umem_odp.c341
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c2
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c88
-rw-r--r--drivers/infiniband/core/verbs.c12
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig12
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c143
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig19
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile7
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c1312
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h204
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c344
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.h69
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h802
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c282
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h155
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2258
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h233
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cq.c230
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c232
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c101
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1321
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h347
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1082
-rw-r--r--drivers/infiniband/hw/cxgb3/tcb.h632
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c7
-rw-r--r--drivers/infiniband/hw/efa/efa.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h29
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c40
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h19
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c17
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c370
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c17
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c2
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c146
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h5
-rw-r--r--drivers/infiniband/hw/hns/Kconfig17
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c300
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h55
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c38
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c21
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c69
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c86
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c30
-rw-r--r--drivers/infiniband/hw/mlx4/main.c18
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c37
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c25
-rw-r--r--drivers/infiniband/hw/mlx5/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c29
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c24
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c124
-rw-r--r--drivers/infiniband/hw/mlx5/main.c137
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c199
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h80
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c180
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c1021
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c60
-rw-r--r--drivers/infiniband/hw/mlx5/restrack.c90
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c74
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h11
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c9
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h2
-rw-r--r--drivers/infiniband/hw/qedr/main.c5
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h72
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c150
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c643
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c38
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c119
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c30
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h1
-rw-r--r--drivers/infiniband/sw/siw/siw.h31
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c45
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c35
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c338
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h34
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c5
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c72
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h8
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c47
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h4
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c247
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h58
-rw-r--r--drivers/input/input-poller.c9
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/joystick/psxpad-spi.c64
-rw-r--r--drivers/input/keyboard/Kconfig28
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adc-keys.c36
-rw-r--r--drivers/input/keyboard/adp5589-keys.c171
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c70
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c6
-rw-r--r--drivers/input/keyboard/gpio_keys.c6
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c72
-rw-r--r--drivers/input/keyboard/imx_sc_key.c187
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c37
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c69
-rw-r--r--drivers/input/misc/Kconfig15
-rw-r--r--drivers/input/misc/apanel.c153
-rw-r--r--drivers/input/misc/bma150.c190
-rw-r--r--drivers/input/misc/cobalt_btns.c73
-rw-r--r--drivers/input/misc/gpio_decoder.c42
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c342
-rw-r--r--drivers/input/misc/kxtj9.c224
-rw-r--r--drivers/input/misc/mma8450.c101
-rw-r--r--drivers/input/misc/rb532_button.c48
-rw-r--r--drivers/input/misc/sgi_btns.c54
-rw-r--r--drivers/input/misc/wistron_btns.c51
-rw-r--r--drivers/input/mouse/Kconfig15
-rw-r--r--drivers/input/mouse/gpio_mouse.c45
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/rmi4/rmi_f54.c63
-rw-r--r--drivers/input/tablet/Kconfig20
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c4
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c30
-rw-r--r--drivers/input/touchscreen/ili210x.c418
-rw-r--r--drivers/input/touchscreen/mms114.c3
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c177
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c38
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c1
-rw-r--r--drivers/input/touchscreen/st1232.c184
-rw-r--r--drivers/input/touchscreen/sur40.c92
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c36
-rw-r--r--drivers/input/touchscreen/ts4800-ts.c68
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c1
-rw-r--r--drivers/interconnect/qcom/Kconfig9
-rw-r--r--drivers/interconnect/qcom/Makefile2
-rw-r--r--drivers/interconnect/qcom/msm8974.c784
-rw-r--r--drivers/iommu/Kconfig8
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c923
-rw-r--r--drivers/iommu/amd_iommu_types.h3
-rw-r--r--drivers/iommu/arm-smmu-impl.c5
-rw-r--r--drivers/iommu/arm-smmu-qcom.c51
-rw-r--r--drivers/iommu/arm-smmu-v3.c12
-rw-r--r--drivers/iommu/arm-smmu.c223
-rw-r--r--drivers/iommu/arm-smmu.h16
-rw-r--r--drivers/iommu/dma-iommu.c56
-rw-r--r--drivers/iommu/dmar.c5
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu.c61
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c15
-rw-r--r--drivers/iommu/io-pgtable-arm.c130
-rw-r--r--drivers/iommu/ioasid.c422
-rw-r--r--drivers/iommu/iommu.c73
-rw-r--r--drivers/iommu/ipmmu-vmsa.c223
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c90
-rw-r--r--drivers/iommu/mtk_iommu.h2
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/qcom_iommu.c10
-rw-r--r--drivers/iommu/rockchip-iommu.c11
-rw-r--r--drivers/iommu/s390-iommu.c2
-rw-r--r--drivers/iommu/tegra-gart.c2
-rw-r--r--drivers/iommu/tegra-smmu.c38
-rw-r--r--drivers/iommu/virtio-iommu.c5
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c119
-rw-r--r--drivers/irqchip/irq-gic-v2m.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c302
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic.c85
-rw-r--r--drivers/irqchip/irq-ls-extirq.c197
-rw-r--r--drivers/irqchip/irq-sifive-plic.c11
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c5
-rw-r--r--drivers/irqchip/irq-zevio.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c149
-rw-r--r--drivers/isdn/capi/capi.c31
-rw-r--r--drivers/leds/Kconfig17
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-class-flash.c50
-rw-r--r--drivers/leds/led-class.c10
-rw-r--r--drivers/leds/led-triggers.c90
-rw-r--r--drivers/leds/leds-an30259a.c7
-rw-r--r--drivers/leds/leds-bcm6328.c7
-rw-r--r--drivers/leds/leds-bcm6358.c7
-rw-r--r--drivers/leds/leds-el15203000.c357
-rw-r--r--drivers/leds/leds-lm3601x.c4
-rw-r--r--drivers/leds/leds-lm3692x.c47
-rw-r--r--drivers/leds/leds-mlxreg.c4
-rw-r--r--drivers/leds/leds-pca9532.c14
-rw-r--r--drivers/leds/leds-tlc591xx.c90
-rw-r--r--drivers/leds/leds.h6
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c5
-rw-r--r--drivers/macintosh/ans-lcd.c3
-rw-r--r--drivers/mailbox/hi6220-mailbox.c1
-rw-r--r--drivers/mailbox/imx-mailbox.c74
-rw-r--r--drivers/mailbox/omap-mailbox.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c36
-rw-r--r--drivers/mailbox/tegra-hsp.c4
-rw-r--r--drivers/mcb/mcb-core.c28
-rw-r--r--drivers/mcb/mcb-lpc.c1
-rw-r--r--drivers/mcb/mcb-parse.c2
-rw-r--r--drivers/mcb/mcb-pci.c1
-rw-r--r--drivers/media/cec/cec-adap.c12
-rw-r--r--drivers/media/cec/cec-api.c20
-rw-r--r--drivers/media/cec/cec-core.c5
-rw-r--r--drivers/media/cec/cec-notifier.c5
-rw-r--r--drivers/media/cec/cec-pin.c10
-rw-r--r--drivers/media/common/siano/smscoreapi.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsir.h2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_t2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c12
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c54
-rw-r--r--drivers/media/dvb-frontends/mt312.c13
-rw-r--r--drivers/media/dvb-frontends/si2168.h47
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h10
-rw-r--r--drivers/media/dvb-frontends/tc90522.c27
-rw-r--r--drivers/media/dvb-frontends/tc90522.h3
-rw-r--r--drivers/media/i2c/Kconfig80
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad5820.c35
-rw-r--r--drivers/media/i2c/adv7180.c6
-rw-r--r--drivers/media/i2c/adv7842.c4
-rw-r--r--drivers/media/i2c/bt819.c2
-rw-r--r--drivers/media/i2c/hi556.c1200
-rw-r--r--drivers/media/i2c/imx214.c9
-rw-r--r--drivers/media/i2c/imx290.c884
-rw-r--r--drivers/media/i2c/lm3646.c2
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max2175.h4
-rw-r--r--drivers/media/i2c/mt9m001.c2
-rw-r--r--drivers/media/i2c/ov2659.c139
-rw-r--r--drivers/media/i2c/ov5640.c33
-rw-r--r--drivers/media/i2c/ov5695.c2
-rw-r--r--drivers/media/i2c/ov6650.c266
-rw-r--r--drivers/media/i2c/saa711x_regs.h2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c326
-rw-r--r--drivers/media/i2c/smiapp/smiapp-reg.h36
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h3
-rw-r--r--drivers/media/i2c/st-mipid02.c5
-rw-r--r--drivers/media/i2c/tda1997x_regs.h2
-rw-r--r--drivers/media/i2c/tvp5150_reg.h2
-rw-r--r--drivers/media/i2c/vpx3220.c2
-rw-r--r--drivers/media/mc/mc-device.c65
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c5
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c43
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c1
-rw-r--r--drivers/media/pci/cx88/cx88-video.c11
-rw-r--r--drivers/media/pci/cx88/cx88.h1
-rw-r--r--drivers/media/pci/dm1105/dm1105.c1
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c4
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c166
-rw-r--r--drivers/media/pci/smipcie/smipcie.h1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c2
-rw-r--r--drivers/media/platform/Kconfig17
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c861
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h43
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe_regs.h10
-rw-r--r--drivers/media/platform/aspeed-video.c58
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c2
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c41
-rw-r--r--drivers/media/platform/coda/coda-common.c13
-rw-r--r--drivers/media/platform/coda/coda.h1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c6
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c7
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c38
-rw-r--r--drivers/media/platform/meson/ao-cec.c32
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c20
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c8
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c9
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h9
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c4
-rw-r--r--drivers/media/platform/qcom/venus/core.c56
-rw-r--r--drivers/media/platform/qcom/venus/core.h30
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c247
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h3
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c6
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c11
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c17
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c4
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c63
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c156
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h6
-rw-r--r--drivers/media/platform/rcar_drif.c1
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c2
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c5
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c26
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c4
-rw-r--r--drivers/media/platform/sunxi/Makefile1
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/Makefile2
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c1028
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.h237
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c4
-rw-r--r--drivers/media/platform/ti-vpe/csc.c254
-rw-r--r--drivers/media/platform/ti-vpe/csc.h4
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.c13
-rw-r--r--drivers/media/platform/ti-vpe/vpdma.h2
-rw-r--r--drivers/media/platform/ti-vpe/vpdma_priv.h5
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c396
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c4
-rw-r--r--drivers/media/platform/vim2m.c8
-rw-r--r--drivers/media/platform/vimc/Makefile7
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c107
-rw-r--r--drivers/media/platform/vimc/vimc-common.c171
-rw-r--r--drivers/media/platform/vimc/vimc-common.h120
-rw-r--r--drivers/media/platform/vimc/vimc-core.c215
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c182
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c102
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c109
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c19
-rw-r--r--drivers/media/platform/vivid/Makefile2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c7
-rw-r--r--drivers/media/platform/vivid/vivid-core.c368
-rw-r--r--drivers/media/platform/vivid/vivid-core.h25
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c89
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c62
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c57
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.c201
-rw-r--r--drivers/media/platform/vivid/vivid-meta-cap.h29
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.c174
-rw-r--r--drivers/media/platform/vivid/vivid-meta-out.h25
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c8
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h2
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/rc/imon.c64
-rw-r--r--drivers/media/rc/imon_raw.c22
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c6
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-beelink-gs1.c84
-rw-r--r--drivers/media/rc/keymaps/rc-vega-s9x.c54
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/mceusb.c141
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-main.c1
-rw-r--r--drivers/media/rc/tango-ir.c14
-rw-r--r--drivers/media/tuners/qm1d1c0042.c2
-rw-r--r--drivers/media/tuners/si2157.c6
-rw-r--r--drivers/media/tuners/si2157.h33
-rw-r--r--drivers/media/tuners/si2157_priv.h5
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c13
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c508
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-avcore.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c172
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c795
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h30
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c37
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c28
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c391
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h14
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c6
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c20
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c30
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/gspca/sq905.c3
-rw-r--r--drivers/media/usb/gspca/sq905c.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/usb/tm6000/tm6000-regs.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000-usb-isoc.h2
-rw-r--r--drivers/media/usb/tm6000/tm6000.h2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c29
-rw-r--r--drivers/media/usb/uvc/uvc_debugfs.c10
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c4
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c128
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c199
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c112
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c190
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c6
-rw-r--r--drivers/memory/mtk-smi.c4
-rw-r--r--drivers/memstick/core/Kconfig18
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/jmb38x_ms.c14
-rw-r--r--drivers/mfd/Kconfig7
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/ab8500-core.c138
-rw-r--r--drivers/mfd/ab8500-debugfs.c715
-rw-r--r--drivers/mfd/ab8500-gpadc.c1075
-rw-r--r--drivers/mfd/arizona-core.c6
-rw-r--r--drivers/mfd/cros_ec_dev.c235
-rw-r--r--drivers/mfd/cs5535-mfd.c108
-rw-r--r--drivers/mfd/db8500-prcmu.c84
-rw-r--r--drivers/mfd/intel-lpss-pci.c41
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c5
-rw-r--r--drivers/mfd/ipaq-micro.c6
-rw-r--r--drivers/mfd/madera-core.c27
-rw-r--r--drivers/mfd/max77620.c5
-rw-r--r--drivers/mfd/mfd-core.c118
-rw-r--r--drivers/mfd/mt6397-core.c12
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c4
-rw-r--r--drivers/mfd/rk808.c22
-rw-r--r--drivers/mfd/rohm-bd70528.c17
-rw-r--r--drivers/mfd/syscon.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/wm8998-tables.c12
-rw-r--r--drivers/misc/Kconfig17
-rw-r--r--drivers/misc/atmel_tclib.c4
-rw-r--r--drivers/misc/cardreader/Makefile2
-rw-r--r--drivers/misc/cardreader/rts5260.c3
-rw-r--r--drivers/misc/cardreader/rts5261.c792
-rw-r--r--drivers/misc/cardreader/rts5261.h233
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c43
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/cxl/flash.c8
-rw-r--r--drivers/misc/eeprom/at24.c9
-rw-r--r--drivers/misc/eeprom/eeprom.c4
-rw-r--r--drivers/misc/fastrpc.c209
-rw-r--r--drivers/misc/genwqe/card_dev.c23
-rw-r--r--drivers/misc/habanalabs/command_submission.c127
-rw-r--r--drivers/misc/habanalabs/debugfs.c112
-rw-r--r--drivers/misc/habanalabs/device.c18
-rw-r--r--drivers/misc/habanalabs/firmware_if.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c78
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h2
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c53
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/habanalabs.h171
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c73
-rw-r--r--drivers/misc/habanalabs/hw_queue.c249
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h7
-rw-r--r--drivers/misc/habanalabs/include/qman_if.h12
-rw-r--r--drivers/misc/habanalabs/memory.c392
-rw-r--r--drivers/misc/habanalabs/mmu.c204
-rw-r--r--drivers/misc/hpilo.h2
-rw-r--r--drivers/misc/ibmvmc.h4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c80
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.h4
-rw-r--r--drivers/misc/lkdtm/bugs.c39
-rw-r--r--drivers/misc/lkdtm/core.c3
-rw-r--r--drivers/misc/lkdtm/lkdtm.h3
-rw-r--r--drivers/misc/mei/bus-fixup.c9
-rw-r--r--drivers/misc/mei/bus.c42
-rw-r--r--drivers/misc/mei/client.h36
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c45
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h17
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c74
-rw-r--r--drivers/misc/mei/hw-me.h12
-rw-r--r--drivers/misc/mei/hw-txe.c10
-rw-r--r--drivers/misc/mei/init.c6
-rw-r--r--drivers/misc/mei/main.c46
-rw-r--r--drivers/misc/mei/mei_dev.h18
-rw-r--r--drivers/misc/mei/pci-me.c16
-rw-r--r--drivers/misc/mic/Kconfig16
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h2
-rw-r--r--drivers/misc/ocxl/trace.h2
-rw-r--r--drivers/misc/pci_endpoint_test.c8
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c11
-rw-r--r--drivers/misc/sram.c28
-rw-r--r--drivers/misc/ti-st/st_core.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/mmc/core/block.c151
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/core.h2
-rw-r--r--drivers/mmc/core/mmc.c9
-rw-r--r--drivers/mmc/core/quirks.h7
-rw-r--r--drivers/mmc/core/sdio.c28
-rw-r--r--drivers/mmc/core/sdio_bus.c9
-rw-r--r--drivers/mmc/host/Kconfig21
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/atmel-mci.c13
-rw-r--r--drivers/mmc/host/bcm2835.c4
-rw-r--r--drivers/mmc/host/cavium-octeon.c15
-rw-r--r--drivers/mmc/host/dw_mmc.c14
-rw-r--r--drivers/mmc/host/jz4740_mmc.c41
-rw-r--r--drivers/mmc/host/mmc_spi.c2
-rw-r--r--drivers/mmc/host/mmci.c198
-rw-r--r--drivers/mmc/host/mmci.h5
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c46
-rw-r--r--drivers/mmc/host/moxart-mmc.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c31
-rw-r--r--drivers/mmc/host/owl-mmc.c696
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c49
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h14
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c362
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c493
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c12
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c19
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c257
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c53
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci_am654.c71
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c26
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.h32
-rw-r--r--drivers/mmc/host/tmio_mmc.h1
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c12
-rw-r--r--drivers/mmc/host/vub300.c7
-rw-r--r--drivers/mtd/nand/onenand/Makefile2
-rw-r--r--drivers/mtd/nand/onenand/samsung_mtd.c (renamed from drivers/mtd/nand/onenand/samsung.c)0
-rw-r--r--drivers/mtd/ubi/cdev.c36
-rw-r--r--drivers/mtd/ubi/debug.c1
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c31
-rw-r--r--drivers/mtd/ubi/fastmap.c14
-rw-r--r--drivers/mtd/ubi/ubi.h8
-rw-r--r--drivers/mtd/ubi/wl.c32
-rw-r--r--drivers/mtd/ubi/wl.h1
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c192
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c28
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c55
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c12
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c57
-rw-r--r--drivers/net/phy/aquantia.h4
-rw-r--r--drivers/net/phy/bcm-phy-lib.h2
-rw-r--r--drivers/net/phy/dp83869.c49
-rw-r--r--drivers/net/phy/mdio-cavium.h2
-rw-r--r--drivers/net/phy/mdio-i2c.h2
-rw-r--r--drivers/net/phy/mdio-xgene.h2
-rw-r--r--drivers/net/phy/realtek.c9
-rw-r--r--drivers/net/ppp/ppp_generic.c245
-rw-r--r--drivers/net/tap.c12
-rw-r--r--drivers/net/usb/aqc111.h4
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/usbnet.c9
-rw-r--r--drivers/net/wan/z85230.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c33
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c25
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c5
-rw-r--r--drivers/nvdimm/Kconfig1
-rw-r--r--drivers/nvdimm/btt.c18
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c48
-rw-r--r--drivers/nvdimm/claim.c14
-rw-r--r--drivers/nvdimm/core.c9
-rw-r--r--drivers/nvdimm/dax_devs.c27
-rw-r--r--drivers/nvdimm/dimm_devs.c30
-rw-r--r--drivers/nvdimm/e820.c13
-rw-r--r--drivers/nvdimm/namespace_devs.c114
-rw-r--r--drivers/nvdimm/nd-core.h22
-rw-r--r--drivers/nvdimm/nd.h27
-rw-r--r--drivers/nvdimm/of_pmem.c13
-rw-r--r--drivers/nvdimm/pfn_devs.c64
-rw-r--r--drivers/nvdimm/pmem.c18
-rw-r--r--drivers/nvdimm/region_devs.c248
-rw-r--r--drivers/nvme/host/core.c12
-rw-r--r--drivers/nvmem/Kconfig23
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c120
-rw-r--r--drivers/nvmem/imx-ocotp.c4
-rw-r--r--drivers/nvmem/rockchip-otp.c268
-rw-r--r--drivers/nvmem/sc27xx-efuse.c13
-rw-r--r--drivers/nvmem/sprd-efuse.c424
-rw-r--r--drivers/of/address.c103
-rw-r--r--drivers/of/base.c32
-rw-r--r--drivers/of/device.c9
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/of_private.h14
-rw-r--r--drivers/of/overlay.c37
-rw-r--r--drivers/of/platform.c12
-rw-r--r--drivers/of/property.c340
-rw-r--r--drivers/of/unittest-data/testcases.dts1
-rw-r--r--drivers/of/unittest-data/tests-address.dtsi48
-rw-r--r--drivers/of/unittest.c96
-rw-r--r--drivers/opp/core.c69
-rw-r--r--drivers/parport/daisy.c40
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c26
-rw-r--r--drivers/pci/Kconfig26
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c2
-rw-r--r--drivers/pci/ats.c207
-rw-r--r--drivers/pci/controller/Kconfig31
-rw-r--r--drivers/pci/controller/Makefile4
-rw-r--r--drivers/pci/controller/cadence/Kconfig45
-rw-r--r--drivers/pci/controller/cadence/Makefile5
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c (renamed from drivers/pci/controller/pcie-cadence-ep.c)96
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c (renamed from drivers/pci/controller/pcie-cadence-host.c)97
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c174
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c (renamed from drivers/pci/controller/pcie-cadence.c)0
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h (renamed from drivers/pci/controller/pcie-cadence.h)79
-rw-r--r--drivers/pci/controller/dwc/Kconfig6
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c1
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c136
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c41
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c10
-rw-r--r--drivers/pci/controller/pci-aardvark.c133
-rw-r--r--drivers/pci/controller/pci-ftpci100.c79
-rw-r--r--drivers/pci/controller/pci-host-common.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c218
-rw-r--r--drivers/pci/controller/pci-mvebu.c4
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c1
-rw-r--r--drivers/pci/controller/pci-v3-semi.c74
-rw-r--r--drivers/pci/controller/pci-versatile.c71
-rw-r--r--drivers/pci/controller/pci-xgene.c73
-rw-r--r--drivers/pci/controller/pcie-altera.c41
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c5
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c9
-rw-r--r--drivers/pci/controller/pcie-iproc.c106
-rw-r--r--drivers/pci/controller/pcie-mediatek.c43
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c146
-rw-r--r--drivers/pci/controller/pcie-rcar.c92
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c158
-rw-r--r--drivers/pci/controller/pcie-rockchip.h7
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c21
-rw-r--r--drivers/pci/controller/pcie-xilinx.c18
-rw-r--r--drivers/pci/controller/vmd.c34
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c10
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c2
-rw-r--r--drivers/pci/hotplug/Kconfig2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c12
-rw-r--r--drivers/pci/hotplug/pciehp.h8
-rw-r--r--drivers/pci/hotplug/pciehp_core.c36
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c10
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c67
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c131
-rw-r--r--drivers/pci/iov.c9
-rw-r--r--drivers/pci/msi.c25
-rw-r--r--drivers/pci/of.c67
-rw-r--r--drivers/pci/pci-bridge-emul.c25
-rw-r--r--drivers/pci/pci-bridge-emul.h78
-rw-r--r--drivers/pci/pci-driver.c198
-rw-r--r--drivers/pci/pci-sysfs.c28
-rw-r--r--drivers/pci/pci.c390
-rw-r--r--drivers/pci/pci.h48
-rw-r--r--drivers/pci/pcie/Kconfig10
-rw-r--r--drivers/pci/pcie/aer.c88
-rw-r--r--drivers/pci/pcie/aspm.c245
-rw-r--r--drivers/pci/pcie/dpc.c2
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/pci/pcie/portdrv_core.c7
-rw-r--r--drivers/pci/pcie/portdrv_pci.c8
-rw-r--r--drivers/pci/pcie/ptm.c2
-rw-r--r--drivers/pci/probe.c60
-rw-r--r--drivers/pci/proc.c4
-rw-r--r--drivers/pci/quirks.c157
-rw-r--r--drivers/pci/setup-bus.c70
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/phy/allwinner/Kconfig11
-rw-r--r--drivers/phy/allwinner/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c190
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c70
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c10
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c4
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c4
-rw-r--r--drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c3
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c9
-rw-r--r--drivers/phy/phy-xgene.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c120
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h96
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c7
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c5
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c7
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c805
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c23
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c137
-rw-r--r--drivers/phy/tegra/xusb.c93
-rw-r--r--drivers/phy/tegra/xusb.h4
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c3
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c7
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c5
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c119
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c6
-rw-r--r--drivers/pinctrl/devicetree.c50
-rw-r--r--drivers/pinctrl/devicetree.h7
-rw-r--r--drivers/pinctrl/freescale/Kconfig12
-rw-r--r--drivers/pinctrl/intel/Kconfig7
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c119
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c171
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c454
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c4
-rw-r--r--drivers/pinctrl/meson/Kconfig6
-rw-r--r--drivers/pinctrl/meson/Makefile1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-a1.c942
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-axg.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c38
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h7
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c1
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c1
-rw-r--r--drivers/pinctrl/mvebu/Kconfig10
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c40
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c4
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c12
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c81
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c32
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c65
-rw-r--r--drivers/pinctrl/pinctrl-at91.c55
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c4
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c54
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c4
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c945
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h144
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c50
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c23
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c29
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c29
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c30
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c382
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c8
-rw-r--r--drivers/pinctrl/pinctrl-rza2.c8
-rw-r--r--drivers/pinctrl/pinctrl-rzn1.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c53
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c21
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c4
-rw-r--r--drivers/pinctrl/pinctrl-u300.c4
-rw-r--r--drivers/pinctrl/pinctrl-xway.c4
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa25x.c13
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa27x.c13
-rw-r--r--drivers/pinctrl/qcom/Kconfig101
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c115
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1127
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c23
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c121
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c14
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c10
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig12
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile4
-rw-r--r--drivers/pinctrl/sh-pfc/core.c32
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c35
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c863
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c57
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c4
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h8
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c41
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c43
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c51
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c4
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c23
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c10
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c3
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c4
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c4
-rw-r--r--drivers/platform/chrome/Kconfig19
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/cros_ec.c84
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c25
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c17
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c267
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c19
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub.c199
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c5
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/Kconfig2
-rw-r--r--drivers/platform/chrome/wilco_ec/Makefile3
-rw-r--r--drivers/platform/chrome/wilco_ec/core.c28
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c47
-rw-r--r--drivers/platform/chrome/wilco_ec/keyboard_leds.c191
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c91
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c2
-rw-r--r--drivers/platform/goldfish/Kconfig3
-rw-r--r--drivers/platform/mellanox/Kconfig16
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c321
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h103
-rw-r--r--drivers/platform/x86/Kconfig37
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/acerhdf.c7
-rw-r--r--drivers/platform/x86/asus-laptop.c71
-rw-r--r--drivers/platform/x86/dell-laptop.c26
-rw-r--r--drivers/platform/x86/dell_rbu.c2
-rw-r--r--drivers/platform/x86/hdaps.c40
-rw-r--r--drivers/platform/x86/hp-wmi.c10
-rw-r--r--drivers/platform/x86/huawei-wmi.c876
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_common.c147
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_common.h41
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_microb.c57
-rw-r--r--drivers/platform/x86/intel_cht_int33fe_typec.c (renamed from drivers/platform/x86/intel_cht_int33fe.c)78
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c28
-rw-r--r--drivers/platform/x86/intel_pmc_core.c17
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/peaq-wmi.c66
-rw-r--r--drivers/platform/x86/system76_acpi.c384
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c52
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/power/avs/smartreflex.c2
-rw-r--r--drivers/power/reset/Kconfig10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-reset.c6
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c8
-rw-r--r--drivers/power/reset/mt6323-poweroff.c97
-rw-r--r--drivers/power/supply/Kconfig2
-rw-r--r--drivers/power/supply/ab8500_btemp.c50
-rw-r--r--drivers/power/supply/ab8500_charger.c83
-rw-r--r--drivers/power/supply/ab8500_fg.c49
-rw-r--r--drivers/power/supply/abx500_chargalg.c8
-rw-r--r--drivers/power/supply/axp20x_usb_power.c8
-rw-r--r--drivers/power/supply/bd70528-charger.c1
-rw-r--r--drivers/power/supply/cpcap-battery.c271
-rw-r--r--drivers/power/supply/cpcap-charger.c222
-rw-r--r--drivers/power/supply/test_power.c61
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/pwm/Kconfig9
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/rapidio/devices/tsi721.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c52
-rw-r--r--drivers/remoteproc/remoteproc_core.c9
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c3
-rw-r--r--drivers/remoteproc/stm32_rproc.c100
-rw-r--r--drivers/rpmsg/Kconfig2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c53
-rw-r--r--drivers/rpmsg/qcom_glink_smem.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c16
-rw-r--r--drivers/rtc/Kconfig31
-rw-r--r--drivers/rtc/dev.c33
-rw-r--r--drivers/rtc/interface.c58
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c11
-rw-r--r--drivers/rtc/rtc-armada38x.c10
-rw-r--r--drivers/rtc/rtc-asm9260.c4
-rw-r--r--drivers/rtc/rtc-aspeed.c4
-rw-r--r--drivers/rtc/rtc-at91rm9200.c19
-rw-r--r--drivers/rtc/rtc-at91sam9.c4
-rw-r--r--drivers/rtc/rtc-bd70528.c1
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c5
-rw-r--r--drivers/rtc/rtc-cadence.c4
-rw-r--r--drivers/rtc/rtc-coh901331.c4
-rw-r--r--drivers/rtc/rtc-cros-ec.c22
-rw-r--r--drivers/rtc/rtc-da9063.c3
-rw-r--r--drivers/rtc/rtc-davinci.c4
-rw-r--r--drivers/rtc/rtc-digicolor.c4
-rw-r--r--drivers/rtc/rtc-ds1216.c4
-rw-r--r--drivers/rtc/rtc-ds1286.c4
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/rtc/rtc-ds1343.c297
-rw-r--r--drivers/rtc/rtc-ds1347.c102
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-ds1511.c4
-rw-r--r--drivers/rtc/rtc-ds1553.c4
-rw-r--r--drivers/rtc/rtc-ds1685.c116
-rw-r--r--drivers/rtc/rtc-em3027.c4
-rw-r--r--drivers/rtc/rtc-ep93xx.c4
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c24
-rw-r--r--drivers/rtc/rtc-goldfish.c8
-rw-r--r--drivers/rtc/rtc-jz4740.c4
-rw-r--r--drivers/rtc/rtc-lpc24xx.c4
-rw-r--r--drivers/rtc/rtc-lpc32xx.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c7
-rw-r--r--drivers/rtc/rtc-m48t86.c11
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c15
-rw-r--r--drivers/rtc/rtc-meson.c6
-rw-r--r--drivers/rtc/rtc-msm6242.c23
-rw-r--r--drivers/rtc/rtc-mt6397.c107
-rw-r--r--drivers/rtc/rtc-mt7622.c4
-rw-r--r--drivers/rtc/rtc-mv.c4
-rw-r--r--drivers/rtc/rtc-omap.c4
-rw-r--r--drivers/rtc/rtc-pcf2127.c10
-rw-r--r--drivers/rtc/rtc-pcf8523.c18
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pic32.c4
-rw-r--r--drivers/rtc/rtc-pm8xxx.c2
-rw-r--r--drivers/rtc/rtc-r7301.c7
-rw-r--r--drivers/rtc/rtc-rtd119x.c4
-rw-r--r--drivers/rtc/rtc-rv3028.c146
-rw-r--r--drivers/rtc/rtc-rx6110.c16
-rw-r--r--drivers/rtc/rtc-s35390a.c16
-rw-r--r--drivers/rtc/rtc-s3c.c4
-rw-r--r--drivers/rtc/rtc-sa1100.c4
-rw-r--r--drivers/rtc/rtc-sc27xx.c7
-rw-r--r--drivers/rtc/rtc-sirfsoc.c8
-rw-r--r--drivers/rtc/rtc-spear.c4
-rw-r--r--drivers/rtc/rtc-st-lpc.c5
-rw-r--r--drivers/rtc/rtc-stk17ta8.c4
-rw-r--r--drivers/rtc/rtc-stm32.c4
-rw-r--r--drivers/rtc/rtc-sun6i.c2
-rw-r--r--drivers/rtc/rtc-sunxi.c4
-rw-r--r--drivers/rtc/rtc-tegra.c8
-rw-r--r--drivers/rtc/rtc-tps65910.c21
-rw-r--r--drivers/rtc/rtc-tx4939.c4
-rw-r--r--drivers/rtc/rtc-v3020.c3
-rw-r--r--drivers/rtc/rtc-vr41xx.c8
-rw-r--r--drivers/rtc/rtc-vt8500.c32
-rw-r--r--drivers/rtc/rtc-wilco-ec.c8
-rw-r--r--drivers/rtc/rtc-xgene.c6
-rw-r--r--drivers/rtc/rtc-zynqmp.c7
-rw-r--r--drivers/rtc/sysfs.c5
-rw-r--r--drivers/s390/char/tape_char.c41
-rw-r--r--drivers/s390/crypto/zcrypt_error.h2
-rw-r--r--drivers/s390/scsi/Makefile2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c12
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c8
-rw-r--r--drivers/s390/scsi/zfcp_def.h4
-rw-r--r--drivers/s390/scsi/zfcp_diag.c305
-rw-r--r--drivers/s390/scsi/zfcp_diag.h101
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c73
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h21
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c170
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/sbus/char/envctrl.c4
-rw-r--r--drivers/scsi/3w-xxxx.c4
-rw-r--r--drivers/scsi/NCR5380.c37
-rw-r--r--drivers/scsi/aacraid/aachba.c11
-rw-r--r--drivers/scsi/aacraid/aacraid.h23
-rw-r--r--drivers/scsi/aacraid/comminit.c5
-rw-r--r--drivers/scsi/aacraid/commsup.c21
-rw-r--r--drivers/scsi/aacraid/linit.c35
-rw-r--r--drivers/scsi/aacraid/src.c10
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/arm/acornscsi.c4
-rw-r--r--drivers/scsi/atari_scsi.c6
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/bfa/bfad.c3
-rw-r--r--drivers/scsi/bfa/bfad_attr.c4
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c20
-rw-r--r--drivers/scsi/csiostor/csio_init.c7
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c18
-rw-r--r--drivers/scsi/csiostor/csio_mb.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c28
-rw-r--r--drivers/scsi/cxlflash/main.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c1
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c3
-rw-r--r--drivers/scsi/fnic/vnic_dev.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h67
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c376
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c30
-rw-r--r--drivers/scsi/hosts.c19
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/isci/port_config.c2
-rw-r--r--drivers/scsi/isci/remote_device.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c8
-rw-r--r--drivers/scsi/lpfc/lpfc.h40
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c298
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c200
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h31
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c954
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c149
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c85
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c103
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c391
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h42
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c36
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h15
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c344
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c12
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pcmcia/Kconfig2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c20
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c133
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c38
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c70
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h24
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c451
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h3
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c8
-rw-r--r--drivers/scsi/qedi/qedi_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h34
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c106
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c174
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c3
-rw-r--r--drivers/scsi/scsi.c6
-rw-r--r--drivers/scsi/scsi_debug.c9
-rw-r--r--drivers/scsi/scsi_lib.c45
-rw-r--r--drivers/scsi/scsi_logging.c10
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/scsi_trace.c124
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sg.c150
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h77
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c437
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c22
-rw-r--r--drivers/scsi/sr_vendor.c18
-rw-r--r--drivers/scsi/st.c28
-rw-r--r--drivers/scsi/storvsc_drv.c41
-rw-r--r--drivers/scsi/sun3_scsi.c4
-rw-r--r--drivers/scsi/ufs/Kconfig10
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c90
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c5
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c3
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c53
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h3
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c15
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-dwc.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c1
-rw-r--r--drivers/scsi/ufs/ufshcd.c214
-rw-r--r--drivers/scsi/ufs/ufshcd.h12
-rw-r--r--drivers/scsi/ufs/ufshci.h2
-rw-r--r--drivers/scsi/zorro_esp.c11
-rw-r--r--drivers/soundwire/Kconfig2
-rw-r--r--drivers/soundwire/bus.c7
-rw-r--r--drivers/soundwire/cadence_master.c292
-rw-r--r--drivers/soundwire/cadence_master.h39
-rw-r--r--drivers/soundwire/intel.c201
-rw-r--r--drivers/soundwire/intel_init.c1
-rw-r--r--drivers/soundwire/slave.c98
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ion/ion.c4
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c301
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.txt18
-rw-r--r--drivers/staging/board/armadillo800eva.c12
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c4
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_routes.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c21
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c27
-rw-r--r--drivers/staging/exfat/Kconfig9
-rw-r--r--drivers/staging/exfat/TODO61
-rw-r--r--drivers/staging/exfat/exfat.h186
-rw-r--r--drivers/staging/exfat/exfat_blkdev.c28
-rw-r--r--drivers/staging/exfat/exfat_cache.c303
-rw-r--r--drivers/staging/exfat/exfat_core.c1938
-rw-r--r--drivers/staging/exfat/exfat_nls.c192
-rw-r--r--drivers/staging/exfat/exfat_super.c896
-rw-r--r--drivers/staging/fbtft/Kconfig21
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_seps525.c213
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c22
-rw-r--r--drivers/staging/fbtft/fbtft-core.c129
-rw-r--r--drivers/staging/fbtft/fbtft.h4
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h11
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c8
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c2
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c6
-rw-r--r--drivers/staging/fieldbus/dev_core.c3
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h6
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c50
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.h5
-rw-r--r--drivers/staging/fwserial/Kconfig26
-rw-r--r--drivers/staging/gasket/gasket_constants.h3
-rw-r--r--drivers/staging/gasket/gasket_core.c12
-rw-r--r--drivers/staging/gasket/gasket_core.h4
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c16
-rw-r--r--drivers/staging/iio/accel/adis16240.c1
-rw-r--r--drivers/staging/iio/adc/ad7192.c79
-rw-r--r--drivers/staging/iio/frequency/ad9834.c4
-rw-r--r--drivers/staging/isdn/avm/b1.c41
-rw-r--r--drivers/staging/isdn/gigaset/interface.c2
-rw-r--r--drivers/staging/kpc2000/kpc2000_i2c.c204
-rw-r--r--drivers/staging/kpc2000/kpc2000_spi.c24
-rw-r--r--drivers/staging/media/allegro-dvt/nal-h264.c2
-rw-r--r--drivers/staging/media/hantro/hantro.h20
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c16
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c11
-rw-r--r--drivers/staging/media/hantro/hantro_h1_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/hantro_h264.c120
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h7
-rw-r--r--drivers/staging/media/hantro/hantro_v4l2.c48
-rw-r--r--drivers/staging/media/hantro/rk3288_vpu_hw.c20
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw.c12
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c4
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c11
-rw-r--r--drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c12
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c25
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c51
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c21
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c41
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c10
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c27
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c27
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c38
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c36
-rw-r--r--drivers/staging/media/ipu3/Makefile6
-rw-r--r--drivers/staging/media/ipu3/TODO5
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h5
-rw-r--r--drivers/staging/media/omap4iss/iss.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c64
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c9
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c147
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c616
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h318
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c102
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.h1
-rw-r--r--drivers/staging/most/Kconfig8
-rw-r--r--drivers/staging/most/cdev/cdev.c1
-rw-r--r--drivers/staging/most/configfs.c124
-rw-r--r--drivers/staging/most/core.c108
-rw-r--r--drivers/staging/most/core.h1
-rw-r--r--drivers/staging/most/net/net.c1
-rw-r--r--drivers/staging/most/sound/sound.c9
-rw-r--r--drivers/staging/most/video/video.c1
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c21
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c23
-rw-r--r--drivers/staging/netlogic/TODO2
-rw-r--r--drivers/staging/netlogic/xlr_net.c3
-rw-r--r--drivers/staging/nvec/Kconfig10
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c6
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c6
-rw-r--r--drivers/staging/octeon/ethernet.c6
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/octeon/octeon-stubs.h106
-rw-r--r--drivers/staging/olpc_dcon/Kconfig21
-rw-r--r--drivers/staging/olpc_dcon/Makefile4
-rw-r--r--drivers/staging/olpc_dcon/TODO1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c6
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h5
-rw-r--r--drivers/staging/pi433/Kconfig24
-rw-r--r--drivers/staging/pi433/pi433_if.c12
-rw-r--r--drivers/staging/qlge/TODO3
-rw-r--r--drivers/staging/qlge/qlge.h145
-rw-r--r--drivers/staging/qlge/qlge_dbg.c291
-rw-r--r--drivers/staging/qlge/qlge_main.c909
-rw-r--r--drivers/staging/qlge/qlge_mpi.c1
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c43
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c167
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c26
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c55
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c3
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c3
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h1
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c30
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c9
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c9
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c8
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c135
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c25
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c47
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.c103
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c20
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c174
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c23
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c159
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c19
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c402
-rw-r--r--drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c1076
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c1
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c77
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c41
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c127
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_phycfg.h26
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h21
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h4
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h7
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c56
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c59
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c23
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c136
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c8
-rw-r--r--drivers/staging/rts5208/ms.c86
-rw-r--r--drivers/staging/rts5208/ms.h70
-rw-r--r--drivers/staging/rts5208/rtsx.c3
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c4
-rw-r--r--drivers/staging/rts5208/sd.h2
-rw-r--r--drivers/staging/rts5208/xd.c8
-rw-r--r--drivers/staging/rts5208/xd.h6
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c41
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h18
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c4
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c16
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c28
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h11
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c94
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h83
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h17
-rw-r--r--drivers/staging/uwb/rsv.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig12
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c9
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h102
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h172
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h28
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c23
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c370
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c231
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h104
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h96
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c164
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c2
-rw-r--r--drivers/staging/vt6655/card.c24
-rw-r--r--drivers/staging/vt6655/card.h2
-rw-r--r--drivers/staging/vt6655/device_main.c14
-rw-r--r--drivers/staging/vt6655/power.c10
-rw-r--r--drivers/staging/vt6655/rf.c5
-rw-r--r--drivers/staging/vt6655/rf.h19
-rw-r--r--drivers/staging/vt6655/rxtx.c5
-rw-r--r--drivers/staging/vt6656/main_usb.c9
-rw-r--r--drivers/staging/vt6656/rxtx.c8
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt97
-rw-r--r--drivers/staging/wfx/Kconfig8
-rw-r--r--drivers/staging/wfx/Makefile24
-rw-r--r--drivers/staging/wfx/TODO17
-rw-r--r--drivers/staging/wfx/bh.c321
-rw-r--r--drivers/staging/wfx/bh.h32
-rw-r--r--drivers/staging/wfx/bus.h36
-rw-r--r--drivers/staging/wfx/bus_sdio.c271
-rw-r--r--drivers/staging/wfx/bus_spi.c267
-rw-r--r--drivers/staging/wfx/data_rx.c213
-rw-r--r--drivers/staging/wfx/data_rx.h19
-rw-r--r--drivers/staging/wfx/data_tx.c837
-rw-r--r--drivers/staging/wfx/data_tx.h93
-rw-r--r--drivers/staging/wfx/debug.c311
-rw-r--r--drivers/staging/wfx/debug.h19
-rw-r--r--drivers/staging/wfx/fwio.c413
-rw-r--r--drivers/staging/wfx/fwio.h15
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h681
-rw-r--r--drivers/staging/wfx/hif_api_general.h437
-rw-r--r--drivers/staging/wfx/hif_api_mib.h557
-rw-r--r--drivers/staging/wfx/hif_rx.c364
-rw-r--r--drivers/staging/wfx/hif_rx.h18
-rw-r--r--drivers/staging/wfx/hif_tx.c493
-rw-r--r--drivers/staging/wfx/hif_tx.h68
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h293
-rw-r--r--drivers/staging/wfx/hwio.c352
-rw-r--r--drivers/staging/wfx/hwio.h80
-rw-r--r--drivers/staging/wfx/key.c268
-rw-r--r--drivers/staging/wfx/key.h22
-rw-r--r--drivers/staging/wfx/main.c491
-rw-r--r--drivers/staging/wfx/main.h47
-rw-r--r--drivers/staging/wfx/queue.c619
-rw-r--r--drivers/staging/wfx/queue.h61
-rw-r--r--drivers/staging/wfx/scan.c294
-rw-r--r--drivers/staging/wfx/scan.h42
-rw-r--r--drivers/staging/wfx/secure_link.h57
-rw-r--r--drivers/staging/wfx/sta.c1684
-rw-r--r--drivers/staging/wfx/sta.h103
-rw-r--r--drivers/staging/wfx/traces.h443
-rw-r--r--drivers/staging/wfx/wfx.h208
-rw-r--r--drivers/staging/wilc1000/Makefile8
-rw-r--r--drivers/staging/wilc1000/cfg80211.c (renamed from drivers/staging/wilc1000/wilc_wfi_cfgoperations.c)246
-rw-r--r--drivers/staging/wilc1000/cfg80211.h (renamed from drivers/staging/wilc1000/wilc_wfi_cfgoperations.h)8
-rw-r--r--drivers/staging/wilc1000/hif.c (renamed from drivers/staging/wilc1000/wilc_hif.c)43
-rw-r--r--drivers/staging/wilc1000/hif.h (renamed from drivers/staging/wilc1000/wilc_hif.h)6
-rw-r--r--drivers/staging/wilc1000/mon.c (renamed from drivers/staging/wilc1000/wilc_mon.c)4
-rw-r--r--drivers/staging/wilc1000/netdev.c (renamed from drivers/staging/wilc1000/wilc_netdev.c)146
-rw-r--r--drivers/staging/wilc1000/netdev.h (renamed from drivers/staging/wilc1000/wilc_wfi_netdevice.h)34
-rw-r--r--drivers/staging/wilc1000/sdio.c (renamed from drivers/staging/wilc1000/wilc_sdio.c)4
-rw-r--r--drivers/staging/wilc1000/spi.c (renamed from drivers/staging/wilc1000/wilc_spi.c)15
-rw-r--r--drivers/staging/wilc1000/wlan.c (renamed from drivers/staging/wilc1000/wilc_wlan.c)4
-rw-r--r--drivers/staging/wilc1000/wlan.h (renamed from drivers/staging/wilc1000/wilc_wlan.h)2
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.c (renamed from drivers/staging/wilc1000/wilc_wlan_cfg.c)30
-rw-r--r--drivers/staging/wilc1000/wlan_cfg.h (renamed from drivers/staging/wilc1000/wilc_wlan_cfg.h)0
-rw-r--r--drivers/staging/wilc1000/wlan_if.h (renamed from drivers/staging/wilc1000/wilc_wlan_if.h)8
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h18
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c64
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c3
-rw-r--r--drivers/target/iscsi/iscsi_target.c24
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c232
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h17
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h3
-rw-r--r--drivers/target/target_core_fabric_lib.c2
-rw-r--r--drivers/target/target_core_tpg.c12
-rw-r--r--drivers/target/target_core_transport.c28
-rw-r--r--drivers/target/target_core_user.c6
-rw-r--r--drivers/target/target_core_xcopy.c1
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c2
-rw-r--r--drivers/thunderbolt/cap.c6
-rw-r--r--drivers/thunderbolt/ctl.c8
-rw-r--r--drivers/thunderbolt/eeprom.c11
-rw-r--r--drivers/thunderbolt/icm.c157
-rw-r--r--drivers/thunderbolt/lc.c193
-rw-r--r--drivers/thunderbolt/path.c52
-rw-r--r--drivers/thunderbolt/switch.c586
-rw-r--r--drivers/thunderbolt/tb.c340
-rw-r--r--drivers/thunderbolt/tb.h81
-rw-r--r--drivers/thunderbolt/tb_msgs.h2
-rw-r--r--drivers/thunderbolt/tb_regs.h97
-rw-r--r--drivers/thunderbolt/tunnel.c364
-rw-r--r--drivers/thunderbolt/tunnel.h10
-rw-r--r--drivers/thunderbolt/xdomain.c5
-rw-r--r--drivers/tty/Kconfig40
-rw-r--r--drivers/tty/amiserial.c84
-rw-r--r--drivers/tty/hvc/Kconfig30
-rw-r--r--drivers/tty/hvc/hvc_dcc.c28
-rw-r--r--drivers/tty/rocket.c32
-rw-r--r--drivers/tty/serdev/core.c111
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c84
-rw-r--r--drivers/tty/serial/8250/8250_dw.c83
-rw-r--r--drivers/tty/serial/8250/8250_exar.c19
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c21
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c31
-rw-r--r--drivers/tty/serial/8250/8250_pci.c300
-rw-r--r--drivers/tty/serial/8250/8250_port.c14
-rw-r--r--drivers/tty/serial/8250/Kconfig3
-rw-r--r--drivers/tty/serial/Kconfig106
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c12
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c84
-rw-r--r--drivers/tty/serial/ifx6x60.c3
-rw-r--r--drivers/tty/serial/imx.c7
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/msm_serial.c10
-rw-r--r--drivers/tty/serial/pch_uart.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c68
-rw-r--r--drivers/tty/serial/samsung_tty.c (renamed from drivers/tty/serial/samsung.c)0
-rw-r--r--drivers/tty/serial/serial-tegra.c3
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/sh-sci.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h5
-rw-r--r--drivers/tty/serial/sprd_serial.c33
-rw-r--r--drivers/tty/serial/stm32-usart.c6
-rw-r--r--drivers/tty/serial/uartlite.c97
-rw-r--r--drivers/tty/tty_io.c19
-rw-r--r--drivers/tty/tty_ldisc.c7
-rw-r--r--drivers/tty/vt/keyboard.c2
-rw-r--r--drivers/tty/vt/vc_screen.c3
-rw-r--r--drivers/uio/uio_dmem_genirq.c14
-rw-r--r--drivers/usb/cdns3/Kconfig10
-rw-r--r--drivers/usb/cdns3/Makefile1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c236
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c79
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c22
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/udc.c75
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c31
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/core/config.c12
-rw-r--r--drivers/usb/core/devio.c35
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/core/hub.c201
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/debugfs.c2
-rw-r--r--drivers/usb/dwc3/Kconfig30
-rw-r--r--drivers/usb/dwc3/core.c37
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c2
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c28
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_acm.c21
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/function/f_obex.c2
-rw-r--r--drivers/usb/gadget/function/f_serial.c21
-rw-r--r--drivers/usb/gadget/function/f_tcm.c13
-rw-r--r--drivers/usb/gadget/function/u_audio.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c516
-rw-r--r--drivers/usb/gadget/function/u_serial.h8
-rw-r--r--drivers/usb/gadget/legacy/Kconfig26
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c3
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c3
-rw-r--r--drivers/usb/gadget/legacy/serial.c49
-rw-r--r--drivers/usb/gadget/udc/Kconfig19
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c3
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c9
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c10
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c12
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h2
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c4
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c21
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c5
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c3810
-rw-r--r--drivers/usb/host/Kconfig106
-rw-r--r--drivers/usb/host/bcma-hcd.c5
-rw-r--r--drivers/usb/host/fotg210-hcd.c8
-rw-r--r--drivers/usb/host/imx21-dbg.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/ohci-at91.c8
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c14
-rw-r--r--drivers/usb/host/pci-quirks.c2
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci-tegra.c126
-rw-r--r--drivers/usb/host/xhci-trace.h26
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h29
-rw-r--r--drivers/usb/image/microtek.c3
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c2
-rw-r--r--drivers/usb/misc/Kconfig22
-rw-r--r--drivers/usb/misc/appledisplay.c8
-rw-r--r--drivers/usb/misc/chaoskey.c24
-rw-r--r--drivers/usb/misc/ftdi-elan.c6
-rw-r--r--drivers/usb/misc/idmouse.c36
-rw-r--r--drivers/usb/misc/legousbtower.c303
-rw-r--r--drivers/usb/misc/sisusbvga/Kconfig2
-rw-r--r--drivers/usb/misc/usb251xb.c66
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c35
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c5
-rw-r--r--drivers/usb/phy/phy-keystone.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c5
-rw-r--r--drivers/usb/renesas_usbhs/common.h3
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.c19
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c12
-rw-r--r--drivers/usb/roles/class.c21
-rw-r--r--drivers/usb/serial/Kconfig48
-rw-r--r--drivers/usb/serial/ch341.c97
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c762
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/pl2303.c124
-rw-r--r--drivers/usb/serial/pl2303.h6
-rw-r--r--drivers/usb/storage/ene_ub6250.c2
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/transport.c3
-rw-r--r--drivers/usb/storage/uas.c11
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/Kconfig11
-rw-r--r--drivers/usb/typec/Makefile1
-rw-r--r--drivers/usb/typec/class.c42
-rw-r--r--drivers/usb/typec/hd3ss3220.c269
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c135
-rw-r--r--drivers/usb/typec/tps6598x.c49
-rw-r--r--drivers/usb/typec/ucsi/displayport.c40
-rw-r--r--drivers/usb/typec/ucsi/trace.c11
-rw-r--r--drivers/usb/typec/ucsi/trace.h79
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c609
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h417
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c91
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c166
-rw-r--r--drivers/usb/usbip/Kconfig1
-rw-r--r--drivers/usb/usbip/stub_rx.c50
-rw-r--r--drivers/usb/usbip/stub_tx.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c11
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c32
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h4
-rw-r--r--drivers/vfio/vfio.c39
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--drivers/vhost/test.c12
-rw-r--r--drivers/vhost/vsock.c12
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/gpio_backlight.c128
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c2
-rw-r--r--drivers/video/backlight/lm3630a_bl.c13
-rw-r--r--drivers/video/backlight/pm8941-wled.c424
-rw-r--r--drivers/video/backlight/pwm_bl.c39
-rw-r--r--drivers/video/backlight/qcom-wled.c1296
-rw-r--r--drivers/video/backlight/tosa_bl.c10
-rw-r--r--drivers/video/backlight/tosa_bl.h8
-rw-r--r--drivers/video/backlight/tosa_lcd.c28
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c12
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c21
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c428
-rw-r--r--drivers/video/fbdev/matrox/i2c-matroxfb.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c13
-rw-r--r--drivers/video/hdmi.c8
-rw-r--r--drivers/video/logo/.gitignore1
-rw-r--r--drivers/video/logo/Makefile15
-rw-r--r--drivers/video/logo/pnmtologo.c (renamed from scripts/pnmtologo.c)0
-rw-r--r--drivers/virt/fsl_hypervisor.c2
-rw-r--r--drivers/w1/masters/sgi_w1.c4
-rw-r--r--drivers/w1/slaves/Kconfig8
-rw-r--r--drivers/w1/slaves/Makefile1
-rw-r--r--drivers/w1/slaves/w1_ds2430.c295
-rw-r--r--drivers/watchdog/Kconfig5
-rw-r--r--drivers/watchdog/acquirewdt.c1
-rw-r--r--drivers/watchdog/advantechwdt.c1
-rw-r--r--drivers/watchdog/alim1535_wdt.c1
-rw-r--r--drivers/watchdog/alim7101_wdt.c1
-rw-r--r--drivers/watchdog/ar7_wdt.c1
-rw-r--r--drivers/watchdog/aspeed_wdt.c16
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c1
-rw-r--r--drivers/watchdog/at91sam9_wdt.h34
-rw-r--r--drivers/watchdog/ath79_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c1
-rw-r--r--drivers/watchdog/bd70528_wdt.c4
-rw-r--r--drivers/watchdog/cadence_wdt.c6
-rw-r--r--drivers/watchdog/cpu5wdt.c1
-rw-r--r--drivers/watchdog/eurotechwdt.c1
-rw-r--r--drivers/watchdog/f71808e_wdt.c1
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/geodewdt.c1
-rw-r--r--drivers/watchdog/ib700wdt.c1
-rw-r--r--drivers/watchdog/ibmasr.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c30
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c45
-rw-r--r--drivers/watchdog/indydog.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c1
-rw-r--r--drivers/watchdog/iop_wdt.c1
-rw-r--r--drivers/watchdog/it8712f_wdt.c1
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c108
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/machzwd.c1
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/mixcomwd.c1
-rw-r--r--drivers/watchdog/mtx-1_wdt.c1
-rw-r--r--drivers/watchdog/mv64x60_wdt.c1
-rw-r--r--drivers/watchdog/nv_tco.c1
-rw-r--r--drivers/watchdog/pc87413_wdt.c1
-rw-r--r--drivers/watchdog/pcwd.c1
-rw-r--r--drivers/watchdog/pcwd_pci.c1
-rw-r--r--drivers/watchdog/pcwd_usb.c1
-rw-r--r--drivers/watchdog/pika_wdt.c1
-rw-r--r--drivers/watchdog/pnx833x_wdt.c1
-rw-r--r--drivers/watchdog/rc32434_wdt.c1
-rw-r--r--drivers/watchdog/rdc321x_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c1
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--drivers/watchdog/sb_wdog.c1
-rw-r--r--drivers/watchdog/sbc60xxwdt.c1
-rw-r--r--drivers/watchdog/sbc7240_wdt.c4
-rw-r--r--drivers/watchdog/sbc_epx_c3.c1
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c1
-rw-r--r--drivers/watchdog/sc520_wdt.c1
-rw-r--r--drivers/watchdog/sch311x_wdt.c1
-rw-r--r--drivers/watchdog/scx200_wdt.c1
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c1
-rw-r--r--drivers/watchdog/sprd_wdt.c6
-rw-r--r--drivers/watchdog/w83627hf_wdt.c11
-rw-r--r--drivers/watchdog/w83877f_wdt.c1
-rw-r--r--drivers/watchdog/w83977f_wdt.c1
-rw-r--r--drivers/watchdog/wafer5823wdt.c1
-rw-r--r--drivers/watchdog/watchdog_dev.c102
-rw-r--r--drivers/watchdog/wdat_wdt.c2
-rw-r--r--drivers/watchdog/wdrtas.c1
-rw-r--r--drivers/watchdog/wdt.c1
-rw-r--r--drivers/watchdog/wdt285.c1
-rw-r--r--drivers/watchdog/wdt977.c1
-rw-r--r--drivers/watchdog/wdt_pci.c1
-rw-r--r--drivers/xen/balloon.c1
-rw-r--r--drivers/xen/gntdev-common.h8
-rw-r--r--drivers/xen/gntdev.c179
-rw-r--r--drivers/xen/platform-pci.c14
-rw-r--r--drivers/xen/swiotlb-xen.c12
-rw-r--r--fs/afs/cmservice.c6
-rw-r--r--fs/afs/dir_edit.c12
-rw-r--r--fs/afs/file.c6
-rw-r--r--fs/afs/fsclient.c16
-rw-r--r--fs/afs/internal.h16
-rw-r--r--fs/afs/rxrpc.c12
-rw-r--r--fs/afs/server.c3
-rw-r--r--fs/afs/vlclient.c6
-rw-r--r--fs/afs/xattr.c16
-rw-r--r--fs/afs/yfsclient.c11
-rw-r--r--fs/aio.c2
-rw-r--r--fs/binfmt_elf.c12
-rw-r--r--fs/binfmt_elf_fdpic.c12
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/buffer.c54
-rw-r--r--fs/ceph/dir.c1
-rw-r--r--fs/ceph/file.c2
-rw-r--r--fs/cifs/cifs_debug.c43
-rw-r--r--fs/cifs/cifs_spnego.c2
-rw-r--r--fs/cifs/cifsacl.c2
-rw-r--r--fs/cifs/cifsfs.c46
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h90
-rw-r--r--fs/cifs/cifsproto.h8
-rw-r--r--fs/cifs/connect.c191
-rw-r--r--fs/cifs/dfs_cache.c3
-rw-r--r--fs/cifs/dir.c6
-rw-r--r--fs/cifs/file.c159
-rw-r--r--fs/cifs/inode.c333
-rw-r--r--fs/cifs/misc.c17
-rw-r--r--fs/cifs/sess.c230
-rw-r--r--fs/cifs/smb1ops.c8
-rw-r--r--fs/cifs/smb2misc.c175
-rw-r--r--fs/cifs/smb2ops.c141
-rw-r--r--fs/cifs/smb2pdu.c168
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smb2proto.h6
-rw-r--r--fs/cifs/smb2transport.c165
-rw-r--r--fs/cifs/smbdirect.c36
-rw-r--r--fs/cifs/transport.c37
-rw-r--r--fs/compat_binfmt_elf.c4
-rw-r--r--fs/compat_ioctl.c931
-rw-r--r--fs/dax.c13
-rw-r--r--fs/debugfs/file.c87
-rw-r--r--fs/direct-io.c21
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/erofs/Kconfig1
-rw-r--r--fs/erofs/decompressor.c2
-rw-r--r--fs/erofs/erofs_fs.h3
-rw-r--r--fs/erofs/internal.h7
-rw-r--r--fs/erofs/super.c39
-rw-r--r--fs/erofs/utils.c17
-rw-r--r--fs/erofs/zdata.c288
-rw-r--r--fs/erofs/zdata.h8
-rw-r--r--fs/erofs/zmap.c28
-rw-r--r--fs/exec.c3
-rw-r--r--fs/ext2/balloc.c75
-rw-r--r--fs/ext2/ext2.h12
-rw-r--r--fs/ext2/inode.c9
-rw-r--r--fs/ext2/ioctl.c5
-rw-r--r--fs/ext2/super.c13
-rw-r--r--fs/ext4/ext4.h22
-rw-r--r--fs/ext4/ext4_jbd2.c32
-rw-r--r--fs/ext4/ext4_jbd2.h106
-rw-r--r--fs/ext4/extents.c149
-rw-r--r--fs/ext4/file.c412
-rw-r--r--fs/ext4/fsync.c72
-rw-r--r--fs/ext4/ialloc.c7
-rw-r--r--fs/ext4/indirect.c125
-rw-r--r--fs/ext4/inode.c926
-rw-r--r--fs/ext4/ioctl.c1
-rw-r--r--fs/ext4/migrate.c103
-rw-r--r--fs/ext4/namei.c50
-rw-r--r--fs/ext4/page-io.c167
-rw-r--r--fs/ext4/readpage.c6
-rw-r--r--fs/ext4/resize.c46
-rw-r--r--fs/ext4/super.c59
-rw-r--r--fs/ext4/xattr.c94
-rw-r--r--fs/f2fs/checkpoint.c2
-rw-r--r--fs/f2fs/data.c190
-rw-r--r--fs/f2fs/dir.c7
-rw-r--r--fs/f2fs/f2fs.h63
-rw-r--r--fs/f2fs/file.c48
-rw-r--r--fs/f2fs/gc.c46
-rw-r--r--fs/f2fs/inode.c8
-rw-r--r--fs/f2fs/namei.c15
-rw-r--r--fs/f2fs/node.c3
-rw-r--r--fs/f2fs/recovery.c2
-rw-r--r--fs/f2fs/segment.c64
-rw-r--r--fs/f2fs/segment.h2
-rw-r--r--fs/f2fs/super.c52
-rw-r--r--fs/f2fs/sysfs.c4
-rw-r--r--fs/f2fs/xattr.c14
-rw-r--r--fs/fat/file.c13
-rw-r--r--fs/fuse/dev.c33
-rw-r--r--fs/gfs2/bmap.c3
-rw-r--r--fs/gfs2/file.c36
-rw-r--r--fs/hpfs/dir.c1
-rw-r--r--fs/hpfs/file.c1
-rw-r--r--fs/hugetlbfs/inode.c63
-rw-r--r--fs/io-wq.c187
-rw-r--r--fs/io-wq.h63
-rw-r--r--fs/io_uring.c834
-rw-r--r--fs/ioctl.c92
-rw-r--r--fs/iomap/Makefile16
-rw-r--r--fs/iomap/apply.c32
-rw-r--r--fs/iomap/buffered-io.c756
-rw-r--r--fs/iomap/direct-io.c63
-rw-r--r--fs/iomap/fiemap.c10
-rw-r--r--fs/iomap/seek.c4
-rw-r--r--fs/iomap/swapfile.c3
-rw-r--r--fs/iomap/trace.c12
-rw-r--r--fs/iomap/trace.h191
-rw-r--r--fs/jbd2/checkpoint.c2
-rw-r--r--fs/jbd2/commit.c26
-rw-r--r--fs/jbd2/journal.c65
-rw-r--r--fs/jbd2/revoke.c6
-rw-r--r--fs/jbd2/transaction.c400
-rw-r--r--fs/jffs2/nodelist.c2
-rw-r--r--fs/namei.c8
-rw-r--r--fs/nilfs2/ioctl.c1
-rw-r--r--fs/notify/fanotify/fanotify_user.c2
-rw-r--r--fs/notify/fdinfo.c2
-rw-r--r--fs/notify/fsnotify.c2
-rw-r--r--fs/notify/fsnotify.h2
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/ocfs2/alloc.c32
-rw-r--r--fs/ocfs2/aops.c1
-rw-r--r--fs/ocfs2/ioctl.c1
-rw-r--r--fs/ocfs2/journal.c8
-rw-r--r--fs/ocfs2/quota_global.c2
-rw-r--r--fs/ocfs2/suballoc.c19
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/pipe.c232
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/pstore/platform.c2
-rw-r--r--fs/quota/dquot.c289
-rw-r--r--fs/quota/quota.c7
-rw-r--r--fs/quota/quota_v1.c1
-rw-r--r--fs/reiserfs/file.c10
-rw-r--r--fs/reiserfs/inode.c12
-rw-r--r--fs/reiserfs/namei.c7
-rw-r--r--fs/reiserfs/reiserfs.h2
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/reiserfs/xattr.c19
-rw-r--r--fs/reiserfs/xattr_acl.c4
-rw-r--r--fs/select.c10
-rw-r--r--fs/splice.c199
-rw-r--r--fs/timerfd.c14
-rw-r--r--fs/ubifs/debug.c12
-rw-r--r--fs/ubifs/journal.c4
-rw-r--r--fs/ubifs/orphan.c17
-rw-r--r--fs/ubifs/sb.c2
-rw-r--r--fs/ubifs/super.c4
-rw-r--r--fs/ubifs/tnc_commit.c34
-rw-r--r--fs/userfaultfd.c23
-rw-r--r--fs/utimes.c8
-rw-r--r--fs/xfs/Makefile1
-rw-r--r--fs/xfs/kmem.c2
-rw-r--r--fs/xfs/kmem.h30
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c1236
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h16
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c1
-rw-r--r--fs/xfs/libxfs/xfs_attr.c24
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c134
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h30
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c1
-rw-r--r--fs/xfs/libxfs/xfs_bit.c1
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c700
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h3
-rw-r--r--fs/xfs/libxfs/xfs_btree.c97
-rw-r--r--fs/xfs/libxfs/xfs_btree.h37
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c668
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.h73
-rw-r--r--fs/xfs/libxfs/xfs_da_format.c888
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h59
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c72
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h90
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c131
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c282
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c307
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c431
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h114
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c424
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c8
-rw-r--r--fs/xfs/libxfs/xfs_format.h14
-rw-r--r--fs/xfs/libxfs/xfs_fs.h4
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c117
-rw-r--r--fs/xfs/libxfs/xfs_iext_tree.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c21
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h5
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c22
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h18
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h4
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h4
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c174
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c377
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c4
-rw-r--r--fs/xfs/libxfs/xfs_sb.c1
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c8
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c6
-rw-r--r--fs/xfs/libxfs/xfs_types.h2
-rw-r--r--fs/xfs/scrub/attr.c11
-rw-r--r--fs/xfs/scrub/bitmap.c3
-rw-r--r--fs/xfs/scrub/common.h9
-rw-r--r--fs/xfs/scrub/dabtree.c62
-rw-r--r--fs/xfs/scrub/dabtree.h3
-rw-r--r--fs/xfs/scrub/dir.c132
-rw-r--r--fs/xfs/scrub/fscounters.c8
-rw-r--r--fs/xfs/scrub/health.c1
-rw-r--r--fs/xfs/scrub/parent.c27
-rw-r--r--fs/xfs/scrub/quota.c7
-rw-r--r--fs/xfs/scrub/scrub.c1
-rw-r--r--fs/xfs/xfs_acl.c18
-rw-r--r--fs/xfs/xfs_aops.c791
-rw-r--r--fs/xfs/xfs_aops.h20
-rw-r--r--fs/xfs/xfs_attr_inactive.c76
-rw-r--r--fs/xfs/xfs_attr_list.c75
-rw-r--r--fs/xfs/xfs_bmap_item.c11
-rw-r--r--fs/xfs/xfs_bmap_util.c255
-rw-r--r--fs/xfs/xfs_bmap_util.h4
-rw-r--r--fs/xfs/xfs_buf.c32
-rw-r--r--fs/xfs/xfs_buf.h1
-rw-r--r--fs/xfs/xfs_buf_item.c6
-rw-r--r--fs/xfs/xfs_dir2_readdir.c137
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_dquot.c46
-rw-r--r--fs/xfs/xfs_dquot.h98
-rw-r--r--fs/xfs/xfs_dquot_item.h34
-rw-r--r--fs/xfs/xfs_error.c31
-rw-r--r--fs/xfs/xfs_error.h33
-rw-r--r--fs/xfs/xfs_extent_busy.c2
-rw-r--r--fs/xfs/xfs_extfree_item.c9
-rw-r--r--fs/xfs/xfs_file.c113
-rw-r--r--fs/xfs/xfs_filestream.c3
-rw-r--r--fs/xfs/xfs_fsmap.c1
-rw-r--r--fs/xfs/xfs_icache.c8
-rw-r--r--fs/xfs/xfs_icreate_item.c2
-rw-r--r--fs/xfs/xfs_inode.c48
-rw-r--r--fs/xfs/xfs_inode.h31
-rw-r--r--fs/xfs/xfs_inode_item.c15
-rw-r--r--fs/xfs/xfs_ioctl.c203
-rw-r--r--fs/xfs/xfs_ioctl.h7
-rw-r--r--fs/xfs/xfs_ioctl32.c49
-rw-r--r--fs/xfs/xfs_ioctl32.h13
-rw-r--r--fs/xfs/xfs_iomap.c865
-rw-r--r--fs/xfs/xfs_iomap.h13
-rw-r--r--fs/xfs/xfs_iops.c70
-rw-r--r--fs/xfs/xfs_itable.c6
-rw-r--r--fs/xfs/xfs_iwalk.c3
-rw-r--r--fs/xfs/xfs_linux.h14
-rw-r--r--fs/xfs/xfs_log.c434
-rw-r--r--fs/xfs/xfs_log_cil.c6
-rw-r--r--fs/xfs/xfs_log_priv.h33
-rw-r--r--fs/xfs/xfs_log_recover.c148
-rw-r--r--fs/xfs/xfs_message.c22
-rw-r--r--fs/xfs/xfs_message.h6
-rw-r--r--fs/xfs/xfs_mount.c58
-rw-r--r--fs/xfs/xfs_mount.h57
-rw-r--r--fs/xfs/xfs_pnfs.c58
-rw-r--r--fs/xfs/xfs_qm.c67
-rw-r--r--fs/xfs/xfs_qm.h6
-rw-r--r--fs/xfs/xfs_qm_bhv.c8
-rw-r--r--fs/xfs/xfs_qm_syscalls.c139
-rw-r--r--fs/xfs/xfs_quotaops.c3
-rw-r--r--fs/xfs/xfs_refcount_item.c9
-rw-r--r--fs/xfs/xfs_reflink.c138
-rw-r--r--fs/xfs/xfs_reflink.h4
-rw-r--r--fs/xfs/xfs_rmap_item.c13
-rw-r--r--fs/xfs/xfs_rtalloc.c3
-rw-r--r--fs/xfs/xfs_super.c1471
-rw-r--r--fs/xfs/xfs_super.h10
-rw-r--r--fs/xfs/xfs_symlink.c1
-rw-r--r--fs/xfs/xfs_symlink.h2
-rw-r--r--fs/xfs/xfs_trace.h100
-rw-r--r--fs/xfs/xfs_trans.c2
-rw-r--r--fs/xfs/xfs_trans_ail.c10
-rw-r--r--fs/xfs/xfs_trans_dquot.c56
-rw-r--r--fs/xfs/xfs_xattr.c1
-rw-r--r--include/Kbuild1187
-rw-r--r--include/acpi/acpi_bus.h8
-rw-r--r--include/acpi/acpixf.h8
-rw-r--r--include/acpi/button.h12
-rw-r--r--include/asm-generic/4level-fixup.h1
-rw-r--r--include/asm-generic/5level-fixup.h1
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/export.h3
-rw-r--r--include/asm-generic/io.h89
-rw-r--r--include/asm-generic/mshyperv.h2
-rw-r--r--include/asm-generic/pgtable-nop4d.h2
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/pgtable.h51
-rw-r--r--include/asm-generic/tlb.h6
-rw-r--r--include/asm-generic/vmlinux.lds.h13
-rw-r--r--include/drm/amd_asic_type.h56
-rw-r--r--include/drm/bridge/dw_hdmi.h6
-rw-r--r--include/drm/drmP.h103
-rw-r--r--include/drm/drm_bridge.h33
-rw-r--r--include/drm/drm_connector.h25
-rw-r--r--include/drm/drm_crtc.h1
-rw-r--r--include/drm/drm_dp_helper.h140
-rw-r--r--include/drm/drm_dp_mst_helper.h172
-rw-r--r--include/drm/drm_drv.h2
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_encoder.h6
-rw-r--r--include/drm/drm_fb_helper.h7
-rw-r--r--include/drm/drm_gem.h15
-rw-r--r--include/drm/drm_gem_shmem_helper.h30
-rw-r--r--include/drm/drm_gem_ttm_helper.h21
-rw-r--r--include/drm/drm_gem_vram_helper.h107
-rw-r--r--include/drm/drm_mm.h7
-rw-r--r--include/drm/drm_modeset_helper_vtables.h7
-rw-r--r--include/drm/drm_modeset_lock.h9
-rw-r--r--include/drm/drm_os_linux.h55
-rw-r--r--include/drm/drm_panel.h13
-rw-r--r--include/drm/drm_plane.h31
-rw-r--r--include/drm/drm_prime.h2
-rw-r--r--include/drm/drm_print.h26
-rw-r--r--include/drm/drm_rect.h31
-rw-r--r--include/drm/drm_simple_kms_helper.h2
-rw-r--r--include/drm/drm_vblank.h15
-rw-r--r--include/drm/drm_vram_mm_helper.h104
-rw-r--r--include/drm/gpu_scheduler.h3
-rw-r--r--include/drm/i915_drm.h18
-rw-r--r--include/drm/i915_mei_hdcp_interface.h42
-rw-r--r--include/drm/ttm/ttm_bo_api.h80
-rw-r--r--include/drm/ttm/ttm_bo_driver.h32
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h2
-rw-r--r--include/drm/ttm/ttm_memory.h1
-rw-r--r--include/drm/ttm/ttm_page_alloc.h2
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h2
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h4
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h10
-rw-r--r--include/dt-bindings/clock/bm1880-clock.h82
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h1
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h19
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h19
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h24
-rw-r--r--include/dt-bindings/clock/omap5.h4
-rw-r--r--include/dt-bindings/clock/px30-cru.h2
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7180.h155
-rw-r--r--include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h18
-rw-r--r--include/dt-bindings/clock/r8a774b1-cpg-mssr.h57
-rw-r--r--include/dt-bindings/clock/r8a77961-cpg-mssr.h65
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h3
-rw-r--r--include/dt-bindings/clock/tegra210-car.h6
-rw-r--r--include/dt-bindings/clock/x1000-cgu.h44
-rw-r--r--include/dt-bindings/dma/x1000-dma.h40
-rw-r--r--include/dt-bindings/gpio/meson-a1-gpio.h73
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h1
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8974.h146
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/pmu/exynos_ppmu.h25
-rw-r--r--include/dt-bindings/power/r8a774b1-sysc.h26
-rw-r--r--include/dt-bindings/power/r8a77961-sysc.h32
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h15
-rw-r--r--include/dt-bindings/sound/samsung-i2s.h12
-rw-r--r--include/keys/system_keyring.h6
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/aer.h4
-rw-r--r--include/linux/arch_topology.h1
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/bsearch.h2
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/clk/tegra.h24
-rw-r--r--include/linux/clk/ti.h3
-rw-r--r--include/linux/compat.h19
-rw-r--r--include/linux/coresight.h6
-rw-r--r--include/linux/counter.h76
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpuidle.h27
-rw-r--r--include/linux/debugfs.h136
-rw-r--r--include/linux/device.h39
-rw-r--r--include/linux/device_cgroup.h19
-rw-r--r--include/linux/dma-buf.h63
-rw-r--r--include/linux/dma-direct.h35
-rw-r--r--include/linux/dma-mapping.h15
-rw-r--r--include/linux/dma-noncoherent.h22
-rw-r--r--include/linux/dma/sprd-dma.h4
-rw-r--r--include/linux/dmar.h2
-rw-r--r--include/linux/dmi.h4
-rw-r--r--include/linux/efi.h16
-rw-r--r--include/linux/export.h1
-rw-r--r--include/linux/falloc.h26
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h8
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h13
-rw-r--r--include/linux/fs.h15
-rw-r--r--include/linux/ftrace.h112
-rw-r--r--include/linux/fwnode.h52
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gpio/driver.h8
-rw-r--r--include/linux/hmm.h190
-rw-r--r--include/linux/host1x.h26
-rw-r--r--include/linux/hrtimer.h14
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h140
-rw-r--r--include/linux/hyperv.h31
-rw-r--r--include/linux/i2c-pxa.h18
-rw-r--r--include/linux/i2c.h21
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h2
-rw-r--r--include/linux/iio/iio.h2
-rw-r--r--include/linux/iio/imu/adis.h6
-rw-r--r--include/linux/ima.h3
-rw-r--r--include/linux/input.h1
-rw-r--r--include/linux/interrupt.h16
-rw-r--r--include/linux/io-pgtable.h2
-rw-r--r--include/linux/io.h2
-rw-r--r--include/linux/ioasid.h76
-rw-r--r--include/linux/iomap.h129
-rw-r--r--include/linux/iommu.h65
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/irq.h6
-rw-r--r--include/linux/irq_work.h10
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqchip/ingenic.h14
-rw-r--r--include/linux/irqdomain.h1
-rw-r--r--include/linux/jbd2.h118
-rw-r--r--include/linux/journal-head.h21
-rw-r--r--include/linux/kasan.h31
-rw-r--r--include/linux/led-class-flash.h41
-rw-r--r--include/linux/leds.h100
-rw-r--r--include/linux/libfdt_env.h5
-rw-r--r--include/linux/libnvdimm.h7
-rw-r--r--include/linux/license.h1
-rw-r--r--include/linux/memblock.h3
-rw-r--r--include/linux/memcontrol.h49
-rw-r--r--include/linux/memory_hotplug.h11
-rw-r--r--include/linux/memregion.h23
-rw-r--r--include/linux/mfd/abx500/ab8500-gpadc.h75
-rw-r--r--include/linux/mfd/arizona/registers.h7
-rw-r--r--include/linux/mfd/core.h49
-rw-r--r--include/linux/mfd/db8500-prcmu.h4
-rw-r--r--include/linux/mfd/dbx500-prcmu.h7
-rw-r--r--include/linux/mfd/madera/core.h11
-rw-r--r--include/linux/mfd/max77620.h1
-rw-r--r--include/linux/mfd/mt6397/rtc.h71
-rw-r--r--include/linux/mfd/rk808.h2
-rw-r--r--include/linux/mfd/twl.h12
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/mm.h47
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmu_notifier.h147
-rw-r--r--include/linux/mmzone.h34
-rw-r--r--include/linux/module.h7
-rw-r--r--include/linux/moduleloader.h2
-rw-r--r--include/linux/mtio.h60
-rw-r--r--include/linux/nd.h2
-rw-r--r--include/linux/netdevice.h4
-rw-r--r--include/linux/nvmem-consumer.h2
-rw-r--r--include/linux/of_address.h21
-rw-r--r--include/linux/of_pci.h5
-rw-r--r--include/linux/page-isolation.h4
-rw-r--r--include/linux/pagewalk.h9
-rw-r--r--include/linux/parport.h1
-rw-r--r--include/linux/pci-ats.h77
-rw-r--r--include/linux/pci-epc.h2
-rw-r--r--include/linux/pci.h61
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu-refcount.h16
-rw-r--r--include/linux/phy/phy.h3
-rw-r--r--include/linux/phy/tegra/xusb.h4
-rw-r--r--include/linux/pipe_fs_i.h64
-rw-r--r--include/linux/platform_data/cros_ec_commands.h285
-rw-r--r--include/linux/platform_data/cros_ec_proto.h138
-rw-r--r--include/linux/platform_data/cros_ec_sensorhub.h30
-rw-r--r--include/linux/platform_data/gpio_backlight.h3
-rw-r--r--include/linux/platform_data/hsmmc-omap.h3
-rw-r--r--include/linux/platform_data/i2c-pxa.h4
-rw-r--r--include/linux/platform_data/pixcir_i2c_ts.h64
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/platform_data/wilco-ec.h15
-rw-r--r--include/linux/platform_device.h70
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h5
-rw-r--r--include/linux/pm_opp.h13
-rw-r--r--include/linux/power/smartreflex.h3
-rw-r--r--include/linux/property.h106
-rw-r--r--include/linux/qcom_scm.h2
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h14
-rw-r--r--include/linux/resource_ext.h12
-rw-r--r--include/linux/rtc.h5
-rw-r--r--include/linux/rtc/ds1685.h12
-rw-r--r--include/linux/rtsx_pci.h1
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/seccomp.h6
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/seq_buf.h3
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/skmsg.h28
-rw-r--r--include/linux/slab.h20
-rw-r--r--include/linux/soc/qcom/irq.h34
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/sort.h8
-rw-r--r--include/linux/soundwire/sdw.h7
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/sys_soc.h1
-rw-r--r--include/linux/syscalls.h16
-rw-r--r--include/linux/time.h9
-rw-r--r--include/linux/time32.h2
-rw-r--r--include/linux/trace.h8
-rw-r--r--include/linux/trace_events.h8
-rw-r--r--include/linux/trace_seq.h4
-rw-r--r--include/linux/types.h7
-rw-r--r--include/linux/uio.h4
-rw-r--r--include/linux/usb/role.h3
-rw-r--r--include/linux/usb/tcpm.h41
-rw-r--r--include/linux/usb/typec.h41
-rw-r--r--include/linux/vmalloc.h12
-rw-r--r--include/linux/w1.h1
-rw-r--r--include/linux/wait.h11
-rw-r--r--include/media/cec-notifier.h7
-rw-r--r--include/media/cec-pin.h10
-rw-r--r--include/media/cec.h31
-rw-r--r--include/media/dvb-usb-ids.h1
-rw-r--r--include/media/hevc-ctrls.h212
-rw-r--r--include/media/i2c/smiapp.h1
-rw-r--r--include/media/rc-map.h24
-rw-r--r--include/media/v4l2-common.h33
-rw-r--r--include/media/v4l2-ctrls.h87
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/media/v4l2-mem2mem.h44
-rw-r--r--include/media/videobuf2-core.h3
-rw-r--r--include/media/videobuf2-v4l2.h5
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/tls.h3
-rw-r--r--include/rdma/ib_cm.h32
-rw-r--r--include/rdma/ib_mad.h40
-rw-r--r--include/rdma/ib_umem.h4
-rw-r--r--include/rdma/ib_umem_odp.h86
-rw-r--r--include/rdma/ib_verbs.h81
-rw-r--r--include/rdma/restrack.h5
-rw-r--r--include/scsi/iscsi_proto.h1
-rw-r--r--include/scsi/scsi_cmnd.h5
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/scsi/scsi_host.h19
-rw-r--r--include/soc/mscc/ocelot.h9
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/dmaengine_pcm.h5
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/intel-dsp-config.h34
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/pcm.h20
-rw-r--r--include/sound/pxa2xx-lib.h26
-rw-r--r--include/sound/rt5682.h1
-rw-r--r--include/sound/simple_card_utils.h1
-rw-r--r--include/sound/soc-acpi-intel-match.h3
-rw-r--r--include/sound/soc-acpi.h4
-rw-r--r--include/sound/soc-component.h52
-rw-r--r--include/sound/soc-dpcm.h18
-rw-r--r--include/sound/soc.h38
-rw-r--r--include/sound/sof.h3
-rw-r--r--include/sound/sof/dai-imx.h34
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/pm.h8
-rw-r--r--include/sound/sof/stream.h4
-rw-r--r--include/sound/timer.h6
-rw-r--r--include/sound/wm8904.h2
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/ext4.h13
-rw-r--r--include/trace/events/fsi.h6
-rw-r--r--include/trace/events/fsi_master_aspeed.h77
-rw-r--r--include/trace/events/io_uring.h16
-rw-r--r--include/trace/events/jbd2.h16
-rw-r--r--include/trace/events/kmem.h47
-rw-r--r--include/trace/events/timer.h16
-rw-r--r--include/trace/trace_events.h6
-rw-r--r--include/uapi/asm-generic/msgbuf.h12
-rw-r--r--include/uapi/asm-generic/posix_types.h1
-rw-r--r--include/uapi/asm-generic/sembuf.h7
-rw-r--r--include/uapi/asm-generic/shmbuf.h12
-rw-r--r--include/uapi/drm/amdgpu_drm.h2
-rw-r--r--include/uapi/drm/drm.h3
-rw-r--r--include/uapi/drm/drm_fourcc.h28
-rw-r--r--include/uapi/drm/exynos_drm.h2
-rw-r--r--include/uapi/drm/i915_drm.h128
-rw-r--r--include/uapi/drm/omap_drm.h18
-rw-r--r--include/uapi/drm/v3d_drm.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h4
-rw-r--r--include/uapi/linux/audit.h1
-rw-r--r--include/uapi/linux/cec-funcs.h34
-rw-r--r--include/uapi/linux/cec.h133
-rw-r--r--include/uapi/linux/chio.h11
-rw-r--r--include/uapi/linux/cyclades.h6
-rw-r--r--include/uapi/linux/elfcore.h8
-rw-r--r--include/uapi/linux/errqueue.h7
-rw-r--r--include/uapi/linux/gpio.h24
-rw-r--r--include/uapi/linux/input-event-codes.h75
-rw-r--r--include/uapi/linux/io_uring.h1
-rw-r--r--include/uapi/linux/iommu.h169
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/msg.h6
-rw-r--r--include/uapi/linux/pci_regs.h3
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/ppp_defs.h18
-rw-r--r--include/uapi/linux/resource.h4
-rw-r--r--include/uapi/linux/seccomp.h29
-rw-r--r--include/uapi/linux/sem.h4
-rw-r--r--include/uapi/linux/serial_core.h2
-rw-r--r--include/uapi/linux/shm.h6
-rw-r--r--include/uapi/linux/time.h6
-rw-r--r--include/uapi/linux/time_types.h5
-rw-r--r--include/uapi/linux/utime.h4
-rw-r--r--include/uapi/linux/v4l2-controls.h1
-rw-r--r--include/uapi/linux/videodev2.h22
-rw-r--r--include/uapi/misc/fastrpc.h15
-rw-r--r--include/uapi/misc/habanalabs.h48
-rw-r--r--include/uapi/rdma/cxgb3-abi.h82
-rw-r--r--include/uapi/rdma/efa-abi.h6
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h22
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h1
-rw-r--r--include/uapi/rdma/nes-abi.h115
-rw-r--r--include/uapi/rdma/qedr-abi.h25
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h22
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h5
-rw-r--r--include/uapi/sound/compress_params.h10
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/uapi/sound/sof/tokens.h11
-rw-r--r--include/xen/swiotlb-xen.h8
-rw-r--r--init/Kconfig39
-rw-r--r--ipc/syscall.c2
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c15
-rw-r--r--kernel/bpf/stackmap.c2
-rw-r--r--kernel/compat.c24
-rw-r--r--kernel/debug/debug_core.c34
-rw-r--r--kernel/debug/debug_core.h3
-rw-r--r--kernel/debug/kdb/kdb_bt.c116
-rw-r--r--kernel/debug/kdb/kdb_io.c231
-rw-r--r--kernel/debug/kdb/kdb_private.h1
-rw-r--r--kernel/dma/Kconfig12
-rw-r--r--kernel/dma/coherent.c16
-rw-r--r--kernel/dma/contiguous.c9
-rw-r--r--kernel/dma/debug.c39
-rw-r--r--kernel/dma/direct.c177
-rw-r--r--kernel/dma/mapping.c45
-rw-r--r--kernel/dma/remap.c55
-rw-r--r--kernel/dma/swiotlb.c2
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c7
-rwxr-xr-xkernel/gen_kheaders.sh64
-rw-r--r--kernel/irq/chip.c44
-rw-r--r--kernel/irq/irqdesc.c2
-rw-r--r--kernel/irq_work.c34
-rw-r--r--kernel/kexec_file.c4
-rw-r--r--kernel/livepatch/patch.c3
-rw-r--r--kernel/module.c6
-rw-r--r--kernel/power/power.h2
-rw-r--r--kernel/power/snapshot.c9
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/sched/idle.c24
-rw-r--r--kernel/sched/wait.c37
-rw-r--r--kernel/seccomp.c28
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/sys_ni.c23
-rw-r--r--kernel/sysctl.c2
-rw-r--r--kernel/sysctl_binary.c1305
-rw-r--r--kernel/time/hrtimer.c13
-rw-r--r--kernel/time/itimer.c189
-rw-r--r--kernel/time/time.c58
-rw-r--r--kernel/trace/Kconfig27
-rw-r--r--kernel/trace/bpf_trace.c2
-rw-r--r--kernel/trace/fgraph.c11
-rw-r--r--kernel/trace/ftrace.c613
-rw-r--r--kernel/trace/preemptirq_delay_test.c144
-rw-r--r--kernel/trace/ring_buffer_benchmark.c4
-rw-r--r--kernel/trace/trace.c214
-rw-r--r--kernel/trace/trace.h25
-rw-r--r--kernel/trace/trace_branch.c8
-rw-r--r--kernel/trace/trace_events.c29
-rw-r--r--kernel/trace/trace_events_hist.c2
-rw-r--r--kernel/trace/trace_export.c4
-rw-r--r--kernel/trace/trace_hwlat.c15
-rw-r--r--kernel/trace/trace_kprobe.c27
-rw-r--r--kernel/trace/trace_output.c15
-rw-r--r--kernel/trace/trace_seq.c30
-rw-r--r--kernel/trace/trace_stat.c6
-rw-r--r--kernel/trace/trace_stat.h2
-rw-r--r--kernel/trace/trace_syscalls.c32
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug18
-rw-r--r--lib/Kconfig.kasan16
-rw-r--r--lib/Makefile1
-rw-r--r--lib/bsearch.c2
-rw-r--r--lib/devres.c83
-rw-r--r--lib/genalloc.c2
-rw-r--r--lib/ioremap.c39
-rw-r--r--lib/iov_iter.c270
-rw-r--r--lib/memregion.c18
-rw-r--r--lib/seq_buf.c62
-rw-r--r--lib/sort.c15
-rw-r--r--lib/test_kasan.c26
-rw-r--r--lib/test_printf.c32
-rw-r--r--lib/vdso/gettimeofday.c4
-rw-r--r--lib/vsprintf.c133
-rw-r--r--mm/Kconfig45
-rw-r--r--mm/Makefile1
-rw-r--r--mm/cma.c6
-rw-r--r--mm/cma_debug.c10
-rw-r--r--mm/filemap.c54
-rw-r--r--mm/gup.c40
-rw-r--r--mm/hmm.c523
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/hugetlb.c288
-rw-r--r--mm/hwpoison-inject.c4
-rw-r--r--mm/internal.h27
-rw-r--r--mm/kasan/common.c233
-rw-r--r--mm/kasan/generic_report.c3
-rw-r--r--mm/kasan/kasan.h1
-rw-r--r--mm/khugepaged.c18
-rw-r--r--mm/madvise.c14
-rw-r--r--mm/mapping_dirty_helpers.c315
-rw-r--r--mm/memblock.c111
-rw-r--r--mm/memcontrol.c167
-rw-r--r--mm/memory-failure.c61
-rw-r--r--mm/memory.c52
-rw-r--r--mm/memory_hotplug.c86
-rw-r--r--mm/mempolicy.c47
-rw-r--r--mm/migrate.c16
-rw-r--r--mm/mmap.c63
-rw-r--r--mm/mmu_notifier.c557
-rw-r--r--mm/mprotect.c8
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/nommu.c10
-rw-r--r--mm/page_alloc.c137
-rw-r--r--mm/page_io.c15
-rw-r--r--mm/page_isolation.c12
-rw-r--r--mm/pagewalk.c99
-rw-r--r--mm/pgtable-generic.c9
-rw-r--r--mm/rmap.c65
-rw-r--r--mm/shmem.c29
-rw-r--r--mm/slab.c7
-rw-r--r--mm/slab.h6
-rw-r--r--mm/slab_common.c99
-rw-r--r--mm/slub.c36
-rw-r--r--mm/sparse.c18
-rw-r--r--mm/swap.c29
-rw-r--r--mm/swapfile.c7
-rw-r--r--mm/userfaultfd.c73
-rw-r--r--mm/util.c22
-rw-r--r--mm/vmalloc.c192
-rw-r--r--mm/vmscan.c662
-rw-r--r--mm/workingset.c69
-rw-r--r--mm/z3fold.c375
-rw-r--r--net/bluetooth/hci_sock.c21
-rw-r--r--net/bluetooth/rfcomm/sock.c14
-rw-r--r--net/compat.c2
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/rtnetlink.c14
-rw-r--r--net/core/scm.c6
-rw-r--r--net/core/skmsg.c2
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/tcp.c28
-rw-r--r--net/ipv4/tcp_bpf.c2
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/mac80211/debugfs_sta.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/openvswitch/datapath.c17
-rw-r--r--net/psample/psample.c2
-rw-r--r--net/rfkill/core.c11
-rw-r--r--net/sched/sch_mq.c3
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sched/sch_prio.c2
-rw-r--r--net/sctp/socket.c16
-rw-r--r--net/socket.c221
-rw-r--r--net/tipc/netlink_compat.c4
-rw-r--r--net/tipc/socket.c24
-rw-r--r--net/tls/tls_main.c13
-rw-r--r--net/tls/tls_sw.c32
-rw-r--r--net/unix/af_unix.c19
-rw-r--r--net/vmw_vsock/hyperv_transport.c20
-rw-r--r--samples/Kconfig22
-rw-r--r--samples/Makefile3
-rw-r--r--samples/ftrace/Makefile8
-rw-r--r--samples/ftrace/ftrace-direct-modify.c88
-rw-r--r--samples/ftrace/ftrace-direct-too.c51
-rw-r--r--samples/ftrace/ftrace-direct.c45
-rw-r--r--samples/ftrace/sample-trace-array.c131
-rw-r--r--samples/ftrace/sample-trace-array.h84
-rw-r--r--samples/mei/Makefile12
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/Kbuild.include15
-rw-r--r--scripts/Makefile2
-rw-r--r--scripts/Makefile.build20
-rw-r--r--scripts/Makefile.headersinst18
-rw-r--r--scripts/Makefile.lib14
-rw-r--r--scripts/Makefile.modpost18
-rw-r--r--scripts/Makefile.package6
-rwxr-xr-xscripts/checkpatch.pl17
-rw-r--r--scripts/dtc/Makefile4
-rwxr-xr-xscripts/dtc/dtx_diff12
-rwxr-xr-xscripts/jobserver-exec66
-rw-r--r--scripts/kallsyms.c287
-rw-r--r--scripts/kconfig/Makefile10
-rw-r--r--scripts/kconfig/conf.c13
-rwxr-xr-xscripts/kconfig/mconf-cfg.sh3
-rwxr-xr-xscripts/kconfig/nconf-cfg.sh3
-rw-r--r--scripts/kconfig/parser.y1
-rwxr-xr-xscripts/kernel-doc27
-rw-r--r--scripts/mod/modpost.c188
-rw-r--r--scripts/mod/modpost.h5
-rw-r--r--scripts/nsdeps29
-rwxr-xr-xscripts/package/buildtar8
-rwxr-xr-xscripts/setlocalversion22
-rw-r--r--scripts/spelling.txt28
-rwxr-xr-xscripts/sphinx-pre-install30
-rwxr-xr-xscripts/ver_linux2
-rw-r--r--security/apparmor/Kconfig2
-rw-r--r--security/apparmor/apparmorfs.c130
-rw-r--r--security/apparmor/domain.c46
-rw-r--r--security/apparmor/file.c45
-rw-r--r--security/apparmor/include/apparmor.h1
-rw-r--r--security/apparmor/include/file.h2
-rw-r--r--security/apparmor/include/match.h3
-rw-r--r--security/apparmor/include/path.h50
-rw-r--r--security/apparmor/include/policy_unpack.h8
-rw-r--r--security/apparmor/label.c12
-rw-r--r--security/apparmor/lsm.c198
-rw-r--r--security/apparmor/match.c6
-rw-r--r--security/apparmor/mount.c67
-rw-r--r--security/apparmor/policy.c5
-rw-r--r--security/apparmor/policy_unpack.c116
-rw-r--r--security/device_cgroup.c15
-rw-r--r--security/integrity/Kconfig9
-rw-r--r--security/integrity/Makefile7
-rw-r--r--security/integrity/ima/ima.h11
-rw-r--r--security/integrity/ima/ima_appraise.c33
-rw-r--r--security/integrity/ima/ima_main.c70
-rw-r--r--security/integrity/ima/ima_policy.c12
-rw-r--r--security/integrity/integrity.h1
-rw-r--r--security/integrity/platform_certs/keyring_handler.c80
-rw-r--r--security/integrity/platform_certs/keyring_handler.h32
-rw-r--r--security/integrity/platform_certs/load_powerpc.c96
-rw-r--r--security/integrity/platform_certs/load_uefi.c72
-rw-r--r--security/lockdown/lockdown.c2
-rw-r--r--security/selinux/hooks.c26
-rw-r--r--security/selinux/include/security.h3
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/context.h32
-rw-r--r--security/selinux/ss/ebitmap.c18
-rw-r--r--security/selinux/ss/ebitmap.h1
-rw-r--r--security/selinux/ss/mls.c3
-rw-r--r--security/selinux/ss/policydb.c5
-rw-r--r--security/selinux/ss/policydb.h1
-rw-r--r--security/smack/smack_lsm.c1
-rw-r--r--sound/aoa/soundbus/i2sbus/pcm.c2
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c80
-rw-r--r--sound/core/Kconfig28
-rw-r--r--sound/core/init.c1
-rw-r--r--sound/core/memalloc.c25
-rw-r--r--sound/core/oss/pcm_oss.c4
-rw-r--r--sound/core/pcm_dmaengine.c83
-rw-r--r--sound/core/pcm_local.h7
-rw-r--r--sound/core/pcm_memory.c88
-rw-r--r--sound/core/pcm_native.c68
-rw-r--r--sound/core/seq/seq_timer.c18
-rw-r--r--sound/core/timer.c182
-rw-r--r--sound/drivers/Kconfig21
-rw-r--r--sound/drivers/aloop.c665
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/drivers/ml403-ac97cr.c2
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c4
-rw-r--r--sound/drivers/vx/vx_pcm.c10
-rw-r--r--sound/firewire/Kconfig6
-rw-r--r--sound/firewire/amdtp-stream.c407
-rw-r--r--sound/firewire/amdtp-stream.h28
-rw-r--r--sound/firewire/bebob/bebob.h4
-rw-r--r--sound/firewire/bebob/bebob_midi.c2
-rw-r--r--sound/firewire/bebob/bebob_pcm.c80
-rw-r--r--sound/firewire/bebob/bebob_stream.c92
-rw-r--r--sound/firewire/dice/dice-midi.c2
-rw-r--r--sound/firewire/dice/dice-pcm.c83
-rw-r--r--sound/firewire/dice/dice-stream.c11
-rw-r--r--sound/firewire/dice/dice.h4
-rw-r--r--sound/firewire/digi00x/digi00x-midi.c2
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c66
-rw-r--r--sound/firewire/digi00x/digi00x-stream.c14
-rw-r--r--sound/firewire/digi00x/digi00x.h4
-rw-r--r--sound/firewire/fireface/ff-pcm.c60
-rw-r--r--sound/firewire/fireface/ff-stream.c22
-rw-r--r--sound/firewire/fireface/ff.h4
-rw-r--r--sound/firewire/fireworks/fireworks.h4
-rw-r--r--sound/firewire/fireworks/fireworks_midi.c2
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c72
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c14
-rw-r--r--sound/firewire/isight.c8
-rw-r--r--sound/firewire/motu/motu-midi.c2
-rw-r--r--sound/firewire/motu/motu-pcm.c63
-rw-r--r--sound/firewire/motu/motu-proc.c4
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c142
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c4
-rw-r--r--sound/firewire/motu/motu-stream.c14
-rw-r--r--sound/firewire/motu/motu.c34
-rw-r--r--sound/firewire/motu/motu.h10
-rw-r--r--sound/firewire/oxfw/oxfw-midi.c4
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c80
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c15
-rw-r--r--sound/firewire/oxfw/oxfw.h4
-rw-r--r--sound/firewire/tascam/tascam-pcm.c65
-rw-r--r--sound/firewire/tascam/tascam-stream.c14
-rw-r--r--sound/firewire/tascam/tascam.h4
-rw-r--r--sound/hda/Kconfig10
-rw-r--r--sound/hda/Makefile5
-rw-r--r--sound/hda/hdac_regmap.c1
-rw-r--r--sound/hda/intel-dsp-config.c357
-rw-r--r--sound/hda/intel-nhlt.c3
-rw-r--r--sound/isa/Kconfig18
-rw-r--r--sound/isa/cs423x/cs4236.c3
-rw-r--r--sound/mips/Kconfig12
-rw-r--r--sound/mips/hal2.c3
-rw-r--r--sound/mips/sgio2audio.c12
-rw-r--r--sound/oss/dmasound/dmasound_core.c2
-rw-r--r--sound/pci/Kconfig2
-rw-r--r--sound/pci/ad1889.c6
-rw-r--r--sound/pci/ali5451/ali5451.c2
-rw-r--r--sound/pci/als300.c3
-rw-r--r--sound/pci/als4000.c3
-rw-r--r--sound/pci/asihpi/asihpi.c4
-rw-r--r--sound/pci/atiixp.c6
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c3
-rw-r--r--sound/pci/aw2/aw2-alsa.c6
-rw-r--r--sound/pci/azt3328.c8
-rw-r--r--sound/pci/bt87x.c5
-rw-r--r--sound/pci/ca0106/ca0106_main.c6
-rw-r--r--sound/pci/cmipci.c6
-rw-r--r--sound/pci/cs4281.c3
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c16
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c6
-rw-r--r--sound/pci/ctxfi/ctpcm.c5
-rw-r--r--sound/pci/ctxfi/ctvmem.c2
-rw-r--r--sound/pci/echoaudio/echoaudio.c24
-rw-r--r--sound/pci/emu10k1/emu10k1.c5
-rw-r--r--sound/pci/emu10k1/emu10k1x.c6
-rw-r--r--sound/pci/emu10k1/emufx.c2
-rw-r--r--sound/pci/emu10k1/emupcm.c12
-rw-r--r--sound/pci/emu10k1/memory.c4
-rw-r--r--sound/pci/emu10k1/p16v.c4
-rw-r--r--sound/pci/ens1370.c8
-rw-r--r--sound/pci/es1938.c3
-rw-r--r--sound/pci/es1968.c4
-rw-r--r--sound/pci/fm801.c2
-rw-r--r--sound/pci/hda/Kconfig11
-rw-r--r--sound/pci/hda/hda_bind.c4
-rw-r--r--sound/pci/hda/hda_controller.c1
-rw-r--r--sound/pci/hda/hda_intel.c66
-rw-r--r--sound/pci/hda/hda_jack.c151
-rw-r--r--sound/pci/hda/hda_jack.h107
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c330
-rw-r--r--sound/pci/hda/patch_realtek.c63
-rw-r--r--sound/pci/ice1712/ice1712.c9
-rw-r--r--sound/pci/ice1712/ice1724.c6
-rw-r--r--sound/pci/intel8x0.c4
-rw-r--r--sound/pci/intel8x0m.c4
-rw-r--r--sound/pci/korg1212/korg1212.c8
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/lola/lola_pcm.c5
-rw-r--r--sound/pci/lx6464es/lx6464es.c2
-rw-r--r--sound/pci/maestro3.c3
-rw-r--r--sound/pci/mixart/mixart.c7
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c10
-rw-r--r--sound/pci/pcxhr/pcxhr.c4
-rw-r--r--sound/pci/riptide/riptide.c6
-rw-r--r--sound/pci/rme32.c4
-rw-r--r--sound/pci/rme9652/hdsp.c7
-rw-r--r--sound/pci/rme9652/hdspm.c3
-rw-r--r--sound/pci/rme9652/rme9652.c7
-rw-r--r--sound/pci/sis7019.c3
-rw-r--r--sound/pci/sonicvibes.c3
-rw-r--r--sound/pci/trident/trident_main.c24
-rw-r--r--sound/pci/via82xx.c17
-rw-r--r--sound/pci/via82xx_modem.c6
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c16
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c9
-rw-r--r--sound/sh/aica.c2
-rw-r--r--sound/sh/sh_dac_audio.c2
-rw-r--r--sound/soc/amd/acp-pcm-dma.c63
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c56
-rw-r--r--sound/soc/atmel/atmel-pcm-pdc.c48
-rw-r--r--sound/soc/au1x/dbdma2.c64
-rw-r--r--sound/soc/au1x/dma.c65
-rw-r--r--sound/soc/bcm/cygnus-pcm.c56
-rw-r--r--sound/soc/cirrus/Kconfig14
-rw-r--r--sound/soc/codecs/Kconfig74
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/adau1761.c129
-rw-r--r--sound/soc/codecs/adau7118-hw.c43
-rw-r--r--sound/soc/codecs/adau7118-i2c.c82
-rw-r--r--sound/soc/codecs/adau7118.c586
-rw-r--r--sound/soc/codecs/adau7118.h24
-rw-r--r--sound/soc/codecs/cros_ec_codec.c1128
-rw-r--r--sound/soc/codecs/cx2072x.c2
-rw-r--r--sound/soc/codecs/hdac_hda.c114
-rw-r--r--sound/soc/codecs/hdac_hda.h13
-rw-r--r--sound/soc/codecs/madera.h2
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c54
-rw-r--r--sound/soc/codecs/mt6358.c105
-rw-r--r--sound/soc/codecs/pcm3168a.c143
-rw-r--r--sound/soc/codecs/rt1011.c249
-rw-r--r--sound/soc/codecs/rt1011.h24
-rw-r--r--sound/soc/codecs/rt5514-spi.c48
-rw-r--r--sound/soc/codecs/rt5645.c19
-rw-r--r--sound/soc/codecs/rt5663.c4
-rw-r--r--sound/soc/codecs/rt5677-spi.c398
-rw-r--r--sound/soc/codecs/rt5677-spi.h1
-rw-r--r--sound/soc/codecs/rt5677.c445
-rw-r--r--sound/soc/codecs/rt5677.h11
-rw-r--r--sound/soc/codecs/rt5682.c43
-rw-r--r--sound/soc/codecs/tas2562.c590
-rw-r--r--sound/soc/codecs/tas2562.h85
-rw-r--r--sound/soc/codecs/tas2770.c819
-rw-r--r--sound/soc/codecs/tas2770.h143
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c45
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h8
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c3
-rw-r--r--sound/soc/codecs/wcd9335.c10
-rw-r--r--sound/soc/codecs/wm2200.c5
-rw-r--r--sound/soc/codecs/wm5100.c2
-rw-r--r--sound/soc/codecs/wm8904.c73
-rw-r--r--sound/soc/codecs/wm8904.h1
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c22
-rw-r--r--sound/soc/codecs/wm8994.c156
-rw-r--r--sound/soc/codecs/wm8994.h10
-rw-r--r--sound/soc/codecs/wm_adsp.c81
-rw-r--r--sound/soc/codecs/wm_adsp.h4
-rw-r--r--sound/soc/dwc/dwc-pcm.c50
-rw-r--r--sound/soc/fsl/Kconfig10
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl_asrc.c110
-rw-r--r--sound/soc/fsl/fsl_asrc.h7
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c120
-rw-r--r--sound/soc/fsl/fsl_audmix.c6
-rw-r--r--sound/soc/fsl/fsl_audmix.h1
-rw-r--r--sound/soc/fsl/fsl_dma.c54
-rw-r--r--sound/soc/fsl/fsl_esai.c12
-rw-r--r--sound/soc/fsl/fsl_mqs.c335
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c56
-rw-r--r--sound/soc/fsl/mpc5200_dma.c51
-rw-r--r--sound/soc/generic/audio-graph-card.c4
-rw-r--r--sound/soc/generic/simple-card.c4
-rw-r--r--sound/soc/intel/Kconfig21
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c30
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-pcm.c52
-rw-r--r--sound/soc/intel/boards/Kconfig100
-rw-r--r--sound/soc/intel/boards/Makefile14
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c51
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c11
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c11
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c10
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c26
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c487
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c11
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c85
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.h32
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c149
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c6
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h27
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c3
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c13
-rw-r--r--sound/soc/intel/common/Makefile4
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cfl-match.c18
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c56
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c31
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c18
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c50
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c57
-rw-r--r--sound/soc/intel/skylake/skl.c19
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c6
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c52
-rw-r--r--sound/soc/mediatek/Kconfig1
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.c28
-rw-r--r--sound/soc/mediatek/common/mtk-afe-platform-driver.h10
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c76
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c11
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c26
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c70
-rw-r--r--sound/soc/meson/axg-fifo.c56
-rw-r--r--sound/soc/meson/axg-fifo.h20
-rw-r--r--sound/soc/meson/axg-frddr.c24
-rw-r--r--sound/soc/meson/axg-toddr.c24
-rw-r--r--sound/soc/pxa/Kconfig16
-rw-r--r--sound/soc/pxa/mmp-pcm.c62
-rw-r--r--sound/soc/pxa/poodle.c2
-rw-r--r--sound/soc/pxa/pxa-ssp.c14
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c14
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c14
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c14
-rw-r--r--sound/soc/qcom/Kconfig20
-rw-r--r--sound/soc/qcom/lpass-platform.c70
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c106
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c55
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.h15
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c14
-rw-r--r--sound/soc/rockchip/Kconfig3
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c313
-rw-r--r--sound/soc/samsung/Kconfig12
-rw-r--r--sound/soc/samsung/Makefile4
-rw-r--r--sound/soc/samsung/arndale.c217
-rw-r--r--sound/soc/samsung/arndale_rt5631.c164
-rw-r--r--sound/soc/samsung/idma.c58
-rw-r--r--sound/soc/sh/dma-sh7760.c48
-rw-r--r--sound/soc/sh/fsi.c31
-rw-r--r--sound/soc/sh/rcar/core.c54
-rw-r--r--sound/soc/sh/rcar/dma.c30
-rw-r--r--sound/soc/sh/siu_pcm.c44
-rw-r--r--sound/soc/soc-component.c142
-rw-r--r--sound/soc/soc-compress.c52
-rw-r--r--sound/soc/soc-core.c1169
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c165
-rw-r--r--sound/soc/soc-jack.c3
-rw-r--r--sound/soc/soc-ops.c11
-rw-r--r--sound/soc/soc-pcm.c241
-rw-r--r--sound/soc/soc-topology.c17
-rw-r--r--sound/soc/soc-utils.c11
-rw-r--r--sound/soc/sof/Kconfig43
-rw-r--r--sound/soc/sof/control.c34
-rw-r--r--sound/soc/sof/core.c44
-rw-r--r--sound/soc/sof/debug.c16
-rw-r--r--sound/soc/sof/imx/Kconfig20
-rw-r--r--sound/soc/sof/imx/imx8.c7
-rw-r--r--sound/soc/sof/intel/Kconfig74
-rw-r--r--sound/soc/sof/intel/apl.c8
-rw-r--r--sound/soc/sof/intel/bdw.c31
-rw-r--r--sound/soc/sof/intel/byt.c44
-rw-r--r--sound/soc/sof/intel/cnl.c67
-rw-r--r--sound/soc/sof/intel/hda-codec.c22
-rw-r--r--sound/soc/sof/intel/hda-dsp.c137
-rw-r--r--sound/soc/sof/intel/hda-ipc.c6
-rw-r--r--sound/soc/sof/intel/hda-ipc.h51
-rw-r--r--sound/soc/sof/intel/hda-loader.c25
-rw-r--r--sound/soc/sof/intel/hda-pcm.c15
-rw-r--r--sound/soc/sof/intel/hda-stream.c24
-rw-r--r--sound/soc/sof/intel/hda.c25
-rw-r--r--sound/soc/sof/intel/hda.h23
-rw-r--r--sound/soc/sof/ipc.c10
-rw-r--r--sound/soc/sof/ops.h10
-rw-r--r--sound/soc/sof/pcm.c123
-rw-r--r--sound/soc/sof/pm.c130
-rw-r--r--sound/soc/sof/sof-acpi-dev.c12
-rw-r--r--sound/soc/sof/sof-pci-dev.c94
-rw-r--r--sound/soc/sof/sof-priv.h54
-rw-r--r--sound/soc/sof/topology.c498
-rw-r--r--sound/soc/sof/trace.c17
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.c74
-rw-r--r--sound/soc/stm/stm32_adfsdm.c42
-rw-r--r--sound/soc/stm/stm32_sai.c2
-rw-r--r--sound/soc/stm/stm32_spdifrx.c18
-rw-r--r--sound/soc/sunxi/sun4i-codec.c6
-rw-r--r--sound/soc/tegra/tegra30_i2s.c56
-rw-r--r--sound/soc/ti/davinci-mcasp.c2
-rw-r--r--sound/soc/txx9/txx9aclc.c48
-rw-r--r--sound/soc/uniphier/aio-dma.c51
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c3
-rw-r--r--sound/soc/xilinx/Kconfig20
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c63
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c30
-rw-r--r--sound/soc/zte/Kconfig12
-rw-r--r--sound/sparc/amd7930.c2
-rw-r--r--sound/sparc/dbri.c2
-rw-r--r--sound/usb/6fire/pcm.c16
-rw-r--r--sound/usb/Kconfig32
-rw-r--r--sound/usb/caiaq/audio.c8
-rw-r--r--sound/usb/card.c3
-rw-r--r--sound/usb/clock.c10
-rw-r--r--sound/usb/hiface/pcm.c9
-rw-r--r--sound/usb/line6/pcm.c4
-rw-r--r--sound/usb/misc/ua101.c14
-rw-r--r--sound/usb/mixer.c3
-rw-r--r--sound/usb/mixer_scarlett.c23
-rw-r--r--sound/usb/mixer_scarlett_gen2.c36
-rw-r--r--sound/usb/pcm.c50
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c4
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c4
-rw-r--r--sound/usb/validate.c23
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt44
-rw-r--r--tools/build/Makefile.feature3
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-libbpf.c7
-rwxr-xr-xtools/hv/vmbus_testing376
-rw-r--r--tools/iio/Build1
-rw-r--r--tools/iio/Makefile10
-rw-r--r--tools/pci/pcitest.c1
-rw-r--r--tools/perf/Makefile.config10
-rw-r--r--tools/perf/Makefile.perf6
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c4
-rw-r--r--tools/perf/arch/arm64/tests/dwarf-unwind.c4
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c4
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c2
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c4
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-32.c366
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-64.c484
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-src.c655
-rw-r--r--tools/perf/arch/x86/util/event.c5
-rw-r--r--tools/perf/builtin-diff.c6
-rw-r--r--tools/perf/builtin-report.c7
-rw-r--r--tools/perf/builtin-script.c46
-rw-r--r--tools/perf/tests/Build4
-rw-r--r--tools/perf/tests/builtin-test.c8
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/maps.c (renamed from tools/perf/tests/map_groups.c)26
-rw-r--r--tools/perf/tests/tests.h4
-rw-r--r--tools/perf/tests/thread-maps-share.c (renamed from tools/perf/tests/thread-mg-share.c)36
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c9
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rw-r--r--tools/perf/ui/stdio/hist.c4
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/affinity.c73
-rw-r--r--tools/perf/util/affinity.h17
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/bpf-event.c4
-rw-r--r--tools/perf/util/callchain.c8
-rw-r--r--tools/perf/util/cs-etm.c2
-rw-r--r--tools/perf/util/db-export.c12
-rw-r--r--tools/perf/util/event.c14
-rw-r--r--tools/perf/util/fncache.c63
-rw-r--r--tools/perf/util/fncache.h7
-rw-r--r--tools/perf/util/hist.c8
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/machine.c80
-rw-r--r--tools/perf/util/machine.h10
-rw-r--r--tools/perf/util/map.c223
-rw-r--r--tools/perf/util/map.h14
-rw-r--r--tools/perf/util/map_groups.h106
-rw-r--r--tools/perf/util/map_symbol.h4
-rw-r--r--tools/perf/util/maps.h87
-rw-r--r--tools/perf/util/perf_regs.h2
-rw-r--r--tools/perf/util/pmu.c34
-rw-r--r--tools/perf/util/probe-event.c4
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/srccode.c9
-rw-r--r--tools/perf/util/symbol-elf.c16
-rw-r--r--tools/perf/util/symbol.c91
-rw-r--r--tools/perf/util/symbol.h6
-rw-r--r--tools/perf/util/synthetic-events.c2
-rw-r--r--tools/perf/util/thread-stack.c4
-rw-r--r--tools/perf/util/thread.c38
-rw-r--r--tools/perf/util/thread.h4
-rw-r--r--tools/perf/util/unwind-libdw.c4
-rw-r--r--tools/perf/util/unwind-libunwind-local.c22
-rw-r--r--tools/perf/util/unwind-libunwind.c36
-rw-r--r--tools/perf/util/unwind.h27
-rw-r--r--tools/perf/util/vdso.c2
-rw-r--r--tools/power/cpupower/ToDo14
-rw-r--r--tools/power/cpupower/utils/cpupower-info.c9
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c9
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c4
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h1
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h5
-rw-r--r--tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c64
-rw-r--r--tools/power/cpupower/utils/idle_monitor/nhm_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/snb_idle.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c1014
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c176
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c156
-rw-r--r--tools/power/x86/intel-speed-select/isst.h5
-rw-r--r--tools/testing/selftests/Makefile5
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c47
-rw-r--r--tools/testing/selftests/bpf/xdping.c2
-rw-r--r--tools/testing/selftests/ftrace/settings1
-rw-r--r--tools/testing/selftests/ftrace/test.d/direct/ftrace-direct.tc69
-rw-r--r--tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc84
-rw-r--r--tools/testing/selftests/livepatch/Makefile3
-rw-r--r--tools/testing/selftests/livepatch/functions.sh34
-rwxr-xr-xtools/testing/selftests/livepatch/test-callbacks.sh2
-rwxr-xr-xtools/testing/selftests/livepatch/test-ftrace.sh65
-rwxr-xr-xtools/testing/selftests/livepatch/test-livepatch.sh2
-rwxr-xr-xtools/testing/selftests/livepatch/test-shadow-vars.sh2
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c36
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh39
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh5
-rw-r--r--tools/testing/selftests/net/tls.c60
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c119
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c581
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c4
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c4
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/security/branch_loops.S82
-rw-r--r--tools/testing/selftests/powerpc/security/spectre_v2.c218
-rw-r--r--tools/testing/selftests/powerpc/signal/sigfuz.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c4
-rw-r--r--tools/testing/selftests/powerpc/utils.c20
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c118
-rw-r--r--tools/testing/selftests/vm/config1
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c94
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c2
-rw-r--r--usr/include/Makefile19
6108 files changed, 240731 insertions, 120623 deletions
diff --git a/.gitignore b/.gitignore
index 70580bdd352c..72ef86a5570d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,7 +32,6 @@
*.lzo
*.mod
*.mod.c
-*.ns_deps
*.o
*.o.*
*.patch
@@ -61,6 +60,7 @@ modules.order
/System.map
/Module.markers
/modules.builtin.modinfo
+/modules.nsdeps
#
# RPM spec file (make rpm-pkg)
diff --git a/.mailmap b/.mailmap
index e7f0341a6dbf..1fd03c781f58 100644
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,7 @@ Mark Brown <broonie@sirena.org.uk>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Mathieu Othacehe <m.othacehe@gmail.com>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
Matthew Wilcox <willy@infradead.org> <matthew@wil.cx>
diff --git a/CREDITS b/CREDITS
index 031605d46b4d..9602b0fa1c95 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1875,8 +1875,9 @@ S: The Netherlands
N: Martin Kepplinger
E: martink@posteo.de
-E: martin.kepplinger@ginzinger.com
+E: martin.kepplinger@puri.sm
W: http://www.martinkepplinger.com
+P: 4096R/5AB387D3 F208 2B88 0F9E 4239 3468 6E3F 5003 98DF 5AB3 87D3
D: mma8452 accelerators iio driver
D: pegasus_notetaker input driver
D: Kernel fixes and cleanups
diff --git a/Documentation/ABI/stable/sysfs-class-infiniband b/Documentation/ABI/stable/sysfs-class-infiniband
index aed21b8916a2..96dfe1926b76 100644
--- a/Documentation/ABI/stable/sysfs-class-infiniband
+++ b/Documentation/ABI/stable/sysfs-class-infiniband
@@ -314,25 +314,6 @@ Description:
board_id: (RO) Manufacturing board ID
-sysfs interface for Chelsio T3 RDMA Driver (cxgb3)
---------------------------------------------------
-
-What: /sys/class/infiniband/cxgb3_X/hw_rev
-What: /sys/class/infiniband/cxgb3_X/hca_type
-What: /sys/class/infiniband/cxgb3_X/board_id
-Date: Feb, 2007
-KernelVersion: v2.6.21
-Contact: linux-rdma@vger.kernel.org
-Description:
- hw_rev: (RO) Hardware revision number
-
- hca_type: (RO) HCA type. Here it is a driver short name.
- It should normally match the name in its bus
- driver structure (e.g. pci_driver::name).
-
- board_id: (RO) Manufacturing board id
-
-
sysfs interface for Mellanox ConnectX HCA IB driver (mlx4)
----------------------------------------------------------
diff --git a/Documentation/ABI/stable/sysfs-driver-aspeed-vuart b/Documentation/ABI/stable/sysfs-driver-aspeed-vuart
index 8062953ce77b..950cafc9443a 100644
--- a/Documentation/ABI/stable/sysfs-driver-aspeed-vuart
+++ b/Documentation/ABI/stable/sysfs-driver-aspeed-vuart
@@ -6,10 +6,19 @@ Description: Configures which IO port the host side of the UART
Users: OpenBMC. Proposed changes should be mailed to
openbmc@lists.ozlabs.org
-What: /sys/bus/platform/drivers/aspeed-vuart*/sirq
+What: /sys/bus/platform/drivers/aspeed-vuart/*/sirq
Date: April 2017
Contact: Jeremy Kerr <jk@ozlabs.org>
Description: Configures which interrupt number the host side of
the UART will appear on the host <-> BMC LPC bus.
Users: OpenBMC. Proposed changes should be mailed to
openbmc@lists.ozlabs.org
+
+What: /sys/bus/platform/drivers/aspeed-vuart/*/sirq_polarity
+Date: July 2019
+Contact: Oskar Senft <osk@google.com>
+Description: Configures the polarity of the serial interrupt to the
+ host via the BMC LPC bus.
+ Set to 0 for active-low or 1 for active-high.
+Users: OpenBMC. Proposed changes should be mailed to
+ openbmc@lists.ozlabs.org
diff --git a/Documentation/ABI/stable/sysfs-driver-ib_srp b/Documentation/ABI/stable/sysfs-driver-ib_srp
index 7049a2b50359..84972a57caae 100644
--- a/Documentation/ABI/stable/sysfs-driver-ib_srp
+++ b/Documentation/ABI/stable/sysfs-driver-ib_srp
@@ -67,6 +67,8 @@ Description: Interface for making ib_srp connect to a new target.
initiator is allowed to queue per SCSI host. The default
value for this parameter is 62. The lowest supported value
is 2.
+ * max_it_iu_size, a decimal number specifying the maximum
+ initiator to target information unit length.
What: /sys/class/infiniband_srp/srp-<hca>-<port_number>/ibdev
Date: January 2, 2006
diff --git a/Documentation/ABI/testing/debugfs-hyperv b/Documentation/ABI/testing/debugfs-hyperv
new file mode 100644
index 000000000000..9185e1b06bba
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-hyperv
@@ -0,0 +1,23 @@
+What: /sys/kernel/debug/hyperv/<UUID>/fuzz_test_state
+Date: October 2019
+KernelVersion: 5.5
+Contact: Branden Bonaby <brandonbonaby94@gmail.com>
+Description:	Fuzz testing status of a vmbus device, whether it is in an ON
+		state or an OFF state
+Users: Debugging tools
+
+What: /sys/kernel/debug/hyperv/<UUID>/delay/fuzz_test_buffer_interrupt_delay
+Date: October 2019
+KernelVersion: 5.5
+Contact: Branden Bonaby <brandonbonaby94@gmail.com>
+Description: Fuzz testing buffer interrupt delay value between 0 - 1000
+ microseconds (inclusive).
+Users: Debugging tools
+
+What: /sys/kernel/debug/hyperv/<UUID>/delay/fuzz_test_message_delay
+Date: October 2019
+KernelVersion: 5.5
+Contact: Branden Bonaby <brandonbonaby94@gmail.com>
+Description: Fuzz testing message delay value between 0 - 1000 microseconds
+ (inclusive).
+Users: Debugging tools
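A small C sketch of how these debugfs knobs fit together: program both delays, then switch fuzz testing on. The all-zero UUID and the 100 us values are placeholders, not part of the interface:

#include <stdio.h>

/* Placeholder UUID; substitute a real one from /sys/kernel/debug/hyperv/. */
#define DEV "/sys/kernel/debug/hyperv/00000000-0000-0000-0000-000000000000"

static int write_val(const char *path, int val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	/* Delays must be within 0 - 1000 microseconds (inclusive). */
	write_val(DEV "/delay/fuzz_test_buffer_interrupt_delay", 100);
	write_val(DEV "/delay/fuzz_test_message_delay", 100);
	write_val(DEV "/fuzz_test_state", 1);	/* turn fuzzing ON */
	return 0;
}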
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 29ebe9afdac4..29aaedf33246 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -25,6 +25,7 @@ Description:
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
option: [[appraise_type=]] [template=] [permit_directio]
+ [appraise_flag=]
base: func:= [BPRM_CHECK][MMAP_CHECK][CREDS_CHECK][FILE_CHECK][MODULE_CHECK]
[FIRMWARE_CHECK]
[KEXEC_KERNEL_CHECK] [KEXEC_INITRAMFS_CHECK]
@@ -38,6 +39,9 @@ Description:
fowner:= decimal value
lsm: are LSM specific
option: appraise_type:= [imasig] [imasig|modsig]
+ appraise_flag:= [check_blacklist]
+			Currently, the blacklist check applies only to files
+			signed with an appended signature.
template:= name of a defined IMA template type
(eg, ima-ng). Only valid when action is "measure".
pcr:= decimal value
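A hedged example of loading a rule that uses the new appraise_flag option via the securityfs policy interface; the exact rule text is illustrative only, and whether a single appended rule is accepted depends on the kernel's IMA configuration:

#include <stdio.h>

int main(void)
{
	/* Illustrative rule: blacklist check for appended (modsig)
	 * signatures on kexec kernel images. */
	const char *rule =
		"appraise func=KEXEC_KERNEL_CHECK "
		"appraise_flag=check_blacklist appraise_type=imasig|modsig\n";
	FILE *f = fopen("/sys/kernel/security/ima/policy", "w");

	if (!f) {
		perror("ima policy");
		return 1;
	}
	fputs(rule, f);
	return fclose(f) ? 1 : 0;
}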
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
index 36258bc1b473..614874e2cf53 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
@@ -1,4 +1,4 @@
-What: /sys/bus/coresight/devices/<memory_map>.etm/enable_source
+What: /sys/bus/coresight/devices/etm<N>/enable_source
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -8,82 +8,82 @@ Description: (RW) Enable/disable tracing on this specific trace entity.
of coresight components linking the source to the sink is
configured and managed automatically by the coresight framework.
-What: /sys/bus/coresight/devices/<memory_map>.etm/cpu
+What: /sys/bus/coresight/devices/etm<N>/cpu
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) The CPU this tracing entity is associated with.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nr_pe_cmp
+What: /sys/bus/coresight/devices/etm<N>/nr_pe_cmp
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of PE comparator inputs that are
available for tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nr_addr_cmp
+What: /sys/bus/coresight/devices/etm<N>/nr_addr_cmp
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of address comparator pairs that are
available for tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nr_cntr
+What: /sys/bus/coresight/devices/etm<N>/nr_cntr
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of counters that are available for
tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nr_ext_inp
+What: /sys/bus/coresight/devices/etm<N>/nr_ext_inp
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates how many external inputs are implemented.
-What: /sys/bus/coresight/devices/<memory_map>.etm/numcidc
+What: /sys/bus/coresight/devices/etm<N>/numcidc
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of Context ID comparators that are
available for tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/numvmidc
+What: /sys/bus/coresight/devices/etm<N>/numvmidc
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of VMID comparators that are available
for tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nrseqstate
+What: /sys/bus/coresight/devices/etm<N>/nrseqstate
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of sequencer states that are
implemented.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nr_resource
+What: /sys/bus/coresight/devices/etm<N>/nr_resource
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of resource selection pairs that are
available for tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/nr_ss_cmp
+What: /sys/bus/coresight/devices/etm<N>/nr_ss_cmp
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Indicates the number of single-shot comparator controls that
are available for tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/reset
+What: /sys/bus/coresight/devices/etm<N>/reset
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(W) Cancels all configuration on a trace unit and sets it back
to its boot configuration.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mode
+What: /sys/bus/coresight/devices/etm<N>/mode
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -91,302 +91,349 @@ Description: (RW) Controls various modes supported by this ETM, for example
P0 instruction tracing, branch broadcast, cycle counting and
context ID tracing.
-What: /sys/bus/coresight/devices/<memory_map>.etm/pe
+What: /sys/bus/coresight/devices/etm<N>/pe
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls which PE to trace.
-What: /sys/bus/coresight/devices/<memory_map>.etm/event
+What: /sys/bus/coresight/devices/etm<N>/event
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls the tracing of arbitrary events from bank 0 to 3.
-What: /sys/bus/coresight/devices/<memory_map>.etm/event_instren
+What: /sys/bus/coresight/devices/etm<N>/event_instren
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls the behavior of the events in bank 0 to 3.
-What: /sys/bus/coresight/devices/<memory_map>.etm/event_ts
+What: /sys/bus/coresight/devices/etm<N>/event_ts
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls the insertion of global timestamps in the trace
streams.
-What: /sys/bus/coresight/devices/<memory_map>.etm/syncfreq
+What: /sys/bus/coresight/devices/etm<N>/syncfreq
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls how often trace synchronization requests occur.
-What: /sys/bus/coresight/devices/<memory_map>.etm/cyc_threshold
+What: /sys/bus/coresight/devices/etm<N>/cyc_threshold
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Sets the threshold value for cycle counting.
-What: /sys/bus/coresight/devices/<memory_map>.etm/bb_ctrl
+What: /sys/bus/coresight/devices/etm<N>/bb_ctrl
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls which regions in the memory map are enabled to
use branch broadcasting.
-What: /sys/bus/coresight/devices/<memory_map>.etm/event_vinst
+What: /sys/bus/coresight/devices/etm<N>/event_vinst
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls instruction trace filtering.
-What: /sys/bus/coresight/devices/<memory_map>.etm/s_exlevel_vinst
+What: /sys/bus/coresight/devices/etm<N>/s_exlevel_vinst
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) In Secure state, each bit controls whether instruction
tracing is enabled for the corresponding exception level.
-What: /sys/bus/coresight/devices/<memory_map>.etm/ns_exlevel_vinst
+What: /sys/bus/coresight/devices/etm<N>/ns_exlevel_vinst
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) In non-secure state, each bit controls whether instruction
tracing is enabled for the corresponding exception level.
-What: /sys/bus/coresight/devices/<memory_map>.etm/addr_idx
+What: /sys/bus/coresight/devices/etm<N>/addr_idx
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Select which address comparator or pair (of comparators) to
work with.
-What: /sys/bus/coresight/devices/<memory_map>.etm/addr_instdatatype
+What: /sys/bus/coresight/devices/etm<N>/addr_instdatatype
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls what type of comparison the trace unit performs.
-What: /sys/bus/coresight/devices/<memory_map>.etm/addr_single
+What: /sys/bus/coresight/devices/etm<N>/addr_single
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Used to setup single address comparator values.
-What: /sys/bus/coresight/devices/<memory_map>.etm/addr_range
+What: /sys/bus/coresight/devices/etm<N>/addr_range
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Used to setup address range comparator values.
-What: /sys/bus/coresight/devices/<memory_map>.etm/seq_idx
+What: /sys/bus/coresight/devices/etm<N>/seq_idx
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(RW) Select which sequencer.
-What: /sys/bus/coresight/devices/<memory_map>.etm/seq_state
+What: /sys/bus/coresight/devices/etm<N>/seq_state
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Use this to set, or read, the sequencer state.
-What: /sys/bus/coresight/devices/<memory_map>.etm/seq_event
+What: /sys/bus/coresight/devices/etm<N>/seq_event
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Moves the sequencer state to a specific state.
-What: /sys/bus/coresight/devices/<memory_map>.etm/seq_reset_event
+What: /sys/bus/coresight/devices/etm<N>/seq_reset_event
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Moves the sequencer to state 0 when a programmed event
occurs.
-What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_idx
+What: /sys/bus/coresight/devices/etm<N>/cntr_idx
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Select which counter unit to work with.
-What: /sys/bus/coresight/devices/<memory_map>.etm/cntrldvr
+What: /sys/bus/coresight/devices/etm<N>/cntrldvr
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) This sets or returns the reload count value of the
specific counter.
-What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_val
+What: /sys/bus/coresight/devices/etm<N>/cntr_val
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) This sets or returns the current count value of the
specific counter.
-What: /sys/bus/coresight/devices/<memory_map>.etm/cntr_ctrl
+What: /sys/bus/coresight/devices/etm<N>/cntr_ctrl
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls the operation of the selected counter.
-What: /sys/bus/coresight/devices/<memory_map>.etm/res_idx
+What: /sys/bus/coresight/devices/etm<N>/res_idx
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Select which resource selection unit to work with.
-What: /sys/bus/coresight/devices/<memory_map>.etm/res_ctrl
+What: /sys/bus/coresight/devices/etm<N>/res_ctrl
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Controls the selection of the resources in the trace unit.
-What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_idx
+What: /sys/bus/coresight/devices/etm<N>/ctxid_idx
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Select which context ID comparator to work with.
-What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_pid
+What: /sys/bus/coresight/devices/etm<N>/ctxid_pid
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Get/Set the context ID comparator value to trigger on.
-What: /sys/bus/coresight/devices/<memory_map>.etm/ctxid_masks
+What: /sys/bus/coresight/devices/etm<N>/ctxid_masks
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Mask for all 8 context ID comparator value
registers (if implemented).
-What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_idx
+What: /sys/bus/coresight/devices/etm<N>/vmid_idx
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Select which virtual machine ID comparator to work with.
-What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_val
+What: /sys/bus/coresight/devices/etm<N>/vmid_val
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Get/Set the virtual machine ID comparator value to
trigger on.
-What: /sys/bus/coresight/devices/<memory_map>.etm/vmid_masks
+What: /sys/bus/coresight/devices/etm<N>/vmid_masks
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (RW) Mask for all 8 virtual machine ID comparator value
registers (if implemented).
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcoslsr
+What: /sys/bus/coresight/devices/etm<N>/addr_exlevel_s_ns
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Set the Exception Level matching bits for secure and
+ non-secure exception levels.
+
+What: /sys/bus/coresight/devices/etm<N>/vinst_pe_cmp_start_stop
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Access the start stop control register for PE input
+ comparators.
+
+What: /sys/bus/coresight/devices/etm<N>/addr_cmp_view
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the current settings for the selected address
+ comparator.
+
+What: /sys/bus/coresight/devices/etm<N>/sshot_idx
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Select the single shot control register to access.
+
+What: /sys/bus/coresight/devices/etm<N>/sshot_ctrl
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Access the selected single shot control register.
+
+What: /sys/bus/coresight/devices/etm<N>/sshot_status
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the current value of the selected single shot
+ status register.
+
+What: /sys/bus/coresight/devices/etm<N>/sshot_pe_ctrl
+Date: December 2019
+KernelVersion: 5.5
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description:	(RW) Access the selected single shot PE comparator control
+ register.
+
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcoslsr
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the OS Lock Status Register (0x304).
		The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpdcr
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcpdcr
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Power Down Control Register
(0x310). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpdsr
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcpdsr
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Power Down Status Register
(0x314). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trclsr
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trclsr
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the SW Lock Status Register
(0xFB4). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcauthstatus
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcauthstatus
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Authentication Status Register
(0xFB8). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcdevid
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcdevid
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Device ID Register
(0xFC8). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcdevtype
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcdevtype
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Device Type Register
(0xFCC). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr0
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcpidr0
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Peripheral ID0 Register
(0xFE0). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr1
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcpidr1
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Peripheral ID1 Register
(0xFE4). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr2
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcpidr2
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Peripheral ID2 Register
(0xFE8). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcpidr3
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcpidr3
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the Peripheral ID3 Register
(0xFEC). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcconfig
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trcconfig
Date: February 2016
KernelVersion: 4.07
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the trace configuration register
(0x010) as currently set by SW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trctraceid
+What: /sys/bus/coresight/devices/etm<N>/mgmt/trctraceid
Date: February 2016
KernelVersion: 4.07
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Print the content of the trace ID register (0x040).
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr0
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr0
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns the tracing capabilities of the trace unit (0x1E0).
The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr1
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr1
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns the tracing capabilities of the trace unit (0x1E4).
The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr2
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr2
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -394,7 +441,7 @@ Description: (R) Returns the maximum size of the data value, data address,
		VMID, context ID and instruction address in the trace unit
(0x1E8). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr3
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr3
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -403,42 +450,42 @@ Description: (R) Returns the value associated with various resources
architecture specification for more details (0x1E8).
The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr4
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr4
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns how many resources the trace unit supports (0x1F0).
The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr5
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr5
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns how many resources the trace unit supports (0x1F4).
The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr8
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr8
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns the maximum speculation depth of the instruction
trace stream. (0x180). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr9
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr9
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns the number of P0 right-hand keys that the trace unit
can use (0x184). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr10
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr10
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description: (R) Returns the number of P1 right-hand keys that the trace unit
can use (0x188). The value is taken directly from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr11
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr11
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -446,7 +493,7 @@ Description: (R) Returns the number of special P1 right-hand keys that the
trace unit can use (0x18C). The value is taken directly from
the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr12
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr12
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
@@ -454,7 +501,7 @@ Description: (R) Returns the number of conditional P1 right-hand keys that
the trace unit can use (0x190). The value is taken directly
from the HW.
-What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr13
+What: /sys/bus/coresight/devices/etm<N>/trcidr/trcidr13
Date: April 2015
KernelVersion: 4.01
Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-fsi b/Documentation/ABI/testing/sysfs-bus-fsi
index 57c806350d6c..320697bdf41d 100644
--- a/Documentation/ABI/testing/sysfs-bus-fsi
+++ b/Documentation/ABI/testing/sysfs-bus-fsi
@@ -1,25 +1,25 @@
-What: /sys/bus/platform/devices/fsi-master/rescan
+What: /sys/bus/platform/devices/../fsi-master/fsi0/rescan
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
		Initiates an FSI master scan for all connected slave devices
on its links.
-What: /sys/bus/platform/devices/fsi-master/break
+What: /sys/bus/platform/devices/../fsi-master/fsi0/break
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Sends an FSI BREAK command on a master's communication
		link to any connected slaves. A BREAK resets the connected
		device's logic and prepares it to receive further commands
from the master.
-What: /sys/bus/platform/devices/fsi-master/slave@00:00/term
+What: /sys/bus/platform/devices/../fsi-master/fsi0/slave@00:00/term
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Sends an FSI terminate command from the master to its
connected slave. A terminate resets the slave's state machines
@@ -29,10 +29,10 @@ Description:
ongoing operation in case of an expired 'Master Time Out'
timer.
-What: /sys/bus/platform/devices/fsi-master/slave@00:00/raw
+What: /sys/bus/platform/devices/../fsi-master/fsi0/slave@00:00/raw
Date: May 2017
KernelVersion: 4.12
-Contact: cbostic@linux.vnet.ibm.com
+Contact: linux-fsi@lists.ozlabs.org
Description:
Provides a means of reading/writing a 32 bit value from/to a
specified FSI bus address.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 680451695422..faaa2166d741 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -753,6 +753,8 @@ What: /sys/.../events/in_illuminance0_thresh_falling_value
What:		/sys/.../events/in_illuminance0_thresh_rising_value
What:		/sys/.../events/in_proximity0_thresh_falling_value
What:		/sys/.../events/in_proximity0_thresh_rising_value
+What: /sys/.../events/in_illuminance_thresh_rising_value
+What: /sys/.../events/in_illuminance_thresh_falling_value
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -972,6 +974,7 @@ What: /sys/.../events/in_activity_jogging_thresh_rising_period
What: /sys/.../events/in_activity_jogging_thresh_falling_period
What: /sys/.../events/in_activity_running_thresh_rising_period
What: /sys/.../events/in_activity_running_thresh_falling_period
+What: /sys/.../events/in_illuminance_thresh_either_period
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
@@ -1715,3 +1718,11 @@ Description:
Mass concentration reading of particulate matter in ug / m3.
pmX consists of particles with aerodynamic diameter less or
equal to X micrometers.
+
+What: /sys/bus/iio/devices/iio:deviceX/events/in_illuminance_period_available
+Date: November 2019
+KernelVersion: 5.4
+Contact: linux-iio@vger.kernel.org
+Description:
+ List of valid periods (in seconds) for which the light intensity
+		must be above the threshold level before the interrupt is
+		asserted.
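A minimal C sketch, assuming iio:device0, that reads the first advertised period and programs it through the in_illuminance_thresh_either_period attribute listed earlier:

#include <stdio.h>

#define EV "/sys/bus/iio/devices/iio:device0/events/"

int main(void)
{
	double period;
	FILE *f = fopen(EV "in_illuminance_period_available", "r");

	if (!f) {
		perror("period_available");
		return 1;
	}
	if (fscanf(f, "%lf", &period) != 1) {	/* first listed period */
		fclose(f);
		return 1;
	}
	fclose(f);

	f = fopen(EV "in_illuminance_thresh_either_period", "w");
	if (!f) {
		perror("thresh_either_period");
		return 1;
	}
	fprintf(f, "%g\n", period);	/* seconds above the threshold */
	return fclose(f) ? 1 : 0;
}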
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192 b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192
new file mode 100644
index 000000000000..7627d3be08f5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-ad7192
@@ -0,0 +1,39 @@
+What: /sys/bus/iio/devices/iio:deviceX/ac_excitation_en
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading gives the state of AC excitation.
+ Writing '1' enables AC excitation.
+
+What: /sys/bus/iio/devices/iio:deviceX/bridge_switch_en
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+		The bridge switch can be opened to disconnect the bridge when
+		the system current consumption needs to be minimized.
+ Reading gives the state of the bridge switch.
+ Writing '1' enables the bridge switch.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltagex_sys_calibration
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Initiates the system calibration procedure. This is done on a
+ single channel at a time. Write '1' to start the calibration.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltagex_sys_calibration_mode_available
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading returns a list with the possible calibration modes.
+ There are two available options:
+ "zero_scale" - calibrate to zero scale
+ "full_scale" - calibrate to full scale
+
+What: /sys/bus/iio/devices/iio:deviceX/in_voltagex_sys_calibration_mode
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Sets up the calibration mode used in the system calibration
+ procedure. Reading returns the current calibration mode.
+ Writing sets the system calibration mode.
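The calibration attributes are used in sequence: select a mode, then trigger the procedure. A sketch in C, assuming iio:device0 and channel 0:

#include <stdio.h>

#define DEV "/sys/bus/iio/devices/iio:device0/"

static int write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(s, f);
	return fclose(f);
}

int main(void)
{
	/* 1. choose the mode, 2. start the calibration on channel 0 */
	if (write_str(DEV "in_voltage0_sys_calibration_mode", "zero_scale"))
		return 1;
	return write_str(DEV "in_voltage0_sys_calibration", "1") ? 1 : 0;
}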
diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
index 6bd45346ac7e..3d37e2796d5a 100644
--- a/Documentation/ABI/testing/sysfs-bus-mei
+++ b/Documentation/ABI/testing/sysfs-bus-mei
@@ -4,7 +4,7 @@ KernelVersion: 3.10
Contact: Samuel Ortiz <sameo@linux.intel.com>
linux-mei@linux.intel.com
Description: Stores the same MODALIAS value emitted by uevent
- Format: mei:<mei device name>:<device uuid>:
+ Format: mei:<mei device name>:<device uuid>:<protocol version>
What: /sys/bus/mei/devices/.../name
Date: May 2015
@@ -26,3 +26,24 @@ KernelVersion: 4.3
Contact: Tomas Winkler <tomas.winkler@intel.com>
Description: Stores mei client protocol version
Format: %d
+
+What: /sys/bus/mei/devices/.../max_conn
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client maximum number of connections
+ Format: %d
+
+What: /sys/bus/mei/devices/.../fixed
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client fixed address, if any
+ Format: %d
+
+What: /sys/bus/mei/devices/.../max_len
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Stores mei client maximum message length
+ Format: %d
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 8bfee557e50e..450296cc7948 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -347,3 +347,16 @@ Description:
If the device has any Peer-to-Peer memory registered, this
file contains a '1' if the memory has been published for
use outside the driver that owns the device.
+
+What: /sys/bus/pci/devices/.../link/clkpm
+ /sys/bus/pci/devices/.../link/l0s_aspm
+ /sys/bus/pci/devices/.../link/l1_aspm
+ /sys/bus/pci/devices/.../link/l1_1_aspm
+ /sys/bus/pci/devices/.../link/l1_2_aspm
+ /sys/bus/pci/devices/.../link/l1_1_pcipm
+ /sys/bus/pci/devices/.../link/l1_2_pcipm
+Date: October 2019
+Contact: Heiner Kallweit <hkallweit1@gmail.com>
+Description: If ASPM is supported for an endpoint, these files can be
+ used to disable or enable the individual power management
+ states. Write y/1/on to enable, n/0/off to disable.
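For illustration, a short C sketch disabling ASPM L1 on one endpoint; the BDF address is a placeholder, and any of y/1/on or n/0/off would work as the written value:

#include <stdio.h>

int main(void)
{
	const char *p = "/sys/bus/pci/devices/0000:01:00.0/link/l1_aspm";
	FILE *f = fopen(p, "w");

	if (!f) {
		perror(p);
		return 1;
	}
	fputs("off", f);	/* disable the L1 ASPM link state */
	return fclose(f) ? 1 : 0;
}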
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index b21fba14689b..82e80de78dd0 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -80,6 +80,14 @@ Contact: thunderbolt-software@lists.01.org
Description: This attribute contains 1 if Thunderbolt device was already
authorized on boot and 0 otherwise.
+What: /sys/bus/thunderbolt/devices/.../generation
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Christian Kellner <christian@kellner.me>
+Description: This attribute contains the generation of the Thunderbolt
+ controller associated with the device. It will contain 4
+ for USB4.
+
What: /sys/bus/thunderbolt/devices/.../key
Date: Sep 2017
KernelVersion: 4.13
@@ -104,6 +112,34 @@ Contact: thunderbolt-software@lists.01.org
Description:	This attribute contains the name of this device extracted from
the device DROM.
+What: /sys/bus/thunderbolt/devices/.../rx_speed
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute reports the device RX speed per lane.
+ All RX lanes run at the same speed.
+
+What: /sys/bus/thunderbolt/devices/.../rx_lanes
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	This attribute reports the number of RX lanes the device is
+		using simultaneously through its upstream port.
+
+What: /sys/bus/thunderbolt/devices/.../tx_speed
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description: This attribute reports the TX speed per lane.
+ All TX lanes run at the same speed.
+
+What: /sys/bus/thunderbolt/devices/.../tx_lanes
+Date: Jan 2020
+KernelVersion: 5.5
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
+Description:	This attribute reports the number of TX lanes the device is
+		using simultaneously through its upstream port.
+
What: /sys/bus/thunderbolt/devices/.../vendor
Date: Sep 2017
KernelVersion: 4.13
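Since all RX lanes run at the same speed, aggregate bandwidth is lanes times per-lane speed. A C sketch under two assumptions: the device directory name, and that rx_speed begins with a numeric Gb/s value:

#include <stdio.h>

#define DEV "/sys/bus/thunderbolt/devices/0-1/"	/* placeholder device */

static int read_double(const char *path, double *out)
{
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%lf", out) != 1) {
		if (f)
			fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	double speed, lanes;

	if (read_double(DEV "rx_speed", &speed) ||
	    read_double(DEV "rx_lanes", &lanes)) {
		perror("read");
		return 1;
	}
	printf("RX: %.0f lane(s) x %.1f Gb/s = %.1f Gb/s total\n",
	       lanes, speed, lanes * speed);
	return 0;
}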
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-el15203000 b/Documentation/ABI/testing/sysfs-class-led-driver-el15203000
new file mode 100644
index 000000000000..f520ece9b64c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-el15203000
@@ -0,0 +1,139 @@
+What: /sys/class/leds/<led>/hw_pattern
+Date: September 2019
+KernelVersion: 5.5
+Description:
+ Specify a hardware pattern for the EL15203000 LED.
+		The LED board supports only patterns predefined by the
+		firmware for specific LEDs.
+
+ Breathing mode for Screen frame light tube:
+ "0 4000 1 4000"
+
+ ^
+ |
+ Max-| ---
+ | / \
+ | / \
+ | / \ /
+ | / \ /
+ Min-|- ---
+ |
+ 0------4------8--> time (sec)
+
+ Cascade mode for Pipe LED:
+ "1 800 2 800 4 800 8 800 16 800"
+
+ ^
+ |
+ 0 On -|----+ +----+ +---
+ | | | | |
+ Off-| +-------------------+ +-------------------+
+ |
+ 1 On -| +----+ +----+
+ | | | | |
+ Off |----+ +-------------------+ +------------------
+ |
+ 2 On -| +----+ +----+
+ | | | | |
+ Off-|---------+ +-------------------+ +-------------
+ |
+ 3 On -| +----+ +----+
+ | | | | |
+ Off-|--------------+ +-------------------+ +--------
+ |
+ 4 On -| +----+ +----+
+ | | | | |
+ Off-|-------------------+ +-------------------+ +---
+ |
+ 0---0.8--1.6--2.4--3.2---4---4.8--5.6--6.4--7.2---8--> time (sec)
+
+ Inverted cascade mode for Pipe LED:
+ "30 800 29 800 27 800 23 800 15 800"
+
+ ^
+ |
+ 0 On -| +-------------------+ +-------------------+
+ | | | | |
+ Off-|----+ +----+ +---
+ |
+ 1 On -|----+ +-------------------+ +------------------
+ | | | | |
+ Off | +----+ +----+
+ |
+ 2 On -|---------+ +-------------------+ +-------------
+ | | | | |
+ Off-| +----+ +----+
+ |
+ 3 On -|--------------+ +-------------------+ +--------
+ | | | | |
+ Off-| +----+ +----+
+ |
+ 4 On -|-------------------+ +-------------------+ +---
+ | | | | |
+ Off-| +----+ +----+
+ |
+ 0---0.8--1.6--2.4--3.2---4---4.8--5.6--6.4--7.2---8--> time (sec)
+
+ Bounce mode for Pipe LED:
+ "1 800 2 800 4 800 8 800 16 800 16 800 8 800 4 800 2 800 1 800"
+
+ ^
+ |
+ 0 On -|----+ +--------
+ | | |
+ Off-| +---------------------------------------+
+ |
+ 1 On -| +----+ +----+
+ | | | | |
+ Off |----+ +-----------------------------+ +--------
+ |
+ 2 On -| +----+ +----+
+ | | | | |
+ Off-|---------+ +-------------------+ +-------------
+ |
+ 3 On -| +----+ +----+
+ | | | | |
+ Off-|--------------+ +---------+ +------------------
+ |
+ 4 On -| +---------+
+ | | |
+ Off-|-------------------+ +-----------------------
+ |
+ 0---0.8--1.6--2.4--3.2---4---4.8--5.6--6.4--7.2---8--> time (sec)
+
+ Inverted bounce mode for Pipe LED:
+ "30 800 29 800 27 800 23 800 15 800 15 800 23 800 27 800 29 800 30 800"
+
+ ^
+ |
+ 0 On -| +---------------------------------------+
+ | | |
+ Off-|----+ +--------
+ |
+ 1 On -|----+ +-----------------------------+ +--------
+ | | | | |
+ Off | +----+ +----+
+ |
+ 2 On -|---------+ +-------------------+ +-------------
+ | | | | |
+ Off-| +----+ +----+
+ |
+ 3 On -|--------------+ +---------+ +------------------
+ | | | | |
+ Off-| +----+ +----+
+ |
+ 4 On -|-------------------+ +-----------------------
+ | | |
+ Off-| +---------+
+ |
+ 0---0.8--1.6--2.4--3.2---4---4.8--5.6--6.4--7.2---8--> time (sec)
+
+What: /sys/class/leds/<led>/repeat
+Date: September 2019
+KernelVersion: 5.5
+Description:
+		The EL15203000 supports only indefinitely repeating patterns,
+ so this file should always store -1.
+
+ For more info, please see:
+ Documentation/ABI/testing/sysfs-class-led-trigger-pattern
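A sketch in C that programs the cascade pattern from the example above and then confirms the repeat value; the LED name is an assumption:

#include <stdio.h>

#define LED "/sys/class/leds/el15203000:pipe/"	/* assumed LED name */

int main(void)
{
	int repeat = 0;
	FILE *f = fopen(LED "hw_pattern", "w");

	if (!f) {
		perror("hw_pattern");
		return 1;
	}
	/* Cascade mode example from the ABI text above. */
	fputs("1 800 2 800 4 800 8 800 16 800", f);
	if (fclose(f))
		return 1;

	f = fopen(LED "repeat", "r");
	if (!f || fscanf(f, "%d", &repeat) != 1) {
		perror("repeat");
		return 1;
	}
	fclose(f);
	printf("repeat = %d (expected -1, indefinite)\n", repeat);
	return 0;
}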
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index a92d844f806e..e9dc110650ae 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -80,3 +80,13 @@ Description: Display the ME device state.
DISABLED
POWER_DOWN
POWER_UP
+
+What: /sys/class/mei/meiN/trc
+Date: Nov 2019
+KernelVersion: 5.5
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display trc status register content
+
+ The ME FW writes Glitch Detection HW (TRC)
+		status information into the trc status register
+		for BIOS and OS to monitor fw health.
diff --git a/Documentation/ABI/testing/sysfs-class-watchdog b/Documentation/ABI/testing/sysfs-class-watchdog
index 675f9b537661..9860a8b2ba75 100644
--- a/Documentation/ABI/testing/sysfs-class-watchdog
+++ b/Documentation/ABI/testing/sysfs-class-watchdog
@@ -17,8 +17,13 @@ What: /sys/class/watchdog/watchdogn/nowayout
Date: August 2015
Contact: Wim Van Sebroeck <wim@iguana.be>
Description:
- It is a read only file. While reading, it gives '1' if that
- device supports nowayout feature else, it gives '0'.
+ It is a read/write file. While reading, it gives '1'
+ if the device has the nowayout feature set, otherwise
+ it gives '0'. Writing a '1' to the file enables the
+ nowayout feature. Once set, the nowayout feature
+ cannot be disabled, so writing a '0' either has no
+ effect (if the feature was already disabled) or
+ results in a permission error.
What: /sys/class/watchdog/watchdogn/state
Date: August 2015
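A minimal C sketch of the read/write behavior described above: report the current nowayout state and, given any argument, latch it. The watchdog0 name is an assumption; per the text, once written the feature cannot be cleared again:

#include <stdio.h>

#define NOWAYOUT "/sys/class/watchdog/watchdog0/nowayout"

int main(int argc, char **argv)
{
	int v;
	FILE *f = fopen(NOWAYOUT, "r");

	if (!f || fscanf(f, "%d", &v) != 1) {
		perror(NOWAYOUT);
		return 1;
	}
	fclose(f);
	printf("nowayout: %d\n", v);

	if (argc > 1) {			/* any argument: latch nowayout */
		f = fopen(NOWAYOUT, "w");
		if (!f) {
			perror(NOWAYOUT);
			return 1;
		}
		fputs("1", f);		/* irreversible until reboot */
		fclose(f);
	}
	return 0;
}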
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 7ab2b1b5e255..aedeae1e8ec1 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -31,6 +31,12 @@ Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
Controls the issue rate of segment discard commands.
+What: /sys/fs/f2fs/<disk>/max_blkaddr
+Date: November 2019
+Contact: "Ramon Pantin" <pantin@google.com>
+Description:
+		Shows the first block address of the MAIN area.
+
What: /sys/fs/f2fs/<disk>/ipu_policy
Date: November 2013
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
diff --git a/Documentation/ABI/testing/sysfs-platform-dfl-fme b/Documentation/ABI/testing/sysfs-platform-dfl-fme
index 72634d3ae4f4..3683cb1cdc3d 100644
--- a/Documentation/ABI/testing/sysfs-platform-dfl-fme
+++ b/Documentation/ABI/testing/sysfs-platform-dfl-fme
@@ -106,3 +106,135 @@ KernelVersion: 5.4
Contact: Wu Hao <hao.wu@intel.com>
Description: Read-only. Read this file to get the second error detected by
hardware.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/name
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-Only. Read this file to get the name of the hwmon device.
+		It supports the values:
+ 'dfl_fme_thermal' - thermal hwmon device name
+ 'dfl_fme_power' - power hwmon device name
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_input
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns FPGA device temperature in millidegrees
+ Celsius.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_max
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns hardware threshold1 temperature in
+ millidegrees Celsius. If temperature rises at or above this
+ threshold, hardware starts 50% or 90% throttling (see
+ 'temp1_max_policy').
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_crit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns hardware threshold2 temperature in
+ millidegrees Celsius. If temperature rises at or above this
+ threshold, hardware starts 100% throttling.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_emergency
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns hardware trip threshold temperature in
+ millidegrees Celsius. If temperature rises at or above this
+ threshold, a fatal event will be triggered to board management
+ controller (BMC) to shutdown FPGA.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_max_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if temperature is currently at or above
+ hardware threshold1 (see 'temp1_max'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_crit_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if temperature is currently at or above
+ hardware threshold2 (see 'temp1_crit'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/temp1_max_policy
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. Read this file to get the policy of hardware threshold1
+ (see 'temp1_max'). It only supports two values (policies):
+ 0 - AP2 state (90% throttling)
+ 1 - AP1 state (50% throttling)
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_input
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns current FPGA power consumption in uW.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_max
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Write. Read this file to get current hardware power
+ threshold1 in uW. If power consumption rises at or above
+ this threshold, hardware starts 50% throttling.
+ Write this file to set current hardware power threshold1 in uW.
+		As the hardware only accepts values in Watts, the input value
+		is rounded down to whole Watts (the sub-Watt part is discarded)
+		and clamped to the range from 0 to 127 Watts. Write fails with
+ -EINVAL if input parsing fails.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_crit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Write. Read this file to get current hardware power
+ threshold2 in uW. If power consumption rises at or above
+ this threshold, hardware starts 90% throttling.
+ Write this file to set current hardware power threshold2 in uW.
+		As the hardware only accepts values in Watts, the input value
+		is rounded down to whole Watts (the sub-Watt part is discarded)
+		and clamped to the range from 0 to 127 Watts. Write fails with
+ -EINVAL if input parsing fails.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_max_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if power consumption is currently at or
+ above hardware threshold1 (see 'power1_max'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_crit_alarm
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. It returns 1 if power consumption is currently at or
+ above hardware threshold2 (see 'power1_crit'), otherwise 0.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_xeon_limit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns power limit for XEON in uW.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_fpga_limit
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-Only. It returns power limit for FPGA in uW.
+
+What: /sys/bus/platform/devices/dfl-fme.0/hwmon/hwmonX/power1_ltr
+Date: October 2019
+KernelVersion: 5.5
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. Read this file to get current Latency Tolerance
+ Reporting (ltr) value. It returns 1 if all Accelerated
+ Function Units (AFUs) can tolerate latency >= 40us for memory
+ access or 0 if any AFU is latency sensitive (< 40us).
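To make the units concrete: the power thresholds are written in microwatts even though the hardware keeps whole Watts. A C sketch setting threshold1 to 30 W; the hwmon index is an assumption:

#include <stdio.h>

int main(void)
{
	const char *p =
	    "/sys/bus/platform/devices/dfl-fme.0/hwmon/hwmon0/power1_max";
	FILE *f = fopen(p, "w");

	if (!f) {
		perror(p);
		return 1;
	}
	/* 30 W in microwatts; hardware rounds down to whole Watts and
	 * clamps to 0 - 127 W. */
	fprintf(f, "%ld\n", 30L * 1000 * 1000);
	return fclose(f) ? 1 : 0;	/* write fails if parsing fails */
}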
diff --git a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
new file mode 100644
index 000000000000..c65a80574869
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
@@ -0,0 +1,58 @@
+What: /sys/bus/platform/devices/MLNXBF04:00/driver/lifecycle_state
+Date: Oct 2019
+KernelVersion: 5.5
+Contact: "Liming Sun <lsun@mellanox.com>"
+Description:
+ The Life-cycle state of the SoC, which could be one of the
+ following values.
+ Production - Production state and can be updated to secure
+ GA Secured - Secure chip and not able to change state
+ GA Non-Secured - Non-Secure chip and not able to change state
+ RMA - Return Merchandise Authorization
+
+What: /sys/bus/platform/devices/MLNXBF04:00/driver/post_reset_wdog
+Date: Oct 2019
+KernelVersion: 5.5
+Contact: "Liming Sun <lsun@mellanox.com>"
+Description:
+		The watchdog setting in seconds for the next boot. It is used
+ to reboot the chip and recover it to the old state if the new
+ boot partition fails.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/driver/reset_action
+Date: Oct 2019
+KernelVersion: 5.5
+Contact: "Liming Sun <lsun@mellanox.com>"
+Description:
+ The source of the boot stream for the next reset. It could be
+ one of the following values.
+ external - boot from external source (USB or PCIe)
+ emmc - boot from the onchip eMMC
+ emmc_legacy - boot from the onchip eMMC in legacy (slow) mode
+
+What: /sys/bus/platform/devices/MLNXBF04:00/driver/second_reset_action
+Date: Oct 2019
+KernelVersion: 5.5
+Contact: "Liming Sun <lsun@mellanox.com>"
+Description:
+		Update the source of the boot stream to be applied after the
+		next reset. It could be one of the following values.
+ external - boot from external source (USB or PCIe)
+ emmc - boot from the onchip eMMC
+ emmc_legacy - boot from the onchip eMMC in legacy (slow) mode
+ swap_emmc - swap the primary / secondary boot partition
+ none - cancel the action
+
+What: /sys/bus/platform/devices/MLNXBF04:00/driver/secure_boot_fuse_state
+Date: Oct 2019
+KernelVersion: 5.5
+Contact: "Liming Sun <lsun@mellanox.com>"
+Description:
+ The state of eFuse versions with the following values.
+ InUse - burnt, valid and currently in use
+ Used - burnt and valid
+ Free - not burnt and free to use
+ Skipped - not burnt but not free (skipped)
+ Wasted - burnt and invalid
+ Invalid - not burnt but marked as valid (error state).
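+
+		For illustration only (this is not part of the ABI text),
+		selecting the eMMC boot source for the next reset from
+		userspace could look like the following sketch::
+
+			#include <fcntl.h>
+			#include <unistd.h>
+
+			int main(void)
+			{
+				int fd = open("/sys/bus/platform/devices/MLNXBF04:00"
+					      "/driver/reset_action", O_WRONLY);
+
+				if (fd < 0)
+					return 1;
+				/* request boot from the on-chip eMMC at the next reset */
+				if (write(fd, "emmc", 4) != 4) {
+					close(fd);
+					return 1;
+				}
+				close(fd);
+				return 0;
+			}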
diff --git a/Documentation/ABI/testing/sysfs-platform-wilco-ec b/Documentation/ABI/testing/sysfs-platform-wilco-ec
index 8827a734f933..5f60b184a5a5 100644
--- a/Documentation/ABI/testing/sysfs-platform-wilco-ec
+++ b/Documentation/ABI/testing/sysfs-platform-wilco-ec
@@ -31,6 +31,23 @@ Description:
		Output will be a version string similar to the example below:
08B6
+What: /sys/bus/platform/devices/GOOG000C\:00/usb_charge
+Date: October 2019
+KernelVersion: 5.5
+Description:
+ Control the USB PowerShare Policy. USB PowerShare is a policy
+ which affects charging via the special USB PowerShare port
+ (marked with a small lightning bolt or battery icon) when in
+ low power states:
+ - In S0, the port will always provide power.
+ - In S0ix, if usb_charge is enabled, then power will be
+		  supplied to the port when on AC or if the battery is > 50%.
+ Else no power is supplied.
+ - In S5, if usb_charge is enabled, then power will be supplied
+ to the port when on AC. Else no power is supplied.
+
+ Input should be either "0" or "1".
+
What: /sys/bus/platform/devices/GOOG000C\:00/version
Date: May 2019
KernelVersion: 5.3
diff --git a/Documentation/ABI/testing/sysfs-secvar b/Documentation/ABI/testing/sysfs-secvar
new file mode 100644
index 000000000000..feebb8c57294
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-secvar
@@ -0,0 +1,46 @@
+What: /sys/firmware/secvar
+Date: August 2019
+Contact: Nayna Jain <nayna@linux.ibm.com>
+Description:	This directory is created if the POWER firmware supports OS
+		secure boot and, with it, secure variables. It exposes an
+		interface for reading and writing the secure variables.
+
+What: /sys/firmware/secvar/vars
+Date: August 2019
+Contact: Nayna Jain <nayna@linux.ibm.com>
+Description: This directory lists all the secure variables that are supported
+ by the firmware.
+
+What: /sys/firmware/secvar/format
+Date: August 2019
+Contact: Nayna Jain <nayna@linux.ibm.com>
+Description: A string indicating which backend is in use by the firmware.
+ This determines the format of the variable and the accepted
+ format of variable updates.
+
+What: /sys/firmware/secvar/vars/<variable name>
+Date: August 2019
+Contact: Nayna Jain <nayna@linux.ibm.com>
+Description:	Each secure variable is represented as a directory named
+ <variable_name>. The variable name is unique and is in ASCII
+ representation. The data and size can be determined by reading
+ their respective attribute files.
+
+What: /sys/firmware/secvar/vars/<variable_name>/size
+Date: August 2019
+Contact: Nayna Jain <nayna@linux.ibm.com>
+Description: An integer representation of the size of the content of the
+ variable. In other words, it represents the size of the data.
+
+What: /sys/firmware/secvar/vars/<variable_name>/data
+Date: August 2019
+Contact:	Nayna Jain <nayna@linux.ibm.com>
+Description: A read-only file containing the value of the variable. The size
+ of the file represents the maximum size of the variable data.
+
+What: /sys/firmware/secvar/vars/<variable_name>/update
+Date: August 2019
+Contact: Nayna Jain <nayna@linux.ibm.com>
+Description: A write-only file that is used to submit the new value for the
+ variable. The size of the file represents the maximum size of
+ the variable data that can be written.
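+
+For illustration, a consumer might size its buffer from the "size"
+attribute before reading "data"; a sketch (the variable name "db" is
+hypothetical and depends on the backend in use)::
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/sys/firmware/secvar/vars/db/size", "r");
+		unsigned long size;
+		char *buf;
+		int fd;
+
+		if (!f || fscanf(f, "%lu", &size) != 1)
+			return 1;
+		fclose(f);
+
+		buf = malloc(size);
+		fd = open("/sys/firmware/secvar/vars/db/data", O_RDONLY);
+		if (!buf || fd < 0)
+			return 1;
+		printf("read %zd of %lu bytes\n", read(fd, buf, size), size);
+		close(fd);
+		free(buf);
+		return 0;
+	}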
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 8f8d97f65d73..29dcbe8826e8 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -5,24 +5,6 @@ DMA attributes
This document describes the semantics of the DMA attributes that are
defined in linux/dma-mapping.h.
-DMA_ATTR_WRITE_BARRIER
-----------------------
-
-DMA_ATTR_WRITE_BARRIER is a (write) barrier attribute for DMA. DMA
-to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces
-all pending DMA writes to complete, and thus provides a mechanism to
-strictly order DMA from a device across all intervening busses and
-bridges. This barrier is not specific to a particular type of
-interconnect, it applies to the system as a whole, and so its
-implementation must account for the idiosyncrasies of the system all
-the way from the DMA device to memory.
-
-As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
-useful, suppose that a device does a DMA write to indicate that data is
-ready and available in memory. The DMA of the "completion indication"
-could race with data DMA. Mapping the memory used for completion
-indications with DMA_ATTR_WRITE_BARRIER would prevent the race.
-
DMA_ATTR_WEAK_ORDERING
----------------------
diff --git a/Documentation/Makefile b/Documentation/Makefile
index e145e4db508b..d77bb607aea4 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -13,7 +13,7 @@ endif
SPHINXBUILD = sphinx-build
SPHINXOPTS =
SPHINXDIRS = .
-_SPHINXDIRS = $(patsubst $(srctree)/Documentation/%/conf.py,%,$(wildcard $(srctree)/Documentation/*/conf.py))
+_SPHINXDIRS = $(patsubst $(srctree)/Documentation/%/index.rst,%,$(wildcard $(srctree)/Documentation/*/index.rst))
SPHINX_CONF = conf.py
PAPER =
BUILDDIR = $(obj)/output
@@ -33,8 +33,6 @@ ifeq ($(HAVE_SPHINX),0)
else # HAVE_SPHINX
-export SPHINXOPTS = $(shell perl -e 'open IN,"sphinx-build --version 2>&1 |"; while (<IN>) { if (m/([\d\.]+)/) { print "-jauto" if ($$1 >= "1.7") } ;} close IN')
-
# User-friendly check for pdflatex and latexmk
HAVE_PDFLATEX := $(shell if which $(PDFLATEX) >/dev/null 2>&1; then echo 1; else echo 0; fi)
HAVE_LATEXMK := $(shell if which latexmk >/dev/null 2>&1; then echo 1; else echo 0; fi)
@@ -67,6 +65,8 @@ quiet_cmd_sphinx = SPHINX $@ --> file://$(abspath $(BUILDDIR)/$3/$4)
cmd_sphinx = $(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media $2 && \
PYTHONDONTWRITEBYTECODE=1 \
BUILDDIR=$(abspath $(BUILDDIR)) SPHINX_CONF=$(abspath $(srctree)/$(src)/$5/$(SPHINX_CONF)) \
+ $(PYTHON) $(srctree)/scripts/jobserver-exec \
+ $(SHELL) $(srctree)/Documentation/sphinx/parallel-wrapper.sh \
$(SPHINXBUILD) \
-b $2 \
-c $(abspath $(srctree)/$(src)) \
@@ -128,8 +128,10 @@ dochelp:
@echo ' pdfdocs - PDF'
@echo ' epubdocs - EPUB'
@echo ' xmldocs - XML'
- @echo ' linkcheckdocs - check for broken external links (will connect to external hosts)'
- @echo ' refcheckdocs - check for references to non-existing files under Documentation'
+ @echo ' linkcheckdocs - check for broken external links'
+ @echo ' (will connect to external hosts)'
+ @echo ' refcheckdocs - check for references to non-existing files under'
+ @echo ' Documentation'
@echo ' cleandocs - clean all generated files'
@echo
@echo ' make SPHINXDIRS="s1 s2" [target] Generate only docs of folder s1, s2'
diff --git a/Documentation/admin-guide/LSM/SafeSetID.rst b/Documentation/admin-guide/LSM/SafeSetID.rst
index 212434ef65ad..7bff07ce4fdd 100644
--- a/Documentation/admin-guide/LSM/SafeSetID.rst
+++ b/Documentation/admin-guide/LSM/SafeSetID.rst
@@ -56,7 +56,7 @@ setid capabilities from the application completely and refactor the process
spawning semantics in the application (e.g. by using a privileged helper program
to do process spawning and UID/GID transitions). Unfortunately, there are a
number of semantics around process spawning that would be affected by this, such
-as fork() calls where the program doesn???t immediately call exec() after the
+as fork() calls where the program doesn't immediately call exec() after the
fork(), parent processes specifying custom environment variables or command line
args for spawned child processes, or inheritance of file handles across a
fork()/exec(). Because of this, a solution that uses a privileged helper in
@@ -72,7 +72,7 @@ own user namespace, and only approved UIDs/GIDs could be mapped back to the
initial system user namespace, effectively preventing privilege escalation.
Unfortunately, it is not generally feasible to use user namespaces in isolation,
without pairing them with other namespace types, which is not always an option.
-Linux checks for capabilities based off of the user namespace that ???owns??? some
+Linux checks for capabilities based off of the user namespace that "owns" some
entity. For example, Linux has the notion that network namespaces are owned by
the user namespace in which they were created. A consequence of this is that
capability checks for access to a given network namespace are done by checking
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 007ba86aef78..0636bcb60b5a 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1120,8 +1120,9 @@ PAGE_SIZE multiple when read back.
Best-effort memory protection. If the memory usage of a
cgroup is within its effective low boundary, the cgroup's
- memory won't be reclaimed unless memory can be reclaimed
- from unprotected cgroups. Above the effective low boundary (or
+ memory won't be reclaimed unless there is no reclaimable
+ memory available in unprotected cgroups.
+ Above the effective low boundary (or
effective min boundary if it is higher), pages are reclaimed
proportionally to the overage, reducing reclaim pressure for
smaller overages.
@@ -1288,7 +1289,12 @@ PAGE_SIZE multiple when read back.
inactive_anon, active_anon, inactive_file, active_file, unevictable
Amount of memory, swap-backed and filesystem-backed,
on the internal memory management lists used by the
- page reclaim algorithm
+ page reclaim algorithm.
+
+	  As these represent internal list state (e.g. shmem pages are on anon
+ memory management lists), inactive_foo + active_foo may not be equal to
+ the value for the foo counter, since the foo counter is type-based, not
+ list-based.
slab_reclaimable
Part of "slab" that might be reclaimed, such as
@@ -1920,7 +1926,7 @@ Cpuset Interface Files
It accepts only the following input values when written to.
- "root" - a paritition root
+ "root" - a partition root
"member" - a non-root member of a partition
When set to be a partition root, the current cgroup is the
diff --git a/Documentation/driver-api/dell_rbu.rst b/Documentation/admin-guide/dell_rbu.rst
index 5d1ce7bcd04d..8d70e1fc9f9d 100644
--- a/Documentation/driver-api/dell_rbu.rst
+++ b/Documentation/admin-guide/dell_rbu.rst
@@ -1,11 +1,11 @@
-=============================================================
-Usage of the new open sourced rbu (Remote BIOS Update) driver
-=============================================================
+=========================================
+Dell Remote BIOS Update driver (dell_rbu)
+=========================================
Purpose
=======
-Document demonstrating the use of the Dell Remote BIOS Update driver.
+Document demonstrating the use of the Dell Remote BIOS Update driver
for updating BIOS images on Dell servers and desktops.
Scope
@@ -37,7 +37,7 @@ maintains a link list of packets for reading them back.
If the dell_rbu driver is unloaded all the allocated memory is freed.
-The rbu driver needs to have an application (as mentioned above)which will
+The rbu driver needs to have an application (as mentioned above) which will
inform the BIOS to enable the update in the next system reboot.
The user should not unload the rbu driver after downloading the BIOS image
@@ -71,7 +71,7 @@ be downloaded. It is done as below::
echo XXXX > /sys/devices/platform/dell_rbu/packet_size
In the packet update mechanism, the user needs to create a new file having
-packets of data arranged back to back. It can be done as follows
+packets of data arranged back to back. It can be done as follows:
The user creates a packet header, gets the chunk of the BIOS image and
places it next to the packetheader; now, the packetheader + BIOS image chunk
added together should match the specified packet_size. This makes one
@@ -114,7 +114,7 @@ The entries can be recreated by doing the following::
echo init > /sys/devices/platform/dell_rbu/image_type
-.. note:: echoing init in image_type does not change it original value.
+.. note:: echoing init in image_type does not change its original value.
Also the driver provides /sys/devices/platform/dell_rbu/data readonly file to
read back the image downloaded.
diff --git a/Documentation/admin-guide/device-mapper/dm-dust.txt b/Documentation/admin-guide/device-mapper/dm-dust.rst
index 954d402a1f6a..b6e7e7ead831 100644
--- a/Documentation/admin-guide/device-mapper/dm-dust.txt
+++ b/Documentation/admin-guide/device-mapper/dm-dust.rst
@@ -31,218 +31,233 @@ configured "bad blocks" will be treated as bad, or bypassed.
This allows the pre-writing of test data and metadata prior to
simulating a "failure" event where bad sectors start to appear.
-Table parameters:
------------------
+Table parameters
+----------------
<device_path> <offset> <blksz>
Mandatory parameters:
- <device_path>: path to the block device.
- <offset>: offset to data area from start of device_path
- <blksz>: block size in bytes
+ <device_path>:
+ Path to the block device.
+
+ <offset>:
+ Offset to data area from start of device_path
+
+ <blksz>:
+ Block size in bytes
+
(minimum 512, maximum 1073741824, must be a power of 2)
-Usage instructions:
--------------------
+Usage instructions
+------------------
-First, find the size (in 512-byte sectors) of the device to be used:
+First, find the size (in 512-byte sectors) of the device to be used::
-$ sudo blockdev --getsz /dev/vdb1
-33552384
+ $ sudo blockdev --getsz /dev/vdb1
+ 33552384
Create the dm-dust device:
(For a device with a block size of 512 bytes)
-$ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
+
+::
+
+ $ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
(For a device with a block size of 4096 bytes)
-$ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 4096'
+
+::
+
+ $ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 4096'
Check the status of the read behavior ("bypass" indicates that all I/O
-will be passed through to the underlying device):
-$ sudo dmsetup status dust1
-0 33552384 dust 252:17 bypass
+will be passed through to the underlying device)::
+
+ $ sudo dmsetup status dust1
+ 0 33552384 dust 252:17 bypass
-$ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=128 iflag=direct
-128+0 records in
-128+0 records out
+ $ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=128 iflag=direct
+ 128+0 records in
+ 128+0 records out
-$ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
-128+0 records in
-128+0 records out
+ $ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
+ 128+0 records in
+ 128+0 records out
-Adding and removing bad blocks:
--------------------------------
+Adding and removing bad blocks
+------------------------------
At any time (i.e.: whether the device has the "bad block" emulation
enabled or disabled), bad blocks may be added or removed from the
-device via the "addbadblock" and "removebadblock" messages:
+device via the "addbadblock" and "removebadblock" messages::
-$ sudo dmsetup message dust1 0 addbadblock 60
-kernel: device-mapper: dust: badblock added at block 60
+ $ sudo dmsetup message dust1 0 addbadblock 60
+ kernel: device-mapper: dust: badblock added at block 60
-$ sudo dmsetup message dust1 0 addbadblock 67
-kernel: device-mapper: dust: badblock added at block 67
+ $ sudo dmsetup message dust1 0 addbadblock 67
+ kernel: device-mapper: dust: badblock added at block 67
-$ sudo dmsetup message dust1 0 addbadblock 72
-kernel: device-mapper: dust: badblock added at block 72
+ $ sudo dmsetup message dust1 0 addbadblock 72
+ kernel: device-mapper: dust: badblock added at block 72
These bad blocks will be stored in the "bad block list".
-While the device is in "bypass" mode, reads and writes will succeed:
+While the device is in "bypass" mode, reads and writes will succeed::
-$ sudo dmsetup status dust1
-0 33552384 dust 252:17 bypass
+ $ sudo dmsetup status dust1
+ 0 33552384 dust 252:17 bypass
-Enabling block read failures:
------------------------------
+Enabling block read failures
+----------------------------
-To enable the "fail read on bad block" behavior, send the "enable" message:
+To enable the "fail read on bad block" behavior, send the "enable" message::
-$ sudo dmsetup message dust1 0 enable
-kernel: device-mapper: dust: enabling read failures on bad sectors
+ $ sudo dmsetup message dust1 0 enable
+ kernel: device-mapper: dust: enabling read failures on bad sectors
-$ sudo dmsetup status dust1
-0 33552384 dust 252:17 fail_read_on_bad_block
+ $ sudo dmsetup status dust1
+ 0 33552384 dust 252:17 fail_read_on_bad_block
With the device in "fail read on bad block" mode, attempting to read a
-block will encounter an "Input/output error":
+block will encounter an "Input/output error"::
-$ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=1 skip=67 iflag=direct
-dd: error reading '/dev/mapper/dust1': Input/output error
-0+0 records in
-0+0 records out
-0 bytes copied, 0.00040651 s, 0.0 kB/s
+ $ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=1 skip=67 iflag=direct
+ dd: error reading '/dev/mapper/dust1': Input/output error
+ 0+0 records in
+ 0+0 records out
+ 0 bytes copied, 0.00040651 s, 0.0 kB/s
...and writing to the bad blocks will remove the blocks from the list,
-therefore emulating the "remap" behavior of hard disk drives:
+therefore emulating the "remap" behavior of hard disk drives::
-$ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
-128+0 records in
-128+0 records out
+ $ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
+ 128+0 records in
+ 128+0 records out
-kernel: device-mapper: dust: block 60 removed from badblocklist by write
-kernel: device-mapper: dust: block 67 removed from badblocklist by write
-kernel: device-mapper: dust: block 72 removed from badblocklist by write
-kernel: device-mapper: dust: block 87 removed from badblocklist by write
+ kernel: device-mapper: dust: block 60 removed from badblocklist by write
+ kernel: device-mapper: dust: block 67 removed from badblocklist by write
+ kernel: device-mapper: dust: block 72 removed from badblocklist by write
+ kernel: device-mapper: dust: block 87 removed from badblocklist by write
-Bad block add/remove error handling:
-------------------------------------
+Bad block add/remove error handling
+-----------------------------------
Attempting to add a bad block that already exists in the list will
-result in an "Invalid argument" error, as well as a helpful message:
+result in an "Invalid argument" error, as well as a helpful message::
-$ sudo dmsetup message dust1 0 addbadblock 88
-device-mapper: message ioctl on dust1 failed: Invalid argument
-kernel: device-mapper: dust: block 88 already in badblocklist
+ $ sudo dmsetup message dust1 0 addbadblock 88
+ device-mapper: message ioctl on dust1 failed: Invalid argument
+ kernel: device-mapper: dust: block 88 already in badblocklist
Attempting to remove a bad block that doesn't exist in the list will
-result in an "Invalid argument" error, as well as a helpful message:
+result in an "Invalid argument" error, as well as a helpful message::
-$ sudo dmsetup message dust1 0 removebadblock 87
-device-mapper: message ioctl on dust1 failed: Invalid argument
-kernel: device-mapper: dust: block 87 not found in badblocklist
+ $ sudo dmsetup message dust1 0 removebadblock 87
+ device-mapper: message ioctl on dust1 failed: Invalid argument
+ kernel: device-mapper: dust: block 87 not found in badblocklist
-Counting the number of bad blocks in the bad block list:
---------------------------------------------------------
+Counting the number of bad blocks in the bad block list
+-------------------------------------------------------
To count the number of bad blocks configured in the device, run the
-following message command:
+following message command::
-$ sudo dmsetup message dust1 0 countbadblocks
+ $ sudo dmsetup message dust1 0 countbadblocks
A message will print with the number of bad blocks currently
-configured on the device:
+configured on the device::
-kernel: device-mapper: dust: countbadblocks: 895 badblock(s) found
+ kernel: device-mapper: dust: countbadblocks: 895 badblock(s) found
-Querying for specific bad blocks:
----------------------------------
+Querying for specific bad blocks
+--------------------------------
To find out if a specific block is in the bad block list, run the
-following message command:
+following message command::
-$ sudo dmsetup message dust1 0 queryblock 72
+ $ sudo dmsetup message dust1 0 queryblock 72
-The following message will print if the block is in the list:
-device-mapper: dust: queryblock: block 72 found in badblocklist
+The following message will print if the block is in the list::
-The following message will print if the block is in the list:
-device-mapper: dust: queryblock: block 72 not found in badblocklist
+ device-mapper: dust: queryblock: block 72 found in badblocklist
+
+The following message will print if the block is not in the list::
+
+ device-mapper: dust: queryblock: block 72 not found in badblocklist
The "queryblock" message command will work in both the "enabled"
and "disabled" modes, allowing the verification of whether a block
will be treated as "bad" without having to issue I/O to the device,
or having to "enable" the bad block emulation.
-Clearing the bad block list:
-----------------------------
+Clearing the bad block list
+---------------------------
To clear the bad block list (without needing to individually run
a "removebadblock" message command for every block), run the
-following message command:
+following message command::
-$ sudo dmsetup message dust1 0 clearbadblocks
+ $ sudo dmsetup message dust1 0 clearbadblocks
-After clearing the bad block list, the following message will appear:
+After clearing the bad block list, the following message will appear::
-kernel: device-mapper: dust: clearbadblocks: badblocks cleared
+ kernel: device-mapper: dust: clearbadblocks: badblocks cleared
If there were no bad blocks to clear, the following message will
-appear:
+appear::
-kernel: device-mapper: dust: clearbadblocks: no badblocks found
+ kernel: device-mapper: dust: clearbadblocks: no badblocks found
-Message commands list:
-----------------------
+Message commands list
+---------------------
Below is a list of the messages that can be sent to a dust device:
-Operations on blocks (requires a <blknum> argument):
+Operations on blocks (requires a <blknum> argument)::
-addbadblock <blknum>
-queryblock <blknum>
-removebadblock <blknum>
+ addbadblock <blknum>
+ queryblock <blknum>
+ removebadblock <blknum>
...where <blknum> is a block number within range of the device
- (corresponding to the block size of the device.)
+(corresponding to the block size of the device).
-Single argument message commands:
+Single argument message commands::
-countbadblocks
-clearbadblocks
-disable
-enable
-quiet
+ countbadblocks
+ clearbadblocks
+ disable
+ enable
+ quiet
-Device removal:
----------------
+Device removal
+--------------
-When finished, remove the device via the "dmsetup remove" command:
+When finished, remove the device via the "dmsetup remove" command::
-$ sudo dmsetup remove dust1
+ $ sudo dmsetup remove dust1
-Quiet mode:
------------
+Quiet mode
+----------
On test runs with many bad blocks, it may be desirable to avoid
excessive logging (from bad blocks added, removed, or "remapped").
-This can be done by enabling "quiet mode" via the following message:
+This can be done by enabling "quiet mode" via the following message::
-$ sudo dmsetup message dust1 0 quiet
+ $ sudo dmsetup message dust1 0 quiet
This will suppress log messages from add / remove / removed by write
operations. Log messages from "countbadblocks" or "queryblock"
message commands will still print in quiet mode.
-The status of quiet mode can be seen by running "dmsetup status":
+The status of quiet mode can be seen by running "dmsetup status"::
-$ sudo dmsetup status dust1
-0 33552384 dust 252:17 fail_read_on_bad_block quiet
+ $ sudo dmsetup status dust1
+ 0 33552384 dust 252:17 fail_read_on_bad_block quiet
-To disable quiet mode, send the "quiet" message again:
+To disable quiet mode, send the "quiet" message again::
-$ sudo dmsetup message dust1 0 quiet
+ $ sudo dmsetup message dust1 0 quiet
-$ sudo dmsetup status dust1
-0 33552384 dust 252:17 fail_read_on_bad_block verbose
+ $ sudo dmsetup status dust1
+ 0 33552384 dust 252:17 fail_read_on_bad_block verbose
(The presence of "verbose" indicates normal logging.)
diff --git a/Documentation/admin-guide/device-mapper/index.rst b/Documentation/admin-guide/device-mapper/index.rst
index c77c58b8f67b..4872fb6d2952 100644
--- a/Documentation/admin-guide/device-mapper/index.rst
+++ b/Documentation/admin-guide/device-mapper/index.rst
@@ -9,6 +9,7 @@ Device Mapper
cache
delay
dm-crypt
+ dm-dust
dm-flakey
dm-init
dm-integrity
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 34cc20ee7f3a..4405b7485312 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -57,60 +57,61 @@ configure specific aspects of kernel behavior to your liking.
.. toctree::
:maxdepth: 1
- initrd
- cgroup-v2
- cgroup-v1/index
- serial-console
- braille-console
- parport
- md
- module-signing
- rapidio
- sysrq
- unicode
- vga-softcursor
- binfmt-misc
- mono
- java
- ras
- bcache
- blockdev/index
- ext4
- binderfs
- cifs/index
- xfs
- jfs
- ufs
- pm/index
- thunderbolt
- LSM/index
- mm/index
- namespaces/index
- perf-security
acpi/index
aoe/index
+ auxdisplay/index
+ bcache
+ binderfs
+ binfmt-misc
+ blockdev/index
+ braille-console
btmrvl
+ cgroup-v1/index
+ cgroup-v2
+ cifs/index
clearing-warn-once
cpu-load
cputopology
+ dell_rbu
device-mapper/index
efi-stub
+ ext4
gpio/index
highuid
hw_random
+ initrd
iostats
+ java
+ jfs
kernel-per-CPU-kthreads
laptops/index
- auxdisplay/index
lcd-panel-cgram
ldm
lockup-watchdogs
+ LSM/index
+ md
+ mm/index
+ module-signing
+ mono
+ namespaces/index
numastat
+ parport
+ perf-security
+ pm/index
pnp
+ rapidio
+ ras
rtc
+ serial-console
svga
- wimax/index
+ sysrq
+ thunderbolt
+ ufs
+ unicode
+ vga-softcursor
video-output
+ wimax/index
+ xfs
.. only:: subproject and html
diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
index 4f0462af3ca7..df5b8345c41d 100644
--- a/Documentation/admin-guide/iostats.rst
+++ b/Documentation/admin-guide/iostats.rst
@@ -46,78 +46,79 @@ each snapshot of your disk statistics.
In 2.4, the statistics fields are those after the device name. In
the above example, the first field of statistics would be 446216.
By contrast, in 2.6+ if you look at ``/sys/block/hda/stat``, you'll
-find just the eleven fields, beginning with 446216. If you look at
-``/proc/diskstats``, the eleven fields will be preceded by the major and
+find just the 15 fields, beginning with 446216. If you look at
+``/proc/diskstats``, the 15 fields will be preceded by the major and
minor device numbers, and device name. Each of these formats provides
-eleven fields of statistics, each meaning exactly the same things.
+15 fields of statistics, with the same meaning in each format.
All fields except field 9 are cumulative since boot. Field 9 should
go to zero as I/Os complete; all others only increase (unless they
-overflow and wrap). Yes, these are (32-bit or 64-bit) unsigned long
-(native word size) numbers, and on a very busy or long-lived system they
-may wrap. Applications should be prepared to deal with that; unless
-your observations are measured in large numbers of minutes or hours,
-they should not wrap twice before you notice them.
+overflow and wrap). Wrapping might eventually occur on a very busy
+or long-lived system, so applications should be prepared to deal with
+it. Regarding wrapping, the types of the fields are either unsigned
+int (32-bit) or unsigned long (32-bit or 64-bit, depending on your
+machine), as noted per-field below. Unless your observations are widely
+spread in time, these fields should not wrap twice before you notice it.
Each set of stats only applies to the indicated device; if you want
system-wide stats you'll have to find all the devices and sum them all up.
-Field 1 -- # of reads completed
+Field 1 -- # of reads completed (unsigned long)
This is the total number of reads completed successfully.
-Field 2 -- # of reads merged, field 6 -- # of writes merged
+Field 2 -- # of reads merged, field 6 -- # of writes merged (unsigned long)
Reads and writes which are adjacent to each other may be merged for
efficiency. Thus two 4K reads may become one 8K read before it is
ultimately handed to the disk, and so it will be counted (and queued)
as only one I/O. This field lets you know how often this was done.
-Field 3 -- # of sectors read
+Field 3 -- # of sectors read (unsigned long)
This is the total number of sectors read successfully.
-Field 4 -- # of milliseconds spent reading
+Field 4 -- # of milliseconds spent reading (unsigned int)
This is the total number of milliseconds spent by all reads (as
measured from __make_request() to end_that_request_last()).
-Field 5 -- # of writes completed
+Field 5 -- # of writes completed (unsigned long)
This is the total number of writes completed successfully.
-Field 6 -- # of writes merged
+Field 6 -- # of writes merged (unsigned long)
See the description of field 2.
-Field 7 -- # of sectors written
+Field 7 -- # of sectors written (unsigned long)
This is the total number of sectors written successfully.
-Field 8 -- # of milliseconds spent writing
+Field 8 -- # of milliseconds spent writing (unsigned int)
This is the total number of milliseconds spent by all writes (as
measured from __make_request() to end_that_request_last()).
-Field 9 -- # of I/Os currently in progress
+Field 9 -- # of I/Os currently in progress (unsigned int)
The only field that should go to zero. Incremented as requests are
given to appropriate struct request_queue and decremented as they finish.
-Field 10 -- # of milliseconds spent doing I/Os
+Field 10 -- # of milliseconds spent doing I/Os (unsigned int)
This field increases so long as field 9 is nonzero.
Since 5.0 this field counts jiffies when at least one request was
started or completed. If a request runs for more than 2 jiffies then some
I/O time will not be accounted unless there are other requests.
-Field 11 -- weighted # of milliseconds spent doing I/Os
+Field 11 -- weighted # of milliseconds spent doing I/Os (unsigned int)
This field is incremented at each I/O start, I/O completion, I/O
merge, or read of these stats by the number of I/Os in progress
(field 9) times the number of milliseconds spent doing I/O since the
last update of this field. This can provide an easy measure of both
I/O completion time and the backlog that may be accumulating.
-Field 12 -- # of discards completed
+Field 12 -- # of discards completed (unsigned long)
This is the total number of discards completed successfully.
-Field 13 -- # of discards merged
+Field 13 -- # of discards merged (unsigned long)
    See the description of field 2.
-Field 14 -- # of sectors discarded
+Field 14 -- # of sectors discarded (unsigned long)
This is the total number of sectors discarded successfully.
-Field 15 -- # of milliseconds spent discarding
+Field 15 -- # of milliseconds spent discarding (unsigned int)
This is the total number of milliseconds spent by all discards (as
measured from __make_request() to end_that_request_last()).
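+
+Putting the field list together, a minimal C reader for ``/proc/diskstats``
+might look like the sketch below (extra trailing fields on newer kernels
+are simply ignored by the sscanf)::
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/proc/diskstats", "r");
+		char line[512], name[32];
+		unsigned int major, minor;
+		unsigned long long s[15];
+
+		if (!f)
+			return 1;
+		while (fgets(line, sizeof(line), f)) {
+			if (sscanf(line, "%u %u %31s"
+				   " %llu %llu %llu %llu %llu %llu %llu %llu"
+				   " %llu %llu %llu %llu %llu %llu %llu",
+				   &major, &minor, name,
+				   &s[0], &s[1], &s[2], &s[3], &s[4], &s[5],
+				   &s[6], &s[7], &s[8], &s[9], &s[10], &s[11],
+				   &s[12], &s[13], &s[14]) == 18)
+				/* fields 1, 5 and 9: reads, writes, in flight */
+				printf("%s r=%llu w=%llu inflight=%llu\n",
+				       name, s[0], s[4], s[8]);
+		}
+		fclose(f);
+		return 0;
+	}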
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index d05d531b4ec9..6d421694d98e 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -127,6 +127,7 @@ parameter is applicable::
NET Appropriate network support is enabled.
NUMA NUMA support is enabled.
NFS Appropriate NFS support is enabled.
+ OF Devicetree is enabled.
OSS OSS sound support is enabled.
PV_OPS A paravirtualized kernel is enabled.
PARIDE The ParIDE (parallel port IDE) subsystem is enabled.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a0a4732eedbb..5a92d89a1bd4 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -437,8 +437,6 @@
no delay (0).
Format: integer
- bootmem_debug [KNL] Enable bootmem allocator debug messages.
-
bert_disable [ACPI]
Disable BERT OS support on buggy BIOSes.
@@ -983,12 +981,10 @@
earlycon= [KNL] Output early console device and options.
- [ARM64] The early console is determined by the
- stdout-path property in device tree's chosen node,
- or determined by the ACPI SPCR table.
-
- [X86] When used with no options the early console is
- determined by the ACPI SPCR table.
+ When used with no options, the early console is
+ determined by stdout-path property in device tree's
+ chosen node or the ACPI SPCR table if supported by
+ the platform.
cdns,<addr>[,options]
Start an early, polled-mode console on a Cadence
@@ -1101,7 +1097,7 @@
mapped with the correct attributes.
linflex,<addr>
- Use early console provided by Freescale LinFlex UART
+ Use early console provided by Freescale LINFlexD UART
serial driver for NXP S32V234 SoCs. A valid base
address must be provided, and the serial port must
already be setup and configured.
@@ -1168,7 +1164,8 @@
Format: {"off" | "on" | "skip[mbr]"}
efi= [EFI]
- Format: { "old_map", "nochunk", "noruntime", "debug" }
+ Format: { "old_map", "nochunk", "noruntime", "debug",
+ "nosoftreserve" }
old_map [X86-64]: switch to the old ioremap-based EFI
runtime services mapping. 32-bit still uses this one by
default.
@@ -1177,6 +1174,12 @@
firmware implementations.
noruntime : disable EFI runtime services support
debug: enable misc debug output
+ nosoftreserve: The EFI_MEMORY_SP (Specific Purpose)
+ attribute may cause the kernel to reserve the
+ memory range for a memory mapping driver to
+ claim. Specify efi=nosoftreserve to disable this
+ reservation and treat the memory by its base type
+ (i.e. EFI_CONVENTIONAL_MEMORY / "System RAM").
efi_no_storage_paranoia [EFI; X86]
Using this parameter you can use more than 50% of
@@ -1189,15 +1192,21 @@
updating original EFI memory map.
			The region of memory to which the aa attribute is
			added runs from ss to ss+nn.
from ss to ss+nn.
+
If efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000
is specified, EFI_MEMORY_MORE_RELIABLE(0x10000)
attribute is added to range 0x100000000-0x180000000 and
0x10a0000000-0x1120000000.
+ If efi_fake_mem=8G@9G:0x40000 is specified, the
+ EFI_MEMORY_SP(0x40000) attribute is added to
+ range 0x240000000-0x43fffffff.
+
Using this parameter you can do debugging of EFI memmap
- related feature. For example, you can do debugging of
+ related features. For example, you can do debugging of
Address Range Mirroring feature even if your box
- doesn't support it.
+ doesn't support it, or mark specific memory as
+ "soft reserved".
efivar_ssdt= [EFI; X86] Name of an EFI variable that contains an SSDT
that is to be dynamically loaded by Linux. If there are
@@ -3227,6 +3236,12 @@
This can be set from sysctl after boot.
See Documentation/admin-guide/sysctl/vm.rst for details.
+ of_devlink [OF, KNL] Create device links between consumer and
+ supplier devices by scanning the devictree to infer the
+ consumer/supplier relationships. A consumer device
+ will not be probed until all the supplier devices have
+ probed successfully.
+
ohci1394_dma=early [HW] enable debugging via the ohci1394 driver.
See Documentation/debugging-via-ohci1394.txt for more
info.
@@ -3525,8 +3540,15 @@
hpiosize=nn[KMG] The fixed amount of bus space which is
reserved for hotplug bridge's IO window.
Default size is 256 bytes.
+ hpmmiosize=nn[KMG] The fixed amount of bus space which is
+ reserved for hotplug bridge's MMIO window.
+ Default size is 2 megabytes.
+ hpmmioprefsize=nn[KMG] The fixed amount of bus space which is
+ reserved for hotplug bridge's MMIO_PREF window.
+ Default size is 2 megabytes.
hpmemsize=nn[KMG] The fixed amount of bus space which is
- reserved for hotplug bridge's memory window.
+ reserved for hotplug bridge's MMIO and
+ MMIO_PREF window.
Default size is 2 megabytes.
hpbussize=nn The minimum amount of additional bus numbers
reserved for buses below a hotplug bridge.
@@ -3573,6 +3595,8 @@
even if the platform doesn't give the OS permission to
use them. This may cause conflicts if the platform
also tries to use these services.
+ dpc-native Use native PCIe service for DPC only. May
+ cause conflicts if firmware uses AER or DPC.
compat Disable native PCIe services (PME, AER, DPC, PCIe
hotplug).
@@ -5101,13 +5125,13 @@
Flags is a set of characters, each corresponding
to a common usb-storage quirk flag as follows:
a = SANE_SENSE (collect more than 18 bytes
- of sense data);
+ of sense data, not on uas);
b = BAD_SENSE (don't collect more than 18
- bytes of sense data);
+ bytes of sense data, not on uas);
c = FIX_CAPACITY (decrease the reported
device capacity by one sector);
d = NO_READ_DISC_INFO (don't use
- READ_DISC_INFO command);
+ READ_DISC_INFO command, not on uas);
e = NO_READ_CAPACITY_16 (don't use
READ_CAPACITY_16 command);
f = NO_REPORT_OPCODES (don't use report opcodes
@@ -5122,17 +5146,18 @@
j = NO_REPORT_LUNS (don't use report luns
command, uas only);
l = NOT_LOCKABLE (don't try to lock and
- unlock ejectable media);
+ unlock ejectable media, not on uas);
m = MAX_SECTORS_64 (don't transfer more
- than 64 sectors = 32 KB at a time);
+ than 64 sectors = 32 KB at a time,
+ not on uas);
n = INITIAL_READ10 (force a retry of the
- initial READ(10) command);
+ initial READ(10) command, not on uas);
o = CAPACITY_OK (accept the capacity
- reported by the device);
+ reported by the device, not on uas);
p = WRITE_CACHE (the device cache is ON
- by default);
+ by default, not on uas);
r = IGNORE_RESIDUE (the device reports
- bogus residue values);
+ bogus residue values, not on uas);
s = SINGLE_LUN (the device has only one
Logical Unit);
t = NO_ATA_1X (don't allow ATA(12) and ATA(16)
@@ -5141,7 +5166,8 @@
w = NO_WP_DETECT (don't test whether the
medium is write-protected).
y = ALWAYS_SYNC (issue a SYNCHRONIZE_CACHE
- even if the device claims no cache)
+ even if the device claims no cache,
+ not on uas)
Example: quirks=0419:aaf5:rl,0421:0433:rc
user_debug= [KNL,ARM]
diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst
index 90056e4e8859..3726a10a03ba 100644
--- a/Documentation/admin-guide/perf/imx-ddr.rst
+++ b/Documentation/admin-guide/perf/imx-ddr.rst
@@ -19,7 +19,9 @@ devices/imx8_ddr0/format/. The "events" directory describes the events types
hardware supported that can be used with perf tool, see /sys/bus/event_source/
devices/imx8_ddr0/events/. The "caps" directory describes filter features implemented
in DDR PMU, see /sys/bus/events_source/devices/imx8_ddr0/caps/.
- e.g.::
+
+ .. code-block:: bash
+
perf stat -a -e imx8_ddr0/cycles/ cmd
perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd
@@ -35,24 +37,31 @@ value 1 for supported.
Filter is defined with two configuration parts:
--AXI_ID defines AxID matching value.
--AXI_MASKING defines which bits of AxID are meaningful for the matching.
- 0:corresponding bit is masked.
- 1: corresponding bit is not masked, i.e. used to do the matching.
+
+ - 0: corresponding bit is masked.
+ - 1: corresponding bit is not masked, i.e. used to do the matching.
AXI_ID and AXI_MASKING are mapped on DPCR1 register in performance counter.
When the non-masked bits match the corresponding AXI_ID bits, the counter is
incremented. The perf counter is incremented if
- AxID && AXI_MASKING == AXI_ID && AXI_MASKING
+ AxID && AXI_MASKING == AXI_ID && AXI_MASKING
This filter doesn't support filtering different AXI IDs for the axid-read and
axid-write events at the same time, as the filter is shared between counters.
- e.g.::
- perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
- perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
-
- NOTE: axi_mask is inverted in userspace(i.e. set bits are bits to mask), and
- it will be reverted in driver automatically. so that the user can just specify
- axi_id to monitor a specific id, rather than having to specify axi_mask.
- e.g.::
+
+ .. code-block:: bash
+
+ perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
+ perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
+
+ .. note::
+
+    axi_mask is inverted in userspace (i.e. set bits are bits to mask), and
+    it will be reverted in the driver automatically, so that the user can just
+    specify axi_id to monitor a specific ID, rather than having to specify axi_mask.
+
+ .. code-block:: bash
+
perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12
* With DDR_CAP_AXI_ID_FILTER_ENHANCED quirk(filter: 1, enhanced_filter: 1).
diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst
index ee4bfd2a740f..47c99f40cc16 100644
--- a/Documentation/admin-guide/perf/index.rst
+++ b/Documentation/admin-guide/perf/index.rst
@@ -8,6 +8,7 @@ Performance monitor support
:maxdepth: 1
hisi-pmu
+ imx-ddr
qcom_l2_pmu
qcom_l3_pmu
arm-ccn
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 032c7cd3cede..def074807cee 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -831,8 +831,8 @@ printk_ratelimit:
=================
Some warning messages are rate limited. printk_ratelimit specifies
-the minimum length of time between these messages (in jiffies), by
-default we allow one every 5 seconds.
+the minimum length of time between these messages (in seconds).
+The default value is 5 seconds.
A value of 0 will disable rate limiting.
@@ -845,6 +845,8 @@ seconds, we do allow a burst of messages to pass through.
printk_ratelimit_burst specifies the number of messages we can
send before ratelimiting kicks in.
+The default value is 10 messages.
+
printk_devkmsg:
===============
@@ -1101,7 +1103,7 @@ During initialization the kernel sets this value such that even if the
maximum number of threads is created, the thread structures occupy only
a part (1/8th) of the available RAM pages.
-The minimum value that can be written to threads-max is 20.
+The minimum value that can be written to threads-max is 1.
The maximum value that can be written to threads-max is given by the
constant FUTEX_TID_MASK (0x3fffffff).
@@ -1109,10 +1111,6 @@ constant FUTEX_TID_MASK (0x3fffffff).
If a value outside of this range is written to threads-max an error
EINVAL occurs.
-The value written is checked against the available RAM pages. If the
-thread structures would occupy too much (more than 1/8th) of the
-available RAM pages threads-max is reduced accordingly.
-
unknown_nmi_panic:
==================
diff --git a/Documentation/conf.py b/Documentation/conf.py
index a8fe845832bc..3c7bdf4cd31f 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -37,7 +37,8 @@ needs_sphinx = '1.3'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'cdomain',
- 'kfigure', 'sphinx.ext.ifconfig', 'automarkup']
+ 'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
+ 'maintainers_include']
# The name of the math extension changed on Sphinx 1.4
if (major == 1 and minor > 3) or (major > 1):
diff --git a/Documentation/core-api/genalloc.rst b/Documentation/core-api/genalloc.rst
index 6b38a39fab24..098a46f55798 100644
--- a/Documentation/core-api/genalloc.rst
+++ b/Documentation/core-api/genalloc.rst
@@ -23,7 +23,7 @@ begins with the creation of a pool using one of:
.. kernel-doc:: lib/genalloc.c
:functions: devm_gen_pool_create
-A call to :c:func:`gen_pool_create` will create a pool. The granularity of
+A call to gen_pool_create() will create a pool. The granularity of
allocations is set with min_alloc_order; it is a log-base-2 number like
those used by the page allocator, but it refers to bytes rather than pages.
So, if min_alloc_order is passed as 3, then all allocations will be a
@@ -32,7 +32,7 @@ required to track the memory in the pool. The nid parameter specifies
which NUMA node should be used for the allocation of the housekeeping
structures; it can be -1 if the caller doesn't care.
-The "managed" interface :c:func:`devm_gen_pool_create` ties the pool to a
+The "managed" interface devm_gen_pool_create() ties the pool to a
specific device. Among other things, it will automatically clean up the
pool when the given device is destroyed.
@@ -53,32 +53,32 @@ to the pool. That can be done with one of:
:functions: gen_pool_add
.. kernel-doc:: lib/genalloc.c
- :functions: gen_pool_add_virt
+ :functions: gen_pool_add_owner
-A call to :c:func:`gen_pool_add` will place the size bytes of memory
+A call to gen_pool_add() will place the size bytes of memory
starting at addr (in the kernel's virtual address space) into the given
pool, once again using nid as the node ID for ancillary memory allocations.
-The :c:func:`gen_pool_add_virt` variant associates an explicit physical
+The gen_pool_add_virt() variant associates an explicit physical
address with the memory; this is only necessary if the pool will be used
for DMA allocations.
The functions for allocating memory from the pool (and putting it back)
are:
-.. kernel-doc:: lib/genalloc.c
+.. kernel-doc:: include/linux/genalloc.h
:functions: gen_pool_alloc
.. kernel-doc:: lib/genalloc.c
:functions: gen_pool_dma_alloc
.. kernel-doc:: lib/genalloc.c
- :functions: gen_pool_free
+ :functions: gen_pool_free_owner
-As one would expect, :c:func:`gen_pool_alloc` will allocate size< bytes
-from the given pool. The :c:func:`gen_pool_dma_alloc` variant allocates
+As one would expect, gen_pool_alloc() will allocate size bytes
+from the given pool. The gen_pool_dma_alloc() variant allocates
memory for use with DMA operations, returning the associated physical
address in the space pointed to by dma. This will only work if the memory
-was added with :c:func:`gen_pool_add_virt`. Note that this function
+was added with gen_pool_add_virt(). Note that this function
departs from the usual genpool pattern of using unsigned long values to
represent kernel addresses; it returns a void * instead.
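+
+For orientation, the calls described above might be combined as in the
+following minimal sketch (error paths trimmed; the backing memory region
+is assumed to have been set aside by the caller)::
+
+	#include <linux/genalloc.h>
+
+	static struct gen_pool *pool;
+
+	static int example_init(void *vaddr, size_t size)
+	{
+		unsigned long chunk;
+
+		/* 8-byte (2^3) allocation granularity, no NUMA preference */
+		pool = gen_pool_create(3, -1);
+		if (!pool)
+			return -ENOMEM;
+
+		/* hand the caller's region over to the pool */
+		if (gen_pool_add(pool, (unsigned long)vaddr, size, -1)) {
+			gen_pool_destroy(pool);
+			return -ENOMEM;
+		}
+
+		chunk = gen_pool_alloc(pool, 64);	/* grab 64 bytes */
+		if (chunk)
+			gen_pool_free(pool, chunk, 64);	/* and return them */
+		return 0;
+	}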
@@ -89,14 +89,14 @@ return. If that sort of control is needed, the following functions will be
of interest:
.. kernel-doc:: lib/genalloc.c
- :functions: gen_pool_alloc_algo
+ :functions: gen_pool_alloc_algo_owner
.. kernel-doc:: lib/genalloc.c
:functions: gen_pool_set_algo
-Allocations with :c:func:`gen_pool_alloc_algo` specify an algorithm to be
+Allocations with gen_pool_alloc_algo() specify an algorithm to be
used to choose the memory to be allocated; the default algorithm can be set
-with :c:func:`gen_pool_set_algo`. The data value is passed to the
+with gen_pool_set_algo(). The data value is passed to the
algorithm; most ignore it, but it is occasionally needed. One can,
naturally, write a special-purpose algorithm, but there is a fair set
already available:
diff --git a/Documentation/core-api/genericirq.rst b/Documentation/core-api/genericirq.rst
index 4da67b65cecf..8f06d885c310 100644
--- a/Documentation/core-api/genericirq.rst
+++ b/Documentation/core-api/genericirq.rst
@@ -26,7 +26,7 @@ Rationale
=========
The original implementation of interrupt handling in Linux uses the
-:c:func:`__do_IRQ` super-handler, which is able to deal with every type of
+__do_IRQ() super-handler, which is able to deal with every type of
interrupt logic.
Originally, Russell King identified different types of handlers to build
@@ -43,7 +43,7 @@ During the implementation we identified another type:
- Fast EOI type
-In the SMP world of the :c:func:`__do_IRQ` super-handler another type was
+In the SMP world of the __do_IRQ() super-handler another type was
identified:
- Per CPU type
@@ -83,7 +83,7 @@ IRQ-flow implementation for 'level type' interrupts and add a
(sub)architecture specific 'edge type' implementation.
To make the transition to the new model easier and prevent the breakage
-of existing implementations, the :c:func:`__do_IRQ` super-handler is still
+of existing implementations, the __do_IRQ() super-handler is still
available. This leads to a kind of duality for the time being. Over time
the new model should be used in more and more architectures, as it
enables smaller and cleaner IRQ subsystems. It's deprecated for three
@@ -116,7 +116,7 @@ status information and pointers to the interrupt flow method and the
interrupt chip structure which are assigned to this interrupt.
Whenever an interrupt triggers, the low-level architecture code calls
-into the generic interrupt code by calling :c:func:`desc->handle_irq`. This
+into the generic interrupt code by calling desc->handle_irq(). This
high-level IRQ handling function only uses desc->irq_data.chip
primitives referenced by the assigned chip descriptor structure.
@@ -125,27 +125,29 @@ High-level Driver API
The high-level Driver API consists of following functions:
-- :c:func:`request_irq`
+- request_irq()
-- :c:func:`free_irq`
+- request_threaded_irq()
-- :c:func:`disable_irq`
+- free_irq()
-- :c:func:`enable_irq`
+- disable_irq()
-- :c:func:`disable_irq_nosync` (SMP only)
+- enable_irq()
-- :c:func:`synchronize_irq` (SMP only)
+- disable_irq_nosync() (SMP only)
-- :c:func:`irq_set_irq_type`
+- synchronize_irq() (SMP only)
-- :c:func:`irq_set_irq_wake`
+- irq_set_irq_type()
-- :c:func:`irq_set_handler_data`
+- irq_set_irq_wake()
-- :c:func:`irq_set_chip`
+- irq_set_handler_data()
-- :c:func:`irq_set_chip_data`
+- irq_set_chip()
+
+- irq_set_chip_data()
See the autogenerated function documentation for details.
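+
+As a quick illustration of the request/free pair above, a driver might do
+something like the following sketch (the IRQ number and per-device cookie
+are assumed to come from the bus code)::
+
+	#include <linux/interrupt.h>
+
+	static irqreturn_t example_isr(int irq, void *dev_id)
+	{
+		/* quiesce the device here, then report that we handled it */
+		return IRQ_HANDLED;
+	}
+
+	static int example_probe(unsigned int irq, void *dev)
+	{
+		/* dev must be a unique, non-NULL cookie with IRQF_SHARED */
+		return request_irq(irq, example_isr, IRQF_SHARED,
+				   "example", dev);
+	}
+
+	static void example_remove(unsigned int irq, void *dev)
+	{
+		free_irq(irq, dev);
+	}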
@@ -154,19 +156,19 @@ High-level IRQ flow handlers
The generic layer provides a set of pre-defined irq-flow methods:
-- :c:func:`handle_level_irq`
+- handle_level_irq()
-- :c:func:`handle_edge_irq`
+- handle_edge_irq()
-- :c:func:`handle_fasteoi_irq`
+- handle_fasteoi_irq()
-- :c:func:`handle_simple_irq`
+- handle_simple_irq()
-- :c:func:`handle_percpu_irq`
+- handle_percpu_irq()
-- :c:func:`handle_edge_eoi_irq`
+- handle_edge_eoi_irq()
-- :c:func:`handle_bad_irq`
+- handle_bad_irq()
The interrupt flow handlers (either pre-defined or architecture
specific) are assigned to specific interrupts by the architecture either
@@ -325,14 +327,14 @@ Delayed interrupt disable
This per interrupt selectable feature, which was introduced by Russell
King in the ARM interrupt implementation, does not mask an interrupt at
-the hardware level when :c:func:`disable_irq` is called. The interrupt is kept
+the hardware level when disable_irq() is called. The interrupt is kept
enabled and is masked in the flow handler when an interrupt event
happens. This prevents losing edge interrupts on hardware which does not
store an edge interrupt event while the interrupt is disabled at the
hardware level. When an interrupt arrives while the IRQ_DISABLED flag
is set, then the interrupt is masked at the hardware level and the
IRQ_PENDING bit is set. When the interrupt is re-enabled by
-:c:func:`enable_irq` the pending bit is checked and if it is set, the interrupt
+enable_irq() the pending bit is checked and if it is set, the interrupt
is resent either via hardware or by a software resend mechanism. (It's
necessary to enable CONFIG_HARDIRQS_SW_RESEND when you want to use
the delayed interrupt disable feature and your hardware is not capable
@@ -369,7 +371,7 @@ handler(s) to use these basic units of low-level functionality.
__do_IRQ entry point
====================
-The original implementation :c:func:`__do_IRQ` was an alternative entry point
+The original implementation __do_IRQ() was an alternative entry point
for all types of interrupts. It no longer exists.
This handler turned out to be not suitable for all interrupt hardware
diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst
index 939e3dfc86e9..4aa82ddd01b8 100644
--- a/Documentation/core-api/memory-allocation.rst
+++ b/Documentation/core-api/memory-allocation.rst
@@ -88,10 +88,11 @@ Selecting memory allocator
==========================
The most straightforward way to allocate memory is to use a function
-from the :c:func:`kmalloc` family. And, to be on the safe size it's
-best to use routines that set memory to zero, like
-:c:func:`kzalloc`. If you need to allocate memory for an array, there
-are :c:func:`kmalloc_array` and :c:func:`kcalloc` helpers.
+from the kmalloc() family. And, to be on the safe side it's best to use
+routines that set memory to zero, like kzalloc(). If you need to
+allocate memory for an array, there are kmalloc_array() and kcalloc()
+helpers. The helpers struct_size(), array_size() and array3_size() can
+be used to safely calculate object sizes without overflowing.
The maximal size of a chunk that can be allocated with `kmalloc` is
limited. The actual limit depends on the hardware and the kernel
@@ -102,29 +103,26 @@ The address of a chunk allocated with `kmalloc` is aligned to at least
ARCH_KMALLOC_MINALIGN bytes. For sizes which are a power of two, the
alignment is also guaranteed to be at least the respective size.
-For large allocations you can use :c:func:`vmalloc` and
-:c:func:`vzalloc`, or directly request pages from the page
-allocator. The memory allocated by `vmalloc` and related functions is
-not physically contiguous.
+For large allocations you can use vmalloc() and vzalloc(), or directly
+request pages from the page allocator. The memory allocated by `vmalloc`
+and related functions is not physically contiguous.
If you are not sure whether the allocation size is too large for
-`kmalloc`, it is possible to use :c:func:`kvmalloc` and its
-derivatives. It will try to allocate memory with `kmalloc` and if the
-allocation fails it will be retried with `vmalloc`. There are
-restrictions on which GFP flags can be used with `kvmalloc`; please
-see :c:func:`kvmalloc_node` reference documentation. Note that
-`kvmalloc` may return memory that is not physically contiguous.
+`kmalloc`, it is possible to use kvmalloc() and its derivatives. It will
+try to allocate memory with `kmalloc` and if the allocation fails it
+will be retried with `vmalloc`. There are restrictions on which GFP
+flags can be used with `kvmalloc`; please see kvmalloc_node() reference
+documentation. Note that `kvmalloc` may return memory that is not
+physically contiguous.
If you need to allocate many identical objects you can use the slab
-cache allocator. The cache should be set up with
-:c:func:`kmem_cache_create` or :c:func:`kmem_cache_create_usercopy`
-before it can be used. The second function should be used if a part of
-the cache might be copied to the userspace. After the cache is
-created :c:func:`kmem_cache_alloc` and its convenience wrappers can
-allocate memory from that cache.
-
-When the allocated memory is no longer needed it must be freed. You
-can use :c:func:`kvfree` for the memory allocated with `kmalloc`,
-`vmalloc` and `kvmalloc`. The slab caches should be freed with
-:c:func:`kmem_cache_free`. And don't forget to destroy the cache with
-:c:func:`kmem_cache_destroy`.
+cache allocator. The cache should be set up with kmem_cache_create() or
+kmem_cache_create_usercopy() before it can be used. The second function
+should be used if a part of the cache might be copied to the userspace.
+After the cache is created kmem_cache_alloc() and its convenience
+wrappers can allocate memory from that cache.
+
+When the allocated memory is no longer needed it must be freed. You can
+use kvfree() for the memory allocated with `kmalloc`, `vmalloc` and
+`kvmalloc`. The slab caches should be freed with kmem_cache_free(). And
+don't forget to destroy the cache with kmem_cache_destroy().
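+
+A minimal sketch of the slab-cache lifecycle just described (all names are
+illustrative)::
+
+	#include <linux/slab.h>
+
+	struct foo {
+		int a, b;
+	};
+
+	static struct kmem_cache *foo_cache;
+
+	static int foo_init(void)
+	{
+		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
+					      SLAB_HWCACHE_ALIGN, NULL);
+		return foo_cache ? 0 : -ENOMEM;
+	}
+
+	static void foo_use(void)
+	{
+		struct foo *obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
+
+		if (obj)
+			kmem_cache_free(foo_cache, obj);
+	}
+
+	static void foo_exit(void)
+	{
+		kmem_cache_destroy(foo_cache);
+	}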
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 128e8a721c1e..be726986ff75 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -11,7 +11,7 @@ User Space Memory Access
.. kernel-doc:: arch/x86/lib/usercopy_32.c
:export:
-.. kernel-doc:: mm/util.c
+.. kernel-doc:: mm/gup.c
:functions: get_user_pages_fast
.. _mm-api-gfp-flags:
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 050f34f3a70f..8ebe46b1af39 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -98,8 +98,6 @@ Symbols/Function Pointers
%pS versatile_init+0x0/0x110
%ps versatile_init
- %pF versatile_init+0x0/0x110
- %pf versatile_init
%pSR versatile_init+0x9/0x110
(with __builtin_extract_return_addr() translation)
%pB prev_fn_of_versatile_init+0x88/0x88
@@ -109,14 +107,6 @@ The ``S`` and ``s`` specifiers are used for printing a pointer in symbolic
format. They result in the symbol name with (S) or without (s)
offsets. If KALLSYMS is disabled then the symbol address is printed instead.
-Note, that the ``F`` and ``f`` specifiers are identical to ``S`` (``s``)
-and thus deprecated. We have ``F`` and ``f`` because on ia64, ppc64 and
-parisc64 function pointers are indirect and, in fact, are function
-descriptors, which require additional dereferencing before we can lookup
-the symbol. As of now, ``S`` and ``s`` perform dereferencing on those
-platforms (when needed), so ``F`` and ``f`` exist for compatibility
-reasons only.
-
The ``B`` specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
@@ -147,6 +137,20 @@ equivalent to %lx (or %lu). %px is preferred because it is more uniquely
grep'able. If in the future we need to modify the way the kernel handles
printing pointers we will be better equipped to find the call sites.
+Pointer Differences
+-------------------
+
+::
+
+ %td 2560
+ %tx a00
+
+For printing pointer differences, use the %t modifier for ptrdiff_t.
+
+Example::
+
+ printk("test: difference between pointers: %td\n", ptr2 - ptr1);
+
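+The hexadecimal form uses ``%tx`` in the same way (illustrative)::
+
+	printk("test: difference between pointers: %tx\n", ptr2 - ptr1);
+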
Struct Resources
----------------
@@ -440,6 +444,30 @@ Examples::
Passed by reference.
+Fwnode handles
+--------------
+
+::
+
+ %pfw[fP]
+
+For printing information on fwnode handles. The default is to print the full
+node name, including the path. The modifiers are functionally equivalent to
+%pOF above.
+
+ - f - full name of the node, including the path
+ - P - the name of the node including an address (if there is one)
+
+Examples (ACPI)::
+
+ %pfwf \_SB.PCI0.CIO2.port@1.endpoint@0 - Full node name
+ %pfwP endpoint@0 - Node name
+
+Examples (OF)::
+
+ %pfwf /ocp@68000000/i2c@48072000/camera@10/port/endpoint - Full name
+ %pfwP endpoint - Node name
+
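+Example of a call producing such output, where ``fwnode`` is assumed to
+be a ``struct fwnode_handle *``::
+
+	printk(KERN_INFO "probed: %pfwf\n", fwnode);
+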
Time and date (struct rtc_time)
-------------------------------
diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst
index 976e85adffe8..79a009ce11df 100644
--- a/Documentation/core-api/refcount-vs-atomic.rst
+++ b/Documentation/core-api/refcount-vs-atomic.rst
@@ -35,7 +35,7 @@ atomics & refcounters only provide atomicity and
program order (po) relation (on the same CPU). It guarantees that
each ``atomic_*()`` and ``refcount_*()`` operation is atomic and instructions
are executed in program order on a single CPU.
-This is implemented using :c:func:`READ_ONCE`/:c:func:`WRITE_ONCE` and
+This is implemented using READ_ONCE()/WRITE_ONCE() and
compare-and-swap primitives.
A strong (full) memory ordering guarantees that all prior loads and
@@ -44,7 +44,7 @@ before any po-later instruction is executed on the same CPU.
It also guarantees that all po-earlier stores on the same CPU
and all propagated stores from other CPUs must propagate to all
other CPUs before any po-later instruction is executed on the original
-CPU (A-cumulative property). This is implemented using :c:func:`smp_mb`.
+CPU (A-cumulative property). This is implemented using smp_mb().
A RELEASE memory ordering guarantees that all prior loads and
stores (all po-earlier instructions) on the same CPU are completed
@@ -52,14 +52,14 @@ before the operation. It also guarantees that all po-earlier
stores on the same CPU and all propagated stores from other CPUs
must propagate to all other CPUs before the release operation
(A-cumulative property). This is implemented using
-:c:func:`smp_store_release`.
+smp_store_release().
An ACQUIRE memory ordering guarantees that all post loads and
stores (all po-later instructions) on the same CPU are
completed after the acquire operation. It also guarantees that all
po-later stores on the same CPU must propagate to all other CPUs
after the acquire operation executes. This is implemented using
-:c:func:`smp_acquire__after_ctrl_dep`.
+smp_acquire__after_ctrl_dep().
A control dependency (on success) for refcounters guarantees that
if a reference for an object was successfully obtained (reference
@@ -78,8 +78,8 @@ case 1) - non-"Read/Modify/Write" (RMW) ops
Function changes:
- * :c:func:`atomic_set` --> :c:func:`refcount_set`
- * :c:func:`atomic_read` --> :c:func:`refcount_read`
+ * atomic_set() --> refcount_set()
+ * atomic_read() --> refcount_read()
Memory ordering guarantee changes:
@@ -91,8 +91,8 @@ case 2) - increment-based ops that return no value
Function changes:
- * :c:func:`atomic_inc` --> :c:func:`refcount_inc`
- * :c:func:`atomic_add` --> :c:func:`refcount_add`
+ * atomic_inc() --> refcount_inc()
+ * atomic_add() --> refcount_add()
Memory ordering guarantee changes:
@@ -103,7 +103,7 @@ case 3) - decrement-based RMW ops that return no value
Function changes:
- * :c:func:`atomic_dec` --> :c:func:`refcount_dec`
+ * atomic_dec() --> refcount_dec()
Memory ordering guarantee changes:
@@ -115,8 +115,8 @@ case 4) - increment-based RMW ops that return a value
Function changes:
- * :c:func:`atomic_inc_not_zero` --> :c:func:`refcount_inc_not_zero`
- * no atomic counterpart --> :c:func:`refcount_add_not_zero`
+ * atomic_inc_not_zero() --> refcount_inc_not_zero()
+ * no atomic counterpart --> refcount_add_not_zero()
Memory ordering guarantees changes:
@@ -131,8 +131,8 @@ case 5) - generic dec/sub decrement-based RMW ops that return a value
Function changes:
- * :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test`
- * :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test`
+ * atomic_dec_and_test() --> refcount_dec_and_test()
+ * atomic_sub_and_test() --> refcount_sub_and_test()
Memory ordering guarantees changes:
@@ -144,14 +144,14 @@ case 6) other decrement-based RMW ops that return a value
Function changes:
- * no atomic counterpart --> :c:func:`refcount_dec_if_one`
+ * no atomic counterpart --> refcount_dec_if_one()
* ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)``
Memory ordering guarantees changes:
* fully ordered --> RELEASE ordering + control dependency
-.. note:: :c:func:`atomic_add_unless` only provides full order on success.
+.. note:: atomic_add_unless() only provides full order on success.
case 7) - lock-based RMW
@@ -159,10 +159,10 @@ case 7) - lock-based RMW
Function changes:
- * :c:func:`atomic_dec_and_lock` --> :c:func:`refcount_dec_and_lock`
- * :c:func:`atomic_dec_and_mutex_lock` --> :c:func:`refcount_dec_and_mutex_lock`
+ * atomic_dec_and_lock() --> refcount_dec_and_lock()
+ * atomic_dec_and_mutex_lock() --> refcount_dec_and_mutex_lock()
Memory ordering guarantees changes:
* fully ordered --> RELEASE ordering + control dependency + hold
- :c:func:`spin_lock` on success
+ spin_lock() on success
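+
+As an illustrative sketch of the case 5) conversion above (``struct
+foo`` and foo_release() are hypothetical)::
+
+	struct foo {
+		refcount_t refs;	/* was: atomic_t refs; */
+	};
+
+	void foo_put(struct foo *f)
+	{
+		/* was: if (atomic_dec_and_test(&f->refs)) */
+		if (refcount_dec_and_test(&f->refs))
+			foo_release(f);
+	}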
diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst
index 982ed7b568ac..9b76337f6756 100644
--- a/Documentation/core-api/symbol-namespaces.rst
+++ b/Documentation/core-api/symbol-namespaces.rst
@@ -152,3 +152,6 @@ in-tree modules::
- notice the warning of modpost telling about a missing import
- run `make nsdeps` to add the import to the correct code location
+You can also run nsdeps for external module builds. A typical usage is::
+
+ $ make -C <path_to_kernel_src> M=$PWD nsdeps
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 525296121d89..e4d66e7c50de 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -218,3 +218,66 @@ brk handler is used to print bug reports.
A potential expansion of this mode is a hardware tag-based mode, which would
use hardware memory tagging support instead of compiler instrumentation and
manual shadow memory manipulation.
+
+What memory accesses are sanitised by KASAN?
+--------------------------------------------
+
+The kernel maps memory in a number of different parts of the address
+space. This poses something of a problem for KASAN, which requires
+that all addresses accessed by instrumented code have a valid shadow
+region.
+
+The range of kernel virtual addresses is large: there is not enough
+real memory to support a real shadow region for every address that
+could be accessed by the kernel.
+
+By default
+~~~~~~~~~~
+
+By default, architectures only map real memory over the shadow region
+for the linear mapping (and potentially other small areas). For all
+other areas - such as vmalloc and vmemmap space - a single read-only
+page is mapped over the shadow area. This read-only shadow page
+declares all memory accesses as permitted.
+
+This presents a problem for modules: they do not live in the linear
+mapping, but in a dedicated module space. By hooking into the module
+allocator, KASAN can temporarily map real shadow memory to cover
+them. This allows detection of invalid accesses to module globals, for
+example.
+
+This also creates an incompatibility with ``VMAP_STACK``: if the stack
+lives in vmalloc space, it will be shadowed by the read-only page, and
+the kernel will fault when trying to set up the shadow data for stack
+variables.
+
+CONFIG_KASAN_VMALLOC
+~~~~~~~~~~~~~~~~~~~~
+
+With ``CONFIG_KASAN_VMALLOC``, KASAN can cover vmalloc space at the
+cost of greater memory usage. Currently this is only supported on x86.
+
+This works by hooking into vmalloc and vmap, and dynamically
+allocating real shadow memory to back the mappings.
+
+Most mappings in vmalloc space are small, requiring less than a full
+page of shadow space. Allocating a full shadow page per mapping would
+therefore be wasteful. Furthermore, to ensure that different mappings
+use different shadow pages, mappings would have to be aligned to
+``KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE``.
+
+Instead, we share backing space across multiple mappings. We allocate
+a backing page when a mapping in vmalloc space uses a particular page
+of the shadow region. This page can be shared by other vmalloc
+mappings later on.
+
+We hook into the vmap infrastructure to lazily clean up unused shadow
+memory.
+
+To avoid the difficulties of swapping mappings around, we expect
+that the part of the shadow region that covers the vmalloc space will
+not be covered by the early shadow page, but will be left
+unmapped. This will require changes in arch-specific code.
+
+This allows ``VMAP_STACK`` support on x86, and can simplify support of
+architectures that do not have a fixed module region.
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 3621cd5e1eef..3a289e8a1d12 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -69,7 +69,7 @@ the kernel command line.
Memory may be allocated or freed before kmemleak is initialised and
these actions are stored in an early log buffer. The size of this buffer
-is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
+is configured via the CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE option.
If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is enabled, kmemleak is
disabled by default. Passing ``kmemleak=on`` on the kernel command
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 5138a2f6232a..646cb3525373 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -12,7 +12,6 @@ $(obj)/%.example.dts: $(src)/%.yaml FORCE
$(call if_changed,chk_binding)
DT_TMP_SCHEMA := processed-schema.yaml
-extra-y += $(DT_TMP_SCHEMA)
quiet_cmd_mk_schema = SCHEMA $@
cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs)
@@ -26,8 +25,12 @@ DT_DOCS = $(shell \
DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
+ifeq ($(CHECK_DTBS),)
extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
extra-y += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
+endif
$(obj)/$(DT_TMP_SCHEMA): $(DT_SCHEMA_FILES) FORCE
$(call if_changed,mk_schema)
+
+extra-y += $(DT_TMP_SCHEMA)
diff --git a/Documentation/devicetree/bindings/arm/amlogic/smp-sram.txt b/Documentation/devicetree/bindings/arm/amlogic/smp-sram.txt
deleted file mode 100644
index 3473ddaadfac..000000000000
--- a/Documentation/devicetree/bindings/arm/amlogic/smp-sram.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Amlogic Meson8 and Meson8b SRAM for smp bringup:
-------------------------------------------------
-
-Amlogic's SMP-capable SoCs use part of the sram for the bringup of the cores.
-Once the core gets powered up it executes the code that is residing at a
-specific location.
-
-Therefore a reserved section sub-node has to be added to the mmio-sram
-declaration.
-
-Required sub-node properties:
-- compatible : depending on the SoC this should be one of:
- "amlogic,meson8-smp-sram"
- "amlogic,meson8b-smp-sram"
-
-The rest of the properties should follow the generic mmio-sram discription
-found in ../../misc/sram.txt
-
-Example:
-
- sram: sram@d9000000 {
- compatible = "mmio-sram";
- reg = <0xd9000000 0x20000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0xd9000000 0x20000>;
-
- smp-sram@1ff80 {
- compatible = "amlogic,meson8b-smp-sram";
- reg = <0x1ff80 0x8>;
- };
- };
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index 083dbf96ee00..f493d69e6194 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -100,7 +100,7 @@ Required sub-node properties:
[0] http://infocenter.arm.com/help/topic/com.arm.doc.den0056a/index.html
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/power/power_domain.txt
+[2] Documentation/devicetree/bindings/power/power-domain.yaml
[3] Documentation/devicetree/bindings/thermal/thermal.txt
[4] Documentation/devicetree/bindings/sram/sram.txt
[5] Documentation/devicetree/bindings/reset/reset.txt
diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt
index 401831973638..7b83ef43b418 100644
--- a/Documentation/devicetree/bindings/arm/arm,scpi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scpi.txt
@@ -110,7 +110,7 @@ Required properties:
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/thermal/thermal.txt
[3] Documentation/devicetree/bindings/sram/sram.txt
-[4] Documentation/devicetree/bindings/power/power_domain.txt
+[4] Documentation/devicetree/bindings/power/power-domain.yaml
Example:
diff --git a/Documentation/devicetree/bindings/arm/axentia.txt b/Documentation/devicetree/bindings/arm/axentia.txt
deleted file mode 100644
index de58f2463880..000000000000
--- a/Documentation/devicetree/bindings/arm/axentia.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Device tree bindings for Axentia ARM devices
-============================================
-
-Linea CPU module
-----------------
-
-Required root node properties:
-compatible = "axentia,linea",
- "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
-and following the rules from atmel-at91.txt for a sama5d31 SoC.
-
-
-Nattis v2 board with Natte v2 power board
------------------------------------------
-
-Required root node properties:
-compatible = "axentia,nattis-2", "axentia,natte-2", "axentia,linea",
- "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
-and following the rules from above for the axentia,linea CPU module.
-
-
-TSE-850 v3 board
-----------------
-
-Required root node properties:
-compatible = "axentia,tse850v3", "axentia,linea",
- "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
-and following the rules from above for the axentia,linea CPU module.
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index fcc3bacfd8bc..d02c42d21f2f 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -87,6 +87,15 @@ its hardware characteristcs.
* port or ports: see "Graph bindings for Coresight" below.
+* Optional properties for all components:
+
+ * arm,coresight-loses-context-with-cpu : boolean. Indicates that the
+ hardware will lose register context on CPU power down (e.g. CPUIdle).
+    An example of where this may be needed is a system which contains a
+    coresight component and CPU in the same power domain. When the CPU
+    powers down, the coresight component also powers down and loses its
+    context. This property is currently only used for the ETM 4.x driver.
+
* Optional properties for ETM/PTMs:
* arm,cp14: must be present if the system accesses ETM/PTM management
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index c149fadc6f47..e07735a8c2c7 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -124,7 +124,7 @@ Required properties for Pinctrl sub nodes:
CONFIG settings.
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/power/power_domain.txt
+[2] Documentation/devicetree/bindings/power/power-domain.yaml
[3] Documentation/devicetree/bindings/pinctrl/fsl,imx-pinctrl.txt
RTC bindings based on SCU Message Protocol
@@ -157,6 +157,15 @@ Required properties:
Optional properties:
- timeout-sec: contains the watchdog timeout in seconds.
+SCU key bindings based on SCU Message Protocol
+------------------------------------------------------------
+
+Required properties:
+- compatible: should be:
+ "fsl,imx8qxp-sc-key"
+ followed by "fsl,imx-sc-key";
+- linux,keycodes: See Documentation/devicetree/bindings/input/keys.txt
+
Example (imx8qxp):
-------------
aliases {
@@ -220,6 +229,11 @@ firmware {
compatible = "fsl,imx8qxp-sc-rtc";
};
+ scu_key: scu-key {
+ compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key";
+ linux,keycodes = <KEY_POWER>;
+ };
+
watchdog {
compatible = "fsl,imx8qxp-sc-wdt", "fsl,imx-sc-wdt";
timeout-sec = <60>;
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index b301f753ed2c..e77635c5422c 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -43,7 +43,7 @@ SoC Families:
- OMAP2 generic - defaults to OMAP2420
compatible = "ti,omap2"
-- OMAP3 generic - defaults to OMAP3430
+- OMAP3 generic
compatible = "ti,omap3"
- OMAP4 generic - defaults to OMAP4430
compatible = "ti,omap4"
@@ -51,6 +51,8 @@ SoC Families:
compatible = "ti,omap5"
- DRA7 generic - defaults to DRA742
compatible = "ti,dra7"
+- AM33x generic
+ compatible = "ti,am33xx"
- AM43x generic - defaults to AM4372
compatible = "ti,am43"
@@ -63,12 +65,14 @@ SoCs:
- OMAP3430
compatible = "ti,omap3430", "ti,omap3"
+ legacy: "ti,omap34xx" - please do not use any more
- AM3517
compatible = "ti,am3517", "ti,omap3"
- OMAP3630
- compatible = "ti,omap36xx", "ti,omap3"
-- AM33xx
- compatible = "ti,am33xx", "ti,omap3"
+ compatible = "ti,omap3630", "ti,omap3"
+ legacy: "ti,omap36xx" - please do not use any more
+- AM335x
+ compatible = "ti,am33xx"
- OMAP4430
compatible = "ti,omap4430", "ti,omap4"
@@ -110,19 +114,19 @@ SoCs:
- AM4372
compatible = "ti,am4372", "ti,am43"
-Boards:
+Boards (incomplete list of examples):
- OMAP3 BeagleBoard : Low cost community board
- compatible = "ti,omap3-beagle", "ti,omap3"
+ compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
- compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
- OMAP4 SDP : Software Development Board
- compatible = "ti,omap4-sdp", "ti,omap4430"
+ compatible = "ti,omap4-sdp", "ti,omap4430", "ti,omap4"
- OMAP4 PandaBoard : Low cost community board
- compatible = "ti,omap4-panda", "ti,omap4430"
+ compatible = "ti,omap4-panda", "ti,omap4430", "ti,omap4"
- OMAP4 DuoVero with Parlor : Commercial expansion board with daughter board
compatible = "gumstix,omap4-duovero-parlor", "gumstix,omap4-duovero", "ti,omap4430", "ti,omap4";
@@ -134,16 +138,16 @@ Boards:
compatible = "variscite,var-dvk-om44", "variscite,var-som-om44", "ti,omap4460", "ti,omap4";
- OMAP3 EVM : Software Development Board for OMAP35x, AM/DM37x
- compatible = "ti,omap3-evm", "ti,omap3"
+ compatible = "ti,omap3-evm", "ti,omap3630", "ti,omap3"
- AM335X EVM : Software Development Board for AM335x
- compatible = "ti,am335x-evm", "ti,am33xx", "ti,omap3"
+ compatible = "ti,am335x-evm", "ti,am33xx"
- AM335X Bone : Low cost community board
- compatible = "ti,am335x-bone", "ti,am33xx", "ti,omap3"
+ compatible = "ti,am335x-bone", "ti,am33xx"
- AM3359 ICEv2 : Low cost Industrial Communication Engine EVM.
- compatible = "ti,am3359-icev2", "ti,am33xx", "ti,omap3"
+ compatible = "ti,am3359-icev2", "ti,am33xx"
- AM335X OrionLXm : Substation Automation Platform
compatible = "novatech,am335x-lxm", "ti,am33xx"
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.txt
deleted file mode 100644
index 85c5dfd4a720..000000000000
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-SAMSUNG Exynos SoCs Chipid driver.
-
-Required properties:
-- compatible : Should at least contain "samsung,exynos4210-chipid".
-
-- reg: offset and length of the register set
-
-Example:
- chipid@10000000 {
- compatible = "samsung,exynos4210-chipid";
- reg = <0x10000000 0x100>;
- };
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
new file mode 100644
index 000000000000..afcd70803c12
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/samsung/exynos-chipid.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC series Chipid driver
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ items:
+ - const: samsung,exynos4210-chipid
+
+ reg:
+ maxItems: 1
+
+ samsung,asv-bin:
+ description:
+      Adaptive Supply Voltage bin selection. This can be used
+      to determine the ASV bin of an SoC if the respective information
+      is missing in the CHIPID registers or in the OTP memory.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1, 2, 3 ]
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ chipid@10000000 {
+ compatible = "samsung,exynos4210-chipid";
+ reg = <0x10000000 0x100>;
+ samsung,asv-bin = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.txt b/Documentation/devicetree/bindings/arm/samsung/pmu.txt
deleted file mode 100644
index 433bfd7593ac..000000000000
--- a/Documentation/devicetree/bindings/arm/samsung/pmu.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-SAMSUNG Exynos SoC series PMU Registers
-
-Properties:
- - compatible : should contain two values. First value must be one from following list:
- - "samsung,exynos3250-pmu" - for Exynos3250 SoC,
- - "samsung,exynos4210-pmu" - for Exynos4210 SoC,
- - "samsung,exynos4412-pmu" - for Exynos4412 SoC,
- - "samsung,exynos5250-pmu" - for Exynos5250 SoC,
- - "samsung,exynos5260-pmu" - for Exynos5260 SoC.
- - "samsung,exynos5410-pmu" - for Exynos5410 SoC,
- - "samsung,exynos5420-pmu" - for Exynos5420 SoC.
- - "samsung,exynos5433-pmu" - for Exynos5433 SoC.
- - "samsung,exynos7-pmu" - for Exynos7 SoC.
- second value must be always "syscon".
-
- - reg : offset and length of the register set.
-
- - #clock-cells : must be <1>, since PMU requires once cell as clock specifier.
- The single specifier cell is used as index to list of clocks
- provided by PMU, which is currently:
- 0 : SoC clock output (CLKOUT pin)
-
- - clock-names : list of clock names for particular CLKOUT mux inputs in
- following format:
- "clkoutN", where N is a decimal number corresponding to
- CLKOUT mux control bits value for given input, e.g.
- "clkout0", "clkout7", "clkout15".
-
- - clocks : list of phandles and specifiers to all input clocks listed in
- clock-names property.
-
-Optional properties:
-
-Some PMUs are capable of behaving as an interrupt controller (mostly
-to wake up a suspended PMU). In which case, they can have the
-following properties:
-
-- interrupt-controller: indicate that said PMU is an interrupt controller
-
-- #interrupt-cells: must be identical to the that of the parent interrupt
- controller.
-
-
-Optional nodes:
-
-- nodes defining the restart and poweroff syscon children
-
-
-Example :
-pmu_system_controller: system-controller@10040000 {
- compatible = "samsung,exynos5250-pmu", "syscon";
- reg = <0x10040000 0x5000>;
- interrupt-controller;
- #interrupt-cells = <3>;
- interrupt-parent = <&gic>;
- #clock-cells = <1>;
- clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
- "clkout4", "clkout8", "clkout9";
- clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
- <&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
- <&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
- <&clock CLK_XUSBXTI>;
-};
-
-Example of clock consumer :
-
-usb3503: usb3503@8 {
- /* ... */
- clock-names = "refclk";
- clocks = <&pmu_system_controller 0>;
- /* ... */
-};
diff --git a/Documentation/devicetree/bindings/arm/samsung/pmu.yaml b/Documentation/devicetree/bindings/arm/samsung/pmu.yaml
new file mode 100644
index 000000000000..73b56fc5bf58
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/pmu.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/samsung/pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC series Power Management Unit (PMU)
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+# Custom select to avoid matching all nodes with 'syscon'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos3250-pmu
+ - samsung,exynos4210-pmu
+ - samsung,exynos4412-pmu
+ - samsung,exynos5250-pmu
+ - samsung,exynos5260-pmu
+ - samsung,exynos5410-pmu
+ - samsung,exynos5420-pmu
+ - samsung,exynos5433-pmu
+ - samsung,exynos7-pmu
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - samsung,exynos3250-pmu
+ - samsung,exynos4210-pmu
+ - samsung,exynos4412-pmu
+ - samsung,exynos5250-pmu
+ - samsung,exynos5260-pmu
+ - samsung,exynos5410-pmu
+ - samsung,exynos5420-pmu
+ - samsung,exynos5433-pmu
+ - samsung,exynos7-pmu
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clock-names:
+ description:
+ List of clock names for particular CLKOUT mux inputs
+ minItems: 1
+ maxItems: 32
+ items:
+ pattern: '^clkout([0-9]|[12][0-9]|3[0-1])$'
+
+ clocks:
+ minItems: 1
+ maxItems: 32
+
+ interrupt-controller:
+ description:
+ Some PMUs are capable of behaving as an interrupt controller (mostly
+ to wake up a suspended PMU).
+
+ '#interrupt-cells':
+ description:
+      Must be identical to that of the parent interrupt controller.
+ const: 3
+
+ syscon-poweroff:
+ $ref: "../../power/reset/syscon-poweroff.yaml#"
+ type: object
+ description:
+ Node for power off method
+
+ syscon-reboot:
+ $ref: "../../power/reset/syscon-reboot.yaml#"
+ type: object
+ description:
+ Node for reboot method
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - clock-names
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5250.h>
+
+ pmu_system_controller: system-controller@10040000 {
+ compatible = "samsung,exynos5250-pmu", "syscon";
+ reg = <0x10040000 0x5000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ #clock-cells = <1>;
+ clock-names = "clkout16";
+ clocks = <&clock CLK_FIN_PLL>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
deleted file mode 100644
index 56021bf2a916..000000000000
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-* Samsung's Exynos and S5P SoC based boards
-
-Required root node properties:
- - compatible = should be one or more of the following.
- - "samsung,aries" - for S5PV210-based Samsung Aries board.
- - "samsung,fascinate4g" - for S5PV210-based Samsung Galaxy S Fascinate 4G (SGH-T959P) board.
- - "samsung,galaxys" - for S5PV210-based Samsung Galaxy S (i9000) board.
- - "samsung,artik5" - for Exynos3250-based Samsung ARTIK5 module.
- - "samsung,artik5-eval" - for Exynos3250-based Samsung ARTIK5 eval board.
- - "samsung,monk" - for Exynos3250-based Samsung Simband board.
- - "samsung,rinato" - for Exynos3250-based Samsung Gear2 board.
- - "samsung,smdkv310" - for Exynos4210-based Samsung SMDKV310 eval board.
- - "samsung,trats" - for Exynos4210-based Tizen Reference board.
- - "samsung,universal_c210" - for Exynos4210-based Samsung board.
- - "samsung,i9300" - for Exynos4412-based Samsung GT-I9300 board.
- - "samsung,i9305" - for Exynos4412-based Samsung GT-I9305 board.
- - "samsung,midas" - for Exynos4412-based Samsung Midas board.
- - "samsung,smdk4412", - for Exynos4412-based Samsung SMDK4412 eval board.
- - "samsung,n710x" - for Exynos4412-based Samsung GT-N7100/GT-N7105 board.
- - "samsung,trats2" - for Exynos4412-based Tizen Reference board.
- - "samsung,smdk5250" - for Exynos5250-based Samsung SMDK5250 eval board.
- - "samsung,xyref5260" - for Exynos5260-based Samsung board.
- - "samsung,smdk5410" - for Exynos5410-based Samsung SMDK5410 eval board.
- - "samsung,smdk5420" - for Exynos5420-based Samsung SMDK5420 eval board.
- - "samsung,tm2" - for Exynos5433-based Samsung TM2 board.
- - "samsung,tm2e" - for Exynos5433-based Samsung TM2E board.
-
-* Other companies Exynos SoC based
- * FriendlyARM
- - "friendlyarm,tiny4412" - for Exynos4412-based FriendlyARM
- TINY4412 board.
- * TOPEET
- - "topeet,itop4412-elite" - for Exynos4412-based TOPEET
- Elite base board.
-
- * Google
- - "google,pi" - for Exynos5800-based Google Peach Pi
- Rev 10+ board,
- also: "google,pi-rev16", "google,pi-rev15", "google,pi-rev14",
- "google,pi-rev13", "google,pi-rev12", "google,pi-rev11",
- "google,pi-rev10", "google,peach".
-
- - "google,pit" - for Exynos5420-based Google Peach Pit
- Rev 6+ (Exynos5420),
- also: "google,pit-rev16", "google,pit-rev15", "google,pit-rev14",
- "google,pit-rev13", "google,pit-rev12", "google,pit-rev11",
- "google,pit-rev10", "google,pit-rev9", "google,pit-rev8",
- "google,pit-rev7", "google,pit-rev6", "google,peach".
-
- - "google,snow-rev4" - for Exynos5250-based Google Snow board,
- also: "google,snow"
- - "google,snow-rev5" - for Exynos5250-based Google Snow
- Rev 5+ board.
- - "google,spring" - for Exynos5250-based Google Spring board.
-
- * Hardkernel
- - "hardkernel,odroid-u3" - for Exynos4412-based Hardkernel Odroid U3.
- - "hardkernel,odroid-x" - for Exynos4412-based Hardkernel Odroid X.
- - "hardkernel,odroid-x2" - for Exynos4412-based Hardkernel Odroid X2.
- - "hardkernel,odroid-xu" - for Exynos5410-based Hardkernel Odroid XU.
- - "hardkernel,odroid-xu3" - for Exynos5422-based Hardkernel Odroid XU3.
- - "hardkernel,odroid-xu3-lite" - for Exynos5422-based Hardkernel
- Odroid XU3 Lite board.
- - "hardkernel,odroid-xu4" - for Exynos5422-based Hardkernel Odroid XU4.
- - "hardkernel,odroid-hc1" - for Exynos5422-based Hardkernel Odroid HC1.
-
- * Insignal
- - "insignal,arndale" - for Exynos5250-based Insignal Arndale board.
- - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale
- Octa board.
- - "insignal,origen" - for Exynos4210-based Insignal Origen board.
- - "insignal,origen4412" - for Exynos4412-based Insignal Origen board.
-
-
-Optional nodes:
- - firmware node, specifying presence and type of secure firmware:
- - compatible: only "samsung,secure-firmware" is currently supported
- - reg: address of non-secure SYSRAM used for communication with firmware
-
- firmware@203f000 {
- compatible = "samsung,secure-firmware";
- reg = <0x0203F000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
new file mode 100644
index 000000000000..63acd57c4799
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
@@ -0,0 +1,181 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/samsung/samsung-boards.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos and S5P SoC based boards
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: S5PV210 based boards
+ items:
+ - enum:
+ - aesop,torbreck # aESOP Torbreck based on S5PV210
+ - samsung,aquila # Samsung Aquila based on S5PC110
+ - samsung,goni # Samsung Goni based on S5PC110
+ - yic,smdkc110 # YIC System SMDKC110 based on S5PC110
+ - yic,smdkv210 # YIC System SMDKV210 based on S5PV210
+ - const: samsung,s5pv210
+
+ - description: S5PV210 based Aries boards
+ items:
+ - enum:
+ - samsung,fascinate4g # Samsung Galaxy S Fascinate 4G (SGH-T959P)
+ - samsung,galaxys # Samsung Galaxy S (i9000)
+ - const: samsung,aries
+ - const: samsung,s5pv210
+
+ - description: Exynos3250 based boards
+ items:
+ - enum:
+ - samsung,monk # Samsung Simband
+ - samsung,rinato # Samsung Gear2
+ - const: samsung,exynos3250
+ - const: samsung,exynos3
+
+ - description: Samsung ARTIK5 boards
+ items:
+ - enum:
+ - samsung,artik5-eval # Samsung ARTIK5 eval board
+ - const: samsung,artik5 # Samsung ARTIK5 module
+ - const: samsung,exynos3250
+ - const: samsung,exynos3
+
+ - description: Exynos4210 based boards
+ items:
+ - enum:
+ - insignal,origen # Insignal Origen
+ - samsung,smdkv310 # Samsung SMDKV310 eval
+ - samsung,trats # Samsung Tizen Reference
+ - samsung,universal_c210 # Samsung C210
+ - const: samsung,exynos4210
+ - const: samsung,exynos4
+
+ - description: Exynos4412 based boards
+ items:
+ - enum:
+ - friendlyarm,tiny4412 # FriendlyARM TINY4412
+ - hardkernel,odroid-u3 # Hardkernel Odroid U3
+ - hardkernel,odroid-x # Hardkernel Odroid X
+ - hardkernel,odroid-x2 # Hardkernel Odroid X2
+ - insignal,origen4412 # Insignal Origen
+ - samsung,smdk4412 # Samsung SMDK4412 eval
+ - topeet,itop4412-elite # TOPEET Elite base
+ - const: samsung,exynos4412
+ - const: samsung,exynos4
+
+ - description: Samsung Midas family boards
+ items:
+ - enum:
+ - samsung,i9300 # Samsung GT-I9300
+ - samsung,i9305 # Samsung GT-I9305
+ - samsung,n710x # Samsung GT-N7100/GT-N7105
+ - samsung,trats2 # Samsung Tizen Reference
+ - const: samsung,midas
+ - const: samsung,exynos4412
+ - const: samsung,exynos4
+
+ - description: Exynos5250 based boards
+ items:
+ - enum:
+ - google,snow-rev5 # Google Snow Rev 5+
+ - google,spring # Google Spring
+ - insignal,arndale # Insignal Arndale
+ - samsung,smdk5250 # Samsung SMDK5250 eval
+ - const: samsung,exynos5250
+ - const: samsung,exynos5
+
+ - description: Google Snow Boards (Rev 4+)
+ items:
+ - const: google,snow-rev4
+ - const: google,snow
+ - const: samsung,exynos5250
+ - const: samsung,exynos5
+
+ - description: Exynos5260 based boards
+ items:
+ - enum:
+ - samsung,xyref5260 # Samsung Xyref5260 eval
+ - const: samsung,exynos5260
+ - const: samsung,exynos5
+
+ - description: Exynos5410 based boards
+ items:
+ - enum:
+ - hardkernel,odroid-xu # Hardkernel Odroid XU
+ - samsung,smdk5410 # Samsung SMDK5410 eval
+ - const: samsung,exynos5410
+ - const: samsung,exynos5
+
+ - description: Exynos5420 based boards
+ items:
+ - enum:
+ - insignal,arndale-octa # Insignal Arndale Octa
+ - samsung,smdk5420 # Samsung SMDK5420 eval
+ - const: samsung,exynos5420
+ - const: samsung,exynos5
+
+ - description: Google Peach Pit Boards (Rev 6+)
+ items:
+ - const: google,pit-rev16
+ - const: google,pit-rev15
+ - const: google,pit-rev14
+ - const: google,pit-rev13
+ - const: google,pit-rev12
+ - const: google,pit-rev11
+ - const: google,pit-rev10
+ - const: google,pit-rev9
+ - const: google,pit-rev8
+ - const: google,pit-rev7
+ - const: google,pit-rev6
+ - const: google,pit
+ - const: google,peach
+ - const: samsung,exynos5420
+ - const: samsung,exynos5
+
+ - description: Exynos5800 based boards
+ items:
+ - enum:
+ - hardkernel,odroid-xu3 # Hardkernel Odroid XU3
+ - hardkernel,odroid-xu3-lite # Hardkernel Odroid XU3 Lite
+ - hardkernel,odroid-xu4 # Hardkernel Odroid XU4
+ - hardkernel,odroid-hc1 # Hardkernel Odroid HC1
+ - const: samsung,exynos5800
+ - const: samsung,exynos5
+
+ - description: Google Peach Pi Boards (Rev 10+)
+ items:
+ - const: google,pi-rev16
+ - const: google,pi-rev15
+ - const: google,pi-rev14
+ - const: google,pi-rev13
+ - const: google,pi-rev12
+ - const: google,pi-rev11
+ - const: google,pi-rev10
+ - const: google,pi
+ - const: google,peach
+ - const: samsung,exynos5800
+ - const: samsung,exynos5
+
+ - description: Exynos5433 based boards
+ items:
+ - enum:
+ - samsung,tm2 # Samsung TM2
+ - samsung,tm2e # Samsung TM2E
+ - const: samsung,exynos5433
+
+ - description: Exynos7 based boards
+ items:
+ - enum:
+ - samsung,exynos7-espresso # Samsung Exynos7 Espresso
+ - const: samsung,exynos7
+
+required:
+ - compatible
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml b/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml
new file mode 100644
index 000000000000..51d23b6f8a94
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-secure-firmware.yaml
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/samsung/samsung-secure-firmware.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos Secure Firmware
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ items:
+ - const: samsung,secure-firmware
+
+ reg:
+ description:
+ Address of non-secure SYSRAM used for communication with firmware.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ firmware@203f000 {
+ compatible = "samsung,secure-firmware";
+ reg = <0x0203f000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/samsung/sysreg.txt b/Documentation/devicetree/bindings/arm/samsung/sysreg.txt
deleted file mode 100644
index 4fced6e9d5e4..000000000000
--- a/Documentation/devicetree/bindings/arm/samsung/sysreg.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-SAMSUNG S5P/Exynos SoC series System Registers (SYSREG)
-
-Properties:
- - compatible : should contain two values. First value must be one from following list:
- - "samsung,exynos4-sysreg" - for Exynos4 based SoCs,
- - "samsung,exynos5-sysreg" - for Exynos5 based SoCs.
- second value must be always "syscon".
- - reg : offset and length of the register set.
-
-Example:
- syscon@10010000 {
- compatible = "samsung,exynos4-sysreg", "syscon";
- reg = <0x10010000 0x400>;
- };
-
- syscon@10050000 {
- compatible = "samsung,exynos5-sysreg", "syscon";
- reg = <0x10050000 0x5000>;
- };
diff --git a/Documentation/devicetree/bindings/arm/samsung/sysreg.yaml b/Documentation/devicetree/bindings/arm/samsung/sysreg.yaml
new file mode 100644
index 000000000000..3b7811804cb4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/sysreg.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/samsung/sysreg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S5P/Exynos SoC series System Registers (SYSREG)
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+# Custom select to avoid matching all nodes with 'syscon'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos4-sysreg
+ - samsung,exynos5-sysreg
+ required:
+ - compatible
+
+properties:
+ compatible:
+ allOf:
+ - items:
+ - enum:
+ - samsung,exynos4-sysreg
+ - samsung,exynos5-sysreg
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+examples:
+ - |
+ syscon@10010000 {
+ compatible = "samsung,exynos4-sysreg", "syscon";
+ reg = <0x10010000 0x400>;
+ };
+
+ syscon@10050000 {
+ compatible = "samsung,exynos5-sysreg", "syscon";
+ reg = <0x10050000 0x5000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/sprd.txt b/Documentation/devicetree/bindings/arm/sprd.txt
deleted file mode 100644
index 3df034b13e28..000000000000
--- a/Documentation/devicetree/bindings/arm/sprd.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Spreadtrum SoC Platforms Device Tree Bindings
-----------------------------------------------------
-
-SC9836 openphone Board
-Required root node properties:
- - compatible = "sprd,sc9836-openphone", "sprd,sc9836";
-
-SC9860 SoC
-Required root node properties:
- - compatible = "sprd,sc9860"
-
-SP9860G 3GFHD Board
-Required root node properties:
- - compatible = "sprd,sp9860g-1h10", "sprd,sc9860";
diff --git a/Documentation/devicetree/bindings/arm/sprd.yaml b/Documentation/devicetree/bindings/arm/sprd.yaml
new file mode 100644
index 000000000000..c35fb845ccaa
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sprd.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019 Unisoc Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/sprd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Unisoc platforms device tree bindings
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - sprd,sc9836-openphone
+ - const: sprd,sc9836
+ - items:
+ - enum:
+ - sprd,sp9860g-1h10
+ - const: sprd,sc9860
+ - items:
+ - enum:
+ - sprd,sp9863a-1h10
+ - const: sprd,sc9863a
+
+...
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index 4d194f1eb03a..1fcf306bd2d1 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -13,19 +13,38 @@ properties:
compatible:
oneOf:
- items:
+ - enum:
+ - st,stm32f429i-disco
+ - st,stm32429i-eval
- const: st,stm32f429
-
- items:
+ - enum:
+ - st,stm32f469i-disco
- const: st,stm32f469
-
- items:
+ - enum:
+ - st,stm32f746-disco
+ - st,stm32746g-eval
- const: st,stm32f746
-
- items:
+ - enum:
+ - st,stm32f769-disco
+ - const: st,stm32f769
+ - items:
+ - enum:
+ - st,stm32h743i-disco
+ - st,stm32h743i-eval
- const: st,stm32h743
-
- items:
- enum:
- arrow,stm32mp157a-avenger96 # Avenger96
+ - st,stm32mp157c-ed1
+ - st,stm32mp157a-dk1
+ - st,stm32mp157c-dk2
+
+ - const: st,stm32mp157
+ - items:
+ - const: st,stm32mp157c-ev1
+ - const: st,stm32mp157c-ed1
- const: st,stm32mp157
...
diff --git a/Documentation/devicetree/bindings/arm/sunxi/smp-sram.txt b/Documentation/devicetree/bindings/arm/sunxi/smp-sram.txt
deleted file mode 100644
index 082e6a9382d3..000000000000
--- a/Documentation/devicetree/bindings/arm/sunxi/smp-sram.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Allwinner SRAM for smp bringup:
-------------------------------------------------
-
-Allwinner's A80 SoC uses part of the secure sram for hotplugging of the
-primary core (cpu0). Once the core gets powered up it checks if a magic
-value is set at a specific location. If it is then the BROM will jump
-to the software entry address, instead of executing a standard boot.
-
-Therefore a reserved section sub-node has to be added to the mmio-sram
-declaration.
-
-Note that this is separate from the Allwinner SRAM controller found in
-../../sram/sunxi-sram.txt. This SRAM is secure only and not mappable to
-any device.
-
-Also there are no "secure-only" properties. The implementation should
-check if this SRAM is usable first.
-
-Required sub-node properties:
-- compatible : depending on the SoC this should be one of:
- "allwinner,sun9i-a80-smp-sram"
-
-The rest of the properties should follow the generic mmio-sram discription
-found in ../../misc/sram.txt
-
-Example:
-
- sram_b: sram@20000 {
- /* 256 KiB secure SRAM at 0x20000 */
- compatible = "mmio-sram";
- reg = <0x00020000 0x40000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x00020000 0x40000>;
-
- smp-sram@1000 {
- /*
- * This is checked by BROM to determine if
- * cpu0 should jump to SMP entry vector
- */
- compatible = "allwinner,sun9i-a80-smp-sram";
- reg = <0x1000 0x8>;
- };
- };
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
index 4268e17d2411..a2fbdc91570d 100644
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible : should contain one or more of the following:
+ - "renesas,sata-r8a774b1" for RZ/G2N
- "renesas,sata-r8a7779" for R-Car H1
- "renesas,sata-r8a7790-es1" for R-Car H2 ES1
- "renesas,sata-r8a7790" for R-Car H2 other than ES1
@@ -9,8 +10,10 @@ Required properties:
- "renesas,sata-r8a7793" for R-Car M2-N
- "renesas,sata-r8a7795" for R-Car H3
- "renesas,sata-r8a77965" for R-Car M3-N
- - "renesas,rcar-gen2-sata" for a generic R-Car Gen2 compatible device
- - "renesas,rcar-gen3-sata" for a generic R-Car Gen3 compatible device
+ - "renesas,rcar-gen2-sata" for a generic R-Car Gen2
+ compatible device
+ - "renesas,rcar-gen3-sata" for a generic R-Car Gen3 or
+ RZ/G2 compatible device
- "renesas,rcar-sata" is deprecated
When compatible with the generic version nodes
diff --git a/Documentation/devicetree/bindings/board/fsl-board.txt b/Documentation/devicetree/bindings/board/fsl-board.txt
index eb52f6b35159..9cde57015921 100644
--- a/Documentation/devicetree/bindings/board/fsl-board.txt
+++ b/Documentation/devicetree/bindings/board/fsl-board.txt
@@ -47,36 +47,6 @@ Example (LS2080A-RDB):
reg = <0x3 0 0x10000>;
};
-* Freescale BCSR GPIO banks
-
-Some BCSR registers act as simple GPIO controllers, each such
-register can be represented by the gpio-controller node.
-
-Required properities:
-- compatible : Should be "fsl,<board>-bcsr-gpio".
-- reg : Should contain the address and the length of the GPIO bank
- register.
-- #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
-- gpio-controller : Marks the port as GPIO controller.
-
-Example:
-
- bcsr@1,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8360mds-bcsr";
- reg = <1 0 0x8000>;
- ranges = <0 1 0 0x8000>;
-
- bcsr13: gpio-controller@d {
- #gpio-cells = <2>;
- compatible = "fsl,mpc8360mds-bcsr-gpio";
- reg = <0xd 1>;
- gpio-controller;
- };
- };
-
* Freescale on-board FPGA connected on I2C bus
Some Freescale boards like BSC9132QDS have on board FPGA connected on
diff --git a/Documentation/devicetree/bindings/bus/renesas,bsc.txt b/Documentation/devicetree/bindings/bus/renesas,bsc.txt
deleted file mode 100644
index 90e947269437..000000000000
--- a/Documentation/devicetree/bindings/bus/renesas,bsc.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-Renesas Bus State Controller (BSC)
-==================================
-
-The Renesas Bus State Controller (BSC, sometimes called "LBSC within Bus
-Bridge", or "External Bus Interface") can be found in several Renesas ARM SoCs.
-It provides an external bus for connecting multiple external devices to the
-SoC, driving several chip select lines, for e.g. NOR FLASH, Ethernet and USB.
-
-While the BSC is a fairly simple memory-mapped bus, it may be part of a PM
-domain, and may have a gateable functional clock.
-Before a device connected to the BSC can be accessed, the PM domain
-containing the BSC must be powered on, and the functional clock
-driving the BSC must be enabled.
-
-The bindings for the BSC extend the bindings for "simple-pm-bus".
-
-
-Required properties
- - compatible: Must contain an SoC-specific value, and "renesas,bsc" and
- "simple-pm-bus" as fallbacks.
- SoC-specific values can be:
- "renesas,bsc-r8a73a4" for R-Mobile APE6 (r8a73a4)
- "renesas,bsc-sh73a0" for SH-Mobile AG5 (sh73a0)
- - #address-cells, #size-cells, ranges: Must describe the mapping between
- parent address and child address spaces.
- - reg: Must contain the base address and length to access the bus controller.
-
-Optional properties:
- - interrupts: Must contain a reference to the BSC interrupt, if available.
- - clocks: Must contain a reference to the functional clock, if available.
- - power-domains: Must contain a reference to the PM domain, if available.
-
-
-Example:
-
- bsc: bus@fec10000 {
- compatible = "renesas,bsc-sh73a0", "renesas,bsc",
- "simple-pm-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0x20000000>;
- reg = <0xfec10000 0x400>;
- interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&zb_clk>;
- power-domains = <&pd_a4s>;
- };
diff --git a/Documentation/devicetree/bindings/bus/renesas,bsc.yaml b/Documentation/devicetree/bindings/bus/renesas,bsc.yaml
new file mode 100644
index 000000000000..7d10b62a52d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/renesas,bsc.yaml
@@ -0,0 +1,60 @@
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/renesas,bsc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Bus State Controller (BSC)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description: |
+ The Renesas Bus State Controller (BSC, sometimes called "LBSC within Bus
+ Bridge", or "External Bus Interface") can be found in several Renesas ARM
+ SoCs. It provides an external bus for connecting multiple external
+  devices to the SoC, driving several chip select lines, e.g. for NOR
+  FLASH, Ethernet and USB.
+
+ While the BSC is a fairly simple memory-mapped bus, it may be part of a
+ PM domain, and may have a gateable functional clock. Before a device
+ connected to the BSC can be accessed, the PM domain containing the BSC
+ must be powered on, and the functional clock driving the BSC must be
+ enabled.
+
+ The bindings for the BSC extend the bindings for "simple-pm-bus".
+
+allOf:
+ - $ref: simple-pm-bus.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,bsc-r8a73a4 # R-Mobile APE6 (r8a73a4)
+ - renesas,bsc-sh73a0 # SH-Mobile AG5 (sh73a0)
+ - const: renesas,bsc
+ - {} # simple-pm-bus, but not listed here to avoid false select
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ bsc: bus@fec10000 {
+ compatible = "renesas,bsc-sh73a0", "renesas,bsc", "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x20000000>;
+ reg = <0xfec10000 0x400>;
+ interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&zb_clk>;
+ power-domains = <&pd_a4s>;
+ };
diff --git a/Documentation/devicetree/bindings/bus/simple-pm-bus.txt b/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
deleted file mode 100644
index 6f15037131ed..000000000000
--- a/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-Simple Power-Managed Bus
-========================
-
-A Simple Power-Managed Bus is a transparent bus that doesn't need a real
-driver, as it's typically initialized by the boot loader.
-
-However, its bus controller is part of a PM domain, or under the control of a
-functional clock. Hence, the bus controller's PM domain and/or clock must be
-enabled for child devices connected to the bus (either on-SoC or externally)
-to function.
-
-While "simple-pm-bus" follows the "simple-bus" set of properties, as specified
-in the Devicetree Specification, it is not an extension of "simple-bus".
-
-
-Required properties:
- - compatible: Must contain at least "simple-pm-bus".
- Must not contain "simple-bus".
- It's recommended to let this be preceded by one or more
- vendor-specific compatible values.
- - #address-cells, #size-cells, ranges: Must describe the mapping between
- parent address and child address spaces.
-
-Optional platform-specific properties for clock or PM domain control (at least
-one of them is required):
- - clocks: Must contain a reference to the functional clock(s),
- - power-domains: Must contain a reference to the PM domain.
-Please refer to the binding documentation for the clock and/or PM domain
-providers for more details.
-
-
-Example:
-
- bsc: bus@fec10000 {
- compatible = "renesas,bsc-sh73a0", "renesas,bsc",
- "simple-pm-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0x20000000>;
- reg = <0xfec10000 0x400>;
- interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&zb_clk>;
- power-domains = <&pd_a4s>;
- };
diff --git a/Documentation/devicetree/bindings/bus/simple-pm-bus.yaml b/Documentation/devicetree/bindings/bus/simple-pm-bus.yaml
new file mode 100644
index 000000000000..33326ffdb266
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/simple-pm-bus.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/simple-pm-bus.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple Power-Managed Bus
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description: |
+ A Simple Power-Managed Bus is a transparent bus that doesn't need a real
+ driver, as it's typically initialized by the boot loader.
+
+ However, its bus controller is part of a PM domain, or under the control
+ of a functional clock. Hence, the bus controller's PM domain and/or
+ clock must be enabled for child devices connected to the bus (either
+ on-SoC or externally) to function.
+
+ While "simple-pm-bus" follows the "simple-bus" set of properties, as
+ specified in the Devicetree Specification, it is not an extension of
+ "simple-bus".
+
+properties:
+ $nodename:
+ pattern: "^bus(@[0-9a-f]+)?$"
+
+ compatible:
+ contains:
+ const: simple-pm-bus
+ description:
+      Shall contain "simple-pm-bus" in addition to any optional bus-specific
+ compatible strings defined in individual pm-bus bindings.
+
+ '#address-cells':
+ enum: [ 1, 2 ]
+
+ '#size-cells':
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ clocks: true
+ # Functional clocks
+ # Required if power-domains is absent, optional otherwise
+
+ power-domains:
+ # Required if clocks is absent, optional otherwise
+ minItems: 1
+
+required:
+ - compatible
+ - '#address-cells'
+ - '#size-cells'
+ - ranges
+
+anyOf:
+ - required:
+ - clocks
+ - required:
+ - power-domains
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-msm8996.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ bus {
+ power-domains = <&gcc AGGRE0_NOC_GDSC>;
+ compatible = "simple-pm-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ };
diff --git a/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
index b3957d10d241..3a8948c04bc9 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
@@ -7,7 +7,8 @@ devices.
Required Properties:
- compatible : should be "amlogic,axg-audio-clkc" for the A113X and A113D,
- "amlogic,g12a-audio-clkc" for G12A.
+ "amlogic,g12a-audio-clkc" for G12A,
+ "amlogic,sm1-audio-clkc" for S905X3.
- reg : physical base address of the clock controller and length of
memory mapped region.
- clocks : a list of phandle + clock-specifier pairs for the clocks listed
diff --git a/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt b/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
index 1e3370ba189f..fbf58c443c04 100644
--- a/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
+++ b/Documentation/devicetree/bindings/clock/armada3700-periph-clock.txt
@@ -9,7 +9,7 @@ bridge.
The peripheral clock consumer should specify the desired clock by
having the clock ID in its "clocks" phandle cell.
-The following is a list of provided IDs for Armada 370 North bridge clocks:
+The following is a list of provided IDs for Armada 3700 North bridge clocks:
ID Clock name Description
-----------------------------------
0 mmc MMC controller
@@ -30,7 +30,7 @@ ID Clock name Description
15 eip97 EIP 97
16 cpu CPU
-The following is a list of provided IDs for Armada 370 South bridge clocks:
+The following is a list of provided IDs for Armada 3700 South bridge clocks:
ID Clock name Description
-----------------------------------
0 gbe-50 50 MHz parent clock for Gigabit Ethernet
@@ -46,6 +46,7 @@ ID Clock name Description
10 sdio SDIO
11 usb32-sub2-sys USB 2 clock
12 usb32-ss-sys USB 3 clock
+13 pcie PCIe controller
Required properties:
diff --git a/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml b/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
new file mode 100644
index 000000000000..e63827399c1a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/bitmain,bm1880-clk.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bitmain BM1880 Clock Controller
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description: |
+ The Bitmain BM1880 clock controller generates and supplies clock to
+ various peripherals within the SoC.
+
+ This binding uses the common clock bindings:
+ [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+properties:
+ compatible:
+ const: bitmain,bm1880-clk
+
+ reg:
+ items:
+ - description: pll registers
+ - description: system registers
+
+ reg-names:
+ items:
+ - const: pll
+ - const: sys
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: osc
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ # Clock controller node:
+ - |
+ clk: clock-controller@e8 {
+ compatible = "bitmain,bm1880-clk";
+ reg = <0xe8 0x0c>, <0x800 0xb0>;
+ reg-names = "pll", "sys";
+ clocks = <&osc>;
+ clock-names = "osc";
+ #clock-cells = <1>;
+ };
+
+ # Example UART controller node that consumes clocks generated by the clock controller:
+ - |
+ uart0: serial@58018000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x0 0x58018000 0x0 0x2000>;
+ clocks = <&clk 45>, <&clk 46>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <0 9 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt b/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt
index a4f8cd478f92..93d89adb7afe 100644
--- a/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt
@@ -82,7 +82,6 @@ pcc2: pcc2@403f0000 {
<&scg1 IMX7ULP_CLK_APLL_PFD0>,
<&scg1 IMX7ULP_CLK_UPLL>,
<&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>,
- <&scg1 IMX7ULP_CLK_MIPI_PLL>,
<&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>,
<&scg1 IMX7ULP_CLK_ROSC>,
<&scg1 IMX7ULP_CLK_SPLL_BUS_CLK>;
diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
index ba5a442026b7..75598e655067 100644
--- a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
+++ b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
@@ -11,6 +11,7 @@ Required properties:
* ingenic,jz4725b-cgu
* ingenic,jz4770-cgu
* ingenic,jz4780-cgu
+ * ingenic,x1000-cgu
- reg : The address & length of the CGU registers.
- clocks : List of phandle & clock specifiers for clocks external to the CGU.
Two such external clocks should be specified - first the external crystal
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
deleted file mode 100644
index d14362ad4132..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-Qualcomm Global Clock & Reset Controller Binding
-------------------------------------------------
-
-Required properties :
-- compatible : shall contain only one of the following:
-
- "qcom,gcc-apq8064"
- "qcom,gcc-apq8084"
- "qcom,gcc-ipq8064"
- "qcom,gcc-ipq4019"
- "qcom,gcc-ipq8074"
- "qcom,gcc-msm8660"
- "qcom,gcc-msm8916"
- "qcom,gcc-msm8960"
- "qcom,gcc-msm8974"
- "qcom,gcc-msm8974pro"
- "qcom,gcc-msm8974pro-ac"
- "qcom,gcc-msm8994"
- "qcom,gcc-msm8996"
- "qcom,gcc-msm8998"
- "qcom,gcc-mdm9615"
- "qcom,gcc-qcs404"
- "qcom,gcc-sdm630"
- "qcom,gcc-sdm660"
- "qcom,gcc-sdm845"
- "qcom,gcc-sm8150"
-
-- reg : shall contain base register location and length
-- #clock-cells : shall contain 1
-- #reset-cells : shall contain 1
-
-Optional properties :
-- #power-domain-cells : shall contain 1
-- Qualcomm TSENS (thermal sensor device) on some devices can
-be part of GCC and hence the TSENS properties can also be
-part of the GCC/clock-controller node.
-For more details on the TSENS properties please refer
-Documentation/devicetree/bindings/thermal/qcom-tsens.txt
-- protected-clocks : Protected clock specifier list as per common clock
- binding.
-
-For SM8150 only:
- - clocks: a list of phandles and clock-specifier pairs,
- one for each entry in clock-names.
- - clock-names: "bi_tcxo" (required)
- "sleep_clk" (optional)
- "aud_ref_clock" (optional)
-
-Example:
- clock-controller@900000 {
- compatible = "qcom,gcc-msm8960";
- reg = <0x900000 0x4000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
- };
-
-Example of GCC with TSENS properties:
- clock-controller@900000 {
- compatible = "qcom,gcc-apq8064";
- reg = <0x00900000 0x4000>;
- nvmem-cells = <&tsens_calib>, <&tsens_backup>;
- nvmem-cell-names = "calib", "calib_backup";
- #clock-cells = <1>;
- #reset-cells = <1>;
- #thermal-sensor-cells = <1>;
- };
-
-Example of GCC with protected-clocks properties:
- clock-controller@100000 {
- compatible = "qcom,gcc-sdm845";
- reg = <0x100000 0x1f0000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
- protected-clocks = <GCC_QSPI_CORE_CLK>,
- <GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
- <GCC_LPASS_Q6_AXI_CLK>,
- <GCC_LPASS_SWAY_CLK>;
- };
-
-Example of GCC with clocks
- gcc: clock-controller@100000 {
- compatible = "qcom,gcc-sm8150";
- reg = <0x00100000 0x1f0000>;
- #clock-cells = <1>;
- #reset-cells = <1>;
- #power-domain-cells = <1>;
- clock-names = "bi_tcxo",
- "sleep_clk";
- clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
- <&sleep_clk>;
- };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
new file mode 100644
index 000000000000..e73a56fb60ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding
+
+maintainers:
+ - Stephen Boyd <sboyd@kernel.org>
+ - Taniya Das <tdas@codeaurora.org>
+
+description: |
+ Qualcomm global clock control module that provides clocks, resets and
+ power domains.
+
+properties:
+ compatible:
+ enum:
+ - qcom,gcc-apq8064
+ - qcom,gcc-apq8084
+ - qcom,gcc-ipq8064
+ - qcom,gcc-ipq4019
+ - qcom,gcc-ipq8074
+ - qcom,gcc-msm8660
+ - qcom,gcc-msm8916
+ - qcom,gcc-msm8960
+ - qcom,gcc-msm8974
+ - qcom,gcc-msm8974pro
+ - qcom,gcc-msm8974pro-ac
+ - qcom,gcc-msm8994
+ - qcom,gcc-msm8996
+ - qcom,gcc-msm8998
+ - qcom,gcc-mdm9615
+ - qcom,gcc-qcs404
+ - qcom,gcc-sc7180
+ - qcom,gcc-sdm630
+ - qcom,gcc-sdm660
+ - qcom,gcc-sdm845
+ - qcom,gcc-sm8150
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+ items:
+ - description: Board XO source
+ - description: Board active XO source
+ - description: Sleep clock source
+
+ clock-names:
+ minItems: 1
+ maxItems: 3
+ items:
+ - const: bi_tcxo
+ - const: bi_tcxo_ao
+ - const: sleep_clk
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ nvmem-cells:
+ minItems: 1
+ maxItems: 2
+ description:
+ Qualcomm TSENS (thermal sensor device) on some devices can
+ be part of GCC and hence the TSENS properties can also be part
+ of the GCC/clock-controller node.
+ For more details on the TSENS properties please refer to
+ Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+
+ nvmem-cell-names:
+ minItems: 1
+ maxItems: 2
+ description:
+ Names for each nvmem cell specified.
+ items:
+ - const: calib
+ - const: calib_backup
+
+ '#thermal-sensor-cells':
+ const: 1
+
+ protected-clocks:
+ description:
+ Protected clock specifier list as per common clock binding
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,gcc-apq8064
+
+then:
+ required:
+ - nvmem-cells
+ - nvmem-cell-names
+ - '#thermal-sensor-cells'
+
+else:
+ if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,gcc-sm8150
+ - qcom,gcc-sc7180
+ then:
+ required:
+ - clocks
+ - clock-names
+
+
+examples:
+ # Example for GCC for MSM8960:
+ - |
+ clock-controller@900000 {
+ compatible = "qcom,gcc-msm8960";
+ reg = <0x900000 0x4000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+
+ # Example of GCC with TSENS properties:
+ - |
+ clock-controller@900000 {
+ compatible = "qcom,gcc-apq8064";
+ reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ # Example of GCC with protected-clocks properties:
+ - |
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ protected-clocks = <187>, <188>, <189>, <190>, <191>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ # Example of GCC with clock node properties for SM8150:
+ - |
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sm8150";
+ reg = <0x00100000 0x1f0000>;
+ clocks = <&rpmhcc 0>, <&rpmhcc 1>, <&sleep_clk>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ # Example of GCC with clock nodes properties for SC7180:
+ - |
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sc7180";
+ reg = <0x100000 0x1f0000>;
+ clocks = <&rpmhcc 0>, <&rpmhcc 1>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,q6sstopcc.yaml b/Documentation/devicetree/bindings/clock/qcom,q6sstopcc.yaml
new file mode 100644
index 000000000000..bbaaf1e2a203
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,q6sstopcc.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,q6sstopcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Q6SSTOP Clock Controller
+
+maintainers:
+ - Govind Singh <govinds@codeaurora.org>
+
+properties:
+ compatible:
+ const: "qcom,qcs404-q6sstopcc"
+
+ reg:
+ items:
+ - description: Q6SSTOP clocks register region
+ - description: Q6SSTOP_TCSR register region
+
+ clocks:
+ items:
+ - description: AHB clock for the Q6SSTOP clock controller
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ q6sstopcc: clock-controller@7500000 {
+ compatible = "qcom,qcs404-q6sstopcc";
+ reg = <0x07500000 0x4e000>, <0x07550000 0x10000>;
+ clocks = <&gcc 141>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
deleted file mode 100644
index 365bbde599b1..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Qualcomm Technologies, Inc. RPMh Clocks
--------------------------------------------------------
-
-Resource Power Manager Hardened (RPMh) manages shared resources on
-some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
-other hardware subsystems via RSC to control clocks.
-
-Required properties :
-- compatible : must be one of:
- "qcom,sdm845-rpmh-clk"
- "qcom,sm8150-rpmh-clk"
-
-- #clock-cells : must contain 1
-- clocks: a list of phandles and clock-specifier pairs,
- one for each entry in clock-names.
-- clock-names: Parent board clock: "xo".
-
-Example :
-
-#include <dt-bindings/clock/qcom,rpmh.h>
-
- &apps_rsc {
- rpmhcc: clock-controller {
- compatible = "qcom,sdm845-rpmh-clk";
- #clock-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
new file mode 100644
index 000000000000..94e2f14eb967
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,rpmhcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. RPMh Clocks Bindings
+
+maintainers:
+ - Taniya Das <tdas@codeaurora.org>
+
+description: |
+ Resource Power Manager Hardened (RPMh) manages shared resources on
+ some Qualcomm Technologies Inc. SoCs. It accepts clock requests from
+ other hardware subsystems via RSC to control clocks.
+
+properties:
+ compatible:
+ enum:
+ - qcom,sc7180-rpmh-clk
+ - qcom,sdm845-rpmh-clk
+ - qcom,sm8150-rpmh-clk
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: xo
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - '#clock-cells'
+
+examples:
+ # Example of the RPMh clock controller for SDM845: the node below
+ # should be defined inside the &apps_rsc node.
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ rpmhcc: clock-controller {
+ compatible = "qcom,sdm845-rpmh-clk";
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ #clock-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index 916a601b76a7..c7674d0267a3 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -19,6 +19,7 @@ Required Properties:
- "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
- "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
- "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M)
+ - "renesas,r8a774b1-cpg-mssr" for the r8a774a1 SoC (RZ/G2N)
- "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E)
- "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
- "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
@@ -26,7 +27,8 @@ Required Properties:
- "renesas,r8a7793-cpg-mssr" for the r8a7793 SoC (R-Car M2-N)
- "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2)
- "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
- - "renesas,r8a7796-cpg-mssr" for the r8a7796 SoC (R-Car M3-W)
+ - "renesas,r8a7796-cpg-mssr" for the r8a77960 SoC (R-Car M3-W)
+ - "renesas,r8a77961-cpg-mssr" for the r8a77961 SoC (R-Car M3-W+)
- "renesas,r8a77965-cpg-mssr" for the r8a77965 SoC (R-Car M3-N)
- "renesas,r8a77970-cpg-mssr" for the r8a77970 SoC (R-Car V3M)
- "renesas,r8a77980-cpg-mssr" for the r8a77980 SoC (R-Car V3H)
@@ -40,10 +42,11 @@ Required Properties:
clock-names
- clock-names: List of external parent clock names. Valid names are:
- "extal" (r7s9210, r8a7743, r8a7744, r8a7745, r8a77470, r8a774a1,
- r8a774c0, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794,
- r8a7795, r8a7796, r8a77965, r8a77970, r8a77980, r8a77990,
- r8a77995)
- - "extalr" (r8a774a1, r8a7795, r8a7796, r8a77965, r8a77970, r8a77980)
+ r8a774b1, r8a774c0, r8a7790, r8a7791, r8a7792, r8a7793,
+ r8a7794, r8a7795, r8a77960, r8a77961, r8a77965, r8a77970,
+ r8a77980, r8a77990, r8a77995)
+ - "extalr" (r8a774a1, r8a774b1, r8a7795, r8a77960, r8a77961, r8a77965,
+ r8a77970, r8a77980)
- "usb_extal" (r8a7743, r8a7744, r8a7745, r8a77470, r8a7790, r8a7791,
r8a7793, r8a7794)
@@ -59,7 +62,7 @@ Required Properties:
power-managed through Module Standby should refer to the CPG device
node in their "power-domains" property, as documented by the generic PM
Domain bindings in
- Documentation/devicetree/bindings/power/power_domain.txt.
+ Documentation/devicetree/bindings/power/power-domain.yaml.
- #reset-cells: Must be 1
- The single reset specifier cell must be the module number, as defined
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
deleted file mode 100644
index f8c05bb4116e..000000000000
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-* Renesas R-Car Gen2 Clock Pulse Generator (CPG)
-
-The CPG generates core clocks for the R-Car Gen2 SoCs. It includes three PLLs
-and several fixed ratio dividers.
-The CPG also provides a Clock Domain for SoC devices, in combination with the
-CPG Module Stop (MSTP) Clocks.
-
-Required Properties:
-
- - compatible: Must be one of
- - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG
- - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
- - "renesas,r8a7792-cpg-clocks" for the r8a7792 CPG
- - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
- - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
- and "renesas,rcar-gen2-cpg-clocks" as a fallback.
-
- - reg: Base address and length of the memory resource used by the CPG
-
- - clocks: References to the parent clocks: first to the EXTAL clock, second
- to the USB_EXTAL clock
- - #clock-cells: Must be 1
- - clock-output-names: The names of the clocks. Supported clocks are "main",
- "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
- "adsp"
- - #power-domain-cells: Must be 0
-
-SoC devices that are part of the CPG/MSTP Clock Domain and can be power-managed
-through an MSTP clock should refer to the CPG device node in their
-"power-domains" property, as documented by the generic PM domain bindings in
-Documentation/devicetree/bindings/power/power_domain.txt.
-
-
-Examples
---------
-
- - CPG device node:
-
- cpg_clocks: cpg_clocks@e6150000 {
- compatible = "renesas,r8a7790-cpg-clocks",
- "renesas,rcar-gen2-cpg-clocks";
- reg = <0 0xe6150000 0 0x1000>;
- clocks = <&extal_clk &usb_extal_clk>;
- #clock-cells = <1>;
- clock-output-names = "main", "pll0, "pll1", "pll3",
- "lb", "qspi", "sdh", "sd0", "sd1", "z",
- "rcan", "adsp";
- #power-domain-cells = <0>;
- };
-
-
- - CPG/MSTP Clock Domain member device node:
-
- thermal@e61f0000 {
- compatible = "renesas,thermal-r8a7790", "renesas,rcar-thermal";
- reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
- interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
- power-domains = <&cpg_clocks>;
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
index e96e085271c1..83f6c6a7c41c 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
@@ -46,7 +46,7 @@ Required properties:
Example (R-Car H3):
usb2_clksel: clock-controller@e6590630 {
- compatible = "renesas,r8a77950-rcar-usb2-clock-sel",
+ compatible = "renesas,r8a7795-rcar-usb2-clock-sel",
"renesas,rcar-gen3-usb2-clock-sel";
reg = <0 0xe6590630 0 0x02>;
clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>;
diff --git a/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt b/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt
index dae4ad8e198c..5f746ebf7a2c 100644
--- a/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt
+++ b/Documentation/devicetree/bindings/clock/ti/davinci/psc.txt
@@ -67,5 +67,5 @@ Examples:
Also see:
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-- Documentation/devicetree/bindings/power/power_domain.txt
+- Documentation/devicetree/bindings/power/power-domain.yaml
- Documentation/devicetree/bindings/reset/reset.txt
diff --git a/Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt b/Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt
deleted file mode 100644
index e90bc47f752a..000000000000
--- a/Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-STMicroelectronics STM32 Low-Power Timer quadrature encoder and counter
-
-STM32 Low-Power Timer provides several counter modes. It can be used as:
-- quadrature encoder to detect angular position and direction of rotary
- elements, from IN1 and IN2 input signals.
-- simple counter from IN1 input signal.
-
-Must be a sub-node of an STM32 Low-Power Timer device tree node.
-See ../mfd/stm32-lptimer.txt for details about the parent node.
-
-Required properties:
-- compatible: Must be "st,stm32-lptimer-counter".
-- pinctrl-names: Set to "default". An additional "sleep" state can be
- defined to set pins in sleep state.
-- pinctrl-n: List of phandles pointing to pin configuration nodes,
- to set IN1/IN2 pins in mode of operation for Low-Power
- Timer input on external pin.
-
-Example:
- timer@40002400 {
- compatible = "st,stm32-lptimer";
- ...
- counter {
- compatible = "st,stm32-lptimer-counter";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&lptim1_in_pins>;
- pinctrl-1 = <&lptim1_sleep_in_pins>;
- };
- };
diff --git a/Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt b/Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt
deleted file mode 100644
index c52fcdd4bf6c..000000000000
--- a/Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-STMicroelectronics STM32 Timer quadrature encoder
-
-STM32 Timer provides quadrature encoder to detect
-angular position and direction of rotary elements,
-from IN1 and IN2 input signals.
-
-Must be a sub-node of an STM32 Timer device tree node.
-See ../mfd/stm32-timers.txt for details about the parent node.
-
-Required properties:
-- compatible: Must be "st,stm32-timer-counter".
-- pinctrl-names: Set to "default".
-- pinctrl-0: List of phandles pointing to pin configuration nodes,
- to set CH1/CH2 pins in mode of operation for STM32
- Timer input on external pin.
-
-Example:
- timers@40010000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40010000 0x400>;
- clocks = <&rcc 0 160>;
- clock-names = "int";
-
- counter {
- compatible = "st,stm32-timer-counter";
- pinctrl-names = "default";
- pinctrl-0 = <&tim1_in_pins>;
- };
- };
diff --git a/Documentation/devicetree/bindings/counter/ti-eqep.yaml b/Documentation/devicetree/bindings/counter/ti-eqep.yaml
new file mode 100644
index 000000000000..85f1ff83afe7
--- /dev/null
+++ b/Documentation/devicetree/bindings/counter/ti-eqep.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/counter/ti-eqep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments Enhanced Quadrature Encoder Pulse (eQEP) Module
+
+maintainers:
+ - David Lechner <david@lechnology.com>
+
+properties:
+ compatible:
+ const: ti,am3352-eqep
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: The eQEP event interrupt
+ maxItems: 1
+
+ clocks:
+ description: The clock that determines the SYSCLKOUT rate for the eQEP
+ peripheral.
+ maxItems: 1
+
+ clock-names:
+ const: sysclkout
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ eqep0: counter@180 {
+ compatible = "ti,am3352-eqep";
+ reg = <0x180 0x80>;
+ clocks = <&l4ls_gclk>;
+ clock-names = "sysclkout";
+ interrupts = <79>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/cpu/cpu-topology.txt b/Documentation/devicetree/bindings/cpu/cpu-topology.txt
index 99918189403c..9bd530a35d14 100644
--- a/Documentation/devicetree/bindings/cpu/cpu-topology.txt
+++ b/Documentation/devicetree/bindings/cpu/cpu-topology.txt
@@ -549,5 +549,5 @@ Example 3: HiFive Unleashed (RISC-V 64 bit, 4 core system)
[2] Devicetree NUMA binding description
Documentation/devicetree/bindings/numa.txt
[3] RISC-V Linux kernel documentation
- Documentation/devicetree/bindings/riscv/cpus.txt
+ Documentation/devicetree/bindings/riscv/cpus.yaml
[4] https://www.devicetree.org/specifications/
diff --git a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
index 0c38e4b8fc51..1758051798fe 100644
--- a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
+++ b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
@@ -15,12 +15,16 @@ In 'cpus' nodes:
In 'operating-points-v2' table:
- compatible: Should be
- - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx SoCs
+ - 'operating-points-v2-ti-cpu' for am335x, am43xx, dra7xx/am57xx,
+ omap34xx, omap36xx and am3517 SoCs
- syscon: A phandle pointing to a syscon node representing the control module
register space of the SoC.
Optional properties:
--------------------
+- "vdd-supply", "vbb-supply": to define two regulators for dra7xx
+- "cpu0-supply", "vbb-supply": to define two regulators for omap36xx
+
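+A hypothetical sketch for a dra7xx cpu node (the regulator phandles are
+placeholders):
+
+	cpu0: cpu@0 {
+		vdd-supply = <&vdd_mpu>;
+		vbb-supply = <&vbb_mpu>;
+	};
+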
For each opp entry in 'operating-points-v2' table:
- opp-supported-hw: Two bitfields indicating:
1. Which revision of the SoC the OPP is supported by
diff --git a/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt b/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt
deleted file mode 100644
index 7ec9a5a7727a..000000000000
--- a/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Samsung SoC SlimSSS (Slim Security SubSystem) module
-
-The SlimSSS module in Exynos5433 SoC supports the following:
--- Feeder (FeedCtrl)
--- Advanced Encryption Standard (AES) with ECB,CBC,CTR,XTS and (CBC/XTS)/CTS
--- SHA-1/SHA-256 and (SHA-1/SHA-256)/HMAC
-
-Required properties:
-
-- compatible : Should contain entry for slimSSS version:
- - "samsung,exynos5433-slim-sss" for Exynos5433 SoC.
-- reg : Offset and length of the register set for the module
-- interrupts : interrupt specifiers of SlimSSS module interrupts (one feed
- control interrupt).
-
-- clocks : list of clock phandle and specifier pairs for all clocks listed in
- clock-names property.
-- clock-names : list of device clock input names; should contain "pclk" and
- "aclk" for slim-sss in Exynos5433.
diff --git a/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml b/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml
new file mode 100644
index 000000000000..04fe5dfa794a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/samsung-slimsss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC SlimSSS (Slim Security SubSystem) module
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Kamil Konieczny <k.konieczny@partner.samsung.com>
+
+description: |+
+ The SlimSSS module in Exynos5433 SoC supports the following:
+ -- Feeder (FeedCtrl)
+ -- Advanced Encryption Standard (AES) with ECB,CBC,CTR,XTS and (CBC/XTS)/CTS
+ -- SHA-1/SHA-256 and (SHA-1/SHA-256)/HMAC
+
+properties:
+ compatible:
+ items:
+ - const: samsung,exynos5433-slim-sss
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: aclk
+
+ interrupts:
+ description: One feed control interrupt.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+ - interrupts
+
+additionalProperties: false
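+
+examples:
+  # A minimal sketch; the register address, clock phandles and interrupt
+  # specifier below are illustrative placeholders, not taken from a real
+  # Exynos5433 device tree.
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    crypto@11140000 {
+      compatible = "samsung,exynos5433-slim-sss";
+      reg = <0x11140000 0x1000>;
+      clocks = <&cmu_imem 0>, <&cmu_imem 1>;
+      clock-names = "pclk", "aclk";
+      interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+    };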
diff --git a/Documentation/devicetree/bindings/crypto/samsung-sss.txt b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
deleted file mode 100644
index 7a5ca56683cc..000000000000
--- a/Documentation/devicetree/bindings/crypto/samsung-sss.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Samsung SoC SSS (Security SubSystem) module
-
-The SSS module in S5PV210 SoC supports the following:
--- Feeder (FeedCtrl)
--- Advanced Encryption Standard (AES)
--- Data Encryption Standard (DES)/3DES
--- Public Key Accelerator (PKA)
--- SHA-1/SHA-256/MD5/HMAC (SHA-1/SHA-256/MD5)/PRNG
--- PRNG: Pseudo Random Number Generator
-
-The SSS module in Exynos4 (Exynos4210) and
-Exynos5 (Exynos5420 and Exynos5250) SoCs
-supports the following also:
--- ARCFOUR (ARC4)
--- True Random Number Generator (TRNG)
--- Secure Key Manager
-
-Required properties:
-
-- compatible : Should contain entries for this and backward compatible
- SSS versions:
- - "samsung,s5pv210-secss" for S5PV210 SoC.
- - "samsung,exynos4210-secss" for Exynos4210, Exynos4212, Exynos4412, Exynos5250,
- Exynos5260 and Exynos5420 SoCs.
-- reg : Offset and length of the register set for the module
-- interrupts : interrupt specifiers of SSS module interrupts (one feed
- control interrupt).
-
-- clocks : list of clock phandle and specifier pairs for all clocks listed in
- clock-names property.
-- clock-names : list of device clock input names; should contain one entry
- "secss".
diff --git a/Documentation/devicetree/bindings/crypto/samsung-sss.yaml b/Documentation/devicetree/bindings/crypto/samsung-sss.yaml
new file mode 100644
index 000000000000..cf1c47a81d7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/samsung-sss.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/samsung-sss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC SSS (Security SubSystem) module
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Kamil Konieczny <k.konieczny@partner.samsung.com>
+
+description: |+
+ The SSS module in S5PV210 SoC supports the following:
+ -- Feeder (FeedCtrl)
+ -- Advanced Encryption Standard (AES)
+ -- Data Encryption Standard (DES)/3DES
+ -- Public Key Accelerator (PKA)
+ -- SHA-1/SHA-256/MD5/HMAC (SHA-1/SHA-256/MD5)/PRNG
+ -- PRNG: Pseudo Random Number Generator
+
+ The SSS module in Exynos4 (Exynos4210) and Exynos5 (Exynos5420 and Exynos5250)
+ SoCs supports the following also:
+ -- ARCFOUR (ARC4)
+ -- True Random Number Generator (TRNG)
+ -- Secure Key Manager
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - samsung,s5pv210-secss # for S5PV210
+ - samsung,exynos4210-secss # for Exynos4210, Exynos4212,
+ # Exynos4412, Exynos5250,
+ # Exynos5260 and Exynos5420
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: secss
+
+ interrupts:
+ description: One feed control interrupt.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+ - interrupts
+
+additionalProperties: false
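+
+examples:
+  # A minimal sketch modelled on an Exynos4210 SSS instance; the clock
+  # specifier and interrupt number are illustrative placeholders.
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    sss@10830000 {
+      compatible = "samsung,exynos4210-secss";
+      reg = <0x10830000 0x300>;
+      clocks = <&clock 471>;
+      clock-names = "secss";
+      interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+    };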
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-crc.txt b/Documentation/devicetree/bindings/crypto/st,stm32-crc.txt
deleted file mode 100644
index 3ba92a5e9b36..000000000000
--- a/Documentation/devicetree/bindings/crypto/st,stm32-crc.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-* STMicroelectronics STM32 CRC
-
-Required properties:
-- compatible: Should be "st,stm32f7-crc".
-- reg: The address and length of the peripheral registers space
-- clocks: The input clock of the CRC instance
-
-Optional properties: none
-
-Example:
-
-crc: crc@40023000 {
- compatible = "st,stm32f7-crc";
- reg = <0x40023000 0x400>;
- clocks = <&rcc 0 12>;
-};
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml
new file mode 100644
index 000000000000..cee624c14f07
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-crc.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/st,stm32-crc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 CRC bindings
+
+maintainers:
+ - Lionel Debieve <lionel.debieve@st.com>
+
+properties:
+ compatible:
+ const: st,stm32f7-crc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ crc@40023000 {
+ compatible = "st,stm32f7-crc";
+ reg = <0x40023000 0x400>;
+ clocks = <&rcc 0 12>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
deleted file mode 100644
index 970487fa40b8..000000000000
--- a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-* STMicroelectronics STM32 CRYP
-
-Required properties:
-- compatible: Should be "st,stm32f756-cryp".
-- reg: The address and length of the peripheral registers space
-- clocks: The input clock of the CRYP instance
-- interrupts: The CRYP interrupt
-
-Optional properties:
-- resets: The input reset of the CRYP instance
-
-Example:
-crypto@50060000 {
- compatible = "st,stm32f756-cryp";
- reg = <0x50060000 0x400>;
- interrupts = <79>;
- clocks = <&rcc 0 STM32F7_AHB2_CLOCK(CRYP)>;
- resets = <&rcc STM32F7_AHB2_RESET(CRYP)>;
-};
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml
new file mode 100644
index 000000000000..a4574552502a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/st,stm32-cryp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 CRYP bindings
+
+maintainers:
+ - Lionel Debieve <lionel.debieve@st.com>
+
+properties:
+ compatible:
+ enum:
+ - st,stm32f756-cryp
+ - st,stm32mp1-cryp
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ cryp@54001000 {
+ compatible = "st,stm32mp1-cryp";
+ reg = <0x54001000 0x400>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CRYP1>;
+ resets = <&rcc CRYP1_R>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt b/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt
deleted file mode 100644
index 04fc246f02f7..000000000000
--- a/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* STMicroelectronics STM32 HASH
-
-Required properties:
-- compatible: Should contain entries for this and backward compatible
- HASH versions:
- - "st,stm32f456-hash" for stm32 F456.
- - "st,stm32f756-hash" for stm32 F756.
-- reg: The address and length of the peripheral registers space
-- interrupts: the interrupt specifier for the HASH
-- clocks: The input clock of the HASH instance
-
-Optional properties:
-- resets: The input reset of the HASH instance
-- dmas: DMA specifiers for the HASH. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request name. Should be "in" if a dma is present.
-- dma-maxburst: Set number of maximum dma burst supported
-
-Example:
-
-hash1: hash@50060400 {
- compatible = "st,stm32f756-hash";
- reg = <0x50060400 0x400>;
- interrupts = <80>;
- clocks = <&rcc 0 STM32F7_AHB2_CLOCK(HASH)>;
- resets = <&rcc STM32F7_AHB2_RESET(HASH)>;
- dmas = <&dma2 7 2 0x400 0x0>;
- dma-names = "in";
- dma-maxburst = <0>;
-};
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
new file mode 100644
index 000000000000..57ae1c0b6d18
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/st,stm32-hash.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 HASH bindings
+
+maintainers:
+ - Lionel Debieve <lionel.debieve@st.com>
+
+properties:
+ compatible:
+ enum:
+ - st,stm32f456-hash
+ - st,stm32f756-hash
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ items:
+ - const: in
+
+ dma-maxburst:
+ description: Set the maximum DMA burst supported
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ - maximum: 2
+ - default: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ hash@54002000 {
+ compatible = "st,stm32f756-hash";
+ reg = <0x54002000 0x400>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc HASH1>;
+ resets = <&rcc HASH1_R>;
+ dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0>;
+ dma-names = "in";
+ dma-maxburst = <2>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
index 3e36c1d11386..fb46b491791c 100644
--- a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -10,14 +10,23 @@ The Exynos PPMU driver uses the devfreq-event class to provide event data
to various devfreq devices. The devfreq devices would use the event data when
determining the current state of each IP.
-Required properties:
+Required properties for PPMU device:
- compatible: Should be "samsung,exynos-ppmu" or "samsung,exynos-ppmu-v2.
- reg: physical base address of each PPMU and length of memory mapped region.
-Optional properties:
+Optional properties for PPMU device:
- clock-names : the name of clock used by the PPMU, "ppmu"
- clocks : phandles for clock specified in "clock-names" property
+Required properties for 'events' child node of PPMU device:
+- event-name : the unique event name among PPMU device
+Optional properties for 'events' child node of PPMU device:
+- event-data-type : Define the type of data which shall be counted
+by the counter. You can check include/dt-bindings/pmu/exynos_ppmu.h for
+all possible types, e.g. counting read requests, counting write data in
+bytes, etc. This field is optional and when it is missing, the driver
+code will use the default data type.
+
Example1 : PPMUv1 nodes in exynos3250.dtsi are listed below.
ppmu_dmc0: ppmu_dmc0@106a0000 {
@@ -145,3 +154,16 @@ Example3 : PPMUv2 nodes in exynos5433.dtsi are listed below.
reg = <0x104d0000 0x2000>;
status = "disabled";
};
+
+Example4 : 'event-data-type' in exynos4412-ppmu-common.dtsi are listed below.
+
+ &ppmu_dmc0 {
+ status = "okay";
+ events {
+ ppmu_dmc0_3: ppmu-event3-dmc0 {
+ event-name = "ppmu-event3-dmc0";
+ event-data-type = <(PPMU_RO_DATA_CNT |
+ PPMU_WO_DATA_CNT)>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
index f8e946471a58..e71f752cc18f 100644
--- a/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
+++ b/Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -50,8 +50,6 @@ Required properties only for passive bus device:
Optional properties only for parent bus device:
- exynos,saturation-ratio: the percentage value which is used to calibrate
the performance count against total cycle count.
-- exynos,voltage-tolerance: the percentage value for bus voltage tolerance
- which is used to calculate the max voltage.
Detailed correlation between sub-blocks and power line according to Exynos SoC:
- In case of Exynos3250, there are two power line as following:
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index 47950fced28d..dafc0980c4fa 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -36,6 +36,9 @@ properties:
resets:
maxItems: 1
+ vcc-dsi-supply:
+ description: VCC-DSI power supply of the DSI encoder
+
phys:
maxItems: 1
@@ -64,6 +67,7 @@ required:
- phys
- phy-names
- resets
+ - vcc-dsi-supply
- port
additionalProperties: false
@@ -79,6 +83,7 @@ examples:
resets = <&ccu 4>;
phys = <&dphy0>;
phy-names = "dphy";
+ vcc-dsi-supply = <&reg_dcdc1>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index fb747682006d..0da42ab8fd3a 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -79,8 +79,6 @@ properties:
hdmi-supply:
description: phandle to an external 5V regulator to power the HDMI logic
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
port@0:
type: object
diff --git a/Documentation/devicetree/bindings/display/arm,malidp.txt b/Documentation/devicetree/bindings/display/arm,malidp.txt
index 2f7870983ef1..7a97a2b48c2a 100644
--- a/Documentation/devicetree/bindings/display/arm,malidp.txt
+++ b/Documentation/devicetree/bindings/display/arm,malidp.txt
@@ -37,6 +37,8 @@ Optional properties:
Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
to be used for the framebuffer; if not present, the framebuffer may
be located anywhere in memory.
+ - arm,malidp-arqos-high-level: a u32 value describing the ARQoS
+ levels of DP500's QoS signaling.
Example:
@@ -54,6 +56,7 @@ Example:
clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
clock-names = "pxlclk", "mclk", "aclk", "pclk";
arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
+ arm,malidp-arqos-high-level = <0xd000d000>;
port {
dp0_output: endpoint {
remote-endpoint = <&tda998x_2_input>;
diff --git a/Documentation/devicetree/bindings/display/bridge/anx7814.txt b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
index dbd7c84ee584..17258747fff6 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx7814.txt
+++ b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
@@ -6,7 +6,11 @@ designed for portable devices.
Required properties:
- - compatible : "analogix,anx7814"
+ - compatible : Must be one of:
+ "analogix,anx7808"
+ "analogix,anx7812"
+ "analogix,anx7814"
+ "analogix,anx7818"
- reg : I2C address of the device
- interrupts : Should contain the INTP interrupt
- hpd-gpios : Which GPIO to use for hpd
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
index db680413e89c..819f3e31013c 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
@@ -13,6 +13,7 @@ Required properties:
- compatible : Shall contain one or more of
- "renesas,r8a774a1-hdmi" for R8A774A1 (RZ/G2M) compatible HDMI TX
+ - "renesas,r8a774b1-hdmi" for R8A774B1 (RZ/G2N) compatible HDMI TX
- "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX
- "renesas,r8a7796-hdmi" for R8A7796 (R-Car M3-W) compatible HDMI TX
- "renesas,r8a77965-hdmi" for R8A77965 (R-Car M3-N) compatible HDMI TX
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index c6a196d0b075..c62ce2494ed9 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -10,6 +10,7 @@ Required properties:
- "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
- "renesas,r8a7744-lvds" for R8A7744 (RZ/G1N) compatible LVDS encoders
- "renesas,r8a774a1-lvds" for R8A774A1 (RZ/G2M) compatible LVDS encoders
+ - "renesas,r8a774b1-lvds" for R8A774B1 (RZ/G2N) compatible LVDS encoders
- "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
- "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
- "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt
index 0a3fbb53a16e..8ec4a7f2623a 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.txt
@@ -21,7 +21,7 @@ Optional properties:
- #gpio-cells : Should be two. The first cell is the pin number and
the second cell is used to specify flags.
See ../../gpio/gpio.txt for more information.
-- #pwm-cells : Should be one. See ../../pwm/pwm.txt for description of
+- #pwm-cells : Should be one. See ../../pwm/pwm.yaml for description of
the cell formats.
- clock-names: should be "refclk"
diff --git a/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt b/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt
index b0e506610400..0ab5f0663611 100644
--- a/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt
+++ b/Documentation/devicetree/bindings/display/cirrus,clps711x-fb.txt
@@ -27,11 +27,11 @@ Example:
display: display {
model = "320x240x4";
- native-mode = <&timing0>;
bits-per-pixel = <4>;
ac-prescale = <17>;
display-timings {
+ native-mode = <&timing0>;
timing0: 320x240 {
hactive = <320>;
hback-porch = <0>;
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx-fb.txt b/Documentation/devicetree/bindings/display/imx/fsl,imx-fb.txt
index e5a8b363d829..f4df9e83bcd2 100644
--- a/Documentation/devicetree/bindings/display/imx/fsl,imx-fb.txt
+++ b/Documentation/devicetree/bindings/display/imx/fsl,imx-fb.txt
@@ -38,10 +38,10 @@ Example:
display0: display0 {
model = "Primeview-PD050VL1";
- native-mode = <&timing_disp0>;
bits-per-pixel = <16>;
fsl,pcr = <0xf0c88080>; /* non-standard but required */
display-timings {
+ native-mode = <&timing_disp0>;
timing_disp0: 640x480 {
hactive = <640>;
vactive = <480>;
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 8469de510001..b91e709db7a4 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -27,19 +27,22 @@ Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
Required properties (all function blocks):
- compatible: "mediatek,<chip>-disp-<function>", one of
- "mediatek,<chip>-disp-ovl" - overlay (4 layers, blending, csc)
- "mediatek,<chip>-disp-rdma" - read DMA / line buffer
- "mediatek,<chip>-disp-wdma" - write DMA
- "mediatek,<chip>-disp-color" - color processor
- "mediatek,<chip>-disp-aal" - adaptive ambient light controller
- "mediatek,<chip>-disp-gamma" - gamma correction
- "mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
- "mediatek,<chip>-disp-split" - split stream to two encoders
- "mediatek,<chip>-disp-ufoe" - data compression engine
- "mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
- "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
- "mediatek,<chip>-disp-mutex" - display mutex
- "mediatek,<chip>-disp-od" - overdrive
+ "mediatek,<chip>-disp-ovl" - overlay (4 layers, blending, csc)
+ "mediatek,<chip>-disp-ovl-2l" - overlay (2 layers, blending, csc)
+ "mediatek,<chip>-disp-rdma" - read DMA / line buffer
+ "mediatek,<chip>-disp-wdma" - write DMA
+ "mediatek,<chip>-disp-ccorr" - color correction
+ "mediatek,<chip>-disp-color" - color processor
+ "mediatek,<chip>-disp-dither" - dither
+ "mediatek,<chip>-disp-aal" - adaptive ambient light controller
+ "mediatek,<chip>-disp-gamma" - gamma correction
+ "mediatek,<chip>-disp-merge" - merge streams from two RDMA sources
+ "mediatek,<chip>-disp-split" - split stream to two encoders
+ "mediatek,<chip>-disp-ufoe" - data compression engine
+ "mediatek,<chip>-dsi" - DSI controller, see mediatek,dsi.txt
+ "mediatek,<chip>-dpi" - DPI controller, see mediatek,dpi.txt
+ "mediatek,<chip>-disp-mutex" - display mutex
+ "mediatek,<chip>-disp-od" - overdrive
the supported chips are mt2701, mt2712 and mt8173.
- reg: Physical base address and length of the function block register space
- interrupts: The interrupt signal from the function block (required, except for
@@ -49,6 +52,7 @@ Required properties (all function blocks):
For most function blocks this is just a single clock input. Only the DSI and
DPI controller nodes have multiple clock inputs. These are documented in
mediatek,dsi.txt and mediatek,dpi.txt, respectively.
+ An exception is the mt8183 mutex, which is always free-running and has no clocks property.
Required properties (DMA function blocks):
- compatible: Should be one of
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index fadf327c7cdf..a19a6cc375ed 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -7,7 +7,7 @@ channel output.
Required properties:
- compatible: "mediatek,<chip>-dsi"
- the supported chips are mt2701 and mt8173.
+ the supported chips are mt2701, mt8173 and mt8183.
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks
@@ -26,7 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
Required properties:
- compatible: "mediatek,<chip>-mipi-tx"
- the supported chips are mt2701 and mt8173.
+ the supported chips are mt2701, mt8173 and mt8183.
- reg: Physical base address and length of the controller's registers
- clocks: PLL reference clock
- clock-output-names: name of the output clock line to the DSI encoder
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.txt b/Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.txt
deleted file mode 100644
index fd9cf39bde77..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Sharp LD-D5116Z01B 12.3" WUXGA+ eDP panel
-
-Required properties:
-- compatible: should be "sharp,ld-d5116z01b"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-
-This binding is compatible with the simple-panel binding.
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [1]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
- panel: panel {
- compatible = "sharp,ld-d5116z01b";
- power-supply = <&vlcd_3v3>;
-
- port {
- panel_ep: endpoint {
- remote-endpoint = <&bridge_out_ep>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.yaml b/Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.yaml
new file mode 100644
index 000000000000..fbb647eb33c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sharp,ld-d5116z01b.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/sharp,ld-d5116z01b.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sharp LD-D5116Z01B 12.3" WUXGA+ eDP panel
+
+maintainers:
+ - Jeffrey Hugo <jeffrey.l.hugo@gmail.com>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: sharp,ld-d5116z01b
+
+ power-supply: true
+ backlight: true
+ port: true
+ no-hpd: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - power-supply
+
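+examples:
+  # A sketch carried over from the example in the former text binding; the
+  # regulator and remote endpoint phandles are placeholders.
+  - |
+    panel {
+      compatible = "sharp,ld-d5116z01b";
+      power-supply = <&vlcd_3v3>;
+
+      port {
+        panel_ep: endpoint {
+          remote-endpoint = <&bridge_out_ep>;
+        };
+      };
+    };
+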
+...
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index c97dfacad281..17cb2771364b 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -8,6 +8,7 @@ Required Properties:
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
- "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
- "renesas,du-r8a774a1" for R8A774A1 (RZ/G2M) compatible DU
+ - "renesas,du-r8a774b1" for R8A774B1 (RZ/G2N) compatible DU
- "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
@@ -60,6 +61,7 @@ corresponding to each DU output.
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
R8A774A1 (RZ/G2M) DPAD 0 HDMI 0 LVDS 0 -
+ R8A774B1 (RZ/G2N) DPAD 0 HDMI 0 LVDS 0 -
R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index 4f58c5a2d195..8b3a5f514205 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -20,6 +20,10 @@ Required properties:
"rockchip,rk3228-vop";
"rockchip,rk3328-vop";
+- reg: Must contain one entry corresponding to the base address and length
+ of the register space. Can optionally contain a second entry
+ corresponding to the CRTC gamma LUT address.
+
- interrupts: should contain a list of all VOP IP block interrupts in the
order: VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
@@ -48,7 +52,7 @@ Example:
SoC specific DT entry:
vopb: vopb@ff930000 {
compatible = "rockchip,rk3288-vop";
- reg = <0xff930000 0x19c>;
+ reg = <0x0 0xff930000 0x0 0x19c>, <0x0 0xff931000 0x0 0x1000>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
diff --git a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
new file mode 100644
index 000000000000..3be76d15bf6c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/st,stm32-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 DSI host controller
+
+maintainers:
+ - Philippe Cornu <philippe.cornu@st.com>
+ - Yannick Fertre <yannick.fertre@st.com>
+
+description:
+ The STMicroelectronics STM32 DSI controller uses the Synopsys DesignWare MIPI-DSI host controller.
+
+properties:
+ compatible:
+ const: st,stm32-dsi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Module Clock
+ - description: DSI bus clock
+ - description: Pixel clock
+ minItems: 2
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: ref
+ - const: px_clk
+ minItems: 2
+ maxItems: 3
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: apb
+
+ phy-dsi-supply:
+ description:
+ Phandle of the regulator that provides the supply voltage.
+
+ ports:
+ type: object
+ description:
+ A node containing DSI input & output port nodes with endpoint
+ definitions as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt and
+ Documentation/devicetree/bindings/graph.txt
+ properties:
+ port@0:
+ type: object
+ description:
+ DSI input port node, connected to the ltdc rgb output port.
+
+ port@1:
+ type: object
+ description:
+ DSI output port node, connected to a panel or a bridge input port.
+
+patternProperties:
+ "^(panel|panel-dsi)@[0-9]$":
+ type: object
+ description:
+ A node containing the panel or bridge description as documented in
+ Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
+ properties:
+ port:
+ type: object
+ description:
+ Panel or bridge port node, connected to the DSI output port (port@1)
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ #include <dt-bindings/gpio/gpio.h>
+ dsi: dsi@5a000000 {
+ compatible = "st,stm32-dsi";
+ reg = <0x5a000000 0x800>;
+ clocks = <&rcc DSI_K>, <&clk_hse>, <&rcc DSI_PX>;
+ clock-names = "pclk", "ref", "px_clk";
+ resets = <&rcc DSI_R>;
+ reset-names = "apb";
+ phy-dsi-supply = <&reg18>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi_in: endpoint {
+ remote-endpoint = <&ltdc_ep1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
+ panel-dsi@0 {
+ compatible = "orisetech,otm8009a";
+ reg = <0>;
+ reset-gpios = <&gpioe 4 GPIO_ACTIVE_LOW>;
+ power-supply = <&v3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+...
+
+
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
deleted file mode 100644
index 60c54da4e526..000000000000
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+++ /dev/null
@@ -1,144 +0,0 @@
-* STMicroelectronics STM32 lcd-tft display controller
-
-- ltdc: lcd-tft display controller host
- Required properties:
- - compatible: "st,stm32-ltdc"
- - reg: Physical base address of the IP registers and length of memory mapped region.
- - clocks: A list of phandle + clock-specifier pairs, one for each
- entry in 'clock-names'.
- - clock-names: A list of clock names. For ltdc it should contain:
- - "lcd" for the clock feeding the output pixel clock & IP clock.
- - resets: reset to be used by the device (defined by use of RCC macro).
- Required nodes:
- - Video port for DPI RGB output: ltdc has one video port with up to 2
- endpoints:
- - for external dpi rgb panel or bridge, using gpios.
- - for internal dpi input of the MIPI DSI host controller.
- Note: These 2 endpoints cannot be activated simultaneously.
-
-* STMicroelectronics STM32 DSI controller specific extensions to Synopsys
- DesignWare MIPI DSI host controller
-
-The STMicroelectronics STM32 DSI controller uses the Synopsys DesignWare MIPI
-DSI host controller. For all mandatory properties & nodes, please refer
-to the related documentation in [5].
-
-Mandatory properties specific to STM32 DSI:
-- #address-cells: Should be <1>.
-- #size-cells: Should be <0>.
-- compatible: "st,stm32-dsi".
-- clock-names:
- - phy pll reference clock string name, must be "ref".
-- resets: see [5].
-- reset-names: see [5].
-
-Mandatory nodes specific to STM32 DSI:
-- ports: A node containing DSI input & output port nodes with endpoint
- definitions as documented in [3] & [4].
- - port@0: DSI input port node, connected to the ltdc rgb output port.
- - port@1: DSI output port node, connected to a panel or a bridge input port.
-- panel or bridge node: A node containing the panel or bridge description as
- documented in [6].
- - port: panel or bridge port node, connected to the DSI output port (port@1).
-Optional properties:
-- phy-dsi-supply: phandle of the regulator that provides the supply voltage.
-
-Note: You can find more documentation in the following references
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/reset/reset.txt
-[3] Documentation/devicetree/bindings/media/video-interfaces.txt
-[4] Documentation/devicetree/bindings/graph.txt
-[5] Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt
-[6] Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
-
-Example 1: RGB panel
-/ {
- ...
- soc {
- ...
- ltdc: display-controller@40016800 {
- compatible = "st,stm32-ltdc";
- reg = <0x40016800 0x200>;
- interrupts = <88>, <89>;
- resets = <&rcc STM32F4_APB2_RESET(LTDC)>;
- clocks = <&rcc 1 CLK_LCD>;
- clock-names = "lcd";
-
- port {
- ltdc_out_rgb: endpoint {
- };
- };
- };
- };
-};
-
-Example 2: DSI panel
-
-/ {
- ...
- soc {
- ...
- ltdc: display-controller@40016800 {
- compatible = "st,stm32-ltdc";
- reg = <0x40016800 0x200>;
- interrupts = <88>, <89>;
- resets = <&rcc STM32F4_APB2_RESET(LTDC)>;
- clocks = <&rcc 1 CLK_LCD>;
- clock-names = "lcd";
-
- port {
- ltdc_out_dsi: endpoint {
- remote-endpoint = <&dsi_in>;
- };
- };
- };
-
-
- dsi: dsi@40016c00 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-dsi";
- reg = <0x40016c00 0x800>;
- clocks = <&rcc 1 CLK_F469_DSI>, <&clk_hse>;
- clock-names = "pclk", "ref";
- resets = <&rcc STM32F4_APB2_RESET(DSI)>;
- reset-names = "apb";
- phy-dsi-supply = <&reg18>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- dsi_in: endpoint {
- remote-endpoint = <&ltdc_out_dsi>;
- };
- };
-
- port@1 {
- reg = <1>;
- dsi_out: endpoint {
- remote-endpoint = <&dsi_in_panel>;
- };
- };
-
- };
-
- panel-dsi@0 {
- reg = <0>; /* dsi virtual channel (0..3) */
- compatible = ...;
- enable-gpios = ...;
-
- port {
- dsi_in_panel: endpoint {
- remote-endpoint = <&dsi_out>;
- };
- };
-
- };
-
- };
-
- };
-};
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
new file mode 100644
index 000000000000..bf8ad916e9b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/st,stm32-ltdc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 lcd-tft display controller
+
+maintainers:
+ - Philippe Cornu <philippe.cornu@st.com>
+ - Yannick Fertre <yannick.fertre@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-ltdc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: events interrupt line.
+ - description: errors interrupt line.
+ minItems: 1
+ maxItems: 2
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: lcd
+
+ resets:
+ maxItems: 1
+
+ port:
+ type: object
+ description:
+ "Video port for DPI RGB output.
+ ltdc has one video port with up to 2 endpoints:
+ - for external dpi rgb panel or bridge, using gpios.
+ - for internal dpi input of the MIPI DSI host controller.
+ Note: These 2 endpoints cannot be activated simultaneously.
+ Please refer to the bindings defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt."
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+    ltdc: display-controller@5a001000 {
+ compatible = "st,stm32-ltdc";
+ reg = <0x5a001000 0x400>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc LTDC_PX>;
+ clock-names = "lcd";
+ resets = <&rcc LTDC_R>;
+
+ port {
+ ltdc_out_dsi: endpoint {
+ remote-endpoint = <&dsi_in>;
+ };
+ };
+ };
+
+...
+
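A note on the port description above: in the external RGB panel case the output
endpoint connects straight to a panel or bridge instead of the DSI host. A
sketch, reusing the STM32F4 specifiers from the text example removed above
(illustrative only, not part of the schema):

    ltdc: display-controller@40016800 {
        compatible = "st,stm32-ltdc";
        reg = <0x40016800 0x200>;
        interrupts = <88>, <89>;
        resets = <&rcc STM32F4_APB2_RESET(LTDC)>;
        clocks = <&rcc 1 CLK_LCD>;
        clock-names = "lcd";

        port {
            ltdc_out_rgb: endpoint {
            };
        };
    };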
diff --git a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
index 4cb9d6b93138..387d599522c7 100644
--- a/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
@@ -68,9 +68,7 @@ else:
clocks:
maxItems: 1
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/dma/dma-common.yaml b/Documentation/devicetree/bindings/dma/dma-common.yaml
index ed0a49a6f020..02a34ba2b49b 100644
--- a/Documentation/devicetree/bindings/dma/dma-common.yaml
+++ b/Documentation/devicetree/bindings/dma/dma-common.yaml
@@ -25,11 +25,18 @@ properties:
Used to provide DMA controller specific information.
dma-channel-mask:
- $ref: /schemas/types.yaml#definitions/uint32
description:
Bitmask of available DMA channels in ascending order that are
not reserved by firmware and are available to the
kernel. i.e. first channel corresponds to LSB.
+ The first item in the array is for channels 0-31, the second is for
+ channels 32-63, etc.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minItems: 1
+ # Should be enough
+ maxItems: 255
dma-channels:
$ref: /schemas/types.yaml#definitions/uint32
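As a sketch of the dma-channel-mask encoding just described (values assumed,
not from the binding): a 64-channel controller on which firmware reserves
channel 0 and channels 45-63 would describe the remaining usable channels as

    dma-channel-mask = <0xfffffffe   /* channels 1-31 usable */
                        0x00001fff>; /* channels 32-44 usable */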
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
index 636fcb26b164..ec89782d9498 100644
--- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -7,10 +7,11 @@ Required properties:
* ingenic,jz4725b-dma
* ingenic,jz4770-dma
* ingenic,jz4780-dma
+ * ingenic,x1000-dma
- reg: Should contain the DMA channel registers location and length, followed
by the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
-- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
+- clocks: Should contain a clock specifier for the JZ4780/X1000 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
DMA clients (see below).
diff --git a/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt b/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt
new file mode 100644
index 000000000000..1f0875bd5abc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt
@@ -0,0 +1,32 @@
+* Milbeaut AHB DMA Controller
+
+The Milbeaut AHB DMA controller has the transfer capabilities below.
+ - device to memory transfer
+ - memory to device transfer
+
+Required property:
+- compatible: Should be "socionext,milbeaut-m10v-hdmac"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain all of the per-channel DMA interrupts.
+              The number of channels is configurable (2, 4 or 8), so
+              the number of interrupts specified should be 2, 4 or 8.
+- #dma-cells: Should be 1. Specify the ID of the slave.
+- clocks: Phandle to the clock used by the HDMAC module.
+
+
+Example:
+
+ hdmac1: dma-controller@1e110000 {
+ compatible = "socionext,milbeaut-m10v-hdmac";
+ reg = <0x1e110000 0x10000>;
+ interrupts = <0 132 4>,
+ <0 133 4>,
+ <0 134 4>,
+ <0 135 4>,
+ <0 136 4>,
+ <0 137 4>,
+ <0 138 4>,
+ <0 139 4>;
+ #dma-cells = <1>;
+ clocks = <&dummy_clk>;
+ };
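A client node would then reference a channel through the single dma cell,
which carries the slave ID per the #dma-cells description above (a sketch;
the consumer device and ID values are assumptions):

    uart0: serial@1e700000 {
        /* ... */
        dmas = <&hdmac1 20>, <&hdmac1 21>;
        dma-names = "tx", "rx";
    };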
diff --git a/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt b/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt
new file mode 100644
index 000000000000..305791804062
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt
@@ -0,0 +1,24 @@
+* Milbeaut AXI DMA Controller
+
+The Milbeaut AXI DMA controller has only memory-to-memory transfer capability.
+
+* DMA controller
+
+Required property:
+- compatible: Should be "socionext,milbeaut-m10v-xdmac"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain all of the per-channel DMA interrupts.
+              The number of channels is configurable (2, 4 or 8), so
+              the number of interrupts specified should be 2, 4 or 8.
+- #dma-cells: Should be 1.
+
+Example:
+ xdmac0: dma-controller@1c250000 {
+ compatible = "socionext,milbeaut-m10v-xdmac";
+ reg = <0x1c250000 0x1000>;
+ interrupts = <0 17 0x4>,
+ <0 18 0x4>,
+ <0 19 0x4>,
+ <0 20 0x4>;
+ #dma-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 5a512c5ea76a..5551e929fd99 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -21,6 +21,7 @@ Required Properties:
- "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a77470" (RZ/G1C)
- "renesas,dmac-r8a774a1" (RZ/G2M)
+ - "renesas,dmac-r8a774b1" (RZ/G2N)
- "renesas,dmac-r8a774c0" (RZ/G2E)
- "renesas,dmac-r8a7790" (R-Car H2)
- "renesas,dmac-r8a7791" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 372f0eeb5a2a..f1f95f678739 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -8,6 +8,7 @@ Required Properties:
- "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a77470-usb-dmac" (RZ/G1C)
- "renesas,r8a774a1-usb-dmac" (RZ/G2M)
+ - "renesas,r8a774b1-usb-dmac" (RZ/G2N)
- "renesas,r8a774c0-usb-dmac" (RZ/G2E)
- "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
new file mode 100644
index 000000000000..2ca3ddbe1ff4
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/sifive,fu540-c000-pdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive Unleashed Rev C000 Platform DMA
+
+maintainers:
+ - Green Wan <green.wan@sifive.com>
+ - Palmer Dabbelt <palmer@sifive.com>
+ - Paul Walmsley <paul.walmsley@sifive.com>
+
+description: |
+  Platform DMA is the DMA engine of the SiFive Unleashed. It supports 4
+  channels. Each channel has 2 interrupts: one for DMA done and the
+  other for DMA error.
+
+  On a different SoC the DMA may be attached to different IRQ lines, so
+  the DT file needs to be changed to match. For the technical
+  documentation, see:
+
+  https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+
+properties:
+ compatible:
+ items:
+ - const: sifive,fu540-c000-pdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 8
+
+ '#dma-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#dma-cells'
+
+examples:
+ - |
+ dma@3000000 {
+ compatible = "sifive,fu540-c000-pdma";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupts = <23 24 25 26 27 28 29 30>;
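+      /* Assuming the two-interrupts-per-channel layout described above,
+       * i.e. <ch0-done ch0-err ch1-done ch1-err ...>; verify the exact
+       * ordering against the FU540-C000 manual. */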
+ #dma-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 4bbc94d829c8..0e1398f93aa2 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -42,6 +42,11 @@ Optional properties:
- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
the driver, they are allocated to be used by for example the
DSP. See example.
+- dma-channel-mask: Mask of usable channels.
+ Single uint32 for EDMA with 32 channels, array of two uint32 for
+ EDMA with 64 channels. See example and
+ Documentation/devicetree/bindings/dma/dma-common.yaml
+
------------------------------------------------------------------------------
eDMA3 Transfer Controller
@@ -91,6 +96,9 @@ edma: edma@49000000 {
ti,edma-memcpy-channels = <20 21>;
/* The following PaRAM slots are reserved: 35-44 and 100-109 */
ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
+ /* The following channels are reserved: 35-44 */
+ dma-channel-mask = <0xffffffff /* Channel 0-31 */
+ 0xffffe007>; /* Channel 32-63 */
};
edma_tptc0: tptc@49800000 {
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 93b6d961dd4f..325aca52cd43 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -11,9 +11,16 @@ is to receive from the device.
Xilinx AXI CDMA engine, it does transfers between memory-mapped source
address and a memory-mapped destination address.
+Xilinx AXI MCDMA engine, it does transfers between memory and AXI4 stream
+target devices. It can be configured to have up to 16 independent transmit
+and receive channels.
+
Required properties:
-- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
- "xlnx,axi-cdma-1.00.a""
+- compatible: Should be one of:
+ "xlnx,axi-vdma-1.00.a"
+ "xlnx,axi-dma-1.00.a"
+ "xlnx,axi-cdma-1.00.a"
+ "xlnx,axi-mcdma-1.00.a"
- #dma-cells: Should be <1>, see "dmas" property below
- reg: Should contain VDMA registers location and length.
- xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
@@ -29,7 +36,7 @@ Required properties:
"m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
For CDMA:
Required elements: "s_axi_lite_aclk", "m_axi_aclk"
- FOR AXIDMA:
+ For AXIDMA and MCDMA:
Required elements: "s_axi_lite_aclk"
Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
"m_axi_sg_aclk"
@@ -37,12 +44,11 @@ Required properties:
Required properties for VDMA:
- xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
-Optional properties for AXI DMA:
+Optional properties for AXI DMA and MCDMA:
- xlnx,sg-length-width: Should be set to the width in bits of the length
register as configured in h/w. Takes values {8...26}. If the property
is missing or invalid then the default value 23 is used. This is the
maximum value that is supported by all IP versions.
-- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
It takes following values:
@@ -55,8 +61,8 @@ Required child node properties:
For VDMA: It should be either "xlnx,axi-vdma-mm2s-channel" or
"xlnx,axi-vdma-s2mm-channel".
For CDMA: It should be "xlnx,axi-cdma-channel".
- For AXIDMA: It should be either "xlnx,axi-dma-mm2s-channel" or
- "xlnx,axi-dma-s2mm-channel".
+ For AXIDMA and MCDMA: It should be either "xlnx,axi-dma-mm2s-channel"
+ or "xlnx,axi-dma-s2mm-channel".
- interrupts: Should contain per channel VDMA interrupts.
- xlnx,datawidth: Should contain the stream data width, take values
{32,64...1024}.
@@ -69,8 +75,8 @@ Optional child node properties for VDMA:
enabled/disabled in hardware.
- xlnx,enable-vert-flip: Tells vertical flip is
enabled/disabled in hardware(S2MM path).
-Optional child node properties for AXI DMA:
--dma-channels: Number of dma channels in child node.
+Optional child node properties for MCDMA:
+- dma-channels: Number of dma channels in child node.
Example:
++++++++
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
index 22aead844d0f..c94acbb8cb0c 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.txt
+++ b/Documentation/devicetree/bindings/eeprom/at24.txt
@@ -1,89 +1 @@
-EEPROMs (I2C)
-
-Required properties:
-
- - compatible: Must be a "<manufacturer>,<model>" pair. The following <model>
- values are supported (assuming "atmel" as manufacturer):
-
- "atmel,24c00",
- "atmel,24c01",
- "atmel,24cs01",
- "atmel,24c02",
- "atmel,24cs02",
- "atmel,24mac402",
- "atmel,24mac602",
- "atmel,spd",
- "atmel,24c04",
- "atmel,24cs04",
- "atmel,24c08",
- "atmel,24cs08",
- "atmel,24c16",
- "atmel,24cs16",
- "atmel,24c32",
- "atmel,24cs32",
- "atmel,24c64",
- "atmel,24cs64",
- "atmel,24c128",
- "atmel,24c256",
- "atmel,24c512",
- "atmel,24c1024",
- "atmel,24c2048",
-
- If <manufacturer> is not "atmel", then a fallback must be used
- with the same <model> and "atmel" as manufacturer.
-
- Example:
- compatible = "microchip,24c128", "atmel,24c128";
-
- Supported manufacturers are:
-
- "catalyst",
- "microchip",
- "nxp",
- "ramtron",
- "renesas",
- "rohm",
- "st",
-
- Some vendors use different model names for chips which are just
- variants of the above. Known such exceptions are listed below:
-
- "nxp,se97b" - the fallback is "atmel,24c02",
- "renesas,r1ex24002" - the fallback is "atmel,24c02"
- "renesas,r1ex24016" - the fallback is "atmel,24c16"
- "renesas,r1ex24128" - the fallback is "atmel,24c128"
- "rohm,br24t01" - the fallback is "atmel,24c01"
-
- - reg: The I2C address of the EEPROM.
-
-Optional properties:
-
- - pagesize: The length of the pagesize for writing. Please consult the
- manual of your device, that value varies a lot. A wrong value
- may result in data loss! If not specified, a safety value of
- '1' is used which will be very slow.
-
- - read-only: This parameterless property disables writes to the eeprom.
-
- - size: Total eeprom size in bytes.
-
- - no-read-rollover: This parameterless property indicates that the
- multi-address eeprom does not automatically roll over
- reads to the next slave address. Please consult the
- manual of your device.
-
- - wp-gpios: GPIO to which the write-protect pin of the chip is connected.
-
- - address-width: number of address bits (one of 8, 16).
-
- - num-addresses: total number of i2c slave addresses this device takes
-
-Example:
-
-eeprom@52 {
- compatible = "atmel,24c32";
- reg = <0x52>;
- pagesize = <32>;
- wp-gpios = <&gpio1 3 0>;
- num-addresses = <8>;
-};
+This file has been moved to at24.yaml.
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
new file mode 100644
index 000000000000..e8778560d966
--- /dev/null
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright 2019 BayLibre SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/eeprom/at24.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: I2C EEPROMs compatible with Atmel's AT24
+
+maintainers:
+ - Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ pattern: "^atmel,(24(c|cs|mac)[0-9]+|spd)$"
+ required:
+ - compatible
+
+properties:
+ $nodename:
+ pattern: "^eeprom@[0-9a-f]{1,2}$"
+
+ # There are multiple known vendors who manufacture EEPROM chips compatible
+ # with Atmel's AT24. The compatible string requires either a single item
+ # if the memory comes from Atmel (in which case the vendor part must be
+ # 'atmel') or two items with the same 'model' part where the vendor part of
+ # the first one is the actual manufacturer and the second item is the
+ # corresponding 'atmel,<model>' from Atmel.
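+  # For example (taken from the old text file):
+  #   compatible = "microchip,24c128", "atmel,24c128";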
+ compatible:
+ oneOf:
+ - allOf:
+ - minItems: 1
+ maxItems: 2
+ items:
+ - pattern: "^(atmel|catalyst|microchip|nxp|ramtron|renesas|rohm|st),(24(c|cs|mac)[0-9]+|spd)$"
+ - pattern: "^atmel,(24(c|cs|mac)[0-9]+|spd)$"
+ - oneOf:
+ - items:
+ pattern: c00$
+ - items:
+ pattern: c01$
+ - items:
+ pattern: cs01$
+ - items:
+ pattern: c02$
+ - items:
+ pattern: cs02$
+ - items:
+ pattern: mac402$
+ - items:
+ pattern: mac602$
+ - items:
+ pattern: c04$
+ - items:
+ pattern: cs04$
+ - items:
+ pattern: c08$
+ - items:
+ pattern: cs08$
+ - items:
+ pattern: c16$
+ - items:
+ pattern: cs16$
+ - items:
+ pattern: c32$
+ - items:
+ pattern: cs32$
+ - items:
+ pattern: c64$
+ - items:
+ pattern: cs64$
+ - items:
+ pattern: c128$
+ - items:
+ pattern: cs128$
+ - items:
+ pattern: c256$
+ - items:
+ pattern: cs256$
+ - items:
+ pattern: c512$
+ - items:
+ pattern: cs512$
+ - items:
+ pattern: c1024$
+ - items:
+ pattern: cs1024$
+ - items:
+ pattern: c2048$
+ - items:
+ pattern: cs2048$
+ - items:
+ pattern: spd$
+ # These are special cases that don't conform to the above pattern.
+ # Each requires a standard at24 model as fallback.
+ - items:
+ - const: rohm,br24t01
+ - const: atmel,24c01
+ - items:
+ - const: nxp,se97b
+ - const: atmel,24c02
+ - items:
+ - const: renesas,r1ex24002
+ - const: atmel,24c02
+ - items:
+ - const: renesas,r1ex24016
+ - const: atmel,24c16
+ - items:
+ - const: giantec,gt24c32a
+ - const: atmel,24c32
+ - items:
+ - const: renesas,r1ex24128
+ - const: atmel,24c128
+
+ reg:
+ maxItems: 1
+
+ pagesize:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+      The length of the pagesize for writing. Please consult the manual
+      of your device; that value varies a lot. A wrong value may result
+      in data loss! If not specified, a safety value of '1' is used,
+      which will be very slow.
+    enum: [ 1, 8, 16, 32, 64, 128, 256 ]
+ default: 1
+
+ read-only:
+ $ref: /schemas/types.yaml#definitions/flag
+ description:
+ Disables writes to the eeprom.
+
+ size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Total eeprom size in bytes.
+
+ no-read-rollover:
+ $ref: /schemas/types.yaml#definitions/flag
+ description:
+ Indicates that the multi-address eeprom does not automatically roll
+ over reads to the next slave address. Please consult the manual of
+ your device.
+
+ wp-gpios:
+ description:
+ GPIO to which the write-protect pin of the chip is connected.
+ maxItems: 1
+
+ address-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Number of address bits.
+ default: 8
+ enum: [ 8, 16 ]
+
+ num-addresses:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Total number of i2c slave addresses this device takes.
+ default: 1
+ minimum: 1
+ maximum: 8
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "microchip,24c32", "atmel,24c32";
+ reg = <0x52>;
+ pagesize = <32>;
+ wp-gpios = <&gpio1 3 0>;
+ num-addresses = <8>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/example-schema.yaml b/Documentation/devicetree/bindings/example-schema.yaml
index c43819c2783a..4ddcf709cc3c 100644
--- a/Documentation/devicetree/bindings/example-schema.yaml
+++ b/Documentation/devicetree/bindings/example-schema.yaml
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright 2018 Linaro Ltd.
%YAML 1.2
---
@@ -71,7 +71,7 @@ properties:
# minItems/maxItems equal to 2 is implied
reg-names:
- # The core schema enforces this is a string array
+ # The core schema enforces this (*-names) is a string array
items:
- const: core
- const: aux
@@ -79,7 +79,8 @@ properties:
clocks:
# Cases that have only a single entry just need to express that with maxItems
maxItems: 1
- description: bus clock
+ description: bus clock. A description is only needed for a single item if
+ there's something unique to add.
clock-names:
items:
@@ -127,6 +128,14 @@ properties:
maxItems: 1
description: A connection of the 'foo' gpio line.
+ # *-supply is always a single phandle, so nothing more to define.
+ foo-supply: true
+
+ # Vendor specific properties
+ #
+ # Vendor specific properties have slightly different schema requirements than
+ # common properties. They must have at least a type definition and
+ # 'description'.
vendor,int-property:
description: Vendor specific properties must have a description
# 'allOf' is the json-schema way of subclassing a schema. Here the base
@@ -137,9 +146,9 @@ properties:
- enum: [2, 4, 6, 8, 10]
vendor,bool-property:
- description: Vendor specific properties must have a description
- # boolean properties is one case where the json-schema 'type' keyword
- # can be used directly
+ description: Vendor specific properties must have a description. Boolean
+ properties are one case where the json-schema 'type' keyword can be used
+ directly.
type: boolean
vendor,string-array-property:
@@ -151,14 +160,72 @@ properties:
- enum: [ foo, bar ]
- enum: [ baz, boo ]
+ vendor,property-in-standard-units-microvolt:
+ description: Vendor specific properties having a standard unit suffix
+ don't need a type.
+ enum: [ 100, 200, 300 ]
+
+ child-node:
+ description: Child nodes are just another property from a json-schema
+ perspective.
+ type: object # DT nodes are json objects
+ properties:
+ vendor,a-child-node-property:
+ description: Child node properties have all the same schema
+ requirements.
+ type: boolean
+
+ required:
+ - vendor,a-child-node-property
+
+# Describe the relationship between different properties
+dependencies:
+ # 'vendor,bool-property' is only allowed when 'vendor,string-array-property'
+ # is present
+ vendor,bool-property: [ vendor,string-array-property ]
+ # Expressing 2 properties in both orders means all of the set of properties
+ # must be present or none of them.
+ vendor,string-array-property: [ vendor,bool-property ]
+
required:
- compatible
- reg
- interrupts
- interrupt-controller
+# if/then schema can be used to handle conditions on a property affecting
+# another property. A typical case is a specific 'compatible' value changes the
+# constraints on other properties.
+#
+# For multiple 'if' schema, group them under an 'allOf'.
+#
+# If the conditionals become too unwieldy, then it may be better to just split
+# the binding into separate schema documents.
+if:
+ properties:
+ compatible:
+ contains:
+ const: vendor,soc2-ip
+then:
+ required:
+ - foo-supply
+
+# Ideally, the schema should have this line; otherwise any other properties
+# present are allowed. There are a few common properties such as 'status' and
+# 'pinctrl-*' which are added automatically by the tooling.
+#
+# This can't be used in cases where another schema is referenced
+# (i.e. allOf: [{$ref: ...}]).
+additionalProperties: false
+
examples:
- # Examples are now compiled with dtc
+ # Examples are now compiled with dtc and validated against the schemas
+ #
+ # Examples have a default #address-cells and #size-cells value of 1. This can
+ # be overridden or an appropriate parent bus node should be shown (such as on
+ # i2c buses).
+ #
+ # Any includes used have to be explicitly included.
- |
node@1000 {
compatible = "vendor,soc4-ip", "vendor,soc1-ip";
diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
index 4f0db8ee226a..878a2079ebb6 100644
--- a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
+++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
@@ -25,8 +25,6 @@ properties:
- const: intel,ixp4xx-network-processing-engine
reg:
- minItems: 3
- maxItems: 3
items:
- description: NPE0 register range
- description: NPE1 register range
diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
index ff380dadb5f9..e44a13bc06ed 100644
--- a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
+++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
@@ -32,7 +32,7 @@ implemented by this node:
- .../clock/clock-bindings.txt
- <dt-bindings/clock/tegra186-clock.h>
-- ../power/power_domain.txt
+- ../power/power-domain.yaml
- <dt-bindings/power/tegra186-powergate.h>
- .../reset/reset.txt
- <dt-bindings/reset/tegra186-reset.h>
diff --git a/Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt b/Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt
new file mode 100644
index 000000000000..b758f91914f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/fsi/fsi-master-aspeed.txt
@@ -0,0 +1,24 @@
+Device-tree bindings for AST2600 FSI master
+-------------------------------------------
+
+The AST2600 contains two identical FSI masters. They share a clock, and each
+has its own interrupt line and output pins.
+
+Required properties:
+ - compatible: "aspeed,ast2600-fsi-master"
+ - reg: base address and length
+ - clocks: phandle and clock number
+ - interrupts: platform dependent interrupt description
+ - pinctrl-0: phandle to pinctrl node
+ - pinctrl-names: pinctrl state
+
+Examples:
+
+ fsi-master {
+ compatible = "aspeed,ast2600-fsi-master", "fsi-master";
+ reg = <0x1e79b000 0x94>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fsi1_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_FSICLK>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml b/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
new file mode 100644
index 000000000000..64e279a4bc10
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/brcm,xgs-iproc-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom XGS iProc GPIO controller
+
+maintainers:
+ - Chris Packham <chris.packham@alliedtelesis.co.nz>
+
+description: |
+ This controller is the Chip Common A GPIO present on a number of Broadcom
+ switch ASICs with integrated SoCs.
+
+properties:
+ compatible:
+ const: brcm,iproc-gpio-cca
+
+ reg:
+ items:
+ - description: the I/O address containing the GPIO controller
+ registers.
+ - description: the I/O address containing the Chip Common A interrupt
+ registers.
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ ngpios:
+ minimum: 0
+ maximum: 32
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#gpio-cells"
+ - gpio-controller
+
+dependencies:
+ interrupt-controller: [ interrupts ]
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ gpio@18000060 {
+ compatible = "brcm,iproc-gpio-cca";
+ #gpio-cells = <2>;
+ reg = <0x18000060 0x50>,
+ <0x18000000 0x50>;
+ ngpios = <12>;
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+
+...
diff --git a/Documentation/devicetree/bindings/gpio/gpio-rda.yaml b/Documentation/devicetree/bindings/gpio/gpio-rda.yaml
new file mode 100644
index 000000000000..6ece555f074f
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-rda.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-rda.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: RDA Micro GPIO controller
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+properties:
+ compatible:
+ const: rda,8810pl-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ ngpios:
+ description:
+ Number of available gpios in a bank.
+ minimum: 1
+ maximum: 32
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - ngpios
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+
+additionalProperties: false
+
+...
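The schema ships without an examples block; a minimal node satisfying the
required properties might look like the sketch below (the unit address and
interrupt number here are assumptions, not from the binding):

    gpio0: gpio@20930000 {
        compatible = "rda,8810pl-gpio";
        reg = <0x20930000 0x1000>;
        gpio-controller;
        #gpio-cells = <2>;
        ngpios = <32>;
        interrupt-controller;
        #interrupt-cells = <2>;
        interrupts = <12>;
    };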
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index f3f2c468c1b6..41e5fed0f842 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -8,6 +8,7 @@ Required Properties:
- "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
- "renesas,gpio-r8a77470": for R8A77470 (RZ/G1C) compatible GPIO controller.
- "renesas,gpio-r8a774a1": for R8A774A1 (RZ/G2M) compatible GPIO controller.
+ - "renesas,gpio-r8a774b1": for R8A774B1 (RZ/G2N) compatible GPIO controller.
- "renesas,gpio-r8a774c0": for R8A774C0 (RZ/G2E) compatible GPIO controller.
- "renesas,gpio-r8a7778": for R8A7778 (R-Car M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 5f1fd6d7ee0f..0c426e371e71 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -17,6 +17,7 @@ properties:
items:
- enum:
- amlogic,meson-g12a-mali
+ - realtek,rtd1619-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
reg:
@@ -37,8 +38,7 @@ properties:
clocks:
maxItems: 1
- mali-supply:
- maxItems: 1
+ mali-supply: true
operating-points-v2: true
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
index 47bc1ac36426..c9bdf1074305 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
@@ -16,31 +16,32 @@ properties:
oneOf:
- items:
- enum:
+ - samsung,exynos5250-mali
+ - const: arm,mali-t604
+ - items:
+ - enum:
+ - samsung,exynos5420-mali
+ - const: arm,mali-t628
+ - items:
+ - enum:
- allwinner,sun50i-h6-mali
- const: arm,mali-t720
- items:
- enum:
- amlogic,meson-gxm-mali
+ - realtek,rtd1295-mali
- const: arm,mali-t820
- items:
- enum:
- rockchip,rk3288-mali
+ - samsung,exynos5433-mali
- const: arm,mali-t760
- items:
- enum:
- rockchip,rk3399-mali
- const: arm,mali-t860
- - items:
- - enum:
- - samsung,exynos5250-mali
- - const: arm,mali-t604
- - items:
- - enum:
- - samsung,exynos5433-mali
- - const: arm,mali-t760
# "arm,mali-t624"
- # "arm,mali-t628"
# "arm,mali-t830"
# "arm,mali-t880"
@@ -69,8 +70,7 @@ properties:
- const: core
- const: bus
- mali-supply:
- maxItems: 1
+ mali-supply: true
resets:
minItems: 1
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index c5d93c5839d3..afde81be3c29 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -97,8 +97,7 @@ properties:
memory-region: true
- mali-supply:
- maxItems: 1
+ mali-supply: true
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/gpu/samsung-g2d.txt b/Documentation/devicetree/bindings/gpu/samsung-g2d.txt
deleted file mode 100644
index 1e7959332dbc..000000000000
--- a/Documentation/devicetree/bindings/gpu/samsung-g2d.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Samsung 2D Graphics Accelerator
-
-Required properties:
- - compatible : value should be one among the following:
- (a) "samsung,s5pv210-g2d" for G2D IP present in S5PV210 & Exynos4210 SoC
- (b) "samsung,exynos4212-g2d" for G2D IP present in Exynos4x12 SoCs
- (c) "samsung,exynos5250-g2d" for G2D IP present in Exynos5250 SoC
-
- - reg : Physical base address of the IP registers and length of memory
- mapped region.
-
- - interrupts : G2D interrupt number to the CPU.
- - clocks : from common clock binding: handle to G2D clocks.
- - clock-names : names of clocks listed in clocks property, in the same
- order, depending on SoC type:
- - for S5PV210 and Exynos4 based SoCs: "fimg2d" and
- "sclk_fimg2d"
- - for Exynos5250 SoC: "fimg2d".
-
-Example:
- g2d@12800000 {
- compatible = "samsung,s5pv210-g2d";
- reg = <0x12800000 0x1000>;
- interrupts = <0 89 0>;
- clocks = <&clock 177>, <&clock 277>;
- clock-names = "sclk_fimg2d", "fimg2d";
- };
diff --git a/Documentation/devicetree/bindings/gpu/samsung-g2d.yaml b/Documentation/devicetree/bindings/gpu/samsung-g2d.yaml
new file mode 100644
index 000000000000..e7daae862578
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-g2d.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/samsung-g2d.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC 2D Graphics Accelerator
+
+maintainers:
+ - Inki Dae <inki.dae@samsung.com>
+
+properties:
+ compatible:
+ enum:
+ - samsung,s5pv210-g2d # in S5PV210 & Exynos4210 SoC
+ - samsung,exynos4212-g2d # in Exynos4x12 SoCs
+ - samsung,exynos5250-g2d
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks: {}
+ clock-names: {}
+ iommus: {}
+ power-domains: {}
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5250-g2d
+
+then:
+ properties:
+ clocks:
+ items:
+ - description: fimg2d clock
+ clock-names:
+ items:
+ - const: fimg2d
+
+else:
+ properties:
+ clocks:
+ items:
+ - description: sclk_fimg2d clock
+ - description: fimg2d clock
+ clock-names:
+ items:
+ - const: sclk_fimg2d
+ - const: fimg2d
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ g2d@12800000 {
+ compatible = "samsung,s5pv210-g2d";
+ reg = <0x12800000 0x1000>;
+ interrupts = <0 89 0>;
+ clocks = <&clock 177>, <&clock 277>;
+ clock-names = "sclk_fimg2d", "fimg2d";
+ };
+
+...
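For the samsung,exynos5250-g2d branch of the if/then schema above, only the
single fimg2d clock applies; a sketch (unit address, interrupt and clock index
are assumptions):

    g2d@10850000 {
        compatible = "samsung,exynos5250-g2d";
        reg = <0x10850000 0x1000>;
        interrupts = <0 91 0>;
        clocks = <&clock 177>;
        clock-names = "fimg2d";
    };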
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
deleted file mode 100644
index 3aca2578da0b..000000000000
--- a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Samsung Image Rotator
-
-Required properties:
- - compatible : value should be one of the following:
- * "samsung,s5pv210-rotator" for Rotator IP in S5PV210
- * "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
- * "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
- * "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
-
- - reg : Physical base address of the IP registers and length of memory
- mapped region.
-
- - interrupts : Interrupt specifier for rotator interrupt, according to format
- specific to interrupt parent.
-
- - clocks : Clock specifier for rotator clock, according to generic clock
- bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
-
- - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
-
-Example:
- rotator@12810000 {
- compatible = "samsung,exynos4210-rotator";
- reg = <0x12810000 0x1000>;
- interrupts = <0 83 0>;
- clocks = <&clock 278>;
- clock-names = "rotator";
- };
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml b/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml
new file mode 100644
index 000000000000..f4dfa6fc724c
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/samsung-rotator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC Image Rotator
+
+maintainers:
+ - Inki Dae <inki.dae@samsung.com>
+
+properties:
+ compatible:
+ enum:
+ - "samsung,s5pv210-rotator"
+ - "samsung,exynos4210-rotator"
+ - "samsung,exynos4212-rotator"
+ - "samsung,exynos5250-rotator"
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: rotator
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ rotator@12810000 {
+ compatible = "samsung,exynos4210-rotator";
+ reg = <0x12810000 0x1000>;
+ interrupts = <0 83 0>;
+ clocks = <&clock 278>;
+ clock-names = "rotator";
+ };
+
diff --git a/Documentation/devicetree/bindings/gpu/samsung-scaler.txt b/Documentation/devicetree/bindings/gpu/samsung-scaler.txt
deleted file mode 100644
index 9c3d98105dfd..000000000000
--- a/Documentation/devicetree/bindings/gpu/samsung-scaler.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Samsung Exynos Image Scaler
-
-Required properties:
- - compatible : value should be one of the following:
- (a) "samsung,exynos5420-scaler" for Scaler IP in Exynos5420
- (b) "samsung,exynos5433-scaler" for Scaler IP in Exynos5433
-
- - reg : Physical base address of the IP registers and length of memory
- mapped region.
-
- - interrupts : Interrupt specifier for scaler interrupt, according to format
- specific to interrupt parent.
-
- - clocks : Clock specifier for scaler clock, according to generic clock
- bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
-
- - clock-names : Names of clocks. For exynos scaler, it should be "mscl"
- on 5420 and "pclk", "aclk" and "aclk_xiu" on 5433.
-
-Example:
- scaler@12800000 {
- compatible = "samsung,exynos5420-scaler";
- reg = <0x12800000 0x1294>;
- interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clock CLK_MSCL0>;
- clock-names = "mscl";
- };
diff --git a/Documentation/devicetree/bindings/gpu/samsung-scaler.yaml b/Documentation/devicetree/bindings/gpu/samsung-scaler.yaml
new file mode 100644
index 000000000000..5317ac64426a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-scaler.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/samsung-scaler.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC Image Scaler
+
+maintainers:
+ - Inki Dae <inki.dae@samsung.com>
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos5420-scaler
+ - samsung,exynos5433-scaler
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks: {}
+ clock-names: {}
+ iommus: {}
+ power-domains: {}
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5420-scaler
+
+then:
+ properties:
+ clocks:
+ items:
+ - description: mscl clock
+
+ clock-names:
+ items:
+ - const: mscl
+
+else:
+ properties:
+ clocks:
+ items:
+ - description: pclk clock
+ - description: aclk clock
+ - description: aclk_xiu clock
+
+ clock-names:
+ items:
+ - const: pclk
+ - const: aclk
+ - const: aclk_xiu
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5420.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ scaler@12800000 {
+ compatible = "samsung,exynos5420-scaler";
+ reg = <0x12800000 0x1294>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_MSCL0>;
+ clock-names = "mscl";
+ };
+
+...
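For samsung,exynos5433-scaler, the else branch above requires the three named
clocks instead; a sketch (unit address, interrupt and clock specifiers are
assumptions):

    scaler@15000000 {
        compatible = "samsung,exynos5433-scaler";
        reg = <0x15000000 0x1294>;
        interrupts = <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&cmu 0>, <&cmu 1>, <&cmu 2>;
        clock-names = "pclk", "aclk", "aclk_xiu";
    };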
diff --git a/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt
deleted file mode 100644
index adf4f000ea3d..000000000000
--- a/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-STM32 Hardware Spinlock Device Binding
--------------------------------------
-
-Required properties :
-- compatible : should be "st,stm32-hwspinlock".
-- reg : the register address of hwspinlock.
-- #hwlock-cells : hwlock users only use the hwlock id to represent a specific
- hwlock, so the number of cells should be <1> here.
-- clock-names : Must contain "hsem".
-- clocks : Must contain a phandle entry for the clock in clock-names, see the
- common clock bindings.
-
-Please look at the generic hwlock binding for usage information for consumers,
-"Documentation/devicetree/bindings/hwlock/hwlock.txt"
-
-Example of hwlock provider:
- hwspinlock@4c000000 {
- compatible = "st,stm32-hwspinlock";
- #hwlock-cells = <1>;
- reg = <0x4c000000 0x400>;
- clocks = <&rcc HSEM>;
- clock-names = "hsem";
- };
diff --git a/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml
new file mode 100644
index 000000000000..47cf9c8d97e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/st,stm32-hwspinlock.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwlock/st,stm32-hwspinlock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Hardware Spinlock bindings
+
+maintainers:
+ - Benjamin Gaignard <benjamin.gaignard@st.com>
+ - Fabien Dessenne <fabien.dessenne@st.com>
+
+properties:
+ "#hwlock-cells":
+ const: 1
+
+ compatible:
+ const: st,stm32-hwspinlock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: hsem
+
+required:
+ - "#hwlock-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ hwspinlock@4c000000 {
+ compatible = "st,stm32-hwspinlock";
+ #hwlock-cells = <1>;
+ reg = <0x4c000000 0x400>;
+ clocks = <&rcc HSEM>;
+ clock-names = "hsem";
+ };
+
+...
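Per the generic hwlock consumer binding that the removed text file pointed to,
a client references a lock by its ID through the single hwlock cell (a sketch;
the consumer node itself is an assumption):

    mailbox@4c001000 {
        /* ... */
        hwlocks = <&hwspinlock 1>;
    };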
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
new file mode 100644
index 000000000000..ae04903f34bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/adi,ltc2947.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2947 high precision power and energy monitor
+
+maintainers:
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ Analog Devices LTC2947 high precision power and energy monitor over SPI or I2C.
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/LTC2947.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2947
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description:
+ The LTC2947 uses either a trimmed internal oscillator or an external clock
+ as the time base for determining the integration period to represent time,
+ charge and energy. When an external clock is used, this property must be
+ set accordingly.
+ maxItems: 1
+
+ adi,accumulator-ctl-pol:
+ description:
+      This property controls the polarity of current that is accumulated to
+      calculate charge and energy so that, for example, accumulation can be
+      restricted to positive current only. Since there are two sets of
+      registers for the accumulated values, this entry can also have two
+      items, which set energy1/charge1 and energy2/charge2 respectively.
+      Check table 12 of the datasheet for more information on the supported
+      options.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 2
+ maxItems: 2
+ items:
+ enum: [0, 1, 2, 3]
+ default: 0
+
+ adi,accumulation-deadband-microamp:
+ description:
+      This property controls the Accumulation Dead band, which sets the level
+      of current below which no accumulation takes place.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 255
+ default: 0
+
+ adi,gpio-out-pol:
+ description:
+ This property controls the GPIO polarity. Setting it to one makes the GPIO
+      active high, setting it to zero makes it active low. When this property
+ is present, the GPIO is automatically configured as output and set to
+ control a fan as a function of measured temperature.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ default: 0
+
+ adi,gpio-in-accum:
+ description:
+      When set, this property configures the GPIO as an input, used to control
+ the accumulation of charge, energy and time. This function can be
+ enabled/configured separately for each of the two sets of accumulation
+ registers. Check table 13 of the datasheet for more information on the
+ supported options. This property cannot be used together with
+ adi,gpio-out-pol.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 2
+ maxItems: 2
+ items:
+ enum: [0, 1, 2]
+ default: 0
+
+required:
+ - compatible
+ - reg
+
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ltc2947_spi: ltc2947@0 {
+ compatible = "adi,ltc2947";
+ reg = <0>;
+ /* accumulation takes place always for energ1/charge1. */
+ /* accumulation only on positive current for energy2/charge2. */
+ adi,accumulator-ctl-pol = <0 1>;
+ };
+ };
+...
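When the external time base described under 'clocks' is used, the node gains a
single clock phandle; a sketch (the clock label is an assumption):

    ltc2947: ltc2947@0 {
        compatible = "adi,ltc2947";
        reg = <0>;
        clocks = <&ltc2947_ext_clk>;
    };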
diff --git a/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt b/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
index 1036f65fb778..d9a2719f9243 100644
--- a/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
+++ b/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
@@ -5,6 +5,9 @@ Required properties:
- compatible : Must be one of the following:
"ibm,cffps1"
"ibm,cffps2"
+ or "ibm,cffps" if the system
+ must support any version of the
+ power supply
- reg = < I2C bus address >; : Address of the power supply on the
I2C bus.
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
new file mode 100644
index 000000000000..168235ad5d81
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/hwmon/ti,tmp513.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TMP513/512 system monitor sensor
+
+maintainers:
+ - Eric Tremblay <etremblay@distech-controls.com>
+
+description: |
+ The TMP512 (dual-channel) and TMP513 (triple-channel) are system monitors
+ that include remote sensors, a local temperature sensor, and a high-side
+ current shunt monitor. These system monitors have the capability of measuring
+ remote temperatures, on-chip temperatures, and system voltage/power/current
+ consumption.
+
+ Datasheets:
+ http://www.ti.com/lit/gpn/tmp513
+ http://www.ti.com/lit/gpn/tmp512
+
+
+properties:
+ compatible:
+ enum:
+ - ti,tmp512
+ - ti,tmp513
+
+ reg:
+ maxItems: 1
+
+ shunt-resistor-micro-ohms:
+ description: |
+      If 0, the calibration process will be skipped and the current and power
+      measurement engine will not work. Temperature and voltage measurement
+      will continue to work. The shunt value also needs to satisfy:
+      rshunt <= pga-gain * 40 * 1000 * 1000.
+      Otherwise it is not possible to compute a valid calibration value.
+ default: 1000
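+    # Worked example (illustrative): with the default ti,pga-gain of 8, the
+    # shunt must not exceed 8 * 40 * 1000 * 1000 = 320000000 micro-ohms
+    # (320 ohms).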
+
+ ti,pga-gain:
+ description: |
+      The gain value for the PGA function. This is 8, 4, 2 or 1.
+      The PGA gain affects the shunt voltage range.
+      The range will be equal to: pga-gain * 40mV.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8]
+ default: 8
+
+ ti,bus-range-microvolt:
+ description: |
+      This is the operating range of the bus voltage, in microvolts.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [16000000, 32000000]
+ default: 32000000
+
+ ti,nfactor:
+ description: |
+      Array of three (TMP513) or two (TMP512) n-factor values, one for each
+      remote temperature channel.
+ See datasheet Table 11 for n-Factor range list and value interpretation.
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/uint32-array
+ - minItems: 2
+ maxItems: 3
+ items:
+ default: 0x00
+ minimum: 0x00
+ maximum: 0xFF
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tmp513@5c {
+ compatible = "ti,tmp513";
+            reg = <0x5c>;
+ shunt-resistor-micro-ohms = <330000>;
+ ti,bus-range-microvolt = <32000000>;
+ ti,pga-gain = <8>;
+ ti,nfactor = <0x1 0xF3 0x00>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml b/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
index f9d526b7da01..9346ef6ba61b 100644
--- a/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
+++ b/Documentation/devicetree/bindings/i2c/allwinner,sun6i-a31-p2wi.yaml
@@ -40,9 +40,7 @@ required:
- clocks
- resets
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
new file mode 100644
index 000000000000..49cad273c8e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/i2c/amlogic,meson6-i2c.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson I2C Controller
+
+maintainers:
+ - Neil Armstrong <narmstrong@baylibre.com>
+ - Beniamino Galvani <b.galvani@gmail.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - amlogic,meson6-i2c # Meson6, Meson8 and compatible SoCs
+ - amlogic,meson-gxbb-i2c # GXBB and compatible SoCs
+ - amlogic,meson-axg-i2c # AXG and compatible SoCs
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ i2c@c8100500 {
+ compatible = "amlogic,meson6-i2c";
+ reg = <0xc8100500 0x20>;
+ interrupts = <92>;
+ clocks = <&clk81>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@52 {
+ compatible = "atmel,24c32";
+ reg = <0x52>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
index 8fbd8633a387..b47f6ccb196a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
@@ -1,4 +1,4 @@
-Device tree configuration for the I2C busses on the AST24XX and AST25XX SoCs.
+Device tree configuration for the I2C busses on the AST24XX, AST25XX, and AST26XX SoCs.
Required Properties:
- #address-cells : should be 1
@@ -6,6 +6,7 @@ Required Properties:
- reg : address offset and range of bus
- compatible : should be "aspeed,ast2400-i2c-bus"
or "aspeed,ast2500-i2c-bus"
+ or "aspeed,ast2600-i2c-bus"
- clocks : root clock of bus, should reference the APB
clock in the second cell
- resets : phandle to reset controller with the reset number in
diff --git a/Documentation/devicetree/bindings/i2c/i2c-at91.txt b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
index b7cec17c3daf..2210f4359c45 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-at91.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-at91.txt
@@ -3,7 +3,8 @@ I2C for Atmel platforms
Required properties :
- compatible : Must be "atmel,at91rm9200-i2c", "atmel,at91sam9261-i2c",
"atmel,at91sam9260-i2c", "atmel,at91sam9g20-i2c", "atmel,at91sam9g10-i2c",
- "atmel,at91sam9x5-i2c", "atmel,sama5d4-i2c" or "atmel,sama5d2-i2c"
+ "atmel,at91sam9x5-i2c", "atmel,sama5d4-i2c", "atmel,sama5d2-i2c" or
+ "microchip,sam9x60-i2c"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
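Illustrative node for the new "microchip,sam9x60-i2c" compatible (a sketch
only; the base address, interrupt number and clock phandle are assumptions):

	i2c@f0018000 {
		compatible = "microchip,sam9x60-i2c";
		reg = <0xf0018000 0x100>;	/* assumed base/size */
		interrupts = <25>;		/* assumed interrupt */
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&i2c_clk>;		/* assumed clock phandle */
	};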
diff --git a/Documentation/devicetree/bindings/i2c/i2c-meson.txt b/Documentation/devicetree/bindings/i2c/i2c-meson.txt
deleted file mode 100644
index 13d410de077c..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-meson.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Amlogic Meson I2C controller
-
-Required properties:
- - compatible: must be:
- "amlogic,meson6-i2c" for Meson8 and compatible SoCs
- "amlogic,meson-gxbb-i2c" for GXBB and compatible SoCs
- "amlogic,meson-axg-i2c"for AXG and compatible SoCs
-
- - reg: physical address and length of the device registers
- - interrupts: a single interrupt specifier
- - clocks: clock for the device
- - #address-cells: should be <1>
- - #size-cells: should be <0>
-
-For details regarding the following core I2C bindings see also i2c.txt.
-
-Optional properties:
-- clock-frequency: the desired I2C bus clock frequency in Hz; in
- absence of this property the default value is used (100 kHz).
-
-Examples:
-
- i2c@c8100500 {
- compatible = "amlogic,meson6-i2c";
- reg = <0xc8100500 0x20>;
- interrupts = <0 92 1>;
- clocks = <&clk81>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt
deleted file mode 100644
index ce3df2fff6c8..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-* I2C controller embedded in STMicroelectronics STM32 I2C platform
-
-Required properties:
-- compatible: Must be one of the following
- - "st,stm32f4-i2c"
- - "st,stm32f7-i2c"
-- reg: Offset and length of the register set for the device
-- interrupts: Must contain the interrupt id for I2C event and then the
- interrupt id for I2C error.
-- resets: Must contain the phandle to the reset controller.
-- clocks: Must contain the input clock of the I2C instance.
-- A pinctrl state named "default" must be defined to set pins in mode of
- operation for I2C transfer
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- clock-frequency: Desired I2C bus clock frequency in Hz. If not specified,
- the default 100 kHz frequency will be used.
- For STM32F4 SoC Standard-mode and Fast-mode are supported, possible values are
- 100000 and 400000.
- For STM32F7, STM32H7 and STM32MP1 SoCs, Standard-mode, Fast-mode and Fast-mode
- Plus are supported, possible values are 100000, 400000 and 1000000.
-- dmas: List of phandles to rx and tx DMA channels. Refer to stm32-dma.txt.
-- dma-names: List of dma names. Valid names are: "rx" and "tx".
-- i2c-scl-rising-time-ns: I2C SCL Rising time for the board (default: 25)
- For STM32F7, STM32H7 and STM32MP1 only.
-- i2c-scl-falling-time-ns: I2C SCL Falling time for the board (default: 10)
- For STM32F7, STM32H7 and STM32MP1 only.
- I2C Timings are derived from these 2 values
-- st,syscfg-fmp: Use to set Fast Mode Plus bit within SYSCFG when Fast Mode
- Plus speed is selected by slave.
- 1st cell: phandle to syscfg
- 2nd cell: register offset within SYSCFG
- 3rd cell: register bitmask for FMP bit
- For STM32F7, STM32H7 and STM32MP1 only.
-
-Example:
-
- i2c@40005400 {
- compatible = "st,stm32f4-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x40005400 0x400>;
- interrupts = <31>,
- <32>;
- resets = <&rcc 277>;
- clocks = <&rcc 0 149>;
- pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>;
- pinctrl-names = "default";
- };
-
- i2c@40005400 {
- compatible = "st,stm32f7-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x40005400 0x400>;
- interrupts = <31>,
- <32>;
- resets = <&rcc STM32F7_APB1_RESET(I2C1)>;
- clocks = <&rcc 1 CLK_I2C1>;
- pinctrl-0 = <&i2c1_sda_pin>, <&i2c1_scl_pin>;
- pinctrl-names = "default";
- st,syscfg-fmp = <&syscfg 0x4 0x1>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index 44efafdfd7f5..9a53df4243c6 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -55,6 +55,24 @@ wants to support one of the below features, it should adapt the bindings below.
Number of nanoseconds the SDA signal takes to fall; t(f) in the I2C
specification.
+- i2c-analog-filter
+ Enable analog filter for i2c lines.
+
+- i2c-digital-filter
+ Enable digital filter for i2c lines.
+
+- i2c-digital-filter-width-ns
+ Width of spikes which can be filtered by digital filter
+ (i2c-digital-filter). This width is specified in nanoseconds.
+
+- i2c-analog-filter-cutoff-frequency
+ Frequency that the analog filter (i2c-analog-filter) uses to distinguish
+ which signals to filter. Signals with a higher frequency than specified
+ will be filtered out; only lower frequencies will pass (this is
+ applicable to a low-pass analog filter). The typical value should be
+ above the normal i2c bus clock frequency (clock-frequency).
+ Specified in Hz.
+
- interrupts
interrupts used by the device.
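Illustrative use of the new filter properties (a hedged sketch; the
controller node and all values below are hypothetical):

	i2c@40005400 {
		...
		clock-frequency = <400000>;
		i2c-analog-filter;
		i2c-analog-filter-cutoff-frequency = <1000000>;	/* assumed cutoff */
		i2c-digital-filter;
		i2c-digital-filter-width-ns = <50>;		/* assumed width */
	};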
diff --git a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
index c779000515d6..2ceb05ba2df5 100644
--- a/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
@@ -93,9 +93,7 @@ allOf:
required:
- resets
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
index 3ee5e8f6ee01..0660a3eb2547 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
@@ -7,6 +7,7 @@ Required properties:
"renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC.
"renesas,i2c-r8a77470" if the device is a part of a R8A77470 SoC.
"renesas,i2c-r8a774a1" if the device is a part of a R8A774A1 SoC.
+ "renesas,i2c-r8a774b1" if the device is a part of a R8A774B1 SoC.
"renesas,i2c-r8a774c0" if the device is a part of a R8A774C0 SoC.
"renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC.
"renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC.
diff --git a/Documentation/devicetree/bindings/i2c/renesas,iic.txt b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
index 202602e6e837..64d11ffb07c4 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,iic.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
@@ -8,6 +8,7 @@ Required properties:
- "renesas,iic-r8a7744" (RZ/G1N)
- "renesas,iic-r8a7745" (RZ/G1E)
- "renesas,iic-r8a774a1" (RZ/G2M)
+ - "renesas,iic-r8a774b1" (RZ/G2N)
- "renesas,iic-r8a774c0" (RZ/G2E)
- "renesas,iic-r8a7790" (R-Car H2)
- "renesas,iic-r8a7791" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
new file mode 100644
index 000000000000..900ec1ab6a47
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/st,stm32-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: I2C controller embedded in STMicroelectronics STM32 I2C platform
+
+maintainers:
+ - Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32f7-i2c
+ then:
+ properties:
+ i2c-scl-rising-time-ns:
+ default: 25
+
+ i2c-scl-falling-time-ns:
+ default: 10
+
+ st,syscfg-fmp:
+ description: Used to set the Fast Mode Plus bit within SYSCFG when
+ Fast Mode Plus speed is selected by the slave.
+ Format is phandle to syscfg / register offset within
+ syscfg / register bitmask for FMP bit.
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ - items:
+ minItems: 3
+ maxItems: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32f4-i2c
+ then:
+ properties:
+ clock-frequency:
+ enum: [100000, 400000]
+
+properties:
+ compatible:
+ enum:
+ - st,stm32f4-i2c
+ - st,stm32f7-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: interrupt ID for I2C event
+ - description: interrupt ID for I2C error
+
+ resets:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: RX DMA Channel phandle
+ - description: TX DMA Channel phandle
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ clock-frequency:
+ description: Desired I2C bus clock frequency in Hz. If not specified,
+ the default 100 kHz frequency will be used.
+ For STM32F7, STM32H7 and STM32MP1 SoCs, Standard-mode,
+ Fast-mode and Fast-mode Plus are supported, possible
+ values are 100000, 400000 and 1000000.
+ default: 100000
+ enum: [100000, 400000, 1000000]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - resets
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/mfd/stm32f7-rcc.h>
+ #include <dt-bindings/clock/stm32fx-clock.h>
+ //Example 1 (with st,stm32f4-i2c compatible)
+ i2c@40005400 {
+ compatible = "st,stm32f4-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x40005400 0x400>;
+ interrupts = <31>,
+ <32>;
+ resets = <&rcc 277>;
+ clocks = <&rcc 0 149>;
+ };
+
+ //Example 2 (with st,stm32f7-i2c compatible)
+ i2c@40005800 {
+ compatible = "st,stm32f7-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x40005800 0x400>;
+ interrupts = <31>,
+ <32>;
+ resets = <&rcc STM32F7_APB1_RESET(I2C1)>;
+ clocks = <&rcc 1 CLK_I2C1>;
+ };
+
+ //Example 3 (with st,stm32f7-i2c compatible on stm32mp)
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ i2c@40013000 {
+ compatible = "st,stm32f7-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x40013000 0x400>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc I2C2_K>;
+ resets = <&rcc I2C2_R>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ st,syscfg-fmp = <&syscfg 0x4 0x2>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
index 9692b7f719f5..e932d5aed02f 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
@@ -45,15 +45,12 @@ properties:
refin1-supply:
description: refin1 supply can be used as reference for conversion.
- maxItems: 1
refin2-supply:
description: refin2 supply can be used as reference for conversion.
- maxItems: 1
avdd-supply:
description: avdd supply can be used as reference for conversion.
- maxItems: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
new file mode 100644
index 000000000000..b68be3aaf587
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad7292.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD7292 10-Bit Monitor and Control System
+
+maintainers:
+ - Marcelo Schmitt <marcelo.schmitt1@gmail.com>
+
+description: |
+ Analog Devices AD7292 10-Bit Monitor and Control System with ADC, DACs,
+ Temperature Sensor, and GPIOs
+
+ Specifications about the part can be found at:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad7292.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ad7292
+
+ reg:
+ maxItems: 1
+
+ vref-supply:
+ description: |
+ The regulator supply for ADC and DAC reference voltage.
+
+ spi-cpha: true
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - spi-cpha
+
+patternProperties:
+ "^channel@[0-7]$":
+ type: object
+ description: |
+ Represents the external channels which are connected to the ADC.
+ See Documentation/devicetree/bindings/iio/adc/adc.txt.
+
+ properties:
+ reg:
+ description: |
+ The channel number. The device has up to 8 channels numbered from 0 to 7.
+ items:
+ maximum: 7
+
+ diff-channels:
+ description: see Documentation/devicetree/bindings/iio/adc/adc.txt
+ maxItems: 1
+
+ required:
+ - reg
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ad7292: adc@0 {
+ compatible = "adi,ad7292";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ vref-supply = <&adc_vref>;
+ spi-cpha;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ diff-channels = <0 1>;
+ };
+ channel@2 {
+ reg = <2>;
+ };
+ channel@3 {
+ reg = <3>;
+ };
+ channel@4 {
+ reg = <4>;
+ };
+ channel@5 {
+ reg = <5>;
+ };
+ channel@6 {
+ reg = <6>;
+ };
+ channel@7 {
+ reg = <7>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index cc544fdc38be..6eb33207a167 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -31,10 +31,7 @@ properties:
spi-cpha: true
- avcc-supply:
- description:
- Phandle to the Avcc power supply
- maxItems: 1
+ avcc-supply: true
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
index d1109416963c..9acde6d2e2d9 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
@@ -39,7 +39,6 @@ properties:
avdd-supply:
description:
The regulator supply for the ADC reference voltage.
- maxItems: 1
powerdown-gpios:
description:
diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml b/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
index d76ece97c76c..91ab9c842273 100644
--- a/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
@@ -41,7 +41,6 @@ properties:
avdd-supply:
description:
Definition of the regulator used as analog supply
- maxItems: 1
clock-frequency:
minimum: 20000
diff --git a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt
index f01159f20d87..cd9048cf9dcf 100644
--- a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: Should be one of:
* ingenic,jz4725b-adc
* ingenic,jz4740-adc
+ * ingenic,jz4770-adc
- reg: ADC controller registers location and length.
- clocks: phandle to the SoC's ADC clock.
- clock-names: Must be set to "adc".
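Illustrative node for the new "ingenic,jz4770-adc" compatible (a sketch; the
register address and clock specifier are assumptions):

	adc@10070000 {
		compatible = "ingenic,jz4770-adc";
		reg = <0x10070000 0x30>;	/* assumed address/size */
		clocks = <&cgu JZ4770_CLK_ADC>;	/* assumed clock specifier */
		clock-names = "adc";
	};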
diff --git a/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt b/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
deleted file mode 100644
index e680c61dfb84..000000000000
--- a/Documentation/devicetree/bindings/iio/adc/max1027-adc.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* Maxim 1027/1029/1031 Analog to Digital Converter (ADC)
-
-Required properties:
- - compatible: Should be "maxim,max1027" or "maxim,max1029" or "maxim,max1031"
- - reg: SPI chip select number for the device
- - interrupts: IRQ line for the ADC
- see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-
-Recommended properties:
-- spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Example:
-adc@0 {
- compatible = "maxim,max1027";
- reg = <0>;
- interrupt-parent = <&gpio5>;
- interrupts = <15 IRQ_TYPE_EDGE_RISING>;
- spi-max-frequency = <1000000>;
-};
diff --git a/Documentation/devicetree/bindings/iio/adc/mcp3911.txt b/Documentation/devicetree/bindings/iio/adc/mcp3911.txt
deleted file mode 100644
index 3071f48fb30b..000000000000
--- a/Documentation/devicetree/bindings/iio/adc/mcp3911.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* Microchip MCP3911 Dual channel analog front end (ADC)
-
-Required properties:
- - compatible: Should be "microchip,mcp3911"
- - reg: SPI chip select number for the device
-
-Recommended properties:
- - spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt.
- Max frequency for this chip is 20MHz.
-
-Optional properties:
- - clocks: Phandle and clock identifier for sampling clock
- - interrupt-parent: Phandle to the parent interrupt controller
- - interrupts: IRQ line for the ADC
- - microchip,device-addr: Device address when multiple MCP3911 chips are present on the
- same SPI bus. Valid values are 0-3. Defaults to 0.
- - vref-supply: Phandle to the external reference voltage supply.
-
-Example:
-adc@0 {
- compatible = "microchip,mcp3911";
- reg = <0>;
- interrupt-parent = <&gpio5>;
- interrupts = <15 IRQ_TYPE_EDGE_RISING>;
- spi-max-frequency = <20000000>;
- microchip,device-addr = <0>;
- vref-supply = <&vref_reg>;
- clocks = <&xtal>;
-};
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
new file mode 100644
index 000000000000..881059b80d61
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+# Copyright 2019 Marcus Folkesson <marcus.folkesson@gmail.com>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/bindings/iio/adc/microchip,mcp3911.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Microchip MCP3911 Dual channel analog front end (ADC)
+
+maintainers:
+ - Marcus Folkesson <marcus.folkesson@gmail.com>
+ - Kent Gustavsson <nedo80@gmail.com>
+
+description: |
+ Bindings for the Microchip MCP3911 Dual channel ADC device. Datasheet can be
+ found here: https://ww1.microchip.com/downloads/en/DeviceDoc/20002286C.pdf
+
+properties:
+ compatible:
+ enum:
+ - microchip,mcp3911
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 20000000
+
+ clocks:
+ description: |
+ Phandle and clock identifier for external sampling clock.
+ If not specified, the internal crystal oscillator will be used.
+ maxItems: 1
+
+ interrupts:
+ description: IRQ line of the ADC
+ maxItems: 1
+
+ microchip,device-addr:
+ description: Device address when multiple MCP3911 chips are present on the same SPI bus.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ - default: 0
+
+ vref-supply:
+ description: |
+ Phandle to the external reference voltage supply.
+ If not specified, the internal voltage reference (1.2V) will be used.
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "microchip,mcp3911";
+ reg = <0>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <15 2>;
+ spi-max-frequency = <20000000>;
+ microchip,device-addr = <0>;
+ vref-supply = <&vref_reg>;
+ clocks = <&xtal>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
deleted file mode 100644
index e1fe02f3e3e9..000000000000
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
+++ /dev/null
@@ -1,107 +0,0 @@
-Samsung Exynos Analog to Digital Converter bindings
-
-The devicetree bindings are for the new ADC driver written for
-Exynos4 and upward SoCs from Samsung.
-
-New driver handles the following
-1. Supports ADC IF found on EXYNOS4412/EXYNOS5250
- and future SoCs from Samsung
-2. Add ADC driver under iio/adc framework
-3. Also adds the Documentation for device tree bindings
-
-Required properties:
-- compatible: Must be "samsung,exynos-adc-v1"
- for Exynos5250 controllers.
- Must be "samsung,exynos-adc-v2" for
- future controllers.
- Must be "samsung,exynos3250-adc" for
- controllers compatible with ADC of Exynos3250.
- Must be "samsung,exynos4212-adc" for
- controllers compatible with ADC of Exynos4212 and Exynos4412.
- Must be "samsung,exynos7-adc" for
- the ADC in Exynos7 and compatibles
- Must be "samsung,s3c2410-adc" for
- the ADC in s3c2410 and compatibles
- Must be "samsung,s3c2416-adc" for
- the ADC in s3c2416 and compatibles
- Must be "samsung,s3c2440-adc" for
- the ADC in s3c2440 and compatibles
- Must be "samsung,s3c2443-adc" for
- the ADC in s3c2443 and compatibles
- Must be "samsung,s3c6410-adc" for
- the ADC in s3c6410 and compatibles
- Must be "samsung,s5pv210-adc" for
- the ADC in s5pv210 and compatibles
-- reg: List of ADC register address range
- - The base address and range of ADC register
- - The base address and range of ADC_PHY register (every
- SoC except for s3c24xx/s3c64xx ADC)
-- interrupts: Contains the interrupt information for the timer. The
- format is being dependent on which interrupt controller
- the Samsung device uses.
-- #io-channel-cells = <1>; As ADC has multiple outputs
-- clocks From common clock bindings: handles to clocks specified
- in "clock-names" property, in the same order.
-- clock-names From common clock bindings: list of clock input names
- used by ADC block:
- - "adc" : ADC bus clock
- - "sclk" : ADC special clock (only for Exynos3250 and
- compatible ADC block)
-- vdd-supply VDD input supply.
-
-- samsung,syscon-phandle Contains the PMU system controller node
- (To access the ADC_PHY register on Exynos5250/5420/5800/3250)
-Optional properties:
-- has-touchscreen: If present, indicates that a touchscreen is
- connected an usable.
-
-Note: child nodes can be added for auto probing from device tree.
-
-Example: adding device info in dtsi file
-
-adc: adc@12d10000 {
- compatible = "samsung,exynos-adc-v1";
- reg = <0x12D10000 0x100>;
- interrupts = <0 106 0>;
- #io-channel-cells = <1>;
- io-channel-ranges;
-
- clocks = <&clock 303>;
- clock-names = "adc";
-
- vdd-supply = <&buck5_reg>;
- samsung,syscon-phandle = <&pmu_system_controller>;
-};
-
-Example: adding device info in dtsi file for Exynos3250 with additional sclk
-
-adc: adc@126c0000 {
- compatible = "samsung,exynos3250-adc", "samsung,exynos-adc-v2;
- reg = <0x126C0000 0x100>;
- interrupts = <0 137 0>;
- #io-channel-cells = <1>;
- io-channel-ranges;
-
- clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>;
- clock-names = "adc", "sclk";
-
- vdd-supply = <&buck5_reg>;
- samsung,syscon-phandle = <&pmu_system_controller>;
-};
-
-Example: Adding child nodes in dts file
-
-adc@12d10000 {
-
- /* NTC thermistor is a hwmon device */
- ncp15wb473@0 {
- compatible = "murata,ncp15wb473";
- pullup-uv = <1800000>;
- pullup-ohm = <47000>;
- pulldown-ohm = <0>;
- io-channels = <&adc 4>;
- };
-};
-
-Note: Does not apply to ADC driver under arch/arm/plat-samsung/
-Note: The child node can be added under the adc node or separately.
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
new file mode 100644
index 000000000000..f46de17c0878
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/samsung,exynos-adc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos Analog to Digital Converter (ADC)
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos-adc-v1 # Exynos5250
+ - samsung,exynos-adc-v2
+ - samsung,exynos3250-adc
+ - samsung,exynos4212-adc # Exynos4212 and Exynos4412
+ - samsung,exynos7-adc
+ - samsung,s3c2410-adc
+ - samsung,s3c2416-adc
+ - samsung,s3c2440-adc
+ - samsung,s3c2443-adc
+ - samsung,s3c6410-adc
+ - samsung,s5pv210-adc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description:
+ Phandle to the ADC bus clock. For Exynos3250, an additional clock is needed.
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ description:
+ Must contain clock names (adc, sclk) matching phandles in clocks
+ property.
+ minItems: 1
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
+ "#io-channel-cells":
+ const: 1
+
+ vdd-supply: true
+
+ samsung,syscon-phandle:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description:
+ Phandle to the PMU system controller node (to access the ADC_PHY
+ register on Exynos3250/4x12/5250/5420/5800).
+
+ has-touchscreen:
+ description:
+ If present, indicates that a touchscreen is connected and usable.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - "#io-channel-cells"
+ - vdd-supply
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos-adc-v1
+ - samsung,exynos-adc-v2
+ - samsung,exynos3250-adc
+ - samsung,exynos4212-adc
+ - samsung,s5pv210-adc
+ then:
+ required:
+ - samsung,syscon-phandle
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos3250-adc
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: adc
+ - const: sclk
+ else:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: adc
+
+examples:
+ - |
+ adc: adc@12d10000 {
+ compatible = "samsung,exynos-adc-v1";
+ reg = <0x12d10000 0x100>;
+ interrupts = <0 106 0>;
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+
+ clocks = <&clock 303>;
+ clock-names = "adc";
+
+ vdd-supply = <&buck5_reg>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+
+ /* NTC thermistor is a hwmon device */
+ ncp15wb473@0 {
+ compatible = "murata,ncp15wb473";
+ pullup-uv = <1800000>;
+ pullup-ohm = <47000>;
+ pulldown-ohm = <0>;
+ io-channels = <&adc 4>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/clock/exynos3250.h>
+
+ adc@126c0000 {
+ compatible = "samsung,exynos3250-adc";
+ reg = <0x126C0000 0x100>;
+ interrupts = <0 137 0>;
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+
+ clocks = <&cmu CLK_TSADC>,
+ <&cmu CLK_SCLK_TSADC>;
+ clock-names = "adc", "sclk";
+
+ vdd-supply = <&buck5_reg>;
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index 4c0da8c74bb2..8de933146771 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -53,6 +53,8 @@ Optional properties:
analog input switches on stm32mp1.
- st,syscfg: Phandle to system configuration controller. It can be used to
control the analog circuitry on stm32mp1.
+- st,max-clk-rate-hz: Allows specifying the desired maximum clock rate used by
+ the analog circuitry.
Contents of a stm32 adc child node:
-----------------------------------
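Illustrative use of the new property (a sketch only; the node below assumes
the stm32mp1 "st,stm32mp1-adc-core" compatible and an arbitrary rate):

	adc: adc@48003000 {
		compatible = "st,stm32mp1-adc-core";
		...
		st,max-clk-rate-hz = <36000000>;	/* assumed max rate */
	};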
diff --git a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
index a551d3101f93..19e53930ebf6 100644
--- a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
+++ b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml
@@ -25,7 +25,6 @@ properties:
vcc-supply:
description: regulator that provides power to the sensor
- maxItems: 1
plantower,set-gpios:
description: GPIO connected to the SET line
diff --git a/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml b/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
new file mode 100644
index 000000000000..13d005b68931
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+# Copyright 2019 Marcus Folkesson <marcus.folkesson@gmail.com>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/bindings/iio/dac/lltc,ltc1660.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Linear Technology Micropower octal 8-Bit and 10-Bit DACs
+
+maintainers:
+ - Marcus Folkesson <marcus.folkesson@gmail.com>
+
+description: |
+ Bindings for the Linear Technology Micropower octal 8-Bit and 10-Bit DAC.
+ Datasheet can be found here: https://www.analog.com/media/en/technical-documentation/data-sheets/166560fa.pdf
+
+properties:
+ compatible:
+ enum:
+ - lltc,ltc1660
+ - lltc,ltc1665
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 5000000
+
+ vref-supply:
+ description: Phandle to the external reference voltage supply.
+
+required:
+ - compatible
+ - reg
+ - vref-supply
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac@0 {
+ compatible = "lltc,ltc1660";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ vref-supply = <&vref_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/ltc1660.txt b/Documentation/devicetree/bindings/iio/dac/ltc1660.txt
deleted file mode 100644
index c5b5f22d6c64..000000000000
--- a/Documentation/devicetree/bindings/iio/dac/ltc1660.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-* Linear Technology Micropower octal 8-Bit and 10-Bit DACs
-
-Required properties:
- - compatible: Must be one of the following:
- "lltc,ltc1660"
- "lltc,ltc1665"
- - reg: SPI chip select number for the device
- - vref-supply: Phandle to the voltage reference supply
-
-Recommended properties:
- - spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt.
- Max frequency for this chip is 5 MHz.
-
-Example:
-dac@0 {
- compatible = "lltc,ltc1660";
- reg = <0>;
- spi-max-frequency = <5000000>;
- vref-supply = <&vref_reg>;
-};
diff --git a/Documentation/devicetree/bindings/iio/iio-bindings.txt b/Documentation/devicetree/bindings/iio/iio-bindings.txt
index 68d6f8ce063b..af33267727f4 100644
--- a/Documentation/devicetree/bindings/iio/iio-bindings.txt
+++ b/Documentation/devicetree/bindings/iio/iio-bindings.txt
@@ -18,12 +18,17 @@ Required properties:
with a single IIO output and 1 for nodes with multiple
IIO outputs.
+Optional properties:
+label: A symbolic name for the device.
+
+
Example for a simple configuration with no trigger:
adc: voltage-sensor@35 {
compatible = "maxim,max1139";
reg = <0x35>;
#io-channel-cells = <1>;
+ label = "voltage_feedback_group1";
};
Example for a configuration with trigger:
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index 268bf7568e19..c5ee8a20af9f 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -21,6 +21,7 @@ Required properties:
bindings.
Optional properties:
+ - vdd-supply: regulator phandle for VDD supply
- vddio-supply: regulator phandle for VDDIO supply
- mount-matrix: an optional 3x3 mounting rotation matrix
- i2c-gate node. These devices also support an auxiliary i2c bus. This is
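Illustrative node showing the new vdd-supply next to the existing
vddio-supply (a sketch; the regulator phandles and interrupt GPIO are
assumptions):

	mpu6050@68 {
		compatible = "invensense,mpu6050";
		reg = <0x68>;
		interrupt-parent = <&gpio1>;
		interrupts = <18 IRQ_TYPE_EDGE_RISING>;
		vdd-supply = <&reg_3v3>;	/* assumed regulator */
		vddio-supply = <&reg_1v8>;	/* assumed regulator */
	};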
diff --git a/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml b/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml
new file mode 100644
index 000000000000..63bcb73ae309
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/nxp,fxos8700.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/imu/nxp,fxos8700.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale FXOS8700 Inertial Measurement Unit
+
+maintainers:
+ - Robert Jones <rjones@gateworks.com>
+
+description: |
+ Accelerometer and magnetometer combo device with i2c and SPI interfaces.
+ https://www.nxp.com/products/sensors/motion-sensors/6-axis/digital-motion-sensor-3d-accelerometer-2g-4g-8g-plus-3d-magnetometer:FXOS8700CQ
+
+properties:
+ compatible:
+ enum:
+ - nxp,fxos8700
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum:
+ - INT1
+ - INT2
+
+ drive-open-drain:
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fxos8700@1e {
+ compatible = "nxp,fxos8700";
+ reg = <0x1e>;
+
+ interrupt-parent = <&gpio2>;
+ interrupts = <7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "INT1";
+ };
+ };
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fxos8700@0 {
+ compatible = "nxp,fxos8700";
+ reg = <0>;
+
+ spi-max-frequency = <1000000>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "INT2";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index 6d0c050d89fe..cef4bc16fce1 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -14,6 +14,8 @@ Required properties:
"st,lsm6ds3tr-c"
"st,ism330dhcx"
"st,lsm9ds1-imu"
+ "st,lsm6ds0"
+ "st,lsm6dsrx"
- reg: i2c address of the sensor / spi cs line
Optional properties:
@@ -31,6 +33,7 @@ Optional properties:
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
IRQ_TYPE_EDGE_FALLING.
+- wakeup-source: Enables wake-up of the host system on event.
Refer to interrupt-controller/interrupts.txt for generic interrupt
client node bindings.
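Illustrative node using one of the new compatibles together with
wakeup-source (a sketch; the i2c address and interrupt GPIO are assumptions):

	lsm6dsrx@6b {
		compatible = "st,lsm6dsrx";
		reg = <0x6b>;
		interrupt-parent = <&gpio0>;
		interrupts = <0 IRQ_TYPE_EDGE_RISING>;
		wakeup-source;
	};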
diff --git a/Documentation/devicetree/bindings/iio/light/adux1020.yaml b/Documentation/devicetree/bindings/iio/light/adux1020.yaml
new file mode 100644
index 000000000000..69bd5c06319d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/adux1020.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/adux1020.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADUX1020 Photometric sensor
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description: |
+ Photometric sensor over an i2c interface.
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ADUX1020.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,adux1020
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adux1020@64 {
+ compatible = "adi,adux1020";
+ reg = <0x64>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/light/bh1750.txt b/Documentation/devicetree/bindings/iio/light/bh1750.txt
deleted file mode 100644
index 1e7685797d7a..000000000000
--- a/Documentation/devicetree/bindings/iio/light/bh1750.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-ROHM BH1750 - ALS, Ambient light sensor
-
-Required properties:
-
-- compatible: Must be one of:
- "rohm,bh1710"
- "rohm,bh1715"
- "rohm,bh1721"
- "rohm,bh1750"
- "rohm,bh1751"
-- reg: the I2C address of the sensor
-
-Example:
-
-light-sensor@23 {
- compatible = "rohm,bh1750";
- reg = <0x23>;
-};
diff --git a/Documentation/devicetree/bindings/iio/light/bh1750.yaml b/Documentation/devicetree/bindings/iio/light/bh1750.yaml
new file mode 100644
index 000000000000..1cc60d7ecfa0
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/bh1750.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/bh1750.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BH1750 ambient light sensor
+
+maintainers:
+ - Tomasz Duszynski <tduszyns@gmail.com>
+
+description: |
+ Ambient light sensor with an i2c interface.
+
+properties:
+ compatible:
+ enum:
+ - rohm,bh1710
+ - rohm,bh1715
+ - rohm,bh1721
+ - rohm,bh1750
+ - rohm,bh1751
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@23 {
+ compatible = "rohm,bh1750";
+ reg = <0x23>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/light/veml6030.yaml b/Documentation/devicetree/bindings/iio/light/veml6030.yaml
new file mode 100644
index 000000000000..0ff9b11f9d18
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/veml6030.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0+
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/veml6030.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: VEML6030 Ambient Light Sensor (ALS)
+
+maintainers:
+ - Rishi Gupta <gupt21@gmail.com>
+
+description: |
+ Bindings for the ambient light sensor veml6030 from Vishay
+ Semiconductors over an i2c interface.
+
+ Irrespective of whether the interrupt is used, the application
+ can get the ALS and white channel readings from the IIO raw interface.
+
+ If the interrupt is used, the application will receive an IIO event
+ whenever the configured threshold is crossed.
+
+ Specifications about the sensor can be found at:
+ https://www.vishay.com/docs/84366/veml6030.pdf
+
+properties:
+ compatible:
+ enum:
+ - vishay,veml6030
+
+ reg:
+ description:
+ I2C address of the device.
+ enum:
+ - 0x10 # ADDR pin pulled down
+ - 0x48 # ADDR pin pulled up
+
+ interrupts:
+ description:
+ interrupt mapping for IRQ. Configure with IRQ_TYPE_LEVEL_LOW.
+ Refer to interrupt-controller/interrupts.txt for generic
+ interrupt client node bindings.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@10 {
+ compatible = "vishay,veml6030";
+ reg = <0x10>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
index c6721a7e8938..519137e5c170 100644
--- a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
+++ b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
@@ -28,12 +28,10 @@ properties:
vddd-supply:
description:
digital voltage regulator (see regulator/regulator.txt)
- maxItems: 1
vdda-supply:
description:
analog voltage regulator (see regulator/regulator.txt)
- maxItems: 1
reset-gpios:
description:
diff --git a/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt b/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
deleted file mode 100644
index dd1058fbe9c3..000000000000
--- a/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* MaxBotix I2CXL-MaxSonar ultrasonic distance sensor of type mb1202,
- mb1212, mb1222, mb1232, mb1242, mb7040 or mb7137 using the i2c interface
- for ranging
-
-Required properties:
- - compatible: "maxbotix,mb1202",
- "maxbotix,mb1212",
- "maxbotix,mb1222",
- "maxbotix,mb1232",
- "maxbotix,mb1242",
- "maxbotix,mb7040" or
- "maxbotix,mb7137"
-
- - reg: i2c address of the device, see also i2c/i2c.txt
-
-Optional properties:
- - interrupts: Interrupt used to announce the preceding reading
- request has finished and that data is available.
- If no interrupt is specified the device driver
- falls back to wait a fixed amount of time until
- data can be retrieved.
-
-Example:
-proximity@70 {
- compatible = "maxbotix,mb1232";
- reg = <0x70>;
- interrupt-parent = <&gpio2>;
- interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
-};
diff --git a/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml b/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
new file mode 100644
index 000000000000..3eac248f291d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/maxbotix,mb1232.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MaxBotix I2CXL-MaxSonar ultrasonic distance sensor
+
+maintainers:
+ - Andreas Klinger <ak@it-klinger.de>
+
+description: |
+ MaxBotix I2CXL-MaxSonar ultrasonic distance sensor of type mb1202,
+ mb1212, mb1222, mb1232, mb1242, mb7040 or mb7137 using the i2c interface
+ for ranging
+
+ Specifications about the devices can be found at:
+ https://www.maxbotix.com/documents/I2CXL-MaxSonar-EZ_Datasheet.pdf
+
+properties:
+ compatible:
+ enum:
+ - maxbotix,mb1202
+ - maxbotix,mb1212
+ - maxbotix,mb1222
+ - maxbotix,mb1232
+ - maxbotix,mb1242
+ - maxbotix,mb7040
+ - maxbotix,mb7137
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Interrupt used to announce that the preceding reading request has
+ finished and that data is available. If no interrupt is specified, the
+ device driver falls back to waiting a fixed amount of time until data
+ can be retrieved.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ proximity@70 {
+ compatible = "maxbotix,mb1232";
+ reg = <0x70>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
new file mode 100644
index 000000000000..d4922f9f0376
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
@@ -0,0 +1,480 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/temperature/adi,ltc2983.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices LTC2983 Multi-sensor Temperature system
+
+maintainers:
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ Analog Devices LTC2983 Multi-Sensor Digital Temperature Measurement System
+ https://www.analog.com/media/en/technical-documentation/data-sheets/2983fc.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ltc2983
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ adi,mux-delay-config-us:
+ description:
+ The LTC2983 performs 2 or 3 internal conversion cycles per temperature
+ result. Each conversion cycle is performed with different excitation and
+ input multiplexer configurations. Prior to each conversion, these
+ excitation circuits and input switch configurations are changed and an
+ internal 1ms delay ensures settling prior to the conversion cycle in most
+ cases. An extra delay can be configured using this property. The value is
+ rounded to the nearest 100us.
+ maximum: 255
+
+ adi,filter-notch-freq:
+ description:
+ Sets the default setting of the digital filter. The default is
+ simultaneous 50/60Hz rejection.
+ 0 - 50/60Hz rejection
+ 1 - 60Hz rejection
+ 2 - 50Hz rejection
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 2
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "@([1-9]|1[0-9]|20)$":
+ type: object
+
+ properties:
+ reg:
+ description:
+ The channel number. It can be connected to one of the 20 channels of
+ the device.
+ minimum: 1
+ maximum: 20
+
+ adi,sensor-type:
+ description: Identifies the type of sensor connected to the device.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - reg
+ - adi,sensor-type
+
+ "^thermocouple@":
+ type: object
+ description:
+ Represents a thermocouple sensor which is connected to one of the device
+ channels.
+
+ properties:
+ adi,sensor-type:
+ description: |
+ 1 - Type J Thermocouple
+ 2 - Type K Thermocouple
+ 3 - Type E Thermocouple
+ 4 - Type N Thermocouple
+ 5 - Type R Thermocouple
+ 6 - Type S Thermocouple
+ 7 - Type T Thermocouple
+ 8 - Type B Thermocouple
+ 9 - Custom Thermocouple
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 9
+
+ adi,single-ended:
+ description:
+ Boolean property which sets the thermocouple as single-ended.
+ type: boolean
+
+ adi,sensor-oc-current-microamp:
+ description:
+ This property sets the pulsed current value applied during
+ open-circuit detect.
+ enum: [10, 100, 500, 1000]
+
+ adi,cold-junction-handle:
+ description:
+ Phandle which points to a sensor object responsible for measuring
+ the thermocouple cold junction temperature.
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+
+ adi,custom-thermocouple:
+ description:
+ This is a table, where each entry should be a pair of
+ voltage(mV)-temperature(K). The entries must be given in nV and uK,
+ i.e. the original values must be multiplied by 1000000. For
+ more details look at tables 69 and 70.
+ Note that the values should be signed, but dtc doesn't currently
+ maintain the sign.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ items:
+ minItems: 3
+ maxItems: 64
+ items:
+ minItems: 2
+ maxItems: 2
+
+ "^diode@":
+ type: object
+ description:
+ Represents a diode sensor which is connected to one of the device
+ channels.
+
+ properties:
+ adi,sensor-type:
+ description: Identifies the sensor as a diode.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ const: 28
+
+ adi,single-ended:
+ description: Boolean property which sets the diode as single-ended.
+ type: boolean
+
+ adi,three-conversion-cycles:
+ description:
+ Boolean property which sets three conversion cycles, removing
+ parasitic resistance effects between the LTC2983 and the diode.
+ type: boolean
+
+ adi,average-on:
+ description:
+ Boolean property which enables a running average of the diode
+ temperature reading. This reduces the noise when the diode is used
+ as a cold junction temperature element on an isothermal block
+ where temperatures change slowly.
+ type: boolean
+
+ adi,excitation-current-microamp:
+ description:
+ This property controls the magnitude of the excitation current
+ applied to the diode. Depending on the number of conversion
+ cycles, this property will assume different predefined values on
+ each cycle. Just set the value of the first cycle (1l).
+ enum: [10, 20, 40, 80]
+
+ adi,ideal-factor-value:
+ description:
+ This property sets the diode ideality factor. The real value must
+ be multiplied by 1000000 to remove the fractional part. For more
+ information look at table 20 of the datasheet.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ "^rtd@":
+ type: object
+ description:
+ Represents an RTD sensor which is connected to one of the device channels.
+
+ properties:
+ reg:
+ minimum: 2
+ maximum: 20
+
+ adi,sensor-type:
+ description: |
+ 10 - RTD PT-10
+ 11 - RTD PT-50
+ 12 - RTD PT-100
+ 13 - RTD PT-200
+ 14 - RTD PT-500
+ 15 - RTD PT-1000
+ 16 - RTD PT-1000 (0.00375)
+ 17 - RTD NI-120
+ 18 - RTD Custom
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 10
+ maximum: 18
+
+ adi,rsense-handle:
+ description:
+ Phandle pointing to an rsense object associated with this RTD.
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+
+ adi,number-of-wires:
+ description:
+ Identifies the number of wires used by the RTD. Setting this
+ property to 5 means 4 wires with Kelvin Rsense.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [2, 3, 4, 5]
+
+ adi,rsense-share:
+ description:
+ Boolean property which enables Rsense sharing, where one sense
+ resistor is used for multiple 2-, 3-, and/or 4-wire RTDs.
+ type: boolean
+
+ adi,current-rotate:
+ description:
+ Boolean property which enables excitation current rotation to
+ automatically remove parasitic thermocouple effects. Note that
+ this property is not allowed for 2- and 3-wire RTDs.
+ type: boolean
+
+ adi,excitation-current-microamp:
+ description:
+ This property controls the magnitude of the excitation current
+ applied to the RTD.
+ enum: [5, 10, 25, 50, 100, 250, 500, 1000]
+
+ adi,rtd-curve:
+ description:
+ This property sets the RTD curve used and the corresponding
+ Callendar-VanDusen constants. Look at table 30 of the datasheet.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 3
+
+ adi,custom-rtd:
+ description:
+ This is a table, where each entry should be a pair of
+ resistance(ohm)-temperature(K). The entries added here are in uohm
+ and uK. For more details look at tables 74 and 75.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ items:
+ minItems: 3
+ maxItems: 64
+ items:
+ minItems: 2
+ maxItems: 2
+
+ required:
+ - adi,rsense-handle
+
+ dependencies:
+ adi,current-rotate: [ adi,rsense-share ]
+
+ "^thermistor@":
+ type: object
+ description:
+ Represents a thermistor sensor which is connected to one of the device
+ channels.
+
+ properties:
+ adi,sensor-type:
+ description:
+ 19 - Thermistor 44004/44033 2.252kohm at 25°C
+ 20 - Thermistor 44005/44030 3kohm at 25°C
+ 21 - Thermistor 44007/44034 5kohm at 25°C
+ 22 - Thermistor 44006/44031 10kohm at 25°C
+ 23 - Thermistor 44008/44032 30kohm at 25°C
+ 24 - Thermistor YSI 400 2.252kohm at 25°C
+ 25 - Thermistor Spectrum 1003k 1kohm
+ 26 - Thermistor Custom Steinhart-Hart
+ 27 - Custom Thermistor
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 19
+ maximum: 27
+
+ adi,rsense-handle:
+ description:
+ Phandle pointing to an rsense object associated with this
+ thermistor.
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+
+ adi,single-ended:
+ description:
+ Boolean property which sets the thermistor as single-ended.
+ type: boolean
+
+ adi,rsense-share:
+ description:
+ Boolean property which enables Rsense sharing, where one sense
+ resistor is used for multiple thermistors. Note that this property
+ is ignored if adi,single-ended is set.
+ type: boolean
+
+ adi,current-rotate:
+ description:
+ Boolean property which enables excitation current rotation to
+ automatically remove parasitic thermocouple effects.
+ type: boolean
+
+ adi,excitation-current-nanoamp:
+ description:
+ This property controls the magnitude of the excitation current
+ applied to the thermistor. Value 0 sets the sensor to auto-range
+ mode.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 250, 500, 1000, 5000, 10000, 25000, 50000, 100000,
+ 250000, 500000, 1000000]
+
+ adi,custom-thermistor:
+ description:
+ This is a table, where each entry should be a pair of
+ resistance(ohm)-temperature(K). The entries added here are in uohm
+ and uK only for custom thermistors. For more details look at tables
+ 78 and 79.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ items:
+ minItems: 3
+ maxItems: 64
+ items:
+ minItems: 2
+ maxItems: 2
+
+ adi,custom-steinhart:
+ description:
+ Steinhart-Hart coefficients are also supported and can
+ be programmed into the device memory using this property. For
+ Steinhart sensors the coefficients are given in the raw
+ format. Look at table 82 for more information.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minItems: 6
+ maxItems: 6
+
+ required:
+ - adi,rsense-handle
+
+ dependencies:
+ adi,current-rotate: [ adi,rsense-share ]
+
+ "^adc@":
+ type: object
+ description: Represents a channel which is being used as a direct adc.
+
+ properties:
+ adi,sensor-type:
+ description: Identifies the sensor as a direct adc.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ const: 30
+
+ adi,single-ended:
+ description: Boolean property which sets the ADC as single-ended.
+ type: boolean
+
+ "^rsense@":
+ type: object
+ description:
+ Represents an rsense which is connected to one of the device channels.
+ Rsenses are used by thermistors and RTDs.
+
+ properties:
+ reg:
+ minimum: 2
+ maximum: 20
+
+ adi,sensor-type:
+ description: Identifies the sensor as an rsense.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ const: 29
+
+ adi,rsense-val-milli-ohms:
+ description:
+ Sets the value of the sense resistor. Look at table 20 of the
+ datasheet for information.
+
+ required:
+ - adi,rsense-val-milli-ohms
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor_ltc2983: ltc2983@0 {
+ compatible = "adi,ltc2983";
+ reg = <0>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupts = <20 IRQ_TYPE_EDGE_RISING>;
+ interrupt-parent = <&gpio>;
+
+ thermocouple@18 {
+ reg = <18>;
+ adi,sensor-type = <8>; //Type B
+ adi,sensor-oc-current-microamp = <10>;
+ adi,cold-junction-handle = <&diode5>;
+ };
+
+ diode5: diode@5 {
+ reg = <5>;
+ adi,sensor-type = <28>;
+ };
+
+ rsense2: rsense@2 {
+ reg = <2>;
+ adi,sensor-type = <29>;
+ adi,rsense-val-milli-ohms = <1200000>; //1.2Kohms
+ };
+
+ rtd@14 {
+ reg = <14>;
+ adi,sensor-type = <15>; //PT1000
+ /*2-wire, internal gnd, no current rotation*/
+ adi,number-of-wires = <2>;
+ adi,rsense-share;
+ adi,excitation-current-microamp = <500>;
+ adi,rsense-handle = <&rsense2>;
+ };
+
+ adc@10 {
+ reg = <10>;
+ adi,sensor-type = <30>;
+ adi,single-ended;
+ };
+
+ thermistor@12 {
+ reg = <12>;
+ adi,sensor-type = <26>; //Steinhart
+ adi,rsense-handle = <&rsense2>;
+ adi,custom-steinhart = <0x00F371EC 0x12345678
+ 0x2C0F8733 0x10018C66 0xA0FEACCD
+ 0x90021D99>; //6 entries
+ };
+
+ thermocouple@20 {
+ reg = <20>;
+ adi,sensor-type = <9>; //custom thermocouple
+ adi,single-ended;
+ adi,custom-thermocouple = /bits/ 64
+ <(-50220000) 0
+ (-30200000) 99100000
+ (-5300000) 135400000
+ 0 273150000
+ 40200000 361200000
+ 55300000 522100000
+ 88300000 720300000
+ 132200000 811200000
+ 188700000 922500000
+ 460400000 1000000000>; //10 pairs
+ };
+
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt b/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt
deleted file mode 100644
index 85e6806b17d7..000000000000
--- a/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-STMicroelectronics STM32 Low-Power Timer Trigger
-
-STM32 Low-Power Timer provides trigger source (LPTIM output) that can be used
-by STM32 internal ADC and/or DAC.
-
-Must be a sub-node of an STM32 Low-Power Timer device tree node.
-See ../mfd/stm32-lptimer.txt for details about the parent node.
-
-Required properties:
-- compatible: Must be "st,stm32-lptimer-trigger".
-- reg: Identify trigger hardware block. Must be 0, 1 or 2
- respectively for lptimer1, lptimer2 or lptimer3
- trigger output.
-
-Example:
- timer@40002400 {
- compatible = "st,stm32-lptimer";
- ...
- trigger@0 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt b/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt
deleted file mode 100644
index b8e8c769d434..000000000000
--- a/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-STMicroelectronics STM32 Timers IIO timer bindings
-
-Must be a sub-node of an STM32 Timers device tree node.
-See ../mfd/stm32-timers.txt for details about the parent node.
-
-Required parameters:
-- compatible: Must be one of:
- "st,stm32-timer-trigger"
- "st,stm32h7-timer-trigger"
-- reg: Identify trigger hardware block.
-
-Example:
- timers@40010000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40010000 0x400>;
- clocks = <&rcc 0 160>;
- clock-names = "int";
-
- timer@0 {
- compatible = "st,stm32-timer-trigger";
- reg = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml b/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml
new file mode 100644
index 000000000000..5b37be0be4e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/fsl,mpr121-touchkey.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/fsl,mpr121-touchkey.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MPR121 capacitive touch sensor controller
+
+maintainers:
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+description: |
+ The MPR121 supports up to 12 completely independent electrodes/capacitance
+ sensing inputs, of which 8 are multifunctional for LED driving and GPIO.
+ https://www.nxp.com/docs/en/data-sheet/MPR121.pdf
+
+allOf:
+ - $ref: input.yaml#
+
+anyOf:
+ - required: [ interrupts ]
+ - required: [ poll-interval ]
+
+properties:
+ compatible:
+ const: fsl,mpr121-touchkey
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply:
+ maxItems: 1
+
+ linux,keycodes:
+ minItems: 1
+ maxItems: 12
+
+ wakeup-source:
+ description: Use any event on keypad as wakeup event.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - linux,keycodes
+
+examples:
+ - |
+ // Example with interrupts
+ #include "dt-bindings/input/input.h"
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mpr121@5a {
+ compatible = "fsl,mpr121-touchkey";
+ reg = <0x5a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <28 2>;
+ autorepeat;
+ vdd-supply = <&ldo4_reg>;
+ linux,keycodes = <KEY_0>, <KEY_1>, <KEY_2>, <KEY_3>,
+ <KEY_4>, <KEY_5>, <KEY_6>, <KEY_7>,
+ <KEY_8>, <KEY_9>, <KEY_A>, <KEY_B>;
+ };
+ };
+
+ - |
+ // Example with polling
+ #include "dt-bindings/input/input.h"
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mpr121@5a {
+ compatible = "fsl,mpr121-touchkey";
+ reg = <0x5a>;
+ poll-interval = <20>;
+ autorepeat;
+ vdd-supply = <&ldo4_reg>;
+ linux,keycodes = <KEY_0>, <KEY_1>, <KEY_2>, <KEY_3>,
+ <KEY_4>, <KEY_5>, <KEY_6>, <KEY_7>,
+ <KEY_8>, <KEY_9>, <KEY_A>, <KEY_B>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
index b2a76301e632..dc194b2c151a 100644
--- a/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
+++ b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
@@ -1,8 +1,9 @@
-Ilitek ILI210x/ILI251x touchscreen controller
+Ilitek ILI210x/ILI2117/ILI251x touchscreen controller
Required properties:
- compatible:
ilitek,ili210x for ILI210x
+ ilitek,ili2117 for ILI2117
ilitek,ili251x for ILI251x
- reg: The I2C address of the device
diff --git a/Documentation/devicetree/bindings/input/input.yaml b/Documentation/devicetree/bindings/input/input.yaml
new file mode 100644
index 000000000000..6d519046b3af
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/input.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/input.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common input schema binding
+
+maintainers:
+ - Dmitry Torokhov <dmitry.torokhov@gmail.com>
+
+properties:
+ autorepeat:
+ description: Enable autorepeat when key is pressed and held down.
+ type: boolean
+
+ linux,keycodes:
+ description:
+ Specifies an array of numeric keycode values to be used for reporting
+ button presses.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - items:
+ minimum: 0
+ maximum: 0xff
+
+ poll-interval:
+ description: Poll interval time in milliseconds.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ power-off-time-sec:
+ description:
+ Duration in seconds for which the key should be kept pressed for the
+ device to power off automatically. Devices with a key-pressed shutdown
+ feature can specify this property.
+ $ref: /schemas/types.yaml#/definitions/uint32
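For illustration, a minimal sketch of a device node using these common
properties (the node name and all values are hypothetical, not taken from the
schema):

	button {
		linux,keycodes = <116>;		/* KEY_POWER */
		autorepeat;
		poll-interval = <50>;		/* poll every 50 ms */
		power-off-time-sec = <10>;	/* hold 10 s to power off */
	};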
diff --git a/Documentation/devicetree/bindings/input/keys.txt b/Documentation/devicetree/bindings/input/keys.txt
deleted file mode 100644
index f5a5ddde53f1..000000000000
--- a/Documentation/devicetree/bindings/input/keys.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-General Keys Properties:
-
-Optional properties for Keys:
-- power-off-time-sec: Duration in seconds which the key should be kept
- pressed for device to power off automatically. Device with key pressed
- shutdown feature can specify this property.
-- linux,keycodes: Specifies the numeric keycode values to be used for
- reporting key presses.
diff --git a/Documentation/devicetree/bindings/input/max77650-onkey.txt b/Documentation/devicetree/bindings/input/max77650-onkey.txt
deleted file mode 100644
index 477dc74f452a..000000000000
--- a/Documentation/devicetree/bindings/input/max77650-onkey.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Onkey driver for MAX77650 PMIC from Maxim Integrated.
-
-This module is part of the MAX77650 MFD device. For more details
-see Documentation/devicetree/bindings/mfd/max77650.txt.
-
-The onkey controller is represented as a sub-node of the PMIC node on
-the device tree.
-
-Required properties:
---------------------
-- compatible: Must be "maxim,max77650-onkey".
-
-Optional properties:
-- linux,code: The key-code to be reported when the key is pressed.
- Defaults to KEY_POWER.
-- maxim,onkey-slide: The system's button is a slide switch, not the default
- push button.
-
-Example:
---------
-
- onkey {
- compatible = "maxim,max77650-onkey";
- linux,code = <KEY_END>;
- maxim,onkey-slide;
- };
diff --git a/Documentation/devicetree/bindings/input/max77650-onkey.yaml b/Documentation/devicetree/bindings/input/max77650-onkey.yaml
new file mode 100644
index 000000000000..2f2e0b6ebbbd
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/max77650-onkey.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/max77650-onkey.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Onkey driver for MAX77650 PMIC from Maxim Integrated.
+
+maintainers:
+ - Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+description: |
+ This module is part of the MAX77650 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/max77650.yaml.
+
+ The onkey controller is represented as a sub-node of the PMIC node on
+ the device tree.
+
+properties:
+ compatible:
+ const: maxim,max77650-onkey
+
+ linux,code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The key-code to be reported when the key is pressed. Defaults
+ to KEY_POWER.
+
+ maxim,onkey-slide:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ The system's button is a slide switch, not the default push button.
+
+required:
+ - compatible
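The schema above does not carry an example; the node from the txt binding it
replaces remains a valid instance of it, as a sub-node of the MAX77650 PMIC
node (KEY_END comes via dt-bindings/input/input.h):

	onkey {
		compatible = "maxim,max77650-onkey";
		linux,code = <KEY_END>;
		maxim,onkey-slide;
	};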
diff --git a/Documentation/devicetree/bindings/input/mpr121-touchkey.txt b/Documentation/devicetree/bindings/input/mpr121-touchkey.txt
deleted file mode 100644
index b7c61ee5841b..000000000000
--- a/Documentation/devicetree/bindings/input/mpr121-touchkey.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-* Freescale MPR121 Controllor
-
-Required Properties:
-- compatible: Should be "fsl,mpr121-touchkey"
-- reg: The I2C slave address of the device.
-- interrupts: The interrupt number to the cpu.
-- vdd-supply: Phandle to the Vdd power supply.
-- linux,keycodes: Specifies an array of numeric keycode values to
- be used for reporting button presses. The array can
- contain up to 12 entries.
-
-Optional Properties:
-- wakeup-source: Use any event on keypad as wakeup event.
-- autorepeat: Enable autorepeat feature.
-
-Example:
-
-#include "dt-bindings/input/input.h"
-
- touchkey: mpr121@5a {
- compatible = "fsl,mpr121-touchkey";
- reg = <0x5a>;
- interrupt-parent = <&gpio1>;
- interrupts = <28 2>;
- autorepeat;
- vdd-supply = <&ldo4_reg>;
- linux,keycodes = <KEY_0>, <KEY_1>, <KEY_2>, <KEY_3>,
- <KEY_4> <KEY_5>, <KEY_6>, <KEY_7>,
- <KEY_8>, <KEY_9>, <KEY_A>, <KEY_B>;
- };
diff --git a/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt b/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
index 2888d07c2ef0..535d92885372 100644
--- a/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
+++ b/Documentation/devicetree/bindings/input/mtk-pmic-keys.txt
@@ -10,13 +10,13 @@ Documentation/devicetree/bindings/mfd/mt6397.txt
Required properties:
- compatible: "mediatek,mt6397-keys" or "mediatek,mt6323-keys"
-- linux,keycodes: See Documentation/devicetree/bindings/input/keys.txt
+- linux,keycodes: See Documentation/devicetree/bindings/input/input.yaml
Optional Properties:
- wakeup-source: See Documentation/devicetree/bindings/power/wakeup-source.txt
- mediatek,long-press-mode: Long press key shutdown setting, 1 for
pwrkey only, 2 for pwrkey/homekey together, others for disabled.
-- power-off-time-sec: See Documentation/devicetree/bindings/input/keys.txt
+- power-off-time-sec: See Documentation/devicetree/bindings/input/input.yaml
Example:
diff --git a/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt b/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt
index 4494613ae7ad..eb8e83736c02 100644
--- a/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt
+++ b/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt
@@ -15,7 +15,7 @@ Optional properties:
- st,onkey-pu-inactive: onkey pull up is not active
- power-off-time-sec: Duration in seconds which the key should be kept
pressed for device to power off automatically (from 1 to 16 seconds).
- see See Documentation/devicetree/bindings/input/keys.txt
+ see Documentation/devicetree/bindings/input/input.yaml
Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
index cdd743a1f2d5..afa38dc069f0 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/ad7879.txt
@@ -38,7 +38,7 @@ Optional properties:
Example:
- ad7879@2c {
+ touchscreen0@2c {
compatible = "adi,ad7879-1";
reg = <0x2c>;
interrupt-parent = <&gpio1>;
@@ -52,7 +52,7 @@ Example:
adi,conversion-interval = /bits/ 8 <255>;
};
- ad7879@1 {
+ touchscreen1@1 {
compatible = "adi,ad7879";
spi-max-frequency = <5000000>;
reg = <1>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index 870b8c5cce9b..0f6950073d6f 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -30,6 +30,7 @@ Required properties:
Optional properties:
- reset-gpios: GPIO specification for the RESET input
- wake-gpios: GPIO specification for the WAKE input
+ - vcc-supply: Regulator that supplies the touchscreen
- pinctrl-names: should be "default"
- pinctrl-0: a phandle pointing to the pin settings for the
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
new file mode 100644
index 000000000000..9af3c6e59cff
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,msm8974.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm MSM8974 Network-On-Chip Interconnect
+
+maintainers:
+ - Brian Masney <masneyb@onstation.org>
+
+description: |
+ The Qualcomm MSM8974 interconnect providers support setting system
+ bandwidth requirements between various network-on-chip fabrics.
+
+properties:
+ reg:
+ maxItems: 1
+
+ compatible:
+ enum:
+ - qcom,msm8974-bimc
+ - qcom,msm8974-cnoc
+ - qcom,msm8974-mmssnoc
+ - qcom,msm8974-ocmemnoc
+ - qcom,msm8974-pnoc
+ - qcom,msm8974-snoc
+
+ '#interconnect-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+required:
+ - compatible
+ - reg
+ - '#interconnect-cells'
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ bimc: interconnect@fc380000 {
+ reg = <0xfc380000 0x6a000>;
+ compatible = "qcom,msm8974-bimc";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+ <&rpmcc RPM_SMD_BIMC_A_CLK>;
+ };
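With '#interconnect-cells' equal to 1, a consumer expresses a path as
(phandle, endpoint) pairs; a hypothetical sketch (the consumer node and
endpoint IDs below are made up for illustration and would normally come from
the provider's dt-bindings header):

	display@fd900000 {
		interconnects = <&bimc 16 &bimc 512>;
		interconnect-names = "mdp0-mem";
	};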
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt
deleted file mode 100644
index c07d89812b73..000000000000
--- a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Qualcomm QCS404 Network-On-Chip interconnect driver binding
------------------------------------------------------------
-
-Required properties :
-- compatible : shall contain only one of the following:
- "qcom,qcs404-bimc"
- "qcom,qcs404-pcnoc"
- "qcom,qcs404-snoc"
-- #interconnect-cells : should contain 1
-
-reg : specifies the physical base address and size of registers
-clocks : list of phandles and specifiers to all interconnect bus clocks
-clock-names : clock names should include both "bus" and "bus_a"
-
-Example:
-
-soc {
- ...
- bimc: interconnect@400000 {
- reg = <0x00400000 0x80000>;
- compatible = "qcom,qcs404-bimc";
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
- <&rpmcc RPM_SMD_BIMC_A_CLK>;
- };
-
- pnoc: interconnect@500000 {
- reg = <0x00500000 0x15080>;
- compatible = "qcom,qcs404-pcnoc";
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_PNOC_CLK>,
- <&rpmcc RPM_SMD_PNOC_A_CLK>;
- };
-
- snoc: interconnect@580000 {
- reg = <0x00580000 0x23080>;
- compatible = "qcom,qcs404-snoc";
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
- <&rpmcc RPM_SMD_SNOC_A_CLK>;
- };
-};
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
new file mode 100644
index 000000000000..8d65c5f80679
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,qcs404.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCS404 Network-On-Chip interconnect
+
+maintainers:
+ - Georgi Djakov <georgi.djakov@linaro.org>
+
+description: |
+ The Qualcomm QCS404 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
+
+properties:
+ reg:
+ maxItems: 1
+
+ compatible:
+ enum:
+ - qcom,qcs404-bimc
+ - qcom,qcs404-pcnoc
+ - qcom,qcs404-snoc
+
+ '#interconnect-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+required:
+ - compatible
+ - reg
+ - '#interconnect-cells'
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ bimc: interconnect@400000 {
+ reg = <0x00400000 0x80000>;
+ compatible = "qcom,qcs404-bimc";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+ <&rpmcc RPM_SMD_BIMC_A_CLK>;
+ };
+
+ pnoc: interconnect@500000 {
+ reg = <0x00500000 0x15080>;
+ compatible = "qcom,qcs404-pcnoc";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_PNOC_CLK>,
+ <&rpmcc RPM_SMD_PNOC_A_CLK>;
+ };
+
+ snoc: interconnect@580000 {
+ reg = <0x00580000 0x23080>;
+ compatible = "qcom,qcs404-snoc";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+ <&rpmcc RPM_SMD_SNOC_A_CLK>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
index 0eccf5551786..8cd08cfb25be 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
@@ -52,9 +52,7 @@ required:
- interrupts
- interrupt-controller
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index 1fe147daca4c..66aacd106503 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -138,6 +138,7 @@ properties:
containing a set of sub-nodes.
patternProperties:
"^interrupt-partition-[0-9]+$":
+ type: object
properties:
affinity:
$ref: /schemas/types.yaml#/definitions/phandle-array
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
index 2117d4ac1ae5..5ddef1dc0c1a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
@@ -31,6 +31,15 @@ Required properties:
- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
node; valid values depend on the type of parent interrupt controller
+Optional properties:
+
+- brcm,irq-can-wake: If present, the L1 controller can be used as a wakeup
+  source for system suspend/resume.
+
+- brcm,int-fwd-mask: If present, a bit mask indicating which interrupts
+  have already been configured by the firmware and should be left unmanaged.
+  This should have one 32-bit word per status/set/clear/mask group.
+
If multiple reg ranges and interrupt-parent entries are present on an SMP
system, the driver will allow IRQ SMP affinity to be set up through the
/proc/irq/ interface. In the simplest possible configuration, only one
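For illustration, a hypothetical node using the new optional property (the
address, interrupt specifier and parent are invented for this sketch and
assume the arm-gic binding headers):

	interrupt-controller@f0406800 {
		compatible = "brcm,bcm7038-l1-intc";
		reg = <0xf0406800 0x30>;
		interrupt-controller;
		#interrupt-cells = <1>;
		interrupt-parent = <&gic>;
		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
		brcm,irq-can-wake;
	};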
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.txt
new file mode 100644
index 000000000000..f0ad7801e8cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-extirq.txt
@@ -0,0 +1,49 @@
+* Freescale Layerscape external IRQs
+
+Some Layerscape SOCs (LS1021A, LS1043A, LS1046A) support inverting
+the polarity of certain external interrupt lines.
+
+The device node must be a child of the node representing the
+Supplemental Configuration Unit (SCFG).
+
+Required properties:
+- compatible: should be "fsl,<soc-name>-extirq", e.g. "fsl,ls1021a-extirq".
+- #interrupt-cells: Must be 2. The first element is the index of the
+ external interrupt line. The second element is the trigger type.
+- #address-cells: Must be 0.
+- interrupt-controller: Identifies the node as an interrupt controller
+- reg: Specifies the Interrupt Polarity Control Register (INTPCR) in
+ the SCFG.
+- interrupt-map: Specifies the mapping from external interrupts to GIC
+ interrupts.
+- interrupt-map-mask: Must be <0xffffffff 0>.
+
+Example:
+ scfg: scfg@1570000 {
+ compatible = "fsl,ls1021a-scfg", "syscon";
+ reg = <0x0 0x1570000 0x0 0x10000>;
+ big-endian;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1570000 0x10000>;
+
+ extirq: interrupt-controller@1ac {
+ compatible = "fsl,ls1021a-extirq";
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x1ac 4>;
+ interrupt-map =
+ <0 0 &gic GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <1 0 &gic GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <2 0 &gic GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <3 0 &gic GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <4 0 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <5 0 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map-mask = <0xffffffff 0x0>;
+ };
+ };
+
+Consumer example:
+ interrupts-extended = <&gic GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <&extirq 1 IRQ_TYPE_LEVEL_LOW>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
index 4a3ee253f7f0..4ebfa0008781 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
@@ -108,3 +108,15 @@ commonly used:
sensitivity = <7>;
};
};
+
+3) Interrupt wakeup parent
+--------------------------
+
+Some interrupt controllers in an SoC are always powered on and have select
+interrupts routed to them, so that they can wake up the SoC from suspend.
+These interrupt controllers do not fall into the category of a parent
+interrupt controller; they are instead referenced via the "wakeup-parent"
+property, a single phandle to the wakeup-capable interrupt controller.
+
+ Example:
+ wakeup-parent = <&pdc_intc>;
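A slightly fuller, hypothetical sketch (labels, addresses and cell counts are
illustrative only; most required properties of both nodes are omitted for
brevity):

	pdc_intc: interrupt-controller@b220000 {
		interrupt-controller;
		#interrupt-cells = <2>;
	};

	tlmm: pinctrl@3400000 {
		interrupt-controller;
		#interrupt-cells = <2>;
		wakeup-parent = <&pdc_intc>;
	};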
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
index 8e0797cb1487..1df293953327 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
@@ -17,7 +17,8 @@ Properties:
- compatible:
Usage: required
Value type: <string>
- Definition: Should contain "qcom,<soc>-pdc"
+ Definition: Should contain "qcom,<soc>-pdc" and "qcom,pdc"
+ - "qcom,sc7180-pdc": For SC7180
- "qcom,sdm845-pdc": For SDM845
- reg:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
deleted file mode 100644
index f977ea7617f6..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-DT bindings for the R-Mobile/R-Car/RZ/G interrupt controller
-
-Required properties:
-
-- compatible: must be "renesas,irqc-<soctype>" or "renesas,intc-ex-<soctype>",
- and "renesas,irqc" as fallback.
- Examples with soctypes are:
- - "renesas,irqc-r8a73a4" (R-Mobile APE6)
- - "renesas,irqc-r8a7743" (RZ/G1M)
- - "renesas,irqc-r8a7744" (RZ/G1N)
- - "renesas,irqc-r8a7745" (RZ/G1E)
- - "renesas,irqc-r8a77470" (RZ/G1C)
- - "renesas,irqc-r8a7790" (R-Car H2)
- - "renesas,irqc-r8a7791" (R-Car M2-W)
- - "renesas,irqc-r8a7792" (R-Car V2H)
- - "renesas,irqc-r8a7793" (R-Car M2-N)
- - "renesas,irqc-r8a7794" (R-Car E2)
- - "renesas,intc-ex-r8a774a1" (RZ/G2M)
- - "renesas,intc-ex-r8a774c0" (RZ/G2E)
- - "renesas,intc-ex-r8a7795" (R-Car H3)
- - "renesas,intc-ex-r8a7796" (R-Car M3-W)
- - "renesas,intc-ex-r8a77965" (R-Car M3-N)
- - "renesas,intc-ex-r8a77970" (R-Car V3M)
- - "renesas,intc-ex-r8a77980" (R-Car V3H)
- - "renesas,intc-ex-r8a77990" (R-Car E3)
- - "renesas,intc-ex-r8a77995" (R-Car D3)
-- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
- interrupts.txt in this directory
-- clocks: Must contain a reference to the functional clock.
-
-Optional properties:
-
-- any properties, listed in interrupts.txt, and any standard resource allocation
- properties
-
-Example:
-
- irqc0: interrupt-controller@e61c0000 {
- compatible = "renesas,irqc-r8a7790", "renesas,irqc";
- #interrupt-cells = <2>;
- interrupt-controller;
- reg = <0 0xe61c0000 0 0x200>;
- interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
- <0 1 IRQ_TYPE_LEVEL_HIGH>,
- <0 2 IRQ_TYPE_LEVEL_HIGH>,
- <0 3 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp4_clks R8A7790_CLK_IRQC>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
new file mode 100644
index 000000000000..ee5273b6c5a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/renesas,irqc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DT bindings for the R-Mobile/R-Car/RZ/G interrupt controller
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,irqc-r8a73a4 # R-Mobile APE6
+ - renesas,irqc-r8a7743 # RZ/G1M
+ - renesas,irqc-r8a7744 # RZ/G1N
+ - renesas,irqc-r8a7745 # RZ/G1E
+ - renesas,irqc-r8a77470 # RZ/G1C
+ - renesas,irqc-r8a7790 # R-Car H2
+ - renesas,irqc-r8a7791 # R-Car M2-W
+ - renesas,irqc-r8a7792 # R-Car V2H
+ - renesas,irqc-r8a7793 # R-Car M2-N
+ - renesas,irqc-r8a7794 # R-Car E2
+ - renesas,intc-ex-r8a774a1 # RZ/G2M
+ - renesas,intc-ex-r8a774b1 # RZ/G2N
+ - renesas,intc-ex-r8a774c0 # RZ/G2E
+ - renesas,intc-ex-r8a7795 # R-Car H3
+ - renesas,intc-ex-r8a7796 # R-Car M3-W
+ - renesas,intc-ex-r8a77965 # R-Car M3-N
+ - renesas,intc-ex-r8a77970 # R-Car V3M
+ - renesas,intc-ex-r8a77980 # R-Car V3H
+ - renesas,intc-ex-r8a77990 # R-Car E3
+ - renesas,intc-ex-r8a77995 # R-Car D3
+ - const: renesas,irqc
+
+ '#interrupt-cells':
+ # an interrupt index and flags, as defined in interrupts.txt in
+ # this directory
+ const: 2
+
+ interrupt-controller: true
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 32
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - '#interrupt-cells'
+ - interrupt-controller
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ irqc0: interrupt-controller@e61c0000 {
+ compatible = "renesas,irqc-r8a7790", "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 407>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
deleted file mode 100644
index cd01b2292ec6..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-STM32 External Interrupt Controller
-
-Required properties:
-
-- compatible: Should be:
- "st,stm32-exti"
- "st,stm32h7-exti"
- "st,stm32mp1-exti"
-- reg: Specifies base physical address and size of the registers
-- interrupt-controller: Indentifies the node as an interrupt controller
-- #interrupt-cells: Specifies the number of cells to encode an interrupt
- specifier, shall be 2
-- interrupts: interrupts references to primary interrupt controller
- (only needed for exti controller with multiple exti under
- same parent interrupt: st,stm32-exti and st,stm32h7-exti)
-
-Optional properties:
-
-- hwlocks: reference to a phandle of a hardware spinlock provider node.
-
-Example:
-
-exti: interrupt-controller@40013c00 {
- compatible = "st,stm32-exti";
- interrupt-controller;
- #interrupt-cells = <2>;
- reg = <0x40013C00 0x400>;
- interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
new file mode 100644
index 000000000000..9e5c6608b4e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/st,stm32-exti.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32 External Interrupt Controller Device Tree Bindings
+
+maintainers:
+ - Alexandre Torgue <alexandre.torgue@st.com>
+ - Ludovic Barre <ludovic.barre@st.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - st,stm32-exti
+ - st,stm32h7-exti
+ - items:
+ - enum:
+ - st,stm32mp1-exti
+ - const: syscon
+
+ "#interrupt-cells":
+ const: 2
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ hwlocks:
+ maxItems: 1
+ description:
+ Reference to a phandle of a hardware spinlock provider node.
+
+ interrupts:
+ description:
+ Interrupt references to the primary interrupt controller
+
+required:
+ - "#interrupt-cells"
+ - compatible
+ - reg
+ - interrupt-controller
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32-exti
+ then:
+ properties:
+ interrupts:
+ minItems: 1
+ maxItems: 32
+ required:
+ - interrupts
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32h7-exti
+ then:
+ properties:
+ interrupts:
+ minItems: 1
+ maxItems: 96
+ required:
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ //Example 1
+ exti1: interrupt-controller@5000d000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x5000d000 0x400>;
+ };
+
+ //Example 2
+ exti2: interrupt-controller@40013c00 {
+ compatible = "st,stm32-exti";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x40013C00 0x400>;
+ interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
deleted file mode 100644
index c9abbf3e4f68..000000000000
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-* ARM SMMUv3 Architecture Implementation
-
-The SMMUv3 architecture is a significant departure from previous
-revisions, replacing the MMIO register interface with in-memory command
-and event queues and adding support for the ATS and PRI components of
-the PCIe specification.
-
-** SMMUv3 required properties:
-
-- compatible : Should include:
-
- * "arm,smmu-v3" for any SMMUv3 compliant
- implementation. This entry should be last in the
- compatible list.
-
-- reg : Base address and size of the SMMU.
-
-- interrupts : Non-secure interrupt list describing the wired
- interrupt sources corresponding to entries in
- interrupt-names. If no wired interrupts are
- present then this property may be omitted.
-
-- interrupt-names : When the interrupts property is present, should
- include the following:
- * "eventq" - Event Queue not empty
- * "priq" - PRI Queue not empty
- * "cmdq-sync" - CMD_SYNC complete
- * "gerror" - Global Error activated
- * "combined" - The combined interrupt is optional,
- and should only be provided if the
- hardware supports just a single,
- combined interrupt line.
- If provided, then the combined interrupt
- will be used in preference to any others.
-
-- #iommu-cells : See the generic IOMMU binding described in
- devicetree/bindings/pci/pci-iommu.txt
- for details. For SMMUv3, must be 1, with each cell
- describing a single stream ID. All possible stream
- IDs which a device may emit must be described.
-
-** SMMUv3 optional properties:
-
-- dma-coherent : Present if DMA operations made by the SMMU (page
- table walks, stream table accesses etc) are cache
- coherent with the CPU.
-
- NOTE: this only applies to the SMMU itself, not
- masters connected upstream of the SMMU.
-
-- msi-parent : See the generic MSI binding described in
- devicetree/bindings/interrupt-controller/msi.txt
- for a description of the msi-parent property.
-
-- hisilicon,broken-prefetch-cmd
- : Avoid sending CMD_PREFETCH_* commands to the SMMU.
-
-- cavium,cn9900-broken-page1-regspace
- : Replaces all page 1 offsets used for EVTQ_PROD/CONS,
- PRIQ_PROD/CONS register access with page 0 offsets.
- Set for Cavium ThunderX2 silicon that doesn't support
- SMMU page1 register space.
-
-** Example
-
- smmu@2b400000 {
- compatible = "arm,smmu-v3";
- reg = <0x0 0x2b400000 0x0 0x20000>;
- interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
- dma-coherent;
- #iommu-cells = <1>;
- msi-parent = <&its 0xff0000>;
- };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml
new file mode 100644
index 000000000000..5951c6f98c74
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/arm,smmu-v3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM SMMUv3 Architecture Implementation
+
+maintainers:
+ - Will Deacon <will@kernel.org>
+ - Robin Murphy <Robin.Murphy@arm.com>
+
+description: |+
+ The SMMUv3 architecture is a significant departure from previous
+ revisions, replacing the MMIO register interface with in-memory command
+ and event queues and adding support for the ATS and PRI components of
+ the PCIe specification.
+
+properties:
+ $nodename:
+ pattern: "^iommu@[0-9a-f]*"
+ compatible:
+ const: arm,smmu-v3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 4
+
+ interrupt-names:
+ oneOf:
+ - const: combined
+ description:
+ The combined interrupt is optional, and should only be provided if the
+ hardware supports just a single, combined interrupt line.
+ If provided, then the combined interrupt will be used in preference to
+ any others.
+ - minItems: 2
+ maxItems: 4
+ items:
+ - const: eventq # Event Queue not empty
+ - const: gerror # Global Error activated
+ - const: priq # PRI Queue not empty
+ - const: cmdq-sync # CMD_SYNC complete
+
+ '#iommu-cells':
+ const: 1
+
+ dma-coherent:
+ description: |
+ Present if page table walks made by the SMMU are cache coherent with the
+ CPU.
+
+ NOTE: this only applies to the SMMU itself, not masters connected
+ upstream of the SMMU.
+
+ msi-parent: true
+
+ hisilicon,broken-prefetch-cmd:
+ type: boolean
+ description: Avoid sending CMD_PREFETCH_* commands to the SMMU.
+
+ cavium,cn9900-broken-page1-regspace:
+ type: boolean
+ description:
+ Replaces all page 1 offsets used for EVTQ_PROD/CONS, PRIQ_PROD/CONS
+ register access with page 0 offsets. Set for Cavium ThunderX2 silicon that
+ doesn't support SMMU page1 register space.
+
+required:
+ - compatible
+ - reg
+ - '#iommu-cells'
+
+additionalProperties: false
+
+examples:
+ - |+
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ iommu@2b400000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x2b400000 0x20000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ dma-coherent;
+ #iommu-cells = <1>;
+ msi-parent = <&its 0xff0000>;
+ };
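Since '#iommu-cells' is 1 and each cell encodes a single stream ID, a master
emitting stream IDs 0x10 and 0x11 would reference the SMMU as below (this
assumes the iommu node above carried a label such as "smmu"; the IDs are
hypothetical, and all stream IDs a device may emit must be listed):

	master@40000000 {
		...
		iommus = <&smmu 0x10>, <&smmu 0x11>;
	};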
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
deleted file mode 100644
index 3133f3ba7567..000000000000
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ /dev/null
@@ -1,182 +0,0 @@
-* ARM System MMU Architecture Implementation
-
-ARM SoCs may contain an implementation of the ARM System Memory
-Management Unit Architecture, which can be used to provide 1 or 2 stages
-of address translation to bus masters external to the CPU.
-
-The SMMU may also raise interrupts in response to various fault
-conditions.
-
-** System MMU required properties:
-
-- compatible : Should be one of:
-
- "arm,smmu-v1"
- "arm,smmu-v2"
- "arm,mmu-400"
- "arm,mmu-401"
- "arm,mmu-500"
- "cavium,smmu-v2"
- "qcom,smmu-v2"
-
- depending on the particular implementation and/or the
- version of the architecture implemented.
-
- Qcom SoCs must contain, as below, SoC-specific compatibles
- along with "qcom,smmu-v2":
- "qcom,msm8996-smmu-v2", "qcom,smmu-v2",
- "qcom,sdm845-smmu-v2", "qcom,smmu-v2".
-
- Qcom SoCs implementing "arm,mmu-500" must also include,
- as below, SoC-specific compatibles:
- "qcom,sdm845-smmu-500", "arm,mmu-500"
-
-- reg : Base address and size of the SMMU.
-
-- #global-interrupts : The number of global interrupts exposed by the
- device.
-
-- interrupts : Interrupt list, with the first #global-irqs entries
- corresponding to the global interrupts and any
- following entries corresponding to context interrupts,
- specified in order of their indexing by the SMMU.
-
- For SMMUv2 implementations, there must be exactly one
- interrupt per context bank. In the case of a single,
- combined interrupt, it must be listed multiple times.
-
-- #iommu-cells : See Documentation/devicetree/bindings/iommu/iommu.txt
- for details. With a value of 1, each IOMMU specifier
- represents a distinct stream ID emitted by that device
- into the relevant SMMU.
-
- SMMUs with stream matching support and complex masters
- may use a value of 2, where the second cell of the
- IOMMU specifier represents an SMR mask to combine with
- the ID in the first cell. Care must be taken to ensure
- the set of matched IDs does not result in conflicts.
-
-** System MMU optional properties:
-
-- dma-coherent : Present if page table walks made by the SMMU are
- cache coherent with the CPU.
-
- NOTE: this only applies to the SMMU itself, not
- masters connected upstream of the SMMU.
-
-- calxeda,smmu-secure-config-access : Enable proper handling of buggy
- implementations that always use secure access to
- SMMU configuration registers. In this case non-secure
- aliases of secure registers have to be used during
- SMMU configuration.
-
-- stream-match-mask : For SMMUs supporting stream matching and using
- #iommu-cells = <1>, specifies a mask of bits to ignore
- when matching stream IDs (e.g. this may be programmed
- into the SMRn.MASK field of every stream match register
- used). For cases where it is desirable to ignore some
- portion of every Stream ID (e.g. for certain MMU-500
- configurations given globally unique input IDs). This
- property is not valid for SMMUs using stream indexing,
- or using stream matching with #iommu-cells = <2>, and
- may be ignored if present in such cases.
-
-- clock-names: List of the names of clocks input to the device. The
- required list depends on particular implementation and
- is as follows:
- - for "qcom,smmu-v2":
- - "bus": clock required for downstream bus access and
- for the smmu ptw,
- - "iface": clock required to access smmu's registers
- through the TCU's programming interface.
- - unspecified for other implementations.
-
-- clocks: Specifiers for all clocks listed in the clock-names property,
- as per generic clock bindings.
-
-- power-domains: Specifiers for power domains required to be powered on for
- the SMMU to operate, as per generic power domain bindings.
-
-** Deprecated properties:
-
-- mmu-masters (deprecated in favour of the generic "iommus" binding) :
- A list of phandles to device nodes representing bus
- masters for which the SMMU can provide a translation
- and their corresponding Stream IDs. Each device node
- linked from this list must have a "#stream-id-cells"
- property, indicating the number of Stream ID
- arguments associated with its phandle.
-
-** Examples:
-
- /* SMMU with stream matching or stream indexing */
- smmu1: iommu {
- compatible = "arm,smmu-v1";
- reg = <0xba5e0000 0x10000>;
- #global-interrupts = <2>;
- interrupts = <0 32 4>,
- <0 33 4>,
- <0 34 4>, /* This is the first context interrupt */
- <0 35 4>,
- <0 36 4>,
- <0 37 4>;
- #iommu-cells = <1>;
- };
-
- /* device with two stream IDs, 0 and 7 */
- master1 {
- iommus = <&smmu1 0>,
- <&smmu1 7>;
- };
-
-
- /* SMMU with stream matching */
- smmu2: iommu {
- ...
- #iommu-cells = <2>;
- };
-
- /* device with stream IDs 0 and 7 */
- master2 {
- iommus = <&smmu2 0 0>,
- <&smmu2 7 0>;
- };
-
- /* device with stream IDs 1, 17, 33 and 49 */
- master3 {
- iommus = <&smmu2 1 0x30>;
- };
-
-
- /* ARM MMU-500 with 10-bit stream ID input configuration */
- smmu3: iommu {
- compatible = "arm,mmu-500", "arm,smmu-v2";
- ...
- #iommu-cells = <1>;
- /* always ignore appended 5-bit TBU number */
- stream-match-mask = 0x7c00;
- };
-
- bus {
- /* bus whose child devices emit one unique 10-bit stream
- ID each, but may master through multiple SMMU TBUs */
- iommu-map = <0 &smmu3 0 0x400>;
- ...
- };
-
- /* Qcom's arm,smmu-v2 implementation */
- smmu4: iommu@d00000 {
- compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
- reg = <0xd00000 0x10000>;
-
- #global-interrupts = <1>;
- interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
- #iommu-cells = <1>;
- power-domains = <&mmcc MDSS_GDSC>;
-
- clocks = <&mmcc SMMU_MDP_AXI_CLK>,
- <&mmcc SMMU_MDP_AHB_CLK>;
- clock-names = "bus", "iface";
- };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
new file mode 100644
index 000000000000..6515dbe47508
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -0,0 +1,230 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/arm,smmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM System MMU Architecture Implementation
+
+maintainers:
+ - Will Deacon <will@kernel.org>
+ - Robin Murphy <Robin.Murphy@arm.com>
+
+description: |+
+ ARM SoCs may contain an implementation of the ARM System Memory
+ Management Unit Architecture, which can be used to provide 1 or 2 stages
+ of address translation to bus masters external to the CPU.
+
+ The SMMU may also raise interrupts in response to various fault
+ conditions.
+
+properties:
+ $nodename:
+ pattern: "^iommu@[0-9a-f]*"
+ compatible:
+ oneOf:
+ - description: Qcom SoCs implementing "arm,smmu-v2"
+ items:
+ - enum:
+ - qcom,msm8996-smmu-v2
+ - qcom,msm8998-smmu-v2
+ - qcom,sdm845-smmu-v2
+ - const: qcom,smmu-v2
+
+ - description: Qcom SoCs implementing "arm,mmu-500"
+ items:
+ - enum:
+ - qcom,sc7180-smmu-500
+ - qcom,sdm845-smmu-500
+ - const: arm,mmu-500
+ - items:
+ - const: arm,mmu-500
+ - const: arm,smmu-v2
+ - items:
+ - const: arm,mmu-401
+ - const: arm,smmu-v1
+ - enum:
+ - arm,smmu-v1
+ - arm,smmu-v2
+ - arm,mmu-400
+ - arm,mmu-401
+ - arm,mmu-500
+ - cavium,smmu-v2
+
+ reg:
+ maxItems: 1
+
+ '#global-interrupts':
+ description: The number of global interrupts exposed by the device.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 260 # 2 secure, 2 non-secure, and up to 256 perf counters
+
+ '#iommu-cells':
+ enum: [ 1, 2 ]
+ description: |
+ See Documentation/devicetree/bindings/iommu/iommu.txt for details. With a
+ value of 1, each IOMMU specifier represents a distinct stream ID emitted
+ by that device into the relevant SMMU.
+
+ SMMUs with stream matching support and complex masters may use a value of
+ 2, where the second cell of the IOMMU specifier represents an SMR mask to
+ combine with the ID in the first cell. Care must be taken to ensure the
+ set of matched IDs does not result in conflicts.
+
+ interrupts:
+ minItems: 1
+ maxItems: 388 # 260 plus 128 contexts
+ description: |
+ Interrupt list, with the first #global-interrupts entries corresponding to
+ the global interrupts and any following entries corresponding to context
+ interrupts, specified in order of their indexing by the SMMU.
+
+ For SMMUv2 implementations, there must be exactly one interrupt per
+ context bank. In the case of a single, combined interrupt, it must be
+ listed multiple times.
+
+ dma-coherent:
+ description: |
+ Present if page table walks made by the SMMU are cache coherent with the
+ CPU.
+
+ NOTE: this only applies to the SMMU itself, not masters connected
+ upstream of the SMMU.
+
+ calxeda,smmu-secure-config-access:
+ type: boolean
+ description:
+ Enable proper handling of buggy implementations that always use secure
+ access to SMMU configuration registers. In this case non-secure aliases of
+ secure registers have to be used during SMMU configuration.
+
+ stream-match-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ For SMMUs supporting stream matching and using #iommu-cells = <1>,
+ specifies a mask of bits to ignore when matching stream IDs (e.g. this may
+ be programmed into the SMRn.MASK field of every stream match register
+ used). For cases where it is desirable to ignore some portion of every
+ Stream ID (e.g. for certain MMU-500 configurations given globally unique
+ input IDs). This property is not valid for SMMUs using stream indexing, or
+ using stream matching with #iommu-cells = <2>, and may be ignored if
+ present in such cases.
+
+ clock-names:
+ items:
+ - const: bus
+ - const: iface
+
+ clocks:
+ items:
+ - description: bus clock required for downstream bus access and for the
+ smmu ptw
+ - description: interface clock required to access smmu's registers
+ through the TCU's programming interface.
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#global-interrupts'
+ - '#iommu-cells'
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |+
+ /* SMMU with stream matching or stream indexing */
+ smmu1: iommu@ba5e0000 {
+ compatible = "arm,smmu-v1";
+ reg = <0xba5e0000 0x10000>;
+ #global-interrupts = <2>;
+ interrupts = <0 32 4>,
+ <0 33 4>,
+ <0 34 4>, /* This is the first context interrupt */
+ <0 35 4>,
+ <0 36 4>,
+ <0 37 4>;
+ #iommu-cells = <1>;
+ };
+
+ /* device with two stream IDs, 0 and 7 */
+ master1 {
+ iommus = <&smmu1 0>,
+ <&smmu1 7>;
+ };
+
+
+ /* SMMU with stream matching */
+ smmu2: iommu@ba5f0000 {
+ compatible = "arm,smmu-v1";
+ reg = <0xba5f0000 0x10000>;
+ #global-interrupts = <2>;
+ interrupts = <0 38 4>,
+ <0 39 4>,
+ <0 40 4>, /* This is the first context interrupt */
+ <0 41 4>,
+ <0 42 4>,
+ <0 43 4>;
+ #iommu-cells = <2>;
+ };
+
+ /* device with stream IDs 0 and 7 */
+ master2 {
+ iommus = <&smmu2 0 0>,
+ <&smmu2 7 0>;
+ };
+
+ /* device with stream IDs 1, 17, 33 and 49 */
+ master3 {
+ iommus = <&smmu2 1 0x30>;
+ };
+
+
+ /* ARM MMU-500 with 10-bit stream ID input configuration */
+ smmu3: iommu@ba600000 {
+ compatible = "arm,mmu-500", "arm,smmu-v2";
+ reg = <0xba600000 0x10000>;
+ #global-interrupts = <2>;
+ interrupts = <0 44 4>,
+ <0 45 4>,
+ <0 46 4>, /* This is the first context interrupt */
+ <0 47 4>,
+ <0 48 4>,
+ <0 49 4>;
+ #iommu-cells = <1>;
+ /* always ignore appended 5-bit TBU number */
+ stream-match-mask = <0x7c00>;
+ };
+
+ bus {
+ /* bus whose child devices emit one unique 10-bit stream
+ ID each, but may master through multiple SMMU TBUs */
+ iommu-map = <0 &smmu3 0 0x400>;
+
+
+ };
+
+ - |+
+ /* Qcom's arm,smmu-v2 implementation */
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ smmu4: iommu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ power-domains = <&mmcc 0>;
+
+ clocks = <&mmcc 123>,
+ <&mmcc 124>;
+ clock-names = "bus", "iface";
+ };
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index b6bfbec3a849..020d6f226efb 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -15,6 +15,7 @@ Required Properties:
- "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
- "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
- "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU.
+ - "renesas,ipmmu-r8a774b1" for the R8A774B1 (RZ/G2N) IPMMU.
- "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU.
- "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
- "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
deleted file mode 100644
index 525ec82615a6..000000000000
--- a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
-
-Samsung's Exynos architecture contains System MMUs that enables scattered
-physical memory chunks visible as a contiguous region to DMA-capable peripheral
-devices like MFC, FIMC, FIMD, GScaler, FIMC-IS and so forth.
-
-System MMU is an IOMMU and supports identical translation table format to
-ARMv7 translation tables with minimum set of page properties including access
-permissions, shareability and security protection. In addition, System MMU has
-another capabilities like L2 TLB or block-fetch buffers to minimize translation
-latency.
-
-System MMUs are in many to one relation with peripheral devices, i.e. single
-peripheral device might have multiple System MMUs (usually one for each bus
-master), but one System MMU can handle transactions from only one peripheral
-device. The relation between a System MMU and the peripheral device needs to be
-defined in device node of the peripheral device.
-
-MFC in all Exynos SoCs and FIMD, M2M Scalers and G2D in Exynos5420 has 2 System
-MMUs.
-* MFC has one System MMU on its left and right bus.
-* FIMD in Exynos5420 has one System MMU for window 0 and 4, the other system MMU
- for window 1, 2 and 3.
-* M2M Scalers and G2D in Exynos5420 has one System MMU on the read channel and
- the other System MMU on the write channel.
-
-For information on assigning System MMU controller to its peripheral devices,
-see generic IOMMU bindings.
-
-Required properties:
-- compatible: Should be "samsung,exynos-sysmmu"
-- reg: A tuple of base address and size of System MMU registers.
-- #iommu-cells: Should be <0>.
-- interrupts: An interrupt specifier for interrupt signal of System MMU,
- according to the format defined by a particular interrupt
- controller.
-- clock-names: Should be "sysmmu" or a pair of "aclk" and "pclk" to gate
- SYSMMU core clocks.
- Optional "master" if the clock to the System MMU is gated by
- another gate clock other core (usually main gate clock
- of peripheral device this SYSMMU belongs to).
-- clocks: Phandles for respective clocks described by clock-names.
-- power-domains: Required if the System MMU is needed to gate its power.
- Please refer to the following document:
- Documentation/devicetree/bindings/power/pd-samsung.txt
-
-Examples:
- gsc_0: gsc@13e00000 {
- compatible = "samsung,exynos5-gsc";
- reg = <0x13e00000 0x1000>;
- interrupts = <0 85 0>;
- power-domains = <&pd_gsc>;
- clocks = <&clock CLK_GSCL0>;
- clock-names = "gscl";
- iommus = <&sysmmu_gsc0>;
- };
-
- sysmmu_gsc0: sysmmu@13e80000 {
- compatible = "samsung,exynos-sysmmu";
- reg = <0x13E80000 0x1000>;
- interrupt-parent = <&combiner>;
- interrupts = <2 0>;
- clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_GSCL0>, <&clock CLK_GSCL0>;
- power-domains = <&pd_gsc>;
- #iommu-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
new file mode 100644
index 000000000000..7cdd3aaa2ba4
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/samsung,sysmmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
+
+maintainers:
+ - Marek Szyprowski <m.szyprowski@samsung.com>
+
+description: |+
+ Samsung's Exynos architecture contains System MMUs that enable scattered
+ physical memory chunks to be seen as a contiguous region by DMA-capable
+ peripheral devices like MFC, FIMC, FIMD, GScaler, FIMC-IS and so forth.
+
+ A System MMU is an IOMMU and supports a translation table format identical
+ to ARMv7 translation tables, with a minimum set of page properties including
+ access permissions, shareability and security protection. In addition, a
+ System MMU has further capabilities such as an L2 TLB and block-fetch
+ buffers to minimize translation latency.
+
+ System MMUs are in a many-to-one relation with peripheral devices, i.e. a
+ single peripheral device might have multiple System MMUs (usually one for
+ each bus master), but one System MMU can handle transactions from only one
+ peripheral device. The relation between a System MMU and the peripheral
+ device needs to be defined in the device node of the peripheral device.
+
+ MFC in all Exynos SoCs, and FIMD, M2M Scalers and G2D in Exynos5420, have
+ two System MMUs.
+ * MFC has one System MMU on its left bus and one on its right bus.
+ * FIMD in Exynos5420 has one System MMU for windows 0 and 4, and another
+   System MMU for windows 1, 2 and 3.
+ * M2M Scalers and G2D in Exynos5420 have one System MMU on the read channel
+   and another System MMU on the write channel.
+
+ For information on assigning a System MMU controller to its peripheral
+ devices, see the generic IOMMU bindings.
+
+properties:
+ compatible:
+ const: samsung,exynos-sysmmu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: sysmmu
+ - items:
+ - const: sysmmu
+ - const: master
+ - items:
+ - const: aclk
+ - const: pclk
+
+ "#iommu-cells":
+ const: 0
+
+ power-domains:
+ description: |
+ Required if power to the System MMU is gated.
+ Please refer to the following document:
+ Documentation/devicetree/bindings/power/pd-samsung.yaml
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - "#iommu-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5250.h>
+
+ gsc_0: scaler@13e00000 {
+ compatible = "samsung,exynos5-gsc";
+ reg = <0x13e00000 0x1000>;
+ interrupts = <0 85 0>;
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL0>;
+ clock-names = "gscl";
+ iommus = <&sysmmu_gsc0>;
+ };
+
+ sysmmu_gsc0: iommu@13e80000 {
+ compatible = "samsung,exynos-sysmmu";
+ reg = <0x13E80000 0x1000>;
+ interrupt-parent = <&combiner>;
+ interrupts = <2 0>;
+ clock-names = "sysmmu", "master";
+ clocks = <&clock CLK_SMMU_GSCL0>,
+ <&clock CLK_GSCL0>;
+ power-domains = <&pd_gsc>;
+ #iommu-cells = <0>;
+ };
+
diff --git a/Documentation/devicetree/bindings/leds/backlight/led-backlight.txt b/Documentation/devicetree/bindings/leds/backlight/led-backlight.txt
new file mode 100644
index 000000000000..4c7dfbe7f67a
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/led-backlight.txt
@@ -0,0 +1,28 @@
+led-backlight bindings
+
+This binding is used to describe a basic backlight device made of LEDs.
+It can also be used to describe a backlight device controlled by the output of
+an LED driver.
+
+Required properties:
+ - compatible: "led-backlight"
+ - leds: a list of LEDs
+
+Optional properties:
+ - brightness-levels: Array of distinct brightness levels. The levels must be
+ in the range accepted by the underlying LED devices.
+ This is used to translate a backlight brightness level
+ into a LED brightness level. If it is not provided, the
+ identity mapping is used.
+
+ - default-brightness-level: The default brightness level.
+
+Example:
+
+ backlight {
+ compatible = "led-backlight";
+
+ leds = <&led1>, <&led2>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
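To make the level translation concrete: default-brightness-level = <6> indexes
into brightness-levels, so the backlight in this example starts at entry 6 of
the array, i.e. an LED brightness of 128 out of the 0-255 range.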
diff --git a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
index dc129d9a329e..08fe5cf8614a 100644
--- a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
+++ b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
@@ -29,6 +29,10 @@ properties:
'#size-cells':
const: 0
+ enable-gpios:
+ description: GPIO to use to enable/disable the backlight (HWEN pin).
+ maxItems: 1
+
required:
- compatible
- reg
@@ -89,6 +93,7 @@ additionalProperties: false
examples:
- |
+ #include <dt-bindings/gpio/gpio.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
@@ -96,6 +101,7 @@ examples:
led-controller@38 {
compatible = "ti,lm3630a";
reg = <0x38>;
+ enable-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/leds/backlight/pm8941-wled.txt b/Documentation/devicetree/bindings/leds/backlight/pm8941-wled.txt
deleted file mode 100644
index e5b294dafc58..000000000000
--- a/Documentation/devicetree/bindings/leds/backlight/pm8941-wled.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Binding for Qualcomm PM8941 WLED driver
-
-Required properties:
-- compatible: should be "qcom,pm8941-wled"
-- reg: slave address
-
-Optional properties:
-- default-brightness: brightness value on boot, value from: 0-4095
- default: 2048
-- label: The name of the backlight device
-- qcom,cs-out: bool; enable current sink output
-- qcom,cabc: bool; enable content adaptive backlight control
-- qcom,ext-gen: bool; use externally generated modulator signal to dim
-- qcom,current-limit: mA; per-string current limit; value from 0 to 25
- default: 20mA
-- qcom,current-boost-limit: mA; boost current limit; one of:
- 105, 385, 525, 805, 980, 1260, 1400, 1680
- default: 805mA
-- qcom,switching-freq: kHz; switching frequency; one of:
- 600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
- 1600, 1920, 2400, 3200, 4800, 9600,
- default: 1600kHz
-- qcom,ovp: V; Over-voltage protection limit; one of:
- 27, 29, 32, 35
- default: 29V
-- qcom,num-strings: #; number of led strings attached; value from 1 to 3
- default: 2
-
-Example:
-
-pm8941-wled@d800 {
- compatible = "qcom,pm8941-wled";
- reg = <0xd800>;
- label = "backlight";
-
- qcom,cs-out;
- qcom,current-limit = <20>;
- qcom,current-boost-limit = <805>;
- qcom,switching-freq = <1600>;
- qcom,ovp = <29>;
- qcom,num-strings = <2>;
-};
diff --git a/Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt
new file mode 100644
index 000000000000..c06863badfbd
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt
@@ -0,0 +1,154 @@
+Binding for Qualcomm Technologies, Inc. WLED driver
+
+The WLED (White Light Emitting Diode) driver controls the display
+backlight that is part of the PMIC on Qualcomm Technologies, Inc.
+reference platforms. The PMIC is connected to the host processor via
+the SPMI bus.
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: should be one of:
+ "qcom,pm8941-wled"
+ "qcom,pmi8998-wled"
+ "qcom,pm660l-wled"
+
+- reg
+ Usage: required
+ Value type: <prop encoded array>
+ Definition: Base address of the WLED modules.
+
+- default-brightness
+ Usage: optional
+ Value type: <u32>
+ Definition: brightness value on boot, value from: 0-4095.
+ Default: 2048
+
+- label
+ Usage: required
+ Value type: <string>
+ Definition: The name of the backlight device
+
+- qcom,cs-out
+ Usage: optional
+ Value type: <bool>
+ Definition: enable current sink output.
+ This property is supported only for PM8941.
+
+- qcom,cabc
+ Usage: optional
+ Value type: <bool>
+ Definition: enable content adaptive backlight control.
+
+- qcom,ext-gen
+ Usage: optional
+ Value type: <bool>
+ Definition: use externally generated modulator signal to dim.
+ This property is supported only for PM8941.
+
+- qcom,current-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: mA; per-string current limit; value from 0 to 25 with
+ 1 mA step. Default: 20 mA.
+ This property is supported only for pm8941.
+
+- qcom,current-limit-microamp
+ Usage: optional
+ Value type: <u32>
+ Definition: uA; per-string current limit; value from 0 to 30000 with
+ 2500 uA step. Default: 25000 uA.
+
+- qcom,current-boost-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: mA; boost current limit.
+ For pm8941: one of: 105, 385, 525, 805, 980, 1260, 1400,
+ 1680. Default: 805 mA.
+ For pmi8998: one of: 105, 280, 450, 620, 970, 1150, 1300,
+ 1500. Default: 970 mA.
+
+- qcom,switching-freq
+ Usage: optional
+ Value type: <u32>
+ Definition: kHz; switching frequency; one of: 600, 640, 685, 738,
+ 800, 872, 960, 1066, 1200, 1371, 1600, 1920, 2400, 3200,
+ 4800, 9600.
+ Default: for pm8941: 1600 kHz
+ for pmi8998: 800 kHz
+
+- qcom,ovp
+ Usage: optional
+ Value type: <u32>
+ Definition: V; Over-voltage protection limit; one of:
+ 27, 29, 32, 35. Default: 29V
+ This property is supported only for PM8941.
+
+- qcom,ovp-millivolt
+ Usage: optional
+ Value type: <u32>
+ Definition: mV; Over-voltage protection limit;
+ For pmi8998: one of 18100, 19600, 29600, 31100.
+ Default: 29600 mV.
+ If this property is not specified for PM8941, it
+ falls back to the "qcom,ovp" property.
+
+- qcom,num-strings
+ Usage: optional
+ Value type: <u32>
+ Definition: #; number of LED strings attached;
+ For PM8941: value from 1 to 3. Default: 2.
+ For PMI8998: value from 1 to 4.
+
+- interrupts
+ Usage: optional
+ Value type: <prop encoded array>
+ Definition: Interrupts associated with WLED. These should be the
+ "short" and "ovp" interrupts. Interrupts can be
+ specified as per the encoding listed under
+ Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt.
+
+- interrupt-names
+ Usage: optional
+ Value type: <string>
+ Definition: Interrupt names associated with the interrupts.
+ Must be "short" and "ovp". The short circuit detection
+ is not supported for PM8941.
+
+- qcom,enabled-strings
+ Usage: optional
+ Value type: <u32 array>
+ Definition: Array of the WLED strings numbered from 0 to 3. Each
+ string of LEDs is operated individually. Specify the
+ list of strings used by the device. Any combination of
+ LED strings can be used.
+
+- qcom,external-pfet
+ Usage: optional
+ Value type: <bool>
+ Definition: Specify if external PFET control for short circuit
+ protection is used. This property is supported only
+ for PMI8998.
+
+- qcom,auto-string-detection
+ Usage: optional
+ Value type: <bool>
+ Definition: Enables auto-detection of the WLED string configuration.
+ This feature is not supported for PM8941.
+
+
+Example:
+
+pm8941-wled@d800 {
+ compatible = "qcom,pm8941-wled";
+ reg = <0xd800>;
+ label = "backlight";
+
+ qcom,cs-out;
+ qcom,current-limit = <20>;
+ qcom,current-boost-limit = <805>;
+ qcom,switching-freq = <1600>;
+ qcom,ovp = <29>;
+ qcom,num-strings = <2>;
+ qcom,enabled-strings = <0 1>;
+};
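+
+For a PMI8998-based board, a hedged sketch using the PMI8998-specific
+properties documented above (the reg value and interrupt specifiers are
+illustrative assumptions, not taken from a real board):
+
+pmi8998-wled@d800 {
+ compatible = "qcom,pmi8998-wled";
+ reg = <0xd800>; /* illustrative base address */
+ label = "backlight";
+
+ /* illustrative interrupt specifiers */
+ interrupts = <3 0xd8 1 IRQ_TYPE_EDGE_RISING>,
+ <3 0xd8 2 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ovp", "short";
+
+ qcom,current-limit-microamp = <25000>;
+ qcom,current-boost-limit = <970>;
+ qcom,ovp-millivolt = <29600>;
+ qcom,switching-freq = <800>;
+ qcom,num-strings = <4>;
+ qcom,external-pfet;
+ qcom,auto-string-detection;
+};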
diff --git a/Documentation/devicetree/bindings/leds/leds-el15203000.txt b/Documentation/devicetree/bindings/leds/leds-el15203000.txt
new file mode 100644
index 000000000000..182f0035ed28
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-el15203000.txt
@@ -0,0 +1,69 @@
+Crane Merchandising System - EL15203000 LED driver
+--------------------------------------------------
+
+This LED board (aka the RED LEDs board) is widely used in
+coffee vending machines produced by Crane Merchandising Systems.
+The board manages 3 LEDs and supports predefined blinking patterns
+for specific LEDs.
+
+The vending area LED is encoded with symbol 'V' (hex code 0x56).
+It doesn't have any hardware blinking pattern.
+
+The screen light tube LED surrounds the vending machine screen and
+is encoded with symbol 'S' (hex code 0x53). It supports a breathing
+blinking pattern.
+
+The water pipe LED is encoded with symbol 'P' (hex code 0x50) and
+actually consists of 5 LEDs that are exposed by the protocol as a
+single LED. It supports the following patterns:
+- cascade pattern
+- inversed cascade pattern
+- bounce pattern
+- inversed bounce pattern
+
+Required properties:
+- compatible : "crane,el15203000"
+- #address-cells : must be 1
+- #size-cells : must be 0
+
+Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
+apply. In particular, "reg" and "spi-max-frequency" properties must be given.
+
+Optional LED sub-node properties:
+- function:
+ see Documentation/devicetree/bindings/leds/common.txt
+- color:
+ see Documentation/devicetree/bindings/leds/common.txt
+
+Example
+-------
+
+#include <dt-bindings/leds/common.h>
+
+led-controller@0 {
+ compatible = "crane,el15203000";
+ reg = <0>;
+ spi-max-frequency = <50000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* water pipe */
+ led@50 {
+ reg = <0x50>;
+ function = "pipe";
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ /* screen frame */
+ led@53 {
+ reg = <0x53>;
+ function = "screen";
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ /* vending area */
+ led@56 {
+ reg = <0x56>;
+ function = "vend";
+ color = <LED_COLOR_ID_RED>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-max77650.txt b/Documentation/devicetree/bindings/leds/leds-max77650.txt
deleted file mode 100644
index 3a67115cc1da..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-max77650.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-LED driver for MAX77650 PMIC from Maxim Integrated.
-
-This module is part of the MAX77650 MFD device. For more details
-see Documentation/devicetree/bindings/mfd/max77650.txt.
-
-The LED controller is represented as a sub-node of the PMIC node on
-the device tree.
-
-This device has three current sinks.
-
-Required properties:
---------------------
-- compatible: Must be "maxim,max77650-led"
-- #address-cells: Must be <1>.
-- #size-cells: Must be <0>.
-
-Each LED is represented as a sub-node of the LED-controller node. Up to
-three sub-nodes can be defined.
-
-Required properties of the sub-node:
-------------------------------------
-
-- reg: Must be <0>, <1> or <2>.
-
-Optional properties of the sub-node:
-------------------------------------
-
-- label: See Documentation/devicetree/bindings/leds/common.txt
-- linux,default-trigger: See Documentation/devicetree/bindings/leds/common.txt
-
-For more details, please refer to the generic GPIO DT binding document
-<devicetree/bindings/gpio/gpio.txt>.
-
-Example:
---------
-
- leds {
- compatible = "maxim,max77650-led";
- #address-cells = <1>;
- #size-cells = <0>;
-
- led@0 {
- reg = <0>;
- label = "blue:usr0";
- };
-
- led@1 {
- reg = <1>;
- label = "red:usr1";
- linux,default-trigger = "heartbeat";
- };
-
- led@2 {
- reg = <2>;
- label = "green:usr2";
- };
- };
diff --git a/Documentation/devicetree/bindings/leds/leds-max77650.yaml b/Documentation/devicetree/bindings/leds/leds-max77650.yaml
new file mode 100644
index 000000000000..8c43f1e1bf7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-max77650.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-max77650.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LED driver for MAX77650 PMIC from Maxim Integrated.
+
+maintainers:
+ - Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+description: |
+ This module is part of the MAX77650 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/max77650.yaml.
+
+ The LED controller is represented as a sub-node of the PMIC node on
+ the device tree.
+
+ This device has three current sinks.
+
+properties:
+ compatible:
+ const: maxim,max77650-led
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[0-2]$":
+ type: object
+ description: |
+ Properties for a single LED.
+
+ properties:
+ reg:
+ description:
+ Index of the LED.
+ minimum: 0
+ maximum: 2
+
+ label: true
+
+ linux,default-trigger: true
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
index f3cf77eb5ab4..9c43357c5924 100644
--- a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
@@ -21,6 +21,8 @@ Required properties:
imx6sx, imx7s, imx8qxp, imx8qm.
The "fsl,imx6sx-mu" compatible is seen as generic and should
be included together with SoC specific compatible.
+ There is a version 1.0 MU on imx7ulp; use the "fsl,imx7ulp-mu"
+ compatible to support it.
- reg : Should contain the registers location and length
- interrupts : Interrupt number. The interrupt specifier format depends
on the interrupt controller parent.
diff --git a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
new file mode 100644
index 000000000000..5b13d6672996
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mailbox/st,stm32-ipcc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: STMicroelectronics STM32 IPC controller bindings
+
+description:
+ The IPCC block provides a non-blocking signaling mechanism to post and
+ retrieve messages in an atomic way between two processors.
+ It provides the signaling for N bidirectional channels. The number of
+ channels (N) can be read from a dedicated register.
+
+maintainers:
+ - Fabien Dessenne <fabien.dessenne@st.com>
+ - Arnaud Pouliquen <arnaud.pouliquen@st.com>
+
+properties:
+ compatible:
+ const: st,stm32mp1-ipcc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: rx channel occupied
+ - description: tx channel free
+ - description: wakeup source
+ minItems: 2
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: rx
+ - const: tx
+ - const: wakeup
+ minItems: 2
+ maxItems: 3
+
+ wakeup-source: true
+
+ "#mbox-cells":
+ const: 1
+
+ st,proc-id:
+ description: Processor id using the mailbox (0 or 1)
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+
+required:
+ - compatible
+ - reg
+ - st,proc-id
+ - clocks
+ - interrupt-names
+ - "#mbox-cells"
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ ipcc: mailbox@4c001000 {
+ compatible = "st,stm32mp1-ipcc";
+ #mbox-cells = <1>;
+ reg = <0x4c001000 0x400>;
+ st,proc-id = <0>;
+ interrupts-extended = <&intc GIC_SPI 100 IRQ_TYPE_NONE>,
+ <&intc GIC_SPI 101 IRQ_TYPE_NONE>,
+ <&aiec 62 1>;
+ interrupt-names = "rx", "tx", "wakeup";
+ clocks = <&rcc_clk IPCC>;
+ wakeup-source;
+ };
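+
+ /* Usage sketch for a mailbox client, carried over from the
+ * previous text binding; the client node name is illustrative. */
+ mbox-test {
+ mboxes = <&ipcc 0>, <&ipcc 1>;
+ };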
+
+...
diff --git a/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt b/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt
deleted file mode 100644
index 1d2b7fee7b85..000000000000
--- a/Documentation/devicetree/bindings/mailbox/stm32-ipcc.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-* STMicroelectronics STM32 IPCC (Inter-Processor Communication Controller)
-
-The IPCC block provides a non blocking signaling mechanism to post and
-retrieve messages in an atomic way between two processors.
-It provides the signaling for N bidirectionnal channels. The number of channels
-(N) can be read from a dedicated register.
-
-Required properties:
-- compatible: Must be "st,stm32mp1-ipcc"
-- reg: Register address range (base address and length)
-- st,proc-id: Processor id using the mailbox (0 or 1)
-- clocks: Input clock
-- interrupt-names: List of names for the interrupts described by the interrupt
- property. Must contain the following entries:
- - "rx"
- - "tx"
- - "wakeup"
-- interrupts: Interrupt specifiers for "rx channel occupied", "tx channel
- free" and "system wakeup".
-- #mbox-cells: Number of cells required for the mailbox specifier. Must be 1.
- The data contained in the mbox specifier of the "mboxes"
- property in the client node is the mailbox channel index.
-
-Optional properties:
-- wakeup-source: Flag to indicate whether this device can wake up the system
-
-
-
-Example:
- ipcc: mailbox@4c001000 {
- compatible = "st,stm32mp1-ipcc";
- #mbox-cells = <1>;
- reg = <0x4c001000 0x400>;
- st,proc-id = <0>;
- interrupts-extended = <&intc GIC_SPI 100 IRQ_TYPE_NONE>,
- <&intc GIC_SPI 101 IRQ_TYPE_NONE>,
- <&aiec 62 1>;
- interrupt-names = "rx", "tx", "wakeup";
- clocks = <&rcc_clk IPCC>;
- wakeup-source;
- }
-
-Client:
- mbox_test {
- ...
- mboxes = <&ipcc 0>, <&ipcc 1>;
- };
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
index 98c1bdde9a86..dea36d68cdbe 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-ir.yaml
@@ -60,9 +60,7 @@ required:
- clocks
- clock-names
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml b/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
new file mode 100644
index 000000000000..2e40f700e84f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/allwinner,sun8i-h3-deinterlace.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H3 Deinterlace Device Tree Bindings
+
+maintainers:
+ - Jernej Skrabec <jernej.skrabec@siol.net>
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <mripard@kernel.org>
+
+description: |-
+ The Allwinner H3 and later has a deinterlace core used for
+ deinterlacing interlaced video content.
+
+properties:
+ compatible:
+ const: allwinner,sun8i-h3-deinterlace
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Deinterlace interface clock
+ - description: Deinterlace module clock
+ - description: Deinterlace DRAM clock
+
+ clock-names:
+ items:
+ - const: bus
+ - const: mod
+ - const: ram
+
+ resets:
+ maxItems: 1
+
+ interconnects:
+ maxItems: 1
+
+ interconnect-names:
+ const: dma-mem
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sun8i-h3-ccu.h>
+ #include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+ deinterlace: deinterlace@1400000 {
+ compatible = "allwinner,sun8i-h3-deinterlace";
+ reg = <0x01400000 0x20000>;
+ clocks = <&ccu CLK_BUS_DEINTERLACE>,
+ <&ccu CLK_DEINTERLACE>,
+ <&ccu CLK_DRAM_DEINTERLACE>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_DEINTERLACE>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&mbus 9>;
+ interconnect-names = "dma-mem";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
new file mode 100644
index 000000000000..41197578f19a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/media/amlogic,meson-gx-ao-cec.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson AO-CEC Controller
+
+maintainers:
+ - Neil Armstrong <narmstrong@baylibre.com>
+
+description: |
+ The Amlogic Meson AO-CEC module is present in Amlogic SoCs and its purpose is
+ to handle communication between HDMI connected devices over the CEC bus.
+
+properties:
+ compatible:
+ enum:
+ - amlogic,meson-gx-ao-cec # GXBB, GXL, GXM, G12A and SM1 AO_CEC_A module
+ - amlogic,meson-g12a-ao-cec # G12A AO_CEC_B module
+ - amlogic,meson-sm1-ao-cec # SM1 AO_CEC_B module
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ hdmi-phandle:
+ description: phandle to the HDMI controller
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - amlogic,meson-gx-ao-cec
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: AO-CEC clock
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: core
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - amlogic,meson-g12a-ao-cec
+ - amlogic,meson-sm1-ao-cec
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: AO-CEC clock generator source
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: oscin
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - hdmi-phandle
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ cec_AO: cec@100 {
+ compatible = "amlogic,meson-gx-ao-cec";
+ reg = <0x0 0x00100 0x0 0x14>;
+ interrupts = <199>;
+ clocks = <&clkc_cec>;
+ clock-names = "core";
+ hdmi-phandle = <&hdmi_tx>;
+ };
+
diff --git a/Documentation/devicetree/bindings/media/i2c/ad5820.txt b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
index 5940ca11c021..5764cbedf9b7 100644
--- a/Documentation/devicetree/bindings/media/i2c/ad5820.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ad5820.txt
@@ -2,12 +2,20 @@
Required Properties:
- - compatible: Must contain "adi,ad5820"
+ - compatible: Must contain one of:
+ - "adi,ad5820"
+ - "adi,ad5821"
+ - "adi,ad5823"
- reg: I2C slave address
- VANA-supply: supply of voltage for VANA pin
+Optional properties:
+
+ - enable-gpios : GPIO spec for the XSHUTDOWN pin. The XSHUTDOWN signal is
+active low; driving the pin high enables the device.
+
Example:
ad5820: coil@c {
@@ -15,5 +23,6 @@ Example:
reg = <0x0c>;
VANA-supply = <&vaux4>;
+ enable-gpios = <&msmgpio 26 GPIO_ACTIVE_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/media/i2c/imx290.txt b/Documentation/devicetree/bindings/media/i2c/imx290.txt
new file mode 100644
index 000000000000..a3cc21410f7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/imx290.txt
@@ -0,0 +1,57 @@
+* Sony IMX290 1/2.8-Inch CMOS Image Sensor
+
+The Sony IMX290 is a 1/2.8-Inch CMOS Solid-state image sensor with
+Square Pixel for Color Cameras. It is programmable through I2C and 4-wire
+interfaces. The sensor output is available via CMOS logic parallel SDR output,
+low-voltage LVDS DDR output and CSI-2 serial data output. The CSI-2 bus is the
+default. No bindings have been defined for the other busses.
+
+Required Properties:
+- compatible: Should be "sony,imx290"
+- reg: I2C bus address of the device
+- clocks: Reference to the xclk clock.
+- clock-names: Should be "xclk".
+- clock-frequency: Frequency of the xclk clock in Hz.
+- vdddo-supply: Sensor digital IO regulator.
+- vdda-supply: Sensor analog regulator.
+- vddd-supply: Sensor digital core regulator.
+
+Optional Properties:
+- reset-gpios: Sensor reset GPIO
+
+The imx290 device node should contain one 'port' child node with
+an 'endpoint' subnode. For further reading on port node refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Required Properties on endpoint:
+- data-lanes: check ../video-interfaces.txt
+- link-frequencies: check ../video-interfaces.txt
+- remote-endpoint: check ../video-interfaces.txt
+
+Example:
+ &i2c1 {
+ ...
+ imx290: camera-sensor@1a {
+ compatible = "sony,imx290";
+ reg = <0x1a>;
+
+ reset-gpios = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_default>;
+
+ clocks = <&gcc GCC_CAMSS_MCLK0_CLK>;
+ clock-names = "xclk";
+ clock-frequency = <37125000>;
+
+ vdddo-supply = <&camera_vdddo_1v8>;
+ vdda-supply = <&camera_vdda_2v8>;
+ vddd-supply = <&camera_vddd_1v5>;
+
+ port {
+ imx290_ep: endpoint {
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <445500000>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
index c3c3479233c4..10ece8108081 100644
--- a/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
+++ b/Documentation/devicetree/bindings/media/i2c/nokia,smia.txt
@@ -27,8 +27,6 @@ Mandatory properties
Optional properties
-------------------
-- nokia,nvm-size: The size of the NVM, in bytes. If the size is not given,
- the NVM contents will not be read.
- reset-gpios: XSHUTDOWN GPIO
- flash-leds: See ../video-interfaces.txt
- lens-focus: See ../video-interfaces.txt
diff --git a/Documentation/devicetree/bindings/media/i2c/ov2659.txt b/Documentation/devicetree/bindings/media/i2c/ov2659.txt
index cabc7d827dfb..92989a619f29 100644
--- a/Documentation/devicetree/bindings/media/i2c/ov2659.txt
+++ b/Documentation/devicetree/bindings/media/i2c/ov2659.txt
@@ -12,6 +12,12 @@ Required Properties:
- clock-names: should be "xvclk".
- link-frequencies: target pixel clock frequency.
+Optional Properties:
+- powerdown-gpios: reference to the GPIO connected to the pwdn pin, if any.
+ Active high with internal pull down resistor.
+- reset-gpios: reference to the GPIO connected to the resetb pin, if any.
+ Active low with internal pull up resistor.
+
For further reading on port node refer to
Documentation/devicetree/bindings/media/video-interfaces.txt.
@@ -27,6 +33,9 @@ Example:
clocks = <&clk_ov2659 0>;
clock-names = "xvclk";
+ powerdown-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+
port {
ov2659_0: endpoint {
remote-endpoint = <&vpfe_ep>;
diff --git a/Documentation/devicetree/bindings/media/meson-ao-cec.txt b/Documentation/devicetree/bindings/media/meson-ao-cec.txt
deleted file mode 100644
index ad92ee41c0dd..000000000000
--- a/Documentation/devicetree/bindings/media/meson-ao-cec.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-* Amlogic Meson AO-CEC driver
-
-The Amlogic Meson AO-CEC module is present is Amlogic SoCs and its purpose is
-to handle communication between HDMI connected devices over the CEC bus.
-
-Required properties:
- - compatible : value should be following depending on the SoC :
- For GXBB, GXL, GXM, G12A and SM1 (AO_CEC_A module) :
- "amlogic,meson-gx-ao-cec"
- For G12A (AO_CEC_B module) :
- "amlogic,meson-g12a-ao-cec"
- For SM1 (AO_CEC_B module) :
- "amlogic,meson-sm1-ao-cec"
-
- - reg : Physical base address of the IP registers and length of memory
- mapped region.
-
- - interrupts : AO-CEC interrupt number to the CPU.
- - clocks : from common clock binding: handle to AO-CEC clock.
- - clock-names : from common clock binding, must contain :
- For GXBB, GXL, GXM, G12A and SM1 (AO_CEC_A module) :
- - "core"
- For G12A, SM1 (AO_CEC_B module) :
- - "oscin"
- corresponding to entry in the clocks property.
- - hdmi-phandle: phandle to the HDMI controller
-
-Example:
-
-cec_AO: cec@100 {
- compatible = "amlogic,meson-gx-ao-cec";
- reg = <0x0 0x00100 0x0 0x14>;
- interrupts = <GIC_SPI 199 IRQ_TYPE_EDGE_RISING>;
- clocks = <&clkc_AO CLKID_AO_CEC_32K>;
- clock-names = "core";
- hdmi-phandle = <&hdmi_tx>;
-};
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index 9054555e6608..d11380794ff4 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -39,6 +39,7 @@ properties:
- rc-avermedia-rm-ks
- rc-avertv-303
- rc-azurewave-ad-tu700
+ - rc-beelink-gs1
- rc-behold
- rc-behold-columbus
- rc-budget-ci-old
@@ -82,6 +83,7 @@ properties:
- rc-it913x-v1
- rc-it913x-v2
- rc-kaiomy
+ - rc-khadas
- rc-kworld-315u
- rc-kworld-pc150u
- rc-kworld-plus-tv-analog
@@ -99,6 +101,7 @@ properties:
- rc-nec-terratec-cinergy-xs
- rc-norwood
- rc-npgtech
+ - rc-odroid
- rc-pctv-sedna
- rc-pinnacle-color
- rc-pinnacle-grey
@@ -119,6 +122,7 @@ properties:
- rc-streamzap
- rc-su3000
- rc-tango
+ - rc-tanix-tx3mini
- rc-tbs-nec
- rc-technisat-ts35
- rc-technisat-usb2
@@ -138,7 +142,10 @@ properties:
- rc-videomate-k100
- rc-videomate-s350
- rc-videomate-tv-pvr
+ - rc-wetek-hub
+ - rc-wetek-play2
- rc-winfast
- rc-winfast-usbii-deluxe
+ - rc-x96max
- rc-xbox-dvd
- rc-zx-irdec
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.txt b/Documentation/devicetree/bindings/media/renesas,csi2.txt
index 331409259752..2da6f60b2b56 100644
--- a/Documentation/devicetree/bindings/media/renesas,csi2.txt
+++ b/Documentation/devicetree/bindings/media/renesas,csi2.txt
@@ -9,6 +9,7 @@ Mandatory properties
--------------------
- compatible: Must be one or more of the following
- "renesas,r8a774a1-csi2" for the R8A774A1 device.
+ - "renesas,r8a774b1-csi2" for the R8A774B1 device.
- "renesas,r8a774c0-csi2" for the R8A774C0 device.
- "renesas,r8a7795-csi2" for the R8A7795 device.
- "renesas,r8a7796-csi2" for the R8A7796 device.
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.txt b/Documentation/devicetree/bindings/media/renesas,vin.txt
index aa217b096279..e30b0d4eefdd 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.txt
+++ b/Documentation/devicetree/bindings/media/renesas,vin.txt
@@ -14,6 +14,7 @@ on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
- "renesas,vin-r8a7744" for the R8A7744 device
- "renesas,vin-r8a7745" for the R8A7745 device
- "renesas,vin-r8a774a1" for the R8A774A1 device
+ - "renesas,vin-r8a774b1" for the R8A774B1 device
- "renesas,vin-r8a774c0" for the R8A774C0 device
- "renesas,vin-r8a7778" for the R8A7778 device
- "renesas,vin-r8a7779" for the R8A7779 device
@@ -43,7 +44,7 @@ on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
Additionally, an alias named vinX will need to be created to specify
which video input device this is.
-The per-board settings Gen2 platforms:
+The per-board settings for Gen2 and RZ/G1 platforms:
- port - sub-node describing a single endpoint connected to the VIN
from external SoC pins as described in video-interfaces.txt[1].
@@ -63,7 +64,7 @@ The per-board settings Gen2 platforms:
- data-enable-active: polarity of CLKENB signal, see [1] for
description. Default is active high.
-The per-board settings Gen3 and RZ/G2 platforms:
+The per-board settings for Gen3 and RZ/G2 platforms:
Gen3 and RZ/G2 platforms can support both a single connected parallel input
source from external SoC pins (port@0) and/or multiple parallel input sources
diff --git a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt b/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
deleted file mode 100644
index cfa4ffada8ae..000000000000
--- a/Documentation/devicetree/bindings/media/sh_mobile_ceu.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Bindings, specific for the sh_mobile_ceu_camera.c driver:
- - compatible: Should be "renesas,sh-mobile-ceu"
- - reg: register base and size
- - interrupts: the interrupt number
- - renesas,max-width: maximum image width, supported on this SoC
- - renesas,max-height: maximum image height, supported on this SoC
-
-Example:
-
-ceu0: ceu@fe910000 {
- compatible = "renesas,sh-mobile-ceu";
- reg = <0xfe910000 0xa0>;
- interrupt-parent = <&intcs>;
- interrupts = <0x880>;
- renesas,max-width = <8188>;
- renesas,max-height = <8188>;
-};
diff --git a/Documentation/devicetree/bindings/media/st,stm32-cec.txt b/Documentation/devicetree/bindings/media/st,stm32-cec.txt
deleted file mode 100644
index 6be2381c180d..000000000000
--- a/Documentation/devicetree/bindings/media/st,stm32-cec.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-STMicroelectronics STM32 CEC driver
-
-Required properties:
- - compatible : value should be "st,stm32-cec"
- - reg : Physical base address of the IP registers and length of memory
- mapped region.
- - clocks : from common clock binding: handle to CEC clocks
- - clock-names : from common clock binding: must be "cec" and "hdmi-cec".
- - interrupts : CEC interrupt number to the CPU.
-
-Example for stm32f746:
-
-cec: cec@40006c00 {
- compatible = "st,stm32-cec";
- reg = <0x40006C00 0x400>;
- interrupts = <94>;
- clocks = <&rcc 0 STM32F7_APB1_CLOCK(CEC)>, <&rcc 1 CLK_HDMI_CEC>;
- clock-names = "cec", "hdmi-cec";
-};
diff --git a/Documentation/devicetree/bindings/media/st,stm32-cec.yaml b/Documentation/devicetree/bindings/media/st,stm32-cec.yaml
new file mode 100644
index 000000000000..d75019c093a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st,stm32-cec.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/st,stm32-cec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 CEC bindings
+
+maintainers:
+ - Benjamin Gaignard <benjamin.gaignard@st.com>
+ - Yannick Fertre <yannick.fertre@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-cec
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Module Clock
+ - description: Bus Clock
+
+ clock-names:
+ items:
+ - const: cec
+ - const: hdmi-cec
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ cec: cec@40006c00 {
+ compatible = "st,stm32-cec";
+ reg = <0x40006c00 0x400>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CEC_K>, <&clk_lse>;
+ clock-names = "cec", "hdmi-cec";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmi.txt b/Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
deleted file mode 100644
index 3122ded82eb4..000000000000
--- a/Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-STMicroelectronics STM32 Digital Camera Memory Interface (DCMI)
-
-Required properties:
-- compatible: "st,stm32-dcmi"
-- reg: physical base address and length of the registers set for the device
-- interrupts: should contain IRQ line for the DCMI
-- resets: reference to a reset controller,
- see Documentation/devicetree/bindings/reset/st,stm32-rcc.txt
-- clocks: list of clock specifiers, corresponding to entries in
- the clock-names property
-- clock-names: must contain "mclk", which is the DCMI peripherial clock
-- pinctrl: the pincontrol settings to configure muxing properly
- for pins that connect to DCMI device.
- See Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml.
-- dmas: phandle to DMA controller node,
- see Documentation/devicetree/bindings/dma/stm32-dma.txt
-- dma-names: must contain "tx", which is the transmit channel from DCMI to DMA
-
-DCMI supports a single port node with parallel bus. It should contain one
-'port' child node with child 'endpoint' node. Please refer to the bindings
-defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-
- dcmi: dcmi@50050000 {
- compatible = "st,stm32-dcmi";
- reg = <0x50050000 0x400>;
- interrupts = <78>;
- resets = <&rcc STM32F4_AHB2_RESET(DCMI)>;
- clocks = <&rcc 0 STM32F4_AHB2_CLOCK(DCMI)>;
- clock-names = "mclk";
- pinctrl-names = "default";
- pinctrl-0 = <&dcmi_pins>;
- dmas = <&dma2 1 1 0x414 0x3>;
- dma-names = "tx";
- port {
- dcmi_0: endpoint {
- remote-endpoint = <...>;
- bus-width = <8>;
- hsync-active = <0>;
- vsync-active = <0>;
- pclk-sample = <1>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
new file mode 100644
index 000000000000..3fe778cb5cc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/st,stm32-dcmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Digital Camera Memory Interface (DCMI) binding
+
+maintainers:
+ - Hugues Fruchet <hugues.fruchet@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-dcmi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: mclk
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ items:
+ - const: tx
+
+ resets:
+ maxItems: 1
+
+ port:
+ type: object
+ description:
+ DCMI supports a single port node with parallel bus. It should contain
+ one 'port' child node with child 'endpoint' node. Please refer to the
+ bindings defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+ - dmas
+ - dma-names
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ dcmi: dcmi@4c006000 {
+ compatible = "st,stm32-dcmi";
+ reg = <0x4c006000 0x400>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc CAMITF_R>;
+ clocks = <&rcc DCMI>;
+ clock-names = "mclk";
+ dmas = <&dmamux1 75 0x400 0x0d>;
+ dma-names = "tx";
+
+ port {
+ dcmi_0: endpoint {
+ remote-endpoint = <&ov5640_0>;
+ bus-width = <8>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pclk-sample = <1>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/ti,vpe.yaml b/Documentation/devicetree/bindings/media/ti,vpe.yaml
new file mode 100644
index 000000000000..f3a8a350e85f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/ti,vpe.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/ti,vpe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DRA7x Video Processing Engine (VPE) Device Tree Bindings
+
+maintainers:
+ - Benoit Parrot <bparrot@ti.com>
+
+description: |-
+ The Video Processing Engine (VPE) is a key component for image
+ post-processing applications. VPE consists of a single memory-to-memory
+ path which can perform chroma up/down sampling, deinterlacing,
+ scaling and color space conversion.
+
+properties:
+ compatible:
+ const: ti,dra7-vpe
+
+ reg:
+ items:
+ - description: The VPE main register region
+ - description: Scaler (SC) register region
+ - description: Color Space Conversion (CSC) register region
+ - description: Video Port Direct Memory Access (VPDMA) register region
+
+ reg-names:
+ items:
+ - const: vpe_top
+ - const: sc
+ - const: csc
+ - const: vpdma
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ vpe: vpe@489d0000 {
+ compatible = "ti,dra7-vpe";
+ reg = <0x489d0000 0x120>,
+ <0x489d0700 0x80>,
+ <0x489d5700 0x18>,
+ <0x489dd000 0x400>;
+ reg-names = "vpe_top",
+ "sc",
+ "csc",
+ "vpdma";
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt
deleted file mode 100644
index f633b5d0f8ca..000000000000
--- a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-SAMSUNG Exynos SoCs SROM Controller driver.
-
-Required properties:
-- compatible : Should contain "samsung,exynos4210-srom".
-
-- reg: offset and length of the register set
-
-Optional properties:
-The SROM controller can be used to attach external peripherals. In this case
-extra properties, describing the bus behind it, should be specified as below:
-
-- #address-cells: Must be set to 2 to allow device address translation.
- Address is specified as (bank#, offset).
-
-- #size-cells: Must be set to 1 to allow device size passing
-
-- ranges: Must be set up to reflect the memory layout with four integer values
- per bank:
- <bank-number> 0 <parent address of bank> <size>
-
-Sub-nodes:
-The actual device nodes should be added as subnodes to the SROMc node. These
-subnodes, in addition to regular device specification, should contain the following
-properties, describing configuration of the relevant SROM bank:
-
-Required properties:
-- reg: bank number, base address (relative to start of the bank) and size of
- the memory mapped for the device. Note that base address will be
- typically 0 as this is the start of the bank.
-
-- samsung,srom-timing : array of 6 integers, specifying bank timings in the
- following order: Tacp, Tcah, Tcoh, Tacc, Tcos, Tacs.
- Each value is specified in cycles and has the following
- meaning and valid range:
- Tacp : Page mode access cycle at Page mode (0 - 15)
- Tcah : Address holding time after CSn (0 - 15)
- Tcoh : Chip selection hold on OEn (0 - 15)
- Tacc : Access cycle (0 - 31, the actual time is N + 1)
- Tcos : Chip selection set-up before OEn (0 - 15)
- Tacs : Address set-up before CSn (0 - 15)
-
-Optional properties:
-- reg-io-width : data width in bytes (1 or 2). If omitted, default of 1 is used.
-
-- samsung,srom-page-mode : if page mode is set, 4 data page mode will be configured,
- else normal (1 data) page mode will be set.
-
-Example: basic definition, no banks are configured
- memory-controller@12570000 {
- compatible = "samsung,exynos4210-srom";
- reg = <0x12570000 0x14>;
- };
-
-Example: SROMc with SMSC911x ethernet chip on bank 3
- memory-controller@12570000 {
- #address-cells = <2>;
- #size-cells = <1>;
- ranges = <0 0 0x04000000 0x20000 // Bank0
- 1 0 0x05000000 0x20000 // Bank1
- 2 0 0x06000000 0x20000 // Bank2
- 3 0 0x07000000 0x20000>; // Bank3
-
- compatible = "samsung,exynos4210-srom";
- reg = <0x12570000 0x14>;
-
- ethernet@3,0 {
- compatible = "smsc,lan9115";
- reg = <3 0 0x10000>; // Bank 3, offset = 0
- phy-mode = "mii";
- interrupt-parent = <&gpx0>;
- interrupts = <5 8>;
- reg-io-width = <2>;
- smsc,irq-push-pull;
- smsc,force-internal-phy;
-
- samsung,srom-page-mode;
- samsung,srom-timing = <9 12 1 9 1 1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
new file mode 100644
index 000000000000..cdfe3f7f0ea9
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/exynos-srom.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC SROM Controller driver
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |+
+ The SROM controller can be used to attach external peripherals. In this case
+ extra properties, describing the bus behind it, should be specified.
+
+properties:
+ compatible:
+ items:
+ - const: samsung,exynos4210-srom
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 1
+
+ ranges:
+ description: |
+ Reflects the memory layout with four integer values per bank. Format:
+ <bank-number> 0 <parent address of bank> <size>
+ Up to four banks are supported.
+
+patternProperties:
+ "^.*@[0-3],[a-f0-9]+$":
+ type: object
+ description:
+ The actual device nodes should be added as subnodes to the SROMc node.
+ These subnodes, in addition to regular device specification, should
+ contain the following properties, describing configuration
+ of the relevant SROM bank.
+
+ properties:
+ reg:
+ description:
+ Bank number, base address (relative to start of the bank) and size
+ of the memory mapped for the device. Note that the base address will
+ typically be 0 as this is the start of the bank.
+ maxItems: 1
+
+ reg-io-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [1, 2]
+ description:
+ Data width in bytes (1 or 2). If omitted, default of 1 is used.
+
+ samsung,srom-page-mode:
+ description:
+ If page mode is set, 4 data page mode will be configured,
+ else normal (1 data) page mode will be set.
+ type: boolean
+
+ samsung,srom-timing:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - items:
+ minItems: 6
+ maxItems: 6
+ description: |
+ Array of 6 integers, specifying bank timings in the following order:
+ Tacp, Tcah, Tcoh, Tacc, Tcos, Tacs.
+ Each value is specified in cycles and has the following meaning
+ and valid range:
+ Tacp: Page mode access cycle at Page mode (0 - 15)
+ Tcah: Address holding time after CSn (0 - 15)
+ Tcoh: Chip selection hold on OEn (0 - 15)
+ Tacc: Access cycle (0 - 31, the actual time is N + 1)
+ Tcos: Chip selection set-up before OEn (0 - 15)
+ Tacs: Address set-up before CSn (0 - 15)
+
+ required:
+ - reg
+ - samsung,srom-timing
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ // Example: basic definition, no banks are configured
+ memory-controller@12560000 {
+ compatible = "samsung,exynos4210-srom";
+ reg = <0x12560000 0x14>;
+ };
+
+ - |
+ // Example: SROMc with SMSC911x ethernet chip on bank 3
+ memory-controller@12570000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x04000000 0x20000 // Bank0
+ 1 0 0x05000000 0x20000 // Bank1
+ 2 0 0x06000000 0x20000 // Bank2
+ 3 0 0x07000000 0x20000>; // Bank3
+
+ compatible = "samsung,exynos4210-srom";
+ reg = <0x12570000 0x14>;
+
+ ethernet@3,0 {
+ compatible = "smsc,lan9115";
+ reg = <3 0 0x10000>; // Bank 3, offset = 0
+ phy-mode = "mii";
+ interrupt-parent = <&gpx0>;
+ interrupts = <5 8>;
+ reg-io-width = <2>;
+ smsc,irq-push-pull;
+ smsc,force-internal-phy;
+
+ samsung,srom-page-mode;
+ samsung,srom-timing = <9 12 1 9 1 1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt
index cd9e90c5d171..b6bc30d7777e 100644
--- a/Documentation/devicetree/bindings/mfd/ab8500.txt
+++ b/Documentation/devicetree/bindings/mfd/ab8500.txt
@@ -69,6 +69,18 @@ Required child device properties:
- compatible : "stericsson,ab8500-[bm|btemp|charger|fg|gpadc|gpio|ponkey|
pwm|regulator|rtc|sysctrl|usb]";
+ A few child devices require ADC channels from the GPADC node. Those follow
+ the standard bindings from iio/iio-bindings.txt and iio/adc/adc.txt.
+
+ abx500-temp : io-channels "aux1" and "aux2" for measuring external
+ temperatures.
+ ab8500-fg : io-channel "main_bat_v" for measuring the main battery voltage.
+ ab8500-btemp : io-channels "btemp_ball" and "bat_ctrl" for measuring the
+ battery temperature.
+ ab8500-charger : io-channels "main_charger_v", "main_charger_c", "vbus_v",
+ "usb_charger_c" for measuring voltage and current of the
+ different charging supplies.
+
Optional child device properties:
- interrupts : contains the device IRQ(s) using the 2-cell format (see above)
- interrupt-names : contains names of IRQ resource in the order in which they were
@@ -102,8 +114,115 @@ ab8500 {
39 0x4>;
interrupt-names = "HW_CONV_END", "SW_CONV_END";
vddadc-supply = <&ab8500_ldo_tvout_reg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+
+ /* GPADC channels */
+ bat_ctrl: channel@1 {
+ reg = <0x01>;
+ };
+ btemp_ball: channel@2 {
+ reg = <0x02>;
+ };
+ main_charger_v: channel@3 {
+ reg = <0x03>;
+ };
+ acc_detect1: channel@4 {
+ reg = <0x04>;
+ };
+ acc_detect2: channel@5 {
+ reg = <0x05>;
+ };
+ adc_aux1: channel@6 {
+ reg = <0x06>;
+ };
+ adc_aux2: channel@7 {
+ reg = <0x07>;
+ };
+ main_batt_v: channel@8 {
+ reg = <0x08>;
+ };
+ vbus_v: channel@9 {
+ reg = <0x09>;
+ };
+ main_charger_c: channel@a {
+ reg = <0x0a>;
+ };
+ usb_charger_c: channel@b {
+ reg = <0x0b>;
+ };
+ bk_bat_v: channel@c {
+ reg = <0x0c>;
+ };
+ die_temp: channel@d {
+ reg = <0x0d>;
+ };
+ usb_id: channel@e {
+ reg = <0x0e>;
+ };
+ xtal_temp: channel@12 {
+ reg = <0x12>;
+ };
+ vbat_true_meas: channel@13 {
+ reg = <0x13>;
+ };
+ bat_ctrl_and_ibat: channel@1c {
+ reg = <0x1c>;
+ };
+ vbat_meas_and_ibat: channel@1d {
+ reg = <0x1d>;
+ };
+ vbat_true_meas_and_ibat: channel@1e {
+ reg = <0x1e>;
+ };
+ bat_temp_and_ibat: channel@1f {
+ reg = <0x1f>;
+ };
};
+ ab8500_temp {
+ compatible = "stericsson,abx500-temp";
+ io-channels = <&gpadc 0x06>,
+ <&gpadc 0x07>;
+ io-channel-name = "aux1", "aux2";
+ };
+
+ ab8500_battery: ab8500_battery {
+ stericsson,battery-type = "LIPO";
+ thermistor-on-batctrl;
+ };
+
+ ab8500_fg {
+ compatible = "stericsson,ab8500-fg";
+ battery = <&ab8500_battery>;
+ io-channels = <&gpadc 0x08>;
+ io-channel-name = "main_bat_v";
+ };
+
+ ab8500_btemp {
+ compatible = "stericsson,ab8500-btemp";
+ battery = <&ab8500_battery>;
+ io-channels = <&gpadc 0x02>,
+ <&gpadc 0x01>;
+ io-channel-name = "btemp_ball",
+ "bat_ctrl";
+ };
+
+ ab8500_charger {
+ compatible = "stericsson,ab8500-charger";
+ battery = <&ab8500_battery>;
+ vddadc-supply = <&ab8500_ldo_tvout_reg>;
+ io-channels = <&gpadc 0x03>,
+ <&gpadc 0x0a>,
+ <&gpadc 0x09>,
+ <&gpadc 0x0b>;
+ io-channel-name = "main_charger_v",
+ "main_charger_c",
+ "vbus_v",
+ "usb_charger_c";
+ };
+
ab8500-usb {
compatible = "stericsson,ab8500-usb";
interrupts = < 90 0x4
diff --git a/Documentation/devicetree/bindings/mfd/madera.txt b/Documentation/devicetree/bindings/mfd/madera.txt
index cad0f2800502..47e2b8bc6051 100644
--- a/Documentation/devicetree/bindings/mfd/madera.txt
+++ b/Documentation/devicetree/bindings/mfd/madera.txt
@@ -67,6 +67,14 @@ Optional properties:
As defined in bindings/gpio.txt.
Although optional, it is strongly recommended to use a hardware reset
+ - clocks: Should reference the clocks supplied on MCLK1, MCLK2 and MCLK3
+ - clock-names: May contain up to three strings:
+ "mclk1" for the clock supplied on MCLK1, recommended to be a
+ high-quality audio reference clock
+ "mclk2" for the clock supplied on MCLK2, required to be an
+ always-on 32k clock
+ "mclk3" for the clock supplied on MCLK3
+
- MICBIASx : Initial data for the MICBIAS regulators, as covered in
Documentation/devicetree/bindings/regulator/regulator.txt.
One for each MICBIAS generator (MICBIAS1, MICBIAS2, ...)
diff --git a/Documentation/devicetree/bindings/mfd/max77650.txt b/Documentation/devicetree/bindings/mfd/max77650.txt
deleted file mode 100644
index b529d8d19335..000000000000
--- a/Documentation/devicetree/bindings/mfd/max77650.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-MAX77650 ultra low-power PMIC from Maxim Integrated.
-
-Required properties:
--------------------
-- compatible: Must be "maxim,max77650"
-- reg: I2C device address.
-- interrupts: The interrupt on the parent the controller is
- connected to.
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells: Must be <2>.
-
-- gpio-controller: Marks the device node as a gpio controller.
-- #gpio-cells: Must be <2>. The first cell is the pin number and
- the second cell is used to specify the gpio active
- state.
-
-Optional properties:
---------------------
-gpio-line-names: Single string containing the name of the GPIO line.
-
-The GPIO-controller module is represented as part of the top-level PMIC
-node. The device exposes a single GPIO line.
-
-For device-tree bindings of other sub-modules (regulator, power supply,
-LEDs and onkey) refer to the binding documents under the respective
-sub-system directories.
-
-For more details on GPIO bindings, please refer to the generic GPIO DT
-binding document <devicetree/bindings/gpio/gpio.txt>.
-
-Example:
---------
-
- pmic@48 {
- compatible = "maxim,max77650";
- reg = <0x48>;
-
- interrupt-controller;
- interrupt-parent = <&gpio2>;
- #interrupt-cells = <2>;
- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-line-names = "max77650-charger";
- };
diff --git a/Documentation/devicetree/bindings/mfd/max77650.yaml b/Documentation/devicetree/bindings/mfd/max77650.yaml
new file mode 100644
index 000000000000..4a70f875a6eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max77650.yaml
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/max77650.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MAX77650 ultra low-power PMIC from Maxim Integrated.
+
+maintainers:
+ - Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+description: |
+ MAX77650 is an ultra-low power PMIC providing battery charging and power
+ supply for low-power IoT and wearable applications.
+
+ The GPIO-controller module is represented as part of the top-level PMIC
+ node. The device exposes a single GPIO line.
+
+ For device-tree bindings of other sub-modules (regulator, power supply,
+ LEDs and onkey) refer to the binding documents under the respective
+ sub-system directories.
+
+properties:
+ compatible:
+ const: maxim,max77650
+
+ reg:
+ description:
+ I2C device address.
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+ description:
+ The first cell is the IRQ number, the second cell is the trigger type.
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+ description:
+ The first cell is the pin number and the second cell is used to specify
+ the gpio active state.
+
+ gpio-line-names:
+ maxItems: 1
+ description:
+ Single string containing the name of the GPIO line.
+
+ regulators:
+ $ref: ../regulator/max77650-regulator.yaml
+
+ charger:
+ $ref: ../power/supply/max77650-charger.yaml
+
+ leds:
+ $ref: ../leds/leds-max77650.yaml
+
+ onkey:
+ $ref: ../input/max77650-onkey.yaml
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+ - gpio-controller
+ - "#gpio-cells"
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/input/linux-event-codes.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@48 {
+ compatible = "maxim,max77650";
+ reg = <0x48>;
+
+ interrupt-controller;
+ interrupt-parent = <&gpio2>;
+ #interrupt-cells = <2>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "max77650-charger";
+
+ regulators {
+ compatible = "maxim,max77650-regulator";
+
+ max77650_ldo: regulator@0 {
+ regulator-compatible = "ldo";
+ regulator-name = "max77650-ldo";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <2937500>;
+ };
+
+ max77650_sbb0: regulator@1 {
+ regulator-compatible = "sbb0";
+ regulator-name = "max77650-sbb0";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1587500>;
+ };
+ };
+
+ charger {
+ compatible = "maxim,max77650-charger";
+ input-voltage-min-microvolt = <4200000>;
+ input-current-limit-microamp = <285000>;
+ };
+
+ leds {
+ compatible = "maxim,max77650-led";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "blue:usr0";
+ };
+
+ led@1 {
+ reg = <1>;
+ label = "red:usr1";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led@2 {
+ reg = <2>;
+ label = "green:usr2";
+ };
+ };
+
+ onkey {
+ compatible = "maxim,max77650-onkey";
+ linux,code = <KEY_END>;
+ maxim,onkey-slide;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/max77693.txt b/Documentation/devicetree/bindings/mfd/max77693.txt
index a3c60a7a3be1..0ced96e16c16 100644
--- a/Documentation/devicetree/bindings/mfd/max77693.txt
+++ b/Documentation/devicetree/bindings/mfd/max77693.txt
@@ -175,6 +175,7 @@ Example:
maxim,thermal-regulation-celsius = <75>;
maxim,battery-overcurrent-microamp = <3000000>;
maxim,charge-input-threshold-microvolt = <4300000>;
+ };
led {
compatible = "maxim,max77693-led";
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
index 143706222a51..fffc8fde3302 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt
@@ -29,6 +29,8 @@ Required properties:
"qcom,pm8916",
"qcom,pm8004",
"qcom,pm8909",
+ "qcom,pm8950",
+ "qcom,pmi8950",
"qcom,pm8998",
"qcom,pmi8998",
"qcom,pm8005",
diff --git a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt
index d759da606f75..30ea27c3936d 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt
+++ b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.txt
@@ -18,7 +18,7 @@ an optional sub-node. For "samsung,exynos5433-lpass" compatible this includes:
UART, SLIMBUS, PCM, I2S, DMAC, Timers 0...4, VIC, WDT 0...1 devices.
Bindings of the sub-nodes are described in:
- ../serial/samsung_uart.txt
+ ../serial/samsung_uart.yaml
../sound/samsung-i2s.txt
../dma/arm-pl330.txt
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
new file mode 100644
index 000000000000..1a4cc5f3fb33
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
@@ -0,0 +1,120 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/st,stm32-lptimer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Low-Power Timers bindings
+
+description: |
+ The STM32 Low-Power Timer (LPTIM) is a 16-bit timer that provides several
+ functions:
+ - PWM output (with programmable prescaler, configurable polarity)
+ - Trigger source for STM32 ADC/DAC (LPTIM_OUT)
+ - Several counter modes:
+ - quadrature encoder to detect angular position and direction of rotary
+ elements, from IN1 and IN2 input signals.
+ - simple counter from IN1 input signal.
+
+maintainers:
+ - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-lptimer
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: mux
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ pwm:
+ type: object
+
+ properties:
+ compatible:
+ const: st,stm32-pwm-lp
+
+ "#pwm-cells":
+ const: 3
+
+ required:
+ - "#pwm-cells"
+ - compatible
+
+patternProperties:
+ "^trigger@[0-9]+$":
+ type: object
+
+ properties:
+ compatible:
+ const: st,stm32-lptimer-trigger
+
+ reg:
+ description: Identify trigger hardware block.
+ items:
+ minimum: 0
+ maximum: 2
+
+ required:
+ - compatible
+ - reg
+
+ counter:
+ type: object
+
+ properties:
+ compatible:
+ const: st,stm32-lptimer-counter
+
+ required:
+ - compatible
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ timer@40002400 {
+ compatible = "st,stm32-lptimer";
+ reg = <0x40002400 0x400>;
+ clocks = <&timer_clk>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ #pwm-cells = <3>;
+ };
+
+ trigger@0 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <0>;
+ };
+
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
new file mode 100644
index 000000000000..590849ee9f32
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/st,stm32-timers.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Timers bindings
+
+description: |
+ This hardware block provides 3 types of timer along with PWM functionality:
+ - advanced-control timers consist of a 16-bit auto-reload counter driven
+ by a programmable prescaler, break input feature, PWM outputs and
+ complementary PWM output channels.
+ - general-purpose timers consist of a 16-bit or 32-bit auto-reload counter
+ driven by a programmable prescaler and PWM outputs.
+ - basic timers consist of a 16-bit auto-reload counter driven by a
+ programmable prescaler.
+
+maintainers:
+ - Benjamin Gaignard <benjamin.gaignard@st.com>
+ - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-timers
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: int
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ minItems: 1
+ maxItems: 7
+
+ dma-names:
+ items:
+ enum: [ ch1, ch2, ch3, ch4, up, trig, com ]
+ minItems: 1
+ maxItems: 7
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ pwm:
+ type: object
+
+ properties:
+ compatible:
+ const: st,stm32-pwm
+
+ "#pwm-cells":
+ const: 3
+
+ st,breakinput:
+ description:
+ One or two <index level filter> entries describing break input
+ configurations.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ - items:
+ items:
+ - description: |
+ "index" indicates on which break input (0 or 1) the
+ configuration should be applied.
+ enum: [ 0, 1 ]
+ - description: |
+ "level" gives the active level (0=low or 1=high) of the
+ input signal for this configuration.
+ enum: [ 0, 1 ]
+ - description: |
+ "filter" gives the filtering value (up to 15) to be applied.
+ maximum: 15
+ minItems: 1
+ maxItems: 2
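+ # For instance (illustrative values, not from any real board), break
+ # input 0 active high with filter 5 and break input 1 active low with
+ # filter 10 would be:
+ #   st,breakinput = <0 1 5>, <1 0 10>;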
+
+ required:
+ - "#pwm-cells"
+ - compatible
+
+patternProperties:
+ "^timer@[0-9]+$":
+ type: object
+
+ properties:
+ compatible:
+ enum:
+ - st,stm32-timer-trigger
+ - st,stm32h7-timer-trigger
+
+ reg:
+ description: Identify trigger hardware block.
+ items:
+ minimum: 0
+ maximum: 16
+
+ required:
+ - compatible
+ - reg
+
+ counter:
+ type: object
+
+ properties:
+ compatible:
+ const: st,stm32-timer-counter
+
+ required:
+ - compatible
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ timers2: timers@40000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40000000 0x400>;
+ clocks = <&rcc TIM2_K>;
+ clock-names = "int";
+ dmas = <&dmamux1 18 0x400 0x1>,
+ <&dmamux1 19 0x400 0x1>,
+ <&dmamux1 20 0x400 0x1>,
+ <&dmamux1 21 0x400 0x1>,
+ <&dmamux1 22 0x400 0x1>;
+ dma-names = "ch1", "ch2", "ch3", "ch4", "up";
+ pwm {
+ compatible = "st,stm32-pwm";
+ #pwm-cells = <3>;
+ st,breakinput = <0 1 5>;
+ };
+ timer@0 {
+ compatible = "st,stm32-timer-trigger";
+ reg = <0>;
+ };
+ counter {
+ compatible = "st,stm32-timer-counter";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
deleted file mode 100644
index fb54e4dad5b3..000000000000
--- a/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-STMicroelectronics STM32 Low-Power Timer
-
-The STM32 Low-Power Timer (LPTIM) is a 16-bit timer that provides several
-functions:
-- PWM output (with programmable prescaler, configurable polarity)
-- Quadrature encoder, counter
-- Trigger source for STM32 ADC/DAC (LPTIM_OUT)
-
-Required properties:
-- compatible: Must be "st,stm32-lptimer".
-- reg: Offset and length of the device's register set.
-- clocks: Phandle to the clock used by the LP Timer module.
-- clock-names: Must be "mux".
-- #address-cells: Should be '<1>'.
-- #size-cells: Should be '<0>'.
-
-Optional subnodes:
-- pwm: See ../pwm/pwm-stm32-lp.txt
-- counter: See ../counter/stm32-lptimer-cnt.txt
-- trigger: See ../iio/timer/stm32-lptimer-trigger.txt
-
-Example:
-
- timer@40002400 {
- compatible = "st,stm32-lptimer";
- reg = <0x40002400 0x400>;
- clocks = <&timer_clk>;
- clock-names = "mux";
- #address-cells = <1>;
- #size-cells = <0>;
-
- pwm {
- compatible = "st,stm32-pwm-lp";
- pinctrl-names = "default";
- pinctrl-0 = <&lppwm1_pins>;
- };
-
- trigger@0 {
- compatible = "st,stm32-lptimer-trigger";
- reg = <0>;
- };
-
- counter {
- compatible = "st,stm32-lptimer-counter";
- pinctrl-names = "default";
- pinctrl-0 = <&lptim1_in_pins>;
- };
- };
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
deleted file mode 100644
index 15c3b87f51d9..000000000000
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-STM32 Timers driver bindings
-
-This IP provides 3 types of timer along with PWM functionality:
-- advanced-control timers consist of a 16-bit auto-reload counter driven by a programmable
- prescaler, break input feature, PWM outputs and complementary PWM ouputs channels.
-- general-purpose timers consist of a 16-bit or 32-bit auto-reload counter driven by a
- programmable prescaler and PWM outputs.
-- basic timers consist of a 16-bit auto-reload counter driven by a programmable prescaler.
-
-Required parameters:
-- compatible: must be "st,stm32-timers"
-
-- reg: Physical base address and length of the controller's
- registers.
-- clock-names: Set to "int".
-- clocks: Phandle to the clock used by the timer module.
- For Clk properties, please refer to ../clock/clock-bindings.txt
-
-Optional parameters:
-- resets: Phandle to the parent reset controller.
- See ../reset/st,stm32-rcc.txt
-- dmas: List of phandle to dma channels that can be used for
- this timer instance. There may be up to 7 dma channels.
-- dma-names: List of dma names. Must match 'dmas' property. Valid
- names are: "ch1", "ch2", "ch3", "ch4", "up", "trig",
- "com".
-
-Optional subnodes:
-- pwm: See ../pwm/pwm-stm32.txt
-- timer: See ../iio/timer/stm32-timer-trigger.txt
-- counter: See ../counter/stm32-timer-cnt.txt
-
-Example:
- timers@40010000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40010000 0x400>;
- clocks = <&rcc 0 160>;
- clock-names = "int";
-
- pwm {
- compatible = "st,stm32-pwm";
- pinctrl-0 = <&pwm1_pins>;
- pinctrl-names = "default";
- };
-
- timer@0 {
- compatible = "st,stm32-timer-trigger";
- reg = <0>;
- };
-
- counter {
- compatible = "st,stm32-timer-counter";
- pinctrl-names = "default";
- pinctrl-0 = <&tim1_in_pins>;
- };
- };
-
-Example with all dmas:
- timer@40010000 {
- ...
- dmas = <&dmamux1 11 0x400 0x0>,
- <&dmamux1 12 0x400 0x0>,
- <&dmamux1 13 0x400 0x0>,
- <&dmamux1 14 0x400 0x0>,
- <&dmamux1 15 0x400 0x0>,
- <&dmamux1 16 0x400 0x0>,
- <&dmamux1 17 0x400 0x0>;
- dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig", "com";
- ...
- child nodes...
- };
diff --git a/Documentation/devicetree/bindings/mfd/syscon.txt b/Documentation/devicetree/bindings/mfd/syscon.txt
deleted file mode 100644
index 25d9e9c2fd53..000000000000
--- a/Documentation/devicetree/bindings/mfd/syscon.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-* System Controller Registers R/W driver
-
-System controller node represents a register region containing a set
-of miscellaneous registers. The registers are not cohesive enough to
-represent as any specific type of device. The typical use-case is for
-some other node's driver, or platform-specific code, to acquire a
-reference to the syscon node (e.g. by phandle, node path, or search
-using a specific compatible value), interrogate the node (or associated
-OS driver) to determine the location of the registers, and access the
-registers directly.
-
-Required properties:
-- compatible: Should contain "syscon".
-- reg: the register region can be accessed from syscon
-
-Optional property:
-- reg-io-width: the size (in bytes) of the IO accesses that should be
- performed on the device.
-- hwlocks: reference to a phandle of a hardware spinlock provider node.
-
-Examples:
-gpr: iomuxc-gpr@20e0000 {
- compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
- reg = <0x020e0000 0x38>;
- hwlocks = <&hwlock1 1>;
-};
-
-hwlock1: hwspinlock@40500000 {
- ...
- reg = <0x40500000 0x1000>;
- #hwlock-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
new file mode 100644
index 000000000000..39375e4313d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/syscon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: System Controller Registers R/W Device Tree Bindings
+
+description: |
+ System controller node represents a register region containing a set
+ of miscellaneous registers. The registers are not cohesive enough to
+ represent as any specific type of device. The typical use-case is
+ for some other node's driver, or platform-specific code, to acquire
+ a reference to the syscon node (e.g. by phandle, node path, or
+ search using a specific compatible value), interrogate the node (or
+ associated OS driver) to determine the location of the registers,
+ and access the registers directly.
+
+maintainers:
+ - Lee Jones <lee.jones@linaro.org>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - syscon
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ anyOf:
+ - items:
+ - enum:
+ - allwinner,sun8i-a83t-system-controller
+ - allwinner,sun8i-h3-system-controller
+ - allwinner,sun8i-v3s-system-controller
+ - allwinner,sun50i-a64-system-controller
+
+ - const: syscon
+
+ - contains:
+ const: syscon
+ additionalItems: true
+
+ reg:
+ maxItems: 1
+
+ reg-io-width:
+ description: |
+ The size (in bytes) of the IO accesses that should be performed
+ on the device.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 1, 2, 4, 8 ]
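+ # e.g. reg-io-width = <4>; selects 32-bit IO accesses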
+
+ hwlocks:
+ maxItems: 1
+ description:
+ Reference to a phandle of a hardware spinlock provider node.
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ syscon: syscon@1c00000 {
+ compatible = "allwinner,sun8i-h3-system-controller", "syscon";
+ reg = <0x01c00000 0x1000>;
+ };
+
+ - |
+ gpr: iomuxc-gpr@20e0000 {
+ compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
+ reg = <0x020e0000 0x38>;
+ hwlocks = <&hwlock1 1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/misc/allwinner,syscon.txt b/Documentation/devicetree/bindings/misc/allwinner,syscon.txt
deleted file mode 100644
index 31494a24fe69..000000000000
--- a/Documentation/devicetree/bindings/misc/allwinner,syscon.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-* Allwinner sun8i system controller
-
-This file describes the bindings for the system controller present in
-Allwinner SoC H3, A83T and A64.
-The principal function of this syscon is to control EMAC PHY choice and
-config.
-
-Required properties for the system controller:
-- reg: address and length of the register for the device.
-- compatible: should be "syscon" and one of the following string:
- "allwinner,sun8i-h3-system-controller"
- "allwinner,sun8i-v3s-system-controller"
- "allwinner,sun50i-a64-system-controller"
- "allwinner,sun8i-a83t-system-controller"
-
-Example:
-syscon: syscon@1c00000 {
- compatible = "allwinner,sun8i-h3-system-controller", "syscon";
- reg = <0x01c00000 0x1000>;
-};
diff --git a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
index d2d4308596b8..64bca41031d5 100644
--- a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
@@ -85,6 +85,8 @@ required:
- clocks
- clock-names
+unevaluatedProperties: false
+
examples:
- |
mmc0: mmc@1c0f000 {
@@ -97,8 +99,4 @@ examples:
cd-gpios = <&pio 7 1 0>;
};
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
-
...
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
index 7ca0aa7ccc0b..428685eb2ded 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.txt
@@ -15,10 +15,15 @@ Required Properties:
- "arasan,sdhci-5.1": generic Arasan SDHCI 5.1 PHY
- "rockchip,rk3399-sdhci-5.1", "arasan,sdhci-5.1": rk3399 eMMC PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
+ - "xlnx,zynqmp-8.9a": ZynqMP SDHCI 8.9a PHY
+ For this device it is strongly suggested to include clock-output-names and
+ #clock-cells.
- "ti,am654-sdhci-5.1", "arasan,sdhci-5.1": TI AM654 MMC PHY
Note: This binding has been deprecated and moved to [5].
- "intel,lgm-sdhci-5.1-emmc", "arasan,sdhci-5.1": Intel LGM eMMC PHY
For this device it is strongly suggested to include arasan,soc-ctl-syscon.
+ - "intel,lgm-sdhci-5.1-sdxc", "arasan,sdhci-5.1": Intel LGM SDXC PHY
+ For this device it is strongly suggested to include arasan,soc-ctl-syscon.
[5] Documentation/devicetree/bindings/mmc/sdhci-am654.txt
@@ -38,15 +43,19 @@ Optional Properties:
- clock-output-names: If specified, this will be the name of the card clock
which will be exposed by this device. Required if #clock-cells is
specified.
- - #clock-cells: If specified this should be the value <0>. With this property
- in place we will export a clock representing the Card Clock. This clock
- is expected to be consumed by our PHY. You must also specify
+ - #clock-cells: If specified this should be the value <0> or <1>. With this
+ property in place we will export one or two clocks representing the Card
+ Clock. These clocks are expected to be consumed by our PHY.
- xlnx,fails-without-test-cd: when present, the controller doesn't work when
  the CD line is not connected properly. Test mode can be used to force the
  controller to function.
- xlnx,int-clock-stable-broken: when present, the controller always reports
that the internal clock is stable even when it is not.
+ - xlnx,mio-bank: When specified, this will indicate the MIO bank number in
+ which the command and data lines are configured. If not specified, driver
+ will assume this as 0.
+
Example:
sdhci@e0100000 {
compatible = "arasan,sdhci-8.9a";
@@ -83,6 +92,18 @@ Example:
#clock-cells = <0>;
};
+ sdhci: mmc@ff160000 {
+ compatible = "xlnx,zynqmp-8.9a", "arasan,sdhci-8.9a";
+ interrupt-parent = <&gic>;
+ interrupts = <0 48 4>;
+ reg = <0x0 0xff160000 0x0 0x1000>;
+ clocks = <&clk200>, <&clk200>;
+ clock-names = "clk_xin", "clk_ahb";
+ clock-output-names = "clk_out_sd0", "clk_in_sd0";
+ #clock-cells = <1>;
+ clk-phase-sd-hs = <63>, <72>;
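+ xlnx,mio-bank = <0>; /* optional; illustrative, the driver assumes 0 if absent */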
+ };
+
emmc: sdhci@ec700000 {
compatible = "intel,lgm-sdhci-5.1-emmc", "arasan,sdhci-5.1";
reg = <0xec700000 0x300>;
@@ -97,3 +118,18 @@ Example:
phy-names = "phy_arasan";
arasan,soc-ctl-syscon = <&sysconf>;
};
+
+ sdxc: sdhci@ec600000 {
+ compatible = "arasan,sdhci-5.1", "intel,lgm-sdhci-5.1-sdxc";
+ reg = <0xec600000 0x300>;
+ interrupt-parent = <&ioapic1>;
+ interrupts = <43 1>;
+ clocks = <&cgu0 LGM_CLK_SDIO>, <&cgu0 LGM_CLK_NGI>,
+ <&cgu0 LGM_GCLK_SDXC>;
+ clock-names = "clk_xin", "clk_ahb", "gate";
+ clock-output-names = "sdxc_cardclock";
+ #clock-cells = <0>;
+ phys = <&sdxc_phy>;
+ phy-names = "phy_arasan";
+ arasan,soc-ctl-syscon = <&sysconf>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index f707b8bee304..2fb466ca2a9d 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -18,6 +18,9 @@ Required properties:
"fsl,imx6ull-usdhc"
"fsl,imx7d-usdhc"
"fsl,imx7ulp-usdhc"
+ "fsl,imx8mq-usdhc"
+ "fsl,imx8mm-usdhc"
+ "fsl,imx8mn-usdhc"
"fsl,imx8qxp-usdhc"
Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/jz4740.txt b/Documentation/devicetree/bindings/mmc/jz4740.txt
index 8a6f87f13114..453d3b9d145d 100644
--- a/Documentation/devicetree/bindings/mmc/jz4740.txt
+++ b/Documentation/devicetree/bindings/mmc/jz4740.txt
@@ -1,14 +1,16 @@
-* Ingenic JZ47xx MMC controllers
+* Ingenic XBurst MMC controllers
This file documents the device tree properties used for the MMC controller in
-Ingenic JZ4740/JZ4780 SoCs. These are in addition to the core MMC properties
-described in mmc.txt.
+Ingenic JZ4740/JZ4760/JZ4780/X1000 SoCs. These are in addition to the core MMC
+properties described in mmc.txt.
Required properties:
- compatible: Should be one of the following:
- "ingenic,jz4740-mmc" for the JZ4740
- "ingenic,jz4725b-mmc" for the JZ4725B
+ - "ingenic,jz4760-mmc" for the JZ4760
- "ingenic,jz4780-mmc" for the JZ4780
+ - "ingenic,x1000-mmc" for the X1000
- reg: Should contain the MMC controller registers location and length.
- interrupts: Should contain the interrupt specifier of the MMC controller.
- clocks: Clock for the MMC controller.
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 080754e0ef35..b130450c3b34 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -333,6 +333,19 @@ patternProperties:
required:
- reg
+ "^clk-phase-(legacy|sd-hs|mmc-(hs|hs[24]00|ddr52)|uhs-(sdr(12|25|50|104)|ddr50))$":
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 359
+ description:
+ Set the clock (phase) delays which are to be configured in the
+ controller while switching to a particular speed mode. The values
+ are given in pairs of degrees.
+
dependencies:
cd-debounce-delay-ms: [ cd-gpios ]
fixed-emmc-driver-type: [ non-removable ]
@@ -351,6 +364,7 @@ examples:
keep-power-in-suspend;
wakeup-source;
mmc-pwrseq = <&sdhci0_pwrseq>;
+ clk-phase-sd-hs = <63>, <72>;
};
- |
diff --git a/Documentation/devicetree/bindings/mmc/owl-mmc.yaml b/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
new file mode 100644
index 000000000000..12b40213426d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/owl-mmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Actions Semi Owl SoCs SD/MMC/SDIO controller
+
+allOf:
+ - $ref: "mmc-controller.yaml"
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+properties:
+ compatible:
+ const: actions,owl-mmc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: mmc
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ mmc0: mmc@e0330000 {
+ compatible = "actions,owl-mmc";
+ reg = <0x0 0xe0330000 0x0 0x4000>;
+ interrupts = <0 42 4>;
+ clocks = <&cmu 56>;
+ resets = <&cmu 23>;
+ dmas = <&dma 2>;
+ dma-names = "mmc";
+ bus-width = <4>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt b/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
index dd08d038a65c..bc08fc43a9be 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.txt
@@ -11,6 +11,7 @@ Required properties:
"renesas,sdhi-r8a7744" - SDHI IP on R8A7744 SoC
"renesas,sdhi-r8a7745" - SDHI IP on R8A7745 SoC
"renesas,sdhi-r8a774a1" - SDHI IP on R8A774A1 SoC
+ "renesas,sdhi-r8a774b1" - SDHI IP on R8A774B1 SoC
"renesas,sdhi-r8a774c0" - SDHI IP on R8A774C0 SoC
"renesas,sdhi-r8a77470" - SDHI IP on R8A77470 SoC
"renesas,sdhi-mmc-r8a77470" - SDHI/MMC IP on R8A77470 SoC
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
index 1b662d7171a0..503c6dbac1b2 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
@@ -9,6 +9,11 @@ Required properties:
- clocks: Phandles to the clocks.
- clock-names: Must be "hclock", "multclk", "baseclk";
+Optional properties:
+- microchip,sdcal-inverted: when present, polarity on the SDCAL SoC pin is
+ inverted. The default polarity for this signal is described in the datasheet.
+ For instance on SAMA5D2, the pin is usually tied to the GND with a resistor
+ and a capacitor (see "SDMMC I/O Calibration" chapter).
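+ A board would enable it with the bare boolean in its SDHCI node, for
+ example (a sketch; node name, unit address and the remaining required
+ properties are illustrative or omitted):
+	sdmmc0: sdio-host@a0000000 {
+		...
+		microchip,sdcal-inverted;
+	};
+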
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt b/Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt
new file mode 100644
index 000000000000..627ee89c125b
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-milbeaut.txt
@@ -0,0 +1,30 @@
+* SOCIONEXT Milbeaut SDHCI controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci_milbeaut driver.
+
+Required properties:
+- compatible: "socionext,milbeaut-m10v-sdhci-3.0"
+- clocks: Must contain an entry for each entry in clock-names. It is a
+ list of phandles and clock-specifier pairs.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Should contain the following two entries:
+ "iface" - clock used for sdhci interface
+ "core" - core clock for sdhci controller
+
+Optional properties:
+- fujitsu,cmd-dat-delay-select: boolean property indicating that this host
+ requires the CMD_DAT_DELAY control to be enabled.
+
+Example:
+ sdhci3: mmc@1b010000 {
+ compatible = "socionext,milbeaut-m10v-sdhci-3.0";
+ reg = <0x1b010000 0x10000>;
+ interrupts = <0 265 0x4>;
+ voltage-ranges = <3300 3300>;
+ bus-width = <4>;
+ clocks = <&clk 7>, <&ahb_clk>;
+ clock-names = "core", "iface";
+ cap-sdio-irq;
+ fujitsu,cmd-dat-delay-select;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml b/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml
new file mode 100644
index 000000000000..b059267f6d20
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/st,stm32-fmc2-nand.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/st,stm32-fmc2-nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics Flexible Memory Controller 2 (FMC2) Bindings
+
+maintainers:
+ - Christophe Kerello <christophe.kerello@st.com>
+
+allOf:
+ - $ref: "nand-controller.yaml#"
+
+properties:
+ compatible:
+ const: st,stm32mp15-fmc2
+
+ reg:
+ items:
+ - description: Registers
+ - description: Chip select 0 data
+ - description: Chip select 0 command
+ - description: Chip select 0 address space
+ - description: Chip select 1 data
+ - description: Chip select 1 command
+ - description: Chip select 1 address space
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dmas:
+ items:
+ - description: tx DMA channel
+ - description: rx DMA channel
+ - description: ecc DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+ - const: ecc
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ nand-ecc-step-size:
+ const: 512
+
+ nand-ecc-strength:
+ enum: [ 1, 4, 8 ]
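+ # Supported combinations, per the former text binding:
+ #   nand-ecc-strength = <1>, nand-ecc-step-size = <512> (Hamming)
+ #   nand-ecc-strength = <4>, nand-ecc-step-size = <512> (BCH4)
+ #   nand-ecc-strength = <8>, nand-ecc-step-size = <512> (BCH8, default)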
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ nand-controller@58002000 {
+ compatible = "st,stm32mp15-fmc2";
+ reg = <0x58002000 0x1000>,
+ <0x80000000 0x1000>,
+ <0x88010000 0x1000>,
+ <0x88020000 0x1000>,
+ <0x81000000 0x1000>,
+ <0x89010000 0x1000>,
+ <0x89020000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&mdma1 20 0x10 0x12000a02 0x0 0x0>,
+ <&mdma1 20 0x10 0x12000a08 0x0 0x0>,
+ <&mdma1 21 0x10 0x12000a0a 0x0 0x0>;
+ dma-names = "tx", "rx", "ecc";
+ clocks = <&rcc FMC_K>;
+ resets = <&rcc FMC_R>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nand@0 {
+ reg = <0>;
+ nand-on-flash-bbt;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt
deleted file mode 100644
index e55895e8dae4..000000000000
--- a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-STMicroelectronics Flexible Memory Controller 2 (FMC2)
-NAND Interface
-
-Required properties:
-- compatible: Should be one of:
- * st,stm32mp15-fmc2
-- reg: NAND flash controller memory areas.
- First region contains the register location.
- Regions 2 to 4 respectively contain the data, command,
- and address space for CS0.
- Regions 5 to 7 contain the same areas for CS1.
-- interrupts: The interrupt number
-- pinctrl-0: Standard Pinctrl phandle (see: pinctrl/pinctrl-bindings.txt)
-- clocks: The clock needed by the NAND flash controller
-
-Optional properties:
-- resets: Reference to a reset controller asserting the FMC controller
-- dmas: DMA specifiers (see: dma/stm32-mdma.txt)
-- dma-names: Must be "tx", "rx" and "ecc"
-
-* NAND device bindings:
-
-Required properties:
-- reg: describes the CS lines assigned to the NAND device.
-
-Optional properties:
-- nand-on-flash-bbt: see nand-controller.yaml
-- nand-ecc-strength: see nand-controller.yaml
-- nand-ecc-step-size: see nand-controller.yaml
-
-The following ECC strength and step size are currently supported:
- - nand-ecc-strength = <1>, nand-ecc-step-size = <512> (Hamming)
- - nand-ecc-strength = <4>, nand-ecc-step-size = <512> (BCH4)
- - nand-ecc-strength = <8>, nand-ecc-step-size = <512> (BCH8) (default)
-
-Example:
-
- fmc: nand-controller@58002000 {
- compatible = "st,stm32mp15-fmc2";
- reg = <0x58002000 0x1000>,
- <0x80000000 0x1000>,
- <0x88010000 0x1000>,
- <0x88020000 0x1000>,
- <0x81000000 0x1000>,
- <0x89010000 0x1000>,
- <0x89020000 0x1000>;
- interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&rcc FMC_K>;
- resets = <&rcc FMC_R>;
- pinctrl-names = "default";
- pinctrl-0 = <&fmc_pins_a>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- nand@0 {
- reg = <0>;
- nand-on-flash-bbt;
- #address-cells = <1>;
- #size-cells = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
index 792196bf4abd..ae4796ec50a0 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-emac.yaml
@@ -38,6 +38,8 @@ required:
- phy-handle
- allwinner,sram
+unevaluatedProperties: false
+
examples:
- |
emac: ethernet@1c0b000 {
@@ -49,8 +51,4 @@ examples:
allwinner,sram = <&emac_sram 1>;
};
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
-
...
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
index df24d9d969f7..e5562c525ed9 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-a10-mdio.yaml
@@ -49,6 +49,8 @@ required:
- compatible
- reg
+unevaluatedProperties: false
+
examples:
- |
mdio@1c0b080 {
@@ -63,8 +65,4 @@ examples:
};
};
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
-
...
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
index ef446ae166f3..f683b7104e3e 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun7i-a20-gmac.yaml
@@ -49,6 +49,8 @@ required:
- clock-names
- phy-mode
+unevaluatedProperties: false
+
examples:
- |
gmac: ethernet@1c50000 {
@@ -61,8 +63,4 @@ examples:
phy-mode = "mii";
};
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
-
...
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 3fb0714e761e..11654d4b80fb 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -184,6 +184,8 @@ allOf:
- mdio-parent-bus
- mdio@1
+unevaluatedProperties: false
+
examples:
- |
ethernet@1c0b000 {
@@ -314,8 +316,4 @@ examples:
};
};
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
-
...
diff --git a/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml
new file mode 100644
index 000000000000..770af7c46114
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/allwinner,sun4i-a10-can.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/allwinner,sun4i-a10-can.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 CAN Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: allwinner,sun7i-a20-can
+ - const: allwinner,sun4i-a10-can
+ - const: allwinner,sun4i-a10-can
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sun7i-a20-ccu.h>
+
+ can0: can@1c2bc00 {
+ compatible = "allwinner,sun7i-a20-can",
+ "allwinner,sun4i-a10-can";
+ reg = <0x01c2bc00 0x400>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_APB1_CAN>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/can/sun4i_can.txt b/Documentation/devicetree/bindings/net/can/sun4i_can.txt
deleted file mode 100644
index f69845e6feaf..000000000000
--- a/Documentation/devicetree/bindings/net/can/sun4i_can.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Allwinner A10/A20 CAN controller Device Tree Bindings
------------------------------------------------------
-
-Required properties:
-- compatible: "allwinner,sun4i-a10-can"
-- reg: physical base address and size of the Allwinner A10/A20 CAN register map.
-- interrupts: interrupt specifier for the sole interrupt.
-- clock: phandle and clock specifier.
-
-Example
--------
-
-SoC common .dtsi file:
-
- can0_pins_a: can0@0 {
- allwinner,pins = "PH20","PH21";
- allwinner,function = "can";
- allwinner,drive = <0>;
- allwinner,pull = <0>;
- };
-...
- can0: can@1c2bc00 {
- compatible = "allwinner,sun4i-a10-can";
- reg = <0x01c2bc00 0x400>;
- interrupts = <0 26 4>;
- clocks = <&apb1_gates 4>;
- status = "disabled";
- };
-
-Board specific .dts file:
-
- can0: can@1c2bc00 {
- pinctrl-names = "default";
- pinctrl-0 = <&can0_pins_a>;
- status = "okay";
- };
diff --git a/Documentation/devicetree/bindings/net/davinci-mdio.txt b/Documentation/devicetree/bindings/net/davinci-mdio.txt
deleted file mode 100644
index e6527de80f10..000000000000
--- a/Documentation/devicetree/bindings/net/davinci-mdio.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-TI SoC Davinci/Keystone2 MDIO Controller Device Tree Bindings
----------------------------------------------------
-
-Required properties:
-- compatible : Should be "ti,davinci_mdio"
- and "ti,keystone_mdio" for Keystone 2 SoCs
- and "ti,cpsw-mdio" for am335x, am472x, am57xx/dra7, dm814x SoCs
- and "ti,am4372-mdio" for am472x SoC
-- reg : physical base address and size of the davinci mdio
- registers map
-- bus_freq : Mdio Bus frequency
-
-Optional properties:
-- ti,hwmods : Must be "davinci_mdio"
-
-Note: "ti,hwmods" field is used to fetch the base address and irq
-resources from TI, omap hwmod data base during device registration.
-Future plan is to migrate hwmod data base contents into device tree
-blob so that, all the required data will be used from device tree dts
-file.
-
-Examples:
-
- mdio: davinci_mdio@4a101000 {
- compatible = "ti,davinci_mdio";
- reg = <0x4A101000 0x1000>;
- bus_freq = <1000000>;
- };
-
-(or)
-
- mdio: davinci_mdio@4a101000 {
- compatible = "ti,davinci_mdio";
- ti,hwmods = "davinci_mdio";
- bus_freq = <1000000>;
- };
diff --git a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
new file mode 100644
index 000000000000..242ac4935a4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ti,davinci-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI SoC Davinci/Keystone2 MDIO Controller
+
+maintainers:
+ - Grygorii Strashko <grygorii.strashko@ti.com>
+
+description:
+ TI SoC Davinci/Keystone2 MDIO Controller
+
+allOf:
+ - $ref: "mdio.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - const: ti,davinci_mdio
+ - items:
+ - const: ti,keystone_mdio
+ - const: ti,davinci_mdio
+ - items:
+ - const: ti,cpsw-mdio
+ - const: ti,davinci_mdio
+ - items:
+ - const: ti,am4372-mdio
+ - const: ti,cpsw-mdio
+ - const: ti,davinci_mdio
+
+ reg:
+ maxItems: 1
+
+ bus_freq:
+ maximum: 2500000
+ description:
+ MDIO Bus frequency
+
+ ti,hwmods:
+ description: TI hwmod name
+ deprecated: true
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string-array
+ - items:
+ const: davinci_mdio
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: ti,davinci_mdio
+then:
+  required:
+    - bus_freq
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ davinci_mdio: mdio@4a101000 {
+ compatible = "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4a101000 0x1000>;
+ bus_freq = <1000000>;
+ };
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
index bb2fcde6f7ff..f38950560982 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
@@ -35,3 +35,29 @@ Examples:
ti,power-gpio = <&gpio3 23 GPIO_ACTIVE_HIGH>; /* 87 */
};
};
+
+&mmc3 {
+ vmmc-supply = <&wlan_en>;
+
+ bus-width = <4>;
+ non-removable;
+ ti,non-removable;
+ cap-power-off-card;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlan: wifi@1 {
+ compatible = "ti,wl1251";
+
+ reg = <1>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */
+
+ ti,wl1251-has-eeprom;
+ };
+};
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
index 1084e9d2917d..659b02002a35 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
@@ -31,9 +31,7 @@ required:
- compatible
- reg
-# FIXME: We should set it, but it would report all the generic
-# properties as additional properties.
-# additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt b/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt
new file mode 100644
index 000000000000..40f649f7c2e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-otp.txt
@@ -0,0 +1,25 @@
+Rockchip internal OTP (One Time Programmable) memory device tree bindings
+
+Required properties:
+- compatible: Should be one of the following.
+ - "rockchip,px30-otp" - for PX30 SoCs.
+ - "rockchip,rk3308-otp" - for RK3308 SoCs.
+- reg: Should contain the registers location and size
+- clocks: Must contain an entry for each entry in clock-names.
+- clock-names: Should be "otp", "apb_pclk" and "phy".
+- resets: Must contain an entry for each entry in reset-names.
+ See ../../reset/reset.txt for details.
+- reset-names: Should be "phy".
+
+See nvmem.txt for more information.
+
+Example:
+ otp: otp@ff290000 {
+ compatible = "rockchip,px30-otp";
+ reg = <0x0 0xff290000 0x0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru SCLK_OTP_USR>, <&cru PCLK_OTP_NS>,
+ <&cru PCLK_OTP_PHY>;
+ clock-names = "otp", "apb_pclk", "phy";
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/sprd-efuse.txt b/Documentation/devicetree/bindings/nvmem/sprd-efuse.txt
new file mode 100644
index 000000000000..96b6feec27f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/sprd-efuse.txt
@@ -0,0 +1,39 @@
+= Spreadtrum eFuse device tree bindings =
+
+Required properties:
+- compatible: Should be "sprd,ums312-efuse".
+- reg: Specify the address offset of efuse controller.
+- clock-names: Should be "enable".
+- clocks: The phandle and specifier referencing the controller's clock.
+- hwlocks: Reference to a phandle of a hwlock provider node.
+
+= Data cells =
+These are child nodes of the eFuse node; their bindings are described
+in bindings/nvmem/nvmem.txt.
+
+Example:
+
+ ap_efuse: efuse@32240000 {
+ compatible = "sprd,ums312-efuse";
+ reg = <0 0x32240000 0 0x10000>;
+ clock-names = "enable";
+ hwlocks = <&hwlock 8>;
+ clocks = <&aonapb_gate CLK_EFUSE_EB>;
+
+ /* Data cells */
+ thermal_calib: calib@10 {
+ reg = <0x10 0x2>;
+ };
+ };
+
+= Data consumers =
+These are device nodes which consume nvmem data cells.
+
+Example:
+
+ thermal {
+ ...
+
+ nvmem-cells = <&thermal_calib>;
+ nvmem-cell-names = "calibration";
+ };
diff --git a/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt b/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
index efa2c8b9b85a..84fdc422792e 100644
--- a/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
@@ -9,13 +9,16 @@ Additional properties are described here:
Required properties:
- compatible:
- should contain "amlogic,axg-pcie" to identify the core.
+ should contain:
+ - "amlogic,axg-pcie" for AXG SoC Family
+ - "amlogic,g12a-pcie" for G12A SoC Family
+ to identify the core.
- reg:
should contain the configuration address space.
- reg-names: Must be
- "elbi" External local bus interface registers
- "cfg" Meson specific registers
- - "phy" Meson PCIE PHY registers
+ - "phy" Meson PCIE PHY registers for AXG SoC Family
- "config" PCIe configuration space
- reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal.
- clocks: Must contain an entry for each entry in clock-names.
@@ -23,12 +26,13 @@ Required properties:
- "pclk" PCIe GEN 100M PLL clock
- "port" PCIe_x(A or B) RC clock gate
- "general" PCIe Phy clock
- - "mipi" PCIe_x(A or B) 100M ref clock gate
+ - "mipi" PCIe_x(A or B) 100M ref clock gate for AXG SoC Family
- resets: phandle to the reset lines.
- reset-names: must contain "phy" "port" and "apb"
- - "phy" Share PHY reset
+ - "phy" Share PHY reset for AXG SoC Family
- "port" Port A or B reset
- "apb" Share APB reset
+- phys: should contain a phandle to the shared phy for G12A SoC Family
- device_type:
should be "pci". As specified in designware-pcie.txt
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index e20ceaab9b38..99a386ea691c 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -21,6 +21,7 @@ Required properties:
"fsl,ls1046a-pcie"
"fsl,ls1043a-pcie"
"fsl,ls1012a-pcie"
+ "fsl,ls1028a-pcie"
EP mode:
"fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
- reg: base addresses and lengths of the PCIe controller register blocks.
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 45bba9f88a51..12702c8c46ce 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -4,6 +4,7 @@ Required properties:
compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7744" for the R8A7744 SoC;
"renesas,pcie-r8a774a1" for the R8A774A1 SoC;
+ "renesas,pcie-r8a774b1" for the R8A774B1 SoC;
"renesas,pcie-r8a774c0" for the R8A774C0 SoC;
"renesas,pcie-r8a7779" for the R8A7779 SoC;
"renesas,pcie-r8a7790" for the R8A7790 SoC;
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
new file mode 100644
index 000000000000..e5922b427342
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun50i-h6-usb3-phy.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Ondrej Jirman <megous@megous.com>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/allwinner,sun50i-h6-usb3-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Allwinner H6 USB3 PHY
+
+maintainers:
+ - Ondrej Jirman <megous@megous.com>
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun50i-h6-usb3-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - resets
+ - "#phy-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun50i-h6-ccu.h>
+ #include <dt-bindings/reset/sun50i-h6-ccu.h>
+ phy@5210000 {
+ compatible = "allwinner,sun50i-h6-usb3-phy";
+ reg = <0x5210000 0x10000>;
+ clocks = <&ccu CLK_USB_PHY1>;
+ resets = <&ccu RST_USB_PHY1>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
index 51254b4e65dd..57d8603076bd 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
@@ -36,7 +36,6 @@ properties:
const: 0
phy-supply:
- maxItems: 1
description:
Phandle to a regulator that provides power to the PHY. This
regulator will be managed during the PHY power on/off sequence.
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
index 00639baae74a..541f5298827c 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -2,6 +2,7 @@ ROCKCHIP USB2.0 PHY WITH INNO IP BLOCK
Required properties (phy (parent) node):
- compatible : should be one of the listed compatibles:
+ * "rockchip,px30-usb2phy"
* "rockchip,rk3228-usb2phy"
* "rockchip,rk3328-usb2phy"
* "rockchip,rk3366-usb2phy"
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 085fbd676cfc..eac9ad3cbbc8 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -14,7 +14,8 @@ Required properties:
"qcom,msm8998-qmp-pcie-phy" for PCIe QMP phy on msm8998,
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
"qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
- "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845.
+ "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845,
+ "qcom,sm8150-qmp-ufs-phy" for UFS QMP phy on sm8150.
- reg:
- index 0: address and length of register set for PHY's common
@@ -57,6 +58,8 @@ Required properties:
"aux", "cfg_ahb", "ref", "com_aux".
For "qcom,sdm845-qmp-ufs-phy" must contain:
"ref", "ref_aux".
+ For "qcom,sm8150-qmp-ufs-phy" must contain:
+ "ref", "ref_aux".
- resets: a list of phandles and reset controller specifier pairs,
one for each entry in reset-names.
@@ -83,6 +86,8 @@ Required properties:
"phy", "common".
For "qcom,sdm845-qmp-ufs-phy": must contain:
"ufsphy".
+ For "qcom,sm8150-qmp-ufs-phy": must contain:
+ "ufsphy".
- vdda-phy-supply: Phandle to a regulator supply to PHY core block.
- vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index 503a8cfb3184..7734b219d9aa 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -10,6 +10,8 @@ Required properties:
SoC.
"renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
SoC.
+ "renesas,usb2-phy-r8a774b1" if the device is a part of an R8A774B1
+ SoC.
"renesas,usb2-phy-r8a774c0" if the device is a part of an R8A774C0
SoC.
"renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
index 9d9826609c2f..0fe433b9a592 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
@@ -9,6 +9,8 @@ need this driver.
Required properties:
- compatible: "renesas,r8a774a1-usb3-phy" if the device is a part of an R8A774A1
SoC.
+ "renesas,r8a774b1-usb3-phy" if the device is a part of an R8A774B1
+ SoC.
"renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
SoC.
"renesas,r8a7796-usb3-phy" if the device is a part of an R8A7796
diff --git a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
new file mode 100644
index 000000000000..bb0da87bcd84
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/rockchip,px30-dsi-dphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip MIPI DPHY with additional LVDS/TTL modes
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ "#phy-cells":
+ const: 0
+
+ "#clock-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - rockchip,px30-dsi-dphy
+ - rockchip,rk3128-dsi-dphy
+ - rockchip,rk3368-dsi-dphy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: PLL reference clock
+ - description: Module clock
+
+ clock-names:
+ items:
+ - const: ref
+ - const: pclk
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ resets:
+ items:
+ - description: exclusive PHY reset line
+
+ reset-names:
+ items:
+ - const: apb
+
+required:
+ - "#phy-cells"
+ - "#clock-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+additionalProperties: false
+
+examples:
+ - |
+ dsi_dphy: phy@ff2e0000 {
+ compatible = "rockchip,px30-video-phy";
+ reg = <0x0 0xff2e0000 0x0 0x10000>;
+ clocks = <&pmucru 13>, <&cru 12>;
+ clock-names = "ref", "pclk";
+ #clock-cells = <0>;
+ resets = <&cru 12>;
+ reset-names = "apb";
+ #phy-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
new file mode 100644
index 000000000000..cd0503b6fe36
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -0,0 +1,243 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/allwinner,sun4i-a10-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Pin Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#gpio-cells":
+ const: 3
+ description:
+ GPIO consumers must use three arguments, first the number of the
+ bank, then the pin number inside that bank, and finally the GPIO
+ flags.
+
+ "#interrupt-cells":
+ const: 3
+ description:
+ Interrupt consumers must use three arguments, first the number
+ of the bank, then the pin number inside that bank, and finally
+ the interrupt flags.
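+ # For example, a consumer would refer to pin PH6 (bank 7, pin 6) as
+ # <&pio 7 6 GPIO_ACTIVE_HIGH> in a *-gpios property, or as
+ # <&pio 7 6 IRQ_TYPE_EDGE_FALLING> in an interrupts-extended property.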
+
+ compatible:
+ enum:
+ - allwinner,sun4i-a10-pinctrl
+ - allwinner,sun5i-a10s-pinctrl
+ - allwinner,sun5i-a13-pinctrl
+ - allwinner,sun6i-a31-pinctrl
+ - allwinner,sun6i-a31-r-pinctrl
+ - allwinner,sun6i-a31s-pinctrl
+ - allwinner,sun7i-a20-pinctrl
+ - allwinner,sun8i-a23-pinctrl
+ - allwinner,sun8i-a23-r-pinctrl
+ - allwinner,sun8i-a33-pinctrl
+ - allwinner,sun8i-a83t-pinctrl
+ - allwinner,sun8i-a83t-r-pinctrl
+ - allwinner,sun8i-h3-pinctrl
+ - allwinner,sun8i-h3-r-pinctrl
+ - allwinner,sun8i-r40-pinctrl
+ - allwinner,sun8i-v3-pinctrl
+ - allwinner,sun8i-v3s-pinctrl
+ - allwinner,sun9i-a80-pinctrl
+ - allwinner,sun9i-a80-r-pinctrl
+ - allwinner,sun50i-a64-pinctrl
+ - allwinner,sun50i-a64-r-pinctrl
+ - allwinner,sun50i-h5-pinctrl
+ - allwinner,sun50i-h6-pinctrl
+ - allwinner,sun50i-h6-r-pinctrl
+ - allwinner,suniv-f1c100s-pinctrl
+ - nextthing,gr8-pinctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 5
+ description:
+ One interrupt per external interrupt bank supported on the
+ controller, sorted by bank number in ascending order.
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: High Frequency Oscillator
+ - description: Low Frequency Oscillator
+
+ clock-names:
+ items:
+ - const: apb
+ - const: hosc
+ - const: losc
+
+ resets:
+ maxItems: 1
+
+ gpio-controller: true
+ interrupt-controller: true
+ gpio-line-names: true
+
+ input-debounce:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 5
+ description:
+ Debouncing periods in microseconds, one period per interrupt
+ bank found in the controller
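+ # e.g. input-debounce = <100>, <0>; debounces the first interrupt
+ # bank by 100 microseconds and leaves the second bank unconfigured
+ # (0 means no setup required).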
+
+patternProperties:
+ # It's pretty scary, but the basic idea is that:
+ # - One node name can start with either s- or r- for PRCM nodes,
+ # - Then, the name itself can be any repetition of <string>- (to
+ # accomodate with nodes like uart4-rts-cts-pins), where each
+ # string can be either starting with 'p' but in a string longer
+ # than 3, or something that doesn't start with 'p',
+ # - Then, the bank name is optional and will be between pa and pg,
+ # pl or pm. Some pins groups that have several options will have
+ # the pin numbers then,
+ # - Finally, the name will end with either -pin or -pins.
+
+ "^([rs]-)?(([a-z0-9]{3,}|[a-oq-z][a-z0-9]*?)?-)+?(p[a-ilm][0-9]*?-)??pins?$":
+ type: object
+
+ properties:
+ pins: true
+ function: true
+ bias-disable: true
+ bias-pull-up: true
+ bias-pull-down: true
+
+ drive-strength:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 10, 20, 30, 40 ]
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+ "^vcc-p[a-hlm]-supply$":
+ description:
+ Power supplies for pin banks.
+
+required:
+ - "#gpio-cells"
+ - "#interrupt-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - gpio-controller
+ - interrupt-controller
+
+allOf:
+ # FIXME: We should have the pin bank supplies here, but not a lot of
+ # boards are defining it at the moment so it would generate a lot of
+ # warnings.
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun9i-a80-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 5
+ maxItems: 5
+
+ else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-pinctrl
+ - allwinner,sun6i-a31s-pinctrl
+ - allwinner,sun50i-h6-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 4
+ maxItems: 4
+
+ else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-a23-pinctrl
+ - allwinner,sun8i-a83t-pinctrl
+ - allwinner,sun50i-a64-pinctrl
+ - allwinner,sun50i-h5-pinctrl
+ - allwinner,suniv-f1c100s-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 3
+ maxItems: 3
+
+ else:
+ if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-r-pinctrl
+ - allwinner,sun8i-a33-pinctrl
+ - allwinner,sun8i-h3-pinctrl
+ - allwinner,sun8i-v3-pinctrl
+ - allwinner,sun8i-v3s-pinctrl
+ - allwinner,sun9i-a80-r-pinctrl
+ - allwinner,sun50i-h6-r-pinctrl
+
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ else:
+ properties:
+ interrupts:
+ minItems: 1
+ maxItems: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun5i-ccu.h>
+
+ pio: pinctrl@1c20800 {
+ compatible = "allwinner,sun5i-a13-pinctrl";
+ reg = <0x01c20800 0x400>;
+ interrupts = <28>;
+ clocks = <&ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>;
+ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #gpio-cells = <3>;
+
+ uart1_pe_pins: uart1-pe-pins {
+ pins = "PE10", "PE11";
+ function = "uart1";
+ };
+
+ uart1_pg_pins: uart1-pg-pins {
+ pins = "PG3", "PG4";
+ function = "uart1";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
deleted file mode 100644
index 328585c6da58..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ /dev/null
@@ -1,164 +0,0 @@
-* Allwinner A1X Pin Controller
-
-The pins controlled by sunXi pin controller are organized in banks,
-each bank has 32 pins. Each pin has 7 multiplexing functions, with
-the first two functions being GPIO in and out. The configuration on
-the pins includes drive strength and pull-up.
-
-Required properties:
-- compatible: Should be one of the following (depending on your SoC):
- "allwinner,sun4i-a10-pinctrl"
- "allwinner,sun5i-a10s-pinctrl"
- "allwinner,sun5i-a13-pinctrl"
- "allwinner,sun6i-a31-pinctrl"
- "allwinner,sun6i-a31s-pinctrl"
- "allwinner,sun6i-a31-r-pinctrl"
- "allwinner,sun7i-a20-pinctrl"
- "allwinner,sun8i-a23-pinctrl"
- "allwinner,sun8i-a23-r-pinctrl"
- "allwinner,sun8i-a33-pinctrl"
- "allwinner,sun9i-a80-pinctrl"
- "allwinner,sun9i-a80-r-pinctrl"
- "allwinner,sun8i-a83t-pinctrl"
- "allwinner,sun8i-a83t-r-pinctrl"
- "allwinner,sun8i-h3-pinctrl"
- "allwinner,sun8i-h3-r-pinctrl"
- "allwinner,sun8i-r40-pinctrl"
- "allwinner,sun8i-v3-pinctrl"
- "allwinner,sun8i-v3s-pinctrl"
- "allwinner,sun50i-a64-pinctrl"
- "allwinner,sun50i-a64-r-pinctrl"
- "allwinner,sun50i-h5-pinctrl"
- "allwinner,sun50i-h6-pinctrl"
- "allwinner,sun50i-h6-r-pinctrl"
- "allwinner,suniv-f1c100s-pinctrl"
- "nextthing,gr8-pinctrl"
-
-- reg: Should contain the register physical address and length for the
- pin controller.
-
-- clocks: phandle to the clocks feeding the pin controller:
- - "apb": the gated APB parent clock
- - "hosc": the high frequency oscillator in the system
- - "losc": the low frequency oscillator in the system
-
-Note: For backward compatibility reasons, the hosc and losc clocks are only
-required if you need to use the optional input-debounce property. Any new
-device tree should set them.
-
-Each pin bank, depending on the SoC, can have an associated regulator:
-
-- vcc-pa-supply: for the A10, A20, A31, A31s, A80 and R40 SoCs
-- vcc-pb-supply: for the A31, A31s, A80 and V3s SoCs
-- vcc-pc-supply: for the A10, A20, A31, A31s, A64, A80, H5, R40 and V3s SoCs
-- vcc-pd-supply: for the A23, A31, A31s, A64, A80, A83t, H3, H5 and R40 SoCs
-- vcc-pe-supply: for the A10, A20, A31, A31s, A64, A80, R40 and V3s SoCs
-- vcc-pf-supply: for the A10, A20, A31, A31s, A80, R40 and V3s SoCs
-- vcc-pg-supply: for the A10, A20, A31, A31s, A64, A80, H3, H5, R40 and V3s SoCs
-- vcc-ph-supply: for the A31, A31s and A80 SoCs
-- vcc-pl-supply: for the r-pinctrl of the A64, A80 and A83t SoCs
-- vcc-pm-supply: for the r-pinctrl of the A31, A31s and A80 SoCs
-
-Optional properties:
- - input-debounce: Array of debouncing periods in microseconds. One period per
- irq bank found in the controller. 0 if no setup required.
-
-
-Please refer to pinctrl-bindings.txt in this directory for details of the
-common pinctrl bindings used by client devices.
-
-A pinctrl node should contain at least one subnodes representing the
-pinctrl groups available on the machine. Each subnode will list the
-pins it needs, and how they should be configured, with regard to muxer
-configuration, drive strength and pullups. If one of these options is
-not set, its actual value will be unspecified.
-
-Allwinner A1X Pin Controller supports the generic pin multiplexing and
-configuration bindings. For details on each properties, you can refer to
- ./pinctrl-bindings.txt.
-
-Required sub-node properties:
- - pins
- - function
-
-Optional sub-node properties:
- - bias-disable
- - bias-pull-up
- - bias-pull-down
- - drive-strength
-
-*** Deprecated pin configuration and multiplexing binding
-
-Required subnode-properties:
-
-- allwinner,pins: List of strings containing the pin name.
-- allwinner,function: Function to mux the pins listed above to.
-
-Optional subnode-properties:
-- allwinner,drive: Integer. Represents the current sent to the pin
- 0: 10 mA
- 1: 20 mA
- 2: 30 mA
- 3: 40 mA
-- allwinner,pull: Integer.
- 0: No resistor
- 1: Pull-up resistor
- 2: Pull-down resistor
-
-Examples:
-
-pio: pinctrl@1c20800 {
- compatible = "allwinner,sun5i-a13-pinctrl";
- reg = <0x01c20800 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- uart1_pins_a: uart1@0 {
- allwinner,pins = "PE10", "PE11";
- allwinner,function = "uart1";
- allwinner,drive = <0>;
- allwinner,pull = <0>;
- };
-
- uart1_pins_b: uart1@1 {
- allwinner,pins = "PG3", "PG4";
- allwinner,function = "uart1";
- allwinner,drive = <0>;
- allwinner,pull = <0>;
- };
-};
-
-
-GPIO and interrupt controller
------------------------------
-
-This hardware also acts as a GPIO controller and an interrupt
-controller.
-
-Consumers that would want to refer to one or the other (or both)
-should provide through the usual *-gpios and interrupts properties a
-cell with 3 arguments, first the number of the bank, then the pin
-inside that bank, and finally the flags for the GPIO/interrupts.
-
-Example:
-
-xio: gpio@38 {
- compatible = "nxp,pcf8574a";
- reg = <0x38>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- interrupt-parent = <&pio>;
- interrupts = <6 0 IRQ_TYPE_EDGE_FALLING>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
-
-reg_usb1_vbus: usb1-vbus {
- compatible = "regulator-fixed";
- regulator-name = "usb1-vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml
new file mode 100644
index 000000000000..240d429f773b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/intel,lgm-pinctrl.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bindings/pinctrl/intel,lgm-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain SoC pinmux & GPIO controller binding
+
+maintainers:
+ - Rahul Tanwar <rahul.tanwar@linux.intel.com>
+
+description: |
+ The pinmux & GPIO controller handles pin multiplexing and configuration,
+ including GPIO function selection and GPIO attribute configuration.
+
+ Please refer to [1] for details of the common pinctrl bindings used by the
+ client devices.
+
+ [1] Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+properties:
+ compatible:
+ const: intel,lgm-io
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ '-pins$':
+ type: object
+ description:
+ Client devices of the pinctrl node use subnodes to describe their desired
+ pin configuration. These subnodes use the standard properties listed below.
+
+ properties:
+ function:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ A string containing the name of the function to mux to the group.
+
+ groups:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ An array of strings identifying the list of groups.
+
+ pins:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ List of pins to select with this function.
+
+ pinmux:
+ description: The applicable mux group.
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/uint32-array"
+
+ bias-pull-up:
+ type: boolean
+
+ bias-pull-down:
+ type: boolean
+
+ drive-strength:
+ description: |
+ Selects the drive strength for the specified pins in mA.
+ 0: 2 mA
+ 1: 4 mA
+ 2: 8 mA
+ 3: 12 mA
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+
+ slew-rate:
+ description: |
+ Sets the slew rate for the specified pins.
+ 0: slow slew
+ 1: fast slew
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1]
+
+ drive-open-drain:
+ type: boolean
+
+ output-enable:
+ type: boolean
+
+ required:
+ - function
+ - groups
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Pinmux controller node
+ - |
+ pinctrl: pinctrl@e2880000 {
+ compatible = "intel,lgm-io";
+ reg = <0xe2880000 0x100000>;
+
+ uart0-pins {
+ pins = <64>, /* UART_RX0 */
+ <65>; /* UART_TX0 */
+ function = "CONSOLE_UART0";
+ pinmux = <1>,
+ <1>;
+ groups = "CONSOLE_UART0";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
index 10dc4f7176ca..0aff1f28495c 100644
--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt
@@ -15,6 +15,7 @@ Required properties for the root node:
"amlogic,meson-axg-aobus-pinctrl"
"amlogic,meson-g12a-periphs-pinctrl"
"amlogic,meson-g12a-aobus-pinctrl"
+ "amlogic,meson-a1-periphs-pinctrl"
- reg: address and size of registers controlling irq functionality
=== GPIO sub-nodes ===
diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
new file mode 100644
index 000000000000..13b7ab9dd6d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/pincfg-node.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic pin configuration node schema
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ Many data items that are represented in a pin configuration node are common
+ and generic. Pin control bindings should use the properties defined below
+ where they are applicable; not all of these properties are relevant or useful
+ for all hardware or binding structures. Each individual binding document
+ should state which of these generic properties, if any, are used, and the
+ structure of the DT nodes that contain these properties.
+
+properties:
+ bias-disable:
+ type: boolean
+ description: disable any pin bias
+
+ bias-high-impedance:
+ type: boolean
+ description: high impedance mode ("third-state", "floating")
+
+ bias-bus-hold:
+ type: boolean
+ description: latch weakly
+
+ bias-pull-up:
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: pull up the pin. On hardware supporting it, takes the pull
+ strength in Ohm as an optional argument.
+
+ bias-pull-down:
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: pull down the pin. On hardware supporting it, takes the pull
+ strength in Ohm as an optional argument.
+
+ bias-pull-pin-default:
+ oneOf:
+ - type: boolean
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: use pin-default pull state. On hardware supporting it, takes
+ the pull strength in Ohm as an optional argument.
+
+ drive-push-pull:
+ type: boolean
+ description: drive actively high and low
+
+ drive-open-drain:
+ type: boolean
+ description: drive with open drain
+
+ drive-open-source:
+ type: boolean
+ description: drive with open source
+
+ drive-strength:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: sink or source at most X mA
+
+ drive-strength-microamp:
+ description: sink or source at most X uA
+
+ input-enable:
+ type: boolean
+ description: enable input on pin (no effect on output, such as
+ enabling an input buffer)
+
+ input-disable:
+ type: boolean
+ description: disable input on pin (no effect on output, such as
+ disabling an input buffer)
+
+ input-schmitt-enable:
+ type: boolean
+ description: enable schmitt-trigger mode
+
+ input-schmitt-disable:
+ type: boolean
+ description: disable schmitt-trigger mode
+
+ input-debounce:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Takes the debounce time in usec as argument or 0 to disable
+ debouncing
+
+ power-source:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: select between different power supplies
+
+ low-power-enable:
+ type: boolean
+ description: enable low power mode
+
+ low-power-disable:
+ type: boolean
+ description: disable low power mode
+
+ output-disable:
+ type: boolean
+ description: disable output on a pin (such as disable an output buffer)
+
+ output-enable:
+ type: boolean
+ description: enable output on a pin without actively driving it
+ (such as enabling an output buffer)
+
+ output-low:
+ type: boolean
+ description: set the pin to output mode with low level
+
+ output-high:
+ type: boolean
+ description: set the pin to output mode with high level
+
+ sleep-hardware-state:
+ type: boolean
+ description: indicate this is a sleep-related state which will be
+ programmed into the registers for the sleep state.
+
+ slew-rate:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: set the slew rate
+
+ skew-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ this affects the expected clock skew on input pins
+ and the delay before latching a value to an output
+ pin. Typically indicates how many double-inverters are
+ used to delay the signal.
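+
+# A minimal illustrative sketch, not part of the schema: a hypothetical pin
+# configuration node combining a few of the generic properties above. The pin
+# names and node placement are defined by each individual pin controller
+# binding; the <20000> argument assumes hardware supporting a pull strength
+# in Ohm.
+#
+#    cts_rxd {
+#        pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
+#        bias-pull-up = <20000>;
+#        input-schmitt-enable;
+#    };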
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index fcd37e93ed4d..4613bb17ace3 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -141,196 +141,8 @@ controller device.
== Generic pin multiplexing node content ==
-pin multiplexing nodes:
-
-function - the mux function to select
-groups - the list of groups to select with this function
- (either this or "pins" must be specified)
-pins - the list of pins to select with this function (either
- this or "groups" must be specified)
-
-Example:
-
-state_0_node_a {
- uart0 {
- function = "uart0";
- groups = "u0rxtx", "u0rtscts";
- };
-};
-state_1_node_a {
- spi0 {
- function = "spi0";
- groups = "spi0pins";
- };
-};
-state_2_node_a {
- function = "i2c0";
- pins = "mfio29", "mfio30";
-};
-
-Optionally an alternative binding can be used if more suitable depending on the
-pin controller hardware. For hardware where there is a large number of identical
-pin controller instances, naming each pin and function can easily become
-unmaintainable. This is especially the case if the same controller is used for
-different pins and functions depending on the SoC revision and packaging.
-
-For cases like this, the pin controller driver may use pinctrl-pin-array helper
-binding with a hardware based index and a number of pin configuration values:
-
-pincontroller {
- ... /* Standard DT properties for the device itself elided */
- #pinctrl-cells = <2>;
-
- state_0_node_a {
- pinctrl-pin-array = <
- 0 A_DELAY_PS(0) G_DELAY_PS(120)
- 4 A_DELAY_PS(0) G_DELAY_PS(360)
- ...
- >;
- };
- ...
-};
-
-Above #pinctrl-cells specifies the number of value cells in addition to the
-index of the registers. This is similar to the interrupts-extended binding with
-one exception. There is no need to specify the phandle for each entry as that
-is already known as the defined pins are always children of the pin controller
-node. Further having the phandle pointing to another pin controller would not
-currently work as the pinctrl framework uses named modes to group pins for each
-pin control device.
-
-The index for pinctrl-pin-array must relate to the hardware for the pinctrl
-registers, and must not be a virtual index of pin instances. The reason for
-this is to avoid mapping of the index in the dts files and the pin controller
-driver as it can change.
-
-For hardware where pin multiplexing configurations have to be specified for
-each single pin the number of required sub-nodes containing "pin" and
-"function" properties can quickly escalate and become hard to write and
-maintain.
-
-For cases like this, the pin controller driver may use the pinmux helper
-property, where the pin identifier is provided with mux configuration settings
-in a pinmux group. A pinmux group consists of the pin identifier and mux
-settings represented as a single integer or an array of integers.
-
-The pinmux property accepts an array of pinmux groups, each of them describing
-a single pin multiplexing configuration.
-
-pincontroller {
- state_0_node_a {
- pinmux = <PINMUX_GROUP>, <PINMUX_GROUP>, ...;
- };
-};
-
-Each individual pin controller driver bindings documentation shall specify
-how pin IDs and pin multiplexing configuration are defined and assembled
-together in a pinmux group.
+See pinmux-node.yaml
== Generic pin configuration node content ==
-Many data items that are represented in a pin configuration node are common
-and generic. Pin control bindings should use the properties defined below
-where they are applicable; not all of these properties are relevant or useful
-for all hardware or binding structures. Each individual binding document
-should state which of these generic properties, if any, are used, and the
-structure of the DT nodes that contain these properties.
-
-Supported generic properties are:
-
-pins - the list of pins that properties in the node
- apply to (either this, "group" or "pinmux" has to be
- specified)
-group - the group to apply the properties to, if the driver
- supports configuration of whole groups rather than
- individual pins (either this, "pins" or "pinmux" has
- to be specified)
-pinmux - the list of numeric pin ids and their mux settings
- that properties in the node apply to (either this,
- "pins" or "groups" have to be specified)
-bias-disable - disable any pin bias
-bias-high-impedance - high impedance mode ("third-state", "floating")
-bias-bus-hold - latch weakly
-bias-pull-up - pull up the pin
-bias-pull-down - pull down the pin
-bias-pull-pin-default - use pin-default pull state
-drive-push-pull - drive actively high and low
-drive-open-drain - drive with open drain
-drive-open-source - drive with open source
-drive-strength - sink or source at most X mA
-drive-strength-microamp - sink or source at most X uA
-input-enable - enable input on pin (no effect on output, such as
- enabling an input buffer)
-input-disable - disable input on pin (no effect on output, such as
- disabling an input buffer)
-input-schmitt-enable - enable schmitt-trigger mode
-input-schmitt-disable - disable schmitt-trigger mode
-input-debounce - debounce mode with debounce time X
-power-source - select between different power supplies
-low-power-enable - enable low power mode
-low-power-disable - disable low power mode
-output-disable - disable output on a pin (such as disable an output
- buffer)
-output-enable - enable output on a pin without actively driving it
- (such as enabling an output buffer)
-output-low - set the pin to output mode with low level
-output-high - set the pin to output mode with high level
-sleep-hardware-state - indicate this is sleep related state which will be programmed
- into the registers for the sleep state.
-slew-rate - set the slew rate
-skew-delay - this affects the expected clock skew on input pins
- and the delay before latching a value to an output
- pin. Typically indicates how many double-inverters are
- used to delay the signal.
-
-For example:
-
-state_0_node_a {
- cts_rxd {
- pins = "GPIO0_AJ5", "GPIO2_AH4"; /* CTS+RXD */
- bias-pull-up;
- };
-};
-state_1_node_a {
- rts_txd {
- pins = "GPIO1_AJ3", "GPIO3_AH3"; /* RTS+TXD */
- output-high;
- };
-};
-state_2_node_a {
- foo {
- group = "foo-group";
- bias-pull-up;
- };
-};
-state_3_node_a {
- mux {
- pinmux = <GPIOx_PINm_MUXn>, <GPIOx_PINj_MUXk>;
- input-enable;
- };
-};
-
-Some of the generic properties take arguments. For those that do, the
-arguments are described below.
-
-- pins takes a list of pin names or IDs as a required argument. The specific
- binding for the hardware defines:
- - Whether the entries are integers or strings, and their meaning.
-
-- pinmux takes a list of pin IDs and mux settings as required argument. The
- specific bindings for the hardware defines:
- - How pin IDs and mux settings are defined and assembled together in a single
- integer or an array of integers.
-
-- bias-pull-up, -down and -pin-default take as optional argument on hardware
- supporting it the pull strength in Ohm. bias-disable will disable the pull.
-
-- drive-strength takes as argument the target strength in mA.
-
-- drive-strength-microamp takes as argument the target strength in uA.
-
-- input-debounce takes the debounce time in usec as argument
- or 0 to disable debouncing
-
-More in-depth documentation on these parameters can be found in
-<include/linux/pinctrl/pinconf-generic.h>
+See pincfg-node.yaml
diff --git a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
new file mode 100644
index 000000000000..777623a57fd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/pinmux-node.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic pin multiplexing node schema
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The contents of the pin configuration child nodes are defined by the binding
+ for the individual pin controller device. The pin configuration nodes need not
+ be direct children of the pin controller device; they may be grandchildren,
+ for example. Whether this is legal, and whether there is any interaction
+ between the child and intermediate parent nodes, is again defined entirely by
+ the binding for the individual pin controller device.
+
+ While not required to be used, there are 3 generic forms of pin muxing nodes
+ which pin controller devices can use.
+
+ Pin multiplexing nodes select a mux "function" together with the "groups" or
+ "pins" it applies to:
+
+ Example:
+
+ state_0_node_a {
+ uart0 {
+ function = "uart0";
+ groups = "u0rxtx", "u0rtscts";
+ };
+ };
+ state_1_node_a {
+ spi0 {
+ function = "spi0";
+ groups = "spi0pins";
+ };
+ };
+ state_2_node_a {
+ function = "i2c0";
+ pins = "mfio29", "mfio30";
+ };
+
+ Optionally an alternative binding can be used if more suitable depending on the
+ pin controller hardware. For hardware where there is a large number of identical
+ pin controller instances, naming each pin and function can easily become
+ unmaintainable. This is especially the case if the same controller is used for
+ different pins and functions depending on the SoC revision and packaging.
+
+ For cases like this, the pin controller driver may use pinctrl-pin-array helper
+ binding with a hardware based index and a number of pin configuration values:
+
+ pincontroller {
+ ... /* Standard DT properties for the device itself elided */
+ #pinctrl-cells = <2>;
+
+ state_0_node_a {
+ pinctrl-pin-array = <
+ 0 A_DELAY_PS(0) G_DELAY_PS(120)
+ 4 A_DELAY_PS(0) G_DELAY_PS(360)
+ ...
+ >;
+ };
+ ...
+ };
+
+ Above #pinctrl-cells specifies the number of value cells in addition to the
+ index of the registers. This is similar to the interrupts-extended binding with
+ one exception. There is no need to specify the phandle for each entry as that
+ is already known as the defined pins are always children of the pin controller
+ node. Further having the phandle pointing to another pin controller would not
+ currently work as the pinctrl framework uses named modes to group pins for each
+ pin control device.
+
+ The index for pinctrl-pin-array must relate to the hardware for the pinctrl
+ registers, and must not be a virtual index of pin instances. The reason for
+ this is to avoid mapping of the index in the dts files and the pin controller
+ driver as it can change.
+
+ For hardware where pin multiplexing configurations have to be specified for
+ each single pin the number of required sub-nodes containing "pin" and
+ "function" properties can quickly escalate and become hard to write and
+ maintain.
+
+ For cases like this, the pin controller driver may use the pinmux helper
+ property, where the pin identifier is provided with mux configuration settings
+ in a pinmux group. A pinmux group consists of the pin identifier and mux
+ settings represented as a single integer or an array of integers.
+
+ The pinmux property accepts an array of pinmux groups, each of them describing
+ a single pin multiplexing configuration.
+
+ pincontroller {
+ state_0_node_a {
+ pinmux = <PINMUX_GROUP>, <PINMUX_GROUP>, ...;
+ };
+ };
+
+ Each individual pin controller driver bindings documentation shall specify
+ how pin IDs and pin multiplexing configuration are defined and assembled
+ together in a pinmux group.
+
+properties:
+ function:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The mux function to select
+
+ pins:
+ oneOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ The list of pin identifiers that properties in the node apply to. The
+ specific binding for the hardware defines whether the entries are integers
+ or strings, and their meaning.
+
+ group:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ The group to apply the properties to, if the driver supports
+ configuration of whole groups rather than individual pins (either
+ this, "pins" or "pinmux" has to be specified)
+
+ pinmux:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ The list of numeric pin ids and their mux settings that properties in the
+ node apply to (either this, "pins" or "groups" have to be specified)
+
+ pinctrl-pin-array:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt
new file mode 100644
index 000000000000..70d04d12f136
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8976-pinctrl.txt
@@ -0,0 +1,183 @@
+Qualcomm MSM8976 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSM8956 and MSM8976 platforms.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,msm8976-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2, specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2, specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+- gpio-ranges:
+ Usage: required
+ Definition: see ../gpio/gpio.txt
+
+- gpio-reserved-ranges:
+ Usage: optional
+ Definition: see ../gpio/gpio.txt
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode.
+
+ Valid pins are:
+ gpio0-gpio145
+ Supports mux, bias and drive-strength
+
+ sdc1_clk, sdc1_cmd, sdc1_data,
+ sdc2_clk, sdc2_cmd, sdc2_data,
+ sdc3_clk, sdc3_cmd, sdc3_data
+ Supports bias and drive-strength
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+
+ gpio, blsp_uart1, blsp_spi1, smb_int, blsp_i2c1, blsp_spi2,
+ blsp_uart2, blsp_i2c2, gcc_gp1_clk_b, blsp_spi3,
+ qdss_tracedata_b, blsp_i2c3, gcc_gp2_clk_b, gcc_gp3_clk_b,
+ blsp_spi4, cap_int, blsp_i2c4, blsp_spi5, blsp_uart5,
+ qdss_traceclk_a, m_voc, blsp_i2c5, qdss_tracectl_a,
+ qdss_tracedata_a, blsp_spi6, blsp_uart6, qdss_tracectl_b,
+ blsp_i2c6, qdss_traceclk_b, mdp_vsync, pri_mi2s_mclk_a,
+ sec_mi2s_mclk_a, cam_mclk, cci0_i2c, cci1_i2c, blsp1_spi,
+ blsp3_spi, gcc_gp1_clk_a, gcc_gp2_clk_a, gcc_gp3_clk_a,
+ uim_batt, sd_write, uim1_data, uim1_clk, uim1_reset,
+ uim1_present, uim2_data, uim2_clk, uim2_reset,
+ uim2_present, ts_xvdd, mipi_dsi0, us_euro, ts_resout,
+ ts_sample, sec_mi2s_mclk_b, pri_mi2s, codec_reset,
+ cdc_pdm0, us_emitter, pri_mi2s_mclk_b, pri_mi2s_mclk_c,
+ lpass_slimbus, lpass_slimbus0, lpass_slimbus1, codec_int1,
+ codec_int2, wcss_bt, sdc3, wcss_wlan2, wcss_wlan1,
+ wcss_wlan0, wcss_wlan, wcss_fm, key_volp, key_snapshot,
+ key_focus, key_home, pwr_down, dmic0_clk, hdmi_int,
+ dmic0_data, wsa_vi, wsa_en, blsp_spi8, wsa_irq, blsp_i2c8,
+ pa_indicator, modem_tsync, ssbi_wtr1, gsm1_tx, gsm0_tx,
+ sdcard_det, sec_mi2s, ss_switch
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+ Not valid for sdc pins.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+ Not valid for sdc pins.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,msm8976-pinctrl";
+ reg = <0x1000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 145>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ blsp1_uart2_active: blsp1_uart2_active {
+ mux {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ function = "blsp_uart2";
+ };
+
+ config {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index c32bf3237545..7be5de8d253f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -15,14 +15,18 @@ PMIC's from Qualcomm.
"qcom,pm8917-gpio"
"qcom,pm8921-gpio"
"qcom,pm8941-gpio"
+ "qcom,pm8950-gpio"
"qcom,pm8994-gpio"
"qcom,pm8998-gpio"
"qcom,pma8084-gpio"
+ "qcom,pmi8950-gpio"
"qcom,pmi8994-gpio"
"qcom,pmi8998-gpio"
"qcom,pms405-gpio"
"qcom,pm8150-gpio"
"qcom,pm8150b-gpio"
+ "qcom,pm6150-gpio"
+ "qcom,pm6150l-gpio"
And must contain either "qcom,spmi-gpio" or "qcom,ssbi-gpio"
if the device is on an spmi bus or an ssbi bus respectively
@@ -91,15 +95,19 @@ to specify in a pin configuration subnode:
gpio1-gpio38 for pm8917
gpio1-gpio44 for pm8921
gpio1-gpio36 for pm8941
+ gpio1-gpio8 for pm8950 (hole on gpio3)
gpio1-gpio22 for pm8994
gpio1-gpio26 for pm8998
gpio1-gpio22 for pma8084
+ gpio1-gpio2 for pmi8950
gpio1-gpio10 for pmi8994
gpio1-gpio12 for pms405 (holes on gpio1, gpio9 and gpio10)
gpio1-gpio10 for pm8150 (holes on gpio2, gpio5, gpio7
and gpio8)
gpio1-gpio12 for pm8150b (holes on gpio3, gpio4, gpio7)
gpio1-gpio12 for pm8150l (hole on gpio7)
+ gpio1-gpio10 for pm6150
+ gpio1-gpio12 for pm6150l
- function:
Usage: required
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index 2ab95bc26066..448d36a85730 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -16,6 +16,8 @@ of PMIC's from Qualcomm.
"qcom,pm8917-mpp",
"qcom,pm8921-mpp",
"qcom,pm8941-mpp",
+ "qcom,pm8950-mpp",
+ "qcom,pmi8950-mpp",
"qcom,pm8994-mpp",
"qcom,pma8084-mpp",
@@ -80,6 +82,8 @@ to specify in a pin configuration subnode:
mpp1-mpp4 for pm8841
mpp1-mpp4 for pm8916
mpp1-mpp8 for pm8941
+ mpp1-mpp4 for pm8950
+ mpp1-mpp4 for pmi8950
mpp1-mpp4 for pma8084
- function:
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 3902efa18fd0..6eada23eaa31 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -18,6 +18,7 @@ Required Properties:
- "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
- "renesas,pfc-r8a77470": for R8A77470 (RZ/G1C) compatible pin-controller.
- "renesas,pfc-r8a774a1": for R8A774A1 (RZ/G2M) compatible pin-controller.
+ - "renesas,pfc-r8a774b1": for R8A774B1 (RZ/G2N) compatible pin-controller.
- "renesas,pfc-r8a774c0": for R8A774C0 (RZ/G2E) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Car M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
@@ -27,7 +28,8 @@ Required Properties:
- "renesas,pfc-r8a7793": for R8A7793 (R-Car M2-N) compatible pin-controller.
- "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller.
- "renesas,pfc-r8a7795": for R8A7795 (R-Car H3) compatible pin-controller.
- - "renesas,pfc-r8a7796": for R8A7796 (R-Car M3-W) compatible pin-controller.
+ - "renesas,pfc-r8a7796": for R8A77960 (R-Car M3-W) compatible pin-controller.
+ - "renesas,pfc-r8a77961": for R8A77961 (R-Car M3-W+) compatible pin-controller.
- "renesas,pfc-r8a77965": for R8A77965 (R-Car M3-N) compatible pin-controller.
- "renesas,pfc-r8a77970": for R8A77970 (R-Car V3M) compatible pin-controller.
- "renesas,pfc-r8a77980": for R8A77980 (R-Car V3H) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index 0919db294c17..2113cfaa26e6 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -29,6 +29,7 @@ Required properties for iomux controller:
"rockchip,rk3188-pinctrl": for Rockchip RK3188
"rockchip,rk3228-pinctrl": for Rockchip RK3228
"rockchip,rk3288-pinctrl": for Rockchip RK3288
+ "rockchip,rk3308-pinctrl": for Rockchip RK3308
"rockchip,rk3328-pinctrl": for Rockchip RK3328
"rockchip,rk3368-pinctrl": for Rockchip RK3368
"rockchip,rk3399-pinctrl": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 400df2da018a..754ea7ab040a 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -40,10 +40,9 @@ properties:
allOf:
- $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: Should be phandle/offset/mask
- items:
- - description: Phandle to the syscon node which includes IRQ mux selection.
- - description: The offset of the IRQ mux selection register.
- - description: The field mask of IRQ mux, needed if different of 0xf.
+ - Phandle to the syscon node which includes IRQ mux selection.
+ - The offset of the IRQ mux selection register.
+ - The field mask of IRQ mux, needed if different from 0xf.
st,package:
allOf:
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt b/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
index 0fdc3dd1125e..99b5b10cda31 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
@@ -10,7 +10,7 @@ The Video Processing Unit power domain is controlled by this power controller,
but the domain requires some external resources to meet the correct power
sequences.
The bindings must respect the power domain bindings as described in the file
-power_domain.txt
+power-domain.yaml
Device Tree Bindings:
---------------------
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
index 726ec2875223..f0f5553a9e74 100644
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
@@ -19,7 +19,7 @@ Required properties:
- ipg
The power domains are generic power domain providers as documented in
-Documentation/devicetree/bindings/power/power_domain.txt. They are described as
+Documentation/devicetree/bindings/power/power-domain.yaml. They are described as
subnodes of the power gating controller 'pgc' node of the GPC and should
contain the following:
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
index 7c7e972aaa42..61649202f6f5 100644
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
@@ -17,7 +17,7 @@ Required properties:
Power domains contained within GPC node are generic power domain
providers, documented in
-Documentation/devicetree/bindings/power/power_domain.txt, which are
+Documentation/devicetree/bindings/power/power-domain.yaml, which are
described as subnodes of the power gating controller 'pgc' node,
which, in turn, is expected to contain the following:
diff --git a/Documentation/devicetree/bindings/power/pd-samsung.txt b/Documentation/devicetree/bindings/power/pd-samsung.txt
deleted file mode 100644
index 92ef355e8f64..000000000000
--- a/Documentation/devicetree/bindings/power/pd-samsung.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-* Samsung Exynos Power Domains
-
-Exynos processors include support for multiple power domains which are used
-to gate power to one or more peripherals on the processor.
-
-Required Properties:
-- compatible: should be one of the following.
- * samsung,exynos4210-pd - for exynos4210 type power domain.
- * samsung,exynos5433-pd - for exynos5433 type power domain.
-- reg: physical base address of the controller and length of memory mapped
- region.
-- #power-domain-cells: number of cells in power domain specifier;
- must be 0.
-
-Optional Properties:
-- label: Human readable string with the domain name. It will be visible in
- userspace to let the user distinguish between multiple domains in the SoC.
-- power-domains: phandle pointing to the parent power domain, for more details
- see Documentation/devicetree/bindings/power/power_domain.txt
-
-Deprecated Properties:
-- clocks
-- clock-names
-
-Node of a device using power domains must have a power-domains property
-defined with a phandle to respective power domain.
-
-Example:
-
- lcd0: power-domain-lcd0 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10023C00 0x10>;
- #power-domain-cells = <0>;
- label = "LCD0";
- };
-
- mfc_pd: power-domain@10044060 {
- compatible = "samsung,exynos4210-pd";
- reg = <0x10044060 0x20>;
- #power-domain-cells = <0>;
- label = "MFC";
- };
-
-See Documentation/devicetree/bindings/power/power_domain.txt for description
-of consumer-side bindings.
diff --git a/Documentation/devicetree/bindings/power/pd-samsung.yaml b/Documentation/devicetree/bindings/power/pd-samsung.yaml
new file mode 100644
index 000000000000..09bdd96c1ec1
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/pd-samsung.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/pd-samsung.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC Power Domains
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |+
+ Exynos processors include support for multiple power domains which are used
+ to gate power to one or more peripherals on the processor.
+
+allOf:
+ - $ref: power-domain.yaml#
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos4210-pd
+ - samsung,exynos5433-pd
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ deprecated: true
+ maxItems: 1
+
+ clock-names:
+ deprecated: true
+ maxItems: 1
+
+ label:
+ description:
+ Human readable string with the domain name. It will be visible in
+ userspace to let the user distinguish between multiple domains in the SoC.
+
+ "#power-domain-cells":
+ const: 0
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#power-domain-cells"
+ - reg
+
+examples:
+ - |
+ lcd0_pd: power-domain@10023c80 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10023c80 0x20>;
+ #power-domain-cells = <0>;
+ label = "LCD0";
+ };
+
+ mfc_pd: power-domain@10044060 {
+ compatible = "samsung,exynos4210-pd";
+ reg = <0x10044060 0x20>;
+ #power-domain-cells = <0>;
+ label = "MFC";
+ };
diff --git a/Documentation/devicetree/bindings/power/power-domain.yaml b/Documentation/devicetree/bindings/power/power-domain.yaml
new file mode 100644
index 000000000000..455b573293ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/power-domain.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/power-domain.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic PM domains
+
+maintainers:
+ - Rafael J. Wysocki <rjw@rjwysocki.net>
+ - Kevin Hilman <khilman@kernel.org>
+ - Ulf Hansson <ulf.hansson@linaro.org>
+
+description: |+
+ System on chip designs are often divided into multiple PM domains that can be
+ used for power gating of selected IP blocks for power saving by reduced leakage
+ current.
+
+ This device tree binding can be used to bind PM domain consumer devices with
+ their PM domains provided by PM domain providers. A PM domain provider can be
+ represented by any node in the device tree and can provide one or more PM
+ domains. A consumer node can refer to the provider by a phandle and a set of
+ phandle arguments (so called PM domain specifiers) of length specified by the
+ \#power-domain-cells property in the PM domain provider node.
+
+properties:
+ $nodename:
+ pattern: "^(power-controller|power-domain)(@.*)?$"
+
+ domain-idle-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ A phandle of an idle-state that shall be soaked into a generic domain
+ power state. The idle state definitions are compatible with
+ domain-idle-state specified in
+ Documentation/devicetree/bindings/power/domain-idle-state.txt.
+ Phandles that are not compatible with domain-idle-state will be ignored.
+ The domain-idle-states property reflects the idle state of this PM domain
+ and not the idle states of the devices or sub-domains in the PM domain.
+ Devices and sub-domains have their own idle-states independent
+ of the parent domain's idle states. In the absence of this property,
+ the domain would be considered as capable of being powered-on
+ or powered-off.
+
+ operating-points-v2:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Phandles to the OPP tables of power domains provided by a power domain
+ provider. If the provider provides a single power domain only or all
+ the power domains provided by the provider have identical OPP tables,
+ then this shall contain a single phandle. Refer to ../opp/opp.txt
+ for more information.
+
+ "#power-domain-cells":
+ description:
+ Number of cells in a PM domain specifier. Typically 0 for nodes
+ representing a single PM domain and 1 for nodes providing multiple PM
+ domains (e.g. power controllers), but can be any value as specified
+ by device tree binding documentation of particular provider.
+
+ power-domains:
+ description:
+ A phandle and PM domain specifier as defined by bindings of the power
+ controller specified by phandle. Some power domains might be powered
+ from another power domain (or have other hardware specific
+ dependencies). For representing such dependency a standard PM domain
+ consumer binding is used. When provided, all domains created
+ by the given provider should be subdomains of the domain specified
+ by this binding.
+
+required:
+ - "#power-domain-cells"
+
+examples:
+ - |
+ power: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <1>;
+ };
+
+ // The node above defines a power controller that is a PM domain provider and
+ // expects one cell as its phandle argument.
+
+ - |
+ parent2: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <1>;
+ };
+
+ child2: power-controller@12341000 {
+ compatible = "foo,power-controller";
+ reg = <0x12341000 0x1000>;
+ power-domains = <&parent2 0>;
+ #power-domain-cells = <1>;
+ };
+
+ // The nodes above define two power controllers: 'parent2' and 'child2'.
+ // Domains created by the 'child2' power controller are subdomains of the '0'
+ // power domain provided by the 'parent2' power controller.
+
+ - |
+ parent3: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <0>;
+ domain-idle-states = <&DOMAIN_RET>, <&DOMAIN_PWR_DN>;
+ };
+
+ child3: power-controller@12341000 {
+ compatible = "foo,power-controller";
+ reg = <0x12341000 0x1000>;
+ power-domains = <&parent3>;
+ #power-domain-cells = <0>;
+ domain-idle-states = <&DOMAIN_PWR_DN>;
+ };
+
+ DOMAIN_RET: state@0 {
+ compatible = "domain-idle-state";
+ reg = <0x0 0x0>;
+ entry-latency-us = <1000>;
+ exit-latency-us = <2000>;
+ min-residency-us = <10000>;
+ };
+
+ DOMAIN_PWR_DN: state@1 {
+ compatible = "domain-idle-state";
+ reg = <0x1 0x0>;
+ entry-latency-us = <5000>;
+ exit-latency-us = <8000>;
+ min-residency-us = <7000>;
+ };
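+
+ // The example above shows a parent/child pair of single-domain providers,
+ // each referencing the domain idle states it is able to enter.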
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 8f8b25a24b8f..5b09b2deb483 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -13,100 +13,7 @@ phandle arguments (so called PM domain specifiers) of length specified by the
==PM domain providers==
-Required properties:
- - #power-domain-cells : Number of cells in a PM domain specifier;
- Typically 0 for nodes representing a single PM domain and 1 for nodes
- providing multiple PM domains (e.g. power controllers), but can be any value
- as specified by device tree binding documentation of particular provider.
-
-Optional properties:
- - power-domains : A phandle and PM domain specifier as defined by bindings of
- the power controller specified by phandle.
- Some power domains might be powered from another power domain (or have
- other hardware specific dependencies). For representing such dependency
- a standard PM domain consumer binding is used. When provided, all domains
- created by the given provider should be subdomains of the domain
- specified by this binding. More details about power domain specifier are
- available in the next section.
-
-- domain-idle-states : A phandle of an idle-state that shall be soaked into a
- generic domain power state. The idle state definitions are
- compatible with domain-idle-state specified in [1]. phandles
- that are not compatible with domain-idle-state will be
- ignored.
- The domain-idle-state property reflects the idle state of this PM domain and
- not the idle states of the devices or sub-domains in the PM domain. Devices
- and sub-domains have their own idle-states independent of the parent
- domain's idle states. In the absence of this property, the domain would be
- considered as capable of being powered-on or powered-off.
-
-- operating-points-v2 : Phandles to the OPP tables of power domains provided by
- a power domain provider. If the provider provides a single power domain only
- or all the power domains provided by the provider have identical OPP tables,
- then this shall contain a single phandle. Refer to ../opp/opp.txt for more
- information.
-
-Example:
-
- power: power-controller@12340000 {
- compatible = "foo,power-controller";
- reg = <0x12340000 0x1000>;
- #power-domain-cells = <1>;
- };
-
-The node above defines a power controller that is a PM domain provider and
-expects one cell as its phandle argument.
-
-Example 2:
-
- parent: power-controller@12340000 {
- compatible = "foo,power-controller";
- reg = <0x12340000 0x1000>;
- #power-domain-cells = <1>;
- };
-
- child: power-controller@12341000 {
- compatible = "foo,power-controller";
- reg = <0x12341000 0x1000>;
- power-domains = <&parent 0>;
- #power-domain-cells = <1>;
- };
-
-The nodes above define two power controllers: 'parent' and 'child'.
-Domains created by the 'child' power controller are subdomains of '0' power
-domain provided by the 'parent' power controller.
-
-Example 3:
- parent: power-controller@12340000 {
- compatible = "foo,power-controller";
- reg = <0x12340000 0x1000>;
- #power-domain-cells = <0>;
- domain-idle-states = <&DOMAIN_RET>, <&DOMAIN_PWR_DN>;
- };
-
- child: power-controller@12341000 {
- compatible = "foo,power-controller";
- reg = <0x12341000 0x1000>;
- power-domains = <&parent>;
- #power-domain-cells = <0>;
- domain-idle-states = <&DOMAIN_PWR_DN>;
- };
-
- DOMAIN_RET: state@0 {
- compatible = "domain-idle-state";
- reg = <0x0>;
- entry-latency-us = <1000>;
- exit-latency-us = <2000>;
- min-residency-us = <10000>;
- };
-
- DOMAIN_PWR_DN: state@1 {
- compatible = "domain-idle-state";
- reg = <0x1>;
- entry-latency-us = <5000>;
- exit-latency-us = <8000>;
- min-residency-us = <7000>;
- };
+See power-domain.yaml.
==PM domain consumers==
diff --git a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
index beda7d2efc30..49aba15dff8b 100644
--- a/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
+++ b/Documentation/devicetree/bindings/power/renesas,sysc-rmobile.txt
@@ -29,7 +29,7 @@ Optional nodes:
Each of the PM domain nodes represents a PM domain, as documented by the
generic PM domain bindings in
-Documentation/devicetree/bindings/power/power_domain.txt.
+Documentation/devicetree/bindings/power/power-domain.yaml.
The nodes should be named by the real power area names, and thus their names
should be unique.
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-poweroff.txt b/Documentation/devicetree/bindings/power/reset/syscon-poweroff.txt
deleted file mode 100644
index 022ed1f3bc80..000000000000
--- a/Documentation/devicetree/bindings/power/reset/syscon-poweroff.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Generic SYSCON mapped register poweroff driver
-
-This is a generic poweroff driver using syscon to map the poweroff register.
-The poweroff is generally performed with a write to the poweroff register
-defined by the register map pointed by syscon reference plus the offset
-with the value and mask defined in the poweroff node.
-
-Required properties:
-- compatible: should contain "syscon-poweroff"
-- regmap: this is phandle to the register map node
-- offset: offset in the register map for the poweroff register (in bytes)
-- value: the poweroff value written to the poweroff register (32 bit access)
-
-Optional properties:
-- mask: update only the register bits defined by the mask (32 bit)
-
-Legacy usage:
-If a node doesn't contain a value property but contains a mask property, the
-mask property is used as the value.
-
-Default will be little endian mode, 32 bit access only.
-
-Examples:
-
- poweroff {
- compatible = "syscon-poweroff";
- regmap = <&regmapnode>;
- offset = <0x0>;
- mask = <0x7a>;
- };
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml b/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml
new file mode 100644
index 000000000000..520e07e6f21b
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/syscon-poweroff.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/syscon-poweroff.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic SYSCON mapped register poweroff driver
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |+
+ This is a generic poweroff driver using syscon to map the poweroff register.
+ The poweroff is generally performed with a write to the poweroff register,
+ located at the given offset in the register map pointed to by the syscon
+ reference, using the value and mask defined in the poweroff node.
+ Access defaults to little endian mode, 32 bit only.
+
+properties:
+ compatible:
+ const: syscon-poweroff
+
+ mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Update only the register bits defined by the mask (32 bit).
+
+ offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Offset in the register map for the poweroff register (in bytes).
+
+ regmap:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to the register map node.
+
+ value:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The poweroff value written to the poweroff register (32 bit access).
+
+required:
+ - compatible
+ - regmap
+ - offset
+
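+# Legacy usage: a node may contain "mask" without "value", in which case the
+# mask is used as the value, so "value" is only required when "mask" is
+# absent.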
+allOf:
+ - if:
+ not:
+ required:
+ - mask
+ then:
+ required:
+ - value
+
+examples:
+ - |
+ poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&regmapnode>;
+ offset = <0x0>;
+ mask = <0x7a>;
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt b/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt
deleted file mode 100644
index e23dea8344f8..000000000000
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Generic SYSCON mapped register reset driver
-
-This is a generic reset driver using syscon to map the reset register.
-The reset is generally performed with a write to the reset register
-defined by the register map pointed by syscon reference plus the offset
-with the value and mask defined in the reboot node.
-
-Required properties:
-- compatible: should contain "syscon-reboot"
-- regmap: this is phandle to the register map node
-- offset: offset in the register map for the reboot register (in bytes)
-- value: the reset value written to the reboot register (32 bit access)
-
-Optional properties:
-- mask: update only the register bits defined by the mask (32 bit)
-
-Legacy usage:
-If a node doesn't contain a value property but contains a mask property, the
-mask property is used as the value.
-
-Default will be little endian mode, 32 bit access only.
-
-Examples:
-
- reboot {
- compatible = "syscon-reboot";
- regmap = <&regmapnode>;
- offset = <0x0>;
- mask = <0x1>;
- };
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
new file mode 100644
index 000000000000..d38006b1f1f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/syscon-reboot.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic SYSCON mapped register reset driver
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |+
+ This is a generic reset driver using syscon to map the reset register.
+ The reset is generally performed with a write to the reset register,
+ located at the given offset in the register map pointed to by the syscon
+ reference, using the value and mask defined in the reboot node.
+ Access defaults to little endian mode, 32 bit only.
+
+properties:
+ compatible:
+ const: syscon-reboot
+
+ mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Update only the register bits defined by the mask (32 bit).
+
+ offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Offset in the register map for the reboot register (in bytes).
+
+ regmap:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to the register map node.
+
+ value:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The reset value written to the reboot register (32 bit access).
+
+required:
+ - compatible
+ - regmap
+ - offset
+
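+# Legacy usage: a node may contain "mask" without "value", in which case the
+# mask is used as the value, so "value" is only required when "mask" is
+# absent.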
+allOf:
+ - if:
+ not:
+ required:
+ - mask
+ then:
+ required:
+ - value
+
+examples:
+ - |
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&regmapnode>;
+ offset = <0x0>;
+ mask = <0x1>;
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt b/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
index 80bd873c3b1d..6048f636783f 100644
--- a/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
@@ -5,7 +5,8 @@ Required properties:
- interrupts: Interrupt specifier for each name in interrupt-names
- interrupt-names: Should contain the following entries:
"chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb"
+ "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
+ "battdetb"
- io-channels: IIO ADC channel specifier for each name in io-channel-names
- io-channel-names: Should contain the following entries:
"battdetb", "battp", "vbus", "chg_isense", "batti"
@@ -21,11 +22,13 @@ cpcap_charger: charger {
compatible = "motorola,mapphone-cpcap-charger";
interrupts-extended = <
&cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0
- &cpcap 22 0 &cpcap 20 0 &cpcap 19 0 &cpcap 54 0
+ &cpcap 22 0 &cpcap 21 0 &cpcap 20 0 &cpcap 19 0
+ &cpcap 54 0
>;
interrupt-names =
"chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb";
+ "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
+ "battdetb";
mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW
&gpio3 23 GPIO_ACTIVE_LOW>;
io-channels = <&cpcap_adc 0 &cpcap_adc 1
diff --git a/Documentation/devicetree/bindings/power/supply/max77650-charger.txt b/Documentation/devicetree/bindings/power/supply/max77650-charger.txt
deleted file mode 100644
index e6d0fb6ff94e..000000000000
--- a/Documentation/devicetree/bindings/power/supply/max77650-charger.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Battery charger driver for MAX77650 PMIC from Maxim Integrated.
-
-This module is part of the MAX77650 MFD device. For more details
-see Documentation/devicetree/bindings/mfd/max77650.txt.
-
-The charger is represented as a sub-node of the PMIC node on the device tree.
-
-Required properties:
---------------------
-- compatible: Must be "maxim,max77650-charger"
-
-Optional properties:
---------------------
-- input-voltage-min-microvolt: Minimum CHGIN regulation voltage. Must be one
- of: 4000000, 4100000, 4200000, 4300000,
- 4400000, 4500000, 4600000, 4700000.
-- input-current-limit-microamp: CHGIN input current limit (in microamps). Must
- be one of: 95000, 190000, 285000, 380000,
- 475000.
-
-Example:
---------
-
- charger {
- compatible = "maxim,max77650-charger";
- input-voltage-min-microvolt = <4200000>;
- input-current-limit-microamp = <285000>;
- };
diff --git a/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml b/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml
new file mode 100644
index 000000000000..deef010ec535
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/max77650-charger.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/max77650-charger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Battery charger driver for MAX77650 PMIC from Maxim Integrated.
+
+maintainers:
+ - Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+description: |
+ This module is part of the MAX77650 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/max77650.yaml.
+
+ The charger is represented as a sub-node of the PMIC node on the device tree.
+
+properties:
+ compatible:
+ const: maxim,max77650-charger
+
+ input-voltage-min-microvolt:
+ description:
+ Minimum CHGIN regulation voltage.
+ enum: [ 4000000, 4100000, 4200000, 4300000,
+ 4400000, 4500000, 4600000, 4700000 ]
+
+ input-current-limit-microamp:
+ description:
+ CHGIN input current limit (in microamps).
+ enum: [ 95000, 190000, 285000, 380000, 475000 ]
+
+required:
+ - compatible
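The converted schema no longer carries an example node; the charger sub-node from the removed txt binding still validates against it and is reproduced here as a reference (the values are the illustrative ones from that document):

    charger {
        compatible = "maxim,max77650-charger";
        input-voltage-min-microvolt = <4200000>;
        input-current-limit-microamp = <285000>;
    };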
diff --git a/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt
index 8d1b8200ebd0..54b9f9d0f90f 100644
--- a/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt
+++ b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt
@@ -4,7 +4,7 @@ Device Tree Bindings for the Xilinx Zynq MPSoC PM domains
The binding for zynqmp-power-controller follows the common
generic PM domain binding[1].
-[1] Documentation/devicetree/bindings/power/power_domain.txt
+[1] Documentation/devicetree/bindings/power/power-domain.yaml
== Zynq MPSoC Generic PM Domain Node ==
diff --git a/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
index cfda0d57d302..afa501bf7f94 100644
--- a/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/atmel-hlcdc-pwm.txt
@@ -10,7 +10,7 @@ Required properties:
- pinctrl-0: should contain the pinctrl states described by pinctrl
default.
- #pwm-cells: should be set to 3. This PWM chip uses the default 3 cells
- bindings defined in pwm.txt in this directory.
+ bindings defined in pwm.yaml in this directory.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
index 591ecdd39c7b..fbb5325be1f0 100644
--- a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt
@@ -7,7 +7,7 @@ Required properties:
- "atmel,sama5d2-pwm"
- "microchip,sam9x60-pwm"
- reg: physical base address and length of the controller's registers
- - #pwm-cells: Should be 3. See pwm.txt in this directory for a
+ - #pwm-cells: Should be 3. See pwm.yaml in this directory for a
description of the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
index 8031148bcf85..985fcc65f8c4 100644
--- a/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
@@ -2,7 +2,7 @@ Atmel TCB PWM controller
Required properties:
- compatible: should be "atmel,tcb-pwm"
-- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
the cells format. The only third cell flag supported by this binding is
PWM_POLARITY_INVERTED.
- tc-block: The Timer Counter block to use as a PWM chip.
diff --git a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt
index d9254a6da5ed..0e662d7f6bd1 100644
--- a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible: must be "brcm,bcm7038-pwm"
- reg: physical base address and length for this controller
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description
of the cells format
- clocks: a phandle to the reference clock for this block which is fed through
its internal variable clock frequency generator
diff --git a/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
index 21f75bbd6dae..655f6cd4ef46 100644
--- a/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/brcm,iproc-pwm.txt
@@ -6,7 +6,7 @@ Required Properties :
- compatible: must be "brcm,iproc-pwm"
- reg: physical base address and length of the controller's registers
- clocks: phandle + clock specifier pair for the external clock
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a
description of the cells format.
Refer to clocks/clock-bindings.txt for generic clock consumer properties.
diff --git a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
index 8eae9fe7841c..c42eecfc81ed 100644
--- a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
@@ -6,7 +6,7 @@ Required Properties :
- compatible: should contain "brcm,kona-pwm"
- reg: physical base address and length of the controller's registers
- clocks: phandle + clock specifier pair for the external clock
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a
description of the cells format.
Refer to clocks/clock-bindings.txt for generic clock consumer properties.
diff --git a/Documentation/devicetree/bindings/pwm/img-pwm.txt b/Documentation/devicetree/bindings/pwm/img-pwm.txt
index fade5f26fcac..9db6de97317d 100644
--- a/Documentation/devicetree/bindings/pwm/img-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/img-pwm.txt
@@ -8,7 +8,7 @@ Required properties:
- clock-names: Must include the following entries.
- pwm: PWM operating clock.
- sys: PWM system interface clock.
- - #pwm-cells: Should be 2. See pwm.txt in this directory for the
+ - #pwm-cells: Should be 2. See pwm.yaml in this directory for the
description of the cells format.
- img,cr-periph: Must contain a phandle to the peripheral control
syscon node which contains PWM control registers.
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
index c61bdf8cd41b..22f1c3d8b773 100644
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
@@ -6,7 +6,7 @@ Required properties:
- "fsl,imx1-pwm" for PWM compatible with the one integrated on i.MX1
- "fsl,imx27-pwm" for PWM compatible with the one integrated on i.MX27
- reg: physical base address and length of the controller's registers
-- #pwm-cells: 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.txt
+- #pwm-cells: 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.yaml
in this directory for a description of the cells format.
- clocks : Clock specifiers for both ipg and per clocks.
- clock-names : Clock names should include both "ipg" and "per"
diff --git a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
index 3ba958d764ff..5bf20950a24e 100644
--- a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
@@ -3,7 +3,7 @@ Freescale i.MX TPM PWM controller
Required properties:
- compatible : Should be "fsl,imx7ulp-pwm".
- reg: Physical base address and length of the controller's registers.
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of the cells format.
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of the cells format.
- clocks : The clock provided by the SoC to drive the PWM.
- interrupts: The interrupt for the PWM controller.
diff --git a/Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt b/Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt
index 36e49d4325cd..43d9f4f08a2e 100644
--- a/Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt
@@ -7,7 +7,7 @@ Required properties:
See ../clock/clock-bindings.txt for details.
- clock-names: Must include the following entries.
- pwm: PWM operating clock.
- - #pwm-cells: Should be 3. See pwm.txt in this directory for the description
+ - #pwm-cells: Should be 3. See pwm.yaml in this directory for the description
of the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
index 96cdde5f6208..1b06f86a7091 100644
--- a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
@@ -3,7 +3,7 @@ Freescale MXS PWM controller
Required properties:
- compatible: should be "fsl,imx23-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
the cells format.
- fsl,pwm-number: the number of PWM devices
diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
index c57e11b8d937..0a69eadf44ce 100644
--- a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
@@ -10,7 +10,7 @@ Required properties:
- "nvidia,tegra210-pwm", "nvidia,tegra20-pwm": for Tegra210
- "nvidia,tegra186-pwm": for Tegra186
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
the cells format.
- clocks: Must contain one entry, for the module clock.
See ../clocks/clock-bindings.txt for details.
diff --git a/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt b/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt
index f84ec9d291ea..f21b55c95738 100644
--- a/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt
@@ -3,7 +3,7 @@ NXP PCA9685 16-channel 12-bit PWM LED controller
Required properties:
- compatible: "nxp,pca9685-pwm"
- - #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+ - #pwm-cells: Should be 2. See pwm.yaml in this directory for a description of
the cells format.
The index 16 is the ALLCALL channel, that sets all PWM channels at the same
time.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
index 8cf87d1bfca5..f5753b3f79df 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
@@ -6,7 +6,7 @@ Required properties:
- clocks: This clock defines the base clock frequency of the PWM hardware
system, the period and the duty_cycle of the PWM signal is a multiple of
the base period.
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
the cells format.
Examples:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-berlin.txt b/Documentation/devicetree/bindings/pwm/pwm-berlin.txt
index 82cbe16fcbbc..f01e993a498a 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-berlin.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-berlin.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible: should be "marvell,berlin-pwm"
- reg: physical base address and length of the controller's registers
- clocks: phandle to the input clock
-- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
index 576ad002bc83..36532cd5ab25 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
@@ -21,7 +21,7 @@ Required properties:
- "fsl,vf610-ftm-pwm" for PWM compatible with the one integrated on VF610
- "fsl,imx8qm-ftm-pwm" for PWM compatible with the one integrated on i.MX8QM
- reg: Physical base address and length of the controller's registers
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
the cells format.
- clock-names: Should include the following module clock source entries:
"ftm_sys" (module clock, also can be used as counter clock),
diff --git a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
index daedfef09bb6..54dbc2a0e648 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
@@ -10,7 +10,7 @@ Required properties:
- reg: physical base address and length of the controller's registers.
- clocks: phandle and clock specifier of the PWM reference clock.
- resets: phandle and reset specifier for the PWM controller reset.
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-lp3943.txt b/Documentation/devicetree/bindings/pwm/pwm-lp3943.txt
index 7bd9d3b12ce1..f214305a8f5e 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-lp3943.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-lp3943.txt
@@ -2,7 +2,7 @@ TI/National Semiconductor LP3943 PWM controller
Required properties:
- compatible: "ti,lp3943-pwm"
- - #pwm-cells: Should be 2. See pwm.txt in this directory for a
+ - #pwm-cells: Should be 2. See pwm.yaml in this directory for a
description of the cells format.
Note that this hardware limits the period length to the
range 6250~1600000.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
index c8501530173c..69cae11d80a6 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mediatek.txt
@@ -9,7 +9,7 @@ Required properties:
- "mediatek,mt7629-pwm", "mediatek,mt7622-pwm": found on mt7629 SoC.
- "mediatek,mt8516-pwm": found on mt8516 SoC.
- reg: physical base address and length of the controller's registers.
- - #pwm-cells: must be 2. See pwm.txt in this directory for a description of
+ - #pwm-cells: must be 2. See pwm.yaml in this directory for a description of
the cell format.
- clocks: phandle and clock specifier of the PWM reference clock.
- clock-names: must contain the following, except for MT7628 which
diff --git a/Documentation/devicetree/bindings/pwm/pwm-meson.txt b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
index 891632354065..bd02b0a1496f 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-meson.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
@@ -10,7 +10,7 @@ Required properties:
or "amlogic,meson-g12a-ee-pwm"
or "amlogic,meson-g12a-ao-pwm-ab"
or "amlogic,meson-g12a-ao-pwm-cd"
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
the cells format.
Optional properties:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt b/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
index 6f8af2bcc7b7..0521957c253f 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-mtk-disp.txt
@@ -6,7 +6,7 @@ Required properties:
- "mediatek,mt6595-disp-pwm": found on mt6595 SoC.
- "mediatek,mt8173-disp-pwm": found on mt8173 SoC.
- reg: physical base address and length of the controller's registers.
- - #pwm-cells: must be 2. See pwm.txt in this directory for a description of
+ - #pwm-cells: must be 2. See pwm.yaml in this directory for a description of
the cell format.
- clocks: phandle and clock specifier of the PWM reference clock.
- clock-names: must contain the following:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
index 5ccfcc82da08..d722ae3be363 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-omap-dmtimer.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible: Shall contain "ti,omap-dmtimer-pwm".
- ti,timers: phandle to PWM capable OMAP timer. See timer/ti,timer.txt for info
about these timers.
-- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
the cells format.
Optional properties:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt
index 2c5e52a5bede..f70956dea77b 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt
@@ -14,7 +14,7 @@ Required properties:
- For newer hardware (rk3328 and future socs): specified by name
- "pwm": This is used to derive the functional clock.
- "pclk": This is the APB bus clock.
- - #pwm-cells: must be 2 (rk2928) or 3 (rk3288). See pwm.txt in this directory
+ - #pwm-cells: must be 2 (rk2928) or 3 (rk3288). See pwm.yaml in this directory
for a description of the cell format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
deleted file mode 100644
index 5538de9c2007..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-* Samsung PWM timers
-
-Samsung SoCs contain PWM timer blocks which can be used for system clock source
-and clock event timers, as well as to drive SoC outputs with PWM signal. Each
-PWM timer block provides 5 PWM channels (not all of them can drive physical
-outputs - see SoC and board manual).
-
-Be aware that the clocksource driver supports only uniprocessor systems.
-
-Required properties:
-- compatible : should be one of following:
- samsung,s3c2410-pwm - for 16-bit timers present on S3C24xx SoCs
- samsung,s3c6400-pwm - for 32-bit timers present on S3C64xx SoCs
- samsung,s5p6440-pwm - for 32-bit timers present on S5P64x0 SoCs
- samsung,s5pc100-pwm - for 32-bit timers present on S5PC100, S5PV210,
- Exynos4210 rev0 SoCs
- samsung,exynos4210-pwm - for 32-bit timers present on Exynos4210,
- Exynos4x12, Exynos5250 and Exynos5420 SoCs
-- reg: base address and size of register area
-- interrupts: list of timer interrupts (one interrupt per timer, starting at
- timer 0)
-- clock-names: should contain all following required clock names:
- - "timers" - PWM base clock used to generate PWM signals,
- and any subset of following optional clock names:
- - "pwm-tclk0" - first external PWM clock source,
- - "pwm-tclk1" - second external PWM clock source.
- Note that not all IP variants allow using all external clock sources.
- Refer to SoC documentation to learn which clock source configurations
- are available.
-- clocks: should contain clock specifiers of all clocks, which input names
- have been specified in clock-names property, in same order.
-- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
- the cells format. The only third cell flag supported by this binding is
- PWM_POLARITY_INVERTED.
-
-Optional properties:
-- samsung,pwm-outputs: list of PWM channels used as PWM outputs on particular
- platform - an array of up to 5 elements being indices of PWM channels
- (from 0 to 4), the order does not matter.
-
-Example:
- pwm@7f006000 {
- compatible = "samsung,s3c6400-pwm";
- reg = <0x7f006000 0x1000>;
- interrupt-parent = <&vic0>;
- interrupts = <23>, <24>, <25>, <27>, <28>;
- clocks = <&clock 67>;
- clock-names = "timers";
- samsung,pwm-outputs = <0>, <1>;
- #pwm-cells = <3>;
- }
diff --git a/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml b/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml
new file mode 100644
index 000000000000..ea7f32905172
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm-samsung.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC PWM timers
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |+
+ Samsung SoCs contain PWM timer blocks which can be used for system clock source
+ and clock event timers, as well as to drive SoC outputs with PWM signal. Each
+ PWM timer block provides 5 PWM channels (not all of them can drive physical
+ outputs - see SoC and board manual).
+
+ Be aware that the clocksource driver supports only uniprocessor systems.
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ enum:
+ - samsung,s3c2410-pwm # 16-bit, S3C24xx
+ - samsung,s3c6400-pwm # 32-bit, S3C64xx
+ - samsung,s5p6440-pwm # 32-bit, S5P64x0
+ - samsung,s5pc100-pwm # 32-bit, S5PC100, S5PV210, Exynos4210 rev0 SoCs
+ - samsung,exynos4210-pwm # 32-bit, Exynos
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ description: |
+ Should contain all following required clock names:
+ - "timers" - PWM base clock used to generate PWM signals,
+ and any subset of following optional clock names:
+ - "pwm-tclk0" - first external PWM clock source,
+ - "pwm-tclk1" - second external PWM clock source.
+ Note that not all IP variants allow using all external clock sources.
+ Refer to SoC documentation to learn which clock source configurations
+ are available.
+ oneOf:
+ - items:
+ - const: timers
+ - items:
+ - const: timers
+ - const: pwm-tclk0
+ - items:
+ - const: timers
+ - const: pwm-tclk1
+ - items:
+ - const: timers
+ - const: pwm-tclk0
+ - const: pwm-tclk1
+
+ interrupts:
+ description:
+ One interrupt per timer, starting at timer 0.
+ minItems: 1
+ maxItems: 5
+
+ "#pwm-cells":
+ description:
+ The only third cell flag supported by this binding
+ is PWM_POLARITY_INVERTED.
+ const: 3
+
+ samsung,pwm-outputs:
+ description:
+ A list of PWM channels used as PWM outputs on a particular platform.
+ It is an array of up to 5 elements, each an index of a PWM channel
+ (from 0 to 4); the order does not matter.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - uniqueItems: true
+ - items:
+ minimum: 0
+ maximum: 4
+
+required:
+ - clocks
+ - clock-names
+ - compatible
+ - interrupts
+ - "#pwm-cells"
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ pwm@7f006000 {
+ compatible = "samsung,s3c6400-pwm";
+ reg = <0x7f006000 0x1000>;
+ interrupt-parent = <&vic0>;
+ interrupts = <23>, <24>, <25>, <27>, <28>;
+ clocks = <&clock 67>;
+ clock-names = "timers";
+ samsung,pwm-outputs = <0>, <1>;
+ #pwm-cells = <3>;
+ };
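The clock-names oneOf above also admits the optional external clock sources; a sketch of a node wiring up both tclk inputs, where the second and third &clock specifiers are placeholders (consult the SoC manual for the real clock indices and for which sources the IP variant actually supports):

    pwm@7f006000 {
        compatible = "samsung,s3c6400-pwm";
        reg = <0x7f006000 0x1000>;
        interrupt-parent = <&vic0>;
        interrupts = <23>, <24>, <25>, <27>, <28>;
        clocks = <&clock 67>, <&clock 68>, <&clock 69>;
        clock-names = "timers", "pwm-tclk0", "pwm-tclk1";
        #pwm-cells = <3>;
    };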
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sifive.txt b/Documentation/devicetree/bindings/pwm/pwm-sifive.txt
index 36447e3c9378..3d1dd7b06efc 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sifive.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-sifive.txt
@@ -17,7 +17,7 @@ Required properties:
Please refer to sifive-blocks-ip-versioning.txt for details.
- reg: physical base address and length of the controller's registers
- clocks: Should contain a clock identifier for the PWM's parent clock.
-- #pwm-cells: Should be 3. See pwm.txt in this directory
+- #pwm-cells: Should be 3. See pwm.yaml in this directory
for a description of the cell format.
- interrupts: one interrupt per PWM channel
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sprd.txt b/Documentation/devicetree/bindings/pwm/pwm-sprd.txt
index 16fa5a096206..87b206fd0618 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sprd.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-sprd.txt
@@ -9,7 +9,7 @@ Required properties:
- clock-names: Should contain following entries:
"pwmn": used to derive the functional clock for PWM channel n (n range: 0 ~ 3).
"enablen": for PWM channel n enable clock (n range: 0 ~ 3).
-- #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: Should be 2. See pwm.yaml in this directory for a description of
the cells format.
Optional properties:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
deleted file mode 100644
index 6521bc44a74e..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-STMicroelectronics STM32 Low-Power Timer PWM
-
-STM32 Low-Power Timer provides single channel PWM.
-
-Must be a sub-node of an STM32 Low-Power Timer device tree node.
-See ../mfd/stm32-lptimer.txt for details about the parent node.
-
-Required parameters:
-- compatible: Must be "st,stm32-pwm-lp".
-- #pwm-cells: Should be set to 3. This PWM chip uses the default 3 cells
- bindings defined in pwm.txt.
-
-Optional properties:
-- pinctrl-names: Set to "default". An additional "sleep" state can be
- defined to set pins in sleep state when in low power.
-- pinctrl-n: Phandle(s) pointing to pin configuration node for PWM,
- respectively for "default" and "sleep" states.
-
-Example:
- timer@40002400 {
- compatible = "st,stm32-lptimer";
- ...
- pwm {
- compatible = "st,stm32-pwm-lp";
- #pwm-cells = <3>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&lppwm1_pins>;
- pinctrl-1 = <&lppwm1_sleep_pins>;
- };
- };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-stm32.txt b/Documentation/devicetree/bindings/pwm/pwm-stm32.txt
deleted file mode 100644
index a8690bfa5e1f..000000000000
--- a/Documentation/devicetree/bindings/pwm/pwm-stm32.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-STMicroelectronics STM32 Timers PWM bindings
-
-Must be a sub-node of an STM32 Timers device tree node.
-See ../mfd/stm32-timers.txt for details about the parent node.
-
-Required parameters:
-- compatible: Must be "st,stm32-pwm".
-- pinctrl-names: Set to "default".
-- pinctrl-0: List of phandles pointing to pin configuration nodes for PWM module.
- For Pinctrl properties see ../pinctrl/pinctrl-bindings.txt
-- #pwm-cells: Should be set to 3. This PWM chip uses the default 3 cells
- bindings defined in pwm.txt.
-
-Optional parameters:
-- st,breakinput: One or two <index level filter> to describe break input configurations.
- "index" indicates on which break input (0 or 1) the configuration
- should be applied.
- "level" gives the active level (0=low or 1=high) of the input signal
- for this configuration.
- "filter" gives the filtering value to be applied.
-
-Example:
- timers@40010000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "st,stm32-timers";
- reg = <0x40010000 0x400>;
- clocks = <&rcc 0 160>;
- clock-names = "int";
-
- pwm {
- compatible = "st,stm32-pwm";
- #pwm-cells = <3>;
- pinctrl-0 = <&pwm1_pins>;
- pinctrl-names = "default";
- st,breakinput = <0 1 5>;
- };
- };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
index b9a1d7402128..c7c4347a769a 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
@@ -8,7 +8,7 @@ Required properties:
for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap";
for 66ak2g - compatible = "ti,k2g-ecap", "ti,am3352-ecap";
for am654 - compatible = "ti,am654-ecap", "ti,am3352-ecap";
-- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
the cells format. The PWM channel index ranges from 0 to 4. The only third
cell flag supported by this binding is PWM_POLARITY_INVERTED.
- reg: physical base address and size of the registers map.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
index 31c4577157dd..c7e28f6d28be 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
@@ -7,7 +7,7 @@ Required properties:
for am654 - compatible = "ti,am654-ehrpwm", "ti-am3352-ehrpwm";
for da850 - compatible = "ti,da850-ehrpwm", "ti-am3352-ehrpwm", "ti,am33xx-ehrpwm";
for dra746 - compatible = "ti,dra746-ehrpwm", "ti-am3352-ehrpwm";
-- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
the cells format. The only third cell flag supported by this binding is
PWM_POLARITY_INVERTED.
- reg: physical base address and size of the registers map.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-zx.txt b/Documentation/devicetree/bindings/pwm/pwm-zx.txt
index a6bcc75c9164..3c8fe7aa8269 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-zx.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-zx.txt
@@ -7,7 +7,7 @@ Required properties:
- clock-names: "pclk" for PCLK, "wclk" for WCLK to the PWM controller. The
PCLK is for register access, while WCLK is the reference clock for
calculating period and duty cycles.
- - #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+ - #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt
index 8556263b8502..084886bd721e 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm.txt
@@ -57,13 +57,4 @@ Example with optional PWM specifier for inverse polarity
2) PWM controller nodes
-----------------------
-PWM controller nodes must specify the number of cells used for the
-specifier using the '#pwm-cells' property.
-
-An example PWM controller might look like this:
-
- pwm: pwm@7000a000 {
- compatible = "nvidia,tegra20-pwm";
- reg = <0x7000a000 0x100>;
- #pwm-cells = <2>;
- };
+See pwm.yaml.
diff --git a/Documentation/devicetree/bindings/pwm/pwm.yaml b/Documentation/devicetree/bindings/pwm/pwm.yaml
new file mode 100644
index 000000000000..fa4f9de92090
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm.yaml
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PWM controllers (providers)
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+
+properties:
+ $nodename:
+ pattern: "^pwm(@.*|-[0-9a-f])*$"
+
+ "#pwm-cells":
+ description:
+ Number of cells in a PWM specifier.
+
+required:
+ - "#pwm-cells"
+
+examples:
+ - |
+ pwm: pwm@7000a000 {
+ compatible = "nvidia,tegra20-pwm";
+ reg = <0x7000a000 0x100>;
+ #pwm-cells = <2>;
+ };
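The "#pwm-cells" value fixes the shape of the specifier that consumers pass in their "pwms" property; with the two cells of the example above, a client node references a channel index and a period in nanoseconds, roughly as follows (the backlight node and the 5000000 ns period are illustrative only):

    backlight {
        pwms = <&pwm 0 5000000>;
    };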
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
deleted file mode 100644
index fbd6a4f943ce..000000000000
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-* Renesas R-Car PWM Timer Controller
-
-Required Properties:
-- compatible: should be "renesas,pwm-rcar" and one of the following.
- - "renesas,pwm-r8a7743": for RZ/G1M
- - "renesas,pwm-r8a7744": for RZ/G1N
- - "renesas,pwm-r8a7745": for RZ/G1E
- - "renesas,pwm-r8a774a1": for RZ/G2M
- - "renesas,pwm-r8a774c0": for RZ/G2E
- - "renesas,pwm-r8a7778": for R-Car M1A
- - "renesas,pwm-r8a7779": for R-Car H1
- - "renesas,pwm-r8a7790": for R-Car H2
- - "renesas,pwm-r8a7791": for R-Car M2-W
- - "renesas,pwm-r8a7794": for R-Car E2
- - "renesas,pwm-r8a7795": for R-Car H3
- - "renesas,pwm-r8a7796": for R-Car M3-W
- - "renesas,pwm-r8a77965": for R-Car M3-N
- - "renesas,pwm-r8a77970": for R-Car V3M
- - "renesas,pwm-r8a77980": for R-Car V3H
- - "renesas,pwm-r8a77990": for R-Car E3
- - "renesas,pwm-r8a77995": for R-Car D3
-- reg: base address and length of the registers block for the PWM.
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
- the cells format.
-- clocks: clock phandle and specifier pair.
-- pinctrl-0: phandle, referring to a default pin configuration node.
-- pinctrl-names: Set to "default".
-
-Example: R8A7743 (RZ/G1M) PWM Timer node
-
- pwm0: pwm@e6e30000 {
- compatible = "renesas,pwm-r8a7743", "renesas,pwm-rcar";
- reg = <0 0xe6e30000 0 0x8>;
- clocks = <&cpg CPG_MOD 523>;
- power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
- resets = <&cpg 523>;
- #pwm-cells = <2>;
- pinctrl-0 = <&pwm0_pins>;
- pinctrl-names = "default";
- };
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
new file mode 100644
index 000000000000..945c14e1be35
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/renesas,pwm-rcar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car PWM Timer Controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,pwm-r8a7743 # RZ/G1M
+ - renesas,pwm-r8a7744 # RZ/G1N
+ - renesas,pwm-r8a7745 # RZ/G1E
+ - renesas,pwm-r8a77470 # RZ/G1C
+ - renesas,pwm-r8a774a1 # RZ/G2M
+ - renesas,pwm-r8a774b1 # RZ/G2N
+ - renesas,pwm-r8a774c0 # RZ/G2E
+ - renesas,pwm-r8a7778 # R-Car M1A
+ - renesas,pwm-r8a7779 # R-Car H1
+ - renesas,pwm-r8a7790 # R-Car H2
+ - renesas,pwm-r8a7791 # R-Car M2-W
+ - renesas,pwm-r8a7794 # R-Car E2
+ - renesas,pwm-r8a7795 # R-Car H3
+ - renesas,pwm-r8a7796 # R-Car M3-W
+ - renesas,pwm-r8a77965 # R-Car M3-N
+ - renesas,pwm-r8a77970 # R-Car V3M
+ - renesas,pwm-r8a77980 # R-Car V3H
+ - renesas,pwm-r8a77990 # R-Car E3
+ - renesas,pwm-r8a77995 # R-Car D3
+ - const: renesas,pwm-rcar
+
+ reg:
+ # base address and length of the registers block for the PWM.
+ maxItems: 1
+
+ '#pwm-cells':
+ # should be 2. See pwm.yaml in this directory for a description of
+ # the cells format.
+ const: 2
+
+ clocks:
+ # clock phandle and specifier pair.
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#pwm-cells'
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7743-cpg-mssr.h>
+ #include <dt-bindings/power/r8a7743-sysc.h>
+
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a7743", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ pinctrl-0 = <&pwm0_pins>;
+ pinctrl-names = "default";
+ };
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
deleted file mode 100644
index 848a92b53d81..000000000000
--- a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-* Renesas R-Car Timer Pulse Unit PWM Controller
-
-Required Properties:
-
- - compatible: must contain one or more of the following:
- - "renesas,tpu-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible PWM controller.
- - "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller.
- - "renesas,tpu-r8a7743": for R8A7743 (RZ/G1M) compatible PWM controller.
- - "renesas,tpu-r8a7744": for R8A7744 (RZ/G1N) compatible PWM controller.
- - "renesas,tpu-r8a7745": for R8A7745 (RZ/G1E) compatible PWM controller.
- - "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller.
- - "renesas,tpu-r8a77970": for R8A77970 (R-Car V3M) compatible PWM
- controller.
- - "renesas,tpu-r8a77980": for R8A77980 (R-Car V3H) compatible PWM
- controller.
- - "renesas,tpu": for the generic TPU PWM controller; this is a fallback for
- the entries listed above.
-
- - reg: Base address and length of each memory resource used by the PWM
- controller hardware module.
-
- - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
- the cells format. The only third cell flag supported by this binding is
- PWM_POLARITY_INVERTED.
-
-Please refer to pwm.txt in this directory for details of the common PWM bindings
-used by client devices.
-
-Example: R8A7740 (R-Mobile A1) TPU controller node
-
- tpu: pwm@e6600000 {
- compatible = "renesas,tpu-r8a7740", "renesas,tpu";
- reg = <0xe6600000 0x148>;
- #pwm-cells = <3>;
- };
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
new file mode 100644
index 000000000000..4969a954993c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/renesas,tpu-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car Timer Pulse Unit PWM Controller
+
+maintainers:
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,tpu-r8a73a4 # R-Mobile APE6
+ - renesas,tpu-r8a7740 # R-Mobile A1
+ - renesas,tpu-r8a7743 # RZ/G1M
+ - renesas,tpu-r8a7744 # RZ/G1N
+ - renesas,tpu-r8a7745 # RZ/G1E
+ - renesas,tpu-r8a7790 # R-Car H2
+ - renesas,tpu-r8a7795 # R-Car H3
+ - renesas,tpu-r8a7796 # R-Car M3-W
+ - renesas,tpu-r8a77965 # R-Car M3-N
+ - renesas,tpu-r8a77970 # R-Car V3M
+ - renesas,tpu-r8a77980 # R-Car V3H
+ - const: renesas,tpu
+
+ reg:
+ # Base address and length of each memory resource used by the PWM
+ # controller hardware module.
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#pwm-cells':
+ # should be 3. See pwm.yaml in this directory for a description of
+ # the cells format. The only third cell flag supported by this binding is
+ # PWM_POLARITY_INVERTED.
+ const: 3
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - '#pwm-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7740-clock.h>
+
+ tpu: pwm@e6600000 {
+ compatible = "renesas,tpu-r8a7740", "renesas,tpu";
+ reg = <0xe6600000 0x148>;
+ clocks = <&mstp3_clks R8A7740_CLK_TPU0>;
+ power-domains = <&pd_a3sp>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/spear-pwm.txt b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
index b486de2c3fe3..95894128b62f 100644
--- a/Documentation/devicetree/bindings/pwm/spear-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
@@ -5,7 +5,7 @@ Required properties:
- "st,spear320-pwm"
- "st,spear1340-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt b/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt
index cb209646bf13..f401316e0248 100644
--- a/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/st,stmpe-pwm.txt
@@ -7,7 +7,7 @@ subdevices of the STMPE MFD device.
Required properties:
- compatible: should be:
- "st,stmpe-pwm"
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
index 4e32bee11201..d97ca1964e94 100644
--- a/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
@@ -6,7 +6,7 @@ On TWL6030 series: PWM0 and PWM1
Required properties:
- compatible: "ti,twl4030-pwm" or "ti,twl6030-pwm"
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
index 9f4b46090782..31ca1b032ef0 100644
--- a/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
@@ -6,7 +6,7 @@ On TWL6030 series: LED PWM (mainly used as charging indicator LED)
Required properties:
- compatible: "ti,twl4030-pwmled" or "ti,twl6030-pwmled"
-- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 2. See pwm.yaml in this directory for a description of
the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
index a76390e6df2e..4fba93ce1985 100644
--- a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
@@ -3,7 +3,7 @@ VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
Required properties:
- compatible: should be "via,vt8500-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
the cells format. The only third cell flag supported by this binding is
PWM_POLARITY_INVERTED.
- clocks: phandle to the PWM source clock
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index 59b4b73d4051..3dbb9cf86f15 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -68,7 +68,6 @@ properties:
vin-supply:
description: Input supply phandle.
- $ref: /schemas/types.yaml#/definitions/phandle
required:
- compatible
diff --git a/Documentation/devicetree/bindings/regulator/max77650-regulator.txt b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt
deleted file mode 100644
index f1cbe813c30f..000000000000
--- a/Documentation/devicetree/bindings/regulator/max77650-regulator.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Regulator driver for MAX77650 PMIC from Maxim Integrated.
-
-This module is part of the MAX77650 MFD device. For more details
-see Documentation/devicetree/bindings/mfd/max77650.txt.
-
-The regulator controller is represented as a sub-node of the PMIC node
-on the device tree.
-
-The device has a single LDO regulator and a SIMO buck-boost regulator with
-three independent power rails.
-
-Required properties:
---------------------
-- compatible: Must be "maxim,max77650-regulator"
-
-Each rail must be instantiated under the regulators subnode of the top PMIC
-node. Up to four regulators can be defined. For standard regulator properties
-refer to Documentation/devicetree/bindings/regulator/regulator.txt.
-
-Available regulator compatible strings are: "ldo", "sbb0", "sbb1", "sbb2".
-
-Example:
---------
-
- regulators {
- compatible = "maxim,max77650-regulator";
-
- max77650_ldo: regulator@0 {
- regulator-compatible = "ldo";
- regulator-name = "max77650-ldo";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <2937500>;
- };
-
- max77650_sbb0: regulator@1 {
- regulator-compatible = "sbb0";
- regulator-name = "max77650-sbb0";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1587500>;
- };
- };
diff --git a/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml b/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml
new file mode 100644
index 000000000000..7d724159f890
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max77650-regulator.yaml
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/max77650-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Regulator driver for MAX77650 PMIC from Maxim Integrated.
+
+maintainers:
+ - Bartosz Golaszewski <bgolaszewski@baylibre.com>
+
+description: |
+ This module is part of the MAX77650 MFD device. For more details
+ see Documentation/devicetree/bindings/mfd/max77650.yaml.
+
+ The regulator controller is represented as a sub-node of the PMIC node
+ on the device tree.
+
+ The device has a single LDO regulator and a SIMO buck-boost regulator with
+ three independent power rails.
+
+properties:
+ compatible:
+ const: maxim,max77650-regulator
+
+patternProperties:
+ "^regulator@[0-3]$":
+ $ref: "regulator.yaml#"
+
+required:
+ - compatible
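As with the charger conversion above, the schema drops the old example; the regulators node from the removed txt document still matches the new "^regulator@[0-3]$" patternProperties and is reproduced here as a sketch:

    regulators {
        compatible = "maxim,max77650-regulator";

        max77650_ldo: regulator@0 {
            regulator-compatible = "ldo";
            regulator-name = "max77650-ldo";
            regulator-min-microvolt = <1350000>;
            regulator-max-microvolt = <2937500>;
        };
    };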
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index 41ca5df5be5a..c416746f93cf 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -12,6 +12,7 @@ on the Qualcomm Hexagon core.
"qcom,msm8916-mss-pil",
"qcom,msm8974-mss-pil"
"qcom,msm8996-mss-pil"
+ "qcom,msm8998-mss-pil"
"qcom,sdm845-mss-pil"
- reg:
@@ -41,6 +42,7 @@ on the Qualcomm Hexagon core.
qcom,msm8974-mss-pil:
must be "wdog", "fatal", "ready", "handover", "stop-ack"
qcom,msm8996-mss-pil:
+ qcom,msm8998-mss-pil:
qcom,sdm845-mss-pil:
must be "wdog", "fatal", "ready", "handover", "stop-ack",
"shutdown-ack"
@@ -70,6 +72,9 @@ on the Qualcomm Hexagon core.
qcom,msm8996-mss-pil:
must be "iface", "bus", "mem", "xo", "gpll0_mss",
"snoc_axi", "mnoc_axi", "pnoc", "qdss"
+ qcom,msm8998-mss-pil:
+ must be "iface", "bus", "mem", "xo", "gpll0_mss",
+ "snoc_axi", "mnoc_axi", "qdss"
qcom,sdm845-mss-pil:
must be "iface", "bus", "mem", "xo", "gpll0_mss",
"snoc_axi", "mnoc_axi", "prng"
@@ -137,6 +142,7 @@ For the compatible string below the following supplies are required:
qcom,msm8974-mss-pil:
no power-domain names required
qcom,msm8996-mss-pil:
+ qcom,msm8998-mss-pil:
must be "cx", "mx"
qcom,sdm845-mss-pil:
must be "cx", "mx", "mss", "load_state"
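Pulling the msm8998 additions of this hunk together, the clock and power-domain name properties of such a node would look like the following fragment (only the names this document mandates, not a complete node):

    clock-names = "iface", "bus", "mem", "xo", "gpll0_mss",
                  "snoc_axi", "mnoc_axi", "qdss";
    power-domain-names = "cx", "mx";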
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
new file mode 100644
index 000000000000..acf18d170352
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -0,0 +1,128 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/remoteproc/st,stm32-rproc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: STMicroelectronics STM32 remote processor controller bindings
+
+description:
+ This document defines the binding for the remoteproc component that loads and
+ boots firmware on the STM32MP family chipset.
+
+maintainers:
+ - Fabien Dessenne <fabien.dessenne@st.com>
+ - Arnaud Pouliquen <arnaud.pouliquen@st.com>
+
+properties:
+ compatible:
+ const: st,stm32mp1-m4
+
+ reg:
+ description:
+ Address ranges of the RETRAM and MCU SRAM memories used by the remote
+ processor.
+ maxItems: 3
+
+ resets:
+ maxItems: 1
+
+ st,syscfg-holdboot:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ description:
+ Reference to the system configuration which holds the remote
+ processor reset hold boot
+ - Phandle of syscon block.
+ - The offset of the hold boot setting register.
+ - The field mask of the hold boot.
+ maxItems: 1
+
+ st,syscfg-tz:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ description:
+ Reference to the system configuration which holds the RCC trust zone mode
+ - Phandle of syscon block.
+ - The offset of the RCC trust zone mode register.
+ - The field mask of the RCC trust zone mode.
+ maxItems: 1
+
+ interrupts:
+ description: Should contain the WWDG1 watchdog reset interrupt
+ maxItems: 1
+
+ mboxes:
+ description:
+ This property is required only if the rpmsg/virtio functionality is used.
+ items:
+ - description: |
+ A channel (a) used to communicate through virtqueues with the
+ remote proc.
+ Bi-directional channel:
+ - from local to remote = send message
+ - from remote to local = send message ack
+ - description: |
+ A channel (b) working the opposite direction of channel (a)
+ - description: |
+ A channel (c) used by the local proc to notify the remote proc that it
+ is about to be shut down.
+ Unidirectional channel:
+ - from local to remote, where ACK from the remote means that it is
+ ready for shutdown
+ minItems: 1
+ maxItems: 3
+
+ mbox-names:
+ items:
+ - const: vq0
+ - const: vq1
+ - const: shutdown
+ minItems: 1
+ maxItems: 3
+
+ memory-region:
+ description:
+ List of phandles to the reserved memory regions associated with the
+ remoteproc device. This is variable and describes the memories shared with
+ the remote processor (e.g. remoteproc firmware and carveouts, rpmsg
+ vrings, ...).
+ (see ../reserved-memory/reserved-memory.txt)
+
+ st,syscfg-pdds:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ description: |
+ Reference to the system configuration which holds the remote
+ processor deep sleep setting
+ 1st cell: phandle to syscon block
+ 2nd cell: register offset containing the deep sleep setting
+ 3rd cell: register bitmask for the deep sleep bit
+ maxItems: 1
+
+ st,auto-boot:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If defined, when remoteproc is probed, it loads the default firmware and
+ starts the remote processor.
+
+required:
+ - compatible
+ - reg
+ - resets
+ - st,syscfg-holdboot
+ - st,syscfg-tz
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ m4_rproc: m4@10000000 {
+ compatible = "st,stm32mp1-m4";
+ reg = <0x10000000 0x40000>,
+ <0x30000000 0x40000>,
+ <0x38000000 0x10000>;
+ resets = <&rcc MCU_R>;
+ st,syscfg-holdboot = <&rcc 0x10C 0x1>;
+ st,syscfg-tz = <&rcc 0x000 0x1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/remoteproc/stm32-rproc.txt b/Documentation/devicetree/bindings/remoteproc/stm32-rproc.txt
deleted file mode 100644
index 5fa915a4b736..000000000000
--- a/Documentation/devicetree/bindings/remoteproc/stm32-rproc.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-STMicroelectronics STM32 Remoteproc
------------------------------------
-This document defines the binding for the remoteproc component that loads and
-boots firmwares on the ST32MP family chipset.
-
-Required properties:
-- compatible: Must be "st,stm32mp1-m4"
-- reg: Address ranges of the RETRAM and MCU SRAM memories used by the
- remote processor.
-- resets: Reference to a reset controller asserting the remote processor.
-- st,syscfg-holdboot: Reference to the system configuration which holds the
- remote processor reset hold boot
- 1st cell: phandle of syscon block
- 2nd cell: register offset containing the hold boot setting
- 3rd cell: register bitmask for the hold boot field
-- st,syscfg-tz: Reference to the system configuration which holds the RCC trust
- zone mode
- 1st cell: phandle to syscon block
- 2nd cell: register offset containing the RCC trust zone mode setting
- 3rd cell: register bitmask for the RCC trust zone mode bit
-
-Optional properties:
-- interrupts: Should contain the watchdog interrupt
-- mboxes: This property is required only if the rpmsg/virtio functionality
- is used. List of phandle and mailbox channel specifiers:
- - a channel (a) used to communicate through virtqueues with the
- remote proc.
- Bi-directional channel:
- - from local to remote = send message
- - from remote to local = send message ack
- - a channel (b) working the opposite direction of channel (a)
- - a channel (c) used by the local proc to notify the remote proc
- that it is about to be shut down.
- Unidirectional channel:
- - from local to remote, where ACK from the remote means
- that it is ready for shutdown
-- mbox-names: This property is required if the mboxes property is used.
- - must be "vq0" for channel (a)
- - must be "vq1" for channel (b)
- - must be "shutdown" for channel (c)
-- memory-region: List of phandles to the reserved memory regions associated with
- the remoteproc device. This is variable and describes the
- memories shared with the remote processor (eg: remoteproc
- firmware and carveouts, rpmsg vrings, ...).
- (see ../reserved-memory/reserved-memory.txt)
-- st,syscfg-pdds: Reference to the system configuration which holds the remote
- processor deep sleep setting
- 1st cell: phandle to syscon block
- 2nd cell: register offset containing the deep sleep setting
- 3rd cell: register bitmask for the deep sleep bit
-- st,auto-boot: If defined, when remoteproc is probed, it loads the default
- firmware and starts the remote processor.
-
-Example:
- m4_rproc: m4@10000000 {
- compatible = "st,stm32mp1-m4";
- reg = <0x10000000 0x40000>,
- <0x30000000 0x40000>,
- <0x38000000 0x10000>;
- resets = <&rcc MCU_R>;
- st,syscfg-holdboot = <&rcc 0x10C 0x1>;
- st,syscfg-tz = <&rcc 0x000 0x1>;
- };
diff --git a/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
deleted file mode 100644
index a13fbdb4bd88..000000000000
--- a/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Exynos Pseudo Random Number Generator
-
-Required properties:
-
-- compatible : One of:
- - "samsung,exynos4-rng" for Exynos4210 and Exynos4412
- - "samsung,exynos5250-prng" for Exynos5250+
-- reg : Specifies base physical address and size of the registers map.
-- clocks : Phandle to clock-controller plus clock-specifier pair.
-- clock-names : "secss" as a clock name.
-
-Example:
-
- rng@10830400 {
- compatible = "samsung,exynos4-rng";
- reg = <0x10830400 0x200>;
- clocks = <&clock CLK_SSS>;
- clock-names = "secss";
- };
diff --git a/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.yaml b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.yaml
new file mode 100644
index 000000000000..3362cb1213c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/samsung,exynos4-rng.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/samsung,exynos4-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC Pseudo Random Number Generator
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos4-rng # for Exynos4210 and Exynos4412
+ - samsung,exynos5250-prng # for Exynos5250+
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: secss
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos4.h>
+
+ rng@10830400 {
+ compatible = "samsung,exynos4-rng";
+ reg = <0x10830400 0x200>;
+ clocks = <&clock CLK_SSS>;
+ clock-names = "secss";
+ };
diff --git a/Documentation/devicetree/bindings/rng/st,stm32-rng.txt b/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
deleted file mode 100644
index 1dfa7d51e006..000000000000
--- a/Documentation/devicetree/bindings/rng/st,stm32-rng.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-STMicroelectronics STM32 HW RNG
-===============================
-
-The STM32 hardware random number generator is a simple fixed purpose IP and
-is fully separated from other crypto functions.
-
-Required properties:
-
-- compatible : Should be "st,stm32-rng"
-- reg : Should be register base and length as documented in the datasheet
-- interrupts : The designated IRQ line for the RNG
-- clocks : The clock needed to enable the RNG
-
-Optional properties:
-- resets : The reset to properly start RNG
-- clock-error-detect : Enable the clock detection management
-
-Example:
-
- rng: rng@50060800 {
- compatible = "st,stm32-rng";
- reg = <0x50060800 0x400>;
- interrupts = <80>;
- clocks = <&rcc 0 38>;
- };
diff --git a/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml b/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml
new file mode 100644
index 000000000000..82bb2e97e889
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/st,stm32-rng.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/st,stm32-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 RNG bindings
+
+description: |
+ The STM32 hardware random number generator is a simple fixed purpose
+ IP and is fully separated from other crypto functions.
+
+maintainers:
+ - Lionel Debieve <lionel.debieve@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-rng
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clock-error-detect:
+ description: If set, enable the clock detection management
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ rng@54003000 {
+ compatible = "st,stm32-rng";
+ reg = <0x54003000 0x400>;
+ clocks = <&rcc RNG1_K>;
+ };
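+
+  # A second, hedged sketch exercising the optional properties above;
+  # the reset specifier is an illustrative assumption.
+  - |
+    #include <dt-bindings/clock/stm32mp1-clks.h>
+    #include <dt-bindings/reset/stm32mp1-resets.h>
+    rng@54003000 {
+      compatible = "st,stm32-rng";
+      reg = <0x54003000 0x400>;
+      clocks = <&rcc RNG1_K>;
+      resets = <&rcc RNG1_R>;
+      clock-error-detect;
+    };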
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml b/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
new file mode 100644
index 000000000000..dcff573cbdb1
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/renesas,sh-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Real Time Clock for Renesas SH and ARM SoCs
+
+maintainers:
+ - Chris Brandt <chris.brandt@renesas.com>
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ items:
+ - const: renesas,r7s72100-rtc # RZ/A1H
+ - const: renesas,sh-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: alarm
+ - const: period
+ - const: carry
+
+ clocks:
+ # The functional clock source for the RTC controller must be listed
+ # first (if it exists). Additionally, potential clock counting sources
+ # are to be listed.
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ # The functional clock must be labeled as "fck". Other clocks
+    # may be named in accordance with the SoC hardware manuals.
+ minItems: 1
+ maxItems: 4
+ items:
+ enum: [ fck, rtc_x1, rtc_x3, extal ]
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/r7s72100-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ rtc: rtc@fcff1000 {
+ compatible = "renesas,r7s72100-rtc", "renesas,sh-rtc";
+ reg = <0xfcff1000 0x2e>;
+ interrupts = <GIC_SPI 276 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 278 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "alarm", "period", "carry";
+ clocks = <&mstp6_clks R7S72100_CLK_RTC>, <&rtc_x1_clk>,
+ <&rtc_x3_clk>, <&extal_clk>;
+ clock-names = "fck", "rtc_x1", "rtc_x3", "extal";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt b/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt
new file mode 100644
index 000000000000..55a0c8874c03
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-mt6397.txt
@@ -0,0 +1,29 @@
+Device-Tree bindings for MediaTek PMIC based RTC
+
+MediaTek PMIC based RTC is an independent function of the MediaTek PMIC that
+works as a type of multi-function device (MFD). The RTC can be configured and
+set up via the PMIC wrapper bus, which is a common resource shared with the
+other functions found on the same PMIC.
+
+For MediaTek PMIC MFD bindings, see:
+../mfd/mt6397.txt
+
+For MediaTek PMIC wrapper bus bindings, see:
+../soc/mediatek/pwrap.txt
+
+Required properties:
+- compatible: Should be one of the following:
+ "mediatek,mt6323-rtc": for MT6323 PMIC
+ "mediatek,mt6397-rtc": for MT6397 PMIC
+
+Example:
+
+ pmic {
+ compatible = "mediatek,mt6323";
+
+ ...
+
+ rtc {
+ compatible = "mediatek,mt6323-rtc";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-sh.txt b/Documentation/devicetree/bindings/rtc/rtc-sh.txt
deleted file mode 100644
index 7676c7d28874..000000000000
--- a/Documentation/devicetree/bindings/rtc/rtc-sh.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Real Time Clock for Renesas SH and ARM SoCs
-
-Required properties:
-- compatible: Should be "renesas,r7s72100-rtc" and "renesas,sh-rtc" as a
- fallback.
-- reg: physical base address and length of memory mapped region.
-- interrupts: 3 interrupts for alarm, period, and carry.
-- interrupt-names: The interrupts should be labeled as "alarm", "period", and
- "carry".
-- clocks: The functional clock source for the RTC controller must be listed
- first (if exists). Additionally, potential clock counting sources are to be
- listed.
-- clock-names: The functional clock must be labeled as "fck". Other clocks
- may be named in accordance to the SoC hardware manuals.
-
-
-Example:
-rtc: rtc@fcff1000 {
- compatible = "renesas,r7s72100-rtc", "renesas,sh-rtc";
- reg = <0xfcff1000 0x2e>;
- interrupts = <GIC_SPI 276 IRQ_TYPE_EDGE_RISING
- GIC_SPI 277 IRQ_TYPE_EDGE_RISING
- GIC_SPI 278 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "alarm", "period", "carry";
- clocks = <&mstp6_clks R7S72100_CLK_RTC>, <&rtc_x1_clk>,
- <&rtc_x3_clk>, <&extal_clk>;
- clock-names = "fck", "rtc_x1", "rtc_x3", "extal";
-};
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
deleted file mode 100644
index fdde63a5419c..000000000000
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* Samsung's S3C Real Time Clock controller
-
-Required properties:
-- compatible: should be one of the following.
- * "samsung,s3c2410-rtc" - for controllers compatible with s3c2410 rtc.
- * "samsung,s3c2416-rtc" - for controllers compatible with s3c2416 rtc.
- * "samsung,s3c2443-rtc" - for controllers compatible with s3c2443 rtc.
- * "samsung,s3c6410-rtc" - for controllers compatible with s3c6410 rtc.
- * "samsung,exynos3250-rtc" - (deprecated) for controllers compatible with
- exynos3250 rtc (use "samsung,s3c6410-rtc").
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: Two interrupt numbers to the cpu should be specified. First
- interrupt number is the rtc alarm interrupt and second interrupt number
- is the rtc tick interrupt. The number of cells representing a interrupt
- depends on the parent interrupt controller.
-- clocks: Must contain a list of phandle and clock specifier for the rtc
- clock and in the case of a s3c6410 compatible controller, also
- a source clock.
-- clock-names: Must contain "rtc" and for a s3c6410 compatible controller,
- a "rtc_src" sorted in the same order as the clocks property.
-
-Example:
-
- rtc@10070000 {
- compatible = "samsung,s3c6410-rtc";
- reg = <0x10070000 0x100>;
- interrupts = <44 0 45 0>;
- clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
- clock-names = "rtc", "rtc_src";
- };
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.yaml b/Documentation/devicetree/bindings/rtc/s3c-rtc.yaml
new file mode 100644
index 000000000000..76bbf8b7555b
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/s3c-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S3C, S5P and Exynos Real Time Clock controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - samsung,s3c2410-rtc
+ - samsung,s3c2416-rtc
+ - samsung,s3c2443-rtc
+ - samsung,s3c6410-rtc
+ - const: samsung,exynos3250-rtc
+ deprecated: true
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description:
+ Must contain a list of phandle and clock specifier for the rtc
+ clock and in the case of a s3c6410 compatible controller, also
+ a source clock.
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ description:
+ Must contain "rtc" and for a s3c6410 compatible controller
+ also "rtc_src".
+ minItems: 1
+ maxItems: 2
+
+ interrupts:
+ description:
+      Two interrupt numbers to the cpu should be specified. The first
+      interrupt number is the rtc alarm interrupt and the second interrupt
+      number is the rtc tick interrupt. The number of cells representing
+      an interrupt depends on the parent interrupt controller.
+ minItems: 2
+ maxItems: 2
+
+allOf:
+ - $ref: rtc.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,s3c6410-rtc
+ - samsung,exynos3250-rtc
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: rtc
+ - const: rtc_src
+ else:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: rtc
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5420.h>
+ #include <dt-bindings/clock/samsung,s2mps11.h>
+
+ rtc@10070000 {
+ compatible = "samsung,s3c6410-rtc";
+ reg = <0x10070000 0x100>;
+ interrupts = <0 44 4>, <0 45 4>;
+ clocks = <&clock CLK_RTC>,
+ <&s2mps11_osc S2MPS11_CLK_AP>;
+ clock-names = "rtc", "rtc_src";
+ };
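+
+  # A second, hedged sketch exercising the single-clock branch of the
+  # schema above; the address, interrupts and clock specifier are
+  # illustrative assumptions.
+  - |
+    rtc@57006000 {
+        compatible = "samsung,s3c2410-rtc";
+        reg = <0x57006000 0x100>;
+        interrupts = <30>, <8>;
+        clocks = <&clocks 0>;
+        clock-names = "rtc";
+    };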
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 20d351f268ef..55700f20f6ee 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -56,6 +56,11 @@ Optional properties:
- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
line respectively. It will use specified GPIO instead of the peripheral
function pin for the UART feature. If unsure, don't specify this property.
+- aspeed,sirq-polarity-sense: Only applicable to aspeed,ast2500-vuart.
+  Phandle to an aspeed,ast2500-scu compatible syscon, alongside the register
+  offset and bit number that identify how the SIRQ polarity should be
+  configured.
+ One possible data source is the LPC/eSPI mode bit.
+ Example: aspeed,sirq-polarity-sense = <&syscon 0x70 25>
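+  A hedged node sketch using this property (the SCU phandle, clock
+  specifier and the 0x70/25 offset/bit values are illustrative
+  assumptions):
+
+	vuart: serial@1e787000 {
+		compatible = "aspeed,ast2500-vuart";
+		reg = <0x1e787000 0x40>;
+		reg-shift = <2>;
+		interrupts = <8>;
+		clocks = <&syscon ASPEED_CLK_APB>;
+		aspeed,sirq-polarity-sense = <&syscon 0x70 25>;
+	};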
Note:
* fsl,ns16550:
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
index 3495eee81d53..f5f5ab0fd14e 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
@@ -21,8 +21,7 @@ Required properties:
Optional properties:
- dmas: A list of two dma specifiers, one for each entry in dma-names.
- dma-names: should contain "tx" and "rx".
-- rs485-rts-delay, rs485-rts-active-low, rs485-rx-during-tx,
- linux,rs485-enabled-at-boot-time: see rs485.txt
+- rs485-rts-active-low, linux,rs485-enabled-at-boot-time: see rs485.txt
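+
+A hedged sketch of a node using the remaining RS-485 properties (the
+address, interrupt and clock values are illustrative assumptions):
+
+	lpuart0: serial@40027000 {
+		compatible = "fsl,vf610-lpuart";
+		reg = <0x40027000 0x1000>;
+		interrupts = <61 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clks VF610_CLK_UART0>;
+		clock-names = "ipg";
+		rs485-rts-active-low;
+		linux,rs485-enabled-at-boot-time;
+	};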
Note: Optional properties for DMA support. Write them both or both not.
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index b143d9a21b2d..a5edf4b70c7a 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -54,8 +54,10 @@ Required properties:
- "renesas,hscif-r8a7794" for R8A7794 (R-Car E2) HSCIF compatible UART.
- "renesas,scif-r8a7795" for R8A7795 (R-Car H3) SCIF compatible UART.
- "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
- - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
- - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
+ - "renesas,scif-r8a7796" for R8A77960 (R-Car M3-W) SCIF compatible UART.
+ - "renesas,hscif-r8a7796" for R8A77960 (R-Car M3-W) HSCIF compatible UART.
+ - "renesas,scif-r8a77961" for R8A77961 (R-Car M3-W+) SCIF compatible UART.
+ - "renesas,hscif-r8a77961" for R8A77961 (R-Car M3-W+) HSCIF compatible UART.
- "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
- "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
- "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.txt b/Documentation/devicetree/bindings/serial/samsung_uart.txt
deleted file mode 100644
index e85f37ec33f0..000000000000
--- a/Documentation/devicetree/bindings/serial/samsung_uart.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-* Samsung's UART Controller
-
-The Samsung's UART controller is used for interfacing SoC with serial
-communicaion devices.
-
-Required properties:
-- compatible: should be one of following:
- - "samsung,exynos4210-uart" - Exynos4210 SoC,
- - "samsung,s3c2410-uart" - compatible with ports present on S3C2410 SoC,
- - "samsung,s3c2412-uart" - compatible with ports present on S3C2412 SoC,
- - "samsung,s3c2440-uart" - compatible with ports present on S3C2440 SoC,
- - "samsung,s3c6400-uart" - compatible with ports present on S3C6400 SoC,
- - "samsung,s5pv210-uart" - compatible with ports present on S5PV210 SoC.
-
-- reg: base physical address of the controller and length of memory mapped
- region.
-
-- interrupts: a single interrupt signal to SoC interrupt controller,
- according to interrupt bindings documentation [1].
-
-- clock-names: input names of clocks used by the controller:
- - "uart" - controller bus clock,
- - "clk_uart_baudN" - Nth baud base clock input (N = 0, 1, ...),
- according to SoC User's Manual (only N = 0 is allowedfor SoCs without
- internal baud clock mux).
-- clocks: phandles and specifiers for all clocks specified in "clock-names"
- property, in the same order, according to clock bindings documentation [2].
-
-[1] Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-[2] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Optional properties:
-- samsung,uart-fifosize: The fifo size supported by the UART channel
-
-Note: Each Samsung UART should have an alias correctly numbered in the
-"aliases" node, according to serialN format, where N is the port number
-(non-negative decimal integer) as specified by User's Manual of respective
-SoC.
-
-Example:
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- };
-
-Example:
- uart1: serial@7f005400 {
- compatible = "samsung,s3c6400-uart";
- reg = <0x7f005400 0x100>;
- interrupt-parent = <&vic1>;
- interrupts = <6>;
- clock-names = "uart", "clk_uart_baud2",
- "clk_uart_baud3";
- clocks = <&clocks PCLK_UART1>, <&clocks PCLK_UART1>,
- <&clocks SCLK_UART>;
- samsung,uart-fifosize = <16>;
- };
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
new file mode 100644
index 000000000000..9d2ce347875b
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/samsung_uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S3C, S5P and Exynos SoC UART Controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description: |+
+ Each Samsung UART should have an alias correctly numbered in the "aliases"
+ node, according to serialN format, where N is the port number (non-negative
+ decimal integer) as specified by User's Manual of respective SoC.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - samsung,s3c2410-uart
+ - samsung,s3c2412-uart
+ - samsung,s3c2440-uart
+ - samsung,s3c6400-uart
+ - samsung,s5pv210-uart
+ - samsung,exynos4210-uart
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 5
+
+ clock-names:
+    description: Only N = 0 is allowed for SoCs without internal baud clock mux.
+ minItems: 2
+ maxItems: 5
+ items:
+ - const: uart
+ - pattern: '^clk_uart_baud[0-3]$'
+ - pattern: '^clk_uart_baud[0-3]$'
+ - pattern: '^clk_uart_baud[0-3]$'
+ - pattern: '^clk_uart_baud[0-3]$'
+
+ interrupts:
+ description: RX interrupt and optionally TX interrupt.
+ minItems: 1
+ maxItems: 2
+
+ samsung,uart-fifosize:
+ description: The fifo size supported by the UART channel.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [16, 64, 256]
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - interrupts
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,s3c2410-uart
+ - samsung,s5pv210-uart
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 3
+ clock-names:
+ minItems: 2
+ maxItems: 3
+ items:
+ - const: uart
+ - pattern: '^clk_uart_baud[0-1]$'
+ - pattern: '^clk_uart_baud[0-1]$'
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos4210-uart
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ minItems: 2
+ maxItems: 2
+ items:
+ - const: uart
+ - const: clk_uart_baud0
+
+examples:
+ - |
+ #include <dt-bindings/clock/samsung,s3c64xx-clock.h>
+
+ uart0: serial@7f005000 {
+ compatible = "samsung,s3c6400-uart";
+ reg = <0x7f005000 0x100>;
+ interrupt-parent = <&vic1>;
+ interrupts = <5>;
+ clock-names = "uart", "clk_uart_baud2",
+ "clk_uart_baud3";
+ clocks = <&clocks PCLK_UART0>, <&clocks PCLK_UART0>,
+ <&clocks SCLK_UART>;
+ samsung,uart-fifosize = <16>;
+ };
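+
+  # A hedged sketch for the exynos4210 branch of the schema, which takes
+  # exactly one baud clock; the address, interrupt and clock specifiers
+  # are illustrative assumptions.
+  - |
+    serial@13800000 {
+        compatible = "samsung,exynos4210-uart";
+        reg = <0x13800000 0x100>;
+        interrupts = <0 52 0>;
+        clock-names = "uart", "clk_uart_baud0";
+        clocks = <&clock 312>, <&clock 151>;
+        samsung,uart-fifosize = <256>;
+    };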
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.txt b/Documentation/devicetree/bindings/serial/sprd-uart.txt
deleted file mode 100644
index 9607dc616205..000000000000
--- a/Documentation/devicetree/bindings/serial/sprd-uart.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-* Spreadtrum serial UART
-
-Required properties:
-- compatible: must be one of:
- * "sprd,sc9836-uart"
- * "sprd,sc9860-uart", "sprd,sc9836-uart"
-
-- reg: offset and length of the register set for the device
-- interrupts: exactly one interrupt specifier
-- clock-names: Should contain following entries:
- "enable" for UART module enable clock,
- "uart" for UART clock,
- "source" for UART source (parent) clock.
-- clocks: Should contain a clock specifier for each entry in clock-names.
- UART clock and source clock are optional properties, but enable clock
- is required.
-
-Optional properties:
-- dma-names: Should contain "rx" for receive and "tx" for transmit channels.
-- dmas: A list of dma specifiers, one for each entry in dma-names.
-
-Example:
- uart0: serial@0 {
- compatible = "sprd,sc9860-uart",
- "sprd,sc9836-uart";
- reg = <0x0 0x100>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- dma-names = "rx", "tx";
- dmas = <&ap_dma 19>, <&ap_dma 20>;
- clock-names = "enable", "uart", "source";
- clocks = <&clk_ap_apb_gates 9>, <&clk_uart0>, <&ext_26m>;
- };
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.yaml b/Documentation/devicetree/bindings/serial/sprd-uart.yaml
new file mode 100644
index 000000000000..e66b2e92a7fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/sprd-uart.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019 Unisoc Inc.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/sprd-uart.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Spreadtrum serial UART
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - sprd,sc9860-uart
+ - sprd,sc9863a-uart
+ - const: sprd,sc9836-uart
+ - const: sprd,sc9836-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ description: |
+ "enable" for UART module enable clock, "uart" for UART clock, "source"
+ for UART source (parent) clock.
+ items:
+ - const: enable
+ - const: uart
+ - const: source
+
+ dmas:
+ minItems: 1
+ maxItems: 2
+
+ dma-names:
+ minItems: 1
+ items:
+ - const: rx
+ - const: tx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ serial@0 {
+ compatible = "sprd,sc9860-uart", "sprd,sc9836-uart";
+ reg = <0x0 0x100>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ dma-names = "rx", "tx";
+ dmas = <&ap_dma 19>, <&ap_dma 20>;
+ clock-names = "enable", "uart", "source";
+ clocks = <&clk_ap_apb_gates 9>, <&clk_uart0>, <&ext_26m>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml b/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml
new file mode 100644
index 000000000000..ee9712f1c97d
--- /dev/null
+++ b/Documentation/devicetree/bindings/serio/allwinner,sun4i-a10-ps2.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serio/allwinner,sun4i-a10-ps2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 PS2 Host Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+description:
+  The A20 PS2 is a dual-role controller (PS2 host and PS2 device). These
+  bindings are for the A10/A20 PS2 host controller. IBM-compliant PS2 and
+  AT-compatible keyboards and mice can be connected.
+
+properties:
+ compatible:
+ const: allwinner,sun4i-a10-ps2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/sun7i-a20-ccu.h>
+
+ ps20: ps2@1c2a000 {
+ compatible = "allwinner,sun4i-a10-ps2";
+ reg = <0x01c2a000 0x400>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_APB1_PS20>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt b/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
deleted file mode 100644
index 75996b6111bb..000000000000
--- a/Documentation/devicetree/bindings/serio/allwinner,sun4i-ps2.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-* Device tree bindings for Allwinner A10, A20 PS2 host controller
-
-A20 PS2 is dual role controller (PS2 host and PS2 device). These bindings are
-for PS2 A10/A20 host controller. IBM compliant IBM PS2 and AT-compatible keyboard
-and mouse can be connected.
-
-Required properties:
-
- - reg : Offset and length of the register set for the device.
- - compatible : Should be as of the following:
- - "allwinner,sun4i-a10-ps2"
- - interrupts : The interrupt line connected to the PS2.
- - clocks : The gate clk connected to the PS2.
-
-
-Example:
- ps20: ps2@01c2a000 {
- compatible = "allwinner,sun4i-a10-ps2";
- reg = <0x01c2a000 0x400>;
- interrupts = <0 62 4>;
- clocks = <&apb1_gates 6>;
- };
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt
deleted file mode 100644
index e876f3ce54f6..000000000000
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Amlogic Canvas
-================================
-
-A canvas is a collection of metadata that describes a pixel buffer.
-Those metadata include: width, height, phyaddr, wrapping and block mode.
-Starting with GXBB the endianness can also be described.
-
-Many IPs within Amlogic SoCs rely on canvas indexes to read/write pixel data
-rather than use the phy addresses directly. For instance, this is the case for
-the video decoders and the display.
-
-Amlogic SoCs have 256 canvas.
-
-Device Tree Bindings:
----------------------
-
-Video Lookup Table
---------------------------
-
-Required properties:
-- compatible: has to be one of:
- - "amlogic,meson8-canvas", "amlogic,canvas" on Meson8
- - "amlogic,meson8b-canvas", "amlogic,canvas" on Meson8b
- - "amlogic,meson8m2-canvas", "amlogic,canvas" on Meson8m2
- - "amlogic,canvas" on GXBB and newer
-- reg: Base physical address and size of the canvas registers.
-
-Example:
-
-canvas: video-lut@48 {
- compatible = "amlogic,canvas";
- reg = <0x0 0x48 0x0 0x14>;
-};
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
new file mode 100644
index 000000000000..f548594d020b
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/amlogic/amlogic,canvas.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Canvas Video Lookup Table
+
+maintainers:
+ - Neil Armstrong <narmstrong@baylibre.com>
+ - Maxime Jourdan <mjourdan@baylibre.com>
+
+description: |
+ A canvas is a collection of metadata that describes a pixel buffer.
+ Those metadata include: width, height, phyaddr, wrapping and block mode.
+ Starting with GXBB the endianness can also be described.
+
+ Many IPs within Amlogic SoCs rely on canvas indexes to read/write pixel data
+ rather than use the phy addresses directly. For instance, this is the case for
+ the video decoders and the display.
+
+  Amlogic SoCs have 256 canvases.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,meson8-canvas
+ - amlogic,meson8b-canvas
+ - amlogic,meson8m2-canvas
+ - const: amlogic,canvas
+ - const: amlogic,canvas # GXBB and newer SoCs
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ canvas: video-lut@48 {
+ compatible = "amlogic,canvas";
+ reg = <0x48 0x14>;
+ };
+
diff --git a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt
index 3b7d32956391..72ff033565e5 100644
--- a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt
+++ b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt
@@ -26,7 +26,7 @@ Optional properties:
system power. This node follows the power controller bindings[3].
[1] Documentation/devicetree/bindings/reset/reset.txt
-[2] Documentation/devicetree/bindings/power/power_domain.txt
+[2] Documentation/devicetree/bindings/power/power-domain.yaml
[3] Documentation/devicetree/bindings/power/power-controller.txt
Example:
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
index 876693a7ada5..8f469d85833b 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -8,7 +8,7 @@ The System Power Manager (SPM) inside the SCPSYS is for the MTCMOS power
domain control.
The driver implements the Generic PM domain bindings described in
-power/power_domain.txt. It provides the power domains defined in
+power/power-domain.yaml. It provides the power domains defined in
- include/dt-bindings/power/mt8173-power.h
- include/dt-bindings/power/mt6797-power.h
- include/dt-bindings/power/mt2701-power.h
diff --git a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
index f541d1f776a2..6217e64309de 100644
--- a/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+++ b/Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
@@ -12,7 +12,7 @@ PM Domain Node
==============
The PM domain node represents the global PM domain managed by the PMMC, which
in this case is the implementation as documented by the generic PM domain
-bindings in Documentation/devicetree/bindings/power/power_domain.txt. Because
+bindings in Documentation/devicetree/bindings/power/power-domain.yaml. Because
this relies on the TI SCI protocol to communicate with the PMMC it must be a
child of the pmmc node.
diff --git a/Documentation/devicetree/bindings/sound/adi,adau7118.yaml b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
new file mode 100644
index 000000000000..75e0cbe6be70
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/adi,adau7118.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+
+title: Analog Devices ADAU7118 8 Channel PDM to I2S/TDM Converter
+
+maintainers:
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+  Analog Devices ADAU7118 8 Channel PDM to I2S/TDM Converter, controlled
+  over I2C or used in HW standalone mode.
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ADAU7118.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,adau7118
+
+ reg:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 0
+
+ iovdd-supply:
+ description: Digital Input/Output Power Supply.
+
+ dvdd-supply:
+ description: Internal Core Digital Power Supply.
+
+ adi,decimation-ratio:
+ description: |
+      This property sets the decimation ratio of PDM to PCM audio data.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [64, 32, 16]
+ default: 64
+
+ adi,pdm-clk-map:
+ description: |
+      The ADAU7118 has two PDM clocks for the four inputs. Each input must be
+      assigned to one of these two clocks. This property sets the mapping
+      between the clocks and the inputs.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 4
+ maxItems: 4
+ items:
+ maximum: 1
+ default: [0, 0, 1, 1]
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - iovdd-supply
+ - dvdd-supply
+
+examples:
+ - |
+ i2c {
+ /* example with i2c support */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ adau7118_codec: audio-codec@14 {
+ compatible = "adi,adau7118";
+ reg = <0x14>;
+ #sound-dai-cells = <0>;
+ iovdd-supply = <&supply>;
+ dvdd-supply = <&supply>;
+ adi,pdm-clk-map = <1 1 0 0>;
+ adi,decimation-ratio = <16>;
+ };
+ };
+
+ /* example with hw standalone mode */
+ adau7118_codec_hw: adau7118-codec-hw {
+ compatible = "adi,adau7118";
+ #sound-dai-cells = <0>;
+ iovdd-supply = <&supply>;
+ dvdd-supply = <&supply>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
new file mode 100644
index 000000000000..b8f89c7258eb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -0,0 +1,267 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/allwinner,sun4i-a10-codec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 Codec Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#sound-dai-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - allwinner,sun4i-a10-codec
+ - allwinner,sun6i-a31-codec
+ - allwinner,sun7i-a20-codec
+ - allwinner,sun8i-a23-codec
+ - allwinner,sun8i-h3-codec
+ - allwinner,sun8i-v3s-codec
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Module Clock
+
+ clock-names:
+ items:
+ - const: apb
+ - const: codec
+
+ dmas:
+ items:
+ - description: RX DMA Channel
+ - description: TX DMA Channel
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ resets:
+ maxItems: 1
+
+ allwinner,audio-routing:
+ description: |-
+ A list of the connections between audio components. Each entry
+ is a pair of strings, the first being the connection's sink, the
+ second being the connection's source.
+ allOf:
+ - $ref: /schemas/types.yaml#definitions/non-unique-string-array
+ - minItems: 2
+ maxItems: 18
+ items:
+ enum:
+ # Audio Pins on the SoC
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - MIC3
+
+ # Microphone Biases from the SoC
+ - HBIAS
+ - MBIAS
+
+ # Board Connectors
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ allwinner,codec-analog-controls:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to the codec analog controls in the PRCM
+
+ allwinner,pa-gpios:
+ description: GPIO to enable the external amplifier
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-codec
+ - allwinner,sun8i-a23-codec
+ - allwinner,sun8i-h3-codec
+ - allwinner,sun8i-v3s-codec
+
+ then:
+ if:
+ properties:
+ compatible:
+ const: allwinner,sun6i-a31-codec
+
+ then:
+ required:
+ - resets
+ - allwinner,audio-routing
+
+ else:
+ required:
+ - resets
+ - allwinner,audio-routing
+ - allwinner,codec-analog-controls
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun6i-a31-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - MIC3
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-a23-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - MIC1
+ - MIC2
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-h3-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,sun8i-v3s-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - MIC1
+ - HBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
+
+additionalProperties: false
+
+examples:
+ - |
+ codec@1c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun7i-a20-codec";
+ reg = <0x01c22c00 0x40>;
+ interrupts = <0 30 4>;
+ clocks = <&apb0_gates 0>, <&codec_clk>;
+ clock-names = "apb", "codec";
+ dmas = <&dma 0 19>, <&dma 0 19>;
+ dma-names = "rx", "tx";
+ };
+
+ - |
+ codec@1c22c00 {
+ #sound-dai-cells = <0>;
+ compatible = "allwinner,sun6i-a31-codec";
+ reg = <0x01c22c00 0x98>;
+ interrupts = <0 29 4>;
+ clocks = <&ccu 61>, <&ccu 135>;
+ clock-names = "apb", "codec";
+ resets = <&ccu 42>;
+ dmas = <&dma 15>, <&dma 15>;
+ dma-names = "rx", "tx";
+ allwinner,audio-routing =
+ "Headphone", "HP",
+ "Speaker", "LINEOUT",
+ "LINEIN", "Line In",
+ "MIC1", "MBIAS",
+ "MIC1", "Mic",
+ "MIC2", "HBIAS",
+ "MIC2", "Headset Mic";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
new file mode 100644
index 000000000000..85305b4c2729
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun8i-a23-codec-analog.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/allwinner,sun8i-a23-codec-analog.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A23 Analog Codec Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ compatible:
+ enum:
+ # FIXME: This is documented in the PRCM binding, but needs to be
+ # migrated here at some point
+ # - allwinner,sun8i-a23-codec-analog
+ - allwinner,sun8i-h3-codec-analog
+ - allwinner,sun8i-v3s-codec-analog
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ codec_analog: codec-analog@1f015c0 {
+ compatible = "allwinner,sun8i-h3-codec-analog";
+ reg = <0x01f015c0 0x4>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/arndale.txt b/Documentation/devicetree/bindings/sound/arndale.txt
index 0e76946385ae..17530120ccfc 100644
--- a/Documentation/devicetree/bindings/sound/arndale.txt
+++ b/Documentation/devicetree/bindings/sound/arndale.txt
@@ -1,8 +1,9 @@
Audio Binding for Arndale boards
Required properties:
-- compatible : Can be the following,
- "samsung,arndale-rt5631"
+- compatible : Can be one of the following:
+ "samsung,arndale-rt5631",
+ "samsung,arndale-wm1811"
- samsung,audio-cpu: The phandle of the Samsung I2S controller
- samsung,audio-codec: The phandle of the audio codec
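+
+A hedged example for the new WM1811 variant (the phandle names are
+illustrative assumptions):
+
+	sound {
+		compatible = "samsung,arndale-wm1811";
+		samsung,audio-cpu = <&i2s0>;
+		samsung,audio-codec = <&wm1811>;
+	};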
diff --git a/Documentation/devicetree/bindings/sound/fsl,mqs.txt b/Documentation/devicetree/bindings/sound/fsl,mqs.txt
new file mode 100644
index 000000000000..40353fc30255
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,mqs.txt
@@ -0,0 +1,36 @@
+fsl,mqs audio CODEC
+
+Required properties:
+ - compatible : Must contain one of "fsl,imx6sx-mqs", "fsl,codec-mqs",
+   "fsl,imx8qm-mqs", "fsl,imx8qxp-mqs".
+ - clocks : A list of phandles + clock-specifiers, one for each entry in
+ clock-names
+ - clock-names : "mclk" - always required.
+                 "core" - required if compatible is "fsl,imx8qm-mqs"; it
+                 is used for register access.
+ - gpr : A phandle of General Purpose Registers in IOMUX Controller.
+ Required if compatible is "fsl,imx6sx-mqs".
+
+Required if compatible is "fsl,imx8qm-mqs":
+ - power-domains: A phandle of PM domain provider node.
+ - reg: Offset and length of the register set for the device.
+
+Example:
+
+mqs: mqs {
+ compatible = "fsl,imx6sx-mqs";
+ gpr = <&gpr>;
+ clocks = <&clks IMX6SX_CLK_SAI1>;
+ clock-names = "mclk";
+ status = "disabled";
+};
+
+mqs: mqs@59850000 {
+ compatible = "fsl,imx8qm-mqs";
+ reg = <0x59850000 0x10000>;
+ clocks = <&clk IMX8QM_AUD_MQS_IPG>,
+ <&clk IMX8QM_AUD_MQS_HMCLK>;
+ clock-names = "core", "mclk";
+ power-domains = <&pd_mqs0>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
index 1084f7f22eea..8ca52dcc5572 100644
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
@@ -1,4 +1,4 @@
-* Audio codec controlled by ChromeOS EC
+Audio codec controlled by ChromeOS EC
Google's ChromeOS EC codec is a digital mic codec provided by the
Embedded Controller (EC) and is controlled via a host-command interface.
@@ -9,10 +9,27 @@ Documentation/devicetree/bindings/mfd/cros-ec.txt).
Required properties:
- compatible: Must contain "google,cros-ec-codec"
- #sound-dai-cells: Should be 1. The cell specifies number of DAIs.
-- max-dmic-gain: A number for maximum gain in dB on digital microphone.
+
+Optional properties:
+- reg: Physical base address and length of the shared memory region from EC.
+       It contains 3 unsigned 32-bit integers. The first 2 integers
+       combine to form an unsigned 64-bit physical address. The last
+       integer is the length of the shared memory.
+- memory-region: Shared memory region to EC. A "shared-dma-pool". See
+ ../reserved-memory/reserved-memory.txt for details.
Example:
+{
+ ...
+
+ reserved_mem: reserved_mem {
+ compatible = "shared-dma-pool";
+ reg = <0 0x52800000 0 0x100000>;
+ no-map;
+ };
+}
+
cros-ec@0 {
compatible = "google,cros-ec-spi";
@@ -21,6 +38,7 @@ cros-ec@0 {
cros_ec_codec: ec-codec {
compatible = "google,cros-ec-codec";
#sound-dai-cells = <1>;
- max-dmic-gain = <43>;
+ reg = <0x0 0x10500000 0x80000>;
+ memory-region = <&reserved_mem>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
index 396ba38619f6..1f1cba4152ce 100644
--- a/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
+++ b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt
@@ -4,6 +4,10 @@ Required properties:
- compatible = "mediatek,mt68183-audio";
- reg: register location and size
- interrupts: should contain AFE interrupt
+- resets: Must contain an entry for each entry in reset-names
+ See ../reset/reset.txt for details.
+- reset-names: should have these reset names:
+ "audiosys";
- power-domains: should define the power domain
- clocks: Must contain an entry for each entry in clock-names
- clock-names: should have these clock names:
@@ -20,6 +24,8 @@ Example:
compatible = "mediatek,mt8183-audio";
reg = <0 0x11220000 0 0x1000>;
interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
+ resets = <&watchdog MT8183_TOPRGU_AUDIO_SW_RST>;
+ reset-names = "audiosys";
power-domains = <&scpsys MT8183_POWER_DOMAIN_AUDIO>;
clocks = <&infrasys CLK_INFRA_AUDIO>,
<&infrasys CLK_INFRA_AUDIO_26M_BCLK>,
diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
index d6d5207fa996..decaa013a07e 100644
--- a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
+++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
@@ -2,14 +2,19 @@ MT8183 with MT6358, TS3A227 and MAX98357 CODECS
Required properties:
- compatible : "mediatek,mt8183_mt6358_ts3a227_max98357"
-- mediatek,headset-codec: the phandles of ts3a227 codecs
- mediatek,platform: the phandle of MT8183 ASoC platform
+Optional properties:
+- mediatek,headset-codec: the phandles of ts3a227 codecs
+- mediatek,ec-codec: the phandle of the EC codec.
+ See google,cros-ec-codec.txt for more details.
+
Example:
sound {
compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
mediatek,headset-codec = <&ts3a227>;
+ mediatek,ec-codec = <&ec_codec>;
mediatek,platform = <&afe>;
};
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.txt b/Documentation/devicetree/bindings/sound/renesas,fsi.txt
deleted file mode 100644
index 0cf0f819b823..000000000000
--- a/Documentation/devicetree/bindings/sound/renesas,fsi.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Renesas FSI
-
-Required properties:
-- compatible : "renesas,fsi2-<soctype>",
- "renesas,sh_fsi2" or "renesas,sh_fsi" as
- fallback.
- Examples with soctypes are:
- - "renesas,fsi2-r8a7740" (R-Mobile A1)
- - "renesas,fsi2-sh73a0" (SH-Mobile AG5)
-- reg : Should contain the register physical address and length
-- interrupts : Should contain FSI interrupt
-
-- fsia,spdif-connection : FSI is connected by S/PDIF
-- fsia,stream-mode-support : FSI supports 16bit stream mode.
-- fsia,use-internal-clock : FSI uses internal clock when master mode.
-
-- fsib,spdif-connection : same as fsia
-- fsib,stream-mode-support : same as fsia
-- fsib,use-internal-clock : same as fsia
-
-Example:
-
-sh_fsi2: sh_fsi2@ec230000 {
- compatible = "renesas,sh_fsi2";
- reg = <0xec230000 0x400>;
- interrupts = <0 146 0x4>;
-
- fsia,spdif-connection;
- fsia,stream-mode-support;
- fsia,use-internal-clock;
-};
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.yaml b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
new file mode 100644
index 000000000000..140a37fc3c0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/renesas,fsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas FSI Sound Driver Device Tree Bindings
+
+maintainers:
+ - Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+properties:
+ $nodename:
+ pattern: "^sound@.*"
+
+ compatible:
+ oneOf:
+ # for FSI2 SoC
+ - items:
+ - enum:
+ - renesas,fsi2-sh73a0
+ - renesas,fsi2-r8a7740
+ - enum:
+ - renesas,sh_fsi2
+ # for Generic
+ - items:
+ - enum:
+ - renesas,sh_fsi
+ - renesas,sh_fsi2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ fsia,spdif-connection:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: FSI is connected by S/PDIF
+
+ fsia,stream-mode-support:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: FSI supports 16bit stream mode
+
+ fsia,use-internal-clock:
+ $ref: /schemas/types.yaml#/definitions/flag
+    description: FSI uses the internal clock when in master mode
+
+ fsib,spdif-connection:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: same as fsia
+
+ fsib,stream-mode-support:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: same as fsia
+
+ fsib,use-internal-clock:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: same as fsia
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ sh_fsi2: sound@ec230000 {
+ compatible = "renesas,fsi2-r8a7740", "renesas,sh_fsi2";
+ reg = <0xec230000 0x400>;
+ interrupts = <0 146 0x4>;
+
+ fsia,spdif-connection;
+ fsia,stream-mode-support;
+ fsia,use-internal-clock;
+ };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 5c52182f7dcf..797fd035434c 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -268,6 +268,7 @@ Required properties:
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
- "renesas,rcar_sound-r8a77470" (RZ/G1C)
- "renesas,rcar_sound-r8a774a1" (RZ/G2M)
+ - "renesas,rcar_sound-r8a774b1" (RZ/G2N)
- "renesas,rcar_sound-r8a774c0" (RZ/G2E)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
diff --git a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
index a805aa99ad75..e9c58b204399 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
@@ -5,15 +5,38 @@ Required properties:
- rockchip,model: The user-visible name of this sound complex
- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
connected to the CODEC
-- rockchip,audio-codec: The phandle of the MAX98090 audio codec
-- rockchip,headset-codec: The phandle of Ext chip for jack detection
+
+Optional properties:
+- rockchip,audio-codec: The phandle of the MAX98090 audio codec.
+- rockchip,headset-codec: The phandle of the external chip for jack detection.
+  This is required if rockchip,audio-codec is present.
+- rockchip,hdmi-codec: The phandle of the HDMI device used as the HDMI codec.
Example:
+/* For max98090-only board. */
+sound {
+ compatible = "rockchip,rockchip-audio-max98090";
+ rockchip,model = "ROCKCHIP-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,audio-codec = <&max98090>;
+ rockchip,headset-codec = <&headsetcodec>;
+};
+
+/* For HDMI-only board. */
+sound {
+ compatible = "rockchip,rockchip-audio-max98090";
+ rockchip,model = "ROCKCHIP-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,hdmi-codec = <&hdmi>;
+};
+
+/* For max98090 plus HDMI board. */
sound {
compatible = "rockchip,rockchip-audio-max98090";
rockchip,model = "ROCKCHIP-I2S";
rockchip,i2s-controller = <&i2s>;
rockchip,audio-codec = <&max98090>;
rockchip,headset-codec = <&headsetcodec>;
+ rockchip,hdmi-codec = <&hdmi>;
};
diff --git a/Documentation/devicetree/bindings/sound/rt1011.txt b/Documentation/devicetree/bindings/sound/rt1011.txt
index 35a23e60d679..02d53b9aa247 100644
--- a/Documentation/devicetree/bindings/sound/rt1011.txt
+++ b/Documentation/devicetree/bindings/sound/rt1011.txt
@@ -20,6 +20,14 @@ Required properties:
| 1 | 1 | 0x3b |
-------------------------------------
+Optional properties:
+
+- realtek,temperature_calib
+  u32. The temperature at which the calibration was done. Unit: degrees Celsius.
+
+- realtek,r0_calib
+  u32. The r0 calibration data, measured in factory mode.
+
Pins on the device (for linking into audio routes) for RT1011:
* SPO
@@ -29,4 +37,6 @@ Example:
rt1011: codec@38 {
compatible = "realtek,rt1011";
reg = <0x38>;
+ realtek,temperature_calib = <25>;
+ realtek,r0_calib = <0x224050>;
};
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
index 312e9a129530..30e927a28369 100644
--- a/Documentation/devicetree/bindings/sound/rt5682.txt
+++ b/Documentation/devicetree/bindings/sound/rt5682.txt
@@ -27,6 +27,11 @@ Optional properties:
- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+- realtek,btndet-delay
+  The debounce delay for the push button.
+  The delay time is the realtek,btndet-delay value multiplied by 8.192 ms
+  (e.g. the default of 16 gives roughly 131 ms).
+  If absent, the default is 16.
+
Pins on the device (for linking into audio routes) for RT5682:
* DMIC L1
@@ -47,4 +52,5 @@ rt5682 {
realtek,dmic1-data-pin = <1>;
realtek,dmic1-clk-pin = <1>;
realtek,jd-src = <1>;
+ realtek,btndet-delay = <16>;
};
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.txt b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
deleted file mode 100644
index e9da2200e173..000000000000
--- a/Documentation/devicetree/bindings/sound/samsung,odroid.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Samsung Exynos Odroid XU3/XU4 audio complex with MAX98090 codec
-
-Required properties:
-
- - compatible - "hardkernel,odroid-xu3-audio" - for Odroid XU3 board,
- "hardkernel,odroid-xu4-audio" - for Odroid XU4 board (deprecated),
- "samsung,odroid-xu3-audio" - for Odroid XU3 board (deprecated),
- "samsung,odroid-xu4-audio" - for Odroid XU4 board (deprecated)
- - model - the user-visible name of this sound complex
- - clocks - should contain entries matching clock names in the clock-names
- property
- - samsung,audio-widgets - this property specifies off-codec audio elements
- like headphones or speakers, for details see widgets.txt
- - samsung,audio-routing - a list of the connections between audio
- components; each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's source;
- valid names for sources and sinks are the MAX98090's pins (as
- documented in its binding), and the jacks on the board
-
- For Odroid X2:
- "Headphone Jack", "Mic Jack", "DMIC"
-
- For Odroid U3, XU3:
- "Headphone Jack", "Speakers"
-
- For Odroid XU4:
- no entries
-
-Required sub-nodes:
-
- - 'cpu' subnode with a 'sound-dai' property containing the phandle of the I2S
- controller
- - 'codec' subnode with a 'sound-dai' property containing list of phandles
- to the CODEC nodes, first entry must be corresponding to the MAX98090
- CODEC and the second entry must be the phandle of the HDMI IP block node
-
-Example:
-
-sound {
- compatible = "hardkernel,odroid-xu3-audio";
- model = "Odroid-XU3";
- samsung,audio-routing =
- "Headphone Jack", "HPL",
- "Headphone Jack", "HPR",
- "IN1", "Mic Jack",
- "Mic Jack", "MICBIAS";
-
- cpu {
- sound-dai = <&i2s0 0>;
- };
- codec {
- sound-dai = <&hdmi>, <&max98090>;
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.yaml b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
new file mode 100644
index 000000000000..c6b244352d05
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/samsung,odroid.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos Odroid XU3/XU4 audio complex with MAX98090 codec
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: hardkernel,odroid-xu3-audio
+
+ - const: hardkernel,odroid-xu4-audio
+ deprecated: true
+
+ - const: samsung,odroid-xu3-audio
+ deprecated: true
+
+ - const: samsung,odroid-xu4-audio
+ deprecated: true
+
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The user-visible name of this sound complex.
+
+ cpu:
+ type: object
+ properties:
+ sound-dai:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: phandles to the I2S controllers
+
+ codec:
+ type: object
+ properties:
+ sound-dai:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+        List of phandles to the CODEC nodes;
+        the first entry must correspond to the MAX98090 CODEC and
+        the second entry must be the phandle of the HDMI IP block node.
+
+ samsung,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ List of the connections between audio
+ components; each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's source;
+ valid names for sources and sinks are the MAX98090's pins (as
+ documented in its binding), and the jacks on the board.
+ For Odroid X2: "Headphone Jack", "Mic Jack", "DMIC"
+ For Odroid U3, XU3: "Headphone Jack", "Speakers"
+ For Odroid XU4: no entries
+
+ samsung,audio-widgets:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ This property specifies off-codec audio elements
+      like headphones or speakers; for details see widgets.txt.
+
+required:
+ - compatible
+ - model
+ - cpu
+ - codec
+
+examples:
+ - |
+ sound {
+ compatible = "hardkernel,odroid-xu3-audio";
+ model = "Odroid-XU3";
+ samsung,audio-routing =
+ "Headphone Jack", "HPL",
+ "Headphone Jack", "HPR",
+ "IN1", "Mic Jack",
+ "Mic Jack", "MICBIAS";
+
+ cpu {
+ sound-dai = <&i2s0 0>;
+ };
+
+ codec {
+ sound-dai = <&hdmi>, <&max98090>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.txt b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
deleted file mode 100644
index a88cb00fa096..000000000000
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-* Samsung I2S controller
-
-Required SoC Specific Properties:
-
-- compatible : should be one of the following.
- - samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
- - samsung,s5pv210-i2s: for 8/16/24bit multichannel(5.1) I2S with
- secondary fifo, s/w reset control and internal mux for root clk src.
- - samsung,exynos5420-i2s: for 8/16/24bit multichannel(5.1) I2S for
- playback, stereo channel capture, secondary fifo using internal
- or external dma, s/w reset control, internal mux for root clk src
- and 7.1 channel TDM support for playback. TDM (Time division multiplexing)
- is to allow transfer of multiple channel audio data on single data line.
- - samsung,exynos7-i2s: with all the available features of exynos5 i2s,
- exynos7 I2S has 7.1 channel TDM support for capture, secondary fifo
- with only external dma and more no.of root clk sampling frequencies.
- - samsung,exynos7-i2s1: I2S1 on previous samsung platforms supports
- stereo channels. exynos7 i2s1 upgraded to 5.1 multichannel with
- slightly modified bit offsets.
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-- dmas: list of DMA controller phandle and DMA request line ordered pairs.
-- dma-names: identifier string for each DMA request line in the dmas property.
- These strings correspond 1:1 with the ordered pairs in dmas.
-- clocks: Handle to iis clock and RCLK source clk.
-- clock-names:
- i2s0 uses some base clocks from CMU and some are from audio subsystem internal
- clock controller. The clock names for i2s0 should be "iis", "i2s_opclk0" and
- "i2s_opclk1" as shown in the example below.
- i2s1 and i2s2 uses clocks from CMU. The clock names for i2s1 and i2s2 should
- be "iis" and "i2s_opclk0".
- "iis" is the i2s bus clock and i2s_opclk0, i2s_opclk1 are sources of the root
- clk. i2s0 has internal mux to select the source of root clk and i2s1 and i2s2
- doesn't have any such mux.
-- #clock-cells: should be 1, this property must be present if the I2S device
- is a clock provider in terms of the common clock bindings, described in
- ../clock/clock-bindings.txt.
-- clock-output-names (deprecated): from the common clock bindings, names of
- the CDCLK I2S output clocks, suggested values are "i2s_cdclk0", "i2s_cdclk1",
- "i2s_cdclk3" for the I2S0, I2S1, I2S2 devices respectively.
-
-There are following clocks available at the I2S device nodes:
- CLK_I2S_CDCLK - the CDCLK (CODECLKO) gate clock,
- CLK_I2S_RCLK_PSR - the RCLK prescaler divider clock (corresponding to the
- IISPSR register),
- CLK_I2S_RCLK_SRC - the RCLKSRC mux clock (corresponding to RCLKSRC bit in
- IISMOD register).
-
-Refer to the SoC datasheet for availability of the above clocks.
-The CLK_I2S_RCLK_PSR and CLK_I2S_RCLK_SRC clocks are usually only available
-in the IIS Multi Audio Interface.
-
-Note: Old DTs may not have the #clock-cells property and then not use the I2S
-node as a clock supplier.
-
-Optional SoC Specific Properties:
-
-- samsung,idma-addr: Internal DMA register base address of the audio
- sub system(used in secondary sound source).
-- pinctrl-0: Should specify pin control groups used for this controller.
-- pinctrl-names: Should contain only one value - "default".
-- #sound-dai-cells: should be 1.
-
-
-Example:
-
-i2s0: i2s@3830000 {
- compatible = "samsung,s5pv210-i2s";
- reg = <0x03830000 0x100>;
- dmas = <&pdma0 10
- &pdma0 9
- &pdma0 8>;
- dma-names = "tx", "rx", "tx-sec";
- clocks = <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_I2S_BUS>,
- <&clock_audss EXYNOS_SCLK_I2S>;
- clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
- #clock-cells = <1>;
- samsung,idma-addr = <0x03000000>;
- pinctrl-names = "default";
- pinctrl-0 = <&i2s0_bus>;
- #sound-dai-cells = <1>;
-};
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.yaml b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
new file mode 100644
index 000000000000..53e3bad4178c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/samsung-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC I2S controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+properties:
+ compatible:
+ description: |
+ samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
+
+ samsung,s5pv210-i2s: for 8/16/24bit multichannel (5.1) I2S with
+ secondary FIFO, s/w reset control and internal mux for root clock
+ source.
+
+    samsung,exynos5420-i2s: for 8/16/24bit multichannel (5.1) I2S for
+      playback, stereo channel capture, secondary FIFO using internal
+      or external DMA, s/w reset control, internal mux for root clock
+      source and 7.1 channel TDM support for playback; TDM (time-division
+      multiplexing) allows transfer of multiple channels of audio data on
+      a single data line.
+
+    samsung,exynos7-i2s: with all the available features of Exynos5 I2S.
+      Exynos7 I2S has 7.1 channel TDM support for capture, a secondary FIFO
+      with external DMA only, and a greater number of root clock sampling
+      frequencies.
+
+    samsung,exynos7-i2s1: I2S1 on previous Samsung platforms supports
+      stereo channels. Exynos7 I2S1 was upgraded to 5.1 multichannel with
+      slightly modified bit offsets.
+ enum:
+ - samsung,s3c6410-i2s
+ - samsung,s5pv210-i2s
+ - samsung,exynos5420-i2s
+ - samsung,exynos7-i2s
+ - samsung,exynos7-i2s1
+
+ reg:
+ maxItems: 1
+
+ dmas:
+ minItems: 2
+ maxItems: 3
+
+ dma-names:
+ oneOf:
+ - items:
+ - const: tx
+ - const: rx
+ - items:
+ - const: tx
+ - const: rx
+ - const: tx-sec
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: iis
+ - items: # for I2S0
+ - const: iis
+ - const: i2s_opclk0
+ - const: i2s_opclk1
+ - items: # for I2S1 and I2S2
+ - const: iis
+ - const: i2s_opclk0
+ description: |
+ "iis" is the I2S bus clock and i2s_opclk0, i2s_opclk1 are sources
+ of the root clock. I2S0 has internal mux to select the source
+ of root clock and I2S1 and I2S2 doesn't have any such mux.
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ deprecated: true
+ oneOf:
+ - items: # for I2S0
+ - const: i2s_cdclk0
+ - items: # for I2S1
+ - const: i2s_cdclk1
+ - items: # for I2S2
+ - const: i2s_cdclk2
+ description: Names of the CDCLK I2S output clocks.
+
+ samsung,idma-addr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+      Internal DMA register base address of the audio
+      subsystem (used by the secondary sound source).
+
+ pinctrl-0:
+ description: Should specify pin control groups used for this controller.
+
+ pinctrl-names:
+ const: default
+
+ "#sound-dai-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - dmas
+ - dma-names
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos-audss-clk.h>
+
+ i2s0: i2s@3830000 {
+ compatible = "samsung,s5pv210-i2s";
+ reg = <0x03830000 0x100>;
+ dmas = <&pdma0 10>,
+ <&pdma0 9>,
+ <&pdma0 8>;
+ dma-names = "tx", "rx", "tx-sec";
+ clocks = <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_I2S_BUS>,
+ <&clock_audss EXYNOS_SCLK_I2S>;
+ clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+ #clock-cells = <1>;
+ samsung,idma-addr = <0x03000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2s0_bus>;
+ #sound-dai-cells = <1>;
+ };
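+
+  - |
+    /* A minimal sketch of an I2S1/I2S2-style node that takes both of its
+     * clocks from the CMU; the unit address and all phandles here are
+     * hypothetical.
+     */
+    i2s1: i2s@12d60000 {
+        compatible = "samsung,s3c6410-i2s";
+        reg = <0x12d60000 0x100>;
+        dmas = <&pdma1 12>,
+               <&pdma1 11>;
+        dma-names = "tx", "rx";
+        clocks = <&cmu 0>, <&cmu 1>;
+        clock-names = "iis", "i2s_opclk0";
+        #sound-dai-cells = <1>;
+    };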
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
deleted file mode 100644
index 66579bbd3294..000000000000
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-* Allwinner A10 Codec
-
-Required properties:
-- compatible: must be one of the following compatibles:
- - "allwinner,sun4i-a10-codec"
- - "allwinner,sun6i-a31-codec"
- - "allwinner,sun7i-a20-codec"
- - "allwinner,sun8i-a23-codec"
- - "allwinner,sun8i-h3-codec"
- - "allwinner,sun8i-v3s-codec"
-- reg: must contain the registers location and length
-- interrupts: must contain the codec interrupt
-- dmas: DMA channels for tx and rx dma. See the DMA client binding,
- Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: should include "tx" and "rx".
-- clocks: a list of phandle + clock-specifer pairs, one for each entry
- in clock-names.
-- clock-names: should contain the following:
- - "apb": the parent APB clock for this controller
- - "codec": the parent module clock
-
-Optional properties:
-- allwinner,pa-gpios: gpio to enable external amplifier
-
-Required properties for the following compatibles:
- - "allwinner,sun6i-a31-codec"
- - "allwinner,sun8i-a23-codec"
- - "allwinner,sun8i-h3-codec"
- - "allwinner,sun8i-v3s-codec"
-- resets: phandle to the reset control for this device
-- allwinner,audio-routing: A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source. Valid names include:
-
- Audio pins on the SoC:
- "HP"
- "HPCOM"
- "LINEIN" (not on sun8i-v3s)
- "LINEOUT" (not on sun8i-a23 or sun8i-v3s)
- "MIC1"
- "MIC2" (not on sun8i-v3s)
- "MIC3" (sun6i-a31 only)
-
- Microphone biases from the SoC:
- "HBIAS"
- "MBIAS" (not on sun8i-v3s)
-
- Board connectors:
- "Headphone"
- "Headset Mic"
- "Line In"
- "Line Out"
- "Mic"
- "Speaker"
-
-Required properties for the following compatibles:
- - "allwinner,sun8i-a23-codec"
- - "allwinner,sun8i-h3-codec"
- - "allwinner,sun8i-v3s-codec"
-- allwinner,codec-analog-controls: A phandle to the codec analog controls
- block in the PRCM.
-
-Example:
-codec: codec@1c22c00 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun7i-a20-codec";
- reg = <0x01c22c00 0x40>;
- interrupts = <0 30 4>;
- clocks = <&apb0_gates 0>, <&codec_clk>;
- clock-names = "apb", "codec";
- dmas = <&dma 0 19>, <&dma 0 19>;
- dma-names = "rx", "tx";
-};
-
-codec: codec@1c22c00 {
- #sound-dai-cells = <0>;
- compatible = "allwinner,sun6i-a31-codec";
- reg = <0x01c22c00 0x98>;
- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu CLK_APB1_CODEC>, <&ccu CLK_CODEC>;
- clock-names = "apb", "codec";
- resets = <&ccu RST_APB1_CODEC>;
- dmas = <&dma 15>, <&dma 15>;
- dma-names = "rx", "tx";
- allwinner,audio-routing =
- "Headphone", "HP",
- "Speaker", "LINEOUT",
- "LINEIN", "Line In",
- "MIC1", "MBIAS",
- "MIC1", "Mic",
- "MIC2", "HBIAS",
- "MIC2", "Headset Mic";
-};
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
deleted file mode 100644
index 07356758bd91..000000000000
--- a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* Allwinner Codec Analog Controls
-
-Required properties:
-- compatible: must be one of the following compatibles:
- - "allwinner,sun8i-a23-codec-analog"
- - "allwinner,sun8i-h3-codec-analog"
- - "allwinner,sun8i-v3s-codec-analog"
-
-Required properties if not a sub-node of the PRCM node:
-- reg: must contain the registers location and length
-
-Example:
-prcm: prcm@1f01400 {
- codec_analog: codec-analog {
- compatible = "allwinner,sun8i-a23-codec-analog";
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/tas2562.txt b/Documentation/devicetree/bindings/sound/tas2562.txt
new file mode 100644
index 000000000000..658e1fb18a99
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas2562.txt
@@ -0,0 +1,34 @@
+Texas Instruments TAS2562 Smart PA
+
+The TAS2562 is a mono, digital input Class-D audio amplifier optimized for
+efficiently driving high peak power into small loudspeakers.
+Integrated speaker voltage and current sense provides
+real-time monitoring of loudspeaker behavior.
+
+Required properties:
+ - #address-cells: Should be <1>.
+ - #size-cells: Should be <0>.
+ - compatible: Should contain "ti,tas2562".
+ - reg: The I2C address. Should be 0x4c, 0x4d, 0x4e or 0x4f.
+ - ti,imon-slot-no: TDM TX current sense time slot.
+
+Optional properties:
+- interrupt-parent: phandle to the interrupt controller which provides
+ the interrupt.
+- interrupts: (GPIO) interrupt to which the chip is connected.
+- shut-down: GPIO used to control the state of the device.
+
+Examples:
+tas2562@4c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "ti,tas2562";
+ reg = <0x4c>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+
+ shut-down = <&gpio1 15 0>;
+ ti,imon-slot-no = <0>;
+};
+
diff --git a/Documentation/devicetree/bindings/sound/tas2770.txt b/Documentation/devicetree/bindings/sound/tas2770.txt
new file mode 100644
index 000000000000..ede6bb3d9637
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas2770.txt
@@ -0,0 +1,37 @@
+Texas Instruments TAS2770 Smart PA
+
+The TAS2770 is a mono, digital input Class-D audio amplifier optimized for
+efficiently driving high peak power into small loudspeakers.
+Integrated speaker voltage and current sense provides
+real-time monitoring of loudspeaker behavior.
+
+Required properties:
+
+ - compatible: Should contain "ti,tas2770".
+ - reg: The I2C address. Should contain <0x4c>, <0x4d>, <0x4e>, or <0x4f>.
+ - #address-cells: Should be <1>.
+ - #size-cells: Should be <0>.
+ - ti,asi-format: Sets the TDM RX capture edge. 0 = rising; 1 = falling.
+ - ti,imon-slot-no: TDM TX current sense time slot.
+ - ti,vmon-slot-no: TDM TX voltage sense time slot.
+
+Optional properties:
+
+- interrupt-parent: the phandle to the interrupt controller which provides
+ the interrupt.
+- interrupts: interrupt specification for data-ready.
+
+Examples:
+
+ tas2770@4c {
+ compatible = "ti,tas2770";
+ reg = <0x4c>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <97 0>;
+ ti,asi-format = <0>;
+ ti,imon-slot-no = <0>;
+ ti,vmon-slot-no = <2>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt b/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt
index 5d9cb84c661d..a02ecaab5183 100644
--- a/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt
+++ b/Documentation/devicetree/bindings/sound/ti,pcm3168a.txt
@@ -25,6 +25,13 @@ Required properties:
For required properties on SPI/I2C, consult SPI/I2C device tree documentation
+Optional properties:
+
+ - reset-gpios : Optional reset GPIO line connected to the RST pin of the
+                 codec. The RST line is active low:
+                 RST = low: device power-down
+                 RST = high: device is enabled
+
Examples:
i2c0: i2c0@0 {
@@ -34,6 +41,7 @@ i2c0: i2c0@0 {
pcm3168a: audio-codec@44 {
compatible = "ti,pcm3168a";
reg = <0x44>;
+ reset-gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
clocks = <&clk_core CLK_AUDIO>;
clock-names = "scki";
VDD1-supply = <&supply3v3>;
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 5b3c33bb99e5..e372303697dc 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -29,6 +29,11 @@ Optional properties:
3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
+- ai31xx-ocmv - output common-mode voltage setting
+ 0 - 1.35V,
+ 1 - 1.5V,
+ 2 - 1.65V,
+ 3 - 1.8V
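+
+  For example, a hypothetical node fragment selecting the 1.65V setting:
+
+	ai31xx-ocmv = <2>;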
Deprecated properties:
diff --git a/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt b/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt
deleted file mode 100644
index 194f6a3c1c1e..000000000000
--- a/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Milbeaut SRAM for smp bringup
-
-Milbeaut SoCs use a part of the sram for the bringup of the secondary cores.
-Once they get powered up in the bootloader, they stay at the specific part
-of the sram.
-Therefore the part needs to be added as the sub-node of mmio-sram.
-
-Required sub-node properties:
-- compatible : should be "socionext,milbeaut-smp-sram"
-
-Example:
-
- sram: sram@0 {
- compatible = "mmio-sram";
- reg = <0x0 0x10000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x0 0x10000>;
-
- smp-sram@f100 {
- compatible = "socionext,milbeaut-smp-sram";
- reg = <0xf100 0x20>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sram/renesas,smp-sram.txt b/Documentation/devicetree/bindings/sram/renesas,smp-sram.txt
deleted file mode 100644
index 712d05e3e15e..000000000000
--- a/Documentation/devicetree/bindings/sram/renesas,smp-sram.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Renesas SMP SRAM
-
-Renesas R-Car Gen2 and RZ/G1 SoCs need a small piece of SRAM for the jump stub
-for secondary CPU bringup and CPU hotplug.
-This memory is reserved by adding a child node to a "mmio-sram" node, cfr.
-Documentation/devicetree/bindings/sram/sram.txt.
-
-Required child node properties:
- - compatible: Must be "renesas,smp-sram",
- - reg: Address and length of the reserved SRAM.
- The full physical (bus) address must be aligned to a 256 KiB boundary.
-
-
-Example:
-
- icram1: sram@e63c0000 {
- compatible = "mmio-sram";
- reg = <0 0xe63c0000 0 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0xe63c0000 0x1000>;
-
- smp-sram@0 {
- compatible = "renesas,smp-sram";
- reg = <0 0x10>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sram/rockchip-smp-sram.txt b/Documentation/devicetree/bindings/sram/rockchip-smp-sram.txt
deleted file mode 100644
index 800701ecffca..000000000000
--- a/Documentation/devicetree/bindings/sram/rockchip-smp-sram.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Rockchip SRAM for smp bringup:
-------------------------------
-
-Rockchip's smp-capable SoCs use the first part of the sram for the bringup
-of the cores. Once the core gets powered up it executes the code that is
-residing at the very beginning of the sram.
-
-Therefore a reserved section sub-node has to be added to the mmio-sram
-declaration.
-
-Required sub-node properties:
-- compatible : should be "rockchip,rk3066-smp-sram"
-
-The rest of the properties should follow the generic mmio-sram discription
-found in Documentation/devicetree/bindings/sram/sram.txt
-
-Example:
-
- sram: sram@10080000 {
- compatible = "mmio-sram";
- reg = <0x10080000 0x10000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- smp-sram@10080000 {
- compatible = "rockchip,rk3066-smp-sram";
- reg = <0x10080000 0x50>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sram/samsung-sram.txt b/Documentation/devicetree/bindings/sram/samsung-sram.txt
deleted file mode 100644
index 61a9bbed303d..000000000000
--- a/Documentation/devicetree/bindings/sram/samsung-sram.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-Samsung Exynos SYSRAM for SMP bringup:
-------------------------------------
-
-Samsung SMP-capable Exynos SoCs use part of the SYSRAM for the bringup
-of the secondary cores. Once the core gets powered up it executes the
-code that is residing at some specific location of the SYSRAM.
-
-Therefore reserved section sub-nodes have to be added to the mmio-sram
-declaration. These nodes are of two types depending upon secure or
-non-secure execution environment.
-
-Required sub-node properties:
-- compatible : depending upon boot mode, should be
- "samsung,exynos4210-sysram" : for Secure SYSRAM
- "samsung,exynos4210-sysram-ns" : for Non-secure SYSRAM
-
-The rest of the properties should follow the generic mmio-sram discription
-found in Documentation/devicetree/bindings/sram/sram.txt
-
-Example:
-
- sysram@2020000 {
- compatible = "mmio-sram";
- reg = <0x02020000 0x54000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x02020000 0x54000>;
-
- smp-sysram@0 {
- compatible = "samsung,exynos4210-sysram";
- reg = <0x0 0x1000>;
- };
-
- smp-sysram@53000 {
- compatible = "samsung,exynos4210-sysram-ns";
- reg = <0x53000 0x1000>;
- };
- };
diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt
deleted file mode 100644
index e98908bd4227..000000000000
--- a/Documentation/devicetree/bindings/sram/sram.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Generic on-chip SRAM
-
-Simple IO memory regions to be managed by the genalloc API.
-
-Required properties:
-
-- compatible : mmio-sram or atmel,sama5d2-securam
-
-- reg : SRAM iomem address range
-
-Reserving sram areas:
----------------------
-
-Each child of the sram node specifies a region of reserved memory. Each
-child node should use a 'reg' property to specify a specific range of
-reserved memory.
-
-Following the generic-names recommended practice, node names should
-reflect the purpose of the node. Unit address (@<address>) should be
-appended to the name.
-
-Required properties in the sram node:
-
-- #address-cells, #size-cells : should use the same values as the root node
-- ranges : standard definition, should translate from local addresses
- within the sram to bus addresses
-
-Optional properties in the sram node:
-
-- no-memory-wc : the flag indicating, that SRAM memory region has not to
- be remapped as write combining. WC is used by default.
-
-Required properties in the area nodes:
-
-- reg : iomem address range, relative to the SRAM range
-
-Optional properties in the area nodes:
-
-- compatible : standard definition, should contain a vendor specific string
- in the form <vendor>,[<device>-]<usage>
-- pool : indicates that the particular reserved SRAM area is addressable
- and in use by another device or devices
-- export : indicates that the reserved SRAM area may be accessed outside
- of the kernel, e.g. by bootloader or userspace
-- protect-exec : Same as 'pool' above but with the additional
- constraint that code wil be run from the region and
- that the memory is maintained as read-only, executable
- during code execution. NOTE: This region must be page
- aligned on start and end in order to properly allow
- manipulation of the page attributes.
-- label : the name for the reserved partition, if omitted, the label
- is taken from the node name excluding the unit address.
-- clocks : a list of phandle and clock specifier pair that controls the
- single SRAM clock.
-
-Example:
-
-sram: sram@5c000000 {
- compatible = "mmio-sram";
- reg = <0x5c000000 0x40000>; /* 256 KiB SRAM at address 0x5c000000 */
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x5c000000 0x40000>;
-
- smp-sram@100 {
- compatible = "socvendor,smp-sram";
- reg = <0x100 0x50>;
- };
-
- device-sram@1000 {
- reg = <0x1000 0x1000>;
- pool;
- };
-
- exported@20000 {
- reg = <0x20000 0x20000>;
- export;
- };
-};
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
new file mode 100644
index 000000000000..ee2287a1b14d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -0,0 +1,257 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sram/sram.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic on-chip SRAM
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+description: |+
+ Simple IO memory regions to be managed by the genalloc API.
+
+ Each child of the sram node specifies a region of reserved memory. Each
+ child node should use a 'reg' property to specify a specific range of
+ reserved memory.
+
+ Following the generic-names recommended practice, node names should
+ reflect the purpose of the node. Unit address (@<address>) should be
+ appended to the name.
+
+properties:
+ $nodename:
+ pattern: "^sram(@.*)?"
+
+ compatible:
+ contains:
+ enum:
+ - mmio-sram
+ - atmel,sama5d2-securam
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description:
+      A list of phandle and clock specifier pairs that control the single
+      SRAM clock.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ ranges:
+ description:
+ Should translate from local addresses within the sram to bus addresses.
+
+ no-memory-wc:
+ description:
+      Flag indicating that the SRAM memory region must not be remapped
+      as write-combining. WC is used by default.
+ type: boolean
+
+patternProperties:
+ "^([a-z]*-)?sram@[a-f0-9]+$":
+ type: object
+ description:
+ Each child of the sram node specifies a region of reserved memory.
+ properties:
+ compatible:
+ description:
+ Should contain a vendor specific string in the form
+ <vendor>,[<device>-]<usage>
+ enum:
+ - allwinner,sun9i-a80-smp-sram
+ - amlogic,meson8-smp-sram
+ - amlogic,meson8b-smp-sram
+ - renesas,smp-sram
+ - rockchip,rk3066-smp-sram
+ - samsung,exynos4210-sysram
+ - samsung,exynos4210-sysram-ns
+ - socionext,milbeaut-smp-sram
+
+ reg:
+ description:
+ IO mem address range, relative to the SRAM range.
+ maxItems: 1
+
+ pool:
+ description:
+ Indicates that the particular reserved SRAM area is addressable
+ and in use by another device or devices.
+ type: boolean
+
+ export:
+ description:
+ Indicates that the reserved SRAM area may be accessed outside
+ of the kernel, e.g. by bootloader or userspace.
+ type: boolean
+
+ protect-exec:
+ description: |
+ Same as 'pool' above but with the additional constraint that code
+ will be run from the region and that the memory is maintained as
+ read-only, executable during code execution. NOTE: This region must
+ be page aligned on start and end in order to properly allow
+ manipulation of the page attributes.
+ type: boolean
+
+ label:
+ description:
+          The name for the reserved partition. If omitted, the label is taken
+          from the node name, excluding the unit address.
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ sram@5c000000 {
+ compatible = "mmio-sram";
+ reg = <0x5c000000 0x40000>; /* 256 KiB SRAM at address 0x5c000000 */
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x5c000000 0x40000>;
+
+ smp-sram@100 {
+ reg = <0x100 0x50>;
+ };
+
+ device-sram@1000 {
+ reg = <0x1000 0x1000>;
+ pool;
+ };
+
+ exported-sram@20000 {
+ reg = <0x20000 0x20000>;
+ export;
+ };
+ };
+
+ - |
+ // Samsung SMP-capable Exynos SoCs use part of the SYSRAM for the bringup
+ // of the secondary cores. Once the core gets powered up it executes the
+ // code that is residing at some specific location of the SYSRAM.
+ //
+ // Therefore reserved section sub-nodes have to be added to the mmio-sram
+ // declaration. These nodes are of two types depending upon secure or
+ // non-secure execution environment.
+ sram@2020000 {
+ compatible = "mmio-sram";
+ reg = <0x02020000 0x54000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x02020000 0x54000>;
+
+ smp-sram@0 {
+ compatible = "samsung,exynos4210-sysram";
+ reg = <0x0 0x1000>;
+ };
+
+ smp-sram@53000 {
+ compatible = "samsung,exynos4210-sysram-ns";
+ reg = <0x53000 0x1000>;
+ };
+ };
+
+ - |
+ // Amlogic's SMP-capable SoCs use part of the sram for the bringup of the cores.
+ // Once the core gets powered up it executes the code that is residing at a
+ // specific location.
+ //
+ // Therefore a reserved section sub-node has to be added to the mmio-sram
+ // declaration.
+ sram@d9000000 {
+ compatible = "mmio-sram";
+ reg = <0xd9000000 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xd9000000 0x20000>;
+
+ smp-sram@1ff80 {
+ compatible = "amlogic,meson8b-smp-sram";
+ reg = <0x1ff80 0x8>;
+ };
+ };
+
+ - |
+ sram@e63c0000 {
+ compatible = "mmio-sram";
+ reg = <0xe63c0000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xe63c0000 0x1000>;
+
+ smp-sram@0 {
+ compatible = "renesas,smp-sram";
+ reg = <0 0x10>;
+ };
+ };
+
+ - |
+ sram@10080000 {
+ compatible = "mmio-sram";
+ reg = <0x10080000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ smp-sram@10080000 {
+ compatible = "rockchip,rk3066-smp-sram";
+ reg = <0x10080000 0x50>;
+ };
+ };
+
+ - |
+ // Allwinner's A80 SoC uses part of the secure sram for hotplugging of the
+ // primary core (cpu0). Once the core gets powered up it checks if a magic
+ // value is set at a specific location. If it is then the BROM will jump
+ // to the software entry address, instead of executing a standard boot.
+ //
+ // Also there are no "secure-only" properties. The implementation should
+ // check if this SRAM is usable first.
+ sram@20000 {
+ // 256 KiB secure SRAM at 0x20000
+ compatible = "mmio-sram";
+ reg = <0x00020000 0x40000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x00020000 0x40000>;
+
+ smp-sram@1000 {
+ // This is checked by BROM to determine if
+ // cpu0 should jump to SMP entry vector
+ compatible = "allwinner,sun9i-a80-smp-sram";
+ reg = <0x1000 0x8>;
+ };
+ };
+
+ - |
+ sram@0 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x10000>;
+
+ smp-sram@f100 {
+ compatible = "socionext,milbeaut-smp-sram";
+ reg = <0xf100 0x20>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index de0d6090c0fd..98bee6240b65 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -15,17 +15,28 @@ I. For patch submitters
use "Documentation" or "doc" because that is implied. All bindings are
docs. Repeating "binding" again should also be avoided.
- 2) Submit the entire series to the devicetree mailinglist at
+ 2) DT binding files are written in DT schema format using json-schema
+ vocabulary and YAML file format. The DT binding files must pass validation
+ by running:
+
+ make dt_binding_check
+
+      See ../writing-schema.rst for more details about the schema format and
+      tool setup.
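+
+      To check a single schema in isolation, the DT_SCHEMA_FILES variable
+      described in ../writing-schema.rst can typically be passed, e.g.:
+
+      make dt_binding_check DT_SCHEMA_FILES=Documentation/devicetree/bindings/trivial-devices.yaml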
+
+ 3) DT binding files should be dual licensed. The preferred license tag is
+ (GPL-2.0-only OR BSD-2-Clause).
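+
+      For example, the first line of a new schema file would then carry:
+
+      # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)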
+
+  4) Submit the entire series to the devicetree mailing list at
devicetree@vger.kernel.org
and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
all of the DT maintainers.
- 3) The Documentation/ portion of the patch should come in the series before
+ 5) The Documentation/ portion of the patch should come in the series before
the code implementing the binding.
- 4) Any compatible strings used in a chip or board DTS file must be
+ 6) Any compatible strings used in a chip or board DTS file must be
previously documented in the corresponding DT binding text file
in Documentation/devicetree/bindings. This rule applies even if
the Linux device driver does not yet match on the compatible
@@ -33,7 +44,7 @@ I. For patch submitters
followed as of commit bff5da4335256513497cc8c79f9a9d1665e09864
("checkpatch: add DT compatible string documentation checks"). ]
- 5) The wildcard "<chip>" may be used in compatible strings, as in
+ 7) The wildcard "<chip>" may be used in compatible strings, as in
the following example:
- compatible: Must contain '"nvidia,<chip>-pcie",
@@ -42,7 +53,7 @@ I. For patch submitters
As in the above example, the known values of "<chip>" should be
documented if it is used.
- 6) If a documented compatible string is not yet matched by the
+ 8) If a documented compatible string is not yet matched by the
driver, the documentation should also include a compatible
string that is matched by the driver (as in the "nvidia,tegra20-pcie"
example above).
diff --git a/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml b/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml
new file mode 100644
index 000000000000..c0f59c56003d
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/st,stm32-thermal.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/st,stm32-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 digital thermal sensor (DTS) binding
+
+maintainers:
+ - David Hernandez Sanchez <david.hernandezsanchez@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-thermal
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pclk
+
+ "#thermal-sensor-cells":
+ const: 0
+
+required:
+ - "#thermal-sensor-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ dts: thermal@50028000 {
+ compatible = "st,stm32-thermal";
+ reg = <0x50028000 0x100>;
+ clocks = <&rcc TMPSENS>;
+ clock-names = "pclk";
+ #thermal-sensor-cells = <0>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&dts>;
+ trips {
+ cpu_alert1: cpu-alert1 {
+ temperature = <85000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ cpu_crit: cpu-crit {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/thermal/stm32-thermal.txt b/Documentation/devicetree/bindings/thermal/stm32-thermal.txt
deleted file mode 100644
index 8c0d5a4d8031..000000000000
--- a/Documentation/devicetree/bindings/thermal/stm32-thermal.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Binding for Thermal Sensor for STMicroelectronics STM32 series of SoCs.
-
-On STM32 SoCs, the Digital Temperature Sensor (DTS) is in charge of managing an
-analog block which delivers a frequency depending on the internal SoC's
-temperature. By using a reference frequency, DTS is able to provide a sample
-number which can be translated into a temperature by the user.
-
-DTS provides interrupt notification mechanism by threshold. This mechanism
-offers two temperature trip points: passive and critical. The first is intended
-for passive cooling notification while the second is used for over-temperature
-reset.
-
-Required parameters:
--------------------
-
-compatible: Should be "st,stm32-thermal"
-reg: This should be the physical base address and length of the
- sensor's registers.
-clocks: Phandle of the clock used by the thermal sensor.
- See: Documentation/devicetree/bindings/clock/clock-bindings.txt
-clock-names: Should be "pclk" for register access clock and reference clock.
- See: Documentation/devicetree/bindings/resource-names.txt
-#thermal-sensor-cells: Should be 0. See ./thermal.txt for a description.
-interrupts: Standard way to define interrupt number.
-
-Example:
-
- thermal-zones {
- cpu_thermal: cpu-thermal {
- polling-delay-passive = <0>;
- polling-delay = <0>;
-
- thermal-sensors = <&thermal>;
-
- trips {
- cpu_alert1: cpu-alert1 {
- temperature = <85000>;
- hysteresis = <0>;
- type = "passive";
- };
-
- cpu-crit: cpu-crit {
- temperature = <120000>;
- hysteresis = <0>;
- type = "critical";
- };
- };
-
- cooling-maps {
- };
- };
- };
-
- thermal: thermal@50028000 {
- compatible = "st,stm32-thermal";
- reg = <0x50028000 0x100>;
- clocks = <&rcc TMPSENS>;
- clock-names = "pclk";
- #thermal-sensor-cells = <0>;
- interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
- };
diff --git a/Documentation/devicetree/bindings/timer/ingenic,tcu.txt b/Documentation/devicetree/bindings/timer/ingenic,tcu.txt
index 5a4b9ddd9470..0b63cebc5f45 100644
--- a/Documentation/devicetree/bindings/timer/ingenic,tcu.txt
+++ b/Documentation/devicetree/bindings/timer/ingenic,tcu.txt
@@ -2,7 +2,7 @@ Ingenic JZ47xx SoCs Timer/Counter Unit devicetree bindings
==========================================================
For a description of the TCU hardware and drivers, have a look at
-Documentation/mips/ingenic-tcu.txt.
+Documentation/mips/ingenic-tcu.rst.
Required properties:
@@ -42,7 +42,7 @@ Required properties:
- compatible: Must be one of:
* ingenic,jz4740-pwm
* ingenic,jz4725b-pwm
-- #pwm-cells: Should be 3. See ../pwm/pwm.txt for a description of the cell
+- #pwm-cells: Should be 3. See ../pwm/pwm.yaml for a description of the cell
format.
- clocks: List of phandle & clock specifiers for the TCU clocks.
- clock-names: List of name strings for the TCU clocks.
diff --git a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.txt b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.txt
deleted file mode 100644
index 8f78640ad64c..000000000000
--- a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-Samsung's Multi Core Timer (MCT)
-
-The Samsung's Multi Core Timer (MCT) module includes two main blocks, the
-global timer and CPU local timers. The global timer is a 64-bit free running
-up-counter and can generate 4 interrupts when the counter reaches one of the
-four preset counter values. The CPU local timers are 32-bit free running
-down-counters and generate an interrupt when the counter expires. There is
-one CPU local timer instantiated in MCT for every CPU in the system.
-
-Required properties:
-
-- compatible: should be "samsung,exynos4210-mct".
- (a) "samsung,exynos4210-mct", for mct compatible with Exynos4210 mct.
- (b) "samsung,exynos4412-mct", for mct compatible with Exynos4412 mct.
-
-- reg: base address of the mct controller and length of the address space
- it occupies.
-
-- interrupts: the list of interrupts generated by the controller. The following
- should be the order of the interrupts specified. The local timer interrupts
- should be specified after the four global timer interrupts have been
- specified.
-
- 0: Global Timer Interrupt 0
- 1: Global Timer Interrupt 1
- 2: Global Timer Interrupt 2
- 3: Global Timer Interrupt 3
- 4: Local Timer Interrupt 0
- 5: Local Timer Interrupt 1
- 6: ..
- 7: ..
- i: Local Timer Interrupt n
-
- For MCT block that uses a per-processor interrupt for local timers, such
- as ones compatible with "samsung,exynos4412-mct", only one local timer
- interrupt might be specified, meaning that all local timers use the same
- per processor interrupt.
-
-Example 1: In this example, the IP contains two local timers, using separate
- interrupts, so two local timer interrupts have been specified,
- in addition to four global timer interrupts.
-
- mct@10050000 {
- compatible = "samsung,exynos4210-mct";
- reg = <0x10050000 0x800>;
- interrupts = <0 57 0>, <0 69 0>, <0 70 0>, <0 71 0>,
- <0 42 0>, <0 48 0>;
- };
-
-Example 2: In this example, the timer interrupts are connected to two separate
- interrupt controllers. Hence, an interrupt-map is created to map
- the interrupts to the respective interrupt controllers.
-
- mct@101c0000 {
- compatible = "samsung,exynos4210-mct";
- reg = <0x101C0000 0x800>;
- interrupt-parent = <&mct_map>;
- interrupts = <0>, <1>, <2>, <3>, <4>, <5>;
-
- mct_map: mct-map {
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &gic 0 57 0>,
- <1 &gic 0 69 0>,
- <2 &combiner 12 6>,
- <3 &combiner 12 7>,
- <4 &gic 0 42 0>,
- <5 &gic 0 48 0>;
- };
- };
-
-Example 3: In this example, the IP contains four local timers, but using
- a per-processor interrupt to handle them. Either all the local
- timer interrupts can be specified, with the same interrupt specifier
- value or just the first one.
-
- mct@10050000 {
- compatible = "samsung,exynos4412-mct";
- reg = <0x10050000 0x800>;
-
- /* Both ways are possible in this case. Either: */
- interrupts = <0 57 0>, <0 69 0>, <0 70 0>, <0 71 0>,
- <0 42 0>;
- /* or: */
- interrupts = <0 57 0>, <0 69 0>, <0 70 0>, <0 71 0>,
- <0 42 0>, <0 42 0>, <0 42 0>, <0 42 0>;
- };
diff --git a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
new file mode 100644
index 000000000000..273e359854dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/samsung,exynos4210-mct.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos SoC Multi Core Timer (MCT)
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |+
+ The Samsung's Multi Core Timer (MCT) module includes two main blocks, the
+ global timer and CPU local timers. The global timer is a 64-bit free running
+ up-counter and can generate 4 interrupts when the counter reaches one of the
+ four preset counter values. The CPU local timers are 32-bit free running
+ down-counters and generate an interrupt when the counter expires. There is
+ one CPU local timer instantiated in MCT for every CPU in the system.
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos4210-mct
+ - samsung,exynos4412-mct
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: |
+      Interrupts should be put in a specific order; that is, the local timer
+      interrupts should be specified after the four global timer interrupts
+      have been specified:
+ 0: Global Timer Interrupt 0
+ 1: Global Timer Interrupt 1
+ 2: Global Timer Interrupt 2
+ 3: Global Timer Interrupt 3
+ 4: Local Timer Interrupt 0
+ 5: Local Timer Interrupt 1
+ 6: ..
+ 7: ..
+ i: Local Timer Interrupt n
+      For an MCT block that uses a per-processor interrupt for local timers,
+      such as ones compatible with "samsung,exynos4412-mct", only one local
+      timer interrupt might be specified, meaning that all local timers use
+      the same per-processor interrupt.
+ minItems: 5 # 4 Global + 1 local
+ maxItems: 20 # 4 Global + 16 local
+
+required:
+ - compatible
+ - interrupts
+ - reg
+
+examples:
+ - |
+ // In this example, the IP contains two local timers, using separate
+ // interrupts, so two local timer interrupts have been specified,
+ // in addition to four global timer interrupts.
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@10050000 {
+ compatible = "samsung,exynos4210-mct";
+ reg = <0x10050000 0x800>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ - |
+    // In this example, the timer interrupts are connected to two separate
+    // interrupt controllers. Hence, the interrupts-extended property is needed.
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@101c0000 {
+ compatible = "samsung,exynos4210-mct";
+ reg = <0x101C0000 0x800>;
+ interrupts-extended = <&gic GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <&combiner 12 6>,
+ <&combiner 12 7>,
+ <&gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ - |
+    // In this example, the IP contains four local timers that are handled
+    // by a per-processor interrupt. Only the first local timer interrupt
+    // is specified.
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@10050000 {
+ compatible = "samsung,exynos4412-mct";
+ reg = <0x10050000 0x800>;
+
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ - |
+    // In this example, the IP contains four local timers that are handled
+    // by a per-processor interrupt. All the local timer interrupts are
+    // specified.
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@10050000 {
+ compatible = "samsung,exynos4412-mct";
+ reg = <0x10050000 0x800>;
+
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_PPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/st,stm32-timer.txt b/Documentation/devicetree/bindings/timer/st,stm32-timer.txt
deleted file mode 100644
index 8ef28e70d6e8..000000000000
--- a/Documentation/devicetree/bindings/timer/st,stm32-timer.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-. STMicroelectronics STM32 timer
-
-The STM32 MCUs family has several general-purpose 16 and 32 bits timers.
-
-Required properties:
-- compatible : Should be "st,stm32-timer"
-- reg : Address and length of the register set
-- clocks : Reference on the timer input clock
-- interrupts : Reference to the timer interrupt
-
-Optional properties:
-- resets: Reference to a reset controller asserting the timer
-
-Example:
-
-timer5: timer@40000c00 {
- compatible = "st,stm32-timer";
- reg = <0x40000c00 0x400>;
- interrupts = <50>;
- resets = <&rrc 259>;
- clocks = <&clk_pmtr1>;
-};
diff --git a/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml b/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml
new file mode 100644
index 000000000000..176aa3c9baf8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/st,stm32-timer.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/st,stm32-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 general-purpose 16- and 32-bit timer bindings
+
+maintainers:
+ - Benjamin Gaignard <benjamin.gaignard@st.com>
+
+properties:
+ compatible:
+ const: st,stm32-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ timer: timer@40000c00 {
+ compatible = "st,stm32-timer";
+ reg = <0x40000c00 0x400>;
+ interrupts = <50>;
+ clocks = <&clk_pmtr1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index 870ac52d2225..765fd1c170df 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -114,6 +114,18 @@ properties:
- isil,isl68137
# 5 Bit Programmable, Pulse-Width Modulator
- maxim,ds1050
+ # 10-bit 8 channels 300ks/s SPI ADC with temperature sensor
+ - maxim,max1027
+ # 10-bit 12 channels 300ks/s SPI ADC with temperature sensor
+ - maxim,max1029
+ # 10-bit 16 channels 300ks/s SPI ADC with temperature sensor
+ - maxim,max1031
+ # 12-bit 8 channels 300ks/s SPI ADC with temperature sensor
+ - maxim,max1227
+ # 12-bit 12 channels 300ks/s SPI ADC with temperature sensor
+ - maxim,max1229
+ # 12-bit 16 channels 300ks/s SPI ADC with temperature sensor
+ - maxim,max1231
# Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
- maxim,max1237
# PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion
diff --git a/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml b/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
new file mode 100644
index 000000000000..c8a2a92074df
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/ti,j721e-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI J721e UFS Host Controller Glue Driver
+
+maintainers:
+ - Vignesh Raghavendra <vigneshr@ti.com>
+
+properties:
+ compatible:
+ items:
+ - const: ti,j721e-ufs
+
+ reg:
+ maxItems: 1
+ description: address of TI UFS glue registers
+
+ clocks:
+ maxItems: 1
+ description: phandle to the M-PHY clock
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - power-domains
+
+patternProperties:
+ "^ufs@[0-9a-f]+$":
+ type: object
+ description: |
+      The Cadence UFS controller node must be the child node. Refer to
+      Documentation/devicetree/bindings/ufs/cdns,ufshc.txt for the binding
+      documentation of the child node.
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ ufs_wrapper: ufs-wrapper@4e80000 {
+ compatible = "ti,j721e-ufs";
+ reg = <0x0 0x4e80000 0x0 0x100>;
+ power-domains = <&k3_pds 277>;
+ clocks = <&k3_clks 277 1>;
+ assigned-clocks = <&k3_clks 277 1>;
+ assigned-clock-parents = <&k3_clks 277 4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ufs@4e84000 {
+ compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
+ reg = <0x0 0x4e84000 0x0 0x10000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ freq-table-hz = <19200000 19200000>;
+ power-domains = <&k3_pds 277>;
+ clocks = <&k3_clks 277 1>;
+ assigned-clocks = <&k3_clks 277 1>;
+ assigned-clock-parents = <&k3_clks 277 4>;
+ clock-names = "core_clk";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index d78ef63935f9..415ccdd7442d 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -13,6 +13,7 @@ Required properties:
"qcom,msm8996-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+ "qcom,sm8150-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
- interrupts : <interrupt mapping for UFS host controller IRQ>
- reg : <registers mapping>
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
deleted file mode 100644
index 50abb20fe319..000000000000
--- a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Allwinner sun4i A10 musb DRC/OTG controller
--------------------------------------------
-
-Required properties:
- - compatible : "allwinner,sun4i-a10-musb", "allwinner,sun6i-a31-musb",
- "allwinner,sun8i-a33-musb" or "allwinner,sun8i-h3-musb"
- - reg : mmio address range of the musb controller
- - clocks : clock specifier for the musb controller ahb gate clock
- - reset : reset specifier for the ahb reset (A31 and newer only)
- - interrupts : interrupt to which the musb controller is connected
- - interrupt-names : must be "mc"
- - phys : phy specifier for the otg phy
- - phy-names : must be "usb"
- - dr_mode : Dual-Role mode must be "host" or "otg"
- - extcon : extcon specifier for the otg phy
-
-Example:
-
- usb_otg: usb@1c13000 {
- compatible = "allwinner,sun4i-a10-musb";
- reg = <0x01c13000 0x0400>;
- clocks = <&ahb_gates 0>;
- interrupts = <38>;
- interrupt-names = "mc";
- phys = <&usbphy 0>;
- phy-names = "usb";
- extcon = <&usbphy 0>;
- };
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
new file mode 100644
index 000000000000..0af70fc8de5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/allwinner,sun4i-a10-musb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 mUSB OTG Controller Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: allwinner,sun4i-a10-musb
+ - const: allwinner,sun6i-a31-musb
+ - const: allwinner,sun8i-a33-musb
+ - const: allwinner,sun8i-h3-musb
+ - items:
+ - enum:
+ - allwinner,sun8i-a83t-musb
+ - allwinner,sun50i-h6-musb
+ - const: allwinner,sun8i-a33-musb
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ const: mc
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ phys:
+ description: PHY specifier for the OTG PHY
+
+ phy-names:
+ const: usb
+
+ extcon:
+ description: Extcon specifier for the OTG PHY
+
+ dr_mode:
+ enum:
+ - host
+ - otg
+ - peripheral
+
+ allwinner,sram:
+ description: Phandle to the device SRAM
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - phys
+ - phy-names
+ - dr_mode
+ - extcon
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - allwinner,sun6i-a31-musb
+ - allwinner,sun8i-a33-musb
+ - allwinner,sun8i-h3-musb
+
+then:
+ required:
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ usb_otg: usb@1c13000 {
+ compatible = "allwinner,sun4i-a10-musb";
+ reg = <0x01c13000 0x0400>;
+ clocks = <&ahb_gates 0>;
+ interrupts = <38>;
+ interrupt-names = "mc";
+ phys = <&usbphy 0>;
+ phy-names = "usb";
+ extcon = <&usbphy 0>;
+ dr_mode = "peripheral";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
index 6ffb09be7a76..9a8b631904fd 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
@@ -40,91 +40,3 @@ Example device nodes:
phy-names = "usb2-phy", "usb3-phy";
};
};
-
-Amlogic Meson G12A DWC3 USB SoC Controller Glue
-
-The Amlogic G12A embeds a DWC3 USB IP Core configured for USB2 and USB3
-in host-only mode, and a DWC2 IP Core configured for USB2 peripheral mode
-only.
-
-A glue connects the DWC3 core to USB2 PHYs and optionnaly to an USB3 PHY.
-
-One of the USB2 PHY can be re-routed in peripheral mode to a DWC2 USB IP.
-
-The DWC3 Glue controls the PHY routing and power, an interrupt line is
-connected to the Glue to serve as OTG ID change detection.
-
-Required properties:
-- compatible: Should be "amlogic,meson-g12a-usb-ctrl"
-- clocks: a handle for the "USB" clock
-- resets: a handle for the shared "USB" reset line
-- reg: The base address and length of the registers
-- interrupts: the interrupt specifier for the OTG detection
-- phys: handle to used PHYs on the system
- - a <0> phandle can be used if a PHY is not used
-- phy-names: names of the used PHYs on the system :
- - "usb2-phy0" for USB2 PHY0 if USBHOST_A port is used
- - "usb2-phy1" for USB2 PHY1 if USBOTG_B port is used
- - "usb3-phy0" for USB3 PHY if USB3_0 is used
-- dr_mode: should be "host", "peripheral", or "otg" depending on
- the usage and configuration of the OTG Capable port.
- - "host" and "peripheral" means a fixed Host or Device only connection
- - "otg" means the port can be used as both Host or Device and
- be switched automatically using the OTG ID pin.
-
-Optional properties:
-- vbus-supply: should be a phandle to the regulator controlling the VBUS
- power supply when used in OTG switchable mode
-
-Required child nodes:
-
-A child node must exist to represent the core DWC3 IP block. The name of
-the node is not important. The content of the node is defined in dwc3.txt.
-
-A child node must exist to represent the core DWC2 IP block. The name of
-the node is not important. The content of the node is defined in dwc2.txt.
-
-PHY documentation is provided in the following places:
-- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
-- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
-
-Example device nodes:
- usb: usb@ffe09000 {
- compatible = "amlogic,meson-g12a-usb-ctrl";
- reg = <0x0 0xffe09000 0x0 0xa0>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- clocks = <&clkc CLKID_USB>;
- resets = <&reset RESET_USB>;
-
- dr_mode = "otg";
-
- phys = <&usb2_phy0>, <&usb2_phy1>,
- <&usb3_pcie_phy PHY_TYPE_USB3>;
- phy-names = "usb2-phy0", "usb2-phy1", "usb3-phy0";
-
- dwc2: usb@ff400000 {
- compatible = "amlogic,meson-g12a-usb", "snps,dwc2";
- reg = <0x0 0xff400000 0x0 0x40000>;
- interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
- clock-names = "ddr";
- phys = <&usb2_phy1>;
- dr_mode = "peripheral";
- g-rx-fifo-size = <192>;
- g-np-tx-fifo-size = <128>;
- g-tx-fifo-size = <128 128 16 16 16>;
- };
-
- dwc3: usb@ff500000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xff500000 0x0 0x100000>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
- dr_mode = "host";
- snps,dis_u2_susphy_quirk;
- snps,quirk-frame-length-adjustment;
- };
- };
diff --git a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
new file mode 100644
index 000000000000..4efb77b653ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
@@ -0,0 +1,127 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/amlogic,meson-g12a-usb-ctrl.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson G12A DWC3 USB SoC Controller Glue
+
+maintainers:
+ - Neil Armstrong <narmstrong@baylibre.com>
+
+description: |
+ The Amlogic G12A embeds a DWC3 USB IP Core configured for USB2 and USB3
+ in host-only mode, and a DWC2 IP Core configured for USB2 peripheral mode
+ only.
+
+  A glue layer connects the DWC3 core to USB2 PHYs and optionally to a USB3 PHY.
+
+  One of the USB2 PHYs can be re-routed in peripheral mode to a DWC2 USB IP.
+
+  The DWC3 Glue controls the PHY routing and power; an interrupt line is
+  connected to the Glue to serve as OTG ID change detection.
+
+properties:
+ compatible:
+ enum:
+ - amlogic,meson-g12a-usb-ctrl
+
+ ranges: true
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ clocks:
+ minItems: 1
+
+ resets:
+ minItems: 1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ phy-names:
+ items:
+ - const: usb2-phy0 # USB2 PHY0 if USBHOST_A port is used
+ - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used
+ - const: usb3-phy0 # USB3 PHY if USB3_0 is used
+
+ phys:
+ minItems: 1
+ maxItems: 3
+
+ dr_mode: true
+
+ power-domains:
+ maxItems: 1
+
+ vbus-supply:
+ description: VBUS power supply when used in OTG switchable mode
+
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ type: object
+
+additionalProperties: false
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+ - clocks
+ - resets
+ - reg
+ - interrupts
+ - phy-names
+ - phys
+ - dr_mode
+
+examples:
+ - |
+ usb: usb@ffe09000 {
+ compatible = "amlogic,meson-g12a-usb-ctrl";
+ reg = <0x0 0xffe09000 0x0 0xa0>;
+ interrupts = <16>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clocks = <&clkc_usb>;
+ resets = <&reset_usb>;
+
+ dr_mode = "otg";
+
+ phys = <&usb2_phy0>, <&usb2_phy1>, <&usb3_phy0>;
+ phy-names = "usb2-phy0", "usb2-phy1", "usb3-phy0";
+
+ dwc2: usb@ff400000 {
+ compatible = "amlogic,meson-g12a-usb", "snps,dwc2";
+ reg = <0xff400000 0x40000>;
+ interrupts = <31>;
+ clocks = <&clkc_usb1>;
+ clock-names = "ddr";
+ phys = <&usb2_phy1>;
+ dr_mode = "peripheral";
+ g-rx-fifo-size = <192>;
+ g-np-tx-fifo-size = <128>;
+ g-tx-fifo-size = <128 128 16 16 16>;
+ };
+
+ dwc3: usb@ff500000 {
+ compatible = "snps,dwc3";
+ reg = <0xff500000 0x100000>;
+ interrupts = <30>;
+ dr_mode = "host";
+ snps,dis_u2_susphy_quirk;
+ snps,quirk-frame-length-adjustment;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 1ca64c85191a..10edd05872ea 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -63,6 +63,11 @@ properties:
description:
Set this flag to force EHCI reset after resume.
+ companion:
+ $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Phandle of a companion controller, for example one that handles
+      full- and low-speed devices; see the commented sketch below.
+
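+  # A minimal sketch of companion usage; all labels, addresses and values
+  # below are hypothetical:
+  #
+  #   ehci: usb@4a064c00 {
+  #       compatible = "generic-ehci";
+  #       reg = <0x4a064c00 0x400>;
+  #       interrupts = <77>;
+  #       companion = <&ohci>;
+  #   };
+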
phys:
description: PHY specifier for the USB PHY
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt
deleted file mode 100644
index 35039e720515..000000000000
--- a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Renesas Electronics USB3.0 Peripheral driver
-
-Required properties:
- - compatible: Must contain one of the following:
- - "renesas,r8a774a1-usb3-peri"
- - "renesas,r8a774c0-usb3-peri"
- - "renesas,r8a7795-usb3-peri"
- - "renesas,r8a7796-usb3-peri"
- - "renesas,r8a77965-usb3-peri"
- - "renesas,r8a77990-usb3-peri"
- - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 or RZ/G2
- compatible device
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
- - reg: Base address and length of the register for the USB3.0 Peripheral
- - interrupts: Interrupt specifier for the USB3.0 Peripheral
- - clocks: clock phandle and specifier pair
-
-Optional properties:
- - phys: phandle + phy specifier pair
- - phy-names: must be "usb"
-
-Example of R-Car H3 ES1.x:
- usb3_peri0: usb@ee020000 {
- compatible = "renesas,r8a7795-usb3-peri",
- "renesas,rcar-gen3-usb3-peri";
- reg = <0 0xee020000 0 0x400>;
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 328>;
- };
-
- usb3_peri1: usb@ee060000 {
- compatible = "renesas,r8a7795-usb3-peri",
- "renesas,rcar-gen3-usb3-peri";
- reg = <0 0xee060000 0 0x400>;
- interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 327>;
- };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
new file mode 100644
index 000000000000..92d8631b9aa6
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/renesas,usb3-peri.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas USB 3.0 Peripheral controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a774a1-usb3-peri # RZ/G2M
+ - renesas,r8a774b1-usb3-peri # RZ/G2N
+ - renesas,r8a774c0-usb3-peri # RZ/G2E
+ - renesas,r8a7795-usb3-peri # R-Car H3
+ - renesas,r8a7796-usb3-peri # R-Car M3-W
+ - renesas,r8a77965-usb3-peri # R-Car M3-N
+ - renesas,r8a77990-usb3-peri # R-Car E3
+ - const: renesas,rcar-gen3-usb3-peri
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: usb
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ usb-role-switch:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Set this property if the controller supports switching
+ between host and peripheral roles.
+
+ companion:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle of the companion host controller (the xHCI node,
+ as in the example below).
+
+ port:
+ description: |
+ If the "usb-role-switch" property is used, any connector to the data
+ bus of this controller should be modelled using the OF graph bindings.
+
+required:
+ - compatible
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a774c0-sysc.h>
+
+ usb3_peri0: usb@ee020000 {
+ compatible = "renesas,r8a774c0-usb3-peri", "renesas,rcar-gen3-usb3-peri";
+ reg = <0 0xee020000 0 0x400>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 328>;
+ companion = <&xhci0>;
+ usb-role-switch;
+
+ port {
+ usb3_role_switch: endpoint {
+ remote-endpoint = <&hd3ss3220_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.txt b/Documentation/devicetree/bindings/usb/renesas,usbhs.txt
deleted file mode 100644
index e39255ea6e4f..000000000000
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Renesas Electronics USBHS driver
-
-Required properties:
- - compatible: Must contain one or more of the following:
-
- - "renesas,usbhs-r8a7743" for r8a7743 (RZ/G1M) compatible device
- - "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device
- - "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
- - "renesas,usbhs-r8a77470" for r8a77470 (RZ/G1C) compatible device
- - "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device
- - "renesas,usbhs-r8a774c0" for r8a774c0 (RZ/G2E) compatible device
- - "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
- - "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device
- - "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device
- - "renesas,usbhs-r8a7793" for r8a7793 (R-Car M2-N) compatible device
- - "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
- - "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- - "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
- - "renesas,usbhs-r8a77990" for r8a77990 (R-Car E3) compatible device
- - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
- - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- - "renesas,usbhs-r7s9210" for r7s9210 (RZ/A2) compatible device
- - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
- - "renesas,rcar-gen3-usbhs" for R-Car Gen3 or RZ/G2 compatible devices
- - "renesas,rza1-usbhs" for RZ/A1 compatible device
- - "renesas,rza2-usbhs" for RZ/A2 compatible device
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first followed
- by the generic version.
-
- - reg: Base address and length of the register for the USBHS
- - interrupts: Interrupt specifier for the USBHS
- - clocks: A list of phandle + clock specifier pairs.
- - In case of "renesas,rcar-gen3-usbhs", two clocks are required.
- First clock should be peripheral and second one should be host.
- - In case of except above, one clock is required. First clock
- should be peripheral.
-
-Optional properties:
- - renesas,buswait: Integer to use BUSWAIT register
- - renesas,enable-gpio: A gpio specifier to check GPIO determining if USB
- function should be enabled
- - phys: phandle + phy specifier pair
- - phy-names: must be "usb"
- - dmas: Must contain a list of references to DMA specifiers.
- - dma-names : named "ch%d", where %d is the channel number ranging from zero
- to the number of channels (DnFIFOs) minus one.
-
-Example:
- usbhs: usb@e6590000 {
- compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
- reg = <0 0xe6590000 0 0x100>;
- interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
- };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
new file mode 100644
index 000000000000..469affa872d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/renesas,usbhs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas USBHS (HS-USB) controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: renesas,usbhs-r7s72100 # RZ/A1
+ - const: renesas,rza1-usbhs
+
+ - items:
+ - const: renesas,usbhs-r7s9210 # RZ/A2
+ - const: renesas,rza2-usbhs
+
+ - items:
+ - enum:
+ - renesas,usbhs-r8a7743 # RZ/G1M
+ - renesas,usbhs-r8a7744 # RZ/G1N
+ - renesas,usbhs-r8a7745 # RZ/G1E
+ - renesas,usbhs-r8a77470 # RZ/G1C
+ - renesas,usbhs-r8a7790 # R-Car H2
+ - renesas,usbhs-r8a7791 # R-Car M2-W
+ - renesas,usbhs-r8a7792 # R-Car V2H
+ - renesas,usbhs-r8a7793 # R-Car M2-N
+ - renesas,usbhs-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-usbhs
+
+ - items:
+ - enum:
+ - renesas,usbhs-r8a774a1 # RZ/G2M
+ - renesas,usbhs-r8a774b1 # RZ/G2N
+ - renesas,usbhs-r8a774c0 # RZ/G2E
+ - renesas,usbhs-r8a7795 # R-Car H3
+ - renesas,usbhs-r8a7796 # R-Car M3-W
+ - renesas,usbhs-r8a77965 # R-Car M3-N
+ - renesas,usbhs-r8a77990 # R-Car E3
+ - renesas,usbhs-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-usbhs
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+ items:
+ - description: USB 2.0 host
+ - description: USB 2.0 peripheral
+ - description: USB 2.0 clock selector
+
+ interrupts:
+ maxItems: 1
+
+ renesas,buswait:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Integer value written to the BUSWAIT register to set the number of
+ bus access wait cycles.
+
+ renesas,enable-gpio:
+ description: |
+ GPIO specifier for the pin that is checked to determine whether the
+ USB function should be enabled.
+
+ phys:
+ maxItems: 1
+ items:
+ - description: phandle + phy specifier pair.
+
+ phy-names:
+ maxItems: 1
+ items:
+ - const: usb
+
+ dmas:
+ minItems: 2
+ maxItems: 4
+
+ dma-names:
+ minItems: 2
+ maxItems: 4
+ items:
+ - const: ch0
+ - const: ch1
+ - const: ch2
+ - const: ch3
+
+ dr_mode: true
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: USB 2.0 host
+ - description: USB 2.0 peripheral
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ usbhs: usb@e6590000 {
+ compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
+ reg = <0 0xe6590000 0 0x100>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>;
+ };
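R-Car Gen3 and RZ/G2 parts take a peripheral clock and a host clock (two entries in clocks), per the text binding this schema replaces; a hedged sketch of such a node, with the SoC, interrupt number and clock indices as illustrative assumptions::

    usbhs: usb@e6590000 {
        compatible = "renesas,usbhs-r8a7795", "renesas,rcar-gen3-usbhs";
        reg = <0 0xe6590000 0 0x100>;
        interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
    };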
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
index d4cf53c071d9..e3fc57e605ed 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.txt
@@ -6,10 +6,39 @@ Required properties:
- interrupts : <a b> where a is the interrupt number and b represents an
encoding of the sense and level information for the interrupt.
+Required sub-node:
+- connector: The "usb-c-connector" attached to the tcpci chip. The bindings
+  of the connector node are specified in
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
Example :
rt1711h@4e {
compatible = "richtek,rt1711h";
reg = <0x4e>;
interrupt-parent = <&gpio26>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 12000, 2000)>;
+ op-sink-microwatt = <10000000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ usb_con_ss: endpoint {
+ remote-endpoint = <&usb3_data_ss>;
+ };
+ };
+ };
+ };
};
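The PDO_FIXED()/PDO_VAR() macros used in the source-pdos and sink-pdos properties above come from the USB Power Delivery dt-bindings header, so a DTS file carrying this example is expected to include it::

    #include <dt-bindings/usb/pd.h>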
diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
new file mode 100644
index 000000000000..25780e945b15
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt
@@ -0,0 +1,38 @@
+TI HD3SS3220 TypeC DRP Port Controller.
+
+Required properties:
+ - compatible: Must be "ti,hd3ss3220".
+ - reg: I2C slave address, must be 0x47 or 0x67 based on ADDR pin.
+ - interrupts: An interrupt specifier.
+
+Required sub-node:
+ - connector: The "usb-c-connector" attached to the hd3ss3220 chip. The
+ bindings of the connector node are specified in:
+
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
+Example:
+hd3ss3220@47 {
+ compatible = "ti,hd3ss3220";
+ reg = <0x47>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ hd3ss3220_ep: endpoint {
+ remote-endpoint = <&usb3_role_switch>;
+ };
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
new file mode 100644
index 000000000000..5f5264b2e9ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/ti,j721e-usb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller
+
+maintainers:
+ - Roger Quadros <rogerq@ti.com>
+
+properties:
+ compatible:
+ items:
+ - const: ti,j721e-usb
+
+ reg:
+ description: module registers
+
+ power-domains:
+ description:
+ PM domain provider node and an args specifier containing
+ the USB device id value. See
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+
+ clocks:
+ description: Clock phandles to usb2_refclk and lpm_clk
+ minItems: 2
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: ref
+ - const: lpm
+
+ ti,usb2-only:
+ description:
+ If present, it restricts the controller to USB2.0 mode of
+ operation. Must be present if USB3 PHY is not available
+ for USB.
+ type: boolean
+
+ ti,vbus-divider:
+ description:
+ Should be present if USB VBUS line is connected to the
+ VBUS pin of the SoC via a 1/3 voltage divider.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - power-domains
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ cdns_usb@4104000 {
+ compatible = "ti,j721e-usb";
+ reg = <0x00 0x4104000 0x00 0x100>;
+ power-domains = <&k3_pds 288 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 288 15>, <&k3_clks 288 3>;
+ clock-names = "ref", "lpm";
+ assigned-clocks = <&k3_clks 288 15>; /* USB2_REFCLK */
+ assigned-clock-parents = <&k3_clks 288 16>; /* HFOSC0 */
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb@6000000 {
+ compatible = "cdns,usb3";
+ reg = <0x00 0x6000000 0x00 0x10000>,
+ <0x00 0x6010000 0x00 0x10000>,
+ <0x00 0x6020000 0x00 0x10000>;
+ reg-names = "otg", "xhci", "dev";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
+ interrupt-names = "host",
+ "peripheral",
+ "otg";
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+ };
+ };
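When no USB3 PHY is available the wrapper must carry the ti,usb2-only flag; a hedged fragment reusing the values from the example above, with only the flag added::

    cdns_usb@4104000 {
        compatible = "ti,j721e-usb";
        reg = <0x00 0x4104000 0x00 0x100>;
        power-domains = <&k3_pds 288 TI_SCI_PD_EXCLUSIVE>;
        clocks = <&k3_clks 288 15>, <&k3_clks 288 3>;
        clock-names = "ref", "lpm";
        ti,usb2-only;
    };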
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index b49b819571f9..3f378951d624 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -10,6 +10,7 @@ Required properties:
- "renesas,xhci-r8a7743" for r8a7743 SoC
- "renesas,xhci-r8a7744" for r8a7744 SoC
- "renesas,xhci-r8a774a1" for r8a774a1 SoC
+ - "renesas,xhci-r8a774b1" for r8a774b1 SoC
- "renesas,xhci-r8a774c0" for r8a774c0 SoC
- "renesas,xhci-r8a7790" for r8a7790 SoC
- "renesas,xhci-r8a7791" for r8a7791 SoC
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 17915f64b8ee..1a934eab175e 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -7,11 +7,12 @@ Required properties :
- compatible : Should be "microchip,usb251xb" or one of the specific types:
"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
"microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi",
- "microchip,usb2517", "microchip,usb2517i"
+ "microchip,usb2517", "microchip,usb2517i", "microchip,usb2422"
- reg : I2C address on the selected bus (default is <0x2C>)
Optional properties :
- reset-gpios : Should specify the gpio for hub reset
+ - vdd-supply : Should specify the phandle to the regulator supplying vdd
- skip-config : Skip Hub configuration, but only send the USB-Attach command
- vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : Set USB Product ID of the hub (16 bit, default depends on type)
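A hedged sketch of a hub node using the new vdd-supply property alongside reset-gpios (the I2C address is the binding's default; the GPIO and regulator phandles are illustrative assumptions)::

    usb2512b@2c {
        compatible = "microchip,usb2512b";
        reg = <0x2c>;
        reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        vdd-supply = <&reg_3v3>;
    };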
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 05b3904a995b..fd6fa07c45b8 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -16,7 +16,7 @@ properties: {}
patternProperties:
# Prefixes which are not vendors, but followed the pattern
# DO NOT ADD NEW PROPERTIES TO THIS LIST
- "^(at25|devbus|dmacap|dsa|exynos|gpio-fan|gpio|gpmc|hdmi|i2c-gpio),.*": true
+ "^(at25|devbus|dmacap|dsa|exynos|fsi[ab]|gpio-fan|gpio|gpmc|hdmi|i2c-gpio),.*": true
"^(keypad|m25p|max8952|max8997|max8998|mpmc),.*": true
"^(pinctrl-single|#pinctrl-single|PowerPC),.*": true
"^(pl022|pxa-mmc|rcar_sound|rotary-encoder|s5m8767|sdhci),.*": true
diff --git a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
index d7352f709b37..4ddae6feef3b 100644
--- a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
@@ -10,6 +10,9 @@ title: Meson GXBB SoCs Watchdog timer
maintainers:
- Neil Armstrong <narmstrong@baylibre.com>
+allOf:
+ - $ref: watchdog.yaml#
+
properties:
compatible:
enum:
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
index 4fec1e3725b4..44727fcc2729 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-sama5d4-wdt.txt
@@ -1,7 +1,7 @@
* Atmel SAMA5D4 Watchdog Timer (WDT) Controller
Required properties:
-- compatible: "atmel,sama5d4-wdt"
+- compatible: "atmel,sama5d4-wdt" or "microchip,sam9x60-wdt"
- reg: base physical address and length of memory mapped region.
Optional properties:
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
index 9f365c1a3399..a5bf04dba410 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
@@ -10,6 +10,7 @@ Required properties:
- "renesas,r8a7745-wdt" (RZ/G1E)
- "renesas,r8a77470-wdt" (RZ/G1C)
- "renesas,r8a774a1-wdt" (RZ/G2M)
+ - "renesas,r8a774b1-wdt" (RZ/G2N)
- "renesas,r8a774c0-wdt" (RZ/G2E)
- "renesas,r8a7790-wdt" (R-Car H2)
- "renesas,r8a7791-wdt" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
deleted file mode 100644
index 46dcb48e75b4..000000000000
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-* Samsung's Watchdog Timer Controller
-
-The Samsung's Watchdog controller is used for resuming system operation
-after a preset amount of time during which the WDT reset event has not
-occurred.
-
-Required properties:
-- compatible : should be one among the following
- - "samsung,s3c2410-wdt" for S3C2410
- - "samsung,s3c6410-wdt" for S3C6410, S5PV210 and Exynos4
- - "samsung,exynos5250-wdt" for Exynos5250
- - "samsung,exynos5420-wdt" for Exynos5420
- - "samsung,exynos7-wdt" for Exynos7
-
-- reg : base physical address of the controller and length of memory mapped
- region.
-- interrupts : interrupt number to the cpu.
-- samsung,syscon-phandle : reference to syscon node (This property required only
- in case of compatible being "samsung,exynos5250-wdt" or "samsung,exynos5420-wdt".
- In case of Exynos5250 and 5420 this property points to syscon node holding the PMU
- base address)
-
-Optional properties:
-- timeout-sec : contains the watchdog timeout in seconds.
-
-Example:
-
-watchdog@101d0000 {
- compatible = "samsung,exynos5250-wdt";
- reg = <0x101D0000 0x100>;
- interrupts = <0 42 0>;
- clocks = <&clock 336>;
- clock-names = "watchdog";
- samsung,syscon-phandle = <&pmu_syscon>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
new file mode 100644
index 000000000000..2fa40d8864b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/samsung-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC Watchdog Timer Controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+
+description: |+
+ The Samsung's Watchdog controller is used for resuming system operation
+ after a preset amount of time during which the WDT reset event has not
+ occurred.
+
+properties:
+ compatible:
+ enum:
+ - samsung,s3c2410-wdt # for S3C2410
+ - samsung,s3c6410-wdt # for S3C6410, S5PV210 and Exynos4
+ - samsung,exynos5250-wdt # for Exynos5250
+ - samsung,exynos5420-wdt # for Exynos5420
+ - samsung,exynos7-wdt # for Exynos7
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: watchdog
+
+ interrupts:
+ maxItems: 1
+
+ samsung,syscon-phandle:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the PMU system controller node (in case of Exynos5250
+ and Exynos5420).
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - interrupts
+ - reg
+
+allOf:
+ - $ref: watchdog.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos5250-wdt
+ - samsung,exynos5420-wdt
+ then:
+ required:
+ - samsung,syscon-phandle
+
+examples:
+ - |
+ watchdog@101d0000 {
+ compatible = "samsung,exynos5250-wdt";
+ reg = <0x101D0000 0x100>;
+ interrupts = <0 42 0>;
+ clocks = <&clock 336>;
+ clock-names = "watchdog";
+ samsung,syscon-phandle = <&pmu_syscon>;
+ };
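For SoCs other than Exynos5250/5420 the conditional above does not require samsung,syscon-phandle; a hedged sketch for S3C6410, with the address, interrupt and clock specifiers as illustrative assumptions::

    watchdog@7e004000 {
        compatible = "samsung,s3c6410-wdt";
        reg = <0x7e004000 0x100>;
        interrupts = <0 26 0>;
        clocks = <&clock 25>;
        clock-names = "watchdog";
    };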
diff --git a/Documentation/devicetree/writing-schema.rst b/Documentation/devicetree/writing-schema.rst
index f4a638072262..efcd5d21dc2b 100644
--- a/Documentation/devicetree/writing-schema.rst
+++ b/Documentation/devicetree/writing-schema.rst
@@ -117,6 +117,9 @@ project can be installed with pip::
pip3 install git+https://github.com/devicetree-org/dt-schema.git@master
+Several executables (dt-doc-validate, dt-mk-schema, dt-validate) will be
+installed. Ensure they are in your PATH (~/.local/bin by default).
+
dtc must also be built with YAML output support enabled. This requires that
libyaml and its headers be installed on the host system.
@@ -130,11 +133,13 @@ binding schema. All of the DT binding documents can be validated using the
make dt_binding_check
-In order to perform validation of DT source files, use the `dtbs_check` target::
+In order to perform validation of DT source files, use the ``dtbs_check`` target::
make dtbs_check
-This will first run the `dt_binding_check` which generates the processed schema.
+Note that ``dtbs_check`` will skip any binding schema files with errors. It is
+necessary to use ``dt_binding_check`` to get all the validation errors in the
+binding schema files.
It is also possible to run checks with a single schema file by setting the
``DT_SCHEMA_FILES`` variable to a specific schema file.
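For example (the schema path here is only illustrative)::

    make dt_binding_check DT_SCHEMA_FILES=Documentation/devicetree/bindings/trivial-devices.yaml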
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 192c36af39e2..fff6604631ea 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -476,6 +476,22 @@ internal: *[source-pattern ...]*
.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
:internal:
+identifiers: *[ function/type ...]*
+ Include documentation for each *function* and *type* in *source*.
+ If no *function* is specified, the documentation for all functions
+ and types in the *source* will be included.
+
+ Examples::
+
+ .. kernel-doc:: lib/bitmap.c
+ :identifiers: bitmap_parselist bitmap_parselist_user
+
+ .. kernel-doc:: lib/idr.c
+ :identifiers:
+
+functions: *[ function/type ...]*
+ This is a deprecated alias of the 'identifiers' directive.
+
doc: *title*
Include documentation for the ``DOC:`` paragraph identified by *title* in
*source*. Spaces are allowed in *title*; do not quote the *title*. The *title*
@@ -488,19 +504,6 @@ doc: *title*
.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
:doc: High Definition Audio over HDMI and Display Port
-functions: *[ function ...]*
- Include documentation for each *function* in *source*.
- If no *function* is specified, the documentation for all functions
- and types in the *source* will be included.
-
- Examples::
-
- .. kernel-doc:: lib/bitmap.c
- :functions: bitmap_parselist bitmap_parselist_user
-
- .. kernel-doc:: lib/idr.c
- :functions:
-
Without options, the kernel-doc directive includes all documentation comments
from the source file.
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 9f4392876099..72fc2e9e2b63 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -179,6 +179,7 @@ mkutf8data
modpost
modules.builtin
modules.builtin.modinfo
+modules.nsdeps
modules.order
modversions.h*
nconf
diff --git a/Documentation/driver-api/devfreq.rst b/Documentation/driver-api/devfreq.rst
new file mode 100644
index 000000000000..4a0bf87a3b13
--- /dev/null
+++ b/Documentation/driver-api/devfreq.rst
@@ -0,0 +1,30 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+Device Frequency Scaling
+========================
+
+Introduction
+------------
+
+This framework provides a standard kernel interface for Dynamic Voltage and
+Frequency Switching on arbitrary devices.
+
+It exposes frequency controls through sysfs files similar to those of the
+cpufreq subsystem.
+
+Devices for which current usage can be measured can have their frequency
+automatically adjusted by governors.
+
+API
+---
+
+Device drivers need to initialize a :c:type:`devfreq_profile` and call the
+:c:func:`devfreq_add_device` function to create a :c:type:`devfreq` instance.
+
+.. kernel-doc:: include/linux/devfreq.h
+.. kernel-doc:: include/linux/devfreq-event.h
+.. kernel-doc:: drivers/devfreq/devfreq.c
+ :export:
+.. kernel-doc:: drivers/devfreq/devfreq-event.c
+ :export:
diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
index 1b5020ec6517..bc2d89af88ce 100644
--- a/Documentation/driver-api/device_link.rst
+++ b/Documentation/driver-api/device_link.rst
@@ -281,7 +281,8 @@ State machine
:c:func:`driver_bound()`.)
* Before a consumer device is probed, presence of supplier drivers is
- verified by checking that links to suppliers are in ``DL_STATE_AVAILABLE``
+ verified by checking the consumer device is not in the wait_for_suppliers
+ list and by checking that links to suppliers are in ``DL_STATE_AVAILABLE``
state. The state of the links is updated to ``DL_STATE_CONSUMER_PROBE``.
(Call to :c:func:`device_links_check_suppliers()` from
:c:func:`really_probe()`.)
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index b541e97c7ab1..c78db28519f7 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -118,13 +118,13 @@ Kernel Functions and Structures Reference
Reservation Objects
-------------------
-.. kernel-doc:: drivers/dma-buf/reservation.c
+.. kernel-doc:: drivers/dma-buf/dma-resv.c
:doc: Reservation Object Overview
-.. kernel-doc:: drivers/dma-buf/reservation.c
+.. kernel-doc:: drivers/dma-buf/dma-resv.c
:export:
-.. kernel-doc:: include/linux/reservation.h
+.. kernel-doc:: include/linux/dma-resv.h
:internal:
DMA Fences
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index a100bef54952..13046fcf0a5d 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -314,8 +314,13 @@ IOMAP
devm_ioport_unmap()
devm_ioremap()
devm_ioremap_nocache()
+ devm_ioremap_uc()
devm_ioremap_wc()
devm_ioremap_resource() : checks resource, requests memory region, ioremaps
+ devm_ioremap_resource_wc()
+ devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device
+ devm_platform_ioremap_resource_wc()
+ devm_platform_ioremap_resource_byname()
devm_iounmap()
pcim_iomap()
pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
diff --git a/Documentation/driver-api/driver-model/driver.rst b/Documentation/driver-api/driver-model/driver.rst
index 11d281506a04..baa6a85c8287 100644
--- a/Documentation/driver-api/driver-model/driver.rst
+++ b/Documentation/driver-api/driver-model/driver.rst
@@ -169,6 +169,49 @@ A driver's probe() may return a negative errno value to indicate that
the driver did not bind to this device, in which case it should have
released all resources it allocated::
+ void (*sync_state)(struct device *dev);
+
+sync_state is called only once for a device. It's called when all the consumer
+devices of the device have successfully probed. The list of consumers of the
+device is obtained by looking at the device links connecting that device to its
+consumer devices.
+
+The first attempt to call sync_state() is made during late_initcall_sync() to
+give firmware and drivers time to link devices to each other. During the first
+attempt at calling sync_state(), if all the consumers of the device at that
+point in time have already probed successfully, sync_state() is called right
+away. If there are no consumers of the device during the first attempt, that
+too is considered as "all consumers of the device have probed" and sync_state()
+is called right away.
+
+If during the first attempt at calling sync_state() for a device, there are
+still consumers that haven't probed successfully, the sync_state() call is
+postponed and reattempted in the future only when one or more consumers of the
+device probe successfully. If during the reattempt, the driver core finds that
+there are one or more consumers of the device that haven't probed yet, then
+sync_state() call is postponed again.
+
+A typical use case for sync_state() is to have the kernel cleanly take over
+management of devices from the bootloader. For example, if a device is left on
+and at a particular hardware configuration by the bootloader, the device's
+driver might need to keep the device in the boot configuration until all the
+consumers of the device have probed. Once all the consumers of the device have
+probed, the device's driver can synchronize the hardware state of the device to
+match the aggregated software state requested by all the consumers. Hence the
+name sync_state().
+
+While obvious examples of resources that can benefit from sync_state() include
+regulators, sync_state() can also be useful for complex
+resources like IOMMUs. For example, IOMMUs with multiple consumers (devices
+whose addresses are remapped by the IOMMU) might need to keep their mappings
+fixed at (or additive to) the boot configuration until all their consumers
+have probed.
+
+While the typical use case for sync_state() is to have the kernel cleanly take
+over management of devices from the bootloader, the usage of sync_state() is
+not restricted to that. Use it whenever it makes sense to take an action after
+all the consumers of a device have probed.
+
int (*remove) (struct device *dev);
remove is called to unbind a driver from a device. This may be
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index 8382f01a53e3..e622f8f6e56a 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -7,7 +7,7 @@ Generic Counter Interface
Introduction
============
-Counter devices are prevalent within a diverse spectrum of industries.
+Counter devices are prevalent among a diverse spectrum of industries.
The ubiquitous presence of these devices necessitates a common interface
and standard of interaction and exposure. This driver API attempts to
resolve the issue of duplicate code found among existing counter device
@@ -26,23 +26,72 @@ the Generic Counter interface.
There are three core components to a counter:
-* Count:
- Count data for a set of Signals.
-
* Signal:
- Input data that is evaluated by the counter to determine the count
- data.
+ Stream of data to be evaluated by the counter.
* Synapse:
- The association of a Signal with a respective Count.
+ Association of a Signal, and an evaluation trigger, with a Count.
+
+* Count:
+ Accumulation of the effects of connected Synapses.
+
+SIGNAL
+------
+A Signal represents a stream of data. This is the input data that is
+evaluated by the counter to determine the count data; e.g. a quadrature
+signal output line of a rotary encoder. Not all counter devices provide
+user access to the Signal data, so exposure is optional for drivers.
+
+When the Signal data is available for user access, the Generic Counter
+interface provides the following available signal values:
+
+* SIGNAL_LOW:
+ Signal line is in a low state.
+
+* SIGNAL_HIGH:
+ Signal line is in a high state.
+
+A Signal may be associated with one or more Counts.
+
+SYNAPSE
+-------
+A Synapse represents the association of a Signal with a Count. Signal
+data affects respective Count data, and the Synapse represents this
+relationship.
+
+The Synapse action mode specifies the Signal data condition that
+triggers the respective Count's count function evaluation to update the
+count data. The Generic Counter interface provides the following
+available action modes:
+
+* None:
+ Signal does not trigger the count function. In Pulse-Direction count
+ function mode, this Signal is evaluated as Direction.
+
+* Rising Edge:
+ Low state transitions to high state.
+
+* Falling Edge:
+ High state transitions to low state.
+
+* Both Edges:
+ Any state transition.
+
+A counter is defined as a set of input signals associated with count
+data that are generated by the evaluation of the state of the associated
+input signals as defined by the respective count functions. Within the
+context of the Generic Counter interface, a counter consists of Counts
+each associated with a set of Signals, whose respective Synapse
+instances represent the count function update conditions for the
+associated Counts.
+
+A Synapse associates one Signal with one Count.
COUNT
-----
-A Count represents the count data for a set of Signals. The Generic
-Counter interface provides the following available count data types:
-
-* COUNT_POSITION:
- Unsigned integer value representing position.
+A Count represents the accumulation of the effects of connected
+Synapses; i.e. the count data for a set of Signals. The Generic
+Counter interface represents the count data as a natural number.
A Count has a count function mode which represents the update behavior
for the count data. The Generic Counter interface provides the following
@@ -86,60 +135,7 @@ available count function modes:
Any state transition on either quadrature pair signals updates the
respective count. Quadrature encoding determines the direction.
-A Count has a set of one or more associated Signals.
-
-SIGNAL
-------
-A Signal represents a counter input data; this is the input data that is
-evaluated by the counter to determine the count data; e.g. a quadrature
-signal output line of a rotary encoder. Not all counter devices provide
-user access to the Signal data.
-
-The Generic Counter interface provides the following available signal
-data types for when the Signal data is available for user access:
-
-* SIGNAL_LEVEL:
- Signal line state level. The following states are possible:
-
- - SIGNAL_LEVEL_LOW:
- Signal line is in a low state.
-
- - SIGNAL_LEVEL_HIGH:
- Signal line is in a high state.
-
-A Signal may be associated with one or more Counts.
-
-SYNAPSE
--------
-A Synapse represents the association of a Signal with a respective
-Count. Signal data affects respective Count data, and the Synapse
-represents this relationship.
-
-The Synapse action mode specifies the Signal data condition which
-triggers the respective Count's count function evaluation to update the
-count data. The Generic Counter interface provides the following
-available action modes:
-
-* None:
- Signal does not trigger the count function. In Pulse-Direction count
- function mode, this Signal is evaluated as Direction.
-
-* Rising Edge:
- Low state transitions to high state.
-
-* Falling Edge:
- High state transitions to low state.
-
-* Both Edges:
- Any state transition.
-
-A counter is defined as a set of input signals associated with count
-data that are generated by the evaluation of the state of the associated
-input signals as defined by the respective count functions. Within the
-context of the Generic Counter interface, a counter consists of Counts
-each associated with a set of Signals, whose respective Synapse
-instances represent the count function update conditions for the
-associated Counts.
+A Count has a set of one or more associated Synapses.
Paradigm
========
@@ -286,10 +282,36 @@ if device memory-managed registration is desired.
Extension sysfs attributes can be created for auxiliary functionality
and data by passing in defined counter_device_ext, counter_count_ext,
and counter_signal_ext structures. In these cases, the
-counter_device_ext structure is used for global configuration of the
-respective Counter device, while the counter_count_ext and
-counter_signal_ext structures allow for auxiliary exposure and
-configuration of a specific Count or Signal respectively.
+counter_device_ext structure is used for global/miscellaneous exposure
+and configuration of the respective Counter device, while the
+counter_count_ext and counter_signal_ext structures allow for auxiliary
+exposure and configuration of a specific Count or Signal respectively.
+
+Determining the type of extension to create is a matter of scope.
+
+* Signal extensions are attributes that expose information/control
+ specific to a Signal. These types of attributes will exist under a
+ Signal's directory in sysfs.
+
+ For example, if you have an invert feature for a Signal, you can have
+ a Signal extension called "invert" that toggles that feature:
+ /sys/bus/counter/devices/counterX/signalY/invert
+
+* Count extensions are attributes that expose information/control
+ specific to a Count. These types of attributes will exist under a
+ Count's directory in sysfs.
+
+ For example, if you want to pause/unpause a Count from updating, you
+ can have a Count extension called "enable" that toggles such:
+ /sys/bus/counter/devices/counterX/countY/enable
+
+* Device extensions are attributes that expose information/control
+ non-specific to a particular Count or Signal. This is where you would
+ put your global features or other miscellaneous functionality.
+
+ For example, if your device has an overtemp sensor, you can report the
+ chip overheated via a device extension called "error_overtemp":
+ /sys/bus/counter/devices/counterX/error_overtemp
Architecture
============
diff --git a/Documentation/driver-api/bt8xxgpio.rst b/Documentation/driver-api/gpio/bt8xxgpio.rst
index a845feb074de..d7e75f1234e7 100644
--- a/Documentation/driver-api/bt8xxgpio.rst
+++ b/Documentation/driver-api/gpio/bt8xxgpio.rst
@@ -2,7 +2,7 @@
A driver for a selfmade cheap BT8xx based PCI GPIO-card (bt8xxgpio)
===================================================================
-For advanced documentation, see http://www.bu3sch.de/btgpio.php
+For advanced documentation, see https://bues.ch/cms/unmaintained/btgpio.html
A generic digital 24-port PCI GPIO card can be built out of an ordinary
Brooktree bt848, bt849, bt878 or bt879 based analog TV tuner card. The
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index 3fdb32422f8a..2ff743105927 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -5,7 +5,7 @@ GPIO Driver Interface
This document serves as a guide for writers of GPIO chip drivers.
Each GPIO controller driver needs to include the following header, which defines
-the structures used to define a GPIO driver:
+the structures used to define a GPIO driver::
#include <linux/gpio/driver.h>
@@ -398,12 +398,15 @@ provided. A big portion of overhead code will be managed by gpiolib,
under the assumption that your interrupts are 1-to-1-mapped to the
GPIO line index:
- GPIO line offset Hardware IRQ
- 0 0
- 1 1
- 2 2
- ... ...
- ngpio-1 ngpio-1
+.. csv-table::
+ :header: GPIO line offset, Hardware IRQ
+
+ 0,0
+ 1,1
+ 2,2
+ ...,...
+ ngpio-1, ngpio-1
+
If some GPIO lines do not have corresponding IRQs, the bitmask valid_mask
and the flag need_valid_mask in gpio_irq_chip can be used to mask off some
@@ -413,7 +416,9 @@ The preferred way to set up the helpers is to fill in the
struct gpio_irq_chip inside struct gpio_chip before adding the gpio_chip.
If you do this, the additional irq_chip will be set up by gpiolib at the
same time as setting up the rest of the GPIO functionality. The following
-is a typical example of a cascaded interrupt handler using gpio_irq_chip:
+is a typical example of a cascaded interrupt handler using gpio_irq_chip:
+
+.. code-block:: c
/* Typical state container with dynamic irqchip */
struct my_gpio {
@@ -448,7 +453,9 @@ is a typical example of a cascaded interrupt handler using gpio_irq_chip:
return devm_gpiochip_add_data(dev, &g->gc, g);
The helpers support using hierarchical interrupt controllers as well.
-In this case the typical set-up will look like this:
+In this case the typical set-up will look like this:
+
+.. code-block:: c
/* Typical state container with dynamic irqchip */
struct my_gpio {
@@ -493,7 +500,7 @@ available but we try to move away from this:
gpiochip. It will pass the struct gpio_chip* for the chip to all IRQ
callbacks, so the callbacks need to embed the gpio_chip in its state
container and obtain a pointer to the container using container_of().
- (See Documentation/driver-model/design-patterns.txt)
+ (See Documentation/driver-api/driver-model/design-patterns.rst)
- gpiochip_irqchip_add_nested(): adds a nested cascaded irqchip to a gpiochip,
as discussed above regarding different types of cascaded irqchips. The
diff --git a/Documentation/driver-api/gpio/index.rst b/Documentation/driver-api/gpio/index.rst
index c5b8467f9104..5b61032aa4ea 100644
--- a/Documentation/driver-api/gpio/index.rst
+++ b/Documentation/driver-api/gpio/index.rst
@@ -13,6 +13,7 @@ Contents:
board
drivers-on-gpio
legacy
+ bt8xxgpio
Core
====
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 38e638abe3eb..0ebe205efd0c 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -26,6 +26,7 @@ available subsections can be seen below.
device_link
component
message-based
+ infiniband
sound
frame-buffer
regulator
@@ -39,6 +40,7 @@ available subsections can be seen below.
ipmb
i3c/index
interconnect
+ devfreq
hsi
edac
scsi
@@ -69,11 +71,9 @@ available subsections can be seen below.
fpga/index
acpi/index
backlight/lp855x-driver.rst
- bt8xxgpio
connector
console
dcdbas
- dell_rbu
edid
eisa
ipmb
@@ -93,7 +93,6 @@ available subsections can be seen below.
pwm
rfkill
serial/index
- sgi-ioc4
sm501
smsc_ece1099
switchtec
diff --git a/Documentation/driver-api/infiniband.rst b/Documentation/driver-api/infiniband.rst
new file mode 100644
index 000000000000..1a3116f32ff0
--- /dev/null
+++ b/Documentation/driver-api/infiniband.rst
@@ -0,0 +1,127 @@
+===========================================
+InfiniBand and Remote DMA (RDMA) Interfaces
+===========================================
+
+Introduction and Overview
+=========================
+
+TBD
+
+InfiniBand core interfaces
+==========================
+
+.. kernel-doc:: drivers/infiniband/core/iwpm_util.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/core/cq.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/cm.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/rw.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/device.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/verbs.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/packer.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/sa_query.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/ud_header.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/fmr_pool.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/umem.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/core/umem_odp.c
+ :export:
+
+RDMA Verbs transport library
+============================
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/mr.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/rc.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/ah.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/vt.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/cq.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/qp.c
+ :export:
+
+.. kernel-doc:: drivers/infiniband/sw/rdmavt/mcast.c
+ :export:
+
+Upper Layer Protocols
+=====================
+
+iSCSI Extensions for RDMA (iSER)
+--------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.c
+ :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers \
+ iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit \
+ iscsi_iser_cleanup_task iscsi_iser_check_protection \
+ iscsi_iser_conn_create iscsi_iser_conn_bind \
+ iscsi_iser_conn_start iscsi_iser_conn_stop \
+ iscsi_iser_session_destroy iscsi_iser_session_create \
+ iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll \
+ iscsi_iser_ep_disconnect
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iser_initiator.c
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/iser/iser_verbs.c
+ :internal:
+
+Omni-Path (OPA) Virtual NIC support
+-----------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+ :internal:
+
+InfiniBand SCSI RDMA protocol target support
+--------------------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/srpt/ib_srpt.h
+ :internal:
+
+.. kernel-doc:: drivers/infiniband/ulp/srpt/ib_srpt.c
+ :internal:
+
+iSCSI Extensions for RDMA (iSER) target support
+-----------------------------------------------
+
+.. kernel-doc:: drivers/infiniband/ulp/isert/ib_isert.c
+ :internal:
+
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 6172f3cc3d0b..06d98c4526df 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -49,9 +49,6 @@ Device Drivers Base
Device Drivers DMA Management
-----------------------------
-.. kernel-doc:: kernel/dma/coherent.c
- :export:
-
.. kernel-doc:: kernel/dma/mapping.c
:export:
diff --git a/Documentation/driver-api/interconnect.rst b/Documentation/driver-api/interconnect.rst
index c3e004893796..cdeb5825f314 100644
--- a/Documentation/driver-api/interconnect.rst
+++ b/Documentation/driver-api/interconnect.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
=====================================
-GENERIC SYSTEM INTERCONNECT SUBSYSTEM
+Generic System Interconnect Subsystem
=====================================
Introduction
diff --git a/Documentation/driver-api/pti_intel_mid.rst b/Documentation/driver-api/pti_intel_mid.rst
index 20f1cff42d5f..bacc2a4ee89f 100644
--- a/Documentation/driver-api/pti_intel_mid.rst
+++ b/Documentation/driver-api/pti_intel_mid.rst
@@ -49,7 +49,9 @@ but is not just blindly executing as 'root'. Keep in mind
the use of ioctl(,TIOCSETD,) is not specific to the n_tracerouter
and n_tracesink line discipline drivers but is a generic
operation for a program to use a line discipline driver
-on a tty port other than the default n_tty::
+on a tty port other than the default n_tty:
+
+.. code-block:: c
/////////// To hook up n_tracerouter and n_tracesink /////////
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index d344b99aae1e..964667052eda 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -30,5 +30,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/filesystems/autofs.txt b/Documentation/filesystems/autofs.rst
index 3af38c7fd26d..681c6a492bc0 100644
--- a/Documentation/filesystems/autofs.txt
+++ b/Documentation/filesystems/autofs.rst
@@ -1,12 +1,9 @@
-<head>
-<style> p { max-width:50em} ol, ul {max-width: 40em}</style>
-</head>
-
+=====================
autofs - how it works
=====================
Purpose
--------
+=======
The goal of autofs is to provide on-demand mounting and race free
automatic unmounting of various other filesystems. This provides two
@@ -28,7 +25,7 @@ key advantages:
first accessed a name.
Context
--------
+=======
The "autofs" filesystem module is only one part of an autofs system.
There also needs to be a user-space program which looks up names
@@ -43,7 +40,7 @@ filesystem type. Several "autofs" filesystems can be mounted and they
can each be managed separately, or all managed by the same daemon.
Content
--------
+=======
An autofs filesystem can contain 3 sorts of objects: directories,
symbolic links and mount traps. Mount traps are directories with
@@ -52,9 +49,10 @@ extra properties as described in the next section.
Objects can only be created by the automount daemon: symlinks are
created with a regular `symlink` system call, while directories and
mount traps are created with `mkdir`. The determination of whether a
-directory should be a mount trap or not is quite _ad hoc_, largely for
-historical reasons, and is determined in part by the
-*direct*/*indirect*/*offset* mount options, and the *maxproto* mount option.
+directory should be a mount trap is based on a master map. This master
+map is consulted by autofs to determine which directories are mount
+points. Mount points can be *direct*/*indirect*/*offset*.
+On most systems, the default master map is located at */etc/auto.master*.
If neither the *direct* or *offset* mount options are given (so the
mount is considered to be *indirect*), then the root directory is
@@ -80,7 +78,7 @@ where in the tree they are (root, top level, or lower), the *maxproto*,
and whether the mount was *indirect* or not.
Mount Traps
----------------
+===========
A core element of the implementation of autofs is the Mount Traps
which are provided by the Linux VFS. Any directory provided by a
@@ -201,7 +199,7 @@ initiated or is being considered, otherwise it returns 0.
Mountpoint expiry
------------------
+=================
The VFS has a mechanism for automatically expiring unused mounts,
much as it can expire any unused dentry information from the dcache.
@@ -301,7 +299,7 @@ completed (together with removing any directories that might have been
necessary), or has been aborted.
Communicating with autofs: detecting the daemon
------------------------------------------------
+===============================================
There are several forms of communication between the automount daemon
and the filesystem. As we have already seen, the daemon can create and
@@ -317,33 +315,39 @@ If the daemon ever has to be stopped and restarted a new pgid can be
provided through an ioctl as will be described below.
Communicating with autofs: the event pipe
------------------------------------------
+=========================================
When an autofs filesystem is mounted, the 'write' end of a pipe must
be passed using the 'fd=' mount option. autofs will write
notification messages to this pipe for the daemon to respond to.
-For version 5, the format of the message is:
-
- struct autofs_v5_packet {
- int proto_version; /* Protocol version */
- int type; /* Type of packet */
- autofs_wqt_t wait_queue_token;
- __u32 dev;
- __u64 ino;
- __u32 uid;
- __u32 gid;
- __u32 pid;
- __u32 tgid;
- __u32 len;
- char name[NAME_MAX+1];
+For version 5, the format of the message is::
+
+ struct autofs_v5_packet {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ __u32 dev;
+ __u64 ino;
+ __u32 uid;
+ __u32 gid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 len;
+ char name[NAME_MAX+1];
};
-where the type is one of
+And the format of the header is::
+
+ struct autofs_packet_hdr {
+ int proto_version; /* Protocol version */
+ int type; /* Type of packet */
+ };
- autofs_ptype_missing_indirect
- autofs_ptype_expire_indirect
- autofs_ptype_missing_direct
- autofs_ptype_expire_direct
+where the type is one of ::
+
+ autofs_ptype_missing_indirect
+ autofs_ptype_expire_indirect
+ autofs_ptype_missing_direct
+ autofs_ptype_expire_direct
so messages can indicate that a name is missing (something tried to
access it but it isn't there) or that it has been selected for expiry.
@@ -360,7 +364,7 @@ acknowledged using one of the ioctls below with the relevant
`wait_queue_token`.
Communicating with autofs: root directory ioctls
-------------------------------------------------
+================================================
The root directory of an autofs filesystem will respond to a number of
ioctls. The process issuing the ioctl must have the CAP_SYS_ADMIN
@@ -368,58 +372,66 @@ capability, or must be the automount daemon.
The available ioctl commands are:
-- **AUTOFS_IOC_READY**: a notification has been handled. The argument
- to the ioctl command is the "wait_queue_token" number
- corresponding to the notification being acknowledged.
-- **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with
- the error code `ENOENT`.
-- **AUTOFS_IOC_CATATONIC**: Causes the autofs to enter "catatonic"
- mode meaning that it stops sending notifications to the daemon.
- This mode is also entered if a write to the pipe fails.
-- **AUTOFS_IOC_PROTOVER**: This returns the protocol version in use.
-- **AUTOFS_IOC_PROTOSUBVER**: Returns the protocol sub-version which
- is really a version number for the implementation.
-- **AUTOFS_IOC_SETTIMEOUT**: This passes a pointer to an unsigned
- long. The value is used to set the timeout for expiry, and
- the current timeout value is stored back through the pointer.
-- **AUTOFS_IOC_ASKUMOUNT**: Returns, in the pointed-to `int`, 1 if
- the filesystem could be unmounted. This is only a hint as
- the situation could change at any instant. This call can be
- used to avoid a more expensive full unmount attempt.
-- **AUTOFS_IOC_EXPIRE**: as described above, this asks if there is
- anything suitable to expire. A pointer to a packet:
-
- struct autofs_packet_expire_multi {
- int proto_version; /* Protocol version */
- int type; /* Type of packet */
- autofs_wqt_t wait_queue_token;
- int len;
- char name[NAME_MAX+1];
- };
+- **AUTOFS_IOC_READY**:
+ a notification has been handled. The argument
+ to the ioctl command is the "wait_queue_token" number
+ corresponding to the notification being acknowledged.
+- **AUTOFS_IOC_FAIL**:
+ similar to above, but indicates failure with
+ the error code `ENOENT`.
+- **AUTOFS_IOC_CATATONIC**:
+ Causes the autofs to enter "catatonic"
+ mode meaning that it stops sending notifications to the daemon.
+ This mode is also entered if a write to the pipe fails.
+- **AUTOFS_IOC_PROTOVER**:
+ This returns the protocol version in use.
+- **AUTOFS_IOC_PROTOSUBVER**:
+ Returns the protocol sub-version which
+ is really a version number for the implementation.
+- **AUTOFS_IOC_SETTIMEOUT**:
+ This passes a pointer to an unsigned
+ long. The value is used to set the timeout for expiry, and
+ the current timeout value is stored back through the pointer.
+- **AUTOFS_IOC_ASKUMOUNT**:
+ Returns, in the pointed-to `int`, 1 if
+ the filesystem could be unmounted. This is only a hint as
+ the situation could change at any instant. This call can be
+ used to avoid a more expensive full unmount attempt.
+- **AUTOFS_IOC_EXPIRE**:
+ as described above, this asks if there is
+ anything suitable to expire. A pointer to a packet::
+
+ struct autofs_packet_expire_multi {
+ struct autofs_packet_hdr hdr;
+ autofs_wqt_t wait_queue_token;
+ int len;
+ char name[NAME_MAX+1];
+ };
- is required. This is filled in with the name of something
- that can be unmounted or removed. If nothing can be expired,
- `errno` is set to `EAGAIN`. Even though a `wait_queue_token`
- is present in the structure, no "wait queue" is established
- and no acknowledgment is needed.
-- **AUTOFS_IOC_EXPIRE_MULTI**: This is similar to
- **AUTOFS_IOC_EXPIRE** except that it causes notification to be
- sent to the daemon, and it blocks until the daemon acknowledges.
- The argument is an integer which can contain two different flags.
+ is required. This is filled in with the name of something
+ that can be unmounted or removed. If nothing can be expired,
+ `errno` is set to `EAGAIN`. Even though a `wait_queue_token`
+ is present in the structure, no "wait queue" is established
+ and no acknowledgment is needed.
+- **AUTOFS_IOC_EXPIRE_MULTI**:
+ This is similar to
+ **AUTOFS_IOC_EXPIRE** except that it causes notification to be
+ sent to the daemon, and it blocks until the daemon acknowledges.
+ The argument is an integer which can contain two different flags.
- **AUTOFS_EXP_IMMEDIATE** causes `last_used` time to be ignored
- and objects are expired if the are not in use.
+ **AUTOFS_EXP_IMMEDIATE** causes `last_used` time to be ignored
+ and objects are expired if they are not in use.
- **AUTOFS_EXP_FORCED** causes the in use status to be ignored
- and objects are expired ieven if they are in use. This assumes
- that the daemon has requested this because it is capable of
- performing the umount.
+ **AUTOFS_EXP_FORCED** causes the in use status to be ignored
+ and objects are expired even if they are in use. This assumes
+ that the daemon has requested this because it is capable of
+ performing the umount.
- **AUTOFS_EXP_LEAVES** will select a leaf rather than a top-level
- name to expire. This is only safe when *maxproto* is 4.
+ **AUTOFS_EXP_LEAVES** will select a leaf rather than a top-level
+ name to expire. This is only safe when *maxproto* is 4.
Communicating with autofs: char-device ioctls
----------------------------------------------
+=============================================
It is not always possible to open the root of an autofs filesystem,
particularly a *direct* mounted filesystem. If the automount daemon
@@ -429,9 +441,9 @@ need there is a "miscellaneous" character device (major 10, minor 235)
which can be used to communicate directly with the autofs filesystem.
It requires CAP_SYS_ADMIN for access.
-The `ioctl`s that can be used on this device are described in a separate
+The 'ioctl's that can be used on this device are described in a separate
document `autofs-mount-control.txt`, and are summarised briefly here.
-Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure:
+Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure::
struct autofs_dev_ioctl {
__u32 ver_major;
@@ -469,41 +481,50 @@ that the kernel module can support.
Commands are:
-- **AUTOFS_DEV_IOCTL_VERSION_CMD**: does nothing, except validate and
- set version numbers.
-- **AUTOFS_DEV_IOCTL_OPENMOUNT_CMD**: return an open file descriptor
- on the root of an autofs filesystem. The filesystem is identified
- by name and device number, which is stored in `openmount.devid`.
- Device numbers for existing filesystems can be found in
- `/proc/self/mountinfo`.
-- **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**: same as `close(ioctlfd)`.
-- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in
- catatonic mode, this can provide the write end of a new pipe
- in `setpipefd.pipefd` to re-establish communication with a daemon.
- The process group of the calling process is used to identify the
- daemon.
-- **AUTOFS_DEV_IOCTL_REQUESTER_CMD**: `path` should be a
- name within the filesystem that has been auto-mounted on.
- On successful return, `requester.uid` and `requester.gid` will be
- the UID and GID of the process which triggered that mount.
-- **AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD**: Check if path is a
- mountpoint of a particular type - see separate documentation for
- details.
-- **AUTOFS_DEV_IOCTL_PROTOVER_CMD**:
-- **AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD**:
-- **AUTOFS_DEV_IOCTL_READY_CMD**:
-- **AUTOFS_DEV_IOCTL_FAIL_CMD**:
-- **AUTOFS_DEV_IOCTL_CATATONIC_CMD**:
-- **AUTOFS_DEV_IOCTL_TIMEOUT_CMD**:
-- **AUTOFS_DEV_IOCTL_EXPIRE_CMD**:
-- **AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD**: These all have the same
- function as the similarly named **AUTOFS_IOC** ioctls, except
- that **FAIL** can be given an explicit error number in `fail.status`
- instead of assuming `ENOENT`, and this **EXPIRE** command
- corresponds to **AUTOFS_IOC_EXPIRE_MULTI**.
+- **AUTOFS_DEV_IOCTL_VERSION_CMD**:
+ does nothing, except validate and
+ set version numbers.
+- **AUTOFS_DEV_IOCTL_OPENMOUNT_CMD**:
+ return an open file descriptor
+ on the root of an autofs filesystem. The filesystem is identified
+ by name and device number, which is stored in `openmount.devid`.
+ Device numbers for existing filesystems can be found in
+ `/proc/self/mountinfo`.
+- **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**:
+ same as `close(ioctlfd)`.
+- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**:
+ if the filesystem is in
+ catatonic mode, this can provide the write end of a new pipe
+ in `setpipefd.pipefd` to re-establish communication with a daemon.
+ The process group of the calling process is used to identify the
+ daemon.
+- **AUTOFS_DEV_IOCTL_REQUESTER_CMD**:
+ `path` should be a
+ name within the filesystem that has been auto-mounted on.
+ On successful return, `requester.uid` and `requester.gid` will be
+ the UID and GID of the process which triggered that mount.
+- **AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD**:
+ Check if path is a
+ mountpoint of a particular type - see separate documentation for
+ details.
+
+- **AUTOFS_DEV_IOCTL_PROTOVER_CMD**
+- **AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD**
+- **AUTOFS_DEV_IOCTL_READY_CMD**
+- **AUTOFS_DEV_IOCTL_FAIL_CMD**
+- **AUTOFS_DEV_IOCTL_CATATONIC_CMD**
+- **AUTOFS_DEV_IOCTL_TIMEOUT_CMD**
+- **AUTOFS_DEV_IOCTL_EXPIRE_CMD**
+- **AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD**
+
+These all have the same
+function as the similarly named **AUTOFS_IOC** ioctls, except
+that **FAIL** can be given an explicit error number in `fail.status`
+instead of assuming `ENOENT`, and this **EXPIRE** command
+corresponds to **AUTOFS_IOC_EXPIRE_MULTI**.
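+
+A minimal calling sketch (structure fields and macros as declared in
+`linux/auto_dev-ioctl.h`; illustrative only, error handling omitted)::
+
+ #include <fcntl.h>
+ #include <sys/ioctl.h>
+ #include <linux/auto_dev-ioctl.h>
+
+ struct autofs_dev_ioctl param = {
+ .ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR,
+ .ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR,
+ .size = sizeof(param),
+ .ioctlfd = -1,
+ };
+ int devfd = open("/dev/autofs", O_RDWR);
+
+ /* Validate version numbers against the kernel module. */
+ ioctl(devfd, AUTOFS_DEV_IOCTL_VERSION, &param);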
Catatonic mode
---------------
+==============
As mentioned, an autofs mount can enter "catatonic" mode. This
happens if a write to the notification pipe fails, or if it is
@@ -527,7 +548,7 @@ Catatonic mode can only be left via the
**AUTOFS_DEV_IOCTL_OPENMOUNT_CMD** ioctl on the `/dev/autofs`.
The "ignore" mount option
--------------------------
+=========================
The "ignore" mount option can be used to provide a generic indicator
to applications that the mount entry should be ignored when displaying
@@ -542,18 +563,18 @@ This is intended to be used by user space programs to exclude autofs
mounts from consideration when reading the mounts list.
autofs, name spaces, and shared mounts
---------------------------------------
+======================================
With bind mounts and name spaces it is possible for an autofs
filesystem to appear at multiple places in one or more filesystem
name spaces. For this to work sensibly, the autofs filesystem should
-always be mounted "shared". e.g.
+always be mounted "shared". e.g. ::
-> `mount --make-shared /autofs/mount/point`
+ mount --make-shared /autofs/mount/point
The automount daemon is only able to manage a single mount location for
an autofs filesystem and if mounts on that are not 'shared', other
locations will not behave as expected. In particular access to those
-other locations will likely result in the `ELOOP` error
+other locations will likely result in the `ELOOP` error ::
-> Too many levels of symbolic links
+ Too many levels of symbolic links
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index 9e27c843d00e..dc497b96fa4f 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -68,41 +68,49 @@ actually necessary; the debugfs code provides a number of helper functions
for simple situations. Files containing a single integer value can be
created with any of:
- struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
- struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
+ void debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+ void debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
- struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
+ void debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value);
These files support both reading and writing the given value; if a specific
file should not be written to, simply set the mode bits accordingly. The
values in these files are in decimal; if hexadecimal is more appropriate,
the following functions can be used instead:
- struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
- struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
- struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
- struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
+ void debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+ void debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
+ void debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value);
+ void debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value);
These functions are useful as long as the developer knows the size of the
value to be exported. Some types can have different widths on different
-architectures, though, complicating the situation somewhat. There is a
-function meant to help out in one special case:
+architectures, though, complicating the situation somewhat. There are
+functions meant to help out in such special cases:
- struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent,
- size_t *value);
+ void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
As might be expected, this function will create a debugfs file to represent
a variable of type size_t.
+Similarly, there are helpers for variables of type unsigned long, in decimal
+and hexadecimal:
+
+ struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value);
+ void debugfs_create_xul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value);
+
Boolean values can be placed in debugfs with:
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
@@ -114,8 +122,8 @@ lower-case values, or 1 or 0. Any other input will be silently ignored.
Also, atomic_t values can be placed in debugfs with:
- struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
+ void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
A read of this file will get atomic_t values, and a write of this file
will set atomic_t values.
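
For illustration, a driver might tie a couple of variables to debugfs
like this (a sketch only; the directory and file names are made up):

    static u32 my_counter;
    static bool my_flag;

    struct dentry *dir = debugfs_create_dir("my_driver", NULL);
    debugfs_create_u32("counter", 0644, dir, &my_counter);
    debugfs_create_bool("flag", 0644, dir, &my_flag);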
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 7e1991328473..3135b80df6da 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -297,6 +297,9 @@ Files in /sys/fs/f2fs/<devname>
reclaim the prefree segments to free segments.
By default, 5% over total # of segments.
+ main_blkaddr This value gives the first block address of
+ MAIN area in the partition.
+
max_small_discards This parameter controls the number of discard
commands that consist small blocks less than 2MB.
The candidates to be discarded are cached until
@@ -346,7 +349,7 @@ Files in /sys/fs/f2fs/<devname>
ram_thresh This parameter controls the memory footprint used
by free nids and cached nat entries. By default,
- 10 is set, which indicates 10 MB / 1 GB RAM.
+ 1 is set, which indicates 10 MB / 1 GB RAM.
ra_nid_pages When building free nids, F2FS reads NAT blocks
ahead for speed up. Default is 0.
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 471a511c7508..68c2bc8275cf 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -342,8 +342,8 @@ Contents encryption
-------------------
For file contents, each filesystem block is encrypted independently.
-Currently, only the case where the filesystem block size is equal to
-the system's page size (usually 4096 bytes) is supported.
+Starting from Linux kernel 5.5, encryption of filesystems with block
+size less than the system's page size is supported.
Each block's IV is set to the logical block number within the file as
a little endian number, except that:
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 2c3a9f761205..ad6315a48d14 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -46,4 +46,5 @@ Documentation for filesystem implementations.
.. toctree::
:maxdepth: 2
+ autofs
virtiofs
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index fc3a0704553c..5057e4d9dcd1 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -105,7 +105,7 @@ getattr: no
listxattr: no
fiemap: no
update_time: no
-atomic_open: exclusive
+atomic_open: shared (exclusive if O_CREAT is set in open flags)
tmpfile: no
============ =============================================
diff --git a/Documentation/firmware-guide/acpi/namespace.rst b/Documentation/firmware-guide/acpi/namespace.rst
index 835521baeb89..3eb763d6656d 100644
--- a/Documentation/firmware-guide/acpi/namespace.rst
+++ b/Documentation/firmware-guide/acpi/namespace.rst
@@ -261,7 +261,7 @@ Description Tables contain information used for the creation of the
struct acpi_device objects represented by the given row (xSDT means DSDT
or SSDT).
-The forth column of the above table indicates the 'bus_id' generation
+The fourth column of the above table indicates the 'bus_id' generation
rule of the struct acpi_device object:
_HID:
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index 6fa483fc823e..094fc8aacd8e 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -108,6 +108,16 @@ More functions are exposed through sysfs
error reporting sysfs interfaces allow user to read errors detected by the
hardware, and clear the logged errors.
+ Power management (dfl_fme_power hwmon)
+ power management hwmon sysfs interfaces allow user to read power management
+ information (power consumption, thresholds, threshold status, limits, etc.)
+ and configure power thresholds for different throttling levels.
+
+ Thermal management (dfl_fme_thermal hwmon)
+ thermal management hwmon sysfs interfaces allow user to read thermal
+ management information (current temperature, thresholds, threshold status,
+ etc.).
+
FIU - PORT
==========
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
index 5acdd1842ea2..0efede580039 100644
--- a/Documentation/gpu/amdgpu.rst
+++ b/Documentation/gpu/amdgpu.rst
@@ -79,16 +79,71 @@ AMDGPU XGMI Support
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
:internal:
-AMDGPU RAS debugfs control interface
-====================================
+AMDGPU RAS Support
+==================
+
+The AMDGPU RAS interfaces are exposed via sysfs (for informational queries) and
+debugfs (for error injection).
+
+RAS debugfs/sysfs Control and Error Injection Interfaces
+--------------------------------------------------------
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
:doc: AMDGPU RAS debugfs control interface
+RAS Reboot Behavior for Unrecoverable Errors
+--------------------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+
+RAS Error Count sysfs Interface
+-------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS sysfs Error Count Interface
+
+RAS EEPROM debugfs Interface
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS debugfs EEPROM table reset interface
+
+RAS VRAM Bad Pages sysfs Interface
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+ :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
:internal:
+Sample Code
+-----------
+Sample code for testing error injection can be found here:
+https://cgit.freedesktop.org/mesa/drm/tree/tests/amdgpu/ras_tests.c
+
+This is part of the libdrm amdgpu unit tests which cover several areas of the GPU.
+There are four sets of tests:
+
+RAS Basic Test
+
+The test verifies the RAS feature enabled status and makes sure the necessary sysfs and debugfs files
+are present.
+
+RAS Query Test
+
+This test checks the RAS availability and enablement status for each supported IP block as well as
+the error counts.
+
+RAS Inject Test
+
+This test injects errors for each IP.
+
+RAS Disable Test
+
+This test verifies disabling of RAS features for each IP block.
+
GPU Power/Thermal Controls and Monitoring
=========================================
@@ -130,11 +185,11 @@ pp_od_clk_voltage
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
:doc: pp_od_clk_voltage
-pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+pp_dpm_*
+~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
- :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+ :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
pp_power_profile_mode
~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 3868008db8a9..9668a7fe2408 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -77,9 +77,6 @@ Atomic State Reset and Initialization
Atomic State Helper Reference
-----------------------------
-.. kernel-doc:: include/drm/drm_atomic_state_helper.h
- :internal:
-
.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:export:
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index b664f054c259..59619296c84b 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -400,16 +400,13 @@ GEM VRAM Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_gem_vram_helper.c
:export:
-VRAM MM Helper Functions Reference
-----------------------------------
+GEM TTM Helper Functions Reference
+-----------------------------------
-.. kernel-doc:: drivers/gpu/drm/drm_vram_mm_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_gem_ttm_helper.c
:doc: overview
-.. kernel-doc:: include/drm/drm_vram_mm_helper.h
- :internal:
-
-.. kernel-doc:: drivers/gpu/drm/drm_vram_mm_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_gem_ttm_helper.c
:export:
VMA Offset Manager
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 3415255ad3dc..d0947c5c4ab8 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -246,6 +246,15 @@ Display PLLs
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dpll_mgr.h
:internal:
+Display State Buffer
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dsb.c
+ :doc: DSB
+
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dsb.c
+ :internal:
+
Memory Management and Command Submission
========================================
@@ -358,15 +367,6 @@ Batchbuffer Parsing
.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
:internal:
-Batchbuffer Pools
------------------
-
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
- :doc: batch pool
-
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
- :internal:
-
User Batchbuffer Execution
--------------------------
@@ -415,32 +415,53 @@ Object Tiling IOCTLs
.. kernel-doc:: drivers/gpu/drm/i915/gem/i915_gem_tiling.c
:doc: buffer object tiling
+Microcontrollers
+================
+
+Starting from gen9, three microcontrollers are available on the HW: the
+graphics microcontroller (GuC), the HEVC/H.265 microcontroller (HuC) and the
+display microcontroller (DMC). The driver is responsible for loading the
+firmware on the microcontrollers; the GuC and HuC firmware images are transferred
+to WOPCM using the DMA engine, while the DMC firmware is written through MMIO.
+
WOPCM
-=====
+-----
WOPCM Layout
-------------
+~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/intel_wopcm.c
:doc: WOPCM Layout
GuC
-===
+---
-Firmware Layout
--------------------
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c
+ :doc: GuC
+
+GuC Firmware Layout
+~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
:doc: Firmware Layout
+GuC Memory Management
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c
+ :doc: GuC Memory Management
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c
+ :functions: intel_guc_allocate_vma
+
+
GuC-specific firmware loader
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
:internal:
GuC-based command submission
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
:doc: GuC-based command submission
@@ -448,11 +469,26 @@ GuC-based command submission
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
:internal:
-GuC Address Space
------------------
+HuC
+---
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c
+ :doc: HuC
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c
+ :functions: intel_huc_auth
-.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c
- :doc: GuC Address Space
+HuC Memory Management
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c
+ :doc: HuC Memory Management
+
+HuC Firmware Layout
+~~~~~~~~~~~~~~~~~~~
+The HuC FW layout is the same as the GuC one, see `GuC Firmware Layout`_.
+
+DMC
+---
+See `CSR firmware support for DMC`_
Tracing
=======
@@ -514,9 +550,9 @@ i915 Perf Stream
This section covers the stream-semantics-agnostic structures and functions
for representing an i915 perf stream FD and associated file operations.
-.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf_types.h
:functions: i915_perf_stream
-.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf_types.h
:functions: i915_perf_stream_ops
.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
@@ -541,7 +577,7 @@ for representing an i915 perf stream FD and associated file operations.
i915 Perf Observation Architecture Stream
-----------------------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_drv.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf_types.h
:functions: i915_oa_ops
.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
diff --git a/Documentation/gpu/mcde.rst b/Documentation/gpu/mcde.rst
index c69e977defda..dd43dde379e0 100644
--- a/Documentation/gpu/mcde.rst
+++ b/Documentation/gpu/mcde.rst
@@ -5,4 +5,4 @@
=======================================================
.. kernel-doc:: drivers/gpu/drm/mcde/mcde_drv.c
- :doc: ST-Ericsson MCDE DRM Driver
+ :doc: ST-Ericsson MCDE Driver
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 32787acff0a8..6792fa9b6b6b 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -7,6 +7,22 @@ TODO list
This section contains a list of smaller janitorial tasks in the kernel DRM
graphics subsystem useful as newbie projects. Or for slow rainy days.
+Difficulty
+----------
+
+To make it easier, tasks are categorized into different levels:
+
+Starter: Good tasks to get started with the DRM subsystem.
+
+Intermediate: Tasks which need some experience with working in the DRM
+subsystem, or some specific GPU/display graphics knowledge. For debugging issues
+it's good to have the relevant hardware (or a virtual driver set up) available
+for testing.
+
+Advanced: Tricky tasks that need fairly good understanding of the DRM subsystem
+and graphics topics. Generally need the relevant hardware for development and
+testing.
+
Subsystem-wide refactorings
===========================
@@ -20,6 +36,8 @@ implementations), and then remove it.
Contact: Daniel Vetter, respective driver maintainers
+Level: Intermediate
+
Convert existing KMS drivers to atomic modesetting
--------------------------------------------------
@@ -38,6 +56,8 @@ do by directly using the new atomic helper driver callbacks.
Contact: Daniel Vetter, respective driver maintainers
+Level: Advanced
+
Clean up the clipped coordination confusion around planes
---------------------------------------------------------
@@ -50,6 +70,8 @@ helpers.
Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
+Level: Advanced
+
Convert early atomic drivers to async commit helpers
----------------------------------------------------
@@ -63,6 +85,8 @@ events for atomic commits correctly. But fixing these bugs is good anyway.
Contact: Daniel Vetter, respective driver maintainers
+Level: Advanced
+
Fallout from atomic KMS
-----------------------
@@ -91,6 +115,8 @@ interfaces to fix these issues:
Contact: Daniel Vetter
+Level: Intermediate
+
Get rid of dev->struct_mutex from GEM drivers
---------------------------------------------
@@ -114,6 +140,8 @@ fine-grained per-buffer object and per-context lockings scheme. Currently only t
Contact: Daniel Vetter, respective driver maintainers
+Level: Advanced
+
Convert instances of dev_info/dev_err/dev_warn to their DRM_DEV_* equivalent
----------------------------------------------------------------------------
@@ -129,6 +157,8 @@ are better.
Contact: Sean Paul, Maintainer of the driver you plan to convert
+Level: Starter
+
Convert drivers to use simple modeset suspend/resume
----------------------------------------------------
@@ -139,6 +169,8 @@ of the atomic suspend/resume code in older atomic modeset drivers.
Contact: Maintainer of the driver you plan to convert
+Level: Intermediate
+
Convert drivers to use drm_fb_helper_fbdev_setup/teardown()
-----------------------------------------------------------
@@ -157,6 +189,8 @@ probably use drm_fb_helper_fbdev_teardown().
Contact: Maintainer of the driver you plan to convert
+Level: Intermediate
+
Clean up mmap forwarding
------------------------
@@ -166,14 +200,16 @@ There's drm_gem_prime_mmap() for this now, but still needs to be rolled out.
Contact: Daniel Vetter
+Level: Intermediate
+
Generic fbdev defio support
---------------------------
The defio support code in the fbdev core has some very specific requirements,
-which means drivers need to have a special framebuffer for fbdev. Which prevents
-us from using the generic fbdev emulation code everywhere. The main issue is
-that it uses some fields in struct page itself, which breaks shmem gem objects
-(and other things).
+which means drivers need to have a special framebuffer for fbdev. The main
+issue is that it uses some fields in struct page itself, which breaks shmem
+gem objects (and other things). To support defio, affected drivers require
+the use of a shadow buffer, which may add CPU and memory overhead.
Possible solution would be to write our own defio mmap code in the drm fbdev
emulation. It would need to fully wrap the existing mmap ops, forwarding
@@ -196,6 +232,8 @@ Might be good to also have some igt testcases for this.
Contact: Daniel Vetter, Noralf Tronnes
+Level: Advanced
+
idr_init_base()
---------------
@@ -206,6 +244,8 @@ efficient.
Contact: Daniel Vetter
+Level: Starter
+
struct drm_gem_object_funcs
---------------------------
@@ -216,6 +256,8 @@ We also need a 2nd version of the CMA define that doesn't require the
vmapping to be present (different hook for prime importing). Plus this needs to
be rolled out to all drivers using their own implementations, too.
+Level: Intermediate
+
Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
---------------------------------------------------------
@@ -231,6 +273,8 @@ As a reference, take a look at the conversions already completed in drm core.
Contact: Sean Paul, respective driver maintainers
+Level: Starter
+
Rename CMA helpers to DMA helpers
---------------------------------
@@ -241,6 +285,9 @@ no one knows what that means) since underneath they just use dma_alloc_coherent.
Contact: Laurent Pinchart, Daniel Vetter
+Level: Intermediate (mostly because it is a huge task without good partial
+milestones, not technically itself that challenging)
+
Convert direct mode.vrefresh accesses to use drm_mode_vrefresh()
----------------------------------------------------------------
@@ -259,6 +306,8 @@ drm_display_mode to avoid future use.
Contact: Sean Paul
+Level: Starter
+
Remove drm_display_mode.hsync
-----------------------------
@@ -269,6 +318,8 @@ it to use drm_mode_hsync() instead.
Contact: Sean Paul
+Level: Starter
+
drm_fb_helper tasks
-------------------
@@ -284,20 +335,24 @@ drm_fb_helper tasks
removed: drm_fb_helper_single_add_all_connectors(),
drm_fb_helper_add_one_connector() and drm_fb_helper_remove_one_connector().
-Core refactorings
-=================
+Level: Intermediate
-Clean up the DRM header mess
-----------------------------
+connector register/unregister fixes
+-----------------------------------
-The DRM subsystem originally had only one huge global header, ``drmP.h``. This
-is now split up, but many source files still include it. The remaining part of
-the cleanup work here is to replace any ``#include <drm/drmP.h>`` by only the
-headers needed (and fixing up any missing pre-declarations in the headers).
+- For most connectors it's a no-op to call drm_connector_register/unregister
+ directly from driver code; drm_dev_register/unregister take care of this
+ already. We can remove all of them.
-In the end no .c file should need to include ``drmP.h`` anymore.
+- For dp drivers it's a bit more of a mess, since we need the connector to be
+ registered when calling drm_dp_aux_register. Fix this by instead calling
+ drm_dp_aux_init, and moving the actual registering into a late_register
+ callback as recommended in the kerneldoc.
-Contact: Daniel Vetter
+Level: Intermediate
+
+Core refactorings
+=================
Make panic handling work
------------------------
@@ -338,6 +393,8 @@ This is a really varied tasks with lots of little bits and pieces:
Contact: Daniel Vetter
+Level: Advanced
+
Clean up the debugfs support
----------------------------
@@ -367,6 +424,8 @@ There's a bunch of issues with it:
Contact: Daniel Vetter
+Level: Intermediate
+
KMS cleanups
------------
@@ -382,6 +441,8 @@ Some of these date from the very introduction of KMS in 2008 ...
end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
historical reasons) misnamed drm_primary_helper_destroy() function.
+Level: Intermediate
+
Better Testing
==============
@@ -390,6 +451,8 @@ Enable trinity for DRM
And fix up the fallout. Should be really interesting ...
+Level: Advanced
+
Make KMS tests in i-g-t generic
-------------------------------
@@ -403,6 +466,8 @@ converting things over. For modeset tests we also first need a bit of
infrastructure to use dumb buffers for untiled buffers, to be able to run all
the non-i915 specific modeset tests.
+Level: Advanced
+
Extend virtual test driver (VKMS)
---------------------------------
@@ -412,6 +477,8 @@ fit the available time.
Contact: Daniel Vetter
+Level: See details
+
Backlight Refactoring
---------------------
@@ -425,6 +492,8 @@ Plan to fix this:
Contact: Daniel Vetter
+Level: Intermediate
+
Driver Specific
===============
@@ -438,13 +507,6 @@ See drivers/gpu/drm/amd/display/TODO for tasks.
Contact: Harry Wentland, Alex Deucher
-i915
-----
-
-- Our early/late pm callbacks could be removed in favour of using
- device_link_add to model the dependency between i915 and snd_had. See
- https://dri.freedesktop.org/docs/drm/driver-api/device_link.html
-
Bootsplash
==========
@@ -460,5 +522,36 @@ for fbdev.
Contact: Sam Ravnborg
+Level: Advanced
+
Outside DRM
===========
+
+Convert fbdev drivers to DRM
+----------------------------
+
+There are plenty of fbdev drivers for older hardware. Some hardware has
+become obsolete, but some still provides good(-enough) framebuffers. The
+drivers that are still useful should be converted to DRM and afterwards
+removed from fbdev.
+
+Very simple fbdev drivers can best be converted by starting with a new
+DRM driver. Simple KMS helpers and SHMEM should be able to handle any
+existing hardware. The new driver's call-back functions are filled from
+existing fbdev code.
+
+More complex fbdev drivers can be refactored step-by-step into a DRM
+driver with the help of the DRM fbconv helpers. [1] These helpers provide
+the transition layer between the DRM core infrastructure and the fbdev
+driver interface. Create a new DRM driver on top of the fbconv helpers,
+copy over the fbdev driver, and hook it up to the DRM code. Examples for
+several fbdev drivers are available at [1] and a tutorial of this process
+available at [2]. The result is a primitive DRM driver that can run X11
+and Weston.
+
+ - [1] https://gitlab.freedesktop.org/tzimmermann/linux/tree/fbconv
+ - [2] https://gitlab.freedesktop.org/tzimmermann/linux/blob/fbconv/drivers/gpu/drm/drm_fbconv_helper.c
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>
+
+Level: Advanced
diff --git a/Documentation/hwmon/bel-pfe.rst b/Documentation/hwmon/bel-pfe.rst
new file mode 100644
index 000000000000..4b4a7d67854c
--- /dev/null
+++ b/Documentation/hwmon/bel-pfe.rst
@@ -0,0 +1,112 @@
+Kernel driver bel-pfe
+======================
+
+Supported chips:
+
+ * BEL PFE1100
+
+ Prefixes: 'pfe1100'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.belfuse.com/resources/datasheets/powersolutions/ds-bps-pfe1100-12-054xa.pdf
+
+ * BEL PFE3000
+
+ Prefixes: 'pfe3000'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.belfuse.com/resources/datasheets/powersolutions/ds-bps-pfe3000-series.pdf
+
+Author: Tao Ren <rentao.bupt@gmail.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for the power supply devices below,
+which support the PMBus protocol:
+
+ * BEL PFE1100
+
+ 1100 Watt AC to DC power-factor-corrected (PFC) power supply.
+ PMBus Communication Manual is not publicly available.
+
+ * BEL PFE3000
+
+ 3000 Watt AC/DC power-factor-corrected (PFC) and DC-DC power supply.
+ PMBus Communication Manual is not publicly available.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
+details.
+
+Example: the following will load the driver for a PFE3000 at address 0x20
+on I2C bus #1::
+
+ $ modprobe bel-pfe
+ $ echo pfe3000 0x20 > /sys/bus/i2c/devices/i2c-1/new_device
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+======================= =======================================================
+curr1_label "iin"
+curr1_input Measured input current
+curr1_max Input current max value
+curr1_max_alarm Input current max alarm
+
+curr[2-3]_label "iout[1-2]"
+curr[2-3]_input Measured output current
+curr[2-3]_max Output current max value
+curr[2-3]_max_alarm Output current max alarm
+
+fan[1-2]_input Fan 1 and 2 speed in RPM
+fan1_target Set fan speed reference for both fans
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_crit Input voltage critical max value
+in1_crit_alarm Input voltage critical max alarm
+in1_lcrit Input voltage critical min value
+in1_lcrit_alarm Input voltage critical min alarm
+in1_max Input voltage max value
+in1_max_alarm Input voltage max alarm
+
+in2_label "vcap"
+in2_input Hold up capacitor voltage
+
+in[3-8]_label "vout[1-3,5-7]"
+in[3-8]_input Measured output voltage
+in[3-4]_alarm vout[1-2] output voltage alarm
+
+power[1-2]_label "pin[1-2]"
+power[1-2]_input Measured input power
+power[1-2]_alarm Input power high alarm
+
+power[3-4]_label "pout[1-2]"
+power[3-4]_input Measured output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_alarm Temperature alarm
+======================= =======================================================
+
+.. note::
+
+ - curr3, fan2, vout[2-7], vcap, pin2, pout2 and temp3 attributes only
+ exist for PFE3000.
diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst
new file mode 100644
index 000000000000..3bf77a5df995
--- /dev/null
+++ b/Documentation/hwmon/dell-smm-hwmon.rst
@@ -0,0 +1,164 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+.. include:: <isonum.txt>
+
+Kernel driver dell-smm-hwmon
+============================
+
+:Copyright: |copy| 2002-2005 Massimo Dal Zotto <dz@debian.org>
+:Copyright: |copy| 2019 Giovanni Mascellani <gio@debian.org>
+
+Description
+-----------
+
+On many Dell laptops the System Management Mode (SMM) BIOS can be
+queried for the status of fans and temperature sensors. Userspace
+utilities like ``sensors`` can be used to return the readings. The
+userspace suite `i8kutils`__ can also be used to read the sensors and
+automatically adjust fan speed (please note that it currently uses
+the deprecated ``/proc/i8k`` interface).
+
+ __ https://github.com/vitorafsr/i8kutils
+
+``sysfs`` interface
+-------------------
+
+Temperature sensors and fans can be queried and set via the standard
+``hwmon`` interface on ``sysfs``, under the directory
+``/sys/class/hwmon/hwmonX`` for some value of ``X`` (search for the
+``X`` such that ``/sys/class/hwmon/hwmonX/name`` has content
+``dell_smm``). A number of other attributes can be read or written:
+
+=============================== ======= =======================================
+Name Perm Description
+=============================== ======= =======================================
+fan[1-3]_input RO Fan speed in RPM.
+fan[1-3]_label RO Fan label.
+pwm[1-3] RW Control the fan PWM duty-cycle.
+pwm1_enable WO Enable or disable automatic BIOS fan
+ control (not supported on all laptops,
+ see below for details).
+temp[1-10]_input RO Temperature reading in milli-degrees
+ Celsius.
+temp[1-10]_label RO Temperature sensor label.
+=============================== ======= =======================================
+
+Disabling automatic BIOS fan control
+------------------------------------
+
+On some laptops the BIOS automatically sets fan speed every few
+seconds. Therefore the fan speed set by means of this driver is quickly
+overwritten.
+
+There is experimental support for disabling automatic BIOS fan
+control, at least on laptops where the corresponding SMM command is
+known, by writing the value ``1`` in the attribute ``pwm1_enable``
+(writing ``2`` enables automatic BIOS control again). Even if you have
+more than one fan, automatic fan control is enabled or disabled for all
+of them at the same time; notwithstanding the name,
+``pwm1_enable`` sets automatic control for all fans.
+
+If ``pwm1_enable`` is not available, then it means that SMM codes for
+enabling and disabling automatic BIOS fan control are not whitelisted
+for your hardware. It is possible that codes that work for other
+laptops actually work for yours as well, or that you have to discover
+new codes.
+
+Check the list ``i8k_whitelist_fan_control`` in file
+``drivers/hwmon/dell-smm-hwmon.c`` in the kernel tree: as a first
+attempt you can try to add your machine and use an already-known code
+pair. If, after recompiling the kernel, you see that ``pwm1_enable``
+is present and works (i.e., you can manually control the fan speed),
+then please submit your finding as a kernel patch, so that other users
+can benefit from it. Please see
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+for information on submitting patches.
+
+If no known code works on your machine, you need to resort to some
+probing, because unfortunately Dell does not publish datasheets for
+its SMM. You can experiment with the code in `this repository`__ to
+probe the BIOS on your machine and discover the appropriate codes.
+
+ __ https://github.com/clopez/dellfan/
+
+Again, when you find new codes, we'd be happy to have your patches!
+
+Module parameters
+-----------------
+
+* force:bool
+ Force loading without checking for supported
+ models. (default: 0)
+
+* ignore_dmi:bool
+ Continue probing hardware even if DMI data does not
+ match. (default: 0)
+
+* restricted:bool
+ Allow fan control only to processes with the
+ ``CAP_SYS_ADMIN`` capability set or processes run
+ as root when using the legacy ``/proc/i8k``
+ interface. In this case normal users will be able
+ to read temperature and fan status but not to
+ control the fan. If your notebook is shared with
+ other users and you don't trust them you may want
+ to use this option. (default: 1, only available
+ with ``CONFIG_I8K``)
+
+* power_status:bool
+ Report AC status in ``/proc/i8k``. (default: 0,
+ only available with ``CONFIG_I8K``)
+
+* fan_mult:uint
+ Factor to multiply fan speed with. (default:
+ autodetect)
+
+* fan_max:uint
+ Maximum configurable fan speed. (default:
+ autodetect)
+
+Legacy ``/proc`` interface
+--------------------------
+
+.. warning:: This interface is obsolete and deprecated and should not
+ be used in new applications. It is only available when
+ the kernel is compiled with the ``CONFIG_I8K`` option.
+
+The information provided by the kernel driver can be accessed by
+simply reading the ``/proc/i8k`` file. For example::
+
+ $ cat /proc/i8k
+ 1.0 A17 2J59L02 52 2 1 8040 6420 1 2
+
+The fields read from ``/proc/i8k`` are::
+
+ 1.0 A17 2J59L02 52 2 1 8040 6420 1 2
+ | | | | | | | | | |
+ | | | | | | | | | +------- 10. buttons status
+ | | | | | | | | +--------- 9. AC status
+ | | | | | | | +-------------- 8. fan0 RPM
+ | | | | | | +------------------- 7. fan1 RPM
+ | | | | | +--------------------- 6. fan0 status
+ | | | | +----------------------- 5. fan1 status
+ | | | +-------------------------- 4. temp0 reading (Celsius)
+ | | +---------------------------------- 3. Dell service tag (later known as 'serial number')
+ | +-------------------------------------- 2. BIOS version
+ +------------------------------------------ 1. /proc/i8k format version
+
+A negative value, for example -22, indicates that the BIOS doesn't
+return the corresponding information. This is normal on some
+models/BIOSes.
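+
+For illustration, these fields could be parsed from C with a sketch like
+the following (illustrative only, error handling omitted)::
+
+ #include <stdio.h>
+
+ char bios[8], tag[16];
+ float fmt;
+ int temp0, fan1_st, fan0_st, fan1_rpm, fan0_rpm, ac, buttons;
+
+ FILE *f = fopen("/proc/i8k", "r");
+ fscanf(f, "%f %7s %15s %d %d %d %d %d %d %d",
+ &fmt, bios, tag, &temp0, &fan1_st, &fan0_st,
+ &fan1_rpm, &fan0_rpm, &ac, &buttons);
+ fclose(f);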
+
+For performance reasons, ``/proc/i8k`` doesn't report the AC status by
+default, since this SMM call takes a long time to execute and is not
+really needed. If you want to see the AC status in ``/proc/i8k`` you
+must explicitly enable this option by passing the
+``power_status=1`` parameter to insmod. If the AC status is not
+available, -1 is printed instead.
+
+The driver also provides an ioctl interface which can be used to
+obtain the same information and to control the fan status. The ioctl
+interface can be accessed from C programs or from the shell using the
+i8kctl utility. See the source file of ``i8kutils`` for more
+information on how to use the ioctl interface.
diff --git a/Documentation/hwmon/ina3221.rst b/Documentation/hwmon/ina3221.rst
index f6007ae8f4e2..297f7323b441 100644
--- a/Documentation/hwmon/ina3221.rst
+++ b/Documentation/hwmon/ina3221.rst
@@ -41,6 +41,18 @@ curr[123]_max Warning alert current(mA) setting, activates the
average is above this value.
curr[123]_max_alarm Warning alert current limit exceeded
in[456]_input Shunt voltage(uV) for channels 1, 2, and 3 respectively
+in7_input Sum of shunt voltage(uV) channels
+in7_label Channel label for sum of shunt voltage
+curr4_input Sum of current(mA) measurement channels,
+ (only available when all channels use the same resistor
+ value for their shunt resistors)
+curr4_crit Critical alert current(mA) setting for sum of current
+ measurements, activates the corresponding alarm
+ when the respective current is above this value
+ (only effective when all channels use the same resistor
+ value for their shunt resistors)
+curr4_crit_alarm Critical alert current limit exceeded for sum of
+ current measurements.
samples Number of samples using in the averaging mode.
Supports the list of number of samples:
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 230ad59b462b..43cc605741ea 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -41,9 +41,11 @@ Hardware Monitoring Kernel Drivers
asb100
asc7621
aspeed-pwm-tacho
+ bel-pfe
coretemp
da9052
da9055
+ dell-smm-hwmon
dme1737
ds1621
ds620
@@ -90,6 +92,7 @@ Hardware Monitoring Kernel Drivers
lm95245
lochnagar
ltc2945
+ ltc2947
ltc2978
ltc2990
ltc3815
@@ -153,6 +156,7 @@ Hardware Monitoring Kernel Drivers
tmp108
tmp401
tmp421
+ tmp513
tps40422
twl4030-madc-hwmon
ucd9000
diff --git a/Documentation/hwmon/inspur-ipsps1.rst b/Documentation/hwmon/inspur-ipsps1.rst
index 292c0c26bdd1..4825046ecb25 100644
--- a/Documentation/hwmon/inspur-ipsps1.rst
+++ b/Documentation/hwmon/inspur-ipsps1.rst
@@ -17,7 +17,7 @@ Usage Notes
-----------
This driver does not auto-detect devices. You will have to instantiate the
-devices explicitly. Please see Documentation/i2c/instantiating-devices for
+devices explicitly. Please see Documentation/i2c/instantiating-devices.rst for
details.
Sysfs entries
diff --git a/Documentation/hwmon/ltc2947.rst b/Documentation/hwmon/ltc2947.rst
new file mode 100644
index 000000000000..419fc84fe934
--- /dev/null
+++ b/Documentation/hwmon/ltc2947.rst
@@ -0,0 +1,100 @@
+Kernel drivers ltc2947-i2c and ltc2947-spi
+==========================================
+
+Supported chips:
+
+ * Analog Devices LTC2947
+
+ Prefix: 'ltc2947'
+
+ Addresses scanned: -
+
+ Datasheet:
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/LTC2947.pdf
+
+Author: Nuno Sá <nuno.sa@analog.com>
+
+Description
+___________
+
+The LTC2947 is a high precision power and energy monitor that measures current,
+voltage, power, temperature, charge and energy. The device supports both SPI
+and I2C depending on the chip configuration.
+The device also measures accumulated quantities such as energy. It has two
+banks of registers to read/set energy related values. These banks can be
+configured independently for setups like: energy1 always accumulates and
+energy2 only accumulates if the current is positive (to check battery charging
+efficiency, for example). The device also supports a GPIO pin that can be
+configured as an output to control a fan as a function of measured
+temperature. The GPIO becomes active as soon as a temperature reading is
+higher than a defined threshold. The temp2 channel is used to control these
+thresholds and to read the respective alarms.
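+
+For example, the accumulated energy could be read from userspace with a
+sketch like this (the hwmon index is system dependent; illustrative only,
+error handling omitted)::
+
+ #include <stdio.h>
+
+ long long uj;
+ FILE *f = fopen("/sys/class/hwmon/hwmon0/energy1_input", "r");
+ fscanf(f, "%lld", &uj); /* accumulated energy in microJoule */
+ fclose(f);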
+
+Sysfs entries
+_____________
+
+The following attributes are supported. Limits are read-write, reset_history
+is write-only and all the other attributes are read-only.
+
+======================= ==========================================
+in0_input VP-VM voltage (mV).
+in0_min Undervoltage threshold
+in0_max Overvoltage threshold
+in0_lowest Lowest measured voltage
+in0_highest Highest measured voltage
+in0_reset_history Write 1 to reset in0 history
+in0_min_alarm Undervoltage alarm
+in0_max_alarm Overvoltage alarm
+in0_label Channel label (VP-VM)
+
+in1_input DVCC voltage (mV)
+in1_min Undervoltage threshold
+in1_max Overvoltage threshold
+in1_lowest Lowest measured voltage
+in1_highest Highest measured voltage
+in1_reset_history Write 1 to reset in1 history
+in1_min_alarm Undervoltage alarm
+in1_max_alarm Overvoltage alarm
+in1_label Channel label (DVCC)
+
+curr1_input IP-IM Sense current (mA)
+curr1_min Undercurrent threshold
+curr1_max Overcurrent threshold
+curr1_lowest Lowest measured current
+curr1_highest Highest measured current
+curr1_reset_history Write 1 to reset curr1 history
+curr1_min_alarm Undercurrent alarm
+curr1_max_alarm Overcurrent alarm
+curr1_label Channel label (IP-IM)
+
+power1_input Power (in uW)
+power1_min Low power threshold
+power1_max High power threshold
+power1_input_lowest Historical minimum power use
+power1_input_highest Historical maximum power use
+power1_reset_history Write 1 to reset power1 history
+power1_min_alarm Low power alarm
+power1_max_alarm High power alarm
+power1_label Channel label (Power)
+
+temp1_input Chip Temperature (in milliC)
+temp1_min Low temperature threshold
+temp1_max High temperature threshold
+temp1_input_lowest Historical minimum temperature use
+temp1_input_highest Historical maximum temperature use
+temp1_reset_history Write 1 to reset temp1 history
+temp1_min_alarm Low temperature alarm
+temp1_max_alarm High temperature alarm
+temp1_label Channel label (Ambient)
+
+temp2_min Low temperature threshold for fan control
+temp2_max High temperature threshold for fan control
+temp2_min_alarm Low temperature fan control alarm
+temp2_max_alarm High temperature fan control alarm
+temp2_label Channel label (TEMPFAN)
+
+energy1_input Measured energy over time (in microJoule)
+
+energy2_input Measured energy over time (in microJoule)
+======================= ==========================================
diff --git a/Documentation/hwmon/tmp513.rst b/Documentation/hwmon/tmp513.rst
new file mode 100644
index 000000000000..6c8fae4b1a75
--- /dev/null
+++ b/Documentation/hwmon/tmp513.rst
@@ -0,0 +1,103 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver tmp513
+====================
+
+Supported chips:
+
+ * Texas Instruments TMP512
+
+ Prefix: 'tmp512'
+
+ Datasheet: http://www.ti.com/lit/ds/symlink/tmp512.pdf
+
+ * Texas Instruments TMP513
+
+ Prefix: 'tmp513'
+
+ Datasheet: http://www.ti.com/lit/ds/symlink/tmp513.pdf
+
+Authors:
+
+ Eric Tremblay <etremblay@distech-controls.com>
+
+Description
+-----------
+
+This driver implements support for the Texas Instruments TMP512 and TMP513.
+The TMP512 (dual-channel) and TMP513 (triple-channel) are system monitors
+that include remote sensors, a local temperature sensor, and a high-side current
+shunt monitor. These system monitors have the capability of measuring remote
+temperatures, on-chip temperatures, and system voltage/power/current
+consumption.
+
+The temperatures are measured in degrees Celsius with a range of
+-40 to +125 degrees with a resolution of 0.0625 degree C.
+
+For the hysteresis value, only the first channel is writable. Writing to it
+will affect all other values since all channels share the same hysteresis
+value. The hysteresis is in degrees Celsius with a range of
+0 to 127.5 degrees with a resolution of 0.5 degree.
+
+The driver exports the temperature values via the following sysfs files:
+
+**temp[1-4]_input**
+
+**temp[1-4]_crit**
+
+**temp[1-4]_crit_alarm**
+
+**temp[1-4]_crit_hyst**
+
+The driver reads the shunt voltage from the chip and converts it to current.
+The readable range depends on the "ti,pga-gain" property (defaults to 8) and
+the shunt resistor value. The value resolution is equal to 10uV/Rshunt.
+
+The driver exports the shunt current values via the following sysfs files:
+
+**curr1_input**
+
+**curr1_lcrit**
+
+**curr1_lcrit_alarm**
+
+**curr1_crit**
+
+**curr1_crit_alarm**
+
+The bus voltage range is read from the chip with a resolution of 4mV. The chip
+can be configured in two different ranges (32V or 16V) using the
+ti,bus-range-microvolt property in the devicetree.
+
+The driver exports the bus voltage values via the following sysfs files:
+
+**in0_input**
+
+**in0_lcrit**
+
+**in0_lcrit_alarm**
+
+**in0_crit**
+
+**in0_crit_alarm**
+
+The bus power and bus current range and resolution depend on the calibration
+register value. Those values are calculated by the hardware using these
+formulas::
+
+ Current = (ShuntVoltage * CalibrationRegister) / 4096
+ Power = (Current * BusVoltage) / 5000
+
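+For illustration, in C (variable names mirror the formulas above; this is
+a sketch, not driver code)::
+
+ u32 current_reg = (shunt_voltage * calibration_register) / 4096;
+ u32 power_reg = (current_reg * bus_voltage) / 5000;
+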
+The driver exports the bus current and bus power values via the following
+sysfs files:
+
+**curr2_input**
+
+**power1_input**
+
+**power1_crit**
+
+**power1_crit_alarm**
+
+The calibration process follows the procedure of the datasheet (without
+overflow) and depends on the shunt resistor value and the pga_gain value.
diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
index 2a570c214880..b83da0e94184 100644
--- a/Documentation/i2c/busses/i2c-i801.rst
+++ b/Documentation/i2c/busses/i2c-i801.rst
@@ -42,6 +42,7 @@ Supported adapters:
* Intel Comet Lake (PCH)
* Intel Elkhart Lake (PCH)
* Intel Tiger Lake (PCH)
+ * Intel Jasper Lake (SOC)
Datasheets: Publicly available at the Intel website
diff --git a/Documentation/i2c/busses/index.rst b/Documentation/i2c/busses/index.rst
index 97ca4d510816..2a26e251a335 100644
--- a/Documentation/i2c/busses/index.rst
+++ b/Documentation/i2c/busses/index.rst
@@ -1,4 +1,4 @@
-. SPDX-License-Identifier: GPL-2.0
+.. SPDX-License-Identifier: GPL-2.0
===============
I2C Bus Drivers
diff --git a/Documentation/i2c/index.rst b/Documentation/i2c/index.rst
index cd8d020f7ac5..a0fbaf6d0675 100644
--- a/Documentation/i2c/index.rst
+++ b/Documentation/i2c/index.rst
@@ -1,4 +1,4 @@
-. SPDX-License-Identifier: GPL-2.0
+.. SPDX-License-Identifier: GPL-2.0
===================
I2C/SMBus Subsystem
diff --git a/Documentation/i2c/instantiating-devices.rst b/Documentation/i2c/instantiating-devices.rst
index 1238f1fa3382..875ebe9e78e3 100644
--- a/Documentation/i2c/instantiating-devices.rst
+++ b/Documentation/i2c/instantiating-devices.rst
@@ -123,7 +123,7 @@ present or not (for example for an optional feature which is not present
on cheap variants of a board but you have no way to tell them apart), or
it may have different addresses from one board to the next (manufacturer
changing its design without notice). In this case, you can call
-i2c_new_probed_device() instead of i2c_new_device().
+i2c_new_scanned_device() instead of i2c_new_device().
Example (from the nxp OHCI driver)::
@@ -139,8 +139,8 @@ Example (from the nxp OHCI driver)::
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
strscpy(i2c_info.type, "isp1301_nxp", sizeof(i2c_info.type));
- isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
- normal_i2c, NULL);
+ isp1301_i2c_client = i2c_new_scanned_device(i2c_adap, &i2c_info,
+ normal_i2c, NULL);
i2c_put_adapter(i2c_adap);
(...)
}
@@ -153,14 +153,14 @@ simply gives up.
The driver which instantiated the I2C device is responsible for destroying
it on cleanup. This is done by calling i2c_unregister_device() on the
pointer that was earlier returned by i2c_new_device() or
-i2c_new_probed_device().
+i2c_new_scanned_device().
Method 3: Probe an I2C bus for certain devices
----------------------------------------------
Sometimes you do not have enough information about an I2C device, not even
-to call i2c_new_probed_device(). The typical case is hardware monitoring
+to call i2c_new_scanned_device(). The typical case is hardware monitoring
chips on PC mainboards. There are several dozen models, which can live
at 25 different addresses. Given the huge number of mainboards out there,
it is next to impossible to build an exhaustive list of the hardware
diff --git a/Documentation/i2c/writing-clients.rst b/Documentation/i2c/writing-clients.rst
index dddf0a14ab7c..ced309b5e0cc 100644
--- a/Documentation/i2c/writing-clients.rst
+++ b/Documentation/i2c/writing-clients.rst
@@ -185,14 +185,14 @@ Sometimes you know that a device is connected to a given I2C bus, but you
don't know the exact address it uses. This happens on TV adapters for
example, where the same driver supports dozens of slightly different
models, and I2C device addresses change from one model to the next. In
-that case, you can use the i2c_new_probed_device() variant, which is
+that case, you can use the i2c_new_scanned_device() variant, which is
similar to i2c_new_device(), except that it takes an additional list of
possible I2C addresses to probe. A device is created for the first
responsive address in the list. If you expect more than one device to be
-present in the address range, simply call i2c_new_probed_device() that
+present in the address range, simply call i2c_new_scanned_device() that
many times.
-The call to i2c_new_device() or i2c_new_probed_device() typically happens
+The call to i2c_new_device() or i2c_new_scanned_device() typically happens
in the I2C bus driver. You may want to save the returned i2c_client
reference for later use.
@@ -237,7 +237,7 @@ Device Deletion
---------------
Each I2C device which has been created using i2c_new_device() or
-i2c_new_probed_device() can be unregistered by calling
+i2c_new_scanned_device() can be unregistered by calling
i2c_unregister_device(). If you don't call it explicitly, it will be
called automatically before the underlying I2C bus itself is removed, as a
device can't survive its parent in the device driver model.
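
For illustration, the typical create/destroy pairing looks like this (a
sketch with hypothetical names; error handling omitted)::

    static struct i2c_client *my_client;

    my_client = i2c_new_scanned_device(adap, &info, addr_list, NULL);
    /* ... use the device ... */
    i2c_unregister_device(my_client);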
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 2ceab197246f..e99d0bd2589d 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -57,7 +57,6 @@ the kernel interface as seen by application developers.
:maxdepth: 2
userspace-api/index
- ioctl/index
Introduction to kernel development
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index b89c88168d6a..b9b50553bfc5 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -1115,23 +1115,6 @@ When kbuild executes, the following steps are followed (roughly):
In this example, extra-y is used to list object files that
shall be built, but shall not be linked as part of built-in.a.
- header-test-y
-
- header-test-y specifies headers (`*.h`) in the current directory that
- should be compile tested to ensure they are self-contained,
- i.e. compilable as standalone units. If CONFIG_HEADER_TEST is enabled,
- this builds them as part of extra-y.
-
- header-test-pattern-y
-
- This works as a weaker version of header-test-y, and accepts wildcard
- patterns. The typical usage is::
-
- header-test-pattern-y += *.h
-
- This specifies all the files that matches to `*.h` in the current
- directory, but the files in 'header-test-' are excluded.
-
6.7 Commands useful for building a boot image
---------------------------------------------
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index 774a998dcf37..69fa48ee93d6 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -492,11 +492,8 @@ build.
to the symbols from the kernel to check if all external symbols
are defined. This is done in the MODPOST step. modpost obtains
the symbols by reading Module.symvers from the kernel source
- tree. If a Module.symvers file is present in the directory
- where the external module is being built, this file will be
- read too. During the MODPOST step, a new Module.symvers file
- will be written containing all exported symbols that were not
- defined in the kernel.
+ tree. During the MODPOST step, a new Module.symvers file will be
+ written containing all exported symbols from that external module.
6.3 Symbols From Another External Module
----------------------------------------
@@ -504,7 +501,7 @@ build.
Sometimes, an external module uses exported symbols from
another external module. Kbuild needs to have full knowledge of
all symbols to avoid spitting out warnings about undefined
- symbols. Three solutions exist for this situation.
+ symbols. Two solutions exist for this situation.
NOTE: The method with a top-level kbuild file is recommended
but may be impractical in certain situations.
@@ -544,8 +541,8 @@ build.
all symbols defined and not part of the kernel.
Use "make" variable KBUILD_EXTRA_SYMBOLS
- If it is impractical to copy Module.symvers from
- another module, you can assign a space separated list
+	If it is impractical to add a top-level kbuild file,
+	you can assign a space-separated list
of files to KBUILD_EXTRA_SYMBOLS in your build file.
These files will be loaded by modpost during the
initialization of its symbol tables.
diff --git a/Documentation/maintainer/configure-git.rst b/Documentation/maintainer/configure-git.rst
index 78bbbb0d2c84..80ae5030a590 100644
--- a/Documentation/maintainer/configure-git.rst
+++ b/Documentation/maintainer/configure-git.rst
@@ -32,3 +32,33 @@ You may also like to tell ``gpg`` which ``tty`` to use (add to your shell rc fil
::
export GPG_TTY=$(tty)
+
+
+Creating commit links to lore.kernel.org
+----------------------------------------
+
+The web site http://lore.kernel.org is meant as a grand archive of all mailing
+list traffic concerning or influencing kernel development. Storing archives
+of patches here is a recommended practice, and when a maintainer applies a
+patch to a subsystem tree, it is a good idea to provide a Link: tag with a
+reference back to the lore archive so that people who browse the commit
+history can find the related discussion and rationale behind a certain change.
+The link tag will look like this:
+
+ Link: https://lore.kernel.org/r/<message-id>
+
+This can be configured to happen automatically any time you issue ``git am``
+by adding the following hook into your git:
+
+.. code-block:: none
+
+ $ git config am.messageid true
+ $ cat >.git/hooks/applypatch-msg <<'EOF'
+ #!/bin/sh
+ . git-sh-setup
+ perl -pi -e 's|^Message-Id:\s*<?([^>]+)>?$|Link: https://lore.kernel.org/r/$1|g;' "$1"
+ test -x "$GIT_DIR/hooks/commit-msg" &&
+ exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"}
+ :
+ EOF
+ $ chmod a+x .git/hooks/applypatch-msg
diff --git a/Documentation/maintainer/index.rst b/Documentation/maintainer/index.rst
index 56e2c09dfa39..d904e74e1159 100644
--- a/Documentation/maintainer/index.rst
+++ b/Documentation/maintainer/index.rst
@@ -12,4 +12,5 @@ additions to this manual.
configure-git
rebasing-and-merging
pull-requests
+ maintainer-entry-profile
diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst
new file mode 100644
index 000000000000..3eaddc8ac56d
--- /dev/null
+++ b/Documentation/maintainer/maintainer-entry-profile.rst
@@ -0,0 +1,102 @@
+.. _maintainerentryprofile:
+
+Maintainer Entry Profile
+========================
+
+The Maintainer Entry Profile supplements the top-level process documents
+(submitting-patches, submitting-drivers...) with
+subsystem/device-driver-local customs as well as details about the patch
+submission life-cycle. A contributor uses this document to set
+their expectations and avoid common mistakes; maintainers may use these
+profiles to look across subsystems for opportunities to converge on
+common practices.
+
+
+Overview
+--------
+Provide an introduction to how the subsystem operates. While MAINTAINERS
+tells the contributor where to send patches for which files, it does not
+convey other subsystem-local infrastructure and mechanisms that aid
+development.
+
+Example questions to consider:
+
+- Are there notifications when patches are applied to the local tree, or
+ merged upstream?
+- Does the subsystem have a patchwork instance? Are patchwork state
+ changes notified?
+- Are there any bots or CI infrastructure watching the list, or automated
+  testing feedback that the subsystem uses to gate acceptance?
+- Git branches that are pulled into -next?
+- What branch should contributors submit against?
+- Links to any other Maintainer Entry Profiles? For example a
+ device-driver may point to an entry for its parent subsystem. This makes
+  the contributor aware of obligations a maintainer may have for
+ other maintainers in the submission chain.
+
+
+Submit Checklist Addendum
+-------------------------
+List mandatory and advisory criteria, beyond the common "submit-checklist",
+for a patch to be considered healthy enough for maintainer attention.
+For example: "pass checkpatch.pl with no errors or warnings. Pass the
+unit test detailed at $URI".
+
+The Submit Checklist Addendum can also include details about the status
+of related hardware specifications. For example, does the subsystem
+require published specifications at a certain revision before patches
+will be considered?
+
+
+Key Cycle Dates
+---------------
+One of the common misunderstandings of submitters is that patches can be
+sent at any time before the merge window closes and can still be
+considered for the next -rc1. The reality is that most patches need to
+have soaked in linux-next in advance of the merge window
+opening. Clarify for the submitter the key dates (in terms of the -rc
+release week) at which patches might be considered for merging and when
+patches need to wait for the next -rc. At a minimum:
+
+- Last -rc for new feature submissions:
+ New feature submissions targeting the next merge window should have
+ their first posting for consideration before this point. Patches that
+  are submitted after this point should make it clear that they are targeting
+  the NEXT+1 merge window, or should come with sufficient justification
+  why they should be considered on an expedited schedule. A general
+  guideline is to set the expectation with contributors that new feature
+  submissions should appear before -rc5.
+
+- Last -rc to merge features: Deadline for merge decisions
+  Indicate to contributors the point at which an as-yet-unapplied patch
+  set will need to wait for the NEXT+1 merge window. Of course there is no
+  obligation to ever accept any given patchset, but if the review has not
+  concluded by this point the expectation is that the contributor should
+  wait and resubmit for the following merge window.
+
+Optional:
+
+- First -rc at which the development baseline branch, listed in the
+ overview section, should be considered ready for new submissions.
+
+
+Review Cadence
+--------------
+One of the largest sources of contributor angst is how soon to ping
+after a patchset has been posted without receiving any feedback. In
+addition to specifying how long to wait before a resubmission, this
+section can also indicate a preferred style of update, like resending the
+full series or privately sending a reminder email. This section might also
+list how review works for this code area and methods to get feedback
+that are not directly from the maintainer.
+
+Existing profiles
+-----------------
+
+For now, existing maintainer profiles are listed here; we will likely want
+to do something different in the near future.
+
+.. toctree::
+ :maxdepth: 1
+
+ ../nvdimm/maintainer-entry-profile
diff --git a/Documentation/media/cec.h.rst.exceptions b/Documentation/media/cec.h.rst.exceptions
index 014816d04b9e..d83790ccac8e 100644
--- a/Documentation/media/cec.h.rst.exceptions
+++ b/Documentation/media/cec.h.rst.exceptions
@@ -335,6 +335,95 @@ ignore define CEC_OP_MENU_STATE_DEACTIVATED
ignore define CEC_MSG_USER_CONTROL_PRESSED
+ignore define CEC_OP_UI_CMD_SELECT
+ignore define CEC_OP_UI_CMD_UP
+ignore define CEC_OP_UI_CMD_DOWN
+ignore define CEC_OP_UI_CMD_LEFT
+ignore define CEC_OP_UI_CMD_RIGHT
+ignore define CEC_OP_UI_CMD_RIGHT_UP
+ignore define CEC_OP_UI_CMD_RIGHT_DOWN
+ignore define CEC_OP_UI_CMD_LEFT_UP
+ignore define CEC_OP_UI_CMD_LEFT_DOWN
+ignore define CEC_OP_UI_CMD_DEVICE_ROOT_MENU
+ignore define CEC_OP_UI_CMD_DEVICE_SETUP_MENU
+ignore define CEC_OP_UI_CMD_CONTENTS_MENU
+ignore define CEC_OP_UI_CMD_FAVORITE_MENU
+ignore define CEC_OP_UI_CMD_BACK
+ignore define CEC_OP_UI_CMD_MEDIA_TOP_MENU
+ignore define CEC_OP_UI_CMD_MEDIA_CONTEXT_SENSITIVE_MENU
+ignore define CEC_OP_UI_CMD_NUMBER_ENTRY_MODE
+ignore define CEC_OP_UI_CMD_NUMBER_11
+ignore define CEC_OP_UI_CMD_NUMBER_12
+ignore define CEC_OP_UI_CMD_NUMBER_0_OR_NUMBER_10
+ignore define CEC_OP_UI_CMD_NUMBER_1
+ignore define CEC_OP_UI_CMD_NUMBER_2
+ignore define CEC_OP_UI_CMD_NUMBER_3
+ignore define CEC_OP_UI_CMD_NUMBER_4
+ignore define CEC_OP_UI_CMD_NUMBER_5
+ignore define CEC_OP_UI_CMD_NUMBER_6
+ignore define CEC_OP_UI_CMD_NUMBER_7
+ignore define CEC_OP_UI_CMD_NUMBER_8
+ignore define CEC_OP_UI_CMD_NUMBER_9
+ignore define CEC_OP_UI_CMD_DOT
+ignore define CEC_OP_UI_CMD_ENTER
+ignore define CEC_OP_UI_CMD_CLEAR
+ignore define CEC_OP_UI_CMD_NEXT_FAVORITE
+ignore define CEC_OP_UI_CMD_CHANNEL_UP
+ignore define CEC_OP_UI_CMD_CHANNEL_DOWN
+ignore define CEC_OP_UI_CMD_PREVIOUS_CHANNEL
+ignore define CEC_OP_UI_CMD_SOUND_SELECT
+ignore define CEC_OP_UI_CMD_INPUT_SELECT
+ignore define CEC_OP_UI_CMD_DISPLAY_INFORMATION
+ignore define CEC_OP_UI_CMD_HELP
+ignore define CEC_OP_UI_CMD_PAGE_UP
+ignore define CEC_OP_UI_CMD_PAGE_DOWN
+ignore define CEC_OP_UI_CMD_POWER
+ignore define CEC_OP_UI_CMD_VOLUME_UP
+ignore define CEC_OP_UI_CMD_VOLUME_DOWN
+ignore define CEC_OP_UI_CMD_MUTE
+ignore define CEC_OP_UI_CMD_PLAY
+ignore define CEC_OP_UI_CMD_STOP
+ignore define CEC_OP_UI_CMD_PAUSE
+ignore define CEC_OP_UI_CMD_RECORD
+ignore define CEC_OP_UI_CMD_REWIND
+ignore define CEC_OP_UI_CMD_FAST_FORWARD
+ignore define CEC_OP_UI_CMD_EJECT
+ignore define CEC_OP_UI_CMD_SKIP_FORWARD
+ignore define CEC_OP_UI_CMD_SKIP_BACKWARD
+ignore define CEC_OP_UI_CMD_STOP_RECORD
+ignore define CEC_OP_UI_CMD_PAUSE_RECORD
+ignore define CEC_OP_UI_CMD_ANGLE
+ignore define CEC_OP_UI_CMD_SUB_PICTURE
+ignore define CEC_OP_UI_CMD_VIDEO_ON_DEMAND
+ignore define CEC_OP_UI_CMD_ELECTRONIC_PROGRAM_GUIDE
+ignore define CEC_OP_UI_CMD_TIMER_PROGRAMMING
+ignore define CEC_OP_UI_CMD_INITIAL_CONFIGURATION
+ignore define CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE
+ignore define CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION
+ignore define CEC_OP_UI_CMD_AUDIO_DESCRIPTION
+ignore define CEC_OP_UI_CMD_INTERNET
+ignore define CEC_OP_UI_CMD_3D_MODE
+ignore define CEC_OP_UI_CMD_PLAY_FUNCTION
+ignore define CEC_OP_UI_CMD_PAUSE_PLAY_FUNCTION
+ignore define CEC_OP_UI_CMD_RECORD_FUNCTION
+ignore define CEC_OP_UI_CMD_PAUSE_RECORD_FUNCTION
+ignore define CEC_OP_UI_CMD_STOP_FUNCTION
+ignore define CEC_OP_UI_CMD_MUTE_FUNCTION
+ignore define CEC_OP_UI_CMD_RESTORE_VOLUME_FUNCTION
+ignore define CEC_OP_UI_CMD_TUNE_FUNCTION
+ignore define CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION
+ignore define CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION
+ignore define CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION
+ignore define CEC_OP_UI_CMD_POWER_TOGGLE_FUNCTION
+ignore define CEC_OP_UI_CMD_POWER_OFF_FUNCTION
+ignore define CEC_OP_UI_CMD_POWER_ON_FUNCTION
+ignore define CEC_OP_UI_CMD_F1_BLUE
+ignore define CEC_OP_UI_CMD_F2_RED
+ignore define CEC_OP_UI_CMD_F3_GREEN
+ignore define CEC_OP_UI_CMD_F4_YELLOW
+ignore define CEC_OP_UI_CMD_F5
+ignore define CEC_OP_UI_CMD_DATA
+
ignore define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL
ignore define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA
ignore define CEC_OP_UI_BCAST_TYPE_ANALOGUE
diff --git a/Documentation/media/kapi/v4l2-controls.rst b/Documentation/media/kapi/v4l2-controls.rst
index ebe2a55908be..b20800cae3f2 100644
--- a/Documentation/media/kapi/v4l2-controls.rst
+++ b/Documentation/media/kapi/v4l2-controls.rst
@@ -140,6 +140,15 @@ Menu controls with a driver specific menu are added by calling
const struct v4l2_ctrl_ops *ops, u32 id, s32 max,
s32 skip_mask, s32 def, const char * const *qmenu);
+Standard compound controls can be added by calling
+:c:func:`v4l2_ctrl_new_std_compound`:
+
+.. code-block:: c
+
+ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def);
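+
+For example, a sensor driver could register the standard
+``V4L2_CID_UNIT_CELL_SIZE`` compound control with a driver-specific default
+along these lines (a sketch only; ``hdl`` stands for the driver's control
+handler, and the ``p_area`` member of the pointer union is assumed to match
+the control's payload type):
+
+.. code-block:: c
+
+    static struct v4l2_area unit_size = {
+            .width = 1120,
+            .height = 1120,
+    };
+
+    union v4l2_ctrl_ptr p_def;
+
+    p_def.p_area = &unit_size;
+    v4l2_ctrl_new_std_compound(hdl, NULL, V4L2_CID_UNIT_CELL_SIZE, p_def);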
+
Integer menu controls with a driver specific menu can be added by calling
:c:func:`v4l2_ctrl_new_int_menu`:
diff --git a/Documentation/media/uapi/cec/cec-funcs.rst b/Documentation/media/uapi/cec/cec-funcs.rst
index 620590b168c9..dc6da9c639a8 100644
--- a/Documentation/media/uapi/cec/cec-funcs.rst
+++ b/Documentation/media/uapi/cec/cec-funcs.rst
@@ -24,6 +24,7 @@ Function Reference
cec-ioc-adap-g-caps
cec-ioc-adap-g-log-addrs
cec-ioc-adap-g-phys-addr
+ cec-ioc-adap-g-conn-info
cec-ioc-dqevent
cec-ioc-g-mode
cec-ioc-receive
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index 0c44f31a9b59..76761a98c312 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -135,8 +135,12 @@ returns the information to the application. The ioctl never fails.
- The CEC hardware can monitor CEC pin changes from low to high voltage
and vice versa. When in pin monitoring mode the application will
receive ``CEC_EVENT_PIN_CEC_LOW`` and ``CEC_EVENT_PIN_CEC_HIGH`` events.
+ * .. _`CEC-CAP-CONNECTOR-INFO`:
-
+ - ``CEC_CAP_CONNECTOR_INFO``
+ - 0x00000100
+ - If this capability is set, then :ref:`CEC_ADAP_G_CONNECTOR_INFO` can
+ be used.
Return Value
============
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst
new file mode 100644
index 000000000000..a21659d55c6b
--- /dev/null
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-conn-info.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: GPL-2.0
+..
+.. Copyright 2019 Google LLC
+..
+.. _CEC_ADAP_G_CONNECTOR_INFO:
+
+*******************************
+ioctl CEC_ADAP_G_CONNECTOR_INFO
+*******************************
+
+Name
+====
+
+CEC_ADAP_G_CONNECTOR_INFO - Query HDMI connector information
+
+Synopsis
+========
+
+.. c:function:: int ioctl( int fd, CEC_ADAP_G_CONNECTOR_INFO, struct cec_connector_info *argp )
+ :name: CEC_ADAP_G_CONNECTOR_INFO
+
+Arguments
+=========
+
+``fd``
+ File descriptor returned by :c:func:`open() <cec-open>`.
+
+``argp``
+    Pointer to struct :c:type:`cec_connector_info`.
+
+Description
+===========
+
+Using this ioctl an application can learn which HDMI connector this CEC
+device corresponds to. When calling this ioctl, the application should
+provide a pointer to a struct cec_connector_info, which the kernel will
+populate with the information provided by the adapter's driver. This ioctl
+is only available if the ``CEC_CAP_CONNECTOR_INFO`` capability is set.
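+
+For example (``fd`` is an open CEC device node; error handling omitted):
+
+.. code-block:: c
+
+    struct cec_connector_info info = { 0 };
+
+    ioctl(fd, CEC_ADAP_G_CONNECTOR_INFO, &info);
+    if (info.type == CEC_CONNECTOR_TYPE_DRM)
+        printf("DRM card %u, connector %u\n",
+               info.drm.card_no, info.drm.connector_id);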
+
+.. tabularcolumns:: |p{1.0cm}|p{4.4cm}|p{2.5cm}|p{9.6cm}|
+
+.. c:type:: cec_connector_info
+
+.. flat-table:: struct cec_connector_info
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 1 8
+
+ * - __u32
+ - ``type``
+ - The type of connector this adapter is associated with.
+ * - union
+ - ``(anonymous)``
+ -
+ * -
+ - ``struct cec_drm_connector_info``
+ - drm
+ - :ref:`cec-drm-connector-info`
+
+
+.. tabularcolumns:: |p{4.4cm}|p{2.5cm}|p{10.6cm}|
+
+.. _connector-type:
+
+.. flat-table:: Connector types
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 8
+
+ * .. _`CEC-CONNECTOR-TYPE-NO-CONNECTOR`:
+
+ - ``CEC_CONNECTOR_TYPE_NO_CONNECTOR``
+ - 0
+ - No connector is associated with the adapter/the information is not
+ provided by the driver.
+ * .. _`CEC-CONNECTOR-TYPE-DRM`:
+
+ - ``CEC_CONNECTOR_TYPE_DRM``
+ - 1
+ - Indicates that a DRM connector is associated with this adapter.
+ Information about the connector can be found in
+ :ref:`cec-drm-connector-info`.
+
+.. tabularcolumns:: |p{4.4cm}|p{2.5cm}|p{10.6cm}|
+
+.. c:type:: cec_drm_connector_info
+
+.. _cec-drm-connector-info:
+
+.. flat-table:: struct cec_drm_connector_info
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 8
+
+ * .. _`CEC-DRM-CONNECTOR-TYPE-CARD-NO`:
+
+ - __u32
+ - ``card_no``
+ - DRM card number: the number from a card's path, e.g. 0 in case of
+ /dev/card0.
+ * .. _`CEC-DRM-CONNECTOR-TYPE-CONNECTOR_ID`:
+
+ - __u32
+ - ``connector_id``
+ - DRM connector ID.
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 46a1c99a595e..5e21b1fbfc01 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -70,6 +70,14 @@ it is guaranteed that the state did change in between the two events.
addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
has the unregistered logical address. In that case all other bits are 0.
+ * - __u16
+ - ``have_conn_info``
+ - If non-zero, then HDMI connector information is available.
+ This field is only valid if ``CEC_CAP_CONNECTOR_INFO`` is set. If that
+ capability is set and ``have_conn_info`` is zero, then that indicates
+ that the HDMI connector device is not instantiated, either because
+ the HDMI driver is still configuring the device or because the HDMI
+ device was unbound.
.. c:type:: cec_event_lost_msgs
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
index a74c82d95609..01abe8103bdd 100644
--- a/Documentation/media/uapi/mediactl/request-api.rst
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -53,8 +53,8 @@ with different configurations in advance, knowing that the configuration will be
applied when needed to get the expected result. Configuration values at the time
of request completion are also available for reading.
-Usage
-=====
+General Usage
+-------------
The Request API extends the Media Controller API and cooperates with
subsystem-specific APIs to support request usage. At the Media Controller
diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst
index ad2ff258afa8..8095f57d3d75 100644
--- a/Documentation/media/uapi/v4l/biblio.rst
+++ b/Documentation/media/uapi/v4l/biblio.rst
@@ -131,6 +131,15 @@ ITU-T Rec. H.264 Specification (04/2017 Edition)
:author: International Telecommunication Union (http://www.itu.ch)
+.. _hevc:
+
+ITU H.265/HEVC
+==============
+
+:title: ITU-T Rec. H.265 | ISO/IEC 23008-2 "High Efficiency Video Coding"
+
+:author: International Telecommunication Union (http://www.itu.ch), International Organisation for Standardisation (http://www.iso.ch)
+
.. _jfif:
JFIF
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index 1cbd9cde57f3..9149b57728e5 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -607,6 +607,19 @@ Buffer Flags
applications shall use this flag for output buffers if the data in
this buffer has not been created by the CPU but by some
DMA-capable unit, in which case caches have not been used.
+ * .. _`V4L2-BUF-FLAG-M2M-HOLD-CAPTURE-BUF`:
+
+ - ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF``
+ - 0x00000200
+ - Only valid if ``V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF`` is
+ set. It is typically used with stateless decoders where multiple
+ output buffers each decode to a slice of the decoded frame.
+ Applications can set this flag when queueing the output buffer
+ to prevent the driver from dequeueing the capture buffer after
+ the output buffer has been decoded (i.e. the capture buffer is
+ 'held'). If the timestamp of this output buffer differs from that
+ of the previous output buffer, then that indicates the start of a
+ new frame and the previously held capture buffer is dequeued.
* .. _`V4L2-BUF-FLAG-LAST`:
- ``V4L2_BUF_FLAG_LAST``
diff --git a/Documentation/media/uapi/v4l/dev-mem2mem.rst b/Documentation/media/uapi/v4l/dev-mem2mem.rst
index caa05f5f6380..70953958cee6 100644
--- a/Documentation/media/uapi/v4l/dev-mem2mem.rst
+++ b/Documentation/media/uapi/v4l/dev-mem2mem.rst
@@ -46,3 +46,4 @@ devices are given in the following sections.
:maxdepth: 1
dev-decoder
+ dev-stateless-decoder
diff --git a/Documentation/media/uapi/v4l/dev-stateless-decoder.rst b/Documentation/media/uapi/v4l/dev-stateless-decoder.rst
new file mode 100644
index 000000000000..4a26646eeec5
--- /dev/null
+++ b/Documentation/media/uapi/v4l/dev-stateless-decoder.rst
@@ -0,0 +1,424 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _stateless_decoder:
+
+**************************************************
+Memory-to-memory Stateless Video Decoder Interface
+**************************************************
+
+A stateless decoder is a decoder that works without retaining any kind of state
+between processed frames. This means that each frame is decoded independently
+of any previous and future frames, and that the client is responsible for
+maintaining the decoding state and providing it to the decoder with each
+decoding request. This is in contrast to the stateful video decoder interface,
+where the hardware and driver maintain the decoding state and all the client
+has to do is to provide the raw encoded stream and dequeue decoded frames in
+display order.
+
+This section describes how user-space ("the client") is expected to communicate
+with stateless decoders in order to successfully decode an encoded stream.
+Compared to stateful codecs, the decoder/client sequence is simpler, but the
+cost of this simplicity is extra complexity in the client, which is responsible
+for maintaining a consistent decoding state.
+
+Stateless decoders make use of the :ref:`media-request-api`. A stateless
+decoder must expose the ``V4L2_BUF_CAP_SUPPORTS_REQUESTS`` capability on its
+``OUTPUT`` queue when :c:func:`VIDIOC_REQBUFS` or :c:func:`VIDIOC_CREATE_BUFS`
+are invoked.
+
+Depending on the encoded formats supported by the decoder, a single decoded
+frame may be the result of several decode requests (for instance, H.264 streams
+with multiple slices per frame). Decoders that support such formats must also
+expose the ``V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF`` capability on their
+``OUTPUT`` queue.
+
+Querying capabilities
+=====================
+
+1. To enumerate the set of coded formats supported by the decoder, the client
+ calls :c:func:`VIDIOC_ENUM_FMT` on the ``OUTPUT`` queue.
+
+ * The driver must always return the full set of supported ``OUTPUT`` formats,
+ irrespective of the format currently set on the ``CAPTURE`` queue.
+
+   * Simultaneously, the driver must restrict the set of values returned by
+     codec-specific capability controls (such as H.264 profiles) to the set
+     actually supported by the hardware.
+
+2. To enumerate the set of supported raw formats, the client calls
+ :c:func:`VIDIOC_ENUM_FMT` on the ``CAPTURE`` queue.
+
+ * The driver must return only the formats supported for the format currently
+ active on the ``OUTPUT`` queue.
+
+ * Depending on the currently set ``OUTPUT`` format, the set of supported raw
+ formats may depend on the value of some codec-dependent controls.
+ The client is responsible for making sure that these controls are set
+ before querying the ``CAPTURE`` queue. Failure to do so will result in the
+ default values for these controls being used, and a returned set of formats
+ that may not be usable for the media the client is trying to decode.
+
+3. The client may use :c:func:`VIDIOC_ENUM_FRAMESIZES` to detect supported
+ resolutions for a given format, passing desired pixel format in
+ :c:type:`v4l2_frmsizeenum`'s ``pixel_format``.
+
+4. Supported profiles and levels for the current ``OUTPUT`` format, if
+ applicable, may be queried using their respective controls via
+ :c:func:`VIDIOC_QUERYCTRL`.
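+
+The enumeration steps above might look roughly like this in client code (a
+sketch; ``video_fd`` is an open decoder video node and error handling is
+omitted):
+
+.. code-block:: c
+
+    struct v4l2_fmtdesc fmt = {
+        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+    };
+
+    /* Step 1: enumerate the coded (OUTPUT) formats. */
+    while (ioctl(video_fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
+        printf("coded format: %s\n", fmt.description);
+        fmt.index++;
+    }
+
+    /* Step 2: enumerate the raw (CAPTURE) formats supported for the
+     * currently set OUTPUT format. */
+    fmt.index = 0;
+    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    while (ioctl(video_fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
+        printf("raw format: %s\n", fmt.description);
+        fmt.index++;
+    }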
+
+Initialization
+==============
+
+1. Set the coded format on the ``OUTPUT`` queue via :c:func:`VIDIOC_S_FMT`.
+
+ * **Required fields:**
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``OUTPUT``.
+
+ ``pixelformat``
+ a coded pixel format.
+
+ ``width``, ``height``
+ coded width and height parsed from the stream.
+
+ other fields
+ follow standard semantics.
+
+ .. note::
+
+ Changing the ``OUTPUT`` format may change the currently set ``CAPTURE``
+ format. The driver will derive a new ``CAPTURE`` format from the
+ ``OUTPUT`` format being set, including resolution, colorimetry
+ parameters, etc. If the client needs a specific ``CAPTURE`` format,
+ it must adjust it afterwards.
+
+2. Call :c:func:`VIDIOC_S_EXT_CTRLS` to set all the controls (parsed headers,
+ etc.) required by the ``OUTPUT`` format to enumerate the ``CAPTURE`` formats.
+
+3. Call :c:func:`VIDIOC_G_FMT` for ``CAPTURE`` queue to get the format for the
+ destination buffers parsed/decoded from the bytestream.
+
+ * **Required fields:**
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
+
+ * **Returned fields:**
+
+ ``width``, ``height``
+ frame buffer resolution for the decoded frames.
+
+ ``pixelformat``
+ pixel format for decoded frames.
+
+ ``num_planes`` (for _MPLANE ``type`` only)
+ number of planes for pixelformat.
+
+ ``sizeimage``, ``bytesperline``
+ as per standard semantics; matching frame buffer format.
+
+ .. note::
+
+ The value of ``pixelformat`` may be any pixel format supported for the
+ ``OUTPUT`` format, based on the hardware capabilities. It is suggested
+ that the driver chooses the preferred/optimal format for the current
+ configuration. For example, a YUV format may be preferred over an RGB
+ format, if an additional conversion step would be required for RGB.
+
+4. *[optional]* Enumerate ``CAPTURE`` formats via :c:func:`VIDIOC_ENUM_FMT` on
+ the ``CAPTURE`` queue. The client may use this ioctl to discover which
+ alternative raw formats are supported for the current ``OUTPUT`` format and
+ select one of them via :c:func:`VIDIOC_S_FMT`.
+
+ .. note::
+
+ The driver will return only formats supported for the currently selected
+ ``OUTPUT`` format and currently set controls, even if more formats may be
+ supported by the decoder in general.
+
+ For example, a decoder may support YUV and RGB formats for
+ resolutions 1920x1088 and lower, but only YUV for higher resolutions (due
+ to hardware limitations). After setting a resolution of 1920x1088 or lower
+ as the ``OUTPUT`` format, :c:func:`VIDIOC_ENUM_FMT` may return a set of
+ YUV and RGB pixel formats, but after setting a resolution higher than
+ 1920x1088, the driver will not return RGB pixel formats, since they are
+ unsupported for this resolution.
+
+5. *[optional]* Choose a different ``CAPTURE`` format than suggested via
+ :c:func:`VIDIOC_S_FMT` on ``CAPTURE`` queue. It is possible for the client to
+ choose a different format than selected/suggested by the driver in
+ :c:func:`VIDIOC_G_FMT`.
+
+ * **Required fields:**
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
+
+ ``pixelformat``
+ a raw pixel format.
+
+ ``width``, ``height``
+ frame buffer resolution of the decoded stream; typically unchanged from
+ what was returned with :c:func:`VIDIOC_G_FMT`, but it may be different
+ if the hardware supports composition and/or scaling.
+
+   After performing this step, the client must perform step 3 again in order
+   to obtain up-to-date information about the buffer size and layout.
+
+6. Allocate source (bytestream) buffers via :c:func:`VIDIOC_REQBUFS` on
+ ``OUTPUT`` queue.
+
+ * **Required fields:**
+
+ ``count``
+ requested number of buffers to allocate; greater than zero.
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``OUTPUT``.
+
+ ``memory``
+ follows standard semantics.
+
+ * **Return fields:**
+
+ ``count``
+ actual number of buffers allocated.
+
+   * If required, the driver will adjust ``count`` so that it is not lower
+     than the minimum number of ``OUTPUT`` buffers required for the given
+     format. The client must check this value after the ioctl returns
+     to get the actual number of buffers allocated.
+
+7. Allocate destination (raw format) buffers via :c:func:`VIDIOC_REQBUFS` on the
+ ``CAPTURE`` queue.
+
+ * **Required fields:**
+
+ ``count``
+ requested number of buffers to allocate; greater than zero. The client
+ is responsible for deducing the minimum number of buffers required
+ for the stream to be properly decoded (taking e.g. reference frames
+        into account) and for passing an equal or bigger number.
+
+ ``type``
+ a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
+
+ ``memory``
+ follows standard semantics. ``V4L2_MEMORY_USERPTR`` is not supported
+ for ``CAPTURE`` buffers.
+
+ * **Return fields:**
+
+ ``count``
+ adjusted to allocated number of buffers, in case the codec requires
+ more buffers than requested.
+
+   * The driver must adjust ``count`` so that it is not lower than the minimum
+     number of ``CAPTURE`` buffers required for the current format and stream
+     configuration. The client must check this value after the ioctl
+     returns to get the number of buffers allocated.
+
+8. Allocate requests (likely one per ``OUTPUT`` buffer) via
+ :c:func:`MEDIA_IOC_REQUEST_ALLOC` on the media device.
+
+9. Start streaming on both ``OUTPUT`` and ``CAPTURE`` queues via
+ :c:func:`VIDIOC_STREAMON`.
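+
+Condensed into code, the sequence above might look roughly like this
+(single-planar API; error handling omitted; ``video_fd`` and ``media_fd``
+are assumed to be already-open device file descriptors, and the coded
+format and resolution are placeholders):
+
+.. code-block:: c
+
+    struct v4l2_format fmt = {
+        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+    };
+    struct v4l2_requestbuffers reqbufs = {
+        .count = 4,
+        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+        .memory = V4L2_MEMORY_MMAP,
+    };
+    int out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+    int cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    int request_fd;
+
+    /* Step 1: set the coded format on the OUTPUT queue. */
+    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_H264_SLICE;
+    fmt.fmt.pix.width = 1920;
+    fmt.fmt.pix.height = 1088;
+    ioctl(video_fd, VIDIOC_S_FMT, &fmt);
+
+    /* Step 3: read back the CAPTURE format derived by the driver. */
+    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    ioctl(video_fd, VIDIOC_G_FMT, &fmt);
+
+    /* Steps 6 and 7: allocate OUTPUT, then CAPTURE buffers. */
+    ioctl(video_fd, VIDIOC_REQBUFS, &reqbufs);
+    reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    ioctl(video_fd, VIDIOC_REQBUFS, &reqbufs);
+
+    /* Step 8: allocate a request (one per OUTPUT buffer in practice). */
+    ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd);
+
+    /* Step 9: start streaming on both queues. */
+    ioctl(video_fd, VIDIOC_STREAMON, &out_type);
+    ioctl(video_fd, VIDIOC_STREAMON, &cap_type);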
+
+Decoding
+========
+
+For each frame, the client is responsible for submitting at least one request to
+which the following is attached:
+
+* The amount of encoded data expected by the codec for its current
+ configuration, as a buffer submitted to the ``OUTPUT`` queue. Typically, this
+ corresponds to one frame worth of encoded data, but some formats may allow (or
+ require) different amounts per unit.
+* All the metadata needed to decode the submitted encoded data, in the form of
+ controls relevant to the format being decoded.
+
+The amount of data and contents of the source ``OUTPUT`` buffer, as well as the
+controls that must be set on the request, depend on the active coded pixel
+format and might be affected by codec-specific extended controls, as stated in
+the documentation of each format.
+
+If there is a possibility that the decoded frame will require one or more
+decode requests after the current one in order to be produced, then the client
+must set the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag on the ``OUTPUT``
+buffer. This will result in the (potentially partially) decoded ``CAPTURE``
+buffer not being made available for dequeueing, and being reused for the next
+decode request if the timestamp of the next ``OUTPUT`` buffer has not changed.
+
+A typical frame would thus be decoded using the following sequence:
+
+1. Queue an ``OUTPUT`` buffer containing one unit of encoded bytestream data for
+ the decoding request, using :c:func:`VIDIOC_QBUF`.
+
+ * **Required fields:**
+
+ ``index``
+ index of the buffer being queued.
+
+ ``type``
+ type of the buffer.
+
+ ``bytesused``
+ number of bytes taken by the encoded data frame in the buffer.
+
+ ``flags``
+        the ``V4L2_BUF_FLAG_REQUEST_FD`` flag must be set. Additionally, if
+        the client is not sure that the current decode request is the last
+        one needed to produce a fully decoded frame, then
+        ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` must also be set.
+
+ ``request_fd``
+ must be set to the file descriptor of the decoding request.
+
+ ``timestamp``
+        must be set to a unique value per frame. This value will be propagated
+        into the decoded frame's buffer and can also be used to reference this
+        frame from another. If using multiple decode requests per
+ frame, then the timestamps of all the ``OUTPUT`` buffers for a given
+ frame must be identical. If the timestamp changes, then the currently
+ held ``CAPTURE`` buffer will be made available for dequeuing and the
+ current request will work on a new ``CAPTURE`` buffer.
+
+2. Set the codec-specific controls for the decoding request, using
+ :c:func:`VIDIOC_S_EXT_CTRLS`.
+
+ * **Required fields:**
+
+ ``which``
+ must be ``V4L2_CTRL_WHICH_REQUEST_VAL``.
+
+ ``request_fd``
+ must be set to the file descriptor of the decoding request.
+
+ other fields
+ other fields are set as usual when setting controls. The ``controls``
+ array must contain all the codec-specific controls required to decode
+ a frame.
+
+ .. note::
+
+ It is possible to specify the controls in different invocations of
+ :c:func:`VIDIOC_S_EXT_CTRLS`, or to overwrite a previously set control, as
+ long as ``request_fd`` and ``which`` are properly set. The controls state
+ at the moment of request submission is the one that will be considered.
+
+ .. note::
+
+ The order in which steps 1 and 2 take place is interchangeable.
+
+3. Submit the request by invoking :c:func:`MEDIA_REQUEST_IOC_QUEUE` on the
+ request FD.
+
+ If the request is submitted without an ``OUTPUT`` buffer, or if some of the
+ required controls are missing from the request, then
+ :c:func:`MEDIA_REQUEST_IOC_QUEUE` will return ``-ENOENT``. If more than one
+ ``OUTPUT`` buffer is queued, then it will return ``-EINVAL``.
+ :c:func:`MEDIA_REQUEST_IOC_QUEUE` returning non-zero means that no
+ ``CAPTURE`` buffer will be produced for this request.
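+
+A sketch of steps 1 to 3 for a single slice follows; ``slice_size``,
+``frame_num`` and the H.264 ``slice_params`` control are placeholders for
+whatever the active coded format actually requires:
+
+.. code-block:: c
+
+    struct v4l2_buffer buf = {
+        .index = 0,
+        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+        .bytesused = slice_size,
+        .flags = V4L2_BUF_FLAG_REQUEST_FD,
+        .request_fd = request_fd,
+    };
+    struct v4l2_ext_control ctrl = {
+        .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS,
+        .size = sizeof(slice_params),
+        .ptr = &slice_params,
+    };
+    struct v4l2_ext_controls ctrls = {
+        .which = V4L2_CTRL_WHICH_REQUEST_VAL,
+        .request_fd = request_fd,
+        .count = 1,
+        .controls = &ctrl,
+    };
+
+    /* Step 1: queue the OUTPUT buffer, tied to the request. */
+    buf.timestamp.tv_usec = frame_num;  /* must be unique per frame */
+    ioctl(video_fd, VIDIOC_QBUF, &buf);
+
+    /* Step 2: attach the codec-specific controls to the same request. */
+    ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+
+    /* Step 3: submit the request itself. */
+    ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE);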
+
+``CAPTURE`` buffers must not be part of the request, and are queued
+independently. They are returned in decode order (i.e. the same order as coded
+frames were submitted to the ``OUTPUT`` queue).
+
+Runtime decoding errors are signaled by the dequeued ``CAPTURE`` buffers
+carrying the ``V4L2_BUF_FLAG_ERROR`` flag. If a decoded reference frame has an
+error, then all following decoded frames that refer to it also have the
+``V4L2_BUF_FLAG_ERROR`` flag set, although the decoder will still try to
+produce (likely corrupted) frames.
+
+Buffer management while decoding
+================================
+Contrary to stateful decoders, a stateless decoder does not perform any kind of
+buffer management: it only guarantees that dequeued ``CAPTURE`` buffers can be
+used by the client for as long as they are not queued again. "Used" here
+encompasses using the buffer for compositing or display.
+
+A dequeued capture buffer can also be used as the reference frame of another
+buffer.
+
+A frame is specified as reference by converting its timestamp into nanoseconds,
+and storing it into the relevant member of a codec-dependent control structure.
+The :c:func:`v4l2_timeval_to_ns` function must be used to perform that
+conversion. The timestamp of a frame can be used to reference it as soon as all
+its units of encoded data are successfully submitted to the ``OUTPUT`` queue.
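+
+For example, assuming ``buf`` is the ``OUTPUT`` buffer that carried the
+reference frame's encoded data and ``slice_params`` is a placeholder for a
+codec-specific control structure with a DPB-like entry:
+
+.. code-block:: c
+
+    /* Reference the frame queued with 'buf' from a DPB entry. */
+    slice_params.dpb[0].timestamp = v4l2_timeval_to_ns(&buf.timestamp);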
+
+A decoded buffer containing a reference frame must not be reused as a decoding
+target until all the frames referencing it have been decoded. The safest way to
+achieve this is to refrain from queueing a reference buffer until all the
+decoded frames referencing it have been dequeued. However, if the driver can
+guarantee that buffers queued to the ``CAPTURE`` queue are processed in queued
+order, then user-space can take advantage of this guarantee and queue a
+reference buffer when the following conditions are met:
+
+1. All the requests for frames affected by the reference frame have been
+ queued, and
+
+2. A sufficient number of ``CAPTURE`` buffers to cover all the decoded
+ referencing frames have been queued.
+
+When queuing a decoding request, the driver will increase the reference count of
+all the resources associated with reference frames. This means that the client
+can e.g. close the DMABUF file descriptors of reference frame buffers if it
+won't need them afterwards.
+
+Seeking
+=======
+In order to seek, the client just needs to submit requests using input buffers
+corresponding to the new stream position. It must, however, be aware that the
+resolution may have changed and follow the dynamic resolution change sequence in
+that case. Also depending on the codec used, picture parameters (e.g. SPS/PPS
+for H.264) may have changed and the client is responsible for making sure that a
+valid state is sent to the decoder.
+
+The client is then free to ignore any returned ``CAPTURE`` buffer that comes
+from the pre-seek position.
+
+Pausing
+=======
+
+In order to pause, the client can just cease queuing buffers onto the ``OUTPUT``
+queue. Without source bytestream data, there is no data to process and the codec
+will remain idle.
+
+Dynamic resolution change
+=========================
+
+If the client detects a resolution change in the stream, it will need to perform
+the initialization sequence again with the new resolution:
+
+1. If the last submitted request resulted in a ``CAPTURE`` buffer being
+ held by the use of the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag, then the
+ last frame is not available on the ``CAPTURE`` queue. In this case, a
+ ``V4L2_DEC_CMD_FLUSH`` command shall be sent. This will make the driver
+ dequeue the held ``CAPTURE`` buffer.
+
+2. Wait until all submitted requests have completed and dequeue the
+ corresponding output buffers.
+
+3. Call :c:func:`VIDIOC_STREAMOFF` on both the ``OUTPUT`` and ``CAPTURE``
+ queues.
+
+4. Free all ``CAPTURE`` buffers by calling :c:func:`VIDIOC_REQBUFS` on the
+ ``CAPTURE`` queue with a buffer count of zero.
+
+5. Perform the initialization sequence again (minus the allocation of
+ ``OUTPUT`` buffers), with the new resolution set on the ``OUTPUT`` queue.
+ Note that due to resolution constraints, a different format may need to be
+ picked on the ``CAPTURE`` queue.
+
+Drain
+=====
+
+If the last submitted request resulted in a ``CAPTURE`` buffer being
+held by the use of the ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag, then the
+last frame is not available on the ``CAPTURE`` queue. In this case, a
+``V4L2_DEC_CMD_FLUSH`` command shall be sent. This will make the driver
+dequeue the held ``CAPTURE`` buffer.
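+
+A minimal sketch of issuing that command:
+
+.. code-block:: c
+
+    struct v4l2_decoder_cmd cmd = {
+        .cmd = V4L2_DEC_CMD_FLUSH,
+    };
+
+    ioctl(video_fd, VIDIOC_DECODER_CMD, &cmd);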
+
+After that, in order to drain the stream on a stateless decoder, the client
+just needs to wait until all the submitted requests are completed.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
index bc5dd8e76567..28313c0f4e7c 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
@@ -1713,10 +1713,14 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
* - __u8
- ``scaling_list_4x4[6][16]``
- -
+ - Scaling matrix after applying the inverse scanning process.
+ Expected list order is Intra Y, Intra Cb, Intra Cr, Inter Y,
+ Inter Cb, Inter Cr.
* - __u8
- ``scaling_list_8x8[6][64]``
- -
+ - Scaling matrix after applying the inverse scanning process.
+ Expected list order is Intra Y, Inter Y, Intra Cb, Inter Cb,
+ Intra Cr, Inter Cr.
``V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (struct)``
Specifies the slice parameters (as extracted from the bitstream)
@@ -1796,7 +1800,7 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
-
* - __u32
- ``dec_ref_pic_marking_bit_size``
- -
+ - Size in bits of the dec_ref_pic_marking() syntax element.
* - __u32
- ``pic_order_cnt_bit_size``
-
@@ -1820,10 +1824,12 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
-
* - __u8
- ``num_ref_idx_l0_active_minus1``
- -
+ - If num_ref_idx_active_override_flag is not set, this field must be
+ set to the value of num_ref_idx_l0_default_active_minus1.
* - __u8
- ``num_ref_idx_l1_active_minus1``
- -
+ - If num_ref_idx_active_override_flag is not set, this field must be
+ set to the value of num_ref_idx_l1_default_active_minus1.
* - __u32
- ``slice_group_change_cycle``
-
@@ -1983,9 +1989,9 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
- ``reference_ts``
- Timestamp of the V4L2 capture buffer to use as reference, used
with B-coded and P-coded frames. The timestamp refers to the
- ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
- :c:func:`v4l2_timeval_to_ns()` function to convert the struct
- :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
* - __u16
- ``frame_num``
-
@@ -3693,3 +3699,550 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
Indicates whether to generate SPS and PPS at every IDR. Setting it to 0
disables generating SPS and PPS at every IDR. Setting it to one enables
generating SPS and PPS at every IDR.
+
+.. _v4l2-mpeg-hevc:
+
+``V4L2_CID_MPEG_VIDEO_HEVC_SPS (struct)``
+ Specifies the Sequence Parameter Set fields (as extracted from the
+ bitstream) for the associated HEVC slice data.
+ These bitstream parameters are defined according to :ref:`hevc`.
+ They are described in section 7.4.3.2 "Sequence parameter set RBSP
+ semantics" of the specification.
+
+.. c:type:: v4l2_ctrl_hevc_sps
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_sps
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u16
+ - ``pic_width_in_luma_samples``
+ -
+ * - __u16
+ - ``pic_height_in_luma_samples``
+ -
+ * - __u8
+ - ``bit_depth_luma_minus8``
+ -
+ * - __u8
+ - ``bit_depth_chroma_minus8``
+ -
+ * - __u8
+ - ``log2_max_pic_order_cnt_lsb_minus4``
+ -
+ * - __u8
+ - ``sps_max_dec_pic_buffering_minus1``
+ -
+ * - __u8
+ - ``sps_max_num_reorder_pics``
+ -
+ * - __u8
+ - ``sps_max_latency_increase_plus1``
+ -
+ * - __u8
+ - ``log2_min_luma_coding_block_size_minus3``
+ -
+ * - __u8
+ - ``log2_diff_max_min_luma_coding_block_size``
+ -
+ * - __u8
+ - ``log2_min_luma_transform_block_size_minus2``
+ -
+ * - __u8
+ - ``log2_diff_max_min_luma_transform_block_size``
+ -
+ * - __u8
+ - ``max_transform_hierarchy_depth_inter``
+ -
+ * - __u8
+ - ``max_transform_hierarchy_depth_intra``
+ -
+ * - __u8
+ - ``pcm_sample_bit_depth_luma_minus1``
+ -
+ * - __u8
+ - ``pcm_sample_bit_depth_chroma_minus1``
+ -
+ * - __u8
+ - ``log2_min_pcm_luma_coding_block_size_minus3``
+ -
+ * - __u8
+ - ``log2_diff_max_min_pcm_luma_coding_block_size``
+ -
+ * - __u8
+ - ``num_short_term_ref_pic_sets``
+ -
+ * - __u8
+ - ``num_long_term_ref_pics_sps``
+ -
+ * - __u8
+ - ``chroma_format_idc``
+ -
+ * - __u64
+ - ``flags``
+ - See :ref:`Sequence Parameter Set Flags <hevc_sps_flags>`
+
+.. _hevc_sps_flags:
+
+``Sequence Parameter Set Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_AMP_ENABLED``
+ - 0x00000004
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET``
+ - 0x00000008
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_PCM_ENABLED``
+ - 0x00000010
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED``
+ - 0x00000020
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT``
+ - 0x00000040
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED``
+ - 0x00000080
+ -
+ * - ``V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED``
+ - 0x00000100
+ -
+
+``V4L2_CID_MPEG_VIDEO_HEVC_PPS (struct)``
+ Specifies the Picture Parameter Set fields (as extracted from the
+ bitstream) for the associated HEVC slice data.
+ These bitstream parameters are defined according to :ref:`hevc`.
+ They are described in section 7.4.3.3 "Picture parameter set RBSP
+ semantics" of the specification.
+
+.. c:type:: v4l2_ctrl_hevc_pps
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_pps
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``num_extra_slice_header_bits``
+ -
+ * - __s8
+ - ``init_qp_minus26``
+ -
+ * - __u8
+ - ``diff_cu_qp_delta_depth``
+ -
+ * - __s8
+ - ``pps_cb_qp_offset``
+ -
+ * - __s8
+ - ``pps_cr_qp_offset``
+ -
+ * - __u8
+ - ``num_tile_columns_minus1``
+ -
+ * - __u8
+ - ``num_tile_rows_minus1``
+ -
+ * - __u8
+ - ``column_width_minus1[20]``
+ -
+ * - __u8
+ - ``row_height_minus1[22]``
+ -
+ * - __s8
+ - ``pps_beta_offset_div2``
+ -
+ * - __s8
+ - ``pps_tc_offset_div2``
+ -
+ * - __u8
+ - ``log2_parallel_merge_level_minus2``
+ -
+ * - __u8
+ - ``padding[4]``
+ - Applications and drivers must set this to zero.
+ * - __u64
+ - ``flags``
+ - See :ref:`Picture Parameter Set Flags <hevc_pps_flags>`
+
+.. _hevc_pps_flags:
+
+``Picture Parameter Set Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED``
+ - 0x00000004
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT``
+ - 0x00000008
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED``
+ - 0x00000010
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED``
+ - 0x00000020
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED``
+ - 0x00000040
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT``
+ - 0x00000080
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED``
+ - 0x00000100
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED``
+ - 0x00000200
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED``
+ - 0x00000400
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_TILES_ENABLED``
+ - 0x00000800
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED``
+ - 0x00001000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED``
+ - 0x00002000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED``
+ - 0x00004000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED``
+ - 0x00008000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER``
+ - 0x00010000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT``
+ - 0x00020000
+ -
+ * - ``V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT``
+ - 0x00040000
+ -
+
+``V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (struct)``
+ Specifies various slice-specific parameters, especially from the NAL unit
+ header, general slice segment header and weighted prediction parameter
+ parts of the bitstream.
+ These bitstream parameters are defined according to :ref:`hevc`.
+ They are described in section 7.4.7 "General slice segment header
+ semantics" of the specification.
+
+.. c:type:: v4l2_ctrl_hevc_slice_params
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_ctrl_hevc_slice_params
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u32
+ - ``bit_size``
+ - Size (in bits) of the current slice data.
+ * - __u32
+ - ``data_bit_offset``
+ - Offset (in bits) to the video data in the current slice data.
+ * - __u8
+ - ``nal_unit_type``
+ -
+ * - __u8
+ - ``nuh_temporal_id_plus1``
+ -
+ * - __u8
+ - ``slice_type``
+ -
+ (V4L2_HEVC_SLICE_TYPE_I, V4L2_HEVC_SLICE_TYPE_P or
+ V4L2_HEVC_SLICE_TYPE_B).
+ * - __u8
+ - ``colour_plane_id``
+ -
+ * - __u16
+ - ``slice_pic_order_cnt``
+ -
+ * - __u8
+ - ``num_ref_idx_l0_active_minus1``
+ -
+ * - __u8
+ - ``num_ref_idx_l1_active_minus1``
+ -
+ * - __u8
+ - ``collocated_ref_idx``
+ -
+ * - __u8
+ - ``five_minus_max_num_merge_cand``
+ -
+ * - __s8
+ - ``slice_qp_delta``
+ -
+ * - __s8
+ - ``slice_cb_qp_offset``
+ -
+ * - __s8
+ - ``slice_cr_qp_offset``
+ -
+ * - __s8
+ - ``slice_act_y_qp_offset``
+ -
+ * - __s8
+ - ``slice_act_cb_qp_offset``
+ -
+ * - __s8
+ - ``slice_act_cr_qp_offset``
+ -
+ * - __s8
+ - ``slice_beta_offset_div2``
+ -
+ * - __s8
+ - ``slice_tc_offset_div2``
+ -
+ * - __u8
+ - ``pic_struct``
+ -
+ * - __u8
+ - ``num_active_dpb_entries``
+ - The number of entries in ``dpb``.
+ * - __u8
+ - ``ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The list of L0 reference elements as indices in the DPB.
+ * - __u8
+ - ``ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The list of L1 reference elements as indices in the DPB.
+ * - __u8
+ - ``num_rps_poc_st_curr_before``
+ - The number of reference pictures in the short-term set that come before
+ the current frame.
+ * - __u8
+ - ``num_rps_poc_st_curr_after``
+ - The number of reference pictures in the short-term set that come after
+ the current frame.
+ * - __u8
+ - ``num_rps_poc_lt_curr``
+ - The number of reference pictures in the long-term set.
+ * - __u8
+ - ``padding[7]``
+ - Applications and drivers must set this to zero.
+ * - struct :c:type:`v4l2_hevc_dpb_entry`
+ - ``dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ - The decoded picture buffer, for meta-data about reference frames.
+ * - struct :c:type:`v4l2_hevc_pred_weight_table`
+ - ``pred_weight_table``
+ - The prediction weight coefficients for inter-picture prediction.
+ * - __u64
+ - ``flags``
+ - See :ref:`Slice Parameters Flags <hevc_slice_params_flags>`
+
+.. _hevc_slice_params_flags:
+
+``Slice Parameters Flags``
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA``
+ - 0x00000001
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA``
+ - 0x00000002
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED``
+ - 0x00000004
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO``
+ - 0x00000008
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT``
+ - 0x00000010
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0``
+ - 0x00000020
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV``
+ - 0x00000040
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED``
+ - 0x00000080
+ -
+ * - ``V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED``
+ - 0x00000100
+ -
+
+.. c:type:: v4l2_hevc_dpb_entry
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_hevc_dpb_entry
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u64
+ - ``timestamp``
+ - Timestamp of the V4L2 capture buffer to use as reference, used
+ with B-coded and P-coded frames. The timestamp refers to the
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ * - __u8
+ - ``rps``
+ - The reference set for the reference frame
+ (V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE,
+ V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER or
+ V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ * - __u8
+ - ``field_pic``
+ - Whether the reference is a field picture or a frame.
+ * - __u16
+ - ``pic_order_cnt[2]``
+ - The picture order count of the reference. Only the first element of the
+ array is used for frame pictures, while the first element identifies the
+ top field and the second the bottom field in field-coded pictures.
+ * - __u8
+ - ``padding[2]``
+ - Applications and drivers must set this to zero.
+
+.. c:type:: v4l2_hevc_pred_weight_table
+
+.. cssclass:: longtable
+
+.. flat-table:: struct v4l2_hevc_pred_weight_table
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u8
+ - ``luma_log2_weight_denom``
+ -
+ * - __s8
+ - ``delta_chroma_log2_weight_denom``
+ -
+ * - __s8
+ - ``delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __s8
+ - ``chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __s8
+ - ``delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]``
+ -
+ * - __s8
+ - ``delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __s8
+ - ``chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]``
+ -
+ * - __u8
+ - ``padding[6]``
+ - Applications and drivers must set this to zero.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (enum)``
+ Specifies the decoding mode to use. Currently exposes slice-based and
+ frame-based decoding but new modes might be added later on.
+ This control is used as a modifier for V4L2_PIX_FMT_HEVC_SLICE
+ pixel format. Applications that support V4L2_PIX_FMT_HEVC_SLICE
+ are required to set this control in order to specify the decoding mode
+ that is expected for the buffer.
+ Drivers may expose a single or multiple decoding modes, depending
+ on what they can support.
+
+ .. note::
+
+ This menu control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_mpeg_video_hevc_decode_mode
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED``
+ - 0
+ - Decoding is done at the slice granularity.
+ The OUTPUT buffer must contain a single slice.
+ * - ``V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED``
+ - 1
+ - Decoding is done at the frame granularity.
+ The OUTPUT buffer must contain all slices needed to decode the
+ frame. The OUTPUT buffer must also contain both fields.
+
+``V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (enum)``
+ Specifies the HEVC slice start code expected for each slice.
+ This control is used as a modifier for V4L2_PIX_FMT_HEVC_SLICE
+ pixel format. Applications that support V4L2_PIX_FMT_HEVC_SLICE
+ are required to set this control in order to specify the start code
+ that is expected for the buffer.
+ Drivers may expose a single or multiple start codes, depending
+ on what they can support.
+
+ .. note::
+
+ This menu control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_mpeg_video_hevc_start_code
+
+.. cssclass:: longtable
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - ``V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE``
+ - 0
+ - Selecting this value specifies that HEVC slices are passed
+ to the driver without any start code.
+ * - ``V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B``
+ - 1
+ - Selecting this value specifies that HEVC slices are expected
+ to be prefixed by Annex B start codes. According to :ref:`hevc`
+ valid start codes can be 3-bytes 0x000001 or 4-bytes 0x00000001.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
index eff056b17167..b9a6b08fbf32 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
@@ -98,7 +98,7 @@ Flash Control IDs
V4L2_CID_FLASH_STROBE control.
* - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL``
- The flash strobe is triggered by an external source. Typically
- this is a sensor, which makes it possible to synchronises the
+ this is a sensor, which makes it possible to synchronise the
flash strobe start to exposure start.
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
index 2c3ab5796d76..2d3e2b83d6dd 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst
@@ -55,3 +55,13 @@ Image Source Control IDs
``V4L2_CID_TEST_PATTERN_GREENB (integer)``
Test pattern green (next to blue) colour component.
+
+``V4L2_CID_UNIT_CELL_SIZE (struct)``
+ This control returns the unit cell size in nanometers. The struct
+ :c:type:`v4l2_area` provides the width and the height in separate
+    fields to account for asymmetric pixels.
+    This control does not take any possible hardware
+    binning into account.
+ The unit cell consists of the whole area of the pixel, sensitive and
+ non-sensitive.
+ This control is required for automatic calibration of sensors/cameras.
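+
+A client could read it through the extended control interface, roughly as
+follows (``fd`` is an open device node exposing the control; error handling
+omitted):
+
+.. code-block:: c
+
+    struct v4l2_area area = { 0 };
+    struct v4l2_ext_control ctrl = {
+        .id = V4L2_CID_UNIT_CELL_SIZE,
+        .size = sizeof(area),
+        .ptr = &area,
+    };
+    struct v4l2_ext_controls ctrls = {
+        .count = 1,
+        .controls = &ctrl,
+    };
+
+    if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
+        printf("unit cell: %u x %u nm\n", area.width, area.height);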
diff --git a/Documentation/media/uapi/v4l/meta-formats.rst b/Documentation/media/uapi/v4l/meta-formats.rst
index b10ca9ee3968..74c8659ee9d6 100644
--- a/Documentation/media/uapi/v4l/meta-formats.rst
+++ b/Documentation/media/uapi/v4l/meta-formats.rst
@@ -24,3 +24,4 @@ These formats are used for the :ref:`metadata` interface only.
pixfmt-meta-uvc
pixfmt-meta-vsp1-hgo
pixfmt-meta-vsp1-hgt
+ pixfmt-meta-vivid
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index 292fdc116c77..561bda112809 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -61,10 +61,10 @@ Compressed Formats
- ``V4L2_PIX_FMT_H264_SLICE``
- 'S264'
- - H264 parsed slice data, without the start code and as
- extracted from the H264 bitstream. This format is adapted for
- stateless video decoders that implement an H264 pipeline
- (using the :ref:`mem2mem` and :ref:`media-request-api`).
+ - H264 parsed slice data, including slice headers, either with or
+ without the start code, as extracted from the H264 bitstream.
+ This format is adapted for stateless video decoders that implement an
+ H264 pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
This pixelformat has two modifiers that must be set at least once
through the ``V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE``
and ``V4L2_CID_MPEG_VIDEO_H264_START_CODE`` controls.
@@ -80,6 +80,10 @@ Compressed Formats
appropriate number of macroblocks to decode a full
corresponding frame to the matching capture buffer.
+ The syntax for this format is documented in :ref:`h264`, section
+ 7.3.2.8 "Slice layer without partitioning RBSP syntax" and the following
+ sections.
+
.. note::
This format is not yet part of the public kernel API and it
@@ -188,6 +192,29 @@ Compressed Formats
If :ref:`VIDIOC_ENUM_FMT` reports ``V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM``
then the decoder has no requirements since it can parse all the
information from the raw bytestream.
+ * .. _V4L2-PIX-FMT-HEVC-SLICE:
+
+ - ``V4L2_PIX_FMT_HEVC_SLICE``
+ - 'S265'
+ - HEVC parsed slice data, as extracted from the HEVC bitstream.
+ This format is adapted for stateless video decoders that implement an
+ HEVC pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
+ This pixelformat has two modifiers that must be set at least once
+ through the ``V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE``
+ and ``V4L2_CID_MPEG_VIDEO_HEVC_START_CODE`` controls.
+ Metadata associated with the frame to decode must be passed to the
+ driver through the following controls:
+ * ``V4L2_CID_MPEG_VIDEO_HEVC_SPS``
+ * ``V4L2_CID_MPEG_VIDEO_HEVC_PPS``
+ * ``V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS``
+ See the :ref:`associated Codec Control IDs <v4l2-mpeg-hevc>`.
+ Buffers associated with this pixel format must contain the appropriate
+ number of macroblocks to decode a full corresponding frame.
+
+ .. note::
+
+ This format is not yet part of the public kernel API and it
+ is expected to change.
* .. _V4L2-PIX-FMT-FWHT:
- ``V4L2_PIX_FMT_FWHT``
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst
new file mode 100644
index 000000000000..eed20eaefe24
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vivid.rst
@@ -0,0 +1,60 @@
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+
+.. _v4l2-meta-fmt-vivid:
+
+*******************************
+V4L2_META_FMT_VIVID ('VIVD')
+*******************************
+
+VIVID Metadata Format
+
+
+Description
+===========
+
+This describes the metadata format used by the vivid driver.
+
+It sets Brightness, Saturation, Contrast and Hue, each of which maps to the
+corresponding control of the vivid driver, following that control's range and
+default value.
+
+It contains the following fields:
+
+.. flat-table:: VIVID Metadata
+ :widths: 1 4
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Field
+ - Description
+ * - u16 brightness;
+ - Image brightness; the value is in the range 0 to 255, with a default of 128.
+ * - u16 contrast;
+ - Image contrast; the value is in the range 0 to 255, with a default of 128.
+ * - u16 saturation;
+ - Image color saturation; the value is in the range 0 to 255, with a default of 128.
+ * - s16 hue;
+ - Image color balance; the value is in the range -128 to 128, with a default of 0.
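+
+Expressed as a C structure, the buffer layout implied by the table above
+would look as follows (the structure name is illustrative only):
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    /* Illustrative layout of a V4L2_META_FMT_VIVID buffer, following
+     * the field table above. */
+    struct vivid_meta_out_buf {
+        __u16 brightness;  /* 0 to 255, default 128 */
+        __u16 contrast;    /* 0 to 255, default 128 */
+        __u16 saturation;  /* 0 to 255, default 128 */
+        __s16 hue;         /* -128 to 128, default 0 */
+    };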
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
index f74f239b0510..aae0c0013eb1 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
@@ -38,8 +38,10 @@ of the two interfaces they are used.
* - ``V4L2_SEL_TGT_CROP_DEFAULT``
- 0x0001
- Suggested cropping rectangle that covers the "whole picture".
+ This includes only active pixels and excludes other non-active
+ pixels such as black pixels.
+ - Yes
- Yes
- - No
* - ``V4L2_SEL_TGT_CROP_BOUNDS``
- 0x0002
- Bounds of the crop rectangle. All valid crop rectangles fit inside
diff --git a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
index 57f0066f4cff..f1a504836f31 100644
--- a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
@@ -208,7 +208,15 @@ introduced in Linux 3.3. They are, however, mandatory for stateful mem2mem decod
been started yet, the driver will return an ``EPERM`` error code. When
the decoder is already running, this command does nothing. No
flags are defined for this command.
-
+ * - ``V4L2_DEC_CMD_FLUSH``
+ - 4
+ - Flush any held capture buffers. Only valid for stateless decoders.
+ This command is typically used when the application has reached the
+ end of the stream and the last output buffer had the
+ ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag set, which prevents
+ dequeueing the capture buffer containing the last decoded frame.
+ This command can then be used to explicitly flush that final decoded
+ frame. It does nothing if there are no held capture buffers.
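+
+A minimal sketch of issuing this command at the end of the stream, where
+``fd`` is a hypothetical open file descriptor of the decoder device node:
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Sketch only: flush the held capture buffer at end of stream. */
+    static int flush_decoder(int fd)
+    {
+        struct v4l2_decoder_cmd cmd = {
+            .cmd = V4L2_DEC_CMD_FLUSH,
+        };
+
+        return ioctl(fd, VIDIOC_DECODER_CMD, &cmd);
+    }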
Return Value
============
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
index 13dc1a986249..271cac18afbb 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
@@ -199,6 +199,11 @@ still cause this situation.
- A pointer to a matrix control of unsigned 32-bit values. Valid if
this control is of type ``V4L2_CTRL_TYPE_U32``.
* -
+ - :c:type:`v4l2_area` *
+ - ``p_area``
+ - A pointer to a struct :c:type:`v4l2_area`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_AREA``.
+ * -
- void *
- ``ptr``
- A pointer to a compound type which can be an N-dimensional array
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
index 7b6179627803..2d197e6bba8f 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
@@ -63,7 +63,7 @@ EINVAL error code when overlays are not supported.
To set the parameters for a *Video Output Overlay*, applications must
initialize the ``flags`` field of a struct
-struct :c:type:`v4l2_framebuffer`. Since the framebuffer is
+:c:type:`v4l2_framebuffer`. Since the framebuffer is
implemented on the TV card all other parameters are determined by the
driver. When an application calls :ref:`VIDIOC_S_FBUF <VIDIOC_G_FBUF>` with a pointer to
this structure, the driver prepares for the overlay and returns the
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index a3d56ffbf4cc..6690928e657b 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -443,6 +443,12 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_ctrl_mpeg2_quantization`, containing MPEG-2
quantization matrices for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_AREA``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_area`, containing the width and the height
+ of a rectangular area. Units depend on the use case.
* - ``V4L2_CTRL_TYPE_H264_SPS``
- n/a
- n/a
@@ -473,6 +479,24 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_ctrl_h264_decode_params`, containing H264
decode parameters for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_SPS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_sps`, containing HEVC Sequence
+ Parameter Set for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_PPS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_pps`, containing HEVC Picture
+ Parameter Set for stateless video decoders.
+ * - ``V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_ctrl_hevc_slice_params`, containing HEVC
+ slice parameters for stateless video decoders.
.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
index d7faef10e39b..d0c643db477a 100644
--- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
@@ -125,6 +125,7 @@ aborting or finishing any DMA in progress, an implicit
.. _V4L2-BUF-CAP-SUPPORTS-DMABUF:
.. _V4L2-BUF-CAP-SUPPORTS-REQUESTS:
.. _V4L2-BUF-CAP-SUPPORTS-ORPHANED-BUFS:
+.. _V4L2-BUF-CAP-SUPPORTS-M2M-HOLD-CAPTURE-BUF:
.. cssclass:: longtable
@@ -150,6 +151,11 @@ aborting or finishing any DMA in progress, an implicit
- The kernel allows calling :ref:`VIDIOC_REQBUFS` while buffers are still
mapped or exported via DMABUF. These orphaned buffers will be freed
when they are unmapped or when the exported DMABUF fds are closed.
+ * - ``V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF``
+ - 0x00000020
+ - Only valid for stateless decoders. If set, then userspace can set the
+ ``V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF`` flag to hold off on returning the
+ capture buffer until the OUTPUT timestamp changes.
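+
+A minimal sketch of queueing an ``OUTPUT`` buffer with that flag set, so
+that the capture buffer is held until the ``OUTPUT`` timestamp changes
+(``fd``, the buffer type and the index are hypothetical):
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Sketch only: queue one slice of a frame and keep holding the
+     * corresponding capture buffer. Request-related fields omitted. */
+    static int queue_slice_hold_capture(int fd, unsigned int index)
+    {
+        struct v4l2_buffer buf;
+
+        memset(&buf, 0, sizeof(buf));
+        buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+        buf.memory = V4L2_MEMORY_MMAP;
+        buf.index = index;
+        buf.flags = V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+
+        return ioctl(fd, VIDIOC_QBUF, &buf);
+    }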
Return Value
============
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
index 1d7eb8c7bd5c..1246573c1019 100644
--- a/Documentation/media/v4l-drivers/imx.rst
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -515,10 +515,10 @@ Streaming can then begin independently on the capture device nodes
be used to select any supported YUV pixelformat on the capture device
nodes, including planar.
-SabreAuto with ADV7180 decoder
-------------------------------
+i.MX6Q SabreAuto with ADV7180 decoder
+-------------------------------------
-On the SabreAuto, an on-board ADV7180 SD decoder is connected to the
+On the i.MX6Q SabreAuto, an on-board ADV7180 SD decoder is connected to the
parallel bus input on the internal video mux to IPU1 CSI0.
The following example configures a pipeline to capture from the ADV7180
@@ -547,8 +547,6 @@ This example configures a pipeline to capture from the ADV7180
video decoder, assuming PAL 720x576 input signals, with Motion
Compensated de-interlacing. The adv7180 must output sequential or
alternating fields (field type 'seq-tb' for PAL, or 'alternate').
-$outputfmt can be any format supported by the ipu1_ic_prpvf entity
-at its output pad:
.. code-block:: none
@@ -565,11 +563,70 @@ at its output pad:
media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x576]"
media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x576 field:none]"
media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x576 field:none]"
- media-ctl -V "'ipu1_ic_prpvf':1 [fmt:$outputfmt field:none]"
+ media-ctl -V "'ipu1_ic_prpvf':1 [fmt:AYUV32/720x576 field:none]"
+ # Configure "ipu1_ic_prpvf capture" interface (assumed at /dev/video2)
+ v4l2-ctl -d2 --set-fmt-video=field=none
+
+Streaming can then begin on /dev/video2. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video2.
+
+This platform accepts Composite Video analog inputs to the ADV7180 on
+Ain1 (connector J42).
+
+i.MX6DL SabreAuto with ADV7180 decoder
+--------------------------------------
+
+On the i.MX6DL SabreAuto, an on-board ADV7180 SD decoder is connected to the
+parallel bus input on the internal video mux to IPU1 CSI0.
+
+The following example configures a pipeline to capture from the ADV7180
+video decoder, assuming NTSC 720x480 input signals, using simple
+interweave (unconverted and without motion compensation). The adv7180
+must output sequential or alternating fields (field type 'seq-bt' for
+NTSC, or 'alternate'):
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'adv7180 4-0021':0 -> 'ipu1_csi0_mux':4[1]"
+ media-ctl -l "'ipu1_csi0_mux':5 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':2 -> 'ipu1_csi0 capture':0[1]"
+ # Configure pads
+ media-ctl -V "'adv7180 4-0021':0 [fmt:UYVY2X8/720x480 field:seq-bt]"
+ media-ctl -V "'ipu1_csi0_mux':5 [fmt:UYVY2X8/720x480]"
+ media-ctl -V "'ipu1_csi0':2 [fmt:AYUV32/720x480]"
+ # Configure "ipu1_csi0 capture" interface (assumed at /dev/video0)
+ v4l2-ctl -d0 --set-fmt-video=field=interlaced_bt
+
+Streaming can then begin on /dev/video0. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video0.
+
+This example configures a pipeline to capture from the ADV7180
+video decoder, assuming PAL 720x576 input signals, with Motion
+Compensated de-interlacing. The adv7180 must output sequential or
+alternating fields (field type 'seq-tb' for PAL, or 'alternate').
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'adv7180 4-0021':0 -> 'ipu1_csi0_mux':4[1]"
+ media-ctl -l "'ipu1_csi0_mux':5 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':1 -> 'ipu1_vdic':0[1]"
+ media-ctl -l "'ipu1_vdic':2 -> 'ipu1_ic_prp':0[1]"
+ media-ctl -l "'ipu1_ic_prp':2 -> 'ipu1_ic_prpvf':0[1]"
+ media-ctl -l "'ipu1_ic_prpvf':1 -> 'ipu1_ic_prpvf capture':0[1]"
+ # Configure pads
+ media-ctl -V "'adv7180 4-0021':0 [fmt:UYVY2X8/720x576 field:seq-tb]"
+ media-ctl -V "'ipu1_csi0_mux':5 [fmt:UYVY2X8/720x576]"
+ media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x576]"
+ media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x576 field:none]"
+ media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x576 field:none]"
+ media-ctl -V "'ipu1_ic_prpvf':1 [fmt:AYUV32/720x576 field:none]"
+ # Configure "ipu1_ic_prpvf capture" interface (assumed at /dev/video2)
+ v4l2-ctl -d2 --set-fmt-video=field=none
-Streaming can then begin on the capture device node at
-"ipu1_ic_prpvf capture". The v4l2-ctl tool can be used to select any
-supported YUV or RGB pixelformat on the capture device node.
+Streaming can then begin on /dev/video2. The v4l2-ctl tool can also be
+used to select any supported YUV pixelformat on /dev/video2.
This platform accepts Composite Video analog inputs to the ADV7180 on
Ain1 (connector J42).
diff --git a/Documentation/media/v4l-drivers/ipu3.rst b/Documentation/media/v4l-drivers/ipu3.rst
index c9f780404eee..e4904ab44e60 100644
--- a/Documentation/media/v4l-drivers/ipu3.rst
+++ b/Documentation/media/v4l-drivers/ipu3.rst
@@ -265,19 +265,56 @@ below.
yavta -w "0x009819A1 1" /dev/v4l-subdev7
-RAW Bayer frames go through the following ImgU pipeline HW blocks to have the
+Certain hardware blocks in the ImgU pipeline can change the frame resolution
+by cropping or scaling; these hardware blocks are the Input Feeder (IF), the
+Bayer Down Scaler (BDS) and the Geometric Distortion Correction (GDC).
+There is one more block that can change the frame resolution, the YUV Scaler,
+but it is only applicable to the secondary output.
+
+RAW Bayer frames go through these ImgU pipeline hardware blocks and the final
processed image output to the DDR memory.
-RAW Bayer frame -> Input Feeder -> Bayer Down Scaling (BDS) -> Geometric
-Distortion Correction (GDC) -> DDR
+.. kernel-figure:: ipu3_rcb.svg
+ :alt: ipu3 resolution blocks image
-The ImgU V4L2 subdev has to be configured with the supported resolutions in all
-the above HW blocks, for a given input resolution.
+ IPU3 resolution change hardware blocks
+
+**Input Feeder**
+
+The Input Feeder gets the Bayer frame data from the sensor. It can crop lines
+and columns from the frame and then stores the pixels in the device's internal
+pixel buffer, ready to be read out by the following blocks.
+
+**Bayer Down Scaler**
+
+The Bayer Down Scaler performs image scaling in the Bayer domain. The
+downscale factor can be configured from 1X to 1/4X in each axis, in steps of
+0.03125 (1/32), as illustrated by the sketch below.
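+
+As an illustration of that factor, a sketch of computing a candidate BDS
+output size (the helper is illustrative only and ignores the per-block
+alignment requirements discussed below):
+
+.. code-block:: c
+
+    /* Sketch only: the BDS scale factor is num/32, with num in the
+     * range 8..32, i.e. 1/4X up to 1X in steps of 1/32. */
+    static unsigned int bds_out_size(unsigned int in_size, unsigned int num)
+    {
+        return in_size * num / 32;
+    }
+
+    /* e.g. bds_out_size(2592, 16) == 1296, a 1/2X downscale. */
+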
+**Geometric Distortion Correction**
+
+Geometric Distortion Correction is used to correct distortions and to perform
+image filtering. It needs some extra filter and envelope padding pixels to
+work, so the input resolution of the GDC must be larger than the output
+resolution.
+
+**YUV Scaler**
+
+The YUV Scaler is similar to the BDS, but it scales in the YUV domain. It
+supports down scaling of up to 1/12X, but it cannot be applied to the main
+output.
+
+The ImgU V4L2 subdev has to be configured with the supported resolutions in all
+the above hardware blocks, for a given input resolution.
For a given supported resolution for an input frame, the Input Feeder, Bayer
-Down Scaling and GDC blocks should be configured with the supported resolutions.
-This information can be obtained by looking at the following IPU3 ImgU
-configuration table.
+Down Scaler and GDC blocks should be configured with the supported resolutions
+as each hardware block has its own alignment requirement.
+
+The output resolutions of the hardware blocks must be chosen carefully to meet
+the hardware requirements while keeping the maximum field of view.
+The intermediate resolutions can be generated by a specific tool, and this
+information can be obtained by looking at the following IPU3 ImgU
+configuration table:
https://chromium.googlesource.com/chromiumos/overlays/board-overlays/+/master
diff --git a/Documentation/media/v4l-drivers/ipu3_rcb.svg b/Documentation/media/v4l-drivers/ipu3_rcb.svg
new file mode 100644
index 000000000000..d878421b42a0
--- /dev/null
+++ b/Documentation/media/v4l-drivers/ipu3_rcb.svg
@@ -0,0 +1,331 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="774pt" height="152pt" viewBox="0 0 774 152" version="1.1">
+<defs>
+<g>
+<symbol overflow="visible" id="glyph0-0">
+<path style="stroke:none;" d="M 1 0 L 1 -15 L 9 -15 L 9 0 Z M 8 -1 L 8 -14 L 2 -14 L 2 -1 Z M 8 -1 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-1">
+<path style="stroke:none;" d="M 4.6875 -1.15625 C 5.519531 -1.15625 6.15625 -1.316406 6.59375 -1.640625 C 7.039062 -1.960938 7.265625 -2.441406 7.265625 -3.078125 C 7.265625 -3.460938 7.179688 -3.789062 7.015625 -4.0625 C 6.859375 -4.34375 6.644531 -4.582031 6.375 -4.78125 C 6.113281 -4.988281 5.816406 -5.171875 5.484375 -5.328125 C 5.148438 -5.484375 4.804688 -5.628906 4.453125 -5.765625 C 4.054688 -5.921875 3.675781 -6.097656 3.3125 -6.296875 C 2.945312 -6.492188 2.617188 -6.726562 2.328125 -7 C 2.046875 -7.269531 1.820312 -7.582031 1.65625 -7.9375 C 1.488281 -8.300781 1.40625 -8.726562 1.40625 -9.21875 C 1.40625 -10.300781 1.742188 -11.144531 2.421875 -11.75 C 3.097656 -12.351562 4.046875 -12.65625 5.265625 -12.65625 C 5.597656 -12.65625 5.925781 -12.628906 6.25 -12.578125 C 6.570312 -12.535156 6.875 -12.476562 7.15625 -12.40625 C 7.4375 -12.34375 7.6875 -12.265625 7.90625 -12.171875 C 8.125 -12.085938 8.300781 -12 8.4375 -11.90625 L 7.921875 -10.515625 C 7.648438 -10.679688 7.28125 -10.84375 6.8125 -11 C 6.351562 -11.15625 5.835938 -11.234375 5.265625 -11.234375 C 4.660156 -11.234375 4.140625 -11.082031 3.703125 -10.78125 C 3.265625 -10.488281 3.046875 -10.039062 3.046875 -9.4375 C 3.046875 -9.09375 3.109375 -8.800781 3.234375 -8.5625 C 3.359375 -8.320312 3.53125 -8.109375 3.75 -7.921875 C 3.96875 -7.742188 4.222656 -7.582031 4.515625 -7.4375 C 4.804688 -7.289062 5.128906 -7.144531 5.484375 -7 C 5.984375 -6.789062 6.441406 -6.578125 6.859375 -6.359375 C 7.285156 -6.148438 7.648438 -5.894531 7.953125 -5.59375 C 8.253906 -5.300781 8.488281 -4.953125 8.65625 -4.546875 C 8.820312 -4.148438 8.90625 -3.664062 8.90625 -3.09375 C 8.90625 -2.019531 8.539062 -1.191406 7.8125 -0.609375 C 7.082031 -0.0234375 6.039062 0.265625 4.6875 0.265625 C 4.238281 0.265625 3.820312 0.234375 3.4375 0.171875 C 3.050781 0.109375 2.707031 0.03125 2.40625 -0.0625 C 2.101562 -0.15625 1.835938 -0.25 1.609375 -0.34375 C 1.390625 -0.4375 1.21875 -0.519531 1.09375 -0.59375 L 1.59375 -1.953125 C 1.863281 -1.804688 2.257812 -1.632812 2.78125 -1.4375 C 3.300781 -1.25 3.9375 -1.15625 4.6875 -1.15625 Z M 4.6875 -1.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-2">
+<path style="stroke:none;" d="M 5.1875 -9.5 C 6.4375 -9.5 7.398438 -9.109375 8.078125 -8.328125 C 8.753906 -7.546875 9.09375 -6.363281 9.09375 -4.78125 L 9.09375 -4.203125 L 2.453125 -4.203125 C 2.523438 -3.242188 2.84375 -2.515625 3.40625 -2.015625 C 3.976562 -1.515625 4.773438 -1.265625 5.796875 -1.265625 C 6.390625 -1.265625 6.890625 -1.3125 7.296875 -1.40625 C 7.710938 -1.5 8.023438 -1.597656 8.234375 -1.703125 L 8.453125 -0.296875 C 8.253906 -0.191406 7.894531 -0.0820312 7.375 0.03125 C 6.851562 0.15625 6.269531 0.21875 5.625 0.21875 C 4.820312 0.21875 4.113281 0.0976562 3.5 -0.140625 C 2.894531 -0.390625 2.394531 -0.726562 2 -1.15625 C 1.601562 -1.582031 1.300781 -2.09375 1.09375 -2.6875 C 0.894531 -3.28125 0.796875 -3.925781 0.796875 -4.625 C 0.796875 -5.445312 0.921875 -6.164062 1.171875 -6.78125 C 1.429688 -7.394531 1.765625 -7.898438 2.171875 -8.296875 C 2.585938 -8.703125 3.054688 -9.003906 3.578125 -9.203125 C 4.097656 -9.398438 4.632812 -9.5 5.1875 -9.5 Z M 7.421875 -5.546875 C 7.421875 -6.328125 7.210938 -6.945312 6.796875 -7.40625 C 6.390625 -7.863281 5.84375 -8.09375 5.15625 -8.09375 C 4.769531 -8.09375 4.421875 -8.019531 4.109375 -7.875 C 3.796875 -7.726562 3.523438 -7.535156 3.296875 -7.296875 C 3.066406 -7.054688 2.882812 -6.78125 2.75 -6.46875 C 2.625 -6.164062 2.539062 -5.859375 2.5 -5.546875 Z M 7.421875 -5.546875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-3">
+<path style="stroke:none;" d="M 1.421875 -9.015625 C 2.015625 -9.160156 2.609375 -9.273438 3.203125 -9.359375 C 3.796875 -9.441406 4.351562 -9.484375 4.875 -9.484375 C 6.113281 -9.484375 7.050781 -9.160156 7.6875 -8.515625 C 8.320312 -7.878906 8.640625 -6.851562 8.640625 -5.4375 L 8.640625 0 L 7 0 L 7 -5.140625 C 7 -5.742188 6.945312 -6.226562 6.84375 -6.59375 C 6.738281 -6.96875 6.585938 -7.257812 6.390625 -7.46875 C 6.191406 -7.675781 5.957031 -7.816406 5.6875 -7.890625 C 5.414062 -7.972656 5.117188 -8.015625 4.796875 -8.015625 C 4.535156 -8.015625 4.253906 -8 3.953125 -7.96875 C 3.648438 -7.9375 3.359375 -7.894531 3.078125 -7.84375 L 3.078125 0 L 1.421875 0 Z M 1.421875 -9.015625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-4">
+<path style="stroke:none;" d="M 7.015625 -2.3125 C 7.015625 -2.644531 6.878906 -2.914062 6.609375 -3.125 C 6.335938 -3.34375 6 -3.53125 5.59375 -3.6875 C 5.1875 -3.851562 4.742188 -4.015625 4.265625 -4.171875 C 3.785156 -4.328125 3.335938 -4.515625 2.921875 -4.734375 C 2.515625 -4.960938 2.175781 -5.242188 1.90625 -5.578125 C 1.632812 -5.910156 1.5 -6.34375 1.5 -6.875 C 1.5 -7.625 1.800781 -8.25 2.40625 -8.75 C 3.007812 -9.25 3.960938 -9.5 5.265625 -9.5 C 5.765625 -9.5 6.285156 -9.460938 6.828125 -9.390625 C 7.367188 -9.316406 7.832031 -9.21875 8.21875 -9.09375 L 7.921875 -7.625 C 7.816406 -7.675781 7.671875 -7.726562 7.484375 -7.78125 C 7.296875 -7.84375 7.082031 -7.894531 6.84375 -7.9375 C 6.601562 -7.988281 6.34375 -8.023438 6.0625 -8.046875 C 5.789062 -8.078125 5.53125 -8.09375 5.28125 -8.09375 C 3.84375 -8.09375 3.125 -7.703125 3.125 -6.921875 C 3.125 -6.640625 3.257812 -6.398438 3.53125 -6.203125 C 3.800781 -6.015625 4.144531 -5.835938 4.5625 -5.671875 C 4.976562 -5.515625 5.425781 -5.351562 5.90625 -5.1875 C 6.382812 -5.019531 6.828125 -4.816406 7.234375 -4.578125 C 7.648438 -4.335938 7.992188 -4.046875 8.265625 -3.703125 C 8.546875 -3.367188 8.6875 -2.941406 8.6875 -2.421875 C 8.6875 -1.578125 8.359375 -0.925781 7.703125 -0.46875 C 7.046875 -0.0078125 6.007812 0.21875 4.59375 0.21875 C 3.957031 0.21875 3.375 0.164062 2.84375 0.0625 C 2.3125 -0.0390625 1.800781 -0.203125 1.3125 -0.421875 L 1.640625 -1.921875 C 2.109375 -1.703125 2.597656 -1.523438 3.109375 -1.390625 C 3.617188 -1.253906 4.171875 -1.1875 4.765625 -1.1875 C 6.265625 -1.1875 7.015625 -1.5625 7.015625 -2.3125 Z M 7.015625 -2.3125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-5">
+<path style="stroke:none;" d="M 9.203125 -4.640625 C 9.203125 -3.910156 9.097656 -3.25 8.890625 -2.65625 C 8.679688 -2.0625 8.390625 -1.550781 8.015625 -1.125 C 7.640625 -0.695312 7.191406 -0.363281 6.671875 -0.125 C 6.160156 0.101562 5.597656 0.21875 4.984375 0.21875 C 4.378906 0.21875 3.820312 0.101562 3.3125 -0.125 C 2.800781 -0.363281 2.359375 -0.695312 1.984375 -1.125 C 1.609375 -1.550781 1.316406 -2.0625 1.109375 -2.65625 C 0.898438 -3.25 0.796875 -3.910156 0.796875 -4.640625 C 0.796875 -5.367188 0.898438 -6.035156 1.109375 -6.640625 C 1.316406 -7.242188 1.609375 -7.753906 1.984375 -8.171875 C 2.359375 -8.585938 2.800781 -8.910156 3.3125 -9.140625 C 3.820312 -9.378906 4.378906 -9.5 4.984375 -9.5 C 5.597656 -9.5 6.160156 -9.378906 6.671875 -9.140625 C 7.191406 -8.910156 7.640625 -8.585938 8.015625 -8.171875 C 8.390625 -7.753906 8.679688 -7.242188 8.890625 -6.640625 C 9.097656 -6.035156 9.203125 -5.367188 9.203125 -4.640625 Z M 7.5 -4.640625 C 7.5 -5.691406 7.269531 -6.519531 6.8125 -7.125 C 6.363281 -7.738281 5.753906 -8.046875 4.984375 -8.046875 C 4.222656 -8.046875 3.617188 -7.738281 3.171875 -7.125 C 2.722656 -6.519531 2.5 -5.691406 2.5 -4.640625 C 2.5 -3.597656 2.722656 -2.773438 3.171875 -2.171875 C 3.617188 -1.566406 4.222656 -1.265625 4.984375 -1.265625 C 5.753906 -1.265625 6.363281 -1.566406 6.8125 -2.171875 C 7.269531 -2.773438 7.5 -3.597656 7.5 -4.640625 Z M 7.5 -4.640625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-6">
+<path style="stroke:none;" d="M 2.140625 0 L 2.140625 -8.78125 C 3.503906 -9.25 4.878906 -9.484375 6.265625 -9.484375 C 6.691406 -9.484375 7.097656 -9.460938 7.484375 -9.421875 C 7.867188 -9.390625 8.296875 -9.320312 8.765625 -9.21875 L 8.453125 -7.765625 C 8.023438 -7.878906 7.648438 -7.953125 7.328125 -7.984375 C 7.003906 -8.023438 6.648438 -8.046875 6.265625 -8.046875 C 5.453125 -8.046875 4.625 -7.929688 3.78125 -7.703125 L 3.78125 0 Z M 2.140625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-7">
+<path style="stroke:none;" d="M 5.8125 -10.984375 L 5.8125 -1.40625 L 8.21875 -1.40625 L 8.21875 0 L 1.78125 0 L 1.78125 -1.40625 L 4.1875 -1.40625 L 4.1875 -10.984375 L 1.78125 -10.984375 L 1.78125 -12.375 L 8.21875 -12.375 L 8.21875 -10.984375 Z M 5.8125 -10.984375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-8">
+<path style="stroke:none;" d="M 1.8125 0 L 1.8125 -12.375 L 8.84375 -12.375 L 8.84375 -10.984375 L 3.453125 -10.984375 L 3.453125 -7.125 L 8.203125 -7.125 L 8.203125 -5.734375 L 3.453125 -5.734375 L 3.453125 0 Z M 1.8125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-9">
+<path style="stroke:none;" d="M 4.078125 0.09375 C 3.878906 0.09375 3.644531 0.0859375 3.375 0.078125 C 3.113281 0.0664062 2.847656 0.0507812 2.578125 0.03125 C 2.316406 0.0078125 2.050781 -0.0195312 1.78125 -0.0625 C 1.507812 -0.101562 1.273438 -0.148438 1.078125 -0.203125 L 1.078125 -12.203125 C 1.273438 -12.253906 1.503906 -12.300781 1.765625 -12.34375 C 2.023438 -12.382812 2.289062 -12.410156 2.5625 -12.421875 C 2.84375 -12.441406 3.113281 -12.457031 3.375 -12.46875 C 3.632812 -12.488281 3.867188 -12.5 4.078125 -12.5 C 4.691406 -12.5 5.265625 -12.445312 5.796875 -12.34375 C 6.328125 -12.238281 6.789062 -12.054688 7.1875 -11.796875 C 7.582031 -11.546875 7.890625 -11.210938 8.109375 -10.796875 C 8.328125 -10.390625 8.4375 -9.878906 8.4375 -9.265625 C 8.4375 -8.960938 8.390625 -8.675781 8.296875 -8.40625 C 8.203125 -8.132812 8.070312 -7.878906 7.90625 -7.640625 C 7.738281 -7.398438 7.546875 -7.1875 7.328125 -7 C 7.109375 -6.820312 6.875 -6.6875 6.625 -6.59375 C 7.300781 -6.40625 7.867188 -6.0625 8.328125 -5.5625 C 8.785156 -5.0625 9.015625 -4.414062 9.015625 -3.625 C 9.015625 -2.394531 8.617188 -1.46875 7.828125 -0.84375 C 7.046875 -0.21875 5.796875 0.09375 4.078125 0.09375 Z M 2.71875 -5.78125 L 2.71875 -1.359375 C 2.75 -1.347656 2.898438 -1.332031 3.171875 -1.3125 C 3.441406 -1.289062 3.785156 -1.28125 4.203125 -1.28125 C 4.609375 -1.28125 5 -1.3125 5.375 -1.375 C 5.757812 -1.445312 6.097656 -1.570312 6.390625 -1.75 C 6.691406 -1.925781 6.929688 -2.160156 7.109375 -2.453125 C 7.285156 -2.753906 7.375 -3.132812 7.375 -3.59375 C 7.375 -4.007812 7.289062 -4.359375 7.125 -4.640625 C 6.957031 -4.921875 6.738281 -5.144531 6.46875 -5.3125 C 6.195312 -5.476562 5.878906 -5.597656 5.515625 -5.671875 C 5.160156 -5.742188 4.789062 -5.78125 4.40625 -5.78125 Z M 2.71875 -7.140625 L 4.015625 -7.140625 C 4.347656 -7.140625 4.679688 -7.171875 5.015625 -7.234375 C 5.347656 -7.304688 5.644531 -7.414062 5.90625 -7.5625 C 6.175781 -7.707031 6.390625 -7.90625 6.546875 -8.15625 C 6.710938 -8.414062 6.796875 -8.738281 6.796875 -9.125 C 6.796875 -9.476562 6.722656 -9.78125 6.578125 -10.03125 C 6.429688 -10.289062 6.238281 -10.5 6 -10.65625 C 5.757812 -10.820312 5.484375 -10.9375 5.171875 -11 C 4.859375 -11.0625 4.53125 -11.09375 4.1875 -11.09375 C 3.832031 -11.09375 3.523438 -11.085938 3.265625 -11.078125 C 3.003906 -11.078125 2.820312 -11.066406 2.71875 -11.046875 Z M 2.71875 -7.140625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-10">
+<path style="stroke:none;" d="M 9.203125 -6.203125 C 9.203125 -5.054688 9.054688 -4.082031 8.765625 -3.28125 C 8.484375 -2.476562 8.09375 -1.828125 7.59375 -1.328125 C 7.09375 -0.828125 6.5 -0.460938 5.8125 -0.234375 C 5.125 -0.015625 4.378906 0.09375 3.578125 0.09375 C 2.753906 0.09375 1.921875 -0.00390625 1.078125 -0.203125 L 1.078125 -12.203125 C 1.921875 -12.398438 2.753906 -12.5 3.578125 -12.5 C 4.378906 -12.5 5.125 -12.382812 5.8125 -12.15625 C 6.5 -11.925781 7.09375 -11.554688 7.59375 -11.046875 C 8.09375 -10.546875 8.484375 -9.894531 8.765625 -9.09375 C 9.054688 -8.300781 9.203125 -7.335938 9.203125 -6.203125 Z M 2.71875 -1.375 C 3.050781 -1.332031 3.390625 -1.3125 3.734375 -1.3125 C 4.335938 -1.3125 4.875 -1.398438 5.34375 -1.578125 C 5.8125 -1.765625 6.203125 -2.054688 6.515625 -2.453125 C 6.835938 -2.847656 7.082031 -3.351562 7.25 -3.96875 C 7.425781 -4.59375 7.515625 -5.335938 7.515625 -6.203125 C 7.515625 -7.878906 7.191406 -9.109375 6.546875 -9.890625 C 5.898438 -10.679688 4.945312 -11.078125 3.6875 -11.078125 C 3.507812 -11.078125 3.335938 -11.070312 3.171875 -11.0625 C 3.003906 -11.0625 2.851562 -11.046875 2.71875 -11.015625 Z M 2.71875 -1.375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-11">
+<path style="stroke:none;" d="M 7.453125 -6.09375 L 9.09375 -6.09375 L 9.09375 -0.296875 C 8.84375 -0.203125 8.4375 -0.0859375 7.875 0.046875 C 7.320312 0.191406 6.664062 0.265625 5.90625 0.265625 C 5.15625 0.265625 4.472656 0.125 3.859375 -0.15625 C 3.242188 -0.445312 2.71875 -0.863281 2.28125 -1.40625 C 1.851562 -1.957031 1.519531 -2.632812 1.28125 -3.4375 C 1.039062 -4.25 0.921875 -5.171875 0.921875 -6.203125 C 0.921875 -7.242188 1.050781 -8.160156 1.3125 -8.953125 C 1.582031 -9.753906 1.945312 -10.425781 2.40625 -10.96875 C 2.863281 -11.519531 3.398438 -11.9375 4.015625 -12.21875 C 4.628906 -12.507812 5.289062 -12.65625 6 -12.65625 C 6.457031 -12.65625 6.859375 -12.617188 7.203125 -12.546875 C 7.546875 -12.484375 7.835938 -12.40625 8.078125 -12.3125 C 8.328125 -12.226562 8.53125 -12.132812 8.6875 -12.03125 C 8.851562 -11.925781 8.976562 -11.847656 9.0625 -11.796875 L 8.515625 -10.421875 C 8.210938 -10.660156 7.847656 -10.851562 7.421875 -11 C 7.003906 -11.15625 6.5625 -11.234375 6.09375 -11.234375 C 5.59375 -11.234375 5.125 -11.113281 4.6875 -10.875 C 4.257812 -10.632812 3.890625 -10.296875 3.578125 -9.859375 C 3.273438 -9.421875 3.035156 -8.890625 2.859375 -8.265625 C 2.679688 -7.648438 2.59375 -6.960938 2.59375 -6.203125 C 2.59375 -5.453125 2.671875 -4.769531 2.828125 -4.15625 C 2.984375 -3.539062 3.207031 -3.015625 3.5 -2.578125 C 3.789062 -2.140625 4.148438 -1.796875 4.578125 -1.546875 C 5.015625 -1.304688 5.515625 -1.1875 6.078125 -1.1875 C 6.460938 -1.1875 6.757812 -1.210938 6.96875 -1.265625 C 7.1875 -1.316406 7.347656 -1.367188 7.453125 -1.421875 Z M 7.453125 -6.09375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph0-12">
+<path style="stroke:none;" d="M 9.203125 -0.515625 C 8.734375 -0.253906 8.234375 -0.0625 7.703125 0.0625 C 7.179688 0.195312 6.617188 0.265625 6.015625 0.265625 C 5.285156 0.265625 4.609375 0.132812 3.984375 -0.125 C 3.367188 -0.382812 2.832031 -0.773438 2.375 -1.296875 C 1.925781 -1.828125 1.570312 -2.5 1.3125 -3.3125 C 1.050781 -4.132812 0.921875 -5.097656 0.921875 -6.203125 C 0.921875 -7.253906 1.054688 -8.179688 1.328125 -8.984375 C 1.597656 -9.785156 1.96875 -10.457031 2.4375 -11 C 2.90625 -11.539062 3.453125 -11.953125 4.078125 -12.234375 C 4.703125 -12.515625 5.367188 -12.65625 6.078125 -12.65625 C 6.566406 -12.65625 7.066406 -12.585938 7.578125 -12.453125 C 8.097656 -12.328125 8.601562 -12.109375 9.09375 -11.796875 L 8.625 -10.4375 C 7.738281 -10.945312 6.910156 -11.203125 6.140625 -11.203125 C 5.585938 -11.203125 5.09375 -11.082031 4.65625 -10.84375 C 4.226562 -10.613281 3.859375 -10.28125 3.546875 -9.84375 C 3.242188 -9.40625 3.007812 -8.878906 2.84375 -8.265625 C 2.675781 -7.648438 2.59375 -6.960938 2.59375 -6.203125 C 2.59375 -5.347656 2.679688 -4.609375 2.859375 -3.984375 C 3.046875 -3.359375 3.296875 -2.835938 3.609375 -2.421875 C 3.929688 -2.003906 4.316406 -1.695312 4.765625 -1.5 C 5.210938 -1.300781 5.695312 -1.203125 6.21875 -1.203125 C 6.601562 -1.203125 7.007812 -1.25 7.4375 -1.34375 C 7.863281 -1.445312 8.304688 -1.625 8.765625 -1.875 Z M 9.203125 -0.515625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-0">
+<path style="stroke:none;" d="M 0.59375 0 L 0.59375 -9 L 5.40625 -9 L 5.40625 0 Z M 4.796875 -0.59375 L 4.796875 -8.40625 L 1.203125 -8.40625 L 1.203125 -0.59375 Z M 4.796875 -0.59375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-1">
+<path style="stroke:none;" d="M 2.515625 0 L 2.515625 -2.765625 C 2.023438 -3.554688 1.582031 -4.332031 1.1875 -5.09375 C 0.789062 -5.851562 0.445312 -6.628906 0.15625 -7.421875 L 1.265625 -7.421875 C 1.492188 -6.753906 1.757812 -6.113281 2.0625 -5.5 C 2.363281 -4.882812 2.6875 -4.253906 3.03125 -3.609375 C 3.394531 -4.285156 3.71875 -4.929688 4 -5.546875 C 4.28125 -6.160156 4.539062 -6.785156 4.78125 -7.421875 L 5.859375 -7.421875 C 5.554688 -6.640625 5.207031 -5.875 4.8125 -5.125 C 4.414062 -4.382812 3.976562 -3.601562 3.5 -2.78125 L 3.5 0 Z M 2.515625 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-2">
+<path style="stroke:none;" d="M 3 0.15625 C 2.5625 0.15625 2.1875 0.09375 1.875 -0.03125 C 1.570312 -0.164062 1.320312 -0.347656 1.125 -0.578125 C 0.9375 -0.804688 0.796875 -1.085938 0.703125 -1.421875 C 0.617188 -1.765625 0.578125 -2.144531 0.578125 -2.5625 L 0.578125 -7.421875 L 1.5625 -7.421875 L 1.5625 -2.65625 C 1.5625 -2.28125 1.59375 -1.96875 1.65625 -1.71875 C 1.726562 -1.46875 1.828125 -1.265625 1.953125 -1.109375 C 2.078125 -0.960938 2.222656 -0.859375 2.390625 -0.796875 C 2.566406 -0.734375 2.769531 -0.703125 3 -0.703125 C 3.226562 -0.703125 3.425781 -0.734375 3.59375 -0.796875 C 3.769531 -0.859375 3.921875 -0.960938 4.046875 -1.109375 C 4.171875 -1.265625 4.265625 -1.46875 4.328125 -1.71875 C 4.398438 -1.96875 4.4375 -2.28125 4.4375 -2.65625 L 4.4375 -7.421875 L 5.421875 -7.421875 L 5.421875 -2.5625 C 5.421875 -2.144531 5.375 -1.765625 5.28125 -1.421875 C 5.195312 -1.085938 5.054688 -0.804688 4.859375 -0.578125 C 4.671875 -0.347656 4.421875 -0.164062 4.109375 -0.03125 C 3.804688 0.09375 3.4375 0.15625 3 0.15625 Z M 3 0.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-3">
+<path style="stroke:none;" d="M 1.21875 -7.421875 C 1.320312 -6.921875 1.445312 -6.375 1.59375 -5.78125 C 1.738281 -5.1875 1.890625 -4.585938 2.046875 -3.984375 C 2.210938 -3.390625 2.378906 -2.820312 2.546875 -2.28125 C 2.722656 -1.738281 2.882812 -1.265625 3.03125 -0.859375 C 3.15625 -1.265625 3.300781 -1.742188 3.46875 -2.296875 C 3.644531 -2.847656 3.816406 -3.421875 3.984375 -4.015625 C 4.148438 -4.609375 4.304688 -5.203125 4.453125 -5.796875 C 4.609375 -6.390625 4.734375 -6.929688 4.828125 -7.421875 L 5.859375 -7.421875 C 5.796875 -7.109375 5.691406 -6.679688 5.546875 -6.140625 C 5.398438 -5.597656 5.226562 -4.992188 5.03125 -4.328125 C 4.832031 -3.660156 4.609375 -2.953125 4.359375 -2.203125 C 4.117188 -1.453125 3.863281 -0.71875 3.59375 0 L 2.375 0 C 2.125 -0.71875 1.878906 -1.445312 1.640625 -2.1875 C 1.410156 -2.9375 1.195312 -3.644531 1 -4.3125 C 0.800781 -4.976562 0.628906 -5.582031 0.484375 -6.125 C 0.335938 -6.675781 0.226562 -7.109375 0.15625 -7.421875 Z M 1.21875 -7.421875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-4">
+<path style="stroke:none;" d=""/>
+</symbol>
+<symbol overflow="visible" id="glyph1-5">
+<path style="stroke:none;" d="M 5.515625 -3.71875 C 5.515625 -3.03125 5.425781 -2.445312 5.25 -1.96875 C 5.082031 -1.488281 4.847656 -1.097656 4.546875 -0.796875 C 4.253906 -0.492188 3.898438 -0.273438 3.484375 -0.140625 C 3.078125 -0.00390625 2.628906 0.0625 2.140625 0.0625 C 1.648438 0.0625 1.148438 0 0.640625 -0.125 L 0.640625 -7.3125 C 1.148438 -7.4375 1.648438 -7.5 2.140625 -7.5 C 2.628906 -7.5 3.078125 -7.429688 3.484375 -7.296875 C 3.898438 -7.160156 4.253906 -6.941406 4.546875 -6.640625 C 4.847656 -6.335938 5.082031 -5.941406 5.25 -5.453125 C 5.425781 -4.972656 5.515625 -4.394531 5.515625 -3.71875 Z M 1.625 -0.828125 C 1.832031 -0.804688 2.039062 -0.796875 2.25 -0.796875 C 2.601562 -0.796875 2.921875 -0.847656 3.203125 -0.953125 C 3.484375 -1.054688 3.71875 -1.226562 3.90625 -1.46875 C 4.101562 -1.707031 4.253906 -2.007812 4.359375 -2.375 C 4.460938 -2.75 4.515625 -3.195312 4.515625 -3.71875 C 4.515625 -4.726562 4.316406 -5.46875 3.921875 -5.9375 C 3.535156 -6.40625 2.960938 -6.640625 2.203125 -6.640625 C 2.097656 -6.640625 1.992188 -6.640625 1.890625 -6.640625 C 1.796875 -6.640625 1.707031 -6.628906 1.625 -6.609375 Z M 1.625 -0.828125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-6">
+<path style="stroke:none;" d="M 5.515625 -2.78125 C 5.515625 -2.34375 5.453125 -1.945312 5.328125 -1.59375 C 5.203125 -1.238281 5.023438 -0.929688 4.796875 -0.671875 C 4.578125 -0.410156 4.3125 -0.210938 4 -0.078125 C 3.695312 0.0546875 3.359375 0.125 2.984375 0.125 C 2.628906 0.125 2.296875 0.0546875 1.984375 -0.078125 C 1.679688 -0.210938 1.414062 -0.410156 1.1875 -0.671875 C 0.96875 -0.929688 0.796875 -1.238281 0.671875 -1.59375 C 0.546875 -1.945312 0.484375 -2.34375 0.484375 -2.78125 C 0.484375 -3.21875 0.546875 -3.617188 0.671875 -3.984375 C 0.796875 -4.347656 0.96875 -4.65625 1.1875 -4.90625 C 1.414062 -5.15625 1.679688 -5.347656 1.984375 -5.484375 C 2.296875 -5.628906 2.628906 -5.703125 2.984375 -5.703125 C 3.359375 -5.703125 3.695312 -5.628906 4 -5.484375 C 4.3125 -5.347656 4.578125 -5.15625 4.796875 -4.90625 C 5.023438 -4.65625 5.203125 -4.347656 5.328125 -3.984375 C 5.453125 -3.617188 5.515625 -3.21875 5.515625 -2.78125 Z M 4.5 -2.78125 C 4.5 -3.414062 4.363281 -3.914062 4.09375 -4.28125 C 3.820312 -4.644531 3.453125 -4.828125 2.984375 -4.828125 C 2.523438 -4.828125 2.160156 -4.644531 1.890625 -4.28125 C 1.628906 -3.914062 1.5 -3.414062 1.5 -2.78125 C 1.5 -2.15625 1.628906 -1.660156 1.890625 -1.296875 C 2.160156 -0.929688 2.523438 -0.75 2.984375 -0.75 C 3.453125 -0.75 3.820312 -0.929688 4.09375 -1.296875 C 4.363281 -1.660156 4.5 -2.15625 4.5 -2.78125 Z M 4.5 -2.78125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-7">
+<path style="stroke:none;" d="M 4.109375 0 C 3.992188 -0.269531 3.890625 -0.515625 3.796875 -0.734375 C 3.710938 -0.960938 3.628906 -1.1875 3.546875 -1.40625 C 3.460938 -1.632812 3.378906 -1.867188 3.296875 -2.109375 C 3.210938 -2.359375 3.113281 -2.640625 3 -2.953125 C 2.882812 -2.640625 2.78125 -2.359375 2.6875 -2.109375 C 2.601562 -1.867188 2.519531 -1.632812 2.4375 -1.40625 C 2.351562 -1.1875 2.265625 -0.960938 2.171875 -0.734375 C 2.085938 -0.515625 1.984375 -0.269531 1.859375 0 L 1.109375 0 C 0.890625 -0.976562 0.707031 -1.953125 0.5625 -2.921875 C 0.414062 -3.890625 0.304688 -4.769531 0.234375 -5.5625 L 1.15625 -5.5625 C 1.1875 -5.25 1.210938 -4.941406 1.234375 -4.640625 C 1.265625 -4.347656 1.300781 -4.035156 1.34375 -3.703125 C 1.382812 -3.378906 1.429688 -3.023438 1.484375 -2.640625 C 1.535156 -2.253906 1.59375 -1.820312 1.65625 -1.34375 C 1.78125 -1.664062 1.882812 -1.945312 1.96875 -2.1875 C 2.0625 -2.425781 2.144531 -2.648438 2.21875 -2.859375 C 2.289062 -3.078125 2.359375 -3.296875 2.421875 -3.515625 C 2.492188 -3.742188 2.570312 -4 2.65625 -4.28125 L 3.390625 -4.28125 C 3.472656 -4 3.546875 -3.742188 3.609375 -3.515625 C 3.671875 -3.296875 3.738281 -3.078125 3.8125 -2.859375 C 3.882812 -2.648438 3.957031 -2.425781 4.03125 -2.1875 C 4.113281 -1.945312 4.21875 -1.671875 4.34375 -1.359375 C 4.414062 -1.796875 4.476562 -2.203125 4.53125 -2.578125 C 4.59375 -2.953125 4.640625 -3.304688 4.671875 -3.640625 C 4.710938 -3.972656 4.75 -4.296875 4.78125 -4.609375 C 4.820312 -4.921875 4.851562 -5.238281 4.875 -5.5625 L 5.765625 -5.5625 C 5.734375 -5.164062 5.6875 -4.738281 5.625 -4.28125 C 5.570312 -3.820312 5.503906 -3.351562 5.421875 -2.875 C 5.335938 -2.394531 5.25 -1.910156 5.15625 -1.421875 C 5.0625 -0.929688 4.960938 -0.457031 4.859375 0 Z M 4.109375 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-8">
+<path style="stroke:none;" d="M 0.859375 -5.40625 C 1.210938 -5.5 1.566406 -5.566406 1.921875 -5.609375 C 2.273438 -5.660156 2.609375 -5.6875 2.921875 -5.6875 C 3.671875 -5.6875 4.234375 -5.492188 4.609375 -5.109375 C 4.992188 -4.722656 5.1875 -4.109375 5.1875 -3.265625 L 5.1875 0 L 4.203125 0 L 4.203125 -3.078125 C 4.203125 -3.441406 4.171875 -3.734375 4.109375 -3.953125 C 4.046875 -4.179688 3.953125 -4.359375 3.828125 -4.484375 C 3.710938 -4.609375 3.570312 -4.691406 3.40625 -4.734375 C 3.25 -4.785156 3.070312 -4.8125 2.875 -4.8125 C 2.71875 -4.8125 2.546875 -4.800781 2.359375 -4.78125 C 2.179688 -4.757812 2.007812 -4.734375 1.84375 -4.703125 L 1.84375 0 L 0.859375 0 Z M 0.859375 -5.40625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-9">
+<path style="stroke:none;" d="M 4.21875 -1.390625 C 4.21875 -1.585938 4.132812 -1.75 3.96875 -1.875 C 3.800781 -2.007812 3.59375 -2.125 3.34375 -2.21875 C 3.101562 -2.3125 2.835938 -2.40625 2.546875 -2.5 C 2.265625 -2.59375 2 -2.707031 1.75 -2.84375 C 1.507812 -2.976562 1.304688 -3.144531 1.140625 -3.34375 C 0.984375 -3.539062 0.90625 -3.800781 0.90625 -4.125 C 0.90625 -4.570312 1.082031 -4.945312 1.4375 -5.25 C 1.800781 -5.550781 2.375 -5.703125 3.15625 -5.703125 C 3.457031 -5.703125 3.769531 -5.675781 4.09375 -5.625 C 4.414062 -5.582031 4.695312 -5.523438 4.9375 -5.453125 L 4.75 -4.578125 C 4.6875 -4.609375 4.597656 -4.640625 4.484375 -4.671875 C 4.367188 -4.710938 4.238281 -4.742188 4.09375 -4.765625 C 3.957031 -4.796875 3.804688 -4.816406 3.640625 -4.828125 C 3.472656 -4.847656 3.316406 -4.859375 3.171875 -4.859375 C 2.304688 -4.859375 1.875 -4.625 1.875 -4.15625 C 1.875 -3.988281 1.953125 -3.84375 2.109375 -3.71875 C 2.273438 -3.601562 2.484375 -3.5 2.734375 -3.40625 C 2.984375 -3.3125 3.25 -3.210938 3.53125 -3.109375 C 3.820312 -3.015625 4.09375 -2.894531 4.34375 -2.75 C 4.59375 -2.601562 4.796875 -2.425781 4.953125 -2.21875 C 5.117188 -2.019531 5.203125 -1.765625 5.203125 -1.453125 C 5.203125 -0.953125 5.003906 -0.5625 4.609375 -0.28125 C 4.222656 -0.0078125 3.609375 0.125 2.765625 0.125 C 2.378906 0.125 2.023438 0.09375 1.703125 0.03125 C 1.378906 -0.03125 1.078125 -0.125 0.796875 -0.25 L 0.984375 -1.15625 C 1.265625 -1.019531 1.554688 -0.910156 1.859375 -0.828125 C 2.171875 -0.742188 2.503906 -0.703125 2.859375 -0.703125 C 3.765625 -0.703125 4.21875 -0.929688 4.21875 -1.390625 Z M 4.21875 -1.390625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-10">
+<path style="stroke:none;" d="M 0.59375 -2.765625 C 0.59375 -3.273438 0.671875 -3.710938 0.828125 -4.078125 C 0.984375 -4.441406 1.203125 -4.742188 1.484375 -4.984375 C 1.765625 -5.234375 2.09375 -5.414062 2.46875 -5.53125 C 2.84375 -5.644531 3.238281 -5.703125 3.65625 -5.703125 C 3.925781 -5.703125 4.195312 -5.679688 4.46875 -5.640625 C 4.738281 -5.609375 5.023438 -5.546875 5.328125 -5.453125 L 5.09375 -4.59375 C 4.832031 -4.6875 4.59375 -4.75 4.375 -4.78125 C 4.15625 -4.8125 3.929688 -4.828125 3.703125 -4.828125 C 3.421875 -4.828125 3.148438 -4.785156 2.890625 -4.703125 C 2.640625 -4.628906 2.414062 -4.507812 2.21875 -4.34375 C 2.03125 -4.1875 1.878906 -3.976562 1.765625 -3.71875 C 1.660156 -3.457031 1.609375 -3.140625 1.609375 -2.765625 C 1.609375 -2.421875 1.660156 -2.117188 1.765625 -1.859375 C 1.867188 -1.609375 2.015625 -1.398438 2.203125 -1.234375 C 2.390625 -1.078125 2.613281 -0.957031 2.875 -0.875 C 3.144531 -0.789062 3.4375 -0.75 3.75 -0.75 C 4.007812 -0.75 4.253906 -0.765625 4.484375 -0.796875 C 4.722656 -0.828125 4.984375 -0.890625 5.265625 -0.984375 L 5.40625 -0.15625 C 5.125 -0.0507812 4.835938 0.0195312 4.546875 0.0625 C 4.265625 0.101562 3.957031 0.125 3.625 0.125 C 3.175781 0.125 2.765625 0.0664062 2.390625 -0.046875 C 2.023438 -0.171875 1.707031 -0.351562 1.4375 -0.59375 C 1.164062 -0.832031 0.957031 -1.132812 0.8125 -1.5 C 0.664062 -1.863281 0.59375 -2.285156 0.59375 -2.765625 Z M 0.59375 -2.765625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-11">
+<path style="stroke:none;" d="M 3.0625 -0.703125 C 3.3125 -0.703125 3.53125 -0.707031 3.71875 -0.71875 C 3.914062 -0.738281 4.082031 -0.765625 4.21875 -0.796875 L 4.21875 -2.453125 C 4.082031 -2.492188 3.925781 -2.523438 3.75 -2.546875 C 3.570312 -2.566406 3.382812 -2.578125 3.1875 -2.578125 C 3 -2.578125 2.816406 -2.5625 2.640625 -2.53125 C 2.460938 -2.507812 2.304688 -2.460938 2.171875 -2.390625 C 2.035156 -2.316406 1.921875 -2.222656 1.828125 -2.109375 C 1.742188 -1.992188 1.703125 -1.847656 1.703125 -1.671875 C 1.703125 -1.304688 1.820312 -1.050781 2.0625 -0.90625 C 2.3125 -0.769531 2.644531 -0.703125 3.0625 -0.703125 Z M 2.96875 -5.703125 C 3.382812 -5.703125 3.734375 -5.648438 4.015625 -5.546875 C 4.296875 -5.441406 4.523438 -5.296875 4.703125 -5.109375 C 4.878906 -4.929688 5.003906 -4.707031 5.078125 -4.4375 C 5.148438 -4.175781 5.1875 -3.890625 5.1875 -3.578125 L 5.1875 -0.09375 C 4.957031 -0.0507812 4.648438 -0.00390625 4.265625 0.046875 C 3.890625 0.0976562 3.5 0.125 3.09375 0.125 C 2.789062 0.125 2.492188 0.0976562 2.203125 0.046875 C 1.921875 -0.00390625 1.664062 -0.09375 1.4375 -0.21875 C 1.21875 -0.351562 1.039062 -0.535156 0.90625 -0.765625 C 0.769531 -0.992188 0.703125 -1.289062 0.703125 -1.65625 C 0.703125 -1.976562 0.769531 -2.25 0.90625 -2.46875 C 1.039062 -2.6875 1.21875 -2.863281 1.4375 -3 C 1.664062 -3.132812 1.921875 -3.234375 2.203125 -3.296875 C 2.484375 -3.359375 2.769531 -3.390625 3.0625 -3.390625 C 3.445312 -3.390625 3.832031 -3.34375 4.21875 -3.25 L 4.21875 -3.53125 C 4.21875 -3.695312 4.195312 -3.859375 4.15625 -4.015625 C 4.125 -4.171875 4.054688 -4.3125 3.953125 -4.4375 C 3.847656 -4.5625 3.707031 -4.660156 3.53125 -4.734375 C 3.363281 -4.816406 3.144531 -4.859375 2.875 -4.859375 C 2.53125 -4.859375 2.226562 -4.832031 1.96875 -4.78125 C 1.71875 -4.738281 1.523438 -4.691406 1.390625 -4.640625 L 1.265625 -5.453125 C 1.398438 -5.523438 1.625 -5.582031 1.9375 -5.625 C 2.257812 -5.675781 2.601562 -5.703125 2.96875 -5.703125 Z M 2.96875 -5.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-12">
+<path style="stroke:none;" d="M 4.0625 0.125 C 3.707031 0.125 3.410156 0.078125 3.171875 -0.015625 C 2.941406 -0.109375 2.757812 -0.25 2.625 -0.4375 C 2.488281 -0.632812 2.390625 -0.875 2.328125 -1.15625 C 2.273438 -1.4375 2.25 -1.765625 2.25 -2.140625 L 2.25 -7.421875 L 0.640625 -7.421875 L 0.640625 -8.25 L 3.234375 -8.25 L 3.234375 -2.140625 C 3.234375 -1.867188 3.25 -1.644531 3.28125 -1.46875 C 3.320312 -1.289062 3.378906 -1.144531 3.453125 -1.03125 C 3.535156 -0.925781 3.628906 -0.851562 3.734375 -0.8125 C 3.847656 -0.769531 3.984375 -0.75 4.140625 -0.75 C 4.367188 -0.75 4.582031 -0.773438 4.78125 -0.828125 C 4.988281 -0.890625 5.144531 -0.953125 5.25 -1.015625 L 5.40625 -0.1875 C 5.351562 -0.15625 5.28125 -0.117188 5.1875 -0.078125 C 5.101562 -0.046875 5 -0.015625 4.875 0.015625 C 4.757812 0.046875 4.628906 0.0703125 4.484375 0.09375 C 4.347656 0.113281 4.207031 0.125 4.0625 0.125 Z M 4.0625 0.125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-13">
+<path style="stroke:none;" d="M 2.515625 -6.4375 C 2.304688 -6.4375 2.125 -6.503906 1.96875 -6.640625 C 1.8125 -6.785156 1.734375 -6.984375 1.734375 -7.234375 C 1.734375 -7.484375 1.8125 -7.679688 1.96875 -7.828125 C 2.125 -7.972656 2.304688 -8.046875 2.515625 -8.046875 C 2.722656 -8.046875 2.898438 -7.972656 3.046875 -7.828125 C 3.203125 -7.679688 3.28125 -7.484375 3.28125 -7.234375 C 3.28125 -6.984375 3.203125 -6.785156 3.046875 -6.640625 C 2.898438 -6.503906 2.722656 -6.4375 2.515625 -6.4375 Z M 2.25 -4.734375 L 0.640625 -4.734375 L 0.640625 -5.5625 L 3.234375 -5.5625 L 3.234375 -2.140625 C 3.234375 -1.585938 3.3125 -1.21875 3.46875 -1.03125 C 3.625 -0.84375 3.851562 -0.75 4.15625 -0.75 C 4.382812 -0.75 4.597656 -0.773438 4.796875 -0.828125 C 4.992188 -0.890625 5.144531 -0.953125 5.25 -1.015625 L 5.40625 -0.1875 C 5.351562 -0.15625 5.28125 -0.117188 5.1875 -0.078125 C 5.101562 -0.046875 5.003906 -0.015625 4.890625 0.015625 C 4.773438 0.046875 4.644531 0.0703125 4.5 0.09375 C 4.363281 0.113281 4.21875 0.125 4.0625 0.125 C 3.71875 0.125 3.425781 0.078125 3.1875 -0.015625 C 2.957031 -0.109375 2.769531 -0.25 2.625 -0.4375 C 2.488281 -0.632812 2.390625 -0.875 2.328125 -1.15625 C 2.273438 -1.4375 2.25 -1.765625 2.25 -2.140625 Z M 2.25 -4.734375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-14">
+<path style="stroke:none;" d="M 4.15625 -0.515625 C 4.039062 -0.453125 3.863281 -0.382812 3.625 -0.3125 C 3.394531 -0.238281 3.128906 -0.203125 2.828125 -0.203125 C 2.503906 -0.203125 2.195312 -0.253906 1.90625 -0.359375 C 1.625 -0.472656 1.378906 -0.640625 1.171875 -0.859375 C 0.960938 -1.078125 0.796875 -1.351562 0.671875 -1.6875 C 0.546875 -2.03125 0.484375 -2.4375 0.484375 -2.90625 C 0.484375 -3.3125 0.539062 -3.679688 0.65625 -4.015625 C 0.769531 -4.359375 0.9375 -4.65625 1.15625 -4.90625 C 1.375 -5.15625 1.644531 -5.347656 1.96875 -5.484375 C 2.289062 -5.628906 2.65625 -5.703125 3.0625 -5.703125 C 3.539062 -5.703125 3.945312 -5.664062 4.28125 -5.59375 C 4.625 -5.53125 4.910156 -5.46875 5.140625 -5.40625 L 5.140625 -0.4375 C 5.140625 0.425781 4.921875 1.050781 4.484375 1.4375 C 4.054688 1.820312 3.398438 2.015625 2.515625 2.015625 C 2.160156 2.015625 1.835938 1.984375 1.546875 1.921875 C 1.253906 1.867188 0.992188 1.804688 0.765625 1.734375 L 0.953125 0.859375 C 1.160156 0.941406 1.394531 1.007812 1.65625 1.0625 C 1.925781 1.125 2.222656 1.15625 2.546875 1.15625 C 3.117188 1.15625 3.53125 1.035156 3.78125 0.796875 C 4.03125 0.566406 4.15625 0.191406 4.15625 -0.328125 Z M 4.15625 -4.6875 C 4.0625 -4.71875 3.925781 -4.75 3.75 -4.78125 C 3.582031 -4.8125 3.359375 -4.828125 3.078125 -4.828125 C 2.554688 -4.828125 2.160156 -4.648438 1.890625 -4.296875 C 1.628906 -3.941406 1.5 -3.472656 1.5 -2.890625 C 1.5 -2.566406 1.535156 -2.289062 1.609375 -2.0625 C 1.691406 -1.84375 1.796875 -1.65625 1.921875 -1.5 C 2.054688 -1.351562 2.207031 -1.242188 2.375 -1.171875 C 2.539062 -1.109375 2.722656 -1.078125 2.921875 -1.078125 C 3.160156 -1.078125 3.390625 -1.113281 3.609375 -1.1875 C 3.835938 -1.257812 4.019531 -1.34375 4.15625 -1.4375 Z M 4.15625 -4.6875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-15">
+<path style="stroke:none;" d="M 2.8125 -0.703125 C 3.3125 -0.703125 3.691406 -0.796875 3.953125 -0.984375 C 4.222656 -1.171875 4.359375 -1.457031 4.359375 -1.84375 C 4.359375 -2.082031 4.304688 -2.28125 4.203125 -2.4375 C 4.109375 -2.601562 3.984375 -2.75 3.828125 -2.875 C 3.671875 -3 3.488281 -3.109375 3.28125 -3.203125 C 3.082031 -3.296875 2.878906 -3.378906 2.671875 -3.453125 C 2.429688 -3.546875 2.203125 -3.648438 1.984375 -3.765625 C 1.765625 -3.890625 1.566406 -4.03125 1.390625 -4.1875 C 1.222656 -4.351562 1.085938 -4.546875 0.984375 -4.765625 C 0.890625 -4.984375 0.84375 -5.238281 0.84375 -5.53125 C 0.84375 -6.175781 1.046875 -6.679688 1.453125 -7.046875 C 1.859375 -7.410156 2.425781 -7.59375 3.15625 -7.59375 C 3.351562 -7.59375 3.550781 -7.578125 3.75 -7.546875 C 3.945312 -7.523438 4.128906 -7.492188 4.296875 -7.453125 C 4.460938 -7.410156 4.609375 -7.359375 4.734375 -7.296875 C 4.867188 -7.242188 4.976562 -7.191406 5.0625 -7.140625 L 4.75 -6.3125 C 4.59375 -6.40625 4.375 -6.5 4.09375 -6.59375 C 3.8125 -6.695312 3.5 -6.75 3.15625 -6.75 C 2.789062 -6.75 2.476562 -6.65625 2.21875 -6.46875 C 1.957031 -6.289062 1.828125 -6.019531 1.828125 -5.65625 C 1.828125 -5.457031 1.863281 -5.285156 1.9375 -5.140625 C 2.007812 -4.992188 2.113281 -4.863281 2.25 -4.75 C 2.382812 -4.644531 2.535156 -4.546875 2.703125 -4.453125 C 2.878906 -4.367188 3.070312 -4.285156 3.28125 -4.203125 C 3.59375 -4.078125 3.875 -3.945312 4.125 -3.8125 C 4.375 -3.6875 4.585938 -3.535156 4.765625 -3.359375 C 4.953125 -3.179688 5.09375 -2.972656 5.1875 -2.734375 C 5.289062 -2.492188 5.34375 -2.203125 5.34375 -1.859375 C 5.34375 -1.210938 5.125 -0.710938 4.6875 -0.359375 C 4.25 -0.015625 3.625 0.15625 2.8125 0.15625 C 2.539062 0.15625 2.289062 0.132812 2.0625 0.09375 C 1.832031 0.0625 1.625 0.0195312 1.4375 -0.03125 C 1.257812 -0.09375 1.101562 -0.148438 0.96875 -0.203125 C 0.832031 -0.253906 0.726562 -0.304688 0.65625 -0.359375 L 0.953125 -1.171875 C 1.117188 -1.085938 1.359375 -0.988281 1.671875 -0.875 C 1.984375 -0.757812 2.363281 -0.703125 2.8125 -0.703125 Z M 2.8125 -0.703125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-16">
+<path style="stroke:none;" d="M 3.109375 -5.703125 C 3.859375 -5.703125 4.4375 -5.46875 4.84375 -5 C 5.25 -4.53125 5.453125 -3.820312 5.453125 -2.875 L 5.453125 -2.515625 L 1.46875 -2.515625 C 1.507812 -1.941406 1.703125 -1.503906 2.046875 -1.203125 C 2.390625 -0.898438 2.867188 -0.75 3.484375 -0.75 C 3.835938 -0.75 4.132812 -0.773438 4.375 -0.828125 C 4.625 -0.890625 4.8125 -0.953125 4.9375 -1.015625 L 5.078125 -0.1875 C 4.953125 -0.113281 4.734375 -0.046875 4.421875 0.015625 C 4.109375 0.0859375 3.757812 0.125 3.375 0.125 C 2.894531 0.125 2.472656 0.0507812 2.109375 -0.09375 C 1.742188 -0.238281 1.441406 -0.4375 1.203125 -0.6875 C 0.960938 -0.945312 0.78125 -1.253906 0.65625 -1.609375 C 0.539062 -1.960938 0.484375 -2.347656 0.484375 -2.765625 C 0.484375 -3.265625 0.554688 -3.695312 0.703125 -4.0625 C 0.859375 -4.4375 1.0625 -4.742188 1.3125 -4.984375 C 1.5625 -5.222656 1.835938 -5.398438 2.140625 -5.515625 C 2.453125 -5.640625 2.773438 -5.703125 3.109375 -5.703125 Z M 4.453125 -3.328125 C 4.453125 -3.796875 4.328125 -4.164062 4.078125 -4.4375 C 3.828125 -4.71875 3.5 -4.859375 3.09375 -4.859375 C 2.863281 -4.859375 2.65625 -4.8125 2.46875 -4.71875 C 2.28125 -4.632812 2.117188 -4.519531 1.984375 -4.375 C 1.847656 -4.226562 1.738281 -4.0625 1.65625 -3.875 C 1.570312 -3.695312 1.519531 -3.515625 1.5 -3.328125 Z M 4.453125 -3.328125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-17">
+<path style="stroke:none;" d="M 4.15625 -4.390625 C 4.039062 -4.492188 3.875 -4.59375 3.65625 -4.6875 C 3.445312 -4.78125 3.222656 -4.828125 2.984375 -4.828125 C 2.722656 -4.828125 2.5 -4.773438 2.3125 -4.671875 C 2.125 -4.566406 1.96875 -4.421875 1.84375 -4.234375 C 1.726562 -4.054688 1.640625 -3.84375 1.578125 -3.59375 C 1.523438 -3.34375 1.5 -3.070312 1.5 -2.78125 C 1.5 -2.132812 1.648438 -1.632812 1.953125 -1.28125 C 2.253906 -0.925781 2.648438 -0.75 3.140625 -0.75 C 3.390625 -0.75 3.597656 -0.757812 3.765625 -0.78125 C 3.941406 -0.8125 4.070312 -0.835938 4.15625 -0.859375 Z M 4.15625 -8.140625 L 5.140625 -8.3125 L 5.140625 -0.15625 C 4.929688 -0.09375 4.65625 -0.03125 4.3125 0.03125 C 3.976562 0.09375 3.585938 0.125 3.140625 0.125 C 2.742188 0.125 2.378906 0.0546875 2.046875 -0.078125 C 1.722656 -0.210938 1.441406 -0.40625 1.203125 -0.65625 C 0.972656 -0.90625 0.796875 -1.207031 0.671875 -1.5625 C 0.546875 -1.925781 0.484375 -2.332031 0.484375 -2.78125 C 0.484375 -3.21875 0.535156 -3.613281 0.640625 -3.96875 C 0.742188 -4.320312 0.898438 -4.625 1.109375 -4.875 C 1.316406 -5.132812 1.566406 -5.335938 1.859375 -5.484375 C 2.148438 -5.628906 2.488281 -5.703125 2.875 -5.703125 C 3.164062 -5.703125 3.421875 -5.664062 3.640625 -5.59375 C 3.867188 -5.519531 4.039062 -5.441406 4.15625 -5.359375 Z M 4.15625 -8.140625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-18">
+<path style="stroke:none;" d="M 1.28125 0 L 1.28125 -5.265625 C 2.101562 -5.546875 2.925781 -5.6875 3.75 -5.6875 C 4.007812 -5.6875 4.253906 -5.675781 4.484375 -5.65625 C 4.722656 -5.632812 4.976562 -5.59375 5.25 -5.53125 L 5.078125 -4.65625 C 4.816406 -4.726562 4.585938 -4.773438 4.390625 -4.796875 C 4.203125 -4.816406 3.988281 -4.828125 3.75 -4.828125 C 3.269531 -4.828125 2.773438 -4.757812 2.265625 -4.625 L 2.265625 0 Z M 1.28125 0 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-19">
+<path style="stroke:none;" d="M 0.609375 1.046875 C 0.679688 1.085938 0.78125 1.117188 0.90625 1.140625 C 1.039062 1.160156 1.164062 1.171875 1.28125 1.171875 C 1.675781 1.171875 1.984375 1.082031 2.203125 0.90625 C 2.421875 0.738281 2.625 0.460938 2.8125 0.078125 C 2.363281 -0.773438 1.941406 -1.6875 1.546875 -2.65625 C 1.148438 -3.625 0.828125 -4.59375 0.578125 -5.5625 L 1.65625 -5.5625 C 1.738281 -5.25 1.832031 -4.90625 1.9375 -4.53125 C 2.039062 -4.15625 2.160156 -3.769531 2.296875 -3.375 C 2.441406 -2.988281 2.585938 -2.597656 2.734375 -2.203125 C 2.890625 -1.804688 3.054688 -1.425781 3.234375 -1.0625 C 3.367188 -1.4375 3.488281 -1.800781 3.59375 -2.15625 C 3.707031 -2.519531 3.8125 -2.882812 3.90625 -3.25 C 4.007812 -3.613281 4.109375 -3.984375 4.203125 -4.359375 C 4.296875 -4.742188 4.394531 -5.144531 4.5 -5.5625 L 5.53125 -5.5625 C 5.269531 -4.53125 4.984375 -3.523438 4.671875 -2.546875 C 4.359375 -1.566406 4.019531 -0.660156 3.65625 0.171875 C 3.519531 0.484375 3.375 0.753906 3.21875 0.984375 C 3.0625 1.222656 2.890625 1.414062 2.703125 1.5625 C 2.523438 1.71875 2.316406 1.832031 2.078125 1.90625 C 1.847656 1.976562 1.585938 2.015625 1.296875 2.015625 C 1.140625 2.015625 0.972656 1.992188 0.796875 1.953125 C 0.617188 1.910156 0.5 1.875 0.4375 1.84375 Z M 0.609375 1.046875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-20">
+<path style="stroke:none;" d="M 0.34375 -3.71875 C 0.34375 -4.382812 0.40625 -4.960938 0.53125 -5.453125 C 0.664062 -5.941406 0.847656 -6.34375 1.078125 -6.65625 C 1.304688 -6.96875 1.582031 -7.203125 1.90625 -7.359375 C 2.238281 -7.515625 2.601562 -7.59375 3 -7.59375 C 3.394531 -7.59375 3.753906 -7.515625 4.078125 -7.359375 C 4.410156 -7.203125 4.691406 -6.96875 4.921875 -6.65625 C 5.148438 -6.34375 5.328125 -5.941406 5.453125 -5.453125 C 5.585938 -4.960938 5.65625 -4.382812 5.65625 -3.71875 C 5.65625 -3.050781 5.585938 -2.472656 5.453125 -1.984375 C 5.328125 -1.503906 5.148438 -1.101562 4.921875 -0.78125 C 4.691406 -0.457031 4.410156 -0.21875 4.078125 -0.0625 C 3.753906 0.0820312 3.394531 0.15625 3 0.15625 C 2.601562 0.15625 2.238281 0.0820312 1.90625 -0.0625 C 1.582031 -0.21875 1.304688 -0.457031 1.078125 -0.78125 C 0.847656 -1.101562 0.664062 -1.503906 0.53125 -1.984375 C 0.40625 -2.472656 0.34375 -3.050781 0.34375 -3.71875 Z M 1.359375 -3.71875 C 1.359375 -2.738281 1.488281 -1.988281 1.75 -1.46875 C 2.007812 -0.957031 2.414062 -0.703125 2.96875 -0.703125 C 3.53125 -0.703125 3.953125 -0.957031 4.234375 -1.46875 C 4.515625 -1.988281 4.65625 -2.738281 4.65625 -3.71875 C 4.65625 -4.695312 4.515625 -5.445312 4.234375 -5.96875 C 3.953125 -6.488281 3.53125 -6.75 2.96875 -6.75 C 2.414062 -6.75 2.007812 -6.488281 1.75 -5.96875 C 1.488281 -5.445312 1.359375 -4.695312 1.359375 -3.71875 Z M 1.359375 -3.71875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-21">
+<path style="stroke:none;" d="M 5.140625 -0.15625 C 4.929688 -0.101562 4.644531 -0.046875 4.28125 0.015625 C 3.925781 0.0859375 3.507812 0.125 3.03125 0.125 C 2.613281 0.125 2.265625 0.0625 1.984375 -0.0625 C 1.703125 -0.1875 1.472656 -0.363281 1.296875 -0.59375 C 1.117188 -0.820312 0.992188 -1.09375 0.921875 -1.40625 C 0.847656 -1.71875 0.8125 -2.0625 0.8125 -2.4375 L 0.8125 -5.5625 L 1.796875 -5.5625 L 1.796875 -2.65625 C 1.796875 -1.96875 1.894531 -1.476562 2.09375 -1.1875 C 2.300781 -0.894531 2.644531 -0.75 3.125 -0.75 C 3.226562 -0.75 3.332031 -0.753906 3.4375 -0.765625 C 3.550781 -0.773438 3.65625 -0.785156 3.75 -0.796875 C 3.851562 -0.804688 3.9375 -0.816406 4 -0.828125 C 4.070312 -0.847656 4.125 -0.859375 4.15625 -0.859375 L 4.15625 -5.5625 L 5.140625 -5.5625 Z M 5.140625 -0.15625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-22">
+<path style="stroke:none;" d="M 2.921875 -5.5625 L 5.265625 -5.5625 L 5.265625 -4.734375 L 2.921875 -4.734375 L 2.921875 -2.140625 C 2.921875 -1.867188 2.9375 -1.644531 2.96875 -1.46875 C 3.007812 -1.289062 3.078125 -1.144531 3.171875 -1.03125 C 3.265625 -0.925781 3.382812 -0.851562 3.53125 -0.8125 C 3.675781 -0.769531 3.851562 -0.75 4.0625 -0.75 C 4.34375 -0.75 4.570312 -0.773438 4.75 -0.828125 C 4.925781 -0.878906 5.09375 -0.941406 5.25 -1.015625 L 5.40625 -0.1875 C 5.289062 -0.132812 5.109375 -0.0703125 4.859375 0 C 4.617188 0.0820312 4.316406 0.125 3.953125 0.125 C 3.546875 0.125 3.207031 0.078125 2.9375 -0.015625 C 2.675781 -0.109375 2.46875 -0.25 2.3125 -0.4375 C 2.164062 -0.632812 2.066406 -0.875 2.015625 -1.15625 C 1.960938 -1.4375 1.9375 -1.765625 1.9375 -2.140625 L 1.9375 -4.734375 L 0.75 -4.734375 L 0.75 -5.5625 L 1.9375 -5.5625 L 1.9375 -7.125 L 2.921875 -7.296875 Z M 2.921875 -5.5625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-23">
+<path style="stroke:none;" d="M 4.5 -2.765625 C 4.5 -3.421875 4.347656 -3.925781 4.046875 -4.28125 C 3.742188 -4.632812 3.347656 -4.8125 2.859375 -4.8125 C 2.585938 -4.8125 2.375 -4.796875 2.21875 -4.765625 C 2.0625 -4.742188 1.9375 -4.71875 1.84375 -4.6875 L 1.84375 -1.171875 C 1.957031 -1.066406 2.117188 -0.96875 2.328125 -0.875 C 2.546875 -0.789062 2.773438 -0.75 3.015625 -0.75 C 3.273438 -0.75 3.5 -0.800781 3.6875 -0.90625 C 3.875 -1.007812 4.023438 -1.148438 4.140625 -1.328125 C 4.265625 -1.515625 4.351562 -1.726562 4.40625 -1.96875 C 4.46875 -2.21875 4.5 -2.484375 4.5 -2.765625 Z M 5.515625 -2.765625 C 5.515625 -2.347656 5.460938 -1.957031 5.359375 -1.59375 C 5.253906 -1.238281 5.097656 -0.929688 4.890625 -0.671875 C 4.679688 -0.421875 4.429688 -0.222656 4.140625 -0.078125 C 3.847656 0.0546875 3.507812 0.125 3.125 0.125 C 2.832031 0.125 2.570312 0.0859375 2.34375 0.015625 C 2.125 -0.046875 1.957031 -0.125 1.84375 -0.21875 L 1.84375 1.984375 L 0.859375 1.984375 L 0.859375 -5.40625 C 1.066406 -5.46875 1.34375 -5.53125 1.6875 -5.59375 C 2.03125 -5.65625 2.421875 -5.6875 2.859375 -5.6875 C 3.253906 -5.6875 3.613281 -5.617188 3.9375 -5.484375 C 4.269531 -5.347656 4.550781 -5.15625 4.78125 -4.90625 C 5.019531 -4.65625 5.203125 -4.347656 5.328125 -3.984375 C 5.453125 -3.617188 5.515625 -3.210938 5.515625 -2.765625 Z M 5.515625 -2.765625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph1-24">
+<path style="stroke:none;" d="M 3.015625 -3.734375 L 4.15625 -7.421875 L 5.09375 -7.421875 C 5.25 -6.253906 5.359375 -5.054688 5.421875 -3.828125 C 5.492188 -2.609375 5.554688 -1.332031 5.609375 0 L 4.65625 0 C 4.644531 -0.425781 4.632812 -0.890625 4.625 -1.390625 C 4.625 -1.898438 4.613281 -2.421875 4.59375 -2.953125 C 4.582031 -3.492188 4.570312 -4.039062 4.5625 -4.59375 C 4.550781 -5.144531 4.539062 -5.679688 4.53125 -6.203125 L 3.4375 -2.8125 L 2.578125 -2.8125 L 1.46875 -6.203125 C 1.46875 -5.679688 1.457031 -5.144531 1.4375 -4.59375 C 1.425781 -4.050781 1.414062 -3.507812 1.40625 -2.96875 C 1.394531 -2.425781 1.382812 -1.898438 1.375 -1.390625 C 1.363281 -0.890625 1.351562 -0.425781 1.34375 0 L 0.390625 0 C 0.410156 -0.601562 0.4375 -1.222656 0.46875 -1.859375 C 0.5 -2.503906 0.535156 -3.144531 0.578125 -3.78125 C 0.617188 -4.414062 0.671875 -5.039062 0.734375 -5.65625 C 0.796875 -6.269531 0.863281 -6.859375 0.9375 -7.421875 L 1.84375 -7.421875 Z M 3.015625 -3.734375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-0">
+<path style="stroke:none;" d="M 0.640625 2.296875 L 0.640625 -9.171875 L 7.140625 -9.171875 L 7.140625 2.296875 Z M 1.375 1.578125 L 6.421875 1.578125 L 6.421875 -8.4375 L 1.375 -8.4375 Z M 1.375 1.578125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-1">
+<path style="stroke:none;" d="M 6.34375 -6.84375 L 6.34375 -5.75 C 6.007812 -5.925781 5.675781 -6.0625 5.34375 -6.15625 C 5.007812 -6.25 4.675781 -6.296875 4.34375 -6.296875 C 3.582031 -6.296875 2.992188 -6.050781 2.578125 -5.5625 C 2.160156 -5.082031 1.953125 -4.410156 1.953125 -3.546875 C 1.953125 -2.679688 2.160156 -2.007812 2.578125 -1.53125 C 2.992188 -1.050781 3.582031 -0.8125 4.34375 -0.8125 C 4.675781 -0.8125 5.007812 -0.851562 5.34375 -0.9375 C 5.675781 -1.03125 6.007812 -1.171875 6.34375 -1.359375 L 6.34375 -0.265625 C 6.019531 -0.117188 5.679688 -0.0078125 5.328125 0.0625 C 4.984375 0.144531 4.613281 0.1875 4.21875 0.1875 C 3.144531 0.1875 2.289062 -0.144531 1.65625 -0.8125 C 1.03125 -1.488281 0.71875 -2.398438 0.71875 -3.546875 C 0.71875 -4.703125 1.035156 -5.613281 1.671875 -6.28125 C 2.304688 -6.945312 3.179688 -7.28125 4.296875 -7.28125 C 4.648438 -7.28125 5 -7.242188 5.34375 -7.171875 C 5.6875 -7.097656 6.019531 -6.988281 6.34375 -6.84375 Z M 6.34375 -6.84375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-2">
+<path style="stroke:none;" d="M 5.34375 -6.015625 C 5.207031 -6.085938 5.0625 -6.140625 4.90625 -6.171875 C 4.757812 -6.210938 4.59375 -6.234375 4.40625 -6.234375 C 3.75 -6.234375 3.242188 -6.019531 2.890625 -5.59375 C 2.535156 -5.164062 2.359375 -4.550781 2.359375 -3.75 L 2.359375 0 L 1.1875 0 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6 C 2.597656 -6.4375 2.914062 -6.757812 3.3125 -6.96875 C 3.707031 -7.175781 4.1875 -7.28125 4.75 -7.28125 C 4.832031 -7.28125 4.921875 -7.273438 5.015625 -7.265625 C 5.109375 -7.253906 5.21875 -7.238281 5.34375 -7.21875 Z M 5.34375 -6.015625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-3">
+<path style="stroke:none;" d="M 3.984375 -6.296875 C 3.359375 -6.296875 2.863281 -6.050781 2.5 -5.5625 C 2.132812 -5.070312 1.953125 -4.398438 1.953125 -3.546875 C 1.953125 -2.691406 2.128906 -2.019531 2.484375 -1.53125 C 2.847656 -1.050781 3.347656 -0.8125 3.984375 -0.8125 C 4.597656 -0.8125 5.085938 -1.054688 5.453125 -1.546875 C 5.816406 -2.035156 6 -2.703125 6 -3.546875 C 6 -4.390625 5.816406 -5.054688 5.453125 -5.546875 C 5.085938 -6.046875 4.597656 -6.296875 3.984375 -6.296875 Z M 3.984375 -7.28125 C 4.992188 -7.28125 5.789062 -6.945312 6.375 -6.28125 C 6.957031 -5.625 7.25 -4.710938 7.25 -3.546875 C 7.25 -2.378906 6.957031 -1.460938 6.375 -0.796875 C 5.789062 -0.140625 4.992188 0.1875 3.984375 0.1875 C 2.960938 0.1875 2.160156 -0.140625 1.578125 -0.796875 C 1.003906 -1.460938 0.71875 -2.378906 0.71875 -3.546875 C 0.71875 -4.710938 1.003906 -5.625 1.578125 -6.28125 C 2.160156 -6.945312 2.960938 -7.28125 3.984375 -7.28125 Z M 3.984375 -7.28125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-4">
+<path style="stroke:none;" d="M 2.359375 -1.0625 L 2.359375 2.703125 L 1.1875 2.703125 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6.03125 C 2.597656 -6.457031 2.90625 -6.769531 3.28125 -6.96875 C 3.65625 -7.175781 4.101562 -7.28125 4.625 -7.28125 C 5.488281 -7.28125 6.191406 -6.9375 6.734375 -6.25 C 7.273438 -5.5625 7.546875 -4.660156 7.546875 -3.546875 C 7.546875 -2.429688 7.273438 -1.53125 6.734375 -0.84375 C 6.191406 -0.15625 5.488281 0.1875 4.625 0.1875 C 4.101562 0.1875 3.65625 0.0820312 3.28125 -0.125 C 2.90625 -0.332031 2.597656 -0.644531 2.359375 -1.0625 Z M 6.328125 -3.546875 C 6.328125 -4.410156 6.148438 -5.082031 5.796875 -5.5625 C 5.441406 -6.050781 4.957031 -6.296875 4.34375 -6.296875 C 3.726562 -6.296875 3.242188 -6.050781 2.890625 -5.5625 C 2.535156 -5.082031 2.359375 -4.410156 2.359375 -3.546875 C 2.359375 -2.691406 2.535156 -2.019531 2.890625 -1.53125 C 3.242188 -1.039062 3.726562 -0.796875 4.34375 -0.796875 C 4.957031 -0.796875 5.441406 -1.039062 5.796875 -1.53125 C 6.148438 -2.019531 6.328125 -2.691406 6.328125 -3.546875 Z M 6.328125 -3.546875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-5">
+<path style="stroke:none;" d="M 5.75 -6.90625 L 5.75 -5.796875 C 5.425781 -5.960938 5.085938 -6.085938 4.734375 -6.171875 C 4.378906 -6.253906 4.007812 -6.296875 3.625 -6.296875 C 3.039062 -6.296875 2.601562 -6.207031 2.3125 -6.03125 C 2.03125 -5.851562 1.890625 -5.585938 1.890625 -5.234375 C 1.890625 -4.960938 1.988281 -4.75 2.1875 -4.59375 C 2.394531 -4.445312 2.816406 -4.300781 3.453125 -4.15625 L 3.84375 -4.0625 C 4.675781 -3.882812 5.265625 -3.632812 5.609375 -3.3125 C 5.960938 -2.988281 6.140625 -2.539062 6.140625 -1.96875 C 6.140625 -1.300781 5.878906 -0.773438 5.359375 -0.390625 C 4.835938 -0.00390625 4.117188 0.1875 3.203125 0.1875 C 2.816406 0.1875 2.414062 0.148438 2 0.078125 C 1.59375 0.00390625 1.160156 -0.109375 0.703125 -0.265625 L 0.703125 -1.46875 C 1.140625 -1.238281 1.566406 -1.066406 1.984375 -0.953125 C 2.398438 -0.847656 2.8125 -0.796875 3.21875 -0.796875 C 3.769531 -0.796875 4.191406 -0.890625 4.484375 -1.078125 C 4.785156 -1.265625 4.9375 -1.53125 4.9375 -1.875 C 4.9375 -2.1875 4.828125 -2.425781 4.609375 -2.59375 C 4.398438 -2.769531 3.9375 -2.9375 3.21875 -3.09375 L 2.8125 -3.1875 C 2.082031 -3.34375 1.554688 -3.578125 1.234375 -3.890625 C 0.910156 -4.203125 0.75 -4.632812 0.75 -5.1875 C 0.75 -5.851562 0.984375 -6.367188 1.453125 -6.734375 C 1.929688 -7.097656 2.609375 -7.28125 3.484375 -7.28125 C 3.910156 -7.28125 4.316406 -7.25 4.703125 -7.1875 C 5.085938 -7.125 5.4375 -7.03125 5.75 -6.90625 Z M 5.75 -6.90625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-6">
+<path style="stroke:none;" d="M 4.453125 -3.578125 C 3.515625 -3.578125 2.863281 -3.46875 2.5 -3.25 C 2.132812 -3.03125 1.953125 -2.660156 1.953125 -2.140625 C 1.953125 -1.734375 2.085938 -1.40625 2.359375 -1.15625 C 2.628906 -0.914062 3 -0.796875 3.46875 -0.796875 C 4.113281 -0.796875 4.632812 -1.023438 5.03125 -1.484375 C 5.425781 -1.941406 5.625 -2.550781 5.625 -3.3125 L 5.625 -3.578125 Z M 6.78125 -4.0625 L 6.78125 0 L 5.625 0 L 5.625 -1.078125 C 5.351562 -0.648438 5.019531 -0.332031 4.625 -0.125 C 4.226562 0.0820312 3.738281 0.1875 3.15625 0.1875 C 2.425781 0.1875 1.847656 -0.015625 1.421875 -0.421875 C 0.992188 -0.835938 0.78125 -1.382812 0.78125 -2.0625 C 0.78125 -2.863281 1.046875 -3.46875 1.578125 -3.875 C 2.117188 -4.28125 2.921875 -4.484375 3.984375 -4.484375 L 5.625 -4.484375 L 5.625 -4.609375 C 5.625 -5.140625 5.445312 -5.550781 5.09375 -5.84375 C 4.738281 -6.144531 4.238281 -6.296875 3.59375 -6.296875 C 3.1875 -6.296875 2.789062 -6.242188 2.40625 -6.140625 C 2.019531 -6.046875 1.648438 -5.898438 1.296875 -5.703125 L 1.296875 -6.78125 C 1.722656 -6.945312 2.140625 -7.070312 2.546875 -7.15625 C 2.953125 -7.238281 3.34375 -7.28125 3.71875 -7.28125 C 4.75 -7.28125 5.515625 -7.015625 6.015625 -6.484375 C 6.523438 -5.953125 6.78125 -5.144531 6.78125 -4.0625 Z M 6.78125 -4.0625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-7">
+<path style="stroke:none;" d="M 1.21875 -9.875 L 2.390625 -9.875 L 2.390625 0 L 1.21875 0 Z M 1.21875 -9.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-8">
+<path style="stroke:none;" d="M 7.3125 -3.84375 L 7.3125 -3.28125 L 1.9375 -3.28125 C 1.988281 -2.46875 2.226562 -1.851562 2.65625 -1.4375 C 3.09375 -1.019531 3.695312 -0.8125 4.46875 -0.8125 C 4.914062 -0.8125 5.347656 -0.863281 5.765625 -0.96875 C 6.191406 -1.082031 6.613281 -1.25 7.03125 -1.46875 L 7.03125 -0.359375 C 6.613281 -0.179688 6.179688 -0.046875 5.734375 0.046875 C 5.296875 0.140625 4.851562 0.1875 4.40625 0.1875 C 3.269531 0.1875 2.367188 -0.140625 1.703125 -0.796875 C 1.046875 -1.460938 0.71875 -2.359375 0.71875 -3.484375 C 0.71875 -4.648438 1.03125 -5.570312 1.65625 -6.25 C 2.289062 -6.9375 3.140625 -7.28125 4.203125 -7.28125 C 5.160156 -7.28125 5.914062 -6.972656 6.46875 -6.359375 C 7.03125 -5.742188 7.3125 -4.90625 7.3125 -3.84375 Z M 6.140625 -4.1875 C 6.128906 -4.820312 5.945312 -5.332031 5.59375 -5.71875 C 5.25 -6.101562 4.789062 -6.296875 4.21875 -6.296875 C 3.5625 -6.296875 3.035156 -6.109375 2.640625 -5.734375 C 2.253906 -5.367188 2.03125 -4.851562 1.96875 -4.1875 Z M 6.140625 -4.1875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-9">
+<path style="stroke:none;" d="M 6.328125 -3.546875 C 6.328125 -4.410156 6.148438 -5.082031 5.796875 -5.5625 C 5.441406 -6.050781 4.957031 -6.296875 4.34375 -6.296875 C 3.726562 -6.296875 3.242188 -6.050781 2.890625 -5.5625 C 2.535156 -5.082031 2.359375 -4.410156 2.359375 -3.546875 C 2.359375 -2.691406 2.535156 -2.019531 2.890625 -1.53125 C 3.242188 -1.039062 3.726562 -0.796875 4.34375 -0.796875 C 4.957031 -0.796875 5.441406 -1.039062 5.796875 -1.53125 C 6.148438 -2.019531 6.328125 -2.691406 6.328125 -3.546875 Z M 2.359375 -6.03125 C 2.597656 -6.457031 2.90625 -6.769531 3.28125 -6.96875 C 3.65625 -7.175781 4.101562 -7.28125 4.625 -7.28125 C 5.488281 -7.28125 6.191406 -6.9375 6.734375 -6.25 C 7.273438 -5.5625 7.546875 -4.660156 7.546875 -3.546875 C 7.546875 -2.429688 7.273438 -1.53125 6.734375 -0.84375 C 6.191406 -0.15625 5.488281 0.1875 4.625 0.1875 C 4.101562 0.1875 3.65625 0.0820312 3.28125 -0.125 C 2.90625 -0.332031 2.597656 -0.644531 2.359375 -1.0625 L 2.359375 0 L 1.1875 0 L 1.1875 -9.875 L 2.359375 -9.875 Z M 2.359375 -6.03125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-10">
+<path style="stroke:none;" d="M 1.21875 -7.109375 L 2.390625 -7.109375 L 2.390625 0 L 1.21875 0 Z M 1.21875 -9.875 L 2.390625 -9.875 L 2.390625 -8.390625 L 1.21875 -8.390625 Z M 1.21875 -9.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-11">
+<path style="stroke:none;" d="M 7.140625 -4.296875 L 7.140625 0 L 5.96875 0 L 5.96875 -4.25 C 5.96875 -4.925781 5.835938 -5.429688 5.578125 -5.765625 C 5.316406 -6.097656 4.921875 -6.265625 4.390625 -6.265625 C 3.765625 -6.265625 3.269531 -6.0625 2.90625 -5.65625 C 2.539062 -5.257812 2.359375 -4.710938 2.359375 -4.015625 L 2.359375 0 L 1.1875 0 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6 C 2.640625 -6.425781 2.96875 -6.742188 3.34375 -6.953125 C 3.71875 -7.171875 4.15625 -7.28125 4.65625 -7.28125 C 5.46875 -7.28125 6.082031 -7.023438 6.5 -6.515625 C 6.925781 -6.015625 7.140625 -5.273438 7.140625 -4.296875 Z M 7.140625 -4.296875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-12">
+<path style="stroke:none;" d="M 5.90625 -3.640625 C 5.90625 -4.484375 5.726562 -5.132812 5.375 -5.59375 C 5.03125 -6.0625 4.539062 -6.296875 3.90625 -6.296875 C 3.28125 -6.296875 2.789062 -6.0625 2.4375 -5.59375 C 2.09375 -5.132812 1.921875 -4.484375 1.921875 -3.640625 C 1.921875 -2.796875 2.09375 -2.140625 2.4375 -1.671875 C 2.789062 -1.210938 3.28125 -0.984375 3.90625 -0.984375 C 4.539062 -0.984375 5.03125 -1.210938 5.375 -1.671875 C 5.726562 -2.140625 5.90625 -2.796875 5.90625 -3.640625 Z M 7.078125 -0.875 C 7.078125 0.332031 6.804688 1.226562 6.265625 1.8125 C 5.722656 2.40625 4.898438 2.703125 3.796875 2.703125 C 3.390625 2.703125 3.003906 2.671875 2.640625 2.609375 C 2.273438 2.546875 1.921875 2.453125 1.578125 2.328125 L 1.578125 1.1875 C 1.921875 1.375 2.257812 1.507812 2.59375 1.59375 C 2.925781 1.6875 3.265625 1.734375 3.609375 1.734375 C 4.378906 1.734375 4.953125 1.535156 5.328125 1.140625 C 5.710938 0.742188 5.90625 0.140625 5.90625 -0.671875 L 5.90625 -1.25 C 5.664062 -0.832031 5.351562 -0.519531 4.96875 -0.3125 C 4.59375 -0.101562 4.144531 0 3.625 0 C 2.75 0 2.046875 -0.332031 1.515625 -1 C 0.984375 -1.664062 0.71875 -2.546875 0.71875 -3.640625 C 0.71875 -4.734375 0.984375 -5.613281 1.515625 -6.28125 C 2.046875 -6.945312 2.75 -7.28125 3.625 -7.28125 C 4.144531 -7.28125 4.59375 -7.175781 4.96875 -6.96875 C 5.351562 -6.757812 5.664062 -6.445312 5.90625 -6.03125 L 5.90625 -7.109375 L 7.078125 -7.109375 Z M 7.078125 -0.875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-13">
+<path style="stroke:none;" d="M 5.125 -8.609375 C 4.1875 -8.609375 3.441406 -8.257812 2.890625 -7.5625 C 2.347656 -6.875 2.078125 -5.929688 2.078125 -4.734375 C 2.078125 -3.535156 2.347656 -2.585938 2.890625 -1.890625 C 3.441406 -1.203125 4.1875 -0.859375 5.125 -0.859375 C 6.050781 -0.859375 6.785156 -1.203125 7.328125 -1.890625 C 7.878906 -2.585938 8.15625 -3.535156 8.15625 -4.734375 C 8.15625 -5.929688 7.878906 -6.875 7.328125 -7.5625 C 6.785156 -8.257812 6.050781 -8.609375 5.125 -8.609375 Z M 5.125 -9.65625 C 6.445312 -9.65625 7.503906 -9.207031 8.296875 -8.3125 C 9.097656 -7.414062 9.5 -6.222656 9.5 -4.734375 C 9.5 -3.234375 9.097656 -2.035156 8.296875 -1.140625 C 7.503906 -0.253906 6.445312 0.1875 5.125 0.1875 C 3.789062 0.1875 2.722656 -0.253906 1.921875 -1.140625 C 1.128906 -2.035156 0.734375 -3.234375 0.734375 -4.734375 C 0.734375 -6.222656 1.128906 -7.414062 1.921875 -8.3125 C 2.722656 -9.207031 3.789062 -9.65625 5.125 -9.65625 Z M 5.125 -9.65625 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-14">
+<path style="stroke:none;" d="M 1.109375 -2.8125 L 1.109375 -7.109375 L 2.265625 -7.109375 L 2.265625 -2.84375 C 2.265625 -2.175781 2.394531 -1.671875 2.65625 -1.328125 C 2.925781 -0.992188 3.320312 -0.828125 3.84375 -0.828125 C 4.476562 -0.828125 4.976562 -1.023438 5.34375 -1.421875 C 5.707031 -1.828125 5.890625 -2.378906 5.890625 -3.078125 L 5.890625 -7.109375 L 7.0625 -7.109375 L 7.0625 0 L 5.890625 0 L 5.890625 -1.09375 C 5.609375 -0.65625 5.28125 -0.332031 4.90625 -0.125 C 4.53125 0.0820312 4.09375 0.1875 3.59375 0.1875 C 2.78125 0.1875 2.160156 -0.0664062 1.734375 -0.578125 C 1.316406 -1.085938 1.109375 -1.832031 1.109375 -2.8125 Z M 4.046875 -7.28125 Z M 4.046875 -7.28125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-15">
+<path style="stroke:none;" d="M 2.375 -9.125 L 2.375 -7.109375 L 4.78125 -7.109375 L 4.78125 -6.203125 L 2.375 -6.203125 L 2.375 -2.34375 C 2.375 -1.757812 2.453125 -1.382812 2.609375 -1.21875 C 2.773438 -1.0625 3.101562 -0.984375 3.59375 -0.984375 L 4.78125 -0.984375 L 4.78125 0 L 3.59375 0 C 2.6875 0 2.0625 -0.164062 1.71875 -0.5 C 1.375 -0.84375 1.203125 -1.457031 1.203125 -2.34375 L 1.203125 -6.203125 L 0.34375 -6.203125 L 0.34375 -7.109375 L 1.203125 -7.109375 L 1.203125 -9.125 Z M 2.375 -9.125 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-16">
+<path style="stroke:none;" d=""/>
+</symbol>
+<symbol overflow="visible" id="glyph2-17">
+<path style="stroke:none;" d="M 1.28125 -9.484375 L 6.71875 -9.484375 L 6.71875 -8.390625 L 2.5625 -8.390625 L 2.5625 -5.609375 L 6.3125 -5.609375 L 6.3125 -4.53125 L 2.5625 -4.53125 L 2.5625 0 L 1.28125 0 Z M 1.28125 -9.484375 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-18">
+<path style="stroke:none;" d="M 6.765625 -5.75 C 7.054688 -6.269531 7.40625 -6.65625 7.8125 -6.90625 C 8.21875 -7.15625 8.695312 -7.28125 9.25 -7.28125 C 9.988281 -7.28125 10.554688 -7.019531 10.953125 -6.5 C 11.359375 -5.976562 11.5625 -5.242188 11.5625 -4.296875 L 11.5625 0 L 10.390625 0 L 10.390625 -4.25 C 10.390625 -4.9375 10.265625 -5.441406 10.015625 -5.765625 C 9.773438 -6.097656 9.410156 -6.265625 8.921875 -6.265625 C 8.316406 -6.265625 7.835938 -6.0625 7.484375 -5.65625 C 7.128906 -5.257812 6.953125 -4.710938 6.953125 -4.015625 L 6.953125 0 L 5.78125 0 L 5.78125 -4.25 C 5.78125 -4.9375 5.660156 -5.441406 5.421875 -5.765625 C 5.179688 -6.097656 4.804688 -6.265625 4.296875 -6.265625 C 3.703125 -6.265625 3.226562 -6.0625 2.875 -5.65625 C 2.53125 -5.25 2.359375 -4.703125 2.359375 -4.015625 L 2.359375 0 L 1.1875 0 L 1.1875 -7.109375 L 2.359375 -7.109375 L 2.359375 -6 C 2.617188 -6.4375 2.9375 -6.757812 3.3125 -6.96875 C 3.6875 -7.175781 4.128906 -7.28125 4.640625 -7.28125 C 5.160156 -7.28125 5.597656 -7.148438 5.953125 -6.890625 C 6.316406 -6.628906 6.585938 -6.25 6.765625 -5.75 Z M 6.765625 -5.75 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-19">
+<path style="stroke:none;" d="M 6.953125 -9.171875 L 6.953125 -7.921875 C 6.472656 -8.148438 6.015625 -8.320312 5.578125 -8.4375 C 5.148438 -8.550781 4.734375 -8.609375 4.328125 -8.609375 C 3.628906 -8.609375 3.085938 -8.472656 2.703125 -8.203125 C 2.328125 -7.929688 2.140625 -7.546875 2.140625 -7.046875 C 2.140625 -6.628906 2.265625 -6.3125 2.515625 -6.09375 C 2.773438 -5.882812 3.253906 -5.710938 3.953125 -5.578125 L 4.734375 -5.421875 C 5.679688 -5.234375 6.382812 -4.910156 6.84375 -4.453125 C 7.300781 -3.992188 7.53125 -3.378906 7.53125 -2.609375 C 7.53125 -1.691406 7.222656 -0.992188 6.609375 -0.515625 C 5.992188 -0.046875 5.085938 0.1875 3.890625 0.1875 C 3.441406 0.1875 2.960938 0.132812 2.453125 0.03125 C 1.953125 -0.0703125 1.429688 -0.222656 0.890625 -0.421875 L 0.890625 -1.734375 C 1.410156 -1.441406 1.921875 -1.222656 2.421875 -1.078125 C 2.921875 -0.929688 3.410156 -0.859375 3.890625 -0.859375 C 4.628906 -0.859375 5.195312 -1 5.59375 -1.28125 C 5.988281 -1.570312 6.1875 -1.984375 6.1875 -2.515625 C 6.1875 -2.984375 6.039062 -3.347656 5.75 -3.609375 C 5.46875 -3.867188 5.003906 -4.066406 4.359375 -4.203125 L 3.578125 -4.359375 C 2.617188 -4.546875 1.925781 -4.84375 1.5 -5.25 C 1.070312 -5.65625 0.859375 -6.21875 0.859375 -6.9375 C 0.859375 -7.78125 1.148438 -8.441406 1.734375 -8.921875 C 2.328125 -9.410156 3.144531 -9.65625 4.1875 -9.65625 C 4.625 -9.65625 5.070312 -9.613281 5.53125 -9.53125 C 6 -9.445312 6.472656 -9.328125 6.953125 -9.171875 Z M 6.953125 -9.171875 "/>
+</symbol>
+<symbol overflow="visible" id="glyph2-20">
+<path style="stroke:none;" d="M 4.1875 0.65625 C 3.851562 1.507812 3.53125 2.0625 3.21875 2.3125 C 2.90625 2.570312 2.488281 2.703125 1.96875 2.703125 L 1.03125 2.703125 L 1.03125 1.734375 L 1.71875 1.734375 C 2.039062 1.734375 2.289062 1.65625 2.46875 1.5 C 2.644531 1.34375 2.835938 0.984375 3.046875 0.421875 L 3.265625 -0.109375 L 0.390625 -7.109375 L 1.625 -7.109375 L 3.84375 -1.546875 L 6.0625 -7.109375 L 7.3125 -7.109375 Z M 4.1875 0.65625 "/>
+</symbol>
+</g>
+</defs>
+<g id="surface268880">
+<rect x="0" y="0" width="774" height="152" style="fill:rgb(100%,100%,100%);fill-opacity:1;stroke:none;"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 21.75297 10.408118 L 26.433829 10.408118 L 26.433829 12.281165 L 21.75297 12.281165 Z M 21.75297 10.408118 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 29.079728 10.51222 L 32.829728 10.51222 L 32.829728 12.149915 L 29.079728 12.149915 Z M 29.079728 10.51222 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-1" x="20.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-2" x="30.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-3" x="40.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-4" x="50.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-5" x="60.171875" y="57.705621"/>
+ <use xlink:href="#glyph0-6" x="70.171875" y="57.705621"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-7" x="174.203125" y="60.053277"/>
+ <use xlink:href="#glyph0-8" x="184.203125" y="60.053277"/>
+</g>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 40.925236 10.544446 L 44.675236 10.544446 L 44.675236 12.090345 L 40.925236 12.090345 Z M 40.925236 10.544446 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 34.883439 10.536634 L 38.633439 10.536634 L 38.633439 12.120032 L 34.883439 12.120032 Z M 34.883439 10.536634 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 47.084806 10.484876 L 52.045743 10.484876 L 52.045743 12.130774 L 47.084806 12.130774 Z M 47.084806 10.484876 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 53.980118 10.376868 L 59.866642 10.376868 L 59.866642 12.279603 L 53.980118 12.279603 Z M 53.980118 10.376868 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(100%,100%,100%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 54.048478 13.825501 L 59.868009 13.825501 L 59.868009 15.490345 L 54.048478 15.490345 Z M 54.048478 13.825501 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 26.481876 11.338001 L 28.593009 11.332337 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 28.968009 11.33136 L 28.468595 11.582728 L 28.593009 11.332337 L 28.467228 11.082728 Z M 28.968009 11.33136 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 32.876798 11.329798 L 34.396525 11.328626 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 34.771525 11.328431 L 34.27172 11.578821 L 34.396525 11.328626 L 34.271329 11.078821 Z M 34.771525 11.328431 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 38.633439 11.328431 L 40.438517 11.319642 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 40.813517 11.317884 L 40.314689 11.570228 L 40.438517 11.319642 L 40.312345 11.070423 Z M 40.813517 11.317884 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 44.675236 11.317298 L 46.597892 11.309876 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 46.972892 11.308313 L 46.473868 11.560267 L 46.597892 11.309876 L 46.471915 11.060267 Z M 46.972892 11.308313 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 52.045743 11.307923 L 53.4934 11.323157 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 53.8684 11.327063 L 53.365861 11.57179 L 53.4934 11.323157 L 53.370939 11.07179 Z M 53.8684 11.327063 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-9" x="286.757813" y="60.611871"/>
+ <use xlink:href="#glyph0-10" x="296.757813" y="60.611871"/>
+ <use xlink:href="#glyph0-1" x="306.757813" y="60.611871"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph0-11" x="405.660156" y="59.904839"/>
+ <use xlink:href="#glyph0-10" x="415.660156" y="59.904839"/>
+ <use xlink:href="#glyph0-12" x="425.660156" y="59.904839"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph1-1" x="511.308594" y="58.064616"/>
+ <use xlink:href="#glyph1-2" x="517.308757" y="58.064616"/>
+ <use xlink:href="#glyph1-3" x="523.308919" y="58.064616"/>
+ <use xlink:href="#glyph1-4" x="529.309082" y="58.064616"/>
+ <use xlink:href="#glyph1-5" x="535.309245" y="58.064616"/>
+ <use xlink:href="#glyph1-6" x="541.309408" y="58.064616"/>
+ <use xlink:href="#glyph1-7" x="547.30957" y="58.064616"/>
+ <use xlink:href="#glyph1-8" x="553.309733" y="58.064616"/>
+ <use xlink:href="#glyph1-9" x="559.309896" y="58.064616"/>
+ <use xlink:href="#glyph1-10" x="565.310059" y="58.064616"/>
+ <use xlink:href="#glyph1-11" x="571.310221" y="58.064616"/>
+ <use xlink:href="#glyph1-12" x="577.310384" y="58.064616"/>
+ <use xlink:href="#glyph1-13" x="583.310547" y="58.064616"/>
+ <use xlink:href="#glyph1-8" x="589.31071" y="58.064616"/>
+ <use xlink:href="#glyph1-14" x="595.310872" y="58.064616"/>
+</g>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 45.671915 11.342298 L 45.655704 11.342298 L 45.655704 14.657923 L 53.561759 14.657923 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<path style="fill-rule:evenodd;fill:rgb(0%,0%,0%);fill-opacity:1;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-miterlimit:10;" d="M 53.936759 14.657923 L 53.436759 14.907923 L 53.561759 14.657923 L 53.436759 14.407923 Z M 53.936759 14.657923 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph1-15" x="657.078125" y="57.724772"/>
+ <use xlink:href="#glyph1-16" x="663.078288" y="57.724772"/>
+ <use xlink:href="#glyph1-10" x="669.078451" y="57.724772"/>
+ <use xlink:href="#glyph1-6" x="675.078613" y="57.724772"/>
+ <use xlink:href="#glyph1-8" x="681.078776" y="57.724772"/>
+ <use xlink:href="#glyph1-17" x="687.078939" y="57.724772"/>
+ <use xlink:href="#glyph1-11" x="693.079102" y="57.724772"/>
+ <use xlink:href="#glyph1-18" x="699.079264" y="57.724772"/>
+ <use xlink:href="#glyph1-19" x="705.079427" y="57.724772"/>
+ <use xlink:href="#glyph1-4" x="711.07959" y="57.724772"/>
+ <use xlink:href="#glyph1-20" x="717.079753" y="57.724772"/>
+ <use xlink:href="#glyph1-21" x="723.079915" y="57.724772"/>
+ <use xlink:href="#glyph1-22" x="729.080078" y="57.724772"/>
+ <use xlink:href="#glyph1-23" x="735.080241" y="57.724772"/>
+ <use xlink:href="#glyph1-21" x="741.080404" y="57.724772"/>
+ <use xlink:href="#glyph1-22" x="747.080566" y="57.724772"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph1-24" x="673.335938" y="124.170085"/>
+ <use xlink:href="#glyph1-11" x="679.3361" y="124.170085"/>
+ <use xlink:href="#glyph1-13" x="685.336263" y="124.170085"/>
+ <use xlink:href="#glyph1-8" x="691.336426" y="124.170085"/>
+ <use xlink:href="#glyph1-4" x="697.336589" y="124.170085"/>
+ <use xlink:href="#glyph1-20" x="703.336751" y="124.170085"/>
+ <use xlink:href="#glyph1-21" x="709.336914" y="124.170085"/>
+ <use xlink:href="#glyph1-22" x="715.337077" y="124.170085"/>
+ <use xlink:href="#glyph1-23" x="721.33724" y="124.170085"/>
+ <use xlink:href="#glyph1-21" x="727.337402" y="124.170085"/>
+ <use xlink:href="#glyph1-22" x="733.337565" y="124.170085"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-1" x="168.71875" y="31.959093"/>
+ <use xlink:href="#glyph2-2" x="175.866102" y="31.959093"/>
+ <use xlink:href="#glyph2-3" x="180.92551" y="31.959093"/>
+ <use xlink:href="#glyph2-4" x="188.879069" y="31.959093"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-5" x="288.109375" y="31.681749"/>
+ <use xlink:href="#glyph2-1" x="294.882378" y="31.681749"/>
+ <use xlink:href="#glyph2-6" x="302.029731" y="31.681749"/>
+ <use xlink:href="#glyph2-7" x="309.996039" y="31.681749"/>
+ <use xlink:href="#glyph2-8" x="313.607964" y="31.681749"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-5" x="535.988281" y="33.365343"/>
+ <use xlink:href="#glyph2-1" x="542.761285" y="33.365343"/>
+ <use xlink:href="#glyph2-6" x="549.908637" y="33.365343"/>
+ <use xlink:href="#glyph2-7" x="557.874946" y="33.365343"/>
+ <use xlink:href="#glyph2-8" x="561.486871" y="33.365343"/>
+</g>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-9" x="26.695313" y="32.365343"/>
+ <use xlink:href="#glyph2-10" x="34.947266" y="32.365343"/>
+ <use xlink:href="#glyph2-11" x="38.559191" y="32.365343"/>
+ <use xlink:href="#glyph2-11" x="46.798394" y="32.365343"/>
+ <use xlink:href="#glyph2-10" x="55.037598" y="32.365343"/>
+ <use xlink:href="#glyph2-11" x="58.649523" y="32.365343"/>
+ <use xlink:href="#glyph2-12" x="66.888726" y="32.365343"/>
+</g>
+<path style="fill:none;stroke-width:0.1;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(0%,0%,0%);stroke-opacity:1;stroke-dasharray:0.14,0.14;stroke-miterlimit:10;" d="M 45.300431 9.486438 L 60.373478 9.486438 L 60.373478 16.175696 L 45.300431 16.175696 Z M 45.300431 9.486438 " transform="matrix(20,0,0,20,-434.059401,-172.47877)"/>
+<g style="fill:rgb(0%,0%,0%);fill-opacity:1;">
+ <use xlink:href="#glyph2-13" x="532.003906" y="11.904405"/>
+ <use xlink:href="#glyph2-14" x="542.236382" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="550.475586" y="11.904405"/>
+ <use xlink:href="#glyph2-4" x="555.5727" y="11.904405"/>
+ <use xlink:href="#glyph2-14" x="563.824653" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="572.063856" y="11.904405"/>
+ <use xlink:href="#glyph2-16" x="577.16097" y="11.904405"/>
+ <use xlink:href="#glyph2-17" x="581.293186" y="11.904405"/>
+ <use xlink:href="#glyph2-3" x="588.307346" y="11.904405"/>
+ <use xlink:href="#glyph2-2" x="596.260905" y="11.904405"/>
+ <use xlink:href="#glyph2-18" x="601.377279" y="11.904405"/>
+ <use xlink:href="#glyph2-6" x="614.040853" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="622.007161" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="627.104275" y="11.904405"/>
+ <use xlink:href="#glyph2-8" x="632.201389" y="11.904405"/>
+ <use xlink:href="#glyph2-2" x="640.199436" y="11.904405"/>
+ <use xlink:href="#glyph2-16" x="645.544217" y="11.904405"/>
+ <use xlink:href="#glyph2-19" x="649.676432" y="11.904405"/>
+ <use xlink:href="#glyph2-20" x="657.928385" y="11.904405"/>
+ <use xlink:href="#glyph2-5" x="665.621799" y="11.904405"/>
+ <use xlink:href="#glyph2-15" x="672.394803" y="11.904405"/>
+ <use xlink:href="#glyph2-8" x="677.491916" y="11.904405"/>
+ <use xlink:href="#glyph2-18" x="685.489963" y="11.904405"/>
+</g>
+</g>
+</svg>
diff --git a/Documentation/media/v4l-drivers/vimc.rst b/Documentation/media/v4l-drivers/vimc.rst
index 406417680db5..8f5d7f8d83bb 100644
--- a/Documentation/media/v4l-drivers/vimc.rst
+++ b/Documentation/media/v4l-drivers/vimc.rst
@@ -76,27 +76,19 @@ vimc-capture:
* 1 Pad sink
* 1 Pad source
-Module options
----------------
-Vimc has a few module parameters to configure the driver. You should pass
-those arguments to each subdevice, not to the vimc module. For example::
+Module options
+--------------
- vimc_subdevice.param=value
+Vimc has a module parameter to configure the driver.
-* ``vimc_scaler.sca_mult=<unsigned int>``
+* ``sca_mult=<unsigned int>``
Image size multiplier factor to be used to multiply both width and
height, so the image size will be ``sca_mult^2`` bigger than the
original one. Currently, only supports scaling up (the default value
is 3).
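+
+  For example, a hypothetical invocation passing a larger multiplier when
+  loading the module might be::
+
+      modprobe vimc sca_mult=4
+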
-* ``vimc_debayer.deb_mean_win_size=<unsigned int>``
-
- Window size to calculate the mean. Note: the window size needs to be an
- odd number, as the main pixel stays in the center of the window,
- otherwise the next odd number is considered (the default value is 3).
-
Source code documentation
-------------------------
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index adeb6b7a15cb..cb6ccf91776e 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -141,6 +141,10 @@ replace symbol V4L2_CTRL_TYPE_H264_PPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_SCALING_MATRIX :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_H264_DECODE_PARAMS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_HEVC_SPS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_HEVC_PPS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_AREA :c:type:`v4l2_ctrl_type`
# V4L2 capability defines
replace define V4L2_CAP_VIDEO_CAPTURE device-capabilities
@@ -434,6 +438,7 @@ replace define V4L2_DEC_CMD_START decoder-cmds
replace define V4L2_DEC_CMD_STOP decoder-cmds
replace define V4L2_DEC_CMD_PAUSE decoder-cmds
replace define V4L2_DEC_CMD_RESUME decoder-cmds
+replace define V4L2_DEC_CMD_FLUSH decoder-cmds
replace define V4L2_DEC_CMD_START_MUTE_AUDIO decoder-cmds
replace define V4L2_DEC_CMD_PAUSE_TO_BLACK decoder-cmds
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 1adbb8a371c7..ec3b5865c1be 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -63,7 +63,6 @@ CONTENTS
- Compiler barrier.
- CPU memory barriers.
- - MMIO write barrier.
(*) Implicit kernel memory barriers.
@@ -75,7 +74,6 @@ CONTENTS
(*) Inter-CPU acquiring barrier effects.
- Acquires vs memory accesses.
- - Acquires vs I/O accesses.
(*) Where are memory barriers needed?
@@ -492,10 +490,9 @@ And a couple of implicit varieties:
happen before it completes.
The use of ACQUIRE and RELEASE operations generally precludes the need
- for other sorts of memory barrier (but note the exceptions mentioned in
- the subsection "MMIO write barrier"). In addition, a RELEASE+ACQUIRE
- pair is -not- guaranteed to act as a full memory barrier. However, after
- an ACQUIRE on a given variable, all memory accesses preceding any prior
+ for other sorts of memory barrier. In addition, a RELEASE+ACQUIRE pair is
+ -not- guaranteed to act as a full memory barrier. However, after an
+ ACQUIRE on a given variable, all memory accesses preceding any prior
RELEASE on that same variable are guaranteed to be visible. In other
words, within a given variable's critical section, all accesses of all
previous critical sections for that variable are guaranteed to have
@@ -1512,8 +1509,6 @@ levels:
(*) CPU memory barriers.
- (*) MMIO write barrier.
-
COMPILER BARRIER
----------------
diff --git a/Documentation/mips/ingenic-tcu.rst b/Documentation/mips/ingenic-tcu.rst
index c4ef4c45aade..c5a646b14450 100644
--- a/Documentation/mips/ingenic-tcu.rst
+++ b/Documentation/mips/ingenic-tcu.rst
@@ -68,4 +68,4 @@ and frameworks can be controlled from the same registers, all of these
drivers access their registers through the same regmap.
For more information regarding the devicetree bindings of the TCU drivers,
-have a look at Documentation/devicetree/bindings/mfd/ingenic,tcu.txt.
+have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt.
diff --git a/Documentation/misc-devices/xilinx_sdfec.rst b/Documentation/misc-devices/xilinx_sdfec.rst
new file mode 100644
index 000000000000..2245fcfa224d
--- /dev/null
+++ b/Documentation/misc-devices/xilinx_sdfec.rst
@@ -0,0 +1,291 @@
+.. SPDX-License-Identifier: GPL-2.0+
+====================
+Xilinx SD-FEC Driver
+====================
+
+Overview
+========
+
+This driver supports the SD-FEC Integrated Block for Zynq |Ultrascale+ (TM)| RFSoCs.
+
+.. |Ultrascale+ (TM)| unicode:: Ultrascale+ U+2122
+ .. with trademark sign
+
+For a full description of SD-FEC core features, see the `SD-FEC Product Guide (PG256) <https://www.xilinx.com/cgi-bin/docs/ipdoc?c=sd_fec;v=latest;d=pg256-sdfec-integrated-block.pdf>`_
+
+This driver supports the following features:
+
+ - Retrieval of the Integrated Block configuration and status information
+ - Configuration of LDPC codes
+ - Configuration of Turbo decoding
+ - Monitoring errors
+
+Missing features, known issues, and limitations of the SD-FEC driver are as
+follows:
+
+ - Only allows a single open file descriptor to any instance of the driver at any time
+ - Reset of the SD-FEC Integrated Block is not controlled by this driver
+ - Does not support shared LDPC code table wraparound
+
+The device tree entry is described in:
+`linux-xlnx/Documentation/devicetree/bindings/misc/xlnx,sd-fec.txt <https://github.com/Xilinx/linux-xlnx/blob/master/Documentation/devicetree/bindings/misc/xlnx%2Csd-fec.txt>`_
+
+
+Modes of Operation
+------------------
+
+The driver works with the SD-FEC core in two modes of operation:
+
+ - Run-time configuration
+ - Programmable Logic (PL) initialization
+
+
+Run-time Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+For Run-time configuration, the role of the driver is to allow the software application to do the following:
+
+ - Load the configuration parameters for either Turbo decode or LDPC encode or decode
+ - Activate the SD-FEC core
+ - Monitor the SD-FEC core for errors
+ - Retrieve the status and configuration of the SD-FEC core
+
+Programmable Logic (PL) Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For PL initialization, supporting logic loads the configuration parameters for
+either Turbo decode or LDPC encode or decode. The role of the driver is to allow
+the software application to do the following:
+
+ - Activate the SD-FEC core
+ - Monitor the SD-FEC core for errors
+ - Retrieve the status and configuration of the SD-FEC core
+
+
+Driver Structure
+================
+
+The driver provides a platform device where the ``probe`` and ``remove``
+operations are provided.
+
+ - probe: Updates the configuration register with device-tree entries and determines the current activation state of the core, for example, whether the core is bypassed or has been started.
+
+
+The driver defines the following driver file operations to provide user
+application interfaces:
+
+ - open: Implements the restriction that only a single file descriptor can be open per SD-FEC instance at any time
+ - release: Allows another file descriptor to be opened, that is, after the current file descriptor is closed
+ - poll: Provides a method to monitor for SD-FEC Error events
+ - unlocked_ioctl: Provides the following ioctl commands that allow the application to configure the SD-FEC core:
+
+ - :c:macro:`XSDFEC_START_DEV`
+ - :c:macro:`XSDFEC_STOP_DEV`
+ - :c:macro:`XSDFEC_GET_STATUS`
+ - :c:macro:`XSDFEC_SET_IRQ`
+ - :c:macro:`XSDFEC_SET_TURBO`
+ - :c:macro:`XSDFEC_ADD_LDPC_CODE_PARAMS`
+ - :c:macro:`XSDFEC_GET_CONFIG`
+ - :c:macro:`XSDFEC_SET_ORDER`
+ - :c:macro:`XSDFEC_SET_BYPASS`
+ - :c:macro:`XSDFEC_IS_ACTIVE`
+ - :c:macro:`XSDFEC_CLEAR_STATS`
+ - :c:macro:`XSDFEC_SET_DEFAULT_CONFIG`
+
+
+Driver Usage
+============
+
+
+Overview
+--------
+
+After opening the driver, the user should find out what operations need to be
+performed to configure and activate the SD-FEC core and determine the
+configuration of the driver.
+The following outlines the flow the user should perform:
+
+ - Determine Configuration
+ - Set the order, if not already configured as desired
+ - Set Turbo decode or LDPC encode or decode parameters, depending on how the
+   SD-FEC core is configured, provided the SD-FEC has not been configured for
+   PL initialization
+ - Enable interrupts, if not already enabled
+ - Bypass the SD-FEC core, if required
+ - Start the SD-FEC core if not already started
+ - Get the SD-FEC core status
+ - Monitor for interrupts
+ - Stop the SD-FEC core
+
+
+Note: When monitoring for interrupts, if a critical error requiring a reset is detected, the driver will be required to load the default configuration.
+
+
+Determine Configuration
+-----------------------
+
+Determine the configuration of the SD-FEC core by using the ioctl
+:c:macro:`XSDFEC_GET_CONFIG`.
+
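+As an illustration, a minimal user-space sketch might look as follows (the
+device node name ``/dev/xsdfec0`` is a hypothetical example; the actual name
+depends on the device tree):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <misc/xilinx_sdfec.h>
+
+    int get_sdfec_config(struct xsdfec_config *cfg)
+    {
+            int fd = open("/dev/xsdfec0", O_RDWR); /* hypothetical node name */
+
+            if (fd < 0)
+                    return -1;
+            /* Fills *cfg with, for example, the configured code type */
+            if (ioctl(fd, XSDFEC_GET_CONFIG, cfg) < 0) {
+                    close(fd);
+                    return -1;
+            }
+            return fd; /* keep the single permitted descriptor open */
+    }
+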
+Set the Order
+-------------
+
+Setting the order determines how the order of Blocks can change from input to output.
+
+Setting the order is done by using the ioctl :c:macro:`XSDFEC_SET_ORDER`
+
+Setting the order can only be done if the following restrictions are met:
+
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
+
+Add LDPC Codes
+--------------
+
+The following steps indicate how to add LDPC codes to the SD-FEC core:
+
+ - Use the auto-generated parameters to fill the :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>` for the desired LDPC code.
+ - Set the SC, QA, and LA table offsets for the LDPC parameters and the parameters in the structure :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>`
+ - Set the desired Code Id value in the structure :c:type:`struct xsdfec_ldpc_params <xsdfec_ldpc_params>`
+ - Add the LDPC Code Parameters using the ioctl :c:macro:`XSDFEC_ADD_LDPC_CODE_PARAMS` (see the sketch after the restrictions below)
+ - For the applied LDPC Code Parameters, use the function :c:func:`xsdfec_calculate_shared_ldpc_table_entry_size` to calculate the size of the shared LDPC code tables. This allows the user to determine the shared table usage, so that unused table areas can be selected as the table offsets for the next LDPC code parameters.
+ - Repeat for each LDPC code parameter.
+
+Adding LDPC codes can only be done if the following restrictions are met:
+
+ - The ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as LDPC
+ - The ``code_wr_protect`` of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates that write protection is not enabled
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not started
+
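+A hedged sketch of this flow, reusing the includes and the descriptor from the
+earlier configuration sketch (``sc_off``, ``la_off``, ``qc_off``, and
+``code_id`` are assumed field names taken from the uapi header; the remaining
+members carry the auto-generated code parameters):
+
+.. code-block:: c
+
+    int add_ldpc_code(int fd, const struct xsdfec_ldpc_params *generated)
+    {
+            /* Start from the auto-generated parameters for the desired code */
+            struct xsdfec_ldpc_params ldpc = *generated;
+
+            ldpc.sc_off = 0;  /* SC table offset, chosen from unused table space */
+            ldpc.la_off = 0;  /* LA table offset */
+            ldpc.qc_off = 0;  /* QC table offset (field name assumed) */
+            ldpc.code_id = 0; /* desired Code Id */
+
+            return ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, &ldpc);
+    }
+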
+Set Turbo Decode
+----------------
+
+Configuring the Turbo decode parameters is done by using the ioctl :c:macro:`XSDFEC_SET_TURBO`, with auto-generated parameters filling the :c:type:`struct xsdfec_turbo <xsdfec_turbo>` for the desired Turbo code.
+
+Adding Turbo decode can only be done if the following restrictions are met:
+
+ - The ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as TURBO
+ - The ``state`` member of struct :c:type:`xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
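+A minimal sketch, under the assumption that the auto-generated parameters
+arrive as a ready-to-use structure (the member layout comes from the uapi
+header and is not spelled out here):
+
+.. code-block:: c
+
+    int set_turbo(int fd, const struct xsdfec_turbo *generated)
+    {
+            /* Auto-generated Turbo code parameters; member layout per uapi header */
+            struct xsdfec_turbo turbo = *generated;
+
+            return ioctl(fd, XSDFEC_SET_TURBO, &turbo);
+    }
+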
+Enable Interrupts
+-----------------
+
+Enabling or disabling interrupts is done by using the ioctl :c:macro:`XSDFEC_SET_IRQ`. The members of the parameter passed to the ioctl, :c:type:`struct xsdfec_irq <xsdfec_irq>`, are used to set and clear different categories of interrupts. The categories of interrupt are controlled as follows:
+
+ - ``enable_isr`` controls the ``tlast`` interrupts
+ - ``enable_ecc_isr`` controls the ECC interrupts
+
+If the ``code`` member of :c:type:`struct xsdfec_config <xsdfec_config>` filled by the ioctl :c:macro:`XSDFEC_GET_CONFIG` indicates the SD-FEC core is configured as TURBO, then enabling the ECC interrupts is not required.
+
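+A minimal sketch, assuming both interrupt categories are wanted:
+
+.. code-block:: c
+
+    int enable_sdfec_irqs(int fd)
+    {
+            struct xsdfec_irq irq = {
+                    .enable_isr = 1,     /* tlast interrupts */
+                    .enable_ecc_isr = 1, /* ECC interrupts (LDPC only) */
+            };
+
+            return ioctl(fd, XSDFEC_SET_IRQ, &irq);
+    }
+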
+Bypass the SD-FEC
+-----------------
+
+Bypassing the SD-FEC is done by using the ioctl :c:macro:`XSDFEC_SET_BYPASS`
+
+Bypassing the SD-FEC can only be done if the following restrictions are met:
+
+ - The ``state`` member of :c:type:`struct xsdfec_status <xsdfec_status>` filled by the ioctl :c:macro:`XSDFEC_GET_STATUS` indicates the SD-FEC core has not STARTED
+
+Start the SD-FEC core
+---------------------
+
+Start the SD-FEC core by using the ioctl :c:macro:`XSDFEC_START_DEV`
+
+Get SD-FEC Status
+-----------------
+
+Get the SD-FEC status of the device by using the ioctl :c:macro:`XSDFEC_GET_STATUS`, which will fill the :c:type:`struct xsdfec_status <xsdfec_status>`
+
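+A short sketch combining the start and status steps (assuming
+:c:macro:`XSDFEC_START_DEV` takes no argument):
+
+.. code-block:: c
+
+    int start_and_check(int fd)
+    {
+            struct xsdfec_status status;
+
+            if (ioctl(fd, XSDFEC_START_DEV) < 0)
+                    return -1;
+            if (ioctl(fd, XSDFEC_GET_STATUS, &status) < 0)
+                    return -1;
+            return status.state; /* current activation state of the core */
+    }
+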
+Monitor for Interrupts
+----------------------
+
+ - Use the poll system call to monitor for an interrupt. The poll system call waits for an interrupt to wake it up or times out if no interrupt occurs.
+ - On return, the poll ``revents`` field will indicate whether stats and/or state have been updated
+ - ``POLLPRI`` indicates a critical error and the user should use :c:macro:`XSDFEC_GET_STATUS` and :c:macro:`XSDFEC_GET_STATS` to confirm
+ - ``POLLRDNORM`` indicates a non-critical error has occurred and the user should use :c:macro:`XSDFEC_GET_STATS` to confirm
+ - Get stats by using the ioctl :c:macro:`XSDFEC_GET_STATS`
+ - For critical error the ``isr_err_count`` or ``uecc_count`` member of :c:type:`struct xsdfec_stats <xsdfec_stats>` is non-zero
+ - For non-critical errors the ``cecc_count`` member of :c:type:`struct xsdfec_stats <xsdfec_stats>` is non-zero
+ - Get state by using the ioctl :c:macro:`XSDFEC_GET_STATUS`
+ - For a critical error the ``state`` of :c:type:`xsdfec_status <xsdfec_status>` will indicate a Reset Is Required
+ - Clear stats by using the ioctl :c:macro:`XSDFEC_CLEAR_STATS`
+
+If a critical error requiring a reset is detected, the application is required to call the ioctl :c:macro:`XSDFEC_SET_DEFAULT_CONFIG` after the reset; it is not required to call the ioctl :c:macro:`XSDFEC_STOP_DEV`
+
+Note: Using the poll system call prevents busy-looping on :c:macro:`XSDFEC_GET_STATS` and :c:macro:`XSDFEC_GET_STATUS`; a sketch of the monitoring flow follows
+
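+The sketch below reuses the includes from the configuration sketch, plus
+``<poll.h>``; ``poll`` and the ``revents`` flags are standard POSIX, and the
+stats members are those named above:
+
+.. code-block:: c
+
+    #include <poll.h>
+
+    int wait_for_sdfec_event(int fd)
+    {
+            struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLRDNORM };
+            struct xsdfec_stats stats;
+
+            if (poll(&pfd, 1, -1) < 0) /* block until an interrupt wakes us */
+                    return -1;
+            if (!(pfd.revents & (POLLPRI | POLLRDNORM)))
+                    return 0;
+            if (ioctl(fd, XSDFEC_GET_STATS, &stats) < 0)
+                    return -1;
+            if (stats.isr_err_count || stats.uecc_count)
+                    return 1; /* critical error: a reset is required */
+            if (stats.cecc_count)
+                    return 2; /* non-critical ECC error */
+            ioctl(fd, XSDFEC_CLEAR_STATS); /* assumed to take no argument */
+            return 0;
+    }
+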
+Stop the SD-FEC Core
+---------------------
+
+Stop the device by using the ioctl :c:macro:`XSDFEC_STOP_DEV`
+
+Set the Default Configuration
+-----------------------------
+
+Load the default configuration by using the ioctl :c:macro:`XSDFEC_SET_DEFAULT_CONFIG` to restore the driver.
+
+Limitations
+-----------
+
+Users should not duplicate SD-FEC device file descriptors, for example by calling fork() or dup() in a process that has created an SD-FEC file descriptor.
+
+Driver IOCTLs
+==============
+
+.. c:macro:: XSDFEC_START_DEV
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_START_DEV
+
+.. c:macro:: XSDFEC_STOP_DEV
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_STOP_DEV
+
+.. c:macro:: XSDFEC_GET_STATUS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_STATUS
+
+.. c:macro:: XSDFEC_SET_IRQ
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_IRQ
+
+.. c:macro:: XSDFEC_SET_TURBO
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_TURBO
+
+.. c:macro:: XSDFEC_ADD_LDPC_CODE_PARAMS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_ADD_LDPC_CODE_PARAMS
+
+.. c:macro:: XSDFEC_GET_CONFIG
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_CONFIG
+
+.. c:macro:: XSDFEC_SET_ORDER
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_ORDER
+
+.. c:macro:: XSDFEC_SET_BYPASS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_BYPASS
+
+.. c:macro:: XSDFEC_IS_ACTIVE
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_IS_ACTIVE
+
+.. c:macro:: XSDFEC_CLEAR_STATS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_CLEAR_STATS
+
+.. c:macro:: XSDFEC_GET_STATS
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_GET_STATS
+
+.. c:macro:: XSDFEC_SET_DEFAULT_CONFIG
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :doc: XSDFEC_SET_DEFAULT_CONFIG
+
+Driver Type Definitions
+=======================
+
+.. kernel-doc:: include/uapi/misc/xilinx_sdfec.h
+ :internal:
diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/mellanox/mlx5.rst
index 7599dceba9f1..f575a49790e8 100644
--- a/Documentation/networking/device_drivers/mellanox/mlx5.rst
+++ b/Documentation/networking/device_drivers/mellanox/mlx5.rst
@@ -279,7 +279,7 @@ mlx5 tracepoints
================
mlx5 driver provides internal trace points for tracking and debugging using
-kernel tracepoints interfaces (refer to Documentation/trace/ftrase.rst).
+kernel tracepoints interfaces (refer to Documentation/trace/ftrace.rst).
For the list of support mlx5 events check /sys/kernel/debug/tracing/events/mlx5/
diff --git a/Documentation/networking/devlink-trap.rst b/Documentation/networking/devlink-trap.rst
index dc9659ca06fa..03311849bfb1 100644
--- a/Documentation/networking/devlink-trap.rst
+++ b/Documentation/networking/devlink-trap.rst
@@ -233,7 +233,7 @@ help debug packet drops caused by these exceptions. The following list includes
links to the description of driver-specific traps registered by various device
drivers:
- * :doc:`/devlink-trap-netdevsim`
+ * :doc:`devlink-trap-netdevsim`
Generic Packet Trap Groups
==========================
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 099a55bd1432..fd26788e8c96 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -904,8 +904,9 @@ ip_local_port_range - 2 INTEGERS
Defines the local port range that is used by TCP and UDP to
choose the local port. The first number is the first, the
second the last local port number.
- If possible, it is better these numbers have different parity.
- (one even and one odd values)
+ If possible, it is better these numbers have different parity
+ (one even and one odd value).
+ Must be greater than or equal to ip_unprivileged_port_start.
The default values are 32768 and 60999 respectively.
ip_local_reserved_ports - list of comma separated ranges
@@ -943,8 +944,8 @@ ip_unprivileged_port_start - INTEGER
This is a per-namespace sysctl. It defines the first
unprivileged port in the network namespace. Privileged ports
require root or CAP_NET_BIND_SERVICE in order to bind to them.
- To disable all privileged ports, set this to 0. It may not
- overlap with the ip_local_reserved_ports range.
+ To disable all privileged ports, set this to 0. They must not
+ overlap with the ip_local_port_range.
Default: 1024
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index cda1c0a0492a..e0a7c7af6525 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -73,7 +73,7 @@ The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin
electrical signal interface using a synchronous 125Mhz clock signal and several
data lines. Due to this design decision, a 1.5ns to 2ns delay must be added
between the clock line (RXC or TXC) and the data lines to let the PHY (clock
-sink) have enough setup and hold times to sample the data lines correctly. The
+sink) have a large enough setup and hold time to sample the data lines correctly. The
PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let
the PHY driver and optionally the MAC driver, implement the required delay. The
values of phy_interface_t must be understood from the perspective of the PHY
diff --git a/Documentation/networking/ppp_generic.txt b/Documentation/networking/ppp_generic.txt
index 61daf4b39600..fd563aff5fc9 100644
--- a/Documentation/networking/ppp_generic.txt
+++ b/Documentation/networking/ppp_generic.txt
@@ -378,6 +378,8 @@ an interface unit are:
CONFIG_PPP_FILTER option is enabled, the set of packets which reset
the transmit and receive idle timers is restricted to those which
pass the `active' packet filter.
+ Two versions of this command exist, to deal with user space
+ expecting times as either 32-bit or 64-bit time_t seconds.
* PPPIOCSMAXCID sets the maximum connection-ID parameter (and thus the
number of connection slots) for the TCP header compressor and
diff --git a/Documentation/nvdimm/maintainer-entry-profile.rst b/Documentation/nvdimm/maintainer-entry-profile.rst
new file mode 100644
index 000000000000..77081fd9be95
--- /dev/null
+++ b/Documentation/nvdimm/maintainer-entry-profile.rst
@@ -0,0 +1,59 @@
+LIBNVDIMM Maintainer Entry Profile
+==================================
+
+Overview
+--------
+The libnvdimm subsystem manages persistent memory across multiple
+architectures. The mailing list is tracked by patchwork here:
+https://patchwork.kernel.org/project/linux-nvdimm/list/
+...and that instance is configured to give feedback to submitters on
+patch acceptance and upstream merge. Patches are merged to either the
+'libnvdimm-fixes' or the 'libnvdimm-for-next' branch. Those branches are
+available here:
+https://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git/
+
+In general, patches can be submitted against the latest -rc; however, if
+the incoming code change depends on other pending changes, then the
+patch should be based on the libnvdimm-for-next branch. However, since
+persistent memory sits at the intersection of storage and memory there
+are cases where patches are more suitable to be merged through a
+Filesystem or the Memory Management tree. When in doubt, copy the nvdimm
+list and the maintainers will help route.
+
+Submissions will be exposed to the kbuild robot for compile regression
+testing. It helps to get a success notification from that infrastructure
+before submitting, but it is not required.
+
+
+Submit Checklist Addendum
+-------------------------
+There are unit tests for the subsystem via the ndctl utility:
+https://github.com/pmem/ndctl
+Those tests need to be passed before the patches go upstream, but not
+necessarily before initial posting. Contact the list if you need help
+getting the test environment set up.
+
+ACPI Device Specific Methods (_DSM)
+-----------------------------------
+Before patches enabling a new _DSM family will be considered, it must
+be assigned a format-interface-code from the NVDIMM Sub-team of the ACPI
+Specification Working Group. In general, the stance of the subsystem is
+to push back on the proliferation of NVDIMM command sets, so strongly
+consider implementing support for an existing command set. See
+drivers/acpi/nfit/nfit.h for the set of supported command sets.
+
+
+Key Cycle Dates
+---------------
+New submissions can be sent at any time, but if they intend to hit the
+next merge window they should be sent before -rc4, and ideally
+stabilized in the libnvdimm-for-next branch by -rc6. Of course, if a
+patch set requires more than 2 weeks of review, -rc4 is already too
+late, and some patches may require multiple development cycles to
+review.
+
+
+Review Cadence
+--------------
+In general, please wait up to one week before pinging for feedback. A
+private mail reminder is preferred. Alternatively, ask other
+developers who have Reviewed-by tags for libnvdimm changes to take a
+look and offer their opinion.
diff --git a/Documentation/power/drivers-testing.rst b/Documentation/power/drivers-testing.rst
index e53f1999fc39..d77d2894f9fe 100644
--- a/Documentation/power/drivers-testing.rst
+++ b/Documentation/power/drivers-testing.rst
@@ -39,9 +39,10 @@ c) Compile the driver directly into the kernel and try the test modes of
d) Attempt to hibernate with the driver compiled directly into the kernel
in the "reboot", "shutdown" and "platform" modes.
-e) Try the test modes of suspend (see: Documentation/power/basic-pm-debugging.rst,
- 2). [As far as the STR tests are concerned, it should not matter whether or
- not the driver is built as a module.]
+e) Try the test modes of suspend (see:
+ Documentation/power/basic-pm-debugging.rst, 2). [As far as the STR tests are
+ concerned, it should not matter whether or not the driver is built as a
+ module.]
f) Attempt to suspend to RAM using the s2ram tool with the driver loaded
(see: Documentation/power/basic-pm-debugging.rst, 2).
diff --git a/Documentation/power/freezing-of-tasks.rst b/Documentation/power/freezing-of-tasks.rst
index ef110fe55e82..8bd693399834 100644
--- a/Documentation/power/freezing-of-tasks.rst
+++ b/Documentation/power/freezing-of-tasks.rst
@@ -215,30 +215,31 @@ VI. Are there any precautions to be taken to prevent freezing failures?
Yes, there are.
-First of all, grabbing the 'system_transition_mutex' lock to mutually exclude a piece of code
-from system-wide sleep such as suspend/hibernation is not encouraged.
-If possible, that piece of code must instead hook onto the suspend/hibernation
-notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
-(kernel/cpu.c) for an example.
-
-However, if that is not feasible, and grabbing 'system_transition_mutex' is deemed necessary,
-it is strongly discouraged to directly call mutex_[un]lock(&system_transition_mutex) since
-that could lead to freezing failures, because if the suspend/hibernate code
-successfully acquired the 'system_transition_mutex' lock, and hence that other entity failed
-to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
-state. As a consequence, the freezer would not be able to freeze that task,
-leading to freezing failure.
+First of all, grabbing the 'system_transition_mutex' lock to mutually exclude a
+piece of code from system-wide sleep such as suspend/hibernation is not
+encouraged. If possible, that piece of code must instead hook onto the
+suspend/hibernation notifiers to achieve mutual exclusion. Look at the
+CPU-Hotplug code (kernel/cpu.c) for an example.
+
+However, if that is not feasible, and grabbing 'system_transition_mutex' is
+deemed necessary, it is strongly discouraged to directly call
+mutex_[un]lock(&system_transition_mutex) since that could lead to freezing
+failures, because if the suspend/hibernate code successfully acquired the
+'system_transition_mutex' lock, and hence that other entity failed to acquire
+the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE state. As a
+consequence, the freezer would not be able to freeze that task, leading to
+freezing failure.
However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
since they ask the freezer to skip freezing this task, since it is anyway
-"frozen enough" as it is blocked on 'system_transition_mutex', which will be released
-only after the entire suspend/hibernation sequence is complete.
-So, to summarize, use [un]lock_system_sleep() instead of directly using
+"frozen enough" as it is blocked on 'system_transition_mutex', which will be
+released only after the entire suspend/hibernation sequence is complete. So, to
+summarize, use [un]lock_system_sleep() instead of directly using
mutex_[un]lock(&system_transition_mutex). That would prevent freezing failures.
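+
+As a minimal sketch of that advice (the critical section is
+illustrative)::
+
+    lock_system_sleep();
+    /* code that must not run concurrently with suspend/hibernation */
+    unlock_system_sleep();
+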
V. Miscellaneous
================
/sys/power/pm_freeze_timeout controls how long it will cost at most to freeze
-all user space processes or all freezable kernel threads, in unit of millisecond.
-The default value is 20000, with range of unsigned integer.
+all user space processes or all freezable kernel threads, in
+milliseconds. The default value is 20000, within the range of an
+unsigned integer.
diff --git a/Documentation/power/opp.rst b/Documentation/power/opp.rst
index 209c7613f5a4..e3cc4f349ea8 100644
--- a/Documentation/power/opp.rst
+++ b/Documentation/power/opp.rst
@@ -73,19 +73,21 @@ factors. Example usage: Thermal management or other exceptional situations where
SoC framework might choose to disable a higher frequency OPP to safely continue
operations until that OPP could be re-enabled if possible.
-OPP library facilitates this concept in it's implementation. The following
+The OPP library facilitates this concept in its implementation. The following
operational functions operate only on available opps:
-opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
+opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq,
+dev_pm_opp_get_opp_count
-dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
-be used for dev_pm_opp_enable/disable functions to make an opp available as required.
+dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer
+which can then be used for dev_pm_opp_enable/disable functions to make an
+opp available as required.
WARNING: Users of OPP library should refresh their availability count using
-get_opp_count if dev_pm_opp_enable/disable functions are invoked for a device, the
-exact mechanism to trigger these or the notification mechanism to other
-dependent subsystems such as cpufreq are left to the discretion of the SoC
-specific framework which uses the OPP library. Similar care needs to be taken
-care to refresh the cpufreq table in cases of these operations.
+get_opp_count if dev_pm_opp_enable/disable functions are invoked for a
+device; the exact mechanism to trigger these or the notification mechanism
+to other dependent subsystems such as cpufreq is left to the discretion of
+the SoC specific framework which uses the OPP library. Similar care needs
+to be taken to refresh the cpufreq table in cases of these operations.
2. Initial OPP List Registration
================================
@@ -99,11 +101,11 @@ OPPs dynamically using the dev_pm_opp_enable / disable functions.
dev_pm_opp_add
Add a new OPP for a specific domain represented by the device pointer.
The OPP is defined using the frequency and voltage. Once added, the OPP
- is assumed to be available and control of it's availability can be done
- with the dev_pm_opp_enable/disable functions. OPP library internally stores
- and manages this information in the opp struct. This function may be
- used by SoC framework to define a optimal list as per the demands of
- SoC usage environment.
+ is assumed to be available and control of its availability can be done
+ with the dev_pm_opp_enable/disable functions. The OPP library
+ internally stores and manages this information in the opp struct.
+ This function may be used by the SoC framework to define an optimal
+ list as per the demands of the SoC usage environment.
WARNING:
Do not use this function in interrupt context.
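+
+As an illustrative sketch (the frequencies and voltages are made up),
+an initial OPP list registration could look like::
+
+    static int my_soc_init_opps(struct device *dev)
+    {
+        int ret;
+
+        /* 1 GHz at 975000 uV */
+        ret = dev_pm_opp_add(dev, 1000000000, 975000);
+        if (ret)
+            return ret;
+
+        /* 1.1 GHz at 1000000 uV */
+        return dev_pm_opp_add(dev, 1100000000, 1000000);
+    }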
@@ -354,7 +356,7 @@ struct dev_pm_opp
struct device
This is used to identify a domain to the OPP layer. The
- nature of the device and it's implementation is left to the user of
+ nature of the device and its implementation is left to the user of
OPP library such as the SoC framework.
Overall, in a simplistic view, the data structure operations is represented as
diff --git a/Documentation/power/pci.rst b/Documentation/power/pci.rst
index 0e2ef7429304..0924d29636ad 100644
--- a/Documentation/power/pci.rst
+++ b/Documentation/power/pci.rst
@@ -130,8 +130,8 @@ a full power-on reset sequence and the power-on defaults are restored to the
device by hardware just as at initial power up.
PCI devices supporting the PCI PM Spec can be programmed to generate PMEs
-while in a low-power state (D1-D3), but they are not required to be capable
-of generating PMEs from all supported low-power states. In particular, the
+while in any power state (D0-D3), but they are not required to be capable
+of generating PMEs from all supported power states. In particular, the
capability of generating PMEs from D3cold is optional and depends on the
presence of additional voltage (3.3Vaux) allowing the device to remain
sufficiently active to generate a wakeup signal.
@@ -426,12 +426,12 @@ pm->runtime_idle() callback.
2.4. System-Wide Power Transitions
----------------------------------
There are a few different types of system-wide power transitions, described in
-Documentation/driver-api/pm/devices.rst. Each of them requires devices to be handled
-in a specific way and the PM core executes subsystem-level power management
-callbacks for this purpose. They are executed in phases such that each phase
-involves executing the same subsystem-level callback for every device belonging
-to the given subsystem before the next phase begins. These phases always run
-after tasks have been frozen.
+Documentation/driver-api/pm/devices.rst. Each of them requires devices to be
+handled in a specific way and the PM core executes subsystem-level power
+management callbacks for this purpose. They are executed in phases such that
+each phase involves executing the same subsystem-level callback for every device
+belonging to the given subsystem before the next phase begins. These phases
+always run after tasks have been frozen.
2.4.1. System Suspend
^^^^^^^^^^^^^^^^^^^^^
@@ -600,17 +600,17 @@ using the following PCI bus type's callbacks::
respectively.
-The first of them, pci_pm_thaw_noirq(), is analogous to pci_pm_resume_noirq(),
-but it doesn't put the device into the full power state and doesn't attempt to
-restore its standard configuration registers. It also executes the device
-driver's pm->thaw_noirq() callback, if defined, instead of pm->resume_noirq().
+The first of them, pci_pm_thaw_noirq(), is analogous to pci_pm_resume_noirq().
+It puts the device into the full power state and restores its standard
+configuration registers. It also executes the device driver's pm->thaw_noirq()
+callback, if defined, instead of pm->resume_noirq().
The pci_pm_thaw() routine is similar to pci_pm_resume(), but it runs the device
driver's pm->thaw() callback instead of pm->resume(). It is executed
asynchronously for different PCI devices that don't depend on each other in a
known way.
-The complete phase it the same as for system resume.
+The complete phase is the same as for system resume.
After saving the image, devices need to be powered down before the system can
enter the target sleep state (ACPI S4 for ACPI-based systems). This is done in
@@ -636,12 +636,12 @@ System restore requires a hibernation image to be loaded into memory and the
pre-hibernation memory contents to be restored before the pre-hibernation system
activity can be resumed.
-As described in Documentation/driver-api/pm/devices.rst, the hibernation image is loaded
-into memory by a fresh instance of the kernel, called the boot kernel, which in
-turn is loaded and run by a boot loader in the usual way. After the boot kernel
-has loaded the image, it needs to replace its own code and data with the code
-and data of the "hibernated" kernel stored within the image, called the image
-kernel. For this purpose all devices are frozen just like before creating
+As described in Documentation/driver-api/pm/devices.rst, the hibernation image
+is loaded into memory by a fresh instance of the kernel, called the boot kernel,
+which in turn is loaded and run by a boot loader in the usual way. After the
+boot kernel has loaded the image, it needs to replace its own code and data with
+the code and data of the "hibernated" kernel stored within the image, called the
+image kernel. For this purpose all devices are frozen just like before creating
the image during hibernation, in the
prepare, freeze, freeze_noirq
@@ -691,12 +691,12 @@ controlling the runtime power management of their devices.
At the time of this writing there are two ways to define power management
callbacks for a PCI device driver, the recommended one, based on using a
-dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and the
-"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
-.resume() callbacks from struct pci_driver are used. The legacy approach,
-however, doesn't allow one to define runtime power management callbacks and is
-not really suitable for any new drivers. Therefore it is not covered by this
-document (refer to the source code to learn more about it).
+dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and
+the "legacy" one, in which the .suspend() and .resume() callbacks from struct
+pci_driver are used. The legacy approach, however, doesn't allow one to define
+runtime power management callbacks and is not really suitable for any new
+drivers. Therefore it is not covered by this document (refer to the source code
+to learn more about it).
It is recommended that all PCI device drivers define a struct dev_pm_ops object
containing pointers to power management (PM) callbacks that will be executed by
diff --git a/Documentation/power/pm_qos_interface.rst b/Documentation/power/pm_qos_interface.rst
index 3097694fba69..0d62d506caf0 100644
--- a/Documentation/power/pm_qos_interface.rst
+++ b/Documentation/power/pm_qos_interface.rst
@@ -8,8 +8,8 @@ one of the parameters.
Two different PM QoS frameworks are available:
1. PM QoS classes for cpu_dma_latency
-2. the per-device PM QoS framework provides the API to manage the per-device latency
-constraints and PM QoS flags.
+2. The per-device PM QoS framework provides the API to manage the
+ per-device latency constraints and PM QoS flags.
Each parameter has defined units:
@@ -47,14 +47,14 @@ void pm_qos_add_request(handle, param_class, target_value):
pm_qos API functions.
void pm_qos_update_request(handle, new_target_value):
- Will update the list element pointed to by the handle with the new target value
- and recompute the new aggregated target, calling the notification tree if the
- target is changed.
+ Will update the list element pointed to by the handle with the new target
+ value and recompute the new aggregated target, calling the notification tree
+ if the target is changed.
void pm_qos_remove_request(handle):
- Will remove the element. After removal it will update the aggregate target and
- call the notification tree if the target was changed as a result of removing
- the request.
+ Will remove the element. After removal it will update the aggregate target
+ and call the notification tree if the target was changed as a result of
+ removing the request.
int pm_qos_request(param_class):
Returns the aggregated value for a given PM QoS class.
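+
+As a hedged sketch of the request lifecycle described above (the class
+and the values are illustrative; see linux/pm_qos.h)::
+
+    static struct pm_qos_request my_qos_req;
+
+    static void my_device_start(void)
+    {
+        /* cap CPU/DMA latency at 20 usec while the device is active */
+        pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
+    }
+
+    static void my_device_stop(void)
+    {
+        pm_qos_remove_request(&my_qos_req);
+    }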
@@ -167,9 +167,9 @@ int dev_pm_qos_expose_flags(device, value)
change the value of the PM_QOS_FLAG_NO_POWER_OFF flag.
void dev_pm_qos_hide_flags(device)
- Drop the request added by dev_pm_qos_expose_flags() from the device's PM QoS list
- of flags and remove sysfs attribute pm_qos_no_power_off from the device's power
- directory.
+ Drop the request added by dev_pm_qos_expose_flags() from the device's PM QoS
+ list of flags and remove sysfs attribute pm_qos_no_power_off from the device's
+ power directory.
Notification mechanisms:
@@ -179,8 +179,8 @@ int dev_pm_qos_add_notifier(device, notifier, type):
Adds a notification callback function for the device for a particular request
type.
- The callback is called when the aggregated value of the device constraints list
- is changed.
+ The callback is called when the aggregated value of the device constraints
+ list is changed.
int dev_pm_qos_remove_notifier(device, notifier, type):
Removes the notification callback function for the device.
diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst
index 2c2ec99b5088..ab8406c84254 100644
--- a/Documentation/power/runtime_pm.rst
+++ b/Documentation/power/runtime_pm.rst
@@ -268,8 +268,8 @@ defined in include/linux/pm.h:
`unsigned int runtime_auto;`
- if set, indicates that the user space has allowed the device driver to
power manage the device at run time via the /sys/devices/.../power/control
- `interface;` it may only be modified with the help of the pm_runtime_allow()
- and pm_runtime_forbid() helper functions
+ `interface;` it may only be modified with the help of the
+ pm_runtime_allow() and pm_runtime_forbid() helper functions
`unsigned int no_callbacks;`
- indicates that the device does not use the runtime PM callbacks (see
diff --git a/Documentation/power/suspend-and-cpuhotplug.rst b/Documentation/power/suspend-and-cpuhotplug.rst
index 7ac8e1f549f4..572d968c5375 100644
--- a/Documentation/power/suspend-and-cpuhotplug.rst
+++ b/Documentation/power/suspend-and-cpuhotplug.rst
@@ -106,8 +106,8 @@ execution during resume):
* Release system_transition_mutex lock.
-It is to be noted here that the system_transition_mutex lock is acquired at the very
-beginning, when we are just starting out to suspend, and then released only
+It is to be noted here that the system_transition_mutex lock is acquired at the
+very beginning, when we are just starting out to suspend, and then released only
after the entire cycle is complete (i.e., suspend + resume).
::
@@ -165,7 +165,8 @@ Important files and functions/entry points:
- kernel/power/process.c : freeze_processes(), thaw_processes()
- kernel/power/suspend.c : suspend_prepare(), suspend_enter(), suspend_finish()
-- kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](), [disable|enable]_nonboot_cpus()
+- kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](),
+ [disable|enable]_nonboot_cpus()
diff --git a/Documentation/power/swsusp.rst b/Documentation/power/swsusp.rst
index d000312f6965..8524f079e05c 100644
--- a/Documentation/power/swsusp.rst
+++ b/Documentation/power/swsusp.rst
@@ -118,7 +118,8 @@ In a really perfect world::
echo 1 > /proc/acpi/sleep # for standby
echo 2 > /proc/acpi/sleep # for suspend to ram
- echo 3 > /proc/acpi/sleep # for suspend to ram, but with more power conservative
+ echo 3 > /proc/acpi/sleep # for suspend to ram, but more
+ # power-conservative
echo 4 > /proc/acpi/sleep # for suspend to disk
echo 5 > /proc/acpi/sleep # for shutdown unfriendly the system
@@ -192,8 +193,8 @@ Q:
A:
The freezing of tasks is a mechanism by which user space processes and some
- kernel threads are controlled during hibernation or system-wide suspend (on some
- architectures). See freezing-of-tasks.txt for details.
+ kernel threads are controlled during hibernation or system-wide suspend (on
+ some architectures). See freezing-of-tasks.txt for details.
Q:
What is the difference between "platform" and "shutdown"?
@@ -282,7 +283,8 @@ A:
suspend(PMSG_FREEZE): devices are frozen so that they don't interfere
with state snapshot
- state snapshot: copy of whole used memory is taken with interrupts disabled
+ state snapshot: copy of whole used memory is taken with interrupts
+ disabled
resume(): devices are woken up so that we can write image to swap
@@ -353,8 +355,8 @@ Q:
A:
Generally, yes, you can. However, it requires you to use the "resume=" and
- "resume_offset=" kernel command line parameters, so the resume from a swap file
- cannot be initiated from an initrd or initramfs image. See
+ "resume_offset=" kernel command line parameters, so the resume from a swap
+ file cannot be initiated from an initrd or initramfs image. See
swsusp-and-swap-files.txt for details.
Q:
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
index db7b6a880f52..ba5edb3211c0 100644
--- a/Documentation/powerpc/index.rst
+++ b/Documentation/powerpc/index.rst
@@ -19,6 +19,7 @@ powerpc
firmware-assisted-dump
hvcs
isa-versions
+ kaslr-booke32
mpc52xx
pci_iov_resource_on_powernv
pmu-ebb
diff --git a/Documentation/powerpc/kaslr-booke32.rst b/Documentation/powerpc/kaslr-booke32.rst
new file mode 100644
index 000000000000..8b259fdfdf03
--- /dev/null
+++ b/Documentation/powerpc/kaslr-booke32.rst
@@ -0,0 +1,42 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+KASLR for Freescale BookE32
+===========================
+
+The acronym KASLR stands for Kernel Address Space Layout Randomization.
+
+This document tries to explain the implementation of KASLR for
+Freescale BookE32. KASLR is a security feature that deters exploit
+attempts relying on knowledge of the location of kernel internals.
+
+Since CONFIG_RELOCATABLE is already supported, what we need to do is
+map or copy the kernel to a proper place and relocate it. Freescale
+Book-E parts expect lowmem to be mapped by fixed TLB entries (TLB1).
+The TLB1 entries are not suitable for mapping the kernel directly in a
+randomized region, so we choose to copy the kernel to a proper place
+and then restart in order to relocate.
+
+Entropy is derived from the banner and timer base, which will change on
+every build and boot. This is not very secure, so the bootloader may
+additionally pass entropy via the /chosen/kaslr-seed node in the device
+tree.
+
+We will use the first 512M of the low memory to randomize the kernel
+image. The memory will be split into 64M zones. We will use the lower 8
+bits of the entropy to decide the index of the 64M zone. Then we choose
+a 16K-aligned offset inside the 64M zone to put the kernel in::
+
+ KERNELBASE
+
+ |--> 64M <--|
+ | |
+ +---------------+ +----------------+---------------+
+ | |....| |kernel| | |
+ +---------------+ +----------------+---------------+
+ | |
+ |-----> offset <-----|
+
+ kernstart_virt_addr
+
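+The zone/offset arithmetic can be sketched as follows (an illustration
+only; the names and the exact entropy mixing here are not the kernel's)::
+
+    /* hypothetical sketch of the randomization described above */
+    #define ZONE_SIZE   (64UL << 20)        /* 64M zones */
+    #define ZONE_COUNT  8                   /* first 512M of lowmem */
+    #define KERN_ALIGN  (16UL << 10)        /* 16K alignment */
+
+    unsigned long pick_kernel_offset(unsigned long long entropy)
+    {
+        unsigned long zone = (entropy & 0xff) % ZONE_COUNT;
+        unsigned long offset = (entropy >> 8) % ZONE_SIZE;
+
+        offset &= ~(KERN_ALIGN - 1);        /* keep 16K alignment */
+        return zone * ZONE_SIZE + offset;
+    }
+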
+To enable KASLR, set CONFIG_RANDOMIZE_BASE=y. If KASLR is enabled and you
+want to disable it at runtime, add "nokaslr" to the kernel cmdline.
diff --git a/Documentation/ioctl/botching-up-ioctls.rst b/Documentation/process/botching-up-ioctls.rst
index ac697fef3545..2d4829b2fb09 100644
--- a/Documentation/ioctl/botching-up-ioctls.rst
+++ b/Documentation/process/botching-up-ioctls.rst
@@ -46,7 +46,7 @@ will need to add a 32-bit compat layer:
conversion or worse, fiddle the raw __u64 through your code since that
diminishes the checking tools like sparse can provide. The macro
u64_to_user_ptr can be used in the kernel to avoid warnings about integers
- and pointres of different sizes.
+ and pointers of different sizes.
Basics
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index a3c3349046c4..799580acc8de 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -240,7 +240,7 @@ an involved disclosed party. The current ambassadors list:
============= ========================================================
ARM
- AMD
+ AMD Tom Lendacky <tom.lendacky@amd.com>
IBM
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org>
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index e2c9ffc682c5..21aa7d5358e6 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -46,6 +46,7 @@ Other guides to the community that are of interest to most developers are:
kernel-docs
deprecated
embargoed-hardware-issues
+ maintainers
These are some overall technical guides that have been put here for now for
lack of a better place.
@@ -57,6 +58,7 @@ lack of a better place.
adding-syscalls
magic-number
volatile-considered-harmful
+ botching-up-ioctls
clang-format
.. only:: subproject and html
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
index 547bbf28e615..eee9b44553b3 100644
--- a/Documentation/process/magic-number.rst
+++ b/Documentation/process/magic-number.rst
@@ -81,7 +81,6 @@ FF_MAGIC 0x4646 fc_info ``drivers/net/ip
ISICOM_MAGIC 0x4d54 isi_port ``include/linux/isicom.h``
PTY_MAGIC 0x5001 ``drivers/char/pty.c``
PPP_MAGIC 0x5002 ppp ``include/linux/if_pppvar.h``
-SERIAL_MAGIC 0x5301 async_struct ``include/linux/serial.h``
SSTATE_MAGIC 0x5302 serial_state ``include/linux/serial.h``
SLIP_MAGIC 0x5302 slip ``drivers/net/slip.h``
STRIP_MAGIC 0x5303 strip ``drivers/net/strip.c``
diff --git a/Documentation/process/maintainers.rst b/Documentation/process/maintainers.rst
new file mode 100644
index 000000000000..6174cfb4138f
--- /dev/null
+++ b/Documentation/process/maintainers.rst
@@ -0,0 +1 @@
+.. maintainers-include::
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index fb56297f70dc..ba5e944c7a63 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -782,7 +782,58 @@ helpful, you can use the https://lkml.kernel.org/ redirector (e.g., in
the cover email text) to link to an earlier version of the patch series.
-16) Sending ``git pull`` requests
+16) Providing base tree information
+-----------------------------------
+
+When other developers receive your patches and start the review process,
+it is often useful for them to know where in the tree history they
+should place your work. This is particularly useful for automated CI
+processes that attempt to run a series of tests in order to establish
+the quality of your submission before the maintainer starts the review.
+
+If you are using ``git format-patch`` to generate your patches, you can
+automatically include the base tree information in your submission by
+using the ``--base`` flag. The easiest and most convenient way to use
+this option is with topical branches::
+
+ $ git checkout -t -b my-topical-branch master
+ Branch 'my-topical-branch' set up to track local branch 'master'.
+ Switched to a new branch 'my-topical-branch'
+
+ [perform your edits and commits]
+
+ $ git format-patch --base=auto --cover-letter -o outgoing/ master
+ outgoing/0000-cover-letter.patch
+ outgoing/0001-First-Commit.patch
+ outgoing/...
+
+When you open ``outgoing/0000-cover-letter.patch`` for editing, you will
+notice that it will have the ``base-commit:`` trailer at the very
+bottom, which provides the reviewer and the CI tools enough information
+to properly perform ``git am`` without worrying about conflicts::
+
+ $ git checkout -b patch-review [base-commit-id]
+ Switched to a new branch 'patch-review'
+ $ git am patches.mbox
+ Applying: First Commit
+ Applying: ...
+
+Please see ``man git-format-patch`` for more information about this
+option.
+
+.. note::
+
+ The ``--base`` feature was introduced in git version 2.9.0.
+
+If you are not using git to format your patches, you can still include
+the same ``base-commit`` trailer to indicate the commit hash of the tree
+on which your work is based. You should add it either in the cover
+letter or in the first patch of the series and it should be placed
+either below the ``---`` line or at the very bottom of all other
+content, right before your email signature.
+
+
+17) Sending ``git pull`` requests
---------------------------------
If you have a series of patches, it may be most convenient to have the
diff --git a/Documentation/riscv/boot-image-header.rst b/Documentation/riscv/boot-image-header.rst
index 7b4d1d747585..518d46d2389d 100644
--- a/Documentation/riscv/boot-image-header.rst
+++ b/Documentation/riscv/boot-image-header.rst
@@ -21,7 +21,7 @@ The following 64-byte header is present in decompressed Linux kernel image::
u32 res1 = 0; /* Reserved */
u64 res2 = 0; /* Reserved */
u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
- u32 magic2 = 0x56534905; /* Magic number 2, little endian, "RSC\x05" */
+ u32 magic2 = 0x05435352; /* Magic number 2, little endian, "RSC\x05" */
u32 res4; /* Reserved for PE COFF offset */
This header format is compliant with PE/COFF header and largely inspired from
diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst
index 0cb0aa714545..dd9b99a025f7 100644
--- a/Documentation/scheduler/sched-stats.rst
+++ b/Documentation/scheduler/sched-stats.rst
@@ -28,7 +28,7 @@ of these will need to start with a baseline observation and then calculate
the change in the counters at each subsequent observation. A perl script
which does this for many of the fields is available at
- http://eaglet.rain.com/rick/linux/schedstat/
+ http://eaglet.pdxhosts.com/rick/linux/schedstat/
Note that any such script will necessarily be version-specific, as the main
reason to change versions is changes in the output format. For those wishing
@@ -164,4 +164,4 @@ report on how well a particular process or set of processes is faring
under the scheduler's policies. A simple version of such a program is
available at
- http://eaglet.rain.com/rick/linux/schedstat/v12/latency.c
+ http://eaglet.pdxhosts.com/rick/linux/schedstat/v12/latency.c
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index c1dd4939f4ae..2a4be1c3e6db 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1084,7 +1084,8 @@ of interest:
commands to the adapter.
this_id - scsi id of host (scsi initiator) or -1 if not known
sg_tablesize - maximum scatter gather elements allowed by host.
- 0 implies scatter gather not supported by host
+ Set this to SG_ALL or less to avoid chained SG lists.
+ Must be at least 1.
max_sectors - maximum number of sectors (usually 512 bytes) allowed
in a single SCSI command. The default value of 0 leads
to a setting of SCSI_DEFAULT_MAX_SECTORS (defined in
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index d6d8b0b756b6..d9b0b859018b 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -1102,7 +1102,7 @@ payload contents" for more information.
See also Documentation/security/keys/request-key.rst.
- * To search for a key in a specific domain, call:
+ * To search for a key in a specific domain, call::
struct key *request_key_tag(const struct key_type *type,
const char *description,
diff --git a/Documentation/security/lsm.rst b/Documentation/security/lsm.rst
index ad4dfd020e0d..aadf47c808c0 100644
--- a/Documentation/security/lsm.rst
+++ b/Documentation/security/lsm.rst
@@ -56,7 +56,7 @@ the infrastructure to support security modules. The LSM kernel patch
also moves most of the capabilities logic into an optional security
module, with the system defaulting to the traditional superuser logic.
This capabilities module is discussed further in
-`LSM Capabilities Module <#cap>`__.
+`LSM Capabilities Module`_.
The LSM kernel patch adds security fields to kernel data structures and
inserts calls to hook functions at critical points in the kernel code to
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 132f5eb9b530..f169d58ca019 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -805,6 +805,7 @@ destructor and PCI entries. Example code is shown first, below.
return -EBUSY;
}
chip->irq = pci->irq;
+ card->sync_irq = chip->irq;
/* (2) initialization of the chip hardware */
.... /* (not implemented in this document) */
@@ -965,6 +966,15 @@ usually like the following:
return IRQ_HANDLED;
}
+After requesting the IRQ, you can pass it to the ``card->sync_irq``
+field:
+::
+
+ card->sync_irq = chip->irq;
+
+This allows the PCM core to perform :c:func:`synchronize_irq()`
+automatically at the necessary timing, e.g. at ``hw_free``.
+See the later section `sync_stop callback`_ for details.
Now let's write the corresponding destructor for the resources above.
The role of destructor is simple: disable the hardware (if already
@@ -1270,21 +1280,23 @@ shows only the skeleton, how to build up the PCM interfaces.
/* the hardware-specific codes will be here */
....
return 0;
-
}
/* hw_params callback */
static int snd_mychip_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
+ /* the hardware-specific codes will be here */
+ ....
+ return 0;
}
/* hw_free callback */
static int snd_mychip_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_pages(substream);
+ /* the hardware-specific codes will be here */
+ ....
+ return 0;
}
/* prepare callback */
@@ -1339,7 +1351,6 @@ shows only the skeleton, how to build up the PCM interfaces.
static struct snd_pcm_ops snd_mychip_playback_ops = {
.open = snd_mychip_playback_open,
.close = snd_mychip_playback_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mychip_pcm_hw_params,
.hw_free = snd_mychip_pcm_hw_free,
.prepare = snd_mychip_pcm_prepare,
@@ -1351,7 +1362,6 @@ shows only the skeleton, how to build up the PCM interfaces.
static struct snd_pcm_ops snd_mychip_capture_ops = {
.open = snd_mychip_capture_open,
.close = snd_mychip_capture_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mychip_pcm_hw_params,
.hw_free = snd_mychip_pcm_hw_free,
.prepare = snd_mychip_pcm_prepare,
@@ -1382,9 +1392,9 @@ shows only the skeleton, how to build up the PCM interfaces.
&snd_mychip_capture_ops);
/* pre-allocation of buffers */
/* NOTE: this may fail */
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
@@ -1454,7 +1464,6 @@ The operators are defined typically like this:
static struct snd_pcm_ops snd_mychip_playback_ops = {
.open = snd_mychip_pcm_open,
.close = snd_mychip_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mychip_pcm_hw_params,
.hw_free = snd_mychip_pcm_hw_free,
.prepare = snd_mychip_pcm_prepare,
@@ -1465,13 +1474,14 @@ The operators are defined typically like this:
All the callbacks are described in the Operators_ subsection.
After setting the operators, you probably will want to pre-allocate the
-buffer. For the pre-allocation, simply call the following:
+buffer and set up the managed allocation mode.
+For that, simply call the following:
::
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
+ 64*1024, 64*1024);
It will allocate a buffer up to 64kB as default. Buffer management
details will be described in the later section `Buffer and Memory
@@ -1621,8 +1631,7 @@ For the operators (callbacks) of each sound driver, most of these
records are supposed to be read-only. Only the PCM middle-layer changes
/ updates them. The exceptions are the hardware description (hw) DMA
buffer information and the private data. Besides, if you use the
-standard buffer allocation method via
-:c:func:`snd_pcm_lib_malloc_pages()`, you don't need to set the
+standard managed buffer allocation mode, you don't need to set the
DMA buffer information by yourself.
In the sections below, important records are explained.
@@ -1776,8 +1785,8 @@ the physical address of the buffer. This field is specified only when
the buffer is a linear buffer. ``dma_bytes`` holds the size of buffer
in bytes. ``dma_private`` is used for the ALSA DMA allocator.
-If you use a standard ALSA function,
-:c:func:`snd_pcm_lib_malloc_pages()`, for allocating the buffer,
+If you use either the managed buffer allocation mode or the standard
+API function :c:func:`snd_pcm_lib_malloc_pages()` for allocating the buffer,
these fields are set by the ALSA middle layer, and you should *not*
change them by yourself. You can read them but not write them. On the
other hand, if you want to allocate the buffer by yourself, you'll
@@ -1911,7 +1920,10 @@ ioctl callback
~~~~~~~~~~~~~~
This is used for any special call to pcm ioctls. But usually you can
-pass a generic ioctl callback, :c:func:`snd_pcm_lib_ioctl()`.
+leave it as NULL; the PCM core then calls the generic ioctl callback
+function :c:func:`snd_pcm_lib_ioctl()`. If you need to deal with a
+unique setup of channel info or a reset procedure, you can pass your own
+callback function here.
hw_params callback
~~~~~~~~~~~~~~~~~~~
@@ -1929,8 +1941,12 @@ Many hardware setups should be done in this callback, including the
allocation of buffers.
Parameters to be initialized are retrieved by
-:c:func:`params_xxx()` macros. To allocate buffer, you can call a
-helper function,
+:c:func:`params_xxx()` macros.
+
+When you set up the managed buffer allocation mode for the substream,
+a buffer is already allocated before this callback gets
+called. Alternatively, you can call the helper function below for
+allocating the buffer, too.
::
@@ -1964,18 +1980,23 @@ hw_free callback
static int snd_xxx_hw_free(struct snd_pcm_substream *substream);
This is called to release the resources allocated via
-``hw_params``. For example, releasing the buffer via
-:c:func:`snd_pcm_lib_malloc_pages()` is done by calling the
-following:
-
-::
-
- snd_pcm_lib_free_pages(substream);
+``hw_params``.
This function is always called before the close callback is called.
Also, the callback may be called multiple times, too. Keep track
whether the resource was already released.
+When you have set up the managed buffer allocation mode for the PCM
+substream, the allocated PCM buffer will be automatically released
+after this callback gets called. Otherwise you'll have to release the
+buffer manually. Typically, when the buffer was allocated from the
+pre-allocated pool, you can release it with the standard API function
+:c:func:`snd_pcm_lib_free_pages()` like:
+
+::
+
+ snd_pcm_lib_free_pages(substream);
+
prepare callback
~~~~~~~~~~~~~~~~
@@ -2048,6 +2069,37 @@ flag set, and you cannot call functions which may sleep. The
triggering the DMA. The other stuff should be initialized
``hw_params`` and ``prepare`` callbacks properly beforehand.
+sync_stop callback
+~~~~~~~~~~~~~~~~~~
+
+::
+
+ static int snd_xxx_sync_stop(struct snd_pcm_substream *substream);
+
+This callback is optional, and NULL can be passed. It's called after
+the PCM core stops the stream and changes the stream state via
+``prepare``, ``hw_params`` or ``hw_free``.
+Since the IRQ handler might still be pending, we need to wait until
+the pending task finishes before moving to the next step; otherwise it
+might lead to a crash due to resource conflicts or access to the freed
+resources. A typical behavior is to call a synchronization function
+like :c:func:`synchronize_irq()` here.
+
+For the majority of drivers that need only a call to
+:c:func:`synchronize_irq()`, there is a simpler setup, too.
+While keeping the ``sync_stop`` PCM callback NULL, the driver can set
+the ``card->sync_irq`` field to store the valid interrupt number after
+requesting an IRQ, instead. Then the PCM core will call
+:c:func:`synchronize_irq()` with the given IRQ appropriately.
+
+If the IRQ handler is released in the card destructor, you don't need
+to clear ``card->sync_irq``, as the card itself is being released.
+So, usually you'll need to add just a single line for assigning
+``card->sync_irq`` in the driver code unless the driver re-acquires
+the IRQ. When the driver frees and re-acquires the IRQ dynamically
+(e.g. for suspend/resume), it needs to clear and re-set
+``card->sync_irq`` again appropriately.
+
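+A minimal sketch of an explicit ``sync_stop`` implementation (assuming
+the driver keeps its IRQ number in ``chip->irq``)::
+
+    static int snd_mychip_sync_stop(struct snd_pcm_substream *substream)
+    {
+        struct mychip *chip = snd_pcm_substream_chip(substream);
+
+        if (chip->irq >= 0)
+            synchronize_irq(chip->irq);
+        return 0;
+    }
+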
pointer callback
~~~~~~~~~~~~~~~~
@@ -2095,10 +2147,12 @@ This callback is atomic as default.
page callback
~~~~~~~~~~~~~
-This callback is optional too. This callback is used mainly for
-non-contiguous buffers. The mmap calls this callback to get the page
-address. Some examples will be explained in the later section `Buffer
-and Memory Management`_, too.
+This callback is optional too. The mmap calls this callback to get the
+page fault address.
+
+Since the recent changes, no special callback is needed any longer for
+the standard SG-buffer or vmalloc-buffer. Hence this callback should
+rarely be used.
mmap callback
~~~~~~~~~~~~~~
@@ -3512,7 +3566,7 @@ bus).
::
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci), size, max);
+ &pci->dev, size, max);
where ``size`` is the byte size to be pre-allocated and the ``max`` is
the maximum size to be changed via the ``prealloc`` proc file. The
@@ -3523,12 +3577,14 @@ The second argument (type) and the third argument (device pointer) are
dependent on the bus. For normal devices, pass the device pointer
(typically identical as ``card->dev``) to the third argument with
``SNDRV_DMA_TYPE_DEV`` type. For the continuous buffer unrelated to the
-bus can be pre-allocated with ``SNDRV_DMA_TYPE_CONTINUOUS`` type and the
-``snd_dma_continuous_data(GFP_KERNEL)`` device pointer, where
-``GFP_KERNEL`` is the kernel allocation flag to use. For the
-scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the device
-pointer (see the `Non-Contiguous Buffers`_
-section).
+bus, the pre-allocation can be done with the ``SNDRV_DMA_TYPE_CONTINUOUS``
+type. You can pass NULL to the device pointer in that case, which is the
+default mode implying allocation with the ``GFP_KERNEL`` flag.
+If you need a different GFP flag, you can pass it by encoding the flag
+into the device pointer via the special macro
+:c:func:`snd_dma_continuous_data()`.
+For the scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the
+device pointer (see the `Non-Contiguous Buffers`_ section).
Once the buffer is pre-allocated, you can use the allocator in the
``hw_params`` callback:
@@ -3539,6 +3595,25 @@ Once the buffer is pre-allocated, you can use the allocator in the
Note that you have to pre-allocate to use this function.
+Most drivers, though, use the newly introduced "managed buffer
+allocation mode" instead of the manual allocation or release.
+This is done by calling :c:func:`snd_pcm_set_managed_buffer_all()`
+instead of :c:func:`snd_pcm_lib_preallocate_pages_for_all()`.
+
+::
+
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &pci->dev, size, max);
+
+where the passed arguments are identical for both functions.
+The difference in the managed mode is that the PCM core will call
+:c:func:`snd_pcm_lib_malloc_pages()` internally before calling
+the PCM ``hw_params`` callback, and call :c:func:`snd_pcm_lib_free_pages()`
+automatically after the PCM ``hw_free`` callback. So the driver
+no longer has to call these functions explicitly in its callbacks.
+This has left many drivers with NULL ``hw_params`` and
+``hw_free`` entries.
+
External Hardware Buffers
-------------------------
@@ -3693,20 +3768,26 @@ provides an interface for handling SG-buffers. The API is provided in
``<sound/pcm.h>``.
For creating the SG-buffer handler, call
-:c:func:`snd_pcm_lib_preallocate_pages()` or
-:c:func:`snd_pcm_lib_preallocate_pages_for_all()` with
+:c:func:`snd_pcm_set_managed_buffer()` or
+:c:func:`snd_pcm_set_managed_buffer_all()` with
``SNDRV_DMA_TYPE_DEV_SG`` in the PCM constructor like other PCI
-pre-allocator. You need to pass ``snd_dma_pci_data(pci)``, where pci is
+pre-allocator. You need to pass ``&pci->dev``, where pci is
the :c:type:`struct pci_dev <pci_dev>` pointer of the chip as
-well. The ``struct snd_sg_buf`` instance is created as
-``substream->dma_private``. You can cast the pointer like:
+well.
+
+::
+
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ &pci->dev, size, max);
+
+The ``struct snd_sg_buf`` instance is created as
+``substream->dma_private`` in turn. You can cast the pointer like:
::
struct snd_sg_buf *sgbuf = (struct snd_sg_buf *)substream->dma_private;
-Then call :c:func:`snd_pcm_lib_malloc_pages()` in the ``hw_params``
-callback as well as in the case of normal PCI buffer. The SG-buffer
+Then, in the :c:func:`snd_pcm_lib_malloc_pages()` call, the common SG-buffer
handler will allocate the non-contiguous kernel pages of the given size
and map them onto the virtually contiguous memory. The virtual pointer
is addressed in runtime->dma_area. The physical address
@@ -3715,41 +3796,40 @@ physically non-contiguous. The physical address table is set up in
``sgbuf->table``. You can get the physical address at a certain offset
via :c:func:`snd_pcm_sgbuf_get_addr()`.
-When a SG-handler is used, you need to set
-:c:func:`snd_pcm_sgbuf_ops_page()` as the ``page`` callback. (See
-`page callback`_ section.)
-
-To release the data, call :c:func:`snd_pcm_lib_free_pages()` in
-the ``hw_free`` callback as usual.
+If you need to release the SG-buffer data explicitly, call the
+standard API function :c:func:`snd_pcm_lib_free_pages()` as usual.
Vmalloc'ed Buffers
------------------
It's possible to use a buffer allocated via :c:func:`vmalloc()`, for
-example, for an intermediate buffer. Since the allocated pages are not
-contiguous, you need to set the ``page`` callback to obtain the physical
-address at every offset.
+example, for an intermediate buffer. In recent kernel versions,
+you can simply allocate it via the standard
+:c:func:`snd_pcm_lib_malloc_pages()` and co. after setting up the
+buffer preallocation with the ``SNDRV_DMA_TYPE_VMALLOC`` type.
-The easiest way to achieve it would be to use
-:c:func:`snd_pcm_lib_alloc_vmalloc_buffer()` for allocating the buffer
-via :c:func:`vmalloc()`, and set :c:func:`snd_pcm_sgbuf_ops_page()` to
-the ``page`` callback. At release, you need to call
-:c:func:`snd_pcm_lib_free_vmalloc_buffer()`.
+::
-If you want to implementation the ``page`` manually, it would be like
-this:
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
-::
+NULL is passed as the device pointer argument, which indicates
+that the default pages (GFP_KERNEL and GFP_HIGHMEM) will be
+allocated.
- #include <linux/vmalloc.h>
+Also, note that zero is passed to both the size and the max size
+arguments here. Since each vmalloc call should succeed at any time,
+we don't need to pre-allocate the buffers like other continuous
+pages.
- /* get the physical page pointer on the given offset */
- static struct page *mychip_page(struct snd_pcm_substream *substream,
- unsigned long offset)
- {
- void *pageptr = substream->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
- }
+If you need a 32-bit DMA allocation, pass the device pointer encoded
+by :c:func:`snd_dma_continuous_data()` with the ``GFP_KERNEL|__GFP_DMA32``
+argument.
+
+::
+
+ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ snd_dma_continuous_data(GFP_KERNEL | __GFP_DMA32), 0, 0);
Proc Interface
==============
diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css
index e21e36cd6761..459ec5b29d68 100644
--- a/Documentation/sphinx-static/theme_overrides.css
+++ b/Documentation/sphinx-static/theme_overrides.css
@@ -53,6 +53,16 @@ div[class^="highlight"] pre {
line-height: normal;
}
+/* Keep fields from being strangely far apart due to inherited table CSS. */
+.rst-content table.field-list th.field-name {
+ padding-top: 1px;
+ padding-bottom: 1px;
+}
+.rst-content table.field-list td.field-body {
+ padding-top: 1px;
+ padding-bottom: 1px;
+}
+
@media screen {
/* content column
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index 1159405cb920..4bcbd6ae01cd 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -59,9 +59,10 @@ class KernelDocDirective(Directive):
optional_arguments = 4
option_spec = {
'doc': directives.unchanged_required,
- 'functions': directives.unchanged,
'export': directives.unchanged,
'internal': directives.unchanged,
+ 'identifiers': directives.unchanged,
+ 'functions': directives.unchanged,
}
has_content = False
@@ -77,6 +78,10 @@ class KernelDocDirective(Directive):
tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
+ # 'function' is an alias of 'identifiers'
+ if 'functions' in self.options:
+ self.options['identifiers'] = self.options.get('functions')
+
# FIXME: make this nicer and more robust against errors
if 'export' in self.options:
cmd += ['-export']
@@ -86,11 +91,11 @@ class KernelDocDirective(Directive):
export_file_patterns = str(self.options.get('internal')).split()
elif 'doc' in self.options:
cmd += ['-function', str(self.options.get('doc'))]
- elif 'functions' in self.options:
- functions = self.options.get('functions').split()
- if functions:
- for f in functions:
- cmd += ['-function', f]
+ elif 'identifiers' in self.options:
+ identifiers = self.options.get('identifiers').split()
+ if identifiers:
+ for i in identifiers:
+ cmd += ['-function', i]
else:
cmd += ['-no-doc-sections']
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
new file mode 100755
index 000000000000..dc8fed48d3c2
--- /dev/null
+++ b/Documentation/sphinx/maintainers_include.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+u"""
+ maintainers-include
+ ~~~~~~~~~~~~~~~~~~~
+
+ Implementation of the ``maintainers-include`` reST-directive.
+
+ :copyright: Copyright (C) 2019 Kees Cook <keescook@chromium.org>
+ :license: GPL Version 2, June 1991 see linux/COPYING for details.
+
+ The ``maintainers-include`` reST-directive performs extensive parsing
+ specific to the Linux kernel's standard "MAINTAINERS" file, in an
+ effort to avoid needing to heavily mark up the original plain text.
+"""
+
+import sys
+import re
+import os.path
+
+from docutils import statemachine
+from docutils.utils.error_reporting import ErrorString
+from docutils.parsers.rst import Directive
+from docutils.parsers.rst.directives.misc import Include
+
+__version__ = '1.0'
+
+def setup(app):
+ app.add_directive("maintainers-include", MaintainersInclude)
+ return dict(
+ version = __version__,
+ parallel_read_safe = True,
+ parallel_write_safe = True
+ )
+
+class MaintainersInclude(Include):
+ u"""MaintainersInclude (``maintainers-include``) directive"""
+ required_arguments = 0
+
+ def parse_maintainers(self, path):
+ """Parse all the MAINTAINERS lines into ReST for human-readability"""
+
+ result = list()
+ result.append(".. _maintainers:")
+ result.append("")
+
+ # Poor man's state machine.
+ descriptions = False
+ maintainers = False
+ subsystems = False
+
+ # Field letter to field name mapping.
+ field_letter = None
+ fields = dict()
+
+ prev = None
+ field_prev = ""
+ field_content = ""
+
+ for line in open(path):
+ if sys.version_info.major == 2:
+ line = unicode(line, 'utf-8')
+ # Have we reached the end of the preformatted Descriptions text?
+ if descriptions and line.startswith('Maintainers'):
+ descriptions = False
+ # Ensure a blank line following the last "|"-prefixed line.
+ result.append("")
+
+ # Start subsystem processing? This is to skip processing the text
+ # between the Maintainers heading and the first subsystem name.
+ if maintainers and not subsystems:
+ if re.search('^[A-Z0-9]', line):
+ subsystems = True
+
+ # Drop needless input whitespace.
+ line = line.rstrip()
+
+ # Linkify all non-wildcard refs to ReST files in Documentation/.
+ pat = '(Documentation/([^\s\?\*]*)\.rst)'
+ m = re.search(pat, line)
+ if m:
+ # maintainers.rst is in a subdirectory, so include "../".
+ line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)
+
+ # Check state machine for output rendering behavior.
+ output = None
+ if descriptions:
+ # Escape the escapes in preformatted text.
+ output = "| %s" % (line.replace("\\", "\\\\"))
+ # Look for and record field letter to field name mappings:
+ # R: Designated *reviewer*: FullName <address@domain>
+ m = re.search("\s(\S):\s", line)
+ if m:
+ field_letter = m.group(1)
+ if field_letter and not field_letter in fields:
+ m = re.search("\*([^\*]+)\*", line)
+ if m:
+ fields[field_letter] = m.group(1)
+ elif subsystems:
+ # Skip empty lines: subsystem parser adds them as needed.
+ if len(line) == 0:
+ continue
+ # Subsystem fields are batched into "field_content"
+ if line[1] != ':':
+ # Render a subsystem entry as:
+ # SUBSYSTEM NAME
+ # ~~~~~~~~~~~~~~
+
+ # Flush pending field content.
+ output = field_content + "\n\n"
+ field_content = ""
+
+ # Collapse whitespace in subsystem name.
+ heading = re.sub("\s+", " ", line)
+ output = output + "%s\n%s" % (heading, "~" * len(heading))
+ field_prev = ""
+ else:
+ # Render a subsystem field as:
+ # :Field: entry
+ # entry...
+ field, details = line.split(':', 1)
+ details = details.strip()
+
+ # Mark paths (and regexes) as literal text for improved
+ # readability and to escape any escapes.
+ if field in ['F', 'N', 'X', 'K']:
+ # But only if not already marked :)
+ if not ':doc:' in details:
+ details = '``%s``' % (details)
+
+ # Comma separate email field continuations.
+ if field == field_prev and field_prev in ['M', 'R', 'L']:
+ field_content = field_content + ","
+
+ # Do not repeat field names, so that field entries
+ # will be collapsed together.
+ if field != field_prev:
+ output = field_content + "\n"
+ field_content = ":%s:" % (fields.get(field, field))
+ field_content = field_content + "\n\t%s" % (details)
+ field_prev = field
+ else:
+ output = line
+
+ # Re-split on any added newlines in any above parsing.
+ if output != None:
+ for separated in output.split('\n'):
+ result.append(separated)
+
+ # Update the state machine when we find heading separators.
+ if line.startswith('----------'):
+ if prev.startswith('Descriptions'):
+ descriptions = True
+ if prev.startswith('Maintainers'):
+ maintainers = True
+
+ # Retain previous line for state machine transitions.
+ prev = line
+
+ # Flush pending field contents.
+ if field_content != "":
+ for separated in field_content.split('\n'):
+ result.append(separated)
+
+ output = "\n".join(result)
+ # For debugging the pre-rendered results...
+ #print(output, file=open("/tmp/MAINTAINERS.rst", "w"))
+
+ self.state_machine.insert_input(
+ statemachine.string2lines(output), path)
+
+ def run(self):
+ """Include the MAINTAINERS file as part of this reST file."""
+ if not self.state.document.settings.file_insertion_enabled:
+ raise self.warning('"%s" directive disabled.' % self.name)
+
+ # Walk up source path directories to find Documentation/../
+ path = self.state_machine.document.attributes['source']
+ path = os.path.realpath(path)
+ tail = path
+ while tail != "Documentation" and tail != "":
+ (path, tail) = os.path.split(path)
+
+ # Append "MAINTAINERS"
+ path = os.path.join(path, "MAINTAINERS")
+
+ try:
+ self.state.document.settings.record_dependencies.add(path)
+ lines = self.parse_maintainers(path)
+ except IOError as error:
+ raise self.severe('Problems with "%s" directive path:\n%s.' %
+ (self.name, ErrorString(error)))
+
+ return []
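A minimal usage sketch of the new directive (the anchoring document is
hypothetical): a reST page below Documentation/ renders the top-level
MAINTAINERS file with::

    .. maintainers-include::

The directive takes no arguments: run() walks the including document's real
path up to the "Documentation" component and joins its parent with
"MAINTAINERS", so the file is found regardless of the page's depth in the
tree.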
diff --git a/Documentation/sphinx/parallel-wrapper.sh b/Documentation/sphinx/parallel-wrapper.sh
new file mode 100644
index 000000000000..7daf5133bdd3
--- /dev/null
+++ b/Documentation/sphinx/parallel-wrapper.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Figure out if we should follow a specific parallelism from the make
+# environment (as exported by scripts/jobserver-exec), or fall back to
+# the "auto" parallelism when "-jN" is not specified at the top-level
+# "make" invocation.
+
+sphinx="$1"
+shift || true
+
+parallel="$PARALLELISM"
+if [ -z "$parallel" ] ; then
+ # If no parallelism is specified at the top-level make, then
+ # fall back to the expected "-jauto" mode that the "htmldocs"
+ # target has had.
+ auto=$(perl -e 'open IN,"'"$sphinx"' --version 2>&1 |";
+ while (<IN>) {
+ if (m/([\d\.]+)/) {
+ print "auto" if ($1 >= "1.7")
+ }
+ }
+ close IN')
+ if [ -n "$auto" ] ; then
+ parallel="$auto"
+ fi
+fi
+# Only if some parallelism has been determined do we add the -jN option.
+if [ -n "$parallel" ] ; then
+ parallel="-j$parallel"
+fi
+
+# Note: $parallel is deliberately left unquoted here; when it is empty it
+# must expand to no argument at all, not to an empty-string argument.
+exec "$sphinx" $parallel "$@"
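A rough sketch of the resulting behaviour, invoked stand-alone (in the tree
the wrapper is expected to run under scripts/jobserver-exec, which exports
PARALLELISM from the make jobserver)::

    $ PARALLELISM=4 ./parallel-wrapper.sh sphinx-build -b html src out
    # execs "sphinx-build -j4 -b html src out"

    $ PARALLELISM= ./parallel-wrapper.sh sphinx-build -b html src out
    # with Sphinx >= 1.7, execs "sphinx-build -jauto -b html src out"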
diff --git a/Documentation/trace/coresight-cpu-debug.rst b/Documentation/trace/coresight/coresight-cpu-debug.rst
index 993dd294b81b..993dd294b81b 100644
--- a/Documentation/trace/coresight-cpu-debug.rst
+++ b/Documentation/trace/coresight/coresight-cpu-debug.rst
diff --git a/Documentation/trace/coresight/coresight-etm4x-reference.rst b/Documentation/trace/coresight/coresight-etm4x-reference.rst
new file mode 100644
index 000000000000..b64d9a9c79df
--- /dev/null
+++ b/Documentation/trace/coresight/coresight-etm4x-reference.rst
@@ -0,0 +1,798 @@
+===============================================
+ETMv4 sysfs Linux driver programming reference.
+===============================================
+
+ :Author: Mike Leach <mike.leach@linaro.org>
+ :Date: October 11th, 2019
+
+Supplement to existing ETMv4 driver documentation.
+
+Sysfs files and directories
+---------------------------
+
+Root: ``/sys/bus/coresight/devices/etm<N>``
+
+
+The following paragraphs explain the association between sysfs files and the
+ETMv4 registers that they affect. Note that the register names are given without
+the ‘TRC’ prefix.
+
+----
+
+:File: ``mode`` (rw)
+:Trace Registers: {CONFIGR + others}
+:Notes:
+   Bit select of trace features. See the ‘mode’ section below. Bits set
+   here cause equivalent programming of the trace config and other
+   registers to enable the features requested.
+
+:Syntax & eg:
+ ``echo bitfield > mode``
+
+   Where bitfield is up to 32 bits selecting trace features.
+
+:Example:
+ ``$> echo 0x012 > mode``
+
+----
+
+:File: ``reset`` (wo)
+:Trace Registers: All
+:Notes:
+ Reset all programming to trace nothing / no logic programmed.
+
+:Syntax:
+ ``echo 1 > reset``
+
+----
+
+:File: ``enable_source`` (wo)
+:Trace Registers: PRGCTLR, All hardware regs.
+:Notes:
+   - > 0 : Programs the hardware with the current values held in the driver
+ and enables trace.
+
+ - = 0 : disable trace hardware.
+
+:Syntax:
+ ``echo 1 > enable_source``
+
+----
+
+:File: ``cpu`` (ro)
+:Trace Registers: None.
+:Notes:
+ CPU ID that this ETM is attached to.
+
+:Example:
+ ``$> cat cpu``
+
+   ``0``
+
+----
+
+:File: ``addr_idx`` (rw)
+:Trace Registers: None.
+:Notes:
+ Virtual register to index address comparator and range
+ features. Set index for first of the pair in a range.
+
+:Syntax:
+ ``echo idx > addr_idx``
+
+ Where idx < nr_addr_cmp x 2
+
+----
+
+:File: ``addr_range`` (rw)
+:Trace Registers: ACVR[idx, idx+1], VIIECTLR
+:Notes:
+ Pair of addresses for a range selected by addr_idx. Include
+   / exclude according to the optional parameter or, if omitted,
+   the current ‘mode’ setting. Selects the comparator range in the
+   control register. Error if the index is an odd value.
+
+:Depends: ``mode, addr_idx``
+:Syntax:
+ ``echo addr1 addr2 [exclude] > addr_range``
+
+ Where addr1 and addr2 define the range and addr1 < addr2.
+
+ Optional exclude value:-
+
+ - 0 for include
+ - 1 for exclude.
+:Example:
+ ``$> echo 0x0000 0x2000 0 > addr_range``
+
+----
+
+:File: ``addr_single`` (rw)
+:Trace Registers: ACVR[idx]
+:Notes:
+ Set a single address comparator according to addr_idx. This
+ is used if the address comparator is used as part of event
+ generation logic etc.
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``echo addr1 > addr_single``
+
+----
+
+:File: ``addr_start`` (rw)
+:Trace Registers: ACVR[idx], VISSCTLR
+:Notes:
+ Set a trace start address comparator according to addr_idx.
+ Select comparator in control register.
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``echo addr1 > addr_start``
+
+----
+
+:File: ``addr_stop`` (rw)
+:Trace Registers: ACVR[idx], VISSCTLR
+:Notes:
+ Set a trace stop address comparator according to addr_idx.
+ Select comparator in control register.
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``echo addr1 > addr_stop``
+
+----
+
+:File: ``addr_context`` (rw)
+:Trace Registers: ACATR[idx,{6:4}]
+:Notes:
+ Link context ID comparator to address comparator addr_idx
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``echo ctxt_idx > addr_context``
+
+ Where ctxt_idx is the index of the linked context id / vmid
+ comparator.
+
+----
+
+:File: ``addr_ctxtype`` (rw)
+:Trace Registers: ACATR[idx,{3:2}]
+:Notes:
+   Input value string. Sets the type for the linked context ID comparator.
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``echo type > addr_ctxtype``
+
+ Type one of {all, vmid, ctxid, none}
+:Example:
+ ``$> echo ctxid > addr_ctxtype``
+
+----
+
+:File: ``addr_exlevel_s_ns`` (rw)
+:Trace Registers: ACATR[idx,{14:8}]
+:Notes:
+ Set the ELx secure and non-secure matching bits for the
+ selected address comparator
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``echo val > addr_exlevel_s_ns``
+
+ val is a 7 bit value for exception levels to exclude. Input
+ value shifted to correct bits in register.
+:Example:
+ ``$> echo 0x4F > addr_exlevel_s_ns``
+
+----
+
+:File: ``addr_instdatatype`` (rw)
+:Trace Registers: ACATR[idx,{1:0}]
+:Notes:
+ Set the comparator address type for matching. Driver only
+ supports setting instruction address type.
+
+:Depends: ``addr_idx``
+
+----
+
+:File: ``addr_cmp_view`` (ro)
+:Trace Registers: ACVR[idx, idx+1], ACATR[idx], VIIECTLR
+:Notes:
+ Read the currently selected address comparator. If part of
+ address range then display both addresses.
+
+:Depends: ``addr_idx``
+:Syntax:
+ ``cat addr_cmp_view``
+:Example:
+ ``$> cat addr_cmp_view``
+
+ ``addr_cmp[0] range 0x0 0xffffffffffffffff include ctrl(0x4b00)``
+
+----
+
+:File: ``nr_addr_cmp`` (ro)
+:Trace Registers: From IDR4
+:Notes:
+ Number of address comparator pairs
+
+----
+
+:File: ``sshot_idx`` (rw)
+:Trace Registers: None
+:Notes:
+ Select single shot register set.
+
+----
+
+:File: ``sshot_ctrl`` (rw)
+:Trace Registers: SSCCR[idx]
+:Notes:
+ Access a single shot comparator control register.
+
+:Depends: ``sshot_idx``
+:Syntax:
+ ``echo val > sshot_ctrl``
+
+ Writes val into the selected control register.
+
+----
+
+:File: ``sshot_status`` (ro)
+:Trace Registers: SSCSR[idx]
+:Notes:
+ Read a single shot comparator status register
+
+:Depends: ``sshot_idx``
+:Syntax:
+ ``cat sshot_status``
+
+ Read status.
+:Example:
+ ``$> cat sshot_status``
+
+ ``0x1``
+
+----
+
+:File: ``sshot_pe_ctrl`` (rw)
+:Trace Registers: SSPCICR[idx]
+:Notes:
+ Access a single shot PE comparator input control register.
+
+:Depends: ``sshot_idx``
+:Syntax:
+ ``echo val > sshot_pe_ctrl``
+
+ Writes val into the selected control register.
+
+----
+
+:File: ``ns_exlevel_vinst`` (rw)
+:Trace Registers: VICTLR{23:20}
+:Notes:
+ Program non-secure exception level filters. Set / clear NS
+ exception filter bits. Setting ‘1’ excludes trace from the
+ exception level.
+
+:Syntax:
+   ``echo bitfield > ns_exlevel_vinst``
+
+   Where bitfield contains bits to set / clear for EL0 to EL2
+:Example:
+   ``$> echo 0x4 > ns_exlevel_vinst``
+
+ Excludes EL2 NS trace.
+
+----
+
+:File: ``vinst_pe_cmp_start_stop`` (rw)
+:Trace Registers: VIPCSSCTLR
+:Notes:
+ Access PE start stop comparator input control registers
+
+----
+
+:File: ``bb_ctrl`` (rw)
+:Trace Registers: BBCTLR
+:Notes:
+ Define ranges that Branch Broadcast will operate in.
+ Default (0x0) is all addresses.
+
+:Depends: BB enabled.
+
+----
+
+:File: ``cyc_threshold`` (rw)
+:Trace Registers: CCCTLR
+:Notes:
+ Set the threshold for which cycle counts will be emitted.
+ Error if attempt to set below minimum defined in IDR3, masked
+ to width of valid bits.
+
+:Depends: CC enabled.
+
+----
+
+:File: ``syncfreq`` (rw)
+:Trace Registers: SYNCPR
+:Notes:
+ Set trace synchronisation period. Power of 2 value, 0 (off)
+ or 8-20. Driver defaults to 12 (every 4096 bytes).
+
+----
+
+:File: ``cntr_idx`` (rw)
+:Trace Registers: none
+:Notes:
+ Select the counter to access
+
+:Syntax:
+ ``echo idx > cntr_idx``
+
+ Where idx < nr_cntr
+
+----
+
+:File: ``cntr_ctrl`` (rw)
+:Trace Registers: CNTCTLR[idx]
+:Notes:
+ Set counter control value.
+
+:Depends: ``cntr_idx``
+:Syntax:
+ ``echo val > cntr_ctrl``
+
+ Where val is per ETMv4 spec.
+
+----
+
+:File: ``cntrldvr`` (rw)
+:Trace Registers: CNTRLDVR[idx]
+:Notes:
+ Set counter reload value.
+
+:Depends: ``cntr_idx``
+:Syntax:
+ ``echo val > cntrldvr``
+
+ Where val is per ETMv4 spec.
+
+----
+
+:File: ``nr_cntr`` (ro)
+:Trace Registers: From IDR5
+
+:Notes:
+ Number of counters implemented.
+
+----
+
+:File: ``ctxid_idx`` (rw)
+:Trace Registers: None
+:Notes:
+ Select the context ID comparator to access
+
+:Syntax:
+ ``echo idx > ctxid_idx``
+
+ Where idx < numcidc
+
+----
+
+:File: ``ctxid_pid`` (rw)
+:Trace Registers: CIDCVR[idx]
+:Notes:
+ Set the context ID comparator value
+
+:Depends: ``ctxid_idx``
+
+----
+
+:File: ``ctxid_masks`` (rw)
+:Trace Registers: CIDCCTLR0, CIDCCTLR1, CIDCVR<0-7>
+:Notes:
+ Pair of values to set the byte masks for 1-8 context ID
+ comparators. Automatically clears masked bytes to 0 in CID
+ value registers.
+
+:Syntax:
+ ``echo m3m2m1m0 [m7m6m5m4] > ctxid_masks``
+
+ 32 bit values made up of mask bytes, where mN represents a
+ byte mask value for Context ID comparator N.
+
+ Second value not required on systems that have fewer than 4
+ context ID comparators
+
+----
+
+:File: ``numcidc`` (ro)
+:Trace Registers: From IDR4
+:Notes:
+ Number of Context ID comparators
+
+----
+
+:File: ``vmid_idx`` (rw)
+:Trace Registers: None
+:Notes:
+ Select the VM ID comparator to access.
+
+:Syntax:
+ ``echo idx > vmid_idx``
+
+   Where idx < numvmidc
+
+----
+
+:File: ``vmid_val`` (rw)
+:Trace Registers: VMIDCVR[idx]
+:Notes:
+ Set the VM ID comparator value
+
+:Depends: ``vmid_idx``
+
+----
+
+:File: ``vmid_masks`` (rw)
+:Trace Registers: VMIDCCTLR0, VMIDCCTLR1, VMIDCVR<0-7>
+:Notes:
+ Pair of values to set the byte masks for 1-8 VM ID comparators.
+ Automatically clears masked bytes to 0 in VMID value registers.
+
+:Syntax:
+ ``echo m3m2m1m0 [m7m6m5m4] > vmid_masks``
+
+ Where mN represents a byte mask value for VMID comparator N.
+ Second value not required on systems that have fewer than 4
+ VMID comparators.
+
+----
+
+:File: ``numvmidc`` (ro)
+:Trace Registers: From IDR4
+:Notes:
+ Number of VMID comparators
+
+----
+
+:File: ``res_idx`` (rw)
+:Trace Registers: None.
+:Notes:
+ Select the resource selector control to access. Must be 2 or
+ higher as selectors 0 and 1 are hardwired.
+
+:Syntax:
+ ``echo idx > res_idx``
+
+ Where 2 <= idx < nr_resource x 2
+
+----
+
+:File: ``res_ctrl`` (rw)
+:Trace Registers: RSCTLR[idx]
+:Notes:
+ Set resource selector control value. Value per ETMv4 spec.
+
+:Depends: ``res_idx``
+:Syntax:
+   ``echo val > res_ctrl``
+
+ Where val is per ETMv4 spec.
+
+----
+
+:File: ``nr_resource`` (ro)
+:Trace Registers: From IDR4
+:Notes:
+ Number of resource selector pairs
+
+----
+
+:File: ``event`` (rw)
+:Trace Registers: EVENTCTRL0R
+:Notes:
+ Set up to 4 implemented event fields.
+
+:Syntax:
+ ``echo ev3ev2ev1ev0 > event``
+
+ Where evN is an 8 bit event field. Up to 4 event fields make up the
+ 32-bit input value. Number of valid fields is implementation dependent,
+ defined in IDR0.
+
+----
+
+:File: ``event_instren`` (rw)
+:Trace Registers: EVENTCTRL1R
+:Notes:
+ Choose events which insert event packets into trace stream.
+
+:Depends: EVENTCTRL0R
+:Syntax:
+ ``echo bitfield > event_instren``
+
+ Where bitfield is up to 4 bits according to number of event fields.
+
+----
+
+:File: ``event_ts`` (rw)
+:Trace Registers: TSCTLR
+:Notes:
+ Set the event that will generate timestamp requests.
+
+:Depends: ``TS activated``
+:Syntax:
+ ``echo evfield > event_ts``
+
+ Where evfield is an 8 bit event selector.
+
+----
+
+:File: ``seq_idx`` (rw)
+:Trace Registers: None
+:Notes:
+ Sequencer event register select - 0 to 2
+
+----
+
+:File: ``seq_state`` (rw)
+:Trace Registers: SEQSTR
+:Notes:
+ Sequencer current state - 0 to 3.
+
+----
+
+:File: ``seq_event`` (rw)
+:Trace Registers: SEQEVR[idx]
+:Notes:
+ State transition event registers
+
+:Depends: ``seq_idx``
+:Syntax:
+ ``echo evBevF > seq_event``
+
+ Where evBevF is a 16 bit value made up of two event selectors,
+
+ - evB : back
+ - evF : forwards.
+
+----
+
+:File: ``seq_reset_event`` (rw)
+:Trace Registers: SEQRSTEVR
+:Notes:
+ Sequencer reset event
+
+:Syntax:
+ ``echo evfield > seq_reset_event``
+
+ Where evfield is an 8 bit event selector.
+
+----
+
+:File: ``nrseqstate`` (ro)
+:Trace Registers: From IDR5
+:Notes:
+ Number of sequencer states (0 or 4)
+
+----
+
+:File: ``nr_pe_cmp`` (ro)
+:Trace Registers: From IDR4
+:Notes:
+ Number of PE comparator inputs
+
+----
+
+:File: ``nr_ext_inp`` (ro)
+:Trace Registers: From IDR5
+:Notes:
+ Number of external inputs
+
+----
+
+:File: ``nr_ss_cmp`` (ro)
+:Trace Registers: From IDR4
+:Notes:
+ Number of Single Shot control registers
+
+----
+
+*Note:* When programming any address comparator, the driver will tag the
+comparator with the type used - i.e. RANGE, SINGLE, START, STOP. Once this
+tag is set, only the values can be changed, using the same sysfs file / type
+that was used to program it.
+
+Thus::
+
+ % echo 0 > addr_idx ; select address comparator 0
+ % echo 0x1000 0x5000 0 > addr_range ; set address range on comparators 0, 1.
+ % echo 0x2000 > addr_start ; error as comparator 0 is a range comparator
+ % echo 2 > addr_idx ; select address comparator 2
+ % echo 0x2000 > addr_start ; this is OK as comparator 2 is unused.
+ % echo 0x3000 > addr_stop ; error as comparator 2 set as start address.
+ % echo 3 > addr_idx ; select address comparator 3
+ % echo 0x3000 > addr_stop ; this is OK
+
+To remove programming on all the comparators (and all the other hardware) use
+the reset parameter::
+
+ % echo 1 > reset
+
+
+
+The ‘mode’ sysfs parameter.
+---------------------------
+
+This is a bitfield selection parameter that sets the overall trace mode for the
+ETM. The table below describes the bits, using the defines from the driver
+source file, along with a description of the feature these represent. Many
+features are optional and therefore dependent on implementation in the
+hardware.
+
+Bit assignments shown below:-
+
+----
+
+**bit (0):**
+ ETM_MODE_EXCLUDE
+
+**description:**
+ This is the default value for the include / exclude function when
+ setting address ranges. Set 1 for exclude range. When the mode
+ parameter is set this value is applied to the currently indexed
+ address range.
+
+
+**bit (4):**
+ ETM_MODE_BB
+
+**description:**
+ Set to enable branch broadcast if supported in hardware [IDR0].
+
+
+**bit (5):**
+ ETMv4_MODE_CYCACC
+
+**description:**
+ Set to enable cycle accurate trace if supported [IDR0].
+
+
+**bit (6):**
+ ETMv4_MODE_CTXID
+
+**description:**
+ Set to enable context ID tracing if supported in hardware [IDR2].
+
+
+**bit (7):**
+ ETM_MODE_VMID
+
+**description:**
+ Set to enable virtual machine ID tracing if supported [IDR2].
+
+
+**bit (11):**
+ ETMv4_MODE_TIMESTAMP
+
+**description:**
+ Set to enable timestamp generation if supported [IDR0].
+
+
+**bit (12):**
+ ETM_MODE_RETURNSTACK
+**description:**
+ Set to enable trace return stack use if supported [IDR0].
+
+
+**bit (13-14):**
+ ETM_MODE_QELEM(val)
+
+**description:**
+	‘val’ determines the level of Q element support enabled, if
+	implemented by the ETM [IDR0].
+
+
+**bit (19):**
+ ETM_MODE_ATB_TRIGGER
+
+**description:**
+ Set to enable the ATBTRIGGER bit in the event control register
+ [EVENTCTLR1] if supported [IDR5].
+
+
+**bit (20):**
+ ETM_MODE_LPOVERRIDE
+
+**description:**
+ Set to enable the LPOVERRIDE bit in the event control register
+ [EVENTCTLR1], if supported [IDR5].
+
+
+**bit (21):**
+ ETM_MODE_ISTALL_EN
+
+**description:**
+ Set to enable the ISTALL bit in the stall control register
+	[STALLCTLR].
+
+
+**bit (23):**
+ ETM_MODE_INSTPRIO
+
+**description:**
+ Set to enable the INSTPRIORITY bit in the stall control register
+	[STALLCTLR], if supported [IDR0].
+
+
+**bit (24):**
+ ETM_MODE_NOOVERFLOW
+
+**description:**
+ Set to enable the NOOVERFLOW bit in the stall control register
+ [STALLCTLR], if supported [IDR3].
+
+
+**bit (25):**
+ ETM_MODE_TRACE_RESET
+
+**description:**
+ Set to enable the TRCRESET bit in the viewinst control register
+	[VICTLR], if supported [IDR3].
+
+
+**bit (26):**
+ ETM_MODE_TRACE_ERR
+
+**description:**
+ Set to enable the TRCCTRL bit in the viewinst control register
+ [VICTLR].
+
+
+**bit (27):**
+ ETM_MODE_VIEWINST_STARTSTOP
+
+**description:**
+ Set the initial state value of the ViewInst start / stop logic
+	in the viewinst control register [VICTLR].
+
+
+**bit (30):**
+ ETM_MODE_EXCL_KERN
+
+**description:**
+ Set default trace setup to exclude kernel mode trace (see note a)
+
+
+**bit (31):**
+ ETM_MODE_EXCL_USER
+
+**description:**
+ Set default trace setup to exclude user space trace (see note a)
+
+----
+
+*Note a)* On startup the ETM is programmed to trace the complete address space
+using address range comparator 0. ‘mode’ bits 30 / 31 modify this setting to
+set EL exclude bits for NS state in either user space (EL0) or kernel space
+(EL1) in the address range comparator. (The default setting excludes all
+secure ELs and NS EL2.)
+
+Once the reset parameter has been used and/or custom programming has been
+implemented, using these bits will result in the EL bits for address
+comparator 0 being set in the same way.
+
+*Note b)* Bits 2-3, 8-10, 15-16, 18 and 22 control features that only work with
+data trace. As A-profile data trace is architecturally prohibited in ETMv4,
+these have been omitted here. Possible uses could be where a kernel has
+support for control of R or M profile infrastructure as part of a heterogeneous
+system.
+
+Bits 17, 28-29 are unused.
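+
+For example, assuming the implementation supports both features, branch
+broadcast (ETM_MODE_BB, bit 4 = 0x10) and cycle accurate trace
+(ETMv4_MODE_CYCACC, bit 5 = 0x20) can be requested together by writing the
+combined bitfield:
+
+  ``$> echo 0x30 > mode``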
diff --git a/Documentation/trace/coresight.rst b/Documentation/trace/coresight/coresight.rst
index 72f4b7ef1bad..a566719f8e7e 100644
--- a/Documentation/trace/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -489,7 +489,7 @@ interface provided for that purpose by the generic STM API::
crw------- 1 root root 10, 61 Jan 3 18:11 /dev/stm0
root@genericarmv8:~#
-Details on how to use the generic STM API can be found here [#second]_.
+Details on how to use the generic STM API can be found here: :doc:`../stm` [#second]_.
.. [#first] Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
diff --git a/Documentation/trace/coresight/index.rst b/Documentation/trace/coresight/index.rst
new file mode 100644
index 000000000000..8d31b155a87c
--- /dev/null
+++ b/Documentation/trace/coresight/index.rst
@@ -0,0 +1,9 @@
+==============================
+CoreSight - ARM Hardware Trace
+==============================
+
+.. toctree::
+ :maxdepth: 2
+ :glob:
+
+ *
diff --git a/Documentation/trace/ftrace-uses.rst b/Documentation/trace/ftrace-uses.rst
index 1fbc69894eed..2a05e770618a 100644
--- a/Documentation/trace/ftrace-uses.rst
+++ b/Documentation/trace/ftrace-uses.rst
@@ -146,7 +146,7 @@ FTRACE_OPS_FL_RECURSION_SAFE
itself or any nested functions that those functions call.
If this flag is set, it is possible that the callback will also
- be called with preemption enabled (when CONFIG_PREEMPT is set),
+ be called with preemption enabled (when CONFIG_PREEMPTION is set),
but this is not guaranteed.
FTRACE_OPS_FL_IPMODIFY
@@ -170,6 +170,14 @@ FTRACE_OPS_FL_RCU
a callback may be executed and RCU synchronization will not protect
it.
+FTRACE_OPS_FL_PERMANENT
+	If this is set on any ftrace ops, then tracing cannot be disabled by
+	writing 0 to the proc sysctl ftrace_enabled. Equally, a callback with
+	the flag set cannot be registered if ftrace_enabled is 0.
+
+	Livepatch uses this flag so as not to lose its function redirection,
+	keeping the system protected.
+
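+	A hypothetical session, assuming a permanent callback (e.g. from an
+	applied livepatch) is registered::
+
+		# echo 0 > /proc/sys/kernel/ftrace_enabled
+		sh: write error: Device or resource busy
+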
Filtering which functions to trace
==================================
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index e3060eedb22d..d2b5657ed33e 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -2976,7 +2976,9 @@ Note, the proc sysctl ftrace_enable is a big on/off switch for the
function tracer. By default it is enabled (when function tracing is
enabled in the kernel). If it is disabled, all function tracing is
disabled. This includes not only the function tracers for ftrace, but
-also for any other uses (perf, kprobes, stack tracing, profiling, etc).
+also for any other uses (perf, kprobes, stack tracing, profiling, etc). It
+cannot be disabled if a callback with FTRACE_OPS_FL_PERMANENT set is
+registered.
Please disable this with care.
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index b7891cb1ab4d..04acd277c5f6 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -23,5 +23,4 @@ Linux Tracing Technologies
intel_th
stm
sys-t
- coresight
- coresight-cpu-debug
+ coresight/index
diff --git a/Documentation/trace/intel_th.rst b/Documentation/trace/intel_th.rst
index baa12eb09ef4..70b7126eaeeb 100644
--- a/Documentation/trace/intel_th.rst
+++ b/Documentation/trace/intel_th.rst
@@ -44,7 +44,8 @@ Documentation/trace/stm.rst for more information on that.
MSU can be configured to collect trace data into a system memory
buffer, which can later on be read from its device nodes via read() or
-mmap() interface.
+mmap() interface and directed to a "software sink" driver that will
+consume the data and/or relay it further.
On the whole, Intel(R) Trace Hub does not require any special
userspace software to function; everything can be configured, started
@@ -122,3 +123,28 @@ In order to enable the host mode, set the 'host_mode' parameter of the
will show up on the intel_th bus. Also, trace configuration and
capture controlling attribute groups of the 'gth' device will not be
exposed. The 'sth' device will operate as usual.
+
+Software Sinks
+--------------
+
+The Memory Storage Unit (MSU) driver provides an in-kernel API for
+drivers to register themselves as software sinks for the trace data.
+Such drivers can further export the data via other devices, such as
+USB device controllers or network cards.
+
+The API has two main parts:
+ - notifying the software sink that a particular window is full, and
+ "locking" that window, that is, making it unavailable for the trace
+ collection; when this happens, the MSU driver will automatically
+ switch to the next window in the buffer if it is unlocked, or stop
+ the trace capture if it's not;
+ - tracking the "locked" state of windows and providing a way for the
+ software sink driver to notify the MSU driver when a window is
+ unlocked and can be used again to collect trace data.
+
+An example sink driver, msu-sink, illustrates the implementation of a
+software sink. Functionally, it simply unlocks windows as soon as they
+are full, keeping the MSU running in a circular buffer mode. Unlike the
+"multi" mode, it will fill out all the windows in the buffer as opposed
+to just the first one. It can be enabled by writing "sink" to the "mode"
+file (assuming msu-sink.ko is loaded).
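A possible sequence for trying this out (the 0-msc0 device path follows the
naming used elsewhere in this document)::

    # modprobe msu-sink
    # echo sink > /sys/bus/intel_th/devices/0-msc0/mode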
diff --git a/Documentation/translations/it_IT/process/magic-number.rst b/Documentation/translations/it_IT/process/magic-number.rst
index ed1121d0ba84..783e0de314a0 100644
--- a/Documentation/translations/it_IT/process/magic-number.rst
+++ b/Documentation/translations/it_IT/process/magic-number.rst
@@ -87,7 +87,6 @@ FF_MAGIC 0x4646 fc_info ``drivers/net/ip
ISICOM_MAGIC 0x4d54 isi_port ``include/linux/isicom.h``
PTY_MAGIC 0x5001 ``drivers/char/pty.c``
PPP_MAGIC 0x5002 ppp ``include/linux/if_pppvar.h``
-SERIAL_MAGIC 0x5301 async_struct ``include/linux/serial.h``
SSTATE_MAGIC 0x5302 serial_state ``include/linux/serial.h``
SLIP_MAGIC 0x5302 slip ``drivers/net/slip.h``
STRIP_MAGIC 0x5303 strip ``drivers/net/strip.c``
diff --git a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
index 118fb4153e8f..f3c8e8d377ee 100644
--- a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
+++ b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
@@ -455,7 +455,7 @@ available solutions:
  `GnuK`_ from the FSIJ. This is one of the few devices to support ED25519
  ECC keys, but it offers fewer security features (such as tamper
  resistance or protection against some side-channel attacks).
-- `Nitrokey Pro`_: similar to the Nitrokey Start, but more resistant to
+- `Nitrokey Pro 2`_: similar to the Nitrokey Start, but more resistant to
  tampering and offering more security features. The Pro 2 supports
  ECC (NISTP) cryptography.
- `Yubikey 5`_: the hardware and software are proprietary, but it is cheaper
diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst
index b3f51b19de7c..ae3ad897d2ae 100644
--- a/Documentation/translations/ko_KR/howto.rst
+++ b/Documentation/translations/ko_KR/howto.rst
@@ -240,21 +240,21 @@ Documents using ReST markup are generated in Documentation/output.
consists of kernel branches specialized for each subsystem. Several other
main branches are as follows:
-  - main 4.x kernel tree
-  - 4.x.y - stable kernel trees
-  - subsystem-specific kernel trees and patches
-  - the 4.x -next kernel tree for integration tests
+  - Linus's mainline tree
+  - Various stable trees with multiple major numbers
+  - Kernel trees for subsystems
+  - The linux-next kernel tree for integration tests
-4.x kernel tree
+Mainline tree
~~~~~~~~~~~~~
-4.x kernels are maintained by Linus Torvalds, and can be found in the
-pub/linux/kernel/v4.x/ directory of https://kernel.org. The development process is as follows.
+The mainline tree is maintained by Linus Torvalds, and can be found at
+https://kernel.org or in its source repository. The development process is as follows.
- As soon as a new kernel is released, a window of two weeks opens. During
  this period maintainers can submit big diffs to Linus; usually these patches
-  have already been in the -next kernel for a few weeks. The preferred way to
-  submit big changes is to use git (the kernel's source management tool; more
+  have already been in the linux-next kernel for a few weeks. The preferred
+  way to submit big changes is to use git (the kernel's source management tool; more
  information can be found at https://git-scm.com/), but sending them as
  plain patch files is also fine.
- After two weeks an -rc1 kernel is released, and from then on the focus is on making the new kernel
@@ -281,28 +281,25 @@ there is a posting from Andrew Morton:
it is released according to the status of bugs, not according to a
predetermined timeline."*
-4.x.y - stable kernel trees
-~~~~~~~~~~~~~~~~~~~~~~
+Various stable kernel trees with multiple major numbers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Kernels with 3-part version numbers are -stable kernels. They contain
-relatively small and critical fixes for security problems or significant
-regressions discovered in a given 4.x kernel.
+Kernels with 3-part version numbers are -stable kernels. They contain
+relatively small and critical fixes for security problems or significant
+regressions discovered in the corresponding major mainline release; the
+first two version numbers indicate the same base version.
This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental versions.
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is
-the current stable kernel.
-
-4.x.y kernels are maintained by the "stable" team <stable@vger.kernel.org>, and
-are released almost every other week.
+The -stable trees are maintained by the "stable" team <stable@vger.kernel.org>,
+and are released almost every other week.
The file :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
in the kernel tree documents what kinds of changes are acceptable for
the -stable tree and how the release process works.
-Subsystem-specific kernel trees and patches
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Subsystem-specific kernel trees
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The maintainers of the various kernel subsystems --- and also many kernel subsystem developers
--- expose their current development state in source repositories. Through these, other people can also
@@ -324,17 +321,18 @@ there is a posting from Andrew Morton:
Most of these patchwork sites are listed at https://patchwork.kernel.org/ or
http://patchwork.ozlabs.org/.
-4.x - the next kernel tree for integration tests
----------------------------------------
-Before changes from the subsystem trees are merged into the mainline 4.x
-tree, they need to be integration-tested. For this purpose a special testing
-repository exists into which virtually all subsystem trees are pulled on an almost daily basis:
+The linux-next kernel tree for integration tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before changes from the subsystem trees are merged into the mainline tree,
+they need to be integration-tested. For this purpose a special testing
+repository exists into which virtually all subsystem trees are pulled on an
+almost daily basis:
	https://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
-In this way, the -next kernel gives a summary outlook of what is expected to
-go into the mainline kernel at the next merge period. Adventurous testers are
-welcome to runtime-test the -next kernel.
+In this way, the linux-next kernel gives a summary outlook of what is
+expected to go into the mainline kernel at the next merge period. Adventurous
+testers are welcome to runtime-test the linux-next kernel.
Reporting bugs
diff --git a/Documentation/translations/ko_KR/index.rst b/Documentation/translations/ko_KR/index.rst
index 0b695345abc7..27995c4233de 100644
--- a/Documentation/translations/ko_KR/index.rst
+++ b/Documentation/translations/ko_KR/index.rst
@@ -3,8 +3,8 @@
\renewcommand\thesection*
\renewcommand\thesubsection*
-Korean translations
-===================
+한국어 번역
+===========
.. toctree::
:maxdepth: 1
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 2774624ee843..f07c40a068b5 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1907,21 +1907,6 @@ Mandatory barriers have SMP effects on both SMP and UP systems
please refer to the Documentation/DMA-API.txt document.
-MMIO WRITE BARRIER
-------------------
-
-The Linux kernel also has a special barrier for use with memory-mapped I/O
-writes:
-
-	mmiowb();
-
-This is a variation on the mandatory write barrier that causes writes to weakly
-ordered I/O regions to be partially ordered. Its effects may go beyond the
-CPU->Hardware interface and actually affect the hardware at some level.
-
-See the "ACQUIRE VS I/O ACCESSES" subsection for more information.
-
-
===============================
IMPLICIT KERNEL MEMORY BARRIERS
===============================
@@ -2283,73 +2268,6 @@ ACQUIRE VS MEMORY ACCESSES
	*E, *F or *G following RELEASE Q
-
-ACQUIRE VS I/O ACCESSES
------------------------
-
-Under certain circumstances (especially involving NUMA), I/O accesses within
-two spinlocked critical sections on two different CPUs may be seen as
-interleaved by the PCI bridge, because the PCI bridge is not obliged to
-participate in the cache-coherence protocol, and so the required read memory
-barriers are not issued.
-
-For example:
-
-	CPU 1				CPU 2
-	===============================	===============================
-	spin_lock(Q)
-	writel(0, ADDR)
-	writel(1, DATA);
-	spin_unlock(Q);
-					spin_lock(Q);
-					writel(4, ADDR);
-					writel(5, DATA);
-					spin_unlock(Q);
-
-may be seen by the PCI bridge as follows:
-
-	STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
-
-which would probably cause the hardware to malfunction.
-
-
-What is necessary here is to issue an mmiowb() before dropping the spinlock,
-for example:
-
-	CPU 1				CPU 2
-	===============================	===============================
-	spin_lock(Q)
-	writel(0, ADDR)
-	writel(1, DATA);
-	mmiowb();
-	spin_unlock(Q);
-					spin_lock(Q);
-					writel(4, ADDR);
-					writel(5, DATA);
-					mmiowb();
-					spin_unlock(Q);
-
-this will ensure that the two stores issued on CPU 1 appear at the PCI bridge
-before either of the stores issued on CPU 2.
-
-
-Furthermore, following a store with a load from the same device obviates the
-need for the mmiowb(), because the load forces the store to complete before
-the load is performed:
-
-	CPU 1				CPU 2
-	===============================	===============================
-	spin_lock(Q)
-	writel(0, ADDR)
-	a = readl(DATA);
-	spin_unlock(Q);
-					spin_lock(Q);
-					writel(4, ADDR);
-					b = readl(DATA);
-					spin_unlock(Q);
-
-
-See Documentation/driver-api/device-io.rst for more information.
-
-
=================================
WHERE ARE MEMORY BARRIERS NEEDED?
=================================
@@ -2494,14 +2412,9 @@ do _not_.
Inside the Linux kernel, I/O should be done through the appropriate accessor
routines - such as inb() or writel() - which know how to make such accesses
appropriately sequential. For the most part these render explicit memory barriers
-unnecessary; however, in the following two situations explicit memory barriers may be needed:
-
- (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
-     so locks should be used in _all_ general drivers, and mmiowb() must be
-     issued prior to unlocking the critical section.
-
- (2) If the accessor functions are used to refer to an I/O memory window with
-     relaxed memory access properties, then _mandatory_ memory barriers are required to enforce ordering.
+unnecessary; however, if the accessor functions are used to refer to an I/O
+memory window with relaxed memory access properties, then _mandatory_ memory
+barriers are required to enforce ordering.
See Documentation/driver-api/device-io.rst for more information.
@@ -2545,10 +2458,9 @@ do _not_.
it must be assumed that they may be mixed with accesses performed within an
interrupt - and vice versa.
-Normally this will not be a problem because the I/O accesses done within such
-regions will include synchronous load operations on strictly ordered I/O
-registers that form implicit I/O barriers. If this is not sufficient, an
-mmiowb() may need to be used explicitly.
+Normally this will not be a problem because the I/O accesses done within such
+regions will include load operations on strictly ordered I/O registers that
+form implicit I/O barriers.
A similar situation may occur between an interrupt routine and two routines
@@ -2560,67 +2472,102 @@ an mmiowb() may need to be used explicitly.
KERNEL I/O BARRIER EFFECTS
==========================
-When accessing I/O memory, drivers should use the appropriate accessor
-functions:
+Interfacing with peripherals via I/O accesses is deeply architecture and device
+specific. Therefore, drivers which are inherently non-portable may rely on
+specific behaviours of their target systems in order to achieve synchronization
+in the most lightweight manner possible. For drivers intending to be portable
+between multiple architectures and bus implementations, the kernel offers a
+series of accessor functions that provide various degrees of ordering guarantees:
- (*) inX(), outX():
-
-	These are intended to talk to I/O space rather than memory space, but
-	that's primarily a CPU-specific concept. The i386 and x86_64
-	processors do in fact have special I/O space access cycles and
-	instructions, but many other CPUs don't have such a concept.
-
-	Amongst others, the PCI bus defines an I/O space concept which - on
-	CPUs such as i386 and x86_64 - readily maps to the CPU's concept of
-	I/O space. However, on CPUs with no alternative I/O space it may also
-	be mapped as a virtual I/O space in the CPU's memory map.
-
-	Accesses to this space may be fully synchronous (as on i386), but
-	intermediary bridges (such as the PCI host bridge) may not fully
-	guarantee that.
+ (*) readX(), writeX():
-	They are guaranteed to be fully ordered with respect to each other.
+	The readX() and writeX() MMIO accessors take a pointer to the
+	peripheral being accessed as an __iomem * parameter. For pointers
+	mapped with the default I/O attributes (e.g. those returned by
+	ioremap()), the ordering guarantees are as follows:
+
+	1. All readX() and writeX() accesses to the same peripheral are
+	   ordered with respect to each other. This ensures that MMIO register
+	   accesses by the same CPU thread to a particular device will arrive
+	   in program order.
+
+	2. A writeX() issued by a CPU thread holding a spinlock is ordered
+	   before a writeX() to the same peripheral from another CPU thread
+	   issued after a later acquisition of the same spinlock. This ensures
+	   that MMIO register writes to a particular device issued while
+	   holding a spinlock will arrive in an order consistent with
+	   acquisitions of the lock.
+
+	3. A writeX() by a CPU thread to the peripheral will first wait for
+	   the completion of all prior writes to memory either issued by, or
+	   propagated to, the same thread. This ensures that writes by the CPU
+	   to an outbound DMA buffer allocated by dma_alloc_coherent() will be
+	   visible to a DMA engine when the CPU writes to its MMIO control
+	   register to trigger the transfer.
+
+	4. A readX() by a CPU thread from the peripheral will complete before
+	   any subsequent reads from memory by the same thread can begin. This
+	   ensures that reads by the CPU from an incoming DMA buffer allocated
+	   by dma_alloc_coherent() will not see stale data after reading the
+	   DMA engine's MMIO status register to establish that the DMA
+	   transfer has completed.
+
+	5. A readX() by a CPU thread from the peripheral will complete before
+	   any subsequent delay() loop can begin execution on the same thread.
+	   This ensures that two MMIO register writes by the CPU to a
+	   peripheral will arrive at least 1us apart if the first write is
+	   immediately read back with readX() and udelay(1) is called prior to
+	   the second writeX():
+
+		writel(42, DEVICE_REGISTER_0); // Arrives at the device...
+		readl(DEVICE_REGISTER_0);
+		udelay(1);
+		writel(42, DEVICE_REGISTER_1); // ...at least 1us before this.
+
+	The ordering properties of __iomem pointers obtained with non-default
+	attributes (e.g. those returned by ioremap_wc()) are specific to the
+	underlying architecture, so accesses to these kinds of mappings cannot
+	rely on the guarantees listed above.
-	They are not guaranteed to be fully ordered with respect to other
-	types of memory and I/O operation.
+ (*) readX_relaxed(), writeX_relaxed()
- (*) readX(), writeX():
+	These are similar to readX() and writeX(), but provide weaker memory
+	ordering guarantees. Specifically, they do not guarantee ordering with
+	respect to normal memory accesses or delay() loops (i.e. bullets 2-5
+	above), but they are still guaranteed to be ordered with respect to
+	accesses by the same CPU thread to the same peripheral when operating
+	on __iomem pointers mapped with the default I/O attributes.
-	Whether these are guaranteed to be fully ordered and uncombined with
-	respect to each other on the issuing CPU depends on the
-	characteristics defined for the memory window through which they're
-	accessing. On later i386 machines, for example, this is controlled by the MTRR registers.
+ (*) readsX(), writesX():
-	Ordinarily, these will be guaranteed to be fully ordered and
-	uncombined, provided they're not accessing a prefetchable device.
+	The readsX() and writesX() MMIO accessors are designed for accessing
+	register-based, memory-mapped FIFOs residing on peripherals that are
+	not capable of performing DMA. Consequently, they provide only the
+	ordering guarantees of readX_relaxed() and writeX_relaxed(), as
+	documented above.
-	However, intermediary hardware (such as a PCI bridge) may indulge in
-	deferral if it so wishes; to flush a store, a load from the same
-	location is preferred[*], but for PCI a load from the same device or
-	from configuration space should suffice.
+ (*) inX(), outX():
-	[*] NOTE! attempting to load from the same location as was written to
-	    may cause a malfunction - consider the 16550 Rx/Tx serial
-	    registers for example.
+	The inX() and outX() accessors are intended to access legacy
+	port-mapped I/O peripherals, which may require special instructions
+	on some architectures (notably x86).
-	If prefetchable I/O memory is used, an mmiowb() barrier may be
-	required to force stores to be ordered.
+	Since many CPU architectures ultimately access these peripherals via
+	an internal virtual memory mapping, the portable ordering guarantees
+	provided by inX() and outX() are the same as those provided by readX()
+	and writeX() respectively when accessing a mapping with the default
+	I/O attributes.
-	Please refer to the PCI specification for more information on
-	interactions between PCI transactions.
+	Device drivers may expect outX() to emit a non-posted write
+	transaction that waits for a completion response from the I/O
+	peripheral before returning. This is not guaranteed by all
+	architectures and is therefore not part of the portable ordering
+	semantics.
- (*) readX_relaxed(), writeX_relaxed()
+ (*) insX(), outsX():
-	These are similar to readX() and writeX(), but provide weaker memory
-	ordering guarantees. Specifically, they do not guarantee ordering with
-	respect to normal memory accesses (e.g. DMA buffers) nor with respect
-	to LOCK or UNLOCK operations. If ordering with respect to LOCK or
-	UNLOCK operations is required, an mmiowb() barrier can be used. Note
-	that relaxed accesses to the same peripheral are ordered with respect to each other.
+	As above, the insX() and outsX() accessors provide the same ordering
+	guarantees as readsX() and writesX() respectively when accessing a
+	mapping with the default I/O attributes.
 (*) ioreadX(), iowriteX()
-	These will perform appropriately for the type of access they're
-	actually doing, be it inX()/outX() or readX()/writeX().
+	These will perform appropriately for the type of access they're
+	actually doing, be it inX()/outX() or readX()/writeX().
+
+With the exception of the string accessors (insX(), outsX(), readsX() and
+writesX()), all of the above assume that the underlying peripheral is
+little-endian, and will therefore perform byte-swapping operations on
+big-endian architectures.
===================================
diff --git a/Documentation/translations/zh_CN/process/magic-number.rst b/Documentation/translations/zh_CN/process/magic-number.rst
index 15c592518194..e4c225996af0 100644
--- a/Documentation/translations/zh_CN/process/magic-number.rst
+++ b/Documentation/translations/zh_CN/process/magic-number.rst
@@ -70,7 +70,6 @@ FF_MAGIC 0x4646 fc_info ``drivers/net/ip
ISICOM_MAGIC 0x4d54 isi_port ``include/linux/isicom.h``
PTY_MAGIC 0x5001 ``drivers/char/pty.c``
PPP_MAGIC 0x5002 ppp ``include/linux/if_pppvar.h``
-SERIAL_MAGIC 0x5301 async_struct ``include/linux/serial.h``
SSTATE_MAGIC 0x5302 serial_state ``include/linux/serial.h``
SLIP_MAGIC 0x5302 slip ``drivers/net/slip.h``
STRIP_MAGIC 0x5303 strip ``drivers/net/strip.c``
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index ad494da40009..e983488b48b1 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -21,6 +21,7 @@ place where this information is gathered.
unshare
spec_ctrl
accelerators/ocxl
+ ioctl/index
.. only:: subproject and html
diff --git a/Documentation/ioctl/cdrom.rst b/Documentation/userspace-api/ioctl/cdrom.rst
index 3b4c0506de46..3b4c0506de46 100644
--- a/Documentation/ioctl/cdrom.rst
+++ b/Documentation/userspace-api/ioctl/cdrom.rst
diff --git a/Documentation/ioctl/hdio.rst b/Documentation/userspace-api/ioctl/hdio.rst
index e822e3dff176..e822e3dff176 100644
--- a/Documentation/ioctl/hdio.rst
+++ b/Documentation/userspace-api/ioctl/hdio.rst
diff --git a/Documentation/ioctl/index.rst b/Documentation/userspace-api/ioctl/index.rst
index 0f0a857f6615..475675eae086 100644
--- a/Documentation/ioctl/index.rst
+++ b/Documentation/userspace-api/ioctl/index.rst
@@ -9,7 +9,6 @@ IOCTLs
ioctl-number
- botching-up-ioctls
ioctl-decoding
cdrom
diff --git a/Documentation/ioctl/ioctl-decoding.rst b/Documentation/userspace-api/ioctl/ioctl-decoding.rst
index 380d6bb3e3ea..380d6bb3e3ea 100644
--- a/Documentation/ioctl/ioctl-decoding.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-decoding.rst
diff --git a/Documentation/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 4ef86433bd67..4ef86433bd67 100644
--- a/Documentation/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 0a5960beccf7..893a8ba0e9fe 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -147,49 +147,16 @@ Address space mirroring implementation and API
Address space mirroring's main objective is to allow duplication of a range of
CPU page table into a device page table; HMM helps keep both synchronized. A
device driver that wants to mirror a process address space must start with the
-registration of an hmm_mirror struct::
-
- int hmm_mirror_register(struct hmm_mirror *mirror,
- struct mm_struct *mm);
-
-The mirror struct has a set of callbacks that are used
-to propagate CPU page tables::
-
- struct hmm_mirror_ops {
- /* release() - release hmm_mirror
- *
- * @mirror: pointer to struct hmm_mirror
- *
- * This is called when the mm_struct is being released. The callback
- * must ensure that all access to any pages obtained from this mirror
- * is halted before the callback returns. All future access should
- * fault.
- */
- void (*release)(struct hmm_mirror *mirror);
-
- /* sync_cpu_device_pagetables() - synchronize page tables
- *
- * @mirror: pointer to struct hmm_mirror
- * @update: update information (see struct mmu_notifier_range)
- * Return: -EAGAIN if update.blockable false and callback need to
- * block, 0 otherwise.
- *
- * This callback ultimately originates from mmu_notifiers when the CPU
- * page table is updated. The device driver must update its page table
- * in response to this callback. The update argument tells what action
- * to perform.
- *
- * The device driver must not return from this callback until the device
- * page tables are completely updated (TLBs flushed, etc); this is a
- * synchronous call.
- */
- int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
- const struct hmm_update *update);
- };
-
-The device driver must perform the update action to the range (mark range
-read only, or fully unmap, etc.). The device must complete the update before
-the driver callback returns.
+registration of a mmu_interval_notifier::
+
+ mni->ops = &driver_ops;
+ int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+ unsigned long start, unsigned long length,
+ struct mm_struct *mm);
+
+During the driver_ops->invalidate() callback the device driver must perform
+the update action to the range (mark range read only, or fully unmap,
+etc.). The device must complete the update before the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use::
@@ -216,70 +183,46 @@ The usage pattern is::
struct hmm_range range;
...
+ range.notifier = &mni;
range.start = ...;
range.end = ...;
range.pfns = ...;
range.flags = ...;
range.values = ...;
range.pfn_shift = ...;
- hmm_range_register(&range, mirror);
- /*
- * Just wait for range to be valid, safe to ignore return value as we
- * will use the return value of hmm_range_fault() below under the
- * mmap_sem to ascertain the validity of the range.
- */
- hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
+ if (!mmget_not_zero(mni->notifier.mm))
+ return -EFAULT;
again:
+ range.notifier_seq = mmu_interval_read_begin(&mni);
down_read(&mm->mmap_sem);
ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
if (ret) {
up_read(&mm->mmap_sem);
- if (ret == -EBUSY) {
- /*
- * No need to check hmm_range_wait_until_valid() return value
- * on retry we will get proper error with hmm_range_fault()
- */
- hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
- goto again;
- }
- hmm_range_unregister(&range);
+ if (ret == -EBUSY)
+ goto again;
return ret;
}
+ up_read(&mm->mmap_sem);
+
take_lock(driver->update);
- if (!hmm_range_valid(&range)) {
+	 if (mmu_interval_read_retry(&mni, range.notifier_seq)) {
release_lock(driver->update);
- up_read(&mm->mmap_sem);
goto again;
}
- // Use pfns array content to update device page table
+ /* Use pfns array content to update device page table,
+ * under the update lock */
- hmm_range_unregister(&range);
release_lock(driver->update);
- up_read(&mm->mmap_sem);
return 0;
}
The driver->update lock is the same lock that the driver takes inside its
-sync_cpu_device_pagetables() callback. That lock must be held before calling
-hmm_range_valid() to avoid any race with a concurrent CPU page table update.
-
-HMM implements all this on top of the mmu_notifier API because we wanted a
-simpler API and also to be able to perform optimizations latter on like doing
-concurrent device updates in multi-devices scenario.
-
-HMM also serves as an impedance mismatch between how CPU page table updates
-are done (by CPU write to the page table and TLB flushes) and how devices
-update their own page table. Device updates are a multi-step process. First,
-appropriate commands are written to a buffer, then this buffer is scheduled for
-execution on the device. It is only once the device has executed commands in
-the buffer that the update is done. Creating and scheduling the update command
-buffer can happen concurrently for multiple devices. Waiting for each device to
-report commands as executed is serialized (there is no point in doing this
-concurrently).
-
+invalidate() callback. That lock must be held before calling
+mmu_interval_read_retry() to avoid any race with a concurrent CPU page table
+update.
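+
+A minimal sketch of the notifier ops wired up above (names are illustrative;
+the invalidate() callback must take the driver->update lock, mark the device
+page table entries covering the range invalid, and advance the sequence
+count)::
+
+	static bool driver_invalidate(struct mmu_interval_notifier *mni,
+				      const struct mmu_notifier_range *range,
+				      unsigned long cur_seq)
+	{
+		take_lock(driver->update);
+		mmu_interval_set_seq(mni, cur_seq);
+		/* invalidate device PTEs in [range->start, range->end) */
+		release_lock(driver->update);
+		return true;
+	}
+
+	static const struct mmu_interval_notifier_ops driver_ops = {
+		.invalidate = driver_invalidate,
+	};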
Leverage default_flags and pfn_flags_mask
=========================================
diff --git a/Documentation/w1/index.rst b/Documentation/w1/index.rst
index 57cba81865e2..156279f17553 100644
--- a/Documentation/w1/index.rst
+++ b/Documentation/w1/index.rst
@@ -1,4 +1,4 @@
-. SPDX-License-Identifier: GPL-2.0
+.. SPDX-License-Identifier: GPL-2.0
================
1-Wire Subsystem
diff --git a/MAINTAINERS b/MAINTAINERS
index 3ff146d4677e..061d59a4a80b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,12 +1,14 @@
-
-
- List of maintainers and how to submit kernel changes
+List of maintainers and how to submit kernel changes
+====================================================
Please try to follow the guidelines below. This will make things
easier on the maintainers. Not all of these guidelines matter for every
trivial patch so apply some common sense.
-1. Always _test_ your changes, however small, on at least 4 or
+Tips for patch submitters
+-------------------------
+
+1. Always *test* your changes, however small, on at least 4 or
5 people, preferably many more.
2. Try to release a few ALPHA test versions to the net. Announce
@@ -25,7 +27,7 @@ trivial patch so apply some common sense.
testing and await feedback.
5. Make a patch available to the relevant maintainer in the list. Use
- 'diff -u' to make the patch easy to merge. Be prepared to get your
+ ``diff -u`` to make the patch easy to merge. Be prepared to get your
changes sent back with seemingly silly requests about formatting
and variable names. These aren't as silly as they seem. One
job the maintainers (and especially Linus) do is to keep things
@@ -38,7 +40,7 @@ trivial patch so apply some common sense.
See Documentation/process/coding-style.rst for guidance here.
PLEASE CC: the maintainers and mailing lists that are generated
- by scripts/get_maintainer.pl. The results returned by the
+	by ``scripts/get_maintainer.pl``. The results returned by the
script will be best if you have git installed and are making
your changes in a branch derived from Linus' latest git tree.
See Documentation/process/submitting-patches.rst for details.
@@ -70,26 +72,27 @@ trivial patch so apply some common sense.
not represent an immediate threat and are better handled publicly,
and ideally, should come with a patch proposal. Please do not send
automated reports to this list either. Such bugs will be handled
- better and faster in the usual public places.
+ better and faster in the usual public places. See
+ Documentation/admin-guide/security-bugs.rst for details.
8. Happy hacking.
-Descriptions of section entries:
+Descriptions of section entries
+-------------------------------
- P: Person (obsolete)
- M: Mail patches to: FullName <address@domain>
- R: Designated reviewer: FullName <address@domain>
+ M: *Mail* patches to: FullName <address@domain>
+ R: Designated *Reviewer*: FullName <address@domain>
These reviewers should be CCed on patches.
- L: Mailing list that is relevant to this area
- W: Web-page with status/info
- B: URI for where to file bugs. A web-page with detailed bug
+ L: *Mailing list* that is relevant to this area
+ W: *Web-page* with status/info
+ B: URI for where to file *bugs*. A web-page with detailed bug
filing info, a direct bug tracker link, or a mailto: URI.
- C: URI for chat protocol, server and channel where developers
+ C: URI for *chat* protocol, server and channel where developers
usually hang out, for example irc://server/channel.
- Q: Patchwork web based patch tracking system site
- T: SCM tree type and location.
+ Q: *Patchwork* web based patch tracking system site
+ T: *SCM* tree type and location.
Type is one of: git, hg, quilt, stgit, topgit
- S: Status, one of the following:
+ S: *Status*, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
Odd Fixes: It has a maintainer but they don't have time to do
@@ -99,13 +102,17 @@ Descriptions of section entries:
Obsolete: Old code. Something tagged obsolete generally means
it has been replaced by a better system and you
should be using that.
- F: Files and directories with wildcard patterns.
+	   P: Subsystem *Profile* document giving more details on submitting
+	      patches to the given subsystem. This is either an in-tree file,
+	      or a URI. See Documentation/maintainer/maintainer-entry-profile.rst
+	      for details.
+	   F: *Files* and directories with wildcard patterns.
A trailing slash includes all files and subdirectory files.
F: drivers/net/ all files in and below drivers/net
F: drivers/net/* all files in drivers/net, but not below
F: */net/* all files in "any top level directory"/net
One pattern per line. Multiple F: lines acceptable.
- N: Files and directories with regex patterns.
+	N: Files and directories with *Regex* patterns.
N: [^a-z]tegra all files whose path contains the word tegra
One pattern per line. Multiple N: lines acceptable.
scripts/get_maintainer.pl has different behavior for files that
@@ -113,14 +120,14 @@ Descriptions of section entries:
get_maintainer will not look at git log history when an F: pattern
match occurs. When an N: match occurs, git log history is used
to also notify the people that have git commit signatures.
- X: Files and directories that are NOT maintained, same rules as F:
- Files exclusions are tested before file matches.
+ X: *Excluded* files and directories that are NOT maintained, same
+	   rules as F:. File exclusions are tested before file matches.
Can be useful for excluding a specific subdirectory, for instance:
F: net/
X: net/ipv6/
matches all files in and below net excluding net/ipv6/
- K: Keyword perl extended regex pattern to match content in a
- patch or file. For instance:
+	K: *Content regex* (perl extended) pattern to match in a patch or file.
+ For instance:
K: of_get_profile
matches patches or files that contain "of_get_profile"
K: \b(printk|pr_(info|err))\b
@@ -128,13 +135,12 @@ Descriptions of section entries:
printk, pr_info or pr_err
One regex pattern per line. Multiple K: lines acceptable.
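+
+	For instance, a complete entry combining several of the tags above
+	might look like this (the subsystem name, addresses and paths are
+	hypothetical, chosen only for illustration):
+
+	FOO BUS DRIVER
+	M:	Ann Example <ann@example.com>
+	L:	foo-bus@lists.example.com
+	S:	Maintained
+	F:	drivers/foo/
+	X:	drivers/foo/legacy/
+	K:	\bfoo_(read|write)\b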
-Note: For the hard of thinking, this list is meant to remain in alphabetical
-order. If you could add yourselves to it in alphabetical order that would be
-so much easier [Ed]
+Maintainers List
+----------------
-Maintainers List (try to look for most precise areas first)
-
- -----------------------------------
+.. note:: When reading this list, please look for the most precise areas
+ first. When adding to this list, please keep the entries in
+ alphabetical order.
3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@kernel.org>
@@ -295,7 +301,7 @@ S: Maintained
F: drivers/net/ethernet/alteon/acenic*
ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
-M: Peter Feuerer <peter@piie.net>
+M: Peter Kaestle <peter@piie.net>
L: platform-driver-x86@vger.kernel.org
W: http://piie.net/?section=acerhdf
S: Maintained
@@ -817,7 +823,7 @@ S: Orphan
F: drivers/usb/gadget/udc/amd5536udc.*
AMD GEODE PROCESSOR/CHIPSET SUPPORT
-P: Andres Salomon <dilinger@queued.net>
+M: Andres Salomon <dilinger@queued.net>
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
@@ -901,6 +907,14 @@ S: Supported
F: drivers/iio/adc/ad7124.c
F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
+ANALOG DEVICES INC AD7292 DRIVER
+M: Marcelo Schmitt <marcelo.schmitt1@gmail.com>
+L: linux-iio@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/iio/adc/ad7292.c
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
+
ANALOG DEVICES INC AD7606 DRIVER
M: Stefan Popa <stefan.popa@analog.com>
M: Beniamin Bia <beniamin.bia@analog.com>
@@ -1002,6 +1016,7 @@ F: drivers/media/i2c/adv7842*
ANALOG DEVICES INC ASOC CODEC DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
+M: Nuno Sá <nuno.sa@analog.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://wiki.analog.com/
W: http://ez.analog.com/community/linux-device-drivers
@@ -1040,6 +1055,7 @@ F: drivers/clk/analogbits/*
F: include/linux/clk/analogbits*
ANDES ARCHITECTURE
+M: Nick Hu <nickhu@andestech.com>
M: Greentime Hu <green.hu@gmail.com>
M: Vincent Chen <deanbo422@gmail.com>
T: git https://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux.git
@@ -1258,6 +1274,7 @@ F: Documentation/devicetree/bindings/display/arm,hdlcd.txt
ARM KOMEDA DRM-KMS DRIVER
M: James (Qian) Wang <james.qian.wang@arm.com>
M: Liviu Dudau <liviu.dudau@arm.com>
+M: Mihail Atanassov <mihail.atanassov@arm.com>
L: Mali DP Maintainers <malidp@foss.arm.com>
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -1279,6 +1296,8 @@ F: Documentation/gpu/afbc.rst
ARM MALI PANFROST DRM DRIVER
M: Rob Herring <robh@kernel.org>
M: Tomeu Vizoso <tomeu.vizoso@collabora.com>
+R: Steven Price <steven.price@arm.com>
+R: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -1398,6 +1417,7 @@ F: drivers/clk/actions/
F: drivers/clocksource/timer-owl*
F: drivers/dma/owl-dma.c
F: drivers/i2c/busses/i2c-owl.c
+F: drivers/mmc/host/owl-mmc.c
F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
@@ -1406,6 +1426,7 @@ F: Documentation/devicetree/bindings/arm/actions.yaml
F: Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
F: Documentation/devicetree/bindings/dma/owl-dma.txt
F: Documentation/devicetree/bindings/i2c/i2c-owl.txt
+F: Documentation/devicetree/bindings/mmc/owl-mmc.yaml
F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F: Documentation/devicetree/bindings/power/actions,owl-sps.txt
F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -1544,8 +1565,10 @@ M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm64/boot/dts/bitmain/
+F: drivers/clk/clk-bm1880.c
F: drivers/pinctrl/pinctrl-bm1880.c
F: Documentation/devicetree/bindings/arm/bitmain.yaml
+F: Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
F: Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
ARM/CALXEDA HIGHBANK ARCHITECTURE
@@ -1623,8 +1646,7 @@ R: Suzuki K Poulose <suzuki.poulose@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/hwtracing/coresight/*
-F: Documentation/trace/coresight.rst
-F: Documentation/trace/coresight-cpu-debug.rst
+F: Documentation/trace/coresight/*
F: Documentation/devicetree/bindings/arm/coresight.txt
F: Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
F: Documentation/ABI/testing/sysfs-bus-coresight-devices-*
@@ -2020,6 +2042,7 @@ F: drivers/dma/ste_dma40*
F: drivers/hwspinlock/u8500_hsem.c
F: drivers/i2c/busses/i2c-nomadik.c
F: drivers/i2c/busses/i2c-stu300.c
+F: drivers/iio/adc/ab8500-gpadc.c
F: drivers/mfd/ab3100*
F: drivers/mfd/ab8500*
F: drivers/mfd/abx500*
@@ -2165,9 +2188,11 @@ L: linux-unisoc@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/rda8810pl-*
F: drivers/clocksource/timer-rda.c
+F: drivers/gpio/gpio-rda.c
F: drivers/irqchip/irq-rda-intc.c
F: drivers/tty/serial/rda-uart.c
F: Documentation/devicetree/bindings/arm/rda.yaml
+F: Documentation/devicetree/bindings/gpio/gpio-rda.yaml
F: Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
F: Documentation/devicetree/bindings/serial/rda,8810pl-uart.txt
F: Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt
@@ -2248,8 +2273,7 @@ F: drivers/soc/samsung/
F: include/linux/soc/samsung/
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
-F: Documentation/devicetree/bindings/sram/samsung-sram.txt
-F: Documentation/devicetree/bindings/power/pd-samsung.txt
+F: Documentation/devicetree/bindings/power/pd-samsung.yaml
N: exynos
ARM/SAMSUNG MOBILE MACHINE SUPPORT
@@ -2503,10 +2527,10 @@ F: drivers/reset/reset-uniphier.c
F: drivers/tty/serial/8250/8250_uniphier.c
N: uniphier
-ARM/Ux500 CLOCK FRAMEWORK SUPPORT
+Ux500 CLOCK DRIVERS
M: Ulf Hansson <ulf.hansson@linaro.org>
+L: linux-clk@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.linaro.org/people/ulfh/clk.git
S: Maintained
F: drivers/clk/ux500/
@@ -2713,7 +2737,7 @@ M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-i2c@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
S: Maintained
-F: Documentation/devicetree/bindings/eeprom/at24.txt
+F: Documentation/devicetree/bindings/eeprom/at24.yaml
F: drivers/misc/eeprom/at24.c
ATA OVER ETHERNET (AOE) DRIVER
@@ -2874,7 +2898,6 @@ AXENTIA ARM DEVICES
M: Peter Rosin <peda@axentia.se>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/arm/axentia.txt
F: arch/arm/boot/dts/at91-linea.dtsi
F: arch/arm/boot/dts/at91-natte.dtsi
F: arch/arm/boot/dts/at91-nattis-2-natte-2.dts
@@ -3548,7 +3571,7 @@ BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS
M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
S: Maintained
F: drivers/devfreq/exynos-bus.c
F: Documentation/devicetree/bindings/devfreq/exynos-bus.txt
@@ -3691,7 +3714,7 @@ M: Oleksij Rempel <o.rempel@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
L: linux-can@vger.kernel.org
S: Maintained
-F: Documentation/networking/j1939.txt
+F: Documentation/networking/j1939.rst
F: net/can/j1939/
F: include/uapi/linux/can/j1939.h
@@ -4282,14 +4305,13 @@ F: include/linux/cpufreq.h
F: include/linux/sched/cpufreq.h
F: tools/testing/selftests/cpufreq/
-CPU FREQUENCY DRIVERS - ARM BIG LITTLE
+CPU FREQUENCY DRIVERS - VEXPRESS SPC ARM BIG LITTLE
M: Viresh Kumar <viresh.kumar@linaro.org>
M: Sudeep Holla <sudeep.holla@arm.com>
L: linux-pm@vger.kernel.org
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
S: Maintained
-F: drivers/cpufreq/arm_big_little.h
-F: drivers/cpufreq/arm_big_little.c
+F: drivers/cpufreq/vexpress-spc-cpufreq.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
@@ -4468,14 +4490,6 @@ W: http://www.chelsio.com
S: Supported
F: drivers/scsi/cxgbi/cxgb3i
-CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
-M: Potnuri Bharat Teja <bharat@chelsio.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.openfabrics.org
-S: Supported
-F: drivers/infiniband/hw/cxgb3/
-F: include/uapi/rdma/cxgb3-abi.h
-
CXGB4 CRYPTO DRIVER (chcr)
M: Atul Gupta <atul.gupta@chelsio.com>
L: linux-crypto@vger.kernel.org
@@ -4650,6 +4664,14 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
S: Maintained
F: drivers/net/fddi/defxx.*
+DEINTERLACE DRIVERS FOR ALLWINNER H3
+M: Jernej Skrabec <jernej.skrabec@siol.net>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/platform/sunxi/sun8i-di/
+F: Documentation/devicetree/bindings/media/allwinner,sun8i-h3-deinterlace.yaml
+
DELL SMBIOS DRIVER
M: Pali Rohár <pali.rohar@gmail.com>
M: Mario Limonciello <mario.limonciello@dell.com>
@@ -4774,9 +4796,9 @@ F: include/linux/devcoredump.h
DEVICE FREQUENCY (DEVFREQ)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
M: Kyungmin Park <kyungmin.park@samsung.com>
-R: Chanwoo Choi <cw00.choi@samsung.com>
+M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
S: Maintained
F: drivers/devfreq/
F: include/linux/devfreq.h
@@ -4786,10 +4808,11 @@ F: include/trace/events/devfreq.h
DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
M: Chanwoo Choi <cw00.choi@samsung.com>
L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/linux.git
S: Supported
F: drivers/devfreq/event/
F: drivers/devfreq/devfreq-event.c
+F: include/dt-bindings/pmu/exynos_ppmu.h
F: include/linux/devfreq-event.h
F: Documentation/devicetree/bindings/devfreq/event/
@@ -4891,7 +4914,6 @@ F: include/trace/events/fs_dax.h
DEVICE DIRECT ACCESS (DAX)
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
-M: Keith Busch <keith.busch@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
S: Supported
@@ -5389,12 +5411,22 @@ F: include/linux/vga*
DRM DRIVERS FOR ALLWINNER A10
M: Maxime Ripard <mripard@kernel.org>
+M: Chen-Yu Tsai <wens@csie.org>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
T: git git://anongit.freedesktop.org/drm/drm-misc
+DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE
+M: Maxime Ripard <mripard@kernel.org>
+M: Chen-Yu Tsai <wens@csie.org>
+R: Jernej Skrabec <jernej.skrabec@siol.net>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+F: drivers/gpu/drm/sun4i/sun8i*
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong@baylibre.com>
L: dri-devel@lists.freedesktop.org
@@ -6178,6 +6210,7 @@ F: include/uapi/linux/mii.h
EXFAT FILE SYSTEM
M: Valdis Kletnieks <valdis.kletnieks@vt.edu>
+L: linux-fsdevel@vger.kernel.org
S: Maintained
F: drivers/staging/exfat/
@@ -6732,6 +6765,7 @@ FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
M: Jan Kara <jack@suse.cz>
R: Amir Goldstein <amir73il@gmail.com>
L: linux-fsdevel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs.git fsnotify
S: Maintained
F: fs/notify/
F: include/linux/fsnotify*.h
@@ -6901,7 +6935,7 @@ L: linux-pm@vger.kernel.org
S: Supported
F: drivers/base/power/domain*.c
F: include/linux/pm_domain.h
-F: Documentation/devicetree/bindings/power/power_domain.txt
+F: Documentation/devicetree/bindings/power/power?domain*
GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
@@ -7252,7 +7286,7 @@ M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
L: linux-remoteproc@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git hwspinlock-next
F: Documentation/devicetree/bindings/hwlock/
F: Documentation/hwspinlock.txt
F: drivers/hwspinlock/
@@ -7587,6 +7621,13 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: arch/x86/kernel/cpu/hygon.c
+HYNIX HI556 SENSOR DRIVER
+M: Shawn Tu <shawnx.tu@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/hi556.c
+
Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
@@ -7619,6 +7660,7 @@ F: include/uapi/linux/hyperv.h
F: include/asm-generic/mshyperv.h
F: tools/hv/
F: Documentation/ABI/stable/sysfs-bus-vmbus
+F: Documentation/ABI/testing/debugfs-hyperv
HYPERBUS SUPPORT
M: Vignesh Raghavendra <vigneshr@ti.com>
@@ -8291,7 +8333,7 @@ F: Documentation/fb/intelfb.rst
F: drivers/video/fbdev/intelfb/
INTEL GPIO DRIVERS
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M: Andy Shevchenko <andy@kernel.org>
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
@@ -8377,6 +8419,7 @@ S: Maintained
F: drivers/staging/media/ipu3/
F: Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
F: Documentation/media/v4l-drivers/ipu3.rst
+F: Documentation/media/v4l-drivers/ipu3_rcb.svg
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa <khalasa@piap.pl>
@@ -8444,7 +8487,7 @@ F: arch/x86/include/asm/intel_pmc_ipc.h
F: arch/x86/include/asm/intel_punit_ipc.h
INTEL PMIC GPIO DRIVERS
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M: Andy Shevchenko <andy@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
F: drivers/gpio/gpio-*cove.c
@@ -8891,7 +8934,7 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
-M: Masahiro Yamada <yamada.masahiro@socionext.com>
+M: Masahiro Yamada <masahiroy@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
L: linux-kbuild@vger.kernel.org
S: Maintained
@@ -8923,7 +8966,7 @@ S: Maintained
F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
-M: Masahiro Yamada <yamada.masahiro@socionext.com>
+M: Masahiro Yamada <masahiroy@kernel.org>
M: Michal Marek <michal.lkml@markovi.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
L: linux-kbuild@vger.kernel.org
@@ -9363,6 +9406,7 @@ M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
+P: Documentation/nvdimm/maintainer-entry-profile.rst
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/blk.c
@@ -9373,6 +9417,7 @@ M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
+P: Documentation/nvdimm/maintainer-entry-profile.rst
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/btt*
@@ -9382,6 +9427,7 @@ M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
+P: Documentation/nvdimm/maintainer-entry-profile.rst
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/pmem*
@@ -9398,9 +9444,9 @@ LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
-M: Keith Busch <keith.busch@intel.com>
M: Ira Weiny <ira.weiny@intel.com>
L: linux-nvdimm@lists.01.org
+P: Documentation/nvdimm/maintainer-entry-profile.rst
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
S: Supported
@@ -9668,6 +9714,13 @@ S: Maintained
F: Documentation/admin-guide/ldm.rst
F: block/partitions/ldm.*
+LOGITECH HID GAMING KEYBOARDS
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-input@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
+S: Maintained
+F: drivers/hid/hid-lg-g15.c
+
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
M: Chaitra P B <chaitra.basappa@broadcom.com>
@@ -9689,9 +9742,17 @@ LTC1660 DAC DRIVER
M: Marcus Folkesson <marcus.folkesson@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/iio/dac/ltc1660.txt
+F: Documentation/devicetree/bindings/iio/dac/lltc,ltc1660.yaml
F: drivers/iio/dac/ltc1660.c
+LTC2983 IIO TEMPERATURE DRIVER
+M: Nuno Sá <nuno.sa@analog.com>
+W: http://ez.analog.com/community/linux-device-drivers
+L: linux-iio@vger.kernel.org
+S: Supported
+F: drivers/iio/temperature/ltc2983.c
+F: Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
+
LTC4261 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
@@ -9699,6 +9760,17 @@ S: Maintained
F: Documentation/hwmon/ltc4261.rst
F: drivers/hwmon/ltc4261.c
+LTC2947 HARDWARE MONITOR DRIVER
+M: Nuno Sá <nuno.sa@analog.com>
+W: http://ez.analog.com/community/linux-device-drivers
+L: linux-hwmon@vger.kernel.org
+S: Supported
+F: drivers/hwmon/ltc2947-core.c
+F: drivers/hwmon/ltc2947-spi.c
+F: drivers/hwmon/ltc2947-i2c.c
+F: drivers/hwmon/ltc2947.h
+F: Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
+
LTC4306 I2C MULTIPLEXER DRIVER
M: Michael Hennerich <michael.hennerich@analog.com>
W: http://ez.analog.com/community/linux-device-drivers
@@ -9932,7 +10004,7 @@ F: Documentation/hwmon/max16065.rst
F: drivers/hwmon/max16065.c
MAX2175 SDR TUNER DRIVER
-M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
+M: Ramesh Shanmugasundaram <rashanmu@gmail.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
@@ -9974,8 +10046,8 @@ MAXIM MAX77650 PMIC MFD DRIVER
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-kernel@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/*/*max77650.txt
-F: Documentation/devicetree/bindings/*/max77650*.txt
+F: Documentation/devicetree/bindings/*/*max77650.yaml
+F: Documentation/devicetree/bindings/*/max77650*.yaml
F: include/linux/mfd/max77650.h
F: drivers/mfd/max77650.c
F: drivers/regulator/max77650-regulator.c
@@ -10194,7 +10266,7 @@ F: drivers/media/platform/renesas-ceu.c
F: include/media/drv-intf/renesas-ceu.h
MEDIA DRIVERS FOR RENESAS - DRIF
-M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
+M: Ramesh Shanmugasundaram <rashanmu@gmail.com>
L: linux-media@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -10274,7 +10346,6 @@ F: drivers/staging/media/tegra-vde/
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
M: Mauro Carvalho Chehab <mchehab@kernel.org>
-P: LinuxTV.org Project
L: linux-media@vger.kernel.org
W: https://linuxtv.org
Q: http://patchwork.kernel.org/project/linux-media/list/
@@ -10338,6 +10409,13 @@ S: Maintained
F: drivers/net/dsa/mt7530.*
F: net/dsa/tag_mtk.c
+MEDIATEK BOARD LEVEL SHUTDOWN DRIVERS
+M: Sean Wang <sean.wang@mediatek.com>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/power/reset/mt6323-poweroff.txt
+F: drivers/power/reset/mt6323-poweroff.c
+
MEDIATEK JPEG DRIVER
M: Rick Chang <rick.chang@mediatek.com>
M: Bin Liu <bin.liu@mediatek.com>
@@ -10503,6 +10581,7 @@ M: Darren Hart <dvhart@infradead.org>
M: Vadim Pasternak <vadimp@mellanox.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
+F: Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
F: drivers/platform/mellanox/
F: include/linux/platform_data/mlxreg.h
@@ -10655,7 +10734,7 @@ W: http://linux-meson.com/
S: Supported
F: drivers/media/platform/meson/ao-cec.c
F: drivers/media/platform/meson/ao-cec-g12a.c
-F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
+F: Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
T: git git://linuxtv.org/media_tree.git
MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
@@ -10796,7 +10875,7 @@ M: Kent Gustavsson <kent@minoris.se>
L: linux-iio@vger.kernel.org
S: Supported
F: drivers/iio/adc/mcp3911.c
-F: Documentation/devicetree/bindings/iio/adc/mcp3911.txt
+F: Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
MICROCHIP NAND DRIVER
M: Tudor Ambarus <tudor.ambarus@microchip.com>
@@ -12521,7 +12600,6 @@ F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
F: drivers/pci/controller/dwc/*imx6*
PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M: Keith Busch <keith.busch@intel.com>
M: Jonathan Derrick <jonathan.derrick@intel.com>
L: linux-pci@vger.kernel.org
S: Supported
@@ -12564,7 +12642,8 @@ F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F: drivers/pci/controller/pci-tegra.c
PCI DRIVER FOR RENESAS R-CAR
-M: Simon Horman <horms@verge.net.au>
+M: Marek Vasut <marek.vasut+renesas@gmail.com>
+M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-pci@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
@@ -12946,7 +13025,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
PIN CONTROLLER - INTEL
M: Mika Westerberg <mika.westerberg@linux.intel.com>
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M: Andy Shevchenko <andy@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
S: Maintained
F: drivers/pinctrl/intel/
@@ -13076,6 +13155,15 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/pm8001/
+PM-GRAPH UTILITY
+M: "Todd E Brandt" <todd.e.brandt@linux.intel.com>
+L: linux-pm@vger.kernel.org
+W: https://01.org/pm-graph
+B: https://bugzilla.kernel.org/buglist.cgi?component=pm-graph&product=Tools
+T: git git://github.com/intel/pm-graph
+S: Supported
+F: tools/power/pm-graph
+
PNP SUPPORT
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
S: Maintained
@@ -13692,7 +13780,6 @@ S: Maintained
F: arch/mips/ralink
RALINK RT2X00 WIRELESS LAN DRIVER
-P: rt2x00 project
M: Stanislaw Gruszka <sgruszka@redhat.com>
M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
@@ -14028,7 +14115,6 @@ S: Supported
F: drivers/net/ethernet/rocker/
ROCKETPORT DRIVER
-P: Comtrol Corp.
W: http://www.comtrol.com
S: Maintained
F: Documentation/driver-api/serial/rocket.rst
@@ -14040,6 +14126,12 @@ L: linux-serial@vger.kernel.org
S: Odd Fixes
F: drivers/tty/serial/rp2.*
+ROHM BH1750 AMBIENT LIGHT SENSOR DRIVER
+M: Tomasz Duszynski <tduszyns@gmail.com>
+S: Maintained
+F: drivers/iio/light/bh1750.c
+F: Documentation/devicetree/bindings/iio/light/bh1750.yaml
+
ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS
M: Marek Vasut <marek.vasut+renesas@gmail.com>
L: linux-kernel@vger.kernel.org
@@ -14301,7 +14393,7 @@ L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
F: drivers/crypto/exynos-rng.c
-F: Documentation/devicetree/bindings/rng/samsung,exynos4-rng.txt
+F: Documentation/devicetree/bindings/rng/samsung,exynos4-rng.yaml
SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
M:	Łukasz Stelmach <l.stelmach@samsung.com>
@@ -14376,8 +14468,8 @@ M: Kamil Konieczny <k.konieczny@partner.samsung.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/crypto/samsung-slimsss.txt
-F: Documentation/devicetree/bindings/crypto/samsung-sss.txt
+F: Documentation/devicetree/bindings/crypto/samsung-slimsss.yaml
+F: Documentation/devicetree/bindings/crypto/samsung-sss.yaml
F: drivers/crypto/s5p-sss.c
SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS
@@ -14865,6 +14957,12 @@ F: drivers/media/usb/siano/
F: drivers/media/usb/siano/
F: drivers/media/mmc/siano/
+SIFIVE PDMA DRIVER
+M: Green Wan <green.wan@sifive.com>
+S: Maintained
+F: drivers/dma/sf-pdma/
+F: Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+
SIFIVE DRIVERS
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Paul Walmsley <paul.walmsley@sifive.com>
@@ -14891,6 +14989,11 @@ S: Maintained
F: drivers/input/touchscreen/silead.c
F: drivers/platform/x86/touchscreen_dmi.c
+SILICON LABS WIRELESS DRIVERS (for WFxxx series)
+M: Jérôme Pouiller <jerome.pouiller@silabs.com>
+S: Supported
+F: drivers/staging/wfx/
+
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
M: Teddy Wang <teddy.wang@siliconmotion.com>
@@ -14919,15 +15022,13 @@ F: drivers/video/fbdev/simplefb.c
F: include/linux/platform_data/simplefb.h
SIMTEC EB110ATX (Chalice CATS)
-P: Ben Dooks
-P: Vincent Sanders <vince@simtec.co.uk>
+M: Vincent Sanders <vince@simtec.co.uk>
M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB110ATX/
S: Supported
SIMTEC EB2410ITX (BAST)
-P: Ben Dooks
-P: Vincent Sanders <vince@simtec.co.uk>
+M: Vincent Sanders <vince@simtec.co.uk>
M: Simtec Linux Team <linux@simtec.co.uk>
W: http://www.simtec.co.uk/products/EB2410ITX/
S: Supported
@@ -15225,6 +15326,14 @@ S: Maintained
F: drivers/media/i2c/imx274.c
F: Documentation/devicetree/bindings/media/i2c/imx274.txt
+SONY IMX290 SENSOR DRIVER
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx290.c
+F: Documentation/devicetree/bindings/media/i2c/imx290.txt
+
SONY IMX319 SENSOR DRIVER
M: Bingbu Cao <bingbu.cao@intel.com>
L: linux-media@vger.kernel.org
@@ -15548,6 +15657,14 @@ L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/staging/wilc1000/
+STAGING - SEPS525 LCD CONTROLLER DRIVERS
+M: Michael Hennerich <michael.hennerich@analog.com>
+M: Beniamin Bia <beniamin.bia@analog.com>
+L: linux-fbdev@vger.kernel.org
+S: Supported
+F: drivers/staging/fbtft/fb_seps525.c
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+
STAGING SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
@@ -15626,7 +15743,7 @@ SUN4I LOW RES ADC ATTACHED TABLET KEYS DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
+F: Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
F: drivers/input/keyboard/sun4i-lradc-keys.c
SUNDANCE NETWORK DRIVER
@@ -15838,6 +15955,13 @@ F: drivers/hwtracing/stm/
F: include/linux/stm.h
F: include/uapi/linux/stm.h
+SYSTEM76 ACPI DRIVER
+M: Jeremy Soller <jeremy@system76.com>
+M: System76 Product Development <productdev@system76.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/system76_acpi.c
+
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained
@@ -16290,6 +16414,12 @@ S: Maintained
F: drivers/media/platform/davinci/
F: include/media/davinci/
+TI ENHANCED QUADRATURE ENCODER PULSE (eQEP) DRIVER
+R: David Lechner <david@lechnology.com>
+L: linux-iio@vger.kernel.org
+F: Documentation/devicetree/bindings/counter/ti-eqep.yaml
+F: drivers/counter/ti-eqep.c
+
TI ETHERNET SWITCH DRIVER (CPSW)
R: Grygorii Strashko <grygorii.strashko@ti.com>
L: linux-omap@vger.kernel.org
@@ -16387,6 +16517,7 @@ W: http://linuxtv.org/
Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/platform/ti-vpe/
+F: Documentation/devicetree/bindings/media/ti,vpe.yaml
TI WILINK WIRELESS DRIVERS
L: linux-wireless@vger.kernel.org
@@ -16457,6 +16588,13 @@ S: Maintained
F: Documentation/hwmon/tmp401.rst
F: drivers/hwmon/tmp401.c
+TMP513 HARDWARE MONITOR DRIVER
+M: Eric Tremblay <etremblay@distech-controls.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/tmp513.rst
+F: drivers/hwmon/tmp513.c
+
TMPFS (SHMEM FILESYSTEM)
M: Hugh Dickins <hughd@google.com>
L: linux-mm@kvack.org
@@ -17276,6 +17414,7 @@ F: include/media/videobuf2-*
VIMC VIRTUAL MEDIA CONTROLLER DRIVER
M: Helen Koike <helen.koike@collabora.com>
+R: Shuah Khan <skhan@linuxfoundation.org>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
@@ -17557,10 +17696,8 @@ S: Maintained
F: drivers/hwmon/vt8231.c
VUB300 USB to SDIO/SD/MMC bridge chip
-M: Tony Olech <tony.olech@elandigitalsystems.com>
L: linux-mmc@vger.kernel.org
-L: linux-usb@vger.kernel.org
-S: Supported
+S: Orphan
F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS
diff --git a/Makefile b/Makefile
index d4d36c61940b..999a197d67d2 100644
--- a/Makefile
+++ b/Makefile
@@ -618,7 +618,6 @@ ifeq ($(KBUILD_EXTMOD),)
init-y := init/
drivers-y := drivers/ sound/
drivers-$(CONFIG_SAMPLES) += samples/
-drivers-$(CONFIG_KERNEL_HEADER_TEST) += include/
net-y := net/
libs-y := lib/
core-y := usr/
@@ -1011,6 +1010,7 @@ endif
PHONY += prepare0
export MODORDER := $(extmod-prefix)modules.order
+export MODULES_NSDEPS := $(extmod-prefix)modules.nsdeps
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1196,19 +1196,15 @@ headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
$(Q)$(MAKE) $(hdr-inst)=include/uapi
$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
+# Deprecated. It is a no-op now.
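+# (':' is the shell no-op command and '@' suppresses echoing, so running
+# 'make headers_check' now succeeds silently without doing any work.)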
PHONY += headers_check
-headers_check: headers
- $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
- $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi HDRCHECK=1
+headers_check:
+ @:
ifdef CONFIG_HEADERS_INSTALL
prepare: headers
endif
-ifdef CONFIG_HEADERS_CHECK
-all: headers_check
-endif
-
PHONY += scripts_unifdef
scripts_unifdef: scripts_basic
$(Q)$(MAKE) $(build)=scripts scripts/unifdef
@@ -1360,7 +1356,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_DIRS += include/ksym
-CLEAN_FILES += modules.builtin.modinfo
+CLEAN_FILES += modules.builtin.modinfo modules.nsdeps
# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config include/generated \
@@ -1476,7 +1472,6 @@ help:
@echo ' versioncheck - Sanity check on version.h usage'
@echo ' includecheck - Check for duplicate included header files'
@echo ' export_report - List the usages of all exported symbols'
- @echo ' headers_check - Sanity check on exported headers'
@echo ' headerdep - Detect inclusion cycles in headers'
@echo ' coccicheck - Check with Coccinelle'
@echo ''
@@ -1515,7 +1510,7 @@ help:
@echo ''
@$(if $(boards), \
$(foreach b, $(boards), \
- printf " %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \
+ printf " %-27s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \
echo '')
@$(if $(board-dirs), \
$(foreach b, $(board-dirs), \
@@ -1526,7 +1521,8 @@ help:
@echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
@echo ' make V=2 [targets] 2 => give reason for rebuild of target'
@echo ' make O=dir [targets] Locate all output files in "dir", including .config'
- @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
+ @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK'
+ @echo ' (sparse by default)'
@echo ' make C=2 [targets] Force check of all c source with $$CHECK'
@echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
@echo ' make W=n [targets] Enable extra build checks, n=1,2,3 where'
@@ -1622,7 +1618,7 @@ _emodinst_post: _emodinst_
$(call cmd,depmod)
clean-dirs := $(KBUILD_EXTMOD)
-clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers
+clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps
PHONY += /
/:
@@ -1641,6 +1637,50 @@ help:
PHONY += prepare
endif # KBUILD_EXTMOD
+# Single targets
+# ---------------------------------------------------------------------------
+# To build individual files in subdirectories, you can do it like this:
+#
+# make foo/bar/baz.s
+#
+# The supported suffixes for single-target are listed in 'single-targets'
+#
+# To build only under specific subdirectories, you can do it like this:
+#
+# make foo/bar/baz/
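+#
+# For example (the paths below are illustrative only):
+#
+#   make drivers/gpio/gpio-rda.o    # compile a single object file
+#   make drivers/gpio/gpio-rda.ko   # build a single module (runs modpost)
+#   make drivers/gpio/              # build everything below drivers/gpio/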
+
+ifdef single-build
+
+# .ko is special because modpost is needed
+single-ko := $(sort $(filter %.ko, $(MAKECMDGOALS)))
+single-no-ko := $(sort $(patsubst %.ko,%.mod, $(MAKECMDGOALS)))
+
+$(single-ko): single_modpost
+ @:
+$(single-no-ko): descend
+ @:
+
+ifeq ($(KBUILD_EXTMOD),)
+# For the single build of in-tree modules, use a temporary file to avoid
+# the situation of modules_install installing an invalid modules.order.
+MODORDER := .modules.tmp
+endif
+
+PHONY += single_modpost
+single_modpost: $(single-no-ko)
+ $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER)
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+
+KBUILD_MODULES := 1
+
+export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko))
+
+# trim unrelated directories
+build-dirs := $(foreach d, $(build-dirs), \
+ $(if $(filter $(d)/%, $(KBUILD_SINGLE_TARGETS)), $(d)))
+
+endif
+
# Handle descending into subdirectories listed in $(build-dirs)
# Preset locale variables to speed up the build process. Limit locale
# tweaks to this spot to avoid wrong language settings when running
@@ -1649,7 +1689,9 @@ endif # KBUILD_EXTMOD
PHONY += descend $(build-dirs)
descend: $(build-dirs)
$(build-dirs): prepare
- $(Q)$(MAKE) $(build)=$@ single-build=$(single-build) need-builtin=1 need-modorder=1
+ $(Q)$(MAKE) $(build)=$@ \
+ single-build=$(if $(filter-out $@/, $(single-no-ko)),1) \
+ need-builtin=1 need-modorder=1
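+# (single-build is propagated only when single targets other than bare
+# directory goals remain for this directory, so a plain 'make foo/' still
+# builds everything below foo/)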
clean-dirs := $(addprefix _clean_, $(clean-dirs))
PHONY += $(clean-dirs) clean
@@ -1664,7 +1706,7 @@ clean: $(clean-dirs)
-o -name '*.ko.*' \
-o -name '*.dtb' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
-o -name '*.dwo' -o -name '*.lst' \
- -o -name '*.su' -o -name '*.mod' -o -name '*.ns_deps' \
+ -o -name '*.su' -o -name '*.mod' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.lex.c' -o -name '*.tab.[ch]' \
-o -name '*.asn1.[ch]' \
@@ -1686,10 +1728,9 @@ tags TAGS cscope gtags: FORCE
# ---------------------------------------------------------------------------
PHONY += nsdeps
-
+nsdeps: export KBUILD_NSDEPS=1
nsdeps: modules
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost nsdeps
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/$@
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/nsdeps
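+# ('make nsdeps' rebuilds modules with KBUILD_NSDEPS=1 so that modpost
+# records missing symbol-namespace imports, then scripts/nsdeps inserts
+# the corresponding MODULE_IMPORT_NS() statements into the module sources.)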
# Scripts to check various things for consistency
# ---------------------------------------------------------------------------
@@ -1753,50 +1794,6 @@ tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
-# Single targets
-# ---------------------------------------------------------------------------
-# To build individual files in subdirectories, you can do like this:
-#
-# make foo/bar/baz.s
-#
-# The supported suffixes for single-target are listed in 'single-targets'
-#
-# To build only under specific subdirectories, you can do like this:
-#
-# make foo/bar/baz/
-
-ifdef single-build
-
-single-all := $(filter $(single-targets), $(MAKECMDGOALS))
-
-# .ko is special because modpost is needed
-single-ko := $(sort $(filter %.ko, $(single-all)))
-single-no-ko := $(sort $(patsubst %.ko,%.mod, $(single-all)))
-
-$(single-ko): single_modpost
- @:
-$(single-no-ko): descend
- @:
-
-ifeq ($(KBUILD_EXTMOD),)
-# For the single build of in-tree modules, use a temporary file to avoid
-# the situation of modules_install installing an invalid modules.order.
-MODORDER := .modules.tmp
-endif
-
-PHONY += single_modpost
-single_modpost: $(single-no-ko)
- $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER)
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-
-KBUILD_MODULES := 1
-
-export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko))
-
-single-build = $(if $(filter-out $@/, $(single-no-ko)),1)
-
-endif
-
# FIXME Should go into a make.lib or something
# ===========================================================================
diff --git a/arch/Kconfig b/arch/Kconfig
index 8bcc1c746142..7b861fe3f900 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -796,16 +796,9 @@ config OLD_SIGACTION
config COMPAT_OLD_SIGACTION
bool
-config 64BIT_TIME
- def_bool y
- help
- This should be selected by all architectures that need to support
- new system calls with a 64-bit time_t. This is relevant on all 32-bit
- architectures, and 64-bit architectures as part of compat syscall
- handling.
-
config COMPAT_32BIT_TIME
- def_bool !64BIT || COMPAT
+ bool "Provide system calls for 32-bit time_t"
+ default !64BIT || COMPAT
help
This enables 32 bit time_t support in addition to 64 bit time_t support.
This is relevant on all 32-bit architectures, and 64-bit architectures
@@ -843,16 +836,17 @@ config HAVE_ARCH_VMAP_STACK
config VMAP_STACK
default y
bool "Use a virtually-mapped stack"
- depends on HAVE_ARCH_VMAP_STACK && !KASAN
+ depends on HAVE_ARCH_VMAP_STACK
+ depends on !KASAN || KASAN_VMALLOC
---help---
	  Enable this if you want to use virtually-mapped kernel stacks
with guard pages. This causes kernel stack overflows to be
caught immediately rather than causing difficult-to-diagnose
corruption.
- This is presently incompatible with KASAN because KASAN expects
- the stack to map directly to the KASAN shadow map using a formula
- that is incorrect if the stack is in vmalloc space.
+ To use this with KASAN, the architecture must support backing
+ virtual mappings with real shadow memory, and KASAN_VMALLOC must
+ be enabled.
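+
+	  A configuration using both features would contain, for example
+	  (a sketch; the exact option set depends on the architecture):
+
+	    CONFIG_HAVE_ARCH_VMAP_STACK=y
+	    CONFIG_KASAN=y
+	    CONFIG_KASAN_VMALLOC=y
+	    CONFIG_VMAP_STACK=y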
config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
@@ -939,6 +933,14 @@ config RELR
config ARCH_HAS_MEM_ENCRYPT
bool
+config HAVE_SPARSE_SYSCALL_NR
+ bool
+ help
+	  An architecture should select this if its syscall numbering is
+	  sparse, to save table space. For example, the MIPS syscall array
+	  has entries at the 4000, 5000 and 6000 locations. This option
+	  turns on syscall-related optimizations for such an architecture.
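+
+	  An architecture opts in from its own Kconfig, e.g. (a sketch):
+
+	    config MIPS
+	            select HAVE_SPARSE_SYSCALL_NR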
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index af2c0063dc75..1989b946a28d 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -283,12 +283,6 @@ static inline void __iomem *ioremap(unsigned long port, unsigned long size)
return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}
-static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
- unsigned long flags)
-{
- return ioremap(port, size);
-}
-
static inline void __iomem * ioremap_nocache(unsigned long offset,
unsigned long size)
{
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index bf497b8b0ec6..94e4cde8071a 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -963,7 +963,7 @@ put_tv32(struct timeval32 __user *o, struct timespec64 *i)
}
static inline long
-put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv_to_tv32(struct timeval32 __user *o, struct __kernel_old_timeval *i)
{
return copy_to_user(o, &(struct timeval32){
.tv_sec = i->tv_sec,
@@ -971,30 +971,6 @@ put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
sizeof(struct timeval32));
}
-static inline long
-get_it32(struct itimerval *o, struct itimerval32 __user *i)
-{
- struct itimerval32 itv;
- if (copy_from_user(&itv, i, sizeof(struct itimerval32)))
- return -EFAULT;
- o->it_interval.tv_sec = itv.it_interval.tv_sec;
- o->it_interval.tv_usec = itv.it_interval.tv_usec;
- o->it_value.tv_sec = itv.it_value.tv_sec;
- o->it_value.tv_usec = itv.it_value.tv_usec;
- return 0;
-}
-
-static inline long
-put_it32(struct itimerval32 __user *o, struct itimerval *i)
-{
- return copy_to_user(o, &(struct itimerval32){
- .it_interval.tv_sec = o->it_interval.tv_sec,
- .it_interval.tv_usec = o->it_interval.tv_usec,
- .it_value.tv_sec = o->it_value.tv_sec,
- .it_value.tv_usec = o->it_value.tv_usec},
- sizeof(struct itimerval32));
-}
-
static inline void
jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
{
@@ -1039,47 +1015,6 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
asmlinkage long sys_ni_posix_timers(void);
-SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it)
-{
- struct itimerval kit;
- int error;
-
- if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
- return sys_ni_posix_timers();
-
- error = do_getitimer(which, &kit);
- if (!error && put_it32(it, &kit))
- error = -EFAULT;
-
- return error;
-}
-
-SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
- struct itimerval32 __user *, out)
-{
- struct itimerval kin, kout;
- int error;
-
- if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
- return sys_ni_posix_timers();
-
- if (in) {
- if (get_it32(&kin, in))
- return -EFAULT;
- } else
- memset(&kin, 0, sizeof(kin));
-
- error = do_setitimer(which, &kin, out ? &kout : NULL);
- if (error || !out)
- return error;
-
- if (put_it32(out, &kout))
- return -EFAULT;
-
- return 0;
-
-}
-
SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
struct timeval32 __user *, tvs)
{
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index f94c732fedeb..0021580d79ad 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -71,10 +71,10 @@ static int pci_mmap_resource(struct kobject *kobj,
struct pci_bus_region bar;
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
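+	/* Loop over the six standard BARs; PCI_STD_NUM_BARS expresses that
+	 * count directly rather than reusing the PCI_ROM_RESOURCE index. */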
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (res == &pdev->resource[i])
break;
- if (i >= PCI_ROM_RESOURCE)
+ if (i >= PCI_STD_NUM_BARS)
return -ENODEV;
if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
@@ -115,7 +115,7 @@ void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct bin_attribute *res_attr;
res_attr = pdev->res_attr[i];
@@ -232,7 +232,7 @@ int pci_create_resource_files(struct pci_dev *pdev)
int retval;
/* Expose the PCI resources from this device as files */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
/* skip empty resources */
if (!pci_resource_len(pdev, i))
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 728fe028c02c..8e13b0b2928d 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -89,10 +89,10 @@
80 common setgroups sys_setgroups
81 common osf_old_getpgrp sys_ni_syscall
82 common setpgrp sys_setpgid
-83 common osf_setitimer sys_osf_setitimer
+83 common osf_setitimer compat_sys_setitimer
84 common osf_old_wait sys_ni_syscall
85 common osf_table sys_ni_syscall
-86 common osf_getitimer sys_osf_getitimer
+86 common osf_getitimer compat_sys_getitimer
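+# (osf_setitimer/osf_getitimer now route to the generic compat itimer
+#  handlers; the Alpha-private get_it32/put_it32 helpers were removed
+#  from osf_sys.c above)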
87 common gethostname sys_gethostname
88 common sethostname sys_sethostname
89 common getdtablesize sys_getdtablesize
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 8383155c8c82..4d7b671c8ff4 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,7 +6,6 @@
config ARC
def_bool y
select ARC_TIMERS
- select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 5978d4d7d5b0..07f26ed39f02 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -7,7 +7,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
-CONFIG_SYSCTL_SYSCALL=y
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 3a138f8c7299..a12656ec0072 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -15,7 +15,6 @@ CONFIG_INITRAMFS_ROOT_UID=2100
CONFIG_INITRAMFS_ROOT_GID=501
# CONFIG_RD_GZIP is not set
CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 393d4f5e1450..1b505694691e 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
-generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 72f7929736f8..8f777d6441a5 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -34,10 +34,6 @@ static inline void ioport_unmap(void __iomem *addr)
extern void iounmap(const void __iomem *addr);
-#define ioremap_nocache(phy, sz) ioremap(phy, sz)
-#define ioremap_wc(phy, sz) ioremap(phy, sz)
-#define ioremap_wt(phy, sz) ioremap(phy, sz)
-
/*
* io{read,write}{16,32}be() macros
*/
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 7addd0301c51..b917b596f7fb 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -33,7 +33,6 @@
#define _ASM_ARC_PGTABLE_H
#include <linux/bits.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h> /* to propagate CONFIG_ARC_MMU_VER <n> */
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 73a7e88a1e92..e947572a521e 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
* upper layer functions (in include/linux/dma-mapping.h)
*/
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 3861543b66a0..fb86bc3e9b35 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -30,6 +30,7 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
* with the 'reference' page table.
*/
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -39,8 +40,13 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
if (!pgd_present(*pgd_k))
goto bad_area;
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
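+	/* Walk the extra p4d level from the 5-level page table API; with
+	 * fewer configured levels, p4d_offset() folds back onto the pgd. */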
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto bad_area;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto bad_area;
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index a4856bfaedf3..fc8849e4f72e 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -111,12 +111,14 @@ EXPORT_SYMBOL(__kunmap_atomic);
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
pgd_t *pgd_k;
+ p4d_t *p4d_k;
pud_t *pud_k;
pmd_t *pmd_k;
pte_t *pte_k;
pgd_k = pgd_offset_k(kvaddr);
- pud_k = pud_offset(pgd_k, kvaddr);
+ p4d_k = p4d_offset(pgd_k, kvaddr);
+ pud_k = pud_offset(p4d_k, kvaddr);
pmd_k = pmd_offset(pud_k, kvaddr);
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0d3c5d7cceb7..5aed42e07a48 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
- select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -1019,7 +1018,7 @@ config ARM_ERRATA_775420
depends on CPU_V7
help
This option enables the workaround for the 775420 Cortex-A9 (r2p2,
- r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance
+ r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
operation aborts with MMU exception, it might cause the processor
to deadlock. This workaround puts DSB before executing ISB if
an abort may occur on cache maintenance.
diff --git a/arch/arm/boot/bootp/init.S b/arch/arm/boot/bootp/init.S
index 5c476bd2b4ce..b562da2f7040 100644
--- a/arch/arm/boot/bootp/init.S
+++ b/arch/arm/boot/bootp/init.S
@@ -13,7 +13,7 @@
* size immediately following the kernel, we could build this into
* a binary blob, and concatenate the zImage using the cat command.
*/
- .section .start,#alloc,#execinstr
+ .section .start, "ax"
.type _start, #function
.globl _start
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 9219389bbe61..a1e883c5e5c4 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -121,7 +121,7 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
+KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
-e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
@@ -165,7 +165,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S
# The .data section is already discarded by the linker script so no need
# to bother about it here.
check_for_bad_syms = \
-bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
+bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
[ -z "$$bad_syms" ] || \
( echo "following symbols must have non local/private scope:" >&2; \
echo "$$bad_syms" >&2; false )
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 330cd3c2eae5..64c49747f8a3 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -19,7 +19,7 @@ static int node_offset(void *fdt, const char *node_path)
}
static int setprop(void *fdt, const char *node_path, const char *property,
- uint32_t *val_array, int size)
+ void *val_array, int size)
{
int offset = node_offset(fdt, node_path);
if (offset < 0)
@@ -60,7 +60,7 @@ static uint32_t get_cell_size(const void *fdt)
{
int len;
uint32_t cell_size = 1;
- const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len);
+ const __be32 *size_len = getprop(fdt, "/", "#size-cells", &len);
if (size_len)
cell_size = fdt32_to_cpu(*size_len);
@@ -129,7 +129,7 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
struct tag *atag = atag_list;
/* In the case of 64 bits memory size, need to reserve 2 cells for
* address and size for each bank */
- uint32_t mem_reg_property[2 * 2 * NR_BANKS];
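+	/* The FDT blob is always big-endian, so the cells are stored as
+	 * __be32/__be64 values built with cpu_to_fdt32()/cpu_to_fdt64(). */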
+ __be32 mem_reg_property[2 * 2 * NR_BANKS];
int memcount = 0;
int ret, memsize;
@@ -138,7 +138,7 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
return 1;
/* if we get a DTB here we're done already */
- if (*(u32 *)atag_list == fdt32_to_cpu(FDT_MAGIC))
+ if (*(__be32 *)atag_list == cpu_to_fdt32(FDT_MAGIC))
return 0;
/* validate the ATAG */
@@ -177,8 +177,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
/* if memsize is 2, that means that
* each data needs 2 cells of 32 bits,
* so the data are 64 bits */
- uint64_t *mem_reg_prop64 =
- (uint64_t *)mem_reg_property;
+ __be64 *mem_reg_prop64 =
+ (__be64 *)mem_reg_property;
mem_reg_prop64[memcount++] =
cpu_to_fdt64(atag->u.mem.start);
mem_reg_prop64[memcount++] =
diff --git a/arch/arm/boot/compressed/big-endian.S b/arch/arm/boot/compressed/big-endian.S
index 88e2a88d324b..0e092c36da2f 100644
--- a/arch/arm/boot/compressed/big-endian.S
+++ b/arch/arm/boot/compressed/big-endian.S
@@ -6,7 +6,7 @@
* Author: Nicolas Pitre
*/
- .section ".start", #alloc, #execinstr
+ .section ".start", "ax"
mrc p15, 0, r0, c1, c0, 0 @ read control reg
orr r0, r0, #(1 << 7) @ enable big endian mode
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 93dffed0ac6e..ead21e5f2b80 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -140,7 +140,7 @@
#endif
.endm
- .section ".start", #alloc, #execinstr
+ .section ".start", "ax"
/*
* sort out different calling conventions
*/
@@ -1273,7 +1273,7 @@ iflush:
__armv5tej_mmu_cache_flush:
tst r4, #1
movne pc, lr
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
+1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache
bne 1b
mcr p15, 0, r0, c7, c5, 0 @ flush I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index b36c0289a308..6a0f1f524466 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -2,11 +2,13 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
+#include <linux/limits.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
-#define INT_MAX ((int)(~0U>>1))
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
diff --git a/arch/arm/boot/compressed/piggy.S b/arch/arm/boot/compressed/piggy.S
index 0284f84dcf38..27577644ee72 100644
--- a/arch/arm/boot/compressed/piggy.S
+++ b/arch/arm/boot/compressed/piggy.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
- .section .piggydata,#alloc
+ .section .piggydata, "a"
.globl input_data
input_data:
.incbin "arch/arm/boot/compressed/piggy_data"
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index bf3002009b00..76f819f4ba48 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -16,6 +16,37 @@
can = &hecc;
};
+ cpus {
+ cpu: cpu@0 {
+ /* Based on OMAP3630 variants OPP50 and OPP100 */
+ operating-points-v2 = <&cpu0_opp_table>;
+
+ clock-latency = <300000>; /* From legacy driver */
+ };
+ };
+
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+ /*
+ * AM3517 TRM only lists 600MHz @ 1.2V, but omap36xx
+ * appear to operate at 300MHz as well. Since AM3517 only
+ * lists one operating voltage, it will remain fixed at 1.2V
+ */
+ opp50-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <1200000>;
+ opp-supported-hw = <0xffffffff 0xffffffff>;
+ opp-suspend;
+ };
+
+ opp100-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1200000>;
+ opp-supported-hw = <0xffffffff 0xffffffff>;
+ };
+ };
+
ocp@68000000 {
am35x_otg_hs: am35x_otg_hs@5c040000 {
compatible = "ti,omap3-musb";
diff --git a/arch/arm/boot/dts/am3517_mt_ventoux.dts b/arch/arm/boot/dts/am3517_mt_ventoux.dts
index e507e4ae0d88..e7d7124a34ba 100644
--- a/arch/arm/boot/dts/am3517_mt_ventoux.dts
+++ b/arch/arm/boot/dts/am3517_mt_ventoux.dts
@@ -8,7 +8,7 @@
/ {
model = "TeeJet Mt.Ventoux";
- compatible = "teejet,mt_ventoux", "ti,omap3";
+ compatible = "teejet,mt_ventoux", "ti,am3517", "ti,omap3";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index e8feb8b66a2f..f56b8d143ba7 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -379,6 +379,7 @@
interrupts = <8>;
clocks = <&syscon ASPEED_CLK_APB>;
no-loopback-test;
+ aspeed,sirq-polarity-sense = <&syscon 0x70 25>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
index f7a841a28865..2a0a98fe67f0 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
@@ -9,5 +9,5 @@
/ {
model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit";
- compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3";
+ compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3";
};
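
This and the following board files each gain a SoC-level compatible ("ti,omap3430" or "ti,omap3630") between the board and family entries, so code can tell 3430-class from 3630-class parts; with only "ti,omap3" the two were indistinguishable at runtime. A short sketch of the kind of check this enables (of_machine_is_compatible() is the stock helper):

#include <linux/of.h>

/* sketch: distinguish OMAP34xx from OMAP36xx boards at runtime */
static bool board_is_omap3630(void)
{
        return of_machine_is_compatible("ti,omap3630");
}
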
diff --git a/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
index 7675bc3fa868..57bae2aa910e 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-35xx-devkit.dts
@@ -9,5 +9,5 @@
/ {
model = "LogicPD Zoom OMAP35xx Torpedo Development Kit";
- compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3";
+ compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3430", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index d1eae47b83f6..08bae935605c 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -43,11 +43,13 @@
compatible = "motorola,mapphone-cpcap-charger";
interrupts-extended = <
&cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0
- &cpcap 22 0 &cpcap 20 0 &cpcap 19 0 &cpcap 54 0
+ &cpcap 22 0 &cpcap 21 0 &cpcap 20 0 &cpcap 19 0
+ &cpcap 54 0
>;
interrupt-names =
"chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr1", "vbusvld", "battdetb";
+ "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
+ "battdetb";
mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW
&gpio3 23 GPIO_ACTIVE_LOW>;
io-channels = <&cpcap_adc 0 &cpcap_adc 1
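
The charger node gains the "chrgcurr2" interrupt. Note that interrupts-extended and interrupt-names are parallel arrays, so the new entry is inserted at the same position in both; drivers then look the line up by name rather than by index. A hedged sketch of that lookup (platform_get_irq_byname() is the standard accessor; the function name is illustrative):

#include <linux/platform_device.h>

/* sketch: fetch the newly wired charge-current interrupt by name */
static int example_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq_byname(pdev, "chrgcurr2");

        if (irq < 0)
                return irq;
        /* request_irq(irq, ...) as usual */
        return 0;
}
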
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 1aa99fc1487a..125ed933ca75 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 BeagleBoard xM";
- compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
+ compatible = "ti,omap3-beagle-xm", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index e3df3c166902..4ed3f93f5841 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 BeagleBoard";
- compatible = "ti,omap3-beagle", "ti,omap3";
+ compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-cm-t3530.dts b/arch/arm/boot/dts/omap3-cm-t3530.dts
index 76e52c78cbb4..32dbaeaed147 100644
--- a/arch/arm/boot/dts/omap3-cm-t3530.dts
+++ b/arch/arm/boot/dts/omap3-cm-t3530.dts
@@ -9,7 +9,7 @@
/ {
model = "CompuLab CM-T3530";
- compatible = "compulab,omap3-cm-t3530", "ti,omap34xx", "ti,omap3";
+ compatible = "compulab,omap3-cm-t3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
/* Regulator to trigger the reset signal of the Wifi module */
mmc2_sdio_reset: regulator-mmc2-sdio-reset {
diff --git a/arch/arm/boot/dts/omap3-cm-t3730.dts b/arch/arm/boot/dts/omap3-cm-t3730.dts
index 6e944dfa0f3d..683819bf0915 100644
--- a/arch/arm/boot/dts/omap3-cm-t3730.dts
+++ b/arch/arm/boot/dts/omap3-cm-t3730.dts
@@ -9,7 +9,7 @@
/ {
model = "CompuLab CM-T3730";
- compatible = "compulab,omap3-cm-t3730", "ti,omap36xx", "ti,omap3";
+ compatible = "compulab,omap3-cm-t3730", "ti,omap3630", "ti,omap36xx", "ti,omap3";
wl12xx_vmmc2: wl12xx_vmmc2 {
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
index a80fc60bc773..afed85078ad8 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
@@ -11,7 +11,7 @@
#include "omap3-devkit8000-lcd-common.dtsi"
/ {
model = "TimLL OMAP3 Devkit8000 with 4.3'' LCD panel";
- compatible = "timll,omap3-devkit8000", "ti,omap3";
+ compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
lcd0: display {
panel-timing {
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
index 0753776071f8..07c51a105c0d 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
@@ -11,7 +11,7 @@
#include "omap3-devkit8000-lcd-common.dtsi"
/ {
model = "TimLL OMAP3 Devkit8000 with 7.0'' LCD panel";
- compatible = "timll,omap3-devkit8000", "ti,omap3";
+ compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
lcd0: display {
panel-timing {
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index faafc48d8f61..162d0726b008 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -7,7 +7,7 @@
#include "omap3-devkit8000-common.dtsi"
/ {
model = "TimLL OMAP3 Devkit8000";
- compatible = "timll,omap3-devkit8000", "ti,omap3";
+ compatible = "timll,omap3-devkit8000", "ti,omap3430", "ti,omap3";
aliases {
display1 = &dvi0;
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index b6ef1a7ac8a4..409a758c99f1 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -11,7 +11,7 @@
/ {
model = "OMAP3 GTA04";
- compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
+ compatible = "ti,omap3-gta04", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-ha-lcd.dts b/arch/arm/boot/dts/omap3-ha-lcd.dts
index badb9b3c8897..c9ecbc45c8e2 100644
--- a/arch/arm/boot/dts/omap3-ha-lcd.dts
+++ b/arch/arm/boot/dts/omap3-ha-lcd.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 HEAD acoustics LCD-baseboard with TAO3530 SOM";
- compatible = "headacoustics,omap3-ha-lcd", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+ compatible = "headacoustics,omap3-ha-lcd", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-ha.dts b/arch/arm/boot/dts/omap3-ha.dts
index a5365252bfbe..35c4e15abeb7 100644
--- a/arch/arm/boot/dts/omap3-ha.dts
+++ b/arch/arm/boot/dts/omap3-ha.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 HEAD acoustics baseboard with TAO3530 SOM";
- compatible = "headacoustics,omap3-ha", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+ compatible = "headacoustics,omap3-ha", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
index 03dcd05fb8a0..d134ce1cffc0 100644
--- a/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
+++ b/arch/arm/boot/dts/omap3-igep0020-rev-f.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEPv2 Rev. F (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020-rev-f", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0020-rev-f", "ti,omap3630", "ti,omap36xx", "ti,omap3";
/* Regulator to trigger the WL_EN signal of the Wifi module */
lbep5clwmc_wlen: regulator-lbep5clwmc-wlen {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 6d0519e3dfd0..e341535a7162 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEPv2 Rev. C (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0020", "ti,omap3630", "ti,omap36xx", "ti,omap3";
vmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
index 060acd1e803a..9ca1d0f61964 100644
--- a/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
+++ b/arch/arm/boot/dts/omap3-igep0030-rev-g.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEP COM MODULE Rev. G (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030-rev-g", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0030-rev-g", "ti,omap3630", "ti,omap36xx", "ti,omap3";
/* Regulator to trigger the WL_EN signal of the Wifi module */
lbep5clwmc_wlen: regulator-lbep5clwmc-wlen {
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 25170bd3c573..32f31035daa2 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -10,7 +10,7 @@
/ {
model = "IGEP COM MODULE Rev. E (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
+ compatible = "isee,omap3-igep0030", "ti,omap3630", "ti,omap36xx", "ti,omap3";
vmmcsdio_fixed: fixedregulator-mmcsdio {
compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/omap3-ldp.dts b/arch/arm/boot/dts/omap3-ldp.dts
index 9a5fde2d9bce..ec9ba04ef43b 100644
--- a/arch/arm/boot/dts/omap3-ldp.dts
+++ b/arch/arm/boot/dts/omap3-ldp.dts
@@ -10,7 +10,7 @@
/ {
model = "TI OMAP3430 LDP (Zoom1 Labrador)";
- compatible = "ti,omap3-ldp", "ti,omap3";
+ compatible = "ti,omap3-ldp", "ti,omap3430", "ti,omap3";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index c22833d4e568..73d477898ec2 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -7,7 +7,7 @@
/ {
model = "INCOstartec LILLY-A83X module (DM3730)";
- compatible = "incostartec,omap3-lilly-a83x", "ti,omap36xx", "ti,omap3";
+ compatible = "incostartec,omap3-lilly-a83x", "ti,omap3630", "ti,omap36xx", "ti,omap3";
chosen {
bootargs = "console=ttyO0,115200n8 vt.global_cursor_default=0 consoleblank=0";
diff --git a/arch/arm/boot/dts/omap3-lilly-dbb056.dts b/arch/arm/boot/dts/omap3-lilly-dbb056.dts
index fec335400074..ecb4ef738e07 100644
--- a/arch/arm/boot/dts/omap3-lilly-dbb056.dts
+++ b/arch/arm/boot/dts/omap3-lilly-dbb056.dts
@@ -8,7 +8,7 @@
/ {
model = "INCOstartec LILLY-DBB056 (DM3730)";
- compatible = "incostartec,omap3-lilly-dbb056", "incostartec,omap3-lilly-a83x", "ti,omap36xx", "ti,omap3";
+ compatible = "incostartec,omap3-lilly-dbb056", "incostartec,omap3-lilly-a83x", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&twl {
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 74c0ff2350d3..2495a696cec6 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -12,7 +12,7 @@
/ {
model = "Nokia N9";
- compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
+ compatible = "nokia,omap3-n9", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&i2c2 {
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 6681d4519e97..a075b63f3087 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -11,13 +11,6 @@
cpus {
cpu@0 {
cpu0-supply = <&vcc>;
- operating-points = <
- /* kHz uV */
- 300000 1012500
- 600000 1200000
- 800000 1325000
- 1000000 1375000
- >;
};
};
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index 9886bf8b90ab..31d47a1fad84 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -12,7 +12,7 @@
/ {
model = "Nokia N950";
- compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
+ compatible = "nokia,omap3-n950", "ti,omap3630", "ti,omap36xx", "ti,omap3";
keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/omap3-overo-storm-alto35.dts b/arch/arm/boot/dts/omap3-overo-storm-alto35.dts
index 18338576c41d..7f04dfad8203 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-alto35.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-alto35.dts
@@ -14,5 +14,5 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Alto35";
- compatible = "gumstix,omap3-overo-alto35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-alto35", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts b/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
index f204c8af8281..bc5a04e03336 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-chestnut43.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Chestnut43";
- compatible = "gumstix,omap3-overo-chestnut43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-chestnut43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts b/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
index c633f7cee68e..065c31cbf0e2 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-gallop43.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Gallop43";
- compatible = "gumstix,omap3-overo-gallop43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-gallop43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-palo35.dts b/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
index fb88ebc9858c..e38c1c51392c 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo35";
- compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-palo43.dts b/arch/arm/boot/dts/omap3-overo-storm-palo43.dts
index 76cca00d97b6..e6dc23159c4d 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-palo43.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-palo43.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo43";
- compatible = "gumstix,omap3-overo-palo43", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-palo43", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-summit.dts b/arch/arm/boot/dts/omap3-overo-storm-summit.dts
index cc081a9e4c1e..587c08ce282d 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-summit.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-summit.dts
@@ -14,7 +14,7 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Summit";
- compatible = "gumstix,omap3-overo-summit", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-summit", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
index 1de41c0826e0..f57de6010994 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
@@ -14,6 +14,6 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
- compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts b/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
index 9ed13118ed8e..281af6c113be 100644
--- a/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
@@ -14,5 +14,5 @@
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on TobiDuo";
- compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+ compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-pandora-1ghz.dts b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
index 81b957f33c9f..ea509956d7ac 100644
--- a/arch/arm/boot/dts/omap3-pandora-1ghz.dts
+++ b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
@@ -16,7 +16,7 @@
/ {
model = "Pandora Handheld Console 1GHz";
- compatible = "openpandora,omap3-pandora-1ghz", "ti,omap36xx", "ti,omap3";
+ compatible = "openpandora,omap3-pandora-1ghz", "ti,omap3630", "ti,omap36xx", "ti,omap3";
};
&omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index ec5891718ae6..150d5be42d27 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -226,6 +226,17 @@
gpio = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* GPIO_164 */
};
+ /* wl1251 wifi+bt module */
+ wlan_en: fixed-regulator-wg7210_en {
+ compatible = "regulator-fixed";
+ regulator-name = "vwlan";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ startup-delay-us = <50000>;
+ enable-active-high;
+ gpio = <&gpio1 23 GPIO_ACTIVE_HIGH>;
+ };
+
/* wg7210 (wifi+bt module) 32k clock buffer */
wg7210_32k: fixed-regulator-wg7210_32k {
compatible = "regulator-fixed";
@@ -522,9 +533,30 @@
/*wp-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;*/ /* GPIO_127 */
};
-/* mmc3 is probed using pdata-quirks to pass wl1251 card data */
&mmc3 {
- status = "disabled";
+ vmmc-supply = <&wlan_en>;
+
+ bus-width = <4>;
+ non-removable;
+ ti,non-removable;
+ cap-power-off-card;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc3_pins>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wlan: wifi@1 {
+ compatible = "ti,wl1251";
+
+ reg = <1>;
+
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* GPIO_21 */
+
+ ti,wl1251-has-eeprom;
+ };
};
/* bluetooth */
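
With mmc3 described in the device tree (non-removable, power-off capable, with a wl1251 child at SDIO function 1), the pdata-quirks glue removed later in this diff becomes unnecessary: the SDIO core probes the child from its node, and the driver can read board data such as the EEPROM flag directly. Sketch of that read (property name per the node above; the helper is generic):

#include <linux/of.h>

/* sketch: wl1251 probe reading the DT flag added above */
static void wl1251_read_board_data(struct device_node *np, bool *has_eeprom)
{
        *has_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
}
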
diff --git a/arch/arm/boot/dts/omap3-sbc-t3530.dts b/arch/arm/boot/dts/omap3-sbc-t3530.dts
index ae96002abb3b..24bf3fd86641 100644
--- a/arch/arm/boot/dts/omap3-sbc-t3530.dts
+++ b/arch/arm/boot/dts/omap3-sbc-t3530.dts
@@ -8,7 +8,7 @@
/ {
model = "CompuLab SBC-T3530 with CM-T3530";
- compatible = "compulab,omap3-sbc-t3530", "compulab,omap3-cm-t3530", "ti,omap34xx", "ti,omap3";
+ compatible = "compulab,omap3-sbc-t3530", "compulab,omap3-cm-t3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
aliases {
display0 = &dvi0;
diff --git a/arch/arm/boot/dts/omap3-sbc-t3730.dts b/arch/arm/boot/dts/omap3-sbc-t3730.dts
index 7de6df16fc17..eb3893b9535e 100644
--- a/arch/arm/boot/dts/omap3-sbc-t3730.dts
+++ b/arch/arm/boot/dts/omap3-sbc-t3730.dts
@@ -8,7 +8,7 @@
/ {
model = "CompuLab SBC-T3730 with CM-T3730";
- compatible = "compulab,omap3-sbc-t3730", "compulab,omap3-cm-t3730", "ti,omap36xx", "ti,omap3";
+ compatible = "compulab,omap3-sbc-t3730", "compulab,omap3-cm-t3730", "ti,omap3630", "ti,omap36xx", "ti,omap3";
aliases {
display0 = &dvi0;
diff --git a/arch/arm/boot/dts/omap3-sniper.dts b/arch/arm/boot/dts/omap3-sniper.dts
index 40a87330e8c3..b6879cdc5c13 100644
--- a/arch/arm/boot/dts/omap3-sniper.dts
+++ b/arch/arm/boot/dts/omap3-sniper.dts
@@ -9,7 +9,7 @@
/ {
model = "LG Optimus Black";
- compatible = "lg,omap3-sniper", "ti,omap36xx", "ti,omap3";
+ compatible = "lg,omap3-sniper", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-thunder.dts b/arch/arm/boot/dts/omap3-thunder.dts
index 6276e7079b36..64221e3b3477 100644
--- a/arch/arm/boot/dts/omap3-thunder.dts
+++ b/arch/arm/boot/dts/omap3-thunder.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3 Thunder baseboard with TAO3530 SOM";
- compatible = "technexion,omap3-thunder", "technexion,omap3-tao3530", "ti,omap34xx", "ti,omap3";
+ compatible = "technexion,omap3-thunder", "technexion,omap3-tao3530", "ti,omap3430", "ti,omap34xx", "ti,omap3";
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts
index db3a2fe84e99..d240e39f2151 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -9,7 +9,7 @@
/ {
model = "TI Zoom3";
- compatible = "ti,omap3-zoom3", "ti,omap36xx", "ti,omap3";
+ compatible = "ti,omap3-zoom3", "ti,omap3630", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index 0abd61108a53..7bfde8aac7ae 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -8,7 +8,7 @@
/ {
model = "TI OMAP3430 SDP";
- compatible = "ti,omap3430-sdp", "ti,omap3";
+ compatible = "ti,omap3430-sdp", "ti,omap3430", "ti,omap3";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index 7b09cbee8bb8..c4dd9801840d 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -16,19 +16,67 @@
/ {
cpus {
cpu: cpu@0 {
- /* OMAP343x/OMAP35xx variants OPP1-5 */
- operating-points = <
- /* kHz uV */
- 125000 975000
- 250000 1075000
- 500000 1200000
- 550000 1270000
- 600000 1350000
- >;
+ /* OMAP343x/OMAP35xx variants OPP1-6 */
+ operating-points-v2 = <&cpu0_opp_table>;
+
clock-latency = <300000>; /* From legacy driver */
};
};
+ /* see Documentation/devicetree/bindings/opp/opp.txt */
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+
+ opp1-125000000 {
+ opp-hz = /bits/ 64 <125000000>;
+ /*
+ * We currently only select the max voltage from Table 3-3
+ * of the OMAP3530 data sheet (SPRS507F).
+ * Format is: <target min max>
+ */
+ opp-microvolt = <975000 975000 975000>;
+ /*
+ * The first value is the silicon revision bit mask,
+ * the second the 720 MHz device-identification bit mask.
+ */
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp2-250000000 {
+ opp-hz = /bits/ 64 <250000000>;
+ opp-microvolt = <1075000 1075000 1075000>;
+ opp-supported-hw = <0xffffffff 3>;
+ opp-suspend;
+ };
+
+ opp3-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <1200000 1200000 1200000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp4-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-microvolt = <1275000 1275000 1275000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp5-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1350000 1350000 1350000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp6-720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ opp-microvolt = <1350000 1350000 1350000>;
+ /* only high-speed grade omap3530 devices */
+ opp-supported-hw = <0xffffffff 2>;
+ turbo-mode;
+ };
+ };
+
ocp@68000000 {
omap3_pmx_core2: pinmux@480025d8 {
compatible = "ti,omap3-padconf", "pinctrl-single";
diff --git a/arch/arm/boot/dts/omap36xx-clocks.dtsi b/arch/arm/boot/dts/omap36xx-clocks.dtsi
index e66fc57ec35d..4e9cc9003594 100644
--- a/arch/arm/boot/dts/omap36xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap36xx-clocks.dtsi
@@ -105,3 +105,7 @@
<&mcbsp4_ick>, <&uart4_fck>;
};
};
+
+&dpll4_m4_ck {
+ ti,max-div = <31>;
+};
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 1e552f08f120..c618cb257d00 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -19,16 +19,65 @@
};
cpus {
- /* OMAP3630/OMAP37xx 'standard device' variants OPP50 to OPP130 */
+ /* OMAP3630/OMAP37xx variants OPP50 to OPP130 and OPP1G */
cpu: cpu@0 {
- operating-points = <
- /* kHz uV */
- 300000 1012500
- 600000 1200000
- 800000 1325000
- >;
- clock-latency = <300000>; /* From legacy driver */
+ operating-points-v2 = <&cpu0_opp_table>;
+
+ vbb-supply = <&abb_mpu_iva>;
+ clock-latency = <300000>; /* From omap-cpufreq driver */
+ };
+ };
+
+ /* see Documentation/devicetree/bindings/opp/opp.txt */
+ cpu0_opp_table: opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+
+ opp50-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ /*
+ * We currently only select the max voltage from Table 4-19
+ * of the DM3730 data sheet (SPRS685B).
+ * Format is: cpu0-supply: <target min max>
+ * vbb-supply: <target min max>
+ */
+ opp-microvolt = <1012500 1012500 1012500>,
+ <1012500 1012500 1012500>;
+ /*
+ * The first value is the silicon revision bit mask,
+ * the second the "speed binned" bit mask.
+ */
+ opp-supported-hw = <0xffffffff 3>;
+ opp-suspend;
+ };
+
+ opp100-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1200000 1200000 1200000>,
+ <1200000 1200000 1200000>;
+ opp-supported-hw = <0xffffffff 3>;
+ };
+
+ opp130-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1325000 1325000 1325000>,
+ <1325000 1325000 1325000>;
+ opp-supported-hw = <0xffffffff 3>;
};
+
+ opp1g-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1375000 1375000 1375000>,
+ <1375000 1375000 1375000>;
+ /* only on am/dm37x with speed-binned bit set */
+ opp-supported-hw = <0xffffffff 2>;
+ turbo-mode;
+ };
+ };
+
+ opp_supply_mpu_iva: opp_supply {
+ compatible = "ti,omap-opp-supply";
+ ti,absolute-max-voltage-uv = <1375000>;
};
ocp@68000000 {
diff --git a/arch/arm/boot/dts/omap3xxx-clocks.dtsi b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
index 685c82a9d03e..0656c32439d2 100644
--- a/arch/arm/boot/dts/omap3xxx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
@@ -416,7 +416,7 @@
#clock-cells = <0>;
compatible = "ti,divider-clock";
clocks = <&dpll4_ck>;
- ti,max-div = <32>;
+ ti,max-div = <16>;
reg = <0x0e40>;
ti,index-starts-at-one;
};
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index 31bfe1647d28..f53634af014b 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -20,7 +20,6 @@ CONFIG_NAMESPACES=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 3d5f5b501330..f33f5d76365f 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -11,7 +11,6 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHED=y
CONFIG_BLK_CGROUP=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 09deb57db942..989bcc84e7fb 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -9,7 +9,6 @@ CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
# CONFIG_ARCH_MULTI_V7 is not set
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 9b98761e51c9..45d27190c9c9 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -4,7 +4,6 @@ CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_SYSCTL_SYSCALL=y
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_SIGNALFD is not set
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 02f1e7b7c8f6..67c306fff376 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -5,7 +5,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
diff --git a/arch/arm/configs/zx_defconfig b/arch/arm/configs/zx_defconfig
index c4070c19ea6c..4d2ef785ed34 100644
--- a/arch/arm/configs/zx_defconfig
+++ b/arch/arm/configs/zx_defconfig
@@ -11,7 +11,6 @@ CONFIG_RT_GROUP_SCHED=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index 3f0c057aa050..6ebbb2b241d2 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -286,11 +286,13 @@ static struct skcipher_alg neon_algs[] = {
static int __init chacha_simd_mod_init(void)
{
- int err;
+ int err = 0;
- err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (err)
- return err;
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
+ err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ if (err)
+ return err;
+ }
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) {
int i;
@@ -310,18 +312,22 @@ static int __init chacha_simd_mod_init(void)
static_branch_enable(&use_neon);
}
- err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
- if (err)
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
+ err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
+ if (err)
+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ }
}
return err;
}
static void __exit chacha_simd_mod_fini(void)
{
- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
- crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) {
+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs));
+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON))
+ crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs));
+ }
}
module_init(chacha_simd_mod_init);
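
The registration calls are now guarded by IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER): this glue can be built purely as a library backend for the lib/crypto ChaCha interface, in which case the skcipher API may live in a module that built-in code cannot call into. IS_REACHABLE() is true only when the referenced code is actually linkable from the current object. The pattern in isolation (a minimal sketch; the wrapper name is illustrative):

#include <linux/kconfig.h>

/*
 * sketch: IS_REACHABLE(CONFIG_FOO) is true when CONFIG_FOO is built-in,
 * or when it is a module and this code is also modular -- i.e. exactly
 * when calls into FOO will link.
 */
static bool can_use_skcipher_api(void)
{
        return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER);
}
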
diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c
index 2e9e12d2f642..f3f42cf3b893 100644
--- a/arch/arm/crypto/curve25519-glue.c
+++ b/arch/arm/crypto/curve25519-glue.c
@@ -108,14 +108,15 @@ static int __init mod_init(void)
{
if (elf_hwcap & HWCAP_NEON) {
static_branch_enable(&have_neon);
- return crypto_register_kpp(&curve25519_alg);
+ return IS_REACHABLE(CONFIG_CRYPTO_KPP) ?
+ crypto_register_kpp(&curve25519_alg) : 0;
}
return 0;
}
static void __exit mod_exit(void)
{
- if (elf_hwcap & HWCAP_NEON)
+ if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON)
crypto_unregister_kpp(&curve25519_alg);
}
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
index 74a725ac89c9..abe3f2d587dc 100644
--- a/arch/arm/crypto/poly1305-glue.c
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -249,16 +249,19 @@ static int __init arm_poly1305_mod_init(void)
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
(elf_hwcap & HWCAP_NEON))
static_branch_enable(&have_neon);
- else
+ else if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
/* register only the first entry */
return crypto_register_shash(&arm_poly1305_algs[0]);
- return crypto_register_shashes(arm_poly1305_algs,
- ARRAY_SIZE(arm_poly1305_algs));
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+ crypto_register_shashes(arm_poly1305_algs,
+ ARRAY_SIZE(arm_poly1305_algs)) : 0;
}
static void __exit arm_poly1305_mod_exit(void)
{
+ if (!IS_REACHABLE(CONFIG_CRYPTO_HASH))
+ return;
if (!static_branch_likely(&have_neon)) {
crypto_unregister_shash(&arm_poly1305_algs[0]);
return;
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 68ca86f85eb7..fa579b23b4df 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
-generic-y += msi.h
generic-y += parport.h
generic-y += preempt.h
generic-y += seccomp.h
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 0555f14cc8be..fa50bb04f580 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -333,7 +333,7 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
* GITS_VPENDBASER - the Valid bit must be cleared before changing
* anything else.
*/
-static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+static inline void gits_write_vpendbaser(u64 val, void __iomem *addr)
{
u32 tmp;
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
index b67e5fc1fe43..7c3001a6a775 100644
--- a/arch/arm/include/asm/dma-direct.h
+++ b/arch/arm/include/asm/dma-direct.h
@@ -14,23 +14,4 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- u64 limit, mask;
-
- if (!dev->dma_mask)
- return 0;
-
- mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit)
- return 0;
-
- if ((addr | (addr + size - 1)) & ~mask)
- return 0;
-
- return 1;
-}
-
#endif /* ASM_ARM_DMA_DIRECT_H */
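
The ARM-private dma_capable() override is deleted; the generic helper in the DMA-direct core performs an equivalent mask check (plus bus_dma_limit handling), so keeping a local copy only risked divergence. A simplified sketch of the check both versions implement (assumption: trimmed from the generic logic, not the exact body):

#include <linux/device.h>
#include <linux/types.h>

/* sketch: can [addr, addr + size) be reached through dev's DMA mask? */
static bool example_dma_capable(struct device *dev, dma_addr_t addr,
                                size_t size)
{
        dma_addr_t end = addr + size - 1;

        return dev->dma_mask && end <= *dev->dma_mask;
}
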
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 18b0197f2384..48ec1d0337da 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -11,7 +11,6 @@
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
#ifndef __ASSEMBLY__
-extern void mcount(void);
extern void __gnu_mcount_nc(void);
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -23,9 +22,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
/* With Thumb-2, the recorded addresses have the lsb set */
return addr & ~1;
}
-
-extern void ftrace_caller_old(void);
-extern void ftrace_call_old(void);
#endif
#endif
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index ac54c06764e6..62358d3ca0a8 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -53,6 +53,9 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
#define ARM_DEBUG_ARCH_V8 6
+#define ARM_DEBUG_ARCH_V8_1 7
+#define ARM_DEBUG_ARCH_V8_2 8
+#define ARM_DEBUG_ARCH_V8_4 9
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 7a0596fcb2e7..aefdabdbeb84 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -392,7 +392,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
*/
void __iomem *ioremap(resource_size_t res_cookie, size_t size);
#define ioremap ioremap
-#define ioremap_nocache ioremap
/*
* Do not use ioremap_cache for mapping memory. Use memremap instead.
@@ -400,12 +399,6 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
#define ioremap_cache ioremap_cache
-/*
- * Do not use ioremap_cached in new code. Provided for the benefit of
- * the pxa2xx-flash MTD driver only.
- */
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size);
-
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index 0abd389cf0ec..68e6f25784a4 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -27,5 +27,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
+extern void pcibios_report_status(unsigned int status_mask, int warn);
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..5b879ae7afc1
--- /dev/null
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 ARM Limited
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/cp15.h>
+#include <asm/unistd.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+extern struct vdso_data *__get_datapage(void);
+
+static __always_inline int gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("r1") = _tz;
+ register struct __kernel_old_timeval *tv asm("r0") = _tv;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_gettimeofday;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_gettime64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_clock_getres_time64;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(int clock_mode)
+{
+#ifdef CONFIG_ARM_ARCH_TIMER
+ u64 cycle_now;
+
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
+
+ return cycle_now;
+#else
+ return -EINVAL; /* use fallback */
+#endif
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return __get_datapage();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
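
The new header supplies everything the generic vDSO library expects from the architecture: syscall fallbacks with operands pinned to EABI registers (r7 carries the syscall number for swi), a counter read that returns a negative value when the architected timer is unavailable, and the data-page accessor. A heavily condensed sketch of how the generic reader consumes these hooks (assumption: field names follow the generic vdso/datapage.h of this era; treat it as an illustration, not the exact lib/vdso code):

/* condensed sketch of the generic high-resolution read loop */
static __always_inline int example_do_hres(const struct vdso_data *vd,
                                           struct __kernel_timespec *ts)
{
        const struct vdso_timestamp *base = &vd->basetime[CLOCK_MONOTONIC];
        u64 cycles, ns;
        u32 seq;

        do {
                seq = READ_ONCE(vd->seq);       /* odd = update in flight */
                cycles = __arch_get_hw_counter(vd->clock_mode);
                if ((s64)cycles < 0)
                        return -1;              /* fall back to the syscall */
                ns = base->nsec;
                ns += ((cycles - vd->cycle_last) & vd->mask) * vd->mult;
                ns >>= vd->shift;
        } while ((seq & 1) || seq != READ_ONCE(vd->seq));

        ts->tv_sec = base->sec + ns / NSEC_PER_SEC;
        ts->tv_nsec = ns % NSEC_PER_SEC;
        return 0;
}
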
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..c4166f317071
--- /dev/null
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+#include <asm/cacheflush.h>
+
+extern struct vdso_data *vdso_data;
+extern bool cntvct_ok;
+
+static __always_inline
+bool tk_is_cntvct(const struct timekeeper *tk)
+{
+ if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+ return false;
+
+ if (!tk->tkr_mono.clock->archdata.vdso_direct)
+ return false;
+
+ return true;
+}
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__arm_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __arm_get_k_vdso_data
+
+static __always_inline
+int __arm_update_vdso_data(void)
+{
+ return !cntvct_ok;
+}
+#define __arch_update_vdso_data __arm_update_vdso_data
+
+static __always_inline
+int __arm_get_clock_mode(struct timekeeper *tk)
+{
+ u32 __tk_is_cntvct = tk_is_cntvct(tk);
+
+ return __tk_is_cntvct;
+}
+#define __arch_get_clock_mode __arm_get_clock_mode
+
+static __always_inline
+int __arm_use_vsyscall(struct vdso_data *vdata)
+{
+ return vdata[CS_HRES_COARSE].clock_mode;
+}
+#define __arch_use_vsyscall __arm_use_vsyscall
+
+static __always_inline
+void __arm_sync_vdso_data(struct vdso_data *vdata)
+{
+ flush_dcache_page(virt_to_page(vdata));
+}
+#define __arch_sync_vdso_data __arm_sync_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/arm/include/asm/vdso_datapage.h b/arch/arm/include/asm/vdso_datapage.h
index 7910abf89b1c..bef68f59928d 100644
--- a/arch/arm/include/asm/vdso_datapage.h
+++ b/arch/arm/include/asm/vdso_datapage.h
@@ -11,35 +11,12 @@
#ifndef __ASSEMBLY__
+#include <vdso/datapage.h>
#include <asm/page.h>
-/* Try to be cache-friendly on systems that don't implement the
- * generic timer: fit the unconditionally updated fields in the first
- * 32 bytes.
- */
-struct vdso_data {
- u32 seq_count; /* sequence count - odd during updates */
- u16 tk_is_cntvct; /* fall back to syscall if false */
- u16 cs_shift; /* clocksource shift */
- u32 xtime_coarse_sec; /* coarse time */
- u32 xtime_coarse_nsec;
-
- u32 wtm_clock_sec; /* wall to monotonic offset */
- u32 wtm_clock_nsec;
- u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
- u32 cs_mult; /* clocksource multiplier */
-
- u64 cs_cycle_last; /* last cycle value */
- u64 cs_mask; /* clocksource mask */
-
- u64 xtime_clock_snsec; /* CLOCK_REALTIME sub-ns base */
- u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
- u32 tz_dsttime;
-};
-
union vdso_data_store {
- struct vdso_data data;
- u8 page[PAGE_SIZE];
+ struct vdso_data data[CS_BASES];
+ u8 page[PAGE_SIZE];
};
#endif /* !__ASSEMBLY__ */
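
The hand-rolled struct vdso_data is replaced by the generic one, with one instance per clocksource base (CS_BASES), while the union still pins the whole store to exactly one page. A tiny hedged sketch of the invariant the page[] member encodes (CS_BASES comes from vdso/datapage.h):

#include <linux/build_bug.h>
#include <asm/page.h>

/* sketch: the generic data instances must fit in the single data page */
static inline void example_vdso_store_check(void)
{
        BUILD_BUG_ON(sizeof(struct vdso_data) * CS_BASES > PAGE_SIZE);
}
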
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8cad59465af3..8b679e2ca3c3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,10 +17,14 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o reboot.o return_address.o \
+ process.o ptrace.o reboot.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
+ifneq ($(CONFIG_ARM_UNWIND),y)
+obj-$(CONFIG_FRAME_POINTER) += return_address.o
+endif
+
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index c125582de2e7..b5e217907686 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <asm/delay.h>
+#include <asm/arch_timer.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index b0c195e3a06d..02ca7adf5375 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -246,6 +246,9 @@ static int enable_monitor_mode(void)
case ARM_DEBUG_ARCH_V7_ECP14:
case ARM_DEBUG_ARCH_V7_1:
case ARM_DEBUG_ARCH_V8:
+ case ARM_DEBUG_ARCH_V8_1:
+ case ARM_DEBUG_ARCH_V8_2:
+ case ARM_DEBUG_ARCH_V8_4:
ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
isb();
break;
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index b647741c0ab0..6e626abaefc5 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
+#include <linux/moduleloader.h>
#include <asm/cache.h>
#include <asm/opcodes.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 9485acc520a4..cea1c27c29cb 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -36,6 +36,8 @@
#include <asm/tls.h>
#include <asm/vdso.h>
+#include "signal.h"
+
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index aba6b2ab7a58..d4392e177484 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -51,7 +51,7 @@ static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
}
#ifdef CONFIG_HOTPLUG_CPU
-int psci_cpu_disable(unsigned int cpu)
+static int psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
if (!psci_ops.cpu_off)
@@ -64,7 +64,7 @@ int psci_cpu_disable(unsigned int cpu)
return 0;
}
-void psci_cpu_die(unsigned int cpu)
+static void psci_cpu_die(unsigned int cpu)
{
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
@@ -76,7 +76,7 @@ void psci_cpu_die(unsigned int cpu)
panic("psci: cpu %d failed to shutdown\n", cpu);
}
-int psci_cpu_kill(unsigned int cpu)
+static int psci_cpu_kill(unsigned int cpu)
{
int err, i;
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 324352787aea..b606cded90cd 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -923,7 +923,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
- if (secure_computing(NULL) == -1)
+ if (secure_computing() == -1)
return -1;
#else
/* XXX: remove this once OABI gets fixed */
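
secure_computing() lost its seccomp_data argument: in-tree callers passed NULL, so the parameter was dropped and the seccomp core now collects syscall state itself. The signature change this hunk tracks, in brief (sketch):

/* old: int secure_computing(const struct seccomp_data *sd); */
int secure_computing(void);     /* state fetched internally from pt_regs */
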
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index b0d2f1fe891d..7b42ac010fdf 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -7,8 +7,6 @@
*/
#include <linux/export.h>
#include <linux/ftrace.h>
-
-#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
#include <asm/stacktrace.h>
@@ -53,6 +51,4 @@ void *return_address(unsigned int level)
return NULL;
}
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index b7b838b05229..cb076d30ab38 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -9,3 +9,5 @@ struct rt_sigframe {
struct siginfo info;
struct sigframe sig;
};
+
+extern struct page *get_signal_page(void);
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 9d9b1db73932..d3a85f01b328 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -18,6 +18,7 @@
#include <asm/memory.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/tcm.h>
#define TCMTR_FORMAT_MASK 0xe0000000U
@@ -30,8 +31,8 @@ extern char __itcm_start, __sitcm_text, __eitcm_text;
extern char __dtcm_start, __sdtcm_data, __edtcm_data;
/* These will be increased as we run */
-u32 dtcm_end = DTCM_OFFSET;
-u32 itcm_end = ITCM_OFFSET;
+static u32 dtcm_end = DTCM_OFFSET;
+static u32 itcm_end = ITCM_OFFSET;
/*
* TCM memory resources
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index b996b2cf0703..dddc7ebf4db4 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -9,6 +9,7 @@
* reading the RTC at bootup, etc...
*/
#include <linux/clk-provider.h>
+#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -107,5 +108,6 @@ void __init time_init(void)
of_clk_init(NULL);
#endif
timer_probe();
+ tick_setup_hrtimer_broadcast();
}
}
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 5b9faba03afb..3a4dde081c13 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -95,7 +95,7 @@ static void __init parse_dt_topology(void)
GFP_NOWAIT);
for_each_possible_cpu(cpu) {
- const u32 *rate;
+ const __be32 *rate;
int len;
/* too early to use cpu->of_node */
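
Raw device-tree property data is big-endian, so the pointer type becomes __be32 *; this lets sparse flag any direct dereference and forces an explicit be32_to_cpup() conversion. Sketch of the endian-correct access pattern (property name as used by this function; the helper name is illustrative):

#include <linux/of.h>

/* sketch: endian-correct read of a raw DT property */
static u32 example_read_rate(struct device_node *cn)
{
        int len;
        const __be32 *rate = of_get_property(cn, "clock-frequency", &len);

        if (!rate || len < (int)sizeof(*rate))
                return 0;
        return be32_to_cpup(rate);      /* big-endian DT -> CPU order */
}
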
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 9bf16c93ee6a..c89ac1b9d28b 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -23,6 +23,8 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <clocksource/arm_arch_timer.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
#define MAX_SYMNAME 64
@@ -37,7 +39,7 @@ unsigned int vdso_total_pages __ro_after_init;
* The VDSO data page.
*/
static union vdso_data_store vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = vdso_data_store.data;
static struct page *vdso_data_page __ro_after_init;
static const struct vm_special_mapping vdso_data_mapping = {
@@ -77,7 +79,7 @@ struct elfinfo {
/* Cached result of boot-time check for whether the arch timer exists,
* and if so, whether the virtual counter is useable.
*/
-static bool cntvct_ok __ro_after_init;
+bool cntvct_ok __ro_after_init;
static bool __init cntvct_functional(void)
{
@@ -262,84 +264,3 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
mm->context.vdso = addr;
}
-static void vdso_write_begin(struct vdso_data *vdata)
-{
- ++vdso_data->seq_count;
- smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
-}
-
-static void vdso_write_end(struct vdso_data *vdata)
-{
- smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
- ++vdso_data->seq_count;
-}
-
-static bool tk_is_cntvct(const struct timekeeper *tk)
-{
- if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
- return false;
-
- if (!tk->tkr_mono.clock->archdata.vdso_direct)
- return false;
-
- return true;
-}
-
-/**
- * update_vsyscall - update the vdso data page
- *
- * Increment the sequence counter, making it odd, indicating to
- * userspace that an update is in progress. Update the fields used
- * for coarse clocks and, if the architected system timer is in use,
- * the fields used for high precision clocks. Increment the sequence
- * counter again, making it even, indicating to userspace that the
- * update is finished.
- *
- * Userspace is expected to sample seq_count before reading any other
- * fields from the data page. If seq_count is odd, userspace is
- * expected to wait until it becomes even. After copying data from
- * the page, userspace must sample seq_count again; if it has changed
- * from its previous value, userspace must retry the whole sequence.
- *
- * Calls to update_vsyscall are serialized by the timekeeping core.
- */
-void update_vsyscall(struct timekeeper *tk)
-{
- struct timespec64 *wtm = &tk->wall_to_monotonic;
-
- if (!cntvct_ok) {
- /* The entry points have been zeroed, so there is no
- * point in updating the data page.
- */
- return;
- }
-
- vdso_write_begin(vdso_data);
-
- vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = tk->xtime_sec;
- vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
- tk->tkr_mono.shift);
- vdso_data->wtm_clock_sec = wtm->tv_sec;
- vdso_data->wtm_clock_nsec = wtm->tv_nsec;
-
- if (vdso_data->tk_is_cntvct) {
- vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
- vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
- vdso_data->cs_mult = tk->tkr_mono.mult;
- vdso_data->cs_shift = tk->tkr_mono.shift;
- vdso_data->cs_mask = tk->tkr_mono.mask;
- }
-
- vdso_write_end(vdso_data);
-
- flush_dcache_page(virt_to_page(vdso_data));
-}
-
-void update_vsyscall_tz(void)
-{
- vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
- vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- flush_dcache_page(virt_to_page(vdso_data));
-}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 8b81a17f675d..416462e3f5d6 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -31,7 +31,6 @@
PCI_STATUS_PARITY) << 16)
extern int setup_arm_irq(int, struct irqaction *);
-extern void pcibios_report_status(u_int status_mask, int warn);
static unsigned long
dc21285_base_address(struct pci_bus *bus, unsigned int devfn)
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 39a7d9393641..24dd5bbe60e4 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -62,13 +62,13 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
*/
void imx6q_cpuidle_fec_irqs_used(void)
{
- imx6q_cpuidle_driver.states[1].disabled = true;
+ cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, true);
}
EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_used);
void imx6q_cpuidle_fec_irqs_unused(void)
{
- imx6q_cpuidle_driver.states[1].disabled = false;
+ cpuidle_driver_state_disabled(&imx6q_cpuidle_driver, 1, false);
}
EXPORT_SYMBOL_GPL(imx6q_cpuidle_fec_irqs_unused);
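
Writing drv->states[idx].disabled directly no longer has the intended effect now that cpuidle tracks state disabling per CPU; cpuidle_driver_state_disabled() is the accessor that applies the driver-level flag to every CPU the driver serves. A minimal sketch of the replacement call pattern (the wrapper name is illustrative):

#include <linux/cpuidle.h>

/* sketch: gate the deeper idle state while FEC IRQs are in use */
static void example_gate_state(struct cpuidle_driver *drv, bool gate)
{
        cpuidle_driver_state_disabled(drv, 1, gate);
}
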
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 8f208197988f..1e1e86d17fc5 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -216,9 +216,6 @@ obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
# Platform specific device init code
-omap-hsmmc-$(CONFIG_MMC_OMAP_HS) := hsmmc.o
-obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
-
obj-y += omap_phy_internal.o
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 6316da3623b3..223b37c48389 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -352,7 +352,6 @@ void omap_pcs_legacy_init(int irq, void (*rearm)(void));
struct omap_sdrc_params;
extern void omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
-struct omap2_hsmmc_info;
extern void omap_reserve(void);
struct omap_hwmod;
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
deleted file mode 100644
index 63423ea6a240..000000000000
--- a/arch/arm/mach-omap2/hsmmc.c
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-omap2/hsmmc.c
- *
- * Copyright (C) 2007-2008 Texas Instruments
- * Copyright (C) 2008 Nokia Corporation
- * Author: Texas Instruments
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_data/hsmmc-omap.h>
-
-#include "soc.h"
-#include "omap_device.h"
-
-#include "hsmmc.h"
-#include "control.h"
-
-#if IS_ENABLED(CONFIG_MMC_OMAP_HS)
-
-static u16 control_pbias_offset;
-static u16 control_devconf1_offset;
-
-#define HSMMC_NAME_LEN 9
-
-static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
- struct omap_hsmmc_platform_data *mmc)
-{
- char *hc_name;
-
- hc_name = kzalloc(HSMMC_NAME_LEN + 1, GFP_KERNEL);
- if (!hc_name)
- return -ENOMEM;
-
- snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1);
- mmc->name = hc_name;
- mmc->caps = c->caps;
- mmc->reg_offset = 0;
-
- return 0;
-}
-
-static int omap_hsmmc_done;
-
-void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
-{
- struct platform_device *pdev;
- int res;
-
- if (omap_hsmmc_done)
- return;
-
- omap_hsmmc_done = 1;
-
- for (; c->mmc; c++) {
- pdev = c->pdev;
- if (!pdev)
- continue;
- res = omap_device_register(pdev);
- if (res)
- pr_err("Could not late init MMC\n");
- }
-}
-
-#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
-
-static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
- int ctrl_nr)
-{
- struct omap_hwmod *oh;
- struct omap_hwmod *ohs[1];
- struct omap_device *od;
- struct platform_device *pdev;
- char oh_name[MAX_OMAP_MMC_HWMOD_NAME_LEN];
- struct omap_hsmmc_platform_data *mmc_data;
- struct omap_hsmmc_dev_attr *mmc_dev_attr;
- char *name;
- int res;
-
- mmc_data = kzalloc(sizeof(*mmc_data), GFP_KERNEL);
- if (!mmc_data)
- return;
-
- res = omap_hsmmc_pdata_init(hsmmcinfo, mmc_data);
- if (res < 0)
- goto free_mmc;
-
- name = "omap_hsmmc";
- res = snprintf(oh_name, MAX_OMAP_MMC_HWMOD_NAME_LEN,
- "mmc%d", ctrl_nr);
- WARN(res >= MAX_OMAP_MMC_HWMOD_NAME_LEN,
- "String buffer overflow in MMC%d device setup\n", ctrl_nr);
-
- oh = omap_hwmod_lookup(oh_name);
- if (!oh) {
- pr_err("Could not look up %s\n", oh_name);
- goto free_name;
- }
- ohs[0] = oh;
- if (oh->dev_attr != NULL) {
- mmc_dev_attr = oh->dev_attr;
- mmc_data->controller_flags = mmc_dev_attr->flags;
- }
-
- pdev = platform_device_alloc(name, ctrl_nr - 1);
- if (!pdev) {
- pr_err("Could not allocate pdev for %s\n", name);
- goto free_name;
- }
- dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
-
- od = omap_device_alloc(pdev, ohs, 1);
- if (IS_ERR(od)) {
- pr_err("Could not allocate od for %s\n", name);
- goto put_pdev;
- }
-
- res = platform_device_add_data(pdev, mmc_data,
- sizeof(struct omap_hsmmc_platform_data));
- if (res) {
- pr_err("Could not add pdata for %s\n", name);
- goto put_pdev;
- }
-
- hsmmcinfo->pdev = pdev;
-
- res = omap_device_register(pdev);
- if (res) {
- pr_err("Could not register od for %s\n", name);
- goto free_od;
- }
-
- goto free_mmc;
-
-free_od:
- omap_device_delete(od);
-
-put_pdev:
- platform_device_put(pdev);
-
-free_name:
- kfree(mmc_data->name);
-
-free_mmc:
- kfree(mmc_data);
-}
-
-void __init omap_hsmmc_init(struct omap2_hsmmc_info *controllers)
-{
- if (omap_hsmmc_done)
- return;
-
- omap_hsmmc_done = 1;
-
- if (cpu_is_omap2430()) {
- control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
- } else {
- control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
- control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
- }
-
- for (; controllers->mmc; controllers++)
- omap_hsmmc_init_one(controllers, controllers->mmc);
-
-}
-
-#endif
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
deleted file mode 100644
index 76c5ed2afa72..000000000000
--- a/arch/arm/mach-omap2/hsmmc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * MMC definitions for OMAP2
- */
-
-struct mmc_card;
-
-struct omap2_hsmmc_info {
- u8 mmc; /* controller 1/2/3 */
- u32 caps; /* 4/8 wires and any additional host
- * capabilities OR'd (ref. linux/mmc/host.h) */
- struct platform_device *pdev; /* mmc controller instance */
- /* init some special card */
- void (*init_card)(struct mmc_card *card);
-};
-
-#if IS_ENABLED(CONFIG_MMC_OMAP_HS)
-
-void omap_hsmmc_init(struct omap2_hsmmc_info *);
-void omap_hsmmc_late_init(struct omap2_hsmmc_info *);
-
-#else
-
-static inline void omap_hsmmc_init(struct omap2_hsmmc_info *info)
-{
-}
-
-static inline void omap_hsmmc_late_init(struct omap2_hsmmc_info *info)
-{
-}
-
-#endif
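
The deleted header above used the kernel's standard conditional-API idiom: real prototypes when CONFIG_MMC_OMAP_HS is enabled (built in or modular), empty static inline stubs otherwise, so board files compile either way at zero object-code cost. A minimal sketch of the pattern, using hypothetical CONFIG_FOO / foo_init() / struct foo_info names rather than anything from this patch:

    #if IS_ENABLED(CONFIG_FOO)
    void foo_init(struct foo_info *info);
    #else
    /* Stub: compiles away entirely when CONFIG_FOO is disabled. */
    static inline void foo_init(struct foo_info *info)
    {
    }
    #endif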
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index de8c54034a5a..c47a2afc91e5 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/davinci_emac.h>
#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_platform.h>
@@ -33,7 +32,6 @@
#include "omap_device.h"
#include "omap-secure.h"
#include "soc.h"
-#include "hsmmc.h"
static struct omap_hsmmc_platform_data __maybe_unused mmc_pdata[2];
@@ -300,118 +298,15 @@ static void __init omap3_logicpd_torpedo_init(void)
}
/* omap3pandora legacy devices */
-#define PANDORA_WIFI_IRQ_GPIO 21
-#define PANDORA_WIFI_NRESET_GPIO 23
static struct platform_device pandora_backlight = {
.name = "pandora-backlight",
.id = -1,
};
-static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
- REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
-};
-
-static struct regulator_init_data pandora_vmmc3 = {
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
- .num_consumer_supplies = ARRAY_SIZE(pandora_vmmc3_supply),
- .consumer_supplies = pandora_vmmc3_supply,
-};
-
-static struct fixed_voltage_config pandora_vwlan = {
- .supply_name = "vwlan",
- .microvolts = 1800000, /* 1.8V */
- .startup_delay = 50000, /* 50ms */
- .init_data = &pandora_vmmc3,
-};
-
-static struct platform_device pandora_vwlan_device = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &pandora_vwlan,
- },
-};
-
-static struct gpiod_lookup_table pandora_vwlan_gpiod_table = {
- .dev_id = "reg-fixed-voltage.1",
- .table = {
- /*
- * As this is a low GPIO number it should be at the first
- * GPIO bank.
- */
- GPIO_LOOKUP("gpio-0-31", PANDORA_WIFI_NRESET_GPIO,
- NULL, GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static void pandora_wl1251_init_card(struct mmc_card *card)
-{
- /*
- * We have TI wl1251 attached to MMC3. Pass this information to
- * SDIO core because it can't be probed by normal methods.
- */
- if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
- card->quirks |= MMC_QUIRK_NONSTD_SDIO;
- card->cccr.wide_bus = 1;
- card->cis.vendor = 0x104c;
- card->cis.device = 0x9066;
- card->cis.blksize = 512;
- card->cis.max_dtr = 24000000;
- card->ocr = 0x80;
- }
-}
-
-static struct omap2_hsmmc_info pandora_mmc3[] = {
- {
- .mmc = 3,
- .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
- .init_card = pandora_wl1251_init_card,
- },
- {} /* Terminator */
-};
-
-static void __init pandora_wl1251_init(void)
-{
- struct wl1251_platform_data pandora_wl1251_pdata;
- int ret;
-
- memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
-
- pandora_wl1251_pdata.power_gpio = -1;
-
- ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
- if (ret < 0)
- goto fail;
-
- pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
- if (pandora_wl1251_pdata.irq < 0)
- goto fail_irq;
-
- pandora_wl1251_pdata.use_eeprom = true;
- ret = wl1251_set_platform_data(&pandora_wl1251_pdata);
- if (ret < 0)
- goto fail_irq;
-
- return;
-
-fail_irq:
- gpio_free(PANDORA_WIFI_IRQ_GPIO);
-fail:
- pr_err("wl1251 board initialisation failed\n");
-}
-
static void __init omap3_pandora_legacy_init(void)
{
platform_device_register(&pandora_backlight);
- gpiod_add_lookup_table(&pandora_vwlan_gpiod_table);
- platform_device_register(&pandora_vwlan_device);
- omap_hsmmc_init(pandora_mmc3);
- omap_hsmmc_late_init(pandora_mmc3);
- pandora_wl1251_init();
}
#endif /* CONFIG_ARCH_OMAP3 */
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index a499ed17931e..8bfaca3a8b64 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -73,18 +73,6 @@
#define TOSA_GPIO_BAT1_TH_ON (TOSA_TC6393XB_GPIO_BASE + 15)
/*
- * Timing Generator
- */
-#define TG_PNLCTL 0x00
-#define TG_TPOSCTL 0x01
-#define TG_DUTYCTL 0x02
-#define TG_GPOSR 0x03
-#define TG_GPODR1 0x04
-#define TG_GPODR2 0x05
-#define TG_PINICTL 0x06
-#define TG_HPOSCTL 0x07
-
-/*
* PXA GPIOs
*/
#define TOSA_GPIO_POWERON (0)
@@ -192,7 +180,4 @@
#define TOSA_KEY_MAIL KEY_MAIL
#endif
-struct spi_device;
-extern int tosa_bl_enable(struct spi_device *spi, int enable);
-
#endif /* _ASM_ARCH_TOSA_H_ */
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index f537ff1c3ba7..4e13893edeb9 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -813,6 +813,26 @@ static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};
+static struct gpiod_lookup_table tosa_lcd_gpio_table = {
+ .dev_id = "spi2.0",
+ .table = {
+ GPIO_LOOKUP("tc6393xb",
+ TOSA_GPIO_TG_ON - TOSA_TC6393XB_GPIO_BASE,
+ "tg #pwr", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static struct gpiod_lookup_table tosa_lcd_bl_gpio_table = {
+ .dev_id = "i2c-tosa-bl",
+ .table = {
+ GPIO_LOOKUP("tc6393xb",
+ TOSA_GPIO_BL_C20MA - TOSA_TC6393XB_GPIO_BASE,
+ "backlight", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "tosa-lcd",
@@ -923,6 +943,8 @@ static void __init tosa_init(void)
platform_scoop_config = &tosa_pcmcia_config;
pxa2xx_set_spi_info(2, &pxa_ssp_master_info);
+ gpiod_add_lookup_table(&tosa_lcd_gpio_table);
+ gpiod_add_lookup_table(&tosa_lcd_bl_gpio_table);
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
clk_add_alias("CLK_CK3P6MI", tc6393xb_device.name, "GPIO11_CLK", NULL);
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 2447427cb4a8..69f3fa270fbe 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -203,7 +203,7 @@ void tegra20_cpuidle_pcie_irqs_in_use(void)
{
pr_info_once(
"Disabling cpuidle LP2 state, since PCIe IRQs are in use\n");
- tegra_idle_driver.states[1].disabled = true;
+ cpuidle_driver_state_disabled(&tegra_idle_driver, 1, true);
}
int __init tegra20_cpuidle_init(void)
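
The tegra20 change above stops writing tegra_idle_driver.states[1].disabled directly and instead asks the cpuidle core to record the disable request for that state. A hedged sketch of the helper, with the prototype quoted from memory rather than verbatim:

    /* From include/linux/cpuidle.h in this series (hedged sketch). */
    void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
                                       int idx, bool disable);

    /* Old:  tegra_idle_driver.states[1].disabled = true;
     * New: */
    cpuidle_driver_state_disabled(&tegra_idle_driver, 1, true);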
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 3875027ef8fc..e929aaa744c0 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -84,6 +84,7 @@ static void __init ux500_init_irq(void)
struct resource r;
irqchip_init();
+ prcmu_early_init();
np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
of_address_to_resource(np, 0, &r);
of_node_put(np);
@@ -91,7 +92,6 @@ static void __init ux500_init_irq(void)
pr_err("could not find PRCMU base resource\n");
return;
}
- prcmu_early_init(r.start, r.end-r.start);
ux500_pm_init(r.start, r.end-r.start);
/* Unlock before init */
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 0ab3a86b1f52..f112dde735de 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -896,7 +896,10 @@ config VDSO
bool "Enable VDSO for acceleration of some system calls"
depends on AEABI && MMU && CPU_V7
default y if ARM_ARCH_TIMER
+ select HAVE_GENERIC_VDSO
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_32
+ select GENERIC_GETTIMEOFDAY
help
Place in the process address space an ELF shared object
providing fast implementations of gettimeofday and
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index db9247898300..287ef898a55e 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
- void *ret = dma_alloc_from_global_coherent(size, dma_handle);
+ void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
/*
* dma_alloc_from_global_coherent() may fail because:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7d042d5c43e3..1df6eb42f22e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1559,7 +1559,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev,
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
-void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
struct page **pages;
@@ -1583,13 +1583,14 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
__iommu_free_buffer(dev, pages, size, attrs);
}
-void arm_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, unsigned long attrs)
+static void arm_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle,
+ unsigned long attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}
-void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
@@ -1713,7 +1714,7 @@ bad_mapping:
* possible) and tagged with the appropriate dma address and length. They are
* obtained via sg_dma_{address,length}.
*/
-int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
@@ -1731,7 +1732,7 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
* tagged with the appropriate dma address and length. They are obtained via
* sg_dma_{address,length}.
*/
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
@@ -1764,8 +1765,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
+static void arm_coherent_iommu_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs)
{
__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
@@ -1781,9 +1782,10 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
+static void arm_iommu_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
@@ -1795,7 +1797,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+static void arm_iommu_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
@@ -1813,7 +1816,8 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+static void arm_iommu_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
struct scatterlist *s;
@@ -2015,7 +2019,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-const struct dma_map_ops iommu_ops = {
+static const struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
@@ -2037,7 +2041,7 @@ const struct dma_map_ops iommu_ops = {
.dma_supported = arm_dma_supported,
};
-const struct dma_map_ops iommu_coherent_ops = {
+static const struct dma_map_ops iommu_coherent_ops = {
.alloc = arm_coherent_iommu_alloc_attrs,
.free = arm_coherent_iommu_free_attrs,
.mmap = arm_coherent_iommu_mmap_attrs,
@@ -2332,26 +2336,20 @@ void arch_teardown_dma_ops(struct device *dev)
}
#ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
size, dir);
}
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return dma_to_pfn(dev, dma_addr);
-}
-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b4be3baa83d4..3ef204137e73 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -30,6 +30,7 @@
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
@@ -180,7 +181,7 @@ int pfn_valid(unsigned long pfn)
if (__phys_to_pfn(addr) != pfn)
return 0;
- return memblock_is_map_memory(__pfn_to_phys(pfn));
+ return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
@@ -593,8 +594,8 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP);
}
-void set_section_perms(struct section_perm *perms, int n, bool set,
- struct mm_struct *mm)
+static void set_section_perms(struct section_perm *perms, int n, bool set,
+ struct mm_struct *mm)
{
size_t i;
unsigned long addr;
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 091ddc56827e..415d0a454237 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -10,6 +10,8 @@
#include <linux/ioport.h>
#include <linux/io.h>
+#include <asm/vga.h>
+
unsigned long vga_base;
EXPORT_SYMBOL(vga_base);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d42b93316183..72286f9a4d30 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -382,15 +382,11 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
- __alias(ioremap_cached);
-
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
-EXPORT_SYMBOL(ioremap_cached);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 48c2888297dd..5d0d0f86e790 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -259,7 +259,7 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
- [MT_DEVICE_CACHED] = { /* ioremap_cached */
+ [MT_DEVICE_CACHED] = { /* ioremap_cache */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 24ecf8d30a1e..8b3d7191e2b8 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -206,15 +206,11 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
- __alias(ioremap_cached);
-
-void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
-EXPORT_SYMBOL(ioremap_cached);
void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 4fa5371bc662..2785da387c91 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -491,7 +491,7 @@ cpu_arm1020_name:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm1020_proc_info,#object
__arm1020_proc_info:
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 5d8a8339e09a..e9ea237ed785 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -449,7 +449,7 @@ arm1020e_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm1020e_proc_info,#object
__arm1020e_proc_info:
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index b3dd95c345e4..920c279e7879 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -443,7 +443,7 @@ arm1022_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm1022_proc_info,#object
__arm1022_proc_info:
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index ac5afde12f35..0bdf25a95b10 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -138,7 +138,7 @@ ENTRY(arm1026_flush_kern_cache_all)
mov ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
-1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate
+1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test, clean, invalidate
bne 1b
#endif
tst r2, #VM_EXEC
@@ -363,7 +363,7 @@ ENTRY(cpu_arm1026_switch_mm)
#ifdef CONFIG_MMU
mov r1, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
-1: mrc p15, 0, r15, c7, c14, 3 @ test, clean, invalidate
+1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test, clean, invalidate
bne 1b
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
@@ -437,7 +437,7 @@ arm1026_crval:
string cpu_arm1026_name, "ARM1026EJ-S"
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm1026_proc_info,#object
__arm1026_proc_info:
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index c99d24363f32..39361e196d61 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -172,7 +172,7 @@ arm720_crval:
* See <asm/procinfo.h> for a definition of this structure.
*/
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
.type __\name\()_proc_info,#object
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 1b4a3838393f..1a94bbf6e53f 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -128,7 +128,7 @@ __arm740_setup:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm740_proc_info,#object
__arm740_proc_info:
.long 0x41807400
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 17a4687065c7..52b66cf0259e 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -72,7 +72,7 @@ __arm7tdmi_setup:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
extra_hwcaps=0
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 298c76b47749..31ac8acc34dc 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -434,7 +434,7 @@ arm920_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm920_proc_info,#object
__arm920_proc_info:
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 824be3a0bc23..ca2c7ca8af21 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -412,7 +412,7 @@ arm922_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm922_proc_info,#object
__arm922_proc_info:
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index d40cff8f102c..a381a0c9f109 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -477,7 +477,7 @@ arm925_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f3cd08f353f0..1ba253c2bce1 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -131,7 +131,7 @@ __flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
+1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate
bne 1b
#endif
tst r2, #VM_EXEC
@@ -358,7 +358,7 @@ ENTRY(cpu_arm926_switch_mm)
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
-1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate
+1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate
bne 1b
#endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
@@ -460,7 +460,7 @@ arm926_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm926_proc_info,#object
__arm926_proc_info:
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1c26d991386d..4b8a00220cc9 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -340,7 +340,7 @@ __arm940_setup:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm940_proc_info,#object
__arm940_proc_info:
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 2dc1c75a4fd4..555becf9c758 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -395,7 +395,7 @@ __arm946_setup:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __arm946_proc_info,#object
__arm946_proc_info:
.long 0x41009460
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 913c06e590af..ef517530130b 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -66,7 +66,7 @@ __arm9tdmi_setup:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
.type __\name\()_proc_info, #object
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 8120b6f4dbb8..dddf833fe000 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -185,7 +185,7 @@ fa526_cr1_set:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __fa526_proc_info,#object
__fa526_proc_info:
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index bb6dc34d42a3..b12b76bc8d30 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -571,7 +571,7 @@ feroceon_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
.type __\name\()_proc_info,#object
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index f08308578885..d47d6c5cee63 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -416,7 +416,7 @@ mohawk_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __88sv331x_proc_info,#object
__88sv331x_proc_info:
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index d5bc5d702563..baba503ba816 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -196,7 +196,7 @@ sa110_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.type __sa110_proc_info,#object
__sa110_proc_info:
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index be7b611c76c7..75ebacc8e4e5 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -239,7 +239,7 @@ sa1100_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
.type __\name\()_proc_info,#object
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index c1c85eb3484f..1dd0d5ca27da 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -261,7 +261,7 @@ v6_crval:
string cpu_elf_name, "v6"
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
/*
* Match any ARMv6 processor core.
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 7c90b4c615a5..c0fbfca5da8b 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -64,6 +64,9 @@ static void cpu_v7_spectre_init(void)
break;
#ifdef CONFIG_ARM_PSCI
+ case ARM_CPU_PART_BRAHMA_B53:
+ /* Requires no workaround */
+ break;
default:
/* Other ARM CPUs require no workaround */
if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c4e8006a1a8c..48e0ef6f0dcc 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -644,7 +644,7 @@ __v7_setup_stack:
string cpu_elf_name, "v7"
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
/*
* Standard v7 proc info content
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 1a49d503eafc..84459c1d31b8 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -93,7 +93,7 @@ ENTRY(cpu_cm7_proc_fin)
ret lr
ENDPROC(cpu_cm7_proc_fin)
- .section ".init.text", #alloc, #execinstr
+ .section ".init.text", "ax"
__v7m_cm7_setup:
mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
@@ -177,7 +177,7 @@ ENDPROC(__v7m_setup)
string cpu_elf_name "v7m"
string cpu_v7m_name "ARMv7-M"
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions
.long 0 /* proc_info_list.__cpu_mm_mmu_flags */
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 1ac0fbbe9f12..42eaecc43cfe 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -496,7 +496,7 @@ xsc3_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
.type __\name\()_proc_info,#object
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index bdb2b7749b03..18ac5a1f8922 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -610,7 +610,7 @@ xscale_crval:
.align
- .section ".proc.info.init", #alloc
+ .section ".proc.info.init", "a"
.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
.type __\name\()_proc_info,#object
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 87b7769214e0..0fda344beb0b 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -1,7 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.

+ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
+include $(srctree)/lib/vdso/Makefile
+
hostprogs-y := vdsomunge
-obj-vdso := vgettimeofday.o datapage.o
+obj-vdso := vgettimeofday.o datapage.o note.o
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
@@ -24,7 +30,11 @@ CFLAGS_REMOVE_vdso.o = -pg
# Force -O2 to avoid libgcc dependencies
CFLAGS_REMOVE_vgettimeofday.o = -pg -Os
+ifeq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o = -O2
+else
+CFLAGS_vgettimeofday.o = -O2 -include $(c-gettimeofday-y)
+endif
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
@@ -37,7 +47,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file
$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,ld)
+ $(call if_changed,vdsold_and_vdso_check)
$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
$(call if_changed,vdsomunge)
@@ -47,6 +57,10 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
+# Actual build commands
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
+
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
diff --git a/arch/s390/kernel/vdso32/note.S b/arch/arm/vdso/note.c
index db19d0680a0a..eff5bf9efb8b 100644
--- a/arch/s390/kernel/vdso32/note.S
+++ b/arch/arm/vdso/note.c
@@ -1,5 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
/*
+ * Copyright (C) 2012-2018 ARM Limited
+ *
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
*/
@@ -7,7 +9,7 @@
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
+#include <linux/build-salt.h>
-ELFNOTE_START(Linux, 0, "a")
- .long LINUX_VERSION_CODE
-ELFNOTE_END
+ELFNOTE32("Linux", 0, LINUX_VERSION_CODE);
+BUILD_SALT;
diff --git a/arch/arm/vdso/vdso.lds.S b/arch/arm/vdso/vdso.lds.S
index 73cf205b003e..165d1d2eb76b 100644
--- a/arch/arm/vdso/vdso.lds.S
+++ b/arch/arm/vdso/vdso.lds.S
@@ -71,6 +71,8 @@ VERSION
global:
__vdso_clock_gettime;
__vdso_gettimeofday;
+ __vdso_clock_getres;
+ __vdso_clock_gettime64;
local: *;
};
}
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index d1fdbb12760a..1976c6f325a4 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -1,259 +1,34 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
+ * ARM userspace implementations of gettimeofday() and similar.
+ *
* Copyright 2015 Mentor Graphics Corporation.
*/
-
-#include <linux/compiler.h>
-#include <linux/hrtimer.h>
#include <linux/time.h>
-#include <asm/barrier.h>
-#include <asm/bug.h>
-#include <asm/cp15.h>
-#include <asm/page.h>
-#include <asm/unistd.h>
-#include <asm/vdso_datapage.h>
-
-#ifndef CONFIG_AEABI
-#error This code depends on AEABI system call conventions
-#endif
-
-extern struct vdso_data *__get_datapage(void);
-
-static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
-{
- u32 seq;
-repeat:
- seq = READ_ONCE(vdata->seq_count);
- if (seq & 1) {
- cpu_relax();
- goto repeat;
- }
- return seq;
-}
-
-static notrace u32 vdso_read_begin(const struct vdso_data *vdata)
-{
- u32 seq;
-
- seq = __vdso_read_begin(vdata);
-
- smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */
- return seq;
-}
+#include <linux/types.h>
-static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
+int __vdso_clock_gettime(clockid_t clock,
+ struct old_timespec32 *ts)
{
- smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */
- return vdata->seq_count != start;
+ return __cvdso_clock_gettime32(clock, ts);
}
-static notrace long clock_gettime_fallback(clockid_t _clkid,
- struct timespec *_ts)
+int __vdso_clock_gettime64(clockid_t clock,
+ struct __kernel_timespec *ts)
{
- register struct timespec *ts asm("r1") = _ts;
- register clockid_t clkid asm("r0") = _clkid;
- register long ret asm ("r0");
- register long nr asm("r7") = __NR_clock_gettime;
-
- asm volatile(
- " swi #0\n"
- : "=r" (ret)
- : "r" (clkid), "r" (ts), "r" (nr)
- : "memory");
-
- return ret;
+ return __cvdso_clock_gettime(clock, ts);
}
-static notrace int do_realtime_coarse(struct timespec *ts,
- struct vdso_data *vdata)
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
{
- u32 seq;
-
- do {
- seq = vdso_read_begin(vdata);
-
- ts->tv_sec = vdata->xtime_coarse_sec;
- ts->tv_nsec = vdata->xtime_coarse_nsec;
-
- } while (vdso_read_retry(vdata, seq));
-
- return 0;
+ return __cvdso_gettimeofday(tv, tz);
}
-static notrace int do_monotonic_coarse(struct timespec *ts,
- struct vdso_data *vdata)
+int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
{
- struct timespec tomono;
- u32 seq;
-
- do {
- seq = vdso_read_begin(vdata);
-
- ts->tv_sec = vdata->xtime_coarse_sec;
- ts->tv_nsec = vdata->xtime_coarse_nsec;
-
- tomono.tv_sec = vdata->wtm_clock_sec;
- tomono.tv_nsec = vdata->wtm_clock_nsec;
-
- } while (vdso_read_retry(vdata, seq));
-
- ts->tv_sec += tomono.tv_sec;
- timespec_add_ns(ts, tomono.tv_nsec);
-
- return 0;
-}
-
-#ifdef CONFIG_ARM_ARCH_TIMER
-
-static notrace u64 get_ns(struct vdso_data *vdata)
-{
- u64 cycle_delta;
- u64 cycle_now;
- u64 nsec;
-
- isb();
- cycle_now = read_sysreg(CNTVCT);
-
- cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
-
- nsec = (cycle_delta * vdata->cs_mult) + vdata->xtime_clock_snsec;
- nsec >>= vdata->cs_shift;
-
- return nsec;
-}
-
-static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
-{
- u64 nsecs;
- u32 seq;
-
- do {
- seq = vdso_read_begin(vdata);
-
- if (!vdata->tk_is_cntvct)
- return -1;
-
- ts->tv_sec = vdata->xtime_clock_sec;
- nsecs = get_ns(vdata);
-
- } while (vdso_read_retry(vdata, seq));
-
- ts->tv_nsec = 0;
- timespec_add_ns(ts, nsecs);
-
- return 0;
-}
-
-static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
-{
- struct timespec tomono;
- u64 nsecs;
- u32 seq;
-
- do {
- seq = vdso_read_begin(vdata);
-
- if (!vdata->tk_is_cntvct)
- return -1;
-
- ts->tv_sec = vdata->xtime_clock_sec;
- nsecs = get_ns(vdata);
-
- tomono.tv_sec = vdata->wtm_clock_sec;
- tomono.tv_nsec = vdata->wtm_clock_nsec;
-
- } while (vdso_read_retry(vdata, seq));
-
- ts->tv_sec += tomono.tv_sec;
- ts->tv_nsec = 0;
- timespec_add_ns(ts, nsecs + tomono.tv_nsec);
-
- return 0;
-}
-
-#else /* CONFIG_ARM_ARCH_TIMER */
-
-static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
-{
- return -1;
-}
-
-static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
-{
- return -1;
-}
-
-#endif /* CONFIG_ARM_ARCH_TIMER */
-
-notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
-{
- struct vdso_data *vdata;
- int ret = -1;
-
- vdata = __get_datapage();
-
- switch (clkid) {
- case CLOCK_REALTIME_COARSE:
- ret = do_realtime_coarse(ts, vdata);
- break;
- case CLOCK_MONOTONIC_COARSE:
- ret = do_monotonic_coarse(ts, vdata);
- break;
- case CLOCK_REALTIME:
- ret = do_realtime(ts, vdata);
- break;
- case CLOCK_MONOTONIC:
- ret = do_monotonic(ts, vdata);
- break;
- default:
- break;
- }
-
- if (ret)
- ret = clock_gettime_fallback(clkid, ts);
-
- return ret;
-}
-
-static notrace long gettimeofday_fallback(struct timeval *_tv,
- struct timezone *_tz)
-{
- register struct timezone *tz asm("r1") = _tz;
- register struct timeval *tv asm("r0") = _tv;
- register long ret asm ("r0");
- register long nr asm("r7") = __NR_gettimeofday;
-
- asm volatile(
- " swi #0\n"
- : "=r" (ret)
- : "r" (tv), "r" (tz), "r" (nr)
- : "memory");
-
- return ret;
-}
-
-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
- struct timespec ts;
- struct vdso_data *vdata;
- int ret;
-
- vdata = __get_datapage();
-
- ret = do_realtime(&ts, vdata);
- if (ret)
- return gettimeofday_fallback(tv, tz);
-
- if (tv) {
- tv->tv_sec = ts.tv_sec;
- tv->tv_usec = ts.tv_nsec / 1000;
- }
- if (tz) {
- tz->tz_minuteswest = vdata->tz_minuteswest;
- tz->tz_dsttime = vdata->tz_dsttime;
- }
-
- return ret;
+ return __cvdso_clock_getres_time32(clock_id, res);
}
/* Avoid unresolved references emitted by GCC */
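
The rewritten vgettimeofday.c above delegates everything to the generic lib/vdso implementation (the __cvdso_* helpers), and the linker-script change exports __vdso_clock_getres plus the time64 entry point. Userspace needs nothing special: libc is expected to route plain POSIX calls through the vDSO fast path. A minimal, hedged userspace check:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            /* On a vDSO-enabled libc this is expected to resolve to the
             * __vdso_clock_gettime* symbols exported above, avoiding the
             * syscall on the fast path. */
            if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
                    return 1;
            printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }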
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 3c7645d7b9b4..d40e9e5fc52b 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -71,20 +71,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
* pfn_valid returns true the page is local and we can use the native
* dma-direct functions; otherwise we call the Xen specific version.
*/
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
if (pfn_valid(PFN_DOWN(handle)))
- arch_sync_dma_for_cpu(dev, paddr, size, dir);
+ arch_sync_dma_for_cpu(paddr, size, dir);
else if (dir != DMA_TO_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
}
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
if (pfn_valid(PFN_DOWN(handle)))
- arch_sync_dma_for_device(dev, paddr, size, dir);
+ arch_sync_dma_for_device(paddr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
else
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index afe6412fe769..b1b4476ddb83 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -12,7 +12,6 @@ config ARM64
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
- select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_FAST_MULTIPLIER
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 1d05d570142f..ce4b0679839d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -252,6 +252,7 @@
};
&r_ir {
+ linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 3f39e020f74e..7ab71172cd3c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -95,6 +95,39 @@
#size-cells = <2>;
ranges;
+ pcie: pcie@fc000000 {
+ compatible = "amlogic,g12a-pcie", "snps,dw-pcie";
+ reg = <0x0 0xfc000000 0x0 0x400000
+ 0x0 0xff648000 0x0 0x2000
+ 0x0 0xfc400000 0x0 0x200000>;
+ reg-names = "elbi", "cfg", "config";
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ bus-range = <0x0 0xff>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x0 0xfc600000 0 0x00100000
+ 0x82000000 0 0xfc700000 0x0 0xfc700000 0 0x1900000>;
+
+ clocks = <&clkc CLKID_PCIE_PHY
+ &clkc CLKID_PCIE_COMB
+ &clkc CLKID_PCIE_PLL>;
+ clock-names = "general",
+ "pclk",
+ "port";
+ resets = <&reset RESET_PCIE_CTRL_A>,
+ <&reset RESET_PCIE_APB>;
+ reset-names = "port",
+ "apb";
+ num-lanes = <1>;
+ phys = <&usb3_pcie_phy PHY_TYPE_PCIE>;
+ phy-names = "pcie";
+ status = "disabled";
+ };
+
ethmac: ethernet@ff3f0000 {
compatible = "amlogic,meson-axg-dwmac",
"snps,dwmac-3.70a",
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dts
index 3a6a1e0c1e32..124a80901084 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dts
@@ -14,3 +14,28 @@
/ {
compatible = "khadas,vim3", "amlogic,a311d", "amlogic,g12b";
};
+
+/*
+ * The VIM3 on-board MCU can mux the PCIe/USB3.0 shared differential
+ * lines using a FUSB340TMX USB 3.1 SuperSpeed Data Switch between
+ * a USB3.0 Type A connector and an M.2 Key M slot.
+ * The PHY driving these differential lines is shared between
+ * the USB3.0 controller and the PCIe controller, so only
+ * a single controller can use it at a time.
+ * If the MCU is configured to mux the PCIe/USB3.0 differential lines
+ * to the M.2 Key M slot, uncomment the following block to disable
+ * USB3.0 from the USB Complex and enable the PCIe controller.
+ * The end user is not expected to uncomment the following except for
+ * testing purposes, but should instead rely on the firmware/bootloader
+ * to update these nodes accordingly if PCIe mode is selected by the MCU.
+ */
+/*
+&pcie {
+ status = "okay";
+};
+
+&usb {
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
+};
+ */
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x-khadas-vim3.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x-khadas-vim3.dts
index b73deb282120..bba98f982ad6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x-khadas-vim3.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x-khadas-vim3.dts
@@ -14,3 +14,28 @@
/ {
compatible = "khadas,vim3", "amlogic,s922x", "amlogic,g12b";
};
+
+/*
+ * The VIM3 on-board MCU can mux the PCIe/USB3.0 shared differential
+ * lines using a FUSB340TMX USB 3.1 SuperSpeed Data Switch between
+ * a USB3.0 Type A connector and an M.2 Key M slot.
+ * The PHY driving these differential lines is shared between
+ * the USB3.0 controller and the PCIe controller, so only
+ * a single controller can use it at a time.
+ * If the MCU is configured to mux the PCIe/USB3.0 differential lines
+ * to the M.2 Key M slot, uncomment the following block to disable
+ * USB3.0 from the USB Complex and enable the PCIe controller.
+ * The end user is not expected to uncomment the following except for
+ * testing purposes, but should instead rely on the firmware/bootloader
+ * to update these nodes accordingly if PCIe mode is selected by the MCU.
+ */
+/*
+&pcie {
+ status = "okay";
+};
+
+&usb {
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
+};
+ */
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index 8647da7d6609..eac5720dc15f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -246,6 +246,10 @@
linux,rc-map-name = "rc-khadas";
};
+&pcie {
+ reset-gpios = <&gpio GPIOA_8 GPIO_ACTIVE_LOW>;
+};
+
&pwm_ef {
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
index 5233bd7cacfb..dbbf29a0dbf6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
@@ -68,3 +68,28 @@
clock-names = "clkin1";
status = "okay";
};
+
+/*
+ * The VIM3 on-board MCU can mux the PCIe/USB3.0 shared differential
+ * lines using a FUSB340TMX USB 3.1 SuperSpeed Data Switch between
+ * a USB3.0 Type A connector and an M.2 Key M slot.
+ * The PHY driving these differential lines is shared between
+ * the USB3.0 controller and the PCIe controller, so only
+ * a single controller can use it at a time.
+ * If the MCU is configured to mux the PCIe/USB3.0 differential lines
+ * to the M.2 Key M slot, uncomment the following block to disable
+ * USB3.0 from the USB Complex and enable the PCIe controller.
+ * The end user is not expected to uncomment the following except for
+ * testing purposes, but should instead rely on the firmware/bootloader
+ * to update these nodes accordingly if PCIe mode is selected by the MCU.
+ */
+/*
+&pcie {
+ status = "okay";
+};
+
+&usb {
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
+};
+ */
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index 521573f3a5ba..256ea0349ffc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -134,6 +134,10 @@
power-domains = <&pwrc PWRC_SM1_ETH_ID>;
};
+&pcie {
+ power-domains = <&pwrc PWRC_SM1_PCIE_ID>;
+};
+
&pwrc {
compatible = "amlogic,meson-sm1-pwrc";
};
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index b08029d7bde6..c1f9660d104c 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -211,12 +211,13 @@ static int __init chacha_simd_mod_init(void)
static_branch_enable(&have_neon);
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+ return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
}
static void __exit chacha_simd_mod_fini(void)
{
- if (cpu_have_named_feature(ASIMD))
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && cpu_have_named_feature(ASIMD))
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
index dd843d0ee83a..83a2338a8826 100644
--- a/arch/arm64/crypto/poly1305-glue.c
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -220,12 +220,13 @@ static int __init neon_poly1305_mod_init(void)
static_branch_enable(&have_neon);
- return crypto_register_shash(&neon_poly1305_alg);
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+ crypto_register_shash(&neon_poly1305_alg) : 0;
}
static void __exit neon_poly1305_mod_exit(void)
{
- if (cpu_have_named_feature(ASIMD))
+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && cpu_have_named_feature(ASIMD))
crypto_unregister_shash(&neon_poly1305_alg);
}
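
Both crypto glue changes above apply the same guard: the algorithm is only registered with the crypto API when that subsystem is reachable from this object (built in, or both built as modules), so a library-only build of these ciphers still links and loads. A hedged sketch of the idiom, with hypothetical CONFIG_FOO / foo_register() names:

    #include <linux/kconfig.h>

    int foo_register(void);     /* hypothetical subsystem entry point */

    static int __init mod_init(void)
    {
            /* IS_REACHABLE() is a compile-time constant, so the call is
             * dead-code-eliminated when the subsystem cannot be called
             * from here (e.g. subsystem=m while this code is built in). */
            return IS_REACHABLE(CONFIG_FOO) ? foo_register() : 0;
    }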
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 98a5405c8558..bd23f87d6c55 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
-generic-y += msi.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += serial.h
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 323cb306bd28..4e531f57147d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -167,9 +167,7 @@ extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
/*
* PCI configuration space mapping function.
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 21176d02e21a..6771c399d40c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1816,7 +1816,7 @@ int syscall_trace_enter(struct pt_regs *regs)
}
/* Do the secure computing after ptrace; failures should be fast. */
- if (secure_computing(NULL) == -1)
+ if (secure_computing() == -1)
return -1;
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 9239416e93d4..6c45350e33aa 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -13,14 +13,14 @@
#include <asm/cacheflush.h>
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_map_area(phys_to_virt(paddr), size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a9f541912289..5a3b15a14a7f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1060,6 +1060,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
+ memblock_clear_nomap(start, size);
+
return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
restrictions);
}
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index b319808e8f6b..a5909091cb14 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
sizeof(long));
}
-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
BUG_ON(!valid_dma_direction(dir));
@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- return c6x_dma_sync(dev, paddr, size, dir);
+ return c6x_dma_sync(paddr, size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- return c6x_dma_sync(dev, paddr, size, dir);
+ return c6x_dma_sync(paddr, size, dir);
}
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 3973847b5f42..da09c884cc30 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -17,6 +17,7 @@ config CSKY
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
select DW_APB_TIMER_OF
+ select GENERIC_IOREMAP
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index 80d071e2567f..332f51bc68fb 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -36,14 +36,9 @@
/*
* I/O memory mapping functions.
*/
-extern void __iomem *ioremap_cache(phys_addr_t addr, size_t size);
-extern void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot);
-extern void iounmap(void *addr);
-
-#define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL))
-#define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL))
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_cache ioremap_cache
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), \
+ (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)
#include <asm-generic/io.h>
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 7c21985c60dc..4b2a41e15f2e 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -86,6 +86,10 @@
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_CACHE_CACHED)
+#define _PAGE_IOREMAP \
+ (_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \
+ _CACHE_UNCACHED | _PAGE_SO)
+
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index 06e85b565454..8f6571ae27c8 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c
index e13cd3497628..70c8268d3b2b 100644
--- a/arch/csky/mm/ioremap.c
+++ b/arch/csky/mm/ioremap.c
@@ -3,60 +3,8 @@
#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
- pgprot_t prot, void *caller)
-{
- phys_addr_t last_addr;
- unsigned long offset, vaddr;
- struct vm_struct *area;
-
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- offset = addr & (~PAGE_MASK);
- addr &= PAGE_MASK;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
-
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
- return __ioremap_caller(phys_addr, size, prot,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
- return __ioremap_caller(phys_addr, size, PAGE_KERNEL,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap_cache);
-
-void iounmap(void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index ba1a444d55b3..539e3efcf39c 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -27,7 +27,7 @@
extern int remap_area_pages(unsigned long start, unsigned long phys_addr,
unsigned long end, unsigned long flags);
-extern void __iounmap(const volatile void __iomem *addr);
+extern void iounmap(const volatile void __iomem *addr);
/* Defined in lib/io.c, needed for smc91x driver. */
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
@@ -171,21 +171,9 @@ static inline void writel(u32 data, volatile void __iomem *addr)
#define writew_relaxed __raw_writew
#define writel_relaxed __raw_writel
-/*
- * Need an mtype somewhere in here, for cache type deals?
- * This is probably too long for an inline.
- */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
+#define ioremap_nocache ioremap
-static inline void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
-{
- return ioremap_nocache(phys_addr, size);
-}
-
-static inline void iounmap(volatile void __iomem *addr)
-{
- __iounmap(addr);
-}
#define __raw_writel writel
diff --git a/arch/hexagon/include/uapi/asm/bitsperlong.h b/arch/hexagon/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 5adca0d26913..000000000000
--- a/arch/hexagon/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef __ASM_HEXAGON_BITSPERLONG_H
-#define __ASM_HEXAGON_BITSPERLONG_H
-
-#define __BITS_PER_LONG 32
-
-#include <asm-generic/bitsperlong.h>
-
-#endif
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index f561b127c4b4..25f388d9cfcc 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *addr = phys_to_virt(paddr);
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index cf8974beb500..6fb1aaab1c29 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -14,13 +14,13 @@
EXPORT_SYMBOL(__clear_user_hexagon);
EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(raw_copy_to_user);
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(__vmyield);
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index 77d8e1e69e9b..255c5b1ee1a7 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -9,7 +9,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
unsigned long last_addr, addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -38,7 +38,7 @@ void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
return (void __iomem *) (offset + addr);
}
-void __iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
{
vunmap((void *) ((unsigned long) addr & PAGE_MASK));
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16714477eef4..bab7cd878464 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -33,7 +33,7 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
- select ARCH_HAS_DMA_COHERENT_TO_PFN
+ select DMA_NONCOHERENT_MMAP
select ARCH_HAS_SYNC_DMA_FOR_CPU
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 54e70c21352a..3d666a11a2de 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -256,16 +256,15 @@ static inline void outsl(unsigned long port, const void *src,
# ifdef __KERNEL__
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
-extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
{
return ioremap(phys_addr, size);
}
#define ioremap ioremap
-#define ioremap_nocache ioremap_nocache
#define ioremap_cache ioremap_cache
-#define ioremap_uc ioremap_nocache
+#define ioremap_uc ioremap_uc
#define iounmap iounmap
/*
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 7904f591a79b..eb0db20c9d4c 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -2,6 +2,8 @@
#ifndef _ASM_IA64_IOMMU_H
#define _ASM_IA64_IOMMU_H 1
+#include <linux/acpi.h>
+
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
@@ -9,6 +11,9 @@ extern void no_iommu_init(void);
#ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu;
extern int iommu_detected;
+
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { return 0; }
#else
#define no_iommu (1)
#define iommu_detected (0)
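
The new arch_rmrr_sanity_check() stub lets generic DMAR code validate RMRR entries through one hook on every architecture; ia64 has nothing to check and returns 0 inline. A sketch of the calling pattern, with a hypothetical parse_one_rmrr() caller:

#include <linux/acpi.h>
#include <asm/iommu.h>

static int parse_one_rmrr(struct acpi_dmar_reserved_memory *rmrr)
{
	int ret = arch_rmrr_sanity_check(rmrr);	/* inline no-op on ia64 */

	if (ret)
		return ret;	/* reject a malformed RMRR entry */
	/* ... go on to reserve the described memory range ... */
	return 0;
}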
diff --git a/arch/ia64/include/uapi/asm/errno.h b/arch/ia64/include/uapi/asm/errno.h
deleted file mode 100644
index 9addba592646..000000000000
--- a/arch/ia64/include/uapi/asm/errno.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/errno.h>
diff --git a/arch/ia64/include/uapi/asm/ioctl.h b/arch/ia64/include/uapi/asm/ioctl.h
deleted file mode 100644
index b809c4566e5f..000000000000
--- a/arch/ia64/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/ioctl.h>
diff --git a/arch/ia64/include/uapi/asm/ioctls.h b/arch/ia64/include/uapi/asm/ioctls.h
deleted file mode 100644
index b86001940209..000000000000
--- a/arch/ia64/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_IOCTLS_H
-#define _ASM_IA64_IOCTLS_H
-
-#include <asm-generic/ioctls.h>
-
-#endif /* _ASM_IA64_IOCTLS_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 00e8e2a1eb19..fb0deb8a4221 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -211,7 +211,7 @@ void foo(void)
offsetof (struct cpuinfo_ia64, ptce_stride));
BLANK();
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
- offsetof (struct timespec, tv_nsec));
+ offsetof (struct __kernel_old_timespec, tv_nsec));
DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
offsetof (struct time_sn_spec, snsec));
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 4a3262795890..09ef9ce9988d 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -19,9 +19,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
{
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return page_to_pfn(virt_to_page(cpu_addr));
-}
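
With DMA_NONCOHERENT_MMAP selected in the Kconfig hunk above, the generic dma-direct code computes the pfn for mmap on its own, so this per-arch helper becomes dead code. For reference, the deleted helper was only the trivial conversion below (a sketch of the old behaviour, not a new API):

#include <linux/mm.h>

/* equivalent of the removed arch_dma_coherent_to_pfn() body */
static long coherent_to_pfn_sketch(void *cpu_addr)
{
	return page_to_pfn(virt_to_page(cpu_addr));
}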
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index bf9df2625bc8..58fd67068bac 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
unsigned long pfn = PHYS_PFN(paddr);
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 0c0de2c4ec69..a09cfa064536 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -99,14 +99,14 @@ ioremap (unsigned long phys_addr, unsigned long size)
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_nocache (unsigned long phys_addr, unsigned long size)
+ioremap_uc(unsigned long phys_addr, unsigned long size)
{
if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
return __ioremap_uc(phys_addr);
}
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap_uc);
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
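
On ia64, ioremap_uc() is now a first-class function that refuses ranges the firmware attributes as write-back (EFI_MEMORY_WB), instead of being a #define alias of ioremap_nocache(). Callers therefore have to handle failure; a minimal sketch:

#include <linux/io.h>
#include <linux/printk.h>

static void __iomem *map_uncached(unsigned long phys, unsigned long size)
{
	void __iomem *p = ioremap_uc(phys, size);

	if (!p)		/* range is WB-only; no uncached mapping allowed */
		pr_warn("no uncached mapping for %#lx\n", phys);
	return p;
}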
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index 434bd3750966..579fd98afed6 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -1,6 +1,5 @@
# CONFIG_SWAP is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSCTL_SYSCALL=y
# CONFIG_KALLSYMS is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h
index 421b6c9c769d..559cb91bede1 100644
--- a/arch/m68k/include/asm/kmap.h
+++ b/arch/m68k/include/asm/kmap.h
@@ -20,7 +20,6 @@ extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
int cacheflag);
#define iounmap iounmap
extern void iounmap(void __iomem *addr);
-extern void __iounmap(void *addr, unsigned long size);
#define ioremap ioremap
static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 3fab684cc0db..871a0e11da34 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 40a3b327da07..23f9466aabb5 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -54,6 +54,55 @@ static inline void free_io_area(void *addr)
static struct vm_struct *iolist;
+/*
+ * __free_io_area unmaps nearly everything, so be careful.
+ * It no longer frees pointer/page tables, but that was never
+ * used anyway and might be added back later.
+ */
+static void __free_io_area(void *addr, unsigned long size)
+{
+ unsigned long virtaddr = (unsigned long)addr;
+ pgd_t *pgd_dir;
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
+
+ while ((long)size > 0) {
+ pgd_dir = pgd_offset_k(virtaddr);
+ if (pgd_bad(*pgd_dir)) {
+ printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+ pgd_clear(pgd_dir);
+ return;
+ }
+ pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+ if (CPU_IS_020_OR_030) {
+ int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+ int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+
+ if (pmd_type == _PAGE_PRESENT) {
+ pmd_dir->pmd[pmd_off] = 0;
+ virtaddr += PTRTREESIZE;
+ size -= PTRTREESIZE;
+ continue;
+ } else if (pmd_type == 0)
+ continue;
+ }
+
+ if (pmd_bad(*pmd_dir)) {
+ printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+ pmd_clear(pmd_dir);
+ return;
+ }
+ pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+ pte_val(*pte_dir) = 0;
+ virtaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ flush_tlb_all();
+}
+
static struct vm_struct *get_io_area(unsigned long size)
{
unsigned long addr;
@@ -90,7 +139,7 @@ static inline void free_io_area(void *addr)
if (tmp->addr == addr) {
*p = tmp->next;
/* remove gap added in get_io_area() */
- __iounmap(tmp->addr, tmp->size - IO_SIZE);
+ __free_io_area(tmp->addr, tmp->size - IO_SIZE);
kfree(tmp);
return;
}
@@ -250,55 +299,6 @@ void iounmap(void __iomem *addr)
EXPORT_SYMBOL(iounmap);
/*
- * __iounmap unmaps nearly everything, so be careful
- * Currently it doesn't free pointer/page tables anymore but this
- * wasn't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
- unsigned long virtaddr = (unsigned long)addr;
- pgd_t *pgd_dir;
- pmd_t *pmd_dir;
- pte_t *pte_dir;
-
- while ((long)size > 0) {
- pgd_dir = pgd_offset_k(virtaddr);
- if (pgd_bad(*pgd_dir)) {
- printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
- pgd_clear(pgd_dir);
- return;
- }
- pmd_dir = pmd_offset(pgd_dir, virtaddr);
-
- if (CPU_IS_020_OR_030) {
- int pmd_off = (virtaddr/PTRTREESIZE) & 15;
- int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
-
- if (pmd_type == _PAGE_PRESENT) {
- pmd_dir->pmd[pmd_off] = 0;
- virtaddr += PTRTREESIZE;
- size -= PTRTREESIZE;
- continue;
- } else if (pmd_type == 0)
- continue;
- }
-
- if (pmd_bad(*pmd_dir)) {
- printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
- pmd_clear(pmd_dir);
- return;
- }
- pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
-
- pte_val(*pte_dir) = 0;
- virtaddr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
-
- flush_tlb_all();
-}
-
-/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
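
The m68k change is pure code motion: the exported __iounmap() becomes the static __free_io_area() and moves above free_io_area(), its only caller, so no forward declaration or export is needed. The shape of the refactor, with hypothetical names:

/* define the now-static helper first ... */
static void helper(void *addr, unsigned long size)
{
	/* ... tear down the mapping ... */
}

static void caller(void *addr, unsigned long size)
{
	helper(addr, size);	/* ... so no prototype is needed here */
}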
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index c9c4be822456..5f46ebe7bfe3 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -4,7 +4,6 @@ config MICROBLAZE
select ARCH_32BIT_OFF_T
select ARCH_NO_SWAP
select ARCH_HAS_BINFMT_FLAT if !MMU
- select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
@@ -46,6 +45,7 @@ config MICROBLAZE
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE if MMU
+ select SPARSE_IRQ
# Endianness selection
choice
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 654edfdc7867..b3b433db89d8 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -33,6 +33,8 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=m
CONFIG_PCI=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
@@ -73,6 +75,7 @@ CONFIG_UIO_PDRV_GENIRQ=y
CONFIG_UIO_DMEM_GENIRQ=y
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 86c95b2a1ce1..d33c61737b8b 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -39,9 +39,6 @@ extern resource_size_t isa_mem_base;
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_wc(addr, size) ioremap((addr), (size))
-#define ioremap_wt(addr, size) ioremap((addr), (size))
#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index d785defeeed5..eac2fb4b3fb9 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MICROBLAZE_IRQ_H
#define _ASM_MICROBLAZE_IRQ_H
-#define NR_IRQS (32 + 1)
#include <asm-generic/irq.h>
struct pt_regs;
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index a89c2d4ed5ff..d7bebd04247b 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -15,7 +15,7 @@
#include <linux/bug.h>
#include <asm/cacheflush.h>
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
enum dma_data_direction direction)
{
switch (direction) {
@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- __dma_sync(dev, paddr, size, dir);
+ __dma_sync(paddr, size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- __dma_sync(dev, paddr, size, dir);
+ __dma_sync(paddr, size, dir);
}
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 4e1b567becd6..de7083bd1d24 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -738,14 +738,9 @@ no_intr_resched:
andi r5, r5, _TIF_NEED_RESCHED;
beqi r5, restore /* if zero jump over */
-preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
bralid r15, preempt_schedule_irq
nop
- lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
- lwi r5, r11, TI_FLAGS; /* get flags in thread info */
- andi r5, r5, _TIF_NEED_RESCHED;
- bnei r5, preempt /* if non zero jump to resched */
restore:
#endif
VM_OFF /* MS: turn off MMU */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index f264fdcf152a..7d2894418691 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -99,7 +99,7 @@ big_endian:
_prepare_copy_fdt:
	or	r11, r0, r0 /* increment */
ori r4, r0, TOPHYS(_fdt_start)
- ori r3, r0, (0x8000 - 4)
+ ori r3, r0, (0x10000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
sw r12, r4, r11 /* addr[r4 + r11] = r12 */
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 760cac41cbfe..2c09fa3a8a01 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -48,7 +48,7 @@ SECTIONS {
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
- . = _fdt_start + 0x8000; /* Pad up to 32kbyte */
+ . = _fdt_start + 0x10000; /* Pad up to 64kbyte */
_fdt_end = . ;
}
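
This pad and the copy bound in head.S above must stay in lock step: the boot code copies (pad - 4) bytes into the __fdt_blob section that the linker script reserves, and both grow from 32 KiB to 64 KiB here. One way to make the coupling explicit, as a sketch only; FDT_BLOB_SIZE is a hypothetical shared constant, not something this patch adds:

/* hypothetical shared constant, visible to both files */
#define FDT_BLOB_SIZE	0x10000		/* 64 KiB reserved for the DTB */

/* head.S would then copy up to (FDT_BLOB_SIZE - 4) bytes, and    */
/* vmlinux.lds.S would pad with: . = _fdt_start + FDT_BLOB_SIZE;  */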
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c86be02b6d89..add388236f4e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -74,6 +74,7 @@ config MIPS
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
+ select HAVE_SPARSE_SYSCALL_NR
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
@@ -1193,9 +1194,9 @@ config DMA_NONCOHERENT
select ARCH_HAS_DMA_WRITE_COMBINE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT
- select NEED_DMA_MAP_STATE
- select ARCH_HAS_DMA_COHERENT_TO_PFN
+ select DMA_NONCOHERENT_MMAP
select DMA_NONCOHERENT_CACHE_SYNC
+ select NEED_DMA_MAP_STATE
config SYS_HAS_EARLY_PRINTK
bool
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 3d13c77c125f..df56bf4179e3 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr;
}
-void arch_sync_dma_for_cpu_all(struct device *dev)
+void arch_sync_dma_for_cpu_all(void)
{
void __iomem *cbr = BMIPS_GET_CBR();
u32 cfg;
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index cb4aa23a2bf4..be41df2a81fb 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -17,7 +17,6 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index c16a2330e84d..360c6b2d397a 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -20,7 +20,6 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_MACH_LOONGSON64=y
CONFIG_SMP=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index e6c600dc1814..614af02d83e6 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -5,7 +5,6 @@ CONFIG_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MIPS_MALTA=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 82b44b774553..9c051f8fd330 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -5,7 +5,6 @@ CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MIPS_MALTA=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 4190fc6189a0..2e90d97551d6 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -5,7 +5,6 @@ CONFIG_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MIPS_MALTA=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index a13c10e910ec..d1f7fdb27284 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -5,7 +5,6 @@ CONFIG_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MIPS_MALTA=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index b35f1fc690fb..48e5bd492452 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -6,7 +6,6 @@ CONFIG_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MIPS_MALTA=y
diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig
index a39426e57e91..fc39ddf610a9 100644
--- a/arch/mips/configs/omega2p_defconfig
+++ b/arch/mips/configs/omega2p_defconfig
@@ -16,7 +16,6 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index d3f4d5248d9f..97c9a69d1528 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -2,7 +2,6 @@
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig
index 523b944fd527..a14f8ea5c386 100644
--- a/arch/mips/configs/vocore2_defconfig
+++ b/arch/mips/configs/vocore2_defconfig
@@ -16,7 +16,6 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c
index 779e399c9bef..d1fd23e6ef84 100644
--- a/arch/mips/crypto/chacha-glue.c
+++ b/arch/mips/crypto/chacha-glue.c
@@ -128,12 +128,14 @@ static struct skcipher_alg algs[] = {
static int __init chacha_simd_mod_init(void)
{
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+ return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
}
static void __exit chacha_simd_mod_fini(void)
{
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_init(chacha_simd_mod_init);
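
The guard matters when this glue code is built in while the crypto core is modular: an unconditional crypto_register_skciphers() call would fail to link. IS_REACHABLE() is true only when the referenced code is callable from the current object, unlike IS_ENABLED(); the ChaCha library entry points keep working either way, only registration with the crypto API is skipped. A sketch of the distinction, for a tristate CONFIG_FOO as seen from built-in code:

#include <linux/kconfig.h>

/*
 *   CONFIG_FOO=y    IS_ENABLED() == 1    IS_REACHABLE() == 1
 *   CONFIG_FOO=m    IS_ENABLED() == 1    IS_REACHABLE() == 0
 *   CONFIG_FOO=n    IS_ENABLED() == 0    IS_REACHABLE() == 0
 */
static bool can_register_skcipher(void)
{
	/* callable only if the skcipher core is reachably linked */
	return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER);
}

The poly1305-glue.c hunk below applies the same pattern with CONFIG_CRYPTO_HASH.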
diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
index b759b6ccc361..b37d29cf5d0a 100644
--- a/arch/mips/crypto/poly1305-glue.c
+++ b/arch/mips/crypto/poly1305-glue.c
@@ -187,12 +187,14 @@ static struct shash_alg mips_poly1305_alg = {
static int __init mips_poly1305_mod_init(void)
{
- return crypto_register_shash(&mips_poly1305_alg);
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+ crypto_register_shash(&mips_poly1305_alg) : 0;
}
static void __exit mips_poly1305_mod_exit(void)
{
- crypto_unregister_shash(&mips_poly1305_alg);
+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
+ crypto_unregister_shash(&mips_poly1305_alg);
}
module_init(mips_poly1305_mod_init);
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index c8b595c60910..61b0fc2026e6 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -13,7 +13,6 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
index b5c240806e1b..14e352651ce9 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -2,14 +2,6 @@
#ifndef _MIPS_DMA_DIRECT_H
#define _MIPS_DMA_DIRECT_H 1
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
diff --git a/arch/mips/include/uapi/asm/msgbuf.h b/arch/mips/include/uapi/asm/msgbuf.h
index 46aa15b13e4e..9e0c2e230274 100644
--- a/arch/mips/include/uapi/asm/msgbuf.h
+++ b/arch/mips/include/uapi/asm/msgbuf.h
@@ -15,9 +15,9 @@
#if defined(__mips64)
struct msqid64_ds {
struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
diff --git a/arch/mips/include/uapi/asm/sembuf.h b/arch/mips/include/uapi/asm/sembuf.h
index 60c89e6cb25b..43e1b4a2f68a 100644
--- a/arch/mips/include/uapi/asm/sembuf.h
+++ b/arch/mips/include/uapi/asm/sembuf.h
@@ -14,8 +14,8 @@
#ifdef __mips64
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused1;
unsigned long __unused2;
diff --git a/arch/mips/include/uapi/asm/shmbuf.h b/arch/mips/include/uapi/asm/shmbuf.h
index 9b9bba3401f2..680bb95b2240 100644
--- a/arch/mips/include/uapi/asm/shmbuf.h
+++ b/arch/mips/include/uapi/asm/shmbuf.h
@@ -17,9 +17,9 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
diff --git a/arch/mips/include/uapi/asm/stat.h b/arch/mips/include/uapi/asm/stat.h
index 95416f366d7f..3d2a3b71845c 100644
--- a/arch/mips/include/uapi/asm/stat.h
+++ b/arch/mips/include/uapi/asm/stat.h
@@ -26,17 +26,17 @@ struct stat {
gid_t st_gid;
unsigned st_rdev;
long st_pad2[2];
- off_t st_size;
+ long st_size;
long st_pad3;
/*
* Actually this should be timestruc_t st_atime, st_mtime and st_ctime
* but we don't have it under Linux.
*/
- time_t st_atime;
+ long st_atime;
long st_atime_nsec;
- time_t st_mtime;
+ long st_mtime;
long st_mtime_nsec;
- time_t st_ctime;
+ long st_ctime;
long st_ctime_nsec;
long st_blksize;
long st_blocks;
@@ -70,13 +70,13 @@ struct stat64 {
* Actually this should be timestruc_t st_atime, st_mtime and st_ctime
* but we don't have it under Linux.
*/
- time_t st_atime;
+ long st_atime;
unsigned long st_atime_nsec; /* Reserved for st_atime expansion */
- time_t st_mtime;
+ long st_mtime;
unsigned long st_mtime_nsec; /* Reserved for st_mtime expansion */
- time_t st_ctime;
+ long st_ctime;
unsigned long st_ctime_nsec; /* Reserved for st_ctime expansion */
unsigned long st_blksize;
@@ -105,7 +105,7 @@ struct stat {
unsigned int st_rdev;
unsigned int st_pad1[3]; /* Reserved for st_rdev expansion */
- off_t st_size;
+ long st_size;
/*
* Actually this should be timestruc_t st_atime, st_mtime and st_ctime
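
These UAPI hunks (msgbuf, sembuf, shmbuf, stat) touch only the 64-bit variants, where __kernel_time_t, time_t and off_t were already plain long, so spelling them as long keeps the ABI byte-for-byte identical while decoupling exported headers from the kernel-internal time types being phased out for y2038. (The parisc msgbuf hunk further down does the same for __BITS_PER_LONG == 64.) A build-time statement of the invariant might look like:

#include <linux/build_bug.h>

static_assert(sizeof(long) == 8,
	      "these struct layouts assume an LP64 ABI");
/* sizeof(struct msqid64_ds) and every field offset stay unchanged */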
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index a01e14955187..c64a297e82b3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return vdma_alloc(phys, size);
}
@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+ arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
vdma_free(dma_addr);
}
@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ arch_sync_dma_for_device(sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
if (sg->dma_address == DMA_MAPPING_ERROR)
@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
- dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
vdma_free(sg->dma_address);
}
}
@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
static void jazz_dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+ arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+ arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_sg_for_device(struct device *dev,
@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
const struct dma_map_ops jazz_dma_ops = {
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 7a12763d553a..6ee3f7218c67 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -100,7 +100,7 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
-#undef ns_to_timeval
-#define ns_to_timeval ns_to_old_timeval32
+#undef ns_to_kernel_old_timeval
+#define ns_to_kernel_old_timeval ns_to_old_timeval32
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e6db06a1d31a..6dd103d3cebb 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -103,7 +103,7 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
-#undef ns_to_timeval
-#define ns_to_timeval ns_to_old_timeval32
+#undef ns_to_kernel_old_timeval
+#define ns_to_kernel_old_timeval ns_to_old_timeval32
#include "../../../fs/binfmt_elf.c"
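
Since the kernel-internal struct timeval went away, fs/binfmt_elf.c fills core-dump timestamps through ns_to_kernel_old_timeval(), so these compat loaders must redirect the new name. The trick they use: override the helper, then textually include the generic loader so it is compiled a second time against 32-bit types:

#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32	/* 32-bit layout */

#include "../../../fs/binfmt_elf.c"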
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 1d4d57dd9acf..dc42ffc83825 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -27,7 +27,7 @@
 * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² resp.
* SGI IP32 aka O2.
*/
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
{
switch (boot_cpu_type()) {
case CPU_R10000:
@@ -59,12 +59,6 @@ void *cached_kernel_address(void *addr)
return __va(addr) - UNCAC_BASE;
}
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
-}
-
static inline void dma_sync_virt(void *addr, size_t size,
enum dma_data_direction dir)
{
@@ -118,17 +112,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
} while (left);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
dma_sync_phys(paddr, size, dir);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
- if (cpu_needs_post_dma_flush(dev))
+ if (cpu_needs_post_dma_flush())
dma_sync_phys(paddr, size, dir);
}
#endif
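
No arch_sync_dma_for_{device,cpu}() implementation across the tree ever used the struct device argument, which is why this series drops it everywhere: cache maintenance needs only the physical range and the transfer direction. A caller after the change, in sketch form:

#include <linux/dma-noncoherent.h>

static void sync_buffer_for_device(phys_addr_t paddr, size_t size)
{
	/* writeback/invalidate as needed before the device reads */
	arch_sync_dma_for_device(paddr, size, DMA_TO_DEVICE);
}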
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index 8a41b359cf90..40efc990cdce 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -21,22 +21,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
/*
* The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit
- * bus, so we set the bus's DMA mask accordingly. However the HT link
+ * bus, so we set the bus's DMA limit accordingly. However the HT link
* down the artificial PCI-HT bridge supports 40-bit addressing and the
* SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus
* width, so we record the PCI-HT bridge's secondary and subordinate bus
- * numbers and do not set the mask for devices present in the inclusive
+ * numbers and do not set the limit for devices present in the inclusive
* range of those.
*/
-struct sb1250_bus_dma_mask_exclude {
+struct sb1250_bus_dma_limit_exclude {
bool set;
unsigned char start;
unsigned char end;
};
-static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
+static int sb1250_bus_dma_limit(struct pci_dev *dev, void *data)
{
- struct sb1250_bus_dma_mask_exclude *exclude = data;
+ struct sb1250_bus_dma_limit_exclude *exclude = data;
bool exclude_this;
bool ht_bridge;
@@ -55,7 +55,7 @@ static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
exclude->start, exclude->end);
} else {
dev_dbg(&dev->dev, "disabling DAC for device");
- dev->dev.bus_dma_mask = DMA_BIT_MASK(32);
+ dev->dev.bus_dma_limit = DMA_BIT_MASK(32);
}
return 0;
@@ -63,9 +63,9 @@ static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
static void quirk_sb1250_pci_dac(struct pci_dev *dev)
{
- struct sb1250_bus_dma_mask_exclude exclude = { .set = false };
+ struct sb1250_bus_dma_limit_exclude exclude = { .set = false };
- pci_walk_bus(dev->bus, sb1250_bus_dma_mask, &exclude);
+ pci_walk_bus(dev->bus, sb1250_bus_dma_limit, &exclude);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
quirk_sb1250_pci_dac);
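
The bus_dma_mask to bus_dma_limit rename reflects what the field has always meant here: an inclusive upper bound on reachable bus addresses, not a bit pattern. DMA_BIT_MASK(32) is both the 32-bit mask and the highest 32-bit address, so the stored value does not change. In sketch form:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void cap_dev_to_32bit(struct pci_dev *pdev)
{
	/* highest bus address the bridge can drive without DAC cycles */
	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
}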
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 1434fa60f3db..94e9ce994494 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -51,6 +51,7 @@ choice
select MIPS_GIC
select COMMON_CLK
select CLKSRC_MIPS_GIC
+ select HAVE_PCI if PCI_MT7621
endchoice
choice
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
index 5a2a82148d8d..c3909bd8dd1a 100644
--- a/arch/mips/sgi-ip32/ip32-platform.c
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -115,7 +115,7 @@ ip32_rtc_platform_data[] = {
.bcd_mode = true,
.no_irq = false,
.uie_unsupported = false,
- .alloc_io_resources = true,
+ .access_type = ds1685_reg_direct,
.plat_prepare_poweroff = ip32_prepare_poweroff,
},
};
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index fbd68329737f..12c06a833b7c 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -20,6 +20,7 @@ config NDS32
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
+ select GENERIC_IOREMAP
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_CMPDI2
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu
index f80a4ab63da2..f88a12fdf0f3 100644
--- a/arch/nds32/Kconfig.cpu
+++ b/arch/nds32/Kconfig.cpu
@@ -13,7 +13,7 @@ config FPU
default n
help
If FPU ISA is used in user space, this configuration shall be Y to
- enable required support in kerenl such as fpu context switch and
+ enable required support in kernel such as fpu context switch and
fpu exception handler.
If no FPU ISA is used in user space, say N.
@@ -27,7 +27,7 @@ config LAZY_FPU
enhance system performance by reducing the context switch
frequency of the FPU register.
- For nomal case, say Y.
+	  For the normal case, say Y.
config SUPPORT_DENORMAL_ARITHMETIC
bool "Denormal arithmetic support"
@@ -36,7 +36,7 @@ config SUPPORT_DENORMAL_ARITHMETIC
help
	  Say Y here to enable arithmetic on denormalized numbers. Enabling
	  this feature can enhance the precision for very small (denormal) numbers.
-	  However, performance loss in float pointe calculations is
+	  However, performance loss in floating-point calculations is
	  possibly significant due to the additional FPU exceptions.
	  If the calculated tolerance for such tiny numbers is not critical,
@@ -73,7 +73,7 @@ choice
	  the cache aliasing issue. The remaining CPUs (N13, N10 and D10) are
	  implemented with a VIPT data cache, which may cause the cache aliasing issue
	  if its cache way size is larger than the page size. You can specify the
- CPU type direcly or choose CPU_V3 if unsure.
+ CPU type directly or choose CPU_V3 if unsure.
A kernel built for N10 is able to run on N15, D15, N13, N10 or D10.
A kernel built for N15 is able to run on N15 or D15.
diff --git a/arch/nds32/boot/dts/Makefile b/arch/nds32/boot/dts/Makefile
index fff8ade7a84f..f84bd529b6fd 100644
--- a/arch/nds32/boot/dts/Makefile
+++ b/arch/nds32/boot/dts/Makefile
@@ -5,5 +5,3 @@ else
BUILTIN_DTB :=
endif
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
-
-clean-files := *.dtb *.dtb.S
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 16f262322b8f..e57378d04006 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
-extern void iounmap(volatile void __iomem *addr);
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
@@ -79,5 +78,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); })
+
#include <asm-generic/io.h>
+
#endif /* __ASM_NDS32_IO_H */
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 0588ec99725c..6fbf251cfc26 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -12,7 +12,6 @@
#include <asm/nds32.h>
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
-#include <asm/io.h>
#include <nds32_intrinsic.h>
#endif
@@ -130,6 +129,9 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define _PAGE_CACHE _PAGE_C_MEM_WB
#endif
+#define _PAGE_IOREMAP \
+ (_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
+
/*
* + Level 1 descriptor (PMD)
*/
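
Selecting GENERIC_IOREMAP lets nds32 delete its private arch/nds32/mm/ioremap.c (below): the architecture now supplies only the _PAGE_IOREMAP protection value, and the common code builds the mapping. Roughly, asm-generic/io.h reduces ioremap() to the following (a sketch of the generic fallback, not code this patch adds):

/* what the generic layer does, approximately */
#define ioremap(addr, size) \
	ioremap_prot((addr), (size), _PAGE_IOREMAP)

/* lib/ioremap.c's ioremap_prot() then sets up a VM_IOREMAP area and
 * calls ioremap_page_range(), much like the deleted per-arch copy. */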
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index 4206d4b6c8ce..69d762182d49 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
} while (left);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_FROM_DEVICE:
@@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c
index 334c2a6cec23..0ce6f9f307e6 100644
--- a/arch/nds32/kernel/perf_event_cpu.c
+++ b/arch/nds32/kernel/perf_event_cpu.c
@@ -1119,7 +1119,7 @@ static void cpu_pmu_init(struct nds32_pmu *cpu_pmu)
on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}
-const static struct of_device_id cpu_pmu_of_device_ids[] = {
+static const struct of_device_id cpu_pmu_of_device_ids[] = {
{.compatible = "andestech,nds32v3-pmu",
.data = device_pmu_init},
{},
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c
index b02581891c33..9ec03cf0ec54 100644
--- a/arch/nds32/kernel/vdso/gettimeofday.c
+++ b/arch/nds32/kernel/vdso/gettimeofday.c
@@ -48,9 +48,9 @@ static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
}
static notrace long clock_gettime_fallback(clockid_t _clkid,
- struct timespec *_ts)
+ struct __kernel_old_timespec *_ts)
{
- register struct timespec *ts asm("$r1") = _ts;
+ register struct __kernel_old_timespec *ts asm("$r1") = _ts;
register clockid_t clkid asm("$r0") = _clkid;
register long ret asm("$r0");
@@ -63,7 +63,7 @@ static notrace long clock_gettime_fallback(clockid_t _clkid,
return ret;
}
-static notrace int do_realtime_coarse(struct timespec *ts,
+static notrace int do_realtime_coarse(struct __kernel_old_timespec *ts,
struct vdso_data *vdata)
{
u32 seq;
@@ -78,25 +78,23 @@ static notrace int do_realtime_coarse(struct timespec *ts,
return 0;
}
-static notrace int do_monotonic_coarse(struct timespec *ts,
+static notrace int do_monotonic_coarse(struct __kernel_old_timespec *ts,
struct vdso_data *vdata)
{
- struct timespec tomono;
u32 seq;
+ u64 ns;
do {
seq = vdso_read_begin(vdata);
- ts->tv_sec = vdata->xtime_coarse_sec;
- ts->tv_nsec = vdata->xtime_coarse_nsec;
-
- tomono.tv_sec = vdata->wtm_clock_sec;
- tomono.tv_nsec = vdata->wtm_clock_nsec;
+ ts->tv_sec = vdata->xtime_coarse_sec + vdata->wtm_clock_sec;
+ ns = vdata->xtime_coarse_nsec + vdata->wtm_clock_nsec;
} while (vdso_read_retry(vdata, seq));
- ts->tv_sec += tomono.tv_sec;
- timespec_add_ns(ts, tomono.tv_nsec);
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
return 0;
}
@@ -115,7 +113,7 @@ static notrace inline u64 vgetsns(struct vdso_data *vdso)
return ((u64) cycle_delta & vdso->cs_mask) * vdso->cs_mult;
}
-static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
+static notrace int do_realtime(struct __kernel_old_timespec *ts, struct vdso_data *vdata)
{
unsigned count;
u64 ns;
@@ -133,32 +131,31 @@ static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
return 0;
}
-static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
+static notrace int do_monotonic(struct __kernel_old_timespec *ts, struct vdso_data *vdata)
{
- struct timespec tomono;
- u64 nsecs;
+ u64 ns;
u32 seq;
do {
seq = vdso_read_begin(vdata);
ts->tv_sec = vdata->xtime_clock_sec;
- nsecs = vdata->xtime_clock_nsec;
- nsecs += vgetsns(vdata);
- nsecs >>= vdata->cs_shift;
+ ns = vdata->xtime_clock_nsec;
+ ns += vgetsns(vdata);
+ ns >>= vdata->cs_shift;
- tomono.tv_sec = vdata->wtm_clock_sec;
- tomono.tv_nsec = vdata->wtm_clock_nsec;
+ ts->tv_sec += vdata->wtm_clock_sec;
+ ns += vdata->wtm_clock_nsec;
} while (vdso_read_retry(vdata, seq));
- ts->tv_sec += tomono.tv_sec;
- ts->tv_nsec = 0;
- timespec_add_ns(ts, nsecs + tomono.tv_nsec);
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
return 0;
}
-notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
+notrace int __vdso_clock_gettime(clockid_t clkid, struct __kernel_old_timespec *ts)
{
struct vdso_data *vdata;
int ret = -1;
@@ -191,10 +188,10 @@ notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
}
static notrace int clock_getres_fallback(clockid_t _clk_id,
- struct timespec *_res)
+ struct __kernel_old_timespec *_res)
{
register clockid_t clk_id asm("$r0") = _clk_id;
- register struct timespec *res asm("$r1") = _res;
+ register struct __kernel_old_timespec *res asm("$r1") = _res;
register int ret asm("$r0");
asm volatile ("movi $r15, %3\n"
@@ -206,7 +203,7 @@ static notrace int clock_getres_fallback(clockid_t _clk_id,
return ret;
}
-notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
+notrace int __vdso_clock_getres(clockid_t clk_id, struct __kernel_old_timespec *res)
{
struct vdso_data *vdata = __get_datapage();
@@ -230,10 +227,10 @@ notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
return 0;
}
-static notrace inline int gettimeofday_fallback(struct timeval *_tv,
+static notrace inline int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
{
- register struct timeval *tv asm("$r0") = _tv;
+ register struct __kernel_old_timeval *tv asm("$r0") = _tv;
register struct timezone *tz asm("$r1") = _tz;
register int ret asm("$r0");
@@ -246,9 +243,9 @@ static notrace inline int gettimeofday_fallback(struct timeval *_tv,
return ret;
}
-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
- struct timespec ts;
+ struct __kernel_old_timespec ts;
struct vdso_data *vdata;
int ret;
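
Two things happen in this vDSO file: the exported struct timespec becomes __kernel_old_timespec as part of the y2038 rework, and the monotonic paths now add the wall-to-monotonic offset inside the seqlock loop and normalize once at the end. The normalization idiom, which avoids a full 64-bit division in userspace-mapped code (a sketch using the same helpers):

#include <linux/math64.h>
#include <linux/time.h>

static void normalize_ts(struct __kernel_old_timespec *ts, u64 ns)
{
	/* iterative divide: cheap, since ns is at most a few seconds */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;
}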
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
index bd360e4583b5..897ecaf5cf54 100644
--- a/arch/nds32/mm/Makefile
+++ b/arch/nds32/mm/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y := extable.o tlb.o \
- fault.o init.o ioremap.o mmap.o \
+obj-y := extable.o tlb.o fault.o init.o mmap.o \
mm-nds32.o cacheflush.o proc.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
diff --git a/arch/nds32/mm/ioremap.c b/arch/nds32/mm/ioremap.c
deleted file mode 100644
index 690140bb23a2..000000000000
--- a/arch/nds32/mm/ioremap.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <asm/pgtable.h>
-
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
- void *caller)
-{
- struct vm_struct *area;
- unsigned long addr, offset, last_addr;
- pgprot_t prot;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
-
- area->phys_addr = phys_addr;
- addr = (unsigned long)area->addr;
- prot = __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D |
- _PAGE_G | _PAGE_C_DEV);
- if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
- vunmap((void *)addr);
- return NULL;
- }
- return (__force void __iomem *)(offset + (char *)addr);
-
-}
-
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
-{
- return __ioremap_caller(phys_addr, size,
- __builtin_return_address(0));
-}
-
-EXPORT_SYMBOL(ioremap);
-
-void iounmap(volatile void __iomem * addr)
-{
- vunmap((void *)(PAGE_MASK & (unsigned long)addr));
-}
-
-EXPORT_SYMBOL(iounmap);
diff --git a/arch/nios2/configs/10m50_defconfig b/arch/nios2/configs/10m50_defconfig
index 1137ef2ed3b0..a7967b4cfb6e 100644
--- a/arch/nios2/configs/10m50_defconfig
+++ b/arch/nios2/configs/10m50_defconfig
@@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y
CONFIG_NO_HZ_IDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSCTL_SYSCALL=y
# CONFIG_ELF_CORE is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
diff --git a/arch/nios2/configs/3c120_defconfig b/arch/nios2/configs/3c120_defconfig
index a0f160ba7598..423a0c40a162 100644
--- a/arch/nios2/configs/3c120_defconfig
+++ b/arch/nios2/configs/3c120_defconfig
@@ -2,7 +2,6 @@ CONFIG_SYSVIPC=y
CONFIG_NO_HZ_IDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSCTL_SYSCALL=y
# CONFIG_ELF_CORE is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
index 9010243077ab..746853ac7d8d 100644
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -25,29 +25,8 @@
#define writew_relaxed(x, addr) writew(x, addr)
#define writel_relaxed(x, addr) writel(x, addr)
-extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
- unsigned long cacheflag);
-extern void __iounmap(void __iomem *addr);
-
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, 0);
-}
-
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
- unsigned long size)
-{
- return __ioremap(physaddr, size, 0);
-}
-
-static inline void iounmap(void __iomem *addr)
-{
- __iounmap(addr);
-}
-
-#define ioremap_nocache ioremap_nocache
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+void iounmap(void __iomem *addr);
/* Pages to physical address... */
#define page_to_phys(page) virt_to_phys(page_to_virt(page))
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 9cb238664584..0ed711e37902 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -18,8 +18,8 @@
#include <linux/cache.h>
#include <asm/cacheflush.h>
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *vaddr = phys_to_virt(paddr);
@@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *vaddr = phys_to_virt(paddr);
diff --git a/arch/nios2/mm/ioremap.c b/arch/nios2/mm/ioremap.c
index 3a28177a01eb..b56af759dcdf 100644
--- a/arch/nios2/mm/ioremap.c
+++ b/arch/nios2/mm/ioremap.c
@@ -112,8 +112,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
/*
* Map some physical address range into the kernel address space.
*/
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
- unsigned long cacheflag)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
struct vm_struct *area;
unsigned long offset;
@@ -139,15 +138,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
return NULL;
}
- /*
- * Map uncached objects in the low part of address space to
- * CONFIG_NIOS2_IO_REGION_BASE
- */
- if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
- IS_MAPPABLE_UNCACHEABLE(last_addr) &&
- !(cacheflag & _PAGE_CACHED))
- return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
-
/* Mappings have to be page-aligned */
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
@@ -158,21 +148,20 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
if (!area)
return NULL;
addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size,
- cacheflag)) {
+ if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
vunmap(addr);
return NULL;
}
return (void __iomem *) (offset + (char *)addr);
}
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
/*
- * __iounmap unmaps nearly everything, so be careful
+ * iounmap unmaps nearly everything, so be careful
 * it no longer frees pointer/page tables, but that wasn't
 * used anyway and might be added back later.
*/
-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
{
struct vm_struct *p;
@@ -184,4 +173,4 @@ void __iounmap(void __iomem *addr)
pr_err("iounmap: bad address %p\n", addr);
kfree(p);
}
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index bf326f0edd2f..1928e061ff96 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -13,7 +13,7 @@ config OPENRISC
select IRQ_DOMAIN
select HANDLE_DOMAIN_IRQ
select GPIOLIB
- select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRACEHOOK
select SPARSE_IRQ
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_PROBE
@@ -51,12 +51,12 @@ config NO_IOPORT_MAP
def_bool y
config TRACE_IRQFLAGS_SUPPORT
- def_bool y
+ def_bool y
# For now, use generic checksum functions
# These can be reimplemented in assembly later if so inclined
config GENERIC_CSUM
- def_bool y
+ def_bool y
config STACKTRACE_SUPPORT
def_bool y
@@ -89,8 +89,8 @@ config DCACHE_WRITETHROUGH
If unsure say N here
config OPENRISC_BUILTIN_DTB
- string "Builtin DTB"
- default ""
+ string "Builtin DTB"
+ default ""
menu "Class II Instructions"
@@ -161,13 +161,13 @@ config OPENRISC_HAVE_SHADOW_GPRS
On a unicore system it's safe to say N here if you are unsure.
config CMDLINE
- string "Default kernel command string"
- default ""
- help
- On some architectures there is currently no way for the boot loader
- to pass arguments to the kernel. For these architectures, you should
- supply some command-line options at build time by entering them
- here.
+ string "Default kernel command string"
+ default ""
+ help
+ On some architectures there is currently no way for the boot loader
+ to pass arguments to the kernel. For these architectures, you should
+ supply some command-line options at build time by entering them
+ here.
menu "Debugging options"
@@ -185,7 +185,7 @@ config OPENRISC_ESR_EXCEPTION_BUG_CHECK
default n
help
This option enables some checks that might expose some problems
- in kernel.
+	  in the kernel.
Say N if you are unsure.
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index 5b81a96ab85e..e18f038b2a6d 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -25,7 +25,6 @@
#define PIO_OFFSET 0
#define PIO_MASK 0
-#define ioremap_nocache ioremap
#include <asm-generic/io.h>
#include <asm/pgtable.h>
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 4d5b8bd1d795..adec711ad39d 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages_exact(vaddr, size);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 507f0644fcf8..db864b18962a 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 18b072a47a10..c7a5726728a4 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -8,7 +8,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index 3cbf1f1c1188..c1c22819a04d 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -42,31 +42,32 @@ extern __wsum csum_partial_copy_from_user(const void __user *src,
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
+ unsigned long t0, t1, t2;
__asm__ __volatile__ (
" ldws,ma 4(%1), %0\n"
" addib,<= -4, %2, 2f\n"
"\n"
-" ldws 4(%1), %%r20\n"
-" ldws 8(%1), %%r21\n"
-" add %0, %%r20, %0\n"
-" ldws,ma 12(%1), %%r19\n"
-" addc %0, %%r21, %0\n"
-" addc %0, %%r19, %0\n"
-"1: ldws,ma 4(%1), %%r19\n"
+" ldws 4(%1), %4\n"
+" ldws 8(%1), %5\n"
+" add %0, %4, %0\n"
+" ldws,ma 12(%1), %3\n"
+" addc %0, %5, %0\n"
+" addc %0, %3, %0\n"
+"1: ldws,ma 4(%1), %3\n"
" addib,< 0, %2, 1b\n"
-" addc %0, %%r19, %0\n"
+" addc %0, %3, %0\n"
"\n"
-" extru %0, 31, 16, %%r20\n"
-" extru %0, 15, 16, %%r21\n"
-" addc %%r20, %%r21, %0\n"
-" extru %0, 15, 16, %%r21\n"
-" add %0, %%r21, %0\n"
+" extru %0, 31, 16, %4\n"
+" extru %0, 15, 16, %5\n"
+" addc %4, %5, %0\n"
+" extru %0, 15, 16, %5\n"
+" add %0, %5, %0\n"
" subi -1, %0, %0\n"
"2:\n"
- : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (t0), "=r" (t1), "=r" (t2)
: "1" (iph), "2" (ihl)
- : "r19", "r20", "r21", "memory");
+ : "memory");
return (__force __sum16)sum;
}
@@ -126,6 +127,10 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
__u32 len, __u8 proto,
__wsum sum)
{
+ unsigned long t0, t1, t2, t3;
+
+ len += proto; /* add 16-bit proto + len */
+
__asm__ __volatile__ (
#if BITS_PER_LONG > 32
@@ -136,20 +141,19 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
-" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */
-" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */
-" add %8, %3, %3\n"/* add 16-bit proto + len */
-" add %%r19, %0, %0\n"
-" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */
-" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */
-" add,dc %%r20, %0, %0\n"
-" add,dc %%r21, %0, %0\n"
-" add,dc %%r22, %0, %0\n"
+" ldd,ma 8(%1), %4\n" /* get 1st saddr word */
+" ldd,ma 8(%2), %5\n" /* get 1st daddr word */
+" add %4, %0, %0\n"
+" ldd,ma 8(%1), %6\n" /* 2nd saddr */
+" ldd,ma 8(%2), %7\n" /* 2nd daddr */
+" add,dc %5, %0, %0\n"
+" add,dc %6, %0, %0\n"
+" add,dc %7, %0, %0\n"
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
-" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */
-" depdi 0, 31, 32, %0\n" /* clear upper half */
-" add %%r19, %0, %0\n" /* fold into 32-bits */
-" addc 0, %0, %0\n" /* add carry */
+" extrd,u %0, 31, 32, %4\n"/* copy upper half down */
+" depdi 0, 31, 32, %0\n"/* clear upper half */
+" add %4, %0, %0\n" /* fold into 32-bits */
+" addc 0, %0, %0\n" /* add carry */
#else
@@ -158,30 +162,29 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Insn stream is serialized on the carry bit here too.
** result from the previous operation (eg r0 + x)
*/
-
-" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */
-" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */
-" add %8, %3, %3\n" /* add 16-bit proto + len */
-" add %%r19, %0, %0\n"
-" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */
-" addc %%r20, %0, %0\n"
-" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */
-" addc %%r21, %0, %0\n"
-" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */
-" addc %%r22, %0, %0\n"
-" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */
-" addc %%r19, %0, %0\n"
-" ldw,ma 4(%1), %%r21\n" /* 4th saddr */
-" addc %%r20, %0, %0\n"
-" ldw,ma 4(%2), %%r22\n" /* 4th daddr */
-" addc %%r21, %0, %0\n"
-" addc %%r22, %0, %0\n"
+" ldw,ma 4(%1), %4\n" /* get 1st saddr word */
+" ldw,ma 4(%2), %5\n" /* get 1st daddr word */
+" add %4, %0, %0\n"
+" ldw,ma 4(%1), %6\n" /* 2nd saddr */
+" addc %5, %0, %0\n"
+" ldw,ma 4(%2), %7\n" /* 2nd daddr */
+" addc %6, %0, %0\n"
+" ldw,ma 4(%1), %4\n" /* 3rd saddr */
+" addc %7, %0, %0\n"
+" ldw,ma 4(%2), %5\n" /* 3rd daddr */
+" addc %4, %0, %0\n"
+" ldw,ma 4(%1), %6\n" /* 4th saddr */
+" addc %5, %0, %0\n"
+" ldw,ma 4(%2), %7\n" /* 4th daddr */
+" addc %6, %0, %0\n"
+" addc %7, %0, %0\n"
" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
#endif
- : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
- : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
- : "r19", "r20", "r21", "r22", "memory");
+ : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
+ "=r" (t0), "=r" (t1), "=r" (t2), "=r" (t3)
+ : "0" (sum), "1" (saddr), "2" (daddr), "3" (len)
+ : "memory");
return csum_fold(sum);
}
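
The two checksum rewrites above share one pattern: instead of naming fixed scratch registers (r19-r22) in the clobber list, the temporaries become C variables bound as "=r" outputs, so the compiler allocates the registers and can schedule around them. Below is a minimal sketch of that conversion, assuming a parisc-style three-operand add and a GCC toolchain; the function and its operands are hypothetical, not part of the patch.

static inline unsigned int sum3(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned long t0;	/* was: a fixed "r19"-style clobber */

	__asm__ __volatile__(
	"	add %2, %3, %1\n"	/* t0 = a + b */
	"	add %1, %4, %0\n"	/* a  = t0 + c */
	: "=r" (a), "=&r" (t0)	/* "&": t0 is written before all inputs are read */
	: "0" (a), "r" (b), "r" (c));
	return a;
}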
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 93d37010b375..46212b52c23e 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -127,16 +127,7 @@ static inline void gsc_writeq(unsigned long long val, unsigned long addr)
/*
* The standard PCI ioremap interfaces
*/
-
-extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/* Most machines react poorly to I/O-space being cacheable... Instead let's
- * define ioremap() in terms of ioremap_nocache().
- */
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-{
- return __ioremap(offset, size, _PAGE_NO_CACHE);
-}
+void __iomem *ioremap(unsigned long offset, unsigned long size);
#define ioremap_nocache(off, sz) ioremap((off), (sz))
#define ioremap_wc ioremap_nocache
#define ioremap_uc ioremap_nocache
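
With the flags argument gone, callers use the kernel-wide ioremap() signature and every parisc mapping comes out uncacheable (the ioremap.c hunk further down hard-codes _PAGE_NO_CACHE). A hypothetical driver fragment, just to show that call sites need nothing beyond the rename; res and CTRL_OFFSET are invented:

static int sketch_probe(struct resource *res)
{
	void __iomem *regs;

	regs = ioremap(res->start, resource_size(res));	/* always uncached now */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + CTRL_OFFSET);	/* CTRL_OFFSET: made-up register */
	iounmap(regs);
	return 0;
}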
diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h
index 6a2e9ab2ef8d..3b877335da38 100644
--- a/arch/parisc/include/uapi/asm/msgbuf.h
+++ b/arch/parisc/include/uapi/asm/msgbuf.h
@@ -16,9 +16,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#if __BITS_PER_LONG == 64
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
#else
unsigned long msg_stime_high;
unsigned long msg_stime; /* last msgsnd time */
diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h
index 3c31163b1241..8241cf126018 100644
--- a/arch/parisc/include/uapi/asm/sembuf.h
+++ b/arch/parisc/include/uapi/asm/sembuf.h
@@ -16,8 +16,8 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#if __BITS_PER_LONG == 64
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
#else
unsigned long sem_otime_high;
unsigned long sem_otime; /* last semop time */
diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h
index c89b3dd8db21..5da3089be65e 100644
--- a/arch/parisc/include/uapi/asm/shmbuf.h
+++ b/arch/parisc/include/uapi/asm/shmbuf.h
@@ -16,9 +16,9 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#if __BITS_PER_LONG == 64
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
#else
unsigned long shm_atime_high;
unsigned long shm_atime; /* last attach time */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a82b3eaa5398..2407b0b789d3 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -365,7 +365,7 @@ void flush_dcache_page(struct page *page)
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
- if (old_addr)
+ if (parisc_requires_coherency() && old_addr)
printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
old_addr = addr;
}
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index ca35d9a76e50..a60d47fd4d55 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -439,14 +439,14 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)__va(dma_handle), order);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
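
These hunks track a tree-wide signature change: the cache maintenance hooks no longer take a struct device, which no implementation used. The resulting common prototypes, exactly as the hunks above imply:

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir);
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir);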
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 9f6ff7bc06f9..f8c07dcbfb49 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -342,7 +342,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
}
/* Do the secure computing check after ptrace. */
- if (secure_computing(NULL) == -1)
+ if (secure_computing() == -1)
return -1;
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
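
The seccomp hook likewise drops a parameter that this call site passed as NULL. Before and after, as the hunk implies; the one-argument form is a reconstruction of the prior API, so treat it as a sketch:

int secure_computing(const struct seccomp_data *sd);	/* old */
int secure_computing(void);				/* new */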
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index f29f682352f0..6e7c005aa09b 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -25,7 +25,7 @@
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
void __iomem *addr;
struct vm_struct *area;
@@ -36,10 +36,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
unsigned long end = phys_addr + size - 1;
/* Support EISA addresses */
if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
- (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+ (phys_addr >= 0x00500000 && end < 0x03bfffff))
phys_addr |= F_EXTEND(0xfc000000);
- flags |= _PAGE_NO_CACHE;
- }
#endif
/* Don't allow wraparound or zero size */
@@ -65,7 +63,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
}
pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
- _PAGE_ACCESSED | flags);
+ _PAGE_ACCESSED | _PAGE_NO_CACHE);
/*
* Mappings have to be page-aligned
@@ -90,7 +88,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
return (void __iomem *) (offset + (char __iomem *)addr);
}
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
void iounmap(const volatile void __iomem *io_addr)
{
diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild
index 51e6908323ad..5e2f9eaa3ee7 100644
--- a/arch/powerpc/Kbuild
+++ b/arch/powerpc/Kbuild
@@ -14,4 +14,5 @@ obj-$(CONFIG_XMON) += xmon/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_PERF_EVENTS) += perf/
+obj-$(CONFIG_KEXEC_CORE) += kexec/
obj-$(CONFIG_KEXEC_FILE) += purgatory/
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3e56c9c2f16e..e446bb5b3f8d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -161,6 +161,7 @@ config PPC
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
+ select GENERIC_EARLY_IOREMAP
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP if PCI
@@ -551,6 +552,17 @@ config RELOCATABLE
setting can still be useful to bootwrappers that need to know the
load address of the kernel (eg. u-boot/mkimage).
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ depends on (FSL_BOOKE && FLATMEM && PPC32)
+ depends on RELOCATABLE
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+ relying on knowledge of the location of kernel internals.
+
+ If unsure, say Y.
+
config RELOCATABLE_TEST
bool "Test relocatable kernel"
depends on (PPC64 && RELOCATABLE)
@@ -874,15 +886,33 @@ config CMDLINE
some command-line options at build time by entering them here. In
most cases you will need to specify the root device here.
+choice
+ prompt "Kernel command line type" if CMDLINE != ""
+ default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+ bool "Use bootloader kernel arguments if available"
+ help
+ Uses the command-line options passed by the boot loader. If
+ the boot loader doesn't provide any, the default kernel command
+ string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+ bool "Extend bootloader kernel arguments"
+ help
+ The command-line arguments provided by the boot loader will be
+ appended to the default kernel command string.
+
config CMDLINE_FORCE
bool "Always use the default kernel command string"
- depends on CMDLINE_BOOL
help
Always use the default kernel command string, even if the boot
loader passes other arguments to the kernel.
This is useful if you cannot or don't want to change the
command-line options your boot loader passes to the kernel.
+endchoice
+
config EXTRA_TARGETS
string "Additional default image types"
help
@@ -934,6 +964,28 @@ config PPC_MEM_KEYS
If unsure, say y.
+config PPC_SECURE_BOOT
+ prompt "Enable secure boot support"
+ bool
+ depends on PPC_POWERNV
+ depends on IMA_ARCH_POLICY
+ help
+ Systems with firmware secure boot enabled need to define security
+ policies to extend secure boot to the OS. This config allows a user
+ to enable OS secure boot on systems that have firmware support for
+ it. If in doubt say N.
+
+config PPC_SECVAR_SYSFS
+ bool "Enable sysfs interface for POWER secure variables"
+ default y
+ depends on PPC_SECURE_BOOT
+ depends on SYSFS
+ help
+ POWER secure variables are managed and controlled by firmware.
+ These variables are exposed to userspace via sysfs to enable
+ read/write operations on these variables. Say Y if you have
+ secure boot enabled and want to expose variables to userspace.
+
endmenu
config ISA_DMA_API
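
Taken together, the new symbols can be exercised with a .config fragment like the one below; it is a hypothetical excerpt, and the command line string is made up.

CONFIG_RELOCATABLE=y
CONFIG_RANDOMIZE_BASE=y
# built-in command line; the bootloader's arguments get appended to it
CONFIG_CMDLINE="console=ttyS0 root=/dev/sda1"
CONFIG_CMDLINE_EXTEND=y
# OS secure boot on PowerNV, plus the sysfs view of firmware secure variables
CONFIG_PPC_SECURE_BOOT=y
CONFIG_PPC_SECVAR_SYSFS=y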
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index c59920920ddc..4e1d39847462 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -122,8 +122,8 @@ config XMON_DEFAULT_RO_MODE
depends on XMON
default y
help
- Operate xmon in read-only mode. The cmdline options 'xmon=rw' and
- 'xmon=ro' override this default.
+ Operate xmon in read-only mode. The cmdline options 'xmon=rw' and
+ 'xmon=ro' override this default.
config DEBUGGER
bool
@@ -222,7 +222,7 @@ config PPC_EARLY_DEBUG_44x
help
Select this to enable early debugging for IBM 44x chips via the
inbuilt serial port. If you enable this, ensure you set
- PPC_EARLY_DEBUG_44x_PHYSLOW below to suit your target board.
+ PPC_EARLY_DEBUG_44x_PHYSLOW below to suit your target board.
config PPC_EARLY_DEBUG_40x
bool "Early serial debugging for IBM/AMCC 40x CPUs"
@@ -325,7 +325,7 @@ config PPC_EARLY_DEBUG_44x_PHYSLOW
default "0x40000200"
help
You probably want 0x40000200 for ebony boards and
- 0x40000300 for taishan
+ 0x40000300 for taishan
config PPC_EARLY_DEBUG_44x_PHYSHIGH
hex "EPRN of early debug UART physical address"
@@ -359,9 +359,9 @@ config FAIL_IOMMU
If you are unsure, say N.
config PPC_PTDUMP
- bool "Export kernel pagetable layout to userspace via debugfs"
- depends on DEBUG_KERNEL && DEBUG_FS
- help
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL && DEBUG_FS
+ help
This option exports the state of the kernel pagetables to a
debugfs file. This is only useful for kernel developers who are
working in architecture specific areas of the kernel - probably
@@ -390,8 +390,8 @@ config PPC_DEBUG_WX
config PPC_FAST_ENDIAN_SWITCH
bool "Deprecated fast endian-switch syscall"
- depends on DEBUG_KERNEL && PPC_BOOK3S_64
- help
+ depends on DEBUG_KERNEL && PPC_BOOK3S_64
+ help
If you're unsure what this is, say N.
config KASAN_SHADOW_OFFSET
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 83522c9fc7b6..f35730548e42 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -91,11 +91,13 @@ MULTIPLEWORD := -mmultiple
endif
ifdef CONFIG_PPC64
+ifndef CONFIG_CC_IS_CLANG
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
endif
+endif
ifndef CONFIG_CC_IS_CLANG
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
@@ -141,6 +143,7 @@ endif
endif
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
+ifndef CONFIG_CC_IS_CLANG
ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
@@ -149,6 +152,7 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
endif
+endif
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
@@ -330,32 +334,32 @@ powernv_be_defconfig:
PHONY += mpc85xx_defconfig
mpc85xx_defconfig:
- $(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+ $(call merge_into_defconfig,mpc85xx_base.config,\
85xx-32bit 85xx-hw fsl-emb-nonhw)
PHONY += mpc85xx_smp_defconfig
mpc85xx_smp_defconfig:
- $(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+ $(call merge_into_defconfig,mpc85xx_base.config,\
85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
PHONY += corenet32_smp_defconfig
corenet32_smp_defconfig:
- $(call merge_into_defconfig,corenet_basic_defconfig,\
+ $(call merge_into_defconfig,corenet_base.config,\
85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw dpaa)
PHONY += corenet64_smp_defconfig
corenet64_smp_defconfig:
- $(call merge_into_defconfig,corenet_basic_defconfig,\
+ $(call merge_into_defconfig,corenet_base.config,\
85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw dpaa)
PHONY += mpc86xx_defconfig
mpc86xx_defconfig:
- $(call merge_into_defconfig,mpc86xx_basic_defconfig,\
+ $(call merge_into_defconfig,mpc86xx_base.config,\
86xx-hw fsl-emb-nonhw)
PHONY += mpc86xx_smp_defconfig
mpc86xx_smp_defconfig:
- $(call merge_into_defconfig,mpc86xx_basic_defconfig,\
+ $(call merge_into_defconfig,mpc86xx_base.config,\
86xx-smp 86xx-hw fsl-emb-nonhw)
PHONY += ppc32_allmodconfig
diff --git a/arch/powerpc/boot/dts/fsl/kmcent2.dts b/arch/powerpc/boot/dts/fsl/kmcent2.dts
index 48b7f9797124..8e7f0828af29 100644
--- a/arch/powerpc/boot/dts/fsl/kmcent2.dts
+++ b/arch/powerpc/boot/dts/fsl/kmcent2.dts
@@ -210,13 +210,19 @@
fman@400000 {
ethernet@e0000 {
- fixed-link = <0 1 1000 0 0>;
- phy-connection-type = "sgmii";
+ phy-mode = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
ethernet@e2000 {
- fixed-link = <1 1 1000 0 0>;
- phy-connection-type = "sgmii";
+ phy-mode = "sgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
};
ethernet@e4000 {
@@ -229,7 +235,7 @@
ethernet@e8000 {
phy-handle = <&front_phy>;
- phy-connection-type = "rgmii";
+ phy-mode = "rgmii-id";
};
mdio0: mdio@fc000 {
@@ -258,14 +264,50 @@
pci1: pcie@ffe250000 {
status = "disabled";
+ reg = <0xf 0xfe250000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x10000000 0 0x10000000
+ 0x01000000 0 0 0xf 0xf8010000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
};
pci2: pcie@ffe260000 {
status = "disabled";
+ reg = <0xf 0xfe260000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x10000000
+ 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
};
pci3: pcie@ffe270000 {
status = "disabled";
+ reg = <0xf 0xfe270000 0 0x10000>;
+ ranges = <0x02000000 0 0xe0000000 0xc 0x30000000 0 0x10000000
+ 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+ pcie@0 {
+ ranges = <0x02000000 0 0xe0000000
+ 0x02000000 0 0xe0000000
+ 0 0x10000000
+
+ 0x01000000 0 0x00000000
+ 0x01000000 0 0x00000000
+ 0 0x00010000>;
+ };
};
qe: qe@ffe140000 {
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index 2abc8e83b95e..9757d4f6331e 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -6,6 +6,8 @@
#include <string.h>
#define INT_MAX ((int)(~0U>>1))
+#define UINT32_MAX ((u32)~0U)
+#define INT32_MAX ((s32)(UINT32_MAX >> 1))
#include "of.h"
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 5a75e4f14273..db93c117be36 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -18,9 +18,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index e2691c5db766..a3854cf65f8d 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -17,9 +17,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 949989ef2322..edc22464dfb5 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -20,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index 4347a87088dc..579fa846839c 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -4,7 +4,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 90b759bbf426..188789b9aa4c 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -17,9 +17,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 881c300c011d..5bf6af7ef093 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -20,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index 0ed46704b9fa..9eaaf1a1d2c6 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -15,9 +15,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 2fa553ebfdc9..f0c8a07cc274 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -29,9 +29,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 5a1b9ee18075..82c6f49b8dcb 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -20,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index 22e1ef5272ab..679213214a75 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -18,9 +18,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 86f34ea4173a..ccc14eb7a2f1 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -20,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index ce3ec5a2cd15..be76e066df01 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -27,9 +27,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index f67447c92e6f..93d2a4e64af9 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -16,9 +16,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index 5dbd83a1c11b..1abaa63e067f 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -21,9 +21,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index e49114f0e526..e67fc041ca3e 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -39,9 +39,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_VLAN_8021Q=m
CONFIG_DEVTMPFS=y
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index fa5378af44f9..7d7ff84c8200 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -20,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index aae879c21239..fb5c73a29bf4 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -29,9 +29,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index 56eddca998c6..c6dc1445fc04 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -18,9 +18,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index 369bfd2e451d..c83ad03182df 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -19,9 +19,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 8be95f6fe3a7..640fe1d5af28 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -21,9 +21,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 974a4f038cda..ed02f12dbd54 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -23,9 +23,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 10e517b69fa4..2c0973db8837 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -20,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index cd08f3ddd609..a2d355ca62b2 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -18,9 +18,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 303600ff1fdb..fdb11daeb688 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -31,9 +31,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index d21b5cb365f2..648c6b3dccf9 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -25,9 +25,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_TIPC=y
CONFIG_BRIDGE=m
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index dad53ef86b49..cbcae2a927e9 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -22,9 +22,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index 920f37316fdb..f29c166998af 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -60,7 +60,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET6_AH=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index f7a803ab2285..510f7fd1f6a3 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -22,9 +22,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index cf94d28d0e31..f6d140f2d922 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -26,9 +26,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 2dd1b58a18ae..42fbc70cec33 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -51,11 +51,9 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-# CONFIG_INET6_XFRM_MODE_BEET is not set
# CONFIG_IPV6_SIT is not set
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index 9ff493dd8439..502a75d49789 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -27,9 +27,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
diff --git a/arch/powerpc/configs/corenet_basic_defconfig b/arch/powerpc/configs/corenet_base.config
index b568d465e59e..b568d465e59e 100644
--- a/arch/powerpc/configs/corenet_basic_defconfig
+++ b/arch/powerpc/configs/corenet_base.config
diff --git a/arch/powerpc/configs/debug.config b/arch/powerpc/configs/debug.config
new file mode 100644
index 000000000000..a14ae1f20d60
--- /dev/null
+++ b/arch/powerpc/configs/debug.config
@@ -0,0 +1 @@
+CONFIG_SCOM_DEBUGFS=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index b20bd0cf3543..9c1bf60f1e19 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -24,9 +24,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 85e73c3bd859..24c0e0ea5aeb 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -29,9 +29,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 6203c1093a3a..1f3a045ab081 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -25,9 +25,6 @@ CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_CAN=y
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 6f87a5c74960..83d801307178 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y
CONFIG_PPC_MPC5200_BUGFIX=y
CONFIG_PPC_MPC5200_LPBFIFO=m
# CONFIG_PPC_PMAC is not set
-CONFIG_SIMPLE_GPIO=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/mpc85xx_basic_defconfig b/arch/powerpc/configs/mpc85xx_base.config
index b1593fe6f70b..b1593fe6f70b 100644
--- a/arch/powerpc/configs/mpc85xx_basic_defconfig
+++ b/arch/powerpc/configs/mpc85xx_base.config
diff --git a/arch/powerpc/configs/mpc86xx_basic_defconfig b/arch/powerpc/configs/mpc86xx_base.config
index 67bd1fa036ee..67bd1fa036ee 100644
--- a/arch/powerpc/configs/mpc86xx_basic_defconfig
+++ b/arch/powerpc/configs/mpc86xx_base.config
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 285d506c5a76..0327a329316f 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -23,9 +23,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 4e6e95f92646..f492e7d35925 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -38,8 +38,6 @@ CONFIG_IP_MULTICAST=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 6658cceb928c..32841456a573 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -83,9 +83,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_IPV6_SIT=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 67952819593e..a41eedfe0a5f 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -32,9 +32,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_BRIDGE=m
CONFIG_CONNECTOR=y
CONFIG_MTD=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 9dca4cffa623..7e28919041cf 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -109,9 +109,6 @@ CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_HSTCP=m
@@ -129,7 +126,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 314c63939816..4db51719342a 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -47,9 +47,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1253482a67c0..069f67f12731 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -46,6 +46,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_HZ_100=y
CONFIG_KEXEC=y
+CONFIG_PRESERVE_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
# CONFIG_COMPACTION is not set
@@ -63,9 +64,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_NET_IPIP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_DNS_RESOLVER=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 6c39c52b8e4a..29b19ec7e5d7 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -22,9 +22,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 7493f36dd6e9..ffed2b4256d6 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -27,9 +27,6 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 5a04448ad6b5..379c171f3ddd 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -29,9 +29,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_RARP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_BT=y
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
index 47985219a68f..dce86e75f1a8 100644
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -103,6 +103,7 @@ static int __init crc_test_init(void)
crc32, verify32, len);
break;
}
+ cond_resched();
}
pr_info("crc-vpmsum_test done, completed %lu iterations\n", i);
} while (0);
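
The cond_resched() insertion is the standard fix for long-running kernel test loops, which otherwise trip the soft-lockup and RCU stall detectors. The shape is simply:

for (i = 0; i < iterations; i++) {
	run_one_crc_case();	/* hypothetical per-iteration body */
	cond_resched();		/* yield so the watchdogs stay quiet */
}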
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 64870c7be4a3..d0a23d0db863 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -4,10 +4,11 @@ generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += export.h
generic-y += irq_regs.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += vtime.h
-generic-y += msi.h
+generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 8561498e653c..983c0084fb3f 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -92,7 +92,8 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
long sys_debug_setcontext(struct ucontext __user *ctx,
int ndbg, struct sig_dbg_op __user *dbg);
int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
+ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
+ struct __kernel_old_timeval __user *tvp);
unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif
@@ -152,9 +153,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
/* Patch sites */
extern s32 patch__call_flush_count_cache;
extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_count_cache;
+extern long kvm_flush_link_stack;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index d5a44912902f..f6968c811026 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -122,11 +122,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, pud, PUD_INDEX);
}
@@ -143,11 +138,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}
@@ -166,11 +156,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, table, PTE_INDEX);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index a069dfcac9a9..4e697bc2f4cd 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -70,9 +70,6 @@ static inline int get_hugepd_cache_index(int index)
/* should not reach */
}
-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index e3d4dd4ae2fa..34d1018896b3 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -59,9 +59,6 @@ static inline int get_hugepd_cache_index(int index)
BUG();
}
-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */
static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 7aa8195b6cff..dcb5c3839d2f 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -147,22 +147,6 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
flush_tlb_page(vma, address);
}
-/*
- * flush the page walk cache for the address
- */
-static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
-{
- /*
- * Flush the page table walk cache on freeing a page table. We already
- * have marked the upper/higher level page table entry none by now.
- * So it is safe to flush PWC here.
- */
- if (!radix_enabled())
- return;
-
- radix__flush_tlb_pwc(tlb, address);
-}
-
extern bool tlbie_capable;
extern bool tlbie_enabled;
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f47e6ff6554d..338f36cd9934 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -49,6 +49,15 @@
".previous\n"
#endif
+#define BUG_ENTRY(insn, flags, ...) \
+ __asm__ __volatile__( \
+ "1: " insn "\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry)), \
+ ##__VA_ARGS__)
+
/*
* BUG_ON() and WARN_ON() do their best to cooperate with compile-time
* optimisations. However depending on the complexity of the condition
@@ -56,11 +65,7 @@
*/
#define BUG() do { \
- __asm__ __volatile__( \
- "1: twi 31,0,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (0), "i" (sizeof(struct bug_entry))); \
+ BUG_ENTRY("twi 31, 0, 0", 0); \
unreachable(); \
} while (0)
@@ -69,23 +74,11 @@
if (x) \
BUG(); \
} else { \
- __asm__ __volatile__( \
- "1: "PPC_TLNEI" %4,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), "i" (0), \
- "i" (sizeof(struct bug_entry)), \
- "r" ((__force long)(x))); \
+ BUG_ENTRY(PPC_TLNEI " %4, 0", 0, "r" ((__force long)(x))); \
} \
} while (0)
-#define __WARN_FLAGS(flags) do { \
- __asm__ __volatile__( \
- "1: twi 31,0,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING|(flags)), \
- "i" (sizeof(struct bug_entry))); \
-} while (0)
+#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags))
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
@@ -93,13 +86,9 @@
if (__ret_warn_on) \
__WARN(); \
} else { \
- __asm__ __volatile__( \
- "1: "PPC_TLNEI" %4,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
- "i" (sizeof(struct bug_entry)), \
- "r" (__ret_warn_on)); \
+ BUG_ENTRY(PPC_TLNEI " %4, 0", \
+ BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
+ "r" (__ret_warn_on)); \
} \
unlikely(__ret_warn_on); \
})
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 45e3137ccd71..72b81015cebe 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -55,42 +55,48 @@ struct ppc64_caches {
extern struct ppc64_caches ppc64_caches;
-static inline u32 l1_cache_shift(void)
+static inline u32 l1_dcache_shift(void)
{
return ppc64_caches.l1d.log_block_size;
}
-static inline u32 l1_cache_bytes(void)
+static inline u32 l1_dcache_bytes(void)
{
return ppc64_caches.l1d.block_size;
}
+
+static inline u32 l1_icache_shift(void)
+{
+ return ppc64_caches.l1i.log_block_size;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+ return ppc64_caches.l1i.block_size;
+}
#else
-static inline u32 l1_cache_shift(void)
+static inline u32 l1_dcache_shift(void)
{
return L1_CACHE_SHIFT;
}
-static inline u32 l1_cache_bytes(void)
+static inline u32 l1_dcache_bytes(void)
{
return L1_CACHE_BYTES;
}
+
+static inline u32 l1_icache_shift(void)
+{
+ return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+ return L1_CACHE_BYTES;
+}
+
#endif
-#endif /* ! __ASSEMBLY__ */
-
-#if defined(__ASSEMBLY__)
-/*
- * For a snooping icache, we still need a dummy icbi to purge all the
- * prefetched instructions from the ifetch buffers. We also need a sync
- * before the icbi to order the the actual stores to memory that might
- * have modified instructions with the icbi.
- */
-#define PURGE_PREFETCHED_INS \
- sync; \
- icbi 0,r3; \
- sync; \
- isync
-#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifdef CONFIG_PPC_BOOK3S_32
@@ -124,6 +130,17 @@ static inline void dcbst(void *addr)
{
__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
+
+static inline void icbi(void *addr)
+{
+ asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void iccci(void *addr)
+{
+ asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
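
Splitting the helpers per cache and adding icbi()/iccci() lets flush loops be written in plain C. A sketch of an icache flush built from these pieces; it assumes the non-assembly PPC32 path and mirrors what a C flush_icache_range() can now do, not the kernel's exact implementation:

static void sketch_flush_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long bytes = l1_dcache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> l1_dcache_shift(); i++, addr += bytes)
		dcbst(addr);		/* push modified data out to memory */
	mb();				/* order the stores before invalidation */

	bytes = l1_icache_bytes();
	addr = (char *)(start & ~(bytes - 1));
	size = stop - (unsigned long)addr + (bytes - 1);
	for (i = 0; i < size >> l1_icache_shift(); i++, addr += bytes)
		icbi(addr);		/* drop stale prefetched instructions */
	mb();
	isync();			/* resynchronize instruction fetch */
}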
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index eef388f2659f..4a1c9f0200e1 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -42,29 +42,25 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void flush_icache_range(unsigned long, unsigned long);
+void flush_icache_range(unsigned long start, unsigned long stop);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
int len);
-extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
-#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#else
-static inline void __flush_dcache_icache_phys(unsigned long physaddr)
-{
- BUG();
-}
-#endif
-
-/*
- * Write any modified data cache blocks out to memory and invalidate them.
- * Does not invalidate the corresponding instruction cache blocks.
+void __flush_dcache_icache(void *page);
+
+/**
+ * flush_dcache_range(): Write any modified data cache blocks out to memory and
+ * invalidate them. Does not invalidate the corresponding instruction cache
+ * blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
*/
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
- unsigned long shift = l1_cache_shift();
- unsigned long bytes = l1_cache_bytes();
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
void *addr = (void *)(start & ~(bytes - 1));
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
@@ -89,8 +85,8 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
*/
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
- unsigned long shift = l1_cache_shift();
- unsigned long bytes = l1_cache_bytes();
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
void *addr = (void *)(start & ~(bytes - 1));
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
@@ -108,8 +104,8 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop)
static inline void invalidate_dcache_range(unsigned long start,
unsigned long stop)
{
- unsigned long shift = l1_cache_shift();
- unsigned long bytes = l1_cache_bytes();
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
void *addr = (void *)(start & ~(bytes - 1));
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
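
The rounding in these helpers deserves a worked example. With 128-byte cache lines, flushing start = 0x1005, stop = 0x1105 (stop is exclusive) computes:

addr = 0x1005 & ~0x7f         = 0x1000	/* round start down to a line */
size = 0x1105 - 0x1000 + 0x7f = 0x184	/* pad so a partial tail line counts */
size >> 7                     = 3	/* loop iterations */

/* lines touched: 0x1000, 0x1080, 0x1100 - exactly the three lines
 * that overlap [0x1005, 0x1105) */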
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
index a2912b47102c..abc154d784b0 100644
--- a/arch/powerpc/include/asm/dma-direct.h
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -2,26 +2,13 @@
#ifndef ASM_POWERPC_DMA_DIRECT_H
#define ASM_POWERPC_DMA_DIRECT_H 1
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <=
- min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
-}
-
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- if (!dev)
- return paddr + PCI_DRAM_OFFSET;
return paddr + dev->archdata.dma_offset;
}
static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- if (!dev)
- return daddr - PCI_DRAM_OFFSET;
return daddr - dev->archdata.dma_offset;
}
#endif /* ASM_POWERPC_DMA_DIRECT_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
deleted file mode 100644
index 565d6f74b189..000000000000
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2004 IBM
- */
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- /* We don't handle the NULL dev case for ISA for now. We could
- * do it via an out of line call but it is not needed for now. The
- * only ISA DMA device we support is the floppy and we have a hack
- * in the floppy driver directly to get a device for us.
- */
- return NULL;
-}
-
-#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 0cfc365d814b..2ef155a3c821 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -15,6 +15,7 @@
#define _ASM_FIXMAP_H
#ifndef __ASSEMBLY__
+#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
@@ -63,7 +64,22 @@ enum fixed_addresses {
FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
FIX_IMMR_SIZE,
#endif
+#ifdef CONFIG_PPC_83xx
+ /* For IMMR we need an aligned 2M area */
+#define FIX_IMMR_SIZE (SZ_2M / PAGE_SIZE)
+ FIX_IMMR_START,
+ FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+ FIX_IMMR_SIZE,
+#endif
/* FIX_PCIE_MCFG, */
+ __end_of_permanent_fixed_addresses,
+
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS 16
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
__end_of_fixed_addresses
};
@@ -71,14 +87,22 @@ enum fixed_addresses {
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
+#define FIXMAP_PAGE_IO PAGE_KERNEL_NCG
#include <asm-generic/fixmap.h>
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
- map_kernel_page(fix_to_virt(idx), phys, flags);
+ if (__builtin_constant_p(idx))
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ else if (WARN_ON(idx >= __end_of_fixed_addresses))
+ return;
+
+ map_kernel_page(__fix_to_virt(idx), phys, flags);
}
+#define __early_set_fixmap __set_fixmap
+
#endif /* !__ASSEMBLY__ */
#endif
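
The btmap constants added above size the early-ioremap window; the numbers work out as follows assuming 4 KiB pages (PAGE_SIZE varies by config, so this is illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;				/* assumed PAGE_SIZE */
	unsigned long nr_fix_btmaps = (256 * 1024) / page_size;	/* 64 pages per slot */
	unsigned long slots = 16;				/* FIX_BTMAPS_SLOTS */
	unsigned long total = nr_fix_btmaps * slots;		/* TOTAL_FIX_BTMAPS */

	printf("%lu pages (%lu KiB) of early-ioremap space in %lu slots\n",
	       total, total * page_size / 1024, slots);
	return 0;
}
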
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 67e2da195eae..27ac6f5d2891 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -14,6 +14,7 @@ struct arch_hw_breakpoint {
unsigned long address;
u16 type;
u16 len; /* length of the target data symbol */
+ u16 hw_len; /* length programmed in hw */
};
/* Note: Don't change the first 6 bits below as they are in the same order
@@ -33,6 +34,11 @@ struct arch_hw_breakpoint {
#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
HW_BRK_TYPE_HYP)
+#define HW_BREAKPOINT_ALIGN 0x7
+
+#define DABR_MAX_LEN 8
+#define DAWR_MAX_LEN 512
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
@@ -44,8 +50,6 @@ struct pmu;
struct perf_sample_data;
struct task_struct;
-#define HW_BREAKPOINT_ALIGN 0x7
-
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
@@ -70,6 +74,7 @@ static inline void hw_breakpoint_disable(void)
brk.address = 0;
brk.type = 0;
brk.len = 0;
+ brk.hw_len = 0;
if (ppc_breakpoint_available())
__set_breakpoint(&brk);
}
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 32a18f2f49bc..e3a905e3d573 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -226,8 +226,8 @@ static inline bool arch_irqs_disabled(void)
#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
+#define __hard_irq_enable() wrtee(MSR_EE)
+#define __hard_irq_disable() wrtee(0)
#else
#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
@@ -280,8 +280,6 @@ extern void force_external_irq_replay(void);
#else /* CONFIG_PPC64 */
-#define SET_MSR_EE(x) mtmsr(x)
-
static inline unsigned long arch_local_save_flags(void)
{
return mfmsr();
@@ -289,47 +287,44 @@ static inline unsigned long arch_local_save_flags(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
-#if defined(CONFIG_BOOKE)
- asm volatile("wrtee %0" : : "r" (flags) : "memory");
-#else
- mtmsr(flags);
-#endif
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(flags);
+ else
+ mtmsr(flags);
}
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags = arch_local_save_flags();
-#ifdef CONFIG_BOOKE
- asm volatile("wrteei 0" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
- wrtspr(SPRN_EID);
-#else
- SET_MSR_EE(flags & ~MSR_EE);
-#endif
+
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else
+ mtmsr(flags & ~MSR_EE);
+
return flags;
}
static inline void arch_local_irq_disable(void)
{
-#ifdef CONFIG_BOOKE
- asm volatile("wrteei 0" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
- wrtspr(SPRN_EID);
-#else
- arch_local_irq_save();
-#endif
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else
+ mtmsr(mfmsr() & ~MSR_EE);
}
static inline void arch_local_irq_enable(void)
{
-#ifdef CONFIG_BOOKE
- asm volatile("wrteei 1" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
- wrtspr(SPRN_EIE);
-#else
- unsigned long msr = mfmsr();
- SET_MSR_EE(msr | MSR_EE);
-#endif
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(MSR_EE);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EIE);
+ else
+ mtmsr(mfmsr() | MSR_EE);
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 1c3133b5f86a..1006a427e99c 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -3,6 +3,7 @@
#define _ASM_POWERPC_KUP_8XX_H_
#include <asm/bug.h>
+#include <asm/mmu.h>
#ifdef CONFIG_PPC_KUAP
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index 4c9777d256fb..b41004664312 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -75,7 +75,6 @@
#define MAS2_E 0x00000001
#define MAS2_WIMGE_MASK 0x0000001f
#define MAS2_EPN_MASK(size) (~0 << (size + 10))
-#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
#define MAS3_RPN 0xFFFFF000
#define MAS3_U0 0x00000200
@@ -221,6 +220,16 @@
#define TLBILX_T_CLASS2 6
#define TLBILX_T_CLASS3 7
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define MAS2_M_IF_NEEDED MAS2_M
+#else
+#define MAS2_M_IF_NEEDED 0
+#endif
+
#ifndef __ASSEMBLY__
#include <asm/bug.h>
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 378e3997845a..c1f25a760eb1 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -211,7 +211,10 @@
#define OPAL_MPIPL_UPDATE 173
#define OPAL_MPIPL_REGISTER_TAG 174
#define OPAL_MPIPL_QUERY_TAG 175
-#define OPAL_LAST 175
+#define OPAL_SECVAR_GET 176
+#define OPAL_SECVAR_GET_NEXT 177
+#define OPAL_SECVAR_ENQUEUE_UPDATE 178
+#define OPAL_LAST 178
#define QUIESCE_HOLD 1 /* Spin all calls at entry */
#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a0cf8fba4d12..9986ac34b8e2 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -298,6 +298,13 @@ int opal_sensor_group_clear(u32 group_hndl, int token);
int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
+int opal_secvar_get(const char *key, uint64_t key_len, u8 *data,
+ uint64_t *data_size);
+int opal_secvar_get_next(const char *key, uint64_t *key_len,
+ uint64_t key_buf_size);
+int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
+ uint64_t data_size);
+
s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f6c562acc3f8..7f1fd41e3065 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -325,6 +325,13 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
+extern unsigned long kernstart_virt_addr;
+
+static inline unsigned long kaslr_offset(void)
+{
+ return kernstart_virt_addr - KERNELBASE;
+}
+
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>
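
The new kaslr_offset() is just the delta between the (possibly randomized) runtime base and the link-time KERNELBASE; with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long kernelbase = 0xc0000000ul;		/* assumed link-time base */
	unsigned long kernstart_virt_addr = 0xc4000000ul;	/* hypothetical KASLR'd base */

	printf("kaslr offset: 0x%lx\n", kernstart_virt_addr - kernelbase);
	return 0;
}
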
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 4053b2ab427c..0e4ec8cc37b7 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -157,13 +157,9 @@ static inline bool pgd_is_leaf(pgd_t pgd)
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
{
-#ifdef CONFIG_MMU
unsigned long addr = (unsigned long)x;
return addr >= IOREMAP_BASE && addr < IOREMAP_END;
-#else
- return false;
-#endif
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 75c7e95a321b..1aa46dff0957 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -25,9 +25,7 @@
#include <asm/reg_fsl_emb.h>
#endif
-#ifdef CONFIG_PPC_8xx
#include <asm/reg_8xx.h>
-#endif /* CONFIG_PPC_8xx */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
@@ -1382,6 +1380,14 @@ static inline void mtmsr_isync(unsigned long val)
#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \
: : "memory")
+static inline void wrtee(unsigned long val)
+{
+ if (__builtin_constant_p(val))
+ asm volatile("wrteei %0" : : "i" ((val & MSR_EE) ? 1 : 0) : "memory");
+ else
+ asm volatile("wrtee %0" : : "r" (val) : "memory");
+}
+
extern unsigned long msr_check_and_set(unsigned long bits);
extern bool strict_msr_control;
extern void __msr_check_and_clear(unsigned long bits);
@@ -1396,19 +1402,9 @@ static inline void msr_check_and_clear(unsigned long bits)
#define mftb() ({unsigned long rval; \
asm volatile( \
"90: mfspr %0, %2;\n" \
- "97: cmpwi %0,0;\n" \
- " beq- 90b;\n" \
- "99:\n" \
- ".section __ftr_fixup,\"a\"\n" \
- ".align 3\n" \
- "98:\n" \
- " .8byte %1\n" \
- " .8byte %1\n" \
- " .8byte 97b-98b\n" \
- " .8byte 99b-98b\n" \
- " .8byte 0\n" \
- " .8byte 0\n" \
- ".previous" \
+ ASM_FTR_IFSET( \
+ "97: cmpwi %0,0;\n" \
+ " beq- 90b;\n", "", %1) \
: "=r" (rval) \
: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
rval;})
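
The new wrtee() helper above picks the immediate form (wrteei) when the argument is a compile-time constant and the register form (wrtee) otherwise. A GCC-only sketch of the same dispatch, with printf standing in for the instructions and an assumed MSR_EE value:

#include <stdio.h>

#define MSR_EE 0x8000ul		/* assumed value of the EE bit */

#define wrtee(val) do {							\
	if (__builtin_constant_p(val))					\
		printf("wrteei %d\n", ((val) & MSR_EE) ? 1 : 0);	\
	else								\
		printf("wrtee r  /* r = 0x%lx */\n", (unsigned long)(val)); \
} while (0)

int main(void)
{
	unsigned long flags = 0;

	wrtee(MSR_EE);	/* constant: immediate form, wrteei 1 */
	wrtee(flags);	/* variable: register form */
	return 0;
}
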
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 7192eece6c3e..07df35ee8cbc 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -5,8 +5,6 @@
#ifndef _ASM_POWERPC_REG_8xx_H
#define _ASM_POWERPC_REG_8xx_H
-#include <asm/mmu.h>
-
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
@@ -38,7 +36,9 @@
#define SPRN_CMPF 153
#define SPRN_LCTRL1 156
#define SPRN_LCTRL2 157
+#ifdef CONFIG_PPC_8xx
#define SPRN_ICTRL 158
+#endif
#define SPRN_BAR 159
/* Commands. Only the first few are available to the instruction cache.
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 5a9b6eb651b6..d19871763ed4 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -5,8 +5,22 @@
#include <linux/elf.h>
#include <linux/uaccess.h>
+
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
#include <asm-generic/sections.h>
+extern bool init_mem_is_free;
+
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ if (!init_mem_is_free)
+ return 0;
+
+ return addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end;
+}
+
extern char __head_end[];
#ifdef __powerpc64__
diff --git a/arch/powerpc/include/asm/secure_boot.h b/arch/powerpc/include/asm/secure_boot.h
new file mode 100644
index 000000000000..a2ff556916c6
--- /dev/null
+++ b/arch/powerpc/include/asm/secure_boot.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Secure boot definitions
+ *
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ */
+#ifndef _ASM_POWER_SECURE_BOOT_H
+#define _ASM_POWER_SECURE_BOOT_H
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+bool is_ppc_secureboot_enabled(void);
+bool is_ppc_trustedboot_enabled(void);
+
+#else
+
+static inline bool is_ppc_secureboot_enabled(void)
+{
+ return false;
+}
+
+static inline bool is_ppc_trustedboot_enabled(void)
+{
+ return false;
+}
+
+#endif
+#endif
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 759597bf0fd8..7c05e95a5c44 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -9,7 +9,7 @@
#define _ASM_POWERPC_SECURITY_FEATURES_H
-extern unsigned long powerpc_security_features;
+extern u64 powerpc_security_features;
extern bool rfi_flush;
/* These are bit flags */
@@ -24,17 +24,17 @@ void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
void setup_count_cache_flush(void);
-static inline void security_ftr_set(unsigned long feature)
+static inline void security_ftr_set(u64 feature)
{
powerpc_security_features |= feature;
}
-static inline void security_ftr_clear(unsigned long feature)
+static inline void security_ftr_clear(u64 feature)
{
powerpc_security_features &= ~feature;
}
-static inline bool security_ftr_enabled(unsigned long feature)
+static inline bool security_ftr_enabled(u64 feature)
{
return !!(powerpc_security_features & feature);
}
@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Software required to flush count cache on context switch
#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
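
The helpers converted to u64 above are plain bit-flag accessors; a minimal standalone sketch:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SEC_FTR_FLUSH_LINK_STACK	0x0000000000001000ull

static uint64_t powerpc_security_features;

static void security_ftr_set(uint64_t feature)
{
	powerpc_security_features |= feature;
}

static void security_ftr_clear(uint64_t feature)
{
	powerpc_security_features &= ~feature;
}

static bool security_ftr_enabled(uint64_t feature)
{
	return !!(powerpc_security_features & feature);
}

int main(void)
{
	security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
	printf("link stack flush: %d\n",
	       security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK));
	security_ftr_clear(SEC_FTR_FLUSH_LINK_STACK);
	printf("link stack flush: %d\n",
	       security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK));
	return 0;
}
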
diff --git a/arch/powerpc/include/asm/secvar.h b/arch/powerpc/include/asm/secvar.h
new file mode 100644
index 000000000000..4cc35b58b986
--- /dev/null
+++ b/arch/powerpc/include/asm/secvar.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * PowerPC secure variable operations.
+ */
+#ifndef SECVAR_OPS_H
+#define SECVAR_OPS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+extern const struct secvar_operations *secvar_ops;
+
+struct secvar_operations {
+ int (*get)(const char *key, uint64_t key_len, u8 *data,
+ uint64_t *data_size);
+ int (*get_next)(const char *key, uint64_t *key_len,
+ uint64_t keybufsize);
+ int (*set)(const char *key, uint64_t key_len, u8 *data,
+ uint64_t data_size);
+};
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+extern void set_secvar_ops(const struct secvar_operations *ops);
+
+#else
+
+static inline void set_secvar_ops(const struct secvar_operations *ops) { }
+
+#endif
+
+#endif
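
A sketch of how a backend might implement and register the get() operation of the interface above; the dummy backend, its "PK" variable and the buffer handling are invented for illustration (the real OPAL wiring lives in the platform code):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>

struct secvar_operations {
	int (*get)(const char *key, uint64_t key_len, uint8_t *data,
		   uint64_t *data_size);
};

static int dummy_get(const char *key, uint64_t key_len, uint8_t *data,
		     uint64_t *data_size)
{
	static const uint8_t blob[] = { 0xde, 0xad, 0xbe, 0xef };

	if (key_len != 2 || memcmp(key, "PK", 2))
		return -ENOENT;
	if (*data_size < sizeof(blob))
		return -E2BIG;
	memcpy(data, blob, sizeof(blob));
	*data_size = sizeof(blob);
	return 0;
}

static const struct secvar_operations dummy_ops = { .get = dummy_get };
static const struct secvar_operations *secvar_ops;

static void set_secvar_ops(const struct secvar_operations *ops)
{
	secvar_ops = ops;
}

int main(void)
{
	uint8_t buf[16];
	uint64_t size = sizeof(buf);

	set_secvar_ops(&dummy_ops);
	if (!secvar_ops->get("PK", 2, buf, &size))
		printf("PK: %llu bytes\n", (unsigned long long)size);
	return 0;
}
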
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index c61d59ed3b45..a115970a6809 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -81,7 +81,8 @@ struct vdso_data {
__u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
__s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
__s64 wtom_clock_sec; /* Wall to monotonic clock sec */
- struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
+ __s64 stamp_xtime_sec; /* xtime secs as at tb_orig_stamp */
+ __s64 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
@@ -101,7 +102,8 @@ struct vdso_data {
__u32 tz_dsttime; /* Type of dst correction 0x5C */
__s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
- struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
+ __s32 stamp_xtime_sec; /* xtime seconds as at tb_orig_stamp */
+ __s32 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */
__u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 dcache_block_size; /* L1 d-cache block size */
diff --git a/arch/powerpc/include/uapi/asm/msgbuf.h b/arch/powerpc/include/uapi/asm/msgbuf.h
index 2b1b37797a47..969bd83e4d3d 100644
--- a/arch/powerpc/include/uapi/asm/msgbuf.h
+++ b/arch/powerpc/include/uapi/asm/msgbuf.h
@@ -11,9 +11,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifdef __powerpc64__
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
#else
unsigned long msg_stime_high;
unsigned long msg_stime; /* last msgsnd time */
diff --git a/arch/powerpc/include/uapi/asm/sembuf.h b/arch/powerpc/include/uapi/asm/sembuf.h
index 3f60946f77e3..008ae77c6746 100644
--- a/arch/powerpc/include/uapi/asm/sembuf.h
+++ b/arch/powerpc/include/uapi/asm/sembuf.h
@@ -26,8 +26,8 @@ struct semid64_ds {
unsigned long sem_ctime_high;
unsigned long sem_ctime; /* last change time */
#else
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
#endif
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused3;
diff --git a/arch/powerpc/include/uapi/asm/shmbuf.h b/arch/powerpc/include/uapi/asm/shmbuf.h
index b591c4d7e4c5..00422b2f3c63 100644
--- a/arch/powerpc/include/uapi/asm/shmbuf.h
+++ b/arch/powerpc/include/uapi/asm/shmbuf.h
@@ -22,9 +22,9 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#ifdef __powerpc64__
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
#else
unsigned long shm_atime_high;
unsigned long shm_atime; /* last attach time */
diff --git a/arch/powerpc/include/uapi/asm/spu_info.h b/arch/powerpc/include/uapi/asm/spu_info.h
index cabfcbba9eac..45f97150587b 100644
--- a/arch/powerpc/include/uapi/asm/spu_info.h
+++ b/arch/powerpc/include/uapi/asm/spu_info.h
@@ -5,20 +5,6 @@
* (C) Copyright 2006 IBM Corp.
*
* Author: Dwayne Grant McConnell <decimal@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI_SPU_INFO_H
diff --git a/arch/powerpc/include/uapi/asm/stat.h b/arch/powerpc/include/uapi/asm/stat.h
index afd25f2ff4e8..7871055e5e32 100644
--- a/arch/powerpc/include/uapi/asm/stat.h
+++ b/arch/powerpc/include/uapi/asm/stat.h
@@ -40,7 +40,7 @@ struct stat {
uid_t st_uid;
gid_t st_gid;
unsigned long st_rdev;
- off_t st_size;
+ long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a7ca8fe62368..157b0147921f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -5,9 +5,6 @@
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-# Disable clang warning for using setjmp without setjmp.h header
-CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header)
-
ifdef CONFIG_PPC64
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
endif
@@ -22,6 +19,8 @@ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
+CFLAGS_prom_init.o += -ffreestanding
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
@@ -39,7 +38,6 @@ KASAN_SANITIZE_btext.o := n
ifdef CONFIG_KASAN
CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
-CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
endif
@@ -78,9 +76,8 @@ obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-ifneq ($(CONFIG_FA_DUMP)$(CONFIG_PRESERVE_FA_DUMP),)
-obj-y += fadump.o
-endif
+obj-$(CONFIG_FA_DUMP) += fadump.o
+obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o
ifdef CONFIG_PPC32
obj-$(CONFIG_E500) += idle_e500.o
endif
@@ -126,14 +123,6 @@ pci64-$(CONFIG_PPC64) += pci_dn.o pci-hotplug.o isa-bridge.o
obj-$(CONFIG_PCI) += pci_$(BITS).o $(pci64-y) \
pci-common.o pci_of_scan.o
obj-$(CONFIG_PCI_MSI) += msi.o
-obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \
- machine_kexec_$(BITS).o
-obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
-ifdef CONFIG_HAVE_IMA_KEXEC
-ifdef CONFIG_IMA
-obj-y += ima_kexec.o
-endif
-endif
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
@@ -161,16 +150,13 @@ ifneq ($(CONFIG_PPC_POWERNV)$(CONFIG_PPC_SVM),)
obj-y += ucall.o
endif
+obj-$(CONFIG_PPC_SECURE_BOOT) += secure_boot.o ima_arch.o secvar-ops.o
+obj-$(CONFIG_PPC_SECVAR_SYSFS) += secvar-sysfs.o
+
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_prom_init.o := n
KCOV_INSTRUMENT_prom_init.o := n
UBSAN_SANITIZE_prom_init.o := n
-GCOV_PROFILE_machine_kexec_64.o := n
-KCOV_INSTRUMENT_machine_kexec_64.o := n
-UBSAN_SANITIZE_machine_kexec_64.o := n
-GCOV_PROFILE_machine_kexec_32.o := n
-KCOV_INSTRUMENT_machine_kexec_32.o := n
-UBSAN_SANITIZE_machine_kexec_32.o := n
GCOV_PROFILE_kprobes.o := n
KCOV_INSTRUMENT_kprobes.o := n
UBSAN_SANITIZE_kprobes.o := n
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 484f54dab247..f22bd6d1fe93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -385,7 +385,8 @@ int main(void)
OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32);
OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec);
OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec);
- OFFSET(STAMP_XTIME, vdso_data, stamp_xtime);
+ OFFSET(STAMP_XTIME_SEC, vdso_data, stamp_xtime_sec);
+ OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
@@ -393,20 +394,15 @@ int main(void)
OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size);
#ifdef CONFIG_PPC64
OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
- OFFSET(TVAL64_TV_SEC, timeval, tv_sec);
- OFFSET(TVAL64_TV_USEC, timeval, tv_usec);
+ OFFSET(TVAL64_TV_SEC, __kernel_old_timeval, tv_sec);
+ OFFSET(TVAL64_TV_USEC, __kernel_old_timeval, tv_usec);
+#endif
+ OFFSET(TSPC64_TV_SEC, __kernel_timespec, tv_sec);
+ OFFSET(TSPC64_TV_NSEC, __kernel_timespec, tv_nsec);
OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec);
OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec);
- OFFSET(TSPC64_TV_SEC, timespec, tv_sec);
- OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec);
OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec);
OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec);
-#else
- OFFSET(TVAL32_TV_SEC, timeval, tv_sec);
- OFFSET(TVAL32_TV_USEC, timeval, tv_usec);
- OFFSET(TSPC32_TV_SEC, timespec, tv_sec);
- OFFSET(TSPC32_TV_NSEC, timespec, tv_nsec);
-#endif
/* timeval/timezone offsets for use by vdso */
OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest);
OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime);
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 2b4f3ec0acf7..1d308780e0d3 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -231,7 +231,7 @@ _GLOBAL(__setup_cpu_e5500)
blr
#endif
-/* flush L1 date cache, it can apply to e500v2, e500mc and e5500 */
+/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
_GLOBAL(flush_dcache_L1)
mfmsr r10
wrteei 0
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index 5f66b95b6858..cc14aa6c4a1b 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -30,10 +30,10 @@ int set_dawr(struct arch_hw_breakpoint *brk)
* DAWR length is stored in field MDR bits 48:53. Matches range in
 * doublewords (64 bits) biased by -1, e.g. 0b000000=1DW and
* 0b111111=64DW.
- * brk->len is in bytes.
+ * brk->hw_len is in bytes.
* This aligns up to double word size, shifts and does the bias.
*/
- mrd = ((brk->len + 7) >> 3) - 1;
+ mrd = ((brk->hw_len + 7) >> 3) - 1;
dawrx |= (mrd & 0x3f) << (63 - 53);
if (ppc_md.set_dawr)
@@ -54,7 +54,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct arch_hw_breakpoint null_brk = {0, 0, 0};
+ struct arch_hw_breakpoint null_brk = {0};
size_t rc;
	/* Send error to user if the hypervisor won't allow us to write DAWR */
diff --git a/arch/powerpc/kernel/early_32.c b/arch/powerpc/kernel/early_32.c
index 3482118ffe76..ef2ad4945904 100644
--- a/arch/powerpc/kernel/early_32.c
+++ b/arch/powerpc/kernel/early_32.c
@@ -19,10 +19,13 @@
*/
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
- unsigned long offset = reloc_offset();
+ unsigned long kva, offset = reloc_offset();
+
+ kva = *PTRRELOC(&kernstart_virt_addr);
/* First zero the BSS */
- memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
+ if (kva == KERNELBASE)
+ memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
/*
* Identify the CPU type and fix up code sections
@@ -32,5 +35,5 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
apply_feature_fixups();
- return KERNELBASE + offset;
+ return kva + offset;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d9279d0ee9f5..3dd1a422fc29 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCI Error Recovery Driver for RPA-compliant PPC64 platform.
* Copyright IBM Corp. 2004 2005
* Copyright Linas Vepstas <linas@linas.org> 2004, 2005
*
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
#include <linux/delay.h>
@@ -897,12 +881,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
/* Log the event */
if (pe->type & EEH_PE_PHB) {
- pr_err("EEH: PHB#%x failure detected, location: %s\n",
+ pr_err("EEH: Recovering PHB#%x, location: %s\n",
pe->phb->global_number, eeh_pe_loc_get(pe));
} else {
struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);
- pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
+ pr_err("EEH: Recovering PHB#%x-PE#%x\n",
pe->phb->global_number, pe->addr);
pr_err("EEH: PE location: %s, PHB location: %s\n",
eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index 3fa04dda1737..ab44d965a53c 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -1,25 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sysfs entries for PCI Error Recovery for PAPR-compliant platform.
* Copyright IBM Corporation 2007
* Copyright Linas Vepstas <linas@austin.ibm.com> 2007
*
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
#include <linux/pci.h>
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6467bdab8d40..3fd3ef352e3f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -537,6 +537,7 @@ flush_count_cache:
/* Save LR into r9 */
mflr r9
+ // Flush the link stack
.rept 64
bl .+4
.endr
@@ -546,6 +547,11 @@ flush_count_cache:
.balign 32
/* Restore LR */
1: mtlr r9
+
+ // If we're just flushing the link stack, return here
+3: nop
+ patch_site 3b patch__flush_link_stack_return
+
li r9,0x7fff
mtctr r9
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 829950b96d29..e4076e3c072d 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1346,16 +1346,6 @@ skpinv: addi r6,r6,1 /* Increment */
sync
isync
-/*
- * The mapping only needs to be cache-coherent on SMP, except on
- * Freescale e500mc derivatives where it's also needed for coherent DMA.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
-#define M_IF_NEEDED MAS2_M
-#else
-#define M_IF_NEEDED 0
-#endif
-
/* 6. Setup KERNELBASE mapping in TLB[0]
*
* r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
@@ -1368,7 +1358,7 @@ skpinv: addi r6,r6,1 /* Increment */
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
mtspr SPRN_MAS1,r6
- LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
+ LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
mtspr SPRN_MAS2,r6
rlwinm r5,r5,0,0,25
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d0018dd17e0a..46508b148e16 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -514,7 +514,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
* If stack=0, then the stack is already set in r1, and r1 is saved in r10.
* PPR save and CPU accounting is not done for the !stack case (XXX why not?)
*/
-.macro INT_COMMON vec, area, stack, kaup, reconcile, dar, dsisr
+.macro INT_COMMON vec, area, stack, kuap, reconcile, dar, dsisr
.if \stack
andi. r10,r12,MSR_PR /* See if coming from user */
mr r10,r1 /* Save r1 */
@@ -533,7 +533,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
std r10,GPR1(r1) /* save r1 in stackframe */
.if \stack
- .if \kaup
+ .if \kuap
kuap_save_amr_and_lock r9, r10, cr1, cr0
.endif
beq 101f /* if from kernel mode */
@@ -541,7 +541,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
SAVE_PPR(\area, r9)
101:
.else
- .if \kaup
+ .if \kuap
kuap_save_amr_and_lock r9, r10, cr1
.endif
.endif
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index ed59855430b9..ff0114aeba9b 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1466,16 +1466,15 @@ static void fadump_init_files(void)
*/
int __init setup_fadump(void)
{
- if (!fw_dump.fadump_enabled)
- return 0;
-
- if (!fw_dump.fadump_supported) {
- printk(KERN_ERR "Firmware-assisted dump is not supported on"
- " this hardware\n");
+ if (!fw_dump.fadump_supported)
return 0;
- }
+ fadump_init_files();
fadump_show_config();
+
+ if (!fw_dump.fadump_enabled)
+ return 1;
+
/*
* If dump data is available then see if it is valid and prepare for
* saving it to the disk.
@@ -1492,8 +1491,6 @@ int __init setup_fadump(void)
else if (fw_dump.reserve_dump_area_size)
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
- fadump_init_files();
-
return 1;
}
subsys_initcall(setup_fadump);
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index ea065282b303..8bccce6544b5 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -153,35 +153,24 @@ skpinv: addi r6,r6,1 /* Increment */
tlbivax 0,r9
TLBSYNC
-/*
- * The mapping only needs to be cache-coherent on SMP, except on
- * Freescale e500mc derivatives where it's also needed for coherent DMA.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
-#define M_IF_NEEDED MAS2_M
-#else
-#define M_IF_NEEDED 0
-#endif
-
#if defined(ENTRY_MAPPING_BOOT_SETUP)
-/* 6. Setup KERNELBASE mapping in TLB1[0] */
+/* 6. Setup kernstart_virt_addr mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
- lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@h
- ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@l
+ lis r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
+ ori r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
+ and r6,r6,r20
+ ori r6,r6,MAS2_M_IF_NEEDED@l
mtspr SPRN_MAS2,r6
mtspr SPRN_MAS3,r8
tlbwe
-/* 7. Jump to KERNELBASE mapping */
- lis r6,(KERNELBASE & ~0xfff)@h
- ori r6,r6,(KERNELBASE & ~0xfff)@l
- rlwinm r7,r25,0,0x03ffffff
- add r6,r7,r6
+/* 7. Jump to kernstart_virt_addr mapping */
+ mr r6,r20
#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
/*
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index adf0505dbe02..838d9d4650c7 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -155,6 +155,8 @@ _ENTRY(_start);
*/
_ENTRY(__early_start)
+ LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
+ lwz r20,0(r20)
#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
@@ -277,8 +279,8 @@ set_ivor:
ori r6, r6, swapper_pg_dir@l
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
- lis r4, KERNELBASE@h
- ori r4, r4, KERNELBASE@l
+ lis r3, kernstart_virt_addr@ha
+ lwz r4, kernstart_virt_addr@l(r3)
stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
stw r6, 0(r5)
@@ -1067,7 +1069,12 @@ __secondary_start:
mr r5,r25 /* phys kernel start */
rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
subf r4,r5,r4 /* memstart_addr - phys kernel start */
- li r5,0 /* no device tree */
+ lis r7,KERNELBASE@h
+ ori r7,r7,KERNELBASE@l
+ cmpw r20,r7 /* if kernstart_virt_addr != KERNELBASE, randomized */
+ beq 2f
+ li r4,0
+2: li r5,0 /* no device tree */
li r6,0 /* not boot cpu */
bl restore_to_as0
@@ -1115,6 +1122,54 @@ __secondary_hold_acknowledge:
#endif
/*
+ * Create a 64M TLB entry from an entry index and address
+ * r3 - entry
+ * r4 - virtual address
+ * r5/r6 - physical address
+ */
+_GLOBAL(create_kaslr_tlb_entry)
+ lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+ rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
+ mtspr SPRN_MAS0,r7 /* Write MAS0 */
+
+ lis r3,(MAS1_VALID|MAS1_IPROT)@h
+ ori r3,r3,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
+ mtspr SPRN_MAS1,r3 /* Write MAS1 */
+
+ lis r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
+ ori r3,r3,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
+ and r3,r3,r4
+ ori r3,r3,MAS2_M_IF_NEEDED@l
+ mtspr SPRN_MAS2,r3 /* Write MAS2(EPN) */
+
+#ifdef CONFIG_PHYS_64BIT
+ ori r8,r6,(MAS3_SW|MAS3_SR|MAS3_SX)
+ mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */
+ mtspr SPRN_MAS7,r5
+#else
+ ori r8,r5,(MAS3_SW|MAS3_SR|MAS3_SX)
+ mtspr SPRN_MAS3,r8 /* Write MAS3(RPN) */
+#endif
+
+ tlbwe /* Write TLB */
+ isync
+ sync
+ blr
+
+/*
+ * Return to the start of the relocated kernel and run again
+ * r3 - virtual address of fdt
+ * r4 - entry of the kernel
+ */
+_GLOBAL(reloc_kernel_entry)
+ mfmsr r7
+ rlwinm r7, r7, 0, ~(MSR_IS | MSR_DS)
+
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r7
+ rfi
+
+/*
* Create a tlb entry with the same effective and physical address as
 * the tlb entry used by the currently running code. But set the TS to 1.
* Then switch to the address space 1. It will return with the r3 set to
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1007ec36b4cb..58ce3d37c2a3 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -127,15 +127,58 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
}
/*
+ * Watchpoint match range is always doubleword (8 bytes) aligned on
+ * powerpc. If the given range crosses a doubleword boundary, we
+ * need to increase the length so that the next doubleword is also
+ * covered. For example:
+ *
+ * address len = 6 bytes
+ * |=========.
+ * |------------v--|------v--------|
+ * | | | | | | | | | | | | | | | | |
+ * |---------------|---------------|
+ * <---8 bytes--->
+ *
+ * In this case, we should configure hw as:
+ * start_addr = address & ~HW_BREAKPOINT_ALIGN
+ * len = 16 bytes
+ *
+ * @start_addr and @end_addr are inclusive.
+ */
+static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
+{
+ u16 max_len = DABR_MAX_LEN;
+ u16 hw_len;
+ unsigned long start_addr, end_addr;
+
+ start_addr = hw->address & ~HW_BREAKPOINT_ALIGN;
+ end_addr = (hw->address + hw->len - 1) | HW_BREAKPOINT_ALIGN;
+ hw_len = end_addr - start_addr + 1;
+
+ if (dawr_enabled()) {
+ max_len = DAWR_MAX_LEN;
+		/* DAWR region can't cross a 512-byte boundary */
+ if ((start_addr >> 9) != (end_addr >> 9))
+ return -EINVAL;
+ }
+
+ if (hw_len > max_len)
+ return -EINVAL;
+
+ hw->hw_len = hw_len;
+ return 0;
+}
+
+/*
* Validate the arch-specific HW Breakpoint register settings
*/
int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
- int ret = -EINVAL, length_max;
+ int ret = -EINVAL;
- if (!bp)
+ if (!bp || !attr->bp_len)
return ret;
hw->type = HW_BRK_TYPE_TRANSLATE;
@@ -155,26 +198,10 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
hw->address = attr->bp_addr;
hw->len = attr->bp_len;
- /*
- * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
- * and breakpoint addresses are aligned to nearest double-word
- * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the
- * 'symbolsize' should satisfy the check below.
- */
if (!ppc_breakpoint_available())
return -ENODEV;
- length_max = 8; /* DABR */
- if (dawr_enabled()) {
- length_max = 512 ; /* 64 doublewords */
- /* DAWR region can't cross 512 boundary */
- if ((attr->bp_addr >> 9) !=
- ((attr->bp_addr + attr->bp_len - 1) >> 9))
- return -EINVAL;
- }
- if (hw->len >
- (length_max - (hw->address & HW_BREAKPOINT_ALIGN)))
- return -EINVAL;
- return 0;
+
+ return hw_breakpoint_validate_len(hw);
}
/*
@@ -195,33 +222,49 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
tsk->thread.last_hit_ubp = NULL;
}
-static bool is_larx_stcx_instr(struct pt_regs *regs, unsigned int instr)
+static bool dar_within_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
- int ret, type;
- struct instruction_op op;
+ return ((info->address <= dar) && (dar - info->address < info->len));
+}
- ret = analyse_instr(&op, regs, instr);
- type = GETTYPE(op.type);
- return (!ret && (type == LARX || type == STCX));
+static bool
+dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
+{
+ return ((dar <= info->address + info->len - 1) &&
+ (dar + size - 1 >= info->address));
}
/*
* Handle debug exception notifications.
*/
static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
- unsigned long addr)
+ struct arch_hw_breakpoint *info)
{
unsigned int instr = 0;
+ int ret, type, size;
+ struct instruction_op op;
+ unsigned long addr = info->address;
if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
goto fail;
- if (is_larx_stcx_instr(regs, instr)) {
+ ret = analyse_instr(&op, regs, instr);
+ type = GETTYPE(op.type);
+ size = GETSIZE(op.type);
+
+ if (!ret && (type == LARX || type == STCX)) {
printk_ratelimited("Breakpoint hit on instruction that can't be emulated."
" Breakpoint at 0x%lx will be disabled.\n", addr);
goto disable;
}
+ /*
+	 * If it's an extraneous event, we still need to emulate/single-
+ * step the instruction, but we don't generate an event.
+ */
+ if (size && !dar_range_overlaps(regs->dar, size, info))
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
current->thread.last_hit_ubp = bp;
@@ -253,7 +296,6 @@ int hw_breakpoint_handler(struct die_args *args)
struct perf_event *bp;
struct pt_regs *regs = args->regs;
struct arch_hw_breakpoint *info;
- unsigned long dar = regs->dar;
/* Disable breakpoints during exception handling */
hw_breakpoint_disable();
@@ -285,19 +327,14 @@ int hw_breakpoint_handler(struct die_args *args)
goto out;
}
- /*
- * Verify if dar lies within the address range occupied by the symbol
- * being watched to filter extraneous exceptions. If it doesn't,
- * we still need to single-step the instruction, but we don't
- * generate an event.
- */
info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
- if (!((bp->attr.bp_addr <= dar) &&
- (dar - bp->attr.bp_addr < bp->attr.bp_len)))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
-
- if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info->address))
- goto out;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (!dar_within_range(regs->dar, info))
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ } else {
+ if (!stepping_handler(regs, bp, info))
+ goto out;
+ }
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
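
The length rounding in hw_breakpoint_validate_len() above can be sanity-checked on its own; for the 6-byte example in the comment (the concrete addresses are hypothetical):

#include <stdio.h>

#define HW_BREAKPOINT_ALIGN 0x7

int main(void)
{
	unsigned long address = 0x1006, len = 6;	/* crosses a doubleword */
	unsigned long start = address & ~(unsigned long)HW_BREAKPOINT_ALIGN;
	unsigned long end = (address + len - 1) | HW_BREAKPOINT_ALIGN;

	/* prints: hw range 0x1000..0x100f, hw_len 16 */
	printf("hw range 0x%lx..0x%lx, hw_len %lu\n",
	       start, end, end - start + 1);
	return 0;
}
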
diff --git a/arch/powerpc/kernel/ima_arch.c b/arch/powerpc/kernel/ima_arch.c
new file mode 100644
index 000000000000..e34116255ced
--- /dev/null
+++ b/arch/powerpc/kernel/ima_arch.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ */
+
+#include <linux/ima.h>
+#include <asm/secure_boot.h>
+
+bool arch_ima_get_secureboot(void)
+{
+ return is_ppc_secureboot_enabled();
+}
+
+/*
+ * The "secure_rules" are enabled only on "secureboot" enabled systems.
+ * These rules verify the file signatures against known good values.
+ * The "appraise_type=imasig|modsig" option allows the known good signature
+ * to be stored as an xattr or as an appended signature.
+ *
+ * To avoid duplicate signature verification as much as possible, the IMA
+ * policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE
+ * is not enabled.
+ */
+static const char *const secure_rules[] = {
+ "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
+#ifndef CONFIG_MODULE_SIG_FORCE
+ "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
+#endif
+ NULL
+};
+
+/*
+ * The "trusted_rules" are enabled only on "trustedboot" enabled systems.
+ * These rules add the kexec kernel image and kernel modules file hashes to
+ * the IMA measurement list.
+ */
+static const char *const trusted_rules[] = {
+ "measure func=KEXEC_KERNEL_CHECK",
+ "measure func=MODULE_CHECK",
+ NULL
+};
+
+/*
+ * The "secure_and_trusted_rules" contains rules for both the secure boot and
+ * trusted boot. The "template=ima-modsig" option includes the appended
+ * signature, when available, in the IMA measurement list.
+ */
+static const char *const secure_and_trusted_rules[] = {
+ "measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
+ "measure func=MODULE_CHECK template=ima-modsig",
+ "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
+#ifndef CONFIG_MODULE_SIG_FORCE
+ "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
+#endif
+ NULL
+};
+
+/*
+ * Returns the relevant IMA arch-specific policies based on the system secure
+ * boot state.
+ */
+const char *const *arch_get_ima_policy(void)
+{
+ if (is_ppc_secureboot_enabled()) {
+ if (IS_ENABLED(CONFIG_MODULE_SIG))
+ set_module_sig_enforced();
+
+ if (is_ppc_trustedboot_enabled())
+ return secure_and_trusted_rules;
+ else
+ return secure_rules;
+ } else if (is_ppc_trustedboot_enabled()) {
+ return trusted_rules;
+ }
+
+ return NULL;
+}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 82df4b09e79f..d80212be8698 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -6,11 +6,6 @@
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
- * kexec bits:
- * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
- * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
- * PPC44x port. Copyright (C) 2011, IBM Corporation
- * Author: Suzuki Poulose <suzuki@in.ibm.com>
*/
#include <linux/sys.h>
@@ -25,7 +20,6 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
-#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
@@ -317,126 +311,6 @@ EXPORT_SYMBOL(flush_instruction_cache)
#endif /* CONFIG_PPC_8xx */
/*
- * Write any modified data cache blocks out to memory
- * and invalidate the corresponding instruction cache blocks.
- * This is a no-op on the 601.
- *
- * flush_icache_range(unsigned long start, unsigned long stop)
- */
-_GLOBAL(flush_icache_range)
-#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
- PURGE_PREFETCHED_INS
- blr /* for 601 and e200, do nothing */
-#else
- rlwinm r3,r3,0,0,31 - L1_CACHE_SHIFT
- subf r4,r3,r4
- addi r4,r4,L1_CACHE_BYTES - 1
- srwi. r4,r4,L1_CACHE_SHIFT
- beqlr
- mtctr r4
- mr r6,r3
-1: dcbst 0,r3
- addi r3,r3,L1_CACHE_BYTES
- bdnz 1b
- sync /* wait for dcbst's to get to ram */
-#ifndef CONFIG_44x
- mtctr r4
-2: icbi 0,r6
- addi r6,r6,L1_CACHE_BYTES
- bdnz 2b
-#else
- /* Flash invalidate on 44x because we are passed kmapped addresses and
- this doesn't work for userspace pages due to the virtually tagged
- icache. Sigh. */
- iccci 0, r0
-#endif
- sync /* additional sync needed on g4 */
- isync
- blr
-#endif
-_ASM_NOKPROBE_SYMBOL(flush_icache_range)
-EXPORT_SYMBOL(flush_icache_range)
-
-/*
- * Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- * This is a no-op on the 601 and e200 which have a unified cache.
- *
- * void __flush_dcache_icache(void *page)
- */
-_GLOBAL(__flush_dcache_icache)
-#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
- PURGE_PREFETCHED_INS
- blr
-#else
- rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
- li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
- mtctr r4
- mr r6,r3
-0: dcbst 0,r3 /* Write line to ram */
- addi r3,r3,L1_CACHE_BYTES
- bdnz 0b
- sync
-#ifdef CONFIG_44x
- /* We don't flush the icache on 44x. Those have a virtual icache
- * and we don't have access to the virtual address here (it's
- * not the page vaddr but where it's mapped in user space). The
- * flushing of the icache on these is handled elsewhere, when
- * a change in the address space occurs, before returning to
- * user space
- */
-BEGIN_MMU_FTR_SECTION
- blr
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
-#endif /* CONFIG_44x */
- mtctr r4
-1: icbi 0,r6
- addi r6,r6,L1_CACHE_BYTES
- bdnz 1b
- sync
- isync
- blr
-#endif
-
-#ifndef CONFIG_BOOKE
-/*
- * Flush a particular page from the data cache to RAM, identified
- * by its physical address. We turn off the MMU so we can just use
- * the physical address (this may be a highmem page without a kernel
- * mapping).
- *
- * void __flush_dcache_icache_phys(unsigned long physaddr)
- */
-_GLOBAL(__flush_dcache_icache_phys)
-#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
- PURGE_PREFETCHED_INS
- blr /* for 601 and e200, do nothing */
-#else
- mfmsr r10
- rlwinm r0,r10,0,28,26 /* clear DR */
- mtmsr r0
- isync
- rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
- li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
- mtctr r4
- mr r6,r3
-0: dcbst 0,r3 /* Write line to ram */
- addi r3,r3,L1_CACHE_BYTES
- bdnz 0b
- sync
- mtctr r4
-1: icbi 0,r6
- addi r6,r6,L1_CACHE_BYTES
- bdnz 1b
- sync
- mtmsr r10 /* restore DR */
- isync
- blr
-#endif
-#endif /* CONFIG_BOOKE */
-
-/*
* Copy a whole page. We use the dcbz instruction on the destination
* to reduce memory traffic (it eliminates the unnecessary reads of
* the destination into cache). This requires that the destination
@@ -614,488 +488,3 @@ _GLOBAL(start_secondary_resume)
*/
_GLOBAL(__main)
blr
-
-#ifdef CONFIG_KEXEC_CORE
- /*
- * Must be relocatable PIC code callable as a C function.
- */
- .globl relocate_new_kernel
-relocate_new_kernel:
- /* r3 = page_list */
- /* r4 = reboot_code_buffer */
- /* r5 = start_address */
-
-#ifdef CONFIG_FSL_BOOKE
-
- mr r29, r3
- mr r30, r4
- mr r31, r5
-
-#define ENTRY_MAPPING_KEXEC_SETUP
-#include "fsl_booke_entry_mapping.S"
-#undef ENTRY_MAPPING_KEXEC_SETUP
-
- mr r3, r29
- mr r4, r30
- mr r5, r31
-
- li r0, 0
-#elif defined(CONFIG_44x)
-
- /* Save our parameters */
- mr r29, r3
- mr r30, r4
- mr r31, r5
-
-#ifdef CONFIG_PPC_47x
- /* Check for 47x cores */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmplwi cr0,r3,PVR_476FPE@h
- beq setup_map_47x
- cmplwi cr0,r3,PVR_476@h
- beq setup_map_47x
- cmplwi cr0,r3,PVR_476_ISS@h
- beq setup_map_47x
-#endif /* CONFIG_PPC_47x */
-
-/*
- * Code for setting up 1:1 mapping for PPC440x for KEXEC
- *
- * We cannot switch off the MMU on PPC44x.
- * So we:
- * 1) Invalidate all the mappings except the one we are running from.
- * 2) Create a tmp mapping for our code in the other address space(TS) and
- * jump to it. Invalidate the entry we started in.
- * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
- * 4) Jump to the 1:1 mapping in original TS.
- * 5) Invalidate the tmp mapping.
- *
- * - Based on the kexec support code for FSL BookE
- *
- */
-
- /*
- * Load the PID with kernel PID (0).
- * Also load our MSR_IS and TID to MMUCR for TLB search.
- */
- li r3, 0
- mtspr SPRN_PID, r3
- mfmsr r4
- andi. r4,r4,MSR_IS@l
- beq wmmucr
- oris r3,r3,PPC44x_MMUCR_STS@h
-wmmucr:
- mtspr SPRN_MMUCR,r3
- sync
-
- /*
- * Invalidate all the TLB entries except the current entry
- * where we are running from
- */
- bl 0f /* Find our address */
-0: mflr r5 /* Make it accessible */
- tlbsx r23,0,r5 /* Find entry we are in */
- li r4,0 /* Start at TLB entry 0 */
- li r3,0 /* Set PAGEID inval value */
-1: cmpw r23,r4 /* Is this our entry? */
- beq skip /* If so, skip the inval */
- tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
-skip:
- addi r4,r4,1 /* Increment */
- cmpwi r4,64 /* Are we done? */
- bne 1b /* If not, repeat */
- isync
-
- /* Create a temp mapping and jump to it */
- andi. r6, r23, 1 /* Find the index to use */
- addi r24, r6, 1 /* r24 will contain 1 or 2 */
-
- mfmsr r9 /* get the MSR */
- rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */
- xori r7, r5, 1 /* Use the other address space */
-
- /* Read the current mapping entries */
- tlbre r3, r23, PPC44x_TLB_PAGEID
- tlbre r4, r23, PPC44x_TLB_XLAT
- tlbre r5, r23, PPC44x_TLB_ATTRIB
-
- /* Save our current XLAT entry */
- mr r25, r4
-
- /* Extract the TLB PageSize */
- li r10, 1 /* r10 will hold PageSize */
- rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */
-
- /* XXX: As of now we use 256M, 4K pages */
- cmpwi r11, PPC44x_TLB_256M
- bne tlb_4k
- rotlwi r10, r10, 28 /* r10 = 256M */
- b write_out
-tlb_4k:
- cmpwi r11, PPC44x_TLB_4K
- bne default
- rotlwi r10, r10, 12 /* r10 = 4K */
- b write_out
-default:
- rotlwi r10, r10, 10 /* r10 = 1K */
-
-write_out:
- /*
- * Write out the tmp 1:1 mapping for this code in other address space
- * Fixup EPN = RPN , TS=other address space
- */
- insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */
-
- /* Write out the tmp mapping entries */
- tlbwe r3, r24, PPC44x_TLB_PAGEID
- tlbwe r4, r24, PPC44x_TLB_XLAT
- tlbwe r5, r24, PPC44x_TLB_ATTRIB
-
- subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */
- not r10, r11 /* Mask for PageNum */
-
- /* Switch to other address space in MSR */
- insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
-
- bl 1f
-1: mflr r8
- addi r8, r8, (2f-1b) /* Find the target offset */
-
- /* Jump to the tmp mapping */
- mtspr SPRN_SRR0, r8
- mtspr SPRN_SRR1, r9
- rfi
-
-2:
- /* Invalidate the entry we were executing from */
- li r3, 0
- tlbwe r3, r23, PPC44x_TLB_PAGEID
-
- /* attribute fields. rwx for SUPERVISOR mode */
- li r5, 0
- ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
- /* Create 1:1 mapping in 256M pages */
- xori r7, r7, 1 /* Revert back to Original TS */
-
- li r8, 0 /* PageNumber */
- li r6, 3 /* TLB Index, start at 3 */
-
-next_tlb:
- rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */
- mr r4, r3 /* RPN = EPN */
- ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
- insrwi r3, r7, 1, 23 /* Set TS from r7 */
-
- tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */
- tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */
- tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */
-
- addi r8, r8, 1 /* Increment PN */
- addi r6, r6, 1 /* Increment TLB Index */
- cmpwi r8, 8 /* Are we done ? */
- bne next_tlb
- isync
-
- /* Jump to the new mapping 1:1 */
- li r9,0
- insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
-
- bl 1f
-1: mflr r8
- and r8, r8, r11 /* Get our offset within page */
- addi r8, r8, (2f-1b)
-
- and r5, r25, r10 /* Get our target PageNum */
- or r8, r8, r5 /* Target jump address */
-
- mtspr SPRN_SRR0, r8
- mtspr SPRN_SRR1, r9
- rfi
-2:
- /* Invalidate the tmp entry we used */
- li r3, 0
- tlbwe r3, r24, PPC44x_TLB_PAGEID
- sync
- b ppc44x_map_done
-
-#ifdef CONFIG_PPC_47x
-
- /* 1:1 mapping for 47x */
-
-setup_map_47x:
-
- /*
- * Load the kernel pid (0) to PID and also to MMUCR[TID].
- * Also set the MSR IS->MMUCR STS
- */
- li r3, 0
- mtspr SPRN_PID, r3 /* Set PID */
- mfmsr r4 /* Get MSR */
- andi. r4, r4, MSR_IS@l /* TS=1? */
- beq 1f /* If not, leave STS=0 */
- oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
-1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */
- sync
-
- /* Find the entry we are running from */
- bl 2f
-2: mflr r23
- tlbsx r23, 0, r23
- tlbre r24, r23, 0 /* TLB Word 0 */
- tlbre r25, r23, 1 /* TLB Word 1 */
- tlbre r26, r23, 2 /* TLB Word 2 */
-
-
- /*
- * Invalidates all the tlb entries by writing to 256 RPNs(r4)
- * of 4k page size in all 4 ways (0-3 in r3).
- * This would invalidate the entire UTLB including the one we are
- * running from. However the shadow TLB entries would help us
- * to continue the execution, until we flush them (rfi/isync).
- */
- addis r3, 0, 0x8000 /* specify the way */
- addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */
- addi r5, 0, 0
- b clear_utlb_entry
-
- /* Align the loop to speed things up. from head_44x.S */
- .align 6
-
-clear_utlb_entry:
-
- tlbwe r4, r3, 0
- tlbwe r5, r3, 1
- tlbwe r5, r3, 2
- addis r3, r3, 0x2000 /* Increment the way */
- cmpwi r3, 0
- bne clear_utlb_entry
- addis r3, 0, 0x8000
- addis r4, r4, 0x100 /* Increment the EPN */
- cmpwi r4, 0
- bne clear_utlb_entry
-
- /* Create the entries in the other address space */
- mfmsr r5
- rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */
- xori r7, r7, 1 /* r7 = !TS */
-
- insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */
-
- /*
- * write out the TLB entries for the tmp mapping
- * Use way '0' so that we could easily invalidate it later.
- */
- lis r3, 0x8000 /* Way '0' */
-
- tlbwe r24, r3, 0
- tlbwe r25, r3, 1
- tlbwe r26, r3, 2
-
- /* Update the msr to the new TS */
- insrwi r5, r7, 1, 26
-
- bl 1f
-1: mflr r6
- addi r6, r6, (2f-1b)
-
- mtspr SPRN_SRR0, r6
- mtspr SPRN_SRR1, r5
- rfi
-
- /*
- * Now we are in the tmp address space.
- * Create a 1:1 mapping for 0-2GiB in the original TS.
- */
-2:
- li r3, 0
- li r4, 0 /* TLB Word 0 */
- li r5, 0 /* TLB Word 1 */
- li r6, 0
- ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */
-
- li r8, 0 /* PageIndex */
-
- xori r7, r7, 1 /* revert back to original TS */
-
-write_utlb:
- rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */
- /* ERPN = 0 as we don't use memory above 2G */
-
- mr r4, r5 /* EPN = RPN */
- ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
- insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */
-
- tlbwe r4, r3, 0 /* Write out the entries */
- tlbwe r5, r3, 1
- tlbwe r6, r3, 2
- addi r8, r8, 1
- cmpwi r8, 8 /* Have we completed ? */
- bne write_utlb
-
-	/* Make sure we complete the TLB writes */
- isync
-
- /*
- * Prepare to jump to the 1:1 mapping.
- * 1) Extract page size of the tmp mapping
- * DSIZ = TLB_Word0[22:27]
- * 2) Calculate the physical address of the address
- * to jump to.
- */
- rlwinm r10, r24, 0, 22, 27
-
- cmpwi r10, PPC47x_TLB0_4K
- bne 0f
- li r10, 0x1000 /* r10 = 4k */
- bl 1f
-
-0:
- /* Defaults to 256M */
- lis r10, 0x1000
-
- bl 1f
-1: mflr r4
- addi r4, r4, (2f-1b) /* virtual address of 2f */
-
- subi r11, r10, 1 /* offsetmask = Pagesize - 1 */
- not r10, r11 /* Pagemask = ~(offsetmask) */
-
- and r5, r25, r10 /* Physical page */
- and r6, r4, r11 /* offset within the current page */
-
- or r5, r5, r6 /* Physical address for 2f */
-
- /* Switch the TS in MSR to the original one */
- mfmsr r8
- insrwi r8, r7, 1, 26
-
- mtspr SPRN_SRR1, r8
- mtspr SPRN_SRR0, r5
- rfi
-
-2:
- /* Invalidate the tmp mapping */
- lis r3, 0x8000 /* Way '0' */
-
- clrrwi r24, r24, 12 /* Clear the valid bit */
- tlbwe r24, r3, 0
- tlbwe r25, r3, 1
- tlbwe r26, r3, 2
-
- /* Make sure we complete the TLB write and flush the shadow TLB */
- isync
-
-#endif
-
-ppc44x_map_done:
-
-
- /* Restore the parameters */
- mr r3, r29
- mr r4, r30
- mr r5, r31
-
- li r0, 0
-#else
- li r0, 0
-
- /*
- * Set Machine Status Register to a known status,
- * switch the MMU off and jump to 1: in a single step.
- */
-
- mr r8, r0
- ori r8, r8, MSR_RI|MSR_ME
- mtspr SPRN_SRR1, r8
- addi r8, r4, 1f - relocate_new_kernel
- mtspr SPRN_SRR0, r8
- sync
- rfi
-
-1:
-#endif
- /* from this point address translation is turned off */
- /* and interrupts are disabled */
-
- /* set a new stack at the bottom of our page... */
- /* (not really needed now) */
- addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
- stw r0, 0(r1)
-
- /* Do the copies */
- li r6, 0 /* checksum */
- mr r0, r3
- b 1f
-
-0: /* top, read another word for the indirection page */
- lwzu r0, 4(r3)
-
-1:
- /* is it a destination page? (r8) */
- rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
- beq 2f
-
- rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */
- b 0b
-
-2: /* is it an indirection page? (r3) */
- rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
- beq 2f
-
- rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */
- subi r3, r3, 4
- b 0b
-
-2: /* are we done? */
- rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
- beq 2f
- b 3f
-
-2: /* is it a source page? (r9) */
- rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
- beq 0b
-
- rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */
-
- li r7, PAGE_SIZE / 4
- mtctr r7
- subi r9, r9, 4
- subi r8, r8, 4
-9:
- lwzu r0, 4(r9) /* do the copy */
- xor r6, r6, r0
- stwu r0, 4(r8)
- dcbst 0, r8
- sync
- icbi 0, r8
- bdnz 9b
-
- addi r9, r9, 4
- addi r8, r8, 4
- b 0b
-
-3:
-
-	/* To be certain of avoiding problems with self-modifying code,
- * execute a serializing instruction here.
- */
- isync
- sync
-
- mfspr r3, SPRN_PIR /* current core we are running on */
- mr r4, r5 /* load physical address of chunk called */
-
- /* jump to the entry point, usually the setup routine */
- mtlr r5
- blrl
-
-1: b 1b
-
-relocate_new_kernel_end:
-
- .globl relocate_new_kernel_size
-relocate_new_kernel_size:
- .long relocate_new_kernel_end - relocate_new_kernel
-#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index b55a7b4cb543..1864605eca29 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -49,108 +49,6 @@ _GLOBAL(call_do_irq)
mtlr r0
blr
- .section ".toc","aw"
-PPC64_CACHES:
- .tc ppc64_caches[TC],ppc64_caches
- .section ".text"
-
-/*
- * Write any modified data cache blocks out to memory
- * and invalidate the corresponding instruction cache blocks.
- *
- * flush_icache_range(unsigned long start, unsigned long stop)
- *
- * flush all bytes from start through stop-1 inclusive
- */
-
-_GLOBAL_TOC(flush_icache_range)
-BEGIN_FTR_SECTION
- PURGE_PREFETCHED_INS
- blr
-END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
-/*
- * Flush the data cache to memory
- *
- * Different systems have different cache line sizes
- * and in some cases i-cache and d-cache line sizes differ from
- * each other.
- */
- ld r10,PPC64_CACHES@toc(r2)
- lwz r7,DCACHEL1BLOCKSIZE(r10)/* Get cache block size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of cache block size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mtctr r8
-1: dcbst 0,r6
- add r6,r6,r7
- bdnz 1b
- sync
-
-/* Now invalidate the instruction cache */
-
- lwz r7,ICACHEL1BLOCKSIZE(r10) /* Get Icache block size */
- addi r5,r7,-1
- andc r6,r3,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5
- lwz r9,ICACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of Icache block size */
- srw. r8,r8,r9 /* compute line count */
- beqlr /* nothing to do? */
- mtctr r8
-2: icbi 0,r6
- add r6,r6,r7
- bdnz 2b
- isync
- blr
-_ASM_NOKPROBE_SYMBOL(flush_icache_range)
-EXPORT_SYMBOL(flush_icache_range)
-
-/*
- * Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- *
- * void __flush_dcache_icache(void *page)
- */
-_GLOBAL(__flush_dcache_icache)
-/*
- * Flush the data cache to memory
- *
- * Different systems have different cache line sizes
- */
-
-BEGIN_FTR_SECTION
- PURGE_PREFETCHED_INS
- blr
-END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
-
-/* Flush the dcache */
- ld r7,PPC64_CACHES@toc(r2)
- clrrdi r3,r3,PAGE_SHIFT /* Page align */
- lwz r4,DCACHEL1BLOCKSPERPAGE(r7) /* Get # dcache blocks per page */
- lwz r5,DCACHEL1BLOCKSIZE(r7) /* Get dcache block size */
- mr r6,r3
- mtctr r4
-0: dcbst 0,r6
- add r6,r6,r5
- bdnz 0b
- sync
-
-/* Now invalidate the icache */
-
- lwz r4,ICACHEL1BLOCKSPERPAGE(r7) /* Get # icache blocks per page */
- lwz r5,ICACHEL1BLOCKSIZE(r7) /* Get icache block size */
- mtctr r4
-1: icbi 0,r3
- add r3,r3,r5
- bdnz 1b
- isync
- blr
-
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
srdi r8,r3,32
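The loop structure of the flush_icache_range() assembly removed above is easy to restate in C. A minimal sketch with inline assembly, assuming a fixed 128-byte block size for both caches purely for illustration (the assembly read the real sizes from ppc64_caches); PowerPC-only and not a drop-in replacement:

/* Assumed block size; the removed assembly reads DCACHEL1BLOCKSIZE and
 * ICACHEL1BLOCKSIZE from ppc64_caches instead. */
#define CACHE_BLOCK 128UL

static inline void flush_icache_range_sketch(unsigned long start,
					     unsigned long stop)
{
	unsigned long addr;

	/* Pass 1: write modified data cache blocks back to memory. */
	for (addr = start & ~(CACHE_BLOCK - 1); addr < stop; addr += CACHE_BLOCK)
		__asm__ __volatile__("dcbst 0,%0" : : "r" (addr) : "memory");
	__asm__ __volatile__("sync");

	/* Pass 2: invalidate the corresponding instruction cache blocks. */
	for (addr = start & ~(CACHE_BLOCK - 1); addr < stop; addr += CACHE_BLOCK)
		__asm__ __volatile__("icbi 0,%0" : : "r" (addr) : "memory");
	__asm__ __volatile__("isync");
}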
@@ -432,18 +330,13 @@ kexec_create_tlb:
rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r9) */
/* Set up a temp identity mapping v:0 to p:0 and return to it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
-#define M_IF_NEEDED MAS2_M
-#else
-#define M_IF_NEEDED 0
-#endif
mtspr SPRN_MAS0,r9
lis r9,(MAS1_VALID|MAS1_IPROT)@h
ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
mtspr SPRN_MAS1,r9
- LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
+ LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
mtspr SPRN_MAS2,r9
LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 639ceae7da9d..4df94b6e2f32 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -715,6 +715,8 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
{
thread->hw_brk.address = 0;
thread->hw_brk.type = 0;
+ thread->hw_brk.len = 0;
+ thread->hw_brk.hw_len = 0;
if (ppc_breakpoint_available())
set_breakpoint(&thread->hw_brk);
}
@@ -816,6 +818,7 @@ static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
return false;
if (a->len != b->len)
return false;
+ /* no need to check hw_len. it's calculated from address and len */
return true;
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 100f1b57ec2f..577345382b23 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -303,16 +303,24 @@ static char __init *prom_strstr(const char *s1, const char *s2)
return NULL;
}
-static size_t __init prom_strlcpy(char *dest, const char *src, size_t size)
-{
- size_t ret = prom_strlen(src);
+static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
+{
+ size_t dsize = prom_strlen(dest);
+ size_t len = prom_strlen(src);
+ size_t res = dsize + len;
+
+ /* This would be a bug */
+ if (dsize >= count)
+ return count;
+
+ dest += dsize;
+ count -= dsize;
+ if (len >= count)
+ len = count-1;
+ memcpy(dest, src, len);
+ dest[len] = 0;
+ return res;
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- memcpy(dest, src, len);
- dest[len] = '\0';
- }
- return ret;
}
#ifdef CONFIG_PPC_PSERIES
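The switch from prom_strlcpy() to prom_strlcat() is what lets CONFIG_CMDLINE extend, rather than replace, the firmware-supplied bootargs in early_cmdline_parse() below. A standalone sketch of the same contract, using hosted libc names purely for illustration:

#include <stdio.h>
#include <string.h>

/* Same contract as prom_strlcat() above: append src to dest within a
 * buffer of 'count' bytes, always NUL-terminate, and return the length
 * the combined string would have had without truncation. */
static size_t strlcat_sketch(char *dest, const char *src, size_t count)
{
	size_t dsize = strlen(dest);
	size_t len = strlen(src);
	size_t res = dsize + len;

	if (dsize >= count)		/* dest already fills the buffer */
		return count;

	dest += dsize;
	count -= dsize;
	if (len >= count)
		len = count - 1;
	memcpy(dest, src, len);
	dest[len] = '\0';
	return res;
}

int main(void)
{
	char cmdline[64] = "root=/dev/sda1";	/* made-up firmware bootargs */

	/* CONFIG_CMDLINE is appended with a leading space, as in the
	 * CONFIG_CMDLINE_EXTEND path; truncation occurred iff ret >= size. */
	size_t ret = strlcat_sketch(cmdline, " console=hvc0", sizeof(cmdline));
	printf("%s (%zu)\n", cmdline, ret);
	return 0;
}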
@@ -764,10 +772,14 @@ static void __init early_cmdline_parse(void)
prom_cmd_line[0] = 0;
p = prom_cmd_line;
- if ((long)prom.chosen > 0)
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
- if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */
- prom_strlcpy(prom_cmd_line, CONFIG_CMDLINE, sizeof(prom_cmd_line));
+
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
+ prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
+ sizeof(prom_cmd_line));
+
prom_printf("command line: %s\n", prom_cmd_line);
#ifdef CONFIG_PPC64
@@ -1053,7 +1065,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.reserved2 = 0,
.reserved3 = 0,
.subprocessors = 1,
- .byte22 = OV5_FEAT(OV5_DRMEM_V2),
+ .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
.intarch = 0,
.mmu = 0,
.hash_ext = 0,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8c92febf5f44..25c0424e8868 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -2425,7 +2425,8 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
return -EIO;
hw_brk.address = data & (~HW_BRK_TYPE_DABR);
hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
- hw_brk.len = 8;
+ hw_brk.len = DABR_MAX_LEN;
+ hw_brk.hw_len = DABR_MAX_LEN;
set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
bp = thread->ptrace_bps[0];
@@ -2439,6 +2440,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
if (bp) {
attr = bp->attr;
attr.bp_addr = hw_brk.address;
+ attr.bp_len = DABR_MAX_LEN;
arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
/* Enable breakpoint */
@@ -2456,7 +2458,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = hw_brk.address;
- attr.bp_len = 8;
+ attr.bp_len = DABR_MAX_LEN;
arch_bp_generic_fields(hw_brk.type,
&attr.bp_type);
@@ -2880,18 +2882,14 @@ static long ppc_set_hwdebug(struct task_struct *child,
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
- brk.address = bp_info->addr & ~7UL;
+ brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
brk.type = HW_BRK_TYPE_TRANSLATE;
- brk.len = 8;
+ brk.len = DABR_MAX_LEN;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- /*
- * Check if the request is for 'range' breakpoints. We can
- * support it if range < 8 bytes.
- */
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
len = bp_info->addr2 - bp_info->addr;
else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
@@ -2904,7 +2902,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
- attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
+ attr.bp_addr = (unsigned long)bp_info->addr;
attr.bp_len = len;
arch_bp_generic_fields(brk.type, &attr.bp_type);
@@ -3361,6 +3359,12 @@ void do_syscall_trace_leave(struct pt_regs *regs)
user_enter();
}
+void __init pt_regs_check(void);
+
+/*
+ * Dummy function whose purpose is to break the build if struct pt_regs and
+ * struct user_pt_regs don't match.
+ */
void __init pt_regs_check(void)
{
BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
@@ -3398,4 +3402,67 @@ void __init pt_regs_check(void)
offsetof(struct user_pt_regs, result));
BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
+
+ // Now check that the pt_regs offsets match the uapi #defines
+ #define CHECK_REG(_pt, _reg) \
+ BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
+ sizeof(unsigned long)));
+
+ CHECK_REG(PT_R0, gpr[0]);
+ CHECK_REG(PT_R1, gpr[1]);
+ CHECK_REG(PT_R2, gpr[2]);
+ CHECK_REG(PT_R3, gpr[3]);
+ CHECK_REG(PT_R4, gpr[4]);
+ CHECK_REG(PT_R5, gpr[5]);
+ CHECK_REG(PT_R6, gpr[6]);
+ CHECK_REG(PT_R7, gpr[7]);
+ CHECK_REG(PT_R8, gpr[8]);
+ CHECK_REG(PT_R9, gpr[9]);
+ CHECK_REG(PT_R10, gpr[10]);
+ CHECK_REG(PT_R11, gpr[11]);
+ CHECK_REG(PT_R12, gpr[12]);
+ CHECK_REG(PT_R13, gpr[13]);
+ CHECK_REG(PT_R14, gpr[14]);
+ CHECK_REG(PT_R15, gpr[15]);
+ CHECK_REG(PT_R16, gpr[16]);
+ CHECK_REG(PT_R17, gpr[17]);
+ CHECK_REG(PT_R18, gpr[18]);
+ CHECK_REG(PT_R19, gpr[19]);
+ CHECK_REG(PT_R20, gpr[20]);
+ CHECK_REG(PT_R21, gpr[21]);
+ CHECK_REG(PT_R22, gpr[22]);
+ CHECK_REG(PT_R23, gpr[23]);
+ CHECK_REG(PT_R24, gpr[24]);
+ CHECK_REG(PT_R25, gpr[25]);
+ CHECK_REG(PT_R26, gpr[26]);
+ CHECK_REG(PT_R27, gpr[27]);
+ CHECK_REG(PT_R28, gpr[28]);
+ CHECK_REG(PT_R29, gpr[29]);
+ CHECK_REG(PT_R30, gpr[30]);
+ CHECK_REG(PT_R31, gpr[31]);
+ CHECK_REG(PT_NIP, nip);
+ CHECK_REG(PT_MSR, msr);
+ CHECK_REG(PT_ORIG_R3, orig_gpr3);
+ CHECK_REG(PT_CTR, ctr);
+ CHECK_REG(PT_LNK, link);
+ CHECK_REG(PT_XER, xer);
+ CHECK_REG(PT_CCR, ccr);
+#ifdef CONFIG_PPC64
+ CHECK_REG(PT_SOFTE, softe);
+#else
+ CHECK_REG(PT_MQ, mq);
+#endif
+ CHECK_REG(PT_TRAP, trap);
+ CHECK_REG(PT_DAR, dar);
+ CHECK_REG(PT_DSISR, dsisr);
+ CHECK_REG(PT_RESULT, result);
+ #undef CHECK_REG
+
+ BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
+
+ /*
+ * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
+ * real registers.
+ */
+ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
}
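The CHECK_REG() additions turn register-layout drift into a compile-time failure. A self-contained sketch of the underlying offsetof()/negative-array-size trick, with hypothetical names standing in for the uapi PT_* constants:

#include <stddef.h>

/* Stand-in for the kernel's BUILD_BUG_ON(): a negative array size makes
 * the compiler reject the file when 'cond' is true. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct regs_sketch {
	unsigned long gpr[32];
	unsigned long nip;
};

#define PT_NIP_SKETCH 32	/* hypothetical uapi word index */

void check_layout_sketch(void)
{
	/* Compiles only while the index matches the struct layout. */
	BUILD_BUG_ON_SKETCH(PT_NIP_SKETCH !=
			    offsetof(struct regs_sketch, nip) /
			    sizeof(unsigned long));
}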
diff --git a/arch/powerpc/kernel/secure_boot.c b/arch/powerpc/kernel/secure_boot.c
new file mode 100644
index 000000000000..4b982324d368
--- /dev/null
+++ b/arch/powerpc/kernel/secure_boot.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ */
+#include <linux/types.h>
+#include <linux/of.h>
+#include <asm/secure_boot.h>
+
+static struct device_node *get_ppc_fw_sb_node(void)
+{
+ static const struct of_device_id ids[] = {
+ { .compatible = "ibm,secureboot", },
+ { .compatible = "ibm,secureboot-v1", },
+ { .compatible = "ibm,secureboot-v2", },
+ {},
+ };
+
+ return of_find_matching_node(NULL, ids);
+}
+
+bool is_ppc_secureboot_enabled(void)
+{
+ struct device_node *node;
+ bool enabled = false;
+
+ node = get_ppc_fw_sb_node();
+ enabled = of_property_read_bool(node, "os-secureboot-enforcing");
+
+ of_node_put(node);
+
+ pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled");
+
+ return enabled;
+}
+
+bool is_ppc_trustedboot_enabled(void)
+{
+ struct device_node *node;
+ bool enabled = false;
+
+ node = get_ppc_fw_sb_node();
+ enabled = of_property_read_bool(node, "trusted-enabled");
+
+ of_node_put(node);
+
+ pr_info("Trusted boot mode %s\n", enabled ? "enabled" : "disabled");
+
+ return enabled;
+}
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 7cfcb294b11c..bd70f5be1c27 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -16,7 +16,7 @@
#include <asm/setup.h>
-unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
enum count_cache_flush_type {
COUNT_CACHE_FLUSH_NONE = 0x1,
@@ -24,6 +24,7 @@ enum count_cache_flush_type {
COUNT_CACHE_FLUSH_HW = 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+static bool link_stack_flush_enabled;
bool barrier_nospec_enabled;
static bool no_nospec;
@@ -94,13 +95,14 @@ static int barrier_nospec_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
- barrier_nospec_get, barrier_nospec_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
+ barrier_nospec_set, "%llu\n");
static __init int barrier_nospec_debugfs_init(void)
{
- debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
- &fops_barrier_nospec);
+ debugfs_create_file_unsafe("barrier_nospec", 0600,
+ powerpc_debugfs_root, NULL,
+ &fops_barrier_nospec);
return 0;
}
device_initcall(barrier_nospec_debugfs_init);
@@ -108,7 +110,7 @@ device_initcall(barrier_nospec_debugfs_init);
static __init int security_feature_debugfs_init(void)
{
debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
- (u64 *)&powerpc_security_features);
+ &powerpc_security_features);
return 0;
}
device_initcall(security_feature_debugfs_init);
@@ -141,32 +143,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
- if (rfi_flush || thread_priv) {
+ if (rfi_flush) {
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
- seq_buf_printf(&s, "Mitigation: ");
-
- if (rfi_flush)
- seq_buf_printf(&s, "RFI Flush");
-
- if (rfi_flush && thread_priv)
- seq_buf_printf(&s, ", ");
-
+ seq_buf_printf(&s, "Mitigation: RFI Flush");
if (thread_priv)
- seq_buf_printf(&s, "L1D private per thread");
+ seq_buf_printf(&s, ", L1D private per thread");
seq_buf_printf(&s, "\n");
return s.len;
}
+ if (thread_priv)
+ return sprintf(buf, "Vulnerable: L1D private per thread\n");
+
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");
return sprintf(buf, "Vulnerable\n");
}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_meltdown(dev, attr, buf);
+}
#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
@@ -212,11 +215,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (ccd)
seq_buf_printf(&s, "Indirect branch cache disabled");
+
+ if (link_stack_flush_enabled)
+ seq_buf_printf(&s, ", Software link stack flush");
+
} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
seq_buf_printf(&s, "Mitigation: Software count cache flush");
if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
seq_buf_printf(&s, " (hardware accelerated)");
+
+ if (link_stack_flush_enabled)
+ seq_buf_printf(&s, ", Software link stack flush");
+
} else if (btb_flush_enabled) {
seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
} else {
@@ -367,28 +378,61 @@ static int stf_barrier_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
+ "%llu\n");
static __init int stf_barrier_debugfs_init(void)
{
- debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
+ debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root,
+ NULL, &fops_stf_barrier);
return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+static void no_count_cache_flush(void)
+{
+ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+ pr_info("count-cache-flush: software flush disabled.\n");
+}
+
static void toggle_count_cache_flush(bool enable)
{
- if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
+ !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
+ enable = false;
+
+ if (!enable) {
patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
- count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
- pr_info("count-cache-flush: software flush disabled.\n");
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
+#endif
+ pr_info("link-stack-flush: software flush disabled.\n");
+ link_stack_flush_enabled = false;
+ no_count_cache_flush();
return;
}
+ // This enables the branch from _switch to flush_count_cache
patch_branch_site(&patch__call_flush_count_cache,
(u64)&flush_count_cache, BRANCH_SET_LINK);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ // This enables the branch from guest_exit_cont to kvm_flush_link_stack
+ patch_branch_site(&patch__call_kvm_flush_link_stack,
+ (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+#endif
+
+ pr_info("link-stack-flush: software flush enabled.\n");
+ link_stack_flush_enabled = true;
+
+ // If we just need to flush the link stack, patch an early return
+ if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+ patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+ no_count_cache_flush();
+ return;
+ }
+
if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
pr_info("count-cache-flush: full software flush sequence enabled.\n");
@@ -407,11 +451,20 @@ void setup_count_cache_flush(void)
if (no_spectrev2 || cpu_mitigations_off()) {
if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
- pr_warn("Spectre v2 mitigations not under software control, can't disable\n");
+ pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
enable = false;
}
+ /*
+ * There's no firmware feature flag/hypervisor bit to tell us we need to
+ * flush the link stack on context switch. So we set it here if we see
+ * either of the Spectre v2 mitigations that aim to protect userspace.
+ */
+ if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
+ security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
+ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
+
toggle_count_cache_flush(enable);
}
@@ -442,13 +495,14 @@ static int count_cache_flush_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
- count_cache_flush_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+ count_cache_flush_set, "%llu\n");
static __init int count_cache_flush_debugfs_init(void)
{
- debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
- NULL, &fops_count_cache_flush);
+ debugfs_create_file_unsafe("count_cache_flush", 0600,
+ powerpc_debugfs_root, NULL,
+ &fops_count_cache_flush);
return 0;
}
device_initcall(count_cache_flush_debugfs_init);
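Condensing toggle_count_cache_flush() to a pure function makes the three possible outcomes of the link-stack change easier to see; in this sketch the patch_instruction_site()/patch_branch_site() side effects are reduced to an enum:

#include <stdbool.h>

enum flush_mode {
	FLUSH_NONE,		/* both patch sites nopped out */
	FLUSH_LINK_STACK_ONLY,	/* early blr patched into flush_count_cache */
	FLUSH_BOTH,		/* full count cache + link stack flush */
};

static enum flush_mode pick_mode(bool enable, bool ftr_count_cache,
				 bool ftr_link_stack)
{
	if (!ftr_count_cache && !ftr_link_stack)
		enable = false;
	if (!enable)
		return FLUSH_NONE;
	if (!ftr_count_cache)
		return FLUSH_LINK_STACK_ONLY;
	return FLUSH_BOTH;
}

For example, pick_mode(true, false, true) yields FLUSH_LINK_STACK_ONLY, matching the new early-return patch at patch__flush_link_stack_return.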
diff --git a/arch/powerpc/kernel/secvar-ops.c b/arch/powerpc/kernel/secvar-ops.c
new file mode 100644
index 000000000000..6a29777d6a2d
--- /dev/null
+++ b/arch/powerpc/kernel/secvar-ops.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * This file initializes secvar operations for PowerPC Secureboot
+ */
+
+#include <linux/cache.h>
+#include <asm/secvar.h>
+
+const struct secvar_operations *secvar_ops __ro_after_init;
+
+void set_secvar_ops(const struct secvar_operations *ops)
+{
+ secvar_ops = ops;
+}
diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
new file mode 100644
index 000000000000..a0a78aba2083
--- /dev/null
+++ b/arch/powerpc/kernel/secvar-sysfs.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 IBM Corporation <nayna@linux.ibm.com>
+ *
+ * This code exposes secure variables to userspace via sysfs
+ */
+
+#define pr_fmt(fmt) "secvar-sysfs: "fmt
+
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <asm/secvar.h>
+
+#define NAME_MAX_SIZE 1024
+
+static struct kobject *secvar_kobj;
+static struct kset *secvar_kset;
+
+static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ ssize_t rc = 0;
+ struct device_node *node;
+ const char *format;
+
+ node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
+ if (!of_device_is_available(node))
+ return -ENODEV;
+
+ rc = of_property_read_string(node, "format", &format);
+ if (rc)
+ return rc;
+
+ rc = sprintf(buf, "%s\n", format);
+
+ of_node_put(node);
+
+ return rc;
+}
+
+
+static ssize_t size_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ uint64_t dsize;
+ int rc;
+
+ rc = secvar_ops->get(kobj->name, strlen(kobj->name) + 1, NULL, &dsize);
+ if (rc) {
+ pr_err("Error retrieving %s variable size %d\n", kobj->name,
+ rc);
+ return rc;
+ }
+
+ return sprintf(buf, "%llu\n", dsize);
+}
+
+static ssize_t data_read(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ uint64_t dsize;
+ char *data;
+ int rc;
+
+ rc = secvar_ops->get(kobj->name, strlen(kobj->name) + 1, NULL, &dsize);
+ if (rc) {
+ pr_err("Error getting %s variable size %d\n", kobj->name, rc);
+ return rc;
+ }
+ pr_debug("dsize is %llu\n", dsize);
+
+ data = kzalloc(dsize, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rc = secvar_ops->get(kobj->name, strlen(kobj->name) + 1, data, &dsize);
+ if (rc) {
+ pr_err("Error getting %s variable %d\n", kobj->name, rc);
+ goto data_fail;
+ }
+
+ rc = memory_read_from_buffer(buf, count, &off, data, dsize);
+
+data_fail:
+ kfree(data);
+ return rc;
+}
+
+static ssize_t update_write(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ int rc;
+
+	pr_debug("count is %zu\n", count);
+ rc = secvar_ops->set(kobj->name, strlen(kobj->name) + 1, buf, count);
+ if (rc) {
+ pr_err("Error setting the %s variable %d\n", kobj->name, rc);
+ return rc;
+ }
+
+ return count;
+}
+
+static struct kobj_attribute format_attr = __ATTR_RO(format);
+
+static struct kobj_attribute size_attr = __ATTR_RO(size);
+
+static struct bin_attribute data_attr = __BIN_ATTR_RO(data, 0);
+
+static struct bin_attribute update_attr = __BIN_ATTR_WO(update, 0);
+
+static struct bin_attribute *secvar_bin_attrs[] = {
+ &data_attr,
+ &update_attr,
+ NULL,
+};
+
+static struct attribute *secvar_attrs[] = {
+ &size_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group secvar_attr_group = {
+ .attrs = secvar_attrs,
+ .bin_attrs = secvar_bin_attrs,
+};
+__ATTRIBUTE_GROUPS(secvar_attr);
+
+static struct kobj_type secvar_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = secvar_attr_groups,
+};
+
+static int update_kobj_size(void)
+{
+
+ struct device_node *node;
+ u64 varsize;
+ int rc = 0;
+
+ node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
+ if (!of_device_is_available(node)) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ rc = of_property_read_u64(node, "max-var-size", &varsize);
+ if (rc)
+ goto out;
+
+ data_attr.size = varsize;
+ update_attr.size = varsize;
+
+out:
+ of_node_put(node);
+
+ return rc;
+}
+
+static int secvar_sysfs_load(void)
+{
+ char *name;
+ uint64_t namesize = 0;
+ struct kobject *kobj;
+ int rc;
+
+ name = kzalloc(NAME_MAX_SIZE, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ do {
+ rc = secvar_ops->get_next(name, &namesize, NAME_MAX_SIZE);
+ if (rc) {
+ if (rc != -ENOENT)
+ pr_err("error getting secvar from firmware %d\n",
+ rc);
+ break;
+ }
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (!kobj) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ kobject_init(kobj, &secvar_ktype);
+
+ rc = kobject_add(kobj, &secvar_kset->kobj, "%s", name);
+ if (rc) {
+ pr_warn("kobject_add error %d for attribute: %s\n", rc,
+ name);
+ kobject_put(kobj);
+ kobj = NULL;
+ }
+
+ if (kobj)
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ } while (!rc);
+
+ kfree(name);
+ return rc;
+}
+
+static int secvar_sysfs_init(void)
+{
+ int rc;
+
+ if (!secvar_ops) {
+ pr_warn("secvar: failed to retrieve secvar operations.\n");
+ return -ENODEV;
+ }
+
+ secvar_kobj = kobject_create_and_add("secvar", firmware_kobj);
+ if (!secvar_kobj) {
+ pr_err("secvar: Failed to create firmware kobj\n");
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_file(secvar_kobj, &format_attr.attr);
+ if (rc) {
+ kobject_put(secvar_kobj);
+ return -ENOMEM;
+ }
+
+ secvar_kset = kset_create_and_add("vars", NULL, secvar_kobj);
+ if (!secvar_kset) {
+ pr_err("secvar: sysfs kobject registration failed.\n");
+ kobject_put(secvar_kobj);
+ return -ENOMEM;
+ }
+
+ rc = update_kobj_size();
+ if (rc) {
+ pr_err("Cannot read the size of the attribute\n");
+ return rc;
+ }
+
+ secvar_sysfs_load();
+
+ return 0;
+}
+
+late_initcall(secvar_sysfs_init);
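Given the kobject and kset names above, variables land under /sys/firmware/secvar/vars/<name>/ with size, data and update attributes (and a format file one level up). A small userspace sketch reading one variable; the name "db" is a hypothetical example:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char path[256];
	static char buf[65536];
	size_t n;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/firmware/secvar/vars/%s/data", "db");
	f = fopen(path, "rb");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	n = fread(buf, 1, sizeof(buf), f);
	printf("read %zu bytes from %s\n", n, path);
	fclose(f);
	return 0;
}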
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 25aaa3903000..488f1eecc0de 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -715,8 +715,28 @@ static struct notifier_block ppc_panic_block = {
.priority = INT_MIN /* may not return; must be done last */
};
+/*
+ * Dump out kernel offset information on panic.
+ */
+static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
+ void *p)
+{
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+ kaslr_offset(), KERNELBASE);
+
+ return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+ .notifier_call = dump_kernel_offset
+};
+
void __init setup_panic(void)
{
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_offset_notifier);
+
/* PPC64 always does a hard irq disable in its panic handler */
if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
return;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a7541edf0cdb..dcffe927f5b9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -44,6 +44,7 @@
#include <asm/asm-prototypes.h>
#include <asm/kdump.h>
#include <asm/feature-fixups.h>
+#include <asm/early_ioremap.h>
#include "setup.h"
@@ -80,6 +81,8 @@ notrace void __init machine_init(u64 dt_ptr)
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
+ early_ioremap_setup();
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 44b4c432a273..6104917a282d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -65,15 +65,10 @@
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
+#include <asm/early_ioremap.h>
#include "setup.h"
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
int spinning_secondaries;
u64 ppc64_pft_size;
@@ -305,7 +300,7 @@ void __init early_setup(unsigned long dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
+ udbg_printf(" -> %s(), dt_ptr: 0x%lx\n", __func__, dt_ptr);
/*
* Do early initialization using the flattened device
@@ -338,6 +333,8 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
+ early_ioremap_setup();
+
/* Initialize the hash table or TLB handling */
early_init_mmu();
@@ -362,11 +359,11 @@ void __init early_setup(unsigned long dt_ptr)
*/
this_cpu_enable_ftrace();
- DBG(" <- early_setup()\n");
+ udbg_printf(" <- %s()\n", __func__);
#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
/*
- * This needs to be done *last* (after the above DBG() even)
+ * This needs to be done *last* (after the above udbg_printf() even)
*
* Right after we return from this function, we turn on the MMU
* which means the real-mode access trick that btext does will
@@ -436,8 +433,6 @@ void smp_release_cpus(void)
if (!use_spinloop())
return;
- DBG(" -> smp_release_cpus()\n");
-
/* All secondary cpus are spinning on a common spinloop, release them
* all now so they can start to spin on their individual paca
* spinloops. For non SMP kernels, the secondary cpus never get out
@@ -456,9 +451,7 @@ void smp_release_cpus(void)
break;
udelay(1);
}
- DBG("spinning_secondaries = %d\n", spinning_secondaries);
-
- DBG(" <- smp_release_cpus()\n");
+ pr_debug("spinning_secondaries = %d\n", spinning_secondaries);
}
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
@@ -551,8 +544,6 @@ void __init initialize_cache_info(void)
struct device_node *cpu = NULL, *l2, *l3 = NULL;
u32 pvr;
- DBG(" -> initialize_cache_info()\n");
-
/*
* All shipping POWER8 machines have a firmware bug that
* puts incorrect information in the device-tree. This will
@@ -576,10 +567,10 @@ void __init initialize_cache_info(void)
*/
if (cpu) {
if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
- DBG("Argh, can't find dcache properties !\n");
+ pr_warn("Argh, can't find dcache properties !\n");
if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
- DBG("Argh, can't find icache properties !\n");
+ pr_warn("Argh, can't find icache properties !\n");
/*
* Try to find the L2 and L3 if any. Assume they are
@@ -604,8 +595,6 @@ void __init initialize_cache_info(void)
cur_cpu_spec->dcache_bsize = dcache_bsize;
cur_cpu_spec->icache_bsize = icache_bsize;
-
- DBG(" <- initialize_cache_info()\n");
}
/*
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 3bfb3888e897..078608ec2e92 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -79,7 +79,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
* sys_select() with the appropriate args. -- Cort
*/
int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
+ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
if ( (unsigned long)n >= 4096 )
{
@@ -89,7 +89,7 @@ ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, s
|| __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
|| __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
|| __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
- || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
+ || __get_user(tvp, ((struct __kernel_old_timeval __user * __user *)(buffer+4))))
return -EFAULT;
}
return sys_select(n, inp, outp, exp, tvp);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 84827da01d45..2d13cea13954 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -232,7 +232,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
-void accumulate_stolen_time(void)
+void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
unsigned long save_irq_soft_mask = irq_soft_mask_return();
@@ -885,7 +885,7 @@ static notrace u64 timebase_read(struct clocksource *cs)
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xt;
+ struct timespec64 xt;
struct clocksource *clock = tk->tkr_mono.clock;
u32 mult = tk->tkr_mono.mult;
u32 shift = tk->tkr_mono.shift;
@@ -957,7 +957,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
- vdso_data->stamp_xtime = xt;
+ vdso_data->stamp_xtime_sec = xt.tv_sec;
+ vdso_data->stamp_xtime_nsec = xt.tv_nsec;
vdso_data->stamp_sec_fraction = frac_sec;
smp_wmb();
++(vdso_data->tb_update_count);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 82f43535e686..014ff0701f24 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -250,15 +250,22 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(oops_end);
+static char *get_mmu_str(void)
+{
+ if (early_radix_enabled())
+ return " MMU=Radix";
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return " MMU=Hash";
+ return "";
+}
+
static int __die(const char *str, struct pt_regs *regs, long err)
{
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
- printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n",
+ printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
- PAGE_SIZE / 1024,
- early_radix_enabled() ? " MMU=Radix" : "",
- early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "",
+ PAGE_SIZE / 1024, get_mmu_str(),
IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index a384e7c8b01c..01595e8cafe7 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -120,13 +120,15 @@ int udbg_write(const char *s, int n)
#define UDBG_BUFSIZE 256
void udbg_printf(const char *fmt, ...)
{
- char buf[UDBG_BUFSIZE];
- va_list args;
+ if (udbg_putc) {
+ char buf[UDBG_BUFSIZE];
+ va_list args;
- va_start(args, fmt);
- vsnprintf(buf, UDBG_BUFSIZE, fmt, args);
- udbg_puts(buf);
- va_end(args);
+ va_start(args, fmt);
+ vsnprintf(buf, UDBG_BUFSIZE, fmt, args);
+ udbg_puts(buf);
+ va_end(args);
+ }
}
void __init udbg_progress(char *s, unsigned short hex)
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index becd9f8767ed..c8e6902cb01b 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -15,10 +15,8 @@
/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART 4
-#define TSPEC_TV_SEC TSPC64_TV_SEC+LOPART
#else
#define LOPART 0
-#define TSPEC_TV_SEC TSPC32_TV_SEC
#endif
.text
@@ -192,7 +190,7 @@ V_FUNCTION_BEGIN(__kernel_time)
bl __get_datapage@local
mr r9, r3 /* datapage ptr in r9 */
- lwz r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
+ lwz r3,STAMP_XTIME_SEC+LOPART(r9)
cmplwi r11,0 /* check if t is NULL */
beq 2f
@@ -268,7 +266,7 @@ __do_get_tspec:
* as a 32.32 fixed-point number in r3 and r4.
* Load & add the xtime stamp.
*/
- lwz r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
+ lwz r5,STAMP_XTIME_SEC+LOPART(r9)
lwz r6,STAMP_SEC_FRAC(r9)
addc r4,r4,r6
adde r3,r3,r5
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
index 3f92561a64c4..526f5ba2593e 100644
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -35,7 +35,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10)
- srw. r8,r8,r9 /* compute line count */
+ srd. r8,r8,r9 /* compute line count */
crclr cr0*4+so
beqlr /* nothing to do? */
mtctr r8
@@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
subf r8,r6,r4 /* compute length */
add r8,r8,r5
lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10)
- srw. r8,r8,r9 /* compute line count */
+ srd. r8,r8,r9 /* compute line count */
crclr cr0*4+so
beqlr /* nothing to do? */
mtctr r8
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 07bfe33fe874..1f24e411af80 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -116,8 +116,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
* too
*/
- ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
- ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
+ ld r4,STAMP_XTIME_SEC(r3)
+ ld r5,STAMP_XTIME_NSEC(r3)
bne cr6,75f
/* CLOCK_MONOTONIC_COARSE */
@@ -220,7 +220,7 @@ V_FUNCTION_BEGIN(__kernel_time)
mr r11,r3 /* r11 holds t */
bl V_LOCAL_FUNC(__get_datapage)
- ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+ ld r4,STAMP_XTIME_SEC(r3)
cmpldi r11,0 /* check if t is NULL */
beq 2f
@@ -265,7 +265,7 @@ V_FUNCTION_BEGIN(__do_get_tspec)
mulhdu r6,r6,r5 /* in units of 2^-32 seconds */
/* Add stamp since epoch */
- ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+ ld r4,STAMP_XTIME_SEC(r3)
lwz r5,STAMP_SEC_FRAC(r3)
or r0,r4,r5
or r0,r0,r6
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
new file mode 100644
index 000000000000..378f6108a414
--- /dev/null
+++ b/arch/powerpc/kexec/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+# Avoid clang warnings around longjmp/setjmp declarations
+CFLAGS_crash.o += -ffreestanding
+
+obj-y += core.o crash.o core_$(BITS).o
+
+obj-$(CONFIG_PPC32) += relocate_32.o
+
+obj-$(CONFIG_KEXEC_FILE) += file_load.o elf_$(BITS).o
+
+ifdef CONFIG_HAVE_IMA_KEXEC
+ifdef CONFIG_IMA
+obj-y += ima.o
+endif
+endif
+
+
+# Disable GCOV, KCOV & sanitizers in odd or sensitive code
+GCOV_PROFILE_core_$(BITS).o := n
+KCOV_INSTRUMENT_core_$(BITS).o := n
+UBSAN_SANITIZE_core_$(BITS).o := n
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kexec/core.c
index c4ed328a7b96..078fe3d76feb 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kexec/core.c
@@ -86,6 +86,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
/*
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kexec/core_32.c
index bf9f1f906d64..bf9f1f906d64 100644
--- a/arch/powerpc/kernel/machine_kexec_32.c
+++ b/arch/powerpc/kexec/core_32.c
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kexec/core_64.c
index 04a7cba58eff..04a7cba58eff 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kexec/core_64.c
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kexec/crash.c
index d488311efab1..d488311efab1 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kexec/crash.c
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kexec/elf_64.c
index 3072fd6dbe94..3072fd6dbe94 100644
--- a/arch/powerpc/kernel/kexec_elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kexec/file_load.c
index 143c91724617..143c91724617 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kexec/file_load.c
diff --git a/arch/powerpc/kernel/ima_kexec.c b/arch/powerpc/kexec/ima.c
index 720e50e490b6..720e50e490b6 100644
--- a/arch/powerpc/kernel/ima_kexec.c
+++ b/arch/powerpc/kexec/ima.c
diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S
new file mode 100644
index 000000000000..61946c19e07c
--- /dev/null
+++ b/arch/powerpc/kexec/relocate_32.S
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file contains kexec low-level functions.
+ *
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ * PPC44x port. Copyright (C) 2011, IBM Corporation
+ * Author: Suzuki Poulose <suzuki@in.ibm.com>
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/kexec.h>
+
+ .text
+
+ /*
+ * Must be relocatable PIC code callable as a C function.
+ */
+ .globl relocate_new_kernel
+relocate_new_kernel:
+ /* r3 = page_list */
+ /* r4 = reboot_code_buffer */
+ /* r5 = start_address */
+
+#ifdef CONFIG_FSL_BOOKE
+
+ mr r29, r3
+ mr r30, r4
+ mr r31, r5
+
+#define ENTRY_MAPPING_KEXEC_SETUP
+#include <kernel/fsl_booke_entry_mapping.S>
+#undef ENTRY_MAPPING_KEXEC_SETUP
+
+ mr r3, r29
+ mr r4, r30
+ mr r5, r31
+
+ li r0, 0
+#elif defined(CONFIG_44x)
+
+ /* Save our parameters */
+ mr r29, r3
+ mr r30, r4
+ mr r31, r5
+
+#ifdef CONFIG_PPC_47x
+ /* Check for 47x cores */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476FPE@h
+ beq setup_map_47x
+ cmplwi cr0,r3,PVR_476@h
+ beq setup_map_47x
+ cmplwi cr0,r3,PVR_476_ISS@h
+ beq setup_map_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Code for setting up 1:1 mapping for PPC440x for KEXEC
+ *
+ * We cannot switch off the MMU on PPC44x.
+ * So we:
+ * 1) Invalidate all the mappings except the one we are running from.
+ * 2) Create a tmp mapping for our code in the other address space (TS) and
+ * jump to it. Invalidate the entry we started in.
+ * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
+ * 4) Jump to the 1:1 mapping in original TS.
+ * 5) Invalidate the tmp mapping.
+ *
+ * - Based on the kexec support code for FSL BookE
+ *
+ */
+
+ /*
+ * Load the PID with kernel PID (0).
+ * Also load our MSR_IS and TID to MMUCR for TLB search.
+ */
+ li r3, 0
+ mtspr SPRN_PID, r3
+ mfmsr r4
+ andi. r4,r4,MSR_IS@l
+ beq wmmucr
+ oris r3,r3,PPC44x_MMUCR_STS@h
+wmmucr:
+ mtspr SPRN_MMUCR,r3
+ sync
+
+ /*
+ * Invalidate all the TLB entries except the current entry
+ * where we are running from
+ */
+ bl 0f /* Find our address */
+0: mflr r5 /* Make it accessible */
+ tlbsx r23,0,r5 /* Find entry we are in */
+ li r4,0 /* Start at TLB entry 0 */
+ li r3,0 /* Set PAGEID inval value */
+1: cmpw r23,r4 /* Is this our entry? */
+ beq skip /* If so, skip the inval */
+ tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
+skip:
+ addi r4,r4,1 /* Increment */
+ cmpwi r4,64 /* Are we done? */
+ bne 1b /* If not, repeat */
+ isync
+
+ /* Create a temp mapping and jump to it */
+ andi. r6, r23, 1 /* Find the index to use */
+ addi r24, r6, 1 /* r24 will contain 1 or 2 */
+
+ mfmsr r9 /* get the MSR */
+ rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */
+ xori r7, r5, 1 /* Use the other address space */
+
+ /* Read the current mapping entries */
+ tlbre r3, r23, PPC44x_TLB_PAGEID
+ tlbre r4, r23, PPC44x_TLB_XLAT
+ tlbre r5, r23, PPC44x_TLB_ATTRIB
+
+ /* Save our current XLAT entry */
+ mr r25, r4
+
+ /* Extract the TLB PageSize */
+ li r10, 1 /* r10 will hold PageSize */
+ rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */
+
+ /* XXX: As of now we use 256M, 4K pages */
+ cmpwi r11, PPC44x_TLB_256M
+ bne tlb_4k
+ rotlwi r10, r10, 28 /* r10 = 256M */
+ b write_out
+tlb_4k:
+ cmpwi r11, PPC44x_TLB_4K
+ bne default
+ rotlwi r10, r10, 12 /* r10 = 4K */
+ b write_out
+default:
+ rotlwi r10, r10, 10 /* r10 = 1K */
+
+write_out:
+ /*
+	 * Write out the tmp 1:1 mapping for this code in the other address space.
+	 * Fixup: EPN = RPN, TS = other address space.
+ */
+ insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */
+
+ /* Write out the tmp mapping entries */
+ tlbwe r3, r24, PPC44x_TLB_PAGEID
+ tlbwe r4, r24, PPC44x_TLB_XLAT
+ tlbwe r5, r24, PPC44x_TLB_ATTRIB
+
+ subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */
+ not r10, r11 /* Mask for PageNum */
+
+ /* Switch to other address space in MSR */
+ insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
+
+ bl 1f
+1: mflr r8
+ addi r8, r8, (2f-1b) /* Find the target offset */
+
+ /* Jump to the tmp mapping */
+ mtspr SPRN_SRR0, r8
+ mtspr SPRN_SRR1, r9
+ rfi
+
+2:
+ /* Invalidate the entry we were executing from */
+ li r3, 0
+ tlbwe r3, r23, PPC44x_TLB_PAGEID
+
+ /* attribute fields. rwx for SUPERVISOR mode */
+ li r5, 0
+ ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+ /* Create 1:1 mapping in 256M pages */
+ xori r7, r7, 1 /* Revert back to Original TS */
+
+ li r8, 0 /* PageNumber */
+ li r6, 3 /* TLB Index, start at 3 */
+
+next_tlb:
+ rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */
+ mr r4, r3 /* RPN = EPN */
+ ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
+ insrwi r3, r7, 1, 23 /* Set TS from r7 */
+
+ tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */
+ tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */
+ tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */
+
+ addi r8, r8, 1 /* Increment PN */
+ addi r6, r6, 1 /* Increment TLB Index */
+ cmpwi r8, 8 /* Are we done ? */
+ bne next_tlb
+ isync
+
+ /* Jump to the new mapping 1:1 */
+ li r9,0
+ insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
+
+ bl 1f
+1: mflr r8
+ and r8, r8, r11 /* Get our offset within page */
+ addi r8, r8, (2f-1b)
+
+ and r5, r25, r10 /* Get our target PageNum */
+ or r8, r8, r5 /* Target jump address */
+
+ mtspr SPRN_SRR0, r8
+ mtspr SPRN_SRR1, r9
+ rfi
+2:
+ /* Invalidate the tmp entry we used */
+ li r3, 0
+ tlbwe r3, r24, PPC44x_TLB_PAGEID
+ sync
+ b ppc44x_map_done
+
+#ifdef CONFIG_PPC_47x
+
+ /* 1:1 mapping for 47x */
+
+setup_map_47x:
+
+ /*
+	 * Load the kernel pid (0) into PID and also into MMUCR[TID].
+	 * Also copy MSR[IS] into MMUCR[STS].
+ */
+ li r3, 0
+ mtspr SPRN_PID, r3 /* Set PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4, r4, MSR_IS@l /* TS=1? */
+ beq 1f /* If not, leave STS=0 */
+ oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
+1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */
+ sync
+
+ /* Find the entry we are running from */
+ bl 2f
+2: mflr r23
+ tlbsx r23, 0, r23
+ tlbre r24, r23, 0 /* TLB Word 0 */
+ tlbre r25, r23, 1 /* TLB Word 1 */
+ tlbre r26, r23, 2 /* TLB Word 2 */
+
+
+ /*
+	 * Invalidate all the TLB entries by writing to 256 RPNs (r4)
+	 * of 4k page size in all 4 ways (0-3 in r3).
+	 * This invalidates the entire UTLB, including the entry we are
+	 * running from. However, the shadow TLB entries let us continue
+	 * executing until we flush them (rfi/isync).
+ */
+ addis r3, 0, 0x8000 /* specify the way */
+ addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */
+ addi r5, 0, 0
+ b clear_utlb_entry
+
+	/* Align the loop to speed things up (from head_44x.S) */
+ .align 6
+
+clear_utlb_entry:
+
+ tlbwe r4, r3, 0
+ tlbwe r5, r3, 1
+ tlbwe r5, r3, 2
+ addis r3, r3, 0x2000 /* Increment the way */
+ cmpwi r3, 0
+ bne clear_utlb_entry
+ addis r3, 0, 0x8000
+ addis r4, r4, 0x100 /* Increment the EPN */
+ cmpwi r4, 0
+ bne clear_utlb_entry
+
+ /* Create the entries in the other address space */
+ mfmsr r5
+ rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */
+ xori r7, r7, 1 /* r7 = !TS */
+
+ insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */
+
+ /*
+	 * Write out the TLB entries for the tmp mapping.
+	 * Use way '0' so that we can easily invalidate it later.
+ */
+ lis r3, 0x8000 /* Way '0' */
+
+ tlbwe r24, r3, 0
+ tlbwe r25, r3, 1
+ tlbwe r26, r3, 2
+
+ /* Update the msr to the new TS */
+ insrwi r5, r7, 1, 26
+
+ bl 1f
+1: mflr r6
+ addi r6, r6, (2f-1b)
+
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r5
+ rfi
+
+ /*
+ * Now we are in the tmp address space.
+ * Create a 1:1 mapping for 0-2GiB in the original TS.
+ */
+2:
+ li r3, 0
+ li r4, 0 /* TLB Word 0 */
+ li r5, 0 /* TLB Word 1 */
+ li r6, 0
+ ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */
+
+ li r8, 0 /* PageIndex */
+
+ xori r7, r7, 1 /* revert back to original TS */
+
+write_utlb:
+ rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */
+ /* ERPN = 0 as we don't use memory above 2G */
+
+ mr r4, r5 /* EPN = RPN */
+ ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
+ insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */
+
+ tlbwe r4, r3, 0 /* Write out the entries */
+ tlbwe r5, r3, 1
+ tlbwe r6, r3, 2
+ addi r8, r8, 1
+ cmpwi r8, 8 /* Have we completed ? */
+ bne write_utlb
+
+	/* Make sure we complete the TLB writes */
+ isync
+
+ /*
+ * Prepare to jump to the 1:1 mapping.
+ * 1) Extract page size of the tmp mapping
+ * DSIZ = TLB_Word0[22:27]
+ * 2) Calculate the physical address of the address
+ * to jump to.
+ */
+ rlwinm r10, r24, 0, 22, 27
+
+ cmpwi r10, PPC47x_TLB0_4K
+ bne 0f
+ li r10, 0x1000 /* r10 = 4k */
+ bl 1f
+
+0:
+ /* Defaults to 256M */
+ lis r10, 0x1000
+
+ bl 1f
+1: mflr r4
+ addi r4, r4, (2f-1b) /* virtual address of 2f */
+
+ subi r11, r10, 1 /* offsetmask = Pagesize - 1 */
+ not r10, r11 /* Pagemask = ~(offsetmask) */
+
+ and r5, r25, r10 /* Physical page */
+ and r6, r4, r11 /* offset within the current page */
+
+ or r5, r5, r6 /* Physical address for 2f */
+
+ /* Switch the TS in MSR to the original one */
+ mfmsr r8
+ insrwi r8, r7, 1, 26
+
+ mtspr SPRN_SRR1, r8
+ mtspr SPRN_SRR0, r5
+ rfi
+
+2:
+ /* Invalidate the tmp mapping */
+ lis r3, 0x8000 /* Way '0' */
+
+ clrrwi r24, r24, 12 /* Clear the valid bit */
+ tlbwe r24, r3, 0
+ tlbwe r25, r3, 1
+ tlbwe r26, r3, 2
+
+ /* Make sure we complete the TLB write and flush the shadow TLB */
+ isync
+
+#endif
+
+ppc44x_map_done:
+
+
+ /* Restore the parameters */
+ mr r3, r29
+ mr r4, r30
+ mr r5, r31
+
+ li r0, 0
+#else
+ li r0, 0
+
+ /*
+ * Set Machine Status Register to a known status,
+ * switch the MMU off and jump to 1: in a single step.
+ */
+
+ mr r8, r0
+ ori r8, r8, MSR_RI|MSR_ME
+ mtspr SPRN_SRR1, r8
+ addi r8, r4, 1f - relocate_new_kernel
+ mtspr SPRN_SRR0, r8
+ sync
+ rfi
+
+1:
+#endif
+ /* from this point address translation is turned off */
+ /* and interrupts are disabled */
+
+ /* set a new stack at the bottom of our page... */
+ /* (not really needed now) */
+ addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
+ stw r0, 0(r1)
+
+ /* Do the copies */
+ li r6, 0 /* checksum */
+ mr r0, r3
+ b 1f
+
+0: /* top, read another word for the indirection page */
+ lwzu r0, 4(r3)
+
+1:
+ /* is it a destination page? (r8) */
+ rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
+ beq 2f
+
+ rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */
+ b 0b
+
+2: /* is it an indirection page? (r3) */
+ rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
+ beq 2f
+
+ rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */
+ subi r3, r3, 4
+ b 0b
+
+2: /* are we done? */
+ rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
+ beq 2f
+ b 3f
+
+2: /* is it a source page? (r9) */
+ rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
+ beq 0b
+
+ rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */
+
+ li r7, PAGE_SIZE / 4
+ mtctr r7
+ subi r9, r9, 4
+ subi r8, r8, 4
+9:
+ lwzu r0, 4(r9) /* do the copy */
+ xor r6, r6, r0
+ stwu r0, 4(r8)
+ dcbst 0, r8
+ sync
+ icbi 0, r8
+ bdnz 9b
+
+ addi r9, r9, 4
+ addi r8, r8, 4
+ b 0b
+
+3:
+
+	/* To be certain of avoiding problems with self-modifying code,
+ * execute a serializing instruction here.
+ */
+ isync
+ sync
+
+ mfspr r3, SPRN_PIR /* current core we are running on */
+ mr r4, r5 /* load physical address of chunk called */
+
+ /* jump to the entry point, usually the setup routine */
+ mtlr r5
+ blrl
+
+1: b 1b
+
+relocate_new_kernel_end:
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long relocate_new_kernel_end - relocate_new_kernel
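For reference, the destination/indirection/source walk in the copy loop above can be restated in C. This sketch drops the running XOR checksum and the per-word dcbst/icbi cache maintenance the assembly performs, and assumes the list opens with an IND_INDIRECTION entry, as the head built by kexec does:

#include <string.h>

#define IND_DESTINATION	(1UL << 0)
#define IND_INDIRECTION	(1UL << 1)
#define IND_DONE	(1UL << 2)
#define IND_SOURCE	(1UL << 3)
#define PAGE_SZ		4096UL

static void kexec_copy_sketch(unsigned long head)
{
	unsigned long *next = NULL;	/* cursor into indirection page */
	unsigned char *dest = NULL;
	unsigned long word = head;	/* the head value is the first entry */

	while (!(word & IND_DONE)) {
		unsigned long addr = word & ~(PAGE_SZ - 1);

		if (word & IND_DESTINATION)
			dest = (unsigned char *)addr;	/* new target page */
		else if (word & IND_INDIRECTION)
			next = (unsigned long *)addr;	/* new entry list */
		else if (word & IND_SOURCE) {
			memcpy(dest, (void *)addr, PAGE_SZ);
			dest += PAGE_SZ;
		}
		word = *next++;		/* fetch the next entry */
	}
}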
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index faebcbb8c4db..0496e66aaa56 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -11,6 +11,7 @@
*/
#include <asm/ppc_asm.h>
+#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
@@ -1487,6 +1488,13 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1:
#endif /* CONFIG_KVM_XICS */
+ /*
+ * Possibly flush the link stack here, before we do a blr in
+ * guest_exit_short_path.
+ */
+1: nop
+ patch_site 1b patch__call_kvm_flush_link_stack
+
/* If we came in through the P9 short path, go back out to C now */
lwz r0, STACK_SLOT_SHORT_PATH(r1)
cmpwi r0, 0
@@ -1963,6 +1971,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtlr r0
blr
+.balign 32
+.global kvm_flush_link_stack
+kvm_flush_link_stack:
+ /* Save LR into r0 */
+ mflr r0
+
+ /* Flush the link stack. On Power8 it's up to 32 entries in size. */
+ .rept 32
+ bl .+4
+ .endr
+
+ /* And on Power9 it's up to 64. */
+BEGIN_FTR_SECTION
+ .rept 32
+ bl .+4
+ .endr
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+ /* Restore LR */
+ mtlr r0
+ blr
+
kvmppc_guest_external:
/* External interrupt, first check for host_ipi. If this is
* set, we know the host wants us out so let's do it now
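The .rept blocks above work by issuing enough bl instructions to push every entry out of the hardware's return-address (link stack) predictor; each bl .+4 simply falls through to the next instruction while clobbering one predictor slot. The same idea as a hedged GCC inline-assembly sketch, hardcoding the deeper Power9 depth of 64:

/* PowerPC-only illustration; the real code saves and restores LR
 * explicitly and is enabled or disabled at runtime via the
 * patch__call_kvm_flush_link_stack patch site. */
static inline void flush_link_stack_sketch(void)
{
	__asm__ __volatile__(
		".rept 64\n\t"
		"bl .+4\n\t"
		".endr"
		: : : "lr", "memory");
}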
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 84d5fab94f8f..69b2419accef 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -251,9 +251,18 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
{
unsigned int bl;
int wimgxpp;
- struct ppc_bat *bat = BATS[index];
+ struct ppc_bat *bat;
unsigned long flags = pgprot_val(prot);
+ if (index == -1)
+ index = find_free_bat();
+ if (index == -1) {
+ pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
+ (unsigned long long)phys);
+ return;
+ }
+ bat = BATS[index];
+
if ((flags & _PAGE_NO_CACHE) ||
(cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
flags &= ~_PAGE_COHERENT;
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 523e42eb11da..d2d8237ea9d5 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -482,19 +482,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
return ret;
}
-static long native_hpte_find(unsigned long vpn, int psize, int ssize)
+static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
struct hash_pte *hptep;
- unsigned long hash;
+ unsigned long hpte_v;
unsigned long i;
- long slot;
- unsigned long want_v, hpte_v;
- hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
- want_v = hpte_encode_avpn(vpn, psize, ssize);
-
- /* Bolted mappings are only ever in the primary group */
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
@@ -508,6 +501,33 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
return -1;
}
+static long native_hpte_find(unsigned long vpn, int psize, int ssize)
+{
+ unsigned long hpte_group;
+ unsigned long want_v;
+ unsigned long hash;
+ long slot;
+
+ hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
+
+ /*
+ * We try to keep bolted entries always in the primary hash,
+ * but in some cases they can be found in the secondary too.
+ */
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot = __native_hpte_find(want_v, hpte_group);
+ if (slot < 0) {
+ /* Try in secondary */
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot = __native_hpte_find(want_v, hpte_group);
+ if (slot < 0)
+ return -1;
+ }
+
+ return slot;
+}
+
/*
* Update the page protection bits. Intended to be used to create
* guard pages for kernel data structures on pages which are bolted
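As the comment above notes, a bolted HPTE is normally in the primary group but may land in the secondary; the two probe addresses differ only in whether the hash is complemented before masking. A stand-alone sketch of the slot arithmetic, with an assumed hash mask (HPTES_PER_GROUP is 8 on book3s64):

#include <stdio.h>

#define HPTES_PER_GROUP 8

/* Primary group slot: hash masked directly. */
static unsigned long primary_slot(unsigned long hash, unsigned long mask)
{
        return (hash & mask) * HPTES_PER_GROUP;
}

/* Secondary group slot: complemented hash, then masked. */
static unsigned long secondary_slot(unsigned long hash, unsigned long mask)
{
        return (~hash & mask) * HPTES_PER_GROUP;
}

int main(void)
{
        unsigned long htab_hash_mask = 0xfffff;	/* assumed table size */
        unsigned long hash = 0x12345;

        printf("primary   %#lx\n", primary_slot(hash, htab_hash_mask));
        printf("secondary %#lx\n", secondary_slot(hash, htab_hash_mask));
        return 0;
}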
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 6c123760164e..b30435c7d804 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -263,6 +263,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long vsid = get_kernel_vsid(vaddr, ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
+ bool secondary_hash = false;
/*
* If we hit a bad address return error.
@@ -291,13 +292,31 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
BUG_ON(!mmu_hash_ops.hpte_insert);
+repeat:
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
+ if (ret == -1) {
+ /*
+ * Try to keep bolted entries in the primary group:
+ * remove a non-bolted entry and try the insert again.
+ */
+ ret = mmu_hash_ops.hpte_remove(hpteg);
+ if (ret != -1)
+ ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
+ HPTE_V_BOLTED, psize, psize,
+ ssize);
+ if (ret == -1 && !secondary_hash) {
+ secondary_hash = true;
+ hpteg = ((~hash & htab_hash_mask) * HPTES_PER_GROUP);
+ goto repeat;
+ }
+ }
if (ret < 0)
break;
+ cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index ae7fca40e5b3..59e0ebbd8036 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -307,16 +307,6 @@ void thread_pkey_regs_init(struct thread_struct *thread)
write_iamr(pkey_iamr_mask);
}
-static inline bool pkey_allows_readwrite(int pkey)
-{
- int pkey_shift = pkeyshift(pkey);
-
- if (!is_pkey_enabled(pkey))
- return true;
-
- return !(read_amr() & ((AMR_RD_BIT|AMR_WR_BIT) << pkey_shift));
-}
-
int __execute_only_pkey(struct mm_struct *mm)
{
return mm->context.execute_only_pkey;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 6ee17d09649c..974109bb85db 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -13,6 +13,7 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 67af871190c6..a95175c0972b 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -732,18 +732,13 @@ local:
}
preempt_enable();
}
+
void radix__flush_all_mm(struct mm_struct *mm)
{
__flush_all_mm(mm, false);
}
EXPORT_SYMBOL(radix__flush_all_mm);
-void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
-{
- tlb->need_flush_all = 1;
-}
-EXPORT_SYMBOL(radix__flush_tlb_pwc);
-
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize)
{
@@ -832,8 +827,7 @@ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
- unsigned long start, unsigned long end,
- bool flush_all_sizes)
+ unsigned long start, unsigned long end)
{
unsigned long pid;
@@ -879,26 +873,16 @@ is_local:
}
}
} else {
- bool hflush = flush_all_sizes;
- bool gflush = flush_all_sizes;
+ bool hflush = false;
unsigned long hstart, hend;
- unsigned long gstart, gend;
- if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
- hflush = true;
-
- if (hflush) {
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
hstart = (start + PMD_SIZE - 1) & PMD_MASK;
hend = end & PMD_MASK;
if (hstart == hend)
hflush = false;
- }
-
- if (gflush) {
- gstart = (start + PUD_SIZE - 1) & PUD_MASK;
- gend = end & PUD_MASK;
- if (gstart == gend)
- gflush = false;
+ else
+ hflush = true;
}
if (local) {
@@ -907,9 +891,6 @@ is_local:
if (hflush)
__tlbiel_va_range(hstart, hend, pid,
PMD_SIZE, MMU_PAGE_2M);
- if (gflush)
- __tlbiel_va_range(gstart, gend, pid,
- PUD_SIZE, MMU_PAGE_1G);
asm volatile("ptesync": : :"memory");
} else if (cputlb_use_tlbie()) {
asm volatile("ptesync": : :"memory");
@@ -917,10 +898,6 @@ is_local:
if (hflush)
__tlbie_va_range(hstart, hend, pid,
PMD_SIZE, MMU_PAGE_2M);
- if (gflush)
- __tlbie_va_range(gstart, gend, pid,
- PUD_SIZE, MMU_PAGE_1G);
-
asm volatile("eieio; tlbsync; ptesync": : :"memory");
} else {
_tlbiel_va_range_multicast(mm,
@@ -928,9 +905,6 @@ is_local:
if (hflush)
_tlbiel_va_range_multicast(mm,
hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
- if (gflush)
- _tlbiel_va_range_multicast(mm,
- gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G, false);
}
}
preempt_enable();
@@ -945,7 +919,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif
- __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+ __radix__flush_tlb_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(radix__flush_tlb_range);
@@ -1021,53 +995,19 @@ void radix__tlb_flush(struct mmu_gather *tlb)
* that flushes the process table entry cache upon process teardown.
* See the comment for radix in arch_exit_mmap().
*/
- if (tlb->fullmm) {
+ if (tlb->fullmm || tlb->need_flush_all) {
__flush_all_mm(mm, true);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
- } else if (mm_tlb_flush_nested(mm)) {
- /*
- * If there is a concurrent invalidation that is clearing ptes,
- * then it's possible this invalidation will miss one of those
- * cleared ptes and miss flushing the TLB. If this invalidate
- * returns before the other one flushes TLBs, that can result
- * in it returning while there are still valid TLBs inside the
- * range to be invalidated.
- *
- * See mm/memory.c:tlb_finish_mmu() for more details.
- *
- * The solution to this is ensure the entire range is always
- * flushed here. The problem for powerpc is that the flushes
- * are page size specific, so this "forced flush" would not
- * do the right thing if there are a mix of page sizes in
- * the range to be invalidated. So use __flush_tlb_range
- * which invalidates all possible page sizes in the range.
- *
- * PWC flush probably is not be required because the core code
- * shouldn't free page tables in this path, but accounting
- * for the possibility makes us a bit more robust.
- *
- * need_flush_all is an uncommon case because page table
- * teardown should be done with exclusive locks held (but
- * after locks are dropped another invalidate could come
- * in), it could be optimized further if necessary.
- */
- if (!tlb->need_flush_all)
- __radix__flush_tlb_range(mm, start, end, true);
- else
- radix__flush_all_mm(mm);
-#endif
} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
- if (!tlb->need_flush_all)
+ if (!tlb->freed_tables)
radix__flush_tlb_mm(mm);
else
radix__flush_all_mm(mm);
} else {
- if (!tlb->need_flush_all)
+ if (!tlb->freed_tables)
radix__flush_tlb_range_psize(mm, start, end, psize);
else
radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
}
- tlb->need_flush_all = 0;
}
static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
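With the nested-invalidation special case removed, radix__tlb_flush() reduces to a three-way policy keyed on the mmu_gather state. A condensed stand-alone rendering of that decision (illustrative names, not the kernel's helpers):

#include <stdbool.h>
#include <stdio.h>

/* Model of the simplified radix__tlb_flush() policy above; the
 * action strings stand in for the kernel's flush helpers. */
static const char *radix_flush_policy(bool fullmm, bool need_flush_all,
                                      bool psize_known, bool freed_tables)
{
        if (fullmm || need_flush_all)
                return "flush whole PID: TLB + page-walk cache";
        if (!psize_known)
                return freed_tables ? "flush whole PID" : "flush TLB only";
        return freed_tables ? "range flush + page-walk cache (psize)"
                            : "range flush (psize)";
}

int main(void)
{
        printf("%s\n", radix_flush_policy(false, false, true, true));
        return 0;
}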
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 2a82984356f8..5ab4f868e919 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
#endif
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_sync_page(paddr, size, dir);
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
__dma_sync_page(paddr, size, dir);
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8432c281de92..b5047f9b5dec 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -645,6 +645,7 @@ NOKPROBE_SYMBOL(do_page_fault);
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
+ int is_write = page_fault_is_write(regs->dsisr);
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
@@ -658,9 +659,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
case 0x300:
case 0x380:
case 0xe00:
- pr_alert("BUG: %s at 0x%08lx\n",
+ pr_alert("BUG: %s on %s at 0x%08lx\n",
regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
- "Unable to handle kernel data access", regs->dar);
+ "Unable to handle kernel data access",
+ is_write ? "write" : "read", regs->dar);
break;
case 0x400:
case 0x480:
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a84da92920f7..42ef7a6e6098 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -21,6 +21,13 @@
#include <asm/pgtable.h>
#include <asm/kup.h>
+phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
+EXPORT_SYMBOL_GPL(memstart_addr);
+phys_addr_t kernstart_addr __ro_after_init;
+EXPORT_SYMBOL_GPL(kernstart_addr);
+unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
+EXPORT_SYMBOL_GPL(kernstart_virt_addr);
+
static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index b04896a88d79..872df48ae41b 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -56,11 +56,6 @@
phys_addr_t total_memory;
phys_addr_t total_lowmem;
-phys_addr_t memstart_addr = (phys_addr_t)~0ull;
-EXPORT_SYMBOL(memstart_addr);
-phys_addr_t kernstart_addr;
-EXPORT_SYMBOL(kernstart_addr);
-
#ifdef CONFIG_RELOCATABLE
/* Used in __va()/__pa() */
long long virt_phys_offset;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 4e08246acd79..4002ced3596f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -63,38 +63,48 @@
#include <mm/mmu_decl.h>
-phys_addr_t memstart_addr = ~0;
-EXPORT_SYMBOL_GPL(memstart_addr);
-phys_addr_t kernstart_addr;
-EXPORT_SYMBOL_GPL(kernstart_addr);
-
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
- * Given an address within the vmemmap, determine the pfn of the page that
- * represents the start of the section it is within. Note that we have to
+ * Given an address within the vmemmap, determine the page that
+ * represents the start of the subsection it is within. Note that we have to
* do this by hand as the proffered address may not be correctly aligned.
* Subtraction of non-aligned pointers produces undefined results.
*/
-static unsigned long __meminit vmemmap_section_start(unsigned long page)
+static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
- unsigned long offset = page - ((unsigned long)(vmemmap));
+ unsigned long start_pfn;
+ unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));
 /* Return the page at the start of the subsection. */
- return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
+ start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
+ return pfn_to_page(start_pfn);
}
/*
- * Check if this vmemmap page is already initialised. If any section
- * which overlaps this vmemmap page is initialised then this page is
- * initialised already.
+ * Since memory is added in sub-section chunks, before creating a new vmemmap
+ * mapping, the kernel should check whether there is an existing memmap mapping
+ * covering the new subsection added. This is needed because the kernel can
+ * map the vmemmap area using 16MB pages, which will cover a memory range of
+ * 16G. Such a range covers multiple (2M) subsections.
+ *
+ * If any subsection in the 16G range mapped by vmemmap is valid we consider the
+ * vmemmap populated (There is a page table entry already present). We can't do
+ * a page table lookup here because with the hash translation we don't keep
+ * vmemmap details in linux page table.
*/
-static int __meminit vmemmap_populated(unsigned long start, int page_size)
+static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
- unsigned long end = start + page_size;
- start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));
+ struct page *start;
+ unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;
+ start = vmemmap_subsection_start(vmemmap_addr);
- for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
- if (pfn_valid(page_to_pfn((struct page *)start)))
+ for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
+ /*
+ * The pfn_valid() check here really checks whether any
+ * subsection in this range has already been initialized.
+ */
+ if (pfn_valid(page_to_pfn(start)))
return 1;
return 0;
@@ -201,6 +211,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
void *p = NULL;
int rc;
+ /*
+ * This vmemmap range is backing different subsections. If any
+ * of that subsection is marked valid, that means we already
+ * have initialized a page table covering this range and hence
+ * the vmemmap range is populated.
+ */
if (vmemmap_populated(start, page_size))
continue;
@@ -290,9 +306,10 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
struct page *page;
/*
- * the section has already be marked as invalid, so
- * vmemmap_populated() true means some other sections still
- * in this page, so skip it.
+ * We have already marked the subsection we are trying to remove
+ * invalid. So if we want to remove the vmemmap range, we
+ * need to make sure there is no subsection marked valid
+ * in this range.
*/
if (vmemmap_populated(start, page_size))
continue;
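The subsection lookup above is plain integer arithmetic on the vmemmap offset: divide by sizeof(struct page) to get a pfn, then mask down to the subsection boundary. A sketch with assumed constants (64-byte struct page, 32 pages per 2M subsection on 64K-page ppc64):

#include <stdio.h>

#define STRUCT_PAGE_SIZE	64UL	/* assumed sizeof(struct page) */
#define PAGES_PER_SUBSECTION	32UL	/* assumed: 2M / 64K pages */
#define PAGE_SUBSECTION_MASK	(~(PAGES_PER_SUBSECTION - 1))

/* Map a vmemmap address back to the pfn starting its subsection. */
static unsigned long subsection_start_pfn(unsigned long vmemmap_base,
                                          unsigned long vmemmap_addr)
{
        unsigned long offset = vmemmap_addr - vmemmap_base;

        return (offset / STRUCT_PAGE_SIZE) & PAGE_SUBSECTION_MASK;
}

int main(void)
{
        unsigned long vmemmap = 0x10000000UL;	/* assumed base */
        unsigned long addr = vmemmap + 100 * STRUCT_PAGE_SIZE;

        /* pfn 100 rounds down to pfn 96, the start of its subsection */
        printf("subsection starts at pfn %lu\n",
               subsection_start_pfn(vmemmap, addr));
        return 0;
}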
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index f36121f25243..743e11384dea 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -68,6 +68,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
/*
* Should check if it is a candidate for a BAT mapping
*/
+ pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
err = early_ioremap_range(ioremap_bot - size, p, size, prot);
if (err)
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index fd29e51700cd..50a99d9684f7 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -81,6 +81,8 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
if (slab_is_available())
return do_ioremap(paligned, offset, size, prot, caller);
+ pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
+
err = early_ioremap_range(ioremap_bot, paligned, size, prot);
if (err)
return NULL;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c95b7fe9f298..ad299e72ec30 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -105,6 +105,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
+#define FLUSH_CHUNK_SIZE SZ_1G
+/**
+ * flush_dcache_range_chunked(): Write any modified data cache blocks out to
+ * memory and invalidate them, in chunks of up to @chunk bytes.
+ * Does not invalidate the corresponding instruction cache blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ * @chunk: the max size of the chunks
+ */
+static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
+ unsigned long chunk)
+{
+ unsigned long i;
+
+ for (i = start; i < stop; i += chunk) {
+ flush_dcache_range(i, min(stop, i + chunk));
+ cond_resched();
+ }
+}
+
int __ref arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions)
{
@@ -121,7 +142,6 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
start, start + size, rc);
return -EFAULT;
}
- flush_dcache_range(start, start + size);
return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
@@ -138,7 +158,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
- flush_dcache_range(start, start + size);
+ flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
+
ret = remove_section_mapping(start, start + size);
WARN_ON_ONCE(ret);
@@ -217,15 +238,13 @@ void __init paging_init(void)
unsigned long long total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM();
-#ifdef CONFIG_PPC32
- unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
- unsigned long end = __fix_to_virt(FIX_HOLE);
+#ifdef CONFIG_HIGHMEM
+ unsigned long v = __fix_to_virt(FIX_KMAP_END);
+ unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);
for (; v < end; v += PAGE_SIZE)
map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
-#endif
-#ifdef CONFIG_HIGHMEM
map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
@@ -328,6 +347,120 @@ void free_initmem(void)
free_initmem_default(POISON_FREE_INITMEM);
}
+/**
+ * flush_coherent_icache() - if a CPU has a coherent icache, flush it
+ * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
+ * Return true if the cache was flushed, false otherwise
+ */
+static inline bool flush_coherent_icache(unsigned long addr)
+{
+ /*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+ mb(); /* sync */
+ icbi((void *)addr);
+ mb(); /* sync */
+ isync();
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+static void invalidate_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_icache_shift();
+ unsigned long bytes = l1_icache_bytes();
+ char *addr = (char *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ icbi(addr);
+
+ mb(); /* sync */
+ isync();
+}
+
+/**
+ * flush_icache_range: Write any modified data cache blocks out to memory
+ * and invalidate the corresponding blocks in the instruction cache
+ *
+ * Generic code will call this after writing memory, before executing from it.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ if (flush_coherent_icache(start))
+ return;
+
+ clean_dcache_range(start, stop);
+
+ if (IS_ENABLED(CONFIG_44x)) {
+ /*
+ * Flash invalidate on 44x because we are passed kmapped
+ * addresses and this doesn't work for userspace pages due to
+ * the virtually tagged icache.
+ */
+ iccci((void *)start);
+ mb(); /* sync */
+ isync();
+ } else
+ invalidate_icache_range(start, stop);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
+/**
+ * flush_dcache_icache_phys() - Flush a page by its physical address
+ * @physaddr: the physical address of the page
+ */
+static void flush_dcache_icache_phys(unsigned long physaddr)
+{
+ unsigned long bytes = l1_dcache_bytes();
+ unsigned long nb = PAGE_SIZE / bytes;
+ unsigned long addr = physaddr & PAGE_MASK;
+ unsigned long msr, msr0;
+ unsigned long loop1 = addr, loop2 = addr;
+
+ msr0 = mfmsr();
+ msr = msr0 & ~MSR_DR;
+ /*
+ * This must remain as ASM to prevent potential memory accesses
+ * while the data MMU is disabled
+ */
+ asm volatile(
+ " mtctr %2;\n"
+ " mtmsr %3;\n"
+ " isync;\n"
+ "0: dcbst 0, %0;\n"
+ " addi %0, %0, %4;\n"
+ " bdnz 0b;\n"
+ " sync;\n"
+ " mtctr %2;\n"
+ "1: icbi 0, %1;\n"
+ " addi %1, %1, %4;\n"
+ " bdnz 1b;\n"
+ " sync;\n"
+ " mtmsr %5;\n"
+ " isync;\n"
+ : "+&r" (loop1), "+&r" (loop2)
+ : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
+ : "ctr", "memory");
+}
+#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
+
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
@@ -360,12 +493,46 @@ void flush_dcache_icache_page(struct page *page)
__flush_dcache_icache(start);
kunmap_atomic(start);
} else {
- __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
+ unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;
+
+ if (flush_coherent_icache(addr))
+ return;
+ flush_dcache_icache_phys(addr);
}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
+/**
+ * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ * @p: the address of the page to flush
+ */
+void __flush_dcache_icache(void *p)
+{
+ unsigned long addr = (unsigned long)p;
+
+ if (flush_coherent_icache(addr))
+ return;
+
+ clean_dcache_range(addr, addr + PAGE_SIZE);
+
+ /*
+ * We don't flush the icache on 44x. Those have a virtual icache and we
+ * don't have access to the virtual address here (it's not the page
+ * vaddr but where it's mapped in user space). The flushing of the
+ * icache on these is handled elsewhere, when a change in the address
+ * space occurs, before returning to user space.
+ */
+
+ if (cpu_has_feature(MMU_FTR_TYPE_44x))
+ return;
+
+ invalidate_icache_range(addr, addr + PAGE_SIZE);
+}
+
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
clear_page(page);
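invalidate_icache_range() above rounds the start down to an icache-line boundary and rounds the byte count up before issuing one icbi per line. The same arithmetic as a stand-alone check, assuming a 128-byte L1 icache line:

#include <stdio.h>

int main(void)
{
        unsigned long bytes = 128, shift = 7;	/* assumed icache line */
        unsigned long start = 0x10064, stop = 0x10280;

        /* Round the start down to a line boundary... */
        unsigned long addr = start & ~(bytes - 1);
        /* ...and round the byte count up so the last line is covered. */
        unsigned long size = stop - addr + (bytes - 1);
        unsigned long lines = size >> shift;

        printf("icbi from %#lx for %lu lines\n", addr, lines); /* 5 */
        return 0;
}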
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c750ac9ec713..8e99649c24fc 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -139,10 +139,21 @@ extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
extern void adjust_total_lowmem(void);
extern int switch_to_as1(void);
extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
+void create_kaslr_tlb_entry(int entry, unsigned long virt, phys_addr_t phys);
+void reloc_kernel_entry(void *fdt, int addr);
+extern int is_second_reloc;
#endif
extern void loadcam_entry(unsigned int index);
extern void loadcam_multi(int first_idx, int num, int tmp_idx);
+#ifdef CONFIG_RANDOMIZE_BASE
+void kaslr_early_init(void *dt_ptr, phys_addr_t size);
+void kaslr_late_init(void);
+#else
+static inline void kaslr_early_init(void *dt_ptr, phys_addr_t size) {}
+static inline void kaslr_late_init(void) {}
+#endif
+
struct tlbcam {
u32 MAS0;
u32 MAS1;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 4a06cb342da2..090af2d2d3e4 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -103,6 +103,19 @@ static void mmu_patch_addis(s32 *site, long simm)
patch_instruction_site(site, instr);
}
+void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+{
+ unsigned long s = offset;
+ unsigned long v = PAGE_OFFSET + s;
+ phys_addr_t p = memstart_addr + s;
+
+ for (; s < top; s += PAGE_SIZE) {
+ map_kernel_page(v, p, prot);
+ v += PAGE_SIZE;
+ p += PAGE_SIZE;
+ }
+}
+
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long mapped;
@@ -115,10 +128,20 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
} else {
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
- _ALIGN(__pa(_einittext), 8 << 20));
+ mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);
+
+ /*
+ * Populate page tables to:
+ * - have them appear in /sys/kernel/debug/kernel_page_tables
+ * - allow the BDI to find the pages when they are not PINNED
+ */
+ mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
+ mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
+ mmu_mapin_immr();
}
mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
@@ -144,18 +167,41 @@ void mmu_mark_initmem_nx(void)
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
mmu_patch_addis(&patch__itlbmiss_linmem_top8,
-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+ if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long etext = __pa(_etext);
+
mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
+
+ /* Update page tables for PTDUMP and BDI */
+ mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
+ mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
+ mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
+ } else {
+ mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
+ mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
+ }
+ }
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
+ unsigned long sinittext = __pa(_sinittext);
+ unsigned long etext = __pa(_etext);
+
if (CONFIG_DATA_SHIFT < 23)
mmu_patch_addis(&patch__dtlbmiss_romem_top8,
-__pa(((unsigned long)_sinittext) &
~(LARGE_PAGE_SIZE_8M - 1)));
mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
+
+ /* Update page tables for PTDUMP and BDI */
+ mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
+ mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
+ mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
}
#endif
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index 33b6f6f29d3f..0424f6ce5bd8 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o
endif
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_booke.c
index 556e3cd52a35..b4eb06ceb189 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/fsl_booke.c
@@ -263,11 +263,13 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
int __initdata is_second_reloc;
notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
{
- unsigned long base = KERNELBASE;
+ unsigned long base = kernstart_virt_addr;
+ phys_addr_t size;
kernstart_addr = start;
if (is_second_reloc) {
virt_phys_offset = PAGE_OFFSET - memstart_addr;
+ kaslr_late_init();
return;
}
@@ -291,7 +293,7 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
start &= ~0x3ffffff;
base &= ~0x3ffffff;
virt_phys_offset = base - start;
- early_get_first_memblock_info(__va(dt_ptr), NULL);
+ early_get_first_memblock_info(__va(dt_ptr), &size);
/*
* We now get the memstart_addr, then we should check if this
* address is the same as what the PAGE_OFFSET map to now. If
@@ -316,6 +318,8 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
/* We should never reach here */
panic("Relocation error");
}
+
+ kaslr_early_init(__va(dt_ptr), size);
}
#endif
#endif
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
new file mode 100644
index 000000000000..4a75f2d9bf0e
--- /dev/null
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/memblock.h>
+#include <linux/libfdt.h>
+#include <linux/crash_core.h>
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/kdump.h>
+#include <mm/mmu_decl.h>
+#include <generated/compile.h>
+#include <generated/utsrelease.h>
+
+struct regions {
+ unsigned long pa_start;
+ unsigned long pa_end;
+ unsigned long kernel_size;
+ unsigned long dtb_start;
+ unsigned long dtb_end;
+ unsigned long initrd_start;
+ unsigned long initrd_end;
+ unsigned long crash_start;
+ unsigned long crash_end;
+ int reserved_mem;
+ int reserved_mem_addr_cells;
+ int reserved_mem_size_cells;
+};
+
+/* Simplified build-specific string for starting entropy. */
+static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
+
+struct regions __initdata regions;
+
+static __init void kaslr_get_cmdline(void *fdt)
+{
+ int node = fdt_path_offset(fdt, "/chosen");
+
+ early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
+}
+
+static unsigned long __init rotate_xor(unsigned long hash, const void *area,
+ size_t size)
+{
+ size_t i;
+ const unsigned long *ptr = area;
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+/* Attempt to create a simple starting entropy. This can make it different for
+ * every build, but it is still not enough. Stronger entropy should
+ * be added to make it change for every boot.
+ */
+static unsigned long __init get_boot_seed(void *fdt)
+{
+ unsigned long hash = 0;
+
+ hash = rotate_xor(hash, build_str, sizeof(build_str));
+ hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));
+
+ return hash;
+}
+
+static __init u64 get_kaslr_seed(void *fdt)
+{
+ int node, len;
+ fdt64_t *prop;
+ u64 ret;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return 0;
+
+ prop = fdt_getprop_w(fdt, node, "kaslr-seed", &len);
+ if (!prop || len != sizeof(u64))
+ return 0;
+
+ ret = fdt64_to_cpu(*prop);
+ *prop = 0;
+ return ret;
+}
+
+static __init bool regions_overlap(u32 s1, u32 e1, u32 s2, u32 e2)
+{
+ return e1 >= s2 && e2 >= s1;
+}
+
+static __init bool overlaps_reserved_region(const void *fdt, u32 start,
+ u32 end)
+{
+ int subnode, len, i;
+ u64 base, size;
+
+ /* check for overlap with /memreserve/ entries */
+ for (i = 0; i < fdt_num_mem_rsv(fdt); i++) {
+ if (fdt_get_mem_rsv(fdt, i, &base, &size) < 0)
+ continue;
+ if (regions_overlap(start, end, base, base + size))
+ return true;
+ }
+
+ if (regions.reserved_mem < 0)
+ return false;
+
+ /* check for overlap with static reservations in /reserved-memory */
+ for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);
+ subnode >= 0;
+ subnode = fdt_next_subnode(fdt, subnode)) {
+ const fdt32_t *reg;
+ u64 rsv_end;
+
+ len = 0;
+ reg = fdt_getprop(fdt, subnode, "reg", &len);
+ while (len >= (regions.reserved_mem_addr_cells +
+ regions.reserved_mem_size_cells)) {
+ base = fdt32_to_cpu(reg[0]);
+ if (regions.reserved_mem_addr_cells == 2)
+ base = (base << 32) | fdt32_to_cpu(reg[1]);
+
+ reg += regions.reserved_mem_addr_cells;
+ len -= 4 * regions.reserved_mem_addr_cells;
+
+ size = fdt32_to_cpu(reg[0]);
+ if (regions.reserved_mem_size_cells == 2)
+ size = (size << 32) | fdt32_to_cpu(reg[1]);
+
+ reg += regions.reserved_mem_size_cells;
+ len -= 4 * regions.reserved_mem_size_cells;
+
+ if (base >= regions.pa_end)
+ continue;
+
+ rsv_end = min(base + size, (u64)U32_MAX);
+
+ if (regions_overlap(start, end, base, rsv_end))
+ return true;
+ }
+ }
+ return false;
+}
+
+static __init bool overlaps_region(const void *fdt, u32 start,
+ u32 end)
+{
+ if (regions_overlap(start, end, __pa(_stext), __pa(_end)))
+ return true;
+
+ if (regions_overlap(start, end, regions.dtb_start,
+ regions.dtb_end))
+ return true;
+
+ if (regions_overlap(start, end, regions.initrd_start,
+ regions.initrd_end))
+ return true;
+
+ if (regions_overlap(start, end, regions.crash_start,
+ regions.crash_end))
+ return true;
+
+ return overlaps_reserved_region(fdt, start, end);
+}
+
+static void __init get_crash_kernel(void *fdt, unsigned long size)
+{
+#ifdef CONFIG_CRASH_CORE
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ ret = parse_crashkernel(boot_command_line, size, &crash_size,
+ &crash_base);
+ if (ret != 0 || crash_size == 0)
+ return;
+ if (crash_base == 0)
+ crash_base = KDUMP_KERNELBASE;
+
+ regions.crash_start = (unsigned long)crash_base;
+ regions.crash_end = (unsigned long)(crash_base + crash_size);
+
+ pr_debug("crash_base=0x%llx crash_size=0x%llx\n", crash_base, crash_size);
+#endif
+}
+
+static void __init get_initrd_range(void *fdt)
+{
+ u64 start, end;
+ int node, len;
+ const __be32 *prop;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return;
+
+ prop = fdt_getprop(fdt, node, "linux,initrd-start", &len);
+ if (!prop)
+ return;
+ start = of_read_number(prop, len / 4);
+
+ prop = fdt_getprop(fdt, node, "linux,initrd-end", &len);
+ if (!prop)
+ return;
+ end = of_read_number(prop, len / 4);
+
+ regions.initrd_start = (unsigned long)start;
+ regions.initrd_end = (unsigned long)end;
+
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
+}
+
+static __init unsigned long get_usable_address(const void *fdt,
+ unsigned long start,
+ unsigned long offset)
+{
+ unsigned long pa;
+ unsigned long pa_end;
+
+ for (pa = offset; (long)pa > (long)start; pa -= SZ_16K) {
+ pa_end = pa + regions.kernel_size;
+ if (overlaps_region(fdt, pa, pa_end))
+ continue;
+
+ return pa;
+ }
+ return 0;
+}
+
+static __init void get_cell_sizes(const void *fdt, int node, int *addr_cells,
+ int *size_cells)
+{
+ const int *prop;
+ int len;
+
+ /*
+ * Retrieve the #address-cells and #size-cells properties
+ * from the 'node', or use the default if not provided.
+ */
+ *addr_cells = *size_cells = 1;
+
+ prop = fdt_getprop(fdt, node, "#address-cells", &len);
+ if (len == 4)
+ *addr_cells = fdt32_to_cpu(*prop);
+ prop = fdt_getprop(fdt, node, "#size-cells", &len);
+ if (len == 4)
+ *size_cells = fdt32_to_cpu(*prop);
+}
+
+static unsigned long __init kaslr_legal_offset(void *dt_ptr, unsigned long index,
+ unsigned long offset)
+{
+ unsigned long koffset = 0;
+ unsigned long start;
+
+ while ((long)index >= 0) {
+ offset = memstart_addr + index * SZ_64M + offset;
+ start = memstart_addr + index * SZ_64M;
+ koffset = get_usable_address(dt_ptr, start, offset);
+ if (koffset)
+ break;
+ index--;
+ }
+
+ if (koffset != 0)
+ koffset -= memstart_addr;
+
+ return koffset;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ return strstr(boot_command_line, "nokaslr") != NULL;
+}
+
+static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
+ unsigned long kernel_sz)
+{
+ unsigned long offset, random;
+ unsigned long ram, linear_sz;
+ u64 seed;
+ unsigned long index;
+
+ kaslr_get_cmdline(dt_ptr);
+ if (kaslr_disabled())
+ return 0;
+
+ random = get_boot_seed(dt_ptr);
+
+ seed = get_tb() << 32;
+ seed ^= get_tb();
+ random = rotate_xor(random, &seed, sizeof(seed));
+
+ /*
+ * Retrieve (and wipe) the seed from the FDT
+ */
+ seed = get_kaslr_seed(dt_ptr);
+ if (seed)
+ random = rotate_xor(random, &seed, sizeof(seed));
+ else
+ pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
+
+ ram = min_t(phys_addr_t, __max_low_memory, size);
+ ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
+ linear_sz = min_t(unsigned long, ram, SZ_512M);
+
+ /* If the linear size is smaller than 64M, do not randomize */
+ if (linear_sz < SZ_64M)
+ return 0;
+
+ /* check for a reserved-memory node and record its cell sizes */
+ regions.reserved_mem = fdt_path_offset(dt_ptr, "/reserved-memory");
+ if (regions.reserved_mem >= 0)
+ get_cell_sizes(dt_ptr, regions.reserved_mem,
+ &regions.reserved_mem_addr_cells,
+ &regions.reserved_mem_size_cells);
+
+ regions.pa_start = memstart_addr;
+ regions.pa_end = memstart_addr + linear_sz;
+ regions.dtb_start = __pa(dt_ptr);
+ regions.dtb_end = __pa(dt_ptr) + fdt_totalsize(dt_ptr);
+ regions.kernel_size = kernel_sz;
+
+ get_initrd_range(dt_ptr);
+ get_crash_kernel(dt_ptr, ram);
+
+ /*
+ * Decide which 64M region to start in.
+ * Only use the low 8 bits of the random seed.
+ */
+ index = random & 0xFF;
+ index %= linear_sz / SZ_64M;
+
+ /* Decide offset inside 64M */
+ offset = random % (SZ_64M - kernel_sz);
+ offset = round_down(offset, SZ_16K);
+
+ return kaslr_legal_offset(dt_ptr, index, offset);
+}
+
+/*
+ * Decide whether to relocate the kernel to a random offset.
+ * void *dt_ptr - address of the device tree
+ * phys_addr_t size - size of the first memory block
+ */
+notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
+{
+ unsigned long tlb_virt;
+ phys_addr_t tlb_phys;
+ unsigned long offset;
+ unsigned long kernel_sz;
+
+ kernel_sz = (unsigned long)_end - (unsigned long)_stext;
+
+ offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
+ if (offset == 0)
+ return;
+
+ kernstart_virt_addr += offset;
+ kernstart_addr += offset;
+
+ is_second_reloc = 1;
+
+ if (offset >= SZ_64M) {
+ tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
+ tlb_phys = round_down(kernstart_addr, SZ_64M);
+
+ /* Create kernel map to relocate in */
+ create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
+ }
+
+ /* Copy the kernel to its new location and run */
+ memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
+ flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);
+
+ reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
+}
+
+void __init kaslr_late_init(void)
+{
+ /* If randomized, clear the original kernel */
+ if (kernstart_virt_addr != KERNELBASE) {
+ unsigned long kernel_sz;
+
+ kernel_sz = (unsigned long)_end - kernstart_virt_addr;
+ memzero_explicit((void *)KERNELBASE, kernel_sz);
+ }
+}
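The rotate-and-XOR mixer above is self-contained enough to try in user space; this stand-alone rendering (with a made-up build string) shows how the build-specific starting entropy is derived before the timebase and any kaslr-seed property are folded in:

#include <stdio.h>
#include <string.h>

/* Same rotate-by-7-and-XOR mixing as the kernel function above. */
static unsigned long rotate_xor(unsigned long hash, const void *area,
                                size_t size)
{
        const unsigned long *ptr = area;
        size_t i;

        for (i = 0; i < size / sizeof(hash); i++) {
                hash = (hash << (sizeof(hash) * 8 - 7)) | (hash >> 7);
                hash ^= ptr[i];
        }
        return hash;
}

int main(void)
{
        /* Hypothetical stand-in for the generated build_str. */
        static const char build_str[] = "5.x.y (user@host) (gcc) #1 SMP";
        unsigned long hash = rotate_xor(0, build_str, sizeof(build_str));

        printf("starting entropy: %#lx\n", hash);
        return 0;
}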
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8ec5dfb65b2e..73b84166d06a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -117,10 +117,7 @@ void __init mapin_ram(void)
if (base >= top)
continue;
base = mmu_mapin_ram(base, top);
- if (IS_ENABLED(CONFIG_BDI_SWITCH))
- __mapin_ram_chunk(reg->base, top);
- else
- __mapin_ram_chunk(base, top);
+ __mapin_ram_chunk(base, top);
}
}
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index c84bbd4298a0..35d542515faf 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -284,16 +284,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
}
}
-static inline int current_is_64bit(void)
-{
- /*
- * We can't use test_thread_flag() here because we may be on an
- * interrupt stack, and the thread flags don't get copied over
- * from the thread_info on the main stack to the interrupt stack.
- */
- return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
-}
-
#else /* CONFIG_PPC64 */
/*
* On 32-bit we just access the address and let hash_page create a
@@ -321,11 +311,6 @@ static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry
{
}
-static inline int current_is_64bit(void)
-{
- return 0;
-}
-
static inline int valid_user_sp(unsigned long sp, int is_64)
{
if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
@@ -486,7 +471,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
- if (current_is_64bit())
+ if (!is_32bit_task())
perf_callchain_user_64(entry, regs);
else
perf_callchain_user_32(entry, regs);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index ba12dc14a3d1..8c0d324f657e 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -650,6 +650,7 @@ static const struct file_operations mpc52xx_wdt_fops = {
.llseek = no_llseek,
.write = mpc52xx_wdt_write,
.unlocked_ioctl = mpc52xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mpc52xx_wdt_open,
.release = mpc52xx_wdt_release,
};
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index f46d7bf3b140..6399865a625e 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -18,6 +18,8 @@
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <mm/mmu_decl.h>
+
#include "mpc83xx.h"
static __be32 __iomem *restart_reg_base;
@@ -145,6 +147,15 @@ void __init mpc83xx_setup_arch(void)
if (ppc_md.progress)
ppc_md.progress("mpc83xx_setup_arch()", 0);
+ if (!__map_without_bats) {
+ phys_addr_t immrbase = get_immrbase();
+ int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M;
+ unsigned long va = fix_to_virt(FIX_IMMR_BASE);
+
+ setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG);
+ update_bats();
+ }
+
mpc83xx_setup_pci();
}
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 4a4efa906d35..240a26d88b07 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -39,7 +39,6 @@
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
-#include <sysdev/simple_gpio.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
@@ -181,12 +180,6 @@ static int __init mpc836x_usb_cfg(void)
qe_usb_clock_set(QE_CLK21, 48000000);
} else {
setbits8(&bcsr[13], BCSR13_USBMODE);
- /*
- * The BCSR GPIOs are used to control power and
- * speed of the USB transceiver. This is needed for
- * the USB Host only.
- */
- simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio");
}
of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index fe0606439b5a..a554b6d87cf7 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -86,29 +86,6 @@ void __init mpc85xx_cpm2_pic_init(void)
#endif
#ifdef CONFIG_QUICC_ENGINE
-void __init mpc85xx_qe_init(void)
-{
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,qe");
- if (!np) {
- np = of_find_node_by_name(NULL, "qe");
- if (!np) {
- pr_err("%s: Could not find Quicc Engine node\n",
- __func__);
- return;
- }
- }
-
- if (!of_device_is_available(np)) {
- of_node_put(np);
- return;
- }
-
- of_node_put(np);
-
-}
-
void __init mpc85xx_qe_par_io_init(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 7ee2c6628f64..a328a741b457 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -66,8 +66,6 @@ void __init corenet_gen_setup_arch(void)
swiotlb_detect_4g();
pr_info("%s board\n", ppc_md.name);
-
- mpc85xx_qe_init();
}
static const struct of_device_id of_device_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index fa23f9b0592c..cb84c5c56c36 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -10,10 +10,8 @@ static inline void __init mpc85xx_cpm2_pic_init(void) {}
#endif /* CONFIG_CPM2 */
#ifdef CONFIG_QUICC_ENGINE
-extern void mpc85xx_qe_init(void);
extern void mpc85xx_qe_par_io_init(void);
#else
-static inline void __init mpc85xx_qe_init(void) {}
static inline void __init mpc85xx_qe_par_io_init(void) {}
#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 5ca254256c47..381a6ac8cb4b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -43,7 +43,6 @@
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
-#include <sysdev/simple_gpio.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/qe_ic.h>
#include <asm/mpic.h>
@@ -238,7 +237,6 @@ static void __init mpc85xx_mds_qe_init(void)
{
struct device_node *np;
- mpc85xx_qe_init();
mpc85xx_qe_par_io_init();
mpc85xx_mds_reset_ucc_phys();
@@ -351,11 +349,6 @@ machine_arch_initcall(mpc8569_mds, board_fixups);
static int __init mpc85xx_publish_devices(void)
{
- if (machine_is(mpc8568_mds))
- simple_gpiochip_init("fsl,mpc8568mds-bcsr-gpio");
- if (machine_is(mpc8569_mds))
- simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
-
return mpc85xx_common_publish_devices();
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index d3c540ee558f..7f9a84f85766 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -89,7 +89,6 @@ static void __init mpc85xx_rdb_setup_arch(void)
fsl_pci_assign_primary();
#ifdef CONFIG_QUICC_ENGINE
- mpc85xx_qe_init();
mpc85xx_qe_par_io_init();
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
if (machine_is(p1025_rdb)) {
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 720b0c0f03ba..6c3c0cdaee9a 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -72,7 +72,6 @@ static void __init twr_p1025_setup_arch(void)
fsl_pci_assign_primary();
#ifdef CONFIG_QUICC_ENGINE
- mpc85xx_qe_init();
mpc85xx_qe_par_io_init();
#if IS_ENABLED(CONFIG_UCC_GETH) || IS_ENABLED(CONFIG_SERIAL_QE)
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 96b27f6fdd0f..7733d0607da2 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -34,7 +34,6 @@
#include <linux/of_platform.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
-#include <sysdev/simple_gpio.h>
#include "mpc86xx.h"
@@ -93,9 +92,6 @@ static const struct of_device_id mpc8610_ids[] __initconst = {
static int __init mpc8610_declare_of_platform_devices(void)
{
- /* Firstly, register PIXIS GPIOs. */
- simple_gpiochip_init("fsl,fpga-pixis-gpio-bank");
-
/* Enable wakeup on PIXIS' event IRQ. */
mpc8610_suspend_init();
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index 0f65c51271db..a43ee7d1ff85 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -51,7 +51,7 @@
#define CPM_MAP_SIZE (0x4000)
cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
-immap_t __iomem *mpc8xx_immr;
+immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE;
static cpic8xx_t __iomem *cpic_reg;
static struct irq_domain *cpm_pic_host;
@@ -130,7 +130,7 @@ static const struct irq_domain_ops cpm_pic_host_ops = {
.map = cpm_pic_host_map,
};
-unsigned int cpm_pic_init(void)
+unsigned int __init cpm_pic_init(void)
{
struct device_node *np = NULL;
struct resource res;
@@ -201,12 +201,6 @@ void __init cpm_reset(void)
{
sysconf8xx_t __iomem *siu_conf;
- mpc8xx_immr = ioremap(get_immrbase(), 0x4000);
- if (!mpc8xx_immr) {
- printk(KERN_CRIT "Could not map IMMR\n");
- return;
- }
-
cpmp = &mpc8xx_immr->im_cpm;
#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
@@ -306,7 +300,7 @@ struct cpm_ioport32e {
__be32 dir, par, sor, odr, dat;
};
-static void cpm1_set_pin32(int port, int pin, int flags)
+static void __init cpm1_set_pin32(int port, int pin, int flags)
{
struct cpm_ioport32e __iomem *iop;
pin = 1 << (31 - pin);
@@ -348,7 +342,7 @@ static void cpm1_set_pin32(int port, int pin, int flags)
}
}
-static void cpm1_set_pin16(int port, int pin, int flags)
+static void __init cpm1_set_pin16(int port, int pin, int flags)
{
struct cpm_ioport16 __iomem *iop =
(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;
@@ -386,7 +380,7 @@ static void cpm1_set_pin16(int port, int pin, int flags)
}
}
-void cpm1_set_pin(enum cpm_port port, int pin, int flags)
+void __init cpm1_set_pin(enum cpm_port port, int pin, int flags)
{
if (port == CPM_PORTB || port == CPM_PORTE)
cpm1_set_pin32(port, pin, flags);
@@ -394,7 +388,7 @@ void cpm1_set_pin(enum cpm_port port, int pin, int flags)
cpm1_set_pin16(port, pin, flags);
}
-int cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
+int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
int shift;
int i, bits = 0;
diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c
index e9617d35fd1f..f2ba837249d6 100644
--- a/arch/powerpc/platforms/8xx/pic.c
+++ b/arch/powerpc/platforms/8xx/pic.c
@@ -125,7 +125,7 @@ static const struct irq_domain_ops mpc8xx_pic_host_ops = {
.xlate = mpc8xx_pic_host_xlate,
};
-int mpc8xx_pic_init(void)
+int __init mpc8xx_pic_init(void)
{
struct resource res;
struct device_node *np;
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d82e3664ffdf..e28df298df56 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -303,16 +303,6 @@ config GEN_RTC
replacing their get_rtc_time/set_rtc_time callbacks with
a proper RTC device driver.
-config SIMPLE_GPIO
- bool "Support for simple, memory-mapped GPIO controllers"
- depends on PPC
- select GPIOLIB
- help
- Say Y here to support simple, memory-mapped GPIO controllers.
- These are usually BCSRs used to control board's switches, LEDs,
- chip-selects, Ethernet/USB PHY's power and various other small
- on-board peripherals.
-
config MCU_MPC8349EMITX
bool "MPC8349E-mITX MCU driver"
depends on I2C=y && PPC_83xx
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 12543e53fa96..8d7f9c3dc771 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -415,13 +415,13 @@ config PPC_MM_SLICES
bool
config PPC_HAVE_PMU_SUPPORT
- bool
+ bool
config PPC_PERF_CTRS
- def_bool y
- depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
- help
- This enables the powerpc-specific perf_event back-end.
+ def_bool y
+ depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
+ help
+ This enables the powerpc-specific perf_event back-end.
config FORCE_SMP
# Allow platforms to force SMP=y by selecting this
@@ -459,7 +459,6 @@ config NOT_COHERENT_CACHE
bool
depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
GAMECUBE_COMMON || AMIGAONE
- select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_SYNC_DMA_FOR_CPU
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 2dd452a047cd..9b1586b85152 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -198,14 +198,12 @@ static int spufs_fill_dir(struct dentry *dir,
static int spufs_dir_close(struct inode *inode, struct file *file)
{
- struct spu_context *ctx;
struct inode *parent;
struct dentry *dir;
int ret;
dir = file->f_path.dentry;
parent = d_inode(dir->d_parent);
- ctx = SPUFS_I(d_inode(dir))->i_ctx;
inode_lock_nested(parent, I_MUTEX_PARENT);
ret = spufs_rmdir(parent, dir);
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index a3ac9646119d..c0f8120045c3 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o
obj-$(CONFIG_OCXL_BASE) += ocxl.o
obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o
+obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index a2aa5e433ac8..5cd0f52d258f 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -290,3 +290,6 @@ OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT);
OPAL_CALL(opal_mpipl_update, OPAL_MPIPL_UPDATE);
OPAL_CALL(opal_mpipl_register_tag, OPAL_MPIPL_REGISTER_TAG);
OPAL_CALL(opal_mpipl_query_tag, OPAL_MPIPL_QUERY_TAG);
+OPAL_CALL(opal_secvar_get, OPAL_SECVAR_GET);
+OPAL_CALL(opal_secvar_get_next, OPAL_SECVAR_GET_NEXT);
+OPAL_CALL(opal_secvar_enqueue_update, OPAL_SECVAR_ENQUEUE_UPDATE);
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index dc599e787f78..c16d44f6f1d1 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -13,7 +13,7 @@
#include <asm/opal.h>
-DEFINE_MUTEX(powercap_mutex);
+static DEFINE_MUTEX(powercap_mutex);
static struct kobject *powercap_kobj;
diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c
index b6ccb3026c6c..69d7e75950d1 100644
--- a/arch/powerpc/platforms/powernv/opal-psr.c
+++ b/arch/powerpc/platforms/powernv/opal-psr.c
@@ -13,11 +13,11 @@
#include <asm/opal.h>
-DEFINE_MUTEX(psr_mutex);
+static DEFINE_MUTEX(psr_mutex);
static struct kobject *psr_kobj;
-struct psr_attr {
+static struct psr_attr {
u32 handle;
struct kobj_attribute attr;
} *psr_attrs;
diff --git a/arch/powerpc/platforms/powernv/opal-secvar.c b/arch/powerpc/platforms/powernv/opal-secvar.c
new file mode 100644
index 000000000000..14133e120bdd
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-secvar.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PowerNV code for secure variables
+ *
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Claudio Carvalho
+ * Nayna Jain
+ *
+ * APIs to access secure variables managed by OPAL.
+ */
+
+#define pr_fmt(fmt) "secvar: "fmt
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <asm/opal.h>
+#include <asm/secvar.h>
+#include <asm/secure_boot.h>
+
+static int opal_status_to_err(int rc)
+{
+ int err;
+
+ switch (rc) {
+ case OPAL_SUCCESS:
+ err = 0;
+ break;
+ case OPAL_UNSUPPORTED:
+ err = -ENXIO;
+ break;
+ case OPAL_PARAMETER:
+ err = -EINVAL;
+ break;
+ case OPAL_RESOURCE:
+ err = -ENOSPC;
+ break;
+ case OPAL_HARDWARE:
+ err = -EIO;
+ break;
+ case OPAL_NO_MEM:
+ err = -ENOMEM;
+ break;
+ case OPAL_EMPTY:
+ err = -ENOENT;
+ break;
+ case OPAL_PARTIAL:
+ err = -EFBIG;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int opal_get_variable(const char *key, uint64_t ksize,
+ u8 *data, uint64_t *dsize)
+{
+ int rc;
+
+ if (!key || !dsize)
+ return -EINVAL;
+
+ *dsize = cpu_to_be64(*dsize);
+
+ rc = opal_secvar_get(key, ksize, data, dsize);
+
+ *dsize = be64_to_cpu(*dsize);
+
+ return opal_status_to_err(rc);
+}
+
+static int opal_get_next_variable(const char *key, uint64_t *keylen,
+ uint64_t keybufsize)
+{
+ int rc;
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ *keylen = cpu_to_be64(*keylen);
+
+ rc = opal_secvar_get_next(key, keylen, keybufsize);
+
+ *keylen = be64_to_cpu(*keylen);
+
+ return opal_status_to_err(rc);
+}
+
+static int opal_set_variable(const char *key, uint64_t ksize, u8 *data,
+ uint64_t dsize)
+{
+ int rc;
+
+ if (!key || !data)
+ return -EINVAL;
+
+ rc = opal_secvar_enqueue_update(key, ksize, data, dsize);
+
+ return opal_status_to_err(rc);
+}
+
+static const struct secvar_operations opal_secvar_ops = {
+ .get = opal_get_variable,
+ .get_next = opal_get_next_variable,
+ .set = opal_set_variable,
+};
+
+static int opal_secvar_probe(struct platform_device *pdev)
+{
+ if (!opal_check_token(OPAL_SECVAR_GET)
+ || !opal_check_token(OPAL_SECVAR_GET_NEXT)
+ || !opal_check_token(OPAL_SECVAR_ENQUEUE_UPDATE)) {
+ pr_err("OPAL doesn't support secure variables\n");
+ return -ENODEV;
+ }
+
+ set_secvar_ops(&opal_secvar_ops);
+
+ return 0;
+}
+
+static const struct of_device_id opal_secvar_match[] = {
+ { .compatible = "ibm,secvar-backend",},
+ {},
+};
+
+static struct platform_driver opal_secvar_driver = {
+ .driver = {
+ .name = "secvar",
+ .of_match_table = opal_secvar_match,
+ },
+};
+
+static int __init opal_secvar_init(void)
+{
+ return platform_driver_probe(&opal_secvar_driver, opal_secvar_probe);
+}
+device_initcall(opal_secvar_init);
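opal_get_variable() and opal_get_next_variable() above convert the in/out size to big-endian in place around the firmware call and restore it afterwards. A small sketch of that round trip, assuming a little-endian host (on big-endian the conversion is the identity):

#include <stdio.h>
#include <stdint.h>

/* cpu_to_be64/be64_to_cpu stand-in for a little-endian host. */
static uint64_t bswap64(uint64_t v)
{
        return __builtin_bswap64(v);
}

int main(void)
{
        uint64_t dsize = 512;		/* caller's buffer size, CPU order */

        dsize = bswap64(dsize);		/* what the OPAL call sees */
        /* ... opal_secvar_get(key, ksize, data, &dsize) runs here ... */
        dsize = bswap64(dsize);		/* restored for the caller */

        printf("dsize after round trip: %llu\n", (unsigned long long)dsize);
        return 0;
}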
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
index 31f13c13275f..f8ae1fb0c102 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -13,7 +13,7 @@
#include <asm/opal.h>
-DEFINE_MUTEX(sg_mutex);
+static DEFINE_MUTEX(sg_mutex);
static struct kobject *sg_kobj;
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 38e90270280b..a6ee08009f0f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -35,6 +35,16 @@
#include "powernv.h"
+#define OPAL_MSG_QUEUE_MAX 16
+
+struct opal_msg_node {
+ struct list_head list;
+ struct opal_msg msg;
+};
+
+static DEFINE_SPINLOCK(msg_list_lock);
+static LIST_HEAD(msg_list);
+
/* /sys/firmware/opal */
struct kobject *opal_kobj;
@@ -50,6 +60,8 @@ struct mcheck_recoverable_range {
u64 recover_addr;
};
+static int msg_list_size;
+
static struct mcheck_recoverable_range *mc_recoverable_range;
static int mc_recoverable_range_len;
@@ -237,6 +249,43 @@ static int __init opal_register_exception_handlers(void)
}
machine_early_initcall(powernv, opal_register_exception_handlers);
+static void queue_replay_msg(void *msg)
+{
+ struct opal_msg_node *msg_node;
+
+ if (msg_list_size < OPAL_MSG_QUEUE_MAX) {
+ msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
+ if (msg_node) {
+ INIT_LIST_HEAD(&msg_node->list);
+ memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
+ list_add_tail(&msg_node->list, &msg_list);
+ msg_list_size++;
+ } else
+ pr_warn_once("message queue no memory\n");
+
+ if (msg_list_size >= OPAL_MSG_QUEUE_MAX)
+ pr_warn_once("message queue full\n");
+ }
+}
+
+static void dequeue_replay_msg(enum opal_msg_type msg_type)
+{
+ struct opal_msg_node *msg_node, *tmp;
+
+ list_for_each_entry_safe(msg_node, tmp, &msg_list, list) {
+ if (be32_to_cpu(msg_node->msg.msg_type) != msg_type)
+ continue;
+
+ atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
+ msg_type,
+ &msg_node->msg);
+
+ list_del(&msg_node->list);
+ kfree(msg_node);
+ msg_list_size--;
+ }
+}
+
/*
* Opal message notifier based on message type. Allow subscribers to get
 * notified for a specific message type.
@@ -244,14 +293,30 @@ machine_early_initcall(powernv, opal_register_exception_handlers);
int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb)
{
+ int ret;
+ unsigned long flags;
+
if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
pr_warn("%s: Invalid arguments, msg_type:%d\n",
__func__, msg_type);
return -EINVAL;
}
- return atomic_notifier_chain_register(
- &opal_msg_notifier_head[msg_type], nb);
+ spin_lock_irqsave(&msg_list_lock, flags);
+ ret = atomic_notifier_chain_register(
+ &opal_msg_notifier_head[msg_type], nb);
+
+ /*
+ * If the registration succeeded, replay any queued messages that came
+ * in before the notifier chain was registered. msg_list_lock is held
+ * here so they are delivered before any subsequent messages.
+ */
+ if (ret == 0)
+ dequeue_replay_msg(msg_type);
+
+ spin_unlock_irqrestore(&msg_list_lock, flags);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(opal_message_notifier_register);
@@ -265,6 +330,23 @@ EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
+ unsigned long flags;
+ bool queued = false;
+
+ spin_lock_irqsave(&msg_list_lock, flags);
+ if (opal_msg_notifier_head[msg_type].head == NULL) {
+ /*
+ * Queue up the msg since no notifiers have registered
+ * yet for this msg_type.
+ */
+ queue_replay_msg(msg);
+ queued = true;
+ }
+ spin_unlock_irqrestore(&msg_list_lock, flags);
+
+ if (queued)
+ return;
+
/* notify subscribers */
atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
msg_type, msg);
@@ -1002,6 +1084,9 @@ static int __init opal_init(void)
/* Initialise OPAL Power control interface */
opal_power_control_init();
+ /* Initialize OPAL secure variables */
+ opal_pdev_init("ibm,secvar-backend");
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
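To make the replay semantics concrete, a hedged subscriber sketch (names invented for illustration): messages of the subscribed type that arrived before registration, up to OPAL_MSG_QUEUE_MAX of them, are delivered in arrival order from inside opal_message_notifier_register() itself, before any later message can be notified.

static int example_msg_notify(struct notifier_block *nb,
			      unsigned long msg_type, void *msg)
{
	struct opal_msg *m = msg;

	pr_info("OPAL message, type %u\n", be32_to_cpu(m->msg_type));
	return NOTIFY_OK;
}

static struct notifier_block example_msg_nb = {
	.notifier_call = example_msg_notify,
};

static int __init example_subscriber_init(void)
{
	/* Any queued OPAL_MSG_HMI_EVT messages replay from this call. */
	return opal_message_notifier_register(OPAL_MSG_HMI_EVT,
					      &example_msg_nb);
}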
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index a0b9c0c23ed2..5dc6847d5f4c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -340,14 +340,6 @@ free_tces_exit:
return -ENOMEM;
}
-static void pnv_iommu_table_group_link_free(struct rcu_head *head)
-{
- struct iommu_table_group_link *tgl = container_of(head,
- struct iommu_table_group_link, rcu);
-
- kfree(tgl);
-}
-
void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
struct iommu_table_group *table_group)
{
@@ -363,7 +355,7 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
if (tgl->table_group == table_group) {
list_del_rcu(&tgl->next);
- call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
+ kfree_rcu(tgl, rcu);
found = true;
break;
}
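The pci-ioda-tce.c change is an instance of a general simplification: when an RCU callback only does container_of() plus kfree(), kfree_rcu() expresses the same thing without a named callback. A minimal sketch with made-up names:

struct foo {
	struct list_head next;
	struct rcu_head rcu;
};

static void remove_foo(struct foo *f)
{
	list_del_rcu(&f->next);
	/*
	 * Equivalent to call_rcu() with a callback that only kfree()s:
	 * kfree_rcu() takes the object and the name of its rcu_head
	 * member, and frees the object after a grace period.
	 */
	kfree_rcu(f, rcu);
}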
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index c28d0d9b7ee0..da1068a9c263 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3086,8 +3086,8 @@ static int pnv_pci_diag_data_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
- pnv_pci_diag_data_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set,
+ "%llu\n");
#endif /* CONFIG_DEBUG_FS */
@@ -3112,8 +3112,8 @@ static void pnv_pci_ioda_create_dbgfs(void)
continue;
}
- debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
- &pnv_pci_diag_data_fops);
+ debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
+ hose, &pnv_pci_diag_data_fops);
}
#endif /* CONFIG_DEBUG_FS */
}
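The debugfs pairing used above, in general form (placeholder names): fops generated by DEFINE_DEBUGFS_ATTRIBUTE() protect themselves with debugfs_file_get()/debugfs_file_put(), so they are registered through debugfs_create_file_unsafe(), skipping the redundant removal-protection proxy that debugfs_create_file() would wrap around them.

static u64 example_val;

static int example_get(void *data, u64 *val)
{
	*val = *(u64 *)data;	/* data is &example_val below */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, NULL, "%llu\n");

static void example_debugfs_init(struct dentry *parent)
{
	/* "_unsafe" is fine: example_fops already guards file removal. */
	debugfs_create_file_unsafe("example", 0400, parent, &example_val,
				   &example_fops);
}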
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 2825d004dece..c0bea75ac27b 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -945,6 +945,23 @@ void __init pnv_pci_init(void)
if (!firmware_has_feature(FW_FEATURE_OPAL))
return;
+#ifdef CONFIG_PCIEPORTBUS
+ /*
+ * On PowerNV, PCIe devices are (currently) managed in cooperation
+ * with firmware. This isn't *strictly* required, but there are
+ * enough assumptions baked into both firmware and the platform code
+ * that it's unwise to allow the portbus services to be used.
+ *
+ * We need to fix this eventually, but for now set this flag to
+ * disable the portbus driver. The AER service isn't required since
+ * AER events are handled via EEH. The pciehp hotplug driver can't work
+ * without kernel changes (and portbus binding breaks pnv_php). The
+ * other services also require some thinking about how we're going
+ * to integrate them.
+ */
+ pcie_ports_disabled = true;
+#endif
+
/* Look for IODA IO-Hubs. */
for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
pnv_pci_init_ioda_hub(np);
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 9e35cddddf73..595e9f8a6539 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -108,6 +108,7 @@ config PPC_SMLPAR
config CMM
tristate "Collaborative memory management"
depends on PPC_SMLPAR
+ select MEMORY_BALLOON
default y
help
Select this option, if you want to enable the kernel interface
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index b33251d75927..91571841df8a 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -19,6 +19,10 @@
#include <linux/stringify.h>
#include <linux/swap.h>
#include <linux/device.h>
+#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
+#include <linux/magic.h>
+#include <linux/balloon_compaction.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/mmu.h>
@@ -38,12 +42,8 @@
#define CMM_MIN_MEM_MB 256
#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
-/*
- * The priority level tries to ensure that this notifier is called as
- * late as possible to reduce thrashing in the shared memory pool.
- */
+
#define CMM_MEM_HOTPLUG_PRI 1
-#define CMM_MEM_ISOLATE_PRI 15
static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
@@ -51,6 +51,8 @@ static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
+static bool __read_mostly simulate;
+static unsigned long simulate_loan_target_kb;
static struct device cmm_dev;
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
@@ -74,35 +76,31 @@ MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
module_param_named(debug, cmm_debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
"[Default=" __stringify(CMM_DEBUG) "]");
-
-#define CMM_NR_PAGES ((PAGE_SIZE - sizeof(void *) - sizeof(unsigned long)) / sizeof(unsigned long))
+module_param_named(simulate, simulate, bool, 0444);
+MODULE_PARM_DESC(simulate, "Enable simulation mode (no communication with hw).");
#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
-struct cmm_page_array {
- struct cmm_page_array *next;
- unsigned long index;
- unsigned long page[CMM_NR_PAGES];
-};
-
-static unsigned long loaned_pages;
+static atomic_long_t loaned_pages;
static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages;
-static struct cmm_page_array *cmm_page_list;
-static DEFINE_SPINLOCK(cmm_lock);
-
static DEFINE_MUTEX(hotplug_mutex);
static int hotplug_occurred; /* protected by the hotplug mutex */
static struct task_struct *cmm_thread_ptr;
+static struct balloon_dev_info b_dev_info;
-static long plpar_page_set_loaned(unsigned long vpa)
+static long plpar_page_set_loaned(struct page *page)
{
+ const unsigned long vpa = page_to_phys(page);
unsigned long cmo_page_sz = cmo_get_page_size();
long rc = 0;
int i;
+ if (unlikely(simulate))
+ return 0;
+
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
@@ -113,12 +111,16 @@ static long plpar_page_set_loaned(unsigned long vpa)
return rc;
}
-static long plpar_page_set_active(unsigned long vpa)
+static long plpar_page_set_active(struct page *page)
{
+ const unsigned long vpa = page_to_phys(page);
unsigned long cmo_page_sz = cmo_get_page_size();
long rc = 0;
int i;
+ if (unlikely(simulate))
+ return 0;
+
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
@@ -138,8 +140,7 @@ static long plpar_page_set_active(unsigned long vpa)
**/
static long cmm_alloc_pages(long nr)
{
- struct cmm_page_array *pa, *npa;
- unsigned long addr;
+ struct page *page;
long rc;
cmm_dbg("Begin request for %ld pages\n", nr);
@@ -156,46 +157,19 @@ static long cmm_alloc_pages(long nr)
break;
}
- addr = __get_free_page(GFP_NOIO | __GFP_NOWARN |
- __GFP_NORETRY | __GFP_NOMEMALLOC);
- if (!addr)
+ page = balloon_page_alloc();
+ if (!page)
break;
- spin_lock(&cmm_lock);
- pa = cmm_page_list;
- if (!pa || pa->index >= CMM_NR_PAGES) {
- /* Need a new page for the page list. */
- spin_unlock(&cmm_lock);
- npa = (struct cmm_page_array *)__get_free_page(
- GFP_NOIO | __GFP_NOWARN |
- __GFP_NORETRY | __GFP_NOMEMALLOC);
- if (!npa) {
- pr_info("%s: Can not allocate new page list\n", __func__);
- free_page(addr);
- break;
- }
- spin_lock(&cmm_lock);
- pa = cmm_page_list;
-
- if (!pa || pa->index >= CMM_NR_PAGES) {
- npa->next = pa;
- npa->index = 0;
- pa = npa;
- cmm_page_list = pa;
- } else
- free_page((unsigned long) npa);
- }
-
- if ((rc = plpar_page_set_loaned(__pa(addr)))) {
+ rc = plpar_page_set_loaned(page);
+ if (rc) {
pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
- spin_unlock(&cmm_lock);
- free_page(addr);
+ __free_page(page);
break;
}
- pa->page[pa->index++] = addr;
- loaned_pages++;
- totalram_pages_dec();
- spin_unlock(&cmm_lock);
+ balloon_page_enqueue(&b_dev_info, page);
+ atomic_long_inc(&loaned_pages);
+ adjust_managed_page_count(page, -1);
nr--;
}
@@ -212,30 +186,19 @@ static long cmm_alloc_pages(long nr)
**/
static long cmm_free_pages(long nr)
{
- struct cmm_page_array *pa;
- unsigned long addr;
+ struct page *page;
cmm_dbg("Begin free of %ld pages.\n", nr);
- spin_lock(&cmm_lock);
- pa = cmm_page_list;
while (nr) {
- if (!pa || pa->index <= 0)
+ page = balloon_page_dequeue(&b_dev_info);
+ if (!page)
break;
- addr = pa->page[--pa->index];
-
- if (pa->index == 0) {
- pa = pa->next;
- free_page((unsigned long) cmm_page_list);
- cmm_page_list = pa;
- }
-
- plpar_page_set_active(__pa(addr));
- free_page(addr);
- loaned_pages--;
+ plpar_page_set_active(page);
+ adjust_managed_page_count(page, 1);
+ __free_page(page);
+ atomic_long_dec(&loaned_pages);
nr--;
- totalram_pages_inc();
}
- spin_unlock(&cmm_lock);
cmm_dbg("End request with %ld pages unfulfilled\n", nr);
return nr;
}
@@ -257,7 +220,7 @@ static int cmm_oom_notify(struct notifier_block *self,
cmm_dbg("OOM processing started\n");
nr = cmm_free_pages(nr);
- loaned_pages_target = loaned_pages;
+ loaned_pages_target = atomic_long_read(&loaned_pages);
*freed += KB2PAGES(oom_kb) - nr;
oom_freed_pages += KB2PAGES(oom_kb) - nr;
cmm_dbg("OOM processing complete\n");
@@ -274,19 +237,24 @@ static int cmm_oom_notify(struct notifier_block *self,
**/
static void cmm_get_mpp(void)
{
+ const long __loaned_pages = atomic_long_read(&loaned_pages);
+ const long total_pages = totalram_pages() + __loaned_pages;
int rc;
struct hvcall_mpp_data mpp_data;
signed long active_pages_target, page_loan_request, target;
- signed long total_pages = totalram_pages() + loaned_pages;
signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
- rc = h_get_mpp(&mpp_data);
-
- if (rc != H_SUCCESS)
- return;
-
- page_loan_request = div_s64((s64)mpp_data.loan_request, PAGE_SIZE);
- target = page_loan_request + (signed long)loaned_pages;
+ if (likely(!simulate)) {
+ rc = h_get_mpp(&mpp_data);
+ if (rc != H_SUCCESS)
+ return;
+ page_loan_request = div_s64((s64)mpp_data.loan_request,
+ PAGE_SIZE);
+ target = page_loan_request + __loaned_pages;
+ } else {
+ target = KB2PAGES(simulate_loan_target_kb);
+ page_loan_request = target - __loaned_pages;
+ }
if (target < 0 || total_pages < min_mem_pages)
target = 0;
@@ -307,7 +275,7 @@ static void cmm_get_mpp(void)
loaned_pages_target = target;
cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
- page_loan_request, loaned_pages, loaned_pages_target,
+ page_loan_request, __loaned_pages, loaned_pages_target,
oom_freed_pages, totalram_pages());
}
@@ -325,6 +293,7 @@ static struct notifier_block cmm_oom_nb = {
static int cmm_thread(void *dummy)
{
unsigned long timeleft;
+ long __loaned_pages;
while (1) {
timeleft = msleep_interruptible(delay * 1000);
@@ -355,11 +324,12 @@ static int cmm_thread(void *dummy)
cmm_get_mpp();
- if (loaned_pages_target > loaned_pages) {
- if (cmm_alloc_pages(loaned_pages_target - loaned_pages))
- loaned_pages_target = loaned_pages;
- } else if (loaned_pages_target < loaned_pages)
- cmm_free_pages(loaned_pages - loaned_pages_target);
+ __loaned_pages = atomic_long_read(&loaned_pages);
+ if (loaned_pages_target > __loaned_pages) {
+ if (cmm_alloc_pages(loaned_pages_target - __loaned_pages))
+ loaned_pages_target = __loaned_pages;
+ } else if (loaned_pages_target < __loaned_pages)
+ cmm_free_pages(__loaned_pages - loaned_pages_target);
}
return 0;
}
@@ -373,7 +343,7 @@ static int cmm_thread(void *dummy)
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
-CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
+CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(atomic_long_read(&loaned_pages)));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
static ssize_t show_oom_pages(struct device *dev,
@@ -406,11 +376,18 @@ static struct device_attribute *cmm_attrs[] = {
&dev_attr_oom_freed_kb,
};
+static DEVICE_ULONG_ATTR(simulate_loan_target_kb, 0644,
+ simulate_loan_target_kb);
+
static struct bus_type cmm_subsys = {
.name = "cmm",
.dev_name = "cmm",
};
+static void cmm_release_device(struct device *dev)
+{
+}
+
/**
* cmm_sysfs_register - Register with sysfs
*
@@ -426,6 +403,7 @@ static int cmm_sysfs_register(struct device *dev)
dev->id = 0;
dev->bus = &cmm_subsys;
+ dev->release = cmm_release_device;
if ((rc = device_register(dev)))
goto subsys_unregister;
@@ -435,6 +413,11 @@ static int cmm_sysfs_register(struct device *dev)
goto fail;
}
+ if (!simulate)
+ return 0;
+ rc = device_create_file(dev, &dev_attr_simulate_loan_target_kb.attr);
+ if (rc)
+ goto fail;
return 0;
fail:
@@ -471,7 +454,7 @@ static int cmm_reboot_notifier(struct notifier_block *nb,
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
cmm_thread_ptr = NULL;
- cmm_free_pages(loaned_pages);
+ cmm_free_pages(atomic_long_read(&loaned_pages));
}
return NOTIFY_DONE;
}
@@ -481,142 +464,6 @@ static struct notifier_block cmm_reboot_nb = {
};
/**
- * cmm_count_pages - Count the number of pages loaned in a particular range.
- *
- * @arg: memory_isolate_notify structure with address range and count
- *
- * Return value:
- * 0 on success
- **/
-static unsigned long cmm_count_pages(void *arg)
-{
- struct memory_isolate_notify *marg = arg;
- struct cmm_page_array *pa;
- unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn);
- unsigned long end = start + (marg->nr_pages << PAGE_SHIFT);
- unsigned long idx;
-
- spin_lock(&cmm_lock);
- pa = cmm_page_list;
- while (pa) {
- if ((unsigned long)pa >= start && (unsigned long)pa < end)
- marg->pages_found++;
- for (idx = 0; idx < pa->index; idx++)
- if (pa->page[idx] >= start && pa->page[idx] < end)
- marg->pages_found++;
- pa = pa->next;
- }
- spin_unlock(&cmm_lock);
- return 0;
-}
-
-/**
- * cmm_memory_isolate_cb - Handle memory isolation notifier calls
- * @self: notifier block struct
- * @action: action to take
- * @arg: struct memory_isolate_notify data for handler
- *
- * Return value:
- * NOTIFY_OK or notifier error based on subfunction return value
- **/
-static int cmm_memory_isolate_cb(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- int ret = 0;
-
- if (action == MEM_ISOLATE_COUNT)
- ret = cmm_count_pages(arg);
-
- return notifier_from_errno(ret);
-}
-
-static struct notifier_block cmm_mem_isolate_nb = {
- .notifier_call = cmm_memory_isolate_cb,
- .priority = CMM_MEM_ISOLATE_PRI
-};
-
-/**
- * cmm_mem_going_offline - Unloan pages where memory is to be removed
- * @arg: memory_notify structure with page range to be offlined
- *
- * Return value:
- * 0 on success
- **/
-static int cmm_mem_going_offline(void *arg)
-{
- struct memory_notify *marg = arg;
- unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn);
- unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT);
- struct cmm_page_array *pa_curr, *pa_last, *npa;
- unsigned long idx;
- unsigned long freed = 0;
-
- cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n",
- start_page, marg->nr_pages);
- spin_lock(&cmm_lock);
-
- /* Search the page list for pages in the range to be offlined */
- pa_last = pa_curr = cmm_page_list;
- while (pa_curr) {
- for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) {
- if ((pa_curr->page[idx] < start_page) ||
- (pa_curr->page[idx] >= end_page))
- continue;
-
- plpar_page_set_active(__pa(pa_curr->page[idx]));
- free_page(pa_curr->page[idx]);
- freed++;
- loaned_pages--;
- totalram_pages_inc();
- pa_curr->page[idx] = pa_last->page[--pa_last->index];
- if (pa_last->index == 0) {
- if (pa_curr == pa_last)
- pa_curr = pa_last->next;
- pa_last = pa_last->next;
- free_page((unsigned long)cmm_page_list);
- cmm_page_list = pa_last;
- }
- }
- pa_curr = pa_curr->next;
- }
-
- /* Search for page list structures in the range to be offlined */
- pa_last = NULL;
- pa_curr = cmm_page_list;
- while (pa_curr) {
- if (((unsigned long)pa_curr >= start_page) &&
- ((unsigned long)pa_curr < end_page)) {
- npa = (struct cmm_page_array *)__get_free_page(
- GFP_NOIO | __GFP_NOWARN |
- __GFP_NORETRY | __GFP_NOMEMALLOC);
- if (!npa) {
- spin_unlock(&cmm_lock);
- cmm_dbg("Failed to allocate memory for list "
- "management. Memory hotplug "
- "failed.\n");
- return -ENOMEM;
- }
- memcpy(npa, pa_curr, PAGE_SIZE);
- if (pa_curr == cmm_page_list)
- cmm_page_list = npa;
- if (pa_last)
- pa_last->next = npa;
- free_page((unsigned long) pa_curr);
- freed++;
- pa_curr = npa;
- }
-
- pa_last = pa_curr;
- pa_curr = pa_curr->next;
- }
-
- spin_unlock(&cmm_lock);
- cmm_dbg("Released %ld pages in the search range.\n", freed);
-
- return 0;
-}
-
-/**
* cmm_memory_cb - Handle memory hotplug notifier calls
* @self: notifier block struct
* @action: action to take
@@ -635,7 +482,6 @@ static int cmm_memory_cb(struct notifier_block *self,
case MEM_GOING_OFFLINE:
mutex_lock(&hotplug_mutex);
hotplug_occurred = 1;
- ret = cmm_mem_going_offline(arg);
break;
case MEM_OFFLINE:
case MEM_CANCEL_OFFLINE:
@@ -656,6 +502,106 @@ static struct notifier_block cmm_mem_nb = {
.priority = CMM_MEM_HOTPLUG_PRI
};
+#ifdef CONFIG_BALLOON_COMPACTION
+static struct vfsmount *balloon_mnt;
+
+static int cmm_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, PPC_CMM_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type balloon_fs = {
+ .name = "ppc-cmm",
+ .init_fs_context = cmm_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
+{
+ unsigned long flags;
+
+ /*
+ * loan/"inflate" the newpage first.
+ *
+ * We might race against the cmm_thread, which might discover after our
+ * loan request that another page is to be unloaned. However, once
+ * the cmm_thread runs again later, this error will automatically
+ * be corrected.
+ */
+ if (plpar_page_set_loaned(newpage)) {
+ /* Unlikely, but possible. Tell the caller not to retry now. */
+ pr_err_ratelimited("%s: Cannot set page to loaned.", __func__);
+ return -EBUSY;
+ }
+
+ /* balloon page list reference */
+ get_page(newpage);
+
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ balloon_page_insert(b_dev_info, newpage);
+ balloon_page_delete(page);
+ b_dev_info->isolated_pages--;
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+ /*
+ * activate/"deflate" the old page. We ignore any errors just like the
+ * other callers.
+ */
+ plpar_page_set_active(page);
+
+ /* balloon page list reference */
+ put_page(page);
+
+ return MIGRATEPAGE_SUCCESS;
+}
+
+static int cmm_balloon_compaction_init(void)
+{
+ int rc;
+
+ balloon_devinfo_init(&b_dev_info);
+ b_dev_info.migratepage = cmm_migratepage;
+
+ balloon_mnt = kern_mount(&balloon_fs);
+ if (IS_ERR(balloon_mnt)) {
+ rc = PTR_ERR(balloon_mnt);
+ balloon_mnt = NULL;
+ return rc;
+ }
+
+ b_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
+ if (IS_ERR(b_dev_info.inode)) {
+ rc = PTR_ERR(b_dev_info.inode);
+ b_dev_info.inode = NULL;
+ kern_unmount(balloon_mnt);
+ balloon_mnt = NULL;
+ return rc;
+ }
+
+ b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
+ return 0;
+}
+static void cmm_balloon_compaction_deinit(void)
+{
+ if (b_dev_info.inode)
+ iput(b_dev_info.inode);
+ b_dev_info.inode = NULL;
+ kern_unmount(balloon_mnt);
+ balloon_mnt = NULL;
+}
+#else /* CONFIG_BALLOON_COMPACTION */
+static int cmm_balloon_compaction_init(void)
+{
+ return 0;
+}
+
+static void cmm_balloon_compaction_deinit(void)
+{
+}
+#endif /* CONFIG_BALLOON_COMPACTION */
+
/**
* cmm_init - Module initialization
*
@@ -664,26 +610,31 @@ static struct notifier_block cmm_mem_nb = {
**/
static int cmm_init(void)
{
- int rc = -ENOMEM;
+ int rc;
- if (!firmware_has_feature(FW_FEATURE_CMO))
+ if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
- if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
+ rc = cmm_balloon_compaction_init();
+ if (rc)
return rc;
+ rc = register_oom_notifier(&cmm_oom_nb);
+ if (rc < 0)
+ goto out_balloon_compaction;
+
if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
goto out_oom_notifier;
if ((rc = cmm_sysfs_register(&cmm_dev)))
goto out_reboot_notifier;
- if (register_memory_notifier(&cmm_mem_nb) ||
- register_memory_isolate_notifier(&cmm_mem_isolate_nb))
+ rc = register_memory_notifier(&cmm_mem_nb);
+ if (rc)
goto out_unregister_notifier;
if (cmm_disabled)
- return rc;
+ return 0;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (IS_ERR(cmm_thread_ptr)) {
@@ -691,16 +642,16 @@ static int cmm_init(void)
goto out_unregister_notifier;
}
- return rc;
-
+ return 0;
out_unregister_notifier:
unregister_memory_notifier(&cmm_mem_nb);
- unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
unregister_oom_notifier(&cmm_oom_nb);
+out_balloon_compaction:
+ cmm_balloon_compaction_deinit();
return rc;
}
@@ -717,9 +668,9 @@ static void cmm_exit(void)
unregister_oom_notifier(&cmm_oom_nb);
unregister_reboot_notifier(&cmm_reboot_nb);
unregister_memory_notifier(&cmm_mem_nb);
- unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
- cmm_free_pages(loaned_pages);
+ cmm_free_pages(atomic_long_read(&loaned_pages));
cmm_unregister_sysfs(&cmm_dev);
+ cmm_balloon_compaction_deinit();
}
/**
@@ -739,7 +690,7 @@ static int cmm_set_disable(const char *val, const struct kernel_param *kp)
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
cmm_thread_ptr = NULL;
- cmm_free_pages(loaned_pages);
+ cmm_free_pages(atomic_long_read(&loaned_pages));
} else if (!disable && cmm_disabled) {
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (IS_ERR(cmm_thread_ptr))
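Stripped of the CMM specifics, the page lifecycle this cmm.c rework adopts looks roughly like the following hedged sketch (hypervisor notification and error handling elided, names invented). With CONFIG_BALLOON_COMPACTION, the migratepage callback then only has to loan the new page, swap it into the tracked list under pages_lock and re-activate the old one, as cmm_migratepage() above does.

static struct balloon_dev_info bdi;	/* balloon_devinfo_init()ed */

static int inflate_one(void)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;
	/* (notify the hypervisor that the page is loaned here) */
	balloon_page_enqueue(&bdi, page);	/* tracked and migratable */
	adjust_managed_page_count(page, -1);	/* hide from totalram */
	return 0;
}

static int deflate_one(void)
{
	struct page *page = balloon_page_dequeue(&bdi);

	if (!page)
		return -ENOENT;
	/* (notify the hypervisor that the page is active again here) */
	adjust_managed_page_count(page, 1);
	__free_page(page);
	return 0;
}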
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 2b87480f2837..eab8aa293743 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -19,7 +19,6 @@
struct dtl {
struct dtl_entry *buf;
- struct dentry *file;
int cpu;
int buf_entries;
u64 last_idx;
@@ -320,46 +319,28 @@ static const struct file_operations dtl_fops = {
static struct dentry *dtl_dir;
-static int dtl_setup_file(struct dtl *dtl)
+static void dtl_setup_file(struct dtl *dtl)
{
char name[10];
sprintf(name, "cpu-%d", dtl->cpu);
- dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
- if (!dtl->file)
- return -ENOMEM;
-
- return 0;
+ debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}
static int dtl_init(void)
{
- struct dentry *event_mask_file, *buf_entries_file;
- int rc, i;
+ int i;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return -ENODEV;
/* set up common debugfs structure */
- rc = -ENOMEM;
dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
- if (!dtl_dir) {
- printk(KERN_WARNING "%s: can't create dtl root dir\n",
- __func__);
- goto err;
- }
- event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
- dtl_dir, &dtl_event_mask);
- buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
- dtl_dir, &dtl_buf_entries);
-
- if (!event_mask_file || !buf_entries_file) {
- printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
- goto err_remove_dir;
- }
+ debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
+ debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);
/* set up the per-cpu log structures */
for_each_possible_cpu(i) {
@@ -367,16 +348,9 @@ static int dtl_init(void)
spin_lock_init(&dtl->lock);
dtl->cpu = i;
- rc = dtl_setup_file(dtl);
- if (rc)
- goto err_remove_dir;
+ dtl_setup_file(dtl);
}
return 0;
-
-err_remove_dir:
- debugfs_remove_recursive(dtl_dir);
-err:
- return rc;
}
machine_arch_initcall(pseries, dtl_init);
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index bbda646b63b5..3e8cbfe7a80f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -338,6 +338,62 @@ static void pseries_remove_processor(struct device_node *np)
cpu_maps_update_done();
}
+static int dlpar_offline_cpu(struct device_node *dn)
+{
+ int rc = 0;
+ unsigned int cpu;
+ int len, nthreads, i;
+ const __be32 *intserv;
+ u32 thread;
+
+ intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+ if (!intserv)
+ return -EINVAL;
+
+ nthreads = len / sizeof(u32);
+
+ cpu_maps_update_begin();
+ for (i = 0; i < nthreads; i++) {
+ thread = be32_to_cpu(intserv[i]);
+ for_each_present_cpu(cpu) {
+ if (get_hard_smp_processor_id(cpu) != thread)
+ continue;
+
+ if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
+ break;
+
+ if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+ set_preferred_offline_state(cpu,
+ CPU_STATE_OFFLINE);
+ cpu_maps_update_done();
+ timed_topology_update(1);
+ rc = device_offline(get_cpu_device(cpu));
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
+ break;
+ }
+
+ /*
+ * The cpu is in CPU_STATE_INACTIVE.
+ * Upgrade its state to CPU_STATE_OFFLINE.
+ */
+ set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+ WARN_ON(plpar_hcall_norets(H_PROD, thread) != H_SUCCESS);
+ __cpu_die(cpu);
+ break;
+ }
+ if (cpu == num_possible_cpus()) {
+ pr_warn("Could not find cpu to offline with physical id 0x%x\n",
+ thread);
+ }
+ }
+ cpu_maps_update_done();
+
+out:
+ return rc;
+}
+
static int dlpar_online_cpu(struct device_node *dn)
{
int rc = 0;
@@ -364,8 +420,10 @@ static int dlpar_online_cpu(struct device_node *dn)
timed_topology_update(1);
find_and_online_cpu_nid(cpu);
rc = device_online(get_cpu_device(cpu));
- if (rc)
+ if (rc) {
+ dlpar_offline_cpu(dn);
goto out;
+ }
cpu_maps_update_begin();
break;
@@ -407,17 +465,67 @@ static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
return found;
}
+static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
+{
+ struct property *info;
+ struct of_drc_info drc;
+ const __be32 *value;
+ u32 index;
+ int count, i, j;
+
+ info = of_find_property(parent, "ibm,drc-info", NULL);
+ if (!info)
+ return false;
+
+ value = of_prop_next_u32(info, NULL, &count);
+
+ /* First value of ibm,drc-info is number of drc-info records */
+ if (value)
+ value++;
+ else
+ return false;
+
+ for (i = 0; i < count; i++) {
+ if (of_read_drc_info_cell(&info, &value, &drc))
+ return false;
+
+ if (strncmp(drc.drc_type, "CPU", 3))
+ break;
+
+ if (drc_index > drc.last_drc_index)
+ continue;
+
+ index = drc.drc_index_start;
+ for (j = 0; j < drc.num_sequential_elems; j++) {
+ if (drc_index == index)
+ return true;
+
+ index += drc.sequential_inc;
+ }
+ }
+
+ return false;
+}
+
static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
bool found = false;
int rc, index;
- index = 0;
+ if (of_find_property(parent, "ibm,drc-info", NULL))
+ return drc_info_valid_index(parent, drc_index);
+
+ /* Note that the format of the ibm,drc-indexes array is
+ * the number of entries in the array followed by the array
+ * of drc values, so we start looking at index = 1.
+ */
+ index = 1;
while (!found) {
u32 drc;
rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
index++, &drc);
+
if (rc)
break;
@@ -505,63 +613,6 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return rc;
}
-static int dlpar_offline_cpu(struct device_node *dn)
-{
- int rc = 0;
- unsigned int cpu;
- int len, nthreads, i;
- const __be32 *intserv;
- u32 thread;
-
- intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
- if (!intserv)
- return -EINVAL;
-
- nthreads = len / sizeof(u32);
-
- cpu_maps_update_begin();
- for (i = 0; i < nthreads; i++) {
- thread = be32_to_cpu(intserv[i]);
- for_each_present_cpu(cpu) {
- if (get_hard_smp_processor_id(cpu) != thread)
- continue;
-
- if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
- break;
-
- if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
- set_preferred_offline_state(cpu,
- CPU_STATE_OFFLINE);
- cpu_maps_update_done();
- timed_topology_update(1);
- rc = device_offline(get_cpu_device(cpu));
- if (rc)
- goto out;
- cpu_maps_update_begin();
- break;
-
- }
-
- /*
- * The cpu is in CPU_STATE_INACTIVE.
- * Upgrade it's state to CPU_STATE_OFFLINE.
- */
- set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
- BUG_ON(plpar_hcall_norets(H_PROD, thread)
- != H_SUCCESS);
- __cpu_die(cpu);
- break;
- }
- if (cpu == num_possible_cpus())
- printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
- }
- cpu_maps_update_done();
-
-out:
- return rc;
-
-}
-
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
int rc;
@@ -717,19 +768,52 @@ static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
return rc;
}
-static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
+static int find_drc_info_cpus_to_add(struct device_node *cpus,
+ struct property *info,
+ u32 *cpu_drcs, u32 cpus_to_add)
{
- struct device_node *parent;
+ struct of_drc_info drc;
+ const __be32 *value;
+ u32 count, drc_index;
int cpus_found = 0;
- int index, rc;
+ int i, j;
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_warn("Could not find CPU root node in device tree\n");
- kfree(cpu_drcs);
+ if (!info)
return -1;
+
+ value = of_prop_next_u32(info, NULL, &count);
+ if (value)
+ value++;
+
+ for (i = 0; i < count; i++) {
+ of_read_drc_info_cell(&info, &value, &drc);
+ if (strncmp(drc.drc_type, "CPU", 3))
+ break;
+
+ drc_index = drc.drc_index_start;
+ for (j = 0; j < drc.num_sequential_elems; j++) {
+ if (dlpar_cpu_exists(cpus, drc_index))
+ continue;
+
+ cpu_drcs[cpus_found++] = drc_index;
+
+ if (cpus_found == cpus_to_add)
+ return cpus_found;
+
+ drc_index += drc.sequential_inc;
+ }
}
+ return cpus_found;
+}
+
+static int find_drc_index_cpus_to_add(struct device_node *cpus,
+ u32 *cpu_drcs, u32 cpus_to_add)
+{
+ int cpus_found = 0;
+ int index, rc;
+ u32 drc_index;
+
/* Search the ibm,drc-indexes array for possible CPU drcs to
* add. Note that the format of the ibm,drc-indexes array is
* the number of entries in the array followed by the array
@@ -737,25 +821,25 @@ static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
*/
index = 1;
while (cpus_found < cpus_to_add) {
- u32 drc;
+ rc = of_property_read_u32_index(cpus, "ibm,drc-indexes",
+ index++, &drc_index);
- rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
- index++, &drc);
if (rc)
break;
- if (dlpar_cpu_exists(parent, drc))
+ if (dlpar_cpu_exists(cpus, drc_index))
continue;
- cpu_drcs[cpus_found++] = drc;
+ cpu_drcs[cpus_found++] = drc_index;
}
- of_node_put(parent);
return cpus_found;
}
static int dlpar_cpu_add_by_count(u32 cpus_to_add)
{
+ struct device_node *parent;
+ struct property *info;
u32 *cpu_drcs;
int cpus_added = 0;
int cpus_found;
@@ -767,7 +851,21 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
if (!cpu_drcs)
return -EINVAL;
- cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
+ parent = of_find_node_by_path("/cpus");
+ if (!parent) {
+ pr_warn("Could not find CPU root node in device tree\n");
+ kfree(cpu_drcs);
+ return -1;
+ }
+
+ info = of_find_property(parent, "ibm,drc-info", NULL);
+ if (info)
+ cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add);
+ else
+ cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add);
+
+ of_node_put(parent);
+
if (cpus_found < cpus_to_add) {
pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
cpus_found, cpus_to_add);
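Both lookup paths above depend on the device-tree layout of the properties. ibm,drc-indexes is a cell count followed by that many DRC index values, which is why the loops start at index 1. A hedged sketch of a minimal walk (helper name invented):

/* ibm,drc-indexes = <N drc_0 drc_1 ... drc_N-1>; cell 0 is the count. */
static void walk_drc_indexes(struct device_node *parent)
{
	u32 count, drc, i;

	if (of_property_read_u32_index(parent, "ibm,drc-indexes", 0, &count))
		return;

	for (i = 1; i <= count; i++) {
		if (of_property_read_u32_index(parent, "ibm,drc-indexes",
					       i, &drc))
			break;
		pr_debug("drc index 0x%x\n", drc);
	}
}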
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 8e700390f3d6..c126b94d1943 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -338,7 +338,7 @@ static int pseries_remove_mem_node(struct device_node *np)
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
int i, scns_per_block;
- int rc = 1;
+ bool rc = true;
unsigned long pfn, block_sz;
u64 phys_addr;
@@ -363,11 +363,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
if (!pfn_present(pfn))
continue;
- rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
phys_addr += MIN_MEMORY_BLOCK_SIZE;
}
- return rc ? true : false;
+ return rc;
}
static int dlpar_add_lmb(struct drmem_lmb *);
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index bcc1b67417a8..c40c62ec432e 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -129,7 +129,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval,
static int __init hcall_inst_init(void)
{
struct dentry *hcall_root;
- struct dentry *hcall_file;
char cpu_name_buf[CPU_NAME_BUF_SIZE];
int cpu;
@@ -145,17 +144,12 @@ static int __init hcall_inst_init(void)
}
hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);
- if (!hcall_root)
- return -ENOMEM;
for_each_possible_cpu(cpu) {
snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
- hcall_file = debugfs_create_file(cpu_name_buf, 0444,
- hcall_root,
- per_cpu(hcall_stats, cpu),
- &hcall_inst_seq_fops);
- if (!hcall_file)
- return -ENOMEM;
+ debugfs_create_file(cpu_name_buf, 0444, hcall_root,
+ per_cpu(hcall_stats, cpu),
+ &hcall_inst_seq_fops);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f87a5c64e24d..60cb29ae4739 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -774,7 +774,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
/* don't remove a bolted entry */
lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
- (0x1UL << 4), &dummy1, &dummy2);
+ HPTE_V_BOLTED, &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
@@ -938,11 +938,19 @@ static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
- /* Bolted entries are always in the primary group */
+ /*
+ * We try to always keep bolted entries in the primary hash group,
+ * but in some cases we can find them in the secondary group too.
+ */
hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
- if (slot < 0)
- return -1;
+ if (slot < 0) {
+ /* Try in secondary */
+ hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
+ if (slot < 0)
+ return -1;
+ }
return hpte_group + slot;
}
@@ -1992,30 +2000,17 @@ static int __init vpa_debugfs_init(void)
{
char name[16];
long i;
- static struct dentry *vpa_dir;
+ struct dentry *vpa_dir;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
- if (!vpa_dir) {
- pr_warn("%s: can't create vpa root dir\n", __func__);
- return -ENOMEM;
- }
/* set up the per-cpu vpa file*/
for_each_possible_cpu(i) {
- struct dentry *d;
-
sprintf(name, "cpu-%ld", i);
-
- d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
- &vpa_fops);
- if (!d) {
- pr_warn("%s: can't create per-cpu vpa file\n",
- __func__);
- return -ENOMEM;
- }
+ debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
}
return 0;
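The secondary-group fallback added to pSeries_lpar_hpte_find() relies on the hashed page table convention that the secondary bucket of a hash is its bitwise complement under the same mask. Factored out as a sketch (this refactoring is illustrative, not code from the patch):

static long find_hpte(unsigned long hash, unsigned long want_v)
{
	unsigned long group;
	long slot;

	/* primary bucket */
	group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, group);
	if (slot >= 0)
		return group + slot;

	/* secondary bucket: complement of the hash, same mask */
	group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __pSeries_lpar_hpte_find(want_v, group);
	return slot < 0 ? -1 : group + slot;
}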
diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c
index 6df192f38f80..66dfd8256712 100644
--- a/arch/powerpc/platforms/pseries/of_helpers.c
+++ b/arch/powerpc/platforms/pseries/of_helpers.c
@@ -45,14 +45,14 @@ struct device_node *pseries_of_derive_parent(const char *path)
int of_read_drc_info_cell(struct property **prop, const __be32 **curval,
struct of_drc_info *data)
{
- const char *p;
+ const char *p = (char *)(*curval);
const __be32 *p2;
if (!data)
return -EINVAL;
/* Get drc-type:encode-string */
- p = data->drc_type = (char*) (*curval);
+ data->drc_type = (char *)p;
p = of_prop_next_string(*prop, p);
if (!p)
return -EINVAL;
@@ -65,9 +65,7 @@ int of_read_drc_info_cell(struct property **prop, const __be32 **curval,
/* Get drc-index-start:encode-int */
p2 = (const __be32 *)p;
- p2 = of_prop_next_u32(*prop, p2, &data->drc_index_start);
- if (!p2)
- return -EINVAL;
+ data->drc_index_start = be32_to_cpu(*p2);
/* Get drc-name-suffix-start:encode-int */
p2 = of_prop_next_u32(*prop, p2, &data->drc_name_suffix_start);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 61883291defc..c2ef320ba1bf 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -152,7 +152,7 @@ static int papr_scm_meta_get(struct papr_scm_priv *p,
int len, read;
int64_t ret;
- if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
+ if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
return -EINVAL;
for (len = hdr->in_length; len; len -= read) {
@@ -206,7 +206,7 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
__be64 data_be;
int64_t ret;
- if ((hdr->in_offset + hdr->in_length) >= p->metadata_size)
+ if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
return -EINVAL;
for (len = hdr->in_length; len; len -= wrote) {
@@ -284,25 +284,6 @@ int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
-static const struct attribute_group *region_attr_groups[] = {
- &nd_region_attribute_group,
- &nd_device_attribute_group,
- &nd_mapping_attribute_group,
- &nd_numa_attribute_group,
- NULL,
-};
-
-static const struct attribute_group *bus_attr_groups[] = {
- &nvdimm_bus_attribute_group,
- NULL,
-};
-
-static const struct attribute_group *papr_scm_dimm_groups[] = {
- &nvdimm_attribute_group,
- &nd_device_attribute_group,
- NULL,
-};
-
static inline int papr_scm_node(int node)
{
int min_dist = INT_MAX, dist;
@@ -333,7 +314,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
p->bus_desc.of_node = p->pdev->dev.of_node;
- p->bus_desc.attr_groups = bus_attr_groups;
p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
if (!p->bus_desc.provider_name)
@@ -348,8 +328,8 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
dimm_flags = 0;
set_bit(NDD_ALIASING, &dimm_flags);
- p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
- dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
+ p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags,
+ PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
if (!p->nvdimm) {
dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
goto err;
@@ -366,7 +346,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
memset(&ndr_desc, 0, sizeof(ndr_desc));
- ndr_desc.attr_groups = region_attr_groups;
target_nid = dev_to_node(&p->pdev->dev);
online_nid = papr_scm_node(target_nid);
ndr_desc.numa_node = online_nid;
@@ -513,7 +492,6 @@ static struct platform_driver papr_scm_driver = {
.remove = papr_scm_remove,
.driver = {
.name = "papr_scm",
- .owner = THIS_MODULE,
.of_match_table = papr_scm_match,
},
};
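The papr_scm bounds change is an inclusive-end fix: a window of in_length bytes at in_offset fits a metadata area of metadata_size bytes exactly when offset + length <= size, so only the strict greater-than case is out of bounds. In isolation:

/* Illustration only; a real check should also guard off + len overflow. */
static bool window_in_bounds(u64 off, u64 len, u64 size)
{
	return off + len <= size;	/* off + len == size is still valid */
}

/*
 * e.g. size = 128, off = 120, len = 8: the old ">=" test rejected this
 * access even though it ends exactly at the last byte of the metadata.
 */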
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 561917fa54a8..361986e4354e 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code
* for RPA-compliant PPC64 platform.
@@ -6,23 +7,6 @@
*
* Updates, 2005, John Rose <johnrose@austin.ibm.com>
* Updates, 2005, Linas Vepstas <linas@austin.ibm.com>
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index a96874f9492f..09e98d301db0 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -36,6 +36,7 @@ static int sysfs_entries;
static u32 cpu_to_drc_index(int cpu)
{
struct device_node *dn = NULL;
+ struct property *info;
int thread_index;
int rc = 1;
u32 ret = 0;
@@ -47,20 +48,18 @@ static u32 cpu_to_drc_index(int cpu)
/* Convert logical cpu number to core number */
thread_index = cpu_core_index_of_thread(cpu);
- if (firmware_has_feature(FW_FEATURE_DRC_INFO)) {
- struct property *info = NULL;
+ info = of_find_property(dn, "ibm,drc-info", NULL);
+ if (info) {
struct of_drc_info drc;
int j;
u32 num_set_entries;
const __be32 *value;
- info = of_find_property(dn, "ibm,drc-info", NULL);
- if (info == NULL)
- goto err_of_node_put;
-
value = of_prop_next_u32(info, NULL, &num_set_entries);
if (!value)
goto err_of_node_put;
+ else
+ value++;
for (j = 0; j < num_set_entries; j++) {
@@ -110,6 +109,7 @@ err:
static int drc_index_to_cpu(u32 drc_index)
{
struct device_node *dn = NULL;
+ struct property *info;
const int *indexes;
int thread_index = 0, cpu = 0;
int rc = 1;
@@ -117,21 +117,18 @@ static int drc_index_to_cpu(u32 drc_index)
dn = of_find_node_by_path("/cpus");
if (dn == NULL)
goto err;
-
- if (firmware_has_feature(FW_FEATURE_DRC_INFO)) {
- struct property *info = NULL;
+ info = of_find_property(dn, "ibm,drc-info", NULL);
+ if (info) {
struct of_drc_info drc;
int j;
u32 num_set_entries;
const __be32 *value;
- info = of_find_property(dn, "ibm,drc-info", NULL);
- if (info == NULL)
- goto err_of_node_put;
-
value = of_prop_next_u32(info, NULL, &num_set_entries);
if (!value)
goto err_of_node_put;
+ else
+ value++;
for (j = 0; j < num_set_entries; j++) {
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 3acdcc3bb908..1d7f973c647b 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -255,7 +255,7 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
break;
case EPOW_SYSTEM_SHUTDOWN:
- handle_system_shutdown(epow_log->event_modifier);
+ handle_system_shutdown(modifier);
break;
case EPOW_SYSTEM_HALT:
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 603b3c656d19..cb5a5bd2cef5 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
-obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index ff0e2b156cb5..617a443d673d 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -115,8 +115,8 @@ static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- pdev->dev.bus_dma_mask =
- hose->dma_window_base_cur + hose->dma_window_size;
+ pdev->dev.bus_dma_limit =
+ hose->dma_window_base_cur + hose->dma_window_size - 1;
}
static void setup_swiotlb_ops(struct pci_controller *hose)
@@ -135,7 +135,7 @@ static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
- dev->bus_dma_mask = 0;
+ dev->bus_dma_limit = 0;
dev->archdata.dma_offset = pci64_dma_offset;
}
}
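The fsl_pci change follows the kernel-wide move from bus_dma_mask to bus_dma_limit: the limit is the highest bus address the device may DMA to, so a window of size bytes starting at base ends at base + size - 1, hence the new "- 1". The arithmetic in isolation (illustrative helper):

static u64 dma_window_limit(u64 base, u64 size)
{
	return base + size - 1;	/* last addressable byte, not one past it */
}

/* e.g. base = 0x0, size = 0x80000000 (2 GiB) -> limit = 0x7fffffff */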
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
deleted file mode 100644
index dc1740cd9e42..000000000000
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ /dev/null
@@ -1,143 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Simple Memory-Mapped GPIOs
- *
- * Copyright (c) MontaVista Software, Inc. 2008.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <asm/prom.h>
-#include "simple_gpio.h"
-
-struct u8_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
- spinlock_t lock;
-
- /* shadowed data register to clear/set bits safely */
- u8 data;
-};
-
-static u8 u8_pin2mask(unsigned int pin)
-{
- return 1 << (8 - 1 - pin);
-}
-
-static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
-
- return !!(in_8(mm_gc->regs) & u8_pin2mask(gpio));
-}
-
-static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct u8_gpio_chip *u8_gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- spin_lock_irqsave(&u8_gc->lock, flags);
-
- if (val)
- u8_gc->data |= u8_pin2mask(gpio);
- else
- u8_gc->data &= ~u8_pin2mask(gpio);
-
- out_8(mm_gc->regs, u8_gc->data);
-
- spin_unlock_irqrestore(&u8_gc->lock, flags);
-}
-
-static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- return 0;
-}
-
-static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- u8_gpio_set(gc, gpio, val);
- return 0;
-}
-
-static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
-{
- struct u8_gpio_chip *u8_gc =
- container_of(mm_gc, struct u8_gpio_chip, mm_gc);
-
- u8_gc->data = in_8(mm_gc->regs);
-}
-
-static int __init u8_simple_gpiochip_add(struct device_node *np)
-{
- int ret;
- struct u8_gpio_chip *u8_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
-
- u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL);
- if (!u8_gc)
- return -ENOMEM;
-
- spin_lock_init(&u8_gc->lock);
-
- mm_gc = &u8_gc->mm_gc;
- gc = &mm_gc->gc;
-
- mm_gc->save_regs = u8_gpio_save_regs;
- gc->ngpio = 8;
- gc->direction_input = u8_gpio_dir_in;
- gc->direction_output = u8_gpio_dir_out;
- gc->get = u8_gpio_get;
- gc->set = u8_gpio_set;
-
- ret = of_mm_gpiochip_add_data(np, mm_gc, u8_gc);
- if (ret)
- goto err;
- return 0;
-err:
- kfree(u8_gc);
- return ret;
-}
-
-void __init simple_gpiochip_init(const char *compatible)
-{
- struct device_node *np;
-
- for_each_compatible_node(np, NULL, compatible) {
- int ret;
- struct resource r;
-
- ret = of_address_to_resource(np, 0, &r);
- if (ret)
- goto err;
-
- switch (resource_size(&r)) {
- case 1:
- ret = u8_simple_gpiochip_add(np);
- if (ret)
- goto err;
- break;
- default:
- /*
- * Whenever you need support for GPIO bank width > 1,
- * please just turn u8_ code into huge macros, and
- * construct needed uX_ code with it.
- */
- ret = -ENOSYS;
- goto err;
- }
- continue;
-err:
- pr_err("%pOF: registration failed, status %d\n", np, ret);
- }
-}
diff --git a/arch/powerpc/sysdev/simple_gpio.h b/arch/powerpc/sysdev/simple_gpio.h
deleted file mode 100644
index f3f3a20d39e2..000000000000
--- a/arch/powerpc/sysdev/simple_gpio.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SYSDEV_SIMPLE_GPIO_H
-#define __SYSDEV_SIMPLE_GPIO_H
-
-#include <linux/errno.h>
-
-#ifdef CONFIG_SIMPLE_GPIO
-extern void simple_gpiochip_init(const char *compatible);
-#else
-static inline void simple_gpiochip_init(const char *compatible) {}
-#endif /* CONFIG_SIMPLE_GPIO */
-
-#endif /* __SYSDEV_SIMPLE_GPIO_H */
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index df832b09e3e9..f5fadbd2533a 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1035,6 +1035,15 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
xd->target = XIVE_INVALID_TARGET;
irq_set_handler_data(virq, xd);
+ /*
+ * Turn the interrupt being mapped OFF by default. A side
+ * effect of this read is that it maps the ESB page of the
+ * interrupt into the Linux address space. This prevents
+ * page faults in the crash handler, which masks all
+ * interrupts.
+ */
+ xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
+
return 0;
}
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
index 2b4e959caa36..7b9fe0a567cf 100755
--- a/arch/powerpc/tools/relocs_check.sh
+++ b/arch/powerpc/tools/relocs_check.sh
@@ -20,7 +20,7 @@ objdump="$1"
vmlinux="$2"
bad_relocs=$(
-"$objdump" -R "$vmlinux" |
+$objdump -R "$vmlinux" |
# Only look at relocation lines.
grep -E '\<R_' |
# These relocations are okay
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
index 1e972df3107e..77114755dc6f 100755
--- a/arch/powerpc/tools/unrel_branch_check.sh
+++ b/arch/powerpc/tools/unrel_branch_check.sh
@@ -18,14 +18,14 @@ vmlinux="$2"
#__end_interrupts should be located within the first 64K
end_intr=0x$(
-"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
+$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
--stop-address=0xc000000000010000 |
grep '\<__end_interrupts>:' |
awk '{print $1}'
)
BRANCHES=$(
-"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
+$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
--stop-address=${end_intr} |
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
grep -v '\<__start_initialization_multiplatform>' |
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index f142570ad860..c3842dbeb1b7 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for xmon
-# Disable clang warning for using setjmp without setjmp.h header
-subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
+# Avoid clang warnings around longjmp/setjmp declarations
+subdir-ccflags-y := -ffreestanding
GCOV_PROFILE := n
KCOV_INSTRUMENT := n
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index d83364ebc5c5..a7056049709e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -25,6 +25,7 @@
#include <linux/nmi.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
+#include <linux/security.h>
#include <asm/debugfs.h>
#include <asm/ptrace.h>
@@ -187,6 +188,8 @@ static void dump_tlb_44x(void);
static void dump_tlb_book3e(void);
#endif
+static void clear_all_bpt(void);
+
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#else
@@ -283,10 +286,38 @@ Commands:\n\
" U show uptime information\n"
" ? help\n"
" # n limit output to n lines per page (for dp, dpa, dl)\n"
-" zr reboot\n\
- zh halt\n"
+" zr reboot\n"
+" zh halt\n"
;
+#ifdef CONFIG_SECURITY
+static bool xmon_is_locked_down(void)
+{
+ static bool lockdown;
+
+ if (!lockdown) {
+ lockdown = !!security_locked_down(LOCKDOWN_XMON_RW);
+ if (lockdown) {
+ printf("xmon: Disabled due to kernel lockdown\n");
+ xmon_is_ro = true;
+ }
+ }
+
+ if (!xmon_is_ro) {
+ xmon_is_ro = !!security_locked_down(LOCKDOWN_XMON_WR);
+ if (xmon_is_ro)
+ printf("xmon: Read-only due to kernel lockdown\n");
+ }
+
+ return lockdown;
+}
+#else /* CONFIG_SECURITY */
+static inline bool xmon_is_locked_down(void)
+{
+ return false;
+}
+#endif
+
static struct pt_regs *xmon_regs;
static inline void sync(void)
@@ -438,7 +469,10 @@ static bool wait_for_other_cpus(int ncpus)
return false;
}
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+static inline void get_output_lock(void) {}
+static inline void release_output_lock(void) {}
+#endif
static inline int unrecoverable_excp(struct pt_regs *regs)
{
@@ -455,6 +489,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
int cmd = 0;
struct bpt *bp;
long recurse_jmp[JMP_BUF_LEN];
+ bool locked_down;
unsigned long offset;
unsigned long flags;
#ifdef CONFIG_SMP
@@ -465,6 +500,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
local_irq_save(flags);
hard_irq_disable();
+ locked_down = xmon_is_locked_down();
+
if (!fromipi) {
tracing_enabled = tracing_is_on();
tracing_off();
@@ -518,7 +555,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (!fromipi) {
get_output_lock();
- excprint(regs);
+ if (!locked_down)
+ excprint(regs);
if (bp) {
printf("cpu 0x%x stopped at breakpoint 0x%tx (",
cpu, BP_NUM(bp));
@@ -570,10 +608,14 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
remove_bpts();
disable_surveillance();
- /* for breakpoint or single step, print the current instr. */
- if (bp || TRAP(regs) == 0xd00)
- ppc_inst_dump(regs->nip, 1, 0);
- printf("enter ? for help\n");
+
+ if (!locked_down) {
+ /* for breakpoint or single step, print curr insn */
+ if (bp || TRAP(regs) == 0xd00)
+ ppc_inst_dump(regs->nip, 1, 0);
+ printf("enter ? for help\n");
+ }
+
mb();
xmon_gate = 1;
barrier();
@@ -597,8 +639,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
spin_cpu_relax();
touch_nmi_watchdog();
} else {
- cmd = cmds(regs);
- if (cmd != 0) {
+ if (!locked_down)
+ cmd = cmds(regs);
+ if (locked_down || cmd != 0) {
/* exiting xmon */
insert_bpts();
xmon_gate = 0;
@@ -635,13 +678,16 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
"can't continue\n");
remove_bpts();
disable_surveillance();
- /* for breakpoint or single step, print the current instr. */
- if (bp || TRAP(regs) == 0xd00)
- ppc_inst_dump(regs->nip, 1, 0);
- printf("enter ? for help\n");
+ if (!locked_down) {
+ /* for breakpoint or single step, print current insn */
+ if (bp || TRAP(regs) == 0xd00)
+ ppc_inst_dump(regs->nip, 1, 0);
+ printf("enter ? for help\n");
+ }
}
- cmd = cmds(regs);
+ if (!locked_down)
+ cmd = cmds(regs);
insert_bpts();
in_xmon = 0;
@@ -670,7 +716,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
}
#endif
- insert_cpu_bpts();
+ if (locked_down)
+ clear_all_bpt();
+ else
+ insert_cpu_bpts();
touch_nmi_watchdog();
local_irq_restore(flags);
@@ -884,7 +933,7 @@ static void insert_cpu_bpts(void)
if (dabr.enabled) {
brk.address = dabr.address;
brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
- brk.len = 8;
+ brk.len = DABR_MAX_LEN;
__set_breakpoint(&brk);
}
@@ -1047,10 +1096,6 @@ cmds(struct pt_regs *excp)
set_lpp_cmd();
break;
case 'b':
- if (xmon_is_ro) {
- printf(xmon_ro_msg);
- break;
- }
bpt_cmds();
break;
case 'C':
@@ -1319,11 +1364,16 @@ bpt_cmds(void)
struct bpt *bp;
cmd = inchar();
+
switch (cmd) {
#ifndef CONFIG_PPC_8xx
static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
int mode;
case 'd': /* bd - hardware data breakpoint */
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
if (!ppc_breakpoint_available()) {
printf("Hardware data breakpoint not supported on this cpu\n");
break;
@@ -1351,6 +1401,10 @@ bpt_cmds(void)
break;
case 'i': /* bi - hardware instr breakpoint */
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
printf("Hardware instruction breakpoint "
"not supported on this cpu\n");
@@ -1409,7 +1463,8 @@ bpt_cmds(void)
break;
}
termch = cmd;
- if (!scanhex(&a)) {
+
+ if (xmon_is_ro || !scanhex(&a)) {
/* print all breakpoints */
printf(" type address\n");
if (dabr.enabled) {
@@ -3762,6 +3817,11 @@ static void xmon_init(int enable)
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_xmon(int key)
{
+ if (xmon_is_locked_down()) {
+ clear_all_bpt();
+ xmon_init(0);
+ return;
+ }
/* ensure xmon is enabled */
xmon_init(1);
debugger(get_irq_regs());
@@ -3783,7 +3843,6 @@ static int __init setup_xmon_sysrq(void)
device_initcall(setup_xmon_sysrq);
#endif /* CONFIG_MAGIC_SYSRQ */
-#ifdef CONFIG_DEBUG_FS
static void clear_all_bpt(void)
{
int i;
@@ -3801,18 +3860,22 @@ static void clear_all_bpt(void)
iabr = NULL;
dabr.enabled = 0;
}
-
- printf("xmon: All breakpoints cleared\n");
}
+#ifdef CONFIG_DEBUG_FS
static int xmon_dbgfs_set(void *data, u64 val)
{
xmon_on = !!val;
xmon_init(xmon_on);
/* make sure all breakpoints removed when disabling */
- if (!xmon_on)
+ if (!xmon_on) {
clear_all_bpt();
+ get_output_lock();
+ printf("xmon: All breakpoints cleared\n");
+ release_output_lock();
+ }
+
return 0;
}
@@ -3838,7 +3901,11 @@ static int xmon_early __initdata;
static int __init early_parse_xmon(char *p)
{
- if (!p || strncmp(p, "early", 5) == 0) {
+ if (xmon_is_locked_down()) {
+ xmon_init(0);
+ xmon_early = 0;
+ xmon_on = 0;
+ } else if (!p || strncmp(p, "early", 5) == 0) {
/* just "xmon" is equivalent to "xmon=early" */
xmon_init(1);
xmon_early = 1;
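Taken together, the xmon.c hunks above implement a two-level lockdown policy: LOCKDOWN_XMON_RW disables the debugger outright (breakpoints cleared, no command loop), while LOCKDOWN_XMON_WR leaves it usable but read-only. A condensed restatement of xmon_is_locked_down(), for orientation only:

    static bool xmon_lockdown_policy(void)
    {
            if (security_locked_down(LOCKDOWN_XMON_RW))
                    return true;            /* xmon disabled outright */
            if (security_locked_down(LOCKDOWN_XMON_WR))
                    xmon_is_ro = true;      /* commands run, writes refused */
            return false;
    }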
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 75a6c9117622..759ffb00267c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -26,14 +26,16 @@ config RISCV
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
+ select GENERIC_STRNCPY_FROM_USER if MMU
+ select GENERIC_STRNLEN_USER if MMU
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_IOREMAP
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ASM_MODVERSIONS
select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_DMA_CONTIGUOUS
+ select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -50,6 +52,7 @@ config RISCV
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
select RISCV_TIMER
+ select UACCESS_MEMCPY if !MMU
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_ARCH_TOPOLOGY if SMP
select ARCH_HAS_PTE_SPECIAL
@@ -60,7 +63,7 @@ config RISCV
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
- select HAVE_ARCH_MMAP_RND_BITS
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -72,8 +75,23 @@ config ARCH_MMAP_RND_BITS_MAX
default 24 if 64BIT # SV39 based
default 17
+# set if we run in machine mode, cleared if we run in supervisor mode
+config RISCV_M_MODE
+ bool
+ default !MMU
+
+# set if we are running in S-mode and can use SBI calls
+config RISCV_SBI
+ bool
+ depends on !RISCV_M_MODE
+ default y
+
config MMU
- def_bool y
+ bool "MMU-based Paged Memory Management Support"
+ default y
+ help
+ Select this if you want MMU-based virtual memory support via
+ paged memory management. If unsure, say 'Y'.
config ZONE_DMA32
bool
@@ -92,6 +110,7 @@ config PA_BITS
config PAGE_OFFSET
hex
default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+ default 0x80000000 if 64BIT && !MMU
default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@@ -135,7 +154,7 @@ config GENERIC_HWEIGHT
def_bool y
config FIX_EARLYCON_MEM
- def_bool y
+ def_bool CONFIG_MMU
config PGTABLE_LEVELS
int
@@ -160,6 +179,7 @@ config ARCH_RV32I
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
+ select MMU
config ARCH_RV64I
bool "RV64I"
@@ -168,9 +188,9 @@ config ARCH_RV64I
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
- select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS
- select SWIOTLB
+ select HAVE_DYNAMIC_FTRACE if MMU
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select SWIOTLB if MMU
endchoice
@@ -272,6 +292,19 @@ menu "Kernel features"
source "kernel/Kconfig.hz"
+config SECCOMP
+ bool "Enable seccomp to safely compute untrusted bytecode"
+ help
+ This kernel feature is useful for number crunching applications
+ that may need to compute untrusted bytecode during their
+ execution. By using pipes or other transports made available to
+ the process as file descriptors supporting the read/write
+ syscalls, it's possible to isolate those applications in
+ their own address space using seccomp. Once seccomp is
+ enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+ and the task is only allowed to execute a few safe syscalls
+ defined by each seccomp mode.
+
endmenu
menu "Boot options"
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index f5e914210245..b9009a2fbaf5 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -83,13 +83,18 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
-all: Image.gz
+ifeq ($(CONFIG_RISCV_M_MODE),y)
+KBUILD_IMAGE := $(boot)/loader
+else
+KBUILD_IMAGE := $(boot)/Image.gz
+endif
+BOOT_TARGETS := Image Image.gz loader
-Image: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+all: $(notdir $(KBUILD_IMAGE))
-Image.%: Image
+$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @$(kecho) ' Kernel: $(boot)/$@ is ready'
zinstall install:
$(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 0990a9fdbe5d..a474f98ce4fa 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -16,7 +16,7 @@
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image
+targets := Image loader
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
@@ -24,6 +24,23 @@ $(obj)/Image: vmlinux FORCE
$(obj)/Image.gz: $(obj)/Image FORCE
$(call if_changed,gzip)
+loader.o: $(src)/loader.S $(obj)/Image
+
+$(obj)/loader: $(obj)/loader.o $(obj)/Image $(obj)/loader.lds FORCE
+ $(Q)$(LD) -T $(obj)/loader.lds -o $@ $(obj)/loader.o
+
+$(obj)/Image.bz2: $(obj)/Image FORCE
+ $(call if_changed,bzip2)
+
+$(obj)/Image.lz4: $(obj)/Image FORCE
+ $(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+ $(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+ $(call if_changed,lzo)
+
install:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index afa43c7ea369..70a1891e7cd0 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -162,6 +162,13 @@
clocks = <&prci PRCI_CLK_TLCLK>;
status = "disabled";
};
+ dma: dma@3000000 {
+ compatible = "sifive,fu540-c000-pdma";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <23 24 25 26 27 28 29 30>;
+ #dma-cells = <1>;
+ };
uart1: serial@10011000 {
compatible = "sifive,fu540-c000-uart", "sifive,uart0";
reg = <0x0 0x10011000 0x0 0x1000>;
diff --git a/arch/riscv/boot/loader.S b/arch/riscv/boot/loader.S
new file mode 100644
index 000000000000..dcf88cf44dc1
--- /dev/null
+++ b/arch/riscv/boot/loader.S
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .align 4
+ .section .payload, "ax", %progbits
+ .globl _start
+_start:
+ .incbin "arch/riscv/boot/Image"
+
diff --git a/arch/riscv/boot/loader.lds.S b/arch/riscv/boot/loader.lds.S
new file mode 100644
index 000000000000..47a5003c2e28
--- /dev/null
+++ b/arch/riscv/boot/loader.lds.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/page.h>
+
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+
+SECTIONS
+{
+ . = PAGE_OFFSET;
+
+ .payload : {
+ *(.payload)
+ . = ALIGN(8);
+ }
+}
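How the flat loader boots: loader.S embeds the uncompressed Image at PAGE_OFFSET and exports _start at its first byte. That works because the RISC-V Image format opens with two executable words that jump past the header to the real kernel entry, so M-mode firmware can simply branch to PAGE_OFFSET. For reference, the header layout from asm/image.h (only its tail appears in the hunk for that file below):

    struct riscv_image_header {
            u32 code0;              /* executable: jump over the header */
            u32 code1;
            u64 text_offset;        /* image load offset */
            u64 image_size;
            u64 flags;
            u32 version;
            u32 res1;
            u64 res2;
            u64 magic;              /* "RISCV\0\0\0" */
            u32 magic2;             /* "RSC\x05" */
            u32 res4;
    };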
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
new file mode 100644
index 000000000000..cf74e179bf90
--- /dev/null
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -0,0 +1,78 @@
+# CONFIG_CPU_ISOLATION is not set
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+# CONFIG_MMU is not set
+CONFIG_MAXPHYSMEM_2GB=y
+CONFIG_SMP=y
+CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_BINFMT_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_SIFIVE_PLIC=y
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_LSM="[]"
+CONFIG_PRINTK_TIME=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 16970f246860..1efaeddf1e4b 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -22,7 +22,6 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
-generic-y += msi.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index c9fecd120d18..dd62b691c443 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H
#include <linux/ftrace.h>
#include <asm-generic/asm-prototypes.h>
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
index bfd523e8f0b2..9b58b104559e 100644
--- a/arch/riscv/include/asm/cache.h
+++ b/arch/riscv/include/asm/cache.h
@@ -11,4 +11,12 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+/*
+ * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
+ * the flat loader aligns it accordingly.
+ */
+#ifndef CONFIG_MMU
+#define ARCH_SLAB_MINALIGN 16
+#endif
+
#endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
new file mode 100644
index 000000000000..6eaa2eedd694
--- /dev/null
+++ b/arch/riscv/include/asm/clint.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H 1
+
+#include <linux/io.h>
+#include <linux/smp.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+extern u32 __iomem *clint_ipi_base;
+
+void clint_init_boot_cpu(void);
+
+static inline void clint_send_ipi_single(unsigned long hartid)
+{
+ writel(1, clint_ipi_base + hartid);
+}
+
+static inline void clint_send_ipi_mask(const struct cpumask *hartid_mask)
+{
+ int hartid;
+
+ for_each_cpu(hartid, hartid_mask)
+ clint_send_ipi_single(hartid);
+}
+
+static inline void clint_clear_ipi(unsigned long hartid)
+{
+ writel(0, clint_ipi_base + hartid);
+}
+#else /* CONFIG_RISCV_M_MODE */
+#define clint_init_boot_cpu() do { } while (0)
+
+/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
+void clint_send_ipi_single(unsigned long hartid);
+void clint_send_ipi_mask(const struct cpumask *hartid_mask);
+void clint_clear_ipi(unsigned long hartid);
+#endif /* CONFIG_RISCV_M_MODE */
+
+#endif /* _ASM_RISCV_CLINT_H */
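One subtlety in the helpers above: clint_ipi_base is a u32 __iomem *, so the pointer arithmetic already scales by sizeof(u32) and hartid indexes the per-hart IPI words directly. The same store, with the scaling spelled out for illustration:

    /* equivalent to clint_send_ipi_single(hartid) */
    writel(1, (void __iomem *)clint_ipi_base + hartid * sizeof(u32));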
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index a18923fa23c8..0a62d2d68455 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -11,8 +11,11 @@
/* Status register flags */
#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
@@ -44,9 +47,10 @@
#define SATP_MODE SATP_MODE_39
#endif
-/* SCAUSE */
-#define SCAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+/* Interrupt causes (minus the high bit) */
#define IRQ_U_SOFT 0
#define IRQ_S_SOFT 1
#define IRQ_M_SOFT 3
@@ -57,6 +61,7 @@
#define IRQ_S_EXT 9
#define IRQ_M_EXT 11
+/* Exception causes */
#define EXC_INST_MISALIGNED 0
#define EXC_INST_ACCESS 1
#define EXC_BREAKPOINT 3
@@ -67,14 +72,14 @@
#define EXC_LOAD_PAGE_FAULT 13
#define EXC_STORE_PAGE_FAULT 15
-/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
-#define SIE_SSIE (_AC(0x1, UL) << IRQ_S_SOFT)
-#define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER)
-#define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT)
-
+/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
#define CSR_INSTRET 0xc02
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
#define CSR_STVEC 0x105
@@ -85,9 +90,58 @@
#define CSR_STVAL 0x143
#define CSR_SIP 0x144
#define CSR_SATP 0x180
-#define CSR_CYCLEH 0xc80
-#define CSR_TIMEH 0xc81
-#define CSR_INSTRETH 0xc82
+
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_MHARTID 0xf14
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS CSR_MSTATUS
+# define CSR_IE CSR_MIE
+# define CSR_TVEC CSR_MTVEC
+# define CSR_SCRATCH CSR_MSCRATCH
+# define CSR_EPC CSR_MEPC
+# define CSR_CAUSE CSR_MCAUSE
+# define CSR_TVAL CSR_MTVAL
+# define CSR_IP CSR_MIP
+
+# define SR_IE SR_MIE
+# define SR_PIE SR_MPIE
+# define SR_PP SR_MPP
+
+# define IRQ_SOFT IRQ_M_SOFT
+# define IRQ_TIMER IRQ_M_TIMER
+# define IRQ_EXT IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS CSR_SSTATUS
+# define CSR_IE CSR_SIE
+# define CSR_TVEC CSR_STVEC
+# define CSR_SCRATCH CSR_SSCRATCH
+# define CSR_EPC CSR_SEPC
+# define CSR_CAUSE CSR_SCAUSE
+# define CSR_TVAL CSR_STVAL
+# define CSR_IP CSR_SIP
+
+# define SR_IE SR_SIE
+# define SR_PIE SR_SPIE
+# define SR_PP SR_SPP
+
+# define IRQ_SOFT IRQ_S_SOFT
+# define IRQ_TIMER IRQ_S_TIMER
+# define IRQ_EXT IRQ_S_EXT
+#endif /* CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << IRQ_EXT)
#ifndef __ASSEMBLY__
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
index 44dcf7fc15ee..dd973efe5d7c 100644
--- a/arch/riscv/include/asm/current.h
+++ b/arch/riscv/include/asm/current.h
@@ -7,8 +7,8 @@
*/
-#ifndef __ASM_CURRENT_H
-#define __ASM_CURRENT_H
+#ifndef _ASM_RISCV_CURRENT_H
+#define _ASM_RISCV_CURRENT_H
#include <linux/bug.h>
#include <linux/compiler.h>
@@ -34,4 +34,4 @@ static __always_inline struct task_struct *get_current(void)
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_CURRENT_H */
+#endif /* _ASM_RISCV_CURRENT_H */
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index ef04084bf0de..d83a4efd052b 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -56,16 +56,16 @@ extern unsigned long elf_hwcap;
*/
#define ELF_PLATFORM (NULL)
+#ifdef CONFIG_MMU
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(elf_addr_t)current->mm->context.vdso); \
} while (0)
-
-
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 161f28d04a07..42d2c42f3cc9 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -11,6 +11,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#ifdef CONFIG_MMU
/*
* Here we define all the compile-time 'special' virtual addresses.
* The point is to have a constant address at compile time, but to
@@ -42,4 +43,5 @@ extern void __set_fixmap(enum fixed_addresses idx,
#include <asm-generic/fixmap.h>
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_FIXMAP_H */
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index c6dcc5291f97..ace8a6e2d11d 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
+#ifndef _ASM_RISCV_FTRACE_H
+#define _ASM_RISCV_FTRACE_H
+
/*
* The graph frame test is not possible if CONFIG_FRAME_POINTER is not enabled.
* Check arch/riscv/kernel/mcount.S for detail.
@@ -64,3 +67,5 @@ do { \
*/
#define MCOUNT_INSN_SIZE 8
#endif
+
+#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 4ad6409c4647..fdfaf7f3df7c 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -4,14 +4,20 @@
* Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
*/
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_RISCV_FUTEX_H
+#define _ASM_RISCV_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/asm.h>
+/* We don't even really need the extable code, but for now keep it simple */
+#ifndef CONFIG_MMU
+#define __enable_user_access() do { } while (0)
+#define __disable_user_access() do { } while (0)
+#endif
+
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
uintptr_t tmp; \
@@ -112,4 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return ret;
}
-#endif /* _ASM_FUTEX_H */
+#endif /* _ASM_RISCV_FUTEX_H */
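The empty definitions above are correct for !MMU because kernel and userspace share one flat address space, so there is no SUM bit to toggle. For contrast, the MMU build's helpers in asm/uaccess.h (paraphrased here as a sketch) open a window for supervisor access to user pages:

    #define __enable_user_access() \
            __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
    #define __disable_user_access() \
            __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")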
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 7ecb7c6a57b1..1bb0cd04aec3 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -5,8 +5,8 @@
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2017 SiFive
*/
-#ifndef __ASM_HWCAP_H
-#define __ASM_HWCAP_H
+#ifndef _ASM_RISCV_HWCAP_H
+#define _ASM_RISCV_HWCAP_H
#include <uapi/asm/hwcap.h>
@@ -23,4 +23,5 @@ enum {
extern unsigned long elf_hwcap;
#endif
-#endif
+
+#endif /* _ASM_RISCV_HWCAP_H */
diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h
index 344db5244547..7b0f92ba0acc 100644
--- a/arch/riscv/include/asm/image.h
+++ b/arch/riscv/include/asm/image.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_IMAGE_H
-#define __ASM_IMAGE_H
+#ifndef _ASM_RISCV_IMAGE_H
+#define _ASM_RISCV_IMAGE_H
#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
#define RISCV_IMAGE_MAGIC2 "RSC\x05"
@@ -62,4 +62,4 @@ struct riscv_image_header {
u32 res4;
};
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_IMAGE_H */
+#endif /* _ASM_RISCV_IMAGE_H */
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 3ba4d93721d3..0f477206a4ed 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -15,158 +15,19 @@
#include <asm/mmiowb.h>
#include <asm/pgtable.h>
-extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
-
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions. This should be fixed by the
- * upcoming platform spec.
- */
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_wc(addr, size) ioremap((addr), (size))
-#define ioremap_wt(addr, size) ioremap((addr), (size))
-
-extern void iounmap(volatile void __iomem *addr);
-
-/* Generic IO read/write. These perform native-endian accesses. */
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
-{
- asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
-{
- asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
-{
- asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-
-#ifdef CONFIG_64BIT
-#define __raw_writeq __raw_writeq
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
-{
- asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-#endif
-
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
- u8 val;
-
- asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
- return val;
-}
-
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
- u16 val;
-
- asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
- return val;
-}
-
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
- u32 val;
-
- asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
- return val;
-}
-
-#ifdef CONFIG_64BIT
-#define __raw_readq __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
-{
- u64 val;
-
- asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
- return val;
-}
-#endif
-
/*
- * Unordered I/O memory access primitives. These are even more relaxed than
- * the relaxed versions, as they don't even order accesses between successive
- * operations to the I/O regions.
+ * MMIO access functions are separated out to break dependency cycles
+ * when using {read,write}* fns in low-level headers
*/
-#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
-#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-
-#define writeb_cpu(v,c) ((void)__raw_writeb((v),(c)))
-#define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
-#define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
-
-#ifdef CONFIG_64BIT
-#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
-#define writeq_cpu(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
-#endif
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
- */
-/* FIXME: These are now the same as asm-generic */
-#define __io_rbr() do {} while (0)
-#define __io_rar() do {} while (0)
-#define __io_rbw() do {} while (0)
-#define __io_raw() do {} while (0)
-
-#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
-#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
-#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
-
-#define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
-#define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
-#define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
-
-#ifdef CONFIG_64BIT
-#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
-#define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
-#endif
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access. The memory barriers here are necessary as RISC-V
- * doesn't define any ordering between the memory space and the I/O space.
- */
-#define __io_br() do {} while (0)
-#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory");
-#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw() mmiowb_set_pending()
-
-#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
-#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
-#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
-
-#define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
-#define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
-#define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
-
-#ifdef CONFIG_64BIT
-#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
-#define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
-#endif
+#include <asm/mmio.h>
/*
* I/O port access constants.
*/
+#ifdef CONFIG_MMU
#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
+#endif /* CONFIG_MMU */
/*
* Emulation routines for the port-mapped IO space used by some PCI drivers.
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index e70f647ce3b7..08d4d6a5b7e9 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -13,31 +13,31 @@
/* read interrupt enabled status */
static inline unsigned long arch_local_save_flags(void)
{
- return csr_read(CSR_SSTATUS);
+ return csr_read(CSR_STATUS);
}
/* unconditionally enable interrupts */
static inline void arch_local_irq_enable(void)
{
- csr_set(CSR_SSTATUS, SR_SIE);
+ csr_set(CSR_STATUS, SR_IE);
}
/* unconditionally disable interrupts */
static inline void arch_local_irq_disable(void)
{
- csr_clear(CSR_SSTATUS, SR_SIE);
+ csr_clear(CSR_STATUS, SR_IE);
}
/* get status and disable interrupts */
static inline unsigned long arch_local_irq_save(void)
{
- return csr_read_clear(CSR_SSTATUS, SR_SIE);
+ return csr_read_clear(CSR_STATUS, SR_IE);
}
/* test flags */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return !(flags & SR_SIE);
+ return !(flags & SR_IE);
}
/* test hardware interrupt enable bit */
@@ -49,7 +49,7 @@ static inline int arch_irqs_disabled(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- csr_set(CSR_SSTATUS, flags & SR_SIE);
+ csr_set(CSR_STATUS, flags & SR_IE);
}
#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
index 96e30ef637e8..56a98ea30731 100644
--- a/arch/riscv/include/asm/kprobes.h
+++ b/arch/riscv/include/asm/kprobes.h
@@ -6,9 +6,9 @@
* Copyright (C) 2017 SiFive
*/
-#ifndef _RISCV_KPROBES_H
-#define _RISCV_KPROBES_H
+#ifndef _ASM_RISCV_KPROBES_H
+#define _ASM_RISCV_KPROBES_H
#include <asm-generic/kprobes.h>
-#endif /* _RISCV_KPROBES_H */
+#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
new file mode 100644
index 000000000000..a2c809df2733
--- /dev/null
+++ b/arch/riscv/include/asm/mmio.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_MMIO_H
+#define _ASM_RISCV_MMIO_H
+
+#include <linux/types.h>
+#include <asm/mmiowb.h>
+
+#ifndef CONFIG_MMU
+#define pgprot_noncached(x) (x)
+#endif /* CONFIG_MMU */
+
+/* Generic IO read/write. These perform native-endian accesses. */
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+#endif
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+#endif
+
+/*
+ * Unordered I/O memory access primitives. These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
+#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
+#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses. These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses. Since the platform specification
+ * defines that all I/O regions are strongly ordered on channel 2, no explicit
+ * fences are required to enforce this ordering.
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr() do {} while (0)
+#define __io_rar() do {} while (0)
+#define __io_rbw() do {} while (0)
+#define __io_raw() do {} while (0)
+
+#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
+#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
+#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
+#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
+#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
+#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
+#endif
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access. The memory barriers here are necessary as RISC-V
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+#define __io_br() do {} while (0)
+#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory")
+#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory")
+#define __io_aw() mmiowb_set_pending()
+
+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
+
+#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
+#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
+#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
+
+#ifdef CONFIG_64BIT
+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
+#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
+#endif
+
+#endif /* _ASM_RISCV_MMIO_H */
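Usage note for the accessors above: readl()/writel() include the fence i,r / fence w,o barriers and are the safe default for drivers, while the _relaxed forms order nothing against normal memory. A sketch in which every device name, offset and bit is hypothetical:

    static void foo_kick(struct foo_dev *dev, u32 len)
    {
            dev->desc->len = len;                       /* normal memory store */
            writel(FOO_GO, dev->regs + FOO_DB_OFF);     /* fence w,o orders it first */

            while (!(readl_relaxed(dev->regs + FOO_STAT_OFF) & FOO_DONE))
                    cpu_relax();                        /* pure polling, no fences */
    }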
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 5d7e3a2b4e3b..bb4091ff4a21 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -11,4 +11,4 @@
#include <asm-generic/mmiowb.h>
-#endif /* ASM_RISCV_MMIOWB_H */
+#endif /* _ASM_RISCV_MMIOWB_H */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 151476fb58cb..967eacb01ab5 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -10,6 +10,9 @@
#ifndef __ASSEMBLY__
typedef struct {
+#ifndef CONFIG_MMU
+ unsigned long end_brk;
+#endif
void *vdso;
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 3db261c4810f..ac699246ae7e 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -88,8 +88,14 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
+#ifdef CONFIG_MMU
extern unsigned long va_pa_offset;
extern unsigned long pfn_base;
+#define ARCH_PFN_OFFSET (pfn_base)
+#else
+#define va_pa_offset 0
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
@@ -112,11 +118,9 @@ extern unsigned long min_low_pfn;
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
- (((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
+ (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
#endif
-#define ARCH_PFN_OFFSET (pfn_base)
-
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
index 5ac8daa1cc36..1c473a1bd986 100644
--- a/arch/riscv/include/asm/pci.h
+++ b/arch/riscv/include/asm/pci.h
@@ -3,8 +3,8 @@
* Copyright (C) 2016 SiFive
*/
-#ifndef __ASM_RISCV_PCI_H
-#define __ASM_RISCV_PCI_H
+#ifndef _ASM_RISCV_PCI_H
+#define _ASM_RISCV_PCI_H
#include <linux/types.h>
#include <linux/slab.h>
@@ -34,4 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
}
#endif /* CONFIG_PCI */
-#endif /* __ASM_PCI_H */
+#endif /* _ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index d59ea92285ec..3f601ee8233f 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <asm/tlb.h>
+#ifdef CONFIG_MMU
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -81,5 +82,6 @@ do { \
pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index d3221017194d..7ff0ed4f292e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -25,6 +25,7 @@
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
+#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
@@ -32,7 +33,6 @@
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
@@ -62,6 +62,12 @@
#define PAGE_TABLE __pgprot(_PAGE_TABLE)
+/*
+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+ * change the properties of memory regions.
+ */
+#define _PAGE_IOREMAP _PAGE_KERNEL
+
extern pgd_t swapper_pg_dir[];
/* MAP_PRIVATE permissions: xwr (copy-on-write) */
@@ -84,42 +90,6 @@ extern pgd_t swapper_pg_dir[];
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
-#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END (PAGE_OFFSET - 1)
-#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
-#define PCI_IO_SIZE SZ_16M
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
- (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END (VMALLOC_START - 1)
-#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-#define PCI_IO_END VMEMMAP_START
-#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
-#define FIXADDR_TOP PCI_IO_START
-
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE PMD_SIZE
-#else
-#define FIXADDR_SIZE PGDIR_SIZE
-#endif
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero,
- * used for zero-mapped memory areas, etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
static inline int pmd_present(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -430,11 +400,34 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#define kern_addr_valid(addr) (1) /* FIXME */
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END (PAGE_OFFSET - 1)
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
-extern void *dtb_early_va;
-extern void setup_bootmem(void);
-extern void paging_init(void);
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+ (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END (VMALLOC_START - 1)
+#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
+
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE SZ_16M
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE PMD_SIZE
+#else
+#define FIXADDR_SIZE PGDIR_SIZE
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/*
* Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
@@ -446,6 +439,31 @@ extern void paging_init(void);
#define TASK_SIZE FIXADDR_START
#endif
+#else /* CONFIG_MMU */
+
+#define PAGE_KERNEL __pgprot(0)
+#define swapper_pg_dir NULL
+#define VMALLOC_START 0
+
+#define TASK_SIZE 0xffffffffUL
+
+#endif /* !CONFIG_MMU */
+
+#define kern_addr_valid(addr) (1) /* FIXME */
+
+extern void *dtb_early_va;
+void setup_bootmem(void);
+void paging_init(void);
+
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
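The new _PAGE_IOREMAP define pairs with the GENERIC_IOREMAP selection in the Kconfig hunk earlier: the arch-private ioremap()/iounmap() removed from asm/io.h are replaced by the generic implementation, which creates mappings with this protection value. Since the ISA cannot yet express PMAs in the page tables, it is plain _PAGE_KERNEL, which is also why ioremap_nocache/_wc/_wt collapsed to ioremap(). An illustrative caller ('res' assumed to be a struct resource *):

    void __iomem *regs = ioremap(res->start, resource_size(res));
    if (!regs)
            return -ENOMEM;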
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index f539149d04c2..3ddb798264f1 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -42,7 +42,7 @@ struct thread_struct {
((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
- ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
-#define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index d48d1e13973c..ee49f80c9533 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -12,7 +12,7 @@
#ifndef __ASSEMBLY__
struct pt_regs {
- unsigned long sepc;
+ unsigned long epc;
unsigned long ra;
unsigned long sp;
unsigned long gp;
@@ -44,10 +44,10 @@ struct pt_regs {
unsigned long t4;
unsigned long t5;
unsigned long t6;
- /* Supervisor CSRs */
- unsigned long sstatus;
- unsigned long sbadaddr;
- unsigned long scause;
+ /* Supervisor/Machine CSRs */
+ unsigned long status;
+ unsigned long badaddr;
+ unsigned long cause;
/* a0 value before the syscall */
unsigned long orig_a0;
};
@@ -58,18 +58,18 @@ struct pt_regs {
#define REG_FMT "%08lx"
#endif
-#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
+#define user_mode(regs) (((regs)->status & SR_PP) == 0)
/* Helpers for working with the instruction pointer */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
- return regs->sepc;
+ return regs->epc;
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- regs->sepc = val;
+ regs->epc = val;
}
#define profile_pc(regs) instruction_pointer(regs)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 21134b3ef404..2570c1e683d3 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+#ifdef CONFIG_RISCV_SBI
#define SBI_SET_TIMER 0
#define SBI_CONSOLE_PUTCHAR 1
#define SBI_CONSOLE_GETCHAR 2
@@ -93,5 +94,11 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
{
SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
}
-
-#endif
+#else /* CONFIG_RISCV_SBI */
+/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_set_timer(uint64_t stime_value);
+void sbi_clear_ipi(void);
+void sbi_send_ipi(const unsigned long *hart_mask);
+void sbi_remote_fence_i(const unsigned long *hart_mask);
+#endif /* CONFIG_RISCV_SBI */
+#endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/include/asm/seccomp.h b/arch/riscv/include/asm/seccomp.h
new file mode 100644
index 000000000000..bf7744ee3b3d
--- /dev/null
+++ b/arch/riscv/include/asm/seccomp.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
index b58ba2d9ed6e..45a7018a8118 100644
--- a/arch/riscv/include/asm/sparsemem.h
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SPARSEMEM_H
-#define __ASM_SPARSEMEM_H
+#ifndef _ASM_RISCV_SPARSEMEM_H
+#define _ASM_RISCV_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
#define MAX_PHYSMEM_BITS CONFIG_PA_BITS
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
-#endif /* __ASM_SPARSEMEM_H */
+#endif /* _ASM_RISCV_SPARSEMEM_H */
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
index 888cbf8e7111..f398e7638dd6 100644
--- a/arch/riscv/include/asm/spinlock_types.h
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -22,4 +22,4 @@ typedef struct {
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
-#endif
+#endif /* _ASM_RISCV_SPINLOCK_TYPES_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index ee4f0ac62c9d..407bcc96a710 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -17,19 +17,19 @@ extern void __fstate_restore(struct task_struct *restore_from);
static inline void __fstate_clean(struct pt_regs *regs)
{
- regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+ regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
}
static inline void fstate_off(struct task_struct *task,
struct pt_regs *regs)
{
- regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
+ regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
}
static inline void fstate_save(struct task_struct *task,
struct pt_regs *regs)
{
- if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
+ if ((regs->status & SR_FS) == SR_FS_DIRTY) {
__fstate_save(task);
__fstate_clean(regs);
}
@@ -38,7 +38,7 @@ static inline void fstate_save(struct task_struct *task,
static inline void fstate_restore(struct task_struct *task,
struct pt_regs *regs)
{
- if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
+ if ((regs->status & SR_FS) != SR_FS_OFF) {
__fstate_restore(task);
__fstate_clean(regs);
}
@@ -50,7 +50,7 @@ static inline void __switch_to_aux(struct task_struct *prev,
struct pt_regs *regs;
regs = task_pt_regs(prev);
- if (unlikely(regs->sstatus & SR_SD))
+ if (unlikely(regs->status & SR_SD))
fstate_save(prev, regs);
fstate_restore(next, task_pt_regs(next));
}
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 905372d7eeb8..1dd12a0cbb2b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -75,6 +75,7 @@ struct thread_info {
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
+#define TIF_SECCOMP 8 /* syscall secure computing */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -82,11 +83,13 @@ struct thread_info {
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
#define _TIF_SYSCALL_WORK \
- (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP)
#endif /* _ASM_RISCV_THREAD_INFO_H */
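Folding _TIF_SECCOMP into _TIF_SYSCALL_WORK routes filtered tasks through the syscall slow path, where the seccomp check runs before dispatch. An illustrative fragment only; the real call site lives in the (not shown) entry/ptrace code and the secure_computing() signature varies across kernel versions:

    if (test_thread_flag(TIF_SECCOMP) && secure_computing(NULL) == -1)
            return -1;      /* filter denied or killed the syscall */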
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index c7ef131b9e4c..bad2a7c2cda5 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -7,12 +7,25 @@
#define _ASM_RISCV_TIMEX_H
#include <asm/csr.h>
+#include <asm/mmio.h>
typedef unsigned long cycles_t;
+extern u64 __iomem *riscv_time_val;
+extern u64 __iomem *riscv_time_cmp;
+
+#ifdef CONFIG_64BIT
+#define mmio_get_cycles() readq_relaxed(riscv_time_val)
+#else
+#define mmio_get_cycles() readl_relaxed(riscv_time_val)
+#define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1)
+#endif
+
static inline cycles_t get_cycles(void)
{
- return csr_read(CSR_TIME);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ return csr_read(CSR_TIME);
+ return mmio_get_cycles();
}
#define get_cycles get_cycles
@@ -24,7 +37,9 @@ static inline u64 get_cycles64(void)
#else /* CONFIG_64BIT */
static inline u32 get_cycles_hi(void)
{
- return csr_read(CSR_TIMEH);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ return csr_read(CSR_TIMEH);
+ return mmio_get_cycles_hi();
}
static inline u64 get_cycles64(void)
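The 32-bit body of get_cycles64() is truncated in the hunk above; it composes the two halves with the usual high/low/high re-read to guard against rollover. A sketch consistent with the helpers just defined:

    static inline u64 get_cycles64(void)
    {
            u32 hi, lo;

            do {
                    hi = get_cycles_hi();
                    lo = get_cycles();
            } while (hi != get_cycles_hi());

            return ((u64)hi << 32) | lo;
    }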
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index f02188a5b0f4..394cfbccdcd9 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -10,6 +10,7 @@
#include <linux/mm_types.h>
#include <asm/smp.h>
+#ifdef CONFIG_MMU
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
@@ -20,14 +21,19 @@ static inline void local_flush_tlb_page(unsigned long addr)
{
__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#define local_flush_tlb_page(addr) do { } while (0)
+#endif /* CONFIG_MMU */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
void flush_tlb_all(void);
void flush_tlb_mm(struct mm_struct *mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
-#else /* CONFIG_SMP */
+#else /* CONFIG_SMP && CONFIG_MMU */
+
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
@@ -38,7 +44,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
}
#define flush_tlb_mm(mm) flush_tlb_all()
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
/* Flush a range of kernel pages */
static inline void flush_tlb_kernel_range(unsigned long start,
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index e076437cfafe..f462a183a9c2 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -11,6 +11,7 @@
/*
* User space memory access functions
*/
+#ifdef CONFIG_MMU
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
@@ -475,4 +476,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
__ret; \
})
+#else /* CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_UACCESS_H */
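On !MMU kernels asm-generic/uaccess.h takes over, and with UACCESS_MEMCPY selected in Kconfig the user-copy primitives reduce to memcpy over the shared address space. Roughly the shape of the generic fallback (paraphrased, not part of this diff):

    static inline __must_check unsigned long
    raw_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            memcpy(to, (__force const void *)from, n);
            return 0;
    }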
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index 644a00ce6e2e..d696d6610231 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -9,8 +9,8 @@
* (at your option) any later version.
*/
-#ifndef _UAPI_ASM_ELF_H
-#define _UAPI_ASM_ELF_H
+#ifndef _UAPI_ASM_RISCV_ELF_H
+#define _UAPI_ASM_RISCV_ELF_H
#include <asm/ptrace.h>
@@ -95,4 +95,4 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_32_PCREL 57
-#endif /* _UAPI_ASM_ELF_H */
+#endif /* _UAPI_ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
index 4e7646077056..dee98ee28318 100644
--- a/arch/riscv/include/uapi/asm/hwcap.h
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -5,8 +5,8 @@
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2017 SiFive
*/
-#ifndef __UAPI_ASM_HWCAP_H
-#define __UAPI_ASM_HWCAP_H
+#ifndef _UAPI_ASM_RISCV_HWCAP_H
+#define _UAPI_ASM_RISCV_HWCAP_H
/*
* Linux saves the floating-point registers according to the ISA Linux is
@@ -22,4 +22,4 @@
#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
-#endif
+#endif /* _UAPI_ASM_RISCV_HWCAP_H */
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
index 411dd7b52ed6..44eb993950e5 100644
--- a/arch/riscv/include/uapi/asm/ucontext.h
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -5,8 +5,8 @@
*
* This file was copied from arch/arm64/include/uapi/asm/ucontext.h
*/
-#ifndef _UAPI__ASM_UCONTEXT_H
-#define _UAPI__ASM_UCONTEXT_H
+#ifndef _UAPI_ASM_RISCV_UCONTEXT_H
+#define _UAPI_ASM_RISCV_UCONTEXT_H
#include <linux/types.h>
@@ -31,4 +31,4 @@ struct ucontext {
struct sigcontext uc_mcontext;
};
-#endif /* _UAPI__ASM_UCONTEXT_H */
+#endif /* _UAPI_ASM_RISCV_UCONTEXT_H */
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 696020ff72db..f40205cb9a22 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -25,10 +25,10 @@ obj-y += time.o
obj-y += traps.o
obj-y += riscv_ksyms.o
obj-y += stacktrace.o
-obj-y += vdso.o
obj-y += cacheinfo.o
-obj-y += vdso/
+obj-$(CONFIG_MMU) += vdso.o vdso/
+obj-$(CONFIG_RISCV_M_MODE) += clint.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
@@ -41,5 +41,6 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_RISCV_SBI) += sbi.o
clean:
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 9f5628c38ac9..07cb9c10de4e 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -71,7 +71,7 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
DEFINE(PT_SIZE, sizeof(struct pt_regs));
- OFFSET(PT_SEPC, pt_regs, sepc);
+ OFFSET(PT_EPC, pt_regs, epc);
OFFSET(PT_RA, pt_regs, ra);
OFFSET(PT_FP, pt_regs, s0);
OFFSET(PT_S0, pt_regs, s0);
@@ -105,9 +105,9 @@ void asm_offsets(void)
OFFSET(PT_T6, pt_regs, t6);
OFFSET(PT_GP, pt_regs, gp);
OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
- OFFSET(PT_SSTATUS, pt_regs, sstatus);
- OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
- OFFSET(PT_SCAUSE, pt_regs, scause);
+ OFFSET(PT_STATUS, pt_regs, status);
+ OFFSET(PT_BADADDR, pt_regs, badaddr);
+ OFFSET(PT_CAUSE, pt_regs, cause);
/*
* THREAD_{F,X}* might be larger than a S-type offset can handle, but
diff --git a/arch/riscv/kernel/clint.c b/arch/riscv/kernel/clint.c
new file mode 100644
index 000000000000..3647980d14c3
--- /dev/null
+++ b/arch/riscv/kernel/clint.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Christoph Hellwig.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/types.h>
+#include <asm/clint.h>
+#include <asm/csr.h>
+#include <asm/timex.h>
+#include <asm/smp.h>
+
+/*
+ * This is the layout used by the SiFive CLINT, which is also shared by the
+ * QEMU virt platform and the Kendryte K210, at least.
+ */
+#define CLINT_IPI_OFF 0
+#define CLINT_TIME_CMP_OFF 0x4000
+#define CLINT_TIME_VAL_OFF 0xbff8
+
+u32 __iomem *clint_ipi_base;
+
+void clint_init_boot_cpu(void)
+{
+ struct device_node *np;
+ void __iomem *base;
+
+	np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
+	if (!np)
+		panic("clint not found");
+
+ base = of_iomap(np, 0);
+ if (!base)
+ panic("could not map CLINT");
+
+ clint_ipi_base = base + CLINT_IPI_OFF;
+ riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
+ riscv_time_val = base + CLINT_TIME_VAL_OFF;
+
+ clint_clear_ipi(boot_cpu_hartid);
+}
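
The three offsets above are the whole CLINT register map: one 32-bit software-interrupt (IPI) word per hart at the base, the per-hart timer compare registers at 0x4000, and the free-running timer value at 0xbff8. A minimal sketch of the IPI helpers this layout implies follows; the real definitions live in <asm/clint.h>, and the writel()-based bodies here are an assumption, not a quote of that header:

	static inline void clint_send_ipi_single(unsigned long hartid)
	{
		/* one 32-bit IPI word per hart, indexed by hartid */
		writel(1, clint_ipi_base + hartid);
	}

	static inline void clint_clear_ipi(unsigned long hartid)
	{
		/* clearing the word retires the pending software interrupt */
		writel(0, clint_ipi_base + hartid);
	}
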
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 7da3c6a93abd..40a3c442ac5f 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -46,51 +46,12 @@ int riscv_of_processor_hartid(struct device_node *node)
#ifdef CONFIG_PROC_FS
-static void print_isa(struct seq_file *f, const char *orig_isa)
+static void print_isa(struct seq_file *f, const char *isa)
{
- static const char *ext = "mafdcsu";
- const char *isa = orig_isa;
- const char *e;
-
- /*
- * Linux doesn't support rv32e or rv128i, and we only support booting
- * kernels on harts with the same ISA that the kernel is compiled for.
- */
-#if defined(CONFIG_32BIT)
- if (strncmp(isa, "rv32i", 5) != 0)
- return;
-#elif defined(CONFIG_64BIT)
- if (strncmp(isa, "rv64i", 5) != 0)
- return;
-#endif
-
- /* Print the base ISA, as we already know it's legal. */
+ /* Print the entire ISA as it is */
seq_puts(f, "isa\t\t: ");
- seq_write(f, isa, 5);
- isa += 5;
-
- /*
- * Check the rest of the ISA string for valid extensions, printing those
- * we find. RISC-V ISA strings define an order, so we only print the
- * extension bits when they're in order. Hide the supervisor (S)
- * extension from userspace as it's not accessible from there.
- */
- for (e = ext; *e != '\0'; ++e) {
- if (isa[0] == e[0]) {
- if (isa[0] != 's')
- seq_write(f, isa, 1);
-
- isa++;
- }
- }
+ seq_write(f, isa, strlen(isa));
seq_puts(f, "\n");
-
- /*
- * If we were given an unsupported ISA in the device tree then print
- * a bit of info describing what went wrong.
- */
- if (isa[0] != '\0')
- pr_info("unsupported ISA \"%s\" in device tree\n", orig_isa);
}
static void print_mmu(struct seq_file *f, const char *mmu_type)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 8ca479831142..a1349ca64669 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -26,14 +26,14 @@
/*
* If coming from userspace, preserve the user thread pointer and load
- * the kernel thread pointer. If we came from the kernel, sscratch
- * will contain 0, and we should continue on the current TP.
+ * the kernel thread pointer. If we came from the kernel, the scratch
+ * register will contain 0, and we should continue on the current TP.
*/
- csrrw tp, CSR_SSCRATCH, tp
+ csrrw tp, CSR_SCRATCH, tp
bnez tp, _save_context
_restore_kernel_tpsp:
- csrr tp, CSR_SSCRATCH
+ csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
REG_S sp, TASK_TI_USER_SP(tp)
@@ -79,16 +79,16 @@ _save_context:
li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp)
- csrrc s1, CSR_SSTATUS, t0
- csrr s2, CSR_SEPC
- csrr s3, CSR_STVAL
- csrr s4, CSR_SCAUSE
- csrr s5, CSR_SSCRATCH
+ csrrc s1, CSR_STATUS, t0
+ csrr s2, CSR_EPC
+ csrr s3, CSR_TVAL
+ csrr s4, CSR_CAUSE
+ csrr s5, CSR_SCRATCH
REG_S s0, PT_SP(sp)
- REG_S s1, PT_SSTATUS(sp)
- REG_S s2, PT_SEPC(sp)
- REG_S s3, PT_SBADADDR(sp)
- REG_S s4, PT_SCAUSE(sp)
+ REG_S s1, PT_STATUS(sp)
+ REG_S s2, PT_EPC(sp)
+ REG_S s3, PT_BADADDR(sp)
+ REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
.endm
@@ -97,7 +97,7 @@ _save_context:
* registers from the stack.
*/
.macro RESTORE_ALL
- REG_L a0, PT_SSTATUS(sp)
+ REG_L a0, PT_STATUS(sp)
/*
* The current load reservation is effectively part of the processor's
* state, in the sense that load reservations cannot be shared between
@@ -115,11 +115,11 @@ _save_context:
* completes, implementations are allowed to expand reservations to be
* arbitrarily large.
*/
- REG_L a2, PT_SEPC(sp)
- REG_SC x0, a2, PT_SEPC(sp)
+ REG_L a2, PT_EPC(sp)
+ REG_SC x0, a2, PT_EPC(sp)
- csrw CSR_SSTATUS, a0
- csrw CSR_SEPC, a2
+ csrw CSR_STATUS, a0
+ csrw CSR_EPC, a2
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
@@ -163,10 +163,10 @@ ENTRY(handle_exception)
SAVE_ALL
/*
- * Set sscratch register to 0, so that if a recursive exception
+ * Set the scratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel
*/
- csrw CSR_SSCRATCH, x0
+ csrw CSR_SCRATCH, x0
/* Load the global pointer */
.option push
@@ -185,11 +185,13 @@ ENTRY(handle_exception)
move a0, sp /* pt_regs */
tail do_IRQ
1:
- /* Exceptions run with interrupts enabled or disabled
- depending on the state of sstatus.SR_SPIE */
- andi t0, s1, SR_SPIE
+ /*
+ * Exceptions run with interrupts enabled or disabled depending on the
+ * state of SR_PIE in m/sstatus.
+ */
+ andi t0, s1, SR_PIE
beqz t0, 1f
- csrs CSR_SSTATUS, SR_SIE
+ csrs CSR_STATUS, SR_IE
1:
/* Handle syscalls */
@@ -217,7 +219,7 @@ handle_syscall:
* scall instruction on sret
*/
addi s2, s2, 0x4
- REG_S s2, PT_SEPC(sp)
+ REG_S s2, PT_EPC(sp)
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_WORK
@@ -226,8 +228,25 @@ check_syscall_nr:
/* Check to make sure we don't jump to a bogus syscall number. */
li t0, __NR_syscalls
la s0, sys_ni_syscall
- /* Syscall number held in a7 */
- bgeu a7, t0, 1f
+ /*
+	 * The tracer can change the syscall number to a valid or invalid
+	 * value, and we use the syscall_set_nr() helper in
+	 * syscall_trace_enter(), so we cannot trust the current value in
+	 * a7 and have to reload it from the current task's pt_regs.
+ */
+ REG_L a7, PT_A7(sp)
+ /*
+	 * The syscall number is held in a7.
+	 * If it is above the highest allowed value, redirect to ni_syscall.
+ */
+ bge a7, t0, 1f
+ /*
+ * Check if syscall is rejected by tracer or seccomp, i.e., a7 == -1.
+ * If yes, we pretend it was executed.
+ */
+ li t1, -1
+ beq a7, t1, ret_from_syscall_rejected
+ /* Call syscall */
la s0, sys_call_table
slli t0, a7, RISCV_LGPTR
add s0, s0, t0
@@ -238,15 +257,27 @@ check_syscall_nr:
ret_from_syscall:
/* Set user a0 to kernel a0 */
REG_S a0, PT_A0(sp)
+ /*
+	 * We didn't execute the actual syscall; seccomp has already set
+	 * the return value in the current task's pt_regs (if it was
+	 * configured with SECCOMP_RET_ERRNO/TRACE).
+ */
+ret_from_syscall_rejected:
/* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_exit
ret_from_exception:
- REG_L s0, PT_SSTATUS(sp)
- csrc CSR_SSTATUS, SR_SIE
+ REG_L s0, PT_STATUS(sp)
+ csrc CSR_STATUS, SR_IE
+#ifdef CONFIG_RISCV_M_MODE
+	/* The MPP value is too large to be used as an immediate arg for andi */
+ li t0, SR_MPP
+ and s0, s0, t0
+#else
andi s0, s0, SR_SPP
+#endif
bnez s0, resume_kernel
resume_userspace:
@@ -260,14 +291,18 @@ resume_userspace:
REG_S s0, TASK_TI_KERNEL_SP(tp)
/*
- * Save TP into sscratch, so we can find the kernel data structures
- * again.
+	 * Save TP into the scratch register, so we can find the kernel data
+ * structures again.
*/
- csrw CSR_SSCRATCH, tp
+ csrw CSR_SCRATCH, tp
restore_all:
RESTORE_ALL
+#ifdef CONFIG_RISCV_M_MODE
+ mret
+#else
sret
+#endif
#if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel:
@@ -287,7 +322,7 @@ work_pending:
bnez s1, work_resched
work_notifysig:
/* Handle pending signals and notify-resume requests */
- csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
+ csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */
tail do_notify_resume
@@ -386,6 +421,10 @@ ENTRY(__switch_to)
ret
ENDPROC(__switch_to)
+#ifndef CONFIG_MMU
+#define do_page_fault do_trap_unknown
+#endif
+
.section ".rodata"
/* Exception vector table */
ENTRY(excp_vect_table)
@@ -407,3 +446,10 @@ ENTRY(excp_vect_table)
RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end:
END(excp_vect_table)
+
+#ifndef CONFIG_MMU
+ENTRY(__user_rt_sigreturn)
+ li a7, __NR_rt_sigreturn
+ scall
+END(__user_rt_sigreturn)
+#endif
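
Restated in C for clarity, the new syscall-number handling above amounts to the following sketch (this is not kernel code; the signed comparison mirrors the bge so that a7 == -1 falls through to the rejection check, and the argument registers are assumed to have been restored from pt_regs already):

	long nr = regs->a7;	/* reload: a tracer may have rewritten it */

	if (nr >= __NR_syscalls) {
		regs->a0 = sys_ni_syscall();	/* bogus syscall number */
	} else if (nr == -1) {
		/* rejected by ptrace/seccomp; a0 already holds the result */
	} else {
		regs->a0 = sys_call_table[nr](regs->orig_a0, regs->a1,
					      regs->a2, regs->a3,
					      regs->a4, regs->a5);
	}
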
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
index 631d31540660..dd2205473de7 100644
--- a/arch/riscv/kernel/fpu.S
+++ b/arch/riscv/kernel/fpu.S
@@ -23,7 +23,7 @@ ENTRY(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
- csrs CSR_SSTATUS, t1
+ csrs CSR_STATUS, t1
frcsr t0
fsd f0, TASK_THREAD_F0_F0(a0)
fsd f1, TASK_THREAD_F1_F0(a0)
@@ -58,7 +58,7 @@ ENTRY(__fstate_save)
fsd f30, TASK_THREAD_F30_F0(a0)
fsd f31, TASK_THREAD_F31_F0(a0)
sw t0, TASK_THREAD_FCSR_F0(a0)
- csrc CSR_SSTATUS, t1
+ csrc CSR_STATUS, t1
ret
ENDPROC(__fstate_save)
@@ -67,7 +67,7 @@ ENTRY(__fstate_restore)
add a0, a0, a2
li t1, SR_FS
lw t0, TASK_THREAD_FCSR_F0(a0)
- csrs CSR_SSTATUS, t1
+ csrs CSR_STATUS, t1
fld f0, TASK_THREAD_F0_F0(a0)
fld f1, TASK_THREAD_F1_F0(a0)
fld f2, TASK_THREAD_F2_F0(a0)
@@ -101,6 +101,6 @@ ENTRY(__fstate_restore)
fld f30, TASK_THREAD_F30_F0(a0)
fld f31, TASK_THREAD_F31_F0(a0)
fscsr t0
- csrc CSR_SSTATUS, t1
+ csrc CSR_STATUS, t1
ret
ENDPROC(__fstate_restore)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 72f89b7590dd..84a6f0a4b120 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -11,6 +11,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/csr.h>
+#include <asm/hwcap.h>
#include <asm/image.h>
__INIT
@@ -47,8 +48,22 @@ ENTRY(_start)
.global _start_kernel
_start_kernel:
/* Mask all interrupts */
- csrw CSR_SIE, zero
- csrw CSR_SIP, zero
+ csrw CSR_IE, zero
+ csrw CSR_IP, zero
+
+#ifdef CONFIG_RISCV_M_MODE
+ /* flush the instruction cache */
+ fence.i
+
+ /* Reset all registers except ra, a0, a1 */
+ call reset_regs
+
+ /*
+ * The hartid in a0 is expected later on, and we have no firmware
+ * to hand it to us.
+ */
+ csrr a0, CSR_MHARTID
+#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */
.option push
@@ -61,7 +76,7 @@ _start_kernel:
* floating point in kernel space
*/
li t0, SR_FS
- csrc CSR_SSTATUS, t0
+ csrc CSR_STATUS, t0
#ifdef CONFIG_SMP
li t0, CONFIG_NR_CPUS
@@ -94,8 +109,10 @@ clear_bss_done:
la sp, init_thread_union + THREAD_SIZE
mv a0, s1
call setup_vm
+#ifdef CONFIG_MMU
la a0, early_pg_dir
call relocate
+#endif /* CONFIG_MMU */
/* Restore C environment */
la tp, init_task
@@ -106,6 +123,7 @@ clear_bss_done:
call parse_dtb
tail start_kernel
+#ifdef CONFIG_MMU
relocate:
/* Relocate return address */
li a1, PAGE_OFFSET
@@ -116,7 +134,7 @@ relocate:
/* Point stvec to virtual address of instruction after satp write */
la a2, 1f
add a2, a2, a1
- csrw CSR_STVEC, a2
+ csrw CSR_TVEC, a2
/* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT
@@ -138,7 +156,7 @@ relocate:
1:
/* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park
- csrw CSR_STVEC, a0
+ csrw CSR_TVEC, a0
/* Reload the global pointer */
.option push
@@ -156,12 +174,13 @@ relocate:
sfence.vma
ret
+#endif /* CONFIG_MMU */
.Lsecondary_start:
#ifdef CONFIG_SMP
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park
- csrw CSR_STVEC, a3
+ csrw CSR_TVEC, a3
slli a3, a0, LGREG
la a1, __cpu_up_stack_pointer
@@ -181,9 +200,11 @@ relocate:
beqz tp, .Lwait_for_cpu_up
fence
+#ifdef CONFIG_MMU
/* Enable virtual memory and relocate to virtual address */
la a0, swapper_pg_dir
call relocate
+#endif
tail smp_callin
#endif
@@ -195,6 +216,85 @@ relocate:
j .Lsecondary_park
END(_start)
+#ifdef CONFIG_RISCV_M_MODE
+ENTRY(reset_regs)
+ li sp, 0
+ li gp, 0
+ li tp, 0
+ li t0, 0
+ li t1, 0
+ li t2, 0
+ li s0, 0
+ li s1, 0
+ li a2, 0
+ li a3, 0
+ li a4, 0
+ li a5, 0
+ li a6, 0
+ li a7, 0
+ li s2, 0
+ li s3, 0
+ li s4, 0
+ li s5, 0
+ li s6, 0
+ li s7, 0
+ li s8, 0
+ li s9, 0
+ li s10, 0
+ li s11, 0
+ li t3, 0
+ li t4, 0
+ li t5, 0
+ li t6, 0
+	csrw	CSR_SCRATCH, 0
+
+#ifdef CONFIG_FPU
+ csrr t0, CSR_MISA
+ andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
+	beqz	t0, .Lreset_regs_done
+
+ li t1, SR_FS
+ csrs CSR_STATUS, t1
+ fmv.s.x f0, zero
+ fmv.s.x f1, zero
+ fmv.s.x f2, zero
+ fmv.s.x f3, zero
+ fmv.s.x f4, zero
+ fmv.s.x f5, zero
+ fmv.s.x f6, zero
+ fmv.s.x f7, zero
+ fmv.s.x f8, zero
+ fmv.s.x f9, zero
+ fmv.s.x f10, zero
+ fmv.s.x f11, zero
+ fmv.s.x f12, zero
+ fmv.s.x f13, zero
+ fmv.s.x f14, zero
+ fmv.s.x f15, zero
+ fmv.s.x f16, zero
+ fmv.s.x f17, zero
+ fmv.s.x f18, zero
+ fmv.s.x f19, zero
+ fmv.s.x f20, zero
+ fmv.s.x f21, zero
+ fmv.s.x f22, zero
+ fmv.s.x f23, zero
+ fmv.s.x f24, zero
+ fmv.s.x f25, zero
+ fmv.s.x f26, zero
+ fmv.s.x f27, zero
+ fmv.s.x f28, zero
+ fmv.s.x f29, zero
+ fmv.s.x f30, zero
+ fmv.s.x f31, zero
+ csrw fcsr, 0
+ /* note that the caller must clear SR_FS */
+#endif /* CONFIG_FPU */
+.Lreset_regs_done:
+ ret
+END(reset_regs)
+#endif /* CONFIG_RISCV_M_MODE */
+
__PAGE_ALIGNED_BSS
/* Empty zero page */
.balign PAGE_SIZE
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index fffac6ddb0e0..3f07a91d5afb 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -11,13 +11,6 @@
#include <linux/seq_file.h>
#include <asm/smp.h>
-/*
- * Possible interrupt causes:
- */
-#define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
-#define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
-#define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
-
int arch_show_interrupts(struct seq_file *p, int prec)
{
show_ipi_stats(p, prec);
@@ -29,12 +22,12 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
- switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
- case INTERRUPT_CAUSE_TIMER:
+ switch (regs->cause & ~CAUSE_IRQ_FLAG) {
+ case IRQ_TIMER:
riscv_timer_interrupt();
break;
#ifdef CONFIG_SMP
- case INTERRUPT_CAUSE_SOFTWARE:
+ case IRQ_SOFT:
/*
* We only use software interrupts to pass IPIs, so if a non-SMP
* system gets one, then we don't know what to do.
@@ -42,11 +35,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
riscv_software_interrupt();
break;
#endif
- case INTERRUPT_CAUSE_EXTERNAL:
+ case IRQ_EXT:
handle_arch_irq(regs);
break;
default:
- pr_alert("unexpected interrupt cause 0x%lx", regs->scause);
+ pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
BUG();
}
irq_exit();
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 8d2804f05cf9..cf190197a22f 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -67,7 +67,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
return;
fp = regs->s0;
- perf_callchain_store(entry, regs->sepc);
+ perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 85e3c39bb60b..95a3031e5c7c 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -35,8 +35,8 @@ void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
- pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
- regs->sepc, regs->ra, regs->sp);
+ pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+ regs->epc, regs->ra, regs->sp);
pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
regs->gp, regs->tp, regs->t0);
pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
@@ -58,23 +58,23 @@ void show_regs(struct pt_regs *regs)
pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
regs->t5, regs->t6);
- pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
- regs->sstatus, regs->sbadaddr, regs->scause);
+ pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
+ regs->status, regs->badaddr, regs->cause);
}
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
- regs->sstatus = SR_SPIE;
+ regs->status = SR_PIE;
if (has_fpu) {
- regs->sstatus |= SR_FS_INITIAL;
+ regs->status |= SR_FS_INITIAL;
/*
* Restore the initial value to the FP register
* before starting the user program.
*/
fstate_restore(current, regs);
}
- regs->sepc = pc;
+ regs->epc = pc;
regs->sp = sp;
set_fs(USER_DS);
}
@@ -110,7 +110,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
const register unsigned long gp __asm__ ("gp");
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp;
- childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
+ /* Supervisor/Machine, irqs on: */
+ childregs->status = SR_PP | SR_PIE;
p->thread.ra = (unsigned long)ret_from_kernel_thread;
p->thread.s[0] = usp; /* fn */
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 1252113ef8b2..407464201b91 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -154,6 +154,16 @@ __visible void do_syscall_trace_enter(struct pt_regs *regs)
if (tracehook_report_syscall_entry(regs))
syscall_set_nr(current, regs, -1);
+ /*
+	 * Do the secure computing check after ptrace; failures should be fast.
+	 * If this fails, we might already have a return value in a0 from
+	 * seccomp (via SECCOMP_RET_ERRNO/TRACE).
+ */
+ if (secure_computing() == -1) {
+ syscall_set_nr(current, regs, -1);
+ return;
+ }
+
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
index aa56bb135ec4..ee5878d968cc 100644
--- a/arch/riscv/kernel/reset.c
+++ b/arch/riscv/kernel/reset.c
@@ -5,12 +5,11 @@
#include <linux/reboot.h>
#include <linux/pm.h>
-#include <asm/sbi.h>
static void default_power_off(void)
{
- sbi_shutdown();
- while (1);
+ while (1)
+ wait_for_interrupt();
}
void (*pm_power_off)(void) = default_power_off;
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
new file mode 100644
index 000000000000..f6c7c3e82d28
--- /dev/null
+++ b/arch/riscv/kernel/sbi.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <asm/sbi.h>
+
+static void sbi_power_off(void)
+{
+ sbi_shutdown();
+}
+
+static int __init sbi_init(void)
+{
+ pm_power_off = sbi_power_off;
+ return 0;
+}
+early_initcall(sbi_init);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 845ae0e12115..365ff8420bfe 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -17,6 +17,7 @@
#include <linux/sched/task.h>
#include <linux/swiotlb.h>
+#include <asm/clint.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -67,6 +68,7 @@ void __init setup_arch(char **cmdline_p)
setup_bootmem();
paging_init();
unflatten_device_tree();
+ clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index d0f6f212f5df..17ba190e84a5 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -17,11 +17,16 @@
#include <asm/switch_to.h>
#include <asm/csr.h>
+extern u32 __user_rt_sigreturn[2];
+
#define DEBUG_SIG 0
struct rt_sigframe {
struct siginfo info;
struct ucontext uc;
+#ifndef CONFIG_MMU
+ u32 sigreturn_code[2];
+#endif
};
#ifdef CONFIG_FPU
@@ -124,7 +129,7 @@ badframe:
pr_info_ratelimited(
"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
task->comm, task_pid_nr(task), __func__,
- frame, (void *)regs->sepc, (void *)regs->sp);
+ frame, (void *)regs->epc, (void *)regs->sp);
}
force_sig(SIGSEGV);
return 0;
@@ -166,7 +171,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
return (void __user *)sp;
}
-
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
@@ -189,8 +193,19 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT;
/* Set up to return from userspace. */
+#ifdef CONFIG_MMU
regs->ra = (unsigned long)VDSO_SYMBOL(
current->mm->context.vdso, rt_sigreturn);
+#else
+ /*
+	 * For the nommu case we don't have a VDSO. Instead we copy two
+	 * instructions that invoke the rt_sigreturn syscall into the signal
+	 * frame on the user stack.
+ */
+ if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
+ sizeof(frame->sigreturn_code)))
+ return -EFAULT;
+ regs->ra = (unsigned long)&frame->sigreturn_code;
+#endif /* CONFIG_MMU */
/*
* Set up registers for signal handler.
@@ -199,7 +214,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
* We always pass siginfo and mcontext, regardless of SA_SIGINFO,
* since some things rely on this (e.g. glibc's debug/segfault.c).
*/
- regs->sepc = (unsigned long)ksig->ka.sa.sa_handler;
+ regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
regs->sp = (unsigned long)frame;
regs->a0 = ksig->sig; /* a0: signal number */
regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
@@ -208,7 +223,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
#if DEBUG_SIG
pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
current->comm, task_pid_nr(current), ksig->sig,
- (void *)regs->sepc, (void *)regs->ra, frame);
+ (void *)regs->epc, (void *)regs->ra, frame);
#endif
return 0;
@@ -220,10 +235,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret;
/* Are we from a system call? */
- if (regs->scause == EXC_SYSCALL) {
+ if (regs->cause == EXC_SYSCALL) {
/* Avoid additional syscall restarting via ret_from_exception */
- regs->scause = -1UL;
-
+ regs->cause = -1UL;
/* If so, check system call restarting.. */
switch (regs->a0) {
case -ERESTART_RESTARTBLOCK:
@@ -239,7 +253,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/* fallthrough */
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
- regs->sepc -= 0x4;
+ regs->epc -= 0x4;
break;
}
}
@@ -261,9 +275,9 @@ static void do_signal(struct pt_regs *regs)
}
/* Did we come from a system call? */
- if (regs->scause == EXC_SYSCALL) {
+ if (regs->cause == EXC_SYSCALL) {
/* Avoid additional syscall restarting via ret_from_exception */
- regs->scause = -1UL;
+ regs->cause = -1UL;
/* Restart the system call - no handlers present */
switch (regs->a0) {
@@ -271,12 +285,12 @@ static void do_signal(struct pt_regs *regs)
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
- regs->sepc -= 0x4;
+ regs->epc -= 0x4;
break;
case -ERESTART_RESTARTBLOCK:
regs->a0 = regs->orig_a0;
regs->a7 = __NR_restart_syscall;
- regs->sepc -= 0x4;
+ regs->epc -= 0x4;
break;
}
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 5c9ec78422c2..eb878abcaaf8 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -16,6 +16,7 @@
#include <linux/seq_file.h>
#include <linux/delay.h>
+#include <asm/clint.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -92,7 +93,10 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
smp_mb__after_atomic();
riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
- sbi_send_ipi(cpumask_bits(&hartid_mask));
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+ else
+ clint_send_ipi_mask(&hartid_mask);
}
static void send_ipi_single(int cpu, enum ipi_message_type op)
@@ -103,12 +107,18 @@ static void send_ipi_single(int cpu, enum ipi_message_type op)
set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic();
- sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
+ else
+ clint_send_ipi_single(hartid);
}
static inline void clear_ipi(void)
{
- csr_clear(CSR_SIP, SIE_SSIE);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ csr_clear(CSR_IP, IE_SIE);
+ else
+ clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
}
void riscv_software_interrupt(void)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 261f4087cc39..8bc01f0ca73b 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
+#include <asm/clint.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -137,6 +138,9 @@ asmlinkage __visible void __init smp_callin(void)
{
struct mm_struct *mm = &init_mm;
+ if (!IS_ENABLED(CONFIG_RISCV_SBI))
+ clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+
/* All kernel threads share the same mm context. */
mmgrab(mm);
current->active_mm = mm;
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 473de3ae8bb7..f4cad5163bf2 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -41,7 +41,7 @@ void die(struct pt_regs *regs, const char *str)
print_modules();
show_regs(regs);
- ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV);
+ ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
#define DO_ERROR_INFO(name, signo, code, str) \
asmlinkage __visible void name(struct pt_regs *regs) \
{ \
- do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \
+ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
DO_ERROR_INFO(do_trap_unknown,
@@ -124,9 +124,9 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{
if (user_mode(regs))
- force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
- else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
- regs->sepc += get_break_insn_length(regs->sepc);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+ else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
+ regs->epc += get_break_insn_length(regs->epc);
else
die(regs, "Kernel BUG");
}
@@ -153,9 +153,9 @@ void __init trap_init(void)
* Set sup0 scratch register to 0, indicating to exception vector
* that we are presently executing in the kernel
*/
- csr_write(CSR_SSCRATCH, 0);
+ csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */
- csr_write(CSR_STVEC, &handle_exception);
+ csr_write(CSR_TVEC, &handle_exception);
/* Enable all interrupts */
- csr_write(CSR_SIE, -1);
+ csr_write(CSR_IE, -1);
}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 267feaa10f6a..47e7a8204460 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-lib-y += delay.o
-lib-y += memcpy.o
-lib-y += memset.o
-lib-y += uaccess.o
-
-lib-$(CONFIG_64BIT) += tishift.o
+lib-y += delay.o
+lib-y += memcpy.o
+lib-y += memset.o
+lib-$(CONFIG_MMU) += uaccess.o
+lib-$(CONFIG_64BIT) += tishift.o
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index ed2696c0143d..fecd65657a6f 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -18,7 +18,7 @@ ENTRY(__asm_copy_from_user)
/* Enable access to user memory */
li t6, SR_SUM
- csrs CSR_SSTATUS, t6
+ csrs CSR_STATUS, t6
add a3, a1, a2
/* Use word-oriented copy only if low-order bits match */
@@ -47,7 +47,7 @@ ENTRY(__asm_copy_from_user)
3:
/* Disable access to user memory */
- csrc CSR_SSTATUS, t6
+ csrc CSR_STATUS, t6
li a0, 0
ret
4: /* Edge case: unalignment */
@@ -72,7 +72,7 @@ ENTRY(__clear_user)
/* Enable access to user memory */
li t6, SR_SUM
- csrs CSR_SSTATUS, t6
+ csrs CSR_STATUS, t6
add a3, a0, a1
addi t0, a0, SZREG-1
@@ -94,7 +94,7 @@ ENTRY(__clear_user)
3:
/* Disable access to user memory */
- csrc CSR_SSTATUS, t6
+ csrc CSR_STATUS, t6
li a0, 0
ret
4: /* Edge case: unalignment */
@@ -114,11 +114,11 @@ ENDPROC(__clear_user)
/* Fixup code for __copy_user(10) and __clear_user(11) */
10:
/* Disable access to user memory */
- csrs CSR_SSTATUS, t6
+ csrs CSR_STATUS, t6
mv a0, a2
ret
11:
- csrs CSR_SSTATUS, t6
+ csrs CSR_STATUS, t6
mv a0, a1
ret
.previous
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 9d9a17335686..3c8b33258457 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -6,9 +6,8 @@ CFLAGS_REMOVE_init.o = -pg
endif
obj-y += init.o
-obj-y += fault.o
obj-y += extable.o
-obj-y += ioremap.o
+obj-$(CONFIG_MMU) += fault.o
obj-y += cacheflush.o
obj-y += context.o
obj-y += sifive_l2_cache.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 3f15938dec89..8f1900686640 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,9 +10,17 @@
#include <asm/sbi.h>
+static void ipi_remote_fence_i(void *info)
+{
+ return local_flush_icache_all();
+}
+
void flush_icache_all(void)
{
- sbi_remote_fence_i(NULL);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_remote_fence_i(NULL);
+ else
+ on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
/*
@@ -28,7 +36,7 @@ void flush_icache_all(void)
void flush_icache_mm(struct mm_struct *mm, bool local)
{
unsigned int cpu;
- cpumask_t others, hmask, *mask;
+ cpumask_t others, *mask;
preempt_disable();
@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local) {
- riscv_cpuid_to_hartid_mask(&others, &hmask);
- sbi_remote_fence_i(hmask.bits);
- } else {
+ if (mm == current->active_mm && local) {
/*
* It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit
@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
* with flush_icache_deferred().
*/
smp_mb();
+ } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+ cpumask_t hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+ sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+ } else {
+ on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
}
preempt_enable();
@@ -66,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
@@ -73,3 +86,4 @@ void flush_icache_pte(pte_t pte)
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
flush_icache_all();
}
+#endif /* CONFIG_MMU */
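
For reference, the local flush that ipi_remote_fence_i() performs on each CPU is a single FENCE.I instruction; a sketch of the helper (the kernel defines it in <asm/cacheflush.h>):

	static inline void local_flush_icache_all(void)
	{
		/* synchronize the local instruction stream with prior stores */
		asm volatile ("fence.i" : : : "memory");
	}
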
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index ca66d44156b6..613ec81a8979 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -58,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
+#ifdef CONFIG_MMU
csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
local_flush_tlb_all();
+#endif
flush_icache_deferred(next);
}
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 7aed9178d365..2fc729422151 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->sepc);
+ fixup = search_exception_tables(regs->epc);
if (fixup) {
- regs->sepc = fixup->fixup;
+ regs->epc = fixup->fixup;
return 1;
}
return 0;
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 247b8c859c44..cf7248e07f43 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -34,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
int code = SEGV_MAPERR;
vm_fault_t fault;
- cause = regs->scause;
- addr = regs->sbadaddr;
+ cause = regs->cause;
+ addr = regs->badaddr;
tsk = current;
mm = tsk->mm;
@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
goto vmalloc_fault;
/* Enable interrupts if they were enabled in the parent context. */
- if (likely(regs->sstatus & SR_SPIE))
+ if (likely(regs->status & SR_PIE))
local_irq_enable();
/*
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 573463d1c799..b2fe9d1be833 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -26,6 +26,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
EXPORT_SYMBOL(empty_zero_page);
extern char _start[];
+void *dtb_early_va;
static void __init zone_sizes_init(void)
{
@@ -40,7 +41,7 @@ static void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns);
}
-void setup_zero_page(void)
+static void setup_zero_page(void)
{
memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
@@ -142,12 +143,12 @@ void __init setup_bootmem(void)
}
}
+#ifdef CONFIG_MMU
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
-void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
@@ -273,7 +274,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define get_pgd_next_virt(__pa) get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE PMD_SIZE
#define fixmap_pgd_next fixmap_pmd
#else
#define pgd_next_t pte_t
@@ -281,7 +281,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define get_pgd_next_virt(__pa) get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE PGDIR_SIZE
#define fixmap_pgd_next fixmap_pte
#endif
@@ -314,14 +313,11 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
- uintptr_t map_size = PAGE_SIZE;
-
- /* Upgrade to PMD/PGDIR mappings whenever possible */
- if (!(base & (PTE_PARENT_SIZE - 1)) &&
- !(size & (PTE_PARENT_SIZE - 1)))
- map_size = PTE_PARENT_SIZE;
+ /* Upgrade to PMD_SIZE mappings whenever possible */
+ if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
+ return PAGE_SIZE;
- return map_size;
+ return PMD_SIZE;
}
/*
@@ -449,6 +445,16 @@ static void __init setup_vm_final(void)
csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+ dtb_early_va = (void *)dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */
void __init paging_init(void)
{
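
A sketch of how best_map_size() is meant to be consumed when the final kernel mappings are created (modeled on the setup_vm_final() loop, which is not shown in this hunk; start and end stand in for the bounds of a memblock region):

	uintptr_t pa, map_size;

	for (pa = start; pa < end; pa += map_size) {
		map_size = best_map_size(pa, end - pa);
		create_pgd_mapping(swapper_pg_dir, (uintptr_t)__va(pa),
				   pa, map_size, PAGE_KERNEL);
	}
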
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
deleted file mode 100644
index ac621ddb45c0..000000000000
--- a/arch/riscv/mm/ioremap.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2012 Regents of the University of California
- */
-
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-
-#include <asm/pgtable.h>
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
- pgprot_t prot, void *caller)
-{
- phys_addr_t last_addr;
- unsigned long offset, vaddr;
- struct vm_struct *area;
-
- /* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-
-/*
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * Must be freed with iounmap.
- */
-void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
- return __ioremap_caller(offset, size, PAGE_KERNEL,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap);
-
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 24cd33d2c48f..720b443c4528 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -2,6 +2,7 @@
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/sched.h>
#include <asm/sbi.h>
void flush_tlb_all(void)
@@ -9,13 +10,33 @@ void flush_tlb_all(void)
sbi_remote_sfence_vma(NULL, 0, -1);
}
+/*
+ * This function must not be called with a NULL cmask;
+ * the kernel may panic if it is.
+ */
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
unsigned long size)
{
struct cpumask hmask;
+ unsigned int cpuid;
- riscv_cpuid_to_hartid_mask(cmask, &hmask);
- sbi_remote_sfence_vma(hmask.bits, start, size);
+ if (cpumask_empty(cmask))
+ return;
+
+ cpuid = get_cpu();
+
+ if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+ /* local cpu is the only cpu present in cpumask */
+ if (size <= PAGE_SIZE)
+ local_flush_tlb_page(start);
+ else
+ local_flush_tlb_all();
+ } else {
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+ }
+
+ put_cpu();
}
void flush_tlb_mm(struct mm_struct *mm)
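
The local primitives used above boil down to SFENCE.VMA; a sketch of their definitions (the kernel keeps them in <asm/tlbflush.h>):

	static inline void local_flush_tlb_all(void)
	{
		asm volatile ("sfence.vma" : : : "memory");
	}

	static inline void local_flush_tlb_page(unsigned long addr)
	{
		/* flush only the mapping for one virtual address */
		asm volatile ("sfence.vma %0" : : "r" (addr) : "memory");
	}
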
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f0df9e48e651..d4051e88e625 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -170,6 +170,7 @@ config S390
select HAVE_PERF_EVENTS
select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
@@ -426,9 +427,6 @@ config COMPAT
(and some other stuff like libraries and such) is needed for
executing 31 bit applications. It is safe to say "Y".
-config COMPAT_VDSO
- def_bool COMPAT && !CC_IS_CLANG
-
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
@@ -1018,3 +1016,17 @@ config S390_GUEST
the KVM hypervisor.
endmenu
+
+menu "Selftests"
+
+config S390_UNWIND_SELFTEST
+ def_tristate n
+ prompt "Test unwind functions"
+ help
+	  This option enables an s390-specific stack unwinder test kernel
+	  module. It is not useful for distributions or general kernels,
+	  but only for kernel developers working on architecture code.
+
+ Say N if you are unsure.
+
+endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 478b645b20dd..ba8556bb0fb1 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -157,7 +157,6 @@ zfcpdump:
vdso_install:
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
- $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index fbd341ea03b8..3b3a11f95269 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -170,6 +170,11 @@ void startup_kernel(void)
handle_relocs(__kaslr_offset);
if (__kaslr_offset) {
+ /*
+ * Save KASLR offset for early dumps, before vmcore_info is set.
+	 * Mark it as odd to distinguish it from a real vmcore_info pointer.
+ */
+ S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
/* Clear non-relocated kernel */
if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
memset(img, 0, vmlinux.image_size);
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 819803a97c2b..0d90cbeb89b4 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -313,7 +313,7 @@ static inline unsigned long *trailer_entry_ptr(unsigned long v)
return (unsigned long *) ret;
}
-/* Return if the entry in the sample data block table (sdbt)
+/* Return true if the entry in the sample data block table (sdbt)
* is a link to the next sdbt */
static inline int is_link_entry(unsigned long *s)
{
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index ca421614722f..5a16f500515a 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -26,10 +26,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0
-#define ioremap_nocache(addr, size) ioremap(addr, size)
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
-
void __iomem *ioremap(unsigned long offset, unsigned long size);
void iounmap(volatile void __iomem *addr);
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index a2399eff84ca..3a06c264ea53 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -2,9 +2,6 @@
#ifndef __ASM_S390_PCI_H
#define __ASM_S390_PCI_H
-/* must be set before including pci_clp.h */
-#define PCI_BAR_COUNT 6
-
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/iommu.h>
@@ -138,7 +135,7 @@ struct zpci_dev {
char res_name[16];
bool mio_capable;
- struct zpci_bar_struct bars[PCI_BAR_COUNT];
+ struct zpci_bar_struct bars[PCI_STD_NUM_BARS];
u64 start_dma; /* Start of available DMA addresses */
u64 end_dma; /* End of available DMA addresses */
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index 50359172cc48..bd2cb4ea7d93 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -77,7 +77,7 @@ struct mio_info {
struct {
u64 wb;
u64 wt;
- } addr[PCI_BAR_COUNT];
+ } addr[PCI_STD_NUM_BARS];
u32 reserved[6];
} __packed;
@@ -98,9 +98,9 @@ struct clp_rsp_query_pci {
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
u32 fid; /* pci function id */
- u8 bar_size[PCI_BAR_COUNT];
+ u8 bar_size[PCI_STD_NUM_BARS];
u16 pchid;
- __le32 bar[PCI_BAR_COUNT];
+ __le32 bar[PCI_STD_NUM_BARS];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 : 16;
u8 fmb_len;
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 4652ffffe0b2..b9da71632827 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -12,6 +12,7 @@
#include <linux/perf_event.h>
#include <linux/device.h>
+#include <asm/stacktrace.h>
/* Per-CPU flags for PMU states */
#define PMU_F_RESERVED 0x1000
@@ -73,4 +74,10 @@ struct perf_sf_sde_regs {
#define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
#define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
+#define perf_arch_fetch_caller_regs(regs, __ip) do { \
+ (regs)->psw.addr = (__ip); \
+ (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
+ offsetof(struct stack_frame, back_chain); \
+} while (0)
+
#endif /* _ASM_S390_PERF_EVENT_H */
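
The macro above synthesizes a minimal register set for the calling context: psw.addr gets the passed instruction pointer, and gpr 15 gets the caller's frame, computed from the compiler's frame address minus the back_chain offset. A hypothetical call site, for illustration only:

	struct pt_regs regs;

	perf_arch_fetch_caller_regs(&regs, _THIS_IP_);
	/* regs.psw.addr == _THIS_IP_, regs.gprs[15] == current frame */
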
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 881fc37c11c6..361ef5eda468 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -310,7 +310,7 @@ void enabled_wait(void);
/*
* Function to drop a processor into disabled wait state
*/
-static inline void __noreturn disabled_wait(void)
+static __always_inline void __noreturn disabled_wait(void)
{
psw_t psw;
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index fee40212af11..ee056f4a4fa3 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -33,12 +33,12 @@ static inline bool on_stack(struct stack_info *info,
return addr >= info->begin && addr + len <= info->end;
}
-static inline unsigned long get_stack_pointer(struct task_struct *task,
- struct pt_regs *regs)
+static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
+ struct pt_regs *regs)
{
if (regs)
return (unsigned long) kernel_stack_pointer(regs);
- if (!task || task == current)
+ if (task == current)
return current_stack_pointer();
return (unsigned long) task->thread.ksp;
}
@@ -62,6 +62,17 @@ struct stack_frame {
};
#endif
+/*
+ * Unlike current_stack_pointer(), which simply returns the current value of
+ * %r15, current_frame_address() returns the function's stack frame address,
+ * which matches %r15 upon function invocation. It may differ from %r15 later
+ * if the function allocates stack for local variables or a new stack frame
+ * to call other functions.
+ */
+#define current_frame_address() \
+ ((unsigned long)__builtin_frame_address(0) - \
+ offsetof(struct stack_frame, back_chain))
+
#define CALL_ARGS_0() \
register unsigned long r2 asm("2")
#define CALL_ARGS_1(arg1) \
@@ -95,20 +106,33 @@ struct stack_frame {
#define CALL_ON_STACK(fn, stack, nr, args...) \
({ \
+ unsigned long frame = current_frame_address(); \
CALL_ARGS_##nr(args); \
unsigned long prev; \
\
asm volatile( \
" la %[_prev],0(15)\n" \
- " la 15,0(%[_stack])\n" \
- " stg %[_prev],%[_bc](15)\n" \
+ " lg 15,%[_stack]\n" \
+ " stg %[_frame],%[_bc](15)\n" \
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
- [_stack] "a" (stack), \
+ [_stack] "R" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+ [_frame] "d" (frame), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
r2; \
})
+#define CALL_ON_STACK_NORETURN(fn, stack) \
+({ \
+ asm volatile( \
+ " la 15,0(%[_stack])\n" \
+ " xc %[_bc](8,15),%[_bc](15)\n" \
+ " brasl 14,%[_fn]\n" \
+ ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+ [_stack] "a" (stack), [_fn] "X" (fn)); \
+ BUG(); \
+})
+
#endif /* _ASM_S390_STACKTRACE_H */
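
Usage of CALL_ON_STACK() as it appears later in this same series (machine_kexec.c), with preemption disabled so the per-CPU nodat stack cannot change underneath the call:

	preempt_disable();
	rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
	preempt_enable();
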
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index eaaefeceef6f..de9006b0cfeb 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -35,7 +35,6 @@ struct unwind_state {
struct task_struct *task;
struct pt_regs *regs;
unsigned long sp, ip;
- bool reuse_sp;
int graph_idx;
bool reliable;
bool error;
@@ -59,10 +58,11 @@ static inline bool unwind_error(struct unwind_state *state)
static inline void unwind_start(struct unwind_state *state,
struct task_struct *task,
struct pt_regs *regs,
- unsigned long sp)
+ unsigned long first_frame)
{
- sp = sp ? : get_stack_pointer(task, regs);
- __unwind_start(state, task, regs, sp);
+ task = task ?: current;
+ first_frame = first_frame ?: get_stack_pointer(task, regs);
+ __unwind_start(state, task, regs, first_frame);
}
static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 169d7604eb80..3bcfdeb01395 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -41,8 +41,17 @@ struct vdso_data {
struct vdso_per_cpu_data {
__u64 ectg_timer_base;
__u64 ectg_user_time;
- __u32 cpu_nr;
- __u32 node_id;
+ /*
+ * Note: node_id and cpu_nr must be at adjacent memory locations.
+ * VDSO userspace must read both values with a single instruction.
+ */
+ union {
+ __u64 getcpu_val;
+ struct {
+ __u32 node_id;
+ __u32 cpu_nr;
+ };
+ };
};
extern struct vdso_data *vdso_data;
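
The point of the union is that userspace can fetch both fields with one 64-bit load instead of two racy 32-bit loads. A sketch of the consumer side (hypothetical helper, not the actual VDSO code; on big-endian s390, node_id occupies the upper word of getcpu_val):

	static inline void vdso_read_getcpu(const struct vdso_per_cpu_data *d,
					    unsigned int *cpu, unsigned int *node)
	{
		__u64 val = READ_ONCE(d->getcpu_val);	/* single atomic load */

		*node = val >> 32;		/* node_id: upper word */
		*cpu  = val & 0xffffffffU;	/* cpu_nr: lower word */
	}
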
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7edbbcd8228a..2b1203cf7be6 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -81,4 +81,3 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
# vdso
obj-y += vdso64/
-obj-$(CONFIG_COMPAT_VDSO) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 41ac4ad21311..ce33406cfe83 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -78,8 +78,7 @@ int main(void)
OFFSET(__VDSO_TS_END, vdso_data, ts_end);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
- OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
- OFFSET(__VDSO_NODE_ID, vdso_per_cpu_data, node_id);
+ OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
BLANK();
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 34bdc60c0b11..d306fe04489a 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -38,6 +38,7 @@ const char *stack_type_name(enum stack_type type)
return "unknown";
}
}
+EXPORT_SYMBOL_GPL(stack_type_name);
static inline bool in_stack(unsigned long sp, struct stack_info *info,
enum stack_type type, unsigned long low,
@@ -93,7 +94,9 @@ int get_stack_info(unsigned long sp, struct task_struct *task,
if (!sp)
goto unknown;
- task = task ? : current;
+	/* Sanity check: the ABI requires SP to be aligned to 8 bytes. */
+ if (sp & 0x7)
+ goto unknown;
/* Check per-task stack */
if (in_task_stack(sp, task, info))
@@ -128,8 +131,6 @@ void show_stack(struct task_struct *task, unsigned long *stack)
struct unwind_state state;
printk("Call Trace:\n");
- if (!task)
- task = current;
unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
printk(state.reliable ? " [<%016lx>] %pSR \n" :
"([<%016lx>] %pSR)\n",
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e585f528a6..8b88dbbda7df 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -31,7 +31,7 @@ ENTRY(startup_continue)
#
larl %r14,init_task
stg %r14,__LC_CURRENT
- larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD
+ larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
#ifdef CONFIG_KASAN
brasl %r14,kasan_early_init
#endif
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 444a19125a81..cb8b1cc285c9 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -164,7 +164,9 @@ static bool kdump_csum_valid(struct kimage *image)
#ifdef CONFIG_CRASH_DUMP
int rc;
+ preempt_disable();
rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
+ preempt_enable();
return rc == 0;
#else
return false;
@@ -254,10 +256,10 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
- mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
vmcoreinfo_append_str("EDMA=%lx\n", __edma);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+ mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}
void machine_shutdown(void)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 69506fdbd9a1..c07fdcd73726 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -156,8 +156,8 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
}
}
- debug_sprintf_event(sfdbg, 5, "%s freed sdbt %p\n", __func__,
- sfb->sdbt);
+ debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
+ (unsigned long)sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
@@ -193,7 +193,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
- unsigned long *new, *tail;
+ unsigned long *new, *tail, *tail_prev = NULL;
if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
@@ -213,9 +213,10 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
debug_sprintf_event(sfdbg, 3, "%s: "
- "sampling buffer is not linked: origin %p"
- " tail %p\n", __func__,
- (void *)sfb->sdbt, (void *)tail);
+ "sampling buffer is not linked: origin %#lx"
+ " tail %#lx\n", __func__,
+ (unsigned long)sfb->sdbt,
+ (unsigned long)tail);
return -EINVAL;
}
@@ -232,6 +233,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = (unsigned long)(void *) new + 1;
+ tail_prev = tail;
tail = new;
}
@@ -241,18 +243,30 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
- if (rc)
+ if (rc) {
+		/* Undo the last SDBT. An SDBT with no SDB at its first
+		 * entry but an SDBT entry instead cannot be handled
+		 * by the interrupt handler code.
+		 * Avoid this situation.
+ */
+ if (tail_prev) {
+ sfb->num_sdbt--;
+ free_page((unsigned long) new);
+ tail = tail_prev;
+ }
break;
+ }
sfb->num_sdb++;
tail++;
+		tail_prev = new = NULL;	/* Allocated at least one SDB */
}
/* Link sampling buffer to its origin */
*tail = (unsigned long) sfb->sdbt + 1;
sfb->tail = tail;
- debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
- " settings: sdbt %lu sdb %lu\n",
+ debug_sprintf_event(sfdbg, 4, "%s: new buffer"
+ " settings: sdbt %lu sdb %lu\n", __func__,
sfb->num_sdbt, sfb->num_sdb);
return rc;
}
@@ -292,12 +306,13 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
if (rc) {
free_sampling_buffer(sfb);
- debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
- "realloc_sampling_buffer failed with rc %i\n", rc);
+ debug_sprintf_event(sfdbg, 4, "%s: "
+ "realloc_sampling_buffer failed with rc %i\n",
+ __func__, rc);
} else
debug_sprintf_event(sfdbg, 4,
- "alloc_sampling_buffer: tear %p dear %p\n",
- sfb->sdbt, (void *)*sfb->sdbt);
+ "%s: tear %#lx dear %#lx\n", __func__,
+ (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
return rc;
}
@@ -465,8 +480,8 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
if (num)
sfb_account_allocs(num, hwc);
- debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow %llu ratio %lu"
- " num %lu\n", OVERFLOW_REG(hwc), ratio, num);
+ debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
+ __func__, OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
@@ -504,13 +519,13 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
*/
rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
if (rc)
- debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
- "failed with rc %i\n", rc);
+ debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
+ __func__, rc);
if (sfb_has_pending_allocs(sfb, hwc))
- debug_sprintf_event(sfdbg, 5, "sfb: extend: "
+ debug_sprintf_event(sfdbg, 5, "%s: "
"req %lu alloc %lu remaining %lu\n",
- num, sfb->num_sdb - num_old,
+ __func__, num, sfb->num_sdb - num_old,
sfb_pending_allocs(sfb, hwc));
}
@@ -600,13 +615,6 @@ static void hw_init_period(struct hw_perf_event *hwc, u64 period)
local64_set(&hwc->period_left, hwc->sample_period);
}
-static void hw_reset_registers(struct hw_perf_event *hwc,
- unsigned long *sdbt_origin)
-{
- /* (Re)set to first sample-data-block-table */
- TEAR_REG(hwc) = (unsigned long) sdbt_origin;
-}
-
static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
unsigned long rate)
{
@@ -698,9 +706,9 @@ static unsigned long getrate(bool freq, unsigned long sample,
*/
if (sample_rate_to_freq(si, rate) >
sysctl_perf_event_sample_rate) {
- debug_sprintf_event(sfdbg, 1,
+ debug_sprintf_event(sfdbg, 1, "%s: "
"Sampling rate exceeds maximum "
- "perf sample rate\n");
+ "perf sample rate\n", __func__);
rate = 0;
}
}
@@ -745,10 +753,9 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
attr->sample_period = rate;
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
- debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
- "cpu:%d period:%#llx freq:%d,%#lx\n", event->cpu,
- event->attr.sample_period, event->attr.freq,
- SAMPLE_FREQ_MODE(hwc));
+ debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
+ __func__, event->cpu, event->attr.sample_period,
+ event->attr.freq, SAMPLE_FREQ_MODE(hwc));
return 0;
}
@@ -951,8 +958,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
* buffer extents
*/
sfb_account_overflows(cpuhw, hwc);
- if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
- extend_sampling_buffer(&cpuhw->sfb, hwc);
+ extend_sampling_buffer(&cpuhw->sfb, hwc);
}
/* Rate may be adjusted with ioctl() */
cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
@@ -973,12 +979,11 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
/* Load current program parameter */
lpp(&S390_lowcore.lpp);
- debug_sprintf_event(sfdbg, 6, "pmu_enable: es %i cs %i ed %i cd %i "
- "interval %#lx tear %p dear %p\n",
+ debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
+ "interval %#lx tear %#lx dear %#lx\n", __func__,
cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
cpuhw->lsctl.cd, cpuhw->lsctl.interval,
- (void *) cpuhw->lsctl.tear,
- (void *) cpuhw->lsctl.dear);
+ cpuhw->lsctl.tear, cpuhw->lsctl.dear);
}
static void cpumsf_pmu_disable(struct pmu *pmu)
@@ -1019,8 +1024,8 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
cpuhw->lsctl.dear = si.dear;
}
} else
- debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
- "qsi() failed with err %i\n", err);
+ debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
+ __func__, err);
cpuhw->flags &= ~PMU_F_ENABLED;
}
@@ -1265,9 +1270,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow += te->overflow;
/* Timestamps are valid for full sample-data-blocks only */
- debug_sprintf_event(sfdbg, 6, "%s: sdbt %p "
+ debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
"overflow %llu timestamp %#llx\n",
- __func__, sdbt, te->overflow,
+ __func__, (unsigned long)sdbt, te->overflow,
(te->f) ? trailer_timestamp(te) : 0ULL);
/* Collect all samples from a single sample-data-block and
@@ -1312,8 +1317,10 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow, 1 + num_sdb);
if (sampl_overflow || event_overflow)
debug_sprintf_event(sfdbg, 4, "%s: "
- "overflow stats: sample %llu event %llu\n",
- __func__, sampl_overflow, event_overflow);
+ "overflows: sample %llu event %llu"
+ " total %llu num_sdb %llu\n",
+ __func__, sampl_overflow, event_overflow,
+ OVERFLOW_REG(hwc), num_sdb);
}
#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
@@ -1424,10 +1431,10 @@ static int aux_output_begin(struct perf_output_handle *handle,
cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
cpuhw->lsctl.dear = aux->sdb_index[head];
- debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
+ debug_sprintf_event(sfdbg, 6, "%s: "
"head->alert_mark->empty_mark (num_alert, range)"
"[%#lx -> %#lx -> %#lx] (%#lx, %#lx) "
- "tear index %#lx, tear %#lx dear %#lx\n",
+ "tear index %#lx, tear %#lx dear %#lx\n", __func__,
aux->head, aux->alert_mark, aux->empty_mark,
AUX_SDB_NUM_ALERT(aux), range,
head / CPUM_SF_SDB_PER_TABLE,
@@ -1571,7 +1578,9 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
pr_err("The AUX buffer with %lu pages for the "
"diagnostic-sampling mode is full\n",
num_sdb);
- debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
+ debug_sprintf_event(sfdbg, 1,
+ "%s: AUX buffer used up\n",
+ __func__);
break;
}
if (WARN_ON_ONCE(!aux))
@@ -1594,23 +1603,25 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", num_sdb);
- debug_sprintf_event(sfdbg, 1, "head %#lx range %#lx "
- "overflow %#llx\n",
+ debug_sprintf_event(sfdbg, 1, "%s: head %#lx range %#lx "
+ "overflow %#llx\n", __func__,
aux->head, range, overflow);
} else {
size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
- debug_sprintf_event(sfdbg, 6, "head %#lx alert %#lx "
+ debug_sprintf_event(sfdbg, 6, "%s: head %#lx alert %#lx "
"already full, try another\n",
+ __func__,
aux->head, aux->alert_mark);
}
}
if (done)
- debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
+ debug_sprintf_event(sfdbg, 6, "%s: aux_reset_buffer "
"[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n",
- aux->head, aux->alert_mark, aux->empty_mark,
- AUX_SDB_NUM_ALERT(aux), range);
+ __func__, aux->head, aux->alert_mark,
+ aux->empty_mark, AUX_SDB_NUM_ALERT(aux),
+ range);
}
/*
@@ -1633,8 +1644,8 @@ static void aux_buffer_free(void *data)
kfree(aux->sdb_index);
kfree(aux);
- debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free "
- "%lu SDBTs\n", num_sdbt);
+ debug_sprintf_event(sfdbg, 4, "%s: free "
+ "%lu SDBTs\n", __func__, num_sdbt);
}
static void aux_sdb_init(unsigned long sdb)
@@ -1742,9 +1753,8 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
*/
aux->empty_mark = sfb->num_sdb - 1;
- debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs"
- " and %lu SDBs\n",
- sfb->num_sdbt, sfb->num_sdb);
+ debug_sprintf_event(sfdbg, 4, "%s: setup %lu SDBTs and %lu SDBs\n",
+ __func__, sfb->num_sdbt, sfb->num_sdb);
return aux;
@@ -1797,9 +1807,9 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
event->attr.sample_period = rate;
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
- debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
- "cpu:%d value:%#llx period:%#llx freq:%d\n",
- event->cpu, value,
+ debug_sprintf_event(sfdbg, 4, "%s:"
+ " cpu %d value %#llx period %#llx freq %d\n",
+ __func__, event->cpu, value,
event->attr.sample_period, do_freq);
return 0;
}
@@ -1875,7 +1885,7 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
if (!SAMPL_DIAG_MODE(&event->hw)) {
cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
- hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
+ TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt;
}
/* Ensure sampling functions are in the disabled state. If disabled,
@@ -2030,7 +2040,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
/* Report measurement alerts only for non-PRA codes */
if (alert != CPU_MF_INT_SF_PRA)
- debug_sprintf_event(sfdbg, 6, "measurement alert: %#x\n",
+ debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
alert);
/* Sampling authorization change request */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ad71132374f0..58faa12542a1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -856,7 +856,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
}
/* Do the secure computing check after ptrace. */
- if (secure_computing(NULL)) {
+ if (secure_computing()) {
/* seccomp failures shouldn't expose any additional code. */
return -1;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3ff291bc63b7..9cbf490fd162 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -355,7 +355,6 @@ early_initcall(async_stack_realloc);
void __init arch_call_rest_init(void)
{
- struct stack_frame *frame;
unsigned long stack;
stack = stack_alloc();
@@ -368,13 +367,7 @@ void __init arch_call_rest_init(void)
set_task_stack_end_magic(current);
stack += STACK_INIT_OFFSET;
S390_lowcore.kernel_stack = stack;
- frame = (struct stack_frame *) stack;
- memset(frame, 0, sizeof(*frame));
- /* Branch to rest_init on the new stack, never returns */
- asm volatile(
- " la 15,0(%[_frame])\n"
- " jg rest_init\n"
- : : [_frame] "a" (frame));
+ CALL_ON_STACK_NORETURN(rest_init, stack);
}
static void __init setup_lowcore_dat_off(void)
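For orientation: CALL_ON_STACK_NORETURN() is added elsewhere in this series (in asm/stacktrace.h, not part of this diff). Judging from the open-coded sequence it replaces here and in smp.c below, it builds a zeroed stack frame on the target stack, loads it into %r15, and branches to the function, never returning. The replaced pattern, reconstructed from the lines deleted above:

	frame = (struct stack_frame *) stack;
	memset(frame, 0, sizeof(*frame));
	asm volatile(
		"	la	15,0(%[_frame])\n"	/* switch %r15 to the new frame */
		"	jg	rest_init\n"		/* branch, never return */
		: : [_frame] "a" (frame));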
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6acdcf1d4074..2794cad9312e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -262,10 +262,13 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
+ lc->user_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
__ctl_store(lc->cregs_save_area, 0, 15);
+ lc->cregs_save_area[1] = lc->kernel_asce;
+ lc->cregs_save_area[7] = lc->vdso_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
sizeof(lc->stfle_fac_list));
@@ -844,6 +847,8 @@ static void smp_init_secondary(void)
S390_lowcore.last_update_clock = get_tod_clock();
restore_access_regs(S390_lowcore.access_regs_save_area);
+ set_cpu_flag(CIF_ASCE_PRIMARY);
+ set_cpu_flag(CIF_ASCE_SECONDARY);
cpu_init();
preempt_disable();
init_cpu_timer();
@@ -871,7 +876,7 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
S390_lowcore.restart_source = -1UL;
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
- CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
+ CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
}
/* Upping and downing of CPUs */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index f8fc4f8aef9b..fc5419ac64c8 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -9,6 +9,7 @@
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
+#include <asm/kprobes.h>
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
@@ -22,3 +23,45 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
break;
}
}
+
+/*
+ * This function returns an error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
+{
+ struct unwind_state state;
+ unsigned long addr;
+
+ unwind_for_each_frame(&state, task, NULL, 0) {
+ if (state.stack_info.type != STACK_TYPE_TASK)
+ return -EINVAL;
+
+ if (state.regs)
+ return -EINVAL;
+
+ addr = unwind_get_return_address(&state);
+ if (!addr)
+ return -EINVAL;
+
+#ifdef CONFIG_KPROBES
+ /*
+ * Mark stacktraces with kretprobed functions on them
+ * as unreliable.
+ */
+ if (state.ip == (unsigned long)kretprobe_trampoline)
+ return -EINVAL;
+#endif
+
+ if (!consume_entry(cookie, addr, false))
+ return -EINVAL;
+ }
+
+ /* Check for stack corruption */
+ if (unwind_error(&state))
+ return -EINVAL;
+ return 0;
+}
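With this hook wired up, the generic reliable-stacktrace interface becomes usable on s390 (assuming the architecture also selects HAVE_RELIABLE_STACKTRACE elsewhere in the series). A minimal consumer sketch in the style of livepatch's transition checks; the function name and buffer size are illustrative:

	#include <linux/stacktrace.h>

	static int dump_reliable(struct task_struct *task)
	{
		unsigned long entries[64];	/* illustrative depth */
		int nr;

		/* Negative return: at least one frame could not be trusted. */
		nr = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
		if (nr < 0)
			return nr;
		stack_trace_print(entries, nr, 0);
		return 0;
	}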
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
index fa111d3d378f..da2d4d4c5b0e 100644
--- a/arch/s390/kernel/unwind_bc.c
+++ b/arch/s390/kernel/unwind_bc.c
@@ -36,6 +36,12 @@ static bool update_stack_info(struct unwind_state *state, unsigned long sp)
return true;
}
+static inline bool is_task_pt_regs(struct unwind_state *state,
+ struct pt_regs *regs)
+{
+ return task_pt_regs(state->task) == regs;
+}
+
bool unwind_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
@@ -46,20 +52,16 @@ bool unwind_next_frame(struct unwind_state *state)
regs = state->regs;
if (unlikely(regs)) {
- if (state->reuse_sp) {
- sp = state->sp;
- state->reuse_sp = false;
- } else {
- sp = READ_ONCE_NOCHECK(regs->gprs[15]);
- if (unlikely(outside_of_stack(state, sp))) {
- if (!update_stack_info(state, sp))
- goto out_err;
- }
- }
+ sp = state->sp;
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = false;
regs = NULL;
+ if (!__kernel_text_address(ip)) {
+ /* skip bogus %r14 */
+ state->regs = NULL;
+ return unwind_next_frame(state);
+ }
} else {
sf = (struct stack_frame *) state->sp;
sp = READ_ONCE_NOCHECK(sf->back_chain);
@@ -76,15 +78,24 @@ bool unwind_next_frame(struct unwind_state *state)
/* No back-chain, look for a pt_regs structure */
sp = state->sp + STACK_FRAME_OVERHEAD;
if (!on_stack(info, sp, sizeof(struct pt_regs)))
- goto out_stop;
+ goto out_err;
regs = (struct pt_regs *) sp;
- if (READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE)
+ if (is_task_pt_regs(state, regs))
goto out_stop;
ip = READ_ONCE_NOCHECK(regs->psw.addr);
+ sp = READ_ONCE_NOCHECK(regs->gprs[15]);
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
reliable = true;
}
}
+ /* Sanity check: the ABI requires SP to be 8-byte aligned. */
+ if (sp & 0x7)
+ goto out_err;
+
ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
/* Update unwind state */
@@ -103,13 +114,11 @@ out_stop:
EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
- struct pt_regs *regs, unsigned long sp)
+ struct pt_regs *regs, unsigned long first_frame)
{
struct stack_info *info = &state->stack_info;
- unsigned long *mask = &state->stack_mask;
- bool reliable, reuse_sp;
struct stack_frame *sf;
- unsigned long ip;
+ unsigned long ip, sp;
memset(state, 0, sizeof(*state));
state->task = task;
@@ -121,25 +130,28 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
return;
}
+ /* Get the instruction pointer from pt_regs or the stack frame */
+ if (regs) {
+ ip = regs->psw.addr;
+ sp = regs->gprs[15];
+ } else if (task == current) {
+ sp = current_frame_address();
+ } else {
+ sp = task->thread.ksp;
+ }
+
/* Get current stack pointer and initialize stack info */
- if (get_stack_info(sp, task, info, mask) != 0 ||
- !on_stack(info, sp, sizeof(struct stack_frame))) {
+ if (!update_stack_info(state, sp)) {
/* Something is wrong with the stack pointer */
info->type = STACK_TYPE_UNKNOWN;
state->error = true;
return;
}
- /* Get the instruction pointer from pt_regs or the stack frame */
- if (regs) {
- ip = READ_ONCE_NOCHECK(regs->psw.addr);
- reliable = true;
- reuse_sp = true;
- } else {
- sf = (struct stack_frame *) sp;
+ if (!regs) {
+ /* Stack frame is within valid stack */
+ sf = (struct stack_frame *)sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
- reliable = false;
- reuse_sp = false;
}
ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
@@ -147,7 +159,17 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
/* Update unwind state */
state->sp = sp;
state->ip = ip;
- state->reliable = reliable;
- state->reuse_sp = reuse_sp;
+ state->reliable = true;
+
+ if (!first_frame)
+ return;
+ /* Skip through the call chain to the specified starting frame */
+ while (!unwind_done(state)) {
+ if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) {
+ if (state->sp >= first_frame)
+ break;
+ }
+ unwind_next_frame(state);
+ }
}
EXPORT_SYMBOL_GPL(__unwind_start);
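For reference, consumers drive the unwinder through the unwind_for_each_frame() iterator, as arch_stack_walk() above and the new selftest later in this diff do; the basic pattern is:

	/* Sketch: walk the current task's kernel stack, newest frame first */
	struct unwind_state state;

	unwind_for_each_frame(&state, current, NULL, 0)
		printk("%s sp=%#lx %pSR\n",
		       state.reliable ? " " : "?",
		       state.sp, (void *)state.ip);

Passing a non-zero first_frame makes the rewritten __unwind_start() skip entries until the walk reaches that frame, which is how callers hide their own setup frames.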
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ed1fc08ccea2..bcc9bdb39ba2 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -29,13 +29,6 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#ifdef CONFIG_COMPAT_VDSO
-extern char vdso32_start, vdso32_end;
-static void *vdso32_kbase = &vdso32_start;
-static unsigned int vdso32_pages;
-static struct page **vdso32_pagelist;
-#endif
-
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
@@ -55,12 +48,6 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT_VDSO
- if (vma->vm_mm->context.compat_mm) {
- vdso_pagelist = vdso32_pagelist;
- vdso_pages = vdso32_pages;
- }
-#endif
if (vmf->pgoff >= vdso_pages)
return VM_FAULT_SIGBUS;
@@ -76,10 +63,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
unsigned long vdso_pages;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT_VDSO
- if (vma->vm_mm->context.compat_mm)
- vdso_pages = vdso32_pages;
-#endif
if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
return -EINVAL;
@@ -209,12 +192,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!vdso_enabled)
return 0;
+ if (is_compat_task())
+ return 0;
+
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT_VDSO
- mm->context.compat_mm = is_compat_task();
- if (mm->context.compat_mm)
- vdso_pages = vdso32_pages;
-#endif
/*
* vDSO has a problem and was disabled, just don't "enable" it for
* the process
@@ -267,23 +248,6 @@ static int __init vdso_init(void)
int i;
vdso_init_data(vdso_data);
-#ifdef CONFIG_COMPAT_VDSO
- /* Calculate the size of the 32 bit vDSO */
- vdso32_pages = ((&vdso32_end - &vdso32_start
- + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
-
- /* Make sure pages are in the correct state */
- vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
- GFP_KERNEL);
- BUG_ON(vdso32_pagelist == NULL);
- for (i = 0; i < vdso32_pages - 1; i++) {
- struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
- get_page(pg);
- vdso32_pagelist[i] = pg;
- }
- vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
- vdso32_pagelist[vdso32_pages] = NULL;
-#endif
/* Calculate the size of the 64 bit vDSO */
vdso64_pages = ((&vdso64_end - &vdso64_start
diff --git a/arch/s390/kernel/vdso32/.gitignore b/arch/s390/kernel/vdso32/.gitignore
deleted file mode 100644
index e45fba9d0ced..000000000000
--- a/arch/s390/kernel/vdso32/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vdso32.lds
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
deleted file mode 100644
index aee9ffbccb54..000000000000
--- a/arch/s390/kernel/vdso32/Makefile
+++ /dev/null
@@ -1,66 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# List of files in the vdso, has to be asm only for now
-
-KCOV_INSTRUMENT := n
-
-obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
-
-# Build rules
-
-targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
-obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
-
-KBUILD_AFLAGS += -DBUILD_VDSO
-KBUILD_CFLAGS += -DBUILD_VDSO
-
-KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS))
-KBUILD_AFLAGS_31 += -m31 -s
-
-KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
-KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- -Wl,--hash-style=both
-
-$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
-$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
-
-obj-y += vdso32_wrapper.o
-extra-y += vdso32.lds
-CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-
-# Disable gcov profiling, ubsan and kasan for VDSO code
-GCOV_PROFILE := n
-UBSAN_SANITIZE := n
-KASAN_SANITIZE := n
-
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
-
-# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
- $(call if_changed,vdso32ld)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S FORCE
- $(call if_changed_dep,vdso32as)
-
-# actual build commands
-quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
-quiet_cmd_vdso32as = VDSO32A $@
- cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso32.so: $(obj)/vdso32.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso32.so
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S
deleted file mode 100644
index eaf9cf1417f6..000000000000
--- a/arch/s390/kernel/vdso32/clock_getres.S
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Userland implementation of clock_getres() for 32 bits processes in a
- * s390 kernel for use in the vDSO
- *
- * Copyright IBM Corp. 2008
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/dwarf.h>
-
- .text
- .align 4
- .globl __kernel_clock_getres
- .type __kernel_clock_getres,@function
-__kernel_clock_getres:
- CFI_STARTPROC
- basr %r1,0
- la %r1,4f-.(%r1)
- chi %r2,__CLOCK_REALTIME
- je 0f
- chi %r2,__CLOCK_MONOTONIC
- je 0f
- la %r1,5f-4f(%r1)
- chi %r2,__CLOCK_REALTIME_COARSE
- je 0f
- chi %r2,__CLOCK_MONOTONIC_COARSE
- jne 3f
-0: ltr %r3,%r3
- jz 2f /* res == NULL */
-1: l %r0,0(%r1)
- xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */
- st %r0,4(%r3) /* store tp->tv_usec */
-2: lhi %r2,0
- br %r14
-3: lhi %r1,__NR_clock_getres /* fallback to svc */
- svc 0
- br %r14
- CFI_ENDPROC
-4: .long __CLOCK_REALTIME_RES
-5: .long __CLOCK_COARSE_RES
- .size __kernel_clock_getres,.-__kernel_clock_getres
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
deleted file mode 100644
index ada5c11a16e5..000000000000
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Userland implementation of clock_gettime() for 32 bits processes in a
- * s390 kernel for use in the vDSO
- *
- * Copyright IBM Corp. 2008
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/dwarf.h>
-#include <asm/ptrace.h>
-
- .text
- .align 4
- .globl __kernel_clock_gettime
- .type __kernel_clock_gettime,@function
-__kernel_clock_gettime:
- CFI_STARTPROC
- ahi %r15,-16
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
- basr %r5,0
-0: al %r5,21f-0b(%r5) /* get &_vdso_data */
- chi %r2,__CLOCK_REALTIME_COARSE
- je 10f
- chi %r2,__CLOCK_REALTIME
- je 11f
- chi %r2,__CLOCK_MONOTONIC_COARSE
- je 9f
- chi %r2,__CLOCK_MONOTONIC
- jne 19f
-
- /* CLOCK_MONOTONIC */
-1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
- tml %r4,0x0001 /* pending update ? loop */
- jnz 1b
- stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,1(%r15)
- s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- sl %r1,__VDSO_XTIME_STAMP+4(%r5)
- brc 3,2f
- ahi %r0,-1
-2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
- lr %r2,%r0
- l %r0,__VDSO_TK_MULT(%r5)
- ltr %r1,%r1
- mr %r0,%r0
- jnm 3f
- a %r0,__VDSO_TK_MULT(%r5)
-3: alr %r0,%r2
- al %r0,__VDSO_WTOM_NSEC(%r5)
- al %r1,__VDSO_WTOM_NSEC+4(%r5)
- brc 12,5f
- ahi %r0,1
-5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- srdl %r0,0(%r2) /* >> tk->shift */
- l %r2,__VDSO_WTOM_SEC+4(%r5)
- cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
- jne 1b
- basr %r5,0
-6: ltr %r0,%r0
- jnz 7f
- cl %r1,20f-6b(%r5)
- jl 8f
-7: ahi %r2,1
- sl %r1,20f-6b(%r5)
- brc 3,6b
- ahi %r0,-1
- j 6b
-8: st %r2,0(%r3) /* store tp->tv_sec */
- st %r1,4(%r3) /* store tp->tv_nsec */
- lhi %r2,0
- ahi %r15,16
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
- CFI_RESTORE 15
- br %r14
-
- /* CLOCK_MONOTONIC_COARSE */
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
-9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
- tml %r4,0x0001 /* pending update ? loop */
- jnz 9b
- l %r2,__VDSO_WTOM_CRS_SEC+4(%r5)
- l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5)
- cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
- jne 9b
- j 8b
-
- /* CLOCK_REALTIME_COARSE */
-10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
- tml %r4,0x0001 /* pending update ? loop */
- jnz 10b
- l %r2,__VDSO_XTIME_CRS_SEC+4(%r5)
- l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5)
- cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
- jne 10b
- j 17f
-
- /* CLOCK_REALTIME */
-11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
- tml %r4,0x0001 /* pending update ? loop */
- jnz 11b
- stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
- s %r0,1(%r15) /* no - ts_steering_end */
- sl %r1,5(%r15)
- brc 3,22f
- ahi %r0,-1
-22: ltr %r0,%r0 /* past end of steering? */
- jm 24f
- srdl %r0,15 /* 1 per 2^16 */
- tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
- jz 23f
- lcr %r0,%r0 /* negative TOD offset */
- lcr %r1,%r1
- je 23f
- ahi %r0,-1
-23: a %r0,1(%r15) /* add TOD timestamp */
- al %r1,5(%r15)
- brc 12,25f
- ahi %r0,1
- j 25f
-24: lm %r0,%r1,1(%r15) /* load TOD timestamp */
-25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- sl %r1,__VDSO_XTIME_STAMP+4(%r5)
- brc 3,12f
- ahi %r0,-1
-12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
- lr %r2,%r0
- l %r0,__VDSO_TK_MULT(%r5)
- ltr %r1,%r1
- mr %r0,%r0
- jnm 13f
- a %r0,__VDSO_TK_MULT(%r5)
-13: alr %r0,%r2
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
- al %r1,__VDSO_XTIME_NSEC+4(%r5)
- brc 12,14f
- ahi %r0,1
-14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- srdl %r0,0(%r2) /* >> tk->shift */
- l %r2,__VDSO_XTIME_SEC+4(%r5)
- cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
- jne 11b
- basr %r5,0
-15: ltr %r0,%r0
- jnz 16f
- cl %r1,20f-15b(%r5)
- jl 17f
-16: ahi %r2,1
- sl %r1,20f-15b(%r5)
- brc 3,15b
- ahi %r0,-1
- j 15b
-17: st %r2,0(%r3) /* store tp->tv_sec */
- st %r1,4(%r3) /* store tp->tv_nsec */
- lhi %r2,0
- ahi %r15,16
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
- CFI_RESTORE 15
- br %r14
-
- /* Fallback to system call */
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
-19: lhi %r1,__NR_clock_gettime
- svc 0
- ahi %r15,16
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
- CFI_RESTORE 15
- br %r14
- CFI_ENDPROC
-
-20: .long 1000000000
-21: .long _vdso_data - 0b
- .size __kernel_clock_gettime,.-__kernel_clock_gettime
diff --git a/arch/s390/kernel/vdso32/getcpu.S b/arch/s390/kernel/vdso32/getcpu.S
deleted file mode 100644
index 25515f3fbcea..000000000000
--- a/arch/s390/kernel/vdso32/getcpu.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Userland implementation of getcpu() for 32 bits processes in a
- * s390 kernel for use in the vDSO
- *
- * Copyright IBM Corp. 2016
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/dwarf.h>
-
- .text
- .align 4
- .globl __kernel_getcpu
- .type __kernel_getcpu,@function
-__kernel_getcpu:
- CFI_STARTPROC
- la %r4,0
- sacf 256
- l %r5,__VDSO_CPU_NR(%r4)
- l %r4,__VDSO_NODE_ID(%r4)
- sacf 0
- ltr %r2,%r2
- jz 2f
- st %r5,0(%r2)
-2: ltr %r3,%r3
- jz 3f
- st %r4,0(%r3)
-3: lhi %r2,0
- br %r14
- CFI_ENDPROC
- .size __kernel_getcpu,.-__kernel_getcpu
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
deleted file mode 100644
index b23063fbc892..000000000000
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Userland implementation of gettimeofday() for 32 bits processes in a
- * s390 kernel for use in the vDSO
- *
- * Copyright IBM Corp. 2008
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/dwarf.h>
-#include <asm/ptrace.h>
-
- .text
- .align 4
- .globl __kernel_gettimeofday
- .type __kernel_gettimeofday,@function
-__kernel_gettimeofday:
- CFI_STARTPROC
- ahi %r15,-16
- CFI_ADJUST_CFA_OFFSET 16
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
- basr %r5,0
-0: al %r5,13f-0b(%r5) /* get &_vdso_data */
-1: ltr %r3,%r3 /* check if tz is NULL */
- je 2f
- mvc 0(8,%r3),__VDSO_TIMEZONE(%r5)
-2: ltr %r2,%r2 /* check if tv is NULL */
- je 10f
- l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
- tml %r4,0x0001 /* pending update ? loop */
- jnz 1b
- stcke 0(%r15) /* Store TOD clock */
- lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */
- s %r0,1(%r15)
- sl %r1,5(%r15)
- brc 3,14f
- ahi %r0,-1
-14: ltr %r0,%r0 /* past end of steering? */
- jm 16f
- srdl %r0,15 /* 1 per 2^16 */
- tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */
- jz 15f
- lcr %r0,%r0 /* negative TOD offset */
- lcr %r1,%r1
- je 15f
- ahi %r0,-1
-15: a %r0,1(%r15) /* add TOD timestamp */
- al %r1,5(%r15)
- brc 12,17f
- ahi %r0,1
- j 17f
-16: lm %r0,%r1,1(%r15) /* load TOD timestamp */
-17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
- sl %r1,__VDSO_XTIME_STAMP+4(%r5)
- brc 3,3f
- ahi %r0,-1
-3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
- st %r0,0(%r15)
- l %r0,__VDSO_TK_MULT(%r5)
- ltr %r1,%r1
- mr %r0,%r0
- jnm 4f
- a %r0,__VDSO_TK_MULT(%r5)
-4: al %r0,0(%r15)
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
- al %r1,__VDSO_XTIME_NSEC+4(%r5)
- brc 12,5f
- ahi %r0,1
-5: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5)
- cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
- jne 1b
- l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- srdl %r0,0(%r4) /* >> tk->shift */
- l %r4,0(%r15) /* get tv_sec from stack */
- basr %r5,0
-6: ltr %r0,%r0
- jnz 7f
- cl %r1,11f-6b(%r5)
- jl 8f
-7: ahi %r4,1
- sl %r1,11f-6b(%r5)
- brc 3,6b
- ahi %r0,-1
- j 6b
-8: st %r4,0(%r2) /* store tv->tv_sec */
- ltr %r1,%r1
- m %r0,12f-6b(%r5)
- jnm 9f
- al %r0,12f-6b(%r5)
-9: srl %r0,6
- st %r0,4(%r2) /* store tv->tv_usec */
-10: slr %r2,%r2
- ahi %r15,16
- CFI_ADJUST_CFA_OFFSET -16
- CFI_RESTORE 15
- br %r14
- CFI_ENDPROC
-11: .long 1000000000
-12: .long 274877907
-13: .long _vdso_data - 0b
- .size __kernel_gettimeofday,.-__kernel_gettimeofday
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
deleted file mode 100644
index 721c4954cb6e..000000000000
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the infamous ld script for the 32 bits vdso
- * library
- */
-
-#include <asm/page.h>
-#include <asm/vdso.h>
-
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-ENTRY(_start)
-
-SECTIONS
-{
- . = VDSO32_LBASE + SIZEOF_HEADERS;
-
- .hash : { *(.hash) } :text
- .gnu.hash : { *(.gnu.hash) }
- .dynsym : { *(.dynsym) }
- .dynstr : { *(.dynstr) }
- .gnu.version : { *(.gnu.version) }
- .gnu.version_d : { *(.gnu.version_d) }
- .gnu.version_r : { *(.gnu.version_r) }
-
- .note : { *(.note.*) } :text :note
-
- . = ALIGN(16);
- .text : {
- *(.text .stub .text.* .gnu.linkonce.t.*)
- } :text
- PROVIDE(__etext = .);
- PROVIDE(_etext = .);
- PROVIDE(etext = .);
-
- /*
- * Other stuff is appended to the text segment:
- */
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
- .rodata1 : { *(.rodata1) }
-
- .dynamic : { *(.dynamic) } :text :dynamic
-
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
- .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
-
- .rela.dyn ALIGN(8) : { *(.rela.dyn) }
- .got ALIGN(8) : { *(.got .toc) }
-
- _end = .;
- PROVIDE(end = .);
-
- /*
- * Stabs debugging sections are here too.
- */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-
- /*
- * DWARF debug sections.
- * Symbols in the DWARF debugging sections are relative to the
- * beginning of the section so we begin them at 0.
- */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
- /* DWARF 3 */
- .debug_pubtypes 0 : { *(.debug_pubtypes) }
- .debug_ranges 0 : { *(.debug_ranges) }
- .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
-
- . = ALIGN(PAGE_SIZE);
- PROVIDE(_vdso_data = .);
-
- /DISCARD/ : {
- *(.note.GNU-stack)
- *(.branch_lt)
- *(.data .data.* .gnu.linkonce.d.* .sdata*)
- *(.bss .sbss .dynbss .dynsbss)
- }
-}
-
-/*
- * Very old versions of ld do not recognize this name token; use the constant.
- */
-#define PT_GNU_EH_FRAME 0x6474e550
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
- text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
- dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
- note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
-}
-
-/*
- * This controls what symbols we export from the DSO.
- */
-VERSION
-{
- VDSO_VERSION_STRING {
- global:
- /*
- * Has to be there for the kernel to find
- */
- __kernel_gettimeofday;
- __kernel_clock_gettime;
- __kernel_clock_getres;
- __kernel_getcpu;
-
- local: *;
- };
-}
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S
deleted file mode 100644
index de2fb930471a..000000000000
--- a/arch/s390/kernel/vdso32/vdso32_wrapper.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/page.h>
-
- __PAGE_ALIGNED_DATA
-
- .globl vdso32_start, vdso32_end
- .balign PAGE_SIZE
-vdso32_start:
- .incbin "arch/s390/kernel/vdso32/vdso32.so"
- .balign PAGE_SIZE
-vdso32_end:
-
- .previous
diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S
index 2446e9dac8ab..3c04f7328500 100644
--- a/arch/s390/kernel/vdso64/getcpu.S
+++ b/arch/s390/kernel/vdso64/getcpu.S
@@ -16,10 +16,8 @@
.type __kernel_getcpu,@function
__kernel_getcpu:
CFI_STARTPROC
- la %r4,0
sacf 256
- l %r5,__VDSO_CPU_NR(%r4)
- l %r4,__VDSO_NODE_ID(%r4)
+ lm %r4,%r5,__VDSO_GETCPU_VAL(%r0)
sacf 0
ltgr %r2,%r2
jz 2f
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index d7c218e8b559..28fd66d558ff 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -11,3 +11,6 @@ lib-$(CONFIG_UPROBES) += probes.o
# Instrumenting memory accesses to __user data (in different address space)
# produce false positives
KASAN_SANITIZE_uaccess.o := n
+
+obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o
+CFLAGS_test_unwind.o += -fno-optimize-sibling-calls
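+# Note: without -fno-optimize-sibling-calls the compiler could turn the
+# unwindme_func* calls into tail calls, eliminating exactly the stack
+# frames the test expects to find in the backtrace.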
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
new file mode 100644
index 000000000000..bda7ac0ddd29
--- /dev/null
+++ b/arch/s390/lib/test_unwind.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test module for unwind_for_each_frame
+ */
+
+#define pr_fmt(fmt) "test_unwind: " fmt
+#include <asm/unwind.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kprobes.h>
+#include <linux/wait.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#define BT_BUF_SIZE (PAGE_SIZE * 4)
+
+/*
+ * To avoid the printk line-length limit, split the backtrace into lines
+ */
+static void print_backtrace(char *bt)
+{
+ char *p;
+
+ while (true) {
+ p = strsep(&bt, "\n");
+ if (!p)
+ break;
+ pr_err("%s\n", p);
+ }
+}
+
+/*
+ * Calls unwind_for_each_frame(task, regs, sp) and verifies that the result
+ * contains unwindme_func2 followed by unwindme_func1.
+ */
+static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
+ unsigned long sp)
+{
+ int frame_count, prev_is_func2, seen_func2_func1;
+ const int max_frames = 128;
+ struct unwind_state state;
+ size_t bt_pos = 0;
+ int ret = 0;
+ char *bt;
+
+ bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC);
+ if (!bt) {
+ pr_err("failed to allocate backtrace buffer\n");
+ return -ENOMEM;
+ }
+ /* Unwind. */
+ frame_count = 0;
+ prev_is_func2 = 0;
+ seen_func2_func1 = 0;
+ unwind_for_each_frame(&state, task, regs, sp) {
+ unsigned long addr = unwind_get_return_address(&state);
+ char sym[KSYM_SYMBOL_LEN];
+
+ if (frame_count++ == max_frames)
+ break;
+ if (state.reliable && !addr) {
+ pr_err("unwind state reliable but addr is 0\n");
+ kfree(bt);
+ return -EINVAL;
+ }
+ sprint_symbol(sym, addr);
+ if (bt_pos < BT_BUF_SIZE) {
+ bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos,
+ state.reliable ? " [%-7s%px] %pSR\n" :
+ "([%-7s%px] %pSR)\n",
+ stack_type_name(state.stack_info.type),
+ (void *)state.sp, (void *)state.ip);
+ if (bt_pos >= BT_BUF_SIZE)
+ pr_err("backtrace buffer is too small\n");
+ }
+ if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
+ seen_func2_func1 = 1;
+ prev_is_func2 = str_has_prefix(sym, "unwindme_func2");
+ }
+
+ /* Check the results. */
+ if (unwind_error(&state)) {
+ pr_err("unwind error\n");
+ ret = -EINVAL;
+ }
+ if (!seen_func2_func1) {
+ pr_err("unwindme_func2 and unwindme_func1 not found\n");
+ ret = -EINVAL;
+ }
+ if (frame_count > max_frames) {
+ pr_err("Maximum number of frames exceeded\n");
+ ret = -EINVAL;
+ }
+ if (ret)
+ print_backtrace(bt);
+ kfree(bt);
+ return ret;
+}
+
+/* State of the task being unwound. */
+struct unwindme {
+ int flags;
+ int ret;
+ struct task_struct *task;
+ struct completion task_ready;
+ wait_queue_head_t task_wq;
+ unsigned long sp;
+};
+
+static struct unwindme *unwindme;
+
+/* Values of unwindme.flags. */
+#define UWM_DEFAULT 0x0
+#define UWM_THREAD 0x1 /* Unwind a separate task. */
+#define UWM_REGS 0x2 /* Pass regs to test_unwind(). */
+#define UWM_SP 0x4 /* Pass sp to test_unwind(). */
+#define UWM_CALLER 0x8 /* Unwind starting from caller. */
+#define UWM_SWITCH_STACK 0x10 /* Use CALL_ON_STACK. */
+#define UWM_IRQ 0x20 /* Unwind from irq context. */
+#define UWM_PGM 0x40 /* Unwind from program check handler. */
+
+static __always_inline unsigned long get_psw_addr(void)
+{
+ unsigned long psw_addr;
+
+ asm volatile(
+ "basr %[psw_addr],0\n"
+ : [psw_addr] "=d" (psw_addr));
+ return psw_addr;
+}
+
+#ifdef CONFIG_KPROBES
+static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct unwindme *u = unwindme;
+
+ u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL,
+ (u->flags & UWM_SP) ? u->sp : 0);
+ return 0;
+}
+#endif
+
+/* This function may or may not appear in the backtrace. */
+static noinline int unwindme_func4(struct unwindme *u)
+{
+ if (!(u->flags & UWM_CALLER))
+ u->sp = current_frame_address();
+ if (u->flags & UWM_THREAD) {
+ complete(&u->task_ready);
+ wait_event(u->task_wq, kthread_should_park());
+ kthread_parkme();
+ return 0;
+#ifdef CONFIG_KPROBES
+ } else if (u->flags & UWM_PGM) {
+ struct kprobe kp;
+ int ret;
+
+ unwindme = u;
+ memset(&kp, 0, sizeof(kp));
+ kp.symbol_name = "do_report_trap";
+ kp.pre_handler = pgm_pre_handler;
+ ret = register_kprobe(&kp);
+ if (ret < 0) {
+ pr_err("register_kprobe failed %d\n", ret);
+ return -EINVAL;
+ }
+
+ /*
+ * trigger specification exception
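+ * (MVCL requires an even/odd register pair and %r1 is odd,
+ * so executing it raises a specification exception)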
+ */
+ asm volatile(
+ " mvcl %%r1,%%r1\n"
+ "0: nopr %%r7\n"
+ EX_TABLE(0b, 0b)
+ :);
+
+ unregister_kprobe(&kp);
+ unwindme = NULL;
+ return u->ret;
+#endif
+ } else {
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+ regs.psw.addr = get_psw_addr();
+ regs.gprs[15] = current_stack_pointer();
+ return test_unwind(NULL,
+ (u->flags & UWM_REGS) ? &regs : NULL,
+ (u->flags & UWM_SP) ? u->sp : 0);
+ }
+}
+
+/* This function may or may not appear in the backtrace. */
+static noinline int unwindme_func3(struct unwindme *u)
+{
+ u->sp = current_frame_address();
+ return unwindme_func4(u);
+}
+
+/* This function must appear in the backtrace. */
+static noinline int unwindme_func2(struct unwindme *u)
+{
+ int rc;
+
+ if (u->flags & UWM_SWITCH_STACK) {
+ preempt_disable();
+ rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u);
+ preempt_enable();
+ return rc;
+ } else {
+ return unwindme_func3(u);
+ }
+}
+
+/* This function must follow unwindme_func2 in the backtrace. */
+static noinline int unwindme_func1(void *u)
+{
+ return unwindme_func2((struct unwindme *)u);
+}
+
+static void unwindme_irq_handler(struct ext_code ext_code,
+ unsigned int param32,
+ unsigned long param64)
+{
+ struct unwindme *u = READ_ONCE(unwindme);
+
+ if (u && u->task == current) {
+ unwindme = NULL;
+ u->task = NULL;
+ u->ret = unwindme_func1(u);
+ }
+}
+
+static int test_unwind_irq(struct unwindme *u)
+{
+ preempt_disable();
+ if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) {
+ preempt_enable();
+ pr_info("Couldn't register external interrupt handler\n");
+ return -1;
+ }
+ u->task = current;
+ unwindme = u;
+ udelay(1);
+ unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler);
+ preempt_enable();
+ return u->ret;
+}
+
+/* Spawns a task and passes it to test_unwind(). */
+static int test_unwind_task(struct unwindme *u)
+{
+ struct task_struct *task;
+ int ret;
+
+ /* Initialize thread-related fields. */
+ init_completion(&u->task_ready);
+ init_waitqueue_head(&u->task_wq);
+
+ /*
+ * Start the task and wait until it reaches unwindme_func4() and sleeps
+ * in (task_ready, unwind_done] range.
+ */
+ task = kthread_run(unwindme_func1, u, "%s", __func__);
+ if (IS_ERR(task)) {
+ pr_err("kthread_run() failed\n");
+ return PTR_ERR(task);
+ }
+ /*
+ * Make sure the task reaches unwindme_func4() before parking it;
+ * otherwise we might park it before the kthread function has run at all.
+ */
+ wait_for_completion(&u->task_ready);
+ kthread_park(task);
+ /* Unwind. */
+ ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0);
+ kthread_stop(task);
+ return ret;
+}
+
+static int test_unwind_flags(int flags)
+{
+ struct unwindme u;
+
+ u.flags = flags;
+ if (u.flags & UWM_THREAD)
+ return test_unwind_task(&u);
+ else if (u.flags & UWM_IRQ)
+ return test_unwind_irq(&u);
+ else
+ return unwindme_func1(&u);
+}
+
+static int test_unwind_init(void)
+{
+ int ret = 0;
+
+#define TEST(flags) \
+do { \
+ pr_info("[ RUN ] " #flags "\n"); \
+ if (!test_unwind_flags((flags))) { \
+ pr_info("[ OK ] " #flags "\n"); \
+ } else { \
+ pr_err("[ FAILED ] " #flags "\n"); \
+ ret = -EINVAL; \
+ } \
+} while (0)
+
+ TEST(UWM_DEFAULT);
+ TEST(UWM_SP);
+ TEST(UWM_REGS);
+ TEST(UWM_SWITCH_STACK);
+ TEST(UWM_SP | UWM_REGS);
+ TEST(UWM_CALLER | UWM_SP);
+ TEST(UWM_CALLER | UWM_SP | UWM_REGS);
+ TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
+ TEST(UWM_THREAD);
+ TEST(UWM_THREAD | UWM_SP);
+ TEST(UWM_THREAD | UWM_CALLER | UWM_SP);
+ TEST(UWM_IRQ);
+ TEST(UWM_IRQ | UWM_SWITCH_STACK);
+ TEST(UWM_IRQ | UWM_SP);
+ TEST(UWM_IRQ | UWM_REGS);
+ TEST(UWM_IRQ | UWM_SP | UWM_REGS);
+ TEST(UWM_IRQ | UWM_CALLER | UWM_SP);
+ TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS);
+ TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
+#ifdef CONFIG_KPROBES
+ TEST(UWM_PGM);
+ TEST(UWM_PGM | UWM_SP);
+ TEST(UWM_PGM | UWM_REGS);
+ TEST(UWM_PGM | UWM_SP | UWM_REGS);
+#endif
+#undef TEST
+
+ return ret;
+}
+
+static void test_unwind_exit(void)
+{
+}
+
+module_init(test_unwind_init);
+module_exit(test_unwind_exit);
+MODULE_LICENSE("GPL");
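For clarity, each TEST() invocation above expands into a run/report pair; for example, TEST(UWM_THREAD | UWM_SP) becomes (preprocessor output reconstructed from the macro definition):

	do {
		pr_info("[ RUN ] UWM_THREAD | UWM_SP\n");
		if (!test_unwind_flags((UWM_THREAD | UWM_SP))) {
			pr_info("[ OK ] UWM_THREAD | UWM_SP\n");
		} else {
			pr_err("[ FAILED ] UWM_THREAD | UWM_SP\n");
			ret = -EINVAL;
		}
	} while (0);

With the module built as CONFIG_S390_UNWIND_SELFTEST (see the lib/Makefile hunk above), the results land in the kernel log under the "test_unwind: " prefix at load time.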
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 59ad7997fed1..de7ca4b6718f 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -119,9 +119,15 @@ static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
*/
int memcpy_real(void *dest, void *src, size_t count)
{
- if (S390_lowcore.nodat_stack != 0)
- return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
- 3, dest, src, count);
+ int rc;
+
+ if (S390_lowcore.nodat_stack != 0) {
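+ /*
+ * CALL_ON_STACK targets this CPU's nodat stack; preemption stays
+ * disabled so the task cannot migrate while running on it.
+ */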
+ preempt_disable();
+ rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3,
+ dest, src, count);
+ preempt_enable();
+ return rc;
+ }
/*
* This is a really early memcpy_real call, the stacks are
* not set up yet. Just call _memcpy_real on the early boot
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index c7fea9bea8cb..8e872951c07b 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
+#include <linux/printk.h>
#include <asm/isc.h>
#include <asm/airq.h>
@@ -43,7 +44,7 @@ static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
#define ZPCI_IOMAP_ENTRIES \
- min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2), \
+ min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
ZPCI_IOMAP_MAX_ENTRIES)
static DEFINE_SPINLOCK(zpci_iomap_lock);
@@ -294,7 +295,7 @@ static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
unsigned long offset, unsigned long max)
{
- if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+ if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
return NULL;
if (static_branch_likely(&have_mio))
@@ -324,7 +325,7 @@ static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
unsigned long offset, unsigned long max)
{
- if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+ if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
return NULL;
if (static_branch_likely(&have_mio))
@@ -416,7 +417,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
resource_size_t len;
int i;
- for (i = 0; i < PCI_BAR_COUNT; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
len = pci_resource_len(pdev, i);
if (!len)
continue;
@@ -451,7 +452,7 @@ static void zpci_unmap_resources(struct pci_dev *pdev)
if (zpci_use_mio(zdev))
return;
- for (i = 0; i < PCI_BAR_COUNT; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
len = pci_resource_len(pdev, i);
if (!len)
continue;
@@ -514,7 +515,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
snprintf(zdev->res_name, sizeof(zdev->res_name),
"PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);
- for (i = 0; i < PCI_BAR_COUNT; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!zdev->bars[i].size)
continue;
entry = zpci_alloc_iomap(zdev);
@@ -551,7 +552,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
int i;
- for (i = 0; i < PCI_BAR_COUNT; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!zdev->bars[i].size || !zdev->bars[i].res)
continue;
@@ -573,7 +574,7 @@ int pcibios_add_device(struct pci_dev *pdev)
pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
- for (i = 0; i < PCI_BAR_COUNT; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
res = &pdev->resource[i];
if (res->parent || !res->flags)
continue;
@@ -659,6 +660,8 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
spin_lock(&zpci_domain_lock);
if (test_bit(zdev->domain, zpci_domain)) {
spin_unlock(&zpci_domain_lock);
+ pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
+ zdev->fid, zdev->domain);
return -EEXIST;
}
set_bit(zdev->domain, zpci_domain);
@@ -670,6 +673,8 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
if (zdev->domain == ZPCI_NR_DEVICES) {
spin_unlock(&zpci_domain_lock);
+ pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
+ zdev->fid, ZPCI_NR_DEVICES);
return -ENOSPC;
}
set_bit(zdev->domain, zpci_domain);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index e585a62d6530..4c613e569fe0 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -145,7 +145,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
{
int i;
- for (i = 0; i < PCI_BAR_COUNT; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
zdev->bars[i].val = le32_to_cpu(response->bar[i]);
zdev->bars[i].size = response->bar_size[i];
}
@@ -164,8 +164,8 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
sizeof(zdev->util_str));
}
zdev->mio_capable = response->mio_addr_avail;
- for (i = 0; i < PCI_BAR_COUNT; i++) {
- if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1))))
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
continue;
zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index acaa97459531..dd427bac5cde 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -371,20 +371,32 @@ static struct platform_device lcdc_device = {
},
};
+static struct gpiod_lookup_table gpio_backlight_lookup = {
+ .dev_id = "gpio-backlight.0",
+ .table = {
+ GPIO_LOOKUP("sh7724_pfc", GPIO_PTR1, NULL, GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct property_entry gpio_backlight_props[] = {
+ PROPERTY_ENTRY_BOOL("default-on"),
+ { }
+};
+
static struct gpio_backlight_platform_data gpio_backlight_data = {
.fbdev = &lcdc_device.dev,
- .gpio = GPIO_PTR1,
- .def_value = 1,
- .name = "backlight",
};
-static struct platform_device gpio_backlight_device = {
+static const struct platform_device_info gpio_backlight_device_info = {
.name = "gpio-backlight",
- .dev = {
- .platform_data = &gpio_backlight_data,
- },
+ .data = &gpio_backlight_data,
+ .size_data = sizeof(gpio_backlight_data),
+ .properties = gpio_backlight_props,
};
+static struct platform_device *gpio_backlight_device;
+
/* CEU0 */
static struct ceu_platform_data ceu0_pdata = {
.num_subdevs = 2,
@@ -1006,7 +1018,6 @@ static struct platform_device *ecovec_devices[] __initdata = {
&usb1_common_device,
&usbhs_device,
&lcdc_device,
- &gpio_backlight_device,
&keysc_device,
&cn12_power,
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
@@ -1462,6 +1473,12 @@ static int __init arch_setup(void)
#endif
#endif
+ gpiod_add_lookup_table(&gpio_backlight_lookup);
+ gpio_backlight_device = platform_device_register_full(
+ &gpio_backlight_device_info);
+ if (IS_ERR(gpio_backlight_device))
+ return PTR_ERR(gpio_backlight_device);
+
return platform_add_devices(ecovec_devices,
ARRAY_SIZE(ecovec_devices));
}
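For context, the board now hands the backlight GPIO to the driver through a machine lookup table plus a device property instead of platform-data fields. A minimal sketch of the consumer side (illustrative; not the actual gpio-backlight driver source) showing why the dev_id and NULL con_id above line up:

	/* In the driver's probe(): "gpio-backlight.0" matches dev_id, and a
	 * NULL con_id matches the NULL in the lookup table entry. */
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);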
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index c15cac9251b9..e69ec12cbbe6 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -111,6 +111,11 @@ void __stack_chk_fail(void)
error("stack-protector: Kernel stack is corrupted\n");
}
+/* Needed because vmlinux.lds.h references this */
+void ftrace_stub(void)
+{
+}
+
#ifdef CONFIG_SUPERH64
#define stackalign 8
#else
diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig
index 2b0572b497c1..78643191c99e 100644
--- a/arch/sh/configs/rsk7264_defconfig
+++ b/arch/sh/configs/rsk7264_defconfig
@@ -8,7 +8,6 @@ CONFIG_NAMESPACES=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_COUNTERS=y
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index 3e93b434e604..56b0acace6e7 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux SuperH-specific device drivers.
#
-obj-y += dma/
+obj-y += dma/ platform_early.o
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c
new file mode 100644
index 000000000000..f6d148451dfc
--- /dev/null
+++ b/arch/sh/drivers/platform_early.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/platform_early.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm.h>
+
+static __initdata LIST_HEAD(sh_early_platform_driver_list);
+static __initdata LIST_HEAD(sh_early_platform_device_list);
+
+static const struct platform_device_id *
+platform_match_id(const struct platform_device_id *id,
+ struct platform_device *pdev)
+{
+ while (id->name[0]) {
+ if (strcmp(pdev->name, id->name) == 0) {
+ pdev->id_entry = id;
+ return id;
+ }
+ id++;
+ }
+ return NULL;
+}
+
+static int platform_match(struct device *dev, struct device_driver *drv)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct platform_driver *pdrv = to_platform_driver(drv);
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (pdev->driver_override)
+ return !strcmp(pdev->driver_override, drv->name);
+
+ /* Then try to match against the id table */
+ if (pdrv->id_table)
+ return platform_match_id(pdrv->id_table, pdev) != NULL;
+
+ /* fall-back to driver name match */
+ return (strcmp(pdev->name, drv->name) == 0);
+}
+
+#ifdef CONFIG_PM
+static void device_pm_init_common(struct device *dev)
+{
+ if (!dev->power.early_init) {
+ spin_lock_init(&dev->power.lock);
+ dev->power.qos = NULL;
+ dev->power.early_init = true;
+ }
+}
+
+static void pm_runtime_early_init(struct device *dev)
+{
+ dev->power.disable_depth = 1;
+ device_pm_init_common(dev);
+}
+#else
+static void pm_runtime_early_init(struct device *dev) {}
+#endif
+
+/**
+ * sh_early_platform_driver_register - register early platform driver
+ * @epdrv: sh_early_platform driver structure
+ * @buf: string passed from early_param()
+ *
+ * Helper function for sh_early_platform_init() / sh_early_platform_init_buffer()
+ */
+int __init sh_early_platform_driver_register(struct sh_early_platform_driver *epdrv,
+ char *buf)
+{
+ char *tmp;
+ int n;
+
+ /* Simply add the driver to the end of the global list.
+ * Drivers will by default be put on the list in compiled-in order.
+ */
+ if (!epdrv->list.next) {
+ INIT_LIST_HEAD(&epdrv->list);
+ list_add_tail(&epdrv->list, &sh_early_platform_driver_list);
+ }
+
+ /* If the user has specified a device then make sure the driver
+ * gets prioritized. The driver of the last device specified on
+ * command line will be put first on the list.
+ */
+ n = strlen(epdrv->pdrv->driver.name);
+ if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
+ list_move(&epdrv->list, &sh_early_platform_driver_list);
+
+ /* Allow passing parameters after device name */
+ if (buf[n] == '\0' || buf[n] == ',')
+ epdrv->requested_id = -1;
+ else {
+ epdrv->requested_id = simple_strtoul(&buf[n + 1],
+ &tmp, 10);
+
+ if (buf[n] != '.' || (tmp == &buf[n + 1])) {
+ epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
+ n = 0;
+ } else
+ n += strcspn(&buf[n + 1], ",") + 1;
+ }
+
+ if (buf[n] == ',')
+ n++;
+
+ if (epdrv->bufsize) {
+ memcpy(epdrv->buffer, &buf[n],
+ min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
+ epdrv->buffer[epdrv->bufsize - 1] = '\0';
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sh_early_platform_add_devices - adds a number of early platform devices
+ * @devs: array of early platform devices to add
+ * @num: number of early platform devices in array
+ *
+ * Used by early architecture code to register early platform devices and
+ * their platform data.
+ */
+void __init sh_early_platform_add_devices(struct platform_device **devs, int num)
+{
+ struct device *dev;
+ int i;
+
+ /* simply add the devices to the list */
+ for (i = 0; i < num; i++) {
+ dev = &devs[i]->dev;
+
+ if (!dev->devres_head.next) {
+ pm_runtime_early_init(dev);
+ INIT_LIST_HEAD(&dev->devres_head);
+ list_add_tail(&dev->devres_head,
+ &sh_early_platform_device_list);
+ }
+ }
+}
+
+/**
+ * sh_early_platform_driver_register_all - register early platform drivers
+ * @class_str: string to identify early platform driver class
+ *
+ * Used by architecture code to register all early platform drivers
+ * for a certain class. If omitted then only early platform drivers
+ * with matching kernel command line class parameters will be registered.
+ */
+void __init sh_early_platform_driver_register_all(char *class_str)
+{
+ /* The "class_str" parameter may or may not be present on the kernel
+ * command line. If it is present then there may be more than one
+ * matching parameter.
+ *
+ * Since we register our early platform drivers using early_param()
+ * we need to make sure that they also get registered in the case
+ * when the parameter is missing from the kernel command line.
+ *
+ * We use parse_early_options() to make sure the early_param() gets
+ * called at least once. The early_param() may be called more than
+ * once since the name of the preferred device may be specified on
+ * the kernel command line. sh_early_platform_driver_register() handles
+ * this case for us.
+ */
+ parse_early_options(class_str);
+}
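+
+/*
+ * Illustration (driver name assumed): with class_str "earlytimer" and a
+ * driver named "sh_tmu", booting with "earlytimer=sh_tmu.0" moves that
+ * driver to the head of the list and records requested_id 0; the
+ * "<name>.<id>[,...]" syntax is parsed by
+ * sh_early_platform_driver_register() above.
+ */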
+
+/**
+ * sh_early_platform_match - find early platform device matching driver
+ * @epdrv: early platform driver structure
+ * @id: id to match against
+ */
+static struct platform_device * __init
+sh_early_platform_match(struct sh_early_platform_driver *epdrv, int id)
+{
+ struct platform_device *pd;
+
+ list_for_each_entry(pd, &sh_early_platform_device_list, dev.devres_head)
+ if (platform_match(&pd->dev, &epdrv->pdrv->driver))
+ if (pd->id == id)
+ return pd;
+
+ return NULL;
+}
+
+/**
+ * sh_early_platform_left - check if early platform driver has matching devices
+ * @epdrv: early platform driver structure
+ * @id: return true if id or above exists
+ */
+static int __init sh_early_platform_left(struct sh_early_platform_driver *epdrv,
+ int id)
+{
+ struct platform_device *pd;
+
+ list_for_each_entry(pd, &sh_early_platform_device_list, dev.devres_head)
+ if (platform_match(&pd->dev, &epdrv->pdrv->driver))
+ if (pd->id >= id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * sh_early_platform_driver_probe_id - probe drivers matching class_str and id
+ * @class_str: string to identify early platform driver class
+ * @id: id to match against
+ * @nr_probe: number of platform devices to successfully probe before exiting
+ */
+static int __init sh_early_platform_driver_probe_id(char *class_str,
+ int id,
+ int nr_probe)
+{
+ struct sh_early_platform_driver *epdrv;
+ struct platform_device *match;
+ int match_id;
+ int n = 0;
+ int left = 0;
+
+ list_for_each_entry(epdrv, &sh_early_platform_driver_list, list) {
+ /* only use drivers matching our class_str */
+ if (strcmp(class_str, epdrv->class_str))
+ continue;
+
+ if (id == -2) {
+ match_id = epdrv->requested_id;
+ left = 1;
+
+ } else {
+ match_id = id;
+ left += sh_early_platform_left(epdrv, id);
+
+ /* skip requested id */
+ switch (epdrv->requested_id) {
+ case EARLY_PLATFORM_ID_ERROR:
+ case EARLY_PLATFORM_ID_UNSET:
+ break;
+ default:
+ if (epdrv->requested_id == id)
+ match_id = EARLY_PLATFORM_ID_UNSET;
+ }
+ }
+
+ switch (match_id) {
+ case EARLY_PLATFORM_ID_ERROR:
+ pr_warn("%s: unable to parse %s parameter\n",
+ class_str, epdrv->pdrv->driver.name);
+ /* fall-through */
+ case EARLY_PLATFORM_ID_UNSET:
+ match = NULL;
+ break;
+ default:
+ match = sh_early_platform_match(epdrv, match_id);
+ }
+
+ if (match) {
+ /*
+ * Set up a sensible init_name to enable
+ * dev_name() and others to be used before the
+ * rest of the driver core is initialized.
+ */
+ if (!match->dev.init_name && slab_is_available()) {
+ if (match->id != -1)
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s.%d",
+ match->name,
+ match->id);
+ else
+ match->dev.init_name =
+ kasprintf(GFP_KERNEL, "%s",
+ match->name);
+
+ if (!match->dev.init_name)
+ return -ENOMEM;
+ }
+
+ if (epdrv->pdrv->probe(match))
+ pr_warn("%s: unable to probe %s early.\n",
+ class_str, match->name);
+ else
+ n++;
+ }
+
+ if (n >= nr_probe)
+ break;
+ }
+
+ if (left)
+ return n;
+ else
+ return -ENODEV;
+}
+
+/**
+ * sh_early_platform_driver_probe - probe a class of registered drivers
+ * @class_str: string to identify early platform driver class
+ * @nr_probe: number of platform devices to successfully probe before exiting
+ * @user_only: only probe user specified early platform devices
+ *
+ * Used by architecture code to probe registered early platform drivers
+ * within a certain class. For probe to happen a registered early platform
+ * device matching a registered early platform driver is needed.
+ */
+int __init sh_early_platform_driver_probe(char *class_str,
+ int nr_probe,
+ int user_only)
+{
+ int k, n, i;
+
+ n = 0;
+ for (i = -2; n < nr_probe; i++) {
+ k = sh_early_platform_driver_probe_id(class_str, i, nr_probe - n);
+
+ if (k < 0)
+ break;
+
+ n += k;
+
+ if (user_only)
+ break;
+ }
+
+ return n;
+}
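The architecture hooks converted later in this patch call this as follows (see the arch/sh/kernel/setup.c and time.c hunks below); the first iteration, id == -2, probes devices the user explicitly requested on the command line, after which the remaining device ids are walked in ascending order until nr_probe devices have probed or none are left:

	sh_early_platform_driver_probe("earlyprintk", 1, 1);
	sh_early_platform_driver_probe("earlytimer", 2, 0);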
+
+/**
+ * sh_early_platform_cleanup - clean up early platform code
+ */
+static int __init sh_early_platform_cleanup(void)
+{
+ struct platform_device *pd, *pd2;
+
+ /* clean up the devres list used to chain devices */
+ list_for_each_entry_safe(pd, pd2, &sh_early_platform_device_list,
+ dev.devres_head) {
+ list_del(&pd->dev.devres_head);
+ memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
+ }
+
+ return 0;
+}
+/*
+ * This must happen once after all early devices are probed but before probing
+ * real platform devices.
+ */
+subsys_initcall(sh_early_platform_cleanup);
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index ac0561960c52..1495489225ac 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -267,7 +267,7 @@ unsigned long long poke_real_address_q(unsigned long long addr,
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
pgprot_t prot, void *caller);
-void __iounmap(void __iomem *addr);
+void iounmap(void __iomem *addr);
static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
@@ -328,7 +328,7 @@ __ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
#else
#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
-#define __iounmap(addr) do { } while (0)
+#define iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
@@ -370,11 +370,6 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#define ioremap_nocache ioremap
#define ioremap_uc ioremap
-static inline void iounmap(void __iomem *addr)
-{
- __iounmap(addr);
-}
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
diff --git a/arch/sh/include/asm/platform_early.h b/arch/sh/include/asm/platform_early.h
new file mode 100644
index 000000000000..fc802137c37d
--- /dev/null
+++ b/arch/sh/include/asm/platform_early.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __PLATFORM_EARLY__
+#define __PLATFORM_EARLY__
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct sh_early_platform_driver {
+ const char *class_str;
+ struct platform_driver *pdrv;
+ struct list_head list;
+ int requested_id;
+ char *buffer;
+ int bufsize;
+};
+
+#define EARLY_PLATFORM_ID_UNSET -2
+#define EARLY_PLATFORM_ID_ERROR -3
+
+extern int sh_early_platform_driver_register(struct sh_early_platform_driver *epdrv,
+ char *buf);
+extern void sh_early_platform_add_devices(struct platform_device **devs, int num);
+
+static inline int is_sh_early_platform_device(struct platform_device *pdev)
+{
+ return !pdev->dev.driver;
+}
+
+extern void sh_early_platform_driver_register_all(char *class_str);
+extern int sh_early_platform_driver_probe(char *class_str,
+ int nr_probe, int user_only);
+
+#define sh_early_platform_init(class_string, platdrv) \
+ sh_early_platform_init_buffer(class_string, platdrv, NULL, 0)
+
+#ifndef MODULE
+#define sh_early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
+static __initdata struct sh_early_platform_driver early_driver = { \
+ .class_str = class_string, \
+ .buffer = buf, \
+ .bufsize = bufsiz, \
+ .pdrv = platdrv, \
+ .requested_id = EARLY_PLATFORM_ID_UNSET, \
+}; \
+static int __init sh_early_platform_driver_setup_func(char *buffer) \
+{ \
+ return sh_early_platform_driver_register(&early_driver, buffer); \
+} \
+early_param(class_string, sh_early_platform_driver_setup_func)
+#else /* MODULE */
+#define sh_early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
+static inline char *sh_early_platform_driver_setup_func(void) \
+{ \
+ return bufsiz ? buf : NULL; \
+}
+#endif /* MODULE */
+
+#endif /* __PLATFORM_EARLY__ */
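On the driver side, registration remains a single macro invocation at file scope; a minimal sketch, assuming a hypothetical example_probe() (the sh-sci serial driver is the in-tree user of the buffered variant):

	static struct platform_driver example_pdrv = {
		.probe = example_probe,
		.driver = {
			.name = "example-early",
		},
	};

	sh_early_platform_init("earlyprintk", &example_pdrv);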
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7734.h b/arch/sh/include/cpu-sh4/cpu/sh7734.h
index 96f0246ad2f2..82b63208135a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7734.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7734.h
@@ -134,7 +134,7 @@ enum {
GPIO_FN_EX_WAIT1, GPIO_FN_SD1_DAT0_A, GPIO_FN_DREQ2, GPIO_FN_CAN1_TX_C,
GPIO_FN_ET0_LINK_C, GPIO_FN_ET0_ETXD5_A,
GPIO_FN_EX_WAIT0, GPIO_FN_TCLK1_B,
- GPIO_FN_RD_WR, GPIO_FN_TCLK0,
+ GPIO_FN_RD_WR, GPIO_FN_TCLK0, GPIO_FN_CAN_CLK_B, GPIO_FN_ET0_ETXD4,
GPIO_FN_EX_CS5, GPIO_FN_SD1_CMD_A, GPIO_FN_ATADIR, GPIO_FN_QSSL_B,
GPIO_FN_ET0_ETXD3_A,
GPIO_FN_EX_CS4, GPIO_FN_SD1_WP_A, GPIO_FN_ATAWR, GPIO_FN_QMI_QIO1_B,
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index f5b6841ef7e1..b1c877b6a420 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -12,6 +12,7 @@
#include <linux/sh_eth.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -199,6 +200,6 @@ void __init plat_early_device_setup(void)
/* enable CMT clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x10, STBCR3);
- early_platform_add_devices(sh7619_early_devices,
+ sh_early_platform_add_devices(sh7619_early_devices,
ARRAY_SIZE(sh7619_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
index 52350ad0b0a2..cefa07924c16 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -9,6 +9,7 @@
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -169,6 +170,6 @@ static struct platform_device *mxg_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(mxg_early_devices,
+ sh_early_platform_add_devices(mxg_early_devices,
ARRAY_SIZE(mxg_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
index b51ed761ae08..28f1bebf3405 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -11,6 +11,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -412,6 +413,6 @@ void __init plat_early_device_setup(void)
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
- early_platform_add_devices(sh7201_early_devices,
+ sh_early_platform_add_devices(sh7201_early_devices,
ARRAY_SIZE(sh7201_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index 89b3e49fc250..4839f3aaeb4c 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -10,6 +10,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -349,6 +350,6 @@ void __init plat_early_device_setup(void)
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
- early_platform_add_devices(sh7203_early_devices,
+ sh_early_platform_add_devices(sh7203_early_devices,
ARRAY_SIZE(sh7203_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index 36ff3a3139da..68add5af4cc5 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -11,6 +11,7 @@
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -285,6 +286,6 @@ void __init plat_early_device_setup(void)
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
- early_platform_add_devices(sh7206_early_devices,
+ sh_early_platform_add_devices(sh7206_early_devices,
ARRAY_SIZE(sh7206_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7264.c b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c
index d199618d877c..8a1cb613dd2e 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7264.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c
@@ -11,6 +11,7 @@
#include <linux/usb/r8a66597.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -546,6 +547,6 @@ static struct platform_device *sh7264_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7264_early_devices,
+ sh_early_platform_add_devices(sh7264_early_devices,
ARRAY_SIZE(sh7264_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7269.c b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c
index 9095c960b455..8b1ef3028320 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7269.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c
@@ -12,6 +12,7 @@
#include <linux/usb/r8a66597.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -562,6 +563,6 @@ static struct platform_device *sh7269_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7269_early_devices,
+ sh_early_platform_add_devices(sh7269_early_devices,
ARRAY_SIZE(sh7269_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c
index 8058c01cf09d..cf2a3f09fee4 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
/* All SH3 devices are equipped with IRQ0->5 (except sh7708) */
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index e19d1ce7b6ad..0544134b3f20 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -14,6 +14,7 @@
#include <linux/sh_intc.h>
#include <asm/rtc.h>
#include <cpu/serial.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -178,7 +179,7 @@ static struct platform_device *sh7705_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7705_early_devices,
+ sh_early_platform_add_devices(sh7705_early_devices,
ARRAY_SIZE(sh7705_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 5c5144bee6bc..4947f57748bc 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -18,6 +18,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <cpu/serial.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -230,7 +231,7 @@ static struct platform_device *sh770x_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh770x_early_devices,
+ sh_early_platform_add_devices(sh770x_early_devices,
ARRAY_SIZE(sh770x_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 4776e2495738..381910761579 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -13,6 +13,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/rtc.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -177,7 +178,7 @@ static struct platform_device *sh7710_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7710_early_devices,
+ sh_early_platform_add_devices(sh7710_early_devices,
ARRAY_SIZE(sh7710_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 1d4c34e7b7db..425d067dae9b 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -19,6 +19,7 @@
#include <linux/sh_intc.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/rtc.h>
+#include <asm/platform_early.h>
#include <cpu/serial.h>
static struct resource rtc_resources[] = {
@@ -211,7 +212,7 @@ static struct platform_device *sh7720_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7720_early_devices,
+ sh_early_platform_add_devices(sh7720_early_devices,
ARRAY_SIZE(sh7720_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index a40ef35d101a..e6737f3d0df2 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -12,6 +12,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
@@ -76,7 +77,7 @@ static struct platform_device *sh4202_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh4202_early_devices,
+ sh_early_platform_add_devices(sh4202_early_devices,
ARRAY_SIZE(sh4202_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index b37bda66a532..19c8f1d69071 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -13,6 +13,7 @@
#include <linux/sh_intc.h>
#include <linux/serial_sci.h>
#include <generated/machtypes.h>
+#include <asm/platform_early.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -161,15 +162,15 @@ void __init plat_early_device_setup(void)
if (mach_is_rts7751r2d()) {
scif_platform_data.scscr |= SCSCR_CKE1;
dev[0] = &scif_device;
- early_platform_add_devices(dev, 1);
+ sh_early_platform_add_devices(dev, 1);
} else {
dev[0] = &sci_device;
- early_platform_add_devices(dev, 1);
+ sh_early_platform_add_devices(dev, 1);
dev[0] = &scif_device;
- early_platform_add_devices(dev, 1);
+ sh_early_platform_add_devices(dev, 1);
}
- early_platform_add_devices(sh7750_early_devices,
+ sh_early_platform_add_devices(sh7750_early_devices,
ARRAY_SIZE(sh7750_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 86845da85997..14212f5d803c 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -11,6 +11,7 @@
#include <linux/sh_intc.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
enum {
UNUSED = 0,
@@ -271,7 +272,7 @@ static struct platform_device *sh7760_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7760_early_devices,
+ sh_early_platform_add_devices(sh7760_early_devices,
ARRAY_SIZE(sh7760_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index a15e25690b5f..b6015188fab1 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -12,6 +12,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/clock.h>
+#include <asm/platform_early.h>
/* Serial */
static struct plat_sci_port scif0_platform_data = {
@@ -296,7 +297,7 @@ static struct platform_device *sh7343_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7343_early_devices,
+ sh_early_platform_add_devices(sh7343_early_devices,
ARRAY_SIZE(sh7343_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 7bd2776441ba..6676beef053e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -15,6 +15,7 @@
#include <linux/sh_intc.h>
#include <linux/usb/r8a66597.h>
#include <asm/clock.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
@@ -240,7 +241,7 @@ static struct platform_device *sh7366_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7366_early_devices,
+ sh_early_platform_add_devices(sh7366_early_devices,
ARRAY_SIZE(sh7366_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 1ce65f88f060..0c6757ef63f4 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -18,6 +18,7 @@
#include <asm/clock.h>
#include <asm/mmzone.h>
#include <asm/siu.h>
+#include <asm/platform_early.h>
#include <cpu/dma-register.h>
#include <cpu/sh7722.h>
@@ -512,7 +513,7 @@ static struct platform_device *sh7722_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7722_early_devices,
+ sh_early_platform_add_devices(sh7722_early_devices,
ARRAY_SIZE(sh7722_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index edb649950662..83ae1ad4a86e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <asm/platform_early.h>
#include <cpu/sh7723.h>
/* Serial */
@@ -410,7 +411,7 @@ static struct platform_device *sh7723_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7723_early_devices,
+ sh_early_platform_add_devices(sh7723_early_devices,
ARRAY_SIZE(sh7723_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 3e9825031d3d..0d990ab1ba2a 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -24,6 +24,7 @@
#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <asm/platform_early.h>
#include <cpu/dma-register.h>
#include <cpu/sh7724.h>
@@ -830,7 +831,7 @@ static struct platform_device *sh7724_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7724_early_devices,
+ sh_early_platform_add_devices(sh7724_early_devices,
ARRAY_SIZE(sh7724_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7734.c b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c
index 06a91569697a..9911da794358 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7734.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/irq.h>
+#include <asm/platform_early.h>
#include <cpu/sh7734.h>
/* SCIF */
@@ -280,7 +281,7 @@ static struct platform_device *sh7734_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7734_early_devices,
+ sh_early_platform_add_devices(sh7734_early_devices,
ARRAY_SIZE(sh7734_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 2501ce656511..67e330b7ea46 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -19,6 +19,7 @@
#include <linux/usb/ohci_pdriver.h>
#include <cpu/dma-register.h>
#include <cpu/sh7757.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
@@ -767,7 +768,7 @@ static struct platform_device *sh7757_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7757_early_devices,
+ sh_early_platform_add_devices(sh7757_early_devices,
ARRAY_SIZE(sh7757_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 419c5efe4a17..b0608664785f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/usb/ohci_pdriver.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
@@ -221,7 +222,7 @@ static struct platform_device *sh7763_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7763_early_devices,
+ sh_early_platform_add_devices(sh7763_early_devices,
ARRAY_SIZE(sh7763_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index 5fb4cf9b58c6..5efec6ceb04d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -11,6 +11,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_TOIE,
@@ -316,7 +317,7 @@ static struct platform_device *sh7770_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7770_early_devices,
+ sh_early_platform_add_devices(sh7770_early_devices,
ARRAY_SIZE(sh7770_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index ab7d6b715865..c818b788ecb0 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -13,6 +13,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <cpu/dma-register.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
@@ -285,7 +286,7 @@ void __init plat_early_device_setup(void)
scif1_platform_data.scscr &= ~SCSCR_CKE1;
}
- early_platform_add_devices(sh7780_early_devices,
+ sh_early_platform_add_devices(sh7780_early_devices,
ARRAY_SIZE(sh7780_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index a438da47285d..3b4a414d60a9 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -14,6 +14,7 @@
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/mmzone.h>
+#include <asm/platform_early.h>
#include <cpu/dma-register.h>
static struct plat_sci_port scif0_platform_data = {
@@ -353,7 +354,7 @@ static struct platform_device *sh7785_early_devices[] __initdata = {
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7785_early_devices,
+ sh_early_platform_add_devices(sh7785_early_devices,
ARRAY_SIZE(sh7785_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index d894165a0ef6..4b0db8259e3d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -23,6 +23,7 @@
#include <linux/usb/ohci_pdriver.h>
#include <cpu/dma-register.h>
#include <asm/mmzone.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
@@ -834,6 +835,6 @@ arch_initcall(sh7786_devices_setup);
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh7786_early_devices,
+ sh_early_platform_add_devices(sh7786_early_devices,
ARRAY_SIZE(sh7786_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index 14aa4552bc45..7014d6d199b3 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -14,6 +14,7 @@
#include <linux/sh_intc.h>
#include <cpu/shx3.h>
#include <asm/mmzone.h>
+#include <asm/platform_early.h>
/*
* This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
@@ -152,7 +153,7 @@ arch_initcall(shx3_devices_setup);
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(shx3_early_devices,
+ sh_early_platform_add_devices(shx3_early_devices,
ARRAY_SIZE(shx3_early_devices));
}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
index 41c1673afc0b..dc8476d67244 100644
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/sh_timer.h>
#include <asm/addrspace.h>
+#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.flags = UPF_IOREMAP,
@@ -115,6 +116,6 @@ arch_initcall(sh5_devices_setup);
void __init plat_early_device_setup(void)
{
- early_platform_add_devices(sh5_early_devices,
+ sh_early_platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
}
diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c
index b17514619b7e..eeb25a4fa55f 100644
--- a/arch/sh/kernel/dma-coherent.c
+++ b/arch/sh/kernel/dma-coherent.c
@@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
* Pages from the page allocator may have data present in
* cache. So flush the cache before using uncached memory.
*/
- arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+ arch_sync_dma_for_device(virt_to_phys(ret), size,
DMA_BIDIRECTIONAL);
ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
@@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
iounmap(vaddr);
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 6ef341f6cfee..d232cfa01877 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -44,6 +44,7 @@
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/sparsemem.h>
+#include <asm/platform_early.h>
/*
* Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -328,7 +329,7 @@ void __init setup_arch(char **cmdline_p)
sh_mv_setup();
/* Let earlyprintk output early console messages */
- early_platform_driver_probe("earlyprintk", 1, 1);
+ sh_early_platform_driver_probe("earlyprintk", 1, 1);
#ifdef CONFIG_OF_FLATTREE
#ifdef CONFIG_USE_BUILTIN_DTB
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index e16b2cd269a3..821a09cbd605 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <asm/clock.h>
#include <asm/rtc.h>
+#include <asm/platform_early.h>
static void __init sh_late_time_init(void)
{
@@ -30,8 +31,8 @@ static void __init sh_late_time_init(void)
* clocksource and the jiffies clocksource is used transparently
* instead. No error handling is necessary here.
*/
- early_platform_driver_register_all("earlytimer");
- early_platform_driver_probe("earlytimer", 2, 0);
+ sh_early_platform_driver_register_all("earlytimer");
+ sh_early_platform_driver_probe("earlytimer", 2, 0);
}
void __init time_init(void)
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index d09ddfe58fd8..f6d02246d665 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -103,7 +103,7 @@ static inline int iomapping_nontranslatable(unsigned long offset)
return 0;
}
-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
{
unsigned long vaddr = (unsigned long __force)addr;
struct vm_struct *p;
@@ -134,4 +134,4 @@ void __iounmap(void __iomem *addr)
kfree(p);
}
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index b6212164847b..62de2eb2773d 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -18,7 +18,6 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += module.h
-generic-y += msi.h
generic-y += preempt.h
generic-y += serial.h
generic-y += trace_clock.h
diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h
index df2dc1784673..9a52d9506f80 100644
--- a/arch/sparc/include/asm/io_32.h
+++ b/arch/sparc/include/asm/io_32.h
@@ -127,6 +127,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst,
* Bus number may be embedded in the higher bits of the physical address.
* This is why we have no bus number argument to ioremap().
*/
+void __iomem *ioremap(phys_addr_t offset, size_t size);
void iounmap(volatile void __iomem *addr);
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 688911051b44..f4afa301954a 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -407,6 +407,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
}
#define ioremap_nocache(X,Y) ioremap((X),(Y))
+#define ioremap_uc(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y))
#define ioremap_wt(X,Y) ioremap((X),(Y))
diff --git a/arch/sparc/include/uapi/asm/msgbuf.h b/arch/sparc/include/uapi/asm/msgbuf.h
index ffc46c211d6d..eeeb91933280 100644
--- a/arch/sparc/include/uapi/asm/msgbuf.h
+++ b/arch/sparc/include/uapi/asm/msgbuf.h
@@ -13,9 +13,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#if defined(__sparc__) && defined(__arch64__)
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
#else
unsigned long msg_stime_high;
unsigned long msg_stime; /* last msgsnd time */
diff --git a/arch/sparc/include/uapi/asm/sembuf.h b/arch/sparc/include/uapi/asm/sembuf.h
index f3d309c2e1cd..cbcbaa4e7128 100644
--- a/arch/sparc/include/uapi/asm/sembuf.h
+++ b/arch/sparc/include/uapi/asm/sembuf.h
@@ -14,8 +14,8 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#if defined(__sparc__) && defined(__arch64__)
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
#else
unsigned long sem_otime_high;
unsigned long sem_otime; /* last semop time */
diff --git a/arch/sparc/include/uapi/asm/shmbuf.h b/arch/sparc/include/uapi/asm/shmbuf.h
index 06618b84822d..a5d7d8d681c4 100644
--- a/arch/sparc/include/uapi/asm/shmbuf.h
+++ b/arch/sparc/include/uapi/asm/shmbuf.h
@@ -14,9 +14,9 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#if defined(__sparc__) && defined(__arch64__)
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
#else
unsigned long shm_atime_high;
unsigned long shm_atime; /* last attach time */
diff --git a/arch/sparc/include/uapi/asm/stat.h b/arch/sparc/include/uapi/asm/stat.h
index b6ec4eb217f7..732c41720e24 100644
--- a/arch/sparc/include/uapi/asm/stat.h
+++ b/arch/sparc/include/uapi/asm/stat.h
@@ -14,12 +14,12 @@ struct stat {
uid_t st_uid;
gid_t st_gid;
unsigned int st_rdev;
- off_t st_size;
- time_t st_atime;
- time_t st_mtime;
- time_t st_ctime;
- off_t st_blksize;
- off_t st_blocks;
+ long st_size;
+ long st_atime;
+ long st_mtime;
+ long st_ctime;
+ long st_blksize;
+ long st_blocks;
unsigned long __unused4[2];
};
@@ -57,15 +57,15 @@ struct stat {
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
- off_t st_size;
- time_t st_atime;
+ long st_size;
+ long st_atime;
unsigned long st_atime_nsec;
- time_t st_mtime;
+ long st_mtime;
unsigned long st_mtime_nsec;
- time_t st_ctime;
+ long st_ctime;
unsigned long st_ctime_nsec;
- off_t st_blksize;
- off_t st_blocks;
+ long st_blksize;
+ long st_blocks;
unsigned long __unused4[2];
};
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index f89603855f1e..e59461d03b9a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -366,8 +366,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
/* IIep is write-through, not flushing on cpu to device transfer. */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE)
dma_make_coherent(paddr, PAGE_ALIGN(size));
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index fc5bdd14de76..e794edde6755 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -63,7 +63,7 @@ notrace static __always_inline struct vvar_data *get_vvar_data(void)
return (struct vvar_data *) ret;
}
-notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts)
{
register long num __asm__("g1") = __NR_clock_gettime;
register long o0 __asm__("o0") = clock;
@@ -74,7 +74,7 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
return o0;
}
-notrace static long vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
register long num __asm__("g1") = __NR_gettimeofday;
register long o0 __asm__("o0") = (long) tv;
@@ -144,7 +144,7 @@ notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
}
notrace static __always_inline int do_realtime(struct vvar_data *vvar,
- struct timespec *ts)
+ struct __kernel_old_timespec *ts)
{
unsigned long seq;
u64 ns;
@@ -164,7 +164,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
}
notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
- struct timespec *ts)
+ struct __kernel_old_timespec *ts)
{
unsigned long seq;
u64 ns;
@@ -184,7 +184,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
}
notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
- struct timespec *ts)
+ struct __kernel_old_timespec *ts)
{
unsigned long seq;
u64 ns;
@@ -204,7 +204,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
}
notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
- struct timespec *ts)
+ struct __kernel_old_timespec *ts)
{
unsigned long seq;
u64 ns;
@@ -224,7 +224,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
}
notrace static int do_realtime_coarse(struct vvar_data *vvar,
- struct timespec *ts)
+ struct __kernel_old_timespec *ts)
{
unsigned long seq;
@@ -237,7 +237,7 @@ notrace static int do_realtime_coarse(struct vvar_data *vvar,
}
notrace static int do_monotonic_coarse(struct vvar_data *vvar,
- struct timespec *ts)
+ struct __kernel_old_timespec *ts)
{
unsigned long seq;
@@ -251,7 +251,7 @@ notrace static int do_monotonic_coarse(struct vvar_data *vvar,
}
notrace int
-__vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+__vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
struct vvar_data *vvd = get_vvar_data();
@@ -275,11 +275,11 @@ __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
return vdso_fallback_gettime(clock, ts);
}
int
-clock_gettime(clockid_t, struct timespec *)
+clock_gettime(clockid_t, struct __kernel_old_timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int
-__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
+__vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts)
{
struct vvar_data *vvd = get_vvar_data();
@@ -304,15 +304,15 @@ __vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
}
notrace int
-__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+__vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
struct vvar_data *vvd = get_vvar_data();
if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
if (likely(tv != NULL)) {
union tstv_t {
- struct timespec ts;
- struct timeval tv;
+ struct __kernel_old_timespec ts;
+ struct __kernel_old_timeval tv;
} *tstv = (union tstv_t *) tv;
do_realtime(vvd, &tstv->ts);
/*
@@ -336,19 +336,19 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
return vdso_fallback_gettimeofday(tv, tz);
}
int
-gettimeofday(struct timeval *, struct timezone *)
+gettimeofday(struct __kernel_old_timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
notrace int
-__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz)
+__vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz)
{
struct vvar_data *vvd = get_vvar_data();
if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
if (likely(tv != NULL)) {
union tstv_t {
- struct timespec ts;
- struct timeval tv;
+ struct __kernel_old_timespec ts;
+ struct __kernel_old_timeval tv;
} *tstv = (union tstv_t *) tv;
do_realtime_stick(vvd, &tstv->ts);
/*
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index fec6b4ca2b6e..2a6d04fcb3e9 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -153,7 +153,7 @@ config KERNEL_STACK_ORDER
It is possible to reduce the stack to 1 for 64BIT and 0 for 32BIT on
older (pre-2017) CPUs. It is not recommended on newer CPUs due to the
increase in the size of the state which needs to be saved when handling
- signals.
+ signals.
config MMAPPER
tristate "iomem emulation driver"
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index fea5a0d522dc..388096fb45a2 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -337,7 +337,7 @@ config UML_NET_SLIRP
endmenu
config VIRTIO_UML
- tristate "UML driver for virtio devices"
+ bool "UML driver for virtio devices"
select VIRTIO
help
This driver provides support for virtio based paravirtual device
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 000cb69ba0bc..e6d4f43deba8 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -165,6 +165,7 @@ static const struct file_operations harddog_fops = {
.owner = THIS_MODULE,
.write = harddog_write,
.unlocked_ioctl = harddog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = harddog_open,
.release = harddog_release,
.llseek = no_llseek,
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index bf75b1ceac47..d35d3f305a31 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -298,6 +298,7 @@ static const struct file_operations hostaudio_fops = {
.write = hostaudio_write,
.poll = hostaudio_poll,
.unlocked_ioctl = hostaudio_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = NULL,
.open = hostaudio_open,
.release = hostaudio_release,
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 769ffbd9e9a6..92617e16829e 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2017 - Cambridge Greys Limited
+ * Copyright (C) 2017 - 2019 Cambridge Greys Limited
* Copyright (C) 2011 - 2014 Cisco Systems Inc
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
@@ -21,6 +21,9 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <uapi/linux/filter.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
@@ -128,6 +131,23 @@ static int get_mtu(struct arglist *def)
return ETH_MAX_PACKET;
}
+static char *get_bpf_file(struct arglist *def)
+{
+ return uml_vector_fetch_arg(def, "bpffile");
+}
+
+static bool get_bpf_flash(struct arglist *def)
+{
+ char *allow = uml_vector_fetch_arg(def, "bpfflash");
+ unsigned long result;
+
+ if (allow != NULL) {
+ if (kstrtoul(allow, 10, &result) == 0)
+ return (result > 0);
+ }
+ return false;
+}
+
static int get_depth(struct arglist *def)
{
char *mtu = uml_vector_fetch_arg(def, "depth");
@@ -176,6 +196,7 @@ static int get_transport_options(struct arglist *def)
int vec_rx = VECTOR_RX;
int vec_tx = VECTOR_TX;
long parsed;
+ int result = 0;
if (vector != NULL) {
if (kstrtoul(vector, 10, &parsed) == 0) {
@@ -186,14 +207,16 @@ static int get_transport_options(struct arglist *def)
}
}
+ if (get_bpf_flash(def))
+ result = VECTOR_BPF_FLASH;
if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
- return 0;
+ return result;
if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
- return (vec_rx | VECTOR_BPF);
+ return (result | vec_rx | VECTOR_BPF);
if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
- return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
- return (vec_rx | vec_tx);
+ return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
+ return (result | vec_rx | vec_tx);
}
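A sketch of how the two new arguments combine with an existing transport on a UML command line (interface and file names illustrative):

	vec0:transport=raw,ifname=eth0,bpffile=vector.bpf,bpfflash=1

bpffile= supplies a filter loaded at open time via uml_vector_user_bpf(), while bpfflash=1 sets VECTOR_BPF_FLASH so the guest may later replace the filter through ethtool flashing (see vector_net_load_bpf_flash() below).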
@@ -1139,6 +1162,8 @@ static int vector_net_close(struct net_device *dev)
}
tasklet_kill(&vp->tx_poll);
if (vp->fds->rx_fd > 0) {
+ if (vp->bpf)
+ uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
os_close_file(vp->fds->rx_fd);
vp->fds->rx_fd = -1;
}
@@ -1146,7 +1171,10 @@ static int vector_net_close(struct net_device *dev)
os_close_file(vp->fds->tx_fd);
vp->fds->tx_fd = -1;
}
+ if (vp->bpf != NULL)
+ kfree(vp->bpf->filter);
kfree(vp->bpf);
+ vp->bpf = NULL;
kfree(vp->fds->remote_addr);
kfree(vp->transport_data);
kfree(vp->header_rxbuffer);
@@ -1181,6 +1209,7 @@ static void vector_reset_tx(struct work_struct *work)
netif_start_queue(vp->dev);
netif_wake_queue(vp->dev);
}
+
static int vector_net_open(struct net_device *dev)
{
struct vector_private *vp = netdev_priv(dev);
@@ -1196,6 +1225,8 @@ static int vector_net_open(struct net_device *dev)
vp->opened = true;
spin_unlock_irqrestore(&vp->lock, flags);
+ vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));
+
vp->fds = uml_vector_user_open(vp->unit, vp->parsed);
if (vp->fds == NULL)
@@ -1267,8 +1298,11 @@ static int vector_net_open(struct net_device *dev)
if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
vp->options |= VECTOR_BPF;
}
- if ((vp->options & VECTOR_BPF) != 0)
- vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);
+ if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
+ vp->bpf = uml_vector_default_bpf(dev->dev_addr);
+
+ if (vp->bpf != NULL)
+ uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
netif_start_queue(dev);
@@ -1347,6 +1381,65 @@ static void vector_net_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
+static int vector_net_load_bpf_flash(struct net_device *dev,
+ struct ethtool_flash *efl)
+{
+ struct vector_private *vp = netdev_priv(dev);
+ struct vector_device *vdevice;
+ const struct firmware *fw;
+ int result = 0;
+
+ if (!(vp->options & VECTOR_BPF_FLASH)) {
+ netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
+ return -1;
+ }
+
+ spin_lock(&vp->lock);
+
+ if (vp->bpf != NULL) {
+ if (vp->opened)
+ uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
+ kfree(vp->bpf->filter);
+ vp->bpf->filter = NULL;
+ } else {
+ vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
+ if (vp->bpf == NULL) {
+ netdev_err(dev, "failed to allocate memory for firmware\n");
+ goto flash_fail;
+ }
+ }
+
+ vdevice = find_device(vp->unit);
+
+ if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
+ goto flash_fail;
+
+ vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (!vp->bpf->filter)
+ goto free_buffer;
+
+ vp->bpf->len = fw->size / sizeof(struct sock_filter);
+ release_firmware(fw);
+
+ if (vp->opened)
+ result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
+
+ spin_unlock(&vp->lock);
+
+ return result;
+
+free_buffer:
+ release_firmware(fw);
+
+flash_fail:
+ spin_unlock(&vp->lock);
+ if (vp->bpf != NULL)
+ kfree(vp->bpf->filter);
+ kfree(vp->bpf);
+ vp->bpf = NULL;
+ return -1;
+}
+
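Because this is wired up as the ethtool flash_device operation (see the vector_net_ethtool_ops hunk below), a guest can push a replacement filter with the standard flashing command; a sketch, with an illustrative file name that request_firmware() resolves under /lib/firmware:

	ethtool -f vec0 vector.bpf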
static void vector_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
@@ -1424,6 +1517,7 @@ static const struct ethtool_ops vector_net_ethtool_ops = {
.get_ethtool_stats = vector_get_ethtool_stats,
.get_coalesce = vector_get_coalesce,
.set_coalesce = vector_set_coalesce,
+ .flash_device = vector_net_load_bpf_flash,
};
@@ -1528,8 +1622,9 @@ static void vector_eth_configure(
.in_write_poll = false,
.coalesce = 2,
.req_size = get_req_size(def),
- .in_error = false
- });
+ .in_error = false,
+ .bpf = NULL
+ });
dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
index 4d292e6c07af..d0159082faf0 100644
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -29,10 +29,13 @@
#define VECTOR_TX (1 << 1)
#define VECTOR_BPF (1 << 2)
#define VECTOR_QDISC_BYPASS (1 << 3)
+#define VECTOR_BPF_FLASH (1 << 4)
#define ETH_MAX_PACKET 1500
#define ETH_HEADER_OTHER 32 /* just in case someone decides to go mad on QinQ */
+#define MAX_FILTER_PROG (2 << 16)
+
struct vector_queue {
struct mmsghdr *mmsg_vector;
void **skbuff_vector;
@@ -118,10 +121,13 @@ struct vector_private {
bool in_write_poll;
bool in_error;
+ /* guest allowed to use ethtool flash to load bpf */
+ bool bpf_via_flash;
+
/* ethtool stats */
struct vector_estats estats;
- void *bpf;
+ struct sock_fprog *bpf;
char user[0];
};
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index e2c969b9f7ee..ddcd917be0af 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -46,7 +46,8 @@
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
#define UNIX_BIND_FAIL "unix_open : could not bind socket err=%i"
-#define BPF_ATTACH_FAIL "Failed to attach filter size %d to %d, err %d\n"
+#define BPF_ATTACH_FAIL "Failed to attach filter size %d prog %px to %d, err %d\n"
+#define BPF_DETACH_FAIL "Failed to detach filter size %d prog %px from %d, err %d\n"
#define MAX_UN_LEN 107
@@ -660,31 +661,44 @@ int uml_vector_recvmmsg(
else
return -errno;
}
-int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len)
+int uml_vector_attach_bpf(int fd, void *bpf)
{
- int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, bpf_len);
+ struct sock_fprog *prog = bpf;
+
+ int err = setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, bpf, sizeof(struct sock_fprog));
if (err < 0)
- printk(KERN_ERR BPF_ATTACH_FAIL, bpf_len, fd, -errno);
+ printk(KERN_ERR BPF_ATTACH_FAIL, prog->len, prog->filter, fd, -errno);
return err;
}
-#define DEFAULT_BPF_LEN 6
+int uml_vector_detach_bpf(int fd, void *bpf)
+{
+ struct sock_fprog *prog = bpf;
-void *uml_vector_default_bpf(int fd, void *mac)
+ int err = setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, bpf, sizeof(struct sock_fprog));
+ if (err < 0)
+ printk(KERN_ERR BPF_DETACH_FAIL, prog->len, prog->filter, fd, -errno);
+ return err;
+}
+void *uml_vector_default_bpf(void *mac)
{
struct sock_filter *bpf;
uint32_t *mac1 = (uint32_t *)(mac + 2);
uint16_t *mac2 = (uint16_t *) mac;
- struct sock_fprog bpf_prog = {
- .len = 6,
- .filter = NULL,
- };
+ struct sock_fprog *bpf_prog;
+ bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
+ if (bpf_prog) {
+ bpf_prog->len = DEFAULT_BPF_LEN;
+ bpf_prog->filter = NULL;
+ } else {
+ return NULL;
+ }
bpf = uml_kmalloc(
sizeof(struct sock_filter) * DEFAULT_BPF_LEN, UM_GFP_KERNEL);
- if (bpf != NULL) {
- bpf_prog.filter = bpf;
+ if (bpf) {
+ bpf_prog->filter = bpf;
/* ld [8] */
bpf[0] = (struct sock_filter){ 0x20, 0, 0, 0x00000008 };
/* jeq #0xMAC[2-6] jt 2 jf 5*/
@@ -697,12 +711,56 @@ void *uml_vector_default_bpf(int fd, void *mac)
bpf[4] = (struct sock_filter){ 0x6, 0, 0, 0x00000000 };
/* ret #0x40000 */
bpf[5] = (struct sock_filter){ 0x6, 0, 0, 0x00040000 };
- if (uml_vector_attach_bpf(
- fd, &bpf_prog, sizeof(struct sock_fprog)) < 0) {
- kfree(bpf);
- bpf = NULL;
- }
+ } else {
+ kfree(bpf_prog);
+ bpf_prog = NULL;
}
- return bpf;
+ return bpf_prog;
}
+/* Note: uml_vector_default_bpf() above requires a valid MAC to be passed in;
+ * the program it builds drops frames whose source address matches that MAC. */
+
+void *uml_vector_user_bpf(char *filename)
+{
+ struct sock_filter *bpf;
+ struct sock_fprog *bpf_prog;
+ struct stat statbuf;
+ int res, ffd = -1;
+
+ if (filename == NULL)
+ return NULL;
+
+ if (stat(filename, &statbuf) < 0) {
+ printk(KERN_ERR "Error %d reading bpf file", -errno);
+ return NULL;
+ }
+ bpf_prog = uml_kmalloc(sizeof(struct sock_fprog), UM_GFP_KERNEL);
+ if (bpf_prog == NULL)
+ return NULL;
+ bpf_prog->len = statbuf.st_size / sizeof(struct sock_filter);
+ bpf_prog->filter = NULL;
+ ffd = os_open_file(filename, of_read(OPENFLAGS()), 0);
+ if (ffd < 0) {
+ printk(KERN_ERR "Error %d opening bpf file", -errno);
+ goto bpf_failed;
+ }
+ bpf = uml_kmalloc(statbuf.st_size, UM_GFP_KERNEL);
+ if (bpf == NULL) {
+ printk(KERN_ERR "Failed to allocate bpf buffer");
+ goto bpf_failed;
+ }
+ bpf_prog->filter = bpf;
+ res = os_read_file(ffd, bpf, statbuf.st_size);
+ if (res < statbuf.st_size) {
+ printk(KERN_ERR "Failed to read bpf program %s, error %d", filename, res);
+ kfree(bpf);
+ goto bpf_failed;
+ }
+ os_close_file(ffd);
+ return bpf_prog;
+bpf_failed:
+ if (ffd >= 0)
+ os_close_file(ffd);
+ kfree(bpf_prog);
+ return NULL;
+}
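The expected file format is simply a raw array of struct sock_filter, since the program length is derived as st_size / sizeof(struct sock_filter). A minimal host-side generator sketch that emits a one-instruction accept-all program (file name illustrative):

	#include <stdio.h>
	#include <linux/filter.h>

	int main(void)
	{
		/* 0x6 = BPF_RET | BPF_K: accept up to 0x40000 bytes of every frame */
		struct sock_filter prog[] = {
			{ 0x6, 0, 0, 0x00040000 },
		};
		FILE *f = fopen("vector.bpf", "wb");

		if (!f)
			return 1;
		fwrite(prog, sizeof(prog[0]), 1, f);
		fclose(f);
		return 0;
	}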
diff --git a/arch/um/drivers/vector_user.h b/arch/um/drivers/vector_user.h
index 649ec250268b..91f35b266aba 100644
--- a/arch/um/drivers/vector_user.h
+++ b/arch/um/drivers/vector_user.h
@@ -28,6 +28,8 @@
#define TRANS_BESS "bess"
#define TRANS_BESS_LEN strlen(TRANS_BESS)
+#define DEFAULT_BPF_LEN 6
+
#ifndef IPPROTO_GRE
#define IPPROTO_GRE 0x2F
#endif
@@ -95,8 +97,10 @@ extern int uml_vector_recvmmsg(
unsigned int vlen,
unsigned int flags
);
-extern void *uml_vector_default_bpf(int fd, void *mac);
-extern int uml_vector_attach_bpf(int fd, void *bpf, int bpf_len);
+extern void *uml_vector_default_bpf(void *mac);
+extern void *uml_vector_user_bpf(char *filename);
+extern int uml_vector_attach_bpf(int fd, void *bpf);
+extern int uml_vector_detach_bpf(int fd, void *bpf);
extern bool uml_raw_enable_qdisc_bypass(int fd);
extern bool uml_raw_enable_vnet_headers(int fd);
extern bool uml_tap_enable_vnet_headers(int fd);
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index fc8c52cff5aa..023ced2250ea 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -4,12 +4,12 @@
*
* Copyright(c) 2019 Intel Corporation
*
- * This module allows virtio devices to be used over a vhost-user socket.
+ * This driver allows virtio devices to be used over a vhost-user socket.
*
* Guest devices can be instantiated by kernel module or command line
* parameters. One device will be created for each parameter. Syntax:
*
- * [virtio_uml.]device=<socket>:<virtio_id>[:<platform_id>]
+ * virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
* where:
* <socket> := vhost-user socket path to connect
* <virtio_id> := virtio device id (as in virtio_ids.h)
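A sketch of one such parameter, assuming an illustrative socket path (1 is VIRTIO_ID_NET in virtio_ids.h):

	virtio_uml.device=/var/run/vhost-net.sock:1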
@@ -42,6 +42,13 @@
#define to_virtio_uml_device(_vdev) \
container_of(_vdev, struct virtio_uml_device, vdev)
+struct virtio_uml_platform_data {
+ u32 virtio_device_id;
+ const char *socket_path;
+ struct work_struct conn_broken_wk;
+ struct platform_device *pdev;
+};
+
struct virtio_uml_device {
struct virtio_device vdev;
struct platform_device *pdev;
@@ -50,6 +57,7 @@ struct virtio_uml_device {
u64 features;
u64 protocol_features;
u8 status;
+ u8 registered:1;
};
struct virtio_uml_vq_info {
@@ -83,7 +91,7 @@ static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
return 0;
}
-static int full_read(int fd, void *buf, int len)
+static int full_read(int fd, void *buf, int len, bool abortable)
{
int rc;
@@ -93,7 +101,7 @@ static int full_read(int fd, void *buf, int len)
buf += rc;
len -= rc;
}
- } while (len && (rc > 0 || rc == -EINTR));
+ } while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
if (rc < 0)
return rc;
@@ -104,28 +112,37 @@ static int full_read(int fd, void *buf, int len)
static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
- return full_read(fd, msg, sizeof(msg->header));
+ return full_read(fd, msg, sizeof(msg->header), true);
}
-static int vhost_user_recv(int fd, struct vhost_user_msg *msg,
+static int vhost_user_recv(struct virtio_uml_device *vu_dev,
+ int fd, struct vhost_user_msg *msg,
size_t max_payload_size)
{
size_t size;
int rc = vhost_user_recv_header(fd, msg);
+ if (rc == -ECONNRESET && vu_dev->registered) {
+ struct virtio_uml_platform_data *pdata;
+
+ pdata = vu_dev->pdev->dev.platform_data;
+
+ virtio_break_device(&vu_dev->vdev);
+ schedule_work(&pdata->conn_broken_wk);
+ }
if (rc)
return rc;
size = msg->header.size;
if (size > max_payload_size)
return -EPROTO;
- return full_read(fd, &msg->payload, size);
+ return full_read(fd, &msg->payload, size, false);
}
static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
{
- int rc = vhost_user_recv(vu_dev->sock, msg, max_payload_size);
+ int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg, max_payload_size);
if (rc)
return rc;
@@ -155,7 +172,7 @@ static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
struct vhost_user_msg *msg,
size_t max_payload_size)
{
- int rc = vhost_user_recv(vu_dev->req_fd, msg, max_payload_size);
+ int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg, max_payload_size);
if (rc)
return rc;
@@ -963,11 +980,6 @@ static void virtio_uml_release_dev(struct device *d)
/* Platform device */
-struct virtio_uml_platform_data {
- u32 virtio_device_id;
- const char *socket_path;
-};
-
static int virtio_uml_probe(struct platform_device *pdev)
{
struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
@@ -1005,6 +1017,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
rc = register_virtio_device(&vu_dev->vdev);
if (rc)
put_device(&vu_dev->vdev.dev);
+ vu_dev->registered = 1;
return rc;
error_init:
@@ -1034,13 +1047,31 @@ static struct device vu_cmdline_parent = {
static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;
+static int vu_unregister_cmdline_device(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
+
+ kfree(pdata->socket_path);
+ platform_device_unregister(pdev);
+ return 0;
+}
+
+static void vu_conn_broken(struct work_struct *wk)
+{
+ struct virtio_uml_platform_data *pdata;
+
+ pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
+ vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
+}
+
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
const char *ids = strchr(device, ':');
unsigned int virtio_device_id;
int processed, consumed, err;
char *socket_path;
- struct virtio_uml_platform_data pdata;
+ struct virtio_uml_platform_data pdata, *ppdata;
struct platform_device *pdev;
if (!ids || ids == device)
@@ -1079,6 +1110,11 @@ static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
err = PTR_ERR_OR_ZERO(pdev);
if (err)
goto free;
+
+ ppdata = pdev->dev.platform_data;
+ ppdata->pdev = pdev;
+ INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
+
return 0;
free:
@@ -1121,16 +1157,6 @@ __uml_help(vu_cmdline_param_ops,
);
-static int vu_unregister_cmdline_device(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
-
- kfree(pdata->socket_path);
- platform_device_unregister(pdev);
- return 0;
-}
-
static void vu_unregister_cmdline_devices(void)
{
if (vu_cmdline_parent_registered) {
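The teardown path added above is the standard deferred-work idiom: the receive path, which must not block, only marks the virtio device broken and schedules work, and the handler later recovers its enclosing object with container_of(). A minimal sketch of that idiom, using a hypothetical struct conn in place of virtio_uml_platform_data:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical context object, standing in for virtio_uml_platform_data. */
struct conn {
	int id;
	struct work_struct broken_wk;
};

static void conn_broken(struct work_struct *wk)
{
	/* Recover the enclosing object from the embedded work_struct. */
	struct conn *c = container_of(wk, struct conn, broken_wk);

	pr_info("connection %d broken, tearing down\n", c->id);
}

static void conn_init(struct conn *c)
{
	INIT_WORK(&c->broken_wk, conn_broken);
}

/* Safe from atomic context; the heavy teardown runs later in process context. */
static void conn_notice_breakage(struct conn *c)
{
	schedule_work(&c->broken_wk);
}
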
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index f574b1856bc6..40d90dddf3f1 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -35,7 +35,7 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
/* Do the seccomp check after ptrace; failures should be fast. */
- if (secure_computing(NULL) == -1)
+ if (secure_computing() == -1)
goto out;
syscall = UPT_SYSCALL_NR(r);
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 8014dfac644d..c8a42ecbd7a2 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -170,7 +170,7 @@ int __init main(int argc, char **argv, char **envp)
* that they won't be delivered after the exec, when
* they are definitely not expected.
*/
- unblock_signals_trace();
+ unblock_signals();
os_info("\n");
/* Reboot */
diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h
index c71aa4b95996..4b460e01acfa 100644
--- a/arch/unicore32/include/asm/io.h
+++ b/arch/unicore32/include/asm/io.h
@@ -18,10 +18,9 @@
#include <asm-generic/io.h>
/*
- * __uc32_ioremap and __uc32_ioremap_cached takes CPU physical address.
+ * __uc32_ioremap takes CPU physical address.
*/
extern void __iomem *__uc32_ioremap(unsigned long, size_t);
-extern void __iomem *__uc32_ioremap_cached(unsigned long, size_t);
extern void __uc32_iounmap(volatile void __iomem *addr);
/*
@@ -32,7 +31,6 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
*
*/
#define ioremap(cookie, size) __uc32_ioremap(cookie, size)
-#define ioremap_cached(cookie, size) __uc32_ioremap_cached(cookie, size)
#define ioremap_nocache(cookie, size) __uc32_ioremap(cookie, size)
#define iounmap(cookie) __uc32_iounmap(cookie)
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index cf6d656f240c..46a64bd6156a 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -220,14 +220,6 @@ __uc32_ioremap(unsigned long phys_addr, size_t size)
}
EXPORT_SYMBOL(__uc32_ioremap);
-void __iomem *
-__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
-{
- return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__uc32_ioremap_cached);
-
void __uc32_iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9c9bc348c412..5e8949953660 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -134,6 +134,7 @@ config X86
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if X86_64
+ select HAVE_ARCH_KASAN_VMALLOC if X86_64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
@@ -157,6 +158,7 @@ config X86
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
@@ -707,7 +709,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
config STA2X11
bool "STA2X11 Companion Chip Support"
depends on X86_32_NON_STANDARD && PCI
- select ARCH_HAS_PHYS_TO_DMA
select SWIOTLB
select MFD_STA2X11
select GPIOLIB
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 409c00f74e60..c4eab8ed33a3 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -117,7 +117,7 @@ config DEBUG_WX
config DOUBLEFAULT
default y
- bool "Enable doublefault exception handler" if EXPERT
+ bool "Enable doublefault exception handler" if EXPERT && X86_32
---help---
This option allows trapping of rare doublefault exceptions that
would otherwise cause a system to silently reboot. Disabling this
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 68945c5700bf..72b08fde6de6 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -554,7 +554,11 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
- e820_type = E820_TYPE_RAM;
+ if (efi_soft_reserve_enabled() &&
+ (d->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else
+ e820_type = E820_TYPE_RAM;
break;
case EFI_ACPI_MEMORY_NVS:
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index bb9bfef174ae..d7408af55738 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -132,8 +132,14 @@ char *skip_spaces(const char *str)
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"
+enum parse_mode {
+ PARSE_MEMMAP,
+ PARSE_EFI,
+};
+
static int
-parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
+parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
+ enum parse_mode mode)
{
char *oldp;
@@ -156,8 +162,29 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
*start = memparse(p + 1, &p);
return 0;
case '@':
- /* memmap=nn@ss specifies usable region, should be skipped */
- *size = 0;
+ if (mode == PARSE_MEMMAP) {
+ /*
+ * memmap=nn@ss specifies usable region, should
+ * be skipped
+ */
+ *size = 0;
+ } else {
+ unsigned long long flags;
+
+ /*
+			 * For efi_fake_mem=nn@ss:attr, the attr specifies
+			 * flags that might imply a soft-reservation.
+ */
+ *start = memparse(p + 1, &p);
+ if (p && *p == ':') {
+ p++;
+ if (kstrtoull(p, 0, &flags) < 0)
+ *size = 0;
+ else if (flags & EFI_MEMORY_SP)
+ return 0;
+ }
+ *size = 0;
+ }
/* Fall through */
default:
/*
@@ -172,7 +199,7 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
return -EINVAL;
}
-static void mem_avoid_memmap(char *str)
+static void mem_avoid_memmap(enum parse_mode mode, char *str)
{
static int i;
@@ -187,7 +214,7 @@ static void mem_avoid_memmap(char *str)
if (k)
*k++ = 0;
- rc = parse_memmap(str, &start, &size);
+ rc = parse_memmap(str, &start, &size, mode);
if (rc < 0)
break;
str = k;
@@ -238,7 +265,6 @@ static void parse_gb_huge_pages(char *param, char *val)
}
}
-
static void handle_mem_options(void)
{
char *args = (char *)get_cmd_line_ptr();
@@ -271,7 +297,7 @@ static void handle_mem_options(void)
}
if (!strcmp(param, "memmap")) {
- mem_avoid_memmap(val);
+ mem_avoid_memmap(PARSE_MEMMAP, val);
} else if (strstr(param, "hugepages")) {
parse_gb_huge_pages(param, val);
} else if (!strcmp(param, "mem")) {
@@ -284,6 +310,8 @@ static void handle_mem_options(void)
goto out;
mem_limit = mem_size;
+ } else if (!strcmp(param, "efi_fake_mem")) {
+ mem_avoid_memmap(PARSE_EFI, val);
}
}
@@ -772,6 +800,10 @@ process_efi_entries(unsigned long minimum, unsigned long image_size)
if (md->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ continue;
+
if (efi_mirror_found &&
!(md->attribute & EFI_MEMORY_MORE_RELIABLE))
continue;
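For reference, the PARSE_EFI branch above leans on two parsing helpers the boot stub borrows from lib/: memparse() consumes a size with an optional K/M/G suffix and advances the cursor, and kstrtoull() converts the trailing attribute value. A stand-alone sketch of the same "nn@ss:attr" shape (parse_size_at_attr() is hypothetical; EFI_MEMORY_SP is the EFI special-purpose attribute bit):

#include <linux/kernel.h>

#define EFI_MEMORY_SP	0x0000000000040000ULL	/* special-purpose memory */

static int parse_size_at_attr(char *p, unsigned long long *start,
			      unsigned long long *size, bool *soft_reserved)
{
	unsigned long long flags;

	*soft_reserved = false;
	*size = memparse(p, &p);	/* "nn"; cursor lands on '@' */
	if (*p != '@')
		return -EINVAL;
	*start = memparse(p + 1, &p);	/* "ss"; cursor lands on ':' */
	if (*p != ':')
		return 0;		/* no attribute given */
	if (kstrtoull(p + 1, 0, &flags))
		return -EINVAL;
	*soft_reserved = !!(flags & EFI_MEMORY_SP);
	return 0;
}
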
diff --git a/arch/x86/crypto/blake2s-glue.c b/arch/x86/crypto/blake2s-glue.c
index 4a37ba7cdbe5..1d9ff8a45e1f 100644
--- a/arch/x86/crypto/blake2s-glue.c
+++ b/arch/x86/crypto/blake2s-glue.c
@@ -210,12 +210,14 @@ static int __init blake2s_mod_init(void)
XFEATURE_MASK_AVX512, NULL))
static_branch_enable(&blake2s_use_avx512);
- return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ?
+ crypto_register_shashes(blake2s_algs,
+ ARRAY_SIZE(blake2s_algs)) : 0;
}
static void __exit blake2s_mod_exit(void)
{
- if (boot_cpu_has(X86_FEATURE_SSSE3))
+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs));
}
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index a94e30b6f941..68a74953efaf 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -299,12 +299,13 @@ static int __init chacha_simd_mod_init(void)
boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */
static_branch_enable(&chacha_use_avx512vl);
}
- return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+ return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0;
}
static void __exit chacha_simd_mod_fini(void)
{
- if (boot_cpu_has(X86_FEATURE_SSSE3))
+ if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3))
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index a52a3fb15727..eec7d2d24239 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -2457,13 +2457,14 @@ static int __init curve25519_mod_init(void)
static_branch_enable(&curve25519_use_adx);
else
return 0;
- return crypto_register_kpp(&curve25519_alg);
+ return IS_REACHABLE(CONFIG_CRYPTO_KPP) ?
+ crypto_register_kpp(&curve25519_alg) : 0;
}
static void __exit curve25519_mod_exit(void)
{
- if (boot_cpu_has(X86_FEATURE_BMI2) ||
- boot_cpu_has(X86_FEATURE_ADX))
+ if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
+ (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
crypto_unregister_kpp(&curve25519_alg);
}
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 370cd88068ec..0cc4537e6617 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -224,12 +224,13 @@ static int __init poly1305_simd_mod_init(void)
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
static_branch_enable(&poly1305_use_avx2);
- return crypto_register_shash(&alg);
+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
}
static void __exit poly1305_simd_mod_exit(void)
{
- crypto_unregister_shash(&alg);
+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH))
+ crypto_unregister_shash(&alg);
}
module_init(poly1305_simd_mod_init);
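All four crypto glue changes above apply the same IS_REACHABLE() gate. Unlike IS_ENABLED(), which is true for =y and =m alike, IS_REACHABLE(CONFIG_FOO) is true only when the referencing code could actually link against FOO: always when FOO is built in, and, when FOO is a module, only if the referencing code is modular too. A sketch of the idiom against a hypothetical CONFIG_FOO subsystem:

#include <linux/init.h>
#include <linux/kconfig.h>

int foo_register(void);		/* hypothetical subsystem entry points */
void foo_unregister(void);

static int __init mydrv_init(void)
{
	/*
	 * If FOO=m but this file is built into vmlinux, the symbol would be
	 * unresolvable; IS_REACHABLE() is 0 there and the call is folded out.
	 */
	return IS_REACHABLE(CONFIG_FOO) ? foo_register() : 0;
}

static void __exit mydrv_exit(void)
{
	if (IS_REACHABLE(CONFIG_FOO))
		foo_unregister();
}
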
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 5832b11f01bb..7e0560442538 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1090,7 +1090,6 @@ SYM_FUNC_START(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
SWITCH_TO_ENTRY_STACK
-.Lrestore_all_notrace:
CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
/* Switch back to user CR3 */
@@ -1537,6 +1536,48 @@ SYM_CODE_START(debug)
jmp common_exception
SYM_CODE_END(debug)
+#ifdef CONFIG_DOUBLEFAULT
+SYM_CODE_START(double_fault)
+1:
+ /*
+ * This is a task gate handler, not an interrupt gate handler.
+ * The error code is on the stack, but the stack is otherwise
+ * empty. Interrupts are off. Our state is sane with the following
+ * exceptions:
+ *
+ * - CR0.TS is set. "TS" literally means "task switched".
+ * - EFLAGS.NT is set because we're a "nested task".
+ * - The doublefault TSS has back_link set and has been marked busy.
+ * - TR points to the doublefault TSS and the normal TSS is busy.
+ * - CR3 is the normal kernel PGD. This would be delightful, except
+ * that the CPU didn't bother to save the old CR3 anywhere. This
+ * would make it very awkward to return back to the context we came
+	 *   would make it very awkward to return to the context we came
+ *
+ * The rest of EFLAGS is sanitized for us, so we don't need to
+ * worry about AC or DF.
+ *
+ * Don't even bother popping the error code. It's always zero,
+ * and ignoring it makes us a bit more robust against buggy
+ * hypervisor task gate implementations.
+ *
+ * We will manually undo the task switch instead of doing a
+ * task-switching IRET.
+ */
+
+ clts /* clear CR0.TS */
+ pushl $X86_EFLAGS_FIXED
+ popfl /* clear EFLAGS.NT */
+
+ call doublefault_shim
+
+ /* We don't support returning, so we have no IRET here. */
+1:
+ hlt
+ jmp 1b
+SYM_CODE_END(double_fault)
+#endif
+
/*
* NMI is doubly nasty. It can happen on the first instruction of
* entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index d9ff616bb0f6..7d70935b6758 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -15,7 +15,7 @@
#include "../../../../lib/vdso/gettimeofday.c"
extern int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
-extern time_t __vdso_time(time_t *t);
+extern __kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
@@ -25,12 +25,12 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
int gettimeofday(struct __kernel_old_timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
-time_t __vdso_time(time_t *t)
+__kernel_old_time_t __vdso_time(__kernel_old_time_t *t)
{
return __cvdso_time(t);
}
-time_t time(time_t *t) __attribute__((weak, alias("__vdso_time")));
+__kernel_old_time_t time(__kernel_old_time_t *t) __attribute__((weak, alias("__vdso_time")));
#if defined(CONFIG_X86_64) && !defined(BUILD_VDSO32_64)
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index e7c596dea947..44c33103a955 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -184,7 +184,7 @@ bool emulate_vsyscall(unsigned long error_code,
*/
switch (vsyscall_nr) {
case 0:
- if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
+ if (!write_ok_or_segv(regs->di, sizeof(struct __kernel_old_timeval)) ||
!write_ok_or_segv(regs->si, sizeof(struct timezone))) {
ret = -EFAULT;
goto check_fault;
@@ -194,7 +194,7 @@ bool emulate_vsyscall(unsigned long error_code,
break;
case 1:
- if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
+ if (!write_ok_or_segv(regs->di, sizeof(__kernel_old_time_t))) {
ret = -EFAULT;
goto check_fault;
}
@@ -222,7 +222,7 @@ bool emulate_vsyscall(unsigned long error_code,
*/
regs->orig_ax = syscall_nr;
regs->ax = -ENOSYS;
- tmp = secure_computing(NULL);
+ tmp = secure_computing();
if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
warn_bad_vsyscall(KERN_DEBUG, regs,
"seccomp tried to change syscall nr or ip");
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 6e3f0c18908e..9a89d98c55bd 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -49,6 +49,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
+DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
u64 __read_mostly hw_cache_event_ids
@@ -2181,21 +2182,26 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
- if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
+ if (val != x86_pmu.attr_rdpmc) {
/*
- * Changing into or out of always available, aka
- * perf-event-bypassing mode. This path is extremely slow,
+ * Changing into or out of never available or always available,
+ * aka perf-event-bypassing mode. This path is extremely slow,
* but only root can trigger it, so it's okay.
*/
+ if (val == 0)
+ static_branch_inc(&rdpmc_never_available_key);
+ else if (x86_pmu.attr_rdpmc == 0)
+ static_branch_dec(&rdpmc_never_available_key);
+
if (val == 2)
static_branch_inc(&rdpmc_always_available_key);
- else
+ else if (x86_pmu.attr_rdpmc == 2)
static_branch_dec(&rdpmc_always_available_key);
+
on_each_cpu(refresh_pce, NULL, 1);
+ x86_pmu.attr_rdpmc = val;
}
- x86_pmu.attr_rdpmc = val;
-
return count;
}
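The inc/dec pairs above fire only on genuine transitions in or out of states 0 and 2, so rewriting the same value never unbalances the static-key counts. The transition pattern in isolation, for a hypothetical tri-state knob:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(never_key);	/* state 0 */
DEFINE_STATIC_KEY_FALSE(always_key);	/* state 2 */

static int cur_state = 1;		/* 0 = never, 1 = default, 2 = always */

static void set_state(int val)
{
	if (val == cur_state)
		return;

	/* Count entries into, and exits from, state 0 ... */
	if (val == 0)
		static_branch_inc(&never_key);
	else if (cur_state == 0)
		static_branch_dec(&never_key);

	/* ... and independently for state 2. */
	if (val == 2)
		static_branch_inc(&always_key);
	else if (cur_state == 2)
		static_branch_dec(&always_key);

	cur_state = val;
}
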
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 50ff030d9224..caaf4dce99bf 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -7,6 +7,7 @@
* Author : K. Y. Srinivasan <kys@microsoft.com>
*/
+#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/types.h>
#include <asm/apic.h>
@@ -45,6 +46,14 @@ void *hv_alloc_hyperv_page(void)
}
EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
+void *hv_alloc_hyperv_zeroed_page(void)
+{
+ BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);
+
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+}
+EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
+
void hv_free_hyperv_page(unsigned long addr)
{
free_page(addr);
@@ -437,3 +446,9 @@ bool hv_is_hyperv_initialized(void)
return hypercall_msr.enable;
}
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
+
+bool hv_is_hibernation_supported(void)
+{
+ return acpi_sleep_state_supported(ACPI_STATE_S4);
+}
+EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index ea866c7bf31d..804734058c77 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -65,6 +65,13 @@ enum exception_stack_ordering {
#endif
+#ifdef CONFIG_X86_32
+struct doublefault_stack {
+ unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
+ struct x86_hw_tss tss;
+} __aligned(PAGE_SIZE);
+#endif
+
/*
* cpu_entry_area is a percpu region that contains things needed by the CPU
* and early entry/exit code. Real types aren't used for all fields here
@@ -86,6 +93,11 @@ struct cpu_entry_area {
#endif
struct entry_stack_page entry_stack_page;
+#ifdef CONFIG_X86_32
+ char guard_doublefault_stack[PAGE_SIZE];
+ struct doublefault_stack doublefault_stack;
+#endif
+
/*
* On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
* we need task switches to work, and task switches write to the TSS.
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index a8f6c809d9b1..5e12c63b47aa 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -6,9 +6,6 @@ struct dev_archdata {
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
#endif
-#ifdef CONFIG_STA2X11
- bool is_sta2x11;
-#endif
};
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h
deleted file mode 100644
index 1a19251eaac9..000000000000
--- a/arch/x86/include/asm/dma-direct.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_X86_DMA_DIRECT_H
-#define ASM_X86_DMA_DIRECT_H 1
-
-bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* ASM_X86_DMA_DIRECT_H */
diff --git a/arch/x86/include/asm/doublefault.h b/arch/x86/include/asm/doublefault.h
new file mode 100644
index 000000000000..af9a14ac8962
--- /dev/null
+++ b/arch/x86/include/asm/doublefault.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_DOUBLEFAULT_H
+#define _ASM_X86_DOUBLEFAULT_H
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+extern void doublefault_init_cpu_tss(void);
+#else
+static inline void doublefault_init_cpu_tss(void)
+{
+}
+#endif
+
+#endif /* _ASM_X86_DOUBLEFAULT_H */
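The new header is a textbook stub-header pattern: the real declaration under the config gate, an empty static inline otherwise, so callers such as cpu_init() need no #ifdef at the call site and the call compiles away entirely when the feature is off. The generic shape, for a hypothetical CONFIG_FOO:

/* foo.h */
#ifdef CONFIG_FOO
extern void foo_init_cpu(void);
#else
static inline void foo_init_cpu(void)
{
	/* Feature compiled out: every call site collapses to nothing. */
}
#endif
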
diff --git a/arch/x86/include/asm/e820/types.h b/arch/x86/include/asm/e820/types.h
index c3aa4b5e49e2..314f75d886d0 100644
--- a/arch/x86/include/asm/e820/types.h
+++ b/arch/x86/include/asm/e820/types.h
@@ -29,6 +29,14 @@ enum e820_type {
E820_TYPE_PRAM = 12,
/*
+ * Special-purpose memory is indicated to the system via the
+ * EFI_MEMORY_SP attribute. Define an e820 translation of this
+ * memory type for the purpose of reserving this range and
+ * marking it with the IORES_DESC_SOFT_RESERVED designation.
+ */
+ E820_TYPE_SOFT_RESERVED = 0xefffffff,
+
+ /*
* Reserved RAM used by the kernel itself if
* CONFIG_INTEL_TXT=y is enabled, memory of this type
* will be included in the S3 integrity calculation
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 43a82e59c59d..d028e9acdf1c 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -140,7 +140,6 @@ extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
-extern void efi_reserve_boot_services(void);
struct efi_setup_data {
u64 fw_vendor;
@@ -244,6 +243,8 @@ static inline bool efi_is_64bit(void)
extern bool efi_reboot_required(void);
extern bool efi_is_table_address(unsigned long phys_addr);
+extern void efi_find_mirror(void);
+extern void efi_reserve_boot_services(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
@@ -254,6 +255,20 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
+static inline void efi_find_mirror(void)
+{
+}
+static inline void efi_reserve_boot_services(void)
+{
+}
#endif /* CONFIG_EFI */
+#ifdef CONFIG_EFI_FAKE_MEMMAP
+extern void __init efi_fake_memmap_early(void);
+#else
+static inline void efi_fake_memmap_early(void)
+{
+}
+#endif
+
#endif /* _ASM_X86_EFI_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 4c95c365058a..44c48e34d799 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -509,7 +509,7 @@ static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
- return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+ return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
/*
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index c38a66661576..c2a7458f912c 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,6 +28,19 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
+/*
+ * When an ftrace-registered caller is tracing a function that is
+ * also set by a register_ftrace_direct() call, it needs to be
+ * differentiated in the ftrace_caller trampoline. To do this, we
+ * place the direct caller in the ORIG_AX part of pt_regs. This
+ * tells the ftrace_caller that there's a direct caller.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+ /* Emulate a call */
+ regs->orig_ax = addr;
+}
+
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 6bed97ff6db2..9997521fc5cd 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -180,8 +180,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
* The default ioremap() behavior is non-cached; if you need something
* else, you probably want one of the following.
*/
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-#define ioremap_nocache ioremap_nocache
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
@@ -205,10 +203,7 @@ extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
- return ioremap_nocache(offset, size);
-}
+void __iomem *ioremap(resource_size_t offset, unsigned long size);
#define ioremap ioremap
extern void iounmap(volatile void __iomem *addr);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index b91623d521d9..bf1ed2ddc74b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -2,10 +2,28 @@
#ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H
+#include <linux/acpi.h>
+
+#include <asm/e820/api.h>
+
extern int force_iommu, no_iommu;
extern int iommu_detected;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+ u64 start = rmrr->base_address;
+ u64 end = rmrr->end_address + 1;
+
+ if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+ return 0;
+
+ pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
+ start, end - 1);
+ return -EINVAL;
+}
+
#endif /* _ASM_X86_IOMMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 16ae821483c8..5f33924e200f 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -26,12 +26,14 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
#ifdef CONFIG_PERF_EVENTS
+DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
{
if (static_branch_unlikely(&rdpmc_always_available_key) ||
- atomic_read(&mm->context.perf_rdpmc_allowed))
+ (!static_branch_unlikely(&rdpmc_never_available_key) &&
+ atomic_read(&mm->context.perf_rdpmc_allowed)))
cr4_set_bits_irqsoff(X86_CR4_PCE);
else
cr4_clear_bits_irqsoff(X86_CR4_PCE);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f4138aeb4280..6b79515abb82 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -219,6 +219,7 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);
void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index 19f5807260c3..0416d42e5bdd 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -41,10 +41,11 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif
/*
- * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
- * to avoid include recursion hell
+ * This is an upper bound on sizeof(struct cpu_entry_area) / PAGE_SIZE.
+ * Define this here and validate with BUILD_BUG_ON() in cpu_entry_area.c
+ * to avoid include recursion hell.
*/
-#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 41)
+#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 43)
/* The +1 is for the readonly IDT page: */
#define CPU_ENTRY_AREA_BASE \
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index e51afbb0cbfb..0340aad3f2fc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -166,7 +166,6 @@ enum cpuid_regs_idx {
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
-extern struct x86_hw_tss doublefault_tss;
extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
@@ -997,7 +996,6 @@ bool xen_set_default_idle(void);
#endif
void stop_this_cpu(void *dummy);
-void df_debug(struct pt_regs *regs, long error_code);
void microcode_check(void);
enum l1tf_mitigations {
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index b25e633033c3..ffa0dc8a535e 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -69,6 +69,9 @@ dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
+#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
+dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
+#endif
dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 0bcdb1279361..f5e2eb12cb71 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -86,6 +86,14 @@
UNWIND_HINT sp_offset=\sp_offset
.endm
+.macro UNWIND_HINT_SAVE
+ UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE
+.endm
+
+.macro UNWIND_HINT_RESTORE
+ UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE
+.endm
+
#else /* !__ASSEMBLY__ */
#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 90ab9a795b49..7c5bb43ed8af 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -15,9 +15,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ __kernel_long_t msg_stime; /* last msgsnd time */
+ __kernel_long_t msg_rtime; /* last msgrcv time */
+ __kernel_long_t msg_ctime; /* last change time */
__kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
__kernel_ulong_t msg_qnum; /* number of messages in queue */
__kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
diff --git a/arch/x86/include/uapi/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h
index 89de6cd9f0a7..93030e97269a 100644
--- a/arch/x86/include/uapi/asm/sembuf.h
+++ b/arch/x86/include/uapi/asm/sembuf.h
@@ -21,9 +21,9 @@ struct semid64_ds {
unsigned long sem_ctime; /* last change time */
unsigned long sem_ctime_high;
#else
- __kernel_time_t sem_otime; /* last semop time */
+ __kernel_long_t sem_otime; /* last semop time */
__kernel_ulong_t __unused1;
- __kernel_time_t sem_ctime; /* last change time */
+ __kernel_long_t sem_ctime; /* last change time */
__kernel_ulong_t __unused2;
#endif
__kernel_ulong_t sem_nsems; /* no. of semaphores in array */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 644421f3823b..f0305dc660c9 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -16,9 +16,9 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ __kernel_long_t shm_atime; /* last attach time */
+ __kernel_long_t shm_dtime; /* last detach time */
+ __kernel_long_t shm_ctime; /* last change time */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
__kernel_ulong_t shm_nattch; /* no. of current attaches */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 32acb970f416..6175e370ee4a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -100,7 +100,9 @@ obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
obj-y += kprobes/
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
+ifeq ($(CONFIG_X86_32),y)
+obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
+endif
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_VM86) += vm86_32.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 4bbccb9d16dc..4e5f50236048 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -185,13 +185,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
- return force_iommu || !dma_capable(dev, addr, size);
+ return force_iommu || !dma_capable(dev, addr, size, true);
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
- return !dma_capable(dev, addr, size);
+ return !dma_capable(dev, addr, size, true);
}
/* Map a single continuous physical area into the IOMMU.
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index baa2fed8deb6..2e4d90294fe6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -24,6 +24,7 @@
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
+#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
@@ -1814,8 +1815,6 @@ static inline void tss_setup_ist(struct tss_struct *tss)
tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
}
-static inline void gdt_setup_doublefault_tss(int cpu) { }
-
#else /* CONFIG_X86_64 */
static inline void setup_getcpu(int cpu) { }
@@ -1827,13 +1826,6 @@ static inline void ucode_cpu_init(int cpu)
static inline void tss_setup_ist(struct tss_struct *tss) { }
-static inline void gdt_setup_doublefault_tss(int cpu)
-{
-#ifdef CONFIG_DOUBLEFAULT
- /* Set up the doublefault TSS pointer in the GDT */
- __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
-#endif
-}
#endif /* !CONFIG_X86_64 */
static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -1923,7 +1915,7 @@ void cpu_init(void)
clear_all_debug_regs();
dbg_restore_debug_regs();
- gdt_setup_doublefault_tss(cpu);
+ doublefault_init_cpu_tss();
fpu__init_cpu();
diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index d01e0da0163a..b38010b541d6 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -195,17 +195,24 @@ static const struct attribute_group thermal_attr_group = {
#define THERM_THROT_POLL_INTERVAL HZ
#define THERM_STATUS_PROCHOT_LOG BIT(1)
+#define THERM_STATUS_CLEAR_CORE_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(13) | BIT(15))
+#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11))
+
static void clear_therm_status_log(int level)
{
int msr;
- u64 msr_val;
+ u64 mask, msr_val;
- if (level == CORE_LEVEL)
- msr = MSR_IA32_THERM_STATUS;
- else
- msr = MSR_IA32_PACKAGE_THERM_STATUS;
+ if (level == CORE_LEVEL) {
+ msr = MSR_IA32_THERM_STATUS;
+ mask = THERM_STATUS_CLEAR_CORE_MASK;
+ } else {
+ msr = MSR_IA32_PACKAGE_THERM_STATUS;
+ mask = THERM_STATUS_CLEAR_PKG_MASK;
+ }
rdmsrl(msr, msr_val);
+ msr_val &= mask;
wrmsrl(msr, msr_val & ~THERM_STATUS_PROCHOT_LOG);
}
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
deleted file mode 100644
index 0d6c657593f8..000000000000
--- a/arch/x86/kernel/doublefault.c
+++ /dev/null
@@ -1,86 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
-
-#define DOUBLEFAULT_STACKSIZE (1024)
-static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
-
-#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
-
-static void doublefault_fn(void)
-{
- struct desc_ptr gdt_desc = {0, 0};
- unsigned long gdt, tss;
-
- native_store_gdt(&gdt_desc);
- gdt = gdt_desc.address;
-
- printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
-
- if (ptr_ok(gdt)) {
- gdt += GDT_ENTRY_TSS << 3;
- tss = get_desc_base((struct desc_struct *)gdt);
- printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
-
- if (ptr_ok(tss)) {
- struct x86_hw_tss *t = (struct x86_hw_tss *)tss;
-
- printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
- t->ip, t->sp);
-
- printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
- t->ax, t->bx, t->cx, t->dx);
- printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
- t->si, t->di);
- }
- }
-
- for (;;)
- cpu_relax();
-}
-
-struct x86_hw_tss doublefault_tss __cacheline_aligned = {
- .sp0 = STACK_START,
- .ss0 = __KERNEL_DS,
- .ldt = 0,
- .io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
-
- .ip = (unsigned long) doublefault_fn,
- /* 0x2 bit is always set */
- .flags = X86_EFLAGS_SF | 0x2,
- .sp = STACK_START,
- .es = __USER_DS,
- .cs = __KERNEL_CS,
- .ss = __KERNEL_DS,
- .ds = __USER_DS,
- .fs = __KERNEL_PERCPU,
-#ifndef CONFIG_X86_32_LAZY_GS
- .gs = __KERNEL_STACK_CANARY,
-#endif
-
- .__cr3 = __pa_nodebug(swapper_pg_dir),
-};
-
-/* dummy for do_double_fault() call */
-void df_debug(struct pt_regs *regs, long error_code) {}
-
-#else /* !CONFIG_X86_32 */
-
-void df_debug(struct pt_regs *regs, long error_code)
-{
- pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
- show_regs(regs);
- panic("Machine halted.");
-}
-#endif
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
new file mode 100644
index 000000000000..3793646f0fb5
--- /dev/null
+++ b/arch/x86/kernel/doublefault_32.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/desc.h>
+#include <asm/traps.h>
+
+extern void double_fault(void);
+#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+#define TSS(x) this_cpu_read(cpu_tss_rw.x86_tss.x)
+
+static void set_df_gdt_entry(unsigned int cpu);
+
+/*
+ * Called by double_fault with CR0.TS and EFLAGS.NT cleared. The CPU thinks
+ * we're running the doublefault task. Cannot return.
+ */
+asmlinkage notrace void __noreturn doublefault_shim(void)
+{
+ unsigned long cr2;
+ struct pt_regs regs;
+
+ BUILD_BUG_ON(sizeof(struct doublefault_stack) != PAGE_SIZE);
+
+ cr2 = native_read_cr2();
+
+ /* Reset back to the normal kernel task. */
+ force_reload_TR();
+ set_df_gdt_entry(smp_processor_id());
+
+ trace_hardirqs_off();
+
+ /*
+ * Fill in pt_regs. A downside of doing this in C is that the unwinder
+ * won't see it (no ENCODE_FRAME_POINTER), so a nested stack dump
+ * won't successfully unwind to the source of the double fault.
+ * The main dump from do_double_fault() is fine, though, since it
+ * uses these regs directly.
+ *
+ * If anyone ever cares, this could be moved to asm.
+ */
+ regs.ss = TSS(ss);
+ regs.__ssh = 0;
+ regs.sp = TSS(sp);
+ regs.flags = TSS(flags);
+ regs.cs = TSS(cs);
+ /* We won't go through the entry asm, so we can leave __csh as 0. */
+ regs.__csh = 0;
+ regs.ip = TSS(ip);
+ regs.orig_ax = 0;
+ regs.gs = TSS(gs);
+ regs.__gsh = 0;
+ regs.fs = TSS(fs);
+ regs.__fsh = 0;
+ regs.es = TSS(es);
+ regs.__esh = 0;
+ regs.ds = TSS(ds);
+ regs.__dsh = 0;
+ regs.ax = TSS(ax);
+ regs.bp = TSS(bp);
+ regs.di = TSS(di);
+ regs.si = TSS(si);
+ regs.dx = TSS(dx);
+ regs.cx = TSS(cx);
+ regs.bx = TSS(bx);
+
+ do_double_fault(&regs, 0, cr2);
+
+ /*
+ * x86_32 does not save the original CR3 anywhere on a task switch.
+ * This means that, even if we wanted to return, we would need to find
+ * some way to reconstruct CR3. We could make a credible guess based
+ * on cpu_tlbstate, but that would be racy and would not account for
+ * PTI.
+ *
+ * Instead, don't bother. We can return through
+ * rewind_stack_do_exit() instead.
+ */
+ panic("cannot return from double fault\n");
+}
+NOKPROBE_SYMBOL(doublefault_shim);
+
+DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
+ .tss = {
+ /*
+ * No sp0 or ss0 -- we never run CPL != 0 with this TSS
+ * active. sp is filled in later.
+ */
+ .ldt = 0,
+ .io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
+
+ .ip = (unsigned long) double_fault,
+ .flags = X86_EFLAGS_FIXED,
+ .es = __USER_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+ .ds = __USER_DS,
+ .fs = __KERNEL_PERCPU,
+#ifndef CONFIG_X86_32_LAZY_GS
+ .gs = __KERNEL_STACK_CANARY,
+#endif
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+ },
+};
+
+static void set_df_gdt_entry(unsigned int cpu)
+{
+ /* Set up doublefault TSS pointer in the GDT */
+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS,
+ &get_cpu_entry_area(cpu)->doublefault_stack.tss);
+
+}
+
+void doublefault_init_cpu_tss(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+
+ /*
+ * The linker isn't smart enough to initialize percpu variables that
+ * point to other places in percpu space.
+ */
+ this_cpu_write(doublefault_stack.tss.sp,
+ (unsigned long)&cea->doublefault_stack.stack +
+ sizeof(doublefault_stack.stack));
+
+ set_df_gdt_entry(cpu);
+}
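As the comment in doublefault_init_cpu_tss() notes, a percpu initializer cannot point into percpu space, so the stack pointer has to be patched in at boot. A reduced sketch of the same constraint, using a hypothetical percpu structure (call with preemption disabled, e.g. from per-CPU init):

#include <linux/kernel.h>
#include <linux/percpu.h>

struct foo {
	unsigned long buf[64];
	unsigned long *top;	/* cannot statically be set to &buf[64] */
};

static DEFINE_PER_CPU(struct foo, foo_area);

static void foo_init_this_cpu(void)
{
	/* Resolve the self-referential pointer at runtime instead. */
	this_cpu_write(foo_area.top,
		       this_cpu_ptr(&foo_area)->buf + ARRAY_SIZE(foo_area.buf));
}
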
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 64a59d726639..8e3a8fedfa4d 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -29,6 +29,9 @@ const char *stack_type_name(enum stack_type type)
if (type == STACK_TYPE_ENTRY)
return "ENTRY_TRAMPOLINE";
+ if (type == STACK_TYPE_EXCEPTION)
+ return "#DF";
+
return NULL;
}
@@ -82,6 +85,30 @@ static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
return true;
}
+static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
+{
+#ifdef CONFIG_DOUBLEFAULT
+ struct cpu_entry_area *cea = get_cpu_entry_area(raw_smp_processor_id());
+ struct doublefault_stack *ss = &cea->doublefault_stack;
+
+ void *begin = ss->stack;
+ void *end = begin + sizeof(ss->stack);
+
+ if ((void *)stack < begin || (void *)stack >= end)
+ return false;
+
+ info->type = STACK_TYPE_EXCEPTION;
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = (unsigned long *)this_cpu_read(cpu_tss_rw.x86_tss.sp);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+
int get_stack_info(unsigned long *stack, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask)
{
@@ -105,6 +132,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
if (in_softirq_stack(stack, info))
goto recursion_check;
+ if (in_doublefault_stack(stack, info))
+ goto recursion_check;
+
goto unknown;
recursion_check:
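in_doublefault_stack() follows the same contract as the other in_*_stack() helpers: membership is a half-open [begin, end) test, and on a hit the helper fills in the bounds plus next_sp, the point where the unwinder resumes on the interrupted stack. The contract in isolation (hypothetical types, illustration only):

#include <linux/types.h>

struct where {			/* minimal stand-in for struct stack_info */
	void *begin, *end;
	unsigned long *next_sp;
};

static bool in_some_stack(unsigned long *sp, void *begin, void *end,
			  unsigned long *linked_sp, struct where *info)
{
	/* Half-open: a pointer one past the end is not on this stack. */
	if ((void *)sp < begin || (void *)sp >= end)
		return false;

	info->begin = begin;
	info->end = end;
	info->next_sp = linked_sp;	/* continue unwinding here */
	return true;
}
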
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0bfe9a685b3b..c5399e80c59c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -190,6 +190,7 @@ static void __init e820_print_type(enum e820_type type)
case E820_TYPE_RAM: /* Fall through: */
case E820_TYPE_RESERVED_KERN: pr_cont("usable"); break;
case E820_TYPE_RESERVED: pr_cont("reserved"); break;
+ case E820_TYPE_SOFT_RESERVED: pr_cont("soft reserved"); break;
case E820_TYPE_ACPI: pr_cont("ACPI data"); break;
case E820_TYPE_NVS: pr_cont("ACPI NVS"); break;
case E820_TYPE_UNUSABLE: pr_cont("unusable"); break;
@@ -1048,6 +1049,7 @@ static const char *__init e820_type_to_string(struct e820_entry *entry)
case E820_TYPE_PRAM: return "Persistent Memory (legacy)";
case E820_TYPE_PMEM: return "Persistent Memory";
case E820_TYPE_RESERVED: return "Reserved";
+ case E820_TYPE_SOFT_RESERVED: return "Soft Reserved";
default: return "Unknown E820 type";
}
}
@@ -1063,6 +1065,7 @@ static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry)
case E820_TYPE_PRAM: /* Fall-through: */
case E820_TYPE_PMEM: /* Fall-through: */
case E820_TYPE_RESERVED: /* Fall-through: */
+ case E820_TYPE_SOFT_RESERVED: /* Fall-through: */
default: return IORESOURCE_MEM;
}
}
@@ -1075,6 +1078,7 @@ static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry)
case E820_TYPE_PMEM: return IORES_DESC_PERSISTENT_MEMORY;
case E820_TYPE_PRAM: return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
case E820_TYPE_RESERVED: return IORES_DESC_RESERVED;
+ case E820_TYPE_SOFT_RESERVED: return IORES_DESC_SOFT_RESERVED;
case E820_TYPE_RESERVED_KERN: /* Fall-through: */
case E820_TYPE_RAM: /* Fall-through: */
case E820_TYPE_UNUSABLE: /* Fall-through: */
@@ -1089,11 +1093,12 @@ static bool __init do_mark_busy(enum e820_type type, struct resource *res)
return true;
/*
- * Treat persistent memory like device memory, i.e. reserve it
- * for exclusive use of a driver
+ * Treat persistent memory and other special memory ranges like
+ * device memory, i.e. reserve it for exclusive use of a driver
*/
switch (type) {
case E820_TYPE_RESERVED:
+ case E820_TYPE_SOFT_RESERVED:
case E820_TYPE_PRAM:
case E820_TYPE_PMEM:
return false;
@@ -1296,6 +1301,9 @@ void __init e820__memblock_setup(void)
if (end != (resource_size_t)end)
continue;
+ if (entry->type == E820_TYPE_SOFT_RESERVED)
+ memblock_reserve(entry->addr, entry->size);
+
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
continue;
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 024c3053dbba..060a361d9d11 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -1043,6 +1043,20 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
return;
/*
+ * If the return location is actually pointing directly to
+ * the start of a direct trampoline (if we trace the trampoline
+ * it will still be offset by MCOUNT_INSN_SIZE), then the
+ * return address is actually off by one word, and we
+ * need to adjust for that.
+ */
+ if (ftrace_direct_func_count) {
+ if (ftrace_find_direct_func(self_addr + MCOUNT_INSN_SIZE)) {
+ self_addr = *parent;
+ parent++;
+ }
+ }
+
+ /*
* Protect against fault, even if it shouldn't
* happen. This tool is too much intrusive to
* ignore such a protection.
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 6e8961ca3605..369e61faacfe 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -85,6 +85,7 @@
movq %rdi, RDI(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
+ movq $0, ORIG_RAX(%rsp)
/*
* Save the original RBP. Even though the mcount ABI does not
* require this, it helps out callers.
@@ -111,7 +112,11 @@
subq $MCOUNT_INSN_SIZE, %rdi
.endm
-.macro restore_mcount_regs
+.macro restore_mcount_regs save=0
+
+ /* ftrace_regs_caller or frame pointers require this */
+ movq RBP(%rsp), %rbp
+
movq R9(%rsp), %r9
movq R8(%rsp), %r8
movq RDI(%rsp), %rdi
@@ -120,10 +125,7 @@
movq RCX(%rsp), %rcx
movq RAX(%rsp), %rax
- /* ftrace_regs_caller can modify %rbp */
- movq RBP(%rsp), %rbp
-
- addq $MCOUNT_REG_SIZE, %rsp
+ addq $MCOUNT_REG_SIZE-\save, %rsp
.endm
@@ -174,6 +176,8 @@ SYM_FUNC_START(ftrace_regs_caller)
/* Save the current flags before any operations that can change them */
pushfq
+ UNWIND_HINT_SAVE
+
/* added 8 bytes to save flags */
save_mcount_regs 8
/* save_mcount_regs fills in first two parameters */
@@ -226,7 +230,33 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
movq R10(%rsp), %r10
movq RBX(%rsp), %rbx
- restore_mcount_regs
+ movq ORIG_RAX(%rsp), %rax
+ movq %rax, MCOUNT_REG_SIZE-8(%rsp)
+
+ /* If ORIG_RAX is anything but zero, make this a call to that */
+ movq ORIG_RAX(%rsp), %rax
+ cmpq $0, %rax
+ je 1f
+
+ /* Swap the flags with orig_rax */
+ movq MCOUNT_REG_SIZE(%rsp), %rdi
+ movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
+ movq %rax, MCOUNT_REG_SIZE(%rsp)
+
+ restore_mcount_regs 8
+
+ jmp 2f
+
+1: restore_mcount_regs
+
+
+2:
+ /*
+	 * The stack layout is nondeterministic here, depending on which path was
+ * taken. This confuses objtool and ORC, rightfully so. For now,
+ * pretend the stack always looks like the non-direct case.
+ */
+ UNWIND_HINT_RESTORE
/* Restore flags */
popfq
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 57de2ebff7e2..5dcedad21dff 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -140,7 +140,7 @@ rootfs_initcall(pci_iommu_init);
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
- pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
return 0;
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index bd2a11ca5dd6..61e93a318983 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -377,37 +377,37 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
void tss_update_io_bitmap(void)
{
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ struct thread_struct *t = &current->thread;
u16 *base = &tss->x86_tss.io_bitmap_base;
- if (test_thread_flag(TIF_IO_BITMAP)) {
- struct thread_struct *t = &current->thread;
-
- if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
- *base = IO_BITMAP_OFFSET_VALID_ALL;
- } else {
- struct io_bitmap *iobm = t->io_bitmap;
- /*
- * Only copy bitmap data when the sequence number
- * differs. The update time is accounted to the
- * incoming task.
- */
- if (tss->io_bitmap.prev_sequence != iobm->sequence)
- tss_copy_io_bitmap(tss, iobm);
-
- /* Enable the bitmap */
- *base = IO_BITMAP_OFFSET_VALID_MAP;
- }
+ if (!test_thread_flag(TIF_IO_BITMAP)) {
+ tss_invalidate_io_bitmap(tss);
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
+ *base = IO_BITMAP_OFFSET_VALID_ALL;
+ } else {
+ struct io_bitmap *iobm = t->io_bitmap;
+
/*
- * Make sure that the TSS limit is covering the io bitmap.
- * It might have been cut down by a VMEXIT to 0x67 which
- * would cause a subsequent I/O access from user space to
- * trigger a #GP because tbe bitmap is outside the TSS
- * limit.
+ * Only copy bitmap data when the sequence number differs. The
+ * update time is accounted to the incoming task.
*/
- refresh_tss_limit();
- } else {
- tss_invalidate_io_bitmap(tss);
+ if (tss->io_bitmap.prev_sequence != iobm->sequence)
+ tss_copy_io_bitmap(tss, iobm);
+
+ /* Enable the bitmap */
+ *base = IO_BITMAP_OFFSET_VALID_MAP;
}
+
+ /*
+ * Make sure that the TSS limit is covering the IO bitmap. It might have
+ * been cut down by a VMEXIT to 0x67 which would cause a subsequent I/O
+	 * access from user space to trigger a #GP because the bitmap is outside
+ * the TSS limit.
+ */
+ refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
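The tss_update_io_bitmap() rewrite is purely structural: the rare invalidate case becomes an early return, which un-indents the common path and lets the TSS-limit comment sit beside the code it describes. The refactor's skeleton, with hypothetical names:

#include <linux/types.h>

static void disable(void) { /* rare teardown path */ }

/* Before: the common path nests inside the condition. */
static void update_nested(bool active)
{
	if (active) {
		/* ... long common path, one level deep ... */
	} else {
		disable();
	}
}

/* After: dispose of the rare case first, then run the common path flat. */
static void update_guarded(bool active)
{
	if (!active) {
		disable();
		return;
	}
	/* ... long common path at top level ... */
}
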
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 066e5b01a7e0..f0e1ddbc2fd7 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -182,6 +182,9 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
static int set_segment_reg(struct task_struct *task,
unsigned long offset, u16 value)
{
+ if (WARN_ON_ONCE(task == current))
+ return -EIO;
+
/*
* The value argument was already truncated to 16 bits.
*/
@@ -209,10 +212,7 @@ static int set_segment_reg(struct task_struct *task,
break;
case offsetof(struct user_regs_struct, gs):
- if (task == current)
- set_user_gs(task_pt_regs(task), value);
- else
- task_user_gs(task) = value;
+ task_user_gs(task) = value;
}
return 0;
@@ -272,32 +272,41 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
static int set_segment_reg(struct task_struct *task,
unsigned long offset, u16 value)
{
+ if (WARN_ON_ONCE(task == current))
+ return -EIO;
+
/*
* The value argument was already truncated to 16 bits.
*/
if (invalid_selector(value))
return -EIO;
+ /*
+ * This function has some ABI oddities.
+ *
+ * A 32-bit ptracer probably expects that writing FS or GS will change
+ * FSBASE or GSBASE respectively. In the absence of FSGSBASE support,
+ * this code indeed has that effect. When FSGSBASE is added, this
+ * will require a special case.
+ *
+ * For existing 64-bit ptracers, writing FS or GS *also* currently
+ * changes the base if the selector is nonzero the next time the task
+ * is run. This behavior may not be needed, and trying to preserve it
+ * when FSGSBASE is added would be complicated at best.
+ */
+
switch (offset) {
case offsetof(struct user_regs_struct,fs):
task->thread.fsindex = value;
- if (task == current)
- loadsegment(fs, task->thread.fsindex);
break;
case offsetof(struct user_regs_struct,gs):
task->thread.gsindex = value;
- if (task == current)
- load_gs_index(task->thread.gsindex);
break;
case offsetof(struct user_regs_struct,ds):
task->thread.ds = value;
- if (task == current)
- loadsegment(ds, task->thread.ds);
break;
case offsetof(struct user_regs_struct,es):
task->thread.es = value;
- if (task == current)
- loadsegment(es, task->thread.es);
break;
/*
@@ -375,6 +384,9 @@ static int putreg(struct task_struct *child,
* When changing the FS base, use do_arch_prctl_64()
* to set the index to zero and to set the base
* as requested.
+ *
+ * NB: This behavior is nonsensical and likely needs to
+ * change when FSGSBASE support is added.
*/
if (child->thread.fsbase != value)
return do_arch_prctl_64(child, ARCH_SET_FS, value);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d398afd206b8..cedfe2077a69 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1138,17 +1138,15 @@ void __init setup_arch(char **cmdline_p)
reserve_bios_regions();
- if (efi_enabled(EFI_MEMMAP)) {
- efi_fake_memmap();
- efi_find_mirror();
- efi_esrt_init();
+ efi_fake_memmap();
+ efi_find_mirror();
+ efi_esrt_init();
- /*
- * The EFI specification says that boot service code won't be
- * called after ExitBootServices(). This is, in fact, a lie.
- */
- efi_reserve_boot_services();
- }
+ /*
+ * The EFI specification says that boot service code won't be
+ * called after ExitBootServices(). This is, in fact, a lie.
+ */
+ efi_reserve_boot_services();
/* preallocate 4k for mptable mpc */
e820__memblock_alloc_reserved_mpc_new();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c90312146da0..05da6b5b167b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -306,8 +306,23 @@ __visible void __noreturn handle_stack_overflow(const char *message,
}
#endif
-#ifdef CONFIG_X86_64
-/* Runs on IST stack */
+#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
+/*
+ * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
+ *
+ * On x86_64, this is more or less a normal kernel entry. Notwithstanding the
+ * SDM's warnings about double faults being unrecoverable, returning works as
+ * expected. Presumably what the SDM actually means is that the CPU may get
+ * the register state wrong on entry, so returning could be a bad idea.
+ *
+ * Various CPU engineers have promised that double faults due to an IRET fault
+ * while the stack is read-only are, in fact, recoverable.
+ *
+ * On x86_32, this is entered through a task gate, and regs are synthesized
+ * from the TSS. Returning is, in principle, okay, but changes to regs will
+ * be lost. If, for some reason, we need to return to a context with modified
+ * regs, the shim code could be adjusted to synchronize the registers.
+ */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2)
{
static const char str[] = "double fault";
@@ -411,15 +426,9 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif
-#ifdef CONFIG_DOUBLEFAULT
- df_debug(regs, error_code);
-#endif
- /*
- * This is always a kernel trap and never fixable (and thus must
- * never return).
- */
- for (;;)
- die(str, regs, error_code);
+ pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
+ die("double fault", regs, error_code);
+ panic("Machine halted.");
}
#endif
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 0a0e9112f284..8908c58bd6cd 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -695,16 +695,28 @@ AVXcode: 2
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-# Skip 0x50-0x57
+50: vpdpbusd Vx,Hx,Wx (66),(ev)
+51: vpdpbusds Vx,Hx,Wx (66),(ev)
+52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+54: vpopcntb/w Vx,Wx (66),(ev)
+55: vpopcntd/q Vx,Wx (66),(ev)
58: vpbroadcastd Vx,Wx (66),(v)
59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-# Skip 0x5c-0x63
+# Skip 0x5c-0x61
+62: vpexpandb/w Vx,Wx (66),(ev)
+63: vpcompressb/w Wx,Vx (66),(ev)
64: vpblendmd/q Vx,Hx,Wx (66),(ev)
65: vblendmps/d Vx,Hx,Wx (66),(ev)
66: vpblendmb/w Vx,Hx,Wx (66),(ev)
-# Skip 0x67-0x74
+68: vp2intersectd/q Kx,Hx,Wx (F2),(ev)
+# Skip 0x69-0x6f
+70: vpshldvw Vx,Hx,Wx (66),(ev)
+71: vpshldvd/q Vx,Hx,Wx (66),(ev)
+72: vcvtne2ps2bf16 Vx,Hx,Wx (F2),(ev) | vcvtneps2bf16 Vx,Wx (F3),(ev) | vpshrdvw Vx,Hx,Wx (66),(ev)
+73: vpshrdvd/q Vx,Hx,Wx (66),(ev)
75: vpermi2b/w Vx,Hx,Wx (66),(ev)
76: vpermi2d/q Vx,Hx,Wx (66),(ev)
77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
@@ -727,6 +739,7 @@ AVXcode: 2
8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
8d: vpermb/w Vx,Hx,Wx (66),(ev)
8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+8f: vpshufbitqmb Kx,Hx,Wx (66),(ev)
# 0x0f 0x38 0x90-0xbf (FMA)
90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
@@ -738,8 +751,8 @@ AVXcode: 2
97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
-9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v) | v4fmaddps Vdqq,Hdqq,Wdq (F2),(ev)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1) | v4fmaddss Vdq,Hdq,Wdq (F2),(ev)
9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
@@ -752,8 +765,8 @@ a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
-ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v) | v4fnmaddps Vdqq,Hdqq,Wdq (F2),(ev)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1) | v4fnmaddss Vdq,Hdq,Wdq (F2),(ev)
ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
@@ -780,11 +793,12 @@ ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+cf: vgf2p8mulb Vx,Wx (66)
db: VAESIMC Vdq,Wdq (66),(v1)
-dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
-dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
-de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
-df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+dc: vaesenc Vx,Hx,Wx (66)
+dd: vaesenclast Vx,Hx,Wx (66)
+de: vaesdec Vx,Hx,Wx (66)
+df: vaesdeclast Vx,Hx,Wx (66)
f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
f2: ANDN Gy,By,Ey (v)
@@ -848,7 +862,7 @@ AVXcode: 3
41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+44: vpclmulqdq Vx,Hx,Wx,Ib (66)
46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
@@ -865,7 +879,13 @@ AVXcode: 3
63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
66: vfpclassps/d Vk,Wx,Ib (66),(ev)
67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+70: vpshldw Vx,Hx,Wx,Ib (66),(ev)
+71: vpshldd/q Vx,Hx,Wx,Ib (66),(ev)
+72: vpshrdw Vx,Hx,Wx,Ib (66),(ev)
+73: vpshrdd/q Vx,Hx,Wx,Ib (66),(ev)
cc: sha1rnds4 Vdq,Wdq,Ib
+ce: vgf2p8affineqb Vx,Wx,Ib (66)
+cf: vgf2p8affineinvqb Vx,Wx,Ib (66)
df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
f0: RORX Gy,Ey,Ib (F2),(v)
EndTable
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 82ead8e27888..56f9189bbadb 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -17,6 +17,10 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif
+#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+#endif
+
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
@@ -108,7 +112,15 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
cea_map_stack(MCE);
}
#else
-static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
+static inline void percpu_setup_exception_stacks(unsigned int cpu)
+{
+#ifdef CONFIG_DOUBLEFAULT
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+
+ cea_map_percpu_pages(&cea->doublefault_stack,
+ &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
+#endif
+}
#endif
/* Setup the fixmap mappings only once per-processor */
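
The DECLARE_PER_CPU_PAGE_ALIGNED() above only makes the per-CPU symbol visible in this file; the storage comes from a matching definition elsewhere in this series (the x86_32 doublefault code), which cea_map_percpu_pages() then maps into the CPU entry area. Schematically:

	/* matching definition, one page-aligned instance per CPU */
	DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);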
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9ceacd1156db..304d31d8cbbc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -197,7 +197,7 @@ void vmalloc_sync_all(void)
return;
for (address = VMALLOC_START & PMD_MASK;
- address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
+ address >= TASK_SIZE_MAX && address < VMALLOC_END;
address += PMD_SIZE) {
struct page *page;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 1ff9c2030b4f..b3a2936377b5 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,11 +280,11 @@ err_free_memtype:
}
/**
- * ioremap_nocache - map bus memory into CPU space
+ * ioremap - map bus memory into CPU space
* @phys_addr: bus address of the memory
* @size: size of the resource to map
*
- * ioremap_nocache performs a platform specific sequence of operations to
+ * ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
@@ -300,7 +300,7 @@ err_free_memtype:
*
* Must be freed with iounmap.
*/
-void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
/*
* Ideally, this should be:
@@ -315,7 +315,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
return __ioremap_caller(phys_addr, size, pcm,
__builtin_return_address(0), false);
}
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);
/**
* ioremap_uc - map bus memory into CPU space as strongly uncachable
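
After the rename, drivers simply call ioremap() for the default UC- mapping. A minimal usage sketch, assuming a struct resource *res obtained from the driver core:

	void __iomem *regs;

	regs = ioremap(res->start, resource_size(res));	/* UC- by default */
	if (!regs)
		return -ENOMEM;
	/* ... readl(regs + off) / writel(val, regs + off) ... */
	iounmap(regs);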
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 296da58f3013..cf5bc37c90ac 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -245,6 +245,49 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
} while (pgd++, addr = next, addr != end);
}
+static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
+ unsigned long addr,
+ unsigned long end)
+{
+ p4d_t *p4d;
+ unsigned long next;
+ void *p;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+
+ if (p4d_none(*p4d)) {
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+ p4d_populate(&init_mm, p4d, p);
+ }
+ } while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_shallow_populate_pgds(void *start, void *end)
+{
+ unsigned long addr, next;
+ pgd_t *pgd;
+ void *p;
+
+ addr = (unsigned long)start;
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, (unsigned long)end);
+
+ if (pgd_none(*pgd)) {
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+ pgd_populate(&init_mm, pgd, p);
+ }
+
+ /*
+	 * We need to populate p4ds so they get synced when running in
+	 * four-level mode - see sync_global_pgds_l4()
+ */
+ kasan_shallow_populate_p4ds(pgd, addr, next);
+ } while (pgd++, addr = next, addr != (unsigned long)end);
+}
+
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
unsigned long val,
@@ -354,6 +397,24 @@ void __init kasan_init(void)
kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+ kasan_mem_to_shadow((void *)VMALLOC_START));
+
+ /*
+ * If we're in full vmalloc mode, don't back vmalloc space with early
+ * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
+ * the global table and we can populate the lower levels on demand.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_shallow_populate_pgds(
+ kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)VMALLOC_END));
+ else
+ kasan_populate_early_shadow(
+ kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)VMALLOC_END));
+
+ kasan_populate_early_shadow(
+ kasan_mem_to_shadow((void *)VMALLOC_END + 1),
shadow_cpu_entry_begin);
kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9268c12458c8..a03614bd3e1a 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -367,7 +367,7 @@ bool force_dma_unencrypted(struct device *dev)
if (sme_active()) {
u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
- dev->bus_dma_mask);
+ dev->bus_dma_limit);
if (dma_dev_mask <= dma_enc_mask)
return true;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0d09cc5aad61..1b99ad05b117 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1784,7 +1784,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
int _set_memory_uc(unsigned long addr, int numpages)
{
/*
- * for now UC MINUS. see comments in ioremap_nocache()
+ * for now UC MINUS. see comments in ioremap()
* If you really need strong UC use ioremap_uc(), but note
* that you cannot override IO areas with set_memory_*() as
* these helpers cannot work with IO memory.
@@ -1799,7 +1799,7 @@ int set_memory_uc(unsigned long addr, int numpages)
int ret;
/*
- * for now UC MINUS. see comments in ioremap_nocache()
+ * for now UC MINUS. see comments in ioremap()
*/
ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
_PAGE_CACHE_MODE_UC_MINUS, NULL);
diff --git a/arch/x86/mm/pat_interval.c b/arch/x86/mm/pat_interval.c
index 47a1bf30748f..6855362eaf21 100644
--- a/arch/x86/mm/pat_interval.c
+++ b/arch/x86/mm/pat_interval.c
@@ -56,7 +56,7 @@ static struct memtype *memtype_match(u64 start, u64 end, int match_type)
{
struct memtype *match;
- match = memtype_interval_iter_first(&memtype_rbroot, start, end);
+ match = memtype_interval_iter_first(&memtype_rbroot, start, end-1);
while (match != NULL && match->start < end) {
if ((match_type == MEMTYPE_EXACT_MATCH) &&
(match->start == start) && (match->end == end))
@@ -66,7 +66,7 @@ static struct memtype *memtype_match(u64 start, u64 end, int match_type)
(match->start < start) && (match->end == end))
return match;
- match = memtype_interval_iter_next(match, start, end);
+ match = memtype_interval_iter_next(match, start, end-1);
}
return NULL; /* Returns NULL if there is no match */
@@ -79,7 +79,7 @@ static int memtype_check_conflict(u64 start, u64 end,
struct memtype *match;
enum page_cache_mode found_type = reqtype;
- match = memtype_interval_iter_first(&memtype_rbroot, start, end);
+ match = memtype_interval_iter_first(&memtype_rbroot, start, end-1);
if (match == NULL)
goto success;
@@ -89,12 +89,12 @@ static int memtype_check_conflict(u64 start, u64 end,
dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
found_type = match->type;
- match = memtype_interval_iter_next(match, start, end);
+ match = memtype_interval_iter_next(match, start, end-1);
while (match) {
if (match->type != found_type)
goto failure;
- match = memtype_interval_iter_next(match, start, end);
+ match = memtype_interval_iter_next(match, start, end-1);
}
success:
if (newtype)
@@ -160,7 +160,7 @@ struct memtype *memtype_erase(u64 start, u64 end)
struct memtype *memtype_lookup(u64 addr)
{
return memtype_interval_iter_first(&memtype_rbroot, addr,
- addr + PAGE_SIZE);
+ addr + PAGE_SIZE-1);
}
#if defined(CONFIG_DEBUG_FS)
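
The end-1 conversions follow from the interval tree convention: the generated iterators take an inclusive last endpoint, while memtype ranges are half-open [start, end). Passing end-1 keeps a query from matching a neighbouring range that merely touches the boundary. Sketch:

	/* half-open request [0x1000, 0x2000) */
	u64 start = 0x1000, end = 0x2000;

	/* inclusive tree query [start, end - 1]: an entry that begins
	 * exactly at 0x2000 is, correctly, not reported as overlapping */
	match = memtype_interval_iter_first(&memtype_rbroot, start, end - 1);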
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index c806b57d3f22..48bcada5cabe 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -24,6 +24,4 @@ obj-y += bus_numa.o
obj-$(CONFIG_AMD_NB) += amd_bus.o
obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
-ifeq ($(CONFIG_PCI_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PCI_DEBUG) += -DDEBUG
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 9acab6ac28f5..1e59df041456 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -135,7 +135,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev)
* resource so the kernel doesn't attempt to assign
* it later on in pci_assign_unassigned_resources
*/
- for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
bar_r = &dev->resource[bar];
if (bar_r->start == 0 && bar_r->end != 0) {
bar_r->flags = 0;
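
The new loop bound is a spelling fix, not a behaviour change: PCI_STD_NUM_BARS (6) expresses as a count what PCI_STD_RESOURCE_END (5) expressed as a last index, so both forms walk BARs 0..5. The relation can be checked at build time:

	BUILD_BUG_ON(PCI_STD_NUM_BARS != PCI_STD_RESOURCE_END + 1);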
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 527e69b12002..e723559c386a 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -589,6 +589,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
/*
+ * Device [1022:7914]
+ * When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
+ */
+static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
+
+/*
* Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
*
* Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 43867bc85368..00c62115f39c 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -382,7 +382,7 @@ static void pci_fixed_bar_fixup(struct pci_dev *dev)
PCI_DEVFN(2, 2) == dev->devfn)
return;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
dev->resource[i].end = dev->resource[i].start + size - 1;
dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
diff --git a/arch/x86/pci/numachip.c b/arch/x86/pci/numachip.c
index 2e565e65c893..01a085d9135a 100644
--- a/arch/x86/pci/numachip.c
+++ b/arch/x86/pci/numachip.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Numascale NumaConnect-specific PCI code
*
* Copyright (C) 2012 Numascale AS. All rights reserved.
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 6269a175385d..c313d784efab 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -30,7 +30,6 @@ struct sta2x11_ahb_regs { /* saved during suspend */
};
struct sta2x11_mapping {
- u32 amba_base;
int is_suspended;
struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
};
@@ -92,18 +91,6 @@ static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
return pdev->bus->number - instance->bus0;
}
-static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance;
- int ep;
-
- instance = sta2x11_pdev_to_instance(pdev);
- if (!instance)
- return NULL;
- ep = sta2x11_pdev_to_ep(pdev);
- return instance->map + ep;
-}
-
/* This is exported, as some devices need to access the MFD registers */
struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
{
@@ -111,39 +98,6 @@ struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
}
EXPORT_SYMBOL(sta2x11_get_instance);
-
-/**
- * p2a - Translate physical address to STA2x11 AMBA address,
- * used for DMA transfers to STA2x11
- * @p: Physical address
- * @pdev: PCI device (must be hosted within the connext)
- */
-static dma_addr_t p2a(dma_addr_t p, struct pci_dev *pdev)
-{
- struct sta2x11_mapping *map;
- dma_addr_t a;
-
- map = sta2x11_pdev_to_mapping(pdev);
- a = p + map->amba_base;
- return a;
-}
-
-/**
- * a2p - Translate STA2x11 AMBA address to physical address
- * used for DMA transfers from STA2x11
- * @a: STA2x11 AMBA address
- * @pdev: PCI device (must be hosted within the connext)
- */
-static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev)
-{
- struct sta2x11_mapping *map;
- dma_addr_t p;
-
- map = sta2x11_pdev_to_mapping(pdev);
- p = a - map->amba_base;
- return p;
-}
-
/* At setup time, we use our own ops if the device is a ConneXt one */
static void sta2x11_setup_pdev(struct pci_dev *pdev)
{
@@ -151,9 +105,6 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev)
if (!instance) /* either a sta2x11 bridge or another ST device */
return;
- pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
- pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
- pdev->dev.archdata.is_sta2x11 = true;
/* We must enable all devices as master, for audio DMA to work */
pci_set_master(pdev);
@@ -161,61 +112,6 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
/*
- * The following three functions are exported (used in swiotlb: FIXME)
- */
-/**
- * dma_capable - Check if device can manage DMA transfers (FIXME: kill it)
- * @dev: device for a PCI device
- * @addr: DMA address
- * @size: DMA size
- */
-bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- struct sta2x11_mapping *map;
-
- if (!dev->archdata.is_sta2x11) {
- if (!dev->dma_mask)
- return false;
- return addr + size - 1 <= *dev->dma_mask;
- }
-
- map = sta2x11_pdev_to_mapping(to_pci_dev(dev));
-
- if (!map || (addr < map->amba_base))
- return false;
- if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) {
- return false;
- }
-
- return true;
-}
-
-/**
- * __phys_to_dma - Return the DMA AMBA address used for this STA2x11 device
- * @dev: device for a PCI device
- * @paddr: Physical address
- */
-dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- if (!dev->archdata.is_sta2x11)
- return paddr;
- return p2a(paddr, to_pci_dev(dev));
-}
-
-/**
- * dma_to_phys - Return the physical address used for this STA2x11 DMA address
- * @dev: device for a PCI device
- * @daddr: STA2x11 AMBA DMA address
- */
-phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- if (!dev->archdata.is_sta2x11)
- return daddr;
- return a2p(daddr, to_pci_dev(dev));
-}
-
-
-/*
* At boot we must set up the mappings for the pcie-to-amba bridge.
* It involves device access, and the same happens at suspend/resume time
*/
@@ -234,12 +130,22 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
/* At probe time, enable mapping for each endpoint, using the pdev */
static void sta2x11_map_ep(struct pci_dev *pdev)
{
- struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
+ struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
+ struct device *dev = &pdev->dev;
+ u32 amba_base, max_amba_addr;
int i;
- if (!map)
+ if (!instance)
return;
- pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base);
+
+ pci_read_config_dword(pdev, AHB_BASE(0), &amba_base);
+ max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1;
+
+ dev->dma_pfn_offset = PFN_DOWN(-amba_base);
+
+ dev->bus_dma_limit = max_amba_addr;
+ pci_set_consistent_dma_mask(pdev, max_amba_addr);
+ pci_set_dma_mask(pdev, max_amba_addr);
/* Configure AHB mapping */
pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
@@ -253,13 +159,24 @@ static void sta2x11_map_ep(struct pci_dev *pdev)
dev_info(&pdev->dev,
"sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
- sta2x11_pdev_to_ep(pdev), map->amba_base,
- map->amba_base + STA2X11_AMBA_SIZE - 1);
+ sta2x11_pdev_to_ep(pdev), amba_base, max_amba_addr);
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
#ifdef CONFIG_PM /* Some register values must be saved and restored */
+static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
+{
+ struct sta2x11_instance *instance;
+ int ep;
+
+ instance = sta2x11_pdev_to_instance(pdev);
+ if (!instance)
+ return NULL;
+ ep = sta2x11_pdev_to_ep(pdev);
+ return instance->map + ep;
+}
+
static void suspend_mapping(struct pci_dev *pdev)
{
struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
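
Instead of the removed __phys_to_dma()/__dma_to_phys() overrides, the device now describes its window to the generic DMA code: with dma_pfn_offset = PFN_DOWN(-amba_base), dma-direct's translation yields dma = paddr + amba_base in 32-bit arithmetic, and bus_dma_limit caps addresses at max_amba_addr. A sketch of the core's computation (per the 5.5-era dma-direct helper):

	/* conceptual form of the generic phys-to-dma translation */
	dma_addr_t dma = (dma_addr_t)paddr -
			 ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);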
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 425e025341db..38d44f36d5ed 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -128,6 +128,9 @@ void __init efi_find_mirror(void)
efi_memory_desc_t *md;
u64 mirror_size = 0, total_size = 0;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
@@ -145,14 +148,18 @@ void __init efi_find_mirror(void)
/*
* Tell the kernel about the EFI memory map. This might include
- * more than the max 128 entries that can fit in the e820 legacy
- * (zeropage) memory map.
+ * more than the max 128 entries that can fit in the passed in e820
+ * legacy (zeropage) memory map, but the kernel's e820 table can hold
+ * E820_MAX_ENTRIES.
*/
static void __init do_add_efi_memmap(void)
{
efi_memory_desc_t *md;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
@@ -164,7 +171,10 @@ static void __init do_add_efi_memmap(void)
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
- if (md->attribute & EFI_MEMORY_WB)
+ if (efi_soft_reserve_enabled()
+ && (md->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else if (md->attribute & EFI_MEMORY_WB)
e820_type = E820_TYPE_RAM;
else
e820_type = E820_TYPE_RESERVED;
@@ -190,11 +200,36 @@ static void __init do_add_efi_memmap(void)
e820_type = E820_TYPE_RESERVED;
break;
}
+
e820__range_add(start, size, e820_type);
}
e820__update_table(e820_table);
}
+/*
+ * Given add_efi_memmap defaults to 0 and there is no alternative
+ * e820 mechanism for soft-reserved memory, import the full EFI memory
+ * map if soft reservations are present and enabled. Otherwise, the
+ * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
+ * the efi=nosoftreserve option.
+ */
+static bool do_efi_soft_reserve(void)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return false;
+
+ if (!efi_soft_reserve_enabled())
+ return false;
+
+ for_each_efi_memory_desc(md)
+ if (md->type == EFI_CONVENTIONAL_MEMORY &&
+ (md->attribute & EFI_MEMORY_SP))
+ return true;
+ return false;
+}
+
int __init efi_memblock_x86_reserve_range(void)
{
struct efi_info *e = &boot_params.efi_info;
@@ -224,9 +259,11 @@ int __init efi_memblock_x86_reserve_range(void)
if (rv)
return rv;
- if (add_efi_memmap)
+ if (add_efi_memmap || do_efi_soft_reserve())
do_add_efi_memmap();
+ efi_fake_memmap_early();
+
WARN(efi.memmap.desc_version != 1,
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
@@ -779,6 +816,15 @@ static bool should_map_region(efi_memory_desc_t *md)
return false;
/*
+ * EFI specific purpose memory may be reserved by default
+ * depending on kernel config and boot options.
+ */
+ if (md->type == EFI_CONVENTIONAL_MEMORY &&
+ efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
* Map all of RAM so that we can access arguments in the 1:1
* mapping when making EFI runtime calls.
*/
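
For reference, the opt-out mentioned in the comments above is a boot parameter; soft reservations are disabled on the kernel command line with:

	efi=nosoftreserve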
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 3b9fd679cea9..7675cf754d90 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -320,6 +320,9 @@ void __init efi_reserve_boot_services(void)
{
efi_memory_desc_t *md;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
for_each_efi_memory_desc(md) {
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index e1a32062a375..f067ac780ba7 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/pm.h>
-#include <linux/mfd/core.h>
#include <linux/suspend.h>
#include <linux/olpc-ec.h>
@@ -120,16 +119,11 @@ static const struct platform_suspend_ops xo1_suspend_ops = {
static int xo1_pm_probe(struct platform_device *pdev)
{
struct resource *res;
- int err;
/* don't run on non-XOs */
if (!machine_is_olpc())
return -ENODEV;
- err = mfd_cell_enable(pdev);
- if (err)
- return err;
-
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
@@ -152,8 +146,6 @@ static int xo1_pm_probe(struct platform_device *pdev)
static int xo1_pm_remove(struct platform_device *pdev)
{
- mfd_cell_disable(pdev);
-
if (strcmp(pdev->name, "cs5535-pms") == 0)
pms_base = 0;
else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 99a28ce2244c..933dd4fe3a97 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -15,7 +15,6 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>
-#include <linux/mfd/core.h>
#include <linux/power_supply.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
@@ -537,10 +536,6 @@ static int xo1_sci_probe(struct platform_device *pdev)
if (!machine_is_olpc())
return -ENODEV;
- r = mfd_cell_enable(pdev);
- if (r)
- return r;
-
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
@@ -605,7 +600,6 @@ err_ebook:
static int xo1_sci_remove(struct platform_device *pdev)
{
- mfd_cell_disable(pdev);
free_irq(sci_irq, pdev);
cancel_work_sync(&sci_work);
free_ec_sci();
diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c
index 891868756a51..2112b8d14668 100644
--- a/arch/x86/um/vdso/um_vdso.c
+++ b/arch/x86/um/vdso/um_vdso.c
@@ -13,7 +13,7 @@
#include <linux/getcpu.h>
#include <asm/unistd.h>
-int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
long ret;
@@ -22,10 +22,10 @@ int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
return ret;
}
-int clock_gettime(clockid_t, struct timespec *)
+int clock_gettime(clockid_t, struct __kernel_old_timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
long ret;
@@ -34,10 +34,10 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
return ret;
}
-int gettimeofday(struct timeval *, struct timezone *)
+int gettimeofday(struct __kernel_old_timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
-time_t __vdso_time(time_t *t)
+__kernel_old_time_t __vdso_time(__kernel_old_time_t *t)
{
long secs;
@@ -47,7 +47,7 @@ time_t __vdso_time(time_t *t)
return secs;
}
-time_t time(time_t *t) __attribute__((weak, alias("__vdso_time")));
+__kernel_old_time_t time(__kernel_old_time_t *t) __attribute__((weak, alias("__vdso_time")));
long
__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
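
The prototypes above move to the y2038-era "old" types. For reference, the UAPI definition these names resolve to (include/uapi/linux/time_types.h) keeps the layout of the classic struct timespec:

	struct __kernel_old_timespec {
		__kernel_old_time_t	tv_sec;		/* seconds */
		long			tv_nsec;	/* nanoseconds */
	};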
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index a8e7beb6b7b5..4a3fa295d8fe 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -3,8 +3,10 @@ config XTENSA
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_BINFMT_FLAT if !MMU
- select ARCH_HAS_SYNC_DMA_FOR_CPU
- select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_DMA_PREP_COHERENT if MMU
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
+ select ARCH_HAS_UNCACHED_SEGMENT if MMU
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_FRAME_POINTERS
@@ -19,8 +21,8 @@ config XTENSA
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_STRNCPY_FROM_USER if KASAN
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KASAN if MMU
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
@@ -213,151 +215,6 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
-config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
- bool "Initialize Xtensa MMU inside the Linux kernel code"
- depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
- default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
- help
- Earlier version initialized the MMU in the exception vector
- before jumping to _startup in head.S and had an advantage that
- it was possible to place a software breakpoint at 'reset' and
- then enter your normal kernel breakpoints once the MMU was mapped
- to the kernel mappings (0XC0000000).
-
- This unfortunately won't work for U-Boot and likely also wont
- work for using KEXEC to have a hot kernel ready for doing a
- KDUMP.
-
- So now the MMU is initialized in head.S but it's necessary to
- use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
- xt-gdb can't place a Software Breakpoint in the 0XD region prior
- to mapping the MMU and after mapping even if the area of low memory
- was mapped gdb wouldn't remove the breakpoint on hitting it as the
- PC wouldn't match. Since Hardware Breakpoints are recommended for
- Linux configurations it seems reasonable to just assume they exist
- and leave this older mechanism for unfortunate souls that choose
- not to follow Tensilica's recommendation.
-
- Selecting this will cause U-Boot to set the KERNEL Load and Entry
- address at 0x00003000 instead of the mapped std of 0xD0003000.
-
- If in doubt, say Y.
-
-config MEMMAP_CACHEATTR
- hex "Cache attributes for the memory address space"
- depends on !MMU
- default 0x22222222
- help
- These cache attributes are set up for noMMU systems. Each hex digit
- specifies cache attributes for the corresponding 512MB memory
- region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
- bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
-
- Cache attribute values are specific for the MMU type.
- For region protection MMUs:
- 1: WT cached,
- 2: cache bypass,
- 4: WB cached,
- f: illegal.
- For ful MMU:
- bit 0: executable,
- bit 1: writable,
- bits 2..3:
- 0: cache bypass,
- 1: WB cache,
- 2: WT cache,
- 3: special (c and e are illegal, f is reserved).
- For MPU:
- 0: illegal,
- 1: WB cache,
- 2: WB, no-write-allocate cache,
- 3: WT cache,
- 4: cache bypass.
-
-config KSEG_PADDR
- hex "Physical address of the KSEG mapping"
- depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
- default 0x00000000
- help
- This is the physical address where KSEG is mapped. Please refer to
- the chosen KSEG layout help for the required address alignment.
- Unpacked kernel image (including vectors) must be located completely
- within KSEG.
- Physical memory below this address is not available to linux.
-
- If unsure, leave the default value here.
-
-config KERNEL_LOAD_ADDRESS
- hex "Kernel load address"
- default 0x60003000 if !MMU
- default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
- default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
- help
- This is the address where the kernel is loaded.
- It is virtual address for MMUv2 configurations and physical address
- for all other configurations.
-
- If unsure, leave the default value here.
-
-config VECTORS_OFFSET
- hex "Kernel vectors offset"
- default 0x00003000
- help
- This is the offset of the kernel image from the relocatable vectors
- base.
-
- If unsure, leave the default value here.
-
-choice
- prompt "KSEG layout"
- depends on MMU
- default XTENSA_KSEG_MMU_V2
-
-config XTENSA_KSEG_MMU_V2
- bool "MMUv2: 128MB cached + 128MB uncached"
- help
- MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
- at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
- without cache.
- KSEG_PADDR must be aligned to 128MB.
-
-config XTENSA_KSEG_256M
- bool "256MB cached + 256MB uncached"
- depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
- help
- TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
- with cache and to 0xc0000000 without cache.
- KSEG_PADDR must be aligned to 256MB.
-
-config XTENSA_KSEG_512M
- bool "512MB cached + 512MB uncached"
- depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
- help
- TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
- with cache and to 0xc0000000 without cache.
- KSEG_PADDR must be aligned to 256MB.
-
-endchoice
-
-config HIGHMEM
- bool "High Memory Support"
- depends on MMU
- help
- Linux can use the full amount of RAM in the system by
- default. However, the default MMUv2 setup only maps the
- lowermost 128 MB of memory linearly to the areas starting
- at 0xd0000000 (cached) and 0xd8000000 (uncached).
- When there are more than 128 MB memory in the system not
- all of it can be "permanently mapped" by the kernel.
- The physical memory that's not permanently mapped is called
- "high memory".
-
- If you are compiling a kernel which will never run on a
- machine with more than 128 MB total physical RAM, answer
- N here.
-
- If unsure, say Y.
-
config FAST_SYSCALL_XTENSA
bool "Enable fast atomic syscalls"
default n
@@ -444,6 +301,9 @@ config XTENSA_CALIBRATE_CCOUNT
config SERIAL_CONSOLE
def_bool n
+config PLATFORM_HAVE_XIP
+ def_bool n
+
menu "Platform options"
choice
@@ -470,6 +330,7 @@ config XTENSA_PLATFORM_XTFPGA
select PLATFORM_WANT_DEFAULT_MEM if !MMU
select SERIAL_CONSOLE
select XTENSA_CALIBRATE_CCOUNT
+ select PLATFORM_HAVE_XIP
help
XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
This hardware is capable of running a full Linux distribution.
@@ -561,34 +422,6 @@ config SIMDISK1_FILENAME
Another simulated disk in a host file for a buildroot-independent
storage.
-config FORCE_MAX_ZONEORDER
- int "Maximum zone order"
- default "11"
- help
- The kernel memory allocator divides physically contiguous memory
- blocks into "zones", where each zone is a power of two number of
- pages. This option selects the largest power of two that the kernel
- keeps in the memory allocator. If you need to allocate very large
- blocks of physically contiguous memory, then you may need to
- increase this value.
-
- This config option is actually maximum order plus one. For example,
- a value of 11 means that the largest free memory block is 2^10 pages.
-
-config PLATFORM_WANT_DEFAULT_MEM
- def_bool n
-
-config DEFAULT_MEM_START
- hex
- prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
- default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
- default 0x00000000
- help
- This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
- in noMMU configurations.
-
- If unsure, leave the default value here.
-
config XTFPGA_LCD
bool "Enable XTFPGA LCD driver"
depends on XTENSA_PLATFORM_XTFPGA
@@ -619,6 +452,221 @@ config XTFPGA_LCD_8BIT_ACCESS
only be used with 8-bit interface. Please consult prototyping user
guide for your board for the correct interface width.
+comment "Kernel memory layout"
+
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ bool "Initialize Xtensa MMU inside the Linux kernel code"
+ depends on !XTENSA_VARIANT_FSF && !XTENSA_VARIANT_DC232B
+ default y if XTENSA_VARIANT_DC233C || XTENSA_VARIANT_CUSTOM
+ help
+	  Earlier versions initialized the MMU in the exception vector
+	  before jumping to _startup in head.S, with the advantage that
+ it was possible to place a software breakpoint at 'reset' and
+ then enter your normal kernel breakpoints once the MMU was mapped
+ to the kernel mappings (0XC0000000).
+
+	  This unfortunately won't work for U-Boot and likely also won't
+ work for using KEXEC to have a hot kernel ready for doing a
+ KDUMP.
+
+ So now the MMU is initialized in head.S but it's necessary to
+ use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+ xt-gdb can't place a Software Breakpoint in the 0XD region prior
+ to mapping the MMU and after mapping even if the area of low memory
+ was mapped gdb wouldn't remove the breakpoint on hitting it as the
+ PC wouldn't match. Since Hardware Breakpoints are recommended for
+ Linux configurations it seems reasonable to just assume they exist
+ and leave this older mechanism for unfortunate souls that choose
+ not to follow Tensilica's recommendation.
+
+ Selecting this will cause U-Boot to set the KERNEL Load and Entry
+ address at 0x00003000 instead of the mapped std of 0xD0003000.
+
+ If in doubt, say Y.
+
+config XIP_KERNEL
+ bool "Kernel Execute-In-Place from ROM"
+ depends on PLATFORM_HAVE_XIP
+ help
+ Execute-In-Place allows the kernel to run from non-volatile storage
+ directly addressable by the CPU, such as NOR flash. This saves RAM
+ space since the text section of the kernel is not loaded from flash
+ to RAM. Read-write sections, such as the data section and stack,
+ are still copied to RAM. The XIP kernel is not compressed since
+ it has to run directly from flash, so it will take more space to
+ store it. The flash address used to link the kernel object files,
+ and for storing it, is configuration dependent. Therefore, if you
+ say Y here, you must know the proper physical address where to
+ store the kernel image depending on your own flash memory usage.
+
+ Also note that the make target becomes "make xipImage" rather than
+ "make Image" or "make uImage". The final kernel binary to put in
+ ROM memory will be arch/xtensa/boot/xipImage.
+
+ If unsure, say N.
+
+config MEMMAP_CACHEATTR
+ hex "Cache attributes for the memory address space"
+ depends on !MMU
+ default 0x22222222
+ help
+ These cache attributes are set up for noMMU systems. Each hex digit
+ specifies cache attributes for the corresponding 512MB memory
+ region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
+ bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
+
+ Cache attribute values are specific for the MMU type.
+ For region protection MMUs:
+ 1: WT cached,
+ 2: cache bypass,
+ 4: WB cached,
+ f: illegal.
+	  For full MMU:
+ bit 0: executable,
+ bit 1: writable,
+ bits 2..3:
+ 0: cache bypass,
+ 1: WB cache,
+ 2: WT cache,
+ 3: special (c and e are illegal, f is reserved).
+ For MPU:
+ 0: illegal,
+ 1: WB cache,
+ 2: WB, no-write-allocate cache,
+ 3: WT cache,
+ 4: cache bypass.
+
+config KSEG_PADDR
+ hex "Physical address of the KSEG mapping"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
+ default 0x00000000
+ help
+ This is the physical address where KSEG is mapped. Please refer to
+ the chosen KSEG layout help for the required address alignment.
+ Unpacked kernel image (including vectors) must be located completely
+ within KSEG.
+ Physical memory below this address is not available to linux.
+
+ If unsure, leave the default value here.
+
+config KERNEL_VIRTUAL_ADDRESS
+ hex "Kernel virtual address"
+ depends on MMU && XIP_KERNEL
+ default 0xd0003000
+ help
+ This is the virtual address where the XIP kernel is mapped.
+	  The XIP kernel may be mapped into the KSEG or KIO region; the virtual
+	  address provided here must match the kernel load address provided in
+	  KERNEL_LOAD_ADDRESS.
+
+config KERNEL_LOAD_ADDRESS
+ hex "Kernel load address"
+ default 0x60003000 if !MMU
+ default 0x00003000 if MMU && INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ default 0xd0003000 if MMU && !INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ This is the address where the kernel is loaded.
+ It is virtual address for MMUv2 configurations and physical address
+ for all other configurations.
+
+ If unsure, leave the default value here.
+
+config VECTORS_OFFSET
+ hex "Kernel vectors offset"
+ default 0x00003000
+ depends on !XIP_KERNEL
+ help
+ This is the offset of the kernel image from the relocatable vectors
+ base.
+
+ If unsure, leave the default value here.
+
+config XIP_DATA_ADDR
+ hex "XIP kernel data virtual address"
+ depends on XIP_KERNEL
+ default 0x00000000
+ help
+ This is the virtual address where XIP kernel data is copied.
+ It must be within KSEG if MMU is used.
+
+config PLATFORM_WANT_DEFAULT_MEM
+ def_bool n
+
+config DEFAULT_MEM_START
+ hex
+ prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
+ default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
+ default 0x00000000
+ help
+ This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
+ in noMMU configurations.
+
+ If unsure, leave the default value here.
+
+choice
+ prompt "KSEG layout"
+ depends on MMU
+ default XTENSA_KSEG_MMU_V2
+
+config XTENSA_KSEG_MMU_V2
+ bool "MMUv2: 128MB cached + 128MB uncached"
+ help
+ MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
+ at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
+ without cache.
+ KSEG_PADDR must be aligned to 128MB.
+
+config XTENSA_KSEG_256M
+ bool "256MB cached + 256MB uncached"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
+ with cache and to 0xc0000000 without cache.
+ KSEG_PADDR must be aligned to 256MB.
+
+config XTENSA_KSEG_512M
+ bool "512MB cached + 512MB uncached"
+ depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ help
+ TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
+ with cache and to 0xc0000000 without cache.
+ KSEG_PADDR must be aligned to 256MB.
+
+endchoice
+
+config HIGHMEM
+ bool "High Memory Support"
+ depends on MMU
+ help
+ Linux can use the full amount of RAM in the system by
+ default. However, the default MMUv2 setup only maps the
+ lowermost 128 MB of memory linearly to the areas starting
+ at 0xd0000000 (cached) and 0xd8000000 (uncached).
+ When there are more than 128 MB memory in the system not
+ all of it can be "permanently mapped" by the kernel.
+ The physical memory that's not permanently mapped is called
+ "high memory".
+
+ If you are compiling a kernel which will never run on a
+ machine with more than 128 MB total physical RAM, answer
+ N here.
+
+ If unsure, say Y.
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 11 means that the largest free memory block is 2^10 pages.
+
endmenu
menu "Power management options"
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index 39de98e20018..83cc8d12fa0e 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -31,3 +31,10 @@ config S32C1I_SELFTEST
It is easy to make wrong hardware configuration, this test should catch it early.
Say 'N' on stable hardware.
+
+config PRINT_STACK_DEPTH
+ int "Stack depth to print" if DEBUG_KERNEL
+ default 64
+ help
+ This option allows you to set the stack depth that the kernel
+ prints in stack traces.
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 1542018c9e57..67a7d151d1e7 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -87,7 +87,7 @@ drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
boot := arch/xtensa/boot
-all Image zImage uImage: vmlinux
+all Image zImage uImage xipImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
archheaders:
@@ -97,4 +97,5 @@ define archhelp
@echo '* Image - Kernel ELF image with reset vector'
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
@echo '* uImage - U-Boot wrapped image'
+ @echo ' xipImage - XIP image'
endef
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 294846117fc2..efb91bfda2b4 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -29,6 +29,7 @@ all: $(boot-y)
Image: boot-elf
zImage: boot-redboot
uImage: $(obj)/uImage
+xipImage: $(obj)/xipImage
boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
@@ -50,3 +51,7 @@ UIMAGE_COMPRESSION = gzip
$(obj)/uImage: vmlinux.bin.gz FORCE
$(call if_changed,uimage)
$(Q)$(kecho) ' Kernel: $@ is ready'
+
+$(obj)/xipImage: vmlinux FORCE
+ $(call if_changed,objcopy)
+ $(Q)$(kecho) ' Kernel: $@ is ready'
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index f378e56f9ce6..b6367af71d65 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -16,7 +16,6 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index 62f32a902568..f4eef6decd2a 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -21,7 +21,6 @@ CONFIG_INITRAMFS_SOURCE="$$KERNEL_INITRAMFS_SOURCE"
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 8bebe07f1060..c925165cf760 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -16,7 +16,6 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 4bb5b76d9524..d1c01742baf4 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PCI is not set
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index 933ab2adf434..380e366730d5 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -21,7 +21,6 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PERF_EVENTS=y
CONFIG_MODULES=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index e29c5b179a5b..d46b58f34098 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -16,7 +16,6 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig
index bfc45a138e72..4fddd8512350 100644
--- a/arch/xtensa/configs/virt_defconfig
+++ b/arch/xtensa/configs/virt_defconfig
@@ -15,7 +15,6 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PERF_EVENTS=y
CONFIG_XTENSA_VARIANT_DC233C=y
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
new file mode 100644
index 000000000000..f9e85c082afc
--- /dev/null
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -0,0 +1,119 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_XTENSA_VARIANT_DC233C=y
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_XIP_KERNEL=y
+CONFIG_XIP_DATA_ADDR=0xd0000000
+CONFIG_KERNEL_VIRTUAL_ADDRESS=0xe6000000
+CONFIG_KERNEL_LOAD_ADDRESS=0xf6000000
+CONFIG_XTENSA_KSEG_512M=y
+CONFIG_HIGHMEM=y
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB_SOURCE="kc705"
+# CONFIG_PARSE_BOOTPARAM is not set
+CONFIG_OPROFILE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+# CONFIG_S32C1I_SELFTEST is not set
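
Assuming a KC705-style target, the new configuration pairs with the xipImage target added above; the cross-compiler prefix below is illustrative:

	make ARCH=xtensa xip_kc705_defconfig
	make ARCH=xtensa CROSS_COMPILE=xtensa-linux- xipImage

The image to program into flash ends up in arch/xtensa/boot/xipImage.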
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index ffa0cf7f66c3..3acc31e55e02 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += exec.h
generic-y += extable.h
generic-y += fb.h
generic-y += hardirq.h
+generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
@@ -30,6 +31,7 @@ generic-y += qspinlock.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += user.h
generic-y += vga.h
generic-y += word-at-a-time.h
generic-y += xor.h
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7b00d26f472e..3e7c6134ed32 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -64,13 +64,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
} \
@@ -82,14 +82,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- " " #op " %0, %1, %2\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -103,13 +103,13 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32ex %1, %3\n" \
- " " #op " %0, %1, %2\n" \
- " s32ex %0, %3\n" \
- " getex %0\n" \
- " beqz %0, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32ex %[result], %[addr]\n" \
+ " getex %[result]\n" \
+ " beqz %[result], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp) \
+ : [i] "a" (i), [addr] "a" (v) \
: "memory" \
); \
\
@@ -124,13 +124,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
} \
@@ -142,14 +143,15 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- " " #op " %0, %0, %2\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
\
@@ -163,13 +165,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
int result; \
\
__asm__ __volatile__( \
- "1: l32i %1, %3, 0\n" \
- " wsr %1, scompare1\n" \
- " " #op " %0, %1, %2\n" \
- " s32c1i %0, %3, 0\n" \
- " bne %0, %1, 1b\n" \
- : "=&a" (result), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ "1: l32i %[tmp], %[mem]\n" \
+ " wsr %[tmp], scompare1\n" \
+ " " #op " %[result], %[tmp], %[i]\n" \
+ " s32c1i %[result], %[mem]\n" \
+ " bne %[result], %[tmp], 1b\n" \
+ : [result] "=&a" (result), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "memory" \
); \
\
@@ -184,14 +187,14 @@ static inline void atomic_##op(int i, atomic_t * v) \
unsigned int vval; \
\
__asm__ __volatile__( \
- " rsil a15, "__stringify(TOPLEVEL)"\n"\
- " l32i %0, %2, 0\n" \
- " " #op " %0, %0, %1\n" \
- " s32i %0, %2, 0\n" \
+ " rsil a15, "__stringify(TOPLEVEL)"\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
} \
@@ -203,13 +206,13 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
- " l32i %0, %2, 0\n" \
- " " #op " %0, %0, %1\n" \
- " s32i %0, %2, 0\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[result], %[result], %[i]\n" \
+ " s32i %[result], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
\
@@ -223,13 +226,14 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
\
__asm__ __volatile__( \
" rsil a15,"__stringify(TOPLEVEL)"\n" \
- " l32i %0, %3, 0\n" \
- " " #op " %1, %0, %2\n" \
- " s32i %1, %3, 0\n" \
+ " l32i %[result], %[mem]\n" \
+ " " #op " %[tmp], %[result], %[i]\n" \
+ " s32i %[tmp], %[mem]\n" \
" wsr a15, ps\n" \
" rsync\n" \
- : "=&a" (vval), "=&a" (tmp) \
- : "a" (i), "a" (v) \
+ : [result] "=&a" (vval), [tmp] "=&a" (tmp), \
+ [mem] "+m" (*v) \
+ : [i] "a" (i) \
: "a15", "memory" \
); \
\
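
The atomic.h conversion above is mechanical: positional %0..%3 operands become named [result], [tmp], [i] and [mem]/[addr] references, and a "+m" (*v) memory operand replaces a raw address register where possible so the compiler tracks the access. The idiom, reduced to a minimal Xtensa example (illustrative, not from the tree):

	static inline int add_one(int x)
	{
		int out;

		__asm__("addi %[out], %[in], 1"		/* out = in + 1 */
			: [out] "=a" (out)
			: [in] "a" (x));
		return out;
	}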
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index be8b2be5a98b..3f71d364ba90 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -98,247 +98,112 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " or %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " and %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (~mask), "a" (p)
- : "memory");
-}
-
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %0, %2\n"
- " xor %0, %0, %1\n"
- " s32ex %0, %2\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " or %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return value & mask;
-}
-
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " and %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
-
- return value & mask;
-}
-
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32ex %1, %3\n"
- " xor %0, %1, %2\n"
- " s32ex %0, %3\n"
- " getex %0\n"
- " beqz %0, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return value & mask;
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[tmp], %[addr]\n" \
+ " "insn" %[tmp], %[tmp], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %[value], %[addr]\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32ex %[tmp], %[addr]\n" \
+ " getex %[tmp]\n" \
+ " beqz %[tmp], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value) \
+ : [mask] "a" (inv mask), [addr] "a" (p) \
+ : "memory"); \
+ \
+ return value & mask; \
}
#elif XCHAL_HAVE_S32C1I
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-}
-
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
+#define BIT_OP(op, insn, inv) \
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+}
+
+#define TEST_AND_BIT_OP(op, insn, inv) \
+static inline int \
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+{ \
+ unsigned long tmp, value; \
+ unsigned long mask = 1UL << (bit & 31); \
+ \
+ p += bit >> 5; \
+ \
+ __asm__ __volatile__( \
+ "1: l32i %[value], %[mem]\n" \
+ " wsr %[value], scompare1\n" \
+ " "insn" %[tmp], %[value], %[mask]\n" \
+ " s32c1i %[tmp], %[mem]\n" \
+ " bne %[tmp], %[value], 1b\n" \
+ : [tmp] "=&a" (tmp), [value] "=&a" (value), \
+ [mem] "+m" (*p) \
+ : [mask] "a" (inv mask) \
+ : "memory"); \
+ \
+ return tmp & mask; \
}
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " xor %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-}
+#else
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " or %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#define BIT_OP(op, insn, inv)
+#define TEST_AND_BIT_OP(op, insn, inv)
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " and %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (~mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#include <asm-generic/bitops/atomic.h>
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
- unsigned long tmp, value;
- unsigned long mask = 1UL << (bit & 31);
-
- p += bit >> 5;
-
- __asm__ __volatile__(
- "1: l32i %1, %3, 0\n"
- " wsr %1, scompare1\n"
- " xor %0, %1, %2\n"
- " s32c1i %0, %3, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (tmp), "=&a" (value)
- : "a" (mask), "a" (p)
- : "memory");
-
- return tmp & mask;
-}
+#endif /* XCHAL_HAVE_S32C1I */
-#else
+#define BIT_OPS(op, insn, inv) \
+ BIT_OP(op, insn, inv) \
+ TEST_AND_BIT_OP(op, insn, inv)
-#include <asm-generic/bitops/atomic.h>
+BIT_OPS(set, "or", )
+BIT_OPS(clear, "and", ~)
+BIT_OPS(change, "xor", )
-#endif /* XCHAL_HAVE_S32C1I */
+#undef BIT_OPS
+#undef BIT_OP
+#undef TEST_AND_BIT_OP
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
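The bitops.h rewrite above replaces six hand-written, nearly identical functions with two generator macros plus three BIT_OPS() instantiations; the bitwise instruction and the optional ~ inversion are passed in as macro arguments. A user-space sketch of the same generator pattern, with plain C arithmetic standing in for the atomic assembly (the _demo names are illustrative):

	#include <stdio.h>

	#define BIT_OP(op, expr)						\
	static unsigned long op##_bit_demo(unsigned int bit, unsigned long w)	\
	{									\
		unsigned long mask = 1UL << (bit & 31);				\
		return expr;							\
	}

	BIT_OP(set, w | mask)
	BIT_OP(clear, w & ~mask)
	BIT_OP(change, w ^ mask)

	#undef BIT_OP

	int main(void)
	{
		printf("%lx\n", set_bit_demo(3, 0));	/* 8 */
		printf("%lx\n", clear_bit_demo(3, 8));	/* 0 */
		printf("%lx\n", change_bit_demo(0, 1));	/* 0 */
		return 0;
	}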
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index b21fd133ff62..54e147ac26bf 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -31,4 +31,10 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+/*
+ * R/O after init is actually writable, so it cannot go to .rodata
+ * according to the vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
#endif /* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 7ccc5cbf441b..a175f8aec3fb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -27,25 +27,25 @@ __cmpxchg_u32(volatile int *p, int old, int new)
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32ex %0, %3\n"
- " bne %0, %4, 2f\n"
- " mov %1, %2\n"
- " s32ex %1, %3\n"
- " getex %1\n"
- " beqz %1, 1b\n"
+ "1: l32ex %[result], %[addr]\n"
+ " bne %[result], %[cmp], 2f\n"
+ " mov %[tmp], %[new]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
"2:\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (new), "a" (p), "a" (old)
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
: "memory"
);
return result;
#elif XCHAL_HAVE_S32C1I
__asm__ __volatile__(
- " wsr %2, scompare1\n"
- " s32c1i %0, %1, 0\n"
- : "+a" (new)
- : "a" (p), "a" (old)
+ " wsr %[cmp], scompare1\n"
+ " s32c1i %[new], %[mem]\n"
+ : [new] "+a" (new), [mem] "+m" (*p)
+ : [cmp] "a" (old)
: "memory"
);
@@ -53,14 +53,14 @@ __cmpxchg_u32(volatile int *p, int old, int new)
#else
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
- " l32i %0, %1, 0\n"
- " bne %0, %2, 1f\n"
- " s32i %3, %1, 0\n"
+ " l32i %[old], %[mem]\n"
+ " bne %[old], %[cmp], 1f\n"
+ " s32i %[new], %[mem]\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
- : "=&a" (old)
- : "a" (p), "a" (old), "r" (new)
+ : [old] "=&a" (old), [mem] "+m" (*p)
+ : [cmp] "a" (old), [new] "r" (new)
: "a15", "memory");
return old;
#endif
@@ -129,13 +129,13 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32ex %0, %3\n"
- " mov %1, %2\n"
- " s32ex %1, %3\n"
- " getex %1\n"
- " beqz %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (val), "a" (m)
+ "1: l32ex %[result], %[addr]\n"
+ " mov %[tmp], %[val]\n"
+ " s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp)
+ : [val] "a" (val), [addr] "a" (m)
: "memory"
);
@@ -143,13 +143,14 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#elif XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
- "1: l32i %1, %2, 0\n"
- " mov %0, %3\n"
- " wsr %1, scompare1\n"
- " s32c1i %0, %2, 0\n"
- " bne %0, %1, 1b\n"
- : "=&a" (result), "=&a" (tmp)
- : "a" (m), "a" (val)
+ "1: l32i %[tmp], %[mem]\n"
+ " mov %[result], %[val]\n"
+ " wsr %[tmp], scompare1\n"
+ " s32c1i %[result], %[mem]\n"
+ " bne %[result], %[tmp], 1b\n"
+ : [result] "=&a" (result), [tmp] "=&a" (tmp),
+ [mem] "+m" (*m)
+ : [val] "a" (val)
: "memory"
);
return result;
@@ -157,12 +158,12 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(TOPLEVEL)"\n"
- " l32i %0, %1, 0\n"
- " s32i %2, %1, 0\n"
+ " l32i %[tmp], %[mem]\n"
+ " s32i %[val], %[mem]\n"
" wsr a15, ps\n"
" rsync\n"
- : "=&a" (tmp)
- : "a" (m), "a" (val)
+ : [tmp] "=&a" (tmp), [mem] "+m" (*m)
+ : [val] "a" (val)
: "a15", "memory");
return tmp;
#endif
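All three __cmpxchg_u32 variants above keep one contract: write new into *p only when *p still equals old, and return whatever value was actually observed. A single-threaded model of that contract — the real code gets its atomicity from l32ex/s32ex, s32c1i/scompare1, or interrupt masking, none of which this sketch has:

	#include <stdio.h>

	static int cmpxchg_model(volatile int *p, int oldval, int newval)
	{
		int observed = *p;

		if (observed == oldval)
			*p = newval;	/* store only on match */
		return observed;	/* always the value seen */
	}

	int main(void)
	{
		volatile int v = 1;

		printf("%d\n", cmpxchg_model(&v, 1, 2));	/* 1; v becomes 2 */
		printf("%d\n", cmpxchg_model(&v, 1, 3));	/* 2; v stays 2 */
		return 0;
	}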
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 7e25c1b50ac0..cfb8696917e9 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -78,8 +78,10 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
- pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
- (vaddr) \
- )
+ pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
+ (vaddr)), \
+ (vaddr)), \
+ (vaddr)), \
+ (vaddr))
#endif
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 0c4457ca0a85..964611083224 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -43,10 +43,10 @@
#elif XCHAL_HAVE_S32C1I
#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \
- "1: l32i %[oldval], %[addr], 0\n" \
+ "1: l32i %[oldval], %[mem]\n" \
insn "\n" \
" wsr %[oldval], scompare1\n" \
- "2: s32c1i %[newval], %[addr], 0\n" \
+ "2: s32c1i %[newval], %[mem]\n" \
" bne %[newval], %[oldval], 1b\n" \
" movi %[newval], 0\n" \
"3:\n" \
@@ -60,9 +60,9 @@
" .section __ex_table,\"a\"\n" \
" .long 1b, 5b, 2b, 5b\n" \
" .previous\n" \
- : [oldval] "=&r" (old), [newval] "=&r" (ret) \
- : [addr] "r" (uaddr), [oparg] "r" (arg), \
- [fault] "I" (-EFAULT) \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret), \
+ [mem] "+m" (*(uaddr)) \
+ : [oparg] "r" (arg), [fault] "I" (-EFAULT) \
: "memory")
#endif
diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
deleted file mode 100644
index 3ddbea759b2b..000000000000
--- a/arch/xtensa/include/asm/hw_irq.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * include/asm-xtensa/hw_irq.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_HW_IRQ_H
-#define _XTENSA_HW_IRQ_H
-
-#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 3b054d2bede0..e3e1d9a1ef69 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,7 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
+#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/vectors.h>
@@ -183,7 +184,7 @@
#endif
#if XCHAL_HAVE_MPU
- .data
+ __REFCONST
.align 4
.Lattribute_table:
.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 988e08530a5c..54188e69b988 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -32,8 +32,7 @@ void xtensa_iounmap(volatile void __iomem *addr);
/*
* Return the virtual address for the specified bus memory.
*/
-static inline void __iomem *ioremap_nocache(unsigned long offset,
- unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
if (offset >= XCHAL_KIO_PADDR
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
@@ -52,15 +51,6 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
return xtensa_ioremap_cache(offset, size);
}
#define ioremap_cache ioremap_cache
-#define ioremap_nocache ioremap_nocache
-
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
-
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
-{
- return ioremap_nocache(offset, size);
-}
static inline void iounmap(volatile void __iomem *addr)
{
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 9c12babc016c..7cbf68ca7106 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H
+#include <asm/core.h>
#include <asm/types.h>
#ifdef CONFIG_MMU
@@ -65,6 +66,34 @@
#endif
+/* KIO definition */
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR 0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
+#endif
+#define XCHAL_KIO_SIZE 0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
+#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+ return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+/* KERNEL_STACK definition */
+
#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT 13
#else
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 09c56cba442e..f4771c29c7e9 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -169,7 +169,18 @@ static inline unsigned long ___pa(unsigned long va)
if (off >= XCHAL_KSEG_SIZE)
off -= XCHAL_KSEG_SIZE;
+#ifndef CONFIG_XIP_KERNEL
return off + PHYS_OFFSET;
+#else
+ if (off < XCHAL_KSEG_SIZE)
+ return off + PHYS_OFFSET;
+
+ off -= XCHAL_KSEG_SIZE;
+ if (off >= XCHAL_KIO_SIZE)
+ off -= XCHAL_KIO_SIZE;
+
+ return off + XCHAL_KIO_PADDR;
+#endif
}
#define __pa(x) ___pa((unsigned long)(x))
#else
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 3f7fe5a8c286..27ac17c9da09 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -8,7 +8,6 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>
@@ -371,9 +370,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,address) ((pmd_t*)(dir))
-
/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 913826dfa838..f2c48522c5a1 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -65,31 +65,4 @@ extern void platform_calibrate_ccount (void);
*/
void cpu_reset(void) __attribute__((noreturn));
-/*
- * Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following set of functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled. Default implementations do nothing and issue a warning.
- */
-
-/*
- * Check whether p points to a cached memory.
- */
-bool platform_vaddr_cached(const void *p);
-
-/*
- * Check whether p points to an uncached memory.
- */
-bool platform_vaddr_uncached(const void *p);
-
-/*
- * Return pointer to an uncached view of the cached address p.
- */
-void *platform_vaddr_to_uncached(void *p);
-
-/*
- * Return pointer to a cached view of the uncached address p.
- */
-void *platform_vaddr_to_cached(void *p);
-
#endif /* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 7495520d7a3e..6fa903daf2a2 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -195,6 +195,7 @@ struct thread_struct {
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
do { \
+ unsigned long syscall = (regs)->syscall; \
memset((regs), 0, sizeof(*(regs))); \
(regs)->pc = (new_pc); \
(regs)->ps = USER_PS_VALUE; \
@@ -204,7 +205,7 @@ struct thread_struct {
(regs)->depc = 0; \
(regs)->windowbase = 0; \
(regs)->windowstart = 1; \
- (regs)->syscall = NO_SYSCALL; \
+ (regs)->syscall = syscall; \
} while (0)
/* Forward declaration */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 359ab40e935a..f9a671cbf933 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -51,7 +51,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- regs->areg[0] = (long) error ? error : val;
+ regs->areg[2] = (long) error ? error : val;
}
#define SYSCALL_MAX_ARGS 6
@@ -79,7 +79,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->areg[reg[i]] = args[i];
}
-asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(void);
asmlinkage long xtensa_shmat(int, char __user *, int);
asmlinkage long xtensa_fadvise64_64(int, int,
unsigned long long, unsigned long long);
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 3f80386f1883..47b7702aaa40 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -132,13 +132,13 @@ do { \
#define __check_align_1 ""
#define __check_align_2 \
- " _bbci.l %[addr], 0, 1f \n" \
+ " _bbci.l %[mem] * 0, 1f \n" \
" movi %[err], %[efault] \n" \
" _j 2f \n"
#define __check_align_4 \
- " _bbsi.l %[addr], 0, 0f \n" \
- " _bbci.l %[addr], 1, 1f \n" \
+ " _bbsi.l %[mem] * 0, 0f \n" \
+ " _bbci.l %[mem] * 0 + 1, 1f \n" \
"0: movi %[err], %[efault] \n" \
" _j 2f \n"
@@ -154,7 +154,7 @@ do { \
#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %[x], %[addr], 0 \n" \
+ "1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@@ -167,8 +167,8 @@ __asm__ __volatile__( \
" .section __ex_table,\"a\" \n" \
" .long 1b, 5b \n" \
" .previous" \
- :[err] "+r"(err_), [tmp] "=r"(cb) \
- :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
+ :[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_)) \
+ :[x] "r"(x_), [efault] "i"(-EFAULT))
#define __get_user_nocheck(x, ptr, size) \
({ \
@@ -222,7 +222,7 @@ do { \
u32 __x = 0; \
__asm__ __volatile__( \
__check_align_##align \
- "1: "insn" %[x], %[addr], 0 \n" \
+ "1: "insn" %[x], %[mem] \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .align 4 \n" \
@@ -236,7 +236,7 @@ do { \
" .long 1b, 5b \n" \
" .previous" \
:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
- :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+ :[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
(x_) = (__force __typeof__(*(addr_)))__x; \
} while (0)
diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h
deleted file mode 100644
index 2c3ed23354a8..000000000000
--- a/arch/xtensa/include/asm/user.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-xtensa/user.h
- *
- * Xtensa Processor version.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_USER_H
-#define _XTENSA_USER_H
-
-/* This file usually defines a 'struct user' structure. However, it is only
- * used for a.out files, which are not supported on Xtensa.
- */
-
-#endif /* _XTENSA_USER_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 79fe3007919e..fd99b25037a7 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -21,50 +21,18 @@
#include <asm/core.h>
#include <asm/kmem_layout.h>
-#if XCHAL_HAVE_PTP_MMU
-#define XCHAL_KIO_CACHED_VADDR 0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
-#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET CONFIG_KERNEL_VIRTUAL_ADDRESS
#else
-#define XCHAL_KIO_BYPASS_VADDR XCHAL_KIO_PADDR
-#define XCHAL_KIO_DEFAULT_PADDR 0x90000000
-#endif
-#define XCHAL_KIO_SIZE 0x10000000
-
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
-#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
-#ifndef __ASSEMBLY__
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
- return xtensa_kio_paddr;
-}
-#endif
-#else
-#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
-#endif
-
-#if defined(CONFIG_MMU)
-
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
-/* Image Virtual Start Address */
-#define KERNELOFFSET (XCHAL_KSEG_CACHED_VADDR + \
- CONFIG_KERNEL_LOAD_ADDRESS - \
+#define KERNELOFFSET (CONFIG_KERNEL_LOAD_ADDRESS + \
+ XCHAL_KSEG_CACHED_VADDR - \
XCHAL_KSEG_PADDR)
+#endif
#else
#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
#endif
-#else /* !defined(CONFIG_MMU) */
- /* MMU Not being used - Virtual == Physical */
-
-/* Location of the start of the kernel text, _start */
-#define KERNELOFFSET CONFIG_KERNEL_LOAD_ADDRESS
-
-
-#endif /* CONFIG_MMU */
-
#define RESET_VECTOR1_VADDR (XCHAL_RESET_VECTOR1_VADDR)
#ifdef CONFIG_VECTORS_OFFSET
#define VECBASE_VADDR (KERNELOFFSET - CONFIG_VECTORS_OFFSET)
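The KERNELOFFSET formula above is the physical load address translated into the cached kseg window — load address plus (virtual base minus physical base) — with CONFIG_KERNEL_VIRTUAL_ADDRESS able to override it outright for XIP. A worked example with illustrative values (real ones come from the core configuration and Kconfig):

	#include <stdio.h>

	#define KERNEL_LOAD_ADDRESS	0x00003000UL	/* illustrative */
	#define KSEG_CACHED_VADDR	0xd0000000UL	/* illustrative */
	#define KSEG_PADDR		0x00000000UL	/* illustrative */

	int main(void)
	{
		unsigned long kerneloffset = KERNEL_LOAD_ADDRESS +
					     KSEG_CACHED_VADDR - KSEG_PADDR;

		printf("%#lx\n", kerneloffset);	/* 0xd0003000 */
		return 0;
	}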
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index 6f629027ac7d..d4082c6a121b 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,10 +5,11 @@
extra-y := head.o vmlinux.lds
-obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
+obj-y := align.o coprocessor.o entry.o irq.o platform.o process.o \
ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \
vectors.o
+obj-$(CONFIG_MMU) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 80828b95a51f..bb8e499b9900 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -15,17 +15,9 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
-#include <asm/processor.h>
#include <asm/coprocessor.h>
-#include <asm/thread_info.h>
-#include <asm/asm-uaccess.h>
-#include <asm/unistd.h>
-#include <asm/ptrace.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/signal.h>
-#include <asm/tlbflush.h>
+#include <asm/regs.h>
#if XTENSA_HAVE_COPROCESSORS
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 9e3676879168..be897803834a 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -529,7 +529,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f
call4 preempt_schedule_irq
- j 1b
+ j 4f
#endif
#if XTENSA_FAKE_NMI
@@ -1876,8 +1876,7 @@ ENDPROC(fast_store_prohibited)
ENTRY(system_call)
- /* reserve 4 bytes on stack for function parameter */
- abi_entry(4)
+ abi_entry_default
/* regs->syscall = regs->areg[2] */
@@ -1892,11 +1891,10 @@ ENTRY(system_call)
mov a6, a2
call4 do_syscall_trace_enter
+ beqz a6, .Lsyscall_exit
l32i a7, a2, PT_SYSCALL
1:
- s32i a7, a1, 4
-
/* syscall = sys_call_table[syscall_nr] */
movi a4, sys_call_table
@@ -1906,8 +1904,6 @@ ENTRY(system_call)
addx4 a4, a7, a4
l32i a4, a4, 0
- movi a5, sys_ni_syscall;
- beq a4, a5, 1f
/* Load args: arg0 - arg5 are passed via regs. */
@@ -1918,25 +1914,19 @@ ENTRY(system_call)
l32i a10, a2, PT_AREG8
l32i a11, a2, PT_AREG9
- /* Pass one additional argument to the syscall: pt_regs (on stack) */
- s32i a2, a1, 0
-
callx4 a4
1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2
bnez a3, 1f
- abi_ret(4)
+.Lsyscall_exit:
+ abi_ret_default
1:
- l32i a4, a1, 4
- l32i a3, a2, PT_SYSCALL
- s32i a4, a2, PT_SYSCALL
mov a6, a2
call4 do_syscall_trace_leave
- s32i a3, a2, PT_SYSCALL
- abi_ret(4)
+ abi_ret_default
ENDPROC(system_call)
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 4ae998b5a348..e0c1fac0910f 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -260,6 +260,13 @@ ENTRY(_startup)
___invalidate_icache_all a2 a3
isync
+#ifdef CONFIG_XIP_KERNEL
+ /* Setup bootstrap CPU stack in XIP kernel */
+
+ movi a1, start_info
+ l32i a1, a1, 0
+#endif
+
movi a6, 0
xsr a6, excsave1
@@ -355,10 +362,10 @@ ENDPROC(cpu_restart)
* DATA section
*/
- .section ".data.init.refok"
- .align 4
+ __REFDATA
+ .align 4
ENTRY(start_info)
- .long init_thread_union + KERNEL_STACK_SIZE
+ .long init_thread_union + KERNEL_STACK_SIZE
/*
* BSS section
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 154979d62b73..72b6222daa0b 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
}
}
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
}
}
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -81,122 +81,25 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
}
}
-#ifdef CONFIG_MMU
-bool platform_vaddr_cached(const void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- return addr >= XCHAL_KSEG_CACHED_VADDR &&
- addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
-}
-
-bool platform_vaddr_uncached(const void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- return addr >= XCHAL_KSEG_BYPASS_VADDR &&
- addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
-}
-
-void *platform_vaddr_to_uncached(void *p)
-{
- return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-}
-
-void *platform_vaddr_to_cached(void *p)
-{
- return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
-#else
-bool __attribute__((weak)) platform_vaddr_cached(const void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return true;
-}
-
-bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return false;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+void arch_dma_prep_coherent(struct page *page, size_t size)
{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return p;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return p;
+ __invalidate_dcache_range((unsigned long)page_address(page), size);
}
-#endif
/*
- * Note: We assume that the full memory space is always mapped to 'kseg'
- * Otherwise we have to use page attributes (not implemented).
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * The following two functions should be implemented in platform code
+ * in order to enable coherent DMA memory operations when CONFIG_MMU is not
+ * enabled.
*/
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t flag, unsigned long attrs)
-{
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page = NULL;
-
-	/* ignore region specifiers */
-
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- flag |= GFP_DMA;
-
- if (gfpflags_allow_blocking(flag))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- flag & __GFP_NOWARN);
-
- if (!page)
- page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-
- if (!page)
- return NULL;
-
- *handle = phys_to_dma(dev, page_to_phys(page));
-
#ifdef CONFIG_MMU
- if (PageHighMem(page)) {
- void *p;
-
- p = dma_common_contiguous_remap(page, size,
- pgprot_noncached(PAGE_KERNEL),
- __builtin_return_address(0));
- if (!p) {
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
- }
- return p;
- }
-#endif
- BUG_ON(!platform_vaddr_cached(page_address(page)));
- __invalidate_dcache_range((unsigned long)page_address(page), size);
- return platform_vaddr_to_uncached(page_address(page));
+void *uncached_kernel_address(void *p)
+{
+ return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+void *cached_kernel_address(void *p)
{
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page;
-
- if (platform_vaddr_uncached(vaddr)) {
- page = virt_to_page(platform_vaddr_to_cached(vaddr));
- } else {
-#ifdef CONFIG_MMU
- dma_common_free_remap(vaddr, size);
-#endif
- page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
- }
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
+ return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
+#endif /* CONFIG_MMU */
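uncached_kernel_address() and cached_kernel_address() above are pure offset arithmetic: the cached and bypass kseg windows map the same physical memory at a fixed distance, so converting a pointer is one addition and one subtraction. A runnable model, assuming the common xtensa window addresses (illustrative only):

	#include <stdio.h>

	#define KSEG_CACHED_VADDR	0xd0000000UL
	#define KSEG_BYPASS_VADDR	0xd8000000UL

	static unsigned long to_uncached(unsigned long vaddr)
	{
		return vaddr + KSEG_BYPASS_VADDR - KSEG_CACHED_VADDR;
	}

	static unsigned long to_cached(unsigned long vaddr)
	{
		return vaddr + KSEG_CACHED_VADDR - KSEG_BYPASS_VADDR;
	}

	int main(void)
	{
		unsigned long cached = 0xd0001000UL;
		unsigned long bypass = to_uncached(cached);

		/* round trip: 0xd0001000 -> 0xd8001000 -> 0xd0001000 */
		printf("%#lx -> %#lx -> %#lx\n",
		       cached, bypass, to_cached(bypass));
		return 0;
	}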
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index db278a9e80c7..9e1c49134c07 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -264,6 +264,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
}
+ childregs->syscall = regs->syscall;
+
/* The thread pointer is passed in the '4th argument' (= a5) */
if (clone_flags & CLONE_SETTLS)
childregs->threadptr = childregs->areg[5];
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index b964f0b2d886..145742d70a9f 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -542,14 +542,28 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-void do_syscall_trace_enter(struct pt_regs *regs)
+void do_syscall_trace_leave(struct pt_regs *regs);
+int do_syscall_trace_enter(struct pt_regs *regs)
{
+ if (regs->syscall == NO_SYSCALL)
+ regs->areg[2] = -ENOSYS;
+
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
+ tracehook_report_syscall_entry(regs)) {
+ regs->areg[2] = -ENOSYS;
regs->syscall = NO_SYSCALL;
+ return 0;
+ }
+
+ if (regs->syscall == NO_SYSCALL) {
+ do_syscall_trace_leave(regs);
+ return 0;
+ }
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+ return 1;
}
void do_syscall_trace_leave(struct pt_regs *regs)
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index e0e1e1892b86..0f93b67c7a5a 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -308,6 +308,10 @@ extern char _Level6InterruptVector_text_end;
extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
+#ifdef CONFIG_XIP_KERNEL
+extern char _xip_start[];
+extern char _xip_end[];
+#endif
static inline int __init_memblock mem_reserve(unsigned long start,
unsigned long end)
@@ -339,6 +343,9 @@ void __init setup_arch(char **cmdline_p)
#endif
mem_reserve(__pa(_stext), __pa(_end));
+#ifdef CONFIG_XIP_KERNEL
+ mem_reserve(__pa(_xip_start), __pa(_xip_end));
+#endif
#ifdef CONFIG_VECTORS_OFFSET
mem_reserve(__pa(&_WindowVectors_text_start),
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index dae83cddd6ca..76cee341507b 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -236,9 +236,9 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
* Do a signal return; undo the signal stack.
*/
-asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
- long a4, long a5, struct pt_regs *regs)
+asmlinkage long xtensa_rt_sigreturn(void)
{
+ struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t set;
int ret;
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 4a6c495ce9b6..87bd68dd7687 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -491,32 +491,27 @@ void show_trace(struct task_struct *task, unsigned long *sp)
pr_info("Call Trace:\n");
walk_stackframe(sp, show_trace_cb, NULL);
-#ifndef CONFIG_KALLSYMS
- pr_cont("\n");
-#endif
}
-static int kstack_depth_to_print = 24;
+#define STACK_DUMP_ENTRY_SIZE 4
+#define STACK_DUMP_LINE_SIZE 32
+static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *task, unsigned long *sp)
{
- int i = 0;
- unsigned long *stack;
+ size_t len;
if (!sp)
sp = stack_pointer(task);
- stack = sp;
- pr_info("Stack:\n");
+ len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
+ kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (kstack_end(sp))
- break;
- pr_cont(" %08lx", *sp++);
- if (i % 8 == 7)
- pr_cont("\n");
- }
- show_trace(task, stack);
+ pr_info("Stack:\n");
+ print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
+ STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+ sp, len, false);
+ show_trace(task, sp);
}
DEFINE_SPINLOCK(die_lock);
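The new show_stack() above swaps the manual word loop for print_hex_dump() and clamps the dump length so it can neither run past the end of the THREAD_SIZE-aligned stack nor exceed kstack_depth_to_print entries. A user-space model of just that clamp (constants illustrative):

	#include <stdio.h>
	#include <stddef.h>

	#define THREAD_SIZE	8192
	#define ENTRY_SIZE	4

	static size_t dump_len(size_t sp, size_t depth)
	{
		/* bytes from sp up to the next THREAD_SIZE boundary,
		 * rounded down to whole entries */
		size_t to_stack_end = (-sp) & (THREAD_SIZE - ENTRY_SIZE);
		size_t requested = depth * ENTRY_SIZE;

		return to_stack_end < requested ? to_stack_end : requested;
	}

	int main(void)
	{
		/* 16 bytes left before the stack top: only 16 are dumped */
		printf("%zu\n", dump_len(0x2000 - 16, 24));
		/* plenty of room: the full 24 * 4 = 96 bytes */
		printf("%zu\n", dump_len(0x1000, 24));
		return 0;
	}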
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 0043d5858f14..409c05cac15e 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -119,7 +119,7 @@ SECTIONS
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
-
+ *(.fixup)
}
_etext = .;
PROVIDE (etext = .);
@@ -128,12 +128,11 @@ SECTIONS
RO_DATA(4096)
- /* Relocation table */
-
- .fixup : { *(.fixup) }
-
/* Data section */
+#ifdef CONFIG_XIP_KERNEL
+ INIT_TEXT_SECTION(PAGE_SIZE)
+#else
_sdata = .;
RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
_edata = .;
@@ -147,6 +146,11 @@ SECTIONS
.init.data :
{
INIT_DATA
+ }
+#endif
+
+ .init.rodata :
+ {
. = ALIGN(0x4);
__tagtable_begin = .;
*(.taglist)
@@ -187,12 +191,16 @@ SECTIONS
RELOCATE_ENTRY(_DebugInterruptVector_text,
.DebugInterruptVector.text);
#endif
+#ifdef CONFIG_XIP_KERNEL
+ RELOCATE_ENTRY(_xip_data, .data);
+ RELOCATE_ENTRY(_xip_init_data, .init.data);
+#else
#if defined(CONFIG_SMP)
RELOCATE_ENTRY(_SecondaryResetVector_text,
.SecondaryResetVector.text);
#endif
+#endif
-
__boot_reloc_table_end = ABSOLUTE(.) ;
INIT_SETUP(XCHAL_ICACHE_LINESIZE)
@@ -278,7 +286,7 @@ SECTIONS
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
#endif
-#if defined(CONFIG_SMP)
+#if !defined(CONFIG_XIP_KERNEL) && defined(CONFIG_SMP)
SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text,
@@ -291,12 +299,48 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
+#ifndef CONFIG_XIP_KERNEL
__init_end = .;
BSS_SECTION(0, 8192, 0)
+#endif
_end = .;
+#ifdef CONFIG_XIP_KERNEL
+ . = CONFIG_XIP_DATA_ADDR;
+
+ _xip_start = .;
+
+#undef LOAD_OFFSET
+#define LOAD_OFFSET \
+ (CONFIG_XIP_DATA_ADDR - (LOADADDR(.dummy) + SIZEOF(.dummy) + 3) & ~ 3)
+
+ _xip_data_start = .;
+ _sdata = .;
+ RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+ _xip_data_end = .;
+
+ /* Initialization data: */
+
+ STRUCT_ALIGN();
+
+ _xip_init_data_start = .;
+ __init_begin = .;
+ .init.data :
+ {
+ INIT_DATA
+ }
+ _xip_init_data_end = .;
+ __init_end = .;
+ BSS_SECTION(0, 8192, 0)
+
+ _xip_end = .;
+
+#undef LOAD_OFFSET
+#endif
+
DWARF_DEBUG
.xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index f81b1478da61..bee30a77cd70 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -197,6 +197,8 @@ vmalloc_fault:
struct mm_struct *act_mm = current->active_mm;
int index = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -211,8 +213,18 @@ vmalloc_fault:
pgd_val(*pgd) = pgd_val(*pgd_k);
- pmd = pmd_offset(pgd, address);
- pmd_k = pmd_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+ goto bad_page_fault;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (!pud_present(*pud) || !pud_present(*pud_k))
+ goto bad_page_fault;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_page_fault;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index d898ed67d890..19c625e6d81f 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -193,8 +193,8 @@ void __init mem_init(void)
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
(unsigned long)_text, (unsigned long)_etext,
(unsigned long)(_etext - _text) >> 10,
- (unsigned long)__start_rodata, (unsigned long)_sdata,
- (unsigned long)(_sdata - __start_rodata) >> 10,
+ (unsigned long)__start_rodata, (unsigned long)__end_rodata,
+ (unsigned long)(__end_rodata - __start_rodata) >> 10,
(unsigned long)_sdata, (unsigned long)_edata,
(unsigned long)(_edata - _sdata) >> 10,
(unsigned long)__init_begin, (unsigned long)__init_end,
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index af7152560bc3..e3baa21ff24c 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -20,7 +20,9 @@ void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = pmd_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
@@ -42,7 +44,9 @@ static void __init populate(void *start, void *end)
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = pmd_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
@@ -56,7 +60,9 @@ static void __init populate(void *start, void *end)
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
phys_addr_t phys =
- memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
+ 0,
+ MEMBLOCK_ALLOC_ANYWHERE);
if (!phys)
panic("Failed to allocate page table page\n");
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 03678c4afc39..37e478a27877 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -22,7 +22,9 @@
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = pmd_offset(pgd, vaddr);
+ p4d_t *p4d = p4d_offset(pgd, vaddr);
+ pud_t *pud = pud_offset(p4d, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
unsigned long i;
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 59153d0aa890..f436cf2efd8b 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -169,6 +169,8 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -177,7 +179,13 @@ static unsigned get_pte_for_vaddr(unsigned vaddr)
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
- pmd = pmd_offset(pgd, vaddr);
+ p4d = p4d_offset(pgd, vaddr);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+ pud = pud_offset(p4d, vaddr);
+ if (pud_none_or_clear_bad(pud))
+ return 0;
+ pmd = pmd_offset(pud, vaddr);
if (pmd_none_or_clear_bad(pmd))
return 0;
pte = pte_offset_map(pmd, vaddr);
@@ -216,6 +224,8 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
unsigned tlbidx = w | (e << PAGE_SHIFT);
unsigned r0 = dtlb ?
read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+ unsigned r1 = dtlb ?
+ read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
unsigned pte = get_pte_for_vaddr(vpn);
unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
@@ -231,8 +241,6 @@ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
}
if (tlb_asid == mm_asid) {
- unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
- read_itlb_translation(tlbidx);
if ((pte ^ r1) & PAGE_MASK) {
pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
dtlb ? 'D' : 'I', w, e, r0, r1, pte);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f5e0ad65e86a..650bade5ea5a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
*/
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -327,7 +328,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
struct iov_iter i;
struct iovec *iov = NULL;
- ret = import_iovec(rq_data_dir(rq),
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ ret = compat_import_iovec(rq_data_dir(rq),
+ hdr->dxferp, hdr->iovec_count,
+ 0, &iov, &i);
+ else
+#endif
+ ret = import_iovec(rq_data_dir(rq),
hdr->dxferp, hdr->iovec_count,
0, &iov, &i);
if (ret < 0)
@@ -542,6 +550,122 @@ static inline int blk_send_start_stop(struct request_queue *q,
return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}
+#ifdef CONFIG_COMPAT
+struct compat_sg_io_hdr {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+};
+#endif
+
+int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall()) {
+ struct compat_sg_io_hdr hdr32 = {
+ .interface_id = hdr->interface_id,
+ .dxfer_direction = hdr->dxfer_direction,
+ .cmd_len = hdr->cmd_len,
+ .mx_sb_len = hdr->mx_sb_len,
+ .iovec_count = hdr->iovec_count,
+ .dxfer_len = hdr->dxfer_len,
+ .dxferp = (uintptr_t)hdr->dxferp,
+ .cmdp = (uintptr_t)hdr->cmdp,
+ .sbp = (uintptr_t)hdr->sbp,
+ .timeout = hdr->timeout,
+ .flags = hdr->flags,
+ .pack_id = hdr->pack_id,
+ .usr_ptr = (uintptr_t)hdr->usr_ptr,
+ .status = hdr->status,
+ .masked_status = hdr->masked_status,
+ .msg_status = hdr->msg_status,
+ .sb_len_wr = hdr->sb_len_wr,
+ .host_status = hdr->host_status,
+ .driver_status = hdr->driver_status,
+ .resid = hdr->resid,
+ .duration = hdr->duration,
+ .info = hdr->info,
+ };
+
+ if (copy_to_user(argp, &hdr32, sizeof(hdr32)))
+ return -EFAULT;
+
+ return 0;
+ }
+#endif
+
+ if (copy_to_user(argp, hdr, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(put_sg_io_hdr);
+
+int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp)
+{
+#ifdef CONFIG_COMPAT
+ struct compat_sg_io_hdr hdr32;
+
+ if (in_compat_syscall()) {
+ if (copy_from_user(&hdr32, argp, sizeof(hdr32)))
+ return -EFAULT;
+
+ *hdr = (struct sg_io_hdr) {
+ .interface_id = hdr32.interface_id,
+ .dxfer_direction = hdr32.dxfer_direction,
+ .cmd_len = hdr32.cmd_len,
+ .mx_sb_len = hdr32.mx_sb_len,
+ .iovec_count = hdr32.iovec_count,
+ .dxfer_len = hdr32.dxfer_len,
+ .dxferp = compat_ptr(hdr32.dxferp),
+ .cmdp = compat_ptr(hdr32.cmdp),
+ .sbp = compat_ptr(hdr32.sbp),
+ .timeout = hdr32.timeout,
+ .flags = hdr32.flags,
+ .pack_id = hdr32.pack_id,
+ .usr_ptr = compat_ptr(hdr32.usr_ptr),
+ .status = hdr32.status,
+ .masked_status = hdr32.masked_status,
+ .msg_status = hdr32.msg_status,
+ .sb_len_wr = hdr32.sb_len_wr,
+ .host_status = hdr32.host_status,
+ .driver_status = hdr32.driver_status,
+ .resid = hdr32.resid,
+ .duration = hdr32.duration,
+ .info = hdr32.info,
+ };
+
+ return 0;
+ }
+#endif
+
+ if (copy_from_user(hdr, argp, sizeof(*hdr)))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL(get_sg_io_hdr);
+
int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
unsigned int cmd, void __user *arg)
{
@@ -581,14 +705,14 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
case SG_IO: {
struct sg_io_hdr hdr;
- err = -EFAULT;
- if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ err = get_sg_io_hdr(&hdr, arg);
+ if (err)
break;
err = sg_io(q, bd_disk, &hdr, mode);
if (err == -EFAULT)
break;
- if (copy_to_user(arg, &hdr, sizeof(hdr)))
+ if (put_sg_io_hdr(&hdr, arg))
err = -EFAULT;
break;
}
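With get_sg_io_hdr() and put_sg_io_hdr() exported above, one code path serves both ABIs: the helpers test in_compat_syscall() internally and translate the 32-bit layout (compat_ptr() widening the u32 pointer fields) only when needed. A caller-side sketch in kernel context, mirroring the SG_IO case above (my_sg_io_ioctl is a hypothetical name):

	static int my_sg_io_ioctl(struct request_queue *q,
				  struct gendisk *disk, fmode_t mode,
				  void __user *argp)
	{
		struct sg_io_hdr hdr;
		int err;

		err = get_sg_io_hdr(&hdr, argp);  /* native or compat layout */
		if (err)
			return err;

		err = sg_io(q, disk, &hdr, mode);
		if (err == -EFAULT)
			return err;

		return put_sg_io_hdr(&hdr, argp); /* mirrors the input layout */
	}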
diff --git a/certs/blacklist.c b/certs/blacklist.c
index ec00bf337eb6..6514f9ebc943 100644
--- a/certs/blacklist.c
+++ b/certs/blacklist.c
@@ -135,6 +135,15 @@ int is_hash_blacklisted(const u8 *hash, size_t hash_len, const char *type)
}
EXPORT_SYMBOL_GPL(is_hash_blacklisted);
+int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+{
+ if (is_hash_blacklisted(hash, hash_len, "bin") == -EKEYREJECTED)
+ return -EPERM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(is_binary_blacklisted);
+
/*
* Initialise the blacklist
*/
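is_binary_blacklisted() above reduces the check to one call for code that verifies binaries by hash: it maps the keyring's -EKEYREJECTED onto -EPERM and reports everything else as 0, i.e. not blacklisted. A hedged caller sketch in kernel context (check_binary_hash is a hypothetical name):

	static int check_binary_hash(const u8 *digest, size_t digest_len)
	{
		int ret = is_binary_blacklisted(digest, digest_len);

		if (ret == -EPERM)
			pr_warn("binary hash is blacklisted\n");

		return ret;	/* 0 when the binary is allowed */
	}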
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ebe1e9e5fd81..4fb97511a16f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -319,12 +319,6 @@ config ACPI_THERMAL
To compile this driver as a module, choose M here:
the module will be called thermal.
-config ACPI_NUMA
- bool "NUMA support"
- depends on NUMA
- depends on (X86 || IA64 || ARM64)
- default y if IA64 || ARM64
-
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
default ""
@@ -473,8 +467,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
source "drivers/acpi/nfit/Kconfig"
-source "drivers/acpi/hmat/Kconfig"
-
+source "drivers/acpi/numa/Kconfig"
source "drivers/acpi/apei/Kconfig"
source "drivers/acpi/dptf/Kconfig"
@@ -513,11 +506,19 @@ menuconfig PMIC_OPREGION
PMIC chip.
if PMIC_OPREGION
-config CRC_PMIC_OPREGION
- bool "ACPI operation region support for CrystalCove PMIC"
+config BYTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Bay Trail Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for the Bay Trail
+ version of the Crystal Cove PMIC.
+
+config CHTCRC_PMIC_OPREGION
+ bool "ACPI operation region support for Cherry Trail Crystal Cove PMIC"
depends on INTEL_SOC_PMIC
help
- This config adds ACPI operation region support for CrystalCove PMIC.
+ This config adds ACPI operation region support for the Cherry Trail
+ version of the Crystal Cove PMIC.
config XPOWER_PMIC_OPREGION
bool "ACPI operation region support for XPower AXP288 PMIC"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 5d361e4e3405..33fdaf67454e 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -48,14 +48,13 @@ acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += power.o
acpi-y += event.o
-acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
+acpi-y += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
-acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
@@ -80,7 +79,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
-obj-$(CONFIG_ACPI_HMAT) += hmat/
+obj-$(CONFIG_ACPI_NUMA) += numa/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
@@ -109,7 +108,8 @@ obj-$(CONFIG_ACPI_APEI) += apei/
obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
-obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
+obj-$(CONFIG_BYTCRC_PMIC_OPREGION) += pmic/intel_pmic_bytcrc.o
+obj-$(CONFIG_CHTCRC_PMIC_OPREGION) += pmic/intel_pmic_chtcrc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 57d9d574d4dd..ece8c1a921cc 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -53,7 +53,7 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
if (!table->header)
return -ENOMEM;
- ret = acpi_load_table(table->header);
+ ret = acpi_load_table(table->header, &table->index);
if (ret) {
kfree(table->header);
table->header = NULL;
@@ -223,7 +223,7 @@ static void acpi_table_drop_item(struct config_group *group,
struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
ACPI_INFO(("Host-directed Dynamic ACPI Table Unload"));
- acpi_tb_unload_table(table->index);
+ acpi_unload_table(table->index);
}
static struct configfs_group_operations acpi_table_group_ops = {
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 60bbc5090abe..70f740b09684 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
@@ -463,6 +464,18 @@ struct lpss_device_links {
const char *consumer_hid;
const char *consumer_uid;
u32 flags;
+ const struct dmi_system_id *dep_missing_ids;
+};
+
+/* Please keep this list sorted alphabetically by vendor and model */
+static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ },
+ {}
};
/*
@@ -473,36 +486,29 @@ struct lpss_device_links {
* the supplier is not enumerated until after the consumer is probed.
*/
static const struct lpss_device_links lpss_device_links[] = {
+ /* CHT External sdcard slot controller depends on PMIC I2C ctrl */
{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+ /* CHT iGPU depends on PMIC I2C controller */
{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ /* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
+ {"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
+ i2c1_dep_missing_dmi_ids},
+ /* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
+ /* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
+ {"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};
-static bool hid_uid_match(struct acpi_device *adev,
- const char *hid2, const char *uid2)
-{
- const char *hid1 = acpi_device_hid(adev);
- const char *uid1 = acpi_device_uid(adev);
-
- if (strcmp(hid1, hid2))
- return false;
-
- if (!uid2)
- return true;
-
- return uid1 && !strcmp(uid1, uid2);
-}
-
static bool acpi_lpss_is_supplier(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
+ return acpi_dev_hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}
static bool acpi_lpss_is_consumer(struct acpi_device *adev,
const struct lpss_device_links *link)
{
- return hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
+ return acpi_dev_hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}
struct hid_uid {
@@ -518,7 +524,7 @@ static int match_hid_uid(struct device *dev, const void *data)
if (!adev)
return 0;
- return hid_uid_match(adev, id->hid, id->uid);
+ return acpi_dev_hid_uid_match(adev, id->hid, id->uid);
}
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
@@ -570,7 +576,8 @@ static void acpi_lpss_link_consumer(struct device *dev1,
if (!dev2)
return;
- if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
+ || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
device_link_add(dev2, dev1, link->flags);
put_device(dev2);
@@ -585,7 +592,8 @@ static void acpi_lpss_link_supplier(struct device *dev1,
if (!dev2)
return;
- if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
+ || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
device_link_add(dev1, dev2, link->flags);
put_device(dev2);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 00ec4f2bf015..c05050f474cd 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -31,6 +31,44 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"", 0},
};
+static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_acpi_dev(&platform_bus_type, adev);
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int acpi_platform_device_remove_notify(struct notifier_block *nb,
+ unsigned long value, void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct platform_device *pdev;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ /* Nothing to do here */
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ pdev = acpi_platform_device_find_by_companion(adev);
+ if (!pdev)
+ break;
+
+ platform_device_unregister(pdev);
+ put_device(&pdev->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_platform_notifier = {
+ .notifier_call = acpi_platform_device_remove_notify,
+};
+
static void acpi_platform_fill_resource(struct acpi_device *adev,
const struct resource *src, struct resource *dest)
{
@@ -130,3 +168,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
return pdev;
}
EXPORT_SYMBOL_GPL(acpi_create_platform_device);
+
+void __init acpi_platform_init(void)
+{
+ acpi_reconfig_notifier_register(&acpi_platform_notifier);
+}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4f325e47519f..2f380e7381d6 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -699,9 +699,13 @@ acpi_video_device_EDID(struct acpi_video_device *device,
* event notify code.
* lcd_flag :
* 0. The system BIOS should automatically control the brightness level
- * of the LCD when the power changes from AC to DC
+ * of the LCD when:
+ * - the power changes from AC to DC (ACPI appendix B)
+ * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* 1. The system BIOS should NOT automatically control the brightness
- * level of the LCD when the power changes from AC to DC.
+ * level of the LCD when:
+ * - the power changes from AC to DC (ACPI appendix B)
+ * - a brightness hotkey gets pressed (implied by Win7/8 backlight docs)
* Return Value:
* -EINVAL wrong arg.
*/
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 32f2e38c7570..694cf206fa9a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -148,6 +148,8 @@ void acpi_db_find_references(char *object_arg);
void acpi_db_get_bus_info(void);
+acpi_status acpi_db_display_fields(u32 address_space_id);
+
/*
* dbdisply - debug display commands
*/
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 218ff4c8b817..2043dff370b1 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -192,6 +192,16 @@ struct acpi_device_walk_info {
u32 num_INI;
};
+/* Info used by acpi_db_display_fields */
+
+struct acpi_region_walk_info {
+ u32 debug_level;
+ u32 count;
+ acpi_owner_id owner_id;
+ u8 display_type;
+ u32 address_space_id;
+};
+
/* TBD: [Restructure] Merge with struct above */
struct acpi_walk_info {
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 601808be86d1..5fb50634e08e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -142,10 +142,11 @@ struct acpi_pkg_info {
/* acpi_ut_dump_buffer */
-#define DB_BYTE_DISPLAY 1
-#define DB_WORD_DISPLAY 2
-#define DB_DWORD_DISPLAY 4
-#define DB_QWORD_DISPLAY 8
+#define DB_BYTE_DISPLAY 0x01
+#define DB_WORD_DISPLAY 0x02
+#define DB_DWORD_DISPLAY 0x04
+#define DB_QWORD_DISPLAY 0x08
+#define DB_DISPLAY_DATA_ONLY 0x10
/*
* utascii - ASCII utilities
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 9fd9a98a9cbe..2b84ac093698 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -106,6 +106,10 @@ acpi_db_convert_to_buffer(char *string, union acpi_object *object)
u8 *buffer;
acpi_status status;
+ /* Skip all preceding white space */
+
+ acpi_ut_remove_whitespace(&string);
+
/* Generate the final buffer length */
for (i = 0, length = 0; string[i];) {
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 30ab62b0fec8..f2df416d0d2d 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -513,7 +513,6 @@ void acpi_db_display_results(void)
return;
}
- obj_desc = walk_state->method_desc;
node = walk_state->method_node;
if (walk_state->results) {
@@ -565,7 +564,6 @@ void acpi_db_display_calling_tree(void)
return;
}
- node = walk_state->method_node;
acpi_os_printf("Current Control Method Call Tree\n");
while (walk_state) {
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index c6e25734dc5c..e1b6e54a96ac 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -93,7 +93,7 @@ acpi_status acpi_db_load_tables(struct acpi_new_table_desc *list_head)
while (table_list_head) {
table = table_list_head->table;
- status = acpi_load_table(table);
+ status = acpi_load_table(table, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
acpi_os_printf
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 55a7e10998d8..e1632b340182 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -50,6 +50,7 @@ enum acpi_ex_debugger_commands {
CMD_EVALUATE,
CMD_EXECUTE,
CMD_EXIT,
+ CMD_FIELDS,
CMD_FIND,
CMD_GO,
CMD_HANDLERS,
@@ -127,6 +128,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"EVALUATE", 1},
{"EXECUTE", 1},
{"EXIT", 0},
+ {"FIELDS", 1},
{"FIND", 1},
{"GO", 0},
{"HANDLERS", 0},
@@ -200,6 +202,8 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
"Find ACPI name(s) with wildcards\n"},
{1, " Integrity", "Validate namespace integrity\n"},
{1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Fields <AddressSpaceId>",
+ "Display list of loaded field units by space ID\n"},
{1, " Namespace [Object] [Depth]",
"Display loaded namespace tree/subtree\n"},
{1, " Notify <Object> <Value>", "Send a notification on Object\n"},
@@ -507,6 +511,21 @@ char *acpi_db_get_next_token(char *string,
}
break;
+ case '{':
+
+ /* This is the start of a field unit, scan until closing brace */
+
+ string++;
+ start = string;
+ type = ACPI_TYPE_FIELD_UNIT;
+
+ /* Find end of field unit */
+
+ while (*string && (*string != '}')) {
+ string++;
+ }
+ break;
+
case '[':
/* This is the start of a package, scan until closing bracket */
@@ -674,6 +693,7 @@ acpi_db_command_dispatch(char *input_buffer,
union acpi_parse_object *op)
{
u32 temp;
+ u64 temp64;
u32 command_index;
u32 param_count;
char *command_line;
@@ -689,7 +709,6 @@ acpi_db_command_dispatch(char *input_buffer,
param_count = acpi_db_get_line(input_buffer);
command_index = acpi_db_match_command(acpi_gbl_db_args[0]);
- temp = 0;
/*
* We don't want to add the !! command to the history buffer. It
@@ -790,6 +809,21 @@ acpi_db_command_dispatch(char *input_buffer,
status = acpi_db_find_name_in_namespace(acpi_gbl_db_args[1]);
break;
+ case CMD_FIELDS:
+
+ status = acpi_ut_strtoul64(acpi_gbl_db_args[1], &temp64);
+
+ if (ACPI_FAILURE(status)
+ || temp64 >= ACPI_NUM_PREDEFINED_REGIONS) {
+ acpi_os_printf
+ ("Invalid adress space ID: must be between 0 and %u inclusive\n",
+ ACPI_NUM_PREDEFINED_REGIONS - 1);
+ return (AE_OK);
+ }
+
+ status = acpi_db_display_fields((u32)temp64);
+ break;
+
case CMD_GO:
acpi_gbl_cm_single_step = FALSE;
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index 76a15b6ffc5d..4e48a7de7413 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -321,6 +321,10 @@ acpi_status acpi_db_disassemble_method(char *name)
walk_state->parse_flags |= ACPI_PARSE_DISASSEMBLE;
status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
(void)acpi_dm_parse_deferred_ops(op);
/* Now we can disassemble the method */
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 63fe30e86807..3615e1a6efd8 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -10,6 +10,7 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "acpredef.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_CA_DEBUGGER
ACPI_MODULE_NAME("dbnames")
@@ -504,6 +505,86 @@ acpi_db_walk_for_object_counts(acpi_handle obj_handle,
/*******************************************************************************
*
+ * FUNCTION: acpi_db_walk_for_fields
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Display short info about field units in the namespace
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_db_walk_for_fields(acpi_handle obj_handle,
+ u32 nesting_level, void *context, void **return_value)
+{
+ union acpi_object *ret_value;
+ struct acpi_region_walk_info *info =
+ (struct acpi_region_walk_info *)context;
+ struct acpi_buffer buffer;
+ acpi_status status;
+ struct acpi_namespace_node *node = acpi_ns_validate_handle(obj_handle);
+
+ if (!node) {
+ return (AE_OK);
+ }
+ if (node->object->field.region_obj->region.space_id !=
+ info->address_space_id) {
+ return (AE_OK);
+ }
+
+ info->count++;
+
+ /* Get and display the full pathname to this object */
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could Not get pathname for object %p\n",
+ obj_handle);
+ return (AE_OK);
+ }
+
+ acpi_os_printf("%s ", (char *)buffer.pointer);
+ ACPI_FREE(buffer.pointer);
+
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);
+
+ /*
+ * Since this is a field unit, surround the output in braces
+ */
+ acpi_os_printf("{");
+
+ ret_value = (union acpi_object *)buffer.pointer;
+ switch (ret_value->type) {
+ case ACPI_TYPE_INTEGER:
+
+ acpi_os_printf("%8.8X%8.8X",
+ ACPI_FORMAT_UINT64(ret_value->integer.value));
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ acpi_ut_dump_buffer(ret_value->buffer.pointer,
+ ret_value->buffer.length,
+ DB_DISPLAY_DATA_ONLY | DB_BYTE_DISPLAY, 0);
+ break;
+
+ default:
+
+ break;
+ }
+ acpi_os_printf("}\n");
+
+ ACPI_FREE(buffer.pointer);
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_walk_for_specific_objects
*
* PARAMETERS: Callback from walk_namespace
@@ -630,6 +711,39 @@ acpi_status acpi_db_display_objects(char *obj_type_arg, char *display_count_arg)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_display_fields
+ *
+ * PARAMETERS: address_space_id - Address space of the field units to display
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Display all field units that belong to the requested
+ * address space, walking the namespace from the root
+ *
+ ******************************************************************************/
+
+acpi_status acpi_db_display_fields(u32 address_space_id)
+{
+ struct acpi_region_walk_info info;
+
+ info.count = 0;
+ info.owner_id = ACPI_OWNER_ID_MAX;
+ info.debug_level = ACPI_UINT32_MAX;
+ info.display_type = ACPI_DISPLAY_SUMMARY | ACPI_DISPLAY_SHORT;
+ info.address_space_id = address_space_id;
+
+ /* Walk the namespace from the root */
+
+ (void)acpi_walk_namespace(ACPI_TYPE_LOCAL_REGION_FIELD,
+ ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+ acpi_db_walk_for_fields, NULL, (void *)&info,
+ NULL);
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_integrity_walk
*
* PARAMETERS: Callback from walk_namespace
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index f9fc84bc3e84..4b4c530a0654 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -464,7 +464,6 @@ void acpi_db_decode_arguments(struct acpi_walk_state *walk_state)
u8 display_args = FALSE;
node = walk_state->method_node;
- obj_desc = walk_state->method_desc;
/* There are no arguments for the module-level code case */
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4847f89c678c..5034fab9cf69 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -85,7 +85,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
control_state->control.loop_timeout = acpi_os_get_timer() +
- (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
+ ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index cf4e061bb0f0..faa38a22263a 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -149,7 +149,6 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
if (walk_state->deferred_node) {
node = walk_state->deferred_node;
- status = AE_OK;
} else {
/* Execute flag should always be set when this function is entered */
@@ -264,7 +263,6 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
union acpi_parse_object *child;
#ifdef ACPI_EXEC_APP
- u64 value = 0;
union acpi_operand_object *result_desc;
union acpi_operand_object *obj_desc;
char *name_path;
@@ -406,19 +404,17 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
name_path =
acpi_ns_get_external_pathname(info->
field_node);
- obj_desc =
- acpi_ut_create_integer_object
- (value);
if (ACPI_SUCCESS
(ae_lookup_init_file_entry
- (name_path, &value))) {
+ (name_path, &obj_desc))) {
acpi_ex_write_data_to_field
(obj_desc,
acpi_ns_get_attached_object
(info->field_node),
&result_desc);
+ acpi_ut_remove_reference
+ (obj_desc);
}
- acpi_ut_remove_reference(obj_desc);
ACPI_FREE(name_path);
#endif
}
@@ -636,8 +632,6 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
}
/* Name already exists, just ignore this error */
-
- status = AE_OK;
}
arg->common.node = node;
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fb15e9e2373b..9c7adaa7b582 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -110,6 +110,9 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
status =
acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
if (!gpe_block->previous && !gpe_block->next) {
@@ -359,10 +362,10 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
walk_info.gpe_device = gpe_device;
walk_info.execute_by_owner_id = FALSE;
- status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
- ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
- acpi_ev_match_gpe_method, NULL,
- &walk_info, NULL);
+ (void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_match_gpe_method, NULL, &walk_info,
+ NULL);
/* Return the new block */
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index b04f982e59fa..70d21d5ec5f3 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -156,8 +156,6 @@ acpi_status acpi_ev_gpe_initialize(void)
* GPE0 and GPE1 do not have to be contiguous in the GPE number
* space. However, GPE0 always starts at GPE number zero.
*/
- gpe_number_max = acpi_gbl_FADT.gpe1_base +
- ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
}
}
@@ -169,7 +167,6 @@ acpi_status acpi_ev_gpe_initialize(void)
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"There are no GPE blocks defined in the FADT\n"));
- status = AE_OK;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d45f7639f7ee..aa98fe07cd1b 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -230,11 +230,15 @@ void acpi_ev_terminate(void)
/* Disable all GPEs in all GPE blocks */
status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not disable GPEs in GPE block"));
+ }
status = acpi_ev_remove_global_lock_handler();
if (ACPI_FAILURE(status)) {
- ACPI_ERROR((AE_INFO,
- "Could not remove Global Lock handler"));
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not remove Global Lock handler"));
}
acpi_gbl_events_initialized = FALSE;
@@ -250,6 +254,10 @@ void acpi_ev_terminate(void)
/* Deallocate all handler objects installed within GPE info structs */
status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not delete GPE handlers"));
+ }
/* Return to original mode if necessary */
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 45dc797df05d..1ff126460007 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -836,11 +836,11 @@ acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node)
objects[1].type = ACPI_TYPE_INTEGER;
objects[1].integer.value = ACPI_REG_CONNECT;
- status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
+ (void)acpi_evaluate_object(reg_method, NULL, &args, NULL);
exit:
/* We ignore all errors from above, don't care */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
return_VOID;
}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47bbcd2a23..aee09640d710 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -198,7 +198,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
* root bridge. Still need to return a context object
* for the new PCI_Config operation region, however.
*/
- status = AE_OK;
} else {
ACPI_EXCEPTION((AE_INFO, status,
"Could not install PciConfig handler "
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index abbf9702aa7f..2919746c9041 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -166,6 +166,9 @@ acpi_status acpi_enter_sleep_state_s4bios(void)
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.s4_bios_request, 8);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
do {
acpi_os_stall(ACPI_USEC_PER_MSEC);
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 14cbf63f1991..c86d0770ed6e 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -486,5 +486,5 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
error_exit:
ACPI_FREE(name);
*return_object = new_object;
- return (AE_OK);
+ return (status);
}
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 9731d7cf1b83..9ad340f644a1 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -291,7 +291,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
for (i = 0;
(i < obj_desc->buffer.length
&& i < 12); i++) {
- acpi_os_printf(" %.2hX",
+ acpi_os_printf(" %2.2X",
obj_desc->buffer.
pointer[i]);
}
@@ -404,7 +404,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
- acpi_os_printf(" Off %.3X Len %.2X Acc %.2hd\n",
+ acpi_os_printf(" Off %.3X Len %.2X Acc %.2X\n",
(obj_desc->common_field.
base_byte_offset * 8)
+
@@ -589,8 +589,6 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
goto cleanup;
}
-
- obj_type = ACPI_TYPE_INVALID; /* Terminate loop after next pass */
}
cleanup:
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 55b4a5b3331f..161e60ddfb69 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -425,8 +425,8 @@ acpi_get_object_info(acpi_handle handle,
}
if (cls) {
- next_id_string = acpi_ns_copy_device_id(&info->class_code,
- cls, next_id_string);
+ (void)acpi_ns_copy_device_id(&info->class_code,
+ cls, next_id_string);
}
/* Copy the fixed-length data */
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 98e5c7400e54..ded2779fc8ea 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -481,8 +481,7 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
walk_state->opcode = (*op)->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
- status =
- acpi_ps_next_parse_state(walk_state, *op, status);
+ (void)acpi_ps_next_parse_state(walk_state, *op, status);
status2 = acpi_ps_complete_this_op(walk_state, *op);
if (ACPI_FAILURE(status2)) {
@@ -490,7 +489,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
}
}
- status = AE_OK;
break;
case AE_CTRL_BREAK:
@@ -512,14 +510,13 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
walk_state->opcode = (*op)->common.aml_opcode;
status = walk_state->ascending_callback(walk_state);
- status = acpi_ps_next_parse_state(walk_state, *op, status);
+ (void)acpi_ps_next_parse_state(walk_state, *op, status);
status2 = acpi_ps_complete_this_op(walk_state, *op);
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
- status = AE_OK;
break;
case AE_CTRL_TERMINATE:
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 570ea0df8a1b..c659b54985a5 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -312,6 +312,9 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
path_buffer.pointer = user_prt->source;
status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/* +1 to include null terminator */
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 309440010ab2..2cf36451e46f 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -933,6 +933,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
}
status = acpi_ns_load_table(table_index, parent_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
/*
* Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 86f1693f6d29..0782acf85722 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -268,6 +268,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
*
* PARAMETERS: table - Pointer to a buffer containing the ACPI
* table to be loaded.
+ * table_idx - Pointer to a u32 for storing the table
+ * index; may be NULL
*
* RETURN: Status
*
@@ -278,7 +280,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
* to ensure that the table is not deleted or unmapped.
*
******************************************************************************/
-acpi_status acpi_load_table(struct acpi_table_header *table)
+acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
{
acpi_status status;
u32 table_index;
@@ -297,6 +299,10 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
FALSE, &table_index);
+ if (table_idx) {
+ *table_idx = table_index;
+ }
+
if (ACPI_SUCCESS(status)) {
/* Complete the initialization/resolution of new objects */
@@ -390,3 +396,35 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
}
ACPI_EXPORT_SYMBOL(acpi_unload_parent_table)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_unload_table
+ *
+ * PARAMETERS: table_index - Index as returned by acpi_load_table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Via the table_index representing an SSDT or OEMx table, unloads
+ * the table and deletes all namespace objects associated with
+ * that table. Unloading of the DSDT is not allowed.
+ * Note: Mainly intended to support hotplug removal of SSDTs.
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_table(u32 table_index)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_unload_table);
+
+ if (table_index == 1) {
+
+ /* table_index==1 means DSDT is the owner. DSDT cannot be unloaded */
+
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ status = acpi_tb_unload_table(table_index);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_table)
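[Editor's note: a minimal sketch of how a caller might use the revised pair of entry points, assuming a caller-owned SSDT image. The my_* names and storage are illustrative, not part of this patch.]

#include <linux/acpi.h>

static u32 my_ssdt_index;	/* hypothetical storage for the index */

static acpi_status my_load_ssdt(struct acpi_table_header *ssdt)
{
	/* The new second argument receives the table index; may be NULL */
	return acpi_load_table(ssdt, &my_ssdt_index);
}

static void my_unload_ssdt(void)
{
	/* Index 1 (the DSDT) would be rejected with AE_TYPE */
	acpi_status status = acpi_unload_table(my_ssdt_index);

	if (ACPI_FAILURE(status))
		pr_warn("SSDT unload failed\n");
}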
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 61db9967ebe4..db897af1de05 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -37,7 +37,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
u32 j;
u32 temp32;
u8 buf_char;
+ u32 display_data_only = display & DB_DISPLAY_DATA_ONLY;
+ display &= ~DB_DISPLAY_DATA_ONLY;
if (!buffer) {
acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
return;
@@ -53,7 +55,9 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
/* Print current offset */
- acpi_os_printf("%8.4X: ", (base_offset + i));
+ if (!display_data_only) {
+ acpi_os_printf("%8.4X: ", (base_offset + i));
+ }
/* Print 16 hex chars */
@@ -109,32 +113,34 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
* Print the ASCII equivalent characters but watch out for the bad
* unprintable ones (printable chars are 0x20 through 0x7E)
*/
- acpi_os_printf(" ");
- for (j = 0; j < 16; j++) {
- if (i + j >= count) {
- acpi_os_printf("\n");
- return;
+ if (!display_data_only) {
+ acpi_os_printf(" ");
+ for (j = 0; j < 16; j++) {
+ if (i + j >= count) {
+ acpi_os_printf("\n");
+ return;
+ }
+
+ /*
+ * Add comment characters so rest of line is ignored when
+ * compiled
+ */
+ if (j == 0) {
+ acpi_os_printf("// ");
+ }
+
+ buf_char = buffer[(acpi_size)i + j];
+ if (isprint(buf_char)) {
+ acpi_os_printf("%c", buf_char);
+ } else {
+ acpi_os_printf(".");
+ }
}
- /*
- * Add comment characters so rest of line is ignored when
- * compiled
- */
- if (j == 0) {
- acpi_os_printf("// ");
- }
+ /* Done with that line. */
- buf_char = buffer[(acpi_size)i + j];
- if (isprint(buf_char)) {
- acpi_os_printf("%c", buf_char);
- } else {
- acpi_os_printf(".");
- }
+ acpi_os_printf("\n");
}
-
- /* Done with that line. */
-
- acpi_os_printf("\n");
i += 16;
}
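[Editor's note: a hypothetical call showing the effect of the new flag; acutils.h is ACPICA-internal, and the buffer contents here are illustrative.]

#include "acutils.h"	/* DB_* display flags, acpi_ut_dump_buffer() */

static void my_dump_example(void)
{
	u8 buf[32] = { 0xDE, 0xAD, 0xBE, 0xEF };

	/* Hex data only: offsets and the ASCII column are suppressed */
	acpi_ut_dump_buffer(buf, sizeof(buf),
			    DB_DISPLAY_DATA_ONLY | DB_BYTE_DISPLAY, 0);
}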
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index e805abdd95b8..30198c828ab6 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -289,9 +289,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
value);
length = ACPI_EISAID_STRING_SIZE;
} else { /* ACPI_TYPE_STRING */
-
/* Copy the String CID from the returned object */
-
strcpy(next_id_string, cid_objects[i]->string.pointer);
length = cid_objects[i]->string.length + 1;
}
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 8052f7ef5025..14de4d15e618 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -660,7 +660,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
case ACPI_DESC_TYPE_PARSER:
acpi_os_printf
- ("AmlOpcode 0x%04hX\n",
+ ("AmlOpcode 0x%04X\n",
descriptor->op.asl.
aml_opcode);
break;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 5a7551d060f2..33f71983e001 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1057,8 +1057,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
*/
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
- u64 mask, dmaaddr = 0, size = 0, offset = 0;
- int ret, msb;
+ u64 end, mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret;
/*
* If @dev is expected to be DMA-capable then the bus code that created
@@ -1085,19 +1085,13 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
}
if (!ret) {
- msb = fls64(dmaaddr + size - 1);
/*
- * Round-up to the power-of-two mask or set
- * the mask to the whole 64-bit address space
- * in case the DMA region covers the full
- * memory window.
+ * Limit coherent and dma mask based on size retrieved from
+ * firmware.
*/
- mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
- /*
- * Limit coherent and dma mask based on size
- * retrieved from firmware.
- */
- dev->bus_dma_mask = mask;
+ end = dmaaddr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
dev->coherent_dma_mask = mask;
*dev->dma_mask = mask;
}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 4a2cde2c536a..d27b01c0323d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -44,9 +44,19 @@
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
-#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
-#define ACPI_BUTTON_LID_INIT_OPEN 0x01
-#define ACPI_BUTTON_LID_INIT_METHOD 0x02
+enum {
+ ACPI_BUTTON_LID_INIT_IGNORE,
+ ACPI_BUTTON_LID_INIT_OPEN,
+ ACPI_BUTTON_LID_INIT_METHOD,
+ ACPI_BUTTON_LID_INIT_DISABLED,
+};
+
+static const char * const lid_init_state_str[] = {
+ [ACPI_BUTTON_LID_INIT_IGNORE] = "ignore",
+ [ACPI_BUTTON_LID_INIT_OPEN] = "open",
+ [ACPI_BUTTON_LID_INIT_METHOD] = "method",
+ [ACPI_BUTTON_LID_INIT_DISABLED] = "disabled",
+};
#define _COMPONENT ACPI_BUTTON_COMPONENT
ACPI_MODULE_NAME("button");
@@ -65,18 +75,39 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
-/*
- * Some devices which don't even have a lid in anyway have a broken _LID
- * method (e.g. pointing to a floating gpio pin) causing spurious LID events.
- */
-static const struct dmi_system_id lid_blacklst[] = {
+/* Please keep this list sorted alphabetically by vendor and model */
+static const struct dmi_system_id dmi_lid_quirks[] = {
+ {
+ /*
+ * Asus T200TA, _LID keeps reporting closed after every second
+ * opening of the lid, causing an immediate re-suspend on every
+ * other open. Using LID_INIT_OPEN fixes this.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
{
- /* GP-electronic T701 */
+ /* GP-electronic T701, _LID method points to a floating GPIO */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
},
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
+ },
+ {
+ /*
+ * Medion Akoya E2215T, notification of the LID device only
+ * happens on close, not on open, and _LID always returns closed.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E2215T MD60198"),
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
},
{}
};
@@ -116,9 +147,8 @@ struct acpi_button {
bool suspended;
};
-static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+static long lid_init_state = -1;
static unsigned long lid_report_interval __read_mostly = 500;
module_param(lid_report_interval, ulong, 0644);
@@ -146,7 +176,6 @@ static int acpi_lid_evaluate_state(struct acpi_device *device)
static int acpi_lid_notify_state(struct acpi_device *device, int state)
{
struct acpi_button *button = acpi_driver_data(device);
- int ret;
ktime_t next_report;
bool do_update;
@@ -223,18 +252,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
button->last_time = ktime_get();
}
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
- if (ret == NOTIFY_DONE)
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
- device);
- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
- /*
- * It is also regarded as success if the notifier_chain
- * returns NOTIFY_OK or NOTIFY_DONE.
- */
- ret = 0;
- }
- return ret;
+ return 0;
}
static int __maybe_unused acpi_button_state_seq_show(struct seq_file *seq,
@@ -331,18 +349,6 @@ static int acpi_button_remove_fs(struct acpi_device *device)
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
-int acpi_lid_notifier_register(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&acpi_lid_notifier, nb);
-}
-EXPORT_SYMBOL(acpi_lid_notifier_register);
-
-int acpi_lid_notifier_unregister(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb);
-}
-EXPORT_SYMBOL(acpi_lid_notifier_unregister);
-
int acpi_lid_open(void)
{
if (!lid_device)
@@ -472,7 +478,8 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
- if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) &&
+ lid_init_state == ACPI_BUTTON_LID_INIT_DISABLED)
return -ENODEV;
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
@@ -578,36 +585,30 @@ static int acpi_button_remove(struct acpi_device *device)
static int param_set_lid_init_state(const char *val,
const struct kernel_param *kp)
{
- int result = 0;
-
- if (!strncmp(val, "open", sizeof("open") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
- pr_info("Notify initial lid state as open\n");
- } else if (!strncmp(val, "method", sizeof("method") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
- pr_info("Notify initial lid state with _LID return value\n");
- } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
- lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
- pr_info("Do not notify initial lid state\n");
- } else
- result = -EINVAL;
- return result;
+ int i;
+
+ i = sysfs_match_string(lid_init_state_str, val);
+ if (i < 0)
+ return i;
+
+ lid_init_state = i;
+ pr_info("Initial lid state set to '%s'\n", lid_init_state_str[i]);
+ return 0;
}
-static int param_get_lid_init_state(char *buffer,
- const struct kernel_param *kp)
+static int param_get_lid_init_state(char *buf, const struct kernel_param *kp)
{
- switch (lid_init_state) {
- case ACPI_BUTTON_LID_INIT_OPEN:
- return sprintf(buffer, "open");
- case ACPI_BUTTON_LID_INIT_METHOD:
- return sprintf(buffer, "method");
- case ACPI_BUTTON_LID_INIT_IGNORE:
- return sprintf(buffer, "ignore");
- default:
- return sprintf(buffer, "invalid");
- }
- return 0;
+ int i, c = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lid_init_state_str); i++)
+ if (i == lid_init_state)
+ c += sprintf(buf + c, "[%s] ", lid_init_state_str[i]);
+ else
+ c += sprintf(buf + c, "%s ", lid_init_state_str[i]);
+
+ buf[c - 1] = '\n'; /* Replace the final space with a newline */
+
+ return c;
}
module_param_call(lid_init_state,
@@ -617,6 +618,16 @@ MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
static int acpi_button_register_driver(struct acpi_driver *driver)
{
+ const struct dmi_system_id *dmi_id;
+
+ if (lid_init_state == -1) {
+ dmi_id = dmi_first_match(dmi_lid_quirks);
+ if (dmi_id)
+ lid_init_state = (long)dmi_id->driver_data;
+ else
+ lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+ }
+
/*
* Modules such as nouveau.ko and i915.ko have a link time dependency
* on acpi_lid_open(), and would therefore not be loadable on ACPI
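[Editor's note: the setter/getter pair above follows a generic string-enum module parameter pattern; here is a standalone sketch with illustrative my_* names, not part of this patch.]

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

static const char * const my_modes[] = { "off", "auto", "on" };
static int my_mode = 1;	/* default: "auto" */

static int my_param_set(const char *val, const struct kernel_param *kp)
{
	/* Map the written string to its index in my_modes[] */
	int i = sysfs_match_string(my_modes, val);

	if (i < 0)
		return i;
	my_mode = i;
	return 0;
}

static int my_param_get(char *buf, const struct kernel_param *kp)
{
	int i, c = 0;

	/* Print all choices, bracketing the currently active one */
	for (i = 0; i < ARRAY_SIZE(my_modes); i++)
		c += sprintf(buf + c, i == my_mode ? "[%s] " : "%s ",
			     my_modes[i]);
	buf[c - 1] = '\n';
	return c;
}

module_param_call(my_mode_param, my_param_set, my_param_get, NULL, 0644);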
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index da1e5c5ce150..4fd84fbdac29 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -95,12 +95,12 @@ enum {
EC_FLAGS_QUERY_ENABLED, /* Query is enabled */
EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
- EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
+ EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
- EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
+ EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_GPE_MASKED, /* GPE masked */
+ EC_FLAGS_EVENTS_MASKED, /* Events masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -397,8 +397,8 @@ static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
ec->reference_count++;
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
- ec->reference_count == 1)
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
+ ec->gpe >= 0 && ec->reference_count == 1)
acpi_ec_enable_gpe(ec, true);
}
@@ -407,28 +407,36 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
bool flushed = false;
ec->reference_count--;
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
- ec->reference_count == 0)
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
+ ec->gpe >= 0 && ec->reference_count == 0)
acpi_ec_disable_gpe(ec, true);
flushed = acpi_ec_flushed(ec);
if (flushed)
wake_up(&ec->wait);
}
-static void acpi_ec_mask_gpe(struct acpi_ec *ec)
+static void acpi_ec_mask_events(struct acpi_ec *ec)
{
- if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
- acpi_ec_disable_gpe(ec, false);
+ if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
+ if (ec->gpe >= 0)
+ acpi_ec_disable_gpe(ec, false);
+ else
+ disable_irq_nosync(ec->irq);
+
ec_dbg_drv("Polling enabled");
- set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
+ set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
}
}
-static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
+static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
- if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
- clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
- acpi_ec_enable_gpe(ec, false);
+ if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
+ if (ec->gpe >= 0)
+ acpi_ec_enable_gpe(ec, false);
+ else
+ enable_irq(ec->irq);
+
ec_dbg_drv("Polling disabled");
}
}
@@ -454,7 +462,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_mask_gpe(ec);
+ acpi_ec_mask_events(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -470,7 +478,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_unmask_gpe(ec);
+ acpi_ec_unmask_events(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -648,7 +656,9 @@ static void advance_transaction(struct acpi_ec *ec)
* ensure a hardware STS 0->1 change after this clearing can always
* trigger a GPE interrupt.
*/
- acpi_ec_clear_gpe(ec);
+ if (ec->gpe >= 0)
+ acpi_ec_clear_gpe(ec);
+
status = acpi_ec_read_status(ec);
t = ec->curr;
/*
@@ -717,7 +727,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_mask_gpe(ec);
+ acpi_ec_mask_events(ec);
}
}
out:
@@ -815,7 +825,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_unmask_gpe(ec);
+ acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1275,18 +1285,28 @@ static void acpi_ec_event_handler(struct work_struct *work)
acpi_ec_check_event(ec);
}
-static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
- u32 gpe_number, void *data)
+static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
unsigned long flags;
- struct acpi_ec *ec = data;
spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec);
spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, void *data)
+{
+ acpi_ec_handle_interrupt(data);
return ACPI_INTERRUPT_HANDLED;
}
+static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
+{
+ acpi_ec_handle_interrupt(data);
+ return IRQ_HANDLED;
+}
+
/* --------------------------------------------------------------------------
* Address Space Management
* -------------------------------------------------------------------------- */
@@ -1359,6 +1379,8 @@ static struct acpi_ec *acpi_ec_alloc(void)
ec->timestamp = jiffies;
ec->busy_polling = true;
ec->polling_guard = 0;
+ ec->gpe = -1;
+ ec->irq = -1;
return ec;
}
@@ -1406,9 +1428,13 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
/* Get GPE bit assignment (EC events). */
/* TODO: Add support for _GPE returning a package */
status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return status;
- ec->gpe = tmp;
+ if (ACPI_SUCCESS(status))
+ ec->gpe = tmp;
+
+ /*
+ * Errors are non-fatal, allowing for ACPI Reduced Hardware
+ * platforms which use GpioInt instead of GPE.
+ */
}
/* Use the global lock for all EC transactions? */
tmp = 0;
@@ -1418,12 +1444,57 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
return AE_CTRL_TERMINATE;
}
+static void install_gpe_event_handler(struct acpi_ec *ec)
+{
+ acpi_status status =
+ acpi_install_gpe_raw_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler,
+ ec);
+ if (ACPI_SUCCESS(status)) {
+ /* This is not fatal as we can poll EC events */
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
+ }
+}
+
+/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
+static int install_gpio_irq_event_handler(struct acpi_ec *ec,
+ struct acpi_device *device)
+{
+ int irq = acpi_dev_gpio_irq_get(device, 0);
+ int ret;
+
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
+ "ACPI EC", ec);
+
+ /*
+ * Unlike the GPE case, we treat errors here as fatal; we'll only
+ * implement GPIO polling if we find a case that needs it.
+ */
+ if (ret < 0)
+ return ret;
+
+ ec->irq = irq;
+ set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
+ acpi_ec_leave_noirq(ec);
+
+ return 0;
+}
+
/*
* Note: This function returns an error code only when the address space
* handler is not installed, which means "not able to handle
* transactions".
*/
-static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
+static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
+ bool handle_events)
{
acpi_status status;
@@ -1456,24 +1527,23 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
if (!handle_events)
return 0;
- if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
/* Find and register all query methods */
acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
acpi_ec_register_query_methods,
NULL, ec, NULL);
- set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
+ set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
- if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
- status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- /* This is not fatal as we can poll EC events */
- if (ACPI_SUCCESS(status)) {
- set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
- acpi_ec_leave_noirq(ec);
- if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
- acpi_ec_enable_gpe(ec, true);
+ if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0) {
+ install_gpe_event_handler(ec);
+ } else if (device) {
+ int ret = install_gpio_irq_event_handler(ec, device);
+
+ if (ret)
+ return ret;
+ } else { /* No GPE and no GpioInt? */
+ return -ENODEV;
}
}
/* EC is fully operational, allow queries */
@@ -1504,23 +1574,29 @@ static void ec_remove_handlers(struct acpi_ec *ec)
*/
acpi_ec_stop(ec, false);
- if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
- if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler)))
+ if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
+ if (ec->gpe >= 0 &&
+ ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
pr_err("failed to remove gpe handler\n");
- clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+
+ if (ec->irq >= 0)
+ free_irq(ec->irq, ec);
+
+ clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
}
- if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
+ if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
acpi_ec_remove_query_handlers(ec, true, 0);
- clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
+ clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
}
}
-static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
+static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
+ bool handle_events)
{
int ret;
- ret = ec_install_handlers(ec, handle_events);
+ ret = ec_install_handlers(ec, device, handle_events);
if (ret)
return ret;
@@ -1531,8 +1607,8 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
- ec->gpe, ec->command_addr, ec->data_addr);
+ "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
return ret;
}
@@ -1596,7 +1672,7 @@ static int acpi_ec_add(struct acpi_device *device)
}
}
- ret = acpi_ec_setup(ec, true);
+ ret = acpi_ec_setup(ec, device, true);
if (ret)
goto err_query;
@@ -1716,7 +1792,7 @@ void __init acpi_ec_dsdt_probe(void)
 * At this point, the GPE is not fully initialized, so do not
* handle the events.
*/
- ret = acpi_ec_setup(ec, false);
+ ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1889,14 +1965,21 @@ void __init acpi_ec_ecdt_probe(void)
ec->command_addr = ecdt_ptr->control.address;
ec->data_addr = ecdt_ptr->data.address;
}
- ec->gpe = ecdt_ptr->gpe;
+
+ /*
+ * Ignore the GPE value on Reduced Hardware platforms.
+ * Some products have this set to an erroneous value.
+ */
+ if (!acpi_gbl_reduced_hardware)
+ ec->gpe = ecdt_ptr->gpe;
+
ec->handle = ACPI_ROOT_OBJECT;
/*
* At this point, the namespace is not initialized, so do not find
* the namespace objects, or handle the events.
*/
- ret = acpi_ec_setup(ec, false);
+ ret = acpi_ec_setup(ec, NULL, false);
if (ret) {
acpi_ec_free(ec);
return;
@@ -1928,7 +2011,7 @@ static int acpi_ec_suspend_noirq(struct device *dev)
* masked at the low level without side effects.
*/
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
+ ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
acpi_ec_enter_noirq(ec);
@@ -1943,7 +2026,7 @@ static int acpi_ec_resume_noirq(struct device *dev)
acpi_ec_leave_noirq(ec);
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
- ec->reference_count >= 1)
+ ec->gpe >= 0 && ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
return 0;
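[Editor's note: the GpioInt fallback in isolation, for reference: on reduced-hardware platforms the EC has no usable GPE, so the event source is the first GpioInt resource in _CRS. A sketch with illustrative names; error handling is condensed.]

#include <linux/acpi.h>
#include <linux/gpio/consumer.h>	/* acpi_dev_gpio_irq_get() */
#include <linux/interrupt.h>

static irqreturn_t my_ec_irq_handler(int irq, void *data)
{
	/* advance the EC transaction state machine, as above */
	return IRQ_HANDLED;
}

static int my_setup_ec_event_source(struct acpi_device *adev, void *ec)
{
	int irq = acpi_dev_gpio_irq_get(adev, 0);	/* first GpioInt */

	if (irq < 0)
		return irq;	/* no GpioInt either: treated as fatal */

	return request_irq(irq, my_ec_irq_handler, IRQF_SHARED,
			   "my-ec", ec);
}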
diff --git a/drivers/acpi/hmat/Makefile b/drivers/acpi/hmat/Makefile
deleted file mode 100644
index 1c20ef36a385..000000000000
--- a/drivers/acpi/hmat/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ACPI_HMAT) := hmat.o
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index afe6636f9ad3..3616daec650b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -165,7 +165,8 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- u32 gpe;
+ int gpe;
+ int irq;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 14e68f202f81..a3320f93616d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1404,7 +1404,6 @@ static const struct attribute_group acpi_nfit_attribute_group = {
};
static const struct attribute_group *acpi_nfit_attribute_groups[] = {
- &nvdimm_bus_attribute_group,
&acpi_nfit_attribute_group,
NULL,
};
@@ -1698,8 +1697,6 @@ static const struct attribute_group acpi_nfit_dimm_attribute_group = {
};
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
- &nvdimm_attribute_group,
- &nd_device_attribute_group,
&acpi_nfit_dimm_attribute_group,
NULL,
};
@@ -2197,10 +2194,6 @@ static const struct attribute_group acpi_nfit_region_attribute_group = {
};
static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
- &nd_region_attribute_group,
- &nd_mapping_attribute_group,
- &nd_device_attribute_group,
- &nd_numa_attribute_group,
&acpi_nfit_region_attribute_group,
NULL,
};
diff --git a/drivers/acpi/hmat/Kconfig b/drivers/acpi/numa/Kconfig
index 95a29964dbea..fcf2e556d69d 100644
--- a/drivers/acpi/hmat/Kconfig
+++ b/drivers/acpi/numa/Kconfig
@@ -1,8 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
+config ACPI_NUMA
+ bool "NUMA support"
+ depends on NUMA
+ depends on (X86 || IA64 || ARM64)
+ default y if IA64 || ARM64
+
config ACPI_HMAT
bool "ACPI Heterogeneous Memory Attribute Table Support"
depends on ACPI_NUMA
select HMEM_REPORTING
+ select MEMREGION
help
If set, this option has the kernel parse and report the
platform's ACPI HMAT (Heterogeneous Memory Attributes Table),
diff --git a/drivers/acpi/numa/Makefile b/drivers/acpi/numa/Makefile
new file mode 100644
index 000000000000..517a6c689a94
--- /dev/null
+++ b/drivers/acpi/numa/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ACPI_NUMA) += srat.o
+obj-$(CONFIG_ACPI_HMAT) += hmat.o
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/numa/hmat.c
index 8b0de8a3c647..2c32cfb72370 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -8,12 +8,18 @@
* the applicable attributes with the node's interfaces.
*/
+#define pr_fmt(fmt) "acpi/hmat: " fmt
+#define dev_fmt(fmt) "acpi/hmat: " fmt
+
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <linux/list_sort.h>
+#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
@@ -49,6 +55,7 @@ struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
+ struct resource memregions;
struct node_hmem_attrs hmem_attrs;
struct list_head caches;
struct node_cache_attrs cache_attrs;
@@ -104,22 +111,36 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
list_add_tail(&initiator->node, &initiators);
}
-static __init void alloc_memory_target(unsigned int mem_pxm)
+static __init void alloc_memory_target(unsigned int mem_pxm,
+ resource_size_t start, resource_size_t len)
{
struct memory_target *target;
target = find_mem_target(mem_pxm);
- if (target)
- return;
-
- target = kzalloc(sizeof(*target), GFP_KERNEL);
- if (!target)
- return;
+ if (!target) {
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target)
+ return;
+ target->memory_pxm = mem_pxm;
+ target->processor_pxm = PXM_INVAL;
+ target->memregions = (struct resource) {
+ .name = "ACPI mem",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+ };
+ list_add_tail(&target->node, &targets);
+ INIT_LIST_HEAD(&target->caches);
+ }
- target->memory_pxm = mem_pxm;
- target->processor_pxm = PXM_INVAL;
- list_add_tail(&target->node, &targets);
- INIT_LIST_HEAD(&target->caches);
+ /*
+ * There are potentially multiple ranges per PXM, so record each
+ * in the per-target memregions resource tree.
+ */
+ if (!__request_region(&target->memregions, start, len, "memory target",
+ IORESOURCE_MEM))
+ pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
+ start, start + len, mem_pxm);
}
static __init const char *hmat_data_type(u8 type)
@@ -272,7 +293,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
u8 type, mem_hier;
if (hmat_loc->header.length < sizeof(*hmat_loc)) {
- pr_notice("HMAT: Unexpected locality header length: %d\n",
+ pr_notice("HMAT: Unexpected locality header length: %u\n",
hmat_loc->header.length);
return -EINVAL;
}
@@ -284,12 +305,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
sizeof(*inits) * ipds + sizeof(*targs) * tpds;
if (hmat_loc->header.length < total_size) {
- pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
+ pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
hmat_loc->header.length, total_size);
return -EINVAL;
}
- pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
+ pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
hmat_loc->flags, hmat_data_type(type), ipds, tpds,
hmat_loc->entry_base_unit);
@@ -302,7 +323,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
value = hmat_normalize(entries[init * tpds + targ],
hmat_loc->entry_base_unit,
type);
- pr_info(" Initiator-Target[%d-%d]:%d%s\n",
+ pr_info(" Initiator-Target[%u-%u]:%u%s\n",
inits[init], targs[targ], value,
hmat_data_type_suffix(type));
@@ -329,13 +350,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
- pr_notice("HMAT: Unexpected cache header length: %d\n",
+ pr_notice("HMAT: Unexpected cache header length: %u\n",
cache->header.length);
return -EINVAL;
}
attrs = cache->cache_attributes;
- pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
@@ -390,17 +411,17 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
struct memory_target *target = NULL;
if (p->header.length != sizeof(*p)) {
- pr_notice("HMAT: Unexpected address range header length: %d\n",
+ pr_notice("HMAT: Unexpected address range header length: %u\n",
p->header.length);
return -EINVAL;
}
if (hmat_revision == 1)
- pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->reserved3, p->reserved4, p->flags, p->processor_PD,
p->memory_PD);
else
- pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
@@ -417,7 +438,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
pr_debug("HMAT: Invalid Processor Domain\n");
return -EINVAL;
}
- target->processor_pxm = p_node;
+ target->processor_pxm = p->processor_PD;
}
return 0;
@@ -452,7 +473,7 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
return -EINVAL;
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return 0;
- alloc_memory_target(ma->proximity_domain);
+ alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
return 0;
}
@@ -613,11 +634,92 @@ static void hmat_register_target_perf(struct memory_target *target)
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
}
+static void hmat_register_target_device(struct memory_target *target,
+ struct resource *r)
+{
+ /* define a clean / non-busy resource for the platform device */
+ struct resource res = {
+ .start = r->start,
+ .end = r->end,
+ .flags = IORESOURCE_MEM,
+ };
+ struct platform_device *pdev;
+ struct memregion_info info;
+ int rc, id;
+
+ rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
+ IORES_DESC_SOFT_RESERVED);
+ if (rc != REGION_INTERSECTS)
+ return;
+
+ id = memregion_alloc(GFP_KERNEL);
+ if (id < 0) {
+ pr_err("memregion allocation failure for %pr\n", &res);
+ return;
+ }
+
+ pdev = platform_device_alloc("hmem", id);
+ if (!pdev) {
+ pr_err("hmem device allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ pdev->dev.numa_node = acpi_map_pxm_to_online_node(target->memory_pxm);
+ info = (struct memregion_info) {
+ .target_node = acpi_map_pxm_to_node(target->memory_pxm),
+ };
+ rc = platform_device_add_data(pdev, &info, sizeof(info));
+ if (rc < 0) {
+ pr_err("hmem memregion_info allocation failure for %pr\n", &res);
+ goto out_pdev;
+ }
+
+ rc = platform_device_add_resources(pdev, &res, 1);
+ if (rc < 0) {
+ pr_err("hmem resource allocation failure for %pr\n", &res);
+ goto out_resource;
+ }
+
+ rc = platform_device_add(pdev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "device add failed for %pr\n", &res);
+ goto out_resource;
+ }
+
+ return;
+
+out_resource:
+ put_device(&pdev->dev);
+out_pdev:
+ memregion_free(id);
+}
+
+static void hmat_register_target_devices(struct memory_target *target)
+{
+ struct resource *res;
+
+ /*
+ * Do not bother creating devices if no driver is available to
+ * consume them.
+ */
+ if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
+ return;
+
+ for (res = target->memregions.child; res; res = res->sibling)
+ hmat_register_target_device(target, res);
+}
+
static void hmat_register_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Devices may belong to either an offline or online
+ * node, so unconditionally add them.
+ */
+ hmat_register_target_devices(target);
+
+ /*
* Skip offline nodes. This can happen when memory
* marked EFI_MEMORY_SP, "specific purpose", is applied
 * to all the memory in a proximity domain, leading to
@@ -677,11 +779,21 @@ static __init void hmat_free_structures(void)
struct target_cache *tcache, *cnext;
list_for_each_entry_safe(target, tnext, &targets, node) {
+ struct resource *res, *res_next;
+
list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
list_del(&tcache->node);
kfree(tcache);
}
+
list_del(&target->node);
+ res = target->memregions.child;
+ while (res) {
+ res_next = res->sibling;
+ __release_region(&target->memregions, res->start,
+ resource_size(res));
+ res = res_next;
+ }
kfree(target);
}
@@ -748,4 +860,4 @@ out_put:
acpi_put_table(tbl);
return 0;
}
-subsys_initcall(hmat_init);
+device_initcall(hmat_init);
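[Editor's note: the memregions handling above relies on a private resource tree; a self-contained sketch of that pattern follows, with illustrative names and example ranges.]

#include <linux/ioport.h>
#include <linux/kernel.h>

static struct resource my_tree = {
	.name  = "example root",
	.start = 0,
	.end   = -1,		/* span everything so any range fits */
	.flags = IORESOURCE_MEM,
};

static void my_resource_tree_demo(void)
{
	struct resource *res;

	/* Two non-overlapping ranges become children of my_tree */
	__request_region(&my_tree, 0x1000, 0x1000, "range A", IORESOURCE_MEM);
	__request_region(&my_tree, 0x4000, 0x2000, "range B", IORESOURCE_MEM);

	/* Walk the children via the child/sibling links */
	for (res = my_tree.child; res; res = res->sibling)
		pr_info("child %pr\n", res);

	/* Release them the same way hmat_free_structures() does */
	while ((res = my_tree.child))
		__release_region(&my_tree, res->start, resource_size(res));
}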
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa/srat.c
index eadbf90e65d1..eadbf90e65d1 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa/srat.c
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index bec0bebc7f52..9f6853809138 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -473,9 +473,9 @@ static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
*/
/*
- * Without this this EEEpc exports a non working WMI interface, with
- * this it exports a working "good old" eeepc_laptop interface, fixing
- * both brightness control, and rfkill not working.
+ * Without this EEEpc exports a non working WMI interface, with
+ * this it exports a working "good old" eeepc_laptop interface,
+ * fixing both brightness control, and rfkill not working.
*/
{
.callback = dmi_enable_osi_linux,
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index 452041398b34..a371f273f99d 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -252,7 +252,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
struct regmap *regmap,
struct intel_pmic_opregion_data *d)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct intel_pmic_opregion *opregion;
int ret;
@@ -270,7 +270,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
opregion->regmap = regmap;
opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
- status = acpi_install_address_space_handler(handle,
+ if (d->power_table_count)
+ status = acpi_install_address_space_handler(handle,
PMIC_POWER_OPREGION_ID,
intel_pmic_power_handler,
NULL, opregion);
@@ -279,7 +280,8 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
goto out_error;
}
- status = acpi_install_address_space_handler(handle,
+ if (d->thermal_table_count)
+ status = acpi_install_address_space_handler(handle,
PMIC_THERMAL_OPREGION_ID,
intel_pmic_thermal_handler,
NULL, opregion);
@@ -301,12 +303,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
return 0;
out_remove_thermal_handler:
- acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID,
- intel_pmic_thermal_handler);
+ if (d->thermal_table_count)
+ acpi_remove_address_space_handler(handle,
+ PMIC_THERMAL_OPREGION_ID,
+ intel_pmic_thermal_handler);
out_remove_power_handler:
- acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
- intel_pmic_power_handler);
+ if (d->power_table_count)
+ acpi_remove_address_space_handler(handle,
+ PMIC_POWER_OPREGION_ID,
+ intel_pmic_power_handler);
out_error:
acpi_lpat_free_conversion_table(opregion->lpat_table);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index a0f411a6e5ac..2a692cc4b7ae 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Intel CrystalCove PMIC operation region driver
+ * Intel Bay Trail Crystal Cove PMIC operation region driver
*
* Copyright (C) 2014 Intel Corporation. All rights reserved.
*/
@@ -295,7 +295,7 @@ static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
static struct platform_driver intel_crc_pmic_opregion_driver = {
.probe = intel_crc_pmic_opregion_probe,
.driver = {
- .name = "crystal_cove_pmic",
+ .name = "byt_crystal_cove_pmic",
},
};
builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtcrc.c b/drivers/acpi/pmic/intel_pmic_chtcrc.c
new file mode 100644
index 000000000000..ebf8d3187df1
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_chtcrc.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail Crystal Cove PMIC operation region driver
+ *
+ * Copyright (C) 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include "intel_pmic.h"
+
+/*
+ * We have no docs for the CHT Crystal Cove PMIC. The Asus Zenfone-2 kernel
+ * code has 2 Crystal Cove regulator drivers, one of which calls the PMIC a
+ * "Crystal Cove Plus" PMIC and talks about Cherry Trail, so presumably that
+ * one could be used to get register info for the regulators if we need to
+ * implement regulator support in the future.
+ *
+ * For now the sole purpose of this driver is to make
+ * intel_soc_pmic_exec_mipi_pmic_seq_element work on devices with a
+ * CHT Crystal Cove PMIC.
+ */
+static struct intel_pmic_opregion_data intel_chtcrc_pmic_opregion_data = {
+ .pmic_i2c_address = 0x6e,
+};
+
+static int intel_chtcrc_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ return intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
+ &intel_chtcrc_pmic_opregion_data);
+}
+
+static struct platform_driver intel_chtcrc_pmic_opregion_driver = {
+ .probe = intel_chtcrc_pmic_opregion_probe,
+ .driver = {
+ .name = "cht_crystal_cove_pmic",
+ },
+};
+builtin_platform_driver(intel_chtcrc_pmic_opregion_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ed56c6d20b08..2ae95df2e74f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -642,6 +642,19 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
+static void wait_for_freeze(void)
+{
+#ifdef CONFIG_X86
+ /* No delay is needed if we are in a guest */
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+#endif
+ /*
+ * Dummy wait op - must do something useless after P_LVL2 read
+ * because chipsets cannot guarantee that STPCLK# signal
+ * gets asserted in time to freeze execution properly.
+ */
+ inl(acpi_gbl_FADT.xpm_timer_block.address);
+}
+
/**
* acpi_idle_do_entry - enter idle state using the appropriate method
* @cx: cstate data
@@ -658,10 +671,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else {
/* IO port based C-state */
inb(cx->address);
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
- inl(acpi_gbl_FADT.xpm_timer_block.address);
+ wait_for_freeze();
}
}
@@ -682,8 +692,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address);
- /* See comment in acpi_idle_do_entry() */
- inl(acpi_gbl_FADT.xpm_timer_block.address);
+ wait_for_freeze();
} else
return -ENODEV;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3eacf474e1e3..e601c4511a8b 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1317,6 +1317,52 @@ acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
args_count, args);
}
+static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct acpi_device *adev;
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "\\";
+
+ fwnode_handle_put(parent);
+
+ if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+
+ return dn->name;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (WARN_ON(!adev))
+ return NULL;
+
+ return acpi_device_bid(adev);
+}
+
+static const char *
+acpi_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ /* Is this the root node? */
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Is this 2nd node from the root? */
+ parent = fwnode_get_next_parent(parent);
+ if (!parent)
+ return "";
+
+ fwnode_handle_put(parent);
+
+ /* ACPI device or data node. */
+ return ".";
+}
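Together these two ops give generic code everything it needs to print full node paths: get_name() yields "\" for the root or the node's own name, and get_name_prefix() yields "." for any node below the first level. A hedged sketch, assuming the %pfw printk specifier from the same series is available to consume these ops:

/* Sketch only: %pfw is assumed to build the path via the ops above. */
static void log_fwnode_path(struct device *dev)
{
	dev_dbg(dev, "fwnode path: %pfw\n", dev_fwnode(dev));
}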
+
static struct fwnode_handle *
acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
{
@@ -1357,6 +1403,8 @@ acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
.get_parent = acpi_node_get_parent, \
.get_next_child_node = acpi_get_next_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_name = acpi_fwnode_get_name, \
+ .get_name_prefix = acpi_fwnode_get_name_prefix, \
.get_reference_args = acpi_fwnode_get_reference_args, \
.graph_get_next_endpoint = \
acpi_graph_get_next_endpoint, \
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index aad6be5c0af0..915650bf519f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2174,6 +2174,7 @@ int __init acpi_scan_init(void)
acpi_pci_root_init();
acpi_pci_link_init();
acpi_processor_init();
+ acpi_platform_init();
acpi_lpss_init();
acpi_apd_init();
acpi_cmos_rtc_init();
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index e3974a8f8fd4..804ac0df58ec 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -455,6 +455,7 @@ EXPORT_SYMBOL(acpi_evaluate_ost);
/**
* acpi_handle_path: Return the object path of handle
+ * @handle: ACPI device handle
*
* Caller must free the returned buffer
*/
@@ -473,6 +474,9 @@ static char *acpi_handle_path(acpi_handle handle)
/**
* acpi_handle_printk: Print message with ACPI prefix and object path
+ * @level: log level
+ * @handle: ACPI device handle
+ * @fmt: format string
*
* This function is called through acpi_handle_<level> macros and prints
* a message with ACPI prefix and object path. This function acquires
@@ -501,6 +505,9 @@ EXPORT_SYMBOL(acpi_handle_printk);
#if defined(CONFIG_DYNAMIC_DEBUG)
/**
* __acpi_handle_debug: pr_debug with ACPI prefix and object path
+ * @descriptor: Dynamic Debug descriptor
+ * @handle: ACPI device handle
+ * @fmt: format string
*
* This function is called through acpi_handle_debug macro and debug
* prints a message with ACPI prefix and object path. This function
@@ -695,6 +702,31 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2.
+ * Returns true if they match.
+ */
+bool acpi_dev_hid_uid_match(struct acpi_device *adev,
+ const char *hid2, const char *uid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+ const char *uid1 = acpi_device_uid(adev);
+
+ if (strcmp(hid1, hid2))
+ return false;
+
+ if (!uid2)
+ return true;
+
+ return uid1 && !strcmp(uid1, uid2);
+}
+EXPORT_SYMBOL(acpi_dev_hid_uid_match);
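A hypothetical caller, showing the NULL-UID convention; the HID and UID strings here are illustrative only:

/* Sketch only: match by _HID, optionally pinning _UID as well. */
static bool is_wanted_pmic(struct acpi_device *adev)
{
	/* NULL uid2 means "any _UID", per acpi_dev_hid_uid_match() above. */
	return acpi_dev_hid_uid_match(adev, "INT33F4", NULL) ||
	       acpi_dev_hid_uid_match(adev, "INT33FD", "1");
}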
+
+/**
* acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 265d9dd46a5e..e9bc9fcc7ea5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -65,6 +65,7 @@
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
+#include <linux/sizes.h>
#include <uapi/linux/android/binder.h>
#include <uapi/linux/android/binderfs.h>
@@ -92,11 +93,6 @@ static atomic_t binder_last_id;
static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K 0x400
-#endif
-
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
@@ -6058,7 +6054,7 @@ const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
.unlocked_ioctl = binder_ioctl,
- .compat_ioctl = binder_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = binder_mmap,
.open = binder_open,
.flush = binder_flush,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eb76a823fbb2..2d8b9b91dee0 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -268,7 +268,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
alloc->pages_high = index + 1;
trace_binder_alloc_page_end(alloc, index);
- /* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_read(&mm->mmap_sem);
@@ -277,8 +276,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
return 0;
free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
+ for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
bool ret;
size_t index;
@@ -291,6 +289,8 @@ free_range:
WARN_ON(!ret);
trace_binder_free_lru_end(alloc, index);
+ if (page_addr == start)
+ break;
continue;
err_vm_insert_page_failed:
@@ -298,7 +298,8 @@ err_vm_insert_page_failed:
page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
- ;
+ if (page_addr == start)
+ break;
}
err_no_vma:
if (mm) {
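The free_range loop above now ends on an explicit page_addr == start test rather than a page_addr >= start condition, presumably so page_addr is never decremented past start (for a range starting at the bottom of the address space, that subtraction would wrap). The idiom as a standalone sketch, assuming end > start and PAGE_SIZE alignment:

/* Sketch only: walk [start, end) downwards, never computing start - PAGE_SIZE. */
static void walk_pages_down(unsigned long start, unsigned long end,
			    void (*visit)(unsigned long addr))
{
	unsigned long addr;

	for (addr = end - PAGE_SIZE; ; addr -= PAGE_SIZE) {
		visit(addr);	/* hypothetical per-page work */
		if (addr == start)
			break;	/* start was just visited; stop */
	}
}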
@@ -681,17 +682,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
struct binder_buffer *buffer;
mutex_lock(&binder_alloc_mmap_lock);
- if (alloc->buffer) {
+ if (alloc->buffer_size) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
+ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+ SZ_4M);
+ mutex_unlock(&binder_alloc_mmap_lock);
alloc->buffer = (void __user *)vma->vm_start;
- mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
- SZ_4M);
alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
sizeof(alloc->pages[0]),
GFP_KERNEL);
@@ -722,8 +723,9 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
- mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer = NULL;
+ mutex_lock(&binder_alloc_mmap_lock);
+ alloc->buffer_size = 0;
err_already_mapped:
mutex_unlock(&binder_alloc_mmap_lock);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
@@ -841,14 +843,20 @@ void binder_alloc_print_pages(struct seq_file *m,
int free = 0;
mutex_lock(&alloc->mutex);
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
+ /*
+ * Make sure the binder_alloc is fully initialized; otherwise we might
+ * read inconsistent state.
+ */
+ if (binder_alloc_get_vma(alloc) != NULL) {
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
}
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ec6c64fce74a..4bfd1b14b390 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -910,7 +910,7 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
* value, don't extend it here. This happens on STA2X11, for example.
*
* XXX: manipulating the DMA mask from platform code is completely
- * bogus, platform code should use dev->bus_dma_mask instead..
+ * bogus, platform code should use dev->bus_dma_limit instead..
*/
if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
return 0;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index bfc617cc8ac5..948d2c6557f3 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -11,8 +11,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ahci_platform.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/libata.h>
@@ -100,7 +100,7 @@ struct imx_ahci_priv {
struct clk *phy_pclk0;
struct clk *phy_pclk1;
void __iomem *phy_base;
- int clkreq_gpio;
+ struct gpio_desc *clkreq_gpiod;
struct regmap *gpr;
bool no_device;
bool first_time;
@@ -980,7 +980,6 @@ static struct scsi_host_template ahci_platform_sht = {
static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
{
- int ret;
struct resource *phy_res;
struct platform_device *pdev = imxpriv->ahci_pdev;
struct device_node *np = dev->of_node;
@@ -1033,20 +1032,12 @@ static int imx8_sata_probe(struct device *dev, struct imx_ahci_priv *imxpriv)
}
/* Fetch GPIO, then enable the external OSC */
- imxpriv->clkreq_gpio = of_get_named_gpio(np, "clkreq-gpio", 0);
- if (gpio_is_valid(imxpriv->clkreq_gpio)) {
- ret = devm_gpio_request_one(dev, imxpriv->clkreq_gpio,
- GPIOF_OUT_INIT_LOW,
- "SATA CLKREQ");
- if (ret == -EBUSY) {
- dev_info(dev, "clkreq had been initialized.\n");
- } else if (ret) {
- dev_err(dev, "%d unable to get clkreq.\n", ret);
- return ret;
- }
- } else if (imxpriv->clkreq_gpio == -EPROBE_DEFER) {
- return imxpriv->clkreq_gpio;
- }
+ imxpriv->clkreq_gpiod = devm_gpiod_get_optional(dev, "clkreq",
+ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(imxpriv->clkreq_gpiod))
+ return PTR_ERR(imxpriv->clkreq_gpiod);
+ if (imxpriv->clkreq_gpiod)
+ gpiod_set_consumer_name(imxpriv->clkreq_gpiod, "SATA CLKREQ");
return 0;
}
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index ebecab8c3f36..135173c8d138 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -219,7 +219,6 @@ struct arasan_cf_dev {
static struct scsi_host_template arasan_cf_sht = {
ATA_BASE_SHT(DRIVER_NAME),
- .sg_tablesize = SG_NONE,
.dma_boundary = 0xFFFFFFFFUL,
};
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index cfd0cf2cbca6..e01a3a6e4d46 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -422,7 +422,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
#ifdef ATP867X_DEBUG
atp867x_check_res(pdev);
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
(unsigned long long)(host->iomap[i]));
#endif
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 65ec8dff1c51..f3e62f5528bd 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -2329,7 +2329,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
- for (bar = 0; bar < 6; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bd9cd366d41..42a672456432 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -45,6 +45,10 @@ early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
/* Device links support. */
+static LIST_HEAD(wait_for_suppliers);
+static DEFINE_MUTEX(wfs_lock);
+static LIST_HEAD(deferred_sync);
+static unsigned int defer_sync_state_count = 1;
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -127,6 +131,9 @@ static int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
+
if (link->consumer == target)
return 1;
@@ -196,8 +203,11 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
- list_for_each_entry(link, &dev->links.consumers, s_node)
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ continue;
device_reorder_to_tail(link->consumer, NULL);
+ }
return 0;
}
@@ -224,7 +234,8 @@ void device_pm_move_to_tail(struct device *dev)
#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
DL_FLAG_AUTOREMOVE_SUPPLIER | \
- DL_FLAG_AUTOPROBE_CONSUMER)
+ DL_FLAG_AUTOPROBE_CONSUMER | \
+ DL_FLAG_SYNC_STATE_ONLY)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -292,6 +303,8 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
+ (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ flags != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -312,11 +325,14 @@ struct device_link *device_link_add(struct device *consumer,
/*
* If the supplier has not been fully registered yet or there is a
- * reverse dependency between the consumer and the supplier already in
- * the graph, return NULL.
+ * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
+ * the supplier already in the graph, return NULL. If the link is a
+ * SYNC_STATE_ONLY link, we don't check for reverse dependencies
+ * because it only affects sync_state() callbacks.
*/
if (!device_pm_initialized(supplier)
- || device_is_dependent(consumer, supplier)) {
+ || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
+ device_is_dependent(consumer, supplier))) {
link = NULL;
goto out;
}
@@ -343,9 +359,14 @@ struct device_link *device_link_add(struct device *consumer,
}
if (flags & DL_FLAG_STATELESS) {
- link->flags |= DL_FLAG_STATELESS;
kref_get(&link->kref);
- goto out;
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(link->flags & DL_FLAG_STATELESS)) {
+ link->flags |= DL_FLAG_STATELESS;
+ goto reorder;
+ } else {
+ goto out;
+ }
}
/*
@@ -367,6 +388,12 @@ struct device_link *device_link_add(struct device *consumer,
link->flags |= DL_FLAG_MANAGED;
device_link_init_status(link, consumer, supplier);
}
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
+ goto reorder;
+ }
+
goto out;
}
@@ -406,6 +433,13 @@ struct device_link *device_link_add(struct device *consumer,
flags & DL_FLAG_PM_RUNTIME)
pm_runtime_resume(supplier);
+ if (flags & DL_FLAG_SYNC_STATE_ONLY) {
+ dev_dbg(consumer,
+ "Linked as a sync state only consumer to %s\n",
+ dev_name(supplier));
+ goto out;
+ }
+reorder:
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
@@ -431,6 +465,70 @@ struct device_link *device_link_add(struct device *consumer,
}
EXPORT_SYMBOL_GPL(device_link_add);
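Per the flag check added above, DL_FLAG_SYNC_STATE_ONLY must be passed on its own, and such a link affects only sync_state() sequencing, never probe or suspend ordering. A minimal sketch of creating one between two already-registered devices:

/* Sketch only: a sync_state-only device link. */
static void link_for_sync_state(struct device *consumer,
				struct device *supplier)
{
	/* SYNC_STATE_ONLY must be the sole flag, per device_link_add(). */
	if (!device_link_add(consumer, supplier, DL_FLAG_SYNC_STATE_ONLY))
		dev_warn(consumer, "no sync_state link to %s\n",
			 dev_name(supplier));
}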
+/**
+ * device_link_wait_for_supplier - Add device to wait_for_suppliers list
+ * @consumer: Consumer device
+ *
+ * Marks the @consumer device as waiting for suppliers to become available by
+ * adding it to the wait_for_suppliers list. The consumer device will not be
+ * probed until it is removed from the wait_for_suppliers list.
+ *
+ * The caller is responsible for adding the links to the supplier devices once
+ * they are available and removing the @consumer device from the
+ * wait_for_suppliers list once links to all the suppliers have been created.
+ *
+ * This function is NOT meant to be called from the probe function of the
+ * consumer but rather from code that creates/adds the consumer device.
+ */
+static void device_link_wait_for_supplier(struct device *consumer,
+ bool need_for_probe)
+{
+ mutex_lock(&wfs_lock);
+ list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers);
+ consumer->links.need_for_probe = need_for_probe;
+ mutex_unlock(&wfs_lock);
+}
+
+static void device_link_wait_for_mandatory_supplier(struct device *consumer)
+{
+ device_link_wait_for_supplier(consumer, true);
+}
+
+static void device_link_wait_for_optional_supplier(struct device *consumer)
+{
+ device_link_wait_for_supplier(consumer, false);
+}
+
+/**
+ * device_link_add_missing_supplier_links - Add links from consumer devices to
+ * supplier devices, leaving any
+ * consumer with inactive suppliers on
+ * the wait_for_suppliers list
+ *
+ * Loops through all consumers waiting on suppliers and tries to add all their
+ * supplier links. If that succeeds, the consumer device is removed from the
+ * wait_for_suppliers list. Otherwise, it is left on the wait_for_suppliers
+ * list. Devices left on the wait_for_suppliers list will not be probed.
+ *
+ * The fwnode add_links callback is expected to return 0 if it has found and
+ * added all the supplier links for the consumer device. It should return an
+ * error if it isn't able to do so.
+ *
+ * The caller of device_link_wait_for_supplier() is expected to call this once
+ * it's aware of potential suppliers becoming available.
+ */
+static void device_link_add_missing_supplier_links(void)
+{
+ struct device *dev, *tmp;
+
+ mutex_lock(&wfs_lock);
+ list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
+ links.needs_suppliers)
+ if (!fwnode_call_int_op(dev->fwnode, add_links, dev))
+ list_del_init(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
+}
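The contract this loop assumes of an fwnode add_links op, restated: return 0 once every supplier link exists, -ENODEV while a mandatory supplier is still missing, and any other error for a missing optional supplier (see the device_add() hunk below). A hedged sketch of an implementation; find_my_supplier() is purely hypothetical:

/* Sketch only: one possible add_links implementation. */
static int my_add_links(const struct fwnode_handle *fwnode, struct device *dev)
{
	struct device *supplier = find_my_supplier(fwnode); /* hypothetical */

	if (!supplier)
		return -ENODEV;	/* mandatory supplier not registered yet */

	if (!device_link_add(dev, supplier, DL_FLAG_AUTOREMOVE_CONSUMER))
		return -EINVAL;	/* treated as a missing optional supplier */

	return 0;		/* all supplier links created */
}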
+
static void device_link_free(struct device_link *link)
{
while (refcount_dec_not_one(&link->rpm_active))
@@ -565,10 +663,23 @@ int device_links_check_suppliers(struct device *dev)
struct device_link *link;
int ret = 0;
+ /*
+ * Device waiting for supplier to become available is not allowed to
+ * probe.
+ */
+ mutex_lock(&wfs_lock);
+ if (!list_empty(&dev->links.needs_suppliers) &&
+ dev->links.need_for_probe) {
+ mutex_unlock(&wfs_lock);
+ return -EPROBE_DEFER;
+ }
+ mutex_unlock(&wfs_lock);
+
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
if (link->status != DL_STATE_AVAILABLE) {
@@ -585,6 +696,128 @@ int device_links_check_suppliers(struct device *dev)
}
/**
+ * __device_links_queue_sync_state - Queue a device for sync_state() callback
+ * @dev: Device to call sync_state() on
+ * @list: List head to queue the @dev on
+ *
+ * Queues a device for a sync_state() callback when the device links write lock
+ * isn't held. This allows the sync_state() execution flow to use device links
+ * APIs. The caller must ensure this function is called with
+ * device_links_write_lock() held.
+ *
+ * This function does a get_device() to make sure the device is not freed while
+ * on this list.
+ *
+ * So the caller must also ensure that device_links_flush_sync_list() is called
+ * as soon as the caller releases device_links_write_lock(). This is necessary
+ * to make sure the sync_state() is called in a timely fashion and the
+ * put_device() is called on this device.
+ */
+static void __device_links_queue_sync_state(struct device *dev,
+ struct list_head *list)
+{
+ struct device_link *link;
+
+ if (dev->state_synced)
+ return;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+ if (link->status != DL_STATE_ACTIVE)
+ return;
+ }
+
+ /*
+ * Set the flag here to avoid adding the same device to a list more
+ * than once. This can happen if new consumers get added to the device
+ * and probed before the list is flushed.
+ */
+ dev->state_synced = true;
+
+ if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+ return;
+
+ get_device(dev);
+ list_add_tail(&dev->links.defer_sync, list);
+}
+
+/**
+ * device_links_flush_sync_list - Call sync_state() on a list of devices
+ * @list: List of devices to call sync_state() on
+ *
+ * Calls sync_state() on all the devices that have been queued for it. This
+ * function is used in conjunction with __device_links_queue_sync_state().
+ */
+static void device_links_flush_sync_list(struct list_head *list)
+{
+ struct device *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+ list_del_init(&dev->links.defer_sync);
+
+ device_lock(dev);
+
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+
+ device_unlock(dev);
+
+ put_device(dev);
+ }
+}
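The dev->driver->sync_state branch above means an individual driver can opt in without bus-level support. A hedged sketch of a supplier driver using the callback to release boot-time state once all consumers have probed; drop_boot_constraints() is hypothetical:

/* Sketch only: a supplier opting into sync_state(). */
static void my_sync_state(struct device *dev)
{
	drop_boot_constraints(dev);	/* hypothetical: release boot votes */
}

static struct platform_driver my_supplier_driver = {
	.driver = {
		.name = "my-supplier",
		.sync_state = my_sync_state,
	},
};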
+
+void device_links_supplier_sync_state_pause(void)
+{
+ device_links_write_lock();
+ defer_sync_state_count++;
+ device_links_write_unlock();
+}
+
+void device_links_supplier_sync_state_resume(void)
+{
+ struct device *dev, *tmp;
+ LIST_HEAD(sync_list);
+
+ device_links_write_lock();
+ if (!defer_sync_state_count) {
+ WARN(true, "Unmatched sync_state pause/resume!");
+ goto out;
+ }
+ defer_sync_state_count--;
+ if (defer_sync_state_count)
+ goto out;
+
+ list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+ /*
+ * Delete from deferred_sync list before queuing it to
+ * sync_list because defer_sync is used for both lists.
+ */
+ list_del_init(&dev->links.defer_sync);
+ __device_links_queue_sync_state(dev, &sync_list);
+ }
+out:
+ device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list);
+}
+
+static int sync_state_resume_initcall(void)
+{
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall(sync_state_resume_initcall);
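Pause and resume calls must balance or the WARN above fires; defer_sync_state_count starts at 1, so this late_initcall supplies the matching resume for boot. A sketch of how firmware-population code might batch registrations; register_my_devices() is hypothetical:

/* Sketch only: hold off sync_state() across a burst of device adds. */
static int __init my_populate_init(void)
{
	device_links_supplier_sync_state_pause();
	register_my_devices();	/* hypothetical: registers many devices */
	device_links_supplier_sync_state_resume();
	return 0;
}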
+
+static void __device_links_supplier_defer_sync(struct device *sup)
+{
+ if (list_empty(&sup->links.defer_sync))
+ list_add_tail(&sup->links.defer_sync, &deferred_sync);
+}
+
+/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
@@ -598,6 +831,16 @@ int device_links_check_suppliers(struct device *dev)
void device_links_driver_bound(struct device *dev)
{
struct device_link *link;
+ LIST_HEAD(sync_list);
+
+ /*
+ * If a device probes successfully, it's expected to have created all
+ * the device links it needs, or to create new ones as it needs them.
+ * So it no longer needs to wait on any suppliers.
+ */
+ mutex_lock(&wfs_lock);
+ list_del_init(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
device_links_write_lock();
@@ -628,11 +871,19 @@ void device_links_driver_bound(struct device *dev)
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(link->supplier);
+ else
+ __device_links_queue_sync_state(link->supplier,
+ &sync_list);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list);
}
static void device_link_drop_managed(struct device_link *link)
@@ -744,6 +995,7 @@ void device_links_driver_cleanup(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
+ list_del_init(&dev->links.defer_sync);
__device_links_no_driver(dev);
device_links_write_unlock();
@@ -813,7 +1065,8 @@ void device_links_unbind_consumers(struct device *dev)
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
- if (!(link->flags & DL_FLAG_MANAGED))
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
continue;
status = link->status;
@@ -849,6 +1102,10 @@ static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
+ mutex_lock(&wfs_lock);
+ list_del(&dev->links.needs_suppliers);
+ mutex_unlock(&wfs_lock);
+
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
@@ -1713,6 +1970,8 @@ void device_initialize(struct device *dev)
#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
+ INIT_LIST_HEAD(&dev->links.needs_suppliers);
+ INIT_LIST_HEAD(&dev->links.defer_sync);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -2101,7 +2360,7 @@ int device_add(struct device *dev)
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
- int error = -EINVAL;
+ int error = -EINVAL, fw_ret;
struct kobject *glue_dir = NULL;
dev = get_device(dev);
@@ -2199,6 +2458,32 @@ int device_add(struct device *dev)
BUS_NOTIFY_ADD_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+ if (dev->fwnode && !dev->fwnode->dev)
+ dev->fwnode->dev = dev;
+
+ /*
+ * Check if any of the other devices (consumers) have been waiting for
+ * this device (supplier) to be added so that they can create a device
+ * link to it.
+ *
+ * This needs to happen after device_pm_add() because device_link_add()
+ * requires the supplier be registered before it's called.
+ *
+ * But this also needs to happen before bus_probe_device() to make sure
+ * waiting consumers can link to it before the driver is bound to the
+ * device and the driver sync_state callback is called for this device.
+ */
+ device_link_add_missing_supplier_links();
+
+ if (fwnode_has_op(dev->fwnode, add_links)) {
+ fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (fw_ret == -ENODEV)
+ device_link_wait_for_mandatory_supplier(dev);
+ else if (fw_ret)
+ device_link_wait_for_optional_supplier(dev);
+ }
+
bus_probe_device(dev);
if (parent)
klist_add_tail(&dev->p->knode_parent,
@@ -2343,6 +2628,9 @@ void device_del(struct device *dev)
kill_device(dev);
device_unlock(dev);
+ if (dev->fwnode && dev->fwnode->dev == dev)
+ dev->fwnode->dev = NULL;
+
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 3f9e274e2ed3..5b24f3959255 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -148,7 +148,7 @@ config FW_LOADER_USER_HELPER_FALLBACK
to be used for all firmware requests which explicitly do not disable
a fallback mechanism. The firmware call which does prohibit a fallback
mechanism is request_firmware_direct(). This option is kept for
- backward compatibility purposes given this precise mechanism can also
+ backward compatibility purposes given this precise mechanism can also
be enabled by setting the proc sysctl value to true:
/proc/sys/kernel/firmware_config/force_sysfs_fallback
@@ -169,5 +169,17 @@ config FW_LOADER_COMPRESS
be compressed with either none or crc32 integrity check type (pass
"-C crc32" option to xz command).
+config FW_CACHE
+ bool "Enable firmware caching during suspend"
+ depends on PM_SLEEP
+ default y if PM_SLEEP
+ help
+ Because firmware caching generates uevent messages that are sent
+ over a netlink socket, it can prevent suspend on many platforms.
+ It is also not always useful, so on such platforms we provide the
+ option to disable it.
+
+ If unsure, say Y.
+
endif # FW_LOADER
endmenu
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index 37e5ae387400..4a66888e7253 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -8,7 +8,8 @@ fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
-FWSTR = $(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))
+comma := ,
+FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index bf44c79beae9..249add8c5e05 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2003 Manuel Estrada Sainz
*
- * Please see Documentation/firmware_class/ for more information.
+ * Please see Documentation/driver-api/firmware/ for more information.
*
*/
@@ -51,7 +51,7 @@ struct firmware_cache {
struct list_head head;
int state;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
/*
* Names of firmware images which have been cached successfully
* will be added into the below list so that device uncache
@@ -504,6 +504,7 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
path);
continue;
}
+ dev_dbg(device, "Loading firmware from %s\n", path);
if (decompress) {
dev_dbg(device, "f/w decompressing %s\n",
fw_priv->fw_name);
@@ -556,7 +557,7 @@ static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
(unsigned int)fw_priv->size);
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
static void fw_name_devm_release(struct device *dev, void *res)
{
struct fw_name_devm *fwn = res;
@@ -1046,7 +1047,7 @@ request_firmware_nowait(
}
EXPORT_SYMBOL(request_firmware_nowait);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FW_CACHE
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 84c4e1f72cbd..799b43191dea 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -19,15 +19,12 @@
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
-#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
-static DEFINE_MUTEX(mem_sysfs_mutex);
-
#define MEMORY_CLASS_NAME "memory"
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
@@ -538,12 +535,7 @@ static ssize_t soft_offline_page_store(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- if (!pfn_valid(pfn))
- return -ENXIO;
- /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
- if (!pfn_to_online_page(pfn))
- return -EIO;
- ret = soft_offline_page(pfn_to_page(pfn), 0);
+ ret = soft_offline_page(pfn, 0);
return ret == 0 ? count : ret;
}
@@ -705,6 +697,8 @@ static void unregister_memory(struct memory_block *memory)
* Create memory block devices for the given memory area. Start and size
* have to be aligned to memory block granularity. Memory block devices
* will be initialized as offline.
+ *
+ * Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size)
{
@@ -718,7 +712,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
!IS_ALIGNED(size, memory_block_size_bytes())))
return -EINVAL;
- mutex_lock(&mem_sysfs_mutex);
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
@@ -730,11 +723,12 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
for (block_id = start_block_id; block_id != end_block_id;
block_id++) {
mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
mem->section_count = 0;
unregister_memory(mem);
}
}
- mutex_unlock(&mem_sysfs_mutex);
return ret;
}
@@ -742,6 +736,8 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
* Remove memory block devices for the given memory area. Start and size
* have to be aligned to memory block granularity. Memory block devices
* have to be offline.
+ *
+ * Called under device_hotplug_lock.
*/
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
@@ -754,7 +750,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
!IS_ALIGNED(size, memory_block_size_bytes())))
return;
- mutex_lock(&mem_sysfs_mutex);
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
@@ -763,7 +758,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
}
- mutex_unlock(&mem_sysfs_mutex);
}
/* return true if the memory block is offlined, otherwise, return false */
@@ -797,12 +791,13 @@ static const struct attribute_group *memory_root_attr_groups[] = {
};
/*
- * Initialize the sysfs support for memory devices...
+ * Initialize the sysfs support for memory devices. At the time this function
+ * is called, we cannot have concurrent creation/deletion of memory block
+ * devices, so the device_hotplug_lock is not needed.
*/
void __init memory_dev_init(void)
{
int ret;
- int err;
unsigned long block_sz, nr;
/* Validate the configured memory block size */
@@ -813,24 +808,19 @@ void __init memory_dev_init(void)
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
- goto out;
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
/*
* Create entries for memory sections that were found
* during boot and have been initialized
*/
- mutex_lock(&mem_sysfs_mutex);
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
- err = add_memory_block(nr);
- if (!ret)
- ret = err;
+ ret = add_memory_block(nr);
+ if (ret)
+ panic("%s() failed to add memory block: %d\n", __func__,
+ ret);
}
- mutex_unlock(&mem_sysfs_mutex);
-
-out:
- if (ret)
- panic("%s() failed: %d\n", __func__, ret);
}
/**
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index b230beb6ccb4..7c532548b0a6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -60,6 +60,7 @@ struct resource *platform_get_resource(struct platform_device *dev,
}
EXPORT_SYMBOL_GPL(platform_get_resource);
+#ifdef CONFIG_HAS_IOMEM
/**
* devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
* device
@@ -68,7 +69,6 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
* resource management
* @index: resource index
*/
-#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
@@ -78,9 +78,63 @@ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, res);
}
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
+
+/**
+ * devm_platform_ioremap_resource_wc - write-combined variant of
+ * devm_platform_ioremap_resource()
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @index: resource index
+ */
+void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev,
+ unsigned int index)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ return devm_ioremap_resource_wc(&pdev->dev, res);
+}
+
+/**
+ * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
+ * a platform device, retrieve the
+ * resource by name
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @name: name of the resource
+ */
+void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ return devm_ioremap_resource(&pdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
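A hedged probe sketch combining the indexed and by-name variants; the "fifo" resource name is illustrative:

/* Sketch only: devres-managed MMIO mappings in a platform probe. */
static int my_probe(struct platform_device *pdev)
{
	void __iomem *ctrl, *fifo;

	ctrl = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	fifo = devm_platform_ioremap_resource_byname(pdev, "fifo");
	if (IS_ERR(fifo))
		return PTR_ERR(fifo);

	return 0;
}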
-static int __platform_get_irq(struct platform_device *dev, unsigned int num)
+/**
+ * platform_get_irq_optional - get an optional IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device. Device drivers should check the return
+ * value for errors so as to not pass a negative integer value to the
+ * request_irq() APIs. This is the same as platform_get_irq(), except that it
+ * does not print an error message if an IRQ can not be obtained.
+ *
+ * Example:
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
@@ -89,9 +143,9 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
return dev->archdata.irqs[num];
#else
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- int ret;
+ int ret;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
@@ -100,8 +154,6 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
if (has_acpi_companion(&dev->dev)) {
if (r && r->flags & IORESOURCE_DISABLED) {
- int ret;
-
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
return ret;
@@ -134,8 +186,7 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
* allows a common code path across either kind of resource.
*/
if (num == 0 && has_acpi_companion(&dev->dev)) {
- int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
-
+ ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
return ret;
@@ -144,6 +195,7 @@ static int __platform_get_irq(struct platform_device *dev, unsigned int num)
return -ENXIO;
#endif
}
+EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
* platform_get_irq - get an IRQ for a device
@@ -165,7 +217,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
{
int ret;
- ret = __platform_get_irq(dev, num);
+ ret = platform_get_irq_optional(dev, num);
if (ret < 0 && ret != -EPROBE_DEFER)
dev_err(&dev->dev, "IRQ index %u not found\n", num);
@@ -174,29 +226,6 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
EXPORT_SYMBOL_GPL(platform_get_irq);
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
- *
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * Example:
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
- *
- * Return: IRQ number on success, negative error number on failure.
- */
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
-{
- return __platform_get_irq(dev, num);
-}
-EXPORT_SYMBOL_GPL(platform_get_irq_optional);
-
-/**
* platform_irq_count - Count the number of IRQs a platform device uses
* @dev: platform device
*
@@ -206,7 +235,7 @@ int platform_irq_count(struct platform_device *dev)
{
int ret, nr = 0;
- while ((ret = __platform_get_irq(dev, nr)) >= 0)
+ while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
nr++;
if (ret == -EPROBE_DEFER)
@@ -245,10 +274,9 @@ static int __platform_get_irq_byname(struct platform_device *dev,
const char *name)
{
struct resource *r;
+ int ret;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- int ret;
-
ret = of_irq_get_byname(dev->dev.of_node, name);
if (ret > 0 || ret == -EPROBE_DEFER)
return ret;
@@ -1278,6 +1306,11 @@ struct bus_type platform_bus_type = {
};
EXPORT_SYMBOL_GPL(platform_bus_type);
+static inline int __platform_match(struct device *dev, const void *drv)
+{
+ return platform_match(dev, (struct device_driver *)drv);
+}
+
/**
* platform_find_device_by_driver - Find a platform device with a given
* driver.
@@ -1288,7 +1321,7 @@ struct device *platform_find_device_by_driver(struct device *start,
const struct device_driver *drv)
{
return bus_find_device(&platform_bus_type, start, drv,
- (void *)platform_match);
+ __platform_match);
}
EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
@@ -1296,8 +1329,6 @@ int __init platform_bus_init(void)
{
int error;
- early_platform_cleanup();
-
error = device_register(&platform_bus);
if (error) {
put_device(&platform_bus);
@@ -1309,289 +1340,3 @@ int __init platform_bus_init(void)
of_platform_register_reconfig_notifier();
return error;
}
-
-static __initdata LIST_HEAD(early_platform_driver_list);
-static __initdata LIST_HEAD(early_platform_device_list);
-
-/**
- * early_platform_driver_register - register early platform driver
- * @epdrv: early_platform driver structure
- * @buf: string passed from early_param()
- *
- * Helper function for early_platform_init() / early_platform_init_buffer()
- */
-int __init early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf)
-{
- char *tmp;
- int n;
-
- /* Simply add the driver to the end of the global list.
- * Drivers will by default be put on the list in compiled-in order.
- */
- if (!epdrv->list.next) {
- INIT_LIST_HEAD(&epdrv->list);
- list_add_tail(&epdrv->list, &early_platform_driver_list);
- }
-
- /* If the user has specified device then make sure the driver
- * gets prioritized. The driver of the last device specified on
- * command line will be put first on the list.
- */
- n = strlen(epdrv->pdrv->driver.name);
- if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
- list_move(&epdrv->list, &early_platform_driver_list);
-
- /* Allow passing parameters after device name */
- if (buf[n] == '\0' || buf[n] == ',')
- epdrv->requested_id = -1;
- else {
- epdrv->requested_id = simple_strtoul(&buf[n + 1],
- &tmp, 10);
-
- if (buf[n] != '.' || (tmp == &buf[n + 1])) {
- epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
- n = 0;
- } else
- n += strcspn(&buf[n + 1], ",") + 1;
- }
-
- if (buf[n] == ',')
- n++;
-
- if (epdrv->bufsize) {
- memcpy(epdrv->buffer, &buf[n],
- min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
- epdrv->buffer[epdrv->bufsize - 1] = '\0';
- }
- }
-
- return 0;
-}
-
-/**
- * early_platform_add_devices - adds a number of early platform devices
- * @devs: array of early platform devices to add
- * @num: number of early platform devices in array
- *
- * Used by early architecture code to register early platform devices and
- * their platform data.
- */
-void __init early_platform_add_devices(struct platform_device **devs, int num)
-{
- struct device *dev;
- int i;
-
- /* simply add the devices to list */
- for (i = 0; i < num; i++) {
- dev = &devs[i]->dev;
-
- if (!dev->devres_head.next) {
- pm_runtime_early_init(dev);
- INIT_LIST_HEAD(&dev->devres_head);
- list_add_tail(&dev->devres_head,
- &early_platform_device_list);
- }
- }
-}
-
-/**
- * early_platform_driver_register_all - register early platform drivers
- * @class_str: string to identify early platform driver class
- *
- * Used by architecture code to register all early platform drivers
- * for a certain class. If omitted then only early platform drivers
- * with matching kernel command line class parameters will be registered.
- */
-void __init early_platform_driver_register_all(char *class_str)
-{
- /* The "class_str" parameter may or may not be present on the kernel
- * command line. If it is present then there may be more than one
- * matching parameter.
- *
- * Since we register our early platform drivers using early_param()
- * we need to make sure that they also get registered in the case
- * when the parameter is missing from the kernel command line.
- *
- * We use parse_early_options() to make sure the early_param() gets
- * called at least once. The early_param() may be called more than
- * once since the name of the preferred device may be specified on
- * the kernel command line. early_platform_driver_register() handles
- * this case for us.
- */
- parse_early_options(class_str);
-}
-
-/**
- * early_platform_match - find early platform device matching driver
- * @epdrv: early platform driver structure
- * @id: id to match against
- */
-static struct platform_device * __init
-early_platform_match(struct early_platform_driver *epdrv, int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id == id)
- return pd;
-
- return NULL;
-}
-
-/**
- * early_platform_left - check if early platform driver has matching devices
- * @epdrv: early platform driver structure
- * @id: return true if id or above exists
- */
-static int __init early_platform_left(struct early_platform_driver *epdrv,
- int id)
-{
- struct platform_device *pd;
-
- list_for_each_entry(pd, &early_platform_device_list, dev.devres_head)
- if (platform_match(&pd->dev, &epdrv->pdrv->driver))
- if (pd->id >= id)
- return 1;
-
- return 0;
-}
-
-/**
- * early_platform_driver_probe_id - probe drivers matching class_str and id
- * @class_str: string to identify early platform driver class
- * @id: id to match against
- * @nr_probe: number of platform devices to successfully probe before exiting
- */
-static int __init early_platform_driver_probe_id(char *class_str,
- int id,
- int nr_probe)
-{
- struct early_platform_driver *epdrv;
- struct platform_device *match;
- int match_id;
- int n = 0;
- int left = 0;
-
- list_for_each_entry(epdrv, &early_platform_driver_list, list) {
- /* only use drivers matching our class_str */
- if (strcmp(class_str, epdrv->class_str))
- continue;
-
- if (id == -2) {
- match_id = epdrv->requested_id;
- left = 1;
-
- } else {
- match_id = id;
- left += early_platform_left(epdrv, id);
-
- /* skip requested id */
- switch (epdrv->requested_id) {
- case EARLY_PLATFORM_ID_ERROR:
- case EARLY_PLATFORM_ID_UNSET:
- break;
- default:
- if (epdrv->requested_id == id)
- match_id = EARLY_PLATFORM_ID_UNSET;
- }
- }
-
- switch (match_id) {
- case EARLY_PLATFORM_ID_ERROR:
- pr_warn("%s: unable to parse %s parameter\n",
- class_str, epdrv->pdrv->driver.name);
- /* fall-through */
- case EARLY_PLATFORM_ID_UNSET:
- match = NULL;
- break;
- default:
- match = early_platform_match(epdrv, match_id);
- }
-
- if (match) {
- /*
- * Set up a sensible init_name to enable
- * dev_name() and others to be used before the
- * rest of the driver core is initialized.
- */
- if (!match->dev.init_name && slab_is_available()) {
- if (match->id != -1)
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s.%d",
- match->name,
- match->id);
- else
- match->dev.init_name =
- kasprintf(GFP_KERNEL, "%s",
- match->name);
-
- if (!match->dev.init_name)
- return -ENOMEM;
- }
-
- if (epdrv->pdrv->probe(match))
- pr_warn("%s: unable to probe %s early.\n",
- class_str, match->name);
- else
- n++;
- }
-
- if (n >= nr_probe)
- break;
- }
-
- if (left)
- return n;
- else
- return -ENODEV;
-}
-
-/**
- * early_platform_driver_probe - probe a class of registered drivers
- * @class_str: string to identify early platform driver class
- * @nr_probe: number of platform devices to successfully probe before exiting
- * @user_only: only probe user specified early platform devices
- *
- * Used by architecture code to probe registered early platform drivers
- * within a certain class. For probe to happen a registered early platform
- * device matching a registered early platform driver is needed.
- */
-int __init early_platform_driver_probe(char *class_str,
- int nr_probe,
- int user_only)
-{
- int k, n, i;
-
- n = 0;
- for (i = -2; n < nr_probe; i++) {
- k = early_platform_driver_probe_id(class_str, i, nr_probe - n);
-
- if (k < 0)
- break;
-
- n += k;
-
- if (user_only)
- break;
- }
-
- return n;
-}
-
-/**
- * early_platform_cleanup - clean up early platform code
- */
-void __init early_platform_cleanup(void)
-{
- struct platform_device *pd, *pd2;
-
- /* clean up the devres list used to chain devices */
- list_for_each_entry_safe(pd, pd2, &early_platform_device_list,
- dev.devres_head) {
- list_del(&pd->dev.devres_head);
- memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
- }
-}
-
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 8db98a1f83dc..bbddb267c2e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -188,6 +188,26 @@ void dev_pm_domain_detach(struct device *dev, bool power_off)
EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
/**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that it's assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+ if (dev->pm_domain && dev->pm_domain->start)
+ return dev->pm_domain->start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
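A hedged sketch of the intended call site: the subsystem attaches the device to its PM domain with power-on requested, then asks the domain to start it:

/* Sketch only: typical subsystem probe-time sequence. */
static int my_bus_probe(struct device *dev)
{
	int ret;

	ret = dev_pm_domain_attach(dev, true);	/* true: power the domain on */
	if (ret)
		return ret;

	return dev_pm_domain_start(dev);	/* domain-level start (e.g. clocks) */
}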
+
+/**
* dev_pm_domain_set - Set PM domain of a device.
* @dev: Device whose PM domain is to be set.
* @pd: PM domain to be set, or NULL.
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cc85e87eaf05..8e5725b11ee8 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -634,6 +634,13 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
return ret;
}
+static int genpd_dev_pm_start(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+ return genpd_start_dev(genpd, dev);
+}
+
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -922,24 +929,6 @@ static int __init genpd_power_off_unused(void)
}
late_initcall(genpd_power_off_unused);
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-
-static bool genpd_present(const struct generic_pm_domain *genpd)
-{
- const struct generic_pm_domain *gpd;
-
- if (IS_ERR_OR_NULL(genpd))
- return false;
-
- list_for_each_entry(gpd, &gpd_list, gpd_list_node)
- if (gpd == genpd)
- return true;
-
- return false;
-}
-
-#endif
-
#ifdef CONFIG_PM_SLEEP
/**
@@ -1354,8 +1343,8 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
{
struct generic_pm_domain *genpd;
- genpd = dev_to_genpd(dev);
- if (!genpd_present(genpd))
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
return;
if (suspend) {
@@ -1805,6 +1794,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
genpd->domain.ops.restore_noirq = genpd_restore_noirq;
genpd->domain.ops.complete = genpd_complete;
+ genpd->domain.start = genpd_dev_pm_start;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -2020,6 +2010,16 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
return 0;
}
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+ const struct generic_pm_domain *gpd;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+ if (gpd == genpd)
+ return true;
+ return false;
+}
+
/**
* of_genpd_add_provider_simple() - Register a simple PM domain provider
* @np: Device node pointer associated with the PM domain provider.
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 39a06a0cfdaa..444f5c169a0b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -117,6 +117,13 @@ static inline bool device_pm_initialized(struct device *dev)
return dev->power.in_dpm_list;
}
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -141,6 +148,11 @@ static inline bool device_pm_initialized(struct device *dev)
return device_is_registered(dev);
}
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
@@ -149,21 +161,3 @@ static inline void device_pm_init(struct device *dev)
device_pm_sleep_init(dev);
pm_runtime_init(dev);
}
-
-#ifdef CONFIG_PM_SLEEP
-
-/* drivers/base/power/wakeup_stats.c */
-extern int wakeup_source_sysfs_add(struct device *parent,
- struct wakeup_source *ws);
-extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
-
-extern int pm_wakeup_source_sysfs_add(struct device *parent);
-
-#else /* !CONFIG_PM_SLEEP */
-
-static inline int pm_wakeup_source_sysfs_add(struct device *parent)
-{
- return 0;
-}
-
-#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 5ce77d1ef9fc..8e021082dba8 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -272,7 +272,7 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
@@ -299,7 +299,7 @@ void dev_pm_disable_wake_irq_check(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
- if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 81bd01ed4042..511f6d7acdfe 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -557,6 +557,42 @@ int device_add_properties(struct device *dev,
EXPORT_SYMBOL_GPL(device_add_properties);
/**
+ * fwnode_get_name - Return the name of a node
+ * @fwnode: The firmware node
+ *
+ * Returns a pointer to the node name.
+ */
+const char *fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name);
+}
+
+/**
+ * fwnode_get_name_prefix - Return the prefix of a node for printing purposes
+ * @fwnode: The firmware node
+ *
+ * Returns the prefix of a node, intended to be printed right before the node.
+ * The prefix also works as a separator between the nodes.
+ */
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name_prefix);
+}
+
+/**
+ * fwnode_get_parent - Return parent firmware node
+ * @fwnode: Firmware node whose parent is retrieved
+ *
+ * Return parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_parent);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_parent);
+
+/**
* fwnode_get_next_parent - Iterate to the node's parent
* @fwnode: Firmware whose parent is retrieved
*
@@ -578,17 +614,50 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_parent - Return parent firwmare node
- * @fwnode: Firmware whose parent is retrieved
+ * fwnode_count_parents - Return the number of parents a node has
+ * @fwnode: The node the parents of which are to be counted
*
- * Return parent firmware node of the given node if possible or %NULL if no
- * parent was available.
+ * Returns the number of parents a node has.
*/
-struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
{
- return fwnode_call_ptr_op(fwnode, get_parent);
+ struct fwnode_handle *__fwnode;
+ unsigned int count;
+
+ __fwnode = fwnode_get_parent(fwnode);
+
+ for (count = 0; __fwnode; count++)
+ __fwnode = fwnode_get_next_parent(__fwnode);
+
+ return count;
}
-EXPORT_SYMBOL_GPL(fwnode_get_parent);
+EXPORT_SYMBOL_GPL(fwnode_count_parents);
+
+/**
+ * fwnode_get_nth_parent - Return an nth parent of a node
+ * @fwnode: The node the parent of which is requested
+ * @depth: Distance of the parent from the node
+ *
+ * Returns the nth parent of a node. If there is no parent at the requested
+ * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
+ * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
+ *
+ * The caller is responsible for calling fwnode_handle_put() for the returned
+ * node.
+ */
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
+ unsigned int depth)
+{
+ unsigned int i;
+
+ fwnode_handle_get(fwnode);
+
+ for (i = 0; i < depth && fwnode; i++)
+ fwnode = fwnode_get_next_parent(fwnode);
+
+ return fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
* fwnode_get_next_child_node - Return the next child node handle for a node
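
A short sketch of how the new parent helpers compose (the variable names and
the depth are illustrative only); fwnode_get_nth_parent() returns a reference
that the caller must drop:

    unsigned int depth = fwnode_count_parents(fwnode);
    struct fwnode_handle *anc = fwnode_get_nth_parent(fwnode, 2);

    if (anc) {
            pr_info("%s has %u parents\n", fwnode_get_name(fwnode), depth);
            fwnode_handle_put(anc);  /* drop the reference taken above */
    }
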
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 7c0c5ca5953d..4af11a423475 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -104,15 +104,12 @@ static const struct attribute_group soc_attr_group = {
.is_visible = soc_attribute_mode,
};
-static const struct attribute_group *soc_attr_groups[] = {
- &soc_attr_group,
- NULL,
-};
-
static void soc_release(struct device *dev)
{
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+ kfree(soc_dev->dev.groups);
kfree(soc_dev);
}
@@ -121,6 +118,7 @@ static struct soc_device_attribute *early_soc_dev_attr;
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
struct soc_device *soc_dev;
+ const struct attribute_group **soc_attr_groups;
int ret;
if (!soc_bus_type.p) {
@@ -136,10 +134,18 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
goto out1;
}
+ soc_attr_groups = kcalloc(3, sizeof(*soc_attr_groups), GFP_KERNEL);
+ if (!soc_attr_groups) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+ soc_attr_groups[0] = &soc_attr_group;
+ soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
+
/* Fetch a unique (reclaimable) SOC ID. */
ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
- goto out2;
+ goto out3;
soc_dev->soc_dev_num = ret;
soc_dev->attr = soc_dev_attr;
@@ -150,15 +156,15 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
ret = device_register(&soc_dev->dev);
- if (ret)
- goto out3;
+ if (ret) {
+ put_device(&soc_dev->dev);
+ return ERR_PTR(ret);
+ }
return soc_dev;
out3:
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
- put_device(&soc_dev->dev);
- soc_dev = NULL;
+ kfree(soc_attr_groups);
out2:
kfree(soc_dev);
out1:
@@ -169,8 +175,6 @@ EXPORT_SYMBOL_GPL(soc_device_register);
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
- ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
-
device_unregister(&soc_dev->dev);
early_soc_dev_attr = NULL;
}
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index a1f3f0994f9f..d8d0dc0ca5ac 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -71,9 +71,9 @@ software_node_to_swnode(const struct software_node *node)
return swnode;
}
-const struct software_node *to_software_node(struct fwnode_handle *fwnode)
+const struct software_node *to_software_node(const struct fwnode_handle *fwnode)
{
- struct swnode *swnode = to_swnode(fwnode);
+ const struct swnode *swnode = to_swnode(fwnode);
return swnode ? swnode->node : NULL;
}
@@ -103,71 +103,15 @@ property_entry_get(const struct property_entry *prop, const char *name)
return NULL;
}
-static void
-property_set_pointer(struct property_entry *prop, const void *pointer)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- prop->pointer.u8_data = pointer;
- else
- prop->value.u8_data = *((u8 *)pointer);
- break;
- case DEV_PROP_U16:
- if (prop->is_array)
- prop->pointer.u16_data = pointer;
- else
- prop->value.u16_data = *((u16 *)pointer);
- break;
- case DEV_PROP_U32:
- if (prop->is_array)
- prop->pointer.u32_data = pointer;
- else
- prop->value.u32_data = *((u32 *)pointer);
- break;
- case DEV_PROP_U64:
- if (prop->is_array)
- prop->pointer.u64_data = pointer;
- else
- prop->value.u64_data = *((u64 *)pointer);
- break;
- case DEV_PROP_STRING:
- if (prop->is_array)
- prop->pointer.str = pointer;
- else
- prop->value.str = pointer;
- break;
- default:
- break;
- }
-}
-
static const void *property_get_pointer(const struct property_entry *prop)
{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- return prop->pointer.u8_data;
- return &prop->value.u8_data;
- case DEV_PROP_U16:
- if (prop->is_array)
- return prop->pointer.u16_data;
- return &prop->value.u16_data;
- case DEV_PROP_U32:
- if (prop->is_array)
- return prop->pointer.u32_data;
- return &prop->value.u32_data;
- case DEV_PROP_U64:
- if (prop->is_array)
- return prop->pointer.u64_data;
- return &prop->value.u64_data;
- case DEV_PROP_STRING:
- if (prop->is_array)
- return prop->pointer.str;
- return &prop->value.str;
- default:
+ if (!prop->length)
return NULL;
- }
+
+ if (prop->is_array)
+ return prop->pointer;
+
+ return &prop->value;
}
static const void *property_entry_find(const struct property_entry *props,
@@ -187,66 +131,6 @@ static const void *property_entry_find(const struct property_entry *props,
return pointer;
}
-static int property_entry_read_u8_array(const struct property_entry *props,
- const char *propname,
- u8 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u16_array(const struct property_entry *props,
- const char *propname,
- u16 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u32_array(const struct property_entry *props,
- const char *propname,
- u32 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int property_entry_read_u64_array(const struct property_entry *props,
- const char *propname,
- u64 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = property_entry_find(props, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
static int
property_entry_count_elems_of_size(const struct property_entry *props,
const char *propname, size_t length)
@@ -265,49 +149,45 @@ static int property_entry_read_int_array(const struct property_entry *props,
unsigned int elem_size, void *val,
size_t nval)
{
+ const void *pointer;
+ size_t length;
+
if (!val)
return property_entry_count_elems_of_size(props, name,
elem_size);
- switch (elem_size) {
- case sizeof(u8):
- return property_entry_read_u8_array(props, name, val, nval);
- case sizeof(u16):
- return property_entry_read_u16_array(props, name, val, nval);
- case sizeof(u32):
- return property_entry_read_u32_array(props, name, val, nval);
- case sizeof(u64):
- return property_entry_read_u64_array(props, name, val, nval);
- }
- return -ENXIO;
+ if (!is_power_of_2(elem_size) || elem_size > sizeof(u64))
+ return -ENXIO;
+
+ length = nval * elem_size;
+
+ pointer = property_entry_find(props, name, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(val, pointer, length);
+ return 0;
}
static int property_entry_read_string_array(const struct property_entry *props,
const char *propname,
const char **strings, size_t nval)
{
- const struct property_entry *prop;
const void *pointer;
- size_t array_len, length;
+ size_t length;
+ int array_len;
/* Find out the array length. */
- prop = property_entry_get(props, propname);
- if (!prop)
- return -EINVAL;
-
- if (prop->is_array)
- /* Find the length of an array. */
- array_len = property_entry_count_elems_of_size(props, propname,
- sizeof(const char *));
- else
- /* The array length for a non-array string property is 1. */
- array_len = 1;
+ array_len = property_entry_count_elems_of_size(props, propname,
+ sizeof(const char *));
+ if (array_len < 0)
+ return array_len;
/* Return how many there are if strings is NULL. */
if (!strings)
return array_len;
- array_len = min(nval, array_len);
+ array_len = min_t(size_t, nval, array_len);
length = array_len * sizeof(*strings);
pointer = property_entry_find(props, propname, length);
@@ -322,13 +202,15 @@ static int property_entry_read_string_array(const struct property_entry *props,
static void property_entry_free_data(const struct property_entry *p)
{
const void *pointer = property_get_pointer(p);
+ const char * const *src_str;
size_t i, nval;
if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer.str) {
+ if (p->type == DEV_PROP_STRING && p->pointer) {
+ src_str = p->pointer;
nval = p->length / sizeof(const char *);
for (i = 0; i < nval; i++)
- kfree(p->pointer.str[i]);
+ kfree(src_str[i]);
}
kfree(pointer);
} else if (p->type == DEV_PROP_STRING) {
@@ -337,29 +219,29 @@ static void property_entry_free_data(const struct property_entry *p)
kfree(p->name);
}
-static int property_copy_string_array(struct property_entry *dst,
- const struct property_entry *src)
+static const char * const *
+property_copy_string_array(const struct property_entry *src)
{
const char **d;
+ const char * const *src_str = src->pointer;
size_t nval = src->length / sizeof(*d);
int i;
d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ return NULL;
for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
- if (!d[i] && src->pointer.str[i]) {
+ d[i] = kstrdup(src_str[i], GFP_KERNEL);
+ if (!d[i] && src_str[i]) {
while (--i >= 0)
kfree(d[i]);
kfree(d);
- return -ENOMEM;
+ return NULL;
}
}
- dst->pointer.str = d;
- return 0;
+ return d;
}
static int property_entry_copy_data(struct property_entry *dst,
@@ -367,36 +249,35 @@ static int property_entry_copy_data(struct property_entry *dst,
{
const void *pointer = property_get_pointer(src);
const void *new;
- int error;
if (src->is_array) {
if (!src->length)
return -ENODATA;
if (src->type == DEV_PROP_STRING) {
- error = property_copy_string_array(dst, src);
- if (error)
- return error;
- new = dst->pointer.str;
+ new = property_copy_string_array(src);
+ if (!new)
+ return -ENOMEM;
} else {
new = kmemdup(pointer, src->length, GFP_KERNEL);
if (!new)
return -ENOMEM;
}
+
+ dst->is_array = true;
+ dst->pointer = new;
} else if (src->type == DEV_PROP_STRING) {
new = kstrdup(src->value.str, GFP_KERNEL);
if (!new && src->value.str)
return -ENOMEM;
+
+ dst->value.str = new;
} else {
- new = pointer;
+ dst->value = src->value;
}
dst->length = src->length;
- dst->is_array = src->is_array;
dst->type = src->type;
-
- property_set_pointer(dst, new);
-
dst->name = kstrdup(src->name, GFP_KERNEL);
if (!dst->name)
goto out_free_data;
@@ -515,12 +396,47 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
propname, val, nval);
}
+static const char *
+software_node_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct swnode *swnode = to_swnode(fwnode);
+
+ if (!swnode)
+ return "(null)";
+
+ return kobject_name(&swnode->kobj);
+}
+
+static const char *
+software_node_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ const char *prefix;
+
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Figure out the prefix from the parents. */
+ while (is_software_node(parent))
+ parent = fwnode_get_next_parent(parent);
+
+ prefix = fwnode_get_name_prefix(parent);
+ fwnode_handle_put(parent);
+
+ /* Guess something if prefix was NULL. */
+ return prefix ?: "/";
+}
+
static struct fwnode_handle *
software_node_get_parent(const struct fwnode_handle *fwnode)
{
struct swnode *swnode = to_swnode(fwnode);
- return swnode ? (swnode->parent ? &swnode->parent->fwnode : NULL) : NULL;
+ if (!swnode || !swnode->parent)
+ return NULL;
+
+ return fwnode_handle_get(&swnode->parent->fwnode);
}
static struct fwnode_handle *
@@ -612,6 +528,8 @@ static const struct fwnode_operations software_node_ops = {
.property_present = software_node_property_present,
.property_read_int_array = software_node_read_int_array,
.property_read_string_array = software_node_read_string_array,
+ .get_name = software_node_get_name,
+ .get_name_prefix = software_node_get_name_prefix,
.get_parent = software_node_get_parent,
.get_next_child_node = software_node_get_next_child,
.get_named_child_node = software_node_get_named_child_node,
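
For reference, the unified pointer/value layout above is what backs ordinary
property-entry tables; a small hypothetical example (scalars land in .value,
arrays in .pointer):

    static const char * const demo_modes[] = { "fast", "slow" };

    static const struct property_entry demo_props[] = {
            PROPERTY_ENTRY_U32("clock-frequency", 400000),
            PROPERTY_ENTRY_STRING_ARRAY("modes", demo_modes),
            { }
    };
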
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index bd7d3bb8b890..1553d41f0b91 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -857,7 +857,7 @@ static void fd_calibrate( void )
}
if (ATARIHW_PRESENT(FDCSPEED))
- dma_wd.fdc_speed = 0; /* always seek with 8 Mhz */;
+ dma_wd.fdc_speed = 0; /* always seek with 8 MHz */
DPRINT(("fd_calibrate\n"));
SET_IRQ_HANDLER( fd_calibrate_done );
/* we can't verify, since the speed may be incorrect */
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f86cea4c0f8d..840c3aef3c5c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -884,7 +884,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
start_new_tl_epoch(connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break;
- };
+ }
return rv;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 76457003f140..ee67bf929fac 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2663,6 +2663,28 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
return ret;
}
+#ifdef CONFIG_COMPAT
+static int pkt_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ /* compatible */
+ case CDROMEJECT:
+ case CDROMMULTISESSION:
+ case CDROMREADTOCENTRY:
+ case SCSI_IOCTL_SEND_COMMAND:
+ return pkt_ioctl(bdev, mode, cmd, (unsigned long)compat_ptr(arg));
+
+ /* FIXME: no handler so far */
+ case CDROM_LAST_WRITTEN:
+ /* handled in compat_blkdev_driver_ioctl */
+ case CDROM_SEND_PACKET:
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#endif
+
static unsigned int pkt_check_events(struct gendisk *disk,
unsigned int clearing)
{
@@ -2684,6 +2706,9 @@ static const struct block_device_operations pktcdvd_ops = {
.open = pkt_open,
.release = pkt_close,
.ioctl = pkt_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pkt_compat_ioctl,
+#endif
.check_events = pkt_check_events,
};
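
The only translation the compatible commands need is compat_ptr(), which
turns a 32-bit user address into a native user pointer (a plain cast on most
64-bit architectures, with a high-bit fixup on s390):

    void __user *argp = compat_ptr(arg);  /* 32-bit compat address -> user pointer */
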
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b2fd630de85..571612e233fe 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -634,7 +634,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
case VD_OP_GET_EFI:
case VD_OP_SET_EFI:
return -EOPNOTSUPP;
- };
+ }
map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6b331061d34b..97ab5ad171d4 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,15 @@ config TEGRA_GMI
Driver for the Tegra Generic Memory Interface bus which can be used
to attach devices such as NOR, UART, FPGA and more.
+config TI_PWMSS
+ bool
+ default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM || TI_EQEP)
+ help
+ PWM Subsystem driver support for AM33xx SoC.
+
+ PWM submodules require PWM config space access from submodule
+ drivers and require common parent driver support.
+
config TI_SYSC
bool "TI sysc interconnect target module driver"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 16b43d3468c6..1320bcf9fa9d 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_PWMSS) += ti-pwmss.o
obj-$(CONFIG_TI_SYSC) += ti-sysc.o
obj-$(CONFIG_TS_NBUS) += ts-nbus.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/bus/ti-pwmss.c
index e9c26c94251b..e9c26c94251b 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/bus/ti-pwmss.c
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index ac42ae4651ce..eebdcbef0578 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -996,6 +996,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
tracks->xa = 0;
tracks->error = 0;
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+
/* Grab the TOC header so we can see how many tracks there are */
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret) {
@@ -1162,7 +1168,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = open_for_data(cdi);
if (ret)
goto err;
- cdrom_mmc3_profile(cdi);
+ if (CDROM_CAN(CDC_GENERIC_PACKET))
+ cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@@ -2882,6 +2889,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
it doesn't give enough information or fails. then we return
the toc contents. */
use_toc:
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
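
The CDROM_CAN() checks added above gate on the capability bits a drive
actually advertises; the macro in <linux/cdrom.h> is roughly:

    #define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type))
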
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index df0fc997dc3e..26956c006987 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -439,8 +439,8 @@ config RAW_DRIVER
Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
See the raw(8) manpage for more details.
- Applications should preferably open the device (eg /dev/hda1)
- with the O_DIRECT flag.
+ Applications should preferably open the device (eg /dev/hda1)
+ with the O_DIRECT flag.
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
@@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
device randomness. Say Y here to assume the entropy provided by the
bootloader is trustworthy so it will be added to the kernel's entropy
pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
\ No newline at end of file
+ only mixes the entropy pool.
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 812d6aa6e013..bc54235a7022 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -63,7 +63,7 @@ config AGP_AMD64
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
- K8T400M, SiS755. It may also support other AGP bridges when loaded
+ K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
config AGP_INTEL
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 3daae8ddd511..8486c29d8324 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -386,17 +386,17 @@ config HW_RANDOM_MESON
If unsure, say Y.
config HW_RANDOM_CAVIUM
- tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on Cavium SoCs.
+ tristate "Cavium ThunderX Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Cavium SoCs.
- To compile this driver as a module, choose M here: the
- module will be called cavium_rng.
+ To compile this driver as a module, choose M here: the
+ module will be called cavium_rng.
- If unsure, say Y.
+ If unsure, say Y.
config HW_RANDOM_MTK
tristate "Mediatek Random Number Generator support"
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 4bad0614109b..7dc2c3ec4051 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -4,38 +4,38 @@
#
menuconfig IPMI_HANDLER
- tristate 'IPMI top-level message handler'
- depends on HAS_IOMEM
- select IPMI_DMI_DECODE if DMI
- help
- This enables the central IPMI message handler, required for IPMI
- to work.
+ tristate 'IPMI top-level message handler'
+ depends on HAS_IOMEM
+ select IPMI_DMI_DECODE if DMI
+ help
+ This enables the central IPMI message handler, required for IPMI
+ to work.
- IPMI is a standard for managing sensors (temperature,
- voltage, etc.) in a system.
+ IPMI is a standard for managing sensors (temperature,
+ voltage, etc.) in a system.
- See <file:Documentation/IPMI.txt> for more details on the driver.
+ See <file:Documentation/IPMI.txt> for more details on the driver.
- If unsure, say N.
+ If unsure, say N.
config IPMI_DMI_DECODE
- select IPMI_PLAT_DATA
- bool
+ select IPMI_PLAT_DATA
+ bool
config IPMI_PLAT_DATA
- bool
+ bool
if IPMI_HANDLER
config IPMI_PANIC_EVENT
- bool 'Generate a panic event to all BMCs on a panic'
- help
- When a panic occurs, this will cause the IPMI message handler to,
- by default, generate an IPMI event describing the panic to each
- interface registered with the message handler. This is always
- available, the module parameter for ipmi_msghandler named
- panic_op can be set to "event" to chose this value, this config
- simply causes the default value to be set to "event".
+ bool 'Generate a panic event to all BMCs on a panic'
+ help
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+ available, the module parameter for ipmi_msghandler named
+ panic_op can be set to "event" to choose this value; this config
+ simply causes the default value to be set to "event".
config IPMI_PANIC_STRING
bool 'Generate OEM events containing the panic string'
@@ -54,43 +54,43 @@ config IPMI_PANIC_STRING
causes the default value to be set to "string".
config IPMI_DEVICE_INTERFACE
- tristate 'Device interface for IPMI'
- help
- This provides an IOCTL interface to the IPMI message handler so
- userland processes may use IPMI. It supports poll() and select().
+ tristate 'Device interface for IPMI'
+ help
+ This provides an IOCTL interface to the IPMI message handler so
+ userland processes may use IPMI. It supports poll() and select().
config IPMI_SI
- tristate 'IPMI System Interface handler'
- select IPMI_PLAT_DATA
- help
- Provides a driver for System Interfaces (KCS, SMIC, BT).
- Currently, only KCS and SMIC are supported. If
- you are using IPMI, you should probably say "y" here.
+ tristate 'IPMI System Interface handler'
+ select IPMI_PLAT_DATA
+ help
+ Provides a driver for System Interfaces (KCS, SMIC, BT).
+ Currently, only KCS and SMIC are supported. If
+ you are using IPMI, you should probably say "y" here.
config IPMI_SSIF
- tristate 'IPMI SMBus handler (SSIF)'
- select I2C
- help
- Provides a driver for a SMBus interface to a BMC, meaning that you
- have a driver that must be accessed over an I2C bus instead of a
- standard interface. This module requires I2C support.
+ tristate 'IPMI SMBus handler (SSIF)'
+ select I2C
+ help
+ Provides a driver for a SMBus interface to a BMC, meaning that you
+ have a driver that must be accessed over an I2C bus instead of a
+ standard interface. This module requires I2C support.
config IPMI_POWERNV
- depends on PPC_POWERNV
- tristate 'POWERNV (OPAL firmware) IPMI interface'
- help
- Provides a driver for OPAL firmware-based IPMI interfaces.
+ depends on PPC_POWERNV
+ tristate 'POWERNV (OPAL firmware) IPMI interface'
+ help
+ Provides a driver for OPAL firmware-based IPMI interfaces.
config IPMI_WATCHDOG
- tristate 'IPMI Watchdog Timer'
- help
- This enables the IPMI watchdog timer.
+ tristate 'IPMI Watchdog Timer'
+ help
+ This enables the IPMI watchdog timer.
config IPMI_POWEROFF
- tristate 'IPMI Poweroff'
- help
- This enables a function to power off the system with IPMI if
- the IPMI management controller is capable of this.
+ tristate 'IPMI Poweroff'
+ help
+ This enables a function to power off the system with IPMI if
+ the IPMI management controller is capable of this.
endif # IPMI_HANDLER
@@ -126,7 +126,7 @@ config NPCM7XX_KCS_IPMI_BMC
config ASPEED_BT_IPMI_BMC
depends on ARCH_ASPEED || COMPILE_TEST
- depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
+ depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
tristate "BT IPMI bmc driver"
help
Provides a driver for the BT (Block Transfer) IPMI interface
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 74c6d1f34132..55986e10a124 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -893,6 +893,7 @@ static const struct file_operations ipmi_wdog_fops = {
.poll = ipmi_poll,
.write = ipmi_write,
.unlocked_ioctl = ipmi_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 7c9269e3477a..bd95aba1f9fe 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
if (copy_from_user(karg, arg, sizeof(karg)))
return -EFAULT;
+ /* sparc64 suseconds_t is 32-bit only */
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ karg[1] >>= 32;
+
return lp_set_timeout(minor, karg[0], karg[1]);
}
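
On sparc64, suseconds_t is only 32 bits wide, so a native 64-bit timeval slot
carries the microseconds in its upper half; the shift above recovers them. A
sketch with hypothetical names:

    __s64 raw = karg[1];       /* second 64-bit word of the native timeval */
    __s32 usec = raw >> 32;    /* sparc64 keeps suseconds_t in the high half */
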
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index c86f18aa8985..2c2381a806ae 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -619,20 +619,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
+ if ((time32[0] < 0) || (time32[1] < 0))
+ return -EINVAL;
+
return pp_set_timeout(pp->pdev, time32[0], time32[1]);
case PPSETTIME64:
if (copy_from_user(time64, argp, sizeof(time64)))
return -EFAULT;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] >>= 32;
+
return pp_set_timeout(pp->pdev, time64[0], time64[1]);
case PPGETTIME32:
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time32[0] = ts.tv_sec;
time32[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time32[0] < 0) || (time32[1] < 0))
- return -EINVAL;
if (copy_to_user(argp, time32, sizeof(time32)))
return -EFAULT;
@@ -643,8 +650,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
jiffies_to_timespec64(pp->pdev->timeout, &ts);
time64[0] = ts.tv_sec;
time64[1] = ts.tv_nsec / NSEC_PER_USEC;
- if ((time64[0] < 0) || (time64[1] < 0))
- return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
+ time64[1] <<= 32;
if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
@@ -670,14 +678,6 @@ static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
-#ifdef CONFIG_COMPAT
-static long pp_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static int pp_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
@@ -786,9 +786,7 @@ static const struct file_operations pp_fops = {
.write = pp_write,
.poll = pp_poll,
.unlocked_ioctl = pp_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pp_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = pp_open,
.release = pp_release,
};
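
These conversions replace open-coded wrappers with the new generic helper;
its behaviour is roughly the following (see fs/ioctl.c for the real
implementation):

    long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
            if (!file->f_op->unlocked_ioctl)
                    return -ENOIOCTLCMD;

            return file->f_op->unlocked_ioctl(file, cmd,
                                              (unsigned long)compat_ptr(arg));
    }
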
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 01b8868b9bed..909e0c3d82ea 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2166,6 +2166,7 @@ const struct file_operations random_fops = {
.write = random_write,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
};
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 2f6e087ec496..91c772e38bb5 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -670,20 +670,10 @@ static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vtpmx_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vtpmx_fops_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vtpmx_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 3259426f01dc..4df9b40d6342 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -919,6 +919,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
.pos = *ppos,
.u.data = &sgl,
};
+ unsigned int occupancy;
/*
* Rproc_serial does not yet support splice. To support splice
@@ -929,21 +930,18 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (is_rproc_serial(port->out_vq->vdev))
return -EINVAL;
- /*
- * pipe->nrbufs == 0 means there are no data to transfer,
- * so this returns just 0 for no data.
- */
pipe_lock(pipe);
- if (!pipe->nrbufs) {
- ret = 0;
+ ret = 0;
+ if (pipe_empty(pipe->head, pipe->tail))
goto error_out;
- }
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
+ occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ buf = alloc_buf(port->portdev->vdev, 0, occupancy);
+
if (!buf) {
ret = -ENOMEM;
goto error_out;
@@ -951,7 +949,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
sgl.n = 0;
sgl.len = 0;
- sgl.size = pipe->nrbufs;
+ sgl.size = occupancy;
sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
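
pipe->nrbufs is gone in favour of free-running head/tail cursors; the helpers
used above conceptually reduce to:

    static inline bool pipe_empty(unsigned int head, unsigned int tail)
    {
            return head == tail;
    }

    static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
    {
            return head - tail;  /* unsigned wrap-around yields the count */
    }
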
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index bfafd8f5e826..96b6de8a30e5 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -116,7 +116,6 @@ static int xilly_drv_probe(struct platform_device *op)
struct xilly_endpoint *endpoint;
int rc;
int irq;
- struct resource *res;
struct xilly_endpoint_hardware *ephw = &of_hw;
if (of_property_read_bool(dev->of_node, "dma-coherent"))
@@ -129,9 +128,7 @@ static int xilly_drv_probe(struct platform_device *op)
dev_set_drvdata(dev, endpoint);
- res = platform_get_resource(op, IORESOURCE_MEM, 0);
- endpoint->registers = devm_ioremap_resource(dev, res);
-
+ endpoint->registers = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(endpoint->registers))
return PTR_ERR(endpoint->registers);
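
This and the conversions below rely on the helper folding the two-step
lookup-and-map pattern into one call; it is roughly equivalent to:

    void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                                 unsigned int index)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, index);
            return devm_ioremap_resource(&pdev->dev, res);
    }
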
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c44247d0b83e..dc920daa6dbb 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -136,6 +136,13 @@ config COMMON_CLK_SI570
This driver supports Silicon Labs 570/571/598/599 programmable
clock generators.
+config COMMON_CLK_BM1880
+ bool "Clock driver for Bitmain BM1880 SoC"
+ depends on ARCH_BITMAIN || COMPILE_TEST
+ default ARCH_BITMAIN
+ help
+ This driver supports the clocks on Bitmain BM1880 SoC.
+
config COMMON_CLK_CDCE706
tristate "Clock driver for TI CDCE706 clock synthesizer"
depends on I2C
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 0138fb14e6f8..0696a0c1ab58 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
obj-$(CONFIG_COMMON_CLK_BD718XX) += clk-bd718x7.o
+obj-$(CONFIG_COMMON_CLK_BM1880) += clk-bm1880.o
obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index fac0ca56d42d..15dc4cd86d76 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -487,8 +487,7 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np)
if (IS_ERR(slow_osc))
goto unregister_slow_rc;
- clk_data = kzalloc(sizeof(*clk_data) + (2 * sizeof(struct clk_hw *)),
- GFP_KERNEL);
+ clk_data = kzalloc(struct_size(clk_data, hws, 2), GFP_KERNEL);
if (!clk_data)
goto unregister_slow_osc;
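
struct_size() from <linux/overflow.h> sizes a structure with a trailing
flexible array while saturating on overflow; for the allocation above it
evaluates to the same number of bytes as the open-coded form:

    size_t bytes = sizeof(*clk_data) + 2 * sizeof(clk_data->hws[0]);
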
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 71c2e9519ca8..e9da0e69bf6c 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -172,14 +172,12 @@ static int i2s_pll_clk_probe(struct platform_device *pdev)
struct clk *clk;
struct i2s_pll_clk *pll_clk;
struct clk_init_data init;
- struct resource *mem;
pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pll_clk->base = devm_ioremap_resource(dev, mem);
+ pll_clk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_clk->base))
return PTR_ERR(pll_clk->base);
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index aba787b2e771..500345d99adb 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -221,7 +221,6 @@ static int axs10x_pll_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *parent_name;
struct axs10x_pll_clk *pll_clk;
- struct resource *mem;
struct clk_init_data init = { };
int ret;
@@ -229,13 +228,11 @@ static int axs10x_pll_clk_probe(struct platform_device *pdev)
if (!pll_clk)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pll_clk->base = devm_ioremap_resource(dev, mem);
+ pll_clk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pll_clk->base))
return PTR_ERR(pll_clk->base);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pll_clk->lock = devm_ioremap_resource(dev, mem);
+ pll_clk->lock = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pll_clk->lock))
return PTR_ERR(pll_clk->lock);
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index b6d07ca0164f..290a2846a86b 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -19,7 +19,6 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
struct clk_hw_onecell_data *onecell;
const char *parent;
struct clk *parent_clk;
- struct resource *res;
void __iomem *reg, *gate;
parent_clk = devm_clk_get(dev, NULL);
@@ -27,8 +26,7 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
return PTR_ERR(parent_clk);
parent = __clk_get_name(parent_clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 802e488fd3c3..ded13ccf768e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -2192,7 +2192,6 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct clk_hw **hws;
struct bcm2835_cprman *cprman;
- struct resource *res;
const struct bcm2835_clk_desc *desc;
const size_t asize = ARRAY_SIZE(clk_desc_array);
const struct cprman_plat_data *pdata;
@@ -2211,8 +2210,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
spin_lock_init(&cprman->regs_lock);
cprman->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cprman->regs = devm_ioremap_resource(dev, res);
+ cprman->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cprman->regs))
return PTR_ERR(cprman->regs);
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index abf06fb6453e..411ff5fb2c07 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -14,7 +14,7 @@
#include "clk-aspeed.h"
-#define ASPEED_NUM_CLKS 36
+#define ASPEED_NUM_CLKS 38
#define ASPEED_RESET2_OFFSET 32
@@ -28,6 +28,7 @@
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
+#define ASPEED_MAC_CLK_DLY 0x48
#define ASPEED_STRAP 0x70
#define CLKIN_25MHZ_EN BIT(23)
#define AST2400_CLK_SOURCE_SEL BIT(18)
@@ -462,6 +463,30 @@ static int aspeed_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MAC] = hw;
+ if (of_device_is_compatible(pdev->dev.of_node, "aspeed,ast2500-scu")) {
+ /* RMII 50MHz RCLK */
+ hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0,
+ 50000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ /* RMII1 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
+ scu_base + ASPEED_MAC_CLK_DLY, 29, 0,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;
+
+ /* RMII2 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
+ scu_base + ASPEED_MAC_CLK_DLY, 30, 0,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;
+ }
+
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 20, 3, 0,
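
A hypothetical consumer of one of the new RCLK gates (the clock lookup
depends on board DT wiring, so treat this only as a sketch):

    struct clk *rclk = devm_clk_get(dev, "mac1rclk");

    if (!IS_ERR(rclk))
            clk_prepare_enable(rclk);  /* drive the 50MHz RCLK out to the PHY */
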
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index b1318e6b655b..392d01705b97 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -15,7 +15,7 @@
#include "clk-aspeed.h"
-#define ASPEED_G6_NUM_CLKS 67
+#define ASPEED_G6_NUM_CLKS 71
#define ASPEED_G6_SILICON_REV 0x004
@@ -40,6 +40,9 @@
#define ASPEED_G6_STRAP1 0x500
+#define ASPEED_MAC12_CLK_DLY 0x340
+#define ASPEED_MAC34_CLK_DLY 0x350
+
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
@@ -116,8 +119,6 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
[ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */
};
-static const char * const eclk_parent_names[] = { "mpll", "hpll", "dpll" };
-
static const struct clk_div_table ast2600_eclk_div_table[] = {
{ 0x0, 2 },
{ 0x1, 2 },
@@ -486,6 +487,11 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw;
+ /* MAC1/2 RMII 50MHz RCLK */
+ hw = clk_hw_register_fixed_rate(dev, "mac12rclk", "hpll", 0, 50000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
/* MAC1/2 AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac12", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 16, 3, 0,
@@ -495,6 +501,27 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC12] = hw;
+ /* RMII1 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac1rclk", "mac12rclk", 0,
+ scu_g6_base + ASPEED_MAC12_CLK_DLY, 29, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC1RCLK] = hw;
+
+ /* RMII2 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac2rclk", "mac12rclk", 0,
+ scu_g6_base + ASPEED_MAC12_CLK_DLY, 30, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC2RCLK] = hw;
+
+ /* MAC3/4 RMII 50MHz RCLK */
+ hw = clk_hw_register_fixed_rate(dev, "mac34rclk", "hclk", 0, 50000000);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
/* MAC3/4 AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac34", "hpll", 0,
scu_g6_base + 0x310, 24, 3, 0,
@@ -504,6 +531,22 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_MAC34] = hw;
+ /* RMII3 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac3rclk", "mac34rclk", 0,
+ scu_g6_base + ASPEED_MAC34_CLK_DLY, 29, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC3RCLK] = hw;
+
+ /* RMII4 50MHz (RCLK) output enable */
+ hw = clk_hw_register_gate(dev, "mac4rclk", "mac34rclk", 0,
+ scu_g6_base + ASPEED_MAC34_CLK_DLY, 30, 0,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_MAC4RCLK] = hw;
+
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
index ae6e5baee330..00926c587390 100644
--- a/drivers/clk/clk-bd718x7.c
+++ b/drivers/clk/clk-bd718x7.c
@@ -133,3 +133,4 @@ module_platform_driver(bd71837_clk);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD71837/BD71847/BD70528 chip clk driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd718xx-clk");
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
new file mode 100644
index 000000000000..4cd175afce9b
--- /dev/null
+++ b/drivers/clk/clk-bm1880.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Bitmain BM1880 SoC clock driver
+ *
+ * Copyright (c) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/bm1880-clock.h>
+
+#define BM1880_CLK_MPLL_CTL 0x00
+#define BM1880_CLK_SPLL_CTL 0x04
+#define BM1880_CLK_FPLL_CTL 0x08
+#define BM1880_CLK_DDRPLL_CTL 0x0c
+
+#define BM1880_CLK_ENABLE0 0x00
+#define BM1880_CLK_ENABLE1 0x04
+#define BM1880_CLK_SELECT 0x20
+#define BM1880_CLK_DIV0 0x40
+#define BM1880_CLK_DIV1 0x44
+#define BM1880_CLK_DIV2 0x48
+#define BM1880_CLK_DIV3 0x4c
+#define BM1880_CLK_DIV4 0x50
+#define BM1880_CLK_DIV5 0x54
+#define BM1880_CLK_DIV6 0x58
+#define BM1880_CLK_DIV7 0x5c
+#define BM1880_CLK_DIV8 0x60
+#define BM1880_CLK_DIV9 0x64
+#define BM1880_CLK_DIV10 0x68
+#define BM1880_CLK_DIV11 0x6c
+#define BM1880_CLK_DIV12 0x70
+#define BM1880_CLK_DIV13 0x74
+#define BM1880_CLK_DIV14 0x78
+#define BM1880_CLK_DIV15 0x7c
+#define BM1880_CLK_DIV16 0x80
+#define BM1880_CLK_DIV17 0x84
+#define BM1880_CLK_DIV18 0x88
+#define BM1880_CLK_DIV19 0x8c
+#define BM1880_CLK_DIV20 0x90
+#define BM1880_CLK_DIV21 0x94
+#define BM1880_CLK_DIV22 0x98
+#define BM1880_CLK_DIV23 0x9c
+#define BM1880_CLK_DIV24 0xa0
+#define BM1880_CLK_DIV25 0xa4
+#define BM1880_CLK_DIV26 0xa8
+#define BM1880_CLK_DIV27 0xac
+#define BM1880_CLK_DIV28 0xb0
+
+#define to_bm1880_pll_clk(_hw) container_of(_hw, struct bm1880_pll_hw_clock, hw)
+#define to_bm1880_div_clk(_hw) container_of(_hw, struct bm1880_div_hw_clock, hw)
+
+static DEFINE_SPINLOCK(bm1880_clk_lock);
+
+struct bm1880_clock_data {
+ void __iomem *pll_base;
+ void __iomem *sys_base;
+ struct clk_hw_onecell_data hw_data;
+};
+
+struct bm1880_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ u32 gate_reg;
+ s8 gate_shift;
+ unsigned long flags;
+};
+
+struct bm1880_mux_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parents;
+ s8 num_parents;
+ u32 reg;
+ s8 shift;
+ unsigned long flags;
+};
+
+struct bm1880_div_clock {
+ unsigned int id;
+ const char *name;
+ u32 reg;
+ u8 shift;
+ u8 width;
+ u32 initval;
+ const struct clk_div_table *table;
+ unsigned long flags;
+};
+
+struct bm1880_div_hw_clock {
+ struct bm1880_div_clock div;
+ void __iomem *base;
+ spinlock_t *lock;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+struct bm1880_composite_clock {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ const char * const *parents;
+ unsigned int num_parents;
+ unsigned long flags;
+
+ u32 gate_reg;
+ u32 mux_reg;
+ u32 div_reg;
+
+ s8 gate_shift;
+ s8 mux_shift;
+ s8 div_shift;
+ s8 div_width;
+ s16 div_initval;
+ const struct clk_div_table *table;
+};
+
+struct bm1880_pll_clock {
+ unsigned int id;
+ const char *name;
+ u32 reg;
+ unsigned long flags;
+};
+
+struct bm1880_pll_hw_clock {
+ struct bm1880_pll_clock pll;
+ void __iomem *base;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+static const struct clk_ops bm1880_pll_ops;
+static const struct clk_ops bm1880_clk_div_ops;
+
+#define GATE_DIV(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \
+ _div_shift, _div_width, _div_initval, _table, \
+ _flags) { \
+ .id = _id, \
+ .parent = _parent, \
+ .name = _name, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .div_reg = _div_reg, \
+ .div_shift = _div_shift, \
+ .div_width = _div_width, \
+ .div_initval = _div_initval, \
+ .table = _table, \
+ .mux_shift = -1, \
+ .flags = _flags, \
+ }
+
+#define GATE_MUX(_id, _name, _parents, _gate_reg, _gate_shift, \
+ _mux_reg, _mux_shift, _flags) { \
+ .id = _id, \
+ .parents = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .name = _name, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .div_shift = -1, \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .flags = _flags, \
+ }
+
+#define CLK_PLL(_id, _name, _parent, _reg, _flags) { \
+ .pll.id = _id, \
+ .pll.name = _name, \
+ .pll.reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, \
+ &bm1880_pll_ops, \
+ _flags), \
+ }
+
+#define CLK_DIV(_id, _name, _parent, _reg, _shift, _width, _initval, \
+ _table, _flags) { \
+ .div.id = _id, \
+ .div.name = _name, \
+ .div.reg = _reg, \
+ .div.shift = _shift, \
+ .div.width = _width, \
+ .div.initval = _initval, \
+ .div.table = _table, \
+ .hw.init = CLK_HW_INIT_HW(_name, _parent, \
+ &bm1880_clk_div_ops, \
+ _flags), \
+ }
+
+static struct clk_parent_data bm1880_pll_parent[] = {
+ { .fw_name = "osc", .name = "osc" },
+};
+
+/*
+ * All PLL clocks are marked as CRITICAL because they are crucial
+ * to the functioning of the SoC.
+ */
+static struct bm1880_pll_hw_clock bm1880_pll_clks[] = {
+ CLK_PLL(BM1880_CLK_MPLL, "clk_mpll", bm1880_pll_parent,
+ BM1880_CLK_MPLL_CTL, 0),
+ CLK_PLL(BM1880_CLK_SPLL, "clk_spll", bm1880_pll_parent,
+ BM1880_CLK_SPLL_CTL, 0),
+ CLK_PLL(BM1880_CLK_FPLL, "clk_fpll", bm1880_pll_parent,
+ BM1880_CLK_FPLL_CTL, 0),
+ CLK_PLL(BM1880_CLK_DDRPLL, "clk_ddrpll", bm1880_pll_parent,
+ BM1880_CLK_DDRPLL_CTL, 0),
+};
+
+/*
+ * Clocks marked as CRITICAL are needed for the proper functioning
+ * of the SoC.
+ */
+static const struct bm1880_gate_clock bm1880_gate_clks[] = {
+ { BM1880_CLK_AHB_ROM, "clk_ahb_rom", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 2, 0 },
+ { BM1880_CLK_AXI_SRAM, "clk_axi_sram", "clk_axi1",
+ BM1880_CLK_ENABLE0, 3, 0 },
+ /*
+ * Since this clock is sourcing the DDR memory, let's mark it as
+ * critical to avoid gating.
+ */
+ { BM1880_CLK_DDR_AXI, "clk_ddr_axi", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 4, CLK_IS_CRITICAL },
+ { BM1880_CLK_APB_EFUSE, "clk_apb_efuse", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 6, 0 },
+ { BM1880_CLK_AXI5_EMMC, "clk_axi5_emmc", "clk_axi5",
+ BM1880_CLK_ENABLE0, 7, 0 },
+ { BM1880_CLK_AXI5_SD, "clk_axi5_sd", "clk_axi5",
+ BM1880_CLK_ENABLE0, 10, 0 },
+ { BM1880_CLK_AXI4_ETH0, "clk_axi4_eth0", "clk_axi4",
+ BM1880_CLK_ENABLE0, 14, 0 },
+ { BM1880_CLK_AXI4_ETH1, "clk_axi4_eth1", "clk_axi4",
+ BM1880_CLK_ENABLE0, 16, 0 },
+ { BM1880_CLK_AXI1_GDMA, "clk_axi1_gdma", "clk_axi1",
+ BM1880_CLK_ENABLE0, 17, 0 },
+ /* Don't gate GPIO clocks as they are not owned by the GPIO driver */
+ { BM1880_CLK_APB_GPIO, "clk_apb_gpio", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 18, CLK_IGNORE_UNUSED },
+ { BM1880_CLK_APB_GPIO_INTR, "clk_apb_gpio_intr", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 19, CLK_IGNORE_UNUSED },
+ { BM1880_CLK_AXI1_MINER, "clk_axi1_miner", "clk_axi1",
+ BM1880_CLK_ENABLE0, 21, 0 },
+ { BM1880_CLK_AHB_SF, "clk_ahb_sf", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 22, 0 },
+ /*
+ * Not sure which module this clock is sourcing, but gating it
+ * prevents the system from booting. So, let's mark it as critical.
+ */
+ { BM1880_CLK_SDMA_AXI, "clk_sdma_axi", "clk_axi5",
+ BM1880_CLK_ENABLE0, 23, CLK_IS_CRITICAL },
+ { BM1880_CLK_APB_I2C, "clk_apb_i2c", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 25, 0 },
+ { BM1880_CLK_APB_WDT, "clk_apb_wdt", "clk_mux_axi6",
+ BM1880_CLK_ENABLE0, 26, 0 },
+ { BM1880_CLK_APB_JPEG, "clk_apb_jpeg", "clk_axi6",
+ BM1880_CLK_ENABLE0, 27, 0 },
+ { BM1880_CLK_AXI5_NF, "clk_axi5_nf", "clk_axi5",
+ BM1880_CLK_ENABLE0, 29, 0 },
+ { BM1880_CLK_APB_NF, "clk_apb_nf", "clk_axi6",
+ BM1880_CLK_ENABLE0, 30, 0 },
+ { BM1880_CLK_APB_PWM, "clk_apb_pwm", "clk_mux_axi6",
+ BM1880_CLK_ENABLE1, 0, 0 },
+ { BM1880_CLK_RV, "clk_rv", "clk_mux_rv",
+ BM1880_CLK_ENABLE1, 1, 0 },
+ { BM1880_CLK_APB_SPI, "clk_apb_spi", "clk_mux_axi6",
+ BM1880_CLK_ENABLE1, 2, 0 },
+ { BM1880_CLK_UART_500M, "clk_uart_500m", "clk_div_uart_500m",
+ BM1880_CLK_ENABLE1, 4, 0 },
+ { BM1880_CLK_APB_UART, "clk_apb_uart", "clk_axi6",
+ BM1880_CLK_ENABLE1, 5, 0 },
+ { BM1880_CLK_APB_I2S, "clk_apb_i2s", "clk_axi6",
+ BM1880_CLK_ENABLE1, 6, 0 },
+ { BM1880_CLK_AXI4_USB, "clk_axi4_usb", "clk_axi4",
+ BM1880_CLK_ENABLE1, 7, 0 },
+ { BM1880_CLK_APB_USB, "clk_apb_usb", "clk_axi6",
+ BM1880_CLK_ENABLE1, 8, 0 },
+ { BM1880_CLK_12M_USB, "clk_12m_usb", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE1, 11, 0 },
+ { BM1880_CLK_APB_VIDEO, "clk_apb_video", "clk_axi6",
+ BM1880_CLK_ENABLE1, 12, 0 },
+ { BM1880_CLK_APB_VPP, "clk_apb_vpp", "clk_axi6",
+ BM1880_CLK_ENABLE1, 15, 0 },
+ { BM1880_CLK_AXI6, "clk_axi6", "clk_mux_axi6",
+ BM1880_CLK_ENABLE1, 21, 0 },
+};
+
+static const char * const clk_a53_parents[] = { "clk_spll", "clk_mpll" };
+static const char * const clk_rv_parents[] = { "clk_div_1_rv", "clk_div_0_rv" };
+static const char * const clk_axi1_parents[] = { "clk_div_1_axi1", "clk_div_0_axi1" };
+static const char * const clk_axi6_parents[] = { "clk_div_1_axi6", "clk_div_0_axi6" };
+
+static const struct bm1880_mux_clock bm1880_mux_clks[] = {
+ { BM1880_CLK_MUX_RV, "clk_mux_rv", clk_rv_parents, 2,
+ BM1880_CLK_SELECT, 1, 0 },
+ { BM1880_CLK_MUX_AXI6, "clk_mux_axi6", clk_axi6_parents, 2,
+ BM1880_CLK_SELECT, 3, 0 },
+};
+
+static const struct clk_div_table bm1880_div_table_0[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_1[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_2[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 255, 256 }, { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_3[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 255, 256 }, { 511, 512 }, { 0, 0 }
+};
+
+static const struct clk_div_table bm1880_div_table_4[] = {
+ { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
+ { 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
+ { 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
+ { 12, 13 }, { 13, 14 }, { 14, 15 }, { 15, 16 },
+ { 16, 17 }, { 17, 18 }, { 18, 19 }, { 19, 20 },
+ { 20, 21 }, { 21, 22 }, { 22, 23 }, { 23, 24 },
+ { 24, 25 }, { 25, 26 }, { 26, 27 }, { 27, 28 },
+ { 28, 29 }, { 29, 30 }, { 30, 31 }, { 31, 32 },
+ { 127, 128 }, { 255, 256 }, { 511, 512 }, { 65535, 65536 },
+ { 0, 0 }
+};
+
+/*
+ * Clocks marked as CRITICAL are needed for the proper functioning
+ * of the SoC.
+ */
+static struct bm1880_div_hw_clock bm1880_div_clks[] = {
+ CLK_DIV(BM1880_CLK_DIV_0_RV, "clk_div_0_rv", &bm1880_pll_clks[1].hw,
+ BM1880_CLK_DIV12, 16, 5, 1, bm1880_div_table_0, 0),
+ CLK_DIV(BM1880_CLK_DIV_1_RV, "clk_div_1_rv", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV13, 16, 5, 1, bm1880_div_table_0, 0),
+ CLK_DIV(BM1880_CLK_DIV_UART_500M, "clk_div_uart_500m", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV15, 16, 7, 3, bm1880_div_table_1, 0),
+	CLK_DIV(BM1880_CLK_DIV_0_AXI1, "clk_div_0_axi1", &bm1880_pll_clks[0].hw,
+		BM1880_CLK_DIV21, 16, 5, 2, bm1880_div_table_0, 0),
+	CLK_DIV(BM1880_CLK_DIV_1_AXI1, "clk_div_1_axi1", &bm1880_pll_clks[2].hw,
+		BM1880_CLK_DIV22, 16, 5, 3, bm1880_div_table_0, 0),
+	CLK_DIV(BM1880_CLK_DIV_0_AXI6, "clk_div_0_axi6", &bm1880_pll_clks[2].hw,
+		BM1880_CLK_DIV27, 16, 5, 15, bm1880_div_table_0, 0),
+	CLK_DIV(BM1880_CLK_DIV_1_AXI6, "clk_div_1_axi6", &bm1880_pll_clks[0].hw,
+		BM1880_CLK_DIV28, 16, 5, 11, bm1880_div_table_0, 0),
+ CLK_DIV(BM1880_CLK_DIV_12M_USB, "clk_div_12m_usb", &bm1880_pll_clks[2].hw,
+ BM1880_CLK_DIV18, 16, 7, 125, bm1880_div_table_1, 0),
+};
+
+/*
+ * Clocks marked as CRITICAL are all needed for the proper functioning
+ * of the SoC.
+ */
+static struct bm1880_composite_clock bm1880_composite_clks[] = {
+ /*
+ * Since clk_a53 and clk_50m_a53 clocks are sourcing the CPU core,
+ * let's mark them as critical to avoid gating.
+ */
+ GATE_MUX(BM1880_CLK_A53, "clk_a53", clk_a53_parents,
+ BM1880_CLK_ENABLE0, 0, BM1880_CLK_SELECT, 0,
+ CLK_IS_CRITICAL),
+ GATE_DIV(BM1880_CLK_50M_A53, "clk_50m_a53", "clk_fpll",
+ BM1880_CLK_ENABLE0, 1, BM1880_CLK_DIV0, 16, 5, 30,
+ bm1880_div_table_0, CLK_IS_CRITICAL),
+ GATE_DIV(BM1880_CLK_EFUSE, "clk_efuse", "clk_fpll",
+ BM1880_CLK_ENABLE0, 5, BM1880_CLK_DIV1, 16, 7, 60,
+ bm1880_div_table_1, 0),
+ GATE_DIV(BM1880_CLK_EMMC, "clk_emmc", "clk_fpll",
+ BM1880_CLK_ENABLE0, 8, BM1880_CLK_DIV2, 16, 5, 15,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_100K_EMMC, "clk_100k_emmc", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE0, 9, BM1880_CLK_DIV3, 16, 8, 120,
+ bm1880_div_table_2, 0),
+ GATE_DIV(BM1880_CLK_SD, "clk_sd", "clk_fpll",
+ BM1880_CLK_ENABLE0, 11, BM1880_CLK_DIV4, 16, 5, 15,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_100K_SD, "clk_100k_sd", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE0, 12, BM1880_CLK_DIV5, 16, 8, 120,
+ bm1880_div_table_2, 0),
+ GATE_DIV(BM1880_CLK_500M_ETH0, "clk_500m_eth0", "clk_fpll",
+ BM1880_CLK_ENABLE0, 13, BM1880_CLK_DIV6, 16, 5, 3,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_500M_ETH1, "clk_500m_eth1", "clk_fpll",
+ BM1880_CLK_ENABLE0, 15, BM1880_CLK_DIV7, 16, 5, 3,
+ bm1880_div_table_0, 0),
+	/* Don't gate the GPIO clocks, as they are not owned by the GPIO driver */
+ GATE_DIV(BM1880_CLK_GPIO_DB, "clk_gpio_db", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE0, 20, BM1880_CLK_DIV8, 16, 16, 120,
+ bm1880_div_table_4, CLK_IGNORE_UNUSED),
+ GATE_DIV(BM1880_CLK_SDMA_AUD, "clk_sdma_aud", "clk_fpll",
+ BM1880_CLK_ENABLE0, 24, BM1880_CLK_DIV9, 16, 7, 61,
+ bm1880_div_table_1, 0),
+ GATE_DIV(BM1880_CLK_JPEG_AXI, "clk_jpeg_axi", "clk_fpll",
+ BM1880_CLK_ENABLE0, 28, BM1880_CLK_DIV10, 16, 5, 4,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_NF, "clk_nf", "clk_fpll",
+ BM1880_CLK_ENABLE0, 31, BM1880_CLK_DIV11, 16, 5, 30,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_TPU_AXI, "clk_tpu_axi", "clk_spll",
+ BM1880_CLK_ENABLE1, 3, BM1880_CLK_DIV14, 16, 5, 1,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_125M_USB, "clk_125m_usb", "clk_fpll",
+ BM1880_CLK_ENABLE1, 9, BM1880_CLK_DIV16, 16, 5, 12,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_33K_USB, "clk_33k_usb", "clk_div_12m_usb",
+ BM1880_CLK_ENABLE1, 10, BM1880_CLK_DIV17, 16, 9, 363,
+ bm1880_div_table_3, 0),
+ GATE_DIV(BM1880_CLK_VIDEO_AXI, "clk_video_axi", "clk_fpll",
+ BM1880_CLK_ENABLE1, 13, BM1880_CLK_DIV19, 16, 5, 4,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_VPP_AXI, "clk_vpp_axi", "clk_fpll",
+ BM1880_CLK_ENABLE1, 14, BM1880_CLK_DIV20, 16, 5, 4,
+ bm1880_div_table_0, 0),
+ GATE_MUX(BM1880_CLK_AXI1, "clk_axi1", clk_axi1_parents,
+ BM1880_CLK_ENABLE1, 15, BM1880_CLK_SELECT, 2, 0),
+ GATE_DIV(BM1880_CLK_AXI2, "clk_axi2", "clk_fpll",
+ BM1880_CLK_ENABLE1, 17, BM1880_CLK_DIV23, 16, 5, 3,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_AXI3, "clk_axi3", "clk_mux_rv",
+ BM1880_CLK_ENABLE1, 18, BM1880_CLK_DIV24, 16, 5, 2,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_AXI4, "clk_axi4", "clk_fpll",
+ BM1880_CLK_ENABLE1, 19, BM1880_CLK_DIV25, 16, 5, 6,
+ bm1880_div_table_0, 0),
+ GATE_DIV(BM1880_CLK_AXI5, "clk_axi5", "clk_fpll",
+ BM1880_CLK_ENABLE1, 20, BM1880_CLK_DIV26, 16, 5, 15,
+ bm1880_div_table_0, 0),
+};
+
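+/*
+ * The PLL output is parent_rate * fbdiv / (refdiv * postdiv1 * postdiv2),
+ * as computed below. Illustrative example (assumed values, not taken from
+ * the datasheet): with a 25 MHz reference and fbdiv = 100, refdiv = 1,
+ * postdiv1 = 2, postdiv2 = 1, the PLL runs at
+ * 25000000 * 100 / (1 * 2 * 1) = 1250000000 Hz.
+ */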
+static unsigned long bm1880_pll_rate_calc(u32 regval, unsigned long parent_rate)
+{
+ u64 numerator;
+ u32 fbdiv, fref, refdiv;
+ u32 postdiv1, postdiv2, denominator;
+
+ fbdiv = (regval >> 16) & 0xfff;
+ fref = parent_rate;
+ refdiv = regval & 0x1f;
+ postdiv1 = (regval >> 8) & 0x7;
+ postdiv2 = (regval >> 12) & 0x7;
+
+ numerator = parent_rate * fbdiv;
+ denominator = refdiv * postdiv1 * postdiv2;
+ do_div(numerator, denominator);
+
+ return (unsigned long)numerator;
+}
+
+static unsigned long bm1880_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
+ unsigned long rate;
+ u32 regval;
+
+ regval = readl(pll_hw->base + pll_hw->pll.reg);
+ rate = bm1880_pll_rate_calc(regval, parent_rate);
+
+ return rate;
+}
+
+static const struct clk_ops bm1880_pll_ops = {
+ .recalc_rate = bm1880_pll_recalc_rate,
+};
+
+static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_clk,
+ void __iomem *sys_base)
+{
+ struct clk_hw *hw;
+ int err;
+
+ pll_clk->base = sys_base;
+ hw = &pll_clk->hw;
+
+ err = clk_hw_register(NULL, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+static void bm1880_clk_unregister_pll(struct clk_hw *hw)
+{
+ struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
+
+ clk_hw_unregister(hw);
+ kfree(pll_hw);
+}
+
+static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *pll_base = data->pll_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ struct bm1880_pll_hw_clock *bm1880_clk = &clks[i];
+
+ hw = bm1880_clk_register_pll(bm1880_clk, pll_base);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, bm1880_clk->pll.name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].pll.id] = hw;
+ }
+
+ return 0;
+
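+/* Unwind in reverse order: i currently indexes the clock that failed. */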
+err_clk:
+ while (i--)
+ bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]);
+
+ return PTR_ERR(hw);
+}
+
+static int bm1880_clk_register_mux(const struct bm1880_mux_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ int i;
+
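+	/* All BM1880 muxes are single-bit, two-parent selects, hence width 1. */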
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_mux(NULL, clks[i].name,
+ clks[i].parents,
+ clks[i].num_parents,
+ clks[i].flags,
+ sys_base + clks[i].reg,
+ clks[i].shift, 1, 0,
+ &bm1880_clk_lock);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ clk_hw_unregister_mux(data->hw_data.hws[clks[i].id]);
+
+ return PTR_ERR(hw);
+}
+
+static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+ struct bm1880_div_clock *div = &div_hw->div;
+ void __iomem *reg_addr = div_hw->base + div->reg;
+ unsigned int val;
+ unsigned long rate;
+
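+	/*
+	 * When bit 3 is clear, report the hardware default divisor
+	 * (initval) instead of the programmed divider field.
+	 */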
+ if (!(readl(reg_addr) & BIT(3))) {
+ val = div->initval;
+ } else {
+ val = readl(reg_addr) >> div->shift;
+ val &= clk_div_mask(div->width);
+ }
+
+ rate = divider_recalc_rate(hw, parent_rate, val, div->table,
+ div->flags, div->width);
+
+ return rate;
+}
+
+static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+ struct bm1880_div_clock *div = &div_hw->div;
+ void __iomem *reg_addr = div_hw->base + div->reg;
+
+ if (div->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = readl(reg_addr) >> div->shift;
+ val &= clk_div_mask(div->width);
+
+ return divider_ro_round_rate(hw, rate, prate, div->table,
+ div->width, div->flags,
+ val);
+ }
+
+ return divider_round_rate(hw, rate, prate, div->table,
+ div->width, div->flags);
+}
+
+static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+ struct bm1880_div_clock *div = &div_hw->div;
+ void __iomem *reg_addr = div_hw->base + div->reg;
+ unsigned long flags = 0;
+ int value;
+ u32 val;
+
+ value = divider_get_val(rate, parent_rate, div->table,
+ div->width, div_hw->div.flags);
+ if (value < 0)
+ return value;
+
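+	/*
+	 * __acquire()/__release() are no-ops at run time; they only keep
+	 * sparse's lock-balance checking happy when no spinlock is set.
+	 */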
+ if (div_hw->lock)
+ spin_lock_irqsave(div_hw->lock, flags);
+ else
+ __acquire(div_hw->lock);
+
+ val = readl(reg_addr);
+ val &= ~(clk_div_mask(div->width) << div_hw->div.shift);
+ val |= (u32)value << div->shift;
+ writel(val, reg_addr);
+
+ if (div_hw->lock)
+ spin_unlock_irqrestore(div_hw->lock, flags);
+ else
+ __release(div_hw->lock);
+
+ return 0;
+}
+
+static const struct clk_ops bm1880_clk_div_ops = {
+ .recalc_rate = bm1880_clk_div_recalc_rate,
+ .round_rate = bm1880_clk_div_round_rate,
+ .set_rate = bm1880_clk_div_set_rate,
+};
+
+static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_clk,
+ void __iomem *sys_base)
+{
+ struct clk_hw *hw;
+ int err;
+
+ div_clk->div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
+ div_clk->base = sys_base;
+ div_clk->lock = &bm1880_clk_lock;
+
+ hw = &div_clk->hw;
+ err = clk_hw_register(NULL, hw);
+ if (err)
+ return ERR_PTR(err);
+
+ return hw;
+}
+
+static void bm1880_clk_unregister_div(struct clk_hw *hw)
+{
+ struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
+
+ clk_hw_unregister(hw);
+ kfree(div_hw);
+}
+
+static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ unsigned int i, id;
+
+ for (i = 0; i < num_clks; i++) {
+ struct bm1880_div_hw_clock *bm1880_clk = &clks[i];
+
+ hw = bm1880_clk_register_div(bm1880_clk, sys_base);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, bm1880_clk->div.name);
+ goto err_clk;
+ }
+
+ id = clks[i].div.id;
+ data->hw_data.hws[id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]);
+
+ return PTR_ERR(hw);
+}
+
+static int bm1880_clk_register_gate(const struct bm1880_gate_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ hw = clk_hw_register_gate(NULL, clks[i].name,
+ clks[i].parent,
+ clks[i].flags,
+ sys_base + clks[i].gate_reg,
+ clks[i].gate_shift, 0,
+ &bm1880_clk_lock);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ clk_hw_unregister_gate(data->hw_data.hws[clks[i].id]);
+
+ return PTR_ERR(hw);
+}
+
+static struct clk_hw *bm1880_clk_register_composite(struct bm1880_composite_clock *clks,
+ void __iomem *sys_base)
+{
+ struct clk_hw *hw;
+ struct clk_mux *mux = NULL;
+ struct clk_gate *gate = NULL;
+ struct bm1880_div_hw_clock *div_hws = NULL;
+ struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *div_hw = NULL;
+ const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, *div_ops = NULL;
+ const char * const *parent_names;
+ const char *parent;
+ int num_parents;
+ int ret;
+
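+	/* A negative shift in the descriptor marks that component as absent. */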
+ if (clks->mux_shift >= 0) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = sys_base + clks->mux_reg;
+ mux->mask = 1;
+ mux->shift = clks->mux_shift;
+ mux_hw = &mux->hw;
+ mux_ops = &clk_mux_ops;
+ mux->lock = &bm1880_clk_lock;
+
+ parent_names = clks->parents;
+ num_parents = clks->num_parents;
+ } else {
+ parent = clks->parent;
+ parent_names = &parent;
+ num_parents = 1;
+ }
+
+ if (clks->gate_shift >= 0) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ gate->reg = sys_base + clks->gate_reg;
+ gate->bit_idx = clks->gate_shift;
+ gate->lock = &bm1880_clk_lock;
+
+ gate_hw = &gate->hw;
+ gate_ops = &clk_gate_ops;
+ }
+
+ if (clks->div_shift >= 0) {
+ div_hws = kzalloc(sizeof(*div_hws), GFP_KERNEL);
+ if (!div_hws) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ div_hws->base = sys_base;
+ div_hws->div.reg = clks->div_reg;
+ div_hws->div.shift = clks->div_shift;
+ div_hws->div.width = clks->div_width;
+ div_hws->div.table = clks->table;
+ div_hws->div.initval = clks->div_initval;
+ div_hws->lock = &bm1880_clk_lock;
+ div_hws->div.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO;
+
+ div_hw = &div_hws->hw;
+ div_ops = &bm1880_clk_div_ops;
+ }
+
+ hw = clk_hw_register_composite(NULL, clks->name, parent_names,
+ num_parents, mux_hw, mux_ops, div_hw,
+ div_ops, gate_hw, gate_ops,
+ clks->flags);
+
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto err_out;
+ }
+
+ return hw;
+
+err_out:
+ kfree(div_hws);
+ kfree(gate);
+ kfree(mux);
+
+ return ERR_PTR(ret);
+}
+
+static int bm1880_clk_register_composites(struct bm1880_composite_clock *clks,
+ int num_clks,
+ struct bm1880_clock_data *data)
+{
+ struct clk_hw *hw;
+ void __iomem *sys_base = data->sys_base;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ struct bm1880_composite_clock *bm1880_clk = &clks[i];
+
+ hw = bm1880_clk_register_composite(bm1880_clk, sys_base);
+ if (IS_ERR(hw)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, bm1880_clk->name);
+ goto err_clk;
+ }
+
+ data->hw_data.hws[clks[i].id] = hw;
+ }
+
+ return 0;
+
+err_clk:
+ while (i--)
+ clk_hw_unregister_composite(data->hw_data.hws[clks[i].id]);
+
+ return PTR_ERR(hw);
+}
+
+static int bm1880_clk_probe(struct platform_device *pdev)
+{
+ struct bm1880_clock_data *clk_data;
+ void __iomem *pll_base, *sys_base;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_clks, i;
+
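+	/* MEM resource 0 is the PLL register block, resource 1 the sysctrl block. */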
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pll_base))
+ return PTR_ERR(pll_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ sys_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sys_base))
+ return PTR_ERR(sys_base);
+
+ num_clks = ARRAY_SIZE(bm1880_pll_clks) +
+ ARRAY_SIZE(bm1880_div_clks) +
+ ARRAY_SIZE(bm1880_mux_clks) +
+ ARRAY_SIZE(bm1880_composite_clks) +
+ ARRAY_SIZE(bm1880_gate_clks);
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws,
+ num_clks), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->pll_base = pll_base;
+ clk_data->sys_base = sys_base;
+
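+	/* Pre-fill with -ENOENT so lookups of unregistered ids fail cleanly. */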
+ for (i = 0; i < num_clks; i++)
+ clk_data->hw_data.hws[i] = ERR_PTR(-ENOENT);
+
+ clk_data->hw_data.num = num_clks;
+
+ bm1880_clk_register_plls(bm1880_pll_clks,
+ ARRAY_SIZE(bm1880_pll_clks),
+ clk_data);
+
+ bm1880_clk_register_divs(bm1880_div_clks,
+ ARRAY_SIZE(bm1880_div_clks),
+ clk_data);
+
+ bm1880_clk_register_mux(bm1880_mux_clks,
+ ARRAY_SIZE(bm1880_mux_clks),
+ clk_data);
+
+ bm1880_clk_register_composites(bm1880_composite_clks,
+ ARRAY_SIZE(bm1880_composite_clks),
+ clk_data);
+
+ bm1880_clk_register_gate(bm1880_gate_clks,
+ ARRAY_SIZE(bm1880_gate_clks),
+ clk_data);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &clk_data->hw_data);
+}
+
+static const struct of_device_id bm1880_of_match[] = {
+ { .compatible = "bitmain,bm1880-clk", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bm1880_of_match);
+
+static struct platform_driver bm1880_clk_driver = {
+ .driver = {
+ .name = "bm1880-clk",
+ .of_match_table = bm1880_of_match,
+ },
+ .probe = bm1880_clk_probe,
+};
+module_platform_driver(bm1880_clk_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Clock driver for Bitmain BM1880 SoC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4f13a681ddfc..3e9c3e608769 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -207,7 +207,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
unsigned long flags)
{
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk_composite *composite;
struct clk_ops *clk_composite_ops;
int ret;
@@ -343,3 +343,14 @@ void clk_unregister_composite(struct clk *clk)
clk_unregister(clk);
kfree(composite);
}
+
+void clk_hw_unregister_composite(struct clk_hw *hw)
+{
+ struct clk_composite *composite;
+
+ composite = to_clk_composite(hw);
+
+ clk_hw_unregister(hw);
+ kfree(composite);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_composite);
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 3f9ff78c4a2a..098b2b01f0af 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -471,7 +471,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
{
struct clk_divider *div;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index a7e4aef7a376..2c4486c09040 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -58,7 +58,7 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
{
struct clk_fixed_rate *fixed;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
/* allocate fixed-rate clock */
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 1b99fc962745..670053c58c1a 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -141,7 +141,7 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
{
struct clk_gate *gate;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
int ret;
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 9d930edd6516..13304cf5f2a8 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -280,7 +280,7 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
else
clk = clk_register_gpio_gate(&pdev->dev, node->name,
parent_names ? parent_names[0] : NULL, gpiod,
- 0);
+ CLK_SET_RATE_PARENT);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 66e91f740508..570b6e5b603b 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -153,7 +153,7 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
{
struct clk_mux *mux;
struct clk_hw *hw;
- struct clk_init_data init;
+ struct clk_init_data init = {};
u8 width = 0;
int ret;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1c677d7f7f53..b68e200829f2 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1187,7 +1187,7 @@ static void clk_core_disable_unprepare(struct clk_core *core)
clk_core_unprepare_lock(core);
}
-static void clk_unprepare_unused_subtree(struct clk_core *core)
+static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
@@ -1217,7 +1217,7 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
clk_pm_runtime_put(core);
}
-static void clk_disable_unused_subtree(struct clk_core *core)
+static void __init clk_disable_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
unsigned long flags;
@@ -1263,7 +1263,7 @@ unprepare_out:
clk_core_disable_unprepare(core->parent);
}
-static bool clk_ignore_unused;
+static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
clk_ignore_unused = true;
@@ -1271,7 +1271,7 @@ static int __init clk_ignore_unused_setup(char *__unused)
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);
-static int clk_disable_unused(void)
+static int __init clk_disable_unused(void)
{
struct clk_core *core;
@@ -1674,6 +1674,24 @@ static int clk_fetch_parent_index(struct clk_core *core,
return i;
}
+/**
+ * clk_hw_get_parent_index - return the index of the parent clock
+ * @hw: clk_hw associated with the clk being consumed
+ *
+ * Fetches and returns the index of the parent clock. Returns -EINVAL if the given
+ * clock does not have a current parent.
+ */
+int clk_hw_get_parent_index(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+
+ if (WARN_ON(parent == NULL))
+ return -EINVAL;
+
+ return clk_fetch_parent_index(hw->core, parent->core);
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
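+/*
+ * Typical use (sketch): a provider's .set_parent implementation can compare
+ * clk_hw_get_parent_index(hw) with the requested index and skip a redundant
+ * reparent.
+ */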
+
/*
* Update the orphan status of @core and all its children.
*/
@@ -3879,6 +3897,7 @@ void clk_unregister(struct clk *clk)
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
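+	/* The consumer-side struct clk is unreachable now; free it as well. */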
+ free_clk(clk);
unlock:
clk_prepare_unlock();
}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 1ac11b6a47a3..8a23d5dfd1f8 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -910,7 +910,6 @@ static int davinci_pll_probe(struct platform_device *pdev)
struct davinci_pll_platform_data *pdata;
const struct of_device_id *of_id;
davinci_pll_init pll_init = NULL;
- struct resource *res;
void __iomem *base;
of_id = of_match_device(davinci_pll_of_match, dev);
@@ -930,8 +929,7 @@ static int davinci_pll_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 5b69e24a224f..7387e7f6276e 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -531,7 +531,6 @@ static int davinci_psc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
const struct davinci_psc_init_data *init_data = NULL;
- struct resource *res;
void __iomem *base;
int ret;
@@ -546,8 +545,7 @@ static int davinci_psc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index 5b3ad26dcc77..41f61726ab19 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -333,49 +333,49 @@ static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3660_crgctrl_divider_clks[] = {
{ HI3660_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0",
- CLK_SET_RATE_PARENT, 0xb0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb0, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UART1, "clk_div_uart1", "clk_andgt_uart1",
- CLK_SET_RATE_PARENT, 0xb0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb0, 8, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth",
- CLK_SET_RATE_PARENT, 0xb0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb0, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_MMC, "clk_div_mmc", "clk_andgt_mmc",
- CLK_SET_RATE_PARENT, 0xb4, 3, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb4, 3, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd",
- CLK_SET_RATE_PARENT, 0xb8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb8, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0",
- CLK_SET_RATE_PARENT, 0xbc, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xbc, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0",
- CLK_SET_RATE_PARENT, 0xbc, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xbc, 10, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio",
- CLK_SET_RATE_PARENT, 0xc0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc0, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1",
- CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
- CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
- CLK_SET_RATE_PARENT, 0xc8, 6, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xc8, 6, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
- CLK_SET_RATE_PARENT, 0xcc, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xcc, 0, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_vivobus_andgt",
- CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xe8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe8, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UFSPHY, "clk_div_ufsphy_cfg", "clk_gate_ufsphy_gt",
- CLK_SET_RATE_PARENT, 0xe8, 9, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe8, 9, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0xec, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus",
- CLK_SET_RATE_PARENT, 0xec, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 2, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0xec, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 3, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_UFSPERI, "clk_div_ufsperi", "clk_gate_ufs_subsys",
- CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_AOMM, "clk_div_aomm", "clk_aomm_andgt",
- CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_ISP_SNCLK, "clk_isp_snclk_div", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_mux_ioperi",
- CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_pmuctrl */
@@ -420,13 +420,13 @@ static const struct hisi_gate_clock hi3660_sctrl_gate_clks[] = {
{ HI3660_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "clk_sw_mmbuf",
CLK_SET_RATE_PARENT, 0x258, 7, CLK_GATE_HIWORD_MASK, },
{ HI3660_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x260, 11, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x260, 11, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_FLL_MMBUF_ANDGT, "clk_fll_mmbuf_andgt", "clk_fll_src",
- CLK_SET_RATE_PARENT, 0x260, 12, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x260, 12, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_SYS_MMBUF_ANDGT, "clk_sys_mmbuf_andgt", "clkin_sys",
- CLK_SET_RATE_PARENT, 0x260, 13, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x260, 13, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_GATE_PCIEPHY_GT, "clk_gate_pciephy_gt", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x268, 11, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 11, CLK_DIVIDER_HIWORD_MASK, },
};
static const char *const
@@ -446,13 +446,13 @@ static const struct hisi_mux_clock hi3660_sctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3660_sctrl_divider_clks[] = {
{ HI3660_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt",
- CLK_SET_RATE_PARENT, 0x258, 10, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x258, 10, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt",
- CLK_SET_RATE_PARENT, 0x258, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x258, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3660_CLK_DIV_PCIEPHY, "clk_div_pciephy", "clk_gate_pciephy_gt",
- CLK_SET_RATE_PARENT, 0x268, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_iomcu */
diff --git a/drivers/clk/hisilicon/clk-hi3670.c b/drivers/clk/hisilicon/clk-hi3670.c
index fd8c837a6ea3..4d05a71683a5 100644
--- a/drivers/clk/hisilicon/clk-hi3670.c
+++ b/drivers/clk/hisilicon/clk-hi3670.c
@@ -295,61 +295,61 @@ static const struct hisi_gate_clock hi3670_crgctrl_gate_sep_clks[] = {
static const struct hisi_gate_clock hi3670_crgctrl_gate_clks[] = {
{ HI3670_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x404, 5, CLK_GATE_HIWORD_MASK, },
{ HI3670_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus",
- CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, },
{ HI3670_PCLK_ANDGT_MMC1_PCIE, "pclk_andgt_mmc1_pcie", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xf8, 13, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xf8, 13, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_GATE_VCODECBUS_GT, "clk_gate_vcodecbus_gt", "clk_mux_vcodecbus",
- CLK_SET_RATE_PARENT, 0x0F0, 8, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0F0, 8, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_SD, "clk_andgt_sd", "clk_mux_sd_pll",
- CLK_SET_RATE_PARENT, 0xF4, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 3, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_SD_SYS_GT, "clk_sd_sys_gt", "clkin_sys",
- CLK_SET_RATE_PARENT, 0xF4, 5, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 5, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_SDIO, "clk_andgt_sdio", "clk_mux_sdio_pll",
- CLK_SET_RATE_PARENT, 0xF4, 8, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 8, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_SDIO_SYS_GT, "clk_sdio_sys_gt", "clkin_sys",
- CLK_SET_RATE_PARENT, 0xF4, 6, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 6, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_A53HPM_ANDGT, "clk_a53hpm_andgt", "clk_mux_a53hpm",
- CLK_SET_RATE_PARENT, 0x0F4, 7, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0F4, 7, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m",
- CLK_SET_RATE_PARENT, 0xF8, 10, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF8, 10, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_UARTH, "clk_andgt_uarth", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 11, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 11, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_UARTL, "clk_andgt_uartl", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 10, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 10, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_UART0, "clk_andgt_uart0", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 9, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 9, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_SPI, "clk_andgt_spi", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF4, 13, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 13, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_PCIEAXI, "clk_andgt_pcieaxi", "clk_mux_pcieaxi",
- CLK_SET_RATE_PARENT, 0xfc, 15, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xfc, 15, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_DIV_AO_ASP_GT, "clk_div_ao_asp_gt", "clk_mux_ao_asp",
- CLK_SET_RATE_PARENT, 0xF4, 4, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 4, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_GATE_CSI_TRANS, "clk_gate_csi_trans", "clk_ppll2",
- CLK_SET_RATE_PARENT, 0xF4, 14, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 14, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_GATE_DSI_TRANS, "clk_gate_dsi_trans", "clk_ppll2",
- CLK_SET_RATE_PARENT, 0xF4, 1, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF4, 1, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_PTP, "clk_andgt_ptp", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xF8, 5, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF8, 5, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_OUT0, "clk_andgt_out0", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0xF0, 10, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 10, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_OUT1, "clk_andgt_out1", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0xF0, 11, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 11, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLKGT_DP_AUDIO_PLL_AO, "clkgt_dp_audio_pll_ao", "clk_ppll6",
- CLK_SET_RATE_PARENT, 0xF8, 15, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF8, 15, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_VDEC, "clk_andgt_vdec", "clk_mux_vdec",
- CLK_SET_RATE_PARENT, 0xF0, 13, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 13, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_VENC, "clk_andgt_venc", "clk_mux_venc",
- CLK_SET_RATE_PARENT, 0xF0, 9, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xF0, 9, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_ANGT, "clk_isp_snclk_angt", "clk_div_a53hpm",
- CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_RXDPHY, "clk_andgt_rxdphy", "clk_div_a53hpm",
- CLK_SET_RATE_PARENT, 0x0F0, 12, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0F0, 12, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_ICS, "clk_andgt_ics", "clk_mux_ics",
- CLK_SET_RATE_PARENT, 0xf0, 14, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xf0, 14, CLK_GATE_HIWORD_MASK, },
{ HI3670_AUTODIV_DMABUS, "autodiv_dmabus", "autodiv_sysbus",
- CLK_SET_RATE_PARENT, 0x404, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x404, 3, CLK_GATE_HIWORD_MASK, },
};
static const char *const
@@ -485,57 +485,57 @@ static const struct hisi_mux_clock hi3670_crgctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3670_crgctrl_divider_clks[] = {
{ HI3670_CLK_DIV_CFGBUS, "clk_div_cfgbus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0xEC, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xEC, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_MMC0BUS, "clk_div_mmc0bus", "autodiv_emmc0bus",
- CLK_SET_RATE_PARENT, 0x0EC, 2, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0EC, 2, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_MMC1BUS, "clk_div_mmc1bus", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0x0EC, 3, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0EC, 3, 1, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_PCLK_DIV_MMC1_PCIE, "pclk_div_mmc1_pcie", "pclk_andgt_mmc1_pcie",
- CLK_SET_RATE_PARENT, 0xb4, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb4, 6, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_VCODECBUS, "clk_div_vcodecbus", "clk_gate_vcodecbus_gt",
- CLK_SET_RATE_PARENT, 0x0BC, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x0BC, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_SD, "clk_div_sd", "clk_andgt_sd",
- CLK_SET_RATE_PARENT, 0xB8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB8, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_SDIO, "clk_div_sdio", "clk_andgt_sdio",
- CLK_SET_RATE_PARENT, 0xC0, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC0, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UARTH, "clk_div_uarth", "clk_andgt_uarth",
- CLK_SET_RATE_PARENT, 0xB0, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB0, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UARTL, "clk_div_uartl", "clk_andgt_uartl",
- CLK_SET_RATE_PARENT, 0xB0, 8, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB0, 8, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UART0, "clk_div_uart0", "clk_andgt_uart0",
- CLK_SET_RATE_PARENT, 0xB0, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xB0, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
- CLK_SET_RATE_PARENT, 0xE8, 4, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xE8, 4, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
- CLK_SET_RATE_PARENT, 0xC4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC4, 12, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_PCIEAXI, "clk_div_pcieaxi", "clk_andgt_pcieaxi",
- CLK_SET_RATE_PARENT, 0xb4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xb4, 0, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_AO_ASP, "clk_div_ao_asp", "clk_div_ao_asp_gt",
- CLK_SET_RATE_PARENT, 0x108, 6, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 6, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CSI_TRANS, "clk_div_csi_trans", "clk_gate_csi_trans",
- CLK_SET_RATE_PARENT, 0xD4, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xD4, 0, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_DSI_TRANS, "clk_div_dsi_trans", "clk_gate_dsi_trans",
- CLK_SET_RATE_PARENT, 0xD4, 10, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xD4, 10, 5, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_PTP, "clk_div_ptp", "clk_andgt_ptp",
- CLK_SET_RATE_PARENT, 0xD8, 0, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xD8, 0, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT0_PLL, "clk_div_clkout0_pll", "clk_andgt_out0",
- CLK_SET_RATE_PARENT, 0xe0, 4, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe0, 4, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT1_PLL, "clk_div_clkout1_pll", "clk_andgt_out1",
- CLK_SET_RATE_PARENT, 0xe0, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xe0, 10, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLKDIV_DP_AUDIO_PLL_AO, "clkdiv_dp_audio_pll_ao", "clkgt_dp_audio_pll_ao",
- CLK_SET_RATE_PARENT, 0xBC, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xBC, 11, 4, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
- CLK_SET_RATE_PARENT, 0xC4, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC4, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
- CLK_SET_RATE_PARENT, 0xC0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xC0, 8, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_DIV0, "clk_isp_snclk_div0", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_DIV1, "clk_isp_snclk_div1", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x10C, 14, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x10C, 14, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ISP_SNCLK_DIV2, "clk_isp_snclk_div2", "clk_isp_snclk_fac",
- CLK_SET_RATE_PARENT, 0x10C, 11, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x10C, 11, 2, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_ICS, "clk_div_ics", "clk_andgt_ics",
- CLK_SET_RATE_PARENT, 0xE4, 9, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0xE4, 9, 6, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_pmuctrl */
@@ -608,12 +608,12 @@ static const struct hisi_gate_clock hi3670_sctrl_gate_sep_clks[] = {
static const struct hisi_gate_clock hi3670_sctrl_gate_clks[] = {
{ HI3670_CLK_ANDGT_IOPERI, "clk_andgt_ioperi", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x270, 6, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x270, 6, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLKANDGT_ASP_SUBSYS_PERI, "clkandgt_asp_subsys_peri",
"clk_ppll0",
- CLK_SET_RATE_PARENT, 0x268, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 3, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANGT_ASP_SUBSYS, "clk_angt_asp_subsys", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x258, 0, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x258, 0, CLK_GATE_HIWORD_MASK, },
};
static const char *const
@@ -650,19 +650,19 @@ static const struct hisi_mux_clock hi3670_sctrl_mux_clks[] = {
static const struct hisi_divider_clock hi3670_sctrl_divider_clks[] = {
{ HI3670_CLK_DIV_AOBUS, "clk_div_aobus", "clk_ppll0",
- CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_UFS_SUBSYS, "clk_div_ufs_subsys", "clk_mux_ufs_subsys",
- CLK_SET_RATE_PARENT, 0x274, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x274, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_andgt_ioperi",
- CLK_SET_RATE_PARENT, 0x270, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x270, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT0_TCXO, "clk_div_clkout0_tcxo", "clkin_sys",
- CLK_SET_RATE_PARENT, 0x254, 6, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 6, 3, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_CLKOUT1_TCXO, "clk_div_clkout1_tcxo", "clkin_sys",
- CLK_SET_RATE_PARENT, 0x254, 9, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x254, 9, 3, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_ASP_SUBSYS_PERI_DIV, "clk_asp_subsys_peri_div", "clkandgt_asp_subsys_peri",
- CLK_SET_RATE_PARENT, 0x268, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x268, 0, 3, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_ASP_SUBSYS, "clk_div_asp_subsys", "clk_angt_asp_subsys",
- CLK_SET_RATE_PARENT, 0x250, 0, 3, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x250, 0, 3, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_iomcu */
@@ -732,17 +732,17 @@ static const struct hisi_gate_clock hi3670_media1_gate_sep_clks[] = {
static const struct hisi_gate_clock hi3670_media1_gate_clks[] = {
{ HI3670_CLK_GATE_VIVOBUS_ANDGT, "clk_gate_vivobus_andgt", "clk_mux_vivobus",
- CLK_SET_RATE_PARENT, 0x84, 3, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 3, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0",
- CLK_SET_RATE_PARENT, 0x84, 7, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 7, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_LDI0, "clk_andgt_ldi0", "clk_mux_ldi0",
- CLK_SET_RATE_PARENT, 0x84, 9, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 9, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_ANDGT_LDI1, "clk_andgt_ldi1", "clk_mux_ldi1",
- CLK_SET_RATE_PARENT, 0x84, 8, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 8, CLK_GATE_HIWORD_MASK, },
{ HI3670_CLK_MMBUF_PLL_ANDGT, "clk_mmbuf_pll_andgt", "clk_sw_mmbuf",
- CLK_SET_RATE_PARENT, 0x84, 14, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 14, CLK_GATE_HIWORD_MASK, },
{ HI3670_PCLK_MMBUF_ANDGT, "pclk_mmbuf_andgt", "aclk_div_mmbuf",
- CLK_SET_RATE_PARENT, 0x84, 15, CLK_GATE_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x84, 15, CLK_GATE_HIWORD_MASK, },
};
static const char *const
@@ -799,17 +799,17 @@ static const struct hisi_mux_clock hi3670_media1_mux_clks[] = {
static const struct hisi_divider_clock hi3670_media1_divider_clks[] = {
{ HI3670_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_gate_vivobus_andgt",
- CLK_SET_RATE_PARENT, 0x74, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x74, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_EDC0, "clk_div_edc0", "clk_andgt_edc0",
- CLK_SET_RATE_PARENT, 0x68, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x68, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_LDI0, "clk_div_ldi0", "clk_andgt_ldi0",
- CLK_SET_RATE_PARENT, 0x60, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x60, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_CLK_DIV_LDI1, "clk_div_ldi1", "clk_andgt_ldi1",
- CLK_SET_RATE_PARENT, 0x64, 0, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x64, 0, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_ACLK_DIV_MMBUF, "aclk_div_mmbuf", "clk_mmbuf_pll_andgt",
- CLK_SET_RATE_PARENT, 0x7C, 10, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x7C, 10, 6, CLK_DIVIDER_HIWORD_MASK, },
{ HI3670_PCLK_DIV_MMBUF, "pclk_div_mmbuf", "pclk_mmbuf_andgt",
- CLK_SET_RATE_PARENT, 0x78, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
+ CLK_SET_RATE_PARENT, 0x78, 0, 2, CLK_DIVIDER_HIWORD_MASK, },
};
/* clk_media2 */
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index b2c5b6bbb1c1..e7cdf72d4b06 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -86,7 +86,8 @@ static void __init hi6220_clk_ao_init(struct device_node *np)
hisi_clk_register_gate_sep(hi6220_separated_gate_clks_ao,
ARRAY_SIZE(hi6220_separated_gate_clks_ao), clk_data_ao);
}
-CLK_OF_DECLARE(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
+/* Allow the reset driver to probe as well */
+CLK_OF_DECLARE_DRIVER(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init);
/* clocks in sysctrl */
diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
index 2e22fea2a2e7..93cee17db8b1 100644
--- a/drivers/clk/hisilicon/reset.c
+++ b/drivers/clk/hisilicon/reset.c
@@ -90,14 +90,12 @@ static const struct reset_control_ops hisi_reset_ops = {
struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
{
struct hisi_reset_controller *rstc;
- struct resource *res;
rstc = devm_kmalloc(&pdev->dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
return NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rstc->membase = devm_ioremap_resource(&pdev->dev, res);
+ rstc->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rstc->membase))
return NULL;
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index 33ab4ff61165..b00cbd045af5 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -58,8 +58,7 @@ static void __init clk_boston_setup(struct device_node *np)
cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV);
cpu_freq = mult_frac(in_freq, mul, cpu_div);
- onecell = kzalloc(sizeof(*onecell) +
- (BOSTON_CLK_COUNT * sizeof(struct clk_hw *)),
+ onecell = kzalloc(struct_size(onecell, hws, BOSTON_CLK_COUNT),
GFP_KERNEL);
if (!onecell)
return;
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 5f3e92c09a5e..8e8288bda4d0 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -107,12 +107,12 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
hws[IMX6SLL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX6SLL_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
- hws[IMX6SLL_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
+ hws[IMX6SLL_CLK_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
+ hws[IMX6SLL_CLK_OSC] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
/* ipp_di clock is external input */
- hws[IMX6SLL_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0"));
- hws[IMX6SLL_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1"));
+ hws[IMX6SLL_CLK_IPP_DI0] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di0");
+ hws[IMX6SLL_CLK_IPP_DI1] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di1");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index c4685c01929a..89ba71271e5c 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -139,16 +139,16 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX6SX_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
- hws[IMX6SX_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
+ hws[IMX6SX_CLK_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
+ hws[IMX6SX_CLK_OSC] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
/* ipp_di clock is external input */
- hws[IMX6SX_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0"));
- hws[IMX6SX_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1"));
+ hws[IMX6SX_CLK_IPP_DI0] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di0");
+ hws[IMX6SX_CLK_IPP_DI1] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di1");
/* Clock source from external clock via CLK1/2 PAD */
- hws[IMX6SX_CLK_ANACLK1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "anaclk1"));
- hws[IMX6SX_CLK_ANACLK2] = __clk_get_hw(of_clk_get_by_name(ccm_node, "anaclk2"));
+ hws[IMX6SX_CLK_ANACLK1] = imx_obtain_fixed_clk_hw(ccm_node, "anaclk1");
+ hws[IMX6SX_CLK_ANACLK2] = imx_obtain_fixed_clk_hw(ccm_node, "anaclk2");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index bc931988fe7b..dafc8806b03e 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -126,12 +126,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX6UL_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
- hws[IMX6UL_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
+ hws[IMX6UL_CLK_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
+ hws[IMX6UL_CLK_OSC] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
/* ipp_di clock is external input */
- hws[IMX6UL_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0"));
- hws[IMX6UL_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1"));
+ hws[IMX6UL_CLK_IPP_DI0] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di0");
+ hws[IMX6UL_CLK_IPP_DI1] = imx_obtain_fixed_clk_hw(ccm_node, "ipp_di1");
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index fbea774ef687..0c9f7adb41ae 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -403,8 +403,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
hws = clk_hw_data->hws;
hws[IMX7D_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
- hws[IMX7D_OSC_24M_CLK] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc"));
- hws[IMX7D_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil"));
+ hws[IMX7D_OSC_24M_CLK] = imx_obtain_fixed_clk_hw(ccm_node, "osc");
+ hws[IMX7D_CKIL] = imx_obtain_fixed_clk_hw(ccm_node, "ckil");
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 2022d9bead91..3fdf3d494f0a 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -24,11 +24,11 @@ static const char * const spll_pfd_sels[] = { "spll_pfd0", "spll_pfd1", "spll_pf
static const char * const spll_sels[] = { "spll", "spll_pfd_sel", };
static const char * const apll_pfd_sels[] = { "apll_pfd0", "apll_pfd1", "apll_pfd2", "apll_pfd3", };
static const char * const apll_sels[] = { "apll", "apll_pfd_sel", };
-static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "upll", };
-static const char * const ddr_sels[] = { "apll_pfd_sel", "upll", };
+static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "dummy", };
+static const char * const ddr_sels[] = { "apll_pfd_sel", "dummy", "dummy", "dummy", };
static const char * const nic_sels[] = { "firc", "ddr_clk", };
static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", };
-static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "mpll", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
+static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "dummy", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
static const char * const arm_sels[] = { "divcore", "dummy", "dummy", "hsrun_divcore", };
/* used by sosc/sirc/firc/ddr/spll/apll dividers */
@@ -75,7 +75,6 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
clks[IMX7ULP_CLK_SOSC] = imx_obtain_fixed_clk_hw(np, "sosc");
clks[IMX7ULP_CLK_SIRC] = imx_obtain_fixed_clk_hw(np, "sirc");
clks[IMX7ULP_CLK_FIRC] = imx_obtain_fixed_clk_hw(np, "firc");
- clks[IMX7ULP_CLK_MIPI_PLL] = imx_obtain_fixed_clk_hw(np, "mpll");
clks[IMX7ULP_CLK_UPLL] = imx_obtain_fixed_clk_hw(np, "upll");
/* SCG1 */
@@ -118,7 +117,7 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
clks[IMX7ULP_CLK_SYS_SEL] = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
clks[IMX7ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
- clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 1, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
clks[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 172589e94f60..030b15d7c0ce 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -26,73 +26,6 @@ static u32 share_count_disp;
static u32 share_count_pdm;
static u32 share_count_nand;
-static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
- PLL_1416X_RATE(1800000000U, 225, 3, 0),
- PLL_1416X_RATE(1600000000U, 200, 3, 0),
- PLL_1416X_RATE(1200000000U, 300, 3, 1),
- PLL_1416X_RATE(1000000000U, 250, 3, 1),
- PLL_1416X_RATE(800000000U, 200, 3, 1),
- PLL_1416X_RATE(750000000U, 250, 2, 2),
- PLL_1416X_RATE(700000000U, 350, 3, 2),
- PLL_1416X_RATE(600000000U, 300, 3, 2),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
- PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
-};
-
-static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
-};
-
-static struct imx_pll14xx_clk imx8mm_audio_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_audiopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_video_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_videopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_dram_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mm_drampll_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_arm_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_gpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_vpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mm_sys_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mm_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl),
-};
-
static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
@@ -101,8 +34,6 @@ static const char *dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", };
static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
-static const char *sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
-static const char *sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
/* CCM ROOT */
@@ -392,20 +323,18 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll);
- clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll);
- clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll);
- clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll);
- clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll);
- clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll);
- clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll);
- clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll);
- clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll);
- clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll);
+ clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
+ clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ clks[IMX8MM_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
+ clks[IMX8MM_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
+ clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
@@ -415,8 +344,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
/* PLL out gate */
@@ -427,29 +354,48 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
- clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 11);
- clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 11);
clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
- /* SYS PLL fixed output */
- clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
- clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
- clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
- clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
- clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
- clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
- clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
- clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ /* SYS PLL1 fixed output */
+ clks[IMX8MM_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+ clks[IMX8MM_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+ clks[IMX8MM_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+ clks[IMX8MM_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+ clks[IMX8MM_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+ clks[IMX8MM_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+ clks[IMX8MM_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+ clks[IMX8MM_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+ clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+ clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
- clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
- clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
- clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
- clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
- clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
- clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
- clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
- clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ /* SYS PLL2 fixed output */
+ clks[IMX8MM_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+ clks[IMX8MM_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+ clks[IMX8MM_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+ clks[IMX8MM_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+ clks[IMX8MM_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+ clks[IMX8MM_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+ clks[IMX8MM_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+ clks[IMX8MM_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+ clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+ clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
np = dev->of_node;
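Net effect of the i.MX8MM hunks: SYS PLL1 and SYS PLL2 are no longer programmed by Linux at all. They are modelled as fixed rates (800 MHz and 1 GHz, presumably locked before the kernel runs), each fixed-factor output gains its own clock gate (the *_cg clocks at dedicated bits of 0x94/0x104), and the dividers now hang off those gates. One branch of the resulting tree, taken verbatim from the hunk above:

    /* fixed root -> per-output gate -> fixed-factor divider */
    clks[IMX8MM_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
    clks[IMX8MM_SYS_PLL1_100M_CG] =
            imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
    clks[IMX8MM_SYS_PLL1_100M] =
            imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);

With this shape, gating sys_pll1_100m propagates only to its own *_cg parent and leaves the other fixed outputs of the same PLL undisturbed.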
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 58b5acee3830..9f5a5a56b45e 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -25,89 +25,6 @@ static u32 share_count_disp;
static u32 share_count_pdm;
static u32 share_count_nand;
-enum {
- ARM_PLL,
- GPU_PLL,
- VPU_PLL,
- SYS_PLL1,
- SYS_PLL2,
- SYS_PLL3,
- DRAM_PLL,
- AUDIO_PLL1,
- AUDIO_PLL2,
- VIDEO_PLL2,
- NR_PLLS,
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_pll1416x_tbl[] = {
- PLL_1416X_RATE(1800000000U, 225, 3, 0),
- PLL_1416X_RATE(1600000000U, 200, 3, 0),
- PLL_1416X_RATE(1500000000U, 375, 3, 1),
- PLL_1416X_RATE(1400000000U, 350, 3, 1),
- PLL_1416X_RATE(1200000000U, 300, 3, 1),
- PLL_1416X_RATE(1000000000U, 250, 3, 1),
- PLL_1416X_RATE(800000000U, 200, 3, 1),
- PLL_1416X_RATE(750000000U, 250, 2, 2),
- PLL_1416X_RATE(700000000U, 350, 3, 2),
- PLL_1416X_RATE(600000000U, 300, 3, 2),
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_audiopll_tbl[] = {
- PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
- PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_videopll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
- PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
-};
-
-static const struct imx_pll14xx_rate_table imx8mn_drampll_tbl[] = {
- PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
-};
-
-static struct imx_pll14xx_clk imx8mn_audio_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mn_audiopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_audiopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_video_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mn_videopll_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_videopll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_dram_pll = {
- .type = PLL_1443X,
- .rate_table = imx8mn_drampll_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_drampll_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_arm_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_gpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_vpu_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
-static struct imx_pll14xx_clk imx8mn_sys_pll = {
- .type = PLL_1416X,
- .rate_table = imx8mn_pll1416x_tbl,
- .rate_count = ARRAY_SIZE(imx8mn_pll1416x_tbl),
-};
-
static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", };
static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
@@ -116,8 +33,6 @@ static const char * const dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_se
static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
-static const char * const sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", };
-static const char * const sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", };
static const char * const sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", };
static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m",
@@ -405,20 +320,18 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MN_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MN_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MN_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mn_audio_pll);
- clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mn_audio_pll);
- clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mn_video_pll);
- clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mn_dram_pll);
- clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mn_gpu_pll);
- clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mn_vpu_pll);
- clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mn_arm_pll);
- clks[IMX8MN_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mn_sys_pll);
- clks[IMX8MN_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mn_sys_pll);
- clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mn_sys_pll);
+ clks[IMX8MN_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx_1443x_pll);
+ clks[IMX8MN_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx_1443x_pll);
+ clks[IMX8MN_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx_1443x_pll);
+ clks[IMX8MN_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx_1443x_pll);
+ clks[IMX8MN_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx_1416x_pll);
+ clks[IMX8MN_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx_1416x_pll);
+ clks[IMX8MN_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx_1416x_pll);
+ clks[IMX8MN_SYS_PLL1] = imx_clk_fixed("sys_pll1", 800000000);
+ clks[IMX8MN_SYS_PLL2] = imx_clk_fixed("sys_pll2", 1000000000);
+ clks[IMX8MN_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx_1416x_pll);
/* PLL bypass out */
clks[IMX8MN_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
@@ -428,8 +341,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MN_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MN_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- clks[IMX8MN_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX8MN_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
/* PLL out gate */
@@ -440,29 +351,48 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
clks[IMX8MN_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 11);
clks[IMX8MN_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 11);
clks[IMX8MN_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 11);
- clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 11);
- clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 11);
clks[IMX8MN_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 11);
- /* SYS PLL fixed output */
- clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
- clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
- clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
- clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
- clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
- clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
- clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
- clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ /* SYS PLL1 fixed output */
+ clks[IMX8MN_SYS_PLL1_40M_CG] = imx_clk_gate("sys_pll1_40m_cg", "sys_pll1", base + 0x94, 27);
+ clks[IMX8MN_SYS_PLL1_80M_CG] = imx_clk_gate("sys_pll1_80m_cg", "sys_pll1", base + 0x94, 25);
+ clks[IMX8MN_SYS_PLL1_100M_CG] = imx_clk_gate("sys_pll1_100m_cg", "sys_pll1", base + 0x94, 23);
+ clks[IMX8MN_SYS_PLL1_133M_CG] = imx_clk_gate("sys_pll1_133m_cg", "sys_pll1", base + 0x94, 21);
+ clks[IMX8MN_SYS_PLL1_160M_CG] = imx_clk_gate("sys_pll1_160m_cg", "sys_pll1", base + 0x94, 19);
+ clks[IMX8MN_SYS_PLL1_200M_CG] = imx_clk_gate("sys_pll1_200m_cg", "sys_pll1", base + 0x94, 17);
+ clks[IMX8MN_SYS_PLL1_266M_CG] = imx_clk_gate("sys_pll1_266m_cg", "sys_pll1", base + 0x94, 15);
+ clks[IMX8MN_SYS_PLL1_400M_CG] = imx_clk_gate("sys_pll1_400m_cg", "sys_pll1", base + 0x94, 13);
+ clks[IMX8MN_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1", base + 0x94, 11);
+
+ clks[IMX8MN_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ clks[IMX8MN_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ clks[IMX8MN_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ clks[IMX8MN_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ clks[IMX8MN_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ clks[IMX8MN_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ clks[IMX8MN_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ clks[IMX8MN_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
clks[IMX8MN_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
- clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
- clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
- clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
- clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
- clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
- clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
- clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
- clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ /* SYS PLL2 fixed output */
+ clks[IMX8MN_SYS_PLL2_50M_CG] = imx_clk_gate("sys_pll2_50m_cg", "sys_pll2", base + 0x104, 27);
+ clks[IMX8MN_SYS_PLL2_100M_CG] = imx_clk_gate("sys_pll2_100m_cg", "sys_pll2", base + 0x104, 25);
+ clks[IMX8MN_SYS_PLL2_125M_CG] = imx_clk_gate("sys_pll2_125m_cg", "sys_pll2", base + 0x104, 23);
+ clks[IMX8MN_SYS_PLL2_166M_CG] = imx_clk_gate("sys_pll2_166m_cg", "sys_pll2", base + 0x104, 21);
+ clks[IMX8MN_SYS_PLL2_200M_CG] = imx_clk_gate("sys_pll2_200m_cg", "sys_pll2", base + 0x104, 19);
+ clks[IMX8MN_SYS_PLL2_250M_CG] = imx_clk_gate("sys_pll2_250m_cg", "sys_pll2", base + 0x104, 17);
+ clks[IMX8MN_SYS_PLL2_333M_CG] = imx_clk_gate("sys_pll2_333m_cg", "sys_pll2", base + 0x104, 15);
+ clks[IMX8MN_SYS_PLL2_500M_CG] = imx_clk_gate("sys_pll2_500m_cg", "sys_pll2", base + 0x104, 13);
+ clks[IMX8MN_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2", base + 0x104, 11);
+
+ clks[IMX8MN_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ clks[IMX8MN_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ clks[IMX8MN_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ clks[IMX8MN_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ clks[IMX8MN_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ clks[IMX8MN_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ clks[IMX8MN_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ clks[IMX8MN_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
clks[IMX8MN_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
np = dev->of_node;
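The i.MX8MN hunks mirror the i.MX8MM ones one-for-one. A detail worth noting for both: every output is an integer division of the fixed root, so the branch names encode rounded rates. Illustrative arithmetic:

    /* 800 MHz root:  /20 = 40M, /10 = 80M, /8 = 100M,
     *                /6 = 133.33M (named "133m"), /5 = 160M,
     *                /4 = 200M, /3 = 266.67M ("266m"), /2 = 400M.
     * 1000 MHz root: /20 = 50M, ... /3 = 333.33M ("333m"),
     *                /2 = 500M, /1 = 1000M.
     */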
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 41fc9c63356e..5f10a606d836 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -34,10 +34,9 @@ static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_
static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
-static const char * const sys1_pll_out_sels[] = {"sys1_pll1_ref_sel", };
-static const char * const sys2_pll_out_sels[] = {"sys1_pll1_ref_sel", "sys2_pll1_ref_sel", };
-static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", "sys2_pll1_ref_sel", };
+static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", };
static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", };
+static const char * const video2_pll_out_sels[] = {"video2_pll1_ref_sel", };
/* CCM ROOT */
static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
@@ -307,10 +306,9 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_SYS1_PLL1_REF_SEL] = imx_clk_mux("sys1_pll1_ref_sel", base + 0x30, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
- clks[IMX8MQ_SYS2_PLL1_REF_SEL] = imx_clk_mux("sys2_pll1_ref_sel", base + 0x3c, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_SYS3_PLL1_REF_SEL] = imx_clk_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_DRAM_PLL1_REF_SEL] = imx_clk_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_VIDEO2_PLL1_REF_SEL] = imx_clk_mux("video2_pll1_ref_sel", base + 0x54, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
clks[IMX8MQ_ARM_PLL_REF_DIV] = imx_clk_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
clks[IMX8MQ_GPU_PLL_REF_DIV] = imx_clk_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
@@ -342,30 +340,53 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
- clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_sccg_pll("sys1_pll_out", sys1_pll_out_sels, ARRAY_SIZE(sys1_pll_out_sels), 0, 0, 0, base + 0x30, CLK_IS_CRITICAL);
- clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_sccg_pll("sys2_pll_out", sys2_pll_out_sels, ARRAY_SIZE(sys2_pll_out_sels), 0, 0, 1, base + 0x3c, CLK_IS_CRITICAL);
- clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 1, base + 0x48, CLK_IS_CRITICAL);
+ clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_fixed("sys1_pll_out", 800000000);
+ clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_fixed("sys2_pll_out", 1000000000);
+ clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL);
- /* SYS PLL fixed output */
- clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
- clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
- clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_out", 1, 8);
- clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_out", 1, 6);
- clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_out", 1, 5);
- clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_out", 1, 4);
- clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_out", 1, 3);
- clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_out", 1, 2);
- clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_out", 1, 1);
-
- clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_out", 1, 20);
- clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_out", 1, 10);
- clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_out", 1, 8);
- clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_out", 1, 6);
- clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_out", 1, 5);
- clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_out", 1, 4);
- clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_out", 1, 3);
- clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_out", 1, 2);
- clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1);
+ clks[IMX8MQ_VIDEO2_PLL_OUT] = imx_clk_sccg_pll("video2_pll_out", video2_pll_out_sels, ARRAY_SIZE(video2_pll_out_sels), 0, 0, 0, base + 0x54, 0);
+
+ /* SYS PLL1 fixed output */
+ clks[IMX8MQ_SYS1_PLL_40M_CG] = imx_clk_gate("sys1_pll_40m_cg", "sys1_pll_out", base + 0x30, 9);
+ clks[IMX8MQ_SYS1_PLL_80M_CG] = imx_clk_gate("sys1_pll_80m_cg", "sys1_pll_out", base + 0x30, 11);
+ clks[IMX8MQ_SYS1_PLL_100M_CG] = imx_clk_gate("sys1_pll_100m_cg", "sys1_pll_out", base + 0x30, 13);
+ clks[IMX8MQ_SYS1_PLL_133M_CG] = imx_clk_gate("sys1_pll_133m_cg", "sys1_pll_out", base + 0x30, 15);
+ clks[IMX8MQ_SYS1_PLL_160M_CG] = imx_clk_gate("sys1_pll_160m_cg", "sys1_pll_out", base + 0x30, 17);
+ clks[IMX8MQ_SYS1_PLL_200M_CG] = imx_clk_gate("sys1_pll_200m_cg", "sys1_pll_out", base + 0x30, 19);
+ clks[IMX8MQ_SYS1_PLL_266M_CG] = imx_clk_gate("sys1_pll_266m_cg", "sys1_pll_out", base + 0x30, 21);
+ clks[IMX8MQ_SYS1_PLL_400M_CG] = imx_clk_gate("sys1_pll_400m_cg", "sys1_pll_out", base + 0x30, 23);
+ clks[IMX8MQ_SYS1_PLL_800M_CG] = imx_clk_gate("sys1_pll_800m_cg", "sys1_pll_out", base + 0x30, 25);
+
+ clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_40m_cg", 1, 20);
+ clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_80m_cg", 1, 10);
+ clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_100m_cg", 1, 8);
+ clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_133m_cg", 1, 6);
+ clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_160m_cg", 1, 5);
+ clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_200m_cg", 1, 4);
+ clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_266m_cg", 1, 3);
+ clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_400m_cg", 1, 2);
+ clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_800m_cg", 1, 1);
+
+ /* SYS PLL2 fixed output */
+ clks[IMX8MQ_SYS2_PLL_50M_CG] = imx_clk_gate("sys2_pll_50m_cg", "sys2_pll_out", base + 0x3c, 9);
+ clks[IMX8MQ_SYS2_PLL_100M_CG] = imx_clk_gate("sys2_pll_100m_cg", "sys2_pll_out", base + 0x3c, 11);
+ clks[IMX8MQ_SYS2_PLL_125M_CG] = imx_clk_gate("sys2_pll_125m_cg", "sys2_pll_out", base + 0x3c, 13);
+ clks[IMX8MQ_SYS2_PLL_166M_CG] = imx_clk_gate("sys2_pll_166m_cg", "sys2_pll_out", base + 0x3c, 15);
+ clks[IMX8MQ_SYS2_PLL_200M_CG] = imx_clk_gate("sys2_pll_200m_cg", "sys2_pll_out", base + 0x3c, 17);
+ clks[IMX8MQ_SYS2_PLL_250M_CG] = imx_clk_gate("sys2_pll_250m_cg", "sys2_pll_out", base + 0x3c, 19);
+ clks[IMX8MQ_SYS2_PLL_333M_CG] = imx_clk_gate("sys2_pll_333m_cg", "sys2_pll_out", base + 0x3c, 21);
+ clks[IMX8MQ_SYS2_PLL_500M_CG] = imx_clk_gate("sys2_pll_500m_cg", "sys2_pll_out", base + 0x3c, 23);
+ clks[IMX8MQ_SYS2_PLL_1000M_CG] = imx_clk_gate("sys2_pll_1000m_cg", "sys2_pll_out", base + 0x3c, 25);
+
+ clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_50m_cg", 1, 20);
+ clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_100m_cg", 1, 10);
+ clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_125m_cg", 1, 8);
+ clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_166m_cg", 1, 6);
+ clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_200m_cg", 1, 5);
+ clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_250m_cg", 1, 4);
+ clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_333m_cg", 1, 3);
+ clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_500m_cg", 1, 2);
+ clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_1000m_cg", 1, 1);
np = dev->of_node;
base = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 7a815ec76aa5..5c458199060a 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -41,6 +41,38 @@ struct clk_pll14xx {
#define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw)
+static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = {
+ PLL_1416X_RATE(1800000000U, 225, 3, 0),
+ PLL_1416X_RATE(1600000000U, 200, 3, 0),
+ PLL_1416X_RATE(1500000000U, 375, 3, 1),
+ PLL_1416X_RATE(1400000000U, 350, 3, 1),
+ PLL_1416X_RATE(1200000000U, 300, 3, 1),
+ PLL_1416X_RATE(1000000000U, 250, 3, 1),
+ PLL_1416X_RATE(800000000U, 200, 3, 1),
+ PLL_1416X_RATE(750000000U, 250, 2, 2),
+ PLL_1416X_RATE(700000000U, 350, 3, 2),
+ PLL_1416X_RATE(600000000U, 300, 3, 2),
+};
+
+static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
+ PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
+ PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
+ PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
+ PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
+};
+
+struct imx_pll14xx_clk imx_1443x_pll = {
+ .type = PLL_1443X,
+ .rate_table = imx_pll1443x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1443x_tbl),
+};
+
+struct imx_pll14xx_clk imx_1416x_pll = {
+ .type = PLL_1416X,
+ .rate_table = imx_pll1416x_tbl,
+ .rate_count = ARRAY_SIZE(imx_pll1416x_tbl),
+};
+
static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
struct clk_pll14xx *pll, unsigned long rate)
{
@@ -112,43 +144,17 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
return fvco;
}
-static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
+static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
u32 pll_div)
{
u32 old_mdiv, old_pdiv;
- old_mdiv = (pll_div >> MDIV_SHIFT) & MDIV_MASK;
- old_pdiv = (pll_div >> PDIV_SHIFT) & PDIV_MASK;
+ old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
+ old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
}
-static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
- u32 pll_div_ctl0, u32 pll_div_ctl1)
-{
- u32 old_mdiv, old_pdiv, old_kdiv;
-
- old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
- old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
- old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
-
- return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
- rate->kdiv != old_kdiv;
-}
-
-static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate,
- u32 pll_div_ctl0, u32 pll_div_ctl1)
-{
- u32 old_mdiv, old_pdiv, old_kdiv;
-
- old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
- old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
- old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
-
- return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
- rate->kdiv != old_kdiv;
-}
-
static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
{
u32 val;
@@ -174,7 +180,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
tmp = readl_relaxed(pll->base + 4);
- if (!clk_pll1416x_mp_change(rate, tmp)) {
+ if (!clk_pll14xx_mp_change(rate, tmp)) {
tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
tmp |= rate->sdiv << SDIV_SHIFT;
writel_relaxed(tmp, pll->base + 4);
@@ -239,13 +245,15 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
}
tmp = readl_relaxed(pll->base + 4);
- div_val = readl_relaxed(pll->base + 8);
- if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
+ if (!clk_pll14xx_mp_change(rate, tmp)) {
tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
tmp |= rate->sdiv << SDIV_SHIFT;
writel_relaxed(tmp, pll->base + 4);
+ tmp = rate->kdiv << KDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 8);
+
return 0;
}
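Two things happen in the clk-pll14xx.c hunks. First, the three near-identical *_change() helpers collapse into a single clk_pll14xx_mp_change(), and the 1443x set_rate path now unconditionally rewrites KDIV instead of comparing it. Second, the field extraction flips from shift-then-mask to mask-then-shift; the two are equivalent only when the mask is relative to bit 0, and assuming MDIV_MASK/PDIV_MASK in this driver are defined already shifted into field position (GENMASK-style), mask-then-shift is the correct form. A standalone sketch of the difference:

    #include <stdint.h>

    #define MDIV_SHIFT 12
    #define MDIV_MASK  (0x3ffU << MDIV_SHIFT)  /* assumption: pre-shifted mask */

    /* shift-then-mask: only right for a 0-based mask */
    static uint32_t mdiv_old(uint32_t reg)
    {
            return (reg >> MDIV_SHIFT) & MDIV_MASK;   /* wrong here */
    }

    /* mask-then-shift: matches a pre-shifted mask */
    static uint32_t mdiv_new(uint32_t reg)
    {
            return (reg & MDIV_MASK) >> MDIV_SHIFT;
    }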
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index f7a389a50401..bc5bb6ac8636 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -50,6 +50,9 @@ struct imx_pll14xx_clk {
int flags;
};
+extern struct imx_pll14xx_clk imx_1416x_pll;
+extern struct imx_pll14xx_clk imx_1443x_pll;
+
#define imx_clk_cpu(name, parent_name, div, mux, pll, step) \
imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk
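With the rate tables now living in clk-pll14xx.c and exported through these externs, every i.MX8M variant shares the same two PLL profiles instead of carrying private copies. A hypothetical additional user would need nothing beyond:

    /* MY_SOC_ARM_PLL and the register offset are hypothetical;
     * the shared profile comes from clk-pll14xx.c.
     */
    clks[MY_SOC_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel",
                                           base + 0x84, &imx_1416x_pll);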
diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig
index 1cb489959a99..b4555b465ea6 100644
--- a/drivers/clk/ingenic/Kconfig
+++ b/drivers/clk/ingenic/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Ingenic SoCs drivers"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
config INGENIC_CGU_COMMON
bool
@@ -45,6 +45,16 @@ config INGENIC_CGU_JZ4780
If building for a JZ4780 SoC, you want to say Y here.
+config INGENIC_CGU_X1000
+ bool "Ingenic X1000 CGU driver"
+ default MACH_X1000
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic X1000
+ and compatible SoCs.
+
+  If building for an X1000 SoC, you want to say Y here.
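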
+
config INGENIC_TCU_CLK
bool "Ingenic JZ47xx TCU clocks driver"
default MACH_INGENIC
diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile
index 097220b05131..8b1dad9b74a7 100644
--- a/drivers/clk/ingenic/Makefile
+++ b/drivers/clk/ingenic/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_INGENIC_CGU_JZ4740) += jz4740-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o
+obj-$(CONFIG_INGENIC_CGU_X1000) += x1000-cgu.o
obj-$(CONFIG_INGENIC_TCU_CLK) += tcu.o
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index a1a5f9cb439e..ad7daa494fd4 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -358,8 +358,7 @@ static int __init ingenic_tcu_probe(struct device_node *np)
}
}
- tcu->clocks = kzalloc(sizeof(*tcu->clocks) +
- sizeof(*tcu->clocks->hws) * TCU_CLK_COUNT,
+ tcu->clocks = kzalloc(struct_size(tcu->clocks, hws, TCU_CLK_COUNT),
GFP_KERNEL);
if (!tcu->clocks) {
ret = -ENOMEM;
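The tcu.c hunk swaps open-coded flexible-array sizing for struct_size(). The helper computes the same sizeof(*p) + n * sizeof(*p->member), but with checked arithmetic: on overflow it saturates to SIZE_MAX, so the kzalloc() fails outright instead of returning an undersized buffer. Equivalent open-coded form for reference:

    /* what struct_size(tcu->clocks, hws, TCU_CLK_COUNT) evaluates to,
     * minus the overflow saturation:
     */
    size_t size = sizeof(*tcu->clocks)
                + TCU_CLK_COUNT * sizeof(*tcu->clocks->hws);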
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
new file mode 100644
index 000000000000..b22d87b3f555
--- /dev/null
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * X1000 SoC CGU driver
+ * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <dt-bindings/clock/x1000-cgu.h>
+#include "cgu.h"
+#include "pm.h"
+
+/* CGU register offsets */
+#define CGU_REG_CPCCR 0x00
+#define CGU_REG_APLL 0x10
+#define CGU_REG_MPLL 0x14
+#define CGU_REG_CLKGR 0x20
+#define CGU_REG_OPCR 0x24
+#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_MACCDR 0x54
+#define CGU_REG_I2SCDR 0x60
+#define CGU_REG_LPCDR 0x64
+#define CGU_REG_MSC0CDR 0x68
+#define CGU_REG_I2SCDR1 0x70
+#define CGU_REG_SSICDR 0x74
+#define CGU_REG_CIMCDR 0x7c
+#define CGU_REG_PCMCDR 0x84
+#define CGU_REG_MSC1CDR 0xa4
+#define CGU_REG_CMP_INTR 0xb0
+#define CGU_REG_CMP_INTRE 0xb4
+#define CGU_REG_DRCG 0xd0
+#define CGU_REG_CPCSR 0xd4
+#define CGU_REG_PCMCDR1 0xe0
+#define CGU_REG_MACPHYC 0xe8
+
+/* bits within the OPCR register */
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
+
+static struct ingenic_cgu *cgu;
+
+static const s8 pll_od_encoding[8] = {
+ 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
+};
+
+static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
+
+ /* External clocks */
+
+ [X1000_CLK_EXCLK] = { "ext", CGU_CLK_EXT },
+ [X1000_CLK_RTCLK] = { "rtc", CGU_CLK_EXT },
+
+ /* PLLs */
+
+ [X1000_CLK_APLL] = {
+ "apll", CGU_CLK_PLL,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_APLL,
+ .m_shift = 24,
+ .m_bits = 7,
+ .m_offset = 1,
+ .n_shift = 18,
+ .n_bits = 5,
+ .n_offset = 1,
+ .od_shift = 16,
+ .od_bits = 2,
+ .od_max = 8,
+ .od_encoding = pll_od_encoding,
+ .bypass_bit = 9,
+ .enable_bit = 8,
+ .stable_bit = 10,
+ },
+ },
+
+ [X1000_CLK_MPLL] = {
+ "mpll", CGU_CLK_PLL,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_MPLL,
+ .m_shift = 24,
+ .m_bits = 7,
+ .m_offset = 1,
+ .n_shift = 18,
+ .n_bits = 5,
+ .n_offset = 1,
+ .od_shift = 16,
+ .od_bits = 2,
+ .od_max = 8,
+ .od_encoding = pll_od_encoding,
+ .bypass_bit = 6,
+ .enable_bit = 7,
+ .stable_bit = 0,
+ },
+ },
+
+ /* Muxes & dividers */
+
+ [X1000_CLK_SCLKA] = {
+ "sclk_a", CGU_CLK_MUX,
+ .parents = { -1, X1000_CLK_EXCLK, X1000_CLK_APLL, -1 },
+ .mux = { CGU_REG_CPCCR, 30, 2 },
+ },
+
+ [X1000_CLK_CPUMUX] = {
+ "cpu_mux", CGU_CLK_MUX,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 28, 2 },
+ },
+
+ [X1000_CLK_CPU] = {
+ "cpu", CGU_CLK_DIV,
+ .parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ },
+
+ [X1000_CLK_L2CACHE] = {
+ "l2cache", CGU_CLK_DIV,
+ .parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
+ },
+
+ [X1000_CLK_AHB0] = {
+ "ahb0", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 26, 2 },
+ .div = { CGU_REG_CPCCR, 8, 1, 4, 21, -1, -1 },
+ },
+
+ [X1000_CLK_AHB2PMUX] = {
+ "ahb2_apb_mux", CGU_CLK_MUX,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 24, 2 },
+ },
+
+ [X1000_CLK_AHB2] = {
+ "ahb2", CGU_CLK_DIV,
+ .parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 12, 1, 4, 20, -1, -1 },
+ },
+
+ [X1000_CLK_PCLK] = {
+ "pclk", CGU_CLK_DIV,
+ .parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 },
+ },
+
+ [X1000_CLK_DDR] = {
+ "ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 },
+ .mux = { CGU_REG_DDRCDR, 30, 2 },
+ .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 31 },
+ },
+
+ [X1000_CLK_MAC] = {
+ "mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
+ .mux = { CGU_REG_MACCDR, 31, 1 },
+ .div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 25 },
+ },
+
+ [X1000_CLK_MSCMUX] = {
+ "msc_mux", CGU_CLK_MUX,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
+ .mux = { CGU_REG_MSC0CDR, 31, 1 },
+ },
+
+ [X1000_CLK_MSC0] = {
+ "msc0", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC0CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 4 },
+ },
+
+ [X1000_CLK_MSC1] = {
+ "msc1", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC1CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 5 },
+ },
+
+ [X1000_CLK_SSIPLL] = {
+ "ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 31, 1 },
+ .div = { CGU_REG_SSICDR, 0, 1, 8, 29, 28, 27 },
+ },
+
+ [X1000_CLK_SSIMUX] = {
+ "ssi_mux", CGU_CLK_MUX,
+ .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 30, 1 },
+ },
+
+ /* Gate-only clocks */
+
+ [X1000_CLK_SFC] = {
+ "sfc", CGU_CLK_GATE,
+ .parents = { X1000_CLK_SSIPLL, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 2 },
+ },
+
+ [X1000_CLK_I2C0] = {
+ "i2c0", CGU_CLK_GATE,
+ .parents = { X1000_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 7 },
+ },
+
+ [X1000_CLK_I2C1] = {
+ "i2c1", CGU_CLK_GATE,
+ .parents = { X1000_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 8 },
+ },
+
+ [X1000_CLK_I2C2] = {
+ "i2c2", CGU_CLK_GATE,
+ .parents = { X1000_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 9 },
+ },
+
+ [X1000_CLK_UART0] = {
+ "uart0", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 14 },
+ },
+
+ [X1000_CLK_UART1] = {
+ "uart1", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 15 },
+ },
+
+ [X1000_CLK_UART2] = {
+ "uart2", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 16 },
+ },
+
+ [X1000_CLK_SSI] = {
+ "ssi", CGU_CLK_GATE,
+ .parents = { X1000_CLK_SSIMUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 19 },
+ },
+
+ [X1000_CLK_PDMA] = {
+ "pdma", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 21 },
+ },
+};
+
+static void __init x1000_cgu_init(struct device_node *np)
+{
+ int retval;
+
+ cgu = ingenic_cgu_new(x1000_cgu_clocks,
+ ARRAY_SIZE(x1000_cgu_clocks), np);
+ if (!cgu) {
+ pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
+
+ retval = ingenic_cgu_register_clocks(cgu);
+ if (retval) {
+ pr_err("%s: failed to register CGU clocks\n", __func__);
+ return;
+ }
+
+ ingenic_cgu_register_syscore_ops(cgu);
+}
+CLK_OF_DECLARE(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init);
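For context on how the new provider gets used: CLK_OF_DECLARE registers the init routine against the "ingenic,x1000-cgu" compatible, so the CGU comes up early from the device tree, and peripheral drivers then consume individual clocks through the common clk API. A minimal consumer sketch inside a driver's probe() (the connection name is hypothetical, matching the "uart0" gate above):

    struct clk *clk;
    int ret;

    clk = devm_clk_get(&pdev->dev, "uart0");    /* name is illustrative */
    if (IS_ERR(clk))
            return PTR_ERR(clk);

    ret = clk_prepare_enable(clk);   /* ungates CGU_REG_CLKGR bit 14 */
    if (ret)
            return ret;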
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index 354c26f663b4..a3bd9a107209 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1306,9 +1306,8 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
int r, i;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
pr_err("%s(): ioremap failed\n", __func__);
return PTR_ERR(base);
@@ -1394,9 +1393,8 @@ static int clk_mt2712_mcu_probe(struct platform_device *pdev)
int r;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
pr_err("%s(): ioremap failed\n", __func__);
return PTR_ERR(base);
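This MediaTek hunk, and the identical ones in the files that follow, all make one substitution: the platform_get_resource() + devm_ioremap_resource() pair collapses into devm_platform_ioremap_resource(), which is simply those two calls wrapped together. Sketch of the equivalence:

    /* before: two calls plus a struct resource local */
    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    void __iomem *base  = devm_ioremap_resource(&pdev->dev, res);

    /* after: one call, same IS_ERR(base) error handling */
    void __iomem *base2 = devm_platform_ioremap_resource(pdev, 0);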
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 608a9a6621a3..9766cccf5844 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1225,12 +1225,11 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
static int clk_mt6779_top_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index f62b0428da0e..f35389a11af1 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -385,9 +385,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 8190dab179d7..ef5947e15c75 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -614,9 +614,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -695,9 +694,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index d6233994af5a..b73bdf152836 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -574,9 +574,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -626,9 +625,8 @@ static int mtk_pericfg_init(struct platform_device *pdev)
void __iomem *base;
int r;
struct device_node *node = pdev->dev.of_node;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 51c8d5c9a030..5046852eb0fd 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -1189,11 +1189,10 @@ CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1262,9 +1261,8 @@ static int clk_mt8183_mcu_probe(struct platform_device *pdev)
struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
void __iomem *base;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 18b23cdf679c..53715e36326c 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -20,12 +20,7 @@
#include "clk-phase.h"
#include "sclk-div.h"
-#define AUD_MST_IN_COUNT 8
-#define AUD_SLV_SCLK_COUNT 10
-#define AUD_SLV_LRCLK_COUNT 10
-
-#define AUD_GATE(_name, _reg, _bit, _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
.bit_idx = (_bit), \
@@ -33,14 +28,13 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
}
-#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pdata, _iflags) \
-struct clk_regmap aud_##_name = { \
+#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pdata, _iflags) { \
.data = &(struct clk_regmap_mux_data){ \
.offset = (_reg), \
.mask = (_mask), \
@@ -56,8 +50,7 @@ struct clk_regmap aud_##_name = { \
}, \
}
-#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) { \
.data = &(struct clk_regmap_div_data){ \
.offset = (_reg), \
.shift = (_shift), \
@@ -67,137 +60,27 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data){ \
.name = "aud_"#_name, \
.ops = &clk_regmap_divider_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = (_iflags), \
}, \
}
-#define AUD_PCLK_GATE(_name, _bit) \
-struct clk_regmap aud_##_name = { \
+#define AUD_PCLK_GATE(_name, _reg, _bit) { \
.data = &(struct clk_regmap_gate_data){ \
- .offset = (AUDIO_CLK_GATE_EN), \
+ .offset = (_reg), \
.bit_idx = (_bit), \
}, \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "pclk", \
- }, \
+ .parent_names = (const char *[]){ "aud_top" }, \
.num_parents = 1, \
}, \
}
-/* Audio peripheral clocks */
-static AUD_PCLK_GATE(ddr_arb, 0);
-static AUD_PCLK_GATE(pdm, 1);
-static AUD_PCLK_GATE(tdmin_a, 2);
-static AUD_PCLK_GATE(tdmin_b, 3);
-static AUD_PCLK_GATE(tdmin_c, 4);
-static AUD_PCLK_GATE(tdmin_lb, 5);
-static AUD_PCLK_GATE(tdmout_a, 6);
-static AUD_PCLK_GATE(tdmout_b, 7);
-static AUD_PCLK_GATE(tdmout_c, 8);
-static AUD_PCLK_GATE(frddr_a, 9);
-static AUD_PCLK_GATE(frddr_b, 10);
-static AUD_PCLK_GATE(frddr_c, 11);
-static AUD_PCLK_GATE(toddr_a, 12);
-static AUD_PCLK_GATE(toddr_b, 13);
-static AUD_PCLK_GATE(toddr_c, 14);
-static AUD_PCLK_GATE(loopback, 15);
-static AUD_PCLK_GATE(spdifin, 16);
-static AUD_PCLK_GATE(spdifout, 17);
-static AUD_PCLK_GATE(resample, 18);
-static AUD_PCLK_GATE(power_detect, 19);
-static AUD_PCLK_GATE(spdifout_b, 21);
-
-/* Audio Master Clocks */
-static const struct clk_parent_data mst_mux_parent_data[] = {
- { .fw_name = "mst_in0", },
- { .fw_name = "mst_in1", },
- { .fw_name = "mst_in2", },
- { .fw_name = "mst_in3", },
- { .fw_name = "mst_in4", },
- { .fw_name = "mst_in5", },
- { .fw_name = "mst_in6", },
- { .fw_name = "mst_in7", },
-};
-
-#define AUD_MST_MUX(_name, _reg, _flag) \
- AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
- mst_mux_parent_data, 0)
-
-#define AUD_MST_MCLK_MUX(_name, _reg) \
- AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
-
-#define AUD_MST_SYS_MUX(_name, _reg) \
- AUD_MST_MUX(_name, _reg, 0)
-
-static AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-static AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
-
-#define AUD_MST_DIV(_name, _reg, _flag) \
- AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
- aud_##_name##_sel, CLK_SET_RATE_PARENT) \
-
-#define AUD_MST_MCLK_DIV(_name, _reg) \
- AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
-
-#define AUD_MST_SYS_DIV(_name, _reg) \
- AUD_MST_DIV(_name, _reg, 0)
-
-static AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-static AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
-
-#define AUD_MST_MCLK_GATE(_name, _reg) \
- AUD_GATE(_name, _reg, 31, aud_##_name##_div, \
- CLK_SET_RATE_PARENT)
-
-static AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AUD_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-static AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
-
-/* Sample Clocks */
-#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
- AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
- aud_mst_##_name##_mclk, 0)
-
-static AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
#define AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
- _hi_shift, _hi_width, _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+ _hi_shift, _hi_width, _pname, _iflags) { \
.data = &(struct meson_sclk_div_data) { \
.div = { \
.reg_off = (_reg), \
@@ -213,38 +96,14 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &meson_sclk_div_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = (_iflags), \
}, \
}
-#define AUD_MST_SCLK_DIV(_name, _reg) \
- AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
- aud_mst_##_name##_sclk_pre_en, \
- CLK_SET_RATE_PARENT)
-
-static AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AUD_MST_SCLK_POST_EN(_name, _reg) \
- AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
- aud_mst_##_name##_sclk_div, CLK_SET_RATE_PARENT)
-
-static AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-
#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
- _phws, _iflags) \
-struct clk_regmap aud_##_name = { \
+ _pname, _iflags) { \
.data = &(struct meson_clk_triphase_data) { \
.ph0 = { \
.reg_off = (_reg), \
@@ -265,52 +124,91 @@ struct clk_regmap aud_##_name = { \
.hw.init = &(struct clk_init_data) { \
.name = "aud_"#_name, \
.ops = &meson_clk_triphase_ops, \
- .parent_hws = (const struct clk_hw *[]) { &_phws.hw }, \
+ .parent_names = (const char *[]){ #_pname }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | (_iflags), \
}, \
}
+#define AUD_PHASE(_name, _reg, _width, _shift, _pname, _iflags) { \
+ .data = &(struct meson_clk_phase_data) { \
+ .ph = { \
+ .reg_off = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }, \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = "aud_"#_name, \
+ .ops = &meson_clk_phase_ops, \
+ .parent_names = (const char *[]){ #_pname }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
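/*
 * For illustration (editorial sketch, not part of the patch): AUD_PHASE
 * factors out the phase-clock initializer that the old AUD_TDM_SCLK
 * macro open-coded, so e.g.
 *
 *	AUD_PHASE(tdmin_a_sclk, AUDIO_CLK_TDMIN_A_CTRL, 1, 29,
 *		  aud_tdmin_a_sclk_post_en,
 *		  CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)
 *
 * yields the same 1-bit phase control at bit 29 as before, with the
 * parent now referenced by name rather than by clk_hw pointer.
 */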
+
+/* Audio Master Clocks */
+static const struct clk_parent_data mst_mux_parent_data[] = {
+ { .fw_name = "mst_in0", },
+ { .fw_name = "mst_in1", },
+ { .fw_name = "mst_in2", },
+ { .fw_name = "mst_in3", },
+ { .fw_name = "mst_in4", },
+ { .fw_name = "mst_in5", },
+ { .fw_name = "mst_in6", },
+ { .fw_name = "mst_in7", },
+};
+
+#define AUD_MST_MUX(_name, _reg, _flag) \
+ AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
+ mst_mux_parent_data, 0)
+#define AUD_MST_DIV(_name, _reg, _flag) \
+ AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
+ aud_##_name##_sel, CLK_SET_RATE_PARENT)
+#define AUD_MST_MCLK_GATE(_name, _reg) \
+ AUD_GATE(_name, _reg, 31, aud_##_name##_div, \
+ CLK_SET_RATE_PARENT)
+
+#define AUD_MST_MCLK_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
+#define AUD_MST_MCLK_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
+
+#define AUD_MST_SYS_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, 0)
+#define AUD_MST_SYS_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, 0)
+
+/* Sample Clocks */
+#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ aud_mst_##_name##_mclk, 0)
+#define AUD_MST_SCLK_DIV(_name, _reg) \
+ AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ aud_mst_##_name##_sclk_pre_en, \
+ CLK_SET_RATE_PARENT)
+#define AUD_MST_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ aud_mst_##_name##_sclk_div, CLK_SET_RATE_PARENT)
#define AUD_MST_SCLK(_name, _reg) \
AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
aud_mst_##_name##_sclk_post_en, CLK_SET_RATE_PARENT)
-static AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-
#define AUD_MST_LRCLK_DIV(_name, _reg) \
AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
- aud_mst_##_name##_sclk_post_en, 0) \
-
-static AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
+ aud_mst_##_name##_sclk_post_en, 0)
#define AUD_MST_LRCLK(_name, _reg) \
AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
aud_mst_##_name##_lrclk_div, CLK_SET_RATE_PARENT)
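/*
 * For illustration (editorial sketch, not part of the patch): for each
 * master clock X in a..f, the helpers above chain up as
 *
 *	mst_X_mclk_sel -> mst_X_mclk_div -> mst_X_mclk
 *	  -> mst_X_sclk_pre_en -> mst_X_sclk_div -> mst_X_sclk_post_en
 *	    -> mst_X_sclk (triphase bit clock)
 *	    -> mst_X_lrclk_div -> mst_X_lrclk (triphase sample clock)
 *
 * with every element being one clk_regmap instance defined further down.
 */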
-static AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-
+/* TDM bit clock sources */
static const struct clk_parent_data tdm_sclk_parent_data[] = {
- { .hw = &aud_mst_a_sclk.hw, },
- { .hw = &aud_mst_b_sclk.hw, },
- { .hw = &aud_mst_c_sclk.hw, },
- { .hw = &aud_mst_d_sclk.hw, },
- { .hw = &aud_mst_e_sclk.hw, },
- { .hw = &aud_mst_f_sclk.hw, },
+ { .name = "aud_mst_a_sclk", .index = -1, },
+ { .name = "aud_mst_b_sclk", .index = -1, },
+ { .name = "aud_mst_c_sclk", .index = -1, },
+ { .name = "aud_mst_d_sclk", .index = -1, },
+ { .name = "aud_mst_e_sclk", .index = -1, },
+ { .name = "aud_mst_f_sclk", .index = -1, },
{ .fw_name = "slv_sclk0", },
{ .fw_name = "slv_sclk1", },
{ .fw_name = "slv_sclk2", },
@@ -323,78 +221,14 @@ static const struct clk_parent_data tdm_sclk_parent_data[] = {
{ .fw_name = "slv_sclk9", },
};
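/*
 * Editorial note (assumption, not part of the patch): for the entries
 * above, .index = -1 with no .fw_name should make the clk core skip
 * the DT "clocks" lookup and fall back to matching .name against the
 * global clock name space, so e.g. "aud_mst_a_sclk" resolves to the
 * master clock registered by this driver.
 */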
-#define AUD_TDM_SCLK_MUX(_name, _reg) \
- AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_sclk_parent_data, 0)
-
-static AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
- AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
- aud_tdm##_name##_sclk_sel, CLK_SET_RATE_PARENT)
-
-static AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
- AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
- aud_tdm##_name##_sclk_pre_en, CLK_SET_RATE_PARENT)
-
-static AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AUD_TDM_SCLK(_name, _reg) \
- struct clk_regmap aud_tdm##_name##_sclk = { \
- .data = &(struct meson_clk_phase_data) { \
- .ph = { \
- .reg_off = (_reg), \
- .shift = 29, \
- .width = 1, \
- }, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "aud_tdm"#_name"_sclk", \
- .ops = &meson_clk_phase_ops, \
- .parent_hws = (const struct clk_hw *[]) { \
- &aud_tdm##_name##_sclk_post_en.hw \
- }, \
- .num_parents = 1, \
- .flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
- }, \
-}
-
-static AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
+/* TDM sample clock sources */
static const struct clk_parent_data tdm_lrclk_parent_data[] = {
- { .hw = &aud_mst_a_lrclk.hw, },
- { .hw = &aud_mst_b_lrclk.hw, },
- { .hw = &aud_mst_c_lrclk.hw, },
- { .hw = &aud_mst_d_lrclk.hw, },
- { .hw = &aud_mst_e_lrclk.hw, },
- { .hw = &aud_mst_f_lrclk.hw, },
+ { .name = "aud_mst_a_lrclk", .index = -1, },
+ { .name = "aud_mst_b_lrclk", .index = -1, },
+ { .name = "aud_mst_c_lrclk", .index = -1, },
+ { .name = "aud_mst_d_lrclk", .index = -1, },
+ { .name = "aud_mst_e_lrclk", .index = -1, },
+ { .name = "aud_mst_f_lrclk", .index = -1, },
{ .fw_name = "slv_lrclk0", },
{ .fw_name = "slv_lrclk1", },
{ .fw_name = "slv_lrclk2", },
@@ -407,69 +241,536 @@ static const struct clk_parent_data tdm_lrclk_parent_data[] = {
{ .fw_name = "slv_lrclk9", },
};
-#define AUD_TDM_LRLCK(_name, _reg) \
- AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_lrclk_parent_data, 0)
+#define AUD_TDM_SCLK_MUX(_name, _reg) \
+ AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+ CLK_MUX_ROUND_CLOSEST, tdm_sclk_parent_data, 0)
+#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ aud_tdm##_name##_sclk_sel, CLK_SET_RATE_PARENT)
+#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ aud_tdm##_name##_sclk_pre_en, CLK_SET_RATE_PARENT)
+#define AUD_TDM_SCLK(_name, _reg) \
+ AUD_PHASE(tdm##_name##_sclk, _reg, 1, 29, \
+ aud_tdm##_name##_sclk_post_en, \
+ CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)
+
+#define AUD_TDM_LRLCK(_name, _reg) \
+ AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, tdm_lrclk_parent_data, 0)
+
+/* Pad master clock sources */
+static const struct clk_parent_data mclk_pad_ctrl_parent_data[] = {
+ { .name = "aud_mst_a_mclk", .index = -1, },
+ { .name = "aud_mst_b_mclk", .index = -1, },
+ { .name = "aud_mst_c_mclk", .index = -1, },
+ { .name = "aud_mst_d_mclk", .index = -1, },
+ { .name = "aud_mst_e_mclk", .index = -1, },
+ { .name = "aud_mst_f_mclk", .index = -1, },
+};
+
+/* Pad bit clock sources */
+static const struct clk_parent_data sclk_pad_ctrl_parent_data[] = {
+ { .name = "aud_mst_a_sclk", .index = -1, },
+ { .name = "aud_mst_b_sclk", .index = -1, },
+ { .name = "aud_mst_c_sclk", .index = -1, },
+ { .name = "aud_mst_d_sclk", .index = -1, },
+ { .name = "aud_mst_e_sclk", .index = -1, },
+ { .name = "aud_mst_f_sclk", .index = -1, },
+};
-static AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AUD_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AUD_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AUD_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AUD_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+/* Pad sample clock sources */
+static const struct clk_parent_data lrclk_pad_ctrl_parent_data[] = {
+ { .name = "aud_mst_a_lrclk", .index = -1, },
+ { .name = "aud_mst_b_lrclk", .index = -1, },
+ { .name = "aud_mst_c_lrclk", .index = -1, },
+ { .name = "aud_mst_d_lrclk", .index = -1, },
+ { .name = "aud_mst_e_lrclk", .index = -1, },
+ { .name = "aud_mst_f_lrclk", .index = -1, },
+};
-/* G12a Pad control */
#define AUD_TDM_PAD_CTRL(_name, _reg, _shift, _parents) \
- AUD_MUX(tdm_##_name, _reg, 0x7, _shift, 0, _parents, \
+ AUD_MUX(_name, _reg, 0x7, _shift, 0, _parents, \
CLK_SET_RATE_NO_REPARENT)
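/*
 * Editorial note (assumption, not part of the patch): the macro no
 * longer prepends "tdm_" to the mux name, so each SoC variant names
 * its pad clocks explicitly; assuming AUD_MUX keeps the usual "aud_"
 * prefix, this gives e.g. "aud_mclk_pad_0" on g12a versus
 * "aud_tdm_mclk_pad_0" on sm1 (see the definitions below).
 */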
-static const struct clk_parent_data mclk_pad_ctrl_parent_data[] = {
- { .hw = &aud_mst_a_mclk.hw },
- { .hw = &aud_mst_b_mclk.hw },
- { .hw = &aud_mst_c_mclk.hw },
- { .hw = &aud_mst_d_mclk.hw },
- { .hw = &aud_mst_e_mclk.hw },
- { .hw = &aud_mst_f_mclk.hw },
+/* Common Clocks */
+static struct clk_regmap ddr_arb =
+ AUD_PCLK_GATE(ddr_arb, AUDIO_CLK_GATE_EN, 0);
+static struct clk_regmap pdm =
+ AUD_PCLK_GATE(pdm, AUDIO_CLK_GATE_EN, 1);
+static struct clk_regmap tdmin_a =
+ AUD_PCLK_GATE(tdmin_a, AUDIO_CLK_GATE_EN, 2);
+static struct clk_regmap tdmin_b =
+ AUD_PCLK_GATE(tdmin_b, AUDIO_CLK_GATE_EN, 3);
+static struct clk_regmap tdmin_c =
+ AUD_PCLK_GATE(tdmin_c, AUDIO_CLK_GATE_EN, 4);
+static struct clk_regmap tdmin_lb =
+ AUD_PCLK_GATE(tdmin_lb, AUDIO_CLK_GATE_EN, 5);
+static struct clk_regmap tdmout_a =
+ AUD_PCLK_GATE(tdmout_a, AUDIO_CLK_GATE_EN, 6);
+static struct clk_regmap tdmout_b =
+ AUD_PCLK_GATE(tdmout_b, AUDIO_CLK_GATE_EN, 7);
+static struct clk_regmap tdmout_c =
+ AUD_PCLK_GATE(tdmout_c, AUDIO_CLK_GATE_EN, 8);
+static struct clk_regmap frddr_a =
+ AUD_PCLK_GATE(frddr_a, AUDIO_CLK_GATE_EN, 9);
+static struct clk_regmap frddr_b =
+ AUD_PCLK_GATE(frddr_b, AUDIO_CLK_GATE_EN, 10);
+static struct clk_regmap frddr_c =
+ AUD_PCLK_GATE(frddr_c, AUDIO_CLK_GATE_EN, 11);
+static struct clk_regmap toddr_a =
+ AUD_PCLK_GATE(toddr_a, AUDIO_CLK_GATE_EN, 12);
+static struct clk_regmap toddr_b =
+ AUD_PCLK_GATE(toddr_b, AUDIO_CLK_GATE_EN, 13);
+static struct clk_regmap toddr_c =
+ AUD_PCLK_GATE(toddr_c, AUDIO_CLK_GATE_EN, 14);
+static struct clk_regmap loopback =
+ AUD_PCLK_GATE(loopback, AUDIO_CLK_GATE_EN, 15);
+static struct clk_regmap spdifin =
+ AUD_PCLK_GATE(spdifin, AUDIO_CLK_GATE_EN, 16);
+static struct clk_regmap spdifout =
+ AUD_PCLK_GATE(spdifout, AUDIO_CLK_GATE_EN, 17);
+static struct clk_regmap resample =
+ AUD_PCLK_GATE(resample, AUDIO_CLK_GATE_EN, 18);
+static struct clk_regmap power_detect =
+ AUD_PCLK_GATE(power_detect, AUDIO_CLK_GATE_EN, 19);
+
+static struct clk_regmap spdifout_clk_sel =
+ AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static struct clk_regmap pdm_dclk_sel =
+ AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static struct clk_regmap spdifin_clk_sel =
+ AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static struct clk_regmap pdm_sysclk_sel =
+ AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static struct clk_regmap spdifout_b_clk_sel =
+ AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+static struct clk_regmap spdifout_clk_div =
+ AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static struct clk_regmap pdm_dclk_div =
+ AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static struct clk_regmap spdifin_clk_div =
+ AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static struct clk_regmap pdm_sysclk_div =
+ AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static struct clk_regmap spdifout_b_clk_div =
+ AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+static struct clk_regmap spdifout_clk =
+ AUD_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static struct clk_regmap spdifin_clk =
+ AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static struct clk_regmap pdm_dclk =
+ AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static struct clk_regmap pdm_sysclk =
+ AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static struct clk_regmap spdifout_b_clk =
+ AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+static struct clk_regmap mst_a_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_sclk_pre_en =
+ AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_sclk_div =
+ AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_sclk_div =
+ AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_sclk_div =
+ AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_sclk_div =
+ AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_sclk_div =
+ AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_sclk_div =
+ AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_sclk_post_en =
+ AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_sclk =
+ AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static struct clk_regmap mst_b_sclk =
+ AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static struct clk_regmap mst_c_sclk =
+ AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static struct clk_regmap mst_d_sclk =
+ AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static struct clk_regmap mst_e_sclk =
+ AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static struct clk_regmap mst_f_sclk =
+ AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static struct clk_regmap mst_a_lrclk_div =
+ AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static struct clk_regmap mst_b_lrclk_div =
+ AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static struct clk_regmap mst_c_lrclk_div =
+ AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static struct clk_regmap mst_d_lrclk_div =
+ AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static struct clk_regmap mst_e_lrclk_div =
+ AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static struct clk_regmap mst_f_lrclk_div =
+ AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+static struct clk_regmap mst_a_lrclk =
+ AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static struct clk_regmap mst_b_lrclk =
+ AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static struct clk_regmap mst_c_lrclk =
+ AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static struct clk_regmap mst_d_lrclk =
+ AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static struct clk_regmap mst_e_lrclk =
+ AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static struct clk_regmap mst_f_lrclk =
+ AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+static struct clk_regmap tdmin_a_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk_sel =
+ AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk_sel =
+ AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk_sel =
+ AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk_sel =
+ AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk_pre_en =
+ AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk_post_en =
+ AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_sclk =
+ AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_sclk =
+ AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_sclk =
+ AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_sclk =
+ AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_sclk =
+ AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_sclk =
+ AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_sclk =
+ AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+static struct clk_regmap tdmin_a_lrclk =
+ AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static struct clk_regmap tdmin_b_lrclk =
+ AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static struct clk_regmap tdmin_c_lrclk =
+ AUD_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static struct clk_regmap tdmin_lb_lrclk =
+ AUD_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static struct clk_regmap tdmout_a_lrclk =
+ AUD_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static struct clk_regmap tdmout_b_lrclk =
+ AUD_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static struct clk_regmap tdmout_c_lrclk =
+ AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/* AXG/G12A Clocks */
+static struct clk_hw axg_aud_top = {
+ .init = &(struct clk_init_data) {
+		/* Provide the "aud_top" signal name on axg and g12a */
+ .name = "aud_top",
+ .ops = &(const struct clk_ops) {},
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "pclk",
+ },
+ .num_parents = 1,
+ },
};
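/*
 * Editorial note (assumption, not part of the patch): with an empty
 * clk_ops, axg_aud_top should act as a transparent pass-through of
 * "pclk"; its only job is to publish the "aud_top" name that the
 * AUD_PCLK_GATE parents refer to, mirroring on axg/g12a what the
 * sm1_aud_top mux provides on SM1.
 */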
-static AUD_TDM_PAD_CTRL(mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0,
- mclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4,
- mclk_pad_ctrl_parent_data);
+static struct clk_regmap mst_a_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static struct clk_regmap mst_b_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static struct clk_regmap mst_c_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static struct clk_regmap mst_d_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static struct clk_regmap mst_e_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static struct clk_regmap mst_f_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+
+static struct clk_regmap mst_a_mclk_div =
+ AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static struct clk_regmap mst_b_mclk_div =
+ AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static struct clk_regmap mst_c_mclk_div =
+ AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static struct clk_regmap mst_d_mclk_div =
+ AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static struct clk_regmap mst_e_mclk_div =
+ AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static struct clk_regmap mst_f_mclk_div =
+ AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+
+static struct clk_regmap mst_a_mclk =
+ AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static struct clk_regmap mst_b_mclk =
+ AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static struct clk_regmap mst_c_mclk =
+ AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static struct clk_regmap mst_d_mclk =
+ AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static struct clk_regmap mst_e_mclk =
+ AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static struct clk_regmap mst_f_mclk =
+ AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+
+/* G12a Clocks */
+static struct clk_regmap g12a_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL(
+ mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_mclk_pad_1 = AUD_TDM_PAD_CTRL(
+ mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4, mclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_lrclk_pad_0 = AUD_TDM_PAD_CTRL(
+ lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_lrclk_pad_1 = AUD_TDM_PAD_CTRL(
+ lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_lrclk_pad_2 = AUD_TDM_PAD_CTRL(
+ lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_sclk_pad_0 = AUD_TDM_PAD_CTRL(
+ sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0, sclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_sclk_pad_1 = AUD_TDM_PAD_CTRL(
+ sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4, sclk_pad_ctrl_parent_data);
+static struct clk_regmap g12a_tdm_sclk_pad_2 = AUD_TDM_PAD_CTRL(
+ sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8, sclk_pad_ctrl_parent_data);
+
+/* G12a/SM1 Clocks */
+static struct clk_regmap toram =
+ AUD_PCLK_GATE(toram, AUDIO_CLK_GATE_EN, 20);
+static struct clk_regmap spdifout_b =
+ AUD_PCLK_GATE(spdifout_b, AUDIO_CLK_GATE_EN, 21);
+static struct clk_regmap eqdrc =
+ AUD_PCLK_GATE(eqdrc, AUDIO_CLK_GATE_EN, 22);
+
+/* SM1 Clocks */
+static struct clk_regmap sm1_clk81_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AUDIO_CLK81_EN,
+ .bit_idx = 31,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_clk81_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "pclk",
+ },
+ .num_parents = 1,
+ },
+};
-static const struct clk_parent_data lrclk_pad_ctrl_parent_data[] = {
- { .hw = &aud_mst_a_lrclk.hw },
- { .hw = &aud_mst_b_lrclk.hw },
- { .hw = &aud_mst_c_lrclk.hw },
- { .hw = &aud_mst_d_lrclk.hw },
- { .hw = &aud_mst_e_lrclk.hw },
- { .hw = &aud_mst_f_lrclk.hw },
+static struct clk_regmap sm1_sysclk_a_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_a_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_clk81_en.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
};
-static AUD_TDM_PAD_CTRL(lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16,
- lrclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20,
- lrclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24,
- lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_sysclk_a_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_a_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_sysclk_a_div.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
-static const struct clk_parent_data sclk_pad_ctrl_parent_data[] = {
- { .hw = &aud_mst_a_sclk.hw },
- { .hw = &aud_mst_b_sclk.hw },
- { .hw = &aud_mst_c_sclk.hw },
- { .hw = &aud_mst_d_sclk.hw },
- { .hw = &aud_mst_e_sclk.hw },
- { .hw = &aud_mst_f_sclk.hw },
+static struct clk_regmap sm1_sysclk_b_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .shift = 16,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_b_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_clk81_en.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
};
-static AUD_TDM_PAD_CTRL(sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0,
- sclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4,
- sclk_pad_ctrl_parent_data);
-static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
- sclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_sysclk_b_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "aud_sysclk_b_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_sysclk_b_div.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct clk_hw *sm1_aud_top_parents[] = {
+ &sm1_sysclk_a_en.hw,
+ &sm1_sysclk_b_en.hw,
+};
+
+static struct clk_regmap sm1_aud_top = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = AUDIO_CLK81_CTRL,
+ .mask = 0x1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "aud_top",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = sm1_aud_top_parents,
+ .num_parents = ARRAY_SIZE(sm1_aud_top_parents),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
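/*
 * For illustration (editorial sketch, not part of the patch): the SM1
 * tree built above is
 *
 *	pclk -> aud_clk81_en -> aud_sysclk_a_div -> aud_sysclk_a_en -> aud_top
 *	                     -> aud_sysclk_b_div -> aud_sysclk_b_en -> aud_top
 *
 * with aud_top selecting between the two branches via bit 31 of
 * AUDIO_CLK81_CTRL.
 */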
+
+static struct clk_regmap resample_b =
+ AUD_PCLK_GATE(resample_b, AUDIO_CLK_GATE_EN, 26);
+static struct clk_regmap tovad =
+ AUD_PCLK_GATE(tovad, AUDIO_CLK_GATE_EN, 27);
+static struct clk_regmap locker =
+ AUD_PCLK_GATE(locker, AUDIO_CLK_GATE_EN, 28);
+static struct clk_regmap spdifin_lb =
+ AUD_PCLK_GATE(spdifin_lb, AUDIO_CLK_GATE_EN, 29);
+static struct clk_regmap frddr_d =
+ AUD_PCLK_GATE(frddr_d, AUDIO_CLK_GATE_EN1, 0);
+static struct clk_regmap toddr_d =
+ AUD_PCLK_GATE(toddr_d, AUDIO_CLK_GATE_EN1, 1);
+static struct clk_regmap loopback_b =
+ AUD_PCLK_GATE(loopback_b, AUDIO_CLK_GATE_EN1, 2);
+
+static struct clk_regmap sm1_mst_a_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
+static struct clk_regmap sm1_mst_b_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_SM1_MCLK_B_CTRL);
+static struct clk_regmap sm1_mst_c_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_SM1_MCLK_C_CTRL);
+static struct clk_regmap sm1_mst_d_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_SM1_MCLK_D_CTRL);
+static struct clk_regmap sm1_mst_e_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
+static struct clk_regmap sm1_mst_f_mclk_sel =
+ AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+
+static struct clk_regmap sm1_mst_a_mclk_div =
+ AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
+static struct clk_regmap sm1_mst_b_mclk_div =
+ AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_SM1_MCLK_B_CTRL);
+static struct clk_regmap sm1_mst_c_mclk_div =
+ AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_SM1_MCLK_C_CTRL);
+static struct clk_regmap sm1_mst_d_mclk_div =
+ AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_SM1_MCLK_D_CTRL);
+static struct clk_regmap sm1_mst_e_mclk_div =
+ AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
+static struct clk_regmap sm1_mst_f_mclk_div =
+ AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+
+static struct clk_regmap sm1_mst_a_mclk =
+ AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_SM1_MCLK_A_CTRL);
+static struct clk_regmap sm1_mst_b_mclk =
+ AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_SM1_MCLK_B_CTRL);
+static struct clk_regmap sm1_mst_c_mclk =
+ AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_SM1_MCLK_C_CTRL);
+static struct clk_regmap sm1_mst_d_mclk =
+ AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_SM1_MCLK_D_CTRL);
+static struct clk_regmap sm1_mst_e_mclk =
+ AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_SM1_MCLK_E_CTRL);
+static struct clk_regmap sm1_mst_f_mclk =
+ AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_SM1_MCLK_F_CTRL);
+
+static struct clk_regmap sm1_tdm_mclk_pad_0 = AUD_TDM_PAD_CTRL(
+ tdm_mclk_pad_0, AUDIO_SM1_MST_PAD_CTRL0, 0, mclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_mclk_pad_1 = AUD_TDM_PAD_CTRL(
+ tdm_mclk_pad_1, AUDIO_SM1_MST_PAD_CTRL0, 4, mclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_lrclk_pad_0 = AUD_TDM_PAD_CTRL(
+ tdm_lrclk_pad_0, AUDIO_SM1_MST_PAD_CTRL1, 16, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_lrclk_pad_1 = AUD_TDM_PAD_CTRL(
+ tdm_lrclk_pad_1, AUDIO_SM1_MST_PAD_CTRL1, 20, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_lrclk_pad_2 = AUD_TDM_PAD_CTRL(
+ tdm_lrclk_pad_2, AUDIO_SM1_MST_PAD_CTRL1, 24, lrclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_sclk_pad_0 = AUD_TDM_PAD_CTRL(
+ tdm_sclk_pad_0, AUDIO_SM1_MST_PAD_CTRL1, 0, sclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_sclk_pad_1 = AUD_TDM_PAD_CTRL(
+ tdm_sclk_pad_1, AUDIO_SM1_MST_PAD_CTRL1, 4, sclk_pad_ctrl_parent_data);
+static struct clk_regmap sm1_tdm_sclk_pad_2 = AUD_TDM_PAD_CTRL(
+ tdm_sclk_pad_2, AUDIO_SM1_MST_PAD_CTRL1, 8, sclk_pad_ctrl_parent_data);
/*
* Array of all clocks provided by this provider
@@ -477,127 +778,128 @@ static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
*/
static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
.hws = {
- [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
- [AUD_CLKID_PDM] = &aud_pdm.hw,
- [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
- [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
- [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
- [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
- [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
- [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
- [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
- [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
- [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
- [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
- [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
- [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
- [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
- [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
- [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
- [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
- [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
- [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
- [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
- [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
- [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
- [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
- [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
- [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
- [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
- [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
- [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
- [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
- [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
- [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
- [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
- [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
- [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
- [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
- [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
- [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
- [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
- [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
- [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
- [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
- [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
- [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
- [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
- [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
- [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
- [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
- [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
- [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
- [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
- [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
- [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
- [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
- [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
- [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
- [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
- [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
- [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
- [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
- [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
- [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
- [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
- [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
- [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
- [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
- [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
- [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
- [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
- [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
- [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
- [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
- [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
- [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
- [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
- [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
- [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
- [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
- [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
- [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
- [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
- [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
- [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
- [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
- [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
- [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
- [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
- [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
- [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
- [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
- [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
- [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
- [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
- [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
- [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
- [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
- [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
+ [AUD_CLKID_DDR_ARB] = &ddr_arb.hw,
+ [AUD_CLKID_PDM] = &pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &tdmout_c_lrclk.hw,
+ [AUD_CLKID_TOP] = &axg_aud_top,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -609,284 +911,596 @@ static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
*/
static struct clk_hw_onecell_data g12a_audio_hw_onecell_data = {
.hws = {
- [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
- [AUD_CLKID_PDM] = &aud_pdm.hw,
- [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
- [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
- [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
- [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
- [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
- [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
- [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
- [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
- [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
- [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
- [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
- [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
- [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
- [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
- [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
- [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
- [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
- [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
- [AUD_CLKID_SPDIFOUT_B] = &aud_spdifout_b.hw,
- [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
- [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
- [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
- [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
- [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
- [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
- [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
- [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
- [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
- [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
- [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
- [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
- [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
- [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
- [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
- [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
- [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
- [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
- [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
- [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &aud_spdifout_b_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &aud_spdifout_b_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_B_CLK] = &aud_spdifout_b_clk.hw,
- [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
- [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
- [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
- [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
- [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
- [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
- [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
- [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
- [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
- [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
- [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
- [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
- [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
- [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
- [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
- [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
- [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
- [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
- [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
- [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
- [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
- [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
- [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
- [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
- [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
- [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
- [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
- [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
- [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
- [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
- [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
- [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
- [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
- [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
- [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
- [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
- [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
- [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
- [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
- [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
- [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
- [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
- [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
- [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
- [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
- [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
- [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
- [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
- [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
- [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
- [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
- [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
- [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
- [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
- [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
- [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
- [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
- [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
- [AUD_CLKID_TDM_MCLK_PAD0] = &aud_tdm_mclk_pad_0.hw,
- [AUD_CLKID_TDM_MCLK_PAD1] = &aud_tdm_mclk_pad_1.hw,
- [AUD_CLKID_TDM_LRCLK_PAD0] = &aud_tdm_lrclk_pad_0.hw,
- [AUD_CLKID_TDM_LRCLK_PAD1] = &aud_tdm_lrclk_pad_1.hw,
- [AUD_CLKID_TDM_LRCLK_PAD2] = &aud_tdm_lrclk_pad_2.hw,
- [AUD_CLKID_TDM_SCLK_PAD0] = &aud_tdm_sclk_pad_0.hw,
- [AUD_CLKID_TDM_SCLK_PAD1] = &aud_tdm_sclk_pad_1.hw,
- [AUD_CLKID_TDM_SCLK_PAD2] = &aud_tdm_sclk_pad_2.hw,
+ [AUD_CLKID_DDR_ARB] = &ddr_arb.hw,
+ [AUD_CLKID_PDM] = &pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &power_detect.hw,
+ [AUD_CLKID_SPDIFOUT_B] = &spdifout_b.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &spdifout_clk.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &spdifout_b_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &spdifout_b_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK] = &spdifout_b_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &tdmout_c_lrclk.hw,
+ [AUD_CLKID_TDM_MCLK_PAD0] = &g12a_tdm_mclk_pad_0.hw,
+ [AUD_CLKID_TDM_MCLK_PAD1] = &g12a_tdm_mclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD0] = &g12a_tdm_lrclk_pad_0.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD1] = &g12a_tdm_lrclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD2] = &g12a_tdm_lrclk_pad_2.hw,
+ [AUD_CLKID_TDM_SCLK_PAD0] = &g12a_tdm_sclk_pad_0.hw,
+ [AUD_CLKID_TDM_SCLK_PAD1] = &g12a_tdm_sclk_pad_1.hw,
+ [AUD_CLKID_TDM_SCLK_PAD2] = &g12a_tdm_sclk_pad_2.hw,
+ [AUD_CLKID_TOP] = &axg_aud_top,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
+/*
+ * Array of all SM1 clocks provided by this provider.
+ * The input clocks of the controller will be populated at runtime.
+ */
+static struct clk_hw_onecell_data sm1_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &ddr_arb.hw,
+ [AUD_CLKID_PDM] = &pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &resample.hw,
+ [AUD_CLKID_SPDIFOUT_B] = &spdifout_b.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &sm1_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &sm1_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &sm1_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &sm1_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &sm1_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &sm1_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &sm1_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &sm1_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &sm1_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &sm1_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &sm1_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &sm1_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &sm1_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &sm1_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &sm1_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &sm1_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &sm1_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &sm1_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &spdifout_clk.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &spdifout_b_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &spdifout_b_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK] = &spdifout_b_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &tdmout_c_lrclk.hw,
+ [AUD_CLKID_TDM_MCLK_PAD0] = &sm1_tdm_mclk_pad_0.hw,
+ [AUD_CLKID_TDM_MCLK_PAD1] = &sm1_tdm_mclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD0] = &sm1_tdm_lrclk_pad_0.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD1] = &sm1_tdm_lrclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD2] = &sm1_tdm_lrclk_pad_2.hw,
+ [AUD_CLKID_TDM_SCLK_PAD0] = &sm1_tdm_sclk_pad_0.hw,
+ [AUD_CLKID_TDM_SCLK_PAD1] = &sm1_tdm_sclk_pad_1.hw,
+ [AUD_CLKID_TDM_SCLK_PAD2] = &sm1_tdm_sclk_pad_2.hw,
+ [AUD_CLKID_TOP] = &sm1_aud_top.hw,
+ [AUD_CLKID_TORAM] = &toram.hw,
+ [AUD_CLKID_EQDRC] = &eqdrc.hw,
+ [AUD_CLKID_RESAMPLE_B] = &resample_b.hw,
+ [AUD_CLKID_TOVAD] = &tovad.hw,
+ [AUD_CLKID_LOCKER] = &locker.hw,
+ [AUD_CLKID_SPDIFIN_LB] = &spdifin_lb.hw,
+ [AUD_CLKID_FRDDR_D] = &frddr_d.hw,
+ [AUD_CLKID_TODDR_D] = &toddr_d.hw,
+ [AUD_CLKID_LOOPBACK_B] = &loopback_b.hw,
+ [AUD_CLKID_CLK81_EN] = &sm1_clk81_en.hw,
+ [AUD_CLKID_SYSCLK_A_DIV] = &sm1_sysclk_a_div.hw,
+ [AUD_CLKID_SYSCLK_A_EN] = &sm1_sysclk_a_en.hw,
+ [AUD_CLKID_SYSCLK_B_DIV] = &sm1_sysclk_b_div.hw,
+ [AUD_CLKID_SYSCLK_B_EN] = &sm1_sysclk_b_en.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
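/*
 * Editor's sketch, not part of the patch: each of these onecell tables is
 * what the controller hands back to the common clk framework. Assuming the
 * usual registration path for this driver, probe ends with something like
 *
 *	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
 *					   data->hw_onecell_data);
 *
 * so a consumer's <&clkc AUD_CLKID_...> phandle indexes straight into the
 * .hws[] array above.
 */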
+
/* Convenience table to populate regmap in .probe()
* Note that this table is shared between both AXG and G12A,
* with spdifout_b clocks being exclusive to G12A. Since those
* clocks are not declared within the AXG onecell table, we do not
* feel the need to have separate AXG/G12A regmap tables.
*/
-static struct clk_regmap *const aud_clk_regmaps[] = {
- &aud_ddr_arb,
- &aud_pdm,
- &aud_tdmin_a,
- &aud_tdmin_b,
- &aud_tdmin_c,
- &aud_tdmin_lb,
- &aud_tdmout_a,
- &aud_tdmout_b,
- &aud_tdmout_c,
- &aud_frddr_a,
- &aud_frddr_b,
- &aud_frddr_c,
- &aud_toddr_a,
- &aud_toddr_b,
- &aud_toddr_c,
- &aud_loopback,
- &aud_spdifin,
- &aud_spdifout,
- &aud_resample,
- &aud_power_detect,
- &aud_spdifout_b,
- &aud_mst_a_mclk_sel,
- &aud_mst_b_mclk_sel,
- &aud_mst_c_mclk_sel,
- &aud_mst_d_mclk_sel,
- &aud_mst_e_mclk_sel,
- &aud_mst_f_mclk_sel,
- &aud_mst_a_mclk_div,
- &aud_mst_b_mclk_div,
- &aud_mst_c_mclk_div,
- &aud_mst_d_mclk_div,
- &aud_mst_e_mclk_div,
- &aud_mst_f_mclk_div,
- &aud_mst_a_mclk,
- &aud_mst_b_mclk,
- &aud_mst_c_mclk,
- &aud_mst_d_mclk,
- &aud_mst_e_mclk,
- &aud_mst_f_mclk,
- &aud_spdifout_clk_sel,
- &aud_spdifout_clk_div,
- &aud_spdifout_clk,
- &aud_spdifin_clk_sel,
- &aud_spdifin_clk_div,
- &aud_spdifin_clk,
- &aud_pdm_dclk_sel,
- &aud_pdm_dclk_div,
- &aud_pdm_dclk,
- &aud_pdm_sysclk_sel,
- &aud_pdm_sysclk_div,
- &aud_pdm_sysclk,
- &aud_mst_a_sclk_pre_en,
- &aud_mst_b_sclk_pre_en,
- &aud_mst_c_sclk_pre_en,
- &aud_mst_d_sclk_pre_en,
- &aud_mst_e_sclk_pre_en,
- &aud_mst_f_sclk_pre_en,
- &aud_mst_a_sclk_div,
- &aud_mst_b_sclk_div,
- &aud_mst_c_sclk_div,
- &aud_mst_d_sclk_div,
- &aud_mst_e_sclk_div,
- &aud_mst_f_sclk_div,
- &aud_mst_a_sclk_post_en,
- &aud_mst_b_sclk_post_en,
- &aud_mst_c_sclk_post_en,
- &aud_mst_d_sclk_post_en,
- &aud_mst_e_sclk_post_en,
- &aud_mst_f_sclk_post_en,
- &aud_mst_a_sclk,
- &aud_mst_b_sclk,
- &aud_mst_c_sclk,
- &aud_mst_d_sclk,
- &aud_mst_e_sclk,
- &aud_mst_f_sclk,
- &aud_mst_a_lrclk_div,
- &aud_mst_b_lrclk_div,
- &aud_mst_c_lrclk_div,
- &aud_mst_d_lrclk_div,
- &aud_mst_e_lrclk_div,
- &aud_mst_f_lrclk_div,
- &aud_mst_a_lrclk,
- &aud_mst_b_lrclk,
- &aud_mst_c_lrclk,
- &aud_mst_d_lrclk,
- &aud_mst_e_lrclk,
- &aud_mst_f_lrclk,
- &aud_tdmin_a_sclk_sel,
- &aud_tdmin_b_sclk_sel,
- &aud_tdmin_c_sclk_sel,
- &aud_tdmin_lb_sclk_sel,
- &aud_tdmout_a_sclk_sel,
- &aud_tdmout_b_sclk_sel,
- &aud_tdmout_c_sclk_sel,
- &aud_tdmin_a_sclk_pre_en,
- &aud_tdmin_b_sclk_pre_en,
- &aud_tdmin_c_sclk_pre_en,
- &aud_tdmin_lb_sclk_pre_en,
- &aud_tdmout_a_sclk_pre_en,
- &aud_tdmout_b_sclk_pre_en,
- &aud_tdmout_c_sclk_pre_en,
- &aud_tdmin_a_sclk_post_en,
- &aud_tdmin_b_sclk_post_en,
- &aud_tdmin_c_sclk_post_en,
- &aud_tdmin_lb_sclk_post_en,
- &aud_tdmout_a_sclk_post_en,
- &aud_tdmout_b_sclk_post_en,
- &aud_tdmout_c_sclk_post_en,
- &aud_tdmin_a_sclk,
- &aud_tdmin_b_sclk,
- &aud_tdmin_c_sclk,
- &aud_tdmin_lb_sclk,
- &aud_tdmout_a_sclk,
- &aud_tdmout_b_sclk,
- &aud_tdmout_c_sclk,
- &aud_tdmin_a_lrclk,
- &aud_tdmin_b_lrclk,
- &aud_tdmin_c_lrclk,
- &aud_tdmin_lb_lrclk,
- &aud_tdmout_a_lrclk,
- &aud_tdmout_b_lrclk,
- &aud_tdmout_c_lrclk,
- &aud_spdifout_b_clk_sel,
- &aud_spdifout_b_clk_div,
- &aud_spdifout_b_clk,
- &aud_tdm_mclk_pad_0,
- &aud_tdm_mclk_pad_1,
- &aud_tdm_lrclk_pad_0,
- &aud_tdm_lrclk_pad_1,
- &aud_tdm_lrclk_pad_2,
- &aud_tdm_sclk_pad_0,
- &aud_tdm_sclk_pad_1,
- &aud_tdm_sclk_pad_2,
+static struct clk_regmap *const axg_clk_regmaps[] = {
+ &ddr_arb,
+ &pdm,
+ &tdmin_a,
+ &tdmin_b,
+ &tdmin_c,
+ &tdmin_lb,
+ &tdmout_a,
+ &tdmout_b,
+ &tdmout_c,
+ &frddr_a,
+ &frddr_b,
+ &frddr_c,
+ &toddr_a,
+ &toddr_b,
+ &toddr_c,
+ &loopback,
+ &spdifin,
+ &spdifout,
+ &resample,
+ &power_detect,
+ &spdifout_b,
+ &mst_a_mclk_sel,
+ &mst_b_mclk_sel,
+ &mst_c_mclk_sel,
+ &mst_d_mclk_sel,
+ &mst_e_mclk_sel,
+ &mst_f_mclk_sel,
+ &mst_a_mclk_div,
+ &mst_b_mclk_div,
+ &mst_c_mclk_div,
+ &mst_d_mclk_div,
+ &mst_e_mclk_div,
+ &mst_f_mclk_div,
+ &mst_a_mclk,
+ &mst_b_mclk,
+ &mst_c_mclk,
+ &mst_d_mclk,
+ &mst_e_mclk,
+ &mst_f_mclk,
+ &spdifout_clk_sel,
+ &spdifout_clk_div,
+ &spdifout_clk,
+ &spdifin_clk_sel,
+ &spdifin_clk_div,
+ &spdifin_clk,
+ &pdm_dclk_sel,
+ &pdm_dclk_div,
+ &pdm_dclk,
+ &pdm_sysclk_sel,
+ &pdm_sysclk_div,
+ &pdm_sysclk,
+ &mst_a_sclk_pre_en,
+ &mst_b_sclk_pre_en,
+ &mst_c_sclk_pre_en,
+ &mst_d_sclk_pre_en,
+ &mst_e_sclk_pre_en,
+ &mst_f_sclk_pre_en,
+ &mst_a_sclk_div,
+ &mst_b_sclk_div,
+ &mst_c_sclk_div,
+ &mst_d_sclk_div,
+ &mst_e_sclk_div,
+ &mst_f_sclk_div,
+ &mst_a_sclk_post_en,
+ &mst_b_sclk_post_en,
+ &mst_c_sclk_post_en,
+ &mst_d_sclk_post_en,
+ &mst_e_sclk_post_en,
+ &mst_f_sclk_post_en,
+ &mst_a_sclk,
+ &mst_b_sclk,
+ &mst_c_sclk,
+ &mst_d_sclk,
+ &mst_e_sclk,
+ &mst_f_sclk,
+ &mst_a_lrclk_div,
+ &mst_b_lrclk_div,
+ &mst_c_lrclk_div,
+ &mst_d_lrclk_div,
+ &mst_e_lrclk_div,
+ &mst_f_lrclk_div,
+ &mst_a_lrclk,
+ &mst_b_lrclk,
+ &mst_c_lrclk,
+ &mst_d_lrclk,
+ &mst_e_lrclk,
+ &mst_f_lrclk,
+ &tdmin_a_sclk_sel,
+ &tdmin_b_sclk_sel,
+ &tdmin_c_sclk_sel,
+ &tdmin_lb_sclk_sel,
+ &tdmout_a_sclk_sel,
+ &tdmout_b_sclk_sel,
+ &tdmout_c_sclk_sel,
+ &tdmin_a_sclk_pre_en,
+ &tdmin_b_sclk_pre_en,
+ &tdmin_c_sclk_pre_en,
+ &tdmin_lb_sclk_pre_en,
+ &tdmout_a_sclk_pre_en,
+ &tdmout_b_sclk_pre_en,
+ &tdmout_c_sclk_pre_en,
+ &tdmin_a_sclk_post_en,
+ &tdmin_b_sclk_post_en,
+ &tdmin_c_sclk_post_en,
+ &tdmin_lb_sclk_post_en,
+ &tdmout_a_sclk_post_en,
+ &tdmout_b_sclk_post_en,
+ &tdmout_c_sclk_post_en,
+ &tdmin_a_sclk,
+ &tdmin_b_sclk,
+ &tdmin_c_sclk,
+ &tdmin_lb_sclk,
+ &tdmout_a_sclk,
+ &tdmout_b_sclk,
+ &tdmout_c_sclk,
+ &tdmin_a_lrclk,
+ &tdmin_b_lrclk,
+ &tdmin_c_lrclk,
+ &tdmin_lb_lrclk,
+ &tdmout_a_lrclk,
+ &tdmout_b_lrclk,
+ &tdmout_c_lrclk,
+ &spdifout_b_clk_sel,
+ &spdifout_b_clk_div,
+ &spdifout_b_clk,
+ &g12a_tdm_mclk_pad_0,
+ &g12a_tdm_mclk_pad_1,
+ &g12a_tdm_lrclk_pad_0,
+ &g12a_tdm_lrclk_pad_1,
+ &g12a_tdm_lrclk_pad_2,
+ &g12a_tdm_sclk_pad_0,
+ &g12a_tdm_sclk_pad_1,
+ &g12a_tdm_sclk_pad_2,
+ &toram,
+ &eqdrc,
+};
+
+static struct clk_regmap *const sm1_clk_regmaps[] = {
+ &ddr_arb,
+ &pdm,
+ &tdmin_a,
+ &tdmin_b,
+ &tdmin_c,
+ &tdmin_lb,
+ &tdmout_a,
+ &tdmout_b,
+ &tdmout_c,
+ &frddr_a,
+ &frddr_b,
+ &frddr_c,
+ &toddr_a,
+ &toddr_b,
+ &toddr_c,
+ &loopback,
+ &spdifin,
+ &spdifout,
+ &resample,
+ &spdifout_b,
+ &sm1_mst_a_mclk_sel,
+ &sm1_mst_b_mclk_sel,
+ &sm1_mst_c_mclk_sel,
+ &sm1_mst_d_mclk_sel,
+ &sm1_mst_e_mclk_sel,
+ &sm1_mst_f_mclk_sel,
+ &sm1_mst_a_mclk_div,
+ &sm1_mst_b_mclk_div,
+ &sm1_mst_c_mclk_div,
+ &sm1_mst_d_mclk_div,
+ &sm1_mst_e_mclk_div,
+ &sm1_mst_f_mclk_div,
+ &sm1_mst_a_mclk,
+ &sm1_mst_b_mclk,
+ &sm1_mst_c_mclk,
+ &sm1_mst_d_mclk,
+ &sm1_mst_e_mclk,
+ &sm1_mst_f_mclk,
+ &spdifout_clk_sel,
+ &spdifout_clk_div,
+ &spdifout_clk,
+ &spdifin_clk_sel,
+ &spdifin_clk_div,
+ &spdifin_clk,
+ &pdm_dclk_sel,
+ &pdm_dclk_div,
+ &pdm_dclk,
+ &pdm_sysclk_sel,
+ &pdm_sysclk_div,
+ &pdm_sysclk,
+ &mst_a_sclk_pre_en,
+ &mst_b_sclk_pre_en,
+ &mst_c_sclk_pre_en,
+ &mst_d_sclk_pre_en,
+ &mst_e_sclk_pre_en,
+ &mst_f_sclk_pre_en,
+ &mst_a_sclk_div,
+ &mst_b_sclk_div,
+ &mst_c_sclk_div,
+ &mst_d_sclk_div,
+ &mst_e_sclk_div,
+ &mst_f_sclk_div,
+ &mst_a_sclk_post_en,
+ &mst_b_sclk_post_en,
+ &mst_c_sclk_post_en,
+ &mst_d_sclk_post_en,
+ &mst_e_sclk_post_en,
+ &mst_f_sclk_post_en,
+ &mst_a_sclk,
+ &mst_b_sclk,
+ &mst_c_sclk,
+ &mst_d_sclk,
+ &mst_e_sclk,
+ &mst_f_sclk,
+ &mst_a_lrclk_div,
+ &mst_b_lrclk_div,
+ &mst_c_lrclk_div,
+ &mst_d_lrclk_div,
+ &mst_e_lrclk_div,
+ &mst_f_lrclk_div,
+ &mst_a_lrclk,
+ &mst_b_lrclk,
+ &mst_c_lrclk,
+ &mst_d_lrclk,
+ &mst_e_lrclk,
+ &mst_f_lrclk,
+ &tdmin_a_sclk_sel,
+ &tdmin_b_sclk_sel,
+ &tdmin_c_sclk_sel,
+ &tdmin_lb_sclk_sel,
+ &tdmout_a_sclk_sel,
+ &tdmout_b_sclk_sel,
+ &tdmout_c_sclk_sel,
+ &tdmin_a_sclk_pre_en,
+ &tdmin_b_sclk_pre_en,
+ &tdmin_c_sclk_pre_en,
+ &tdmin_lb_sclk_pre_en,
+ &tdmout_a_sclk_pre_en,
+ &tdmout_b_sclk_pre_en,
+ &tdmout_c_sclk_pre_en,
+ &tdmin_a_sclk_post_en,
+ &tdmin_b_sclk_post_en,
+ &tdmin_c_sclk_post_en,
+ &tdmin_lb_sclk_post_en,
+ &tdmout_a_sclk_post_en,
+ &tdmout_b_sclk_post_en,
+ &tdmout_c_sclk_post_en,
+ &tdmin_a_sclk,
+ &tdmin_b_sclk,
+ &tdmin_c_sclk,
+ &tdmin_lb_sclk,
+ &tdmout_a_sclk,
+ &tdmout_b_sclk,
+ &tdmout_c_sclk,
+ &tdmin_a_lrclk,
+ &tdmin_b_lrclk,
+ &tdmin_c_lrclk,
+ &tdmin_lb_lrclk,
+ &tdmout_a_lrclk,
+ &tdmout_b_lrclk,
+ &tdmout_c_lrclk,
+ &spdifout_b_clk_sel,
+ &spdifout_b_clk_div,
+ &spdifout_b_clk,
+ &sm1_tdm_mclk_pad_0,
+ &sm1_tdm_mclk_pad_1,
+ &sm1_tdm_lrclk_pad_0,
+ &sm1_tdm_lrclk_pad_1,
+ &sm1_tdm_lrclk_pad_2,
+ &sm1_tdm_sclk_pad_0,
+ &sm1_tdm_sclk_pad_1,
+ &sm1_tdm_sclk_pad_2,
+ &sm1_aud_top,
+ &toram,
+ &eqdrc,
+ &resample_b,
+ &tovad,
+ &locker,
+ &spdifin_lb,
+ &frddr_d,
+ &toddr_d,
+ &loopback_b,
+ &sm1_clk81_en,
+ &sm1_sysclk_a_div,
+ &sm1_sysclk_a_en,
+ &sm1_sysclk_b_div,
+ &sm1_sysclk_b_en,
};
static int devm_clk_get_enable(struct device *dev, char *id)
@@ -1001,10 +1615,12 @@ static const struct regmap_config axg_audio_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .max_register = AUDIO_CLK_PDMIN_CTRL1,
+ .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
};
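/*
 * Editor's sketch, not part of the patch: this config is the third argument
 * of the MMIO regmap creation in probe, along the lines of
 *
 *	map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg);
 *
 * Raising .max_register to AUDIO_CLK_SPDIFOUT_B_CTRL lets the map reach the
 * spdifout_b control register at offset 0x0B4.
 */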
struct audioclk_data {
+ struct clk_regmap *const *regmap_clks;
+ unsigned int regmap_clk_num;
struct clk_hw_onecell_data *hw_onecell_data;
unsigned int reset_offset;
unsigned int reset_num;
@@ -1016,7 +1632,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
const struct audioclk_data *data;
struct axg_audio_reset_data *rst;
struct regmap *map;
- struct resource *res;
void __iomem *regs;
struct clk_hw *hw;
int ret, i;
@@ -1025,8 +1640,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
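/*
 * Editor's note, not part of the patch: devm_platform_ioremap_resource() is
 * the managed helper that folds the removed two-call pattern into one, i.e.
 *
 *	res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	regs = devm_ioremap_resource(&pdev->dev, res);
 *
 * collapses to
 *
 *	regs = devm_platform_ioremap_resource(pdev, 0);
 *
 * with the same IS_ERR()-style error reporting.
 */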
@@ -1048,8 +1662,8 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(aud_clk_regmaps); i++)
- aud_clk_regmaps[i]->map = map;
+ for (i = 0; i < data->regmap_clk_num; i++)
+ data->regmap_clks[i]->map = map;
/* Take care to skip the registered input clocks */
for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
@@ -1093,15 +1707,27 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
static const struct audioclk_data axg_audioclk_data = {
+ .regmap_clks = axg_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_onecell_data = &axg_audio_hw_onecell_data,
};
static const struct audioclk_data g12a_audioclk_data = {
+ .regmap_clks = axg_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps),
.hw_onecell_data = &g12a_audio_hw_onecell_data,
.reset_offset = AUDIO_SW_RESET,
.reset_num = 26,
};
+static const struct audioclk_data sm1_audioclk_data = {
+ .regmap_clks = sm1_clk_regmaps,
+ .regmap_clk_num = ARRAY_SIZE(sm1_clk_regmaps),
+ .hw_onecell_data = &sm1_audio_hw_onecell_data,
+ .reset_offset = AUDIO_SM1_SW_RESET0,
+ .reset_num = 39,
+};
+
static const struct of_device_id clkc_match_table[] = {
{
.compatible = "amlogic,axg-audio-clkc",
@@ -1109,6 +1735,9 @@ static const struct of_device_id clkc_match_table[] = {
}, {
.compatible = "amlogic,g12a-audio-clkc",
.data = &g12a_audioclk_data
+ }, {
+ .compatible = "amlogic,sm1-audio-clkc",
+ .data = &sm1_audioclk_data
}, {}
};
MODULE_DEVICE_TABLE(of, clkc_match_table);
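/*
 * Editor's sketch, assuming the usual OF match pattern used earlier in
 * probe: the compatible string picks the per-SoC data, e.g.
 *
 *	data = of_device_get_match_data(&pdev->dev);
 *
 * so "amlogic,sm1-audio-clkc" resolves to &sm1_audioclk_data, whose
 * regmap_clks/regmap_clk_num pair then drives the regmap population loop
 * instead of the old single shared array.
 */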
@@ -1122,6 +1751,6 @@ static struct platform_driver axg_audio_driver = {
};
module_platform_driver(axg_audio_driver);
-MODULE_DESCRIPTION("Amlogic AXG/G12A Audio Clock driver");
+MODULE_DESCRIPTION("Amlogic AXG/G12A/SM1 Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
index c00e28b2e1a9..fd65a7d0704b 100644
--- a/drivers/clk/meson/axg-audio.h
+++ b/drivers/clk/meson/axg-audio.h
@@ -50,6 +50,20 @@
#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
+/* SM1 introduces new registers and some shifts :( */
+#define AUDIO_CLK_GATE_EN1 0x004
+#define AUDIO_SM1_MCLK_A_CTRL 0x008
+#define AUDIO_SM1_MCLK_B_CTRL 0x00C
+#define AUDIO_SM1_MCLK_C_CTRL 0x010
+#define AUDIO_SM1_MCLK_D_CTRL 0x014
+#define AUDIO_SM1_MCLK_E_CTRL 0x018
+#define AUDIO_SM1_MCLK_F_CTRL 0x01C
+#define AUDIO_SM1_MST_PAD_CTRL0 0x020
+#define AUDIO_SM1_MST_PAD_CTRL1 0x024
+#define AUDIO_SM1_SW_RESET0 0x028
+#define AUDIO_SM1_SW_RESET1 0x02C
+#define AUDIO_CLK81_CTRL 0x030
+#define AUDIO_CLK81_EN 0x034
/*
* CLKID index values
* These indices are entirely contrived and do not map onto the hardware.
@@ -115,10 +129,15 @@
#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
+#define AUD_CLKID_CLK81_EN 173
+#define AUD_CLKID_SYSCLK_A_DIV 174
+#define AUD_CLKID_SYSCLK_B_DIV 175
+#define AUD_CLKID_SYSCLK_A_EN 176
+#define AUD_CLKID_SYSCLK_B_EN 177
/* include the CLKIDs which are part of the DT bindings */
#include <dt-bindings/clock/axg-audio-clkc.h>
-#define NR_CLKS 163
+#define NR_CLKS 178
#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index af5e5acad370..6b394302c76a 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -274,8 +274,8 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
if (!ap_cpu_clk)
return -ENOMEM;
- ap_cpu_data = devm_kzalloc(dev, sizeof(*ap_cpu_data) +
- sizeof(struct clk_hw *) * nclusters,
+ ap_cpu_data = devm_kzalloc(dev, struct_size(ap_cpu_data, hws,
+ nclusters),
GFP_KERNEL);
if (!ap_cpu_data)
return -ENOMEM;
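/*
 * Editor's note, not part of the patch: struct_size() comes from
 * <linux/overflow.h> and computes the size of a structure with a trailing
 * flexible array, saturating instead of wrapping on overflow:
 *
 *	struct_size(ap_cpu_data, hws, nclusters)
 *		== sizeof(*ap_cpu_data) + nclusters * sizeof(*ap_cpu_data->hws)
 *
 * which is why it replaces the open-coded sizeof arithmetic above.
 */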
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 5fc6d486a381..f5746f9ea929 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -303,6 +303,7 @@ PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);
+static PERIPH_GATE(pcie, 14);
static struct clk_periph_data data_sb[] = {
REF_CLK_MUX_DD(gbe_50),
@@ -318,6 +319,7 @@ static struct clk_periph_data data_sb[] = {
REF_CLK_FULL_DD(sdio),
REF_CLK_FULL_DD(usb32_usb2_sys),
REF_CLK_FULL_DD(usb32_ss_sys),
+ REF_CLK_GATE(pcie, "gbe_core"),
{ },
};
@@ -712,8 +714,8 @@ static int __maybe_unused armada_3700_periph_clock_resume(struct device *dev)
}
static const struct dev_pm_ops armada_3700_periph_clock_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
- armada_3700_periph_clock_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(armada_3700_periph_clock_suspend,
+ armada_3700_periph_clock_resume)
};
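/*
 * Editor's note, not part of the patch: noirq suspend callbacks run after
 * every device's regular ->suspend(), and noirq resume callbacks run before
 * every regular ->resume(). Moving the clock controller to the noirq phase
 * therefore keeps its clocks alive while consumer devices suspend, and
 * restores them before those consumers resume.
 */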
static int armada_3700_periph_clock_probe(struct platform_device *pdev)
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index fa1568279c23..45665655a258 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -50,12 +50,6 @@ static u32 __init axp_get_tclk_freq(void __iomem *sar)
return 250000000;
}
-/* MV98DX3236 TCLK frequency is fixed to 200MHz */
-static u32 __init mv98dx3236_get_tclk_freq(void __iomem *sar)
-{
- return 200000000;
-}
-
static const u32 axp_cpu_freqs[] __initconst = {
1000000000,
1066000000,
@@ -93,12 +87,6 @@ static u32 __init axp_get_cpu_freq(void __iomem *sar)
return cpu_freq;
}
-/* MV98DX3236 CLK frequency is fixed to 800MHz */
-static u32 __init mv98dx3236_get_cpu_freq(void __iomem *sar)
-{
- return 800000000;
-}
-
static const int axp_nbclk_ratios[32][2] __initconst = {
{0, 1}, {1, 2}, {2, 2}, {2, 2},
{1, 2}, {1, 2}, {1, 1}, {2, 3},
@@ -168,11 +156,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
};
-static const struct coreclk_soc_desc mv98dx3236_coreclks = {
- .get_tclk_freq = mv98dx3236_get_tclk_freq,
- .get_cpu_freq = mv98dx3236_get_cpu_freq,
-};
-
/*
* Clock Gating Control
*/
@@ -210,15 +193,6 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
{ }
};
-static const struct clk_gating_soc_desc mv98dx3236_gating_desc[] __initconst = {
- { "ge1", NULL, 3, 0 },
- { "ge0", NULL, 4, 0 },
- { "pex00", NULL, 5, 0 },
- { "sdio", NULL, 17, 0 },
- { "xor0", NULL, 22, 0 },
- { }
-};
-
static void __init axp_clk_init(struct device_node *np)
{
struct device_node *cgnp =
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 808463276145..84c8900542e4 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -235,8 +235,8 @@ static int cp110_syscon_common_probe(struct platform_device *pdev,
if (ret)
return ret;
- cp110_clk_data = devm_kzalloc(dev, sizeof(*cp110_clk_data) +
- sizeof(struct clk_hw *) * CP110_CLK_NUM,
+ cp110_clk_data = devm_kzalloc(dev, struct_size(cp110_clk_data, hws,
+ CP110_CLK_NUM),
GFP_KERNEL);
if (!cp110_clk_data)
return -ENOMEM;
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 287fdeae7c7c..7b123105b5de 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -459,6 +459,7 @@ struct dummy_clk {
};
static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
+ DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 32dbb4f09492..3b33ef129274 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -220,6 +220,15 @@ config MSM_GCC_8998
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, UFS, SD/eMMC, PCIe, etc.
+config MSM_GPUCC_8998
+ tristate "MSM8998 Graphics Clock Controller"
+ select MSM_GCC_8998
+ select QCOM_GDSC
+ help
+ Support for the graphics clock controller on MSM8998 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -227,6 +236,15 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SC_GCC_7180
+ tristate "SC7180 Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on SC7180 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
config SDM_CAMCC_845
tristate "SDM845 Camera Clock Controller"
select SDM_GCC_845
@@ -248,6 +266,14 @@ config QCS_TURING_404
Support for the Turing Clock Controller on QCS404, provides clocks
and resets for the Turing subsystem.
+config QCS_Q6SSTOP_404
+ tristate "QCS404 Q6SSTOP Clock Controller"
+ select QCS_GCC_404
+ help
+ Support for the Q6SSTOP clock controller on QCS404 devices.
+ Say Y if you want to use the Q6SSTOP branch clocks of the WCSS clock
+ controller to reset the Q6SSTOP subsystem.
+
config SDM_GCC_845
tristate "SDM845 Global Clock Controller"
select QCOM_GDSC
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 4a813b4055d0..d899661d0f44 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8998) += gcc-msm8998.o
+obj-$(CONFIG_MSM_GPUCC_8998) += gpucc-msm8998.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
@@ -42,7 +43,9 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index c25b57c3cbc8..78358b81d249 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -168,7 +168,7 @@ struct clk_rcg_dfs_data {
};
#define DEFINE_RCG_DFS(r) \
- { .rcg = &r##_src, .init = &r##_init }
+ { .rcg = &r, .init = &r##_init }
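/*
 * Editor's sketch, not part of the patch: with the "_src" token paste
 * dropped, callers now name the rcg in full, e.g. (hypothetical table):
 *
 *	static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
 *		DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
 *	};
 *
 * which expands to { .rcg = &gcc_qupv3_wrap0_s0_clk_src,
 *		      .init = &gcc_qupv3_wrap0_s0_clk_src_init }.
 */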
extern int qcom_cc_register_rcg_dfs(struct regmap *regmap,
const struct clk_rcg_dfs_data *rcgs,
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b98b81ef43a1..8f4b9bec2956 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -206,7 +206,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
break;
default:
return -EINVAL;
- };
+ }
if (!f)
return -EINVAL;
@@ -220,6 +220,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
if (clk_flags & CLK_SET_RATE_PARENT) {
rate = f->freq;
if (f->pre_div) {
+ if (!rate)
+ rate = req->rate;
rate /= 2;
rate *= f->pre_div + 1;
}
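/*
 * Editor's note, not part of the patch: the rcg pre-divider is stored as
 * (2 * divider - 1), so the parent rate that produces f->freq is
 * f->freq / 2 * (f->pre_div + 1) == f->freq * divider; e.g. a divider of 5
 * is stored as 9, and 100000000 / 2 * (9 + 1) == 500000000. The new !rate
 * check keeps a constant-ratio entry with .freq == 0 (see the
 * qcom_find_freq() change in common.c below) from propagating a zero rate
 * to the parent: the requested rate is used instead.
 */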
@@ -319,7 +321,7 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
break;
default:
return -EINVAL;
- };
+ }
if (!f)
return -EINVAL;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 96a36f6ff667..2dbbe47e8d4f 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -334,13 +334,14 @@ static const struct clk_ops clk_rpmh_bcm_ops = {
.recalc_rate = clk_rpmh_bcm_recalc_rate,
};
-/* Resource name must match resource id present in cmd-db. */
+/* Resource name must match resource id present in cmd-db */
DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
+DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
static struct clk_hw *sdm845_rpmh_clocks[] = {
@@ -364,26 +365,19 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
-DEFINE_CLK_RPMH_ARC(sm8150, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
-DEFINE_CLK_RPMH_VRM(sm8150, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2);
-DEFINE_CLK_RPMH_VRM(sm8150, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
-DEFINE_CLK_RPMH_VRM(sm8150, rf_clk1, rf_clk1_ao, "rfclka1", 1);
-DEFINE_CLK_RPMH_VRM(sm8150, rf_clk2, rf_clk2_ao, "rfclka2", 1);
-DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
-
static struct clk_hw *sm8150_rpmh_clocks[] = {
- [RPMH_CXO_CLK] = &sm8150_bi_tcxo.hw,
- [RPMH_CXO_CLK_A] = &sm8150_bi_tcxo_ao.hw,
- [RPMH_LN_BB_CLK2] = &sm8150_ln_bb_clk2.hw,
- [RPMH_LN_BB_CLK2_A] = &sm8150_ln_bb_clk2_ao.hw,
- [RPMH_LN_BB_CLK3] = &sm8150_ln_bb_clk3.hw,
- [RPMH_LN_BB_CLK3_A] = &sm8150_ln_bb_clk3_ao.hw,
- [RPMH_RF_CLK1] = &sm8150_rf_clk1.hw,
- [RPMH_RF_CLK1_A] = &sm8150_rf_clk1_ao.hw,
- [RPMH_RF_CLK2] = &sm8150_rf_clk2.hw,
- [RPMH_RF_CLK2_A] = &sm8150_rf_clk2_ao.hw,
- [RPMH_RF_CLK3] = &sm8150_rf_clk3.hw,
- [RPMH_RF_CLK3_A] = &sm8150_rf_clk3_ao.hw,
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sm8150 = {
@@ -391,6 +385,24 @@ static const struct clk_rpmh_desc clk_rpmh_sm8150 = {
.num_clks = ARRAY_SIZE(sm8150_rpmh_clocks),
};
+static struct clk_hw *sc7180_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sc7180 = {
+ .clks = sc7180_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sc7180_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -471,6 +483,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
+ { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index fef5e8157061..930fa4a4c52a 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -648,6 +648,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
};
/* msm8998 */
+DEFINE_CLK_SMD_RPM(msm8998, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
@@ -670,6 +671,8 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
static struct clk_smd_rpm *msm8998_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8998_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8998_pcnoc_a_clk,
[RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk,
[RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk,
[RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk,
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 28ddc747d703..60d2a78d1395 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -29,6 +29,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
if (!f)
return NULL;
+ if (!f->freq)
+ return f;
+
for (; f->freq; f++)
if (rate <= f->freq)
return f;
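/*
 * Editor's sketch, not part of the patch: returning the entry when its
 * .freq is 0 lets a table describe a constant ratio rather than a list of
 * fixed rates, e.g. (hypothetical):
 *
 *	static const struct freq_tbl ftbl_const_ratio[] = {
 *		F(0, P_GPLL0_OUT_MAIN, 2, 1, 3),
 *		{ }
 *	};
 *
 * matches any requested rate and always applies divider 2 with M/N = 1/3.
 */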
@@ -218,7 +221,7 @@ static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
return ERR_PTR(-EINVAL);
}
- return cc->rclks[idx] ? &cc->rclks[idx]->hw : ERR_PTR(-ENOENT);
+ return cc->rclks[idx] ? &cc->rclks[idx]->hw : NULL;
}
int qcom_cc_really_probe(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 091acd59c1d6..cf31b5d03270 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1266,6 +1266,72 @@ static struct clk_branch gcc_bimc_mss_q6_axi_clk = {
},
};
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x8a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_snoc_axi_clk = {
+ .halt_reg = 0x8a03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
+ .halt_reg = 0x8a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5200c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_gpll0_div_clk_src",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_ahb_clk = {
.halt_reg = 0x17004,
.halt_check = BRANCH_HALT_VOTED,
@@ -2832,6 +2898,11 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr,
[GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr,
[GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
@@ -2928,6 +2999,7 @@ static const struct qcom_reset_map gcc_msm8998_resets[] = {
[GCC_GPU_BCR] = { 0x71000 },
[GCC_SPSS_BCR] = { 0x72000 },
[GCC_OBT_ODT_BCR] = { 0x73000 },
+ [GCC_MSS_RESTART] = { 0x79000 },
[GCC_VS_BCR] = { 0x7a000 },
[GCC_MSS_VS_RESET] = { 0x7a100 },
[GCC_GPU_VS_RESET] = { 0x7a104 },
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
new file mode 100644
index 000000000000..38424e63bcae
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -0,0 +1,2450 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL1_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static struct clk_fixed_factor gcc_pll0_main_div_cdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pll0_main_div_cdiv",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x01000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x27000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao", .name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
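/*
 * Editor's note, not part of the patch: in struct clk_parent_data,
 * .fw_name is matched against the consumer's DT "clock-names" entries,
 * .hw points directly at a clock registered by this driver, and .name is
 * the legacy fallback lookup in the global clock namespace, kept here so
 * the driver still works on DTs that predate the clock-names binding.
 */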
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL1_OUT_MAIN, 4 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x48014,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qspi_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_qspi_core_clk_src = {
+ .cmd_rcgr = 0x4b00c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_qspi_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
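/*
 * Editor's note, not part of the patch: each F() row is
 * F(rate, parent, divider, M, N), with the divider stored internally as
 * (2 * divider - 1). Assuming the usual 600 MHz GPLL0 (so gpll0_out_even
 * runs at 300 MHz), the first row works out as
 *
 *	300000000 / 1 * 384 / 15625 == 7372800 Hz
 *
 * i.e. the M/N counter alone synthesizes the 7.3728 MHz UART base rate.
 */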
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x17034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x17164,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x17294,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x173c4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x174f4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x17624,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x18018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x18148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x18278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x183a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x184d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x18608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
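+/*
+ * Frequency table entries use the common qcom F(freq, src, div, m, n)
+ * macro, which encodes the pre-divider as (2 * div - 1) so half-integer
+ * dividers such as 2.5 can be expressed.
+ */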
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x12028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x77098,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x77060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0xf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(20000000, P_GPLL0_OUT_EVEN, 15, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xf034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_prim_phy_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0xf060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_usb3_prim_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
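+/*
+ * The branch clocks below gate the RCG outputs defined above. Branches
+ * using BRANCH_HALT_VOTED are enabled through a bit in a shared voting
+ * register (0x52000/0x52008) rather than a dedicated enable register,
+ * while halt_reg is still polled for the halt status.
+ */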
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x82024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x82024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x82024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x8201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x38004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
+ .halt_reg = 0xb080,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_throttle_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0xb02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x4100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x502c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality, the AHB clock needs to be left enabled */
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x48000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x4452c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x4452c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0xb024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
+ .halt_reg = 0xb084,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0xb030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x7100c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x71018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x71018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+ .halt_reg = 0x4d008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+ .halt_reg = 0x73008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x73008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_dma_cfg_ahb_clk = {
+ .halt_reg = 0x73018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x73018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_dma_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_bwmon_dsp_cfg_ahb_clk = {
+ .halt_reg = 0x7301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_bwmon_dsp_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+ .halt_reg = 0x4d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+ .halt_reg = 0x4d1a0,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4d1a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4d1a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_dma_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_cnoc_periph_ahb_clk = {
+ .halt_reg = 0x4b004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x4b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_cnoc_periph_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_core_clk = {
+ .halt_reg = 0x4b008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qspi_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x17030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x17160,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x17290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x173c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x174f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x17620,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x18008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x18014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x18144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x18274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x183a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x184d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x18604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x1800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x18010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x18010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x52008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x12008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1200c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* For CPUSS functionality, the SYS NOC clock needs to be left enabled */
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x4144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x77014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x77038,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77090,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77090,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x77094,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x77094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x7708c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x7708c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7708c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0xf010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0xf018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0xf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0xf050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0xf054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0xf058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xf058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x6a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pll0_main_div_cdiv.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_axi_clk = {
+ .halt_reg = 0xb07c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xb07c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xb07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
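+/*
+ * GDSCs (globally distributed switch controllers) are the power domains
+ * managed by this clock controller; VOTABLE domains are enabled by
+ * voting rather than by direct control of the GDSCR.
+ */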
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x77004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x0f004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
+ .gdscr = 0x7d040,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON | VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
+ .gdscr = 0x7d044,
+ .pd = {
+ .name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON | VOTABLE,
+};
+
+static struct gdsc *gcc_sc7180_gdscs[] = {
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
+static struct clk_hw *gcc_sc7180_hws[] = {
+ [GCC_GPLL0_MAIN_DIV_CDIV] = &gcc_pll0_main_div_cdiv.hw,
+};
+
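+/* Indexed by the GCC_* ids from the qcom,gcc-sc7180.h dt-binding header. */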
+static struct clk_regmap *gcc_sc7180_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+ [GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+ [GCC_NPU_BWMON_DMA_CFG_AHB_CLK] = &gcc_npu_bwmon_dma_cfg_ahb_clk.clkr,
+ [GCC_NPU_BWMON_DSP_CFG_AHB_CLK] = &gcc_npu_bwmon_dsp_cfg_ahb_clk.clkr,
+ [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_GPLL0_DIV_CLK_SRC] = &gcc_video_gpll0_div_clk_src.clkr,
+ [GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+	[GPLL0] = &gpll0.clkr,
+	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+	[GPLL1] = &gpll1.clkr,
+	[GPLL4] = &gpll4.clkr,
+	[GPLL6] = &gpll6.clkr,
+	[GPLL7] = &gpll7.clkr,
+};
+
+static const struct qcom_reset_map gcc_sc7180_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x26000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x26004 },
+ [GCC_UFS_PHY_BCR] = { 0x77000 },
+ [GCC_USB30_PRIM_BCR] = { 0xf000 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x5000c },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
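+/*
+ * The QUP serial-engine RCGs support Dynamic Frequency Switching (DFS):
+ * when DFS is enabled in hardware, qcom_cc_register_rcg_dfs() reads the
+ * per-level configurations back at probe time and uses them in place of
+ * the static frequency table.
+ */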
+static struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
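+/* fast_io: regmap uses spinlock-based locking for this MMIO register space */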
+static const struct regmap_config gcc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x18208c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sc7180_desc = {
+ .config = &gcc_sc7180_regmap_config,
+ .clk_hws = gcc_sc7180_hws,
+ .num_clk_hws = ARRAY_SIZE(gcc_sc7180_hws),
+ .clks = gcc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sc7180_clocks),
+ .resets = gcc_sc7180_resets,
+ .num_resets = ARRAY_SIZE(gcc_sc7180_resets),
+ .gdscs = gcc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sc7180_gdscs),
+};
+
+static const struct of_device_id gcc_sc7180_match_table[] = {
+ { .compatible = "qcom,gcc-sc7180" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sc7180_match_table);
+
+static int gcc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sc7180_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Disable the GPLL0 active input to MM blocks, NPU
+ * and GPU via MISC registers.
+ */
+ regmap_update_bits(regmap, 0x09ffc, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x4d110, 0x3, 0x3);
+ regmap_update_bits(regmap, 0x71028, 0x3, 0x3);
+
+	/*
+	 * Keep the following clocks always on:
+	 * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK,
+	 * GCC_GPU_CFG_AHB_CLK
+	 */
+ regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_sc7180_desc, regmap);
+}
+
+static struct platform_driver gcc_sc7180_driver = {
+ .probe = gcc_sc7180_probe,
+ .driver = {
+ .name = "gcc-sc7180",
+ .of_match_table = gcc_sc7180_match_table,
+ },
+};
+
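+/*
+ * Register at core_initcall so the GCC is available early for the many
+ * drivers that depend on these clocks.
+ */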
+static int __init gcc_sc7180_init(void)
+{
+ return platform_driver_register(&gcc_sc7180_driver);
+}
+core_initcall(gcc_sc7180_init);
+
+static void __exit gcc_sc7180_exit(void)
+{
+ platform_driver_unregister(&gcc_sc7180_driver);
+}
+module_exit(gcc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI GCC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 95be125c3bdd..d2142fe46a8e 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -408,7 +408,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
{ }
};
-static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -421,10 +421,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -437,10 +437,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -453,10 +453,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -469,10 +469,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -485,10 +485,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -501,10 +501,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s6_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.name = "gcc_qupv3_wrap0_s6_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -517,10 +517,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap0_s7_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.name = "gcc_qupv3_wrap0_s7_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -533,10 +533,10 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -549,10 +549,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -565,10 +565,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -581,10 +581,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -597,10 +597,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -613,10 +613,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -629,10 +629,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s6_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.name = "gcc_qupv3_wrap1_s6_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -645,10 +645,10 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
};
-static struct clk_init_data gcc_qupv3_wrap1_s7_clk_init = {
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.name = "gcc_qupv3_wrap1_s7_clk_src",
.parent_names = gcc_parent_names_0,
.num_parents = 4,
@@ -661,7 +661,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
.hid_width = 5,
.parent_map = gcc_parent_map_0,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
- .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_init,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
};
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
@@ -3577,22 +3577,22 @@ static const struct of_device_id gcc_sdm845_match_table[] = {
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk),
- DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
};
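
The renames above only pay off because DEFINE_RCG_DFS() builds both identifiers by token pasting, so each clk_init_data must be named "<rcg name>_init" for the macro to resolve. A minimal standalone sketch of that mechanism — the trimmed-down struct layouts and the macro body are assumptions modeled on drivers/clk/qcom/clk-rcg.h, not the kernel's exact definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel types (assumption). */
struct clk_init_data { const char *name; };
struct clk_rcg2 { struct clk_init_data *init; };

struct clk_rcg_dfs_data {
	struct clk_rcg2 *rcg;
	struct clk_init_data *init;
};

/* Token-pastes "_init" onto the RCG's own variable name. */
#define DEFINE_RCG_DFS(r) { .rcg = &r, .init = &r##_init }

static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
	.name = "gcc_qupv3_wrap0_s6_clk_src",
};
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
	.init = &gcc_qupv3_wrap0_s6_clk_src_init,
};

/* Compiles only because both names share the _clk_src stem. */
static struct clk_rcg_dfs_data dfs[] = {
	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
};

int main(void)
{
	printf("%s\n", dfs[0].init->name);
	return 0;
}
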
static int gcc_sdm845_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
new file mode 100644
index 000000000000..e5e2492b20c5
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Jeffrey Hugo
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gpucc-msm8998.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPUPLL0_OUT_EVEN,
+};
+
+/* Instead of going directly to the block, XO is routed through this branch */
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x1020,
+ .clkr = {
+ .enable_reg = 0x1020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ .name = "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll gpupll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpupll0",
+ .parent_hws = (const struct clk_hw *[]){ &gpucc_cxo_clk.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpupll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_fabia_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpupll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &gpupll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+};
+
+static const struct parent_map gpu_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+};
+
+static const struct clk_parent_data gpu_xo_gpll0[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .fw_name = "gpll0", .name = "gpll0" },
+};
+
+static const struct parent_map gpu_xo_gpupll0_map[] = {
+ { P_XO, 0 },
+ { P_GPUPLL0_OUT_EVEN, 1 },
+};
+
+static const struct clk_parent_data gpu_xo_gpupll0[] = {
+ { .hw = &gpucc_cxo_clk.clkr.hw },
+ { .hw = &gpupll0_out_even.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x1030,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpll0_map,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_data = gpu_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ { .src = P_GPUPLL0_OUT_EVEN, .pre_div = 3 },
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x1070,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpupll0_map,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_data = gpu_xo_gpupll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ },
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpll0_map,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_data = gpu_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_isense_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_isense_clk_src = {
+ .cmd_rcgr = 0x1100,
+ .hid_width = 5,
+ .parent_map = gpu_xo_gpll0_map,
+ .freq_tbl = ftbl_gfx3d_isense_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_isense_clk_src",
+ .parent_data = gpu_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch rbcpr_clk = {
+ .halt_reg = 0x1054,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rbcpr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_clk = {
+ .halt_reg = 0x1098,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk",
+ .parent_hws = (const struct clk_hw *[]){ &gfx3d_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch rbbmtimer_clk = {
+ .halt_reg = 0x10d0,
+ .clkr = {
+ .enable_reg = 0x10d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk",
+ .parent_hws = (const struct clk_hw *[]){ &rbbmtimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gfx3d_isense_clk = {
+ .halt_reg = 0x1124,
+ .clkr = {
+ .enable_reg = 0x1124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gfx3d_isense_clk",
+ .parent_hws = (const struct clk_hw *[]){ &gfx3d_isense_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x1004,
+ .pd = {
+ .name = "gpu_cx",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x1094,
+ .clamp_io_ctrl = 0x130,
+ .pd = {
+ .name = "gpu_gx",
+ },
+ .parent = &gpu_cx_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET,
+};
+
+static struct clk_regmap *gpucc_msm8998_clocks[] = {
+ [GPUPLL0] = &gpupll0.clkr,
+ [GPUPLL0_OUT_EVEN] = &gpupll0_out_even.clkr,
+ [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [GFX3D_ISENSE_CLK_SRC] = &gfx3d_isense_clk_src.clkr,
+ [RBCPR_CLK] = &rbcpr_clk.clkr,
+ [GFX3D_CLK] = &gfx3d_clk.clkr,
+ [RBBMTIMER_CLK] = &rbbmtimer_clk.clkr,
+ [GFX3D_ISENSE_CLK] = &gfx3d_isense_clk.clkr,
+ [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+};
+
+static struct gdsc *gpucc_msm8998_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct qcom_reset_map gpucc_msm8998_resets[] = {
+ [GPU_CX_BCR] = { 0x1000 },
+ [RBCPR_BCR] = { 0x1050 },
+ [GPU_GX_BCR] = { 0x1090 },
+ [GPU_ISENSE_BCR] = { 0x1120 },
+};
+
+static const struct regmap_config gpucc_msm8998_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_msm8998_desc = {
+ .config = &gpucc_msm8998_regmap_config,
+ .clks = gpucc_msm8998_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_msm8998_clocks),
+ .resets = gpucc_msm8998_resets,
+ .num_resets = ARRAY_SIZE(gpucc_msm8998_resets),
+ .gdscs = gpucc_msm8998_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpucc_msm8998_gdscs),
+};
+
+static const struct of_device_id gpucc_msm8998_match_table[] = {
+ { .compatible = "qcom,msm8998-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_msm8998_match_table);
+
+static int gpucc_msm8998_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpucc_msm8998_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* force periph logic on to avoid perf counter corruption */
+ regmap_write_bits(regmap, gfx3d_clk.clkr.enable_reg, BIT(13), BIT(13));
+ /* tweak droop detector (GPUCC_GPU_DD_WRAP_CTRL) to reduce leakage */
+ regmap_write_bits(regmap, gfx3d_clk.clkr.enable_reg, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gpucc_msm8998_desc, regmap);
+}
+
+static struct platform_driver gpucc_msm8998_driver = {
+ .probe = gpucc_msm8998_probe,
+ .driver = {
+ .name = "gpucc-msm8998",
+ .of_match_table = gpucc_msm8998_match_table,
+ },
+};
+module_platform_driver(gpucc_msm8998_driver);
+
+MODULE_DESCRIPTION("QCOM GPUCC MSM8998 Driver");
+MODULE_LICENSE("GPL v2");
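
Nearly every clock in this new file hands its clk_init_data to the framework as a pointer to a C99 compound literal, keeping each descriptor next to the clock that owns it; the fw_name/name pairs give a device-tree lookup with a global-namespace fallback. A standalone illustration of the pattern with cut-down types (the two small structs here are assumptions, not the kernel's real layouts):

#include <stdio.h>

struct clk_parent_data { const char *fw_name; const char *name; };
struct clk_init_data {
	const char *name;
	const struct clk_parent_data *parent_data;
	int num_parents;
};

/* File-scope compound literals have static storage duration, so taking
 * their address for a static initializer is well-defined C99. */
static const struct clk_init_data *cxo_init = &(const struct clk_init_data){
	.name = "gpucc_cxo_clk",
	/* fw_name is matched against the DT clock-names property; .name
	 * is the legacy global-namespace fallback. */
	.parent_data = &(const struct clk_parent_data){
		.fw_name = "xo",
		.name = "xo",
	},
	.num_parents = 1,
};

int main(void)
{
	printf("%s <- %s\n", cxo_init->name, cxo_init->parent_data->fw_name);
	return 0;
}
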
diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
new file mode 100644
index 000000000000..723f932fbf7d
--- /dev/null
+++ b/drivers/clk/qcom/q6sstop-qcs404.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,q6sstopcc-qcs404.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+#include "reset.h"
+
+static struct clk_branch lcc_ahbfabric_cbc_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_ahbfabric_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_ahbs_cbc_clk = {
+ .halt_reg = 0x22000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x22000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_ahbs_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_tcm_slave_cbc_clk = {
+ .halt_reg = 0x1c000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_tcm_slave_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_ahbm_cbc_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_ahbm_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_axim_cbc_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_axim_cbc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lcc_q6ss_bcr_sleep_clk = {
+ .halt_reg = 0x6004,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x6004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lcc_q6ss_bcr_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* TCSR clock */
+static struct clk_branch tcsr_lcc_csr_cbcr_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "tcsr_lcc_csr_cbcr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct regmap_config q6sstop_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static struct clk_regmap *q6sstop_qcs404_clocks[] = {
+ [LCC_AHBFABRIC_CBC_CLK] = &lcc_ahbfabric_cbc_clk.clkr,
+ [LCC_Q6SS_AHBS_CBC_CLK] = &lcc_q6ss_ahbs_cbc_clk.clkr,
+ [LCC_Q6SS_TCM_SLAVE_CBC_CLK] = &lcc_q6ss_tcm_slave_cbc_clk.clkr,
+ [LCC_Q6SS_AHBM_CBC_CLK] = &lcc_q6ss_ahbm_cbc_clk.clkr,
+ [LCC_Q6SS_AXIM_CBC_CLK] = &lcc_q6ss_axim_cbc_clk.clkr,
+ [LCC_Q6SS_BCR_SLEEP_CLK] = &lcc_q6ss_bcr_sleep_clk.clkr,
+};
+
+static const struct qcom_reset_map q6sstop_qcs404_resets[] = {
+ [Q6SSTOP_BCR_RESET] = { 0x6000 },
+};
+
+static const struct qcom_cc_desc q6sstop_qcs404_desc = {
+ .config = &q6sstop_regmap_config,
+ .clks = q6sstop_qcs404_clocks,
+ .num_clks = ARRAY_SIZE(q6sstop_qcs404_clocks),
+ .resets = q6sstop_qcs404_resets,
+ .num_resets = ARRAY_SIZE(q6sstop_qcs404_resets),
+};
+
+static struct clk_regmap *tcsr_qcs404_clocks[] = {
+ [TCSR_Q6SS_LCC_CBCR_CLK] = &tcsr_lcc_csr_cbcr_clk.clkr,
+};
+
+static const struct qcom_cc_desc tcsr_qcs404_desc = {
+ .config = &q6sstop_regmap_config,
+ .clks = tcsr_qcs404_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_qcs404_clocks),
+};
+
+static const struct of_device_id q6sstopcc_qcs404_match_table[] = {
+ { .compatible = "qcom,qcs404-q6sstopcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, q6sstopcc_qcs404_match_table);
+
+static int q6sstopcc_qcs404_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ q6sstop_regmap_config.name = "q6sstop_tcsr";
+ desc = &tcsr_qcs404_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ q6sstop_regmap_config.name = "q6sstop_cc";
+ desc = &q6sstop_qcs404_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int q6sstopcc_qcs404_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops q6sstopcc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver q6sstopcc_qcs404_driver = {
+ .probe = q6sstopcc_qcs404_probe,
+ .remove = q6sstopcc_qcs404_remove,
+ .driver = {
+ .name = "qcs404-q6sstopcc",
+ .of_match_table = q6sstopcc_qcs404_match_table,
+ .pm = &q6sstopcc_pm_ops,
+ },
+};
+
+module_platform_driver(q6sstopcc_qcs404_driver);
+
+MODULE_DESCRIPTION("QTI QCS404 Q6SSTOP Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
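
The probe above follows the usual acquire-then-unwind shape: each failure jumps to a label that releases only what was already set up, in reverse order of acquisition. A toy standalone model of that control flow (the step names are placeholders, not kernel APIs):

#include <stdio.h>

static int create_clk_list(void)   { puts("pm_clk list created"); return 0; }
static int probe_tcsr(void)        { puts("tcsr probe"); return -1; /* simulated failure */ }
static void destroy_clk_list(void) { puts("pm_clk list destroyed"); }
static void disable_runtime(void)  { puts("runtime PM disabled"); }

static int probe(void)
{
	int ret;

	puts("runtime PM enabled");

	ret = create_clk_list();
	if (ret)
		goto disable_pm_runtime;

	ret = probe_tcsr();
	if (ret)
		goto destroy_pm_clk;

	return 0;

destroy_pm_clk:
	destroy_clk_list();
disable_pm_runtime:
	disable_runtime();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}
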
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index b879e3e3a6b4..4cd846bc98cc 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -12,6 +12,7 @@ config CLK_RENESAS
select CLK_R8A7745 if ARCH_R8A7745
select CLK_R8A77470 if ARCH_R8A77470
select CLK_R8A774A1 if ARCH_R8A774A1
+ select CLK_R8A774B1 if ARCH_R8A774B1
select CLK_R8A774C0 if ARCH_R8A774C0
select CLK_R8A7778 if ARCH_R8A7778
select CLK_R8A7779 if ARCH_R8A7779
@@ -20,7 +21,8 @@ config CLK_RENESAS
select CLK_R8A7792 if ARCH_R8A7792
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
- select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select CLK_R8A77961 if ARCH_R8A77961
select CLK_R8A77965 if ARCH_R8A77965
select CLK_R8A77970 if ARCH_R8A77970
select CLK_R8A77980 if ARCH_R8A77980
@@ -31,17 +33,6 @@ config CLK_RENESAS
if CLK_RENESAS
-config CLK_RENESAS_LEGACY
- bool "Legacy DT clock support"
- depends on CLK_R8A7790 || CLK_R8A7791 || CLK_R8A7792 || CLK_R8A7794
- help
- Enable backward compatibility with old device trees describing a
- hierarchical representation of the various CPG and MSTP clocks.
-
- Say Y if you want your kernel to work with old DTBs.
- It is safe to say N if you use the DTS that is supplied with the
- current kernel source tree.
-
# SoC
config CLK_EMEV2
bool "Emma Mobile EV2 clock support" if COMPILE_TEST
@@ -80,6 +71,10 @@ config CLK_R8A774A1
bool "RZ/G2M clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A774B1
+ bool "RZ/G2N clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A774C0
bool "RZ/G2E clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
@@ -94,24 +89,20 @@ config CLK_R8A7779
config CLK_R8A7790
bool "R-Car H2 clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7791
bool "R-Car M2-W/N clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
config CLK_R8A7792
bool "R-Car V2H clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
config CLK_R8A7794
bool "R-Car E2 clock support" if COMPILE_TEST
- select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
select CLK_RCAR_GEN2_CPG
select CLK_RENESAS_DIV6
@@ -119,10 +110,14 @@ config CLK_R8A7795
bool "R-Car H3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
-config CLK_R8A7796
+config CLK_R8A77960
bool "R-Car M3-W clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77961
+ bool "R-Car M3-W+ clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77965
bool "R-Car M3-N clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
@@ -155,11 +150,6 @@ config CLK_SH73A0
# Family
-config CLK_RCAR_GEN2
- bool "R-Car Gen2 legacy clock support" if COMPILE_TEST
- select CLK_RENESAS_CPG_MSTP
- select CLK_RENESAS_DIV6
-
config CLK_RCAR_GEN2_CPG
bool "R-Car Gen2 CPG clock support" if COMPILE_TEST
select CLK_RENESAS_CPG_MSSR
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index c793e3cc9452..4a722bc5aac7 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o
obj-$(CONFIG_CLK_R8A774A1) += r8a774a1-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A774B1) += r8a774b1-cpg-mssr.o
obj-$(CONFIG_CLK_R8A774C0) += r8a774c0-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o
@@ -17,7 +18,8 @@ obj-$(CONFIG_CLK_R8A7791) += r8a7791-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
-obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77960) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77961) += r8a7796-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77965) += r8a77965-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
@@ -27,7 +29,6 @@ obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
# Family
-obj-$(CONFIG_CLK_RCAR_GEN2) += clk-rcar-gen2.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index e326e6dc09fc..003e9ce45757 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -189,10 +189,8 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
unsigned int i;
group = kzalloc(struct_size(group, clks, MSTP_MAX_CLOCKS), GFP_KERNEL);
- if (group == NULL) {
- kfree(group);
+ if (!group)
return;
- }
clks = group->clks;
spin_lock_init(&group->lock);
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
deleted file mode 100644
index da9fe3f032eb..000000000000
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ /dev/null
@@ -1,457 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * rcar_gen2 Core CPG Clocks
- *
- * Copyright (C) 2013 Ideas On Board SPRL
- *
- * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/renesas.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/math64.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/soc/renesas/rcar-rst.h>
-
-struct rcar_gen2_cpg {
- struct clk_onecell_data data;
- spinlock_t lock;
- void __iomem *reg;
-};
-
-#define CPG_FRQCRB 0x00000004
-#define CPG_FRQCRB_KICK BIT(31)
-#define CPG_SDCKCR 0x00000074
-#define CPG_PLL0CR 0x000000d8
-#define CPG_FRQCRC 0x000000e0
-#define CPG_FRQCRC_ZFC_MASK (0x1f << 8)
-#define CPG_FRQCRC_ZFC_SHIFT 8
-#define CPG_ADSPCKCR 0x0000025c
-#define CPG_RCANCKCR 0x00000270
-
-/* -----------------------------------------------------------------------------
- * Z Clock
- *
- * Traits of this clock:
- * prepare - clk_prepare only ensures that parents are prepared
- * enable - clk_enable only ensures that parents are enabled
- * rate - rate is adjustable. clk->rate = parent->rate * mult / 32
- * parent - fixed parent. No clk_set_parent support
- */
-
-struct cpg_z_clk {
- struct clk_hw hw;
- void __iomem *reg;
- void __iomem *kick_reg;
-};
-
-#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
-
-static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- unsigned int val;
-
- val = (readl(zclk->reg) & CPG_FRQCRC_ZFC_MASK) >> CPG_FRQCRC_ZFC_SHIFT;
- mult = 32 - val;
-
- return div_u64((u64)parent_rate * mult, 32);
-}
-
-static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- unsigned long prate = *parent_rate;
- unsigned int mult;
-
- if (!prate)
- prate = 1;
-
- mult = div_u64((u64)rate * 32, prate);
- mult = clamp(mult, 1U, 32U);
-
- return *parent_rate / 32 * mult;
-}
-
-static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val, kick;
- unsigned int i;
-
- mult = div_u64((u64)rate * 32, parent_rate);
- mult = clamp(mult, 1U, 32U);
-
- if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
- return -EBUSY;
-
- val = readl(zclk->reg);
- val &= ~CPG_FRQCRC_ZFC_MASK;
- val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
- writel(val, zclk->reg);
-
- /*
- * Set KICK bit in FRQCRB to update hardware setting and wait for
- * clock change completion.
- */
- kick = readl(zclk->kick_reg);
- kick |= CPG_FRQCRB_KICK;
- writel(kick, zclk->kick_reg);
-
- /*
- * Note: There is no HW information about the worst case latency.
- *
- * Using experimental measurements, it seems that no more than
- * ~10 iterations are needed, independently of the CPU rate.
- * Since this value might be dependent on external xtal rate, pll1
- * rate or even the other emulation clocks rate, use 1000 as a
- * "super" safe value.
- */
- for (i = 1000; i; i--) {
- if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
- return 0;
-
- cpu_relax();
- }
-
- return -ETIMEDOUT;
-}
-
-static const struct clk_ops cpg_z_clk_ops = {
- .recalc_rate = cpg_z_clk_recalc_rate,
- .round_rate = cpg_z_clk_round_rate,
- .set_rate = cpg_z_clk_set_rate,
-};
-
-static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
-{
- static const char *parent_name = "pll0";
- struct clk_init_data init;
- struct cpg_z_clk *zclk;
- struct clk *clk;
-
- zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
- if (!zclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = "z";
- init.ops = &cpg_z_clk_ops;
- init.flags = 0;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- zclk->reg = cpg->reg + CPG_FRQCRC;
- zclk->kick_reg = cpg->reg + CPG_FRQCRB;
- zclk->hw.init = &init;
-
- clk = clk_register(NULL, &zclk->hw);
- if (IS_ERR(clk))
- kfree(zclk);
-
- return clk;
-}
-
-static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
- struct device_node *np)
-{
- const char *parent_name = of_clk_get_parent_name(np, 1);
- struct clk_fixed_factor *fixed;
- struct clk_gate *gate;
- struct clk *clk;
-
- fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
- if (!fixed)
- return ERR_PTR(-ENOMEM);
-
- fixed->mult = 1;
- fixed->div = 6;
-
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate) {
- kfree(fixed);
- return ERR_PTR(-ENOMEM);
- }
-
- gate->reg = cpg->reg + CPG_RCANCKCR;
- gate->bit_idx = 8;
- gate->flags = CLK_GATE_SET_TO_DISABLE;
- gate->lock = &cpg->lock;
-
- clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
- &fixed->hw, &clk_fixed_factor_ops,
- &gate->hw, &clk_gate_ops, 0);
- if (IS_ERR(clk)) {
- kfree(gate);
- kfree(fixed);
- }
-
- return clk;
-}
-
-/* ADSP divisors */
-static const struct clk_div_table cpg_adsp_div_table[] = {
- { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 },
- { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
- { 10, 36 }, { 11, 48 }, { 0, 0 },
-};
-
-static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
-{
- const char *parent_name = "pll1";
- struct clk_divider *div;
- struct clk_gate *gate;
- struct clk *clk;
-
- div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
- return ERR_PTR(-ENOMEM);
-
- div->reg = cpg->reg + CPG_ADSPCKCR;
- div->width = 4;
- div->table = cpg_adsp_div_table;
- div->lock = &cpg->lock;
-
- gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate) {
- kfree(div);
- return ERR_PTR(-ENOMEM);
- }
-
- gate->reg = cpg->reg + CPG_ADSPCKCR;
- gate->bit_idx = 8;
- gate->flags = CLK_GATE_SET_TO_DISABLE;
- gate->lock = &cpg->lock;
-
- clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
- &div->hw, &clk_divider_ops,
- &gate->hw, &clk_gate_ops, 0);
- if (IS_ERR(clk)) {
- kfree(gate);
- kfree(div);
- }
-
- return clk;
-}
-
-/* -----------------------------------------------------------------------------
- * CPG Clock Data
- */
-
-/*
- * MD EXTAL PLL0 PLL1 PLL3
- * 14 13 19 (MHz) *1 *1
- *---------------------------------------------------
- * 0 0 0 15 x 1 x172/2 x208/2 x106
- * 0 0 1 15 x 1 x172/2 x208/2 x88
- * 0 1 0 20 x 1 x130/2 x156/2 x80
- * 0 1 1 20 x 1 x130/2 x156/2 x66
- * 1 0 0 26 / 2 x200/2 x240/2 x122
- * 1 0 1 26 / 2 x200/2 x240/2 x102
- * 1 1 0 30 / 2 x172/2 x208/2 x106
- * 1 1 1 30 / 2 x172/2 x208/2 x88
- *
- * *1 : Table 7.6 indicates VCO output (PLLx = VCO/2)
- */
-#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
- (((md) & BIT(13)) >> 12) | \
- (((md) & BIT(19)) >> 19))
-struct cpg_pll_config {
- unsigned int extal_div;
- unsigned int pll1_mult;
- unsigned int pll3_mult;
- unsigned int pll0_mult; /* For R-Car V2H and E2 only */
-};
-
-static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
- { 1, 208, 106, 200 }, { 1, 208, 88, 200 },
- { 1, 156, 80, 150 }, { 1, 156, 66, 150 },
- { 2, 240, 122, 230 }, { 2, 240, 102, 230 },
- { 2, 208, 106, 200 }, { 2, 208, 88, 200 },
-};
-
-/* SDHI divisors */
-static const struct clk_div_table cpg_sdh_div_table[] = {
- { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 },
- { 4, 8 }, { 5, 12 }, { 6, 16 }, { 7, 18 },
- { 8, 24 }, { 10, 36 }, { 11, 48 }, { 0, 0 },
-};
-
-static const struct clk_div_table cpg_sd01_div_table[] = {
- { 4, 8 },
- { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 },
- { 10, 36 }, { 11, 48 }, { 12, 10 }, { 0, 0 },
-};
-
-/* -----------------------------------------------------------------------------
- * Initialization
- */
-
-static u32 cpg_mode __initdata;
-
-static const char * const pll0_mult_match[] = {
- "renesas,r8a7792-cpg-clocks",
- "renesas,r8a7794-cpg-clocks",
- NULL
-};
-
-static struct clk * __init
-rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
- const struct cpg_pll_config *config,
- const char *name)
-{
- const struct clk_div_table *table = NULL;
- const char *parent_name;
- unsigned int shift;
- unsigned int mult = 1;
- unsigned int div = 1;
-
- if (!strcmp(name, "main")) {
- parent_name = of_clk_get_parent_name(np, 0);
- div = config->extal_div;
- } else if (!strcmp(name, "pll0")) {
- /* PLL0 is a configurable multiplier clock. Register it as a
- * fixed factor clock for now as there's no generic multiplier
- * clock implementation and we currently have no need to change
- * the multiplier value.
- */
- if (of_device_compatible_match(np, pll0_mult_match)) {
- /* R-Car V2H and E2 do not have PLL0CR */
- mult = config->pll0_mult;
- div = 3;
- } else {
- u32 value = readl(cpg->reg + CPG_PLL0CR);
- mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
- }
- parent_name = "main";
- } else if (!strcmp(name, "pll1")) {
- parent_name = "main";
- mult = config->pll1_mult / 2;
- } else if (!strcmp(name, "pll3")) {
- parent_name = "main";
- mult = config->pll3_mult;
- } else if (!strcmp(name, "lb")) {
- parent_name = "pll1";
- div = cpg_mode & BIT(18) ? 36 : 24;
- } else if (!strcmp(name, "qspi")) {
- parent_name = "pll1_div2";
- div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
- ? 8 : 10;
- } else if (!strcmp(name, "sdh")) {
- parent_name = "pll1";
- table = cpg_sdh_div_table;
- shift = 8;
- } else if (!strcmp(name, "sd0")) {
- parent_name = "pll1";
- table = cpg_sd01_div_table;
- shift = 4;
- } else if (!strcmp(name, "sd1")) {
- parent_name = "pll1";
- table = cpg_sd01_div_table;
- shift = 0;
- } else if (!strcmp(name, "z")) {
- return cpg_z_clk_register(cpg);
- } else if (!strcmp(name, "rcan")) {
- return cpg_rcan_clk_register(cpg, np);
- } else if (!strcmp(name, "adsp")) {
- return cpg_adsp_clk_register(cpg);
- } else {
- return ERR_PTR(-EINVAL);
- }
-
- if (!table)
- return clk_register_fixed_factor(NULL, name, parent_name, 0,
- mult, div);
- else
- return clk_register_divider_table(NULL, name, parent_name, 0,
- cpg->reg + CPG_SDCKCR, shift,
- 4, 0, table, &cpg->lock);
-}
-
-/*
- * Reset register definitions.
- */
-#define MODEMR 0xe6160060
-
-static u32 __init rcar_gen2_read_mode_pins(void)
-{
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
- u32 mode;
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
-
- return mode;
-}
-
-static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
-{
- const struct cpg_pll_config *config;
- struct rcar_gen2_cpg *cpg;
- struct clk **clks;
- unsigned int i;
- int num_clks;
-
- if (rcar_rst_read_mode_pins(&cpg_mode)) {
- /* Backward-compatibility with old DT */
- pr_warn("%pOF: failed to obtain mode pins from RST\n", np);
- cpg_mode = rcar_gen2_read_mode_pins();
- }
-
- num_clks = of_property_count_strings(np, "clock-output-names");
- if (num_clks < 0) {
- pr_err("%s: failed to count clocks\n", __func__);
- return;
- }
-
- cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
- clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
- if (cpg == NULL || clks == NULL) {
- /* We're leaking memory on purpose, there's no point in cleaning
- * up as the system won't boot anyway.
- */
- return;
- }
-
- spin_lock_init(&cpg->lock);
-
- cpg->data.clks = clks;
- cpg->data.clk_num = num_clks;
-
- cpg->reg = of_iomap(np, 0);
- if (WARN_ON(cpg->reg == NULL))
- return;
-
- config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
-
- for (i = 0; i < num_clks; ++i) {
- const char *name;
- struct clk *clk;
-
- of_property_read_string_index(np, "clock-output-names", i,
- &name);
-
- clk = rcar_gen2_cpg_register_clock(np, cpg, config, name);
- if (IS_ERR(clk))
- pr_err("%s: failed to register %pOFn %s clock (%ld)\n",
- __func__, np, name, PTR_ERR(clk));
- else
- cpg->data.clks[i] = clk;
- }
-
- of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
-
- cpg_mstp_add_clk_domain(np);
-}
-CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
- rcar_gen2_cpg_clocks_init);
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
new file mode 100644
index 000000000000..c9af70917312
--- /dev/null
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a774b1 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ *
+ * Based on r8a7796-cpg-mssr.c
+ *
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a774b1-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A774B1_CLK_CANFD,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_RINT,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+
+ DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
+
+ /* Core Clock Outputs */
+ DEF_GEN3_Z("z", R8A774B1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_FIXED("ztr", R8A774B1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A774B1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A774B1_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A774B1_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A774B1_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A774B1_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A774B1_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A774B1_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A774B1_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d8", R8A774B1_CLK_S0D8, CLK_S0, 8, 1),
+ DEF_FIXED("s0d12", R8A774B1_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s1d2", R8A774B1_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A774B1_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A774B1_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A774B1_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A774B1_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A774B1_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A774B1_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A774B1_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, CLK_SDSRC, 0x074),
+ DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, CLK_SDSRC, 0x078),
+ DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, CLK_SDSRC, 0x26c),
+
+ DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A774B1_CLK_CPEX, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A774B1_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A774B1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+ DEF_DIV6P1("mso", R8A774B1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("hdmi", R8A774B1_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
+
+ DEF_GEN3_OSC("osc", R8A774B1_CLK_OSC, CLK_EXTAL, 8),
+
+ DEF_BASE("r", R8A774B1_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
+};
+
+static const struct mssr_mod_clk r8a774b1_mod_clks[] __initconst = {
+ DEF_MOD("tmu4", 121, R8A774B1_CLK_S0D6),
+ DEF_MOD("tmu3", 122, R8A774B1_CLK_S3D2),
+ DEF_MOD("tmu2", 123, R8A774B1_CLK_S3D2),
+ DEF_MOD("tmu1", 124, R8A774B1_CLK_S3D2),
+ DEF_MOD("tmu0", 125, R8A774B1_CLK_CP),
+ DEF_MOD("fdp1-0", 119, R8A774B1_CLK_S0D1),
+ DEF_MOD("scif5", 202, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif4", 203, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif3", 204, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif1", 206, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif0", 207, R8A774B1_CLK_S3D4),
+ DEF_MOD("msiof3", 208, R8A774B1_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A774B1_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A774B1_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A774B1_CLK_MSO),
+ DEF_MOD("sys-dmac2", 217, R8A774B1_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A774B1_CLK_S3D1),
+ DEF_MOD("sys-dmac0", 219, R8A774B1_CLK_S0D3),
+ DEF_MOD("cmt3", 300, R8A774B1_CLK_R),
+ DEF_MOD("cmt2", 301, R8A774B1_CLK_R),
+ DEF_MOD("cmt1", 302, R8A774B1_CLK_R),
+ DEF_MOD("cmt0", 303, R8A774B1_CLK_R),
+ DEF_MOD("tpu0", 304, R8A774B1_CLK_S3D4),
+ DEF_MOD("scif2", 310, R8A774B1_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A774B1_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A774B1_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A774B1_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A774B1_CLK_SD0),
+ DEF_MOD("pcie1", 318, R8A774B1_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A774B1_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A774B1_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A774B1_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A774B1_CLK_S3D1),
+ DEF_MOD("rwdt", 402, R8A774B1_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A774B1_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A774B1_CLK_S0D3),
+ DEF_MOD("audmac1", 501, R8A774B1_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A774B1_CLK_S1D2),
+ DEF_MOD("hscif4", 516, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif3", 517, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif2", 518, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif1", 519, R8A774B1_CLK_S3D1),
+ DEF_MOD("hscif0", 520, R8A774B1_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A774B1_CLK_CP),
+ DEF_MOD("pwm", 523, R8A774B1_CLK_S0D12),
+ DEF_MOD("fcpvd1", 602, R8A774B1_CLK_S0D2),
+ DEF_MOD("fcpvd0", 603, R8A774B1_CLK_S0D2),
+ DEF_MOD("fcpvb0", 607, R8A774B1_CLK_S0D1),
+ DEF_MOD("fcpvi0", 611, R8A774B1_CLK_S0D1),
+ DEF_MOD("fcpf0", 615, R8A774B1_CLK_S0D1),
+ DEF_MOD("fcpcs", 619, R8A774B1_CLK_S0D2),
+ DEF_MOD("vspd1", 622, R8A774B1_CLK_S0D2),
+ DEF_MOD("vspd0", 623, R8A774B1_CLK_S0D2),
+ DEF_MOD("vspb", 626, R8A774B1_CLK_S0D1),
+ DEF_MOD("vspi0", 631, R8A774B1_CLK_S0D1),
+ DEF_MOD("ehci1", 702, R8A774B1_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A774B1_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A774B1_CLK_S3D2),
+ DEF_MOD("csi20", 714, R8A774B1_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A774B1_CLK_CSI0),
+ DEF_MOD("du3", 721, R8A774B1_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A774B1_CLK_S2D1),
+ DEF_MOD("du0", 724, R8A774B1_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A774B1_CLK_S2D1),
+ DEF_MOD("hdmi0", 729, R8A774B1_CLK_HDMI),
+ DEF_MOD("vin7", 804, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin6", 805, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin5", 806, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin4", 807, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin3", 808, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin2", 809, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin1", 810, R8A774B1_CLK_S0D2),
+ DEF_MOD("vin0", 811, R8A774B1_CLK_S0D2),
+ DEF_MOD("etheravb", 812, R8A774B1_CLK_S0D6),
+ DEF_MOD("sata0", 815, R8A774B1_CLK_S3D2),
+ DEF_MOD("gpio7", 905, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A774B1_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A774B1_CLK_S3D4),
+ DEF_MOD("can-fd", 914, R8A774B1_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A774B1_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A774B1_CLK_S3D4),
+ DEF_MOD("i2c6", 918, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c-dvfs", 926, R8A774B1_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A774B1_CLK_S0D6),
+ DEF_MOD("i2c2", 929, R8A774B1_CLK_S3D2),
+ DEF_MOD("i2c1", 930, R8A774B1_CLK_S3D2),
+ DEF_MOD("i2c0", 931, R8A774B1_CLK_S3D2),
+ DEF_MOD("ssi-all", 1005, R8A774B1_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A774B1_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a774b1_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3 PLL4 OSC
+ * 14 13 19 17 (MHz)
+ *-----------------------------------------------------------------
+ * 0 0 0 0 16.66 x 1 x180 x192 x192 x144 /16
+ * 0 0 0 1 16.66 x 1 x180 x192 x128 x144 /16
+ * 0 0 1 0 Prohibited setting
+ * 0 0 1 1 16.66 x 1 x180 x192 x192 x144 /16
+ * 0 1 0 0 20 x 1 x150 x160 x160 x120 /19
+ * 0 1 0 1 20 x 1 x150 x160 x106 x120 /19
+ * 0 1 1 0 Prohibited setting
+ * 0 1 1 1 20 x 1 x150 x160 x160 x120 /19
+ * 1 0 0 0 25 x 1 x120 x128 x128 x96 /24
+ * 1 0 0 1 25 x 1 x120 x128 x84 x96 /24
+ * 1 0 1 0 Prohibited setting
+ * 1 0 1 1 25 x 1 x120 x128 x128 x96 /24
+ * 1 1 0 0 33.33 / 2 x180 x192 x192 x144 /32
+ * 1 1 0 1 33.33 / 2 x180 x192 x128 x144 /32
+ * 1 1 1 0 Prohibited setting
+ * 1 1 1 1 33.33 / 2 x180 x192 x192 x144 /32
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
+ (((md) & BIT(13)) >> 11) | \
+ (((md) & BIT(19)) >> 18) | \
+ (((md) & BIT(17)) >> 17))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 192, 1, 128, 1, 16, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 1, 192, 1, 16, },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 160, 1, 106, 1, 19, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 1, 160, 1, 19, },
+ { 1, 128, 1, 128, 1, 24, },
+ { 1, 128, 1, 84, 1, 24, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 1, 128, 1, 24, },
+ { 2, 192, 1, 192, 1, 32, },
+ { 2, 192, 1, 128, 1, 32, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 1, 192, 1, 32, },
+};
+
+static int __init r8a774b1_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a774b1_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a774b1_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a774b1_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a774b1_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a774b1_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a774b1_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a774b1_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a774b1_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
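
The CPG_PLL_CONFIG_INDEX() packing above shifts mode pins MD14, MD13, MD19 and MD17 into bits 3..0 of the table index, which is why cpg_pll_configs[] has 16 rows in exactly that MD ordering. A quick standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* Same packing as CPG_PLL_CONFIG_INDEX() above: MD14, MD13, MD19 and
 * MD17 land in bits 3..0 of the table index. */
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
				  (((md) & BIT(13)) >> 11) | \
				  (((md) & BIT(19)) >> 18) | \
				  (((md) & BIT(17)) >> 17))

int main(void)
{
	/* MD13=1, MD17=1 -> 0b0101 = 5: the 20 MHz, PLL3 x106 row. */
	uint32_t cpg_mode = BIT(13) | BIT(17);

	printf("index = %u\n", CPG_PLL_CONFIG_INDEX(cpg_mode));
	return 0;
}
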
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 90cc6a102602..e8420d3ada94 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * r8a7796 Clock Pulse Generator / Module Standby and Software Reset
+ * r8a7796 (R-Car M3-W/W+) Clock Pulse Generator / Module Standby and Software
+ * Reset
*
- * Copyright (C) 2016 Glider bvba
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2016-2019 Glider bvba
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -14,6 +15,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
@@ -116,7 +118,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_BASE("r", R8A7796_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
-static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("fdp1-0", 119, R8A7796_CLK_S0D1),
DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
@@ -304,6 +306,14 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
{ 2, 192, 1, 192, 1, 32, },
};
+/*
+ * Fixups for R-Car M3-W+
+ */
+
+static const unsigned int r8a77961_mod_nullify[] __initconst = {
+ MOD_CLK_ID(617), /* FCPCI0 */
+};
+
static int __init r8a7796_cpg_mssr_init(struct device *dev)
{
const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
@@ -320,6 +330,12 @@ static int __init r8a7796_cpg_mssr_init(struct device *dev)
return -EINVAL;
}
+ if (of_device_is_compatible(dev->of_node, "renesas,r8a77961-cpg-mssr"))
+ mssr_mod_nullify(r8a7796_mod_clks,
+ ARRAY_SIZE(r8a7796_mod_clks),
+ r8a77961_mod_nullify,
+ ARRAY_SIZE(r8a77961_mod_nullify));
+
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index b4e8c5b7d515..b3af4da2ca74 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -323,7 +323,7 @@ static int __init r8a77965_cpg_mssr_init(struct device *dev)
}
return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
-};
+}
const struct cpg_mssr_info r8a77965_cpg_mssr_info __initconst = {
/* Core Clocks */
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index f596a2dafcf4..d4fa3dc3e2a2 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -63,19 +63,22 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
return div_u64((u64)parent_rate * mult, 32);
}
-static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long prate = *parent_rate;
- unsigned int mult;
+ unsigned long prate = req->best_parent_rate;
+ unsigned int min_mult, max_mult, mult;
- if (!prate)
- prate = 1;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
- mult = div_u64((u64)rate * 32, prate);
- mult = clamp(mult, 1U, 32U);
+ mult = div64_ul(req->rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
- return *parent_rate / 32 * mult;
+ req->rate = div_u64((u64)prate * mult, 32);
+ return 0;
}
static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -86,7 +89,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val, kick;
unsigned int i;
- mult = div_u64((u64)rate * 32, parent_rate);
+ mult = div64_ul(rate * 32ULL, parent_rate);
mult = clamp(mult, 1U, 32U);
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
@@ -126,7 +129,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cpg_z_clk_ops = {
.recalc_rate = cpg_z_clk_recalc_rate,
- .round_rate = cpg_z_clk_round_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
.set_rate = cpg_z_clk_set_rate,
};
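
The round_rate to determine_rate conversion matters because a clk_rate_request carries min_rate/max_rate constraints that round_rate never saw; the new code derives a multiplier window from them and bails out when the window is empty. A simplified userspace model of that clamping (plain struct standing in for the kernel's clk_rate_request):

#include <stdio.h>

struct rate_request {
	unsigned long rate, min_rate, max_rate, best_parent_rate;
};

static int z_determine_rate(struct rate_request *req)
{
	unsigned long long prate = req->best_parent_rate;
	unsigned long long min_mult, max_mult, mult;

	min_mult = (unsigned long long)req->min_rate * 32 / prate;
	if (min_mult < 1)
		min_mult = 1;
	max_mult = (unsigned long long)req->max_rate * 32 / prate;
	if (max_mult > 32)
		max_mult = 32;
	if (max_mult < min_mult)
		return -1;		/* constraint window is empty */

	mult = (unsigned long long)req->rate * 32 / prate;
	if (mult < min_mult)
		mult = min_mult;
	if (mult > max_mult)
		mult = max_mult;

	req->rate = prate * mult / 32;
	return 0;
}

int main(void)
{
	struct rate_request req = {
		.rate		  = 1200000000UL,	/* want 1.2 GHz */
		.min_rate	  = 0,
		.max_rate	  = 1000000000UL,	/* capped at 1 GHz */
		.best_parent_rate = 1500000000UL,
	};

	if (!z_determine_rate(&req))
		printf("granted %lu Hz\n", req.rate);	/* 984375000 */
	return 0;
}
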
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index d25c8ba00a65..c97b647db9b6 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -114,18 +114,24 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
32 * zclk->fixed_div);
}
-static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int min_mult, max_mult, mult;
unsigned long prate;
- unsigned int mult;
- prate = *parent_rate / zclk->fixed_div;
- mult = div_u64(rate * 32ULL, prate);
- mult = clamp(mult, 1U, 32U);
+ prate = req->best_parent_rate / zclk->fixed_div;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = div64_ul(req->rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
- return (u64)prate * mult / 32;
+ req->rate = div_u64((u64)prate * mult, 32);
+ return 0;
}
static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -172,7 +178,7 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cpg_z_clk_ops = {
.recalc_rate = cpg_z_clk_recalc_rate,
- .round_rate = cpg_z_clk_round_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
.set_rate = cpg_z_clk_set_rate,
};
@@ -309,44 +315,44 @@ static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
clock->div_table[clock->cur_div_idx].div);
}
-static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
- unsigned long rate,
- unsigned long parent_rate)
+static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long calc_rate, diff, diff_min = ULONG_MAX;
- unsigned int i, best_div = 0;
+ unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long calc_rate, diff;
+ unsigned int i;
for (i = 0; i < clock->div_num; i++) {
- calc_rate = DIV_ROUND_CLOSEST(parent_rate,
+ calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
clock->div_table[i].div);
- diff = calc_rate > rate ? calc_rate - rate : rate - calc_rate;
+ if (calc_rate < req->min_rate || calc_rate > req->max_rate)
+ continue;
+
+ diff = calc_rate > req->rate ? calc_rate - req->rate
+ : req->rate - calc_rate;
if (diff < diff_min) {
- best_div = clock->div_table[i].div;
+ best_rate = calc_rate;
diff_min = diff;
}
}
- return best_div;
-}
-
-static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);
+ if (best_rate == ULONG_MAX)
+ return -EINVAL;
- return DIV_ROUND_CLOSEST(*parent_rate, div);
+ req->rate = best_rate;
+ return 0;
}
static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+ unsigned long parent_rate)
{
struct sd_clock *clock = to_sd_clock(hw);
- unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
unsigned int i;
for (i = 0; i < clock->div_num; i++)
- if (div == clock->div_table[i].div)
+ if (rate == DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[i].div))
break;
if (i >= clock->div_num)
@@ -366,7 +372,7 @@ static const struct clk_ops cpg_sd_clock_ops = {
.disable = cpg_sd_clock_disable,
.is_enabled = cpg_sd_clock_is_enabled,
.recalc_rate = cpg_sd_clock_recalc_rate,
- .round_rate = cpg_sd_clock_round_rate,
+ .determine_rate = cpg_sd_clock_determine_rate,
.set_rate = cpg_sd_clock_set_rate,
};
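
In the same spirit, the SD clock now skips divider candidates that fall outside the request's rate window before doing its closest-match search, and reports the achievable rate directly instead of a divider value. A cut-down standalone version of that search:

#include <limits.h>
#include <stdio.h>

struct div_entry { unsigned int div; };

static long sd_pick_rate(unsigned long parent, const struct div_entry *tbl,
			 unsigned int n, unsigned long want,
			 unsigned long lo, unsigned long hi)
{
	unsigned long best = ULONG_MAX, best_diff = ULONG_MAX;
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* DIV_ROUND_CLOSEST equivalent */
		unsigned long r = (parent + tbl[i].div / 2) / tbl[i].div;
		unsigned long diff;

		if (r < lo || r > hi)
			continue;	/* outside min_rate..max_rate */
		diff = r > want ? r - want : want - r;
		if (diff < best_diff) {
			best = r;
			best_diff = diff;
		}
	}
	return best == ULONG_MAX ? -1 : (long)best;
}

int main(void)
{
	const struct div_entry tbl[] = { { 2 }, { 4 }, { 8 }, { 16 } };

	/* div 2 gives 400 MHz but exceeds the 250 MHz cap, so div 4 wins. */
	printf("%ld\n", sd_pick_rate(800000000UL, tbl, 4,
				     190000000UL, 0, 250000000UL));
	return 0;
}
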
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 132cc96895e3..a2663fbbd7a5 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -702,6 +702,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a774a1_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A774B1
+ {
+ .compatible = "renesas,r8a774b1-cpg-mssr",
+ .data = &r8a774b1_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A774C0
{
.compatible = "renesas,r8a774c0-cpg-mssr",
@@ -743,12 +749,18 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7795_cpg_mssr_info,
},
#endif
-#ifdef CONFIG_CLK_R8A7796
+#ifdef CONFIG_CLK_R8A77960
{
.compatible = "renesas,r8a7796-cpg-mssr",
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77961
+ {
+ .compatible = "renesas,r8a77961-cpg-mssr",
+ .data = &r8a7796_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77965
{
.compatible = "renesas,r8a77965-cpg-mssr",
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 4ddcdf3bfb95..3b852ba0ecec 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -159,6 +159,7 @@ extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77470_cpg_mssr_info;
extern const struct cpg_mssr_info r8a774a1_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a774b1_cpg_mssr_info;
extern const struct cpg_mssr_info r8a774c0_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7790_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7791_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index ba9f00dc9740..b333fc28c94b 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -139,12 +139,11 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-const struct clk_ops clk_half_divider_ops = {
+static const struct clk_ops clk_half_divider_ops = {
.recalc_rate = clk_half_divider_recalc_rate,
.round_rate = clk_half_divider_round_rate,
.set_rate = clk_half_divider_set_rate,
};
-EXPORT_SYMBOL_GPL(clk_half_divider_ops);
/**
* Register a clock branch.
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index 3a501896b280..6fb9c98b7d24 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -167,6 +167,10 @@ PNAME(mux_uart5_p) = { "clk_uart5_src", "clk_uart5_np5", "clk_uart5_frac" };
PNAME(mux_cif_out_p) = { "xin24m", "dummy_cpll", "npll", "usb480m" };
PNAME(mux_dclk_vopb_p) = { "dclk_vopb_src", "dclk_vopb_frac", "xin24m" };
PNAME(mux_dclk_vopl_p) = { "dclk_vopl_src", "dclk_vopl_frac", "xin24m" };
+PNAME(mux_nandc_p) = { "clk_nandc_div", "clk_nandc_div50" };
+PNAME(mux_sdio_p) = { "clk_sdio_div", "clk_sdio_div50" };
+PNAME(mux_emmc_p) = { "clk_emmc_div", "clk_emmc_div50" };
+PNAME(mux_sdmmc_p) = { "clk_sdmmc_div", "clk_sdmmc_div50" };
PNAME(mux_gmac_p) = { "clk_gmac_src", "gmac_clkin" };
PNAME(mux_gmac_rmii_sel_p) = { "clk_gmac_rx_tx_div20", "clk_gmac_rx_tx_div2" };
PNAME(mux_rtc32k_pmu_p) = { "xin32k", "pmu_pvtm_32k", "clk_rtc32k_frac", };
@@ -460,16 +464,40 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
/* PD_MMC_NAND */
GATE(HCLK_MMC_NAND, "hclk_mmc_nand", "hclk_peri_pre", 0,
PX30_CLKGATE_CON(6), 0, GFLAGS),
- COMPOSITE(SCLK_NANDC, "clk_nandc", mux_gpll_cpll_npll_p, 0,
+ COMPOSITE(SCLK_NANDC_DIV, "clk_nandc_div", mux_gpll_cpll_npll_p, 0,
PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 11, GFLAGS),
+ COMPOSITE(SCLK_NANDC_DIV50, "clk_nandc_div50", mux_gpll_cpll_npll_p, 0,
+ PX30_CLKSEL_CON(15), 6, 2, MFLAGS, 8, 5, DFLAGS,
+ PX30_CLKGATE_CON(5), 12, GFLAGS),
+ COMPOSITE_NODIV(SCLK_NANDC, "clk_nandc", mux_nandc_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(15), 15, 1, MFLAGS,
PX30_CLKGATE_CON(5), 13, GFLAGS),
- COMPOSITE(SCLK_SDIO, "clk_sdio", mux_gpll_cpll_npll_xin24m_p, 0,
+ COMPOSITE(SCLK_SDIO_DIV, "clk_sdio_div", mux_gpll_cpll_npll_xin24m_p, 0,
PX30_CLKSEL_CON(18), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_DIV_OFFSET(SCLK_SDIO_DIV50, "clk_sdio_div50",
+ mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(18), 14, 2, MFLAGS,
+ PX30_CLKSEL_CON(19), 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 2, GFLAGS),
+ COMPOSITE_NODIV(SCLK_SDIO, "clk_sdio", mux_sdio_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(19), 15, 1, MFLAGS,
PX30_CLKGATE_CON(6), 3, GFLAGS),
- COMPOSITE(SCLK_EMMC, "clk_emmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ COMPOSITE(SCLK_EMMC_DIV, "clk_emmc_div", mux_gpll_cpll_npll_xin24m_p, 0,
PX30_CLKSEL_CON(20), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_DIV_OFFSET(SCLK_EMMC_DIV50, "clk_emmc_div50", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(20), 14, 2, MFLAGS,
+ PX30_CLKSEL_CON(21), 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE_NODIV(SCLK_EMMC, "clk_emmc", mux_emmc_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(21), 15, 1, MFLAGS,
PX30_CLKGATE_CON(6), 6, GFLAGS),
COMPOSITE(SCLK_SFC, "clk_sfc", mux_gpll_cpll_p, 0,
@@ -494,8 +522,16 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
/* PD_SDCARD */
GATE(0, "hclk_sdmmc_pre", "hclk_peri_pre", 0,
PX30_CLKGATE_CON(6), 12, GFLAGS),
- COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_npll_xin24m_p, 0,
+ COMPOSITE(SCLK_SDMMC_DIV, "clk_sdmmc_div", mux_gpll_cpll_npll_xin24m_p, 0,
PX30_CLKSEL_CON(16), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 13, GFLAGS),
+ COMPOSITE_DIV_OFFSET(SCLK_SDMMC_DIV50, "clk_sdmmc_div50", mux_gpll_cpll_npll_xin24m_p, 0,
+ PX30_CLKSEL_CON(16), 14, 2, MFLAGS,
+ PX30_CLKSEL_CON(17), 0, 8, DFLAGS,
+ PX30_CLKGATE_CON(6), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_SDMMC, "clk_sdmmc", mux_sdmmc_p,
+ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ PX30_CLKSEL_CON(17), 15, 1, MFLAGS,
PX30_CLKGATE_CON(6), 15, GFLAGS),
/* PD_USB */
@@ -763,29 +799,29 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = {
GATE(0, "pclk_ddrphy", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 3, GFLAGS),
GATE(PCLK_MIPIDSIPHY, "pclk_mipidsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 4, GFLAGS),
GATE(PCLK_MIPICSIPHY, "pclk_mipicsiphy", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 5, GFLAGS),
- GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 6, GFLAGS),
+ GATE(PCLK_USB_GRF, "pclk_usb_grf", "pclk_top_pre", 0, PX30_CLKGATE_CON(16), 6, GFLAGS),
GATE(0, "pclk_cpu_hoost", "pclk_top_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(16), 7, GFLAGS),
/* PD_VI */
- GATE(0, "aclk_vi_niu", "aclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(4), 15, GFLAGS),
+ GATE(0, "aclk_vi_niu", "aclk_vi_pre", 0, PX30_CLKGATE_CON(4), 15, GFLAGS),
GATE(ACLK_CIF, "aclk_cif", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 1, GFLAGS),
GATE(ACLK_ISP, "aclk_isp", "aclk_vi_pre", 0, PX30_CLKGATE_CON(5), 3, GFLAGS),
- GATE(0, "hclk_vi_niu", "hclk_vi_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(0, "hclk_vi_niu", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 0, GFLAGS),
GATE(HCLK_CIF, "hclk_cif", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 2, GFLAGS),
GATE(HCLK_ISP, "hclk_isp", "hclk_vi_pre", 0, PX30_CLKGATE_CON(5), 4, GFLAGS),
/* PD_VO */
- GATE(0, "aclk_vo_niu", "aclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 0, GFLAGS),
+ GATE(0, "aclk_vo_niu", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 0, GFLAGS),
GATE(ACLK_VOPB, "aclk_vopb", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 3, GFLAGS),
GATE(ACLK_RGA, "aclk_rga", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 7, GFLAGS),
GATE(ACLK_VOPL, "aclk_vopl", "aclk_vo_pre", 0, PX30_CLKGATE_CON(3), 5, GFLAGS),
- GATE(0, "hclk_vo_niu", "hclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(0, "hclk_vo_niu", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 1, GFLAGS),
GATE(HCLK_VOPB, "hclk_vopb", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 4, GFLAGS),
GATE(HCLK_RGA, "hclk_rga", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 8, GFLAGS),
GATE(HCLK_VOPL, "hclk_vopl", "hclk_vo_pre", 0, PX30_CLKGATE_CON(3), 6, GFLAGS),
- GATE(0, "pclk_vo_niu", "pclk_vo_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(3), 2, GFLAGS),
+ GATE(0, "pclk_vo_niu", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 2, GFLAGS),
GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vo_pre", 0, PX30_CLKGATE_CON(3), 9, GFLAGS),
/* PD_BUS */
@@ -940,7 +976,7 @@ static struct rockchip_clk_branch px30_clk_pmu_branches[] __initdata = {
GATE(0, "pclk_cru_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, PX30_PMU_CLKGATE_CON(0), 8, GFLAGS),
};
-static const char *const px30_pmucru_critical_clocks[] __initconst = {
+static const char *const px30_cru_critical_clocks[] __initconst = {
"aclk_bus_pre",
"pclk_bus_pre",
"hclk_bus_pre",
@@ -950,10 +986,16 @@ static const char *const px30_pmucru_critical_clocks[] __initconst = {
"pclk_top_pre",
"pclk_pmu_pre",
"hclk_usb_niu",
+ "pclk_vo_niu",
+ "aclk_vo_niu",
+ "hclk_vo_niu",
+ "aclk_vi_niu",
+ "hclk_vi_niu",
"pll_npll",
"usb480m",
"clk_uart2",
"pclk_uart2",
+ "pclk_usb_grf",
};
static void __init px30_clk_init(struct device_node *np)
@@ -985,6 +1027,9 @@ static void __init px30_clk_init(struct device_node *np)
&px30_cpuclk_data, px30_cpuclk_rates,
ARRAY_SIZE(px30_cpuclk_rates));
+ rockchip_clk_protect_critical(px30_cru_critical_clocks,
+ ARRAY_SIZE(px30_cru_critical_clocks));
+
rockchip_register_softrst(np, 12, reg_base + PX30_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -1017,9 +1062,6 @@ static void __init px30_pmu_clk_init(struct device_node *np)
rockchip_clk_register_branches(ctx, px30_clk_pmu_branches,
ARRAY_SIZE(px30_clk_pmu_branches));
- rockchip_clk_protect_critical(px30_pmucru_critical_clocks,
- ARRAY_SIZE(px30_pmucru_critical_clocks));
-
rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(px30_cru_pmu, "rockchip,px30-pmucru", px30_pmu_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 31466cd1842f..3a991ca1ee36 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -165,6 +165,8 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
GATE_BUS_CPU,
GATE_SCLK_CPU,
CLKOUT_CMU_CPU,
+ APLL_CON0,
+ KPLL_CON0,
CPLL_CON0,
DPLL_CON0,
EPLL_CON0,
@@ -611,7 +613,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_aclk66", mout_group1_p, SRC_TOP1, 8, 2),
MUX(0, "mout_aclk166", mout_group1_p, SRC_TOP1, 24, 2),
- MUX(0, "mout_aclk_g3d", mout_group5_p, SRC_TOP2, 16, 1),
+ MUX_F(0, "mout_aclk_g3d", mout_group5_p, SRC_TOP2, 16, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_user_aclk400_isp", mout_user_aclk400_isp_p,
SRC_TOP3, 0, 1),
@@ -653,8 +656,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
SRC_TOP5, 8, 1),
MUX(0, "mout_user_aclk266_g2d", mout_user_aclk266_g2d_p,
SRC_TOP5, 12, 1),
- MUX(CLK_MOUT_G3D, "mout_user_aclk_g3d", mout_user_aclk_g3d_p,
- SRC_TOP5, 16, 1),
+ MUX_F(CLK_MOUT_G3D, "mout_user_aclk_g3d", mout_user_aclk_g3d_p,
+ SRC_TOP5, 16, 1, CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_user_aclk300_jpeg", mout_user_aclk300_jpeg_p,
SRC_TOP5, 20, 1),
MUX(CLK_MOUT_USER_ACLK300_DISP1, "mout_user_aclk300_disp1",
@@ -663,7 +666,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
mout_user_aclk300_gscl_p, SRC_TOP5, 28, 1),
MUX(0, "mout_sclk_mpll", mout_mpll_p, SRC_TOP6, 0, 1),
- MUX(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1),
+ MUX_F(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(CLK_MOUT_SCLK_SPLL, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1),
MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1),
MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1),
@@ -707,7 +711,8 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
SRC_TOP12, 8, 1),
MUX(0, "mout_sw_aclk266_g2d", mout_sw_aclk266_g2d_p,
SRC_TOP12, 12, 1),
- MUX(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1),
+ MUX_F(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1,
+ CLK_SET_RATE_PARENT, 0),
MUX(0, "mout_sw_aclk300_jpeg", mout_sw_aclk300_jpeg_p,
SRC_TOP12, 20, 1),
MUX(CLK_MOUT_SW_ACLK300, "mout_sw_aclk300_disp1",
@@ -804,8 +809,8 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV_TOP2, 8, 3),
DIV(CLK_DOUT_ACLK266_G2D, "dout_aclk266_g2d", "mout_aclk266_g2d",
DIV_TOP2, 12, 3),
- DIV(CLK_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2,
- 16, 3),
+ DIV_F(CLK_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2,
+ 16, 3, CLK_SET_RATE_PARENT, 0),
DIV(CLK_DOUT_ACLK300_JPEG, "dout_aclk300_jpeg", "mout_aclk300_jpeg",
DIV_TOP2, 20, 3),
DIV(CLK_DOUT_ACLK300_DISP1, "dout_aclk300_disp1",
@@ -1253,7 +1258,8 @@ static struct exynos5_subcmu_reg_dump exynos5x_gsc_suspend_regs[] = {
};
static const struct samsung_gate_clock exynos5x_g3d_gate_clks[] __initconst = {
- GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
+ GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9,
+ CLK_SET_RATE_PARENT, 0),
};
static struct exynos5_subcmu_reg_dump exynos5x_g3d_suspend_regs[] = {
@@ -1437,6 +1443,17 @@ static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
PLL_36XX_RATE(24 * MHZ, 32768001U, 131, 3, 5, 4719),
};
+static const struct samsung_pll_rate_table exynos5420_vpll_24mhz_tbl[] = {
+ PLL_35XX_RATE(24 * MHZ, 600000000U, 200, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 543000000U, 181, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 480000000U, 160, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 420000000U, 140, 2, 2),
+ PLL_35XX_RATE(24 * MHZ, 350000000U, 175, 3, 2),
+ PLL_35XX_RATE(24 * MHZ, 266000000U, 266, 3, 3),
+ PLL_35XX_RATE(24 * MHZ, 177000000U, 118, 2, 3),
+ PLL_35XX_RATE(24 * MHZ, 100000000U, 200, 3, 4),
+};
+
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
[apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
@@ -1561,6 +1578,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
exynos5x_plls[epll].rate_table = exynos5420_epll_24mhz_tbl;
exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+ exynos5x_plls[vpll].rate_table = exynos5420_vpll_24mhz_tbl;
}
if (soc == EXYNOS5420)
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 1281672cb00e..7dad9098e897 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -238,7 +238,6 @@ static SIMPLE_DEV_PM_OPS(s3c24xx_dclk_pm_ops,
static int s3c24xx_dclk_probe(struct platform_device *pdev)
{
struct s3c24xx_dclk *s3c24xx_dclk;
- struct resource *mem;
struct s3c24xx_dclk_drv_data *dclk_variant;
struct clk_hw **clk_table;
int ret, i;
@@ -257,8 +256,7 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s3c24xx_dclk);
spin_lock_init(&s3c24xx_dclk->dclk_lock);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3c24xx_dclk->base = devm_ioremap_resource(&pdev->dev, mem);
+ s3c24xx_dclk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(s3c24xx_dclk->base))
return PTR_ERR(s3c24xx_dclk->base);
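
devm_platform_ioremap_resource() is a one-call replacement for the platform_get_resource() + devm_ioremap_resource() pair removed above, dropping the intermediate struct resource. A hedged sketch of the idiom in a generic probe path (my_priv and my_probe are made-up names):

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

struct my_priv {
	void __iomem *base;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Looks up MEM resource 0, requests the region and ioremaps it,
	 * all managed by devres, so no explicit cleanup is needed.
	 */
	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}
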
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index e544a38106dd..dad31308c071 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -60,8 +60,7 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
struct samsung_clk_provider *ctx;
int i;
- ctx = kzalloc(sizeof(struct samsung_clk_provider) +
- sizeof(*ctx->clk_data.hws) * nr_clks, GFP_KERNEL);
+ ctx = kzalloc(struct_size(ctx, clk_data.hws, nr_clks), GFP_KERNEL);
if (!ctx)
panic("could not allocate clock provider context.\n");
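
struct_size() (from <linux/overflow.h>) computes sizeof(*ctx) plus the trailing flexible-array storage with saturation on overflow, which is exactly what the open-coded sizeof arithmetic above is replaced with. A small illustration under invented names:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>

struct my_table {
	unsigned int nr;
	struct clk_hw *hws[];	/* flexible array member */
};

static struct my_table *my_table_alloc(unsigned int nr)
{
	/* Same as kzalloc(sizeof(*t) + nr * sizeof(t->hws[0]), ...) but
	 * saturates to SIZE_MAX if the multiplication would overflow,
	 * turning a potential heap overflow into a failed allocation.
	 */
	struct my_table *t = kzalloc(struct_size(t, hws, nr), GFP_KERNEL);

	if (t)
		t->nr = nr;
	return t;
}
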
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index 9d56eac43832..c0af4779892b 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -42,17 +42,15 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
struct regmap *regmap;
- struct resource *res;
if (of_find_property(node, "sprd,syscon", NULL)) {
regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
- if (IS_ERR_OR_NULL(regmap)) {
+ if (IS_ERR(regmap)) {
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
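
The IS_ERR_OR_NULL() -> IS_ERR() change above matters because syscon_regmap_lookup_by_phandle() returns either a valid regmap or an ERR_PTR(), never NULL; under the old check a NULL would have reached PTR_ERR(NULL), which is 0, silently converting a failure into success. A hedged sketch of the correct shape ("my,syscon" is an invented property name):

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int my_get_regmap(struct device_node *np, struct regmap **out)
{
	struct regmap *map = syscon_regmap_lookup_by_phandle(np, "my,syscon");

	/* ERR_PTR-only API: IS_ERR() is sufficient, and PTR_ERR() is
	 * guaranteed to carry a real negative errno on failure.
	 */
	if (IS_ERR(map))
		return PTR_ERR(map);

	*out = map;
	return 0;
}
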
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index d89353a3cdec..f2497d0a4683 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -203,12 +203,21 @@ static struct ccu_nkmp pll_hsic_clk = {
* hardcode it to match with the clock names.
*/
#define SUN50I_H6_PLL_AUDIO_REG 0x078
+
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 541900800, .pattern = 0xc001288d, .m = 1, .n = 22 },
+ { .rate = 589824000, .pattern = 0xc00126e9, .m = 1, .n = 24 },
+};
+
static struct ccu_nm pll_audio_base_clk = {
.enable = BIT(31),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table,
+ BIT(24), 0x178, BIT(31)),
.common = {
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
.reg = 0x078,
.hw.init = CLK_HW_INIT("pll-audio-base", "osc24M",
&ccu_nm_ops,
@@ -290,7 +299,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(gpu_clk, "gpu", gpu_parents, 0x670,
0, 3, /* M */
24, 1, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "psi-ahb1-ahb2",
0x67c, BIT(0), 0);
@@ -753,12 +762,12 @@ static const struct clk_hw *clk_parent_pll_audio[] = {
};
/*
- * The divider of pll-audio is fixed to 8 now, as pll-audio-4x has a
- * fixed post-divider 2.
+ * The divider of pll-audio is fixed to 24 for now, so 24576000 and 22579200
+ * rates can be set exactly in conjunction with sigma-delta modulation.
*/
static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio",
clk_parent_pll_audio,
- 8, 1, CLK_SET_RATE_PARENT);
+ 24, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x",
clk_parent_pll_audio,
4, 1, CLK_SET_RATE_PARENT);
@@ -1215,12 +1224,12 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
}
/*
- * Force the post-divider of pll-audio to 8 and the output divider
- * of it to 1, to make the clock name represents the real frequency.
+ * Force the post-divider of pll-audio to 12 and its output divider
+ * to 2, so the 24576000 and 22579200 rates can be set exactly.
*/
val = readl(reg + SUN50I_H6_PLL_AUDIO_REG);
val &= ~(GENMASK(21, 16) | BIT(0));
- writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG);
+ writel(val | (11 << 16) | BIT(0), reg + SUN50I_H6_PLL_AUDIO_REG);
/*
* First clock parent (osc32K) is unusable for CEC. But since there
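
The numbers in the two hunks above are easy to sanity-check: with the post-divider forced to 12 and the output divider to 2, pll-audio is a fixed 1/24 of pll-audio-base, and the sigma-delta table pins the base clock to 541900800 or 589824000 Hz, landing pll-audio exactly on 22579200 Hz (44.1 kHz family) and 24576000 Hz (48 kHz family). A quick userspace check:

#include <stdio.h>

int main(void)
{
	/* pll-audio-base rates from the sigma-delta table above */
	const unsigned long base[] = { 541900800, 589824000 };

	for (int i = 0; i < 2; i++)
		printf("%lu / 24 = %lu (remainder %lu)\n",
		       base[i], base[i] / 24, base[i] % 24);
	/* prints 22579200 and 24576000, both with remainder 0 */
	return 0;
}
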
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
index b6e2680ef354..d8c38447e11b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -48,10 +48,6 @@
/* Some more module clocks are exported */
-#define CLK_MBUS 113
-
-/* And the GPU module clock is exported */
-
#define CLK_NUMBER_H3 (CLK_GPU + 1)
#define CLK_NUMBER_H5 (CLK_BUS_SCR1 + 1)
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 4812e45c2214..df966ca06788 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -17,7 +17,9 @@ obj-y += clk-tegra-fixed.o
obj-y += clk-tegra-super-gen4.o
obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index f8688c2ddf1a..c051d92c2bbf 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1487,6 +1487,7 @@ static int dfll_init(struct tegra_dfll *td)
td->last_unrounded_rate = 0;
pm_runtime_enable(td->dev);
+ pm_runtime_irq_safe(td->dev);
pm_runtime_get_sync(td->dev);
dfll_set_mode(td, DFLL_DISABLED);
@@ -1513,6 +1514,61 @@ di_err1:
return ret;
}
+/**
+ * tegra_dfll_suspend - check that the DFLL is disabled
+ * @dev: DFLL device
+ *
+ * The DFLL clock should already have been disabled by the CPUFreq
+ * driver, so verify that it is and assert the DVCO reset.
+ */
+int tegra_dfll_suspend(struct device *dev)
+{
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+
+ if (dfll_is_running(td)) {
+ dev_err(td->dev, "DFLL still enabled while suspending\n");
+ return -EBUSY;
+ }
+
+ reset_control_assert(td->dvco_rst);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_suspend);
+
+/**
+ * tegra_dfll_resume - reinitialize DFLL on resume
+ * @dev: DFLL instance
+ *
+ * The DFLL is disabled and held in reset across suspend, so
+ * reinitialize the DFLL IP block here. The DFLL clock is enabled
+ * later, in closed-loop mode, by the CPUFreq driver before it
+ * switches the CPU clock source to the DFLL output.
+ */
+int tegra_dfll_resume(struct device *dev)
+{
+ struct tegra_dfll *td = dev_get_drvdata(dev);
+
+ reset_control_deassert(td->dvco_rst);
+
+ pm_runtime_get_sync(td->dev);
+
+ dfll_set_mode(td, DFLL_DISABLED);
+ dfll_set_default_params(td);
+
+ if (td->soc->init_clock_trimmers)
+ td->soc->init_clock_trimmers();
+
+ dfll_set_open_loop_config(td);
+
+ dfll_init_out_if(td);
+
+ pm_runtime_put_sync(td->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_resume);
+
/*
* DT data fetch
*/
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index 1b14ebe7268b..fb209eb5f365 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -42,5 +42,7 @@ int tegra_dfll_register(struct platform_device *pdev,
struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev);
int tegra_dfll_runtime_suspend(struct device *dev);
int tegra_dfll_runtime_resume(struct device *dev);
+int tegra_dfll_suspend(struct device *dev);
+int tegra_dfll_resume(struct device *dev);
#endif /* __DRIVERS_CLK_TEGRA_CLK_DFLL_H */
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index e76731fb7d69..ca0de5f11f84 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -109,10 +109,21 @@ static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static void clk_divider_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long rate = clk_hw_get_rate(hw);
+
+ if (clk_frac_div_set_rate(hw, rate, parent_rate) < 0)
+ WARN_ON(1);
+}
+
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
.round_rate = clk_frac_div_round_rate,
+ .restore_context = clk_divider_restore_context,
};
struct clk *tegra_clk_register_divider(const char *name,
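
The .restore_context hook added above is invoked when a platform replays clock state after a system-wide suspend; the divider simply re-runs set_rate with the rate cached by the framework. A hedged sketch of how a platform typically drives these hooks, via clk_save_context()/clk_restore_context() from syscore ops (the my_ names are invented):

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>

static int my_clk_syscore_suspend(void)
{
	/* snapshot enable state, rates and parents of all clocks */
	return clk_save_context();
}

static void my_clk_syscore_resume(void)
{
	/* walk all clocks and call their .restore_context hooks */
	clk_restore_context();
}

static struct syscore_ops my_clk_syscore_ops = {
	.suspend = my_clk_syscore_suspend,
	.resume = my_clk_syscore_resume,
};

static int __init my_clk_pm_init(void)
{
	register_syscore_ops(&my_clk_syscore_ops);
	return 0;
}
late_initcall(my_clk_pm_init);
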
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index ea39caf3d762..745f9faa98d8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -403,20 +403,16 @@ static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
}
timing->parent_index = 0xff;
- for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
- if (!strcmp(emc_parent_clk_names[i],
- __clk_get_name(timing->parent))) {
- timing->parent_index = i;
- break;
- }
- }
- if (timing->parent_index == 0xff) {
+ i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
+ __clk_get_name(timing->parent));
+ if (i < 0) {
pr_err("timing %pOF: %s is not a valid parent\n",
node, __clk_get_name(timing->parent));
clk_put(timing->parent);
return -EINVAL;
}
+ timing->parent_index = i;
return 0;
}
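
match_string() collapses the removed lookup loop and its 0xff sentinel into one call: it scans a sized string array and returns the matching index, or -EINVAL if the string is absent. A minimal sketch with invented names:

#include <linux/string.h>
#include <linux/kernel.h>

static const char * const my_parents[] = { "pll_m", "pll_c", "clk_m" };

static int my_parent_index(const char *name)
{
	/* index of @name within my_parents[], or -EINVAL if absent */
	return match_string(my_parents, ARRAY_SIZE(my_parents), name);
}
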
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index de466b4446da..c4faebd32760 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -236,9 +236,9 @@ enum clk_id {
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
tegra_clk_sor0,
- tegra_clk_sor0_lvds,
+ tegra_clk_sor0_out,
tegra_clk_sor1,
- tegra_clk_sor1_src,
+ tegra_clk_sor1_out,
tegra_clk_spdif,
tegra_clk_spdif_2x,
tegra_clk_spdif_in,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index 58437da25156..67620c7ecd9e 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -99,6 +100,23 @@ static void clk_periph_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
+static void clk_periph_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_periph *periph = to_clk_periph(hw);
+ const struct clk_ops *div_ops = periph->div_ops;
+ struct clk_hw *div_hw = &periph->divider.hw;
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ if (!(periph->gate.flags & TEGRA_PERIPH_NO_DIV))
+ div_ops->restore_context(div_hw);
+
+ clk_periph_set_parent(hw, parent_id);
+}
+
const struct clk_ops tegra_clk_periph_ops = {
.get_parent = clk_periph_get_parent,
.set_parent = clk_periph_set_parent,
@@ -108,6 +126,7 @@ const struct clk_ops tegra_clk_periph_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .restore_context = clk_periph_restore_context,
};
static const struct clk_ops tegra_clk_periph_nodiv_ops = {
@@ -116,6 +135,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = {
.is_enabled = clk_periph_is_enabled,
.enable = clk_periph_enable,
.disable = clk_periph_disable,
+ .restore_context = clk_periph_restore_context,
};
static const struct clk_ops tegra_clk_periph_no_gate_ops = {
@@ -124,6 +144,7 @@ static const struct clk_ops tegra_clk_periph_no_gate_ops = {
.recalc_rate = clk_periph_recalc_rate,
.round_rate = clk_periph_round_rate,
.set_rate = clk_periph_set_rate,
+ .restore_context = clk_periph_restore_context,
};
static struct clk *_tegra_clk_register_periph(const char *name,
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
index 35f2bf00e1e6..d8bf89a81e6d 100644
--- a/drivers/clk/tegra/clk-pll-out.c
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -69,10 +69,19 @@ static void clk_pll_out_disable(struct clk_hw *hw)
spin_unlock_irqrestore(pll_out->lock, flags);
}
+static void tegra_clk_pll_out_restore_context(struct clk_hw *hw)
+{
+ if (!__clk_get_enable_count(hw->clk))
+ clk_pll_out_disable(hw);
+ else
+ clk_pll_out_enable(hw);
+}
+
const struct clk_ops tegra_clk_pll_out_ops = {
.is_enabled = clk_pll_out_is_enabled,
.enable = clk_pll_out_enable,
.disable = clk_pll_out_disable,
+ .restore_context = tegra_clk_pll_out_restore_context,
};
struct clk *tegra_clk_register_pll_out(const char *name,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 1583f5fc992f..531c2b3d814e 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1008,6 +1008,27 @@ static unsigned long clk_plle_recalc_rate(struct clk_hw *hw,
return rate;
}
+static void tegra_clk_pll_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long rate = clk_hw_get_rate(hw);
+
+ if (clk_pll_is_enabled(hw))
+ return;
+
+ if (pll->params->set_defaults)
+ pll->params->set_defaults(pll);
+
+ clk_pll_set_rate(hw, rate, parent_rate);
+
+ if (!__clk_get_enable_count(hw->clk))
+ clk_pll_disable(hw);
+ else
+ clk_pll_enable(hw);
+}
+
const struct clk_ops tegra_clk_pll_ops = {
.is_enabled = clk_pll_is_enabled,
.enable = clk_pll_enable,
@@ -1015,6 +1036,7 @@ const struct clk_ops tegra_clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
+ .restore_context = tegra_clk_pll_restore_context,
};
const struct clk_ops tegra_clk_plle_ops = {
@@ -1802,6 +1824,27 @@ out:
return ret;
}
+
+static void _clk_plle_tegra_init_parent(struct tegra_clk_pll *pll)
+{
+ u32 val, val_aux;
+
+ /* ensure parent is set to pll_ref */
+ val = pll_readl_base(pll);
+ val_aux = pll_readl(pll->params->aux_reg, pll);
+
+ if (val & PLL_BASE_ENABLE) {
+ if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
+ (val_aux & PLLE_AUX_PLLP_SEL))
+ WARN(1, "pll_e enabled with unsupported parent %s\n",
+ (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
+ "pll_re_vco");
+ } else {
+ val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
+ pll_writel(val_aux, pll->params->aux_reg, pll);
+ fence_udelay(1, pll->clk_base);
+ }
+}
#endif
static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
@@ -2214,27 +2257,12 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
{
struct tegra_clk_pll *pll;
struct clk *clk;
- u32 val, val_aux;
pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
- /* ensure parent is set to pll_re_vco */
-
- val = pll_readl_base(pll);
- val_aux = pll_readl(pll_params->aux_reg, pll);
-
- if (val & PLL_BASE_ENABLE) {
- if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
- (val_aux & PLLE_AUX_PLLP_SEL))
- WARN(1, "pll_e enabled with unsupported parent %s\n",
- (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
- "pll_re_vco");
- } else {
- val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
- pll_writel(val_aux, pll_params->aux_reg, pll);
- }
+ _clk_plle_tegra_init_parent(pll);
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
&tegra_clk_plle_tegra114_ops);
@@ -2276,6 +2304,7 @@ static const struct clk_ops tegra_clk_pllss_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_ramp_round_rate,
.set_rate = clk_pllxc_set_rate,
+ .restore_context = tegra_clk_pll_restore_context,
};
struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
@@ -2520,11 +2549,19 @@ out:
spin_unlock_irqrestore(pll->lock, flags);
}
+static void tegra_clk_plle_t210_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+
+ _clk_plle_tegra_init_parent(pll);
+}
+
static const struct clk_ops tegra_clk_plle_tegra210_ops = {
.is_enabled = clk_plle_tegra210_is_enabled,
.enable = clk_plle_tegra210_enable,
.disable = clk_plle_tegra210_disable,
.recalc_rate = clk_pll_recalc_rate,
+ .restore_context = tegra_clk_plle_t210_restore_context,
};
struct clk *tegra_clk_register_plle_tegra210(const char *name,
@@ -2535,27 +2572,12 @@ struct clk *tegra_clk_register_plle_tegra210(const char *name,
{
struct tegra_clk_pll *pll;
struct clk *clk;
- u32 val, val_aux;
pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
if (IS_ERR(pll))
return ERR_CAST(pll);
- /* ensure parent is set to pll_re_vco */
-
- val = pll_readl_base(pll);
- val_aux = pll_readl(pll_params->aux_reg, pll);
-
- if (val & PLLE_BASE_ENABLE) {
- if ((val_aux & PLLE_AUX_PLLRE_SEL) ||
- (val_aux & PLLE_AUX_PLLP_SEL))
- WARN(1, "pll_e enabled with unsupported parent %s\n",
- (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
- "pll_re_vco");
- } else {
- val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
- pll_writel(val_aux, pll_params->aux_reg, pll);
- }
+ _clk_plle_tegra_init_parent(pll);
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
&tegra_clk_plle_tegra210_ops);
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index a5cd3e31dbae..316912d3b1a4 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -194,6 +194,21 @@ static void clk_sdmmc_mux_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
+static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
+{
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+ unsigned long rate = clk_hw_get_rate(hw);
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ clk_sdmmc_mux_set_parent(hw, parent_id);
+ clk_sdmmc_mux_set_rate(hw, rate, parent_rate);
+}
+
static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
.get_parent = clk_sdmmc_mux_get_parent,
.set_parent = clk_sdmmc_mux_set_parent,
@@ -203,6 +218,7 @@ static const struct clk_ops tegra_clk_sdmmc_mux_ops = {
.is_enabled = clk_sdmmc_mux_is_enabled,
.enable = clk_sdmmc_mux_enable,
.disable = clk_sdmmc_mux_disable,
+ .restore_context = clk_sdmmc_mux_restore_context,
};
struct clk *tegra_clk_register_sdmmc_mux_div(const char *name,
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 39ef31b46df5..6099c6e9acd4 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -28,6 +28,9 @@
#define super_state_to_src_shift(m, s) ((m->width * s))
#define super_state_to_src_mask(m) (((1 << m->width) - 1))
+#define CCLK_SRC_PLLP_OUT0 4
+#define CCLK_SRC_PLLP_OUT4 5
+
static u8 clk_super_get_parent(struct clk_hw *hw)
{
struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
@@ -97,12 +100,23 @@ static int clk_super_set_parent(struct clk_hw *hw, u8 index)
if (index == mux->div2_index)
index = mux->pllx_index;
}
+
+ /* enable PLLP branches to CPU before selecting PLLP source */
+ if ((mux->flags & TEGRA210_CPU_CLK) &&
+ (index == CCLK_SRC_PLLP_OUT0 || index == CCLK_SRC_PLLP_OUT4))
+ tegra_clk_set_pllp_out_cpu(true);
+
val &= ~((super_state_to_src_mask(mux)) << shift);
val |= (index & (super_state_to_src_mask(mux))) << shift;
writel_relaxed(val, mux->reg);
udelay(2);
+ /* disable PLLP branches to CPU if not used */
+ if ((mux->flags & TEGRA210_CPU_CLK) &&
+ index != CCLK_SRC_PLLP_OUT0 && index != CCLK_SRC_PLLP_OUT4)
+ tegra_clk_set_pllp_out_cpu(false);
+
out:
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -110,9 +124,21 @@ out:
return err;
}
+static void clk_super_mux_restore_context(struct clk_hw *hw)
+{
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ clk_super_set_parent(hw, parent_id);
+}
+
static const struct clk_ops tegra_clk_super_mux_ops = {
.get_parent = clk_super_get_parent,
.set_parent = clk_super_set_parent,
+ .restore_context = clk_super_mux_restore_context,
};
static long clk_super_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -148,12 +174,27 @@ static int clk_super_set_rate(struct clk_hw *hw, unsigned long rate,
return super->div_ops->set_rate(div_hw, rate, parent_rate);
}
+static void clk_super_restore_context(struct clk_hw *hw)
+{
+ struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
+ struct clk_hw *div_hw = &super->frac_div.hw;
+ int parent_id;
+
+ parent_id = clk_hw_get_parent_index(hw);
+ if (WARN_ON(parent_id < 0))
+ return;
+
+ super->div_ops->restore_context(div_hw);
+ clk_super_set_parent(hw, parent_id);
+}
+
const struct clk_ops tegra_clk_super_ops = {
.get_parent = clk_super_get_parent,
.set_parent = clk_super_set_parent,
.set_rate = clk_super_set_rate,
.round_rate = clk_super_round_rate,
.recalc_rate = clk_super_recalc_rate,
+ .restore_context = clk_super_restore_context,
};
struct clk *tegra_clk_register_super_mux(const char *name,
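
The clk_super_set_parent() change above follows a make-before-break ordering for live mux switches: the incoming PLLP branch to the CPU is ungated before the mux selects it, and only gated once the mux has moved elsewhere, so the CPU clock never momentarily runs from a dead input. A hedged, self-contained sketch of that ordering (all my_* names are invented):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>

#define MY_SRC_PLLP	4

static void my_pllp_branch_enable(bool on)
{
	/* gate/ungate the PLLP-to-CPU branch; body omitted in this sketch */
}

static void my_switch_cpu_source(void __iomem *reg, unsigned int index)
{
	if (index == MY_SRC_PLLP)
		my_pllp_branch_enable(true);	/* enable before selecting */

	writel_relaxed(index, reg);
	udelay(2);				/* let the switch propagate */

	if (index != MY_SRC_PLLP)
		my_pllp_branch_enable(false);	/* gate only after deselect */
}
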
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index 8d91b2b191cf..7c6c8abfcde6 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -17,6 +17,10 @@
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_SHIFT 28
#define OSC_CTRL_PLL_REF_DIV_SHIFT 26
+#define OSC_CTRL_MASK (0x3f2 | \
+ (0xf << OSC_CTRL_OSC_FREQ_SHIFT))
+
+static u32 osc_ctrl_ctx;
int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
unsigned long *input_freqs, unsigned int num,
@@ -29,6 +33,7 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
unsigned osc_idx;
val = readl_relaxed(clk_base + OSC_CTRL);
+ osc_ctrl_ctx = val & OSC_CTRL_MASK;
osc_idx = val >> OSC_CTRL_OSC_FREQ_SHIFT;
if (osc_idx < num)
@@ -96,3 +101,13 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
*dt_clk = clk;
}
}
+
+void tegra_clk_osc_resume(void __iomem *clk_base)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + OSC_CTRL) & ~OSC_CTRL_MASK;
+ val |= osc_ctrl_ctx;
+ writel_relaxed(val, clk_base + OSC_CTRL);
+ fence_udelay(2, clk_base);
+}
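
fence_udelay(), used in tegra_clk_osc_resume() above and throughout the Tegra and EMC code below, pairs the delay with a read-back so the preceding writel_relaxed() is flushed out of the posted-write buffer before the wait begins; otherwise the delay could elapse while the write is still in flight. A sketch of the idiom as it appears in the Tegra clk driver headers:

#include <linux/io.h>
#include <linux/delay.h>

/* Read back from the device to force out posted writes, then wait. */
#define my_fence_udelay(delay, reg)	\
	do {				\
		readl(reg);		\
		udelay(delay);		\
	} while (0)
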
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 1ed85f120a1b..0d07c0ba49b6 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -262,7 +262,6 @@
static DEFINE_SPINLOCK(PLLP_OUTA_lock);
static DEFINE_SPINLOCK(PLLP_OUTB_lock);
static DEFINE_SPINLOCK(PLLP_OUTC_lock);
-static DEFINE_SPINLOCK(sor0_lock);
#define MUX_I2S_SPDIF(_id) \
static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
@@ -587,11 +586,6 @@ static u32 mux_pllp_pllre_clkm_idx[] = {
[0] = 0, [1] = 2, [2] = 3,
};
-static const char *mux_clkm_plldp_sor0lvds[] = {
- "clk_m", "pll_dp", "sor0_lvds",
-};
-#define mux_clkm_plldp_sor0lvds_idx NULL
-
static const char * const mux_dmic1[] = {
"pll_a_out0", "dmic1_sync_clk", "pll_p", "clk_m"
};
@@ -731,14 +725,12 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio),
MUX8("clk72mhz", mux_pllp3_pllc_clkm, CLK_SOURCE_CLK72MHZ, 177, TEGRA_PERIPH_NO_RESET, tegra_clk_clk72Mhz),
MUX8("clk72mhz", mux_pllp_out3_pllp_pllc_clkm, CLK_SOURCE_CLK72MHZ, 177, TEGRA_PERIPH_NO_RESET, tegra_clk_clk72Mhz_8),
- MUX8_NOGATE_LOCK("sor0_lvds", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_SOR0, tegra_clk_sor0_lvds, &sor0_lock),
MUX_FLAGS("csite", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_CSITE, 73, TEGRA_PERIPH_ON_APB, tegra_clk_csite, CLK_IGNORE_UNUSED),
MUX_FLAGS("csite", mux_pllp_pllre_clkm, CLK_SOURCE_CSITE, 73, TEGRA_PERIPH_ON_APB, tegra_clk_csite_8, CLK_IGNORE_UNUSED),
NODIV("disp1", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, 0, tegra_clk_disp1, NULL),
NODIV("disp1", mux_pllp_plld_plld2_clkm, CLK_SOURCE_DISP1, 29, 7, 27, 0, tegra_clk_disp1_8, NULL),
NODIV("disp2", mux_pllp_pllm_plld_plla_pllc_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, 0, tegra_clk_disp2, NULL),
NODIV("disp2", mux_pllp_plld_plld2_clkm, CLK_SOURCE_DISP2, 29, 7, 26, 0, tegra_clk_disp2_8, NULL),
- NODIV("sor0", mux_clkm_plldp_sor0lvds, CLK_SOURCE_SOR0, 14, 3, 182, 0, tegra_clk_sor0, &sor0_lock),
UART("uarta", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTA, 6, tegra_clk_uarta),
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index cdfe7c9697e1..5760c978bef7 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -180,7 +180,7 @@ static void __init tegra_super_clk_init(void __iomem *clk_base,
gen_info->num_cclk_g_parents,
CLK_SET_RATE_PARENT,
clk_base + CCLKG_BURST_POLICY,
- 0, 4, 8, 0, NULL);
+ TEGRA210_CPU_CLK, 4, 8, 0, NULL);
} else {
clk = tegra_clk_register_super_mux("cclk_g",
gen_info->cclk_g_parents,
@@ -196,6 +196,11 @@ static void __init tegra_super_clk_init(void __iomem *clk_base,
dt_clk = tegra_lookup_dt_id(tegra_clk_cclk_lp, tegra_clks);
if (dt_clk) {
if (gen_info->gen == gen5) {
+ /*
+ * The TEGRA210_CPU_CLK flag is not needed for cclk_lp because
+ * cluster switching is not currently supported on Tegra210 and
+ * cpu_lp is not used.
+ */
clk = tegra_clk_register_super_mux("cclk_lp",
gen_info->cclk_lp_parents,
gen_info->num_cclk_lp_parents,
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index e84b6d52cbbd..2ac2679d696d 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -631,6 +631,7 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
tegra_dfll_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_dfll_suspend, tegra_dfll_resume)
};
static struct platform_driver tegra124_dfll_fcpu_driver = {
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 0224fdc4766f..b3110d5b5a6c 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -27,6 +27,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_SOR0 0x414
#define RST_DFLL_DVCO 0x2f4
#define DVFS_DFLL_RESET_SHIFT 0
@@ -91,6 +92,22 @@
/* Tegra CPU clock and reset control regs */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
+#define MASK(x) (BIT(x) - 1)
+
+#define MUX8_NOGATE_LOCK(_name, _parents, _offset, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset, \
+ 29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+ 0, TEGRA_PERIPH_NO_GATE, _clk_id,\
+ _parents##_idx, 0, _lock)
+
+#define NODIV(_name, _parents, _offset, \
+ _mux_shift, _mux_mask, _clk_num, \
+ _gate_flags, _clk_id, _lock) \
+ TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+ _mux_shift, _mux_mask, 0, 0, 0, 0, 0,\
+ _clk_num, (_gate_flags) | TEGRA_PERIPH_NO_DIV,\
+ _clk_id, _parents##_idx, 0, _lock)
+
#ifdef CONFIG_PM_SLEEP
static struct cpu_clk_suspend_context {
u32 clk_csite_src;
@@ -110,6 +127,7 @@ static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
static DEFINE_SPINLOCK(emc_lock);
+static DEFINE_SPINLOCK(sor0_lock);
/* possible OSC frequencies in Hz */
static unsigned long tegra124_input_freq[] = {
@@ -829,7 +847,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_adx1] = { .dt_id = TEGRA124_CLK_ADX1, .present = true },
[tegra_clk_dpaux] = { .dt_id = TEGRA124_CLK_DPAUX, .present = true },
[tegra_clk_sor0] = { .dt_id = TEGRA124_CLK_SOR0, .present = true },
- [tegra_clk_sor0_lvds] = { .dt_id = TEGRA124_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_sor0_out] = { .dt_id = TEGRA124_CLK_SOR0_OUT, .present = true },
[tegra_clk_gpu] = { .dt_id = TEGRA124_CLK_GPU, .present = true },
[tegra_clk_amx1] = { .dt_id = TEGRA124_CLK_AMX1, .present = true },
[tegra_clk_uartb] = { .dt_id = TEGRA124_CLK_UARTB, .present = true },
@@ -987,12 +1005,33 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "hda2hdmi", .dt_id = TEGRA124_CLK_HDA2HDMI },
};
+static const char * const sor0_parents[] = {
+ "pll_p_out0", "pll_m_out0", "pll_d_out0", "pll_a_out0", "pll_c_out0",
+ "pll_d2_out0", "clk_m",
+};
+
+static const char * const sor0_out_parents[] = {
+ "clk_m", "sor0_pad_clkout",
+};
+
+static struct tegra_periph_init_data tegra124_periph[] = {
+ TEGRA_INIT_DATA_TABLE("sor0", NULL, NULL, sor0_parents,
+ CLK_SOURCE_SOR0, 29, 0x7, 0, 0, 0, 0,
+ 0, 182, 0, tegra_clk_sor0, NULL, 0,
+ &sor0_lock),
+ TEGRA_INIT_DATA_TABLE("sor0_out", NULL, NULL, sor0_out_parents,
+ CLK_SOURCE_SOR0, 14, 0x1, 0, 0, 0, 0,
+ 0, 0, TEGRA_PERIPH_NO_GATE, tegra_clk_sor0_out,
+ NULL, 0, &sor0_lock),
+};
+
static struct clk **clks;
static __init void tegra124_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
struct clk *clk;
+ unsigned int i;
/* xusb_ss_div2 */
clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -1033,6 +1072,20 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
clk_register_clkdev(clk, "cml1", NULL);
clks[TEGRA124_CLK_CML1] = clk;
+ for (i = 0; i < ARRAY_SIZE(tegra124_periph); i++) {
+ struct tegra_periph_init_data *init = &tegra124_periph[i];
+ struct clk **clkp;
+
+ clkp = tegra_lookup_dt_id(init->clk_id, tegra124_clks);
+ if (!clkp) {
+ pr_warn("clock %u not found\n", init->clk_id);
+ continue;
+ }
+
+ clk = tegra_clk_register_periph_data(clk_base, init);
+ *clkp = clk;
+ }
+
tegra_periph_clk_init(clk_base, pmc_base, tegra124_clks, &pll_p_params);
}
diff --git a/drivers/clk/tegra/clk-tegra20-emc.c b/drivers/clk/tegra/clk-tegra20-emc.c
new file mode 100644
index 000000000000..03bf0009a33c
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra20-emc.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Based on drivers/clk/tegra/clk-emc.c
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#define pr_fmt(fmt) "tegra-emc-clk: " fmt
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/tegra.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK GENMASK(7, 0)
+#define CLK_SOURCE_EMC_2X_CLK_SRC_MASK GENMASK(31, 30)
+#define CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT 30
+
+#define MC_EMC_SAME_FREQ BIT(16)
+#define USE_PLLM_UD BIT(29)
+
+#define EMC_SRC_PLL_M 0
+#define EMC_SRC_PLL_C 1
+#define EMC_SRC_PLL_P 2
+#define EMC_SRC_CLK_M 3
+
+static const char * const emc_parent_clk_names[] = {
+ "pll_m", "pll_c", "pll_p", "clk_m",
+};
+
+struct tegra_clk_emc {
+ struct clk_hw hw;
+ void __iomem *reg;
+ bool mc_same_freq;
+ bool want_low_jitter;
+
+ tegra20_clk_emc_round_cb *round_cb;
+ void *cb_arg;
+};
+
+static inline struct tegra_clk_emc *to_tegra_clk_emc(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_emc, hw);
+}
+
+static unsigned long emc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ u32 val, div;
+
+ val = readl_relaxed(emc->reg);
+ div = val & CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+
+ return DIV_ROUND_UP(parent_rate * 2, div + 2);
+}
+
+static u8 emc_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+
+ return readl_relaxed(emc->reg) >> CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+}
+
+static int emc_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ u32 val, div;
+
+ val = readl_relaxed(emc->reg);
+ val &= ~CLK_SOURCE_EMC_2X_CLK_SRC_MASK;
+ val |= index << CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+
+ div = val & CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+
+ if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
+ val |= USE_PLLM_UD;
+ else
+ val &= ~USE_PLLM_UD;
+
+ if (emc->mc_same_freq)
+ val |= MC_EMC_SAME_FREQ;
+ else
+ val &= ~MC_EMC_SAME_FREQ;
+
+ writel_relaxed(val, emc->reg);
+
+ fence_udelay(1, emc->reg);
+
+ return 0;
+}
+
+static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ unsigned int index;
+ u32 val, div;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, 0);
+
+ val = readl_relaxed(emc->reg);
+ val &= ~CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+ val |= div;
+
+ index = val >> CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+
+ if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
+ val |= USE_PLLM_UD;
+ else
+ val &= ~USE_PLLM_UD;
+
+ if (emc->mc_same_freq)
+ val |= MC_EMC_SAME_FREQ;
+ else
+ val &= ~MC_EMC_SAME_FREQ;
+
+ writel_relaxed(val, emc->reg);
+
+ fence_udelay(1, emc->reg);
+
+ return 0;
+}
+
+static int emc_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ u32 val, div;
+
+ div = div_frac_get(rate, parent_rate, 8, 1, 0);
+
+ val = readl_relaxed(emc->reg);
+
+ val &= ~CLK_SOURCE_EMC_2X_CLK_SRC_MASK;
+ val |= index << CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
+
+ val &= ~CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
+ val |= div;
+
+ if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
+ val |= USE_PLLM_UD;
+ else
+ val &= ~USE_PLLM_UD;
+
+ if (emc->mc_same_freq)
+ val |= MC_EMC_SAME_FREQ;
+ else
+ val &= ~MC_EMC_SAME_FREQ;
+
+ writel_relaxed(val, emc->reg);
+
+ fence_udelay(1, emc->reg);
+
+ return 0;
+}
+
+static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
+ struct clk_hw *parent_hw;
+ unsigned long divided_rate;
+ unsigned long parent_rate;
+ unsigned int i;
+ long emc_rate;
+ int div;
+
+ emc_rate = emc->round_cb(req->rate, req->min_rate, req->max_rate,
+ emc->cb_arg);
+ if (emc_rate < 0)
+ return emc_rate;
+
+ for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
+ parent_hw = clk_hw_get_parent_by_index(hw, i);
+
+ if (req->best_parent_hw == parent_hw)
+ parent_rate = req->best_parent_rate;
+ else
+ parent_rate = clk_hw_get_rate(parent_hw);
+
+ if (emc_rate > parent_rate)
+ continue;
+
+ div = div_frac_get(emc_rate, parent_rate, 8, 1, 0);
+ divided_rate = DIV_ROUND_UP(parent_rate * 2, div + 2);
+
+ if (divided_rate != emc_rate)
+ continue;
+
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent_hw;
+ req->rate = emc_rate;
+ break;
+ }
+
+ if (i == ARRAY_SIZE(emc_parent_clk_names)) {
+ pr_err_once("can't find parent for rate %lu emc_rate %ld\n",
+ req->rate, emc_rate);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct clk_ops tegra_clk_emc_ops = {
+ .recalc_rate = emc_recalc_rate,
+ .get_parent = emc_get_parent,
+ .set_parent = emc_set_parent,
+ .set_rate = emc_set_rate,
+ .set_rate_and_parent = emc_set_rate_and_parent,
+ .determine_rate = emc_determine_rate,
+};
+
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+ struct clk *clk = __clk_lookup("emc");
+ struct tegra_clk_emc *emc;
+ struct clk_hw *hw;
+
+ if (clk) {
+ hw = __clk_get_hw(clk);
+ emc = to_tegra_clk_emc(hw);
+
+ emc->round_cb = round_cb;
+ emc->cb_arg = cb_arg;
+ }
+}
+
+bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw)
+{
+ return to_tegra_clk_emc(emc_hw)->round_cb != NULL;
+}
+
+struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter)
+{
+ struct tegra_clk_emc *emc;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ emc = kzalloc(sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return NULL;
+
+ /*
+ * EMC stands for External Memory Controller.
+ *
+ * The EMC clock must never be disabled, directly or by gating its
+ * parent, because the system hangs immediately once the external
+ * memory stops clocking; hence the clock is marked as critical.
+ */
+ init.name = "emc";
+ init.ops = &tegra_clk_emc_ops;
+ init.flags = CLK_IS_CRITICAL;
+ init.parent_names = emc_parent_clk_names;
+ init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
+
+ emc->reg = ioaddr;
+ emc->hw.init = &init;
+ emc->want_low_jitter = low_jitter;
+
+ clk = clk_register(NULL, &emc->hw);
+ if (IS_ERR(clk)) {
+ kfree(emc);
+ return NULL;
+ }
+
+ return clk;
+}
+
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ struct tegra_clk_emc *emc;
+ struct clk_hw *hw;
+
+ if (!emc_clk)
+ return -EINVAL;
+
+ hw = __clk_get_hw(emc_clk);
+ emc = to_tegra_clk_emc(hw);
+ emc->mc_same_freq = same;
+
+ return 0;
+}
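
The new EMC clock treats the divisor field as a 7.1 fixed-point value: the stored div yields rate = parent * 2 / (div + 2), so div 0, 1, 2 correspond to divide-by 1, 1.5 and 2, matching the div_frac_get(..., 8, 1, 0) calls above. A quick userspace check of emc_recalc_rate()'s rounding:

#include <stdio.h>

int main(void)
{
	const unsigned long parent = 600000000; /* e.g. pll_m at 600 MHz */

	for (unsigned int div = 0; div < 4; div++) {
		/* DIV_ROUND_UP(parent * 2, div + 2), open-coded */
		unsigned long rate = (parent * 2 + div + 1) / (div + 2);

		printf("div=%u -> %lu Hz\n", div, rate);
	}
	/* prints 600 MHz, 400 MHz, 300 MHz, 240 MHz */
	return 0;
}
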
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index bcd871134f45..4d8222f5c638 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -130,8 +130,6 @@ static struct cpu_clk_suspend_context {
static void __iomem *clk_base;
static void __iomem *pmc_base;
-static DEFINE_SPINLOCK(emc_lock);
-
#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
TEGRA_INIT_DATA(_name, NULL, NULL, _parents, _offset, \
@@ -760,7 +758,6 @@ static const char *pwm_parents[] = { "pll_p", "pll_c", "audio", "clk_m",
static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
static const char *mux_pllpdc_clkm[] = { "pll_p", "pll_d_out0", "pll_c",
"clk_m" };
-static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
static struct tegra_periph_init_data tegra_periph_clk_list[] = {
TEGRA_INIT_DATA_MUX("i2s1", i2s1_parents, CLK_SOURCE_I2S1, 11, TEGRA_PERIPH_ON_APB, TEGRA20_CLK_I2S1),
@@ -787,41 +784,6 @@ static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
};
-static void __init tegra20_emc_clk_init(void)
-{
- const u32 use_pllm_ud = BIT(29);
- struct clk *clk;
- u32 emc_reg;
-
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + CLK_SOURCE_EMC,
- 30, 2, 0, &emc_lock);
-
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
- clks[TEGRA20_CLK_MC] = clk;
-
- /* un-divided pll_m_out0 is currently unsupported */
- emc_reg = readl_relaxed(clk_base + CLK_SOURCE_EMC);
- if (emc_reg & use_pllm_ud) {
- pr_err("%s: un-divided PllM_out0 used as clock source\n",
- __func__);
- return;
- }
-
- /*
- * Note that 'emc_mux' source and 'emc' rate shouldn't be changed at
- * the same time due to a HW bug, this won't happen because we're
- * defining 'emc_mux' and 'emc' as distinct clocks.
- */
- clk = tegra_clk_register_divider("emc", "emc_mux",
- clk_base + CLK_SOURCE_EMC, CLK_IS_CRITICAL,
- TEGRA_DIVIDER_INT, 0, 8, 1, &emc_lock);
- clks[TEGRA20_CLK_EMC] = clk;
-}
-
static void __init tegra20_periph_clk_init(void)
{
struct tegra_periph_init_data *data;
@@ -835,7 +797,13 @@ static void __init tegra20_periph_clk_init(void)
clks[TEGRA20_CLK_AC97] = clk;
/* emc */
- tegra20_emc_clk_init();
+ clk = tegra20_clk_register_emc(clk_base + CLK_SOURCE_EMC, false);
+
+ clks[TEGRA20_CLK_EMC] = clk;
+
+ clk = tegra_clk_register_mc("mc", "emc", clk_base + CLK_SOURCE_EMC,
+ NULL);
+ clks[TEGRA20_CLK_MC] = clk;
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
@@ -987,6 +955,7 @@ static void tegra20_cpu_clock_suspend(void)
static void tegra20_cpu_clock_resume(void)
{
unsigned int reg, policy;
+ u32 misc, base;
/* Is CPU complex already running on PLLX? */
reg = readl(clk_base + CCLK_BURST_POLICY);
@@ -1000,15 +969,21 @@ static void tegra20_cpu_clock_resume(void)
BUG();
if (reg != CCLK_BURST_POLICY_PLLX) {
- /* restore PLLX settings if CPU is on different PLL */
- writel(tegra20_cpu_clk_sctx.pllx_misc,
- clk_base + PLLX_MISC);
- writel(tegra20_cpu_clk_sctx.pllx_base,
- clk_base + PLLX_BASE);
-
- /* wait for PLL stabilization if PLLX was enabled */
- if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
- udelay(300);
+ misc = readl_relaxed(clk_base + PLLX_MISC);
+ base = readl_relaxed(clk_base + PLLX_BASE);
+
+ if (misc != tegra20_cpu_clk_sctx.pllx_misc ||
+ base != tegra20_cpu_clk_sctx.pllx_base) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra20_cpu_clk_sctx.pllx_misc,
+ clk_base + PLLX_MISC);
+ writel(tegra20_cpu_clk_sctx.pllx_base,
+ clk_base + PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra20_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
}
/*
@@ -1115,6 +1090,8 @@ static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
if (IS_ERR(clk))
return clk;
+ hw = __clk_get_hw(clk);
+
/*
* Tegra20 CDEV1 and CDEV2 clocks are a bit special case, their parent
* clock is created by the pinctrl driver. It is possible for clk user
@@ -1124,13 +1101,16 @@ static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
*/
if (clkspec->args[0] == TEGRA20_CLK_CDEV1 ||
clkspec->args[0] == TEGRA20_CLK_CDEV2) {
- hw = __clk_get_hw(clk);
-
parent_hw = clk_hw_get_parent(hw);
if (!parent_hw)
return ERR_PTR(-EPROBE_DEFER);
}
+ if (clkspec->args[0] == TEGRA20_CLK_EMC) {
+ if (!tegra20_clk_emc_driver_available(hw))
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
return clk;
}
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index df172d5772d7..762cd186f714 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -9,13 +9,13 @@
#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra210-car.h>
#include <dt-bindings/reset/tegra210-car.h>
-#include <linux/iopoll.h>
#include <linux/sizes.h>
#include <soc/tegra/pmc.h>
@@ -33,6 +33,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
#define CLK_SOURCE_SOR1 0x410
+#define CLK_SOURCE_SOR0 0x414
#define CLK_SOURCE_LA 0x1f8
#define CLK_SOURCE_SDMMC2 0x154
#define CLK_SOURCE_SDMMC4 0x164
@@ -220,11 +221,15 @@
#define CLK_M_DIVISOR_SHIFT 2
#define CLK_M_DIVISOR_MASK 0x3
+#define CLK_MASK_ARM 0x44
+#define MISC_CLK_ENB 0x48
+
#define RST_DFLL_DVCO 0x2f4
#define DVFS_DFLL_RESET_SHIFT 0
#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8
#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac
+#define CPU_SOFTRST_CTRL 0x380
#define LVL2_CLK_GATE_OVRA 0xf8
#define LVL2_CLK_GATE_OVRC 0x3a0
@@ -298,6 +303,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
+static DEFINE_SPINLOCK(sor0_lock);
static DEFINE_SPINLOCK(sor1_lock);
static DEFINE_SPINLOCK(emc_lock);
static DEFINE_MUTEX(lvl2_ovr_lock);
@@ -2351,9 +2357,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_dpaux] = { .dt_id = TEGRA210_CLK_DPAUX, .present = true },
[tegra_clk_dpaux1] = { .dt_id = TEGRA210_CLK_DPAUX1, .present = true },
[tegra_clk_sor0] = { .dt_id = TEGRA210_CLK_SOR0, .present = true },
- [tegra_clk_sor0_lvds] = { .dt_id = TEGRA210_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_sor0_out] = { .dt_id = TEGRA210_CLK_SOR0_OUT, .present = true },
[tegra_clk_sor1] = { .dt_id = TEGRA210_CLK_SOR1, .present = true },
- [tegra_clk_sor1_src] = { .dt_id = TEGRA210_CLK_SOR1_SRC, .present = true },
+ [tegra_clk_sor1_out] = { .dt_id = TEGRA210_CLK_SOR1_OUT, .present = true },
[tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true },
[tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, },
[tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true },
@@ -2551,7 +2557,6 @@ static struct tegra_devclk devclks[] __initdata = {
{ .con_id = "pll_c4_out2", .dt_id = TEGRA210_CLK_PLL_C4_OUT2 },
{ .con_id = "pll_c4_out3", .dt_id = TEGRA210_CLK_PLL_C4_OUT3 },
{ .con_id = "dpaux", .dt_id = TEGRA210_CLK_DPAUX },
- { .con_id = "sor0", .dt_id = TEGRA210_CLK_SOR0 },
};
static struct tegra_audio_clk_info tegra210_audio_plls[] = {
@@ -2825,6 +2830,7 @@ static int tegra210_enable_pllu(void)
struct tegra_clk_pll_freq_table *fentry;
struct tegra_clk_pll pllu;
u32 reg;
+ int ret;
for (fentry = pll_u_freq_table; fentry->input_rate; fentry++) {
if (fentry->input_rate == pll_ref_freq)
@@ -2841,7 +2847,7 @@ static int tegra210_enable_pllu(void)
reg = readl_relaxed(clk_base + pllu.params->ext_misc_reg[0]);
reg &= ~BIT(pllu.params->iddq_bit_idx);
writel_relaxed(reg, clk_base + pllu.params->ext_misc_reg[0]);
- udelay(5);
+ fence_udelay(5, clk_base);
reg = readl_relaxed(clk_base + PLLU_BASE);
reg &= ~GENMASK(20, 0);
@@ -2849,13 +2855,18 @@ static int tegra210_enable_pllu(void)
reg |= fentry->n << 8;
reg |= fentry->p << 16;
writel(reg, clk_base + PLLU_BASE);
- udelay(1);
+ fence_udelay(1, clk_base);
reg |= PLL_ENABLE;
writel(reg, clk_base + PLLU_BASE);
- readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg,
- reg & PLL_BASE_LOCK, 2, 1000);
- if (!(reg & PLL_BASE_LOCK)) {
+ /*
+ * The same PLLU init and enable sequence runs again during clock
+ * resume. readx_poll_timeout_atomic() can't be used here because it
+ * relies on ktime_get(), and timekeeping has not yet resumed at that
+ * point, so use tegra210_wait_for_mask() to poll for PLL lock instead.
+ */
+ ret = tegra210_wait_for_mask(&pllu, PLLU_BASE, PLL_BASE_LOCK);
+ if (ret) {
pr_err("Timed out waiting for PLL_U to lock\n");
return -ETIMEDOUT;
}
@@ -2895,12 +2906,12 @@ static int tegra210_init_pllu(void)
reg = readl_relaxed(clk_base + XUSB_PLL_CFG0);
reg &= ~XUSB_PLL_CFG0_PLLU_LOCK_DLY_MASK;
writel_relaxed(reg, clk_base + XUSB_PLL_CFG0);
- udelay(1);
+ fence_udelay(1, clk_base);
reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0);
reg |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE;
writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0);
- udelay(1);
+ fence_udelay(1, clk_base);
reg = readl_relaxed(clk_base + PLLU_BASE);
reg &= ~PLLU_BASE_CLKENABLE_USB;
@@ -2915,6 +2926,39 @@ static int tegra210_init_pllu(void)
return 0;
}
+/*
+ * The SOR hardware blocks are driven by two clocks: a module clock that is
+ * used to access registers and a pixel clock that is sourced from the same
+ * pixel clock that also drives the head attached to the SOR. The module
+ * clock is typically called sorX (with X being the SOR instance) and the
+ * pixel clock is called sorX_out. The source for the SOR pixel clock is
+ * referred to as the "parent" clock.
+ *
+ * On Tegra186 and newer, clocks are provided by the BPMP. Unfortunately the
+ * BPMP implementation for the SOR clocks doesn't exactly match the above in
+ * some aspects. For example, the SOR module is really clocked by the pad or
+ * sor_safe clocks, but BPMP models the sorX clock as being sourced by the
+ * pixel clocks. Conversely the sorX_out clock is sourced by the sor_safe or
+ * pad clocks on BPMP.
+ *
+ * In order to allow the display driver to deal with all SoC generations in
+ * a unified way, implement the BPMP semantics in this driver.
+ */
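[Editor's note: for readers coming from the consumer side, the practical upshot of the comment above is that a display driver only ever deals with the sorX and sorX_out names, regardless of SoC generation. A minimal sketch of that usage follows; the connection names and helper are hypothetical and not part of this patch.]

	/* Hedged sketch: how a display driver might look up the two SOR clocks. */
	struct sor_clocks {
		struct clk *module;	/* "sor0": clocks register accesses */
		struct clk *pixel;	/* "sor0_out": clocks the pixel path */
	};

	static int sor_clocks_get(struct device *dev, struct sor_clocks *c)
	{
		c->module = devm_clk_get(dev, "sor");	/* connection name is illustrative */
		if (IS_ERR(c->module))
			return PTR_ERR(c->module);

		c->pixel = devm_clk_get(dev, "out");	/* connection name is illustrative */
		return PTR_ERR_OR_ZERO(c->pixel);
	}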
+
+static const char * const sor0_parents[] = {
+ "pll_d_out0",
+};
+
+static const char * const sor0_out_parents[] = {
+ "sor_safe", "sor0_pad_clkout",
+};
+
+static const char * const sor1_parents[] = {
+ "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
+};
+
+static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
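[Editor's note: the sor1_parents_idx table exists because the 3-bit mux field accepts sparse values: framework parent 0 (pll_p) is programmed as 0, parent 1 (pll_d_out0) as 2, parent 2 (pll_d2_out0) as 5 and parent 3 (clk_m) as 6. A hedged sketch of the translation the periph mux code performs internally; the helper names are illustrative.]

	/* Hedged sketch: dense framework index <-> sparse hardware mux value. */
	static u32 example_idx_to_hw(const u32 *idx_table, u8 index)
	{
		return idx_table[index];	/* e.g. index 1 -> 2 (pll_d_out0) */
	}

	static int example_hw_to_idx(const u32 *idx_table, int num, u32 val)
	{
		int i;

		for (i = 0; i < num; i++)
			if (idx_table[i] == val)
				return i;	/* framework parent index */

		return -EINVAL;			/* register held an unlisted value */
	}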
+
static const char * const sor1_out_parents[] = {
/*
* Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
@@ -2923,20 +2967,31 @@ static const char * const sor1_out_parents[] = {
* these bits to 0b11. While not an invalid setting, code should
* always set the bits to 0b01 to select sor1_pad_clkout.
*/
- "sor_safe", "sor1_pad_clkout", "sor1", "sor1_pad_clkout",
+ "sor_safe", "sor1_pad_clkout", "sor1_out", "sor1_pad_clkout",
};
-static const char * const sor1_parents[] = {
- "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
-};
-
-static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
-
static struct tegra_periph_init_data tegra210_periph[] = {
+ /*
+ * On Tegra210, the sor0 clock doesn't have a mux in bitfield 31:29,
+ * but it is hardwired to the pll_d_out0 clock.
+ */
+ TEGRA_INIT_DATA_TABLE("sor0", NULL, NULL, sor0_parents,
+ CLK_SOURCE_SOR0, 29, 0x0, 0, 0, 0, 0,
+ 0, 182, 0, tegra_clk_sor0, NULL, 0,
+ &sor0_lock),
+ TEGRA_INIT_DATA_TABLE("sor0_out", NULL, NULL, sor0_out_parents,
+ CLK_SOURCE_SOR0, 14, 0x1, 0, 0, 0, 0,
+ 0, 0, TEGRA_PERIPH_NO_GATE, tegra_clk_sor0_out,
+ NULL, 0, &sor0_lock),
TEGRA_INIT_DATA_TABLE("sor1", NULL, NULL, sor1_parents,
CLK_SOURCE_SOR1, 29, 0x7, 0, 0, 8, 1,
- TEGRA_DIVIDER_ROUND_UP, 183, 0, tegra_clk_sor1,
- sor1_parents_idx, 0, &sor1_lock),
+ TEGRA_DIVIDER_ROUND_UP, 183, 0,
+ tegra_clk_sor1, sor1_parents_idx, 0,
+ &sor1_lock),
+ TEGRA_INIT_DATA_TABLE("sor1_out", NULL, NULL, sor1_out_parents,
+ CLK_SOURCE_SOR1, 14, 0x3, 0, 0, 0, 0,
+ 0, 0, TEGRA_PERIPH_NO_GATE,
+ tegra_clk_sor1_out, NULL, 0, &sor1_lock),
};
static const char * const la_parents[] = {
@@ -2969,12 +3024,6 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
1, 17, 207);
clks[TEGRA210_CLK_DPAUX1] = clk;
- clk = clk_register_mux_table(NULL, "sor1_out", sor1_out_parents,
- ARRAY_SIZE(sor1_out_parents), 0,
- clk_base + CLK_SOURCE_SOR1, 14, 0x3,
- 0, NULL, &sor1_lock);
- clks[TEGRA210_CLK_SOR1_OUT] = clk;
-
/* pll_d_dsi_out */
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -3287,6 +3336,77 @@ static void tegra210_disable_cpu_clock(u32 cpu)
}
#ifdef CONFIG_PM_SLEEP
+#define car_readl(_base, _off) readl_relaxed(clk_base + (_base) + ((_off) * 4))
+#define car_writel(_val, _base, _off) \
+ writel_relaxed(_val, clk_base + (_base) + ((_off) * 4))
+
+static u32 spare_reg_ctx, misc_clk_enb_ctx, clk_msk_arm_ctx;
+static u32 cpu_softrst_ctx[3];
+
+static int tegra210_clk_suspend(void)
+{
+ unsigned int i;
+
+ clk_save_context();
+
+ /*
+ * Save the bootloader-configured clock registers SPARE_REG0,
+ * MISC_CLK_ENB, CLK_MASK_ARM, CPU_SOFTRST_CTRL.
+ */
+ spare_reg_ctx = readl_relaxed(clk_base + SPARE_REG0);
+ misc_clk_enb_ctx = readl_relaxed(clk_base + MISC_CLK_ENB);
+ clk_msk_arm_ctx = readl_relaxed(clk_base + CLK_MASK_ARM);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_softrst_ctx); i++)
+ cpu_softrst_ctx[i] = car_readl(CPU_SOFTRST_CTRL, i);
+
+ tegra_clk_periph_suspend();
+ return 0;
+}
+
+static void tegra210_clk_resume(void)
+{
+ unsigned int i;
+
+ tegra_clk_osc_resume(clk_base);
+
+ /*
+ * Restore the bootloader-configured clock registers SPARE_REG0,
+ * MISC_CLK_ENB, CLK_MASK_ARM, CPU_SOFTRST_CTRL from saved context.
+ */
+ writel_relaxed(spare_reg_ctx, clk_base + SPARE_REG0);
+ writel_relaxed(misc_clk_enb_ctx, clk_base + MISC_CLK_ENB);
+ writel_relaxed(clk_msk_arm_ctx, clk_base + CLK_MASK_ARM);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_softrst_ctx); i++)
+ car_writel(cpu_softrst_ctx[i], CPU_SOFTRST_CTRL, i);
+
+ /*
+ * The Tegra clock programming sequence recommends that a peripheral
+ * clock be enabled before its source and divider are changed in order
+ * to ensure a glitchless frequency switch. So enable all peripheral
+ * clocks before restoring their sources and dividers.
+ */
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_L, clk_base + CLK_OUT_ENB_L);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_H, clk_base + CLK_OUT_ENB_H);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_U, clk_base + CLK_OUT_ENB_U);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_V, clk_base + CLK_OUT_ENB_V);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_W, clk_base + CLK_OUT_ENB_W);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_X, clk_base + CLK_OUT_ENB_X);
+ writel_relaxed(TEGRA210_CLK_ENB_VLD_MSK_Y, clk_base + CLK_OUT_ENB_Y);
+
+ /* wait for the writes to complete so that all the clocks are enabled */
+ fence_udelay(2, clk_base);
+
+ /* restore PLLs and all peripheral clock rates */
+ tegra210_init_pllu();
+ clk_restore_context();
+
+ /* restore saved context of peripheral clocks and reset state */
+ tegra_clk_periph_resume();
+}
+
static void tegra210_cpu_clock_suspend(void)
{
/* switch coresite to clk_m, save off original source */
@@ -3302,6 +3422,13 @@ static void tegra210_cpu_clock_resume(void)
}
#endif
+static struct syscore_ops tegra_clk_syscore_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = tegra210_clk_suspend,
+ .resume = tegra210_clk_resume,
+#endif
+};
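[Editor's note: syscore ops are used here because they run on a single CPU with interrupts disabled, after device suspend and before timekeeping resumes — the same constraint that forces the PLLU lock wait above to avoid ktime-based polling. A condensed sketch of the generic pattern, for reference; all names are illustrative.]

	/* Hedged sketch: the generic syscore pattern and its constraints. */
	static int example_syscore_suspend(void)
	{
		/* runs late: one CPU, IRQs off, devices already suspended */
		return 0;
	}

	static void example_syscore_resume(void)
	{
		/* runs early: before timekeeping resumes, so no ktime-based waits */
	}

	static struct syscore_ops example_syscore_ops = {
		.suspend = example_syscore_suspend,
		.resume = example_syscore_resume,
	};

	/* registered once at init: register_syscore_ops(&example_syscore_ops); */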
+
static struct tegra_cpu_car_ops tegra210_cpu_car_ops = {
.wait_for_reset = tegra210_wait_cpu_in_reset,
.disable_clock = tegra210_disable_cpu_clock,
@@ -3586,5 +3713,7 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra210_mbist_clk_init();
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
+
+ register_syscore_ops(&tegra_clk_syscore_ops);
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 7b4c6a488527..c8bc18e4d7e5 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -151,7 +151,6 @@ static unsigned long input_freq;
static DEFINE_SPINLOCK(cml_lock);
static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(emc_lock);
#define TEGRA_INIT_DATA_MUX(_name, _parents, _offset, \
_clk_num, _gate_flags, _clk_id) \
@@ -808,7 +807,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
[tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
[tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true },
- [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = true },
+ [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = false },
};
static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
@@ -995,7 +994,6 @@ static void __init tegra30_super_clk_init(void)
static const char *mux_pllacp_clkm[] = { "pll_a_out0", "unused", "pll_p",
"clk_m" };
static const char *mux_pllpcm_clkm[] = { "pll_p", "pll_c", "pll_m", "clk_m" };
-static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m" };
static const char *spdif_out_parents[] = { "pll_a_out0", "spdif_2x", "pll_p",
"clk_m" };
static const char *mux_pllmcpa[] = { "pll_m", "pll_c", "pll_p", "pll_a_out0" };
@@ -1044,14 +1042,12 @@ static void __init tegra30_periph_clk_init(void)
clks[TEGRA30_CLK_AFI] = clk;
/* emc */
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + CLK_SOURCE_EMC,
- 30, 2, 0, &emc_lock);
+ clk = tegra20_clk_register_emc(clk_base + CLK_SOURCE_EMC, true);
+
+ clks[TEGRA30_CLK_EMC] = clk;
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
+ clk = tegra_clk_register_mc("mc", "emc", clk_base + CLK_SOURCE_EMC,
+ NULL);
clks[TEGRA30_CLK_MC] = clk;
/* cml0 */
@@ -1167,6 +1163,7 @@ static void tegra30_cpu_clock_suspend(void)
static void tegra30_cpu_clock_resume(void)
{
unsigned int reg, policy;
+ u32 misc, base;
/* Is CPU complex already running on PLLX? */
reg = readl(clk_base + CLK_RESET_CCLK_BURST);
@@ -1180,15 +1177,21 @@ static void tegra30_cpu_clock_resume(void)
BUG();
if (reg != CLK_RESET_CCLK_BURST_POLICY_PLLX) {
- /* restore PLLX settings if CPU is on different PLL */
- writel(tegra30_cpu_clk_sctx.pllx_misc,
- clk_base + CLK_RESET_PLLX_MISC);
- writel(tegra30_cpu_clk_sctx.pllx_base,
- clk_base + CLK_RESET_PLLX_BASE);
-
- /* wait for PLL stabilization if PLLX was enabled */
- if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
- udelay(300);
+ misc = readl_relaxed(clk_base + CLK_RESET_PLLX_MISC);
+ base = readl_relaxed(clk_base + CLK_RESET_PLLX_BASE);
+
+ if (misc != tegra30_cpu_clk_sctx.pllx_misc ||
+ base != tegra30_cpu_clk_sctx.pllx_base) {
+ /* restore PLLX settings if CPU is on different PLL */
+ writel(tegra30_cpu_clk_sctx.pllx_misc,
+ clk_base + CLK_RESET_PLLX_MISC);
+ writel(tegra30_cpu_clk_sctx.pllx_base,
+ clk_base + CLK_RESET_PLLX_BASE);
+
+ /* wait for PLL stabilization if PLLX was enabled */
+ if (tegra30_cpu_clk_sctx.pllx_base & (1 << 30))
+ udelay(300);
+ }
}
/*
@@ -1302,6 +1305,26 @@ static struct tegra_audio_clk_info tegra30_audio_plls[] = {
{ "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
};
+static struct clk *tegra30_clk_src_onecell_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw *hw;
+ struct clk *clk;
+
+ clk = of_clk_src_onecell_get(clkspec, data);
+ if (IS_ERR(clk))
+ return clk;
+
+ hw = __clk_get_hw(clk);
+
+ if (clkspec->args[0] == TEGRA30_CLK_EMC) {
+ if (!tegra20_clk_emc_driver_available(hw))
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return clk;
+}
+
static void __init tegra30_clock_init(struct device_node *np)
{
struct device_node *node;
@@ -1345,7 +1368,7 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
- tegra_add_of_provider(np, of_clk_src_onecell_get);
+ tegra_add_of_provider(np, tegra30_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 573e3c967ae1..e6bd6d1ea012 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -16,56 +16,13 @@
#include "clk.h"
-#define CLK_OUT_ENB_L 0x010
-#define CLK_OUT_ENB_H 0x014
-#define CLK_OUT_ENB_U 0x018
-#define CLK_OUT_ENB_V 0x360
-#define CLK_OUT_ENB_W 0x364
-#define CLK_OUT_ENB_X 0x280
-#define CLK_OUT_ENB_Y 0x298
-#define CLK_OUT_ENB_SET_L 0x320
-#define CLK_OUT_ENB_CLR_L 0x324
-#define CLK_OUT_ENB_SET_H 0x328
-#define CLK_OUT_ENB_CLR_H 0x32c
-#define CLK_OUT_ENB_SET_U 0x330
-#define CLK_OUT_ENB_CLR_U 0x334
-#define CLK_OUT_ENB_SET_V 0x440
-#define CLK_OUT_ENB_CLR_V 0x444
-#define CLK_OUT_ENB_SET_W 0x448
-#define CLK_OUT_ENB_CLR_W 0x44c
-#define CLK_OUT_ENB_SET_X 0x284
-#define CLK_OUT_ENB_CLR_X 0x288
-#define CLK_OUT_ENB_SET_Y 0x29c
-#define CLK_OUT_ENB_CLR_Y 0x2a0
-
-#define RST_DEVICES_L 0x004
-#define RST_DEVICES_H 0x008
-#define RST_DEVICES_U 0x00C
-#define RST_DEVICES_V 0x358
-#define RST_DEVICES_W 0x35C
-#define RST_DEVICES_X 0x28C
-#define RST_DEVICES_Y 0x2a4
-#define RST_DEVICES_SET_L 0x300
-#define RST_DEVICES_CLR_L 0x304
-#define RST_DEVICES_SET_H 0x308
-#define RST_DEVICES_CLR_H 0x30c
-#define RST_DEVICES_SET_U 0x310
-#define RST_DEVICES_CLR_U 0x314
-#define RST_DEVICES_SET_V 0x430
-#define RST_DEVICES_CLR_V 0x434
-#define RST_DEVICES_SET_W 0x438
-#define RST_DEVICES_CLR_W 0x43c
-#define RST_DEVICES_SET_X 0x290
-#define RST_DEVICES_CLR_X 0x294
-#define RST_DEVICES_SET_Y 0x2a8
-#define RST_DEVICES_CLR_Y 0x2ac
-
/* Global data of Tegra CPU CAR ops */
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
int *periph_clk_enb_refcnt;
static int periph_banks;
+static u32 *periph_state_ctx;
static struct clk **clks;
static int clk_num;
static struct clk_onecell_data clk_data;
@@ -199,6 +156,65 @@ const struct tegra_clk_periph_regs *get_reg_bank(int clkid)
}
}
+void tegra_clk_set_pllp_out_cpu(bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + CLK_OUT_ENB_Y);
+ if (enable)
+ val |= CLK_ENB_PLLP_OUT_CPU;
+ else
+ val &= ~CLK_ENB_PLLP_OUT_CPU;
+
+ writel_relaxed(val, clk_base + CLK_OUT_ENB_Y);
+}
+
+void tegra_clk_periph_suspend(void)
+{
+ unsigned int i, idx;
+
+ idx = 0;
+ for (i = 0; i < periph_banks; i++, idx++)
+ periph_state_ctx[idx] =
+ readl_relaxed(clk_base + periph_regs[i].enb_reg);
+
+ for (i = 0; i < periph_banks; i++, idx++)
+ periph_state_ctx[idx] =
+ readl_relaxed(clk_base + periph_regs[i].rst_reg);
+}
+
+void tegra_clk_periph_resume(void)
+{
+ unsigned int i, idx;
+
+ idx = 0;
+ for (i = 0; i < periph_banks; i++, idx++)
+ writel_relaxed(periph_state_ctx[idx],
+ clk_base + periph_regs[i].enb_reg);
+ /*
+ * All non-boot peripherals will be in reset state on resume.
+ * Wait for 5us of reset propagation delay before de-asserting
+ * the peripheral resets based on the saved context.
+ */
+ fence_udelay(5, clk_base);
+
+ for (i = 0; i < periph_banks; i++, idx++)
+ writel_relaxed(periph_state_ctx[idx],
+ clk_base + periph_regs[i].rst_reg);
+
+ fence_udelay(2, clk_base);
+}
+
+static int tegra_clk_periph_ctx_init(int banks)
+{
+ periph_state_ctx = kcalloc(2 * banks, sizeof(*periph_state_ctx),
+ GFP_KERNEL);
+ if (!periph_state_ctx)
+ return -ENOMEM;
+
+ return 0;
+}
+
struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
{
clk_base = regs;
@@ -220,6 +236,14 @@ struct clk ** __init tegra_clk_init(void __iomem *regs, int num, int banks)
clk_num = num;
+ if (IS_ENABLED(CONFIG_PM_SLEEP)) {
+ if (tegra_clk_periph_ctx_init(banks)) {
+ kfree(periph_clk_enb_refcnt);
+ kfree(clks);
+ return NULL;
+ }
+ }
+
return clks;
}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 905bf1096558..416a6b09f6a3 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -10,6 +10,65 @@
#include <linux/clkdev.h>
#include <linux/delay.h>
+#define CLK_OUT_ENB_L 0x010
+#define CLK_OUT_ENB_H 0x014
+#define CLK_OUT_ENB_U 0x018
+#define CLK_OUT_ENB_V 0x360
+#define CLK_OUT_ENB_W 0x364
+#define CLK_OUT_ENB_X 0x280
+#define CLK_OUT_ENB_Y 0x298
+#define CLK_ENB_PLLP_OUT_CPU BIT(31)
+#define CLK_OUT_ENB_SET_L 0x320
+#define CLK_OUT_ENB_CLR_L 0x324
+#define CLK_OUT_ENB_SET_H 0x328
+#define CLK_OUT_ENB_CLR_H 0x32c
+#define CLK_OUT_ENB_SET_U 0x330
+#define CLK_OUT_ENB_CLR_U 0x334
+#define CLK_OUT_ENB_SET_V 0x440
+#define CLK_OUT_ENB_CLR_V 0x444
+#define CLK_OUT_ENB_SET_W 0x448
+#define CLK_OUT_ENB_CLR_W 0x44c
+#define CLK_OUT_ENB_SET_X 0x284
+#define CLK_OUT_ENB_CLR_X 0x288
+#define CLK_OUT_ENB_SET_Y 0x29c
+#define CLK_OUT_ENB_CLR_Y 0x2a0
+
+#define RST_DEVICES_L 0x004
+#define RST_DEVICES_H 0x008
+#define RST_DEVICES_U 0x00C
+#define RST_DEVICES_V 0x358
+#define RST_DEVICES_W 0x35C
+#define RST_DEVICES_X 0x28C
+#define RST_DEVICES_Y 0x2a4
+#define RST_DEVICES_SET_L 0x300
+#define RST_DEVICES_CLR_L 0x304
+#define RST_DEVICES_SET_H 0x308
+#define RST_DEVICES_CLR_H 0x30c
+#define RST_DEVICES_SET_U 0x310
+#define RST_DEVICES_CLR_U 0x314
+#define RST_DEVICES_SET_V 0x430
+#define RST_DEVICES_CLR_V 0x434
+#define RST_DEVICES_SET_W 0x438
+#define RST_DEVICES_CLR_W 0x43c
+#define RST_DEVICES_SET_X 0x290
+#define RST_DEVICES_CLR_X 0x294
+#define RST_DEVICES_SET_Y 0x2a8
+#define RST_DEVICES_CLR_Y 0x2ac
+
+/*
+ * The Tegra CLK_OUT_ENB registers contain some undefined, unused bits, and
+ * accidentally writing 1 to any of them can cause a PSLVERR. The masks below
+ * define the valid bits of each CLK_OUT_ENB register so that only the valid
+ * clocks are turned on.
+ */
+#define TEGRA210_CLK_ENB_VLD_MSK_L 0xdcd7dff9
+#define TEGRA210_CLK_ENB_VLD_MSK_H 0x87d1f3e7
+#define TEGRA210_CLK_ENB_VLD_MSK_U 0xf3fed3fa
+#define TEGRA210_CLK_ENB_VLD_MSK_V 0xffc18cfb
+#define TEGRA210_CLK_ENB_VLD_MSK_W 0x793fb7ff
+#define TEGRA210_CLK_ENB_VLD_MSK_X 0x3fe66fff
+#define TEGRA210_CLK_ENB_VLD_MSK_Y 0xfc1fc7ff
+
/**
* struct tegra_clk_sync_source - external clock source from codec
*
@@ -669,6 +728,9 @@ struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
* Flags:
* TEGRA_DIVIDER_2 - LP cluster has additional divider. This flag indicates
* that this is LP cluster clock.
+ * TEGRA210_CPU_CLK - This flag identifies the CPU cluster clock for the
+ * gen5 super mux when the PLLP branches are used as parents. Routing the
+ * PLLP branches to the CPU additionally requires setting the PLLP_OUT_CPU
+ * bit in the clock registers.
*/
struct tegra_clk_super_mux {
struct clk_hw hw;
@@ -685,6 +747,7 @@ struct tegra_clk_super_mux {
#define to_clk_super_mux(_hw) container_of(_hw, struct tegra_clk_super_mux, hw)
#define TEGRA_DIVIDER_2 BIT(0)
+#define TEGRA210_CPU_CLK BIT(1)
extern const struct clk_ops tegra_clk_super_ops;
struct clk *tegra_clk_register_super_mux(const char *name,
@@ -829,6 +892,10 @@ u16 tegra_pll_get_fixed_mdiv(struct clk_hw *hw, unsigned long input_rate);
int tegra_pll_p_div_to_hw(struct tegra_clk_pll *pll, u8 p_div);
int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
u8 frac_width, u8 flags);
+void tegra_clk_osc_resume(void __iomem *clk_base);
+void tegra_clk_set_pllp_out_cpu(bool enable);
+void tegra_clk_periph_suspend(void);
+void tegra_clk_periph_resume(void);
/* Combined read fence with delay */
@@ -838,4 +905,7 @@ int div_frac_get(unsigned long rate, unsigned parent_rate, u8 width,
udelay(delay); \
} while (0)
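[Editor's note: the fence_udelay() referenced throughout the Tegra changes pairs the delay with a dummy read back of the clock registers, forcing any posted bus writes to complete before the delay starts; that is why it replaces the bare udelay() calls in the PLLU sequences above. Roughly, the macro has this shape — a sketch; the exact definition sits a few lines up in this header.]

	/* Hedged sketch: read fence ahead of the delay. */
	#define fence_udelay(delay, reg)	\
		do {				\
			readl(reg);		\
			udelay(delay);		\
		} while (0)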
+bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw);
+struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter);
+
#endif /* TEGRA_CLK_H */
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index fdfb90058504..bb2f2836dab2 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -194,15 +194,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
if (err)
return NULL;
} else {
- const char *base_name = "adpll";
- char *buf;
-
- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
- strlen(postfix), GFP_KERNEL);
- if (!buf)
- return NULL;
- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
- name = buf;
+ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
+ d->pa, postfix);
}
return name;
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index a360d3109555..e001b9bcb6bf 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -107,7 +107,7 @@ static const struct omap_clkctrl_reg_data am3_l4hs_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data am3_pruss_ocp_clkctrl_regs[] __initconst = {
- { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" },
+ { AM3_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "pruss_ocp_gclk" },
{ 0 },
};
@@ -217,7 +217,7 @@ static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst
};
static const struct omap_clkctrl_reg_data am3_gfx_l3_clkctrl_regs[] __initconst = {
- { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { AM3_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "gfx_fck_div_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 2782d91838ac..af3e7805769e 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -73,7 +73,7 @@ static const struct omap_clkctrl_reg_data am4_mpu_clkctrl_regs[] __initconst = {
};
static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst = {
- { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP, "gfx_fck_div_ck" },
+ { AM4_GFX_L3_GFX_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "gfx_fck_div_ck" },
{ 0 },
};
@@ -126,7 +126,7 @@ static const struct omap_clkctrl_reg_data am4_l3s_clkctrl_regs[] __initconst = {
};
static const struct omap_clkctrl_reg_data am4_pruss_ocp_clkctrl_regs[] __initconst = {
- { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP, "pruss_ocp_gclk" },
+ { AM4_PRUSS_OCP_PRUSS_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_NO_IDLEST, "pruss_ocp_gclk" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index b10ed0429091..2b4dab632318 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -37,7 +37,7 @@ static const struct omap_clkctrl_reg_data omap4_mpuss_clkctrl_regs[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_tesla_clkctrl_regs[] __initconst = {
- { OMAP4_DSP_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m4x2_ck" },
+ { OMAP4_DSP_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_m4x2_ck" },
{ 0 },
};
@@ -219,7 +219,7 @@ static const struct omap_clkctrl_reg_data omap4_l3_2_clkctrl_regs[] __initconst
};
static const struct omap_clkctrl_reg_data omap4_ducati_clkctrl_regs[] __initconst = {
- { OMAP4_IPU_CLKCTRL, NULL, CLKF_HW_SUP, "ducati_clk_mux_ck" },
+ { OMAP4_IPU_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "ducati_clk_mux_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index e675e27f1203..c9608e5d993a 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -31,7 +31,7 @@ static const struct omap_clkctrl_reg_data omap5_mpu_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data omap5_dsp_clkctrl_regs[] __initconst = {
- { OMAP5_MMU_DSP_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h11x2_ck" },
+ { OMAP5_MMU_DSP_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h11x2_ck" },
{ 0 },
};
@@ -145,7 +145,7 @@ static const struct omap_clkctrl_reg_data omap5_l3main2_clkctrl_regs[] __initcon
};
static const struct omap_clkctrl_reg_data omap5_ipu_clkctrl_regs[] __initconst = {
- { OMAP5_MMU_IPU_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h22x2_ck" },
+ { OMAP5_MMU_IPU_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
{ 0 },
};
@@ -286,6 +286,12 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst
{ 0 },
};
+static const struct omap_clkctrl_reg_data omap5_iva_clkctrl_regs[] __initconst = {
+ { OMAP5_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { OMAP5_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { 0 },
+};
+
static const char * const omap5_dss_dss_clk_parents[] __initconst = {
"dpll_per_h12x2_ck",
NULL,
@@ -502,6 +508,7 @@ const struct omap_clkctrl_data omap5_clkctrl_data[] __initconst = {
{ 0x4a008d20, omap5_l4cfg_clkctrl_regs },
{ 0x4a008e20, omap5_l3instr_clkctrl_regs },
{ 0x4a009020, omap5_l4per_clkctrl_regs },
+ { 0x4a009220, omap5_iva_clkctrl_regs },
{ 0x4a009420, omap5_dss_clkctrl_regs },
{ 0x4a009520, omap5_gpu_clkctrl_regs },
{ 0x4a009620, omap5_l3init_clkctrl_regs },
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 9dd6185a4b4e..5f46782cebeb 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -25,7 +25,7 @@ static const struct omap_clkctrl_reg_data dra7_mpu_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_dsp1_clkctrl_regs[] __initconst = {
- { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" },
+ { DRA7_DSP1_MMU0_DSP1_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
{ 0 },
};
@@ -41,7 +41,7 @@ static const struct omap_clkctrl_bit_data dra7_mmu_ipu1_bit_data[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_ipu1_clkctrl_regs[] __initconst = {
- { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP, "ipu1-clkctrl:0000:24" },
+ { DRA7_IPU1_MMU_IPU1_CLKCTRL, dra7_mmu_ipu1_bit_data, CLKF_HW_SUP | CLKF_NO_IDLEST, "ipu1-clkctrl:0000:24" },
{ 0 },
};
@@ -137,7 +137,7 @@ static const struct omap_clkctrl_reg_data dra7_ipu_clkctrl_regs[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_dsp2_clkctrl_regs[] __initconst = {
- { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_dsp_m2_ck" },
+ { DRA7_DSP2_MMU0_DSP2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_dsp_m2_ck" },
{ 0 },
};
@@ -164,7 +164,7 @@ static const struct omap_clkctrl_reg_data dra7_l3main1_clkctrl_regs[] __initcons
};
static const struct omap_clkctrl_reg_data dra7_ipu2_clkctrl_regs[] __initconst = {
- { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h22x2_ck" },
+ { DRA7_IPU2_MMU_IPU2_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_core_h22x2_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index b0c0690a5a12..17b9a761242f 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -24,7 +24,7 @@
#include <linux/timekeeping.h>
#include "clock.h"
-#define NO_IDLEST 0x1
+#define NO_IDLEST 0
#define OMAP4_MODULEMODE_MASK 0x3
@@ -34,6 +34,9 @@
#define OMAP4_IDLEST_MASK (0x3 << 16)
#define OMAP4_IDLEST_SHIFT 16
+#define OMAP4_STBYST_MASK BIT(18)
+#define OMAP4_STBYST_SHIFT 18
+
#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
#define CLKCTRL_IDLEST_DISABLED 0x3
@@ -159,7 +162,7 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
- if (clk->flags & NO_IDLEST)
+ if (test_bit(NO_IDLEST, &clk->flags))
return 0;
/* Wait until module is enabled */
@@ -188,7 +191,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
- if (clk->flags & NO_IDLEST)
+ if (test_bit(NO_IDLEST, &clk->flags))
goto exit;
/* Wait until module is disabled */
@@ -381,7 +384,7 @@ _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
div_data->max_div, div_flags,
- &div->width, &div->table)) {
+ div)) {
pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
node, offset, data->bit);
kfree(div);
@@ -597,7 +600,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
if (reg_data->flags & CLKF_HW_SUP)
hw->enable_bit = MODULEMODE_HWCTRL;
if (reg_data->flags & CLKF_NO_IDLEST)
- hw->flags |= NO_IDLEST;
+ set_bit(NO_IDLEST, &hw->flags);
if (reg_data->clkdm_name)
hw->clkdm_name = reg_data->clkdm_name;
@@ -623,7 +626,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
init.ops = &omap4_clkctrl_clk_ops;
hw->hw.init = &init;
- clk = ti_clk_register(NULL, &hw->hw, init.name);
+ clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
if (IS_ERR_OR_NULL(clk))
goto cleanup;
@@ -648,3 +651,33 @@ cleanup:
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
_ti_omap4_clkctrl_setup);
+
+/**
+ * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
+ * @clk: clock to check standby status for
+ *
+ * Checks whether the provided clock is in standby. Returns true if the
+ * provided clock is a clkctrl type clock and it is currently in standby,
+ * false otherwise.
+ */
+bool ti_clk_is_in_standby(struct clk *clk)
+{
+ struct clk_hw *hw;
+ struct clk_hw_omap *hwclk;
+ u32 val;
+
+ hw = __clk_get_hw(clk);
+
+ if (!omap2_clk_is_hw_omap(hw))
+ return false;
+
+ hwclk = to_clk_hw_omap(hw);
+
+ val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);
+
+ if (val & OMAP4_STBYST_MASK)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);
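[Editor's note: a hedged usage sketch for the new export — a caller such as a remote-processor driver can poll the helper for a bounded time before powering a domain down. The function name and timing here are illustrative, not taken from this patch.]

	/* Hedged sketch: bounded poll for a module to reach standby. */
	static int example_wait_for_standby(struct clk *clk)
	{
		unsigned int retries = 100;

		while (retries--) {
			if (ti_clk_is_in_standby(clk))
				return 0;
			udelay(10);
		}

		return -ETIMEDOUT;
	}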
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index e4b8392ff63c..e6995c04001e 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -20,9 +20,11 @@ struct clk_omap_divider {
struct clk_hw hw;
struct clk_omap_reg reg;
u8 shift;
- u8 width;
u8 flags;
s8 latch;
+ u16 min;
+ u16 max;
+ u16 mask;
const struct clk_div_table *table;
u32 context;
};
@@ -220,8 +222,7 @@ void ti_clk_latch(struct clk_omap_reg *reg, s8 shift);
struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
- u8 flags, u8 *width,
- const struct clk_div_table **table);
+ u8 flags, struct clk_omap_divider *div);
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 6cb863c13648..28080df92f72 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -26,30 +26,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define div_mask(d) ((1 << ((d)->width)) - 1)
-
-static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
-{
- unsigned int maxdiv = 0;
- const struct clk_div_table *clkt;
-
- for (clkt = table; clkt->div; clkt++)
- if (clkt->div > maxdiv)
- maxdiv = clkt->div;
- return maxdiv;
-}
-
-static unsigned int _get_maxdiv(struct clk_omap_divider *divider)
-{
- if (divider->flags & CLK_DIVIDER_ONE_BASED)
- return div_mask(divider);
- if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
- return 1 << div_mask(divider);
- if (divider->table)
- return _get_table_maxdiv(divider->table);
- return div_mask(divider) + 1;
-}
-
static unsigned int _get_table_div(const struct clk_div_table *table,
unsigned int val)
{
@@ -61,6 +37,34 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
return 0;
}
+static void _setup_mask(struct clk_omap_divider *divider)
+{
+ u16 mask;
+ u32 max_val;
+ const struct clk_div_table *clkt;
+
+ if (divider->table) {
+ max_val = 0;
+
+ for (clkt = divider->table; clkt->div; clkt++)
+ if (clkt->val > max_val)
+ max_val = clkt->val;
+ } else {
+ max_val = divider->max;
+
+ if (!(divider->flags & CLK_DIVIDER_ONE_BASED) &&
+ !(divider->flags & CLK_DIVIDER_POWER_OF_TWO))
+ max_val--;
+ }
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ mask = fls(max_val) - 1;
+ else
+ mask = max_val;
+
+ divider->mask = (1 << fls(mask)) - 1;
+}
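[Editor's note: to make the arithmetic in _setup_mask() concrete — for a power-of-two divider with max = 16, max_val stays 16, mask = fls(16) - 1 = 4 (the largest register value), and divider->mask = (1 << fls(4)) - 1 = 0x7, i.e. a three-bit field. For a plain zero-based divider with max = 16, max_val is decremented to 15 and the mask comes out as 0xf. A hedged one-function restatement of the power-of-two case:]

	/* Hedged sketch: mask width for a power-of-two divider field. */
	static u16 example_pot_field_mask(u16 max_div)
	{
		u16 max_val = fls(max_div) - 1;	/* largest register value, e.g. 16 -> 4 */

		return (1 << fls(max_val)) - 1;	/* bits needed to hold it, e.g. -> 0x7 */
	}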
+
static unsigned int _get_div(struct clk_omap_divider *divider, unsigned int val)
{
if (divider->flags & CLK_DIVIDER_ONE_BASED)
@@ -101,7 +105,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw,
unsigned int div, val;
val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
- val &= div_mask(divider);
+ val &= divider->mask;
div = _get_div(divider, val);
if (!div) {
@@ -180,7 +184,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!rate)
rate = 1;
- maxdiv = _get_maxdiv(divider);
+ maxdiv = divider->max;
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
@@ -219,7 +223,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
}
if (!bestdiv) {
- bestdiv = _get_maxdiv(divider);
+ bestdiv = divider->max;
*best_parent_rate =
clk_hw_round_rate(clk_hw_get_parent(hw), 1);
}
@@ -249,17 +253,16 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
divider = to_clk_omap_divider(hw);
div = DIV_ROUND_UP(parent_rate, rate);
- value = _get_val(divider, div);
- if (value > div_mask(divider))
- value = div_mask(divider);
+ if (div > divider->max)
+ div = divider->max;
+ if (div < divider->min)
+ div = divider->min;
- if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
- val = div_mask(divider) << (divider->shift + 16);
- } else {
- val = ti_clk_ll_ops->clk_readl(&divider->reg);
- val &= ~(div_mask(divider) << divider->shift);
- }
+ value = _get_val(divider, div);
+
+ val = ti_clk_ll_ops->clk_readl(&divider->reg);
+ val &= ~(divider->mask << divider->shift);
val |= value << divider->shift;
ti_clk_ll_ops->clk_writel(val, &divider->reg);
@@ -280,7 +283,7 @@ static int clk_divider_save_context(struct clk_hw *hw)
u32 val;
val = ti_clk_ll_ops->clk_readl(&divider->reg) >> divider->shift;
- divider->context = val & div_mask(divider);
+ divider->context = val & divider->mask;
return 0;
}
@@ -297,7 +300,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
u32 val;
val = ti_clk_ll_ops->clk_readl(&divider->reg);
- val &= ~(div_mask(divider) << divider->shift);
+ val &= ~(divider->mask << divider->shift);
val |= divider->context << divider->shift;
ti_clk_ll_ops->clk_writel(val, &divider->reg);
}
@@ -310,47 +313,26 @@ const struct clk_ops ti_clk_divider_ops = {
.restore_context = clk_divider_restore_context,
};
-static struct clk *_register_divider(struct device *dev, const char *name,
- const char *parent_name,
- unsigned long flags,
- struct clk_omap_reg *reg,
- u8 shift, u8 width, s8 latch,
- u8 clk_divider_flags,
- const struct clk_div_table *table)
+static struct clk *_register_divider(struct device_node *node,
+ u32 flags,
+ struct clk_omap_divider *div)
{
- struct clk_omap_divider *div;
struct clk *clk;
struct clk_init_data init;
+ const char *parent_name;
- if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
- if (width + shift > 16) {
- pr_warn("divider value exceeds LOWORD field\n");
- return ERR_PTR(-EINVAL);
- }
- }
-
- /* allocate the divider */
- div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div)
- return ERR_PTR(-ENOMEM);
+ parent_name = of_clk_get_parent_name(node, 0);
- init.name = name;
+ init.name = node->name;
init.ops = &ti_clk_divider_ops;
init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
- /* struct clk_divider assignments */
- memcpy(&div->reg, reg, sizeof(*reg));
- div->shift = shift;
- div->width = width;
- div->latch = latch;
- div->flags = clk_divider_flags;
div->hw.init = &init;
- div->table = table;
/* register the clock */
- clk = ti_clk_register(dev, &div->hw, name);
+ clk = ti_clk_register(NULL, &div->hw, node->name);
if (IS_ERR(clk))
kfree(div);
@@ -359,34 +341,17 @@ static struct clk *_register_divider(struct device *dev, const char *name,
}
int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
- u8 flags, u8 *width,
- const struct clk_div_table **table)
+ u8 flags, struct clk_omap_divider *divider)
{
int valid_div = 0;
- u32 val;
- int div;
int i;
struct clk_div_table *tmp;
+ u16 min_div = 0;
if (!div_table) {
- if (flags & CLKF_INDEX_STARTS_AT_ONE)
- val = 1;
- else
- val = 0;
-
- div = 1;
-
- while (div < max_div) {
- if (flags & CLKF_INDEX_POWER_OF_TWO)
- div <<= 1;
- else
- div++;
- val++;
- }
-
- *width = fls(val);
- *table = NULL;
-
+ divider->min = 1;
+ divider->max = max_div;
+ _setup_mask(divider);
return 0;
}
@@ -403,30 +368,32 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
num_dividers = i;
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
- if (!tmp) {
- *table = ERR_PTR(-ENOMEM);
+ if (!tmp)
return -ENOMEM;
- }
valid_div = 0;
- *width = 0;
for (i = 0; i < num_dividers; i++)
if (div_table[i] > 0) {
tmp[valid_div].div = div_table[i];
tmp[valid_div].val = i;
valid_div++;
- *width = i;
+ if (div_table[i] > max_div)
+ max_div = div_table[i];
+ if (!min_div || div_table[i] < min_div)
+ min_div = div_table[i];
}
- *width = fls(*width);
- *table = tmp;
+ divider->min = min_div;
+ divider->max = max_div;
+ divider->table = tmp;
+ _setup_mask(divider);
return 0;
}
-static struct clk_div_table *
-__init ti_clk_get_div_table(struct device_node *node)
+static int __init ti_clk_get_div_table(struct device_node *node,
+ struct clk_omap_divider *div)
{
struct clk_div_table *table;
const __be32 *divspec;
@@ -438,7 +405,7 @@ __init ti_clk_get_div_table(struct device_node *node)
divspec = of_get_property(node, "ti,dividers", &num_div);
if (!divspec)
- return NULL;
+ return 0;
num_div /= 4;
@@ -453,13 +420,12 @@ __init ti_clk_get_div_table(struct device_node *node)
if (!valid_div) {
pr_err("no valid dividers for %pOFn table\n", node);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
table = kcalloc(valid_div + 1, sizeof(*table), GFP_KERNEL);
-
if (!table)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
valid_div = 0;
@@ -472,19 +438,20 @@ __init ti_clk_get_div_table(struct device_node *node)
}
}
- return table;
+ div->table = table;
+
+ return 0;
}
-static int _get_divider_width(struct device_node *node,
- const struct clk_div_table *table,
- u8 flags)
+static int _populate_divider_min_max(struct device_node *node,
+ struct clk_omap_divider *divider)
{
- u32 min_div;
- u32 max_div;
- u32 val = 0;
- u32 div;
+ u32 min_div = 0;
+ u32 max_div = 0;
+ u32 val;
+ const struct clk_div_table *clkt;
- if (!table) {
+ if (!divider->table) {
/* Clk divider table not provided, determine min/max divs */
if (of_property_read_u32(node, "ti,min-div", &min_div))
min_div = 1;
@@ -493,75 +460,62 @@ static int _get_divider_width(struct device_node *node,
pr_err("no max-div for %pOFn!\n", node);
return -EINVAL;
}
-
- /* Determine bit width for the field */
- if (flags & CLK_DIVIDER_ONE_BASED)
- val = 1;
-
- div = min_div;
-
- while (div < max_div) {
- if (flags & CLK_DIVIDER_POWER_OF_TWO)
- div <<= 1;
- else
- div++;
- val++;
- }
} else {
- div = 0;
- while (table[div].div) {
- val = table[div].val;
- div++;
+ for (clkt = divider->table; clkt->div; clkt++) {
+ val = clkt->div;
+ if (val > max_div)
+ max_div = val;
+ if (!min_div || val < min_div)
+ min_div = val;
}
}
- return fls(val);
+ divider->min = min_div;
+ divider->max = max_div;
+ _setup_mask(divider);
+
+ return 0;
}
static int __init ti_clk_divider_populate(struct device_node *node,
- struct clk_omap_reg *reg, const struct clk_div_table **table,
- u32 *flags, u8 *div_flags, u8 *width, u8 *shift, s8 *latch)
+ struct clk_omap_divider *div,
+ u32 *flags)
{
u32 val;
int ret;
- ret = ti_clk_get_reg_addr(node, 0, reg);
+ ret = ti_clk_get_reg_addr(node, 0, &div->reg);
if (ret)
return ret;
if (!of_property_read_u32(node, "ti,bit-shift", &val))
- *shift = val;
+ div->shift = val;
else
- *shift = 0;
+ div->shift = 0;
- if (latch) {
- if (!of_property_read_u32(node, "ti,latch-bit", &val))
- *latch = val;
- else
- *latch = -EINVAL;
- }
+ if (!of_property_read_u32(node, "ti,latch-bit", &val))
+ div->latch = val;
+ else
+ div->latch = -EINVAL;
*flags = 0;
- *div_flags = 0;
+ div->flags = 0;
if (of_property_read_bool(node, "ti,index-starts-at-one"))
- *div_flags |= CLK_DIVIDER_ONE_BASED;
+ div->flags |= CLK_DIVIDER_ONE_BASED;
if (of_property_read_bool(node, "ti,index-power-of-two"))
- *div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+ div->flags |= CLK_DIVIDER_POWER_OF_TWO;
if (of_property_read_bool(node, "ti,set-rate-parent"))
*flags |= CLK_SET_RATE_PARENT;
- *table = ti_clk_get_div_table(node);
-
- if (IS_ERR(*table))
- return PTR_ERR(*table);
-
- *width = _get_divider_width(node, *table, *div_flags);
+ ret = ti_clk_get_div_table(node, div);
+ if (ret)
+ return ret;
- return 0;
+ return _populate_divider_min_max(node, div);
}
/**
@@ -573,24 +527,17 @@ static int __init ti_clk_divider_populate(struct device_node *node,
static void __init of_ti_divider_clk_setup(struct device_node *node)
{
struct clk *clk;
- const char *parent_name;
- struct clk_omap_reg reg;
- u8 clk_divider_flags = 0;
- u8 width = 0;
- u8 shift = 0;
- s8 latch = -EINVAL;
- const struct clk_div_table *table = NULL;
u32 flags = 0;
+ struct clk_omap_divider *div;
- parent_name = of_clk_get_parent_name(node, 0);
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
- if (ti_clk_divider_populate(node, &reg, &table, &flags,
- &clk_divider_flags, &width, &shift, &latch))
+ if (ti_clk_divider_populate(node, div, &flags))
goto cleanup;
- clk = _register_divider(NULL, node->name, parent_name, flags, &reg,
- shift, width, latch, clk_divider_flags, table);
-
+ clk = _register_divider(node, flags, div);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
of_ti_clk_autoidle_setup(node);
@@ -598,22 +545,21 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
}
cleanup:
- kfree(table);
+ kfree(div->table);
+ kfree(div);
}
CLK_OF_DECLARE(divider_clk, "ti,divider-clock", of_ti_divider_clk_setup);
static void __init of_ti_composite_divider_clk_setup(struct device_node *node)
{
struct clk_omap_divider *div;
- u32 val;
+ u32 tmp;
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return;
- if (ti_clk_divider_populate(node, &div->reg, &div->table, &val,
- &div->flags, &div->width, &div->shift,
- NULL) < 0)
+ if (ti_clk_divider_populate(node, div, &tmp))
goto cleanup;
if (!ti_clk_add_component(node, &div->hw, CLK_COMPONENT_TYPE_DIVIDER))
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index c6aaca73cf86..12380236d7ab 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -64,8 +64,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
for (p = data; p->name; p++)
clk_num = max(clk_num, p->idx + 1);
- hw_data = devm_kzalloc(dev,
- sizeof(*hw_data) + clk_num * sizeof(struct clk_hw *),
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, clk_num),
GFP_KERNEL);
if (!hw_data)
return -ENOMEM;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f35a53ce8988..5fdd76cb1768 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -528,6 +528,7 @@ config SH_TIMER_MTU2
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
select CLKSRC_MMIO
+ select TIMER_OF
help
Enables the support for the Renesas OSTM.
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 9f09a59161e7..5b39d3701fa3 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -194,6 +194,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}
clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clk!\n");
+ return PTR_ERR(clk);
+ }
ret = clk_prepare_enable(clk);
if (ret) {
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 37c39b901bb1..3d06ba66008c 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -6,14 +6,14 @@
* Copyright (C) 2017 Chris Brandt
*/
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
+#include "timer-of.h"
+
/*
* The OSTM contains independent channels.
* The first OSTM channel probed will be set up as a free running
@@ -24,12 +24,6 @@
* driven clock event.
*/
-struct ostm_device {
- void __iomem *base;
- unsigned long ticks_per_jiffy;
- struct clock_event_device ced;
-};
-
static void __iomem *system_clock; /* For sched_clock() */
/* OSTM REGISTERS */
@@ -47,41 +41,32 @@ static void __iomem *system_clock; /* For sched_clock() */
#define CTL_ONESHOT 0x02
#define CTL_FREERUN 0x02
-static struct ostm_device *ced_to_ostm(struct clock_event_device *ced)
-{
- return container_of(ced, struct ostm_device, ced);
-}
-
-static void ostm_timer_stop(struct ostm_device *ostm)
+static void ostm_timer_stop(struct timer_of *to)
{
- if (readb(ostm->base + OSTM_TE) & TE) {
- writeb(TT, ostm->base + OSTM_TT);
+ if (readb(timer_of_base(to) + OSTM_TE) & TE) {
+ writeb(TT, timer_of_base(to) + OSTM_TT);
/*
* Read back the register simply to confirm the write operation
* has completed since I/O writes can sometimes get queued by
* the bus architecture.
*/
- while (readb(ostm->base + OSTM_TE) & TE)
+ while (readb(timer_of_base(to) + OSTM_TE) & TE)
;
}
}
-static int __init ostm_init_clksrc(struct ostm_device *ostm, unsigned long rate)
+static int __init ostm_init_clksrc(struct timer_of *to)
{
- /*
- * irq not used (clock sources don't use interrupts)
- */
-
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(0, ostm->base + OSTM_CMP);
- writeb(CTL_FREERUN, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(0, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_FREERUN, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
- return clocksource_mmio_init(ostm->base + OSTM_CNT,
- "ostm", rate,
- 300, 32, clocksource_mmio_readl_up);
+ return clocksource_mmio_init(timer_of_base(to) + OSTM_CNT,
+ to->np->full_name, timer_of_rate(to), 300,
+ 32, clocksource_mmio_readl_up);
}
static u64 notrace ostm_read_sched_clock(void)
@@ -89,87 +74,75 @@ static u64 notrace ostm_read_sched_clock(void)
return readl(system_clock);
}
-static void __init ostm_init_sched_clock(struct ostm_device *ostm,
- unsigned long rate)
+static void __init ostm_init_sched_clock(struct timer_of *to)
{
- system_clock = ostm->base + OSTM_CNT;
- sched_clock_register(ostm_read_sched_clock, 32, rate);
+ system_clock = timer_of_base(to) + OSTM_CNT;
+ sched_clock_register(ostm_read_sched_clock, 32, timer_of_rate(to));
}
static int ostm_clock_event_next(unsigned long delta,
- struct clock_event_device *ced)
+ struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(delta, ostm->base + OSTM_CMP);
- writeb(CTL_ONESHOT, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(delta, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_ONESHOT, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_shutdown(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
return 0;
}
static int ostm_set_periodic(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
- writel(ostm->ticks_per_jiffy - 1, ostm->base + OSTM_CMP);
- writeb(CTL_PERIODIC, ostm->base + OSTM_CTL);
- writeb(TS, ostm->base + OSTM_TS);
+ writel(timer_of_period(to) - 1, timer_of_base(to) + OSTM_CMP);
+ writeb(CTL_PERIODIC, timer_of_base(to) + OSTM_CTL);
+ writeb(TS, timer_of_base(to) + OSTM_TS);
return 0;
}
static int ostm_set_oneshot(struct clock_event_device *ced)
{
- struct ostm_device *ostm = ced_to_ostm(ced);
+ struct timer_of *to = to_timer_of(ced);
- ostm_timer_stop(ostm);
+ ostm_timer_stop(to);
return 0;
}
static irqreturn_t ostm_timer_interrupt(int irq, void *dev_id)
{
- struct ostm_device *ostm = dev_id;
+ struct clock_event_device *ced = dev_id;
- if (clockevent_state_oneshot(&ostm->ced))
- ostm_timer_stop(ostm);
+ if (clockevent_state_oneshot(ced))
+ ostm_timer_stop(to_timer_of(ced));
/* notify clockevent layer */
- if (ostm->ced.event_handler)
- ostm->ced.event_handler(&ostm->ced);
+ if (ced->event_handler)
+ ced->event_handler(ced);
return IRQ_HANDLED;
}
-static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
- unsigned long rate)
+static int __init ostm_init_clkevt(struct timer_of *to)
{
- struct clock_event_device *ced = &ostm->ced;
- int ret = -ENXIO;
-
- ret = request_irq(irq, ostm_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL,
- "ostm", ostm);
- if (ret) {
- pr_err("ostm: failed to request irq\n");
- return ret;
- }
+ struct clock_event_device *ced = &to->clkevt;
- ced->name = "ostm";
ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
ced->set_state_shutdown = ostm_shutdown;
ced->set_state_periodic = ostm_set_periodic;
@@ -178,79 +151,61 @@ static int __init ostm_init_clkevt(struct ostm_device *ostm, int irq,
ced->shift = 32;
ced->rating = 300;
ced->cpumask = cpumask_of(0);
- clockevents_config_and_register(ced, rate, 0xf, 0xffffffff);
+ clockevents_config_and_register(ced, timer_of_rate(to), 0xf,
+ 0xffffffff);
return 0;
}
static int __init ostm_init(struct device_node *np)
{
- struct ostm_device *ostm;
- int ret = -EFAULT;
- struct clk *ostm_clk = NULL;
- int irq;
- unsigned long rate;
-
- ostm = kzalloc(sizeof(*ostm), GFP_KERNEL);
- if (!ostm)
- return -ENOMEM;
-
- ostm->base = of_iomap(np, 0);
- if (!ostm->base) {
- pr_err("ostm: failed to remap I/O memory\n");
- goto err;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq < 0) {
- pr_err("ostm: Failed to get irq\n");
- goto err;
- }
+ struct timer_of *to;
+ int ret;
- ostm_clk = of_clk_get(np, 0);
- if (IS_ERR(ostm_clk)) {
- pr_err("ostm: Failed to get clock\n");
- ostm_clk = NULL;
- goto err;
- }
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- ret = clk_prepare_enable(ostm_clk);
- if (ret) {
- pr_err("ostm: Failed to enable clock\n");
- goto err;
+ to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+ if (system_clock) {
+ /*
+ * clock sources don't use interrupts; clock events do
+ */
+ to->flags |= TIMER_OF_IRQ;
+ to->of_irq.flags = IRQF_TIMER | IRQF_IRQPOLL;
+ to->of_irq.handler = ostm_timer_interrupt;
}
- rate = clk_get_rate(ostm_clk);
- ostm->ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
+ ret = timer_of_init(np, to);
+ if (ret)
+ goto err_free;
/*
* First probed device will be used as system clocksource. Any
* additional devices will be used as clock events.
*/
if (!system_clock) {
- ret = ostm_init_clksrc(ostm, rate);
-
- if (!ret) {
- ostm_init_sched_clock(ostm, rate);
- pr_info("ostm: used for clocksource\n");
- }
+ ret = ostm_init_clksrc(to);
+ if (ret)
+ goto err_cleanup;
+ ostm_init_sched_clock(to);
+ pr_info("%pOF: used for clocksource\n", np);
} else {
- ret = ostm_init_clkevt(ostm, irq, rate);
+ ret = ostm_init_clkevt(to);
+ if (ret)
+ goto err_cleanup;
- if (!ret)
- pr_info("ostm: used for clock events\n");
- }
-
-err:
- if (ret) {
- clk_disable_unprepare(ostm_clk);
- iounmap(ostm->base);
- kfree(ostm);
- return ret;
+ pr_info("%pOF: used for clock events\n", np);
}
return 0;
+
+err_cleanup:
+ timer_of_cleanup(to);
+err_free:
+ kfree(to);
+ return ret;
}
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index ef773db080e9..9cde50cb3220 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -25,6 +25,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
struct sh_cmt_device;
/*
@@ -1052,7 +1056,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -1072,7 +1076,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -1109,7 +1113,10 @@ static void __exit sh_cmt_exit(void)
platform_driver_unregister(&sh_cmt_device_driver);
}
-early_platform_init("earlytimer", &sh_cmt_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
+#endif
+
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 62812f80b5cc..64526e50d471 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -23,6 +23,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
struct sh_mtu2_device;
struct sh_mtu2_channel {
@@ -448,7 +452,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -468,7 +472,7 @@ static int sh_mtu2_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -517,7 +521,10 @@ static void __exit sh_mtu2_exit(void)
platform_driver_unregister(&sh_mtu2_device_driver);
}
-early_platform_init("earlytimer", &sh_mtu2_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_mtu2_device_driver);
+#endif
+
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 8c4f3753b36e..d49690d15536 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -24,6 +24,10 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
enum sh_tmu_model {
SH_TMU,
SH_TMU_SH3,
@@ -595,7 +599,7 @@ static int sh_tmu_probe(struct platform_device *pdev)
struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
int ret;
- if (!is_early_platform_device(pdev)) {
+ if (!is_sh_early_platform_device(pdev)) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
}
@@ -615,7 +619,8 @@ static int sh_tmu_probe(struct platform_device *pdev)
pm_runtime_idle(&pdev->dev);
return ret;
}
- if (is_early_platform_device(pdev))
+
+ if (is_sh_early_platform_device(pdev))
return 0;
out:
@@ -665,7 +670,10 @@ static void __exit sh_tmu_exit(void)
platform_driver_unregister(&sh_tmu_device_driver);
}
-early_platform_init("earlytimer", &sh_tmu_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
+#endif
+
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index 11ff701ff4bb..572da477c6d3 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -57,8 +57,8 @@ static __init int timer_of_irq_init(struct device_node *np,
if (of_irq->name) {
of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
if (ret < 0) {
- pr_err("Failed to get interrupt %s for %s\n",
- of_irq->name, np->full_name);
+ pr_err("Failed to get interrupt %s for %pOF\n",
+ of_irq->name, np);
return ret;
}
} else {
@@ -192,7 +192,7 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
}
if (!to->clkevt.name)
- to->clkevt.name = np->name;
+ to->clkevt.name = np->full_name;
to->np = np;
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 470c7ef02ea4..4e54856ce2a5 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -3,9 +3,9 @@
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
- * All RISC-V systems have a timer attached to every hart. These timers can be
- * read from the "time" and "timeh" CSRs, and can use the SBI to setup
- * events.
+ * All RISC-V systems have a timer attached to every hart. These timers can
+ * either be read via the "time" and "timeh" CSRs, with the SBI used to set
+ * up events, or be accessed directly through MMIO registers.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
@@ -13,14 +13,29 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/sched_clock.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/smp.h>
#include <asm/sbi.h>
+u64 __iomem *riscv_time_cmp;
+u64 __iomem *riscv_time_val;
+
+static inline void mmio_set_timer(u64 val)
+{
+ void __iomem *r;
+
+ r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
+ writeq_relaxed(val, r);
+}
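[Editor's note: mmio_set_timer() relies on the CLINT-style layout where each hart's 64-bit compare register occupies the next 8-byte slot; because riscv_time_cmp is a u64 __iomem pointer, adding the hart ID advances the address by hartid * 8 bytes. A hedged sketch of the matching read side — the driver itself keeps using get_cycles64() for reads, so this helper is purely illustrative.]

	/* Hedged sketch: MMIO read of the shared free-running timer. */
	static inline u64 example_mmio_get_timer(void)
	{
		return readq_relaxed(riscv_time_val);	/* lo-hi fallback on 32-bit */
	}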
+
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
- csr_set(sie, SIE_STIE);
- sbi_set_timer(get_cycles64() + delta);
+ csr_set(CSR_IE, IE_TIE);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_set_timer(get_cycles64() + delta);
+ else
+ mmio_set_timer(get_cycles64() + delta);
return 0;
}
@@ -61,13 +76,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
- csr_set(sie, SIE_STIE);
+ csr_set(CSR_IE, IE_TIE);
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
- csr_clear(sie, SIE_STIE);
+ csr_clear(CSR_IE, IE_TIE);
return 0;
}
@@ -76,7 +91,7 @@ void riscv_timer_interrupt(void)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
- csr_clear(sie, SIE_STIE);
+ csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
}
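
[Annotation, not part of the patch: a compact sketch of the two timer access paths the updated header comment describes — CSR reads when the SBI is available, MMIO otherwise. The function name is illustrative; csr_read()/CSR_TIME, readq_relaxed() and riscv_time_val are assumed to be set up as in the patch above.]

	static u64 timer_read_sketch(void)
	{
		/* S-mode with SBI: the "time"/"timeh" CSRs hold the count */
		if (IS_ENABLED(CONFIG_RISCV_SBI))
			return csr_read(CSR_TIME);

		/* M-mode: read the memory-mapped timer register directly */
		return readq_relaxed(riscv_time_val);
	}
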
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 00b113f4b958..17e67a84777d 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -562,11 +562,10 @@ static const struct iio_chan_spec quad8_channels[] = {
};
static int quad8_signal_read(struct counter_device *counter,
- struct counter_signal *signal, struct counter_signal_read_value *val)
+ struct counter_signal *signal, enum counter_signal_value *val)
{
const struct quad8_iio *const priv = counter->priv;
unsigned int state;
- enum counter_signal_level level;
/* Only Index signal levels can be read */
if (signal->id < 16)
@@ -575,22 +574,19 @@ static int quad8_signal_read(struct counter_device *counter,
state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
& BIT(signal->id - 16);
- level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
-
- counter_signal_read_value_set(val, COUNTER_SIGNAL_LEVEL, &level);
+ *val = (state) ? COUNTER_SIGNAL_HIGH : COUNTER_SIGNAL_LOW;
return 0;
}
static int quad8_count_read(struct counter_device *counter,
- struct counter_count *count, struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
const struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
unsigned int carry;
- unsigned long position;
int i;
flags = inb(base_offset + 1);
@@ -598,36 +594,27 @@ static int quad8_count_read(struct counter_device *counter,
carry = !!(flags & QUAD8_FLAG_CT);
/* Borrow XOR Carry effectively doubles count range */
- position = (unsigned long)(borrow ^ carry) << 24;
+ *val = (unsigned long)(borrow ^ carry) << 24;
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
for (i = 0; i < 3; i++)
- position |= (unsigned long)inb(base_offset) << (8 * i);
-
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+ *val |= (unsigned long)inb(base_offset) << (8 * i);
return 0;
}
static int quad8_count_write(struct counter_device *counter,
- struct counter_count *count, struct counter_count_write_value *val)
+ struct counter_count *count, unsigned long val)
{
const struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
- int err;
- unsigned long position;
int i;
- err = counter_count_write_value_get(&position, COUNTER_COUNT_POSITION,
- val);
- if (err)
- return err;
-
/* Only 24-bit values are supported */
- if (position > 0xFFFFFF)
+ if (val > 0xFFFFFF)
return -EINVAL;
/* Reset Byte Pointer */
@@ -635,7 +622,7 @@ static int quad8_count_write(struct counter_device *counter,
/* Counter can only be set via Preset Register */
for (i = 0; i < 3; i++)
- outb(position >> (8 * i), base_offset);
+ outb(val >> (8 * i), base_offset);
/* Transfer Preset Register to Counter */
outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
@@ -644,9 +631,9 @@ static int quad8_count_write(struct counter_device *counter,
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
/* Set Preset Register back to original value */
- position = priv->preset[count->id];
+ val = priv->preset[count->id];
for (i = 0; i < 3; i++)
- outb(position >> (8 * i), base_offset);
+ outb(val >> (8 * i), base_offset);
/* Reset Borrow, Carry, Compare, and Sign flags */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
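
[Annotation, not part of the patch: the "Borrow XOR Carry" trick above extends the 24-bit hardware count by one bit. A standalone illustration, assuming QUAD8_FLAG_BT and QUAD8_FLAG_CT are bits 0 and 1 of the flag register, as in this driver.]

	static unsigned long quad8_extended_count(unsigned int flags, u32 count24)
	{
		unsigned int borrow = flags & 0x1;	/* QUAD8_FLAG_BT (assumed bit 0) */
		unsigned int carry = !!(flags & 0x2);	/* QUAD8_FLAG_CT (assumed bit 1) */

		/* borrow ^ carry becomes bit 24, doubling the usable range */
		return ((unsigned long)(borrow ^ carry) << 24) |
			(count24 & 0xFFFFFF);
	}
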
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 2967d0a9ff91..c80fa76bb531 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -49,6 +49,17 @@ config STM32_LPTIMER_CNT
To compile this driver as a module, choose M here: the
module will be called stm32-lptimer-cnt.
+config TI_EQEP
+ tristate "TI eQEP counter driver"
+ depends on (SOC_AM33XX || COMPILE_TEST)
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Quadrature
+ Encoder Pulse (eQEP) counter driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ti-eqep.
+
config FTM_QUADDEC
tristate "Flex Timer Module Quadrature decoder driver"
depends on HAS_IOMEM && OF
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 40d35522937d..55142d1f4c43 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_COUNTER) += counter.o
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
+obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
index 106bc7180cd8..6a683d086008 100644
--- a/drivers/counter/counter.c
+++ b/drivers/counter/counter.c
@@ -220,86 +220,6 @@ ssize_t counter_device_enum_available_read(struct counter_device *counter,
}
EXPORT_SYMBOL_GPL(counter_device_enum_available_read);
-static const char *const counter_signal_level_str[] = {
- [COUNTER_SIGNAL_LEVEL_LOW] = "low",
- [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
-};
-
-/**
- * counter_signal_read_value_set - set counter_signal_read_value data
- * @val: counter_signal_read_value structure to set
- * @type: property Signal data represents
- * @data: Signal data
- *
- * This function sets an opaque counter_signal_read_value structure with the
- * provided Signal data.
- */
-void counter_signal_read_value_set(struct counter_signal_read_value *const val,
- const enum counter_signal_value_type type,
- void *const data)
-{
- if (type == COUNTER_SIGNAL_LEVEL)
- val->len = sprintf(val->buf, "%s\n",
- counter_signal_level_str[*(enum counter_signal_level *)data]);
- else
- val->len = 0;
-}
-EXPORT_SYMBOL_GPL(counter_signal_read_value_set);
-
-/**
- * counter_count_read_value_set - set counter_count_read_value data
- * @val: counter_count_read_value structure to set
- * @type: property Count data represents
- * @data: Count data
- *
- * This function sets an opaque counter_count_read_value structure with the
- * provided Count data.
- */
-void counter_count_read_value_set(struct counter_count_read_value *const val,
- const enum counter_count_value_type type,
- void *const data)
-{
- switch (type) {
- case COUNTER_COUNT_POSITION:
- val->len = sprintf(val->buf, "%lu\n", *(unsigned long *)data);
- break;
- default:
- val->len = 0;
- }
-}
-EXPORT_SYMBOL_GPL(counter_count_read_value_set);
-
-/**
- * counter_count_write_value_get - get counter_count_write_value data
- * @data: Count data
- * @type: property Count data represents
- * @val: counter_count_write_value structure containing data
- *
- * This function extracts Count data from the provided opaque
- * counter_count_write_value structure and stores it at the address provided by
- * @data.
- *
- * RETURNS:
- * 0 on success, negative error number on failure.
- */
-int counter_count_write_value_get(void *const data,
- const enum counter_count_value_type type,
- const struct counter_count_write_value *const val)
-{
- int err;
-
- switch (type) {
- case COUNTER_COUNT_POSITION:
- err = kstrtoul(val->buf, 0, data);
- if (err)
- return err;
- break;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(counter_count_write_value_get);
-
struct counter_attr_parm {
struct counter_device_attr_group *group;
const char *prefix;
@@ -369,6 +289,11 @@ struct counter_signal_unit {
struct counter_signal *signal;
};
+static const char *const counter_signal_value_str[] = {
+ [COUNTER_SIGNAL_LOW] = "low",
+ [COUNTER_SIGNAL_HIGH] = "high"
+};
+
static ssize_t counter_signal_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -377,13 +302,13 @@ static ssize_t counter_signal_show(struct device *dev,
const struct counter_signal_unit *const component = devattr->component;
struct counter_signal *const signal = component->signal;
int err;
- struct counter_signal_read_value val = { .buf = buf };
+ enum counter_signal_value val;
err = counter->ops->signal_read(counter, signal, &val);
if (err)
return err;
- return val.len;
+ return sprintf(buf, "%s\n", counter_signal_value_str[val]);
}
struct counter_name_unit {
@@ -788,13 +713,13 @@ static ssize_t counter_count_show(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
int err;
- struct counter_count_read_value val = { .buf = buf };
+ unsigned long val;
err = counter->ops->count_read(counter, count, &val);
if (err)
return err;
- return val.len;
+ return sprintf(buf, "%lu\n", val);
}
static ssize_t counter_count_store(struct device *dev,
@@ -806,9 +731,13 @@ static ssize_t counter_count_store(struct device *dev,
const struct counter_count_unit *const component = devattr->component;
struct counter_count *const count = component->count;
int err;
- struct counter_count_write_value val = { .buf = buf };
+ unsigned long val;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
- err = counter->ops->count_write(counter, count, &val);
+ err = counter->ops->count_write(counter, count, val);
if (err)
return err;
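
[Annotation, not part of the patch: with the opaque read/write value structures gone, the core now owns all sysfs string handling and drivers deal only in raw values. A hypothetical driver callback under the new contract; the foo_* names and FOO_CNT register are illustrative only.]

	struct foo_priv {
		struct regmap *regmap;	/* hypothetical driver state */
	};

	static int foo_count_read(struct counter_device *counter,
				  struct counter_count *count,
				  unsigned long *val)
	{
		struct foo_priv *priv = counter->priv;
		u32 cnt;

		regmap_read(priv->regmap, FOO_CNT, &cnt);
		*val = cnt;

		return 0;
	}
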
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 4046aa9f9234..c2b3fdfd8b77 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -178,31 +178,25 @@ static const enum counter_count_function ftm_quaddec_count_functions[] = {
static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_read_value *val)
+ unsigned long *val)
{
struct ftm_quaddec *const ftm = counter->priv;
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cntval);
+ *val = cntval;
return 0;
}
static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_write_value *val)
+ const unsigned long val)
{
struct ftm_quaddec *const ftm = counter->priv;
- u32 cnt;
- int err;
- err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
- if (err)
- return err;
-
- if (cnt != 0) {
+ if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
return -EINVAL;
}
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index bbc930a5962c..8e276eb655f5 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -347,7 +347,7 @@ static const struct iio_chan_spec stm32_lptim_cnt_channels = {
};
/**
- * stm32_lptim_cnt_function - enumerates stm32 LPTimer counter & encoder modes
+ * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes
* @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
* @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
*/
@@ -377,8 +377,7 @@ static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
};
static int stm32_lptim_cnt_read(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
struct stm32_lptim_cnt *const priv = counter->priv;
u32 cnt;
@@ -388,7 +387,7 @@ static int stm32_lptim_cnt_read(struct counter_device *counter,
if (ret)
return ret;
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+ *val = cnt;
return 0;
}
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 644ba18a72ad..3eafccec3beb 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -28,7 +28,7 @@ struct stm32_timer_cnt {
};
/**
- * stm32_count_function - enumerates stm32 timer counter encoder modes
+ * enum stm32_count_function - enumerates stm32 timer counter encoder modes
* @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
* @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
* @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
@@ -48,34 +48,27 @@ static enum counter_count_function stm32_count_functions[] = {
};
static int stm32_count_read(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val)
+ struct counter_count *count, unsigned long *val)
{
struct stm32_timer_cnt *const priv = counter->priv;
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
- counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+ *val = cnt;
return 0;
}
static int stm32_count_write(struct counter_device *counter,
struct counter_count *count,
- struct counter_count_write_value *val)
+ const unsigned long val)
{
struct stm32_timer_cnt *const priv = counter->priv;
- u32 cnt;
- int err;
-
- err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
- if (err)
- return err;
- if (cnt > priv->ceiling)
+ if (val > priv->ceiling)
return -EINVAL;
- return regmap_write(priv->regmap, TIM_CNT, cnt);
+ return regmap_write(priv->regmap, TIM_CNT, val);
}
static int stm32_count_function_get(struct counter_device *counter,
@@ -219,8 +212,8 @@ static ssize_t stm32_count_enable_write(struct counter_device *counter,
if (enable) {
regmap_read(priv->regmap, TIM_CR1, &cr1);
- if (!(cr1 & TIM_CR1_CEN))
- clk_enable(priv->clk);
+ if (!(cr1 & TIM_CR1_CEN))
+ clk_enable(priv->clk);
regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
TIM_CR1_CEN);
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
new file mode 100644
index 000000000000..1ff07faef27f
--- /dev/null
+++ b/drivers/counter/ti-eqep.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 David Lechner <david@lechnology.com>
+ *
+ * Counter driver for Texas Instruments Enhanced Quadrature Encoder Pulse (eQEP)
+ */
+
+#include <linux/bitops.h>
+#include <linux/counter.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+/* 32-bit registers */
+#define QPOSCNT 0x0
+#define QPOSINIT 0x4
+#define QPOSMAX 0x8
+#define QPOSCMP 0xc
+#define QPOSILAT 0x10
+#define QPOSSLAT 0x14
+#define QPOSLAT 0x18
+#define QUTMR 0x1c
+#define QUPRD 0x20
+
+/* 16-bit registers */
+#define QWDTMR 0x0 /* 0x24 */
+#define QWDPRD 0x2 /* 0x26 */
+#define QDECCTL 0x4 /* 0x28 */
+#define QEPCTL 0x6 /* 0x2a */
+#define QCAPCTL 0x8 /* 0x2c */
+#define QPOSCTL 0xa /* 0x2e */
+#define QEINT 0xc /* 0x30 */
+#define QFLG 0xe /* 0x32 */
+#define QCLR 0x10 /* 0x34 */
+#define QFRC 0x12 /* 0x36 */
+#define QEPSTS 0x14 /* 0x38 */
+#define QCTMR 0x16 /* 0x3a */
+#define QCPRD 0x18 /* 0x3c */
+#define QCTMRLAT 0x1a /* 0x3e */
+#define QCPRDLAT 0x1c /* 0x40 */
+
+#define QDECCTL_QSRC_SHIFT 14
+#define QDECCTL_QSRC GENMASK(15, 14)
+#define QDECCTL_SOEN BIT(13)
+#define QDECCTL_SPSEL BIT(12)
+#define QDECCTL_XCR BIT(11)
+#define QDECCTL_SWAP BIT(10)
+#define QDECCTL_IGATE BIT(9)
+#define QDECCTL_QAP BIT(8)
+#define QDECCTL_QBP BIT(7)
+#define QDECCTL_QIP BIT(6)
+#define QDECCTL_QSP BIT(5)
+
+#define QEPCTL_FREE_SOFT GENMASK(15, 14)
+#define QEPCTL_PCRM GENMASK(13, 12)
+#define QEPCTL_SEI GENMASK(11, 10)
+#define QEPCTL_IEI GENMASK(9, 8)
+#define QEPCTL_SWI BIT(7)
+#define QEPCTL_SEL BIT(6)
+#define QEPCTL_IEL GENMASK(5, 4)
+#define QEPCTL_PHEN BIT(3)
+#define QEPCTL_QCLM BIT(2)
+#define QEPCTL_UTE BIT(1)
+#define QEPCTL_WDE BIT(0)
+
+/* EQEP Inputs */
+enum {
+ TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
+ TI_EQEP_SIGNAL_QEPB, /* QEPB/XDIR */
+};
+
+/* Position Counter Input Modes */
+enum {
+ TI_EQEP_COUNT_FUNC_QUAD_COUNT,
+ TI_EQEP_COUNT_FUNC_DIR_COUNT,
+ TI_EQEP_COUNT_FUNC_UP_COUNT,
+ TI_EQEP_COUNT_FUNC_DOWN_COUNT,
+};
+
+enum {
+ TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES,
+ TI_EQEP_SYNAPSE_ACTION_RISING_EDGE,
+ TI_EQEP_SYNAPSE_ACTION_NONE,
+};
+
+struct ti_eqep_cnt {
+ struct counter_device counter;
+ struct regmap *regmap32;
+ struct regmap *regmap16;
+};
+
+static int ti_eqep_count_read(struct counter_device *counter,
+ struct counter_count *count, unsigned long *val)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 cnt;
+
+ regmap_read(priv->regmap32, QPOSCNT, &cnt);
+ *val = cnt;
+
+ return 0;
+}
+
+static int ti_eqep_count_write(struct counter_device *counter,
+ struct counter_count *count, unsigned long val)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 max;
+
+ regmap_read(priv->regmap32, QPOSMAX, &max);
+ if (val > max)
+ return -EINVAL;
+
+ return regmap_write(priv->regmap32, QPOSCNT, val);
+}
+
+static int ti_eqep_function_get(struct counter_device *counter,
+ struct counter_count *count, size_t *function)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qdecctl;
+
+ regmap_read(priv->regmap16, QDECCTL, &qdecctl);
+ *function = (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT;
+
+ return 0;
+}
+
+static int ti_eqep_function_set(struct counter_device *counter,
+ struct counter_count *count, size_t function)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+
+ return regmap_write_bits(priv->regmap16, QDECCTL, QDECCTL_QSRC,
+ function << QDECCTL_QSRC_SHIFT);
+}
+
+static int ti_eqep_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse, size_t *action)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ size_t function;
+ u32 qdecctl;
+ int err;
+
+ err = ti_eqep_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ switch (function) {
+ case TI_EQEP_COUNT_FUNC_QUAD_COUNT:
+ /* In quadrature mode, the rising and falling edges of both
+ * QEPA and QEPB trigger QCLK.
+ */
+ *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case TI_EQEP_COUNT_FUNC_DIR_COUNT:
+ /* In direction-count mode only the rising edge of QEPA is
+ * counted and QEPB gives the direction.
+ */
+ switch (synapse->signal->id) {
+ case TI_EQEP_SIGNAL_QEPA:
+ *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ default:
+ *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ break;
+ }
+ break;
+ case TI_EQEP_COUNT_FUNC_UP_COUNT:
+ case TI_EQEP_COUNT_FUNC_DOWN_COUNT:
+ /* In up/down-count modes only QEPA is counted and QEPB is not
+ * used.
+ */
+ switch (synapse->signal->id) {
+ case TI_EQEP_SIGNAL_QEPA:
+ err = regmap_read(priv->regmap16, QDECCTL, &qdecctl);
+ if (err)
+ return err;
+
+ if (qdecctl & QDECCTL_XCR)
+ *action = TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES;
+ else
+ *action = TI_EQEP_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ default:
+ *action = TI_EQEP_SYNAPSE_ACTION_NONE;
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops ti_eqep_counter_ops = {
+ .count_read = ti_eqep_count_read,
+ .count_write = ti_eqep_count_write,
+ .function_get = ti_eqep_function_get,
+ .function_set = ti_eqep_function_set,
+ .action_get = ti_eqep_action_get,
+};
+
+static ssize_t ti_eqep_position_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qposmax;
+
+ regmap_read(priv->regmap32, QPOSMAX, &qposmax);
+
+ return sprintf(buf, "%u\n", qposmax);
+}
+
+static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ u32 res;
+
+ err = kstrtouint(buf, 0, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write(priv->regmap32, QPOSMAX, res);
+
+ return len;
+}
+
+static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qposinit;
+
+ regmap_read(priv->regmap32, QPOSINIT, &qposinit);
+
+ return sprintf(buf, "%u\n", qposinit);
+}
+
+static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ u32 res;
+
+ err = kstrtouint(buf, 0, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write(priv->regmap32, QPOSINIT, res);
+
+ return len;
+}
+
+static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, char *buf)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ u32 qepctl;
+
+ regmap_read(priv->regmap16, QEPCTL, &qepctl);
+
+ return sprintf(buf, "%u\n", !!(qepctl & QEPCTL_PHEN));
+}
+
+static ssize_t ti_eqep_position_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *ext_priv, const char *buf,
+ size_t len)
+{
+ struct ti_eqep_cnt *priv = counter->priv;
+ int err;
+ bool res;
+
+ err = kstrtobool(buf, &res);
+ if (err < 0)
+ return err;
+
+ regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, res ? -1 : 0);
+
+ return len;
+}
+
+static struct counter_count_ext ti_eqep_position_ext[] = {
+ {
+ .name = "ceiling",
+ .read = ti_eqep_position_ceiling_read,
+ .write = ti_eqep_position_ceiling_write,
+ },
+ {
+ .name = "floor",
+ .read = ti_eqep_position_floor_read,
+ .write = ti_eqep_position_floor_write,
+ },
+ {
+ .name = "enable",
+ .read = ti_eqep_position_enable_read,
+ .write = ti_eqep_position_enable_write,
+ },
+};
+
+static struct counter_signal ti_eqep_signals[] = {
+ [TI_EQEP_SIGNAL_QEPA] = {
+ .id = TI_EQEP_SIGNAL_QEPA,
+ .name = "QEPA"
+ },
+ [TI_EQEP_SIGNAL_QEPB] = {
+ .id = TI_EQEP_SIGNAL_QEPB,
+ .name = "QEPB"
+ },
+};
+
+static const enum counter_count_function ti_eqep_position_functions[] = {
+ [TI_EQEP_COUNT_FUNC_QUAD_COUNT] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+ [TI_EQEP_COUNT_FUNC_DIR_COUNT] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
+ [TI_EQEP_COUNT_FUNC_UP_COUNT] = COUNTER_COUNT_FUNCTION_INCREASE,
+ [TI_EQEP_COUNT_FUNC_DOWN_COUNT] = COUNTER_COUNT_FUNCTION_DECREASE,
+};
+
+static const enum counter_synapse_action ti_eqep_position_synapse_actions[] = {
+ [TI_EQEP_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ [TI_EQEP_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [TI_EQEP_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static struct counter_synapse ti_eqep_position_synapses[] = {
+ {
+ .actions_list = ti_eqep_position_synapse_actions,
+ .num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
+ .signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPA],
+ },
+ {
+ .actions_list = ti_eqep_position_synapse_actions,
+ .num_actions = ARRAY_SIZE(ti_eqep_position_synapse_actions),
+ .signal = &ti_eqep_signals[TI_EQEP_SIGNAL_QEPB],
+ },
+};
+
+static struct counter_count ti_eqep_counts[] = {
+ {
+ .id = 0,
+ .name = "QPOSCNT",
+ .functions_list = ti_eqep_position_functions,
+ .num_functions = ARRAY_SIZE(ti_eqep_position_functions),
+ .synapses = ti_eqep_position_synapses,
+ .num_synapses = ARRAY_SIZE(ti_eqep_position_synapses),
+ .ext = ti_eqep_position_ext,
+ .num_ext = ARRAY_SIZE(ti_eqep_position_ext),
+ },
+};
+
+static const struct regmap_config ti_eqep_regmap32_config = {
+ .name = "32-bit",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x24,
+};
+
+static const struct regmap_config ti_eqep_regmap16_config = {
+ .name = "16-bit",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .max_register = 0x1e,
+};
+
+static int ti_eqep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ti_eqep_cnt *priv;
+ void __iomem *base;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap32 = devm_regmap_init_mmio(dev, base,
+ &ti_eqep_regmap32_config);
+ if (IS_ERR(priv->regmap32))
+ return PTR_ERR(priv->regmap32);
+
+ priv->regmap16 = devm_regmap_init_mmio(dev, base + 0x24,
+ &ti_eqep_regmap16_config);
+ if (IS_ERR(priv->regmap16))
+ return PTR_ERR(priv->regmap16);
+
+ priv->counter.name = dev_name(dev);
+ priv->counter.parent = dev;
+ priv->counter.ops = &ti_eqep_counter_ops;
+ priv->counter.counts = ti_eqep_counts;
+ priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts);
+ priv->counter.signals = ti_eqep_signals;
+ priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals);
+ priv->counter.priv = priv;
+
+ platform_set_drvdata(pdev, priv);
+
+ /*
+ * Need to make sure power is turned on. On AM33xx, this comes from the
+ * parent PWMSS bus driver. On AM17xx, this comes from the PSC power
+ * domain.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ err = counter_register(&priv->counter);
+ if (err < 0) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ti_eqep_remove(struct platform_device *pdev)
+{
+ struct ti_eqep_cnt *priv = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ counter_unregister(&priv->counter);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_eqep_of_match[] = {
+ { .compatible = "ti,am3352-eqep", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ti_eqep_of_match);
+
+static struct platform_driver ti_eqep_driver = {
+ .probe = ti_eqep_probe,
+ .remove = ti_eqep_remove,
+ .driver = {
+ .name = "ti-eqep-cnt",
+ .of_match_table = ti_eqep_of_match,
+ },
+};
+module_platform_driver(ti_eqep_driver);
+
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI eQEP counter driver");
+MODULE_LICENSE("GPL v2");
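
[Annotation, not part of the patch: the function_get/function_set pair above works because the QSRC field of QDECCTL encodes the four position counter input modes directly as the enum values. A standalone decode sketch that mirrors the driver logic.]

	static size_t ti_eqep_qsrc_decode(u16 qdecctl)
	{
		/* 0 = quadrature, 1 = direction, 2 = up, 3 = down count */
		return (qdecctl & QDECCTL_QSRC) >> QDECCTL_QSRC_SHIFT;
	}
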
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a905796f7f85..3858d86cf409 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -49,14 +49,6 @@ config ARM_ARMADA_8K_CPUFREQ
If in doubt, say N.
-# big LITTLE core layer and glue drivers
-config ARM_BIG_LITTLE_CPUFREQ
- tristate "Generic ARM big LITTLE CPUfreq driver"
- depends on ARM_CPU_TOPOLOGY && HAVE_CLK
- select PM_OPP
- help
- This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
-
config ARM_SCPI_CPUFREQ
tristate "SCPI based CPUfreq driver"
depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
@@ -69,7 +61,9 @@ config ARM_SCPI_CPUFREQ
config ARM_VEXPRESS_SPC_CPUFREQ
tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ depends on ARM_CPU_TOPOLOGY && HAVE_CLK
+ depends on ARCH_VEXPRESS_SPC
+ select PM_OPP
help
This adds the CPUfreq driver support for Versatile Express
big.LITTLE platforms using SPC for power management.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9a9f5ccd13d9..f6670c4abbb0 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -47,8 +47,6 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
-
obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
deleted file mode 100644
index 7fe52fcddcf1..000000000000
--- a/drivers/cpufreq/arm_big_little.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * ARM big.LITTLE Platforms CPUFreq support
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/clk.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/topology.h>
-#include <linux/types.h>
-
-#include "arm_big_little.h"
-
-/* Currently we support only two clusters */
-#define A15_CLUSTER 0
-#define A7_CLUSTER 1
-#define MAX_CLUSTERS 2
-
-#ifdef CONFIG_BL_SWITCHER
-#include <asm/bL_switcher.h>
-static bool bL_switching_enabled;
-#define is_bL_switching_enabled() bL_switching_enabled
-#define set_switching_enabled(x) (bL_switching_enabled = (x))
-#else
-#define is_bL_switching_enabled() false
-#define set_switching_enabled(x) do { } while (0)
-#define bL_switch_request(...) do { } while (0)
-#define bL_switcher_put_enabled() do { } while (0)
-#define bL_switcher_get_enabled() do { } while (0)
-#endif
-
-#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
-#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
-
-static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static const struct cpufreq_arm_bL_ops *arm_bL_ops;
-static struct clk *clk[MAX_CLUSTERS];
-static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
-static atomic_t cluster_usage[MAX_CLUSTERS + 1];
-
-static unsigned int clk_big_min; /* (Big) clock frequencies */
-static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
-
-static DEFINE_PER_CPU(unsigned int, physical_cluster);
-static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
-
-static struct mutex cluster_lock[MAX_CLUSTERS];
-
-static inline int raw_cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
-static inline int cpu_to_cluster(int cpu)
-{
- return is_bL_switching_enabled() ?
- MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
-}
-
-static unsigned int find_cluster_maxfreq(int cluster)
-{
- int j;
- u32 max_freq = 0, cpu_freq;
-
- for_each_online_cpu(j) {
- cpu_freq = per_cpu(cpu_last_req_freq, j);
-
- if ((cluster == per_cpu(physical_cluster, j)) &&
- (max_freq < cpu_freq))
- max_freq = cpu_freq;
- }
-
- pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
- max_freq);
-
- return max_freq;
-}
-
-static unsigned int clk_get_cpu_rate(unsigned int cpu)
-{
- u32 cur_cluster = per_cpu(physical_cluster, cpu);
- u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
-
- /* For switcher we use virtual A7 clock rates */
- if (is_bL_switching_enabled())
- rate = VIRT_FREQ(cur_cluster, rate);
-
- pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
- cur_cluster, rate);
-
- return rate;
-}
-
-static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
-{
- if (is_bL_switching_enabled()) {
- pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
- cpu));
-
- return per_cpu(cpu_last_req_freq, cpu);
- } else {
- return clk_get_cpu_rate(cpu);
- }
-}
-
-static unsigned int
-bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
-{
- u32 new_rate, prev_rate;
- int ret;
- bool bLs = is_bL_switching_enabled();
-
- mutex_lock(&cluster_lock[new_cluster]);
-
- if (bLs) {
- prev_rate = per_cpu(cpu_last_req_freq, cpu);
- per_cpu(cpu_last_req_freq, cpu) = rate;
- per_cpu(physical_cluster, cpu) = new_cluster;
-
- new_rate = find_cluster_maxfreq(new_cluster);
- new_rate = ACTUAL_FREQ(new_cluster, new_rate);
- } else {
- new_rate = rate;
- }
-
- pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
- __func__, cpu, old_cluster, new_cluster, new_rate);
-
- ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
- if (!ret) {
- /*
- * FIXME: clk_set_rate hasn't returned an error here however it
- * may be that clk_change_rate failed due to hardware or
- * firmware issues and wasn't able to report that due to the
- * current design of the clk core layer. To work around this
- * problem we will read back the clock rate and check it is
- * correct. This needs to be removed once clk core is fixed.
- */
- if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
- ret = -EIO;
- }
-
- if (WARN_ON(ret)) {
- pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
- new_cluster);
- if (bLs) {
- per_cpu(cpu_last_req_freq, cpu) = prev_rate;
- per_cpu(physical_cluster, cpu) = old_cluster;
- }
-
- mutex_unlock(&cluster_lock[new_cluster]);
-
- return ret;
- }
-
- mutex_unlock(&cluster_lock[new_cluster]);
-
- /* Recalc freq for old cluster when switching clusters */
- if (old_cluster != new_cluster) {
- pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
- __func__, cpu, old_cluster, new_cluster);
-
- /* Switch cluster */
- bL_switch_request(cpu, new_cluster);
-
- mutex_lock(&cluster_lock[old_cluster]);
-
- /* Set freq of old cluster if there are cpus left on it */
- new_rate = find_cluster_maxfreq(old_cluster);
- new_rate = ACTUAL_FREQ(old_cluster, new_rate);
-
- if (new_rate) {
- pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
- __func__, old_cluster, new_rate);
-
- if (clk_set_rate(clk[old_cluster], new_rate * 1000))
- pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
- __func__, ret, old_cluster);
- }
- mutex_unlock(&cluster_lock[old_cluster]);
- }
-
- return 0;
-}
-
-/* Set clock frequency */
-static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
- unsigned int freqs_new;
- int ret;
-
- cur_cluster = cpu_to_cluster(cpu);
- new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
-
- freqs_new = freq_table[cur_cluster][index].frequency;
-
- if (is_bL_switching_enabled()) {
- if ((actual_cluster == A15_CLUSTER) &&
- (freqs_new < clk_big_min)) {
- new_cluster = A7_CLUSTER;
- } else if ((actual_cluster == A7_CLUSTER) &&
- (freqs_new > clk_little_max)) {
- new_cluster = A15_CLUSTER;
- }
- }
-
- ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
-
- if (!ret) {
- arch_set_freq_scale(policy->related_cpus, freqs_new,
- policy->cpuinfo.max_freq);
- }
-
- return ret;
-}
-
-static inline u32 get_table_count(struct cpufreq_frequency_table *table)
-{
- int count;
-
- for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
- ;
-
- return count;
-}
-
-/* get the minimum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_min(struct cpufreq_frequency_table *table)
-{
- struct cpufreq_frequency_table *pos;
- uint32_t min_freq = ~0;
- cpufreq_for_each_entry(pos, table)
- if (pos->frequency < min_freq)
- min_freq = pos->frequency;
- return min_freq;
-}
-
-/* get the maximum frequency in the cpufreq_frequency_table */
-static inline u32 get_table_max(struct cpufreq_frequency_table *table)
-{
- struct cpufreq_frequency_table *pos;
- uint32_t max_freq = 0;
- cpufreq_for_each_entry(pos, table)
- if (pos->frequency > max_freq)
- max_freq = pos->frequency;
- return max_freq;
-}
-
-static int merge_cluster_tables(void)
-{
- int i, j, k = 0, count = 1;
- struct cpufreq_frequency_table *table;
-
- for (i = 0; i < MAX_CLUSTERS; i++)
- count += get_table_count(freq_table[i]);
-
- table = kcalloc(count, sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- freq_table[MAX_CLUSTERS] = table;
-
- /* Add in reverse order to get freqs in increasing order */
- for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
- for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
- j++) {
- table[k].frequency = VIRT_FREQ(i,
- freq_table[i][j].frequency);
- pr_debug("%s: index: %d, freq: %d\n", __func__, k,
- table[k].frequency);
- k++;
- }
- }
-
- table[k].driver_data = k;
- table[k].frequency = CPUFREQ_TABLE_END;
-
- pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
-
- return 0;
-}
-
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
-
- if (!freq_table[cluster])
- return;
-
- clk_put(clk[cluster]);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
- if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpumask);
- dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
-}
-
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
- int i;
-
- if (atomic_dec_return(&cluster_usage[cluster]))
- return;
-
- if (cluster < MAX_CLUSTERS)
- return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
-
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return;
- }
-
- _put_cluster_clk_and_freq_table(cdev, cpumask);
- }
-
- /* free virtual table */
- kfree(freq_table[cluster]);
-}
-
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
- int ret;
-
- if (freq_table[cluster])
- return 0;
-
- ret = arm_bL_ops->init_opp_table(cpumask);
- if (ret) {
- dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
- __func__, cpu_dev->id, ret);
- goto out;
- }
-
- ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
- if (ret) {
- dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
- __func__, cpu_dev->id, ret);
- goto free_opp_table;
- }
-
- clk[cluster] = clk_get(cpu_dev, NULL);
- if (!IS_ERR(clk[cluster])) {
- dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
- __func__, clk[cluster], freq_table[cluster],
- cluster);
- return 0;
- }
-
- dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
- __func__, cpu_dev->id, cluster);
- ret = PTR_ERR(clk[cluster]);
- dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
-
-free_opp_table:
- if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpumask);
-out:
- dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
- cluster);
- return ret;
-}
-
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
- const struct cpumask *cpumask)
-{
- u32 cluster = cpu_to_cluster(cpu_dev->id);
- int i, ret;
-
- if (atomic_inc_return(&cluster_usage[cluster]) != 1)
- return 0;
-
- if (cluster < MAX_CLUSTERS) {
- ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
- if (ret)
- atomic_dec(&cluster_usage[cluster]);
- return ret;
- }
-
- /*
- * Get data for all clusters and fill virtual cluster with a merge of
- * both
- */
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return -ENODEV;
- }
-
- ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
- if (ret)
- goto put_clusters;
- }
-
- ret = merge_cluster_tables();
- if (ret)
- goto put_clusters;
-
- /* Assuming 2 cluster, set clk_big_min and clk_little_max */
- clk_big_min = get_table_min(freq_table[0]);
- clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
-
- pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
- __func__, cluster, clk_big_min, clk_little_max);
-
- return 0;
-
-put_clusters:
- for_each_present_cpu(i) {
- struct device *cdev = get_cpu_device(i);
- if (!cdev) {
- pr_err("%s: failed to get cpu%d device\n", __func__, i);
- return -ENODEV;
- }
-
- _put_cluster_clk_and_freq_table(cdev, cpumask);
- }
-
- atomic_dec(&cluster_usage[cluster]);
-
- return ret;
-}
-
-/* Per-CPU initialization */
-static int bL_cpufreq_init(struct cpufreq_policy *policy)
-{
- u32 cur_cluster = cpu_to_cluster(policy->cpu);
- struct device *cpu_dev;
- int ret;
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- if (cur_cluster < MAX_CLUSTERS) {
- int cpu;
-
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
-
- for_each_cpu(cpu, policy->cpus)
- per_cpu(physical_cluster, cpu) = cur_cluster;
- } else {
- /* Assumption: during init, we are always running on A15 */
- per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
- }
-
- ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
- if (ret)
- return ret;
-
- policy->freq_table = freq_table[cur_cluster];
- policy->cpuinfo.transition_latency =
- arm_bL_ops->get_transition_latency(cpu_dev);
-
- dev_pm_opp_of_register_em(policy->cpus);
-
- if (is_bL_switching_enabled())
- per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
-
- dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
- return 0;
-}
-
-static int bL_cpufreq_exit(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev;
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- if (cur_cluster < MAX_CLUSTERS) {
- cpufreq_cooling_unregister(cdev[cur_cluster]);
- cdev[cur_cluster] = NULL;
- }
-
- cpu_dev = get_cpu_device(policy->cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- policy->cpu);
- return -ENODEV;
- }
-
- put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
- dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
-
- return 0;
-}
-
-static void bL_cpufreq_ready(struct cpufreq_policy *policy)
-{
- int cur_cluster = cpu_to_cluster(policy->cpu);
-
- /* Do not register a cpu_cooling device if we are in IKS mode */
- if (cur_cluster >= MAX_CLUSTERS)
- return;
-
- cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
-}
-
-static struct cpufreq_driver bL_cpufreq_driver = {
- .name = "arm-big-little",
- .flags = CPUFREQ_STICKY |
- CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = bL_cpufreq_set_target,
- .get = bL_cpufreq_get_rate,
- .init = bL_cpufreq_init,
- .exit = bL_cpufreq_exit,
- .ready = bL_cpufreq_ready,
- .attr = cpufreq_generic_attr,
-};
-
-#ifdef CONFIG_BL_SWITCHER
-static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
- unsigned long action, void *_arg)
-{
- pr_debug("%s: action: %ld\n", __func__, action);
-
- switch (action) {
- case BL_NOTIFY_PRE_ENABLE:
- case BL_NOTIFY_PRE_DISABLE:
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- break;
-
- case BL_NOTIFY_POST_ENABLE:
- set_switching_enabled(true);
- cpufreq_register_driver(&bL_cpufreq_driver);
- break;
-
- case BL_NOTIFY_POST_DISABLE:
- set_switching_enabled(false);
- cpufreq_register_driver(&bL_cpufreq_driver);
- break;
-
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block bL_switcher_notifier = {
- .notifier_call = bL_cpufreq_switcher_notifier,
-};
-
-static int __bLs_register_notifier(void)
-{
- return bL_switcher_register_notifier(&bL_switcher_notifier);
-}
-
-static int __bLs_unregister_notifier(void)
-{
- return bL_switcher_unregister_notifier(&bL_switcher_notifier);
-}
-#else
-static int __bLs_register_notifier(void) { return 0; }
-static int __bLs_unregister_notifier(void) { return 0; }
-#endif
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
-{
- int ret, i;
-
- if (arm_bL_ops) {
- pr_debug("%s: Already registered: %s, exiting\n", __func__,
- arm_bL_ops->name);
- return -EBUSY;
- }
-
- if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
- !ops->get_transition_latency) {
- pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
- return -ENODEV;
- }
-
- arm_bL_ops = ops;
-
- set_switching_enabled(bL_switcher_get_enabled());
-
- for (i = 0; i < MAX_CLUSTERS; i++)
- mutex_init(&cluster_lock[i]);
-
- ret = cpufreq_register_driver(&bL_cpufreq_driver);
- if (ret) {
- pr_info("%s: Failed registering platform driver: %s, err: %d\n",
- __func__, ops->name, ret);
- arm_bL_ops = NULL;
- } else {
- ret = __bLs_register_notifier();
- if (ret) {
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- arm_bL_ops = NULL;
- } else {
- pr_info("%s: Registered platform driver: %s\n",
- __func__, ops->name);
- }
- }
-
- bL_switcher_put_enabled();
- return ret;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
-{
- if (arm_bL_ops != ops) {
- pr_err("%s: Registered with: %s, can't unregister, exiting\n",
- __func__, arm_bL_ops->name);
- return;
- }
-
- bL_switcher_get_enabled();
- __bLs_unregister_notifier();
- cpufreq_unregister_driver(&bL_cpufreq_driver);
- bL_switcher_put_enabled();
- pr_info("%s: Un-registered platform driver: %s\n", __func__,
- arm_bL_ops->name);
- arm_bL_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
-MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
deleted file mode 100644
index 88a176e466c8..000000000000
--- a/drivers/cpufreq/arm_big_little.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * ARM big.LITTLE platform's CPUFreq header file
- *
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * Copyright (C) 2013 Linaro.
- * Viresh Kumar <viresh.kumar@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef CPUFREQ_ARM_BIG_LITTLE_H
-#define CPUFREQ_ARM_BIG_LITTLE_H
-
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct cpufreq_arm_bL_ops {
- char name[CPUFREQ_NAME_LEN];
-
- /*
- * This must set opp table for cpu_dev in a similar way as done by
- * dev_pm_opp_of_add_table().
- */
- int (*init_opp_table)(const struct cpumask *cpumask);
-
- /* Optional */
- int (*get_transition_latency)(struct device *cpu_dev);
- void (*free_opp_table)(const struct cpumask *cpumask);
-};
-
-int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
-
-#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bca8d1f47fd2..54bc76743b1f 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -86,7 +86,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "ti,omap2", },
- { .compatible = "ti,omap3", },
{ .compatible = "ti,omap4", },
{ .compatible = "ti,omap5", },
@@ -137,6 +136,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "ti,am33xx", },
{ .compatible = "ti,am43", },
{ .compatible = "ti,dra7", },
+ { .compatible = "ti,omap3", },
{ }
};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee23eaf20f35..77114a3897fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -936,6 +936,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct freq_attr *fattr = to_attr(attr);
ssize_t ret;
+ if (!fattr->show)
+ return -EIO;
+
down_read(&policy->rwsem);
ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
@@ -950,6 +953,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+ if (!fattr->store)
+ return -EIO;
+
/*
* cpus_read_trylock() is used here to work around a circular lock
* dependency problem with respect to the cpufreq_register_driver().
@@ -2388,7 +2394,10 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
- /* verify the cpu speed can be set within this limit */
+ /*
+ * Verify that the CPU speed can be set within these limits and make sure
+ * that min <= max.
+ */
ret = cpufreq_driver->verify(new_policy);
if (ret)
return ret;
@@ -2631,6 +2640,13 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (cpufreq_disabled())
return -ENODEV;
+ /*
+ * The cpufreq core depends heavily on the availability of the CPU
+ * device structures; make sure they are available before proceeding
+ * further.
+ */
+ if (!get_cpu_device(0))
+ return -EPROBE_DEFER;
+
if (!driver_data || !driver_data->verify || !driver_data->init ||
!(driver_data->setpolicy || driver_data->target_index ||
driver_data->target) ||
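
[Annotation, not part of the patch: the new get_cpu_device(0) check lets platform drivers simply propagate the error and be retried by the driver core once the CPU devices exist. A hypothetical caller; the foo_* names are illustrative and foo_cpufreq_driver is assumed to be defined elsewhere.]

	static int foo_cpufreq_probe(struct platform_device *pdev)
	{
		/* Returns -EPROBE_DEFER until the CPU0 device is registered */
		return cpufreq_register_driver(&foo_cpufreq_driver);
	}
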
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 35db14cf3102..85a6efd6b68f 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
/*
- * Early samples without fuses written report "0 0" which means
- * consumer segment and minimum speed grading.
- *
- * According to datasheet minimum speed grading is not supported for
- * consumer parts so clamp to 1 to avoid warning for "no OPPs"
+ * Early samples without fuses written report "0 0" which may NOT
+ * match any OPP defined in DT. So clamp to minimum OPP defined in
+ * DT to avoid warning for "no OPPs".
*
* Applies to i.MX8M series SoCs.
*/
- if (mkt_segment == 0 && speed_grade == 0 && (
- of_machine_is_compatible("fsl,imx8mm") ||
- of_machine_is_compatible("fsl,imx8mn") ||
- of_machine_is_compatible("fsl,imx8mq")))
- speed_grade = 1;
+ if (mkt_segment == 0 && speed_grade == 0) {
+ if (of_machine_is_compatible("fsl,imx8mm") ||
+ of_machine_is_compatible("fsl,imx8mq"))
+ speed_grade = 1;
+ if (of_machine_is_compatible("fsl,imx8mn"))
+ speed_grade = 0xb;
+ }
supported_hw[0] = BIT(speed_grade);
supported_hw[1] = BIT(mkt_segment);
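
[Annotation, not part of the patch: the clamping logic above reduces to a small mapping from the raw fuse value to the lowest OPP each SoC's DT defines. A standalone sketch of that mapping, assuming the mkt_segment == 0 early-sample gate shown above has already been applied.]

	static u32 imx8m_clamp_speed_grade(u32 speed_grade, bool is_imx8mn)
	{
		if (speed_grade != 0)
			return speed_grade;

		/* unfused early samples: pick the minimum OPP in the DT */
		return is_imx8mn ? 0xb : 1;
	}
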
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8ab31702cf6a..d2fa3e9ccd97 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2662,21 +2662,21 @@ enum {
/* Hardware vendor-specific info that has its own power management modes */
static struct acpi_platform_list plat_info[] __initdata = {
- {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
- {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
- {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
+ {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
+ {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
{ } /* End */
};
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 6061850e59c9..56f4bc0d209e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -1041,9 +1041,14 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
static int init_chip_info(void)
{
- unsigned int chip[256];
+ unsigned int *chip;
unsigned int cpu, i;
unsigned int prev_chip_id = UINT_MAX;
+ int ret = 0;
+
+ chip = kcalloc(num_possible_cpus(), sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
for_each_possible_cpu(cpu) {
unsigned int id = cpu_to_chip_id(cpu);
@@ -1055,8 +1060,10 @@ static int init_chip_info(void)
}
chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
+ if (!chips) {
+ ret = -ENOMEM;
+ goto free_and_return;
+ }
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
@@ -1066,7 +1073,9 @@ static int init_chip_info(void)
per_cpu(chip_info, cpu) = &chips[i];
}
- return 0;
+free_and_return:
+ kfree(chip);
+ return ret;
}
static inline void clean_chip_info(void)
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index af0c00dabb22..c6bdfc308e99 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -19,7 +19,6 @@
static struct regulator *vddarm;
static unsigned long regulator_latency;
-#ifdef CONFIG_CPU_S3C6410
struct s3c64xx_dvfs {
unsigned int vddarm_min;
unsigned int vddarm_max;
@@ -48,7 +47,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
{ 0, 4, 800000 },
{ 0, 0, CPUFREQ_TABLE_END },
};
-#endif
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
@@ -149,11 +147,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
if (policy->cpu != 0)
return -EINVAL;
- if (s3c64xx_freq_table == NULL) {
- pr_err("No frequency information for this CPU\n");
- return -ENODEV;
- }
-
policy->clk = clk_get(NULL, "armclk");
if (IS_ERR(policy->clk)) {
pr_err("Unable to obtain ARMCLK: %ld\n",
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2b51e0718c9f..20d1f85d5f5a 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -1,8 +1,6 @@
/*
* System Control and Power Interface (SCPI) based CPUFreq Interface driver
*
- * It provides necessary ops to arm_big_little cpufreq driver.
- *
* Copyright (C) 2015 ARM Ltd.
* Sudeep Holla <sudeep.holla@arm.com>
*
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index eca32e443716..9907a165135b 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -25,7 +25,7 @@
static struct platform_device *cpufreq_dt_pdev, *sun50i_cpufreq_pdev;
/**
- * sun50i_cpufreq_get_efuse() - Parse and return efuse value present on SoC
+ * sun50i_cpufreq_get_efuse() - Determine speed grade from efuse value
* @versions: Set to the value parsed from efuse
*
* Returns 0 if success.
@@ -69,21 +69,16 @@ static int sun50i_cpufreq_get_efuse(u32 *versions)
return PTR_ERR(speedbin);
efuse_value = (*speedbin >> NVMEM_SHIFT) & NVMEM_MASK;
- switch (efuse_value) {
- case 0b0001:
- *versions = 1;
- break;
- case 0b0011:
- *versions = 2;
- break;
- default:
- /*
- * For other situations, we treat it as bin0.
- * This vf table can be run for any good cpu.
- */
+
+ /*
+ * We treat unexpected efuse values as if the SoC was from
+ * the slowest bin. Expected efuse values are 1-3, slowest
+ * to fastest.
+ */
+ if (efuse_value >= 1 && efuse_value <= 3)
+ *versions = efuse_value - 1;
+ else
*versions = 0;
- break;
- }
kfree(speedbin);
return 0;
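
[Annotation, not part of the patch: the rewritten mapping is compact enough to state directly — efuse values 1..3 select speed bins 0..2, slowest to fastest, and anything unexpected falls back to bin 0, which is safe on any good CPU. As a standalone helper, illustrative only.]

	static u32 sun50i_bin_from_efuse(u32 efuse_value)
	{
		return (efuse_value >= 1 && efuse_value <= 3) ?
			efuse_value - 1 : 0;
	}
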
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index aeaa883a8c9d..557cb513bf7f 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -31,11 +31,17 @@
#define DRA7_EFUSE_OD_MPU_OPP BIT(1)
#define DRA7_EFUSE_HIGH_MPU_OPP BIT(2)
+#define OMAP3_CONTROL_DEVICE_STATUS 0x4800244C
+#define OMAP3_CONTROL_IDCODE 0x4830A204
+#define OMAP34xx_ProdID_SKUID 0x4830A20C
+#define OMAP3_SYSCON_BASE (0x48000000 + 0x2000 + 0x270)
+
#define VERSION_COUNT 2
struct ti_cpufreq_data;
struct ti_cpufreq_soc_data {
+ const char * const *reg_names;
unsigned long (*efuse_xlate)(struct ti_cpufreq_data *opp_data,
unsigned long efuse);
unsigned long efuse_fallback;
@@ -85,6 +91,13 @@ static unsigned long dra7_efuse_xlate(struct ti_cpufreq_data *opp_data,
return calculated_efuse;
}
+static unsigned long omap3_efuse_xlate(struct ti_cpufreq_data *opp_data,
+ unsigned long efuse)
+{
+ /* OPP enable bit ("Speed Binned") */
+ return BIT(efuse);
+}
+
static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_xlate = amx3_efuse_xlate,
.efuse_fallback = AM33XX_800M_ARM_MPU_MAX_FREQ,
@@ -112,6 +125,74 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.multi_regulator = true,
};
+/*
+ * OMAP35x TRM (SPRUF98K):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * The Control OMAP Status Register, bits 15:0 (address 0x4800 244C),
+ * distinguishes between omap3503, omap3515, omap3525 and omap3530
+ * and indicates feature presence.
+ * There are also encodings for versions limited to 400/266MHz,
+ * but we ignore them here.
+ * It is not clear whether this also holds for omap34xx.
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP1_VDD1,
+ * are stored in the SYSCON register range.
+ * Register 0x4830A20C [ProdID.SKUID], bits [0:3]:
+ * 0x0 for a normal 600/430MHz device,
+ * 0x8 for a 720/520MHz device.
+ * It is not clear what the omap34xx value is.
+ */
+
+static struct ti_cpufreq_soc_data omap34xx_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP34xx_ProdID_SKUID - OMAP3_SYSCON_BASE,
+ .efuse_shift = 3,
+ .efuse_mask = BIT(3),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
+
+/*
+ * AM/DM37x TRM (SPRUGN4M):
+ * CONTROL_IDCODE (0x4830 A204) describes silicon revisions.
+ * The Control Device Status Register, bits 15:0 (address 0x4800 244C),
+ * distinguishes between am3703, am3715, dm3725 and dm3730
+ * and indicates feature presence.
+ * Speed Binned = bit 9:
+ * 0 800/600 MHz
+ * 1 1000/800 MHz
+ * Some eFuse values, e.g. CONTROL_FUSE_OPP1G_VDD1,
+ * are stored in the SYSCON register range.
+ * The 0x4830A20C [ProdID.SKUID] register exists here as well,
+ * but seems to always read as 0.
+ */
+
+static const char * const omap3_reg_names[] = {"cpu0", "vbb"};
+
+static struct ti_cpufreq_soc_data omap36xx_soc_data = {
+ .reg_names = omap3_reg_names,
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 9,
+ .efuse_mask = BIT(9),
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = true,
+};
+
+/*
+ * AM3517 is quite similar to AM/DM37x, except that it has no
+ * high-speed grade eFuse and no ABB LDO.
+ */
+
+static struct ti_cpufreq_soc_data am3517_soc_data = {
+ .efuse_xlate = omap3_efuse_xlate,
+ .efuse_offset = OMAP3_CONTROL_DEVICE_STATUS - OMAP3_SYSCON_BASE,
+ .efuse_shift = 0,
+ .efuse_mask = 0,
+ .rev_offset = OMAP3_CONTROL_IDCODE - OMAP3_SYSCON_BASE,
+ .multi_regulator = false,
+};
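omap3_efuse_xlate() above turns the decoded speed grade into an OPP "supported hardware" bitmask: grade 0 yields BIT(0), grade 1 yields BIT(1), and each OPP's opp-supported-hw property is matched against that mask. A stand-alone sketch of the idea (the masks are hypothetical):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* an OPP is usable when its mask has the bit for this speed grade set */
static int opp_usable(unsigned long opp_mask, unsigned int speed_grade)
{
	return (opp_mask & BIT(speed_grade)) != 0;
}

int main(void)
{
	/* hypothetical masks: 0x1 = all devices, 0x2 = high-speed parts only */
	printf("std OPP on slow part:  %d\n", opp_usable(0x1, 0));
	printf("fast OPP on slow part: %d\n", opp_usable(0x2, 0));
	printf("fast OPP on fast part: %d\n", opp_usable(0x2, 1));
	return 0;
}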
+
/**
* ti_cpufreq_get_efuse() - Parse and return efuse value present on SoC
* @opp_data: pointer to ti_cpufreq_data context
@@ -128,7 +209,17 @@ static int ti_cpufreq_get_efuse(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->efuse_offset,
&efuse);
- if (ret) {
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->efuse_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ efuse = readl(regs);
+ iounmap(regs);
+ } else if (ret) {
dev_err(dev,
"Failed to read the efuse value from syscon: %d\n",
ret);
@@ -159,7 +250,17 @@ static int ti_cpufreq_get_rev(struct ti_cpufreq_data *opp_data,
ret = regmap_read(opp_data->syscon, opp_data->soc_data->rev_offset,
&revision);
- if (ret) {
+ if (ret == -EIO) {
+ /* not a syscon register! */
+ void __iomem *regs = ioremap(OMAP3_SYSCON_BASE +
+ opp_data->soc_data->rev_offset, 4);
+
+ if (!regs)
+ return -ENOMEM;
+ revision = readl(regs);
+ iounmap(regs);
+ } else if (ret) {
dev_err(dev,
"Failed to read the revision number from syscon: %d\n",
ret);
@@ -189,8 +290,14 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
static const struct of_device_id ti_cpufreq_of_match[] = {
{ .compatible = "ti,am33xx", .data = &am3x_soc_data, },
+ { .compatible = "ti,am3517", .data = &am3517_soc_data, },
{ .compatible = "ti,am43", .data = &am4x_soc_data, },
{ .compatible = "ti,dra7", .data = &dra7_soc_data },
+ { .compatible = "ti,omap34xx", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
+ /* legacy */
+ { .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
+ { .compatible = "ti,omap3630", .data = &omap36xx_soc_data, },
{},
};
@@ -212,7 +319,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
- const char * const reg_names[] = {"vdd", "vbb"};
+ const char * const default_reg_names[] = {"vdd", "vbb"};
int ret;
match = dev_get_platdata(&pdev->dev);
@@ -268,9 +375,13 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
opp_data->opp_table = ti_opp_table;
if (opp_data->soc_data->multi_regulator) {
+ const char * const *reg_names = default_reg_names;
+
+ if (opp_data->soc_data->reg_names)
+ reg_names = opp_data->soc_data->reg_names;
ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
reg_names,
- ARRAY_SIZE(reg_names));
+ ARRAY_SIZE(default_reg_names));
if (IS_ERR(ti_opp_table)) {
dev_pm_opp_put_supported_hw(opp_data->opp_table);
ret = PTR_ERR(ti_opp_table);
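Note why the last hunk keeps ARRAY_SIZE(default_reg_names) rather than switching to ARRAY_SIZE(reg_names): reg_names is now a pointer, and the kernel's ARRAY_SIZE() rejects pointers at compile time, while a naive sizeof-based version would silently yield 1; the fixed count is safe only because both per-SoC name lists have the same length. A user-space sketch of the pitfall:

#include <stdio.h>

#define NAIVE_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const char * const default_reg_names[] = {"vdd", "vbb"};
	const char * const *reg_names = default_reg_names;

	printf("on the array:   %zu\n", NAIVE_ARRAY_SIZE(default_reg_names)); /* 2 */
	printf("on the pointer: %zu\n", NAIVE_ARRAY_SIZE(reg_names)); /* 1 on LP64 */
	return 0;
}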
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 53237289e606..506e3f2bf53a 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -1,61 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Versatile Express SPC CPUFreq Interface driver
*
- * It provides necessary ops to arm_big_little cpufreq driver.
+ * Copyright (C) 2013 - 2019 ARM Ltd.
+ * Sudeep Holla <sudeep.holla@arm.com>
*
- * Copyright (C) 2013 ARM Ltd.
- * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/device.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#ifdef CONFIG_BL_SWITCHER
+#include <asm/bL_switcher.h>
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#define bL_switch_request(...) do { } while (0)
+#define bL_switcher_put_enabled() do { } while (0)
+#define bL_switcher_get_enabled() do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
+#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
+
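In switcher (IKS) mode the driver exposes one virtual frequency axis across both clusters: A7 rates are halved so the merged table stays monotonic, and doubled again when the clock is actually programmed. A quick round-trip sketch (user-space, with the macro arguments parenthesized for safety):

#include <stdio.h>

#define A15_CLUSTER 0
#define A7_CLUSTER 1

#define ACTUAL_FREQ(cluster, freq) ((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)   ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

int main(void)
{
	unsigned int a7_hw_khz = 1000000; /* 1 GHz A7 hardware rate */
	unsigned int virt = VIRT_FREQ(A7_CLUSTER, a7_hw_khz);

	printf("virtual: %u kHz\n", virt);                          /* 500000 */
	printf("actual:  %u kHz\n", ACTUAL_FREQ(A7_CLUSTER, virt)); /* 1000000 */
	return 0;
}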
+static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if (cluster == per_cpu(physical_cluster, j) &&
+ max_freq < cpu_freq)
+ max_freq = cpu_freq;
+ }
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int ve_spc_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled())
+ return per_cpu(cpu_last_req_freq, cpu);
+ else
+ return clk_get_cpu_rate(cpu);
+}
+
+static unsigned int
+ve_spc_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (!ret) {
+ /*
+ * FIXME: clk_set_rate hasn't returned an error here however it
+ * may be that clk_change_rate failed due to hardware or
+ * firmware issues and wasn't able to report that due to the
+ * current design of the clk core layer. To work around this
+ * problem we will read back the clock rate and check it is
+ * correct. This needs to be removed once clk core is fixed.
+ */
+ if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
+ ret = -EIO;
+ }
+
+ if (WARN_ON(ret)) {
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
+ if (ret)
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
+}
+
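Since all CPUs in a cluster share one clock, ve_spc_cpufreq_set_rate() programs the maximum of the per-CPU requests gathered by find_cluster_maxfreq(). A toy model of that vote (values made up):

#include <stdio.h>

#define NR_CPUS 4

static unsigned int cpu_last_req_freq[NR_CPUS] = {500000, 900000, 700000, 300000};
static unsigned int physical_cluster[NR_CPUS] = {0, 0, 1, 1};

static unsigned int find_cluster_maxfreq(unsigned int cluster)
{
	unsigned int j, max_freq = 0;

	for (j = 0; j < NR_CPUS; j++)
		if (physical_cluster[j] == cluster &&
		    cpu_last_req_freq[j] > max_freq)
			max_freq = cpu_last_req_freq[j];
	return max_freq;
}

int main(void)
{
	printf("cluster 0 runs at %u kHz\n", find_cluster_maxfreq(0)); /* 900000 */
	printf("cluster 1 runs at %u kHz\n", find_cluster_maxfreq(1)); /* 700000 */
	return 0;
}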
+/* Set clock frequency */
+static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+ int ret;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
+ new_cluster = A7_CLUSTER;
+ else if (actual_cluster == A7_CLUSTER &&
+ freqs_new > clk_little_max)
+ new_cluster = A15_CLUSTER;
+ }
+
+ ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+ freqs_new);
+
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freqs_new,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 min_freq = ~0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency < min_freq)
+ min_freq = pos->frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ u32 max_freq = 0;
+
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency > max_freq)
+ max_freq = pos->frequency;
+ return max_freq;
+}
+
+static bool search_frequency(struct cpufreq_frequency_table *table, int size,
+ unsigned int freq)
+{
+ int count;
+
+ for (count = 0; count < size; count++) {
+ if (table[count].frequency == freq)
+ return true;
+ }
+
+ return false;
+}
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kcalloc(count, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--, count = k) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ if (i == A15_CLUSTER &&
+ search_frequency(table, count, freq_table[i][j].frequency))
+ continue; /* skip duplicates */
+ table[k++].frequency =
+ VIRT_FREQ(i, freq_table[i][j].frequency);
+ }
+ }
+
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ return 0;
+}
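A condensed model of what merge_cluster_tables() builds for the virtual cluster: A7 entries go in first (halved onto the virtual axis), then A15 entries, skipping any frequency already present. The tables and the 0 end marker below are hypothetical:

#include <stdio.h>

#define END 0 /* stand-in for CPUFREQ_TABLE_END */

int main(void)
{
	unsigned int a15[] = {500000, 700000, 900000, END};
	unsigned int a7[]  = {400000, 600000, 800000, 1000000, END};
	unsigned int merged[16];
	int i, k = 0;

	for (i = 0; a7[i] != END; i++)    /* A7 first, halved: 200000..500000 */
		merged[k++] = a7[i] / 2;
	for (i = 0; a15[i] != END; i++) { /* then A15, skipping duplicates */
		int j, dup = 0;

		for (j = 0; j < k; j++)
			if (merged[j] == a15[i])
				dup = 1;
		if (!dup)
			merged[k++] = a15[i];
	}

	for (i = 0; i < k; i++)
		printf("%u\n", merged[i]); /* increasing virtual frequencies */
	return 0;
}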
-static int ve_spc_init_opp_table(const struct cpumask *cpumask)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
+
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+ int ret;
+
+ if (freq_table[cluster])
+ return 0;
+
/*
* platform specific SPC code must initialise the opp table
* so just check if the OPP count is non-zero
*/
- return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ ret = dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+ if (ret)
+ goto out;
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (ret)
+ goto out;
+
+ clk[cluster] = clk_get(cpu_dev, NULL);
+ if (!IS_ERR(clk[cluster]))
+ return 0;
+
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+ __func__, cpu_dev->id, cluster);
+ ret = PTR_ERR(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+out:
+ dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+ cluster);
+ return ret;
}
-static int ve_spc_get_transition_latency(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
- return 1000000; /* 1 ms */
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming two clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[A15_CLUSTER]);
+ clk_little_max = VIRT_FREQ(A7_CLUSTER,
+ get_table_max(freq_table[A7_CLUSTER]));
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+
+ if (!cdev)
+ return -ENODEV;
+
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
}
-static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
- .name = "vexpress-spc",
- .get_transition_latency = ve_spc_get_transition_latency,
- .init_opp_table = ve_spc_init_opp_table,
+/* Per-CPU initialization */
+static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
+ ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ if (ret)
+ return ret;
+
+ policy->freq_table = freq_table[cur_cluster];
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
+
+ dev_pm_opp_of_register_em(policy->cpus);
+
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) =
+ clk_get_cpu_rate(policy->cpu);
+
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+ return 0;
+}
+
+static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpufreq_cooling_unregister(cdev[cur_cluster]);
+ cdev[cur_cluster] = NULL;
+ }
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
+ return 0;
+}
+
+static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ int cur_cluster = cpu_to_cluster(policy->cpu);
+
+ /* Do not register a cpu_cooling device if we are in IKS mode */
+ if (cur_cluster >= MAX_CLUSTERS)
+ return;
+
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
+}
+
+static struct cpufreq_driver ve_spc_cpufreq_driver = {
+ .name = "vexpress-spc",
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ve_spc_cpufreq_set_target,
+ .get = ve_spc_cpufreq_get_rate,
+ .init = ve_spc_cpufreq_init,
+ .exit = ve_spc_cpufreq_exit,
+ .ready = ve_spc_cpufreq_ready,
+ .attr = cpufreq_generic_attr,
};
+#ifdef CONFIG_BL_SWITCHER
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+static int __bLs_register_notifier(void)
+{
+ return bL_switcher_register_notifier(&bL_switcher_notifier);
+}
+
+static int __bLs_unregister_notifier(void)
+{
+ return bL_switcher_unregister_notifier(&bL_switcher_notifier);
+}
+#else
+static int __bLs_register_notifier(void) { return 0; }
+static int __bLs_unregister_notifier(void) { return 0; }
+#endif
+
static int ve_spc_cpufreq_probe(struct platform_device *pdev)
{
- return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+ int ret, i;
+
+ set_switching_enabled(bL_switcher_get_enabled());
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ve_spc_cpufreq_driver.name, ret);
+ } else {
+ ret = __bLs_register_notifier();
+ if (ret)
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ else
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ve_spc_cpufreq_driver.name);
+ }
+
+ bL_switcher_put_enabled();
+ return ret;
}
static int ve_spc_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ bL_switcher_get_enabled();
+ __bLs_unregister_notifier();
+ cpufreq_unregister_driver(&ve_spc_cpufreq_driver);
+ bL_switcher_put_enabled();
+ pr_info("%s: Un-registered platform driver: %s\n", __func__,
+ ve_spc_cpufreq_driver.name);
return 0;
}
@@ -68,4 +599,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
};
module_platform_driver(ve_spc_cpufreq_platdrv);
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("Vexpress SPC ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 84b1ebe212b3..1b299e801f74 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -56,13 +56,10 @@ static u64 get_snooze_timeout(struct cpuidle_device *dev,
return default_snooze_timeout;
for (i = index + 1; i < drv->state_count; i++) {
- struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
-
- if (s->disabled || su->disable)
+ if (dev->states_usage[i].disable)
continue;
- return s->target_residency * tb_ticks_per_usec;
+ return drv->states[i].target_residency * tb_ticks_per_usec;
}
return default_snooze_timeout;
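The simplified loop now derives the snooze timeout from the first enabled deeper state's target residency. A stand-alone sketch with made-up numbers (the real code checks dev->states_usage[i].disable and falls back to default_snooze_timeout):

#include <stdio.h>

struct state { unsigned int target_residency_us; int disabled; };

static unsigned long long snooze_timeout(const struct state *s, int count,
					 int index, unsigned long long ticks_per_usec)
{
	int i;

	for (i = index + 1; i < count; i++) {
		if (s[i].disabled)
			continue;
		return s[i].target_residency_us * ticks_per_usec;
	}
	return 0; /* stand-in for the default timeout */
}

int main(void)
{
	struct state states[] = {
		{0, 0},    /* snooze itself */
		{100, 1},  /* disabled: skipped */
		{1000, 0}, /* first usable deeper state */
	};

	printf("timeout: %llu ticks\n", snooze_timeout(states, 3, 0, 512));
	return 0;
}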
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0895b988fa92..569dbac443bd 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -75,44 +75,45 @@ int cpuidle_play_dead(void)
static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
- unsigned int max_latency,
+ u64 max_latency_ns,
unsigned int forbidden_flags,
bool s2idle)
{
- unsigned int latency_req = 0;
+ u64 latency_req = 0;
int i, ret = 0;
for (i = 1; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable || s->exit_latency <= latency_req
- || s->exit_latency > max_latency
- || (s->flags & forbidden_flags)
- || (s2idle && !s->enter_s2idle))
+ if (dev->states_usage[i].disable ||
+ s->exit_latency_ns <= latency_req ||
+ s->exit_latency_ns > max_latency_ns ||
+ (s->flags & forbidden_flags) ||
+ (s2idle && !s->enter_s2idle))
continue;
- latency_req = s->exit_latency;
+ latency_req = s->exit_latency_ns;
ret = i;
}
return ret;
}
/**
- * cpuidle_use_deepest_state - Set/clear governor override flag.
- * @enable: New value of the flag.
+ * cpuidle_use_deepest_state - Set/unset governor override mode.
+ * @latency_limit_ns: Idle state exit latency limit (or no override if 0).
*
- * Set/unset the current CPU to use the deepest idle state (override governors
- * going forward if set).
+ * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
+ * state with exit latency within @latency_limit_ns (override governors going
+ * forward), or do not override governors if it is zero.
*/
-void cpuidle_use_deepest_state(bool enable)
+void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
struct cpuidle_device *dev;
preempt_disable();
dev = cpuidle_get_device();
if (dev)
- dev->use_deepest_state = enable;
+ dev->forced_idle_latency_limit_ns = latency_limit_ns;
preempt_enable();
}
@@ -122,9 +123,10 @@ void cpuidle_use_deepest_state(bool enable)
* @dev: cpuidle device for the given CPU.
*/
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{
- return find_deepest_state(drv, dev, UINT_MAX, 0, false);
+ return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
}
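find_deepest_state() walks the table keeping the deepest state whose exit latency fits under the (now nanosecond-valued) cap. A minimal model, with the disable flags and the s2idle/forbidden-flags checks omitted:

#include <stdio.h>
#include <stdint.h>

struct state { uint64_t exit_latency_ns; };

static int find_deepest(const struct state *s, int count, uint64_t max_latency_ns)
{
	uint64_t latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < count; i++) {
		if (s[i].exit_latency_ns <= latency_req ||
		    s[i].exit_latency_ns > max_latency_ns)
			continue;
		latency_req = s[i].exit_latency_ns;
		ret = i;
	}
	return ret;
}

int main(void)
{
	struct state states[] = { {0}, {2000}, {50000}, {400000} };

	/* with a 100 us cap, state 2 (50 us exit latency) is the deepest fit */
	printf("deepest: %d\n", find_deepest(states, 4, 100000));
	return 0;
}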
#ifdef CONFIG_SUSPEND
@@ -180,7 +182,7 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
- index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
+ index = find_deepest_state(drv, dev, U64_MAX, 0, true);
if (index > 0)
enter_s2idle_proper(drv, dev, index);
@@ -209,7 +211,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* CPU as a broadcast timer, this call may fail if it is not available.
*/
if (broadcast && tick_broadcast_enter()) {
- index = find_deepest_state(drv, dev, target_state->exit_latency,
+ index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
CPUIDLE_FLAG_TIMER_STOP, false);
if (index < 0) {
default_idle_call();
@@ -247,7 +249,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
local_irq_enable();
if (entered_state >= 0) {
- s64 diff, delay = drv->states[entered_state].exit_latency;
+ s64 diff, delay = drv->states[entered_state].exit_latency_ns;
int i;
/*
@@ -255,18 +257,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* This can be moved to within driver enter routine,
* but that results in multiple copies of same code.
*/
- diff = ktime_us_delta(time_end, time_start);
- if (diff > INT_MAX)
- diff = INT_MAX;
+ diff = ktime_sub(time_end, time_start);
- dev->last_residency = (int)diff;
- dev->states_usage[entered_state].time += dev->last_residency;
+ dev->last_residency_ns = diff;
+ dev->states_usage[entered_state].time_ns += diff;
dev->states_usage[entered_state].usage++;
- if (diff < drv->states[entered_state].target_residency) {
+ if (diff < drv->states[entered_state].target_residency_ns) {
for (i = entered_state - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
/* Shallower states are enabled, so update. */
@@ -275,22 +274,21 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
}
} else if (diff > delay) {
for (i = entered_state + 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
/*
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
- if (diff - delay >= drv->states[i].target_residency)
+ if (diff - delay >= drv->states[i].target_residency_ns)
dev->states_usage[entered_state].below++;
break;
}
}
} else {
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
}
return entered_state;
@@ -380,10 +378,10 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
limit_ns = TICK_NSEC;
for (i = 1; i < drv->state_count; i++) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
- limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+ limit_ns = (u64)drv->states[i].target_residency_ns;
}
dev->poll_limit_ns = limit_ns;
@@ -554,7 +552,7 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
dev->next_hrtimer = 0;
}
@@ -567,12 +565,16 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
*/
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
- int ret;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ int i, ret;
if (!try_module_get(drv->owner))
return -EINVAL;
+ for (i = 0; i < drv->state_count; i++)
+ if (drv->states[i].disabled)
+ dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 80c1a830d991..c76423aaef4d 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -62,24 +62,23 @@ static inline void __cpuidle_unset_driver(struct cpuidle_driver *drv)
* __cpuidle_set_driver - set per CPU driver variables for the given driver.
* @drv: a valid pointer to a struct cpuidle_driver
*
- * For each CPU in the driver's cpumask, unset the registered driver per CPU
- * to @drv.
- *
- * Returns 0 on success, -EBUSY if the CPUs have driver(s) already.
+ * Returns 0 on success, -EBUSY if any CPU in the cpumask already has a
+ * driver different from @drv registered.
*/
static inline int __cpuidle_set_driver(struct cpuidle_driver *drv)
{
int cpu;
for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_driver *old_drv;
- if (__cpuidle_get_cpu_driver(cpu)) {
- __cpuidle_unset_driver(drv);
+ old_drv = __cpuidle_get_cpu_driver(cpu);
+ if (old_drv && old_drv != drv)
return -EBUSY;
- }
+ }
+ for_each_cpu(cpu, drv->cpumask)
per_cpu(cpuidle_drivers, cpu) = drv;
- }
return 0;
}
@@ -166,16 +165,27 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
if (!drv->cpumask)
drv->cpumask = (struct cpumask *)cpu_possible_mask;
- /*
- * Look for the timer stop flag in the different states, so that we know
- * if the broadcast timer has to be set up. The loop is in the reverse
- * order, because usually one of the deeper states have this flag set.
- */
- for (i = drv->state_count - 1; i >= 0 ; i--) {
- if (drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP) {
+ for (i = 0; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+
+ /*
+ * Look for the timer stop flag in the different states and if
+ * it is found, indicate that the broadcast timer has to be set
+ * up.
+ */
+ if (s->flags & CPUIDLE_FLAG_TIMER_STOP)
drv->bctimer = 1;
- break;
- }
+
+ /*
+ * The core will use the target residency and exit latency
+ * values in nanoseconds, but allow drivers to provide them in
+ * microseconds too.
+ */
+ if (s->target_residency > 0)
+ s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
+
+ if (s->exit_latency > 0)
+ s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
}
}
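Drivers can keep supplying microsecond values; the loop above derives the nanosecond fields once at init. In sketch form (a simplified stand-alone model):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

struct state {
	unsigned int target_residency; /* us, as provided by a legacy driver */
	unsigned int exit_latency;     /* us */
	uint64_t target_residency_ns;
	uint64_t exit_latency_ns;
};

int main(void)
{
	struct state s = { .target_residency = 100, .exit_latency = 10 };

	if (s.target_residency > 0)
		s.target_residency_ns = s.target_residency * NSEC_PER_USEC;
	if (s.exit_latency > 0)
		s.exit_latency_ns = s.exit_latency * NSEC_PER_USEC;

	printf("%llu ns residency, %llu ns latency\n",
	       (unsigned long long)s.target_residency_ns,
	       (unsigned long long)s.exit_latency_ns);
	return 0;
}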
@@ -379,3 +389,31 @@ void cpuidle_driver_unref(void)
spin_unlock(&cpuidle_driver_lock);
}
+
+/**
+ * cpuidle_driver_state_disabled - Disable or enable an idle state
+ * @drv: cpuidle driver owning the state
+ * @idx: State index
+ * @disable: Whether or not to disable the state
+ */
+void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable)
+{
+ unsigned int cpu;
+
+ mutex_lock(&cpuidle_lock);
+
+ for_each_cpu(cpu, drv->cpumask) {
+ struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+
+ if (!dev)
+ continue;
+
+ if (disable)
+ dev->states_usage[idx].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ else
+ dev->states_usage[idx].disable &= ~CPUIDLE_STATE_DISABLED_BY_DRIVER;
+ }
+
+ mutex_unlock(&cpuidle_lock);
+}
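With the disable field now a bitmask, driver- and user-initiated disabling coexist: the state stays off until every bit is cleared. A small model (the flag values here are illustrative; the kernel defines its own constants):

#include <stdio.h>

#define DISABLED_BY_USER   (1U << 0)
#define DISABLED_BY_DRIVER (1U << 1)

int main(void)
{
	unsigned int disable = 0;

	disable |= DISABLED_BY_DRIVER;  /* driver disables the state */
	disable |= DISABLED_BY_USER;    /* user also disables it via sysfs */
	disable &= ~DISABLED_BY_DRIVER; /* driver re-enables its side... */
	printf("still disabled? %s\n", disable ? "yes" : "no"); /* yes */
	disable &= ~DISABLED_BY_USER;
	printf("disabled now?   %s\n", disable ? "yes" : "no"); /* no */
	return 0;
}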
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index e9801f26c732..e48271e117a3 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -107,11 +107,14 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
* cpuidle_governor_latency_req - Compute a latency constraint for CPU
* @cpu: Target CPU
*/
-int cpuidle_governor_latency_req(unsigned int cpu)
+s64 cpuidle_governor_latency_req(unsigned int cpu)
{
int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
- return device_req < global_req ? device_req : global_req;
+ if (device_req > global_req)
+ device_req = global_req;
+
+ return (s64)device_req * NSEC_PER_USEC;
}
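The constraint is simply the minimum of the per-device resume-latency QoS and the global CPU-DMA-latency QoS, scaled from microseconds to nanoseconds for the governors. In sketch form:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000LL

static int64_t latency_req_ns(int device_req_us, int global_req_us)
{
	if (device_req_us > global_req_us)
		device_req_us = global_req_us;
	return (int64_t)device_req_us * NSEC_PER_USEC;
}

int main(void)
{
	/* device allows 200 us, global QoS caps at 50 us -> 50000 ns */
	printf("%lld ns\n", (long long)latency_req_ns(200, 50));
	return 0;
}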
diff --git a/drivers/cpuidle/governors/haltpoll.c b/drivers/cpuidle/governors/haltpoll.c
index 7a703d2e0064..cb2a96eafc02 100644
--- a/drivers/cpuidle/governors/haltpoll.c
+++ b/drivers/cpuidle/governors/haltpoll.c
@@ -49,7 +49,7 @@ static int haltpoll_select(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
bool *stop_tick)
{
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
if (!drv->state_count || latency_req == 0) {
*stop_tick = false;
@@ -75,10 +75,9 @@ static int haltpoll_select(struct cpuidle_driver *drv,
return 0;
}
-static void adjust_poll_limit(struct cpuidle_device *dev, unsigned int block_us)
+static void adjust_poll_limit(struct cpuidle_device *dev, u64 block_ns)
{
unsigned int val;
- u64 block_ns = block_us*NSEC_PER_USEC;
/* Grow cpu_halt_poll_us if
* cpu_halt_poll_us < block_ns < guest_halt_poll_us
@@ -115,7 +114,7 @@ static void haltpoll_reflect(struct cpuidle_device *dev, int index)
dev->last_state_idx = index;
if (index != 0)
- adjust_poll_limit(dev, dev->last_residency);
+ adjust_poll_limit(dev, dev->last_residency_ns);
}
/**
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 428eeb832fe7..8e9058c4ea63 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -27,8 +27,8 @@ struct ladder_device_state {
struct {
u32 promotion_count;
u32 demotion_count;
- u32 promotion_time;
- u32 demotion_time;
+ u64 promotion_time_ns;
+ u64 demotion_time_ns;
} threshold;
struct {
int promotion_count;
@@ -68,9 +68,10 @@ static int ladder_select_state(struct cpuidle_driver *drv,
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
- int last_residency, last_idx = dev->last_state_idx;
+ int last_idx = dev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ s64 last_residency;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
@@ -80,14 +81,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- last_residency = dev->last_residency - drv->states[last_idx].exit_latency;
+ last_residency = dev->last_residency_ns - drv->states[last_idx].exit_latency_ns;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
- !drv->states[last_idx + 1].disabled &&
!dev->states_usage[last_idx + 1].disable &&
- last_residency > last_state->threshold.promotion_time &&
- drv->states[last_idx + 1].exit_latency <= latency_req) {
+ last_residency > last_state->threshold.promotion_time_ns &&
+ drv->states[last_idx + 1].exit_latency_ns <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -98,13 +98,12 @@ static int ladder_select_state(struct cpuidle_driver *drv,
/* consider demotion */
if (last_idx > first_idx &&
- (drv->states[last_idx].disabled ||
- dev->states_usage[last_idx].disable ||
- drv->states[last_idx].exit_latency > latency_req)) {
+ (dev->states_usage[last_idx].disable ||
+ drv->states[last_idx].exit_latency_ns > latency_req)) {
int i;
for (i = last_idx - 1; i > first_idx; i--) {
- if (drv->states[i].exit_latency <= latency_req)
+ if (drv->states[i].exit_latency_ns <= latency_req)
break;
}
ladder_do_selection(dev, ldev, last_idx, i);
@@ -112,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
}
if (last_idx > first_idx &&
- last_residency < last_state->threshold.demotion_time) {
+ last_residency < last_state->threshold.demotion_time_ns) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
@@ -152,9 +151,9 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
lstate->threshold.demotion_count = DEMOTION_COUNT;
if (i < drv->state_count - 1)
- lstate->threshold.promotion_time = state->exit_latency;
+ lstate->threshold.promotion_time_ns = state->exit_latency_ns;
if (i > first_idx)
- lstate->threshold.demotion_time = state->exit_latency;
+ lstate->threshold.demotion_time_ns = state->exit_latency_ns;
}
return 0;
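The ladder governor still moves one rung at a time: enough consecutive residencies above the promotion threshold (the next state's exit latency) climb one state, short ones descend. A tiny simulation of the promotion side (threshold and residency values made up):

#include <stdio.h>
#include <stdint.h>

#define PROMOTION_COUNT 4 /* mirrors the governor's constant */

int main(void)
{
	int64_t promotion_time_ns = 10000; /* this rung's threshold */
	int promotions = 0, idx = 1, wake;

	for (wake = 0; wake < 5; wake++) {
		int64_t last_residency_ns = 25000; /* five long residencies */

		if (last_residency_ns > promotion_time_ns) {
			if (++promotions >= PROMOTION_COUNT) {
				promotions = 0;
				idx++; /* climb one state deeper */
			}
		} else {
			promotions = 0; /* a short sleep resets the streak */
		}
		printf("wakeup %d: state %d\n", wake, idx);
	}
	return 0;
}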
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index e5a5d0c8d66b..b0a7ad566081 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,22 +19,12 @@
#include <linux/sched/stat.h>
#include <linux/math64.h>
-/*
- * Please note when changing the tuning values:
- * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
- * a scaling operation multiplication may overflow on 32 bit platforms.
- * In that case, #define RESOLUTION as ULL to get 64 bit result:
- * #define RESOLUTION 1024ULL
- *
- * The default values do not overflow.
- */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
-#define MAX_INTERESTING 50000
-
+#define MAX_INTERESTING (50000 * NSEC_PER_USEC)
/*
* Concepts and ideas behind the menu governor
@@ -120,14 +110,14 @@ struct menu_device {
int needs_update;
int tick_wakeup;
- unsigned int next_timer_us;
+ u64 next_timer_ns;
unsigned int bucket;
unsigned int correction_factor[BUCKETS];
unsigned int intervals[INTERVALS];
int interval_ptr;
};
-static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
{
int bucket = 0;
@@ -140,15 +130,15 @@ static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters
if (nr_iowaiters)
bucket = BUCKETS/2;
- if (duration < 10)
+ if (duration_ns < 10ULL * NSEC_PER_USEC)
return bucket;
- if (duration < 100)
+ if (duration_ns < 100ULL * NSEC_PER_USEC)
return bucket + 1;
- if (duration < 1000)
+ if (duration_ns < 1000ULL * NSEC_PER_USEC)
return bucket + 2;
- if (duration < 10000)
+ if (duration_ns < 10000ULL * NSEC_PER_USEC)
return bucket + 3;
- if (duration < 100000)
+ if (duration_ns < 100000ULL * NSEC_PER_USEC)
return bucket + 4;
return bucket + 5;
}
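which_bucket() keeps its decade thresholds, now expressed in nanoseconds, with a BUCKETS/2 offset when I/O waiters are present. A toy run over the boundaries:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define BUCKETS 12

static int which_bucket(uint64_t duration_ns, unsigned long nr_iowaiters)
{
	int bucket = nr_iowaiters ? BUCKETS / 2 : 0;

	if (duration_ns < 10 * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100 * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000 * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000 * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000 * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

int main(void)
{
	printf("%d\n", which_bucket(5 * NSEC_PER_USEC, 0));     /* 0 */
	printf("%d\n", which_bucket(50000 * NSEC_PER_USEC, 0)); /* 4 */
	printf("%d\n", which_bucket(50000 * NSEC_PER_USEC, 1)); /* 10 */
	return 0;
}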
@@ -276,13 +266,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
- int i;
- int idx;
- unsigned int interactivity_req;
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
unsigned int predicted_us;
+ u64 predicted_ns;
+ u64 interactivity_req;
unsigned long nr_iowaiters;
ktime_t delta_next;
+ int i, idx;
if (data->needs_update) {
menu_update(drv, dev);
@@ -290,15 +280,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
/* determine the expected residency time, round up */
- data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
+ data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
nr_iowaiters = nr_iowait_cpu(dev->cpu);
- data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
+ data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
- ((data->next_timer_us < drv->states[1].target_residency ||
- latency_req < drv->states[1].exit_latency) &&
- !drv->states[0].disabled && !dev->states_usage[0].disable)) {
+ ((data->next_timer_ns < drv->states[1].target_residency_ns ||
+ latency_req < drv->states[1].exit_latency_ns) &&
+ !dev->states_usage[0].disable)) {
/*
* In this case state[0] will be used no matter what, so return
 * it right away and keep the tick running if state[0] is a
 * polling one.
 */
 *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
 return 0;
return 0;
}
- /*
- * Force the result of multiplication to be 64 bits even if both
- * operands are 32 bits.
- * Make sure to round up for half microseconds.
- */
- predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
- data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
- /*
- * Use the lowest expected idle interval to pick the idle state.
- */
- predicted_us = min(predicted_us, get_typical_interval(data, predicted_us));
+ /* Round up the result for half microseconds. */
+ predicted_us = div_u64(data->next_timer_ns *
+ data->correction_factor[data->bucket] +
+ (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
+ RESOLUTION * DECAY * NSEC_PER_USEC);
+ /* Use the lowest expected idle interval to pick the idle state. */
+ predicted_ns = (u64)min(predicted_us,
+ get_typical_interval(data, predicted_us)) *
+ NSEC_PER_USEC;
if (tick_nohz_tick_stopped()) {
/*
@@ -330,14 +317,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* the known time till the closest timer event for the idle
* state selection.
*/
- if (predicted_us < TICK_USEC)
- predicted_us = ktime_to_us(delta_next);
+ if (predicted_ns < TICK_NSEC)
+ predicted_ns = delta_next;
} else {
/*
* Use the performance multiplier and the user-configurable
* latency_req to determine the maximum exit latency.
*/
- interactivity_req = predicted_us / performance_multiplier(nr_iowaiters);
+ interactivity_req = div64_u64(predicted_ns,
+ performance_multiplier(nr_iowaiters));
if (latency_req > interactivity_req)
latency_req = interactivity_req;
}
@@ -349,27 +337,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
idx = -1;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable)
+ if (dev->states_usage[i].disable)
continue;
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency > predicted_us) {
+ if (s->target_residency_ns > predicted_ns) {
/*
* Use a physical idle state, not busy polling, unless
* a timer is going to trigger soon enough.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency <= latency_req &&
- s->target_residency <= data->next_timer_us) {
- predicted_us = s->target_residency;
+ s->exit_latency_ns <= latency_req &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
idx = i;
break;
}
- if (predicted_us < TICK_USEC)
+ if (predicted_ns < TICK_NSEC)
break;
if (!tick_nohz_tick_stopped()) {
@@ -379,7 +366,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick in that case and let the governor run
* again in the next iteration of the loop.
*/
- predicted_us = drv->states[idx].target_residency;
+ predicted_ns = drv->states[idx].target_residency_ns;
break;
}
@@ -389,13 +376,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* closest timer event, select this one to avoid getting
* stuck in the shallow one for too long.
*/
- if (drv->states[idx].target_residency < TICK_USEC &&
- s->target_residency <= ktime_to_us(delta_next))
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_next)
idx = i;
return idx;
}
- if (s->exit_latency > latency_req)
+ if (s->exit_latency_ns > latency_req)
break;
idx = i;
@@ -409,12 +396,10 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- predicted_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
- unsigned int delta_next_us = ktime_to_us(delta_next);
-
+ predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
- if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
+ if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
/*
* The tick is not going to be stopped and the target
* residency of the state to be returned is not within
@@ -422,12 +407,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* tick, so try to correct that.
*/
for (i = idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled ||
- dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
idx = i;
- if (drv->states[i].target_residency <= delta_next_us)
+ if (drv->states[i].target_residency_ns <= delta_next)
break;
}
}
@@ -463,7 +447,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
struct menu_device *data = this_cpu_ptr(&menu_devices);
int last_idx = dev->last_state_idx;
struct cpuidle_state *target = &drv->states[last_idx];
- unsigned int measured_us;
+ u64 measured_ns;
unsigned int new_factor;
/*
@@ -481,7 +465,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* assume the state was never reached and the exit latency is 0.
*/
- if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+ if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
/*
* The nohz code said that there wouldn't be any events within
* the tick boundary (if the tick was stopped), but the idle
@@ -491,7 +475,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* have been idle long (but not forever) to help the idle
* duration predictor do a better job next time.
*/
- measured_us = 9 * MAX_INTERESTING / 10;
+ measured_ns = 9 * MAX_INTERESTING / 10;
} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
dev->poll_time_limit) {
/*
@@ -501,28 +485,29 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the CPU might have been woken up from idle by the next timer.
* Assume that to be the case.
*/
- measured_us = data->next_timer_us;
+ measured_ns = data->next_timer_ns;
} else {
/* measured value */
- measured_us = dev->last_residency;
+ measured_ns = dev->last_residency_ns;
/* Deduct exit latency */
- if (measured_us > 2 * target->exit_latency)
- measured_us -= target->exit_latency;
+ if (measured_ns > 2 * target->exit_latency_ns)
+ measured_ns -= target->exit_latency_ns;
else
- measured_us /= 2;
+ measured_ns /= 2;
}
/* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
+ if (measured_ns > data->next_timer_ns)
+ measured_ns = data->next_timer_ns;
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
new_factor -= new_factor / DECAY;
- if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
- new_factor += RESOLUTION * measured_us / data->next_timer_us;
+ if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
+ new_factor += div64_u64(RESOLUTION * measured_ns,
+ data->next_timer_ns);
else
/*
* we were idle so long that we count it as a perfect
@@ -542,7 +527,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
/* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = measured_us;
+ data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}
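The correction factor is an exponentially decaying average of measured/predicted ratios kept in RESOLUTION * DECAY fixed point. One update step, worked in user-space arithmetic:

#include <stdio.h>
#include <stdint.h>

#define RESOLUTION 1024
#define DECAY 8
#define NSEC_PER_USEC 1000ULL

int main(void)
{
	uint64_t next_timer_ns = 1000 * NSEC_PER_USEC; /* predicted sleep: 1 ms */
	uint64_t measured_ns = 250 * NSEC_PER_USEC;    /* actually idled 250 us */
	uint64_t factor = RESOLUTION * DECAY;          /* start at unity */
	uint64_t predicted_us;

	/* decay the old estimate, then mix in the new observation */
	factor -= factor / DECAY;
	factor += RESOLUTION * measured_ns / next_timer_ns;

	/* the next prediction, rounded to the nearest microsecond */
	predicted_us = (next_timer_ns * factor +
			(RESOLUTION * DECAY * NSEC_PER_USEC) / 2) /
		       (RESOLUTION * DECAY * NSEC_PER_USEC);

	printf("factor=%llu predicted=%llu us\n",
	       (unsigned long long)factor, (unsigned long long)predicted_us);
	return 0;
}

One over-prediction pulls the next estimate down to about 906 us here; repeated short sleeps keep shrinking it.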
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index b5a0e498f798..de7e706efd46 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -104,7 +104,7 @@ struct teo_cpu {
u64 sleep_length_ns;
struct teo_idle_state states[CPUIDLE_STATE_MAX];
int interval_idx;
- unsigned int intervals[INTERVALS];
+ u64 intervals[INTERVALS];
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -117,9 +117,8 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns);
int i, idx_hit = -1, idx_timer = -1;
- unsigned int measured_us;
+ u64 measured_ns;
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
/*
@@ -127,23 +126,28 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* enough to the closest timer event expected at the idle state
* selection time to be discarded.
*/
- measured_us = UINT_MAX;
+ measured_ns = U64_MAX;
} else {
- unsigned int lat;
+ u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
- lat = drv->states[dev->last_state_idx].exit_latency;
-
- measured_us = ktime_to_us(cpu_data->time_span_ns);
+ /*
+ * The computations below are to determine whether or not the
+ * (saved) time till the next timer event and the measured idle
+ * duration fall into the same "bin", so use last_residency_ns
+ * for that instead of time_span_ns which includes the cpuidle
+ * overhead.
+ */
+ measured_ns = dev->last_residency_ns;
/*
* The delay between the wakeup and the first instruction
* executed by the CPU is not likely to be worst-case every
* time, so take 1/2 of the exit latency as a very rough
* approximation of the average of it.
*/
- if (measured_us >= lat)
- measured_us -= lat / 2;
+ if (measured_ns >= lat_ns)
+ measured_ns -= lat_ns / 2;
else
- measured_us /= 2;
+ measured_ns /= 2;
}
/*
@@ -155,9 +159,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT;
- if (drv->states[i].target_residency <= sleep_length_us) {
+ if (drv->states[i].target_residency_ns <= cpu_data->sleep_length_ns) {
idx_timer = i;
- if (drv->states[i].target_residency <= measured_us)
+ if (drv->states[i].target_residency_ns <= measured_ns)
idx_hit = i;
}
}
@@ -193,30 +197,35 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* Save idle duration values corresponding to non-timer wakeups for
* pattern detection.
*/
- cpu_data->intervals[cpu_data->interval_idx++] = measured_us;
+ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns;
 if (cpu_data->interval_idx >= INTERVALS)
cpu_data->interval_idx = 0;
}
+static bool teo_time_ok(u64 interval_ns)
+{
+ return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
+}
+
/**
* teo_find_shallower_state - Find shallower idle state matching given duration.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
- * @duration_us: Idle duration value to match.
+ * @duration_ns: Idle duration value to match.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- unsigned int duration_us)
+ u64 duration_ns)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (drv->states[i].disabled || dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable)
continue;
state_idx = i;
- if (drv->states[i].target_residency <= duration_us)
+ if (drv->states[i].target_residency_ns <= duration_ns)
break;
}
return state_idx;
@@ -232,9 +241,10 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
- int latency_req = cpuidle_governor_latency_req(dev->cpu);
- unsigned int duration_us, count;
- int max_early_idx, constraint_idx, idx, i;
+ s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ u64 duration_ns;
+ unsigned int hits, misses, early_hits;
+ int max_early_idx, prev_max_early_idx, constraint_idx, idx, i;
ktime_t delta_tick;
if (dev->last_state_idx >= 0) {
@@ -244,50 +254,92 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
cpu_data->time_span_ns = local_clock();
- cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick);
- duration_us = ktime_to_us(cpu_data->sleep_length_ns);
+ duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+ cpu_data->sleep_length_ns = duration_ns;
- count = 0;
+ hits = 0;
+ misses = 0;
+ early_hits = 0;
max_early_idx = -1;
+ prev_max_early_idx = -1;
constraint_idx = drv->state_count;
idx = -1;
for (i = 0; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
- struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disabled || su->disable) {
+ if (dev->states_usage[i].disable) {
+ /*
+ * Ignore disabled states with target residencies beyond
+ * the anticipated idle duration.
+ */
+ if (s->target_residency_ns > duration_ns)
+ continue;
+
+ /*
+ * This state is disabled, so the range of idle duration
+ * values corresponding to it is covered by the current
+ * candidate state, but still the "hits" and "misses"
+ * metrics of the disabled state need to be used to
+ * decide whether or not the state covering the range in
+ * question is good enough.
+ */
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
+
+ if (early_hits >= cpu_data->states[i].early_hits ||
+ idx < 0)
+ continue;
+
/*
- * If the "early hits" metric of a disabled state is
- * greater than the current maximum, it should be taken
- * into account, because it would be a mistake to select
- * a deeper state with lower "early hits" metric. The
- * index cannot be changed to point to it, however, so
- * just increase the max count alone and let the index
- * still point to a shallower idle state.
+ * If the current candidate state has been the one with
+ * the maximum "early hits" metric so far, the "early
+ * hits" metric of the disabled state replaces the
+ * current "early hits" count to avoid selecting a
+ * deeper state with lower "early hits" metric.
*/
- if (max_early_idx >= 0 &&
- count < cpu_data->states[i].early_hits)
- count = cpu_data->states[i].early_hits;
+ if (max_early_idx == idx) {
+ early_hits = cpu_data->states[i].early_hits;
+ continue;
+ }
+
+ /*
+ * The current candidate state is closer to the disabled
+ * one than the current maximum "early hits" state, so
+ * replace the latter with it, but in case the maximum
+ * "early hits" state index has not been set so far,
+ * check if the current candidate state is not too
+ * shallow for that role.
+ */
+ if (teo_time_ok(drv->states[idx].target_residency_ns)) {
+ prev_max_early_idx = max_early_idx;
+ early_hits = cpu_data->states[i].early_hits;
+ max_early_idx = idx;
+ }
continue;
}
- if (idx < 0)
+ if (idx < 0) {
idx = i; /* first enabled state */
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
+ }
- if (s->target_residency > duration_us)
+ if (s->target_residency_ns > duration_ns)
break;
- if (s->exit_latency > latency_req && constraint_idx > i)
+ if (s->exit_latency_ns > latency_req && constraint_idx > i)
constraint_idx = i;
idx = i;
+ hits = cpu_data->states[i].hits;
+ misses = cpu_data->states[i].misses;
- if (count < cpu_data->states[i].early_hits &&
- !(tick_nohz_tick_stopped() &&
- drv->states[i].target_residency < TICK_USEC)) {
- count = cpu_data->states[i].early_hits;
+ if (early_hits < cpu_data->states[i].early_hits &&
+ teo_time_ok(drv->states[i].target_residency_ns)) {
+ prev_max_early_idx = max_early_idx;
+ early_hits = cpu_data->states[i].early_hits;
max_early_idx = i;
}
}
@@ -300,10 +352,19 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* "early hits" metric, but if that cannot be determined, just use the
* state selected so far.
*/
- if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses &&
- max_early_idx >= 0) {
- idx = max_early_idx;
- duration_us = drv->states[idx].target_residency;
+ if (hits <= misses) {
+ /*
+ * The current candidate state is not suitable, so take the one
+ * whose "early hits" metric is the maximum for the range of
+ * shallower states.
+ */
+ if (idx == max_early_idx)
+ max_early_idx = prev_max_early_idx;
+
+ if (max_early_idx >= 0) {
+ idx = max_early_idx;
+ duration_ns = drv->states[idx].target_residency_ns;
+ }
}
/*
@@ -316,18 +377,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx < 0) {
idx = 0; /* No states enabled. Must use 0. */
} else if (idx > 0) {
+ unsigned int count = 0;
u64 sum = 0;
- count = 0;
-
/*
* Count and sum the most recent idle duration values less than
* the current expected idle duration value.
*/
for (i = 0; i < INTERVALS; i++) {
- unsigned int val = cpu_data->intervals[i];
+ u64 val = cpu_data->intervals[i];
- if (val >= duration_us)
+ if (val >= duration_ns)
continue;
count++;
@@ -339,17 +399,17 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* values are in the interesting range.
*/
if (count > INTERVALS / 2) {
- unsigned int avg_us = div64_u64(sum, count);
+ u64 avg_ns = div64_u64(sum, count);
/*
* Avoid spending too much time in an idle state that
* would be too shallow.
*/
- if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
- duration_us = avg_us;
- if (drv->states[idx].target_residency > avg_us)
+ if (teo_time_ok(avg_ns)) {
+ duration_ns = avg_ns;
+ if (drv->states[idx].target_residency_ns > avg_ns)
idx = teo_find_shallower_state(drv, dev,
- idx, avg_us);
+ idx, avg_ns);
}
}
}
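
A side note on the averaging step above: the candidate duration is only overridden when more than half of the recorded intervals fall below the current expected value, in which case their average becomes the new expected idle duration. A self-contained user-space sketch of that filter (INTERVALS and the sample values are illustrative only, not taken from the governor):

    #include <stdint.h>
    #include <stdio.h>

    #define INTERVALS 8

    int main(void)
    {
        uint64_t intervals[INTERVALS] = {
            100, 200, 150, 90000, 120, 80, 170, 60000 /* ns */
        };
        uint64_t duration_ns = 50000, sum = 0;
        unsigned int count = 0;

        for (unsigned int i = 0; i < INTERVALS; i++) {
            if (intervals[i] >= duration_ns)
                continue;
            count++;
            sum += intervals[i];
        }

        /* trust the average only if most samples are in range */
        if (count > INTERVALS / 2)
            duration_ns = sum / count;

        printf("expected idle duration: %llu ns\n",
               (unsigned long long)duration_ns);
        return 0;
    }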
@@ -359,9 +419,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* expected idle duration is shorter than the tick period length.
*/
if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) {
- unsigned int delta_tick_us = ktime_to_us(delta_tick);
-
+ duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
*stop_tick = false;
/*
@@ -370,8 +428,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* till the closest timer including the tick, try to correct
* that.
*/
- if (idx > 0 && drv->states[idx].target_residency > delta_tick_us)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us);
+ if (idx > 0 && drv->states[idx].target_residency_ns > delta_tick)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
}
return idx;
@@ -415,7 +473,7 @@ static int teo_enable_device(struct cpuidle_driver *drv,
memset(cpu_data, 0, sizeof(*cpu_data));
for (i = 0; i < INTERVALS; i++)
- cpu_data->intervals[i] = UINT_MAX;
+ cpu_data->intervals[i] = U64_MAX;
return 0;
}
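
Worth noting about the selection rework as a whole: the governor now tracks both the current and the previous maximum of the "early hits" metric, so that when the candidate state turns out to be unsuitable (hits <= misses) and happens to be the maximum itself, selection can fall back to the runner-up. A minimal user-space sketch of that two-level bookkeeping (the table contents, the pretend candidate and the always-true time_ok() stand-in are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSTATES 4

    /* stand-in for teo_time_ok(): accept any residency in this sketch */
    static bool time_ok(uint64_t residency_ns)
    {
        (void)residency_ns;
        return true;
    }

    int main(void)
    {
        unsigned int early_hits_tbl[NSTATES] = { 3, 9, 7, 2 };
        uint64_t residency_ns[NSTATES] = { 1000, 5000, 20000, 90000 };
        unsigned int early_hits = 0;
        int idx = NSTATES - 1; /* pretend the deepest state is the candidate */
        int max_early_idx = -1, prev_max_early_idx = -1;
        bool unsuitable = true; /* e.g. hits <= misses */

        for (int i = 0; i < NSTATES; i++) {
            if (early_hits < early_hits_tbl[i] && time_ok(residency_ns[i])) {
                /* remember the runner-up before updating the maximum */
                prev_max_early_idx = max_early_idx;
                early_hits = early_hits_tbl[i];
                max_early_idx = i;
            }
        }

        if (unsuitable) {
            /* if the candidate is the maximum itself, use the runner-up */
            if (idx == max_early_idx)
                max_early_idx = prev_max_early_idx;
            if (max_early_idx >= 0)
                idx = max_early_idx;
        }

        printf("selected state index: %d\n", idx);
        return 0;
    }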
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index c8fa5f41dfc4..9f1ace9c53da 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -49,6 +49,8 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
state->exit_latency = 0;
state->target_residency = 0;
+ state->exit_latency_ns = 0;
+ state->target_residency_ns = 0;
state->power_usage = -1;
state->enter = poll_idle;
state->disabled = false;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 2bb2683b493c..38ef770be90d 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -255,25 +255,6 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%u\n", state->_name);\
}
-#define define_store_state_ull_function(_name) \
-static ssize_t store_state_##_name(struct cpuidle_state *state, \
- struct cpuidle_state_usage *state_usage, \
- const char *buf, size_t size) \
-{ \
- unsigned long long value; \
- int err; \
- if (!capable(CAP_SYS_ADMIN)) \
- return -EPERM; \
- err = kstrtoull(buf, 0, &value); \
- if (err) \
- return err; \
- if (value) \
- state_usage->_name = 1; \
- else \
- state_usage->_name = 0; \
- return size; \
-}
-
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
@@ -292,18 +273,60 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%s\n", state->_name);\
}
-define_show_state_function(exit_latency)
-define_show_state_function(target_residency)
+#define define_show_state_time_function(_name) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+}
+
+define_show_state_time_function(exit_latency)
+define_show_state_time_function(target_residency)
define_show_state_function(power_usage)
define_show_state_ull_function(usage)
-define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
-define_show_state_ull_function(disable)
-define_store_state_ull_function(disable)
define_show_state_ull_function(above)
define_show_state_ull_function(below)
+static ssize_t show_state_time(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+}
+
+static ssize_t show_state_disable(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
+}
+
+static ssize_t store_state_disable(struct cpuidle_state *state,
+ struct cpuidle_state_usage *state_usage,
+ const char *buf, size_t size)
+{
+ unsigned int value;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = kstrtouint(buf, 0, &value);
+ if (err)
+ return err;
+
+ if (value)
+ state_usage->disable |= CPUIDLE_STATE_DISABLED_BY_USER;
+ else
+ state_usage->disable &= ~CPUIDLE_STATE_DISABLED_BY_USER;
+
+ return size;
+}
+
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
define_one_state_ro(latency, show_state_exit_latency);
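
The store/show rework above turns the sysfs 'disable' attribute from a bare 0/1 value into one bit of a flags word, so that user-space disabling can coexist with other disable reasons without clobbering them. A self-contained model of the set/clear/report pattern (the driver-owned second bit is assumed here purely for illustration):

    #include <stdio.h>

    #define STATE_DISABLED_BY_USER   (1 << 0)
    #define STATE_DISABLED_BY_DRIVER (1 << 1) /* hypothetical second reason */

    static unsigned int disable_flags;

    static void store_disable(unsigned int value)
    {
        if (value)
            disable_flags |= STATE_DISABLED_BY_USER;
        else
            disable_flags &= ~STATE_DISABLED_BY_USER;
    }

    static unsigned int show_disable(void)
    {
        /* report only the user-controlled bit, as the sysfs show does */
        return disable_flags & STATE_DISABLED_BY_USER;
    }

    int main(void)
    {
        disable_flags |= STATE_DISABLED_BY_DRIVER;
        store_disable(1);
        printf("disable=%u flags=0x%x\n", show_disable(), disable_flags);
        store_disable(0); /* the driver-owned bit survives the clear */
        printf("disable=%u flags=0x%x\n", show_disable(), disable_flags);
        return 0;
    }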
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 43ed1b621718..91eb768d4221 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -289,6 +289,7 @@ config CRYPTO_DEV_TALITOS
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
select CRYPTO_HASH
+ select CRYPTO_LIB_DES
select HW_RANDOM
depends on FSL_SOC
help
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index dc1eb97d57f7..62b04e19067c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -179,14 +179,14 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
- if (!qp_ctx->c_in_pool) {
+ if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
goto err_free_req_list;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
SEC_SGL_SGE_NR);
- if (!qp_ctx->c_out_pool) {
+ if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
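
The check above changes because hisi_acc_create_sgl_pool() reports failure via ERR_PTR() encoding rather than NULL, so the old NULL test let error pointers through. A user-space model of the <linux/err.h> convention (re-implemented here only for illustration; the kernel's versions differ in detail):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* error pointers live in the top MAX_ERRNO bytes of the space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *create_pool(int fail)
    {
        return fail ? ERR_PTR(-12 /* ENOMEM */)
                    : (void *)(uintptr_t)0x1000;
    }

    int main(void)
    {
        void *pool = create_pool(1);

        if (IS_ERR(pool)) /* a NULL check would miss this */
            printf("pool creation failed: %ld\n", PTR_ERR(pool));
        return 0;
    }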
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index abc7a7f64d64..ef0e482ee04f 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -68,7 +68,7 @@ static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
static const struct file_operations adf_ctl_ops = {
.owner = THIS_MODULE,
.unlocked_ioctl = adf_ctl_ioctl,
- .compat_ioctl = adf_ctl_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
struct adf_ctl_drv_info {
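
Pointing .compat_ioctl at the native handler, as the old line did, skips the pointer translation that 32-bit callers need on some 64-bit architectures; the new compat_ptr_ioctl() helper applies compat_ptr() to the argument before calling the native ioctl. A rough user-space model of what that translation amounts to (on most architectures it narrows the value to the low 32 bits; s390 additionally clears a tag bit — simplified away here):

    #include <stdint.h>
    #include <stdio.h>

    /* model: a 32-bit task passes a 32-bit pointer in a 64-bit register */
    static uint64_t compat_ptr_model(uint64_t arg)
    {
        return arg & 0xffffffffULL;
    }

    int main(void)
    {
        uint64_t raw = 0xdeadbeef00123456ULL; /* garbage in the high bits */

        printf("translated arg = 0x%llx\n",
               (unsigned long long)compat_ptr_model(raw));
        return 0;
    }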
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index f33c73e4af41..3b6c06f07326 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -32,19 +32,36 @@ config DEV_DAX_PMEM
Say M if unsure
+config DEV_DAX_HMEM
+ tristate "HMEM DAX: direct access to 'specific purpose' memory"
+ depends on EFI_SOFT_RESERVE
+ default DEV_DAX
+ help
+ EFI 2.8 platforms, and others, may advertise 'specific purpose'
+ memory, for example a high bandwidth memory pool. The
+ indication from platform firmware is meant to reserve the
+ memory from typical usage by default. This driver creates
+ device-dax instances for these memory ranges, which also
+ makes it possible to assign them to the DEV_DAX_KMEM
+ driver to override the reservation and add them to the
+ kernel "System RAM" pool.
+
+ Say M if unsure.
+
config DEV_DAX_KMEM
tristate "KMEM DAX: volatile-use of persistent memory"
default DEV_DAX
depends on DEV_DAX
depends on MEMORY_HOTPLUG # for add_memory() and friends
help
- Support access to persistent memory as if it were RAM. This
- allows easier use of persistent memory by unmodified
- applications.
+ Support access to persistent, or other performance-differentiated,
+ memory as if it were System RAM. This allows
+ easier use of persistent memory by unmodified applications, or
+ adds core kernel memory services to heterogeneous memory types
+ (HMEM) marked "reserved" by platform firmware.
To use this feature, a DAX device must be unbound from the
- device_dax driver (PMEM DAX) and bound to this kmem driver
- on each boot.
+ device_dax driver and bound to this kmem driver on each boot.
Say N if unsure.
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 81f7d54dadfb..80065b38b3c4 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -2,9 +2,11 @@
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
+obj-$(CONFIG_DEV_DAX_HMEM) += dax_hmem.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
+dax_hmem-y := hmem.o
obj-y += pmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 8fafbeab510a..46e46047a1f7 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -227,7 +227,7 @@ static void dax_region_unregister(void *region)
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align,
- unsigned long pfn_flags)
+ unsigned long long pfn_flags)
{
struct dax_region *dax_region;
@@ -309,7 +309,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", dev_dax_resource(dev_dax));
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -322,6 +322,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev_to_node(dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -329,8 +336,8 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
return 0;
- if (a == &dev_attr_resource.attr)
- return 0400;
+ if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
+ return 0;
return a->mode;
}
@@ -339,6 +346,7 @@ static struct attribute *dev_dax_attributes[] = {
&dev_attr_size.attr,
&dev_attr_target_node.attr,
&dev_attr_resource.attr,
+ &dev_attr_numa_node.attr,
NULL,
};
@@ -373,6 +381,11 @@ static void dev_dax_release(struct device *dev)
kfree(dev_dax);
}
+static const struct device_type dev_dax_type = {
+ .release = dev_dax_release,
+ .groups = dax_attribute_groups,
+};
+
static void unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -430,8 +443,7 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
else
dev->class = dax_class;
dev->parent = parent;
- dev->groups = dax_attribute_groups;
- dev->release = dev_dax_release;
+ dev->type = &dev_dax_type;
dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = device_add(dev);
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 8619e3299943..9e4eba67e8b9 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -11,7 +11,7 @@ struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct resource *res, int target_node, unsigned int align,
- unsigned long flags);
+ unsigned long long flags);
enum dev_dax_subsys {
DEV_DAX_BUS,
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 6ccca3b890d6..3107ce80e809 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -32,7 +32,7 @@ struct dax_region {
struct device *dev;
unsigned int align;
struct resource res;
- unsigned long pfn_flags;
+ unsigned long long pfn_flags;
};
/**
diff --git a/drivers/dax/hmem.c b/drivers/dax/hmem.c
new file mode 100644
index 000000000000..fe7214daf62e
--- /dev/null
+++ b/drivers/dax/hmem.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/platform_device.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include "bus.h"
+
+static int dax_hmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_pagemap pgmap = { };
+ struct dax_region *dax_region;
+ struct memregion_info *mri;
+ struct dev_dax *dev_dax;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ mri = dev->platform_data;
+ memcpy(&pgmap.res, res, sizeof(*res));
+
+ dax_region = alloc_dax_region(dev, pdev->id, res, mri->target_node,
+ PMD_SIZE, PFN_DEV|PFN_MAP);
+ if (!dax_region)
+ return -ENOMEM;
+
+ dev_dax = devm_create_dev_dax(dax_region, 0, &pgmap);
+ if (IS_ERR(dev_dax))
+ return PTR_ERR(dev_dax);
+
+ /* child dev_dax instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
+ return 0;
+}
+
+static int dax_hmem_remove(struct platform_device *pdev)
+{
+ /* devm handles teardown */
+ return 0;
+}
+
+static struct platform_driver dax_hmem_driver = {
+ .probe = dax_hmem_probe,
+ .remove = dax_hmem_remove,
+ .driver = {
+ .name = "hmem",
+ },
+};
+
+module_platform_driver(dax_hmem_driver);
+
+MODULE_ALIAS("platform:hmem*");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 6eb6dfdf19bf..2bedf8414fff 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -25,20 +25,20 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return ERR_CAST(ndns);
- nsio = to_nd_namespace_io(&ndns->dev);
/* parse the 'pfn' info block via ->rw_bytes */
- rc = devm_nsio_enable(dev, nsio);
+ rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
if (rc)
return ERR_PTR(rc);
rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
if (rc)
return ERR_PTR(rc);
- devm_nsio_disable(dev, nsio);
+ devm_namespace_disable(dev, ndns);
/* reserve the metadata area, device-dax will reserve the data */
pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
+ nsio = to_nd_namespace_io(&ndns->dev);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve metadata\n");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 446490c9d635..f840e61e5a27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -160,6 +160,7 @@ int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
int lev, prev_lev, ret = 0;
unsigned long cur_time;
+ lockdep_assert_held(&devfreq->lock);
cur_time = jiffies;
/* Immediately exit if previous_freq is not initialized yet. */
@@ -409,6 +410,9 @@ static void devfreq_monitor(struct work_struct *work)
*/
void devfreq_monitor_start(struct devfreq *devfreq)
{
+ if (devfreq->governor->interrupt_driven)
+ return;
+
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
if (devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
@@ -426,6 +430,9 @@ EXPORT_SYMBOL(devfreq_monitor_start);
*/
void devfreq_monitor_stop(struct devfreq *devfreq)
{
+ if (devfreq->governor->interrupt_driven)
+ return;
+
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_stop);
@@ -453,6 +460,10 @@ void devfreq_monitor_suspend(struct devfreq *devfreq)
devfreq_update_status(devfreq, devfreq->previous_freq);
devfreq->stop_polling = true;
mutex_unlock(&devfreq->lock);
+
+ if (devfreq->governor->interrupt_driven)
+ return;
+
cancel_delayed_work_sync(&devfreq->work);
}
EXPORT_SYMBOL(devfreq_monitor_suspend);
@@ -473,11 +484,15 @@ void devfreq_monitor_resume(struct devfreq *devfreq)
if (!devfreq->stop_polling)
goto out;
+ if (devfreq->governor->interrupt_driven)
+ goto out_update;
+
if (!delayed_work_pending(&devfreq->work) &&
devfreq->profile->polling_ms)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
+out_update:
devfreq->last_stat_updated = jiffies;
devfreq->stop_polling = false;
@@ -509,6 +524,9 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
if (devfreq->stop_polling)
goto out;
+ if (devfreq->governor->interrupt_driven)
+ goto out;
+
/* if new delay is zero, stop polling */
if (!new_delay) {
mutex_unlock(&devfreq->lock);
@@ -625,7 +643,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq = find_device_devfreq(dev);
mutex_unlock(&devfreq_list_lock);
if (!IS_ERR(devfreq)) {
- dev_err(dev, "%s: Unable to create devfreq for the device.\n",
+ dev_err(dev, "%s: devfreq device already exists!\n",
__func__);
err = -EINVAL;
goto err_out;
@@ -1195,7 +1213,7 @@ static ssize_t available_governors_show(struct device *d,
* The devfreq with immutable governor (e.g., passive) shows
* only own governor.
*/
- if (df->governor->immutable) {
+ if (df->governor && df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
"%s ", df->governor_name);
/*
@@ -1397,12 +1415,17 @@ static ssize_t trans_stat_show(struct device *dev,
int i, j;
unsigned int max_state = devfreq->profile->max_state;
- if (!devfreq->stop_polling &&
- devfreq_update_status(devfreq, devfreq->previous_freq))
- return 0;
if (max_state == 0)
return sprintf(buf, "Not Supported.\n");
+ mutex_lock(&devfreq->lock);
+ if (!devfreq->stop_polling &&
+ devfreq_update_status(devfreq, devfreq->previous_freq)) {
+ mutex_unlock(&devfreq->lock);
+ return 0;
+ }
+ mutex_unlock(&devfreq->lock);
+
len = sprintf(buf, " From : To\n");
len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
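
Both hunks above enforce the same rule: devfreq_update_status() must run under devfreq->lock, which the new lockdep_assert_held() documents and the trans_stat_show() fix actually obeys. A user-space approximation of such a held-lock assertion using pthreads (single-threaded simplification: trylock succeeding is treated as proof that nobody held the lock; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void assert_held(pthread_mutex_t *m)
    {
        /* rough stand-in for lockdep_assert_held() */
        if (pthread_mutex_trylock(m) == 0) {
            pthread_mutex_unlock(m);
            fprintf(stderr, "update_status() called without the lock!\n");
        }
    }

    static int update_status(void)
    {
        assert_held(&lock);
        return 0; /* would update the transition statistics here */
    }

    int main(void)
    {
        pthread_mutex_lock(&lock);
        update_status();  /* correct: caller holds the lock */
        pthread_mutex_unlock(&lock);

        update_status();  /* buggy call, caught by the assertion */
        return 0;
    }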
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 87b42055e6bc..85c7a77bf3f0 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -673,7 +673,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
if (IS_ERR(edev[i])) {
- ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
return PTR_ERR(edev[i]);
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index bbe5ff9fcecf..dc7533ccc3db 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -31,6 +31,8 @@
* @name: Governor's name
* @immutable: Immutable flag for governor. If the value is 1,
* this governor is never changeable to another governor.
+ * @interrupt_driven: Devfreq core won't schedule polling work for this
+ * governor if the value is set to 1.
* @get_target_freq: Returns desired operating frequency for the device.
* Basically, get_target_freq will run
* devfreq_dev_profile.get_dev_status() to get the
@@ -49,6 +51,7 @@ struct devfreq_governor {
const char name[DEVFREQ_NAME_LEN];
const unsigned int immutable;
+ const unsigned int interrupt_driven;
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
int (*event_handler)(struct devfreq *devfreq,
unsigned int event, void *data);
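
With this flag, the devfreq core skips creating and managing the polling work for governors that update themselves from interrupts, as the tegra30 ACTMON governor later in this patch does. A self-contained model of the opt-out guard the core applies in its monitor start/stop/suspend/resume paths (struct and function names here are trimmed-down stand-ins, not the devfreq API):

    #include <stdbool.h>
    #include <stdio.h>

    /* trimmed-down model of struct devfreq_governor */
    struct governor {
        const char *name;
        bool interrupt_driven;
    };

    /* model of devfreq_monitor_start(): no-op for IRQ-driven governors */
    static void monitor_start(const struct governor *gov)
    {
        if (gov->interrupt_driven)
            return; /* the governor updates itself from its ISR */
        printf("scheduling polling work for %s\n", gov->name);
    }

    int main(void)
    {
        struct governor polling = { "simple_ondemand", false };
        struct governor irq_driven = { "tegra_actmon", true };

        monitor_start(&polling);
        monitor_start(&irq_driven);
        return 0;
    }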
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index a6ba75f4106d..0b65f89d74d5 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -11,11 +11,13 @@
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
#include "governor.h"
@@ -33,6 +35,8 @@
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
#define ACTMON_DEV_CTRL_ENB BIT(31)
+#define ACTMON_DEV_CTRL_STOP 0x00000000
+
#define ACTMON_DEV_UPPER_WMARK 0x4
#define ACTMON_DEV_LOWER_WMARK 0x8
#define ACTMON_DEV_INIT_AVG 0xc
@@ -68,6 +72,8 @@
#define KHZ 1000
+#define KHZ_MAX (ULONG_MAX / KHZ)
+
/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25
@@ -90,9 +96,10 @@ struct tegra_devfreq_device_config {
unsigned int boost_down_threshold;
/*
- * Threshold of activity (cycles) below which the CPU frequency isn't
- * to be taken into account. This is to avoid increasing the EMC
- * frequency when the CPU is very busy but not accessing the bus often.
+ * Threshold of activity (cycles translated to kHz) below which the
+ * CPU frequency isn't to be taken into account. This is to avoid
+ * increasing the EMC frequency when the CPU is very busy but not
+ * accessing the bus often.
*/
u32 avg_dependency_threshold;
};
@@ -102,7 +109,7 @@ enum tegra_actmon_device {
MCCPU,
};
-static struct tegra_devfreq_device_config actmon_device_configs[] = {
+static const struct tegra_devfreq_device_config actmon_device_configs[] = {
{
/* MCALL: All memory accesses (including from the CPUs) */
.offset = 0x1c0,
@@ -117,10 +124,10 @@ static struct tegra_devfreq_device_config actmon_device_configs[] = {
.offset = 0x200,
.irq_mask = 1 << 25,
.boost_up_coeff = 800,
- .boost_down_coeff = 90,
+ .boost_down_coeff = 40,
.boost_up_threshold = 27,
.boost_down_threshold = 10,
- .avg_dependency_threshold = 50000,
+ .avg_dependency_threshold = 16000, /* 16MHz in kHz units */
},
};
@@ -156,11 +163,16 @@ struct tegra_devfreq {
struct clk *emc_clock;
unsigned long max_freq;
unsigned long cur_freq;
- struct notifier_block rate_change_nb;
+ struct notifier_block clk_rate_change_nb;
+
+ struct delayed_work cpufreq_update_work;
+ struct notifier_block cpu_rate_change_nb;
struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
- int irq;
+ unsigned int irq;
+
+ bool started;
};
struct tegra_actmon_emc_ratio {
@@ -168,8 +180,8 @@ struct tegra_actmon_emc_ratio {
unsigned long emc_freq;
};
-static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
- { 1400000, ULONG_MAX },
+static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+ { 1400000, KHZ_MAX },
{ 1200000, 750000 },
{ 1100000, 600000 },
{ 1000000, 500000 },
@@ -199,18 +211,26 @@ static void device_writel(struct tegra_devfreq_device *dev, u32 val,
writel_relaxed(val, dev->regs + offset);
}
-static unsigned long do_percent(unsigned long val, unsigned int pct)
+static unsigned long do_percent(unsigned long long val, unsigned int pct)
{
- return val * pct / 100;
+ val = val * pct;
+ do_div(val, 100);
+
+ /*
+ * A high frequency combined with a large boost percentage and a
+ * long polling interval would overflow 32 bits when the watermarks
+ * are calculated, hence the clamp.
+ */
+ return min_t(u64, val, U32_MAX);
}
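
The widened do_percent() matters because val can now be as large as the frequency (kHz) times the polling interval (ms), with a boost percentage applied on top; the multiplication therefore happens in 64 bits and the result is clamped before it reaches a 32-bit watermark register. A self-contained sketch of the same overflow-safe calculation (do_div() is modeled with a plain 64-bit division here):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t do_percent(uint64_t val, unsigned int pct)
    {
        val = val * pct / 100; /* 64-bit intermediate, no wraparound */

        /* clamp: the watermark registers are only 32 bits wide */
        return val > UINT32_MAX ? UINT32_MAX : (uint32_t)val;
    }

    int main(void)
    {
        /* high freq (kHz) * long polling interval (ms), boosted by 800% */
        uint64_t val = 2100000ULL * 256;

        printf("watermark = %u\n", do_percent(val, 800));
        return 0;
    }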
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- u32 avg = dev->avg_count;
u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
- u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
+ u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
+ u32 avg;
+ avg = min(dev->avg_count, U32_MAX - band);
device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
avg = max(dev->avg_count, band);
@@ -220,7 +240,7 @@ static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
- u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
ACTMON_DEV_UPPER_WMARK);
@@ -229,12 +249,6 @@ static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
ACTMON_DEV_LOWER_WMARK);
}
-static void actmon_write_barrier(struct tegra_devfreq *tegra)
-{
- /* ensure the update has reached the ACTMON */
- readl(tegra->regs + ACTMON_GLB_STATUS);
-}
-
static void actmon_isr_device(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
@@ -256,10 +270,10 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- if (dev->boost_freq >= tegra->max_freq)
+ if (dev->boost_freq >= tegra->max_freq) {
+ dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
dev->boost_freq = tegra->max_freq;
- else
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
+ }
} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
/*
* new_boost = old_boost * down_coef
@@ -270,31 +284,22 @@ static void actmon_isr_device(struct tegra_devfreq *tegra,
dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
- if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
- dev->boost_freq = 0;
- else
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- }
-
- if (dev->config->avg_dependency_threshold) {
- if (dev->avg_count >= dev->config->avg_dependency_threshold)
- dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
- else if (dev->boost_freq == 0)
+ if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
+ dev->boost_freq = 0;
+ }
}
device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
-
- actmon_write_barrier(tegra);
}
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned long cpu_freq)
{
unsigned int i;
- struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+ const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
if (cpu_freq >= ratio->cpu_freq) {
@@ -308,25 +313,37 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
return 0;
}
+static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
+ struct tegra_devfreq_device *dev)
+{
+ unsigned int avg_sustain_coef;
+ unsigned long target_freq;
+
+ target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
+ avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
+ target_freq = do_percent(target_freq, avg_sustain_coef);
+
+ return target_freq;
+}
+
static void actmon_update_target(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
unsigned long cpu_freq = 0;
unsigned long static_cpu_emc_freq = 0;
- unsigned int avg_sustain_coef;
- if (dev->config->avg_dependency_threshold) {
- cpu_freq = cpufreq_get(0);
- static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- }
+ dev->target_freq = actmon_device_target_freq(tegra, dev);
- dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
- avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
- dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
- dev->target_freq += dev->boost_freq;
+ if (dev->config->avg_dependency_threshold &&
+ dev->config->avg_dependency_threshold <= dev->target_freq) {
+ cpu_freq = cpufreq_quick_get(0);
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
- if (dev->avg_count >= dev->config->avg_dependency_threshold)
+ dev->target_freq += dev->boost_freq;
dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
+ } else {
+ dev->target_freq += dev->boost_freq;
+ }
}
static irqreturn_t actmon_thread_isr(int irq, void *data)
@@ -354,8 +371,8 @@ static irqreturn_t actmon_thread_isr(int irq, void *data)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
- unsigned long action, void *ptr)
+static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
{
struct clk_notifier_data *data = ptr;
struct tegra_devfreq *tegra;
@@ -365,7 +382,7 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
if (action != POST_RATE_CHANGE)
return NOTIFY_OK;
- tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
+ tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
tegra->cur_freq = data->new_rate / KHZ;
@@ -375,7 +392,79 @@ static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
tegra_devfreq_update_wmark(tegra, dev);
}
- actmon_write_barrier(tegra);
+ return NOTIFY_OK;
+}
+
+static void tegra_actmon_delayed_update(struct work_struct *work)
+{
+ struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
+ cpufreq_update_work.work);
+
+ mutex_lock(&tegra->devfreq->lock);
+ update_devfreq(tegra->devfreq);
+ mutex_unlock(&tegra->devfreq->lock);
+}
+
+static unsigned long
+tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
+ unsigned int cpu_freq)
+{
+ struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
+ unsigned long static_cpu_emc_freq, dev_freq;
+
+ dev_freq = actmon_device_target_freq(tegra, actmon_dev);
+
+ /* check whether CPU's freq is taken into account at all */
+ if (dev_freq < actmon_dev->config->avg_dependency_threshold)
+ return 0;
+
+ static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
+
+ if (dev_freq >= static_cpu_emc_freq)
+ return 0;
+
+ return static_cpu_emc_freq;
+}
+
+static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ struct cpufreq_freqs *freqs = ptr;
+ struct tegra_devfreq *tegra;
+ unsigned long old, new, delay;
+
+ if (action != CPUFREQ_POSTCHANGE)
+ return NOTIFY_OK;
+
+ tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
+
+ /*
+ * Quickly check whether CPU frequency should be taken into account
+ * at all, without blocking CPUFreq's core.
+ */
+ if (mutex_trylock(&tegra->devfreq->lock)) {
+ old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
+ new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
+ mutex_unlock(&tegra->devfreq->lock);
+
+ /*
+ * If CPU's frequency shouldn't be taken into account at
+ * the moment, then there is no need to update the devfreq's
+ * state because ISR will re-check CPU's frequency on the
+ * next interrupt.
+ */
+ if (old == new)
+ return NOTIFY_OK;
+ }
+
+ /*
+ * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
+ * to allow asynchronous notifications. This means we can't block
+ * here for too long, otherwise CPUFreq's core will complain with a
+ * warning splat.
+ */
+ delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
+ schedule_delayed_work(&tegra->cpufreq_update_work, delay);
return NOTIFY_OK;
}
@@ -385,9 +474,12 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
{
u32 val = 0;
+ /* reset boosting on governor's restart */
+ dev->boost_freq = 0;
+
dev->target_freq = tegra->cur_freq;
- dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
+ dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
tegra_devfreq_update_avg_wmark(tegra, dev);
@@ -405,45 +497,116 @@ static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
- val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
val |= ACTMON_DEV_CTRL_ENB;
device_writel(dev, val, ACTMON_DEV_CTRL);
}
-static void tegra_actmon_start(struct tegra_devfreq *tegra)
+static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
{
+ struct tegra_devfreq_device *dev = tegra->devices;
unsigned int i;
- disable_irq(tegra->irq);
+ for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
+ device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
+ device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
+ ACTMON_DEV_INTR_STATUS);
+ }
+}
- actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
+static int tegra_actmon_resume(struct tegra_devfreq *tegra)
+{
+ unsigned int i;
+ int err;
+
+ if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+ return 0;
+
+ actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
ACTMON_GLB_PERIOD_CTRL);
+ /*
+ * CLK notifications are needed in order to reconfigure the upper
+ * consecutive watermark in accordance with the actual clock rate
+ * to avoid unnecessary upper interrupts.
+ */
+ err = clk_notifier_register(tegra->emc_clock,
+ &tegra->clk_rate_change_nb);
+ if (err) {
+ dev_err(tegra->devfreq->dev.parent,
+ "Failed to register rate change notifier\n");
+ return err;
+ }
+
+ tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
tegra_actmon_configure_device(tegra, &tegra->devices[i]);
- actmon_write_barrier(tegra);
+ /*
+ * We estimate the CPU's memory bandwidth requirement based on the
+ * amount of memory accesses and the system load, judging by the
+ * CPU's frequency. We also don't want to receive events about CPU
+ * frequency transitions while the governor is stopped, hence the
+ * notifier is registered dynamically.
+ */
+ err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (err) {
+ dev_err(tegra->devfreq->dev.parent,
+ "Failed to register rate change notifier: %d\n", err);
+ goto err_stop;
+ }
enable_irq(tegra->irq);
+
+ return 0;
+
+err_stop:
+ tegra_actmon_stop_devices(tegra);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+
+ return err;
}
-static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+static int tegra_actmon_start(struct tegra_devfreq *tegra)
{
- unsigned int i;
+ int ret = 0;
- disable_irq(tegra->irq);
+ if (!tegra->started) {
+ tegra->started = true;
- for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
- device_writel(&tegra->devices[i], 0x00000000, ACTMON_DEV_CTRL);
- device_writel(&tegra->devices[i], ACTMON_INTR_STATUS_CLEAR,
- ACTMON_DEV_INTR_STATUS);
+ ret = tegra_actmon_resume(tegra);
+ if (ret)
+ tegra->started = false;
}
- actmon_write_barrier(tegra);
+ return ret;
+}
- enable_irq(tegra->irq);
+static void tegra_actmon_pause(struct tegra_devfreq *tegra)
+{
+ if (!tegra->devfreq->profile->polling_ms || !tegra->started)
+ return;
+
+ disable_irq(tegra->irq);
+
+ cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ cancel_delayed_work_sync(&tegra->cpufreq_update_work);
+
+ tegra_actmon_stop_devices(tegra);
+
+ clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
+}
+
+static void tegra_actmon_stop(struct tegra_devfreq *tegra)
+{
+ tegra_actmon_pause(tegra);
+ tegra->started = false;
}
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
@@ -463,7 +626,7 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
rate = dev_pm_opp_get_freq(opp);
dev_pm_opp_put(opp);
- err = clk_set_min_rate(tegra->emc_clock, rate);
+ err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
if (err)
return err;
@@ -492,7 +655,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->private_data = tegra;
/* The below are to be used by the other governors */
- stat->current_frequency = cur_freq * KHZ;
+ stat->current_frequency = cur_freq;
actmon_dev = &tegra->devices[MCALL];
@@ -503,7 +666,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
stat->busy_time *= 100 / BUS_SATURATION_RATIO;
/* Number of cycles in a sampling period */
- stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
+ stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
stat->busy_time = min(stat->busy_time, stat->total_time);
@@ -511,7 +674,7 @@ static int tegra_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile tegra_devfreq_profile = {
- .polling_ms = 0,
+ .polling_ms = ACTMON_SAMPLING_PERIOD,
.target = tegra_devfreq_target,
.get_dev_status = tegra_devfreq_get_dev_status,
};
@@ -542,7 +705,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
target_freq = max(target_freq, dev->target_freq);
}
- *freq = target_freq * KHZ;
+ *freq = target_freq;
return 0;
}
@@ -551,11 +714,19 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
+ unsigned int *new_delay = data;
+ int ret = 0;
+
+ /*
+ * Couple the devfreq device with the governor early because it is
+ * needed as soon as the governor starts (used by the ISR).
+ */
+ tegra->devfreq = devfreq;
switch (event) {
case DEVFREQ_GOV_START:
devfreq_monitor_start(devfreq);
- tegra_actmon_start(tegra);
+ ret = tegra_actmon_start(tegra);
break;
case DEVFREQ_GOV_STOP:
@@ -563,6 +734,21 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
devfreq_monitor_stop(devfreq);
break;
+ case DEVFREQ_GOV_INTERVAL:
+ /*
+ * ACTMON hardware supports up to 256 milliseconds for the
+ * sampling period.
+ */
+ if (*new_delay > 256) {
+ ret = -EINVAL;
+ break;
+ }
+
+ tegra_actmon_pause(tegra);
+ devfreq_interval_update(devfreq, new_delay);
+ ret = tegra_actmon_resume(tegra);
+ break;
+
case DEVFREQ_GOV_SUSPEND:
tegra_actmon_stop(tegra);
devfreq_monitor_suspend(devfreq);
@@ -570,11 +756,11 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_RESUME:
devfreq_monitor_resume(devfreq);
- tegra_actmon_start(tegra);
+ ret = tegra_actmon_start(tegra);
break;
}
- return 0;
+ return ret;
}
static struct devfreq_governor tegra_devfreq_governor = {
@@ -582,14 +768,16 @@ static struct devfreq_governor tegra_devfreq_governor = {
.get_target_freq = tegra_governor_get_target,
.event_handler = tegra_governor_event_handler,
.immutable = true,
+ .interrupt_driven = true,
};
static int tegra_devfreq_probe(struct platform_device *pdev)
{
- struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
+ struct tegra_devfreq *tegra;
+ struct devfreq *devfreq;
unsigned int i;
- unsigned long rate;
+ long rate;
int err;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
@@ -618,12 +806,22 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return PTR_ERR(tegra->emc_clock);
}
- tegra->irq = platform_get_irq(pdev, 0);
- if (tegra->irq < 0) {
- err = tegra->irq;
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
return err;
}
+ tegra->irq = err;
+
+ irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
+
+ err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
+ actmon_thread_isr, IRQF_ONESHOT,
+ "tegra-devfreq", tegra);
+ if (err) {
+ dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
+ return err;
+ }
reset_control_assert(tegra->reset);
@@ -636,8 +834,13 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
reset_control_deassert(tegra->reset);
- tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
- tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+ rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+ if (rate < 0) {
+ dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
+ return rate;
+ }
+
+ tegra->max_freq = rate / KHZ;
for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
dev = tegra->devices + i;
@@ -648,7 +851,14 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
rate = clk_round_rate(tegra->emc_clock, rate);
- err = dev_pm_opp_add(&pdev->dev, rate, 0);
+ if (rate < 0) {
+ dev_err(&pdev->dev,
+ "Failed to round clock rate: %ld\n", rate);
+ err = rate;
+ goto remove_opps;
+ }
+
+ err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
goto remove_opps;
@@ -657,49 +867,33 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tegra);
- tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
- err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to register rate change notifier\n");
- goto remove_opps;
- }
+ tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
+ tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
+
+ INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
+ tegra_actmon_delayed_update);
err = devfreq_add_governor(&tegra_devfreq_governor);
if (err) {
dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
- goto unreg_notifier;
+ goto remove_opps;
}
tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
- tegra->devfreq = devfreq_add_device(&pdev->dev,
- &tegra_devfreq_profile,
- "tegra_actmon",
- NULL);
- if (IS_ERR(tegra->devfreq)) {
- err = PTR_ERR(tegra->devfreq);
- goto remove_governor;
- }
+ tegra_devfreq_profile.initial_freq /= KHZ;
- err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
- actmon_thread_isr, IRQF_ONESHOT,
- "tegra-devfreq", tegra);
- if (err) {
- dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
- goto remove_devfreq;
+ devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
+ "tegra_actmon", NULL);
+ if (IS_ERR(devfreq)) {
+ err = PTR_ERR(devfreq);
+ goto remove_governor;
}
return 0;
-remove_devfreq:
- devfreq_remove_device(tegra->devfreq);
-
remove_governor:
devfreq_remove_governor(&tegra_devfreq_governor);
-unreg_notifier:
- clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
-
remove_opps:
dev_pm_opp_remove_all_dynamic(&pdev->dev);
@@ -716,7 +910,6 @@ static int tegra_devfreq_remove(struct platform_device *pdev)
devfreq_remove_device(tegra->devfreq);
devfreq_remove_governor(&tegra_devfreq_governor);
- clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
dev_pm_opp_remove_all_dynamic(&pdev->dev);
reset_control_reset(tegra->reset);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 433d91d710e4..ce41cd9b758a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@@ -334,7 +334,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
kfree(name);
@@ -344,7 +344,7 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
dmabuf->name = name;
out_unlock:
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
return ret;
}
@@ -403,10 +403,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- mutex_lock(&dmabuf->lock);
+ dma_resv_lock(dmabuf->resv, NULL);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- mutex_unlock(&dmabuf->lock);
+ dma_resv_unlock(dmabuf->resv);
}
static const struct file_operations dma_buf_fops = {
@@ -415,9 +415,7 @@ static const struct file_operations dma_buf_fops = {
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
.unlocked_ioctl = dma_buf_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = dma_buf_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.show_fdinfo = dma_buf_show_fdinfo,
};
@@ -525,6 +523,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return ERR_PTR(-EINVAL);
}
+ if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ exp_info->ops->dynamic_mapping))
+ return ERR_PTR(-EINVAL);
+
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
@@ -645,10 +647,11 @@ void dma_buf_put(struct dma_buf *dmabuf)
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
- * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
* calls attach() of dma_buf_ops to allow device-specific attach functionality
- * @dmabuf: [in] buffer to attach device to.
- * @dev: [in] device to be attached.
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ * @dynamic_mapping: [in] calling convention for map/unmap
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -662,8 +665,9 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* accessible to @dev, and cannot be moved to a more suitable place. This is
* indicated with the error code -EBUSY.
*/
-struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev)
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping)
{
struct dma_buf_attachment *attach;
int ret;
@@ -677,24 +681,68 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
attach->dev = dev;
attach->dmabuf = dmabuf;
-
- mutex_lock(&dmabuf->lock);
+ attach->dynamic_mapping = dynamic_mapping;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}
+ dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
+ dma_resv_unlock(dmabuf->resv);
- mutex_unlock(&dmabuf->lock);
+ /* When either the importer or the exporter can't handle dynamic
+ * mappings, we cache the mapping here to avoid issues with the
+ * reservation object lock.
+ */
+ if (dma_buf_attachment_is_dynamic(attach) !=
+ dma_buf_is_dynamic(dmabuf)) {
+ struct sg_table *sgt;
+
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
+ sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ if (!sgt)
+ sgt = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_unlock;
+ }
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ attach->sgt = sgt;
+ attach->dir = DMA_BIDIRECTIONAL;
+ }
return attach;
err_attach:
kfree(attach);
- mutex_unlock(&dmabuf->lock);
return ERR_PTR(ret);
+
+err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return dma_buf_dynamic_attach(dmabuf, dev, false);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
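
The mapping is cached at attach time exactly when the importer and the exporter disagree on the dynamic-mapping calling convention; when both sides agree, mapping is deferred to dma_buf_map_attachment() under the reservation lock. A small truth-table sketch of that predicate (user-space model; dma_buf_is_dynamic() and dma_buf_attachment_is_dynamic() simply report each side's capability):

    #include <stdbool.h>
    #include <stdio.h>

    static bool must_cache_sgt(bool exporter_dynamic, bool importer_dynamic)
    {
        /* cache the mapping at attach time only when the two sides
         * use different calling conventions */
        return exporter_dynamic != importer_dynamic;
    }

    int main(void)
    {
        for (int e = 0; e <= 1; e++)
            for (int i = 0; i <= 1; i++)
                printf("exporter=%d importer=%d -> cache=%d\n",
                       e, i, must_cache_sgt(e, i));
        return 0;
    }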
@@ -711,15 +759,22 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
- if (attach->sgt)
+ if (attach->sgt) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- mutex_lock(&dmabuf->lock);
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+
+ dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
+ dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
- mutex_unlock(&dmabuf->lock);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -749,6 +804,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt) {
/*
* Two mappings with different directions for the same
@@ -761,6 +819,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
@@ -793,9 +854,15 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
if (attach->sgt == sg_table)
return;
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
@@ -1171,13 +1238,10 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
"size", "flags", "mode", "count", "ino");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
- ret = mutex_lock_interruptible(&buf_obj->lock);
- if (ret) {
- seq_puts(s,
- "\tERROR locking buffer object: skipping\n");
- continue;
- }
+ ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+ if (ret)
+ goto error_unlock;
seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
buf_obj->size,
@@ -1223,19 +1287,23 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
attach_count++;
}
+ dma_resv_unlock(buf_obj->resv);
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
size += buf_obj->size;
- mutex_unlock(&buf_obj->lock);
}
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
mutex_unlock(&db_list.lock);
return 0;
+
+error_unlock:
+ mutex_unlock(&db_list.lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 2c136aee3e79..052a41e2451c 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -273,6 +273,30 @@ void dma_fence_free(struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_fence_free);
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ bool was_set;
+
+ lockdep_assert_held(fence->lock);
+
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return false;
+
+ if (!was_set && fence->ops->enable_signaling) {
+ trace_dma_fence_enable_signal(fence);
+
+ if (!fence->ops->enable_signaling(fence)) {
+ dma_fence_signal_locked(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
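
The new helper centralizes the enable-signaling state machine that was previously open-coded in three places: atomically mark the ENABLE_SIGNAL bit, bail out if the fence already signaled, and call ->enable_signaling() only on the first transition, signaling the fence immediately if that callback refuses. A user-space model of the same first-caller-wins logic (C11 atomics stand in for test_and_set_bit(); the hw_ok flag stands in for the driver callback):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag enable_bit = ATOMIC_FLAG_INIT;
    static atomic_bool signaled;

    static bool enable_signaling(void)
    {
        /* only the first caller sees was_set == false */
        bool was_set = atomic_flag_test_and_set(&enable_bit);

        if (atomic_load(&signaled))
            return false;          /* already done: the -ENOENT path */

        if (!was_set) {
            /* first transition: hook up the hardware notification;
             * if that fails, signal the fence right away */
            bool hw_ok = true;     /* stand-in for ->enable_signaling() */
            if (!hw_ok) {
                atomic_store(&signaled, true);
                return false;
            }
        }
        return true;
    }

    int main(void)
    {
        printf("first  call: %d\n", enable_signaling());
        printf("second call: %d\n", enable_signaling());
        return 0;
    }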
+
/**
* dma_fence_enable_sw_signaling - enable signaling on fence
* @fence: the fence to enable
@@ -285,19 +309,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
- fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- spin_lock_irqsave(fence->lock, flags);
-
- if (!fence->ops->enable_signaling(fence))
- dma_fence_signal_locked(fence);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return;
- spin_unlock_irqrestore(fence->lock, flags);
- }
+ spin_lock_irqsave(fence->lock, flags);
+ __dma_fence_enable_signaling(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
@@ -331,7 +348,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
{
unsigned long flags;
int ret = 0;
- bool was_set;
if (WARN_ON(!fence || !func))
return -EINVAL;
@@ -343,25 +359,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
spin_lock_irqsave(fence->lock, flags);
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- ret = -ENOENT;
- else if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- ret = -ENOENT;
- }
- }
-
- if (!ret) {
+ if (__dma_fence_enable_signaling(fence)) {
cb->func = func;
list_add_tail(&cb->node, &fence->cb_list);
- } else
+ } else {
INIT_LIST_HEAD(&cb->node);
+ ret = -ENOENT;
+ }
+
spin_unlock_irqrestore(fence->lock, flags);
return ret;
@@ -461,7 +466,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
struct default_wait_cb cb;
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- bool was_set;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return ret;
@@ -473,21 +477,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
goto out;
}
- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &fence->flags);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!__dma_fence_enable_signaling(fence))
goto out;
- if (!was_set && fence->ops->enable_signaling) {
- trace_dma_fence_enable_signal(fence);
-
- if (!fence->ops->enable_signaling(fence)) {
- dma_fence_signal_locked(fence);
- goto out;
- }
- }
-
if (!timeout) {
ret = 0;
goto out;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 6713cfb1995c..348b3a9170fa 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -408,5 +408,5 @@ const struct file_operations sw_sync_debugfs_fops = {
.open = sw_sync_debugfs_open,
.release = sw_sync_debugfs_release,
.unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 25c5c071645b..76fb072c22dc 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -480,5 +480,5 @@ static const struct file_operations sync_file_fops = {
.release = sync_file_release,
.poll = sync_file_poll,
.unlocked_ioctl = sync_file_ioctl,
- .compat_ioctl = sync_file_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 7af874b69ffb..6fa1eba9d477 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -15,19 +15,19 @@ menuconfig DMADEVICES
be empty in some cases.
config DMADEVICES_DEBUG
- bool "DMA Engine debugging"
- depends on DMADEVICES != n
- help
- This is an option for use by developers; most people should
- say N here. This enables DMA engine core and driver debugging.
+ bool "DMA Engine debugging"
+ depends on DMADEVICES != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables DMA engine core and driver debugging.
config DMADEVICES_VDEBUG
- bool "DMA Engine verbose debugging"
- depends on DMADEVICES_DEBUG != n
- help
- This is an option for use by developers; most people should
- say N here. This enables deeper (more verbose) debugging of
- the DMA engine core and drivers.
+ bool "DMA Engine verbose debugging"
+ depends on DMADEVICES_DEBUG != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables deeper (more verbose) debugging of
+ the DMA engine core and drivers.
if DMADEVICES
@@ -215,28 +215,28 @@ config FSL_EDMA
This module can be found on Freescale Vybrid and LS-1 SoCs.
config FSL_QDMA
- tristate "NXP Layerscape qDMA engine support"
- depends on ARM || ARM64
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- select DMA_ENGINE_RAID
- select ASYNC_TX_ENABLE_CHANNEL_SWITCH
- help
- Support the NXP Layerscape qDMA engine with command queue and legacy mode.
- Channel virtualization is supported through enqueuing of DMA jobs to,
- or dequeuing DMA jobs from, different work queues.
- This module can be found on NXP Layerscape SoCs.
+ tristate "NXP Layerscape qDMA engine support"
+ depends on ARM || ARM64
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Support the NXP Layerscape qDMA engine with command queue and legacy mode.
+ Channel virtualization is supported through enqueuing of DMA jobs to,
+ or dequeuing DMA jobs from, different work queues.
+ This module can be found on NXP Layerscape SoCs.
The qdma driver only works on SoCs with a DPAA hardware block.
config FSL_RAID
- tristate "Freescale RAID engine Support"
- depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
- select DMA_ENGINE
- select DMA_ENGINE_RAID
- ---help---
- Enable support for Freescale RAID Engine. RAID Engine is
- available on some QorIQ SoCs (like P5020/P5040). It has
- the capability to offload memcpy, xor and pq computation
+ tristate "Freescale RAID engine Support"
+ depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ ---help---
+ Enable support for Freescale RAID Engine. RAID Engine is
+ available on some QorIQ SoCs (like P5020/P5040). It has
+ the capability to offload memcpy, xor and pq computation
for raid5/6.
config IMG_MDC_DMA
@@ -342,6 +342,26 @@ config MCF_EDMA
minimal intervention from a host processor.
This module can be found on Freescale ColdFire mcf5441x SoCs.
+config MILBEAUT_HDMAC
+ tristate "Milbeaut AHB DMA support"
+ depends on ARCH_MILBEAUT || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Say yes here to support the Socionext Milbeaut
+ HDMAC device.
+
+config MILBEAUT_XDMAC
+ tristate "Milbeaut AXI DMA support"
+ depends on ARCH_MILBEAUT || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Say yes here to support the Socionext Milbeaut
+ XDMAC device.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
@@ -635,6 +655,10 @@ config XILINX_DMA
destination address.
AXI DMA engine provides high-bandwidth one dimensional direct
memory access between memory and AXI4-Stream target peripherals.
+ AXI MCDMA engine provides high-bandwidth direct memory access
+ between memory and AXI4-Stream target peripherals. It provides a
+ scatter-gather interface with independent per-channel configuration
+ support.
config XILINX_ZYNQMP_DMA
tristate "Xilinx ZynqMP DMA Engine"
@@ -665,10 +689,14 @@ source "drivers/dma/dw-edma/Kconfig"
source "drivers/dma/hsu/Kconfig"
+source "drivers/dma/sf-pdma/Kconfig"
+
source "drivers/dma/sh/Kconfig"
source "drivers/dma/ti/Kconfig"
+source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+
# clients
comment "DMA Clients"
depends on DMA_ENGINE
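The Kconfig hunks above are mostly help-text indentation normalization, plus the new MILBEAUT_HDMAC/MILBEAUT_XDMAC entries and the newly sourced sf-pdma and fsl-dpaa2-qdma Kconfig files. As an illustrative .config fragment (assuming the sourced sf-pdma Kconfig defines CONFIG_SF_PDMA, as the Makefile hunk below suggests), enabling the new drivers as modules would look like:

CONFIG_DMADEVICES=y
CONFIG_MILBEAUT_HDMAC=m
CONFIG_MILBEAUT_XDMAC=m
CONFIG_FSL_DPAA2_QDMA=m
CONFIG_SF_PDMA=m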
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f5ce8665e944..42d7e2fc64fa 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -45,6 +45,8 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
+obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
@@ -60,6 +62,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SF_PDMA) += sf-pdma/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx_dma.o
obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
obj-y += mediatek/
obj-y += qcom/
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b58ac720d9a1..f71c9f77d405 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1957,21 +1957,16 @@ static int atmel_xdmac_resume(struct device *dev)
static int at_xdmac_probe(struct platform_device *pdev)
{
- struct resource *res;
struct at_xdmac *atxdmac;
int irq, size, nr_channels, i, ret;
void __iomem *base;
u32 reg;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
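This hunk and several later ones replace the open-coded platform_get_resource() + devm_ioremap_resource() pair with devm_platform_ioremap_resource(). The helper is essentially the two calls folded together, and since devm_ioremap_resource() already returns ERR_PTR(-EINVAL) for a NULL resource, the explicit !res checks become redundant. A sketch of the helper's shape (modeled on drivers/base/platform.c of this era):

void __iomem *devm_platform_ioremap_resource_sketch(struct platform_device *pdev,
						    unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	/* devm_ioremap_resource() rejects a NULL resource with -EINVAL */
	return devm_ioremap_resource(&pdev->dev, res);
}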
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index cafb1cc065bb..fa626acdc9b9 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -858,13 +858,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O memory\n");
- return -EINVAL;
- }
-
- jzdma->chn_base = devm_ioremap_resource(dev, res);
+ jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jzdma->chn_base))
return PTR_ERR(jzdma->chn_base);
@@ -987,6 +981,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
+ clk_disable_unprepare(jzdma->clk);
free_irq(jzdma->irq, jzdma);
for (i = 0; i < jzdma->soc_data->nb_channels; i++)
@@ -1019,11 +1014,18 @@ static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};
+static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
+ .nb_channels = 8,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
+ { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
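The new x1000_dma_soc_data entry follows the existing pattern: each compatible string carries a .data pointer with per-SoC parameters. A hedged sketch of how a probe path typically fetches it (example_probe and the error value are illustrative; the driver may equally look up the of_device_id entry directly):

static int example_probe(struct platform_device *pdev)
{
	const struct jz4780_dma_soc_data *soc;

	/* returns the .data of the matched of_device_id entry, e.g.
	 * &x1000_dma_soc_data when "ingenic,x1000-dma" matched */
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc)
		return -EINVAL;

	return 0;
}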
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c90c798e5ec3..0585d749d935 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev)
data->chip = chip;
- chip->clk = devm_clk_get(chip->dev, "hclk");
+ chip->clk = devm_clk_get_optional(chip->dev, "hclk");
if (IS_ERR(chip->clk))
return PTR_ERR(chip->clk);
err = clk_prepare_enable(chip->clk);
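Switching to devm_clk_get_optional() makes "hclk" genuinely optional: when the clock is not described, the helper returns NULL rather than ERR_PTR(-ENOENT), and the clk API treats a NULL clock as a no-op, so the existing clk_prepare_enable() call needs no change. In sketch form:

clk = devm_clk_get_optional(dev, "hclk");	/* NULL if "hclk" is absent */
if (IS_ERR(clk))
	return PTR_ERR(clk);			/* real errors still propagate */
clk_prepare_enable(clk);			/* NULL clock: harmless no-op */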
diff --git a/drivers/dma/fsl-dpaa2-qdma/Kconfig b/drivers/dma/fsl-dpaa2-qdma/Kconfig
new file mode 100644
index 000000000000..258ed6be934d
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Kconfig
@@ -0,0 +1,9 @@
+menuconfig FSL_DPAA2_QDMA
+ tristate "NXP DPAA2 QDMA"
+ depends on ARM64
+ depends on FSL_MC_BUS && FSL_MC_DPIO
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ NXP Data Path Acceleration Architecture 2 QDMA driver,
+ using the NXP MC bus driver.
diff --git a/drivers/dma/fsl-dpaa2-qdma/Makefile b/drivers/dma/fsl-dpaa2-qdma/Makefile
new file mode 100644
index 000000000000..c1d0226f2bd7
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the NXP DPAA2 qDMA controllers
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
new file mode 100644
index 000000000000..c70a7965f140
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/of_irq.h>
+#include <linux/iommu.h>
+#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "../virt-dma.h"
+#include "dpdmai.h"
+#include "dpaa2-qdma.h"
+
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
+
+ dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
+ sizeof(struct dpaa2_fd),
+ sizeof(struct dpaa2_fd), 0);
+ if (!dpaa2_chan->fd_pool)
+ goto err;
+
+ dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+ sizeof(struct dpaa2_fl_entry),
+ sizeof(struct dpaa2_fl_entry), 0);
+ if (!dpaa2_chan->fl_pool)
+ goto err_fd;
+
+ dpaa2_chan->sdd_pool =
+ dma_pool_create("sdd_pool", dev,
+ sizeof(struct dpaa2_qdma_sd_d),
+ sizeof(struct dpaa2_qdma_sd_d), 0);
+ if (!dpaa2_chan->sdd_pool)
+ goto err_fl;
+
+ return dpaa2_qdma->desc_allocated++;
+err_fl:
+ dma_pool_destroy(dpaa2_chan->fl_pool);
+err_fd:
+ dma_pool_destroy(dpaa2_chan->fd_pool);
+err:
+ return -ENOMEM;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+ unsigned long flags;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+
+ dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
+ dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
+
+ dma_pool_destroy(dpaa2_chan->fd_pool);
+ dma_pool_destroy(dpaa2_chan->fl_pool);
+ dma_pool_destroy(dpaa2_chan->sdd_pool);
+ dpaa2_qdma->desc_allocated--;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+ struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
+ struct device *dev = &qdma_priv->dpdmai_dev->dev;
+ struct dpaa2_qdma_comp *comp_temp = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ if (list_empty(&dpaa2_chan->comp_free)) {
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
+ if (!comp_temp)
+ goto err;
+ comp_temp->fd_virt_addr =
+ dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
+ &comp_temp->fd_bus_addr);
+ if (!comp_temp->fd_virt_addr)
+ goto err_comp;
+
+ comp_temp->fl_virt_addr =
+ dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
+ &comp_temp->fl_bus_addr);
+ if (!comp_temp->fl_virt_addr)
+ goto err_fd_virt;
+
+ comp_temp->desc_virt_addr =
+ dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
+ &comp_temp->desc_bus_addr);
+ if (!comp_temp->desc_virt_addr)
+ goto err_fl_virt;
+
+ comp_temp->qchan = dpaa2_chan;
+ return comp_temp;
+ }
+
+ comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+ struct dpaa2_qdma_comp, list);
+ list_del(&comp_temp->list);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+ comp_temp->qchan = dpaa2_chan;
+
+ return comp_temp;
+
+err_fl_virt:
+ dma_pool_free(dpaa2_chan->fl_pool,
+ comp_temp->fl_virt_addr,
+ comp_temp->fl_bus_addr);
+err_fd_virt:
+ dma_pool_free(dpaa2_chan->fd_pool,
+ comp_temp->fd_virt_addr,
+ comp_temp->fd_bus_addr);
+err_comp:
+ kfree(comp_temp);
+err:
+ dev_err(dev, "Failed to request descriptor\n");
+ return NULL;
+}
+
+static void
+dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
+{
+ struct dpaa2_fd *fd;
+
+ fd = dpaa2_comp->fd_virt_addr;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+
+ /* fd populated */
+ dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
+
+ /*
+ * Bypass memory translation, frame list format, short length disable;
+ * BMT must be disabled if fsl-mc uses IOVA addresses.
+ */
+ if (smmu_disable)
+ dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
+ dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
+
+ dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
+}
+
+/* first frame list for descriptor buffer */
+static void
+dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
+ struct dpaa2_qdma_comp *dpaa2_comp,
+ bool wrt_changed)
+{
+ struct dpaa2_qdma_sd_d *sdd;
+
+ sdd = dpaa2_comp->desc_virt_addr;
+ memset(sdd, 0, 2 * (sizeof(*sdd)));
+
+ /* source descriptor CMD */
+ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
+ sdd++;
+
+ /* dest descriptor CMD */
+ if (wrt_changed)
+ sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
+ else
+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
+
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ /* first frame list to source descriptor */
+ dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
+ dpaa2_fl_set_len(f_list, 0x20);
+ dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
+
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+/* source and destination frame list */
+static void
+dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len, uint8_t fmt)
+{
+ /* source frame list to source buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ dpaa2_fl_set_addr(f_list, src);
+ dpaa2_fl_set_len(f_list, len);
+
+ /* single buffer frame or scatter gather frame */
+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+
+ f_list++;
+
+ /* destination frame list to destination buffer */
+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+ dpaa2_fl_set_addr(f_list, dst);
+ dpaa2_fl_set_len(f_list, len);
+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+ /* single buffer frame or scatter gather frame */
+ dpaa2_fl_set_final(f_list, QDMA_FL_F);
+ /* bypass memory translation */
+ if (smmu_disable)
+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+static struct dma_async_tx_descriptor
+*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, ulong flags)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_fl_entry *f_list;
+ bool wrt_changed;
+
+ dpaa2_qdma = dpaa2_chan->qdma;
+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+ if (!dpaa2_comp)
+ return NULL;
+
+ wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
+
+ /* populate Frame descriptor */
+ dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
+
+ f_list = dpaa2_comp->fl_virt_addr;
+
+ /* first frame list for descriptor buffer (long format) */
+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
+
+ f_list++;
+
+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
+{
+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct virt_dma_desc *vdesc;
+ struct dpaa2_fd *fd;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+ spin_lock(&dpaa2_chan->vchan.lock);
+ if (vchan_issue_pending(&dpaa2_chan->vchan)) {
+ vdesc = vchan_next_desc(&dpaa2_chan->vchan);
+ if (!vdesc)
+ goto err_enqueue;
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+
+ fd = dpaa2_comp->fd_virt_addr;
+
+ list_del(&vdesc->node);
+ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
+
+ err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
+ if (err) {
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list,
+ &dpaa2_chan->comp_free);
+ }
+ }
+err_enqueue:
+ spin_unlock(&dpaa2_chan->vchan.lock);
+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+}
+
+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_qdma_priv *priv;
+ u8 prio_def = DPDMAI_PRIO_NUM;
+ int err = -EINVAL;
+ int i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpqdma_id = ls_dev->obj_desc.id;
+
+ /* Get the handle for the DPDMAI this interface is associated with */
+ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_open() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "Opened dpdmai object successfully\n");
+
+ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpdmai_attr);
+ if (err) {
+ dev_err(dev, "dpdmai_get_attributes() failed\n");
+ goto exit;
+ }
+
+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+ dev_err(dev, "DPDMAI major version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ goto exit;
+ }
+
+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+ dev_err(dev, "DPDMAI minor version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+ priv->dpdmai_attr.version.minor,
+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+ goto exit;
+ }
+
+ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
+ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
+ if (!ppriv) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ priv->ppriv = ppriv;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_rx_queue() failed\n");
+ goto exit;
+ }
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+
+ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ i, &priv->tx_fqid[i]);
+ if (err) {
+ dev_err(dev, "dpdmai_get_tx_queue() failed\n");
+ goto exit;
+ }
+ ppriv->req_fqid = priv->tx_fqid[i];
+ ppriv->prio = i;
+ ppriv->priv = priv;
+ ppriv++;
+ }
+
+ return 0;
+exit:
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ return err;
+}
+
+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
+ struct dpaa2_qdma_priv_per_prio, nctx);
+ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
+ struct dpaa2_qdma_priv *priv = ppriv->priv;
+ u32 n_chans = priv->dpaa2_qdma->n_chans;
+ struct dpaa2_qdma_chan *qchan;
+ const struct dpaa2_fd *fd_eq;
+ const struct dpaa2_fd *fd;
+ struct dpaa2_dq *dq;
+ int is_last = 0;
+ int found;
+ u8 status;
+ int err;
+ int i;
+
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err);
+
+ while (!is_last) {
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ } while (!is_last && !dq);
+ if (!dq) {
+ dev_err(priv->dev, "FQID returned no valid frames!\n");
+ continue;
+ }
+
+ /* obtain FD and process the error */
+ fd = dpaa2_dq_fd(dq);
+
+ status = dpaa2_fd_get_ctrl(fd) & 0xff;
+ if (status)
+ dev_err(priv->dev, "FD error occurred\n");
+ found = 0;
+ for (i = 0; i < n_chans; i++) {
+ qchan = &priv->dpaa2_qdma->chans[i];
+ spin_lock(&qchan->queue_lock);
+ if (list_empty(&qchan->comp_used)) {
+ spin_unlock(&qchan->queue_lock);
+ continue;
+ }
+ list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
+ &qchan->comp_used, list) {
+ fd_eq = dpaa2_comp->fd_virt_addr;
+
+ if (le64_to_cpu(fd_eq->simple.addr) ==
+ le64_to_cpu(fd->simple.addr)) {
+ spin_lock(&qchan->vchan.lock);
+ vchan_cookie_complete(&
+ dpaa2_comp->vdesc);
+ spin_unlock(&qchan->vchan.lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&qchan->queue_lock);
+ if (found)
+ break;
+ }
+ }
+
+ dpaa2_io_service_rearm(NULL, ctx);
+}
+
+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = priv->dev;
+ int err = -EINVAL;
+ int i, num;
+
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ ppriv->nctx.is_cdan = 0;
+ ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
+ ppriv->nctx.id = ppriv->rsp_fqid;
+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
+ err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
+ if (err) {
+ dev_err(dev, "Notification register failed\n");
+ goto err_service;
+ }
+
+ ppriv->store =
+ dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+ if (!ppriv->store) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ goto err_store;
+ }
+
+ ppriv++;
+ }
+ return 0;
+
+err_store:
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+err_service:
+ ppriv--;
+ while (ppriv >= priv->ppriv) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv--;
+ }
+ return err;
+}
+
+static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_store_destroy(ppriv->store);
+ ppriv++;
+ }
+}
+
+static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ struct device *dev = priv->dev;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+ ppriv++;
+ }
+}
+
+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
+{
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev;
+ int i, num;
+ int err;
+
+ ls_dev = to_fsl_mc_device(dev);
+ num = priv->num_pairs;
+ ppriv = priv->ppriv;
+ for (i = 0; i < num; i++) {
+ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
+ DPDMAI_QUEUE_OPT_DEST;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ rx_queue_cfg.dest_cfg.priority = ppriv->prio;
+ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+ rx_queue_cfg.dest_cfg.priority,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpdmai_set_rx_queue() failed\n");
+ return err;
+ }
+
+ ppriv++;
+ }
+
+ return 0;
+}
+
+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
+{
+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev;
+ int err = 0;
+ int i;
+
+ ls_dev = to_fsl_mc_device(dev);
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv->nctx.qman64 = 0;
+ ppriv->nctx.dpio_id = 0;
+ ppriv++;
+ }
+
+ err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err)
+ dev_err(dev, "dpdmai_reset() failed\n");
+
+ return err;
+}
+
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head)
+{
+ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
+ unsigned long flags;
+
+ list_for_each_entry_safe(comp_tmp, _comp_tmp,
+ head, list) {
+ spin_lock_irqsave(&qchan->queue_lock, flags);
+ list_del(&comp_tmp->list);
+ spin_unlock_irqrestore(&qchan->queue_lock, flags);
+ dma_pool_free(qchan->fd_pool,
+ comp_tmp->fd_virt_addr,
+ comp_tmp->fd_bus_addr);
+ dma_pool_free(qchan->fl_pool,
+ comp_tmp->fl_virt_addr,
+ comp_tmp->fl_bus_addr);
+ dma_pool_free(qchan->sdd_pool,
+ comp_tmp->desc_virt_addr,
+ comp_tmp->desc_bus_addr);
+ kfree(comp_tmp);
+ }
+}
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_chan *qchan;
+ int num, i;
+
+ num = dpaa2_qdma->n_chans;
+ for (i = 0; i < num; i++) {
+ qchan = &dpaa2_qdma->chans[i];
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
+ dma_pool_destroy(qchan->fd_pool);
+ dma_pool_destroy(qchan->fl_pool);
+ dma_pool_destroy(qchan->sdd_pool);
+ }
+}
+
+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct dpaa2_qdma_comp *dpaa2_comp;
+ struct dpaa2_qdma_chan *qchan;
+ unsigned long flags;
+
+ dpaa2_comp = to_fsl_qdma_comp(vdesc);
+ qchan = dpaa2_comp->qchan;
+ spin_lock_irqsave(&qchan->queue_lock, flags);
+ list_del(&dpaa2_comp->list);
+ list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+ spin_unlock_irqrestore(&qchan->queue_lock, flags);
+}
+
+static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
+ struct dpaa2_qdma_chan *dpaa2_chan;
+ int num = priv->num_pairs;
+ int i;
+
+ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
+ for (i = 0; i < dpaa2_qdma->n_chans; i++) {
+ dpaa2_chan = &dpaa2_qdma->chans[i];
+ dpaa2_chan->qdma = dpaa2_qdma;
+ dpaa2_chan->fqid = priv->tx_fqid[i % num];
+ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
+ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
+ spin_lock_init(&dpaa2_chan->queue_lock);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+ INIT_LIST_HEAD(&dpaa2_chan->comp_free);
+ }
+ return 0;
+}
+
+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
+{
+ struct device *dev = &dpdmai_dev->dev;
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv *priv;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(dev, priv);
+ priv->dpdmai_dev = dpdmai_dev;
+
+ priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ if (priv->iommu_domain)
+ smmu_disable = false;
+
+ /* obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+ goto err_mcportal;
+ }
+
+ /* DPDMAI initialization */
+ err = dpaa2_qdma_setup(dpdmai_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
+ goto err_dpdmai_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_qdma_dpio_setup(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPDMAI binding to DPIO */
+ err = dpaa2_dpdmai_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPDMAI enable */
+ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpdmai_enable() faile\n");
+ goto err_enable;
+ }
+
+ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
+ if (!dpaa2_qdma) {
+ err = -ENOMEM;
+ goto err_eng;
+ }
+
+ priv->dpaa2_qdma = dpaa2_qdma;
+ dpaa2_qdma->priv = priv;
+
+ dpaa2_qdma->desc_allocated = 0;
+ dpaa2_qdma->n_chans = NUM_CH;
+
+ dpaa2_dpdmai_init_channels(dpaa2_qdma);
+
+ if (soc_device_match(soc_fixup_tuning))
+ dpaa2_qdma->qdma_wrtype_fixup = true;
+ else
+ dpaa2_qdma->qdma_wrtype_fixup = false;
+
+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+
+ dpaa2_qdma->dma_dev.dev = dev;
+ dpaa2_qdma->dma_dev.device_alloc_chan_resources =
+ dpaa2_qdma_alloc_chan_resources;
+ dpaa2_qdma->dma_dev.device_free_chan_resources =
+ dpaa2_qdma_free_chan_resources;
+ dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+ err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+ if (err) {
+ dev_err(dev, "Can't register NXP QDMA engine.\n");
+ goto err_dpaa2_qdma;
+ }
+
+ return 0;
+
+err_dpaa2_qdma:
+ kfree(dpaa2_qdma);
+err_eng:
+ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_enable:
+ dpaa2_dpdmai_dpio_unbind(priv);
+err_bind:
+ dpaa2_dpmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+err_dpio_setup:
+ kfree(priv->ppriv);
+ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_dpdmai_setup:
+ fsl_mc_portal_free(priv->mc_io);
+err_mcportal:
+ kfree(priv);
+ dev_set_drvdata(dev, NULL);
+ return err;
+}
+
+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
+{
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv *priv;
+ struct device *dev;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+ dpaa2_qdma = priv->dpaa2_qdma;
+
+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ dpaa2_dpdmai_dpio_unbind(priv);
+ dpaa2_dpmai_store_free(priv);
+ dpaa2_dpdmai_dpio_free(priv);
+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+ fsl_mc_portal_free(priv->mc_io);
+ dev_set_drvdata(dev, NULL);
+ dpaa2_dpdmai_free_channels(dpaa2_qdma);
+
+ dma_async_device_unregister(&dpaa2_qdma->dma_dev);
+ kfree(priv);
+ kfree(dpaa2_qdma);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpdmai",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_qdma_driver = {
+ .driver = {
+ .name = "dpaa2-qdma",
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_qdma_probe,
+ .remove = dpaa2_qdma_remove,
+ .match_id_table = dpaa2_qdma_id_table
+};
+
+static int __init dpaa2_qdma_driver_init(void)
+{
+ return fsl_mc_driver_register(&(dpaa2_qdma_driver));
+}
+late_initcall(dpaa2_qdma_driver_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+ fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-dpaa2-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
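From a client's perspective, the engine registered in dpaa2_qdma_probe() is driven through the standard dmaengine memcpy API: dmaengine_submit() queues a descriptor prepared by dpaa2_qdma_prep_memcpy(), and dma_async_issue_pending() ends up in dpaa2_qdma_issue_pending() above. A hypothetical client sketch (example_memcpy, dst, src, and len are illustrative; completion handling via a callback is omitted):

static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	dmaengine_submit(tx);		/* queue the prepared descriptor */
	dma_async_issue_pending(chan);	/* triggers the enqueue to the FQ */
	return 0;
}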
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
new file mode 100644
index 000000000000..7d571849c569
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __DPAA2_QDMA_H
+#define __DPAA2_QDMA_H
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
+
+struct dpaa2_qdma_sd_d {
+ u32 rsv:32;
+ union {
+ struct {
+ u32 ssd:12; /* source stride distance */
+ u32 sss:12; /* source stride size */
+ u32 rsv1:8;
+ } sdf;
+ struct {
+ u32 dsd:12; /* Destination stride distance */
+ u32 dss:12; /* Destination stride size */
+ u32 rsv2:8;
+ } ddf;
+ } df;
+ u32 rbpcmd; /* Route-by-port command */
+ u32 cmd;
+} __attribute__((__packed__));
+
+/* Source descriptor command read transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
+/* Destination descriptor command write transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
+#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
+
+#define QMAN_FD_FMT_ENABLE BIT(0) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE BIT(15) /* bypass memory translation */
+#define QMAN_FD_BMT_DISABLE (0) /* no bypass of memory translation */
+#define QMAN_FD_SL_DISABLE (0) /* short length disabled */
+#define QMAN_FD_SL_ENABLE BIT(14) /* short length enabled */
+
+#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */
+#define QDMA_FINAL_BIT_ENABLE BIT(31) /* final bit enable */
+
+#define QDMA_FD_SHORT_FORMAT BIT(11) /* short format */
+#define QDMA_FD_LONG_FORMAT (0) /* long format */
+#define QDMA_SER_DISABLE (8) /* no notification */
+#define QDMA_SER_CTX BIT(8) /* notification by FQD_CTX[fqid] */
+#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
+#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
+#define QDMA_FD_SPF_ENALBE BIT(30) /* source prefetch enable */
+
+#define QMAN_FD_VA_ENABLE BIT(14) /* Address used is virtual address */
+#define QMAN_FD_VA_DISABLE (0) /* Address used is a real address */
+/* Flow Context: 49bit physical address */
+#define QMAN_FD_CBMT_ENABLE BIT(15)
+#define QMAN_FD_CBMT_DISABLE (0) /* Flow Context: 64bit virtual address */
+#define QMAN_FD_SC_DISABLE (0) /* stashing control */
+
+#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
+#define QDMA_FL_FMT_SGE (0x2) /* Scatter gather frame */
+#define QDMA_FL_BMT_ENABLE BIT(15) /* enable bypass memory translation */
+#define QDMA_FL_BMT_DISABLE (0x0) /* disable bypass memory translation */
+#define QDMA_FL_SL_LONG (0x0) /* long length */
+#define QDMA_FL_SL_SHORT (0x1) /* short length */
+#define QDMA_FL_F (0x1) /* last frame list bit */
+
+/* Description of the frame list table structure */
+struct dpaa2_qdma_chan {
+ struct dpaa2_qdma_engine *qdma;
+ struct virt_dma_chan vchan;
+ struct virt_dma_desc vdesc;
+ enum dma_status status;
+ u32 fqid;
+
+ /* spinlock used by dpaa2 qdma driver */
+ spinlock_t queue_lock;
+ struct dma_pool *fd_pool;
+ struct dma_pool *fl_pool;
+ struct dma_pool *sdd_pool;
+
+ struct list_head comp_used;
+ struct list_head comp_free;
+};
+
+struct dpaa2_qdma_comp {
+ dma_addr_t fd_bus_addr;
+ dma_addr_t fl_bus_addr;
+ dma_addr_t desc_bus_addr;
+ struct dpaa2_fd *fd_virt_addr;
+ struct dpaa2_fl_entry *fl_virt_addr;
+ struct dpaa2_qdma_sd_d *desc_virt_addr;
+ struct dpaa2_qdma_chan *qchan;
+ struct virt_dma_desc vdesc;
+ struct list_head list;
+};
+
+struct dpaa2_qdma_engine {
+ struct dma_device dma_dev;
+ u32 n_chans;
+ struct dpaa2_qdma_chan chans[NUM_CH];
+ int qdma_wrtype_fixup;
+ int desc_allocated;
+
+ struct dpaa2_qdma_priv *priv;
+};
+
+/*
+ * dpaa2_qdma_priv - driver private data
+ */
+struct dpaa2_qdma_priv {
+ int dpqdma_id;
+
+ struct iommu_domain *iommu_domain;
+ struct dpdmai_attr dpdmai_attr;
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct fsl_mc_device *dpdmai_dev;
+ u8 num_pairs;
+
+ struct dpaa2_qdma_engine *dpaa2_qdma;
+ struct dpaa2_qdma_priv_per_prio *ppriv;
+
+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
+ u32 tx_fqid[DPDMAI_PRIO_NUM];
+};
+
+struct dpaa2_qdma_priv_per_prio {
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+
+ struct dpaa2_io_store *store;
+ struct dpaa2_io_notification_ctx nctx;
+
+ struct dpaa2_qdma_priv *priv;
+};
+
+static struct soc_device_attribute soc_fixup_tuning[] = {
+ { .family = "QorIQ LX2160A"},
+ { },
+};
+
+/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
+ sizeof(struct dpaa2_fl_entry) * 3 + \
+ sizeof(struct dpaa2_qdma_sd_d) * 2)
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+ struct list_head *head);
+#endif /* __DPAA2_QDMA_H */
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
new file mode 100644
index 000000000000..f8d22115154a
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+#include "dpdmai.h"
+
+struct dpdmai_rsp_get_attributes {
+ __le32 id;
+ u8 num_of_priorities;
+ u8 pad0[3];
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpdmai_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ __le64 pad;
+ __le32 fqid;
+};
+
+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPDMAI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
+ MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
+} while (0)
+
+static inline u64 mc_enc(int lsoffset, int width, u64 val)
+{
+ return (val & MAKE_UMASK64(width)) << lsoffset;
+}
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ int dpdmai_id, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ __le64 *cmd_dpdmai_id;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags, 0);
+
+ cmd_dpdmai_id = cmd.params;
+ *cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_open);
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_close);
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @token: Returned token; use in subsequent API calls
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent calls to
+ * this specific object. For objects that are created using the
+ * DPL file, call dpdmai_open() function to get an authentication
+ * token first.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ const struct dpdmai_cfg *cfg, u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags, 0);
+ DPDMAI_CMD_CREATE(cmd, cfg);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_enable);
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_disable);
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags, token);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_reset);
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags, token);
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->major);
+ attr->version.minor = le16_to_cpu(rsp_params->minor);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to the number of
+ * priorities configured at DPDMAI creation
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = priority;
+ cmd_params->dest_type = cfg->dest_cfg.dest_type;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+
+ /* send command to MC */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to the number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->queue = priority;
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = cmd_params->dest_type;
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to the number of
+ * priorities configured at DPDMAI creation
+ * @fqid: Returned Tx queue
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, u8 priority, u32 *fqid)
+{
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct dpdmai_cmd_queue *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags, token);
+
+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+ cmd_params->queue = priority;
+
+ /* send command to MC */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ *fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_tx_queue);
+
+MODULE_LICENSE("GPL v2");
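The command payloads above are packed with mc_enc(), whose mask comes from MAKE_UMASK64(). A worked example of the DPDMAI_CMD_CREATE() packing (priority values chosen purely for illustration):

/*
 * mc_enc(8, 8, p0) places priorities[0] in bits 8..15 of params[0];
 * mc_enc(16, 8, p1) places priorities[1] in bits 16..23.
 * For p0 = 2 and p1 = 5:
 *
 *   mc_enc(8, 8, 2)  == (2 & 0xff) << 8  == 0x200
 *   mc_enc(16, 8, 5) == (5 & 0xff) << 16 == 0x50000
 *
 * so DPDMAI_CMD_CREATE() leaves params[0] == 0x50200.
 */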
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
new file mode 100644
index 000000000000..6d785093da8e
--- /dev/null
+++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 2
+#define DPDMAI_VER_MINOR 2
+
+#define DPDMAI_CMD_BASE_VERSION 0
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
+ DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMDID_FORMAT(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMDID_FORMAT(0x90E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMDID_FORMAT(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMDID_FORMAT(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMDID_FORMAT(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
+
+#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010)
+#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011)
+#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012)
+#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
+#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
+#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
+#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
+
+#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
+#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
+
+#define MAKE_UMASK64(_width) \
+ ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : (u64)-1))
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x1
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x2
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ * configured with values 1-8; the entry following last valid entry
+ * should be configured with 0
+ */
+struct dpdmai_cfg {
+ u8 priorities[DPDMAI_PRIO_NUM];
+};
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @version: DPDMAI version
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ /**
+ * struct version - DPDMAI version
+ * @major: DPDMAI major version
+ * @minor: DPDMAI minor version
+ */
+ struct {
+ u16 major;
+ u16 minor;
+ } version;
+ u8 num_of_priorities;
+};
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ struct dpdmai_dest_cfg dest_cfg;
+ u32 options;
+ u64 user_ctx;
+};
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ struct dpdmai_dest_cfg dest_cfg;
+ u64 user_ctx;
+ u32 fqid;
+};
+
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ int dpdmai_id, u16 *token);
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ const struct dpdmai_cfg *cfg, u16 *token);
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpdmai_attr *attr);
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 priority, struct dpdmai_rx_queue_attr *attr);
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, u8 priority, u32 *fqid);
+
+#endif /* __FSL_DPDMAI_H */
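Taken together, this header defines the whole DPDMAI control surface. A sketch of the expected call order, mirroring dpaa2_qdma_probe()/dpaa2_qdma_remove() in the driver above (dpdmai_bringup_sketch is illustrative; mc_io and dpqdma_id come from the fsl-mc bus in the real probe path):

static int dpdmai_bringup_sketch(struct fsl_mc_io *mc_io, int dpqdma_id)
{
	struct dpdmai_attr attr;
	u16 token;
	int err;

	err = dpdmai_open(mc_io, 0, dpqdma_id, &token);
	if (err)
		return err;

	err = dpdmai_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto close;

	/* rx queues would be bound here via dpdmai_set_rx_queue() */

	err = dpdmai_enable(mc_io, 0, token);
	if (err)
		goto close;

	/* teardown order: dpdmai_disable(), dpdmai_reset(), dpdmai_close() */
	return 0;
close:
	dpdmai_close(mc_io, 0, token);
	return err;
}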
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 06664fbd2d91..89792083d62c 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
return ret;
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+ if (fsl_qdma->irq_base < 0)
+ return fsl_qdma->irq_base;
+
fsl_qdma->feature = of_property_read_bool(np, "big-endian");
INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index a3f942a6a946..db0e274126fb 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -173,7 +173,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
&iop_chan->chain, chain_node) {
zero_sum_result |=
iop_desc_get_zero_result(grp_iter);
- pr_debug("\titer%d result: %d\n",
+ pr_debug("\titer%d result: %d\n",
grp_iter->idx, zero_sum_result);
slot_cnt -= slots_per_op;
if (slot_cnt == 0)
@@ -1359,9 +1359,11 @@ static int iop_adma_probe(struct platform_device *pdev)
iop_adma_device_clear_err_status(iop_chan);
for (i = 0; i < 3; i++) {
- irq_handler_t handler[] = { iop_adma_eot_handler,
- iop_adma_eoc_handler,
- iop_adma_err_handler };
+ static const irq_handler_t handler[] = {
+ iop_adma_eot_handler,
+ iop_adma_eoc_handler,
+ iop_adma_err_handler
+ };
int irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = -ENXIO;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 4b36c8810517..adecea51814f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -835,13 +835,8 @@ static int k3_dma_probe(struct platform_device *op)
const struct k3dma_soc_data *soc_data;
struct k3_dma_dev *d;
const struct of_device_id *of_id;
- struct resource *iores;
int i, ret, irq = 0;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
@@ -850,7 +845,7 @@ static int k3_dma_probe(struct platform_device *op)
if (!soc_data)
return -EINVAL;
- d->base = devm_ioremap_resource(&op->dev, iores);
+ d->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 723b11c190b3..6bf838e63be1 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -819,15 +819,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cqdma->pc[i]->queue);
spin_lock_init(&cqdma->pc[i]->lock);
refcount_set(&cqdma->pc[i]->refcnt, 0);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(&pdev->dev, "No mem resource for %s\n",
- dev_name(&pdev->dev));
- return -EINVAL;
- }
-
- cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res);
+ cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(cqdma->pc[i]->base))
return PTR_ERR(cqdma->pc[i]->base);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 1a2028e1c29e..4c58da742143 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"request_irq failed with err %d\n", err);
- goto err_unregister;
+ goto err_free;
}
platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
+err_free:
+ of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index f40051d6aecb..c20e6bd4e298 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -475,7 +475,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mtk_uart_apdmadev *mtkd;
int bit_mask = 32, rc;
- struct resource *res;
struct mtk_chan *c;
unsigned int i;
@@ -532,13 +531,7 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
goto err_no_dma;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- rc = -ENODEV;
- goto err_no_dma;
- }
-
- c->base = devm_ioremap_resource(&pdev->dev, res);
+ c->base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(c->base)) {
rc = PTR_ERR(c->base);
goto err_no_dma;
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
new file mode 100644
index 000000000000..8853d442430b
--- /dev/null
+++ b/drivers/dma/milbeaut-hdmac.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+#define MLB_HDMAC_DMACR 0x0 /* global */
+#define MLB_HDMAC_DE BIT(31)
+#define MLB_HDMAC_DS BIT(30)
+#define MLB_HDMAC_PR BIT(28)
+#define MLB_HDMAC_DH GENMASK(27, 24)
+
+#define MLB_HDMAC_CH_STRIDE 0x10
+
+#define MLB_HDMAC_DMACA 0x0 /* channel */
+#define MLB_HDMAC_EB BIT(31)
+#define MLB_HDMAC_PB BIT(30)
+#define MLB_HDMAC_ST BIT(29)
+#define MLB_HDMAC_IS GENMASK(28, 24)
+#define MLB_HDMAC_BT GENMASK(23, 20)
+#define MLB_HDMAC_BC GENMASK(19, 16)
+#define MLB_HDMAC_TC GENMASK(15, 0)
+#define MLB_HDMAC_DMACB 0x4
+#define MLB_HDMAC_TT GENMASK(31, 30)
+#define MLB_HDMAC_MS GENMASK(29, 28)
+#define MLB_HDMAC_TW GENMASK(27, 26)
+#define MLB_HDMAC_FS BIT(25)
+#define MLB_HDMAC_FD BIT(24)
+#define MLB_HDMAC_RC BIT(23)
+#define MLB_HDMAC_RS BIT(22)
+#define MLB_HDMAC_RD BIT(21)
+#define MLB_HDMAC_EI BIT(20)
+#define MLB_HDMAC_CI BIT(19)
+#define HDMAC_PAUSE 0x7
+#define MLB_HDMAC_SS GENMASK(18, 16)
+#define MLB_HDMAC_SP GENMASK(15, 12)
+#define MLB_HDMAC_DP GENMASK(11, 8)
+#define MLB_HDMAC_DMACSA 0x8
+#define MLB_HDMAC_DMACDA 0xc
+
+#define MLB_HDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+struct milbeaut_hdmac_desc {
+ struct virt_dma_desc vd;
+ struct scatterlist *sgl;
+ unsigned int sg_len;
+ unsigned int sg_cur;
+ enum dma_transfer_direction dir;
+};
+
+struct milbeaut_hdmac_chan {
+ struct virt_dma_chan vc;
+ struct milbeaut_hdmac_device *mdev;
+ struct milbeaut_hdmac_desc *md;
+ void __iomem *reg_ch_base;
+ unsigned int slave_id;
+ struct dma_slave_config cfg;
+};
+
+struct milbeaut_hdmac_device {
+ struct dma_device ddev;
+ struct clk *clk;
+ void __iomem *reg_base;
+	struct milbeaut_hdmac_chan channels[];
+};
+
+static struct milbeaut_hdmac_chan *
+to_milbeaut_hdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct milbeaut_hdmac_chan, vc);
+}
+
+static struct milbeaut_hdmac_desc *
+to_milbeaut_hdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct milbeaut_hdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_hdmac_desc *
+milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&mc->vc);
+ if (!vd) {
+ mc->md = NULL;
+ return NULL;
+ }
+
+ list_del(&vd->node);
+
+ mc->md = to_milbeaut_hdmac_desc(vd);
+
+ return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc,
+ struct milbeaut_hdmac_desc *md)
+{
+ struct scatterlist *sg;
+ u32 cb, ca, src_addr, dest_addr, len;
+ u32 width, burst;
+
+ sg = &md->sgl[md->sg_cur];
+ len = sg_dma_len(sg);
+
+ cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
+ if (md->dir == DMA_MEM_TO_DEV) {
+ cb |= MLB_HDMAC_FD;
+ width = mc->cfg.dst_addr_width;
+ burst = mc->cfg.dst_maxburst;
+ src_addr = sg_dma_address(sg);
+ dest_addr = mc->cfg.dst_addr;
+ } else {
+ cb |= MLB_HDMAC_FS;
+ width = mc->cfg.src_addr_width;
+ burst = mc->cfg.src_maxburst;
+ src_addr = mc->cfg.src_addr;
+ dest_addr = sg_dma_address(sg);
+ }
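+	/*
+	 * TW appears to encode the bus width as width / 2: 1-, 2- and
+	 * 4-byte accesses map to 0, 1 and 2 respectively.
+	 */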
+ cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
+ cb |= FIELD_PREP(MLB_HDMAC_MS, 2);
+
+ writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR);
+ writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA);
+ writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA);
+ writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+ ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
+ if (burst == 16)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
+ else if (burst == 8)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
+ else if (burst == 4)
+ ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
+ burst *= width;
+ ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
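+	/* program the channel parameters first, then set EB to start it */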
+ writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ ca |= MLB_HDMAC_EB;
+ writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc)
+{
+ struct milbeaut_hdmac_desc *md;
+
+ md = milbeaut_hdmac_next_desc(mc);
+ if (md)
+ milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id)
+{
+ struct milbeaut_hdmac_chan *mc = dev_id;
+ struct milbeaut_hdmac_desc *md;
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+
+ /* Ack and Disable irqs */
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB);
+ val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE));
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+ val &= ~MLB_HDMAC_EI;
+ val &= ~MLB_HDMAC_CI;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+ md = mc->md;
+ if (!md)
+ goto out;
+
+ md->sg_cur++;
+
+ if (md->sg_cur >= md->sg_len) {
+ vchan_cookie_complete(&md->vd);
+ md = milbeaut_hdmac_next_desc(mc);
+ if (!md)
+ goto out;
+ }
+
+ milbeaut_chan_start(mc, md);
+
+out:
+ spin_unlock(&mc->vc.lock);
+ return IRQ_HANDLED;
+}
+
+static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static int
+milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+
+ spin_lock(&mc->vc.lock);
+ mc->cfg = *cfg;
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_chan_pause(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val |= MLB_HDMAC_PB;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_chan_resume(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ u32 val;
+
+ spin_lock(&mc->vc.lock);
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val &= ~MLB_HDMAC_PB;
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+ spin_unlock(&mc->vc.lock);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_desc *md;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ md = kzalloc(sizeof(*md), GFP_NOWAIT);
+ if (!md)
+ return NULL;
+
+	md->sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+ if (!md->sgl) {
+ kfree(md);
+ return NULL;
+ }
+
+ for (i = 0; i < sg_len; i++)
+ md->sgl[i] = sgl[i];
+
+ md->sg_len = sg_len;
+ md->dir = direction;
+
+ return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_hdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ unsigned long flags;
+ u32 val;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+ val &= ~MLB_HDMAC_EB; /* disable the channel */
+ writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+
+ if (mc->md) {
+ vchan_terminate_vdesc(&mc->md->vd);
+ mc->md = NULL;
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return 0;
+}
+
+static void milbeaut_hdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct virt_dma_chan *vc;
+ struct virt_dma_desc *vd;
+ struct milbeaut_hdmac_chan *mc;
+ struct milbeaut_hdmac_desc *md = NULL;
+ enum dma_status stat;
+ unsigned long flags;
+ int i;
+
+ stat = dma_cookie_status(chan, cookie, txstate);
+ /* Return immediately if we do not need to compute the residue. */
+ if (stat == DMA_COMPLETE || !txstate)
+ return stat;
+
+ vc = to_virt_chan(chan);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ mc = to_milbeaut_hdmac_chan(vc);
+
+	/* residue from the in-flight chunk */
+ if (mc->md && mc->md->vd.tx.cookie == cookie) {
+ struct scatterlist *sg;
+ u32 done;
+
+ md = mc->md;
+ sg = &md->sgl[md->sg_cur];
+
+ if (md->dir == DMA_DEV_TO_MEM)
+ done = readl_relaxed(mc->reg_ch_base
+ + MLB_HDMAC_DMACDA);
+ else
+ done = readl_relaxed(mc->reg_ch_base
+ + MLB_HDMAC_DMACSA);
+ done -= sg_dma_address(sg);
+
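+		/*
+		 * The loop over the queued chunks below starts at sg_cur and
+		 * adds back the full length of the current chunk, so
+		 * pre-subtract the bytes already transferred here (this
+		 * relies on unsigned wrap-around).
+		 */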
+ txstate->residue = -done;
+ }
+
+ if (!md) {
+ vd = vchan_find_desc(vc, cookie);
+ if (vd)
+ md = to_milbeaut_hdmac_desc(vd);
+ }
+
+ if (md) {
+ /* residue from the queued chunks */
+ for (i = md->sg_cur; i < md->sg_len; i++)
+ txstate->residue += sg_dma_len(&md->sgl[i]);
+ }
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ return stat;
+}
+
+static void milbeaut_hdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !mc->md)
+ milbeaut_hdmac_start(mc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd)
+{
+ struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd);
+
+ kfree(md->sgl);
+ kfree(md);
+}
+
+static struct dma_chan *
+milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma)
+{
+ struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data;
+ struct milbeaut_hdmac_chan *mc;
+ struct virt_dma_chan *vc;
+ struct dma_chan *chan;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&mdev->ddev);
+ if (!chan)
+ return NULL;
+
+ vc = to_virt_chan(chan);
+ mc = to_milbeaut_hdmac_chan(vc);
+ mc->slave_id = dma_spec->args[0];
+
+ return chan;
+}
+
+static int milbeaut_hdmac_chan_init(struct platform_device *pdev,
+ struct milbeaut_hdmac_device *mdev,
+ int chan_id)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id];
+ char *irq_name;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, chan_id);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d",
+ chan_id);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt,
+ IRQF_SHARED, irq_name, mc);
+ if (ret)
+ return ret;
+
+ mc->mdev = mdev;
+ mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1);
+ mc->vc.desc_free = milbeaut_hdmac_desc_free;
+ vchan_init(&mc->vc, &mdev->ddev);
+
+ return 0;
+}
+
+static int milbeaut_hdmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_hdmac_device *mdev;
+ struct dma_device *ddev;
+ int nr_chans, ret, i;
+
+ nr_chans = platform_irq_count(pdev);
+ if (nr_chans < 0)
+ return nr_chans;
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->reg_base))
+ return PTR_ERR(mdev->reg_base);
+
+ mdev->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(mdev->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(mdev->clk);
+ }
+
+ ret = clk_prepare_enable(mdev->clk);
+ if (ret)
+ return ret;
+
+ ddev = &mdev->ddev;
+ ddev->dev = dev;
+ dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+ ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS;
+ ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources;
+ ddev->device_config = milbeaut_hdmac_chan_config;
+ ddev->device_pause = milbeaut_hdmac_chan_pause;
+ ddev->device_resume = milbeaut_hdmac_chan_resume;
+ ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg;
+ ddev->device_terminate_all = milbeaut_hdmac_terminate_all;
+ ddev->device_synchronize = milbeaut_hdmac_synchronize;
+ ddev->device_tx_status = milbeaut_hdmac_tx_status;
+ ddev->device_issue_pending = milbeaut_hdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++) {
+ ret = milbeaut_hdmac_chan_init(pdev, mdev, i);
+ if (ret)
+ goto disable_clk;
+ }
+
+ ret = dma_async_device_register(ddev);
+ if (ret)
+ goto disable_clk;
+
+ ret = of_dma_controller_register(dev->of_node,
+ milbeaut_hdmac_xlate, mdev);
+ if (ret)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, mdev);
+
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(ddev);
+disable_clk:
+ clk_disable_unprepare(mdev->clk);
+
+ return ret;
+}
+
+static int milbeaut_hdmac_remove(struct platform_device *pdev)
+{
+ struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+	 * ->device_free_chan_resources() hook. However, each channel might
+	 * still be holding one descriptor that was in flight at that moment.
+	 * Terminate it to make sure the hardware is no longer running, then
+	 * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ milbeaut_hdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&mdev->ddev);
+ clk_disable_unprepare(mdev->clk);
+
+ return 0;
+}
+
+static const struct of_device_id milbeaut_hdmac_match[] = {
+ { .compatible = "socionext,milbeaut-m10v-hdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
+
+static struct platform_driver milbeaut_hdmac_driver = {
+ .probe = milbeaut_hdmac_probe,
+ .remove = milbeaut_hdmac_remove,
+ .driver = {
+ .name = "milbeaut-m10v-hdmac",
+ .of_match_table = milbeaut_hdmac_match,
+ },
+};
+module_platform_driver(milbeaut_hdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
new file mode 100644
index 000000000000..ab3d2f395378
--- /dev/null
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+/* global register */
+#define M10V_XDACS 0x00
+
+/* channel local register */
+#define M10V_XDTBC 0x10
+#define M10V_XDSSA 0x14
+#define M10V_XDDSA 0x18
+#define M10V_XDSAC 0x1C
+#define M10V_XDDAC 0x20
+#define M10V_XDDCC 0x24
+#define M10V_XDDES 0x28
+#define M10V_XDDPC 0x2C
+#define M10V_XDDSD 0x30
+
+#define M10V_XDACS_XE BIT(28)
+
+#define M10V_DEFBS 0x3
+#define M10V_DEFBL 0xf
+
+#define M10V_XDSAC_SBS GENMASK(17, 16)
+#define M10V_XDSAC_SBL GENMASK(11, 8)
+
+#define M10V_XDDAC_DBS GENMASK(17, 16)
+#define M10V_XDDAC_DBL GENMASK(11, 8)
+
+#define M10V_XDDES_CE BIT(28)
+#define M10V_XDDES_SE BIT(24)
+#define M10V_XDDES_SA BIT(15)
+#define M10V_XDDES_TF GENMASK(23, 20)
+#define M10V_XDDES_EI BIT(1)
+#define M10V_XDDES_TI BIT(0)
+
+#define M10V_XDDSD_IS_MASK GENMASK(3, 0)
+#define M10V_XDDSD_IS_NORMAL 0x8
+
+#define MLB_XDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct milbeaut_xdmac_desc {
+ struct virt_dma_desc vd;
+ size_t len;
+ dma_addr_t src;
+ dma_addr_t dst;
+};
+
+struct milbeaut_xdmac_chan {
+ struct virt_dma_chan vc;
+ struct milbeaut_xdmac_desc *md;
+ void __iomem *reg_ch_base;
+};
+
+struct milbeaut_xdmac_device {
+ struct dma_device ddev;
+ void __iomem *reg_base;
+	struct milbeaut_xdmac_chan channels[];
+};
+
+static struct milbeaut_xdmac_chan *
+to_milbeaut_xdmac_chan(struct virt_dma_chan *vc)
+{
+ return container_of(vc, struct milbeaut_xdmac_chan, vc);
+}
+
+static struct milbeaut_xdmac_desc *
+to_milbeaut_xdmac_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct milbeaut_xdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_xdmac_desc *
+milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&mc->vc);
+ if (!vd) {
+ mc->md = NULL;
+ return NULL;
+ }
+
+ list_del(&vd->node);
+
+ mc->md = to_milbeaut_xdmac_desc(vd);
+
+ return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
+ struct milbeaut_xdmac_desc *md)
+{
+ u32 val;
+
+ /* Setup the channel */
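+	/* the byte count register appears to hold (transfer length - 1) */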
+ val = md->len - 1;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);
+
+ val = md->src;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);
+
+ val = md->dst;
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);
+
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
+ val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
+ val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
+ FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);
+
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
+ val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
+ val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
+ FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);
+
+ /* Start the channel */
+ val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
+ val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
+ M10V_XDDES_EI | M10V_XDDES_TI);
+ val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) |
+ FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) |
+ FIELD_PREP(M10V_XDDES_TI, 1);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
+{
+ struct milbeaut_xdmac_desc *md;
+
+ md = milbeaut_xdmac_next_desc(mc);
+ if (md)
+ milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
+{
+ struct milbeaut_xdmac_chan *mc = dev_id;
+ struct milbeaut_xdmac_desc *md;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&mc->vc.lock, flags);
+
+ /* Ack and Stop */
+ val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
+ writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD);
+
+ md = mc->md;
+ if (!md)
+ goto out;
+
+ vchan_cookie_complete(&md->vd);
+
+ milbeaut_xdmac_start(mc);
+out:
+ spin_unlock_irqrestore(&mc->vc.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_desc *md;
+
+ md = kzalloc(sizeof(*md), GFP_NOWAIT);
+ if (!md)
+ return NULL;
+
+ md->len = len;
+ md->src = src;
+ md->dst = dst;
+
+ return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_xdmac_terminate_all(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+ unsigned long flags;
+ u32 val;
+
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ /* Halt the channel */
+ val = readl(mc->reg_ch_base + M10V_XDDES);
+ val &= ~M10V_XDDES_CE;
+ writel(val, mc->reg_ch_base + M10V_XDDES);
+
+ if (mc->md) {
+ vchan_terminate_vdesc(&mc->md->vd);
+ mc->md = NULL;
+ }
+
+ vchan_get_all_descriptors(vc, &head);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+
+ return 0;
+}
+
+static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
+{
+ vchan_synchronize(to_virt_chan(chan));
+}
+
+static void milbeaut_xdmac_issue_pending(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vc = to_virt_chan(chan);
+ struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vc->lock, flags);
+
+ if (vchan_issue_pending(vc) && !mc->md)
+ milbeaut_xdmac_start(mc);
+
+ spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(to_milbeaut_xdmac_desc(vd));
+}
+
+static int milbeaut_xdmac_chan_init(struct platform_device *pdev,
+ struct milbeaut_xdmac_device *mdev,
+ int chan_id)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id];
+ char *irq_name;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, chan_id);
+ if (irq < 0)
+ return irq;
+
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d",
+ chan_id);
+ if (!irq_name)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt,
+ IRQF_SHARED, irq_name, mc);
+ if (ret)
+ return ret;
+
+ mc->reg_ch_base = mdev->reg_base + chan_id * 0x30;
+
+ mc->vc.desc_free = milbeaut_xdmac_desc_free;
+ vchan_init(&mc->vc, &mdev->ddev);
+
+ return 0;
+}
+
+static void enable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+ unsigned int val;
+
+ val = readl(mdev->reg_base + M10V_XDACS);
+ val |= M10V_XDACS_XE;
+ writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static void disable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+ unsigned int val;
+
+ val = readl(mdev->reg_base + M10V_XDACS);
+ val &= ~M10V_XDACS_XE;
+ writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static int milbeaut_xdmac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct milbeaut_xdmac_device *mdev;
+ struct dma_device *ddev;
+ int nr_chans, ret, i;
+
+ nr_chans = platform_irq_count(pdev);
+ if (nr_chans < 0)
+ return nr_chans;
+
+ mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+ GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdev->reg_base))
+ return PTR_ERR(mdev->reg_base);
+
+ ddev = &mdev->ddev;
+ ddev->dev = dev;
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+ ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS;
+ ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS;
+ ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources;
+ ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy;
+ ddev->device_terminate_all = milbeaut_xdmac_terminate_all;
+ ddev->device_synchronize = milbeaut_xdmac_synchronize;
+ ddev->device_tx_status = dma_cookie_status;
+ ddev->device_issue_pending = milbeaut_xdmac_issue_pending;
+ INIT_LIST_HEAD(&ddev->channels);
+
+ for (i = 0; i < nr_chans; i++) {
+ ret = milbeaut_xdmac_chan_init(pdev, mdev, i);
+ if (ret)
+ return ret;
+ }
+
+ enable_xdmac(mdev);
+
+ ret = dma_async_device_register(ddev);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(dev->of_node,
+ of_dma_simple_xlate, mdev);
+ if (ret)
+ goto unregister_dmac;
+
+ platform_set_drvdata(pdev, mdev);
+
+ return 0;
+
+unregister_dmac:
+ dma_async_device_unregister(ddev);
+ return ret;
+}
+
+static int milbeaut_xdmac_remove(struct platform_device *pdev)
+{
+ struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
+ struct dma_chan *chan;
+ int ret;
+
+ /*
+ * Before reaching here, almost all descriptors have been freed by the
+	 * ->device_free_chan_resources() hook. However, each channel might
+	 * still be holding one descriptor that was in flight at that moment.
+	 * Terminate it to make sure the hardware is no longer running, then
+	 * free the channel resources once again to avoid a memory leak.
+ */
+ list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+ ret = dmaengine_terminate_sync(chan);
+ if (ret)
+ return ret;
+ milbeaut_xdmac_free_chan_resources(chan);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&mdev->ddev);
+
+ disable_xdmac(mdev);
+
+ return 0;
+}
+
+static const struct of_device_id milbeaut_xdmac_match[] = {
+ { .compatible = "socionext,milbeaut-m10v-xdmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
+
+static struct platform_driver milbeaut_xdmac_driver = {
+ .probe = milbeaut_xdmac_probe,
+ .remove = milbeaut_xdmac_remove,
+ .driver = {
+ .name = "milbeaut-m10v-xdmac",
+ .of_match_table = milbeaut_xdmac_match,
+ },
+};
+module_platform_driver(milbeaut_xdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 7fe494fc50d4..ad06f260e907 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -945,6 +945,8 @@ static int mmp_pdma_remove(struct platform_device *op)
struct mmp_pdma_phy *phy;
int i, irq = 0, irq_num = 0;
+ if (op->dev.of_node)
+ of_dma_controller_free(op->dev.of_node);
for (i = 0; i < pdev->dma_channels; i++) {
if (platform_get_irq(op, i) > 0)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e7d1e12bf464..10117f271b12 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -544,6 +544,9 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 90bbcef99ef8..023f951189a7 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -1045,18 +1045,13 @@ static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct owl_dma *od;
- struct resource *res;
int ret, i, nr_channels, nr_requests;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- od->base = devm_ioremap_resource(&pdev->dev, res);
+ od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
new file mode 100644
index 000000000000..f8ffa02e279f
--- /dev/null
+++ b/drivers/dma/sf-pdma/Kconfig
@@ -0,0 +1,6 @@
+config SF_PDMA
+	tristate "SiFive PDMA controller driver"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+	  Support the SiFive Platform DMA (PDMA) controller found on the
+	  FU540 SoC, which provides memory-to-memory transfers.
diff --git a/drivers/dma/sf-pdma/Makefile b/drivers/dma/sf-pdma/Makefile
new file mode 100644
index 000000000000..764552ab8d0a
--- /dev/null
+++ b/drivers/dma/sf-pdma/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SF_PDMA) += sf-pdma.o
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
new file mode 100644
index 000000000000..465256fe8b1f
--- /dev/null
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ * SiFive FU540-C000 v1.0
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#include "sf-pdma.h"
+
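+/*
+ * Fallbacks for platforms without 64-bit MMIO accessors: split each access
+ * into two 32-bit accesses, low word first. Note these are not atomic.
+ */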
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+ writel(lower_32_bits(v), addr);
+ writel(upper_32_bits(v), addr + 4);
+}
+#endif
+
+static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
+{
+ return container_of(dchan, struct sf_pdma_chan, vchan.chan);
+}
+
+static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct sf_pdma_desc, vdesc);
+}
+
+static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
+{
+ struct sf_pdma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
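+	/* reuse the previous descriptor if the client has already released it */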
+ if (chan->desc && !chan->desc->in_use) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return chan->desc;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->chan = chan;
+
+ return desc;
+}
+
+static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
+ u64 dst, u64 src, u64 size)
+{
+ desc->xfer_type = PDMA_FULL_SPEED;
+ desc->xfer_size = size;
+ desc->dst_addr = dst;
+ desc->src_addr = src;
+}
+
+static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+
+ writel(PDMA_CLEAR_CTRL, regs->ctrl);
+}
+
+static struct dma_async_tx_descriptor *
+sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ struct sf_pdma_desc *desc;
+
+	if (!len || !dest || !src) {
+		dev_err(chan->pdma->dma_dev.dev,
+			"invalid DMA length, source or destination address\n");
+ return NULL;
+ }
+
+ desc = sf_pdma_alloc_desc(chan);
+ if (!desc)
+ return NULL;
+
+ desc->in_use = true;
+ desc->dirn = DMA_MEM_TO_MEM;
+ desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ chan->desc = desc;
+ sf_pdma_fill_desc(desc, dest, src, len);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return desc->async_tx;
+}
+
+static int sf_pdma_slave_config(struct dma_chan *dchan,
+ struct dma_slave_config *cfg)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+
+ memcpy(&chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ struct pdma_regs *regs = &chan->regs;
+
+ dma_cookie_init(dchan);
+ writel(PDMA_CLAIM_MASK, regs->ctrl);
+
+ return 0;
+}
+
+static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+
+ writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
+}
+
+static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ sf_pdma_disable_request(chan);
+ kfree(chan->desc);
+ chan->desc = NULL;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+ sf_pdma_disclaim_chan(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd = NULL;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+ u64 residue = 0;
+ struct sf_pdma_desc *desc;
+ struct dma_async_tx_descriptor *tx;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
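+	/* an already-completed cookie has no residue left, so leave it at 0 */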
+ tx = &chan->desc->vdesc.tx;
+ if (cookie == tx->chan->completed_cookie)
+ goto out;
+
+ if (cookie == tx->cookie) {
+ residue = readq(regs->residue);
+ } else {
+ vd = vchan_find_desc(&chan->vchan, cookie);
+ if (!vd)
+ goto out;
+
+ desc = to_sf_pdma_desc(vd);
+ residue = desc->xfer_size;
+ }
+
+out:
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ return residue;
+}
+
+static enum dma_status
+sf_pdma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ enum dma_status status;
+
+ status = dma_cookie_status(dchan, cookie, txstate);
+
+ if (txstate && status != DMA_ERROR)
+ dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));
+
+ return status;
+}
+
+static int sf_pdma_terminate_all(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ sf_pdma_disable_request(chan);
+ kfree(chan->desc);
+ chan->desc = NULL;
+ chan->xfer_err = false;
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return 0;
+}
+
+static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
+{
+ struct pdma_regs *regs = &chan->regs;
+ u32 v;
+
+ v = PDMA_CLAIM_MASK |
+ PDMA_ENABLE_DONE_INT_MASK |
+ PDMA_ENABLE_ERR_INT_MASK |
+ PDMA_RUN_MASK;
+
+ writel(v, regs->ctrl);
+}
+
+static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
+{
+ struct sf_pdma_desc *desc = chan->desc;
+ struct pdma_regs *regs = &chan->regs;
+
+ if (!desc) {
+ dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
+ return;
+ }
+
+ writel(desc->xfer_type, regs->xfer_type);
+ writeq(desc->xfer_size, regs->xfer_size);
+ writeq(desc->dst_addr, regs->dst_addr);
+ writeq(desc->src_addr, regs->src_addr);
+
+ chan->desc = desc;
+ chan->status = DMA_IN_PROGRESS;
+ sf_pdma_enable_request(chan);
+}
+
+static void sf_pdma_issue_pending(struct dma_chan *dchan)
+{
+ struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&chan->vchan) && chan->desc)
+ sf_pdma_xfer_desc(chan);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct sf_pdma_desc *desc;
+
+ desc = to_sf_pdma_desc(vdesc);
+ desc->in_use = false;
+}
+
+static void sf_pdma_donebh_tasklet(unsigned long arg)
+{
+ struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_desc *desc = chan->desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->xfer_err) {
+ chan->retries = MAX_RETRY;
+ chan->status = DMA_COMPLETE;
+ chan->xfer_err = false;
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+}
+
+static void sf_pdma_errbh_tasklet(unsigned long arg)
+{
+ struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+ struct sf_pdma_desc *desc = chan->desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->retries <= 0) {
+ /* fail to recover */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+ } else {
+ /* retry */
+ chan->retries--;
+ chan->xfer_err = true;
+ chan->status = DMA_ERROR;
+
+ sf_pdma_enable_request(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+}
+
+static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
+{
+ struct sf_pdma_chan *chan = dev_id;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+ u64 residue;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
+ residue = readq(regs->residue);
+
+ if (!residue) {
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ } else {
+		/* submit the next transaction if possible */
+ struct sf_pdma_desc *desc = chan->desc;
+
+ desc->src_addr += desc->xfer_size - residue;
+ desc->dst_addr += desc->xfer_size - residue;
+ desc->xfer_size = residue;
+
+ sf_pdma_xfer_desc(chan);
+ }
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ tasklet_hi_schedule(&chan->done_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
+{
+ struct sf_pdma_chan *chan = dev_id;
+ struct pdma_regs *regs = &chan->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ tasklet_schedule(&chan->err_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * sf_pdma_irq_init() - Init PDMA IRQ Handlers
+ * @pdev: pointer to the platform_device
+ * @pdma: pointer to the PDMA engine; must not be NULL
+ *
+ * Initialize the DONE and ERROR interrupt handlers for all four channels.
+ * The caller must make sure the pointers passed in are non-NULL. This
+ * function should be called only once during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return:
+ * * 0 - all IRQ handlers initialized successfully
+ * * -EINVAL - failed to request an IRQ
+ */
+static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
+{
+ int irq, r, i;
+ struct sf_pdma_chan *chan;
+
+ for (i = 0; i < pdma->n_chans; i++) {
+ chan = &pdma->chans[i];
+
+ irq = platform_get_irq(pdev, i * 2);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
+ return -EINVAL;
+ }
+
+ r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
+ dev_name(&pdev->dev), (void *)chan);
+ if (r) {
+			dev_err(&pdev->dev, "Failed to attach done ISR: %d\n", r);
+ return -EINVAL;
+ }
+
+ chan->txirq = irq;
+
+ irq = platform_get_irq(pdev, (i * 2) + 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
+ return -EINVAL;
+ }
+
+ r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
+ dev_name(&pdev->dev), (void *)chan);
+ if (r) {
+			dev_err(&pdev->dev, "Failed to attach err ISR: %d\n", r);
+ return -EINVAL;
+ }
+
+ chan->errirq = irq;
+ }
+
+ return 0;
+}
+
+/**
+ * sf_pdma_setup_chans() - Init settings of each channel
+ * @pdma: pointer to the PDMA engine; must not be NULL
+ *
+ * Initialize all channel data structures and register bases. The caller must
+ * make sure the pointer passed in is non-NULL. This function should be called
+ * only once during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return: none
+ */
+static void sf_pdma_setup_chans(struct sf_pdma *pdma)
+{
+ int i;
+ struct sf_pdma_chan *chan;
+
+ INIT_LIST_HEAD(&pdma->dma_dev.channels);
+
+ for (i = 0; i < pdma->n_chans; i++) {
+ chan = &pdma->chans[i];
+
+ chan->regs.ctrl =
+ SF_PDMA_REG_BASE(i) + PDMA_CTRL;
+ chan->regs.xfer_type =
+ SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
+ chan->regs.xfer_size =
+ SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
+ chan->regs.dst_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
+ chan->regs.src_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
+ chan->regs.act_type =
+ SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
+ chan->regs.residue =
+ SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
+ chan->regs.cur_dst_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
+ chan->regs.cur_src_addr =
+ SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;
+
+ chan->pdma = pdma;
+ chan->pm_state = RUNNING;
+ chan->slave_id = i;
+ chan->xfer_err = false;
+ spin_lock_init(&chan->lock);
+
+ chan->vchan.desc_free = sf_pdma_free_desc;
+ vchan_init(&chan->vchan, &pdma->dma_dev);
+
+ writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);
+
+ tasklet_init(&chan->done_tasklet,
+ sf_pdma_donebh_tasklet, (unsigned long)chan);
+ tasklet_init(&chan->err_tasklet,
+ sf_pdma_errbh_tasklet, (unsigned long)chan);
+ }
+}
+
+static int sf_pdma_probe(struct platform_device *pdev)
+{
+ struct sf_pdma *pdma;
+ struct sf_pdma_chan *chan;
+ struct resource *res;
+ int len, chans;
+ int ret;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
+ DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
+ DMA_SLAVE_BUSWIDTH_64_BYTES;
+
+ chans = PDMA_NR_CH;
+ len = sizeof(*pdma) + sizeof(*chan) * chans;
+ pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!pdma)
+ return -ENOMEM;
+
+ pdma->n_chans = chans;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pdma->membase))
+		return PTR_ERR(pdma->membase);
+
+ ret = sf_pdma_irq_init(pdev, pdma);
+	if (ret)
+		return ret;
+
+ sf_pdma_setup_chans(pdma);
+
+ pdma->dma_dev.dev = &pdev->dev;
+
+ /* Setup capability */
+ dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
+ pdma->dma_dev.copy_align = 2;
+ pdma->dma_dev.src_addr_widths = widths;
+ pdma->dma_dev.dst_addr_widths = widths;
+ pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
+ pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ pdma->dma_dev.descriptor_reuse = true;
+
+ /* Setup DMA APIs */
+ pdma->dma_dev.device_alloc_chan_resources =
+ sf_pdma_alloc_chan_resources;
+ pdma->dma_dev.device_free_chan_resources =
+ sf_pdma_free_chan_resources;
+ pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
+ pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
+ pdma->dma_dev.device_config = sf_pdma_slave_config;
+ pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
+ pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;
+
+ platform_set_drvdata(pdev, pdma);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ dev_warn(&pdev->dev,
+			 "Failed to set DMA mask; falling back to the default.\n");
+
+ ret = dma_async_device_register(&pdma->dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Can't register SiFive Platform DMA. (%d)\n", ret);
+		return ret;
+	}
+
+ return 0;
+}
+
+static int sf_pdma_remove(struct platform_device *pdev)
+{
+ struct sf_pdma *pdma = platform_get_drvdata(pdev);
+ struct sf_pdma_chan *ch;
+ int i;
+
+ for (i = 0; i < PDMA_NR_CH; i++) {
+ ch = &pdma->chans[i];
+
+ devm_free_irq(&pdev->dev, ch->txirq, ch);
+ devm_free_irq(&pdev->dev, ch->errirq, ch);
+ list_del(&ch->vchan.chan.device_node);
+ tasklet_kill(&ch->vchan.task);
+ tasklet_kill(&ch->done_tasklet);
+ tasklet_kill(&ch->err_tasklet);
+ }
+
+ dma_async_device_unregister(&pdma->dma_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sf_pdma_dt_ids[] = {
+ { .compatible = "sifive,fu540-c000-pdma" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
+
+static struct platform_driver sf_pdma_driver = {
+ .probe = sf_pdma_probe,
+ .remove = sf_pdma_remove,
+ .driver = {
+ .name = "sf-pdma",
+ .of_match_table = of_match_ptr(sf_pdma_dt_ids),
+ },
+};
+
+static int __init sf_pdma_init(void)
+{
+ return platform_driver_register(&sf_pdma_driver);
+}
+
+static void __exit sf_pdma_exit(void)
+{
+ platform_driver_unregister(&sf_pdma_driver);
+}
+
+/* do early init */
+subsys_initcall(sf_pdma_init);
+module_exit(sf_pdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SiFive Platform DMA driver");
+MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
new file mode 100644
index 000000000000..0c20167b097d
--- /dev/null
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ * SiFive FU540-C000 v1.0
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#ifndef _SF_PDMA_H
+#define _SF_PDMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define PDMA_NR_CH 4
+
+#if (PDMA_NR_CH != 4)
+#error "PDMA_NR_CH must be 4"
+#endif
+
+#define PDMA_BASE_ADDR 0x3000000
+#define PDMA_CHAN_OFFSET 0x1000
+
+/* Register Offset */
+#define PDMA_CTRL 0x000
+#define PDMA_XFER_TYPE 0x004
+#define PDMA_XFER_SIZE 0x008
+#define PDMA_DST_ADDR 0x010
+#define PDMA_SRC_ADDR 0x018
+#define PDMA_ACT_TYPE 0x104 /* Read-only */
+#define PDMA_REMAINING_BYTE 0x108 /* Read-only */
+#define PDMA_CUR_DST_ADDR	0x110	/* Read-only */
+#define PDMA_CUR_SRC_ADDR	0x118	/* Read-only */
+
+/* CTRL */
+#define PDMA_CLEAR_CTRL 0x0
+#define PDMA_CLAIM_MASK GENMASK(0, 0)
+#define PDMA_RUN_MASK GENMASK(1, 1)
+#define PDMA_ENABLE_DONE_INT_MASK GENMASK(14, 14)
+#define PDMA_ENABLE_ERR_INT_MASK GENMASK(15, 15)
+#define PDMA_DONE_STATUS_MASK GENMASK(30, 30)
+#define PDMA_ERR_STATUS_MASK GENMASK(31, 31)
+
+/* Transfer Type */
+#define PDMA_FULL_SPEED 0xFF000008
+
+/* Error Recovery */
+#define MAX_RETRY 1
+
+#define SF_PDMA_REG_BASE(ch) (pdma->membase + (PDMA_CHAN_OFFSET * (ch)))
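+/* note: this macro relies on a local variable named 'pdma' being in scope */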
+
+struct pdma_regs {
+ /* read-write regs */
+ void __iomem *ctrl; /* 4 bytes */
+
+ void __iomem *xfer_type; /* 4 bytes */
+ void __iomem *xfer_size; /* 8 bytes */
+ void __iomem *dst_addr; /* 8 bytes */
+ void __iomem *src_addr; /* 8 bytes */
+
+ /* read-only */
+ void __iomem *act_type; /* 4 bytes */
+ void __iomem *residue; /* 8 bytes */
+ void __iomem *cur_dst_addr; /* 8 bytes */
+ void __iomem *cur_src_addr; /* 8 bytes */
+};
+
+struct sf_pdma_desc {
+ u32 xfer_type;
+ u64 xfer_size;
+ u64 dst_addr;
+ u64 src_addr;
+ struct virt_dma_desc vdesc;
+ struct sf_pdma_chan *chan;
+ bool in_use;
+ enum dma_transfer_direction dirn;
+ struct dma_async_tx_descriptor *async_tx;
+};
+
+enum sf_pdma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+
+struct sf_pdma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ enum sf_pdma_pm_state pm_state;
+ u32 slave_id;
+ struct sf_pdma *pdma;
+ struct sf_pdma_desc *desc;
+ struct dma_slave_config cfg;
+ u32 attr;
+ dma_addr_t dma_dev_addr;
+ u32 dma_dev_size;
+ struct tasklet_struct done_tasklet;
+ struct tasklet_struct err_tasklet;
+ struct pdma_regs regs;
+ spinlock_t lock; /* protect chan data */
+ bool xfer_err;
+ int txirq;
+ int errirq;
+ int retries;
+};
+
+struct sf_pdma {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *mappedbase;
+ u32 n_chans;
+ struct sf_pdma_chan chans[PDMA_NR_CH];
+};
+
+#endif /* _SF_PDMA_H */
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 3993ab65c62c..f06016d38a05 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -203,19 +203,27 @@ struct rcar_dmac {
unsigned int n_channels;
struct rcar_dmac_chan *channels;
- unsigned int channels_mask;
+ u32 channels_mask;
DECLARE_BITMAP(modules, 256);
};
#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
+/*
+ * struct rcar_dmac_of_data - This driver's OF data
+ * @chan_offset_base: DMAC channels base offset
+ * @chan_offset_stride: DMAC channels offset stride
+ */
+struct rcar_dmac_of_data {
+ u32 chan_offset_base;
+ u32 chan_offset_stride;
+};
+
/* -----------------------------------------------------------------------------
* Registers
*/
-#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
-
#define RCAR_DMAISTA 0x0020
#define RCAR_DMASEC 0x0030
#define RCAR_DMAOR 0x0060
@@ -1726,6 +1734,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
struct rcar_dmac_chan *rchan,
+ const struct rcar_dmac_of_data *data,
unsigned int index)
{
struct platform_device *pdev = to_platform_device(dmac->dev);
@@ -1735,7 +1744,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
int ret;
rchan->index = index;
- rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+ rchan->iomem = dmac->iomem + data->chan_offset_base +
+ data->chan_offset_stride * index;
rchan->mid_rid = -EINVAL;
spin_lock_init(&rchan->lock);
@@ -1800,7 +1810,15 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
return -EINVAL;
}
+ /*
+	 * If the driver is unable to read the dma-channel-mask property,
+	 * it assumes that all channels are usable.
+ */
dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+ of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
+
+	/* Clear any mask bits that refer to non-existent channels */
+ dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
return 0;
}
@@ -1813,10 +1831,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
struct dma_device *engine;
struct rcar_dmac *dmac;
- struct resource *mem;
+ const struct rcar_dmac_of_data *data;
unsigned int i;
int ret;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
if (!dmac)
return -ENOMEM;
@@ -1848,8 +1870,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
@@ -1901,7 +1922,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
if (!(dmac->channels_mask & BIT(i)))
continue;
- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
if (ret < 0)
goto error;
}
@@ -1948,8 +1969,16 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
rcar_dmac_stop_all_chan(dmac);
}
+static const struct rcar_dmac_of_data rcar_dmac_data = {
+ .chan_offset_base = 0x8000,
+ .chan_offset_stride = 0x80,
+};
+
static const struct of_device_id rcar_dmac_of_ids[] = {
- { .compatible = "renesas,rcar-dmac", },
+ {
+ .compatible = "renesas,rcar-dmac",
+ .data = &rcar_dmac_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 8546ad034720..9a31a315dbef 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -99,6 +99,7 @@
/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
+#define SPRD_DMA_WRAP_ADDR_MASK GENMASK(27, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET 4
/* SPRD_DMA_CHN_INTC register definition */
@@ -118,6 +119,8 @@
#define SPRD_DMA_SWT_MODE_OFFSET 26
#define SPRD_DMA_REQ_MODE_OFFSET 24
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
+#define SPRD_DMA_WRAP_SEL_DEST BIT(23)
+#define SPRD_DMA_WRAP_EN BIT(22)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
#define SPRD_DMA_LLIST_END BIT(19)
@@ -804,6 +807,8 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
+ temp |= schan->linklist.wrap_addr ?
+ SPRD_DMA_WRAP_EN | SPRD_DMA_WRAP_SEL_DEST : 0;
temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
hw->frg_len = temp;
@@ -831,6 +836,12 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
hw->llist_ptr = lower_32_bits(llist_ptr);
hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
SPRD_DMA_LLIST_HIGH_MASK;
+
+ if (schan->linklist.wrap_addr) {
+ hw->wrap_ptr |= schan->linklist.wrap_addr &
+ SPRD_DMA_WRAP_ADDR_MASK;
+ hw->wrap_to |= dst & SPRD_DMA_WRAP_ADDR_MASK;
+ }
} else {
hw->llist_ptr = 0;
hw->src_blk_step = 0;
@@ -939,9 +950,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
schan->linklist.phy_addr = ll_cfg->phy_addr;
schan->linklist.virt_addr = ll_cfg->virt_addr;
+ schan->linklist.wrap_addr = ll_cfg->wrap_addr;
} else {
schan->linklist.phy_addr = 0;
schan->linklist.virt_addr = 0;
+ schan->linklist.wrap_addr = 0;
}
/*
@@ -1080,7 +1093,6 @@ static int sprd_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct sprd_dma_dev *sdev;
struct sprd_dma_chn *dma_chn;
- struct resource *res;
u32 chn_count;
int ret, i;
@@ -1126,8 +1138,7 @@ static int sprd_dma_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+ sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->glb_base))
return PTR_ERR(sdev->glb_base);
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index ba7c4f07fcd6..756a3c951dc7 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -260,6 +260,13 @@ struct edma_cc {
*/
unsigned long *slot_inuse;
+ /*
+	 * For tracking channels reserved for use by the DSP.
+	 * If a bit is cleared, the channel is reserved for the DSP and
+	 * Linux must not touch it.
+ */
+ unsigned long *channels_mask;
+
struct dma_device dma_slave;
struct dma_device *dma_memcpy;
struct edma_chan *slave_chans;
@@ -716,6 +723,12 @@ static int edma_alloc_channel(struct edma_chan *echan,
struct edma_cc *ecc = echan->ecc;
int channel = EDMA_CHAN_SLOT(echan->ch_num);
+ if (!test_bit(echan->ch_num, ecc->channels_mask)) {
+		dev_err(ecc->dev, "Channel %d is reserved and cannot be used!\n",
+ echan->ch_num);
+ return -EINVAL;
+ }
+
/* ensure access through shadow region 0 */
edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
EDMA_CHANNEL_BIT(channel));
@@ -2249,10 +2262,8 @@ static int edma_probe(struct platform_device *pdev)
{
struct edma_soc_info *info = pdev->dev.platform_data;
s8 (*queue_priority_mapping)[2];
- int i, off;
- const s16 (*rsv_slots)[2];
- const s16 (*xbar_chans)[2];
- int irq;
+ const s16 (*reserved)[2];
+ int i, irq;
char *irq_name;
struct resource *mem;
struct device_node *node = pdev->dev.of_node;
@@ -2331,15 +2342,32 @@ static int edma_probe(struct platform_device *pdev)
if (!ecc->slot_inuse)
return -ENOMEM;
+ ecc->channels_mask = devm_kcalloc(dev,
+ BITS_TO_LONGS(ecc->num_channels),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!ecc->channels_mask)
+ return -ENOMEM;
+
+ /* Mark all channels available initially */
+ bitmap_fill(ecc->channels_mask, ecc->num_channels);
+
ecc->default_queue = info->default_queue;
if (info->rsv) {
/* Set the reserved slots in inuse list */
- rsv_slots = info->rsv->rsv_slots;
- if (rsv_slots) {
- for (i = 0; rsv_slots[i][0] != -1; i++)
- bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
- rsv_slots[i][1]);
+ reserved = info->rsv->rsv_slots;
+ if (reserved) {
+ for (i = 0; reserved[i][0] != -1; i++)
+ bitmap_set(ecc->slot_inuse, reserved[i][0],
+ reserved[i][1]);
+ }
+
+ /* Clear channels not usable for Linux */
+ reserved = info->rsv->rsv_chans;
+ if (reserved) {
+ for (i = 0; reserved[i][0] != -1; i++)
+ bitmap_clear(ecc->channels_mask, reserved[i][0],
+ reserved[i][1]);
}
}
@@ -2349,14 +2377,6 @@ static int edma_probe(struct platform_device *pdev)
edma_write_slot(ecc, i, &dummy_paramset);
}
- /* Clear the xbar mapped channels in unused list */
- xbar_chans = info->xbar_chans;
- if (xbar_chans) {
- for (i = 0; xbar_chans[i][1] != -1; i++) {
- off = xbar_chans[i][1];
- }
- }
-
irq = platform_get_irq_byname(pdev, "edma3_ccint");
if (irq < 0 && node)
irq = irq_of_parse_and_map(node, 0);
@@ -2399,12 +2419,15 @@ static int edma_probe(struct platform_device *pdev)
if (!ecc->legacy_mode) {
int lowest_priority = 0;
+ unsigned int array_max;
struct of_phandle_args tc_args;
ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
sizeof(*ecc->tc_list), GFP_KERNEL);
- if (!ecc->tc_list)
- return -ENOMEM;
+ if (!ecc->tc_list) {
+ ret = -ENOMEM;
+ goto err_reg1;
+ }
for (i = 0;; i++) {
ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
@@ -2420,6 +2443,18 @@ static int edma_probe(struct platform_device *pdev)
info->default_queue = i;
}
}
+
+ /* See if we have optional dma-channel-mask array */
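+		/*
+		 * Note: filling the unsigned long bitmap with raw u32 words
+		 * presumably assumes a little-endian or 32-bit layout.
+		 */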
+ array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
+ ret = of_property_read_variable_u32_array(node,
+ "dma-channel-mask",
+ (u32 *)ecc->channels_mask,
+ 1, array_max);
+ if (ret > 0 && ret != array_max)
+ dev_warn(dev, "dma-channel-mask is not complete.\n");
+ else if (ret == -EOVERFLOW || ret == -ENODATA)
+ dev_warn(dev,
+ "dma-channel-mask is out of range or empty\n");
}
/* Event queue priority mapping */
@@ -2437,6 +2472,10 @@ static int edma_probe(struct platform_device *pdev)
edma_dma_init(ecc, legacy_mode);
for (i = 0; i < ecc->num_channels; i++) {
+ /* Do not touch reserved channels */
+ if (!test_bit(i, ecc->channels_mask))
+ continue;
+
/* Assign all channels to the default queue */
edma_assign_channel_eventq(&ecc->slave_chans[i],
info->default_queue);
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c
index fde54687856b..21b8f1131d55 100644
--- a/drivers/dma/uniphier-mdmac.c
+++ b/drivers/dma/uniphier-mdmac.c
@@ -382,7 +382,6 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uniphier_mdmac_device *mdev;
struct dma_device *ddev;
- struct resource *res;
int nr_chans, ret, i;
nr_chans = platform_irq_count(pdev);
@@ -398,8 +397,7 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->reg_base = devm_ioremap_resource(dev, res);
+ mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->reg_base))
return PTR_ERR(mdev->reg_base);
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5d56f1e4d332..a9c5d5cc9f2b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -25,6 +25,12 @@
* The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
* Access (DMA) between a memory-mapped source address and a memory-mapped
* destination address.
+ *
+ * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
+ * Xilinx IP that provides high-bandwidth direct memory access between
+ * memory and AXI4-Stream target peripherals. It provides a scatter-gather
+ * (SG) interface and supports independent configuration of multiple channels.
+ *
*/
#include <linux/bitops.h>
@@ -173,18 +179,6 @@
#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
-/* Multi-Channel DMA Descriptor offsets*/
-#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
-#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
-
-/* Multi-Channel DMA Masks/Shifts */
-#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
-#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
-#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
-#define XILINX_DMA_BD_STRIDE_SHIFT 0
-#define XILINX_DMA_BD_VSIZE_SHIFT 19
-
/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR 0x18
#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -194,6 +188,31 @@
#define xilinx_prep_dma_addr_t(addr) \
((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
+
+/* AXI MCDMA Specific Registers/Offsets */
+#define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
+#define XILINX_MCDMA_CHEN_OFFSET 0x0008
+#define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
+#define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
+#define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
+#define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
+
+/* AXI MCDMA Specific Masks/Shifts */
+#define XILINX_MCDMA_COALESCE_SHIFT 16
+#define XILINX_MCDMA_COALESCE_MAX 24
+#define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
+#define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
+#define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
+#define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
+#define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
+#define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
+#define XILINX_MCDMA_BD_EOP BIT(30)
+#define XILINX_MCDMA_BD_SOP BIT(31)
+
/**
* struct xilinx_vdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
@@ -221,8 +240,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,14 +251,38 @@ struct xilinx_axidma_desc_hw {
u32 next_desc_msb;
u32 buf_addr;
u32 buf_addr_msb;
- u32 mcdma_control;
- u32 vsize_stride;
+ u32 reserved1;
+ u32 reserved2;
u32 control;
u32 status;
u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
+ * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @rsvd: Reserved field @0x10
+ * @control: Control Information field @0x14
+ * @status: Status field @0x18
+ * @sideband_status: Status of sideband signals @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_aximcdma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 rsvd;
+ u32 control;
+ u32 status;
+ u32 sideband_status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
* @next_desc_msb: Next Descriptor Pointer MSB @0x04
@@ -286,6 +329,18 @@ struct xilinx_axidma_tx_segment {
} __aligned(64);
/**
+ * struct xilinx_aximcdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_aximcdma_tx_segment {
+ struct xilinx_aximcdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
* struct xilinx_cdma_tx_segment - Descriptor segment
* @hw: Hardware descriptor
* @node: Node in the descriptor segments list
@@ -303,12 +358,16 @@ struct xilinx_cdma_tx_segment {
* @segments: TX segments list
* @node: Node in the channel descriptors list
* @cyclic: Check for cyclic transfers.
+ * @err: Whether the descriptor has an error.
+ * @residue: Residue of the completed descriptor
*/
struct xilinx_dma_tx_descriptor {
struct dma_async_tx_descriptor async_tx;
struct list_head segments;
struct list_head node;
bool cyclic;
+ bool err;
+ u32 residue;
};
/**
@@ -339,8 +398,8 @@ struct xilinx_dma_tx_descriptor {
* @desc_pendingcount: Descriptor pending count
* @ext_addr: Indicates 64 bit addressing is supported by dma channel
* @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_mv: Statically allocated segments base for MCDMA
* @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
* @cyclic_seg_p: Physical allocated segments base for cyclic dma
@@ -376,8 +435,8 @@ struct xilinx_dma_chan {
u32 desc_pendingcount;
bool ext_addr;
u32 desc_submitcount;
- u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ struct xilinx_aximcdma_tx_segment *seg_mv;
dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
dma_addr_t cyclic_seg_p;
@@ -393,12 +452,14 @@ struct xilinx_dma_chan {
* @XDMA_TYPE_AXIDMA: Axi dma ip.
* @XDMA_TYPE_CDMA: Axi cdma ip.
* @XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
*
*/
enum xdma_ip_type {
XDMA_TYPE_AXIDMA = 0,
XDMA_TYPE_CDMA,
XDMA_TYPE_VDMA,
+ XDMA_TYPE_AXIMCDMA
};
struct xilinx_dma_config {
@@ -406,6 +467,7 @@ struct xilinx_dma_config {
int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
struct clk **tx_clk, struct clk **txs_clk,
struct clk **rx_clk, struct clk **rxs_clk);
+ irqreturn_t (*irq_handler)(int irq, void *data);
};
/**
@@ -414,7 +476,6 @@ struct xilinx_dma_config {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
- * @mcdma: Specifies whether Multi-Channel is present or not
* @flush_on_fsync: Flush on frame sync
* @ext_addr: Indicates 64 bit addressing is supported by dma device
* @pdev: Platform device structure pointer
@@ -427,13 +488,13 @@ struct xilinx_dma_config {
* @nr_channels: Number of channels DMA device supports
* @chan_id: DMA channel identifier
* @max_buffer_len: Max buffer length
+ * @s2mm_index: S2MM channel index
*/
struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
- bool mcdma;
u32 flush_on_fsync;
bool ext_addr;
struct platform_device *pdev;
@@ -446,6 +507,7 @@ struct xilinx_dma_device {
u32 nr_channels;
u32 chan_id;
u32 max_buffer_len;
+ u32 s2mm_index;
};
/* Macros */
@@ -546,6 +608,18 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
}
}
+static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
+ } else {
+ hw->buf_addr = buf_addr + sg_used;
+ }
+}
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
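
xilinx_aximcdma_buf() splits the DMA address across the two buffer-address words only when the channel advertises 64-bit addressing. A worked example with illustrative values: for buf_addr = 0x1_2345_6000 and sg_used = 0x100 the programmed address is 0x1_2345_6100, so

	hw->buf_addr     = lower_32_bits(0x123456100) = 0x23456100
	hw->buf_addr_msb = upper_32_bits(0x123456100) = 0x00000001

Without ext_addr the MSB word is never written and any address above 4 GiB would be silently truncated, which is why the flag is tracked per channel.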
@@ -613,6 +687,33 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
}
spin_unlock_irqrestore(&chan->lock, flags);
+ if (!segment)
+ dev_dbg(chan->dev, "Could not find free tx segment\n");
+
+ return segment;
+}
+
+/**
+ * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_aximcdma_tx_segment *
+xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_aximcdma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
return segment;
}
@@ -627,6 +728,17 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
hw->next_desc_msb = next_desc_msb;
}
+static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -641,6 +753,20 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
}
/**
+ * xilinx_mcdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_aximcdma_tx_segment *
+ segment)
+{
+ xilinx_mcdma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
+}
+
+/**
* xilinx_cdma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
* @segment: DMA transaction segment
@@ -694,6 +820,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
struct xilinx_vdma_tx_segment *segment, *next;
struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+ struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
if (!desc)
return;
@@ -709,12 +836,18 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
list_del(&cdma_segment->node);
xilinx_cdma_free_tx_segment(chan, cdma_segment);
}
- } else {
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
list_for_each_entry_safe(axidma_segment, axidma_next,
&desc->segments, node) {
list_del(&axidma_segment->node);
xilinx_dma_free_tx_segment(chan, axidma_segment);
}
+ } else {
+ list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
+ &desc->segments, node) {
+ list_del(&aximcdma_segment->node);
+ xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
+ }
}
kfree(desc);
@@ -783,10 +916,61 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
chan->cyclic_seg_v, chan->cyclic_seg_p);
}
- if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS, chan->seg_mv,
+ chan->seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
}
+
+}
+
+/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_cdma_tx_segment *cdma_seg;
+ struct xilinx_axidma_tx_segment *axidma_seg;
+ struct xilinx_cdma_desc_hw *cdma_hw;
+ struct xilinx_axidma_desc_hw *axidma_hw;
+ struct list_head *entry;
+ u32 residue = 0;
+
+ list_for_each(entry, &desc->segments) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_seg = list_entry(entry,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_hw = &cdma_seg->hw;
+ residue += (cdma_hw->control - cdma_hw->status) &
+ chan->xdev->max_buffer_len;
+ } else {
+ axidma_seg = list_entry(entry,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_hw = &axidma_seg->hw;
+ residue += (axidma_hw->control - axidma_hw->status) &
+ chan->xdev->max_buffer_len;
+ }
+ }
+
+ return residue;
}
/**
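
xilinx_dma_get_residue() relies on the hardware convention that the low bits of a segment's control word hold the programmed length while the status word accumulates transferred bytes; the difference, masked to max_buffer_len, is the count still outstanding. A worked example, assuming a 26-bit length field:

	control = 0x00010000 (64 KiB programmed)
	status  = 0x0000C000 (48 KiB completed)
	residue += (0x00010000 - 0x0000C000) & 0x03FFFFFF = 0x4000 (16 KiB left)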
@@ -823,7 +1007,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
- struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
if (desc->cyclic) {
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -833,14 +1017,22 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Remove from the list of running transactions */
list_del(&desc->node);
- /* Run the link descriptor callback function */
- dmaengine_desc_get_callback(&desc->async_tx, &cb);
- if (dmaengine_desc_callback_valid(&cb)) {
- spin_unlock_irqrestore(&chan->lock, flags);
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock_irqsave(&chan->lock, flags);
+ if (unlikely(desc->err)) {
+ if (chan->direction == DMA_DEV_TO_MEM)
+ result.result = DMA_TRANS_READ_FAILED;
+ else
+ result.result = DMA_TRANS_WRITE_FAILED;
+ } else {
+ result.result = DMA_TRANS_NOERROR;
}
+ result.residue = desc->residue;
+
+ /* Run the link descriptor callback function */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
+ spin_lock_irqsave(&chan->lock, flags);
+
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
@@ -922,6 +1114,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
list_add_tail(&chan->seg_v[i].node,
&chan->free_seg_list);
}
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ /* Allocate the buffer descriptors. */
+ chan->seg_mv = dma_alloc_coherent(chan->dev,
+ sizeof(*chan->seg_mv) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_mv) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_mv[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_mv[i].phys = chan->seg_p +
+ sizeof(*chan->seg_mv) * i;
+ list_add_tail(&chan->seg_mv[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
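
The MCDMA buffer descriptors allocated above are pre-linked into a circular ring: descriptor i points at descriptor (i + 1) % XILINX_DMA_NUM_DESCS, so the tail wraps back to the head, the hardware can chain BD fetches without a NULL terminator, and per-transfer code only rewrites payload fields. A sketch of the linking step, where ring, ring_phys and desc_size stand in for seg_mv, seg_p and the element size:

	for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
		dma_addr_t next = ring_phys +
				  desc_size * ((i + 1) % XILINX_DMA_NUM_DESCS);

		ring[i].hw.next_desc     = lower_32_bits(next);
		ring[i].hw.next_desc_msb = upper_32_bits(next);
	}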
@@ -937,7 +1153,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
}
if (!chan->desc_pool &&
- (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
+ ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
+ chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -1003,8 +1220,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
enum dma_status ret;
unsigned long flags;
u32 residue = 0;
@@ -1013,23 +1228,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE || !txstate)
return ret;
- if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
- desc = list_last_entry(&chan->active_list,
- struct xilinx_dma_tx_descriptor, node);
- if (chan->has_sg) {
- list_for_each_entry(segment, &desc->segments, node) {
- hw = &segment->hw;
- residue += (hw->control - hw->status) &
- chan->xdev->max_buffer_len;
- }
- }
- spin_unlock_irqrestore(&chan->lock, flags);
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ /*
+ * VDMA and simple mode do not support residue reporting, so the
+ * residue field will always be 0.
+ */
+ if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+ residue = xilinx_dma_get_residue(chan, desc);
- chan->residue = residue;
- dma_set_residue(txstate, chan->residue);
- }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dma_set_residue(txstate, residue);
return ret;
}
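
With the residue moved from a per-channel field to per-descriptor state, any dmaengine client can poll progress through the standard API. A hypothetical client-side sketch:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("%u bytes still outstanding\n", state.residue);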
@@ -1301,53 +1513,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg && !chan->xdev->mcdma)
+ if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
- if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_CDESC(chan->tdest),
- head_desc->async_tx.phys);
- }
- }
- }
-
xilinx_dma_start(chan);
if (chan->err)
return;
/* Start the transfer */
- if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->has_sg) {
if (chan->cyclic)
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
chan->cyclic_seg_v->phys);
else
xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
- } else if (chan->has_sg && chan->xdev->mcdma) {
- if (chan->direction == DMA_MEM_TO_DEV) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- if (!chan->tdest) {
- dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
- tail_segment->phys);
- } else {
- dma_ctrl_write(chan,
- XILINX_DMA_MCRX_TDESC(chan->tdest),
- tail_segment->phys);
- }
- }
} else {
struct xilinx_axidma_tx_segment *segment;
struct xilinx_axidma_desc_hw *hw;
@@ -1371,6 +1553,76 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
}
/**
+ * xilinx_mcdma_start_transfer - Starts MCDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_axidma_tx_segment *tail_segment;
+ u32 reg;
+
+ /*
+ * The lock is already held by the calling functions, so we don't
+ * need to take it again here.
+ */
+
+ if (chan->err)
+ return;
+
+ if (!chan->idle)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+
+ if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
+ reg &= ~XILINX_MCDMA_COALESCE_MASK;
+ reg |= chan->desc_pendingcount <<
+ XILINX_MCDMA_COALESCE_SHIFT;
+ }
+
+ reg |= XILINX_MCDMA_IRQ_ALL_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ /* Program current descriptor */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
+ head_desc->async_tx.phys);
+
+ /* Program channel enable register */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
+ reg |= BIT(chan->tdest);
+ dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
+
+ /* Start the fetch of BDs for the channel */
+ reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+ reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
+ tail_segment->phys);
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ chan->idle = false;
+}
+
+/**
* xilinx_dma_issue_pending - Issue pending transactions
* @dchan: DMA channel
*/
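
Unlike AXI DMA, which programs the shared DMACR, xilinx_mcdma_start_transfer() works on per-channel registers indexed by chan->tdest. A worked example of the control-register math for desc_pendingcount = 4:

	reg &= ~XILINX_MCDMA_COALESCE_MASK;       /* clear bits 23:16      */
	reg |= 4 << XILINX_MCDMA_COALESCE_SHIFT;  /* IOC after every 4 BDs */
	reg |= XILINX_MCDMA_IRQ_ALL_MASK;         /* unmask bits 7:5       */

Setting BIT(chan->tdest) in CHEN then enables the channel, RUNSTOP in the per-channel CR starts BD fetching, and the write of the tail descriptor address kicks off the transfer.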
@@ -1399,6 +1651,13 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->has_sg && chan->xdev->dma_config->dmatype !=
+ XDMA_TYPE_VDMA)
+ desc->residue = xilinx_dma_get_residue(chan, desc);
+ else
+ desc->residue = 0;
+ desc->err = chan->err;
+
list_del(&desc->node);
if (!desc->cyclic)
dma_cookie_complete(&desc->async_tx);
@@ -1433,6 +1692,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
chan->err = false;
chan->idle = true;
+ chan->desc_pendingcount = 0;
chan->desc_submitcount = 0;
return err;
@@ -1461,6 +1721,74 @@ static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
}
/**
+ * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx MCDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dma_chan *chan = data;
+ u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
+ else
+ ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
+
+ /* Read the channel ID raising the interrupt */
+ chan_sermask = dma_ctrl_read(chan, ser_offset);
+ chan_id = ffs(chan_sermask);
+
+ if (!chan_id)
+ return IRQ_NONE;
+
+ if (chan->direction == DMA_DEV_TO_MEM)
+ chan_offset = chan->xdev->s2mm_index;
+
+ chan_offset = chan_offset + (chan_id - 1);
+ chan = chan->xdev->chan[chan_offset];
+ /* Read the status and ack the interrupts. */
+ status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
+ if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
+ status & XILINX_MCDMA_IRQ_ALL_MASK);
+
+ if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
+ dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
+ chan,
+ dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
+ (chan->tdest)),
+ dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
+ (chan->tdest)));
+ chan->err = true;
+ }
+
+ if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
+ /*
+ * Device takes too long to do the transfer when the user requires
+ * responsiveness.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
+ spin_lock(&chan->lock);
+ xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
+ chan->start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
* xilinx_dma_irq_handler - DMA Interrupt handler
* @irq: IRQ number
* @data: Pointer to the Xilinx DMA channel structure
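
The MCDMA handler must first discover which channel behind the shared interrupt line actually fired: the SER register holds a bitmask of raised channels and ffs() returns the 1-based index of the lowest set bit, or 0 when no bit is set (hence the IRQ_NONE bail-out). For example, chan_sermask = 0x04 yields chan_id = 3; for DEV_TO_MEM interrupts the driver then adds s2mm_index so the index lands on the right S2MM entry in xdev->chan[].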
@@ -1967,31 +2295,32 @@ error:
}
/**
- * xilinx_dma_prep_interleaved - prepare a descriptor for a
- * DMA_SLAVE transaction
+ * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
- * @xt: Interleaved template pointer
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
* @flags: transfer ack flags
+ * @context: APP words of the descriptor
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *
-xilinx_dma_prep_interleaved(struct dma_chan *dchan,
- struct dma_interleaved_template *xt,
- unsigned long flags)
+xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment;
- struct xilinx_axidma_desc_hw *hw;
-
- if (!is_slave_direction(xt->dir))
- return NULL;
-
- if (!xt->numf || !xt->sgl[0].size)
- return NULL;
+ struct xilinx_aximcdma_tx_segment *segment = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
- if (xt->frame_size != 1)
+ if (!is_slave_direction(direction))
return NULL;
/* Allocate a transaction descriptor. */
@@ -1999,54 +2328,67 @@ xilinx_dma_prep_interleaved(struct dma_chan *dchan,
if (!desc)
return NULL;
- chan->direction = xt->dir;
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
desc->async_tx.tx_submit = xilinx_dma_tx_submit;
- /* Get a free segment */
- segment = xilinx_axidma_alloc_tx_segment(chan);
- if (!segment)
- goto error;
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
- hw = &segment->hw;
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_aximcdma_desc_hw *hw;
- /* Fill in the descriptor */
- if (xt->dir != DMA_MEM_TO_DEV)
- hw->buf_addr = xt->dst_start;
- else
- hw->buf_addr = xt->src_start;
+ /* Get a free segment */
+ segment = xilinx_aximcdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
- hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
- hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
- XILINX_DMA_BD_VSIZE_MASK;
- hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
- XILINX_DMA_BD_STRIDE_MASK;
- hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it does not exceed the hw limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ chan->xdev->max_buffer_len);
+ hw = &segment->hw;
- /*
- * Insert the segment into the descriptor segments
- * list.
- */
- list_add_tail(&segment->node, &desc->segments);
+ /* Fill in the descriptor */
+ xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
+ sg_used);
+ hw->control = copy;
+ if (chan->direction == DMA_MEM_TO_DEV && app_w) {
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ sg_used += copy;
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
segment = list_first_entry(&desc->segments,
- struct xilinx_axidma_tx_segment, node);
+ struct xilinx_aximcdma_tx_segment, node);
desc->async_tx.phys = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
- if (xt->dir == DMA_MEM_TO_DEV) {
- segment->hw.control |= XILINX_DMA_BD_SOP;
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_MCDMA_BD_SOP;
segment = list_last_entry(&desc->segments,
- struct xilinx_axidma_tx_segment,
+ struct xilinx_aximcdma_tx_segment,
node);
- segment->hw.control |= XILINX_DMA_BD_EOP;
+ segment->hw.control |= XILINX_MCDMA_BD_EOP;
}
return &desc->async_tx;
error:
xilinx_dma_free_tx_descriptor(chan, desc);
+
return NULL;
}
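
Clients reach the new callback through the standard slave-SG entry point; the APP words travel in the context argument, which the generic wrapper leaves NULL. A hypothetical submission sketch:

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -EIO;	/* no free segments or bad direction */

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);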
@@ -2194,7 +2536,9 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+ err);
return err;
}
@@ -2259,14 +2603,18 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
+ err);
return err;
}
*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
if (IS_ERR(*dev_clk)) {
err = PTR_ERR(*dev_clk);
- dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
+ err);
return err;
}
@@ -2299,7 +2647,9 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
if (IS_ERR(*axi_clk)) {
err = PTR_ERR(*axi_clk);
- dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+ err);
return err;
}
@@ -2321,7 +2671,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
err = clk_prepare_enable(*axi_clk);
if (err) {
- dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+ dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
+ err);
return err;
}
@@ -2454,6 +2805,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
"xlnx,axi-dma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
chan->id = chan_id;
+ xdev->s2mm_index = xdev->nr_channels;
chan->tdest = chan_id - xdev->nr_channels;
chan->has_vflip = of_property_read_bool(node,
"xlnx,enable-vert-flip");
@@ -2463,7 +2815,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
XILINX_VDMA_ENABLE_VERTICAL_FLIP;
}
- chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
+ else
+ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
chan->config.park = 1;
@@ -2478,9 +2834,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/* Request the interrupt */
- chan->irq = irq_of_parse_and_map(node, 0);
- err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
- "xilinx-dma-controller", chan);
+ chan->irq = irq_of_parse_and_map(node, chan->tdest);
+ err = request_irq(chan->irq, xdev->dma_config->irq_handler,
+ IRQF_SHARED, "xilinx-dma-controller", chan);
if (err) {
dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
return err;
@@ -2489,6 +2845,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
chan->start_transfer = xilinx_dma_start_transfer;
chan->stop_transfer = xilinx_dma_stop_transfer;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ chan->start_transfer = xilinx_mcdma_start_transfer;
+ chan->stop_transfer = xilinx_dma_stop_transfer;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->start_transfer = xilinx_cdma_start_transfer;
chan->stop_transfer = xilinx_cdma_stop_transfer;
@@ -2545,7 +2904,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
- if ((ret < 0) && xdev->mcdma)
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
dev_warn(xdev->dev, "missing dma-channels property\n");
for (i = 0; i < nr_channels; i++)
@@ -2578,22 +2937,31 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
static const struct xilinx_dma_config axidma_config = {
.dmatype = XDMA_TYPE_AXIDMA,
.clk_init = axidma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
+static const struct xilinx_dma_config aximcdma_config = {
+ .dmatype = XDMA_TYPE_AXIMCDMA,
+ .clk_init = axidma_clk_init,
+ .irq_handler = xilinx_mcdma_irq_handler,
+};
static const struct xilinx_dma_config axicdma_config = {
.dmatype = XDMA_TYPE_CDMA,
.clk_init = axicdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
static const struct xilinx_dma_config axivdma_config = {
.dmatype = XDMA_TYPE_VDMA,
.clk_init = axivdma_clk_init,
+ .irq_handler = xilinx_dma_irq_handler,
};
static const struct of_device_id xilinx_dma_of_ids[] = {
{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+ { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
@@ -2612,7 +2980,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
- struct resource *io;
u32 num_frames, addr_width, len_width;
int i, err;
@@ -2638,16 +3005,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return err;
/* Request and map I/O memory */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ xdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xdev->regs))
return PTR_ERR(xdev->regs);
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
- if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
+ xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
if (!of_property_read_u32(node, "xlnx,sg-length-width",
&len_width)) {
if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2712,14 +3078,17 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
- xdev->common.device_prep_interleaved_dma =
- xilinx_dma_prep_interleaved;
- /* Residue calculation is supported by only AXI DMA */
+ /* Residue calculation is supported only by AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ /* Residue calculation is supported only by AXI DMA and CDMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+ xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
} else {
xdev->common.device_prep_interleaved_dma =
xilinx_vdma_dma_prep_interleaved;
@@ -2755,6 +3124,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+ dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
else
dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
diff --git a/drivers/dma/zx_dma.c b/drivers/dma/zx_dma.c
index 9f4436f7c914..5fe2e8b9a7b8 100644
--- a/drivers/dma/zx_dma.c
+++ b/drivers/dma/zx_dma.c
@@ -754,18 +754,13 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
static int zx_dma_probe(struct platform_device *op)
{
struct zx_dma_dev *d;
- struct resource *iores;
int i, ret = 0;
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- if (!iores)
- return -EINVAL;
-
d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
- d->base = devm_ioremap_resource(&op->dev, iores);
+ d->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(d->base))
return PTR_ERR(d->base);
@@ -894,7 +889,6 @@ static int zx_dma_remove(struct platform_device *op)
list_del(&c->vc.chan.device_node);
}
clk_disable_unprepare(d->clk);
- dmam_pool_destroy(d->pool);
return 0;
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 415afaf479e7..a7f216191493 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -322,6 +322,25 @@ static void axp288_put_role_sw(void *data)
usb_role_switch_put(info->role_sw);
}
+static int axp288_extcon_find_role_sw(struct axp288_extcon_info *info)
+{
+ const struct software_node *swnode;
+ struct fwnode_handle *fwnode;
+
+ if (!x86_match_cpu(cherry_trail_cpu_ids))
+ return 0;
+
+ swnode = software_node_find_by_name(NULL, "intel-xhci-usb-sw");
+ if (!swnode)
+ return -EPROBE_DEFER;
+
+ fwnode = software_node_fwnode(swnode);
+ info->role_sw = usb_role_switch_find_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return info->role_sw ? 0 : -EPROBE_DEFER;
+}
+
static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
@@ -343,9 +362,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- info->role_sw = usb_role_switch_get(dev);
- if (IS_ERR(info->role_sw))
- return PTR_ERR(info->role_sw);
+ ret = axp288_extcon_find_role_sw(info);
+ if (ret)
+ return ret;
+
if (info->role_sw) {
ret = devm_add_action_or_reset(dev, axp288_put_role_sw, info);
if (ret)
@@ -440,26 +460,14 @@ static struct platform_driver axp288_extcon_driver = {
},
};
-static struct device_connection axp288_extcon_role_sw_conn = {
- .endpoint[0] = "axp288_extcon",
- .endpoint[1] = "intel_xhci_usb_sw-role-switch",
- .id = "usb-role-switch",
-};
-
static int __init axp288_extcon_init(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_add(&axp288_extcon_role_sw_conn);
-
return platform_driver_register(&axp288_extcon_driver);
}
module_init(axp288_extcon_init);
static void __exit axp288_extcon_exit(void)
{
- if (x86_match_cpu(cherry_trail_cpu_ids))
- device_connection_remove(&axp288_extcon_role_sw_conn);
-
platform_driver_unregister(&axp288_extcon_driver);
}
module_exit(axp288_extcon_exit);
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 9d32150e68db..771f6f4cf92e 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -338,6 +338,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
+ int pwrsrc_sts, id;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -387,8 +388,19 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
goto disable_sw_control;
}
- /* Route D+ and D- to PMIC for initial charger detection */
- cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
+ ret = regmap_read(ext->regmap, CHT_WC_PWRSRC_STS, &pwrsrc_sts);
+ if (ret) {
+ dev_err(ext->dev, "Error reading pwrsrc status: %d\n", ret);
+ goto disable_sw_control;
+ }
+
+ /*
+ * If no USB host or device is connected, route D+ and D- to the
+ * PMIC for initial charger detection.
+ */
+ id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
+ if (id != INTEL_USB_ID_GND)
+ cht_wc_extcon_set_phymux(ext, MUX_SEL_PMIC);
/* Get initial state */
cht_wc_extcon_pwrsrc_event(ext);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index dc43847ad2b0..bcf65aaca5d2 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -65,6 +65,10 @@ struct sm5502_muic_info {
/* Default value of SM5502 register to bring up MUIC device. */
static struct reg_data sm5502_reg_data[] = {
{
+ .reg = SM5502_REG_RESET,
+ .val = SM5502_REG_RESET_MASK,
+ .invert = true,
+ }, {
.reg = SM5502_REG_CONTROL,
.val = SM5502_REG_CONTROL_MASK_INT_MASK,
.invert = false,
@@ -272,7 +276,7 @@ static int sm5502_muic_set_path(struct sm5502_muic_info *info,
/* Return cable type of attached or detached accessories */
static unsigned int sm5502_muic_get_cable_type(struct sm5502_muic_info *info)
{
- unsigned int cable_type = -1, adc, dev_type1;
+ unsigned int cable_type, adc, dev_type1;
int ret;
/* Read ADC value according to external cable or button */
diff --git a/drivers/extcon/extcon-sm5502.h b/drivers/extcon/extcon-sm5502.h
index 9dbb634d213b..ce1f1ec310c4 100644
--- a/drivers/extcon/extcon-sm5502.h
+++ b/drivers/extcon/extcon-sm5502.h
@@ -237,6 +237,8 @@ enum sm5502_reg {
#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
| (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
+#define SM5502_REG_RESET_MASK (0x1)
+
/* SM5502 Interrupts */
enum sm5502_irq {
/* INT1 */
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 1da7ba18d399..6e291d8f3a27 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1646,14 +1646,6 @@ static long fw_device_op_ioctl(struct file *file,
return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
-#ifdef CONFIG_COMPAT
-static long fw_device_op_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
-}
-#endif
-
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
struct client *client = file->private_data;
@@ -1694,7 +1686,8 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
if (ret < 0)
goto fail;
- ret = fw_iso_buffer_map_vma(&client->buffer, vma);
+ ret = vm_map_pages_zero(vma, client->buffer.pages,
+ client->buffer.page_count);
if (ret < 0)
goto fail;
@@ -1795,7 +1788,5 @@ const struct file_operations fw_device_ops = {
.mmap = fw_device_op_mmap,
.release = fw_device_op_release,
.poll = fw_device_op_poll,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = fw_device_op_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
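
compat_ptr_ioctl() is the common helper for drivers whose ioctl arguments are all pointers: it applies compat_ptr() to the argument and forwards to ->unlocked_ioctl, which is exactly what the removed wrapper did by hand. The generic pattern (the handler name here is hypothetical):

	static const struct file_operations example_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= example_ioctl,
		.compat_ioctl	= compat_ptr_ioctl,
	};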
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index df8a56a979b9..185b0b78b3d6 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -91,13 +91,6 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
}
EXPORT_SYMBOL(fw_iso_buffer_init);
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
- struct vm_area_struct *vma)
-{
- return vm_map_pages_zero(vma, buffer->pages,
- buffer->page_count);
-}
-
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0f0bed3a4bbb..4b0e4ee655a1 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -158,8 +158,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction);
-int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
- struct vm_area_struct *vma);
/* -topology */
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 522f3addb5bd..33269316f111 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1752,7 +1752,7 @@ static u32 update_bus_time(struct fw_ohci *ohci)
if (unlikely(!ohci->bus_time_running)) {
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
- ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
+ ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
(cycle_time_seconds & 0x40);
ohci->bus_time_running = true;
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 1e21fc3e9851..2045566d622f 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -35,6 +35,7 @@ static struct dmi_memdev_info {
const char *bank;
u64 size; /* bytes */
u16 handle;
+ u8 type; /* DDR2, DDR3, DDR4 etc */
} *dmi_memdev;
static int dmi_memdev_nr;
@@ -391,7 +392,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
u64 bytes;
u16 size;
- if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
+ if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x13)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@@ -400,6 +401,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
dmi_memdev[nr].handle = get_unaligned(&dm->handle);
dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
+ dmi_memdev[nr].type = d[0x12];
size = get_unaligned((u16 *)&d[0xC]);
if (size == 0)
@@ -1128,3 +1130,40 @@ u64 dmi_memdev_size(u16 handle)
return ~0ull;
}
EXPORT_SYMBOL_GPL(dmi_memdev_size);
+
+/**
+ * dmi_memdev_type - get the memory type
+ * @handle: DMI structure handle
+ *
+ * Return the DMI memory type of the module in the slot associated with the
+ * given DMI handle, or 0x0 if no such DMI handle exists.
+ */
+u8 dmi_memdev_type(u16 handle)
+{
+ int n;
+
+ if (dmi_memdev) {
+ for (n = 0; n < dmi_memdev_nr; n++) {
+ if (handle == dmi_memdev[n].handle)
+ return dmi_memdev[n].type;
+ }
+ }
+ return 0x0; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_type);
+
+/**
+ * dmi_memdev_handle - get the DMI handle of a memory slot
+ * @slot: slot number
+ *
+ * Return the DMI handle associated with a given memory slot, or %0xFFFF
+ * if there is no such slot.
+ */
+u16 dmi_memdev_handle(int slot)
+{
+ if (dmi_memdev && slot >= 0 && slot < dmi_memdev_nr)
+ return dmi_memdev[slot].handle;
+
+ return 0xffff; /* Not a valid value */
+}
+EXPORT_SYMBOL_GPL(dmi_memdev_handle);
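
Together with the existing dmi_memdev_size(), the two new exports let a consumer such as an EDAC driver enumerate DIMM slots without parsing SMBIOS itself. A hypothetical walk:

	int slot;

	for (slot = 0; ; slot++) {
		u16 handle = dmi_memdev_handle(slot);

		if (handle == 0xffff)
			break;	/* past the last known slot */

		pr_info("slot %d: handle 0x%04x type 0x%02x size %llu bytes\n",
			slot, handle, dmi_memdev_type(handle),
			dmi_memdev_size(handle));
	}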
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index b248870a9806..bcc378c19ebe 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -75,6 +75,27 @@ config EFI_MAX_FAKE_MEM
Ranges can be set up to this value using comma-separated list.
The default value is 8.
+config EFI_SOFT_RESERVE
+ bool "Reserve EFI Specific Purpose Memory"
+ depends on EFI && EFI_STUB && ACPI_HMAT
+ default ACPI_HMAT
+ help
+ On systems that have mixed performance classes of memory EFI
+ may indicate specific purpose memory with an attribute (See
+ EFI_MEMORY_SP in UEFI 2.8). A memory range tagged with this
+ attribute may have unique performance characteristics compared
+ to the system's general purpose "System RAM" pool. On the
+ expectation that such memory has application specific usage,
+ and its base EFI memory type is "conventional" answer Y to
+ arrange for the kernel to reserve it as a "Soft Reserved"
+ resource, and set aside for direct-access (device-dax) by
+ default. The memory range can later be optionally assigned to
+ the page allocator by system administrator policy via the
+ device-dax kmem facility. Say N to have the kernel treat this
+ memory as "System RAM" by default.
+
+ If unsure, say Y.
+
config EFI_PARAMS_FROM_FDT
bool
help
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 4ac2de4dfa72..554d795270d9 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -20,13 +20,16 @@ obj-$(CONFIG_UEFI_CPER) += cper.o
obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
obj-$(CONFIG_EFI_STUB) += libstub/
-obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_map.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
+fake_map-y += fake_mem.o
+fake_map-$(CONFIG_X86) += x86_fake_mem.o
+
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index 0e206c9e0d7a..5ccf39986a14 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -53,7 +53,8 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
for (i = 0; i < dev_header->prop_count; i++) {
int remaining = dev_header->len - (ptr - (void *)dev_header);
- u32 key_len, val_len;
+ u32 key_len, val_len, entry_len;
+ const u8 *entry_data;
char *key;
if (sizeof(key_len) > remaining)
@@ -85,17 +86,14 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
ucs2_as_utf8(key, ptr + sizeof(key_len),
key_len - sizeof(key_len));
- entry[i].name = key;
- entry[i].length = val_len - sizeof(val_len);
- entry[i].is_array = !!entry[i].length;
- entry[i].type = DEV_PROP_U8;
- entry[i].pointer.u8_data = ptr + key_len + sizeof(val_len);
-
+ entry_data = ptr + key_len + sizeof(val_len);
+ entry_len = val_len - sizeof(val_len);
+ entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+ entry_len);
if (dump_properties) {
- dev_info(dev, "property: %s\n", entry[i].name);
+ dev_info(dev, "property: %s\n", key);
print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
- 16, 1, entry[i].pointer.u8_data,
- entry[i].length, true);
+ 16, 1, entry_data, entry_len, true);
}
ptr += key_len + val_len;
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 311cd349a862..904fa09e6a6b 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -164,6 +164,15 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
+ * Special purpose memory is 'soft reserved', which means it
+ * is set aside initially, but can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
* them as System RAM if they can be mapped writeback cacheable.
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index e2ac5fa5531b..899b803842bb 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -121,6 +121,30 @@ static int __init arm_enable_runtime_services(void)
return 0;
}
+ if (efi_soft_reserve_enabled()) {
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+ struct resource *res;
+
+ if (!(md->attribute & EFI_MEMORY_SP))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (WARN_ON(!res))
+ break;
+
+ res->start = md->phys_addr;
+ res->end = md->phys_addr + md_size - 1;
+ res->name = "Soft Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_SOFT_RESERVED;
+
+ insert_resource(&iomem_resource, res);
+ }
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e98bbf8e56d9..d101f072c8f8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -81,6 +81,11 @@ bool efi_runtime_disabled(void)
return disable_runtime;
}
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
+}
+
static int __init parse_efi_cmdline(char *str)
{
if (!str) {
@@ -94,6 +99,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "nosoftreserve"))
+ set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
+
return 0;
}
early_param("efi", parse_efi_cmdline);
@@ -296,7 +304,7 @@ static __init int efivar_ssdt_load(void)
goto free_data;
}
- ret = acpi_load_table(data);
+ ret = acpi_load_table(data, NULL);
if (ret) {
pr_err("failed to load table: %d\n", ret);
goto free_data;
@@ -842,15 +850,16 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
- EFI_MEMORY_NV |
+ EFI_MEMORY_NV | EFI_MEMORY_SP |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_SP ? "SP" : "",
attr & EFI_MEMORY_NV ? "NV" : "",
attr & EFI_MEMORY_XP ? "XP" : "",
attr & EFI_MEMORY_RP ? "RP" : "",
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index d6dd5f503fa2..2762e0662bf4 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -246,6 +246,9 @@ void __init efi_esrt_init(void)
int rc;
phys_addr_t end;
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
pr_debug("esrt-init: loading.\n");
if (!esrt_table_exists())
return;
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 9501edc0fcfb..bb9fc70d0cfa 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -17,12 +17,10 @@
#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/sort.h>
-#include <asm/efi.h>
+#include "fake_mem.h"
-#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
-
-static struct efi_mem_range fake_mems[EFI_MAX_FAKEMEM];
-static int nr_fake_mem;
+struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+int nr_fake_mem;
static int __init cmp_fake_mem(const void *x1, const void *x2)
{
@@ -44,13 +42,13 @@ void __init efi_fake_memmap(void)
void *new_memmap;
int i;
- if (!nr_fake_mem)
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
return;
/* count up the number of EFI memory descriptor */
for (i = 0; i < nr_fake_mem; i++) {
for_each_efi_memory_desc(md) {
- struct range *r = &fake_mems[i].range;
+ struct range *r = &efi_fake_mems[i].range;
new_nr_map += efi_memmap_split_count(md, r);
}
@@ -70,7 +68,7 @@ void __init efi_fake_memmap(void)
}
for (i = 0; i < nr_fake_mem; i++)
- efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
+ efi_memmap_insert(&efi.memmap, new_memmap, &efi_fake_mems[i]);
/* swap into new EFI memmap */
early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
@@ -104,22 +102,22 @@ static int __init setup_fake_mem(char *p)
if (nr_fake_mem >= EFI_MAX_FAKEMEM)
break;
- fake_mems[nr_fake_mem].range.start = start;
- fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
- fake_mems[nr_fake_mem].attribute = attribute;
+ efi_fake_mems[nr_fake_mem].range.start = start;
+ efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1;
+ efi_fake_mems[nr_fake_mem].attribute = attribute;
nr_fake_mem++;
if (*p == ',')
p++;
}
- sort(fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
+ sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
cmp_fake_mem, NULL);
for (i = 0; i < nr_fake_mem; i++)
pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]",
- fake_mems[i].attribute, fake_mems[i].range.start,
- fake_mems[i].range.end);
+ efi_fake_mems[i].attribute, efi_fake_mems[i].range.start,
+ efi_fake_mems[i].range.end);
return *p == '\0' ? 0 : -EINVAL;
}
diff --git a/drivers/firmware/efi/fake_mem.h b/drivers/firmware/efi/fake_mem.h
new file mode 100644
index 000000000000..d52791af4b18
--- /dev/null
+++ b/drivers/firmware/efi/fake_mem.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __EFI_FAKE_MEM_H__
+#define __EFI_FAKE_MEM_H__
+#include <asm/efi.h>
+
+#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+extern struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM];
+extern int nr_fake_mem;
+#endif /* __EFI_FAKE_MEM_H__ */
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 41213bf5fcf5..4566640de650 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -146,6 +146,11 @@ static efi_status_t reserve_kernel_base(efi_system_table_t *sys_table_arg,
continue;
case EFI_CONVENTIONAL_MEMORY:
+ /* Skip soft reserved conventional memory */
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
/*
* Reserve the intersection between this entry and the
* region.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 35dbc2791c97..e02579907f2e 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,6 +32,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
static int __section(.data) __nokaslr;
static int __section(.data) __quiet;
static int __section(.data) __novamap;
+static bool __section(.data) efi_nosoftreserve;
int __pure nokaslr(void)
{
@@ -45,6 +46,10 @@ int __pure novamap(void)
{
return __novamap;
}
+bool __pure __efi_soft_reserve_enabled(void)
+{
+ return !efi_nosoftreserve;
+}
#define EFI_MMAP_NR_SLACK_SLOTS 8
@@ -211,6 +216,10 @@ again:
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
if (desc->num_pages < nr_pages)
continue;
@@ -305,6 +314,10 @@ efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (efi_soft_reserve_enabled() &&
+ (desc->attribute & EFI_MEMORY_SP))
+ continue;
+
if (desc->num_pages < nr_pages)
continue;
@@ -484,6 +497,12 @@ efi_status_t efi_parse_options(char const *cmdline)
__novamap = 1;
}
+ if (IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
+ !strncmp(str, "nosoftreserve", 7)) {
+ str += strlen("nosoftreserve");
+ efi_nosoftreserve = 1;
+ }
+
/* Group words together, delimited by "," */
while (*str && *str != ' ' && *str != ',')
str++;
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 53f1466f7de6..35edd7cfb6a1 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -58,6 +58,10 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return 0;
+
region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
first_slot = round_up(md->phys_addr, align);
diff --git a/drivers/firmware/efi/x86_fake_mem.c b/drivers/firmware/efi/x86_fake_mem.c
new file mode 100644
index 000000000000..e5d6d5a1b240
--- /dev/null
+++ b/drivers/firmware/efi/x86_fake_mem.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
+#include <linux/efi.h>
+#include <asm/e820/api.h>
+#include "fake_mem.h"
+
+void __init efi_fake_memmap_early(void)
+{
+ int i;
+
+ /*
+ * The late efi_fake_mem() call can handle all requests if
+ * EFI_MEMORY_SP support is disabled.
+ */
+ if (!efi_soft_reserve_enabled())
+ return;
+
+ if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem)
+ return;
+
+ /*
+ * Given that efi_fake_memmap() needs to perform memblock
+ * allocations it needs to run after e820__memblock_setup().
+ * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given
+ * address range that potentially needs to mark the memory as
+ * reserved prior to e820__memblock_setup(). Update e820
+ * directly if EFI_MEMORY_SP is specified for an
+ * EFI_CONVENTIONAL_MEMORY descriptor.
+ */
+ for (i = 0; i < nr_fake_mem; i++) {
+ struct efi_mem_range *mem = &efi_fake_mems[i];
+ efi_memory_desc_t *md;
+ u64 m_start, m_end;
+
+ if ((mem->attribute & EFI_MEMORY_SP) == 0)
+ continue;
+
+ m_start = mem->range.start;
+ m_end = mem->range.end;
+ for_each_efi_memory_desc(md) {
+ u64 start, end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ /* skip descriptors the fake range does not overlap */
+ if (m_start > end || m_end < start)
+ continue;
+
+ /*
+ * Trim the boundary of the e820 update to the
+ * descriptor in case the fake range overlaps
+ * !EFI_CONVENTIONAL_MEMORY
+ */
+ start = max(start, m_start);
+ end = min(end, m_end);
+
+ if (end <= start)
+ continue;
+ e820__range_update(start, end - start + 1, E820_TYPE_RAM,
+ E820_TYPE_SOFT_RESERVED);
+ e820__update_table(e820_table);
+ }
+ }
+}
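
The loop above is an interval-intersection problem: clamp the requested fake
range to each EFI_CONVENTIONAL_MEMORY descriptor before rewriting the e820
entry. A self-contained sketch of the overlap test and trim, with inclusive
bounds as in the driver and made-up addresses:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; }; /* inclusive bounds */

/* returns 1 and writes the intersection when the ranges overlap */
static int trim_to_descriptor(struct range fake, struct range desc,
                              struct range *out)
{
    if (fake.start > desc.end || fake.end < desc.start)
        return 0; /* no overlap */
    out->start = fake.start > desc.start ? fake.start : desc.start;
    out->end = fake.end < desc.end ? fake.end : desc.end;
    return out->end > out->start;
}

int main(void)
{
    struct range fake = { 0x100000000ULL, 0x17fffffffULL };
    struct range desc = { 0x080000000ULL, 0x13fffffffULL };
    struct range hit;

    if (trim_to_descriptor(fake, desc, &hit))
        printf("update %#llx-%#llx\n",
               (unsigned long long)hit.start,
               (unsigned long long)hit.end);
    return 0;
}
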
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 215061c581e1..bee8729525ec 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 91d5ad7cf58b..e1cd933ea9ae 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
-*/
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, u32 type)
+{
+ u64 cmd;
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5,
+ quirk.state.a6, 0, res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ cmd = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __qcom_scm_call_do(desc, res, fn_id, x5,
+ ARM_SMCCC_STD_CALL);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int retry_count = 0, i;
+ int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
- u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
- struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
@@ -117,46 +157,56 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys;
}
- do {
- mutex_lock(&qcom_scm_lock);
-
- cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- qcom_smccc_convention,
- ARM_SMCCC_OWNER_SIP, fn_id);
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5,
- quirk.state.a6, 0, res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- cmd = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
+ qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
- if (res->a0 < 0)
+ if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0);
return 0;
}
/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ might_sleep();
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variant of qcom_scm_call()
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
+}
+
+/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
@@ -502,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
+ desc.args[1] = en;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
+ QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
+}
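
The refactor above splits one monolithic call path into a context-agnostic
worker plus two thin wrappers, so allocation flags, locking, and the EBUSY
retry policy all follow from a single atomic flag. A generic, non-SCM
sketch of that shape (names and the malloc() stand-in for kzalloc() are
illustrative only):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define GFP_KERNEL 0 /* stand-ins for the kernel's gfp flags */
#define GFP_ATOMIC 1

static int do_call(const char *payload, bool atomic)
{
    int flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
    char *buf = malloc(64); /* kzalloc(..., flag) in the real code */

    if (!buf)
        return -1; /* -ENOMEM in the kernel */
    /* the sleeping path would also take a mutex and retry on EBUSY */
    snprintf(buf, 64, "flag=%d payload=%s", flag, payload);
    puts(buf);
    free(buf);
    return 0;
}

static int call_sleeping(const char *payload)
{
    /* might_sleep() in the kernel: assert the context early */
    return do_call(payload, false);
}

static int call_atomic(const char *payload)
{
    return do_call(payload, true);
}

int main(void)
{
    return call_sleeping("ping") | call_atomic("pong");
}
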
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 4802ab170fe5..a729e05c21b8 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl(__scm->dev, addr, val);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 99506bd873c0..baee744dbcfe 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_CONFIG_ERRATA1 0x3
+#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
+ bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz,
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index bb008c019920..f8533338b018 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -20,7 +20,6 @@
#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
-#define RSU_FW_VERSION_MASK GENMASK_ULL(15, 0)
#define RSU_TIMEOUT (msecs_to_jiffies(SVC_RSU_REQUEST_TIMEOUT_MS))
@@ -109,9 +108,12 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status != BIT(SVC_STATUS_RSU_OK))
- dev_err(client->dev, "RSU returned status is %i\n",
- data->status);
+ if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support notify\n");
+ else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ dev_err(client->dev, "Failure, returned status is %lu\n",
+ BIT(data->status));
+
complete(&priv->completion);
}
@@ -133,9 +135,11 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
if (data->status == BIT(SVC_STATUS_RSU_OK))
priv->retry_counter = *counter;
+ else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
- dev_err(client->dev, "Failed to get retry counter %i\n",
- data->status);
+ dev_err(client->dev, "Failed to get retry counter %lu\n",
+ BIT(data->status));
complete(&priv->completion);
}
@@ -333,15 +337,10 @@ static ssize_t notify_store(struct device *dev,
return ret;
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY,
- 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- return ret;
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ return ret;
}
return count;
@@ -413,15 +412,10 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- /* only 19.3 or late version FW supports retry counter feature */
- if (FIELD_GET(RSU_FW_VERSION_MASK, priv->status.version)) {
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0,
- rsu_retry_callback);
- if (ret) {
- dev_err(dev,
- "Error, getting RSU retry %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
+ ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting RSU retry %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
}
return ret;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b485321189e1..c6c31402848d 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -493,8 +493,24 @@ static int svc_normal_to_secure_thread(void *data)
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
break;
default:
- pr_warn("it shouldn't happen\n");
+ pr_warn("Secure firmware doesn't support...\n");
+
+ /*
+ * be compatible with older version firmware which
+ * doesn't support RSU notify or retry
+ */
+ if ((pdata->command == COMMAND_RSU_RETRY) ||
+ (pdata->command == COMMAND_RSU_NOTIFY)) {
+ cbdata->status =
+ BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ pdata->chan->scl->receive_cb(
+ pdata->chan->scl, cbdata);
+ }
break;
+
}
};
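
The fallback keeps older secure firmware usable: when a command is
unrecognized, the thread fabricates an SVC_STATUS_RSU_NO_SUPPORT completion
instead of leaving the RSU driver blocked on its completion object. A
compact sketch of the pattern with local stand-in types (the status bit
value is assumed for illustration):

#include <stdio.h>

enum { CMD_RSU_NOTIFY, CMD_RSU_RETRY, CMD_OTHER };
#define STATUS_NO_SUPPORT (1U << 6) /* assumed bit position */

struct cb_data { unsigned int status; void *kaddr1, *kaddr2, *kaddr3; };

static void receive_cb(struct cb_data *cb)
{
    /* wakes the waiter's completion in the real driver */
    printf("completed with status %#x\n", cb->status);
}

static void handle_unknown(int command, struct cb_data *cb)
{
    if (command == CMD_RSU_RETRY || command == CMD_RSU_NOTIFY) {
        cb->status = STATUS_NO_SUPPORT;
        cb->kaddr1 = cb->kaddr2 = cb->kaddr3 = NULL;
        receive_cb(cb);
    }
}

int main(void)
{
    struct cb_data cb = { 0 };

    handle_unknown(CMD_RSU_RETRY, &cb);
    return 0;
}
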
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 73c779e920ed..72380e1d31c7 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL
+ depends on FPGA_DFL && HWMON
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 4d78e182878f..7c930e6b314d 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -14,6 +14,8 @@
* Henry Mitchel <henry.mitchel@intel.com>
*/
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -181,6 +183,381 @@ static const struct dfl_feature_ops fme_hdr_ops = {
.ioctl = fme_hdr_ioctl,
};
+#define FME_THERM_THRESHOLD 0x8
+#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0)
+#define TEMP_THRESHOLD1_EN BIT_ULL(7)
+#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8)
+#define TEMP_THRESHOLD2_EN BIT_ULL(15)
+#define TRIP_THRESHOLD GENMASK_ULL(30, 24)
+#define TEMP_THRESHOLD1_STATUS BIT_ULL(32) /* threshold1 reached */
+#define TEMP_THRESHOLD2_STATUS BIT_ULL(33) /* threshold2 reached */
+/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
+#define TEMP_THRESHOLD1_POLICY BIT_ULL(44)
+
+#define FME_THERM_RDSENSOR_FMT1 0x10
+#define FPGA_TEMPERATURE GENMASK_ULL(6, 0)
+
+#define FME_THERM_CAP 0x20
+#define THERM_NO_THROTTLE BIT_ULL(0)
+
+static bool fme_thermal_throttle_support(void __iomem *base)
+{
+ u64 v = readq(base + FME_THERM_CAP);
+
+ return !FIELD_GET(THERM_NO_THROTTLE, v);
+}
+
+static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct dfl_feature *feature = drvdata;
+
+ /* temperature is always supported; check the hardware cap for the rest */
+ if (attr == hwmon_temp_input)
+ return 0444;
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
+}
+
+static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+ break;
+ case hwmon_temp_max:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+ break;
+ case hwmon_temp_crit:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+ break;
+ case hwmon_temp_emergency:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+ break;
+ case hwmon_temp_max_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_temp_crit_alarm:
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+ *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops thermal_hwmon_ops = {
+ .is_visible = thermal_hwmon_attrs_visible,
+ .read = thermal_hwmon_read,
+};
+
+static const struct hwmon_channel_info *thermal_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_hwmon_chip_info = {
+ .ops = &thermal_hwmon_ops,
+ .info = thermal_hwmon_info,
+};
+
+static ssize_t temp1_max_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
+}
+
+static DEVICE_ATTR_RO(temp1_max_policy);
+
+static struct attribute *thermal_extra_attrs[] = {
+ &dev_attr_temp1_max_policy.attr,
+ NULL,
+};
+
+static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+
+ return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
+}
+
+static const struct attribute_group thermal_extra_group = {
+ .attrs = thermal_extra_attrs,
+ .is_visible = thermal_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(thermal_extra);
+
+static int fme_thermal_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ /*
+ * create hwmon to allow userspace monitoring temperature and other
+ * threshold information.
+ *
+ * temp1_input -> FPGA device temperature
+ * temp1_max -> hardware threshold 1 -> 50% or 90% throttling
+ * temp1_crit -> hardware threshold 2 -> 100% throttling
+ * temp1_emergency -> hardware trip_threshold to shutdown FPGA
+ * temp1_max_alarm -> hardware threshold 1 alarm
+ * temp1_crit_alarm -> hardware threshold 2 alarm
+ *
+ * create device specific sysfs interfaces, e.g. read temp1_max_policy
+ * to understand the actual hardware throttling action (50% vs 90%).
+ *
+ * If hardware doesn't support automatic throttling per thresholds,
+ * then all above sysfs interfaces are not visible except temp1_input
+ * for temperature.
+ */
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_thermal", feature,
+ &thermal_hwmon_chip_info,
+ thermal_extra_groups);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_THERMAL_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+};
+
+#define FME_PWR_STATUS 0x8
+#define FME_LATENCY_TOLERANCE BIT_ULL(18)
+#define PWR_CONSUMED GENMASK_ULL(17, 0)
+
+#define FME_PWR_THRESHOLD 0x10
+#define PWR_THRESHOLD1 GENMASK_ULL(6, 0) /* in Watts */
+#define PWR_THRESHOLD2 GENMASK_ULL(14, 8) /* in Watts */
+#define PWR_THRESHOLD_MAX 0x7f /* in Watts */
+#define PWR_THRESHOLD1_STATUS BIT_ULL(16)
+#define PWR_THRESHOLD2_STATUS BIT_ULL(17)
+
+#define FME_PWR_XEON_LIMIT 0x18
+#define XEON_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define XEON_PWR_EN BIT_ULL(15)
+#define FME_PWR_FPGA_LIMIT 0x20
+#define FPGA_PWR_LIMIT GENMASK_ULL(14, 0) /* in 0.1 Watts */
+#define FPGA_PWR_EN BIT_ULL(15)
+
+static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ switch (attr) {
+ case hwmon_power_input:
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+ break;
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+ break;
+ case hwmon_power_max_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
+ break;
+ case hwmon_power_crit_alarm:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ int ret = 0;
+ u64 v;
+
+ val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+
+ mutex_lock(&pdata->lock);
+
+ switch (attr) {
+ case hwmon_power_max:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD1;
+ v |= FIELD_PREP(PWR_THRESHOLD1, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ case hwmon_power_crit:
+ v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
+ v &= ~PWR_THRESHOLD2;
+ v |= FIELD_PREP(PWR_THRESHOLD2, val);
+ writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&pdata->lock);
+
+ return ret;
+}
+
+static umode_t power_hwmon_attrs_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_max_alarm:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_max:
+ case hwmon_power_crit:
+ return 0644;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_ops power_hwmon_ops = {
+ .is_visible = power_hwmon_attrs_visible,
+ .read = power_hwmon_read,
+ .write = power_hwmon_write,
+};
+
+static const struct hwmon_channel_info *power_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MAX_ALARM |
+ HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_chip_info power_hwmon_chip_info = {
+ .ops = &power_hwmon_ops,
+ .info = power_hwmon_info,
+};
+
+static ssize_t power1_xeon_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 xeon_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
+
+ if (FIELD_GET(XEON_PWR_EN, v))
+ xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", xeon_limit * 100000);
+}
+
+static ssize_t power1_fpga_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u16 fpga_limit = 0;
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
+
+ if (FIELD_GET(FPGA_PWR_EN, v))
+ fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
+
+ return sprintf(buf, "%u\n", fpga_limit * 100000);
+}
+
+static ssize_t power1_ltr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dfl_feature *feature = dev_get_drvdata(dev);
+ u64 v;
+
+ v = readq(feature->ioaddr + FME_PWR_STATUS);
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
+}
+
+static DEVICE_ATTR_RO(power1_xeon_limit);
+static DEVICE_ATTR_RO(power1_fpga_limit);
+static DEVICE_ATTR_RO(power1_ltr);
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_xeon_limit.attr,
+ &dev_attr_power1_fpga_limit.attr,
+ &dev_attr_power1_ltr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(power_extra);
+
+static int fme_power_mgmt_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
+ "dfl_fme_power", feature,
+ &power_hwmon_chip_info,
+ power_extra_groups);
+ if (IS_ERR(hwmon)) {
+ dev_err(&pdev->dev, "Fail to register power hwmon\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
+static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
+ {.id = FME_FEATURE_ID_POWER_MGMT,},
+ {0,}
+};
+
+static const struct dfl_feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+};
+
static struct dfl_feature_driver fme_feature_drvs[] = {
{
.id_table = fme_hdr_id_table,
@@ -195,6 +572,14 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_global_err_ops,
},
{
+ .id_table = fme_thermal_mgmt_id_table,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .id_table = fme_power_mgmt_id_table,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
.ops = NULL,
},
};
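
Both hwmon callback sets are thin register decoders: read a 64-bit CSR with
readq(), extract a field, and scale to hwmon's canonical units
(millidegrees Celsius for temperatures, microwatts for power). A worked,
standalone example of the thermal decode with a hand-rolled FIELD_GET() and
GENMASK_ULL() and a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
/* divide by the mask's lowest set bit; works for contiguous masks */
#define FIELD_GET(mask, reg) \
    (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

#define TEMP_THRESHOLD1 GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD2 GENMASK_ULL(14, 8)
#define TRIP_THRESHOLD  GENMASK_ULL(30, 24)

int main(void)
{
    uint64_t v = 0x5F005A55ULL; /* hypothetical readq() result */

    printf("temp1_max       = %llu\n",
           (unsigned long long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000));
    printf("temp1_crit      = %llu\n",
           (unsigned long long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000));
    printf("temp1_emergency = %llu\n",
           (unsigned long long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000));
    return 0; /* 85000 / 90000 / 95000 millidegrees */
}
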
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 31ef38e38537..ee7765049607 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -578,10 +578,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
init_completion(&priv->dma_done);
priv->irq = platform_get_irq(pdev, 0);
- if (priv->irq < 0) {
- dev_err(dev, "No IRQ available\n");
+ if (priv->irq < 0)
return priv->irq;
- }
priv->clk = devm_clk_get(dev, "ref_clk");
if (IS_ERR(priv->clk)) {
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index c612db7a914a..92ce6d85802c 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -53,6 +53,14 @@ config FSI_MASTER_AST_CF
lines driven by the internal ColdFire coprocessor. This requires
the corresponding machine specific ColdFire firmware to be available.
+config FSI_MASTER_ASPEED
+ tristate "FSI ASPEED master"
+ help
+ This option enables an FSI master that is present behind an OPB bridge
+ in the AST2600.
+
+ Enable it for your BMC kernel in an OpenPOWER or IBM Power system.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
---help---
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index e4a2ff043c32..da218a1ad8e1 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
+obj-$(CONFIG_FSI_MASTER_ASPEED) += fsi-master-aspeed.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 1f76740f33b6..8244da8a7241 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -544,6 +544,31 @@ static int fsi_slave_scan(struct fsi_slave *slave)
return 0;
}
+static unsigned long aligned_access_size(size_t offset, size_t count)
+{
+ unsigned long offset_unit, count_unit;
+
+ /* Criteria:
+ *
+ * 1. Access size must be less than or equal to the maximum access
+ * width or the highest power-of-two factor of offset
+ * 2. Access size must be less than or equal to the amount specified by
+ * count
+ *
+ * The access width is optimal if we can calculate 1 to be strictly
+ * equal while still satisfying 2.
+ */
+
+ /* Find 1 by the bottom bit of offset (with a 4 byte access cap) */
+ offset_unit = BIT(__builtin_ctzl(offset | 4));
+
+ /* Find 2 by the top bit of count */
+ count_unit = BIT(8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));
+
+ /* Constrain the maximum access width to the minimum of both criteria */
+ return BIT(__builtin_ctzl(offset_unit | count_unit));
+}
+
static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -559,8 +584,7 @@ static ssize_t fsi_slave_sysfs_raw_read(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += read_len) {
- read_len = min_t(size_t, count, 4);
- read_len -= off & 0x3;
+ read_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_read(slave, off, buf + total_len, read_len);
if (rc)
@@ -587,8 +611,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return -EINVAL;
for (total_len = 0; total_len < count; total_len += write_len) {
- write_len = min_t(size_t, count, 4);
- write_len -= off & 0x3;
+ write_len = aligned_access_size(off, count - total_len);
rc = fsi_slave_write(slave, off, buf + total_len, write_len);
if (rc)
@@ -1241,6 +1264,19 @@ static ssize_t master_break_store(struct device *dev,
static DEVICE_ATTR(break, 0200, NULL, master_break_store);
+static struct attribute *master_attrs[] = {
+ &dev_attr_break.attr,
+ &dev_attr_rescan.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(master);
+
+static struct class fsi_master_class = {
+ .name = "fsi-master",
+ .dev_groups = master_groups,
+};
+
int fsi_master_register(struct fsi_master *master)
{
int rc;
@@ -1249,6 +1285,7 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
dev_set_name(&master->dev, "fsi%d", master->idx);
+ master->dev.class = &fsi_master_class;
rc = device_register(&master->dev);
if (rc) {
@@ -1256,20 +1293,6 @@ int fsi_master_register(struct fsi_master *master)
return rc;
}
- rc = device_create_file(&master->dev, &dev_attr_rescan);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
- rc = device_create_file(&master->dev, &dev_attr_break);
- if (rc) {
- device_del(&master->dev);
- ida_simple_remove(&master_ida, master->idx);
- return rc;
- }
-
np = dev_of_node(&master->dev);
if (!of_property_read_bool(np, "no-scan-on-init")) {
mutex_lock(&master->scan_lock);
@@ -1350,8 +1373,15 @@ static int __init fsi_init(void)
rc = bus_register(&fsi_bus_type);
if (rc)
goto fail_bus;
+
+ rc = class_register(&fsi_master_class);
+ if (rc)
+ goto fail_class;
+
return 0;
+ fail_class:
+ bus_unregister(&fsi_bus_type);
fail_bus:
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
return rc;
@@ -1360,6 +1390,7 @@ postcore_initcall(fsi_init);
static void fsi_exit(void)
{
+ class_unregister(&fsi_master_class);
bus_unregister(&fsi_bus_type);
unregister_chrdev_region(fsi_base_dev, FSI_CHAR_MAX_DEVICES);
ida_destroy(&fsi_minor_ida);
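
aligned_access_size() picks, on each loop iteration above, the widest
power-of-two access that (a) divides the current offset, capped at the
4-byte maximum width, and (b) still fits in the remaining count. A
standalone check of the bit tricks with a few sample inputs (the body
mirrors the kernel function; test values are arbitrary, and count must be
nonzero):

#include <stdio.h>

static unsigned long aligned_access_size(unsigned long offset,
                                         unsigned long count)
{
    unsigned long offset_unit = 1UL << __builtin_ctzl(offset | 4);
    unsigned long count_unit = 1UL <<
        (8 * sizeof(unsigned long) - 1 - __builtin_clzl(count));

    return 1UL << __builtin_ctzl(offset_unit | count_unit);
}

int main(void)
{
    /* odd offset, 7 bytes left: byte accesses only */
    printf("%lu\n", aligned_access_size(0x1001, 7)); /* 1 */
    /* halfword-aligned offset, 6 bytes left: halfword access */
    printf("%lu\n", aligned_access_size(0x1002, 6)); /* 2 */
    /* word-aligned offset with plenty left: full 4-byte access */
    printf("%lu\n", aligned_access_size(0x1000, 64)); /* 4 */
    return 0;
}
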
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
new file mode 100644
index 000000000000..f49742b310c2
--- /dev/null
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2018
+// FSI master driver for AST2600
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fsi.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+
+#include "fsi-master.h"
+
+struct fsi_master_aspeed {
+ struct fsi_master master;
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+#define to_fsi_master_aspeed(m) \
+ container_of(m, struct fsi_master_aspeed, master)
+
+/* Control register (size 0x400) */
+static const u32 ctrl_base = 0x80000000;
+
+static const u32 fsi_base = 0xa0000000;
+
+#define OPB_FSI_VER 0x00
+#define OPB_TRIGGER 0x04
+#define OPB_CTRL_BASE 0x08
+#define OPB_FSI_BASE 0x0c
+#define OPB_CLK_SYNC 0x3c
+#define OPB_IRQ_CLEAR 0x40
+#define OPB_IRQ_MASK 0x44
+#define OPB_IRQ_STATUS 0x48
+
+#define OPB0_SELECT 0x10
+#define OPB0_RW 0x14
+#define OPB0_XFER_SIZE 0x18
+#define OPB0_FSI_ADDR 0x1c
+#define OPB0_FSI_DATA_W 0x20
+#define OPB0_STATUS 0x80
+#define OPB0_FSI_DATA_R 0x84
+
+#define OPB0_WRITE_ORDER1 0x4c
+#define OPB0_WRITE_ORDER2 0x50
+#define OPB1_WRITE_ORDER1 0x54
+#define OPB1_WRITE_ORDER2 0x58
+#define OPB0_READ_ORDER1 0x5c
+#define OPB1_READ_ORDER2 0x60
+
+#define OPB_RETRY_COUNTER 0x64
+
+/* OPBn_STATUS */
+#define STATUS_HALFWORD_ACK BIT(0)
+#define STATUS_FULLWORD_ACK BIT(1)
+#define STATUS_ERR_ACK BIT(2)
+#define STATUS_RETRY BIT(3)
+#define STATUS_TIMEOUT BIT(4)
+
+/* OPB_IRQ_MASK */
+#define OPB1_XFER_ACK_EN BIT(17)
+#define OPB0_XFER_ACK_EN BIT(16)
+
+/* OPB_RW */
+#define CMD_READ BIT(0)
+#define CMD_WRITE 0
+
+/* OPBx_XFER_SIZE */
+#define XFER_FULLWORD (BIT(1) | BIT(0))
+#define XFER_HALFWORD (BIT(0))
+#define XFER_BYTE (0)
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_aspeed.h>
+
+#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in ms */
+
+#define DEFAULT_DIVISOR 14
+#define OPB_POLL_TIMEOUT 10000
+
+static int __opb_write(struct fsi_master_aspeed *aspeed, u32 addr,
+ u32 val, u32 transfer_size)
+{
+ void __iomem *base = aspeed->base;
+ u32 reg, status;
+ int ret;
+
+ writel(CMD_WRITE, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(val, base + OPB0_FSI_DATA_W);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ trace_fsi_master_aspeed_opb_write(addr, val, transfer_size, status, reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ return 0;
+}
+
+static int opb_writeb(struct fsi_master_aspeed *aspeed, u32 addr, u8 val)
+{
+ return __opb_write(aspeed, addr, val, XFER_BYTE);
+}
+
+static int opb_writew(struct fsi_master_aspeed *aspeed, u32 addr, __be16 val)
+{
+ return __opb_write(aspeed, addr, (__force u16)val, XFER_HALFWORD);
+}
+
+static int opb_writel(struct fsi_master_aspeed *aspeed, u32 addr, __be32 val)
+{
+ return __opb_write(aspeed, addr, (__force u32)val, XFER_FULLWORD);
+}
+
+static int __opb_read(struct fsi_master_aspeed *aspeed, uint32_t addr,
+ u32 transfer_size, void *out)
+{
+ void __iomem *base = aspeed->base;
+ u32 result, reg;
+ int status, ret;
+
+ writel(CMD_READ, base + OPB0_RW);
+ writel(transfer_size, base + OPB0_XFER_SIZE);
+ writel(addr, base + OPB0_FSI_ADDR);
+ writel(0x1, base + OPB_IRQ_CLEAR);
+ writel(0x1, base + OPB_TRIGGER);
+
+ ret = readl_poll_timeout(base + OPB_IRQ_STATUS, reg,
+ (reg & OPB0_XFER_ACK_EN) != 0,
+ 0, OPB_POLL_TIMEOUT);
+
+ status = readl(base + OPB0_STATUS);
+
+ result = readl(base + OPB0_FSI_DATA_R);
+
+ trace_fsi_master_aspeed_opb_read(addr, transfer_size, result,
+ readl(base + OPB0_STATUS),
+ reg);
+
+ /* Return error when poll timed out */
+ if (ret)
+ return ret;
+
+ /* Command failed, master will reset */
+ if (status & STATUS_ERR_ACK)
+ return -EIO;
+
+ if (out) {
+ switch (transfer_size) {
+ case XFER_BYTE:
+ *(u8 *)out = result;
+ break;
+ case XFER_HALFWORD:
+ *(u16 *)out = result;
+ break;
+ case XFER_FULLWORD:
+ *(u32 *)out = result;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int opb_readl(struct fsi_master_aspeed *aspeed, uint32_t addr, __be32 *out)
+{
+ return __opb_read(aspeed, addr, XFER_FULLWORD, out);
+}
+
+static int opb_readw(struct fsi_master_aspeed *aspeed, uint32_t addr, __be16 *out)
+{
+ return __opb_read(aspeed, addr, XFER_HALFWORD, (void *)out);
+}
+
+static int opb_readb(struct fsi_master_aspeed *aspeed, uint32_t addr, u8 *out)
+{
+ return __opb_read(aspeed, addr, XFER_BYTE, (void *)out);
+}
+
+static int check_errors(struct fsi_master_aspeed *aspeed, int err)
+{
+ int ret;
+
+ if (trace_fsi_master_aspeed_opb_error_enabled()) {
+ __be32 mresp0, mstap0, mesrb0;
+
+ opb_readl(aspeed, ctrl_base + FSI_MRESP0, &mresp0);
+ opb_readl(aspeed, ctrl_base + FSI_MSTAP0, &mstap0);
+ opb_readl(aspeed, ctrl_base + FSI_MESRB0, &mesrb0);
+
+ trace_fsi_master_aspeed_opb_error(
+ be32_to_cpu(mresp0),
+ be32_to_cpu(mstap0),
+ be32_to_cpu(mesrb0));
+ }
+
+ if (err == -EIO) {
+ /* Check MAEB (0x70) ? */
+
+ /* Then clear errors in master */
+ ret = opb_writel(aspeed, ctrl_base + FSI_MRESP0,
+ cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
+ if (ret) {
+ /* TODO: log? return different code? */
+ return ret;
+ }
+ /* TODO: confirm that 0x70 was okay */
+ }
+
+ /* This will pass through timeout errors */
+ return err;
+}
+
+static int aspeed_master_read(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_readb(aspeed, fsi_base + addr, val);
+ break;
+ case 2:
+ ret = opb_readw(aspeed, fsi_base + addr, val);
+ break;
+ case 4:
+ ret = opb_readl(aspeed, fsi_base + addr, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_write(struct fsi_master *master, int link,
+ uint8_t id, uint32_t addr, const void *val, size_t size)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int ret;
+
+ if (id != 0)
+ return -EINVAL;
+
+ addr += link * FSI_HUB_LINK_SIZE;
+
+ switch (size) {
+ case 1:
+ ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val);
+ break;
+ case 2:
+ ret = opb_writew(aspeed, fsi_base + addr, *(__be16 *)val);
+ break;
+ case 4:
+ ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = check_errors(aspeed, ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_master_link_enable(struct fsi_master *master, int link)
+{
+ struct fsi_master_aspeed *aspeed = to_fsi_master_aspeed(master);
+ int idx, bit, ret;
+ __be32 reg, result;
+
+ idx = link / 32;
+ bit = link % 32;
+
+ reg = cpu_to_be32(0x80000000 >> bit);
+
+ ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg);
+ if (ret)
+ return ret;
+
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ ret = opb_readl(aspeed, ctrl_base + FSI_MENP0 + (4 * idx), &result);
+ if (ret)
+ return ret;
+
+ if (result != reg) {
+ dev_err(aspeed->dev, "%s failed: %08x\n", __func__, result);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x4;
+ cmd = cpu_to_be32(0xecc00000);
+
+ return aspeed_master_write(master, link, id, addr, &cmd, 4);
+}
+
+static int aspeed_master_break(struct fsi_master *master, int link)
+{
+ uint32_t addr;
+ __be32 cmd;
+
+ addr = 0x0;
+ cmd = cpu_to_be32(0xc0de0000);
+
+ return aspeed_master_write(master, link, 0, addr, &cmd, 4);
+}
+
+static void aspeed_master_release(struct device *dev)
+{
+ struct fsi_master_aspeed *aspeed =
+ to_fsi_master_aspeed(dev_to_fsi_master(dev));
+
+ kfree(aspeed);
+}
+
+/* mmode encoders */
+static inline u32 fsi_mmode_crs0(u32 x)
+{
+ return (x & FSI_MMODE_CRS0MASK) << FSI_MMODE_CRS0SHFT;
+}
+
+static inline u32 fsi_mmode_crs1(u32 x)
+{
+ return (x & FSI_MMODE_CRS1MASK) << FSI_MMODE_CRS1SHFT;
+}
+
+static int aspeed_master_init(struct fsi_master_aspeed *aspeed)
+{
+ __be32 reg;
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ /* Initialize the MFSI (hub master) engine */
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK
+ | FSI_MRESP_RST_MCR | FSI_MRESP_RST_PYE);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ reg = cpu_to_be32(FSI_MECTRL_EOAE | FSI_MECTRL_P8_AUTO_TERM);
+ opb_writel(aspeed, ctrl_base + FSI_MECTRL, reg);
+
+ reg = cpu_to_be32(FSI_MMODE_ECRC | FSI_MMODE_EPC | FSI_MMODE_RELA
+ | fsi_mmode_crs0(DEFAULT_DIVISOR)
+ | fsi_mmode_crs1(DEFAULT_DIVISOR)
+ | FSI_MMODE_P8_TO_LSB);
+ opb_writel(aspeed, ctrl_base + FSI_MMODE, reg);
+
+ reg = cpu_to_be32(0xffff0000);
+ opb_writel(aspeed, ctrl_base + FSI_MDLYR, reg);
+
+ reg = cpu_to_be32(~0);
+ opb_writel(aspeed, ctrl_base + FSI_MSENP0, reg);
+
+ /* Leave enabled long enough for master logic to set up */
+ mdelay(FSI_LINK_ENABLE_SETUP_TIME);
+
+ opb_writel(aspeed, ctrl_base + FSI_MCENP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MAEB, NULL);
+
+ reg = cpu_to_be32(FSI_MRESP_RST_ALL_MASTER | FSI_MRESP_RST_ALL_LINK);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, reg);
+
+ opb_readl(aspeed, ctrl_base + FSI_MLEVP0, NULL);
+
+ /* Reset the master bridge */
+ reg = cpu_to_be32(FSI_MRESB_RST_GEN);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ reg = cpu_to_be32(FSI_MRESB_RST_ERR);
+ opb_writel(aspeed, ctrl_base + FSI_MRESB0, reg);
+
+ return 0;
+}
+
+static int fsi_master_aspeed_probe(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed;
+ struct resource *res;
+ int rc, links, reg;
+ __be32 raw;
+
+ aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL);
+ if (!aspeed)
+ return -ENOMEM;
+
+ aspeed->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ aspeed->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aspeed->base))
+ return PTR_ERR(aspeed->base);
+
+ aspeed->clk = devm_clk_get(aspeed->dev, NULL);
+ if (IS_ERR(aspeed->clk)) {
+ dev_err(aspeed->dev, "couldn't get clock\n");
+ return PTR_ERR(aspeed->clk);
+ }
+ rc = clk_prepare_enable(aspeed->clk);
+ if (rc) {
+ dev_err(aspeed->dev, "couldn't enable clock\n");
+ return rc;
+ }
+
+ writel(0x1, aspeed->base + OPB_CLK_SYNC);
+ writel(OPB1_XFER_ACK_EN | OPB0_XFER_ACK_EN,
+ aspeed->base + OPB_IRQ_MASK);
+
+ /* TODO: determine an appropriate value */
+ writel(0x10, aspeed->base + OPB_RETRY_COUNTER);
+
+ writel(ctrl_base, aspeed->base + OPB_CTRL_BASE);
+ writel(fsi_base, aspeed->base + OPB_FSI_BASE);
+
+ /* Set read data order */
+ writel(0x00030b1b, aspeed->base + OPB0_READ_ORDER1);
+
+ /* Set write data order */
+ writel(0x0011101b, aspeed->base + OPB0_WRITE_ORDER1);
+ writel(0x0c330f3f, aspeed->base + OPB0_WRITE_ORDER2);
+
+ /*
+ * Select OPB0 for all operations.
+ * Will need to be reworked when enabling DMA or anything that uses
+ * OPB1.
+ */
+ writel(0x1, aspeed->base + OPB0_SELECT);
+
+ rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to read hub version\n");
+ return rc;
+ }
+
+ reg = be32_to_cpu(raw);
+ links = (reg >> 8) & 0xff;
+ dev_info(&pdev->dev, "hub version %08x (%d links)\n", reg, links);
+
+ aspeed->master.dev.parent = &pdev->dev;
+ aspeed->master.dev.release = aspeed_master_release;
+ aspeed->master.dev.of_node = of_node_get(dev_of_node(&pdev->dev));
+
+ aspeed->master.n_links = links;
+ aspeed->master.read = aspeed_master_read;
+ aspeed->master.write = aspeed_master_write;
+ aspeed->master.send_break = aspeed_master_break;
+ aspeed->master.term = aspeed_master_term;
+ aspeed->master.link_enable = aspeed_master_link_enable;
+
+ dev_set_drvdata(&pdev->dev, aspeed);
+
+ aspeed_master_init(aspeed);
+
+ rc = fsi_master_register(&aspeed->master);
+ if (rc)
+ goto err_release;
+
+ /* At this point, fsi_master_register performs the device_initialize(),
+ * and holds the sole reference on master.dev. This means the device
+ * will be freed (via ->release) during any subsequent call to
+ * fsi_master_unregister. We add our own reference to it here, so we
+ * can perform cleanup (in _remove()) without it being freed before
+ * we're ready.
+ */
+ get_device(&aspeed->master.dev);
+ return 0;
+
+err_release:
+ clk_disable_unprepare(aspeed->clk);
+ return rc;
+}
+
+static int fsi_master_aspeed_remove(struct platform_device *pdev)
+{
+ struct fsi_master_aspeed *aspeed = platform_get_drvdata(pdev);
+
+ fsi_master_unregister(&aspeed->master);
+ clk_disable_unprepare(aspeed->clk);
+
+ return 0;
+}
+
+static const struct of_device_id fsi_master_aspeed_match[] = {
+ { .compatible = "aspeed,ast2600-fsi-master" },
+ { },
+};
+
+static struct platform_driver fsi_master_aspeed_driver = {
+ .driver = {
+ .name = "fsi-master-aspeed",
+ .of_match_table = fsi_master_aspeed_match,
+ },
+ .probe = fsi_master_aspeed_probe,
+ .remove = fsi_master_aspeed_remove,
+};
+
+module_platform_driver(fsi_master_aspeed_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index f158b1a88286..def35cf92571 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -13,53 +13,7 @@
#include "fsi-master.h"
-/* Control Registers */
-#define FSI_MMODE 0x0 /* R/W: mode */
-#define FSI_MDLYR 0x4 /* R/W: delay */
-#define FSI_MCRSP 0x8 /* R/W: clock rate */
-#define FSI_MENP0 0x10 /* R/W: enable */
-#define FSI_MLEVP0 0x18 /* R: plug detect */
-#define FSI_MSENP0 0x18 /* S: Set enable */
-#define FSI_MCENP0 0x20 /* C: Clear enable */
-#define FSI_MAEB 0x70 /* R: Error address */
-#define FSI_MVER 0x74 /* R: master version/type */
-#define FSI_MRESP0 0xd0 /* W: Port reset */
-#define FSI_MESRB0 0x1d0 /* R: Master error status */
-#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
-#define FSI_MECTRL 0x2e0 /* W: Error control */
-
-/* MMODE: Mode control */
-#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
-#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
-#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
-#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
- /* MSB=1, LSB=0 is 0.8 ms */
- /* MSB=0, LSB=1 is 0.9 ms */
-#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
-#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
-#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
-#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
-
-/* MRESB: Reset brindge */
-#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
-#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
-
-/* MRESB: Reset port */
-#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
-#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
-#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
-#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
-#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
-
-/* MECTRL: Error control */
-#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
- /* master 0 in error */
-#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
-
#define FSI_ENGID_HUB_MASTER 0x1c
-#define FSI_HUB_LINK_OFFSET 0x80000
-#define FSI_HUB_LINK_SIZE 0x80000
-#define FSI_HUB_MASTER_MAX_LINKS 8
#define FSI_LINK_ENABLE_SETUP_TIME 10 /* in mS */
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index c7174237e864..6e8d4d4d5149 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -12,6 +12,71 @@
#include <linux/device.h>
#include <linux/mutex.h>
+/*
+ * Master registers
+ *
+ * These are used by hardware masters, such as those in the FSP2 and
+ * AST2600, and by the hub master in POWER processors.
+ */
+
+/* Control Registers */
+#define FSI_MMODE 0x0 /* R/W: mode */
+#define FSI_MDLYR 0x4 /* R/W: delay */
+#define FSI_MCRSP 0x8 /* R/W: clock rate */
+#define FSI_MENP0 0x10 /* R/W: enable */
+#define FSI_MLEVP0 0x18 /* R: plug detect */
+#define FSI_MSENP0 0x18 /* S: Set enable */
+#define FSI_MCENP0 0x20 /* C: Clear enable */
+#define FSI_MAEB 0x70 /* R: Error address */
+#define FSI_MVER 0x74 /* R: master version/type */
+#define FSI_MSTAP0 0xd0 /* R: Port status */
+#define FSI_MRESP0 0xd0 /* W: Port reset */
+#define FSI_MESRB0 0x1d0 /* R: Master error status */
+#define FSI_MRESB0 0x1d0 /* W: Reset bridge */
+#define FSI_MSCSB0 0x1d4 /* R: Master sub command stack */
+#define FSI_MATRB0 0x1d8 /* R: Master address trace */
+#define FSI_MDTRB0 0x1dc /* R: Master data trace */
+#define FSI_MECTRL 0x2e0 /* W: Error control */
+
+/* MMODE: Mode control */
+#define FSI_MMODE_EIP 0x80000000 /* Enable interrupt polling */
+#define FSI_MMODE_ECRC 0x40000000 /* Enable error recovery */
+#define FSI_MMODE_RELA 0x20000000 /* Enable relative address commands */
+#define FSI_MMODE_EPC 0x10000000 /* Enable parity checking */
+#define FSI_MMODE_P8_TO_LSB 0x00000010 /* Timeout value LSB */
+ /* MSB=1, LSB=0 is 0.8 ms */
+ /* MSB=0, LSB=1 is 0.9 ms */
+#define FSI_MMODE_CRS0SHFT 18 /* Clk rate selection 0 shift */
+#define FSI_MMODE_CRS0MASK 0x3ff /* Clk rate selection 0 mask */
+#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
+#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
+
+/* MRESB: Reset bridge */
+#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
+#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
+
+/* MRESP: Reset port */
+#define FSI_MRESP_RST_ALL_MASTER 0x20000000 /* Reset all FSI masters */
+#define FSI_MRESP_RST_ALL_LINK 0x10000000 /* Reset all FSI port contr. */
+#define FSI_MRESP_RST_MCR 0x08000000 /* Reset FSI master reg. */
+#define FSI_MRESP_RST_PYE 0x04000000 /* Reset FSI parity error */
+#define FSI_MRESP_RST_ALL 0xfc000000 /* Reset any error */
+
+/* MECTRL: Error control */
+#define FSI_MECTRL_EOAE 0x8000 /* Enable machine check when */
+ /* master 0 in error */
+#define FSI_MECTRL_P8_AUTO_TERM 0x4000 /* Auto terminate */
+
+#define FSI_HUB_LINK_OFFSET 0x80000
+#define FSI_HUB_LINK_SIZE 0x80000
+#define FSI_HUB_MASTER_MAX_LINKS 8
+
+/*
+ * Protocol definitions
+ *
+ * These are used by low-level masters that bit-bang out the protocol
+ */
+
/* Various protocol delays */
#define FSI_ECHO_DELAY_CLOCKS 16 /* Number clocks for echo delay */
#define FSI_SEND_DELAY_CLOCKS 16 /* Number clocks for send delay */
@@ -47,6 +112,12 @@
/* fsi-master definition and flags */
#define FSI_MASTER_FLAG_SWCLOCK 0x1
+/*
+ * Structures and function prototypes
+ *
+ * These are common to all masters
+ */
+
struct fsi_master {
struct device dev;
int idx;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 92d0ff63b3ea..8adffd42f8cb 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -120,6 +120,14 @@ config GPIO_ASPEED
help
Say Y here to support Aspeed AST2400 and AST2500 GPIO controllers.
+config GPIO_ASPEED_SGPIO
+ bool "Aspeed SGPIO support"
+ depends on (ARCH_ASPEED || COMPILE_TEST) && OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support Aspeed AST2500 SGPIO functionality.
+
config GPIO_ATH79
tristate "Atheros AR71XX/AR724X/AR913X GPIO support"
default y if ATH79
@@ -147,6 +155,15 @@ config GPIO_BCM_KONA
help
Turn on GPIO support for Broadcom "Kona" chips.
+config GPIO_BCM_XGS_IPROC
+ tristate "BRCM XGS iProc GPIO support"
+ depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ default ARCH_BCM_IPROC
+ help
+ Say yes here to enable GPIO support for Broadcom XGS iProc SoCs.
+
config GPIO_BRCMSTB
tristate "BRCMSTB GPIO support"
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
@@ -435,6 +452,15 @@ config GPIO_RCAR
help
Say yes here to support GPIO on Renesas R-Car SoCs.
+config GPIO_RDA
+ bool "RDA Micro GPIO controller support"
+ depends on ARCH_RDA || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support RDA Micro GPIO controller.
+
config GPIO_REG
bool
help
@@ -531,6 +557,7 @@ config GPIO_TEGRA186
depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra186 SoCs.
@@ -1320,7 +1347,7 @@ config GPIO_BT8XX
The card needs to be physically altered for using it as a
GPIO card. For more information on how to build a GPIO card
from a BT8xx TV card, see the documentation file at
- Documentation/driver-api/bt8xxgpio.rst
+ Documentation/driver-api/gpio/bt8xxgpio.rst
If unsure, say N.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d2fd19c15bae..34eb8b2b12dd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_GPIO_AMD_FCH) += gpio-amd-fch.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
+obj-$(CONFIG_GPIO_ASPEED_SGPIO) += gpio-aspeed-sgpio.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
+obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o
obj-$(CONFIG_GPIO_BD70528) += gpio-bd70528.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
@@ -115,6 +117,7 @@ obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
+obj-$(CONFIG_GPIO_RDA) += gpio-rda.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_REG) += gpio-reg.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 9c048f10c9ad..76f8c7ff18ff 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -80,6 +80,10 @@ Work items:
- Look over and identify any remaining easily converted drivers and
dry-code conversions to MMIO GPIO for maintainers to test
+- Expand the MMIO GPIO or write a new library for regmap-based I/O
+ helpers for GPIO drivers on regmap that simply use offsets
+ 0..n in some register to drive GPIO lines
+
- Expand the MMIO GPIO or write a new library for port-mapped I/O
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index a44fa8af5b0d..400c09b905f8 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -59,7 +59,10 @@ static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
const unsigned port = offset / 8;
const unsigned mask = BIT(offset % 8);
- return !!(dio48egpio->io_state[port] & mask);
+ if (dio48egpio->io_state[port] & mask)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
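
This file is the first of a series converted from bare 0/1 returns to the
named GPIO_LINE_DIRECTION_* constants. In current gpiolib the constants
keep the old numeric meaning (IN is 1, OUT is 0), so the change is about
readability rather than behavior. A minimal sketch of the convention with
local stand-ins for the gpiolib definitions:

#include <stdbool.h>
#include <stdio.h>

#define GPIO_LINE_DIRECTION_IN  1
#define GPIO_LINE_DIRECTION_OUT 0

static int example_get_direction(bool io_state_bit_set)
{
    if (io_state_bit_set)
        return GPIO_LINE_DIRECTION_IN;

    return GPIO_LINE_DIRECTION_OUT;
}

int main(void)
{
    printf("%d %d\n", example_get_direction(true),
           example_get_direction(false)); /* 1 0 */
    return 0;
}
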
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index ff53887bdaa8..c50329ab493a 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -53,7 +53,7 @@ struct idi_48_gpio {
static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -65,7 +65,7 @@ static int idi_48_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
unsigned i;
- const unsigned register_offset[6] = { 0, 1, 2, 4, 5, 6 };
+ static const unsigned int register_offset[6] = { 0, 1, 2, 4, 5, 6 };
unsigned base_offset;
unsigned mask;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 8d2f51cd9d91..5752d9dab148 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -51,9 +51,9 @@ struct idio_16_gpio {
static int idio_16_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
if (offset > 15)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int idio_16_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 83a2286d93f6..173e06758e6c 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -77,7 +77,10 @@ static int mmio_74xx_get_direction(struct gpio_chip *gc, unsigned offset)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- return !(priv->flags & MMIO_74XX_DIR_OUT);
+ if (priv->flags & MMIO_74XX_DIR_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mmio_74xx_dir_in(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index 181df1581df5..4e44ba4d7423 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -92,7 +92,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
ret = (readl_relaxed(ptr) & AMD_FCH_GPIO_FLAG_DIRECTION);
spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return ret ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
static void amd_fch_gpio_set(struct gpio_chip *gc,
diff --git a/drivers/gpio/sgpio-aspeed.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 7e99860ca447..7e99860ca447 100644
--- a/drivers/gpio/sgpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 09e53c5f3b0a..f1037b61f763 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -487,10 +487,10 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
u32 val;
if (!have_input(gpio, offset))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
if (!have_output(gpio, offset))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
spin_lock_irqsave(&gpio->lock, flags);
@@ -498,8 +498,7 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
spin_unlock_irqrestore(&gpio->lock, flags);
- return !val;
-
+ return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index f1a5ea9b3de2..53fae02c40ad 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -226,7 +226,6 @@ static int ath79_gpio_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
- struct resource *res;
u32 ath79_gpio_count;
bool oe_inverted;
int err;
@@ -256,12 +255,9 @@ static int ath79_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- ctrl->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!ctrl->base)
- return -ENOMEM;
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
raw_spin_lock_init(&ctrl->lock);
err = bgpio_init(&ctrl->gc, dev, 4,
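[Several probe() conversions in this series use the same shorthand: devm_platform_ioremap_resource() folds platform_get_resource() and devm_ioremap_resource() (which also claims the region) into one managed call, reporting failure as an ERR_PTR, so two separate checks collapse into one IS_ERR() test. A sketch of the pattern:]

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Looks up IORESOURCE_MEM index 0; managed, so no unmap on remove. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... register the gpio_chip using base ... */
	return 0;
}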
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 9fa6d3a967d2..4122683eb1f9 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -127,7 +127,7 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
u32 val;
val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK;
- return !!val;
+ return val ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -144,7 +144,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
- if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
goto out;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -170,7 +170,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
reg_offset = GPIO_IN_STATUS(bank_id);
else
reg_offset = GPIO_OUT_STATUS(bank_id);
diff --git a/drivers/gpio/gpio-bd70528.c b/drivers/gpio/gpio-bd70528.c
index 4ba4d4a67881..45b3da8da336 100644
--- a/drivers/gpio/gpio-bd70528.c
+++ b/drivers/gpio/gpio-bd70528.c
@@ -54,8 +54,10 @@ static int bd70528_get_direction(struct gpio_chip *chip, unsigned int offset)
dev_err(bdgpio->chip.dev, "Could not read gpio direction\n");
return ret;
}
- return !(val & BD70528_GPIO_OUT_EN_MASK);
+ if (val & BD70528_GPIO_OUT_EN_MASK)
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;

}
static int bd70528_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -166,9 +168,9 @@ static int bd70528_gpio_get(struct gpio_chip *chip, unsigned int offset)
* locking would make no sense.
*/
ret = bd70528_get_direction(chip, offset);
- if (ret == 0)
+ if (ret == GPIO_LINE_DIRECTION_OUT)
ret = bd70528_gpio_get_o(bdgpio, offset);
- else if (ret == 1)
+ else if (ret == GPIO_LINE_DIRECTION_IN)
ret = bd70528_gpio_get_i(bdgpio, offset);
else
dev_err(bdgpio->chip.dev, "failed to read GPIO direction\n");
@@ -230,3 +232,4 @@ module_platform_driver(bd70528_gpio);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 voltage regulator driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-gpio");
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 5224a946e8ab..c0abc9c6851b 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -37,8 +37,10 @@ static int bd9571mwv_gpio_get_direction(struct gpio_chip *chip,
ret = regmap_read(gpio->bd->regmap, BD9571MWV_GPIO_DIR, &val);
if (ret < 0)
return ret;
- return val & BIT(offset);
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int bd9571mwv_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 8a33c2fc174d..26b40c8b8a12 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -200,9 +200,9 @@ static int dln2_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
struct dln2_gpio *dln2 = gpiochip_get_data(chip);
if (test_bit(offset, dln2->output_enabled))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -214,7 +214,7 @@ static int dln2_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (dir < 0)
return dir;
- if (dir == 1)
+ if (dir == GPIO_LINE_DIRECTION_IN)
return dln2_gpio_pin_get_in_val(dln2, offset);
return dln2_gpio_pin_get_out_val(dln2, offset);
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 620f25b7efb4..17a243c528ad 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -269,13 +269,12 @@ static void em_gio_irq_domain_remove(void *data)
static int em_gio_probe(struct platform_device *pdev)
{
struct em_gio_priv *p;
- struct resource *io[2], *irq[2];
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
unsigned int ngpios;
- int ret;
+ int irq[2], ret;
p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
if (!p)
@@ -285,25 +284,21 @@ static int em_gio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
spin_lock_init(&p->sense_lock);
- io[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- irq[0] = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ irq[0] = platform_get_irq(pdev, 0);
+ if (irq[0] < 0)
+ return irq[0];
- if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
- dev_err(dev, "missing IRQ or IOMEM\n");
- return -EINVAL;
- }
+ irq[1] = platform_get_irq(pdev, 1);
+ if (irq[1] < 0)
+ return irq[1];
- p->base0 = devm_ioremap_nocache(dev, io[0]->start,
- resource_size(io[0]));
- if (!p->base0)
- return -ENOMEM;
+ p->base0 = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(p->base0))
+ return PTR_ERR(p->base0);
- p->base1 = devm_ioremap_nocache(dev, io[1]->start,
- resource_size(io[1]));
- if (!p->base1)
- return -ENOMEM;
+ p->base1 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(p->base1))
+ return PTR_ERR(p->base1);
if (of_property_read_u32(dev->of_node, "ngpios", &ngpios)) {
dev_err(dev, "Missing ngpios OF property\n");
@@ -326,7 +321,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->ngpio = ngpios;
irq_chip = &p->irq_chip;
- irq_chip->name = name;
+ irq_chip->name = "gpio-em";
irq_chip->irq_mask = em_gio_irq_disable;
irq_chip->irq_unmask = em_gio_irq_enable;
irq_chip->irq_set_type = em_gio_irq_set_type;
@@ -346,14 +341,12 @@ static int em_gio_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (devm_request_irq(dev, irq[0]->start,
- em_gio_irq_handler, 0, name, p)) {
+ if (devm_request_irq(dev, irq[0], em_gio_irq_handler, 0, name, p)) {
dev_err(dev, "failed to request low IRQ\n");
return -ENOENT;
}
- if (devm_request_irq(dev, irq[1]->start,
- em_gio_irq_handler, 0, name, p)) {
+ if (devm_request_irq(dev, irq[1], em_gio_irq_handler, 0, name, p)) {
dev_err(dev, "failed to request high IRQ\n");
return -ENOENT;
}
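[gpio-em also drops IORESOURCE_IRQ lookups in favour of platform_get_irq(), which returns a ready-to-use Linux IRQ number or a negative errno that can be passed straight up instead of being remapped to -EINVAL. The idiom, sketched with a placeholder handler:]

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* already an errno, unlike a NULL resource */

	return devm_request_irq(&pdev->dev, irq, example_handler, 0,
				dev_name(&pdev->dev), NULL);
}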
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index fae327d5b06e..da1ef0b1c291 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -77,7 +77,10 @@ static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
unsigned int bit = (offset + exar_gpio->first_pin) % 8;
- return !!(exar_get(chip, addr) & BIT(bit));
+ if (exar_get(chip, addr) & BIT(bit))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index fdc639f856f1..cadd02993539 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -250,7 +250,10 @@ static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
superio_exit(sio->addr);
- return !(dir & 1 << offset);
+ if (dir & 1 << offset)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 78a1db24e931..c22d6f94129c 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -52,7 +52,10 @@ static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
const unsigned int port = offset / 8;
const unsigned int mask = BIT(offset % 8);
- return !!(gpiommgpio->io_state[port] & mask);
+ if (gpiommgpio->io_state[port] & mask)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 6eb56f7ab9c9..a40bd56673fe 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -220,7 +220,10 @@ static int egpio_get_direction(struct gpio_chip *chip, unsigned offset)
egpio = gpiochip_get_data(chip);
- return !test_bit(offset, &egpio->is_out);
+ if (test_bit(offset, &egpio->is_out))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static void egpio_write_cache(struct egpio_info *ei)
@@ -265,7 +268,6 @@ static int __init egpio_probe(struct platform_device *pdev)
struct gpio_chip *chip;
unsigned int irq, irq_end;
int i;
- int ret;
/* Initialize ei data structure. */
ei = devm_kzalloc(&pdev->dev, sizeof(*ei), GFP_KERNEL);
@@ -275,28 +277,24 @@ static int __init egpio_probe(struct platform_device *pdev)
spin_lock_init(&ei->lock);
/* Find chained irq */
- ret = -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res)
ei->chained_irq = res->start;
/* Map egpio chip into virtual address space. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto fail;
- ei->base_addr = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ei->base_addr)
- goto fail;
- pr_debug("EGPIO phys=%08x virt=%p\n", (u32)res->start, ei->base_addr);
+ ei->base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ei->base_addr))
+ return PTR_ERR(ei->base_addr);
if ((pdata->bus_width != 16) && (pdata->bus_width != 32))
- goto fail;
+ return -EINVAL;
+
ei->bus_shift = fls(pdata->bus_width - 1) - 3;
pr_debug("bus_shift = %d\n", ei->bus_shift);
if ((pdata->reg_width != 8) && (pdata->reg_width != 16))
- goto fail;
+ return -EINVAL;
+
ei->reg_shift = fls(pdata->reg_width - 1);
pr_debug("reg_shift = %d\n", ei->reg_shift);
@@ -308,10 +306,9 @@ static int __init egpio_probe(struct platform_device *pdev)
ei->chip = devm_kcalloc(&pdev->dev,
ei->nchips, sizeof(struct egpio_chip),
GFP_KERNEL);
- if (!ei->chip) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!ei->chip)
+ return -ENOMEM;
+
for (i = 0; i < ei->nchips; i++) {
ei->chip[i].reg_start = pdata->chip[i].reg_start;
ei->chip[i].cached_values = pdata->chip[i].initial_values;
@@ -321,10 +318,9 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->label = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"htc-egpio-%d",
i);
- if (!chip->label) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!chip->label)
+ return -ENOMEM;
+
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
@@ -366,10 +362,6 @@ static int __init egpio_probe(struct platform_device *pdev)
}
return 0;
-
-fail:
- printk(KERN_ERR "EGPIO failed to setup\n");
- return ret;
}
#ifdef CONFIG_PM
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 90bf7742f9b0..2f086d0aa1f4 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -159,7 +159,10 @@ static bool ichx_gpio_check_available(struct gpio_chip *gpio, unsigned nr)
static int ichx_gpio_get_direction(struct gpio_chip *gpio, unsigned nr)
{
- return ichx_read_bit(GPIO_IO_SEL, nr);
+ if (ichx_read_bit(GPIO_IO_SEL, nr))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index ef51638f3f75..4ea15f08e0f4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -104,7 +104,10 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
struct kempld_gpio_data *gpio = gpiochip_get_data(chip);
struct kempld_device_data *pld = gpio->pld;
- return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+ if (kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int kempld_gpio_pincount(struct kempld_device_data *pld)
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 801995dd9b26..70fad87ff2db 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -33,7 +33,7 @@ static int lp873x_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
/* This device is output only */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int lp873x_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index a121c8f10610..e1244520cf7d 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -57,7 +57,10 @@ static int lp87565_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !(val & BIT(offset));
+ if (val & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int lp87565_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index e9e47c0d5be7..490ce7bae25e 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -164,6 +164,12 @@ static int lp_irq_type(struct irq_data *d, unsigned type)
value |= TRIG_SEL_BIT | INT_INV_BIT;
outl(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
spin_unlock_irqrestore(&lg->lock, flags);
return 0;
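[The lynxpoint fix pairs the programmed trigger with the matching flow handler: edge interrupts want handle_edge_irq (acked up front so a new edge arriving mid-handler is not lost), level interrupts want handle_level_irq (masked for the duration). Inside an irq_set_type callback the idiom looks like this sketch:]

#include <linux/irq.h>
#include <linux/irqdesc.h>

static int example_irq_set_type(struct irq_data *d, unsigned int type)
{
	/* ... program the trigger type into hardware first ... */

	if (type & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(d, handle_edge_irq);
	else if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);

	return 0;
}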
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index 7086f8b5388f..8f38303fcbc4 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -34,7 +34,10 @@ static int madera_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !!(val & MADERA_GP1_DIR_MASK);
+ if (val & MADERA_GP1_DIR_MASK)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int madera_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 4b4b2ceb82fc..0696d5a21431 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -94,7 +94,7 @@ DECLARE_CRC8_TABLE(max3191x_crc8);
static int max3191x_get_direction(struct gpio_chip *gpio, unsigned int offset)
{
- return 1; /* always in */
+ return GPIO_LINE_DIRECTION_IN; /* always in */
}
static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 642c6321c22a..313bd02dd893 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -18,109 +18,115 @@ struct max77620_gpio {
struct gpio_chip gpio_chip;
struct regmap *rmap;
struct device *dev;
+ struct mutex buslock; /* irq_bus_lock */
+ unsigned int irq_type[8];
+ bool irq_enabled[8];
};
-static const struct regmap_irq max77620_gpio_irqs[] = {
- [0] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE0,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 0,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [1] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE1,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 1,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [2] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE2,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 2,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [3] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE3,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 3,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [4] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE4,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 4,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [5] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE5,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 5,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [6] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE6,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 6,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
- [7] = {
- .reg_offset = 0,
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE7,
- .type = {
- .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
- .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
- .type_reg_offset = 7,
- .types_supported = IRQ_TYPE_EDGE_BOTH,
- },
- },
-};
-static const struct regmap_irq_chip max77620_gpio_irq_chip = {
- .name = "max77620-gpio",
- .irqs = max77620_gpio_irqs,
- .num_irqs = ARRAY_SIZE(max77620_gpio_irqs),
- .num_regs = 1,
- .num_type_reg = 8,
- .irq_reg_stride = 1,
- .type_reg_stride = 1,
- .status_base = MAX77620_REG_IRQ_LVL2_GPIO,
- .type_base = MAX77620_REG_GPIO0,
+static irqreturn_t max77620_gpio_irqhandler(int irq, void *data)
+{
+ struct max77620_gpio *gpio = data;
+ unsigned int value, offset;
+ unsigned long pending;
+ int err;
+
+ err = regmap_read(gpio->rmap, MAX77620_REG_IRQ_LVL2_GPIO, &value);
+ if (err < 0) {
+ dev_err(gpio->dev, "REG_IRQ_LVL2_GPIO read failed: %d\n", err);
+ return IRQ_NONE;
+ }
+
+ pending = value;
+
+ for_each_set_bit(offset, &pending, 8) {
+ unsigned int virq;
+
+ virq = irq_find_mapping(gpio->gpio_chip.irq.domain, offset);
+ handle_nested_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void max77620_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+
+ gpio->irq_enabled[data->hwirq] = false;
+}
+static void max77620_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+
+ gpio->irq_enabled[data->hwirq] = true;
+}
+
+static int max77620_gpio_set_irq_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int irq_type;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = MAX77620_CNFG_GPIO_INT_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = MAX77620_CNFG_GPIO_INT_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_type = MAX77620_CNFG_GPIO_INT_RISING |
+ MAX77620_CNFG_GPIO_INT_FALLING;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gpio->irq_type[data->hwirq] = irq_type;
+
+ return 0;
+}
+
+static void max77620_gpio_bus_lock(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->buslock);
+}
+
+static void max77620_gpio_bus_sync_unlock(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct max77620_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int value, offset = data->hwirq;
+ int err;
+
+ value = gpio->irq_enabled[offset] ? gpio->irq_type[offset] : 0;
+
+ err = regmap_update_bits(gpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_INT_MASK, value);
+ if (err < 0)
+ dev_err(chip->parent, "failed to update interrupt mask: %d\n",
+ err);
+
+ mutex_unlock(&gpio->buslock);
+}
+
+static struct irq_chip max77620_gpio_irqchip = {
+ .name = "max77620-gpio",
+ .irq_mask = max77620_gpio_irq_mask,
+ .irq_unmask = max77620_gpio_irq_unmask,
+ .irq_set_type = max77620_gpio_set_irq_type,
+ .irq_bus_lock = max77620_gpio_bus_lock,
+ .irq_bus_sync_unlock = max77620_gpio_bus_sync_unlock,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static int max77620_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
@@ -254,14 +260,6 @@ static int max77620_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return -ENOTSUPP;
}
-static int max77620_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
-{
- struct max77620_gpio *mgpio = gpiochip_get_data(gc);
- struct max77620_chip *chip = dev_get_drvdata(mgpio->dev->parent);
-
- return regmap_irq_get_virq(chip->gpio_irq_data, offset);
-}
-
static int max77620_gpio_probe(struct platform_device *pdev)
{
struct max77620_chip *chip = dev_get_drvdata(pdev->dev.parent);
@@ -287,7 +285,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
- mgpio->gpio_chip.to_irq = max77620_gpio_to_irq;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
mgpio->gpio_chip.base = -1;
@@ -303,15 +300,21 @@ static int max77620_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, gpio_irq,
- IRQF_ONESHOT, -1,
- &max77620_gpio_irq_chip,
- &chip->gpio_irq_data);
+ mutex_init(&mgpio->buslock);
+
+ gpiochip_irqchip_add_nested(&mgpio->gpio_chip, &max77620_gpio_irqchip,
+ 0, handle_edge_irq, IRQ_TYPE_NONE);
+
+ ret = request_threaded_irq(gpio_irq, NULL, max77620_gpio_irqhandler,
+ IRQF_ONESHOT, "max77620-gpio", mgpio);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add gpio irq_chip %d\n", ret);
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
return ret;
}
+ gpiochip_set_nested_irqchip(&mgpio->gpio_chip, &max77620_gpio_irqchip,
+ gpio_irq);
+
return 0;
}
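[The max77620 rework trades the declarative regmap-irq tables for an open-coded nested irqchip. The controller sits behind I2C, so irq_mask/irq_unmask only record state and the slow register write is deferred to irq_bus_sync_unlock(), where sleeping is allowed. The registration order matters; a sketch with placeholder names (gpio, example_irqchip, example_irq_thread, parent_irq):]

mutex_init(&gpio->buslock);

/* Nested irqs: dispatched via handle_nested_irq() from a thread context. */
ret = gpiochip_irqchip_add_nested(&gpio->gpio_chip, &example_irqchip,
				  0, handle_edge_irq, IRQ_TYPE_NONE);
if (ret)
	return ret;

ret = request_threaded_irq(parent_irq, NULL, example_irq_thread,
			   IRQF_ONESHOT, "example-gpio", gpio);
if (ret < 0)
	return ret;

gpiochip_set_nested_irqchip(&gpio->gpio_chip, &example_irqchip, parent_irq);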
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index 70fdb42a8e88..1e21c661d79d 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -211,3 +211,4 @@ MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 3302125e5265..48918a016cd8 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -162,7 +162,10 @@ static int mrfld_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
- return !(readl(gpdr) & BIT(offset % 32));
+ if (readl(gpdr) & BIT(offset % 32))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int mrfld_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
@@ -362,8 +365,9 @@ static void mrfld_irq_handler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
-static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+static int mrfld_irq_init_hw(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
unsigned int base;
@@ -375,6 +379,8 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
reg = gpio_reg(&priv->chip, base, GFER);
writel(0, reg);
}
+
+ return 0;
}
static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -393,14 +399,36 @@ static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
return name;
}
-static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int mrfld_gpio_add_pin_ranges(struct gpio_chip *chip)
{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
const struct mrfld_gpio_pinrange *range;
const char *pinctrl_dev_name;
+ unsigned int i;
+ int retval;
+
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
+ for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
+ range = &mrfld_gpio_ranges[i];
+ retval = gpiochip_add_pin_range(&priv->chip, pinctrl_dev_name,
+ range->gpio_base,
+ range->pin_base,
+ range->npins);
+ if (retval) {
+ dev_err(priv->dev, "failed to add GPIO pin range\n");
+ return retval;
+ }
+ }
+
+ return 0;
+}
+
+static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct gpio_irq_chip *girq;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
- unsigned int i;
int retval;
retval = pcim_enable_device(pdev);
@@ -441,42 +469,31 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
priv->chip.base = gpio_base;
priv->chip.ngpio = MRFLD_NGPIO;
priv->chip.can_sleep = false;
+ priv->chip.add_pin_ranges = mrfld_gpio_add_pin_ranges;
raw_spin_lock_init(&priv->lock);
- pci_set_drvdata(pdev, priv);
+ girq = &priv->chip.irq;
+ girq->chip = &mrfld_irqchip;
+ girq->init_hw = mrfld_irq_init_hw;
+ girq->parent_handler = mrfld_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = pdev->irq;
+ girq->first = irq_base;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
if (retval) {
dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
return retval;
}
- pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
- for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
- range = &mrfld_gpio_ranges[i];
- retval = gpiochip_add_pin_range(&priv->chip,
- pinctrl_dev_name,
- range->gpio_base,
- range->pin_base,
- range->npins);
- if (retval) {
- dev_err(&pdev->dev, "failed to add GPIO pin range\n");
- return retval;
- }
- }
-
- retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
- handle_bad_irq, IRQ_TYPE_NONE);
- if (retval) {
- dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
- return retval;
- }
-
- mrfld_irq_init_hw(priv);
-
- gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
- mrfld_irq_handler);
-
+ pci_set_drvdata(pdev, priv);
return 0;
}
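[gpio-merrifield switches to the struct gpio_irq_chip template: the irqchip is described before devm_gpiochip_add_data(), and the core then performs what the explicit gpiochip_irqchip_add() and gpiochip_set_chained_irqchip() calls did afterwards, invoking init_hw() at the right moment so no interrupt fires before the handler is in place. The shape of the setup, sketched with placeholder names:]

struct gpio_irq_chip *girq = &priv->chip.irq;

girq->chip = &example_irqchip;
girq->init_hw = example_irq_init_hw;	/* quiesce hardware pre-registration */
girq->parent_handler = example_irq_handler;	/* chained demux handler */
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
	return -ENOMEM;
girq->parents[0] = parent_irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;	/* replaced per line once a type is set */

return devm_gpiochip_add_data(dev, &priv->chip, priv);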
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 6f904c874678..f729e3e9e983 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -370,15 +370,23 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- if (gc->bgpio_dir_unreadable)
- return !(gc->bgpio_dir & bgpio_line2mask(gc, gpio));
- if (gc->reg_dir_out)
- return !(gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio));
+ if (gc->bgpio_dir_unreadable) {
+ if (gc->bgpio_dir & bgpio_line2mask(gc, gpio))
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
+ if (gc->reg_dir_out) {
+ if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio))
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gc->reg_dir_in)
- return !!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio));
+ if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio)))
+ return GPIO_LINE_DIRECTION_OUT;
- /* This should not happen */
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 213aedc97dc2..56d647a30e3e 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -34,14 +34,9 @@
#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
-enum {
- GPIO_MOCKUP_DIR_IN = 0,
- GPIO_MOCKUP_DIR_OUT = 1,
-};
-
/*
* struct gpio_pin_status - structure describing a GPIO status
- * @dir: Configures direction of gpio as "in" or "out", 0=in, 1=out
+ * @dir: Configures direction of gpio as "in" or "out"
* @value: Configures status of the gpio as 0(low) or 1(high)
*/
struct gpio_mockup_line_status {
@@ -146,13 +141,68 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
mutex_unlock(&chip->lock);
}
+static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpio_desc *desc;
+ struct gpio_chip *gc;
+ struct irq_sim *sim;
+ int curr, irq, irq_type;
+
+ gc = &chip->gc;
+ desc = &gc->gpiodev->descs[offset];
+ sim = &chip->irqsim;
+
+ mutex_lock(&chip->lock);
+
+ if (test_bit(FLAG_REQUESTED, &desc->flags) &&
+ !test_bit(FLAG_IS_OUT, &desc->flags)) {
+ curr = __gpio_mockup_get(chip, offset);
+ if (curr == value)
+ goto out;
+
+ irq = irq_sim_irqnum(sim, offset);
+ irq_type = irq_get_trigger_type(irq);
+
+ if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
+ (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
+ irq_sim_fire(sim, offset);
+ }
+
+ /* Change the value unless we're actively driving the line. */
+ if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ !test_bit(FLAG_IS_OUT, &desc->flags))
+ __gpio_mockup_set(chip, offset, value);
+
+out:
+ chip->lines[offset].pull = value;
+ mutex_unlock(&chip->lock);
+ return 0;
+}
+
+static int gpio_mockup_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long config)
+{
+ struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ return gpio_mockup_apply_pull(chip, offset, 1);
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return gpio_mockup_apply_pull(chip, offset, 0);
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
static int gpio_mockup_dirout(struct gpio_chip *gc,
unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- chip->lines[offset].dir = GPIO_MOCKUP_DIR_OUT;
+ chip->lines[offset].dir = GPIO_LINE_DIRECTION_OUT;
__gpio_mockup_set(chip, offset, value);
mutex_unlock(&chip->lock);
@@ -164,7 +214,7 @@ static int gpio_mockup_dirin(struct gpio_chip *gc, unsigned int offset)
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- chip->lines[offset].dir = GPIO_MOCKUP_DIR_IN;
+ chip->lines[offset].dir = GPIO_LINE_DIRECTION_IN;
mutex_unlock(&chip->lock);
return 0;
@@ -226,12 +276,8 @@ static ssize_t gpio_mockup_debugfs_write(struct file *file,
size_t size, loff_t *ppos)
{
struct gpio_mockup_dbgfs_private *priv;
- int rv, val, curr, irq, irq_type;
- struct gpio_mockup_chip *chip;
+ int rv, val;
struct seq_file *sfile;
- struct gpio_desc *desc;
- struct gpio_chip *gc;
- struct irq_sim *sim;
if (*ppos != 0)
return -EINVAL;
@@ -244,35 +290,9 @@ static ssize_t gpio_mockup_debugfs_write(struct file *file,
sfile = file->private_data;
priv = sfile->private;
- chip = priv->chip;
- gc = &chip->gc;
- desc = &gc->gpiodev->descs[priv->offset];
- sim = &chip->irqsim;
-
- mutex_lock(&chip->lock);
-
- if (test_bit(FLAG_REQUESTED, &desc->flags) &&
- !test_bit(FLAG_IS_OUT, &desc->flags)) {
- curr = __gpio_mockup_get(chip, priv->offset);
- if (curr == val)
- goto out;
-
- irq = irq_sim_irqnum(sim, priv->offset);
- irq_type = irq_get_trigger_type(irq);
-
- if ((val == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
- (val == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
- irq_sim_fire(sim, priv->offset);
- }
-
- /* Change the value unless we're actively driving the line. */
- if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- !test_bit(FLAG_IS_OUT, &desc->flags))
- __gpio_mockup_set(chip, priv->offset, val);
-
-out:
- chip->lines[priv->offset].pull = val;
- mutex_unlock(&chip->lock);
+ rv = gpio_mockup_apply_pull(priv->chip, priv->offset, val);
+ if (rv)
+ return rv;
return size;
}
@@ -418,6 +438,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
+ gc->set_config = gpio_mockup_set_config;
gc->to_irq = gpio_mockup_to_irq;
gc->free = gpio_mockup_free;
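[gpio-mockup's new set_config callback also illustrates the generic pinconf plumbing: the core hands the driver a packed unsigned long, the driver unpacks the parameter with pinconf_to_config_param() and returns -ENOTSUPP for anything it does not model. Sketched with an assumed example_apply_pull() helper:]

#include <linux/pinctrl/pinconf-generic.h>

static int example_set_config(struct gpio_chip *gc, unsigned int offset,
			      unsigned long config)
{
	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_PULL_UP:
		return example_apply_pull(gc, offset, 1);	/* assumed */
	case PIN_CONFIG_BIAS_PULL_DOWN:
		return example_apply_pull(gc, offset, 0);
	default:
		return -ENOTSUPP;
	}
}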
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 3fd729994a38..8299909318f4 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -78,9 +78,9 @@ static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
/* All lines are hard wired to be either input or output, not both. */
if (chip->desc->in_mask & BIT(offset))
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
else if (chip->desc->out_mask & BIT(offset))
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
else
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 16a47de29c94..f1e164cecff8 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
+#include <linux/interrupt.h>
#define MPC8XXX_GPIO_PINS 32
@@ -127,20 +128,19 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return -ENXIO;
}
-static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
+static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = data;
struct gpio_chip *gc = &mpc8xxx_gc->gc;
- unsigned int mask;
+ unsigned long mask;
+ int i;
mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
& gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
- if (mask)
- generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
- 32 - ffs(mask)));
- if (chip->irq_eoi)
- chip->irq_eoi(&desc->irq_data);
+ for_each_set_bit(i, &mask, 32)
+ generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq, 31 - i));
+
+ return IRQ_HANDLED;
}
static void mpc8xxx_irq_unmask(struct irq_data *d)
@@ -377,7 +377,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* It's assumed that only a single type of gpio controller is available
* on the current machine, so overwriting global data is fine.
*/
- mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+ if (devtype->irq_set_type)
+ mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
if (devtype->gpio_dir_out)
gc->direction_output = devtype->gpio_dir_out;
@@ -386,6 +387,9 @@ static int mpc8xxx_probe(struct platform_device *pdev)
gc->to_irq = mpc8xxx_gpio_to_irq;
+ if (of_device_is_compatible(np, "fsl,qoriq-gpio"))
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+
ret = gpiochip_add_data(gc, mpc8xxx_gc);
if (ret) {
pr_err("%pOF: GPIO chip registration failed with status %d\n",
@@ -409,8 +413,16 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (devtype->gpio_dir_in_init)
devtype->gpio_dir_in_init(gc);
- irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
- mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
+ ret = devm_request_irq(&pdev->dev, mpc8xxx_gc->irqn,
+ mpc8xxx_gpio_irq_cascade,
+ IRQF_NO_THREAD | IRQF_SHARED, "gpio-cascade",
+ mpc8xxx_gc);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to devm_request_irq(%d), ret = %d\n",
+ np->full_name, mpc8xxx_gc->irqn, ret);
+ goto err;
+ }
+
return 0;
err:
iounmap(mpc8xxx_gc->regs);
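[Two things change in the mpc8xxx cascade: it becomes an ordinary shared, non-threaded interrupt (devm_request_irq with IRQF_NO_THREAD | IRQF_SHARED) instead of a chained handler, and the single "32 - ffs(mask)" dispatch becomes a loop so one hardware event services every pending line. The demux idiom, sketched; the pending-read helper is assumed and the bit-to-hwirq mapping (straight here, MSB-first in mpc8xxx) is chip-specific:]

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>

struct example_gc {
	struct irq_domain *domain;
	/* ... */
};

static irqreturn_t example_cascade(int irq, void *data)
{
	struct example_gc *egc = data;
	unsigned long pending = example_read_pending(egc);	/* assumed */
	int bit;

	/* Service every pending child, not only the first set bit. */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(irq_linear_revmap(egc->domain, bit));

	return pending ? IRQ_HANDLED : IRQ_NONE;	/* IRQ_NONE aids sharing */
}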
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 6c0687694341..993bbeb3c006 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -384,7 +384,10 @@ static int mvebu_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u);
- return !!(u & BIT(pin));
+ if (u & BIT(pin))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned int pin)
@@ -773,23 +776,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
- struct resource *res;
u32 set;
if (!of_device_is_compatible(mvchip->chip.of_node,
"marvell,armada-370-gpio"))
return 0;
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm");
- if (!res)
- return 0;
-
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
@@ -812,7 +804,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- mvpwm->membase = devm_ioremap_resource(dev, res);
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
if (IS_ERR(mvpwm->membase))
return PTR_ERR(mvpwm->membase);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 7907a8755866..c77d474185f3 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -411,6 +411,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
+ int irq_count;
int irq_base;
int err;
@@ -426,9 +427,15 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (IS_ERR(port->base))
return PTR_ERR(port->base);
- port->irq_high = platform_get_irq(pdev, 1);
- if (port->irq_high < 0)
- port->irq_high = 0;
+ irq_count = platform_irq_count(pdev);
+ if (irq_count < 0)
+ return irq_count;
+
+ if (irq_count > 1) {
+ port->irq_high = platform_get_irq(pdev, 1);
+ if (port->irq_high < 0)
+ port->irq_high = 0;
+ }
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 5e5437a2c607..c4a314c68555 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -248,7 +248,10 @@ static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
u32 dir;
dir = readl(port->base + PINCTRL_DOE(port));
- return !(dir & mask);
+ if (dir & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static const struct platform_device_id mxs_gpio_ids[] = {
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d0f27084a942..3bd8adaeed9e 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -805,8 +805,10 @@ static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
- return !!(readl_relaxed(bank->base + bank->regs->direction) &
- BIT(offset));
+ if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index de5d1383f28d..82122c3c688a 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -449,7 +449,10 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
if (ret < 0)
return ret;
- return !!(reg_val & bit);
+ if (reg_val & bit)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 5aa136a6a3e0..df51dd08bdfe 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -61,9 +61,9 @@ static int idio_16_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (offset > 15)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int idio_16_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 52f1647a46fd..44c1e4fc489f 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -104,15 +104,18 @@ static int idio_24_gpio_get_direction(struct gpio_chip *chip,
/* FET Outputs */
if (offset < 24)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
/* Isolated Inputs */
if (offset < 48)
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
/* TTL/CMOS I/O */
/* OUT MODE = 1 when TTL/CMOS Output Mode is set */
- return !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask);
+ if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int idio_24_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index f809a5a8e9eb..1331b2a94679 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -65,7 +65,7 @@ static int pisosr_gpio_get_direction(struct gpio_chip *chip,
unsigned offset)
{
/* This device always input */
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int pisosr_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 722ce5cf861e..5df7782e348f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -63,7 +63,10 @@ static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
{
struct pl061 *pl061 = gpiochip_get_data(gc);
- return !(readb(pl061->base + GPIODIR) & BIT(offset));
+ if (readb(pl061->base + GPIODIR) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index b77ea16ffa03..bb100e0124e6 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -147,7 +147,10 @@ static int rpi_exp_gpio_get_direction(struct gpio_chip *gc, unsigned int off)
get.gpio);
return ret ? ret : -EIO;
}
- return !get.direction;
+ if (get.direction)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int rpi_exp_gpio_get(struct gpio_chip *gc, unsigned int off)
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 187984d26f47..f800b250971c 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -279,7 +279,10 @@ static int gpio_rcar_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- return !(gpio_rcar_read(p, INOUTSEL) & BIT(offset));
+ if (gpio_rcar_read(p, INOUTSEL) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -483,7 +486,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->ngpio = npins;
irq_chip = &p->irq_chip;
- irq_chip->name = name;
+ irq_chip->name = "gpio-rcar";
irq_chip->parent_device = dev;
irq_chip->irq_mask = gpio_rcar_irq_disable;
irq_chip->irq_unmask = gpio_rcar_irq_enable;
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
new file mode 100644
index 000000000000..28dcbb58b76b
--- /dev/null
+++ b/drivers/gpio/gpio-rda.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RDA Micro GPIO driver
+ *
+ * Copyright (C) 2012 RDA Micro Inc.
+ * Copyright (C) 2019 Manivannan Sadhasivam
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define RDA_GPIO_OEN_VAL 0x00
+#define RDA_GPIO_OEN_SET_OUT 0x04
+#define RDA_GPIO_OEN_SET_IN 0x08
+#define RDA_GPIO_VAL 0x0c
+#define RDA_GPIO_SET 0x10
+#define RDA_GPIO_CLR 0x14
+#define RDA_GPIO_INT_CTRL_SET 0x18
+#define RDA_GPIO_INT_CTRL_CLR 0x1c
+#define RDA_GPIO_INT_CLR 0x20
+#define RDA_GPIO_INT_STATUS 0x24
+
+#define RDA_GPIO_IRQ_RISE_SHIFT 0
+#define RDA_GPIO_IRQ_FALL_SHIFT 8
+#define RDA_GPIO_DEBOUCE_SHIFT 16
+#define RDA_GPIO_LEVEL_SHIFT 24
+
+#define RDA_GPIO_IRQ_MASK 0xff
+
+/* Each bank consists of 32 GPIOs */
+#define RDA_GPIO_BANK_NR 32
+
+struct rda_gpio {
+ struct gpio_chip chip;
+ void __iomem *base;
+ spinlock_t lock;
+ struct irq_chip irq_chip;
+ int irq;
+};
+
+static inline void rda_gpio_update(struct gpio_chip *chip, unsigned int offset,
+ u16 reg, int val)
+{
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ void __iomem *base = rda_gpio->base;
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&rda_gpio->lock, flags);
+ tmp = readl_relaxed(base + reg);
+
+ if (val)
+ tmp |= BIT(offset);
+ else
+ tmp &= ~BIT(offset);
+
+ writel_relaxed(tmp, base + reg);
+ spin_unlock_irqrestore(&rda_gpio->lock, flags);
+}
+
+static void rda_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ void __iomem *base = rda_gpio->base;
+ u32 offset = irqd_to_hwirq(data);
+ u32 value;
+
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+ value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+}
+
+static void rda_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ u32 offset = irqd_to_hwirq(data);
+
+ rda_gpio_update(chip, offset, RDA_GPIO_INT_CLR, 1);
+}
+
+static int rda_gpio_set_irq(struct gpio_chip *chip, u32 offset,
+ unsigned int flow_type)
+{
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ void __iomem *base = rda_gpio->base;
+ u32 value;
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ /* Set rising edge trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+
+ /* Switch to edge trigger interrupt */
+ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ /* Set falling edge trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+
+ /* Switch to edge trigger interrupt */
+ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ /* Set both edge trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+ value |= BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+
+ /* Switch to edge trigger interrupt */
+ value = BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_CLR);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ /* Set high level trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_RISE_SHIFT;
+
+ /* Switch to level trigger interrupt */
+ value |= BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ /* Set low level trigger */
+ value = BIT(offset) << RDA_GPIO_IRQ_FALL_SHIFT;
+
+ /* Switch to level trigger interrupt */
+ value |= BIT(offset) << RDA_GPIO_LEVEL_SHIFT;
+ writel_relaxed(value, base + RDA_GPIO_INT_CTRL_SET);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rda_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ u32 offset = irqd_to_hwirq(data);
+ u32 trigger = irqd_get_trigger_type(data);
+
+ rda_gpio_set_irq(chip, offset, trigger);
+}
+
+static int rda_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ u32 offset = irqd_to_hwirq(data);
+ int ret;
+
+ ret = rda_gpio_set_irq(chip, offset, flow_type);
+ if (ret)
+ return ret;
+
+ if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ irq_set_handler_locked(data, handle_level_irq);
+ else if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ return 0;
+}
+
+static void rda_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ struct rda_gpio *rda_gpio = gpiochip_get_data(chip);
+ unsigned long status;
+ u32 n, girq;
+
+ chained_irq_enter(ic, desc);
+
+ status = readl_relaxed(rda_gpio->base + RDA_GPIO_INT_STATUS);
+ /* Only lower 8 bits are capable of generating interrupts */
+ status &= RDA_GPIO_IRQ_MASK;
+
+ for_each_set_bit(n, &status, RDA_GPIO_BANK_NR) {
+ girq = irq_find_mapping(chip->irq.domain, n);
+ generic_handle_irq(girq);
+ }
+
+ chained_irq_exit(ic, desc);
+}
+
+static int rda_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
+ struct rda_gpio *rda_gpio;
+ u32 ngpios;
+ int ret;
+
+ rda_gpio = devm_kzalloc(dev, sizeof(*rda_gpio), GFP_KERNEL);
+ if (!rda_gpio)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Not all ports have interrupt capability. For instance, on
+ * RDA8810PL, GPIOC doesn't support interrupt. So we must handle
+ * those also.
+ */
+ rda_gpio->irq = platform_get_irq(pdev, 0);
+
+ rda_gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rda_gpio->base))
+ return PTR_ERR(rda_gpio->base);
+
+ spin_lock_init(&rda_gpio->lock);
+
+ ret = bgpio_init(&rda_gpio->chip, dev, 4,
+ rda_gpio->base + RDA_GPIO_VAL,
+ rda_gpio->base + RDA_GPIO_SET,
+ rda_gpio->base + RDA_GPIO_CLR,
+ rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
+ rda_gpio->base + RDA_GPIO_OEN_SET_IN,
+ BGPIOF_READ_OUTPUT_REG_SET);
+ if (ret) {
+ dev_err(dev, "bgpio_init failed\n");
+ return ret;
+ }
+
+ rda_gpio->chip.label = dev_name(dev);
+ rda_gpio->chip.ngpio = ngpios;
+ rda_gpio->chip.base = -1;
+ rda_gpio->chip.parent = dev;
+ rda_gpio->chip.of_node = np;
+
+ if (rda_gpio->irq >= 0) {
+ rda_gpio->irq_chip.name = "rda-gpio",
+ rda_gpio->irq_chip.irq_ack = rda_gpio_irq_ack,
+ rda_gpio->irq_chip.irq_mask = rda_gpio_irq_mask,
+ rda_gpio->irq_chip.irq_unmask = rda_gpio_irq_unmask,
+ rda_gpio->irq_chip.irq_set_type = rda_gpio_irq_set_type,
+ rda_gpio->irq_chip.flags = IRQCHIP_SKIP_SET_WAKE,
+
+ girq = &rda_gpio->chip.irq;
+ girq->chip = &rda_gpio->irq_chip;
+ girq->handler = handle_bad_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->parent_handler = rda_gpio_irq_handler;
+ girq->parent_handler_data = rda_gpio;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = rda_gpio->irq;
+ }
+
+ platform_set_drvdata(pdev, rda_gpio);
+
+ return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio);
+}
+
+static const struct of_device_id rda_gpio_of_match[] = {
+ { .compatible = "rda,8810pl-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rda_gpio_of_match);
+
+static struct platform_driver rda_gpio_driver = {
+ .probe = rda_gpio_probe,
+ .driver = {
+ .name = "rda-gpio",
+ .of_match_table = rda_gpio_of_match,
+ },
+};
+
+module_platform_driver_probe(rda_gpio_driver, rda_gpio_probe);
+
+MODULE_DESCRIPTION("RDA Micro GPIO driver");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index fdc7a9d5b382..d35169bde25a 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -26,7 +26,8 @@ static int gpio_reg_get_direction(struct gpio_chip *gc, unsigned offset)
{
struct gpio_reg *r = to_gpio_reg(gc);
- return r->direction & BIT(offset) ? 1 : 0;
+ return r->direction & BIT(offset) ? GPIO_LINE_DIRECTION_IN :
+ GPIO_LINE_DIRECTION_OUT;
}
static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 46b7cf23fb0f..edff5e81489f 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -53,7 +53,10 @@ static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset)
{
void __iomem *gpdr = sa1100_gpio_chip(chip)->membase + R_GPDR;
- return !(readl_relaxed(gpdr) & BIT(offset));
+ if (readl_relaxed(gpdr) & BIT(offset))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int sa1100_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index 7d718557092e..b04c561f3280 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -119,7 +119,8 @@ static int sama5d2_piobu_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return (ret == PIOBU_IN) ? 1 : 0;
+ return (ret == PIOBU_IN) ? GPIO_LINE_DIRECTION_IN :
+ GPIO_LINE_DIRECTION_OUT;
}
/**
@@ -154,9 +155,9 @@ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin)
/* if pin is input, read value from PDS else read from SOD */
int ret = sama5d2_piobu_get_direction(chip, pin);
- if (ret == 1)
+ if (ret == GPIO_LINE_DIRECTION_IN)
ret = sama5d2_piobu_read_value(chip, pin, PIOBU_PDS);
- else if (!ret)
+ else if (ret == GPIO_LINE_DIRECTION_OUT)
ret = sama5d2_piobu_read_value(chip, pin, PIOBU_SOD);
if (ret < 0)
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index fb143f28c386..c65f35b68202 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -127,7 +127,10 @@ static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned gpio_num)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
- return sch_gpio_reg_get(sch, gpio_num, GIO);
+ if (sch_gpio_reg_get(sch, gpio_num, GIO))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static const struct gpio_chip sch_gpio_chip = {
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 8ecf336c9af4..da01e1cad7cb 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -228,7 +228,10 @@ static int sch311x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
data = inb(block->runtime_reg + block->config_regs[offset]);
spin_unlock(&block->lock);
- return !!(data & SCH311X_GPIO_CONF_DIR);
+ if (data & SCH311X_GPIO_CONF_DIR)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int sch311x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 006a7e6a75f2..311f66757b92 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -203,9 +203,9 @@ static int gpio_siox_direction_output(struct gpio_chip *chip,
static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
{
if (offset < 12)
- return 1; /* input */
+ return GPIO_LINE_DIRECTION_IN;
else
- return 0; /* output */
+ return GPIO_LINE_DIRECTION_OUT;
}
static int gpio_siox_probe(struct siox_device *sdevice)
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 994d542daf53..542706a852e6 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -84,7 +84,10 @@ static int stmpe_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !(ret & mask);
+ if (ret & mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int stmpe_gpio_direction_output(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 75b1135b383a..6be0684cfa49 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,10 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
if (ret < 0)
return ret;
- return !(ret & BIT(pos));
+ if (ret & BIT(pos))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int tc3589x_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8a01d3694b28..6fdfe4c5303e 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -215,7 +215,10 @@ static int tegra_gpio_get_direction(struct gpio_chip *chip,
oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
- return !(oe & pin_mask);
+ if (oe & pin_mask)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index a9058fda187e..55b43b7ce88d 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -15,6 +15,14 @@
#include <dt-bindings/gpio/tegra186-gpio.h>
#include <dt-bindings/gpio/tegra194-gpio.h>
+/* security registers */
+#define TEGRA186_GPIO_CTL_SCR 0x0c
+#define TEGRA186_GPIO_CTL_SCR_SEC_WEN BIT(28)
+#define TEGRA186_GPIO_CTL_SCR_SEC_REN BIT(27)
+
+#define TEGRA186_GPIO_INT_ROUTE_MAPPING(p, x) (0x14 + (p) * 0x20 + (x) * 4)
+
+/* control registers */
#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
#define TEGRA186_GPIO_ENABLE_CONFIG_OUT BIT(1)
@@ -24,6 +32,7 @@
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE (0x3 << 2)
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK (0x3 << 2)
#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
+#define TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE BIT(5)
#define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
#define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
@@ -44,15 +53,16 @@
struct tegra_gpio_port {
const char *name;
- unsigned int offset;
+ unsigned int bank;
+ unsigned int port;
unsigned int pins;
- unsigned int irq;
};
struct tegra_gpio_soc {
const struct tegra_gpio_port *ports;
unsigned int num_ports;
const char *name;
+ unsigned int instance;
};
struct tegra_gpio {
@@ -63,6 +73,7 @@ struct tegra_gpio {
const struct tegra_gpio_soc *soc;
+ void __iomem *secure;
void __iomem *base;
};
@@ -89,12 +100,15 @@ static void __iomem *tegra186_gpio_get_base(struct tegra_gpio *gpio,
unsigned int pin)
{
const struct tegra_gpio_port *port;
+ unsigned int offset;
port = tegra186_gpio_get_port(gpio, &pin);
if (!port)
return NULL;
- return gpio->base + port->offset + pin * 0x20;
+ offset = port->bank * 0x1000 + port->port * 0x200;
+
+ return gpio->base + offset + pin * 0x20;
}
static int tegra186_gpio_get_direction(struct gpio_chip *chip,
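A quick cross-check of the new bank/port addressing against the old flat offsets in the per-port tables further down: main port C used to live at offset 0x3200 and was served by controller 3; with the new encoding it is bank 3, port 1, so 3 * 0x1000 + 1 * 0x200 = 0x3200, the same address, and the old "controller" column is now simply the bank number.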
@@ -110,9 +124,9 @@ static int tegra186_gpio_get_direction(struct gpio_chip *chip,
value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int tegra186_gpio_direction_input(struct gpio_chip *chip,
@@ -204,6 +218,42 @@ static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
}
+static int tegra186_gpio_set_config(struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned long config)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ u32 debounce, value;
+ void __iomem *base;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (base == NULL)
+ return -ENXIO;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ debounce = pinconf_to_config_argument(config);
+
+ /*
+ * The Tegra186 GPIO controller supports a maximum of 255 ms debounce
+ * time.
+ */
+ if (debounce > 255000)
+ return -EINVAL;
+
+ debounce = DIV_ROUND_UP(debounce, USEC_PER_MSEC);
+
+ value = TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(debounce);
+ writel(value, base + TEGRA186_GPIO_DEBOUNCE_CONTROL);
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_DEBOUNCE;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
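For context, the config argument arriving in the new callback is a packed generic-pinconf value; a consumer-side call such as gpiod_set_debounce(desc, 10000) hands down the equivalent of the sketch below (the helpers are the real ones from <linux/pinctrl/pinconf-generic.h>; the wrapper function is illustrative):

#include <linux/pinctrl/pinconf-generic.h>

/* Sketch: how a 10 ms (10000 us) debounce request is packed and unpacked. */
static void debounce_config_example(void)
{
	unsigned long config;
	enum pin_config_param param;
	u32 debounce_us;

	config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 10000);

	param = pinconf_to_config_param(config);	  /* PIN_CONFIG_INPUT_DEBOUNCE */
	debounce_us = pinconf_to_config_argument(config); /* 10000 */
}

With the hunk above, 10000 us then becomes DIV_ROUND_UP(10000, USEC_PER_MSEC) = 10, the millisecond threshold written to TEGRA186_GPIO_DEBOUNCE_CONTROL.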
static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec,
u32 *flags)
@@ -327,7 +377,7 @@ static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
else
irq_set_handler_locked(data, handle_edge_irq);
- return 0;
+ return irq_chip_set_type_parent(data, type);
}
static void tegra186_gpio_irq(struct irq_desc *desc)
@@ -342,12 +392,14 @@ static void tegra186_gpio_irq(struct irq_desc *desc)
for (i = 0; i < gpio->soc->num_ports; i++) {
const struct tegra_gpio_port *port = &gpio->soc->ports[i];
- void __iomem *base = gpio->base + port->offset;
unsigned int pin, irq;
unsigned long value;
+ void __iomem *base;
- /* skip ports that are not associated with this controller */
- if (parent != gpio->irq[port->irq])
+ base = gpio->base + port->bank * 0x1000 + port->port * 0x200;
+
+ /* skip ports that are not associated with this bank */
+ if (parent != gpio->irq[port->bank])
goto skip;
value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1));
@@ -367,47 +419,119 @@ skip:
chained_irq_exit(chip, desc);
}
-static int tegra186_gpio_irq_domain_xlate(struct irq_domain *domain,
- struct device_node *np,
- const u32 *spec, unsigned int size,
- unsigned long *hwirq,
- unsigned int *type)
+static int tegra186_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
{
struct tegra_gpio *gpio = gpiochip_get_data(domain->host_data);
unsigned int port, pin, i, offset = 0;
- if (size < 2)
+ if (WARN_ON(gpio->gpio.of_gpio_n_cells < 2))
+ return -EINVAL;
+
+ if (WARN_ON(fwspec->param_count < gpio->gpio.of_gpio_n_cells))
return -EINVAL;
- port = spec[0] / 8;
- pin = spec[0] % 8;
+ port = fwspec->param[0] / 8;
+ pin = fwspec->param[0] % 8;
- if (port >= gpio->soc->num_ports) {
- dev_err(gpio->gpio.parent, "invalid port number: %u\n", port);
+ if (port >= gpio->soc->num_ports)
return -EINVAL;
- }
for (i = 0; i < port; i++)
offset += gpio->soc->ports[i].pins;
- *type = spec[1] & IRQ_TYPE_SENSE_MASK;
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
*hwirq = offset + pin;
return 0;
}
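Worked example of the translation, assuming (as the division implies) that the first interrupt cell encodes port * 8 + pin: a cell value of 21 decodes to port 2, pin 5; with the Tegra186 main ports below, ports A and B contribute 7 pins each, so the hwirq becomes 7 + 7 + 5 = 19.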
-static const struct irq_domain_ops tegra186_gpio_irq_domain_ops = {
- .map = gpiochip_irq_map,
- .unmap = gpiochip_irq_unmap,
- .xlate = tegra186_gpio_irq_domain_xlate,
+static void tegra186_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ struct irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+
+ fwspec->param_count = 3;
+ fwspec->param[0] = gpio->soc->instance;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+}
+
+static int tegra186_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int hwirq,
+ unsigned int type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = chip->irq.child_offset_to_irq(chip, hwirq);
+ *parent_type = type;
+
+ return 0;
+}
+
+static unsigned int tegra186_gpio_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int i;
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ if (offset < gpio->soc->ports[i].pins)
+ break;
+
+ offset -= gpio->soc->ports[i].pins;
+ }
+
+ return offset + i * 8;
+}
+
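This is the inverse mapping: gpiolib offsets are dense (each port contributes only the pins it has), while the parent domain wants the sparse port * 8 + pin number. On the Tegra186 main controller, offset 7 is the first line of port B (port A has 7 pins), so the loop leaves i = 1 and offset = 0 and returns 1 * 8 + 0 = 8.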
+static const struct of_device_id tegra186_pmc_of_match[] = {
+ { .compatible = "nvidia,tegra186-pmc" },
+ { .compatible = "nvidia,tegra194-pmc" },
+ { /* sentinel */ }
};
+static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio)
+{
+ unsigned int i, j;
+ u32 value;
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+ unsigned int offset, p = port->port;
+ void __iomem *base;
+
+ base = gpio->secure + port->bank * 0x1000 + 0x800;
+
+ value = readl(base + TEGRA186_GPIO_CTL_SCR);
+
+ /*
+ * For controllers that haven't been locked down yet, make
+ * sure to program the default interrupt route mapping.
+ */
+ if ((value & TEGRA186_GPIO_CTL_SCR_SEC_REN) == 0 &&
+ (value & TEGRA186_GPIO_CTL_SCR_SEC_WEN) == 0) {
+ for (j = 0; j < 8; j++) {
+ offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, j);
+
+ value = readl(base + offset);
+ value = BIT(port->pins) - 1;
+ writel(value, base + offset);
+ }
+ }
+ }
+}
+
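For a port with 7 pins, BIT(7) - 1 = 0x7f is written to each of the port's eight route-mapping registers, i.e. every valid line is routed; the programming is skipped entirely once firmware has set either of the SCR lock bits.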
static int tegra186_gpio_probe(struct platform_device *pdev)
{
unsigned int i, j, offset;
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
- struct resource *res;
+ struct device_node *np;
char **names;
int err;
@@ -417,8 +541,11 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
- gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
+ if (IS_ERR(gpio->secure))
+ return PTR_ERR(gpio->secure);
+
+ gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
@@ -449,6 +576,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get,
gpio->gpio.set = tegra186_gpio_set;
+ gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.base = -1;
@@ -487,10 +615,15 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_mask = tegra186_irq_mask;
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
+ gpio->intc.irq_set_wake = irq_chip_set_wake_parent;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
- irq->domain_ops = &tegra186_gpio_irq_domain_ops;
+ irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
+ irq->child_to_parent_hwirq = tegra186_gpio_child_to_parent_hwirq;
+ irq->populate_parent_fwspec = tegra186_gpio_populate_parent_fwspec;
+ irq->child_offset_to_irq = tegra186_gpio_child_offset_to_irq;
+ irq->child_irq_domain_ops.translate = tegra186_gpio_irq_domain_translate;
irq->handler = handle_simple_irq;
irq->default_type = IRQ_TYPE_NONE;
irq->parent_handler = tegra186_gpio_irq;
@@ -498,6 +631,17 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
irq->num_parents = gpio->num_irq;
irq->parents = gpio->irq;
+ np = of_find_matching_node(NULL, tegra186_pmc_of_match);
+ if (np) {
+ irq->parent_domain = irq_find_host(np);
+ of_node_put(np);
+
+ if (!irq->parent_domain)
+ return -EPROBE_DEFER;
+ }
+
+ tegra186_gpio_init_route_mapping(gpio);
+
irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio,
sizeof(*irq->map), GFP_KERNEL);
if (!irq->map)
@@ -507,7 +651,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
const struct tegra_gpio_port *port = &gpio->soc->ports[i];
for (j = 0; j < port->pins; j++)
- irq->map[offset + j] = irq->parents[port->irq];
+ irq->map[offset + j] = irq->parents[port->bank];
offset += port->pins;
}
@@ -526,136 +670,140 @@ static int tegra186_gpio_remove(struct platform_device *pdev)
return 0;
}
-#define TEGRA186_MAIN_GPIO_PORT(port, base, count, controller) \
- [TEGRA186_MAIN_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA186_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra186_main_ports[] = {
- TEGRA186_MAIN_GPIO_PORT( A, 0x2000, 7, 2),
- TEGRA186_MAIN_GPIO_PORT( B, 0x3000, 7, 3),
- TEGRA186_MAIN_GPIO_PORT( C, 0x3200, 7, 3),
- TEGRA186_MAIN_GPIO_PORT( D, 0x3400, 6, 3),
- TEGRA186_MAIN_GPIO_PORT( E, 0x2200, 8, 2),
- TEGRA186_MAIN_GPIO_PORT( F, 0x2400, 6, 2),
- TEGRA186_MAIN_GPIO_PORT( G, 0x4200, 6, 4),
- TEGRA186_MAIN_GPIO_PORT( H, 0x1000, 7, 1),
- TEGRA186_MAIN_GPIO_PORT( I, 0x0800, 8, 0),
- TEGRA186_MAIN_GPIO_PORT( J, 0x5000, 8, 5),
- TEGRA186_MAIN_GPIO_PORT( K, 0x5200, 1, 5),
- TEGRA186_MAIN_GPIO_PORT( L, 0x1200, 8, 1),
- TEGRA186_MAIN_GPIO_PORT( M, 0x5600, 6, 5),
- TEGRA186_MAIN_GPIO_PORT( N, 0x0000, 7, 0),
- TEGRA186_MAIN_GPIO_PORT( O, 0x0200, 4, 0),
- TEGRA186_MAIN_GPIO_PORT( P, 0x4000, 7, 4),
- TEGRA186_MAIN_GPIO_PORT( Q, 0x0400, 6, 0),
- TEGRA186_MAIN_GPIO_PORT( R, 0x0a00, 6, 0),
- TEGRA186_MAIN_GPIO_PORT( T, 0x0600, 4, 0),
- TEGRA186_MAIN_GPIO_PORT( X, 0x1400, 8, 1),
- TEGRA186_MAIN_GPIO_PORT( Y, 0x1600, 7, 1),
- TEGRA186_MAIN_GPIO_PORT(BB, 0x2600, 2, 2),
- TEGRA186_MAIN_GPIO_PORT(CC, 0x5400, 4, 5),
+ TEGRA186_MAIN_GPIO_PORT( A, 2, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( B, 3, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( C, 3, 1, 7),
+ TEGRA186_MAIN_GPIO_PORT( D, 3, 2, 6),
+ TEGRA186_MAIN_GPIO_PORT( E, 2, 1, 8),
+ TEGRA186_MAIN_GPIO_PORT( F, 2, 2, 6),
+ TEGRA186_MAIN_GPIO_PORT( G, 4, 1, 6),
+ TEGRA186_MAIN_GPIO_PORT( H, 1, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( I, 0, 4, 8),
+ TEGRA186_MAIN_GPIO_PORT( J, 5, 0, 8),
+ TEGRA186_MAIN_GPIO_PORT( K, 5, 1, 1),
+ TEGRA186_MAIN_GPIO_PORT( L, 1, 1, 8),
+ TEGRA186_MAIN_GPIO_PORT( M, 5, 3, 6),
+ TEGRA186_MAIN_GPIO_PORT( N, 0, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( O, 0, 1, 4),
+ TEGRA186_MAIN_GPIO_PORT( P, 4, 0, 7),
+ TEGRA186_MAIN_GPIO_PORT( Q, 0, 2, 6),
+ TEGRA186_MAIN_GPIO_PORT( R, 0, 5, 6),
+ TEGRA186_MAIN_GPIO_PORT( T, 0, 3, 4),
+ TEGRA186_MAIN_GPIO_PORT( X, 1, 2, 8),
+ TEGRA186_MAIN_GPIO_PORT( Y, 1, 3, 7),
+ TEGRA186_MAIN_GPIO_PORT(BB, 2, 3, 2),
+ TEGRA186_MAIN_GPIO_PORT(CC, 5, 2, 4),
};
static const struct tegra_gpio_soc tegra186_main_soc = {
.num_ports = ARRAY_SIZE(tegra186_main_ports),
.ports = tegra186_main_ports,
.name = "tegra186-gpio",
+ .instance = 0,
};
-#define TEGRA186_AON_GPIO_PORT(port, base, count, controller) \
- [TEGRA186_AON_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA186_AON_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra186_aon_ports[] = {
- TEGRA186_AON_GPIO_PORT( S, 0x0200, 5, 0),
- TEGRA186_AON_GPIO_PORT( U, 0x0400, 6, 0),
- TEGRA186_AON_GPIO_PORT( V, 0x0800, 8, 0),
- TEGRA186_AON_GPIO_PORT( W, 0x0a00, 8, 0),
- TEGRA186_AON_GPIO_PORT( Z, 0x0e00, 4, 0),
- TEGRA186_AON_GPIO_PORT(AA, 0x0c00, 8, 0),
- TEGRA186_AON_GPIO_PORT(EE, 0x0600, 3, 0),
- TEGRA186_AON_GPIO_PORT(FF, 0x0000, 5, 0),
+ TEGRA186_AON_GPIO_PORT( S, 0, 1, 5),
+ TEGRA186_AON_GPIO_PORT( U, 0, 2, 6),
+ TEGRA186_AON_GPIO_PORT( V, 0, 4, 8),
+ TEGRA186_AON_GPIO_PORT( W, 0, 5, 8),
+ TEGRA186_AON_GPIO_PORT( Z, 0, 7, 4),
+ TEGRA186_AON_GPIO_PORT(AA, 0, 6, 8),
+ TEGRA186_AON_GPIO_PORT(EE, 0, 3, 3),
+ TEGRA186_AON_GPIO_PORT(FF, 0, 0, 5),
};
static const struct tegra_gpio_soc tegra186_aon_soc = {
.num_ports = ARRAY_SIZE(tegra186_aon_ports),
.ports = tegra186_aon_ports,
.name = "tegra186-gpio-aon",
+ .instance = 1,
};
-#define TEGRA194_MAIN_GPIO_PORT(port, base, count, controller) \
- [TEGRA194_MAIN_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA194_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra194_main_ports[] = {
- TEGRA194_MAIN_GPIO_PORT( A, 0x1400, 8, 1),
- TEGRA194_MAIN_GPIO_PORT( B, 0x4e00, 2, 4),
- TEGRA194_MAIN_GPIO_PORT( C, 0x4600, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( D, 0x4800, 4, 4),
- TEGRA194_MAIN_GPIO_PORT( E, 0x4a00, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( F, 0x4c00, 6, 4),
- TEGRA194_MAIN_GPIO_PORT( G, 0x4000, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( H, 0x4200, 8, 4),
- TEGRA194_MAIN_GPIO_PORT( I, 0x4400, 5, 4),
- TEGRA194_MAIN_GPIO_PORT( J, 0x5200, 6, 5),
- TEGRA194_MAIN_GPIO_PORT( K, 0x3000, 8, 3),
- TEGRA194_MAIN_GPIO_PORT( L, 0x3200, 4, 3),
- TEGRA194_MAIN_GPIO_PORT( M, 0x2600, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( N, 0x2800, 3, 2),
- TEGRA194_MAIN_GPIO_PORT( O, 0x5000, 6, 5),
- TEGRA194_MAIN_GPIO_PORT( P, 0x2a00, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( Q, 0x2c00, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( R, 0x2e00, 6, 2),
- TEGRA194_MAIN_GPIO_PORT( S, 0x3600, 8, 3),
- TEGRA194_MAIN_GPIO_PORT( T, 0x3800, 8, 3),
- TEGRA194_MAIN_GPIO_PORT( U, 0x3a00, 1, 3),
- TEGRA194_MAIN_GPIO_PORT( V, 0x1000, 8, 1),
- TEGRA194_MAIN_GPIO_PORT( W, 0x1200, 2, 1),
- TEGRA194_MAIN_GPIO_PORT( X, 0x2000, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( Y, 0x2200, 8, 2),
- TEGRA194_MAIN_GPIO_PORT( Z, 0x2400, 8, 2),
- TEGRA194_MAIN_GPIO_PORT(FF, 0x3400, 2, 3),
- TEGRA194_MAIN_GPIO_PORT(GG, 0x0000, 2, 0)
+ TEGRA194_MAIN_GPIO_PORT( A, 1, 2, 8),
+ TEGRA194_MAIN_GPIO_PORT( B, 4, 7, 2),
+ TEGRA194_MAIN_GPIO_PORT( C, 4, 3, 8),
+ TEGRA194_MAIN_GPIO_PORT( D, 4, 4, 4),
+ TEGRA194_MAIN_GPIO_PORT( E, 4, 5, 8),
+ TEGRA194_MAIN_GPIO_PORT( F, 4, 6, 6),
+ TEGRA194_MAIN_GPIO_PORT( G, 4, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( H, 4, 1, 8),
+ TEGRA194_MAIN_GPIO_PORT( I, 4, 2, 5),
+ TEGRA194_MAIN_GPIO_PORT( J, 5, 1, 6),
+ TEGRA194_MAIN_GPIO_PORT( K, 3, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( L, 3, 1, 4),
+ TEGRA194_MAIN_GPIO_PORT( M, 2, 3, 8),
+ TEGRA194_MAIN_GPIO_PORT( N, 2, 4, 3),
+ TEGRA194_MAIN_GPIO_PORT( O, 5, 0, 6),
+ TEGRA194_MAIN_GPIO_PORT( P, 2, 5, 8),
+ TEGRA194_MAIN_GPIO_PORT( Q, 2, 6, 8),
+ TEGRA194_MAIN_GPIO_PORT( R, 2, 7, 6),
+ TEGRA194_MAIN_GPIO_PORT( S, 3, 3, 8),
+ TEGRA194_MAIN_GPIO_PORT( T, 3, 4, 8),
+ TEGRA194_MAIN_GPIO_PORT( U, 3, 5, 1),
+ TEGRA194_MAIN_GPIO_PORT( V, 1, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( W, 1, 1, 2),
+ TEGRA194_MAIN_GPIO_PORT( X, 2, 0, 8),
+ TEGRA194_MAIN_GPIO_PORT( Y, 2, 1, 8),
+ TEGRA194_MAIN_GPIO_PORT( Z, 2, 2, 8),
+ TEGRA194_MAIN_GPIO_PORT(FF, 3, 2, 2),
+ TEGRA194_MAIN_GPIO_PORT(GG, 0, 0, 2)
};
static const struct tegra_gpio_soc tegra194_main_soc = {
.num_ports = ARRAY_SIZE(tegra194_main_ports),
.ports = tegra194_main_ports,
.name = "tegra194-gpio",
+ .instance = 0,
};
-#define TEGRA194_AON_GPIO_PORT(port, base, count, controller) \
- [TEGRA194_AON_GPIO_PORT_##port] = { \
- .name = #port, \
- .offset = base, \
- .pins = count, \
- .irq = controller, \
+#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA194_AON_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
static const struct tegra_gpio_port tegra194_aon_ports[] = {
- TEGRA194_AON_GPIO_PORT(AA, 0x0600, 8, 0),
- TEGRA194_AON_GPIO_PORT(BB, 0x0800, 4, 0),
- TEGRA194_AON_GPIO_PORT(CC, 0x0200, 8, 0),
- TEGRA194_AON_GPIO_PORT(DD, 0x0400, 3, 0),
- TEGRA194_AON_GPIO_PORT(EE, 0x0000, 7, 0)
+ TEGRA194_AON_GPIO_PORT(AA, 0, 3, 8),
+ TEGRA194_AON_GPIO_PORT(BB, 0, 4, 4),
+ TEGRA194_AON_GPIO_PORT(CC, 0, 1, 8),
+ TEGRA194_AON_GPIO_PORT(DD, 0, 2, 3),
+ TEGRA194_AON_GPIO_PORT(EE, 0, 0, 7)
};
static const struct tegra_gpio_soc tegra194_aon_soc = {
.num_ports = ARRAY_SIZE(tegra194_aon_ports),
.ports = tegra194_aon_ports,
.name = "tegra194-gpio-aon",
+ .instance = 1,
};
static const struct of_device_id tegra186_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index ddad5c7ea617..d08d86a22b1f 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -169,7 +169,10 @@ static int thunderx_gpio_get_direction(struct gpio_chip *chip, unsigned int line
bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
- return !(bit_cfg & GPIO_BIT_CFG_TX_OE);
+ if (bit_cfg & GPIO_BIT_CFG_TX_OE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int thunderx_gpio_set_config(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index c8b34d787eed..99d5a84a9129 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -39,7 +39,7 @@ static int tpic2810_get_direction(struct gpio_chip *chip,
unsigned offset)
{
/* This device always output */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int tpic2810_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 2eea98ff4ea3..1e9d8262d0ff 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -21,7 +21,7 @@ static int tps65086_gpio_get_direction(struct gpio_chip *chip,
unsigned offset)
{
/* This device is output only */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int tps65086_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 3ad68bd78282..510d9ed9fd2a 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -32,9 +32,9 @@ static int tps65912_gpio_get_direction(struct gpio_chip *gc,
return ret;
if (val & GPIO_CFG_MASK)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
else
- return 1;
+ return GPIO_LINE_DIRECTION_IN;
}
static int tps65912_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index aff6e504c666..f7f5f770e0fb 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -47,7 +47,6 @@ static int tps68470_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(val & BIT(offset));
}
-/* Return 0 if output, 1 if input */
static int tps68470_gpio_get_direction(struct gpio_chip *gc,
unsigned int offset)
{
@@ -57,7 +56,7 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(regmap, TPS68470_GPIO_CTL_REG_A(offset), &val);
if (ret) {
@@ -67,7 +66,8 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
}
val &= TPS68470_GPIO_MODE_MASK;
- return val >= TPS68470_GPIO_MODE_OUT_CMOS ? 0 : 1;
+ return val >= TPS68470_GPIO_MODE_OUT_CMOS ? GPIO_LINE_DIRECTION_OUT :
+ GPIO_LINE_DIRECTION_IN;
}
static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index a3109bcaa0ac..5022e0ad0fae 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -101,7 +101,10 @@ static int tqmx86_gpio_direction_output(struct gpio_chip *chip,
static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- return !!(TQMX86_DIR_INPUT_MASK & BIT(offset));
+ if (TQMX86_DIR_INPUT_MASK & BIT(offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static void tqmx86_gpio_irq_mask(struct irq_data *data)
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 1da8d0586329..d885032cf814 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -44,7 +44,10 @@ static int ts4900_gpio_get_direction(struct gpio_chip *chip,
regmap_read(priv->regmap, offset, &reg);
- return !(reg & TS4900_GPIO_OE);
+ if (reg & TS4900_GPIO_OE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int ts4900_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index fbfb648d3502..de249726230e 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -165,10 +165,10 @@ static int twl4030_get_gpio_direction(int gpio)
if (ret < 0)
return ret;
- /* 1 = output, but gpiolib semantics are inverse so invert */
- ret = !(ret & d_msk);
+ if (ret & d_msk)
+ return GPIO_LINE_DIRECTION_OUT;
- return ret;
+ return GPIO_LINE_DIRECTION_IN;
}
static int twl4030_set_gpio_dataout(int gpio, int enable)
@@ -380,10 +380,10 @@ static int twl_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
/*
- * Default 0 = output
+ * Default GPIO_LINE_DIRECTION_OUT
* LED GPIOs >= TWL4030_GPIO_MAX are always output
*/
- int ret = 0;
+ int ret = GPIO_LINE_DIRECTION_OUT;
mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX) {
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index c845b2ff1f43..648fb418d775 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -34,8 +34,7 @@ static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset)
{
- /* This means "out" */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 93cdcc41e9fb..bd203e8fa58e 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -113,7 +113,10 @@ static int uniphier_gpio_offset_read(struct gpio_chip *chip,
static int uniphier_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR);
+ if (uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int uniphier_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 444fe9e7f04a..8b481b3c1ebe 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -170,13 +170,16 @@ static int wcove_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
int ret, reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(wg->regmap, reg, &val);
if (ret)
return ret;
- return !(val & CTLO_DIR_OUT);
+ if (val & CTLO_DIR_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index e0ef66b6a237..fe456bea81f6 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -56,7 +56,10 @@ static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
const unsigned port = offset / 8;
const unsigned mask = BIT(offset % 8);
- return !!(ws16c48gpio->io_state[port] & mask);
+ if (ws16c48gpio->io_state[port] & mask)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 2918363884de..532b0df8a1f2 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -80,7 +80,10 @@ static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
bank_offset = GPIO_SET_DR_OFFSET + GPIO_BANK_OFFSET(offset);
bit_offset = GPIO_BIT_OFFSET(offset);
- return !!(ioread32(chip->base + bank_offset) & BIT(bit_offset));
+ if (ioread32(chip->base + bank_offset) & BIT(bit_offset))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int xgene_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -155,28 +158,16 @@ static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
static int xgene_gpio_probe(struct platform_device *pdev)
{
- struct resource *res;
struct xgene_gpio *gpio;
int err = 0;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio) {
- err = -ENOMEM;
- goto err;
- }
+ if (!gpio)
+ return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- goto err;
- }
-
- gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!gpio->base) {
- err = -ENOMEM;
- goto err;
- }
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
gpio->chip.ngpio = XGENE_MAX_GPIOS;
@@ -196,14 +187,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"failed to register gpiochip.\n");
- goto err;
+ return err;
}
dev_info(&pdev->dev, "X-Gene GPIO driver registered.\n");
return 0;
-err:
- dev_err(&pdev->dev, "X-Gene GPIO driver registration failed.\n");
- return err;
}
static const struct of_device_id xgene_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
new file mode 100644
index 000000000000..773e5c24309e
--- /dev/null
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Broadcom
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define IPROC_CCA_INT_F_GPIOINT BIT(0)
+#define IPROC_CCA_INT_STS 0x20
+#define IPROC_CCA_INT_MASK 0x24
+
+#define IPROC_GPIO_CCA_DIN 0x0
+#define IPROC_GPIO_CCA_DOUT 0x4
+#define IPROC_GPIO_CCA_OUT_EN 0x8
+#define IPROC_GPIO_CCA_INT_LEVEL 0x10
+#define IPROC_GPIO_CCA_INT_LEVEL_MASK 0x14
+#define IPROC_GPIO_CCA_INT_EVENT 0x18
+#define IPROC_GPIO_CCA_INT_EVENT_MASK 0x1C
+#define IPROC_GPIO_CCA_INT_EDGE 0x24
+
+struct iproc_gpio_chip {
+ struct irq_chip irqchip;
+ struct gpio_chip gc;
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *intr;
+};
+
+static inline struct iproc_gpio_chip *
+to_iproc_gpio(struct gpio_chip *gc)
+{
+ return container_of(gc, struct iproc_gpio_chip, gc);
+}
+
+static void iproc_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 irq_type, event_status = 0;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ irq_type = irq_get_trigger_type(irq);
+ if (irq_type & IRQ_TYPE_EDGE_BOTH) {
+ event_status |= BIT(pin);
+ writel_relaxed(event_status,
+ chip->base + IPROC_GPIO_CCA_INT_EVENT);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static void iproc_gpio_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 int_mask, irq_type, event_mask;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ irq_type = irq_get_trigger_type(irq);
+ event_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ int_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+
+ if (irq_type & IRQ_TYPE_EDGE_BOTH) {
+ event_mask |= 1 << pin;
+ writel_relaxed(event_mask,
+ chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ } else {
+ int_mask |= 1 << pin;
+ writel_relaxed(int_mask,
+ chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static void iproc_gpio_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 irq_type, int_mask, event_mask;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ irq_type = irq_get_trigger_type(irq);
+ event_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ int_mask = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+
+ if (irq_type & IRQ_TYPE_EDGE_BOTH) {
+ event_mask &= ~BIT(pin);
+ writel_relaxed(event_mask,
+ chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ } else {
+ int_mask &= ~BIT(pin);
+ writel_relaxed(int_mask,
+ chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+}
+
+static int iproc_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int pin = d->hwirq;
+ unsigned long flags;
+ u32 irq = d->irq;
+ u32 event_pol, int_pol;
+ int ret = 0;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ event_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ event_pol &= ~BIT(pin);
+ writel_relaxed(event_pol, chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ event_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ event_pol |= BIT(pin);
+ writel_relaxed(event_pol, chip->base + IPROC_GPIO_CCA_INT_EDGE);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ int_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ int_pol &= ~BIT(pin);
+ writel_relaxed(int_pol, chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ int_pol = readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ int_pol |= BIT(pin);
+ writel_relaxed(int_pol, chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ break;
+ default:
+ /* should never get here */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(irq_get_irq_data(irq), handle_level_irq);
+ else if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(irq_get_irq_data(irq), handle_edge_irq);
+
+out_unlock:
+ spin_unlock_irqrestore(&chip->lock, flags);
+
+ return ret;
+}
+
+static irqreturn_t iproc_gpio_irq_handler(int irq, void *data)
+{
+ struct gpio_chip *gc = (struct gpio_chip *)data;
+ struct iproc_gpio_chip *chip = to_iproc_gpio(gc);
+ int bit;
+ unsigned long int_bits = 0;
+ u32 int_status;
+
+ /* go through all GPIOs and handle any pending interrupts */
+ int_status = readl_relaxed(chip->intr + IPROC_CCA_INT_STS);
+ if (int_status & IPROC_CCA_INT_F_GPIOINT) {
+ u32 event, level;
+
+ /* Get level and edge interrupts */
+ event =
+ readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT_MASK);
+ event &= readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_EVENT);
+ level = readl_relaxed(chip->base + IPROC_GPIO_CCA_DIN);
+ level ^= readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL);
+ level &=
+ readl_relaxed(chip->base + IPROC_GPIO_CCA_INT_LEVEL_MASK);
+ int_bits = level | event;
+
+ for_each_set_bit(bit, &int_bits, gc->ngpio)
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, bit));
+ }
+
+ return int_bits ? IRQ_HANDLED : IRQ_NONE;
+}
+
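Because the CCA interrupt line is requested with IRQF_SHARED further down, the IRQ_NONE return for the no-bits-pending case matters: it tells the core this device did not raise the interrupt, so the other handlers sharing the line get to run and spurious-interrupt accounting stays correct.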
+static int iproc_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ struct iproc_gpio_chip *chip;
+ u32 num_gpios;
+ int irq, ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ platform_set_drvdata(pdev, chip);
+ spin_lock_init(&chip->lock);
+
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(chip->base))
+ return PTR_ERR(chip->base);
+
+ ret = bgpio_init(&chip->gc, dev, 4,
+ chip->base + IPROC_GPIO_CCA_DIN,
+ chip->base + IPROC_GPIO_CCA_DOUT,
+ NULL,
+ chip->base + IPROC_GPIO_CCA_OUT_EN,
+ NULL,
+ 0);
+ if (ret) {
+ dev_err(dev, "unable to init GPIO chip\n");
+ return ret;
+ }
+
+ chip->gc.label = dev_name(dev);
+ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
+ chip->gc.ngpio = num_gpios;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0) {
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
+ u32 val;
+
+ irqc = &chip->irqchip;
+ irqc->name = dev_name(dev);
+ irqc->irq_ack = iproc_gpio_irq_ack;
+ irqc->irq_mask = iproc_gpio_irq_mask;
+ irqc->irq_unmask = iproc_gpio_irq_unmask;
+ irqc->irq_set_type = iproc_gpio_irq_set_type;
+
+ chip->intr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(chip->intr))
+ return PTR_ERR(chip->intr);
+
+ /* Enable GPIO interrupts for CCA GPIO */
+ val = readl_relaxed(chip->intr + IPROC_CCA_INT_MASK);
+ val |= IPROC_CCA_INT_F_GPIOINT;
+ writel_relaxed(val, chip->intr + IPROC_CCA_INT_MASK);
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler to gpiochip_set_chained_irqchip,
+ * because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
+ IRQF_SHARED, chip->gc.label, &chip->gc);
+ if (ret) {
+ dev_err(dev, "Fail to request IRQ%d: %d\n", irq, ret);
+ return ret;
+ }
+
+ girq = &chip->gc.irq;
+ girq->chip = irqc;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
+
+ ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ if (ret) {
+ dev_err(dev, "unable to add GPIO chip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iproc_gpio_remove(struct platform_device *pdev)
+{
+ struct iproc_gpio_chip *chip;
+
+ chip = platform_get_drvdata(pdev);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->intr) {
+ u32 val;
+
+ val = readl_relaxed(chip->intr + IPROC_CCA_INT_MASK);
+ val &= ~IPROC_CCA_INT_F_GPIOINT;
+ writel_relaxed(val, chip->intr + IPROC_CCA_INT_MASK);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm_iproc_gpio_of_match[] = {
+ { .compatible = "brcm,iproc-gpio-cca" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_gpio_of_match);
+
+static struct platform_driver bcm_iproc_gpio_driver = {
+ .driver = {
+ .name = "iproc-xgs-gpio",
+ .of_match_table = bcm_iproc_gpio_of_match,
+ },
+ .probe = iproc_gpio_probe,
+ .remove = iproc_gpio_remove,
+};
+
+module_platform_driver(bcm_iproc_gpio_driver);
+
+MODULE_DESCRIPTION("XGS IPROC GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 05f1998c11a4..31b5072b2df0 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -83,7 +83,10 @@ static int xra1403_get_direction(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- return !!(val & BIT(offset % 8));
+ if (val & BIT(offset % 8))
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
}
static int xra1403_get(struct gpio_chip *chip, unsigned int offset)
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 43d3fa5f511a..08d7c3b32038 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -72,7 +72,7 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
- return 1; /* input only */
+ return GPIO_LINE_DIRECTION_IN; /* input only */
}
static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset)
@@ -95,7 +95,7 @@ static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset,
static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset)
{
- return 0; /* output only */
+ return GPIO_LINE_DIRECTION_OUT; /* output only */
}
static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index cd475ff4bcad..4c3f6370eab4 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -360,7 +360,7 @@ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
*
* This function returns the direction of the specified GPIO.
*
- * Return: 0 for output, 1 for input
+ * Return: GPIO_LINE_DIRECTION_OUT or GPIO_LINE_DIRECTION_IN
*/
static int zynq_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
{
@@ -372,7 +372,10 @@ static int zynq_gpio_get_direction(struct gpio_chip *chip, unsigned int pin)
reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
- return !(reg & BIT(bank_pin_num));
+ if (reg & BIT(bank_pin_num))
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
}
/**
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 59ccfd24627d..d30e57dc755c 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -194,6 +194,7 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
acpi_gpiochip_request_irq(acpi_gpio, event);
}
+/* Always returns AE_OK so that we keep looping over the resources */
static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
void *context)
{
@@ -230,19 +231,25 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event",
GPIO_ACTIVE_HIGH, GPIOD_IN);
if (IS_ERR(desc)) {
- dev_err(chip->parent, "Failed to request GPIO\n");
- return AE_ERROR;
+ dev_err(chip->parent,
+ "Failed to request GPIO for pin 0x%04X, err %ld\n",
+ pin, PTR_ERR(desc));
+ return AE_OK;
}
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
- dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
+ dev_err(chip->parent,
+ "Failed to lock GPIO pin 0x%04X as interrupt, err %d\n",
+ pin, ret);
goto fail_free_desc;
}
irq = gpiod_to_irq(desc);
if (irq < 0) {
- dev_err(chip->parent, "Failed to translate GPIO to IRQ\n");
+ dev_err(chip->parent,
+ "Failed to translate GPIO pin 0x%04X to IRQ, err %d\n",
+ pin, irq);
goto fail_unlock_irq;
}
@@ -287,7 +294,7 @@ fail_unlock_irq:
fail_free_desc:
gpiochip_free_own_desc(desc);
- return AE_ERROR;
+ return AE_OK;
}
/**
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 80ea49f570f4..dc27b1a88e93 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -27,7 +27,7 @@
* This is used by external users of of_gpio_count() from <linux/of_gpio.h>
*
* FIXME: get rid of those external users by converting them to GPIO
- * descriptors and let them all use gpiod_get_count()
+ * descriptors and let them all use gpiod_count()
*/
int of_gpio_get_count(struct device *dev, const char *con_id)
{
@@ -84,8 +84,9 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
/**
* of_gpio_need_valid_mask() - figure out if the OF GPIO driver needs
* to set the .valid_mask
- * @dev: the device for the GPIO provider
- * @return: true if the valid mask needs to be set
+ * @gc: the target gpio_chip
+ *
+ * Return: true if the valid mask needs to be set
*/
bool of_gpio_need_valid_mask(const struct gpio_chip *gc)
{
@@ -134,18 +135,20 @@ static void of_gpio_flags_quirks(struct device_node *np,
(!(strcmp(propname, "enable-gpio") &&
strcmp(propname, "enable-gpios")) &&
of_device_is_compatible(np, "regulator-gpio")))) {
+ bool active_low = !of_property_read_bool(np,
+ "enable-active-high");
/*
* The regulator GPIO handles are specified such that the
* presence or absence of "enable-active-high" solely controls
* the polarity of the GPIO line. Any phandle flags must
* be actively ignored.
*/
- if (*flags & OF_GPIO_ACTIVE_LOW) {
+ if ((*flags & OF_GPIO_ACTIVE_LOW) && !active_low) {
pr_warn("%s GPIO handle specifies active low - ignored\n",
of_node_full_name(np));
*flags &= ~OF_GPIO_ACTIVE_LOW;
}
- if (!of_property_read_bool(np, "enable-active-high"))
+ if (active_low)
*flags |= OF_GPIO_ACTIVE_LOW;
}
/*
@@ -882,16 +885,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
of_node_get(chip->of_node);
ret = of_gpiochip_scan_gpios(chip);
- if (ret) {
+ if (ret)
of_node_put(chip->of_node);
- gpiochip_remove_pin_ranges(chip);
- }
return ret;
}
void of_gpiochip_remove(struct gpio_chip *chip)
{
- gpiochip_remove_pin_ranges(chip);
of_node_put(chip->of_node);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index fb33ff6fc1a9..9913886ede90 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -390,6 +390,14 @@ static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip)
gpiochip->valid_mask = NULL;
}
+static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
+{
+ if (gc->add_pin_ranges)
+ return gc->add_pin_ranges(gc);
+
+ return 0;
+}
+
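A hypothetical user of the new hook, for a driver whose sixteen GPIOs sit at pins 32..47 of its pin controller (the names are illustrative; gpiochip_add_pin_range() is the long-standing gpiolib API the hook is expected to call):

static int example_add_pin_ranges(struct gpio_chip *gc)
{
	/* GPIO offsets 0..15 map onto pins 32..47 of "example-pinctrl" */
	return gpiochip_add_pin_range(gc, "example-pinctrl", 0, 32, 16);
}

The driver then sets gc->add_pin_ranges = example_add_pin_ranges before calling gpiochip_add_data(), and gpiolib invokes it at the right point during registration (see gpiochip_add_data_with_key() below).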
bool gpiochip_line_is_valid(const struct gpio_chip *gpiochip,
unsigned int offset)
{
@@ -422,9 +430,127 @@ struct linehandle_state {
(GPIOHANDLE_REQUEST_INPUT | \
GPIOHANDLE_REQUEST_OUTPUT | \
GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
+ GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
+ GPIOHANDLE_REQUEST_BIAS_DISABLE | \
GPIOHANDLE_REQUEST_OPEN_DRAIN | \
GPIOHANDLE_REQUEST_OPEN_SOURCE)
+static int linehandle_validate_flags(u32 flags)
+{
+ /* Return an error if an unknown flag is set */
+ if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
+ return -EINVAL;
+
+ /*
+ * Do not allow both INPUT & OUTPUT flags to be set as they are
+ * contradictory.
+ */
+ if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
+ (flags & GPIOHANDLE_REQUEST_OUTPUT))
+ return -EINVAL;
+
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+ * electrical result would be disastrous.
+ */
+ if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
+ (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
+ /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
+ if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
+ ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
+ return -EINVAL;
+
+ /* Bias flags only allowed for input or output mode. */
+ if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
+ (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
+ ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
+ (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
+ (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
+ return -EINVAL;
+
+ /* Only one bias flag can be set. */
+ if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
+ (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
+ GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
+ ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
+ (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
+ return -EINVAL;
+
+ return 0;
+}
+
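Factoring the checks out means both entry points — the original linehandle_create() and the new linehandle_set_config() just below — reject the same nonsense uniformly: INPUT together with OUTPUT, OPEN_DRAIN with OPEN_SOURCE, drain/source flags without OUTPUT, bias flags without a direction, or more than one bias flag, all with -EINVAL.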
+static void linehandle_configure_flag(unsigned long *flagsp,
+ u32 bit, bool active)
+{
+ if (active)
+ set_bit(bit, flagsp);
+ else
+ clear_bit(bit, flagsp);
+}
+
+static long linehandle_set_config(struct linehandle_state *lh,
+ void __user *ip)
+{
+ struct gpiohandle_config gcnf;
+ struct gpio_desc *desc;
+ int i, ret;
+ u32 lflags;
+ unsigned long *flagsp;
+
+ if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
+ return -EFAULT;
+
+ lflags = gcnf.flags;
+ ret = linehandle_validate_flags(lflags);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lh->numdescs; i++) {
+ desc = lh->descs[i];
+ flagsp = &desc->flags;
+
+ linehandle_configure_flag(flagsp, FLAG_ACTIVE_LOW,
+ lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
+
+ linehandle_configure_flag(flagsp, FLAG_OPEN_DRAIN,
+ lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
+
+ linehandle_configure_flag(flagsp, FLAG_OPEN_SOURCE,
+ lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
+
+ linehandle_configure_flag(flagsp, FLAG_PULL_UP,
+ lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
+
+ linehandle_configure_flag(flagsp, FLAG_PULL_DOWN,
+ lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
+
+ linehandle_configure_flag(flagsp, FLAG_BIAS_DISABLE,
+ lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
+
+ /*
+ * Lines have to be requested explicitly for input
+ * or output, else the line will be treated "as is".
+ */
+ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ int val = !!gcnf.default_values[i];
+
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ return ret;
+ } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
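From userspace, the new ioctl reconfigures an already-requested handle in place; a minimal sketch (error handling elided, the chip path and line offset are placeholders):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiohandle_request req;
	struct gpiohandle_config cfg;
	int fd = open("/dev/gpiochip0", O_RDWR);	/* placeholder chip */

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = 4;				/* placeholder line */
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
	ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);

	/* Flip the live handle to an input with pull-up, no re-request needed. */
	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_BIAS_PULL_UP;
	ioctl(req.fd, GPIOHANDLE_SET_CONFIG_IOCTL, &cfg);
	return 0;
}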
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -475,6 +601,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
lh->descs,
NULL,
vals);
+ } else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) {
+ return linehandle_set_config(lh, ip);
}
return -EINVAL;
}
@@ -526,32 +654,9 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lflags = handlereq.flags;
- /* Return an error if an unknown flag is set */
- if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
- return -EINVAL;
-
- /*
- * Do not allow both INPUT & OUTPUT flags to be set as they are
- * contradictory.
- */
- if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
- (lflags & GPIOHANDLE_REQUEST_OUTPUT))
- return -EINVAL;
-
- /*
- * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
- * the hardware actually supports enabling both at the same time the
- * electrical result would be disastrous.
- */
- if ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
- return -EINVAL;
-
- /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
- if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
- ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
- return -EINVAL;
+ ret = linehandle_validate_flags(lflags);
+ if (ret)
+ return ret;
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
@@ -593,6 +698,12 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)
+ set_bit(FLAG_PULL_DOWN, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)
+ set_bit(FLAG_PULL_UP, &desc->flags);
ret = gpiod_set_transitory(desc, false);
if (ret < 0)
@@ -895,6 +1006,32 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
return -EFAULT;
+ offset = eventreq.lineoffset;
+ lflags = eventreq.handleflags;
+ eflags = eventreq.eventflags;
+
+ if (offset >= gdev->ngpio)
+ return -EINVAL;
+
+ /* Return an error if an unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
+ return -EINVAL;
+
+ /* This is just wrong: we don't look for events on output lines */
+ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
+ /* Only one bias flag can be set. */
+ if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
+ (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
+ GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
+ ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
+ (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
+ return -EINVAL;
+
le = kzalloc(sizeof(*le), GFP_KERNEL);
if (!le)
return -ENOMEM;
@@ -912,30 +1049,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- offset = eventreq.lineoffset;
- lflags = eventreq.handleflags;
- eflags = eventreq.eventflags;
-
- if (offset >= gdev->ngpio) {
- ret = -EINVAL;
- goto out_free_label;
- }
-
- /* Return an error if a unknown flag is set */
- if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
- (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
- ret = -EINVAL;
- goto out_free_label;
- }
-
- /* This is just wrong: we don't look for events on output lines */
- if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
- (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
- ret = -EINVAL;
- goto out_free_label;
- }
-
desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
@@ -945,6 +1058,12 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE)
+ set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)
+ set_bit(FLAG_PULL_DOWN, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)
+ set_bit(FLAG_PULL_UP, &desc->flags);
ret = gpiod_direction_input(desc);
if (ret)
@@ -1098,6 +1217,12 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_BIAS_DISABLE;
+ if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
+ if (test_bit(FLAG_PULL_UP, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
@@ -1403,15 +1528,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
}
}
+ ret = gpiochip_add_pin_ranges(chip);
+ if (ret)
+ goto err_remove_of_chip;
+
acpi_gpiochip_add(chip);
machine_gpiochip_add(chip);
- ret = gpiochip_irqchip_init_hw(chip);
+ ret = gpiochip_irqchip_init_valid_mask(chip);
if (ret)
goto err_remove_acpi_chip;
- ret = gpiochip_irqchip_init_valid_mask(chip);
+ ret = gpiochip_irqchip_init_hw(chip);
if (ret)
goto err_remove_acpi_chip;
@@ -1444,6 +1573,7 @@ err_remove_of_chip:
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
err_free_gpiochip_mask:
+ gpiochip_remove_pin_ranges(chip);
gpiochip_free_valid_mask(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
@@ -1499,8 +1629,8 @@ void gpiochip_remove(struct gpio_chip *chip)
gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
acpi_gpiochip_remove(chip);
- gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ gpiochip_remove_pin_ranges(chip);
gpiochip_free_valid_mask(chip);
/*
* We accept no more calls into the driver from this point, so
@@ -1539,7 +1669,7 @@ static void devm_gpio_chip_release(struct device *dev, void *res)
}
/**
- * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
+ * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
* @dev: pointer to the device that gpio_chip belongs to.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
@@ -2790,6 +2920,9 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
clear_bit(FLAG_REQUESTED, &desc->flags);
clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ clear_bit(FLAG_PULL_UP, &desc->flags);
+ clear_bit(FLAG_PULL_DOWN, &desc->flags);
+ clear_bit(FLAG_BIAS_DISABLE, &desc->flags);
clear_bit(FLAG_IS_HOGGED, &desc->flags);
ret = true;
}
@@ -2916,6 +3049,7 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
unsigned arg;
switch (mode) {
+ case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
arg = 1;
@@ -2929,6 +3063,26 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
+static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
+{
+ int bias = 0;
+ int ret = 0;
+
+ if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
+ bias = PIN_CONFIG_BIAS_DISABLE;
+ else if (test_bit(FLAG_PULL_UP, &desc->flags))
+ bias = PIN_CONFIG_BIAS_PULL_UP;
+ else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
+ bias = PIN_CONFIG_BIAS_PULL_DOWN;
+
+ if (bias) {
+ ret = gpio_set_config(chip, gpio_chip_hwgpio(desc), bias);
+ if (ret != -ENOTSUPP)
+ return ret;
+ }
+ return 0;
+}
+
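On the controller side these bias settings arrive through .set_config as generic pinconf values with argument 1 — gpio_set_config() above packs the equivalent of pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1) — and a driver that cannot honour the bias returns -ENOTSUPP, which this helper deliberately swallows so that direction changes still succeed on bias-less hardware.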
/**
* gpiod_direction_input - set the GPIO direction to input
* @desc: GPIO to set to input
@@ -2973,15 +3127,10 @@ int gpiod_direction_input(struct gpio_desc *desc)
__func__);
return -EIO;
}
- if (ret == 0)
+ if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
-
- if (test_bit(FLAG_PULL_UP, &desc->flags))
- gpio_set_config(chip, gpio_chip_hwgpio(desc),
- PIN_CONFIG_BIAS_PULL_UP);
- else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
- gpio_set_config(chip, gpio_chip_hwgpio(desc),
- PIN_CONFIG_BIAS_PULL_DOWN);
+ ret = gpio_set_bias(chip, desc);
+ }
trace_gpio_direction(desc_to_gpio(desc), 1, ret);
@@ -3111,6 +3260,9 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
}
set_output_value:
+ ret = gpio_set_bias(gc, desc);
+ if (ret)
+ return ret;
return gpiod_direction_output_raw_commit(desc, value);
set_output_flag:
@@ -4742,9 +4894,9 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
pr_info("GPIO line %d (%s) hogged as %s%s\n",
desc_to_gpio(desc), name,
- (dflags&GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
- (dflags&GPIOD_FLAGS_BIT_DIR_OUT) ?
- (dflags&GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low":"");
+ (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
+ (dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
+ (dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
return 0;
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index b8b10a409c7b..ca9bc1e4803c 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -110,6 +110,7 @@ struct gpio_desc {
#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
/* Connection label */
const char *label;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e67c194c2aca..1168351267fd 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER
help
FBDEV helpers for KMS drivers.
+config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+ bool "Enable refcount backtrace history in the DP MST helpers"
+ select STACKDEPOT
+ depends on DRM_KMS_HELPER
+ depends on DEBUG_KERNEL
+ depends on EXPERT
+ help
+ Enables debug tracing for topology refs in DRM's DP MST helpers. A
+ history of each topology reference/dereference will be printed to the
+ kernel log once a port or branch device's topology refcount reaches 0.
+
+ This has the potential to use a lot of memory and print some very
+ large kernel messages. If in doubt, say "N".
+
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
@@ -165,13 +179,26 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
+config DRM_TTM_DMA_PAGE_POOL
+ bool
+ depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
+ default y
+ help
+ Choose this if you need the TTM dma page pool
+
config DRM_VRAM_HELPER
tristate
depends on DRM
- select DRM_TTM
help
Helpers for VRAM memory management
+config DRM_TTM_HELPER
+ tristate
+ depends on DRM
+ select DRM_TTM
+ help
+ Helpers for ttm-based gem objects
+
config DRM_GEM_CMA_HELPER
bool
depends on DRM
@@ -226,9 +253,9 @@ config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI && MMU
select FW_LOADER
- select DRM_KMS_HELPER
+ select DRM_KMS_HELPER
select DRM_SCHED
- select DRM_TTM
+ select DRM_TTM
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
@@ -257,6 +284,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select CRC32
default n
help
Virtual Kernel Mode-Setting (VKMS) is used for testing or for
@@ -397,7 +425,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
- # !PREEMPT because of missing ioctl locking
+ # !PREEMPTION because of missing ioctl locking
depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
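
On the DRM_DEBUG_DP_MST_TOPOLOGY_REFS option added above: STACKDEPOT deduplicates and stores backtraces compactly, which is what makes keeping a full reference/dereference history affordable until a refcount reaches zero. A generic sketch of the stack depot idiom it builds on; this is illustrative code, not the MST helpers themselves:

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_ref_backtrace(void)
{
        unsigned long entries[16];
        unsigned int nr;

        /* Capture the current call chain, skipping this helper's frame. */
        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

        /* The depot returns a small handle; identical traces share storage. */
        return stack_depot_save(entries, nr, GFP_KERNEL);
}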
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 82ff826b33cc..9f1c7c486f88 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,10 +33,12 @@ drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o \
- drm_vram_mm_helper.o
+ drm_vram_helper_common.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
+drm_ttm_helper-y := drm_gem_ttm_helper.o
+obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
+
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 00962a659009..ca0e435559d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+ amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+ amdgpu_umc.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
@@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
- arct_reg_init.o navi12_reg_init.o
+ arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
# add DF block
amdgpu-y += \
@@ -83,7 +84,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o
+ umc_v6_1.o umc_v6_0.o
# add IH block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0..0c229a92a24b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -106,6 +107,8 @@ struct amdgpu_mgpu_info
uint32_t num_apu;
};
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
+
/*
* Modules parameters.
*/
@@ -122,6 +125,7 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
@@ -135,6 +139,7 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
@@ -146,11 +151,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
+extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
@@ -167,6 +168,12 @@ extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+#else
+static const int sched_policy = KFD_SCHED_POLICY_HWS;
+#endif
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -283,6 +290,9 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
+#define HW_REV(_Major, _Minor, _Rev) \
+ ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
+
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
@@ -425,7 +435,6 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -477,7 +486,6 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
uint64_t bytes_moved_vis;
- struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
@@ -624,6 +632,11 @@ struct amdgpu_fw_vram_usage {
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
+
+ /* Offset at the top of VRAM, used as a c2p write buffer. */
+ u64 mem_train_fb_loc;
+ bool mem_train_support;
};
/*
@@ -644,71 +657,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
- u32 ref_and_mask_sdma2;
- u32 ref_and_mask_sdma3;
- u32 ref_and_mask_sdma4;
- u32 ref_and_mask_sdma5;
- u32 ref_and_mask_sdma6;
- u32 ref_and_mask_sdma7;
-};
-
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
};
-struct amdgpu_nbio_funcs {
- const struct nbio_hdp_flush_reg *hdp_flush_reg;
- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
- u32 (*get_rev_id)(struct amdgpu_device *adev);
- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
- u32 (*get_memsize)(struct amdgpu_device *adev);
- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index, int doorbell_size);
- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index, int instance);
- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*ih_doorbell_range)(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*ih_control)(struct amdgpu_device *adev);
- void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
- void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
struct amdgpu_df_funcs {
void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -813,6 +769,7 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
+ struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -921,6 +878,12 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -974,9 +937,7 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_nbio_funcs *nbio_funcs;
const struct amdgpu_df_funcs *df_funcs;
- const struct amdgpu_mmhub_funcs *mmhub_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -1006,11 +967,11 @@ struct amdgpu_device {
struct mutex lock_reset;
struct amdgpu_doorbell_index doorbell_index;
+ struct mutex notifier_lock;
+
int asic_reset_res;
struct work_struct xgmi_reset_work;
- bool in_baco_reset;
-
long gfx_timeout;
long sdma_timeout;
long video_timeout;
@@ -1018,6 +979,9 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
+
+ /* device pstate */
+ int pstate;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1032,6 +996,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
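
The HW_REV() macro introduced in this header packs an IP block's (major, minor, rev) triple into a single u32, with the major number in the upper half and one byte each for minor and revision. Illustrative pack/unpack arithmetic; the unpack helpers are hypothetical and not part of the patch:

#define HW_REV_MAJOR(r) (((r) >> 16) & 0xffff)  /* hypothetical */
#define HW_REV_MINOR(r) (((r) >> 8) & 0xff)     /* hypothetical */
#define HW_REV_REV(r)   ((r) & 0xff)            /* hypothetical */

uint32_t rev = HW_REV(11, 0, 2);        /* == 0x000b0002 */
/* HW_REV_MAJOR(rev) == 11, HW_REV_MINOR(rev) == 0, HW_REV_REV(rev) == 2 */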
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 07eb29885372..d3da9dde4ee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -63,45 +63,10 @@ void amdgpu_amdkfd_fini(void)
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
- const struct kfd2kgd_calls *kfd2kgd;
-
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
- break;
-#endif
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
- break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
- break;
- case CHIP_ARCTURUS:
- kfd2kgd = amdgpu_amdkfd_arcturus_get_functions();
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions();
- break;
- default:
- dev_info(adev->dev, "kfd not supported on this ASIC\n");
- return;
- }
+ bool vf = amdgpu_sriov_vf(adev);
adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->pdev, adev->asic_type, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -165,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
- /* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.sched.ready)
- clear_bit(amdgpu_gfx_mec_queue_to_bit(adev,
- adev->gfx.kiq.ring.me - 1,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue),
- gpu_resources.queue_bitmap);
-
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant
*/
@@ -202,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
+ kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources);
}
}
@@ -709,38 +666,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
return 0;
}
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return NULL;
-}
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void)
-{
- return NULL;
-}
-
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g)
+ unsigned int asic_type, bool vf)
{
return NULL;
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index e519df3fd2b6..069d5d230810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -57,7 +57,7 @@ struct kgd_mem {
unsigned int mapped_to_gpu_memory;
uint64_t va;
- uint32_t mapping_flags;
+ uint32_t alloc_flags;
atomic_t invalid;
struct amdkfd_process_info *process_info;
@@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void);
-
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
@@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
+/* Read user wptr from a specified user address space with page faults
+ * disabled. The memory must be pinned and mapped to the hardware when
+ * this is called in hqd_load functions, so it should never fault in
+ * the first place. This resolves a circular lock dependency involving
+ * four locks, including the DQM lock and mmap_sem.
+ */
#define read_user_wptr(mmptr, wptr, dst) \
({ \
bool valid = false; \
if ((mmptr) && (wptr)) { \
+ pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
} else if (current->mm == NULL) { \
@@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
valid = !get_user((dst), (wptr)); \
unuse_mm(mmptr); \
} \
+ pagefault_enable(); \
} \
valid; \
})
@@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- const struct kfd2kgd_calls *f2g);
+ unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd);
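
The pagefault_disable()/pagefault_enable() pair wrapped around read_user_wptr() above is the standard kernel idiom for touching user memory in a context where faulting a page in could deadlock: with faults disabled, get_user() fails immediately instead of sleeping in the fault path. A self-contained sketch of the idiom, with an illustrative function name:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Returns true only if the value could be read without faulting,
 * i.e. the backing page was already resident and mapped.
 */
static bool read_u64_nofault(u64 __user *uptr, u64 *out)
{
        bool ok;

        pagefault_disable();
        ok = !get_user(*out, uptr);
        pagefault_enable();

        return ok;
}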
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index c79aaebeeaf0..b6713e0ed1b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -19,10 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
@@ -69,11 +65,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[8] = {
+ uint32_t sdma_engine_reg_base[8] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
@@ -91,111 +87,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA7, 0,
mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
-static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
- u32 instance, u32 offset)
-{
- switch (instance) {
- case 0:
- return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
- case 1:
- return (adev->reg_offset[SDMA1_HWIP][0][1] + offset);
- case 2:
- return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
- case 3:
- return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
- case 4:
- return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
- case 5:
- return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
- case 6:
- return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
- case 7:
- return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
- default:
- break;
- }
- return 0;
-}
-
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev,
- m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -205,7 +172,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -215,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -235,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -255,40 +223,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls arcturus_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -304,20 +274,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
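
A note on the get_sdma_rlc_reg_offset() scheme used in the renames above: each sdma_engine_reg_base[] entry is the engine's RLC0 register block rebased by subtracting mmSDMA0_RLC0_RB_CNTL, so the generic mmSDMA0_RLC0_* register names can be added back on top, and queues within an engine are spaced by the RLC1-RLC0 stride. In plain arithmetic, with illustrative variable names:

/* base for SDMA<n> = SOC15_REG_OFFSET(SDMA<n>, 0, mmSDMA<n>_RLC0_RB_CNTL)
 *                    - mmSDMA0_RLC0_RB_CNTL
 * queue stride      = mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL
 */
uint32_t offset = sdma_engine_reg_base[engine_id] +
                  queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

/* Any RLC0-relative register name now addresses the chosen queue: */
data = RREG32(offset + mmSDMA0_RLC0_CONTEXT_STATUS);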
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index d10f483f5e27..61cd707158e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,18 +19,9 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#undef pr_fmt
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
@@ -42,6 +33,7 @@
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
+#include "gfxhub_v2_0.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
@@ -50,63 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-#if 0
-static uint32_t get_watch_base_addr(struct amdgpu_device *adev);
-#endif
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -139,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions()
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -250,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
ATC_VMID0_PASID_MAPPING__VALID_MASK;
pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
- /*
- * need to do this twice, once for gfx and once for mmhub
- * for ATC add 16 to VMID for mmhub, for IH different registers.
- * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
- */
pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
@@ -306,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
/* On gfx10, mmSDMA1_xxx registers are defined NOT based
@@ -322,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
};
- uint32_t retval;
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
@@ -488,72 +387,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -563,28 +457,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- pr_debug("sdma dump engine id %d queue_id %d\n", engine_id, queue_id);
- pr_debug("sdma base addr %x\n", sdma_base_addr);
-
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -618,14 +510,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -746,59 +638,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v10_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -830,6 +715,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
if (amdgpu_emu_mode == 0 && ring->sched.ready)
@@ -838,13 +725,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB_0, 0);
- break;
- }
+
+ ret = get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ AMDGPU_GFXHUB_0, 0);
+ break;
}
}
@@ -914,7 +801,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -922,18 +808,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
return;
}
- /* TODO: take advantage of per-process address space size. For
- * now, all processes share the same address space size, like
- * on GFX8 and older.
- */
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ /* SDMA is on gfxhub as well for Navi1* series */
+ gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
+
+const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .get_tile_config = amdgpu_amdkfd_get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .get_hive_id = amdgpu_amdkfd_get_hive_id,
+};
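
The get_atc_vmid_pasid_mapping_info() consolidation above replaces two callbacks that each re-read the same ATC mapping register (one for the valid bit, one for the PASID) with a single read that returns both, so the two answers cannot come from different register snapshots. The caller-side shape, as used by invalidate_tlbs() in this file:

uint16_t queried_pasid;

if (get_atc_vmid_pasid_mapping_info(kgd, vmid, &queried_pasid) &&
    queried_pasid == pasid)
        amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);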
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5f459bf5f622..6e6f0a99ec06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdma_rlc_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdma_rlc_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
@@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct kgd_dev *kgd,
@@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
+
+const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6d2f61449606..bfbddedb2380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,9 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
@@ -44,62 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr,
- uint32_t wptr_shift, uint32_t wptr_mask,
- struct mm_struct *mm);
-static int kgd_hqd_dump(struct kgd_dev *kgd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
- uint32_t __user *wptr, struct mm_struct *mm);
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
- uint32_t engine_id, uint32_t queue_id,
- uint32_t (**dump)[2], uint32_t *n_regs);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
- enum kfd_preempt_type reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
- uint64_t page_table_base);
-static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
-static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
-
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
*/
@@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd,
return 0;
}
-static const struct kfd2kgd_calls kfd2kgd = {
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_dump = kgd_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
- .set_vm_context_page_table_base = set_vm_context_page_table_base,
- .invalidate_tlbs = invalidate_tlbs,
- .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
- pr_debug("sdma base address: 0x%x\n", retval);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
unsigned long end_jiffies;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- if (m->sdma_engine_id) {
- data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
- } else {
- data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
- }
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
if (read_user_wptr(mm, wptr, data))
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdmax_rlcx_virtual_addr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct vi_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int kgd_address_watch_disable(struct kgd_dev *kgd)
@@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .address_watch_disable = kgd_address_watch_disable,
+ .address_watch_execute = kgd_address_watch_execute,
+ .wave_control_execute = kgd_wave_control_execute,
+ .address_watch_get_offset = kgd_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .invalidate_tlbs = invalidate_tlbs,
+ .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e262f2ac07a3..47c853ef1051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,17 +19,10 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -50,9 +43,6 @@
#include "gmc_v9_0.h"
-#define V9_PIPE_PER_MEC (4)
-#define V9_QUEUES_PER_PIPE_MEC (8)
-
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
@@ -226,22 +216,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static uint32_t get_sdma_base_addr(struct amdgpu_device *adev,
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
- uint32_t base[2] = {
+ uint32_t sdma_engine_reg_base[2] = {
SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
};
- uint32_t retval;
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
- retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL -
- mmSDMA0_RLC0_RB_CNTL);
-
- pr_debug("sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
return retval;
}
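
The per-queue offset is the engine's RLC block base plus the queue index times the RLC0-to-RLC1 stride, so every mmSDMA0_RLC0_* register name can then index into any engine/queue pair. A worked example with made-up numbers (the real SOC15 offsets differ per ASIC):

    /* Suppose sdma_engine_reg_base[1] = 0x1860 and the stride
     * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL) = 0x80 registers.
     * Then SDMA1, queue 2 resolves to:
     *     0x1860 + 2 * 0x80 = 0x1960
     * and e.g. RREG32(0x1960 + mmSDMA0_RLC0_RB_CNTL) reads that queue's
     * ring-buffer control register.
     */
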
@@ -388,71 +377,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr, sdmax_gfx_context_cntl;
+ uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdmax_gfx_context_cntl = m->sdma_engine_id ?
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) :
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
- data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- data = RREG32(sdmax_gfx_context_cntl);
- data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
- RESUME_CTX, 0);
- WREG32(sdmax_gfx_context_cntl, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
@@ -462,7 +447,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t (**dump)[2], uint32_t *n_regs)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id);
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
@@ -472,15 +458,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
- DUMP_REG(sdma_base_addr + reg);
+ DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
@@ -514,14 +500,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -584,59 +570,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id,
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (time_after(jiffies, end_jiffies))
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
+ }
usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
- RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI);
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid)
{
- uint32_t reg;
+ uint32_t value;
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
-
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
- + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
@@ -671,6 +650,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
int vmid, i;
+ uint16_t queried_pasid;
+ bool ret;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
uint32_t flush_type = 0;
@@ -686,14 +667,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
- if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
- == pasid) {
- for (i = 0; i < adev->num_vmhubs; i++)
- amdgpu_gmc_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- break;
- }
+
+ ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
+ &queried_pasid);
+ if (ret && queried_pasid == pasid) {
+ for (i = 0; i < adev->num_vmhubs; i++)
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid,
+ i, flush_type);
+ break;
}
}
@@ -777,15 +758,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid)
-{
- /* No longer needed on GFXv9. The scratch base address is
- * passed to the shader by the CP. It's the user mode driver's
- * responsibility.
- */
-}
-
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
@@ -811,7 +783,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi
gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
-static const struct kfd2kgd_calls kfd2kgd = {
+const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
@@ -827,19 +799,11 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid,
- .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
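
With the amdgpu_amdkfd_gfx_*_get_functions() wrappers gone, each ASIC file now exports its table directly (gfx_v7_kfd2kgd, gfx_v8_kfd2kgd and gfx_v9_kfd2kgd above), so selection reduces to picking an extern const table. A hypothetical sketch of such a dispatch site (the actual switch lives elsewhere in amdgpu_amdkfd and is not part of these hunks):

    extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
    extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
    extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;

    const struct kfd2kgd_calls *kfd2kgd;

    switch (adev->asic_type) {      /* hypothetical selection */
    case CHIP_KAVERI:
    case CHIP_HAWAII:
            kfd2kgd = &gfx_v7_kfd2kgd;
            break;
    case CHIP_CARRIZO:
    case CHIP_TONGA:
            kfd2kgd = &gfx_v8_kfd2kgd;
            break;
    default:
            kfd2kgd = &gfx_v9_kfd2kgd;
            break;
    }
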
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 26d8879bff9d..d9e9ad22b2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
-bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+ uint8_t vmid, uint16_t *p_pasid);
void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd,
- uint64_t va, uint32_t vmid);
int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6d021ecc8d59..7d35b5b66229 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -19,9 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
-#define pr_fmt(fmt) "kfd2kgd: " fmt
-
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
@@ -33,11 +30,6 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
-/* Special VM and GART address alignment needed for VI pre-Fiji due to
- * a HW bug.
- */
-#define VI_BO_SIZE_ALIGN (0x8000)
-
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -349,13 +341,46 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
- ret = amdgpu_vm_update_directories(adev, vm);
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
return ret;
return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ uint32_t mapping_flags;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev)
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
+ else
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else {
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
+ default:
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+
+ return amdgpu_gem_va_map_flags(adev, mapping_flags);
+}
+
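
get_pte_flags() centralizes what used to be open-coded at allocation time: read/write/execute bits derived from the alloc flags plus an ASIC-dependent memory type. Illustrative outcomes, following the function body above:

    /* Non-Arcturus, writable, non-coherent allocation:
     *     READABLE | WRITEABLE | MTYPE_NC
     * Arcturus, coherent mapping of its own VRAM (bo_adev == adev):
     *     READABLE | ... | MTYPE_CC
     * Arcturus mapping another GPU's VRAM (bo_adev != adev):
     *     READABLE | ... | MTYPE_UC
     * The AMDGPU_VM_* mask is then translated into PTE bits by
     * amdgpu_gem_va_map_flags().
     */
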
/* add_bo_to_vm - Add a BO to a VM
*
* Everything that needs to be done only once when a BO is first added
@@ -404,8 +429,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
}
bo_va_entry->va = va;
- bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
- mem->mapping_flags);
+ bo_va_entry->pte_flags = get_pte_flags(adev, mem);
bo_va_entry->kgd_dev = (void *)adev;
list_add(&bo_va_entry->bo_list, list_bo_va);
@@ -481,8 +505,7 @@ static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
*
* Returns 0 for success, negative errno for errors.
*/
-static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
- uint64_t user_addr)
+static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
@@ -586,7 +609,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else {
@@ -659,7 +682,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates, true);
+ false, &ctx->duplicates);
if (!ret)
ctx->reserved = true;
else
@@ -1079,10 +1102,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
- int byte_align;
u32 domain, alloc_domain;
u64 alloc_flags;
- uint32_t mapping_flags;
int ret;
/*
@@ -1135,25 +1156,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if ((*mem)->aql_queue)
size = size >> 1;
- /* Workaround for TLB bug on older VI chips */
- byte_align = (adev->family == AMDGPU_FAMILY_VI &&
- adev->asic_type != CHIP_FIJI &&
- adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12 &&
- adev->asic_type != CHIP_VEGAM) ?
- VI_BO_SIZE_ALIGN : 1;
-
- mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (flags & ALLOC_MEM_FLAGS_WRITABLE)
- mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
- mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- if (flags & ALLOC_MEM_FLAGS_COHERENT)
- mapping_flags |= AMDGPU_VM_MTYPE_UC;
- else
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- (*mem)->mapping_flags = mapping_flags;
+ (*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
@@ -1168,7 +1171,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
memset(&bp, 0, sizeof(bp));
bp.size = size;
- bp.byte_align = byte_align;
+ bp.byte_align = 1;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
bp.type = bo_type;
@@ -1195,7 +1198,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- ret = init_user_pages(*mem, current->mm, user_addr);
+ ret = init_user_pages(*mem, user_addr);
if (ret)
goto allocate_init_user_pages_failed;
}
@@ -1626,9 +1629,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->mapping_flags =
- AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+ (*mem)->alloc_flags =
+ ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
+ ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -1739,6 +1743,10 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
return ret;
}
+ /*
+ * FIXME: Cannot ignore the return code, must hold
+ * notifier_lock
+ */
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
/* Mark the BO as valid unless it was invalidated
@@ -1797,8 +1805,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
- true);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
@@ -1996,7 +2003,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save, true);
+ false, &duplicate_save);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1c9d40f97a9b..72232fccf61a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
if (adev->is_atom_fw) {
amdgpu_atomfirmware_scratch_regs_init(adev);
amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev);
+ if (ret) {
+ DRM_ERROR("Failed to get mem train fb location.\n");
+ return ret;
+ }
} else {
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index daf687428cdb..ff4eb96bdfb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -27,6 +27,7 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
+#include "soc15_hw_ip.h"
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
@@ -120,65 +121,14 @@ union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
};
-/*
- * Return vram width from integrated system info table, if available,
- * or 0 if not.
- */
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
- u16 data_offset, size;
- union igp_info *igp_info;
- union vram_info *vram_info;
- u32 mem_channel_number;
- u32 mem_channel_width;
- u8 frev, crev;
-
- if (adev->flags & AMD_IS_APU)
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- integratedsysteminfo);
- else
- index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- vram_info);
- /* get any igp specific overrides */
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- if (adev->flags & AMD_IS_APU) {
- igp_info = (union igp_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 11:
- mem_channel_number = igp_info->v11.umachannelnumber;
- /* channel width is 64 */
- return mem_channel_number * 64;
- default:
- return 0;
- }
- } else {
- vram_info = (union vram_info *)
- (mode_info->atom_context->bios + data_offset);
- switch (crev) {
- case 3:
- mem_channel_number = vram_info->v23.vram_module[0].channel_num;
- mem_channel_width = vram_info->v23.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- case 4:
- mem_channel_number = vram_info->v24.vram_module[0].channel_num;
- mem_channel_width = vram_info->v24.vram_module[0].channel_width;
- return mem_channel_number * (1 << mem_channel_width);
- default:
- return 0;
- }
- }
- }
-
- return 0;
-}
+union vram_module {
+ struct atom_vram_module_v9 v9;
+ struct atom_vram_module_v10 v10;
+};
-static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
- int atom_mem_type)
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
{
int vram_type;
@@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev,
return vram_type;
}
-/*
- * Return vram type from either integrated system info table
- * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
- */
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
+
+int
+amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index;
+ int index, i = 0;
u16 data_offset, size;
union igp_info *igp_info;
union vram_info *vram_info;
+ union vram_module *vram_module;
u8 frev, crev;
u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
if (adev->flags & AMD_IS_APU)
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
else
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_info);
+
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size,
&frev, &crev, &data_offset)) {
@@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ /* channel width is 64 */
+ if (vram_width)
+ *vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
- return 0;
+ return -EINVAL;
}
} else {
vram_info = (union vram_info *)
(mode_info->atom_context->bios + data_offset);
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
switch (crev) {
case 3:
- mem_type = vram_info->v23.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
case 4:
- mem_type = vram_info->v24.vram_module[0].memory_type;
- return convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
- return 0;
+ return -EINVAL;
}
}
+
}
return 0;
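
The rewritten lookup no longer hardcodes vram_module[0]: the active module index is taken from bits 16-23 of the BIOS scratch register, out-of-range indices fall back to module 0, and the walk advances by each entry's own vram_module_size because the entries are variable-length. The width math, with hypothetical table values:

    /* Suppose the selected module reports channel_num = 8 and
     * channel_width = 5 (a power-of-two exponent). Then:
     *     vram_width = 8 * (1 << 5) = 256 bits
     * and the vendor id is the low nibble of vender_rev_id.
     */
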
@@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
}
return -EINVAL;
}
+
+/*
+ * Check if VBIOS supports GDDR6 training data save/restore
+ */
+static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
+{
+ uint16_t data_offset;
+ int index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
+ NULL, NULL, &data_offset)) {
+ struct atom_firmware_info_v3_1 *firmware_info =
+ (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
+ data_offset);
+
+ DRM_DEBUG("atom firmware capability:0x%08x.\n",
+ le32_to_cpu(firmware_info->firmware_capability));
+
+ if (le32_to_cpu(firmware_info->firmware_capability) &
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
+ return true;
+ }
+
+ return false;
+}
+
+static int gddr6_mem_train_support(struct amdgpu_device *adev)
+{
+ int ret;
+ uint32_t major, minor, revision, hw_v;
+
+ if (gddr6_mem_train_vbios_support(adev)) {
+ amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
+ hw_v = HW_REV(major, minor, revision);
+ /*
+ * Treat revision 0 as a special case: the MP0 and MMHUB registers are
+ * missing on some Navi10 A0 parts, so the driver cannot discover the hwip
+ * information there. Since none of those functions will be initialized,
+ * this should not cause any problems.
+ */
+ switch (hw_v) {
+ case HW_REV(11, 0, 0):
+ case HW_REV(11, 0, 5):
+ ret = 1;
+ break;
+ default:
+ DRM_ERROR("memory training vbios supports but psp hw(%08x)"
+ " doesn't support!\n", hw_v);
+ ret = -1;
+ break;
+ }
+ } else {
+ ret = 0;
+ hw_v = -1;
+ }
+
+ DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
+ return ret;
+}
+
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ unsigned char *bios = ctx->bios;
+ struct vram_reserve_block *reserved_block;
+ int index, block_number;
+ uint8_t frev, crev;
+ uint16_t data_offset, size;
+ uint32_t start_address_in_kb;
+ uint64_t offset;
+ int ret;
+
+ adev->fw_vram_usage.mem_train_support = false;
+
+ if (adev->asic_type != CHIP_NAVI10 &&
+ adev->asic_type != CHIP_NAVI14)
+ return 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ ret = gddr6_mem_train_support(adev);
+ if (ret == -1)
+ return -EINVAL;
+ else if (ret == 0)
+ return 0;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ vram_usagebyfirmware);
+ ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
+ &data_offset);
+ if (ret == 0) {
+ DRM_ERROR("parse data header failed.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
+ " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
+ /* only support 2.1+ */
+ if (((uint16_t)frev << 8 | crev) < 0x0201) {
+ DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
+ return -EINVAL;
+ }
+
+ reserved_block = (struct vram_reserve_block *)
+ (bios + data_offset + sizeof(struct atom_common_table_header));
+ block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
+ / sizeof(struct vram_reserve_block);
+ reserved_block += (block_number > 0) ? block_number-1 : 0;
+ DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
+ block_number,
+ le32_to_cpu(reserved_block->start_address_in_kb),
+ le16_to_cpu(reserved_block->used_by_firmware_in_kb),
+ le16_to_cpu(reserved_block->used_by_driver_in_kb));
+ if (reserved_block->used_by_firmware_in_kb > 0) {
+ start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
+ offset = (uint64_t)start_address_in_kb * ONE_KiB;
+ if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) {
+ offset -= ONE_MiB;
+ }
+
+ offset &= ~(ONE_MiB - 1);
+ adev->fw_vram_usage.mem_train_fb_loc = offset;
+ adev->fw_vram_usage.mem_train_support = true;
+ DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
+ ret = 0;
+ } else {
+ DRM_ERROR("used_by_firmware_in_kb is 0!\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
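
The reserved-block math converts the block's start address from KiB to bytes and rounds it down to a 1 MiB boundary, first stepping back a whole MiB whenever the offset sits at most 4 KiB above such a boundary, so the result never overlaps the reserved area. A worked example with made-up addresses:

    /* start_address_in_kb = 0x80010  ->  offset = 0x20004000 bytes
     *     0x20004000 & (ONE_MiB - 1) = 0x4000 (16 KiB): no step back
     *     0x20004000 & ~(ONE_MiB - 1) = 0x20000000  -> mem_train_fb_loc
     * start_address_in_kb = 0x80001  ->  offset = 0x20000400 bytes
     *     remainder 0x400 (1 KiB) is within 4 KiB, so subtract ONE_MiB
     *     first, giving (0x1ff00400 & ~(ONE_MiB - 1)) = 0x1ff00000
     */
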
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 5ec6f92f353c..f871af5ea6f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -29,8 +29,9 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
-int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 3e35a8f2c5e5..a97fb759e2f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void)
bool d3_supported = false;
struct pci_dev *parent_pdev;
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- vga_count++;
-
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
-
- parent_pdev = pci_upstream_bridge(pdev);
- d3_supported |= parent_pdev && parent_pdev->bridge_d3;
- amdgpu_atpx_get_quirks(pdev);
- }
-
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 649e68c4479b..d1495e1c9289 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
{
unsigned long start_jiffies;
unsigned long end_jiffies;
- struct dma_fence *fence = NULL;
+ struct dma_fence *fence;
int i, r;
start_jiffies = jiffies;
@@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (r)
goto exit_do_move;
- dma_fence_put(fence);
}
end_jiffies = jiffies;
r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
- if (fence)
- dma_fence_put(fence);
return r;
}
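
The reordering above fixes a fence leak: the reference returned by the copy is now dropped right after the wait, on success and failure alike, which also lets the exit path shed its NULL check. A minimal sketch of the pattern (the producer name is hypothetical):

    fence = submit_copy(adev, src, dst, size);  /* hypothetical, holds one ref */
    r = dma_fence_wait(fence, false);
    dma_fence_put(fence);                       /* drop the ref on every path */
    if (r)
            goto exit_do_move;
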
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index ece55c8fa673..a62cbc8199de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -217,11 +217,10 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -236,9 +235,8 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -347,10 +345,9 @@ static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1022,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
*/
if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *list_amdgpu_connector;
- list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
if (connector == list_connector)
continue;
list_amdgpu_connector = to_amdgpu_connector(list_connector);
@@ -1040,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
}
}
@@ -1065,9 +1067,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1117,9 +1118,8 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1271,9 +1271,8 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1292,10 +1291,9 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1501,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
@@ -1515,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
return;
/* see if we already added it */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
+ drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1533,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
}
+ drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82823d9a8ba8..5ca905b4a0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -35,6 +35,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
+#include "amdgpu_ras.h"
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
@@ -449,75 +450,12 @@ retry:
return r;
}
-/* Last resort, try to evict something from the current working set */
-static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *validated)
-{
- uint32_t domain = validated->allowed_domains;
- struct ttm_operation_ctx ctx = { true, false };
- int r;
-
- if (!p->evictable)
- return false;
-
- for (;&p->evictable->tv.head != &p->validated;
- p->evictable = list_prev_entry(p->evictable, tv.head)) {
-
- struct amdgpu_bo_list_entry *candidate = p->evictable;
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- bool update_bytes_moved_vis;
- uint32_t other;
-
- /* If we reached our current BO we can forget it */
- if (bo == validated)
- break;
-
- /* We can't move pinned BOs here */
- if (bo->pin_count)
- continue;
-
- other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this BO is in one of the domains we need space for */
- if (!(other & domain))
- continue;
-
- /* Check if we can move this BO somewhere else */
- other = bo->allowed_domains & ~domain;
- if (!other)
- continue;
-
- /* Good we can try to move this BO somewhere else */
- update_bytes_moved_vis =
- !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_bo_placement_from_domain(bo, other);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- p->bytes_moved += ctx.bytes_moved;
- if (update_bytes_moved_vis)
- p->bytes_moved_vis += ctx.bytes_moved;
-
- if (unlikely(r))
- break;
-
- p->evictable = list_prev_entry(p->evictable, tv.head);
- list_move(&candidate->tv.head, &p->validated);
-
- return true;
- }
-
- return false;
-}
-
static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_cs_parser *p = param;
int r;
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
+ r = amdgpu_cs_bo_validate(p, bo);
if (r)
return r;
@@ -554,9 +492,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
lobj->user_pages);
}
- if (p->evictable == lobj)
- p->evictable = NULL;
-
r = amdgpu_cs_validate(p, bo);
if (r)
return r;
@@ -603,8 +538,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->tv.num_shared = 2;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -646,7 +579,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, false);
+ &duplicates);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -657,9 +590,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
p->bytes_moved_vis = 0;
- p->evictable = list_last_entry(&p->validated,
- struct amdgpu_bo_list_entry,
- tv.head);
r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
amdgpu_cs_validate, p);
@@ -911,7 +841,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
if (r)
return r;
@@ -1287,11 +1217,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
- /* No memory allocation is allowed while holding the mn lock.
- * p->mn is hold until amdgpu_cs_submit is finished and fence is added
- * to BOs.
+ /* No memory allocation is allowed while holding the notifier lock.
+ * The lock is held until amdgpu_cs_submit is finished and fence is
+ * added to BOs.
*/
- amdgpu_mn_lock(p->mn);
+ mutex_lock(&p->adev->notifier_lock);
/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
* -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
@@ -1334,13 +1264,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
- amdgpu_mn_unlock(p->mn);
+ mutex_unlock(&p->adev->notifier_lock);
return 0;
error_abort:
drm_sched_job_cleanup(&job->base);
- amdgpu_mn_unlock(p->mn);
+ mutex_unlock(&p->adev->notifier_lock);
error_unlock:
amdgpu_job_free(job);
@@ -1355,6 +1285,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
bool reserved_buffers = false;
int i, r;
+ if (amdgpu_ras_intr_triggered())
+ return -EHWPOISON;
+
if (!adev->accel_working)
return -EBUSY;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 35a8d3c96fc9..08047bc4d588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5652cc72ed3a..8e6726e0d035 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
int r = 0, i;
+	/* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* hold on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
kthread_unpark(ring->sched.thread);
}
+ mutex_unlock(&adev->lock_reset);
+
return 0;
}
@@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
if (!fences)
return -ENOMEM;
+	/* Avoid accidentally unparking the sched thread during GPU reset */
+ mutex_lock(&adev->lock_reset);
+
/* stop the scheduler */
kthread_park(ring->sched.thread);
@@ -1075,10 +1083,11 @@ failure:
/* restart the scheduler */
kthread_unpark(ring->sched.thread);
+ mutex_unlock(&adev->lock_reset);
+
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (fences)
- kfree(fences);
+ kfree(fences);
return 0;
}
@@ -1090,8 +1099,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev->ddev->primary->debugfs_root,
- (void *)adev, &fops_ib_preempt);
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_ib_preempt);
if (!(adev->debugfs_preempt)) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO;
@@ -1103,8 +1112,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
{
- if (adev->debugfs_preempt)
- debugfs_remove(adev->debugfs_preempt);
+ debugfs_remove(adev->debugfs_preempt);
}
#else
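
Both debugfs hooks now take adev->lock_reset around the park/unpark of the scheduler threads, so a concurrent GPU reset cannot unpark them mid-operation. The shape of the pattern, with a hypothetical touch_hardware() standing in for the actual debugfs work:

	mutex_lock(&adev->lock_reset);		/* exclude a concurrent GPU reset */
	kthread_park(ring->sched.thread);	/* quiesce job submission */

	touch_hardware(adev);			/* hypothetical: safe HW access */

	kthread_unpark(ring->sched.thread);
	mutex_unlock(&adev->lock_reset);
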
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7a6c837c0a85..c17505fba988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,8 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include <linux/suspend.h>
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static const char *amdgpu_asic_name[] = {
+const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
@@ -151,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev)
return false;
}
+/**
+ * amdgpu_device_vram_access - read/write a buffer in VRAM
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size in bytes; the buffer at @buf must hold at least @size bytes
+ * @write: true to write to VRAM, false to read from VRAM
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write)
+{
+ uint64_t last;
+ unsigned long flags;
+
+ last = size - 4;
+ for (last += pos; pos <= last; pos += 4) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (write)
+ WREG32_NO_KIQ(mmMM_DATA, *buf++);
+ else
+ *buf++ = RREG32_NO_KIQ(mmMM_DATA);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+}
+
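
For illustration, a hedged usage sketch of the new helper; the offset and buffer size here are made up, and the buffer is consumed one dword per MM_INDEX/MM_DATA cycle by the loop above:

	u32 buf[16];

	/* read 64 bytes starting at VRAM offset 0x1000 */
	amdgpu_device_vram_access(adev, 0x1000, buf, sizeof(buf), false);

	/* patch the first dword and write it back */
	buf[0] |= 0x1;
	amdgpu_device_vram_access(adev, 0x1000, buf, sizeof(u32), true);
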
/*
* MMIO register access helper functions.
*/
@@ -1023,12 +1055,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
amdgpu_device_check_block_size(adev);
- ret = amdgpu_device_get_job_timeout_settings(adev);
- if (ret) {
- dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- return ret;
- }
-
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
return ret;
@@ -1469,6 +1495,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ goto parse_soc_bounding_box;
+
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
@@ -1496,7 +1525,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->gfx.config.num_packer_per_sc =
le32_to_cpu(gpu_info_fw->num_packer_per_sc);
}
+
+parse_soc_bounding_box:
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+ /*
+	 * The SOC bounding box info is not integrated into the discovery table,
+	 * so we always need to parse it from the gpu info firmware.
+ */
if (hdr->version_minor == 2) {
const struct gpu_info_firmware_v1_2 *gpu_info_fw =
(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
@@ -1613,6 +1648,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ amdgpu_discovery_get_gfx_info(adev);
+
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
@@ -1622,7 +1660,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1839,6 +1877,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
goto init_failed;
+ /*
+	 * Retired pages will be loaded from EEPROM and reserved here.
+	 * This must be called after amdgpu_device_ip_hw_init_phase2, since
+	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
+	 * functional for I2C communication, which is only true at this point.
+	 * recovery_init may fail, but it can free all resources allocated by
+	 * itself, and its failure should not stop the amdgpu init process.
+	 *
+	 * Note: theoretically, this should be called before all VRAM
+	 * allocations, to protect retired pages from being abused.
+ */
+ amdgpu_ras_recovery_init(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
@@ -2006,6 +2057,7 @@ out:
*/
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
+ struct amdgpu_gpu_instance *gpu_instance;
int i = 0, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -2031,8 +2083,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
- /* set to low pstate by default */
- amdgpu_xgmi_set_pstate(adev, 0);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+		 * Reset the device p-state to low, as it was booted with high.
+		 *
+		 * This should be performed only after all devices from the same
+		 * hive get initialized.
+		 *
+		 * However, the number of devices in the hive is not known in
+		 * advance, since it is counted one by one as the devices are
+		 * initialized.
+		 *
+		 * So, we wait until all XGMI interlinked devices are initialized.
+		 * This may bring some delays as those devices may come from
+		 * different hives. But that should be OK.
+ */
+ if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev->flags & AMD_IS_APU)
+ continue;
+
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ if (r) {
+ DRM_ERROR("pstate setting failed (%d).\n", r);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+ }
return 0;
}
@@ -2220,6 +2303,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* displays are handled in phase1 */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
continue;
+ /* PSP lost connection when err_event_athub occurs */
+ if (amdgpu_ras_intr_triggered() &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
+ }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -2231,17 +2320,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
/* handle putting the SMC in the appropriate state */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
if (is_support_sw_smu(adev)) {
- /* todo */
+ r = smu_set_mp1_state(&adev->smu, adev->mp1_state);
} else if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_mp1_state) {
r = adev->powerplay.pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
- }
+ }
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
}
}
@@ -2556,6 +2645,73 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res, adev->ddev->unique);
}
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+	 * By default the timeout for non-compute jobs is 10000 ms,
+	 * and no timeout is enforced on compute jobs.
+	 * In SR-IOV or passthrough mode, the timeout for compute
+	 * jobs is 10000 ms by default.
+ */
+ adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ else
+ adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+		 * Only one value was specified, so it
+		 * applies to all non-compute jobs.
+ */
+ if (index == 1) {
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ }
+ }
+
+ return ret;
+}
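
The accepted format is easiest to see in isolation. Below is a small, self-contained userspace analogue of the parsing loop above (strtol stands in for kstrtol; values are in milliseconds and the input string is just an example):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char input[] = "10000,0,-1,5000";	/* GFX,Compute,SDMA,Video */
	const char *name[] = { "gfx", "compute", "sdma", "video" };
	char *s = input, *tok;
	int index = 0;

	while (index < 4 && (tok = strsep(&s, ",")) && *tok) {
		long timeout = strtol(tok, NULL, 0);

		if (timeout == 0) {		/* 0: keep the default */
			index++;
			continue;
		}
		if (timeout < 0)		/* negative: no timeout */
			printf("%s: infinite\n", name[index]);
		else
			printf("%s: %ld ms\n", name[index], timeout);
		index++;
	}
	if (index == 1)
		printf("single value: applies to all non-compute jobs\n");
	return 0;
}
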
/**
* amdgpu_device_init - initialize the driver
@@ -2583,7 +2739,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->ddev = ddev;
adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMD_ASIC_MASK;
+
+ if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+ adev->asic_type = amdgpu_force_asic_type;
+ else
+ adev->asic_type = flags & AMD_ASIC_MASK;
+
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
adev->usec_timeout *= 2;
@@ -2633,6 +2794,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->notifier_lock);
mutex_init(&adev->virt.dpm_mutex);
mutex_init(&adev->psp.mutex);
@@ -2726,6 +2888,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ r = amdgpu_device_get_job_timeout_settings(adev);
+ if (r) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return r;
+ }
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -2942,7 +3110,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int r;
DRM_INFO("amdgpu: finishing device.\n");
+ flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -2960,7 +3130,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
- cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -3014,6 +3183,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int r;
if (dev == NULL || dev->dev_private == NULL) {
@@ -3036,9 +3206,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- }
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -3089,15 +3261,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- pci_save_state(dev->pdev);
if (suspend) {
+ pci_save_state(dev->pdev);
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
- } else {
- r = amdgpu_asic_reset(adev);
- if (r)
- DRM_ERROR("amdgpu asic reset failed\n");
}
return 0;
@@ -3117,6 +3285,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_device *adev = dev->dev_private;
struct drm_crtc *crtc;
int r = 0;
@@ -3187,9 +3356,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* turn on display hw */
drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
drm_modeset_unlock_all(dev);
}
amdgpu_fbdev_set_suspend(adev, 0);
@@ -3635,11 +3808,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
-
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
- amdgpu_ras_reserve_bad_pages(tmp_adev);
- }
}
}
@@ -3743,25 +3911,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
adev->mp1_state = PP_MP1_STATE_NONE;
break;
}
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
return true;
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->mp1_state = PP_MP1_STATE_NONE;
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
}
-
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -3781,11 +3942,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ /*
+	 * Flush RAM to disk so that after the reboot
+	 * the user can read the log and see why the system rebooted.
+ */
+ if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) {
+
+ DRM_WARN("Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
need_full_reset = job_signaled = false;
INIT_LIST_HEAD(&device_list);
- dev_info(adev->dev, "GPU reset begin!\n");
+	dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? "jobs stop" : "reset");
cancel_delayed_work_sync(&adev->delayed_init_work);
@@ -3812,9 +3986,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
return 0;
}
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Build list of devices to reset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (!hive) {
+			/* unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_device_unlock_adev(adev);
return -ENODEV;
}
@@ -3830,17 +4011,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
device_list_handle = &device_list;
}
- /*
- * Mark these ASICs to be reseted as untracked first
- * And add them back after reset completed
- */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
- amdgpu_unregister_gpu_instance(tmp_adev);
-
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (tmp_adev != adev) {
+ amdgpu_device_lock_adev(tmp_adev, false);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+ }
+
+ /*
+		 * Mark these ASICs to be reset as untracked first,
+		 * and add them back after the reset completes.
+ */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
/* disable ras on ALL IPs */
- if (amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3850,10 +4036,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
continue;
drm_sched_stop(&ring->sched, job ? &job->base : NULL);
+
+ if (in_ras_intr)
+ amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
}
+ if (in_ras_intr)
+ goto skip_sched_resume;
+
/*
* Must check guilty signal here since after this point all old
* HW fences are force signaled.
@@ -3864,9 +4056,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dma_fence_is_signaled(job->base.s_fence->parent))
job_signaled = true;
- if (!amdgpu_device_ip_need_full_reset(adev))
- device_list_handle = &device_list;
-
if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -3888,7 +4077,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (tmp_adev == adev)
continue;
- amdgpu_device_lock_adev(tmp_adev, false);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -3916,6 +4104,7 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -3937,12 +4126,18 @@ skip_hw_reset:
if (r) {
/* bad news, how to tell it to userspace ? */
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
}
+ }
+skip_sched_resume:
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+		/* unlock kfd: SRIOV would do it separately */
+ if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1481899f86c1..f95092741c38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
- uint32_t *p = (uint32_t *)binary;
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - BINARY_MAX_SIZE;
- unsigned long flags;
-
- while (pos < vram_size) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
- *p++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- pos += 4;
- }
+ uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
return 0;
}
@@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
+ adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
if (!adev->discovery)
return -ENOMEM;
@@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor)
+ int *major, int *minor, int *revision)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
@@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
*major = ip->major;
if (minor)
*minor = ip->minor;
+ if (revision)
+ *revision = ip->revision;
return 0;
}
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 85b8c4d4d576..ba78e15d9b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,11 +24,13 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#define DISCOVERY_TMR_SIZE (64 << 10)
+
int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
- int *major, int *minor);
+ int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
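
A hedged example of the extended lookup; GC_HWID is the hardware ID used for the graphics core elsewhere in the discovery code, and the caller may pass NULL for any out parameter it does not need:

	int major, minor, revision;

	if (!amdgpu_discovery_get_ip_version(adev, GC_HWID,
					     &major, &minor, &revision))
		DRM_INFO("GC v%d.%d.%d\n", major, minor, revision);
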
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82efc1e22e61..3cadb0b76f22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
+ drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
@@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev)
}
i++;
}
+ drm_connector_list_iter_end(&iter);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 61f108ec2b5c..e2eec7b66334 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -34,27 +34,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
/**
- * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
- * implementation
- * @obj: GEM buffer object (BO)
- *
- * Returns:
- * A scatter/gather table for the pinned pages of the BO's memory.
- */
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int npages = bo->tbo.num_pages;
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-}
-
-/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
* @obj: GEM BO
*
@@ -179,92 +164,126 @@ err_fences_put:
}
/**
- * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
+ *
+ * @dmabuf: DMA-buf to attach to
+ * @attach: attachment to add
+ *
+ * Add the attachment as user to the exported DMA-buf.
+ */
+static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ if (attach->dev->driver == adev->dev->driver)
+ return 0;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ /*
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
+ */
+	r = __dma_resv_make_exclusive(bo->tbo.base.resv);
+	if (r) {
+		/* drop the reservation taken above before bailing out */
+		amdgpu_bo_unreserve(bo);
+		return r;
+	}
+
+ bo->prime_shared_count++;
+ amdgpu_bo_unreserve(bo);
+ return 0;
+}
+
+/**
+ * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation
+ *
+ * @dmabuf: DMA-buf to remove the attachment from
+ * @attach: the attachment to remove
+ *
+ * Called when an attachment is removed from the DMA-buf.
+ */
+static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
+ bo->prime_shared_count--;
+}
+
+/**
+ * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
+ * @dir: DMA direction
*
* Makes sure that the shared DMA buffer can be accessed by the target device.
* For now, simply pins it to the GTT domain, where it should be accessible by
* all DMA devices.
*
* Returns:
- * 0 on success or a negative error code on failure.
+ * sg_table filled with the DMA addresses to use, or an ERR_PTR with a
+ * negative error code.
*/
-static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
{
+ struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct sg_table *sgt;
long r;
- r = drm_gem_map_attach(dma_buf, attach);
- if (r)
- return r;
-
- r = amdgpu_bo_reserve(bo, false);
- if (unlikely(r != 0))
- goto error_detach;
-
-
- if (attach->dev->driver != adev->dev->driver) {
- /*
- * We only create shared fences for internal use, but importers
- * of the dmabuf rely on exclusive fences for implicitly
- * tracking write hazards. As any of the current fences may
- * correspond to a write, we need to convert all existing
- * fences on the reservation object into a single exclusive
- * fence.
- */
- r = __dma_resv_make_exclusive(bo->tbo.base.resv);
- if (r)
- goto error_unreserve;
- }
-
- /* pin buffer into GTT */
r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r)
- goto error_unreserve;
+ return ERR_PTR(r);
- if (attach->dev->driver != adev->dev->driver)
- bo->prime_shared_count++;
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
-error_unreserve:
- amdgpu_bo_unreserve(bo);
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
-error_detach:
- if (r)
- drm_gem_map_detach(dma_buf, attach);
- return r;
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
}
/**
- * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
- * @dma_buf: Shared DMA buffer
+ * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
* @attach: DMA-buf attachment
+ * @sgt: sg_table to unmap
+ * @dir: DMA direction
*
* This is called when a shared DMA buffer no longer needs to be accessible by
* another device. For now, simply unpins the buffer from GTT.
*/
-static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
+static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
{
- struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int ret = 0;
-
- ret = amdgpu_bo_reserve(bo, true);
- if (unlikely(ret != 0))
- goto error;
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
amdgpu_bo_unpin(bo);
- if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
- bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
-
-error:
- drm_gem_map_detach(dma_buf, attach);
}
/**
@@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .attach = amdgpu_dma_buf_map_attach,
- .detach = amdgpu_dma_buf_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .dynamic_mapping = true,
+ .attach = amdgpu_dma_buf_attach,
+ .detach = amdgpu_dma_buf_detach,
+ .map_dma_buf = amdgpu_dma_buf_map,
+ .unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
@@ -321,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
/**
* amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
- * @dev: DRM device
* @gobj: GEM BO
* @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
*
@@ -350,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
}
/**
- * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
- * implementation
+ * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
+ *
* @dev: DRM device
- * @attach: DMA-buf attachment
- * @sg: Scatter/gather table
+ * @dma_buf: DMA-buf
*
- * Imports shared DMA buffer memory exported by another device.
+ * Creates an empty SG BO for DMA-buf import.
*
* Returns:
* A new GEM BO of the given DRM device, representing the memory
* described by the given DMA-buf attachment and scatter/gather table.
*/
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+static struct drm_gem_object *
+amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
- struct dma_resv *resv = attach->dmabuf->resv;
+ struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
int ret;
memset(&bp, 0, sizeof(bp));
- bp.size = attach->dmabuf->size;
+ bp.size = dma_buf->size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.flags = 0;
@@ -385,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto error;
- bo->tbo.sg = sg;
- bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
- if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
@@ -405,15 +419,15 @@ error:
* @dev: DRM device
* @dma_buf: Shared DMA buffer
*
- * The main work is done by the &drm_gem_prime_import helper, which in turn
- * uses &amdgpu_gem_prime_import_sg_table.
+ * Import a dma_buf into the driver and potentially create a new GEM object.
*
* Returns:
* GEM BO representing the shared DMA buffer for the given device.
*/
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
+ struct dma_buf *dma_buf)
{
+ struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
@@ -428,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
}
}
- return drm_gem_prime_import(dev, dma_buf);
+ obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ if (IS_ERR(attach)) {
+ drm_gem_object_put(obj);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
}
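
On the importer side, the dynamic attachment created here is used through the regular attachment API. A minimal sketch with error handling trimmed; in the dynamic path the core is expected to coordinate the map/unmap calls with the buffer's reservation lock:

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_dynamic_attach(dma_buf, dev, true);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

	/* ... DMA to or from the pages described by sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma_buf, attach);
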
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index 5012e6ab58f1..ec447a7b6b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -25,11 +25,6 @@
#include <drm/drm_gem.h>
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 5803fcbae22f..9cc270efee7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
@@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
if (is_support_sw_smu(adev)) {
ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
low ? &clk_freq : NULL,
- !low ? &clk_freq : NULL);
+ !low ? &clk_freq : NULL,
+ true);
if (ret)
return 0;
return clk_freq * 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 1c5c0fd76dbf..2cfb677272af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-#define amdgpu_smu_get_current_power_state(adev) \
- ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
-
-#define amdgpu_smu_set_power_state(adev) \
- ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
-
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b19157b19fa0..0ffc9447b573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -43,6 +43,8 @@
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
+
/*
* KMS wrapper.
* - 3.0.0 - initial driver
@@ -82,13 +84,12 @@
* - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
+ * - 3.36.0 - Allow reading more status registers on si/cik
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
-#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
-
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
int amdgpu_gart_size = -1; /* auto */
@@ -101,7 +102,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH];
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -128,11 +129,7 @@ char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
/* OverDrive(bit 14) disabled by default*/
uint amdgpu_pp_feature_mask = 0xffffbfff;
-int amdgpu_ngg = 0;
-int amdgpu_prim_buf_per_se = 0;
-int amdgpu_pos_buf_per_se = 0;
-int amdgpu_cntl_sb_buf_per_se = 0;
-int amdgpu_param_buf_per_se = 0;
+uint amdgpu_force_long_training = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
@@ -146,12 +143,13 @@ int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry = 1;
+int amdgpu_force_asic_type = -1;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xfffffffb;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -244,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444);
*
* The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
* multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to default timeout.
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX. The second one is for Compute.
- * And the third and fourth ones are for SDMA and Video.
+ * to the default timeout.
+ *
+ * - With one value specified, the setting will apply to all non-compute jobs.
+ * - With multiple values specified, the first one will be for GFX.
+ * The second one is for Compute. The third and fourth ones are
+ * for SDMA and Video.
+ *
* By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
* jobs is 10000. And there is no timeout enforced on compute jobs.
*/
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs."
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; "
+ "for passthrough or sriov, 10000 for all jobs."
" 0: keep default value. negative: infinity timeout), "
- "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
+ "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
/**
@@ -392,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
/**
+ * DOC: forcelongtraining (uint)
+ * Force long memory training during resume.
+ * The default is zero, which indicates short training during resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force long memory training");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+
+/**
* DOC: pcie_gen_cap (uint)
* Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
* The default is 0 (automatic for each asic).
@@ -449,42 +460,6 @@ MODULE_PARM_DESC(virtual_display,
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
/**
- * DOC: ngg (int)
- * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
- */
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
-
-/**
- * DOC: prim_buf_per_se (int)
- * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
-
-/**
- * DOC: pos_buf_per_se (int)
- * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
-
-/**
- * DOC: cntl_sb_buf_per_se (int)
- * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
-
-/**
- * DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
- * The default is 0 (depending on gfx).
- */
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
-
-/**
* DOC: job_hang_limit (int)
* Set how much time allow a job hang and not drop it. The default is 0.
*/
@@ -616,6 +591,16 @@ MODULE_PARM_DESC(noretry,
"Disable retry faults (0 = retry enabled, 1 = retry disabled (default))");
module_param_named(noretry, amdgpu_noretry, int, 0644);
+/**
+ * DOC: force_asic_type (int)
+ * A non-negative value used to specify the ASIC type for all supported GPUs.
+ */
+MODULE_PARM_DESC(force_asic_type,
+	"A non-negative value used to specify the ASIC type for all supported GPUs");
+module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
+
#ifdef CONFIG_HSA_AMD
/**
* DOC: sched_policy (int)
@@ -1023,6 +1008,7 @@ static const struct pci_device_id pciidlist[] = {
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
{0, 0, 0}
};
@@ -1085,7 +1071,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
#endif
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
if (ret)
return ret;
@@ -1128,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
+#ifdef MODULE
+ if (THIS_MODULE->state != MODULE_STATE_GOING)
+#endif
+ DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
drm_dev_put(dev);
pci_disable_device(pdev);
@@ -1141,6 +1130,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = dev->dev_private;
+ if (amdgpu_ras_intr_triggered())
+ return;
+
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
@@ -1175,8 +1167,13 @@ static int amdgpu_pmops_resume(struct device *dev)
static int amdgpu_pmops_freeze(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+ int r;
- return amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, false, true);
+ if (r)
+ return r;
+ return amdgpu_asic_reset(adev);
}
static int amdgpu_pmops_thaw(struct device *dev)
@@ -1348,66 +1345,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
-{
- char *input = amdgpu_lockup_timeout;
- char *timeout_setting = NULL;
- int index = 0;
- long timeout;
- int ret = 0;
-
- /*
- * By default timeout for non compute jobs is 10000.
- * And there is no timeout enforced on compute jobs.
- */
- adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
-
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
-
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
-
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
- }
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1)
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- }
-
- return ret;
-}
-
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -1446,8 +1383,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 571a6dfb473e..61fcf247a638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ drm_connector_list_iter_begin(dev, &iter);
/* walk the list and link encoders to connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
@@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
amdgpu_connector->devices, encoder->encoder_type);
}
}
+ drm_connector_list_iter_end(&iter);
}
struct drm_connector *
@@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->active_device & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_connector *
@@ -95,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->devices & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
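
All of the conversions above follow the same shape. The iterator variant takes an internal lock and is designed to be safe against connector hotplug, which the plain list walk was not; a minimal sketch (the loop body is illustrative):

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* the connector reference is only valid inside the loop */
		DRM_INFO("connector: %s\n", connector->name);
	}
	drm_connector_list_iter_end(&iter);
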
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 23085b352cf2..377fe20bce23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
timeout = adev->gfx_timeout;
break;
case AMDGPU_RING_TYPE_COMPUTE:
- /*
- * For non-sriov case, no timeout enforce
- * on compute ring by default. Unless user
- * specifies a timeout for compute ring.
- *
- * For sriov case, always use the timeout
- * as gfx ring
- */
- if (!amdgpu_sriov_vf(ring->adev))
- timeout = adev->compute_timeout;
- else
- timeout = adev->gfx_timeout;
+ timeout = adev->compute_timeout;
break;
case AMDGPU_RING_TYPE_SDMA:
timeout = adev->sdma_timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5e8bdded265f..19705e399905 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+ struct page *dummy_page = ttm_bo_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8ceb44925947..4277125a79ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error;
}
- r = amdgpu_vm_update_directories(adev, vm);
+ r = amdgpu_vm_update_pdes(adev, vm, false);
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the GEM UAPI flags translated into the ASIC's hardware PTE flags.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+ uint64_t pte_flag = 0;
+
+ if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ pte_flag |= AMDGPU_PTE_EXECUTABLE;
+ if (flags & AMDGPU_VM_PAGE_READABLE)
+ pte_flag |= AMDGPU_PTE_READABLE;
+ if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+ pte_flag |= AMDGPU_PTE_WRITEABLE;
+ if (flags & AMDGPU_VM_PAGE_PRT)
+ pte_flag |= AMDGPU_PTE_PRT;
+
+ if (adev->gmc.gmc_funcs->map_mtype)
+ pte_flag |= amdgpu_gmc_map_mtype(adev,
+ flags & AMDGPU_VM_MTYPE_MASK);
+
+ return pte_flag;
+}
+
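
For illustration, translating a typical read/write request; the flag names below are the existing UAPI and PTE bits, not new definitions:

	uint64_t pte;

	pte = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE |
					    AMDGPU_VM_PAGE_WRITEABLE);
	/* pte now holds AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE, plus
	 * any ASIC-specific MTYPE bits added by gmc_funcs->map_mtype */
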
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -613,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
@@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+ va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
va_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 0b66d2e6b5d5..e0f025dd1b14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f9bef3154b99..e00b46180d2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
+#include "amdgpu_ras.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe, me;
+ int i, queue, me;
for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
queue = i % adev->gfx.me.num_queue_per_pipe;
- pipe = (i / adev->gfx.me.num_queue_per_pipe)
- % adev->gfx.me.num_pipe_per_me;
me = (i / adev->gfx.me.num_queue_per_pipe)
/ adev->gfx.me.num_pipe_per_me;
@@ -320,8 +319,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
}
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq)
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
@@ -456,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
ring = &adev->gfx.kiq.ring;
- if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring)
- kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
@@ -569,3 +565,102 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ if (!adev->gfx.ras_if) {
+ adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gfx.ras_if)
+ return -ENOMEM;
+ adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
+ adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if->sub_block_index = 0;
+ strcpy(adev->gfx.ras_if->name, "gfx");
+ }
+ fs_info.head = ih_info.head = *adev->gfx.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ /* free gfx ras_if if ras is not supported */
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
+free:
+ kfree(adev->gfx.ras_if);
+ adev->gfx.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_gfx_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+	/* TODO: a UE (uncorrectable error) will trigger an interrupt.
+	 *
+	 * When "Full RAS" is enabled, the per-IP interrupt sources should
+	 * be disabled and the driver should only look for the aggregated
+	 * interrupt via sync flood.
+	 */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->gfx.funcs->query_ras_error_count)
+ adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
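+
+/* Illustrative sketch (an assumption, not part of this patch): an IP
+ * block's own late_init callback would typically pair these helpers
+ * roughly as follows, with gfx_vN_0_late_init() being hypothetical:
+ *
+ *	static int gfx_vN_0_late_init(void *handle)
+ *	{
+ *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ *
+ *		return amdgpu_gfx_ras_late_init(adev);
+ *	}
+ *
+ * with amdgpu_gfx_ras_fini(adev) called from the matching sw_fini path.
+ */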
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6d19183b478b..0ae0a2715b0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs {
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
};
-struct amdgpu_ngg_buf {
- struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- uint32_t size;
- uint32_t bo_size;
-};
-
-enum {
- NGG_PRIM = 0,
- NGG_POS,
- NGG_CNTL,
- NGG_PARAM,
- NGG_BUF_MAX
-};
-
-struct amdgpu_ngg {
- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
- uint32_t gds_reserve_addr;
- uint32_t gds_reserve_size;
- bool init;
-};
-
struct sq_work {
struct work_struct work;
unsigned ih_data;
@@ -247,7 +225,7 @@ struct amdgpu_me {
uint32_t num_me;
uint32_t num_pipe_per_me;
uint32_t num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
@@ -312,9 +290,6 @@ struct amdgpu_gfx {
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- /* NGG */
- struct amdgpu_ngg ngg;
-
/* gfx off */
bool gfx_off_state; /* true: enabled, false: disabled */
struct mutex gfx_off_mutex;
@@ -356,8 +331,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq);
-void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq);
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
@@ -385,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
-
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5790db61fa2c..a12f33c0f5df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -27,6 +27,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
@@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
gmc->fault_hash[hash].idx = gmc->last_fault++;
return false;
}
+
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
+ r = adev->umc.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
+ r = adev->mmhub.funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ return amdgpu_xgmi_ras_late_init(adev);
+}
+
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
+{
+ amdgpu_umc_ras_fini(adev);
+ amdgpu_mmhub_ras_fini(adev);
+ amdgpu_xgmi_ras_fini(adev);
+}
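+
+/* Illustrative sketch (an assumption, not part of this patch): a GMC IP
+ * implementation would typically call the aggregated helper from its
+ * late_init callback, e.g. a hypothetical gmc_vN_0_late_init():
+ *
+ *	static int gmc_vN_0_late_init(void *handle)
+ *	{
+ *		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ *
+ *		return amdgpu_gmc_ras_late_init(adev);
+ *	}
+ *
+ * which then fans out to the umc/mmhub/xgmi ras_late_init hooks above.
+ */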
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index b6e1d98ef01e..b499a3de8bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_sem;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
@@ -99,12 +100,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
+ /* map mtype to hardware flags */
+ uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
+ /* get the pte flags to use for a BO VA mapping */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags);
};
struct amdgpu_xgmi {
@@ -120,21 +124,52 @@ struct amdgpu_xgmi {
/* gpu list in the same hive */
struct list_head head;
bool supported;
+ struct ras_common_if *ras_if;
};
struct amdgpu_gmc {
+	/* FB's physical address in MMIO space (for the CPU to
+	 * map FB). This is different from the agp/gart/vram_start/end
+	 * fields, as the latter are from the GPU's view while aper_base
+	 * is from the CPU's view.
+	 */
resource_size_t aper_size;
resource_size_t aper_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
+	/* AGP aperture start and end in MC address space.
+	 * The driver finds a hole in the MC address space
+	 * to place the AGP aperture by setting the MC_VM_AGP_BOT/TOP
+	 * registers.
+	 * Under VMID0, logical address == MC address. The AGP
+	 * aperture maps to physical bus or IOVA addresses.
+	 * The AGP aperture is used to simulate FB in the ZFB case,
+	 * and also for the page table in system memory (mainly
+	 * for APUs).
+	 */
u64 agp_size;
u64 agp_start;
u64 agp_end;
+	/* GART aperture start and end in MC address space.
+	 * The driver finds a hole in the MC address space
+	 * to place the GART aperture by setting the
+	 * VM_CONTEXT0_PAGE_TABLE_START/END_ADDR registers.
+	 * Under VMID0, logical addresses inside the GART aperture are
+	 * translated through the gpuvm GART page table to access
+	 * paged system memory.
+	 */
u64 gart_size;
u64 gart_start;
u64 gart_end;
+	/* Frame buffer aperture of this GPU device. Different from
+	 * fb_start (see below), this only covers the local GPU device.
+	 * The driver gets fb_start from MC_VM_FB_LOCATION_BASE (set by
+	 * the vbios) and calculates vram_start of this local device by
+	 * adding an offset inside the XGMI hive.
+	 * Under VMID0, logical address == MC address.
+	 */
u64 vram_start;
u64 vram_end;
/* FB region, it's the same as the local vram region in a single GPU; in XGMI
@@ -153,6 +188,7 @@ struct amdgpu_gmc {
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
+ uint8_t vram_vendor;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
@@ -177,15 +213,14 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
- struct ras_common_if *umc_ras_if;
- struct ras_common_if *mmhub_ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
+#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
/**
* amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
@@ -230,5 +265,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid, uint64_t timestamp);
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 53734da1c2df..6f9289735e31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->entity.fence_context ||
+ if ((*id)->owner != vm->direct.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->entity.fence_context)
+ if ((*id)->owner != vm->direct.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->entity.fence_context;
+ id->owner = vm->direct.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 2a3f5ec298db..30d540d23b77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -87,10 +87,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
struct drm_device *dev = adev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
- list_for_each_entry(connector, &mode_config->connector_list, head)
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -153,6 +156,20 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
+
+	/* For hardware that cannot enable the bif ring for both the
+	 * ras_controller_irq and ras_err_event_athub_irq ih cookies, the
+	 * driver has to poll the status register to check whether the
+	 * interrupt has been triggered, and properly ack it if it is there.
+	 */
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+
return ret;
}
@@ -228,10 +245,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
adev->irq.msi_enabled = false;
if (amdgpu_msi_ok(adev)) {
- int ret = pci_enable_msi(adev->pdev);
- if (!ret) {
+ int nvec = pci_msix_vec_count(adev->pdev);
+ unsigned int flags;
+
+ if (nvec <= 0) {
+ flags = PCI_IRQ_MSI;
+ } else {
+ flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
+ }
+ /* we only need one vector */
+ nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+ if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI.\n");
+ dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
}
}
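+	/* Note on the allocation above: pci_alloc_irq_vectors() returns
+	 * the number of vectors allocated on success or a negative errno,
+	 * so the "nvec > 0" check covers both the MSI and MSI-X cases.
+	 * With min_vecs == max_vecs == 1 exactly one vector is requested;
+	 * the corresponding Linux IRQ number is obtained below via
+	 * pci_irq_vector(adev->pdev, 0).
+	 */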
@@ -254,7 +280,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
adev->irq.installed = true;
- r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
+ /* Use vector 0 for MSI-X */
+ r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
@@ -284,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
drm_irq_uninstall(adev->ddev);
adev->irq.installed = false;
if (adev->irq.msi_enabled)
- pci_disable_msi(adev->pdev);
+ pci_free_irq_vectors(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
}
@@ -369,7 +396,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector pointer
+ * @ih: interrupt ring instance
*
* Dispatches IRQ to IP blocks.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 96b2a31ccfed..4fb20e870e63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -248,6 +248,44 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
return fence;
}
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+ if (!rq)
+ continue;
+
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
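+/* Illustrative sketch (an assumption, not part of this patch): on a fatal
+ * RAS event the driver could drain every scheduler before teardown,
+ * roughly:
+ *
+ *	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ *		struct amdgpu_ring *ring = adev->rings[i];
+ *
+ *		if (ring && ring->sched.thread)
+ *			amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
+ *	}
+ */
+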
const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 51e62504c279..dc7ee9358dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct dma_fence **fence);
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a73206784cba..b6db28a570c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -583,9 +583,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
- vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ vram_gtt.vram_cpu_accessible_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ vram_gtt.vram_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
@@ -598,15 +601,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
- atomic64_read(&adev->vram_pin_size);
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
- mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
- atomic64_read(&adev->visible_pin_size);
+ mem.cpu_accessible_vram.usable_heap_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
@@ -732,17 +738,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
-
- if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
- dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
- dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
- dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
- dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
- }
dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
@@ -971,6 +966,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->delayed_init_work);
+
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_ERROR("RAS Intr triggered, device disabled!!");
+ return -EHWPOISON;
+ }
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..676c48c02d77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "mmhub_err_count",
+ .debugfs_name = "mmhub_err_inject",
+ };
+
+ if (!adev->mmhub.ras_if) {
+ adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->mmhub.ras_if)
+ return -ENOMEM;
+ adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
+ adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if->sub_block_index = 0;
+ strcpy(adev->mmhub.ras_if->name, "mmhub");
+ }
+ ih_info.head = fs_info.head = *adev->mmhub.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) {
+ kfree(adev->mmhub.ras_if);
+ adev->mmhub.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
+ adev->mmhub.ras_if) {
+ struct ras_common_if *ras_if = adev->mmhub.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 2d75ecfa199b..1cd78940cf82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -23,9 +23,17 @@
struct amdgpu_mmhub_funcs {
void (*ras_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
};
+struct amdgpu_mmhub {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mmhub_funcs *funcs;
+};
+
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 31d4deb5d294..828b5167ff12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -51,438 +51,107 @@
#include "amdgpu_amdkfd.h"
/**
- * struct amdgpu_mn_node
+ * amdgpu_mn_invalidate_gfx - callback to notify about mm change
*
- * @it: interval node defining start-last of the affected address range
- * @bos: list of all BOs in the affected address range
- *
- * Manages all BOs which are affected of a certain range of address space.
- */
-struct amdgpu_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
-/**
- * amdgpu_mn_destroy - destroy the HMM mirror
- *
- * @work: previously sheduled work item
- *
- * Lazy destroys the notifier from a work item
- */
-static void amdgpu_mn_destroy(struct work_struct *work)
-{
- struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = amn->adev;
- struct amdgpu_mn_node *node, *next_node;
- struct amdgpu_bo *bo, *next_bo;
-
- mutex_lock(&adev->mn_lock);
- down_write(&amn->lock);
- hash_del(&amn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node,
- &amn->objects.rb_root, it.rb) {
- list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
- }
- kfree(node);
- }
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-
- hmm_mirror_unregister(&amn->mirror);
- kfree(amn);
-}
-
-/**
- * amdgpu_hmm_mirror_release - callback to notify about mm destruction
- *
- * @mirror: the HMM mirror (mm) this callback is about
- *
- * Shedule a work item to lazy destroy HMM mirror.
- */
-static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
-
- INIT_WORK(&amn->work, amdgpu_mn_destroy);
- schedule_work(&amn->work);
-}
-
-/**
- * amdgpu_mn_lock - take the write side lock for this notifier
- *
- * @mn: our notifier
- */
-void amdgpu_mn_lock(struct amdgpu_mn *mn)
-{
- if (mn)
- down_write(&mn->lock);
-}
-
-/**
- * amdgpu_mn_unlock - drop the write side lock for this notifier
- *
- * @mn: our notifier
- */
-void amdgpu_mn_unlock(struct amdgpu_mn *mn)
-{
- if (mn)
- up_write(&mn->lock);
-}
-
-/**
- * amdgpu_mn_read_lock - take the read side lock for this notifier
- *
- * @amn: our notifier
- */
-static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
-{
- if (blockable)
- down_read(&amn->lock);
- else if (!down_read_trylock(&amn->lock))
- return -EAGAIN;
-
- return 0;
-}
-
-/**
- * amdgpu_mn_read_unlock - drop the read side lock for this notifier
- *
- * @amn: our notifier
- */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
-{
- up_read(&amn->lock);
-}
-
-/**
- * amdgpu_mn_invalidate_node - unmap all BOs of a node
- *
- * @node: the node with the BOs to unmap
- * @start: start of address range affected
- * @end: end of address range affected
+ * @mni: the range (mm) that is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
-static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
- unsigned long start,
- unsigned long end)
+static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
- continue;
-
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- }
-}
-
-/**
- * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
- *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
- *
- * Block for operations on BOs to finish and mark pages as accessed and
- * potentially dirty.
- */
-static int
-amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
- unsigned long start = update->start;
- unsigned long end = update->end;
- bool blockable = mmu_notifier_range_blockable(update);
- struct interval_tree_node *it;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
+ mutex_lock(&adev->notifier_lock);
- /* TODO we should be able to split locking for interval tree and
- * amdgpu_mn_invalidate_node
- */
- if (amdgpu_mn_read_lock(amn, blockable))
- return -EAGAIN;
+ mmu_interval_set_seq(mni, cur_seq);
- it = interval_tree_iter_first(&amn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
-
- if (!blockable) {
- amdgpu_mn_read_unlock(amn);
- return -EAGAIN;
- }
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- amdgpu_mn_invalidate_node(node, start, end);
- }
-
- amdgpu_mn_read_unlock(amn);
-
- return 0;
-}
-
-/**
- * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
- *
- * @mirror: the hmm_mirror (mm) is about to update
- * @update: the update start, end address
- *
- * We temporarily evict all BOs between start and end. This
- * necessitates evicting all user-mode queues of the process. The BOs
- * are restorted in amdgpu_mn_invalidate_range_end_hsa.
- */
-static int
-amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
-{
- struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
- unsigned long start = update->start;
- unsigned long end = update->end;
- bool blockable = mmu_notifier_range_blockable(update);
- struct interval_tree_node *it;
-
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
-
- if (amdgpu_mn_read_lock(amn, blockable))
- return -EAGAIN;
-
- it = interval_tree_iter_first(&amn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
- struct amdgpu_bo *bo;
-
- if (!blockable) {
- amdgpu_mn_read_unlock(amn);
- return -EAGAIN;
- }
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- list_for_each_entry(bo, &node->bos, mn_list) {
- struct kgd_mem *mem = bo->kfd_bo;
-
- if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, amn->mm);
- }
- }
-
- amdgpu_mn_read_unlock(amn);
-
- return 0;
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ mutex_unlock(&adev->notifier_lock);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+ return true;
}
-/* Low bits of any reasonable mm pointer will be unused due to struct
- * alignment. Use these bits to make a unique key from the mm pointer
- * and notifier type.
- */
-#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
-
-static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
- [AMDGPU_MN_TYPE_GFX] = {
- .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
- .release = amdgpu_hmm_mirror_release
- },
- [AMDGPU_MN_TYPE_HSA] = {
- .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
- .release = amdgpu_hmm_mirror_release
- },
+static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
+ .invalidate = amdgpu_mn_invalidate_gfx,
};
/**
- * amdgpu_mn_get - create HMM mirror context
+ * amdgpu_mn_invalidate_hsa - callback to notify about mm change
*
- * @adev: amdgpu device pointer
- * @type: type of MMU notifier context
+ * @mni: the range (mm) that is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
*
- * Creates a HMM mirror context for current->mm.
+ * We temporarily evict the BO attached to this range. This necessitates
+ * evicting all user-mode queues of the process.
*/
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type)
+static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct mm_struct *mm = current->mm;
- struct amdgpu_mn *amn;
- unsigned long key = AMDGPU_MN_KEY(mm, type);
- int r;
-
- mutex_lock(&adev->mn_lock);
- if (down_write_killable(&mm->mmap_sem)) {
- mutex_unlock(&adev->mn_lock);
- return ERR_PTR(-EINTR);
- }
-
- hash_for_each_possible(adev->mn_hash, amn, node, key)
- if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
- goto release_locks;
-
- amn = kzalloc(sizeof(*amn), GFP_KERNEL);
- if (!amn) {
- amn = ERR_PTR(-ENOMEM);
- goto release_locks;
- }
-
- amn->adev = adev;
- amn->mm = mm;
- init_rwsem(&amn->lock);
- amn->type = type;
- amn->objects = RB_ROOT_CACHED;
-
- amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
- r = hmm_mirror_register(&amn->mirror, mm);
- if (r)
- goto free_amn;
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
+ if (!mmu_notifier_range_blockable(range))
+ return false;
-release_locks:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
+ mutex_lock(&adev->notifier_lock);
- return amn;
+ mmu_interval_set_seq(mni, cur_seq);
-free_amn:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
- kfree(amn);
+ amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
+ mutex_unlock(&adev->notifier_lock);
- return ERR_PTR(r);
+ return true;
}
+static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
+ .invalidate = amdgpu_mn_invalidate_hsa,
+};
+
/**
* amdgpu_mn_register - register a BO for notifier updates
*
* @bo: amdgpu buffer object
* @addr: userptr addr we should monitor
*
- * Registers an HMM mirror for the given BO at the specified address.
+ * Registers an mmu interval notifier for the given BO at the specified address.
* Returns 0 on success, -ERRNO if anything goes wrong.
*/
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
- unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- enum amdgpu_mn_type type =
- bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
- struct amdgpu_mn *amn;
- struct amdgpu_mn_node *node = NULL, *new_node;
- struct list_head bos;
- struct interval_tree_node *it;
-
- amn = amdgpu_mn_get(adev, type);
- if (IS_ERR(amn))
- return PTR_ERR(amn);
-
- new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
- if (!new_node)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&bos);
-
- down_write(&amn->lock);
-
- while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &amn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node)
- node = new_node;
- else
- kfree(new_node);
-
- bo->mn = amn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &amn->objects);
-
- up_write(&amn->lock);
-
- return 0;
+ if (bo->kfd_bo)
+ return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ addr, amdgpu_bo_size(bo),
+ &amdgpu_mn_hsa_ops);
+ return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_mn_gfx_ops);
}
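+
+/* Illustrative sketch (an assumption, not part of this patch): consumers
+ * of the interval notifier bracket their page snapshots with the
+ * begin/retry sequence count, roughly:
+ *
+ *	unsigned long seq = mmu_interval_read_begin(&bo->notifier);
+ *
+ *	... snapshot the user pages ...
+ *
+ *	if (mmu_interval_read_retry(&bo->notifier, seq))
+ *		goto retry;	(an invalidation raced, start over)
+ */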
/**
- * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
+ * amdgpu_mn_unregister - unregister a BO for notifier updates
*
* @bo: amdgpu buffer object
*
- * Remove any registration of HMM mirror updates from the buffer object.
+ * Remove any registration of mmu notifier updates from the buffer object.
*/
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *amn;
- struct list_head *head;
-
- mutex_lock(&adev->mn_lock);
-
- amn = bo->mn;
- if (amn == NULL) {
- mutex_unlock(&adev->mn_lock);
+ if (!bo->notifier.mm)
return;
- }
-
- down_write(&amn->lock);
-
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
-
- if (list_empty(head)) {
- struct amdgpu_mn_node *node;
-
- node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &amn->objects);
- kfree(node);
- }
-
- up_write(&amn->lock);
- mutex_unlock(&adev->mn_lock);
-}
-
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
- 0 /* HMM_PFN_DEVICE_PRIVATE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
-void amdgpu_hmm_init_range(struct hmm_range *range)
-{
- if (range) {
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
- }
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index b8ed68943625..a292238f75eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -30,63 +30,10 @@
#include <linux/workqueue.h>
#include <linux/interval_tree.h>
-enum amdgpu_mn_type {
- AMDGPU_MN_TYPE_GFX,
- AMDGPU_MN_TYPE_HSA,
-};
-
-/**
- * struct amdgpu_mn
- *
- * @adev: amdgpu device pointer
- * @mm: process address space
- * @type: type of MMU notifier
- * @work: destruction work item
- * @node: hash table node to find structure by adev and mn
- * @lock: rw semaphore protecting the notifier nodes
- * @objects: interval tree containing amdgpu_mn_nodes
- * @mirror: HMM mirror function support
- *
- * Data for each amdgpu device and process address space.
- */
-struct amdgpu_mn {
- /* constant after initialisation */
- struct amdgpu_device *adev;
- struct mm_struct *mm;
- enum amdgpu_mn_type type;
-
- /* only used on destruction */
- struct work_struct work;
-
- /* protected by adev->mn_lock */
- struct hlist_node node;
-
- /* objects protected by lock */
- struct rw_semaphore lock;
- struct rb_root_cached objects;
-
-#ifdef CONFIG_HMM_MIRROR
- /* HMM mirror */
- struct hmm_mirror mirror;
-#endif
-};
-
#if defined(CONFIG_HMM_MIRROR)
-void amdgpu_mn_lock(struct amdgpu_mn *mn);
-void amdgpu_mn_unlock(struct amdgpu_mn *mn);
-struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-void amdgpu_hmm_init_range(struct hmm_range *range);
#else
-static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
-static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
-static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
- enum amdgpu_mn_type type)
-{
- return NULL;
-}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
new file mode 100644
index 000000000000..7d5c3a9de9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "pcie_bif_err_count",
+ .debugfs_name = "pcie_bif_err_inject",
+ };
+
+ if (!adev->nbio.ras_if) {
+ adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->nbio.ras_if)
+ return -ENOMEM;
+ adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if->sub_block_index = 0;
+ strcpy(adev->nbio.ras_if->name, "pcie_bif");
+ }
+ ih_info.head = fs_info.head = *adev->nbio.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
+ if (r)
+ goto late_fini;
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info);
+free:
+ kfree(adev->nbio.ras_if);
+ adev->nbio.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) &&
+ adev->nbio.ras_if) {
+ struct ras_common_if *ras_if = adev->nbio.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
new file mode 100644
index 000000000000..919bd566ba3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_NBIO_H__
+#define __AMDGPU_NBIO_H__
+
+/*
+ * amdgpu nbio functions
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+ u32 ref_and_mask_sdma2;
+ u32 ref_and_mask_sdma3;
+ u32 ref_and_mask_sdma4;
+ u32 ref_and_mask_sdma5;
+ u32 ref_and_mask_sdma6;
+ u32 ref_and_mask_sdma7;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ struct amdgpu_irq_src ras_controller_irq;
+ struct amdgpu_irq_src ras_err_event_athub_irq;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_nbio_funcs *funcs;
+};
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7289e1b4fb60..e3f16b49e970 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in the address space of the domain.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
+ NULL, cpu_addr);
+ if (r)
+ return r;
+
+	/*
+	 * Remove the original mem node and create a new one at the
+	 * requested position.
+	 */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.mem, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
+ amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
+
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
+ amdgpu_bo_unref(bo_ptr);
+ return r;
+}
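+
+/* Illustrative sketch (an assumption, not part of this patch): a caller
+ * that must place a buffer at a fixed VRAM offset, e.g. a region the
+ * firmware reserves for memory training, would use it roughly as:
+ *
+ *	r = amdgpu_bo_create_kernel_at(adev, offset, size,
+ *				       AMDGPU_GEM_DOMAIN_VRAM,
+ *				       &bo, NULL);
+ */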
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -451,7 +515,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
{
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
- .no_wait_gpu = false,
+ .no_wait_gpu = bp->no_wait_gpu,
.resv = bp->resv,
.flags = bp->type != ttm_bo_type_kernel ?
TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
@@ -1059,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
struct vm_area_struct *vma)
{
- return ttm_fbdev_mmap(vma, &bo->tbo);
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ return ttm_bo_mmap_obj(vma, &bo->tbo);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 658f4c9779b7..36dec51d1ef1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -30,6 +30,9 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS 3
@@ -41,6 +44,7 @@ struct amdgpu_bo_param {
u32 preferred_domain;
u64 flags;
enum ttm_bo_type type;
+ bool no_wait_gpu;
struct dma_resv *resv;
};
@@ -100,10 +104,12 @@ struct amdgpu_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
struct amdgpu_mn *mn;
- union {
- struct list_head mn_list;
- struct list_head shadow_list;
- };
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
+
+ struct list_head shadow_list;
struct kgd_mem *kfd_bo;
};
@@ -237,6 +243,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size, uint32_t domain,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 03930313c263..f205f56e3358 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
+ pm = smu_get_current_power_state(&adev->smu);
else
pm = adev->pm.dpm.user_state;
} else if (adev->powerplay.pp_funcs->get_current_power_state) {
@@ -805,8 +805,7 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
}
/**
- * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
- * pp_dpm_pcie
+ * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
*
* The amdgpu driver provides a sysfs API for adjusting what power levels
* are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
@@ -822,9 +821,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
*
* To manually adjust these states, first select manual using
* power_dpm_force_performance_level.
- * Secondly,Enter a new value for each level by inputing a string that
+ * Secondly, enter a new value for each level by inputting a string that
* contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
- * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6.
+ * E.g.,
+ *
+ * .. code-block:: bash
+ *
+ * echo "4 5 6" > pp_dpm_sclk
+ *
+ * will enable sclk levels 4, 5, and 6.
*
* NOTE: change to the dcefclk max dpm level is not supported now
*/
@@ -902,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
@@ -949,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -989,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
@@ -1029,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
@@ -1069,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
@@ -1109,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
return ret;
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
+ ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
@@ -1301,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
parameter[parameter_size] = profile_mode;
if (is_support_sw_smu(adev))
- ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
if (!ret)
@@ -2010,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
uint32_t limit = 0;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true);
+ smu_get_power_limit(&adev->smu, &limit, true, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2028,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
uint32_t limit = 0;
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false);
+ smu_get_power_limit(&adev->smu, &limit, false, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2196,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* - fan1_input: fan speed in RPM
*
- * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
+ * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
*
- * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable
+ * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
*
* hwmon interfaces for GPU clocks:
*
@@ -2825,6 +2830,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_dpm_sclk\n");
return ret;
}
+
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_mclk.store = NULL;
+
+ dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_socclk.store = NULL;
+
+ dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
+ dev_attr_pp_dpm_fclk.store = NULL;
+ }
+
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
if (ret) {
DRM_ERROR("failed to create device file pp_dpm_mclk\n");
@@ -3008,7 +3026,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
smu_handle_task(&adev->smu,
smu_dpm->dpm_level,
- AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ true);
} else {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index a46090071034..2770cba56a6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -34,6 +34,8 @@
#include "psp_v11_0.h"
#include "psp_v12_0.h"
+#include "amdgpu_ras.h"
+
static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
@@ -88,6 +90,17 @@ static int psp_sw_init(void *handle)
return ret;
}
+ ret = psp_mem_training_init(psp);
+ if (ret) {
+ DRM_ERROR("Failed to initialize memory training!\n");
+ return ret;
+ }
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
return 0;
}
@@ -95,6 +108,7 @@ static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ psp_mem_training_fini(&adev->psp);
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
@@ -151,10 +165,19 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
+ /*
+	 * Don't wait for the timeout when err_event_athub occurs, because
+	 * the gpu reset thread has been triggered and the lock resource
+	 * should be released for the psp resume sequence.
+ */
+ if (amdgpu_ras_intr_triggered())
+ break;
msleep(1);
+ amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
 /* In some cases, psp response status is not 0 even if there is no
@@ -168,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
- DRM_WARN("psp command failed and response status is (0x%X)\n",
- psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
+ DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n",
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK);
if (!timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
@@ -253,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp)
/* For ASICs support RLC autoload, psp will parse the toc
* and calculate the total size of TMR needed */
- if (psp->toc_start_addr &&
+ if (!amdgpu_sriov_vf(psp->adev) &&
+ psp->toc_start_addr &&
psp->toc_bin_size &&
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
@@ -287,15 +312,9 @@ static int psp_tmr_load(struct psp_context *psp)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
- if (ret)
- goto failed;
kfree(cmd);
- return 0;
-
-failed:
- kfree(cmd);
return ret;
}
@@ -548,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- if (!psp->adev->psp.ta_fw)
+ if (!psp->adev->psp.ta_fw ||
+ !psp->adev->psp.ta_xgmi_ucode_size ||
+ !psp->adev->psp.ta_xgmi_start_addr)
return -ENOENT;
if (!psp->xgmi_context.initialized) {
@@ -737,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->ras.ras_initialized)
return 0;
@@ -758,6 +785,18 @@ static int psp_ras_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_ras_ucode_size ||
+ !psp->adev->psp.ta_ras_start_addr) {
+ dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
@@ -772,6 +811,360 @@ static int psp_ras_initialize(struct psp_context *psp)
}
// ras end
+// HDCP start
+static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t hdcp_ta_mc,
+ uint64_t hdcp_mc_shared,
+ uint32_t hdcp_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(hdcp_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_hdcp_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for hdcp ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return ret;
+}
+
+static int psp_hdcp_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr,
+ psp->ta_hdcp_ucode_size);
+
+ psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->hdcp_context.hdcp_shared_mc_addr,
+ psp->ta_hdcp_ucode_size,
+ PSP_HDCP_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->hdcp_context.hdcp_initialized = 1;
+ psp->hdcp_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_hdcp_ucode_size ||
+ !psp->adev->psp.ta_hdcp_start_addr) {
+ dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ ret = psp_hdcp_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_hdcp_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id;
+}
+
+static int psp_hdcp_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t hdcp_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->hdcp_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_hdcp_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.hdcp_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->hdcp_context.hdcp_initialized = 0;
+
+ /* free hdcp shared memory */
+ amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
+ &psp->hdcp_context.hdcp_shared_mc_addr,
+ &psp->hdcp_context.hdcp_shared_buf);
+
+ return 0;
+}
+// HDCP end
+
+// DTM start
+static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t dtm_ta_mc,
+ uint64_t dtm_mc_shared,
+ uint32_t dtm_ta_size,
+ uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = dtm_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_dtm_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for dtm ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return ret;
+}
+
+static int psp_dtm_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+
+ psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->dtm_context.dtm_shared_mc_addr,
+ psp->ta_dtm_ucode_size,
+ PSP_DTM_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->dtm_context.dtm_initialized = 1;
+ psp->dtm_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_dtm_ucode_size ||
+ !psp->adev->psp.ta_dtm_start_addr) {
+ dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ return 0;
+ }
+
+ if (!psp->dtm_context.dtm_initialized) {
+ ret = psp_dtm_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_dtm_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t dtm_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_dtm_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.dtm_initialized)
+ return 0;
+
+ ret = psp_hdcp_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->dtm_context.dtm_initialized = 0;
+
+ /* free hdcp shared memory */
+ amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
+ &psp->dtm_context.dtm_shared_mc_addr,
+ &psp->dtm_context.dtm_shared_buf);
+
+ return 0;
+}
+// DTM end
+
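
The HDCP and DTM helpers above share one TA session pattern: load establishes a session id plus a shared buffer, invoke runs a single TA command against that session, and unload/terminate tears it down. A sketch of a caller of psp_hdcp_invoke(), with a hypothetical message struct and command id standing in for the real TA protocol:

/* Sketch of the TA invoke pattern; struct example_msg and
 * EXAMPLE_TA_CMD_QUERY are hypothetical, not the real HDCP protocol.
 */
static int example_hdcp_query(struct psp_context *psp)
{
	struct example_msg *msg = psp->hdcp_context.hdcp_shared_buf;
	int ret;

	/* the request goes into the shared buffer set up at load time */
	memset(msg, 0, PSP_HDCP_SHARED_MEM_SIZE);
	msg->cmd_id = EXAMPLE_TA_CMD_QUERY;

	ret = psp_hdcp_invoke(psp, msg->cmd_id);
	if (ret)
		return ret;

	/* the TA writes its reply back into the same shared buffer */
	return msg->status ? -EIO : 0;
}
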
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -845,6 +1238,16 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
dev_err(psp->adev->dev,
"RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
}
return 0;
@@ -1064,7 +1467,10 @@ out:
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
/*skip ucode loading in SRIOV VF */
continue;
@@ -1073,10 +1479,6 @@ out:
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
/* skip mec JT when autoload is enabled */
continue;
- /* Renoir only needs to load mec jump table one time */
- if (adev->asic_type == CHIP_RENOIR &&
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)
- continue;
psp_print_fw_hdr(psp, ucode);
@@ -1085,7 +1487,8 @@ out:
return ret;
 /* Start rlc autoload after psp received all the gfx firmware */
- if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
+ if (psp->autoload_supported && ucode->ucode_id ==
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
ret = psp_rlc_autoload(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -1210,8 +1613,11 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
- if (psp->adev->psp.ta_fw)
+ if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
+ }
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -1253,6 +1659,16 @@ static int psp_suspend(void *handle)
DRM_ERROR("Failed to terminate ras ta\n");
return ret;
}
+ ret = psp_hdcp_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate hdcp ta\n");
+ return ret;
+ }
+ ret = psp_dtm_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate dtm ta\n");
+ return ret;
+ }
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
@@ -1272,6 +1688,12 @@ static int psp_resume(void *handle)
DRM_INFO("PSP is resuming...\n");
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ DRM_ERROR("Failed to process memory training!\n");
+ return ret;
+ }
+
mutex_lock(&adev->firmware.mutex);
ret = psp_hw_start(psp);
@@ -1311,9 +1733,6 @@ int psp_rlc_autoload_start(struct psp_context *psp)
int ret;
struct psp_gfx_cmd_resp *cmd;
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index bc0947f6bc8a..09c5474ebcc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -37,6 +37,9 @@
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
+#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
+#define PSP_DTM_SHARED_MEM_SIZE 0x4000
+#define PSP_SHARED_MEM_SIZE 0x4000
struct psp_context;
struct psp_xgmi_node_info;
@@ -46,6 +49,8 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SYSDRV = 0x10000,
PSP_BL__LOAD_SOSDRV = 0x20000,
PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+ PSP_BL__DRAM_LONG_TRAIN = 0x100000,
+ PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
};
enum psp_ring_type
@@ -108,6 +113,9 @@ struct psp_funcs
struct ta_ras_trigger_error_input *info);
int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
int (*rlc_autoload_start)(struct psp_context *psp);
+ int (*mem_training_init)(struct psp_context *psp);
+ void (*mem_training_fini)(struct psp_context *psp);
+ int (*mem_training)(struct psp_context *psp, uint32_t ops);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -142,6 +150,65 @@ struct psp_ras_context {
struct amdgpu_ras *ras;
};
+struct psp_hdcp_context {
+ bool hdcp_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *hdcp_shared_bo;
+ uint64_t hdcp_shared_mc_addr;
+ void *hdcp_shared_buf;
+};
+
+struct psp_dtm_context {
+ bool dtm_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *dtm_shared_bo;
+ uint64_t dtm_shared_mc_addr;
+ void *dtm_shared_buf;
+};
+
+#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
+#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+
+enum psp_memory_training_init_flag {
+ PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+ PSP_MEM_TRAIN_SUPPORT = 0x1,
+ PSP_MEM_TRAIN_INIT_FAILED = 0x2,
+ PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
+ PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
+};
+
+enum psp_memory_training_ops {
+ PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
+ PSP_MEM_TRAIN_SAVE = 0x2,
+ PSP_MEM_TRAIN_RESTORE = 0x4,
+ PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
+ PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
+ PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
+};
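
The ops values form a bitmask, with PSP_MEM_TRAIN_COLD_BOOT aliasing the long-training message (sent from psp_sw_init() above) and PSP_MEM_TRAIN_RESUME the short one (sent from psp_resume()). A rough sketch of how an ASIC backend might dispatch on the mask; the example_* helpers are hypothetical:

static int example_mem_training(struct psp_context *psp, uint32_t ops)
{
	int ret = 0;

	/* hypothetical helpers, shown only to illustrate the bitmask */
	if (ops & PSP_MEM_TRAIN_SAVE)
		ret = example_save_training_data(psp);
	if (!ret && (ops & PSP_MEM_TRAIN_RESTORE))
		ret = example_restore_training_data(psp);
	if (!ret && (ops & PSP_MEM_TRAIN_SEND_LONG_MSG))
		ret = example_bl_cmd(psp, PSP_BL__DRAM_LONG_TRAIN);
	if (!ret && (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG))
		ret = example_bl_cmd(psp, PSP_BL__DRAM_SHORT_TRAIN);

	return ret;
}
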
+
+struct psp_memory_training_context {
+	/* training data size */
+ u64 train_data_size;
+ /*
+ * sys_cache
+ * cpu virtual address
+	 * system memory buffer used to store the training data.
+ */
+ void *sys_cache;
+
+	/* vram offset of the p2c training data */
+ u64 p2c_train_data_offset;
+ struct amdgpu_bo *p2c_bo;
+
+	/* vram offset of the c2p training data */
+ u64 c2p_train_data_offset;
+ struct amdgpu_bo *c2p_bo;
+
+ enum psp_memory_training_init_flag init;
+ u32 training_cnt;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -206,9 +273,21 @@ struct psp_context
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_ucode_size;
uint8_t *ta_ras_start_addr;
+
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_ucode_size;
+ uint8_t *ta_hdcp_start_addr;
+
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_ucode_size;
+ uint8_t *ta_dtm_start_addr;
+
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras;
+ struct psp_hdcp_context hdcp_context;
+ struct psp_dtm_context dtm_context;
struct mutex mutex;
+ struct psp_memory_training_context mem_train_ctx;
};
struct amdgpu_psp_funcs {
@@ -251,6 +330,12 @@ struct amdgpu_psp_funcs {
(psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
#define psp_rlc_autoload(psp) \
((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
+#define psp_mem_training_init(psp) \
+ ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
+#define psp_mem_training_fini(psp) \
+ ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0)
+#define psp_mem_training(psp, ops) \
+ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
@@ -279,6 +364,8 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 016ea274b955..404483437bd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -25,10 +25,13 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
"none",
@@ -65,11 +68,16 @@ const char *ras_block_string[] = {
/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr);
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr);
+enum amdgpu_ras_retire_page_reservation {
+ AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+ AMDGPU_RAS_RETIRE_PAGE_PENDING,
+ AMDGPU_RAS_RETIRE_PAGE_FAULT,
+};
+
+atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr);
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
@@ -189,6 +197,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
return 0;
}
+
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
/**
* DOC: AMDGPU RAS debugfs control interface
*
@@ -208,31 +220,44 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* As their names indicate, inject operation will write the
* value to the address.
*
- * Second member: struct ras_debug_if::op.
+ * The second member: struct ras_debug_if::op.
* It has three kinds of operations.
- * 0: disable RAS on the block. Take ::head as its data.
- * 1: enable RAS on the block. Take ::head as its data.
- * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * - 0: disable RAS on the block. Take ::head as its data.
+ * - 1: enable RAS on the block. Take ::head as its data.
+ * - 2: inject errors on the block. Take ::inject as its data.
*
* How to use the interface?
- * programs:
- * copy the struct ras_debug_if in your codes and initialize it.
- * write the struct to the control node.
*
- * bash:
- * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, smda, gfx, .........
- * see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block: sub block index, pass 0 if there is no sub block
+ * Programs
+ *
+ * Copy the struct ras_debug_if into your code and initialize it.
+ * Write the struct to the control node.
+ *
+ * Shells
+ *
+ * .. code-block:: bash
+ *
+ * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ *
+ * Parameters:
+ *
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: error, address, value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ * sub_block:
+ * sub block index, pass 0 if there is no sub block
+ *
+ * here are some examples for bash commands:
+ *
+ * .. code-block:: bash
*
- * here are some examples for bash commands,
* echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
@@ -245,8 +270,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* For inject, please check corresponding err count at
* /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
*
- * NOTE: operation is only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * .. note::
+ * Operations are only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * to see which blocks support RAS on a particular asic.
+ *
*/
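
For the "Programs" path described above, a userspace sketch follows. The struct mirror is an assumption copied by hand; it must match the running kernel's amdgpu_ras.h, and the write is only accepted if the size matches what the driver expects:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* hand-copied mirror of the driver's layout; verify against amdgpu_ras.h */
struct ras_common_if {
	int block;
	int type;
	uint32_t sub_block_index;
	char name[32];
};

struct ras_inject_if {
	struct ras_common_if head;
	uint64_t address;
	uint64_t value;
};

struct ras_debug_if {
	union {
		struct ras_common_if head;
		struct ras_inject_if inject;
	};
	int op;		/* 0: disable, 1: enable, 2: inject */
};

int main(void)
{
	struct ras_debug_if data;
	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

	if (fd < 0)
		return 1;
	memset(&data, 0, sizeof(data));
	data.op = 1;			/* enable RAS on one block */
	data.head.block = 0;		/* block index, e.g. umc */
	if (write(fd, &data, sizeof(data)) != (ssize_t)sizeof(data)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
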
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
@@ -276,6 +304,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
break;
}
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ amdgpu_ras_check_bad_page(adev, data.inject.address)) {
+ DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ data.inject.address);
+ break;
+ }
+
/* data.inject.address is offset instead of absolute gpu address */
ret = amdgpu_ras_error_inject(adev, &data.inject);
break;
@@ -290,6 +326,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
return size;
}
+/**
+ * DOC: AMDGPU RAS debugfs EEPROM table reset interface
+ *
+ * Some boards contain an EEPROM which is used to persistently store a list of
+ * bad pages which experienced ECC errors in vram. This interface provides
+ * a way to reset the EEPROM, e.g., after testing error injection.
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo 1 > ../ras/ras_eeprom_reset
+ *
+ * will reset EEPROM table to 0 entries.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+ ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control);
+
+ return ret == 1 ? size : -EIO;
+}
+
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.owner = THIS_MODULE,
.read = NULL,
@@ -297,6 +360,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_eeprom_write,
+ .llseek = default_llseek
+};
+
+/**
+ * DOC: AMDGPU RAS sysfs Error Count Interface
+ *
+ * It allows the user to read the error count for each IP block on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * It outputs multiple lines which report the uncorrected (ue) and corrected
+ * (ce) error counts.
+ *
+ * The format of each line is as follows:
+ *
+ * [ce|ue]: count
+ *
+ * Example:
+ *
+ * .. code-block:: bash
+ *
+ * ue: 0
+ * ce: 1
+ *
+ */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -475,15 +566,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
return 0;
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
- if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
- if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ if (!amdgpu_ras_intr_triggered()) {
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ if (ret == TA_RAS_STATUS__RESET_NEEDED)
+ return -EAGAIN;
+ return -EINVAL;
+ }
}
/* setup the obj */
@@ -615,8 +708,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub_funcs->query_ras_error_count)
- adev->mmhub_funcs->query_ras_error_count(adev, &err_data);
+ if (adev->mmhub.funcs->query_ras_error_count)
+ adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ if (adev->nbio.funcs->query_ras_error_count)
+ adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
@@ -628,12 +725,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ue_count = obj->err_data.ue_count;
info->ce_count = obj->err_data.ce_count;
- if (err_data.ce_count)
+ if (err_data.ce_count) {
dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
obj->err_data.ce_count, ras_block_str(info->head.block));
- if (err_data.ue_count)
+ }
+ if (err_data.ue_count) {
dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
obj->err_data.ue_count, ras_block_str(info->head.block));
+ }
return 0;
}
@@ -664,6 +763,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
default:
@@ -723,18 +824,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
switch (flags) {
- case 0:
+ case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
return "R";
- case 1:
+ case AMDGPU_RAS_RETIRE_PAGE_PENDING:
return "P";
- case 2:
+ case AMDGPU_RAS_RETIRE_PAGE_FAULT:
default:
return "F";
};
}
-/*
- * DOC: ras sysfs gpu_vram_bad_pages interface
+/**
+ * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
*
 * It allows the user to read the bad pages of vram on the gpu through
* /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
@@ -746,14 +847,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
*
* gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below:
+ *
* R: reserved, this gpu page is reserved and not able to use.
+ *
* P: pending for reserve, this gpu page is marked as bad, will be reserved
- * in next window of page_reserve.
+ * in next window of page_reserve.
+ *
* F: unable to reserve. this gpu page can't be reserved due to some reasons.
*
- * examples:
- * 0x00000001 : 0x00001000 : R
- * 0x00000002 : 0x00001000 : P
+ * Examples:
+ *
+ * .. code-block:: bash
+ *
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ *
*/
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
@@ -927,6 +1035,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
}
/* sysfs end */
+/**
+ * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+ *
+ * Normally when there is an uncorrectable error, the driver will reset
+ * the GPU to recover. However, for unrecoverable errors the driver
+ * provides an interface to reboot the system automatically instead.
+ *
+ * The following file in debugfs provides that interface:
+ * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo true > .../ras/auto_reboot
+ *
+ */
/* debugfs begin */
static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
@@ -934,8 +1060,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
con->dir = debugfs_create_dir("ras", minor->debugfs_root);
- con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_eeprom_ops);
+
+ /*
+	 * After an uncorrectable error happens, GPU recovery will usually
+	 * be scheduled. But due to a known problem where GPU recovery can
+	 * fail to bring the GPU back, the interface below gives the user a
+	 * direct way to have the system rebooted automatically when an
+	 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
+	 * routine is never called in that case.
+ */
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
+ &con->reboot);
}
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
@@ -980,10 +1119,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
amdgpu_ras_debugfs_remove(adev, &obj->head);
}
- debugfs_remove(con->ent);
- debugfs_remove(con->dir);
+ debugfs_remove_recursive(con->dir);
con->dir = NULL;
- con->ent = NULL;
}
/* debugfs end */
@@ -1188,15 +1325,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
for (; i < data->count; i++) {
(*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].bp,
+ .bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
- .flags = 0,
+ .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
if (data->last_reserved <= i)
- (*bps)[i].flags = 1;
- else if (data->bps[i].bo == NULL)
- (*bps)[i].flags = 2;
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (data->bps_bo[i] == NULL)
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
*count = data->count;
@@ -1214,105 +1351,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
atomic_set(&ras->in_recovery, 0);
}
-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
- struct amdgpu_bo **bo_ptr)
-{
- /* no need to free it actually. */
- amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
- return 0;
-}
-
-/* reserve vram with size@offset */
-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
- uint64_t offset, uint64_t size,
- struct amdgpu_bo **bo_ptr)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- struct amdgpu_bo *bo;
-
- if (bo_ptr)
- *bo_ptr = NULL;
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
-
- r = amdgpu_bo_create(adev, &bp, &bo);
- if (r)
- return -EINVAL;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r)
- goto error_reserve;
-
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- offset,
- offset + size);
- if (r)
- goto error_pin;
-
- if (bo_ptr)
- *bo_ptr = bo;
-
- amdgpu_bo_unreserve(bo);
- return r;
-
-error_pin:
- amdgpu_bo_unreserve(bo);
-error_reserve:
- amdgpu_bo_unref(&bo);
- return r;
-}
-
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
struct ras_err_handler_data *data, int pages)
{
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
- unsigned int align_space = ALIGN(new_space, 1024);
- void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
-
- if (!tmp)
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ struct amdgpu_bo **bps_bo =
+ kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL);
+
+ if (!bps || !bps_bo) {
+ kfree(bps);
+ kfree(bps_bo);
return -ENOMEM;
+ }
if (data->bps) {
- memcpy(tmp, data->bps,
+ memcpy(bps, data->bps,
data->count * sizeof(*data->bps));
kfree(data->bps);
}
+ if (data->bps_bo) {
+ memcpy(bps_bo, data->bps_bo,
+ data->count * sizeof(*data->bps_bo));
+ kfree(data->bps_bo);
+ }
- data->bps = tmp;
+ data->bps = bps;
+ data->bps_bo = bps_bo;
data->space_left += align_space - old_space;
return 0;
}
/* it deal with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages)
+ struct eeprom_table_record *bps, int pages)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = pages;
int ret = 0;
if (!con || !con->eh_data || !bps || pages <= 0)
@@ -1329,24 +1407,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- while (i--)
- data->bps[data->count++].bp = bps[i];
-
+ memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
+ data->count += pages;
data->space_left -= pages;
+
out:
mutex_unlock(&con->recovery_lock);
return ret;
}
+/*
+ * write the error record array to eeprom; the caller should hold
+ * recovery_lock
+ */
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_ras_eeprom_control *control;
+ int save_count;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ control = &con->eeprom_control;
+ data = con->eh_data;
+ save_count = data->count - control->num_recs;
+ /* only new entries are saved */
+ if (save_count > 0)
+ if (amdgpu_ras_eeprom_process_recods(control,
+ &data->bps[control->num_recs],
+ true,
+ save_count)) {
+ DRM_ERROR("Failed to save EEPROM table data!");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * read error record array in eeprom and reserve enough space for
+ * storing new bad pages
+ */
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras.ras->eeprom_control;
+ struct eeprom_table_record *bps = NULL;
+ int ret = 0;
+
+ /* no bad page record, skip eeprom access */
+ if (!control->num_recs)
+ return ret;
+
+ bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ if (amdgpu_ras_eeprom_process_recods(control, bps, false,
+ control->num_recs)) {
+ DRM_ERROR("Failed to load EEPROM table records!");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
+
+out:
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * check if an address belongs to a bad page
+ *
+ * Note: this check is only for umc block
+ */
+static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i;
+ bool ret = false;
+
+ if (!con || !con->eh_data)
+ return ret;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page) {
+ ret = true;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
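
Putting these helpers together, an error handler that has decoded fresh retired pages appends them and then pins and persists them. A rough sketch; the record contents would come from the UMC error query:

static int example_retire_pages(struct amdgpu_device *adev,
				struct eeprom_table_record *recs, int n)
{
	int ret;

	/* append to the in-memory bad page array (takes recovery_lock) */
	ret = amdgpu_ras_add_bad_pages(adev, recs, n);
	if (ret)
		return ret;

	/* reserve the pages in vram and flush new records to eeprom */
	return amdgpu_ras_reserve_bad_pages(adev);
}
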
/* called in gpu recovery/init */
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
uint64_t bp;
- struct amdgpu_bo *bo;
- int i;
+ struct amdgpu_bo *bo = NULL;
+ int i, ret = 0;
if (!con || !con->eh_data)
return 0;
@@ -1357,18 +1531,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
goto out;
/* reserve vram at driver post stage. */
for (i = data->last_reserved; i < data->count; i++) {
- bp = data->bps[i].bp;
+ bp = data->bps[i].retired_page;
- if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
- PAGE_SIZE, &bo))
- DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+		/* There are two cases where a reserve error should be ignored:
+		 * 1) a ras bad page has been allocated (is in use by someone);
+		 * 2) a ras bad page has already been reserved (duplicate error
+		 *    injection for one page)
+ */
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &bo, NULL))
+ DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i + 1;
+ bo = NULL;
}
+
+	/* continue to save bad pages to eeprom even if reserve_vram fails */
+ ret = amdgpu_ras_save_bad_pages(adev);
out:
mutex_unlock(&con->recovery_lock);
- return 0;
+ return ret;
}
/* called when driver unload */
@@ -1388,11 +1573,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
goto out;
for (i = data->last_reserved - 1; i >= 0; i--) {
- bo = data->bps[i].bo;
+ bo = data->bps_bo[i];
- amdgpu_ras_release_vram(adev, &bo);
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
- data->bps[i].bo = bo;
+ data->bps_bo[i] = bo;
data->last_reserved = i;
}
out:
@@ -1400,41 +1585,54 @@ out:
return 0;
}
-static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * write the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
-{
- /* TODO
- * read the array to eeprom when SMU disabled.
- */
- return 0;
-}
-
-static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_err_handler_data **data = &con->eh_data;
+ struct ras_err_handler_data **data;
+ int ret;
- *data = kmalloc(sizeof(**data),
- GFP_KERNEL|__GFP_ZERO);
- if (!*data)
- return -ENOMEM;
+ if (con)
+ data = &con->eh_data;
+ else
+ return 0;
+
+ *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
+ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_init(&con->recovery_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
con->adev = adev;
- amdgpu_ras_load_bad_pages(adev);
- amdgpu_ras_reserve_bad_pages(adev);
+ ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
+ if (ret)
+ goto free;
+
+ if (con->eeprom_control.num_recs) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret)
+ goto free;
+ ret = amdgpu_ras_reserve_bad_pages(adev);
+ if (ret)
+ goto release;
+ }
return 0;
+
+release:
+ amdgpu_ras_release_bad_pages(adev);
+free:
+ kfree((*data)->bps);
+ kfree((*data)->bps_bo);
+ kfree(*data);
+ con->eh_data = NULL;
+out:
+ DRM_WARN("Failed to initialize ras recovery!\n");
+
+ return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
@@ -1442,13 +1640,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data = con->eh_data;
+	/* recovery_init failed to init it, so fini has nothing to do */
+ if (!data)
+ return 0;
+
cancel_work_sync(&con->recovery_work);
- amdgpu_ras_save_bad_pages(adev);
amdgpu_ras_release_bad_pages(adev);
mutex_lock(&con->recovery_lock);
con->eh_data = NULL;
kfree(data->bps);
+ kfree(data->bps_bo);
kfree(data);
mutex_unlock(&con->recovery_lock);
@@ -1500,6 +1702,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int r;
if (con)
return 0;
@@ -1527,31 +1730,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (amdgpu_ras_recovery_init(adev))
- goto recovery_out;
+ if (adev->nbio.funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (r)
+ return r;
+ }
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- /* ras init for each ras block */
- if (adev->umc.funcs->ras_init)
- adev->umc.funcs->ras_init(adev);
-
DRM_INFO("RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
fs_out:
- amdgpu_ras_recovery_fini(adev);
-recovery_out:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
return -EINVAL;
}
+/* helper function to handle common stuff in ip late init phase */
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info)
+{
+ int r;
+
+ /* disable RAS feature per IP block if it is not supported */
+ if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ return 0;
+ }
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
+ if (r) {
+ if (r == -EAGAIN) {
+ /* request gpu reset. will run again */
+ amdgpu_ras_request_reset_on_boot(adev,
+ ras_block->block);
+ return 0;
+ } else if (adev->in_suspend || adev->in_gpu_reset) {
+ /* in resume phase, if fail to enable ras,
+ * clean up all ras fs nodes, and disable ras */
+ goto cleanup;
+ } else
+ return r;
+ }
+
+ /* in resume phase, no need to create ras fs node */
+ if (adev->in_suspend || adev->in_gpu_reset)
+ return 0;
+
+ if (ih_info->cb) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
+ if (r)
+ goto interrupt;
+ }
+
+ amdgpu_ras_debugfs_create(adev, fs_info);
+
+ r = amdgpu_ras_sysfs_create(adev, fs_info);
+ if (r)
+ goto sysfs;
+
+ return 0;
+cleanup:
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+ return r;
+}
+
+/* helper function to remove ras fs node and interrupt handler */
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info)
+{
+ if (!ras_block || !ih_info)
+ return;
+
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+ amdgpu_ras_debugfs_remove(adev, ras_block);
+ if (ih_info->cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ih_info);
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+}
+
/* do some init work after IP late init as dependence.
* and it runs in resume/gpu reset/booting up cases.
*/
@@ -1645,3 +1923,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
return 0;
}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+{
+ uint32_t hw_supported, supported;
+
+ amdgpu_ras_check_supported(adev, &hw_supported, &supported);
+ if (!hw_supported)
+ return;
+
+ if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+
+ amdgpu_ras_reset_gpu(adev, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 6c76bb2a6843..f80fd3428c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -317,8 +317,6 @@ struct amdgpu_ras {
struct list_head head;
/* debugfs */
struct dentry *dir;
- /* debugfs ctrl */
- struct dentry *ent;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -334,7 +332,7 @@ struct amdgpu_ras {
struct mutex recovery_lock;
uint32_t flags;
-
+ bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
};
@@ -347,15 +345,14 @@ struct ras_err_data {
unsigned long ue_count;
unsigned long ce_count;
unsigned long err_addr_cnt;
- uint64_t *err_addr;
+ struct eeprom_table_record *err_addr;
};
struct ras_err_handler_data {
- /* point to bad pages array */
- struct {
- unsigned long bp;
- struct amdgpu_bo *bo;
- } *bps;
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* point to reserved bo array */
+ struct amdgpu_bo **bps_bo;
/* the count of entries */
int count;
/* the space can place new entries */
@@ -365,7 +362,7 @@ struct ras_err_handler_data {
};
typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
struct ras_ih_data {
@@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
return ras && (ras->supported & (1 << block));
}
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
unsigned int block);
@@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
- unsigned long *bps, int pages);
+ struct eeprom_table_record *bps, int pages);
int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
@@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ /* save bad page to eeprom before gpu reset,
+ * i2c may be unstable in gpu reset
+ */
+ if (in_task())
+ amdgpu_ras_reserve_bad_pages(adev);
+
if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
schedule_work(&ras->recovery_work);
return 0;
@@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
int amdgpu_ras_init(struct amdgpu_device *adev);
int amdgpu_ras_fini(struct amdgpu_device *adev);
int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_fs_if *fs_info,
+ struct ras_ih_if *ih_info);
+void amdgpu_ras_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_ih_if *ih_info);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
@@ -599,4 +610,14 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info);
+
+extern atomic_t amdgpu_ras_in_intr;
+
+static inline bool amdgpu_ras_intr_triggered(void)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 8a32b5c93778..7de16c0c2f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -100,7 +100,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
return ret;
}
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control);
+
+static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
+{
+ int i;
+ uint32_t tbl_sum = 0;
+
+ /* Header checksum, skip checksum field in the calculation */
+ for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
+ tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
+
+ return tbl_sum;
+}
+
+static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
+ int num)
+{
+ int i, j;
+ uint32_t tbl_sum = 0;
+
+ /* Records checksum */
+ for (i = 0; i < num; i++) {
+ struct eeprom_table_record *record = &records[i];
+
+ for (j = 0; j < sizeof(*record); j++) {
+ tbl_sum += *(((unsigned char *)record) + j);
+ }
+ }
+
+ return tbl_sum;
+}
+
+static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
+}
+
+/* Checksum = 256 - ((sum of all table entries) mod 256) */
+static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num,
+ uint32_t old_hdr_byte_sum)
+{
+ /*
+ * This will update the table sum with new records.
+ *
+ * TODO: What happens when the EEPROM table is to be wrapped around
+ * and old records from start will get overridden.
+ */
+
+ /* need to recalculate updated header byte sum */
+ control->tbl_byte_sum -= old_hdr_byte_sum;
+ control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
+
+ control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
+}
+
+/* table sum mod 256 + checksum must equal 256 */
+static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, int num)
+{
+ control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
+
+ if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
+ DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
+ return false;
+ }
+
+ return true;
+}
+
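
The checksum helpers above maintain the invariant checksum = 256 - (byte_sum mod 256), so validation reduces to checking checksum + (byte_sum mod 256) == 256. A standalone numeric check of that identity:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t byte_sum = 4660;	/* arbitrary example table sum */
	uint32_t checksum = 256 - (byte_sum % 256);

	/* 4660 % 256 == 52, so checksum == 204 and 204 + 52 == 256 */
	assert(checksum + (byte_sum % 256) == 256);
	printf("sum %% 256 = %u, checksum = %u\n", byte_sum % 256, checksum);
	return 0;
}
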
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
+{
+ unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ int ret = 0;
+
+ mutex_lock(&control->tbl_mutex);
+
+ hdr->header = EEPROM_TABLE_HDR_VAL;
+ hdr->version = EEPROM_TABLE_VER;
+ hdr->first_rec_offset = EEPROM_RECORD_START;
+ hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
+
+ control->tbl_byte_sum = 0;
+ __update_tbl_checksum(control, NULL, 0, 0);
+ control->next_addr = EEPROM_RECORD_START;
+
+ ret = __update_table_header(control, buff);
+
+ mutex_unlock(&control->tbl_mutex);
+
+ return ret;
+}
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
@@ -122,6 +216,10 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
break;
+ case CHIP_ARCTURUS:
+ ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
+ break;
+
default:
return 0;
}
@@ -143,25 +241,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
if (hdr->header == EEPROM_TABLE_HDR_VAL) {
control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
EEPROM_TABLE_RECORD_SIZE;
+ control->tbl_byte_sum = __calc_hdr_byte_sum(control);
+ control->next_addr = EEPROM_RECORD_START;
+
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
control->num_recs);
} else {
DRM_INFO("Creating new EEPROM table");
- hdr->header = EEPROM_TABLE_HDR_VAL;
- hdr->version = EEPROM_TABLE_VER;
- hdr->first_rec_offset = EEPROM_RECORD_START;
- hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
-
- adev->psp.ras.ras->eeprom_control.tbl_byte_sum =
- __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control);
- ret = __update_table_header(control, buff);
+ ret = amdgpu_ras_eeprom_reset_table(control);
}
- /* Start inserting records from here */
- adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START;
-
return ret == 1 ? 0 : -EIO;
}
@@ -173,6 +264,9 @@ void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
case CHIP_VEGA20:
smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
break;
+ case CHIP_ARCTURUS:
+ smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
+ break;
default:
return;
@@ -226,8 +320,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co
record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
i += 6;
- buff[i++] = record->mem_channel;
- buff[i++] = record->mcumc_id;
+ record->mem_channel = buff[i++];
+ record->mcumc_id = buff[i++];
memcpy(&tmp, buff + i, 6);
record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
@@ -266,87 +360,18 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
-
-static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
-{
- int i;
- uint32_t tbl_sum = 0;
-
- /* Header checksum, skip checksum field in the calculation */
- for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
- tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
-
- return tbl_sum;
-}
-
-static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records,
- int num)
-{
- int i, j;
- uint32_t tbl_sum = 0;
-
- /* Records checksum */
- for (i = 0; i < num; i++) {
- struct eeprom_table_record *record = &records[i];
-
- for (j = 0; j < sizeof(*record); j++) {
- tbl_sum += *(((unsigned char *)record) + j);
- }
- }
-
- return tbl_sum;
-}
-
-static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
-}
-
-/* Checksum = 256 -((sum of all table entries) mod 256) */
-static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num,
- uint32_t old_hdr_byte_sum)
-{
- /*
- * This will update the table sum with new records.
- *
- * TODO: What happens when the EEPROM table is to be wrapped around
- * and old records from start will get overridden.
- */
-
- /* need to recalculate updated header byte sum */
- control->tbl_byte_sum -= old_hdr_byte_sum;
- control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
-
- control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
-}
-
-/* table sum mod 256 + checksum must equals 256 */
-static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
- struct eeprom_table_record *records, int num)
-{
- control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
-
- if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
- DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
- return false;
- }
-
- return true;
-}
-
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
bool write,
int num)
{
int i, ret = 0;
- struct i2c_msg *msgs;
- unsigned char *buffs;
+ struct i2c_msg *msgs, *msg;
+ unsigned char *buffs, *buff;
+ struct eeprom_table_record *record;
struct amdgpu_device *adev = to_amdgpu_device(control);
- if (adev->asic_type != CHIP_VEGA20)
+ if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS)
return 0;
buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
@@ -373,9 +398,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
* 256b
*/
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
- struct i2c_msg *msg = &msgs[i];
+ buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
+ msg = &msgs[i];
control->next_addr = __correct_eeprom_dest_address(control->next_addr);
@@ -415,8 +440,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
if (!write) {
for (i = 0; i < num; i++) {
- unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
- struct eeprom_table_record *record = &records[i];
+ buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
+ record = &records[i];
__decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index 41f3fcb9a29b..622269957c1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -79,6 +79,7 @@ struct eeprom_table_record {
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 5c13c503e61f..6010999d9020 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#define AMDGPU_CSA_SDMA_SIZE 64
/* SDMA CSA reside in the 3rd page of CSA */
@@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
return csa_mc_addr;
}
+
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info)
+{
+ int r, i;
+ struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+
+ if (!ih_info)
+ return -EINVAL;
+
+ if (!adev->sdma.ras_if) {
+ adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->sdma.ras_if)
+ return -ENOMEM;
+ adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
+ adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if->sub_block_index = 0;
+ strcpy(adev->sdma.ras_if->name, "sdma");
+ }
+ fs_info.head = ih_info->head = *adev->sdma.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->sdma.ras_if,
+ &fs_info, ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (r)
+ goto late_fini;
+ }
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info);
+free:
+ kfree(adev->sdma.ras_if);
+ adev->sdma.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ /* the cb member will not be used by
+ * amdgpu_ras_interrupt_remove_handler, init it only
+ * to cheat the check in ras_late_fini
+ */
+ .cb = amdgpu_sdma_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
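
amdgpu_sdma_ras_late_init() above follows the kernel's goto-unwind convention: each acquired resource gets a label that tears down everything acquired so far, in reverse order, and the pointer is NULLed after freeing so later paths see a consistent state. A generic sketch of the shape; all types and helpers here are hypothetical, not driver API:

int example_late_init(struct example_state *st)
{
	int r;

	st->obj = kmalloc(sizeof(*st->obj), GFP_KERNEL);
	if (!st->obj)
		return -ENOMEM;

	r = example_register(st->obj);		/* hypothetical */
	if (r)
		goto free;

	r = example_enable_irq(st->obj);	/* hypothetical */
	if (r)
		goto unregister;

	return 0;

unregister:
	example_unregister(st->obj);		/* undo in reverse order */
free:
	kfree(st->obj);
	st->obj = NULL;				/* no dangling pointer */
	return r;
}
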
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index a9ae0d8a0589..761ff8be6314 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -104,4 +104,13 @@ struct amdgpu_sdma_instance *
amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ void *ras_ih_info);
+void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b66d29d5ffa2..b158230af8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(vram_obj, &vram_map);
if (r) {
@@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
dma_fence_put(fence);
+ fence = NULL;
r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
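
The two `fence = NULL` additions above are double-put guards: later error paths in amdgpu_do_test_moves() also call dma_fence_put(fence), which is a no-op on NULL but would drop a reference twice on a stale pointer. The idiom in isolation:

	dma_fence_put(fence);	/* drop our reference */
	fence = NULL;		/* a later error path may put again; NULL makes that a no-op */
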
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 77674a7b9616..63e734a125fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__field(unsigned int, context)
__field(unsigned int, seqno)
__field(struct dma_fence *, fence)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
@@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__field(unsigned int, context)
__field(unsigned int, seqno)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
@@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job,
__assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __assign_str(ring, to_amdgpu_ring(job->base.sched)->name)
__entry->num_ibs = job->num_ibs;
),
TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
__entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint64_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
+ uint32_t incr, uint64_t flags, bool direct),
+ TP_ARGS(pe, addr, count, incr, flags, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
__field(u64, flags)
+ __field(bool, direct)
),
TP_fast_assign(
@@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
+ "direct=%d", __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count, __entry->direct)
);
TRACE_EVENT(amdgpu_vm_copy_ptes,
- TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
- TP_ARGS(pe, src, count),
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct),
+ TP_ARGS(pe, src, count, direct),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, src)
__field(u32, count)
+ __field(bool, direct)
),
TP_fast_assign(
__entry->pe = pe;
__entry->src = src;
__entry->count = count;
+ __entry->direct = direct;
),
- TP_printk("pe=%010Lx, src=%010Lx, count=%u",
- __entry->pe, __entry->src, __entry->count)
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d",
+ __entry->pe, __entry->src, __entry->count,
+ __entry->direct)
);
TRACE_EVENT(amdgpu_vm_flush,
@@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __field(const char *,name)
+ __string(ring, sched_job->base.sched->name)
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
@@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
),
TP_fast_assign(
- __entry->name = sched_job->base.sched->name;
+ __assign_str(ring, sched_job->base.sched->name)
__entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __entry->name, __entry->id,
+ __get_str(ring), __entry->id,
__entry->fence, __entry->ctx,
__entry->seqno)
);
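
The tracepoint changes above replace `__field(char *, ring_name)` with `__string(ring, ...)`. A char * field records only the pointer, which can be dangling by the time the trace buffer is read; __string() reserves space in the ring buffer and __assign_str() copies the bytes at assign time. A minimal sketch of the pattern as it would appear in a trace header; the event and its arguments are illustrative:

TRACE_EVENT(example_event,
	TP_PROTO(const char *name, u32 id),
	TP_ARGS(name, id),

	TP_STRUCT__entry(
		__string(name, name)	/* variable-length copy, not a pointer */
		__field(u32, id)
	),

	TP_fast_assign(
		__assign_str(name, name)
		__entry->id = id;
	),

	TP_printk("name=%s, id=%u", __get_str(name), __entry->id)
);
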
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dff41d0a85fe..2616e2eafdeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -35,10 +35,12 @@
#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
+#include <linux/dma-buf.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -54,6 +56,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
@@ -484,15 +487,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* create space/pages for new_mem in GTT space */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -543,15 +543,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
- struct amdgpu_device *adev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- adev = amdgpu_ttm_adev(bo->bdev);
-
/* make space in GTT for old_mem buffer */
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -763,6 +760,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
*/
struct amdgpu_ttm_tt {
struct ttm_dma_tt ttm;
+ struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
struct task_struct *usertask;
@@ -772,6 +770,20 @@ struct amdgpu_ttm_tt {
#endif
};
+#ifdef CONFIG_DRM_AMDGPU_USERPTR
+/* flags used internally by HMM, not related to CPU/GPU PTE flags */
+static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
+ (1 << 0), /* HMM_PFN_VALID */
+ (1 << 1), /* HMM_PFN_WRITE */
+ 0 /* HMM_PFN_DEVICE_PRIVATE */
+};
+
+static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
+ 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
+ 0, /* HMM_PFN_NONE */
+ 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
+};
+
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -779,85 +791,89 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-
-#define MAX_RETRY_HMM_RANGE_FAULT 16
-
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
- struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct mm_struct *mm = gtt->usertask->mm;
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct hmm_range *range;
+ unsigned long timeout;
+ struct mm_struct *mm;
unsigned long i;
- uint64_t *pfns;
int r = 0;
- if (!mm) /* Happens during process shutdown */
- return -ESRCH;
-
- if (unlikely(!mirror)) {
- DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
- r = -EFAULT;
- goto out;
+ mm = bo->notifier.mm;
+ if (unlikely(!mm)) {
+ DRM_DEBUG_DRIVER("BO is not registered?\n");
+ return -EFAULT;
}
- vma = find_vma(mm, start);
- if (unlikely(!vma || start < vma->vm_start)) {
- r = -EFAULT;
- goto out;
- }
- if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
- vma->vm_file)) {
- r = -EPERM;
- goto out;
- }
+ /* Another get_user_pages is running at the same time?? */
+ if (WARN_ON(gtt->range))
+ return -EFAULT;
+
+ if (!mmget_not_zero(mm)) /* Happens during process shutdown */
+ return -ESRCH;
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (unlikely(!range)) {
r = -ENOMEM;
goto out;
}
+ range->notifier = &bo->notifier;
+ range->flags = hmm_range_flags;
+ range->values = hmm_range_values;
+ range->pfn_shift = PAGE_SHIFT;
+ range->start = bo->notifier.interval_tree.start;
+ range->end = bo->notifier.interval_tree.last + 1;
+ range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ if (!amdgpu_ttm_tt_is_readonly(ttm))
+ range->default_flags |= range->flags[HMM_PFN_WRITE];
- pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
- if (unlikely(!pfns)) {
+ range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
+ GFP_KERNEL);
+ if (unlikely(!range->pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
- amdgpu_hmm_init_range(range);
- range->default_flags = range->flags[HMM_PFN_VALID];
- range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
- 0 : range->flags[HMM_PFN_WRITE];
- range->pfn_flags_mask = 0;
- range->pfns = pfns;
- range->start = start;
- range->end = start + ttm->num_pages * PAGE_SIZE;
-
- hmm_range_register(range, mirror);
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, start);
+ if (unlikely(!vma || start < vma->vm_start)) {
+ r = -EFAULT;
+ goto out_unlock;
+ }
+ if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+ vma->vm_file)) {
+ r = -EPERM;
+ goto out_unlock;
+ }
+ up_read(&mm->mmap_sem);
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- /*
- * Just wait for range to be valid, safe to ignore return value as we
- * will use the return value of hmm_range_fault() below under the
- * mmap_sem to ascertain the validity of the range.
- */
- hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
+retry:
+ range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
r = hmm_range_fault(range, 0);
up_read(&mm->mmap_sem);
-
- if (unlikely(r < 0))
+ if (unlikely(r <= 0)) {
+ /*
+ * FIXME: This timeout should encompass the retry from
+ * mmu_interval_read_retry() as well.
+ */
+ if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ goto retry;
goto out_free_pfns;
+ }
for (i = 0; i < ttm->num_pages; i++) {
- pages[i] = hmm_device_entry_to_page(range, pfns[i]);
+ /* FIXME: The pages cannot be touched outside the notifier_lock */
+ pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
if (unlikely(!pages[i])) {
pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, pfns[i]);
+ i, range->pfns[i]);
r = -ENOMEM;
goto out_free_pfns;
@@ -865,15 +881,18 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
}
gtt->range = range;
+ mmput(mm);
return 0;
+out_unlock:
+ up_read(&mm->mmap_sem);
out_free_pfns:
- hmm_range_unregister(range);
- kvfree(pfns);
+ kvfree(range->pfns);
out_free_ranges:
kfree(range);
out:
+ mmput(mm);
return r;
}
@@ -898,15 +917,18 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
"No user pages to check\n");
if (gtt->range) {
- r = hmm_range_valid(gtt->range);
- hmm_range_unregister(gtt->range);
-
+ /*
+ * FIXME: Must always hold notifier_lock for this, and must
+ * not ignore the return code.
+ */
+ r = mmu_interval_read_retry(gtt->range->notifier,
+ gtt->range->notifier_seq);
kvfree(gtt->range->pfns);
kfree(gtt->range);
gtt->range = NULL;
}
- return r;
+ return !r;
}
#endif
@@ -987,10 +1009,18 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
- if (gtt->range &&
- ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[0]))
- WARN_ONCE(1, "Missing get_user_page_done\n");
+ if (gtt->range) {
+ unsigned long i;
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm->pages[i] !=
+ hmm_device_entry_to_page(gtt->range,
+ gtt->range->pfns[i]))
+ break;
+ }
+
+ WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
+ }
#endif
}
@@ -1217,16 +1247,14 @@ static struct ttm_backend_func amdgpu_backend_func = {
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt;
- adev = amdgpu_ttm_adev(bo->bdev);
-
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
return NULL;
}
gtt->ttm.ttm.func = &amdgpu_backend_func;
+ gtt->gobj = &bo->base;
/* allocate space for the uninitialized page entries */
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
@@ -1247,7 +1275,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt && gtt->userptr) {
@@ -1260,7 +1287,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
return 0;
}
- if (slave && ttm->sg) {
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
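
The hunk above makes tt_populate map an imported dma-buf lazily: if the TT is SG-backed but no sg_table has been attached yet, the attachment is mapped on first populate and handed back in unpopulate (see the next hunk). The paired calls, reduced to their essence and assuming a valid `attach` from the imported GEM object:

	/* populate: map the attachment once, on demand */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);
	ttm->sg = sgt;

	/* unpopulate: return the mapping to the exporter */
	dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
	ttm->sg = NULL;
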
@@ -1287,9 +1326,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ struct amdgpu_device *adev;
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1298,7 +1336,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
}
- if (slave)
+ if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ return;
+ }
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return;
adev = amdgpu_ttm_adev(ttm->bdev);
@@ -1634,81 +1681,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
*/
static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo_param bp;
- int r = 0;
- int i;
- u64 vram_size = adev->gmc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->fw_vram_usage.size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
+ if (adev->fw_vram_usage.size == 0 ||
+ adev->fw_vram_usage.size > vram_size)
+ return 0;
- r = amdgpu_bo_create(adev, &bp,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->fw_vram_usage.start_offset,
+ adev->fw_vram_usage.size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+}
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
+/*
+ * Memory training reservation functions
+ */
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
+/**
+ * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the VRAM reserved for memory training, if it has been reserved.
+ */
+static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
+ ctx->c2p_bo = NULL;
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size));
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
+ amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
+ ctx->p2c_bo = NULL;
+
+ return 0;
+}
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+/**
+ * amdgpu_ttm_training_reserve_vram_init - create a BO VRAM reservation for memory training
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Create a BO VRAM reservation for memory training.
+ */
+static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+ if (!adev->fw_vram_usage.mem_train_support) {
+ DRM_DEBUG("memory training does not support!\n");
+ return 0;
}
- return r;
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
+ ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+ ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->p2c_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->p2c_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &ctx->c2p_bo,
+ NULL);
+ if (ret) {
+ DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
+ goto Err_out;
+ }
+
+ ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
+ return 0;
+
+Err_out:
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
}
+
/**
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1731,6 +1802,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
+ adev->ddev->vma_offset_manager,
dma_addressing_limited(adev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1771,6 +1843,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /*
+ * The reserved VRAM for memory training must be pinned to the specified
+ * place in VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_training_reserve_vram_init(adev);
+ if (r)
+ return r;
+
/* allocate memory as required for VGA
* This is used for VGA emulation and pre-OS scanout buffers to
* avoid display artifacts while transitioning between pre-OS
@@ -1781,6 +1861,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
NULL, &stolen_vga_buf);
if (r)
return r;
+
+ /*
+ * Reserve one TMR (64K) region at the top of VRAM, which holds
+ * the IP Discovery data and is protected by PSP.
+ */
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
+ DISCOVERY_TMR_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1856,7 +1950,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
return;
amdgpu_ttm_debugfs_fini(adev);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the IP Discovery TMR memory back to VRAM */
+ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
@@ -1952,10 +2050,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- num_dw = adev->mman.buffer_funcs->copy_num_dw;
- while (num_dw & 0x7)
- num_dw++;
-
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
@@ -2015,11 +2110,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
- num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
-
- /* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
if (r)
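
The HMM rework in this file moves user-page faulting onto the mmu_interval_notifier API, which behaves like a sequence lock: snapshot the notifier with mmu_interval_read_begin(), fault the pages without holding the interval lock, then check with mmu_interval_read_retry() whether an invalidation raced in. The canonical loop, sketched under the assumption of an already-registered notifier (the driver above splits begin and retry across two functions instead):

	unsigned long seq;
	long r;

	do {
		seq = mmu_interval_read_begin(&bo->notifier);

		down_read(&mm->mmap_sem);
		r = hmm_range_fault(range, 0);	/* fill range->pfns */
		up_read(&mm->mmap_sem);
		if (r <= 0)
			break;			/* error, or -EBUSY handled by caller */

		/* take the driver's notifier lock before using the result */
	} while (mmu_interval_read_retry(&bo->notifier, seq));
	/* range->pfns stays valid only while that lock is held */
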
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 3a6115ad0196..833fc4b68940 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_RAVEN:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
- case CHIP_ARCTURUS:
- return AMDGPU_FW_LOAD_DIRECT;
default:
DRM_ERROR("Unknown firmware load type\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index b34f00d42049..410587b950f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 {
uint32_t ta_ras_ucode_version;
uint32_t ta_ras_offset_bytes;
uint32_t ta_ras_size_bytes;
+ uint32_t ta_hdcp_ucode_version;
+ uint32_t ta_hdcp_offset_bytes;
+ uint32_t ta_hdcp_size_bytes;
+ uint32_t ta_dtm_ucode_version;
+ uint32_t ta_dtm_offset_bytes;
+ uint32_t ta_dtm_size_bytes;
};
/* version_major=1, version_minor=0 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
new file mode 100644
index 000000000000..d4fb9cf27e21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras.h"
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_ih_if ih_info = {
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ if (!adev->umc.ras_if) {
+ adev->umc.ras_if =
+ kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->umc.ras_if)
+ return -ENOMEM;
+ adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
+ adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if->sub_block_index = 0;
+ strcpy(adev->umc.ras_if->name, "umc");
+ }
+ ih_info.head = fs_info.head = *adev->umc.ras_if;
+
+ r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
+ &fs_info, &ih_info);
+ if (r)
+ goto free;
+
+ if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto late_fini;
+ } else {
+ r = 0;
+ goto free;
+ }
+
+ /* ras init of specific umc version */
+ if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
+ adev->umc.funcs->err_cnt_init(adev);
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
+free:
+ kfree(adev->umc.ras_if);
+ adev->umc.ras_if = NULL;
+ return r;
+}
+
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->umc.ras_if) {
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ .cb = amdgpu_umc_process_ras_data_cb,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return AMDGPU_RAS_SUCCESS;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_count)
+ adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.funcs &&
+ adev->umc.funcs->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ DRM_WARN("Failed to alloc memory for umc error address record!\n");
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count) {
+ if (err_data->err_addr_cnt &&
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt))
+ DRM_WARN("Failed to add ras bad page!\n");
+
+ amdgpu_ras_reset_gpu(adev, 0);
+ }
+
+ kfree(err_data->err_addr);
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
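
amdgpu_umc_process_ras_data_cb() above deliberately carries on when kcalloc() fails: query_ras_error_address() must run regardless, because it is also what clears the hardware error status; only the address records are lost. The tolerate-allocation-failure shape, with hypothetical helper names:

	buf = kcalloc(max_records, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		pr_warn("no memory for error records, status is still cleared\n");

	query_and_clear_errors(dev, buf);	/* hypothetical; must accept buf == NULL */

	kfree(buf);				/* kfree(NULL) is a no-op */
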
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 975afa04df09..3283032a78e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -54,7 +54,8 @@
adev->umc.funcs->disable_umc_index_mode(adev);
struct amdgpu_umc_funcs {
- void (*ras_init)(struct amdgpu_device *adev);
+ void (*err_cnt_init)(struct amdgpu_device *adev);
+ int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
@@ -62,6 +63,7 @@ struct amdgpu_umc_funcs {
void (*enable_umc_index_mode)(struct amdgpu_device *adev,
uint32_t umc_instance);
void (*disable_umc_index_mode)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
};
struct amdgpu_umc {
@@ -75,8 +77,17 @@ struct amdgpu_umc {
uint32_t channel_offs;
/* channel index table of interleaved memory */
const uint32_t *channel_idx_tbl;
+ struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
};
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2c364b8695f..e324bfe6c58f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -39,6 +39,8 @@
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"
+#include "amdgpu_ras.h"
+
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -297,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
@@ -372,7 +375,13 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ /* write zeros instead, since err_event_athub will have corrupted the VCPU buffer */
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_WARN("UVD VCPU state may be lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
+ memset(adev->uvd.inst[j].saved_bo, 0, size);
+ } else {
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+ }
}
return 0;
}
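
The cancel_delayed_work_sync() call added to the UVD sw_fini path (and, below, to VCE and VCN) closes a teardown race: the idle-work handler may still be queued or running while the objects it touches are freed. The _sync variant cancels a pending work item and waits for a running handler to return before teardown proceeds:

	/* stop the worker first, then free what its handler dereferences */
	cancel_delayed_work_sync(&adev->uvd.idle_work);
	drm_sched_entity_destroy(&adev->uvd.entity);
	/* ...BO and ring teardown is now safe... */
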
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 65044b1b3d4c..46b590af2fd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
/**
* amdgpu_vce_init - allocate memory, load vce firmware
@@ -211,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
@@ -428,9 +434,9 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
*
* Open up a stream for HW test
*/
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
@@ -508,8 +514,8 @@ err:
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
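
amdgpu_vce_get_create_msg()/amdgpu_vce_get_destroy_msg() become static above because their only remaining callers live in amdgpu_vce.c; the forward declarations near the top of the file let the definitions keep their original order, and the header drops the now-private prototypes in the next hunk. The general shape:

	static int helper(int x);	/* forward declaration of a file-local function */

	int public_entry(int x)
	{
		return helper(x) + 1;
	}

	static int helper(int x)
	{
		return x * 2;
	}
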
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index e802f7d9db0a..d6d83a3ec803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -58,11 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3199e4a5ff12..9d870444d7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
&adev->vcn.dpg_sram_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5251352f5922..598c24505c73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
-/**
+/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
*/
struct amdgpu_vm_pt_cursor {
@@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
*
* @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
* @cursor: state to initialize
*
* Starts a deep first traversal of the PD/PT tree.
@@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
amdgpu_vm_pt_ancestor(cursor);
}
-/**
+/*
* for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
*/
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
@@ -566,6 +568,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
}
+/**
+ * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
+ *
+ * @bo: BO which was removed from the LRU
+ *
+ * Make sure the bulk_moveable flag is updated when a BO is removed from the
+ * LRU.
+ */
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_bo *abo;
@@ -600,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -624,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
&vm->lru_bulk_move);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
vm->bulk_moveable = true;
}
@@ -693,6 +702,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
+ * @direct: use a direct update
*
* Root PD needs to be reserved when calling this.
*
@@ -701,7 +711,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo,
+ bool direct)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -760,6 +771,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
@@ -813,10 +825,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requesting vm
+ * @level: the page table level
+ * @direct: use a direct update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, struct amdgpu_bo_param *bp)
+ int level, bool direct,
+ struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -831,6 +846,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
+ bp->no_wait_gpu = direct;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -841,6 +857,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
+ * @direct: use a direct update
*
* Make sure a specific page table or directory is allocated.
*
@@ -850,7 +867,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool direct)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -871,7 +889,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -883,7 +901,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
if (r)
goto error_free_pt;
@@ -1020,7 +1038,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -1034,10 +1053,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
- bool pasid_mapping_needed = id->pasid != job->pasid ||
- !id->pasid_mapping ||
- !dma_fence_is_signaled(id->pasid_mapping);
struct dma_fence *fence = NULL;
+ bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
int r;
@@ -1047,6 +1064,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
pasid_mapping_needed = true;
}
+ mutex_lock(&id_mgr->lock);
+ if (id->pasid != job->pasid || !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping))
+ pasid_mapping_needed = true;
+ mutex_unlock(&id_mgr->lock);
+
gds_switch_needed &= !!ring->funcs->emit_gds_switch;
vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
@@ -1086,9 +1109,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
if (pasid_mapping_needed) {
+ mutex_lock(&id_mgr->lock);
id->pasid = job->pasid;
dma_fence_put(id->pasid_mapping);
id->pasid_mapping = dma_fence_get(fence);
+ mutex_unlock(&id_mgr->lock);
}
dma_fence_put(fence);
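
The amdgpu_vm_flush() change above stops deriving pasid_mapping_needed from unprotected reads: both the decision and the later update of id->pasid_mapping now happen under id_mgr->lock, so two concurrent flushes cannot race on the same VMID. Reduced to the locking pattern:

	mutex_lock(&id_mgr->lock);
	if (id->pasid != job->pasid || !id->pasid_mapping ||
	    !dma_fence_is_signaled(id->pasid_mapping))
		pasid_mapping_needed = true;
	mutex_unlock(&id_mgr->lock);

	/* ...emit the mapping, obtain `fence`... */

	if (pasid_mapping_needed) {
		mutex_lock(&id_mgr->lock);
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
		mutex_unlock(&id_mgr->lock);
	}
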
@@ -1172,10 +1197,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/*
+/**
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @param: parameters for the update
+ * @params: parameters for the update
* @vm: requested vm
* @entry: entry to update
*
@@ -1199,7 +1224,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
-/*
+/**
* amdgpu_vm_invalidate_pds - mark all PDs as invalid
*
* @adev: amdgpu_device pointer
@@ -1218,19 +1243,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
amdgpu_vm_bo_relocated(&entry->base);
}
-/*
- * amdgpu_vm_update_directories - make sure that all directories are valid
+/**
+ * amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @direct: submit directly to the paging queue
*
* Makes sure all directories are up to date.
*
* Returns:
* 0 for success, error for failure.
*/
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1241,6 +1267,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
if (r)
@@ -1268,7 +1295,7 @@ error:
return r;
}
-/**
+/*
* amdgpu_vm_update_flags - figure out flags for PTE updates
*
* Make sure to set the right flags for the PTEs at the desired level.
@@ -1391,7 +1418,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ /* make sure that the page tables covering the address range are
+ * actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
+ params->direct);
if (r)
return r;
@@ -1463,7 +1494,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Free all child entries */
+ /* Free all child entries.
+ * Update the tables with the flags and addresses and free up subsequent
+ * tables in the case of huge pages or freed-up areas.
+ * This is the maximum we can free: all other page tables are not
+ * completely covered by the range and are therefore potentially still in use.
+ */
while (cursor.pfn < frag_start) {
amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
@@ -1482,13 +1518,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
+ * @direct: direct submission in a page fault
+ * @exclusive: fence we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
* @addr: addr to set the area to
+ * @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
@@ -1497,11 +1534,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct,
struct dma_fence *exclusive,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
+ dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
@@ -1511,6 +1548,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.direct = direct;
params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
@@ -1569,27 +1607,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
flags &= ~AMDGPU_PTE_WRITEABLE;
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
- if (adev->asic_type >= CHIP_NAVI10) {
- flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
- } else {
- flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
- }
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- if (adev->asic_type >= CHIP_NAVI10) {
- flags |= AMDGPU_PTE_SNOOPED;
- flags |= AMDGPU_PTE_LOG;
- flags |= AMDGPU_PTE_SYSTEM;
- }
- flags &= ~AMDGPU_PTE_VALID;
- }
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, mapping, &flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1633,7 +1652,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ max_entries = count *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1642,9 +1662,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
start, last, flags, addr,
- fence);
+ dma_addr, fence);
if (r)
return r;
@@ -1672,8 +1692,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
+int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1700,7 +1719,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = dma_resv_get_excl(bo->tbo.base.resv);
+ exclusive = bo->tbo.moving;
}
if (bo) {
@@ -1731,12 +1750,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
return r;
}
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- }
-
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
@@ -1744,7 +1757,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
@@ -1938,9 +1952,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
mapping->start, mapping->last,
- init_pte_value, 0, &f);
+ init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2682,12 +2696,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
- /* create scheduler entity for page table updates */
- r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+ /* create scheduler entities for page table updates */
+ r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
adev->vm_manager.vm_pte_num_rqs, NULL);
if (r)
return r;
+ r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+ adev->vm_manager.vm_pte_num_rqs, NULL);
+ if (r)
+ goto error_free_direct;
+
vm->pte_support_ats = false;
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2702,7 +2721,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2711,12 +2731,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
r = amdgpu_bo_create(adev, &bp, &root);
if (r)
- goto error_free_sched_entity;
+ goto error_free_delayed;
r = amdgpu_bo_reserve(root, true);
if (r)
@@ -2728,7 +2748,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root);
+ r = amdgpu_vm_clear_bo(adev, vm, root, false);
if (r)
goto error_unreserve;
@@ -2759,8 +2779,11 @@ error_free_root:
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
-error_free_sched_entity:
- drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+ drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+ drm_sched_entity_destroy(&vm->direct);
return r;
}
@@ -2801,6 +2824,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @pasid: pasid to use
*
* This only works on GFX VMs that don't have any BOs added and no
* page tables allocated yet.
@@ -2816,7 +2840,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
* Returns:
* 0 for success, -errno for errors.
*/
-int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned int pasid)
{
bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
int r;
@@ -2848,7 +2873,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
if (r)
goto free_idr;
}
@@ -2858,7 +2883,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
if (vm->use_cpu_for_update)
@@ -2937,19 +2963,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- int i, r;
+ int i;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ amdgpu_bo_reserve(root, true);
if (vm->pasid) {
unsigned long flags;
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ vm->pasid = 0;
+ }
+
+ list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
+ if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+ amdgpu_vm_prt_fini(adev, vm);
+ prt_fini_needed = false;
+ }
+
+ list_del(&mapping->list);
+ amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- drm_sched_entity_destroy(&vm->entity);
+ amdgpu_vm_free_pts(adev, vm, NULL);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
+
+ drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2962,26 +3007,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
list_del(&mapping->list);
kfree(mapping);
}
- list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
- amdgpu_vm_prt_fini(adev, vm);
- prt_fini_needed = false;
- }
-
- list_del(&mapping->list);
- amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
- }
- root = amdgpu_bo_ref(vm->root.base.bo);
- r = amdgpu_bo_reserve(root, true);
- if (r) {
- dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
- } else {
- amdgpu_vm_free_pts(adev, vm, NULL);
- amdgpu_bo_unreserve(root);
- }
- amdgpu_bo_unref(&root);
- WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3065,8 +3091,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
- /* current, we only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
+ /* We only need to reserve a vmid from the gfxhub */
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
+ AMDGPU_GFXHUB_0);
if (r)
return r;
break;
@@ -3109,13 +3136,88 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
*/
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
{
- if (!vm->task_info.pid) {
- vm->task_info.pid = current->pid;
- get_task_comm(vm->task_info.task_name, current);
+ if (vm->task_info.pid)
+ return;
- if (current->group_leader->mm == current->mm) {
- vm->task_info.tgid = current->group_leader->pid;
- get_task_comm(vm->task_info.process_name, current->group_leader);
- }
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+}
+
+/**
+ * amdgpu_vm_handle_fault - graceful handling of VM faults.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @addr: Address of the fault
+ *
+ * Try to gracefully handle a VM fault. Return true if the fault was handled and
+ * shouldn't be reported any more.
+ */
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr)
+{
+ struct amdgpu_bo *root;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+ long r;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ root = amdgpu_bo_ref(vm->root.base.bo);
+ else
+ root = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+
+ if (!root)
+ return false;
+
+ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_unref;
+
+ /* Double check that the VM still exists */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm && vm->root.base.bo != root)
+ vm = NULL;
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (!vm)
+ goto error_unlock;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+ flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
+ AMDGPU_PTE_SYSTEM;
+
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ /* Redirect the access to the dummy page */
+ value = adev->dummy_page_addr;
+ flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
+ AMDGPU_PTE_WRITEABLE;
+ } else {
+ /* Let the hw retry silently on the PTE */
+ value = 0;
}
+
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
+ flags, value, NULL, NULL);
+ if (r)
+ goto error_unlock;
+
+ r = amdgpu_vm_update_pdes(adev, vm, true);
+
+error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+ DRM_ERROR("Can't handle page fault (%ld)\n", r);
+
+error_unref:
+ amdgpu_bo_unref(&root);
+
+ return false;
}
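
The fault path above either redirects the faulting address to a dummy page (so the wavefront keeps running against throwaway memory) or leaves the PTE address zero so the hardware retries silently. The standalone C sketch below models only that policy decision; the names (fault_stop_mode, the PTE_* constants, resolve_fault) are illustrative stand-ins for the kernel's AMDGPU_VM_FAULT_STOP_* and AMDGPU_PTE_* symbols, not the driver API.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's AMDGPU_VM_FAULT_STOP_* values. */
enum fault_stop_mode { FAULT_STOP_NEVER, FAULT_STOP_FIRST, FAULT_STOP_ALWAYS };

#define PTE_VALID      (1u << 0)
#define PTE_SYSTEM     (1u << 1)
#define PTE_SNOOPED    (1u << 2)
#define PTE_READABLE   (1u << 3)
#define PTE_WRITEABLE  (1u << 4)
#define PTE_EXECUTABLE (1u << 5)

/* Decide what to write into the faulting PTE, mirroring the logic in
 * amdgpu_vm_handle_fault(): either point it at a dummy page with full
 * access, or leave the address zero so the hardware retries silently. */
static void resolve_fault(enum fault_stop_mode mode, uint64_t dummy_page,
                          uint64_t *value, uint32_t *flags)
{
	*flags = PTE_VALID | PTE_SNOOPED | PTE_SYSTEM;
	if (mode == FAULT_STOP_NEVER) {
		*value = dummy_page;	/* redirect to throwaway memory */
		*flags |= PTE_EXECUTABLE | PTE_READABLE | PTE_WRITEABLE;
	} else {
		*value = 0;		/* let the hw retry on the PTE */
	}
}

int main(void)
{
	uint64_t value;
	uint32_t flags;

	resolve_fault(FAULT_STOP_NEVER, 0x1000, &value, &flags);
	printf("redirect: value=0x%llx flags=0x%x\n",
	       (unsigned long long)value, flags);
	resolve_fault(FAULT_STOP_ALWAYS, 0x1000, &value, &flags);
	printf("retry:    value=0x%llx flags=0x%x\n",
	       (unsigned long long)value, flags);
	return 0;
}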
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2eda3a8c330d..4dbbe1b6b413 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* Reserve 4MB VRAM for page tables */
+#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20)
+
/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS 3
#define AMDGPU_GFXHUB_0 0
@@ -199,6 +202,11 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
+ * @direct: if changes should be made directly
+ */
+ bool direct;
+
+ /**
* @pages_addr:
*
* DMA addresses to use for mapping
@@ -254,8 +262,9 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root;
struct dma_fence *last_update;
- /* Scheduler entity for page table updates */
- struct drm_sched_entity entity;
+ /* Scheduler entities for page table updates */
+ struct drm_sched_entity direct;
+ struct drm_sched_entity delayed;
unsigned int pasid;
/* dedicated to vm */
@@ -357,8 +366,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool direct);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
@@ -404,6 +413,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info);
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
+ uint64_t addr);
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 5222d165abfc..73fec7a0ced5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
{
int r;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
/* Wait for any BO move to be completed */
if (exclusive) {
r = dma_fence_wait(exclusive, true);
@@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
return r;
}
- return 0;
+ /* Don't wait for submissions during page fault */
+ if (p->direct)
+ return 0;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
}
/**
@@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
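
The reordered prepare step above always waits for BO moves but skips waiting on the page-table reservation object when p->direct is set, since the direct path runs inside fault handling and must not block on pending submissions. A minimal sketch of that ordering, with hypothetical wait helpers standing in for dma_fence_wait() and amdgpu_bo_sync_wait():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's fence waits. */
static int wait_for_moves(void)   { puts("wait: BO moves");       return 0; }
static int wait_for_pt_idle(void) { puts("wait: PT reservation"); return 0; }

/* Mirrors amdgpu_vm_cpu_prepare(): moves are always awaited, the
 * page-table wait is skipped on the direct (page-fault) path. */
static int prepare_update(bool direct, bool have_exclusive_fence)
{
	int r;

	if (have_exclusive_fence) {
		r = wait_for_moves();
		if (r)
			return r;
	}

	if (direct)	/* don't block during fault handling */
		return 0;

	return wait_for_pt_idle();
}

int main(void)
{
	printf("-- normal path --\n");
	prepare_update(false, true);
	printf("-- direct (fault) path --\n");
	prepare_update(true, true);
	return 0;
}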
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 61fc584cbb1a..832db59f441e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
+ p->num_dw_left = ndw;
+
+ /* Wait for moves to be completed */
r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
if (r)
return r;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
- if (r)
- return r;
+ /* Don't wait for any submissions during page fault handling */
+ if (p->direct)
+ return 0;
- p->num_dw_left = ndw;
- return 0;
+ return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+ owner, false);
}
/**
@@ -95,22 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_bo *root = p->vm->root.base.bo;
struct amdgpu_ib *ib = p->job->ibs;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
struct dma_fence *f;
int r;
- ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+ entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, &p->vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error;
amdgpu_bo_fence(root, f, true);
- if (fence)
+ if (fence && !p->direct)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -120,7 +123,6 @@ error:
return r;
}
-
/**
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
*
@@ -141,7 +143,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -168,7 +170,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
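
With two scheduler entities per VM, the commit step above picks which queue a page-table update rides on: the direct entity for fault-time updates, the delayed entity for everything else, and the fence swap is deliberately skipped on the direct path so callers never end up waiting on fault work. A compact sketch of the selection, with the entity and VM types reduced to illustrative structs:

#include <stdbool.h>
#include <stdio.h>

/* Reduced, illustrative versions of the scheduler types. */
struct entity { const char *name; };

struct vm {
	struct entity direct;	/* immediate updates (page faults) */
	struct entity delayed;	/* normal, deferred updates        */
};

/* Mirrors the selection in amdgpu_vm_sdma_commit(). */
static struct entity *pick_entity(struct vm *vm, bool direct)
{
	return direct ? &vm->direct : &vm->delayed;
}

int main(void)
{
	struct vm vm = { { "direct" }, { "delayed" } };

	printf("fault update  -> %s entity\n", pick_entity(&vm, true)->name);
	printf("normal update -> %s entity\n", pick_entity(&vm, false)->name);
	return 0;
}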
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3a9d8c15fe9f..82a3299e53c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -23,6 +23,9 @@
*/
#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_atomfirmware.h"
+#include "atom.h"
struct amdgpu_vram_mgr {
struct drm_mm mm;
@@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
}
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ switch (adev->gmc.vram_vendor) {
+ case SAMSUNG:
+ return snprintf(buf, PAGE_SIZE, "samsung\n");
+ case INFINEON:
+ return snprintf(buf, PAGE_SIZE, "infineon\n");
+ case ELPIDA:
+ return snprintf(buf, PAGE_SIZE, "elpida\n");
+ case ETRON:
+ return snprintf(buf, PAGE_SIZE, "etron\n");
+ case NANYA:
+ return snprintf(buf, PAGE_SIZE, "nanya\n");
+ case HYNIX:
+ return snprintf(buf, PAGE_SIZE, "hynix\n");
+ case MOSEL:
+ return snprintf(buf, PAGE_SIZE, "mosel\n");
+ case WINBOND:
+ return snprintf(buf, PAGE_SIZE, "winbond\n");
+ case ESMT:
+ return snprintf(buf, PAGE_SIZE, "esmt\n");
+ case MICRON:
+ return snprintf(buf, PAGE_SIZE, "micron\n");
+ default:
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+ }
+}
+
static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
@@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+ amdgpu_mem_info_vram_vendor, NULL);
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
@@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
return ret;
}
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
+ return ret;
+ }
return 0;
}
@@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
return 0;
}
@@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
- uint64_t vis_usage = 0, mem_bytes;
+ uint64_t vis_usage = 0, mem_bytes, max_bytes;
unsigned i;
int r;
@@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
+ max_bytes = adev->gmc.mc_vram_size;
+ if (tbo->type != ttm_bo_type_kernel)
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
/* bail out quickly if there's likely not enough VRAM for this BO */
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
- if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) {
+ if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage);
mem->mm_node = NULL;
return 0;
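
The allocation guard above now subtracts a fixed reservation (AMDGPU_VM_RESERVED_VRAM, i.e. 4ULL << 20 bytes) from the budget for non-kernel BOs, so userspace allocations can never starve the page tables out of VRAM. A standalone sketch of that early-reject check, using C11 atomics in place of the kernel's atomic64_t; vram_try_charge() is an illustrative helper, not a driver function:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESERVED_VRAM (4ULL << 20)	/* keep 4MB back for page tables */

static _Atomic uint64_t vram_usage;

/* Mirrors the quick-reject in amdgpu_vram_mgr_new(): kernel BOs may use
 * the whole budget, user BOs see the budget minus the reservation. */
static bool vram_try_charge(uint64_t vram_size, uint64_t bytes, bool kernel_bo)
{
	uint64_t max_bytes = vram_size;

	if (!kernel_bo)
		max_bytes -= RESERVED_VRAM;

	if (atomic_fetch_add(&vram_usage, bytes) + bytes > max_bytes) {
		atomic_fetch_sub(&vram_usage, bytes);	/* roll back */
		return false;
	}
	return true;
}

int main(void)
{
	uint64_t vram = 8ULL << 20;	/* pretend 8MB of VRAM */

	printf("user 5MB:   %s\n",
	       vram_try_charge(vram, 5ULL << 20, false) ? "ok" : "rejected");
	printf("user 4MB:   %s\n",
	       vram_try_charge(vram, 4ULL << 20, false) ? "ok" : "rejected");
	printf("kernel 3MB: %s\n",
	       vram_try_charge(vram, 3ULL << 20, true) ? "ok" : "rejected");
	return 0;
}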
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 65aae75f80fd..61d13d8b7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
+#include "amdgpu_ras.h"
#include "df/df_3_6_offset.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -273,22 +274,55 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+ struct amdgpu_device *tmp_adev;
+ bool update_hive_pstate = true;
+ bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
if (!hive)
return 0;
- if (hive->pstate == pstate)
- return 0;
+ mutex_lock(&hive->hive_lock);
+
+ if (hive->pstate == pstate) {
+ adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ goto out;
+ }
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
if (is_support_sw_smu_xgmi(adev))
ret = smu_set_xgmi_pstate(&adev->smu, pstate);
- if (ret)
+ else if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->set_xgmi_pstate)
+ ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
+ pstate);
+
+ if (ret) {
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
+ goto out;
+ }
+
+ /* Update device pstate */
+ adev->pstate = pstate;
+
+ /*
+ * Update the hive pstate only if all devices of the hive
+ * are in the same pstate
+ */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ if (tmp_adev->pstate != adev->pstate) {
+ update_hive_pstate = false;
+ break;
+ }
+ }
+ if (update_hive_pstate || is_high_pstate)
+ hive->pstate = pstate;
+
+out:
+ mutex_unlock(&hive->hive_lock);
return ret;
}
@@ -363,6 +397,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
+ /* Set default device pstate */
+ adev->pstate = -1;
+
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -437,3 +474,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
mutex_unlock(&hive->hive_lock);
}
}
+
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+{
+ int r;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "xgmi_wafl_err_count",
+ .debugfs_name = "xgmi_wafl_err_inject",
+ };
+
+ if (!adev->gmc.xgmi.supported ||
+ adev->gmc.xgmi.num_physical_nodes == 0)
+ return 0;
+
+ if (!adev->gmc.xgmi.ras_if) {
+ adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+ if (!adev->gmc.xgmi.ras_if)
+ return -ENOMEM;
+ adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if->sub_block_index = 0;
+ strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
+ }
+ ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
+ r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
+ &fs_info, &ih_info);
+ if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
+ kfree(adev->gmc.xgmi.ras_if);
+ adev->gmc.xgmi.ras_if = NULL;
+ }
+
+ return r;
+}
+
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
+ adev->gmc.xgmi.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = NULL,
+ };
+
+ amdgpu_ras_late_fini(adev, ras_if, &ih_info);
+ kfree(ras_if);
+ }
+}
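
The locking rework above only promotes a new pstate to the whole hive once every member device reports the same value, which keeps the hive-wide state from diverging from its members. A small sketch of that all-agree check over a device list; the list is a plain array here rather than the kernel's list_head:

#include <stdbool.h>
#include <stdio.h>

struct device { int pstate; };

/* Mirrors the loop in amdgpu_xgmi_set_pstate(): the hive pstate is only
 * updated once every device in the hive has reached the same pstate. */
static void update_hive_pstate(struct device *devs, int n, int *hive_pstate,
                               int new_pstate)
{
	bool all_match = true;
	int i;

	for (i = 0; i < n; i++) {
		if (devs[i].pstate != new_pstate) {
			all_match = false;
			break;
		}
	}
	if (all_match)
		*hive_pstate = new_pstate;
}

int main(void)
{
	struct device devs[3] = { { 1 }, { 1 }, { 0 } };
	int hive = 0;

	update_hive_pstate(devs, 3, &hive, 1);
	printf("one device lagging -> hive pstate %d\n", hive);

	devs[2].pstate = 1;
	update_hive_pstate(devs, 3, &hive, 1);
	printf("all devices agree  -> hive pstate %d\n", hive);
	return 0;
}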
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index fbcee31788c4..bbf504ff7051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
index 4853899b1824..fda99c958c3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "arct_ip_offset.h"
int arct_reg_base_init(struct amdgpu_device *adev)
@@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index b81bb414fcb3..7a43993544c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmGRBM_STATUS_SE2},
+ {mmGRBM_STATUS_SE3},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
+ {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
+ {mmCP_CPF_BUSY_STAT},
+ {mmCP_CPF_STALLED_STAT1},
+ {mmCP_CPF_STATUS},
+ {mmCP_CPC_BUSY_STAT},
+ {mmCP_CPC_STALLED_STAT1},
+ {mmCP_CPC_STATUS},
{mmGB_ADDR_CONFIG},
{mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
@@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
}
/**
- * cik_asic_reset - soft reset GPU
+ * cik_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int cik_asic_reset(struct amdgpu_device *adev)
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -1294,7 +1313,45 @@ static int cik_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ /* disable baco reset until it works */
+ /* smu7_asic_get_baco_capability(adev, &baco_reset); */
+ baco_reset = false;
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * cik_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int cik_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = cik_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 cik_get_config_memsize(struct amdgpu_device *adev)
@@ -1384,7 +1441,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1419,12 +1475,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1434,14 +1485,17 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1465,15 +1519,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1486,26 +1548,45 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1520,15 +1601,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
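
The conversion above swaps the hand-rolled pci_pcie_cap() offset arithmetic for pcie_capability_read_word()/pcie_capability_write_word(), which locate the PCIe capability internally and fail cleanly on non-PCIe devices, and it replaces magic masks like ~0xf with the named PCI_EXP_LNKCTL2_* constants. The runnable sketch below reproduces just the LNKCTL2 read-modify-write on a plain integer; the register constants match <linux/pci_regs.h>, while set_target_speed() is an illustrative helper, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Real LNKCTL2 field values from the PCIe spec (as in <linux/pci_regs.h>). */
#define LNKCTL2_TLS        0x000f	/* target link speed mask */
#define LNKCTL2_TLS_2_5GT  0x0001	/* gen1 */
#define LNKCTL2_TLS_5_0GT  0x0002	/* gen2 */
#define LNKCTL2_TLS_8_0GT  0x0003	/* gen3 */

/* Mirrors the read-modify-write the driver performs through
 * pcie_capability_read_word()/write_word(): clear the whole target-
 * link-speed field first, then set the desired generation. The old
 * code used the magic mask ~0xf; the new code spells out the field. */
static uint16_t set_target_speed(uint16_t lnkctl2, uint16_t gen_bits)
{
	lnkctl2 &= ~LNKCTL2_TLS;
	lnkctl2 |= gen_bits;
	return lnkctl2;
}

int main(void)
{
	uint16_t reg = LNKCTL2_TLS_5_0GT;	/* pretend the link was at gen2 */

	reg = set_target_speed(reg, LNKCTL2_TLS_8_0GT);
	printf("lnkctl2 after gen3 request: 0x%04x\n", reg);
	return 0;
}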
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index 54c625a2e570..9870bf27870e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev,
int cik_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
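
cik.h now exports the smu7 BACO helpers so cik_asic_reset() can choose between a BACO reset and the legacy PCI-config reset at run time (with BACO still forced off above until it is validated). A sketch of that dispatch; the chip and method enums are trimmed to what the hunk shows and are illustrative only:

#include <stdbool.h>
#include <stdio.h>

enum chip { CHIP_BONAIRE, CHIP_HAWAII, CHIP_OTHER };
enum reset_method { RESET_LEGACY, RESET_BACO };

/* Mirrors cik_asic_reset_method(): BACO stays disabled for every chip
 * until the capability check is re-enabled in the driver. */
static enum reset_method pick_reset_method(enum chip chip)
{
	bool baco_reset;

	switch (chip) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		/* smu7_asic_get_baco_capability() is still commented out */
		baco_reset = false;
		break;
	default:
		baco_reset = false;
		break;
	}

	return baco_reset ? RESET_BACO : RESET_LEGACY;
}

int main(void)
{
	printf("bonaire reset: %s\n",
	       pick_reset_method(CHIP_BONAIRE) == RESET_BACO ? "BACO" : "legacy");
	return 0;
}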
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 645550e7caf5..40d2ac723dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
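
All of the DCE variants in this series switch from open-coding list_for_each_entry() over mode_config.connector_list to the drm_connector_list_iter begin/iterate/end triple, which handles the connector-list locking for the caller and stays safe against concurrent hotplug. Below is a deliberately simplified userspace model of that begin/next/end discipline, with a mutex standing in for the DRM lock; the real iterator pins connectors with reference counts per step rather than holding a lock across the whole walk, and the type and function names here are illustrative, not the DRM API.

#include <pthread.h>
#include <stdio.h>

struct connector { const char *name; struct connector *next; };

struct conn_iter {
	pthread_mutex_t *lock;
	struct connector *pos;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* begin/next/end mirror the shape of drm_connector_list_iter_begin()/
 * _next()/_end(): the lock is taken by begin() and always released by
 * end(), so every walk is bracketed the same way. */
static void iter_begin(struct conn_iter *it, struct connector *head)
{
	it->lock = &list_lock;
	it->pos = head;
	pthread_mutex_lock(it->lock);
}

static struct connector *iter_next(struct conn_iter *it)
{
	struct connector *c = it->pos;

	if (c)
		it->pos = c->next;
	return c;
}

static void iter_end(struct conn_iter *it)
{
	pthread_mutex_unlock(it->lock);
}

int main(void)
{
	struct connector dp = { "DP-1", NULL };
	struct connector hdmi = { "HDMI-A-1", &dp };
	struct conn_iter it;
	struct connector *c;

	iter_begin(&it, &hdmi);
	while ((c = iter_next(&it)))
		printf("connector %s\n", c->name);
	iter_end(&it);
	return 0;
}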
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index d9f470632b2c..898ef72d423c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -385,6 +387,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
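
The SAD hunks repeated across these files also change the error handling subtly: drm_edid_to_sad() returning a negative value is a real failure worth logging, while returning zero just means the EDID carries no short audio descriptors and should be a silent no-op. A tiny sketch of that split, with a stub in place of the real EDID parser:

#include <stdio.h>

/* Stub standing in for drm_edid_to_sad(): returns the number of short
 * audio descriptors found, or a negative errno on a malformed EDID. */
static int edid_to_sad_stub(int scenario)
{
	return scenario;	/* caller picks the outcome for the demo */
}

static void write_sad_regs(int scenario)
{
	int sad_count = edid_to_sad_stub(scenario);

	if (sad_count < 0)
		fprintf(stderr, "Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;		/* zero SADs is not an error, just nothing to do */

	printf("programming %d SADs\n", sad_count);
}

int main(void)
{
	write_sad_regs(-22);	/* failure: logged, then bail */
	write_sad_regs(0);	/* no SADs: silent return     */
	write_sad_regs(3);	/* normal case                */
	return 0;
}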
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 3eb2e7429269..db15a112becc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
-
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1124,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int interlace = 0;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u8 *sadb = NULL;
int sad_count;
u32 tmp;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 tmp = 0;
@@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
int bpc = 8;
@@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
if (!dig || !dig->afmt)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a16c5e9e610e..f06c9022c1fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp = 0, offset;
@@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
@@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index c9608ae8643b..e4f94863332c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -260,15 +260,14 @@ static struct drm_encoder *
dce_virtual_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
return encoder;
}
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 844c03868248..d6221298b477 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev)
{
}
+static void df_v1_7_sw_fini(struct amdgpu_device *adev)
+{
+}
+
static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v1_7_funcs = {
.sw_init = df_v1_7_sw_init,
+ .sw_fini = df_v1_7_sw_fini,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
.get_fb_channel_number = df_v1_7_get_fb_channel_number,
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5850c8e34caa..16fbd2bc8ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev,
unsigned long flags, address, data;
uint32_t ficadl_val, ficadh_val;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3);
@@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr,
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, lo_addr);
@@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev)
adev->df_perfmon_config_assign_mask[i] = 0;
}
+static void df_v3_6_sw_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_df_cntr_avail);
+}
+
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
@@ -537,6 +544,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
+ .sw_fini = df_v3_6_sw_fini,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
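
The df_v3_6 hunks also show the recurring index/data access pattern behind the nbio.funcs helpers: the PCIe index register selects which internal register to touch, the data register carries the value, and a lock keeps the two-step sequence atomic against other CPUs. A userspace model of that pattern with a mutex in place of the irq-safe spinlock; internal_regs[] and the helper names are illustrative:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Fake device: one index register selecting among internal registers,
 * one data register used for the actual read/write. */
static uint32_t internal_regs[16];
static uint32_t index_reg;
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static void write_index(uint32_t idx) { index_reg = idx; }
static void write_data(uint32_t v)    { internal_regs[index_reg] = v; }
static uint32_t read_data(void)       { return internal_regs[index_reg]; }

/* Mirrors df_v3_6_perfmon_wreg()/_rreg(): select then access, with the
 * lock held so no other thread can move the index in between. */
static void indirect_write(uint32_t idx, uint32_t val)
{
	pthread_mutex_lock(&idx_lock);
	write_index(idx);
	write_data(val);
	pthread_mutex_unlock(&idx_lock);
}

static uint32_t indirect_read(uint32_t idx)
{
	uint32_t v;

	pthread_mutex_lock(&idx_lock);
	write_index(idx);
	v = read_data();
	pthread_mutex_unlock(&idx_lock);
	return v;
}

int main(void)
{
	indirect_write(5, 0xdeadbeef);
	printf("reg[5] = 0x%08x\n", indirect_read(5));
	return 0;
}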
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 53090eae0082..ca5f0e7ea1ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000)
};
static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
@@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000),
};
static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
@@ -1469,7 +1469,7 @@ static int gfx_v10_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v10_0_pfp_fini(adev);
@@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
+static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
+ int r;
+
+ if (adev->in_gpu_reset) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (!r) {
+ adev->gfx.rlc.funcs->get_csb_buffer(adev,
+ adev->gfx.rlc.cs_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ }
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ if (r)
+ return r;
+ }
+
/* csib */
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+
+ return 0;
}
-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
+static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
{
int i;
+ int r;
- gfx_v10_0_init_csb(adev);
+ r = gfx_v10_0_init_csb(adev);
+ if (r)
+ return r;
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
/* TODO: init power gating */
- return;
+ return 0;
}
void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
@@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
return r;
- gfx_v10_0_init_pg(adev);
+
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
/* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
@@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- gfx_v10_0_init_pg(adev);
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
+
adev->gfx.rlc.funcs->start(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
@@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
+
+ return 0;
}
static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
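
[Editor's note] The hunk above replaces a fixed udelay(50) after toggling CP_ME_CNTL with a bounded poll: re-read the status register up to adev->usec_timeout times, one microsecond apart, and report an error if the loop index reaches the budget. A stand-alone sketch of the pattern follows; read_status() and the timeout constant are illustrative stand-ins for RREG32_SOC15(GC, 0, mmCP_STAT) and the driver's field, not driver code.

    #include <stdio.h>
    #include <unistd.h>

    static unsigned int fake_busy = 3;

    static unsigned int read_status(void)   /* stands in for reading CP_STAT */
    {
        return fake_busy ? fake_busy-- : 0; /* reports idle after 3 polls */
    }

    int main(void)
    {
        const int usec_timeout = 100000;    /* plays adev->usec_timeout's role */
        int i;

        for (i = 0; i < usec_timeout; i++) {
            if (read_status() == 0)         /* 0 == engine idle */
                break;
            usleep(1);                      /* udelay(1) in the kernel loop */
        }

        if (i >= usec_timeout)              /* loop ran out: never went idle */
            fprintf(stderr, "failed to halt cp gfx\n");
        else
            printf("idle after %d polls\n", i);
        return 0;
    }
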
@@ -2470,7 +2511,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
@@ -2540,7 +2581,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0);
@@ -2609,7 +2650,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
@@ -2930,7 +2971,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
if (amdgpu_emu_mode == 1)
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
@@ -3114,6 +3155,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.gfx_ring[0];
if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
@@ -3125,14 +3167,15 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else if (adev->in_gpu_reset) {
/* reset mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
+ adev->wb.wb[ring->wptr_offs] = 0;
amdgpu_ring_clear_ring(ring);
#ifdef BRING_UP_DEBUG
mutex_lock(&adev->srbm_mutex);
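
[Editor's note] mqd_idx in the hunk above is derived by pointer arithmetic: subtracting the array base from an element pointer yields the element's index, so each ring now saves and restores its own MQD backup instead of sharing the fixed AMDGPU_MAX_GFX_RINGS slot. A small sketch of the idiom, with an illustrative struct ring in place of amdgpu_ring:

    #include <stdio.h>

    struct ring { int id; };

    int main(void)
    {
        enum { NUM_RINGS = 4 };
        struct ring rings[NUM_RINGS];
        struct ring *r = &rings[2];

        /* Same idiom as: mqd_idx = ring - &adev->gfx.gfx_ring[0]; the
         * difference of two pointers into one array is an element count. */
        int mqd_idx = (int)(r - &rings[0]);

        printf("mqd_idx = %d\n", mqd_idx);  /* prints 2 */
        return 0;
    }
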
@@ -4384,7 +4427,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -4404,8 +4447,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5331,15 +5374,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
- /* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- default:
- adev->gds.gds_size = 0x10000;
- adev->gds.gds_compute_max_wave_id = 0x4ff;
- break;
- }
+ unsigned total_cu = adev->gfx.config.max_cu_per_sh *
+ adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines;
+ adev->gds.gds_size = 0x10000;
+ adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
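
[Editor's note] The last hunk above stops hard-coding gds_compute_max_wave_id per ASIC and derives it from the CU count instead: total CUs times 32 waves per CU, minus one because the register holds a zero-based maximum. A worked sketch of that arithmetic, using illustrative Navi10-like config values rather than anything queried from hardware:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative config: 10 CUs per SH, 1 SH per SE, 2 SEs. */
        unsigned max_cu_per_sh = 10, max_sh_per_se = 1, max_shader_engines = 2;
        unsigned total_cu = max_cu_per_sh * max_sh_per_se * max_shader_engines;

        /* 32 waves per CU; subtract one for a zero-based maximum id. */
        unsigned gds_compute_max_wave_id = total_cu * 32 - 1;

        printf("max wave id = 0x%x\n", gds_compute_max_wave_id); /* 0x27f */
        return 0;
    }
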
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 87dd55e9d72b..ffbde9136372 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2103,7 +2103,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 97cf0b536873..faf2ffce5837 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -131,6 +131,18 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+struct ras_gfx_subblock_reg {
+ const char *name;
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+};
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -517,9 +529,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -582,9 +594,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
@@ -676,9 +688,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
@@ -691,6 +703,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -1342,7 +1356,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->asic_type != CHIP_ARCTURUS &&
+ adev->asic_type != CHIP_RENOIR) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1974,190 +1989,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
- struct amdgpu_ngg_buf *ngg_buf,
- int size_se,
- int default_size_se)
-{
- int r;
-
- if (size_se < 0) {
- dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
- return -EINVAL;
- }
- size_se = size_se ? size_se : default_size_se;
-
- ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
- r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &ngg_buf->bo,
- &ngg_buf->gpu_addr,
- NULL);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
- return r;
- }
- ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
-
- return r;
-}
-
-static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < NGG_BUF_MAX; i++)
- amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
- &adev->gfx.ngg.buf[i].gpu_addr,
- NULL);
-
- memset(&adev->gfx.ngg.buf[0], 0,
- sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
-
- adev->gfx.ngg.init = false;
-
- return 0;
-}
-
-static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (!amdgpu_ngg || adev->gfx.ngg.init == true)
- return 0;
-
- /* GDS reserve memory: 64 bytes alignment */
- adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
- adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
- adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
-
- /* Primitive Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
- amdgpu_prim_buf_per_se,
- 64 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Primitive Buffer\n");
- goto err;
- }
-
- /* Position Buffer */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
- amdgpu_pos_buf_per_se,
- 256 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Position Buffer\n");
- goto err;
- }
-
- /* Control Sideband */
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
- amdgpu_cntl_sb_buf_per_se,
- 256);
- if (r) {
- dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
- goto err;
- }
-
- /* Parameter Cache, not created by default */
- if (amdgpu_param_buf_per_se <= 0)
- goto out;
-
- r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
- amdgpu_param_buf_per_se,
- 512 * 1024);
- if (r) {
- dev_err(adev->dev, "Failed to create Parameter Cache\n");
- goto err;
- }
-
-out:
- adev->gfx.ngg.init = true;
- return 0;
-err:
- gfx_v9_0_ngg_fini(adev);
- return r;
-}
-
-static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
- int r;
- u32 data, base;
-
- if (!amdgpu_ngg)
- return 0;
-
- /* Program buffer size */
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_POS].size >> 8);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
-
- data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
- adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
- WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
-
- /* Program buffer base address */
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
- data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
- data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
-
- base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
-
- base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
- data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
- WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
-
- /* Clear GDS reserved memory */
- r = amdgpu_ring_alloc(ring, 17);
- if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
- ring->name, r);
- return r;
- }
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- (adev->gds.gds_size +
- adev->gfx.ngg.gds_reserve_size));
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
- amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
- PACKET3_DMA_DATA_DST_SEL(1) |
- PACKET3_DMA_DATA_SRC_SEL(2)));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
- adev->gfx.ngg.gds_reserve_size);
-
- gfx_v9_0_write_data_to_reg(ring, 0, false,
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
-
- amdgpu_ring_commit(ring);
-
- return 0;
-}
-
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
@@ -2325,10 +2156,6 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_ngg_init(adev);
- if (r)
- return r;
-
return 0;
}
@@ -2338,19 +2165,7 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
- adev->gfx.ras_if) {
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_gfx_ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -2358,11 +2173,10 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
amdgpu_gfx_kiq_fini(adev);
gfx_v9_0_mec_fini(adev);
- gfx_v9_0_ngg_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
@@ -2930,7 +2744,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- gfx_v9_1_init_rlc_save_restore_list(adev);
+ if (adev->asic_type == CHIP_VEGA12 ||
+ (adev->asic_type == CHIP_RAVEN &&
+ adev->rev_id >= 8))
+ gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -3901,12 +3718,6 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
- r = gfx_v9_0_ngg_en(adev);
- if (r)
- return r;
- }
-
return r;
}
@@ -3948,8 +3759,10 @@ static int gfx_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
- /* disable KCQ to avoid CPC touch memory not valid anymore */
- gfx_v9_0_kcq_disable(adev);
+ /* DF freeze and kcq disable will fail */
+ if (!amdgpu_ras_intr_triggered())
+ /* disable KCQ to avoid CPC touch memory not valid anymore */
+ gfx_v9_0_kcq_disable(adev);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -4085,9 +3898,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ uint32_t tmp, lsb, msb, i = 0;
+ do {
+ if (i != 0)
+ udelay(1);
+ tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
+ msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
+ i++;
+ } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
+ clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ }
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
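
[Editor's note] The SR-IOV branch above uses the classic torn-read guard for a free-running 64-bit counter exposed as two 32-bit registers: read the MSB, then the LSB, then the MSB again, and retry if the high word moved, since a carry out of the low word between the two reads would otherwise splice mismatched halves. A self-contained sketch of the loop, with an in-memory counter standing in for the RLC_REFCLOCK_TIMESTAMP registers:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t counter = 0x00000001fffffffeULL;

    static uint32_t read_msb(void) { return (uint32_t)(counter >> 32); }

    static uint32_t read_lsb(void)
    {
        counter += 3;               /* the hardware keeps counting under us */
        return (uint32_t)counter;
    }

    int main(void)
    {
        const int timeout = 1000;   /* plays adev->usec_timeout's role */
        uint32_t tmp, lsb, msb;
        int i = 0;

        do {
            tmp = read_msb();
            lsb = read_lsb();
            msb = read_msb();
            i++;
        } while (tmp != msb && i < timeout); /* MSB moved: LSB may have wrapped */

        printf("clock = 0x%llx after %d tries\n",
               (unsigned long long)(((uint64_t)msb << 32) | lsb), i);
        return 0;
    }
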
@@ -4202,6 +4028,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
+ { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
@@ -4221,6 +4048,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
int i, r;
+ /* only support when RAS is enabled */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 7);
if (r) {
DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
@@ -4411,33 +4242,14 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry);
-
static int gfx_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->gfx.ras_if;
- struct ras_ih_if ih_info = {
- .cb = gfx_v9_0_process_ras_data_cb,
- };
- struct ras_fs_if fs_info = {
- .sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__GFX,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "gfx",
- };
int r;
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
+ r = amdgpu_gfx_ras_late_init(adev);
+ if (r)
+ return r;
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
@@ -4448,72 +4260,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__GFX);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
- if (r)
- goto irq;
-
return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
}
static int gfx_v9_0_late_init(void *handle)
@@ -4578,16 +4325,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
{
amdgpu_gfx_rlc_enter_safe_mode(adev);
- if (is_support_sw_smu(adev) && !enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
-
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
} else {
gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
- gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
amdgpu_gfx_rlc_exit_safe_mode(adev);
@@ -4856,8 +4601,6 @@ static int gfx_v9_0_set_powergating_state(void *handle,
gfx_v9_0_enable_cp_power_gating(adev, false);
/* update gfx cgpg state */
- if (is_support_sw_smu(adev) && enable)
- smu_set_gfx_cgpg(&adev->smu, enable);
gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
/* update mgcg state */
@@ -4988,7 +4731,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -5008,8 +4751,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
@@ -5741,313 +5484,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- /* TODO ue will trigger an interrupt. */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, err_data);
- amdgpu_ras_reset_gpu(adev, 0);
- return AMDGPU_RAS_SUCCESS;
-}
-static const struct {
- const char *name;
- uint32_t ip;
- uint32_t inst;
- uint32_t seg;
- uint32_t reg_offset;
- uint32_t per_se_instance;
- int32_t num_instance;
- uint32_t sec_count_mask;
- uint32_t ded_count_mask;
-} gfx_ras_edc_regs[] = {
- { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
- { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1,
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) },
- { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 },
- { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 },
- { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) },
- { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 },
- { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) },
- { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1,
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT),
- REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) },
- { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 },
- { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 },
- { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1,
- REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 },
- { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) },
- { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 },
+static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
+ },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
+ },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
+ 0, 0
+ },
+ { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
+ 0, 0
+ },
+ { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
+ },
+ { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
+ },
+ { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
+ 0, 0
+ },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
+ },
+ { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
+ 0, 0
+ },
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
- 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
+ },
{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
+ 0, 0
+ },
{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
+ },
{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
- SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1,
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
- REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
- { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1,
- REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 },
- { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
- { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 },
- { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 },
- { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 },
- { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 },
- { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 },
- { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2,
- REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 },
- { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
- { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
- { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
- { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
- { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
- REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
- { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 },
- { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 },
- { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 },
- { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 },
- { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 },
- { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 },
- { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 },
- { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16,
- REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 },
- { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 },
+ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
+ },
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
+ },
+ { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
+ },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
+ },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
+ },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
+ },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
- 0 },
- { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
+ 0, 0
+ },
{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
- 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
- 0 },
- { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0,
- 16, REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 },
- { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72,
- REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 },
- { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
- { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
- { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 },
- { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 },
- { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 },
- { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
- { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16,
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
- REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
- { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
- { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
- REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
- { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16,
- REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 },
- { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) },
- { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) },
- { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) },
- { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) },
- { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) },
- { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) },
- { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16,
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT),
- REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
+ 0, 0
+ },
+ { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
+ 0, 0
+ },
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
+ },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
+ },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ 0, 0
+ },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
+ },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
+ },
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
+ },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
+ },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
+ },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
+ },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
+ },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
+ },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
+ },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
+ },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
+ },
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
+ },
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
- 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
- { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKA_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKA_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
- { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
- { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
- { "SQC_INST_BANKB_UTCL1_MISS_FIFO",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
- 0 },
- { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1,
- 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 },
- { "SQC_DATA_BANKB_DIRTY_BIT_RAM",
- SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6,
- REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 },
- { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
- { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
- { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
- { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
- { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 },
- { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 },
- { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 },
- { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 },
- { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
- { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
- { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
- { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 },
- { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 },
- { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 },
- { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 },
- { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 },
- { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32,
- REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 },
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
+ },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
+ },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
+ },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
+ 0, 0
+ },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
+ },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
+ },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
+ },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0
+ },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
+ },
+ { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
+ },
+ { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ 0, 0
+ },
+ { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ 0, 0
+ }
};
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
@@ -6096,14 +5972,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
return ret;
}
+static const char *vml2_mems[] = {
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_0_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_1_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_2_4K_MEM1",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
+ "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM0",
+ "UTC_VML2_BANK_CACHE_3_4K_MEM1",
+};
+
+static const char *vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_LOG_FIFO",
+};
+
+static const char *atc_l2_cache_2m_mems[] = {
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
+ "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
+};
+
+static const char *atc_l2_cache_4k_mems[] = {
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
+ "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
+};
+
+static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
+ struct ras_err_data *err_data)
+{
+ uint32_t i, data;
+ uint32_t sec_count, ded_count;
+
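+	/* clear the EDC counters before sampling (index 255 presumably selects all instances) */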
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
+
+ for (i = 0; i < 16; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 7; i++) {
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
+
+ sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ SEC_COUNT);
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ vml2_walker_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
+ DED_COUNT);
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ vml2_walker_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
+
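+		/* no SEC/DED field macros here: bits 14:13 of the 2M EDC count hold the SEC count */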
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_2m_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
+ data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
+
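+		/* likewise raw masks for the 4K EDC count: SEC in bits 14:13, DED in bits 16:15 */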
+ sec_count = (data & 0x00006000L) >> 0xd;
+ if (sec_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ atc_l2_cache_4k_mems[i], sec_count);
+ err_data->ce_count += sec_count;
+ }
+
+ ded_count = (data & 0x00018000L) >> 0xf;
+ if (ded_count) {
+ DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ atc_l2_cache_4k_mems[i], ded_count);
+ err_data->ue_count += ded_count;
+ }
+ }
+
+ WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
+ WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
+
+ return 0;
+}
+
+static int __get_ras_error_count(const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id, uint32_t value,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
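+	/* scan the whole table: several named subblocks may share one counter register */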
+ for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
+		if (ras_subblock_regs[i].reg_offset != reg->reg_offset ||
+ ras_subblock_regs[i].seg != reg->seg ||
+ ras_subblock_regs[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = (value &
+ ras_subblock_regs[i].sec_count_mask) >>
+ ras_subblock_regs[i].sec_count_shift;
+ if (sec_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ ras_subblock_regs[i].ded_count_mask) >>
+ ras_subblock_regs[i].ded_count_shift;
+ if (ded_cnt) {
+ DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ ras_subblock_regs[i].name,
+ se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- uint32_t sec_count, ded_count;
- uint32_t i;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i, j, k;
uint32_t reg_value;
- uint32_t se_id, instance_id;
if (adev->asic_type != CHIP_VEGA20)
return -EINVAL;
@@ -6112,71 +6191,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count = 0;
mutex_lock(&adev->grbm_idx_mutex);
- for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) {
- for (instance_id = 0; instance_id < 256; instance_id++) {
- for (i = 0;
- i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]);
- i++) {
- if (se_id != 0 &&
- !gfx_ras_edc_regs[i].per_se_instance)
- continue;
- if (instance_id >= gfx_ras_edc_regs[i].num_instance)
- continue;
-
- gfx_v9_0_select_se_sh(adev, se_id, 0,
- instance_id);
-
- reg_value = RREG32(
- adev->reg_offset[gfx_ras_edc_regs[i].ip]
- [gfx_ras_edc_regs[i].inst]
- [gfx_ras_edc_regs[i].seg] +
- gfx_ras_edc_regs[i].reg_offset);
- sec_count = reg_value &
- gfx_ras_edc_regs[i].sec_count_mask;
- ded_count = reg_value &
- gfx_ras_edc_regs[i].ded_count_mask;
- if (sec_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, SEC %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- sec_count);
- err_data->ce_count++;
- }
- if (ded_count) {
- DRM_INFO(
- "Instance[%d][%d]: SubBlock %s, DED %d\n",
- se_id, instance_id,
- gfx_ras_edc_regs[i].name,
- ded_count);
- err_data->ue_count++;
- }
+ for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
+ for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
+ for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
+ gfx_v9_0_select_se_sh(adev, j, 0, k);
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
+ if (reg_value)
+ __get_ras_error_count(&sec_ded_counter_registers[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- return 0;
-}
-static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gfx.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
- if (!ras_if)
- return 0;
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
- ih_data.head = *ras_if;
+ gfx_v9_0_query_utc_edc_status(adev, err_data);
- DRM_ERROR("CP ECC ERROR IRQ\n");
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@@ -6343,7 +6380,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
.set = gfx_v9_0_set_cp_ecc_error_state,
- .process = gfx_v9_0_cp_ecc_error_irq,
+ .process = amdgpu_gfx_cp_ecc_error_irq,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6ce37ce77d14..e91bd7945777 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -365,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index db10640a3b2f..b70c7b483c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two registers' distance between mmGCVM_CONTEXT0_* and mmGCVM_CONTEXT1_* */
+ int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
-
- WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -175,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}
@@ -350,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
index 06807940748b..392b8cd94fc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
@@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v2_0_init(struct amdgpu_device *adev);
u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5c7d5f73f54f..321f8a997be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
const unsigned eng = 17;
unsigned int i;
+ spin_lock(&adev->gmc.invalidate_lock);
+ /*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated
+	 * state in between; this works around the issue.
+ */
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+			/* a read return value of 1 means the semaphore was acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
udelay(1);
}
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+ /*
+		 * release the semaphore after the invalidation;
+		 * writing 0 releases it
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
+ spin_unlock(&adev->gmc.invalidate_lock);
+
if (i < adev->usec_timeout)
return;
@@ -278,7 +312,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
int r;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
mutex_lock(&adev->mman.gtt_window_lock);
@@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated
+	 * state in between; this works around the issue.
+ */
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means the semaphore was acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /*
+		 * release the semaphore after the invalidation;
+		 * writing 0 releases it
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -396,43 +453,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 1 system
* 0 valid
*/
-static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
+{
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -459,12 +496,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
+ *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+
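+	/* PRT mappings become snooped, logged system pages and lose the valid bit */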
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags |= AMDGPU_PTE_SNOOPED;
+ *flags |= AMDGPU_PTE_LOG;
+ *flags |= AMDGPU_PTE_SYSTEM;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+}
+
static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v10_0_get_vm_pde
+ .map_mtype = gmc_v10_0_map_mtype,
+ .get_vm_pde = gmc_v10_0_get_vm_pde,
+ .get_vm_pte = gmc_v10_0_get_vm_pte
};
static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -518,8 +575,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
{
u64 base = 0;
- if (!amdgpu_sriov_vf(adev))
- base = gfxhub_v2_0_get_fb_location(adev);
+ base = gfxhub_v2_0_get_fb_location(adev);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
@@ -539,24 +595,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
-
- if (!amdgpu_emu_mode)
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- else {
- /* hard code vram_width for emulation */
- chansize = 128;
- numchan = 1;
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* Could aper size report 0 ? */
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -635,7 +680,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v10_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v2_0_init(adev);
@@ -643,7 +688,15 @@ static int gmc_v10_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (!amdgpu_emu_mode)
+ adev->gmc.vram_width = vram_width;
+ else
+ adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -793,7 +846,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* Flush HDP after it is initialized */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
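
The gmc_v10_0 hunks above split the old get_vm_pte_flags() callback in two: map_mtype() translates the userspace AMDGPU_VM_MTYPE_* request into a hardware PTE encoding once, while get_vm_pte() filters and extends the flags for each individual mapping. A minimal sketch of how a caller might combine the two callbacks on an ASIC that implements both (the helper itself is hypothetical; only the callback signatures come from this patch):

	/* Hypothetical caller, shown only to illustrate the split between
	 * map_mtype() and get_vm_pte(); the real call sites live elsewhere
	 * in amdgpu. */
	static uint64_t example_build_pte_flags(struct amdgpu_device *adev,
						struct amdgpu_bo_va_mapping *mapping,
						uint32_t vm_flags)
	{
		uint64_t pte_flags = 0;

		/* translate the requested memory type exactly once */
		pte_flags |= adev->gmc.gmc_funcs->map_mtype(adev,
						vm_flags & AMDGPU_VM_MTYPE_MASK);

		/* let the ASIC-specific callback adjust the per-mapping flags */
		adev->gmc.gmc_funcs->get_vm_pte(adev, mapping, &pte_flags);

		return pte_flags;
	}
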
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fb1765e92d1..b205039350b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
@@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
- .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
+ .get_vm_pte = gmc_v6_0_get_vm_pte,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c3d9bc3a641..f08e5330642d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
- .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v7_0_get_vm_pde
+ .get_vm_pde = gmc_v7_0_get_vm_pde,
+ .get_vm_pte = gmc_v7_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index ea764dd9245d..6d96d40fbcb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
-}
-
static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
+static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ *flags &= ~AMDGPU_PTE_PRT;
+}
+
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
@@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
.set_prt = gmc_v8_0_set_prt,
- .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v8_0_get_vm_pde
+ .get_vm_pde = gmc_v8_0_get_vm_pde,
+ .get_vm_pte = gmc_v8_0_get_vm_pte
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f91337030dc0..3c355fb5d2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -51,10 +51,12 @@
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
#include "umc_v6_1.h"
+#include "umc_v6_0.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
@@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
- struct amdgpu_iv_entry *entry)
-{
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, err_data);
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- if (adev->umc.funcs->query_ras_error_address)
- adev->umc.funcs->query_ras_error_address(adev, err_data);
-
- /* only uncorrectable error needs gpu reset */
- if (err_data->ue_count)
- amdgpu_ras_reset_gpu(adev, 0);
-
- return AMDGPU_RAS_SUCCESS;
-}
-
-static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
}
/* If it's the first fault for this address, process it normally */
+ if (retry_fault && !in_interrupt() &&
+ amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
.set = gmc_v9_0_ecc_interrupt_state,
- .process = gmc_v9_0_process_ecc_irq,
+ .process = amdgpu_umc_process_ecc_irq,
};
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
@@ -491,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
spin_lock(&adev->gmc.invalidate_lock);
+
+ /*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated
+	 * state in between; this works around the issue.
+ */
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (j = 0; j < adev->usec_timeout; j++) {
+			/* a read return value of 1 means the semaphore was acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (j >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -506,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
break;
udelay(1);
}
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+ /*
+		 * release the semaphore after the invalidation;
+		 * writing 0 releases it
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
spin_unlock(&adev->gmc.invalidate_lock);
+
if (j < adev->usec_timeout)
return;
@@ -521,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+	 * The GPUVM invalidate acknowledge state may be lost across a
+	 * power-gating cycle. Acquire a semaphore before the invalidation
+	 * and release it afterwards to avoid entering the power-gated
+	 * state in between; this works around the issue.
+ */
+
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+		/* a read return value of 1 means the semaphore was acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -531,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+	/* TODO: semaphore handling for GFXHUB still needs further debugging. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /*
+		 * release the semaphore after the invalidation;
+		 * writing 0 releases it
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
@@ -584,44 +609,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
* 0 valid
*/
-static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
- uint32_t flags)
+static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
-
- switch (flags & AMDGPU_VM_MTYPE_MASK) {
+ switch (flags) {
case AMDGPU_VM_MTYPE_DEFAULT:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_NC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
case AMDGPU_VM_MTYPE_WC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
+ case AMDGPU_VM_MTYPE_RW:
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
case AMDGPU_VM_MTYPE_CC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
case AMDGPU_VM_MTYPE_UC:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
default:
- pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
- break;
+ return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
}
-
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT;
-
- return pte_flag;
}
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
@@ -648,12 +654,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
}
+static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t *flags)
+{
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
+ *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+
+ *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
+ *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+
+ if (mapping->flags & AMDGPU_PTE_PRT) {
+ *flags |= AMDGPU_PTE_PRT;
+ *flags &= ~AMDGPU_PTE_VALID;
+ }
+
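+	/* on Arcturus, VRAM mappings reached over XGMI additionally get the snooped bit */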
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ !(*flags & AMDGPU_PTE_SYSTEM) &&
+ mapping->bo_va->is_xgmi)
+ *flags |= AMDGPU_PTE_SNOOPED;
+}
+
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
- .get_vm_pde = gmc_v9_0_get_vm_pde
+ .map_mtype = gmc_v9_0_map_mtype,
+ .get_vm_pde = gmc_v9_0_get_vm_pde,
+ .get_vm_pte = gmc_v9_0_get_vm_pte
};
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
@@ -664,6 +692,9 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ adev->umc.funcs = &umc_v6_0_funcs;
+ break;
case CHIP_VEGA20:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
@@ -681,7 +712,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA20:
- adev->mmhub_funcs = &mmhub_v1_0_funcs;
+ adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
default:
break;
@@ -762,140 +793,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_ecc_ras_block_late_init(void *handle,
- struct ras_fs_if *fs_info, struct ras_common_if *ras_block)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = NULL;
- struct ras_ih_if ih_info = {
- .cb = gmc_v9_0_process_ras_data_cb,
- };
- int r;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- ras_if = &adev->gmc.umc_ras_if;
- else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB)
- ras_if = &adev->gmc.mmhub_ras_if;
- else
- BUG();
-
- if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
- amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = *ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- r = 0;
- }
- goto feature;
- }
-
- ih_info.head = **ras_if;
- fs_info->head = **ras_if;
-
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
- }
-
- amdgpu_ras_debugfs_create(adev, fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, fs_info);
- if (r)
- goto sysfs;
-resume:
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) {
- r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- if (ras_block->block == AMDGPU_RAS_BLOCK__UMC)
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
-}
-
-static int gmc_v9_0_ecc_late_init(void *handle)
-{
- int r;
-
- struct ras_fs_if umc_fs_info = {
- .sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
- };
- struct ras_common_if umc_ras_block = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "umc",
- };
- struct ras_fs_if mmhub_fs_info = {
- .sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
- };
- struct ras_common_if mmhub_ras_block = {
- .block = AMDGPU_RAS_BLOCK__MMHUB,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "mmhub",
- };
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &umc_fs_info, &umc_ras_block);
- if (r)
- return r;
-
- r = gmc_v9_0_ecc_ras_block_late_init(handle,
- &mmhub_fs_info, &mmhub_ras_block);
- return r;
-}
-
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool r;
+ int r;
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
@@ -929,7 +830,7 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- r = gmc_v9_0_ecc_late_init(handle);
+ r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -970,33 +871,11 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
- int chansize, numchan;
int r;
- if (amdgpu_sriov_vf(adev)) {
- /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
- * and DF related registers is not readable, seems hardcord is the
- * only way to set the correct vram_width
- */
- adev->gmc.vram_width = 2048;
- } else if (amdgpu_emu_mode != 1) {
- adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
- }
-
- if (!adev->gmc.vram_width) {
- /* hbm memory channel size */
- if (adev->flags & AMD_IS_APU)
- chansize = 64;
- else
- chansize = 128;
-
- numchan = adev->df_funcs->get_hbm_channel_number(adev);
- adev->gmc.vram_width = numchan * chansize;
- }
-
/* size in MB on si */
adev->gmc.mc_vram_size =
- adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
+ adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -1108,7 +987,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
static int gmc_v9_0_sw_init(void *handle)
{
- int r;
+ int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfxhub_v1_0_init(adev);
@@ -1119,7 +998,32 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->gmc.invalidate_lock);
- adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
+ r = amdgpu_atomfirmware_get_vram_info(adev,
+ &vram_width, &vram_type, &vram_vendor);
+ if (amdgpu_sriov_vf(adev))
+		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as on
+		 * RAVEN, and the DF-related registers are not readable;
+		 * hardcoding seems to be the only way to set the correct
+		 * vram_width.
+		 */
+ adev->gmc.vram_width = 2048;
+ else if (amdgpu_emu_mode != 1)
+ adev->gmc.vram_width = vram_width;
+
+ if (!adev->gmc.vram_width) {
+ int chansize, numchan;
+
+ /* hbm memory channel size */
+ if (adev->flags & AMD_IS_APU)
+ chansize = 64;
+ else
+ chansize = 128;
+
+ numchan = adev->df_funcs->get_hbm_channel_number(adev);
+ adev->gmc.vram_width = numchan * chansize;
+ }
+
+ adev->gmc.vram_type = vram_type;
+ adev->gmc.vram_vendor = vram_vendor;
switch (adev->asic_type) {
case CHIP_RAVEN:
adev->num_vmhubs = 2;
@@ -1240,33 +1144,7 @@ static int gmc_v9_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
void *stolen_vga_buf;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
- adev->gmc.umc_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.umc_ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /* remove fs first */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /* remove the IH */
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) &&
- adev->gmc.mmhub_ras_if) {
- struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if;
-
- /* remove fs and disable ras feature */
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
-
+ amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1316,13 +1194,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
- int r, i;
- bool value;
- u32 tmp;
-
- amdgpu_device_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
+ int r;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -1332,15 +1204,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- /* TODO for renoir */
- mmhub_v1_0_update_power_gating(adev, true);
- break;
- default:
- break;
- }
-
r = gfxhub_v1_0_gart_enable(adev);
if (r)
return r;
@@ -1352,6 +1215,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
+ DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+ (unsigned)(adev->gmc.gart_size >> 20),
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ adev->gart.ready = true;
+ return 0;
+}
+
+static int gmc_v9_0_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool value;
+ int r, i;
+ u32 tmp;
+
+	/* The sequence of these two function calls matters. */
+ gmc_v9_0_init_golden_registers(adev);
+
+ if (adev->mode_info.num_crtc) {
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ /* Lockout access through VGA aperture*/
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ }
+ }
+
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
+
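+	/* ASIC-specific setup: MMHUB power gating on Raven, HDP_MMHUB_GCC on Arcturus */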
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ /* TODO for renoir */
+ mmhub_v1_0_update_power_gating(adev, true);
+ break;
+ case CHIP_ARCTURUS:
+ WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
+ break;
+ default:
+ break;
+ }
+
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
@@ -1361,7 +1267,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
/* After HDP is initialized, flush HDP.*/
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -1377,28 +1283,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
for (i = 0; i < adev->num_vmhubs; ++i)
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
- (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
- adev->gart.ready = true;
- return 0;
-}
-
-static int gmc_v9_0_hw_init(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* The sequence of these two function calls matters.*/
- gmc_v9_0_init_golden_registers(adev);
-
- if (adev->mode_info.num_crtc) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- }
+ if (adev->umc.funcs && adev->umc.funcs->init_registers)
+ adev->umc.funcs->init_registers(adev);
r = gmc_v9_0_gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 04cd4b6f95d4..28105e4af507 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -206,6 +206,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp);
}
@@ -418,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
@@ -616,5 +620,6 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
}
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index b39bea6f54e9..a7cb185d639a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -31,20 +31,25 @@
#include "soc15_common.h"
-static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+	/* two registers' distance between mmMMVM_CONTEXT0_* and mmMMVM_CONTEXT1_* */
+ int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v2_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
@@ -161,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
}
@@ -341,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
index db16f3ece218..3ea4344f0315 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h
@@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev);
int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 9ed178fa241c..66efe2f7bd76 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -249,6 +249,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
@@ -502,6 +504,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev)
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
new file mode 100644
index 000000000000..0d8767eb7a70
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+#include "soc15.h"
+#include "navi10_ih.h"
+#include "soc15_common.h"
+#include "mxgpu_nv.h"
+#include "mxgpu_ai.h"
+
+static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
+{
+ WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
+}
+
+static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
+{
+ WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
+}
+
+/*
+ * this peek_msg may *only* be called from the IRQ routine, because there
+ * the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already been
+ * set to 1 by the host.
+ *
+ * if not called from the IRQ routine, peek_msg is not guaranteed to return
+ * the correct value, since RCV_DW0 is only meaningful while the host has
+ * RCV_MSG_VALID set.
+ */
+static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
+{
+ return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+}
+
+
+static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
+ enum idh_event event)
+{
+ u32 reg;
+
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ if (reg != event)
+ return -ENOENT;
+
+ xgpu_nv_mailbox_send_ack(adev);
+
+ return 0;
+}
+
+static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
+{
+ return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
+}
+
+static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
+{
+ int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
+ u8 reg;
+
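+	/* poll the TRN ack bit (bit 1) every 5 ms until it is set or the timeout expires */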
+ do {
+ reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
+ if (reg & 2)
+ return 0;
+
+ mdelay(5);
+ timeout -= 5;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
+
+ return -ETIME;
+}
+
+static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+{
+ int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+
+ do {
+ r = xgpu_nv_mailbox_rcv_msg(adev, event);
+ if (!r)
+ return 0;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+ pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
+
+ return -ETIME;
+}
+
+static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3)
+{
+ u32 reg;
+ int r;
+ uint8_t trn;
+
+ /* IMPORTANT:
+	 * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; once the host's
+	 * RCV_MSG_ACK is cleared, the hardware automatically clears the VF's
+	 * TRN_MSG_ACK as well, otherwise xgpu_nv_poll_ack() below would
+	 * return immediately
+ */
+ do {
+ xgpu_nv_mailbox_set_valid(adev, false);
+ trn = xgpu_nv_peek_ack(adev);
+ if (trn) {
+ pr_err("trn=%x ACK should not assert! wait again !\n", trn);
+ msleep(1);
+ }
+ } while (trn);
+
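+	/* stage the request in TRN_DW0 and the three payload words in TRN_DW1..DW3 */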
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_nv_mailbox_set_valid(adev, true);
+
+ /* start to poll ack */
+ r = xgpu_nv_poll_ack(adev);
+ if (r)
+ pr_err("Doesn't get ack from pf, continue\n");
+
+ xgpu_nv_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ /* only wait for the response msg for GPU access requests */
+ if (req == IDH_REQ_GPU_INIT_ACCESS ||
+ req == IDH_REQ_GPU_FINI_ACCESS ||
+ req == IDH_REQ_GPU_RESET_ACCESS) {
+ r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ if (r) {
+ pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
+ return r;
+ }
+ /* Retrieve checksum from mailbox2 */
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
+ adev->virt.fw_reserve.checksum_key =
+ RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ }
+ }
+
+ return 0;
+}
+
+static int xgpu_nv_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
+static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+
+ req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
+ return xgpu_nv_send_access_requests(adev, req);
+}
+
+static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
+ bool init)
+{
+ enum idh_request req;
+ int r = 0;
+
+ req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
+ r = xgpu_nv_send_access_requests(adev, req);
+
+ return r;
+}
+
+static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+ int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
+ int locked;
+
+ /* block amdgpu_gpu_recover till msg FLR COMPLETE is received,
+ * otherwise the mailbox msg will be ruined/reset by
+ * the VF FLR.
+ *
+ * we can unlock lock_reset to allow "amdgpu_job_timedout"
+ * to run gpu_recover() once FLR_NOTIFICATION_CMPL is received,
+ * which means the host side has finished this VF's FLR.
+ */
+ locked = mutex_trylock(&adev->lock_reset);
+ if (locked)
+ adev->in_gpu_reset = 1;
+
+ do {
+ if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
+ goto flr_done;
+
+ msleep(10);
+ timeout -= 10;
+ } while (timeout > 1);
+
+flr_done:
+ if (locked) {
+ adev->in_gpu_reset = 0;
+ mutex_unlock(&adev->lock_reset);
+ }
+
+ /* Trigger recovery for world switch failure if no TDR */
+ if (amdgpu_device_should_recover_gpu(adev))
+ amdgpu_device_gpu_recover(adev, NULL);
+}
+
+static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
+
+ switch (event) {
+ case IDH_FLR_NOTIFICATION:
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.flr_work);
+ break;
+ /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
+ * ignore it for now since the polling thread will handle it;
+ * other msgs such as flr complete are not handled here either.
+ */
+ case IDH_CLR_MSG_BUF:
+ case IDH_FLR_NOTIFICATION_CMPL:
+ case IDH_READY_TO_ACCESS_GPU:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_ack_irq,
+ .process = xgpu_nv_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
+ .set = xgpu_nv_set_mailbox_rcv_irq,
+ .process = xgpu_nv_mailbox_rcv_irq,
+};
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
+const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
+ .req_full_gpu = xgpu_nv_request_full_gpu_access,
+ .rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .reset_gpu = xgpu_nv_request_reset,
+ .wait_reset = NULL,
+ .trans_msg = xgpu_nv_mailbox_trans_msg,
+};
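The send path above is a two-flag handshake over the one-byte mailbox control registers: the VF first drops TRN_MSG_VALID until any stale TRN_MSG_ACK clears, stages the payload in TRN_DW0..DW3, raises VALID, waits for the host's ack, and drops VALID again. A minimal sketch of that ordering, where mbox_set_valid(), mbox_peek_ack() and mbox_write_dwords() are hypothetical stand-ins for the WREG8/RREG8/WREG32_NO_KIQ accesses shown above (timeout handling elided):

	/* hedged sketch of the VF->PF send ordering, not the driver's code */
	static void vf_send_request(struct vf_mbox *m, u32 req)
	{
		do {					/* 1. flush any stale ack */
			mbox_set_valid(m, false);
		} while (mbox_peek_ack(m));

		mbox_write_dwords(m, req);		/* 2. stage TRN_DW0..DW3  */
		mbox_set_valid(m, true);		/*    then raise VALID    */

		while (!mbox_peek_ack(m))		/* 3. wait for host's ack */
			msleep(5);
		mbox_set_valid(m, false);		/*    finish handshake    */
	}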
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
new file mode 100644
index 000000000000..99b15f6865cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MXGPU_NV_H__
+#define __MXGPU_NV_H__
+
+#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+
+extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
+
+void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+
+#endif
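The two *_OFFSET_BYTE macros above convert a dword register index into a byte address: SOC15_REG_OFFSET() returns the dword offset of the 32-bit MAILBOX_CONTROL register, so multiplying by 4 yields the byte offset of its first byte (the TRN valid/ack bits accessed via WREG8/RREG8) and adding 1 addresses its second byte (the RCV bits). Worked through with a purely hypothetical dword offset of 0x1234:

	/* assumption: SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) == 0x1234 */
	trn_byte = 0x1234 * 4;		/* 0x48d0: byte 0, TRN_MSG_VALID / TRN_MSG_ACK */
	rcv_byte = 0x1234 * 4 + 1;	/* 0x48d1: byte 1, RCV_MSG_VALID / RCV_MSG_ACK */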
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 9fe08408db58..9af73567e716 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
navi10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8);
@@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
index a56c93620e78..88efaecf9f70 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi10_ip_offset.h"
int navi10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
index cadc7603ca41..a786d159e5e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi12_ip_offset.h"
int navi12_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
index 3b5f0f65e096..4ea1e8fbb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
@@ -24,7 +24,6 @@
#include "nv.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "navi14_ip_offset.h"
int navi14_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index c05d78d4efc6..f3a3fe746222 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -27,11 +27,21 @@
#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+
+static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
- WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
- amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
- NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
+ amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
@@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
- .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
@@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
.detect_hw_virt = nbio_v2_3_detect_hw_virt,
+ .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
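With the remap in place, the HDP flush no longer targets mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL directly: remap_hdp_registers() points the REMAP_HDP_*_FLUSH_CNTL window at a hole in the MMIO BAR, and hdp_flush() writes through that hole. Because WREG32_NO_KIQ() takes a dword offset, the byte address is shifted right by two. A sketch of the arithmetic, assuming 4 KiB pages so that the hole offset picked in nv.c later in this patch (MMIO_REG_HOLE_OFFSET = 0x80000 - PAGE_SIZE) comes out to 0x7f000:

	/* assumption: adev->rmmio_remap.reg_offset == 0x80000 - 0x1000 == 0x7f000 */
	u32 byte_off  = 0x7f000 + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL;
	u32 dword_off = byte_off >> 2;	/* WREG32_NO_KIQ() indexes in dwords */
	WREG32_NO_KIQ(dword_off, 0);	/* the same store hdp_flush() issues above */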
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index 5ae52085f6b7..a43b60acf7f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6590143c3f75..635d9e1fc0a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
- .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 0743a6f016f3..6dc743b73218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 74eecb768a82..d6cbf26074bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
- .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index 508d549c5029..e7aefb252550 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 910fffced43b..0db458f9fafc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -23,10 +23,12 @@
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
+#include "amdgpu_ras.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
@@ -266,7 +268,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
-static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -306,17 +308,208 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
- uint32_t def, data;
- def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
- data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+}
- if (def != data)
- WREG32_PCIE(smnPCIE_CI_CNTL, data);
+static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
+{
+ uint32_t bif_doorbell_intr_cntl;
+
+ bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+ WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ amdgpu_ras_global_ras_isr(adev);
+ }
+}
+
+
+static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_controller_irq enablement should be done in psp bl when it
+ * tries to enable the ras feature. The driver only needs to set the correct
+ * interrupt vector for the bare-metal and sriov use cases respectively.
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set interrupt vector select bit to 0 to select
+ * vector 1 for the bare metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for ras_controller_irq should be written
+ * to the BIF ring instead of the general iv ring. However, due to a known
+ * bif ring hw bug, it has to be disabled. There is no chance the process
+ * function will be invoked, so just leave it as a dummy one.
+ */
+ return 0;
+}
+
+static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ /* The ras_err_event_athub_irq enablement should be done in psp bl when it
+ * tries to enable the ras feature. The driver only needs to set the correct
+ * interrupt vector for the bare-metal and sriov use cases respectively.
+ */
+ uint32_t bif_intr_cntl;
+
+ bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+ if (state == AMDGPU_IRQ_STATE_ENABLE) {
+ /* set interrupt vector select bit to 0 to select
+ * vector 1 for the bare metal case */
+ bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
+ BIF_INTR_CNTL,
+ RAS_INTR_VEC_SEL, 0);
+ WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+ }
+
+ return 0;
+}
+
+static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ /* By design, the ih cookie for err_event_athub_irq should be written
+ * to the BIF ring instead of the general iv ring. However, due to a known
+ * bif ring hw bug, it has to be disabled. There is no chance the process
+ * function will be invoked, so just leave it as a dummy one.
+ */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
+ .set = nbio_v7_4_set_ras_controller_irq_state,
+ .process = nbio_v7_4_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_4_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_4_process_err_event_athub_irq,
+};
+
+static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_4_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_4_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ uint32_t global_sts, central_sts, int_eoi;
+ uint32_t corr, fatal, non_fatal;
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
+ fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
+ non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
+ ParityErrNonFatal);
+
+ if (corr)
+ err_data->ce_count++;
+ if (fatal)
+ err_data->ue_count++;
+
+ if (corr || fatal || non_fatal) {
+ central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+ /* clear error status register */
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+
+ if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
+ BIFL_RasContller_Intr_Recv)) {
+ /* clear interrupt status register */
+ WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
+ int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
+ int_eoi = REG_SET_FIELD(int_eoi,
+ IOHC_INTERRUPT_EOI, SMI_EOI, 1);
+ WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
+ }
+ }
+}
+
+static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
+ bool enable)
+{
+ WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+ DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
- .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg,
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
@@ -330,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
+ .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
@@ -337,4 +531,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.init_registers = nbio_v7_4_init_registers,
.detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
};
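nbio_v7_4_query_ras_error_count() above folds the RAS_GLOBAL_STATUS_LO parity bits into the caller's counters: a correctable parity error bumps ce_count, a fatal one bumps ue_count, and any of the three bits triggers the clear/EOI path. A worked trace under one hypothetical status read:

	/* assumption: ParityErrCorr = 1, ParityErrFatal = 0, ParityErrNonFatal = 0 */
	err_data->ce_count += 1;	/* one correctable error recorded          */
	/* ue_count unchanged, since no fatal error was flagged.             */
	/* corr || fatal || non_fatal is true, so the status register is     */
	/* written back with the value just read (presumably write-one-to-   */
	/* clear) and, if the central status shows the controller interrupt  */
	/* was received, an SMI EOI is issued via smnIOHC_INTERRUPT_EOI.     */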
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index c442865bac4f..b1ac82872752 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -26,6 +26,7 @@
#include "soc15_common.h"
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index de9b995b65b1..0ba66bef5746 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -40,12 +40,14 @@
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
@@ -53,6 +55,7 @@
#include "vcn_v2_0.h"
#include "dce_virtual.h"
#include "mes_v10_1.h"
+#include "mxgpu_nv.h"
static const struct amd_ip_funcs nv_common_ip_funcs;
@@ -63,8 +66,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -78,8 +81,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -119,7 +122,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 nv_get_xclk(struct amdgpu_device *adev)
@@ -154,8 +157,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
- /* TODO: will implement it when SMU header is available */
- return false;
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
+ /* set rom index to 0 */
+ WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+
+ return true;
}
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
@@ -176,6 +198,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -279,7 +302,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -296,7 +319,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
- if (smu_baco_is_support(smu))
+ if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
@@ -368,8 +391,8 @@ static void nv_program_aspm(struct amdgpu_device *adev)
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version nv_common_ip_block =
@@ -423,9 +446,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
- adev->nbio_funcs = &nbio_v2_3_funcs;
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.ops = &xgpu_nv_virt_ops;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -435,7 +462,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -446,7 +473,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (adev->enable_mes)
@@ -458,7 +485,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -469,7 +496,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
+ is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
break;
@@ -482,12 +509,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void nv_invalidate_hdp(struct amdgpu_device *adev,
@@ -532,6 +559,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev)
return false;
}
+static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+
+ /* TODO
+ * dummy implementation for the pcie_replay_count sysfs interface
+ */
+
+ return 0;
+}
+
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
@@ -579,12 +616,16 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_full_reset = &nv_need_full_reset,
.get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
+ .get_pcie_replay_count = &nv_get_pcie_replay_count,
};
static int nv_common_early_init(void *handle)
{
+#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -667,16 +708,31 @@ static int nv_common_early_init(void *handle)
return -EINVAL;
}
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_setting(adev);
+ xgpu_nv_mailbox_set_irq_funcs(adev);
+ }
+
return 0;
}
static int nv_common_late_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_get_irq(adev);
+
return 0;
}
static int nv_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_nv_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -694,7 +750,13 @@ static int nv_common_hw_init(void *handle)
/* enable aspm */
nv_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
+ /* remap HDP registers to a hole in mmio space,
+ * in order to expose those registers
+ * to process space
+ */
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
@@ -856,9 +918,9 @@ static int nv_common_set_clockgating_state(void *handle,
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
nv_update_hdp_mem_power_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -886,7 +948,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_MGCG */
tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
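The new nv_read_bios_from_rom() above streams the vbios out of the SMUIO ROM port: ROM_INDEX is reset to 0 and each RREG32 of ROM_DATA returns the next dword (the index presumably auto-increments on the hardware side). Because the loop runs ALIGN(length_bytes, 4) / 4 times, an unaligned request is rounded up, so the caller's buffer must be sized to a dword multiple:

	/* e.g. length_bytes = 10 -> length_dw = ALIGN(10, 4) / 4 = 3 */
	/* three ROM_DATA reads store 12 bytes into dw_ptr[0..2], so the
	 * bios buffer must hold at least ALIGN(length_bytes, 4) bytes.
	 */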
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5d95e614369a..b345e69ba246 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -40,6 +40,9 @@
MODULE_FIRMWARE("amdgpu/raven_asd.bin");
MODULE_FIRMWARE("amdgpu/picasso_asd.bin");
MODULE_FIRMWARE("amdgpu/raven2_asd.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ta.bin");
+MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
@@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
-
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err) {
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+ dev_info(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out2;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)
+ adev->psp.ta_fw->data;
+ adev->psp.ta_hdcp_ucode_version =
+ le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
+ adev->psp.ta_hdcp_ucode_size =
+ le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
+ adev->psp.ta_hdcp_start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_dtm_ucode_version =
+ le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
+ adev->psp.ta_dtm_ucode_size =
+ le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
+ adev->psp.ta_dtm_start_addr =
+ (uint8_t *)adev->psp.ta_hdcp_start_addr +
+ le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+ }
+
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
out:
if (err) {
dev_err(adev->dev,
@@ -228,6 +269,7 @@ static int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
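The one-line amdgpu_asic_flush_hdp() addition here (repeated in psp_v11_0, psp_v12_0 and psp_v3_1 below) is an ordering fix: the ring frame is written through the HDP path, so the host flushes HDP before advancing the write pointer, which keeps the PSP from ever seeing a pointer that runs ahead of the frame contents. The intended ordering, sketched against the code above:

	write_frame->fence_value = index;	/* frame payload may sit in the HDP cache */
	amdgpu_asic_flush_hdp(adev, NULL);	/* drain it so the PSP sees the full frame */
	/* only now is it safe to publish the advanced write pointer */
	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;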
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 10166104b8a3..ffeaa2f5588d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
#define mmRLC_GPM_UCODE_DATA_NV10 0x5b62
#define mmSDMA0_UCODE_ADDR_NV10 0x5880
#define mmSDMA0_UCODE_DATA_NV10 0x5881
+/* memory training timeout define */
+#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
@@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -182,7 +186,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
- case CHIP_ARCTURUS:
break;
default:
BUG();
@@ -205,18 +208,26 @@ out:
return err;
}
+static bool psp_v11_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check tOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -233,7 +244,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Copy PSP KDB binary to memory */
memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
- /* Provide the sys driver to bootloader */
+ /* Provide the PSP KDB to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
@@ -252,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
+ if (psp_v11_0_is_sos_alive(psp)) {
psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
@@ -296,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
@@ -398,6 +405,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
return false;
}
+static int psp_v11_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+
+ /* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ return ret;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -407,6 +442,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
if (psp_v11_0_support_vmr_ring(psp)) {
+ ret = psp_v11_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -426,6 +467,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
0x80000000, 0x8000FFFF, false);
} else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for sOS ready for ring creation\n");
+ return ret;
+ }
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -451,33 +500,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
return ret;
}
-static int psp_v11_0_ring_stop(struct psp_context *psp,
- enum psp_ring_type ring_type)
-{
- int ret = 0;
- struct amdgpu_device *adev = psp->adev;
-
- /* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
- GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
- else
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
- GFX_CTRL_CMD_ID_DESTROY_RINGS);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- else
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
-
- return ret;
-}
static int psp_v11_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
@@ -541,6 +563,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
@@ -889,6 +912,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
return psp_rlc_autoload_start(psp);
}
+static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
+{
+ int ret;
+ int i;
+ uint32_t data_32;
+ int max_wait;
+ struct amdgpu_device *adev = psp->adev;
+
+ data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg);
+
+ max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout;
+ for (i = 0; i < max_wait; i++) {
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret == 0)
+ break;
+ }
+ if (i < max_wait)
+ ret = 0;
+ else
+ ret = -ETIME;
+
+ DRM_DEBUG("training %s %s, cost %d @ %d ms\n",
+ (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long",
+ (ret == 0) ? "succeed" : "failed",
+ i, adev->usec_timeout/1000);
+ return ret;
+}
+
+static void psp_v11_0_memory_training_fini(struct psp_context *psp)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ kfree(ctx->sys_cache);
+ ctx->sys_cache = NULL;
+}
+
+static int psp_v11_0_memory_training_init(struct psp_context *psp)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
+ DRM_DEBUG("memory training is not supported!\n");
+ return 0;
+ }
+
+ ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
+ if (ctx->sys_cache == NULL) {
+ DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+ ret = -ENOMEM;
+ goto Err_out;
+ }
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+ ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
+ return 0;
+
+Err_out:
+ psp_v11_0_memory_training_fini(psp);
+ return ret;
+}
+
+/*
+ * save and restore process
+ */
+static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
+{
+ int ret;
+ uint32_t p2c_header[4];
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+ uint32_t *pcache = (uint32_t*)ctx->sys_cache;
+
+ if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) {
+ DRM_DEBUG("Memory training is not supported.\n");
+ return 0;
+ } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) {
+ DRM_ERROR("Memory training initialization failure.\n");
+ return -EINVAL;
+ }
+
+ if (psp_v11_0_is_sos_alive(psp)) {
+ DRM_DEBUG("SOS is alive, skip memory training.\n");
+ return 0;
+ }
+
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false);
+ DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n",
+ pcache[0], pcache[1], pcache[2], pcache[3],
+ p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]);
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ DRM_DEBUG("Short training depends on restore.\n");
+ ops |= PSP_MEM_TRAIN_RESTORE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_RESTORE) &&
+ pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE &&
+ pcache[3] == p2c_header[3])) {
+ DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n");
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ if ((ops & PSP_MEM_TRAIN_SAVE) &&
+ p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) {
+ DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n");
+ ops |= PSP_MEM_TRAIN_SEND_LONG_MSG;
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG;
+ ops |= PSP_MEM_TRAIN_SAVE;
+ }
+
+ DRM_DEBUG("Memory training ops:%x.\n", ops);
+
+ if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
+ if (ret) {
+ DRM_ERROR("Send long training msg failed.\n");
+ return ret;
+ }
+ }
+
+ if (ops & PSP_MEM_TRAIN_SAVE) {
+ amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false);
+ }
+
+ if (ops & PSP_MEM_TRAIN_RESTORE) {
+ amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true);
+ }
+
+ if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) {
+ ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ?
+ PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN);
+ if (ret) {
+ DRM_ERROR("send training msg failed.\n");
+ return ret;
+ }
+ }
+ ctx->training_cnt++;
+ return 0;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -909,6 +1088,9 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ras_trigger_error = psp_v11_0_ras_trigger_error,
.ras_cure_posion = psp_v11_0_ras_cure_posion,
.rlc_autoload_start = psp_v11_0_rlc_autoload_start,
+ .mem_training_init = psp_v11_0_memory_training_init,
+ .mem_training_fini = psp_v11_0_memory_training_fini,
+ .mem_training = psp_v11_0_memory_training,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
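The ops word in psp_v11_0_memory_training() above cascades: SEND_SHORT_MSG pulls in RESTORE, a RESTORE with an invalid sys_cache pulls in SAVE, a SAVE without a valid p2c header pulls in SEND_LONG_MSG, and SEND_LONG_MSG in turn drops SHORT and forces SAVE. Tracing a cold boot (empty sys_cache, no signature in the p2c region) that starts from ops = PSP_MEM_TRAIN_SEND_SHORT_MSG, with the PSP_MEM_TRAIN_ prefix abbreviated:

	ops = SEND_SHORT_MSG;
	ops |= RESTORE;		/* short training depends on restore        */
	ops |= SAVE;		/* pcache[0] has no SYSTEM_SIGNATURE yet     */
	ops |= SEND_LONG_MSG;	/* p2c_header[0] has no signature either    */
	ops &= ~SEND_SHORT_MSG;	/* long training supersedes short           */
	/* final: SEND_LONG_MSG | SAVE | RESTORE -> run the long training,
	 * save the p2c region to sys_cache, restore it to the c2p region,
	 * and skip the short message entirely.
	 */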
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index c72e43f8e0be..8f553f6f92d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -378,6 +378,7 @@ static int psp_v12_0_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index d2c727f6a8bd..fdc00938327b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -454,6 +454,7 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp,
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
+ amdgpu_asic_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4554e72c8378..4ef4d31f5231 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -747,13 +747,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
sdma_v4_0_wait_reg_mem(ring, 0, 1,
- adev->nbio_funcs->get_hdp_flush_done_offset(adev),
- adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}
@@ -1691,102 +1691,17 @@ static int sdma_v4_0_early_init(void *handle)
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry);
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct ras_common_if **ras_if = &adev->sdma.ras_if;
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- struct ras_fs_if fs_info = {
- .sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
- };
- struct ras_common_if ras_block = {
- .block = AMDGPU_RAS_BLOCK__SDMA,
- .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
- .sub_block_index = 0,
- .name = "sdma",
- };
- int r, i;
-
- if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
- return 0;
- }
-
- /* handle resume path. */
- if (*ras_if) {
- /* resend ras TA enable cmd during resume.
- * prepare to handle failure.
- */
- ih_info.head = **ras_if;
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- /* request a gpu reset. will run again. */
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- return 0;
- }
- /* fail to enable ras, cleanup all. */
- goto irq;
- }
- /* enable successfully. continue. */
- goto resume;
- }
-
- *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
- if (!*ras_if)
- return -ENOMEM;
-
- **ras_if = ras_block;
-
- r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
- if (r) {
- if (r == -EAGAIN) {
- amdgpu_ras_request_reset_on_boot(adev,
- AMDGPU_RAS_BLOCK__SDMA);
- r = 0;
- }
- goto feature;
- }
- ih_info.head = **ras_if;
- fs_info.head = **ras_if;
-
- r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
- if (r)
- goto interrupt;
-
- amdgpu_ras_debugfs_create(adev, &fs_info);
-
- r = amdgpu_ras_sysfs_create(adev, &fs_info);
- if (r)
- goto sysfs;
-resume:
- for (i = 0; i < adev->sdma.num_instances; i++) {
- r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
- if (r)
- goto irq;
- }
-
- return 0;
-irq:
- amdgpu_ras_sysfs_remove(adev, *ras_if);
-sysfs:
- amdgpu_ras_debugfs_remove(adev, *ras_if);
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
-interrupt:
- amdgpu_ras_feature_enable(adev, *ras_if, 0);
-feature:
- kfree(*ras_if);
- *ras_if = NULL;
- return r;
+ return amdgpu_sdma_ras_late_init(adev, &ih_info);
}
static int sdma_v4_0_sw_init(void *handle)
@@ -1858,21 +1773,7 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
- adev->sdma.ras_if) {
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_ih_if ih_info = {
- .head = *ras_if,
- };
-
- /*remove fs first*/
- amdgpu_ras_debugfs_remove(adev, ras_if);
- amdgpu_ras_sysfs_remove(adev, ras_if);
- /*remove the IH*/
- amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
- amdgpu_ras_feature_enable(adev, ras_if, 0);
- kfree(ras_if);
- }
+ amdgpu_sdma_ras_fini(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -1892,7 +1793,7 @@ static int sdma_v4_0_hw_init(void *handle)
if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->set_powergating_by_smu) ||
- adev->asic_type == CHIP_RENOIR)
+ (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset))
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
if (!amdgpu_sriov_vf(adev))
@@ -2025,52 +1926,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
- struct ras_err_data *err_data,
+ void *err_data,
struct amdgpu_iv_entry *entry)
{
- uint32_t err_source;
int instance;
+ /* When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood
+ */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ goto out;
+
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
- return 0;
-
- switch (entry->src_id) {
- case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
- err_source = 0;
- break;
- case SDMA0_4_0__SRCID__SDMA_ECC:
- err_source = 1;
- break;
- default:
- return 0;
- }
-
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ goto out;
- amdgpu_ras_reset_gpu(adev, 0);
+ amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+out:
return AMDGPU_RAS_SUCCESS;
}
-static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct ras_common_if *ras_if = adev->sdma.ras_if;
- struct ras_dispatch_if ih_data = {
- .entry = entry,
- };
-
- if (!ras_if)
- return 0;
-
- ih_data.head = *ras_if;
-
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
- return 0;
-}
-
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -2418,7 +2295,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.set = sdma_v4_0_set_ecc_irq_state,
- .process = sdma_v4_0_process_ecc_irq,
+ .process = amdgpu_sdma_process_ecc_irq,
};
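
The two removed blocks above (late init and sw_fini) are the per-IP RAS boilerplate now folded into shared SDMA helpers. A minimal sketch of the late-init sequence, reconstructed from the removed code; the real amdgpu_sdma_ras_late_init() lives in amdgpu_sdma.c and may differ in detail:

static int sdma_ras_late_init_sketch(struct amdgpu_device *adev,
				     struct ras_ih_if *ih_info)
{
	int r, i;

	/* hook the RAS interrupt handler for the SDMA block */
	r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
	if (r)
		return r;

	/* expose the block through debugfs/sysfs, then enable the
	 * per-instance ECC interrupts, unwinding on failure just as
	 * the removed open-coded version did
	 */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
				   AMDGPU_SDMA_IRQ_INSTANCE0 + i);
		if (r)
			return r;
	}
	return 0;
}
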
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 8493bfbbc148..f4ad2990f973 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
if (amdgpu_sriov_vf(adev))
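
From here on the series also replaces the flat adev->nbio_funcs pointer with an adev->nbio container that keeps the callbacks and related state together. A hedged sketch of that container, inferred only from the accesses visible in these hunks (exact layout and field types are assumptions):

struct amdgpu_nbio {
	const struct amdgpu_nbio_funcs *funcs;          /* adev->nbio.funcs */
	const struct nbio_hdp_flush_reg *hdp_flush_reg; /* adev->nbio.hdp_flush_reg */
	/* RAS state used later in soc15_common_late_init()/hw_fini() */
	struct ras_common_if *ras_if;
	struct amdgpu_irq_src ras_controller_irq;
	struct amdgpu_irq_src ras_err_event_athub_irq;
};
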
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 493af42152f2..f2d70a47a3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {DMA_STATUS_REG + DMA0_REGISTER_OFFSET},
+ {DMA_STATUS_REG + DMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
{GB_ADDR_CONFIG},
{MC_ARB_RAMCFG},
{GB_TILE_MODE0},
@@ -1633,7 +1644,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1668,12 +1678,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1682,14 +1687,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -1706,15 +1714,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
}
for (i = 0; i < 10; i++) {
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -1726,25 +1742,44 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
mdelay(100);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
+
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -1757,15 +1792,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2;
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
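
The si.c hunks above swap hand-rolled PCIe capability offset lookups for the kernel's pcie_capability_*() accessors, and replace the magic LNKCTL2 masks ((1 << 4) | (7 << 9), 0xf) with the named PCI_EXP_LNKCTL2_* constants. A minimal before/after sketch, where pdev stands for any struct pci_dev such as adev->pdev:

#include <linux/pci.h>

static u16 read_lnkctl(struct pci_dev *pdev)
{
	u16 lnkctl = 0;
	int pos;

	/* old style: locate the PCIe capability by hand */
	pos = pci_pcie_cap(pdev);
	if (pos)
		pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &lnkctl);

	/* new style: the helper finds the offset itself and fails
	 * cleanly on non-PCIe devices, which is why the open-coded
	 * capability checks above collapse into pci_is_pcie()
	 */
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);

	return lnkctl;
}
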
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 57bb5f9e08b2..88ae27a5a03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
si_ih_disable_interrupts(adev);
- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4ccfcdf8f16a..8e1640bc07af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -58,6 +58,9 @@
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
@@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u64 r;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* read low 32 bit */
@@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
unsigned long flags, address, data;
- address = adev->nbio_funcs->get_pcie_index_offset(adev);
- data = adev->nbio_funcs->get_pcie_data_offset(adev);
+ address = adev->nbio.funcs->get_pcie_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
/* write low 32 bit */
@@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_memsize(adev);
+ return adev->nbio.funcs->get_memsize(adev);
}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
@@ -336,6 +339,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
@@ -461,7 +465,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio_funcs->get_memsize(adev);
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
if (memsize != 0xffffffff)
break;
@@ -475,42 +479,66 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
- *cap = false;
- return -ENOENT;
- }
+ *cap = smu_baco_is_support(smu);
+ return 0;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
- return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+ }
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state)
- return -ENOENT;
+ /* avoid the NBIF getting stuck when doing RAS recovery in BACO reset */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- /* enter BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 1))
- return -EIO;
+ dev_info(adev->dev, "GPU BACO reset\n");
- /* exit BACO state */
- if (pp_funcs->set_asic_baco_state(pp_handle, 0))
- return -EIO;
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
- dev_info(adev->dev, "GPU BACO reset\n");
+ if (smu_baco_reset(smu))
+ return -EIO;
+ } else {
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- adev->in_baco_reset = 1;
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+ }
+
+ /* re-enable doorbell interrupt after BACO exit */
+ if (ras && ras->supported)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
}
static int soc15_mode2_reset(struct amdgpu_device *adev)
{
+ if (is_support_sw_smu(adev))
+ return smu_mode2_reset(&adev->smu);
if (!adev->powerplay.pp_funcs ||
!adev->powerplay.pp_funcs->asic_reset_mode_2)
return -ENOENT;
@@ -525,6 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
return AMD_RESET_METHOD_MODE2;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -626,8 +655,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
- adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
- adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -641,7 +670,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- return adev->nbio_funcs->get_rev_id(adev);
+ return adev->nbio.funcs->get_rev_id(adev);
}
int soc15_set_ip_blocks(struct amdgpu_device *adev)
@@ -667,13 +696,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->flags & AMD_IS_APU)
- adev->nbio_funcs = &nbio_v7_0_funcs;
- else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
- adev->nbio_funcs = &nbio_v7_4_funcs;
- else
- adev->nbio_funcs = &nbio_v6_1_funcs;
+ if (adev->flags & AMD_IS_APU) {
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ } else if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS) {
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ } else {
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ }
if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
adev->df_funcs = &df_v3_6_funcs;
@@ -681,7 +714,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df_funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio_funcs->detect_hw_virt(adev);
+ adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -750,13 +783,26 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ }
+
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+
+ if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
break;
case CHIP_RENOIR:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
@@ -785,7 +831,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- adev->nbio_funcs->hdp_flush(adev, ring);
+ adev->nbio.funcs->hdp_flush(adev, ring);
}
static void soc15_invalidate_hdp(struct amdgpu_device *adev,
@@ -1099,7 +1145,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
} else if (adev->pdev->device == 0x15d8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1142,7 +1190,9 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
- adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG;
}
break;
case CHIP_ARCTURUS:
@@ -1157,7 +1207,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
- AMD_CG_SUPPORT_MC_LS;
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_IH_CG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x32;
break;
@@ -1203,11 +1254,15 @@ static int soc15_common_early_init(void *handle)
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r = 0;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
- return 0;
+ if (adev->nbio.funcs->ras_late_init)
+ r = adev->nbio.funcs->ras_late_init(adev);
+
+ return r;
}
static int soc15_common_sw_init(void *handle)
@@ -1224,6 +1279,10 @@ static int soc15_common_sw_init(void *handle)
static int soc15_common_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_nbio_ras_fini(adev);
+ adev->df_funcs->sw_fini(adev);
return 0;
}
@@ -1236,12 +1295,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- adev->nbio_funcs->sdma_doorbell_range(adev, i,
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range);
}
- adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index);
}
}
@@ -1255,13 +1314,13 @@ static int soc15_common_hw_init(void *handle)
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- adev->nbio_funcs->init_registers(adev);
+ adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
* for the purpose of exposing those registers
* to process space
*/
- if (adev->nbio_funcs->remap_hdp_registers)
- adev->nbio_funcs->remap_hdp_registers(adev);
+ if (adev->nbio.funcs->remap_hdp_registers)
+ adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -1284,6 +1343,14 @@ static int soc15_common_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
+ if (adev->nbio.ras_if &&
+ amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
+ if (adev->nbio.funcs->init_ras_controller_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
+ if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ }
+
return 0;
}
@@ -1424,9 +1491,9 @@ static int soc15_common_set_clockgating_state(void *handle,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1441,9 +1508,9 @@ static int soc15_common_set_clockgating_state(void *handle,
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
- adev->nbio_funcs->update_medium_grain_clock_gating(adev,
+ adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- adev->nbio_funcs->update_medium_grain_light_sleep(adev,
+ adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -1472,7 +1539,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- adev->nbio_funcs->get_clockgating_state(adev, flags);
+ adev->nbio.funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index a3dde0c31f57..57af489a5de3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,8 +28,8 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
-#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
extern const struct amd_ip_funcs soc15_common_ip_funcs;
@@ -67,6 +67,8 @@ struct soc15_allowed_register_entry {
#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
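
The new SOC15_REG_FIELD() macro expands to the mask/shift pair of a register field, so a single macro argument can feed a callee that expects both. A hedged usage sketch; the helper and the SDMA0_STATUS_REG/RB_CMD_IDLE names are illustrative only:

static u32 field_get(u32 val, u32 mask, u32 shift)
{
	return (val & mask) >> shift;
}

static bool sdma_rb_idle(u32 status)
{
	/* expands to: status, SDMA0_STATUS_REG__RB_CMD_IDLE_MASK,
	 *             SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT
	 */
	return field_get(status, SOC15_REG_FIELD(SDMA0_STATUS_REG, RB_CMD_IDLE));
}
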
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
new file mode 100644
index 000000000000..0d6b50528d76
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_0.h"
+#include "amdgpu.h"
+
+static void umc_v6_0_init_registers(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 4; j++)
+ WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002);
+}
+
+const struct amdgpu_umc_funcs umc_v6_0_funcs = {
+ .init_registers = umc_v6_0_init_registers,
+};
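
A note on the /4 in umc_v6_0_init_registers() above: the arithmetic yields a byte offset, while amdgpu's WREG32() macro takes a dword-granular register index, hence the division. The same loop spelt out as a sketch:

static void umc_init_registers_sketch(struct amdgpu_device *adev)
{
	u32 byte_offset;
	unsigned int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			/* per-instance (i) and per-channel (j) byte offset */
			byte_offset = i * 0x100000 + 0x5010c + j * 0x2000;
			/* WREG32() (which uses the local adev) indexes
			 * registers in dwords
			 */
			WREG32(byte_offset / 4, 0x1002);
		}
	}
}
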
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
new file mode 100644
index 000000000000..109f1a57a46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_0_H__
+#define __UMC_V6_0_H__
+
+#include "soc15_common.h"
+#include "amdgpu.h"
+
+extern const struct amdgpu_umc_funcs umc_v6_0_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 8502e736f721..47c4b96b14d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -75,6 +75,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
RSMU_UMC_INDEX_MODE_EN, 0);
}
+static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev)
+{
+ uint32_t rsmu_umc_index;
+
+ rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ return REG_GET_FIELD(rsmu_umc_index,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ RSMU_UMC_INDEX_INSTANCE);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -165,7 +176,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
uint32_t umc_reg_offset, uint32_t channel_index)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr;
+ uint64_t mc_umc_status, err_addr, retired_page;
+ struct eeprom_table_record *err_rec;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -177,6 +189,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset);
/* calculate error address if ue/ce error is detected */
@@ -191,12 +204,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
err_addr &= ~((0x1ULL << lsb) - 1);
/* translate umc channel address to soc pa, 3 parts are included */
- err_data->err_addr[err_data->err_addr_cnt] =
- ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- err_data->err_addr_cnt++;
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we currently only save UE error information; CE is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev);
+
+ err_data->err_addr_cnt++;
+ }
}
/* clear umc status */
@@ -209,7 +234,7 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
amdgpu_umc_for_each_channel(umc_v6_1_query_error_address);
}
-static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
+static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t umc_reg_offset, uint32_t channel_index)
{
@@ -239,15 +264,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev,
WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT);
}
-static void umc_v6_1_ras_init(struct amdgpu_device *adev)
+static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
{
void *ras_error_status = NULL;
- amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel);
+ amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel);
}
const struct amdgpu_umc_funcs umc_v6_1_funcs = {
- .ras_init = umc_v6_1_ras_init,
+ .err_cnt_init = umc_v6_1_err_cnt_init,
+ .ras_late_init = amdgpu_umc_ras_late_init,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
.enable_umc_index_mode = umc_v6_1_enable_umc_index_mode,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 93b3500e522b..b4f84a820a44 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -202,7 +202,6 @@ static int vcn_v1_0_hw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 36ad0c0e8efb..38f787a560cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -244,33 +244,24 @@ static int vcn_v2_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
int i, r;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst->ring_jpeg;
- ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
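
The vcn_v1_0/vcn_v2_0 hunks above drop the manual ring->sched.ready bookkeeping in favour of amdgpu_ring_test_helper(). A sketch of what that helper encapsulates; the real version in amdgpu_ring.c also logs the outcome:

static int ring_test_helper_sketch(struct amdgpu_ring *ring)
{
	int r = amdgpu_ring_test_ring(ring);

	/* keep the scheduler's view consistent with the test result */
	ring->sched.ready = !r;
	return r;
}
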
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 395c2259f979..93edf9193a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
@@ -255,32 +256,24 @@ static int vcn_v2_5_hw_init(void *handle)
continue;
ring = &adev->vcn.inst[j].ring_dec;
- adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, j);
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
- ring->sched.ready = false;
- continue;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.inst[j].ring_jpeg;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
done:
if (!r)
@@ -300,7 +293,7 @@ static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i;
+ int i, j;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -312,8 +305,8 @@ static int vcn_v2_5_hw_fini(void *handle)
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[i].ring_enc[i];
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
@@ -423,7 +416,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Disable clock gating for VCN block
*/
@@ -542,7 +534,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
- * @sw: enable SW clock gating
*
* Enable clock gating for VCN block
*/
@@ -716,6 +707,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
uint32_t rb_bufsz, tmp;
int i, j, k, r;
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -946,6 +940,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 9eae3536ddad..5cb7e231de5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- adev->nbio_funcs->ih_control(adev);
+ adev->nbio.funcs->ih_control(adev);
ih = &adev->irq.ih;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle)
return 0;
}
+static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, def, field_val;
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
+ def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL);
+ field_val = enable ? 0 : 1;
+ /**
+ * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE
+ * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE fields.
+ */
+ if (adev->asic_type > CHIP_VEGA10) {
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val);
+ }
+
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ DYN_CLK_SOFT_OVERRIDE, field_val);
+ data = REG_SET_FIELD(data, IH_CLK_CTRL,
+ REG_CLK_SOFT_OVERRIDE, field_val);
+ if (def != data)
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data);
+ }
+}
+
static int vega10_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ vega10_ih_update_clockgating_state(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
return 0;
+
}
static int vega10_ih_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index bd0580334f83..6b52a539d51b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
int vega10_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 587e33f5dcce..556f854e3551 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -24,7 +24,6 @@
#include "soc15.h"
#include "soc15_common.h"
-#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 5f8c8786cac5..78e5cdc0c058 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
return -EINVAL;
}
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
+ *cap = false;
+ return -ENOENT;
+ }
+
+ return pp_funcs->get_asic_baco_capability(pp_handle, cap);
+}
+
+int smu7_asic_baco_reset(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
+ return -ENOENT;
+
+ /* enter BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 1))
+ return -EIO;
+
+ /* exit BACO state */
+ if (pp_funcs->set_asic_baco_state(pp_handle, 0))
+ return -EIO;
+
+ dev_info(adev->dev, "GPU BACO reset\n");
+
+ return 0;
+}
+
/**
- * vi_asic_reset - soft reset GPU
+ * vi_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
- * Look up which blocks are hung and attempt
- * to reset them.
+ * Use PCI Config method to reset the GPU.
+ *
* Returns 0 for success.
*/
-static int vi_asic_reset(struct amdgpu_device *adev)
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
int r;
@@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev)
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
- return AMD_RESET_METHOD_LEGACY;
+ bool baco_reset;
+
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_TOPAZ:
+ smu7_asic_get_baco_capability(adev, &baco_reset);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
+/**
+ * vi_asic_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Look up which blocks are hung and attempt
+ * to reset them.
+ * Returns 0 for success.
+ */
+static int vi_asic_reset(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)
+ r = smu7_asic_baco_reset(adev);
+ else
+ r = vi_asic_pci_config_reset(adev);
+
+ return r;
}
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 8de0772f986c..40d4174913a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev,
int vi_set_ip_blocks(struct amdgpu_device *adev);
void legacy_doorbell_index_init(struct amdgpu_device *adev);
+int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap);
+int smu7_asic_baco_reset(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 177d1e5329a5..9f59ba93cfe0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
- unsigned int vmid, pasid;
+ unsigned int vmid;
+ uint16_t pasid;
+ bool ret;
/* This workaround is due to a HW/FW limitation on Hawaii, where
* VMID and PASID are not written into ih_ring_entry
@@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*tmp_ihre = *ihre;
vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
- return (pasid != 0) &&
+ return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
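
The kfd2kgd interface change here (and in the kfd_dbgdev.c hunk below) merges the old get_atc_vmid_pasid_mapping_valid/_pasid call pair into one call that returns the validity bit and writes the PASID through an out parameter. A usage sketch under that contract:

static int find_vmid_for_pasid(struct kfd_dev *dev, uint16_t pasid,
			       unsigned int first, unsigned int last)
{
	unsigned int vmid;
	uint16_t queried;

	for (vmid = first; vmid <= last; vmid++) {
		/* a true return means the VMID->PASID mapping is valid */
		if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_info(
				dev->kgd, vmid, &queried) &&
		    queried == pasid)
			return vmid;
	}

	return -1;	/* no VMID maps this PASID */
}
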
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 901fe3590165..d3400da6ab64 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x7a5d0000, 0x807c817c,
0x807aff7a, 0x00000080,
0xbf0a717c, 0xbf85fff8,
- 0xbf820141, 0xbef4037e,
+ 0xbf820142, 0xbef4037e,
0x8775ff7f, 0x0000ffff,
0x8875ff75, 0x00040000,
0xbef60380, 0xbef703ff,
@@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304080,
0x725d0100, 0xe0304100,
0x725d0200, 0xe0304180,
- 0x725d0300, 0xbf820031,
+ 0x725d0300, 0xbf820032,
0xbef603ff, 0x01000000,
0xbef20378, 0x8078ff78,
0x00000400, 0xbefc0384,
@@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x725d0000, 0xe0304100,
0x725d0100, 0xe0304200,
0x725d0200, 0xe0304300,
- 0x725d0300, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef603ff,
- 0x01000000, 0xbefc03ff,
- 0x0000006c, 0x80f89078,
- 0xf429003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc847c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0x80f8a078,
- 0xf42d003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc887c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0x80f8c078,
- 0xf431003a, 0xf0000000,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe803100,
- 0xbe823102, 0xbe843104,
- 0xbe863106, 0xbe883108,
- 0xbe8a310a, 0xbe8c310c,
- 0xbe8e310e, 0xbf06807c,
- 0xbf84fff0, 0xb9782a05,
- 0x80788178, 0x907c9973,
- 0x877c817c, 0xbf06817c,
- 0xbf850002, 0x8f788978,
- 0xbf820001, 0x8f788a78,
- 0xb9721e06, 0x8f728a72,
- 0x80787278, 0x8078ff78,
- 0x00000200, 0xbef603ff,
- 0x01000000, 0xf4211bfa,
+ 0x725d0300, 0xbf8c3f70,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef603ff, 0x01000000,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0xbef603ff, 0x01000000,
+ 0xf4211bfa, 0xf0000000,
+ 0x80788478, 0xf4211b3a,
0xf0000000, 0x80788478,
- 0xf4211b3a, 0xf0000000,
- 0x80788478, 0xf4211b7a,
+ 0xf4211b7a, 0xf0000000,
+ 0x80788478, 0xf4211eba,
0xf0000000, 0x80788478,
- 0xf4211eba, 0xf0000000,
- 0x80788478, 0xf4211efa,
+ 0xf4211efa, 0xf0000000,
+ 0x80788478, 0xf4211c3a,
0xf0000000, 0x80788478,
- 0xf4211c3a, 0xf0000000,
- 0x80788478, 0xf4211c7a,
+ 0xf4211c7a, 0xf0000000,
+ 0x80788478, 0xf4211e7a,
0xf0000000, 0x80788478,
- 0xf4211e7a, 0xf0000000,
- 0x80788478, 0xf4211cfa,
+ 0xf4211cfa, 0xf0000000,
+ 0x80788478, 0xf4211bba,
0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
0xf4211bba, 0xf0000000,
0x80788478, 0xbf8cc07f,
- 0xb9eef814, 0xf4211bba,
- 0xf0000000, 0x80788478,
- 0xbf8cc07f, 0xb9eef815,
- 0xbef2036d, 0x876dff72,
- 0x0000ffff, 0xbefc036f,
- 0xbefe037a, 0xbeff037b,
- 0x876f71ff, 0x000003ff,
- 0xb9ef4803, 0xb9f9f816,
- 0x876f71ff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f3f801, 0x876fff72,
- 0xfc000000, 0x906f9a6f,
- 0x8f6f906f, 0xbef30380,
+ 0xb9eef815, 0xbef2036d,
+ 0x876dff72, 0x0000ffff,
+ 0xbefc036f, 0xbefe037a,
+ 0xbeff037b, 0x876f71ff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f71ff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0x876fff72, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbef30380, 0x88736f73,
+ 0x876fff72, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
0x88736f73, 0x876fff72,
- 0x02000000, 0x906f996f,
- 0x8f6f8f6f, 0x88736f73,
- 0x876fff72, 0x01000000,
- 0x906f986f, 0x8f6f996f,
- 0x88736f73, 0x876fff70,
- 0x00800000, 0x906f976f,
- 0xb9f3f807, 0x87fe7e7e,
- 0x87ea6a6a, 0xb9f0f802,
- 0xbf8a0000, 0xbe80226c,
- 0xbf810000, 0xbf9f0000,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x88736f73,
+ 0x876fff70, 0x00800000,
+ 0x906f976f, 0xb9f3f807,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9f0f802, 0xbf8a0000,
+ 0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
+ 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_arcturus_hex[] = {
0xbf820001, 0xbf8202c4,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index cdaa523ce6be..4433bda2ce25 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -758,6 +758,7 @@ L_RESTORE_V0:
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
/* restore SGPRs */
//will be 2+8+16*6
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..1544007af34a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -49,7 +49,7 @@ static const char kfd_dev_name[] = "kfd";
static const struct file_operations kfd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = kfd_ioctl,
- .compat_ioctl = kfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
.mmap = kfd_mmap,
};
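
compat_ptr_ioctl() (declared in linux/fs.h) is the generic compat handler for drivers whose ioctl ABI differs between 32-bit and 64-bit userspace only in the pointer representation: it applies compat_ptr() to the argument and forwards to ->unlocked_ioctl. The pattern, with a hypothetical handler name:

#include <linux/fs.h>

static long example_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg);	/* hypothetical driver handler */

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* not example_ioctl itself */
};
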
@@ -282,7 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
+ pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -332,7 +332,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid %d\n",
+ pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
@@ -378,7 +378,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid %d\n",
+ pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -855,7 +855,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
@@ -913,7 +913,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
uint32_t nodes = 0;
int ret;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
/* Return number of nodes, so that user space can allocate
@@ -1128,7 +1128,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
- pdd->qpd.vmid != 0)
+ pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -1801,7 +1801,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
} else
goto err_i1;
- dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
if (IS_ERR(process)) {
@@ -1856,7 +1856,8 @@ err_i1:
kfree(kdata);
if (retcode)
- dev_dbg(kfd_device, "ret = %d\n", retcode);
+ dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
+ nr, arg, retcode);
return retcode;
}
@@ -1877,7 +1878,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("Process %d mapping mmio page\n"
+ pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 66387caf966e..de9f68d5c312 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
+#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
@@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
+ case CHIP_RENOIR:
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
@@ -703,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info,
cu_info,
mem_available,
- cu_info->cu_bitmap[i][j],
+ cu_info->cu_bitmap[i % 4][j + i / 4],
ct,
cu_processor_id,
k);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a3441b0e385b..d59f2cd056c6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
+ uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
@@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
- (dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
- (dev->kgd, vmid) == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
- vmid, p->pasid);
- break;
- }
+ status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
+ (dev->kgd, vmid, &queried_pasid);
+
+ if (status && queried_pasid == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
+ vmid, p->pasid);
+ break;
}
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid %d\n", p->pasid);
+ pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 9d4af961c5d1..9bfa50633654 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
if (pmgr->pasid != 0) {
- pr_debug("H/W debugger is already active using pasid %d\n",
+ pr_debug("H/W debugger is already active using pasid 0x%x\n",
pmgr->pasid);
return -EBUSY;
}
@@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
- pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
p->pasid);
return -EINVAL;
}
@@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
wac_info->process->pasid);
return -EINVAL;
}
@@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
{
/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
adw_info->process->pasid);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0dc1084b5e82..4fa8834ce7cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -39,6 +39,41 @@
*/
static atomic_t kfd_locked = ATOMIC_INIT(0);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
+#endif
+extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
+extern const struct kfd2kgd_calls arcturus_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
+
+static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
+ [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_TONGA] = &gfx_v8_kfd2kgd,
+ [CHIP_FIJI] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
+ [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
+ [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
+ [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
+};
+
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
@@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = {
.num_sdma_queues_per_engine = 8,
};
+static const struct kfd_device_info renoir_device_info = {
+ .asic_family = CHIP_RENOIR,
+ .asic_name = "renoir",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info navi10_device_info = {
.asic_family = CHIP_NAVI10,
.asic_name = "navi10",
@@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = {
.num_sdma_queues_per_engine = 8,
};
-struct kfd_deviceid {
- unsigned short did;
- const struct kfd_device_info *device_info;
+static const struct kfd_device_info navi12_device_info = {
+ .asic_family = CHIP_NAVI12,
+ .asic_name = "navi12",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info navi14_device_info = {
+ .asic_family = CHIP_NAVI14,
+ .asic_name = "navi14",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
};
-static const struct kfd_deviceid supported_devices[] = {
+/* For each entry, [0] is the regular device and [1] is the virtualization (VF) device. */
+static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
- { 0x1304, &kaveri_device_info }, /* Kaveri */
- { 0x1305, &kaveri_device_info }, /* Kaveri */
- { 0x1306, &kaveri_device_info }, /* Kaveri */
- { 0x1307, &kaveri_device_info }, /* Kaveri */
- { 0x1309, &kaveri_device_info }, /* Kaveri */
- { 0x130A, &kaveri_device_info }, /* Kaveri */
- { 0x130B, &kaveri_device_info }, /* Kaveri */
- { 0x130C, &kaveri_device_info }, /* Kaveri */
- { 0x130D, &kaveri_device_info }, /* Kaveri */
- { 0x130E, &kaveri_device_info }, /* Kaveri */
- { 0x130F, &kaveri_device_info }, /* Kaveri */
- { 0x1310, &kaveri_device_info }, /* Kaveri */
- { 0x1311, &kaveri_device_info }, /* Kaveri */
- { 0x1312, &kaveri_device_info }, /* Kaveri */
- { 0x1313, &kaveri_device_info }, /* Kaveri */
- { 0x1315, &kaveri_device_info }, /* Kaveri */
- { 0x1316, &kaveri_device_info }, /* Kaveri */
- { 0x1317, &kaveri_device_info }, /* Kaveri */
- { 0x1318, &kaveri_device_info }, /* Kaveri */
- { 0x131B, &kaveri_device_info }, /* Kaveri */
- { 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
- { 0x9870, &carrizo_device_info }, /* Carrizo */
- { 0x9874, &carrizo_device_info }, /* Carrizo */
- { 0x9875, &carrizo_device_info }, /* Carrizo */
- { 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info }, /* Carrizo */
- { 0x15DD, &raven_device_info }, /* Raven */
- { 0x15D8, &raven_device_info }, /* Raven */
+ [CHIP_KAVERI] = {&kaveri_device_info, NULL},
+ [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
- { 0x67A0, &hawaii_device_info }, /* Hawaii */
- { 0x67A1, &hawaii_device_info }, /* Hawaii */
- { 0x67A2, &hawaii_device_info }, /* Hawaii */
- { 0x67A8, &hawaii_device_info }, /* Hawaii */
- { 0x67A9, &hawaii_device_info }, /* Hawaii */
- { 0x67AA, &hawaii_device_info }, /* Hawaii */
- { 0x67B0, &hawaii_device_info }, /* Hawaii */
- { 0x67B1, &hawaii_device_info }, /* Hawaii */
- { 0x67B8, &hawaii_device_info }, /* Hawaii */
- { 0x67B9, &hawaii_device_info }, /* Hawaii */
- { 0x67BA, &hawaii_device_info }, /* Hawaii */
- { 0x67BE, &hawaii_device_info }, /* Hawaii */
- { 0x6920, &tonga_device_info }, /* Tonga */
- { 0x6921, &tonga_device_info }, /* Tonga */
- { 0x6928, &tonga_device_info }, /* Tonga */
- { 0x6929, &tonga_device_info }, /* Tonga */
- { 0x692B, &tonga_device_info }, /* Tonga */
- { 0x6938, &tonga_device_info }, /* Tonga */
- { 0x6939, &tonga_device_info }, /* Tonga */
- { 0x7300, &fiji_device_info }, /* Fiji */
- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
- { 0x67C4, &polaris10_device_info }, /* Polaris10 */
- { 0x67C7, &polaris10_device_info }, /* Polaris10 */
- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
- { 0x67DF, &polaris10_device_info }, /* Polaris10 */
- { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
- { 0x67E3, &polaris11_device_info }, /* Polaris11 */
- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
- { 0x67EF, &polaris11_device_info }, /* Polaris11 */
- { 0x67FF, &polaris11_device_info }, /* Polaris11 */
- { 0x6980, &polaris12_device_info }, /* Polaris12 */
- { 0x6981, &polaris12_device_info }, /* Polaris12 */
- { 0x6985, &polaris12_device_info }, /* Polaris12 */
- { 0x6986, &polaris12_device_info }, /* Polaris12 */
- { 0x6987, &polaris12_device_info }, /* Polaris12 */
- { 0x6995, &polaris12_device_info }, /* Polaris12 */
- { 0x6997, &polaris12_device_info }, /* Polaris12 */
- { 0x699F, &polaris12_device_info }, /* Polaris12 */
- { 0x694C, &vegam_device_info }, /* VegaM */
- { 0x694E, &vegam_device_info }, /* VegaM */
- { 0x694F, &vegam_device_info }, /* VegaM */
- { 0x6860, &vega10_device_info }, /* Vega10 */
- { 0x6861, &vega10_device_info }, /* Vega10 */
- { 0x6862, &vega10_device_info }, /* Vega10 */
- { 0x6863, &vega10_device_info }, /* Vega10 */
- { 0x6864, &vega10_device_info }, /* Vega10 */
- { 0x6867, &vega10_device_info }, /* Vega10 */
- { 0x6868, &vega10_device_info }, /* Vega10 */
- { 0x6869, &vega10_device_info }, /* Vega10 */
- { 0x686A, &vega10_device_info }, /* Vega10 */
- { 0x686B, &vega10_device_info }, /* Vega10 */
- { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
- { 0x686D, &vega10_device_info }, /* Vega10 */
- { 0x686E, &vega10_device_info }, /* Vega10 */
- { 0x686F, &vega10_device_info }, /* Vega10 */
- { 0x687F, &vega10_device_info }, /* Vega10 */
- { 0x69A0, &vega12_device_info }, /* Vega12 */
- { 0x69A1, &vega12_device_info }, /* Vega12 */
- { 0x69A2, &vega12_device_info }, /* Vega12 */
- { 0x69A3, &vega12_device_info }, /* Vega12 */
- { 0x69AF, &vega12_device_info }, /* Vega12 */
- { 0x66a0, &vega20_device_info }, /* Vega20 */
- { 0x66a1, &vega20_device_info }, /* Vega20 */
- { 0x66a2, &vega20_device_info }, /* Vega20 */
- { 0x66a3, &vega20_device_info }, /* Vega20 */
- { 0x66a4, &vega20_device_info }, /* Vega20 */
- { 0x66a7, &vega20_device_info }, /* Vega20 */
- { 0x66af, &vega20_device_info }, /* Vega20 */
- { 0x738C, &arcturus_device_info }, /* Arcturus */
- { 0x7388, &arcturus_device_info }, /* Arcturus */
- { 0x738E, &arcturus_device_info }, /* Arcturus */
- { 0x7390, &arcturus_device_info }, /* Arcturus vf */
- { 0x7310, &navi10_device_info }, /* Navi10 */
- { 0x7312, &navi10_device_info }, /* Navi10 */
- { 0x7318, &navi10_device_info }, /* Navi10 */
- { 0x731a, &navi10_device_info }, /* Navi10 */
- { 0x731f, &navi10_device_info }, /* Navi10 */
+ [CHIP_HAWAII] = {&hawaii_device_info, NULL},
+ [CHIP_TONGA] = {&tonga_device_info, NULL},
+ [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
+ [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
+ [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
+ [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
+ [CHIP_VEGAM] = {&vegam_device_info, NULL},
+ [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
+ [CHIP_VEGA12] = {&vega12_device_info, NULL},
+ [CHIP_VEGA20] = {&vega20_device_info, NULL},
+ [CHIP_RENOIR] = {&renoir_device_info, NULL},
+ [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
+ [CHIP_NAVI10] = {&navi10_device_info, NULL},
+ [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
+ [CHIP_NAVI14] = {&navi14_device_info, NULL},
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-static const struct kfd_device_info *lookup_device_info(unsigned short did)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
- size_t i;
+ struct kfd_dev *kfd;
+ const struct kfd_device_info *device_info;
+ const struct kfd2kgd_calls *f2g;
- for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
- if (supported_devices[i].did == did) {
- WARN_ON(!supported_devices[i].device_info);
- return supported_devices[i].device_info;
- }
+ if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
+ || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
+ dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
+ return NULL; /* asic_type out of range */
}
- dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
- did);
+ device_info = kfd_supported_devices[asic_type][vf];
+ f2g = kfd2kgd_funcs[asic_type];
- return NULL;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
-{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info =
- lookup_device_info(pdev->device);
-
- if (!device_info) {
- dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ if (!device_info || !f2g) {
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[asic_type], vf ? "VF" : "");
return NULL;
}
@@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
+ kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
@@ -751,9 +730,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
return 0;
kgd2kfd_suspend(kfd);
- /* hold dqm->lock to prevent further execution*/
- dqm_lock(kfd->dqm);
-
kfd_signal_reset_event(kfd);
return 0;
}
@@ -771,8 +747,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
if (!kfd->init_complete)
return 0;
- dqm_unlock(kfd->dqm);
-
ret = kfd_resume(kfd);
if (ret)
return ret;
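
The kfd_device.c changes above replace the long PCI device-ID table with a lookup keyed by asic_type, so new device IDs no longer need a KFD-side table entry. kgd2kfd_probe() open-codes its bounds check with sizeof arithmetic; a sketch of the equivalent lookup using the kernel's ARRAY_SIZE() macro, with names taken from the hunks above and a hypothetical helper name:

	static const struct kfd_device_info *
	lookup_device_info_by_asic(unsigned int asic_type, bool vf)
	{
		/* kfd_supported_devices[][2]: [0] regular device, [1] VF device */
		if (asic_type >= ARRAY_SIZE(kfd_supported_devices))
			return NULL;	/* asic_type out of range */

		return kfd_supported_devices[asic_type][vf];
	}
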
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d985e31fcc1e..984c2f2b24b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit, allocated_vmid;
+ int allocated_vmid = -1, i;
- if (dqm->vmid_bitmap == 0)
- return -ENOMEM;
+ for (i = dqm->dev->vm_info.first_vmid_kfd;
+ i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
+ if (!dqm->vmid_pasid[i]) {
+ allocated_vmid = i;
+ break;
+ }
+ }
+
+ if (allocated_vmid < 0) {
+ pr_err("no more vmid to allocate\n");
+ return -ENOSPC;
+ }
+
+ pr_debug("vmid allocated: %d\n", allocated_vmid);
+
+ dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
- bit = ffs(dqm->vmid_bitmap) - 1;
- dqm->vmid_bitmap &= ~(1 << bit);
+ set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
- allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
- pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
- dqm->dev->kfd2kgd->set_scratch_backing_va(
- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (dqm->dev->kfd2kgd->set_scratch_backing_va)
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
@@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
-
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
@@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+ dqm->vmid_pasid[qpd->vmid] = 0;
- dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
@@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
+ goto add_queue_to_list;
+ }
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
@@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
goto out_free_mqd;
}
+add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
@@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
+ return 0;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
+
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Update non-HWS queue while stopped\n");
+ goto out_unlock;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
+ dqm->queue_count--;
+
+ if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
+ continue;
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count--;
}
out:
@@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
+ dqm->queue_count++;
+
+ if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
+ continue;
+
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
if (retval && !ret)
@@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count++;
}
qpd->evicted = 0;
out:
@@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -879,7 +912,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
+ memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
+
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -902,12 +936,20 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
init_interrupts(dqm);
- return pm_init(&dqm->packets, dqm);
+
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ return pm_init(&dqm->packets, dqm);
+ dqm->sched_running = true;
+
+ return 0;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- pm_uninit(&dqm->packets);
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ pm_uninit(&dqm->packets);
+ dqm->sched_running = false;
+
return 0;
}
@@ -1058,6 +1100,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
/* clear hang status when driver try to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1073,6 +1116,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
{
dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
@@ -1259,9 +1303,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
return 0;
-
if (dqm->active_runlist)
return 0;
@@ -1283,6 +1328,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->is_hws_hang)
return -EIO;
if (!dqm->active_runlist)
@@ -1676,7 +1723,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- dev->device_info->num_sdma_engines *
+ (dev->device_info->num_sdma_engines +
+ dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
@@ -1786,10 +1834,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:
@@ -1883,6 +1934,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
int pipe, queue;
int r = 0;
+ if (!dqm->sched_running) {
+ seq_printf(m, " Device is stopped\n");
+
+ return 0;
+ }
+
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
@@ -1917,7 +1974,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
+ get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
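
The device_queue_manager changes above replace the vmid_bitmap allocator with a vmid_pasid[] table that records which PASID owns each VMID (0 meaning free); this also lets the interrupt handler resolve a VMID to a PASID without a register read (see the kfd_int_process_v9.c hunk below). A self-contained sketch of the allocation scan, with the first/last VMID bounds passed in rather than read from dqm->dev->vm_info:

	#include <stdint.h>

	/* Returns a free VMID in [first, last], or -1 if all are in use.
	 * The caller then stores the owning PASID into vmid_pasid[vmid].
	 */
	static int find_free_vmid(const uint16_t *vmid_pasid, int first, int last)
	{
		int i;

		for (i = first; i <= last; i++)
			if (!vmid_pasid[i])
				return i;

		return -1;
	}
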
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 90db2c9275f6..a8c37e6da027 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -32,6 +32,8 @@
#include "kfd_mqd_manager.h"
+#define VMID_NUM 16
+
struct device_process_node {
struct qcm_process_device *qpd;
struct list_head list;
@@ -185,7 +187,8 @@ struct device_queue_manager {
unsigned int *allocated_queues;
uint64_t sdma_bitmap;
uint64_t xgmi_sdma_bitmap;
- unsigned int vmid_bitmap;
+ /* the pasid mapping for each kfd vmid */
+ uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
@@ -198,6 +201,7 @@ struct device_queue_manager {
bool is_hws_hang;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
+ bool sched_running;
};
void device_queue_manager_init_cik(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..908081c85de1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -852,8 +852,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGSEGV to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -861,13 +861,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGTERM to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "HSA Process (PID %d) got unhandled exception",
- p->lead_thread->pid);
+ "Process %d (pasid 0x%x) got unhandled exception",
+ p->lead_thread->pid, p->pasid);
}
}
}
@@ -936,7 +936,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN) {
+ if (dev->device_info->asic_family != CHIP_RAVEN &&
+ dev->device_info->asic_family != CHIP_RENOIR) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 9dc4bff8085e..bb77b8890e77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
/*Iterating over all devices*/
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
- if (!dev) {
- id++; /* Skip non GPU devices */
+ if (!dev || kfd_devcgroup_check_permission(dev)) {
+ /* Skip non-GPU devices and devices to which the
+ * current process has no access. Access can be
+ * limited by placing the process in a specific
+ * cgroup hierarchy.
+ */
+ id++;
continue;
}
@@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kfd_init_apertures_v9(pdd, id);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 3ef67d2e0d9f..e05d75ecda21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
memcpy(patched_ihre, ih_ring_entry,
dev->device_info->ih_ring_entry_size);
- pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
- dev->kgd, vmid);
+ pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index c56ac47cd318..bc47f6a44456 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
}
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!kfd->ih_wq)) {
+ kfifo_free(&kfd->ih_fifo);
+ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
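
The kfd_interrupt.c hunk above fills a gap in the init error handling: alloc_workqueue() can fail, and on failure the kfifo allocated just before it must be freed. The usual unwind-in-reverse shape, sketched here with hypothetical names:

	struct example_ih {
		struct kfifo fifo;
		struct workqueue_struct *wq;
	};

	static int example_ih_init(struct example_ih *ih)
	{
		int ret;

		ret = kfifo_alloc(&ih->fifo, 1024, GFP_KERNEL);
		if (ret)
			return ret;

		ih->wq = alloc_workqueue("example-ih", WQ_HIGHPRI, 1);
		if (!ih->wq) {
			kfifo_free(&ih->fifo);	/* undo the allocation that succeeded */
			return -ENOMEM;
		}

		return 0;
	}
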
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..193e2835bd4d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -160,7 +160,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
if (!p)
return;
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
mutex_lock(kfd_get_dbgmgr_mutex());
@@ -194,7 +194,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
struct kfd_dev *dev;
dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
@@ -235,7 +235,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
+ pr_err("Unexpected pasid 0x%x binding failure\n",
p->pasid);
mutex_unlock(&p->mutex);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8b4564f71a7a..11d244891393 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -330,10 +330,13 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
kernel_queue_init_v9(&kq->ops_asic_specific);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kernel_queue_init_v10(&kq->ops_asic_specific);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
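
The kgd2kfd_init() change is a C correctness fix rather than a cleanup: in C, an empty parameter list leaves the parameters unspecified, while (void) states there are none, which is what prototype checking (and -Wstrict-prototypes) requires. A two-line illustration:

	int f();	/* unspecified parameters: a call such as f(1, 2) still compiles */
	int g(void);	/* no parameters: g(1) is rejected by the compiler */
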
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 9cd3eb2d90bd..4a236b2c2354 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -69,35 +69,13 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
- int retval;
- struct kfd_mem_obj *mqd_mem_obj = NULL;
+ struct kfd_mem_obj *mqd_mem_obj;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
- */
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if (!mqd_mem_obj)
- return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
- &(mqd_mem_obj->gtt_mem),
- &(mqd_mem_obj->gpu_addr),
- (void *)&(mqd_mem_obj->cpu_ptr), true);
- } else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
- &mqd_mem_obj);
- }
-
- if (retval) {
- kfree(mqd_mem_obj);
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
+ &mqd_mem_obj))
return NULL;
- }
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -250,14 +228,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- struct kfd_dev *kfd = mm->dev;
-
- if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
- kfree(mqd_mem_obj);
- } else {
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
- }
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 2c8624c5b42c..83ef4b3dd2fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -239,10 +239,13 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
pm->pmf = &kfd_v9_pm_funcs;
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pm->pmf = &kfd_v10_pm_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index c89326125d71..060a9e8b301e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -36,6 +36,10 @@
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/device_cgroup.h>
+#include <drm/drm_file.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -179,10 +183,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11)
-#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \
- (chip) <= CHIP_NAVI10) || \
- (chip) == CHIP_HAWAII)
#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
@@ -230,6 +230,7 @@ struct kfd_dev {
const struct kfd_device_info *device_info;
struct pci_dev *pdev;
+ struct drm_device *ddev;
unsigned int id; /* topology stub index */
@@ -687,7 +688,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- unsigned int pasid;
+ uint16_t pasid;
unsigned int doorbell_index;
/*
@@ -1040,6 +1041,21 @@ bool kfd_is_locked(void);
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
+/* Cgroup Support */
+/* Check with device cgroup if @kfd device is accessible */
+static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+{
+#if defined(CONFIG_CGROUP_DEVICE)
+ struct drm_device *ddev = kfd->ddev;
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ ddev->render->index,
+ DEVCG_ACC_WRITE | DEVCG_ACC_READ);
+#else
+ return 0;
+#endif
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
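
kfd_devcgroup_check_permission() above reduces to a devcgroup_check_permission() call on the GPU's render-node character device, asking for read and write access. Callers use it as a simple gate; a sketch of the pattern the kfd_topology.c hunks below follow in their sysfs show handlers, with a hypothetical handler name and payload:

	static ssize_t example_prop_show(struct kfd_dev *gpu, char *buffer)
	{
		/* Deny the read outright if the device cgroup forbids this
		 * process access to the GPU's render node.
		 */
		if (gpu && kfd_devcgroup_check_permission(gpu))
			return -EPERM;

		return sysfs_show_32bit_val(buffer, 0);	/* hypothetical payload */
	}
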
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 40e3fc0c6942..10f9af5784f2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
@@ -687,6 +687,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev->device_info->asic_family))
return 0;
@@ -698,14 +700,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= dev->shared_resources.non_cp_doorbells_start
- && i <= dev->shared_resources.non_cp_doorbells_end) {
+ if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
- i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
}
@@ -1020,7 +1024,7 @@ static void evict_process_worker(struct work_struct *work)
*/
flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid %d\n", p->pasid);
+ pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
@@ -1029,9 +1033,9 @@ static void evict_process_worker(struct work_struct *work)
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- pr_debug("Finished evicting pasid %d\n", p->pasid);
+ pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
@@ -1046,7 +1050,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
- pr_debug("Started restoring pasid %d\n", p->pasid);
+ pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1062,7 +1066,7 @@ static void restore_process_worker(struct work_struct *work)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
- pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
@@ -1072,9 +1076,9 @@ static void restore_process_worker(struct work_struct *work)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid %d\n", p->pasid);
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1088,7 +1092,7 @@ void kfd_suspend_all_processes(void)
cancel_delayed_work_sync(&p->restore_work);
if (kfd_process_evict_queues(p))
- pr_err("Failed to suspend process %d\n", p->pasid);
+ pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
@@ -1171,7 +1175,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID %d:\n",
+ seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7e6c3ee82f5b..2659d226c056 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -298,7 +298,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -377,7 +377,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+ pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 7551761f2aa9..69bd0628fdc6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
@@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
+ if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, "flags", mem->flags);
@@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
+ if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
@@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
@@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev = container_of(attr, struct kfd_topology_device,
attr_name);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_str_val(buffer, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
@@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->gpu = dev->gpu;
+ list_for_each_entry(cache, &dev->cache_props, list)
+ cache->gpu = dev->gpu;
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ iolink->gpu = dev->gpu;
break;
}
}
@@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index d4718d58d0f2..15843e0fc756 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -102,6 +102,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -123,6 +124,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -141,6 +143,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 71991a28a775..313183b80032 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0
depends on DRM_AMD_DC && X86
depends on DRM_AMD_DC_DCN1_0
help
- Choose this option if you want to have
- Navi support for display engine
+ Choose this option if you want to have
+ Navi support for display engine
config DRM_AMD_DC_DCN2_1
- bool "DCN 2.1 family"
- depends on DRM_AMD_DC && X86
- depends on DRM_AMD_DC_DCN2_0
- help
- Choose this option if you want to have
- Renoir support for display engine
+ bool "DCN 2.1 family"
+ depends on DRM_AMD_DC && X86
+ depends on DRM_AMD_DC_DCN2_0
+ help
+ Choose this option if you want to have
+ Renoir support for display engine
config DRM_AMD_DC_DSC_SUPPORT
bool "DSC support"
@@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT
depends on DRM_AMD_DC_DCN1_0
depends on DRM_AMD_DC_DCN2_0
help
- Choose this option if you want to have
- Dynamic Stream Compression support
+ Choose this option if you want to have
+ Dynamic Stream Compression support
+
+config DRM_AMD_DC_HDCP
+ bool "Enable HDCP support in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to
+ support HDCP authentication
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 496cee000f10..36b3d6a5d04d 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp
+endif
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DAL_LIBS += modules/hdcp
+endif
+
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 94911871eb9b..9a3b7bf8ab0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),)
AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
endif
+ifdef CONFIG_DRM_AMD_DC_HDCP
+AMDGPUDM += amdgpu_dm_hdcp.o
+endif
+
ifneq ($(CONFIG_DEBUG_FS),)
AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4139f129eafb..7aac9568d3be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -37,6 +37,9 @@
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
#include "amdgpu_pm.h"
#include "amd_shared.h"
@@ -67,6 +70,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_hdcp.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -143,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
+static void amdgpu_dm_set_psr_caps(struct dc_link *link);
+static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+
+
/*
* dm_vblank_get_counter
*
@@ -263,6 +273,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
@@ -407,6 +424,13 @@ static void dm_vupdate_high_irq(void *interrupt_params)
}
}
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -646,11 +670,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct dc_callback_init init_params;
+#endif
+
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&init_params, 0, sizeof(init_params));
+#endif
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
@@ -697,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
init_data.flags.multi_mon_pp_mclk_switch = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
init_data.flags.power_down_display_on_boot = true;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
@@ -713,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ dc_hardware_init(adev->dm.dc);
+
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
@@ -723,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
amdgpu_dm_init_color_mod();
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
@@ -764,6 +812,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc)
+ dc_deinit_callbacks(adev->dm.dc);
+#endif
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -897,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
int ret = 0;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
- aconnector, aconnector->base.base.id);
+ aconnector,
+ aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to start MST\n");
- ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
- return ret;
- }
+ aconnector->dc_link->type =
+ dc_connection_single;
+ break;
}
+ }
}
+ drm_connector_list_iter_end(&iter);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
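
This file's connector-walk hunks (here and in s3_handle_mst() and dm_resume() below) move from iterating mode_config.connector_list under connection_mutex to the drm_connector_list_iter API, which takes a reference on each connector as it is visited and is therefore safe against concurrent connector hotplug. The canonical pattern, where dev is the struct drm_device pointer:

	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* use connector; the iterator holds a reference to it */
	}
	drm_connector_list_iter_end(&iter);
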
@@ -940,6 +1000,11 @@ static int dm_late_init(void *handle)
params.backlight_lut_array_size = 16;
params.backlight_lut_array = linear_lut;
+ /* Min backlight level after ABM reduction; don't allow below 1%:
+ * 0xFFFF * 0.01 = 0x28F (655)
+ */
+ params.min_abm_backlight = 0x28F;
+
/* todo will enable for navi10 */
if (adev->asic_type <= CHIP_RAVEN) {
ret = dmcu_load_iram(dmcu, params);
@@ -955,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
@@ -973,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
- ret = drm_dp_mst_topology_mgr_resume(mgr);
+ ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
@@ -989,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
/**
* dm_hw_init() - Initialize DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
@@ -1019,7 +1082,7 @@ static int dm_hw_init(void *handle)
/**
* dm_hw_fini() - Teardown DC device
- * @handle: The base driver device containing the amdpgu_dm device.
+ * @handle: The base driver device containing the amdgpu_dm device.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
@@ -1163,6 +1226,7 @@ static int dm_resume(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state;
@@ -1185,17 +1249,18 @@ static int dm_resume(void *handle)
/* program HPD filter */
dc_resume(dm->dc);
- /* On resume we need to rewrite the MSTM control bits to enamble MST*/
- s3_handle_mst(ddev, false);
-
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
*/
amdgpu_dm_irq_resume_early(adev);
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
/* Do detection*/
- list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
/*
@@ -1223,6 +1288,7 @@ static int dm_resume(void *handle)
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
}
+ drm_connector_list_iter_end(&iter);
/* Force mode set in atomic commit */
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
@@ -1438,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+#endif
}
mutex_unlock(&dev->mode_config.mutex);
@@ -1452,6 +1523,9 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct amdgpu_device *adev = dev->dev_private;
+#endif
/*
* In case of failure or MST no need to update connector status or notify the OS
@@ -1459,6 +1533,10 @@ static void handle_hpd_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+#endif
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1577,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param)
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
enum dc_connection_type new_connection_type = dc_connection_none;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ union hpd_irq_data hpd_irq_data;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+#endif
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -1586,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
+#else
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
+#endif
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -1621,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param)
drm_kms_helper_hotplug_event(dev);
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
@@ -2334,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
+ if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+ amdgpu_dm_set_psr_caps(link);
}
@@ -3311,8 +3406,12 @@ static void fill_stream_properties_from_drm_display_mode(
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
- memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
timing_out->h_border_left = 0;
timing_out->h_border_right = 0;
@@ -3322,6 +3421,9 @@ static void fill_stream_properties_from_drm_display_mode(
if (drm_mode_is_420_only(info, mode_in)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -3346,6 +3448,13 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
}
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
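The VIC and HDMI VIC are read back out of freshly built AVI and HDMI vendor infoframes instead of being computed by hand. A standalone sketch of the same derivation using the DRM helpers (assuming a connector and mode are in hand):

	#include <linux/hdmi.h>
	#include <drm/drm_edid.h>

	static u8 mode_to_vic(struct drm_connector *connector,
			      const struct drm_display_mode *mode)
	{
		struct hdmi_avi_infoframe frame;

		/* returns 0 on success; video_code is the CEA-861 VIC */
		if (drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode))
			return 0;

		return frame.video_code;
	}
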
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
timing_out->h_sync_width =
@@ -3566,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream->dm_stream_context = aconnector;
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
+
list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
/* Search for preferred mode */
if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
@@ -3621,8 +3733,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
dc_link_get_link_cap(aconnector->dc_link));
if (dsc_caps.is_dsc_supported)
- if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc,
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
&dsc_caps,
+ aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg))
@@ -3639,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
+ if (stream->link->psr_feature_enabled) {
+ struct dc *core_dc = stream->link->ctx->dc;
+
+ if (dc_is_dmcu_initialized(core_dc)) {
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ stream->psr_version = dmcu->dmcu_version.psr_version;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+ }
+ }
finish:
dc_sink_release(sink);
@@ -4114,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
result = MODE_OK;
else
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->vdisplay,
mode->hdisplay,
+ mode->vdisplay,
mode->clock,
dc_result);
@@ -4494,7 +4619,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
tv.num_shared = 1;
list_add(&tv.head, &list);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
if (r) {
dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
return r;
@@ -4837,7 +4962,13 @@ static int to_drm_connector_type(enum signal_type st)
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
}
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
@@ -5082,6 +5213,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (adev->asic_type >= CHIP_RAVEN)
+ drm_connector_attach_content_protection_property(&aconnector->base, false);
+#endif
}
}
@@ -5324,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
return false;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static bool is_content_protection_different(struct drm_connector_state *state,
+ const struct drm_connector_state *old_state,
+ const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /* CP is being re-enabled; ignore this */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ return false;
+ }
+
+ /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
+ if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Check that something is connected/enabled; otherwise we would start HDCP
+ * with nothing connected/enabled (hot-plug, headless S3, dpms)
+ */
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
+ aconnector->dc_sink != NULL)
+ return true;
+
+ if (old_state->content_protection == state->content_protection)
+ return false;
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ return true;
+
+ return false;
+}
+
+static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector);
+ else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index);
+}
+#endif
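
For context, userspace drives this state machine through the standard "Content Protection" connector property: it may set UNDESIRED or DESIRED, and only the driver moves the property to ENABLED. A hedged libdrm sketch (property-id lookup and error handling omitted):

	#include <stdint.h>
	#include <xf86drmMode.h>

	/* DRM_MODE_CONTENT_PROTECTION_DESIRED is 1 in the UAPI */
	static int request_hdcp(int fd, uint32_t connector_id, uint32_t prop_id)
	{
		return drmModeConnectorSetProperty(fd, connector_id, prop_id, 1);
	}
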
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -5665,6 +5847,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
+ bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -5710,6 +5893,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
+ if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
+ swizzle = false;
+
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -5864,6 +6050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
/* Update the planes if changed or disable if we don't have any. */
if ((planes_count || acrtc_state->active_planes == 0) &&
acrtc_state->stream) {
+ bundle->stream_update.stream = acrtc_state->stream;
if (new_pcrtc_state->mode_changed) {
bundle->stream_update.src = acrtc_state->stream->src;
bundle->stream_update.dst = acrtc_state->stream->dst;
@@ -5899,14 +6086,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
-
mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+
dc_commit_updates_for_stream(dm->dc,
bundle->surface_updates,
planes_count,
acrtc_state->stream,
&bundle->stream_update,
dc_state);
+
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->psr_version &&
+ !acrtc_state->stream->link->psr_feature_enabled)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_allow_active &&
+ swizzle) {
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ }
+
mutex_unlock(&dm->dc_lock);
}
@@ -6215,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
crtc->hwmode = new_crtc_state->mode;
} else if (modereset_required(new_crtc_state)) {
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
-
/* i.e. reset mode */
- if (dm_old_crtc_state->stream)
+ if (dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->stream->link->psr_allow_active)
+ amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
+
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
}
} /* for_each_crtc_in_state() */
@@ -6248,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ new_crtc_state = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ continue;
+ }
+
+ if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue);
+ }
+#endif
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -6287,9 +6516,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
+ stream_update.stream = dm_new_crtc_state->stream;
if (scaling_changed) {
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
- dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ dm_new_con_state, dm_new_crtc_state->stream);
stream_update.src = dm_new_crtc_state->stream->src;
stream_update.dst = dm_new_crtc_state->stream->dst;
@@ -7158,7 +7388,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
-
+ stream_update.stream = new_dm_crtc_state->stream;
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
@@ -7569,3 +7799,92 @@ update:
freesync_capable);
}
+static void amdgpu_dm_set_psr_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return;
+ if (link->type == dc_connection_none)
+ return;
+ if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+ dpcd_data, sizeof(dpcd_data))) {
+ link->psr_feature_enabled = !!dpcd_data[0];
+ DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ }
+}
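
The same capability probe expressed with the generic DP helpers, for comparison (a sketch; the driver goes through its dm_helpers wrapper above so it can reuse the amdgpu AUX path):

	#include <drm/drm_dp_helper.h>

	static bool sink_supports_psr(struct drm_dp_aux *aux)
	{
		u8 psr_cap = 0;

		/* DP_PSR_SUPPORT (DPCD 0x070): non-zero means PSR capable */
		if (drm_dp_dpcd_readb(aux, DP_PSR_SUPPORT, &psr_cap) < 0)
			return false;

		return psr_cap != 0;
	}
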
+
+/**
+ * amdgpu_dm_link_setup_psr() - configure psr link
+ * @stream: stream state
+ *
+ * Return: true on success
+ */
+static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+{
+ struct dc_link *link = NULL;
+ struct psr_config psr_config = {0};
+ struct psr_context psr_context = {0};
+ struct dc *dc = NULL;
+ bool ret = false;
+
+ if (stream == NULL)
+ return false;
+
+ link = stream->link;
+ dc = link->ctx->dc;
+
+ psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+
+ if (psr_config.psr_version > 0) {
+ psr_config.psr_exit_link_training_required = 0x1;
+ psr_config.psr_frame_capture_indication_req = 0;
+ psr_config.psr_rfb_setup_time = 0x37;
+ psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
+ psr_config.allow_smu_optimizations = 0x0;
+
+ ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+ }
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+
+ return ret;
+}
+
+/**
+ * amdgpu_dm_psr_enable() - enable psr f/w
+ * @stream: stream state
+ *
+ * Return: true on success
+ */
+bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+{
+ struct dc_link *link = stream->link;
+ struct dc_static_screen_events triggers = {0};
+
+ DRM_DEBUG_DRIVER("Enabling psr...\n");
+
+ triggers.cursor_update = true;
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ dc_stream_set_static_screen_events(link->ctx->dc,
+ &stream, 1,
+ &triggers);
+
+ return dc_link_set_psr_allow_active(link, true, false);
+}
+
+/**
+ * amdgpu_dm_psr_disable() - disable psr f/w
+ * @stream: stream state
+ *
+ * Return: true on success
+ */
+static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+{
+ DRM_DEBUG_DRIVER("Disabling psr...\n");
+
+ return dc_link_set_psr_allow_active(stream->link, false, true);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index c8c525a2b505..77c5166e6b08 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps {
* @display_indexes_num: Max number of display streams supported
* @irq_handler_list_table_lock: Synchronizes access to IRQ tables
* @backlight_dev: Backlight control device
+ * @backlight_link: Link on which to control backlight
+ * @backlight_caps: Capabilities of the backlight device
+ * @freesync_module: Module handling freesync calculations
+ * @fw_dmcu: Reference to DMCU firmware
+ * @dmcu_fw_version: Version of the DMCU firmware
+ * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
*/
@@ -128,7 +134,7 @@ struct amdgpu_display_manager {
u16 display_indexes_num;
/**
- * @atomic_obj
+ * @atomic_obj:
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
@@ -225,6 +231,9 @@ struct amdgpu_display_manager {
struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct hdcp_workqueue *hdcp_workqueue;
+#endif
struct drm_atomic_state *cached_state;
@@ -234,6 +243,8 @@ struct amdgpu_display_manager {
uint32_t dmcu_fw_version;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
/**
+ * @soc_bounding_box:
+ *
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
@@ -287,6 +298,7 @@ struct amdgpu_dm_connector {
uint32_t debugfs_dpcd_address;
uint32_t debugfs_dpcd_size;
#endif
+ bool force_yuv420_output;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b43bb7f90e4e..2233d293a707 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
NULL);
+ dc_gamma_release(&gamma);
+
return res ? 0 : -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index a549c7c717dd..eaad9099bc0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
}
/* Configure dithering */
- if (!dm_need_crc_dither(source))
+ if (!dm_need_crc_dither(source)) {
dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
- else
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_DISABLE);
+ } else {
dc_stream_set_dither_option(stream_state,
DITHER_OPTION_DEFAULT);
+ dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
+ DYN_EXPANSION_AUTO);
+ }
unlock:
mutex_unlock(&adev->dm.dc_lock);
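
The dither and dynamic-expansion settings above exist to make CRC capture deterministic. The CRCs themselves are consumed through the generic DRM debugfs interface; a minimal userspace reader (a sketch, assuming DRI minor 0 and CRTC 0):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char line[128];
		FILE *data;
		int ctl = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);

		if (ctl < 0)
			return 1;
		write(ctl, "auto", 4);	/* select the default CRC source */
		close(ctl);

		/* opening crc/data arms CRC generation; reads block per frame */
		data = fopen("/sys/kernel/debug/dri/0/crtc-0/crc/data", "r");
		if (!data)
			return 1;
		if (fgets(line, sizeof(line), data))	/* "frame crc0 crc1 ..." */
			printf("%s", line);
		fclose(data);
		return 0;
	}
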
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f3dfb2887ae0..bdb37e611015 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -942,6 +942,52 @@ static const struct {
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+/*
+ * Force YUV420 output if available from the given mode
+ */
+static int force_yuv420_output_set(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ connector->force_yuv420_output = (bool)val;
+
+ return 0;
+}
+
+/*
+ * Check if YUV420 is forced when available from the given mode
+ */
+static int force_yuv420_output_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+
+ *val = connector->force_yuv420_output;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get,
+ force_yuv420_output_set, "%llu\n");
+
+/*
+ * Read PSR state
+ */
+static int psr_get(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ uint32_t psr_state = 0;
+
+ dc_link_get_psr_state(link, &psr_state);
+
+ *val = psr_state;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
+
void connector_debugfs_init(struct amdgpu_dm_connector *connector)
{
int i;
@@ -955,6 +1001,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
dp_debugfs_entries[i].fops);
}
}
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+
+ debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
+ &force_yuv420_output_fops);
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
new file mode 100644
index 000000000000..77181ddf6c8e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu_dm_hdcp.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "dm_helpers.h"
+#include <drm/drm_hdcp.h>
+
+static bool
+lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+ struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+
+ return dm_helpers_submit_i2c(link->ctx, link, &cmd);
+}
+
+static bool
+lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size);
+}
+
+static bool
+lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
+{
+ struct dc_link *link = handle;
+
+ return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
+}
+
+static void process_output(struct hdcp_workqueue *hdcp_work)
+{
+ struct mod_hdcp_output output = hdcp_work->output;
+
+ if (output.callback_stop)
+ cancel_delayed_work(&hdcp_work->callback_dwork);
+
+ if (output.callback_needed)
+ schedule_delayed_work(&hdcp_work->callback_dwork,
+ msecs_to_jiffies(output.callback_delay));
+
+ if (output.watchdog_timer_stop)
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ if (output.watchdog_timer_needed)
+ schedule_delayed_work(&hdcp_work->watchdog_timer_dwork,
+ msecs_to_jiffies(output.watchdog_timer_delay));
+}
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ mutex_lock(&hdcp_w->mutex);
+ hdcp_w->aconnector = aconnector;
+
+ mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+ schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ mutex_lock(&hdcp_w->mutex);
+
+ mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
+
+ cancel_delayed_work(&hdcp_w->property_validate_dwork);
+ hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ process_output(hdcp_w);
+
+ mutex_unlock(&hdcp_w->mutex);
+}
+
+void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
+{
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+
+ schedule_work(&hdcp_w->cpirq_work);
+}
+
+static void event_callback(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
+ callback_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_property_update(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+ struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ long ret;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+ if (aconnector->base.state->commit) {
+ ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+
+ if (ret == 0) {
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
+ hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+
+ if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON)
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ else
+ drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void event_property_validate(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work =
+ container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
+ struct mod_hdcp_display_query query;
+ struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+
+ mutex_lock(&hdcp_work->mutex);
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+
+ if (query.encryption_status != hdcp_work->encryption_status) {
+ hdcp_work->encryption_status = query.encryption_status;
+ schedule_work(&hdcp_work->property_update_work);
+ }
+
+ schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
+
+ mutex_unlock(&hdcp_work->mutex);
+}
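
event_property_validate() re-queues itself at the end, turning a delayed work item into a simple poll loop that runs every DRM_HDCP_CHECK_PERIOD_MS. The skeleton of the pattern (a sketch; my_ctx and poll_dwork are hypothetical names):

	static void poll_fn(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(to_delayed_work(work),
						  struct my_ctx, poll_dwork);

		/* ... sample state, kick other work items on change ... */

		/* re-arm; a later cancel_delayed_work() stops the loop */
		schedule_delayed_work(&ctx->poll_dwork,
				      msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
	}
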
+
+static void event_watchdog_timer(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(to_delayed_work(work),
+ struct hdcp_workqueue,
+ watchdog_timer_dwork);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+static void event_cpirq(struct work_struct *work)
+{
+ struct hdcp_workqueue *hdcp_work;
+
+ hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
+
+ mutex_lock(&hdcp_work->mutex);
+
+ mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+
+ process_output(hdcp_work);
+
+ mutex_unlock(&hdcp_work->mutex);
+}
+
+void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
+{
+ int i = 0;
+
+ for (i = 0; i < hdcp_work->max_link; i++) {
+ cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ }
+
+ kfree(hdcp_work);
+}
+
+static void update_config(void *handle, struct cp_psp_stream_config *config)
+{
+ struct hdcp_workqueue *hdcp_work = handle;
+ struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
+ int link_index = aconnector->dc_link->link_index;
+ struct mod_hdcp_display *display = &hdcp_work[link_index].display;
+ struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+
+ if (aconnector->dc_sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+
+ display->controller = CONTROLLER_ID_D0 + config->otg_inst;
+ display->dig_fe = config->stream_enc_inst;
+ link->dig_be = config->link_enc_inst;
+ link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
+ link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->adjust.hdcp2.disable = 1;
+}
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ struct hdcp_workqueue *hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
+ int i = 0;
+
+ if (hdcp_work == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->max_link = max_caps;
+
+ for (i = 0; i < max_caps; i++) {
+ mutex_init(&hdcp_work[i].mutex);
+
+ INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
+ INIT_WORK(&hdcp_work[i].property_update_work, event_property_update);
+ INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback);
+ INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
+ INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
+
+ hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
+ hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
+ hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
+ hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+ }
+
+ cp_psp->funcs.update_stream_config = update_config;
+ cp_psp->handle = hdcp_work;
+
+ return hdcp_work;
+
+fail_alloc_context:
+ kfree(hdcp_work);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
new file mode 100644
index 000000000000..d3ba505d0696
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
+#define AMDGPU_DM_AMDGPU_DM_HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp.h"
+#include "dc.h"
+#include "dm_cp_psp.h"
+
+struct mod_hdcp;
+struct mod_hdcp_link;
+struct mod_hdcp_display;
+struct cp_psp;
+
+struct hdcp_workqueue {
+ struct work_struct cpirq_work;
+ struct work_struct property_update_work;
+ struct delayed_work callback_dwork;
+ struct delayed_work watchdog_timer_dwork;
+ struct delayed_work property_validate_dwork;
+ struct amdgpu_dm_connector *aconnector;
+ struct mutex mutex;
+
+ struct mod_hdcp hdcp;
+ struct mod_hdcp_output output;
+ struct mod_hdcp_display display;
+ struct mod_hdcp_link link;
+
+ enum mod_hdcp_encryption_status encryption_status;
+ uint8_t max_link;
+};
+
+void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index,
+ struct amdgpu_dm_connector *aconnector);
+void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index);
+void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
+void hdcp_destroy(struct hdcp_workqueue *work);
+
+struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index ee1dc75f5ddc..11e5784aa62a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
(struct edid *) edid->raw_edid);
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
- if (sad_count <= 0) {
- DRM_INFO("SADs count is: %d, don't need to read it\n",
- sad_count);
+ if (sad_count < 0)
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return result;
- }
edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
for (i = 0; i < edid_caps->audio_mode_count; ++i) {
@@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
* Polls for ACT (allocation change trigger) handled and sends
* ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream)
{
@@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
if (!aconnector || !aconnector->mst_port)
- return false;
+ return ACT_FAILED;
mst_mgr = &aconnector->mst_port->mst_mgr;
if (!mst_mgr->mst_state)
- return false;
+ return ACT_FAILED;
ret = drm_dp_check_act_status(mst_mgr);
if (ret)
- return false;
+ return ACT_FAILED;
- return true;
+ return ACT_SUCCESS;
}
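
Widening the return type from bool to an enum lets callers distinguish failure modes instead of collapsing them into false. The enum is presumably declared in dm_helpers.h along these lines (a sketch; only ACT_SUCCESS and ACT_FAILED are visible in this hunk):

	enum act_return_status {
		ACT_SUCCESS,
		ACT_FAILED,
	};
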
bool dm_helpers_dp_mst_send_payload_allocation(
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index fa5d503d379c..64445c4cc4c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
@@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
true);
}
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
@@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
false);
}
}
+ drm_connector_list_iter_end(&iter);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 16218a202b59..2bf8534c18fb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,7 +36,9 @@
#include "dc_link_ddc.h"
#include "i2caux_interface.h"
-
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
@@ -113,6 +115,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
result = -EIO;
break;
case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
result = -EBUSY;
break;
case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
@@ -123,31 +126,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
return result;
}
-static enum drm_connector_status
-dm_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
-
- enum drm_connector_status status =
- drm_dp_mst_detect_port(
- connector,
- &master->mst_mgr,
- aconnector->port);
-
- return status;
-}
-
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
- if (amdgpu_dm_connector->edid) {
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
- }
+ kfree(amdgpu_dm_connector->edid);
+ amdgpu_dm_connector->edid = NULL;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
@@ -163,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+ amdgpu_dm_connector->debugfs_dpcd_address = 0;
+ amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
return drm_dp_mst_connector_late_register(connector, port);
}
@@ -177,7 +169,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
}
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
- .detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dm_dp_mst_connector_destroy,
.reset = amdgpu_dm_connector_funcs_reset,
@@ -245,17 +236,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+dm_mst_atomic_best_encoder(struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+}
+
+static int
+dm_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
- return &amdgpu_dm_connector->mst_encoder->base;
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ aconnector->port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
.get_modes = dm_dp_mst_get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = dm_mst_best_encoder,
+ .atomic_best_encoder = dm_mst_atomic_best_encoder,
+ .detect_ctx = dm_dp_mst_detect,
};
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -416,7 +419,11 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
- aconnector->base.name, dm->adev->dev);
+ &aconnector->base);
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ return;
+
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index f4cfa0caeba8..55a520a63712 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -345,7 +345,7 @@ bool dm_pp_get_clock_levels_by_type(
/* Error in pplib. Provide default values. */
return true;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) {
if (smu_get_clock_by_type(&adev->smu,
dc_to_pp_clock_type(clk_type),
&pp_clks)) {
@@ -365,7 +365,7 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
- } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) {
if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
validation_clks.engine_max_clock = 72000;
@@ -506,8 +506,8 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
- else if (adev->smu.funcs &&
- adev->smu.funcs->display_clock_voltage_request)
+ else if (adev->smu.ppt_funcs &&
+ adev->smu.ppt_funcs->display_clock_voltage_request)
ret = smu_display_clock_voltage_request(&adev->smu,
&pp_clock_request);
if (ret)
@@ -527,7 +527,7 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
&wm_with_clock_ranges);
- else if (adev->smu.funcs &&
- adev->smu.funcs->set_watermarks_for_clock_ranges)
+ else
smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges);
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -604,7 +603,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
pp_funcs->notify_smu_enable_pwe(pp_handle);
- else if (adev->smu.funcs)
+ else if (adev->smu.ppt_funcs)
smu_notify_smu_enable_pwe(&adev->smu);
}
@@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
wm_with_clock_ranges.wm_dmif_clocks_ranges;
@@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- if (!smu->funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
-
- /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL;
- * 1: fail
- */
- if (smu_set_watermarks_for_clock_ranges(&adev->smu,
- &wm_with_clock_ranges))
- return PP_SMU_RESULT_UNSUPPORTED;
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
return PP_SMU_RESULT_OK;
}
@@ -727,10 +717,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */
if (smu_set_azalia_d3_pme(smu))
return PP_SMU_RESULT_FAIL;
@@ -743,10 +733,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */
+ /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
if (smu_set_display_count(smu, count))
return PP_SMU_RESULT_FAIL;
@@ -759,10 +749,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */
+ /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */
if (smu_set_deep_sleep_dcefclk(smu, mhz))
return PP_SMU_RESULT_FAIL;
@@ -777,13 +767,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -799,13 +789,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_mem_clock;
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -835,7 +825,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
struct smu_context *smu = &adev->smu;
struct pp_display_clock_request clock_req;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
switch (clock_id) {
@@ -853,7 +843,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
}
clock_req.clock_freq_in_khz = mhz * 1000;
- /* 0: successful or smu.funcs->display_clock_voltage_request = NULL
+ /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
if (smu_display_clock_voltage_request(smu, &clock_req))
@@ -869,13 +859,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
struct amdgpu_device *adev = ctx->driver_context;
struct smu_context *smu = &adev->smu;
- if (!smu->funcs)
+ if (!smu->ppt_funcs)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc)
+ if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
+ if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -894,13 +884,97 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
if (!smu->ppt_funcs->get_uclk_dpm_states)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
+ if (!smu_get_uclk_dpm_states(smu,
clock_values_in_khz, num_states))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
}
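
The pattern in this file is consistent: call the smu_*() wrappers rather than dereferencing ppt_funcs directly, so the NULL checks and SMU locking live in one place. The wrapper's shape, roughly (a sketch, not the exact in-tree body):

	int smu_get_uclk_dpm_states(struct smu_context *smu,
				    unsigned int *clocks_in_khz,
				    unsigned int *num_states)
	{
		int ret = -EINVAL;

		mutex_lock(&smu->mutex);
		if (smu->ppt_funcs && smu->ppt_funcs->get_uclk_dpm_states)
			ret = smu->ppt_funcs->get_uclk_dpm_states(smu,
					clocks_in_khz, num_states);
		mutex_unlock(&smu->mutex);

		return ret;
	}
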
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+enum pp_smu_status pp_rn_get_dpm_clock_table(
+ struct pp_smu *pp, struct dpm_clocks *clock_table)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu->ppt_funcs->get_dpm_clock_table)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ if (!smu_get_dpm_clock_table(smu, clock_table))
+ return PP_SMU_RESULT_OK;
+
+ return PP_SMU_RESULT_FAIL;
+}
+
+enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges)
+{
+ const struct dc_context *ctx = pp->dm;
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+ wm_with_clock_ranges.wm_dmif_clocks_ranges;
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+ wm_with_clock_ranges.wm_mcif_clocks_ranges;
+ int32_t i;
+
+ if (!smu->ppt_funcs)
+ return PP_SMU_RESULT_UNSUPPORTED;
+
+ wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+ wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+ if (ranges->reader_wm_sets[i].wm_inst > 3)
+ wm_dce_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_dce_clocks[i].wm_set_id =
+ ranges->reader_wm_sets[i].wm_inst;
+
+ wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].min_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+ ranges->reader_wm_sets[i].max_drain_clk_mhz;
+
+ wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].min_fill_clk_mhz;
+
+ wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->reader_wm_sets[i].max_fill_clk_mhz;
+ }
+
+ for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+ if (ranges->writer_wm_sets[i].wm_inst > 3)
+ wm_soc_clocks[i].wm_set_id = WM_SET_A;
+ else
+ wm_soc_clocks[i].wm_set_id =
+ ranges->writer_wm_sets[i].wm_inst;
+ wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].min_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+ ranges->writer_wm_sets[i].max_fill_clk_mhz;
+
+ wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].min_drain_clk_mhz;
+
+ wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+ ranges->writer_wm_sets[i].max_drain_clk_mhz;
+ }
+
+ smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges);
+
+ return PP_SMU_RESULT_OK;
+}
+#endif
+
void dm_pp_get_funcs(
struct dc_context *ctx,
struct pp_smu_funcs *funcs)
@@ -945,6 +1019,15 @@ void dm_pp_get_funcs(
funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break;
#endif
+
+#ifdef CONFIG_DRM_AMD_DC_DCN2_1
+ case DCN_VERSION_2_1:
+ funcs->ctx.ver = PP_SMU_VER_RN;
+ funcs->rn_funcs.pp_smu.dm = ctx;
+ funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
+ funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
+ break;
+#endif
default:
DRM_ERROR("smu version is not supported !\n");
break;
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 627982cb15d2..a160512a2f04 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -48,6 +48,10 @@ DC_LIBS += dce110
DC_LIBS += dce100
DC_LIBS += dce80
+ifdef CONFIG_DRM_AMD_DC_HDCP
+DC_LIBS += hdcp
+endif
+
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 221e0f56389f..823843cd2613 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info(
/* Sort voltage table from low to high*/
if (result == BP_RESULT_OK) {
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j].max_supported_clk <
info->disp_clk_voltage[j-1].max_supported_clk) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
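
Both bios parsers drop their open-coded three-assignment exchange in favor of the kernel's swap() macro, which at this point lives in include/linux/kernel.h and expands essentially to:

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
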
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index dff65c0fe82f..7873abea4112 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
-
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info(
info->disp_clk_voltage[j-1].max_supported_clk
) {
/* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
+ swap(info->disp_clk_voltage[j - 1],
+ info->disp_clk_voltage[j]);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index c43797bea413..8828dd9c3783 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (dc->hwss.exit_optimized_pwr_state)
+ dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
+
+ if (edp_link) {
+ clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ dc_link_set_psr_allow_active(edp_link, false, false);
+ }
+}
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
+{
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link)
+ dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false);
+
+ if (dc->hwss.optimize_pwr_state)
+ dc->hwss.optimize_pwr_state(dc, dc->current_state);
+}
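
These two helpers are meant to bracket work that must not race with PSR or other display low-power optimizations: the exit call caches psr_allow_active and forces PSR off, and the optimize call restores the cached state. Expected usage (a sketch):

	clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
	/* ... reprogram clocks/registers that PSR would interfere with ... */
	clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
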
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index c5c8c4901eed..26db1c5d4e4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
/* Calculate the current DFS clock, in kHz.*/
dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -239,7 +239,7 @@ int dce_set_clock(
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 64);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 64);
/* Prepare to program display clock*/
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
@@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce)
int i;
if (bp->integrated_info)
- clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
- clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr_dce->dentist_vco_freq_khz == 0)
- clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) {
+ clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->base.dentist_vco_freq_khz == 0)
+ clk_mgr_dce->base.dentist_vco_freq_khz = 3600000;
}
/*update the maximum display clock for each power state*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 7c746ef1e32e..a6c46e903ff9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr_dce->dentist_vco_freq_khz / 62);
+ clk_mgr_dce->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
@@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz)
/* Make sure requested clock isn't lower than minimum threshold*/
if (requested_clk_khz > 0)
requested_clk_khz = max(requested_clk_khz,
- clk_mgr->dentist_vco_freq_khz / 62);
+ clk_mgr->base.dentist_vco_freq_khz / 62);
dce_clk_params.target_clock_frequency = requested_clk_khz;
dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 47f529ce280a..3fab9296918a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
ASSERT(clk_mgr->pp_smu);
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
pp_smu = &clk_mgr->pp_smu->rv_funcs;
display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
@@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_
clk_mgr->base.dprefclk_khz = 600000;
if (bp->integrated_info)
- clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) {
- clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) {
+ clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
}
if (!debug->disable_dfs_bypass && bp->integrated_info)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 3e8ac303bd52..25d7b7c6681c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
{
int i;
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
int dpp_inst, dppclk_khz;
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ /* Loop index will match dpp->inst if resource exists,
+ * and we want to avoid dependency on dpp object
+ */
+ dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
clk_mgr->dccg->funcs->update_dpp_dto(
- clk_mgr->dccg, dpp_inst, dppclk_khz, false);
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
}
}
-static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
{
int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
-
- uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
-
- REG_UPDATE(DENTIST_DISPCLK_CNTL,
- DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
-}
-
-static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
-{
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz / khz;
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+ uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
+// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
}
-static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dispclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dispclk_khz = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-
- update_display_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
-}
-
-static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
-{
- struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct dc *dc = clk_mgr_base->ctx->dc;
- struct pp_smu_funcs_nv *pp_smu = NULL;
- bool going_up = clk_mgr->base.clks.dppclk_khz < khz;
-
- if (dc->res_pool->pp_smu)
- pp_smu = &dc->res_pool->pp_smu->nv_funcs;
-
- clk_mgr->base.clks.dppclk_khz = khz;
- clk_mgr->dccg->ref_dppclk = khz;
-
- if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-
- update_global_dpp_clk(clk_mgr, khz);
-
- if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
-}
void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
@@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc *dc = clk_mgr_base->ctx->dc;
struct pp_smu_funcs_nv *pp_smu = NULL;
int display_count;
+ bool update_dppclk = false;
bool update_dispclk = false;
bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
- int i;
if (dc->work_arounds.skip_clock_update)
return;
@@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
- clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
@@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
}
- if (dc->config.forced_clocks == false) {
- // First update display clock
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz))
- request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz);
-
- // Updating DPP clock requires some more logic
- if (!safe_to_lower) {
- // For pre-programming, we need to make sure any DPP clock that will go up has to go up
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
- // First raise the global reference if needed
- if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
- // Then raise any dividers that need raising
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
+ update_dppclk = true;
+ }
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ if (pp_smu && pp_smu->set_voltage_by_freq)
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ update_dispclk = true;
+ }
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true);
- }
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ // if clock is being lowered, increase DTO before lowering refclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+ dcn20_update_clocks_update_dentist(clk_mgr);
} else {
- // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs
-
- if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz)
- request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
-
- for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
- int dpp_inst, dppclk_khz;
-
- if (!context->res_ctx.pipe_ctx[i].plane_state)
- continue;
-
- dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
- dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
-
- clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false);
- }
+ // if clock is being raised, increase refclk before lowering DTO
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
}
+
if (update_dispclk &&
dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
/*update dmcu for wait_loop count*/
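
A condensed sketch of the ordering rule dcn2_update_clocks now follows when reprogramming the global DENTIST refclk and the per-pipe DPP DTOs. The helpers are illustrative stand-ins for dcn20_update_clocks_update_dentist/_dpp_dto, under the assumption that a pipe must never momentarily run faster than requested.

#include <stdbool.h>

struct clk_state {
	int ref_khz;	/* global DENTIST DPP refclk */
	int dto_khz;	/* per-pipe DTO output */
};

static void program_dpp_dto(struct clk_state *s, int khz) { s->dto_khz = khz; }
static void program_dentist(struct clk_state *s, int khz) { s->ref_khz = khz; }

static void update_dppclk(struct clk_state *s, int new_khz)
{
	bool lowering = new_khz < s->ref_khz;

	if (lowering) {
		/* lowering: raise the DTO divide ratio first, then drop the
		 * refclk, so no pipe ever sees more than it asked for */
		program_dpp_dto(s, new_khz);
		program_dentist(s, new_khz);
	} else {
		/* raising: bump the refclk first, then relax the DTO */
		program_dentist(s, new_khz);
		program_dpp_dto(s, new_khz);
	}
}
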
@@ -320,6 +264,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
struct dc_state *context,
bool safe_to_lower)
{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
/* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
@@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
}
- /* Both fclk and dppclk ref are run on the same scemi clock so we
- * need to keep the same value for both
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
*/
if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+ // Both fclk and ref_dppclk run on the same scemi clock.
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
}
@@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
}
}
+static bool dcn2_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->socclk_khz != b->socclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->phyclk_khz != b->phyclk_khz)
+ return false;
+ else if (a->dramclk_khz != b->dramclk_khz)
+ return false;
+ else if (a->p_state_change_support != b->p_state_change_support)
+ return false;
+
+ return true;
+}
+
static struct clk_mgr_funcs dcn2_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn2_update_clocks,
.init_clocks = dcn2_init_clocks,
.enable_pme_wa = dcn2_enable_pme_wa,
.get_clock = dcn2_get_clock,
+ .are_clock_states_equal = dcn2_are_clock_states_equal,
};
@@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn2_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
} else {
/* DFS Slice 2 should be used for DPREFCLK */
@@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct(
pll_req = dc_fixpt_mul_int(pll_req, 100000);
/* integer part is now VCO frequency in kHz */
- clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
+ clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3850000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3850000;
/* Calculate the DPREFCLK in kHz.*/
clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->dentist_vco_freq_khz) / target_div;
+ * clk_mgr->base.dentist_vco_freq_khz) / target_div;
}
//Integrated_info table does not exist on dGPU projects so should not be referenced
//anywhere in code for dGPUs.
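
A self-contained sketch of the fixed-point conversion above: the PLL request register is treated as an integer.fraction multiplier and scaled by 100000 to yield the VCO frequency in kHz. The Q16 fraction width is an assumed illustration of the dc_fixpt semantics, not the hardware's actual register format.

#include <stdint.h>
#include <stdio.h>

static uint32_t vco_khz_from_pll_req(uint64_t pll_req_q16) /* Q48.16 */
{
	/* multiply first, then floor, to keep the fractional precision */
	return (uint32_t)((pll_req_q16 * 100000ull) >> 16);
}

int main(void)
{
	uint64_t req = (38ull << 16) | (1 << 15); /* 38.5 -> 3.85 GHz VCO */
	printf("%u kHz\n", vco_khz_from_pll_req(req)); /* prints 3850000 */
	return 0;
}
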
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
index ac31a9787305..c9fd824f3c23 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
@@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr);
#endif //__DCN20_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 787f94d815f4..790a2d211bd6 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -52,6 +52,45 @@
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
+int rn_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+ bool hdmi_present = false;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ hdmi_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /*
+ * Only notify active stream or virtual stream.
+ * Need to notify virtual stream to work around
+ * headless case. HPD does not fire when system is in
+ * S0i2.
+ */
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+
+ /* WA for hang on HDMI after display off then back on */
+ if (display_count == 0 && hdmi_present)
+ display_count = 1;
+
+ return display_count;
+}
+
void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
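
A reduced sketch of what rn_get_active_display_cnt_wa above computes, assuming the caller has already counted lit PHYs and detected HDMI streams in the new state:

#include <stdbool.h>

static int active_display_cnt_wa(int lit_phy_count, bool hdmi_present)
{
	int display_count = lit_phy_count;

	/* never report 0 while an HDMI stream exists, so clocks are not
	 * dropped to the display-off state across an HDMI off/on cycle */
	if (display_count == 0 && hdmi_present)
		display_count = 1;

	return display_count;
}
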
@@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
int display_count;
bool update_dppclk = false;
bool update_dispclk = false;
- bool enter_display_off = false;
bool dpp_clock_lowered = false;
- struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
- if (display_count == 0)
- enter_display_off = true;
+ if (dc->work_arounds.skip_clock_update)
+ return;
- if (enter_display_off == safe_to_lower) {
- rn_vbios_smu_set_display_count(clk_mgr, display_count);
+ /*
+ * If it is safe to lower but we are already in the lower state, there is nothing to do;
+ * if safe_to_lower is false, we just go to the higher state.
+ */
+ if (safe_to_lower) {
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
+ } else {
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
}
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
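
A minimal sketch of the DCN power-state handshake introduced above, assuming the SMU notification happens at the points marked in comments; the enum and helper names are stand-ins.

#include <stdbool.h>

enum dcn_pwr { DCN_LOW_POWER, DCN_MISSION_MODE };

static void update_pwr_state(enum dcn_pwr *cur, bool safe_to_lower,
			     int display_count)
{
	if (safe_to_lower) {
		/* lower only on the post-programming pass, and only once */
		if (*cur != DCN_LOW_POWER && display_count == 0)
			*cur = DCN_LOW_POWER;    /* SMU notified here */
	} else {
		/* raising is done eagerly on the pre-programming pass */
		if (*cur != DCN_MISSION_MODE)
			*cur = DCN_MISSION_MODE; /* SMU notified here */
	}
}
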
@@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
// if clock is being raised, increase refclk before lowering DTO
if (update_dppclk || update_dispclk)
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
- if (update_dppclk)
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
}
@@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
rn_dump_clk_registers(&sb, clk_mgr_base, &log_info);
- s->dprefclk_khz = sb.dprefclk;
+ s->dprefclk_khz = sb.dprefclk * 1000;
}
void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -329,12 +388,96 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)
rn_vbios_smu_enable_pme_wa(clk_mgr);
}
+void rn_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+}
+
+void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ /* skip empty entries, the smu array has no holes*/
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
+ ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on dcfclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* fclk will be used to select WM */
+
+ if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ }
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* Modify previous watermark range to cover up to max */
+ ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+ ranges->num_reader_wm_sets = num_valid_sets;
+
+ /* modify the min and max to make sure we cover the whole range*/
+ ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+
+ /* This is for writeback only; it does not matter currently as there is no writeback support */
+ ranges->num_writer_wm_sets = 1;
+ ranges->writer_wm_sets[0].wm_inst = WM_A;
+ ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+}
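
A worked example of the WM_TYPE_PSTATE_CHG fill-clock ranges that build_watermark_ranges above constructs: each set covers (previous fclk + 1)..(its own fclk), so the ranges abut without overlapping. The table values are made up.

#include <stdio.h>

int main(void)
{
	int fclk_mhz[] = { 400, 800, 1200 };
	int n = 3, i;

	for (i = 0; i < n; i++) {
		int min_fill = (i == 0) ? 0 : fclk_mhz[i - 1] + 1;
		int max_fill = fclk_mhz[i];

		printf("WM set %d: fclk %d..%d MHz\n", i, min_fill, max_fill);
	}
	/* the last set is then widened to the unconstrained max */
	return 0;
}
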
+
+static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
+ struct pp_smu_wm_range_sets ranges = {0};
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
+
+ if (!debug->disable_pplib_wm_range) {
+ build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ }
+}
+
static struct clk_mgr_funcs dcn21_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = rn_update_clocks,
- .init_clocks = dcn2_init_clocks,
+ .init_clocks = rn_init_clocks,
.enable_pme_wa = rn_enable_pme_wa,
- /* .dump_clk_registers = rn_dump_clk_registers */
+ /* .dump_clk_registers = rn_dump_clk_registers, */
+ .notify_wm_ranges = rn_notify_wm_ranges
};
struct clk_bw_params rn_bw_params = {
@@ -405,80 +548,50 @@ struct clk_bw_params rn_bw_params = {
}
};
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
- int i, num_valid_sets;
-
- num_valid_sets = 0;
-
- for (i = 0; i < WM_SET_COUNT; i++) {
- /* skip empty entries, the smu array has no holes*/
- if (!bw_params->wm_table.entries[i].valid)
- continue;
-
- ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
- ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
-
- if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
- if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
- else {
- /* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
- }
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
-
- } else {
- /* unconstrained for memory retraining */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ int i;
- /* Modify previous watermark range to cover up to max */
- ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- }
- num_valid_sets++;
+ for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) {
+ if (clock_table->DcfClocks[i].Vol == voltage)
+ return clock_table->DcfClocks[i].Freq;
}
- ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
- ranges->num_reader_wm_sets = num_valid_sets;
-
- /* modify the min and max to make sure we cover the whole range*/
- ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
-
- /* This is for writeback only, does not matter currently as no writeback support*/
- ranges->num_writer_wm_sets = 1;
- ranges->writer_wm_sets[0].wm_inst = WM_A;
- ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
-
+ ASSERT(0);
+ return 0;
}
-void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
{
- int i;
+ int i, j = 0;
+
+ j = -1;
ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);
- for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) {
- if (clock_table->FClocks[i].Freq == 0)
+ /* Find lowest DPM; FCLK is filled in reverse order */
+
+ for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
+ if (clock_table->FClocks[i].Freq != 0) {
+ j = i;
break;
+ }
+ }
+
+ if (j == -1) {
+ /* clock table is all 0s, just use our own hardcoded defaults */
+ ASSERT(0);
+ return;
+ }
+
+ bw_params->clk_table.num_entries = j + 1;
- bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
- bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
- bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
- bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
- bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
+ for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
+ bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
+ bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
+ bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
}
- bw_params->clk_table.num_entries = i;
bw_params->vram_type = asic_id->vram_type;
bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
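
A standalone sketch of the reverse-order copy in rn_clk_mgr_helper_populate_bw_params above, with a made-up SMU-style table (highest DPM level first, unused slots zeroed):

#include <stdio.h>

#define LEVELS 4

int main(void)
{
	int fclk[LEVELS] = { 1600, 1200, 800, 0 };
	int i, j = -1;

	for (i = LEVELS - 1; i >= 0; i--)
		if (fclk[i] != 0) { j = i; break; }   /* lowest valid DPM */

	if (j < 0)
		return 1; /* all zeroes: fall back to hardcoded defaults */

	for (i = 0; i <= j; i++)
		printf("entry %d: %d MHz\n", i, fclk[j - i]); /* ascending */
	return 0;
}
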
@@ -486,7 +599,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
- if (clock_table->FClocks[i].Freq == 0) {
+ if (i >= bw_params->clk_table.num_entries) {
bw_params->wm_table.entries[i].valid = false;
continue;
}
@@ -534,57 +647,42 @@ void rn_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
clk_mgr->base.dprefclk_khz = 600000;
} else {
struct clk_log_info log_info = {0};
/* TODO: Check we get what we expect during bringup */
- clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->dentist_vco_freq_khz == 0)
- clk_mgr->dentist_vco_freq_khz = 3600000;
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3600000;
rn_dump_clk_registers(&s, &clk_mgr->base, &log_info);
- clk_mgr->base.dprefclk_khz = s.dprefclk;
-
- if (clk_mgr->base.dprefclk_khz != 600000) {
- clk_mgr->base.dprefclk_khz = 600000;
- ASSERT(1); //TODO: Renoir follow up.
- }
+ /* Convert dprefclk units from MHz to kHz */
+ /* Value already divided by 10, some resolution lost */
+ clk_mgr->base.dprefclk_khz = s.dprefclk * 1000;
/* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dprefclk_khz == 0)
+ if (clk_mgr->base.dprefclk_khz == 0) {
+ ASSERT(clk_mgr->base.dprefclk_khz == 600000);
clk_mgr->base.dprefclk_khz = 600000;
+ }
}
dce_clock_read_ss_info(clk_mgr);
clk_mgr->base.bw_params = &rn_bw_params;
- if (pp_smu) {
+ if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
- clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+ rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
}
- /*
- * Notify SMU which set of WM should be selected for different ranges of fclk
- * On Renoir there is a maximumum of 4 DF pstates supported, could be less
- * depending on DDR speed and fused maximum fclk.
- */
- if (!debug->disable_pplib_wm_range) {
- struct pp_smu_wm_range_sets ranges = {0};
-
- build_watermark_ranges(clk_mgr->base.bw_params, &ranges);
-
- /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
- if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
+ /* enable powerfeatures when displaycount goes to 0 */
+ rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
}
-
- /* enable powerfeatures when displaycount goes to 0 */
- if (!debug->disable_48mhz_pwrdwn)
- rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
index aadec06fde10..e4322fa5475b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h
@@ -26,11 +26,13 @@
#ifndef __RN_CLK_MGR_H__
#define __RN_CLK_MGR_H__
+#include "clk_mgr.h"
+#include "dm_pp_smu.h"
+
struct rn_clk_registers {
uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
};
-
void rn_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 50984c1811bb..cb7c0e8b7e1b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -33,7 +33,7 @@
#include "mp/mp_12_0_0_sh_mask.h"
#define REG(reg_name) \
- (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+ (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
#define FN(reg_name, field) \
FD(reg_name##__##field)
@@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis
int actual_dispclk_set_mhz = -1;
struct dc *core_dc = clk_mgr->base.ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- uint32_t clk = requested_dispclk_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDispclkFreq,
- clk);
+ requested_dispclk_khz / 1000);
if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
@@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
{
int actual_dcfclk_set_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_dcfclk_set_mhz;
actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param(
@@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int
{
int actual_min_ds_dcfclk_mhz = -1;
- if (clk_mgr->smu_ver < 0xFFFFFFFF)
+ if (clk_mgr->smu_ver < 0x370c00)
return actual_min_ds_dcfclk_mhz;
actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param(
@@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_
{
int actual_dppclk_set_mhz = -1;
- uint32_t clk = requested_dpp_khz / 1000;
-
- if (clk <= 100)
- clk = 101;
-
actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
- clk);
+ requested_dpp_khz / 1000);
return actual_dppclk_set_mhz * 1000;
}
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count)
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state)
{
+ int disp_count;
+
+ if (state == DCN_PWR_STATE_LOW_POWER)
+ disp_count = 0;
+ else
+ disp_count = 1;
+
rn_vbios_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_SetDisplayCount,
- display_count);
+ clk_mgr,
+ VBIOSSMC_MSG_SetDisplayCount,
+ disp_count);
}
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr)
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
{
rn_vbios_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
- 0);
+ enable);
}
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
index da3a49487c6d..ccc01879c9d4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h
@@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz);
void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz);
-void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count);
-void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr);
+void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state);
+void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable);
void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
#endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */
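
A hedged sketch of the kHz/MHz unit handling shared by the rn_vbios_smu_set_*clk helpers in this file: the SMU message parameter is in MHz, callers work in kHz, and the granted value is scaled back up. smu_send and the message id are placeholders, not the real VBIOS SMC interface.

static int smu_send(int msg_id, int param_mhz)
{
	(void)msg_id;
	return param_mhz;	/* stub: real firmware may clamp or round */
}

static int set_dppclk_khz(int requested_khz)
{
	/* SMU message parameters are in MHz; callers pass kHz */
	int actual_mhz = smu_send(/* placeholder for SetDppclkFreq */ 0,
				  requested_khz / 1000);

	/* scale the granted value back to kHz; sub-MHz resolution is lost */
	return actual_mhz * 1000;
}
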
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4b8819c27fcd..32f31bf91915 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -194,7 +194,7 @@ static bool create_links(
}
}
- if (!should_destory_link) {
+ if (dc->config.force_enum_edp || !should_destory_link) {
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
return false;
}
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates*/
+ int i = 0;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
@@ -765,8 +786,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
#endif
- dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+#endif
}
current_ctx = dc->current_state;
@@ -789,9 +815,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (false == construct(dc, init_params))
goto construct_fail;
- /*TODO: separate HW and SW initialization*/
- dc->hwss.init_hw(dc);
-
full_pipe_count = dc->res_pool->pipe_count;
if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
full_pipe_count--;
@@ -824,9 +847,24 @@ alloc_fail:
return NULL;
}
+void dc_hardware_init(struct dc *dc)
+{
+ dc->hwss.init_hw(dc);
+}
+
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params)
{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ dc->ctx->cp_psp = init_params->cp_psp;
+#endif
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+#endif
}
void dc_destroy(struct dc **dc)
@@ -905,15 +943,11 @@ static void program_timing_sync(
/* set first pipe with plane as master */
for (j = 0; j < group_size; j++) {
- struct pipe_ctx *temp;
-
if (pipe_set[j]->plane_state) {
if (j == 0)
break;
- temp = pipe_set[0];
- pipe_set[0] = pipe_set[j];
- pipe_set[j] = temp;
+ swap(pipe_set[0], pipe_set[j]);
break;
}
}
@@ -970,40 +1004,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_crtc_timing *crtc_timing)
{
struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
struct dc_link *link = sink->link;
- unsigned int enc_inst, tg_inst;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ // Seamless boot only supports single DP and eDP so far
+ if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
+ sink->sink_signal != SIGNAL_TYPE_EDP)
+ return false;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (enc_inst >= dc->res_pool->pipe_count)
+ if (enc_inst == ENGINE_ID_UNKNOWN)
return false;
- if (enc_inst >= dc->res_pool->stream_enc_count)
- return false;
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
+
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg(
- dc->res_pool->stream_enc[enc_inst]);
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
if (tg_inst >= dc->res_pool->timing_generator_count)
return false;
tg = dc->res_pool->timing_generators[tg_inst];
- if (!tg->funcs->is_matching_timing)
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
return false;
- if (!tg->funcs->is_matching_timing(tg, crtc_timing))
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
+
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ return false;
+
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ return false;
+
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
if (dc_is_dp_signal(link->connector_signal)) {
@@ -1016,6 +1097,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
}
return true;
@@ -1077,15 +1172,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mode_changed)
- continue;
+ if (dc->hwss.apply_ctx_for_surface)
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context); /* use new pipe config in new context */
- }
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1104,16 +1204,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- dc->hwss.apply_ctx_for_surface(
- dc, context->streams[i],
- context->stream_status[i].plane_count,
- context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
/*
* enable stereo
@@ -1140,15 +1245,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (!dc->optimize_seamless_boot)
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
-
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
- memset(&context->commit_hints, 0, sizeof(context->commit_hints));
-
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1496,20 +1595,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- update_flags->raw = 0; // Reset all flags
-
if (u->flip_addr)
update_flags->bits.addr_update = 1;
- if (!is_surface_in_context(context, u->surface)) {
- update_flags->bits.new_plane = 1;
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
}
- if (u->surface->force_full_update) {
- update_flags->bits.full_update = 1;
- return UPDATE_TYPE_FULL;
- }
+ update_flags->raw = 0; // Reset all flags
type = get_plane_info_update_type(u);
elevate_update_type(&overall_type, type);
@@ -1567,40 +1661,43 @@ static enum surface_update_type check_update_surfaces_for_stream(
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
if (stream_status == NULL || stream_status->plane_count != surface_count)
- return UPDATE_TYPE_FULL;
+ overall_type = UPDATE_TYPE_FULL;
/* some stream updates require passive update */
if (stream_update) {
- if ((stream_update->src.height != 0) &&
- (stream_update->src.width != 0))
- return UPDATE_TYPE_FULL;
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
- if ((stream_update->dst.height != 0) &&
- (stream_update->dst.width != 0))
- return UPDATE_TYPE_FULL;
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0))
+ su_flags->bits.scaling = 1;
if (stream_update->out_transfer_func)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.abm_level = 1;
if (stream_update->dpms_off)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
if (stream_update->wb_update)
- return UPDATE_TYPE_FULL;
+ su_flags->bits.wb_update = 1;
#endif
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
}
for (i = 0 ; i < surface_count; i++) {
enum surface_update_type type =
det_surface_update(dc, &updates[i]);
- if (type == UPDATE_TYPE_FULL)
- return type;
-
elevate_update_type(&overall_type, type);
}
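
A compact sketch of the accumulate-then-elevate pattern this hunk switches to: every full-update trigger is recorded as a flag bit first, and the overall type is elevated once at the end, so callers can still see why an update was classified full. The union layout is a reduced stand-in for the stream update flags.

#include <stdbool.h>
#include <stdint.h>

enum upd_type { UPDATE_TYPE_FAST, UPDATE_TYPE_FULL };

union su_flags {
	struct {
		uint32_t scaling:1;
		uint32_t out_tf:1;
		uint32_t dpms_off:1;
	} bits;
	uint32_t raw;
};

static enum upd_type classify(union su_flags *f,
			      bool scaled, bool new_out_tf, bool dpms_off)
{
	f->raw = 0;			/* reset, then record every trigger */
	if (scaled)
		f->bits.scaling = 1;
	if (new_out_tf)
		f->bits.out_tf = 1;
	if (dpms_off)
		f->bits.dpms_off = 1;

	/* elevate once, after all triggers are recorded */
	return f->raw ? UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
}
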
@@ -1622,16 +1719,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
int i;
enum surface_update_type type;
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL)
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
- if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
- dc->optimized_required = true;
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Otherwise we fall back to a memcmp.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+ }
return type;
}
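
A reduced sketch of the fast-path check above: prefer the per-ASIC are_clock_states_equal callback when the clk_mgr provides one, and fall back to the prefix memcmp otherwise. The types here are simplified stand-ins for struct dc_clocks and clk_mgr_funcs.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct clocks {
	int dispclk_khz;
	int dppclk_khz;
	bool prev_p_state_change_support; /* fields from here on are ignored */
};

struct mgr {
	struct clocks clks;
	bool (*are_clock_states_equal)(struct clocks *a, struct clocks *b);
};

static bool needs_optimize(struct mgr *m, struct clocks *current_state_clk)
{
	if (m->are_clock_states_equal)
		return !m->are_clock_states_equal(&m->clks, current_state_clk);

	/* fallback: compare only the leading fields, as the driver does
	 * with offsetof(struct dc_clocks, prev_p_state_change_support) */
	return memcmp(current_state_clk, &m->clks,
		      offsetof(struct clocks, prev_p_state_change_support)) != 0;
}
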
@@ -1872,6 +1982,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
struct dc_state *context)
{
int j;
+ bool should_program_abm;
// Stream updates
for (j = 0; j < dc->res_pool->pipe_count; j++) {
@@ -1952,14 +2063,21 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
- if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
- // if otg funcs defined check if blanked before programming
- if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = true;
+
+ // if otg funcs defined check if blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ } else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
- } else
- pipe_ctx->stream_res.abm->funcs->set_abm_level(
- pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
}
}
}
@@ -2004,7 +2122,13 @@ static void commit_planes_for_stream(struct dc *dc,
* In case of turning off screen, no need to program front end a second time.
* just return after program blank.
*/
- dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
+
return;
}
@@ -2064,10 +2188,15 @@ static void commit_planes_for_stream(struct dc *dc,
stream_status =
stream_get_status(context, pipe_ctx->stream);
- dc->hwss.apply_ctx_for_surface(
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
dc, pipe_ctx->stream, stream_status->plane_count, context);
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+#endif
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ca20b150afcc..12ba6fdf89b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -79,7 +79,6 @@ static void destruct(struct dc_link *link)
int i;
if (link->hpd_gpio != NULL) {
- dal_gpio_close(link->hpd_gpio);
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
-static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+static void read_current_link_settings_on_detect(struct dc_link *link)
{
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
@@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the edp link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
@@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
-bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_alt_mode(struct dc_link *link)
{
/**
@@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link)
* This does not create remote sinks but will trigger DM
* to start MST detection if a branch is detected.
*/
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+static bool dc_link_detect_helper(struct dc_link *link,
+ enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
@@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
struct dpcd_caps prev_dpcd_caps;
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
+ bool perform_dp_seamless_boot = false;
+
DC_LOGGER_INIT(link->ctx->logger);
if (dc_is_virtual_signal(link->connector_signal))
@@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
- read_edp_current_link_settings_on_detect(link);
+ read_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
- sink_caps.transaction_type =
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
+
/* wa HPD high coming too early*/
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
@@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* empty which leads to allocate_mst_payload() has "0"
* pbn_per_slot value leading to exception on dc_fixpt_div()
*/
- link->verified_link_cap = link->reported_link_cap;
+ dp_verify_mst_link_cap(link);
+
if (prev_sink != NULL)
dc_sink_release(prev_sink);
return false;
}
+ // For seamless boot, skip verify link cap: read the UEFI-trained link settings and treat them as verified.
+ if (reason == DETECT_REASON_BOOT &&
+ dc_ctx->dc->config.power_down_display_on_boot == false &&
+ link->link_status.link_active == true)
+ perform_dp_seamless_boot = true;
+
+ if (perform_dp_seamless_boot) {
+ read_current_link_settings_on_detect(link);
+ link->verified_link_cap = link->reported_link_cap;
+ }
+
break;
}
@@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
* two link trainings
*/
- /* deal with non-mst cases */
- dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ // verify link cap for SST non-seamless boot
+ if (!perform_dp_seamless_boot)
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
@@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
dc_sink_release(prev_sink);
return true;
+
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ const struct dc *dc = link->dc;
+ bool ret;
+
+ /* get out of low power state */
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
+ ret = dc_link_detect_helper(link, reason);
+
+ /* Go back to power optimized state */
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+
+ return ret;
}
bool dc_link_get_hpd_state(struct dc_link *dc_link)
@@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- if (!apply_seamless_boot_optimization)
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
@@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_set_fec_ready(link, false);
}
#endif
- } else
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else {
+ if (signal != SIGNAL_TYPE_VIRTUAL)
+ link->link_enc->funcs->disable_output(link->link_enc, signal);
+ }
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
/* MST disable link only when no stream use the link */
@@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing(
break;
}
- if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
@@ -2381,17 +2421,206 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
return true;
}
-bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait)
{
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
- dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
+
+ link->psr_allow_active = allow_active;
return true;
}
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_feature_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+static inline enum physical_phy_id
+transmitter_to_phy_id(enum transmitter transmitter_value)
+{
+ switch (transmitter_value) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYLD_0;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYLD_1;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYLD_2;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYLD_3;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYLD_4;
+ case TRANSMITTER_UNIPHY_F:
+ return PHYLD_5;
+ case TRANSMITTER_NUTMEG_CRT:
+ return PHYLD_6;
+ case TRANSMITTER_TRAVIS_CRT:
+ return PHYLD_7;
+ case TRANSMITTER_TRAVIS_LCD:
+ return PHYLD_8;
+ case TRANSMITTER_UNIPHY_G:
+ return PHYLD_9;
+ case TRANSMITTER_COUNT:
+ return PHYLD_COUNT;
+ case TRANSMITTER_UNKNOWN:
+ return PHYLD_UNKNOWN;
+ default:
+ WARN_ONCE(1, "Unknown transmitter value %d\n",
+ transmitter_value);
+ return PHYLD_UNKNOWN;
+ }
+}
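The switch above keeps a WARN_ONCE diagnostic for unexpected values. Where no diagnostic is needed, a designated-initializer table gives the same mapping more compactly; a sketch, assuming the transmitter enum values are small and non-negative (callers would still need to range-check the index before use):

/* Sketch: table equivalent of transmitter_to_phy_id(), no warning path. */
static const enum physical_phy_id phy_id_map[TRANSMITTER_COUNT] = {
        [TRANSMITTER_UNIPHY_A] = PHYLD_0,
        [TRANSMITTER_UNIPHY_B] = PHYLD_1,
        [TRANSMITTER_UNIPHY_C] = PHYLD_2,
        [TRANSMITTER_UNIPHY_D] = PHYLD_3,
        [TRANSMITTER_UNIPHY_E] = PHYLD_4,
        [TRANSMITTER_UNIPHY_F] = PHYLD_5,
        [TRANSMITTER_NUTMEG_CRT] = PHYLD_6,
        [TRANSMITTER_TRAVIS_CRT] = PHYLD_7,
        [TRANSMITTER_TRAVIS_LCD] = PHYLD_8,
        [TRANSMITTER_UNIPHY_G] = PHYLD_9,
};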
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *core_dc;
+ struct dmcu *dmcu;
+ int i;
+ /* updateSinkPsrDpcdConfig */
+ union dpcd_psr_configuration psr_configuration;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ core_dc = link->ctx->dc;
+ dmcu = core_dc->res_pool->dmcu;
+
+ if (!dmcu)
+ return false;
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2 */
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /* For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368, /* DP_PSR_EN_CFG (0x170) */
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown) */
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /* PhyId is associated with the transmitter id */
+ psr_context->smuPhyId =
+ transmitter_to_phy_id(link->link_enc->transmitter);
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsyncRateHz = div64_u64(div64_u64(
+ stream->timing.pix_clk_100hz * 100,
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->timing_generator_count;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* Wait 2 frames before entering PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* skip power down of the single pipe since it blocks the cstate */
+ if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+
+ /* psr_feature_enabled == false indicates setup_psr did not succeed,
+ * which should not happen since firmware should be running at this point
+ */
+ if (!link->psr_feature_enabled)
+ ASSERT(0);
+
+ return true;
+}
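Once dc_link_setup_psr() succeeds, runtime entry and exit are driven through dc_link_set_psr_allow_active() (renamed in this patch from dc_link_set_psr_enable()). A hypothetical driver-side sequence; the config fields depend on the panel's DPCD capabilities:

/* Hypothetical call order (sketch only). */
struct psr_config psr_cfg = { .psr_version = 0x1 };
struct psr_context psr_ctx;

if (dc_link_setup_psr(link, stream, &psr_cfg, &psr_ctx))
        dc_link_set_psr_allow_active(link, true, false); /* allow PSR entry */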
+
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
@@ -2510,7 +2739,7 @@ static void update_mst_stream_alloc_table(
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
*/
-static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
@@ -2521,6 +2750,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
uint8_t i;
+ enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
/* enable_link_dp_mst already check link->enabled_stream_count
@@ -2568,14 +2798,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&link->mst_stream_alloc_table);
/* send down message */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -2667,6 +2899,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ if (cp_psp && cp_psp->funcs.update_stream_config) {
+ struct cp_psp_stream_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
+ config.dpms_off = dpms_off;
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+ }
+}
+#endif
void core_link_enable_stream(
struct dc_state *state,
@@ -2677,6 +2927,10 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
@@ -2727,6 +2981,9 @@ void core_link_enable_stream(
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2734,6 +2991,9 @@ void core_link_enable_stream(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
return;
}
@@ -2786,13 +3046,16 @@ void core_link_enable_stream(
#endif
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- allocate_mst_payload(pipe_ctx);
+ dc_link_allocate_mst_payload(pipe_ctx);
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment))
@@ -2810,6 +3073,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+#endif
+
core_dc->hwss.blank_stream(pipe_ctx);
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 51991bf26a93..7f904d55c1bc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -508,7 +508,7 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size)
{
- bool ret;
+ bool ret = false;
uint32_t payload_size =
dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
@@ -527,34 +527,32 @@ bool dal_ddc_service_query_ddc_data(
 /* TODO: the payload data length for i2c and aux is a uint8,
 * but we want to read 256 bytes over i2c */
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
- struct aux_payload write_payload = {
- .i2c_over_aux = true,
- .write = true,
- .mot = true,
- .address = address,
- .length = write_size,
- .data = write_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- struct aux_payload read_payload = {
- .i2c_over_aux = true,
- .write = false,
- .mot = false,
- .address = address,
- .length = read_size,
- .data = read_buf,
- .reply = NULL,
- .defer_delay = get_defer_delay(ddc),
- };
-
- ret = dc_link_aux_transfer_with_retries(ddc, &write_payload);
+ struct aux_payload payload;
+ bool read_available = true;
+
+ payload.i2c_over_aux = true;
+ payload.address = address;
+ payload.reply = NULL;
+ payload.defer_delay = get_defer_delay(ddc);
+
+ if (write_size != 0) {
+ payload.write = true;
+ /* keep mot set while a read payload is still pending */
+ payload.mot = read_size != 0;
+ payload.length = write_size;
+ payload.data = write_buf;
+
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ read_available = ret;
+ }
- if (!ret)
- return false;
+ if (read_size != 0 && read_available) {
+ payload.write = false;
+ payload.mot = false;
+ payload.length = read_size;
+ payload.data = read_buf;
- ret = dc_link_aux_transfer_with_retries(ddc, &read_payload);
+ ret = dal_ddc_submit_aux_command(ddc, &payload);
+ }
} else {
struct i2c_payloads *payloads =
dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
@@ -585,6 +583,41 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
+{
+ uint8_t retrieved = 0;
+ bool ret = false;
+
+ if (!ddc)
+ return false;
+
+ if (!payload)
+ return false;
+
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
+ payload->length;
+
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
+ current_payload.mot = !is_end_of_payload;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
+
+ ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
+
+ retrieved += current_payload.length;
+ } while (retrieved < payload->length && ret);
+
+ return ret;
+}
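dal_ddc_submit_aux_command() splits an oversized payload into DEFAULT_AUX_MAX_DATA_SIZE-byte AUX transactions and keeps MOT (middle of transaction) set on every chunk except the last. Assuming the usual 16-byte maximum, a 40-byte transfer proceeds as:

/* Worked example, assuming DEFAULT_AUX_MAX_DATA_SIZE == 16:
 * chunk 1: retrieved = 0,  length = 16, mot = true
 * chunk 2: retrieved = 16, length = 16, mot = true
 * chunk 3: retrieved = 32, length = 8,  mot = false  (end of payload)
 */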
+
/* dc_link_aux_transfer_raw() - Attempt to transfer
* the given aux payload. This function does not perform
* retries or handle error states. The reply is returned
@@ -613,6 +646,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
return dce_aux_transfer_with_retries(ddc, payload);
}
+
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout)
+{
+ enum dc_status status = DC_OK;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+
+ if (aux_engine->funcs->configure_timeout == NULL)
+ return DC_ERROR_UNEXPECTED;
+ if (!aux_engine->funcs->configure_timeout(ddc, timeout))
+ status = DC_ERROR_UNEXPECTED;
+ return status;
+}
+
/*test only function*/
void dal_ddc_service_set_ddc_pin(
struct ddc_service *ddc_service,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index f5742719b5d9..0f59b68aa4c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (link->link_enc->funcs->get_max_link_cap)
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
max_link_cap.lane_count =
@@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries(
for (i = 0; i < attempts; i++) {
int fail_count = 0;
- enum dc_connection_type type;
+ enum dc_connection_type type = dc_connection_none;
memset(&link->verified_link_cap, 0,
sizeof(struct dc_link_settings));
- if (!dc_link_detect_sink(link, &type)) {
+ if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+ link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
&link->reported_link_cap,
@@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries(
return success;
}
+bool dp_verify_mst_link_cap(
+ struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+
+ return true;
+}
+
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b)
@@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link)
return false;
}
-static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_enabled)
+ if (!link->psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_enable(link, false, true);
- dc_link_set_psr_enable(link, true, true);
+ dc_link_set_psr_allow_active(link, false, true);
+ dc_link_set_psr_allow_active(link, true, true);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
enum dc_status result;
bool status = false;
+ struct pipe_ctx *pipe_ctx;
+ int i;
if (out_link_loss)
*out_link_loss = false;
@@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&link->cur_link_settings,
true, LINK_TRAINING_ATTEMPTS);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link &&
+ pipe_ctx->stream->dpms_off == false &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_link_allocate_mst_payload(pipe_ctx);
+ }
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -2545,6 +2575,7 @@ static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
/* decode converter info*/
if (!ds_port.fields.PORT_PRESENT) {
@@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
* keep receiver powered all the time.*/
case DP_BRANCH_DEVICE_ID_0010FA:
case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
link->wa_flags.dp_keep_receiver_powered = true;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 79438c4f1e20..a519dbc5ecb6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if (pipes[i].stream != NULL &&
!pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL) {
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->link == link) {
udelay(100);
pipes[i].stream_res.stream_enc->funcs->dp_blank(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f25ac17f47fa..37698305a2dc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
}
-static bool are_rect_integer_multiples(struct rect src, struct rect dest)
+static bool are_rects_integer_multiples(struct rect src, struct rect dest)
{
if (dest.width >= src.width && dest.width % src.width == 0 &&
dest.height >= src.height && dest.height % src.height == 0)
@@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, struct rect dest)
return false;
}
+
+static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->plane_state->scaling_quality.integer_scaling)
+ return;
+
+ //for Centered Mode
+ if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width &&
+ pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) {
+ // calculate the maximum number of src replications that fit in the addressable area
+ unsigned int integer_multiple = min(
+ pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width,
+ pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height);
+
+ //scale dst
+ pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width;
+ pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height;
+
+ //center dst onto addressable
+ pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2;
+ pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2;
+ }
+
+ //disable taps if src & dst are integer ratio
+ if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) {
+ pipe_ctx->plane_state->scaling_quality.v_taps = 1;
+ pipe_ctx->plane_state->scaling_quality.h_taps = 1;
+ pipe_ctx->plane_state->scaling_quality.v_taps_c = 1;
+ pipe_ctx->plane_state->scaling_quality.h_taps_c = 1;
+ }
+}
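A worked example of the centered-mode math above, assuming src and dst are both 960x540 on a 1920x1080 addressable timing:

/* integer_multiple = min(1920 / 960, 1080 / 540) = 2
 * dst becomes 1920x1080; dst.x = (1920 - 1920) / 2 = 0, dst.y = 0
 * dst is then an exact 2x multiple of src, so all tap counts drop to 1
 */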
+
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
+ calculate_integer_scaling(pipe_ctx);
+
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
@@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
- if (res &&
- plane_state->scaling_quality.integer_scaling &&
- are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport,
- pipe_ctx->plane_res.scl_data.recout)) {
- pipe_ctx->plane_res.scl_data.taps.v_taps = 1;
- pipe_ctx->plane_res.scl_data.taps.h_taps = 1;
- }
if (!res) {
/* Try 24 bpp linebuffer */
@@ -1635,7 +1662,8 @@ static int acquire_first_free_pipe(
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
- enum engine_id id)
+ enum engine_id id,
+ enum dce_version dc_version)
{
int i, available_audio_count;
@@ -1854,28 +1882,28 @@ static int acquire_resource_from_hw_enabled_state(
struct dc_stream_state *stream)
{
struct dc_link *link = stream->link;
- unsigned int inst, tg_inst;
+ unsigned int i, inst, tg_inst = 0;
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return -1;
- /* Check for which front end is used by this encoder.
- * Note the inst is 1 indexed, where 0 is undefined.
- * Note that DIG_FE can source from different OTG but our
- * current implementation always map 1-to-1, so this code makes
- * the same assumption and doesn't check OTG source.
- */
inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
- /* Instance should be within the range of the pool */
- if (inst >= pool->pipe_count)
- return -1;
+ if (inst == ENGINE_ID_UNKNOWN)
+ return -1;
- if (inst >= pool->stream_enc_count)
- return -1;
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i]->id == inst) {
+ tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
+ pool->stream_enc[i]);
+ break;
+ }
+ }
- tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]);
+ // tg_inst not found
+ if (i == pool->stream_enc_count)
+ return -1;
if (tg_inst >= pool->timing_generator_count)
return false;
@@ -1971,7 +1999,7 @@ enum dc_status resource_map_pool_resources(
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
- &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
/*
* Audio assigned in order first come first get.
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index bf1d7bb90e0f..bb09243758fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dwb->funcs->is_enabled(dwb)) {
/* writeback pipe already enabled, only need to update */
- dc->hwss.update_writeback(dc, stream_status, wb_info);
+ dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state);
} else {
/* Enable writeback pipe from scratch*/
- dc->hwss.enable_writeback(dc, stream_status, wb_info);
+ dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index a82352a87808..0416a17b0897 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.48"
+#define DC_VER "3.2.56"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -111,19 +111,20 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ bool extended_aux_timeout_support;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hw_3d_lut;
#endif
struct dc_plane_cap planes[MAX_PLANES];
};
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa {
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool no_connect_phy_config;
bool dedcn20_305_wa;
+#endif
bool skip_clock_update;
};
-#endif
struct dc_dcc_surface_param {
struct dc_size surface_size;
@@ -219,7 +220,9 @@ struct dc_config {
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
bool edp_not_connected;
+ bool force_enum_edp;
bool forced_clocks;
+ bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
};
@@ -227,6 +230,7 @@ enum visual_confirm {
VISUAL_CONFIRM_DISABLE = 0,
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
+ VISUAL_CONFIRM_MPCTREE = 4,
};
enum dcc_option {
@@ -245,6 +249,19 @@ enum wm_report_mode {
WM_REPORT_DEFAULT = 0,
WM_REPORT_OVERRIDE = 1,
};
+enum dtm_pstate {
+ dtm_level_p0 = 0, /* highest voltage */
+ dtm_level_p1,
+ dtm_level_p2,
+ dtm_level_p3,
+ dtm_level_p4, /* when active_display_count = 0 */
+};
+
+enum dcn_pwr_state {
+ DCN_PWR_STATE_UNKNOWN = -1,
+ DCN_PWR_STATE_MISSION_MODE = 0,
+ DCN_PWR_STATE_LOW_POWER = 3,
+};
/*
* For any clocks that may differ per pipe
@@ -252,11 +269,7 @@ enum wm_report_mode {
*/
struct dc_clocks {
int dispclk_khz;
- int max_supported_dppclk_khz;
- int max_supported_dispclk_khz;
int dppclk_khz;
- int bw_dppclk_khz; /*a copy of dppclk_khz*/
- int bw_dispclk_khz;
int dcfclk_khz;
int socclk_khz;
int dcfclk_deep_sleep_khz;
@@ -264,12 +277,17 @@ struct dc_clocks {
int phyclk_khz;
int dramclk_khz;
bool p_state_change_support;
-
+ enum dcn_pwr_state pwr_state;
/*
* Elements below are not compared for the purposes of
* optimization required
*/
bool prev_p_state_change_support;
+ enum dtm_pstate dtm_level;
+ int max_supported_dppclk_khz;
+ int max_supported_dispclk_khz;
+ int bw_dppclk_khz; /*a copy of dppclk_khz*/
+ int bw_dispclk_khz;
};
struct dc_bw_validation_profile {
@@ -347,6 +365,7 @@ struct dc_debug_options {
bool disable_hubp_power_gate;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_dsc_power_gate;
+ int dsc_min_slice_height_override;
#endif
bool disable_pplib_wm_range;
enum wm_report_mode pplib_wm_report_mode;
@@ -462,9 +481,7 @@ struct dc {
struct dc_config config;
struct dc_debug_options debug;
struct dc_bounding_box_overrides bb_overrides;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
struct dc_bug_wa work_arounds;
-#endif
struct dc_context *ctx;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
struct dc_phy_addr_space_config vm_pa_config;
@@ -553,10 +570,16 @@ struct dc_init_data {
};
struct dc_callback_init {
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#else
uint8_t reserved;
+#endif
};
struct dc *dc_create(const struct dc_init_data *init_params);
+void dc_hardware_init(struct dc *dc);
+
int dc_get_vmid_use_vector(struct dc *dc);
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid);
@@ -565,6 +588,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
#endif
void dc_init_callbacks(struct dc *dc,
const struct dc_callback_init *init_params);
+void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
/*******************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 4ef97f65e55d..4f8f576d5fcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -49,7 +49,8 @@ enum aux_channel_operation_result {
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
- AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+ AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON,
+ AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 6e42209f0e20..0ed2962add5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -30,6 +30,7 @@
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+#include "dc_types.h"
struct dc_dsc_bw_range {
uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */
@@ -39,13 +40,21 @@ struct dc_dsc_bw_range {
uint32_t stream_kbps; /* Uncompressed stream bandwidth */
};
+struct display_stream_compressor {
+ const struct dsc_funcs *funcs;
+#ifndef AMD_EDID_UTILITY
+ struct dc_context *ctx;
+ int inst;
+#endif
+};
bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data,
const uint8_t *dpcd_dsc_ext_data,
struct dsc_dec_dpcd_caps *dsc_sink_caps);
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_kbps,
const uint32_t max_kbps,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range(
struct dc_dsc_bw_range *range);
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 0b8700a8a94a..e0856bb8511f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -26,6 +26,8 @@
#ifndef DC_HW_TYPES_H
#define DC_HW_TYPES_H
+#ifndef AMD_EDID_UTILITY
+
#include "os_types.h"
#include "fixed31_32.h"
#include "signal_types.h"
@@ -124,20 +126,6 @@ struct plane_size {
int chroma_pitch;
struct rect surface_size;
struct rect chroma_size;
-
- union {
- struct {
- struct rect surface_size;
- int surface_pitch;
- } grph;
-
- struct {
- struct rect luma_size;
- int luma_pitch;
- struct rect chroma_size;
- int chroma_pitch;
- } video;
- };
};
struct dc_plane_dcc_param {
@@ -148,21 +136,6 @@ struct dc_plane_dcc_param {
int meta_pitch_c;
bool independent_64b_blks_c;
-
- union {
- struct {
- int meta_pitch;
- bool independent_64b_blks;
- } grph;
-
- struct {
- int meta_pitch_l;
- bool independent_64b_blks_l;
-
- int meta_pitch_c;
- bool independent_64b_blks_c;
- } video;
- };
};
/*Displayable pixel format in fb*/
@@ -605,6 +578,11 @@ enum dc_quantization_range {
QUANTIZATION_RANGE_LIMITED
};
+enum dc_dynamic_expansion {
+ DYN_EXPANSION_AUTO,
+ DYN_EXPANSION_DISABLE
+};
+
/* XFM */
/* used in struct dc_plane_state */
@@ -616,6 +594,8 @@ struct scaling_taps {
bool integer_scaling;
};
+#endif /* AMD_EDID_UTILITY */
+
enum dc_timing_standard {
DC_TIMING_STANDARD_UNDEFINED,
DC_TIMING_STANDARD_DMT,
@@ -737,30 +717,6 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
-enum trigger_delay {
- TRIGGER_DELAY_NEXT_PIXEL = 0,
- TRIGGER_DELAY_NEXT_LINE,
-};
-
-enum crtc_event {
- CRTC_EVENT_VSYNC_RISING = 0,
- CRTC_EVENT_VSYNC_FALLING
-};
-
-struct crtc_trigger_info {
- bool enabled;
- struct dc_stream_state *event_source;
- enum crtc_event event;
- enum trigger_delay delay;
-};
-
-struct dc_crtc_timing_adjust {
- uint32_t v_total_min;
- uint32_t v_total_max;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
-};
-
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
@@ -804,6 +760,33 @@ struct dc_crtc_timing {
#endif
};
+#ifndef AMD_EDID_UTILITY
+
+enum trigger_delay {
+ TRIGGER_DELAY_NEXT_PIXEL = 0,
+ TRIGGER_DELAY_NEXT_LINE,
+};
+
+enum crtc_event {
+ CRTC_EVENT_VSYNC_RISING = 0,
+ CRTC_EVENT_VSYNC_FALLING
+};
+
+struct crtc_trigger_info {
+ bool enabled;
+ struct dc_stream_state *event_source;
+ enum crtc_event event;
+ enum trigger_delay delay;
+};
+
+struct dc_crtc_timing_adjust {
+ uint32_t v_total_min;
+ uint32_t v_total_max;
+ uint32_t v_total_mid;
+ uint32_t v_total_mid_frame_num;
+};
+
/* Passed on init */
enum vram_type {
VIDEO_MEMORY_TYPE_GDDR5 = 2,
@@ -874,5 +857,7 @@ struct tg_color {
uint16_t color_b_cb;
};
+#endif /* AMD_EDID_UTILITY */
+
#endif /* DC_HW_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 9ea75db3484e..f24fd19ed93d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -126,7 +126,8 @@ struct dc_link {
unsigned short chip_caps;
unsigned int dpcd_sink_count;
enum edp_revision edp_revision;
- bool psr_enabled;
+ bool psr_feature_enabled;
+ bool psr_allow_active;
/* MST record stream using this link */
struct link_flags {
@@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
+static inline struct dc_link *get_edp_link(const struct dc *dc)
+{
+ int i;
+
+ // report any eDP links, even unconnected DDI's
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
+ return dc->links[i];
+ }
+ return NULL;
+}
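A typical use of the helper (sketch; the policy shown is hypothetical, not part of this patch):

/* Sketch: act on the panel link only when an eDP connector exists. */
struct dc_link *edp_link = get_edp_link(dc);

if (edp_link)
        dc_link_set_psr_allow_active(edp_link, false, true);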
+
/* Set backlight level of an embedded panel (eDP, LVDS).
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
* and 16 bit fractional, where 1.0 is max backlight value.
@@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
-bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool allow_active, bool wait);
bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
@@ -192,6 +205,7 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
+enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 0fa1c26bc20d..fdb6adc37857 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -113,6 +113,21 @@ struct periodic_interrupt_config {
int lines_offset;
};
+union stream_update_flags {
+ struct {
+ uint32_t scaling:1;
+ uint32_t out_tf:1;
+ uint32_t out_csc:1;
+ uint32_t abm_level:1;
+ uint32_t dpms_off:1;
+ uint32_t gamut_remap:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ uint32_t wb_update:1;
+#endif
+ } bits;
+
+ uint32_t raw;
+};
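The bits/raw overlay lets callers set individual dirty flags and then test for any pending work with a single compare. Sketch:

/* Sketch: mark one property dirty, then test whether anything changed. */
stream->update_flags.bits.scaling = 1;

if (stream->update_flags.raw != 0) {
        /* at least one stream property changed since the last commit */
}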
struct dc_stream_state {
// sink is deprecated, new code should not reference
@@ -214,9 +229,14 @@ struct dc_stream_state {
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool is_dsc_enabled;
#endif
+ union stream_update_flags update_flags;
};
+#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF
+
struct dc_stream_update {
+ struct dc_stream_state *stream;
+
struct rect src;
struct rect dst;
struct dc_transfer_func *out_transfer_func;
@@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc,
int num_streams,
const struct dc_static_screen_events *events);
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option);
+
void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index b273735b6a3e..d9be8fc3889f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -25,6 +25,11 @@
#ifndef DC_TYPES_H_
#define DC_TYPES_H_
+#ifndef AMD_EDID_UTILITY
+/* AMD EdidUtility only needs a portion
+ * of this file; including the rest only
+ * causes additional issues.
+ */
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
@@ -33,6 +38,10 @@
#include "dal_types.h"
#include "grph_object_defs.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
+
/* forward declarations */
struct dc_plane_state;
struct dc_stream_state;
@@ -100,6 +109,9 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp cp_psp;
+#endif
};
@@ -159,6 +171,12 @@ enum dc_edid_status {
EDID_THE_SAME,
};
+enum act_return_status {
+ ACT_SUCCESS,
+ ACT_LINK_LOST,
+ ACT_FAILED
+};
+
/* audio capability from EDID*/
struct dc_cea_audio_mode {
uint8_t format_code; /* ucData[0] [6:3]*/
@@ -739,6 +757,9 @@ struct dc_clock_config {
uint32_t current_clock_khz;/*current clock in use*/
};
+#endif /* AMD_EDID_UTILITY */
+/* AMD EDID UTILITY does not need any of the above structures */
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* DSC DPCD capabilities */
union dsc_slice_caps1 {
@@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
#endif
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 58bd131d5b48..b8a3fc505c9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
return true;
}
@@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm)
/* Enable the backlight output */
REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+ /* Disable fractional pwm if configured */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
+ abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
+
/* Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
@@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
- if (abm_dce->base.dmcu_is_running == true)
- abm_dce->base.funcs->set_abm_immediate_disable(*abm);
-
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index c3f9f4185ce8..e472608faf33 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,6 +42,10 @@
#include "reg_helper.h"
+#undef FN
+#define FN(reg_name, field_name) \
+ aux110->shift->field_name, aux110->mask->field_name
+
#define FROM_AUX_ENGINE(ptr) \
container_of((ptr), struct aux_engine_dce110, base)
@@ -55,6 +59,14 @@ enum {
AUX_TIMED_OUT_RETRY_COUNTER = 2,
AUX_DEFER_RETRY_COUNTER = 6
};
+
+#define TIME_OUT_INCREMENT 1016
+#define TIME_OUT_MULTIPLIER_8 8
+#define TIME_OUT_MULTIPLIER_16 16
+#define TIME_OUT_MULTIPLIER_32 32
+#define TIME_OUT_MULTIPLIER_64 64
+#define MAX_TIMEOUT_LENGTH 127
+
static void release_engine(
struct dce_aux *engine)
{
@@ -198,7 +210,7 @@ static void submit_channel_request(
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
/* set the delay and the number of bytes to write */
@@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status(
/* poll to make sure that SW_DONE is asserted */
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
- 10, aux110->timeout_period/10);
+ 10, aux110->polling_timeout_period/10);
value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
@@ -414,20 +426,77 @@ void dce110_engine_destroy(struct dce_aux **engine)
*engine = NULL;
}
+
+static bool dce_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout_in_us)
+{
+ uint32_t multiplier = 0;
+ uint32_t length = 0;
+ struct ddc *ddc_pin = ddc->ddc_pin;
+ struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+
+ /* 1-Update polling timeout period */
+ aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER;
+
+ /* 2-Update aux timeout period length and multiplier */
+ if (timeout_in_us <= TIME_OUT_INCREMENT) {
+ multiplier = 0;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_8;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0)
+ length++;
+ } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) {
+ multiplier = 1;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_16;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0)
+ length++;
+ } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) {
+ multiplier = 2;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_32;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0)
+ length++;
+ } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) {
+ multiplier = 3;
+ length = timeout_in_us/TIME_OUT_MULTIPLIER_64;
+ if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0)
+ length++;
+ }
+
+ length = (length < MAX_TIMEOUT_LENGTH) ? length : MAX_TIMEOUT_LENGTH;
+
+ REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier);
+
+ return true;
+}
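The encoding above picks the smallest multiplier whose range covers the requested timeout and rounds the length up. Worked example for timeout_in_us = 552:

/* 552 <= TIME_OUT_INCREMENT (1016)                       -> multiplier = 0
 * length = 552 / TIME_OUT_MULTIPLIER_8 = 69, remainder 0 -> no round-up
 * 69 < MAX_TIMEOUT_LENGTH (127)                          -> LEN = 69, LEN_MUL = 0
 * polling_timeout_period = 552 * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER
 */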
+
+static struct dce_aux_funcs aux_functions = {
+ .configure_timeout = NULL,
+ .destroy = NULL,
+};
+
struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs)
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable)
{
aux_engine110->base.ddc = NULL;
aux_engine110->base.ctx = ctx;
aux_engine110->base.delay = 0;
aux_engine110->base.max_defer_write_retry = 0;
aux_engine110->base.inst = inst;
- aux_engine110->timeout_period = timeout_period;
+ aux_engine110->polling_timeout_period = timeout_period;
aux_engine110->regs = regs;
+ aux_engine110->mask = mask;
+ aux_engine110->shift = shift;
+ aux_engine110->base.funcs = &aux_functions;
+ if (is_ext_aux_timeout_configurable)
+ aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout;
+
return &aux_engine110->base;
}
@@ -464,8 +533,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- if (!acquire(aux_engine, ddc_pin))
+ if (!acquire(aux_engine, ddc_pin)) {
+ *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE;
return -1;
+ }
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -475,7 +546,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
aux_req.action = i2caux_action_from_payload(payload);
aux_req.address = payload->address;
- aux_req.delay = payload->defer_delay * 10;
+ aux_req.delay = 0;
aux_req.length = payload->length;
aux_req.data = payload->data;
@@ -544,8 +615,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
- if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
+ } else {
+ if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) ||
+ (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) {
+ if (payload->defer_delay > 0)
+ msleep(payload->defer_delay);
+ }
+ }
break;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
@@ -582,6 +660,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
break;
case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
default:
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index ed7fec8fe253..b4b2c79a8073 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -29,6 +29,7 @@
#include "i2caux_interface.h"
#include "inc/hw/aux_engine.h"
+
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
#define AUX_COMMON_REG_LIST0(id)\
SRI(AUX_CONTROL, DP_AUX, id), \
@@ -36,6 +37,7 @@
SRI(AUX_SW_DATA, DP_AUX, id), \
SRI(AUX_SW_CONTROL, DP_AUX, id), \
SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
SRI(AUX_SW_STATUS, DP_AUX, id)
#endif
@@ -55,6 +57,7 @@ struct dce110_aux_registers {
uint32_t AUX_SW_DATA;
uint32_t AUX_SW_CONTROL;
uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL1;
uint32_t AUX_SW_STATUS;
uint32_t AUXN_IMPCAL;
uint32_t AUXP_IMPCAL;
@@ -62,6 +65,156 @@ struct dce110_aux_registers {
uint32_t AUX_RESET_MASK;
};
+#define DCE_AUX_REG_FIELD_LIST(type)\
+ type AUX_EN;\
+ type AUX_RESET;\
+ type AUX_RESET_DONE;\
+ type AUX_REG_RW_CNTL_STATUS;\
+ type AUX_SW_USE_AUX_REG_REQ;\
+ type AUX_SW_DONE_USING_AUX_REG;\
+ type AUX_SW_AUTOINCREMENT_DISABLE;\
+ type AUX_SW_DATA_RW;\
+ type AUX_SW_INDEX;\
+ type AUX_SW_GO;\
+ type AUX_SW_DATA;\
+ type AUX_SW_REPLY_BYTE_COUNT;\
+ type AUX_SW_DONE;\
+ type AUX_SW_DONE_ACK;\
+ type AUXN_IMPCAL_ENABLE;\
+ type AUXP_IMPCAL_ENABLE;\
+ type AUXN_IMPCAL_OVERRIDE_ENABLE;\
+ type AUXP_IMPCAL_OVERRIDE_ENABLE;\
+ type AUX_RX_TIMEOUT_LEN;\
+ type AUX_RX_TIMEOUT_LEN_MUL;\
+ type AUXN_CALOUT_ERROR_AK;\
+ type AUXP_CALOUT_ERROR_AK;\
+ type AUX_SW_START_DELAY;\
+ type AUX_SW_WR_BYTES
+
+#define DCE10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+#define DCE12_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* DCN10 MASK */
+#define DCN10_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\
+ AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\
+ AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh)
+
+/* for all other DCN */
+#define DCN_AUX_MASK_SH_LIST(mask_sh)\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\
+ AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh)
+
+#define AUX_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
enum { /* This is the timeout as defined in DP 1.2a,
* 2.3.4 "Detailed uPacket TX AUX CH State Description".
*/
@@ -97,20 +250,34 @@ struct dce_aux {
uint32_t max_defer_write_retry;
bool acquire_reset;
+ struct dce_aux_funcs *funcs;
+};
+
+struct dce110_aux_registers_mask {
+ DCE_AUX_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce110_aux_registers_shift {
+ DCE_AUX_REG_FIELD_LIST(uint8_t);
};
+
struct aux_engine_dce110 {
struct dce_aux base;
const struct dce110_aux_registers *regs;
+ const struct dce110_aux_registers_mask *mask;
+ const struct dce110_aux_registers_shift *shift;
struct {
uint32_t aux_control;
uint32_t aux_arb_control;
uint32_t aux_sw_data;
uint32_t aux_sw_control;
uint32_t aux_interrupt_control;
+ uint32_t aux_dphy_rx_control1;
+ uint32_t aux_dphy_rx_control0;
uint32_t aux_sw_status;
} addr;
- uint32_t timeout_period;
+ uint32_t polling_timeout_period;
};
struct aux_engine_dce110_init_data {
@@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data {
const struct dce110_aux_registers *regs;
};
-struct dce_aux *dce110_aux_engine_construct(
- struct aux_engine_dce110 *aux_engine110,
+struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
struct dc_context *ctx,
uint32_t inst,
uint32_t timeout_period,
- const struct dce110_aux_registers *regs);
+ const struct dce110_aux_registers *regs,
+ const struct dce110_aux_registers_mask *mask,
+ const struct dce110_aux_registers_shift *shift,
+ bool is_ext_aux_timeout_configurable);
void dce110_engine_destroy(struct dce_aux **engine);
@@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
+
+struct dce_aux_funcs {
+ bool (*configure_timeout)
+ (struct ddc_service *ddc,
+ uint32_t timeout);
+ void (*destroy)
+ (struct aux_engine **ptr);
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 0b86cee4876f..ba995d3f2318 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
- if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
- dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
-
kfree(dmcu_dce);
*dmcu = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index ac04d77058f0..32d145a0d6fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -679,6 +679,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 31b698bf9cfc..8aa937f496c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif(
}
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
@@ -636,11 +636,11 @@ static void dce_mi_free_dmif(
10, 3500);
if (dce_mi->wa.single_head_rdreq_dmif_limit) {
- uint32_t eanble = (total_stream_num > 1) ? 0 :
+ uint32_t enable = (total_stream_num > 1) ? 0 :
dce_mi->wa.single_head_rdreq_dmif_limit;
REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
- ENABLE, eanble);
+ ENABLE, enable);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 76d54885374a..a5e122c721ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = {
.ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
static struct mem_input *dce100_mem_input_create(
struct dc_context *ctx,
uint32_t inst)
@@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -997,6 +1043,8 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.disable_dp_clk_share = true;
+ dc->caps.extended_aux_timeout_support = false;
+
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
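
Note: map_transmitter_id_to_phy_instance() decouples the transmitter enum from the link_enc_regs index: the enum is translated to a dense 0-based PHY instance instead of being used as a raw array index. A sketch of the lookup (values per the switch above; assuming the regs type name from the dce110 link encoder header):

/* Sketch only: TRANSMITTER_UNIPHY_C maps to PHY instance 2, which then
 * indexes the register table instead of the raw enum value. */
int link_regs_id = map_transmitter_id_to_phy_instance(TRANSMITTER_UNIPHY_C);
const struct dce110_link_enc_registers *regs = &link_enc_regs[link_regs_id];
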
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 01a924bf477a..f0e837d14000 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify audio driver for audio modes of monitor */
struct dc *core_dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
@@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
- if (core_dc->res_pool->pp_smu)
- pp_smu = core_dc->res_pool->pp_smu;
-
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
{
struct dc *dc;
- struct pp_smu_funcs *pp_smu = NULL;
struct clk_mgr *clk_mgr;
if (!pipe_ctx || !pipe_ctx->stream)
@@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
- if (dc->res_pool->pp_smu)
- pp_smu = dc->res_pool->pp_smu;
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1169,8 +1161,9 @@ static void build_audio_output(
}
}
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (state->clk_mgr &&
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
@@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_enabled = false;
+ pipe_ctx->stream->link->psr_feature_enabled = false;
return DC_OK;
}
@@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
int i;
- enum connector_id connector_id;
- enum signal_type signal = SIGNAL_TYPE_NONE;
/* We do not know the BIOS back-to-front mapping, so simply blank all;
* it will not hurt for non-DP
@@ -1440,15 +1431,12 @@ static void power_down_encoders(struct dc *dc)
}
for (i = 0; i < dc->link_count; i++) {
- connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
- if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP)) {
+ enum signal_type signal = dc->links[i]->connector_signal;
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT))
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
- if (connector_id == CONNECTOR_ID_EDP)
- signal = SIGNAL_TYPE_EDP;
- }
dc->links[i]->link_enc->funcs->disable_output(
dc->links[i]->link_enc, signal);
@@ -1529,18 +1517,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context)
return NULL;
}
-static struct dc_link *get_edp_link(struct dc *dc)
-{
- int i;
-
- // report any eDP links, even unconnected DDI's
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
- return dc->links[i];
- }
- return NULL;
-}
-
static struct dc_link *get_edp_link_with_sink(
struct dc *dc,
struct dc_state *context)
@@ -1834,7 +1810,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_enabled)
+ if (pipe_ctx->stream->link->psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2464,7 +2440,6 @@ static void dce110_program_front_end_for_pipe(
struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct mem_input *mi = pipe_ctx->plane_res.mi;
- struct pipe_ctx *old_pipe = NULL;
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
@@ -2472,9 +2447,6 @@ static void dce110_program_front_end_for_pipe(
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
- if (dc->current_state)
- old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
-
memset(&adjust, 0, sizeof(adjust));
adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
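
Note: the reworked power_down_encoders() reads each link's connector_signal once and uses it both for the receiver power decision and for disable_output(), instead of reconstructing the type from the connector id (where non-eDP links used to pass a stale SIGNAL_TYPE_NONE). Condensed, the loop now behaves like this sketch:

/* Sketch of the resulting per-link logic after this change. */
int i;

for (i = 0; i < dc->link_count; i++) {
	enum signal_type signal = dc->links[i]->connector_signal;

	if ((signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_DISPLAY_PORT) &&
	    !dc->links[i]->wa_flags.dp_keep_receiver_powered)
		dp_receiver_power_ctrl(dc->links[i], false);

	dc->links[i]->link_enc->funcs->disable_output(
			dc->links[i]->link_enc, signal);
}
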
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 89620adc81d8..83a4dbf6d76e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_110_REG_LIST(id),\
@@ -440,6 +448,30 @@ static const struct dc_plane_cap underlay_plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1293,6 +1339,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.is_apu = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 21a657e79306..97dcc5d0862b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_DCE110_REG_LIST_DCE_BASE(id)\
@@ -417,6 +425,30 @@ static const struct dc_plane_cap plane_cap = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1163,7 +1209,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 7c52f7f9196c..63543f6918ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE12_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE12_AUX_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_120_REG_LIST(id),\
@@ -356,6 +364,30 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, id)\
[index] = {\
CS_COMMON_REG_LIST_DCE_112(id),\
@@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
@@ -1006,7 +1052,7 @@ static bool construct(
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
dc->caps.psp_setup_panel_mode = true;
-
+ dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 643ccb0ade00..3e8d4b49f279 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCE10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCE10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define aux_engine_regs(id)\
[id] = {\
AUX_COMMON_REG_LIST(id), \
@@ -431,6 +439,30 @@ static const struct dce_abm_mask abm_mask = {
#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
#endif
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ case TRANSMITTER_UNIPHY_G:
+ return 6;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
@@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create(
{
struct dce110_link_encoder *enc110 =
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc110)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dce110_link_encoder_construct(enc110,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source]);
return &enc110->base;
@@ -895,6 +941,7 @@ static bool dce80_construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.extended_aux_timeout_support = false;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index d8b2da18db39..997e9582edc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
-static bool dpp_get_optimal_number_of_taps(
+bool dpp1_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
@@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
.dpp_set_csc_default = dpp1_cm_set_output_csc_default,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index e2c613611ac9..1d4a7d640334 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
+bool dpp1_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
void dpp1_construct(struct dcn10_dpp *dpp1,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 001db49e4bb2..14d1be6c66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -841,6 +841,14 @@ void min_set_viewport(
REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
PRI_VIEWPORT_X_START_C, viewport_c->x,
PRI_VIEWPORT_Y_START_C, viewport_c->y);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0,
+ SEC_VIEWPORT_WIDTH_C, viewport_c->width,
+ SEC_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0,
+ SEC_VIEWPORT_X_START_C, viewport_c->x,
+ SEC_VIEWPORT_Y_START_C, viewport_c->y);
}
void hubp1_read_state_common(struct hubp *hubp)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index cb20d10288c0..ae70d9c0aa1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -47,6 +47,8 @@
SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
@@ -57,8 +59,12 @@
SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
@@ -150,6 +156,8 @@
uint32_t DCSURF_SEC_VIEWPORT_START; \
uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_SEC_VIEWPORT_START_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
@@ -160,8 +168,12 @@
uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \
uint32_t DCSURF_SURFACE_INUSE; \
uint32_t DCSURF_SURFACE_INUSE_HIGH; \
uint32_t DCSURF_SURFACE_INUSE_C; \
@@ -279,6 +291,10 @@
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\
+ HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
@@ -289,8 +305,12 @@
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
@@ -469,6 +489,10 @@
type PRI_VIEWPORT_HEIGHT_C; \
type PRI_VIEWPORT_X_START_C; \
type PRI_VIEWPORT_Y_START_C; \
+ type SEC_VIEWPORT_WIDTH_C; \
+ type SEC_VIEWPORT_HEIGHT_C; \
+ type SEC_VIEWPORT_X_START_C; \
+ type SEC_VIEWPORT_Y_START_C; \
type PRIMARY_SURFACE_ADDRESS_HIGH;\
type PRIMARY_SURFACE_ADDRESS;\
type SECONDARY_SURFACE_ADDRESS_HIGH;\
@@ -479,8 +503,12 @@
type SECONDARY_META_SURFACE_ADDRESS;\
type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_SURFACE_ADDRESS_C;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_SURFACE_ADDRESS_C;\
type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type SECONDARY_META_SURFACE_ADDRESS_C;\
type SURFACE_INUSE_ADDRESS;\
type SURFACE_INUSE_ADDRESS_HIGH;\
type SURFACE_INUSE_ADDRESS_C;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 60123db7ba02..eb91432621ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc)
int i;
bool allow_self_fresh_force_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
+ return;
+#endif
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
@@ -1300,6 +1304,10 @@ static void dcn10_init_hw(struct dc *dc)
}
dc->hwss.enable_power_gating_plane(dc->hwseq, true);
+
+ if (dc->clk_mgr->funcs->notify_wm_ranges)
+ dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
+
}
static void dcn10_reset_hw_ctx_wrap(
@@ -1452,15 +1460,15 @@ static void log_tf(struct dc_context *ctx,
DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
for (i = 0; i < hw_points_num; i++) {
- DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
- DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
- DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
- DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
+ DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
+ DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
+ DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
}
}
@@ -2304,8 +2312,7 @@ void update_dchubp_dpp(
dc->res_pool->dccg->funcs->update_dpp_dto(
dc->res_pool->dccg,
dpp->inst,
- pipe_ctx->plane_res.bw.dppclk_khz,
- false);
+ pipe_ctx->plane_res.bw.dppclk_khz);
else
dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
dc->clk_mgr->clks.dispclk_khz / 2 :
@@ -2512,8 +2519,10 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
}
if (pipe_ctx->plane_state != NULL)
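
Note: dcn10_init_hw() gates the new watermark-range notification on the hook being populated, so clock managers without the callback are unaffected; likewise setup_vupdate_interrupt is now programmed before the pixel data is blanked. The optional-hook pattern, as a sketch (hypothetical wrapper name):

/* Sketch only: call the clk_mgr hook iff this ASIC implements it. */
static void example_notify_wm_ranges(struct dc *dc)
{
	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
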
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 8bf5f0f2301d..88fcc395adf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -113,6 +113,20 @@ struct dcn10_link_enc_registers {
uint32_t DIG_LANE_ENABLE;
/* UNIPHY */
uint32_t CHANNEL_XBAR_CNTL;
+ /* DPCS */
+ uint32_t RDPCSTX_PHY_CNTL3;
+ uint32_t RDPCSTX_PHY_CNTL4;
+ uint32_t RDPCSTX_PHY_CNTL5;
+ uint32_t RDPCSTX_PHY_CNTL6;
+ uint32_t RDPCSTX_PHY_CNTL7;
+ uint32_t RDPCSTX_PHY_CNTL8;
+ uint32_t RDPCSTX_PHY_CNTL9;
+ uint32_t RDPCSTX_PHY_CNTL10;
+ uint32_t RDPCSTX_PHY_CNTL11;
+ uint32_t RDPCSTX_PHY_CNTL12;
+ uint32_t RDPCSTX_PHY_CNTL13;
+ uint32_t RDPCSTX_PHY_CNTL14;
+ uint32_t RDPCSTX_PHY_CNTL15;
/* indirect registers */
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2;
uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3;
@@ -250,6 +264,10 @@ struct dcn10_link_enc_registers {
type RDPCS_EXT_REFCLK_EN;\
type RDPCS_TX_FIFO_EN;\
type UNIPHY_LINK_ENABLE;\
+ type UNIPHY_CHANNEL0_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL1_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL2_XBAR_SOURCE;\
+ type UNIPHY_CHANNEL3_XBAR_SOURCE;\
type UNIPHY_CHANNEL0_INVERT;\
type UNIPHY_CHANNEL1_INVERT;\
type UNIPHY_CHANNEL2_INVERT;\
@@ -337,16 +355,46 @@ struct dcn10_link_enc_registers {
type RDPCS_TX_FIFO_ERROR_MASK;\
type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\
type RDPCS_DPALT_4LANE_TOGGLE_MASK;\
+ type RDPCS_PHY_DPALT_DP4;\
type RDPCS_PHY_DPALT_DISABLE;\
type RDPCS_PHY_DPALT_DISABLE_ACK;\
type RDPCS_PHY_DP_MPLLB_V2I;\
type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\
+ type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\
+ type RDPCS_PHY_RX_VREF_CTRL;\
type RDPCS_PHY_DP_MPLLB_CP_INT;\
type RDPCS_PHY_DP_MPLLB_CP_PROP;\
type RDPCS_PHY_RX_REF_LD_VAL;\
type RDPCS_PHY_RX_VCO_LD_VAL;\
type DPCSTX_DEBUG_CONFIG; \
- type RDPCSTX_DEBUG_CONFIG
+ type RDPCSTX_DEBUG_CONFIG; \
+ type RDPCS_PHY_DP_TX0_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX0_EQ_PRE;\
+ type RDPCS_PHY_DP_TX0_EQ_POST;\
+ type RDPCS_PHY_DP_TX1_EQ_MAIN;\
+ type RDPCS_PHY_DP_TX1_EQ_PRE;\
+ type RDPCS_PHY_DP_TX1_EQ_POST;\
+ type RDPCS_PHY_DP_TX2_EQ_MAIN;\
+ type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\
+ type RDPCS_PHY_DP_TX2_EQ_PRE;\
+ type RDPCS_PHY_DP_TX2_EQ_POST;\
+ type RDPCS_PHY_DP_TX3_EQ_MAIN;\
+ type RDPCS_PHY_DCO_RANGE;\
+ type RDPCS_PHY_DCO_FINETUNE;\
+ type RDPCS_PHY_DP_TX3_EQ_PRE;\
+ type RDPCS_PHY_DP_TX3_EQ_POST;\
+ type RDPCS_PHY_SUP_PRE_HP;\
+ type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\
+ type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\
+ type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\
+ type UNIPHYA_SOFT_RESET;\
+ type UNIPHYB_SOFT_RESET;\
+ type UNIPHYC_SOFT_RESET;\
+ type UNIPHYD_SOFT_RESET;\
+ type UNIPHYE_SOFT_RESET;\
+ type UNIPHYF_SOFT_RESET
#define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \
type DIG_LANE0EN;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index e9ebbbe256b4..0a9ad692f541 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding(
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
break;
case PIXEL_ENCODING_YCBCR422:
- REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
break;
case PIXEL_ENCODING_YCBCR420:
REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
@@ -237,6 +240,9 @@ void opp1_set_dyn_expansion(
FMT_DYNAMIC_EXP_EN, 0,
FMT_DYNAMIC_EXP_MODE, 0);
+ if (opp->dyn_expansion == DYN_EXPANSION_DISABLE)
+ return;
+
/*00 - 10-bit -> 12-bit dynamic expansion*/
/*01 - 8-bit -> 12-bit dynamic expansion*/
if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index 0f10adea000c..2c0ecfa5a643 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -116,6 +116,8 @@
type FMT_RAND_G_SEED; \
type FMT_RAND_B_SEED; \
type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS; \
type FMT_CLAMP_DATA_EN; \
type FMT_CLAMP_COLOR_FORMAT; \
type FMT_DYNAMIC_EXP_EN; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index e74a07d03fde..dabccbd49ad4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc)
return ret;
}
-bool optc1_is_matching_timing(struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing)
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing)
{
- struct dc_crtc_timing hw_crtc_timing = {0};
struct dcn_otg_state s = {0};
- if (tg == NULL || otg_timing == NULL)
+ if (tg == NULL || hw_crtc_timing == NULL)
return false;
optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
- hw_crtc_timing.h_total = s.h_total + 1;
- hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
- hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start;
- hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
+ hw_crtc_timing->h_total = s.h_total + 1;
+ hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end);
+ hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start;
+ hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start;
- hw_crtc_timing.v_total = s.v_total + 1;
- hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
- hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start;
- hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
-
- if (otg_timing->h_total != hw_crtc_timing.h_total)
- return false;
-
- if (otg_timing->h_border_left != hw_crtc_timing.h_border_left)
- return false;
-
- if (otg_timing->h_addressable != hw_crtc_timing.h_addressable)
- return false;
-
- if (otg_timing->h_border_right != hw_crtc_timing.h_border_right)
- return false;
-
- if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch)
- return false;
-
- if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width)
- return false;
-
- if (otg_timing->v_total != hw_crtc_timing.v_total)
- return false;
-
- if (otg_timing->v_border_top != hw_crtc_timing.v_border_top)
- return false;
-
- if (otg_timing->v_addressable != hw_crtc_timing.v_addressable)
- return false;
-
- if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
- return false;
-
- if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width)
- return false;
+ hw_crtc_timing->v_total = s.v_total + 1;
+ hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end);
+ hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start;
+ hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start;
return true;
}
@@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.get_frame_count = optc1_get_vblank_counter,
.get_scanoutpos = optc1_get_crtc_scanoutpos,
.get_otg_active_size = optc1_get_otg_active_size,
- .is_matching_timing = optc1_is_matching_timing,
.set_early_control = optc1_set_early_control,
/* used by enable_timing_synchronization. Not needed for FPGA */
.wait_for_state = optc1_wait_for_state,
@@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.configure_crc = optc1_configure_crc,
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc1_program_manual_trigger,
- .setup_manual_trigger = optc1_setup_manual_trigger
+ .setup_manual_trigger = optc1_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn10_timing_generator_init(struct optc *optc1)
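
Note: optc1_is_matching_timing() is replaced by optc1_get_hw_timing(), which only reads back the programmed timing; any equality check now belongs to the caller. A hypothetical caller-side reimplementation of the old match test on top of the new hook (comparing the fields the read-back fills in):

/* Sketch only: caller-side timing match built on the new read-back hook. */
static bool example_timing_matches_hw(struct timing_generator *tg,
		const struct dc_crtc_timing *t)
{
	struct dc_crtc_timing hw = {0};

	if (!tg->funcs->get_hw_timing(tg, &hw))
		return false;

	return t->h_total == hw.h_total &&
	       t->h_addressable == hw.h_addressable &&
	       t->h_front_porch == hw.h_front_porch &&
	       t->h_sync_width == hw.h_sync_width &&
	       t->v_total == hw.v_total &&
	       t->v_addressable == hw.v_addressable &&
	       t->v_front_porch == hw.v_front_porch &&
	       t->v_sync_width == hw.v_sync_width;
}
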
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 83575599672e..c8d795b335ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -547,9 +547,8 @@ struct dcn_otg_state {
void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
-bool optc1_is_matching_timing(
- struct timing_generator *tg,
- const struct dc_crtc_timing *otg_timing);
+bool optc1_get_hw_timing(struct timing_generator *tg,
+ struct dc_crtc_timing *hw_crtc_timing);
bool optc1_validate_timing(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1599bb971111..15640aedd664 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN10_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN10_AUX_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN10(id),\
@@ -471,6 +479,24 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
#define clk_src_regs(index, pllid)\
[index] = {\
CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
@@ -642,7 +672,10 @@ struct dce_aux *dcn10_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -751,14 +784,18 @@ struct link_encoder *dcn10_link_encoder_create(
{
struct dcn10_link_encoder *enc10 =
kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc10)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn10_link_encoder_construct(enc10,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -1308,6 +1345,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ dc->caps.extended_aux_timeout_support = false;
+
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 9aa258f3550b..06e5bbb4545c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg(
return tg_inst;
}
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth)
+{
+ uint32_t hw_encoding = 0;
+ uint32_t hw_depth = 0;
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (enc == NULL ||
+ encoding == NULL ||
+ depth == NULL)
+ return false;
+
+ REG_GET_2(DP_PIXEL_FORMAT,
+ DP_PIXEL_ENCODING, &hw_encoding,
+ DP_COMPONENT_DEPTH, &hw_depth);
+
+ switch (hw_depth) {
+ case DP_COMPONENT_PIXEL_DEPTH_6BPC:
+ *depth = COLOR_DEPTH_666;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_8BPC:
+ *depth = COLOR_DEPTH_888;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_10BPC:
+ *depth = COLOR_DEPTH_101010;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_12BPC:
+ *depth = COLOR_DEPTH_121212;
+ break;
+ case DP_COMPONENT_PIXEL_DEPTH_16BPC:
+ *depth = COLOR_DEPTH_161616;
+ break;
+ default:
+ *depth = COLOR_DEPTH_UNDEFINED;
+ break;
+ }
+
+ switch (hw_encoding) {
+ case DP_PIXEL_ENCODING_TYPE_RGB444:
+ *encoding = PIXEL_ENCODING_RGB;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR422:
+ *encoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR444:
+ case DP_PIXEL_ENCODING_TYPE_Y_ONLY:
+ *encoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ case DP_PIXEL_ENCODING_TYPE_YCBCR420:
+ *encoding = PIXEL_ENCODING_YCBCR420;
+ break;
+ default:
+ *encoding = PIXEL_ENCODING_UNDEFINED;
+ break;
+ }
+ return true;
+}
+
static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dp_set_stream_attribute =
enc1_stream_encoder_dp_set_stream_attribute,
@@ -1589,6 +1649,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format,
};
void dcn10_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a512cbea00d1..c9cbc21d121e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -621,4 +621,9 @@ void get_audio_clock_info(
void enc1_reset_hdmi_stream_attribute(
struct stream_encoder *enc);
+bool enc1_stream_encoder_dp_get_pixel_format(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#endif /* __DC_STREAM_ENCODER_DCN10_H__ */
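
Note: enc1_stream_encoder_dp_get_pixel_format() decodes the live DP_PIXEL_FORMAT register into dc enums, so callers can verify what the hardware is actually emitting. A sketch of such a caller (hypothetical helper):

/* Sketch only: read back what the DP stream encoder is sending. */
static bool example_hw_is_ycbcr420(struct stream_encoder *enc)
{
	enum dc_pixel_encoding encoding = PIXEL_ENCODING_UNDEFINED;
	enum dc_color_depth depth = COLOR_DEPTH_UNDEFINED;

	if (!enc->funcs->dp_get_pixel_format ||
	    !enc->funcs->dp_get_pixel_format(enc, &encoding, &depth))
		return false;

	return encoding == PIXEL_ENCODING_YCBCR420;
}
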
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 16476ed25536..1e1151356e60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -44,16 +44,12 @@
#define DC_LOGGER \
dccg->ctx->logger
-void dccg2_update_dpp_dto(struct dccg *dccg,
- int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only)
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
- int current_phase, current_modulo;
ASSERT(req_dppclk <= ref_dppclk);
/* need to clamp to 8 bits */
@@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg,
if (req_dppclk > ref_dppclk)
req_dppclk = ref_dppclk;
}
-
- REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst],
- DPPCLK0_DTO_PHASE, &current_phase,
- DPPCLK0_DTO_MODULO, &current_modulo);
-
- if (reduce_divider_only) {
- // requested phase/modulo greater than current
- if (req_dppclk * current_modulo >= current_phase * ref_dppclk) {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, current_phase,
- DPPCLK0_DTO_MODULO, current_modulo);
- }
- } else {
- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
- DPPCLK0_DTO_PHASE, req_dppclk,
- DPPCLK0_DTO_MODULO, ref_dppclk);
- }
-
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, req_dppclk,
+ DPPCLK0_DTO_MODULO, ref_dppclk);
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 1);
} else {
@@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
void dccg2_init(struct dccg *dccg)
{
- struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- // Fallthrough intentional to program all available dpp_dto's
- switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
- case 6:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
- /* Fall through */
- case 5:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
- /* Fall through */
- case 4:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
- /* Fall through */
- case 3:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
- /* Fall through */
- case 2:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
- /* Fall through */
- case 1:
- REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
- break;
- default:
- ASSERT(false);
- break;
- }
}
static const struct dccg_funcs dccg2_funcs = {
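
Note: with the divider-only path gone, the DTO is always programmed directly with phase = req_dppclk and modulo = ref_dppclk. Assuming the usual DTO relation f_out = f_ref * phase / modulo, the output is exactly the requested clock (after both values are clamped to 8 bits by the same shift), which is why the read-modify-compare logic could be dropped. A standalone illustration of the arithmetic:

/* Sketch only: the phase/modulo relation the DTO programming relies on. */
#include <stdio.h>

int main(void)
{
	long ref_dppclk = 600000; /* kHz, illustrative reference clock */
	long req_dppclk = 400000; /* kHz, illustrative request */

	/* f_out = f_ref * phase / modulo, with phase = req, modulo = ref */
	long f_out = ref_dppclk * req_dppclk / ref_dppclk;

	printf("DTO output: %ld kHz\n", f_out); /* prints 400000 */
	return 0;
}
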
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 74a074a873cd..2205cb0204e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -97,7 +97,7 @@ struct dcn_dccg {
const struct dccg_mask *dccg_mask;
};
-void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only);
+void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 2f5aade1e882..4d7e45892f08 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
/* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->viewport.width != scl_data->h_active &&
scl_data->viewport.height != scl_data->v_active &&
@@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
.dpp_read_state = dpp20_read_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
- .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps,
+ .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps,
.dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
.dpp_set_csc_adjustment = NULL,
.dpp_set_csc_default = NULL,
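
Note: with the viewport-width special case deleted, DCN2.0 reuses the DCN1.x tap-selection helper and the dpp2_ declaration disappears from the header below. Callers are unaffected because they always dispatch through the funcs table:

/* Sketch only: callers go through the dpp funcs pointer, so swapping
 * the DCN2.0 entry to the shared DCN1.x helper needs no caller change. */
static bool example_pick_taps(struct dpp *dpp, struct scaler_data *scl_data,
		const struct scaling_taps *in_taps)
{
	return dpp->funcs->dpp_get_optimal_number_of_taps(dpp, scl_data, in_taps);
}
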
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
index 290b2854bd2c..5b03b737b1d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
@@ -30,16 +30,20 @@
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
-#define TF_REG_LIST_DCN20(id) \
- TF_REG_LIST_DCN(id), \
+#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \
SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id)
+
+#define TF_REG_LIST_DCN20_COMMON(id) \
SRI(CM_BLNDGAM_CONTROL, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \
@@ -66,9 +70,6 @@
SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \
SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \
- SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \
SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \
@@ -147,7 +148,12 @@
SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \
SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \
+ SRI(CM_SHAPER_LUT_INDEX, CM, id)
+
+#define TF_REG_LIST_DCN20(id) \
+ TF_REG_LIST_DCN(id), \
+ TF_REG_LIST_DCN20_COMMON(id), \
+ TF_REG_LIST_DCN20_COMMON_UPDATED(id), \
SRI(CURSOR_CONTROL, CURSOR0_, id), \
SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \
SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \
@@ -166,27 +172,41 @@
SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\
SRI(DSCL_MEM_PWR_CTRL, DSCL, id)
-#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
- TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+
+#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\
+ TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -261,18 +281,9 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
@@ -341,9 +352,6 @@
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \
TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \
TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \
@@ -356,7 +364,6 @@
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \
- TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \
TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \
TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \
@@ -521,9 +528,14 @@
TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \
- TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh)
+
+
+#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \
+ TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \
TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \
TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \
TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
@@ -560,6 +572,7 @@
TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh)
+
#define TF_REG_FIELD_LIST_DCN2_0(type) \
TF_REG_FIELD_LIST(type) \
type CM_BLNDGAM_LUT_DATA; \
@@ -593,6 +606,7 @@
type OBUF_MEM_PWR_FORCE;\
type LUT_MEM_PWR_FORCE
+
struct dcn2_dpp_shift {
TF_REG_FIELD_LIST_DCN2_0(uint8_t);
};
@@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier(
struct dpp *dpp_base,
uint32_t multiplier);
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps);
-
bool dpp2_construct(struct dcn20_dpp *dpp2,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 1b419407af94..63eb377ed9c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
dsc_enc_caps->color_formats.bits.RGB = 1;
dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
- dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
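
Note: flipping YCBCR_SIMPLE_422 to 1 advertises simple 4:2:2 support in the DSC encoder caps, so mode validation can accept such streams. A sketch of the kind of capability check a caller performs (hypothetical helper):

/* Sketch only: test the advertised DSC color-format bit. */
static bool example_dsc_supports_simple_422(const struct dsc_enc_caps *caps)
{
	return caps->color_formats.bits.YCBCR_SIMPLE_422 != 0;
}
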
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
index cd8bc92ce3ba..880954ac0b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
@@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
struct scaling_taps num_taps)
{
uint32_t h_ratio_luma = 1;
- uint32_t h_ratio_chroma = 1;
uint32_t h_taps_luma = num_taps.h_taps;
uint32_t h_taps_chroma = num_taps.h_taps_c;
int32_t h_init_phase_luma = 0;
@@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20,
h_ratio_luma = -1;
else
h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5;
- h_ratio_chroma = h_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma);
@@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
enum dwb_subsample_position subsample_position)
{
uint32_t v_ratio_luma = 1;
- uint32_t v_ratio_chroma = 1;
uint32_t v_taps_luma = num_taps.v_taps;
uint32_t v_taps_chroma = num_taps.v_taps_c;
int32_t v_init_phase_luma = 0;
@@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20,
v_ratio_luma = -1;
else
v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5;
- v_ratio_chroma = v_ratio_luma * 2;
/*Program ratio*/
REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index b83c022e2c6f..8b8438566101 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl
}
static void hubbub2_det_request_size(
+ unsigned int detile_buf_size,
unsigned int height,
unsigned int width,
unsigned int bpe,
bool *req128_horz_wc,
bool *req128_vert_wc)
{
- unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
-
unsigned int blk256_height = 0;
unsigned int blk256_width = 0;
unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
@@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub,
&segment_order_horz, &segment_order_vert))
return false;
- hubbub2_det_request_size(input->surface_size.height, input->surface_size.width,
+ hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size,
+ input->surface_size.height, input->surface_size.width,
bpe, &req128_horz_wc, &req128_vert_wc);
if (!req128_horz_wc && !req128_vert_wc) {
@@ -588,7 +588,7 @@ static void hubbub2_program_watermarks(
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
}
static const struct hubbub_funcs hubbub2_funcs = {
@@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = {
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub2_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
- .program_watermarks = hubbub2_program_watermarks
+ .program_watermarks = hubbub2_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
@@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
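
Aside (not part of the patch): the hubbub change above swaps a hardcoded 164KB local for a detile_buf_size field set at construct time, so ASIC variants can override the size without touching the request-size logic that consumes it. A minimal compilable sketch of the pattern; every model_* name is illustrative, not the driver's:

#include <stdio.h>

/* Illustrative model only: the buffer size is a per-instance field. */
struct model_hubbub {
	unsigned int detile_buf_size;
};

static void model_construct_dcn20(struct model_hubbub *h)
{
	h->detile_buf_size = 164 * 1024;	/* 164KB, as in the patch */
}

static void model_det_request_size(unsigned int detile_buf_size)
{
	/* Stand-in for hubbub2_det_request_size(), which now takes the
	 * size as a parameter instead of hardcoding a local constant. */
	printf("sizing requests against a %u byte detile buffer\n",
	       detile_buf_size);
}

int main(void)
{
	struct model_hubbub h;

	model_construct_dcn20(&h);
	model_det_request_size(h.detile_buf_size);
	return 0;
}
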
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 626117d3b4e9..501532dd523a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -81,6 +81,7 @@ struct dcn20_hubbub {
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
struct dcn20_vmid vmid[16];
+ unsigned int detile_buf_size;
};
void hubbub2_construct(struct dcn20_hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 1212da12c414..921a36668ced 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
return true;
}
-static bool dcn20_set_blend_lut(
+bool dcn20_set_blend_lut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut(
return result;
}
-static bool dcn20_set_shaper_3dlut(
+bool dcn20_set_shaper_3dlut(
struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
@@ -999,72 +999,6 @@ void dcn20_enable_plane(
}
-static void dcn20_program_pipe(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- pipe_ctx->plane_state->update_flags.bits.full_update =
- context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update;
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dcn20_enable_plane(dc, pipe_ctx, context);
-
- update_dchubp_dpp(dc, pipe_ctx, context);
-
- set_hdr_multiplier(pipe_ctx);
-
- if (pipe_ctx->plane_state->update_flags.bits.full_update ||
- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
- pipe_ctx->plane_state->update_flags.bits.gamma_change)
- dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
- /* dcn10_translate_regamma_to_hw_format takes 750us to finish
- * only do gamma programming for full update.
- * TODO: This can be further optimized/cleaned up
- * Always call this for now since it does memcmp inside before
- * doing heavy calculation and programming
- */
- if (pipe_ctx->plane_state->update_flags.bits.full_update)
- dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
-}
-
-static void dcn20_program_all_pipe_in_tree(
- struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_state *context)
-{
- if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) {
- bool blank = !is_pipe_tree_visible(pipe_ctx);
-
- pipe_ctx->stream_res.tg->funcs->program_global_sync(
- pipe_ctx->stream_res.tg,
- pipe_ctx->pipe_dlg_param.vready_offset,
- pipe_ctx->pipe_dlg_param.vstartup_start,
- pipe_ctx->pipe_dlg_param.vupdate_offset,
- pipe_ctx->pipe_dlg_param.vupdate_width);
-
- pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
-
- dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
-
- if (dc->hwss.update_odm)
- dc->hwss.update_odm(dc, context, pipe_ctx);
- }
-
- if (pipe_ctx->plane_state != NULL)
- dcn20_program_pipe(dc, pipe_ctx, context);
-
- if (pipe_ctx->bottom_pipe != NULL) {
- ASSERT(pipe_ctx->bottom_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
- } else if (pipe_ctx->next_odm_pipe != NULL) {
- ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx);
- dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context);
- }
-}
-
void dcn20_pipe_control_lock_global(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1124,114 +1058,456 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_apply_ctx_for_surface(
- struct dc *dc,
- const struct dc_stream_state *stream,
- int num_planes,
- struct dc_state *context)
+static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
- int i;
- struct timing_generator *tg;
- bool removed_pipe[6] = { false };
- bool interdependent_update = false;
- struct pipe_ctx *top_pipe_to_program =
- find_top_pipe_for_stream(dc, context, stream);
- struct pipe_ctx *prev_top_pipe_to_program =
- find_top_pipe_for_stream(dc, dc->current_state, stream);
- DC_LOGGER_INIT(dc->ctx->logger);
+ new_pipe->update_flags.raw = 0;
- if (!top_pipe_to_program)
+ /* Exit on unchanged, unused pipe */
+ if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
+ /* Detect pipe enable/disable */
+ if (!old_pipe->plane_state && new_pipe->plane_state) {
+ new_pipe->update_flags.bits.enable = 1;
+ new_pipe->update_flags.bits.mpcc = 1;
+ new_pipe->update_flags.bits.dppclk = 1;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ new_pipe->update_flags.bits.gamut_remap = 1;
+ new_pipe->update_flags.bits.scaler = 1;
+ new_pipe->update_flags.bits.viewport = 1;
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ new_pipe->update_flags.bits.odm = 1;
+ new_pipe->update_flags.bits.global_sync = 1;
+ }
+ return;
+ }
+ if (old_pipe->plane_state && !new_pipe->plane_state) {
+ new_pipe->update_flags.bits.disable = 1;
+ return;
+ }
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- pipe_ctx->stream == old_pipe_ctx->stream)
- pipe_ctx->stream_res.gsl_group =
- old_pipe_ctx->stream_res.gsl_group;
+ /* Detect top pipe only changes */
+ if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
+ /* Detect odm changes */
+ if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
+ || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
+ || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.odm = 1;
+
+ /* Detect global sync changes */
+ if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
+ || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
+ || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
+ || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
+ new_pipe->update_flags.bits.global_sync = 1;
}
- tg = top_pipe_to_program->stream_res.tg;
+ /*
+ * Detect opp / tg change; only set on change, not on enable.
+ * Assume mpcc inst == pipe index; if not, this code needs to be
+ * updated, since mpcc is what is affected by these changes. In fact,
+ * the whole sequence currently makes this assumption, given how hubp
+ * reset is matched to the same-index mpcc reset.
+ */
+ if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+ new_pipe->update_flags.bits.opp_changed = 1;
+ if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
+ new_pipe->update_flags.bits.tg_changed = 1;
+
+ /* Detect mpcc blending changes, only dpp inst and bot matter here */
+ if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
+ || old_pipe->stream_res.opp != new_pipe->stream_res.opp
+ || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
+ || (old_pipe->bottom_pipe && new_pipe->bottom_pipe
+ && old_pipe->bottom_pipe->plane_res.mpcc_inst
+ != new_pipe->bottom_pipe->plane_res.mpcc_inst))
+ new_pipe->update_flags.bits.mpcc = 1;
+
+ /* Detect dppclk change */
+ if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
+ new_pipe->update_flags.bits.dppclk = 1;
+
+ /* Check for scl update */
+ if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
+ new_pipe->update_flags.bits.scaler = 1;
+ /* Check for vp update */
+ if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
+ || memcmp(&old_pipe->plane_res.scl_data.viewport_c,
+ &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
+ new_pipe->update_flags.bits.viewport = 1;
+
+ /* Detect dlg/ttu/rq updates */
+ {
+ struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
+ struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
+
+ /* Detect pipe interdependent updates */
+ if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
+ old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
+ old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
+ old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
+ old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
+ old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
+ old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
+ old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
+ old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
+ old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
+ old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
+ old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
+ old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
+ old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
+ old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
+ old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
+ old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
+ old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
+ old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
+ old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
+ old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
+ old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
+ old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
+ old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
+ new_pipe->update_flags.bits.hubp_interdependent = 1;
+ }
+ /* Detect any other updates to ttu/rq/dlg */
+ if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
+ memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
+ memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
+ new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ }
+}
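
Aside (not part of the patch): dcn20_detect_pipe_changes() leans on an update_flags union that pairs a raw word with a bitfield view, so one compare answers "does this pipe need anything?" while the detect code sets individual bits. A compilable sketch of that shape, with an abbreviated, illustrative field list:

#include <stdint.h>
#include <stdio.h>

union model_update_flags {
	struct {
		uint32_t enable : 1;
		uint32_t disable : 1;
		uint32_t odm : 1;
		uint32_t global_sync : 1;
		uint32_t mpcc : 1;
		uint32_t dppclk : 1;
		uint32_t scaler : 1;
		uint32_t viewport : 1;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union model_update_flags f = { .raw = 0 };

	f.bits.scaler = 1;	/* detect code sets specific bits */
	if (f.raw)		/* program code tests them all at once */
		printf("pipe needs programming\n");
	return 0;
}
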
- interdependent_update = top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update;
+static void dcn20_update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
- if (interdependent_update)
- lock_all_pipes(dc, context, true);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, true);
+ if (pipe_ctx->update_flags.bits.dppclk)
+ dpp->funcs->dpp_dppclk_control(dpp, false, true);
- if (num_planes == 0) {
- /* OTG blank before remove all front end */
- dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
+ /* TODO: Need an input parameter to tell the current DCHUB pipe which OTG
+ * it is tied to. VTG is within DCHUBBUB, which is a common block shared
+ * by each pipe's HUBP. VTG has a 1:1 mapping with OTG; each pipe's HUBP
+ * selects which VTG to use.
+ */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+ }
+ if (pipe_ctx->update_flags.bits.hubp_interdependent)
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ struct dc_bias_and_scale bns_params = {0};
+
+ // program the input csc
+ dpp->funcs->dpp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO,
+ plane_state->input_csc_color_matrix,
+ plane_state->color_space,
+ NULL);
+
+ if (dpp->funcs->dpp_program_bias_and_scale) {
+ // TODO: for CNVC, set scale and bias registers if necessary
+ dcn10_build_prescale_params(&bns_params, plane_state);
+ dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
+ }
}
- /* Disconnect unused mpcc */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx =
- &dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
+ if (pipe_ctx->update_flags.bits.mpcc
+ || plane_state->update_flags.bits.global_alpha_change
+ || plane_state->update_flags.bits.per_pixel_alpha_change) {
+ /* Need mpcc to be idle if changing opp */
+ if (pipe_ctx->update_flags.bits.opp_changed) {
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ int mpcc_inst;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
+ continue;
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ }
}
+ dc->hwss.update_mpcc(dc, pipe_ctx);
+ }
- if ((!pipe_ctx->plane_state ||
- pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
- old_pipe_ctx->plane_state &&
- old_pipe_ctx->stream_res.tg == tg) {
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
- dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
+ hubp->funcs->mem_program_viewport(
+ hubp,
+ &pipe_ctx->plane_res.scl_data.viewport,
+ &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /* Any updates are handled in the dc interface; just need to apply the existing state for plane enable */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
+ && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ dc->hwss.set_cursor_position(pipe_ctx);
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ }
- DC_LOG_DC("Reset mpcc for pipe %d\n",
- old_pipe_ctx->pipe_idx);
- }
+ /* Any updates are handled in the dc interface; just need to apply
+ * the existing state for plane enable / opp change */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->stream->update_flags.bits.gamut_remap
+ || pipe_ctx->stream->update_flags.bits.out_csc) {
+ /* dpp/cm gamut remap */
+ dc->hwss.program_gamut_remap(pipe_ctx);
+
+ /* call the dcn2 method which uses mpc csc */
+ dc->hwss.program_output_csc(dc,
+ pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix,
+ hubp->opp_id);
}
- if (num_planes > 0)
- dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror,
+ 0);
+ hubp->power_gated = false;
+ }
- /* Program secondary blending tree and writeback pipes */
- if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
- dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
+ if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
- if (interdependent_update)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (pipe_ctx->update_flags.bits.enable)
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
+static void dcn20_program_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ /* Only need to unblank on top pipe */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
+ && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
+ dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+
+ if (pipe_ctx->update_flags.bits.global_sync) {
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg,
+ pipe_ctx->pipe_dlg_param.vready_offset,
+ pipe_ctx->pipe_dlg_param.vstartup_start,
+ pipe_ctx->pipe_dlg_param.vupdate_offset,
+ pipe_ctx->pipe_dlg_param.vupdate_width);
+
+ pipe_ctx->stream_res.tg->funcs->set_vtg_params(
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm)
+ dc->hwss.update_odm(dc, context, pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable)
+ dcn20_enable_plane(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
+ set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change)
+ dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish, so only
+ * do gamma programming when powering on; the internal memcmp avoids
+ * redundant updates on slave planes.
+ */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
+ dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+ * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params,
+ &pipe_ctx->stream->clamping);
+ }
+}
+
+static bool does_pipe_need_lock(struct pipe_ctx *pipe)
+{
+ if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
+ || pipe->update_flags.raw)
+ return true;
+ if (pipe->bottom_pipe)
+ return does_pipe_need_lock(pipe->bottom_pipe);
+
+ return false;
+}
+
+static void dcn20_program_front_end_for_ctx(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ int i;
+ bool pipe_locked[MAX_PIPES] = {false};
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Carry over GSL groups in case the context is changing. */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
+ context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+
+ /* Set pipe update flags and lock pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ &context->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (!context->res_ctx.pipe_ctx[i].top_pipe &&
+ does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- /* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
- !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
- continue;
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
+ pipe_locked[i] = true;
+ }
- pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
- pipe_ctx->plane_res.hubp,
- &pipe_ctx->dlg_regs,
- &pipe_ctx->ttu_regs);
+ /* OTG blank before disabling all front ends */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && !context->res_ctx.pipe_ctx[i].top_pipe
+ && !context->res_ctx.pipe_ctx[i].prev_odm_pipe
+ && context->res_ctx.pipe_ctx[i].stream)
+ dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
+
+ /* Disconnect mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
+ dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
- if (interdependent_update)
- lock_all_pipes(dc, context, false);
- else
- dcn20_pipe_control_lock(dc, top_pipe_to_program, false);
+ /*
+ * Program all updated pipes; order matters for mpcc setup. Start with
+ * the top pipe and program all pipes that follow, in order.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe) {
+ while (pipe) {
+ dcn20_program_pipe(dc, pipe, context);
+ pipe = pipe->bottom_pipe;
+ }
+ /* Program secondary blending tree and writeback pipes */
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
+ && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
+ && dc->hwss.program_all_writeback_pipes_in_tree)
+ dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
+ }
+ }
+ /* Unlock all locked pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
- dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ if (pipe_locked[i]) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+ if (!pipe_ctx->update_flags.bits.enable)
+ dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
+ dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -1239,15 +1515,22 @@ static void dcn20_apply_ctx_for_surface(
* will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
* is unsupported on DCN.
*/
- i = 0;
- if (num_planes > 0 && top_pipe_to_program &&
- (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) {
- while (i < TIMEOUT_FOR_PIPE_ENABLE_MS &&
- top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) {
- i += 1;
- msleep(1);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
+ struct hubp *hubp = pipe->plane_res.hubp;
+ int j = 0;
+
+ for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ msleep(1);
}
}
+
+ /* WA to apply WM setting */
+ if (dc->hwseq->wa.DEGVIDCN21)
+ dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}
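
Aside (not part of the patch): stripped of hardware detail, the new entry point is a set of passes over the whole pipe array — detect deltas, lock the pipes that need work, do the destructive steps for all pipes before programming any, then unlock — instead of the old per-stream tree walk. A toy model of that pass structure, with prints standing in for the hwss hooks:

#include <stdbool.h>
#include <stdio.h>

#define MODEL_PIPES 4

struct model_pipe {
	bool has_plane;
	unsigned int update_raw;	/* 0 == nothing to do */
};

static void model_lock(int i, bool lock)
{
	printf("pipe %d: %s\n", i, lock ? "lock" : "unlock");
}

static void model_program(int i)
{
	printf("pipe %d: program\n", i);
}

/* Toy version of dcn20_program_front_end_for_ctx(): flat passes over the
 * pipe array rather than a recursive walk per stream. */
static void model_front_end(struct model_pipe *pipes)
{
	bool locked[MODEL_PIPES] = { false };
	int i;

	for (i = 0; i < MODEL_PIPES; i++)
		if (pipes[i].update_raw) {
			model_lock(i, true);
			locked[i] = true;
		}
	for (i = 0; i < MODEL_PIPES; i++)
		if (pipes[i].has_plane && pipes[i].update_raw)
			model_program(i);
	for (i = 0; i < MODEL_PIPES; i++)
		if (locked[i])
			model_lock(i, false);
}

int main(void)
{
	struct model_pipe pipes[MODEL_PIPES] = {
		{ true, 1 }, { false, 0 }, { true, 0 }, { true, 2 },
	};

	model_front_end(pipes);
	return 0;
}
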
@@ -1319,8 +1602,12 @@ bool dcn20_update_bandwidth(
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+
if (pipe_ctx->prev_odm_pipe == NULL)
dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
}
pipe_ctx->plane_res.hubp->funcs->hubp_setup(
@@ -1337,7 +1624,8 @@ bool dcn20_update_bandwidth(
static void dcn20_enable_writeback(
struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info)
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context)
{
struct dwbc *dwb;
struct mcif_wb *mcif_wb;
@@ -1354,7 +1642,7 @@ static void dcn20_enable_writeback(
optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
/* set MCIF_WB buffer and arbitration configuration */
mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
- mcif_wb->funcs->config_mcif_arb(mcif_wb, &dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
/* Enable MCIF_WB */
mcif_wb->funcs->enable_mcif(mcif_wb);
/* Enable DWB */
@@ -1702,6 +1990,28 @@ static void dcn20_reset_hw_ctx_wrap(
}
}
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ const struct tg_color pipe_colors[6] = {
+ {MAX_TG_COLOR_VALUE, 0, 0}, // red
+ {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // magenta
+ {0, MAX_TG_COLOR_VALUE, 0}, // green
+ {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
+ {0, 0, MAX_TG_COLOR_VALUE}, // blue
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
+ };
+
+ struct pipe_ctx *top_pipe = pipe_ctx;
+
+ while (top_pipe->top_pipe) {
+ top_pipe = top_pipe->top_pipe;
+ }
+
+ *color = pipe_colors[top_pipe->pipe_idx];
+}
+
static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1719,6 +2029,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
dcn10_get_surface_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+ dcn20_get_mpctree_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)
@@ -1919,8 +2232,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
- if (link->dc->hwss.program_dmdata_engine)
- link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
+ if (link->dc->hwss.program_dmdata_engine)
+ link->dc->hwss.program_dmdata_engine(pipe_ctx);
+ }
link->dc->hwss.update_info_frame(pipe_ctx);
@@ -2095,7 +2410,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc)
dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
- dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface;
+ dc->hwss.apply_ctx_for_surface = NULL;
+ dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 92ab3dd91814..3098f1049ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -96,4 +96,20 @@ void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
void dcn20_display_init(struct dc *dc);
+void dcn20_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_enable_plane(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+bool dcn20_set_blend_lut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+bool dcn20_set_shaper_3dlut(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
+void dcn20_get_mpctree_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
#endif /* __DC_HWSS_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 3736b5548a25..0c98a0bbbd14 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -91,6 +91,13 @@ struct mpll_cfg {
uint32_t ref_range;
uint32_t ref_clk;
bool hdmimode_enable;
+ bool sup_pre_hp;
+ bool dp_tx0_vergdrv_byp;
+ bool dp_tx1_vergdrv_byp;
+ bool dp_tx2_vergdrv_byp;
+ bool dp_tx3_vergdrv_byp;
};
struct dpcssys_phy_seq_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 2137e2be2140..3b613fb93ef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc,
*num_of_src_opp = 2;
else
*num_of_src_opp = 1;
+
+ /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */
+ if (*src_opp_id_1 == 0xf)
+ *num_of_src_opp = 1;
}
void optc2_set_dwb_source(struct timing_generator *optc,
@@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.set_vtg_params = optc1_set_vtg_params,
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
- .is_matching_timing = optc1_is_matching_timing
+ .get_hw_timing = optc1_get_hw_timing,
};
void dcn20_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 6b2f2f1a1c9c..bbd1c98564be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = {
};
static const struct dcn2_dpp_shift tf_shift = {
- TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
+ TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
+ TF_DEBUG_REG_LIST_SH_DCN10
};
static const struct dcn2_dpp_mask tf_mask = {
- TF_REG_LIST_SH_MASK_DCN20(_MASK)
+ TF_REG_LIST_SH_MASK_DCN20(_MASK),
+ TF_DEBUG_REG_LIST_MASK_DCN10
};
#define dwbc_regs_dcn2(id)\
@@ -732,6 +734,42 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ case TRANSMITTER_UNIPHY_F:
+ return 5;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
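
Aside (not part of the patch): the helper exists because the transmitter enum is not guaranteed to be a dense, zero-based run of UNIPHY values, so indexing link_enc_regs[] with the raw enum (as dcn20_link_encoder_create did before this patch) can be off by the enum's base. A small illustration of the hazard using a made-up enum:

#include <stdio.h>

/* Hypothetical enum: UNKNOWN occupies value 0, shifting UNIPHY_A to 1 */
enum model_transmitter {
	MODEL_TRANSMITTER_UNKNOWN,
	MODEL_TRANSMITTER_UNIPHY_A,
	MODEL_TRANSMITTER_UNIPHY_B,
};

static int model_map_to_phy(enum model_transmitter t)
{
	switch (t) {
	case MODEL_TRANSMITTER_UNIPHY_A:
		return 0;
	case MODEL_TRANSMITTER_UNIPHY_B:
		return 1;
	default:
		return -1;
	}
}

int main(void)
{
	enum model_transmitter t = MODEL_TRANSMITTER_UNIPHY_A;

	/* The raw enum value (1) would skip regs[0]; the mapped index is 0 */
	printf("raw=%d mapped=%d\n", (int)t, model_map_to_phy(t));
	return 0;
}
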
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#define dsc_regsDCN20(id)\
[id] = {\
@@ -825,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
@@ -922,7 +960,10 @@ struct dce_aux *dcn20_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -1042,14 +1083,18 @@ struct link_encoder *dcn20_link_encoder_create(
{
struct dcn20_link_encoder *enc20 =
kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
+ int link_regs_id;
if (!enc20)
return NULL;
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
dcn20_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
- &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_regs[link_regs_id],
&link_enc_aux_regs[enc_init_data->channel - 1],
&link_enc_hpd_regs[enc_init_data->hpd_source],
&le_shift,
@@ -1159,6 +1204,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn20_hwseq_create,
};
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
@@ -1601,7 +1648,7 @@ static void swizzle_to_dml_params(
}
}
-static bool dcn20_split_stream_for_odm(
+bool dcn20_split_stream_for_odm(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
@@ -1622,7 +1669,6 @@ static bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.dsc = NULL;
#endif
if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
- ASSERT(!next_odm_pipe->next_odm_pipe);
next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
}
@@ -1679,7 +1725,7 @@ static bool dcn20_split_stream_for_odm(
return true;
}
-static void dcn20_split_stream_for_mpc(
+void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct pipe_ctx *primary_pipe,
@@ -1765,7 +1811,7 @@ int dcn20_populate_dml_pipes_from_context(
pipe_cnt = i;
continue;
}
- if (!resource_are_streams_timing_synchronizable(
+ if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
res_ctx->pipe_ctx[pipe_cnt].stream,
res_ctx->pipe_ctx[i].stream)) {
synchronized_vblank = false;
@@ -1897,7 +1943,7 @@ int dcn20_populate_dml_pipes_from_context(
break;
case PIXEL_ENCODING_YCBCR420:
pipes[pipe_cnt].dout.output_format = dm_420;
- pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
+ pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
if (true) /* todo */
@@ -1911,6 +1957,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+#endif
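
Aside (not part of the patch): DSC carries bits_per_pixel in units of 1/16 bpp, which is why the new line divides by 16.0. A one-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int dsc_bits_per_pixel = 192;	/* a 12 bpp target, in 1/16 bpp units */

	printf("%.1f bpp\n", dsc_bits_per_pixel / 16.0);	/* prints 12.0 */
	return 0;
}
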
+
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
@@ -2132,7 +2183,7 @@ void dcn20_set_mcif_arb_params(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
int i;
@@ -2167,7 +2218,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
}
#endif
-static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe)
@@ -2207,7 +2258,8 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
*/
if (secondary_pipe == NULL) {
for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
- if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
preferred_pipe_idx = j;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
@@ -2243,29 +2295,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-bool dcn20_fast_validate_bw(
+void dcn20_merge_pipes_for_validate(
struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *pipe_split_from,
- int *vlevel_out)
+ struct dc_state *context)
{
- bool out = false;
-
- int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
- bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
- bool force_split = false;
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- bool failed_non_odm_dsc = false;
-#endif
- int split_threshold = dc->res_pool->pipe_count / 2;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
-
-
- ASSERT(pipes);
- if (!pipes)
- return false;
+ int i;
/* merge previously split odm pipes since mode support needs to make the decision */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2320,51 +2354,19 @@ bool dcn20_fast_validate_bw(
if (pipe->plane_state)
resource_build_scaling_params(pipe);
}
+}
- if (dc->res_pool->funcs->populate_dml_pipes)
- pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
- &context->res_ctx, pipes);
- else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
- &context->res_ctx, pipes);
-
- *pipe_cnt_out = pipe_cnt;
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- context->bw_ctx.dml.ip.odm_capable = 0;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- context->bw_ctx.dml.ip.odm_capable = odm_capable;
-
-#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
- /* 1 dsc per stream dsc validation */
- if (vlevel <= context->bw_ctx.dml.soc.num_states)
- if (!dcn20_validate_dsc(dc, context)) {
- failed_non_odm_dsc = true;
- vlevel = context->bw_ctx.dml.soc.num_states + 1;
- }
-#endif
-
- if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
- || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
- context->commit_hints.full_update_needed = true;
-
- /*initialize pipe_just_split_from to invalid idx*/
- for (i = 0; i < MAX_PIPES; i++)
- pipe_split_from[i] = -1;
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split)
+{
+ int i, pipe_idx, vlevel_split;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
- /* Single display only conditionals get set here */
+ /* Single-display loop; exits if there is more than one display */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
bool exit_loop = false;
@@ -2391,38 +2393,107 @@ bool dcn20_fast_validate_bw(
if (exit_loop)
break;
}
-
- if (context->stream_count > split_threshold)
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (context->stream_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
- vlevel_unsplit = vlevel;
+ /* The avoid-split loop looks for the lowest voltage level that keeps as many pipes as possible unsplit */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ pipe_idx++;
+ }
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ }
+
+ /* The split loop decides which pipes to split, based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
- break;
+
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ split[i] = true;
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = true;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = true;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
+ }
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
pipe_idx++;
}
+ return vlevel;
+}
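
Aside (not part of the patch): the avoid-split pass above is essentially a search — for each active pipe, walk voltage levels upward until DML reports the pipe can run unsplit (NoOfDPP == 1), keeping the starting level when no level works. A self-contained model of that walk:

#include <stdio.h>

#define MODEL_LEVELS 4
#define MODEL_PIPES  2

/* model_noofdpp[vlevel][pipe]: DPPs wanted for that pipe at that level */
static const int model_noofdpp[MODEL_LEVELS + 1][MODEL_PIPES] = {
	{ 2, 2 }, { 2, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
};

/* Toy version of the avoid-split walk: raise vlevel until every pipe can
 * run on a single DPP; keep the starting level if none works. */
static int model_avoid_split(int vlevel)
{
	int pipe, vlevel_split;

	for (pipe = 0; pipe < MODEL_PIPES; pipe++) {
		for (vlevel_split = vlevel; vlevel <= MODEL_LEVELS; vlevel++)
			if (model_noofdpp[vlevel][pipe] == 1)
				break;
		if (vlevel > MODEL_LEVELS)
			vlevel = vlevel_split;
	}
	return vlevel;
}

int main(void)
{
	printf("vlevel after avoid-split: %d\n", model_avoid_split(0));
	return 0;
}
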
+
+bool dcn20_fast_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *pipe_split_from,
+ int *vlevel_out)
+{
+ bool out = false;
+ bool split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ dcn20_merge_pipes_for_validate(dc, context);
+
+ pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);
+
+ *pipe_cnt_out = pipe_cnt;
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+
+ /* initialize pipe_split_from to invalid idx */
+ for (i = 0; i < MAX_PIPES; i++)
+ pipe_split_from[i] = -1;
+
for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
- bool need_split = true;
- bool need_split3d;
if (!pipe->stream || pipe_split_from[i] >= 0)
continue;
pipe_idx++;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- force_split = true;
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
- }
- if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
@@ -2440,40 +2511,26 @@ bool dcn20_fast_validate_bw(
if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
continue;
- need_split3d = ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE));
-
- if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
- need_split = false;
- vlevel = vlevel_unsplit;
- context->bw_ctx.dml.vba.maxMpcComb = 0;
- } else
- need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
-
/* We do not support mpo + odm at the moment */
if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (need_split3d || need_split || force_split) {
+ if (split[i]) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
- ASSERT(hsplit_pipe || force_split);
- if (!hsplit_pipe)
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe) {
+ context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
continue;
-
+ }
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
&context->res_ctx, dc->res_pool,
pipe, hsplit_pipe))
goto validate_fail;
+ dcn20_build_mapped_resource(dc, context, pipe->stream);
} else
dcn20_split_stream_for_mpc(
&context->res_ctx, dc->res_pool,
@@ -2487,7 +2544,7 @@ bool dcn20_fast_validate_bw(
}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Actual dsc count per stream dsc validation*/
- if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
+ if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
DML_FAIL_DSC_VALIDATION_FAILURE;
goto validate_fail;
@@ -2506,7 +2563,7 @@ validate_out:
return out;
}
-void dcn20_calculate_wm(
+static void dcn20_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int *out_pipe_cnt,
@@ -2527,7 +2584,7 @@ void dcn20_calculate_wm(
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipe_idx++;
@@ -2536,7 +2593,7 @@ void dcn20_calculate_wm(
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
pipes[pipe_cnt].pipe.dest.odm_combine =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
else
pipes[pipe_cnt].pipe.dest.odm_combine = 0;
}
@@ -2579,6 +2636,11 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2;
@@ -2590,6 +2652,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3;
@@ -2601,6 +2667,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
@@ -2610,6 +2680,10 @@ void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
}
void dcn20_calculate_dlg_params(
@@ -2629,7 +2703,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
@@ -2645,8 +2719,8 @@ void dcn20_calculate_dlg_params(
continue;
if (!visited[pipe_idx]) {
- display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src;
- display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest;
+ display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;
dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
@@ -2806,7 +2880,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
ASSERT(false);
restore_dml_state:
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
return voltage_supported;
@@ -2892,6 +2965,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
@@ -2900,8 +2974,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
int i;
uint32_t pipe_count = pool->res_cap->num_dwb;
- ASSERT(pipe_count > 0);
-
for (i = 0; i < pipe_count; i++) {
struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
GFP_KERNEL);
@@ -2947,7 +3019,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
return true;
}
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
@@ -2962,7 +3034,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
return pp_smu;
}
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -2970,7 +3042,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
}
}
-static void cap_soc_clocks(
+void dcn20_cap_soc_clocks(
struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table max_clocks)
{
@@ -3037,10 +3109,10 @@ static void cap_soc_clocks(
}
}
-static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
+ struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
@@ -3048,12 +3120,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (num_states == 0)
return;
+ memset(calculated_states, 0, sizeof(calculated_states));
+
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
- else
- // Accounting for SOC/DCF relationship, we can go as high as
- // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506.
- min_dcfclk = 507;
+ else {
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ min_dcfclk = 310;
+ else
+			// Accounting for SOC/DCF relationship, we can go as high as
+			// 506 MHz in Vmin.
+ min_dcfclk = 506;
+ }
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
@@ -3093,7 +3171,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
bb->clock_limits[num_calculated_states].state = bb->num_states;
}
-static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
{
kernel_fpu_begin();
if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
@@ -3292,14 +3370,14 @@ static bool init_soc_bounding_box(struct dc *dc,
}
if (clock_limits_available && uclk_states_available && num_states)
- update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
+ dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
else if (clock_limits_available)
- cap_soc_clocks(loaded_bb, max_clocks);
+ dcn20_cap_soc_clocks(loaded_bb, max_clocks);
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
- patch_bounding_box(dc, loaded_bb);
+ dcn20_patch_bounding_box(dc, loaded_bb);
return true;
}
@@ -3345,6 +3423,7 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.hw_3d_lut = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
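The four per-state blocks in dcn20_calculate_wm() repeat the same DML query sequence for watermark sets a through d. A minimal refactoring sketch, assuming the dcn_watermarks field layout these assignments imply (dcn21_resource.c later in this patch factors the same pattern into calculate_wm_set_for_vlevel()):

/* Hypothetical helper, not part of this patch: fill one watermark
 * set from DML instead of repeating the block per clock state.
 */
static void fill_wm_set(struct dcn_watermarks *wm,
		struct display_mode_lib *dml,
		display_e2e_pipe_params_st *pipes, int pipe_cnt)
{
	wm->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
	wm->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
	wm->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
	wm->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
	wm->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
#endif
}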
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 44f95aa0d61e..fef473d68a4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst);
void dcn20_dsc_destroy(struct display_stream_compressor **dsc);
-struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx);
-void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
-
+void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb);
+void dcn20_cap_soc_clocks(
+ struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table max_clocks);
+void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
+ struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states);
struct hubp *dcn20_hubp_create(
struct dc_context *ctx,
uint32_t inst);
@@ -116,6 +119,31 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
+void dcn20_merge_pipes_for_validate(
+ struct dc *dc,
+ struct dc_state *context);
+int dcn20_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ bool *split);
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
+#endif
+void dcn20_split_stream_for_mpc(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe);
+bool dcn20_split_stream_for_odm(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *prev_odm_pipe,
+ struct pipe_ctx *next_odm_pipe);
+struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool dcn20_fast_validate_bw(
struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 5ab9d6240498..4b3401616434 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
.set_avmute = enc1_stream_encoder_set_avmute,
.dig_connect_to_otg = enc1_dig_connect_to_otg,
.dig_source_otg = enc1_dig_source_otg,
+
+ .dp_get_pixel_format =
+ enc1_stream_encoder_dp_get_pixel_format,
+
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.enc_read_state = enc2_read_state,
.dp_set_dsc_config = enc2_dp_set_dsc_config,
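The new dp_get_pixel_format entry is reached through the stream encoder's funcs table, like every other hook in this struct. A hedged call-site sketch; the out-parameter names and types here are assumptions, not taken from this patch:

/* Hypothetical caller: test the hook before use so encoders that
 * leave it NULL keep working.
 */
if (stream_enc->funcs->dp_get_pixel_format)
	stream_enc->funcs->dp_get_pixel_format(stream_enc,
			&pixel_encoding, &color_depth);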
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ff50ae71fe27..14113ccf498d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
+DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index d1266741763b..f546260c15b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -22,6 +22,7 @@
* Authors: AMD
*
*/
+#include <linux/delay.h>
#include "dm_services.h"
#include "dcn20/dcn20_hubbub.h"
#include "dcn21_hubbub.h"
@@ -51,7 +52,7 @@
#ifdef NUM_VMID
#undef NUM_VMID
#endif
-#define NUM_VMID 1
+#define NUM_VMID 16
static uint32_t convert_and_clamp(
uint32_t wm_ns,
@@ -71,56 +72,76 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
//Poll until RIOMMU_ACTIVE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100);
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
- //Reflect the power status of DCHUBBUB
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
+ if (riommu_active) {
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
- //Start rIOMMU prefetching
- REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
- // Enable dynamic clock gating
- REG_UPDATE_4(DCHVM_CLK_CTRL,
- HVM_DISPCLK_R_GATE_DIS, 0,
- HVM_DISPCLK_G_GATE_DIS, 0,
- HVM_DCFCLK_R_GATE_DIS, 0,
- HVM_DCFCLK_G_GATE_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
- //Poll until HOSTVM_PREFETCH_DONE = 1
- //TODO: Figure out interval us and retry count
- REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+ }
}
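dcn21_dchvm_init() replaces the first REG_WAIT() with an open-coded loop so that a timed-out poll falls through quietly instead of taking REG_WAIT's timeout path. A generic sketch of the same bounded-poll idiom, with invented names:

/* Bounded poll: up to `retries` reads, `interval_us` apart;
 * returns true only if the condition was met within the budget.
 */
static bool poll_until_true(bool (*cond)(void *arg), void *arg,
		int retries, int interval_us)
{
	int i;

	for (i = 0; i < retries; i++) {
		if (cond(arg))
			return true;
		udelay(interval_us);
	}
	return false;
}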
-static int hubbub21_init_dchub(struct hubbub *hubbub,
+int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
+
+ if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
+ phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
+ phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
+ phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
+ // Init VMID 0 based on PA config
+ dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
+ }
dcn21_dchvm_init(hubbub);
return NUM_VMID;
}
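hubbub21_init_dchub() now shifts the aperture addresses before programming them, which implies pa_config carries byte addresses while the registers take coarser units. An illustrative conversion, under that assumption:

/* Assumption: byte addresses in, register units out. The FB/AGP
 * aperture fields appear to be 16 MB-granular (>> 24) and the
 * GART page-table bounds 4 KB page numbers (>> 12).
 */
uint32_t fb_base_val  = (uint32_t)(fb_base_bytes >> 24);  /* 16 MB units */
uint64_t pt_start_val = pt_start_bytes >> 12;             /* 4 KB pages  */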
-static void hubbub21_program_urgent_watermarks(
+void hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
+ hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
+ }
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
+ hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
+ }
+
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
@@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks(
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
}
+ if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
+ hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
+ }
+
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
@@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
}
+
+ if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
+ hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
+ prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
+ refclk_mhz, 0x1fffff);
+ REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
+ DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
+ }
}
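Each urgent-latency block added above follows the driver's safe_to_lower convention. A condensed sketch of the pattern; WM_REG and WM_FIELD are placeholders, not real register names:

/* Rewrite a watermark only when it rises, or when the caller says
 * lowering is safe, so intermediate states never program a value
 * below what the hardware currently needs.
 */
if (safe_to_lower || new_ns > cached->urgent_latency_ns) {
	cached->urgent_latency_ns = new_ns;
	prog_wm_value = convert_and_clamp(new_ns, refclk_mhz, 0x1fffff);
	REG_SET(WM_REG, 0, WM_FIELD, prog_wm_value);
}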
-static void hubbub21_program_stutter_watermarks(
+void hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks(
}
}
-static void hubbub21_program_pstate_watermarks(
+void hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
}
+void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t prog_wm_value;
+
+ prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+}
static const struct hubbub_funcs hubbub21_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub21_init_dchub,
- .init_vm_ctx = NULL,
+ .init_vm_ctx = hubbub2_init_vm_ctx,
.dcc_support_swizzle = hubbub2_dcc_support_swizzle,
.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
void hubbub21_construct(struct dcn20_hubbub *hubbub,
@@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub,
hubbub->masks = hubbub_mask;
hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index 6ff3cdb89178..c4840dfb1fa5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -36,6 +36,10 @@
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\
SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\
SR(DCHUBBUB_ARB_HOSTVM_CNTL), \
SR(DCHVM_CTRL0), \
SR(DCHVM_MEM_CTRL), \
@@ -44,16 +48,9 @@
SR(DCHVM_RIOMMU_STAT0)
#define HUBBUB_REG_LIST_DCN21()\
- HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_REG_LIST_DCN20_COMMON(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
- HUBBUB_HVM_REG_LIST(), \
- SR(DCHUBBUB_CRC_CTRL), \
- SR(DCN_VM_FB_LOCATION_BASE),\
- SR(DCN_VM_FB_LOCATION_TOP),\
- SR(DCN_VM_FB_OFFSET),\
- SR(DCN_VM_AGP_BOT),\
- SR(DCN_VM_AGP_TOP),\
- SR(DCN_VM_AGP_BASE)
+ HUBBUB_HVM_REG_LIST()
#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \
@@ -102,7 +99,7 @@
HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\
- HUBBUB_MASK_SH_LIST_HVM(mask_sh),\
+ HUBBUB_MASK_SH_LIST_HVM(mask_sh), \
HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
@@ -114,11 +111,28 @@
HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh)
void dcn21_dchvm_init(struct hubbub *hubbub);
+int hubbub21_init_dchub(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
+void hubbub21_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+void hubbub21_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
void hubbub21_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index a00af513aa2b..2f5a5867e674 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -22,6 +22,8 @@
* Authors: AMD
*
*/
+
+#include "dcn10/dcn10_hubp.h"
#include "dcn21_hubp.h"
#include "dm_services.h"
@@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
.hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr,
- .hubp_program_surface_config = hubp2_program_surface_config,
+ .hubp_program_surface_config = hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp21_setup,
.hubp_setup_interdependent = hubp2_setup_interdependent,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
new file mode 100644
index 000000000000..b25215cadf85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dce/dce_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "vmid.h"
+#include "reg_helper.h"
+#include "hw/clk_mgr.h"
+
+
+#define DC_LOGGER_INIT(logger)
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+/* Temporary read settings; in the future these values will come from the KMD directly */
+static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config,
+ struct dce_hwseq *hws)
+{
+ uint32_t page_table_base_hi;
+ uint32_t page_table_base_lo;
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo);
+
+ config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo;
+
+}
+
+static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
+{
+ struct dcn_hubbub_phys_addr_config config;
+
+ config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
+ config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
+ config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
+ config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
+ config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
+ config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
+ config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
+ config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
+ config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
+
+ mmhub_update_page_table_config(&config, hws);
+
+ return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
+}
+
+// Workaround for Renoir s0i3: if the register is already programmed, bypass golden init.
+
+static bool dcn21_s0i3_golden_init_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t value = 0;
+
+ value = REG_READ(MICROSECOND_TIME_BASE_DIV);
+
+ return value != 0x00120464;
+}
+
+void dcn21_exit_optimized_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ false);
+}
+
+void dcn21_optimize_pwr_state(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ context,
+ true);
+}
+
+void dcn21_hw_sequencer_construct(struct dc *dc)
+{
+ dcn20_hw_sequencer_construct(dc);
+ dc->hwss.init_sys_ctx = dcn21_init_sys_ctx;
+ dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa;
+ dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state;
+ dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state;
+}
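dcn21_hw_sequencer_construct() inherits the full DCN2.0 table and overrides only the Renoir-specific hooks. A sketch of how a core caller would consume one of the optional entries (hypothetical call site, not from this patch):

/* Hooks added for one generation are guarded at the call site, so
 * sequencers that leave them NULL are unaffected.
 */
if (dc->hwss.optimize_pwr_state)
	dc->hwss.optimize_pwr_state(dc, dc->current_state);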
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
new file mode 100644
index 000000000000..be67b62e6fb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN21_H__
+#define __DC_HWSS_DCN21_H__
+
+struct dc;
+
+void dcn21_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
new file mode 100644
index 000000000000..e8a504ca5890
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include <linux/delay.h>
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn21_link_encoder.h"
+#include "stream_encoder.h"
+
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
+static struct mpll_cfg dcn21_mpll_cfg_ref[] = {
+ // RBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 238,
+ .mpllb_fracn_en = 0,
+ .mpllb_fracn_quot = 0,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 44237,
+ .mpllb_ssc_stepsize = 59454,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 2,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 2,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ // HBR
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 1,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR2
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 192,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 32768,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 36864,
+ .mpllb_ssc_stepsize = 49545,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 3,
+ .mpllb_ana_cp_int = 9,
+ .mpllb_ana_cp_prop = 15,
+ .hdmi_pixel_clk_div = 0,
+ },
+ //HBR3
+ {
+ .hdmimode_enable = 0,
+ .ref_range = 1,
+ .ref_clk_mpllb_div = 1,
+ .mpllb_ssc_en = 1,
+ .mpllb_div5_clk_en = 1,
+ .mpllb_multiplier = 304,
+ .mpllb_fracn_en = 1,
+ .mpllb_fracn_quot = 49152,
+ .mpllb_fracn_rem = 0,
+ .mpllb_fracn_den = 1,
+ .mpllb_ssc_up_spread = 0,
+ .mpllb_ssc_peak = 55296,
+ .mpllb_ssc_stepsize = 74318,
+ .mpllb_div_clk_en = 0,
+ .mpllb_div_multiplier = 0,
+ .mpllb_hdmi_div = 0,
+ .mpllb_tx_clk_div = 0,
+ .tx_vboost_lvl = 5,
+ .mpllb_pmix_en = 1,
+ .mpllb_word_div2_en = 0,
+ .mpllb_ana_v2i = 2,
+ .mpllb_ana_freq_vco = 1,
+ .mpllb_ana_cp_int = 7,
+ .mpllb_ana_cp_prop = 16,
+ .hdmi_pixel_clk_div = 0,
+ },
+};
+
+
+static bool update_cfg_data(
+ struct dcn10_link_encoder *enc10,
+ const struct dc_link_settings *link_settings,
+ struct dpcssys_phy_seq_cfg *cfg)
+{
+ int i;
+
+ cfg->load_sram_fw = false;
+ cfg->use_calibration_setting = true;
+
+ //TODO: need to implement a proper lane mapping for Renoir.
+ for (i = 0; i < 4; i++)
+ cfg->lane_en[i] = true;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[0];
+ break;
+ case LINK_RATE_HIGH:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[1];
+ break;
+ case LINK_RATE_HIGH2:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[2];
+ break;
+ case LINK_RATE_HIGH3:
+ cfg->mpll_cfg = dcn21_mpll_cfg_ref[3];
+ break;
+ default:
+ DC_LOG_ERROR("%s: No supported link rate found %X!\n",
+ __func__, link_settings->link_rate);
+ return false;
+ }
+
+ return true;
+}
+
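update_cfg_data() maps the four DP link rates onto consecutive entries of dcn21_mpll_cfg_ref[]. An alternative sketch under the assumption that only these rates matter; note the switch in the patch also logs and rejects unsupported rates, which a bare lookup would not:

/* Sketch only: LINK_RATE_* are sparse DPCD-style codes, so this
 * designated-initializer table is sparse but valid C.
 */
static const int mpll_idx[] = {
	[LINK_RATE_LOW]   = 0, /* RBR  */
	[LINK_RATE_HIGH]  = 1, /* HBR  */
	[LINK_RATE_HIGH2] = 2, /* HBR2 */
	[LINK_RATE_HIGH3] = 3, /* HBR3 */
};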
+void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
+
+ if (!value && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+}
+
+bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t value;
+
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
+
+ // if value == 1 alt mode is disabled, otherwise it is enabled
+ return !value;
+}
+
+bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ int value;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+
+ if (value == 1) {
+ ASSERT(0);
+ return false;
+ }
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 0);
+
+ udelay(40);
+
+ REG_GET(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE, &value);
+ if (value == 1) {
+ ASSERT(0);
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ return false;
+ }
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1);
+
+ return true;
+}
+
+
+
+static void dcn21_link_encoder_release_phy(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ REG_UPDATE(RDPCSTX_PHY_CNTL6,
+ RDPCS_PHY_DPALT_DISABLE_ACK, 1);
+ }
+
+ REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0);
+
+}
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10;
+ struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg;
+
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ if (!enc->ctx->dc->debug.avoid_vbios_exec_table) {
+ dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source);
+ return;
+ }
+
+ if (!update_cfg_data(enc10, link_settings, cfg))
+ return;
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
+
+}
+
+void dcn21_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ if (!dcn21_link_encoder_acquire_phy(enc))
+ return;
+
+ dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
+}
+
+void dcn21_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ dcn10_link_encoder_disable_output(enc, signal);
+
+ if (dc_is_dp_signal(signal))
+ dcn21_link_encoder_release_phy(enc);
+}
+
+
+static const struct link_encoder_funcs dcn21_link_enc_funcs = {
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ .read_state = link_enc2_read_state,
+#endif
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn21_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn21_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .fec_is_active = enc2_fec_is_active,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+};
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc21->enc10;
+
+ enc10->base.funcs = &dcn21_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether driver poll the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc10->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
+ * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
+ * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+ * Prefer DIG assignment is decided by board design.
+ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
+ * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
+ * By this, adding DIGG should not hurt DCE 8.0.
+ * This will let DCE 8.1 share DCE 8.0 as much as possible
+ */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc10->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc10->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc10->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc10->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc10->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+ if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
new file mode 100644
index 000000000000..1d7a1a51f13d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN21_H__
+#define __DC_LINK_ENCODER__DCN21_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+struct dcn21_link_encoder {
+ struct dcn10_link_encoder enc10;
+ struct dpcssys_phy_seq_cfg phy_seq_cfg;
+};
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\
+ LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \
+ SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \
+ SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \
+ SR(RDPCSTX0_RDPCSTX_SCRATCH)
+
+void dcn21_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+void dcn21_link_encoder_construct(
+ struct dcn21_link_encoder *enc21,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif
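struct dcn21_link_encoder embeds the DCN1.0 encoder as its first member, which is what makes the plain cast in dcn21_link_encoder_enable_dp_output() well-defined. A sketch of that layout assumption:

/* Valid only while enc10 stays the first field: a pointer to the
 * wrapper and a pointer to its first member are interchangeable.
 */
struct dcn21_link_encoder *enc21 =
	(struct dcn21_link_encoder *)TO_DCN10_LINK_ENC(enc);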
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index de182185fe1f..459bd9a5caed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -23,8 +23,6 @@
*
*/
-#include <linux/slab.h>
-
#include "dm_services.h"
#include "dc.h"
@@ -42,11 +40,11 @@
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
-#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn20/dcn20_opp.h"
#include "dcn20/dcn20_dsc.h"
-#include "dcn20/dcn20_link_encoder.h"
+#include "dcn21/dcn21_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
@@ -84,8 +82,9 @@
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
- .gpuvm_enable = 0,
- .hostvm_enable = 0,
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
.gpuvm_max_page_table_levels = 1,
.hostvm_max_page_table_levels = 4,
.hostvm_cached_page_table_levels = 2,
@@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 4,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
/*Extra state, no dispclk ramping*/
@@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.state = 5,
.dcfclk_mhz = 810.0,
.fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dispclk_mhz = 1395.0,
+ .dppclk_mhz = 1285.0,
+ .phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
- .dscclk_mhz = 318.334,
+ .dscclk_mhz = 489.0,
.dram_speed_mts = 4266.0,
},
},
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_time_us = 12.5,
+ .sr_enter_plus_exit_time_us = 17.0,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -350,6 +349,30 @@ static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_6)
};
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN20_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN20(_MASK)
+};
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
static const struct dcn21_dmcub_registers dmcub_regs = {
DMCUB_REG_LIST_DCN()
@@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
stream_enc_regs(4),
};
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};
@@ -636,6 +667,11 @@ static const struct dcn10_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
+
static struct input_pixel_processor *dcn21_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -683,7 +719,10 @@ static struct dce_aux *dcn21_aux_engine_create(
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
- &aux_engine_regs[inst]);
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
return &aux_engine->base;
}
@@ -726,11 +765,12 @@ static const struct resource_caps res_cap_rn = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
- .num_audio = 6, // 6 audio endpoints. 4 audio streams
+ .num_audio = 4, // 4 audio endpoints. 4 audio streams
.num_stream_encoder = 5,
.num_pll = 5, // maybe 3 because the last two used for USB-c
.num_dwb = 1,
.num_ddc = 5,
+ .num_vmid = 1,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
.num_dsc = 3,
#endif
@@ -796,15 +836,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
- .force_single_disp_pipe_split = true,
+ .force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 5120,/*upto 5K*/
+ .max_downscale_src_width = 3840,
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = true,
- .disable_48mhz_pwrdwn = true,
+ .disable_48mhz_pwrdwn = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -939,7 +979,7 @@ static void destruct(struct dcn21_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.pp_smu != NULL)
- dcn20_pp_smu_destroy(&pool->base.pp_smu);
+ dcn21_pp_smu_destroy(&pool->base.pp_smu);
}
@@ -969,11 +1009,35 @@ static void calculate_wm_set_for_vlevel(
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
#endif
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
}
+static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
+{
+ kernel_fpu_begin();
+ if (dc->bb_overrides.sr_exit_time_ns) {
+ bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
+ bb->sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.urgent_latency_ns) {
+ bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns) {
+ bb->dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+}
+
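patch_bounding_box() brackets its double-precision math with kernel_fpu_begin()/kernel_fpu_end(), the same guard dcn20_patch_bounding_box() uses earlier in this patch: kernel code may not touch FPU state outside such a region. The idiom in isolation:

/* Any DML-style floating-point math in kernel context needs the
 * FPU state saved and restored around it.
 */
kernel_fpu_begin();
bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
kernel_fpu_end();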
void dcn21_calculate_wm(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -988,6 +1052,8 @@ void dcn21_calculate_wm(
ASSERT(bw_params);
+ patch_bounding_box(dc, &context->bw_ctx.dml.soc);
+
for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -1021,7 +1087,7 @@ void dcn21_calculate_wm(
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
&context->res_ctx, pipes);
else
- pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
+ pipe_cnt = dcn21_populate_dml_pipes_from_context(dc,
&context->res_ctx, pipes);
}
@@ -1271,6 +1337,12 @@ struct display_stream_compressor *dcn21_dsc_create(
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ /*
+	TODO: Fix this function to calculate correct values.
+ There are known issues with this function currently
+ that will need to be investigated. Use hardcoded known good values for now.
+
+
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
int i;
@@ -1278,7 +1350,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
- dcn2_1_soc.num_states = 0;
for (i = 0; i < clk_table->num_entries; i++) {
@@ -1286,10 +1357,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- /* This is probably wrong, TODO: find correct calculation */
dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
- dcn2_1_soc.num_states++;
}
+	dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
+ dcn2_1_soc.num_states = i;
+ */
}
/* Temporary Place holder until we can get them from fuse */
@@ -1317,32 +1389,42 @@ static struct dpm_clocks dummy_clocks = {
};
-enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
+static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
return PP_SMU_RESULT_OK;
}
-enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
+static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp,
struct dpm_clocks *clock_table)
{
*clock_table = dummy_clocks;
return PP_SMU_RESULT_OK;
}
-struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx)
{
struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
- pp_smu->ctx.ver = PP_SMU_VER_RN;
+ if (!pp_smu)
+ return pp_smu;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) {
+ pp_smu->ctx.ver = PP_SMU_VER_RN;
+ pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
+ pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ } else {
- pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table;
- pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges;
+ dm_pp_get_funcs(ctx, pp_smu);
+
+ if (pp_smu->ctx.ver != PP_SMU_VER_RN)
+ pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
+ }
return pp_smu;
}
-void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
+static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
{
if (pp_smu && *pp_smu) {
kfree(*pp_smu);
@@ -1400,6 +1482,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ hws->wa.DEGVIDCN21 = true;
}
return hws;
}
@@ -1418,10 +1501,152 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.create_hwseq = dcn21_hwseq_create,
};
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+ link_regs(2, C),
+ link_regs(3, D),
+ link_regs(4, E),
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4)
+};
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static int map_transmitter_id_to_phy_instance(
+ enum transmitter transmitter)
+{
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ return 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ return 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ return 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ return 4;
+ break;
+ default:
+ ASSERT(0);
+ return 0;
+ }
+}
+
+static struct link_encoder *dcn21_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn21_link_encoder *enc21 =
+ kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL);
+ int link_regs_id;
+
+ if (!enc21)
+ return NULL;
+
+ link_regs_id =
+ map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
+
+ dcn21_link_encoder_construct(enc21,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[link_regs_id],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc21->enc10.base;
+}
+#define CTX ctx
+
+#define REG(reg_name) \
+ (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* RV1 supports max 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
+static int dcn21_populate_dml_pipes_from_context(
+ struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
+{
+ uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes);
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipes[i].pipe.src.hostvm = 1;
+ pipes[i].pipe.src.gpuvm = 1;
+ }
+
+ return pipe_cnt;
+}
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
- .link_enc_create = dcn20_link_encoder_create,
+ .link_enc_create = dcn21_link_encoder_create,
.validate_bandwidth = dcn21_validate_bandwidth,
+ .populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
@@ -1437,9 +1662,11 @@ static bool construct(
struct dc *dc,
struct dcn21_resource_pool *pool)
{
- int i;
+ int i, j;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ uint32_t num_pipes;
ctx->dc_bios->regs = &bios_regs;
@@ -1457,7 +1684,9 @@ static bool construct(
*************************************************/
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
- pool->base.pipe_count = 4;
+	/* max number of pipes for this ASIC before checking pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
dc->caps.max_downscale_ratio = 200;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
@@ -1467,6 +1696,7 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1516,6 +1746,26 @@ static bool construct(
goto create_fail;
}
+ pool->base.dmcu = dcn20_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
#ifdef CONFIG_DRM_AMD_DC_DMUB
pool->base.dmcub = dcn21_dmcub_create(ctx,
&dmcub_regs,
@@ -1530,6 +1780,14 @@ static bool construct(
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
+ num_pipes = dcn2_1_ip.max_num_dpp;
+
+ for (i = 0; i < dcn2_1_ip.max_num_dpp; i++)
+ if (pipe_fuses & 1 << i)
+ num_pipes--;
+ dcn2_1_ip.max_num_dpp = num_pipes;
+ dcn2_1_ip.max_num_otg = num_pipes;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
init_data.ctx = dc->ctx;
@@ -1537,8 +1795,15 @@ static bool construct(
if (!pool->base.irqs)
goto create_fail;
+ j = 0;
/* mem input -> ipp -> dpp -> opp -> TG */
for (i = 0; i < pool->base.pipe_count; i++) {
+ /* if the pipe is disabled, skip this instance of the HW pipe,
+ * i.e., skip the ASIC register instance
+ */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
pool->base.hubps[i] = dcn21_hubp_create(ctx, i);
if (pool->base.hubps[i] == NULL) {
BREAK_TO_DEBUGGER();
@@ -1562,6 +1827,23 @@ static bool construct(
"DC: failed to create dpps!\n");
goto create_fail;
}
+
+ pool->base.opps[i] = dcn21_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.timing_generators[i] = dcn21_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ j++;
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
@@ -1582,27 +1864,9 @@ static bool construct(
pool->base.sw_i2cs[i] = NULL;
}
- for (i = 0; i < pool->base.res_cap->num_opp; i++) {
- pool->base.opps[i] = dcn21_opp_create(ctx, i);
- if (pool->base.opps[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error(
- "DC: failed to create output pixel processor!\n");
- goto create_fail;
- }
- }
-
- for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
- pool->base.timing_generators[i] = dcn21_timing_generator_create(
- ctx, i);
- if (pool->base.timing_generators[i] == NULL) {
- BREAK_TO_DEBUGGER();
- dm_error("DC: failed to create tg!\n");
- goto create_fail;
- }
- }
-
- pool->base.timing_generator_count = i;
+ pool->base.timing_generator_count = j;
+ pool->base.pipe_count = j;
+ pool->base.mpcc_count = j;
pool->base.mpc = dcn21_mpc_create(ctx);
if (pool->base.mpc == NULL) {
@@ -1645,7 +1909,7 @@ static bool construct(
&res_create_funcs : &res_create_maximus_funcs)))
goto create_fail;
- dcn20_hw_sequencer_construct(dc);
+ dcn21_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
new file mode 100644
index 000000000000..626d22d437f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_CP_PSP_IF__H
+#define DM_CP_PSP_IF__H
+
+struct dc_link;
+
+struct cp_psp_stream_config {
+ uint8_t otg_inst;
+ uint8_t link_enc_inst;
+ uint8_t stream_enc_inst;
+ void *dm_stream_ctx;
+ bool dpms_off;
+};
+
+struct cp_psp_funcs {
+ void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config);
+};
+
+struct cp_psp {
+ void *handle;
+ struct cp_psp_funcs funcs;
+};
+
+
+#endif /* DM_CP_PSP_IF__H */
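
A hedged sketch of how a display manager could populate the new cp_psp hook; struct dm_ctx and psp_update_config() are hypothetical stand-ins, and only the cp_psp types come from the header above:

#include "dm_cp_psp.h"

struct dm_ctx;	/* hypothetical DM-side handle */

/* Hypothetical callback: forward the otg/link/stream encoder
 * instances and dpms state to the PSP firmware interface. */
static void psp_update_config(void *handle,
			      struct cp_psp_stream_config *config)
{
	(void)handle;
	(void)config;
}

static void dm_register_cp_psp(struct cp_psp *cp_psp, struct dm_ctx *dm)
{
	cp_psp->handle = dm;
	cp_psp->funcs.update_stream_config = psp_update_config;
}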
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index b6b4333737f2..94b75e942607 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table(
/*
* Polls for ACT (allocation change trigger) to be handled.
*/
-bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
struct dc_context *ctx,
const struct dc_stream_state *stream);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index c03a441ee638..ef7df9ef6d7e 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -249,10 +249,8 @@ struct pp_smu_funcs_nv {
};
#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
-
#define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8
-#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4
+#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8
#define PP_SMU_NUM_FCLK_DPM_LEVELS 4
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
@@ -288,7 +286,6 @@ struct pp_smu_funcs_rn {
enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp,
struct dpm_clocks *clock_table);
};
-#endif
struct pp_smu_funcs {
struct pp_smu ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 0fafd693ffb4..3c70dd577292 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -38,6 +38,7 @@
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN20_MAX_DSC_IMAGE_WIDTH 5184
static double adjust_ReturnBW(
struct display_mode_lib *mode_lib,
@@ -2610,7 +2611,8 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
- if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
@@ -3901,6 +3903,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3925,7 +3931,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 878bf4782ce6..2c7455e22a65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index ed8bf5f723c9..1e6aeb1bd2bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- // FIXME: take the max between luma, chroma chunk size?
+ // TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
@@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
unsigned int meta_pitch = 0;
unsigned int ppe = mode_422 ? 2 : 1;
- // FIXME check if ppe apply for both luma and chroma in 422 case
+ // TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// Source
// dcc_en = src.dcc;
dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
- mode_422 = 0; // FIXME
+ mode_422 = 0; // TODO
access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
@@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
* (double) cur_req_width;
cur_req_per_width = cur_width_ub / (double) cur_req_width;
- hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+ hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor
if (vratio_pre_l <= 1.0) {
*refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 3b6ed60dcd35..ba77957aefe3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -65,6 +65,7 @@ typedef struct {
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
+#define DCN21_MAX_DSC_IMAGE_WIDTH 5184
static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
@@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP(
return 30;
else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24))
return 24;
+ else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18))
+ return 18;
else
return BPP_INVALID;
}
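
With the new 18 bpp rung, a stream whose DecimalBPP works out to, say, 19.5 with DesiredBPP == 0 now truncates to 18 instead of falling through to BPP_INVALID; an explicit DesiredBPP of 18 is likewise honoured only when at least 18 bpp of budget is available.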
@@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaximumSwathWidthInLineBuffer);
}
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
+ double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+
for (j = 0; j < 2; j++) {
mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
mode_lib->vba.MaxDispclk[i],
@@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&& i == mode_lib->vba.soc.num_states)
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ if (mode_lib->vba.ODMCapability == false ||
+ (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown
+ && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) {
locals->ODMCombineEnablePerState[i][k] = false;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index f4c1ef9046bf..cfacd6027467 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -269,7 +269,7 @@ struct writeback_st {
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
- int output_bpp;
+ double output_bpp;
int dsc_enable;
int wb_enable;
int num_active_wb;
@@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
+ unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 65cf4edddaff..7f9a5621922f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -432,8 +433,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
dst->odm_combine;
+ mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] =
+ dst->odm_combine;
mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
(enum output_format_class) (dout->output_format);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpp;
mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
(enum output_encoder_class) (dout->output_type);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 91decac50557..1540ffbe3979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -387,6 +387,7 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
+ bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index ad8571f5a142..4c3e9cc30167 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -243,7 +243,7 @@ void dml1_extract_rq_regs(
rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
- /* FIXME: take the max between luma, chroma chunk size?
+ /* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
@@ -602,7 +602,7 @@ static void get_surf_rq_param(
unsigned int log2_dpte_group_length;
unsigned int func_meta_row_height, func_dpte_row_height;
- /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ /* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
vp_width = pipe_src_param.viewport_width_c / ppe;
vp_height = pipe_src_param.viewport_height_c;
@@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params(
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
- prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
@@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params(
dcc_en = e2e_pipe_param.pipe.src.dcc;
dual_plane = is_dual_plane(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format);
- mode_422 = 0; /* FIXME */
+ mode_422 = 0; /* TODO */
access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
(enum source_format_class) e2e_pipe_param.pipe.src.source_format,
@@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params(
cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
* (double) cur0_req_width;
cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
- hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */
if (vratio_pre_l <= 1.0) {
refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 5995bcdfed54..e60f760585e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -23,8 +23,7 @@
*/
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
-#include "dc.h"
-#include "core_types.h"
+#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
@@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = {
/* This module's internal functions */
+static uint32_t dc_dsc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ if (timing->flags.DSC) {
+ kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
+ kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
+ return kbps;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-only uses 1/3 the bandwidth of RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
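
As a worked example of the non-DSC branch: a 3840x2160@60 RGB 8 bpc timing has pix_clk_100hz = 5940000 (594 MHz), so kbps = 5940000 / 10 * 8 * 3 = 14256000, i.e. roughly 14.26 Gbps. YCbCr 4:2:0 would halve that and 4:2:2 would scale it by 2/3, matching the divisors above.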
+
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
}
static void get_dsc_enc_caps(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
struct dsc_enc_caps *dsc_enc_caps,
int pixel_clock_100Hz)
{
// This is a static HW query, so we can use any DSC
- struct display_stream_compressor *dsc = dc->res_pool->dscs[0];
memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps));
if (dsc)
@@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range(
struct dc_dsc_bw_range *range)
{
/* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+ range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing);
/* max dsc target bpp */
range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz);
@@ -512,6 +563,7 @@ static bool setup_dsc_config(
const struct dsc_enc_caps *dsc_enc_caps,
int target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
+ int min_slice_height_override,
struct dc_dsc_config *dsc_cfg)
{
struct dsc_enc_caps dsc_common_caps;
@@ -680,7 +732,10 @@ static bool setup_dsc_config(
// Slice height (i.e. number of slices per column): start with the policy minimum and pick the first value the picture height is divisible by.
// For 4:2:0 make sure the slice height is divisible by 2 as well.
- slice_height = min(dsc_policy.min_sice_height, pic_height);
+ if (min_slice_height_override == 0)
+ slice_height = min(dsc_policy.min_sice_height, pic_height);
+ else
+ slice_height = min(min_slice_height_override, pic_height);
while (slice_height < pic_height && (pic_height % slice_height != 0 ||
(timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0)))
@@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp
* If DSC is not possible, leave '*range' untouched.
*/
bool dc_dsc_compute_bandwidth_range(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
+ const uint32_t dsc_min_slice_height_override,
const uint32_t min_bpp,
const uint32_t max_bpp,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
@@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range(
struct dsc_enc_caps dsc_common_caps;
struct dc_dsc_config config;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps,
timing->pixel_encoding, &dsc_common_caps);
if (is_dsc_possible)
- is_dsc_possible = setup_dsc_config(dsc_sink_caps,
- &dsc_enc_caps,
- 0,
- timing, &config);
+ is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing,
+ dsc_min_slice_height_override, &config);
if (is_dsc_possible)
get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range);
@@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range(
}
bool dc_dsc_compute_config(
- const struct dc *dc,
+ const struct display_stream_compressor *dsc,
const struct dsc_dec_dpcd_caps *dsc_sink_caps,
+ const uint32_t dsc_min_slice_height_override,
uint32_t target_bandwidth_kbps,
const struct dc_crtc_timing *timing,
struct dc_dsc_config *dsc_cfg)
@@ -841,11 +896,11 @@ bool dc_dsc_compute_config(
bool is_dsc_possible = false;
struct dsc_enc_caps dsc_enc_caps;
- get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz);
+ get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz);
is_dsc_possible = setup_dsc_config(dsc_sink_caps,
&dsc_enc_caps,
target_bandwidth_kbps,
- timing, dsc_cfg);
+ timing, dsc_min_slice_height_override, dsc_cfg);
return is_dsc_possible;
}
#endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */
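
A hedged caller-side sketch of the reworked API: callers now pass a specific DSC instance (any one will do, since the encoder caps query is static) plus a minimum-slice-height override, where 0 keeps the policy default. The wrapper name and locals are illustrative:

/* Sketch only: parameter types come from dc_dsc.h. */
static bool try_setup_dsc(const struct display_stream_compressor *dsc,
			  const struct dsc_dec_dpcd_caps *sink_caps,
			  uint32_t target_kbps,
			  const struct dc_crtc_timing *timing,
			  struct dc_dsc_config *cfg)
{
	/* 0 => keep dsc_policy.min_sice_height; non-zero overrides it */
	return dc_dsc_compute_config(dsc, sink_caps, 0,
				     target_kbps, timing, cfg);
}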
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index ca51e83f8764..76c4b12d6824 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
{
float bpp_group;
float initial_xmit_delay_factor;
- int source_bpp;
int padding_pixels;
int i;
@@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->initial_xmit_delay++;
}
- source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5);
-
rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->flatness_det_thresh = 2 << (bpc - 8);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
index f8f85490e77e..f67c18375bfd 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -321,8 +321,6 @@ void dal_gpio_destroy(
return;
}
- dal_gpio_close(*gpio);
-
switch ((*gpio)->id) {
case GPIO_ID_DDC_DATA:
kfree((*gpio)->hw_container.ddc);
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index d03165e71dc6..92280cc05e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux(
return;
}
- dal_gpio_close(*mux);
dal_gpio_destroy(mux);
kfree(*mux);
@@ -460,7 +459,6 @@ void dal_gpio_destroy_irq(
return;
}
- dal_gpio_close(*irq);
dal_gpio_destroy(irq);
kfree(*irq);
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
new file mode 100644
index 000000000000..4170b6eb9ec0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile
@@ -0,0 +1,28 @@
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'hdcp' sub-component of DAL.
+#
+
+HDCP_MSG = hdcp_msg.o
+
+AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG)
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
new file mode 100644
index 000000000000..6f730b5bfe42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "include/hdcp_types.h"
+#include "include/i2caux_interface.h"
+#include "include/signal_types.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define HDCP14_KSV_SIZE 5
+#define HDCP14_MAX_KSV_FIFO_SIZE (127 * HDCP14_KSV_SIZE)
+
+static const bool hdcp_cmd_is_read[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = true,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = true,
+ [HDCP_MESSAGE_ID_READ_PJ] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = false,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = false,
+ [HDCP_MESSAGE_ID_WRITE_AN] = false,
+ [HDCP_MESSAGE_ID_READ_VH_X] = true,
+ [HDCP_MESSAGE_ID_READ_VH_0] = true,
+ [HDCP_MESSAGE_ID_READ_VH_1] = true,
+ [HDCP_MESSAGE_ID_READ_VH_2] = true,
+ [HDCP_MESSAGE_ID_READ_VH_3] = true,
+ [HDCP_MESSAGE_ID_READ_VH_4] = true,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = true,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = true,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true,
+ [HDCP_MESSAGE_ID_READ_BINFO] = true,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = true,
+ [HDCP_MESSAGE_ID_RX_CAPS] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = true,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+ [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
+};
+
+struct protection_properties {
+ bool supported;
+ bool (*process_transaction)(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+};
+
+static const struct protection_properties non_supported_protection = {
+ .supported = false
+};
+
+static bool hdmi_14_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ uint8_t *buff = NULL;
+ bool result;
+ const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1 */
+ const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1 */
+ struct i2c_command i2c_command;
+ uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
+ struct i2c_payload i2c_payloads[] = {
+ { true, 0, 1, &offset },
+ /* actual hdcp payload, will be filled later, zeroed for now*/
+ { 0 }
+ };
+
+ switch (message_info->link) {
+ case HDCP_LINK_SECONDARY:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_secondary;
+ break;
+ case HDCP_LINK_PRIMARY:
+ default:
+ i2c_payloads[0].address = hdcp_i2c_addr_link_primary;
+ i2c_payloads[1].address = hdcp_i2c_addr_link_primary;
+ break;
+ }
+
+ if (hdcp_cmd_is_read[message_info->msg_id]) {
+ i2c_payloads[1].write = false;
+ i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads);
+ i2c_payloads[1].length = message_info->length;
+ i2c_payloads[1].data = message_info->data;
+ } else {
+ i2c_command.number_of_payloads = 1;
+ buff = kzalloc(message_info->length + 1, GFP_KERNEL);
+
+ if (!buff)
+ return false;
+
+ buff[0] = offset;
+ memmove(&buff[1], message_info->data, message_info->length);
+ i2c_payloads[0].length = message_info->length + 1;
+ i2c_payloads[0].data = buff;
+ }
+
+ i2c_command.payloads = i2c_payloads;
+ i2c_command.engine = I2C_COMMAND_ENGINE_HW; /* HW engine only */
+ i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ result = dm_helpers_submit_i2c(
+ link->ctx,
+ link,
+ &i2c_command);
+ kfree(buff);
+
+ return result;
+}
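
The write path above follows the HDCP-over-I2C convention that the register offset travels as the first byte of the payload. A standalone sketch of that buffer layout (plain userspace C, with memcpy standing in for the kernel helpers):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* offset byte first, then the HDCP message body; caller frees */
static uint8_t *build_hdcp_i2c_write(uint8_t offset,
				     const uint8_t *data, uint32_t len)
{
	uint8_t *buff = malloc(len + 1);

	if (!buff)
		return NULL;
	buff[0] = offset;
	memcpy(&buff[1], data, len);
	return buff;
}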
+
+static const struct protection_properties hdmi_14_protection = {
+ .supported = true,
+ .process_transaction = hdmi_14_process_transaction
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
+ [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+ [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d,
+ [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b,
+ [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220,
+ [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0,
+ [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0,
+ [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0,
+ [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
+ [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
+ [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
+ [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
+ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493,
+ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494
+};
+
+static bool dpcd_access_helper(
+ struct dc_link *link,
+ uint32_t length,
+ uint8_t *data,
+ uint32_t dpcd_addr,
+ bool is_read)
+{
+ enum dc_status status;
+ uint32_t cur_length = 0;
+ uint32_t offset = 0;
+ uint32_t ksv_read_size = 0x6803b - 0x6802c;
+
+ /* KSV FIFO read: may need to be handled in multiple chunks */
+ if (dpcd_addr == 0x6802c) {
+ if (length % HDCP14_KSV_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_KSV_SIZE);
+ }
+ if (length > HDCP14_MAX_KSV_FIFO_SIZE) {
+ DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n",
+ __func__,
+ length,
+ HDCP14_MAX_KSV_FIFO_SIZE);
+ }
+
+ DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n",
+ __func__,
+ length / HDCP14_KSV_SIZE);
+
+ while (length > 0) {
+ if (length > ksv_read_size) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ ksv_read_size);
+
+ data += ksv_read_size;
+ length -= ksv_read_size;
+ } else {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ length);
+
+ data += length;
+ length = 0;
+ }
+
+ if (status != DC_OK)
+ return false;
+ }
+ } else {
+ while (length > 0) {
+ if (length > DEFAULT_AUX_MAX_DATA_SIZE)
+ cur_length = DEFAULT_AUX_MAX_DATA_SIZE;
+ else
+ cur_length = length;
+
+ if (is_read) {
+ status = core_link_read_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ } else {
+ status = core_link_write_dpcd(
+ link,
+ dpcd_addr + offset,
+ data + offset,
+ cur_length);
+ }
+
+ if (status != DC_OK)
+ return false;
+
+ length -= cur_length;
+ offset += cur_length;
+ }
+ }
+ return true;
+}
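
The non-KSV branch is a plain chunked transfer: split the request into pieces no larger than the AUX data limit and advance the offset each pass. A minimal sketch of just that loop (AUX_MAX is a stand-in for DEFAULT_AUX_MAX_DATA_SIZE, whose value lives elsewhere in dc):

#include <stdint.h>
#include <stdio.h>

#define AUX_MAX 16 /* stand-in for DEFAULT_AUX_MAX_DATA_SIZE */

static void chunked_transfer(uint32_t length)
{
	uint32_t offset = 0;

	while (length > 0) {
		uint32_t cur = length > AUX_MAX ? AUX_MAX : length;

		printf("xfer %u bytes at +%u\n", cur, offset);
		length -= cur;
		offset += cur;
	}
}

int main(void)
{
	chunked_transfer(40); /* 16 + 16 + 8 */
	return 0;
}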
+
+static bool dp_11_process_transaction(
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ return dpcd_access_helper(
+ link,
+ message_info->length,
+ message_info->data,
+ hdcp_dpcd_addrs[message_info->msg_id],
+ hdcp_cmd_is_read[message_info->msg_id]);
+}
+
+static const struct protection_properties dp_11_protection = {
+ .supported = true,
+ .process_transaction = dp_11_process_transaction
+};
+
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f189307750ab..a831079607cd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#include "clock_source.h"
#include "audio.h"
#include "dm_pp_smu.h"
-
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "dm_cp_psp.h"
+#endif
/************ link *****************/
struct link_init_data {
@@ -231,6 +233,7 @@ struct resource_pool {
struct dcn_fe_bandwidth {
int dppclk_khz;
+
};
struct stream_resource {
@@ -395,10 +398,6 @@ struct dc_state {
struct clk_mgr *clk_mgr;
- struct {
- bool full_update_needed : 1;
- } commit_hints;
-
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index b1fab251c09b..14716ba35662 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
+bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_channel_operation_result *operation_result);
@@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
+enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
+
void dal_ddc_service_write_scdc_data(
struct ddc_service *ddc_service,
uint32_t pix_clk,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 08a4df2c61a8..045138dbdccb 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -28,6 +28,8 @@
#define LINK_TRAINING_ATTEMPTS 4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/
struct dc_link;
struct dc_stream_state;
@@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries(
struct dc_link_settings *known_limit_link_setting,
int attempts);
+bool dp_verify_mst_link_cap(
+ struct dc_link *link);
+
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index e79cd4e92919..e77b3a76766d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -140,6 +140,9 @@ struct write_command_context {
struct aux_engine_funcs {
+ bool (*configure_timeout)(
+ struct ddc_service *ddc,
+ uint32_t timeout);
void (*destroy)(
struct aux_engine **ptr);
bool (*acquire_engine)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 76f9ad1b23df..4e18e77dcf42 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -47,7 +47,7 @@
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Will these bw structures be ASIC specific? */
-#define MAX_NUM_DPM_LVL 4
+#define MAX_NUM_DPM_LVL 8
#define WM_SET_COUNT 4
@@ -180,13 +180,19 @@ struct clk_mgr_funcs {
struct dc_state *context,
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+
+ bool (*are_clock_states_equal) (struct dc_clocks *a,
+ struct dc_clocks *b);
+ void (*notify_wm_ranges)(struct clk_mgr *clk_mgr);
};
struct clk_mgr {
struct dc_context *ctx;
struct clk_mgr_funcs *funcs;
struct dc_clocks clks;
+ bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figure out where this goes
+ int dentist_vco_freq_khz;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
struct clk_bw_params *bw_params;
#endif
@@ -199,4 +205,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr);
+void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
+void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr);
+
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 7dd46eb96d67..a17a77192690 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -184,6 +184,21 @@ struct clk_mgr_registers {
uint32_t MP1_SMN_C2PMSG_91;
};
+enum clock_type {
+ clock_type_dispclk = 1,
+ clock_type_dcfclk,
+ clock_type_socclk,
+ clock_type_pixelclk,
+ clock_type_phyclk,
+ clock_type_dppclk,
+ clock_type_fclk,
+ clock_type_dcfdsclk,
+ clock_type_dscclk,
+ clock_type_uclk,
+ clock_type_dramclk,
+};
+
+
struct state_dependent_clocks {
int display_clk_khz;
int pixel_clk_khz;
@@ -210,8 +225,6 @@ struct clk_mgr_internal {
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
/*TODO: figure out which of the below fields should be here vs in asic specific portion */
- int dentist_vco_freq_khz;
-
/* Cache the status of DFS-bypass feature*/
bool dfs_bypass_enabled;
/* True if the DFS-bypass feature is enabled and active. */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index d8e744f366e5..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -38,8 +38,7 @@ struct dccg {
struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
- int req_dppclk,
- bool reduce_divider_only);
+ int req_dppclk);
void (*get_dccg_ref_freq)(struct dccg *dccg,
unsigned int xtalin_freq_inKhz,
unsigned int *dccg_ref_freq_inKhz);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index a6297219d7fc..c81a17aeaa25 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -147,6 +147,7 @@ struct hubbub_funcs {
bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub);
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
+ void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index 1ddb1c6fa149..c6ff3d78b435 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -28,7 +28,11 @@
#include "dc_dsc.h"
#include "dc_hw_types.h"
-#include "dc_dp_types.h"
+#include "dc_types.h"
+/* do not include any other headers
+ * or else it might break Edid Utility functionality.
+ */
+
/* Input parameters for configuring DSC from the outside of DSC */
struct dsc_config {
@@ -81,12 +85,6 @@ struct dsc_enc_caps {
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
};
-struct display_stream_compressor {
- const struct dsc_funcs *funcs;
- struct dc_context *ctx;
- int inst;
-};
-
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index abb4e4237fb6..b21909216fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -184,6 +184,10 @@ struct link_encoder_funcs {
bool (*fec_is_active)(struct link_encoder *enc);
#endif
bool (*is_in_alt_mode) (struct link_encoder *enc);
+
+ void (*get_max_link_cap)(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
enum signal_type (*get_dig_mode)(
struct link_encoder *enc);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index e8668388581b..67b610d6d91f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -43,6 +43,7 @@ struct dcn_watermarks {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip;
+ int32_t urgent_latency_ns;
#endif
struct cstate_pstate_watermarks_st cstate_pstate;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 957e9047381a..18def2b6fafe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -208,6 +208,7 @@ struct output_pixel_processor {
struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
+ uint32_t dyn_expansion;
};
enum fmt_stereo_action {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index fe9b7a10a1c3..6305e388612a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -214,6 +214,11 @@ struct stream_encoder_funcs {
unsigned int (*dig_source_otg)(
struct stream_encoder *enc);
+ bool (*dp_get_pixel_format)(
+ struct stream_encoder *enc,
+ enum dc_pixel_encoding *encoding,
+ enum dc_color_depth *depth);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 6196cc32356e..27c73caf74ee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -261,6 +261,8 @@ struct timing_generator_funcs {
void (*program_manual_trigger)(struct timing_generator *optc);
void (*setup_manual_trigger)(struct timing_generator *optc);
+ bool (*get_hw_timing)(struct timing_generator *optc,
+ struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 3a938cd414ea..d39c1e11def5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -114,6 +114,9 @@ struct hw_sequencer_funcs {
int opp_id);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+ void (*program_front_end_for_ctx)(
+ struct dc *dc,
+ struct dc_state *context);
void (*program_triplebuffer)(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -229,6 +232,13 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct dc_state *context);
+ void (*exit_optimized_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+ void (*optimize_pwr_state)(
+ const struct dc *dc,
+ struct dc_state *context);
+
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
bool (*update_bandwidth)(
struct dc *dc,
@@ -321,10 +331,12 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*update_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*enable_writeback)(struct dc *dc,
const struct dc_stream_status *stream_status,
- struct dc_writeback_info *wb_info);
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context);
void (*disable_writeback)(struct dc *dc,
unsigned int dwb_pipe_inst);
#endif
@@ -337,6 +349,9 @@ struct hw_sequencer_funcs {
enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg);
+#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
+ bool (*s0i3_golden_init_wa)(struct dc *dc);
+#endif
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 18961707db23..9ad49da50a17 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -31,6 +31,8 @@
#define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9
#define DP_BRANCH_DEVICE_ID_00001A 0x00001A
#define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1
+#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
+#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
new file mode 100644
index 000000000000..f31e6befc8d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -0,0 +1,96 @@
+/*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*
+* Authors: AMD
+*
+*/
+
+#ifndef __DC_HDCP_TYPES_H__
+#define __DC_HDCP_TYPES_H__
+
+enum hdcp_message_id {
+ HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ HDCP_MESSAGE_ID_READ_BKSV = 0,
+ /* HDMI is called Ri', DP is called R0' */
+ HDCP_MESSAGE_ID_READ_RI_R0,
+ HDCP_MESSAGE_ID_READ_PJ,
+ HDCP_MESSAGE_ID_WRITE_AKSV,
+ HDCP_MESSAGE_ID_WRITE_AINFO,
+ HDCP_MESSAGE_ID_WRITE_AN,
+ HDCP_MESSAGE_ID_READ_VH_X,
+ HDCP_MESSAGE_ID_READ_VH_0,
+ HDCP_MESSAGE_ID_READ_VH_1,
+ HDCP_MESSAGE_ID_READ_VH_2,
+ HDCP_MESSAGE_ID_READ_VH_3,
+ HDCP_MESSAGE_ID_READ_VH_4,
+ HDCP_MESSAGE_ID_READ_BCAPS,
+ HDCP_MESSAGE_ID_READ_BSTATUS,
+ HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ HDCP_MESSAGE_ID_READ_BINFO,
+
+ /* HDCP 2.2 */
+
+ HDCP_MESSAGE_ID_HDCP2VERSION,
+ HDCP_MESSAGE_ID_RX_CAPS,
+ HDCP_MESSAGE_ID_WRITE_AKE_INIT,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
+ HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM,
+ HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
+ HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
+ HDCP_MESSAGE_ID_WRITE_LC_INIT,
+ HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
+ HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
+ HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
+ HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
+ HDCP_MESSAGE_ID_READ_RXSTATUS,
+ HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE,
+
+ HDCP_MESSAGE_ID_MAX
+};
+
+enum hdcp_version {
+ HDCP_Unknown = 0,
+ HDCP_VERSION_14,
+ HDCP_VERSION_22,
+};
+
+enum hdcp_link {
+ HDCP_LINK_PRIMARY,
+ HDCP_LINK_SECONDARY
+};
+
+struct hdcp_protection_message {
+ enum hdcp_version version;
+ /* relevant only for DVI */
+ enum hdcp_link link;
+ enum hdcp_message_id msg_id;
+ uint32_t length;
+ uint8_t max_retries;
+ uint8_t *data;
+};
+
+#endif
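
A hedged sketch of filling in the new message struct, e.g. for a 5-byte BKSV read on the primary link (buffer management is the caller's problem):

#include <stdint.h>
#include "hdcp_types.h"

static struct hdcp_protection_message make_bksv_read(uint8_t *buf5)
{
	struct hdcp_protection_message msg = {
		.version = HDCP_VERSION_14,
		.link = HDCP_LINK_PRIMARY,
		.msg_id = HDCP_MESSAGE_ID_READ_BKSV,
		.length = 5,	/* HDCP14_KSV_SIZE */
		.data = buf5,
	};
	return msg;
}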
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 2d8f14b69117..1de4805cb8c7 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -373,7 +373,42 @@ static struct fixed31_32 translate_from_linear_space(
return dc_fixpt_mul(args->arg, args->a1);
}
-static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+
+static struct fixed31_32 translate_from_linear_space_long(
+ struct translate_from_linear_space_args *args)
+{
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
+
+ if (dc_fixpt_lt(one, args->arg))
+ return one;
+
+ if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0)))
+ return dc_fixpt_sub(
+ args->a2,
+ dc_fixpt_mul(
+ dc_fixpt_add(
+ one,
+ args->a3),
+ dc_fixpt_pow(
+ dc_fixpt_neg(args->arg),
+ dc_fixpt_recip(args->gamma))));
+ else if (dc_fixpt_le(args->a0, args->arg))
+ return dc_fixpt_sub(
+ dc_fixpt_mul(
+ dc_fixpt_add(
+ one,
+ args->a3),
+ dc_fixpt_pow(
+ args->arg,
+ dc_fixpt_recip(args->gamma))),
+ args->a2);
+ else
+ return dc_fixpt_mul(
+ args->arg,
+ args->a1);
+}
+
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf)
{
struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
@@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
scratch_gamma_args.a3 = dc_fixpt_zero;
scratch_gamma_args.gamma = gamma;
+ if (use_eetf)
+ return translate_from_linear_space_long(&scratch_gamma_args);
+
return translate_from_linear_space(&scratch_gamma_args);
}
+
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -920,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (fs_params->max_display < 100) // cap at 100 at the top
max_display = dc_fixpt_from_int(100);
- if (fs_params->min_content < fs_params->min_display)
- use_eetf = true;
- else
- min_content = min_display;
-
+ // only max is used; min luminance is not adjusted
if (fs_params->max_content > fs_params->max_display)
use_eetf = true;
else
@@ -950,7 +985,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
output = dc_fixpt_zero;
else
- output = calculate_gamma22(scaledX);
+ output = calculate_gamma22(scaledX, use_eetf);
rgb->r = output;
rgb->g = output;
@@ -2173,5 +2208,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
rgb_degamma_alloc_fail:
return ret;
}
-
-
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index ec70c9b12e1a..16e69bbc69aa 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,8 +37,8 @@
#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
/* Number of elements in the render times cache array */
#define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_EXIT_MARGIN 2000
+/* Threshold to enter/exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_MAX_MARGIN 2500
/* Threshold to change BTR multiplier (to avoid frequent changes) */
#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
@@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp(
current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
stream->timing.h_total), 1000);
+ /* v_total cannot be less than nominal */
+ if (v_total < stream->timing.v_total)
+ v_total = stream->timing.v_total;
+
in_out_vrr->adjust.v_total_min = v_total;
in_out_vrr->adjust.v_total_max = v_total;
}
@@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
unsigned int frames_to_insert = 0;
- unsigned int min_frame_duration_in_ns = 0;
- unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
unsigned int delta_from_mid_point_delta_in_us;
-
- min_frame_duration_in_ns = ((unsigned int) (div64_u64(
- (1000000000ULL * 1000000),
- in_out_vrr->max_refresh_in_uhz)));
+ unsigned int max_render_time_in_us =
+ in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;
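+ /* margin/2 on either side of max_render_time_in_us gives the BTR
+  * enter/exit thresholds below some hysteresis
+  */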
/* Program BTR */
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
/* Exit Below the Range */
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
}
- } else if (last_render_time_in_us > max_render_time_in_us) {
+ } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
/* Enter Below the Range */
- in_out_vrr->btr.btr_active = true;
+ if (!in_out_vrr->btr.btr_active) {
+ in_out_vrr->btr.btr_active = true;
+ }
}
/* BTR set to "not active" so disengage */
@@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
+ if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
+ (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
+ mid_point_frames_floor < 2)) {
frames_to_insert = mid_point_frames_ceil;
delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
delta_from_mid_point_in_us_1;
@@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.frames_to_insert != 0 &&
delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
- in_out_vrr->max_duration_in_us) &&
+ max_render_time_in_us) &&
((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
in_out_vrr->min_duration_in_us))
frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
+ /* Rounded to the nearest Hz */
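+ /* adding 500000 uHz (0.5 Hz) before the divide implements round-to-nearest */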
+ nominal_field_rate_in_uhz = 1000000ULL *
+ div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
+
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
@@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
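+ /* BTR margin: the slack between max frame duration and twice the min
+  * duration, capped at BTR_MAX_MARGIN; consumed as enter/exit hysteresis
+  * in apply_below_the_range()
+  */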
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+
in_out_vrr->supported = true;
}
@@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
+
in_out_vrr->btr.mid_point_in_us =
(in_out_vrr->min_duration_in_us +
in_out_vrr->max_duration_in_us) / 2;
@@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync,
unsigned int *inserted_frames,
unsigned int *inserted_duration_in_us)
{
- struct core_freesync *core_freesync = NULL;
-
if (mod_freesync == NULL)
return;
- core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
-
if (vrr->supported) {
*v_total_min = vrr->adjust.v_total_min;
*v_total_max = vrr->adjust.v_total_max;
@@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream)
{
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int total = stream->timing.h_total * stream->timing.v_total;
- /* Calculate nominal field rate for stream */
+ /* Calculate nominal field rate for stream in uHz (the division truncates) */
nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.h_total);
- nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz,
- stream->timing.v_total);
+
+ nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
return nominal_field_rate_in_uhz;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
new file mode 100644
index 000000000000..1c3c6d47973a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2019 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'hdcp' sub-module of DAL.
+#
+
+HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \
+ hdcp1_execution.o hdcp1_transition.o
+
+AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP))
+#$(info ************ DAL-HDCP_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HDCP)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
new file mode 100644
index 000000000000..d7ac445dec6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -0,0 +1,426 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+static void push_error_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_status status)
+{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
+ if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
+ trace->errors[trace->error_count].status = status;
+ trace->errors[trace->error_count].state_id = hdcp->state.id;
+ trace->error_count++;
+ HDCP_ERROR_TRACE(hdcp, status);
+ }
+
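+ /* every pushed error also counts toward the hdcp1 retry limit */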
+ hdcp->connection.hdcp1_retry_count++;
+}
+
+static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
+{
+ int i, display_enabled = 0;
+
+ /* if all displays on the link are disabled, hdcp is not desired */
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->connection.displays[i].adjust.disable) {
+ display_enabled = 1;
+ break;
+ }
+ }
+
+ return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
+ display_enabled && !hdcp->connection.link.adjust.hdcp1.disable;
+}
+
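+/* dispatch the state-specific work for this event; per-step PASS/FAIL
+ * results are recorded in the transition input that transition() consumes
+ */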
+static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* initialize transition input */
+ memset(input, 0, sizeof(union mod_hdcp_transition_input));
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+ /* update topology event if hdcp is not desired */
+ status = mod_hdcp_add_display_topology(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_execution(hdcp,
+ event_ctx, &input->hdcp1);
+ }
+out:
+ return status;
+}
+
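+/* advance the state machine based on the transition input and schedule the
+ * next callback/watchdog through the output structure
+ */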
+static enum mod_hdcp_status transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ union mod_hdcp_transition_input *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->unexpected_event)
+ goto out;
+
+ if (is_in_initialized_state(hdcp)) {
+ if (is_dp_hdcp(hdcp)) {
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else if (is_hdmi_dvi_sl_hdcp(hdcp)) {
+ if (is_cp_desired_hdcp1(hdcp)) {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED);
+ }
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ increment_stay_counter(hdcp);
+ } else if (is_in_hdcp1_states(hdcp)) {
+ status = mod_hdcp_hdcp1_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else if (is_in_hdcp1_dp_states(hdcp)) {
+ status = mod_hdcp_hdcp1_dp_transition(hdcp,
+ event_ctx, &input->hdcp1, output);
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (is_hdcp1(hdcp)) {
+ if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN)
+ mod_hdcp_hdcp1_destroy_session(hdcp);
+
+ if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ } else if (is_in_cp_not_desired_state(hdcp)) {
+ status = mod_hdcp_remove_display_topology(hdcp);
+ if (status != MOD_HDCP_STATUS_SUCCESS) {
+ output->callback_needed = 0;
+ output->watchdog_timer_needed = 0;
+ goto out;
+ }
+ HDCP_TOP_RESET_AUTH_TRACE(hdcp);
+ memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
+ memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ }
+
+out:
+ /* stop callback and watchdog requests from previous authentication */
+ output->watchdog_timer_stop = 1;
+ output->callback_stop = 1;
+ return status;
+}
+
+static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ if (current_state(hdcp) != HDCP_UNINITIALIZED) {
+ HDCP_TOP_RESET_CONN_TRACE(hdcp);
+ set_state_id(hdcp, output, HDCP_UNINITIALIZED);
+ }
+ memset(&hdcp->connection, 0, sizeof(hdcp->connection));
+out:
+ return status;
+}
+
+/*
+ * Implementation of functions in mod_hdcp.h
+ */
+size_t mod_hdcp_get_memory_size(void)
+{
+ return sizeof(struct mod_hdcp);
+}
+
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config)
+{
+ struct mod_hdcp_output output;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ memset(&output, 0, sizeof(output));
+ hdcp->config = *config;
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, &output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_output output;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ memset(&output, 0, sizeof(output));
+ status = reset_connection(hdcp, &output);
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ memset(hdcp, 0, sizeof(struct mod_hdcp));
+ else
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display_container = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* skip inactive display */
+ if (display->state != MOD_HDCP_DISPLAY_ACTIVE) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* check existing display container */
+ if (get_active_display_at_index(hdcp, display->index)) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* find an empty display container */
+ display_container = get_empty_display_container(hdcp);
+ if (!display_container) {
+ status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND;
+ goto out;
+ }
+
+ /* reset existing authentication status */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+
+ /* reset retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication */
+ if (current_state(hdcp) != HDCP_INITIALIZED)
+ set_state_id(hdcp, output, HDCP_INITIALIZED);
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ goto out;
+ }
+
+ /* stop current authentication */
+ status = reset_authentication(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ /* remove display */
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+
+ /* clear retry counters */
+ reset_retry_counts(hdcp);
+
+ /* reset error trace */
+ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+
+ /* request authentication for remaining displays */
+ if (get_active_display_count(hdcp) > 0)
+ callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
+ output);
+out:
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_display *display = NULL;
+
+ /* find display in connection */
+ display = get_active_display_at_index(hdcp, index);
+ if (!display) {
+ status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ goto out;
+ }
+
+ /* populate query */
+ query->link = &hdcp->connection.link;
+ query->display = display;
+ query->trace = &hdcp->connection.trace;
+ query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status);
+
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ HDCP_TOP_INTERFACE_TRACE(hdcp);
+ status = reset_connection(hdcp, output);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, status);
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status exec_status, trans_status, reset_status, status;
+ struct mod_hdcp_event_context event_ctx;
+
+ HDCP_EVENT_TRACE(hdcp, event);
+ memset(output, 0, sizeof(struct mod_hdcp_output));
+ memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context));
+ event_ctx.event = event;
+
+ /* execute and transition */
+ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input);
+ trans_status = transition(
+ hdcp, &event_ctx, &hdcp->auth.trans_input, output);
+ if (trans_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) {
+ status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE;
+ push_error_status(hdcp, status);
+ } else {
+ status = exec_status;
+ push_error_status(hdcp, status);
+ }
+
+ /* reset authentication if needed */
+ if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) {
+ HDCP_FULL_DDC_TRACE(hdcp);
+ reset_status = reset_authentication(hdcp, output);
+ if (reset_status != MOD_HDCP_STATUS_SUCCESS)
+ push_error_status(hdcp, reset_status);
+ }
+ return status;
+}
+
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal)
+{
+ enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ mode = MOD_HDCP_MODE_DEFAULT;
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ mode = MOD_HDCP_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ mode = MOD_HDCP_MODE_DP_MST;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
new file mode 100644
index 000000000000..5664bc0b5bd0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef HDCP_H_
+#define HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp_log.h"
+
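+/* HDCP 1.x Bcaps/Bstatus/Binfo bit masks; the *_DP variants follow the
+ * DPCD register layout
+ */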
+#define BCAPS_READY_MASK 0x20
+#define BCAPS_REPEATER_MASK 0x40
+#define BSTATUS_DEVICE_COUNT_MASK 0x007F
+#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080
+#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800
+#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01
+#define BCAPS_REPEATER_MASK_DP 0x02
+#define BSTATUS_READY_MASK_DP 0x01
+#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02
+#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04
+#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08
+#define BINFO_DEVICE_COUNT_MASK_DP 0x007F
+#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080
+#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800
+
+#define RXSTATUS_MSG_SIZE_MASK 0x03FF
+#define RXSTATUS_READY_MASK 0x0400
+#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800
+#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xF0
+#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01
+#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02
+#define RXSTATUS_READY_MASK_DP 0x0001
+#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002
+#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004
+#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008
+#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010
+
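+/* tri-state result of a single authentication step, recorded per flag in
+ * the transition input
+ */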
+enum mod_hdcp_trans_input_result {
+ UNKNOWN = 0,
+ PASS,
+ FAIL
+};
+
+struct mod_hdcp_transition_input_hdcp1 {
+ uint8_t bksv_read;
+ uint8_t bksv_validation;
+ uint8_t add_topology;
+ uint8_t create_session;
+ uint8_t an_write;
+ uint8_t aksv_write;
+ uint8_t ainfo_write;
+ uint8_t bcaps_read;
+ uint8_t r0p_read;
+ uint8_t rx_validation;
+ uint8_t encryption;
+ uint8_t link_maintenance;
+ uint8_t ready_check;
+ uint8_t bstatus_read;
+ uint8_t max_cascade_check;
+ uint8_t max_devs_check;
+ uint8_t device_count_check;
+ uint8_t ksvlist_read;
+ uint8_t vp_read;
+ uint8_t ksvlist_vp_validation;
+
+ uint8_t hdcp_capable_dp;
+ uint8_t binfo_read_dp;
+ uint8_t r0p_available_dp;
+ uint8_t link_integiry_check;
+ uint8_t reauth_request_check;
+ uint8_t stream_encryption_dp;
+};
+
+union mod_hdcp_transition_input {
+ struct mod_hdcp_transition_input_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_message_hdcp1 {
+ uint8_t an[8];
+ uint8_t aksv[5];
+ uint8_t ainfo;
+ uint8_t bksv[5];
+ uint16_t r0p;
+ uint8_t bcaps;
+ uint16_t bstatus;
+ uint8_t ksvlist[635];
+ uint16_t ksvlist_size;
+ uint8_t vp[20];
+
+ uint16_t binfo_dp;
+};
+
+union mod_hdcp_message {
+ struct mod_hdcp_message_hdcp1 hdcp1;
+};
+
+struct mod_hdcp_auth_counters {
+ uint8_t stream_management_retry_count;
+};
+
+/* contains values per connection */
+struct mod_hdcp_connection {
+ struct mod_hdcp_link link;
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
+ uint8_t is_repeater;
+ uint8_t is_km_stored;
+ struct mod_hdcp_trace trace;
+ uint8_t hdcp1_retry_count;
+};
+
+/* contains values per authentication cycle */
+struct mod_hdcp_authentication {
+ uint32_t id;
+ union mod_hdcp_message msg;
+ union mod_hdcp_transition_input trans_input;
+ struct mod_hdcp_auth_counters count;
+};
+
+/* contains values per state change */
+struct mod_hdcp_state {
+ uint8_t id;
+ uint32_t stay_count;
+};
+
+/* per event in a state */
+struct mod_hdcp_event_context {
+ enum mod_hdcp_event event;
+ uint8_t rx_id_list_ready;
+ uint8_t unexpected_event;
+};
+
+struct mod_hdcp {
+ /* per link */
+ struct mod_hdcp_config config;
+ /* per connection */
+ struct mod_hdcp_connection connection;
+ /* per authentication attempt */
+ struct mod_hdcp_authentication auth;
+ /* per state in an authentication */
+ struct mod_hdcp_state state;
+ /* reserved memory buffer */
+ uint8_t buf[2025];
+};
+
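+/* state IDs are chained across the enums below (each *_STATE_START aliases
+ * the previous *_STATE_END) so the is_in_*_states() helpers can test a
+ * single numeric range
+ */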
+enum mod_hdcp_initial_state_id {
+ HDCP_UNINITIALIZED = 0x0,
+ HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED,
+ HDCP_INITIALIZED,
+ HDCP_CP_NOT_DESIRED,
+ HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED
+};
+
+enum mod_hdcp_hdcp1_state_id {
+ HDCP1_STATE_START = HDCP_INITIAL_STATE_END,
+ H1_A0_WAIT_FOR_ACTIVE_RX,
+ H1_A1_EXCHANGE_KSVS,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER,
+ H1_A45_AUTHENTICATED,
+ H1_A8_WAIT_FOR_READY,
+ H1_A9_READ_KSV_LIST,
+ HDCP1_STATE_END = H1_A9_READ_KSV_LIST
+};
+
+enum mod_hdcp_hdcp1_dp_state_id {
+ HDCP1_DP_STATE_START = HDCP1_STATE_END,
+ D1_A0_DETERMINE_RX_HDCP_CAPABLE,
+ D1_A1_EXCHANGE_KSVS,
+ D1_A23_WAIT_FOR_R0_PRIME,
+ D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER,
+ D1_A4_AUTHENTICATED,
+ D1_A6_WAIT_FOR_READY,
+ D1_A7_READ_KSV_LIST,
+ HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST,
+};
+
+/* hdcp1 executions and transitions */
+typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp);
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str);
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input);
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output);
+
+/* log functions */
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size);
+/* TODO: add adjustment log */
+
+/* psp functions */
+enum mod_hdcp_status mod_hdcp_add_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_remove_display_topology(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
+ struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status);
+/* ddc functions */
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
+
+/* hdcp version helpers */
+static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
+ hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+}
+
+static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT);
+}
+
+/* hdcp state helpers */
+static inline uint8_t current_state(struct mod_hdcp *hdcp)
+{
+ return hdcp->state.id;
+}
+
+static inline void set_state_id(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output, uint8_t id)
+{
+ memset(&hdcp->state, 0, sizeof(hdcp->state));
+ hdcp->state.id = id;
+ /* callback timer should be reset per state */
+ output->callback_stop = 1;
+ output->watchdog_timer_stop = 1;
+ HDCP_NEXT_STATE_TRACE(hdcp, id, output);
+}
+
+static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_STATE_START &&
+ current_state(hdcp) <= HDCP1_STATE_END);
+}
+
+static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) > HDCP1_DP_STATE_START &&
+ current_state(hdcp) <= HDCP1_DP_STATE_END);
+}
+
+static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
+{
+ return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
+}
+
+static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_CP_NOT_DESIRED;
+}
+
+static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp)
+{
+ return current_state(hdcp) == HDCP_INITIALIZED;
+}
+
+/* transition operation helpers */
+static inline void increment_stay_counter(struct mod_hdcp *hdcp)
+{
+ hdcp->state.stay_count++;
+}
+
+static inline void fail_and_restart_in_ms(uint16_t time,
+ enum mod_hdcp_status *status,
+ struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+ output->watchdog_timer_needed = 0;
+ output->watchdog_timer_delay = 0;
+ *status = MOD_HDCP_STATUS_RESET_NEEDED;
+}
+
+static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output)
+{
+ output->callback_needed = 1;
+ output->callback_delay = time;
+}
+
+static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time,
+ struct mod_hdcp_output *output)
+{
+ output->watchdog_timer_needed = 1;
+ output->watchdog_timer_delay = time;
+}
+
+/* connection topology helpers */
+static inline uint8_t is_display_active(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
+}
+
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
+static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+}
+
+static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_active(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i]))
+ added_count++;
+ return added_count;
+}
+
+static inline struct mod_hdcp_display *get_first_added_display(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_active_display_at_index(
+ struct mod_hdcp *hdcp, uint8_t index)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (hdcp->connection.displays[i].index == index &&
+ is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline struct mod_hdcp_display *get_empty_display_container(
+ struct mod_hdcp *hdcp)
+{
+ uint8_t i;
+ struct mod_hdcp_display *display = NULL;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (!is_display_active(&hdcp->connection.displays[i])) {
+ display = &hdcp->connection.displays[i];
+ break;
+ }
+ return display;
+}
+
+static inline void reset_retry_counts(struct mod_hdcp *hdcp)
+{
+ hdcp->connection.hdcp1_retry_count = 0;
+}
+
+#endif /* HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
new file mode 100644
index 000000000000..3db4a7da414f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
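+/* a valid bksv contains exactly 20 ones among its 40 bits (HDCP 1.x
+ * requirement); the loop below is Kernighan's popcount
+ */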
+static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
+{
+ uint64_t n = 0;
+ uint8_t count = 0;
+
+ /* copy only the 5 bksv bytes; a raw 64-bit load is unaligned and would
+  * also pull in bits from the neighboring message fields
+  */
+ memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+
+ while (n) {
+ count++;
+ n &= (n - 1);
+ }
+ return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+}
+
+static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
+{
+ if (is_dp_hdcp(hdcp))
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY;
+}
+
+static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE;
+}
+
+static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+ if (is_dp_hdcp(hdcp)) {
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_R0_P_AVAILABLE_MASK_DP) ?
+ MOD_HDCP_STATUS_SUCCESS :
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING;
+ } else {
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+ }
+ return status;
+}
+
+static inline enum mod_hdcp_status check_link_integrity_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_reauthentication_request_dp(
+ struct mod_hdcp *hdcp)
+{
+ return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = (hdcp->auth.msg.hdcp1.binfo_dp &
+ BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = (hdcp->auth.msg.hdcp1.bstatus &
+ BSTATUS_MAX_DEVS_EXCEEDED_MASK) ?
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+ return status;
+}
+
+static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
+{
+ return is_dp_hdcp(hdcp) ?
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) :
+ (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK);
+}
+
+static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
+{
+ /* device count must be greater than or equal to tracked hdcp displays */
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
+ MOD_HDCP_STATUS_SUCCESS;
+}
+
+static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
+ &input->add_topology, &status,
+ hdcp, "add_topology"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
+ &input->create_session, &status,
+ hdcp, "create_session"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_an,
+ &input->an_write, &status,
+ hdcp, "an_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv,
+ &input->aksv_write, &status,
+ hdcp, "aksv_write"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv,
+ &input->bksv_read, &status,
+ hdcp, "bksv_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(validate_bksv,
+ &input->bksv_validation, &status,
+ hdcp, "bksv_validation"))
+ goto out;
+ if (hdcp->auth.msg.hdcp1.ainfo) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo,
+ &input->ainfo_write, &status,
+ hdcp, "ainfo_write"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status computations_validate_rx_test_for_repeater(
+ struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p,
+ &input->r0p_read, &status,
+ hdcp, "r0p_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx,
+ &input->rx_validation, &status,
+ hdcp, "rx_validation"))
+ goto out;
+ if (hdcp->connection.is_repeater) {
+ if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption)
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+ }
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+ &input->link_maintenance, &status,
+ hdcp, "link_maintenance"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK &&
+ event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integiry_check, &status,
+ hdcp, "link_integiry_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_ksv_ready,
+ &input->ready_check, &status,
+ hdcp, "ready_check"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ uint8_t device_count;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (is_dp_hdcp(hdcp)) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo,
+ &input->binfo_read_dp, &status,
+ hdcp, "binfo_read_dp"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ }
+ if (!mod_hdcp_execute_and_set(check_no_max_cascade,
+ &input->max_cascade_check, &status,
+ hdcp, "max_cascade_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_max_devs,
+ &input->max_devs_check, &status,
+ hdcp, "max_devs_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_device_count,
+ &input->device_count_check, &status,
+ hdcp, "device_count_check"))
+ goto out;
+ device_count = get_device_count(hdcp);
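+ /* each KSV is 5 bytes */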
+ hdcp->auth.msg.hdcp1.ksvlist_size = device_count * 5;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist,
+ &input->ksvlist_read, &status,
+ hdcp, "ksvlist_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp,
+ &input->vp_read, &status,
+ hdcp, "vp_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp,
+ &input->ksvlist_vp_validation, &status,
+ hdcp, "ksvlist_vp_validation"))
+ goto out;
+ if (input->encryption != PASS)
+ if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption,
+ &input->encryption, &status,
+ hdcp, "encryption"))
+ goto out;
+ if (is_dp_mst_hdcp(hdcp))
+ if (!mod_hdcp_execute_and_set(
+ mod_hdcp_hdcp1_enable_dp_stream_encryption,
+ &input->stream_encryption_dp, &status,
+ hdcp, "stream_encryption_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps,
+ &input->bcaps_read, &status,
+ hdcp, "bcaps_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp,
+ &input->hdcp_capable_dp, &status,
+ hdcp, "hdcp_capable_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ &&
+ event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_r0p_available_dp,
+ &input->r0p_available_dp, &status,
+ hdcp, "r0p_available_dp"))
+ goto out;
+out:
+ return status;
+}
+
+static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) {
+ event_ctx->unexpected_event = 1;
+ goto out;
+ }
+
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integiry_check, &status,
+ hdcp, "link_integiry_check"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check"))
+ goto out;
+out:
+ return status;
+}
+
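+/* run one authentication step, latch its PASS/FAIL result into the given
+ * transition input flag (tracing only when the flag changes), and return
+ * nonzero on success so callers can chain steps with early-out
+ */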
+uint8_t mod_hdcp_execute_and_set(
+ mod_hdcp_action func, uint8_t *flag,
+ enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str)
+{
+ *status = func(hdcp);
+ if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) {
+ HDCP_INPUT_PASS_TRACE(hdcp, str);
+ *flag = PASS;
+ } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) {
+ HDCP_INPUT_FAIL_TRACE(hdcp, str);
+ *flag = FAIL;
+ }
+ return (*status == MOD_HDCP_STATUS_SUCCESS);
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ status = wait_for_active_rx(hdcp, event_ctx, input);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(hdcp,
+ event_ctx, input);
+ break;
+ case H1_A45_AUTHENTICATED:
+ status = authenticated(hdcp, event_ctx, input);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ status = exchange_ksvs(hdcp, event_ctx, input);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ status = wait_for_r0_prime_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ status = computations_validate_rx_test_for_repeater(
+ hdcp, event_ctx, input);
+ break;
+ case D1_A4_AUTHENTICATED:
+ status = authenticated_dp(hdcp, event_ctx, input);
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ status = wait_for_ready(hdcp, event_ctx, input);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ status = read_ksv_list(hdcp, event_ctx, input);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
new file mode 100644
index 000000000000..136b8011ff3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ if (input->bksv_read != PASS || input->bcaps_read != PASS) {
+ /* 1A-04: keep retrying on port access failure */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
+ break;
+ case H1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(300, output);
+ set_state_id(hdcp, output,
+ H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER);
+ break;
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ if (input->bcaps_read != PASS ||
+ input->r0p_read != PASS ||
+ input->rx_validation != PASS ||
+ (!conn->is_repeater && input->encryption != PASS)) {
+ /* 1A-06: consider invalid r0' a failure */
+ /* 1A-08: consider bksv listed in SRM a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ callback_in_ms(0, output);
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY);
+ } else {
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case H1_A45_AUTHENTICATED:
+ if (input->link_maintenance != PASS) {
+ /* 1A-07: consider invalid ri' a failure */
+ /* 1A-07a: consider read ri' not returned a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ break;
+ case H1_A8_WAIT_FOR_READY:
+ if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-03: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ /* continue ksv list READY polling */
+ callback_in_ms(500, output);
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A9_READ_KSV_LIST);
+ break;
+ case H1_A9_READ_KSV_LIST:
+ if (input->bstatus_read != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS ||
+ input->device_count_check != PASS ||
+ input->ksvlist_read != PASS ||
+ input->vp_read != PASS ||
+ input->ksvlist_vp_validation != PASS ||
+ input->encryption != PASS) {
+ /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */
+ /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-04: consider invalid v' a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, H1_A45_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ status = MOD_HDCP_STATUS_INVALID_STATE;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
+
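+/*
+ * DP variant of the HDCP 1.4 state machine. Compared with the HDMI
+ * flow above (as the transitions below show): RX HDCP capability is
+ * probed first (D1_A0), R0' availability is polled with a 100 ms
+ * watchdog (D1_A23), and the authenticated state additionally checks
+ * link integrity and reauthentication requests reported via CP_IRQ.
+ */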
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input,
+ struct mod_hdcp_output *output)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ struct mod_hdcp_connection *conn = &hdcp->connection;
+ struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust;
+
+ switch (current_state(hdcp)) {
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ if (input->bcaps_read != PASS) {
+ /* 1A-04: no authentication on bcaps read failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->hdcp_capable_dp != PASS) {
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
+ break;
+ case D1_A1_EXCHANGE_KSVS:
+ if (input->add_topology != PASS ||
+ input->create_session != PASS) {
+ /* out of sync with psp state */
+ adjust->hdcp1.disable = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->an_write != PASS ||
+ input->aksv_write != PASS ||
+ input->bksv_read != PASS ||
+ input->bksv_validation != PASS ||
+ input->ainfo_write == FAIL) {
+ /* 1A-05: consider invalid bksv a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_watchdog_in_ms(hdcp, 100, output);
+ set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME);
+ break;
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ if (input->bstatus_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->r0p_available_dp != PASS) {
+ if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT)
+ fail_and_restart_in_ms(0, &status, output);
+ else
+ increment_stay_counter(hdcp);
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER);
+ break;
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ if (input->r0p_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->rx_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1A-06: consider invalid r0' a failure
+ * after 3 attempts.
+ * 1A-08: consider bksv listed in SRM a failure
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if ((!conn->is_repeater && input->encryption != PASS) ||
+ (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ if (conn->is_repeater) {
+ set_watchdog_in_ms(hdcp, 5000, output);
+ set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY);
+ } else {
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ }
+ break;
+ case D1_A4_AUTHENTICATED:
+ if (input->link_integiry_check != PASS ||
+ input->reauth_request_check != PASS) {
+ /* 1A-07: restart hdcp on a link integrity failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ break;
+ case D1_A6_WAIT_FOR_READY:
+ if (input->link_integiry_check == FAIL ||
+ input->reauth_request_check == FAIL) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ready_check != PASS) {
+ if (event_ctx->event ==
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) {
+ /* 1B-04: fail hdcp on ksv list READY timeout */
+ /* prevent black screen in next attempt */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(0, &status, output);
+ } else {
+ increment_stay_counter(hdcp);
+ }
+ break;
+ }
+ callback_in_ms(0, output);
+ set_state_id(hdcp, output, D1_A7_READ_KSV_LIST);
+ break;
+ case D1_A7_READ_KSV_LIST:
+ if (input->binfo_read_dp != PASS ||
+ input->max_cascade_check != PASS ||
+ input->max_devs_check != PASS) {
+ /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */
+ /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->device_count_check != PASS) {
+ /*
+ * some slow dongles don't update the device
+ * count as soon as downstream is connected;
+ * give them more time to react.
+ */
+ adjust->hdcp1.postpone_encryption = 1;
+ fail_and_restart_in_ms(1000, &status, output);
+ break;
+ } else if (input->ksvlist_read != PASS ||
+ input->vp_read != PASS) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (input->ksvlist_vp_validation != PASS) {
+ if (hdcp->state.stay_count < 2) {
+ /* allow 2 additional retries */
+ callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
+ } else {
+ /*
+ * 1B-05: consider invalid v' a failure
+ * after 3 attempts.
+ */
+ fail_and_restart_in_ms(0, &status, output);
+ }
+ break;
+ } else if (input->encryption != PASS ||
+ (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+ set_state_id(hdcp, output, D1_A4_AUTHENTICATED);
+ HDCP_FULL_DDC_TRACE(hdcp);
+ break;
+ default:
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
new file mode 100644
index 000000000000..e7baae059b85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1 */
+#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */
+#define HDCP_MAX_AUX_TRANSACTION_SIZE 16
+
+enum mod_hdcp_ddc_message_id {
+ MOD_HDCP_MESSAGE_ID_INVALID = -1,
+
+ /* HDCP 1.4 */
+
+ MOD_HDCP_MESSAGE_ID_READ_BKSV = 0,
+ MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ MOD_HDCP_MESSAGE_ID_READ_VH_X,
+ MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ MOD_HDCP_MESSAGE_ID_READ_BINFO,
+
+ MOD_HDCP_MESSAGE_ID_MAX
+};
+
+static const uint8_t hdcp_i2c_offsets[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF,
+};
+
+static const uint32_t hdcp_dpcd_addrs[] = {
+ [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
+ [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B,
+ [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020,
+ [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024,
+ [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028,
+ [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029,
+ [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c,
+ [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a,
+};
+
+static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ success = hdcp->config.ddc.funcs.read_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp_i2c_offsets[msg_id],
+ buf,
+ (uint32_t)buf_len);
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
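+/*
+ * For illustration: a hypothetical 40-byte DPCD read through read()
+ * above is split into ceil(40 / 16) = 3 AUX transactions of 16, 16
+ * and 8 bytes, each issued at hdcp_dpcd_addrs[msg_id] plus the
+ * running data_offset.
+ */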
+
+static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len,
+ uint8_t read_size)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, read_size);
+ status = read(hdcp, msg_id, buf + data_offset, cur_size);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+
+ return status;
+}
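+/*
+ * For illustration: a DP KSV list read through read_repeatedly() with
+ * KSV_READ_SIZE = 15 re-reads the 15-byte KSV FIFO window at DPCD
+ * address 0x6802c; a 3-device list (3 * 5 = 15 bytes) completes in
+ * one pass, larger lists in ceil(ksvlist_size / 15) passes.
+ */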
+
+static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
+ enum mod_hdcp_ddc_message_id msg_id,
+ uint8_t *buf,
+ uint32_t buf_len)
+{
+ bool success = true;
+ uint32_t cur_size = 0;
+ uint32_t data_offset = 0;
+
+ if (is_dp_hdcp(hdcp)) {
+ while (buf_len > 0) {
+ cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
+ success = hdcp->config.ddc.funcs.write_dpcd(
+ hdcp->config.ddc.handle,
+ hdcp_dpcd_addrs[msg_id] + data_offset,
+ buf + data_offset,
+ cur_size);
+
+ if (!success)
+ break;
+
+ buf_len -= cur_size;
+ data_offset += cur_size;
+ }
+ } else {
+ hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
+ memmove(&hdcp->buf[1], buf, buf_len);
+ success = hdcp->config.ddc.funcs.write_i2c(
+ hdcp->config.ddc.handle,
+ HDCP_I2C_ADDR,
+ hdcp->buf,
+ (uint32_t)(buf_len+1));
+ }
+
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+}
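+/*
+ * For illustration: writing the 8-byte An over I2C through write()
+ * above produces a 9-byte payload, the register offset 0x18
+ * (hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_AN]) followed by the
+ * 8 An bytes, in a single write_i2c() call to HDCP_I2C_ADDR.
+ */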
+
+enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV,
+ hdcp->auth.msg.hdcp1.bksv,
+ sizeof(hdcp->auth.msg.hdcp1.bksv));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS,
+ &hdcp->auth.msg.hdcp1.bcaps,
+ sizeof(hdcp->auth.msg.hdcp1.bcaps));
+}
+
+enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ 1);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus,
+ sizeof(hdcp->auth.msg.hdcp1.bstatus));
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp)
+{
+ return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p,
+ sizeof(hdcp->auth.msg.hdcp1.r0p));
+}
+
+/* special case, reading repeatedly at the same address, don't use read() */
+enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size,
+ KSV_READ_SIZE);
+ else
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0,
+ &hdcp->auth.msg.hdcp1.vp[0], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1,
+ &hdcp->auth.msg.hdcp1.vp[4], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2,
+ &hdcp->auth.msg.hdcp1.vp[8], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3,
+ &hdcp->auth.msg.hdcp1.vp[12], 4);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4,
+ &hdcp->auth.msg.hdcp1.vp[16], 4);
+out:
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp)
+{
+ enum mod_hdcp_status status;
+
+ if (is_dp_hdcp(hdcp))
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO,
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp,
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp));
+ else
+ status = MOD_HDCP_STATUS_INVALID_OPERATION;
+
+ return status;
+}
+
+enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV,
+ hdcp->auth.msg.hdcp1.aksv,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+}
+
+enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO,
+ &hdcp->auth.msg.hdcp1.ainfo,
+ sizeof(hdcp->auth.msg.hdcp1.ainfo));
+}
+
+enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp)
+{
+ return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN,
+ hdcp->auth.msg.hdcp1.an,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
new file mode 100644
index 000000000000..3982ced5f969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "hdcp.h"
+
+void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
+ uint8_t *buf, uint32_t buf_size)
+{
+ const uint8_t bytes_per_line = 16,
+ byte_size = 3,
+ newline_size = 1,
+ terminator_size = 1;
+ uint32_t line_count = msg_size / bytes_per_line,
+ trailing_bytes = msg_size % bytes_per_line;
+ uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count +
+ byte_size * trailing_bytes + newline_size + terminator_size;
+ uint32_t buf_pos = 0;
+ uint32_t i = 0;
+
+ if (buf_size >= target_size) {
+ for (i = 0; i < msg_size; i++) {
+ if (i % bytes_per_line == 0)
+ buf[buf_pos++] = '\n';
+ sprintf(&buf[buf_pos], "%02X ", msg[i]);
+ buf_pos += byte_size;
+ }
+ buf[buf_pos++] = '\0';
+ }
+}
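+/*
+ * Sizing example: for a 5-byte BKSV, line_count = 0 and
+ * trailing_bytes = 5, so target_size = 3 * 5 + 1 + 1 = 17 bytes
+ * (leading '\n', five "XX " triplets, '\0'); nothing is written when
+ * the caller's buffer is smaller than that.
+ */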
+
+char *mod_hdcp_status_to_str(int32_t status)
+{
+ switch (status) {
+ case MOD_HDCP_STATUS_SUCCESS:
+ return "MOD_HDCP_STATUS_SUCCESS";
+ case MOD_HDCP_STATUS_FAILURE:
+ return "MOD_HDCP_STATUS_FAILURE";
+ case MOD_HDCP_STATUS_RESET_NEEDED:
+ return "MOD_HDCP_STATUS_RESET_NEEDED";
+ case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
+ case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
+ return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
+ case MOD_HDCP_STATUS_INVALID_STATE:
+ return "MOD_HDCP_STATUS_INVALID_STATE";
+ case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
+ return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
+ case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
+ return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
+ case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
+ return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
+ case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
+ return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
+ case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
+ return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
+ case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
+ return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
+ case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
+ return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
+ case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
+ return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
+ case MOD_HDCP_STATUS_DDC_FAILURE:
+ return "MOD_HDCP_STATUS_DDC_FAILURE";
+ case MOD_HDCP_STATUS_INVALID_OPERATION:
+ return "MOD_HDCP_STATUS_INVALID_OPERATION";
+ default:
+ return "MOD_HDCP_STATUS_UNKNOWN";
+ }
+}
+
+char *mod_hdcp_state_id_to_str(int32_t id)
+{
+ switch (id) {
+ case HDCP_UNINITIALIZED:
+ return "HDCP_UNINITIALIZED";
+ case HDCP_INITIALIZED:
+ return "HDCP_INITIALIZED";
+ case HDCP_CP_NOT_DESIRED:
+ return "HDCP_CP_NOT_DESIRED";
+ case H1_A0_WAIT_FOR_ACTIVE_RX:
+ return "H1_A0_WAIT_FOR_ACTIVE_RX";
+ case H1_A1_EXCHANGE_KSVS:
+ return "H1_A1_EXCHANGE_KSVS";
+ case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER:
+ return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER";
+ case H1_A45_AUTHENTICATED:
+ return "H1_A45_AUTHENTICATED";
+ case H1_A8_WAIT_FOR_READY:
+ return "H1_A8_WAIT_FOR_READY";
+ case H1_A9_READ_KSV_LIST:
+ return "H1_A9_READ_KSV_LIST";
+ case D1_A0_DETERMINE_RX_HDCP_CAPABLE:
+ return "D1_A0_DETERMINE_RX_HDCP_CAPABLE";
+ case D1_A1_EXCHANGE_KSVS:
+ return "D1_A1_EXCHANGE_KSVS";
+ case D1_A23_WAIT_FOR_R0_PRIME:
+ return "D1_A23_WAIT_FOR_R0_PRIME";
+ case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER:
+ return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER";
+ case D1_A4_AUTHENTICATED:
+ return "D1_A4_AUTHENTICATED";
+ case D1_A6_WAIT_FOR_READY:
+ return "D1_A6_WAIT_FOR_READY";
+ case D1_A7_READ_KSV_LIST:
+ return "D1_A7_READ_KSV_LIST";
+ default:
+ return "UNKNOWN_STATE_ID";
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
new file mode 100644
index 000000000000..2fd0e0a893ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_LOG_H_
+#define MOD_HDCP_LOG_H_
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__)
+#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
+#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
+#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#endif
+
+/* default logs */
+#define HDCP_ERROR_TRACE(hdcp, status) \
+ HDCP_LOG_ERR(hdcp, \
+ "[Link %d] ERROR %s IN STATE %s", \
+ hdcp->config.index, \
+ mod_hdcp_status_to_str(status), \
+ mod_hdcp_state_id_to_str(hdcp->state.id))
+#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 enabled on display %d", \
+ hdcp->config.index, displayIndex)
+/* state machine logs */
+#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] HDCP_REMOVE_DISPLAY index %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_INPUT_PASS_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tPASS %s", \
+ hdcp->config.index, str)
+#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d]\tFAIL %s", \
+ hdcp->config.index, str)
+#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \
+ if (output->watchdog_timer_needed) \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s with %d ms watchdog", \
+ hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \
+ else \
+ HDCP_LOG_FSM(hdcp, \
+ "[Link %d] > %s", hdcp->config.index, \
+ mod_hdcp_state_id_to_str(id)); \
+} while (0)
+#define HDCP_TIMEOUT_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index)
+#define HDCP_CPIRQ_TRACE(hdcp) \
+ HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index)
+#define HDCP_EVENT_TRACE(hdcp, event) do { \
+ if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \
+ HDCP_TIMEOUT_TRACE(hdcp); \
+ else if (event == MOD_HDCP_EVENT_CPIRQ) \
+ HDCP_CPIRQ_TRACE(hdcp); \
+} while (0)
+/* TODO: find some way to tell if logging is off to save time */
+#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \
+ msg_name, hdcp->buf); \
+} while (0)
+#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \
+ mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
+ sizeof(hdcp->buf)); \
+ HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \
+ hdcp->config.index, msg_name,\
+ hdcp->buf); \
+} while (0)
+#define HDCP_FULL_DDC_TRACE(hdcp) do { \
+ HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \
+ sizeof(hdcp->auth.msg.hdcp1.bksv)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
+ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
+ sizeof(hdcp->auth.msg.hdcp1.an)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
+ sizeof(hdcp->auth.msg.hdcp1.aksv)); \
+ HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \
+ sizeof(hdcp->auth.msg.hdcp1.ainfo)); \
+ HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \
+ sizeof(hdcp->auth.msg.hdcp1.r0p)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BINFO", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \
+ sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \
+ HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \
+ hdcp->auth.msg.hdcp1.ksvlist_size); \
+ HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \
+ sizeof(hdcp->auth.msg.hdcp1.vp)); \
+} while (0)
+#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \
+ hdcp->config.index, i)
+#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \
+ hdcp->config.index)
+#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index)
+#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \
+ HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index)
+#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \
+} while (0)
+#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \
+ HDCP_LOG_TOP(hdcp, "\n"); \
+ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
+} while (0)
+
+#endif /* MOD_HDCP_LOG_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
new file mode 100644
index 000000000000..646d909bbc37
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#define MAX_NUM_DISPLAYS 24
+
+#include "hdcp.h"
+
+#include "amdgpu.h"
+#include "hdcp_psp.h"
+
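+/*
+ * Every PSP call in this file follows the same shared-buffer protocol
+ * (summarized from the functions below): clear the TA shared memory,
+ * fill in_msg for the command, set cmd_id, invoke the TA through
+ * psp_dtm_invoke()/psp_hdcp_invoke(), then check the status written
+ * back by the TA before trusting any out_msg fields.
+ */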
+enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ uint8_t i;
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) {
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ display = &hdcp->connection.displays[i];
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display = NULL;
+ struct mod_hdcp_link *link = &hdcp->connection.link;
+ uint8_t i;
+
+ if (!psp->dtm_context.dtm_initialized) {
+ DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+ if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
+ display = &hdcp->connection.displays[i];
+
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id;
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv,
+ TA_HDCP__HDCP1_KSV_SIZE);
+
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p;
+ hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+
+ if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
+ /* needs second part of authentication */
+ hdcp->connection.is_repeater = 1;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ hdcp->connection.is_repeater = 0;
+ } else {
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
+
+ if (!is_dp_mst_hdcp(hdcp)) {
+ display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
+ }
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size;
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist,
+ hdcp->auth.msg.hdcp1.ksvlist_size);
+
+ memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp,
+ sizeof(hdcp->auth.msg.hdcp1.vp));
+
+ hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo =
+ is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+ int i = 0;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
+
+ if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->connection.displays[i].adjust.disable)
+ continue;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ }
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
+{
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id;
+
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+
+ return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+}
+
+enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
+ enum mod_hdcp_encryption_status *encryption_status)
+{
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
+ return MOD_HDCP_STATUS_FAILURE;
+
+ *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
+
+ return MOD_HDCP_STATUS_SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
new file mode 100644
index 000000000000..986fc07ea9ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_HDCP_HDCP_PSP_H_
+#define MODULES_HDCP_HDCP_PSP_H_
+
+/*
+ * NOTE: These parameters are a one-to-one copy of the
+ * parameters required by PSP
+ */
+enum bgd_security_hdcp_encryption_level {
+ HDCP_ENCRYPTION_LEVEL__INVALID = 0,
+ HDCP_ENCRYPTION_LEVEL__OFF,
+ HDCP_ENCRYPTION_LEVEL__ON
+};
+
+/* DTM related enumerations */
+/**********************************************************/
+
+enum ta_dtm_command {
+ TA_DTM_COMMAND__UNUSED_1 = 1,
+ TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2,
+ TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE
+};
+
+enum ta_dtm_status {
+ TA_DTM_STATUS__SUCCESS = 0x00,
+ TA_DTM_STATUS__GENERIC_FAILURE = 0x01,
+ TA_DTM_STATUS__INVALID_PARAMETER = 0x02,
+ TA_DTM_STATUS__NULL_POINTER = 0x03
+};
+
+/* input/output structures for DTM commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+enum ta_dtm_hdcp_version_max_supported {
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22,
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23
+};
+
+struct ta_dtm_topology_update_input_v2 {
+ /*
+ * display handle is unique across the driver and is used to identify
+ * a display for all security interfaces which reference displays
+ * such as HDCP
+ */
+ uint32_t display_handle;
+ uint32_t is_active;
+ uint32_t is_miracast;
+ uint32_t controller;
+ uint32_t ddc_line;
+ uint32_t dig_be;
+ uint32_t dig_fe;
+ uint32_t dp_mst_vcid;
+ uint32_t is_assr;
+ uint32_t max_hdcp_supported_version;
+};
+
+struct ta_dtm_topology_assr_enable {
+ uint32_t display_topology_dig_be_index;
+};
+
+/**
+ * Output structures
+ */
+
+/* No output structures yet */
+
+union ta_dtm_cmd_input {
+ struct ta_dtm_topology_update_input_v2 topology_update_v2;
+ struct ta_dtm_topology_assr_enable topology_assr_enable;
+};
+
+union ta_dtm_cmd_output {
+ uint32_t reserved;
+};
+
+struct ta_dtm_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_dtm_status dtm_status;
+ uint32_t reserved;
+ union ta_dtm_cmd_input dtm_in_message;
+ union ta_dtm_cmd_output dtm_out_message;
+};
+
+int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
+ uint64_t fence_mc_addr);
+
+enum ta_hdcp_command {
+ TA_HDCP_COMMAND__INITIALIZE,
+ TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
+ TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION,
+ TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS,
+};
+
+/* HDCP related enumerations */
+/**********************************************************/
+#define TA_HDCP__INVALID_SESSION 0xFFFF
+#define TA_HDCP__HDCP1_AN_SIZE 8
+#define TA_HDCP__HDCP1_KSV_SIZE 5
+#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127
+#define TA_HDCP__HDCP1_V_PRIME_SIZE 20
+
+enum ta_hdcp_status {
+ TA_HDCP_STATUS__SUCCESS = 0x00,
+ TA_HDCP_STATUS__GENERIC_FAILURE = 0x01,
+ TA_HDCP_STATUS__NULL_POINTER = 0x02,
+ TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03,
+ TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04,
+ TA_HDCP_STATUS__INVALID_PARAMETER = 0x05,
+ TA_HDCP_STATUS__VHX_ERROR = 0x06,
+ TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07,
+ TA_HDCP_STATUS__SRM_FAILURE = 0x08,
+ TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09,
+ TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A,
+ TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B,
+ TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C,
+ TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D,
+ TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E,
+ TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F,
+ TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10,
+ TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11,
+ TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12,
+ TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13,
+ TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14,
+ TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15,
+ TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16,
+ TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17
+};
+
+enum ta_hdcp_authentication_status {
+ TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+};
+
+/* input/output structures for HDCP commands */
+/**********************************************************/
+struct ta_hdcp_cmd_hdcp1_create_session_input {
+ uint8_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_create_session_output {
+ uint32_t session_handle;
+ uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_primary;
+ uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE];
+ uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t ainfo_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_destroy_session_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_input {
+ uint32_t session_handle;
+ uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE];
+ uint8_t bcaps;
+ uint16_t r0_prime_primary;
+ uint16_t r0_prime_secondary;
+};
+
+struct ta_hdcp_cmd_hdcp1_first_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_input {
+ uint32_t session_handle;
+ uint16_t bstatus_binfo;
+ uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE];
+ uint32_t ksv_list_size;
+ uint8_t pj_prime;
+ uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE];
+};
+
+struct ta_hdcp_cmd_hdcp1_second_part_authentication_output {
+ enum ta_hdcp_authentication_status authentication_status;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_encryption_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input {
+ uint32_t session_handle;
+ uint32_t display_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_input {
+ uint32_t session_handle;
+};
+
+struct ta_hdcp_cmd_hdcp1_get_encryption_status_output {
+ uint32_t protection_level;
+};
+
+/**********************************************************/
+/* Common input structure for HDCP callbacks */
+union ta_hdcp_cmd_input {
+ struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption;
+ struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input hdcp1_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status;
+};
+
+/* Common output structure for HDCP callbacks */
+union ta_hdcp_cmd_output {
+ struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session;
+ struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication;
+ struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status;
+};
+/**********************************************************/
+
+struct ta_hdcp_shared_memory {
+ uint32_t cmd_id;
+ enum ta_hdcp_status hdcp_status;
+ uint32_t reserved;
+ union ta_hdcp_cmd_input in_msg;
+ union ta_hdcp_cmd_output out_msg;
+};
+
+enum psp_status {
+ PSP_STATUS__SUCCESS = 0,
+ PSP_STATUS__ERROR_INVALID_PARAMS,
+ PSP_STATUS__ERROR_GENERIC,
+ PSP_STATUS__ERROR_OUT_OF_MEMORY,
+ PSP_STATUS__ERROR_UNSUPPORTED_FEATURE
+};
+
+#endif /* MODULES_HDCP_HDCP_PSP_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dc187844d10b..dbe7835aabcf 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -92,6 +92,7 @@ struct mod_vrr_params_btr {
uint32_t inserted_duration_in_us;
uint32_t frames_to_insert;
uint32_t frame_counter;
+ uint32_t margin_in_us;
};
struct mod_vrr_params_fixed_refresh {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
new file mode 100644
index 000000000000..dea21702edff
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_HDCP_H_
+#define MOD_HDCP_H_
+
+#include "os_types.h"
+#include "signal_types.h"
+
+/* Forward Declarations */
+struct mod_hdcp;
+
+#define MAX_NUM_OF_DISPLAYS 6
+#define MAX_NUM_OF_ATTEMPTS 4
+#define MAX_NUM_OF_ERROR_TRACE 10
+
+/* detailed return status */
+enum mod_hdcp_status {
+ MOD_HDCP_STATUS_SUCCESS = 0,
+ MOD_HDCP_STATUS_FAILURE,
+ MOD_HDCP_STATUS_RESET_NEEDED,
+ MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
+ MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
+ MOD_HDCP_STATUS_INVALID_STATE,
+ MOD_HDCP_STATUS_NOT_IMPLEMENTED,
+ MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
+ MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
+ MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
+ MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
+ MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
+ MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
+ MOD_HDCP_STATUS_INVALID_OPERATION,
+ MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
+ MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
+ MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
+ MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
+ MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
+ MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
+};
+
+struct mod_hdcp_displayport {
+ uint8_t rev;
+ uint8_t assr_supported;
+};
+
+struct mod_hdcp_hdmi {
+ uint8_t reserved;
+};
+
+enum mod_hdcp_operation_mode {
+ MOD_HDCP_MODE_OFF,
+ MOD_HDCP_MODE_DEFAULT,
+ MOD_HDCP_MODE_DP,
+ MOD_HDCP_MODE_DP_MST
+};
+
+enum mod_hdcp_display_state {
+ MOD_HDCP_DISPLAY_INACTIVE = 0,
+ MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
+ MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
+};
+
+struct mod_hdcp_ddc {
+ void *handle;
+ struct {
+ bool (*read_i2c)(void *handle,
+ uint32_t address,
+ uint8_t offset,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_i2c)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ bool (*read_dpcd)(void *handle,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+ bool (*write_dpcd)(void *handle,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+ } funcs;
+};
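+/*
+ * For illustration: the module issues all HDCP 1.4 register traffic
+ * through these callbacks; read_i2c()/write_i2c() address the HDCP
+ * port at 7-bit I2C address 0x3a on HDMI, while read_dpcd()/
+ * write_dpcd() target the 0x68000 HDCP DPCD range on DP (see
+ * hdcp_ddc.c).
+ */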
+
+struct mod_hdcp_psp {
+ void *handle;
+ void *funcs;
+};
+
+struct mod_hdcp_display_adjustment {
+ uint8_t disable : 1;
+ uint8_t reserved : 7;
+};
+
+struct mod_hdcp_link_adjustment_hdcp1 {
+ uint8_t disable : 1;
+ uint8_t postpone_encryption : 1;
+ uint8_t reserved : 6;
+};
+
+struct mod_hdcp_link_adjustment_hdcp2 {
+ uint8_t disable : 1;
+ uint8_t disable_type1 : 1;
+ uint8_t force_no_stored_km : 1;
+ uint8_t increase_h_prime_timeout: 1;
+ uint8_t reserved : 4;
+};
+
+struct mod_hdcp_link_adjustment {
+ uint8_t auth_delay;
+ struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
+ struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
+};
+
+struct mod_hdcp_error {
+ enum mod_hdcp_status status;
+ uint8_t state_id;
+};
+
+struct mod_hdcp_trace {
+ struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
+ uint8_t error_count;
+};
+
+enum mod_hdcp_encryption_status {
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON,
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON
+};
+
+/* per link events dm has to notify to hdcp module */
+enum mod_hdcp_event {
+ MOD_HDCP_EVENT_CALLBACK = 0,
+ MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+ MOD_HDCP_EVENT_CPIRQ
+};
+
+/* output flags from module requesting timer operations */
+struct mod_hdcp_output {
+ uint8_t callback_needed;
+ uint8_t callback_stop;
+ uint8_t watchdog_timer_needed;
+ uint8_t watchdog_timer_stop;
+ uint16_t callback_delay;
+ uint16_t watchdog_timer_delay;
+};
+
+/* used to represent per-display info */
+struct mod_hdcp_display {
+ enum mod_hdcp_display_state state;
+ uint8_t index;
+ uint8_t controller;
+ uint8_t dig_fe;
+ union {
+ uint8_t vc_id;
+ };
+ struct mod_hdcp_display_adjustment adjust;
+};
+
+/* used to represent per-link info */
+/* if a link has multiple displays, they share the same link info */
+struct mod_hdcp_link {
+ enum mod_hdcp_operation_mode mode;
+ uint8_t dig_be;
+ uint8_t ddc_line;
+ union {
+ struct mod_hdcp_displayport dp;
+ struct mod_hdcp_hdmi hdmi;
+ };
+ struct mod_hdcp_link_adjustment adjust;
+};
+
+/* a query structure for a display's hdcp information */
+struct mod_hdcp_display_query {
+ const struct mod_hdcp_display *display;
+ const struct mod_hdcp_link *link;
+ const struct mod_hdcp_trace *trace;
+ enum mod_hdcp_encryption_status encryption_status;
+};
+
+/* contains values set per external display configuration change */
+struct mod_hdcp_config {
+ struct mod_hdcp_psp psp;
+ struct mod_hdcp_ddc ddc;
+ uint8_t index;
+};
+
+struct mod_hdcp;
+
+/* dm allocates one mod_hdcp per dc_link on dm init, sized by this query */
+size_t mod_hdcp_get_memory_size(void);
+
+/* called per link on link creation */
+enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp,
+ struct mod_hdcp_config *config);
+
+/* called per link on link destroy */
+enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp);
+
+/* called per display on cp_desired set to true */
+enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
+ struct mod_hdcp_link *link, struct mod_hdcp_display *display,
+ struct mod_hdcp_output *output);
+
+/* called per display on cp_desired set to false */
+enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_output *output);
+
+/* called to query hdcp information on a specific index */
+enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp,
+ uint8_t index, struct mod_hdcp_display_query *query);
+
+/* called per link on connectivity change */
+enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp,
+ struct mod_hdcp_output *output);
+
+/* called per link on events (e.g. callback, watchdog, CP_IRQ) */
+enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp,
+ enum mod_hdcp_event event, struct mod_hdcp_output *output);
+
+/* called to convert enum mod_hdcp_status to a C string */
+char *mod_hdcp_status_to_str(int32_t status);
+
+/* called to convert state id to a C string */
+char *mod_hdcp_state_id_to_str(int32_t id);
+
+/* called to convert signal type to operation mode */
+enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
+ enum signal_type signal);
+#endif /* MOD_HDCP_H_ */
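Taken together, the API above defines the DM-side lifecycle: size and allocate the opaque mod_hdcp, set it up per link, then drive the state machine purely through events while honoring the timer requests returned in mod_hdcp_output. A minimal sketch of that flow, assuming hypothetical dm_* helpers and pre-filled config/link/display structures (none of these helpers exist in the patch):

	struct mod_hdcp *hdcp = dm_zalloc(mod_hdcp_get_memory_size());
	struct mod_hdcp_output out = {0};

	if (mod_hdcp_setup(hdcp, &config) != MOD_HDCP_STATUS_SUCCESS)
		return;

	mod_hdcp_add_display(hdcp, &link, &display, &out);

	/* the module never arms timers itself; it only asks via output flags */
	if (out.callback_needed)
		dm_schedule_callback(out.callback_delay);
	if (out.watchdog_timer_needed)
		dm_schedule_watchdog(out.watchdog_timer_delay);

	/* when a timer fires or CP_IRQ arrives, feed the event back in */
	mod_hdcp_process_event(hdcp, MOD_HDCP_EVENT_CALLBACK, &out);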
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index d930bdecb117..ca8ce3c55337 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -35,4 +35,7 @@ struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index d885d642ed7f..db6b08f6d093 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -31,6 +31,7 @@
#include "dc.h"
#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
+#define HF_VSIF_VERSION 1
// VTEM Byte Offset
#define VTEM_PB0 0
@@ -395,3 +396,100 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
+/**
+ *****************************************************************************
+ * Function: mod_build_hf_vsif_infopacket
+ *
+ * @brief
+ * Prepare the HDMI Vendor Specific InfoFrame (VSIF),
+ * built up according to the HDMI specification.
+ *
+ * @param [in] stream: data needed to construct the VSIF (e.g. timing_3d_format)
+ * @param [out] info_packet: output structure in which the VSIF is stored
+ * @param [in] ALLMEnabled: nonzero if ALLM (Auto Low Latency Mode) signaling is requested
+ * @param [in] ALLMValue: the ALLM state to signal when enabled
+ *****************************************************************************
+ */
+void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue)
+{
+ unsigned int length = 5;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ bool bALLM = (bool)ALLMEnabled;
+ bool bALLMVal = (bool)ALLMValue;
+
+ info_packet->valid = false;
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160
+ && format == TIMING_3D_FORMAT_NONE)
+ hdmi_vic_mode = true;
+
+ if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM)
+ return;
+
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ if (bALLM) {
+ info_packet->sb[1] = 0xD8;
+ info_packet->sb[2] = 0x5D;
+ info_packet->sb[3] = 0xC4;
+ info_packet->sb[4] = HF_VSIF_VERSION;
+ }
+
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = (uint8_t) (length);
+
+ if (bALLM)
+ info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1);
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
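The checksum written into sb[0] above follows the standard HDMI InfoFrame rule: the three header bytes plus all payload bytes plus the checksum byte must sum to zero modulo 256. A small self-check illustrating the invariant (a hypothetical helper, not part of the patch):

	/* true if the packed InfoFrame satisfies the HDMI checksum rule */
	static bool vsif_checksum_ok(const struct dc_info_packet *p, unsigned int length)
	{
		uint8_t sum = p->hb0 + p->hb1 + p->hb2;
		unsigned int i;

		for (i = 0; i <= length; i++)	/* sb[0] is the checksum byte itself */
			sum += p->sb[i];

		return sum == 0;	/* uint8_t arithmetic is already mod 256 */
	}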
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 05e2be856037..4e2f615c3566 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -80,18 +80,18 @@ struct abm_parameters {
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0},
- {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0},
- {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70},
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70},
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
};
static const struct abm_parameters * const abm_settings[] = {
@@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = {
/* NOTE: iRAM is 256B in size */
struct iram_table_v_2 {
/* flags */
- uint16_t flags; /* 0x00 U16 */
+ uint16_t min_abm_backlight; /* 0x00 U16 */
/* parameters for ABM2.0 algorithm */
uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
@@ -140,10 +140,10 @@ struct iram_table_v_2 {
/* For reading PSR State directly from IRAM */
uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
uint8_t dmcu_state; /* 0xf6 */
uint16_t blRampReduction; /* 0xf7 */
@@ -164,42 +164,43 @@ struct iram_table_v_2_2 {
uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
- uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
- uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
- uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
- uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
- uint8_t pad[21]; /* 0x6b U0.8 */
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */
+ uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */
+ uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */
+ uint16_t min_abm_backlight; /* 0x6b U16 */
+ uint8_t pad[19]; /* 0x6d U0.8 */
/* parameters for crgb conversion */
- uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
/* parameters for custom curve */
/* thresholds for brightness --> backlight */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
/* offsets for brightness --> backlight */
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
/* For reading PSR State directly from IRAM */
- uint8_t psr_state; /* 0xf0 */
- uint8_t dmcu_mcp_interface_version; /* 0xf1 */
- uint8_t dmcu_abm_feature_version; /* 0xf2 */
- uint8_t dmcu_psr_feature_version; /* 0xf3 */
- uint16_t dmcu_version; /* 0xf4 */
- uint8_t dmcu_state; /* 0xf6 */
-
- uint8_t dummy1; /* 0xf7 */
- uint8_t dummy2; /* 0xf8 */
- uint8_t dummy3; /* 0xf9 */
- uint8_t dummy4; /* 0xfa */
- uint8_t dummy5; /* 0xfb */
- uint8_t dummy6; /* 0xfc */
- uint8_t dummy7; /* 0xfd */
- uint8_t dummy8; /* 0xfe */
- uint8_t dummy9; /* 0xff */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_mcp_interface_version; /* 0xf1 */
+ uint8_t dmcu_abm_feature_version; /* 0xf2 */
+ uint8_t dmcu_psr_feature_version; /* 0xf3 */
+ uint16_t dmcu_version; /* 0xf4 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint8_t dummy1; /* 0xf7 */
+ uint8_t dummy2; /* 0xf8 */
+ uint8_t dummy3; /* 0xf9 */
+ uint8_t dummy4; /* 0xfa */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
};
#pragma pack(pop)
@@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters
{
unsigned int set = params.set;
- ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
ram_table->deviation_gain = 0xb3;
ram_table->blRampReduction =
@@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
ram_table->deviation_gain[0] = 0xb3;
ram_table->deviation_gain[1] = 0xa8;
ram_table->deviation_gain[2] = 0x98;
@@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
unsigned int set = params.set;
ram_table->flags = 0x0;
+
+ ram_table->min_abm_backlight =
+ cpu_to_be16(params.min_abm_backlight);
+
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index da5df00fedce..e54157026330 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -38,6 +38,7 @@ struct dmcu_iram_parameters {
unsigned int backlight_lut_array_size;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
+ unsigned int min_abm_backlight;
unsigned int set;
};
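With the new field in place, callers pass the ABM backlight floor alongside the existing ramping parameters, and the fill_iram_v_2* routines byte-swap it via cpu_to_be16() since the DMCU firmware reads multi-byte iRAM fields big-endian regardless of host endianness. A hedged sketch of the caller side (the concrete values are illustrative only):

	struct dmcu_iram_parameters params = {
		/* ... backlight LUT and ramping fields as before ... */
		.min_abm_backlight = 0x00C8,	/* illustrative floor */
		.set = 0,			/* selects abm_settings_config0 */
	};
	/* fill_iram_v_2_2(ram_table, params) stores the floor big-endian in iRAM */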
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 8889aaceec60..dc7eb28f0296 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -143,6 +143,8 @@ enum PP_FEATURE_MASK {
enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
+ DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4,
+ DC_PSR_MASK = 0x8,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
index a761ba07f937..fce965984e76 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmBUS_CNTL 0x1508
#define mmCONFIG_CNTL 0x1509
#define mmCONFIG_MEMSIZE 0x150a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
index 8fbfd0261d27..39cc4880beb4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1
#define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0
#define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 809759f7bb81..8d05d6ca1c8d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_FUSESTRAP0 0x14D7
#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
index adc71b01f793..73435687d049 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h
@@ -32,6 +32,8 @@
#define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0
#define MM_DATA__MM_DATA_MASK 0xffffffff
#define MM_DATA__MM_DATA__SHIFT 0x0
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2
+#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2
#define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1
#define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
index be4249adb356..eddf83ec1c39 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h
@@ -9859,6 +9859,8 @@
#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
#define mmDP0_DP_MSA_MISC 0x210e
#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP0_DP_VID_TIMING 0x2110
#define mmDP0_DP_VID_TIMING_BASE_IDX 2
#define mmDP0_DP_VID_N 0x2111
@@ -10187,6 +10189,8 @@
#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
#define mmDP1_DP_MSA_MISC 0x220e
#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP1_DP_VID_TIMING 0x2210
#define mmDP1_DP_VID_TIMING_BASE_IDX 2
#define mmDP1_DP_VID_N 0x2211
@@ -10515,6 +10519,8 @@
#define mmDP2_DP_STEER_FIFO_BASE_IDX 2
#define mmDP2_DP_MSA_MISC 0x230e
#define mmDP2_DP_MSA_MISC_BASE_IDX 2
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP2_DP_VID_TIMING 0x2310
#define mmDP2_DP_VID_TIMING_BASE_IDX 2
#define mmDP2_DP_VID_N 0x2311
@@ -10843,6 +10849,8 @@
#define mmDP3_DP_STEER_FIFO_BASE_IDX 2
#define mmDP3_DP_MSA_MISC 0x240e
#define mmDP3_DP_MSA_MISC_BASE_IDX 2
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP3_DP_VID_TIMING 0x2410
#define mmDP3_DP_VID_TIMING_BASE_IDX 2
#define mmDP3_DP_VID_N 0x2411
@@ -11171,6 +11179,8 @@
#define mmDP4_DP_STEER_FIFO_BASE_IDX 2
#define mmDP4_DP_MSA_MISC 0x250e
#define mmDP4_DP_MSA_MISC_BASE_IDX 2
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
#define mmDP4_DP_VID_TIMING 0x2510
#define mmDP4_DP_VID_TIMING_BASE_IDX 2
#define mmDP4_DP_VID_N 0x2511
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index ca16d9125fbc..2bfaaa8157d0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -1146,7 +1146,14 @@
#define mmATC_L2_MEM_POWER_LS_BASE_IDX 0
#define mmATC_L2_CGTT_CLK_CTRL 0x080c
#define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e
+#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f
+#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0
+#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810
+#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0
+#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811
+#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2pfdec
// base address: 0xa100
@@ -1206,7 +1213,14 @@
#define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
#define mmVM_L2_CGTT_CLK_CTRL 0x085e
#define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0
-
+#define mmVM_L2_MEM_ECC_INDEX 0x0860
+#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861
+#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0
+#define mmVM_L2_MEM_ECC_CNT 0x0862
+#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0
+#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863
+#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0
// addressBlock: gc_utcl2_vml2vcdec
// base address: 0xa200
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 064c4bb1dc62..d4c613a85352 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -6661,7 +6661,6 @@
#define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
// addressBlock: gc_utcl2_vml2pfdec
//VM_L2_CNTL
#define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
@@ -6991,7 +6990,22 @@
#define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L
#define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L
-
+//VM_L2_MEM_ECC_INDEX
+#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_WALKER_MEM_ECC_INDEX
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0
+#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL
+//VM_L2_MEM_ECC_CNT
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
+//VM_L2_WALKER_MEM_ECC_CNT
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe
+#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L
+#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L
// addressBlock: gc_utcl2_vml2vcdec
//VM_CONTEXT0_CNTL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
index 4bcacf529852..991128bb9476 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h
@@ -22,6 +22,9 @@
#ifndef _nbio_7_4_0_SMN_HEADER
#define _nbio_7_4_0_SMN_HEADER
+// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk
+// base address: 0x10100000
+#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define smnCPM_CONTROL 0x11180460
@@ -53,4 +56,13 @@
#define smnPCIE_RX_NUM_NAK 0x11180038
#define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c
+// addressBlock: nbio_iohub_nb_misc_misc_cfgdec
+// base address: 0x13a10000
+#define smnIOHC_INTERRUPT_EOI 0x13a10120
+
+// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec
+// base address: 0x13a20000
+#define smnRAS_GLOBAL_STATUS_LO 0x13a20020
+#define smnRAS_GLOBAL_STATUS_HI 0x13a20024
+
#endif // _nbio_7_4_0_SMN_HEADER
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index 994e796a28d7..ce5830ebe095 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2793,8 +2793,8 @@
#define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2
#define mmBIF_FB_EN 0x00ff
#define mmBIF_FB_EN_BASE_IDX 2
-#define mmBIF_BUSY_DELAY_CNTR 0x0100
-#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2
+#define mmBIF_INTR_CNTL 0x0100
+#define mmBIF_INTR_CNTL_BASE_IDX 2
#define mmBIF_MST_TRANS_PENDING_VF 0x0109
#define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2
#define mmBIF_SLV_TRANS_PENDING_VF 0x010a
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
index d467b939c971..07f04b2b5bdd 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h
@@ -20420,9 +20420,9 @@
#define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1
#define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L
#define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L
-//BIF_BUSY_DELAY_CNTR
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0
-#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL
+//BIF_INTR_CNTL
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
//BIF_MST_TRANS_PENDING_VF
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0
#define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL
@@ -48436,4 +48436,47 @@
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L
#define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L
+//IOHC_INTERRUPT_EOI
+#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0
+#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1
+#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2
+#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L
+#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L
+#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L
+
+//RAS_GLOBAL_STATUS_LO
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8
+#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9
+#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa
+#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb
+#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf
+#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L
+#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L
+#define RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L
+#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L
+#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L
+#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L
+#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L
+#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L
+#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L
+#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L
+//RAS_GLOBAL_STATUS_HI
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1
+#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L
+#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L
+
#endif
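These sh_mask headers follow the driver's usual pattern: every field gets a paired _MASK/__SHIFT macro, and a field is extracted as (reg & MASK) >> SHIFT. Decoding the new RAS status bits might look like this (smn_read is a hypothetical read helper for the SMN address space):

	uint32_t reg_val = smn_read(smnRAS_GLOBAL_STATUS_LO);	/* hypothetical helper */

	bool fatal = (reg_val & RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK) >>
		     RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT;
	bool corr  = (reg_val & RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK) >>
		     RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT;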
diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
index dc9895a684fe..096d878eb1de 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h
@@ -588,11 +588,15 @@
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L
#define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L
//IH_CLK_CTRL
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d
#define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e
#define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f
+#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L
+#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L
#define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L
#define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L
#define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
index dbc2e723f659..71169daa701a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 6af9f0217b34..61a9a84e0c3a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
index bd3685166779..351446754c72 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h
@@ -49,6 +49,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
index 627906674fe8..4bfd5f8ba66c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h
@@ -194,6 +194,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
index f35aba72e640..21da61c398f5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h
@@ -52,6 +52,7 @@
#define ixCG_SPLL_FUNC_CNTL_5 0xc0500150
#define ixCG_SPLL_FUNC_CNTL_6 0xc0500154
#define ixCG_SPLL_FUNC_CNTL_7 0xc0500158
+#define ixCG_SPLL_STATUS 0xC050015C
#define ixSPLL_CNTL_MODE 0xc0500160
#define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164
#define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
index 481ee6560aa9..f64fe0fbcb32 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h
@@ -220,6 +220,8 @@
#define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff
#define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2
+#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1
#define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0
#define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
index d3876052562b..687d6843c258 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h
@@ -121,6 +121,98 @@
#define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0
#define mmCKSVII2C_IC_COMP_TYPE 0x006d
#define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CON 0x0080
+#define mmCKSVII2C1_IC_CON_BASE_IDX 0
+#define mmCKSVII2C1_IC_TAR 0x0081
+#define mmCKSVII2C1_IC_TAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SAR 0x0082
+#define mmCKSVII2C1_IC_SAR_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_MADDR 0x0083
+#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DATA_CMD 0x0084
+#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085
+#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086
+#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087
+#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088
+#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089
+#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a
+#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_STAT 0x008b
+#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_INTR_MASK 0x008c
+#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0
+#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d
+#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0
+#define mmCKSVII2C1_IC_RX_TL 0x008e
+#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_TL 0x008f
+#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_INTR 0x0090
+#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091
+#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092
+#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093
+#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094
+#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095
+#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096
+#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097
+#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098
+#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_START_DET 0x0099
+#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a
+#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE 0x009b
+#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0
+#define mmCKSVII2C1_IC_STATUS 0x009c
+#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_TXFLR 0x009d
+#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_RXFLR 0x009e
+#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_HOLD 0x009f
+#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0
+#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1
+#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_CR 0x00a2
+#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3
+#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4
+#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0
+#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5
+#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6
+#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0
+#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7
+#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0
+#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8
+#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9
+#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0
+#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa
+#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab
+#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac
+#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0
+#define mmCKSVII2C1_IC_COMP_TYPE 0x00ad
+#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0
#define mmSMUIO_MP_RESET_INTR 0x00c1
#define mmSMUIO_MP_RESET_INTR_BASE_IDX 0
#define mmSMUIO_SOC_HALT 0x00c2
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
index f8afa3518bf2..6905a9618127 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h
@@ -268,6 +268,182 @@
//CKSVII2C_IC_COMP_TYPE
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0
#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL
+//CKSVII2C1_IC_CON
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9
+#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L
+#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L
+#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L
+#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L
+#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L
+#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L
+#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L
+#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L
+//CKSVII2C1_IC_TAR
+#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0
+#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa
+#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc
+#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL
+#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L
+#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L
+#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L
+//CKSVII2C1_IC_SAR
+#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0
+#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL
+//CKSVII2C1_IC_HS_MADDR
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0
+#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L
+//CKSVII2C1_IC_DATA_CMD
+#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0
+#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8
+#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9
+#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa
+#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL
+#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L
+#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L
+#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L
+//CKSVII2C1_IC_SS_SCL_HCNT
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_SS_SCL_LCNT
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_HCNT
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_FS_SCL_LCNT
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_HCNT
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_HS_SCL_LCNT
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0
+#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL
+//CKSVII2C1_IC_INTR_STAT
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_INTR_MASK
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L
+#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L
+#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L
+#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L
+#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L
+#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L
+#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L
+#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L
+#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L
+//CKSVII2C1_IC_RAW_INTR_STAT
+//CKSVII2C1_IC_RX_TL
+//CKSVII2C1_IC_TX_TL
+//CKSVII2C1_IC_CLR_INTR
+//CKSVII2C1_IC_CLR_RX_UNDER
+//CKSVII2C1_IC_CLR_RX_OVER
+//CKSVII2C1_IC_CLR_TX_OVER
+//CKSVII2C1_IC_CLR_RD_REQ
+//CKSVII2C1_IC_CLR_TX_ABRT
+//CKSVII2C1_IC_CLR_RX_DONE
+//CKSVII2C1_IC_CLR_ACTIVITY
+//CKSVII2C1_IC_CLR_STOP_DET
+//CKSVII2C1_IC_CLR_START_DET
+//CKSVII2C1_IC_CLR_GEN_CALL
+//CKSVII2C1_IC_ENABLE
+#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L
+//CKSVII2C1_IC_STATUS
+#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0
+#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1
+#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2
+#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3
+#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6
+#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L
+#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L
+#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L
+#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L
+#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L
+#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L
+#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L
+//CKSVII2C1_IC_TXFLR
+//CKSVII2C1_IC_RXFLR
+//CKSVII2C1_IC_SDA_HOLD
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL
+//CKSVII2C1_IC_TX_ABRT_SOURCE
+//CKSVII2C1_IC_SLV_DATA_NACK_ONLY
+//CKSVII2C1_IC_DMA_CR
+//CKSVII2C1_IC_DMA_TDLR
+//CKSVII2C1_IC_DMA_RDLR
+//CKSVII2C1_IC_SDA_SETUP
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0
+#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL
+//CKSVII2C1_IC_ACK_GENERAL_CALL
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0
+#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L
+//CKSVII2C1_IC_ENABLE_STATUS
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2
+#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L
+#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L
//SMUIO_MP_RESET_INTR
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0
#define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
index cf2149cc12ee..90350f46a0c4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
@@ -24,6 +24,18 @@
// addressBlock: uvd0_mmsch_dec
// base address: 0x1e000
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
// addressBlock: uvd0_jpegnpdec
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e88541d67aa0..dd7cbc00a0aa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1
/* Total 32bit cap indication */
enum atombios_firmware_capability
{
- ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
- ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
- ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
- ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
- ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
- ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
+ ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
+ ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
+ ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
};
enum atom_cooling_solution_id{
@@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1
uint16_t used_by_driver_in_kb;
};
+/* This is part of vram_usagebyfirmware_v2_1 */
+struct vram_reserve_block
+{
+ uint32_t start_address_in_kb;
+ uint16_t used_by_firmware_in_kb;
+ uint16_t used_by_driver_in_kb;
+};
+
+/* Definitions for constants */
+enum atomfirmware_internal_constants
+{
+ ONE_KiB = 0x400,
+ ONE_MiB = 0x100000,
+};
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 5dcb776548d8..7ec4331e67f2 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -25,7 +25,6 @@
#define _DISCOVERY_H_
#define PSP_HEADER_SIZE 256
-#define BINARY_MAX_SIZE (64 << 10)
#define BINARY_SIGNATURE 0x28211407
#define DISCOVERY_TABLE_SIGNATURE 0x53445049
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
new file mode 100644
index 000000000000..79af4258f259
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_NBIF_7_4_H__
+#define __IRQSRCS_NBIF_7_4_H__
+
+#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated
+#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off
+#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller
+#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF)
+#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF)
+#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT
+#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT
+#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives unsupported atomic opcode from MC
+#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW 0xCF // BIF receive atomic request from MC while AtomicOp Requester is not enabled in PCIE config space
+
+#endif // __IRQSRCS_NBIF_7_4_H__
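These source IDs are what an interrupt handler matches after decoding an IH ring entry from the BIF client; a hedged dispatch sketch (the handle_* routines are placeholders, not part of this patch):

	switch (src_id) {
	case NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT:
		handle_ras_controller_intr();	/* placeholder */
		break;
	case NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT:
		handle_athub_errevent();	/* placeholder */
		break;
	default:
		break;
	}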
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 98b9533e672b..2cd217e60125 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -291,15 +291,18 @@ struct kfd2kgd_calls {
uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset);
- bool (*get_atc_vmid_pasid_mapping_valid)(
+ bool (*get_atc_vmid_pasid_mapping_info)(
struct kgd_dev *kgd,
- uint8_t vmid);
- uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
- struct kgd_dev *kgd,
- uint8_t vmid);
+ uint8_t vmid,
+ uint16_t *p_pasid);
+ /* No longer needed from GFXv9 onward. The scratch base address is
+ * passed to the shader by the CP. It's the user mode driver's
+ * responsibility.
+ */
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
+
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 27cf0afaa0b4..a7f92d0b3a90 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -179,6 +179,11 @@ enum pp_mp1_state {
PP_MP1_STATE_RESET,
};
+enum pp_df_cstate {
+ DF_CSTATE_DISALLOW = 0,
+ DF_CSTATE_ALLOW,
+};
+
#define PP_GROUP_MASK 0xF0000000
#define PP_GROUP_SHIFT 28
@@ -215,6 +220,9 @@ enum pp_mp1_state {
((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \
(support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT)
+#define XGMI_MODE_PSTATE_D3 0
+#define XGMI_MODE_PSTATE_D0 1
+
struct seq_file;
enum amd_pp_clock_type;
struct amd_pp_simple_clock_info;
@@ -312,6 +320,8 @@ struct amd_pm_funcs {
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
int (*asic_reset_mode_2)(void *handle);
+ int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
};
#endif
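As with every other entry in amd_pm_funcs, the new hooks are optional and are reached through the function-pointer table with a NULL check. A sketch of the caller side (the funcs/handle naming is illustrative):

	const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;	/* illustrative */

	if (funcs && funcs->set_df_cstate)
		funcs->set_df_cstate(adev->powerplay.pp_handle, DF_CSTATE_DISALLOW);
	if (funcs && funcs->set_xgmi_pstate)
		funcs->set_xgmi_pstate(adev->powerplay.pp_handle, XGMI_MODE_PSTATE_D0);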
diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
index 094648cac392..07633e22e99a 100644
--- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h
+++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h
@@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } },
+ { { 0, 0, 0, 0, 0 } } } };
static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
{ { 0, 0, 0, 0, 0 } },
@@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300
#define UVD0_BASE__INST6_SEG3 0
#define UVD0_BASE__INST6_SEG4 0
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+#define DCN_BASE__INST0_SEG2 0x000034C0
+#define DCN_BASE__INST0_SEG3 0
+#define DCN_BASE__INST0_SEG4 0
+
+#define DCN_BASE__INST1_SEG0 0
+#define DCN_BASE__INST1_SEG1 0
+#define DCN_BASE__INST1_SEG2 0
+#define DCN_BASE__INST1_SEG3 0
+#define DCN_BASE__INST1_SEG4 0
+
+#define DCN_BASE__INST2_SEG0 0
+#define DCN_BASE__INST2_SEG1 0
+#define DCN_BASE__INST2_SEG2 0
+#define DCN_BASE__INST2_SEG3 0
+#define DCN_BASE__INST2_SEG4 0
+
+#define DCN_BASE__INST3_SEG0 0
+#define DCN_BASE__INST3_SEG1 0
+#define DCN_BASE__INST3_SEG2 0
+#define DCN_BASE__INST3_SEG3 0
+#define DCN_BASE__INST3_SEG4 0
+
+#define DCN_BASE__INST4_SEG0 0
+#define DCN_BASE__INST4_SEG1 0
+#define DCN_BASE__INST4_SEG2 0
+#define DCN_BASE__INST4_SEG3 0
+#define DCN_BASE__INST4_SEG4 0
#endif
diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h
index c14ba65a2415..adf1b754666e 100644
--- a/drivers/gpu/drm/amd/include/vega10_enum.h
+++ b/drivers/gpu/drm/amd/include/vega10_enum.h
@@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001,
typedef enum MTYPE {
MTYPE_NC = 0x00000000,
MTYPE_WC = 0x00000001,
+MTYPE_RW = 0x00000001,
MTYPE_CC = 0x00000002,
MTYPE_UC = 0x00000003,
} MTYPE;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index fa8ad7db2b3a..7932eb163a00 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -969,6 +969,14 @@ static int pp_dpm_switch_power_profile(void *handle,
workload = hwmgr->workload_setting[index];
}
+ if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
+ hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
+ mutex_unlock(&hwmgr->smu_lock);
+ return -EINVAL;
+ }
+ }
+
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
mutex_unlock(&hwmgr->smu_lock);
@@ -1421,6 +1429,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
{
struct pp_hwmgr *hwmgr = handle;
+ *cap = false;
if (!hwmgr)
return -EINVAL;
@@ -1548,6 +1557,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return ret;
}
+static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr)
+ return -EINVAL;
+
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1606,4 +1649,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_ppfeature_status = pp_set_ppfeature_status,
.asic_reset_mode_2 = pp_asic_reset_mode_2,
.smu_i2c_bus_access = pp_smu_i2c_bus_access,
+ .set_df_cstate = pp_set_df_cstate,
+ .set_xgmi_pstate = pp_set_xgmi_pstate,
};
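/*
 * Illustrative usage sketch (editorial, not part of the patch): callers
 * reach the two new hooks through the amd_pm_funcs table, e.g.
 *
 *   const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;
 *   void *handle = adev->powerplay.pp_handle;
 *
 *   if (funcs->set_df_cstate)
 *           funcs->set_df_cstate(handle, DF_CSTATE_ALLOW);
 *   if (funcs->set_xgmi_pstate)
 *           funcs->set_xgmi_pstate(handle, XGMI_MODE_PSTATE_D0);
 *
 * XGMI_MODE_PSTATE_D0 comes from the header hunk earlier in this patch;
 * DF_CSTATE_ALLOW is assumed here from the pp_df_cstate enum this series
 * references.
 */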
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 4acf139ea014..40b546c75fc2 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -25,11 +25,16 @@
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "amd_pcie.h"
+#include "vega20_ppt.h"
+#include "arcturus_ppt.h"
+#include "navi10_ppt.h"
+#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
@@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
goto failed;
@@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
}
failed:
+ mutex_unlock(&smu->mutex);
+
return size;
}
+static int smu_feature_update_enable_state(struct smu_context *smu,
+ uint64_t feature_mask,
+ bool enabled)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return ret;
+
+	feature_low = (feature_mask >> 0) & 0xffffffff;
+ feature_high = (feature_mask >> 32) & 0xffffffff;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&feature->mutex);
+ if (enabled)
+ bitmap_or(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ else
+ bitmap_andnot(feature->enabled, feature->enabled,
+ (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
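/*
 * Worked example (editorial, not part of the patch): the 64-bit mask is
 * split into the two dwords carried by the Low/High SMC messages. With
 * bits 3 and 40 set:
 *
 *   uint64_t feature_mask = (1ULL << 3) | (1ULL << 40);
 *   uint32_t feature_low  = feature_mask & 0xffffffff;          == 0x00000008
 *   uint32_t feature_high = (feature_mask >> 32) & 0xffffffff;  == 0x00000100
 *
 * feature_low rides SMU_MSG_EnableSmuFeaturesLow (or the Disable variant)
 * and feature_high rides the corresponding *High message, after which the
 * cached feature->enabled bitmap is updated under feature->mutex.
 */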
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
int ret = 0;
@@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ mutex_lock(&smu->mutex);
+
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
- return ret;
+ goto out;
feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
@@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
if (feature_2_enabled) {
ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
if (ret)
- return ret;
+ goto out;
}
if (feature_2_disabled) {
ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
if (ret)
- return ret;
+ goto out;
}
+out:
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max)
{
- int ret = 0, clk_id = 0;
- uint32_t param;
+ int ret = 0;
if (min <= 0 && max <= 0)
return -EINVAL;
@@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- clk_id = smu_clk_get_index(smu, clk_type);
- if (clk_id < 0)
- return clk_id;
-
- if (max > 0) {
- param = (uint32_t)((clk_id << 16) | (max & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
- if (ret)
- return ret;
- }
-
- if (min > 0) {
- param = (uint32_t)((clk_id << 16) | (min & 0xffff));
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
- if (ret)
- return ret;
- }
-
-
+ ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
return ret;
}
@@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max)
+ uint32_t *min, uint32_t *max, bool lock_needed)
{
uint32_t clock_limit;
int ret = 0;
@@ -237,6 +276,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max)
return -EINVAL;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
switch (clk_type) {
case SMU_MCLK:
@@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
*min = clock_limit / 100;
if (max)
*max = clock_limit / 100;
-
- return 0;
+ } else {
+ /*
+	 * TODO: Have each ASIC (ASIC_ppt funcs) control the callbacks exposed
+	 * to the core driver, and add helpers for the common parts (SMU_v11_x | SMU_v12_x funcs).
+ */
+ ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
- /*
- * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
- * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
- */
- ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
return true;
}
-
+/**
+ * smu_dpm_set_power_gate - power gate/ungate the specific IP block
+ *
+ * @smu: smu_context pointer
+ * @block_type: the IP block to power gate/ungate
+ * @gate: to power gate if true, ungate otherwise
+ *
+ * This API takes no smu->mutex lock protection because:
+ * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
+ *    in which case the caller guarantees the call is race free.
+ * 2. Or it is called on a user request to change
+ *    power_dpm_force_performance_level, in which case smu->mutex is
+ *    already held by the parent API smu_force_performance_level.
+ */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -364,12 +422,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
return ret;
}
-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
-{
- /* not support power state */
- return POWER_STATE_TYPE_DEFAULT;
-}
-
int smu_get_power_num_states(struct smu_context *smu,
struct pp_states_info *state_info)
{
@@ -439,7 +491,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int ret = 0;
int table_id = smu_table_get_index(smu, table_index);
- if (!table_data || table_id >= smu_table->table_count || table_id < 0)
+ if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table = &smu_table->tables[table_index];
@@ -463,7 +515,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
return ret;
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
if (!drv2smu)
memcpy(table_data, table->cpu_addr, table->size);
@@ -483,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
- if (amdgpu_dpm != 1)
+ if (!is_support_sw_smu(adev))
return false;
if (adev->asic_type == CHIP_VEGA20)
@@ -495,16 +547,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ uint32_t powerplay_table_size;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- return smu_table->power_play_table_size;
+ powerplay_table_size = smu_table->power_play_table_size;
+
+ mutex_unlock(&smu->mutex);
+
+ return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
@@ -531,13 +590,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
memcpy(smu_table->hardcode_pptable, buf, size);
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
- mutex_unlock(&smu->mutex);
+
+ /*
+	 * A special hw_fini action may be needed for custom pptable
+	 * uploading (on Navi1x, the DPM disablement is skipped).
+ */
+ smu->uploading_custom_pp_table = true;
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
- return ret;
+ smu->uploading_custom_pp_table = false;
failed:
mutex_unlock(&smu->mutex);
@@ -569,41 +633,7 @@ int smu_feature_init_dpm(struct smu_context *smu)
return ret;
}
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled)
-{
- uint32_t feature_low = 0, feature_high = 0;
- int ret = 0;
-
- if (!smu->pm_enabled)
- return ret;
- feature_low = (feature_mask >> 0 ) & 0xffffffff;
- feature_high = (feature_mask >> 32) & 0xffffffff;
-
- if (enabled) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- } else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
- if (ret)
- return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
- if (ret)
- return ret;
-
- }
-
- return ret;
-}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
@@ -633,8 +663,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- uint64_t feature_mask = 0;
- int ret = 0;
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
@@ -642,22 +670,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
WARN_ON(feature_id > feature->feature_num);
- feature_mask = 1ULL << feature_id;
-
- mutex_lock(&feature->mutex);
- ret = smu_feature_update_enable_state(smu, feature_mask, enable);
- if (ret)
- goto failed;
-
- if (enable)
- test_and_set_bit(feature_id, feature->enabled);
- else
- test_and_clear_bit(feature_id, feature->enabled);
-
-failed:
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return smu_feature_update_enable_state(smu,
+ 1ULL << feature_id,
+ enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
@@ -707,20 +722,27 @@ static int smu_set_funcs(struct amdgpu_device *adev)
{
struct smu_context *smu = &adev->smu;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ vega20_set_ppt_funcs(smu);
+ break;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ navi10_set_ppt_funcs(smu);
+ break;
case CHIP_ARCTURUS:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v11_0_set_smu_funcs(smu);
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+		smu->od_enabled = false;
break;
case CHIP_RENOIR:
- if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
- smu->od_enabled = true;
- smu_v12_0_set_smu_funcs(smu);
+ renoir_set_ppt_funcs(smu);
break;
default:
return -EINVAL;
@@ -736,6 +758,7 @@ static int smu_early_init(void *handle)
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
+ smu->is_apu = false;
mutex_init(&smu->mutex);
return smu_set_funcs(adev);
@@ -749,11 +772,10 @@ static int smu_late_init(void *handle)
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
smu_handle_task(&adev->smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT);
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_COMPLETE_INIT,
+ false);
return 0;
}
@@ -919,14 +941,9 @@ static int smu_init_fb_allocations(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
- uint32_t i = 0;
- int32_t ret = 0;
+ int ret, i;
- if (table_count <= 0)
- return -EINVAL;
-
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
ret = amdgpu_bo_create_kernel(adev,
@@ -942,7 +959,7 @@ static int smu_init_fb_allocations(struct smu_context *smu)
return 0;
failed:
- for (; i > 0; i--) {
+ while (--i >= 0) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -957,13 +974,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- uint32_t table_count = smu_table->table_count;
uint32_t i = 0;
- if (table_count == 0 || tables == NULL)
+ if (!tables)
return 0;
- for (i = 0 ; i < table_count; i++) {
+ for (i = 0; i < SMU_TABLE_COUNT; i++) {
if (tables[i].size == 0)
continue;
amdgpu_bo_free_kernel(&tables[i].bo,
@@ -974,50 +990,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu)
return 0;
}
-static int smu_override_pcie_parameters(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
- int ret;
-
- if (adev->flags & AMD_IS_APU)
- return 0;
-
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
- ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
- if (ret)
- pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
- return ret;
-}
-
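/*
 * Worked example (editorial, not part of the patch): per the bit layout in
 * the removed comment above, a Gen4-capable x16 link packs as pcie_gen = 3
 * and pcie_width = 6, so
 *
 *   smu_pcie_arg = (1 << 16) | (3 << 8) | 6 == 0x00010306
 *
 * i.e. LCLK DPM level 1, GEN4, x16. The packing itself is not lost: a later
 * hunk re-adds the smu_override_pcie_parameters() call after DPM enablement,
 * now dispatched through the swSMU wrapper layer.
 */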
static int smu_smc_table_hw_init(struct smu_context *smu,
bool initialize)
{
@@ -1092,8 +1064,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- /* issue RunAfllBtc msg */
- ret = smu_run_afll_btc(smu);
+ /* issue Run*Btc msg */
+ ret = smu_run_btc(smu);
if (ret)
return ret;
@@ -1106,10 +1078,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
if (adev->asic_type != CHIP_ARCTURUS) {
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
@@ -1138,6 +1106,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -1147,7 +1121,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}
@@ -1226,29 +1200,46 @@ static int smu_free_memory_pool(struct smu_context *smu)
return ret;
}
-static int smu_hw_init(void *handle)
+static int smu_start_smc_engine(struct smu_context *smu)
{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->asic_type < CHIP_NAVI10) {
- ret = smu_load_microcode(smu);
- if (ret)
- return ret;
+ if (smu->ppt_funcs->load_microcode) {
+ ret = smu->ppt_funcs->load_microcode(smu);
+ if (ret)
+ return ret;
+ }
}
}
- ret = smu_check_fw_status(smu);
+ if (smu->ppt_funcs->check_fw_status) {
+ ret = smu->ppt_funcs->check_fw_status(smu);
+ if (ret)
+ pr_err("SMC is not ready\n");
+ }
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ ret = smu_start_smc_engine(smu);
if (ret) {
- pr_err("SMC firmware status is not correct\n");
+ pr_err("SMU is not ready yet!\n");
return ret;
}
if (adev->flags & AMD_IS_APU) {
smu_powergate_sdma(&adev->smu, false);
smu_powergate_vcn(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, true);
}
if (!smu->pm_enabled)
@@ -1291,6 +1282,11 @@ failed:
return ret;
}
+static int smu_stop_dpms(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures);
+}
+
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1303,6 +1299,33 @@ static int smu_hw_fini(void *handle)
smu_powergate_vcn(&adev->smu, true);
}
+ ret = smu_stop_thermal_control(smu);
+ if (ret) {
+ pr_warn("Fail to stop thermal control!\n");
+ return ret;
+ }
+
+ /*
+	 * For custom pptable uploading, skip disabling the DPM features
+	 * on Navi1x ASICs:
+	 *   - The gfx related features are under RLC control on those
+	 *     ASICs, and reenabling them would require a much more
+	 *     costly RLC reinitialization.
+	 *
+	 *   - The SMU firmware can handle the DPM reenablement
+	 *     properly on its own.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+ pr_warn("Fail to stop Dpms!\n");
+ return ret;
+ }
+ }
+
kfree(table_context->driver_pptable);
table_context->driver_pptable = NULL;
@@ -1344,7 +1367,10 @@ static int smu_suspend(void *handle)
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
+ bool baco_feature_is_enabled = false;
+
+	if (!(adev->flags & AMD_IS_APU))
+ baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
ret = smu_system_features_control(smu, false);
if (ret)
@@ -1363,6 +1389,8 @@ static int smu_suspend(void *handle)
if (adev->asic_type >= CHIP_NAVI10 &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1375,7 +1403,11 @@ static int smu_resume(void *handle)
pr_info("SMU is resuming...\n");
- mutex_lock(&smu->mutex);
+ ret = smu_start_smc_engine(smu);
+ if (ret) {
+ pr_err("SMU is not ready yet!\n");
+ goto failed;
+ }
ret = smu_smc_table_hw_init(smu, false);
if (ret)
@@ -1385,13 +1417,16 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
- mutex_unlock(&smu->mutex);
+ if (smu->is_apu)
+ smu_set_gfx_cgpg(&adev->smu, true);
+
+ smu->disable_uclk_switch = 0;
pr_info("SMU is resumed successfully!\n");
return 0;
+
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1409,8 +1444,9 @@ int smu_display_configuration_change(struct smu_context *smu,
mutex_lock(&smu->mutex);
- smu_set_deep_sleep_dcefclk(smu,
- display_config->min_dcef_deep_sleep_set_clk / 100);
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -1529,7 +1565,8 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
+
+ if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1587,9 +1624,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -1653,7 +1690,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
workload = smu->workload_setting[index];
if (smu->power_profile_mode != workload)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
}
return ret;
@@ -1661,18 +1698,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id)
+ enum amd_pp_task task_id,
+ bool lock_needed)
{
int ret = 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_set_cpu_power_state(smu);
if (ret)
- return ret;
+ goto out;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1683,6 +1724,10 @@ int smu_handle_task(struct smu_context *smu,
break;
}
+out:
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
@@ -1715,7 +1760,7 @@ int smu_switch_power_profile(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- smu_set_power_profile_mode(smu, &workload, 0);
+ smu_set_power_profile_mode(smu, &workload, 0, false);
mutex_unlock(&smu->mutex);
@@ -1727,7 +1772,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
enum amd_dpm_forced_level level;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
@@ -1742,15 +1787,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int ret = 0;
- if (!smu_dpm_ctx->dpm_context)
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
+ mutex_lock(&smu->mutex);
+
ret = smu_enable_umd_pstate(smu, &level);
- if (ret)
+ if (ret) {
+ mutex_unlock(&smu->mutex);
return ret;
+ }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+
+ mutex_unlock(&smu->mutex);
return ret;
}
@@ -1766,6 +1818,144 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
return ret;
}
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int ret = 0;
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_debug("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ uint16_t msg;
+ int ret;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ /* some asics may not support those messages */
+ if (smu_msg_get_index(smu, msg) < 0) {
+ mutex_unlock(&smu->mutex);
+ return 0;
+ }
+
+ ret = smu_send_smc_msg(smu, msg);
+ if (ret)
+ pr_err("[PrepareMp1] Failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
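/*
 * Illustrative usage sketch (editorial, not part of the patch): a caller
 * preparing MP1 for a device reset would issue
 *
 *   ret = smu_set_mp1_state(&adev->smu, PP_MP1_STATE_RESET);
 *
 * ASICs that lack the mapped PrepareMp1For* message fall out of the
 * smu_msg_get_index() check above with 0, so callers need no per-ASIC
 * guards.
 */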
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ int ret = 0;
+
+ /*
+ * The SMC is not fully ready. That may be
+ * expected as the IP may be masked.
+ * So, just return without error.
+ */
+ if (!smu->pm_enabled)
+ return 0;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ if (ret)
+ pr_err("[SetDfCstate] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_write_watermarks_table(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+
+ table = &smu_table->tables[SMU_TABLE_WATERMARKS];
+
+ if (!table->cpu_addr)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
+ true);
+
+ return ret;
+}
+
+int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+ void *table = watermarks->cpu_addr;
+
+ mutex_lock(&smu->mutex);
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
const struct amd_ip_funcs smu_ip_funcs = {
.name = "smu",
.early_init = smu_early_init,
@@ -1802,3 +1992,559 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
.rev = 0,
.funcs = &smu_ip_funcs,
};
+
+int smu_load_microcode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->load_microcode)
+ ret = smu->ppt_funcs->load_microcode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_check_fw_status(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->check_fw_status)
+ ret = smu->ppt_funcs->check_fw_status(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_gfx_cgpg)
+ ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_rpm)
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_limit)
+ ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_limit)
+ ret = smu->ppt_funcs->set_power_limit(smu, limit);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->print_clk_levels)
+ ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_od_percentage)
+ ret = smu->ppt_funcs->get_od_percentage(smu, type);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_od_percentage)
+ ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->od_edit_dpm_table)
+ ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->read_sensor)
+ ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_power_profile_mode)
+ ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed)
+{
+ int ret = 0;
+
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_power_profile_mode)
+ ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_control_mode(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_control_mode)
+ ret = smu->ppt_funcs->get_fan_control_mode(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_control_mode(struct smu_context *smu, int value)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_control_mode)
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_percent)
+ ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_fan_speed_percent)
+ ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_fan_speed_rpm)
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_deep_sleep_dcefclk)
+ ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_active_display_count)
+ ret = smu->ppt_funcs->set_active_display_count(smu, count);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type)
+ ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_high_clocks)
+ ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_latency)
+ ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_clock_by_type_with_voltage)
+ ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_clock_voltage_request)
+ ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->display_disable_memory_clock_switch)
+ ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_notify_smu_enable_pwe(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->notify_smu_enable_pwe)
+ ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_xgmi_pstate)
+ ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_set_azalia_d3_pme(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->set_azalia_d3_pme)
+ ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+bool smu_baco_is_support(struct smu_context *smu)
+{
+ bool ret = false;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_is_support)
+ ret = smu->ppt_funcs->baco_is_support(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
+{
+	if (!smu->ppt_funcs->baco_get_state)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+ *state = smu->ppt_funcs->baco_get_state(smu);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_baco_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->baco_reset)
+ ret = smu->ppt_funcs->baco_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_mode2_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->mode2_reset)
+ ret = smu->ppt_funcs->mode2_reset(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+ ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_uclk_dpm_states)
+ ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_current_power_state)
+ pm_state = smu->ppt_funcs->get_current_power_state(smu);
+
+ mutex_unlock(&smu->mutex);
+
+ return pm_state;
+}
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table)
+{
+ int ret = 0;
+
+ mutex_lock(&smu->mutex);
+
+ if (smu->ppt_funcs->get_dpm_clock_table)
+ ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+ uint32_t ret = 0;
+
+ if (smu->ppt_funcs->get_pptable_power_limit)
+ ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index d493a3f8c07a..58c7c4a3053e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -36,6 +37,12 @@
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
#include "nbio/nbio_7_4_sh_mask.h"
+#include "amdgpu_xgmi.h"
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include "amdgpu_ras.h"
+
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -112,8 +119,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown),
MSG_MAP(SoftReset, PPSMC_MSG_SoftReset),
MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc),
- MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc),
- MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc),
+ MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize),
@@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(SMU_METRICS),
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(OVERDRIVE),
+ TAB_MAP(I2C_COMMANDS),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -294,6 +301,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table
SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
@@ -528,9 +538,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int arcturus_run_btc_afll(struct smu_context *smu)
+static int arcturus_run_btc(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ if (ret) {
+ pr_err("RunAfllBtc failed!\n");
+ return ret;
+ }
+
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -610,12 +628,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
return ret;
}
+ /*
+	 * When DPM is disabled, there is only one clock level, and it is
+	 * safe to assume that is always the current clock.
+ */
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_MCLK:
@@ -635,9 +658,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_SOCCLK:
@@ -657,9 +681,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
case SMU_FCLK:
@@ -679,9 +704,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
for (i = 0; i < single_dpm_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, single_dpm_table->dpm_levels[i].value,
- arcturus_freqs_in_same_level(
+ (clocks.num_levels == 1) ? "*" :
+ (arcturus_freqs_in_same_level(
clocks.data[i].clocks_in_khz / 1000,
- now / 100) ? "*" : "");
+ now / 100) ? "*" : ""));
break;
default:
@@ -756,8 +782,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
uint32_t soft_min_level, soft_max_level;
int ret = 0;
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -792,91 +816,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
- single_dpm_table = &(dpm_table->mem_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_SOCCLK:
- single_dpm_table = &(dpm_table->soc_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
- break;
-
case SMU_FCLK:
- single_dpm_table = &(dpm_table->fclk_table);
-
- if (soft_max_level >= single_dpm_table->count) {
- pr_err("Clock level specified %d is over max allowed %d\n",
- soft_max_level, single_dpm_table->count - 1);
- ret = -EINVAL;
- break;
- }
-
- single_dpm_table->dpm_state.soft_min_level =
- single_dpm_table->dpm_levels[soft_min_level].value;
- single_dpm_table->dpm_state.soft_max_level =
- single_dpm_table->dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
- if (ret) {
- pr_err("Failed to upload boot level to lowest!\n");
- break;
- }
-
- ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
- if (ret)
- pr_err("Failed to upload dpm max level to highest!\n");
-
+ /*
+ * Should not arrive here since Arcturus does not
+ * support mclk/socclk/fclk softmin/softmax settings
+ */
+ ret = -EINVAL;
break;
default:
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1043,7 +995,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -1186,6 +1138,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_level;
int ret = 0;
@@ -1199,40 +1152,27 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_level].value;
- /* uclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
-
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->mem_table.dpm_levels[soft_level].value;
-
- /* socclk */
- if (highest)
- soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- else
- soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
-
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload boot level to %s!\n",
highest ? "highest" : "lowest");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload dpm max level to %s!\n!",
highest ? "highest" : "lowest");
return ret;
}
+ if (hive)
+ /*
+ * Force XGMI Pstate to highest or lowest
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0);
+
return ret;
}
@@ -1240,6 +1180,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
{
struct arcturus_dpm_table *dpm_table =
(struct arcturus_dpm_table *)smu->smu_dpm.dpm_context;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0);
uint32_t soft_min_level, soft_max_level;
int ret = 0;
@@ -1251,34 +1192,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu)
dpm_table->gfx_table.dpm_state.soft_max_level =
dpm_table->gfx_table.dpm_levels[soft_max_level].value;
- /* uclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table));
- dpm_table->mem_table.dpm_state.soft_min_level =
- dpm_table->gfx_table.dpm_levels[soft_min_level].value;
- dpm_table->mem_table.dpm_state.soft_max_level =
- dpm_table->gfx_table.dpm_levels[soft_max_level].value;
-
- /* socclk */
- soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table));
- soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table));
- dpm_table->soc_table.dpm_state.soft_min_level =
- dpm_table->soc_table.dpm_levels[soft_min_level].value;
- dpm_table->soc_table.dpm_state.soft_max_level =
- dpm_table->soc_table.dpm_levels[soft_max_level].value;
-
- ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Bootup Levels!");
return ret;
}
- ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
if (ret) {
pr_err("Failed to upload DPM Max Levels!");
return ret;
}
+ if (hive)
+ /*
+ * Reset XGMI Pstate back to default
+ * TODO: revise this when xgmi dpm is functional
+ */
+ ret = smu_v11_0_set_xgmi_pstate(smu, 0);
+
return ret;
}
@@ -1329,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1360,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
@@ -1891,6 +1816,260 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1);
+ if (ret) {
+ pr_err("[EnableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0);
+ if (ret) {
+ pr_err("[DisableVCNDPM] failed!\n");
+ return ret;
+ }
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+
+static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+		/* The first 2 commands are always writes, carrying the 2-byte EEPROM address */
+ if (i < 2)
+ cmd->Cmd = 1;
+ else
+ cmd->Cmd = write;
+
+
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP in the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless if read or write to simplify code */
+ cmd->RegisterAddr = data[i];
+ }
+}
+
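/*
 * Illustrative layout sketch (editorial, not part of the patch): a read of
 * four data bytes from EEPROM offset 0x0123 passes numbytes = 6 with
 * data[0] = 0x01 and data[1] = 0x23, producing:
 *
 *   SwI2cCmds[0]: Cmd = 1 (write), RegisterAddr = 0x01   <- address high
 *   SwI2cCmds[1]: Cmd = 1 (write), RegisterAddr = 0x23   <- address low
 *   SwI2cCmds[2]: Cmd = 0 (read),  CmdConfig |= RESTART
 *   SwI2cCmds[3]: Cmd = 0 (read)
 *   SwI2cCmds[4]: Cmd = 0 (read)
 *   SwI2cCmds[5]: Cmd = 0 (read),  CmdConfig |= STOP
 */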
+static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i, ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS];
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].Data;
+
+ pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else
+ pr_err("arcturus_i2c_eeprom_read_data - error occurred :%x", ret);
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ memset(&req, 0, sizeof(req));
+ arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+		 * According to the EEPROM spec, the EEPROM needs at most 10 ms to
+		 * flush its internal RX buffer after a STOP is issued at the end
+		 * of a write transaction. During this time the EEPROM will not
+		 * respond to any more commands, so wait a bit longer.
+ */
+ msleep(10);
+
+ } else
+ pr_err("arcturus_i2c_write- error occurred :%x", ret);
+
+ return ret;
+}
+
+static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+ * once and hence the data needs to be spliced into chunks and sent each
+ * chunk separately
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+			/* Insert the EEPROM dest address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = arcturus_i2c_eeprom_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = arcturus_i2c_eeprom_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
+
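+/*
+ * Usage sketch (illustrative only, not part of this change): the xfer hook
+ * above expects every i2c_msg buffer to start with a 2-byte big-endian
+ * EEPROM offset, followed by the payload bytes. Assuming the adapter is
+ * reachable as adev->pm.smu_i2c and "eeprom_addr" is the 7-bit slave
+ * address, a hypothetical 4-byte read from offset 0x0010 could look like:
+ *
+ *	uint8_t buf[2 + 4] = { 0x00, 0x10 };	// offset first, data after
+ *	struct i2c_msg msg = {
+ *		.addr  = eeprom_addr,
+ *		.flags = I2C_M_RD,
+ *		.len   = sizeof(buf),
+ *		.buf   = buf,
+ *	};
+ *
+ *	if (i2c_transfer(&adev->pm.smu_i2c, &msg, 1) != 1)
+ *		return -EIO;	// on success the read bytes land in buf[2..5]
+ */
+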
+static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
+ .master_xfer = arcturus_i2c_eeprom_i2c_xfer,
+ .functionality = arcturus_i2c_eeprom_i2c_func,
+};
+
+static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_eeprom_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
+{
+ i2c_del_adapter(control);
+}
+
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -1909,7 +2088,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = arcturus_get_allowed_feature_mask,
/* btc */
- .run_afll_btc = arcturus_run_btc_afll,
+ .run_btc = arcturus_run_btc,
/* dpm/clk tables */
.set_default_dpm_table = arcturus_set_default_dpm_table,
.populate_umd_state_clk = arcturus_populate_umd_state_clk,
@@ -1929,12 +2108,62 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
.is_dpm_running = arcturus_is_dpm_running,
+ .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable,
+ .i2c_eeprom_init = arcturus_i2c_eeprom_control_init,
+ .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .get_pptable_power_limit = arcturus_get_pptable_power_limit,
};

void arcturus_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &arcturus_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index cc63705920dc..2773966ae434 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
- vega12_baco.o smu9_baco.o
+ vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \
+ ci_baco.o smu7_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
new file mode 100644
index 000000000000..3be40114e63d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "ci_baco.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
+};
+
+int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* ASIC already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
new file mode 100644
index 000000000000..17041f187020
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_BACO_H__
+#define __CI_BACO_H__
+#include "smu7_baco.h"
+
+extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
index 9c57c1f67749..1c73776bd606 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c
@@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m
return ret;
}
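+/*
+ * Walk a table of baco_cmd_entry steps and execute each one through
+ * baco_cmd_handler() above. Delay steps carry no register of their own,
+ * so they reuse the offset of the most recent WRITE, READMODIFYWRITE or
+ * WAITFOR entry. Returns false as soon as any step fails.
+ */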
+bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size)
+{
+ u32 i, reg = 0;
+
+ for (i = 0; i < array_size; i++) {
+ if ((entry[i].cmd == CMD_WRITE) ||
+ (entry[i].cmd == CMD_READMODIFYWRITE) ||
+ (entry[i].cmd == CMD_WAITFOR))
+ reg = entry[i].reg_offset;
+ if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
+ entry[i].shift, entry[i].val, entry[i].timeout))
+ return false;
+ }
+
+ return true;
+}
+
bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
index 95296c916f4e..8393eb62706d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h
@@ -33,6 +33,15 @@ enum baco_cmd_type {
CMD_DELAY_US,
};
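+/*
+ * One step of a BACO register programming sequence. Field semantics as
+ * inferred from baco_cmd_handler():
+ * @cmd:        operation type - write, read-modify-write, poll or delay
+ * @reg_offset: MMIO register to access (unused by delay steps)
+ * @mask:       bit-field mask for read-modify-write and poll steps
+ * @shift:      shift applied to @val when programming a bit field
+ * @timeout:    retry budget for CMD_WAITFOR polling
+ * @val:        value to write, field value to program, or value polled for
+ */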
+struct baco_cmd_entry {
+ enum baco_cmd_type cmd;
+ uint32_t reg_offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t timeout;
+ uint32_t val;
+};
+
struct soc15_baco_cmd_entry {
enum baco_cmd_type cmd;
uint32_t hwip;
@@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry {
uint32_t timeout;
uint32_t val;
};
+
+extern bool baco_program_registers(struct pp_hwmgr *hwmgr,
+ const struct baco_cmd_entry *entry,
+ const u32 array_size);
extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr,
const struct soc15_baco_cmd_entry *entry,
const u32 array_size);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
new file mode 100644
index 000000000000..c0368f2dfb21
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "fiji_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 }
+};
+
+int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* ASIC already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
new file mode 100644
index 000000000000..47f402900bdb
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __FIJI_BACO_H__
+#define __FIJI_BACO_H__
+#include "smu7_baco.h"
+
+extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index a24beaa4fb01..d2909c91d65b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev;
+
if (!hwmgr)
return -EINVAL;
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_workload_prority(hwmgr);
hwmgr->gfxoff_state_changed_by_workload = false;
+ adev = hwmgr->adev;
+
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
vega12_hwmgr_init(hwmgr);
break;
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega20_smu_funcs;
vega20_hwmgr_init(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
new file mode 100644
index 000000000000..8f8e296f2fe9
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "polaris_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 },
+ { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+static const struct baco_cmd_entry clk_req_b_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl_vg[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl_vg[] =
+{
+ { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 1, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 3, 0x0 },
+ { CMD_DELAY_US, 0, 0, 0, 5, 0x0 }
+};
+
+int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* ASIC already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ if (hwmgr->chip_id == CHIP_VEGAM) {
+ baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg));
+ baco_program_registers(hwmgr, turn_off_plls_tbl_vg,
+ ARRAY_SIZE(turn_off_plls_tbl_vg));
+ } else {
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ }
+ baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
new file mode 100644
index 000000000000..87a5fa0a157a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __POLARIS_BACO_H__
+#define __POLARIS_BACO_H__
+#include "smu7_baco.h"
+
+extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
new file mode 100644
index 000000000000..044cda005aed
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smu7_baco.h"
+#include "tonga_baco.h"
+#include "fiji_baco.h"
+#include "polaris_baco.h"
+#include "ci_baco.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ reg = RREG32(mmCC_BIF_BX_FUSESTRAP0);
+
+ if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK)
+ *cap = true;
+
+ return 0;
+}
+
+int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32(mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
+
+int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ return tonga_baco_set_state(hwmgr, state);
+ case CHIP_FIJI:
+ return fiji_baco_set_state(hwmgr, state);
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ return polaris_baco_set_state(hwmgr, state);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return ci_baco_set_state(hwmgr, state);
+#endif
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
new file mode 100644
index 000000000000..be0d98abb536
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU7_BACO_H__
+#define __SMU7_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 203ce4b1028f..f73dff68e799 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -48,6 +48,7 @@
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
+#include "smu7_baco.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
table_size = lookup_table->count;
PP_ASSERT_WITH_CODE(0 != lookup_table->count,
@@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -3983,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
@@ -5158,6 +5164,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_power_profile_mode = smu7_get_power_profile_mode,
.set_power_profile_mode = smu7_set_power_profile_mode,
.get_performance_level = smu7_get_performance_level,
+ .get_asic_baco_capability = smu7_baco_get_capability,
+ .get_asic_baco_state = smu7_baco_get_state,
+ .set_asic_baco_state = smu7_baco_set_state,
.power_off_asic = smu7_power_off_asic,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
new file mode 100644
index 000000000000..ea743bea8e29
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "tonga_baco.h"
+
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
+
+
+static const struct baco_cmd_entry gpio_tbl[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff },
+ { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 },
+ { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 }
+};
+
+static const struct baco_cmd_entry enable_fb_req_rej_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 },
+ { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry use_bclk_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS },
+ { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 }
+};
+
+static const struct baco_cmd_entry turn_off_plls_tbl[] =
+{
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 },
+ { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 },
+ { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 },
+ { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 },
+ { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }
+};
+
+static const struct baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 }
+};
+
+#define BACO_CNTL__PWRGOOD_MASK (BACO_CNTL__PWRGOOD_GPIO_MASK | BACO_CNTL__PWRGOOD_MEM_MASK | BACO_CNTL__PWRGOOD_DVO_MASK)
+
+static const struct baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+static const struct baco_cmd_entry gpio_tbl_iceland[] =
+{
+ { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 },
+ { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }
+};
+
+static const struct baco_cmd_entry exit_baco_tbl_iceland[] =
+{
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
+ { CMD_DELAY_MS, 0, 0, 0, 20, 0 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
+ { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
+ { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
+};
+
+static const struct baco_cmd_entry clean_baco_tbl_iceland[] =
+{
+ { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu7_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (hwmgr->chip_id == CHIP_TOPAZ)
+ baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland));
+ else
+ baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl));
+ baco_program_registers(hwmgr, enable_fb_req_rej_tbl,
+ ARRAY_SIZE(enable_fb_req_rej_tbl));
+ baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl));
+ baco_program_registers(hwmgr, turn_off_plls_tbl,
+ ARRAY_SIZE(turn_off_plls_tbl));
+ if (baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (hwmgr->chip_id == CHIP_TOPAZ) {
+ if (baco_program_registers(hwmgr, exit_baco_tbl_iceland,
+ ARRAY_SIZE(exit_baco_tbl_iceland))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl_iceland,
+ ARRAY_SIZE(clean_baco_tbl_iceland)))
+ return 0;
+ }
+ } else {
+ if (baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
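
tonga_baco.c is data-driven: each table above is a list of register commands replayed by baco_program_registers() from the common BACO code. That helper's body is not part of this diff, so the following user-space sketch of how such a table is plausibly interpreted is an assumption based on the initializer order { cmd, reg, mask, shift, timeout, val }:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Command opcodes mirroring the table entries above (values assumed). */
enum baco_cmd { CMD_WRITE, CMD_READMODIFYWRITE, CMD_WAITFOR, CMD_DELAY_MS };

struct baco_cmd_entry {
	uint32_t cmd;
	uint32_t reg;		/* register offset */
	uint32_t mask;
	uint32_t shift;
	uint32_t timeout;	/* poll budget / delay, per command */
	uint32_t val;
};

static uint32_t regs[64];	/* toy register file standing in for MMIO */

static bool run_table(const struct baco_cmd_entry *tbl, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		const struct baco_cmd_entry *e = &tbl[i];

		switch (e->cmd) {
		case CMD_WRITE:
			regs[e->reg] = e->val;
			break;
		case CMD_READMODIFYWRITE:
			regs[e->reg] = (regs[e->reg] & ~e->mask) |
				       ((e->val << e->shift) & e->mask);
			break;
		case CMD_WAITFOR:
			/* real code polls hardware; the toy file is instant */
			if ((regs[e->reg] & e->mask) != e->val)
				return false;
			break;
		case CMD_DELAY_MS:
			/* msleep(e->timeout) in the kernel; a no-op here */
			break;
		}
	}
	return true;
}

int main(void)
{
	static const struct baco_cmd_entry demo[] = {
		{ CMD_WRITE, 1, 0, 0, 0, 0x80 },		/* reg1 = 0x80 */
		{ CMD_READMODIFYWRITE, 1, 0x0f, 0, 0, 0x3 },	/* low nibble = 3 */
		{ CMD_WAITFOR, 1, 0xff, 0, 5, 0x83 },		/* expect 0x83 */
	};

	printf("table %s, reg1 = 0x%x\n",
	       run_table(demo, 3) ? "passed" : "failed", regs[1]);
	return 0;
}

Keeping the sequences as const tables rather than straight-line code is what lets Iceland (CHIP_TOPAZ) swap in its own gpio/exit/clean tables in tonga_baco_set_state() while sharing the interpreter.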
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
new file mode 100644
index 000000000000..5dc16cc8a295
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __TONGA_BACO_H__
+#define __TONGA_BACO_H__
+#include "smu7_baco.h"
+
+extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index beacfffbdc3e..d71a492c87a3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
"Lookup table is empty", return -EINVAL);
@@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
for (j = i + 1; j > 0; j--) {
if (lookup_table->entries[j].us_vdd <
lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
+ swap(lookup_table->entries[j - 1],
+ lookup_table->entries[j]);
}
}
}
@@ -3691,6 +3689,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
+ /*
+ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables the AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
vega10_update_avfs(hwmgr);
/*
@@ -5265,6 +5270,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return 0;
}
+static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t feature_mask = 0;
+
+ if (disable) {
+ feature_mask |= data->smu_features[GNLD_ULV].enabled ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ } else {
+ feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ?
+ data->smu_features[GNLD_ULV].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ?
+ data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ?
+ data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ?
+ data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0;
+ feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ?
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0;
+ }
+
+ if (feature_mask)
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+ !disable, feature_mask),
+ "enable/disable power features for compute performance Failed!",
+ return -EINVAL);
+
+ if (disable) {
+ data->smu_features[GNLD_ULV].enabled = false;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = false;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = false;
+ data->smu_features[GNLD_DS_LCLK].enabled = false;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
+ } else {
+ data->smu_features[GNLD_ULV].enabled = true;
+ data->smu_features[GNLD_DS_GFXCLK].enabled = true;
+ data->smu_features[GNLD_DS_SOCCLK].enabled = true;
+ data->smu_features[GNLD_DS_LCLK].enabled = true;
+ data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
+ }
+
+ return 0;
+}
+
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5332,6 +5390,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_ppfeature_status = vega10_get_ppfeature_status,
.set_ppfeature_status = vega10_set_ppfeature_status,
.set_mp1_state = vega10_set_mp1_state,
+ .disable_power_features_for_compute_performance =
+ vega10_disable_power_features_for_compute_performance,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
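
vega10_disable_power_features_for_compute_performance() repeats the same ternary five times in each branch; the selection rule is simply "collect the features whose enabled flag equals the disable request". A self-contained sketch of that rule as a loop (feature ids and bitmaps are toy stand-ins, not the driver's values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Toy stand-ins for the vega10 feature bookkeeping. */
enum { GNLD_ULV, GNLD_DS_GFXCLK, GNLD_DS_SOCCLK, GNLD_DS_LCLK,
       GNLD_DS_DCEFCLK, GNLD_FEATURES_MAX };

struct smu_feature {
	bool enabled;
	uint32_t bitmap;
};

static const int compute_perf_features[] = {
	GNLD_ULV, GNLD_DS_GFXCLK, GNLD_DS_SOCCLK, GNLD_DS_LCLK, GNLD_DS_DCEFCLK,
};

/*
 * Same rule as the patch: when disabling, act on features currently on;
 * when re-enabling, act on those currently off.
 */
static uint32_t collect_feature_mask(const struct smu_feature *f, bool disable)
{
	uint32_t mask = 0;

	for (size_t i = 0; i < ARRAY_SIZE(compute_perf_features); i++) {
		int id = compute_perf_features[i];

		if (f[id].enabled == disable)
			mask |= f[id].bitmap;
	}
	return mask;
}

int main(void)
{
	struct smu_feature feats[GNLD_FEATURES_MAX] = {
		[GNLD_ULV]        = { true,  1u << 0 },
		[GNLD_DS_GFXCLK]  = { true,  1u << 1 },
		[GNLD_DS_SOCCLK]  = { false, 1u << 2 },
		[GNLD_DS_LCLK]    = { true,  1u << 3 },
		[GNLD_DS_DCEFCLK] = { false, 1u << 4 },
	};

	printf("disable mask: 0x%x\n", collect_feature_mask(feats, true));
	printf("enable mask:  0x%x\n", collect_feature_mask(feats, false));
	return 0;
}

On disable the mask gathers the currently-enabled features to turn off; on re-enable it gathers the ones still off, so a single vega10_enable_smc_features() call flips exactly the set that needs to change.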
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index df6ff9252401..9b5e72bdceca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -29,7 +29,7 @@
#include "vega20_baco.h"
#include "vega20_smumgr.h"
-
+#include "amdgpu_ras.h"
static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{
@@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum BACO_STATE cur_state;
uint32_t data;
@@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
if (state == BACO_STATE_IN) {
- data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
- data |= 0x80000000;
- WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
-
-
- if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -EINVAL;
+ if (!ras || !ras->supported) {
+ data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
+ data |= 0x80000000;
+ WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
+
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+ } else {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnterBaco, 1))
+ return -EINVAL;
+ }
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
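
The vega20_baco.c hunk makes BACO entry RAS-aware. Condensed into one hypothetical helper (a restatement of the hunk, not code from the patch): without RAS the driver arms THM_BACO_CNTL itself and sends EnterBaco with argument 0; with RAS it passes 1 so the SMU firmware runs the whole entry sequence.

static int vega20_enter_baco(struct pp_hwmgr *hwmgr, bool ras_supported)
{
	uint32_t data;

	if (!ras_supported) {
		/* driver pre-arms BACO in the thermal block (bit 31) */
		data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
		data |= 0x80000000;
		WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
	}

	/* arg 1 = let the SMU handle the full entry sequence (RAS case) */
	return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco,
						   ras_supported ? 1 : 0) ?
		-EINVAL : 0;
}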
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f5915308e643..5bcf0d684151 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TablelessHardwareInterface);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_BACO);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
@@ -490,8 +493,8 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- if (adev->in_baco_reset) {
- adev->in_baco_reset = 0;
+ if (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
@@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
return res;
}
+static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
+ enum pp_df_cstate state)
+{
+ int ret;
+
+ /* PPSMC_MSG_DFCstateControl is supported by 40.50 and later SMC firmware */
+ if (hwmgr->smu_version < 0x283200) {
+ pr_err("DF cstate control is supported with 40.50 and later SMC fw!\n");
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ if (ret)
+ pr_err("SetDfCstate failed!\n");
+
+ return ret;
+}
+
+static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
+ uint32_t pstate)
+{
+ int ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetXgmiMode,
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ if (ret)
+ pr_err("SetXgmiPstate failed!\n");
+
+ return ret;
+}
+
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
.backend_init = vega20_hwmgr_backend_init,
@@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
.set_asic_baco_state = vega20_baco_set_state,
.set_mp1_state = vega20_set_mp1_state,
.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
+ .set_df_cstate = vega20_set_df_cstate,
+ .set_xgmi_pstate = vega20_set_xgmi_pstate,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
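
The 0x283200 gate in vega20_set_df_cstate() matches the "40.50" in the error string if smu_version packs major.minor.patch one byte each (0x28 = 40, 0x32 = 50) — an assumed encoding, but consistent with the check. A tiny decode for reference:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: 0xMMmmpp -> major.minor.patch, so 0x283200 == 40.50.0. */
static void decode_smu_version(uint32_t v)
{
	printf("%u.%u.%u\n", (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
}

int main(void)
{
	decode_smu_version(0x283200);	/* prints 40.50.0 */
	return 0;
}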
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 23171a4d9a31..031e0c22fcc7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -259,10 +259,8 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
struct smu_table *tables;
- uint32_t table_count;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
- uint16_t TDPODLimit;
void *overdrive_table;
};
@@ -322,6 +320,13 @@ struct mclock_latency_table {
struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
};
+enum smu_reset_mode
+{
+ SMU_RESET_MODE_0,
+ SMU_RESET_MODE_1,
+ SMU_RESET_MODE_2,
+};
+
enum smu_baco_state
{
SMU_BACO_STATE_ENTER = 0,
@@ -341,7 +346,6 @@ struct smu_context
struct amdgpu_device *adev;
struct amdgpu_irq_src *irq_source;
- const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
struct mutex sensor_lock;
@@ -382,11 +386,15 @@ struct smu_context
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
bool pm_enabled;
+ bool is_apu;
uint32_t smc_if_version;
+ bool uploading_custom_pp_table;
};
+struct i2c_adapter;
+
struct pptable_funcs {
int (*alloc_dpm_context)(struct smu_context *smu);
int (*store_powerplay_table)(struct smu_context *smu);
@@ -398,7 +406,7 @@ struct pptable_funcs {
int (*get_smu_table_index)(struct smu_context *smu, uint32_t index);
int (*get_smu_power_index)(struct smu_context *smu, uint32_t index);
int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile);
- int (*run_afll_btc)(struct smu_context *smu);
+ int (*run_btc)(struct smu_context *smu);
int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
int (*set_default_dpm_table)(struct smu_context *smu);
@@ -459,17 +467,19 @@ struct pptable_funcs {
int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch);
void (*dump_pptable)(struct smu_context *smu);
int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default);
- int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max);
-};
-
-struct smu_funcs
-{
+ int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq);
+ int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
+ int (*i2c_eeprom_init)(struct i2c_adapter *control);
+ void (*i2c_eeprom_fini)(struct i2c_adapter *control);
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
int (*init_microcode)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
int (*init_smc_tables)(struct smu_context *smu);
int (*fini_smc_tables)(struct smu_context *smu);
int (*init_power)(struct smu_context *smu);
int (*fini_power)(struct smu_context *smu);
- int (*load_microcode)(struct smu_context *smu);
int (*check_fw_status)(struct smu_context *smu);
int (*setup_pptable)(struct smu_context *smu);
int (*get_vbios_bootup_values)(struct smu_context *smu);
@@ -485,7 +495,6 @@ struct smu_funcs
int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
int (*set_tool_table_location)(struct smu_context *smu);
int (*notify_memory_pool_location)(struct smu_context *smu);
- int (*write_watermarks_table)(struct smu_context *smu);
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
@@ -499,8 +508,7 @@ struct smu_funcs
int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value);
int (*init_max_sustainable_clocks)(struct smu_context *smu);
int (*start_thermal_control)(struct smu_context *smu);
- int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
+ int (*stop_thermal_control)(struct smu_context *smu);
int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
@@ -522,8 +530,6 @@ struct smu_funcs
int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
struct smu_clock_info *clocks);
int (*notify_smu_enable_pwe)(struct smu_context *smu);
- int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
- struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
@@ -538,234 +544,90 @@ struct smu_funcs
enum smu_baco_state (*baco_get_state)(struct smu_context *smu);
int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state);
int (*baco_reset)(struct smu_context *smu);
+ int (*mode2_reset)(struct smu_context *smu);
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
+ int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
+ int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
};
-#define smu_init_microcode(smu) \
- ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
-#define smu_init_smc_tables(smu) \
- ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
-#define smu_fini_smc_tables(smu) \
- ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
-#define smu_init_power(smu) \
- ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
-#define smu_fini_power(smu) \
- ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
-#define smu_load_microcode(smu) \
- ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
-#define smu_check_fw_status(smu) \
- ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
-#define smu_setup_pptable(smu) \
- ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
-#define smu_powergate_sdma(smu, gate) \
- ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
-#define smu_powergate_vcn(smu, gate) \
- ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
-#define smu_set_gfx_cgpg(smu, enabled) \
- ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
-#define smu_get_vbios_bootup_values(smu) \
- ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
-#define smu_get_clk_info_from_vbios(smu) \
- ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
-#define smu_check_pptable(smu) \
- ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
-#define smu_parse_pptable(smu) \
- ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
-#define smu_populate_smc_tables(smu) \
- ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0)
-#define smu_check_fw_version(smu) \
- ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
-#define smu_write_pptable(smu) \
- ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
-#define smu_set_min_dcef_deep_sleep(smu) \
- ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
-#define smu_set_tool_table_location(smu) \
- ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
-#define smu_notify_memory_pool_location(smu) \
- ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
-#define smu_gfx_off_control(smu, enable) \
- ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0)
-
-#define smu_write_watermarks_table(smu) \
- ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
-#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
- ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
-#define smu_system_features_control(smu, en) \
- ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
-#define smu_init_max_sustainable_clocks(smu) \
- ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
-#define smu_set_default_od_settings(smu, initialize) \
- ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_set_fan_speed_rpm(smu, speed) \
- ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
-#define smu_send_smc_msg(smu, msg) \
- ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
-#define smu_alloc_dpm_context(smu) \
- ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
-#define smu_init_display_count(smu, count) \
- ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0)
-#define smu_feature_set_allowed_mask(smu) \
- ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
-#define smu_feature_get_enabled_mask(smu, mask, num) \
- ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
-#define smu_is_dpm_running(smu) \
- ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
-#define smu_notify_display_change(smu) \
- ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
-#define smu_store_powerplay_table(smu) \
- ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
-#define smu_check_powerplay_table(smu) \
- ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
-#define smu_append_powerplay_table(smu) \
- ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
-#define smu_set_default_dpm_table(smu) \
- ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
-#define smu_populate_umd_state_clk(smu) \
- ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
-#define smu_set_default_od8_settings(smu) \
- ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
-#define smu_get_power_limit(smu, limit, def) \
- ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
-#define smu_set_power_limit(smu, limit) \
- ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
-#define smu_get_current_clk_freq(smu, clk_id, value) \
- ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
-#define smu_print_clk_levels(smu, clk_type, buf) \
- ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
-#define smu_force_clk_levels(smu, clk_type, level) \
- ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0)
-#define smu_get_od_percentage(smu, type) \
- ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
-#define smu_set_od_percentage(smu, type, value) \
- ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
-#define smu_od_edit_dpm_table(smu, type, input, size) \
- ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
-#define smu_tables_init(smu, tab) \
- ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
-#define smu_set_thermal_fan_table(smu) \
- ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
-#define smu_start_thermal_control(smu) \
- ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
-#define smu_read_sensor(smu, sensor, data, size) \
- ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
-#define smu_smc_read_sensor(smu, sensor, data, size) \
- ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
-#define smu_get_power_profile_mode(smu, buf) \
- ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
-#define smu_set_power_profile_mode(smu, param, param_size) \
- ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
-#define smu_pre_display_config_changed(smu) \
- ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
-#define smu_display_config_changed(smu) \
- ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
-#define smu_apply_clocks_adjust_rules(smu) \
- ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
-#define smu_notify_smc_dispaly_config(smu) \
- ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
-#define smu_force_dpm_limit_value(smu, highest) \
- ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
-#define smu_unforce_dpm_levels(smu) \
- ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
-#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
- ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
-#define smu_set_cpu_power_state(smu) \
- ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
-#define smu_get_fan_control_mode(smu) \
- ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
-#define smu_set_fan_control_mode(smu, value) \
- ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
-#define smu_get_fan_speed_percent(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
-#define smu_set_fan_speed_percent(smu, speed) \
- ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
-#define smu_get_fan_speed_rpm(smu, speed) \
- ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
-
-#define smu_msg_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_clk_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_feature_get_index(smu, msg) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
-#define smu_table_get_index(smu, tab) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
-#define smu_power_get_index(smu, src) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
-#define smu_workload_get_type(smu, profile) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
-#define smu_run_afll_btc(smu) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
-#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
- ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
-#define smu_set_deep_sleep_dcefclk(smu, clk) \
- ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
-#define smu_set_active_display_count(smu, count) \
- ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
-#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
- ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
-#define smu_get_clock_by_type(smu, type, clocks) \
- ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
-#define smu_get_max_high_clocks(smu, clocks) \
- ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
-#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0)
-#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
- ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
-#define smu_display_clock_voltage_request(smu, clock_req) \
- ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
-#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \
- ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
-#define smu_get_dal_power_level(smu, clocks) \
- ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
-#define smu_get_perf_level(smu, designation, level) \
- ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
-#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
- ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
-#define smu_notify_smu_enable_pwe(smu) \
- ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
-#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
- ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
-#define smu_dpm_set_uvd_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
-#define smu_dpm_set_vce_enable(smu, enable) \
- ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
-#define smu_set_xgmi_pstate(smu, pstate) \
- ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
-#define smu_set_watermarks_table(smu, tab, clock_ranges) \
- ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
-#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
- ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
-#define smu_thermal_temperature_range_update(smu, range, rw) \
- ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
-#define smu_get_thermal_temperature_range(smu, range) \
- ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
-#define smu_register_irq_handler(smu) \
- ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
-#define smu_set_azalia_d3_pme(smu) \
- ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
-#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
- ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
-#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
- ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
- ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
-#define smu_baco_is_support(smu) \
- ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
-#define smu_baco_get_state(smu, state) \
- ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
-#define smu_baco_reset(smu) \
- ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
-#define smu_asic_set_performance_level(smu, level) \
- ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
-#define smu_dump_pptable(smu) \
- ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
-#define smu_get_dpm_uclk_limited(smu, clock, max) \
- ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL)
+int smu_load_microcode(struct smu_context *smu);
+
+int smu_check_fw_status(struct smu_context *smu);
+
+int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
+
+int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
+
+int smu_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool def,
+ bool lock_needed);
+
+int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
+int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
+int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
+
+int smu_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+
+int smu_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
+
+int smu_set_power_profile_mode(struct smu_context *smu,
+ long *param,
+ uint32_t param_size,
+ bool lock_needed);
+int smu_get_fan_control_mode(struct smu_context *smu);
+int smu_set_fan_control_mode(struct smu_context *smu, int value);
+int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
+int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
+
+int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
+int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_get_clock_by_type(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+
+int smu_get_max_high_clocks(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+
+int smu_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct pp_clock_levels_with_latency *clocks);
+
+int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+
+int smu_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req);
+int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
+int smu_notify_smu_enable_pwe(struct smu_context *smu);
+
+int smu_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_set_azalia_d3_pme(struct smu_context *smu);
+
+bool smu_baco_is_support(struct smu_context *smu);
+
+int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
+
+int smu_baco_reset(struct smu_context *smu);
+int smu_mode2_reset(struct smu_context *smu);
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
@@ -799,6 +661,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table);
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+int smu_write_watermarks_table(struct smu_context *smu);
+int smu_set_watermarks_for_clock_ranges(
+ struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
/* smu to display interface */
extern int smu_display_configuration_change(struct smu_context *smu, const
@@ -809,7 +675,8 @@ extern int smu_get_current_clocks(struct smu_context *smu,
extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
extern int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id);
+ enum amd_pp_task task_id,
+ bool lock_needed);
int smu_switch_power_profile(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE type,
bool en);
@@ -819,7 +686,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *value);
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t *min, uint32_t *max);
+ uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -828,10 +695,29 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
-int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+int smu_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t mask,
+ bool lock_needed);
+int smu_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+int smu_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state);
+
+int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+int smu_get_uclk_dpm_states(struct smu_context *smu,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+
+int smu_get_dpm_clock_table(struct smu_context *smu,
+ struct dpm_clocks *clock_table);
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
#endif
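
The amdgpu_smu.h hunks fold struct smu_funcs into struct pptable_funcs and replace much of the null-guarding macro forest with ordinary exported functions; several entry points also grow an explicit lock_needed flag for callers that may already hold smu->mutex. A hypothetical caller sketch of the new shape (illustrative, not code from the patch):

static void example_query_gfxclk(struct smu_context *smu)
{
	uint32_t min_mhz, max_mhz;

	/* lock_needed = true: the helper takes smu->mutex itself */
	if (!smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_mhz, &max_mhz, true))
		pr_info("gfxclk range: %u..%u MHz\n", min_mhz, max_mhz);

	/* same pattern for task dispatch */
	smu_handle_task(smu, AMD_DPM_FORCED_LEVEL_AUTO,
			AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, true);
}

Real functions also give callers type checking and linkable symbols that the old object-like macros never had.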
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index 78e5927b7711..e3291259b249 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -95,8 +95,7 @@
//BTC
#define PPSMC_MSG_RunAfllBtc 0x30
-#define PPSMC_MSG_RunGfxDcBtc 0x31
-#define PPSMC_MSG_RunSocDcBtc 0x32
+#define PPSMC_MSG_RunDcBtc 0x31
//Debug
#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 7bf9a14bfa0b..af977675fd33 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -355,6 +355,10 @@ struct pp_hwmgr_func {
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
+ int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
+ int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
+ int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
+ bool disable);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index e02950b505fa..a886f0644d24 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -137,29 +137,29 @@
#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT )
#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
-#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT )
+#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT )
#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT )
-#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT )
+#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT )
#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT )
#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT )
#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT )
#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT )
-#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT )
+#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT )
#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT )
#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT )
#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT )
-#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT )
-#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK )
+#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
+#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
//FIXME need updating
// Debug Overrides Bitmask
#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001
-#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002
+#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002
// I2C Config Bit Defines
#define I2C_CONTROLLER_ENABLED 1
@@ -423,18 +423,30 @@ typedef enum {
} PwrConfig_e;
typedef enum {
- XGMI_LINK_RATE_12 = 0, // 12Gbps
- XGMI_LINK_RATE_16, // 16Gbps
- XGMI_LINK_RATE_22, // 22Gbps
- XGMI_LINK_RATE_25, // 25Gbps
+ XGMI_LINK_RATE_2 = 2, // 2Gbps
+ XGMI_LINK_RATE_4 = 4, // 4Gbps
+ XGMI_LINK_RATE_8 = 8, // 8Gbps
+ XGMI_LINK_RATE_12 = 12, // 12Gbps
+ XGMI_LINK_RATE_16 = 16, // 16Gbps
+ XGMI_LINK_RATE_17 = 17, // 17Gbps
+ XGMI_LINK_RATE_18 = 18, // 18Gbps
+ XGMI_LINK_RATE_19 = 19, // 19Gbps
+ XGMI_LINK_RATE_20 = 20, // 20Gbps
+ XGMI_LINK_RATE_21 = 21, // 21Gbps
+ XGMI_LINK_RATE_22 = 22, // 22Gbps
+ XGMI_LINK_RATE_23 = 23, // 23Gbps
+ XGMI_LINK_RATE_24 = 24, // 24Gbps
+ XGMI_LINK_RATE_25 = 25, // 25Gbps
XGMI_LINK_RATE_COUNT
} XGMI_LINK_RATE_e;
typedef enum {
- XGMI_LINK_WIDTH_2 = 0, // x2
- XGMI_LINK_WIDTH_4, // x4
- XGMI_LINK_WIDTH_8, // x8
- XGMI_LINK_WIDTH_16, // x16
+ XGMI_LINK_WIDTH_1 = 1, // x1
+ XGMI_LINK_WIDTH_2 = 2, // x2
+ XGMI_LINK_WIDTH_4 = 4, // x4
+ XGMI_LINK_WIDTH_8 = 8, // x8
+ XGMI_LINK_WIDTH_9 = 9, // x9
+ XGMI_LINK_WIDTH_16 = 16, // x16
XGMI_LINK_WIDTH_COUNT
} XGMI_LINK_WIDTH_e;
@@ -696,7 +708,11 @@ typedef struct {
uint8_t GpioI2cSda; // Serial Data
uint16_t GpioPadding;
- uint32_t BoardReserved[9];
+ // Platform input telemetry voltage coefficient
+ uint32_t BoardVoltageCoeffA; // decode by /1000
+ uint32_t BoardVoltageCoeffB; // decode by /1000
+
+ uint32_t BoardReserved[7];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8]; // SMU internal use
@@ -802,7 +818,7 @@ typedef struct {
uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units
- uint32_t EnabledAvfsModules[2];
+ uint32_t EnabledAvfsModules[3];
uint32_t MmHubPadding[8]; // SMU internal use
} AvfsFuseOverride_t;
@@ -865,7 +881,8 @@ typedef struct {
//#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE 7
#define TABLE_WAFL_XGMI_TOPOLOGY 8
-#define TABLE_COUNT 9
+#define TABLE_I2C_COMMANDS 9
+#define TABLE_COUNT 10
// These defines are used with the SMC_MSG_SetUclkFastSwitch message.
typedef enum {
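
The XGMI enum rework above makes each enumerator's value equal its physical rate or width (XGMI_LINK_RATE_25 == 25 Gbps, XGMI_LINK_WIDTH_16 == x16), so consumers no longer need a translation table. A hypothetical helper, assuming the rate is per-lane Gbps (not part of the patch):

static inline uint32_t xgmi_raw_gbps(XGMI_LINK_RATE_e rate,
				     XGMI_LINK_WIDTH_e width)
{
	/* aggregate raw link bandwidth in Gbps */
	return (uint32_t)rate * (uint32_t)width;
}

/* xgmi_raw_gbps(XGMI_LINK_RATE_25, XGMI_LINK_WIDTH_16) == 400 */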
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index b0dd05d431dd..d8c9b7f91fcc 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -114,6 +114,7 @@
__SMU_DUMMY_MAP(PowerDownJpeg), \
__SMU_DUMMY_MAP(BacoAudioD3PME), \
__SMU_DUMMY_MAP(ArmD3), \
+ __SMU_DUMMY_MAP(RunDcBtc), \
__SMU_DUMMY_MAP(RunGfxDcBtc), \
__SMU_DUMMY_MAP(RunSocDcBtc), \
__SMU_DUMMY_MAP(SetMemoryChannelEnable), \
@@ -168,6 +169,7 @@
__SMU_DUMMY_MAP(PowerGateAtHub), \
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
+ __SMU_DUMMY_MAP(DFCstateControl), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -251,6 +253,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \
__SMU_DUMMY_MAP(MMHUB_PG), \
__SMU_DUMMY_MAP(ATHUB_PG), \
+ __SMU_DUMMY_MAP(APCC_DFLL), \
__SMU_DUMMY_MAP(WAFL_CG),
#undef __SMU_DUMMY_MAP
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 5bda8539447a..606149085683 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,7 +27,7 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x09
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x10
#define SMU11_DRIVER_IF_VERSION_NV10 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x34
@@ -48,6 +48,8 @@
#define SMU11_TOOL_SIZE 0x19000
+#define MAX_PCIE_CONF 2
+
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};
+struct smu_11_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+};
+
struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;
@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
+ struct smu_11_0_pcie_table pcie_table;
};
struct smu_11_0_dpm_context {
@@ -130,6 +138,128 @@ enum smu_v11_0_baco_seq {
BACO_SEQ_COUNT,
};
-void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+int smu_v11_0_init_microcode(struct smu_context *smu);
+
+int smu_v11_0_load_microcode(struct smu_context *smu);
+
+int smu_v11_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v11_0_init_power(struct smu_context *smu);
+
+int smu_v11_0_fini_power(struct smu_context *smu);
+
+int smu_v11_0_check_fw_status(struct smu_context *smu);
+
+int smu_v11_0_setup_pptable(struct smu_context *smu);
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu);
+
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu);
+
+int smu_v11_0_check_pptable(struct smu_context *smu);
+
+int smu_v11_0_parse_pptable(struct smu_context *smu);
+
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu);
+
+int smu_v11_0_check_fw_version(struct smu_context *smu);
+
+int smu_v11_0_write_pptable(struct smu_context *smu);
+
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu);
+
+int smu_v11_0_set_tool_table_location(struct smu_context *smu);
+
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu);
+
+int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en);
+
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
+
+int smu_v11_0_set_allowed_mask(struct smu_context *smu);
+
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num);
+
+int smu_v11_0_notify_display_change(struct smu_context *smu);
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_id,
+ uint32_t *value);
+
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
+
+int smu_v11_0_start_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_stop_thermal_control(struct smu_context *smu);
+
+int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk);
+
+int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req);
+
+uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu);
+
+int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode);
+
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed);
+
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate);
+
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v11_0_register_irq_handler(struct smu_context *smu);
+
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu);
+
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+ struct pp_smu_nv_clock_table *max_clocks);
+
+bool smu_v11_0_baco_is_support(struct smu_context *smu);
+
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu);
+
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state);
+
+int smu_v11_0_baco_reset(struct smu_context *smu);
+
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
+
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index 86cdc3393eac..b2f96a101124 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;
+#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+#endif
} __attribute__((packed));
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index acf3db12f59f..9b9f5df0911c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -37,6 +37,45 @@ struct smu_12_0_cmn2aisc_mapping {
int map_to;
};
-void smu_v12_0_set_smu_funcs(struct smu_context *smu);
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg);
+
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
+
+int smu_v12_0_wait_for_response(struct smu_context *smu);
+
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg);
+
+int
+smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param);
+
+int smu_v12_0_check_fw_status(struct smu_context *smu);
+
+int smu_v12_0_check_fw_version(struct smu_context *smu);
+
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate);
+
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
+
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable);
+
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu);
+
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable);
+
+int smu_v12_0_init_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_fini_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_populate_smc_tables(struct smu_context *smu);
+
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t *min, uint32_t *max);
+
+int smu_v12_0_mode2_reset(struct smu_context *smu);
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index a0883038f3c3..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -120,7 +120,8 @@
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
-#define PPSMC_Message_Count 0x61
+#define PPSMC_MSG_DFCstateControl 0x63
+#define PPSMC_Message_Count 0x64
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 328e258a6895..aaec884d63ed 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -35,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -177,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
FEA_MAP(TEMP_DEPENDENT_VMIN),
FEA_MAP(MMHUB_PG),
FEA_MAP(ATHUB_PG),
+ FEA_MAP(APCC_DFLL),
};
static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
@@ -327,40 +330,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
memset(feature_mask, 0, sizeof(uint32_t) * num);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
- | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
- | FEATURE_MASK(FEATURE_DPM_LINK_BIT)
- | FEATURE_MASK(FEATURE_GFX_ULV_BIT)
| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_PPT_BIT)
| FEATURE_MASK(FEATURE_TDC_BIT)
| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
+ | FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
| FEATURE_MASK(FEATURE_VR0HOT_BIT)
| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
| FEATURE_MASK(FEATURE_THERMAL_BIT)
| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
- | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)
- | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT)
+ | FEATURE_MASK(FEATURE_DS_LCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
| FEATURE_MASK(FEATURE_ACDC_BIT)
| FEATURE_MASK(FEATURE_GFX_SS_BIT)
| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
- | FEATURE_MASK(FEATURE_FW_CTF_BIT);
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT)
+ | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
+
+ if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+
+ if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
- if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
- /* TODO: remove it once fw fix the bug */
- *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
- }
if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
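The mask built in this hunk is a 64-bit bit set (FEATURE_MASK() expands to the usual 1ULL << bit) stored through a uint32_t[2] array, so bits 32 and up land in the second word on little-endian hosts. A stand-alone sketch of the same pattern, with illustrative names:

	#include <stdint.h>
	#include <string.h>

	#define EXAMPLE_MASK(bit)	(1ULL << (bit))	/* mirrors FEATURE_MASK() */

	static void build_example_mask(uint32_t *mask_words, uint32_t num_words)
	{
		memset(mask_words, 0, sizeof(uint32_t) * num_words);
		/* A single 64-bit store spreads the bits across both 32-bit words. */
		*(uint64_t *)mask_words |= EXAMPLE_MASK(3) | EXAMPLE_MASK(40);
	}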
@@ -585,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
+ int i;
driver_ppt = table_context->driver_pptable;
@@ -615,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
+ for (i = 0; i < MAX_PCIE_CONF; i++) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
+ }
+
return 0;
}
@@ -677,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0;
}
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+ return od_table->cap[feature];
+}
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
+ uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
switch (clk_type) {
case SMU_GFXCLK:
@@ -734,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
+ case SMU_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMHz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ break;
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ break;
+ size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
default:
break;
}
@@ -789,13 +889,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
int ret = 0;
uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
- ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
if (ret)
return ret;
smu->pstate_sclk = min_sclk_freq * 100;
- ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
if (ret)
return ret;
@@ -848,7 +948,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
if (ret)
return ret;
ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
@@ -898,7 +998,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -925,7 +1025,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
for (i = 0; i < ARRAY_SIZE(clks); i++) {
clk_type = clks[i];
- ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
if (ret)
return ret;
@@ -1260,7 +1360,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu_display_clock_voltage_request(smu, &clock_req)) {
+
+ ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
+ if (!ret) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -1414,7 +1516,7 @@ static int navi10_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -1457,18 +1559,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
uint32_t sclk_freq = 0, uclk_freq = 0;
uint32_t uclk_level = 0;
- switch (adev->pdev->revision) {
- case 0xf0: /* XTX */
- case 0xc0:
- sclk_freq = NAVI10_PEAK_SCLK_XTX;
- break;
- case 0xf1: /* XT */
- case 0xc1:
- sclk_freq = NAVI10_PEAK_SCLK_XT;
+ switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
break;
- default: /* XL */
- sclk_freq = NAVI10_PEAK_SCLK_XL;
+ case CHIP_NAVI14:
+ switch (adev->pdev->revision) {
+ case 0xc7: /* XT */
+ case 0xf4:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
+ break;
+ case 0xc1: /* XTM */
+ case 0xf2:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
+ break;
+ case 0xc3: /* XLM */
+ case 0xf3:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
+ break;
+ case 0xc5: /* XTX */
+ case 0xf6:
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
+ break;
+ }
break;
+ default:
+ return -EINVAL;
}
ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
@@ -1491,10 +1622,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
int ret = 0;
- struct amdgpu_device *adev = smu->adev;
-
- if (adev->asic_type != CHIP_NAVI10)
- return -EINVAL;
switch (level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1547,17 +1674,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1580,23 +1712,291 @@ static int navi10_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
return 0;
}
+static int navi10_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ int ret, i;
+ uint32_t smu_pcie_arg;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+
+ if (ret)
+ return ret;
+
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+ if (pptable->PcieLaneCount[i] > pcie_width_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
+}
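The 32-bit argument assembled above packs three fields into one SMU message: the link level index in bits 16 and up, the PCIe gen in bits 8-15, and the lane-count encoding in bits 0-7, each capped by the platform limits. A stand-alone restatement of that layout (the helper name is illustrative):

	#include <stdint.h>

	static uint32_t pack_pcie_arg(uint32_t level, uint8_t gen, uint8_t lanes)
	{
		/* level in the top half, then one byte each for gen and lanes */
		return (level << 16) | ((uint32_t)gen << 8) | lanes;
	}

	/* pack_pcie_arg(1, 3, 6) would request link level 1 at 16.0GT/s with the
	 * x16 lane encoding, matching the decode in navi10_print_clk_levels(). */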
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table)
+{
+ pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+ pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+ pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+ pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+ pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu)
+{
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+ if (!smu->smu_table.power_play_table) {
+ pr_err("powerplay table uninitialized!\n");
+ return -ENOENT;
+ }
+ powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ overdrive_table = &powerplay_table->overdrive_table;
+ if (!smu->od_settings) {
+ smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+ if (!smu->od_settings)
+ return -ENOMEM;
+ } else {
+ memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+ }
+ return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize)
+{
+ OverDriveTable_t *od_table;
+ int ret = 0;
+
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
+ }
+
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (od_table)
+ navi10_dump_od_table(od_table);
+
+ return ret;
+}
+
+static int navi10_od_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ int i;
+ int ret = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table;
+ struct smu_11_0_overdrive_table *od_settings;
+ enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+ uint16_t *freq_ptr, *voltage_ptr;
+ od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+ if (!smu->od_enabled) {
+ pr_warn("OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ pr_err("OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ od_settings = smu->od_settings;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ pr_warn("GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (!table_context->overdrive_table) {
+ pr_err("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+ switch (input[i]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ pr_info("Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[i + 1];
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ pr_warn("UCLK_MAX not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 2) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (input[0] != 1) {
+ pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+ pr_info("Supported indices: [1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+ if (ret)
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ // no lock needed because smu_od_edit_dpm_table has it
+ ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ if (ret)
+ return ret;
+ break;
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ pr_warn("GFXCLK_CURVE not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 3) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (!od_table) {
+ pr_info("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+
+ switch (input[0]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+ freq_ptr = &od_table->GfxclkFreq1;
+ voltage_ptr = &od_table->GfxclkVolt1;
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+ freq_ptr = &od_table->GfxclkFreq2;
+ voltage_ptr = &od_table->GfxclkVolt2;
+ break;
+ case 2:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+ freq_ptr = &od_table->GfxclkFreq3;
+ voltage_ptr = &od_table->GfxclkVolt3;
+ break;
+ default:
+ pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+ pr_info("Supported indices: [0, 1, 2]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+ if (ret)
+ return ret;
+ // Allow setting zero to disable the OverDrive VDDC curve
+ if (input[2] != 0) {
+ ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[1];
+ *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+ pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+ } else {
+ // If setting 0, disable all voltage curve settings
+ od_table->GfxclkVolt1 = 0;
+ od_table->GfxclkVolt2 = 0;
+ od_table->GfxclkVolt3 = 0;
+ }
+ navi10_dump_od_table(od_table);
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return ret;
+}
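The SCLK table edit above consumes (index, value) pairs, and the commit command pushes the staged table to the SMU. A hedged usage sketch from a hypothetical caller inside this file (the clock values are arbitrary examples, not recommendations):

	long sclk_input[] = { 0, 1100, 1, 1900 };	/* min 1100 MHz, max 1900 MHz */
	int err;

	err = navi10_od_edit_dpm_table(smu, PP_OD_EDIT_SCLK_VDDC_TABLE,
				       sclk_input, ARRAY_SIZE(sclk_input));
	if (!err)
		err = navi10_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);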
+
+static int navi10_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ if (ret)
+ pr_err("RunBtc failed!\n");
+
+ return ret;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1635,12 +2035,63 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
.get_power_limit = navi10_get_power_limit,
+ .update_pcie_parameters = navi10_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+ .baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .set_default_od_settings = navi10_set_default_od_settings,
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &navi10_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 620ff17c2fef..ec03c7992f6d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -27,6 +27,17 @@
#define NAVI10_PEAK_SCLK_XT (1755)
#define NAVI10_PEAK_SCLK_XL (1625)
+#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670)
+#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448)
+#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181)
+#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
+#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index e62bfba51562..04daf7e9fe05 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -23,6 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
@@ -160,21 +161,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables)
* This interface is only for getting a dpm clock's ultimate frequency and should not
* introduce other similar functions, which would result in excessive callbacks.
*/
-static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max)
+static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t dpm_level, uint32_t *freq)
{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- DpmClocks_t *table = smu->smu_table.clocks_table;
-
- if (!clock || !table)
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
- if (max)
- *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq;
- else
- *clock = table->FClocks[0].Freq;
+ GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq);
return 0;
-
}
static int renoir_print_clk_levels(struct smu_context *smu,
@@ -183,11 +180,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics = {0};
+ SmuMetrics_t metrics;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
+ memset(&metrics, 0, sizeof(metrics));
+
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
@@ -198,7 +197,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
case SMU_SCLK:
/* the parameters returned by the retrieved table are in MHz */
cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
- ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
if (!ret) {
/* driver only knows min/max gfx_clk; add level 1 for all other gfx clks */
if (cur_value == max)
@@ -246,20 +245,474 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return size;
}
+static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+
+ return pm_type;
+}
+
+static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ if (enable) {
+ /* vcn dpm on is a prerequisite for vcn power gate messages */
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = false;
+ } else {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
+ }
+ power_gate->vcn_gated = true;
+ }
+
+ return ret;
+}
+
+static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq, force_freq;
+ enum smu_clk_type clk_type;
+
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ force_freq = highest ? max_freq : min_freq;
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_unforce_dpm_levels(struct smu_context *smu)
+{
+ int ret = 0, i = 0;
+ uint32_t min_freq, max_freq;
+ enum smu_clk_type clk_type;
+
+ struct clk_feature_map {
+ enum smu_clk_type clk_type;
+ uint32_t feature;
+ } clk_feature_map[] = {
+ {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
+ {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
+ {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
+ if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
+ continue;
+
+ clk_type = clk_feature_map[i].clk_type;
+
+ ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile)
+{
+ uint32_t pplib_workload = 0;
+
+ switch (profile) {
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_COUNT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pplib_workload;
+}
+
+static int renoir_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (sclk_mask)
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ if (mclk_mask)
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ if (sclk_mask)
+ /* sclk here means gfxclk, which has three levels: min/current/max */
+ *sclk_mask = 3 - 1;
+
+ if (mclk_mask)
+ *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+
+ if (soc_mask)
+ *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
+ }
+
+ return 0;
+}
+
+/**
+ * This interface gets the dpm clock table for dc
+ */
+static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+ DpmClocks_t *table = smu->smu_table.clocks_table;
+ int i;
+
+ if (!clock_table || !table)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) {
+ clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq;
+ clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
+ clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq;
+ clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
+ clock_table->FClocks[i].Freq = table->FClocks[i].Freq;
+ clock_table->FClocks[i].Vol = table->FClocks[i].Vol;
+ }
+
+ for (i = 0; i < NUM_MEMCLK_DPM_LEVELS; i++) {
+ clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq;
+ clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
+ }
+
+ return 0;
+}
+
+static int renoir_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ int ret = 0;
+ uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ if (soft_min_level > 2 || soft_max_level > 2) {
+ pr_info("Currently sclk only support 3 levels on APU\n");
+ return -EINVAL;
+ }
+
+ ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ soft_max_level == 0 ? min_freq :
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ soft_min_level == 2 ? max_freq :
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ case SMU_MCLK:
+ case SMU_FCLK:
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
+ GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
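The ffs()/fls() pair above converts the caller's level bitmask into the lowest and highest selected level indices. A small sketch of the same conversion in isolation:

	#include <linux/bitops.h>

	/* For mask 0x6 (levels 1 and 2 selected):
	 *   lo = ffs(0x6) - 1 = 1;	index of the lowest set bit
	 *   hi = fls(0x6) - 1 = 2;	index of the highest set bit
	 * An empty mask falls back to level 0, as in the code above. */
	static void mask_to_levels(uint32_t mask, uint32_t *lo, uint32_t *hi)
	{
		*lo = mask ? (ffs(mask) - 1) : 0;
		*hi = mask ? (fls(mask) - 1) : 0;
	}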
+
+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ int workload_type, ret;
+ uint32_t profile_mode = input[size];
+
+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_workload_get_type(smu, profile_mode);
+ if (workload_type < 0) {
+ pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+ return -EINVAL;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+ if (ret) {
+ pr_err("Fail to set workload type %d\n", workload_type);
+ return ret;
+ }
+
+ smu->power_profile_mode = profile_mode;
+
+ return 0;
+}
+
+static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = renoir_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Save watermark settings into the pplib smu structure and
+ * pass the data on to the smu controller.
+ */
+static int renoir_set_watermarks_table(
+ struct smu_context *smu,
+ void *watermarks,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = watermarks;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz));
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ /* pass data to smu controller */
+ ret = smu_write_watermarks_table(smu);
+
+ return ret;
+}
+
+static int renoir_get_power_profile_mode(struct smu_context *smu,
+ char *buf)
+{
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ uint32_t i, size = 0;
+ int16_t workload_type = 0;
+
+ if (!smu->pm_enabled || !buf)
+ return -EINVAL;
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /*
+ * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
+ * Not all profile modes are supported on Renoir.
+ */
+ workload_type = smu_workload_get_type(smu, i);
+ if (workload_type < 0)
+ continue;
+
+ size += sprintf(buf + size, "%2d %14s%s\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+ }
+
+ return size;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_table_index = renoir_get_smu_table_index,
.tables_init = renoir_tables_init,
.set_power_state = NULL,
- .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited,
+ .get_dpm_clk_limited = renoir_get_dpm_clk_limited,
.print_clk_levels = renoir_print_clk_levels,
+ .get_current_power_state = renoir_get_current_power_state,
+ .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable,
+ .force_dpm_limit_value = renoir_force_dpm_limit_value,
+ .unforce_dpm_levels = renoir_unforce_dpm_levels,
+ .get_workload_type = renoir_get_workload_type,
+ .get_profiling_clk_mask = renoir_get_profiling_clk_mask,
+ .force_clk_levels = renoir_force_clk_levels,
+ .set_power_profile_mode = renoir_set_power_profile_mode,
+ .set_performance_level = renoir_set_performance_level,
+ .get_dpm_clock_table = renoir_get_dpm_clock_table,
+ .set_watermarks_table = renoir_set_watermarks_table,
+ .get_power_profile_mode = renoir_get_power_profile_mode,
+ .check_fw_status = smu_v12_0_check_fw_status,
+ .check_fw_version = smu_v12_0_check_fw_version,
+ .powergate_sdma = smu_v12_0_powergate_sdma,
+ .powergate_vcn = smu_v12_0_powergate_vcn,
+ .send_smc_msg = smu_v12_0_send_msg,
+ .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
+ .read_smc_arg = smu_v12_0_read_arg,
+ .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
+ .gfx_off_control = smu_v12_0_gfx_off_control,
+ .init_smc_tables = smu_v12_0_init_smc_tables,
+ .fini_smc_tables = smu_v12_0_fini_smc_tables,
+ .populate_smc_tables = smu_v12_0_populate_smc_tables,
+ .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
+ .mode2_reset = smu_v12_0_mode2_reset,
+ .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &renoir_ppt_funcs;
smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
- smu_table->table_count = TABLE_COUNT;
+ smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
new file mode 100644
index 000000000000..8bcda7871309
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SMU_INTERNAL_H__
+#define __SMU_INTERNAL_H__
+
+#include "amdgpu_smu.h"
+
+#define smu_init_microcode(smu) \
+ ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0)
+
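The wrappers in this header all share one guarded-dispatch shape: call the per-ASIC ppt_funcs hook when it is populated, otherwise fall back to a benign default (0, or -EINVAL where a real result is required). A minimal reduction of the pattern, with illustrative names:

	struct example_ops {
		int (*do_thing)(void *ctx);
	};

	/* Evaluates to the hook's result when implemented, to 0 otherwise. */
	#define example_do_thing(ops, ctx) \
		((ops)->do_thing ? (ops)->do_thing((ctx)) : 0)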
+#define smu_setup_pptable(smu) \
+ ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0)
+#define smu_powergate_sdma(smu, gate) \
+ ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0)
+#define smu_powergate_vcn(smu, gate) \
+ ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0)
+
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_tables(smu) \
+ ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_gfx_off_control(smu, enable) \
+ ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0)
+
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_default_od_settings(smu, initialize) \
+ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display_count(smu, count) \
+ ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+
+#define smu_tables_init(smu, tab) \
+ ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+#define smu_set_thermal_fan_table(smu) \
+ ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0)
+#define smu_stop_thermal_control(smu) \
+ ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0)
+
+#define smu_smc_read_sensor(smu, sensor, data, size) \
+ ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_smu_msg_index ? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_clk_get_index(smu, msg) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_smu_clk_index ? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_feature_get_index(smu, msg) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_smu_feature_index ? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_table_get_index(smu, tab) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_smu_table_index ? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL)
+#define smu_power_get_index(smu, src) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_smu_power_index ? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL)
+#define smu_workload_get_type(smu, profile) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_workload_type ? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL)
+#define smu_run_btc(smu) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->run_btc ? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+#define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+	((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_allowed_feature_mask ? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+
+#define smu_set_watermarks_table(smu, tab, clock_ranges) \
+ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+ ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0)
+#define smu_thermal_temperature_range_update(smu, range, rw) \
+	((smu)->ppt_funcs->thermal_temperature_range_update ? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0)
+#define smu_get_thermal_temperature_range(smu, range) \
+	((smu)->ppt_funcs->get_thermal_temperature_range ? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+#define smu_register_irq_handler(smu) \
+	((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler((smu)) : 0)
+
+#define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+
+#define smu_asic_set_performance_level(smu, level) \
+	((smu)->ppt_funcs->set_performance_level ? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+#define smu_dump_pptable(smu) \
+ ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0)
+#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \
+ ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+
+#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+
+#define smu_override_pcie_parameters(smu) \
+ ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0)
+
+#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \
+ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0)
+
+#endif
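
The wrapper macros above all share one shape: test an optional ppt_funcs callback, dispatch to it if present, otherwise return a default (0 where a missing hook is harmless, -EINVAL where the lookup is mandatory). A minimal standalone sketch of that pattern, with hypothetical names rather than the driver's:

#include <errno.h>
#include <stdio.h>

struct ops {
	int (*do_thing)(int arg);	/* optional hook; may be NULL */
};

struct ctx {
	struct ops *ops;
};

/* Optional hook: absent simply means "nothing to do", so return 0. */
#define ctx_do_thing(c, arg) \
	((c)->ops->do_thing ? (c)->ops->do_thing((arg)) : 0)

/* Mandatory lookup: a missing hook (or ops table) is an error. */
#define ctx_lookup(c, arg) \
	((c)->ops ? ((c)->ops->do_thing ? (c)->ops->do_thing((arg)) : -EINVAL) : -EINVAL)

static int say(int arg)
{
	printf("got %d\n", arg);
	return 0;
}

int main(void)
{
	struct ops with = { .do_thing = say }, without = { 0 };
	struct ctx a = { &with }, b = { &without };

	ctx_do_thing(&a, 1);			/* dispatches to say() */
	printf("%d\n", ctx_do_thing(&b, 2));	/* 0: silently skipped */
	printf("%d\n", ctx_lookup(&b, 3));	/* -EINVAL (-22) */
	return 0;
}
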
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index c5257ae3188a..fc9679ea2368 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,17 +24,19 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define SMU_11_0_PARTIAL_PPTABLE
+
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
-#include "vega20_ppt.h"
-#include "arcturus_ppt.h"
-#include "navi10_ppt.h"
+#include "amd_pcie.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -61,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -88,7 +90,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -113,7 +115,7 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -144,7 +146,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v11_0_init_microcode(struct smu_context *smu)
+int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const char *chip_name;
@@ -206,7 +208,7 @@ out:
return err;
}
-static int smu_v11_0_load_microcode(struct smu_context *smu)
+int smu_v11_0_load_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const uint32_t *src;
@@ -244,7 +246,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_check_fw_status(struct smu_context *smu)
+int smu_v11_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -259,7 +261,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v11_0_check_fw_version(struct smu_context *smu)
+int smu_v11_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -354,7 +356,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
return 0;
}
-static int smu_v11_0_setup_pptable(struct smu_context *smu)
+int smu_v11_0_setup_pptable(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
const struct smc_firmware_header_v1_0 *hdr;
@@ -369,6 +371,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+ pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
@@ -385,6 +388,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
return ret;
} else {
+ pr_info("use vbios provided pptable\n");
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
powerplayinfo);
@@ -433,13 +437,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
int ret = 0;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -460,18 +464,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
int ret = 0;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->tables);
kfree(smu_table->metrics_table);
smu_table->tables = NULL;
- smu_table->table_count = 0;
smu_table->metrics_table = NULL;
smu_table->metrics_time = 0;
@@ -481,7 +484,7 @@ static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_init_power(struct smu_context *smu)
+int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -499,7 +502,7 @@ static int smu_v11_0_init_power(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_fini_power(struct smu_context *smu)
+int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
@@ -576,7 +579,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
int ret, index;
struct amdgpu_device *adev = smu->adev;
@@ -673,7 +676,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *memory_pool = &smu_table->memory_pool;
@@ -719,7 +722,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_check_pptable(struct smu_context *smu)
+int smu_v11_0_check_pptable(struct smu_context *smu)
{
int ret;
@@ -727,7 +730,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_parse_pptable(struct smu_context *smu)
+int smu_v11_0_parse_pptable(struct smu_context *smu)
{
int ret;
@@ -751,7 +754,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
int ret;
@@ -760,7 +763,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_pptable(struct smu_context *smu)
+int smu_v11_0_write_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
@@ -771,24 +774,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
-{
- int ret = 0;
- struct smu_table_context *smu_table = &smu->smu_table;
- struct smu_table *table = NULL;
-
- table = &smu_table->tables[SMU_TABLE_WATERMARKS];
-
- if (!table->cpu_addr)
- return -EINVAL;
-
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
- true);
-
- return ret;
-}
-
-static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
@@ -800,7 +786,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl
return ret;
}
-static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -809,11 +795,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
if (!table_context)
return -EINVAL;
- return smu_set_deep_sleep_dcefclk(smu,
- table_context->boot_values.dcefclk / 100);
+ return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
-static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
@@ -831,7 +816,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
+int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
@@ -843,7 +828,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
}
-static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
@@ -870,7 +855,7 @@ failed:
return ret;
}
-static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+int smu_v11_0_get_enabled_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
uint32_t feature_mask_high = 0, feature_mask_low = 0;
@@ -899,7 +884,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_system_features_control(struct smu_context *smu,
+int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
struct smu_feature *feature = &smu->smu_feature;
@@ -925,7 +910,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_notify_display_change(struct smu_context *smu)
+int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
@@ -983,7 +968,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return ret;
}
-static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
@@ -1063,13 +1048,44 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu)
+{
+	uint32_t od_limit, max_power_limit;
+	struct smu_11_0_powerplay_table *powerplay_table = NULL;
+	struct smu_table_context *table_context = &smu->smu_table;
+
+	powerplay_table = table_context->power_play_table;
+
+ max_power_limit = smu_get_pptable_power_limit(smu);
+
+ if (!max_power_limit) {
+		/* If we couldn't get the table limit, fall back on the first-read value */
+ if (!smu->default_power_limit)
+ smu->default_power_limit = smu->power_limit;
+ max_power_limit = smu->default_power_limit;
+ }
+
+ if (smu->od_enabled) {
+ od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+ pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
+ max_power_limit *= (100 + od_limit);
+ max_power_limit /= 100;
+ }
+
+ return max_power_limit;
+}
+
+int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
+ uint32_t max_power_limit;
- if (n > smu->default_power_limit) {
- pr_err("New power limit is over the max allowed %d\n",
- smu->default_power_limit);
+ max_power_limit = smu_v11_0_get_max_power_limit(smu);
+
+ if (n > max_power_limit) {
+ pr_err("New power limit (%d) is over the max allowed %d\n",
+ n,
+ max_power_limit);
return -EINVAL;
}
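
The overdrive headroom applied in smu_v11_0_get_max_power_limit() above is plain integer percentage scaling of the base limit. A standalone check with hypothetical numbers (not values from any real pptable):

#include <stdint.h>
#include <stdio.h>

/* Raise a base power limit by an overdrive percentage, in integer math. */
static uint32_t scale_power_limit(uint32_t base_limit, uint32_t od_percent)
{
	return base_limit * (100 + od_percent) / 100;
}

int main(void)
{
	/* e.g. a 250 W board limit with 20% headroom allows 300 W */
	printf("%u\n", scale_power_limit(250, 20));	/* 300 */
	return 0;
}
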
@@ -1091,7 +1107,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return 0;
}
-static int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
+int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
enum smu_clk_type clk_id,
uint32_t *value)
{
@@ -1170,7 +1186,7 @@ static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
return 0;
}
-static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
int ret = 0;
struct smu_temperature_range range;
@@ -1212,6 +1228,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
+int smu_v11_0_stop_thermal_control(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);
+
+ return 0;
+}
+
static uint16_t convert_to_vddc(uint8_t vid)
{
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
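
The VID decode above is the SVI2 formula, 1.55 V minus 6.25 mV per step, carried out in quarter-millivolt units so the division lands on millivolts. A standalone check, assuming SMU11_VOLTAGE_SCALE is 4 as in this driver's headers:

#include <stdint.h>
#include <stdio.h>

#define SMU11_VOLTAGE_SCALE 4	/* assumption: matches smu_v11_0.h */

static uint16_t convert_to_vddc(uint8_t vid)
{
	/* 6200 quarter-mV = 1.55 V; each VID step is 25 quarter-mV = 6.25 mV */
	return (uint16_t)((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

int main(void)
{
	printf("%u mV\n", convert_to_vddc(0));		/* 1550 mV */
	printf("%u mV\n", convert_to_vddc(100));	/* 925 mV */
	return 0;
}
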
@@ -1236,7 +1261,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
-static int smu_v11_0_read_sensor(struct smu_context *smu,
+int smu_v11_0_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
@@ -1273,7 +1298,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu,
return ret;
}
-static int
+int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
*clock_req)
@@ -1316,9 +1341,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
- mutex_unlock(&smu->mutex);
if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1328,27 +1351,7 @@ failed:
return ret;
}
-static int
-smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
- dm_pp_wm_sets_with_clock_ranges_soc15
- *clock_ranges)
-{
- int ret = 0;
- struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
- void *table = watermarks->cpu_addr;
-
- if (!smu->disable_watermark &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
- smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
- }
-
- return ret;
-}
-
-static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0;
struct amdgpu_device *adev = smu->adev;
@@ -1361,12 +1364,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
case CHIP_NAVI12:
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
- mutex_lock(&smu->mutex);
if (enable)
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
else
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
- mutex_unlock(&smu->mutex);
break;
default:
break;
@@ -1375,7 +1376,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static uint32_t
+uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
@@ -1415,7 +1416,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
return 0;
}
-static int
+int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1444,7 +1445,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
-static int
+int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
uint32_t mode)
{
@@ -1472,7 +1473,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
return ret;
}
-static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
@@ -1482,10 +1483,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
if (!speed)
return -EINVAL;
- mutex_lock(&(smu->mutex));
ret = smu_v11_0_auto_fan_control(smu, 0);
if (ret)
- goto set_fan_speed_rpm_failed;
+ return ret;
crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
@@ -1496,23 +1496,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
-set_fan_speed_rpm_failed:
- mutex_unlock(&(smu->mutex));
return ret;
}
-#define XGMI_STATE_D0 1
-#define XGMI_STATE_D3 0
-
-static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
int ret = 0;
- mutex_lock(&(smu->mutex));
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
- mutex_unlock(&(smu->mutex));
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
return ret;
}
@@ -1559,7 +1552,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
.process = smu_v11_0_irq_process,
};
-static int smu_v11_0_register_irq_handler(struct smu_context *smu)
+int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct amdgpu_irq_src *irq_src = smu->irq_source;
@@ -1591,7 +1584,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
+int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -1621,13 +1614,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
return 0;
}
-static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
+int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- mutex_lock(&smu->mutex);
ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -1637,7 +1628,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
}
-static bool smu_v11_0_baco_is_support(struct smu_context *smu)
+bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1661,7 +1652,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu)
return false;
}
-static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
+enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
enum smu_baco_state baco_state;
@@ -1673,7 +1664,7 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
return baco_state;
}
-static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
+int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
@@ -1697,7 +1688,7 @@ out:
return ret;
}
-static int smu_v11_0_baco_reset(struct smu_context *smu)
+int smu_v11_0_baco_reset(struct smu_context *smu)
{
int ret = 0;
@@ -1718,13 +1709,12 @@ static int smu_v11_0_baco_reset(struct smu_context *smu)
return ret;
}
-static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
- mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) {
ret = -EINVAL;
@@ -1751,80 +1741,102 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v11_0_funcs = {
- .init_microcode = smu_v11_0_init_microcode,
- .load_microcode = smu_v11_0_load_microcode,
- .check_fw_status = smu_v11_0_check_fw_status,
- .check_fw_version = smu_v11_0_check_fw_version,
- .send_smc_msg = smu_v11_0_send_msg,
- .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
- .setup_pptable = smu_v11_0_setup_pptable,
- .init_smc_tables = smu_v11_0_init_smc_tables,
- .fini_smc_tables = smu_v11_0_fini_smc_tables,
- .init_power = smu_v11_0_init_power,
- .fini_power = smu_v11_0_fini_power,
- .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
- .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
- .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .check_pptable = smu_v11_0_check_pptable,
- .parse_pptable = smu_v11_0_parse_pptable,
- .populate_smc_tables = smu_v11_0_populate_smc_pptable,
- .write_pptable = smu_v11_0_write_pptable,
- .write_watermarks_table = smu_v11_0_write_watermarks_table,
- .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
- .set_tool_table_location = smu_v11_0_set_tool_table_location,
- .init_display_count = smu_v11_0_init_display_count,
- .set_allowed_mask = smu_v11_0_set_allowed_mask,
- .get_enabled_mask = smu_v11_0_get_enabled_mask,
- .system_features_control = smu_v11_0_system_features_control,
- .notify_display_change = smu_v11_0_notify_display_change,
- .set_power_limit = smu_v11_0_set_power_limit,
- .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
- .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
- .start_thermal_control = smu_v11_0_start_thermal_control,
- .read_sensor = smu_v11_0_read_sensor,
- .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
- .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
- .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
- .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
- .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
- .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
- .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
- .gfx_off_control = smu_v11_0_gfx_off_control,
- .register_irq_handler = smu_v11_0_register_irq_handler,
- .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
- .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support = smu_v11_0_baco_is_support,
- .baco_get_state = smu_v11_0_baco_get_state,
- .baco_set_state = smu_v11_0_baco_set_state,
- .baco_reset = smu_v11_0_baco_reset,
- .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
-};
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
-void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
+ param);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
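SetSoftMinByFreq/SetSoftMaxByFreq take one 32-bit argument with the clock index in the high half and the target frequency in MHz in the low half, which is exactly the (clk_id << 16) | (freq & 0xffff) packing above. A sketch of the encoding (smu_pack_clk_freq() is a made-up helper, not a driver API):

#include <stdint.h>
#include <stdio.h>

static uint32_t smu_pack_clk_freq(uint16_t clk_id, uint16_t freq_mhz)
{
	return ((uint32_t)clk_id << 16) | freq_mhz;
}

int main(void)
{
	/* e.g. clock index 2 with an 1850 MHz soft maximum */
	printf("0x%08x\n", smu_pack_clk_freq(2, 1850));	/* 0x0002073a */
	return 0;
}
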
+int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0;
+ int ret;
- smu->funcs = &smu_v11_0_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- vega20_set_ppt_funcs(smu);
- break;
- case CHIP_ARCTURUS:
- arcturus_set_ppt_funcs(smu);
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- navi10_set_ppt_funcs(smu);
- break;
- default:
- pr_warn("Unknown asic for smu11\n");
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+
+ return ret;
+}
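
The OverridePcieParameters argument follows the bit layout documented in the comment above: LCLK DPM level in bits 31:16, PCIe generation in bits 15:8, lane-width code in bits 7:0. A sketch of that packing (pack_pcie_arg() is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pcie_arg(uint16_t lclk_dpm, uint8_t gen, uint8_t width)
{
	return ((uint32_t)lclk_dpm << 16) | ((uint32_t)gen << 8) | width;
}

int main(void)
{
	/* DPM1, Gen4 (encoded as 3), x16 (encoded as 6) */
	printf("0x%08x\n", pack_pcie_arg(1, 3, 6));	/* 0x00010306 */
	return 0;
}
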
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+	if (initialize) {
+		if (table_context->overdrive_table)
+			return -EINVAL;
+		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+		if (!table_context->overdrive_table)
+			return -ENOMEM;
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
}
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ return ret;
}
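
smu_v11_0_set_default_od_settings() above follows an init-once/sync pattern: on the first (initialize) call it allocates a CPU copy and pulls the firmware defaults into it, and on every call it pushes the cached, possibly user-edited, copy back, with smu_update_table()'s last argument selecting the direction. A standalone sketch of that shape, with a generic transfer callback standing in for smu_update_table():

#include <stdbool.h>
#include <stdlib.h>

struct od_cache {
	void *table;
	size_t size;
};

static int od_cache_sync(struct od_cache *c, bool initialize,
			 int (*xfer)(void *buf, size_t size, bool to_fw))
{
	int ret;

	if (initialize) {
		if (c->table)
			return -1;	/* already initialized */
		c->table = calloc(1, c->size);
		if (!c->table)
			return -1;
		ret = xfer(c->table, c->size, false);	/* pull defaults */
		if (ret)
			return ret;
	}
	return xfer(c->table, c->size, true);	/* push current copy */
}
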
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 9d2280ca1f4b..139dd737eaa5 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -24,12 +24,12 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
-#include "renoir_ppt.h"
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
@@ -41,7 +41,7 @@
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
-static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
+int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
@@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
return 0;
}
-static int smu_v12_0_wait_for_response(struct smu_context *smu)
+int smu_v12_0_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t cur_value, i;
@@ -77,7 +77,7 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu)
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
-static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
+int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -102,7 +102,7 @@ static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg)
}
-static int
+int
smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
uint32_t param)
{
@@ -132,7 +132,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
return ret;
}
-static int smu_v12_0_check_fw_status(struct smu_context *smu)
+int smu_v12_0_check_fw_status(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
@@ -147,7 +147,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu)
return -EIO;
}
-static int smu_v12_0_check_fw_version(struct smu_context *smu)
+int smu_v12_0_check_fw_version(struct smu_context *smu)
{
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
@@ -181,7 +181,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu)
return ret;
}
-static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -192,7 +192,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
}
-static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
+int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
{
if (!(smu->adev->flags & AMD_IS_APU))
return 0;
@@ -203,7 +203,7 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
}
-static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
+int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
@@ -224,7 +224,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
* Returns 2=Not in GFXOFF.
* Returns 3=Transition into GFXOFF.
*/
-static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
+uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
{
uint32_t reg;
uint32_t gfxOff_Status = 0;
@@ -237,22 +237,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu)
return gfxOff_Status;
}
-static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
+int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
{
int ret = 0, timeout = 500;
if (enable) {
ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
- /* confirm gfx is back to "off" state, timeout is 5 seconds */
- while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) {
- msleep(10);
- timeout--;
- if (timeout == 0) {
- DRM_ERROR("enable gfxoff timeout and failed!\n");
- break;
- }
- }
} else {
ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
@@ -270,12 +261,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static int smu_v12_0_init_smc_tables(struct smu_context *smu)
+int smu_v12_0_init_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = NULL;
- if (smu_table->tables || smu_table->table_count == 0)
+ if (smu_table->tables)
return -EINVAL;
tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
@@ -288,11 +279,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu)
return smu_tables_init(smu, tables);
}
-static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
+int smu_v12_0_fini_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- if (!smu_table->tables || smu_table->table_count == 0)
+ if (!smu_table->tables)
return -EINVAL;
kfree(smu_table->clocks_table);
@@ -304,7 +295,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu)
return 0;
}
-static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
+int smu_v12_0_populate_smc_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
@@ -319,14 +310,20 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu)
return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}
-static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0;
-
- mutex_lock(&smu->mutex);
+ uint32_t mclk_mask, soc_mask;
if (max) {
+ ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ NULL,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ goto failed;
+
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -340,14 +337,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, max, true);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
if (ret)
goto failed;
break;
default:
ret = -EINVAL;
goto failed;
-
}
}
@@ -365,7 +368,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
goto failed;
break;
case SMU_UCLK:
- ret = smu_get_dpm_uclk_limited(smu, min, false);
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
+ if (ret)
+ goto failed;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min);
if (ret)
goto failed;
break;
@@ -373,40 +383,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
ret = -EINVAL;
goto failed;
}
-
}
failed:
- mutex_unlock(&smu->mutex);
return ret;
}
-static const struct smu_funcs smu_v12_0_funcs = {
- .check_fw_status = smu_v12_0_check_fw_status,
- .check_fw_version = smu_v12_0_check_fw_version,
- .powergate_sdma = smu_v12_0_powergate_sdma,
- .powergate_vcn = smu_v12_0_powergate_vcn,
- .send_smc_msg = smu_v12_0_send_msg,
- .send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
- .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
- .gfx_off_control = smu_v12_0_gfx_off_control,
- .init_smc_tables = smu_v12_0_init_smc_tables,
- .fini_smc_tables = smu_v12_0_fini_smc_tables,
- .populate_smc_tables = smu_v12_0_populate_smc_tables,
- .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq,
-};
-
-void smu_v12_0_set_smu_funcs(struct smu_context *smu)
+int smu_v12_0_mode2_reset(struct smu_context *smu)
+{
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+}
+
+int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
- smu->funcs = &smu_v12_0_funcs;
+ if (max < min)
+ return -EINVAL;
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- renoir_set_ppt_funcs(smu);
- break;
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_FCLK:
+ case SMU_MCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_SOCCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ if (ret)
+ return ret;
+ break;
+ case SMU_VCLK:
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ if (ret)
+ return ret;
+ break;
default:
- pr_warn("Unknown asic for smu12\n");
+ return -EINVAL;
}
+
+ return ret;
}
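
Each branch of the switch above sends the same hard-min/soft-max message pair and differs only in the message IDs, so the routine could equally be table-driven. A sketch of that alternative shape (enum values, message numbers, and the send callback are placeholders, not the driver's):

enum clk_id { CLK_GFX, CLK_FCLK, CLK_SOC, CLK_VCLK, CLK_MAX };

struct freq_msgs {
	int set_hard_min;	/* placeholder for a SetHardMin message ID */
	int set_soft_max;	/* placeholder for a SetSoftMax message ID */
};

static const struct freq_msgs msgs[CLK_MAX] = {
	[CLK_GFX]  = { 10, 11 },
	[CLK_FCLK] = { 12, 13 },
	[CLK_SOC]  = { 14, 15 },
	[CLK_VCLK] = { 16, 17 },
};

static int set_limited_range(int (*send)(int msg, unsigned int param),
			     enum clk_id clk, unsigned int min, unsigned int max)
{
	int ret;

	if (clk >= CLK_MAX || max < min)
		return -1;

	ret = send(msgs[clk].set_hard_min, min);
	if (ret)
		return ret;
	return send(msgs[clk].set_soft_max, max);
}
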
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 3f12cf341511..aa0ee2b46135 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 4728aa23a818..7dca04a89217 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int ret = 0;
struct cgs_firmware_info info = {0};
- struct smu8_smumgr *smu8_smu;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- smu8_smu = hwmgr->smu_backend;
ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 0dbdde69f2d9..0f3836fd9666 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index f9589806bf83..90c782c132d2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return -EINVAL);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b9089c6bea85..f604612f411f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
return ret);
/* flush hdp cache */
- adev->nbio_funcs->hdp_flush(adev, NULL);
+ adev->nbio.funcs->hdp_flush(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 92c393f613d3..0b4892833808 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
+#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
@@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PrepareMp1ForShutdown),
MSG_MAP(SetMGpuFanBoostLimitRpm),
MSG_MAP(GetAVFSVoltageByDpm),
+ MSG_MAP(DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = {
@@ -464,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
- table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
return 0;
}
@@ -634,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
!smu_dpm_ctx->dpm_current_power_state)
return -EINVAL;
- mutex_lock(&(smu->mutex));
switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
case SMU_STATE_UI_LABEL_BATTERY:
pm_type = POWER_STATE_TYPE_BATTERY;
@@ -652,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&(smu->mutex));
return pm_type;
}
@@ -1274,16 +1273,8 @@ static int vega20_force_clk_levels(struct smu_context *smu,
struct vega20_dpm_table *dpm_table;
struct vega20_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level, hard_min_level;
- struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
int ret = 0;
- if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
- return -EINVAL;
- }
-
- mutex_lock(&(smu->mutex));
-
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1436,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
break;
}
- mutex_unlock(&(smu->mutex));
return ret;
}
@@ -1451,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
dpm_table = smu_dpm->dpm_context;
- mutex_lock(&smu->mutex);
-
switch (clk_type) {
case SMU_GFXCLK:
single_dpm_table = &(dpm_table->gfx_table);
@@ -1474,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
ret = -EINVAL;
}
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2260,7 +2247,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
- if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) {
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
@@ -2547,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
int feature_enabled;
PPCLK_e clk_id;
- mutex_lock(&(smu->mutex));
-
dpm_table = smu_dpm->dpm_context;
golden_table = smu_dpm->golden_dpm_context;
@@ -2598,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
}
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
set_od_failed:
- mutex_unlock(&(smu->mutex));
-
return ret;
}
@@ -2827,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
}
if (type == PP_OD_COMMIT_DPM_TABLE) {
- mutex_lock(&(smu->mutex));
ret = smu_handle_task(smu, smu_dpm->dpm_level,
- AMD_PP_TASK_READJUST_POWER_STATE);
- mutex_unlock(&(smu->mutex));
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
}
return ret;
@@ -3047,7 +3030,7 @@ static int vega20_read_sensor(struct smu_context *smu,
*size = 4;
break;
default:
- ret = smu_smc_read_sensor(smu, sensor, data, size);
+ ret = smu_v11_0_read_sensor(smu, sensor, data, size);
}
mutex_unlock(&smu->sensor_lock);
@@ -3141,6 +3124,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu,
return 0;
}
+static int vega20_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+	/* PPSMC_MSG_DFCstateControl is supported by SMC firmware 40.50 and later */
+	if (smu_version < 0x283200) {
+		pr_err("DF cstate control is only supported by SMC firmware 40.50 and later!\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+}
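
The 0x283200 cutoff above packs the firmware version as one byte each of major, minor, and patch, so 0x28 = 40 and 0x32 = 50. A sketch of the decode, assuming that byte layout:

#include <stdint.h>
#include <stdio.h>

/* Assumption: version bytes are major/minor/patch, high byte first. */
static void decode_smu_fw_version(uint32_t v)
{
	printf("%u.%u.%u\n", (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
}

int main(void)
{
	decode_smu_fw_version(0x283200);	/* prints 40.50.0 */
	return 0;
}
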
+
+static int vega20_update_pcie_parameters(struct smu_context *smu,
+ uint32_t pcie_gen_cap,
+ uint32_t pcie_width_cap)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+	int ret = 0, i;
+ uint32_t smu_pcie_arg;
+
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ smu_pcie_arg = (i << 16) |
+ ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
+ (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
+ pptable->PcieLaneCount[i] : pcie_width_cap);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+					  smu_pcie_arg);
+		if (ret)
+			break;
+	}
+
+ return ret;
+}
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
.alloc_dpm_context = vega20_allocate_dpm_context,
@@ -3153,7 +3179,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_smu_table_index = vega20_get_smu_table_index,
.get_smu_power_index = vega20_get_pwr_src_index,
.get_workload_type = vega20_get_workload_type,
- .run_afll_btc = vega20_run_btc_afll,
+ .run_btc = vega20_run_btc_afll,
.get_allowed_feature_mask = vega20_get_allowed_feature_mask,
.get_current_power_state = vega20_get_current_power_state,
.set_default_dpm_table = vega20_set_default_dpm_table,
@@ -3183,13 +3209,61 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.get_fan_speed_percent = vega20_get_fan_speed_percent,
.get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
- .get_thermal_temperature_range = vega20_get_thermal_temperature_range
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .set_df_cstate = vega20_set_df_cstate,
+ .update_pcie_parameters = vega20_update_pcie_parameters,
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .setup_pptable = smu_v11_0_setup_pptable,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_tables = smu_v11_0_populate_smc_pptable,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .write_pptable = smu_v11_0_write_pptable,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .system_features_control = smu_v11_0_system_features_control,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .init_display_count = smu_v11_0_init_display_count,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .stop_thermal_control = smu_v11_0_stop_thermal_control,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+ .gfx_off_control = smu_v11_0_gfx_off_control,
+ .register_irq_handler = smu_v11_0_register_irq_handler,
+ .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
+ .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
+	.baco_is_support = smu_v11_0_baco_is_support,
+ .baco_get_state = smu_v11_0_baco_get_state,
+ .baco_set_state = smu_v11_0_baco_set_state,
+ .baco_reset = smu_v11_0_baco_reset,
+ .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
+ .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ .override_pcie_parameters = smu_v11_0_override_pcie_parameters,
};
void vega20_set_ppt_funcs(struct smu_context *smu)
{
- struct smu_table_context *smu_table = &smu->smu_table;
-
smu->ppt_funcs = &vega20_ppt_funcs;
- smu_table->table_count = TABLE_COUNT;
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 6b7f791685ec..d6a6692db0ac 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct arcpgu_drm_private *arcpgu;
- struct device_node *encoder_node;
+ struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct resource *res;
int ret;
@@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm)
if (arc_pgu_setup_crtc(drm) < 0)
return -ENODEV;
- /* find the encoder node and initialize it */
- encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
+ /*
+ * There is only one output port inside each device. It is linked with
+	 * the encoder endpoint.
+ */
+ endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (endpoint_node) {
+ encoder_node = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ }
+
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
+ dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n");
ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
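
The lookup added above is the usual OF-graph walk: take the device's first endpoint, resolve the node on the remote side of the link, then drop the endpoint reference. Condensed into one helper, as a sketch rather than the driver's code:

#include <linux/of.h>
#include <linux/of_graph.h>

static struct device_node *find_remote_encoder(struct device_node *np)
{
	struct device_node *endpoint, *remote;

	endpoint = of_graph_get_next_endpoint(np, NULL);
	if (!endpoint)
		return NULL;

	remote = of_graph_get_remote_port_parent(endpoint);
	of_node_put(endpoint);

	return remote;	/* caller drops this reference with of_node_put() */
}
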
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 98aac743cc26..8fd7094beece 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index cec0639e3aa1..e87ff8623076 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -12,3 +12,9 @@ config DRM_KOMEDA
Processor driver. It supports the D71 variants of the hardware.
If compiled as a module it will be called komeda.
+
+config DRM_KOMEDA_ERROR_PRINT
+ bool "Enable komeda error print"
+ depends on DRM_KOMEDA
+ help
+ Choose this option to enable error printing.
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 5c3900c2e764..f095a1c68ac7 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -22,4 +22,6 @@ komeda-y += \
d71/d71_dev.o \
d71/d71_component.o
+komeda-$(CONFIG_DRM_KOMEDA_ERROR_PRINT) += komeda_event.o
+
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 55a8cc94808a..f0ba26e282c3 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -106,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg)
i, hdr.output_ids[i]);
}
+/* On D71, we use the global line size. From D32, every component has
+ * a line size register to indicate the fifo size.
+ */
+static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg,
+ u32 max_default)
+{
+ if (!d71->periph_addr)
+ max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+
+ return max_default;
+}
+
+static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg)
+{
+ return __get_blk_line_size(d71, reg, d71->max_line_size);
+}
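A note on the product check used above, inferred from the probe code later in this patch:

/*
 * d71->periph_addr is only set on D71, whose peripheral block carries
 * the global line size; D32 and newer have no peripheral block, so
 * !periph_addr selects the per-component BLK_MAX_LINE_SIZE read.
 */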
+
static u32 to_rot_ctrl(u32 rot)
{
u32 lr_ctrl = 0;
@@ -332,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
+static int d71_layer_validate(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct komeda_layer *layer = to_layer(c);
+ struct drm_plane_state *plane_st;
+ struct drm_framebuffer *fb;
+ u32 fourcc, line_sz, max_line_sz;
+
+ plane_st = drm_atomic_get_new_plane_state(state->obj.state,
+ state->plane);
+ fb = plane_st->fb;
+ fourcc = fb->format->format;
+
+ if (drm_rotation_90_or_270(st->rot))
+ line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b;
+ else
+ line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r;
+
+ if (fb->modifier) {
+ if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
+ AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
+ max_line_sz = layer->line_sz;
+ else
+ max_line_sz = layer->line_sz / 2;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceed the max afbc line_sz: %d.\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+ }
+
+ if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) {
+ DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceed the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) {
+ DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceed the max size 2046.\n",
+ line_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
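A worked example of the AFBC line-size check above, with hypothetical sizes:

/*
 * Sketch, assuming layer->line_sz = 4096: a framebuffer with AFBC
 * 16x16 superblocks is limited to max_line_sz = 4096 / 2 = 2048, while
 * 32x8 superblocks allow the full 4096. A 90/270-degree rotated source
 * 2560 pixels high (no AFBC crop) gives line_sz = 2560, which fails
 * validation with 16x16 AFBC but passes with 32x8.
 */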
+
static const struct komeda_component_funcs d71_layer_funcs = {
+ .validate = d71_layer_validate,
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -365,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71,
else
layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
- set_range(&layer->hsize_in, 4, d71->max_line_size);
+ if (!d71->periph_addr) {
+ /* D32 or newer product */
+ layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE);
+ layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info);
+ } else if (d71->max_line_size > 2048) {
+ /* D71 4K */
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ /* D71 2K */
+ if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) {
+ /* rich layer is 4K configuration */
+ layer->line_sz = d71->max_line_size * 2;
+ layer->yuv_line_sz = layer->line_sz / 2;
+ } else {
+ layer->line_sz = d71->max_line_size;
+ layer->yuv_line_sz = 0;
+ }
+ }
+
+ set_range(&layer->hsize_in, 4, layer->line_sz);
+
set_range(&layer->vsize_in, 4, d71->max_vsize);
malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
@@ -456,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71,
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
+ wb_layer->line_sz = get_blk_line_size(d71, reg);
+ wb_layer->yuv_line_sz = wb_layer->line_sz;
- set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz);
+ set_range(&wb_layer->vsize_in, 64, d71->max_vsize);
return 0;
}
@@ -595,8 +684,8 @@ static int d71_compiz_init(struct d71_dev *d71,
compiz = to_compiz(c);
- set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
- set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+ set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg));
+ set_range(&compiz->vsize, 64, d71->max_vsize);
return 0;
}
@@ -703,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c,
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
- u32 v[9];
+ u32 v[10];
dump_block_header(sf, c->reg);
@@ -723,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
+
+ get_values_from_reg(c->reg, 0x130, 10, v);
+ seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]);
+ seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]);
+ seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
@@ -753,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71,
}
scaler = to_scaler(c);
- set_range(&scaler->hsize, 4, 2048);
+ set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048));
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
@@ -862,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71,
splitter = to_splitter(c);
- set_range(&splitter->hsize, 4, d71->max_line_size);
+ set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg));
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
@@ -933,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71,
merger = to_merger(c);
- set_range(&merger->hsize_merged, 4, 4032);
+ set_range(&merger->hsize_merged, 4,
+ __get_blk_line_size(d71, reg, 4032));
set_range(&merger->vsize_merged, 4, 4096);
return 0;
@@ -944,13 +1046,26 @@ static void d71_improc_update(struct komeda_component *c,
{
struct komeda_improc_state *st = to_improc_st(state);
u32 __iomem *reg = c->reg;
- u32 index;
+ u32 index, mask = 0, ctrl = 0;
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+ malidp_write32(reg, IPS_DEPTH, st->color_depth);
+
+ mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+
+ /* configure the color format */
+ if (st->color_format == DRM_COLOR_FORMAT_YCRCB420)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422)
+ ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422;
+ else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444)
+ ctrl |= IPS_CTRL_YUV;
+
+ malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
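The mask/ctrl pairs the branches above program into BLK_CONTROL, per output format:

/*
 *   format     ctrl bits set                bits cleared via mask
 *   RGB444     none                         YUV, CHD422, CHD420
 *   YCRCB444   YUV                          CHD422, CHD420
 *   YCRCB422   YUV | CHD422                 CHD420
 *   YCRCB420   YUV | CHD422 | CHD420        none
 *
 * Using a fixed mask with malidp_write32_mask() guarantees that stale
 * chroma-decimation bits are cleared on a format change.
 */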
static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
@@ -1218,6 +1333,90 @@ int d71_probe_block(struct d71_dev *d71,
return err;
}
+static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_puts(sf, "\n------ GCU ------\n");
+
+ get_values_from_reg(d71->gcu_addr, 0, 3, v);
+ seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(d71->gcu_addr, 0x10, 1, v);
+ seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(d71->gcu_addr, 0xA0, 5, v);
+ seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(d71->gcu_addr, 0xD0, 3, v);
+ seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]);
+ seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]);
+}
+
+static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[6];
+
+ seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->lpu_addr);
+
+ get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v);
+ seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v);
+ seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v);
+ seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]);
+ seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]);
+ seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]);
+}
+
+static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf)
+{
+ u32 v[5];
+
+ seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id);
+
+ dump_block_header(sf, pipe->dou_addr);
+
+ get_values_from_reg(pipe->dou_addr, 0xA0, 5, v);
+ seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]);
+}
+
+static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf)
+{
+ struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe);
+
+ d71_lpu_dump(d71_pipe, sf);
+ d71_dou_dump(d71_pipe, sf);
+}
+
const struct komeda_pipeline_funcs d71_pipeline_funcs = {
- .downscaling_clk_check = d71_downscaling_clk_check,
+ .downscaling_clk_check = d71_downscaling_clk_check,
+ .dump_register = d71_pipeline_dump,
};
+
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ d71_gcu_dump(d71, sf);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index d567ab7ed314..822b23a1ce75 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -195,7 +195,7 @@ d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
if (gcu_status & GLB_IRQ_STATUS_PIPE1)
evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
- return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(gcu_status);
}
#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
@@ -395,6 +395,22 @@ static int d71_enum_resources(struct komeda_dev *mdev)
err = PTR_ERR(pipe);
goto err_cleanup;
}
+
+ /* D71 HW doesn't update shadow registers while the display output
+ * is turning off, so if we disable all pipeline components
+ * together with the display output in one flush or one
+ * operation, the registers updated by the disable will not
+ * be flushed to, or become valid in, HW, which may lead to problems.
+ * To work around this, introduce a two-phase disable.
+ * Phase 1: disable the components while the display is still on,
+ * to make sure the disable can be flushed to HW.
+ * Phase 2: turn off only the display output.
+ */
+ value = KOMEDA_PIPELINE_IMPROCS |
+ BIT(KOMEDA_COMPONENT_TIMING_CTRLR);
+
+ pipe->standalone_disabled_comps = value;
+
d71->pipes[i] = to_d71_pipeline(pipe);
}
@@ -561,17 +577,18 @@ static int d71_disconnect_iommu(struct komeda_dev *mdev)
}
static const struct komeda_dev_funcs d71_chip_funcs = {
- .init_format_table = d71_init_fmt_tbl,
- .enum_resources = d71_enum_resources,
- .cleanup = d71_cleanup,
- .irq_handler = d71_irq_handler,
- .enable_irq = d71_enable_irq,
- .disable_irq = d71_disable_irq,
- .on_off_vblank = d71_on_off_vblank,
- .change_opmode = d71_change_opmode,
- .flush = d71_flush,
- .connect_iommu = d71_connect_iommu,
- .disconnect_iommu = d71_disconnect_iommu,
+ .init_format_table = d71_init_fmt_tbl,
+ .enum_resources = d71_enum_resources,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
+ .connect_iommu = d71_connect_iommu,
+ .disconnect_iommu = d71_disconnect_iommu,
+ .dump_register = d71_dump,
};
const struct komeda_dev_funcs *
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
index 84f1878b647d..c7357c2b9e62 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -49,4 +49,6 @@ int d71_probe_block(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg);
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+void d71_dump(struct komeda_dev *mdev, struct seq_file *sf);
+
#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
index 2d5e6d00b42c..1727dc993909 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -10,6 +10,7 @@
/* Common block registers offset */
#define BLK_BLOCK_INFO 0x000
#define BLK_PIPELINE_INFO 0x004
+#define BLK_MAX_LINE_SIZE 0x008
#define BLK_VALID_INPUT_ID0 0x020
#define BLK_OUTPUT_ID0 0x060
#define BLK_INPUT_ID0 0x080
@@ -321,6 +322,7 @@
#define L_INFO_RF BIT(0)
#define L_INFO_CM BIT(1)
#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF)
/* Scaler registers */
#define SC_COEFFTAB 0x0DC
@@ -494,13 +496,6 @@ enum d71_blk_type {
#define D71_DEFAULT_PREPRETCH_LINE 5
#define D71_BUS_WIDTH_16_BYTES 16
-#define D71_MIN_LINE_SIZE 64
-#define D71_MIN_VERTICAL_SIZE 64
-#define D71_SC_MIN_LIN_SIZE 4
-#define D71_SC_MIN_VERTICAL_SIZE 4
-#define D71_SC_MAX_LIN_SIZE 2048
-#define D71_SC_MAX_VERTICAL_SIZE 4096
-
#define D71_SC_MAX_UPSCALING 64
#define D71_SC_MAX_DOWNSCALING 6
#define D71_SC_SPLIT_OVERLAP 8
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 624d257da20f..252015210fbc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -5,7 +5,6 @@
*
*/
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <drm/drm_atomic.h>
@@ -18,6 +17,33 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats)
+{
+ struct drm_connector *conn;
+ struct drm_connector_state *conn_st;
+ u32 conn_color_formats = ~0u;
+ int i, min_bpc = 31, conn_bpc = 0;
+
+ for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) {
+ if (conn_st->crtc != crtc_st->crtc)
+ continue;
+
+ conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8;
+ conn_color_formats &= conn->display_info.color_formats;
+
+ if (conn_bpc < min_bpc)
+ min_bpc = conn_bpc;
+ }
+
+ /* if no connector configures a color format, use RGB444 as the default */
+ if (!conn_color_formats)
+ conn_color_formats = DRM_COLOR_FORMAT_RGB444;
+
+ *color_depths = GENMASK(min_bpc, 0);
+ *color_formats = conn_color_formats;
+}
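A minimal sketch of how this depth mask is consumed by komeda_improc_validate() later in the patch; the improc capability value is hypothetical:

/* Not part of the patch: worked example of the depth negotiation. */
static u8 komeda_pick_depth_example(void)
{
	u32 output_depths = GENMASK(8, 0);	/* min_bpc = 8 -> 0x1ff */
	u32 improc_caps = BIT(8) | BIT(10);	/* hypothetical improc depths */
	u32 avail_depths = output_depths & improc_caps;	/* = BIT(8) */

	return __fls(avail_depths);	/* = 8, the deepest common depth */
}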
+
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
@@ -250,23 +276,57 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc,
{
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc));
komeda_crtc_do_flush(crtc, old);
}
static void
+komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
+ struct completion *input_flip_done)
+{
+ struct drm_device *drm = kcrtc->base.dev;
+ struct komeda_dev *mdev = kcrtc->master->mdev;
+ struct completion *flip_done;
+ struct completion temp;
+ int timeout;
+
+ /* if the caller doesn't provide a flip_done, use a private one */
+ if (input_flip_done) {
+ flip_done = input_flip_done;
+ } else {
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ flip_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, kcrtc->master->id, 0);
+
+ /* wait for the flip to take effect */
+ timeout = wait_for_completion_timeout(flip_done, HZ);
+ if (timeout == 0) {
+ DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
+ if (!input_flip_done) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+ }
+}
+
+static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
- struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
- struct completion *disable_done = &crtc->state->commit->flip_done;
- struct completion temp;
- int timeout;
+ struct completion *disable_done;
+ bool needs_phase2 = false;
- DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n",
drm_crtc_index(crtc),
old_st->active_pipes, old_st->affected_pipes);
@@ -274,7 +334,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
komeda_pipeline_disable(slave, old->state);
if (has_bit(master->id, old_st->active_pipes))
- komeda_pipeline_disable(master, old->state);
+ needs_phase2 = komeda_pipeline_disable(master, old->state);
/* crtc_disable has two scenarios according to the state->active switch.
* 1. active -> inactive
@@ -293,32 +353,23 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
* That's also the reason why skip modeset commit in
* komeda_crtc_atomic_flush()
*/
- if (crtc->state->active) {
- struct komeda_pipeline_state *pipe_st;
- /* clear the old active_comps to zero */
- pipe_st = komeda_pipeline_get_old_state(master, old->state);
- pipe_st->active_comps = 0;
+ disable_done = (needs_phase2 || crtc->state->active) ?
+ NULL : &crtc->state->commit->flip_done;
- init_completion(&temp);
- kcrtc->disable_done = &temp;
- disable_done = &temp;
- }
+ /* wait for the phase 1 disable to complete */
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
- mdev->funcs->flush(mdev, master->id, 0);
+ /* phase 2 */
+ if (needs_phase2) {
+ komeda_pipeline_disable(kcrtc->master, old->state);
- /* wait the disable take affect.*/
- timeout = wait_for_completion_timeout(disable_done, HZ);
- if (timeout == 0) {
- DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
- if (crtc->state->active) {
- unsigned long flags;
+ disable_done = crtc->state->active ?
+ NULL : &crtc->state->commit->flip_done;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- kcrtc->disable_done = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
}
+ drm_crtc_vblank_put(crtc);
drm_crtc_vblank_off(crtc);
komeda_crtc_unprepare(kcrtc);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca64a129c594..937a6d4c4865 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -25,6 +25,8 @@ static int komeda_register_show(struct seq_file *sf, void *x)
struct komeda_dev *mdev = sf->private;
int i;
+ seq_puts(sf, "\n======== Komeda register dump ========\n");
+
if (mdev->funcs->dump_register)
mdev->funcs->dump_register(mdev, sf);
@@ -91,9 +93,19 @@ config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(config_id);
+static ssize_t
+aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk));
+}
+static DEVICE_ATTR_RO(aclk_hz);
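A usage sketch for the new attribute; the device path below is hypothetical and platform dependent:

/*
 *   $ cat /sys/devices/platform/c0d80000.display/aclk_hz
 *   400000000
 *
 * The value is the current engine clock rate in Hz, as returned by
 * clk_get_rate().
 */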
+
static struct attribute *komeda_sysfs_entries[] = {
&dev_attr_core_id.attr,
&dev_attr_config_id.attr,
+ &dev_attr_aclk_hz.attr,
NULL,
};
@@ -216,7 +228,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
product->product_id,
MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id));
err = -ENODEV;
- goto err_cleanup;
+ goto disable_clk;
}
DRM_INFO("Found ARM Mali-D%x version r%dp%d\n",
@@ -229,19 +241,19 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
err = mdev->funcs->enum_resources(mdev);
if (err) {
DRM_ERROR("enumerate display resource failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_parse_dt(dev, mdev);
if (err) {
DRM_ERROR("parse device tree failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
err = komeda_assemble_pipelines(mdev);
if (err) {
DRM_ERROR("assemble display pipelines failed.\n");
- goto err_cleanup;
+ goto disable_clk;
}
dev->dma_parms = &mdev->dma_parms;
@@ -254,11 +266,14 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (mdev->iommu && mdev->funcs->connect_iommu) {
err = mdev->funcs->connect_iommu(mdev);
if (err) {
+ DRM_ERROR("connect iommu failed.\n");
mdev->iommu = NULL;
- goto err_cleanup;
+ goto disable_clk;
}
}
+ clk_disable_unprepare(mdev->aclk);
+
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
@@ -271,6 +286,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
return mdev;
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
err_cleanup:
komeda_dev_destroy(mdev);
return ERR_PTR(err);
@@ -288,8 +305,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
debugfs_remove_recursive(mdev->debugfs_root);
#endif
+ if (mdev->aclk)
+ clk_prepare_enable(mdev->aclk);
+
if (mdev->iommu && mdev->funcs->disconnect_iommu)
- mdev->funcs->disconnect_iommu(mdev);
+ if (mdev->funcs->disconnect_iommu(mdev))
+ DRM_ERROR("disconnect iommu failed.\n");
mdev->iommu = NULL;
for (i = 0; i < mdev->n_pipelines; i++) {
@@ -317,3 +338,47 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
devm_kfree(dev, mdev);
}
+
+int komeda_dev_resume(struct komeda_dev *mdev)
+{
+ int ret = 0;
+
+ clk_prepare_enable(mdev->aclk);
+
+ if (mdev->iommu && mdev->funcs->connect_iommu) {
+ ret = mdev->funcs->connect_iommu(mdev);
+ if (ret < 0) {
+ DRM_ERROR("connect iommu failed.\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = mdev->funcs->enable_irq(mdev);
+
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
+
+ return ret;
+}
+
+int komeda_dev_suspend(struct komeda_dev *mdev)
+{
+ int ret = 0;
+
+ clk_prepare_enable(mdev->aclk);
+
+ if (mdev->iommu && mdev->funcs->disconnect_iommu) {
+ ret = mdev->funcs->disconnect_iommu(mdev);
+ if (ret < 0) {
+ DRM_ERROR("disconnect iommu failed.\n");
+ goto disable_clk;
+ }
+ }
+
+ ret = mdev->funcs->disable_irq(mdev);
+
+disable_clk:
+ clk_disable_unprepare(mdev->aclk);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index d1c86b6174c8..414200233b64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -40,6 +40,17 @@
#define KOMEDA_ERR_TTNG BIT_ULL(30)
#define KOMEDA_ERR_TTF BIT_ULL(31)
+#define KOMEDA_ERR_EVENTS \
+ (KOMEDA_EVENT_URUN | KOMEDA_EVENT_IBSY | KOMEDA_EVENT_OVR |\
+ KOMEDA_ERR_TETO | KOMEDA_ERR_TEMR | KOMEDA_ERR_TITR |\
+ KOMEDA_ERR_CPE | KOMEDA_ERR_CFGE | KOMEDA_ERR_AXIE |\
+ KOMEDA_ERR_ACE0 | KOMEDA_ERR_ACE1 | KOMEDA_ERR_ACE2 |\
+ KOMEDA_ERR_ACE3 | KOMEDA_ERR_DRIFTTO | KOMEDA_ERR_FRAMETO |\
+ KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\
+ KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF)
+
+#define KOMEDA_WARN_EVENTS KOMEDA_ERR_CSCE
+
/* malidp device id */
enum {
MALI_D71 = 0,
@@ -207,4 +218,13 @@ void komeda_dev_destroy(struct komeda_dev *mdev);
struct komeda_dev *dev_to_mdev(struct device *dev);
+#ifdef CONFIG_DRM_KOMEDA_ERROR_PRINT
+void komeda_print_events(struct komeda_events *evts);
+#else
+static inline void komeda_print_events(struct komeda_events *evts) {}
+#endif
+
+int komeda_dev_resume(struct komeda_dev *mdev);
+int komeda_dev_suspend(struct komeda_dev *mdev);
+
#endif /*_KOMEDA_DEV_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 69ace6f9055d..d6c2222c5d33 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
@@ -136,13 +137,40 @@ static const struct of_device_id komeda_of_match[] = {
MODULE_DEVICE_TABLE(of, komeda_of_match);
+static int __maybe_unused komeda_pm_suspend(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ struct drm_device *drm = &mdrv->kms->base;
+ int res;
+
+ res = drm_mode_config_helper_suspend(drm);
+
+ komeda_dev_suspend(mdrv->mdev);
+
+ return res;
+}
+
+static int __maybe_unused komeda_pm_resume(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+ struct drm_device *drm = &mdrv->kms->base;
+
+ komeda_dev_resume(mdrv->mdev);
+
+ return drm_mode_config_helper_resume(drm);
+}
+
+static const struct dev_pm_ops komeda_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume)
+};
+
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
.remove = komeda_platform_remove,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
- .pm = NULL,
+ .pm = &komeda_pm_ops,
},
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
new file mode 100644
index 000000000000..a36fb86cc054
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#include <drm/drm_print.h>
+
+#include "komeda_dev.h"
+
+struct komeda_str {
+ char *str;
+ u32 sz;
+ u32 len;
+};
+
+/* Returns 0 on success, or -ENOSPC if the buffer has run out of space.
+ */
+static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...)
+{
+ va_list args;
+ int num, free_sz;
+ int err;
+
+ free_sz = str->sz - str->len - 1;
+ if (free_sz <= 0)
+ return -ENOSPC;
+
+ va_start(args, fmt);
+
+ num = vsnprintf(str->str + str->len, free_sz, fmt, args);
+
+ va_end(args);
+
+ if (num < free_sz) {
+ str->len += num;
+ err = 0;
+ } else {
+ str->len = str->sz - 1;
+ err = -ENOSPC;
+ }
+
+ return err;
+}
+
+static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg)
+{
+ if (evt)
+ komeda_sprintf(str, msg);
+}
+
+static void evt_str(struct komeda_str *str, u64 events)
+{
+ if (events == 0ULL) {
+ komeda_sprintf(str, "None");
+ return;
+ }
+
+ evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|");
+ evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|");
+ evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|");
+ evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|");
+
+ evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|");
+ evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|");
+
+ /* GLB error */
+ evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+
+ /* DOU error */
+ evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|");
+ evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|");
+ evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|");
+
+ /* LPU errors or events */
+ evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|");
+ evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|");
+ evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|");
+
+ /* LPU TBU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|");
+ evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+ evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|");
+
+ /* CU errors */
+ evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|");
+ evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|");
+ evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|");
+ evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|");
+
+ if (str->len > 0 && (str->str[str->len - 1] == '|')) {
+ str->str[str->len - 1] = 0;
+ str->len--;
+ }
+}
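A short example of the resulting string, for a hypothetical event mask:

/*
 * For events = KOMEDA_EVENT_URUN | KOMEDA_ERR_AXIE, evt_str() appends
 * "UNDERRUN|" and "AXIE|", then the final if-block strips the trailing
 * '|', leaving "UNDERRUN|AXIE".
 */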
+
+static bool is_new_frame(struct komeda_events *a)
+{
+ return (a->pipes[0] | a->pipes[1]) &
+ (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW);
+}
+
+void komeda_print_events(struct komeda_events *evts)
+{
+ u64 print_evts = KOMEDA_ERR_EVENTS;
+ static bool en_print = true;
+
+ /* reduce duplicate messages: only print the first event of each frame */
+ if (evts->global || is_new_frame(evts))
+ en_print = true;
+ if (!en_print)
+ return;
+
+ if ((evts->global | evts->pipes[0] | evts->pipes[1]) & print_evts) {
+ char msg[256];
+ struct komeda_str str;
+
+ str.str = msg;
+ str.sz = sizeof(msg);
+ str.len = 0;
+
+ komeda_sprintf(&str, "gcu: ");
+ evt_str(&str, evts->global);
+ komeda_sprintf(&str, ", pipes[0]: ");
+ evt_str(&str, evts->pipes[0]);
+ komeda_sprintf(&str, ", pipes[1]: ");
+ evt_str(&str, evts->pipes[1]);
+
+ DRM_ERROR("err detect: %s\n", msg);
+
+ en_print = false;
+ }
+}
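A sketch of how the en_print flag rate-limits the output over a hypothetical IRQ sequence:

/*
 *   irq 1: pipes[0] = KOMEDA_ERR_AXIE   -> printed, en_print = false
 *   irq 2: pipes[0] = KOMEDA_ERR_AXIE   -> suppressed
 *   irq 3: pipes[0] = KOMEDA_EVENT_FLIP -> new frame, en_print = true
 *   irq 4: pipes[0] = KOMEDA_ERR_AXIE   -> printed again
 *
 * so only the first error of each frame reaches the log.
 */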
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index ae274902ff92..52648b4008bc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -48,6 +48,8 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
memset(&evts, 0, sizeof(evts));
status = mdev->funcs->irq_handler(mdev, &evts);
+ komeda_print_events(&evts);
+
/* Notify the crtc to handle the events */
for (i = 0; i < kms->n_crtcs; i++)
komeda_crtc_handle_event(&kms->crtcs[i], &evts);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 45c498e15e7a..456f3c435719 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -166,6 +166,8 @@ static inline bool has_flip_h(u32 rot)
return !!(rotation & DRM_MODE_REFLECT_X);
}
+void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st,
+ u32 *color_depths, u32 *color_formats);
unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index cf5bea578ad9..bd6ca7c87037 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -227,6 +227,8 @@ struct komeda_layer {
/* accepted h/v input range before rotation */
struct malidp_range hsize_in, vsize_in;
u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 line_sz;
+ u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */
u32 supported_rots;
/* komeda supports layer split which splits a whole image to two parts
* left and right and handle them by two individual layer processors
@@ -323,6 +325,7 @@ struct komeda_improc {
struct komeda_improc_state {
struct komeda_component_state base;
+ u8 color_format, color_depth;
u16 hsize, vsize;
};
@@ -389,6 +392,18 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /**
+ * @standalone_disabled_comps:
+ *
+ * When disabling the pipeline, some components cannot be disabled
+ * together with the others, but need a separate, standalone disable.
+ * The standalone_disabled_comps are the components which need to be
+ * disabled on their own, which introduces the concept of a two-phase
+ * disable.
+ * phase 1: disable the common components.
+ * phase 2: disable the standalone_disabled_comps.
+ */
+ u32 standalone_disabled_comps;
/** @n_layers: the number of layer on @layers */
int n_layers;
/** @layers: the pipeline layers */
@@ -535,7 +550,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
struct komeda_pipeline_state *
komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
struct drm_atomic_state *state);
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index b848270e0a1f..52750116aa19 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_data_flow_cfg *dflow)
{
u32 src_x, src_y, src_w, src_h;
+ u32 line_sz, max_line_sz;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
@@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer,
return -EINVAL;
}
+ if (drm_rotation_90_or_270(dflow->rot))
+ line_sz = dflow->in_h;
+ else
+ line_sz = dflow->in_w;
+
+ if (kfb->base.format->hsub > 1)
+ max_line_sz = layer->yuv_line_sz;
+ else
+ max_line_sz = layer->line_sz;
+
+ if (line_sz > max_line_sz) {
+ DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n",
+ line_sz, max_line_sz);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc,
struct komeda_data_flow_cfg *dflow)
{
struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct drm_crtc_state *crtc_st = &kcrtc_st->base;
struct komeda_component_state *c_st;
struct komeda_improc_state *st;
@@ -756,6 +774,34 @@ komeda_improc_validate(struct komeda_improc *improc,
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
+ if (drm_atomic_crtc_needs_modeset(crtc_st)) {
+ u32 output_depths, output_formats;
+ u32 avail_depths, avail_formats;
+
+ komeda_crtc_get_color_config(crtc_st, &output_depths,
+ &output_formats);
+
+ avail_depths = output_depths & improc->supported_color_depths;
+ if (avail_depths == 0) {
+ DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n",
+ output_depths,
+ improc->supported_color_depths);
+ return -EINVAL;
+ }
+
+ avail_formats = output_formats &
+ improc->supported_color_formats;
+ if (!avail_formats) {
+ DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n",
+ output_formats,
+ improc->supported_color_formats);
+ return -EINVAL;
+ }
+
+ st->color_depth = __fls(avail_depths);
+ st->color_format = BIT(__ffs(avail_formats));
+ }
+
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &improc->base, 0);
@@ -1218,7 +1264,17 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
return 0;
}
-void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+/* Since standalone-disabled components must be disabled separately and
+ * last, a complete disable operation may need to call pipeline_disable
+ * twice (two-phase disabling).
+ * Phase 1: disable the common components, then flush.
+ * Phase 2: disable the standalone-disabled components, then flush.
+ *
+ * RETURNS:
+ * true: the disable is not complete and needs a phase 2 disable.
+ * false: the disable is complete.
+ */
+bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state)
{
struct komeda_pipeline_state *old;
@@ -1228,9 +1284,14 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
old = komeda_pipeline_get_old_state(pipe, old_state);
- disabling_comps = old->active_comps;
- DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
- pipe->id, disabling_comps);
+ disabling_comps = old->active_comps &
+ (~pipe->standalone_disabled_comps);
+ if (!disabling_comps)
+ disabling_comps = old->active_comps &
+ pipe->standalone_disabled_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
+ pipe->id, old->active_comps, disabling_comps);
dp_for_each_set_bit(id, disabling_comps) {
c = komeda_pipeline_get_component(pipe, id);
@@ -1248,6 +1309,13 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
c->funcs->disable(c);
}
+
+ /* Update the pipeline state; if some components are still active,
+ * return true so the caller runs the phase 2 disable.
+ */
+ old->active_comps &= ~disabling_comps;
+
+ return !!old->active_comps;
}
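A worked example of the two calls, with hypothetical component masks:

/*
 * With old->active_comps = LAYER0 | COMPIZ0 | IPS0 | CTRLR0 and
 * pipe->standalone_disabled_comps = IPS0 | CTRLR0:
 *
 *   call 1: disabling_comps = LAYER0 | COMPIZ0  -> returns true
 *   call 2: disabling_comps = IPS0 | CTRLR0     -> returns false
 *
 * komeda_crtc_atomic_disable() flushes between the two calls, so the
 * phase 1 register updates reach the HW while the display is still on.
 */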
void komeda_pipeline_update(struct komeda_pipeline *pipe,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index b72840c06ab7..e465cc4879c9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -141,6 +141,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
+ struct drm_display_info *info;
u32 *formats, n_formats = 0;
int err;
@@ -172,6 +173,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
+ info = &kwb_conn->base.base.display_info;
+ info->bpc = __fls(kcrtc->master->improc->supported_color_depths);
+ info->color_formats = kcrtc->master->improc->supported_color_formats;
+
kcrtc->wb_conn = kwb_conn;
return 0;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 333b88a5efb0..37d92a06318e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -368,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
return false;
}
-struct drm_framebuffer *
+static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -491,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp,
spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}
-void malidp_error_stats_dump(const char *prefix,
- struct malidp_error_stats error_stats,
- struct seq_file *m)
+static void malidp_error_stats_dump(const char *prefix,
+ struct malidp_error_stats error_stats,
+ struct seq_file *m)
{
seq_printf(m, "[%s] num_errors : %d\n", prefix,
error_stats.num_errors);
@@ -665,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}
-DEVICE_ATTR_RO(core_id);
+static DEVICE_ATTR_RO(core_id);
static int malidp_init_sysfs(struct device *dev)
{
@@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev)
malidp->core_id = version;
+ ret = of_property_read_u32(dev->of_node,
+ "arm,malidp-arqos-value",
+ &hwdev->arqos_value);
+ if (ret)
+ hwdev->arqos_value = 0x0;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index bd8265f02e0b..ca570b135478 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
else
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+
+ /*
+ * Program the RQoS register to avoid a flicker issue seen at
+ * high resolutions on the LS1028A.
+ */
+ if (hwdev->arqos_value) {
+ val = hwdev->arqos_value;
+ malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY);
+ }
}
int malidp_format_get_bpp(u32 fmt)
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 968a65eed371..e4c36bc90bda 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -251,6 +251,9 @@ struct malidp_hw_device {
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
+
+ /* priority level of the RQOS register, used to drive the ARQOS signal */
+ u32 arqos_value;
};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 993031542fa1..514c50dcb74d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -210,6 +210,16 @@
#define MALIDP500_CONFIG_VALID 0x00f00
#define MALIDP500_CONFIG_ID 0x00fd4
+/*
+ * The quality of service (QoS) register on the DP500. RQOS register values
+ * drive the ARQOS signal on AXI transactions, depending on the FIFO
+ * input level.
+ * The RQOS register can also set QoS levels for:
+ * - RED_ARQOS @ a 4-bit signal value for close-to-underflow conditions
+ * - GREEN_ARQOS @ a 4-bit signal value for normal conditions
+ */
+#define MALIDP500_RQOS_QUALITY 0x00500
+
/* register offsets and bits specific to DP550/DP650 */
#define MALIDP550_ADDR_SPACE_SIZE 0x10000
#define MALIDP550_DE_CONTROL 0x00010
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 829620d5326c..fbcf2f45cef5 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6ed6ff49efc0..1f17794b0890 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -35,7 +35,6 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -201,10 +200,7 @@ static struct pci_driver ast_pci_driver = {
.driver.pm = &ast_pm_ops,
};
-static const struct file_operations ast_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(ast_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 244cc7c382af..ff161bd622f3 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -82,6 +82,25 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
struct ast_private {
struct drm_device *dev;
@@ -97,8 +116,11 @@ struct ast_private {
int fb_mtrr;
- struct drm_gem_object *cursor_cache;
- int next_cursor;
+ struct {
+ struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
+ unsigned int next_index;
+ } cursor;
+
bool support_wide_screen;
enum {
ast_use_p2a,
@@ -199,23 +221,6 @@ static inline void ast_open_key(struct ast_private *ast)
#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
-#define AST_MAX_HWC_WIDTH 64
-#define AST_MAX_HWC_HEIGHT 64
-
-#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
-#define AST_HWC_SIGNATURE_SIZE 32
-
-#define AST_DEFAULT_HWC_NUM 2
-/* define for signature structure */
-#define AST_HWC_SIGNATURE_CHECKSUM 0x00
-#define AST_HWC_SIGNATURE_SizeX 0x04
-#define AST_HWC_SIGNATURE_SizeY 0x08
-#define AST_HWC_SIGNATURE_X 0x0C
-#define AST_HWC_SIGNATURE_Y 0x10
-#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
-#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
-
-
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 50de8e47659c..21715d6a9b56 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -33,7 +33,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index d349c721501c..b13eaa2619ab 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -687,17 +687,6 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
kfree(encoder);
}
-
-static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-
static const struct drm_encoder_funcs ast_enc_funcs = {
.destroy = ast_encoder_destroy,
};
@@ -847,7 +836,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
.mode_valid = ast_mode_valid,
.get_modes = ast_get_modes,
- .best_encoder = ast_best_single_encoder,
};
static const struct drm_connector_funcs ast_connector_funcs = {
@@ -895,50 +883,53 @@ static int ast_connector_init(struct drm_device *dev)
static int ast_cursor_init(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- int size;
- int ret;
- struct drm_gem_object *obj;
+ size_t size, i;
struct drm_gem_vram_object *gbo;
- s64 gpu_addr;
- void *base;
+ int ret;
- size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
- ret = ast_gem_create(dev, size, true, &obj);
- if (ret)
- return ret;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- goto fail;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- ret = (int)gpu_addr;
- goto fail;
- }
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
- /* kmap the object */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto fail;
+ ast->cursor.gbo[i] = gbo;
}
- ast->cursor_cache = obj;
return 0;
-fail:
+
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ ast->cursor.gbo[i] = NULL;
+ }
return ret;
}
static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(ast->cursor_cache);
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
+ gbo = ast->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
}
int ast_mode_init(struct drm_device *dev)
@@ -1076,23 +1067,6 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
kfree(i2c);
}
-static void ast_show_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- u8 jreg;
-
- jreg = 0x2;
- /* enable ARGB cursor */
- jreg |= 1;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-}
-
-static void ast_hide_cursor(struct drm_crtc *crtc)
-{
- struct ast_private *ast = crtc->dev->dev_private;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
-
static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
{
union {
@@ -1149,21 +1123,99 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
return csum;
}
+static int ast_cursor_update(void *dst, void *src, unsigned int width,
+ unsigned int height)
+{
+ u32 csum;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ return 0;
+}
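The resulting layout of one cursor slot, per the writes above:

/*
 *   dst + 0x0000                     ARGB4444 cursor image (AST_HWC_SIZE)
 *   dst + AST_HWC_SIZE + 0x00        checksum
 *   dst + AST_HWC_SIZE + 0x04        width  (SizeX)
 *   dst + AST_HWC_SIZE + 0x08        height (SizeY)
 *   dst + AST_HWC_SIZE + 0x14/0x18   hotspot X/Y (zeroed here)
 *
 * writel() is used because dst maps VRAM, i.e. I/O memory.
 */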
+
+static void ast_cursor_set_base(struct ast_private *ast, u64 address)
+{
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
+}
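A quick check of the address split above, with a hypothetical (8-byte aligned) VRAM offset:

/*
 * For address = 0x00ffe000:
 *
 *   addr0 = (address >> 3)  & 0xff = 0x00   (bits [10:3])
 *   addr1 = (address >> 11) & 0xff = 0xfc   (bits [18:11])
 *   addr2 = (address >> 19) & 0xff = 0x1f   (bits [26:19])
 *
 * i.e. the three index registers together hold bits [26:3] of the byte
 * offset; the low three bits are dropped by the alignment.
 */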
+
+static int ast_show_cursor(struct drm_crtc *crtc, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+ u8 jreg;
+
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = ast_cursor_update(dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ ast_cursor_set_base(ast, off);
+
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ ++ast->cursor.next_index;
+ ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
+
+ drm_gem_vram_vunmap(gbo, dst);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
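A note on the double buffering introduced here; the intent is inferred from the code:

/*
 * AST_DEFAULT_HWC_NUM (2) BOs are pinned at init and next_index
 * alternates between them, so each new cursor image is written to the
 * BO the HW is not currently scanning out before the base register is
 * flipped to it.
 */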
+
+static void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
static int ast_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height)
{
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct drm_gem_object *obj;
struct drm_gem_vram_object *gbo;
- s64 dst_gpu;
- u64 gpu_addr;
- u32 csum;
+ u8 *src;
int ret;
- u8 *src, *dst;
if (!handle) {
ast_hide_cursor(crtc);
@@ -1179,70 +1231,23 @@ static int ast_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
}
gbo = drm_gem_vram_of_gem(obj);
-
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- goto err_drm_gem_object_put_unlocked;
- src = drm_gem_vram_kmap(gbo, true, NULL);
+ src = drm_gem_vram_vmap(gbo);
if (IS_ERR(src)) {
ret = PTR_ERR(src);
- goto err_drm_gem_vram_unpin;
- }
-
- dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- goto err_drm_gem_vram_kunmap;
- }
- dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache));
- if (dst_gpu < 0) {
- ret = (int)dst_gpu;
- goto err_drm_gem_vram_kunmap;
- }
-
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
-
- /* do data transfer to cursor cache */
- csum = copy_cursor_image(src, dst, width, height);
-
- /* write checksum + signature */
- {
- struct drm_gem_vram_object *dst_gbo =
- drm_gem_vram_of_gem(ast->cursor_cache);
- u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL);
- dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-
- /* set pattern offset */
- gpu_addr = (u64)dst_gpu;
- gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
- gpu_addr >>= 3;
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ goto err_drm_gem_object_put_unlocked;
}
- ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
- ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height;
-
- ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
- ast_show_cursor(crtc);
+ ret = ast_show_cursor(crtc, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, src);
drm_gem_object_put_unlocked(obj);
return 0;
-err_drm_gem_vram_kunmap:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -1253,12 +1258,17 @@ static int ast_cursor_move(struct drm_crtc *crtc,
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_vram_object *gbo;
int x_offset, y_offset;
- u8 *sig;
+ u8 *dst, *sig;
+ u8 jreg;
- sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache),
- false, NULL);
- sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ gbo = ast->cursor.gbo[ast->cursor.next_index];
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
@@ -1281,7 +1291,11 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_show_cursor(crtc);
+ jreg = 0x02 |
+ 0x01; /* enable ARGB4444 cursor */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+
+ drm_gem_vram_vunmap(gbo, dst);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c52d92294171..fad34106083a 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -30,7 +30,6 @@
#include <drm/drm_print.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -42,7 +41,7 @@ int ast_mm_init(struct ast_private *ast)
vmm = drm_vram_helper_alloc_mm(
dev, pci_resource_start(dev->pdev, 0),
- ast->vram_size, &drm_gem_vram_mm_funcs);
+ ast->vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 375fa84c548b..121b62682d80 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -107,7 +107,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->encoder.possible_crtcs = 0x1;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 89f5a756fa37..034f202dfe8f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -601,7 +601,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
struct drm_framebuffer *fb = state->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
- unsigned int tmp;
int ret;
int i;
@@ -694,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
* Swap width and size in case of 90 or 270 degrees rotation
*/
if (drm_rotation_90_or_270(state->base.rotation)) {
- tmp = state->src_w;
- state->src_w = state->src_h;
- state->src_h = tmp;
+ swap(state->src_w, state->src_h);
}
if (!desc->layout.size &&
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 32b043abb668..7bcdf294fed8 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -4,6 +4,8 @@ config DRM_BOCHS
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option for qemu.
If M is selected the module will be called bochs-drm.
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 68483a2fc12c..917767173ee6 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -10,7 +10,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vram_mm_helper.h>
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 770e1625d05e..10460878414e 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -58,10 +58,7 @@ err:
return ret;
}
-static const struct file_operations bochs_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(bochs_fops);
static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -114,7 +111,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 02a9c1ed165b..3f0006c2470d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
}
}
-static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
- gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
-}
-
-static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
- gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
- drm_gem_vram_unpin(gbo);
-}
-
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.update = bochs_pipe_update,
- .prepare_fb = bochs_pipe_prepare_fb,
- .cleanup_fb = bochs_pipe_cleanup_fb,
+ .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
+ .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)
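
The hunk above drops bochs's hand-rolled prepare_fb/cleanup_fb in favor of the new VRAM helper callbacks. As a minimal sketch of the resulting pattern (the example_* names are hypothetical, not from the patch), a simple-pipe driver now only wires the helpers into its funcs table:

#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_simple_kms_helper.h>

static void example_pipe_enable(struct drm_simple_display_pipe *pipe,
				struct drm_crtc_state *crtc_state,
				struct drm_plane_state *plane_state)
{
	/* driver-specific mode programming would go here */
}

static void example_pipe_update(struct drm_simple_display_pipe *pipe,
				struct drm_plane_state *old_state)
{
	/* driver-specific plane update would go here */
}

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	.enable     = example_pipe_enable,
	.update     = example_pipe_update,
	/* pin the GEM VRAM BO on prepare, unpin on cleanup */
	.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
	.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
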
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 8f9bb886f7ad..1b74f530b07c 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -11,8 +11,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct drm_vram_mm *vmm;
vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base,
- bochs->fb_size,
- &drm_gem_vram_mm_funcs);
+ bochs->fb_size);
return PTR_ERR_OR_ZERO(vmm);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 1cc9f502c1f2..34362976cd6f 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -87,8 +87,7 @@ config DRM_SIL_SII8620
depends on OF
select DRM_KMS_HELPER
imply EXTCON
- select INPUT
- select RC_CORE
+ depends on RC_CORE || !RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 3c7cc5af735c..274989f96a91 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@@ -38,12 +39,20 @@
#define AUX_CH_BUFFER_SIZE 16
#define AUX_WAIT_TIMEOUT_MS 15
-static const u8 anx78xx_i2c_addresses[] = {
- [I2C_IDX_TX_P0] = TX_P0,
- [I2C_IDX_TX_P1] = TX_P1,
- [I2C_IDX_TX_P2] = TX_P2,
- [I2C_IDX_RX_P0] = RX_P0,
- [I2C_IDX_RX_P1] = RX_P1,
+static const u8 anx7808_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x78,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
+};
+
+static const u8 anx781x_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = 0x70,
+ [I2C_IDX_TX_P1] = 0x7a,
+ [I2C_IDX_TX_P2] = 0x72,
+ [I2C_IDX_RX_P0] = 0x7e,
+ [I2C_IDX_RX_P1] = 0x80,
};
struct anx78xx_platform_data {
@@ -62,7 +71,6 @@ struct anx78xx {
struct i2c_client *client;
struct edid *edid;
struct drm_connector connector;
- struct drm_dp_link link;
struct anx78xx_platform_data pdata;
struct mutex lock;
@@ -715,7 +723,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
/* 1.0V digital core power regulator */
pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
if (IS_ERR(pdata->dvdd10)) {
- DRM_ERROR("DVDD10 regulator not found\n");
+ if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
+ DRM_ERROR("DVDD10 regulator not found\n");
+
return PTR_ERR(pdata->dvdd10);
}
@@ -737,7 +747,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx)
static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
{
- u8 dp_bw, value;
+ u8 dp_bw, dpcd[2];
int err;
err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
@@ -790,18 +800,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- /* Check link capabilities */
- err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to probe link capabilities: %d\n", err);
- return err;
- }
+ /*
+ * Power up the sink (DP_SET_POWER register is only available on DPCD
+ * v1.1 and later).
+ */
+ if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
+ err);
+ return err;
+ }
- /* Power up the sink */
- err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
- if (err < 0) {
- DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
- return err;
+ dpcd[0] &= ~DP_SET_POWER_MASK;
+ dpcd[0] |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n",
+ err);
+ return err;
+ }
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
}
/* Possibly enable downspread on the sink */
@@ -840,15 +866,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+ dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
+ dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, value);
+ SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
return err;
- err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+ dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);
+
+ if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+ dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
+ sizeof(dpcd));
if (err < 0) {
- DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+ DRM_ERROR("Failed to configure link: %d\n", err);
return err;
}
@@ -1301,6 +1334,7 @@ static const struct regmap_config anx78xx_regmap_config = {
};
static const u16 anx78xx_chipid_list[] = {
+ 0x7808,
0x7812,
0x7814,
0x7818,
@@ -1312,6 +1346,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
unsigned int i, idl, idh, version;
+ const u8 *i2c_addresses;
bool found = false;
int err;
@@ -1332,7 +1367,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
err = anx78xx_init_pdata(anx78xx);
if (err) {
- DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ if (err != -EPROBE_DEFER)
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+
return err;
}
@@ -1349,22 +1386,26 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
}
/* Map slave addresses of ANX7814 */
+ i2c_addresses = device_get_match_data(&client->dev);
for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
- anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter,
- anx78xx_i2c_addresses[i] >> 1);
- if (!anx78xx->i2c_dummy[i]) {
- err = -ENOMEM;
- DRM_ERROR("Failed to reserve I2C bus %02x\n",
- anx78xx_i2c_addresses[i]);
+ struct i2c_client *i2c_dummy;
+
+ i2c_dummy = i2c_new_dummy_device(client->adapter,
+ i2c_addresses[i] >> 1);
+ if (IS_ERR(i2c_dummy)) {
+ err = PTR_ERR(i2c_dummy);
+ DRM_ERROR("Failed to reserve I2C bus %02x: %d\n",
+ i2c_addresses[i], err);
goto err_unregister_i2c;
}
+ anx78xx->i2c_dummy[i] = i2c_dummy;
anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
&anx78xx_regmap_config);
if (IS_ERR(anx78xx->map[i])) {
err = PTR_ERR(anx78xx->map[i]);
DRM_ERROR("Failed regmap initialization %02x\n",
- anx78xx_i2c_addresses[i]);
+ i2c_addresses[i]);
goto err_unregister_i2c;
}
}
@@ -1463,7 +1504,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id anx78xx_match_table[] = {
- { .compatible = "analogix,anx7814", },
+ { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
+ { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, anx78xx_match_table);
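
The anx78xx changes above replace the drm_dp_link_probe()/_power_up()/_configure() trio with direct DPCD accesses. A minimal sketch of the new pattern, with illustrative example_* names: read the receiver capabilities once, derive rate and lane count, then program the link configuration registers with the raw DPCD helpers.

#include <drm/drm_dp_helper.h>

static int example_configure_link(struct drm_dp_aux *aux,
				  u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 values[2];
	int err;

	err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
	if (err < 0)
		return err;

	values[0] = drm_dp_link_rate_to_bw_code(drm_dp_max_link_rate(dpcd));
	values[1] = drm_dp_max_lane_count(dpcd);
	if (drm_dp_enhanced_frame_cap(dpcd))
		values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	/* DP_LINK_BW_SET and DP_LANE_COUNT_SET are adjacent DPCD registers */
	err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
	return err < 0 ? err : 0;
}
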
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
index 25e063bcecbc..55d6c2109740 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.h
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h
@@ -6,15 +6,8 @@
#ifndef __ANX78xx_H
#define __ANX78xx_H
-#define TX_P0 0x70
-#define TX_P1 0x7a
-#define TX_P2 0x72
-
-#define RX_P0 0x7e
-#define RX_P1 0x80
-
/***************************************************************/
-/* Register definition of device address 0x7e */
+/* Register definitions for RX_P0 */
/***************************************************************/
/*
@@ -171,7 +164,7 @@
#define SP_VSI_RCVD BIT(1)
/***************************************************************/
-/* Register definition of device address 0x80 */
+/* Register definitions for RX_P1 */
/***************************************************************/
/* HDCP BCAPS Shadow Register */
@@ -217,7 +210,7 @@
#define SP_SET_AVMUTE BIT(0)
/***************************************************************/
-/* Register definition of device address 0x70 */
+/* Register definitions for TX_P0 */
/***************************************************************/
/* HDCP Status Register */
@@ -451,7 +444,7 @@
#define SP_DP_BUF_DATA0_REG 0xf0
/***************************************************************/
-/* Register definition of device address 0x72 */
+/* Register definitions for TX_P2 */
/***************************************************************/
/*
@@ -674,7 +667,7 @@
#define SP_INT_CTRL_REG 0xff
/***************************************************************/
-/* Register definition of device address 0x7a */
+/* Register definitions for TX_P1 */
/***************************************************************/
/* DP TX Link Training Control Register */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 22885dceaa17..bb411fe52ae8 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -21,6 +21,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index 6166dca6be81..3a5bd4e7fd1e 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -956,7 +956,8 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
panel = of_drm_find_panel(np);
if (!IS_ERR(panel)) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
if (!bridge)
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 7aa789c35882..cc33dc411b9e 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -12,6 +12,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 2ab2c234f26c..e2132a8d5106 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -106,7 +106,8 @@ static int lvds_encoder_probe(struct platform_device *pdev)
}
lvds_encoder->panel_bridge =
- devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_LVDS);
+ devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(lvds_encoder->panel_bridge))
return PTR_ERR(lvds_encoder->panel_bridge);
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 6e81e5db57f2..e8a49f6146c6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -25,6 +25,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index d4a1cc5052c3..57ff01339559 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index b12ae3a4c5f1..f4e293e7cf64 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -133,8 +134,6 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* just calls the appropriate functions from &drm_panel.
*
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* For drivers converting from directly using drm_panel: The expected
* usage pattern is that during either encoder module probe or DSI
@@ -148,11 +147,37 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* drm_mode_config_cleanup() if the bridge has already been attached), then
* drm_panel_bridge_remove() to free it.
*
+ * The connector type is set to @panel->connector_type, which must be set to a
+ * known type. If @panel->connector_type is DRM_MODE_CONNECTOR_Unknown, this
+ * function returns NULL.
+ *
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
-struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
- u32 connector_type)
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return drm_panel_bridge_add_typed(panel, panel->connector_type);
+}
+EXPORT_SYMBOL(drm_panel_bridge_add);
+
+/**
+ * drm_panel_bridge_add_typed - Creates a &drm_bridge and &drm_connector with
+ * an explicit connector type.
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like drm_panel_bridge_add(), but forces the connector type to
+ * @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * drm_panel_bridge_add() instead, and fix panel drivers as necessary if they
+ * don't report a connector type.
+ */
+struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
+ u32 connector_type)
{
struct panel_bridge *panel_bridge;
@@ -176,7 +201,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
return &panel_bridge->bridge;
}
-EXPORT_SYMBOL(drm_panel_bridge_add);
+EXPORT_SYMBOL(drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_remove - Unregisters and frees a drm_bridge
@@ -213,15 +238,38 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
* that just calls the appropriate functions from &drm_panel.
* @dev: device to tie the bridge lifetime to
* @panel: The drm_panel being wrapped. Must be non-NULL.
- * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
- * created.
*
* This is the managed version of drm_panel_bridge_add() which automatically
* calls drm_panel_bridge_remove() when @dev is unbound.
*/
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
- struct drm_panel *panel,
- u32 connector_type)
+ struct drm_panel *panel)
+{
+ if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
+ return NULL;
+
+ return devm_drm_panel_bridge_add_typed(dev, panel,
+ panel->connector_type);
+}
+EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+
+/**
+ * devm_drm_panel_bridge_add_typed - Creates a managed &drm_bridge and
+ * &drm_connector with an explicit connector type.
+ * @dev: device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * This is just like devm_drm_panel_bridge_add(), but forces the connector type
+ * to @connector_type instead of inferring it from the panel.
+ *
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_panel_bridge_add() instead, and fix panel drivers as necessary if
+ * they don't report a connector type.
+ */
+struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type)
{
struct drm_bridge **ptr, *bridge;
@@ -230,7 +278,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- bridge = drm_panel_bridge_add(panel, connector_type);
+ bridge = drm_panel_bridge_add_typed(panel, connector_type);
if (!IS_ERR(bridge)) {
*ptr = bridge;
devres_add(dev, ptr);
@@ -240,4 +288,4 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
return bridge;
}
-EXPORT_SYMBOL(devm_drm_panel_bridge_add);
+EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
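
A sketch of how a caller looks after this split (the DSI fallback type and the example_* name are assumptions for illustration): new code calls the typeless variant and only falls back to the deprecated _typed call when the panel driver has not been fixed to report its connector type.

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

static struct drm_bridge *example_add_bridge(struct device *dev,
					     struct drm_panel *panel)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_panel_bridge_add(dev, panel);
	if (!bridge)	/* panel did not report a connector type */
		bridge = devm_drm_panel_bridge_add_typed(dev, panel,
							 DRM_MODE_CONNECTOR_DSI);
	return bridge;
}
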
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 93c68e2e9484..b7a72dfdcac3 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 38f75ac580df..b70e8c5cf2e1 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 25d4ad8c7ad6..f81f81b7051f 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -13,6 +13,7 @@
* Dharam Kumar <dharam.kr@samsung.com>
*/
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -841,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->client[I2C_MHL] = client;
- ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR);
- if (!ctx->client[I2C_TPI]) {
+ ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_TPI_ADDR);
+ if (IS_ERR(ctx->client[I2C_TPI])) {
dev_err(ctx->dev, "failed to create TPI client\n");
- return -ENODEV;
+ return PTR_ERR(ctx->client[I2C_TPI]);
}
- ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR);
- if (!ctx->client[I2C_HDMI]) {
+ ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_HDMI_ADDR);
+ if (IS_ERR(ctx->client[I2C_HDMI])) {
dev_err(ctx->dev, "failed to create HDMI RX client\n");
- goto fail_tpi;
+ return PTR_ERR(ctx->client[I2C_HDMI]);
}
- ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR);
- if (!ctx->client[I2C_CBUS]) {
+ ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter,
+ I2C_CBUS_ADDR);
+ if (IS_ERR(ctx->client[I2C_CBUS])) {
dev_err(ctx->dev, "failed to create CBUS client\n");
- goto fail_hdmi;
+ return PTR_ERR(ctx->client[I2C_CBUS]);
}
return 0;
-
-fail_hdmi:
- i2c_unregister_device(ctx->client[I2C_HDMI]);
-fail_tpi:
- i2c_unregister_device(ctx->client[I2C_TPI]);
-
- return -ENODEV;
-}
-
-static void sii9234_deinit_resources(struct sii9234 *ctx)
-{
- i2c_unregister_device(ctx->client[I2C_CBUS]);
- i2c_unregister_device(ctx->client[I2C_HDMI]);
- i2c_unregister_device(ctx->client[I2C_TPI]);
}
static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge)
@@ -950,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client)
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
- sii9234_deinit_resources(ctx);
return 0;
}
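
The sii9234 conversion above shows why the whole unwind ladder disappears: devm_i2c_new_dummy_device() ties the dummy client's lifetime to the owning device. A small sketch of the pattern, with a hypothetical EXAMPLE_TPI_ADDR and example_* naming:

#include <linux/i2c.h>

#define EXAMPLE_TPI_ADDR 0x3d	/* illustrative secondary address */

static int example_claim_secondary(struct i2c_client *client,
				   struct i2c_client **tpi)
{
	*tpi = devm_i2c_new_dummy_device(&client->dev, client->adapter,
					 EXAMPLE_TPI_ADDR);
	if (IS_ERR(*tpi))
		return PTR_ERR(*tpi);	/* no manual i2c_unregister_device() */
	return 0;
}
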
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index bd3165ee5354..4c0eef406eb1 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -9,6 +9,7 @@
#include <asm/unaligned.h>
#include <drm/bridge/mhl.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -1759,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode)
scancode &= MHL_RCP_KEY_ID_MASK;
- if (!ctx->rc_dev) {
- dev_dbg(ctx->dev, "RCP input device not initialized\n");
+ if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev)
return false;
- }
if (pressed)
rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0);
@@ -2099,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx)
struct rc_dev *rc_dev;
int ret;
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
if (!rc_dev) {
dev_err(ctx->dev, "Failed to allocate RC device\n");
@@ -2213,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ if (!IS_ENABLED(CONFIG_RC_CORE))
+ return;
+
rc_unregister_device(ctx->rc_dev);
}
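
The sii8620 hunks rely on the IS_ENABLED() guard pattern that pairs with the "depends on RC_CORE || !RC_CORE" Kconfig change above: IS_ENABLED() folds to a compile-time constant, so with CONFIG_RC_CORE=n the rc_* calls become provably dead code and the compiler eliminates them, avoiding link errors without any #ifdef. A sketch (example_* is illustrative):

#include <linux/kconfig.h>
#include <media/rc-core.h>

static void example_report_key(struct rc_dev *rc_dev, u32 scancode,
			       bool pressed)
{
	if (!IS_ENABLED(CONFIG_RC_CORE) || !rc_dev)
		return;

	if (pressed)
		rc_keydown(rc_dev, RC_PROTO_CEC, scancode, 0);
	else
		rc_keyup(rc_dev);
}
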
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index ac1e001d0882..70ab4fbdc23e 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -285,7 +285,7 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
ret = cec_register_adapter(cec->adap, pdev->dev.parent);
if (ret < 0) {
- cec_notifier_cec_adap_unregister(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
return ret;
}
@@ -302,7 +302,7 @@ static int dw_hdmi_cec_remove(struct platform_device *pdev)
{
struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notify);
+ cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 1d15cf9b6821..d7e65c869415 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -102,6 +102,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
}
dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
+ dw_hdmi_set_channel_status(hdmi, hparms->iec.status);
dw_hdmi_set_channel_count(hdmi, hparms->channels);
dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);
@@ -109,6 +110,14 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
hdmi_write(audio, conf0, HDMI_AUD_CONF0);
hdmi_write(audio, conf1, HDMI_AUD_CONF1);
+ return 0;
+}
+
+static int dw_hdmi_i2s_audio_startup(struct device *dev, void *data)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
dw_hdmi_audio_enable(hdmi);
return 0;
@@ -151,11 +160,23 @@ static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
return -EINVAL;
}
+static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct dw_hdmi_i2s_audio_data *audio = data;
+ struct dw_hdmi *hdmi = audio->hdmi;
+
+ return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
+ .audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
.get_eld = dw_hdmi_i2s_get_eld,
.get_dai_id = dw_hdmi_i2s_get_dai_id,
+ .hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
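
Two things change in the audio glue above: hw_params now forwards the raw IEC60958 channel-status bytes that hdmi_codec_params already carries (hparms->iec.status), and enabling the audio clock moves into the new .audio_startup callback. A sketch of the forwarding step (example_* is illustrative):

#include <sound/hdmi-codec.h>
#include <drm/bridge/dw_hdmi.h>

static int example_hw_params(struct dw_hdmi *hdmi,
			     struct hdmi_codec_params *hparms)
{
	dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
	/* program sampling-frequency/word-length channel-status fields */
	dw_hdmi_set_channel_status(hdmi, hparms->iec.status);
	dw_hdmi_set_channel_count(hdmi, hparms->channels);
	return 0;
}
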
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 521d689413c8..67fca439bbfb 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -25,7 +25,9 @@
#include <uapi/linux/videodev2.h>
#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -36,6 +38,7 @@
#include "dw-hdmi-cec.h"
#include "dw-hdmi.h"
+#define DDC_CI_ADDR 0x37
#define DDC_SEGMENT_ADDR 0x30
#define HDMI_EDID_LEN 512
@@ -191,6 +194,10 @@ struct dw_hdmi {
struct mutex cec_notifier_mutex;
struct cec_notifier *cec_notifier;
+
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ enum drm_connector_status last_connector_result;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -215,6 +222,28 @@ static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset)
return val;
}
+static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged)
+{
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, plugged);
+}
+
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ bool plugged;
+
+ mutex_lock(&hdmi->mutex);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ plugged = hdmi->last_connector_result == connector_status_connected;
+ handle_plugged_change(hdmi, plugged);
+ mutex_unlock(&hdmi->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb);
+
static void hdmi_modb(struct dw_hdmi *hdmi, u8 data, u8 mask, unsigned reg)
{
regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data);
@@ -398,6 +427,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap,
u8 addr = msgs[0].addr;
int i, ret = 0;
+ if (addr == DDC_CI_ADDR)
+ /*
+ * The internal I2C controller does not support the multi-byte
+ * read and write operations needed for DDC/CI.
+ * TOFIX: Blacklist the DDC/CI address until we filter out
+ * unsupported I2C operations.
+ */
+ return -EOPNOTSUPP;
+
dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr);
for (i = 0; i < num; i++) {
@@ -580,6 +618,26 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
return n;
}
+/*
+ * When transmitting IEC60958 linear PCM audio, these registers allow
+ * configuring the channel status information of all the channel status
+ * bits in the IEC60958 frame. For the moment this configuration is only
+ * used when the I2S audio interface, General Purpose Audio (GPA),
+ * or AHB audio DMA (AHBAUDDMA) interface is active
+ * (for S/PDIF interface this information comes from the stream).
+ */
+void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi,
+ u8 *channel_status)
+{
+ /*
+ * Set channel status register for frequency and word length.
+ * Use default values for other registers.
+ */
+ hdmi_writeb(hdmi, channel_status[3], HDMI_FC_AUDSCHNLS7);
+ hdmi_writeb(hdmi, channel_status[4], HDMI_FC_AUDSCHNLS8);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_status);
+
static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
unsigned long pixel_clk, unsigned int sample_rate)
{
@@ -1712,6 +1770,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi,
HDMI_FC_DATAUTO0_VSD_MASK);
}
+static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi)
+{
+ const struct drm_connector_state *conn_state = hdmi->connector.state;
+ struct hdmi_drm_infoframe frame;
+ u8 buffer[30];
+ ssize_t err;
+ int i;
+
+ if (!hdmi->plat_data->use_drm_infoframe)
+ return;
+
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+
+ err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state);
+ if (err < 0)
+ return;
+
+ err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err);
+ return;
+ }
+
+ hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0);
+ hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1);
+
+ for (i = 0; i < frame.length; i++)
+ hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i);
+
+ hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP);
+ hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE,
+ HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN);
+}
+
static void hdmi_av_composer(struct dw_hdmi *hdmi,
const struct drm_display_mode *mode)
{
@@ -2023,7 +2116,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step E - Configure audio */
hdmi_clk_regenerator_update_pixel_clock(hdmi);
- hdmi_enable_audio_clk(hdmi, true);
+ hdmi_enable_audio_clk(hdmi, hdmi->audio_enable);
}
/* not for DVI mode */
@@ -2033,6 +2126,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
/* HDMI Initialization Step F - Configure AVI InfoFrame */
hdmi_config_AVI(hdmi, mode);
hdmi_config_vendor_specific_infoframe(hdmi, mode);
+ hdmi_config_drm_infoframe(hdmi);
} else {
dev_dbg(hdmi->dev, "%s DVI mode\n", __func__);
}
@@ -2161,6 +2255,7 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
connector);
+ enum drm_connector_status result;
mutex_lock(&hdmi->mutex);
hdmi->force = DRM_FORCE_UNSPECIFIED;
@@ -2168,7 +2263,18 @@ dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
dw_hdmi_update_phy_mask(hdmi);
mutex_unlock(&hdmi->mutex);
- return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+ result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
+
+ mutex_lock(&hdmi->mutex);
+ if (result != hdmi->last_connector_result) {
+ dev_dbg(hdmi->dev, "read_hpd result: %d", result);
+ handle_plugged_change(hdmi,
+ result == connector_status_connected);
+ hdmi->last_connector_result = result;
+ }
+ mutex_unlock(&hdmi->mutex);
+
+ return result;
}
static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -2199,6 +2305,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
+ const struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (!old_blob || !new_blob)
+ return old_blob == new_blob;
+
+ if (old_blob->length != new_blob->length)
+ return false;
+
+ return !memcmp(old_blob->data, new_blob->data, old_blob->length);
+}
+
+static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (!crtc)
+ return 0;
+
+ if (!hdr_metadata_equal(old_state, new_state)) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
static void dw_hdmi_connector_force(struct drm_connector *connector)
{
struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
@@ -2223,6 +2368,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
+ .atomic_check = dw_hdmi_connector_atomic_check,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2243,6 +2389,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property, 0);
+
drm_connector_attach_encoder(connector, encoder);
cec_fill_conn_info_from_drm(&conn_info, connector);
@@ -2619,6 +2769,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
hdmi->mc_clkdis = 0x7f;
+ hdmi->last_connector_result = connector_status_disconnected;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
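
A sketch of a codec-side consumer of the new plugged callback (example_* names are illustrative): because dw_hdmi_set_plugged_cb() replays the cached last_connector_result under the mutex, the codec sees the current state immediately and never misses a hotplug that happened before it registered.

#include <linux/device.h>
#include <drm/bridge/dw_hdmi.h>

static void example_plugged_cb(struct device *codec_dev, bool plugged)
{
	dev_dbg(codec_dev, "HDMI sink %splugged\n", plugged ? "" : "un");
}

static int example_register_cb(struct dw_hdmi *hdmi, struct device *codec_dev)
{
	return dw_hdmi_set_plugged_cb(hdmi, example_plugged_cb, codec_dev);
}
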
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 6988f12d89d9..1999db05bc3b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -158,6 +158,8 @@
#define HDMI_FC_SPDDEVICEINF 0x1062
#define HDMI_FC_AUDSCONF 0x1063
#define HDMI_FC_AUDSSTAT 0x1064
+#define HDMI_FC_AUDSCHNLS7 0x106e
+#define HDMI_FC_AUDSCHNLS8 0x106f
#define HDMI_FC_DATACH0FILL 0x1070
#define HDMI_FC_DATACH1FILL 0x1071
#define HDMI_FC_DATACH2FILL 0x1072
@@ -252,6 +254,7 @@
#define HDMI_FC_POL2 0x10DB
#define HDMI_FC_PRCONF 0x10E0
#define HDMI_FC_SCRAMBLER_CTRL 0x10E1
+#define HDMI_FC_PACKET_TX_EN 0x10E3
#define HDMI_FC_GMD_STAT 0x1100
#define HDMI_FC_GMD_EN 0x1101
@@ -287,6 +290,37 @@
#define HDMI_FC_GMD_PB26 0x111F
#define HDMI_FC_GMD_PB27 0x1120
+#define HDMI_FC_DRM_UP 0x1167
+#define HDMI_FC_DRM_HB0 0x1168
+#define HDMI_FC_DRM_HB1 0x1169
+#define HDMI_FC_DRM_PB0 0x116A
+#define HDMI_FC_DRM_PB1 0x116B
+#define HDMI_FC_DRM_PB2 0x116C
+#define HDMI_FC_DRM_PB3 0x116D
+#define HDMI_FC_DRM_PB4 0x116E
+#define HDMI_FC_DRM_PB5 0x116F
+#define HDMI_FC_DRM_PB6 0x1170
+#define HDMI_FC_DRM_PB7 0x1171
+#define HDMI_FC_DRM_PB8 0x1172
+#define HDMI_FC_DRM_PB9 0x1173
+#define HDMI_FC_DRM_PB10 0x1174
+#define HDMI_FC_DRM_PB11 0x1175
+#define HDMI_FC_DRM_PB12 0x1176
+#define HDMI_FC_DRM_PB13 0x1177
+#define HDMI_FC_DRM_PB14 0x1178
+#define HDMI_FC_DRM_PB15 0x1179
+#define HDMI_FC_DRM_PB16 0x117A
+#define HDMI_FC_DRM_PB17 0x117B
+#define HDMI_FC_DRM_PB18 0x117C
+#define HDMI_FC_DRM_PB19 0x117D
+#define HDMI_FC_DRM_PB20 0x117E
+#define HDMI_FC_DRM_PB21 0x117F
+#define HDMI_FC_DRM_PB22 0x1180
+#define HDMI_FC_DRM_PB23 0x1181
+#define HDMI_FC_DRM_PB24 0x1182
+#define HDMI_FC_DRM_PB25 0x1183
+#define HDMI_FC_DRM_PB26 0x1184
+
#define HDMI_FC_DBGFORCE 0x1200
#define HDMI_FC_DBGAUD0CH0 0x1201
#define HDMI_FC_DBGAUD1CH0 0x1202
@@ -742,6 +776,11 @@ enum {
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
+/* FC_PACKET_TX_EN field values */
+ HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80,
+ HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00,
+
/* FC_AVICONF0-FC_AVICONF3 field values */
HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 675442bfc1bd..b6e793bb653c 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -316,7 +316,8 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
@@ -981,7 +982,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct reset_control *apb_rst;
struct dw_mipi_dsi *dsi;
- struct resource *res;
int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
@@ -997,11 +997,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
if (!plat_data->base) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return ERR_PTR(-ENODEV);
-
- dsi->base = devm_ioremap_resource(dev, res);
+ dsi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->base))
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 170f162ffa55..db298f550a5a 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -16,6 +16,7 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8a8d605021f0..8029478ffebb 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
@@ -228,7 +229,9 @@ static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);
struct tc_edp_link {
- struct drm_dp_link base;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned int rate;
+ u8 num_lanes;
u8 assr;
bool scrambler_dis;
bool spread;
@@ -437,9 +440,9 @@ static u32 tc_srcctrl(struct tc_data *tc)
reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
if (tc->link.spread)
reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
- if (tc->link.base.rate != 162000)
+ if (tc->link.rate != 162000)
reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
return reg;
}
@@ -662,23 +665,35 @@ err:
static int tc_get_display_props(struct tc_data *tc)
{
+ u8 revision, num_lanes;
+ unsigned int rate;
int ret;
u8 reg;
/* Read DP Rx Link Capability */
- ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd,
+ DP_RECEIVER_CAP_SIZE);
if (ret < 0)
goto err_dpcd_read;
- if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+
+ revision = tc->link.dpcd[DP_DPCD_REV];
+ rate = drm_dp_max_link_rate(tc->link.dpcd);
+ num_lanes = drm_dp_max_lane_count(tc->link.dpcd);
+
+ if (rate != 162000 && rate != 270000) {
dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
- tc->link.base.rate = 270000;
+ rate = 270000;
}
- if (tc->link.base.num_lanes > 2) {
+ tc->link.rate = rate;
+
+ if (num_lanes > 2) {
dev_dbg(tc->dev, "Falling to 2 lanes\n");
- tc->link.base.num_lanes = 2;
+ num_lanes = 2;
}
+ tc->link.num_lanes = num_lanes;
+
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
if (ret < 0)
goto err_dpcd_read;
@@ -696,11 +711,11 @@ static int tc_get_display_props(struct tc_data *tc)
tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
- tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
- (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
- tc->link.base.num_lanes,
- (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
- "enhanced" : "non-enhanced");
+ revision >> 4, revision & 0x0f,
+ (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.num_lanes,
+ drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ "enhanced" : "default");
dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
tc->link.spread ? "0.5%" : "0.0%",
tc->link.scrambler_dis ? "disabled" : "enabled");
@@ -739,7 +754,7 @@ static int tc_set_video_mode(struct tc_data *tc,
*/
in_bw = mode->clock * bits_per_pixel / 8;
- out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+ out_bw = tc->link.num_lanes * tc->link.rate;
max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
dev_dbg(tc->dev, "set mode %dx%d\n",
@@ -901,7 +916,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
- ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
if (ret)
return ret;
@@ -911,7 +926,7 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
- if (tc->link.base.num_lanes == 2)
+ if (tc->link.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
@@ -974,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc)
}
/* Setup Link & DPRx Config for Training */
- ret = drm_dp_link_configure(aux, &tc->link.base);
+ tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate);
+ tmp[1] = tc->link.num_lanes;
+
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
+ tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2);
if (ret < 0)
goto err_dpcd_write;
@@ -1018,9 +1039,8 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Enable DP0 to start Link Training */
ret = regmap_write(tc->regmap, DP0CTL,
- ((tc->link.base.capabilities &
- DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
- DP_EN);
+ (drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
+ EF_EN : 0) | DP_EN);
if (ret)
return ret;
@@ -1099,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc)
ret = -ENODEV;
}
- if (tc->link.base.num_lanes == 2) {
+ if (tc->link.num_lanes == 2) {
value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS;
if (value != DP_CHANNEL_EQ_BITS) {
@@ -1170,7 +1190,7 @@ static int tc_stream_enable(struct tc_data *tc)
return ret;
value = VID_MN_GEN | DP_EN;
- if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
value |= EF_EN;
ret = regmap_write(tc->regmap, DP0CTL, value);
if (ret)
@@ -1296,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
return MODE_CLOCK_HIGH;
req = mode->clock * bits_per_pixel / 8;
- avail = tc->link.base.num_lanes * tc->link.base.rate;
+ avail = tc->link.num_lanes * tc->link.rate;
if (req > avail)
return MODE_BAD;
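
With tc358767 now caching the full DPCD instead of a struct drm_dp_link, the bandwidth check in mode_valid can be expressed directly against the capability helpers. A sketch under the same unit assumptions the driver makes (mode->clock is in kHz, and the link rate is the symbol clock in kHz with one data byte per symbol, so both sides come out in kBytes/s); example_* names are illustrative:

#include <drm/drm_dp_helper.h>
#include <drm/drm_modes.h>

static bool example_mode_fits(const struct drm_display_mode *mode,
			      const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			      int bits_per_pixel)
{
	int required  = mode->clock * bits_per_pixel / 8;
	int available = drm_dp_max_lane_count(dpcd) *
			drm_dp_max_link_rate(dpcd);

	return required <= available;
}
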
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 0a580957c8cf..43abf01ebd4c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 61cc2354ef1b..6f6d6d1e60ae 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -284,8 +285,8 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
else
dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
- dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
- "hpd-gpios", 0, GPIOD_IN, "hpd");
+ dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
+ "hpd", 0, GPIOD_IN, "hpd");
if (IS_ERR(dvi->hpd)) {
ret = PTR_ERR(dvi->hpd);
dvi->hpd = NULL;
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 36a69aec8a4b..248c9f765c45 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
/* cirrus (simple) display pipe */
-static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
@@ -510,7 +510,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
-DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
deleted file mode 100644
index 1f73916e528e..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#ifndef __CIRRUS_DRV_H__
-#define __CIRRUS_DRV_H__
-
-#include <video/vga.h>
-
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
-#include <drm/ttm/ttm_module.h>
-
-#include <drm/drm_gem.h>
-
-#define DRIVER_AUTHOR "Matthew Garrett"
-
-#define DRIVER_NAME "cirrus"
-#define DRIVER_DESC "qemu Cirrus emulation"
-#define DRIVER_DATE "20110418"
-
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-
-#define CIRRUSFB_CONN_LIMIT 1
-
-#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
-#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
-#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
-
-#define SEQ_INDEX 4
-#define SEQ_DATA 5
-
-#define WREG_SEQ(reg, v) \
- do { \
- WREG8(SEQ_INDEX, reg); \
- WREG8(SEQ_DATA, v); \
- } while (0) \
-
-#define CRT_INDEX 0x14
-#define CRT_DATA 0x15
-
-#define WREG_CRT(reg, v) \
- do { \
- WREG8(CRT_INDEX, reg); \
- WREG8(CRT_DATA, v); \
- } while (0) \
-
-#define GFX_INDEX 0xe
-#define GFX_DATA 0xf
-
-#define WREG_GFX(reg, v) \
- do { \
- WREG8(GFX_INDEX, reg); \
- WREG8(GFX_DATA, v); \
- } while (0) \
-
-/*
- * Cirrus has a "hidden" DAC register that can be accessed by writing to
- * the pixel mask register to reset the state, then reading from the register
- * four times. The next write will then pass to the DAC
- */
-#define VGA_DAC_MASK 0x6
-
-#define WREG_HDR(v) \
- do { \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- RREG8(VGA_DAC_MASK); \
- WREG8(VGA_DAC_MASK, v); \
- } while (0) \
-
-
-#define CIRRUS_MAX_FB_HEIGHT 4096
-#define CIRRUS_MAX_FB_WIDTH 4096
-
-#define CIRRUS_DPMS_CLEARED (-1)
-
-#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
-#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-
-struct cirrus_crtc {
- struct drm_crtc base;
- int last_dpms;
- bool enabled;
-};
-
-struct cirrus_fbdev;
-struct cirrus_mode_info {
- struct cirrus_crtc *crtc;
- /* pointer to fbdev info structure */
- struct cirrus_fbdev *gfbdev;
-};
-
-struct cirrus_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-struct cirrus_connector {
- struct drm_connector base;
-};
-
-struct cirrus_mc {
- resource_size_t vram_size;
- resource_size_t vram_base;
-};
-
-struct cirrus_device {
- struct drm_device *dev;
- unsigned long flags;
-
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
- void __iomem *rmmio;
-
- struct cirrus_mc mc;
- struct cirrus_mode_info mode_info;
-
- int num_crtc;
- int fb_mtrr;
-
- struct {
- struct ttm_bo_device bdev;
- } ttm;
- bool mm_inited;
-};
-
-
-struct cirrus_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct drm_framebuffer *gfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
-struct cirrus_bo {
- struct ttm_buffer_object bo;
- struct ttm_placement placement;
- struct ttm_bo_kmap_obj kmap;
- struct drm_gem_object gem;
- struct ttm_place placements[3];
- int pin_count;
-};
-#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
-
-static inline struct cirrus_bo *
-cirrus_bo(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct cirrus_bo, bo);
-}
-
-
-#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-
- /* cirrus_main.c */
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags);
-void cirrus_device_fini(struct cirrus_device *cdev);
-void cirrus_gem_free_object(struct drm_gem_object *obj);
-int cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset);
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch);
-
- /* cirrus_display.c */
-int cirrus_modeset_init(struct cirrus_device *cdev);
-void cirrus_modeset_fini(struct cirrus_device *cdev);
-
- /* cirrus_fbdev.c */
-int cirrus_fbdev_init(struct cirrus_device *cdev);
-void cirrus_fbdev_fini(struct cirrus_device *cdev);
-
-
-
- /* cirrus_irq.c */
-void cirrus_driver_irq_preinstall(struct drm_device *dev);
-int cirrus_driver_irq_postinstall(struct drm_device *dev);
-void cirrus_driver_irq_uninstall(struct drm_device *dev);
-irqreturn_t cirrus_driver_irq_handler(int irq, void *arg);
-
- /* cirrus_kms.c */
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
-void cirrus_driver_unload(struct drm_device *dev);
-extern struct drm_ioctl_desc cirrus_ioctls[];
-extern int cirrus_max_ioctl;
-
-int cirrus_mm_init(struct cirrus_device *cirrus);
-void cirrus_mm_fini(struct cirrus_device *cirrus);
-void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
-int cirrus_bo_create(struct drm_device *dev, int size, int align,
- uint32_t flags, struct cirrus_bo **pcirrusbo);
-int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
-
-static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
-{
- int ret;
-
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
- if (ret) {
- if (ret != -ERESTARTSYS && ret != -EBUSY)
- DRM_ERROR("reserve failed %p\n", bo);
- return ret;
- }
- return 0;
-}
-
-static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
-{
- ttm_bo_unreserve(&bo->bo);
-}
-
-int cirrus_bo_push_sysram(struct cirrus_bo *bo);
-int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
-
-extern int cirrus_bpp;
-
-#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2dd2cd87cdbb..b191d39c071d 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -31,6 +31,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_plane_helper.h>
@@ -97,17 +98,6 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
-/*
- * For connectors that support multiple encoders, either the
- * .atomic_best_encoder() or .best_encoder() operation must be implemented.
- */
-static struct drm_encoder *
-pick_single_encoder_for_connector(struct drm_connector *connector)
-{
- WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
@@ -135,7 +125,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -359,7 +349,7 @@ update_connector_routing(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = pick_single_encoder_for_connector(connector);
+ new_encoder = drm_connector_get_single_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -482,7 +472,7 @@ mode_fixup(struct drm_atomic_state *state)
continue;
funcs = crtc->helper_private;
- if (!funcs->mode_fixup)
+ if (!funcs || !funcs->mode_fixup)
continue;
ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 7a26bfb5329c..0d466d3b0809 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1405,7 +1405,7 @@ retry:
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
- if (unlikely(drm_debug & DRM_UT_STATE))
+ if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_state(state);
ret = drm_atomic_commit(state);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 37ac168fcb60..121481f6aa71 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -130,7 +130,12 @@
* Z position is set up with drm_plane_create_zpos_immutable_property() and
* drm_plane_create_zpos_property(). It controls the visibility of overlapping
* planes. Without this property the primary plane is always below the cursor
- * plane, and ordering between all other planes is undefined.
+ * plane, and ordering between all other planes is undefined. The positive
+ * Z axis points towards the user, i.e. planes with lower Z position values
+ * are underneath planes with higher Z position values. Two planes with the
+ * same Z position value have undefined ordering. Note that the Z position
+ * value can also be immutable, to inform userspace about the hard-coded
+ * stacking of planes, see drm_plane_create_zpos_immutable_property().
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3bd76e918b5d..03e01b000f7a 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[],
{
unsigned long i;
- mb();
+ mb(); /* Full memory barrier so that the CLFLUSHes below are ordered */
for (i = 0; i < num_pages; i++)
drm_clflush_page(*pages++);
- mb();
+ mb(); /* Also needed after CLFLUSH so that every cache line is flushed */
}
#endif
@@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
#elif defined(__powerpc__)
unsigned long i;
+
for (i = 0; i < num_pages; i++) {
struct page *page = pages[i];
void *page_virtual;
@@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
- mb();
+ mb(); /* CLFLUSH is only ordered by memory barriers */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
drm_clflush_page(sg_page_iter_page(&sg_iter));
- mb();
+ mb(); /* Make sure that every cache-line entry is flushed */
return;
}
@@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length)
if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
+
addr = (void *)(((unsigned long)addr) & -size);
- mb();
+ mb(); /* CLFLUSH is only ordered with a full memory barrier */
for (; addr < end; addr += size)
clflushopt(addr);
clflushopt(end - 1); /* force serialisation */
- mb();
+ mb(); /* Ensure that every data cache-line entry is flushed */
return;
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index c8922b7cac09..895b73f23079 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -415,9 +415,8 @@ static bool connector_has_possible_crtc(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->possible_crtcs & drm_crtc_mask(crtc))
return true;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4a8b2e5c2af6..2166000ed057 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -365,8 +365,6 @@ EXPORT_SYMBOL(drm_connector_attach_edid_property);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- int i;
-
/*
* In the past, drivers have attempted to model the static association
* of connector to encoder in simple connector/encoder devices using a
@@ -381,18 +379,15 @@ int drm_connector_attach_encoder(struct drm_connector *connector,
if (WARN_ON(connector->encoder))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
- if (connector->encoder_ids[i] == 0) {
- connector->encoder_ids[i] = encoder->base.id;
- return 0;
- }
- }
- return -ENOMEM;
+ connector->possible_encoders |= drm_encoder_mask(encoder);
+
+ return 0;
}
EXPORT_SYMBOL(drm_connector_attach_encoder);
/**
- * drm_connector_has_possible_encoder - check if the connector and encoder are assosicated with each other
+ * drm_connector_has_possible_encoder - check if the connector and encoder are
+ * associated with each other
* @connector: the connector
* @encoder: the encoder
*
@@ -402,15 +397,7 @@ EXPORT_SYMBOL(drm_connector_attach_encoder);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
- struct drm_encoder *enc;
- int i;
-
- drm_connector_for_each_possible_encoder(connector, enc, i) {
- if (enc == encoder)
- return true;
- }
-
- return false;
+ return connector->possible_encoders & drm_encoder_mask(encoder);
}
EXPORT_SYMBOL(drm_connector_has_possible_encoder);
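The hunks above replace the fixed-size encoder_ids[] array with a possible_encoders bitmask, so attaching can no longer run out of slots and the association test collapses to one mask check. A hypothetical driver init sketch using the real helpers:

        #include <linux/bug.h>
        #include <drm/drm_connector.h>
        #include <drm/drm_encoder.h>

        static int example_init_output(struct drm_connector *connector,
                                       struct drm_encoder *encoder)
        {
                /* No fixed array to overflow any more; only misuse returns -EINVAL */
                int ret = drm_connector_attach_encoder(connector, encoder);

                if (ret)
                        return ret;

                /* The association is now a single bitmask test */
                WARN_ON(!drm_connector_has_possible_encoder(connector, encoder));
                return 0;
        }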
@@ -480,7 +467,10 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
- * Register userspace interfaces for a connector
+ * Register userspace interfaces for a connector. Only call this for connectors
+ * which can be hotplugged after drm_dev_register() has been called already,
+ * e.g. DP MST connectors. All other connectors will be registered automatically
+ * when calling drm_dev_register().
*
* Returns:
* Zero on success, error code on failure.
@@ -526,7 +516,10 @@ EXPORT_SYMBOL(drm_connector_register);
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
- * Unregister userspace interfaces for a connector
+ * Unregister userspace interfaces for a connector. Only call this for
+ * connectors which have been registered explicitly by calling drm_connector_register(),
+ * since connectors are unregistered automatically when drm_dev_unregister() is
+ * called.
*/
void drm_connector_unregister(struct drm_connector *connector)
{
@@ -882,6 +875,38 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
{ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
};
+/*
+ * As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry
+ * Format Table 2-120
+ */
+static const struct drm_prop_enum_list dp_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" },
+ /* Colorimetry based on scRGB (IEC 61966-2-2) */
+ { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on SMPTE RP 431-2 */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+};
+
/**
* DOC: standard connector properties
*
@@ -1674,7 +1699,6 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* DOC: standard connector properties
*
* Colorspace:
- * drm_mode_create_colorspace_property - create colorspace property
* This property helps select a suitable colorspace based on the sink
* capability. Modern sink devices support wider gamut like BT2020.
* This helps switch to BT2020 mode if the BT2020 encoded video stream
@@ -1694,32 +1718,68 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
* - This property is just to inform sink what colorspace
* source is trying to drive.
*
+ * Because HDMI and DP have different colorspaces,
+ * drm_mode_create_hdmi_colorspace_property() is used for HDMI connectors and
+ * drm_mode_create_dp_colorspace_property() is used for DP connectors.
+ */
+
+/**
+ * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
* Called by a driver the first time it's needed, must be attached to desired
- * connectors.
+ * HDMI connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
*/
-int drm_mode_create_colorspace_property(struct drm_connector *connector)
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
- "Colorspace",
- hdmi_colorspaces,
- ARRAY_SIZE(hdmi_colorspaces));
- if (!prop)
- return -ENOMEM;
- } else {
- DRM_DEBUG_KMS("Colorspace property not supported\n");
+ if (connector->colorspace_property)
return 0;
- }
- connector->colorspace_property = prop;
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
+
+/**
+ * drm_mode_create_dp_colorspace_property - create dp colorspace property
+ * @connector: connector to create the Colorspace property on.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * DP connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ if (connector->colorspace_property)
+ return 0;
+
+ connector->colorspace_property =
+ drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace",
+ dp_colorspaces,
+ ARRAY_SIZE(dp_colorspaces));
+
+ if (!connector->colorspace_property)
+ return -ENOMEM;
return 0;
}
-EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property);
/**
* drm_mode_create_content_type_property - create content type property
@@ -2121,7 +2181,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
int encoders_count = 0;
int ret = 0;
int copied = 0;
- int i;
struct drm_mode_modeinfo u_mode;
struct drm_mode_modeinfo __user *mode_ptr;
uint32_t __user *encoder_ptr;
@@ -2136,14 +2195,13 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if (!connector)
return -ENOENT;
- drm_connector_for_each_possible_encoder(connector, encoder, i)
- encoders_count++;
+ encoders_count = hweight32(connector->possible_encoders);
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (put_user(encoder->base.id, encoder_ptr + copied)) {
ret = -EFAULT;
goto out;
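The getconnector hunk above is an instance of the usual count-then-copy UAPI pattern: report the count with hweight32(), and only copy IDs when the caller supplied enough room. A condensed sketch; the function name and parameters are illustrative, not the real ioctl:

        #include <linux/bitops.h>
        #include <linux/uaccess.h>
        #include <drm/drm_connector.h>
        #include <drm/drm_encoder.h>

        static int example_copy_encoder_ids(struct drm_connector *connector,
                                            u32 __user *ids, u32 *count)
        {
                struct drm_encoder *encoder;
                u32 copied = 0, avail = *count;

                *count = hweight32(connector->possible_encoders);
                if (avail < *count)
                        return 0;       /* caller retries with a larger buffer */

                drm_connector_for_each_possible_encoder(connector, encoder) {
                        if (put_user(encoder->base.id, ids + copied))
                                return -EFAULT;
                        copied++;
                }
                return 0;
        }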
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 80ddf13ad996..499b05aaccfc 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -459,6 +460,22 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
__drm_helper_disable_unused_functions(dev);
}
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ WARN_ON(hweight32(connector->possible_encoders) > 1);
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @set: mode set configuration
@@ -624,7 +641,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
new_encoder = connector->encoder;
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == connector) {
- new_encoder = connector_funcs->best_encoder(connector);
+ if (connector_funcs->best_encoder)
+ new_encoder = connector_funcs->best_encoder(connector);
+ else
+ new_encoder = drm_connector_get_single_encoder(connector);
+
/* if we can't get an encoder for a connector
we are setting now - then fail */
if (new_encoder == NULL)
diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h
index b5ac1581e623..f0a66ef47e5a 100644
--- a/drivers/gpu/drm/drm_crtc_helper_internal.h
+++ b/drivers/gpu/drm/drm_crtc_helper_internal.h
@@ -75,3 +75,6 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode);
enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
+
+struct drm_encoder *
+drm_connector_get_single_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 8230dac01a89..3a4126dc2520 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -212,8 +212,14 @@ retry:
drm_for_each_plane(plane, fb->dev) {
struct drm_plane_state *plane_state;
- if (plane->state->fb != fb)
+ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
+ if (ret)
+ goto out;
+
+ if (plane->state->fb != fb) {
+ drm_modeset_unlock(&plane->mutex);
continue;
+ }
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
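The damage-helper fix above takes plane->mutex before looking at plane->state, since the state pointer can be swapped by a concurrent commit. The standard lock/backoff dance around such a check, as a self-contained sketch (the function is illustrative; the drm_modeset_*() calls are the real API):

        #include <drm/drm_framebuffer.h>
        #include <drm/drm_modeset_lock.h>
        #include <drm/drm_plane.h>

        static bool example_plane_uses_fb(struct drm_plane *plane,
                                          struct drm_framebuffer *fb)
        {
                struct drm_modeset_acquire_ctx ctx;
                bool in_use;
                int ret;

                drm_modeset_acquire_init(&ctx, 0);
        retry:
                ret = drm_modeset_lock(&plane->mutex, &ctx);
                if (ret == -EDEADLK) {
                        drm_modeset_backoff(&ctx);
                        goto retry;
                }

                /* plane->state cannot change while plane->mutex is held */
                in_use = !ret && plane->state->fb == fb;
                if (!ret)
                        drm_modeset_unlock(&plane->mutex);

                drm_modeset_acquire_fini(&ctx);
                return in_use;
        }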
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index be1b7ba92ffe..ca3c55c6b815 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -334,19 +334,17 @@ static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
return LINE_LEN(crc->values_cnt);
}
-static unsigned int crtc_crc_poll(struct file *file, poll_table *wait)
+static __poll_t crtc_crc_poll(struct file *file, poll_table *wait)
{
struct drm_crtc *crtc = file->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
- unsigned ret;
+ __poll_t ret = 0;
poll_wait(file, &crc->wq, wait);
spin_lock_irq(&crc->lock);
if (crc->source && crtc_crc_data_count(crc))
- ret = POLLIN | POLLRDNORM;
- else
- ret = 0;
+ ret |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irq(&crc->lock);
return ret;
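The debugfs hunk above converts to the typed poll interface: a .poll hook returns __poll_t built from the EPOLL* constants rather than the legacy POLL* ones. A minimal sketch of the shape; the wait queue and readiness flag are illustrative:

        #include <linux/fs.h>
        #include <linux/poll.h>
        #include <linux/wait.h>

        static DECLARE_WAIT_QUEUE_HEAD(example_wq);
        static bool example_ready;      /* illustrative readiness condition */

        static __poll_t example_poll(struct file *file, poll_table *wait)
        {
                __poll_t mask = 0;

                poll_wait(file, &example_wq, wait);
                if (example_ready)
                        mask |= EPOLLIN | EPOLLRDNORM;
                return mask;
        }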
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index b15cee85b702..3ab2609f9ec7 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -8,9 +8,13 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <drm/drm_dp_helper.h>
+
#include <media/cec.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_helper.h>
+
/*
* Unfortunately it turns out that we have a chicken-and-egg situation
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
@@ -295,7 +299,10 @@ static void drm_dp_cec_unregister_work(struct work_struct *work)
*/
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
{
- u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+ struct drm_connector *connector = aux->cec.connector;
+ u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD |
+ CEC_CAP_CONNECTOR_INFO;
+ struct cec_connector_info conn_info;
unsigned int num_las = 1;
u8 cap;
@@ -344,13 +351,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
/* Create a new adapter */
aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
- aux, aux->cec.name, cec_caps,
+ aux, connector->name, cec_caps,
num_las);
if (IS_ERR(aux->cec.adap)) {
aux->cec.adap = NULL;
goto unlock;
}
- if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(aux->cec.adap, &conn_info);
+
+ if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) {
cec_delete_adapter(aux->cec.adap);
aux->cec.adap = NULL;
} else {
@@ -406,22 +417,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid);
/**
* drm_dp_cec_register_connector() - register a new connector
* @aux: DisplayPort AUX channel
- * @name: name of the CEC device
- * @parent: parent device
+ * @connector: drm connector
*
* A new connector was registered with associated CEC adapter name and
* CEC adapter parent device. After registering the name and parent
* drm_dp_cec_set_edid() is called to check if the connector supports
* CEC and to register a CEC adapter if that is the case.
*/
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
- struct device *parent)
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
{
WARN_ON(aux->cec.adap);
if (WARN_ON(!aux->transfer))
return;
- aux->cec.name = name;
- aux->cec.parent = parent;
+ aux->cec.connector = connector;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
}
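After the change above, drivers hand the DP CEC helpers a connector instead of a name/parent pair, and the helpers derive the adapter name, parent device and connector info themselves. A hypothetical call site (struct example_dp is an assumption standing in for a driver's private struct):

        #include <drm/drm_connector.h>
        #include <drm/drm_dp_helper.h>

        struct example_dp {
                struct drm_dp_aux aux;
        };

        static void example_register_cec(struct example_dp *dp,
                                         struct drm_connector *connector)
        {
                /* previously: drm_dp_cec_register_connector(&dp->aux, name, parent) */
                drm_dp_cec_register_connector(&dp->aux, connector);
        }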
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffc68d305afe..2c7870aef469 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -120,33 +120,49 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane)
+{
+ unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2;
+ u8 value = dp_link_status(link_status, offset);
+
+ return (value >> (lane << 1)) & 0x3;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
+
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
- udelay(100);
+ rd_interval = 100;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_TRAINING_AUX_RD_MASK;
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
rd_interval);
if (rd_interval == 0)
- udelay(400);
+ rd_interval = 400;
else
- mdelay(rd_interval * 4);
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ usleep_range(rd_interval, rd_interval * 2);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
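Besides the format-string fixes, the hunk above swaps busy-waiting (udelay()/mdelay()) for usleep_range(), which actually sleeps and gives the timer subsystem slack to coalesce wakeups. The general idiom for waits in this range, in a sketch with an illustrative delay:

        #include <linux/delay.h>

        static void example_training_wait(unsigned long delay_us)
        {
                /* Non-atomic context only; the min/max range lets timers coalesce */
                usleep_range(delay_us, delay_us * 2);
        }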
@@ -220,7 +236,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
ret = aux->transfer(aux, &msg);
-
if (ret >= 0) {
native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
@@ -337,134 +352,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
- * drm_dp_link_probe() - probe a DisplayPort link for capabilities
- * @aux: DisplayPort AUX channel
- * @link: pointer to structure in which to return link capabilities
- *
- * The structure filled in by this function can usually be passed directly
- * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
- * configure the link based on the link's capabilities.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[3];
- int err;
-
- memset(link, 0, sizeof(*link));
-
- err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
- if (err < 0)
- return err;
-
- link->revision = values[0];
- link->rate = drm_dp_bw_code_to_link_rate(values[1]);
- link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
-
- if (values[2] & DP_ENHANCED_FRAME_CAP)
- link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_probe);
-
-/**
- * drm_dp_link_power_up() - power up a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must exit the
- * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
- * Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_up);
-
-/**
- * drm_dp_link_power_down() - power down a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 value;
- int err;
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (link->revision < 0x11)
- return 0;
-
- err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
- if (err < 0)
- return err;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_power_down);
-
-/**
- * drm_dp_link_configure() - configure a DisplayPort link
- * @aux: DisplayPort AUX channel
- * @link: pointer to a structure containing the link configuration
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
-{
- u8 values[2];
- int err;
-
- values[0] = drm_dp_link_rate_to_bw_code(link->rate);
- values[1] = link->num_lanes;
-
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
- if (err < 0)
- return err;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_dp_link_configure);
-
-/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -1109,6 +996,14 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* @aux: DisplayPort AUX channel
*
* Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ * This should only be called when the underlying &struct drm_connector is
+ * initialized already. Therefore the best place to call this is from
+ * &drm_connector_funcs.late_register. Note that drivers which don't follow this
+ * will Oops when CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ *
+ * Drivers which need to use the aux channel before that point (e.g. at driver
+ * load time, before drm_dev_register() has been called) need to call
+ * drm_dp_aux_init().
*
* Returns 0 on success or a negative error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 82add736e17d..ae5809a1f19a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -28,15 +28,22 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stacktrace.h>
+#include <linux/sort.h>
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+#endif
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm_crtc_helper_internal.h"
+#include "drm_dp_mst_topology_internal.h"
/**
* DOC: dp mst helper
@@ -45,9 +52,14 @@
* protocol. The helpers contain a topology manager and bandwidth manager.
* The helpers encapsulate the sending and receiving of sideband msgs.
*/
+struct drm_dp_pending_up_req {
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct list_head next;
+};
+
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
-static int test_calc_pbn_mode(void);
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
@@ -62,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -74,6 +86,8 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+#define DBG_PREFIX "[dp_mst]"
+
#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
@@ -130,6 +144,43 @@ static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
}
#undef DP_STR
+#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
+
+static const char *drm_dp_mst_sideband_tx_state_str(int state)
+{
+ static const char * const sideband_reason_str[] = {
+ DP_STR(QUEUED),
+ DP_STR(START_SEND),
+ DP_STR(SENT),
+ DP_STR(RX),
+ DP_STR(TIMEOUT),
+ };
+
+ if (state >= ARRAY_SIZE(sideband_reason_str) ||
+ !sideband_reason_str[state])
+ return "unknown";
+
+ return sideband_reason_str[state];
+}
+
+static int
+drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
+{
+ int i;
+ u8 unpacked_rad[16];
+
+ for (i = 0; i < lct; i++) {
+ if (i % 2)
+ unpacked_rad[i] = rad[i / 2] >> 4;
+ else
+ unpacked_rad[i] = rad[i / 2] & 0xf;
+ }
+
+ /* TODO: Eventually add something to printk so we can format the rad
+ * like this: 1.2.3
+ */
+ return snprintf(out, len, "%*phC", lct, unpacked_rad);
+}
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
@@ -262,8 +313,9 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
return true;
}
-static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
- struct drm_dp_sideband_msg_tx *raw)
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
{
int idx = 0;
int i;
@@ -272,6 +324,8 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
switch (req->req_type) {
case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
idx++;
break;
@@ -359,14 +413,253 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+ }
+ raw->cur_len = idx;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
+
+/* Decode a sideband request we've encoded, mainly used for debugging */
+int
+drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req)
+{
+ const u8 *buf = raw->msg;
+ int i, idx = 0;
+ req->req_type = buf[idx++] & 0x7f;
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
case DP_POWER_DOWN_PHY:
case DP_POWER_UP_PHY:
- buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
- idx++;
+ req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ {
+ struct drm_dp_allocate_payload *a =
+ &req->u.allocate_payload;
+
+ a->number_sdp_streams = buf[idx] & 0xf;
+ a->port_number = (buf[idx] >> 4) & 0xf;
+
+ WARN_ON(buf[++idx] & 0x80);
+ a->vcpi = buf[idx] & 0x7f;
+
+ a->pbn = buf[++idx] << 8;
+ a->pbn |= buf[++idx];
+
+ idx++;
+ for (i = 0; i < a->number_sdp_streams; i++) {
+ a->sdp_stream_sink[i] =
+ (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
+ }
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
+ WARN_ON(buf[++idx] & 0x80);
+ req->u.query_payload.vcpi = buf[idx] & 0x7f;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ {
+ struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
+
+ r->port_number = (buf[idx] >> 4) & 0xf;
+
+ r->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ r->dpcd_address |= buf[++idx] & 0xff;
+
+ r->num_bytes = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ {
+ struct drm_dp_remote_dpcd_write *w =
+ &req->u.dpcd_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+
+ w->dpcd_address = (buf[idx] << 16) & 0xf0000;
+ w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+ w->dpcd_address |= buf[++idx] & 0xff;
+
+ w->num_bytes = buf[++idx];
+
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ case DP_REMOTE_I2C_READ:
+ {
+ struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
+ struct drm_dp_remote_i2c_read_tx *tx;
+ bool failed = false;
+
+ r->num_transactions = buf[idx] & 0x3;
+ r->port_number = (buf[idx] >> 4) & 0xf;
+ for (i = 0; i < r->num_transactions; i++) {
+ tx = &r->transactions[i];
+
+ tx->i2c_dev_id = buf[++idx] & 0x7f;
+ tx->num_bytes = buf[++idx];
+ tx->bytes = kmemdup(&buf[++idx],
+ tx->num_bytes,
+ GFP_KERNEL);
+ if (!tx->bytes) {
+ failed = true;
+ break;
+ }
+ idx += tx->num_bytes;
+ tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
+ tx->i2c_transaction_delay = buf[idx] & 0xf;
+ }
+
+ if (failed) {
+ while (--i >= 0)
+ kfree(r->transactions[i].bytes);
+ return -ENOMEM;
+ }
+
+ r->read_i2c_device_id = buf[++idx] & 0x7f;
+ r->num_bytes_read = buf[++idx];
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ {
+ struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
+
+ w->port_number = (buf[idx] >> 4) & 0xf;
+ w->write_i2c_device_id = buf[++idx] & 0x7f;
+ w->num_bytes = buf[++idx];
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+ GFP_KERNEL);
+ if (!w->bytes)
+ return -ENOMEM;
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
+
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer)
+{
+ int i;
+
+#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
+ if (req->req_type == DP_LINK_ADDRESS) {
+ /* No contents to print */
+ P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
+ return;
+ }
+
+ P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
+ indent++;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ P("port=%d\n", req->u.port_num.port_number);
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
+ req->u.allocate_payload.port_number,
+ req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.number_sdp_streams,
+ req->u.allocate_payload.sdp_stream_sink);
+ break;
+ case DP_QUERY_PAYLOAD:
+ P("port=%d vcpi=%d\n",
+ req->u.query_payload.port_number,
+ req->u.query_payload.vcpi);
+ break;
+ case DP_REMOTE_DPCD_READ:
+ P("port=%d dpcd_addr=%05x len=%d\n",
+ req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
+ req->u.dpcd_read.num_bytes);
+ break;
+ case DP_REMOTE_DPCD_WRITE:
+ P("port=%d addr=%05x len=%d: %*ph\n",
+ req->u.dpcd_write.port_number,
+ req->u.dpcd_write.dpcd_address,
+ req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
+ req->u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ P("port=%d num_tx=%d id=%d size=%d:\n",
+ req->u.i2c_read.port_number,
+ req->u.i2c_read.num_transactions,
+ req->u.i2c_read.read_i2c_device_id,
+ req->u.i2c_read.num_bytes_read);
+
+ indent++;
+ for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
+ const struct drm_dp_remote_i2c_read_tx *rtx =
+ &req->u.i2c_read.transactions[i];
+
+ P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
+ i, rtx->i2c_dev_id, rtx->num_bytes,
+ rtx->no_stop_bit, rtx->i2c_transaction_delay,
+ rtx->num_bytes, rtx->bytes);
+ }
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ P("port=%d id=%d size=%d: %*ph\n",
+ req->u.i2c_write.port_number,
+ req->u.i2c_write.write_i2c_device_id,
+ req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
+ req->u.i2c_write.bytes);
+ break;
+ default:
+ P("???\n");
+ break;
+ }
+#undef P
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
+
+static inline void
+drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
+ const struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ char buf[64];
+ int ret;
+ int i;
+
+ drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
+ sizeof(buf));
+ drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
+ txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
+ drm_dp_mst_sideband_tx_state_str(txmsg->state),
+ txmsg->path_msg, buf);
+
+ ret = drm_dp_decode_sideband_req(txmsg, &req);
+ if (ret) {
+ drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
+ return;
+ }
+ drm_dp_dump_sideband_msg_req_body(&req, 1, p);
+
+ switch (req.req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(req.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < req.u.i2c_read.num_transactions; i++)
+ kfree(req.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(req.u.i2c_write.bytes);
break;
}
- raw->cur_len = idx;
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
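The new decoder pairs with the encoder so that queued sideband messages can be dumped (and unit-tested) symmetrically. A round-trip sketch as a selftest might do it, assuming access to the drm_dp_mst_topology_internal.h header introduced above; the request values are illustrative:

        #include <linux/bug.h>
        #include "drm_dp_mst_topology_internal.h"

        static void example_sideband_roundtrip(void)
        {
                struct drm_dp_sideband_msg_req_body in = {
                        .req_type = DP_ENUM_PATH_RESOURCES,
                        .u.port_num.port_number = 1,
                }, out;
                struct drm_dp_sideband_msg_tx txmsg = {};

                drm_dp_encode_sideband_req(&in, &txmsg);
                if (drm_dp_decode_sideband_req(&txmsg, &out) == 0)
                        WARN_ON(out.u.port_num.port_number != 1);
        }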
@@ -842,11 +1135,11 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
clear_bit(vcpi - 1, &mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i])
- if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
- mgr->proposed_vcpis[i] = NULL;
- clear_bit(i + 1, &mgr->payload_mask);
- }
+ if (mgr->proposed_vcpis[i] &&
+ mgr->proposed_vcpis[i]->vcpi == vcpi) {
+ mgr->proposed_vcpis[i] = NULL;
+ clear_bit(i + 1, &mgr->payload_mask);
+ }
}
mutex_unlock(&mgr->payload_lock);
}
@@ -899,6 +1192,11 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
}
}
out:
+ if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
mutex_unlock(&mgr->qlock);
return ret;
@@ -1108,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
-static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+
+#define STACK_DEPTH 8
+
+static noinline void
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_ref_history *history,
+ enum drm_dp_mst_topology_ref_type type)
{
- struct drm_dp_mst_branch *mstb =
- container_of(kref, struct drm_dp_mst_branch, topology_kref);
- struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
- bool wake_tx = false;
+ struct drm_dp_mst_topology_ref_entry *entry = NULL;
+ depot_stack_handle_t backtrace;
+ ulong stack_entries[STACK_DEPTH];
+ uint n;
+ int i;
- mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
- list_del(&port->next);
- drm_dp_mst_topology_put_port(port);
+ n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
+ backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
+ if (!backtrace)
+ return;
+
+ /* Try to find an existing entry for this backtrace */
+ for (i = 0; i < history->len; i++) {
+ if (history->entries[i].backtrace == backtrace) {
+ entry = &history->entries[i];
+ break;
+ }
}
- mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
- mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
+ /* Otherwise add one */
+ if (!entry) {
+ struct drm_dp_mst_topology_ref_entry *new;
+ int new_len = history->len + 1;
+
+ new = krealloc(history->entries, sizeof(*new) * new_len,
+ GFP_KERNEL);
+ if (!new)
+ return;
+
+ entry = &new[history->len];
+ history->len = new_len;
+ history->entries = new;
+
+ entry->backtrace = backtrace;
+ entry->type = type;
+ entry->count = 0;
}
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
- wake_tx = true;
+ entry->count++;
+ entry->ts_nsec = ktime_get_ns();
+}
+
+static int
+topology_ref_history_cmp(const void *a, const void *b)
+{
+ const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
+
+ if (entry_a->ts_nsec > entry_b->ts_nsec)
+ return 1;
+ else if (entry_a->ts_nsec < entry_b->ts_nsec)
+ return -1;
+ else
+ return 0;
+}
+
+static inline const char *
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
+{
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
+ return "get";
+ else
+ return "put";
+}
+
+static void
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+ void *ptr, const char *type_str)
+{
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ int i;
+
+ if (!buf)
+ return;
+
+ if (!history->len)
+ goto out;
+
+ /* First, sort the list so that it goes from oldest to newest
+ * reference entry
+ */
+ sort(history->entries, history->len, sizeof(*history->entries),
+ topology_ref_history_cmp, NULL);
+
+ drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
+ type_str, ptr);
+
+ for (i = 0; i < history->len; i++) {
+ const struct drm_dp_mst_topology_ref_entry *entry =
+ &history->entries[i];
+ ulong *entries;
+ uint nr_entries;
+ u64 ts_nsec = entry->ts_nsec;
+ u32 rem_nsec = do_div(ts_nsec, 1000000000);
+
+ nr_entries = stack_depot_fetch(entry->backtrace, &entries);
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+
+ drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
+ entry->count,
+ topology_ref_type_to_str(entry->type),
+ ts_nsec, rem_nsec / 1000, buf);
}
- mutex_unlock(&mstb->mgr->qlock);
- if (wake_tx)
- wake_up_all(&mstb->mgr->tx_waitq);
+ /* Now free the history, since this is the only time we expose it */
+ kfree(history->entries);
+out:
+ kfree(buf);
+}
- drm_dp_mst_put_mstb_malloc(mstb);
+static __always_inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
+{
+ __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
+ "MSTB");
+}
+
+static __always_inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
+{
+ __dump_topology_ref_history(&port->topology_ref_history, port,
+ "Port");
+}
+
+static __always_inline void
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
+}
+
+static __always_inline void
+save_port_topology_ref(struct drm_dp_mst_port *port,
+ enum drm_dp_mst_topology_ref_type type)
+{
+ __topology_ref_save(port->mgr, &port->topology_ref_history, type);
+}
+
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->topology_ref_history_lock);
+}
+
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_unlock(&mgr->topology_ref_history_lock);
+}
+#else
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
+static inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+#define save_mstb_topology_ref(mstb, type)
+#define save_port_topology_ref(port, type)
+#endif
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+
+ drm_dp_mst_dump_mstb_topology_history(mstb);
+
+ INIT_LIST_HEAD(&mstb->destroy_next);
+
+ /*
+ * This can get called under mgr->mutex, so we need to perform the
+ * actual destruction of the mstb in another worker
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
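The ref-history machinery above leans on the stack depot: capture a backtrace once, keep only a compact handle, and expand it with stack_depot_fetch() when dumping. A minimal sketch of the capture step (the depth of 8 matches STACK_DEPTH above; the helper itself is illustrative):

        #include <linux/gfp.h>
        #include <linux/kernel.h>
        #include <linux/stackdepot.h>
        #include <linux/stacktrace.h>

        static depot_stack_handle_t example_capture_backtrace(void)
        {
                unsigned long entries[8];
                unsigned int n;

                /* skip one frame so the caller, not this helper, is recorded */
                n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
                return stack_depot_save(entries, n, GFP_KERNEL); /* 0 on failure */
        }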
@@ -1168,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- int ret = kref_get_unless_zero(&mstb->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("mstb %p (%d)\n", mstb,
- kref_read(&mstb->topology_kref));
+ topology_ref_history_lock(mstb->mgr);
+ ret = kref_get_unless_zero(&mstb->topology_kref);
+ if (ret) {
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref));
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+
+ topology_ref_history_unlock(mstb->mgr);
return ret;
}
@@ -1193,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
*/
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+
+ topology_ref_history_unlock(mstb->mgr);
}
/**
@@ -1213,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
+ topology_ref_history_lock(mstb->mgr);
+
DRM_DEBUG("mstb %p (%d)\n",
mstb, kref_read(&mstb->topology_kref) - 1);
- kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
-}
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
-{
- struct drm_dp_mst_branch *mstb;
-
- switch (old_pdt) {
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
- case DP_PEER_DEVICE_SST_SINK:
- /* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
- break;
- case DP_PEER_DEVICE_MST_BRANCHING:
- mstb = port->mstb;
- port->mstb = NULL;
- drm_dp_mst_topology_put_mstb(mstb);
- break;
- }
+ topology_ref_history_unlock(mstb->mgr);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
@@ -1242,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref)
container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
- if (!port->input) {
- kfree(port->cached_edid);
+ drm_dp_mst_dump_port_topology_history(port);
- /*
- * The only time we don't have a connector
- * on an output port is if the connector init
- * fails.
- */
- if (port->connector) {
- /* we can't destroy the connector here, as
- * we might be holding the mode_config.mutex
- * from an EDID retrieval */
-
- mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->next, &mgr->destroy_connector_list);
- mutex_unlock(&mgr->destroy_connector_lock);
- schedule_work(&mgr->destroy_connector_work);
- return;
- }
- /* no need to clean up vcpi
- * as if we have no connector we never setup a vcpi */
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ /* There's nothing that needs locking to destroy an input port yet */
+ if (port->input) {
+ drm_dp_mst_put_port_malloc(port);
+ return;
}
- drm_dp_mst_put_port_malloc(port);
+
+ kfree(port->cached_edid);
+
+ /*
+ * we can't destroy the connector here, as we might be holding the
+ * mode_config.mutex from an EDID retrieval
+ */
+ mutex_lock(&mgr->delayed_destroy_lock);
+ list_add(&port->next, &mgr->destroy_port_list);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+ schedule_work(&mgr->delayed_destroy_work);
}
/**
@@ -1294,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref)
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- int ret = kref_get_unless_zero(&port->topology_kref);
+ int ret;
- if (ret)
- DRM_DEBUG("port %p (%d)\n", port,
- kref_read(&port->topology_kref));
+ topology_ref_history_lock(port->mgr);
+ ret = kref_get_unless_zero(&port->topology_kref);
+ if (ret) {
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+ }
+ topology_ref_history_unlock(port->mgr);
return ret;
}
@@ -1318,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+
+ topology_ref_history_unlock(port->mgr);
}
/**
@@ -1336,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
*/
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
+ topology_ref_history_lock(port->mgr);
+
DRM_DEBUG("port %p (%d)\n",
port, kref_read(&port->topology_kref) - 1);
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+ topology_ref_history_unlock(port->mgr);
kref_put(&port->topology_kref, drm_dp_destroy_port);
}
@@ -1454,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
return parent_lct + 1;
}
-/*
- * return sends link address for new mstb
- */
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
{
- int ret;
- u8 rad[6], lct;
- bool send_link = false;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ struct drm_dp_mst_branch *mstb;
+ u8 rad[8], lct;
+ int ret = 0;
+
+ if (port->pdt == new_pdt)
+ return 0;
+
+ /* Teardown the old pdt, if there is one */
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /*
+ * If the new PDT would also have an i2c bus, don't bother
+ * with reregistering it
+ */
+ if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ new_pdt == DP_PEER_DEVICE_SST_SINK) {
+ port->pdt = new_pdt;
+ return 0;
+ }
+
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ mutex_lock(&mgr->lock);
+ drm_dp_mst_topology_put_mstb(port->mstb);
+ port->mstb = NULL;
+ mutex_unlock(&mgr->lock);
+ break;
+ }
+
+ port->pdt = new_pdt;
switch (port->pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
/* add i2c over sideband */
ret = drm_dp_mst_register_i2c_bus(&port->aux);
break;
+
case DP_PEER_DEVICE_MST_BRANCHING:
lct = drm_dp_calculate_rad(port, rad);
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (!mstb) {
+ ret = -ENOMEM;
+ DRM_ERROR("Failed to create MSTB for port %p", port);
+ goto out;
+ }
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- if (port->mstb) {
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
- /*
- * Make sure this port's memory allocation stays
- * around until its child MSTB releases it
- */
- drm_dp_mst_get_port_malloc(port);
+ mutex_lock(&mgr->lock);
+ port->mstb = mstb;
+ mstb->mgr = port->mgr;
+ mstb->port_parent = port;
- send_link = true;
- }
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
+ mutex_unlock(&mgr->lock);
+
+ /* And make sure we send a link address for this */
+ ret = 1;
break;
}
- return send_link;
+
+out:
+ if (ret < 0)
+ port->pdt = DP_PEER_DEVICE_NONE;
+ return ret;
}
/**
@@ -1617,44 +2117,131 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
-static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
- struct drm_device *dev,
- struct drm_dp_link_addr_reply_port *port_msg)
+static void
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ char proppath[255];
+ int ret;
+
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
+ if (!port->connector) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+ port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
+ drm_connector_set_tile_property(port->connector);
+ }
+
+ mgr->cbs->register_connector(port->connector);
+ return;
+
+error:
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
+}
+
+/*
+ * Drop a topology reference, and unlink the port from the in-memory topology
+ * layout
+ */
+static void
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
+{
+ mutex_lock(&mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mgr->lock);
+ drm_dp_mst_topology_put_port(port);
+}
+
+static struct drm_dp_mst_port *
+drm_dp_mst_add_port(struct drm_device *dev,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb, u8 port_number)
+{
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
+
+ if (!port)
+ return NULL;
+
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
+ port->parent = mstb;
+ port->port_num = port_number;
+ port->mgr = mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev->dev;
+ port->aux.is_remote = true;
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
+ return port;
+}
+
+static int
+drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
+ struct drm_device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- bool ret;
- bool created = false;
- int old_pdt = 0;
- int old_ddps = 0;
+ int old_ddps = 0, ret;
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
+ bool created = false, send_link_addr = false, changed = false;
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
if (!port)
- return;
- kref_init(&port->topology_kref);
- kref_init(&port->malloc_kref);
- port->parent = mstb;
- port->port_num = port_msg->port_number;
- port->mgr = mstb->mgr;
- port->aux.name = "DPMST";
- port->aux.dev = dev->dev;
- port->aux.is_remote = true;
-
- /*
- * Make sure the memory allocation for our parent branch stays
- * around until our own memory allocation is released
+ return -ENOMEM;
+ created = true;
+ changed = true;
+ } else if (!port->input && port_msg->input_port && port->connector) {
+ /* Since port->connector can't be changed here, we create a
+ * new port if input_port changes from 0 to 1
*/
- drm_dp_mst_get_mstb_malloc(mstb);
-
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ drm_dp_mst_topology_put_port(port);
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
+ port_msg->port_number);
+ if (!port)
+ return -ENOMEM;
+ changed = true;
created = true;
- } else {
- old_pdt = port->pdt;
+ } else if (port->input && !port_msg->input_port) {
+ changed = true;
+ } else if (port->connector) {
+ /* We're updating a port that's exposed to userspace, so do it
+ * under lock
+ */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+
old_ddps = port->ddps;
+ changed = port->ddps != port_msg->ddps ||
+ (port->ddps &&
+ (port->ldps != port_msg->legacy_device_plug_status ||
+ port->dpcd_rev != port_msg->dpcd_revision ||
+ port->mcs != port_msg->mcs ||
+ port->pdt != port_msg->peer_device_type ||
+ port->num_sdp_stream_sinks !=
+ port_msg->num_sdp_stream_sinks));
}
- port->pdt = port_msg->peer_device_type;
port->input = port_msg->input_port;
+ if (!port->input)
+ new_pdt = port_msg->peer_device_type;
port->mcs = port_msg->mcs;
port->ddps = port_msg->ddps;
port->ldps = port_msg->legacy_device_plug_status;
@@ -1665,77 +2252,104 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
/* manage mstb port lists with mgr lock - take a reference
for this list */
if (created) {
- mutex_lock(&mstb->mgr->lock);
+ mutex_lock(&mgr->lock);
drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
- mutex_unlock(&mstb->mgr->lock);
+ mutex_unlock(&mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
if (!port->input) {
- drm_dp_send_enum_path_resources(mstb->mgr,
- mstb, port);
+ drm_dp_send_enum_path_resources(mgr, mstb,
+ port);
}
} else {
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
-
- ret = drm_dp_port_setup_pdt(port);
- if (ret == true)
- drm_dp_send_link_address(mstb->mgr, port->mstb);
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ send_link_addr = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
+ port, ret);
+ goto fail;
}
- if (created && !port->input) {
- char proppath[255];
-
- build_mst_prop_path(mstb, port->port_num, proppath,
- sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
- port,
- proppath);
- if (!port->connector) {
- /* remove it from the port list */
- mutex_lock(&mstb->mgr->lock);
- list_del(&port->next);
- mutex_unlock(&mstb->mgr->lock);
- /* drop port list reference */
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector,
- &port->aux.ddc);
- drm_connector_set_tile_property(port->connector);
- }
- (*mstb->mgr->cbs->register_connector)(port->connector);
+ /*
+ * If this port wasn't just created, then we're reprobing because
+ * we're coming out of suspend. In this case, always resend the link
+ * address if there's an MSTB on this port
+ */
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+ send_link_addr = true;
+
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (!port->input)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+ if (send_link_addr && port->mstb) {
+ ret = drm_dp_send_link_address(mgr, port->mstb);
+ if (ret == 1) /* MSTB below us changed */
+ changed = true;
+ else if (ret < 0)
+ goto fail_put;
}
-out:
/* put reference to this port */
drm_dp_mst_topology_put_port(port);
+ return changed;
+
+fail:
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+fail_put:
+ drm_dp_mst_topology_put_port(port);
+ return ret;
}
-static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
- struct drm_dp_connection_status_notify *conn_stat)
+static void
+drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port;
- int old_pdt;
- int old_ddps;
- bool dowork = false;
+ int old_ddps, ret;
+ u8 new_pdt;
+ bool dowork = false, create_connector = false;
+
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
return;
+ if (port->connector) {
+ if (!port->input && conn_stat->input_port) {
+ /*
+ * We can't remove a connector from an already exposed
+ * port, so just throw the port out and make sure we
+ * reprobe the link address of its parent MSTB
+ */
+ drm_dp_mst_topology_unlink_port(mgr, port);
+ mstb->link_address_sent = false;
+ dowork = true;
+ goto out;
+ }
+
+ /* Locking is only needed if the port's exposed to userspace */
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ } else if (port->input && !conn_stat->input_port) {
+ create_connector = true;
+ /* Reprobe link address so we get num_sdp_streams */
+ mstb->link_address_sent = false;
+ dowork = true;
+ }
+
old_ddps = port->ddps;
- old_pdt = port->pdt;
- port->pdt = conn_stat->peer_device_type;
+ port->input = conn_stat->input_port;
port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status;
port->ddps = conn_stat->displayport_device_plug_status;
@@ -1747,17 +2361,27 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
port->available_pbn = 0;
}
}
- if (old_pdt != port->pdt && !port->input) {
- drm_dp_port_teardown_pdt(port, old_pdt);
- if (drm_dp_port_setup_pdt(port))
- dowork = true;
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
+
+ ret = drm_dp_port_set_pdt(port, new_pdt);
+ if (ret == 1) {
+ dowork = true;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
+ port, ret);
+ dowork = false;
}
+ if (port->connector)
+ drm_modeset_unlock(&mgr->base.lock);
+ else if (create_connector)
+ drm_dp_mst_port_add_connector(mstb, port);
+
+out:
drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
-
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -1800,7 +2424,7 @@ out:
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *mstb,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
@@ -1824,7 +2448,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
- uint8_t *guid)
+ const uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
int ret;
@@ -1843,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
return mstb;
}
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_port *port;
- struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent)
- drm_dp_send_link_address(mgr, mstb);
+ int ret;
+ bool changed = false;
+
+ if (!mstb->link_address_sent) {
+ ret = drm_dp_send_link_address(mgr, mstb);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
+ }
list_for_each_entry(port, &mstb->ports, next) {
- if (port->input)
- continue;
+ struct drm_dp_mst_branch *mstb_child = NULL;
- if (!port->ddps)
+ if (port->input || !port->ddps)
continue;
- if (!port->available_pbn)
+ if (!port->available_pbn) {
+ drm_modeset_lock(&mgr->base.lock, NULL);
drm_dp_send_enum_path_resources(mgr, mstb, port);
+ drm_modeset_unlock(&mgr->base.lock);
+ changed = true;
+ }
- if (port->mstb) {
+ if (port->mstb)
mstb_child = drm_dp_mst_topology_get_mstb_validated(
mgr, port->mstb);
- if (mstb_child) {
- drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_mst_topology_put_mstb(mstb_child);
- }
+
+ if (mstb_child) {
+ ret = drm_dp_check_and_send_link_address(mgr,
+ mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ return ret;
}
}
+
+ return changed;
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
+ struct drm_device *dev = mgr->dev;
struct drm_dp_mst_branch *mstb;
int ret;
+ mutex_lock(&mgr->probe_lock);
+
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
@@ -1886,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
mstb = NULL;
}
mutex_unlock(&mgr->lock);
- if (mstb) {
- drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_mst_topology_put_mstb(mstb);
+ if (!mstb) {
+ mutex_unlock(&mgr->probe_lock);
+ return;
}
+
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ mutex_unlock(&mgr->probe_lock);
+ if (ret)
+ drm_kms_helper_hotplug_event(dev);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -2035,8 +2687,11 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
idx += tosend + 1;
ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
- if (ret) {
- DRM_DEBUG_KMS("sideband msg failed to send\n");
+ if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_printf(&p, "sideband msg failed to send\n");
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
return ret;
}
@@ -2098,21 +2753,52 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+
+ if (drm_debug_enabled(DRM_UT_DP)) {
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+ }
+
if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+static void
+drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
+{
+ struct drm_dp_link_addr_reply_port *port_reply;
+ int i;
+
+ for (i = 0; i < reply->nports; i++) {
+ port_reply = &reply->ports[i];
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
+ i,
+ port_reply->input_port,
+ port_reply->peer_device_type,
+ port_reply->port_number,
+ port_reply->dpcd_revision,
+ port_reply->mcs,
+ port_reply->ddps,
+ port_reply->legacy_device_plug_status,
+ port_reply->num_sdp_streams,
+ port_reply->num_sdp_stream_sinks);
+ }
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
- int len;
struct drm_dp_sideband_msg_tx *txmsg;
- int ret;
+ struct drm_dp_link_address_ack_reply *reply;
+ struct drm_dp_mst_port *port, *tmp;
+ int i, len, ret, port_mask = 0;
+ bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return;
+ return -ENOMEM;
txmsg->dst = mstb;
len = build_link_address(txmsg);
@@ -2120,48 +2806,67 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
+ /* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- int i;
+ if (ret <= 0) {
+ DRM_ERROR("Sending link address failed with %d\n", ret);
+ goto out;
+ }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+ DRM_ERROR("link address NAK received\n");
+ ret = -EIO;
+ goto out;
+ }
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_DEBUG_KMS("link address nak received\n");
- } else {
- DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
- txmsg->reply.u.link_addr.ports[i].input_port,
- txmsg->reply.u.link_addr.ports[i].peer_device_type,
- txmsg->reply.u.link_addr.ports[i].port_number,
- txmsg->reply.u.link_addr.ports[i].dpcd_revision,
- txmsg->reply.u.link_addr.ports[i].mcs,
- txmsg->reply.u.link_addr.ports[i].ddps,
- txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
- txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
- txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
- }
+ reply = &txmsg->reply.u.link_addr;
+ DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
+ drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+ drm_dp_check_mstb_guid(mstb, reply->guid);
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
- drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
- }
- drm_kms_helper_hotplug_event(mgr->dev);
- }
- } else {
- mstb->link_address_sent = false;
- DRM_DEBUG_KMS("link address failed %d\n", ret);
+ for (i = 0; i < reply->nports; i++) {
+ port_mask |= BIT(reply->ports[i].port_number);
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
+ &reply->ports[i]);
+ if (ret == 1)
+ changed = true;
+ else if (ret < 0)
+ goto out;
}
+ /* Prune any ports that are currently a part of mstb in our in-memory
+ * topology, but were not seen in this link address. Usually this
+ * means that they were removed while the topology was out of sync,
+ * e.g. during suspend/resume
+ */
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ if (port_mask & BIT(port->port_num))
+ continue;
+
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
+ port->port_num);
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ changed = true;
+ }
+ mutex_unlock(&mgr->lock);
+
+out:
+ if (ret <= 0)
+ mstb->link_address_sent = false;
kfree(txmsg);
+ return ret < 0 ? ret : changed;
}
-static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb,
- struct drm_dp_mst_port *port)
+static int
+drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
{
- int len;
+ struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
+ int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2175,14 +2880,20 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
+ path_res = &txmsg->reply.u.path_resources;
+
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("enum path resources nak received\n");
} else {
- if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ if (port->port_num != path_res->port_number)
DRM_ERROR("got incorrect port in response\n");
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
- txmsg->reply.u.path_resources.avail_payload_bw_number);
- port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
+ path_res->port_number,
+ path_res->full_payload_bw_number,
+ path_res->avail_payload_bw_number);
+ port->available_pbn =
+ path_res->avail_payload_bw_number;
}
}
@@ -2655,30 +3366,13 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return 0;
}
-static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
- int dp_link_count,
- int *out)
+static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
{
- switch (dp_link_bw) {
- default:
+ if (dp_link_bw == 0 || dp_link_count == 0)
DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
- return false;
- case DP_LINK_BW_1_62:
- *out = 3 * dp_link_count;
- break;
- case DP_LINK_BW_2_7:
- *out = 5 * dp_link_count;
- break;
- case DP_LINK_BW_5_4:
- *out = 10 * dp_link_count;
- break;
- case DP_LINK_BW_8_1:
- *out = 15 * dp_link_count;
- break;
- }
- return true;
+ return dp_link_bw * dp_link_count / 2;
}
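
The closed-form replacement works because the raw DP_LINK_BW_* codes in the DPCD encode the link rate in units of 0.27 Gbps, so halving the code times the lane count reproduces exactly the per-lane PBN divisors the old switch statement listed. A minimal userspace sketch of that equivalence, assuming the standard DPCD rate codes (0x06, 0x0a, 0x14, 0x1e):

    /*
     * Sanity check of the closed-form PBN divisor against the values
     * the old switch statement produced; DP_LINK_BW_* are the raw DPCD
     * link-rate codes (link rate in 0.27 Gbps units).
     */
    #include <assert.h>

    #define DP_LINK_BW_1_62 0x06
    #define DP_LINK_BW_2_7  0x0a
    #define DP_LINK_BW_5_4  0x14
    #define DP_LINK_BW_8_1  0x1e

    static int vc_payload_bw(unsigned char bw, unsigned char lanes)
    {
            return bw * lanes / 2;
    }

    int main(void)
    {
            assert(vc_payload_bw(DP_LINK_BW_1_62, 4) == 3 * 4);
            assert(vc_payload_bw(DP_LINK_BW_2_7, 4) == 5 * 4);
            assert(vc_payload_bw(DP_LINK_BW_5_4, 2) == 10 * 2);
            assert(vc_payload_bw(DP_LINK_BW_8_1, 1) == 15 * 1);
            return 0;
    }
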
/**
@@ -2710,9 +3404,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
- mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
- &mgr->pbn_div)) {
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (mgr->pbn_div == 0) {
ret = -EINVAL;
goto out_unlock;
}
@@ -2767,6 +3461,23 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+static void
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ /* The link address will need to be re-sent on resume */
+ mstb->link_address_sent = false;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ /* The PBN for each port will also need to be re-probed */
+ port->available_pbn = 0;
+
+ if (port->mstb)
+ drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
+ }
+}
+
/**
* drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
* @mgr: manager to suspend
@@ -2780,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->up_req_work);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ flush_work(&mgr->delayed_destroy_work);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_state && mgr->mst_primary)
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
/**
* drm_dp_mst_topology_mgr_resume() - resume the MST manager
* @mgr: manager to resume
+ * @sync: whether or not to perform topology reprobing synchronously
*
 * This will fetch the DPCD and see if the device is still there; if it
 * is, it will rewrite the MSTM control bits and return.
*
- * if the device fails this returns -1, and the driver should do
+ * If the device fails this returns -1, and the driver should do
* a full MST reprobe, in case we were undocked.
+ *
+ * During system resume (where it is assumed that the driver will be calling
+ * drm_atomic_helper_resume()) this function should be called beforehand with
+ * @sync set to true. In contexts like runtime resume where the driver is not
+ * expected to be calling drm_atomic_helper_resume(), this function should be
+ * called with @sync set to false in order to avoid deadlocking.
+ *
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
+ * otherwise.
*/
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync)
{
- int ret = 0;
+ int ret;
+ u8 guid[16];
mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
- if (mgr->mst_primary) {
- int sret;
- u8 guid[16];
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
+ DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (sret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
- /* Some hubs forget their guids after they resume */
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
- if (sret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
- ret = -1;
- goto out_unlock;
- }
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ /*
+ * For the final step of resuming the topology, we need to bring the
+ * state of our in-memory topology back into sync with reality. So,
+ * restart the probing process as if we're probing a new hub
+ */
+ queue_work(system_long_wq, &mgr->work);
+ mutex_unlock(&mgr->lock);
- ret = 0;
- } else
- ret = -1;
+ if (sync) {
+ DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+ flush_work(&mgr->work);
+ }
-out_unlock:
+ return 0;
+
+out_fail:
mutex_unlock(&mgr->lock);
- return ret;
+ return -1;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
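
A sketch of how a driver might split the two resume paths with the new @sync flag; my_dev, its fields and my_driver_full_mst_reprobe() are hypothetical stand-ins, only the drm_dp_mst_topology_mgr_resume() and drm_atomic_helper_resume() calls are real API:

    /* Hedged sketch -- the my_* names are hypothetical. */
    static int my_driver_system_resume(struct my_dev *mdev)
    {
            int ret;

            /* Synchronous reprobe, so the topology is back in sync
             * before the atomic state below is restored. */
            ret = drm_dp_mst_topology_mgr_resume(&mdev->mst_mgr, true);
            if (ret < 0)
                    my_driver_full_mst_reprobe(mdev); /* undocked during suspend */

            return drm_atomic_helper_resume(mdev->drm, mdev->suspend_state);
    }

    static int my_driver_runtime_resume(struct my_dev *mdev)
    {
            /* No drm_atomic_helper_resume() call in this path, so
             * reprobe asynchronously to avoid the deadlock the
             * kernel-doc above warns about. */
            return drm_dp_mst_topology_mgr_resume(&mdev->mst_mgr, false);
    }
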
@@ -2890,136 +3628,198 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
- int ret = 0;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
+ int slot = -1;
+
+ if (!drm_dp_get_one_sb_msg(mgr, false))
+ goto clear_down_rep_recv;
- if (!drm_dp_get_one_sb_msg(mgr, false)) {
- memset(&mgr->down_rep_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
+ if (!mgr->down_rep_recv.have_eomt)
return 0;
+
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ goto clear_down_rep_recv;
+ }
+
+ /* find the message */
+ slot = hdr->seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
+
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb, hdr->seqno, hdr->lct, hdr->rad[0],
+ mgr->down_rep_recv.msg[0]);
+ goto no_msg;
}
- if (mgr->down_rep_recv.have_eomt) {
- struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- int slot = -1;
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad);
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+ txmsg->reply.req_type,
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
+ txmsg->reply.u.nak.reason,
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+ txmsg->reply.u.nak.nak_data);
- /* find the message */
- slot = mgr->down_rep_recv.initial_hdr.seqno;
- mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
- mutex_unlock(&mgr->qlock);
-
- if (!txmsg) {
- DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
- mstb,
- mgr->down_rep_recv.initial_hdr.seqno,
- mgr->down_rep_recv.initial_hdr.lct,
- mgr->down_rep_recv.initial_hdr.rad[0],
- mgr->down_rep_recv.msg[0]);
- drm_dp_mst_topology_put_mstb(mstb);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ drm_dp_mst_topology_put_mstb(mstb);
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mutex_unlock(&mgr->qlock);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
- txmsg->reply.req_type,
- drm_dp_mst_req_type_str(txmsg->reply.req_type),
- txmsg->reply.u.nak.reason,
- drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
- txmsg->reply.u.nak.nak_data);
-
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
+ wake_up_all(&mgr->tx_waitq);
+
+ return 0;
- mutex_lock(&mgr->qlock);
- txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mutex_unlock(&mgr->qlock);
+no_msg:
+ drm_dp_mst_topology_put_mstb(mstb);
+clear_down_rep_recv:
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- wake_up_all(&mgr->tx_waitq);
- }
- return ret;
+ return 0;
}
-static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+static inline bool
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_pending_up_req *up_req)
{
- int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
+ struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
+ bool hotplug = false;
- if (!drm_dp_get_one_sb_msg(mgr, true)) {
- memset(&mgr->up_req_recv, 0,
- sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+ if (hdr->broadcast) {
+ const u8 *guid = NULL;
+
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
+ guid = msg->u.conn_stat.guid;
+ else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+ guid = msg->u.resource_stat.guid;
+
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ } else {
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
- if (mgr->up_req_recv.have_eomt) {
- struct drm_dp_sideband_msg_req_body msg;
- struct drm_dp_mst_branch *mstb = NULL;
- bool seqno;
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr->lct);
+ return false;
+ }
- if (!mgr->up_req_recv.initial_hdr.broadcast) {
- mstb = drm_dp_get_mst_branch_device(mgr,
- mgr->up_req_recv.initial_hdr.lct,
- mgr->up_req_recv.initial_hdr.rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
- }
+ /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
- seqno = mgr->up_req_recv.initial_hdr.seqno;
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ drm_dp_mst_topology_put_mstb(mstb);
+ return hotplug;
+}
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+static void drm_dp_mst_up_req_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ up_req_work);
+ struct drm_dp_pending_up_req *up_req;
+ bool send_hotplug = false;
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+ mutex_lock(&mgr->probe_lock);
+ while (true) {
+ mutex_lock(&mgr->up_req_lock);
+ up_req = list_first_entry_or_null(&mgr->up_req_list,
+ struct drm_dp_pending_up_req,
+ next);
+ if (up_req)
+ list_del(&up_req->next);
+ mutex_unlock(&mgr->up_req_lock);
+
+ if (!up_req)
+ break;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
+ kfree(up_req);
+ }
+ mutex_unlock(&mgr->probe_lock);
- drm_dp_update_port(mstb, &msg.u.conn_stat);
+ if (send_hotplug)
+ drm_kms_helper_hotplug_event(mgr->dev);
+}
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
- drm_kms_helper_hotplug_event(mgr->dev);
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
+ struct drm_dp_pending_up_req *up_req;
+ bool seqno;
- } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
- if (!mstb)
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+ if (!drm_dp_get_one_sb_msg(mgr, true))
+ goto out;
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
- }
+ if (!mgr->up_req_recv.have_eomt)
+ return 0;
- DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
- }
+ up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
+ if (!up_req) {
+ DRM_ERROR("Not enough memory to process MST up req\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&up_req->next);
- if (mstb)
- drm_dp_mst_topology_put_mstb(mstb);
+ seqno = hdr->seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
+ up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
+ DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
+ up_req->msg.req_type);
+ kfree(up_req);
+ goto out;
}
- return ret;
+
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ seqno, false);
+
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ const struct drm_dp_connection_status_notify *conn_stat =
+ &up_req->msg.u.conn_stat;
+
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+ conn_stat->legacy_device_plug_status,
+ conn_stat->displayport_device_plug_status,
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ const struct drm_dp_resource_status_notify *res_stat =
+ &up_req->msg.u.resource_stat;
+
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
+ res_stat->port_number,
+ res_stat->available_pbn);
+ }
+
+ up_req->hdr = *hdr;
+ mutex_lock(&mgr->up_req_lock);
+ list_add_tail(&up_req->next, &mgr->up_req_list);
+ mutex_unlock(&mgr->up_req_lock);
+ queue_work(system_long_wq, &mgr->up_req_work);
+
+out:
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
}
/**
@@ -3063,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
* @connector: DRM connector for this port
+ * @ctx: The acquisition context to use for grabbing locks
* @mgr: manager for this port
- * @port: unverified pointer to a port
+ * @port: pointer to a port
*
- * This returns the current connection state for a port. It validates the
- * port pointer still exists so the caller doesn't require a reference
+ * This returns the current connection state for a port.
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
- struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- enum drm_connector_status status = connector_status_disconnected;
+ int ret;
/* we need to search for the port in the mgr in case it's gone */
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
+ if (ret)
+ goto out;
+
+ ret = connector_status_disconnected;
+
if (!port->ddps)
goto out;
@@ -3088,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_SST_SINK:
- status = connector_status_connected;
+ ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
@@ -3096,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
- status = connector_status_connected;
+ ret = connector_status_connected;
break;
}
out:
drm_dp_mst_topology_put_port(port);
- return status;
+ return ret;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
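
Since the function now takes an acquire context and may return a negative errno such as -EDEADLK in addition to a connector_status value, the natural call site is a connector's &drm_connector_helper_funcs.detect_ctx hook, which already receives one. A hedged sketch (my_connector and its fields are hypothetical):

    static int my_connector_detect_ctx(struct drm_connector *connector,
                                       struct drm_modeset_acquire_ctx *ctx,
                                       bool force)
    {
            struct my_connector *conn = to_my_connector(connector);

            /* May return -EDEADLK, which the probe helpers handle by
             * backing off and retrying the whole detect sequence. */
            return drm_dp_mst_detect_port(connector, ctx, conn->mgr,
                                          conn->port);
    }
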
@@ -3237,7 +4046,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
{
struct drm_dp_mst_topology_state *topology_state;
struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, req_slots, ret;
+ int prev_slots, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
@@ -3284,8 +4093,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
}
vcpi->vcpi = req_slots;
- ret = req_slots;
- return ret;
+ return req_slots;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
@@ -3539,13 +4347,6 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
*/
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
- u64 kbps;
- s64 peak_kbps;
- u32 numerator;
- u32 denominator;
-
- kbps = clock * bpp;
-
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
* The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
@@ -3556,41 +4357,11 @@ int drm_dp_calc_pbn_mode(int clock, int bpp)
* peak_kbps *= (64/54)
* peak_kbps *= 8 convert to bytes
*/
-
- numerator = 64 * 1006;
- denominator = 54 * 8 * 1000 * 1000;
-
- kbps *= numerator;
- peak_kbps = drm_fixp_from_fraction(kbps, denominator);
-
- return drm_fixp2int_ceil(peak_kbps);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
+ 8 * 54 * 1000 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
-static int test_calc_pbn_mode(void)
-{
- int ret;
- ret = drm_dp_calc_pbn_mode(154000, 30);
- if (ret != 689) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 154000, 30, 689, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(234000, 30);
- if (ret != 1047) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 234000, 30, 1047, ret);
- return -EINVAL;
- }
- ret = drm_dp_calc_pbn_mode(297000, 24);
- if (ret != 1063) {
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
- 297000, 24, 1063, ret);
- return -EINVAL;
- }
- return 0;
-}
-
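
The removed self-test's expected values still hold for the new closed-form computation; for example, with clock = 154000 and bpp = 30: 154000 * 30 * 64 * 1006 = 297454080000, and dividing by 8 * 54 * 10^6 = 432000000 gives 688.55..., which rounds up to 689. A standalone sketch of the same arithmetic (using a u64-wide parameter instead of mul_u32_u32()):

    static inline int pbn_mode(unsigned long long clock, int bpp)
    {
            /* DIV_ROUND_UP(clock * bpp * 64 * 1006, 8 * 54 * 1000 * 1000) */
            return (clock * bpp * 64 * 1006 + 432000000 - 1) / 432000000;
    }

    /* pbn_mode(154000, 30) == 689, pbn_mode(234000, 30) == 1047,
     * pbn_mode(297000, 24) == 1063 -- the removed self-test's values. */
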
/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
@@ -3729,36 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_destroy_connector_work(struct work_struct *work)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_dp_mst_port *port;
- bool send_hotplug = false;
+ if (port->connector)
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+ drm_dp_mst_put_port_malloc(port);
+}
+
+static inline void
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_mst_topology_put_port(port);
+ }
+ mutex_unlock(&mgr->lock);
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up_all(&mstb->mgr->tx_waitq);
+
+ drm_dp_mst_put_mstb_malloc(mstb);
+}
+
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr =
+ container_of(work, struct drm_dp_mst_topology_mgr,
+ delayed_destroy_work);
+ bool send_hotplug = false, go_again;
+
/*
* Not a regular list traverse as we have to drop the destroy
- * connector lock before destroying the connector, to avoid AB->BA
+ * connector lock before destroying the mstb/port, to avoid AB->BA
* ordering between this lock and the config mutex.
*/
- for (;;) {
- mutex_lock(&mgr->destroy_connector_lock);
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
- if (!port) {
- mutex_unlock(&mgr->destroy_connector_lock);
- break;
+ do {
+ go_again = false;
+
+ for (;;) {
+ struct drm_dp_mst_branch *mstb;
+
+ mutex_lock(&mgr->delayed_destroy_lock);
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
+ struct drm_dp_mst_branch,
+ destroy_next);
+ if (mstb)
+ list_del(&mstb->destroy_next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
+
+ if (!mstb)
+ break;
+
+ drm_dp_delayed_destroy_mstb(mstb);
+ go_again = true;
}
- list_del(&port->next);
- mutex_unlock(&mgr->destroy_connector_lock);
- INIT_LIST_HEAD(&port->next);
+ for (;;) {
+ struct drm_dp_mst_port *port;
- mgr->cbs->destroy_connector(mgr, port->connector);
+ mutex_lock(&mgr->delayed_destroy_lock);
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
+ struct drm_dp_mst_port,
+ next);
+ if (port)
+ list_del(&port->next);
+ mutex_unlock(&mgr->delayed_destroy_lock);
- drm_dp_port_teardown_pdt(port, port->pdt);
- port->pdt = DP_PEER_DEVICE_NONE;
+ if (!port)
+ break;
+
+ drm_dp_delayed_destroy_port(port);
+ send_hotplug = true;
+ go_again = true;
+ }
+ } while (go_again);
- drm_dp_mst_put_port_malloc(port);
- send_hotplug = true;
- }
if (send_hotplug)
drm_kms_helper_hotplug_event(mgr->dev);
}
@@ -3920,9 +4758,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_device *dev = mgr->dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -3948,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
- mutex_init(&mgr->destroy_connector_lock);
+ mutex_init(&mgr->delayed_destroy_lock);
+ mutex_init(&mgr->up_req_lock);
+ mutex_init(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_init(&mgr->topology_ref_history_lock);
+#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
+ INIT_LIST_HEAD(&mgr->up_req_list);
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
init_waitqueue_head(&mgr->tx_waitq);
mgr->dev = dev;
mgr->aux = aux;
@@ -3970,8 +4813,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (!mgr->proposed_vcpis)
return -ENOMEM;
set_bit(0, &mgr->payload_mask);
- if (test_calc_pbn_mode() < 0)
- DRM_ERROR("MST PBN self-test failed\n");
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
@@ -3996,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
- flush_work(&mgr->destroy_connector_work);
+ cancel_work_sync(&mgr->delayed_destroy_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
@@ -4007,6 +4848,16 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
+
+ mutex_destroy(&mgr->delayed_destroy_lock);
+ mutex_destroy(&mgr->payload_lock);
+ mutex_destroy(&mgr->qlock);
+ mutex_destroy(&mgr->lock);
+ mutex_destroy(&mgr->up_req_lock);
+ mutex_destroy(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ mutex_destroy(&mgr->topology_ref_history_lock);
+#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
new file mode 100644
index 000000000000..eeda9a61c657
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology_internal.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Declarations for DP MST-related functions which are only used in selftests
+ *
+ * Copyright © 2018 Red Hat
+ * Authors:
+ * Lyude Paul <lyude@redhat.com>
+ */
+
+#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_
+#define _DRM_DP_MST_HELPER_INTERNAL_H_
+
+#include <drm/drm_dp_mst_helper.h>
+
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw);
+int drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+ struct drm_dp_sideband_msg_req_body *req);
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+ int indent, struct drm_printer *printer);
+
+#endif /* !_DRM_DP_MST_HELPER_INTERNAL_H_ */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 769feefeeeef..1b9b40a1c7c9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,26 +46,9 @@
#include "drm_internal.h"
#include "drm_legacy.h"
-/*
- * drm_debug: Enable debug output.
- * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
- */
-unsigned int drm_debug = 0;
-EXPORT_SYMBOL(drm_debug);
-
MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
-"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index 77f4e5ae4197..4a475d9696ff 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -216,13 +216,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
pps_payload->rc_range_parameters[i] =
- ((dsc_cfg->rc_range_params[i].range_min_qp <<
- DSC_PPS_RC_RANGE_MINQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_max_qp <<
- DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
- (dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_payload->rc_range_parameters[i] =
- cpu_to_be16(pps_payload->rc_range_parameters[i]);
+ cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
}
/* PPS 88 */
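
For reference, the single expression above assembles each RC range parameter into one 16-bit value before the byte swap; assuming the shift/mask definitions in include/drm/drm_dsc.h (MINQP at bit 11, MAXQP at bit 6, a 6-bit bpg offset in the low bits), the layout is equivalent to:

    /* Hedged sketch of the packed layout; shift values assumed from
     * include/drm/drm_dsc.h at this point in time. */
    static inline u16 pack_rc_range_param(u8 min_qp, u8 max_qp, u8 bpg_offset)
    {
            return (min_qp << 11) | (max_qp << 6) | (bpg_offset & 0x3f);
    }
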
@@ -336,12 +334,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
else
vdsc_cfg->nfl_bpg_offset = 0;
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
/* Number of groups used to code the entire slice */
groups_total = groups_per_line * vdsc_cfg->slice_height;
@@ -371,11 +363,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->scale_increment_interval = 0;
}
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
/*
* DSC spec mentions that bits_per_pixel specifies the target
* bits/pixel (bpp) rate that is used by the encoder,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 6b0177112e18..474ac04d5600 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1278,6 +1278,106 @@ static const struct drm_display_mode edid_cea_modes[] = {
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 108 - 1280x720@48Hz 16:9 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 109 - 1280x720@48Hz 64:27 */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
+ 2280, 2500, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 110 - 1680x720@48Hz 64:27 */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490,
+ 2530, 2750, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 111 - 1920x1080@48Hz 16:9 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 112 - 1920x1080@48Hz 64:27 */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 113 - 2560x1080@48Hz 64:27 */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 114 - 3840x2160@48Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 115 - 4096x2160@48Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 116 - 3840x2160@48Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 117 - 3840x2160@100Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 118 - 3840x2160@120Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 119 - 3840x2160@100Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 120 - 3840x2160@120Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 121 - 5120x2160@24Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116,
+ 7204, 7500, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 122 - 5120x2160@25Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816,
+ 6904, 7200, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 123 - 5120x2160@30Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784,
+ 5872, 6000, 0, 2160, 2168, 2178, 2200, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 124 - 5120x2160@48Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866,
+ 5954, 6250, 0, 2160, 2168, 2178, 2475, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 125 - 5120x2160@50Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 126 - 5120x2160@60Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284,
+ 5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 127 - 5120x2160@100Hz 64:27 */
+ { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216,
+ 6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
/*
@@ -1554,7 +1654,7 @@ static void connector_bad_edid(struct drm_connector *connector,
{
int i;
- if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS))
+ if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
dev_warn(connector->dev->dev,
@@ -2092,7 +2192,8 @@ static int standard_timing_level(struct edid *edid)
return LEVEL_CVT;
if (drm_gtf2_hbreak(edid))
return LEVEL_GTF2;
- return LEVEL_GTF;
+ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+ return LEVEL_GTF;
}
return LEVEL_DMT;
}
@@ -3108,18 +3209,10 @@ static bool drm_valid_cea_vic(u8 vic)
return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes);
}
-/**
- * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to
- * the input VIC from the CEA mode list
- * @video_code: ID given to each of the CEA modes
- *
- * Returns picture aspect ratio
- */
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
+static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code)
{
return edid_cea_modes[video_code].picture_aspect_ratio;
}
-EXPORT_SYMBOL(drm_get_cea_aspect_ratio);
/*
* Calculate the alternate clock for HDMI modes (those from the HDMI vendor
@@ -3722,7 +3815,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
if (*end < 4 || *end > 127)
return -ERANGE;
} else {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -4191,7 +4284,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -4252,7 +4345,7 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
if (cea_revision(cea) < 3) {
DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (cea_db_offsets(cea, &start, &end)) {
@@ -5071,6 +5164,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata);
+static u8 drm_mode_hdmi_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ bool has_hdmi_infoframe = connector ?
+ connector->display_info.has_hdmi_infoframe : false;
+
+ if (!has_hdmi_infoframe)
+ return 0;
+
+ /* No HDMI VIC when signalling 3D video format */
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ return 0;
+
+ return drm_match_hdmi_mode(mode);
+}
+
+static u8 drm_mode_cea_vic(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ u8 vic;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Let's check if this mode is present in
+ * HDMI 1.4b 4K modes.
+ */
+ if (drm_mode_hdmi_vic(connector, mode))
+ return 0;
+
+ vic = drm_match_cea_mode(mode);
+
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink(connector) && vic > 64)
+ return 0;
+
+ return vic;
+}
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
@@ -5098,29 +5234,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
- frame->video_code = drm_match_cea_mode(mode);
-
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && frame->video_code > 64)
- frame->video_code = 0;
-
- /*
- * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
- * we should send its VIC in vendor infoframes, else send the
- * VIC in AVI infoframes. Lets check if this mode is present in
- * HDMI 1.4b 4K modes
- */
- if (frame->video_code) {
- u8 vendor_if_vic = drm_match_hdmi_mode(mode);
- bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
-
- if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
- frame->video_code = 0;
- }
+ frame->video_code = drm_mode_cea_vic(connector, mode);
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
@@ -5285,6 +5399,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range);
+/**
+ * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe
+ * bar information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ frame->right_bar = conn_state->tv.margins.right;
+ frame->left_bar = conn_state->tv.margins.left;
+ frame->top_bar = conn_state->tv.margins.top;
+ frame->bottom_bar = conn_state->tv.margins.bottom;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars);
+
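
A short sketch of the intended usage, pairing the new bars helper with the existing infoframe setup (my_fill_avi_infoframe is a hypothetical wrapper; the two drm_hdmi_avi_* calls are the real helpers):

    static int my_fill_avi_infoframe(struct drm_connector *connector,
                                     const struct drm_display_mode *mode,
                                     struct hdmi_avi_infoframe *frame)
    {
            int err;

            err = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
                                                           mode);
            if (err < 0)
                    return err;

            /* New helper: propagate the user-configured TV margins */
            drm_hdmi_avi_infoframe_bars(frame, connector->state);
            return 0;
    }
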
static enum hdmi_3d_structure
s3d_structure_from_display_mode(const struct drm_display_mode *mode)
{
@@ -5337,8 +5468,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
bool has_hdmi_infoframe = connector ?
connector->display_info.has_hdmi_infoframe : false;
int err;
- u32 s3d_flags;
- u8 vic;
if (!frame || !mode)
return -EINVAL;
@@ -5346,8 +5475,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
if (!has_hdmi_infoframe)
return -EINVAL;
- vic = drm_match_hdmi_mode(mode);
- s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
/*
* Even if it's not absolutely necessary to send the infoframe
@@ -5358,15 +5488,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
* mode if the source simply stops sending the infoframe when
* it wants to switch from 3D to 2D.
*/
-
- if (vic && s3d_flags)
- return -EINVAL;
-
- err = hdmi_vendor_infoframe_init(frame);
- if (err < 0)
- return err;
-
- frame->vic = vic;
+ frame->vic = drm_mode_hdmi_vic(connector, mode);
frame->s3d_struct = s3d_structure_from_display_mode(mode);
return 0;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index d38b3b255926..37d8ba3ddb46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -175,7 +175,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
u8 *edid;
int fwsize, builtin;
int i, valid_extensions = 0;
- bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
+ bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS);
builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
if (builtin >= 0) {
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index 7fb47b7b8b44..80d88a55302e 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a7ba5b4902d6..8ebeccdeed23 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -46,6 +46,7 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include "drm_crtc_helper_internal.h"
#include "drm_internal.h"
static bool drm_fbdev_emulation = true;
@@ -91,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*
* Drivers that support a dumb buffer with a virtual address and mmap support,
* should try out the generic fbdev emulation using drm_fbdev_generic_setup().
+ * It will automatically set up deferred I/O if the driver requires a shadow
+ * buffer.
*
- * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
- * down by calling drm_fb_helper_fbdev_teardown().
+ * For other drivers, setup fbdev emulation by calling
+ * drm_fb_helper_fbdev_setup() and tear it down by calling
+ * drm_fb_helper_fbdev_teardown().
*
* At runtime drivers should restore the fbdev console by using
* drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
@@ -126,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup
- * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
+ * mmap page writes.
+ *
+ * Deferred I/O is not compatible with SHMEM. Such drivers should request an
+ * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
*/
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
@@ -679,49 +685,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
- * drm_fb_helper_defio_init - fbdev deferred I/O initialization
- * @fb_helper: driver-allocated fbdev helper
- *
- * This function allocates &fb_deferred_io, sets callback to
- * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init().
- * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
- * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
- *
- * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
- * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
- * affect other instances of that &fb_ops.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- struct fb_info *info = fb_helper->fbdev;
- struct fb_deferred_io *fbdefio;
- struct fb_ops *fbops;
-
- fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
- fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
- if (!fbdefio || !fbops) {
- kfree(fbdefio);
- kfree(fbops);
- return -ENOMEM;
- }
-
- info->fbdefio = fbdefio;
- fbdefio->delay = msecs_to_jiffies(50);
- fbdefio->deferred_io = drm_fb_helper_deferred_io;
-
- *fbops = *info->fbops;
- info->fbops = fbops;
-
- fb_deferred_io_init(info);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_fb_helper_defio_init);
-
-/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -2355,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* Drivers that set the dirty callback on their framebuffer will get a shadow
* fbdev buffer that is blitted onto the real buffer. This is done in order to
- * make deferred I/O work with all kinds of buffers.
+ * make deferred I/O work with all kinds of buffers. A shadow buffer can be
+ * requested explicitly by setting struct drm_mode_config.prefer_shadow or
+ * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
+ * required to use generic fbdev emulation with SHMEM helpers.
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6854f5867d51..000fa4a1899f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1099,23 +1099,12 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
struct drm_device *dev = obj->dev;
+ int ret;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (obj->funcs && obj->funcs->vm_ops)
- vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
- else
- return -EINVAL;
-
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_private_data = obj;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
* This reference is cleaned up by the corresponding vm_close
@@ -1124,6 +1113,33 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
*/
drm_gem_object_get(obj);
+ if (obj->funcs && obj->funcs->mmap) {
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret) {
+ drm_gem_object_put_unlocked(obj);
+ return ret;
+ }
+ WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
+ } else {
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
+
+ vma->vm_private_data = obj;
+
return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
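
A sketch of the new per-object path: the callback sees vma->vm_pgoff with the fake offset already stripped and is responsible for vm_flags, page protection and vm_ops itself; the core only warns if VM_DONTEXPAND is left unset. my_gem_mmap and my_gem_vm_ops are hypothetical:

    /* Hedged sketch -- the my_* names are hypothetical. */
    static int my_gem_mmap(struct drm_gem_object *obj,
                           struct vm_area_struct *vma)
    {
            /* vma->vm_pgoff already has the fake offset stripped here */
            vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
            vma->vm_page_prot =
                    pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
            vma->vm_ops = &my_gem_vm_ops;
            return 0;
    }

    static const struct drm_gem_object_funcs my_gem_funcs = {
            .mmap = my_gem_mmap,
            /* ... */
    };
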
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f5918707672f..0810d3ef6961 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -32,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
@@ -505,39 +505,30 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
-const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
/**
* drm_gem_shmem_mmap - Memory-map a shmem GEM object
- * @filp: File object
+ * @obj: gem object
* @vma: VMA for the area to be mapped
*
* This function implements an augmented version of the GEM DRM file mmap
* operation for shmem objects. Drivers which employ the shmem helpers should
- * use this function as their &file_operations.mmap handler in the DRM device file's
- * file_operations structure.
- *
- * Instead of directly referencing this function, drivers should use the
- * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ * use this function as their &drm_gem_object_funcs.mmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct drm_gem_shmem_object *shmem;
int ret;
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+ shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
@@ -545,12 +536,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
- /* VM_PFNMAP was set by drm_gem_mmap() */
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
-
- /* Remove the fake offset */
- vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
}
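With the new prototype, a driver using the SHMEM helpers references
drm_gem_shmem_mmap from its object-funcs table instead of its
file_operations; a sketch mirroring drm_gem_shmem_funcs above:

	static const struct drm_gem_object_funcs example_shmem_funcs = {
		.free		= drm_gem_shmem_free_object,
		.get_sg_table	= drm_gem_shmem_get_sg_table,
		.vmap		= drm_gem_shmem_vmap,
		.vunmap		= drm_gem_shmem_vunmap,
		.mmap		= drm_gem_shmem_mmap,
	};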
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
new file mode 100644
index 000000000000..605a8a3da7f9
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+
+#include <drm/drm_gem_ttm_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helper functions for gem objects backed by
+ * ttm.
+ */
+
+/**
+ * drm_gem_ttm_print_info() - Print &ttm_buffer_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @gem: GEM object
+ *
+ * This function can be used as &drm_gem_object_funcs.print_info
+ * callback.
+ */
+void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *gem)
+{
+ static const char * const plname[] = {
+ [ TTM_PL_SYSTEM ] = "system",
+ [ TTM_PL_TT ] = "tt",
+ [ TTM_PL_VRAM ] = "vram",
+ [ TTM_PL_PRIV ] = "priv",
+
+ [ 16 ] = "cached",
+ [ 17 ] = "uncached",
+ [ 18 ] = "wc",
+ [ 19 ] = "contig",
+
+ [ 21 ] = "pinned", /* NO_EVICT */
+ [ 22 ] = "topdown",
+ };
+ const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+ drm_printf_indent(p, indent, "placement=");
+ drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+ drm_printf(p, "\n");
+
+ if (bo->mem.bus.is_iomem) {
+ drm_printf_indent(p, indent, "bus.base=%lx\n",
+ (unsigned long)bo->mem.bus.base);
+ drm_printf_indent(p, indent, "bus.offset=%lx\n",
+ (unsigned long)bo->mem.bus.offset);
+ }
+}
+EXPORT_SYMBOL(drm_gem_ttm_print_info);
+
+/**
+ * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @vma: vm area.
+ *
+ * This function can be used as &drm_gem_object_funcs.mmap
+ * callback.
+ */
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
+{
+ struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ int ret;
+
+ ret = ttm_bo_mmap_obj(vma, bo);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * ttm has its own object refcounting, so drop gem reference
+ * to avoid double accounting.
+ */
+ drm_gem_object_put_unlocked(gem);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_ttm_mmap);
+
+MODULE_DESCRIPTION("DRM gem ttm helpers");
+MODULE_LICENSE("GPL");
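A sketch of how a TTM-based driver consumes these helpers in its
object-funcs table; the drm_gem_vram_helper changes below do exactly
this, and the remaining callbacks stay driver-specific:

	static const struct drm_gem_object_funcs example_ttm_funcs = {
		.mmap		= drm_gem_ttm_mmap,
		.print_info	= drm_gem_ttm_print_info,
		/* .free, .pin, .unpin, ... are driver-specific */
	};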
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index fd751078bae1..666cb4c22bb9 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,10 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
+#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -14,6 +19,11 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
*
* This library provides a GEM buffer object that is backed by video RAM
* (VRAM). It can be used for framebuffer devices with dedicated memory.
+ *
+ * The data structure &struct drm_vram_mm and its helpers implement a memory
+ * manager for simple framebuffer devices with dedicated video memory. Buffer
+ * objects are either placed in video RAM or evicted to system memory. The
+ * respective buffer object is provided by &struct drm_gem_vram_object.
*/
/*
@@ -26,6 +36,10 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
+
+ WARN_ON(gbo->kmap_use_count);
+ WARN_ON(gbo->kmap.virtual);
+
drm_gem_object_release(&gbo->bo.base);
}
@@ -47,6 +61,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
{
unsigned int i;
unsigned int c = 0;
+ u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
@@ -54,15 +69,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
if (pl_flag & TTM_PL_FLAG_VRAM)
gbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ TTM_PL_FLAG_VRAM |
+ invariant_flags;
if (pl_flag & TTM_PL_FLAG_SYSTEM)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
if (!c)
gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
+ TTM_PL_FLAG_SYSTEM |
+ invariant_flags;
gbo->placement.num_placement = c;
gbo->placement.num_busy_placement = c;
@@ -82,8 +100,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
int ret;
size_t acc_size;
- if (!gbo->bo.base.funcs)
- gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
+ gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
ret = drm_gem_object_init(dev, &gbo->bo.base, size);
if (ret)
@@ -192,30 +209,12 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_offset);
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
+ unsigned long pl_flag)
{
int i, ret;
struct ttm_operation_ctx ctx = { false, false };
- ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
- return ret;
-
if (gbo->pin_count)
goto out;
@@ -227,62 +226,123 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
+ return ret;
out:
++gbo->pin_count;
- ttm_bo_unreserve(&gbo->bo);
return 0;
-
-err_ttm_bo_unreserve:
- ttm_bo_unreserve(&gbo->bo);
- return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
* @gbo: the GEM VRAM object
+ * @pl_flag: a bitmask of possible memory regions
+ *
+ * Pinning a buffer object ensures that it is not evicted from
+ * a memory region. A pinned buffer object has to be unpinned before
+ * it can be pinned to another region. If the pl_flag argument is 0,
+ * the buffer is pinned at its current location (video RAM or system
+ * memory).
+ *
+ * Small buffer objects, such as cursor images, can lead to memory
+ * fragmentation if they are pinned in the middle of video RAM. This
+ * is especially problematic on devices with only a small amount of
+ * video RAM. Fragmentation can prevent the primary framebuffer from
+ * fitting into the memory region, even though there is enough memory
+ * overall. The modifier
+ * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
+ * at the high end of the memory region to avoid fragmentation.
*
* Returns:
* 0 on success, or
* a negative error code otherwise.
*/
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
- int i, ret;
- struct ttm_operation_ctx ctx = { false, false };
+ int ret;
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
- if (ret < 0)
+ if (ret)
return ret;
+ ret = drm_gem_vram_pin_locked(gbo, pl_flag);
+ ttm_bo_unreserve(&gbo->bo);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_pin);
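As a usage sketch of the fragmentation note above, a small cursor BO
would be pinned with the TOPDOWN modifier; the function name is a
placeholder:

	static int example_pin_cursor(struct drm_gem_vram_object *gbo)
	{
		/* Pin at the high end of VRAM so the low end stays
		 * contiguous for the primary framebuffer. */
		return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
					     DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
	}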
+
+static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+{
+ int i, ret;
+ struct ttm_operation_ctx ctx = { false, false };
if (WARN_ON_ONCE(!gbo->pin_count))
- goto out;
+ return 0;
--gbo->pin_count;
if (gbo->pin_count)
- goto out;
+ return 0;
for (i = 0; i < gbo->placement.num_placement ; ++i)
gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
- goto err_ttm_bo_unreserve;
-
-out:
- ttm_bo_unreserve(&gbo->bo);
+ return ret;
return 0;
+}
-err_ttm_bo_unreserve:
+/**
+ * drm_gem_vram_unpin() - Unpins a GEM VRAM object
+ * @gbo: the GEM VRAM object
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ret;
+ ret = drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
+static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
+ bool map, bool *is_iomem)
+{
+ int ret;
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+
+ if (gbo->kmap_use_count > 0)
+ goto out;
+
+ if (kmap->virtual || !map)
+ goto out;
+
+ ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ if (ret)
+ return ERR_PTR(ret);
+
+out:
+ if (!kmap->virtual) {
+ if (is_iomem)
+ *is_iomem = false;
+ return NULL; /* not mapped; don't increment ref */
+ }
+ ++gbo->kmap_use_count;
+ if (is_iomem)
+ return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return kmap->virtual;
+}
+
/**
* drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
* @gbo: the GEM VRAM object
@@ -304,43 +364,121 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem)
{
int ret;
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
-
- if (kmap->virtual || !map)
- goto out;
+ void *virtual;
- ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ERR_PTR(ret);
+ virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
+ ttm_bo_unreserve(&gbo->bo);
-out:
- if (!is_iomem)
- return kmap->virtual;
- if (!kmap->virtual) {
- *is_iomem = false;
- return NULL;
- }
- return ttm_kmap_obj_virtual(kmap, is_iomem);
+ return virtual;
}
EXPORT_SYMBOL(drm_gem_vram_kmap);
+static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
+{
+ if (WARN_ON_ONCE(!gbo->kmap_use_count))
+ return;
+ if (--gbo->kmap_use_count > 0)
+ return;
+
+ /*
+ * Permanently mapping and unmapping buffers adds overhead from
+ * updating the page tables and creates debugging output. Therefore,
+ * we delay the actual unmap operation until the BO gets evicted
+ * from memory. See drm_gem_vram_bo_driver_move_notify().
+ */
+}
+
/**
* drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
* @gbo: the GEM VRAM object
*/
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
- struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
+ int ret;
- if (!kmap->virtual)
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve() failed: ret=%d\n", ret))
return;
-
- ttm_bo_kunmap(kmap);
- kmap->virtual = NULL;
+ drm_gem_vram_kunmap_locked(gbo);
+ ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap);
/**
+ * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
+ * space
+ * @gbo: The GEM VRAM object to map
+ *
+ * The vmap function pins a GEM VRAM object to its current location, either
+ * system or video memory, and maps its buffer into kernel address space.
+ * As pinned objects cannot be relocated, you should avoid pinning objects
+ * permanently. Call drm_gem_vram_vunmap() with the returned address to
+ * unmap and unpin the GEM VRAM object.
+ *
+ * If you have special requirements for the pinning or mapping operations,
+ * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
+ *
+ * Returns:
+ * The buffer's virtual address on success, or
+ * an ERR_PTR()-encoded error code otherwise.
+ */
+void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo)
+{
+ int ret;
+ void *base;
+
+ ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_gem_vram_pin_locked(gbo, 0);
+ if (ret)
+ goto err_ttm_bo_unreserve;
+ base = drm_gem_vram_kmap_locked(gbo, true, NULL);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err_drm_gem_vram_unpin_locked;
+ }
+
+ ttm_bo_unreserve(&gbo->bo);
+
+ return base;
+
+err_drm_gem_vram_unpin_locked:
+ drm_gem_vram_unpin_locked(gbo);
+err_ttm_bo_unreserve:
+ ttm_bo_unreserve(&gbo->bo);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_vram_vmap);
+
+/**
+ * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
+ * @gbo: The GEM VRAM object to unmap
+ * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
+ *
+ * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
+ * the documentation for drm_gem_vram_vmap() for more information.
+ */
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
+ if (WARN_ONCE(ret, "ttm_bo_reserve() failed: ret=%d\n", ret))
+ return;
+
+ drm_gem_vram_kunmap_locked(gbo);
+ drm_gem_vram_unpin_locked(gbo);
+
+ ttm_bo_unreserve(&gbo->bo);
+}
+EXPORT_SYMBOL(drm_gem_vram_vunmap);
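A sketch of the intended vmap()/vunmap() pairing; the update function
is a placeholder:

	static int example_update_buffer(struct drm_gem_vram_object *gbo)
	{
		void *vaddr = drm_gem_vram_vmap(gbo);

		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);
		/* ... write image data through vaddr ... */
		drm_gem_vram_vunmap(gbo, vaddr);
		return 0;
	}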
+
+/**
* drm_gem_vram_fill_create_dumb() - \
Helper for implementing &struct drm_driver.dumb_create
* @file: the DRM file
@@ -410,59 +548,27 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
return (bo->destroy == ttm_buffer_object_destroy);
}
-/**
- * drm_gem_vram_bo_driver_evict_flags() - \
- Implements &struct ttm_bo_driver.evict_flags
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @pl: TTM placement information.
- */
-void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *pl)
+static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
+ struct ttm_placement *pl)
{
- struct drm_gem_vram_object *gbo;
-
- /* TTM may pass BOs that are not GEM VRAM BOs. */
- if (!drm_is_gem_vram(bo))
- return;
-
- gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
*pl = gbo->placement;
}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags);
-/**
- * drm_gem_vram_bo_driver_verify_access() - \
- Implements &struct ttm_bo_driver.verify_access
- * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo
- * @filp: File pointer.
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
+static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
+ struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
- return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
- filp->private_data);
-}
-EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
+ if (WARN_ON_ONCE(gbo->kmap_use_count))
+ return;
-/*
- * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm
- *
- * Most users of @struct drm_gem_vram_object will also use
- * @struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs
- * can be used to connect both.
- */
-const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
- .evict_flags = drm_gem_vram_bo_driver_evict_flags,
- .verify_access = drm_gem_vram_bo_driver_verify_access
-};
-EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
+ if (!kmap->virtual)
+ return;
+ ttm_bo_kunmap(kmap);
+ kmap->virtual = NULL;
+}
/*
* Helpers for struct drm_gem_object_funcs
@@ -544,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+
+/**
+ * drm_gem_vram_plane_helper_prepare_fb() - \
+ * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * @plane: a DRM plane
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) {
+ if (!new_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+ }
+
+ return 0;
+
+err_drm_gem_vram_unpin:
+ while (i) {
+ --i;
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
+
+/**
+ * drm_gem_vram_plane_helper_cleanup_fb() - \
+ * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * @plane: a DRM plane
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_plane_helper_prepare_fb().
+ */
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ if (!old_state->fb)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) {
+ if (!old_state->fb->obj[i])
+ continue;
+ gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]);
+ drm_gem_vram_unpin(gbo);
+ }
+}
+EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+/**
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * @pipe: a simple display pipe
+ * @new_state: the plane's new state
+ *
+ * During plane updates, this function pins the GEM VRAM
+ * objects of the plane's new framebuffer to VRAM. Call
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative errno code otherwise.
+ */
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
+{
+ return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
+
+/**
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
+ * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * @pipe: a simple display pipe
+ * @old_state: the plane's old state
+ *
+ * During plane updates, this function unpins the GEM VRAM
+ * objects of the plane's old framebuffer from VRAM. Complements
+ * drm_gem_vram_simple_display_pipe_prepare_fb().
+ */
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state);
+}
+EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
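Wiring these helpers into a driver comes down to two table entries; a
sketch for the simple-pipe case, with the enable/update callbacks left
as placeholders:

	static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
		.enable		= example_pipe_enable,	/* placeholder */
		.update		= example_pipe_update,	/* placeholder */
		.prepare_fb	= drm_gem_vram_simple_display_pipe_prepare_fb,
		.cleanup_fb	= drm_gem_vram_simple_display_pipe_cleanup_fb,
	};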
+
+/*
* PRIME helpers
*/
@@ -595,17 +824,11 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- int ret;
void *base;
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret)
- return NULL;
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- drm_gem_vram_unpin(gbo);
+ base = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(base))
return NULL;
- }
return base;
}
@@ -620,8 +843,7 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
+ drm_gem_vram_vunmap(gbo, vaddr);
}
/*
@@ -633,5 +855,278 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.pin = drm_gem_vram_object_pin,
.unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
- .vunmap = drm_gem_vram_object_vunmap
+ .vunmap = drm_gem_vram_object_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
+/*
+ * VRAM memory manager
+ */
+
+/*
+ * TTM TT
+ */
+
+static void backend_func_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func backend_func = {
+ .destroy = backend_func_destroy
+};
+
+/*
+ * TTM BO device
+ */
+
+static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
+{
+ struct ttm_tt *tt;
+ int ret;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &backend_func;
+
+ ret = ttm_tt_init(tt, bo, page_flags);
+ if (ret < 0)
+ goto err_ttm_tt_init;
+
+ return tt;
+
+err_ttm_tt_init:
+ kfree(tt);
+ return NULL;
+}
+
+static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_evict_flags(gbo, placement);
+}
+
+static void bo_driver_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct drm_gem_vram_object *gbo;
+
+ /* TTM may pass BOs that are not GEM VRAM BOs. */
+ if (!drm_is_gem_vram(bo))
+ return;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+}
+
+static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
+ struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
+
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+
+ mem->bus.addr = NULL;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM: /* nothing to do */
+ mem->bus.offset = 0;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ break;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = vmm->vram_base;
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{ }
+
+static struct ttm_bo_driver bo_driver = {
+ .ttm_tt_create = bo_driver_ttm_tt_create,
+ .ttm_tt_populate = ttm_pool_populate,
+ .ttm_tt_unpopulate = ttm_pool_unpopulate,
+ .init_mem_type = bo_driver_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = bo_driver_evict_flags,
+ .move_notify = bo_driver_move_notify,
+ .io_mem_reserve = bo_driver_io_mem_reserve,
+ .io_mem_free = bo_driver_io_mem_free,
+};
+
+/*
+ * struct drm_vram_mm
+ */
+
+#if defined(CONFIG_DEBUG_FS)
+static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ drm_mm_print(mm, &p);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ return 0;
+}
+
+static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+ { "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
+#endif
+
+/**
+ * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
+ *
+ * @minor: drm minor device.
+ *
+ * Returns:
+ * 0 on success, or
+ * a negative error code otherwise.
+ */
+int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+{
+ int ret = 0;
+
+#if defined(CONFIG_DEBUG_FS)
+ ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
+
+static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
+ uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ vmm->vram_base = vram_base;
+ vmm->vram_size = vram_size;
+
+ ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+ dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
+ true);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
+{
+ ttm_bo_device_release(&vmm->bdev);
+}
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+/**
+ * drm_vram_helper_alloc_mm - Allocates a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ * @vram_base: the base address of the video memory
+ * @vram_size: the size of the video memory in bytes
+ *
+ * Returns:
+ * The new instance of &struct drm_vram_mm on success, or
+ * an ERR_PTR()-encoded errno code otherwise.
+ */
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size)
+{
+ int ret;
+
+ if (WARN_ON(dev->vram_mm))
+ return dev->vram_mm;
+
+ dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
+ if (!dev->vram_mm)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size);
+ if (ret)
+ goto err_kfree;
+
+ return dev->vram_mm;
+
+err_kfree:
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
+
+/**
+ * drm_vram_helper_release_mm - Releases a device's instance of \
+ &struct drm_vram_mm
+ * @dev: the DRM device
+ */
+void drm_vram_helper_release_mm(struct drm_device *dev)
+{
+ if (!dev->vram_mm)
+ return;
+
+ drm_vram_mm_cleanup(dev->vram_mm);
+ kfree(dev->vram_mm);
+ dev->vram_mm = NULL;
+}
+EXPORT_SYMBOL(drm_vram_helper_release_mm);
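A sketch of the intended lifecycle, matching the usage example in
drm_vram_helper_common.c further below; the load function is a
placeholder:

	static int example_load(struct drm_device *dev,
				u64 vram_base, size_t vram_size)
	{
		struct drm_vram_mm *vmm;

		vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);
		return 0;
	}

drm_vram_helper_release_mm(dev) undoes this in the driver's unload path.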
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0bec6dbb0142..fbea69d6f909 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -40,6 +40,7 @@
#include <xen/xen.h>
#include <drm/drm_agpsupport.h>
+#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 1961f713aaab..e34058c721be 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -783,7 +783,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc,
int i, ret;
u8 *dst;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -907,7 +907,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
max_chunk = dbi->tx_buf9_len;
dst16 = dbi->tx_buf9;
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
__func__, dc, max_chunk);
@@ -955,7 +955,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
int ret;
if (mipi_dbi_command_is_read(dbi, *cmd))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
@@ -1021,7 +1021,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
unsigned int i;
for (i = 0; i < len; i++)
- data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
+ data[i] = (buf[i] << 1) | (buf[i + 1] >> 7);
}
MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
@@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
struct mipi_dbi_dev *dbidev = m->private;
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
- unsigned int i;
- int ret, idx;
+ int i, ret, idx;
if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 4581c5387372..2a6e34663146 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
node->__subtree_last = LAST(node);
- if (hole_node->allocated) {
+ if (drm_mm_node_allocated(hole_node)) {
rb = &hole_node->rb;
while (rb) {
parent = rb_entry(rb, struct drm_mm_node, rb);
@@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
node->mm = mm;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
node->hole_size = 0;
rm_hole(hole);
@@ -543,9 +543,9 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
node->color = color;
node->hole_size = 0;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
list_add(&node->node_list, &hole->node_list);
drm_mm_interval_tree_add_node(hole, node);
- node->allocated = true;
rm_hole(hole);
if (adj_start > hole_start)
@@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
+{
+ return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
+}
+
/**
* drm_mm_remove_node - Remove a memory node from the allocator.
* @node: drm_mm_node to remove
@@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
prev_node = list_prev_entry(node, node_list);
@@ -584,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node)
drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
- node->allocated = false;
if (drm_mm_hole_follows(prev_node))
rm_hole(prev_node);
add_hole(prev_node);
+
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);
@@ -605,10 +611,11 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
struct drm_mm *mm = old->mm;
- DRM_MM_BUG_ON(!old->allocated);
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
*new = *old;
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
list_replace(&old->node_list, &new->node_list);
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
@@ -622,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
&mm->holes_addr);
}
- old->allocated = false;
- new->allocated = true;
+ clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -731,9 +737,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
u64 adj_start, adj_end;
DRM_MM_BUG_ON(node->mm != mm);
- DRM_MM_BUG_ON(!node->allocated);
- DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = true;
+ DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
+ DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
+ __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
mm->scan_active++;
/* Remove this block from the node_list so that we enlarge the hole
@@ -818,8 +824,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
struct drm_mm_node *prev_node;
DRM_MM_BUG_ON(node->mm != scan->mm);
- DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = false;
+ DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
+ __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
@@ -917,7 +923,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- mm->head_node.allocated = false;
+ mm->head_node.flags = 0;
mm->head_node.mm = mm;
mm->head_node.start = start + size;
mm->head_node.size = -size;
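Callers are unaffected by the bool-to-flags conversion as long as they go
through the accessor; a sketch of the common conditional-cleanup pattern,
with the function name a placeholder:

	static void example_cleanup_node(struct drm_mm_node *node)
	{
		if (drm_mm_node_allocated(node))
			drm_mm_remove_node(node);
	}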
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 7bc03c3c154f..3b570a404933 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -428,8 +428,6 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
- *
- * FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 43d89dd59c6b..0ca58803ba46 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -250,11 +250,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
if (!remote)
return -ENODEV;
- if (!of_device_is_available(remote)) {
- of_node_put(remote);
- return -ENODEV;
- }
-
if (panel) {
*panel = of_drm_find_panel(remote);
if (!IS_ERR(*panel))
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 6b0bf42039cf..ed7985c0535a 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -44,13 +44,21 @@ static LIST_HEAD(panel_list);
/**
* drm_panel_init - initialize a panel
* @panel: DRM panel
+ * @dev: parent device of the panel
+ * @funcs: panel operations
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
*
- * Sets up internal fields of the panel so that it can subsequently be added
- * to the registry.
+ * Initialize the panel structure for subsequent registration with
+ * drm_panel_add().
*/
-void drm_panel_init(struct drm_panel *panel)
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs, int connector_type)
{
INIT_LIST_HEAD(&panel->list);
+ panel->dev = dev;
+ panel->funcs = funcs;
+ panel->connector_type = connector_type;
}
EXPORT_SYMBOL(drm_panel_init);
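A sketch of a probe path using the extended initializer; the panel
struct, funcs table and connector type are placeholders for whatever the
driver provides:

	struct example_panel {
		struct drm_panel panel;
		/* driver-private state */
	};

	static int example_panel_probe(struct device *dev,
				       struct example_panel *ctx)
	{
		drm_panel_init(&ctx->panel, dev, &example_panel_funcs,
			       DRM_MODE_CONNECTOR_DSI);
		return drm_panel_add(&ctx->panel);
	}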
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 0a2316e0e812..0814211b0f3f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -713,6 +713,15 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct file *fil;
int ret;
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret)
+ return ret;
+ vma->vm_private_data = obj;
+ drm_gem_object_get(obj);
+ return 0;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
fil = kzalloc(sizeof(*fil), GFP_KERNEL);
if (!priv || !fil) {
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index a17c8a14dba4..9a25d73c155c 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -28,6 +28,7 @@
#include <stdarg.h>
#include <linux/io.h>
+#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -35,6 +36,24 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+/*
+ * drm_debug: Enable debug output.
+ * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
+ */
+unsigned int drm_debug;
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
+module_param_named(debug, drm_debug, int, 0600);
+
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
struct drm_print_iterator *iterator = p->arg;
@@ -147,6 +166,12 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
}
EXPORT_SYMBOL(__drm_printfn_debug);
+void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ pr_err("*ERROR* %s %pV", p->prefix, vaf);
+}
+EXPORT_SYMBOL(__drm_printfn_err);
+
/**
* drm_puts - print a const string to a &drm_printer stream
* @p: the &drm printer
@@ -179,6 +204,37 @@ void drm_printf(struct drm_printer *p, const char *f, ...)
}
EXPORT_SYMBOL(drm_printf);
+/**
+ * drm_print_bits - print bits to a &drm_printer stream
+ *
+ * Print bits (in flag fields for example) in human readable form.
+ *
+ * @p: the &drm_printer
+ * @value: field value.
+ * @bits: Array with bit names.
+ * @nbits: Size of bit names array.
+ */
+void drm_print_bits(struct drm_printer *p, unsigned long value,
+ const char * const bits[], unsigned int nbits)
+{
+ bool first = true;
+ unsigned int i;
+
+ if (WARN_ON_ONCE(nbits > BITS_PER_TYPE(value)))
+ nbits = BITS_PER_TYPE(value);
+
+ for_each_set_bit(i, &value, nbits) {
+ if (WARN_ON_ONCE(!bits[i]))
+ continue;
+ drm_printf(p, "%s%s", first ? "" : ",",
+ bits[i]);
+ first = false;
+ }
+ if (first)
+ drm_printf(p, "(none)");
+}
+EXPORT_SYMBOL(drm_print_bits);
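A usage sketch, analogous to the placement decoding in
drm_gem_ttm_print_info() above; the names and bit layout here are
invented for illustration:

	static void example_print_flags(struct drm_printer *p,
					unsigned long flags)
	{
		static const char * const names[] = {
			[0] = "ready",
			[1] = "busy",
			[3] = "pinned",	/* gaps are fine; unnamed set bits WARN */
		};

		drm_print_bits(p, flags, names, ARRAY_SIZE(names));
		drm_printf(p, "\n");
	}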
+
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...)
{
@@ -206,7 +262,7 @@ void drm_dev_dbg(const struct device *dev, unsigned int category,
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
@@ -229,7 +285,7 @@ void drm_dbg(unsigned int category, const char *format, ...)
struct va_format vaf;
va_list args;
- if (!(drm_debug & category))
+ if (!drm_debug_enabled(category))
return;
va_start(args, format);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ef2c468205a2..a7c87abe88d0 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/moduleparam.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -92,7 +93,6 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
struct drm_device *dev = connector->dev;
enum drm_mode_status ret = MODE_OK;
struct drm_encoder *encoder;
- int i;
/* Step 1: Validate against connector */
ret = drm_connector_mode_valid(connector, mode);
@@ -100,7 +100,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
return ret;
/* Step 2: Validate against encoders and crtcs */
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
struct drm_crtc *crtc;
ret = drm_encoder_mode_valid(encoder, mode);
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index b11910f14c46..15fb516ae2d8 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -42,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
/* Anything goes */
return MODE_OK;
- return pipe->funcs->mode_valid(crtc, mode);
+ return pipe->funcs->mode_valid(pipe, mode);
}
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 4b5c7b0ed714..669c93fe2500 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -135,6 +135,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
#include "drm_internal.h"
@@ -1279,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags != 0)
return -EINVAL;
if (args->count_handles == 0)
@@ -1350,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
return -EOPNOTSUPP;
- if (args->pad != 0)
+ if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED)
return -EINVAL;
if (args->count_handles == 0)
@@ -1371,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
fence = drm_syncobj_fence_get(syncobjs[i]);
chain = to_dma_fence_chain(fence);
if (chain) {
- struct dma_fence *iter, *last_signaled = NULL;
-
- dma_fence_chain_for_each(iter, fence) {
- if (iter->context != fence->context) {
- dma_fence_put(iter);
- /* It is most likely that timeline has
- * unorder points. */
- break;
+ struct dma_fence *iter, *last_signaled =
+ dma_fence_get(fence);
+
+ if (args->flags &
+ DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) {
+ point = fence->seqno;
+ } else {
+ dma_fence_chain_for_each(iter, fence) {
+ if (iter->context != fence->context) {
+ dma_fence_put(iter);
+ /* It is most likely that the timeline
+ * has unordered points. */
+ break;
+ }
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
}
- dma_fence_put(last_signaled);
- last_signaled = dma_fence_get(iter);
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
}
- point = dma_fence_is_signaled(last_signaled) ?
- last_signaled->seqno :
- to_dma_fence_chain(last_signaled)->prev_seqno;
dma_fence_put(last_signaled);
} else {
point = 0;
}
+ dma_fence_put(fence);
ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
ret = ret ? -EFAULT : 0;
if (ret)
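From userspace, the new flag is passed through the query ioctl's
previously padded field; a sketch assuming libdrm's drmIoctl() and the
uapi struct layout, with the handles/points arrays provided by the
caller:

	struct drm_syncobj_timeline_array args = {
		.handles = (__u64)(uintptr_t)handles,
		.points = (__u64)(uintptr_t)points,
		.count_handles = count,
		.flags = DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);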
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 471eb927474b..11c6dd577e8e 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -13,17 +13,23 @@ struct drm_file;
#define TRACE_INCLUDE_FILE drm_trace
TRACE_EVENT(drm_vblank_event,
- TP_PROTO(int crtc, unsigned int seq),
- TP_ARGS(crtc, seq),
+ TP_PROTO(int crtc, unsigned int seq, ktime_t time, bool high_prec),
+ TP_ARGS(crtc, seq, time, high_prec),
TP_STRUCT__entry(
__field(int, crtc)
__field(unsigned int, seq)
+ __field(ktime_t, time)
+ __field(bool, high_prec)
),
TP_fast_assign(
__entry->crtc = crtc;
__entry->seq = seq;
- ),
- TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq)
+ __entry->time = time;
+ __entry->high_prec = high_prec;
+ ),
+ TP_printk("crtc=%d, seq=%u, time=%lld, high-prec=%s",
+ __entry->crtc, __entry->seq, __entry->time,
+ __entry->high_prec ? "true" : "false")
);
TRACE_EVENT(drm_vblank_event_queued,
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index fd1fbc77871f..1659b13b178c 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -106,7 +106,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
write_seqlock(&vblank->seqlock);
vblank->time = t_vblank;
- vblank->count += vblank_count_inc;
+ atomic64_add(vblank_count_inc, &vblank->count);
write_sequnlock(&vblank->seqlock);
}
@@ -272,7 +272,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, vblank->count, diff, cur_vblank, vblank->last);
+ pipe, atomic64_read(&vblank->count), diff,
+ cur_vblank, vblank->last);
if (diff == 0) {
WARN_ON_ONCE(cur_vblank != vblank->last);
@@ -294,11 +295,23 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ u64 count;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- return vblank->count;
+ count = atomic64_read(&vblank->count);
+
+ /*
+ * This read barrier corresponds to the implicit write barrier of the
+ * write seqlock in store_vblank(). Note that this is the only place
+ * where we need an explicit barrier, since all other access goes
+ * through drm_vblank_count_and_time(), which already has the required
+ * read barrier courtesy of the read seqlock.
+ */
+ smp_rmb();
+
+ return count;
}
/**
@@ -319,7 +332,7 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -693,7 +706,7 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
*/
*vblank_time = ktime_sub_ns(etime, delta_ns);
- if ((drm_debug & DRM_UT_VBL) == 0)
+ if (!drm_debug_enabled(DRM_UT_VBL))
return true;
ts_etime = ktime_to_timespec64(etime);
@@ -763,6 +776,14 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
* vblank interrupt (since it only reports the software vblank counter), see
* drm_crtc_accurate_vblank_count() for such use-cases.
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* The software vblank counter.
*/
@@ -800,7 +821,7 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
do {
seq = read_seqbegin(&vblank->seqlock);
- vblank_count = vblank->count;
+ vblank_count = atomic64_read(&vblank->count);
*vblanktime = vblank->time;
} while (read_seqretry(&vblank->seqlock, seq));
@@ -817,6 +838,14 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
+ *
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
*/
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime)
@@ -1323,7 +1352,7 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
assert_spin_locked(&dev->vblank_time_lock);
vblank = &dev->vblank[pipe];
- WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
"Cannot compute missed vblanks without frame duration\n");
framedur_ns = vblank->framedur_ns;
@@ -1581,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
unsigned int flags, pipe, high_pipe;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
return -EINVAL;
@@ -1731,7 +1760,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq);
+ trace_drm_vblank_event(pipe, seq, now,
+ dev->driver->get_vblank_timestamp != NULL);
}
/**
@@ -1806,6 +1836,14 @@ EXPORT_SYMBOL(drm_handle_vblank);
*
* This is the native KMS version of drm_handle_vblank().
*
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * See also &drm_vblank_crtc.count.
+ *
* Returns:
* True if the event was successfully handled, false on failure.
*/
@@ -1838,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id);
if (!crtc)
@@ -1896,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
return -EOPNOTSUPP;
if (!dev->irq_enabled)
- return -EINVAL;
+ return -EOPNOTSUPP;
crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id);
if (!crtc)
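A sketch of the ordering guarantee documented above: state published
before drm_crtc_handle_vblank() is visible to any reader that observes
the same or a later count. Everything but the two vblank helpers is a
placeholder:

	static irqreturn_t example_vblank_irq(int irq, void *arg)
	{
		struct example_crtc *ec = arg;

		ec->frame_state = example_read_hw_state(ec);	/* placeholder */
		drm_crtc_handle_vblank(&ec->base);	/* publishes the write */
		return IRQ_HANDLED;
	}

	static u64 example_read_count(struct example_crtc *ec)
	{
		/* Seeing the same or a later count here guarantees that
		 * ec->frame_state written above is visible. */
		return drm_crtc_vblank_count(&ec->base);
	}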
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
index e9c9f9a80ba3..2000d9b33fd5 100644
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ b/drivers/gpu/drm/drm_vram_helper_common.c
@@ -7,9 +7,8 @@
*
* This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
* buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM can be
- * managed with &struct drm_vram_mm (VRAM MM). Both data structures are
- * supposed to be used together, but can also be used individually.
+ * framebuffer devices with dedicated memory. The video RAM is managed
+ * by &struct drm_vram_mm (VRAM MM).
*
* With the GEM interface userspace applications create, manage and destroy
* graphics buffers, such as an on-screen framebuffer. GEM does not provide
@@ -50,8 +49,7 @@
* // setup device, vram base and size
* // ...
*
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
- * &drm_gem_vram_mm_funcs);
+ * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
* if (ret)
* return ret;
* return 0;
diff --git a/drivers/gpu/drm/drm_vram_mm_helper.c b/drivers/gpu/drm/drm_vram_mm_helper.c
deleted file mode 100644
index c911781d6728..000000000000
--- a/drivers/gpu/drm/drm_vram_mm_helper.c
+++ /dev/null
@@ -1,297 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_vram_mm_helper.h>
-
-#include <drm/ttm/ttm_page_alloc.h>
-
-/**
- * DOC: overview
- *
- * The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. These
- * helper functions work well with &struct drm_gem_vram_object.
- */
-
-/*
- * TTM TT
- */
-
-static void backend_func_destroy(struct ttm_tt *tt)
-{
- ttm_tt_fini(tt);
- kfree(tt);
-}
-
-static struct ttm_backend_func backend_func = {
- .destroy = backend_func_destroy
-};
-
-/*
- * TTM BO device
- */
-
-static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct ttm_tt *tt;
- int ret;
-
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
- return NULL;
-
- tt->func = &backend_func;
-
- ret = ttm_tt_init(tt, bo, page_flags);
- if (ret < 0)
- goto err_ttm_tt_init;
-
- return tt;
-
-err_ttm_tt_init:
- kfree(tt);
- return NULL;
-}
-
-static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_VRAM:
- man->func = &ttm_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (vmm->funcs && vmm->funcs->evict_flags)
- vmm->funcs->evict_flags(bo, placement);
-}
-
-static int bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
-
- if (!vmm->funcs || !vmm->funcs->verify_access)
- return 0;
- return vmm->funcs->verify_access(bo, filp);
-}
-
-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
- struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
-
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
-
- mem->bus.addr = NULL;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
-
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM: /* nothing to do */
- mem->bus.offset = 0;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- break;
- case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = vmm->vram_base;
- mem->bus.is_iomem = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{ }
-
-static struct ttm_bo_driver bo_driver = {
- .ttm_tt_create = bo_driver_ttm_tt_create,
- .ttm_tt_populate = ttm_pool_populate,
- .ttm_tt_unpopulate = ttm_pool_unpopulate,
- .init_mem_type = bo_driver_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = bo_driver_evict_flags,
- .verify_access = bo_driver_verify_access,
- .io_mem_reserve = bo_driver_io_mem_reserve,
- .io_mem_free = bo_driver_io_mem_free,
-};
-
-/*
- * struct drm_vram_mm
- */
-
-/**
- * drm_vram_mm_init() - Initialize an instance of VRAM MM.
- * @vmm: the VRAM MM instance to initialize
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
- uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- vmm->vram_base = vram_base;
- vmm->vram_size = vram_size;
- vmm->funcs = funcs;
-
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
- dev->anon_inode->i_mapping,
- true);
- if (ret)
- return ret;
-
- ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_vram_mm_init);
-
-/**
- * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
- * @vmm: the VRAM MM instance to clean up
- */
-void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
-{
- ttm_bo_device_release(&vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_cleanup);
-
-/**
- * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- * @vmm: the VRAM MM instance
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm)
-{
- return ttm_bo_mmap(filp, vma, &vmm->bdev);
-}
-EXPORT_SYMBOL(drm_vram_mm_mmap);
-
-/*
- * Helpers for integration with struct drm_device
- */
-
-/**
- * drm_vram_helper_alloc_mm - Allocates a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- * @vram_base: the base address of the video memory
- * @vram_size: the size of the video memory in bytes
- * @funcs: callback functions for buffer objects
- *
- * Returns:
- * The new instance of &struct drm_vram_mm on success, or
- * an ERR_PTR()-encoded errno code otherwise.
- */
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs)
-{
- int ret;
-
- if (WARN_ON(dev->vram_mm))
- return dev->vram_mm;
-
- dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
- if (!dev->vram_mm)
- return ERR_PTR(-ENOMEM);
-
- ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
- if (ret)
- goto err_kfree;
-
- return dev->vram_mm;
-
-err_kfree:
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_vram_helper_alloc_mm);
-
-/**
- * drm_vram_helper_release_mm - Releases a device's instance of \
- &struct drm_vram_mm
- * @dev: the DRM device
- */
-void drm_vram_helper_release_mm(struct drm_device *dev)
-{
- if (!dev->vram_mm)
- return;
-
- drm_vram_mm_cleanup(dev->vram_mm);
- kfree(dev->vram_mm);
- dev->vram_mm = NULL;
-}
-EXPORT_SYMBOL(drm_vram_helper_release_mm);
-
-/*
- * Helpers for &struct file_operations
- */
-
-/**
- * drm_vram_mm_file_operations_mmap() - \
- Implements &struct file_operations.mmap()
- * @filp: the mapping's file structure
- * @vma: the mapping's memory area
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
-
- if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
- return -EINVAL;
-
- return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
-}
-EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
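
For reference, a minimal sketch of how a driver consumed the callback-based VRAM MM API deleted above (hypothetical mydrv_* names; after this series drivers drop the funcs argument and use DEFINE_DRM_GEM_FOPS() instead, as the hibmc conversion further down shows):

    #include <drm/drm_gem_vram_helper.h>
    #include <drm/drm_vram_mm_helper.h>

    static const struct file_operations mydrv_fops = {
            .owner = THIS_MODULE,
            /* expands to .mmap = drm_vram_mm_file_operations_mmap, etc. */
            DRM_VRAM_MM_FILE_OPERATIONS
    };

    static int mydrv_init_mm(struct drm_device *dev, u64 vram_base,
                             size_t vram_size)
    {
            struct drm_vram_mm *vmm;

            /* One VRAM MM instance per device, with GEM-VRAM callbacks. */
            vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size,
                                           &drm_gem_vram_mm_funcs);
            return PTR_ERR_OR_ZERO(vmm);
    }

    static void mydrv_fini_mm(struct drm_device *dev)
    {
            drm_vram_helper_release_mm(dev); /* cleanup + kfree(dev->vram_mm) */
    }
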
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 7e4e2959bf4f..32d9fac587f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -326,7 +326,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
lockdep_assert_held(&gpu->lock);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
link_target = etnaviv_cmdbuf_get_va(cmdbuf,
@@ -459,13 +459,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
+ buffer->user_size - 4);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
return_target,
etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
cmdbuf->vaddr);
- if (drm_debug & DRM_UT_DRIVER) {
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
cmdbuf->vaddr, cmdbuf->size, 0);
@@ -484,6 +484,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
link_target);
- if (drm_debug & DRM_UT_DRIVER)
+ if (drm_debug_enabled(DRM_UT_DRIVER))
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
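
The conversions above are mechanical: drm_debug_enabled() wraps the raw drm_debug bit test so call sites stop poking the module parameter directly. The helper's assumed shape (see include/drm/drm_print.h):

    /* Assumed shape of the new helper, not copied from the tree. */
    static inline bool drm_debug_enabled(unsigned int category)
    {
            return unlikely(drm_debug & category);
    }
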
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 3a0f0ba8c63a..1e6aa24bf45e 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -19,6 +19,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6926cee91b36..72726f2c7a9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -24,6 +24,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index b78e8c5ba553..f41d75923557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,6 +21,7 @@
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index bc1565f1822a..48159d5d2214 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <media/cec-notifier.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -852,6 +853,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector,
static void hdmi_connector_destroy(struct drm_connector *connector)
{
+ struct hdmi_context *hdata = connector_to_hdmi(connector);
+
+ cec_notifier_conn_unregister(hdata->notifier);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -935,6 +940,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
struct drm_connector *connector = &hdata->connector;
+ struct cec_connector_info conn_info;
int ret;
connector->interlace_allowed = true;
@@ -957,6 +963,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL,
+ &conn_info);
+ if (!hdata->notifier) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n");
+ }
+
return ret;
}
@@ -1528,8 +1543,8 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
mutex_unlock(&hdata->mutex);
cancel_delayed_work(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier,
- CEC_PHYS_ADDR_INVALID);
+ if (hdata->notifier)
+ cec_notifier_phys_addr_invalidate(hdata->notifier);
return;
}
@@ -2006,12 +2021,6 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
- hdata->notifier = cec_notifier_get(&pdev->dev);
- if (hdata->notifier == NULL) {
- ret = -ENOMEM;
- goto err_hdmiphy;
- }
-
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2023,7 +2032,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_register_audio_device(hdata);
if (ret)
- goto err_notifier_put;
+ goto err_rpm_disable;
ret = component_add(&pdev->dev, &hdmi_component_ops);
if (ret)
@@ -2034,8 +2043,7 @@ static int hdmi_probe(struct platform_device *pdev)
err_unregister_audio:
platform_device_unregister(hdata->audio.pdev);
-err_notifier_put:
- cec_notifier_put(hdata->notifier);
+err_rpm_disable:
pm_runtime_disable(dev);
err_hdmiphy:
@@ -2054,12 +2062,10 @@ static int hdmi_remove(struct platform_device *pdev)
struct hdmi_context *hdata = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&hdata->hotplug_work);
- cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
component_del(&pdev->dev, &hdmi_component_ops);
platform_device_unregister(hdata->audio.pdev);
- cec_notifier_put(hdata->notifier);
pm_runtime_disable(&pdev->dev);
if (!IS_ERR(hdata->reg_hdmi_en))
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7b24338fad3c..6cfdb95fef2f 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
int width = mode->hdisplay, height = mode->vdisplay, i;
- struct {
+ static const struct {
int hdisplay, vdisplay, htotal, vtotal, scan_val;
- } static const modes[] = {
+ } modes[] = {
{ 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
{ 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD },
{ 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD },
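
The mixer hunk only moves the storage-class specifiers: writing "} static const modes[]" is legal C, but compilers warn (-Wold-style-declaration) when static does not lead the declaration. In isolation, the fixed form reads:

    /* Specifiers first, then the anonymous struct type, then the array. */
    static const struct {
            int hdisplay, vdisplay, htotal, vtotal, scan_val;
    } modes[] = {
            { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD },
    };
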
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index a92fd6c70b09..82c972e9c024 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index f56852a503e8..8b784947ed3b 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct gma_clock_t clock;
+ memset(&clock, 0, sizeof(clock));
+
switch (refclk) {
case 27000:
if (target < 200000) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 03023fa0fb6f..f350ac1ead18 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
return;
}
- /*create a new connetor*/
+ /*create a new connector*/
dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
if (!dsi_connector) {
DRM_ERROR("No memory");
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 167c10767dd4..900e5499249d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit,
s32 freq_error, min_error = 100000;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.n = limit->n.min; clock.n <= limit->n.max;
@@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
+ memset(&clock, 0, sizeof(clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 35a3c5f0c38c..dfc5aef62f7b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -4,7 +4,8 @@ config DRM_HISI_HIBMC
depends on DRM && PCI && MMU && ARM64
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
-
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index cc4c41748cfb..6527a97f68a3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *state = plane->state;
u32 reg;
- int ret;
s64 gpu_addr = 0;
unsigned int line_l;
struct hibmc_drm_private *priv = plane->dev->dev_private;
@@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
hibmc_fb = to_hibmc_framebuffer(state->fb);
gbo = drm_gem_vram_of_gem(hibmc_fb->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- DRM_ERROR("failed to pin bo: %d", ret);
- return;
- }
gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- drm_gem_vram_unpin(gbo);
- return;
- }
+ if (WARN_ON_ONCE(gpu_addr < 0))
+ return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
@@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = {
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};
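
The atomic_update hunk above works only because pinning moved into the plane's prepare_fb hook: drm_gem_vram_plane_helper_prepare_fb() pins the BO to VRAM before the commit and the cleanup_fb helper unpins the old buffer afterwards, which is why a negative offset is now a driver bug rather than a recoverable error. A hedged sketch of that pairing (generic fb->obj[0] access assumed, not the exact helper source):

    static int vram_plane_prepare_fb(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
    {
            struct drm_gem_vram_object *gbo;

            if (!new_state->fb)
                    return 0;

            gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
            /* Pin to VRAM so the scanout offset stays valid for the commit. */
            return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
    }

    static void vram_plane_cleanup_fb(struct drm_plane *plane,
                                      struct drm_plane_state *old_state)
    {
            struct drm_gem_vram_object *gbo;

            if (!old_state->fb)
                    return;

            gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]);
            drm_gem_vram_unpin(gbo);
    }
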
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index c103005b0a33..2fd4ca91a62d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -22,15 +22,11 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
-static const struct file_operations hibmc_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 9f6e473e6295..21b684eab5c9 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -17,7 +17,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_print.h>
-#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
@@ -29,7 +28,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
vmm = drm_vram_helper_alloc_mm(dev,
pci_resource_start(dev->pdev, 0),
- hibmc->fb_size, &drm_gem_vram_mm_funcs);
+ hibmc->fb_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5bf8138941de..bdcf9c6ae9e9 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 8bcf0d199145..a839f78a4c8a 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -44,7 +44,7 @@ struct sil164_priv {
((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
#define sil164_dbg(client, format, ...) do { \
- if (drm_debug & DRM_UT_KMS) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
dev_printk(KERN_DEBUG, &client->dev, \
"%s: " format, __func__, ## __VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 8039fc0d83db..5b03fdd1eaa4 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -420,7 +420,8 @@ static int tda9950_probe(struct i2c_client *client,
priv->hdmi = glue->parent;
priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950",
- CEC_CAP_DEFAULTS,
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO,
CEC_MAX_LOG_ADDRS);
if (IS_ERR(priv->adap))
return PTR_ERR(priv->adap);
@@ -457,13 +458,14 @@ static int tda9950_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- priv->notify = cec_notifier_get(priv->hdmi);
+ priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL,
+ priv->adap);
if (!priv->notify)
return -ENOMEM;
ret = cec_register_adapter(priv->adap, priv->hdmi);
if (ret < 0) {
- cec_notifier_put(priv->notify);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
return ret;
}
@@ -473,8 +475,6 @@ static int tda9950_probe(struct i2c_client *client,
*/
devm_remove_action(dev, tda9950_cec_del, priv);
- cec_register_cec_notifier(priv->adap, priv->notify);
-
return 0;
}
@@ -482,8 +482,8 @@ static int tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
+ cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
- cec_notifier_put(priv->notify);
return 0;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 84c6d4c91c65..a63790d32d75 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -14,6 +14,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
@@ -805,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
tda998x_edid_delay_start(priv);
} else {
schedule_work(&priv->detect_work);
- cec_notifier_set_phys_addr(priv->cec_notify,
- CEC_PHYS_ADDR_INVALID);
+ cec_notifier_phys_addr_invalidate(
+ priv->cec_notify);
}
handled = true;
@@ -1790,8 +1791,7 @@ static void tda998x_destroy(struct device *dev)
i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
+ cec_notifier_conn_unregister(priv->cec_notify);
}
static int tda998x_create(struct device *dev)
@@ -1916,7 +1916,7 @@ static int tda998x_create(struct device *dev)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(dev);
+ priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2a77823b8e9a..e66c38332df4 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
@@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- if (used > 4 * 1024)
+ if (used < 0 || used > 4 * 1024)
used = 0;
sarea_priv->dirty = 0x7f;
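
Both i810 hunks harden a signed, userspace-controlled length: the old check only rejected values above 4 KiB, so a negative used slipped through and fed later pointer arithmetic. The pattern, in isolation (hypothetical helper name):

    /* Reject both oversized and negative userspace-supplied lengths. */
    static int i810_sanitize_used(int used)
    {
            if (used < 0 || used > 4 * 1024)
                    return 0; /* treat bogus input as "nothing to dispatch" */
            return used;
    }
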
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 0d21402945ab..ba9595960bbe 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -76,7 +76,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang to
- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
+ https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
for triaging.
If in doubt, say "Y".
@@ -105,11 +105,11 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
- bool "Enable Intel GVT-g graphics virtualization host support"
- depends on DRM_I915
- depends on 64BIT
- default n
- help
+ bool "Enable Intel GVT-g graphics virtualization host support"
+ depends on DRM_I915
+ depends on 64BIT
+ default n
+ help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
@@ -148,3 +148,9 @@ menu "drm/i915 Profile Guided Optimisation"
depends on DRM_I915
source "drivers/gpu/drm/i915/Kconfig.profile"
endmenu
+
+menu "drm/i915 Unstable Evolution"
+ visible if EXPERT && STAGING && BROKEN
+ depends on DRM_I915
+ source "drivers/gpu/drm/i915/Kconfig.unstable"
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1400fce39c58..438040ff0179 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -1,33 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_I915_WERROR
- bool "Force GCC to throw an error instead of a warning when compiling"
- # As this may inadvertently break the build, only allow the user
- # to shoot oneself in the foot iff they aim really hard
- depends on EXPERT
- # We use the dependency on !COMPILE_TEST to not be enabled in
- # allmodconfig or allyesconfig configurations
- depends on !COMPILE_TEST
- select HEADER_TEST
- default n
- help
- Add -Werror to the build flags for (and only for) i915.ko.
- Do not enable this unless you are writing code for the i915.ko module.
-
- Recommended for driver developers only.
-
- If in doubt, say "N".
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for (and only for) i915.ko.
+ Do not enable this unless you are writing code for the i915.ko module.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG
- bool "Enable additional driver debugging"
- depends on DRM_I915
- select DEBUG_FS
- select PREEMPT_COUNT
- select I2C_CHARDEV
- select STACKDEPOT
- select DRM_DP_AUX_CHARDEV
- select X86_MSR # used by igt/pm_rpm
- select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
- select DRM_DEBUG_MM if DRM=y
+ bool "Enable additional driver debugging"
+ depends on DRM_I915
+ select DEBUG_FS
+ select PREEMPT_COUNT
+ select I2C_CHARDEV
+ select STACKDEPOT
+ select DRM_DP_AUX_CHARDEV
+ select X86_MSR # used by igt/pm_rpm
+ select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
+ select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_SELFTEST
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
@@ -35,14 +34,14 @@ config DRM_I915_DEBUG
select DRM_I915_SELFTEST
select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_DEBUG_MMIO
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_MMIO
bool "Always insert extra checks around mmio access by default"
@@ -58,16 +57,16 @@ config DRM_I915_DEBUG_MMIO
If in doubt, say "N".
config DRM_I915_DEBUG_GEM
- bool "Insert extra checks into the GEM internals"
- default n
- depends on DRM_I915_WERROR
- help
- Enable extra sanity checks (including BUGs) along the GEM driver
- paths that may slow the system down and if hit hang the machine.
+ bool "Insert extra checks into the GEM internals"
+ default n
+ depends on DRM_I915_WERROR
+ help
+ Enable extra sanity checks (including BUGs) along the GEM driver
+ paths that may slow the system down and if hit hang the machine.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_ERRLOG_GEM
bool "Insert extra logging (very verbose) for common GEM errors"
@@ -110,41 +109,41 @@ config DRM_I915_TRACE_GTT
If in doubt, say "N".
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
- bool "Enable additional driver debugging for fence objects"
- depends on DRM_I915
- select DEBUG_OBJECTS
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for fence objects"
+ depends on DRM_I915
+ select DEBUG_OBJECTS
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SW_FENCE_CHECK_DAG
- bool "Enable additional driver debugging for detecting dependency cycles"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will catch some internal issues.
+ bool "Enable additional driver debugging for detecting dependency cycles"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_DEBUG_GUC
- bool "Enable additional driver debugging for GuC"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on extra driver debugging that may affect
- performance but will help resolve GuC related issues.
+ bool "Enable additional driver debugging for GuC"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will help resolve GuC related issues.
- Recommended for driver developers only.
+ Recommended for driver developers only.
- If in doubt, say "N".
+ If in doubt, say "N".
config DRM_I915_SELFTEST
bool "Enable selftests upon driver load"
@@ -177,15 +176,15 @@ config DRM_I915_SELFTEST_BROKEN
If in doubt, say "N".
config DRM_I915_LOW_LEVEL_TRACEPOINTS
- bool "Enable low level request tracing events"
- depends on DRM_I915
- default n
- help
- Choose this option to turn on low level request tracing events.
- This provides the ability to precisely monitor engine utilisation
- and also analyze the request dependency resolving timeline.
-
- If in doubt, say "N".
+ bool "Enable low level request tracing events"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on low level request tracing events.
+ This provides the ability to precisely monitor engine utilisation
+ and also analyze the request dependency resolving timeline.
+
+ If in doubt, say "N".
config DRM_I915_DEBUG_VBLANK_EVADE
bool "Enable extra debug warnings for vblank evasion"
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 48df8889a88a..1799537a3228 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -12,6 +12,29 @@ config DRM_I915_USERFAULT_AUTOSUSPEND
May be 0 to disable the extra delay and solely use the device level
runtime pm autosuspend delay tunable.
+config DRM_I915_HEARTBEAT_INTERVAL
+ int "Interval between heartbeat pulses (ms)"
+ default 2500 # milliseconds
+ help
+ The driver sends a periodic heartbeat down all active engines to
+ check the health of the GPU and undertake regular house-keeping of
+ internal driver state.
+
+ May be 0 to disable heartbeats and therefore disable automatic GPU
+ hang detection.
+
+config DRM_I915_PREEMPT_TIMEOUT
+ int "Preempt timeout (ms, jiffy granularity)"
+ default 100 # milliseconds
+ help
+ How long to wait (in milliseconds) for a preemption event to occur
+ when submitting a new context via execlists. If the current context
+ does not hit an arbitration point and yield to HW before the timer
+ expires, the HW will be reset to allow the more important context
+ to execute.
+
+ May be 0 to disable the timeout.
+
config DRM_I915_SPIN_REQUEST
int "Busywait for request completion (us)"
default 5 # microseconds
@@ -25,3 +48,29 @@ config DRM_I915_SPIN_REQUEST
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
+
+config DRM_I915_STOP_TIMEOUT
+ int "How long to wait for an engine to quiesce gracefully before reset (ms)"
+ default 100 # milliseconds
+ help
+ By stopping submission and sleeping for a short time before resetting
+ the GPU, we also allow the innocent contexts on the system to quiesce.
+ It is then less likely for a hanging context to cause collateral
+ damage as the system is reset in order to recover. The corollary is
+ that the reset itself may take longer and so be more disruptive to
+ interactive or low latency workloads.
+
+config DRM_I915_TIMESLICE_DURATION
+ int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
+ default 1 # milliseconds
+ help
+ When two user batches of equal priority are executing, we will
+ alternate execution of each batch to ensure forward progress of
+ all users. This is necessary in some cases where there may be
+ an implicit dependency between those batches that requires
+ concurrent execution in order for them to proceed, e.g. they
+ interact with each other via userspace semaphores. Each context
+ is scheduled for execution for the timeslice duration, before
+ switching to the next context.
+
+ May be 0 to disable timeslicing.
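
All four tunables above are expressed in milliseconds but act at jiffy granularity, so the effective timeout rounds up to the next scheduler tick (1 ms becomes 10 ms with CONFIG_HZ=100), and 0 uniformly means "disabled". A plausible consumer (hypothetical helper, not from this patch):

    #include <linux/jiffies.h>

    /* Convert a ms tunable to jiffies; 0 keeps the feature disabled. */
    static unsigned long profile_timeout_to_jiffies(unsigned int timeout_ms)
    {
            if (!timeout_ms)
                    return 0;
            return msecs_to_jiffies(timeout_ms);
    }
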
diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable
new file mode 100644
index 000000000000..0c2276155c2b
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.unstable
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_I915_UNSTABLE
+ bool "Enable unstable API for early prototype development"
+ depends on EXPERT
+ depends on STAGING
+ depends on BROKEN # should never be enabled by distros!
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Enable prototype uAPI under general discussion before they are
+ finalized. Such prototypes may be withdrawn or substantially
+ changed before release. They are only enabled here so that a wide
+ number of interested parties (userspace driver developers) can
+ verify that the uAPI meet their expectations. These uAPI should
+ never be used in production.
+
+ Recommended for driver developers _only_.
+
+ If in the slightest bit of doubt, say "N".
+
+config DRM_I915_UNSTABLE_FAKE_LMEM
+ bool "Enable the experimental fake lmem"
+ depends on DRM_I915_UNSTABLE
+ default n
+ help
+ Convert some system memory into a fake local memory region for
+ testing.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2587ea834f06..90dcf09f52cc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,10 +46,12 @@ i915-y += i915_drv.o \
i915_pci.o \
i915_scatterlist.o \
i915_suspend.o \
+ i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
intel_csr.o \
intel_device_info.o \
+ intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
@@ -76,19 +78,24 @@ gt-y += \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
gt/intel_engine_cs.o \
- gt/intel_engine_pool.o \
+ gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
+ gt/intel_engine_pool.o \
gt/intel_engine_user.o \
gt/intel_gt.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
- gt/intel_hangcheck.o \
+ gt/intel_gt_requests.o \
+ gt/intel_llc.o \
gt/intel_lrc.o \
+ gt/intel_mocs.o \
+ gt/intel_rc6.o \
gt/intel_renderstate.o \
gt/intel_reset.o \
- gt/intel_ringbuffer.o \
- gt/intel_mocs.o \
+ gt/intel_ring.o \
+ gt/intel_ring_submission.o \
+ gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
gt/intel_workarounds.o
@@ -114,10 +121,12 @@ gem-y += \
gem/i915_gem_internal.o \
gem/i915_gem_object.o \
gem/i915_gem_object_blt.o \
+ gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
+ gem/i915_gem_region.o \
gem/i915_gem_shmem.o \
gem/i915_gem_shrinker.o \
gem/i915_gem_stolen.o \
@@ -141,6 +150,7 @@ i915-y += \
i915_scheduler.o \
i915_trace_points.o \
i915_vma.o \
+ intel_region_lmem.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
@@ -172,6 +182,7 @@ i915-y += \
display/intel_display_power.o \
display/intel_dpio_phy.o \
display/intel_dpll_mgr.o \
+ display/intel_dsb.o \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
@@ -182,7 +193,8 @@ i915-y += \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
- display/intel_tc.o
+ display/intel_tc.o \
+ display/intel_vga.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -235,7 +247,8 @@ i915-y += \
oa/i915_oa_cflgt2.o \
oa/i915_oa_cflgt3.o \
oa/i915_oa_cnl.o \
- oa/i915_oa_icl.o
+ oa/i915_oa_icl.o \
+ oa/i915_oa_tgl.o
i915-y += i915_perf.o
# Post-mortem debug and GPU hang state capture
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 6e398c33a524..325df29b0447 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1584,7 +1584,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->type = INTEL_OUTPUT_DSI;
encoder->cloneable = 0;
- encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->pipe_mask = ~0;
encoder->power_domain = POWER_DOMAIN_PORT_DSI;
encoder->get_power_domains = gen11_dsi_get_power_domains;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 7cb2257bbb93..c2875b10adf9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -199,7 +199,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
- crtc_state->fb_changed = false;
crtc_state->fifo_changed = false;
crtc_state->preload_luts = false;
crtc_state->wm.need_postvbl_update = false;
@@ -265,10 +264,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
*/
mode = PS_SCALER_MODE_NORMAL;
} else {
+ struct intel_plane *linked =
+ plane_state->planar_linked_plane;
+
mode = PS_SCALER_MODE_PLANAR;
- if (plane_state->linked_plane)
- mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ if (linked)
+ mode |= PS_PLANE_Y_SEL(linked->id);
}
} else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
mode = PS_SCALER_MODE_NORMAL;
@@ -372,6 +374,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
*/
if (!plane) {
struct drm_plane_state *state;
+
+ /*
+ * GLK+ scalers don't have a HQ mode so it
+ * isn't necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ continue;
+
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
@@ -379,13 +390,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane->base.id);
return PTR_ERR(state);
}
-
- /*
- * the plane is added after plane checks are run,
- * but since this plane is unchanged just do the
- * minimum required validation.
- */
- crtc_state->base.planes_changed = true;
}
intel_plane = to_intel_plane(plane);
@@ -426,6 +430,13 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
struct intel_atomic_state *state = to_intel_atomic_state(s);
drm_atomic_state_default_clear(&state->base);
state->dpll_set = state->modeset = false;
+ state->global_state_changed = false;
+ state->active_pipes = 0;
+ memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
+ memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
+ memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
+ memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
+ state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -439,3 +450,40 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+
+int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ state->global_state_changed = true;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ return 0;
+}
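
Both helpers flag the transaction as touching device-global rather than per-CRTC configuration. Locking every CRTC mutex protects such state even when no pipe is active, which is how the audio hunk further down guards cdclk.force_min_cdclk; serializing additionally pulls every CRTC state into the commit. A hedged caller sketch (mydrv_update_global is hypothetical):

    /* Sketch only: mutate some device-global field under the global lock. */
    static int mydrv_update_global(struct intel_atomic_state *state)
    {
            int ret;

            ret = intel_atomic_lock_global_state(state);
            if (ret)
                    return ret;

            /* ...update dev_priv-global configuration here... */

            return drm_atomic_commit(&state->base);
    }
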
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 58065d3161a3..49d5cb1b9e0a 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -16,6 +16,7 @@ struct drm_crtc_state;
struct drm_device;
struct drm_i915_private;
struct drm_property;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -46,4 +47,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
+int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index d1fcdf206da4..98f557a9f8ee 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -138,18 +138,58 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
return cpp * crtc_state->pixel_rate;
}
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ struct intel_crtc_state *crtc_state;
+
+ if (!plane_state->base.visible || !plane->min_cdclk)
+ return false;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(crtc_state, plane_state);
+
+ /*
+ * Does the cdclk need to be bumped up?
+ *
+ * Note: we obviously need to be called before the new
+ * cdclk frequency is calculated so state->cdclk.logical
+ * hasn't been populated yet. Hence we look at the old
+ * cdclk state under dev_priv->cdclk.logical. This is
+ * safe as long as we hold at least one crtc mutex (which
+ * must be true since we have crtc_state).
+ */
+ if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id],
+ dev_priv->cdclk.logical.cdclk);
+ return true;
+ }
+
+ return false;
+}
+
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
+ const struct drm_framebuffer *fb = new_plane_state->base.fb;
int ret;
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
new_crtc_state->data_rate[plane->id] = 0;
+ new_crtc_state->min_cdclk[plane->id] = 0;
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -164,11 +204,11 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- is_planar_yuv_format(new_plane_state->base.fb->format->format))
+ drm_format_info_is_yuv_semiplanar(fb->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ fb->format->format == DRM_FORMAT_C8)
new_crtc_state->c8_planes |= BIT(plane->id);
if (new_plane_state->base.visible || old_plane_state->base.visible)
@@ -194,14 +234,11 @@ get_crtc_from_states(const struct intel_plane_state *old_plane_state,
return NULL;
}
-static int intel_plane_atomic_check(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_atomic_state *state =
- to_intel_atomic_state(_new_plane_state->state);
struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
+ intel_atomic_get_new_plane_state(state, plane);
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct intel_crtc *crtc =
@@ -320,9 +357,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
if (new_plane_state->base.visible) {
intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else if (new_plane_state->slave) {
+ } else if (new_plane_state->planar_slave) {
struct intel_plane *master =
- new_plane_state->linked_plane;
+ new_plane_state->planar_linked_plane;
/*
* We update the slave plane from this function because
@@ -368,5 +405,4 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
- .atomic_check = intel_plane_atomic_check,
};
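
intel_plane_calc_min_cdclk() stores the plane's requirement in crtc_state->min_cdclk[] and returns true when it exceeds the currently computed logical cdclk. A plausible aggregation over a state (assumed call pattern, not taken from this patch) looks like:

    static bool planes_need_cdclk_recalc(struct intel_atomic_state *state)
    {
            struct intel_plane *plane;
            struct intel_plane_state *plane_state;
            bool recalc = false;
            int i;

            for_each_new_intel_plane_in_state(state, plane, plane_state, i)
                    recalc |= intel_plane_calc_min_cdclk(state, plane);

            return recalc; /* caller then recomputes the logical cdclk */
    }
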
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index cb7ef4f9eafd..e61e9a82aadf 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -41,9 +41,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
+int intel_plane_atomic_check(struct intel_atomic_state *state,
+ struct intel_plane *plane);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state);
+bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ddcccf4408c3..85e6b2bbb34f 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -28,6 +28,7 @@
#include <drm/i915_component.h>
#include "i915_drv.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -560,8 +561,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
- port_name(port), pipe_name(pipe));
+ DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
if (WARN_ON(port == PORT_A))
return;
@@ -609,8 +611,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
- port_name(port), pipe_name(pipe), drm_eld_size(eld));
+ DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
if (WARN_ON(port == PORT_A))
return;
@@ -816,13 +819,8 @@ retry:
to_intel_atomic_state(state)->cdclk.force_min_cdclk =
enable ? 2 * 96000 : 0;
- /*
- * Protects dev_priv->cdclk.force_min_cdclk
- * Need to lock this here in case we have no active pipes
- * and thus wouldn't lock it during the commit otherwise.
- */
- ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
- &ctx);
+ /* Protects dev_priv->cdclk.force_min_cdclk */
+ ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
if (!ret)
ret = drm_atomic_commit(state);
@@ -850,11 +848,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
- if (dev_priv->audio_power_refcount++ == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (dev_priv->audio_power_refcount++ == 0) {
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
+ DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
+ /* Force CDCLK to 2*BCLK as long as we need audio powered. */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, true);
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE(AUD_PIN_BUF_CTL,
+ (I915_READ(AUD_PIN_BUF_CTL) |
+ AUD_PIN_BUF_ENABLE));
+ }
+
return ret;
}
@@ -865,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev,
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--dev_priv->audio_power_refcount == 0)
- if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
@@ -1114,6 +1124,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
return;
}
+ if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
+ dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
+ DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
+ }
+
dev_priv->audio_component_registered = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3250c1b8dcca..63c1bd4c2954 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1399,6 +1399,7 @@ static enum port dvo_port_to_port(u8 dvo_port)
[PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
[PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
[PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1},
};
enum port port;
int i;
@@ -1625,7 +1626,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
expected_size = 37;
} else if (bdb->version <= 215) {
expected_size = 38;
- } else if (bdb->version <= 216) {
+ } else if (bdb->version <= 229) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
@@ -1843,7 +1844,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (!HAS_DISPLAY(dev_priv)) {
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -2258,6 +2259,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
case DP_AUX_F:
aux_ch = AUX_CH_F;
break;
+ case DP_AUX_G:
+ aux_ch = AUX_CH_G;
+ break;
default:
MISSING_CASE(info->alternate_aux_channel);
aux_ch = AUX_CH_A;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4969189e620f..98f064828a57 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2016 Intel Corporation
+ * Copyright © 2016-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,6 +35,7 @@
#include <drm/i915_drm.h>
struct drm_i915_private;
+enum port;
enum intel_backlight_type {
INTEL_BACKLIGHT_PMIC,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 688858ebe4d0..22e83f857de8 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -35,28 +35,54 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- switch (val & 0xf) {
- case 0:
- qi->dram_type = INTEL_DRAM_DDR4;
- break;
- case 1:
- qi->dram_type = INTEL_DRAM_DDR3;
- break;
- case 2:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- case 3:
- qi->dram_type = INTEL_DRAM_LPDDR3;
- break;
- default:
- MISSING_CASE(val & 0xf);
- break;
+ if (IS_GEN(dev_priv, 12)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else if (IS_GEN(dev_priv, 11)) {
+ switch (val & 0xf) {
+ case 0:
+ qi->dram_type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ qi->dram_type = INTEL_DRAM_DDR3;
+ break;
+ case 2:
+ qi->dram_type = INTEL_DRAM_LPDDR3;
+ break;
+ case 3:
+ qi->dram_type = INTEL_DRAM_LPDDR4;
+ break;
+ default:
+ MISSING_CASE(val & 0xf);
+ break;
+ }
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
}
qi->num_channels = (val & 0xf0) >> 4;
qi->num_points = (val & 0xf00) >> 8;
- qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
+ if (IS_GEN(dev_priv, 12))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
+ else if (IS_GEN(dev_priv, 11))
+ qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;
return 0;
}
@@ -132,20 +158,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
}
struct intel_sa_info {
- u8 deburst, mpagesize, deprogbwlimit, displayrtids;
+ u16 displayrtids;
+ u8 deburst, deprogbwlimit;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
- .mpagesize = 16,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
};
-static int icl_get_bw_info(struct drm_i915_private *dev_priv)
+static const struct intel_sa_info tgl_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 34, /* GB/s */
+ .displayrtids = 256,
+};
+
+static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
- const struct intel_sa_info *sa = &icl_sa_info;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels;
int deinterleave;
@@ -233,14 +264,16 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
void intel_bw_init_hw(struct drm_i915_private *dev_priv)
{
- if (IS_GEN(dev_priv, 11))
- icl_get_bw_info(dev_priv);
+ if (IS_GEN(dev_priv, 12))
+ icl_get_bw_info(dev_priv, &tgl_sa_info);
+ else if (IS_GEN(dev_priv, 11))
+ icl_get_bw_info(dev_priv, &icl_sa_info);
}
static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
int num_planes)
{
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11)
/*
* FIXME with SAGV disabled maybe we can assume
* point 1 will always be used? Seems to match
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index d0bc42e5039c..0caef2592a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_atomic.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
@@ -1161,28 +1162,88 @@ static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static int bxt_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 576000)
- return 624000;
- else if (min_cdclk > 384000)
- return 576000;
- else if (min_cdclk > 288000)
- return 384000;
- else if (min_cdclk > 144000)
- return 288000;
- else
- return 144000;
+static const struct intel_cdclk_vals bxt_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
+ { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
+ {}
+};
+
+static const struct intel_cdclk_vals glk_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
+ { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
+ {}
+};
+
+static const struct intel_cdclk_vals cnl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 },
+ { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 },
+
+ { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 },
+ { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 },
+ {}
+};
+
+static const struct intel_cdclk_vals icl_cdclk_table[] = {
+ { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
+ { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
+ { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
+ { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
+ { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
+ { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },
+
+ { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
+ { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
+ { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
+ { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
+
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+ { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+ {}
+};
+
+static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
+{
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk >= min_cdclk)
+ return table[i].cdclk;
+
+ WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
-static int glk_calc_cdclk(int min_cdclk)
+static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- if (min_cdclk > 158400)
- return 316800;
- else if (min_cdclk > 79200)
- return 158400;
- else
- return 79200;
+ const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ int i;
+
+ if (cdclk == dev_priv->cdclk.hw.bypass)
+ return 0;
+
+ for (i = 0; table[i].refclk; i++)
+ if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ table[i].cdclk == cdclk)
+ return dev_priv->cdclk.hw.ref * table[i].ratio;
+
+ WARN(1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
+ return 0;
}
static u8 bxt_calc_voltage_level(int cdclk)
@@ -1190,69 +1251,99 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static u8 cnl_calc_voltage_level(int cdclk)
{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk > 336000)
+ return 2;
+ else if (cdclk > 168000)
+ return 1;
+ else
return 0;
+}
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 144000:
- case 288000:
- case 384000:
- case 576000:
- ratio = 60;
- break;
- case 624000:
- ratio = 65;
- break;
- }
+static u8 icl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
- return dev_priv->cdclk.hw.ref * ratio;
+static u8 ehl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
}
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- int ratio;
+ if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_state->ref = 24000;
+ else
+ cdclk_state->ref = 19200;
+}
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+static void icl_readout_refclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
- switch (cdclk) {
+ switch (dssm) {
default:
- MISSING_CASE(cdclk);
+ MISSING_CASE(dssm);
/* fall through */
- case 79200:
- case 158400:
- case 316800:
- ratio = 33;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
+ cdclk_state->ref = 24000;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
+ cdclk_state->ref = 19200;
+ break;
+ case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
+ cdclk_state->ref = 38400;
break;
}
-
- return dev_priv->cdclk.hw.ref * ratio;
}
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
{
- u32 val;
+ u32 val, ratio;
- cdclk_state->ref = 19200;
- cdclk_state->vco = 0;
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_readout_refclk(dev_priv, cdclk_state);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_readout_refclk(dev_priv, cdclk_state);
+ else
+ cdclk_state->ref = 19200;
val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
+ (val & BXT_DE_PLL_LOCK) == 0) {
+ /*
+	 * CDCLK PLL is disabled; the VCO/ratio doesn't matter, but
+ * setting it to zero is a way to signal that.
+ */
+ cdclk_state->vco = 0;
return;
+ }
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
+ /*
+	 * CNL+ have the ratio directly in the PLL enable register, gen9lp has
+ * it in a separate PLL control register.
+ */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
+ else
+ ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- val = I915_READ(BXT_DE_PLL_CTL);
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+ cdclk_state->vco = ratio * cdclk_state->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1261,12 +1352,19 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
u32 divider;
int div;
- bxt_de_pll_update(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_state);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ if (INTEL_GEN(dev_priv) >= 12)
+ cdclk_state->bypass = cdclk_state->ref / 2;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ cdclk_state->bypass = 50000;
+ else
+ cdclk_state->bypass = cdclk_state->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_state->vco == 0) {
+ cdclk_state->cdclk = cdclk_state->bypass;
goto out;
+ }
divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
@@ -1275,13 +1373,15 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
div = 8;
break;
default:
@@ -1297,7 +1397,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
- bxt_calc_voltage_level(cdclk_state->cdclk);
+ dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1332,259 +1432,6 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
- u32 val, divider;
- int ret;
-
- /* cdclk = vco / 2 / div{1,1.5,2,4} */
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
- /* fall through */
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- case 3:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- }
-
- /*
- * Inform power controller of upcoming frequency change. BSpec
- * requires us to wait up to 150usec, but that leads to timeouts;
- * the 2ms used here is based on experiment.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- bxt_de_pll_enable(dev_priv, vco);
-
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
-
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
-
- /*
- * The timeout isn't specified, the 2ms used here is based on
- * experiment.
- * FIXME: Waiting for the request completion could be delayed until
- * the next PCODE request based on BSpec.
- */
- ret = sandybridge_pcode_write_timeout(dev_priv,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level, 150, 2);
- if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- intel_update_cdclk(dev_priv);
-}
-
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
- u32 cdctl, expected;
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
- * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
- * so sanitize this register.
- */
- cdctl = I915_READ(CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (dev_priv->cdclk.hw.cdclk >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
-
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
-
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state;
-
- bxt_sanitize_cdclk(dev_priv);
-
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
- return;
-
- cdclk_state = dev_priv->cdclk.hw;
-
- /*
- * FIXME:
- * - The initial CDCLK needs to be read from VBT.
- * Need to make this change after VBT has changes for BXT.
- */
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk_state.cdclk = glk_calc_cdclk(0);
- cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
- } else {
- cdclk_state.cdclk = bxt_calc_cdclk(0);
- cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
- }
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
-
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
-}
-
-static int cnl_calc_cdclk(int min_cdclk)
-{
- if (min_cdclk > 336000)
- return 528000;
- else if (min_cdclk > 168000)
- return 336000;
- else
- return 168000;
-}
-
-static u8 cnl_calc_voltage_level(int cdclk)
-{
- if (cdclk > 336000)
- return 2;
- else if (cdclk > 168000)
- return 1;
- else
- return 0;
-}
-
-static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
- else
- cdclk_state->ref = 19200;
-
- cdclk_state->vco = 0;
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
- return;
-
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
-
- cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref;
-}
-
-static void cnl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 divider;
- int div;
-
- cnl_cdclk_pll_update(dev_priv, cdclk_state);
-
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
-
- if (cdclk_state->vco == 0)
- goto out;
-
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
- switch (divider) {
- case BXT_CDCLK_CD2X_DIV_SEL_1:
- div = 2;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_2:
- div = 4;
- break;
- default:
- MISSING_CASE(divider);
- return;
- }
-
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
-
- out:
- /*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
- */
- cdclk_state->voltage_level =
- cnl_calc_voltage_level(cdclk_state->cdclk);
-}
-
static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -1618,7 +1465,27 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
dev_priv->cdclk.hw.vco = vco;
}
-static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
+static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (pipe == INVALID_PIPE)
+ return TGL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return TGL_CDCLK_CD2X_PIPE(pipe);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ if (pipe == INVALID_PIPE)
+ return ICL_CDCLK_CD2X_PIPE_NONE;
+ else
+ return ICL_CDCLK_CD2X_PIPE(pipe);
+ } else {
+ if (pipe == INVALID_PIPE)
+ return BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ return BXT_CDCLK_CD2X_PIPE(pipe);
+ }
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state,
enum pipe pipe)
{
@@ -1627,17 +1494,28 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
u32 val, divider;
int ret;
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ /* Inform power controller of upcoming frequency change. */
+ if (INTEL_GEN(dev_priv) >= 10)
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
+
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
- /* cdclk = vco / 2 / div{1,2} */
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
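+	/* e.g. vco = 1305600 and cdclk = 652800 -> ratio 2 -> CD2X divider 1 */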
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
@@ -1646,67 +1524,87 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
+ case 3:
+ WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
+ case 8:
+ WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+ break;
}
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
+ if (INTEL_GEN(dev_priv) >= 10) {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_disable(dev_priv);
- val = divider | skl_cdclk_decimal(cdclk);
- if (pipe == INVALID_PIPE)
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- else
- val |= BXT_CDCLK_CD2X_PIPE(pipe);
- I915_WRITE(CDCLK_CTL, val);
+ if (dev_priv->cdclk.hw.vco != vco)
+ cnl_cdclk_pll_enable(dev_priv, vco);
- if (pipe != INVALID_PIPE)
- intel_wait_for_vblank(dev_priv, pipe);
+ } else {
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /* inform PCU of the change */
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ if (dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
+ }
- intel_update_cdclk(dev_priv);
+ val = divider | skl_cdclk_decimal(cdclk) |
+ bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
+ if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
-static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
+ if (INTEL_GEN(dev_priv) >= 10) {
+ ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_state->voltage_level);
+ } else {
+ /*
+ * The timeout isn't specified, the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed
+ * until the next PCODE request based on BSpec.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level,
+ 150, 2);
+ }
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 168000:
- case 336000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28;
- break;
- case 528000:
- ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44;
- break;
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
}
- return dev_priv->cdclk.hw.ref * ratio;
+ intel_update_cdclk(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ /*
+ * Can't read out the voltage level :(
+ * Let's just assume everything is as expected.
+ */
+ dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
-static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
u32 cdctl, expected;
+ int cdclk, vco;
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -1727,262 +1625,65 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* dividers both synching to an active pipe, or asynchronously
* (PIPE_NONE).
*/
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
- /* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ /* Make sure this is a legal cdclk value for the platform */
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
+ if (cdclk != dev_priv->cdclk.hw.cdclk)
+ goto sanitize;
- /* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
-}
+ /* Make sure the VCO is correct for the cdclk */
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
+ if (vco != dev_priv->cdclk.hw.vco)
+ goto sanitize;
-static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
-{
- static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
- static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
- const int *ranges;
- int len, i;
+ expected = skl_cdclk_decimal(cdclk);
- switch (ref) {
- default:
- MISSING_CASE(ref);
- /* fall through */
- case 24000:
- ranges = ranges_24;
- len = ARRAY_SIZE(ranges_24);
- break;
- case 19200:
- case 38400:
- ranges = ranges_19_38;
- len = ARRAY_SIZE(ranges_19_38);
+ /* Figure out what CD2X divider we should be using for this cdclk */
+ switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco,
+ dev_priv->cdclk.hw.cdclk)) {
+ case 2:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1;
break;
- }
-
- for (i = 0; i < len; i++) {
- if (min_cdclk <= ranges[i])
- return ranges[i];
- }
-
- WARN_ON(min_cdclk > ranges[len - 1]);
- return ranges[len - 1];
-}
-
-static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
-
- if (cdclk == dev_priv->cdclk.hw.bypass)
- return 0;
-
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- /* fall through */
- case 172800:
- case 307200:
- case 556800:
- case 652800:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400);
+ case 3:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
- case 180000:
- case 312000:
- case 552000:
- case 648000:
- WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ case 4:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_2;
break;
- case 192000:
- WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
- dev_priv->cdclk.hw.ref != 38400 &&
- dev_priv->cdclk.hw.ref != 24000);
+ case 8:
+ expected |= BXT_CDCLK_CD2X_DIV_SEL_4;
break;
- }
-
- ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
-
- return dev_priv->cdclk.hw.ref * ratio;
-}
-
-static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
- enum pipe pipe)
-{
- unsigned int cdclk = cdclk_state->cdclk;
- unsigned int vco = cdclk_state->vco;
- int ret;
-
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
- return;
- }
-
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_disable(dev_priv);
-
- if (dev_priv->cdclk.hw.vco != vco)
- cnl_cdclk_pll_enable(dev_priv, vco);
-
- /*
- * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
- * divider here synchronized to a pipe while CDCLK is on, nor will we
- * need the corresponding vblank wait.
- */
- I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
- skl_cdclk_decimal(cdclk));
-
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
-
- intel_update_cdclk(dev_priv);
-
- /*
- * Can't read out the voltage level :(
- * Let's just assume everything is as expected.
- */
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
-}
-
-static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
-{
- if (IS_ELKHARTLAKE(dev_priv)) {
- if (cdclk > 312000)
- return 2;
- else if (cdclk > 180000)
- return 1;
- else
- return 0;
- } else {
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
- }
-}
-
-static void icl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
-{
- u32 val;
-
- cdclk_state->bypass = 50000;
-
- val = I915_READ(SKL_DSSM);
- switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
default:
- MISSING_CASE(val);
- /* fall through */
- case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
- break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
- break;
- case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
- break;
- }
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
- (val & BXT_DE_PLL_LOCK) == 0) {
- /*
- * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
- * setting it to zero is a way to signal that.
- */
- cdclk_state->vco = 0;
- cdclk_state->cdclk = cdclk_state->bypass;
- goto out;
+ goto sanitize;
}
- cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
-
- val = I915_READ(CDCLK_CTL);
- WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
-
- cdclk_state->cdclk = cdclk_state->vco / 2;
-
-out:
/*
- * Can't read this out :( Let's assume it's
- * at least what the CDCLK frequency requires.
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
*/
- cdclk_state->voltage_level =
- icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
-}
-
-static void icl_init_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state sanitized_state;
- u32 val;
-
- /* This sets dev_priv->cdclk.hw. */
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
-
- /* This means CDCLK disabled. */
- if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
- goto sanitize;
-
- val = I915_READ(CDCLK_CTL);
-
- if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0)
- goto sanitize;
-
- if ((val & CDCLK_FREQ_DECIMAL_MASK) !=
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk))
- goto sanitize;
+ if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return;
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
sanitize:
DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- sanitized_state.ref = dev_priv->cdclk.hw.ref;
- sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
- sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
- sanitized_state.cdclk);
- sanitized_state.voltage_level =
- icl_calc_voltage_level(dev_priv,
- sanitized_state.cdclk);
-
- icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
-}
-
-static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
-
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
- cdclk_state.cdclk);
+ /* force cdclk programming */
+ dev_priv->cdclk.hw.cdclk = 0;
- icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ /* force full PLL disable + enable */
+ dev_priv->cdclk.hw.vco = -1;
}
-static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
- cnl_sanitize_cdclk(dev_priv);
+ bxt_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk.hw.cdclk != 0 &&
dev_priv->cdclk.hw.vco != 0)
@@ -1990,22 +1691,29 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cnl_calc_cdclk(0);
- cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from VBT.
+ * Need to make this change after VBT has changes for BXT.
+ */
+ cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
- cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_state.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
/**
@@ -2019,14 +1727,10 @@ static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_cdclk_init(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_init_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_init_cdclk(i915);
+ if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+ bxt_init_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_init_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_init_cdclk(i915);
}
/**
@@ -2038,14 +1742,10 @@ void intel_cdclk_init(struct drm_i915_private *i915)
*/
void intel_cdclk_uninit(struct drm_i915_private *i915)
{
- if (INTEL_GEN(i915) >= 11)
- icl_uninit_cdclk(i915);
- else if (IS_CANNONLAKE(i915))
- cnl_uninit_cdclk(i915);
+ if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
else if (IS_GEN9_BC(i915))
skl_uninit_cdclk(i915);
- else if (IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
}
/**
@@ -2073,9 +1773,9 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
* Returns:
* True if the CDCLK states require just a cd2x divider update, false if not.
*/
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -2094,8 +1794,8 @@ bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
* Returns:
* True if the CDCLK states don't match, false if they do.
*/
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
@@ -2200,9 +1900,11 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
intel_set_cdclk(dev_priv, new_state, pipe);
}
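+/*
+ * Minimum cdclk for a given pixel rate; e.g. on GLK and gen10+ a
+ * 594000 kHz pixel rate needs at least DIV_ROUND_UP(594000, 2) =
+ * 297000 kHz, while older platforms need the full pixel rate or more.
+ */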
-static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
- int pixel_rate)
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int pixel_rate = crtc_state->pixel_rate;
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
else if (IS_GEN(dev_priv, 9) ||
@@ -2210,10 +1912,25 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
return DIV_ROUND_UP(pixel_rate * 100, 95);
+ else if (crtc_state->double_wide)
+ return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
else
return DIV_ROUND_UP(pixel_rate * 100, 90);
}
+static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+ int min_cdclk = 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
+ min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);
+
+ return min_cdclk;
+}
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
@@ -2223,7 +1940,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (!crtc_state->base.enable)
return 0;
- min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
+ min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
@@ -2282,6 +1999,9 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
IS_GEMINILAKE(dev_priv))
min_cdclk = max(158400, min_cdclk);
+ /* Account for additional needs from the planes */
+ min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
@@ -2303,11 +2023,20 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
sizeof(state->min_cdclk));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
+ if (state->min_cdclk[i] == min_cdclk)
+ continue;
+
state->min_cdclk[i] = min_cdclk;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_cdclk = state->cdclk.force_min_cdclk;
@@ -2318,6 +2047,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
}
/*
+ * Account for port clock min voltage level requirements.
+ * This only really does something on CNL+ but can be
+ * called on earlier platforms as well.
+ *
 * Note that this function assumes that 0 is
* the lowest voltage value, and higher values
* correspond to increasingly higher voltages.
@@ -2326,7 +2059,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -2339,11 +2072,21 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state)
sizeof(state->min_voltage_level));
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret;
+
if (crtc_state->base.enable)
- state->min_voltage_level[i] =
- crtc_state->min_voltage_level;
+ min_voltage_level = crtc_state->min_voltage_level;
else
- state->min_voltage_level[i] = 0;
+ min_voltage_level = 0;
+
+ if (state->min_voltage_level[i] == min_voltage_level)
+ continue;
+
+ state->min_voltage_level[i] = min_voltage_level;
+
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
}
min_voltage_level = 0;
@@ -2369,7 +2112,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2400,7 +2143,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
state->cdclk.actual.cdclk = cdclk;
@@ -2470,7 +2213,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_crtcs) {
+ if (!state->active_pipes) {
cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
state->cdclk.actual.vco = vco;
@@ -2487,38 +2230,33 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ int min_cdclk, min_voltage_level, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ min_voltage_level = bxt_compute_min_voltage_level(state);
+ if (min_voltage_level < 0)
+ return min_voltage_level;
+
+ cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
- bxt_calc_voltage_level(cdclk);
-
- if (!state->active_crtcs) {
- if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = glk_de_pll_vco(dev_priv, cdclk);
- } else {
- cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = bxt_de_pll_vco(dev_priv, cdclk);
- }
+ max_t(int, min_voltage_level,
+ dev_priv->display.calc_voltage_level(cdclk));
+
+ if (!state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
- bxt_calc_voltage_level(cdclk);
+ dev_priv->display.calc_voltage_level(cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2526,70 +2264,138 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
-static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int min_cdclk, cdclk, vco;
+ struct intel_crtc *crtc;
- min_cdclk = intel_compute_min_cdclk(state);
- if (min_cdclk < 0)
- return min_cdclk;
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
- cdclk = cnl_calc_cdclk(min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(cnl_calc_voltage_level(cdclk),
- cnl_compute_min_voltage_level(state));
+ if (!crtc_state->base.active ||
+ drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ continue;
- if (!state->active_crtcs) {
- cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk);
- vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
+ crtc_state->base.mode_changed = true;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- cnl_calc_voltage_level(cdclk);
- } else {
- state->cdclk.actual = state->cdclk.logical;
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base,
+ &crtc->base);
+ if (ret)
+ return ret;
+
+ crtc_state->update_planes |= crtc_state->active_planes;
}
return 0;
}
-static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- unsigned int ref = state->cdclk.logical.ref;
- int min_cdclk, cdclk, vco;
+ int min_cdclk;
+ /*
+ * We can't change the cdclk frequency, but we still want to
+ * check that the required minimum frequency doesn't exceed
+ * the actual cdclk frequency.
+ */
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
- cdclk = icl_calc_cdclk(min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ return 0;
+}
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
- max(icl_calc_voltage_level(dev_priv, cdclk),
- cnl_compute_min_voltage_level(state));
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum pipe pipe;
+ int ret;
- if (!state->active_crtcs) {
- cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref);
- vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
+ ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
- icl_calc_voltage_level(dev_priv, cdclk);
+ /*
+	 * Writes to dev_priv->cdclk.{actual,logical} must be protected
+	 * by holding all the crtc mutexes even if we don't end up
+	 * touching the hardware.
+ */
+ if (intel_cdclk_changed(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /*
+ * Also serialize commits across all crtcs
+ * if the actual hw needs to be poked.
+ */
+ ret = intel_atomic_serialize_global_state(state);
+ if (ret)
+ return ret;
+ } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
+ &state->cdclk.logical)) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
+ return ret;
} else {
- state->cdclk.actual = state->cdclk.logical;
+ return 0;
+ }
+
+ if (is_power_of_2(state->active_pipes) &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+
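+		/*
+		 * Exactly one bit is set in active_pipes, e.g. BIT(PIPE_B),
+		 * so ilog2() recovers that pipe and the cd2x update can be
+		 * synchronized to it.
+		 */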
+ pipe = ilog2(state->active_pipes);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (drm_atomic_crtc_needs_modeset(&crtc_state->base))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
+ if (pipe != INVALID_PIPE) {
+ state->cdclk.pipe = pipe;
+
+ DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &state->cdclk.actual)) {
+ /* All pipes must be switched off while we change the cdclk. */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+
+ state->cdclk.pipe = INVALID_PIPE;
+
+ DRM_DEBUG_KMS("Modeset required for cdclk change\n");
}
+ DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ state->cdclk.logical.cdclk,
+ state->cdclk.actual.cdclk);
+ DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
+ state->cdclk.logical.voltage_level,
+ state->cdclk.actual.voltage_level);
+
return 0;
}
@@ -2809,15 +2615,29 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk;
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
+ dev_priv->cdclk.table = cnl_cdclk_table;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
+ if (IS_GEMINILAKE(dev_priv))
+ dev_priv->cdclk.table = glk_cdclk_table;
+ else
+ dev_priv->cdclk.table = bxt_cdclk_table;
} else if (IS_GEN9_BC(dev_priv)) {
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
@@ -2830,13 +2650,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv)) {
dev_priv->display.set_cdclk = vlv_set_cdclk;
dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+ } else {
+ dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
}
- if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.get_cdclk = icl_get_cdclk;
- else if (IS_CANNONLAKE(dev_priv))
- dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
else if (IS_GEN9_BC(dev_priv))
dev_priv->display.get_cdclk = skl_get_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 4d6f7f5f8930..cf71394cc79c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -15,6 +15,13 @@ struct intel_atomic_state;
struct intel_cdclk_state;
struct intel_crtc_state;
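+/*
+ * For each entry: vco = refclk * ratio and cdclk = vco / divider,
+ * with .divider holding twice the actual CD2X divider.
+ */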
+struct intel_cdclk_vals {
+ u16 refclk;
+ u32 cdclk;
+ u8 divider; /* CD2X divider * 2 */
+ u8 ratio;
+};
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void intel_cdclk_init(struct drm_i915_private *i915);
void intel_cdclk_uninit(struct drm_i915_private *i915);
@@ -22,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
void intel_cdclk_swap_state(struct intel_atomic_state *state);
void
intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
@@ -42,5 +44,6 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context);
+int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index aa1e2c670bc4..aa3a063549c3 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -43,6 +43,21 @@
#define LEGACY_LUT_LENGTH 256
/*
+ * ILK+ csc matrix:
+ *
+ * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0|
+ * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1|
+ * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2|
+ *
+ * ILK/SNB don't have explicit post offsets, and instead
+ * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used:
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2
+ * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0
+ * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16
+ */
+
+/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
* number of fractional bits.
@@ -59,37 +74,38 @@
#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+/* Nop pre/post offsets */
static const u16 ilk_csc_off_zero[3] = {};
+/* Identity matrix */
static const u16 ilk_csc_coeff_identity[9] = {
ILK_CSC_COEFF_1_0, 0, 0,
0, ILK_CSC_COEFF_1_0, 0,
0, 0, ILK_CSC_COEFF_1_0,
};
+/* Limited range RGB post offsets */
static const u16 ilk_csc_postoff_limited_range[3] = {
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
ILK_CSC_POSTOFF_LIMITED_RANGE,
};
+/* Full range RGB -> limited range RGB matrix */
static const u16 ilk_csc_coeff_limited_range[9] = {
ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
};
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
+/* BT.709 full range RGB -> limited range YCbCr matrix */
static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
0x1e08, 0x9cc0, 0xb528,
0x2ba8, 0x09d8, 0x37e8,
0xbce8, 0x9ad8, 0x1e08,
};
-/* Post offset values for RGB->YCBCR conversion */
+/* Limited range YCbCr post offsets */
static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
0x0800, 0x0100, 0x0800,
};
@@ -611,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
/*
* Program the gc max 2 register to clamp values > 1.0.
@@ -624,10 +641,15 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
* from 3.0 to 7.0
*/
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
- I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1),
+ 1 << 16);
+ intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2),
+ 1 << 16);
}
+
+ intel_dsb_put(dsb);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
@@ -787,78 +809,83 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state,
const struct drm_color_lut *color)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
/* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- * Every entry in the multi-segment LUT is corresponding to a superfine
- * segment step which is 1/(8 * 128 * 256).
+ * Program Super Fine segment (let's call it seg1)...
*
- * Superfine segment has 9 entries, corresponding to values
- * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256).
+ * Super Fine segment's step is 1/(8 * 128 * 256) and it has
+ * 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
+ * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+
+ intel_dsb_put(dsb);
}
static void
icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *blob = crtc_state->base.gamma_lut;
const struct drm_color_lut *lut = blob->data;
const struct drm_color_lut *entry;
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
u32 i;
/*
- *
* Program Fine segment (let's call it seg2)...
*
- * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256)
- * ... 256/(128*256). So in order to program fine segment of LUT we
- * need to pick every 8'th entry in LUT, and program 256 indexes.
+ * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256)
+ * ... 256/(128 * 256). So in order to program fine segment of LUT we
+ * need to pick every 8th entry in the LUT, and program 256 indexes.
*
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
- * with seg2[0] being unused by the hardware.
+ * seg2[0] being unused by the hardware.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
+ intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
* Program Coarse segment (let's call it seg3)...
*
- * Coarse segment's starts from index 0 and it's step is 1/256 ie 0,
- * 1/256, 2/256 ...256/256. As per the description of each entry in LUT
+	 * Coarse segment starts from index 0 and its step is 1/256, i.e. 0,
+ * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT
* above, we need to pick every (8 * 128)th entry in LUT, and
* program 256 of those.
*
@@ -868,20 +895,24 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry));
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
icl_load_gcmax(crtc_state, entry);
ivb_load_lut_ext_max(crtc);
+ intel_dsb_put(dsb);
}
static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_dsb *dsb = intel_dsb_get(crtc);
if (crtc_state->base.degamma_lut)
glk_load_degamma_lut(crtc_state);
@@ -890,16 +921,17 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
case GAMMA_MODE_MODE_8BIT:
i9xx_load_luts(crtc_state);
break;
-
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
break;
-
default:
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
}
+
+ intel_dsb_commit(dsb);
+ intel_dsb_put(dsb);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1250,6 +1282,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
return GAMMA_MODE_MODE_10BIT;
}
+static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * CSC comes after the LUT in RGB->YCbCr mode.
+ * RGB->YCbCr needs the limited range offsets added to
+ * the output. RGB limited range output is handled by
+ * the hw automagically elsewhere.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return CSC_BLACK_SCREEN_OFFSET;
+
+ return CSC_MODE_YUV_TO_RGB |
+ CSC_POSITION_BEFORE_GAMMA;
+}
+
static int ilk_color_check(struct intel_crtc_state *crtc_state)
{
int ret;
@@ -1263,15 +1310,15 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
!crtc_state->c8_planes;
/*
- * We don't expose the ctm on ilk/snb currently,
- * nor do we enable YCbCr output. Also RGB limited
- * range output is handled by the hw automagically.
+ * We don't expose the ctm on ilk/snb currently, also RGB
+ * limited range output is handled by the hw automagically.
*/
- crtc_state->csc_enable = false;
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
- crtc_state->csc_mode = 0;
+ crtc_state->csc_mode = ilk_csc_mode(crtc_state);
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
@@ -1432,6 +1479,403 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
return 0;
}
+static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 16;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+ else
+ return i9xx_gamma_precision(crtc_state);
+}
+
+static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return 0;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
+ return 10;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ return 0;
+ }
+}
+
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return chv_gamma_precision(crtc_state);
+ else
+ return i9xx_gamma_precision(crtc_state);
+ } else {
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ return glk_gamma_precision(crtc_state);
+ else if (IS_IRONLAKE(dev_priv))
+ return ilk_gamma_precision(crtc_state);
+ }
+
+ return 0;
+}
+
+static bool err_check(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2, u32 err)
+{
+ return ((abs((long)lut2->red - lut1->red)) <= err) &&
+ ((abs((long)lut2->blue - lut1->blue)) <= err) &&
+ ((abs((long)lut2->green - lut1->green)) <= err);
+}
+
+static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
+{
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (!err_check(&lut1[i], &lut2[i], err))
+ return false;
+ }
+
+ return true;
+}
+
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision)
+{
+ struct drm_color_lut *lut1, *lut2;
+ int lut_size1, lut_size2;
+ u32 err;
+
+ if (!blob1 != !blob2)
+ return false;
+
+ if (!blob1)
+ return true;
+
+ lut_size1 = drm_color_lut_size(blob1);
+ lut_size2 = drm_color_lut_size(blob2);
+
+	/* check that the sw and hw lut sizes match */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (lut_size1 != lut_size2)
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ lut1 = blob1->data;
+ lut2 = blob2->data;
+
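+	/* e.g. 10 bits of hw precision -> tolerate a delta of 0xffff >> 10 = 63 */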
+ err = 0xffff >> bit_precision;
+
+	/* check that the sw and hw lut entries are equal */
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ case GAMMA_MODE_MODE_10BIT:
+ if (!intel_color_lut_entry_equal(lut1, lut2,
+ lut_size2, err))
+ return false;
+ break;
+ default:
+ MISSING_CASE(gamma_mode);
+ return false;
+ }
+
+ return true;
+}
+
+/* convert hw value with given bit_precision to lut property val */
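+/* e.g. a 10 bit 0x3ff is clamped to max, then shifted up by 6 to 0xffc0 */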
+static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
+
+static struct drm_property_blob *
+i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ if (HAS_GMCH(dev_priv))
+ val = I915_READ(PALETTE(pipe, i));
+ else
+ val = I915_READ(LGC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_RED_MASK, val), 8);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_GREEN_MASK, val), 8);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ LGC_PALETTE_BLUE_MASK, val), 8);
+ }
+
+ return blob;
+}
+
+static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+}
+
+static struct drm_property_blob *
+i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val1, val2;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
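+ /*
+ * Each 16 bit channel value is split across two PALETTE registers:
+ * the even register holds the low 8 bits, the odd one the high 8 bits.
+ * The last entry is read from PIPEGCMAX instead.
+ */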
+ for (i = 0; i < lut_size - 1; i++) {
+ val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
+ val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
+
+ blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, val1);
+ blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
+ blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ }
+
+ blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 0)));
+ blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 1)));
+ blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
+ I915_READ(PIPEGCMAX(pipe, 2)));
+
+ return blob;
+}
+
+static void i965_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state);
+}
+
+static struct drm_property_blob *
+chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
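+ /* word 0 of each CGM entry holds green and blue, word 1 holds red */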
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
+
+ val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void chv_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state);
+ else
+ i965_read_luts(crtc_state);
+}
+
+static struct drm_property_blob *
+ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ val = I915_READ(PREC_PALETTE(pipe, i));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PALETTE_BLUE_MASK, val), 10);
+ }
+
+ return blob;
+}
+
+static void ilk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state);
+}
+
+static struct drm_property_blob *
+glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *blob_data;
+ u32 i, val;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * hw_lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ blob_data = blob->data;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
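+ /* with AUTO_INCREMENT set, each PREC_PAL_DATA access advances the index */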
+ for (i = 0; i < hw_lut_size; i++) {
+ val = I915_READ(PREC_PAL_DATA(pipe));
+
+ blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_RED_MASK, val), 10);
+ blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_GREEN_MASK, val), 10);
+ blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
+ PREC_PAL_DATA_BLUE_MASK, val), 10);
+ }
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+
+ return blob;
+}
+
+static void glk_read_luts(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable)
+ return;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state);
+ else
+ crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+}
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1444,14 +1888,17 @@ void intel_color_init(struct intel_crtc *crtc)
dev_priv->display.color_check = chv_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = chv_load_luts;
+ dev_priv->display.read_luts = chv_read_luts;
} else if (INTEL_GEN(dev_priv) >= 4) {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i965_load_luts;
+ dev_priv->display.read_luts = i965_read_luts;
} else {
dev_priv->display.color_check = i9xx_color_check;
dev_priv->display.color_commit = i9xx_color_commit;
dev_priv->display.load_luts = i9xx_load_luts;
+ dev_priv->display.read_luts = i9xx_read_luts;
}
} else {
if (INTEL_GEN(dev_priv) >= 11)
@@ -1470,16 +1917,19 @@ void intel_color_init(struct intel_crtc *crtc)
else
dev_priv->display.color_commit = ilk_color_commit;
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
dev_priv->display.load_luts = icl_load_luts;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
dev_priv->display.load_luts = glk_load_luts;
- else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.read_luts = glk_read_luts;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
dev_priv->display.load_luts = bdw_load_luts;
- else if (INTEL_GEN(dev_priv) >= 7)
+ } else if (INTEL_GEN(dev_priv) >= 7) {
dev_priv->display.load_luts = ivb_load_luts;
- else
+ } else {
dev_priv->display.load_luts = ilk_load_luts;
+ dev_priv->display.read_luts = ilk_read_luts;
+ }
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 057e8ac63555..173727aaa24d 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -6,13 +6,20 @@
#ifndef __INTEL_COLOR_H__
#define __INTEL_COLOR_H__
+#include <linux/types.h>
+
struct intel_crtc_state;
struct intel_crtc;
+struct drm_property_blob;
void intel_color_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
void intel_color_commit(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
+int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
+bool intel_color_lut_equal(struct drm_property_blob *blob1,
+ struct drm_property_blob *blob2,
+ u32 gamma_mode, u32 bit_precision);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 308ec63207ee..1133c4e97bb4 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -277,7 +277,22 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
void
intel_attach_colorspace_property(struct drm_connector *connector)
{
- if (!drm_mode_create_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (drm_mode_create_hdmi_colorspace_property(connector))
+ return;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (drm_mode_create_dp_colorspace_property(connector))
+ return;
+ break;
+ default:
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return;
+ }
+
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 0a08354a6183..39cc6d79dc85 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -844,7 +844,7 @@ load_detect:
}
/* for pre-945g platforms use load detect */
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret > 0) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
@@ -1001,9 +1001,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev_priv))
- crt->base.crtc_mask = (1 << 0);
+ crt->base.pipe_mask = BIT(PIPE_A);
else
- crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ crt->base.pipe_mask = ~0;
if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 8eb2b3ec01ed..0d6e494b4508 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -45,6 +45,7 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
@@ -586,6 +587,26 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = {
{ 0x0, 0x00, 0x00 }, /* 3 0 */
};
+struct tgl_dkl_phy_ddi_buf_trans {
+ u32 dkl_vswing_control;
+ u32 dkl_preshoot_control;
+ u32 dkl_de_emphasis_control;
+};
+
+static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = {
+ /* VS pre-emp Non-trans mV Pre-emph dB */
+ { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */
+ { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */
+ { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */
+ { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */
+ { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */
+ { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */
+ { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */
+ { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */
+ { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */
+ { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -872,7 +893,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ 0, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ default_entry = n_entries - 1;
+ } else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1049,6 +1077,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder,
case DPLL_ID_ICL_MGPLL2:
case DPLL_ID_ICL_MGPLL3:
case DPLL_ID_ICL_MGPLL4:
+ case DPLL_ID_TGL_MGPLL5:
+ case DPLL_ID_TGL_MGPLL6:
return DDI_CLK_SEL_MG;
}
}
@@ -1413,11 +1443,30 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
ref_clock = dev_priv->cdclk.hw.ref;
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
switch (pll_state->mg_clktop2_hsclkctl &
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
@@ -1692,7 +1741,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
}
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1704,44 +1754,50 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
- temp = TRANS_MSA_SYNC_CLK;
-
- if (crtc_state->limited_color_range)
- temp |= TRANS_MSA_CEA_RANGE;
+ temp = DP_MSA_MISC_SYNC_CLOCK;
switch (crtc_state->pipe_bpp) {
case 18:
- temp |= TRANS_MSA_6_BPC;
+ temp |= DP_MSA_MISC_6_BPC;
break;
case 24:
- temp |= TRANS_MSA_8_BPC;
+ temp |= DP_MSA_MISC_8_BPC;
break;
case 30:
- temp |= TRANS_MSA_10_BPC;
+ temp |= DP_MSA_MISC_10_BPC;
break;
case 36:
- temp |= TRANS_MSA_12_BPC;
+ temp |= DP_MSA_MISC_12_BPC;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
break;
}
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->limited_color_range)
+ temp |= DP_MSA_MISC_COLOR_CEA_RGB;
+
/*
* As per DP 1.2 spec section 2.3.4.3 while sending
* YCBCR 444 signals we should program MSA MISC1/0 fields with
- * colorspace information. The output colorspace encoding is BT601.
+ * colorspace information.
*/
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
+ temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+
/*
* As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
* of Color Encoding Format and Content Color Gamut] while sending
- * YCBCR 420 signals we should program MSA MISC1 fields which
- * indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
+ * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields
+ * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format.
*/
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- temp |= TRANS_MSA_USE_VSC_SDP;
+ if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ temp |= DP_MSA_MISC_COLOR_VSC_SDP;
+
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
@@ -1761,7 +1817,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+/*
+ * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
+ *
+ * Only intended to be used by intel_ddi_enable_transcoder_func() and
+ * intel_ddi_config_transcoder_func().
+ */
+static u32
+intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
@@ -1840,11 +1903,42 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder);
} else {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
}
+ return temp;
+}
+
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
+/*
+ * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable
+ * bit.
+ */
+static void
+intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 temp;
+
+ temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
+ temp &= ~TRANS_DDI_FUNC_ENABLE;
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
@@ -2045,18 +2139,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
}
if (!*pipe_mask)
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
- port_name(port));
+ DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n",
+ encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
- DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
- port_name(port), *pipe_mask);
+ DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask);
*pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
- DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
- port_name(port), *pipe_mask, mst_pipe_mask);
+ DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n",
+ encoder->base.base.id, encoder->base.name,
+ *pipe_mask, mst_pipe_mask);
else
*is_dp_mst = mst_pipe_mask;
@@ -2066,8 +2162,9 @@ out:
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
- DRM_ERROR("Port %c enabled but PHY powered down? "
- "(PHY_CTL %08x)\n", port_name(port), tmp);
+ DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? "
+ "(PHY_CTL %08x)\n", encoder->base.base.id,
+ encoder->base.name, tmp);
}
intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
@@ -2138,7 +2235,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
/*
* VDSC power is needed when DSC is enabled
*/
- if (crtc_state->dsc_params.compression_enable)
+ if (crtc_state->dsc.compression_enable)
intel_display_power_get(dev_priv,
intel_dsc_power_domain(crtc_state));
}
@@ -2269,7 +2366,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ } else if (INTEL_GEN(dev_priv) == 11) {
if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2583,7 +2686,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
u32 level)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val;
int ln;
@@ -2599,33 +2702,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2633,9 +2736,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, port));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2643,7 +2746,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2654,17 +2757,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, port));
+ val = I915_READ(MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, port), val);
+ I915_WRITE(MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, port));
+ val = I915_READ(MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2672,9 +2775,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, port), val);
+ I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, port));
+ val = I915_READ(MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2682,18 +2785,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, port), val);
+ I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
}
}
@@ -2711,6 +2814,64 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
+static void
+tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
+ u32 level)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
+ const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
+ u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
+
+ n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations);
+ ddi_translations = tgl_dkl_phy_ddi_translations;
+
+ if (level >= n_entries)
+ level = n_entries - 1;
+
+ dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
+ DKL_TX_DE_EMPAHSIS_COEFF_MASK |
+ DKL_TX_VSWING_CONTROL_MASK);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
+
+ for (ln = 0; ln < 2; ln++) {
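+ /* HIP_INDEX selects which lane's DKL registers the accesses below hit */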
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+
+ I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val &= ~dpcnt_mask;
+ val |= dpcnt_val;
+ I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+
+ val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val &= ~DKL_TX_DP20BITMODE;
+ I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ }
+}
+
+static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
+ int link_clock,
+ u32 level,
+ enum intel_output_type type)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
+ else
+ tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+}
+
static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2742,7 +2903,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
+ level, encoder->type);
+ else if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2989,130 +3153,141 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
-static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+static void
+icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+ u32 val, bits;
int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(MG_DP_MODE(ln, port), val);
- }
+ bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val;
- int ln;
+ for (ln = 0; ln < 2; ln++) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ val = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ val = I915_READ(MG_DP_MODE(ln, tc_port));
+ }
- if (tc_port == PORT_TC_NONE)
- return;
+ if (enable)
+ val |= bits;
+ else
+ val &= ~bits;
- for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_DP_MODE(ln, port));
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(MG_DP_MODE(ln, port), val);
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(DKL_DP_MODE(tc_port), val);
+ else
+ I915_WRITE(MG_DP_MODE(ln, tc_port), val);
}
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
+ if (INTEL_GEN(dev_priv) == 11) {
+ bits = MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ if (enable)
+ val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3));
+ else
+ val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+ }
}
-static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+static void
+icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- enum port port = intel_dig_port->base.port;
- u32 ln0, ln1, lane_mask;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port);
+ u32 ln0, ln1, pin_assignment;
+ u8 width;
if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
- ln0 = I915_READ(MG_DP_MODE(0, port));
- ln1 = I915_READ(MG_DP_MODE(1, port));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = I915_READ(DKL_DP_MODE(tc_port));
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ } else {
+ ln0 = I915_READ(MG_DP_MODE(0, tc_port));
+ ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ }
- switch (intel_dig_port->tc_mode) {
- case TC_PORT_DP_ALT:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
+ /* DPPATC */
+ pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port);
+ width = crtc_state->lane_count;
- switch (lane_mask) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
+ switch (pin_assignment) {
+ case 0x0:
+ WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ if (width == 1) {
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x1:
+ if (width == 4) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x2:
+ if (width == 2) {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
+ break;
+ case 0x3:
+ case 0x5:
+ if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_mask);
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ case 0x4:
+ case 0x6:
+ if (width == 1) {
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ } else {
+ ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
+ }
break;
-
default:
- MISSING_CASE(intel_dig_port->tc_mode);
- return;
+ MISSING_CASE(pin_assignment);
}
- I915_WRITE(MG_DP_MODE(0, port), ln0);
- I915_WRITE(MG_DP_MODE(1, port), ln1);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln0);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
+ I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ } else {
+ I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
+ I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ }
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3129,17 +3304,18 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}
@@ -3148,21 +3324,205 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ struct intel_dp *intel_dp;
u32 val;
if (!crtc_state->fec_enable)
return;
- val = I915_READ(DP_TP_CTL(port));
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
}
-static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void
+tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ u32 val;
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void
+tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
+{
+ u32 val, exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+
+ if (!cstate->dc3co_exitline)
+ return;
+
+ exit_scanlines = cstate->dc3co_exitline;
+ exit_scanlines <<= EXITLINE_SHIFT;
+ val = I915_READ(EXITLINE(cstate->cpu_transcoder));
+ val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
+ val |= exit_scanlines;
+ val |= EXITLINE_ENABLE;
+ I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
+}
+
+static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *cstate)
+{
+ u32 exit_scanlines;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay;
+
+ cstate->dc3co_exitline = 0;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ /* B.Spec 49196: DC3CO only works with pipe A and DDI A. */
+ if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A ||
+ encoder->port != PORT_A)
+ return;
+
+ if (!cstate->has_psr2 || !cstate->base.active)
+ return;
+
+ /*
+ * DC3CO exit time is 200us per B.Spec 49196.
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1;
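+ /*
+ * E.g. a 1920x1080@60 mode has a line time of roughly 15us, giving
+ * around 200 / 15 + 1 = 15 exit scanlines.
+ */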
+
+ if (WARN_ON(exit_scanlines > crtc_vdisplay))
+ return;
+
+ cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+ DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
+}
+
+static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
+{
+ u32 val;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (INTEL_GEN(dev_priv) < 12)
+ return;
+
+ val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
+
+ if (val & EXITLINE_ENABLE)
+ crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+}
+
+static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ int level = intel_ddi_dp_level(intel_dp);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+
+ tgl_set_psr2_transcoder_exitline(crtc_state);
+ intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+ crtc_state->lane_count, is_mst);
+
+ intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder);
+ intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder);
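+
+ /* on TGL the DP_TP_* registers moved from per port to per transcoder */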
+
+ /* 1.a is handled in intel_atomic_commit_tail() */
+
+ /* 2. */
+ intel_edp_panel_on(intel_dp);
+
+ /*
+ * 1.b, 3. and 4.a are done before tgl_ddi_pre_enable_dp() by:
+ * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and
+ * haswell_crtc_enable()->intel_enable_shared_dpll()
+ */
+
+ /* 4.b */
+ intel_ddi_clk_select(encoder, crtc_state);
+
+ /* 5. */
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
+
+ /* 6. */
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+
+ /*
+ * 7.a - The steps in this function should only be executed for the MST
+ * master, which is taken care of by the MST hook
+ * intel_mst_pre_enable_dp()
+ */
+ intel_ddi_enable_pipe_clock(crtc_state);
+
+ /* 7.b */
+ intel_ddi_config_transcoder_func(crtc_state);
+
+ /* 7.d */
+ icl_phy_set_clock_gating(dig_port, false);
+
+ /* 7.e */
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level,
+ encoder->type);
+
+ /* 7.f */
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ bool lane_reversal =
+ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
+ crtc_state->lane_count,
+ lane_reversal);
+ }
+
+ /* 7.g */
+ intel_ddi_init_dp_buf_reg(encoder);
+
+ if (!is_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+ * DDI FEC: the source that "anticipates enabling FEC encoding sets the
+ * FEC_READY bit in the FEC_CONFIGURATION register to 1 before
+ * initiating link training".
+ */
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ /* 7.c, 7.h, 7.i, 7.j */
+ intel_dp_start_link_train(intel_dp);
+
+ /* 7.k */
+ if (!is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp);
+
+ /*
+ * TODO: enable clock gating
+ *
+ * It is not written in the DP enabling sequence, but "PHY Clockgating
+ * programming" states that clock gating should be enabled after link
+ * training. Doing so causes all the subsequent trainings to fail, so
+ * it is left disabled for now.
+ */
+
+ /* 7.l */
+ intel_ddi_enable_fec(encoder, crtc_state);
+ intel_dsc_enable(encoder, crtc_state);
+}
+
+static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -3177,6 +3537,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, crtc_state);
@@ -3186,8 +3549,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+ icl_phy_set_clock_gating(dig_port, false);
if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
@@ -3215,12 +3578,13 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
- if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
+ if ((port != PORT_A || INTEL_GEN(dev_priv) >= 9) &&
+ !is_trans_port_sync_mode(crtc_state))
intel_dp_stop_link_train(intel_dp);
intel_ddi_enable_fec(encoder, crtc_state);
- icl_enable_phy_clock_gating(dig_port);
+ icl_phy_set_clock_gating(dig_port, true);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
@@ -3228,6 +3592,24 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_dsc_enable(encoder, crtc_state);
}
+static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ else
+ hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+
+ /*
+ * For MST, the MSA is set from the MST encoder's pre_enable callback,
+ * after the Virtual Channel has been allocated.
+ */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
+}
+
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -3244,10 +3626,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(dig_port);
- icl_disable_phy_clock_gating(dig_port);
+ icl_program_mg_dp_mode(dig_port, crtc_state);
+ icl_phy_set_clock_gating(dig_port, false);
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(dev_priv) >= 12)
+ tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
+ level, INTEL_OUTPUT_HDMI);
+ else if (INTEL_GEN(dev_priv) == 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3257,7 +3642,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
- icl_enable_phy_clock_gating(dig_port);
+ icl_phy_set_clock_gating(dig_port, true);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
@@ -3330,10 +3715,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
- val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
- val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ }
/* Disable FEC in DP Sink */
intel_ddi_disable_fec_state(encoder, crtc_state);
@@ -3373,6 +3762,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
+ tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3475,7 +3865,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_edp_backlight_on(crtc_state, conn_state);
intel_psr_enable(intel_dp, crtc_state);
- intel_dp_ycbcr_420_enable(intel_dp, crtc_state);
+ intel_dp_vsc_enable(intel_dp, crtc_state, conn_state);
+ intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
@@ -3486,12 +3877,12 @@ static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
{
- static const i915_reg_t regs[] = {
- [PORT_A] = CHICKEN_TRANS_EDP,
- [PORT_B] = CHICKEN_TRANS_A,
- [PORT_C] = CHICKEN_TRANS_B,
- [PORT_D] = CHICKEN_TRANS_C,
- [PORT_E] = CHICKEN_TRANS_A,
+ static const enum transcoder trans[] = {
+ [PORT_A] = TRANSCODER_EDP,
+ [PORT_B] = TRANSCODER_A,
+ [PORT_C] = TRANSCODER_B,
+ [PORT_D] = TRANSCODER_C,
+ [PORT_E] = TRANSCODER_A,
};
WARN_ON(INTEL_GEN(dev_priv) < 9);
@@ -3499,7 +3890,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
if (WARN_ON(port < PORT_A || port > PORT_E))
port = PORT_A;
- return regs[port];
+ return CHICKEN_TRANS(trans[port]);
}
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3633,7 +4024,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_ddi_set_pipe_settings(crtc_state);
+ intel_ddi_set_dp_msa(crtc_state, conn_state);
intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
@@ -3761,7 +4152,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 val;
bool wait = false;
- if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+ if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
@@ -3769,11 +4160,11 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
wait = true;
}
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -3788,8 +4179,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(DP_TP_CTL(port), val);
- POSTING_READ(DP_TP_CTL(port));
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ POSTING_READ(intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
@@ -3891,6 +4282,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_reg_t dp_tp_ctl;
+
+ if (IS_GEN(dev_priv, 11))
+ dp_tp_ctl = DP_TP_CTL(encoder->port);
+ else
+ dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
+
+ pipe_config->fec_enable =
+ I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_config->fec_enable);
+ }
+
break;
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
@@ -3902,6 +4310,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ tgl_dc3co_exitline_get_config(pipe_config);
+
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -3979,10 +4390,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
- else
+ } else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
+ tgl_dc3co_exitline_compute_config(encoder, pipe_config);
+ }
+
if (ret)
return ret;
@@ -4276,7 +4690,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
- enum pipe pipe;
enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
@@ -4328,8 +4741,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->cloneable = 0;
- for_each_pipe(dev_priv, pipe)
- intel_encoder->crtc_mask |= BIT(pipe);
+ intel_encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -4351,46 +4763,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->update_complete = intel_ddi_update_complete;
}
- switch (port) {
- case PORT_A:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_A_IO;
- break;
- case PORT_B:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_B_IO;
- break;
- case PORT_C:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_C_IO;
- break;
- case PORT_D:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_D_IO;
- break;
- case PORT_E:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_E_IO;
- break;
- case PORT_F:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_F_IO;
- break;
- case PORT_G:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_G_IO;
- break;
- case PORT_H:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_H_IO;
- break;
- case PORT_I:
- intel_dig_port->ddi_io_power_domain =
- POWER_DOMAIN_PORT_DDI_I_IO;
- break;
- default:
- MISSING_CASE(port);
- }
+ WARN_ON(port > PORT_I);
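+ /* POWER_DOMAIN_PORT_DDI_{A..I}_IO are contiguous, so index from DDI_A_IO */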
+ intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
+ port - PORT_A;
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index a08365da2643..19aeab1246ee 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -30,7 +30,8 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index af50f05f4e9d..6f5e3bd13ad1 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -31,7 +31,6 @@
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -56,6 +55,8 @@
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_acpi.h"
@@ -65,6 +66,7 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_display_types.h"
+#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
@@ -79,6 +81,7 @@
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
+#include "intel_vga.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -88,7 +91,17 @@ static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_XRGB8888,
};
-/* Primary plane formats for gen >= 4 */
+/* Primary plane formats for ivb (no fp16 due to hw issue) */
+static const u32 ivb_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -96,6 +109,7 @@ static const u32 i965_primary_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XBGR16161616F,
};
static const u64 i9xx_format_modifiers[] = {
@@ -135,8 +149,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
-static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@ -490,7 +502,7 @@ static const struct intel_limit intel_limits_bxt = {
/* WA Display #0827: Gen9:all */
static void
-skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -521,6 +533,20 @@ needs_modeset(const struct intel_crtc_state *state)
return drm_atomic_crtc_needs_modeset(&state->base);
}
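+/*
+ * A crtc is in transcoder port sync mode when it either has a master
+ * transcoder (it is a sync slave) or has sync slaves of its own (it is
+ * the sync master).
+ */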
+bool
+is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
+ crtc_state->sync_mode_slaves_mask);
+}
+
+static bool
+is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->master_transcoder == INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -1612,8 +1638,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
- port_name(dport->base.port),
+ WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
@@ -2079,7 +2105,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
@@ -2161,8 +2188,6 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
i915_gem_object_lock(vma->obj);
if (flags & PLANE_HAS_FENCE)
i915_vma_unpin_fence(vma);
@@ -2739,10 +2764,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
size++;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
rot_info->plane[i].width * tile_width,
rot_info->plane[i].height * tile_height,
@@ -2864,10 +2886,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
struct drm_rect r;
/* rotate the x/y offsets to match the GTT view */
- r.x1 = x;
- r.y1 = y;
- r.x2 = x + width;
- r.y2 = y + height;
+ drm_rect_init(&r, x, y, width, height);
drm_rect_rotate(&r,
info->plane[i].width * tile_width,
info->plane[i].height * tile_height,
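Both hunks above replace four open-coded rectangle assignments with drm_rect_init(). A minimal standalone sketch of the equivalence, using a local struct that stands in for struct drm_rect and a local helper matching the open-coded assignments being replaced (x2/y2 are the exclusive far edges):

#include <assert.h>

/* Local stand-in for struct drm_rect; not the kernel definition. */
struct rect {
        int x1, y1, x2, y2;
};

/* Mirrors the semantics of drm_rect_init() as used above. */
static void rect_init(struct rect *r, int x, int y, int width, int height)
{
        r->x1 = x;
        r->y1 = y;
        r->x2 = x + width;
        r->y2 = y + height;
}

int main(void)
{
        struct rect a, b = { 10, 20, 10 + 640, 20 + 480 };

        rect_init(&a, 10, 20, 640, 480);
        assert(a.x1 == b.x1 && a.y1 == b.y1 &&
               a.x2 == b.x2 && a.y2 == b.y2);
        return 0;
}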
@@ -2969,6 +2988,8 @@ static int i9xx_format_to_fourcc(int format)
return DRM_FORMAT_XRGB2101010;
case DISPPLANE_RGBX101010:
return DRM_FORMAT_XBGR2101010;
+ case DISPPLANE_RGBX161616:
+ return DRM_FORMAT_XBGR16161616F;
}
}
@@ -3066,13 +3087,11 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
}
- mutex_lock(&dev->struct_mutex);
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
- mutex_unlock(&dev->struct_mutex);
- if (!obj)
+ if (IS_ERR(obj))
return false;
switch (plane_config->tiling) {
@@ -3154,6 +3173,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
@@ -3233,13 +3253,11 @@ valid_fb:
intel_state->color_plane[0].stride =
intel_fb_pitch(fb, 0, intel_state->base.rotation);
- mutex_lock(&dev->struct_mutex);
intel_state->vma =
intel_pin_and_fence_fb_obj(fb,
&intel_state->view,
intel_plane_uses_fence(intel_state),
&intel_state->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(intel_state->vma)) {
DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
intel_crtc->pipe, PTR_ERR(intel_state->vma));
@@ -3347,6 +3365,16 @@ static int icl_max_plane_width(const struct drm_framebuffer *fb,
return 5120;
}
+static int skl_max_plane_height(void)
+{
+ return 4096;
+}
+
+static int icl_max_plane_height(void)
+{
+ return 4320;
+}
+
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
int main_x, int main_y, u32 main_offset)
{
@@ -3395,7 +3423,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int w = drm_rect_width(&plane_state->base.src) >> 16;
int h = drm_rect_height(&plane_state->base.src) >> 16;
int max_width;
- int max_height = 4096;
+ int max_height;
u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
if (INTEL_GEN(dev_priv) >= 11)
@@ -3405,6 +3433,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
else
max_width = skl_max_plane_width(fb, 0, rotation);
+ if (INTEL_GEN(dev_priv) >= 11)
+ max_height = icl_max_plane_height();
+ else
+ max_height = skl_max_plane_height();
+
if (w > max_width || h > max_height) {
DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
w, h, max_width, max_height);
@@ -3471,9 +3504,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (x << 16) - plane_state->base.src.x1,
- (y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->base.src,
+ x << 16, y << 16);
return 0;
}
@@ -3544,7 +3576,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (is_planar_yuv_format(fb->format->format)) {
+ if (drm_format_info_is_yuv_semiplanar(fb->format)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3565,6 +3597,53 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * g4x bspec says 64bpp pixel rate can't exceed 80%
+ * of cdclk when the sprite plane is enabled on the
+ * same pipe. ilk/snb bspec says 64bpp pixel rate is
+ * never allowed to exceed 80% of cdclk. Let's just go
+ * with the ilk/snb limit always.
+ */
+ if (cpp == 8) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock with double wide pipe */
+ if (crtc_state->double_wide)
+ den *= 2;
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
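As the new helpers spell out, a 64bpp (cpp == 8) plane needs cdclk >= pixel_rate * 10/8, i.e. the pixel rate may use at most 80% of cdclk, and a double wide pipe moves two pixels per clock. A standalone sketch of the same arithmetic (the rate below is a made-up example, not a real platform value):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirrors i9xx_plane_min_cdclk() for the cpp == 8 case. */
static unsigned int min_cdclk(unsigned int pixel_rate, int cpp, int double_wide)
{
        unsigned int num = (cpp == 8) ? 10 : 1;
        unsigned int den = (cpp == 8) ? 8 : 1;

        if (double_wide)
                den *= 2;       /* two pixels per clock */

        return DIV_ROUND_UP(pixel_rate * num, den);
}

int main(void)
{
        /* 148500 kHz at 64bpp -> cdclk must be at least 185625 kHz. */
        printf("%u\n", min_cdclk(148500, 8, 0));
        return 0;
}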
unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -3647,6 +3726,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XBGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ dspcntr |= DISPPLANE_RGBX161616;
+ break;
default:
MISSING_CASE(fb->format->format);
return 0;
@@ -3669,7 +3751,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- int src_x, src_y;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_x, src_y, src_w;
u32 offset;
int ret;
@@ -3680,9 +3763,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
if (!plane_state->base.visible)
return 0;
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
src_x = plane_state->base.src.x1 >> 16;
src_y = plane_state->base.src.y1 >> 16;
+ /* Undocumented hardware limit on i965/g4x/vlv/chv */
+ if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
+ return -EINVAL;
+
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
@@ -3695,9 +3783,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
* Put the final coordinates back so that the src
* coordinate checks will see the right values.
*/
- drm_rect_translate(&plane_state->base.src,
- (src_x << 16) - plane_state->base.src.x1,
- (src_y << 16) - plane_state->base.src.y1);
+ drm_rect_translate_to(&plane_state->base.src,
+ src_x << 16, src_y << 16);
/* HSW/BDW do this automagically in hardware */
if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
@@ -4227,7 +4314,7 @@ __intel_display_resume(struct drm_device *dev,
int i, ret;
intel_modeset_setup_hw_state(dev, ctx);
- i915_redisable_vga(to_i915(dev));
+ intel_vga_redisable(to_i915(dev));
if (!state)
return 0;
@@ -4259,7 +4346,7 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(dev_priv));
+ intel_has_gpu_reset(&dev_priv->gt));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
@@ -4346,7 +4433,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* so need a full re-initialization.
*/
intel_pps_unlock_regs_wa(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -4394,50 +4481,60 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
-static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
- const struct intel_crtc_state *new_crtc_state)
+static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
- crtc->base.mode = new_crtc_state->base.mode;
+ u32 trans_ddi_func_ctl2_val;
+ u8 master_select;
/*
- * Update pipe size and adjust fitter if needed: the reason for this is
- * that in compute_mode_changes we check the native mode (not the pfit
- * mode) to see if we can flip rather than do a full mode set. In the
- * fastboot case, we'll flip, but if we don't update the pipesrc and
- * pfit state, we'll end up with a big fb scanned out into the wrong
- * sized surface.
+ * Configure the master select and enable Transcoder Port Sync for
+ * the slave CRTC's transcoder.
*/
+ if (crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
- I915_WRITE(PIPESRC(crtc->pipe),
- ((new_crtc_state->pipe_src_w - 1) << 16) |
- (new_crtc_state->pipe_src_h - 1));
+ if (crtc_state->master_transcoder == TRANSCODER_EDP)
+ master_select = 0;
+ else
+ master_select = crtc_state->master_transcoder + 1;
- /* on skylake this is done by detaching scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(new_crtc_state);
+ /* Set the master select bits for Transcoder Port Sync */
+ trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
+ PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
+ /* Enable Transcoder Port Sync */
+ trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(new_crtc_state);
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(new_crtc_state);
- else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(old_crtc_state);
- }
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
+}
- if (INTEL_GEN(dev_priv) >= 11)
- icl_set_pipe_chicken(crtc);
+static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ i915_reg_t reg;
+ u32 trans_ddi_func_ctl2_val;
+
+ if (old_crtc_state->master_transcoder == INVALID_TRANSCODER)
+ return;
+
+ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
+ transcoder_name(old_crtc_state->cpu_transcoder));
+
+ reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder);
+ trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE |
+ PORT_SYNC_MODE_MASTER_SELECT_MASK);
+ I915_WRITE(reg, trans_ddi_func_ctl2_val);
}
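The enable path above encodes the master select field as: 0 selects the EDP transcoder, otherwise the master transcoder index plus one. A standalone sketch of the mapping, with local enum values standing in for the kernel's (the numeric values here are illustrative, not the register encoding):

#include <stdio.h>

/* Local stand-ins; not the kernel enum transcoder values. */
enum transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_D, TRANS_EDP, TRANS_INVALID };

/* Mirrors the master_select computation in icl_enable_trans_port_sync(). */
static unsigned int master_select(enum transcoder master)
{
        return master == TRANS_EDP ? 0 : (unsigned int)master + 1;
}

int main(void)
{
        printf("EDP -> %u, A -> %u, B -> %u\n",
               master_select(TRANS_EDP),
               master_select(TRANS_A),
               master_select(TRANS_B));
        return 0;
}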
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4480,7 +4577,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
@@ -4581,7 +4678,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -4714,7 +4811,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -4832,7 +4929,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4869,7 +4966,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4900,7 +4997,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -5215,7 +5312,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
@@ -5310,7 +5407,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void cpt_verify_modeset(struct drm_device *dev, int pipe)
+static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
@@ -5462,7 +5559,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && is_planar_yuv_format(format->format) &&
+ if (format && drm_format_info_is_yuv_semiplanar(format) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
@@ -5539,7 +5636,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
- fb && is_planar_yuv_format(fb->format->format))
+ fb && drm_format_info_is_yuv_semiplanar(fb->format))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
@@ -5571,10 +5668,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -5590,6 +5683,13 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, intel_plane->base.name,
@@ -5649,7 +5749,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
@@ -5731,13 +5831,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
- if (intel_crtc->overlay) {
- struct drm_device *dev = intel_crtc->base.dev;
-
- mutex_lock(&dev->struct_mutex);
+ if (intel_crtc->overlay)
(void) intel_overlay_switch_off(intel_crtc->overlay);
- mutex_unlock(&dev->struct_mutex);
- }
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
@@ -5762,7 +5857,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -5786,7 +5881,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
@@ -6309,7 +6404,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6442,7 +6537,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
bool psl_clkgate_wa;
@@ -6462,6 +6557,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(pipe_config);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_enable_trans_port_sync(pipe_config);
+
intel_set_pipe_src_size(pipe_config);
if (cpu_transcoder != TRANSCODER_EDP &&
@@ -6507,7 +6605,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
- intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(pipe_config);
@@ -6568,7 +6665,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* Sometimes spurious CPU pipe underruns happen when the
@@ -6640,6 +6737,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_disable_transcoder_port_sync(old_crtc_state);
+
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -6737,6 +6837,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
return POWER_DOMAIN_PORT_DDI_E_LANES;
case PORT_F:
return POWER_DOMAIN_PORT_DDI_F_LANES;
+ case PORT_G:
+ return POWER_DOMAIN_PORT_DDI_G_LANES;
default:
MISSING_CASE(port);
return POWER_DOMAIN_PORT_OTHER;
@@ -6753,16 +6855,18 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
dig_port->tc_mode == TC_PORT_TBT_ALT) {
switch (dig_port->aux_ch) {
case AUX_CH_C:
- return POWER_DOMAIN_AUX_TBT1;
+ return POWER_DOMAIN_AUX_C_TBT;
case AUX_CH_D:
- return POWER_DOMAIN_AUX_TBT2;
+ return POWER_DOMAIN_AUX_D_TBT;
case AUX_CH_E:
- return POWER_DOMAIN_AUX_TBT3;
+ return POWER_DOMAIN_AUX_E_TBT;
case AUX_CH_F:
- return POWER_DOMAIN_AUX_TBT4;
+ return POWER_DOMAIN_AUX_F_TBT;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G_TBT;
default:
MISSING_CASE(dig_port->aux_ch);
- return POWER_DOMAIN_AUX_TBT1;
+ return POWER_DOMAIN_AUX_C_TBT;
}
}
@@ -6779,6 +6883,8 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
return POWER_DOMAIN_AUX_E;
case AUX_CH_F:
return POWER_DOMAIN_AUX_F;
+ case AUX_CH_G:
+ return POWER_DOMAIN_AUX_G;
default:
MISSING_CASE(dig_port->aux_ch);
return POWER_DOMAIN_AUX_A;
@@ -6855,7 +6961,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6987,7 +7093,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
@@ -7096,7 +7202,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_display_power_put_unchecked(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
- dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+ dev_priv->active_pipes &= ~BIT(intel_crtc->pipe);
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
@@ -7204,7 +7310,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
}
- if (INTEL_INFO(dev_priv)->num_pipes == 2)
+ if (INTEL_NUM_PIPES(dev_priv) == 2)
return 0;
/* Ivybridge 3 pipe is really complicated */
@@ -7542,6 +7648,27 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
constant_n);
}
+static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+{
+ /*
+ * There may be no VBT; if the BIOS enabled SSC we can just
+ * keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
+ bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE;
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+}
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_modparams.panel_use_ssc >= 0)
@@ -8193,6 +8320,21 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
(crtc_state->pipe_src_h - 1));
}
+static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (IS_GEN(dev_priv, 2))
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ else
+ return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+}
+
static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8231,7 +8373,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
- if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
+ if (intel_pipe_is_interlaced(pipe_config)) {
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
pipe_config->base.adjusted_mode.crtc_vtotal += 1;
pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
@@ -8563,7 +8705,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8673,7 +8815,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
@@ -8702,47 +8844,24 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
-static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+static enum intel_output_format
+bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
-
- pipe_config->lspcon_downsampling = false;
+ u32 tmp;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = I915_READ(PIPEMISC(crtc->pipe));
- if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
- bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
- bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+ if (tmp & PIPEMISC_YUV420_ENABLE) {
+ /* We support 4:2:0 in full blend mode only */
+ WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
- if (ycbcr420_enabled) {
- /* We support 4:2:0 in full blend mode only */
- if (!blend)
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else if (!(IS_GEMINILAKE(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10))
- output = INTEL_OUTPUT_FORMAT_INVALID;
- else
- output = INTEL_OUTPUT_FORMAT_YCBCR420;
- } else {
- /*
- * Currently there is no interface defined to
- * check user preference between RGB/YCBCR444
- * or YCBCR420. So the only possible case for
- * YCBCR444 usage is driving YCBCR420 output
- * with LSPCON, when pipe is configured for
- * YCBCR444 output and LSPCON takes care of
- * downsampling it.
- */
- pipe_config->lspcon_downsampling = true;
- output = INTEL_OUTPUT_FORMAT_YCBCR444;
- }
- }
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ return INTEL_OUTPUT_FORMAT_YCBCR444;
+ } else {
+ return INTEL_OUTPUT_FORMAT_RGB;
}
-
- pipe_config->output_format = output;
}
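bdw_get_pipemisc_output_format() above checks the 4:2:0 enable bit first, then the generic YUV colorspace bit, and falls back to RGB. A sketch of the same priority order, with made-up local bit values (the real PIPEMISC bit positions are register-specific and not reproduced here):

#include <stdio.h>

/* Illustrative bit values only; not the real PIPEMISC layout. */
#define YUV420_ENABLE           (1u << 0)
#define COLORSPACE_YUV          (1u << 1)

enum output_format { FMT_RGB, FMT_YCBCR444, FMT_YCBCR420 };

static enum output_format decode_pipemisc(unsigned int tmp)
{
        if (tmp & YUV420_ENABLE)
                return FMT_YCBCR420;
        else if (tmp & COLORSPACE_YUV)
                return FMT_YCBCR444;
        else
                return FMT_RGB;
}

int main(void)
{
        printf("%d %d %d\n",
               decode_pipemisc(0),
               decode_pipemisc(COLORSPACE_YUV),
               decode_pipemisc(YUV420_ENABLE | COLORSPACE_YUV));
        return 0;
}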
static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
@@ -8780,6 +8899,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
@@ -9419,9 +9539,19 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
else
val |= PIPECONF_PROGRESSIVE;
+ /*
+ * Limited color range together with a non-RGB output format
+ * would end up with an odd purple hue over the entire
+ * display. Make sure we don't do it.
+ */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
+
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
I915_WRITE(PIPECONF(pipe), val);
@@ -9443,6 +9573,10 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
else
val |= PIPECONF_PROGRESSIVE;
+ if (IS_HASWELL(dev_priv) &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
+
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
}
@@ -9593,7 +9727,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc,
* clear if it's a win or loss power-wise. No point in doing
* this on ILK at all since it has a fixed DPLL<->pipe mapping.
*/
- if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
+ if (INTEL_NUM_PIPES(dev_priv) == 3 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
dpll |= DPLL_SDVO_HIGH_SPEED;
@@ -9892,8 +10026,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
val = I915_READ(PLANE_SIZE(pipe, plane_id));
- fb->height = ((val >> 16) & 0xfff) + 1;
- fb->width = ((val >> 0) & 0x1fff) + 1;
+ fb->height = ((val >> 16) & 0xffff) + 1;
+ fb->width = ((val >> 0) & 0xffff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
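The widened masks above read the plane size as two 16-bit, minus-one-encoded fields. A standalone round-trip sketch of that layout, assuming the packing implied by the +1 in the readout (the same scheme the PIPESRC write elsewhere in this patch uses):

#include <assert.h>
#include <stdint.h>

/* PLANE_SIZE-style packing: (height - 1) << 16 | (width - 1). */
static uint32_t pack_size(uint32_t width, uint32_t height)
{
        return ((height - 1) << 16) | (width - 1);
}

int main(void)
{
        uint32_t val = pack_size(5120, 4320);
        uint32_t width  = ((val >>  0) & 0xffff) + 1;
        uint32_t height = ((val >> 16) & 0xffff) + 1;

        assert(width == 5120 && height == 4320);
        return 0;
}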
@@ -9954,9 +10088,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!wakeref)
return false;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9983,6 +10117,16 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
+ case PIPECONF_OUTPUT_COLORSPACE_YUV601:
+ case PIPECONF_OUTPUT_COLORSPACE_YUV709:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ break;
+ default:
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ break;
+ }
+
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
@@ -10397,6 +10541,59 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
}
}
+static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ u32 trans_port_sync, master_select;
+
+ trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+
+ if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
+ return INVALID_TRANSCODER;
+
+ master_select = trans_port_sync &
+ PORT_SYNC_MODE_MASTER_SELECT_MASK;
+ if (master_select == 0)
+ return TRANSCODER_EDP;
+ else
+ return master_select - 1;
+}
+
+static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 transcoders;
+ enum transcoder cpu_transcoder;
+
+ crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
+ crtc_state->cpu_transcoder);
+
+ transcoders = BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) |
+ BIT(TRANSCODER_D);
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t trans_wakeref;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+
+ if (!trans_wakeref)
+ continue;
+
+ if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
+ crtc_state->cpu_transcoder)
+ crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);
+
+ intel_display_power_put(dev_priv, power_domain, trans_wakeref);
+ }
+
+ WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
+}
+
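transcoder_master_readout() above is the inverse of the enable-path encoding (0 -> EDP, otherwise value minus one), and the final WARN_ON captures the invariant that a pipe is never both a slave (master_transcoder set) and a master (non-empty slave mask). A round-trip sketch under the same local stand-in enum as before:

#include <assert.h>

enum transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_D, TRANS_EDP, TRANS_INVALID };

static unsigned int encode(enum transcoder master)
{
        return master == TRANS_EDP ? 0 : (unsigned int)master + 1;
}

static enum transcoder decode(unsigned int master_select)
{
        return master_select == 0 ? TRANS_EDP
                                  : (enum transcoder)(master_select - 1);
}

int main(void)
{
        enum transcoder t;

        /* Every transcoder survives the encode/decode round trip. */
        for (t = TRANS_A; t <= TRANS_EDP; t++)
                assert(decode(encode(t)) == t);
        return 0;
}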
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -10408,6 +10605,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10438,7 +10637,30 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
}
intel_get_pipe_src_size(crtc, pipe_config);
- intel_get_crtc_ycbcr_config(crtc, pipe_config);
+
+ if (IS_HASWELL(dev_priv)) {
+ u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+ if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ else
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ } else {
+ pipe_config->output_format =
+ bdw_get_pipemisc_output_format(crtc);
+
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when the pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling =
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
@@ -10493,6 +10715,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder))
+ icelake_get_trans_port_sync_config(pipe_config);
+
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv,
@@ -10514,21 +10740,13 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
else
base = intel_plane_ggtt_offset(plane_state);
- base += plane_state->color_plane[0].offset;
-
- /* ILK+ do this automagically */
- if (HAS_GMCH(dev_priv) &&
- plane_state->base.rotation & DRM_MODE_ROTATE_180)
- base += (plane_state->base.crtc_h *
- plane_state->base.crtc_w - 1) * fb->format->cpp[0];
-
- return base;
+ return base + plane_state->color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
- int x = plane_state->base.crtc_x;
- int y = plane_state->base.crtc_y;
+ int x = plane_state->base.dst.x1;
+ int y = plane_state->base.dst.y1;
u32 pos = 0;
if (x < 0) {
@@ -10550,8 +10768,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
const struct drm_mode_config *config =
&plane_state->base.plane->dev->mode_config;
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ int width = drm_rect_width(&plane_state->base.dst);
+ int height = drm_rect_height(&plane_state->base.dst);
return width > 0 && width <= config->cursor_width &&
height > 0 && height <= config->cursor_height;
@@ -10559,6 +10777,9 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ unsigned int rotation = plane_state->base.rotation;
int src_x, src_y;
u32 offset;
int ret;
@@ -10570,8 +10791,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
if (!plane_state->base.visible)
return 0;
- src_x = plane_state->base.src_x >> 16;
- src_y = plane_state->base.src_y >> 16;
+ src_x = plane_state->base.src.x1 >> 16;
+ src_y = plane_state->base.src.y1 >> 16;
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
@@ -10582,7 +10803,25 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
return -EINVAL;
}
+ /*
+ * Put the final coordinates back so that the src
+ * coordinate checks will see the right values.
+ */
+ drm_rect_translate_to(&plane_state->base.src,
+ src_x << 16, src_y << 16);
+
+ /* ILK+ do this automagically in hardware */
+ if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ offset += (src_h * src_w - 1) * fb->format->cpp[0];
+ }
+
plane_state->color_plane[0].offset = offset;
+ plane_state->color_plane[0].x = src_x;
+ plane_state->color_plane[0].y = src_y;
return 0;
}
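On GMCH platforms the cursor has no rotation hardware, so for 180 degrees the hunk above advances the surface offset to the last pixel of the cursor image, presumably so scanout walks the buffer backwards. A worked example of that adjustment for a hypothetical 64x64 ARGB cursor (4 bytes per pixel):

#include <stdio.h>

int main(void)
{
        int src_w = 64, src_h = 64, cpp = 4;    /* 64x64 ARGB cursor */
        unsigned int offset = 0;

        /* Same adjustment as the ROTATE_180 branch above. */
        offset += (src_h * src_w - 1) * cpp;

        printf("offset += %u bytes\n", offset); /* prints 16380 */
        return 0;
}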
@@ -10606,6 +10845,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ /* Use the unclipped src/dst rectangles, which we program to hw */
+ plane_state->base.src = drm_plane_state_src(&plane_state->base);
+ plane_state->base.dst = drm_plane_state_dest(&plane_state->base);
+
ret = intel_cursor_check_surface(plane_state);
if (ret)
return ret;
@@ -10648,7 +10891,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
- int width = plane_state->base.crtc_w;
+ int width = drm_rect_width(&plane_state->base.dst);
/*
* 845g/865g are only limited by the width of their cursors,
@@ -10674,8 +10917,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
return -EINVAL;
}
@@ -10708,8 +10951,8 @@ static void i845_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
- unsigned int width = plane_state->base.crtc_w;
- unsigned int height = plane_state->base.crtc_h;
+ unsigned int width = drm_rect_width(&plane_state->base.dst);
+ unsigned int height = drm_rect_height(&plane_state->base.dst);
cntl = plane_state->ctl |
i845_cursor_ctl_crtc(crtc_state);
@@ -10811,7 +11054,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
- switch (plane_state->base.crtc_w) {
+ switch (drm_rect_width(&plane_state->base.dst)) {
case 64:
cntl |= MCURSOR_MODE_64_ARGB_AX;
break;
@@ -10822,7 +11065,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
cntl |= MCURSOR_MODE_256_ARGB_AX;
break;
default:
- MISSING_CASE(plane_state->base.crtc_w);
+ MISSING_CASE(drm_rect_width(&plane_state->base.dst));
return 0;
}
@@ -10836,8 +11079,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
- int width = plane_state->base.crtc_w;
- int height = plane_state->base.crtc_h;
+ int width = drm_rect_width(&plane_state->base.dst);
+ int height = drm_rect_height(&plane_state->base.dst);
if (!intel_cursor_size_ok(plane_state))
return false;
@@ -10890,17 +11133,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- plane_state->base.crtc_w,
- plane_state->base.crtc_h);
+ drm_rect_width(&plane_state->base.dst),
+ drm_rect_height(&plane_state->base.dst));
return -EINVAL;
}
WARN_ON(plane_state->base.visible &&
plane_state->color_plane[0].stride != fb->pitches[0]);
- if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+ if (fb->pitches[0] !=
+ drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) {
DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0], plane_state->base.crtc_w);
+ fb->pitches[0],
+ drm_rect_width(&plane_state->base.dst));
return -EINVAL;
}
@@ -10915,7 +11160,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
* Refuse to put the cursor into that compromised position.
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
- plane_state->base.visible && plane_state->base.crtc_x < 0) {
+ plane_state->base.visible && plane_state->base.dst.x1 < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -10935,11 +11180,14 @@ static void i9xx_update_cursor(struct intel_plane *plane,
unsigned long irqflags;
if (plane_state && plane_state->base.visible) {
+ unsigned width = drm_rect_width(&plane_state->base.dst);
+ unsigned height = drm_rect_height(&plane_state->base.dst);
+
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (plane_state->base.crtc_h != plane_state->base.crtc_w)
- fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
+ if (width != height)
+ fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
base = intel_cursor_base(plane_state);
pos = intel_cursor_position(plane_state);
@@ -11084,7 +11332,6 @@ static int intel_modeset_disable_planes(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
@@ -11191,10 +11438,8 @@ found:
crtc_state->base.active = crtc_state->base.enable = true;
- if (!mode)
- mode = &load_detect_mode;
-
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base,
+ &load_detect_mode);
if (ret)
goto fail;
@@ -11286,7 +11531,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = pipe_config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -11510,7 +11755,6 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
bool was_crtc_enabled = old_crtc_state->base.active;
bool is_crtc_enabled = crtc_state->base.active;
bool turn_off, turn_on, visible, was_visible;
- struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
@@ -11539,24 +11783,18 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
plane_state->base.visible = visible = false;
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->data_rate[plane->id] = 0;
+ crtc_state->min_cdclk[plane->id] = 0;
}
if (!was_visible && !visible)
return 0;
- if (fb != old_plane_state->base.fb)
- crtc_state->fb_changed = true;
-
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
- fb ? fb->base.id : -1);
-
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.base.id, plane->base.name,
was_visible, visible,
turn_off, turn_on, mode_changed);
@@ -11665,7 +11903,7 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
int i;
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- linked = plane_state->linked_plane;
+ linked = plane_state->planar_linked_plane;
if (!linked)
continue;
@@ -11674,8 +11912,8 @@ static int icl_add_linked_planes(struct intel_atomic_state *state)
if (IS_ERR(linked_plane_state))
return PTR_ERR(linked_plane_state);
- WARN_ON(linked_plane_state->linked_plane != plane);
- WARN_ON(linked_plane_state->slave == plane_state->slave);
+ WARN_ON(linked_plane_state->planar_linked_plane != plane);
+ WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
}
return 0;
@@ -11698,16 +11936,16 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
* in the crtc_state->active_planes mask.
*/
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
continue;
- plane_state->linked_plane = NULL;
- if (plane_state->slave && !plane_state->base.visible) {
+ plane_state->planar_linked_plane = NULL;
+ if (plane_state->planar_slave && !plane_state->base.visible) {
crtc_state->active_planes &= ~BIT(plane->id);
crtc_state->update_planes |= BIT(plane->id);
}
- plane_state->slave = false;
+ plane_state->planar_slave = false;
}
if (!crtc_state->nv12_planes)
@@ -11741,10 +11979,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
return -EINVAL;
}
- plane_state->linked_plane = linked;
+ plane_state->planar_linked_plane = linked;
- linked_state->slave = true;
- linked_state->linked_plane = plane;
+ linked_state->planar_slave = true;
+ linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
@@ -11764,25 +12002,108 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static int intel_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc_state);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_connector *master_connector, *connector;
+ struct drm_connector_state *connector_state;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_crtc *master_crtc = NULL;
+ struct drm_crtc_state *master_crtc_state;
+ struct intel_crtc_state *master_pipe_config;
+ int i, tile_group_id;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * In case of tiled displays there could be one or more slaves but there is
+ * only one master. Let's make the CRTC used by the connector corresponding
+ * to the last horizontal and last vertical tile a master/genlock CRTC.
+ * All the other CRTCs corresponding to other tiles of the same Tile group
+ * are the slave CRTCs and hold a pointer to their genlock CRTC.
+ */
+ for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+ if (!connector->has_tile)
+ continue;
+ if (crtc_state->base.mode.hdisplay != connector->tile_h_size ||
+ crtc_state->base.mode.vdisplay != connector->tile_v_size)
+ return 0;
+ if (connector->tile_h_loc == connector->num_h_tile - 1 &&
+ connector->tile_v_loc == connector->num_v_tile - 1)
+ continue;
+ crtc_state->sync_mode_slaves_mask = 0;
+ tile_group_id = connector->tile_group->id;
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(master_connector, &conn_iter) {
+ struct drm_connector_state *master_conn_state = NULL;
+
+ if (!master_connector->has_tile)
+ continue;
+ if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
+ master_connector->tile_v_loc != master_connector->num_v_tile - 1)
+ continue;
+ if (master_connector->tile_group->id != tile_group_id)
+ continue;
+
+ master_conn_state = drm_atomic_get_connector_state(&state->base,
+ master_connector);
+ if (IS_ERR(master_conn_state)) {
+ drm_connector_list_iter_end(&conn_iter);
+ return PTR_ERR(master_conn_state);
+ }
+ if (master_conn_state->crtc) {
+ master_crtc = master_conn_state->crtc;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!master_crtc) {
+ DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
+ connector_state->crtc->base.id);
+ return -EINVAL;
+ }
+
+ master_crtc_state = drm_atomic_get_crtc_state(&state->base,
+ master_crtc);
+ if (IS_ERR(master_crtc_state))
+ return PTR_ERR(master_crtc_state);
+
+ master_pipe_config = to_intel_crtc_state(master_crtc_state);
+ crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
+ master_pipe_config->sync_mode_slaves_mask |=
+ BIT(crtc_state->cpu_transcoder);
+ DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
+ transcoder_name(crtc_state->master_transcoder),
+ crtc_state->base.crtc->base.id,
+ master_pipe_config->sync_mode_slaves_mask);
+ }
+
+ return 0;
+}
+
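The tile-group walk above designates the connector sitting at the last horizontal and last vertical tile position as the master. A standalone sketch of just that predicate (the struct below mimics the drm_connector tiling fields used above and the values are made up):

#include <stdio.h>

/* Mimics the drm_connector tile fields consulted above; not the real struct. */
struct tile_conn {
        int num_h_tile, num_v_tile;
        int tile_h_loc, tile_v_loc;
};

static int is_master_tile(const struct tile_conn *c)
{
        return c->tile_h_loc == c->num_h_tile - 1 &&
               c->tile_v_loc == c->num_v_tile - 1;
}

int main(void)
{
        /* 2x1 tiled display: the right-hand tile is the master. */
        struct tile_conn left  = { 2, 1, 0, 0 };
        struct tile_conn right = { 2, 1, 1, 0 };

        printf("left: %d, right: %d\n",
               is_master_tile(&left), is_master_tile(&right));
        return 0;
}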
+static int intel_crtc_atomic_check(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool mode_changed = needs_modeset(crtc_state);
int ret;
- bool mode_changed = needs_modeset(pipe_config);
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
- mode_changed && !crtc_state->active)
- pipe_config->update_wm_post = true;
+ mode_changed && !crtc_state->base.active)
+ crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->enable &&
+ if (mode_changed && crtc_state->base.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(pipe_config->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- pipe_config);
+ !WARN_ON(crtc_state->shared_dpll)) {
+ ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
}
@@ -11791,19 +12112,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* May need to update pipe gamma enable bits
* when C8 planes are getting enabled/disabled.
*/
- if (c8_planes_changed(pipe_config))
- crtc_state->color_mgmt_changed = true;
+ if (c8_planes_changed(crtc_state))
+ crtc_state->base.color_mgmt_changed = true;
- if (mode_changed || pipe_config->update_pipe ||
- crtc_state->color_mgmt_changed) {
- ret = intel_color_check(pipe_config);
+ if (mode_changed || crtc_state->update_pipe ||
+ crtc_state->base.color_mgmt_changed) {
+ ret = intel_color_check(crtc_state);
if (ret)
return ret;
}
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(pipe_config);
+ ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
return ret;
@@ -11819,7 +12140,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(pipe_config);
+ ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
@@ -11827,29 +12148,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || pipe_config->update_pipe)
- ret = skl_update_scaler_crtc(pipe_config);
-
- if (!ret)
- ret = icl_check_nv12_planes(pipe_config);
+ if (mode_changed || crtc_state->update_pipe)
+ ret = skl_update_scaler_crtc(crtc_state);
if (!ret)
- ret = skl_check_pipe_max_pixel_rate(intel_crtc,
- pipe_config);
- if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
- pipe_config);
+ ret = intel_atomic_setup_scalers(dev_priv, crtc,
+ crtc_state);
}
if (HAS_IPS(dev_priv))
- pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
+ crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
return ret;
}
-static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_check = intel_crtc_atomic_check,
-};
-
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -12159,6 +12470,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
+ if (IS_CHERRYVIEW(dev_priv))
+ DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+ else
+ DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
+
dump_planes:
if (!state)
return;
@@ -12179,6 +12499,12 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
bool ret = true;
/*
+ * We're going to peek into connector->state,
+ * hence connection_mutex must be held.
+ */
+ drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
+
+ /*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
@@ -12260,6 +12586,13 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
+ /*
+ * Save the slave bitmask, which gets filled in on the master crtc state
+ * during the slave's atomic check call.
+ */
+ if (is_trans_port_sync_master(crtc_state))
+ saved_state->sync_mode_slaves_mask =
+ crtc_state->sync_mode_slaves_mask;
/* Keep base drm_crtc_state intact, only clear our extended struct */
BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
@@ -12353,6 +12686,15 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
CRTC_STEREO_DOUBLE);
+ /* Set the crtc_state defaults for trans_port_sync */
+ pipe_config->master_transcoder = INVALID_TRANSCODER;
+ ret = icl_add_sync_mode_crtcs(pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
+ ret);
+ return ret;
+ }
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -12485,22 +12827,23 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
if ((drm_debug & DRM_UT_KMS) == 0)
return;
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
- drm_dbg(DRM_UT_KMS, "expected:");
+ DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
+ DRM_DEBUG_KMS("expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- drm_dbg(DRM_UT_KMS, "found");
+ DRM_DEBUG_KMS("found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- drm_err("mismatch in %s infoframe", name);
- drm_err("expected:");
+ DRM_ERROR("mismatch in %s infoframe\n", name);
+ DRM_ERROR("expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- drm_err("found");
+ DRM_ERROR("found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
-static void __printf(3, 4)
-pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
+static void __printf(4, 5)
+pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
+ const char *name, const char *format, ...)
{
struct va_format vaf;
va_list args;
@@ -12510,9 +12853,11 @@ pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
vaf.va = &args;
if (fastset)
- drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- drm_err("mismatch in %s %pV", name, &vaf);
+ DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
@@ -12540,7 +12885,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
bool fastset)
{
struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
bool ret = true;
+ u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
!(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
@@ -12552,8 +12899,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected 0x%08x, found 0x%08x)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected 0x%08x, found 0x%08x)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12562,8 +12909,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12572,8 +12919,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12589,8 +12936,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
PIPE_CONF_CHECK_BOOL(name); \
} else { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
yesno(current_config->name), \
yesno(pipe_config->name)); \
ret = false; \
@@ -12599,8 +12946,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %p, found %p)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %p, found %p)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12611,9 +12958,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
!fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12638,10 +12985,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
&pipe_config->name, !fastset) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, !fastset)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)\n", \
+ "found tu %i, gmch %i/%i link %i/%i)", \
current_config->name.tu, \
current_config->name.gmch_m, \
current_config->name.gmch_n, \
@@ -12663,8 +13010,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(%x) (expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(%x) (expected %i, found %i)", \
(mask), \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -12674,8 +13021,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, __stringify(name), \
- "(expected %i, found %i)\n", \
+ pipe_config_mismatch(fastset, crtc, __stringify(name), \
+ "(expected %i, found %i)", \
current_config->name, \
pipe_config->name); \
ret = false; \
@@ -12692,6 +13039,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
+ if (current_config->name1 != pipe_config->name1) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name1), \
+ "(expected %i, found %i, won't compare lut values)", \
+ current_config->name1, \
+ pipe_config->name1); \
+ ret = false;\
+ } else { \
+ if (!intel_color_lut_equal(current_config->name2, \
+ pipe_config->name2, pipe_config->name1, \
+ bit_precision)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(name2), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
+ } \
+ } \
+} while (0)
+
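Hand-expanded for reference, the PIPE_CONF_CHECK_COLOR_LUT(gamma_mode,
base.gamma_lut, bp_gamma) call made later in this function reduces to the
following (a sketch derived only from the macro body above):

	if (current_config->gamma_mode != pipe_config->gamma_mode) {
		pipe_config_mismatch(fastset, crtc, "gamma_mode",
				     "(expected %i, found %i, won't compare lut values)",
				     current_config->gamma_mode,
				     pipe_config->gamma_mode);
		ret = false;
	} else {
		if (!intel_color_lut_equal(current_config->base.gamma_lut,
					   pipe_config->base.gamma_lut,
					   pipe_config->gamma_mode,
					   bp_gamma)) {
			pipe_config_mismatch(fastset, crtc, "base.gamma_lut",
					     "hw_state doesn't match sw_state");
			ret = false;
		}
	}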
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -12730,6 +13095,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12738,6 +13104,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL(has_infoframe);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -12787,6 +13154,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(csc_mode);
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+
+ bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
+ if (bp_gamma)
+ PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma);
+
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12842,6 +13214,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
+ PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_I(master_transcoder);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12849,6 +13224,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
+#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK
return ret;
@@ -13160,7 +13536,7 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->slave ||
+ assert_plane(plane, plane_state->planar_slave ||
plane_state->base.visible);
}
@@ -13276,10 +13652,15 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
verify_disabled_dpll_state(dev_priv);
}
-static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
+static void
+intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ drm_calc_timestamping_constants(&crtc->base, adjusted_mode);
/*
* The scanline counter increments at the leading edge of hsync.
@@ -13309,7 +13690,6 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
* answer that's slightly in the future.
*/
if (IS_GEN(dev_priv, 2)) {
- const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -13320,8 +13700,9 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
} else if (HAS_DDI(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
- } else
+ } else {
crtc->scanline_offset = 1;
+ }
}
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
@@ -13403,158 +13784,43 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_lock_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /* Add all pipes to the state */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
- }
-
- return 0;
-}
-
-static int intel_modeset_all_pipes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc *crtc;
-
- /*
- * Add all pipes to the state, and force
- * a modeset on all the active ones.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state;
- int ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- if (!crtc_state->base.active || needs_modeset(crtc_state))
- continue;
-
- crtc_state->base.mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(&state->base,
- &crtc->base);
- if (ret)
- return ret;
-
- ret = drm_atomic_add_affected_planes(&state->base,
- &crtc->base);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int intel_modeset_checks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
- int ret = 0, i;
-
- if (!check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return -EINVAL;
- }
+ int ret, i;
/* keep the current setting */
if (!state->cdclk.force_min_cdclk_changed)
state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
state->modeset = true;
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
state->cdclk.logical = dev_priv->cdclk.logical;
state->cdclk.actual = dev_priv->cdclk.actual;
- state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (new_crtc_state->base.active)
- state->active_crtcs |= 1 << i;
+ state->active_pipes |= BIT(crtc->pipe);
else
- state->active_crtcs &= ~(1 << i);
+ state->active_pipes &= ~BIT(crtc->pipe);
if (old_crtc_state->base.active != new_crtc_state->base.active)
- state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
+ state->active_pipe_changes |= BIT(crtc->pipe);
}
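The switch from index-based to pipe-based bits matters once a pipe can be
fused off; a minimal sketch under that assumption:

	/* Assumed: pipe B fused off, so the crtc at state index 1 drives pipe C. */
	int i = 1;			/* position in the atomic state */
	enum pipe pipe = PIPE_C;	/* crtc->pipe on such a part */

	u8 by_index = 1 << i;		/* 0x2: would wrongly name pipe B */
	u8 by_pipe = BIT(pipe);		/* 0x4: the pipe actually driven */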
- /*
- * See if the config requires any additional preparation, e.g.
- * to adjust global state with pipes off. We need to do this
- * here so we can get the modeset_pipe updated config for the new
- * mode set on this crtc. For other crtcs we need to use the
- * adjusted_mode bits in the crtc directly.
- */
- if (dev_priv->display.modeset_calc_cdclk) {
- enum pipe pipe;
-
- ret = dev_priv->display.modeset_calc_cdclk(state);
- if (ret < 0)
+ if (state->active_pipe_changes) {
+ ret = intel_atomic_lock_global_state(state);
+ if (ret)
return ret;
-
- /*
- * Writes to dev_priv->cdclk.logical must protected by
- * holding all the crtc locks, even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_lock_all_pipes(state);
- if (ret < 0)
- return ret;
- }
-
- if (is_power_of_2(state->active_crtcs)) {
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
-
- pipe = ilog2(state->active_crtcs);
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (crtc_state && needs_modeset(crtc_state))
- pipe = INVALID_PIPE;
- } else {
- pipe = INVALID_PIPE;
- }
-
- /* All pipes must be switched off while we change the cdclk. */
- if (pipe != INVALID_PIPE &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_lock_all_pipes(state);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = pipe;
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(state);
- if (ret < 0)
- return ret;
-
- state->cdclk.pipe = INVALID_PIPE;
- }
-
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
}
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+
intel_modeset_clear_plls(state);
if (IS_HASWELL(dev_priv))
@@ -13603,6 +13869,114 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
+static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ u8 plane_ids_mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if ((plane_ids_mask & BIT(plane->id)) == 0)
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
+}
+
+static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
+{
+ /* See {hsw,vlv,ivb}_plane_ratio() */
+ return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv);
+}
+
+static int intel_atomic_check_planes(struct intel_atomic_state *state,
+ bool *need_modeset)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ ret = icl_add_linked_planes(state);
+ if (ret)
+ return ret;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_atomic_check(state, plane);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
+ return ret;
+ }
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ u8 old_active_planes, new_active_planes;
+
+ ret = icl_check_nv12_planes(new_crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * On some platforms the number of active planes affects
+ * the planes' minimum cdclk calculation. Add such planes
+ * to the state before we compute the minimum cdclk.
+ */
+ if (!active_planes_affects_min_cdclk(dev_priv))
+ continue;
+
+ old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+
+ if (hweight8(old_active_planes) == hweight8(new_active_planes))
+ continue;
+
+ ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * active_planes bitmask has been updated, and potentially
+ * affected planes are part of the state. We can now
+ * compute the minimum cdclk for each plane.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+
+ return 0;
+}
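The hweight8() comparison above deliberately keys on the plane count, not the
plane set; a sketch with assumed masks:

	/* Assumed active_planes masks, cursor bit already cleared: */
	u8 old_active = 0x3;	/* primary + sprite 0 */
	u8 new_active = 0x5;	/* primary + sprite 1 */

	/*
	 * hweight8(0x3) == hweight8(0x5) == 2, so no extra planes are
	 * pulled into the state: the plane-ratio based min-cdclk rules
	 * only depend on how many planes are active, not which ones.
	 */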
+
+static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ int ret = intel_crtc_atomic_check(state, crtc);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
* intel_atomic_check - validate state object
* @dev: drm device
@@ -13616,7 +13990,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = state->cdclk.force_min_cdclk_changed;
+ bool any_ms = false;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -13650,10 +14024,22 @@ static int intel_atomic_check(struct drm_device *dev,
any_ms = true;
}
+ if (any_ms && !check_digital_port_conflicts(state)) {
+ DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
ret = drm_dp_mst_atomic_check(&state->base);
if (ret)
goto fail;
+ any_ms |= state->cdclk.force_min_cdclk_changed;
+
+ ret = intel_atomic_check_planes(state, &any_ms);
+ if (ret)
+ goto fail;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
@@ -13662,11 +14048,7 @@ static int intel_atomic_check(struct drm_device *dev,
state->cdclk.logical = dev_priv->cdclk.logical;
}
- ret = icl_add_linked_planes(state);
- if (ret)
- goto fail;
-
- ret = drm_atomic_helper_check_planes(dev, &state->base);
+ ret = intel_atomic_check_crtcs(state);
if (ret)
goto fail;
@@ -13724,20 +14106,100 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
+void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!IS_GEN(dev_priv, 2))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
+
+ if (crtc_state->has_pch_encoder) {
+ enum pipe pch_transcoder =
+ intel_crtc_pch_transcoder(crtc);
+
+ intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
+ }
+}
+
+static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ /*
+ * Update pipe size and adjust fitter if needed: the reason for this is
+ * that in compute_mode_changes we check the native mode (not the pfit
+ * mode) to see if we can flip rather than do a full mode set. In the
+ * fastboot case, we'll flip, but if we don't update the pipesrc and
+ * pfit state, we'll end up with a big fb scanned out into the wrong
+ * sized surface.
+ */
+ intel_set_pipe_src_size(new_crtc_state);
+
+ /* on skylake this is done by detaching scalers */
+ if (INTEL_GEN(dev_priv) >= 9) {
+ skl_detach_scalers(new_crtc_state);
+
+ if (new_crtc_state->pch_pfit.enabled)
+ skylake_pfit_enable(new_crtc_state);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (new_crtc_state->pch_pfit.enabled)
+ ironlake_pfit_enable(new_crtc_state);
+ else if (old_crtc_state->pch_pfit.enabled)
+ ironlake_pfit_disable(old_crtc_state);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_set_pipe_chicken(crtc);
+}
+
+static void commit_pipe_config(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ bool modeset = needs_modeset(new_crtc_state);
+
+ /*
+ * During modesets pipe configuration was programmed as the
+ * CRTC was enabled.
+ */
+ if (!modeset) {
+ if (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_detach_scalers(new_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+ bdw_set_pipemisc(new_crtc_state);
+
+ if (new_crtc_state->update_pipe)
+ intel_pipe_fastset(old_crtc_state, new_crtc_state);
+ }
+
+ if (dev_priv->display.atomic_update_watermarks)
+ dev_priv->display.atomic_update_watermarks(state,
+ new_crtc_state);
+}
+
static void intel_update_crtc(struct intel_crtc *crtc,
struct intel_atomic_state *state,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state,
to_intel_plane(crtc->base.primary));
if (modeset) {
- update_scanline_offset(new_crtc_state);
+ intel_crtc_update_active_timings(new_crtc_state);
+
dev_priv->display.crtc_enable(new_crtc_state, state);
/* vblanks work again, re-enable pipe CRC. */
@@ -13759,17 +14221,151 @@ static void intel_update_crtc(struct intel_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
- intel_begin_crtc_commit(state, crtc);
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(state, crtc);
else
i9xx_update_planes_on_crtc(state, crtc);
- intel_finish_crtc_commit(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev);
+ enum transcoder slave_transcoder;
+
+ WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+
+ slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
+ return intel_get_crtc_for_pipe(dev_priv,
+ (enum pipe)slave_transcoder);
}
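A worked sketch of the ffs() mapping, assuming TRANSCODER_B carries the value
1 as in the upstream enum:

	u8 mask = BIT(TRANSCODER_B);		/* 0x2 */
	enum transcoder slave = ffs(mask) - 1;	/* ffs(0x2) == 2, so slave == 1 */
	/* The transcoder number is then reused verbatim as (enum pipe)1, i.e. pipe B. */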
-static void intel_update_crtcs(struct intel_atomic_state *state)
+static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_disable_planes(state, crtc);
+
+ /*
+ * We need to disable pipe CRC before disabling the pipe,
+ * or we race against vblank off.
+ */
+ intel_crtc_disable_pipe_crc(crtc);
+
+ dev_priv->display.crtc_disable(old_crtc_state, state);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
+
+ /*
+ * Underruns don't always raise interrupts,
+ * so check manually.
+ */
+ intel_check_cpu_fifo_underruns(dev_priv);
+ intel_check_pch_fifo_underruns(dev_priv);
+
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->base.active &&
+ !HAS_GMCH(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(state,
+ new_crtc_state);
+}
+
+static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ /* Disable Slave first */
+ intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state);
+ if (old_slave_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_slave_crtc_state,
+ new_slave_crtc_state,
+ slave_crtc);
+
+ /* Disable Master */
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
+ if (old_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_crtc_state,
+ new_crtc_state,
+ crtc);
+}
+
+static void intel_commit_modeset_disables(struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ /*
+ * Disable CRTCs/pipes in reverse order because some features (MST in
+ * TGL+) require a master/slave relationship between pipes: the lowest
+ * pipe is always picked as the master and enabled first, so disabling
+ * in reverse order guarantees the master is the last one to be
+ * disabled.
+ */
+ for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+ /* With Transcoder Port Sync, master and slave CRTCs can be
+ * assigned in any order, so make sure the slave CRTCs are
+ * disabled first and the master CRTC last, since slave
+ * vblanks are masked until the master's vblank.
+ */
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ if (is_trans_port_sync_master(new_crtc_state))
+ intel_trans_port_sync_modeset_disables(state,
+ crtc,
+ old_crtc_state,
+ new_crtc_state);
+ else
+ continue;
+ } else {
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
+
+ if (old_crtc_state->base.active)
+ intel_old_crtc_state_disables(state,
+ old_crtc_state,
+ new_crtc_state,
+ crtc);
+ }
+ }
+}
+
+static void intel_commit_modeset_enables(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -13784,14 +14380,120 @@ static void intel_update_crtcs(struct intel_atomic_state *state)
}
}
-static void skl_update_crtcs(struct intel_atomic_state *state)
+static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ intel_crtc_update_active_timings(new_crtc_state);
+ dev_priv->display.crtc_enable(new_crtc_state, state);
+ intel_crtc_enable_pipe_crc(crtc);
+}
+
+static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct drm_connector *uninitialized_var(conn);
+ struct drm_connector_state *conn_state;
+ struct intel_dp *intel_dp;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ if (conn_state->crtc == &crtc->base)
+ break;
+ }
+ intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base);
+ intel_dp_stop_link_train(intel_dp);
+}
+
+static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
+ struct intel_atomic_state *state)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
+ bool modeset = needs_modeset(new_crtc_state);
+
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
+ else if (new_plane_state)
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+
+ /* Perform vblank evasion around commit operation */
+ intel_pipe_update_start(new_crtc_state);
+ commit_pipe_config(state, old_crtc_state, new_crtc_state);
+ skl_update_planes_on_crtc(state, crtc);
+ intel_pipe_update_end(new_crtc_state);
+
+ /*
+ * We usually enable FIFO underrun interrupts as part of the
+ * CRTC enable sequence during modesets. But when we inherit a
+ * valid pipe configuration from the BIOS we need to take care
+ * of enabling them on the CRTC's first fastset.
+ */
+ if (new_crtc_state->update_pipe && !modeset &&
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+}
+
+static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
+ struct intel_crtc_state *new_slave_crtc_state =
+ intel_atomic_get_new_crtc_state(state, slave_crtc);
+ struct intel_crtc_state *old_slave_crtc_state =
+ intel_atomic_get_old_crtc_state(state, slave_crtc);
+
+ WARN_ON(!slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
+
+ DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
+ slave_crtc->base.name);
+
+ /* Enable sequence for the slave, with DP_TP_CTL left Idle until
+ * the master is ready
+ */
+ intel_crtc_enable_trans_port_sync(slave_crtc,
+ state,
+ new_slave_crtc_state);
+
+ /* Enable sequence for the master, with DP_TP_CTL left Idle */
+ intel_crtc_enable_trans_port_sync(crtc,
+ state,
+ new_crtc_state);
+
+ /* Set Slave's DP_TP_CTL to Normal */
+ intel_set_dp_tp_ctl_normal(slave_crtc,
+ state);
+
+ /* Set Master's DP_TP_CTL To Normal */
+ usleep_range(200, 400);
+ intel_set_dp_tp_ctl_normal(crtc,
+ state);
+
+ /* Now do the post-enable updates for the slave and the master */
+ intel_post_crtc_enable_updates(slave_crtc,
+ state);
+ intel_post_crtc_enable_updates(crtc,
+ state);
+}
+
+static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
unsigned int updated = 0;
bool progress;
- enum pipe pipe;
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = state->wm_results.ddb.enabled_slices;
@@ -13816,20 +14518,19 @@ static void skl_update_crtcs(struct intel_atomic_state *state)
progress = false;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
bool vbl_wait = false;
- unsigned int cmask = drm_crtc_mask(&crtc->base);
-
- pipe = crtc->pipe;
+ bool modeset = needs_modeset(new_crtc_state);
- if (updated & cmask || !new_crtc_state->base.active)
+ if (updated & BIT(pipe) || !new_crtc_state->base.active)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries,
- INTEL_INFO(dev_priv)->num_pipes, i))
+ INTEL_NUM_PIPES(dev_priv), i))
continue;
- updated |= cmask;
+ updated |= BIT(pipe);
entries[i] = new_crtc_state->wm.skl.ddb;
/*
@@ -13840,12 +14541,22 @@ static void skl_update_crtcs(struct intel_atomic_state *state)
*/
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
- !new_crtc_state->base.active_changed &&
+ !modeset &&
state->wm_results.dirty_pipes != updated)
vbl_wait = true;
- intel_update_crtc(crtc, state, old_crtc_state,
- new_crtc_state);
+ if (modeset && is_trans_port_sync_mode(new_crtc_state)) {
+ if (is_trans_port_sync_master(new_crtc_state))
+ intel_update_trans_port_sync_crtcs(crtc,
+ state,
+ old_crtc_state,
+ new_crtc_state);
+ else
+ continue;
+ } else {
+ intel_update_crtc(crtc, state, old_crtc_state,
+ new_crtc_state);
+ }
if (vbl_wait)
intel_wait_for_vblank(dev_priv, pipe);
@@ -13934,49 +14645,18 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
if (needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
put_domains[crtc->pipe] =
modeset_get_crtc_power_domains(new_crtc_state);
}
-
- if (!needs_modeset(new_crtc_state))
- continue;
-
- intel_pre_plane_update(old_crtc_state, new_crtc_state);
-
- if (old_crtc_state->base.active) {
- intel_crtc_disable_planes(state, crtc);
-
- /*
- * We need to disable pipe CRC before disabling the pipe,
- * or we race against vblank off.
- */
- intel_crtc_disable_pipe_crc(crtc);
-
- dev_priv->display.crtc_disable(old_crtc_state, state);
- crtc->active = false;
- intel_fbc_disable(crtc);
- intel_disable_shared_dpll(old_crtc_state);
-
- /*
- * Underruns don't always raise
- * interrupts, so check manually.
- */
- intel_check_cpu_fifo_underruns(dev_priv);
- intel_check_pch_fifo_underruns(dev_priv);
-
- /* FIXME unify this for all platforms */
- if (!new_crtc_state->base.active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state,
- new_crtc_state);
- }
}
+ intel_commit_modeset_disables(state);
+
/* FIXME: Eventually get rid of our crtc->config pointer */
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
crtc->config = new_crtc_state;
@@ -14017,7 +14697,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.update_crtcs(state);
+ dev_priv->display.commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
@@ -14148,6 +14828,14 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
+static void assert_global_state_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *_state,
bool nonblock)
@@ -14213,12 +14901,14 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (state->modeset) {
+ if (state->global_state_changed) {
+ assert_global_state_locked(dev_priv);
+
memcpy(dev_priv->min_cdclk, state->min_cdclk,
sizeof(state->min_cdclk));
memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
sizeof(state->min_voltage_level));
- dev_priv->active_crtcs = state->active_crtcs;
+ dev_priv->active_pipes = state->active_pipes;
dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
intel_cdclk_swap_state(state);
@@ -14231,7 +14921,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (nonblock && state->modeset) {
queue_work(dev_priv->modeset_wq, &state->base.commit_work);
} else if (nonblock) {
- queue_work(system_unbound_wq, &state->base.commit_work);
+ queue_work(dev_priv->flip_wq, &state->base.commit_work);
} else {
if (state->modeset)
flush_workqueue(dev_priv->modeset_wq);
@@ -14260,7 +14950,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -14341,7 +15031,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
struct i915_sched_attr attr = {
- .priority = I915_PRIORITY_DISPLAY,
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
};
i915_gem_object_wait_priority(obj, 0, &attr);
@@ -14350,25 +15040,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
- * @new_state: the plane state being prepared
+ * @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
* involves pinning the underlying object and updating the frontbuffer tracking
* bits. Some older platforms need special physical address handling for
* cursor planes.
*
- * Must be called with struct_mutex held.
- *
* Returns 0 on success, negative error code on failure.
*/
int
intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(new_state->state);
+ to_intel_atomic_state(new_plane_state->base.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_state->fb;
+ struct drm_framebuffer *fb = new_plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
@@ -14399,9 +15089,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
- if (new_state->fence) { /* explicit fencing */
+ if (new_plane_state->base.fence) { /* explicit fencing */
ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
- new_state->fence,
+ new_plane_state->base.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
@@ -14415,15 +15105,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
return ret;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- return ret;
- }
-
- ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
+ ret = intel_plane_pin_fb(new_plane_state);
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
@@ -14431,7 +15114,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fb_obj_bump_render_priority(obj);
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
- if (!new_state->fence) { /* implicit fencing */
+ if (!new_plane_state->base.fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
@@ -14443,11 +15126,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
- add_rps_boost_after_vblank(new_state->crtc, fence);
+ add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ fence);
dma_fence_put(fence);
}
} else {
- add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
+ add_rps_boost_after_vblank(new_plane_state->base.crtc,
+ new_plane_state->base.fence);
}
/*
@@ -14459,7 +15144,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, true);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
intel_state->rps_interactive = true;
}
@@ -14469,130 +15154,27 @@ intel_prepare_plane_fb(struct drm_plane *plane,
/**
* intel_cleanup_plane_fb - Cleans up an fb after plane use
* @plane: drm plane to clean up for
- * @old_state: the state from the previous modeset
+ * @_old_plane_state: the state from the previous modeset
*
* Cleans up a framebuffer that has just been removed from a plane.
- *
- * Must be called with struct_mutex held.
*/
void
intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *_old_plane_state)
{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
struct intel_atomic_state *intel_state =
- to_intel_atomic_state(old_state->state);
+ to_intel_atomic_state(old_plane_state->base.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
if (intel_state->rps_interactive) {
- intel_rps_mark_interactive(dev_priv, false);
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
intel_state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_plane_unpin_fb(to_intel_plane_state(old_state));
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int
-skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int max_scale, mult;
- int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
-
- if (!crtc_state->base.enable)
- return DRM_PLANE_HELPER_NO_SCALING;
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- max_dotclk *= 2;
-
- if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
- return DRM_PLANE_HELPER_NO_SCALING;
-
- /*
- * skl max scale is lower of:
- * close to 3 but not 3, -1 is for that purpose
- * or
- * cdclk/crtc_clock
- */
- mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
- tmpclk1 = (1 << 16) * mult - 1;
- tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
- max_scale = min(tmpclk1, tmpclk2);
-
- return max_scale;
-}
-
-static void intel_begin_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(new_crtc_state);
-
- /* Perform vblank evasion around commit operation */
- intel_pipe_update_start(new_crtc_state);
-
- if (modeset)
- goto out;
-
- if (new_crtc_state->base.color_mgmt_changed ||
- new_crtc_state->update_pipe)
- intel_color_commit(new_crtc_state);
-
- if (new_crtc_state->update_pipe)
- intel_update_pipe_config(old_crtc_state, new_crtc_state);
- else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(new_crtc_state);
-
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- bdw_set_pipemisc(new_crtc_state);
-
-out:
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state,
- new_crtc_state);
-}
-
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (!IS_GEN(dev_priv, 2))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
-
- if (crtc_state->has_pch_encoder) {
- enum pipe pch_transcoder =
- intel_crtc_pch_transcoder(crtc);
-
- intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
- }
-}
-
-static void intel_finish_crtc_commit(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- intel_pipe_update_end(new_crtc_state);
-
- if (new_crtc_state->update_pipe &&
- !needs_modeset(new_crtc_state) &&
- old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
+ intel_plane_unpin_fb(old_plane_state);
}
/**
@@ -14649,6 +15231,7 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED;
default:
@@ -14682,8 +15265,8 @@ static const struct drm_plane_funcs i8xx_plane_funcs = {
};
static int
-intel_legacy_cursor_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
+intel_legacy_cursor_update(struct drm_plane *_plane,
+ struct drm_crtc *_crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -14691,11 +15274,13 @@ intel_legacy_cursor_update(struct drm_plane *plane,
u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *new_plane_state;
struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->state);
+ to_intel_crtc_state(crtc->base.state);
struct intel_crtc_state *new_crtc_state;
int ret;
@@ -14707,14 +15292,13 @@ intel_legacy_cursor_update(struct drm_plane *plane,
crtc_state->update_pipe)
goto slow;
- old_plane_state = plane->state;
/*
* Don't do an async update if there is an outstanding commit modifying
* the plane. This prevents our async update's changes from getting
* overridden by a previous synchronous update's state.
*/
- if (old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ if (old_plane_state->base.commit &&
+ !try_wait_for_completion(&old_plane_state->base.commit->hw_done))
goto slow;
/*
@@ -14722,56 +15306,51 @@ intel_legacy_cursor_update(struct drm_plane *plane,
* take the slowpath. Only changing fb or position should be
* in the fastpath.
*/
- if (old_plane_state->crtc != crtc ||
- old_plane_state->src_w != src_w ||
- old_plane_state->src_h != src_h ||
- old_plane_state->crtc_w != crtc_w ||
- old_plane_state->crtc_h != crtc_h ||
- !old_plane_state->fb != !fb)
+ if (old_plane_state->base.crtc != &crtc->base ||
+ old_plane_state->base.src_w != src_w ||
+ old_plane_state->base.src_h != src_h ||
+ old_plane_state->base.crtc_w != crtc_w ||
+ old_plane_state->base.crtc_h != crtc_h ||
+ !old_plane_state->base.fb != !fb)
goto slow;
- new_plane_state = intel_plane_duplicate_state(plane);
+ new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
if (!new_plane_state)
return -ENOMEM;
- new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
if (!new_crtc_state) {
ret = -ENOMEM;
goto out_free;
}
- drm_atomic_set_fb_for_plane(new_plane_state, fb);
+ drm_atomic_set_fb_for_plane(&new_plane_state->base, fb);
- new_plane_state->src_x = src_x;
- new_plane_state->src_y = src_y;
- new_plane_state->src_w = src_w;
- new_plane_state->src_h = src_h;
- new_plane_state->crtc_x = crtc_x;
- new_plane_state->crtc_y = crtc_y;
- new_plane_state->crtc_w = crtc_w;
- new_plane_state->crtc_h = crtc_h;
+ new_plane_state->base.src_x = src_x;
+ new_plane_state->base.src_y = src_y;
+ new_plane_state->base.src_w = src_w;
+ new_plane_state->base.src_h = src_h;
+ new_plane_state->base.crtc_x = crtc_x;
+ new_plane_state->base.crtc_y = crtc_y;
+ new_plane_state->base.crtc_w = crtc_w;
+ new_plane_state->base.crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ old_plane_state, new_plane_state);
if (ret)
goto out_free;
- ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ ret = intel_plane_pin_fb(new_plane_state);
if (ret)
goto out_free;
- ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
- if (ret)
- goto out_unlock;
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
- intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
- to_intel_frontbuffer(fb),
- intel_plane->frontbuffer_bit);
+ intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP);
+ intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
+ to_intel_frontbuffer(new_plane_state->base.fb),
+ plane->frontbuffer_bit);
/* Swap plane state */
- plane->state = new_plane_state;
+ plane->base.state = &new_plane_state->base;
/*
* We cannot swap crtc_state as it may be in use by an atomic commit or
@@ -14785,27 +15364,24 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible)
- intel_update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
+ if (new_plane_state->base.visible)
+ intel_update_plane(plane, crtc_state, new_plane_state);
else
- intel_disable_plane(intel_plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
- intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
+ intel_plane_unpin_fb(old_plane_state);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
if (new_crtc_state)
- intel_crtc_destroy_state(crtc, &new_crtc_state->base);
+ intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base);
if (ret)
- intel_plane_destroy_state(plane, new_plane_state);
+ intel_plane_destroy_state(&plane->base, &new_plane_state->base);
else
- intel_plane_destroy_state(plane, old_plane_state);
+ intel_plane_destroy_state(&plane->base, &old_plane_state->base);
return ret;
slow:
- return drm_atomic_helper_update_plane(plane, crtc, fb,
+ return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, ctx);
}
@@ -14846,7 +15422,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -14876,8 +15452,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (INTEL_GEN(dev_priv) >= 4) {
- formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
+ /*
+ * WaFP16GammaEnabling:ivb
+ * "Workaround : When using the 64-bit format, the plane
+ * output on each color channel has one quarter amplitude.
+ * It can be brought up to full amplitude by using pipe
+ * gamma correction or pipe color space conversion to
+ * multiply the plane output by four."
+ *
+ * There is no dedicated plane gamma for the primary plane,
+ * and using the pipe gamma/csc could conflict with other
+ * planes, so we choose not to expose fp16 on IVB primary
+ * planes. HSW primary planes no longer have this problem.
+ */
+ if (IS_IVYBRIDGE(dev_priv)) {
+ formats = ivb_primary_formats;
+ num_formats = ARRAY_SIZE(ivb_primary_formats);
+ } else {
+ formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
+ }
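A hypothetical sketch of the IVB table; the real ivb_primary_formats array is
defined earlier in this file and is not visible in this hunk, so the entries
below are an assumption based on the workaround comment above:

	static const u32 ivb_primary_formats[] = {
		DRM_FORMAT_C8,
		DRM_FORMAT_RGB565,
		DRM_FORMAT_XRGB8888,
		DRM_FORMAT_XBGR8888,
		DRM_FORMAT_XRGB2101010,
		DRM_FORMAT_XBGR2101010,
		/* no DRM_FORMAT_XBGR16161616F here: WaFP16GammaEnabling */
	};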
modifiers = i9xx_format_modifiers;
plane->max_stride = i9xx_plane_max_stride;
@@ -14886,6 +15480,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else if (IS_IVYBRIDGE(dev_priv))
+ plane->min_cdclk = ivb_plane_min_cdclk;
+ else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv))
+ plane->min_cdclk = vlv_plane_min_cdclk;
+ else
+ plane->min_cdclk = i9xx_plane_min_cdclk;
+
plane_funcs = &i965_plane_funcs;
} else {
formats = i8xx_primary_formats;
@@ -14897,6 +15500,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->disable_plane = i9xx_disable_plane;
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ plane->min_cdclk = i9xx_plane_min_cdclk;
plane_funcs = &i8xx_plane_funcs;
}
@@ -14935,6 +15539,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
DRM_MODE_ROTATE_0,
supported_rotations);
+ zpos = 0;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -14951,7 +15558,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
{
unsigned int possible_crtcs;
struct intel_plane *cursor;
- int ret;
+ int ret, zpos;
cursor = intel_plane_alloc();
if (IS_ERR(cursor))
@@ -15000,6 +15607,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
+ zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
+ drm_plane_create_zpos_immutable_property(&cursor->base, zpos);
+
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
@@ -15075,12 +15685,12 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.disable_vblank = i965_disable_vblank,
};
-static const struct drm_crtc_funcs i945gm_crtc_funcs = {
+static const struct drm_crtc_funcs i915gm_crtc_funcs = {
INTEL_CRTC_FUNCS,
.get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i945gm_enable_vblank,
- .disable_vblank = i945gm_disable_vblank,
+ .enable_vblank = i915gm_enable_vblank,
+ .disable_vblank = i915gm_disable_vblank,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -15151,8 +15761,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
funcs = &g4x_crtc_funcs;
else if (IS_GEN(dev_priv, 4))
funcs = &i965_crtc_funcs;
- else if (IS_I945GM(dev_priv))
- funcs = &i945gm_crtc_funcs;
+ else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
+ funcs = &i915gm_crtc_funcs;
else if (IS_GEN(dev_priv, 3))
funcs = &i915_crtc_funcs;
else
@@ -15187,8 +15797,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
}
- drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
intel_color_init(intel_crtc);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -15223,21 +15831,32 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int intel_encoder_clones(struct intel_encoder *encoder)
+static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_encoder *source_encoder;
- int index_mask = 0;
- int entry = 0;
+ u32 possible_clones = 0;
for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
- index_mask |= (1 << entry);
+ possible_clones |= drm_encoder_mask(&source_encoder->base);
+ }
- entry++;
+ return possible_clones;
+}
+
+static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crtc *crtc;
+ u32 possible_crtcs = 0;
+
+ for_each_intel_crtc(dev, crtc) {
+ if (encoder->pipe_mask & BIT(crtc->pipe))
+ possible_crtcs |= drm_crtc_mask(&crtc->base);
}
- return index_mask;
+ return possible_crtcs;
}
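A small usage sketch for the new helper, with assumed mask values:

	/* Assumed: drm_crtc_mask() of the pipe A/B crtcs is 0x1/0x2. */
	encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	/* intel_encoder_possible_crtcs(encoder) == 0x1 | 0x2 == 0x3 */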
static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
@@ -15319,13 +15938,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return;
if (INTEL_GEN(dev_priv) >= 12) {
- /* TODO: initialize TC ports as well */
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_D);
+ intel_ddi_init(dev_priv, PORT_E);
+ intel_ddi_init(dev_priv, PORT_F);
+ intel_ddi_init(dev_priv, PORT_G);
+ intel_ddi_init(dev_priv, PORT_H);
+ intel_ddi_init(dev_priv, PORT_I);
icl_dsi_init(dev_priv);
} else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
@@ -15535,9 +16159,10 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_psr_init(dev_priv);
for_each_intel_encoder(&dev_priv->drm, encoder) {
- encoder->base.possible_crtcs = encoder->crtc_mask;
+ encoder->base.possible_crtcs =
+ intel_encoder_possible_crtcs(encoder);
encoder->base.possible_clones =
- intel_encoder_clones(encoder);
+ intel_encoder_possible_clones(encoder);
}
intel_init_pch_refclk(dev_priv);
@@ -15792,8 +16417,14 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ /* Transcoder timing limits */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ hdisplay_max = 16384;
+ vdisplay_max = 8192;
+ htotal_max = 16384;
+ vtotal_max = 8192;
+ } else if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
vdisplay_max = 4096;
htotal_max = 8192;
@@ -15822,6 +16453,56 @@ intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ if (INTEL_GEN(dev_priv) >= 5) {
+ if (mode->hdisplay < 64 ||
+ mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 5)
+ return MODE_V_ILLEGAL;
+ } else {
+ if (mode->htotal - mode->hdisplay < 32)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vtotal - mode->vdisplay < 3)
+ return MODE_V_ILLEGAL;
+ }
+
+ return MODE_OK;
+}
+
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode)
+{
+ int plane_width_max, plane_height_max;
+
+ /*
+ * intel_mode_valid() should be
+ * sufficient on older platforms.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ return MODE_OK;
+
+ /*
+ * Most people will probably want a fullscreen
+ * plane so let's not advertise modes that are
+ * too big for that.
+ */
+ if (INTEL_GEN(dev_priv) >= 11) {
+ plane_width_max = 5120;
+ plane_height_max = 4320;
+ } else {
+ plane_width_max = 5120;
+ plane_height_max = 4096;
+ }
+
+ if (mode->hdisplay > plane_width_max)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > plane_height_max)
+ return MODE_V_ILLEGAL;
+
return MODE_OK;
}
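A usage sketch for the new helper, assuming a gen11 device and an 8K mode:

	struct drm_display_mode mode = {
		.hdisplay = 7680,
		.vdisplay = 4320,
	};

	/*
	 * 7680 > 5120, so this returns MODE_H_ILLEGAL even though the
	 * gen11 transcoder limits above (16384x8192) would accept it.
	 */
	enum drm_mode_status status =
		intel_mode_valid_max_plane_size(dev_priv, &mode);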
@@ -15925,47 +16606,17 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
}
if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.update_crtcs = skl_update_crtcs;
- else
- dev_priv->display.update_crtcs = intel_update_crtcs;
-}
-
-static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return VLV_VGACNTRL;
- else if (INTEL_GEN(dev_priv) >= 5)
- return CPU_VGACNTRL;
+ dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
else
- return VGACNTRL;
-}
-
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u8 sr1;
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
+ dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
- /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(SR01, VGA_SR_INDEX);
- sr1 = inb(VGA_SR_DATA);
- outb(sr1 | 1<<5, VGA_SR_DATA);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
-
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
}
-void intel_modeset_init_hw(struct drm_device *dev)
+void intel_modeset_init_hw(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
- dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
+ intel_update_cdclk(i915);
+ intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
+ i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
}
/*
@@ -16125,114 +16776,111 @@ out:
return ret;
}
-int intel_modeset_init(struct drm_device *dev)
+static void intel_mode_config_init(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe;
- struct intel_crtc *crtc;
- int ret;
+ struct drm_mode_config *mode_config = &i915->drm.mode_config;
- dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ drm_mode_config_init(&i915->drm);
- drm_mode_config_init(dev);
+ mode_config->min_width = 0;
+ mode_config->min_height = 0;
- ret = intel_bw_init(dev_priv);
- if (ret)
- return ret;
-
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
-
- dev->mode_config.preferred_depth = 24;
- dev->mode_config.prefer_shadow = 1;
-
- dev->mode_config.allow_fb_modifiers = true;
-
- dev->mode_config.funcs = &intel_mode_funcs;
-
- init_llist_head(&dev_priv->atomic_helper.free_list);
- INIT_WORK(&dev_priv->atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
+ mode_config->preferred_depth = 24;
+ mode_config->prefer_shadow = 1;
- intel_init_quirks(dev_priv);
+ mode_config->allow_fb_modifiers = true;
- intel_fbc_init(dev_priv);
-
- intel_init_pm(dev_priv);
-
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
-
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
- bios_lvds_use_ssc ? "en" : "dis",
- dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
- }
- }
+ mode_config->funcs = &intel_mode_funcs;
/*
* Maximum framebuffer dimensions, chosen to match
* the maximum render engine surface size on gen4+.
*/
- if (INTEL_GEN(dev_priv) >= 7) {
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
+ if (INTEL_GEN(i915) >= 7) {
+ mode_config->max_width = 16384;
+ mode_config->max_height = 16384;
+ } else if (INTEL_GEN(i915) >= 4) {
+ mode_config->max_width = 8192;
+ mode_config->max_height = 8192;
+ } else if (IS_GEN(i915, 3)) {
+ mode_config->max_width = 4096;
+ mode_config->max_height = 4096;
} else {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ mode_config->max_width = 2048;
+ mode_config->max_height = 2048;
}
- if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
- dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
- dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN(dev_priv, 2)) {
- dev->mode_config.cursor_width = 64;
- dev->mode_config.cursor_height = 64;
+ if (IS_I845G(i915) || IS_I865G(i915)) {
+ mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ mode_config->cursor_height = 1023;
+ } else if (IS_GEN(i915, 2)) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
} else {
- dev->mode_config.cursor_width = 256;
- dev->mode_config.cursor_height = 256;
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
}
+}
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_INFO(dev_priv)->num_pipes,
- INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
- for_each_pipe(dev_priv, pipe) {
- ret = intel_crtc_init(dev_priv, pipe);
- if (ret) {
- drm_mode_config_cleanup(dev);
- return ret;
+ i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+
+ intel_mode_config_init(i915);
+
+ ret = intel_bw_init(i915);
+ if (ret)
+ return ret;
+
+ init_llist_head(&i915->atomic_helper.free_list);
+ INIT_WORK(&i915->atomic_helper.free_work,
+ intel_atomic_helper_free_state_worker);
+
+ intel_init_quirks(i915);
+
+ intel_fbc_init(i915);
+
+ intel_init_pm(i915);
+
+ intel_panel_sanitize_ssc(i915);
+
+ intel_gmbus_setup(i915);
+
+ DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
}
}
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(dev_priv);
+ intel_update_fdi_pll_freq(i915);
- intel_update_czclk(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_update_czclk(i915);
+ intel_modeset_init_hw(i915);
- intel_hdcp_component_init(dev_priv);
+ intel_hdcp_component_init(i915);
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev_priv);
+ if (i915->max_cdclk_freq == 0)
+ intel_update_max_cdclk(i915);
/* Just disable it once at startup */
- i915_disable_vga(dev_priv);
- intel_setup_outputs(dev_priv);
+ intel_vga_disable(i915);
+ intel_setup_outputs(i915);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
@@ -16251,8 +16899,7 @@ int intel_modeset_init(struct drm_device *dev)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display.get_initial_plane_config(crtc,
- &plane_config);
+ i915->display.get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -16266,7 +16913,7 @@ int intel_modeset_init(struct drm_device *dev)
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(i915))
sanitize_watermarks(dev);
/*
@@ -16591,39 +17238,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
icl_sanitize_encoder_pll_mapping(encoder);
}
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
-{
- i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
-
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
- i915_disable_vga(dev_priv);
- }
-}
-
-void i915_redisable_vga(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * This function can be called both from intel_modeset_setup_hw_state or
- * at a very early point in our resume sequence, where the power well
- * structures are not yet restored. Since this function is at a very
- * paranoid "someone might have enabled VGA while we were not looking"
- * level, just check if the power well is enabled instead of trying to
- * follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses.
- */
- wakeref = intel_display_power_get_if_enabled(dev_priv,
- POWER_DOMAIN_VGA);
- if (!wakeref)
- return;
-
- i915_redisable_vga_power_on(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
-}
-
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
@@ -16667,7 +17281,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct drm_connector_list_iter conn_iter;
int i;
- dev_priv->active_crtcs = 0;
+ dev_priv->active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -16684,7 +17298,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = crtc_state->base.active;
if (crtc_state->base.active)
- dev_priv->active_crtcs |= 1 << crtc->pipe;
+ dev_priv->active_pipes |= BIT(crtc->pipe);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
@@ -16744,24 +17358,28 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
connector->base.dpms = DRM_MODE_DPMS_ON;
encoder = connector->encoder;
connector->base.encoder = &encoder->base;
- if (encoder->base.crtc &&
- encoder->base.crtc->state->active) {
+ crtc = to_intel_crtc(encoder->base.crtc);
+ crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
+
+ if (crtc_state && crtc_state->base.active) {
/*
* This has to be done during hardware readout
* because anything calling .crtc_disable may
* rely on the connector_mask being accurate.
*/
- encoder->base.crtc->state->connector_mask |=
+ crtc_state->base.connector_mask |=
drm_connector_mask(&connector->base);
- encoder->base.crtc->state->encoder_mask |=
+ crtc_state->base.encoder_mask |=
drm_encoder_mask(&encoder->base);
}
-
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
@@ -16780,13 +17398,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_plane *plane;
int min_cdclk = 0;
- memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
- intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
- crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
- crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
- intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
+ struct drm_display_mode mode;
+
+ intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode,
+ crtc_state);
+
+ mode = crtc_state->base.adjusted_mode;
+ mode.hdisplay = crtc_state->pipe_src_w;
+ mode.vdisplay = crtc_state->pipe_src_h;
+ WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode));
/*
* The initial mode needs to be set in order to keep
@@ -16801,21 +17422,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_crtc_compute_pixel_rate(crtc_state);
- if (dev_priv->display.modeset_calc_cdclk) {
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
- min_cdclk = 0;
- }
-
- drm_calc_timestamping_constants(&crtc->base,
- &crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc_state);
+ intel_crtc_update_active_timings(crtc_state);
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
- crtc_state->min_voltage_level;
-
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
@@ -16827,8 +17436,34 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (plane_state->base.visible)
crtc_state->data_rate[plane->id] =
4 * crtc_state->pixel_rate;
+ /*
+ * FIXME don't have the fb yet, so can't
+ * use plane->min_cdclk() :(
+ */
+ if (plane_state->base.visible && plane->min_cdclk) {
+ if (crtc_state->double_wide ||
+ INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ crtc_state->min_cdclk[plane->id] =
+ DIV_ROUND_UP(crtc_state->pixel_rate, 2);
+ else
+ crtc_state->min_cdclk[plane->id] =
+ crtc_state->pixel_rate;
+ }
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
+ }
+
+ if (crtc_state->base.active) {
+ min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
+ if (WARN_ON(min_cdclk < 0))
+ min_cdclk = 0;
}
+ dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
+ dev_priv->min_voltage_level[crtc->pipe] =
+ crtc_state->min_voltage_level;
+
intel_bw_crtc_update(bw_state, crtc_state);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
@@ -17069,13 +17704,13 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-static void intel_hpd_poll_fini(struct drm_device *dev)
+static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
/* Kill all the work that may have been queued by hpd. */
- drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
@@ -17087,78 +17722,49 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_modeset_driver_remove(struct drm_device *dev)
+void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_workqueue(i915->flip_wq);
+ flush_workqueue(i915->modeset_wq);
- flush_workqueue(dev_priv->modeset_wq);
-
- flush_work(&dev_priv->atomic_helper.free_work);
- WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+ flush_work(&i915->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
/*
* Interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
- intel_irq_uninstall(dev_priv);
+ intel_irq_uninstall(i915);
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- intel_hpd_poll_fini(dev);
+ intel_hpd_poll_fini(i915);
/* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(dev_priv);
+ intel_fbdev_fini(i915);
intel_unregister_dsm_handler();
- intel_fbc_global_disable(dev_priv);
+ intel_fbc_global_disable(i915);
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- intel_hdcp_component_fini(dev_priv);
-
- drm_mode_config_cleanup(dev);
+ intel_hdcp_component_fini(i915);
- intel_overlay_cleanup(dev_priv);
+ drm_mode_config_cleanup(&i915->drm);
- intel_gmbus_teardown(dev_priv);
+ intel_overlay_cleanup(i915);
- destroy_workqueue(dev_priv->modeset_wq);
+ intel_gmbus_teardown(i915);
- intel_fbc_cleanup_cfb(dev_priv);
-}
+ destroy_workqueue(i915->flip_wq);
+ destroy_workqueue(i915->modeset_wq);
-/*
- * set vga decode state - true == enable VGA decode
- */
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
-{
- unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
- return -EIO;
- }
-
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
- return 0;
-
- if (state)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
- return -EIO;
- }
-
- return 0;
+ intel_fbc_cleanup_cfb(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -17221,7 +17827,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
@@ -17300,7 +17906,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
if (!error)
return;
- err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
+ err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 01fa87ad3270..f417e0948001 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2017 Intel Corporation
+ * Copyright © 2006-2019 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,8 +32,10 @@ enum link_m_n_set;
struct dpll;
struct drm_connector;
struct drm_device;
+struct drm_display_mode;
struct drm_encoder;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
@@ -52,6 +54,7 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
+struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -91,6 +94,7 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
+ INVALID_TRANSCODER = -1,
/*
* The following transcoders have a 1:1 transcoder -> pipe mapping,
* keep their values fixed: the code assumes that TRANSCODER_A=0, the
@@ -182,6 +186,24 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
/*
* Port identifiers referenced from other drivers.
* Expected to remain stable over time
@@ -251,6 +273,7 @@ enum aux_ch {
AUX_CH_D,
AUX_CH_E, /* ICL+ */
AUX_CH_F,
+ AUX_CH_G,
};
#define aux_ch_name(a) ((a) + 'A')
@@ -289,10 +312,10 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
for_each_if((__mask) & BIT(__p))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -330,7 +353,7 @@ enum phy_fia {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- drm_plane_mask(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -411,6 +434,23 @@ enum phy_fia {
(__i)++) \
for_each_if(crtc)
+#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \
+ (__i) >= 0 && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)--) \
+ for_each_if(crtc)
+
+#define intel_atomic_crtc_state_for_each_plane_state( \
+ plane, plane_state, \
+ crtc_state) \
+ for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \
+ ((crtc_state)->base.plane_mask)) \
+ for_each_if ((plane_state = \
+ to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base))))
+
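/*
 * Hypothetical usage sketch (editor's illustration; the visible-plane
 * count below is an assumed example, not taken from this patch): the
 * macro above walks the current plane states already attached to a
 * crtc_state.
 *
 *	struct intel_plane *plane;
 *	const struct intel_plane_state *plane_state;
 *	int num_visible = 0;
 *
 *	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
 *						     crtc_state)
 *		if (plane_state->base.visible)
 *			num_visible++;
 */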
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@@ -420,7 +460,11 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+enum drm_mode_status
+intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
+ const struct drm_display_mode *mode);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
+bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -464,7 +508,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
- const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -499,8 +542,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
@@ -520,8 +561,6 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
- u32 pixel_format);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
@@ -544,13 +583,10 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
/* modesetting */
-void intel_modeset_init_hw(struct drm_device *dev);
-int intel_modeset_init(struct drm_device *dev);
-void intel_modeset_driver_remove(struct drm_device *dev);
-int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
+void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init(struct drm_i915_private *i915);
+void intel_modeset_driver_remove(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
-void i915_redisable_vga(struct drm_i915_private *dev_priv);
-void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
/* modesetting asserts */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c002f234ff31..ce1b64f4dd44 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,8 +3,6 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/vgaarb.h>
-
#include "display/intel_crt.h"
#include "display/intel_dp.h"
@@ -19,16 +17,14 @@
#include "intel_hotplug.h"
#include "intel_sideband.h"
#include "intel_tc.h"
+#include "intel_vga.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
const char *
-intel_display_power_domain_str(struct drm_i915_private *i915,
- enum intel_display_power_domain domain)
+intel_display_power_domain_str(enum intel_display_power_domain domain)
{
- bool ddi_tc_ports = IS_GEN(i915, 12);
-
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
@@ -71,23 +67,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
- POWER_DOMAIN_PORT_DDI_TC1_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
+ return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
- POWER_DOMAIN_PORT_DDI_TC2_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
+ return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
- POWER_DOMAIN_PORT_DDI_TC3_LANES);
- return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_TC4_LANES:
- return "PORT_DDI_TC4_LANES";
- case POWER_DOMAIN_PORT_DDI_TC5_LANES:
- return "PORT_DDI_TC5_LANES";
- case POWER_DOMAIN_PORT_DDI_TC6_LANES:
- return "PORT_DDI_TC6_LANES";
+ return "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_G_LANES:
+ return "PORT_DDI_G_LANES";
+ case POWER_DOMAIN_PORT_DDI_H_LANES:
+ return "PORT_DDI_H_LANES";
+ case POWER_DOMAIN_PORT_DDI_I_LANES:
+ return "PORT_DDI_I_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -95,23 +85,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
- POWER_DOMAIN_PORT_DDI_TC1_IO);
- return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
+ return "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
- POWER_DOMAIN_PORT_DDI_TC2_IO);
- return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
+ return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
- BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
- POWER_DOMAIN_PORT_DDI_TC3_IO);
- return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DDI_TC4_IO:
- return "PORT_DDI_TC4_IO";
- case POWER_DOMAIN_PORT_DDI_TC5_IO:
- return "PORT_DDI_TC5_IO";
- case POWER_DOMAIN_PORT_DDI_TC6_IO:
- return "PORT_DDI_TC6_IO";
+ return "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DDI_G_IO:
+ return "PORT_DDI_G_IO";
+ case POWER_DOMAIN_PORT_DDI_H_IO:
+ return "PORT_DDI_H_IO";
+ case POWER_DOMAIN_PORT_DDI_I_IO:
+ return "PORT_DDI_I_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -129,34 +113,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915,
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
- return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
+ return "AUX_D";
case POWER_DOMAIN_AUX_E:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
- return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
+ return "AUX_E";
case POWER_DOMAIN_AUX_F:
- BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
- return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
- case POWER_DOMAIN_AUX_TC4:
- return "AUX_TC4";
- case POWER_DOMAIN_AUX_TC5:
- return "AUX_TC5";
- case POWER_DOMAIN_AUX_TC6:
- return "AUX_TC6";
+ return "AUX_F";
+ case POWER_DOMAIN_AUX_G:
+ return "AUX_G";
+ case POWER_DOMAIN_AUX_H:
+ return "AUX_H";
+ case POWER_DOMAIN_AUX_I:
+ return "AUX_I";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
- case POWER_DOMAIN_AUX_TBT1:
- return "AUX_TBT1";
- case POWER_DOMAIN_AUX_TBT2:
- return "AUX_TBT2";
- case POWER_DOMAIN_AUX_TBT3:
- return "AUX_TBT3";
- case POWER_DOMAIN_AUX_TBT4:
- return "AUX_TBT4";
- case POWER_DOMAIN_AUX_TBT5:
- return "AUX_TBT5";
- case POWER_DOMAIN_AUX_TBT6:
- return "AUX_TBT6";
+ case POWER_DOMAIN_AUX_C_TBT:
+ return "AUX_C_TBT";
+ case POWER_DOMAIN_AUX_D_TBT:
+ return "AUX_D_TBT";
+ case POWER_DOMAIN_AUX_E_TBT:
+ return "AUX_E_TBT";
+ case POWER_DOMAIN_AUX_F_TBT:
+ return "AUX_F_TBT";
+ case POWER_DOMAIN_AUX_G_TBT:
+ return "AUX_G_TBT";
+ case POWER_DOMAIN_AUX_H_TBT:
+ return "AUX_H_TBT";
+ case POWER_DOMAIN_AUX_I_TBT:
+ return "AUX_I_TBT";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -283,23 +266,8 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 irq_pipe_mask, bool has_vga)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- /*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
- */
- if (has_vga) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- }
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
if (irq_pipe_mask)
gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
@@ -578,6 +546,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
#endif
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
@@ -594,6 +564,17 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
+
+ if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ DRM_WARN("Timeout waiting TC uC health\n");
+ }
}
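/*
 * Editor's note: TGL_AUX_PW_TO_TC_PORT() maps a TGL AUX power well
 * index back to its Type-C port, e.g. pw_idx == TGL_PW_CTL_IDX_AUX_TC1
 * yields tc_port 0, which the HIP_INDEX_REG()/DKL_CMN_UC_DW_27()
 * accesses above then use to address the per-port Dekel PHY registers.
 */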
static void
@@ -714,7 +695,11 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (INTEL_GEN(dev_priv) >= 11)
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (IS_GEN(dev_priv, 11))
mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
@@ -784,6 +769,52 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
dev_priv->csr.dc_state = val & mask;
}
+static u32
+sanitize_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 target_dc_state)
+{
+ u32 states[] = {
+ DC_STATE_EN_UPTO_DC6,
+ DC_STATE_EN_UPTO_DC5,
+ DC_STATE_EN_DC3CO,
+ DC_STATE_DISABLE,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+ if (target_dc_state != states[i])
+ continue;
+
+ if (dev_priv->csr.allowed_dc_mask & target_dc_state)
+ break;
+
+ target_dc_state = states[i + 1];
+ }
+
+ return target_dc_state;
+}
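/*
 * Worked example (editor's note): with allowed_dc_mask containing only
 * DC_STATE_EN_UPTO_DC5, a request for DC_STATE_EN_UPTO_DC6 cascades
 * DC6 -> DC5 and returns DC_STATE_EN_UPTO_DC5, while a request for
 * DC_STATE_EN_DC3CO falls through to DC_STATE_DISABLE, since DC3CO
 * sits below DC5 in the states[] table.
 */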
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ DRM_DEBUG_KMS("Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ DRM_DEBUG_KMS("Disabling DC3CO\n");
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ I915_WRITE(DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /*
+ * Delay of 200us for DC3CO exit time, per Bspec 49196.
+ */
+ usleep_range(200, 210);
+}
+
static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
@@ -839,6 +870,51 @@ lookup_power_well(struct drm_i915_private *dev_priv,
return &dev_priv->power_domains.power_wells[0];
}
+/**
+ * intel_display_power_set_target_dc_state - Set target dc state.
+ * @dev_priv: i915 device
+ * @state: state which needs to be set as target_dc_state.
+ *
+ * This function sets the "DC off" power well's target_dc_state;
+ * based upon this target_dc_state, the "DC off" power well will
+ * enable the desired DC state.
+ */
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ struct i915_power_well *power_well;
+ bool dc_off_enabled;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+
+ mutex_lock(&power_domains->lock);
+ power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
+
+ if (WARN_ON(!power_well))
+ goto unlock;
+
+ state = sanitize_target_dc_state(dev_priv, state);
+
+ if (state == dev_priv->csr.target_dc_state)
+ goto unlock;
+
+ dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
+ power_well);
+ /*
+ * If the DC off power well is disabled, we need to enable it and
+ * then disable it so that the new target DC state takes effect.
+ */
+ if (!dc_off_enabled)
+ power_well->desc->ops->enable(dev_priv, power_well);
+
+ dev_priv->csr.target_dc_state = state;
+
+ if (!dc_off_enabled)
+ power_well->desc->ops->disable(dev_priv, power_well);
+
+unlock:
+ mutex_unlock(&power_domains->lock);
+}
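/*
 * Hypothetical call sequence (editor's sketch; the PSR2 call site is an
 * assumption, not part of this patch): a feature wanting DC3CO while
 * active could switch the target state and later restore the default.
 *
 *	intel_display_power_set_target_dc_state(dev_priv,
 *						DC_STATE_EN_DC3CO);
 *	...
 *	intel_display_power_set_target_dc_state(dev_priv,
 *						DC_STATE_EN_UPTO_DC6);
 */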
+
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
@@ -951,7 +1027,8 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
+ return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
@@ -967,6 +1044,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = {};
+ if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
@@ -999,10 +1081,17 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
if (!dev_priv->csr.dmc_payload)
return;
- if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
+ switch (dev_priv->csr.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
skl_enable_dc6(dev_priv);
- else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
+ break;
+ case DC_STATE_EN_UPTO_DC5:
gen9_enable_dc5(dev_priv);
+ break;
+ }
}
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
@@ -1208,7 +1297,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(dev_priv);
+ intel_vga_redisable_power_on(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
}
@@ -1718,15 +1807,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
- struct drm_i915_private *i915 =
- container_of(power_domains, struct drm_i915_private,
- power_domains);
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(i915, domain),
+ intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
@@ -1896,7 +1982,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
- const char *name = intel_display_power_domain_str(dev_priv, domain);
+ const char *name = intel_display_power_domain_str(domain);
power_domains = &dev_priv->power_domains;
@@ -2487,10 +2573,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_AUX_E) | \
BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2530,22 +2616,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_IO_POWER_DOMAINS ( \
+#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_IO_POWER_DOMAINS ( \
+#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_IO_POWER_DOMAINS ( \
+#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_IO_POWER_DOMAINS ( \
+#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1))
-#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2))
-#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3))
-#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
+#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_PW_5_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_D) | \
@@ -2565,24 +2651,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_PIPE_B) | \
BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -2596,37 +2682,54 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
TGL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_INIT))
-#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
-#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
-#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
-#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
-#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
-#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
-
-#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC1))
-#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC2))
-#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC3))
-#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC4))
-#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC5))
-#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TC6))
-#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5))
-#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6))
+#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
+#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
+#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
+
+#define TGL_AUX_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A))
+#define TGL_AUX_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B))
+#define TGL_AUX_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C))
+#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D))
+#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E))
+#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F))
+#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G))
+#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H))
+#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I))
+#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
+#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
+#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
+#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
+#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
+#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -2938,7 +3041,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
.name = "DC off",
.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3020,7 +3123,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
.name = "DC off",
.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3080,7 +3183,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
.name = "DC off",
.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3249,7 +3352,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
.name = "DC off",
.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3377,7 +3480,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
.name = "DC off",
.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3484,8 +3587,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .name = "AUX C TC1",
+ .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3495,8 +3598,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX D",
- .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+ .name = "AUX D TC2",
+ .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3506,8 +3609,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX E",
- .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+ .name = "AUX E TC3",
+ .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3517,8 +3620,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX F",
- .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+ .name = "AUX F TC4",
+ .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3528,8 +3631,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX C TBT1",
+ .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3539,8 +3642,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX D TBT2",
+ .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3550,8 +3653,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX E TBT3",
+ .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3561,8 +3664,8 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX F TBT4",
+ .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3610,7 +3713,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "DC off",
.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
.ops = &gen9_dc_off_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = SKL_DISP_DC_OFF,
},
{
.name = "power well 2",
@@ -3667,8 +3770,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
}
},
{
- .name = "DDI TC1 IO",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+ .name = "DDI D TC1 IO",
+ .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3677,8 +3780,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC2 IO",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+ .name = "DDI E TC2 IO",
+ .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3687,8 +3790,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC3 IO",
- .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
+ .name = "DDI F TC3 IO",
+ .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3697,8 +3800,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC4 IO",
- .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
+ .name = "DDI G TC4 IO",
+ .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3707,8 +3810,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC5 IO",
- .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
+ .name = "DDI H TC5 IO",
+ .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3717,8 +3820,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "DDI TC6 IO",
- .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
+ .name = "DDI I TC6 IO",
+ .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3728,7 +3831,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_A_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3738,7 +3841,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_B_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3748,7 +3851,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
{
.name = "AUX C",
- .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .domains = TGL_AUX_C_IO_POWER_DOMAINS,
.ops = &icl_combo_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3757,8 +3860,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC1",
- .domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
+ .name = "AUX D TC1",
+ .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3768,8 +3871,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC2",
- .domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
+ .name = "AUX E TC2",
+ .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3779,8 +3882,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC3",
- .domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
+ .name = "AUX F TC3",
+ .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3790,8 +3893,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC4",
- .domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
+ .name = "AUX G TC4",
+ .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3801,8 +3904,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC5",
- .domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
+ .name = "AUX H TC5",
+ .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3812,8 +3915,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TC6",
- .domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
+ .name = "AUX I TC6",
+ .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
.ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3823,8 +3926,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT1",
- .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .name = "AUX D TBT1",
+ .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3834,8 +3937,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT2",
- .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .name = "AUX E TBT2",
+ .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3845,8 +3948,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT3",
- .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .name = "AUX F TBT3",
+ .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3856,8 +3959,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT4",
- .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .name = "AUX G TBT4",
+ .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3867,8 +3970,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT5",
- .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
+ .name = "AUX H TBT5",
+ .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3878,8 +3981,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
},
},
{
- .name = "AUX TBT6",
- .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
+ .name = "AUX I TBT6",
+ .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
.id = DISP_PW_ID_NONE,
{
@@ -3931,14 +4034,17 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (INTEL_GEN(dev_priv) >= 11) {
- max_dc = 2;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ max_dc = 4;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN(dev_priv, 11)) {
+ max_dc = 2;
+ mask = DC_STATE_EN_DC9;
} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
@@ -3957,7 +4063,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = enable_dc;
} else if (enable_dc == -1) {
requested_dc = max_dc;
- } else if (enable_dc > max_dc && enable_dc <= 2) {
+ } else if (enable_dc > max_dc && enable_dc <= 4) {
DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
enable_dc, max_dc);
requested_dc = max_dc;
@@ -3966,10 +4072,20 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
requested_dc = max_dc;
}
- if (requested_dc > 1)
+ switch (requested_dc) {
+ case 4:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
+ break;
+ case 3:
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
+ break;
+ case 2:
mask |= DC_STATE_EN_UPTO_DC6;
- if (requested_dc > 0)
+ break;
+ case 1:
mask |= DC_STATE_EN_UPTO_DC5;
+ break;
+ }
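/*
 * Editor's summary of the mapping above: i915.enable_dc reads as
 * 0 = no DC5/DC6/DC3CO, 1 = up to DC5, 2 = up to DC6, 3 = DC5 plus
 * DC3CO, 4 = DC6 plus DC3CO, each on top of whatever DC9 bit the
 * platform mask already contains.
 */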
DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
@@ -4030,6 +4146,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->csr.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
+ dev_priv->csr.target_dc_state =
+ sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+
BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
mutex_init(&power_domains->lock);
@@ -5107,8 +5226,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(i915,
- domain),
+ intel_display_power_domain_str(domain),
power_domains->domain_use_count[domain]);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index a50605b8b1ad..1da04f3e0fb3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -36,29 +36,20 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_TC4_LANES,
- POWER_DOMAIN_PORT_DDI_TC5_LANES,
- POWER_DOMAIN_PORT_DDI_TC6_LANES,
+ POWER_DOMAIN_PORT_DDI_G_LANES,
+ POWER_DOMAIN_PORT_DDI_H_LANES,
+ POWER_DOMAIN_PORT_DDI_I_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
POWER_DOMAIN_PORT_DDI_G_IO,
- POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
POWER_DOMAIN_PORT_DDI_H_IO,
- POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
POWER_DOMAIN_PORT_DDI_I_IO,
- POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -68,21 +59,19 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
- POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_TC4,
- POWER_DOMAIN_AUX_TC5,
- POWER_DOMAIN_AUX_TC6,
+ POWER_DOMAIN_AUX_G,
+ POWER_DOMAIN_AUX_H,
+ POWER_DOMAIN_AUX_I,
POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_TBT1,
- POWER_DOMAIN_AUX_TBT2,
- POWER_DOMAIN_AUX_TBT3,
- POWER_DOMAIN_AUX_TBT4,
- POWER_DOMAIN_AUX_TBT5,
- POWER_DOMAIN_AUX_TBT6,
+ POWER_DOMAIN_AUX_C_TBT,
+ POWER_DOMAIN_AUX_D_TBT,
+ POWER_DOMAIN_AUX_E_TBT,
+ POWER_DOMAIN_AUX_F_TBT,
+ POWER_DOMAIN_AUX_G_TBT,
+ POWER_DOMAIN_AUX_H_TBT,
+ POWER_DOMAIN_AUX_I_TBT,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
@@ -111,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ SKL_DISP_DC_OFF,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
@@ -267,10 +257,11 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915);
void intel_display_power_resume_early(struct drm_i915_private *i915);
void intel_display_power_suspend(struct drm_i915_private *i915);
void intel_display_power_resume(struct drm_i915_private *i915);
+void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
+ u32 state);
const char *
-intel_display_power_domain_str(struct drm_i915_private *i915,
- enum intel_display_power_domain domain);
+intel_display_power_domain_str(enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 4075b0387c87..1a7334dbe802 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -128,7 +128,8 @@ struct intel_encoder {
enum intel_output_type type;
enum port port;
- unsigned int cloneable;
+ u16 cloneable;
+ u8 pipe_mask;
enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
struct intel_connector *connector,
bool irq_received);
@@ -187,7 +188,6 @@ struct intel_encoder {
* device interrupts are disabled.
*/
void (*suspend)(struct intel_encoder *);
- int crtc_mask;
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -388,6 +388,13 @@ struct intel_hdcp {
wait_queue_head_t cp_irq_queue;
atomic_t cp_irq_count;
int cp_irq_count_cached;
+
+ /*
+ * HDCP register access for gen12+ needs the associated transcoder.
+ * The transcoder attached to the connector could change at modeset,
+ * hence cache the transcoder here.
+ */
+ enum transcoder cpu_transcoder;
};
struct intel_connector {
@@ -481,9 +488,9 @@ struct intel_atomic_state {
* but the converse is not necessarily true; simply changing a mode may
* not flip the final active status of any CRTC's
*/
- unsigned int active_pipe_changes;
+ u8 active_pipe_changes;
- unsigned int active_crtcs;
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -499,6 +506,14 @@ struct intel_atomic_state {
bool rps_interactive;
+ /*
+ * Set when any of the following global state changes:
+ * active_pipes
+ * min_cdclk[]
+ * min_voltage_level[]
+ * cdclk.*
+ */
+ bool global_state_changed;
+
/* Gen9+ only */
struct skl_ddb_values wm_results;
@@ -552,24 +567,24 @@ struct intel_plane_state {
int scaler_id;
/*
- * linked_plane:
+ * planar_linked_plane:
*
* ICL planar formats require 2 planes that are updated as pairs.
* This member is used to make sure the other plane is also updated
* when required, and for update_slave() to find the correct
* plane_state to pass as argument.
*/
- struct intel_plane *linked_plane;
+ struct intel_plane *planar_linked_plane;
/*
- * slave:
+ * planar_slave:
* If set, this plane is not updated directly; the linked plane's
* update_slave() callback updates it during atomic commit.
*
* It's also used by the watermark code to ignore wm calculations on
* this plane. They're calculated by the linked plane's wm code.
*/
- u32 slave;
+ u32 planar_slave;
struct drm_intel_sprite_colorkey ckey;
};
@@ -759,7 +774,6 @@ struct intel_crtc_state {
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
- bool fb_changed; /* fb on any of the planes is changed */
bool fifo_changed; /* FIFO split is changed */
bool preload_luts;
@@ -865,6 +879,7 @@ struct intel_crtc_state {
bool has_psr;
bool has_psr2;
+ u32 dc3co_exitline;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -926,6 +941,8 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
+ int min_cdclk[I915_MAX_PLANES];
+
u32 data_rate[I915_MAX_PLANES];
/* Gamma mode programmed on the pipe */
@@ -980,11 +997,17 @@ struct intel_crtc_state {
bool dsc_split;
u16 compressed_bpp;
u8 slice_count;
- } dsc_params;
- struct drm_dsc_config dp_dsc_cfg;
+ struct drm_dsc_config config;
+ } dsc;
/* Forward Error correction State */
bool fec_enable;
+
+ /* Master transcoder in the case of tiled displays */
+ enum transcoder master_transcoder;
+
+ /* Bitmask to indicate slaves attached */
+ u8 sync_mode_slaves_mask;
};
struct intel_crtc {
@@ -1027,6 +1050,9 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
+
+ /* per pipe DSB related info */
+ struct intel_dsb dsb;
};
struct intel_plane {
@@ -1062,6 +1088,8 @@ struct intel_plane {
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
};
struct intel_watermark_params {
@@ -1177,6 +1205,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
+ u32 aux_busy_last_status;
u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1212,6 +1241,15 @@ struct intel_dp {
bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
+
+ /*
+ * DP_TP_* registers may live in either the port or the transcoder register space.
+ */
+ struct {
+ i915_reg_t dp_tp_ctl;
+ i915_reg_t dp_tp_status;
+ } regs;
+
/* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
@@ -1270,6 +1308,7 @@ struct intel_digital_port {
char tc_port_name[8];
enum tc_port_mode tc_mode;
enum phy_fia tc_phy_fia;
+ u8 tc_phy_fia_idx;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1510,7 +1549,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_wait_one_vblank(&dev_priv->drm, pipe);
}
static inline void
-intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 9b15ac4f2fb6..c61ac0c3acb5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -68,11 +68,6 @@
#define DP_DPRX_ESI_LEN 14
-/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
-#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
-#define DP_DSC_MIN_SUPPORTED_BPC 8
-#define DP_DSC_MAX_SUPPORTED_BPC 10
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -500,7 +495,17 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
DP_DSC_FEC_OVERHEAD_FACTOR);
}
-static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+static int
+small_joiner_ram_size_bits(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ return 7680 * 8;
+ else
+ return 6144 * 8;
+}
+
+static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
u32 mode_clock, u32 mode_hdisplay)
{
u32 bits_per_pixel, max_bpp_small_joiner_ram;
@@ -517,7 +522,8 @@ static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
+ mode_hdisplay;
DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
/*
@@ -585,6 +591,25 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
+static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
+ int hdisplay)
+{
+ /*
+ * Older platforms don't like hdisplay==4096 with DP.
+ *
+ * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
+ * and frame counter increment), but we don't get vblank interrupts,
+ * and the pipe underruns immediately. The link also doesn't seem
+ * to get trained properly.
+ *
+ * On CHV the vblank interrupts don't seem to disappear but
+ * otherwise the symptoms are similar.
+ *
+ * TODO: confirm the behaviour on HSW+
+ */
+ return hdisplay == 4096 && !HAS_DDI(dev_priv);
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -620,6 +645,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
+ if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
/*
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
@@ -634,7 +662,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
true);
} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(max_link_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
max_lanes,
target_clock,
mode->hdisplay) >> 4;
@@ -655,7 +684,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
@@ -732,12 +761,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to port %c being active\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port)))
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -855,9 +886,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
+ DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
pipe_name(intel_dp->pps_pipe),
- port_name(intel_dig_port->base.port));
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -965,13 +997,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
- port_name(port), pipe_name(intel_dp->pps_pipe));
+ DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1144,18 +1179,20 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
+ const unsigned int timeout_ms = 10;
u32 status;
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
done = wait_event_timeout(i915->gmbus_wait_queue, C,
- msecs_to_jiffies_timeout(10));
+ msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("dp aux hw did not signal timeout!\n");
+ DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
@@ -1338,13 +1375,12 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- static u32 last_status = -1;
const u32 status = intel_uncore_read(uncore, ch_ctl);
- if (status != last_status) {
+ if (status != intel_dp->aux_busy_last_status) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
status);
- last_status = status;
+ intel_dp->aux_busy_last_status = status;
}
ret = -EBUSY;
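The hunk above replaces a function-local static with the new per-device aux_busy_last_status field: a static local is shared by every DP port in the system, so one port's status could suppress another port's warning. A minimal sketch of the pattern, with hypothetical names (dev_state/report_busy_once are illustrative, not from this patch):

struct dev_state {
	u32 busy_last_status;	/* per-instance, like aux_busy_last_status */
};

static void report_busy_once(struct dev_state *dev, u32 status)
{
	/* Warn only when this device's status actually changes. */
	if (status != dev->busy_last_status) {
		WARN(1, "channel not started, status 0x%08x\n", status);
		dev->busy_last_status = status;
	}
}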
@@ -1636,6 +1672,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_CTL(aux_ch);
default:
MISSING_CASE(aux_ch);
@@ -1656,6 +1693,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_D:
case AUX_CH_E:
case AUX_CH_F:
+ case AUX_CH_G:
return DP_AUX_CH_DATA(aux_ch, index);
default:
MISSING_CASE(aux_ch);
@@ -1834,8 +1872,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 11 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ /* On TGL, FEC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
@@ -1850,8 +1894,18 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- return INTEL_GEN(dev_priv) >= 10 &&
- pipe_config->cpu_transcoder != TRANSCODER_A;
+ if (!INTEL_INFO(dev_priv)->display.has_dsc)
+ return false;
+
+ /* On TGL, DSC is supported on all Pipes */
+ if (INTEL_GEN(dev_priv) >= 12)
+ return true;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A)
+ return true;
+
+ return false;
}
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
@@ -2010,11 +2064,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
- conn_state->max_requested_bpc);
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (INTEL_GEN(dev_priv) >= 12)
+ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10,
+ conn_state->max_requested_bpc);
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
- if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+
+ /* Min Input BPC for ICL+ is 8 */
+ if (pipe_bpp < 8 * 3) {
DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
return -EINVAL;
}
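Taken together, the bounds above give a per-platform DSC input range: minimum 8 bpc everywhere, maximum 10 bpc on gen 10/11 and 12 bpc on gen 12+. Since pipe_bpp carries 3 components per pixel, the 8 bpc floor is checked as pipe_bpp < 8 * 3. A hedged sketch of the effective clamp (the helper name is illustrative):

/* Sketch: effective DSC input bpc for a given gen. */
static u8 dsc_max_input_bpc(int gen, u8 requested_bpc)
{
	u8 platform_max = gen >= 12 ? 12 : 10;	/* TGL+: 12, ICL: 10 */

	return min(platform_max, requested_bpc);
}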
@@ -2029,10 +2089,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
if (intel_dp_is_edp(intel_dp)) {
- pipe_config->dsc_params.compressed_bpp =
+ pipe_config->dsc.compressed_bpp =
min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count =
+ pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
} else {
@@ -2040,7 +2100,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
u8 dsc_dp_slice_count;
dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
@@ -2052,10 +2113,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
- pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ pipe_config->dsc.compressed_bpp = min_t(u16,
dsc_max_output_bpp >> 4,
pipe_config->pipe_bpp);
- pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
@@ -2063,8 +2124,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* then we need to use 2 VDSC instances.
*/
if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
- if (pipe_config->dsc_params.slice_count > 1) {
- pipe_config->dsc_params.dsc_split = true;
+ if (pipe_config->dsc.slice_count > 1) {
+ pipe_config->dsc.dsc_split = true;
} else {
DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
@@ -2076,16 +2137,16 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
"Compressed BPP = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
return ret;
}
- pipe_config->dsc_params.compression_enable = true;
+ pipe_config->dsc.compression_enable = true;
DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
"Compressed Bpp = %d Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
@@ -2159,15 +2220,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- if (pipe_config->dsc_params.compression_enable) {
+ if (pipe_config->dsc.compression_enable) {
DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc_params.compressed_bpp);
+ pipe_config->dsc.compressed_bpp);
DRM_DEBUG_KMS("DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc_params.compressed_bpp),
+ pipe_config->dsc.compressed_bpp),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2222,6 +2283,16 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
* See:
@@ -2259,6 +2330,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
if (lspcon->active)
lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
else
@@ -2304,6 +2376,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return -EINVAL;
+ if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
+ return -EINVAL;
+
ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
if (ret < 0)
return ret;
@@ -2311,8 +2386,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->limited_color_range =
intel_dp_limited_color_range(pipe_config, conn_state);
- if (pipe_config->dsc_params.compression_enable)
- output_bpp = pipe_config->dsc_params.compressed_bpp;
+ if (pipe_config->dsc.compression_enable)
+ output_bpp = pipe_config->dsc.compressed_bpp;
else
output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
@@ -2339,6 +2414,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_psr_compute_config(intel_dp, pipe_config);
+ intel_hdcp_transcoder_config(intel_connector,
+ pipe_config->cpu_transcoder);
+
return 0;
}
@@ -2366,6 +2444,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
+ intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
+ intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
+
/*
* There are four kinds of DP registers:
*
@@ -2567,8 +2648,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2587,8 +2669,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2613,8 +2696,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_pps_lock(intel_dp, wakeref)
vdd = edp_panel_vdd_on(intel_dp);
- I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -2632,8 +2716,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2695,8 +2780,9 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
intel_dp->want_panel_vdd = false;
@@ -2717,12 +2803,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
if (WARN(edp_have_panel_power(intel_dp),
- "eDP port %c panel power already on\n",
- port_name(dp_to_dig_port(intel_dp)->base.port)))
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2777,11 +2865,11 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dig_port->base.port));
+ DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dig_port->base.port));
+ WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2926,8 +3014,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
- "DP port %c state assertion failure (expected %s, current %s)\n",
- port_name(dig_port->base.port),
+ "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
@@ -3023,7 +3111,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
{
int ret;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
@@ -3315,7 +3403,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(DP_TP_CTL(port));
+ u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3341,7 +3429,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(DP_TP_CTL(port), temp);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3505,8 +3593,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
@@ -3522,17 +3611,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- enum port port = encoder->port;
WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active (e)DP port %c\n",
- pipe_name(pipe), port_name(port));
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
- pipe_name(pipe), port_name(port));
+ DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3574,8 +3664,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
- pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
+ DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4039,22 +4130,22 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(DP_TP_CTL(port));
+ val = I915_READ(intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(DP_TP_CTL(port), val);
+ I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
/*
- * On PORT_A we can have only eDP in SST mode. There the only reason
- * we need to set idle transmission mode is to work around a HW issue
- * where we enable the pipe while not in idle link-training mode.
+ * Until TGL, PORT_A can only carry eDP in SST mode. There the only
+ * reason we need to set idle transmission mode is to work around a HW
+ * issue where we enable the pipe while not in idle link-training mode.
* In this case there is requirement to wait for a minimum number of
* idle patterns to be sent.
*/
- if (port == PORT_A)
+ if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
return;
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
@@ -4396,9 +4487,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
&dp_to_dig_port(intel_dp)->base;
bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
- DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
- port_name(encoder->port), yesno(intel_dp->can_mst),
- yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
@@ -4418,9 +4510,36 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
+bool
+intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ /*
+ * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
+ * of Color Encoding Format and Content Color Gamut], in order to
+ * send YCBCR 420 or HDR BT.2020 signals we should use a DP VSC SDP.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return true;
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static void
-intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct dp_sdp vsc_sdp = {};
@@ -4441,13 +4560,55 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
*/
vsc_sdp.sdp_header.HB3 = 0x13;
- /*
- * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
- * DB16[3:0] DP 1.4a spec, Table 2-120
- */
- vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
- /* RGB->YCBCR color conversion uses the BT.709 color space. */
- vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ /* DP 1.4a spec, Table 2-120 */
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ default:
+ /* RGB: DB16[7:4] = 0h */
+ break;
+ }
+
+ switch (conn_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ vsc_sdp.db[16] |= 0x1;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_601:
+ vsc_sdp.db[16] |= 0x2;
+ break;
+ case DRM_MODE_COLORIMETRY_XVYCC_709:
+ vsc_sdp.db[16] |= 0x3;
+ break;
+ case DRM_MODE_COLORIMETRY_SYCC_601:
+ vsc_sdp.db[16] |= 0x4;
+ break;
+ case DRM_MODE_COLORIMETRY_OPYCC_601:
+ vsc_sdp.db[16] |= 0x5;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ vsc_sdp.db[16] |= 0x6;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ vsc_sdp.db[16] |= 0x7;
+ break;
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
+ break;
+ default:
+ /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[3:0] = 0h */
+
+ /* RGB->YCBCR color conversion uses the BT.709 color space. */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
+ break;
+ }
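Combining the two switches above (a worked example, not additional code in the patch): DB16[7:4] holds the pixel encoding and DB16[3:0] the colorimetry per DP 1.4a Table 2-120, so a YCbCr 4:2:0 output with the default BT.709 conversion yields:

u8 db16 = (0x3 << 4) | 0x1;	/* 0x31: YCbCr 420, ITU-R BT.709 */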
/*
* For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
@@ -4499,13 +4660,106 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void
+intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct dp_sdp infoframe_sdp = {};
+ struct hdmi_drm_infoframe drm_infoframe = {};
+ const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
+ unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
+ ssize_t len;
+ int ret;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
+ if (ret) {
+ DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
return;
+ }
- intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
+ len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
+ if (len < 0) {
+ DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ return;
+ }
+
+ if (len != infoframe_size) {
+ DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ return;
+ }
+
+ /*
+ * Set up the infoframe sdp packet for HDR static metadata.
+ * Prepare the SDP header as per DP 1.4a spec,
+ * Table 2-100 and Table 2-101
+ */
+
+ /* Packet ID, 00h for non-Audio INFOFRAME */
+ infoframe_sdp.sdp_header.HB0 = 0;
+ /*
+ * Packet Type 80h + Non-audio INFOFRAME Type value
+ * HDMI_INFOFRAME_TYPE_DRM: 0x87,
+ */
+ infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
+ /*
+ * Least Significant Eight Bits of (Data Byte Count – 1)
+ * infoframe_size - 1,
+ */
+ infoframe_sdp.sdp_header.HB2 = 0x1D;
+ /* INFOFRAME SDP Version Number */
+ infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
+ /* CTA Header Byte 2 (INFOFRAME Version Number) */
+ infoframe_sdp.db[0] = drm_infoframe.version;
+ /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
+ infoframe_sdp.db[1] = drm_infoframe.length;
+ /*
+ * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
+ * HDMI_INFOFRAME_HEADER_SIZE
+ */
+ BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
+ memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_DRM_INFOFRAME_SIZE);
+
+ /*
+ * The DP infoframe sdp packet for HDR static metadata consists of
+ * - DP SDP Header(struct dp_sdp_header): 4 bytes
+ * - Two Data Blocks: 2 bytes
+ * CTA Header Byte2 (INFOFRAME Version Number)
+ * CTA Header Byte3 (Length of INFOFRAME)
+ * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
+ *
+ * Prior to GEN11, the GMP register size is identical to the DP HDR
+ * static metadata infoframe size. GEN11+ uses a larger size, and
+ * write_infoframe will pad the rest.
+ */
+ intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ &infoframe_sdp,
+ sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
+}
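The write length in the call above works out to 32 bytes, using the sizes named in the comment (HDMI_DRM_INFOFRAME_SIZE is 26); likewise, HB2 = 0x1D is (HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE) - 1 = 29, the data byte count minus one. A sketch of the arithmetic:

/* Sketch: payload size for the HDR metadata SDP write above. */
size_t len = sizeof(struct dp_sdp_header)	/*  4 bytes           */
	   + 2					/*  CTA version+length */
	   + HDMI_DRM_INFOFRAME_SIZE;		/* 26 bytes, 32 total  */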
+
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
+ return;
+
+ intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
+}
+
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
+ crtc_state,
+ conn_state);
}
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
@@ -5227,6 +5481,9 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
{
enum port port = intel_dig_port->base.port;
+ if (HAS_PCH_MCC(dev_priv) && port == PORT_C)
+ return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
@@ -5506,7 +5763,6 @@ static int
intel_dp_connector_register(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct drm_device *dev = connector->dev;
int ret;
ret = intel_connector_register(connector);
@@ -5521,8 +5777,7 @@ intel_dp_connector_register(struct drm_connector *connector)
intel_dp->aux.dev = connector->kdev;
ret = drm_dp_aux_register(&intel_dp->aux);
if (!ret)
- drm_dp_cec_register_connector(&intel_dp->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&intel_dp->aux, connector);
return ret;
}
@@ -6280,13 +6535,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
* would end up in an endless cycle of
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
- DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
- port_name(intel_dig_port->base.port));
+ DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return IRQ_HANDLED;
}
- DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
- port_name(intel_dig_port->base.port),
+ DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
long_hpd ? "long" : "short");
if (long_hpd) {
@@ -6353,6 +6610,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
else if (INTEL_GEN(dev_priv) >= 5)
drm_connector_attach_max_bpc_property(connector, 6, 12);
+ intel_attach_colorspace_property(connector);
+
+ if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.hdr_output_metadata_property,
+ 0);
+
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -7150,8 +7414,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_modeset_retry_work_fn);
if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7192,9 +7457,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on port %c\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- port_name(port));
+ DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7218,11 +7483,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
- (port == PORT_B || port == PORT_C ||
- port == PORT_D || port == PORT_F))
- intel_dp_mst_encoder_init(intel_dig_port,
- intel_connector->base.base.id);
+ intel_dp_mst_encoder_init(intel_dig_port,
+ intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
@@ -7313,11 +7575,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 0;
intel_encoder->port = port;
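The crtc_mask to pipe_mask conversion above swaps raw shifts for BIT(PIPE_*) and lets ~0 stand for "any pipe" (presumably masked to the pipes that actually exist when possible_crtcs is derived later). A sketch of the equivalence:

/* Sketch: old and new spellings of "pipes A and B" are the same value. */
u8 old_style = (1 << 0) | (1 << 1);
u8 new_style = BIT(PIPE_A) | BIT(PIPE_B);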
@@ -7378,7 +7640,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
if (!intel_dp->can_mst)
continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
+ true);
if (ret) {
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 00981fb9414b..3da166054788 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -13,6 +13,7 @@
#include "i915_reg.h"
enum pipe;
+enum port;
struct drm_connector_state;
struct drm_encoder;
struct drm_i915_private;
@@ -107,6 +108,14 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_vsc_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool intel_digital_port_connected(struct intel_encoder *encoder);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 600873c796d0..03d1cba0b696 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -215,7 +215,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
- DRM_ERROR("failed to update payload %d\n", ret);
+ DRM_DEBUG_KMS("failed to update payload %d\n", ret);
}
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
@@ -295,7 +295,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
@@ -326,12 +325,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(DP_TP_STATUS(port));
- I915_WRITE(DP_TP_STATUS(port), temp);
+ temp = I915_READ(intel_dp->regs.dp_tp_status);
+ I915_WRITE(intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
intel_ddi_enable_pipe_clock(pipe_config);
+
+ intel_ddi_set_dp_msa(pipe_config, conn_state);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -342,11 +343,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = intel_dig_port->base.port;
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
@@ -393,20 +393,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
-static enum drm_connector_status
-intel_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dp *intel_dp = intel_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
- intel_connector->port);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -426,6 +413,7 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -453,7 +441,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
@@ -466,11 +454,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
+static int
+intel_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ if (drm_connector_is_unregistered(connector))
+ return connector_status_disconnected;
+
+ return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
+ intel_connector->port);
+}
+
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.atomic_check = intel_dp_mst_atomic_check,
+ .detect_ctx = intel_dp_mst_detect,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
@@ -615,8 +618,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
+ /*
+ * This is wrong, but broken userspace uses the intersection
+ * of possible_crtcs of all the encoders of a given connector
+ * to figure out which crtcs can drive said connector. What
+ * should be used instead is the union of possible_crtcs.
+ * To keep such userspace functioning we must misconfigure
+ * this to make sure the intersection is not empty :(
+ */
+ intel_encoder->pipe_mask = ~0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
@@ -653,21 +664,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ enum port port = intel_dig_port->base.port;
int ret;
- intel_dp->can_mst = true;
+ if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
+ return 0;
+
+ if (INTEL_GEN(i915) < 12 && port == PORT_A)
+ return 0;
+
+ if (INTEL_GEN(i915) < 11 && port == PORT_E)
+ return 0;
+
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
&intel_dp->aux, 16, 3, conn_base_id);
- if (ret) {
- intel_dp->can_mst = false;
+ if (ret)
return ret;
- }
+
+ intel_dp->can_mst = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index d5a298c3c83b..3ce0a023eee0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -247,8 +247,7 @@ static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state *state,
const struct intel_crtc *crtc,
const struct intel_dpll_hw_state *pll_state,
- enum intel_dpll_id range_min,
- enum intel_dpll_id range_max)
+ unsigned long dpll_mask)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
@@ -257,7 +256,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- for (i = range_min; i <= range_max; i++) {
+ WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+
+ for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
@@ -464,8 +465,8 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
} else {
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B);
+ BIT(DPLL_ID_PCH_PLL_B) |
+ BIT(DPLL_ID_PCH_PLL_A));
}
if (!pll)
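With a bitmask instead of an inclusive ID range, callers can now express non-contiguous candidate sets, and iteration order is fixed by bit position rather than argument order. A minimal usage sketch of the new contract:

/* Sketch: for_each_set_bit() visits PLLs in ascending ID order,
 * regardless of the order the BIT()s were OR'ed together. */
unsigned long dpll_mask = BIT(DPLL_ID_PCH_PLL_B) | BIT(DPLL_ID_PCH_PLL_A);
unsigned long i;

for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
	/* i = DPLL_ID_PCH_PLL_A first, then DPLL_ID_PCH_PLL_B */
}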
@@ -829,7 +830,8 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
if (!pll)
return NULL;
@@ -892,7 +894,7 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SPLL, DPLL_ID_SPLL);
+ BIT(DPLL_ID_SPLL));
} else {
return false;
}
@@ -1462,13 +1464,13 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL0);
+ BIT(DPLL_ID_SKL_DPLL0));
else
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL1,
- DPLL_ID_SKL_DPLL3);
+ BIT(DPLL_ID_SKL_DPLL3) |
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
return false;
@@ -2416,8 +2418,9 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
- DPLL_ID_SKL_DPLL0,
- DPLL_ID_SKL_DPLL2);
+ BIT(DPLL_ID_SKL_DPLL2) |
+ BIT(DPLL_ID_SKL_DPLL1) |
+ BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return false;
@@ -2535,6 +2538,18 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
+static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
+ .dco_integer = 0x54, .dco_fraction = 0x3000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
+static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
+ .dco_integer = 0x43, .dco_fraction = 0x4000,
+ /* the following params are unused */
+ .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
+};
+
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
@@ -2562,8 +2577,34 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = tgl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = tgl_tbt_pll_24MHz_values;
+ break;
+ }
+ } else {
+ switch (dev_priv->cdclk.hw.ref) {
+ default:
+ MISSING_CASE(dev_priv->cdclk.hw.ref);
+ /* fall-through */
+ case 19200:
+ case 38400:
+ *pll_params = icl_tbt_pll_19_2MHz_values;
+ break;
+ case 24000:
+ *pll_params = icl_tbt_pll_24MHz_values;
+ break;
+ }
+ }
+
return true;
}
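Note that 19200 and 38400 share one parameter table in both branches, presumably because a 38.4 MHz reference is halved before reaching the DCO. A sketch of the selection as a simple lookup (the helper name is illustrative, not from this patch):

/* Sketch: TBT PLL DCO parameter selection by platform and refclk. */
static const struct skl_wrpll_params *
tbt_pll_params(bool is_tgl, int refclk_khz)
{
	if (refclk_khz == 24000)
		return is_tgl ? &tgl_tbt_pll_24MHz_values
			      : &icl_tbt_pll_24MHz_values;

	/* 19200 and 38400 kHz share one entry. */
	return is_tgl ? &tgl_tbt_pll_19_2MHz_values
		      : &icl_tbt_pll_19_2MHz_values;
}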
@@ -2622,7 +2663,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
- struct intel_dpll_hw_state *state)
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
{
u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
@@ -2644,8 +2686,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
continue;
if (div2 >= 2) {
+ /*
+ * Note: a_divratio does not match the TGL BSpec
+ * algorithm, but it matches the hardcoded values
+ * and works on HW, at least for DP alt-mode
+ */
a_divratio = is_dp ? 10 : 5;
- tlinedrv = 2;
+ tlinedrv = is_dkl ? 1 : 2;
} else {
a_divratio = 5;
tlinedrv = 0;
@@ -2708,11 +2755,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ bool is_dkl = INTEL_GEN(dev_priv) >= 12;
memset(pll_state, 0, sizeof(*pll_state));
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state)) {
+ pll_state, is_dkl)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2720,8 +2768,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
m1div = 2;
m2div_int = dco_khz / (refclk_khz * m1div);
if (m2div_int > 255) {
- m1div = 4;
- m2div_int = dco_khz / (refclk_khz * m1div);
+ if (!is_dkl) {
+ m1div = 4;
+ m2div_int = dco_khz / (refclk_khz * m1div);
+ }
+
if (m2div_int > 255) {
DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
clock);
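The feedback divider math above, as a worked sketch (the dco_khz and refclk_khz values are assumed examples): m2div_int = dco / (refclk * m1div); only the legacy MG PHY may retry with m1div = 4 when the 8-bit integer divider overflows, while the TGL DKL PHY keeps m1div = 2 and fails instead.

/* Sketch: e.g. dco_khz = 8100000, refclk_khz = 19200, m1div = 2. */
int m2div_int = 8100000 / (19200 * 2);	/* 210, fits in 8 bits */
int m2div_rem = 8100000 % (19200 * 2);	/* drives the fractional divider */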
@@ -2801,60 +2852,94 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
ssc_steplog = 4;
- pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
- MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
- MG_PLL_DIV0_FBDIV_INT(m2div_int);
-
- pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
- MG_PLL_DIV1_DITHER_DIV_2 |
- MG_PLL_DIV1_NDIVRATIO(1) |
- MG_PLL_DIV1_FBPREDIV(m1div);
-
- pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
- MG_PLL_LF_AFCCNTSEL_512 |
- MG_PLL_LF_GAINCTRL(1) |
- MG_PLL_LF_INT_COEFF(int_coeff) |
- MG_PLL_LF_PROP_COEFF(prop_coeff);
-
- pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
- MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
- MG_PLL_FRAC_LOCK_DCODITHEREN |
- MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
- if (use_ssc || m2div_rem > 0)
- pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
-
- pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
- MG_PLL_SSC_TYPE(2) |
- MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
- MG_PLL_SSC_STEPNUM(ssc_steplog) |
- MG_PLL_SSC_FLLEN |
- MG_PLL_SSC_STEPSIZE(ssc_stepsize);
-
- pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
- MG_PLL_TDC_COLDST_IREFINT_EN |
- MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
- MG_PLL_TDC_TDCOVCCORR_EN |
- MG_PLL_TDC_TDCSEL(3);
-
- pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
- MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
- MG_PLL_BIAS_BIAS_BONUS(10) |
- MG_PLL_BIAS_BIASCAL_EN |
- MG_PLL_BIAS_CTRIM(12) |
- MG_PLL_BIAS_VREF_RDAC(4) |
- MG_PLL_BIAS_IREFTRIM(iref_trim);
-
- if (refclk_khz == 38400) {
- pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- pll_state->mg_pll_bias_mask = 0;
+ /* write pll_state calculations */
+ if (is_dkl) {
+ pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
+ DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
+ DKL_PLL_DIV0_FBPREDIV(m1div) |
+ DKL_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
+ DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
+
+ pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
+ DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
+ DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
+ (use_ssc ? DKL_PLL_SSC_EN : 0);
+
+ pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
+ DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
+ DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
+
} else {
- pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
- pll_state->mg_pll_bias_mask = -1U;
- }
+ pll_state->mg_pll_div0 =
+ (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
+ MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
+ MG_PLL_DIV0_FBDIV_INT(m2div_int);
+
+ pll_state->mg_pll_div1 =
+ MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
+ MG_PLL_DIV1_DITHER_DIV_2 |
+ MG_PLL_DIV1_NDIVRATIO(1) |
+ MG_PLL_DIV1_FBPREDIV(m1div);
+
+ pll_state->mg_pll_lf =
+ MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
+ MG_PLL_LF_AFCCNTSEL_512 |
+ MG_PLL_LF_GAINCTRL(1) |
+ MG_PLL_LF_INT_COEFF(int_coeff) |
+ MG_PLL_LF_PROP_COEFF(prop_coeff);
+
+ pll_state->mg_pll_frac_lock =
+ MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
+ MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
+ MG_PLL_FRAC_LOCK_DCODITHEREN |
+ MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
+ if (use_ssc || m2div_rem > 0)
+ pll_state->mg_pll_frac_lock |=
+ MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
+
+ pll_state->mg_pll_ssc =
+ (use_ssc ? MG_PLL_SSC_EN : 0) |
+ MG_PLL_SSC_TYPE(2) |
+ MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
+ MG_PLL_SSC_STEPNUM(ssc_steplog) |
+ MG_PLL_SSC_FLLEN |
+ MG_PLL_SSC_STEPSIZE(ssc_stepsize);
+
+ pll_state->mg_pll_tdc_coldst_bias =
+ MG_PLL_TDC_COLDST_COLDSTART |
+ MG_PLL_TDC_COLDST_IREFINT_EN |
+ MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+ MG_PLL_TDC_TDCOVCCORR_EN |
+ MG_PLL_TDC_TDCSEL(3);
+
+ pll_state->mg_pll_bias =
+ MG_PLL_BIAS_BIAS_GB_SEL(3) |
+ MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+ MG_PLL_BIAS_BIAS_BONUS(10) |
+ MG_PLL_BIAS_BIASCAL_EN |
+ MG_PLL_BIAS_CTRIM(12) |
+ MG_PLL_BIAS_VREF_RDAC(4) |
+ MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+ if (refclk_khz == 38400) {
+ pll_state->mg_pll_tdc_coldst_bias_mask =
+ MG_PLL_TDC_COLDST_COLDSTART;
+ pll_state->mg_pll_bias_mask = 0;
+ } else {
+ pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ pll_state->mg_pll_bias_mask = -1U;
+ }
- pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
- pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ pll_state->mg_pll_tdc_coldst_bias &=
+ pll_state->mg_pll_tdc_coldst_bias_mask;
+ pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+ }
return true;
}
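For intuition, the hunks above pick the feedback divider so that dco_khz ~= refclk_khz * m1div * m2div_int (plus a fractional part), retrying with the larger pre-divider only on MG (non-Dekel) PHYs. A standalone sketch of that selection, with an illustrative function name:

    /* Illustrative: integer feedback-divider selection for MG/DKL PLLs. */
    static int pick_m2div_int(int dco_khz, int refclk_khz, bool is_dkl)
    {
            int m1div = 2;
            int m2div_int = dco_khz / (refclk_khz * m1div);

            if (m2div_int > 255 && !is_dkl) {
                    /* Only the MG PHY may fall back to the larger pre-divider. */
                    m1div = 4;
                    m2div_int = dco_khz / (refclk_khz * m1div);
            }

            return m2div_int <= 255 ? m2div_int : -1; /* -1: no valid divider */
    }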
@@ -2908,7 +2993,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
- bool has_dpll4 = false;
+ unsigned long dpll_mask;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
@@ -2917,16 +3002,19 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
}
if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
- has_dpll4 = true;
+ dpll_mask =
+ BIT(DPLL_ID_EHL_DPLL4) |
+ BIT(DPLL_ID_ICL_DPLL1) |
+ BIT(DPLL_ID_ICL_DPLL0);
+ else
+ dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- DPLL_ID_ICL_DPLL0,
- has_dpll4 ? DPLL_ID_EHL_DPLL4
- : DPLL_ID_ICL_DPLL1);
+ dpll_mask);
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
- port_name(encoder->port));
+ DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
return false;
}
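The old (min, max) DPLL-ID range is replaced by an arbitrary bitmask, which lets EHL's non-contiguous DPLL4 join DPLL0/1 in the candidate set. Conceptually the search inside intel_find_shared_dpll() then reduces to a bit test over the mask; a sketch (try_shared_dpll() is hypothetical, and I915_NUM_PLLS is assumed to bound the enum):

    unsigned long dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
    unsigned int id;

    for_each_set_bit(id, &dpll_mask, I915_NUM_PLLS)
            try_shared_dpll(id); /* hypothetical: test candidate PLL 'id' */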
@@ -2956,8 +3044,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- DPLL_ID_ICL_TBTPLL,
- DPLL_ID_ICL_TBTPLL);
+ BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
return false;
@@ -2976,8 +3063,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
encoder->port));
port_dpll->pll = intel_find_shared_dpll(state, crtc,
&port_dpll->hw_state,
- dpll_id,
- dpll_id);
+ BIT(dpll_id));
if (!port_dpll->pll) {
DRM_DEBUG_KMS("No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
@@ -3101,6 +3187,78 @@ out:
return ret;
}
+static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_DISPLAY_CORE);
+ if (!wakeref)
+ return false;
+
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ /*
+ * All registers read here have the same HIP_INDEX_REG even though
+ * they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+
+ hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+
+ hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+
+ hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
+ return ret;
+}
+
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state,
@@ -3235,6 +3393,75 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
+static void dkl_pll_write(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
+ u32 val;
+
+ /*
+ * All registers programmed here have the same HIP_INDEX_REG even
+ * though they are on different building blocks
+ */
+ I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+
+ /* All the registers are RMW */
+ val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+ val |= hw_state->mg_refclkin_ctl;
+ I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+ val |= hw_state->mg_clktop2_coreclkctl1;
+ I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+
+ val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+ val |= hw_state->mg_clktop2_hsclkctl;
+ I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
+ DKL_PLL_DIV0_PROP_COEFF_MASK |
+ DKL_PLL_DIV0_FBPREDIV_MASK |
+ DKL_PLL_DIV0_FBDIV_INT_MASK);
+ val |= hw_state->mg_pll_div0;
+ I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+
+ val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
+ DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
+ val |= hw_state->mg_pll_div1;
+ I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+
+ val = I915_READ(DKL_PLL_SSC(tc_port));
+ val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
+ DKL_PLL_SSC_STEP_LEN_MASK |
+ DKL_PLL_SSC_STEP_NUM_MASK |
+ DKL_PLL_SSC_EN);
+ val |= hw_state->mg_pll_ssc;
+ I915_WRITE(DKL_PLL_SSC(tc_port), val);
+
+ val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK);
+ val |= hw_state->mg_pll_bias;
+ I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+
+ val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
+ DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
+ val |= hw_state->mg_pll_tdc_coldst_bias;
+ I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+
+ POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+}
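Every Dekel PLL register update above is read-modify-write, so bits outside the fields owned by the saved HW state are preserved. The recurring pattern reduces to a generic sketch:

    u32 val = I915_READ(reg);

    val &= ~fields_mask;   /* clear only the fields this state owns */
    val |= hw_state_bits;  /* new value, assumed to lie within fields_mask */
    I915_WRITE(reg, val);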
+
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
i915_reg_t enable_reg)
@@ -3327,7 +3554,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv,
icl_pll_power_enable(dev_priv, pll, enable_reg);
- icl_mg_pll_write(dev_priv, pll);
+ if (INTEL_GEN(dev_priv) >= 12)
+ dkl_pll_write(dev_priv, pll);
+ else
+ icl_mg_pll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
@@ -3482,11 +3712,22 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = dkl_pll_get_hw_state,
+};
+
static const struct dpll_info tgl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- /* TODO: Add typeC plls */
+ { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
+ { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
{ },
};
@@ -3494,6 +3735,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.dpll_info = tgl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 104cf6d42333..2a104c64291d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -337,6 +337,11 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
+
+ /**
+ * @wakeref: On some platforms a device-level runtime pm reference may
+ * need to be grabbed to disable DC states while this DPLL is enabled.
+ */
intel_wakeref_t wakeref;
};
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
new file mode 100644
index 000000000000..bb5a0e91b370
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+#define DSB_BUF_SIZE (2 * PAGE_SIZE)
+
+/**
+ * DOC: DSB
+ *
+ * A DSB (Display State Buffer) is a queue of MMIO instructions in memory
+ * that can be offloaded to the DSB HW in the display controller. The DSB HW
+ * is a DMA engine that can be programmed to download the DSB from memory.
+ * It allows the driver to batch-submit display HW programming, which reduces
+ * loading time and CPU activity and thereby makes context switches faster.
+ * DSB support was added starting from Gen12 Intel graphics platforms.
+ *
+ * DSBs can access only the pipe, plane, and transcoder Data Island Packet
+ * registers.
+ *
+ * The DSB HW supports only register writes (both indexed and direct MMIO
+ * writes); register reads are not possible with the DSB HW engine.
+ */
+
+/* DSB opcodes. */
+#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_BYTE_EN 0xF
+#define DSB_BYTE_EN_SHIFT 20
+#define DSB_REG_VALUE_MASK 0xfffff
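Given the defines above, a direct MMIO write occupies two dwords in the command buffer: the value, then the opcode/byte-enable/offset word. A minimal sketch of the packing (the helper name is hypothetical; it mirrors intel_dsb_reg_write() further below):

    /* Hypothetical helper: pack one direct MMIO write as a dword pair. */
    static void dsb_pack_mmio_write(u32 *buf, int *pos, u32 reg_offset, u32 val)
    {
            buf[(*pos)++] = val;
            buf[(*pos)++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
                            (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
                            reg_offset;
    }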
+
+static inline bool is_dsb_busy(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+}
+
+static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl |= DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 dsb_ctrl;
+
+ dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ if (DSB_STATUS & dsb_ctrl) {
+ DRM_DEBUG_KMS("DSB engine is busy.\n");
+ return false;
+ }
+
+ dsb_ctrl &= ~DSB_ENABLE;
+ I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+
+ POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ return true;
+}
+
+/**
+ * intel_dsb_get() - Allocate DSB context and return a DSB instance.
+ * @crtc: intel_crtc structure to get pipe info.
+ *
+ * This function returns a handle to a DSB instance, for use in further DSB
+ * operations.
+ *
+ * Returns: address of the intel_dsb instance requested.
+ * Failure: returns the same DSB instance, but without a command buffer.
+ */
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_dsb *dsb = &crtc->dsb;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_DSB(i915))
+ return dsb;
+
+ if (atomic_add_return(1, &dsb->refcount) != 1)
+ return dsb;
+
+ dsb->id = DSB1;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Gem object creation failed\n");
+ goto err;
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ DRM_ERROR("Vma creation failed\n");
+ i915_gem_object_put(obj);
+ atomic_dec(&dsb->refcount);
+ goto err;
+ }
+
+ dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(dsb->cmd_buf)) {
+ DRM_ERROR("Command buffer creation failed\n");
+ i915_vma_unpin_and_release(&vma, 0);
+ dsb->cmd_buf = NULL;
+ atomic_dec(&dsb->refcount);
+ goto err;
+ }
+ dsb->vma = vma;
+
+err:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ return dsb;
+}
+
+/**
+ * intel_dsb_put() - Destroy the DSB context.
+ * @dsb: intel_dsb structure.
+ *
+ * This function destroys the DSB context allocated by intel_dsb_get(), by
+ * unpinning and releasing the VMA object associated with it.
+ */
+
+void intel_dsb_put(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (!HAS_DSB(i915))
+ return;
+
+ if (WARN_ON(atomic_read(&dsb->refcount) == 0))
+ return;
+
+ if (atomic_dec_and_test(&dsb->refcount)) {
+ i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ dsb->cmd_buf = NULL;
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ }
+}
+
+/**
+ * intel_dsb_indexed_reg_write() - Write to the DSB context for an
+ * auto-increment register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used to write a register-value pair for an
+ * auto-increment register into the DSB command buffer. On command buffer
+ * overflow a warning is emitted, and in that and other error conditions
+ * the register programming falls back to an MMIO write.
+ */
+
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+ u32 reg_val;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ /*
+ * For example the buffer will look like below for 3 dwords for auto
+ * increment register:
+ * +--------------------------------------------------------+
+ * | size = 3 | offset &| value1 | value2 | value3 | zero |
+ * | | opcode | | | | |
+ * +--------------------------------------------------------+
+ * + + + + + + +
+ * 0 4 8 12 16 20 24
+ * Byte
+ *
+ * As every instruction is 8-byte aligned, the index of a dsb
+ * instruction in the u32 array always starts at an even number. If an
+ * odd number of dwords is written, zeros are appended at the end as
+ * padding.
+ */
+ reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ if (reg_val != i915_mmio_reg_offset(reg)) {
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
+
+ dsb->ins_start_offset = dsb->free_pos;
+
+ /* Update the size. */
+ buf[dsb->free_pos++] = 1;
+
+ /* Update the opcode and reg. */
+ buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
+ DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+
+ /* Update the value. */
+ buf[dsb->free_pos++] = val;
+ } else {
+ /* Update the new value. */
+ buf[dsb->free_pos++] = val;
+
+ /* Update the size. */
+ buf[dsb->ins_start_offset]++;
+ }
+
+ /* If the number of data words is odd, the last dword should be 0. */
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+}
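A hypothetical caller batching an auto-increment register through the DSB pairs the write helper with get/commit/put. In this sketch, PREC_PAL_DATA is assumed to be the gamma LUT data port, and crtc, pipe, lut and lut_size are illustrative:

    struct intel_dsb *dsb = intel_dsb_get(crtc);
    int i;

    for (i = 0; i < lut_size; i++)
            intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), lut[i]);

    intel_dsb_commit(dsb);
    intel_dsb_put(dsb);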
+
+/**
+ * intel_dsb_reg_write() - Write to the DSB context for a normal
+ * register.
+ * @dsb: intel_dsb structure.
+ * @reg: register address.
+ * @val: value.
+ *
+ * This function is used to write a register-value pair into the DSB
+ * command buffer. On command buffer overflow a warning is emitted, and
+ * in that and other error conditions the register programming falls
+ * back to an MMIO write.
+ */
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 *buf = dsb->cmd_buf;
+
+ if (!buf) {
+ I915_WRITE(reg, val);
+ return;
+ }
+
+ if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
+ DRM_DEBUG_KMS("DSB buffer overflow\n");
+ return;
+ }
+
+ dsb->ins_start_offset = dsb->free_pos;
+ buf[dsb->free_pos++] = val;
+ buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg);
+}
+
+/**
+ * intel_dsb_commit() - Trigger workload execution of DSB.
+ * @dsb: intel_dsb structure.
+ *
+ * This function performs the actual writes to hardware using the DSB,
+ * falling back to MMIO on errors. It also resets the DSB context.
+ */
+void intel_dsb_commit(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb);
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tail;
+
+ if (!dsb->free_pos)
+ return;
+
+ if (!intel_dsb_enable_engine(dsb))
+ goto reset;
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+
+ tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
+ if (tail > dsb->free_pos * 4)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ (tail - dsb->free_pos * 4));
+
+ if (is_dsb_busy(dsb)) {
+ DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ goto reset;
+ }
+ DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ if (wait_for(!is_dsb_busy(dsb), 1)) {
+ DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ goto reset;
+ }
+
+reset:
+ dsb->free_pos = 0;
+ dsb->ins_start_offset = 0;
+ intel_dsb_disable_engine(dsb);
+}
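Note that free_pos counts dwords while DSB_TAIL takes a byte offset, so intel_dsb_commit() scales it by four and rounds up to the cacheline, zeroing the gap first. A worked example, assuming the usual 64-byte CACHELINE_BYTES:

    int free_pos = 10;                  /* 10 dwords = 40 bytes used */
    u32 tail = ALIGN(free_pos * 4, 64); /* rounds 40 up to 64 */

    /* bytes 40..63 of cmd_buf are memset() to zero before the TAIL_PTR
     * write, exactly as done above */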
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
new file mode 100644
index 000000000000..6f95c8e909e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_H
+#define _INTEL_DSB_H
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct intel_crtc;
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ atomic_t refcount;
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+ * free_pos points to the first free entry position and helps in
+ * calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+ * ins_start_offset stores the start address of the current dsb
+ * instruction and helps in identifying a batch of writes to an
+ * auto-increment register.
+ */
+ u32 ins_start_offset;
+};
+
+struct intel_dsb *
+intel_dsb_get(struct intel_crtc *crtc);
+void intel_dsb_put(struct intel_dsb *dsb);
+void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val);
+void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
+ u32 val);
+void intel_dsb_commit(struct intel_dsb *dsb);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 5fec02aceaed..a2a937109a5a 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector)
enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
}
- return MODE_OK;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 93baf366692e..bcfbcb743e7d 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 dvo_val;
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
@@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 16ed44bfd734..3111ecaeabd0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -343,8 +343,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
HSW_FBCQ_DIS);
}
- if (IS_GEN(dev_priv, 11))
- /* Wa_1409120013:icl,ehl */
+ if (INTEL_GEN(dev_priv) >= 11)
+ /* Wa_1409120013:icl,ehl,tgl */
I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -1320,6 +1320,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->enabled = false;
fbc->active = false;
+ if (!drm_mm_initialized(&dev_priv->mm.stolen))
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
+
if (need_fbc_vtd_wa(dev_priv))
mkwrite_device_info(dev_priv)->display.has_fbc = false;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index b5c588e511dd..48c960ca12fb 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- obj = NULL;
+ obj = ERR_PTR(-ENODEV);
if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
@@ -204,7 +204,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
sizes->fb_height = intel_fb->base.height;
}
- mutex_lock(&dev->struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
/* Pin the GGTT vma for our access via info->screen_base.
@@ -267,7 +266,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma_flags = flags;
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -275,7 +273,6 @@ out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -292,11 +289,8 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
drm_fb_helper_fini(&ifbdev->helper);
- if (ifbdev->vma) {
- mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ if (ifbdev->vma)
intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);
- mutex_unlock(&ifbdev->helper.dev->struct_mutex);
- }
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
@@ -445,7 +439,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv)))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 719379774fa5..84b164f31895 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -206,6 +206,7 @@ static int frontbuffer_active(struct i915_active *ref)
return 0;
}
+__i915_active_call
static void frontbuffer_retire(struct i915_active *ref)
{
struct intel_frontbuffer *front =
@@ -220,11 +221,18 @@ static void frontbuffer_release(struct kref *ref)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct i915_vma *vma;
- front->obj->frontbuffer = NULL;
- spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+ spin_lock(&obj->vma.lock);
+ for_each_ggtt_vma(vma, obj)
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
+ spin_unlock(&obj->vma.lock);
- i915_gem_object_put(front->obj);
+ obj->frontbuffer = NULL;
+ spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+
+ i915_gem_object_put(obj);
kfree(front);
}
@@ -249,8 +257,9 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
front->obj = obj;
kref_init(&front->ref);
atomic_set(&front->bits, 0);
- i915_active_init(i915, &front->write,
- frontbuffer_active, frontbuffer_retire);
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ i915_active_may_sleep(frontbuffer_retire));
spin_lock(&i915->fb_tracking.lock);
if (obj->frontbuffer) {
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index d6775a005726..3d4d19ac1d14 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -836,7 +836,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 6ec5ceeab601..f1f41ca8402b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -1,9 +1,11 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2017 Google, Inc.
+ * Copyright © 2017-2019, Intel Corporation.
*
* Authors:
* Sean Paul <seanpaul@chromium.org>
+ * Ramalingam C <ramalingam.c@intel.com>
*/
#include <linux/component.h>
@@ -18,6 +20,7 @@
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"
+#include "intel_connector.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
@@ -105,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return capable;
}
-static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(PORT_HDCP_STATUS(port));
- return reg & HDCP_STATUS_ENC;
+ return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ HDCP_STATUS_ENC;
}
-static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+static inline
+bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum port port = connector->encoder->port;
- u32 reg;
-
- reg = I915_READ(HDCP2_STATUS_DDI(port));
- return reg & LINK_ENCRYPTION_STATUS;
+ return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS;
}
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
@@ -253,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
}
static
-u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, enum port port)
{
- enum port port = intel_dig_port->base.port;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return HDCP_TRANSA_REP_PRESENT |
+ HDCP_TRANSA_SHA1_M0;
+ case TRANSCODER_B:
+ return HDCP_TRANSB_REP_PRESENT |
+ HDCP_TRANSB_SHA1_M0;
+ case TRANSCODER_C:
+ return HDCP_TRANSC_REP_PRESENT |
+ HDCP_TRANSC_SHA1_M0;
+ case TRANSCODER_D:
+ return HDCP_TRANSD_REP_PRESENT |
+ HDCP_TRANSD_SHA1_M0;
+ default:
+ DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ return -EINVAL;
+ }
+ }
+
switch (port) {
case PORT_A:
return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
@@ -268,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- break;
+ DRM_ERROR("Unknown port %d\n", port);
+ return -EINVAL;
}
- DRM_ERROR("Unknown port %d\n", port);
- return -EINVAL;
}
static
-int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
+int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
+ enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
@@ -306,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
sha_idx = 0;
sha_text = 0;
sha_leftovers = 0;
- rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+ rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
@@ -548,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
 * V prime at least twice.
*/
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
+ ret = intel_hdcp_validate_v_prime(connector, shim,
ksv_fifo, num_downstream,
bstatus);
if (!ret)
@@ -576,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
struct drm_i915_private *dev_priv;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
@@ -615,18 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
+ I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
- an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
+ an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -644,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector)
return -EPERM;
}
- I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
- I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+ I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
+ I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(intel_dig_port));
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
@@ -689,22 +716,25 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
- if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_de_wait_for_set(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@@ -729,15 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
- ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ if (intel_de_wait_for_clear(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port),
+ ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@@ -808,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* Check_link valid only when HDCP1.4 is enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -819,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -887,7 +922,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
/* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
}
static int
@@ -1493,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
-
+ WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
@@ -1506,14 +1542,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
}
}
- if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) |
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
+ port)) |
CTL_LINK_ENCRYPTION_REQ);
}
- ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
+ ret = intel_de_wait_for_set(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
@@ -1526,14 +1566,19 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+ WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL_DDI(port),
- I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+ I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
+ ~CTL_LINK_ENCRYPTION_REQ);
- ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
+ ret = intel_de_wait_for_clear(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port),
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
@@ -1632,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = connector->encoder->port;
+ enum transcoder cpu_transcoder;
int ret = 0;
mutex_lock(&hdcp->mutex);
+ cpu_transcoder = hdcp->cpu_transcoder;
/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
@@ -1643,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS_DDI(port)));
+ I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
+ port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1749,13 +1797,71 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
+static inline
+enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return MEI_DDI_A;
+ case PORT_B ... PORT_F:
+ return (enum mei_fw_ddi)port;
+ default:
+ return MEI_DDI_INVALID_PORT;
+ }
+}
+
+static inline
+enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A ... TRANSCODER_D:
+ return (enum mei_fw_tc)(cpu_transcoder | 0x10);
+ default: /* eDP and DSI transcoders are not HDCP capable */
+ return MEI_INVALID_TRANSCODER;
+ }
+}
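Both casts rely on the MEI firmware enums mirroring the i915 ones: ports B..F are assumed to map 1:1, and transcoders at a fixed 0x10 offset. For example:

    intel_get_mei_fw_ddi_index(PORT_B); /* == (enum mei_fw_ddi)PORT_B */
    intel_get_mei_fw_tc(TRANSCODER_A);  /* == TRANSCODER_A | 0x10 */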
+
+void intel_hdcp_transcoder_config(struct intel_connector *connector,
+ enum transcoder cpu_transcoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ if (!hdcp->shim)
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->cpu_transcoder = cpu_transcoder;
+ hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+ mutex_unlock(&hdcp->mutex);
+ }
+}
+
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
- data->port = connector->encoder->port;
+ if (INTEL_GEN(dev_priv) < 12)
+ data->fw_ddi =
+ intel_get_mei_fw_ddi_index(connector->encoder->port);
+ else
+ /*
+ * As per the ME FW API expectation, for Gen12+, fw_ddi is
+ * filled with zero (the invalid port index).
+ */
+ data->fw_ddi = MEI_DDI_INVALID_PORT;
+
+ /*
+ * As the associated transcoder is set and modified at modeset, fw_tc
+ * is initialized here to zero (the invalid transcoder index). For
+ * <Gen12 it is retained as invalid forever.
+ */
+ data->fw_tc = MEI_INVALID_TRANSCODER;
+
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
data->protocol = (u8)shim->protocol;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 13555b054930..41c1053d9e38 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -15,10 +15,14 @@ struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
struct intel_hdcp_shim;
+enum port;
+enum transcoder;
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
+void intel_hdcp_transcoder_config(struct intel_connector *connector,
+ enum transcoder cpu_transcoder);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b030f7ae3302..f6f5312205c4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -189,13 +189,19 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static int hsw_dip_data_size(unsigned int type)
+static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+ unsigned int type)
{
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_VSC_DATA_SIZE;
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ if (INTEL_GEN(dev_priv) >= 11)
+ return VIDEO_DIP_GMP_DATA_SIZE;
+ else
+ return VIDEO_DIP_DATA_SIZE;
default:
return VIDEO_DIP_DATA_SIZE;
}
@@ -514,7 +520,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
int i;
u32 val = I915_READ(ctl_reg);
- data_size = hsw_dip_data_size(type);
+ data_size = hsw_dip_data_size(dev_priv, type);
+
+ WARN_ON(len > data_size);
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -724,11 +732,20 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(frame, connector,
- adjusted_mode,
- crtc_state->limited_color_range ?
- HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL);
+ /* nonsense combination */
+ WARN_ON(crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
+ adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
drm_hdmi_avi_infoframe_content_type(frame, conn_state);
@@ -1491,7 +1508,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv =
intel_dig_port->base.base.dev->dev_private;
+ struct intel_connector *connector =
+ intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
+ enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
int ret;
union {
u32 reg;
@@ -1502,39 +1522,30 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
+ I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
+ if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
+ port)));
return false;
}
return true;
}
-struct hdcp2_hdmi_msg_data {
+struct hdcp2_hdmi_msg_timeout {
u8 msg_id;
- u32 timeout;
- u32 timeout2;
+ u16 timeout;
};
-static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
- { HDCP_2_2_AKE_INIT, 0, 0 },
- { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
- { HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
- { HDCP_2_2_AKE_STORED_KM, 0, 0 },
- { HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
- HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
- { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
- { HDCP_2_2_LC_INIT, 0, 0 },
- { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
- { HDCP_2_2_SKE_SEND_EKS, 0, 0 },
- { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
- { HDCP_2_2_REP_SEND_ACK, 0, 0 },
- { HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
- { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
+static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = {
+ { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, },
+ { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, },
+ { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, },
+ { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, },
+ { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, },
};
static
@@ -1551,12 +1562,17 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
{
int i;
- for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
- if (hdcp2_msg_data[i].msg_id == msg_id &&
- (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
- return hdcp2_msg_data[i].timeout;
- else if (hdcp2_msg_data[i].msg_id == msg_id)
- return hdcp2_msg_data[i].timeout2;
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) {
+ if (is_paired)
+ return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS;
+ else
+ return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) {
+ if (hdcp2_msg_timeout[i].msg_id == msg_id)
+ return hdcp2_msg_timeout[i].timeout;
+ }
return -EINVAL;
}
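AKE_SEND_HPRIME, the one message with two possible timeouts, is now an explicit branch keyed on the pairing state, so the table needs only a single timeout column. A hypothetical call:

    /* Not yet paired, so the longer H' timeout applies. */
    int t = get_hdcp2_msg_timeout(HDCP_2_2_AKE_SEND_HPRIME, false);
    /* t == HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS */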
@@ -2184,8 +2200,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
true, force_dvi);
}
+ if (status != MODE_OK)
+ return status;
- return status;
+ return intel_mode_valid_max_plane_size(dev_priv, mode);
}
static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
@@ -2261,9 +2279,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
static bool
intel_hdmi_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *config,
- int *clock_12bpc, int *clock_10bpc,
- int *clock_8bpc)
+ struct intel_crtc_state *config)
{
struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
@@ -2272,11 +2288,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return false;
}
- /* YCBCR420 TMDS rate requirement is half the pixel clock */
- config->port_clock /= 2;
- *clock_12bpc /= 2;
- *clock_10bpc /= 2;
- *clock_8bpc /= 2;
config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
@@ -2291,6 +2302,104 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
return true;
}
+static int intel_hdmi_port_clock(int clock, int bpc)
+{
+ /*
+ * Need to adjust the port link by:
+ * 1.5x for 12bpc
+ * 1.25x for 10bpc
+ */
+ return clock * bpc / 8;
+}
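The clock * bpc / 8 scaling reproduces the 1.5x and 1.25x link factors noted in the comment. For a 148.5 MHz pixel clock, for instance:

    intel_hdmi_port_clock(148500, 8);  /* 148500 kHz, unchanged */
    intel_hdmi_port_clock(148500, 10); /* 185625 kHz, 1.25x */
    intel_hdmi_port_clock(148500, 12); /* 222750 kHz, 1.5x */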
+
+static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int clock, bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ int bpc;
+
+ for (bpc = 12; bpc >= 10; bpc -= 2) {
+ if (hdmi_deep_color_possible(crtc_state, bpc) &&
+ hdmi_port_clock_valid(intel_hdmi,
+ intel_hdmi_port_clock(clock, bpc),
+ true, force_dvi) == MODE_OK)
+ return bpc;
+ }
+
+ return 8;
+}
+
+static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ bool force_dvi)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ int bpc, clock = adjusted_mode->crtc_clock;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
+ clock *= 2;
+
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ clock /= 2;
+
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
+ clock, force_dvi);
+
+ crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
+
+ /*
+ * pipe_bpp could already be below 8bpc due to
+ * FDI bandwidth constraints. We shouldn't bump it
+ * back up to 8bpc in that case.
+ */
+ if (crtc_state->pipe_bpp > bpc * 3)
+ crtc_state->pipe_bpp = bpc * 3;
+
+ DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n",
+ bpc, crtc_state->pipe_bpp);
+
+ if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
+ false, force_dvi) != MODE_OK) {
+ DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
+ crtc_state->port_clock);
+ return -EINVAL;
+ }
+
+ return 0;
+}
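Putting the adjustments together for a hypothetical 594 MHz 4K mode forced to YCbCr 4:2:0 (values are illustrative):

    int clock = 594000; /* adjusted_mode->crtc_clock, in kHz */
    int port_clock;

    clock /= 2;                                    /* YCbCr 4:2:0 -> 297000 */
    port_clock = intel_hdmi_port_clock(clock, 12); /* 12 bpc -> 445500 kHz */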
+
+static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ /*
+ * Our YCbCr output is always limited range.
+ * crtc_state->limited_color_range only applies to RGB,
+ * and it must never be set for YCbCr or we risk setting
+ * some conflicting bits in PIPECONF which will mess up
+ * the colors on the monitor.
+ */
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
+ return false;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /* See CEA-861-E - 5.1 Default Encoding Parameters */
+ return crtc_state->has_hdmi_sink &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -2302,11 +2411,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
- int clock_10bpc = clock_8bpc * 5 / 4;
- int clock_12bpc = clock_8bpc * 3 / 2;
- int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -2317,33 +2423,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /* See CEA-861-E - 5.1 Default Encoding Parameters */
- pipe_config->limited_color_range =
- pipe_config->has_hdmi_sink &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
- clock_8bpc *= 2;
- clock_10bpc *= 2;
- clock_12bpc *= 2;
- }
if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
- if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
- &clock_12bpc, &clock_10bpc,
- &clock_8bpc)) {
+ if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) {
DRM_ERROR("Can't support YCBCR420 output\n");
return -EINVAL;
}
}
+ pipe_config->limited_color_range =
+ intel_hdmi_limited_color_range(pipe_config, conn_state);
+
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
@@ -2355,43 +2447,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- /*
- * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
- * to check that the higher clock still fits within limits.
- */
- if (hdmi_deep_color_possible(pipe_config, 12) &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
- desired_bpp = 12*3;
-
- /* Need to adjust the port link by 1.5x for 12bpc. */
- pipe_config->port_clock = clock_12bpc;
- } else if (hdmi_deep_color_possible(pipe_config, 10) &&
- hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
- true, force_dvi) == MODE_OK) {
- DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
- desired_bpp = 10 * 3;
-
- /* Need to adjust the port link by 1.25x for 10bpc. */
- pipe_config->port_clock = clock_10bpc;
- } else {
- DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
- desired_bpp = 8*3;
-
- pipe_config->port_clock = clock_8bpc;
- }
-
- if (!pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
- pipe_config->pipe_bpp = desired_bpp;
- }
-
- if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock,
- false, force_dvi) != MODE_OK) {
- DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n");
- return -EINVAL;
- }
+ ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ if (ret)
+ return ret;
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
@@ -2431,6 +2489,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
}
+ intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
+ pipe_config->cpu_transcoder);
+
return 0;
}
@@ -2757,8 +2818,9 @@ intel_hdmi_connector_register(struct drm_connector *connector)
static void intel_hdmi_destroy(struct drm_connector *connector)
{
- if (intel_attached_hdmi(connector)->cec_notifier)
- cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
+ struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier;
+
+ cec_notifier_conn_unregister(n);
intel_connector_destroy(connector);
}
@@ -3007,7 +3069,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3073,13 +3135,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
+ struct cec_connector_info conn_info;
- DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
- port_name(port));
+ DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
+ intel_encoder->base.base.id, intel_encoder->base.name);
if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on port %c\n",
- intel_dig_port->max_lanes, port_name(port)))
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
@@ -3125,8 +3189,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
- intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
- port_identifier(port));
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ intel_hdmi->cec_notifier =
+ cec_notifier_conn_register(dev->dev, port_identifier(port),
+ &conn_info);
if (!intel_hdmi->cec_notifier)
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
@@ -3216,11 +3283,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
- intel_encoder->crtc_mask = 1 << 2;
+ intel_encoder->pipe_mask = BIT(PIPE_C);
else
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
} else {
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->pipe_mask = ~0;
}
intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG;
/*
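The crtc_mask to pipe_mask conversion in this hunk replaces hand-rolled (1 << n) values with BIT(PIPE_x). A small standalone sketch of the same encoding, with the enum and BIT() redefined here for illustration:

	#include <stdio.h>

	#define BIT(n) (1u << (n))

	enum pipe { PIPE_A, PIPE_B, PIPE_C };

	int main(void)
	{
		/* CHV port D can only drive pipe C; the other ports drive A or B. */
		unsigned int chv_port_d = BIT(PIPE_C);
		unsigned int chv_other  = BIT(PIPE_A) | BIT(PIPE_B);
		unsigned int any_pipe   = ~0u; /* the "all pipes" shorthand used above */

		printf("port D mask: %#x\n", chv_port_d);
		printf("A|B mask:    %#x\n", chv_other);
		printf("any pipe:    %#x\n", any_pipe);
		return 0;
	}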
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 106c2e0bc3c9..cf1ea5427639 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -23,6 +23,7 @@ struct intel_crtc_state;
struct intel_hdmi;
struct drm_connector_state;
union hdmi_infoframe;
+enum port;
void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 56be20f6f47e..fc29046d48ea 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -481,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+ DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
long_hpd ? "long" : "short");
queue_dig = true;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index b0cd447b7fbc..087b5f57b321 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -13,6 +13,7 @@
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
+enum port;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index b19800b58442..0b67f7887cd0 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
pinfo.size_data = sizeof(*pdata);
pinfo.dma_mask = DMA_BIT_MASK(32);
- pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
+ pdata->num_pipes = INTEL_NUM_PIPES(dev_priv);
pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
pdata->port[0].pipe = -1;
pdata->port[1].pipe = -1;
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index b7c459a8931c..b1bc78623647 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 temp;
if (HAS_PCH_SPLIT(dev_priv)) {
@@ -899,12 +899,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
- if (HAS_PCH_SPLIT(dev_priv))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN(dev_priv, 4))
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ if (INTEL_GEN(dev_priv) < 4)
+ intel_encoder->pipe_mask = BIT(PIPE_B);
else
- intel_encoder->crtc_mask = (1 << 1);
+ intel_encoder->pipe_mask = ~0;
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 29edfc343716..848ce07a8ec2 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -30,6 +30,7 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -230,7 +231,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
if (IS_ERR(rq))
return rq;
- err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
+ err = i915_active_add_request(&overlay->last_flip, rq);
if (err) {
i915_request_add(rq);
return ERR_PTR(err);
@@ -439,8 +440,6 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
/*
* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@@ -751,7 +750,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
@@ -852,7 +850,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
@@ -1068,11 +1065,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (!(params->flags & I915_OVERLAY_ENABLE)) {
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
-
ret = intel_overlay_switch_off(overlay);
-
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1088,7 +1081,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
if (i915_gem_object_is_tiled(new_bo)) {
DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
@@ -1152,14 +1144,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
return 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
i915_gem_object_put(new_bo);
@@ -1233,7 +1223,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
}
drm_modeset_lock_all(dev);
- mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
@@ -1290,7 +1279,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
ret = 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
return ret;
@@ -1303,15 +1291,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
struct i915_vma *vma;
int err;
- mutex_lock(&i915->drm.struct_mutex);
-
obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
- if (obj == NULL)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma)) {
@@ -1332,13 +1316,10 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
overlay->reg_bo = obj;
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
err_put_bo:
i915_gem_object_put(obj);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1367,8 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
overlay->contrast = 75;
overlay->saturation = 146;
- i915_active_init(dev_priv,
- &overlay->last_flip,
+ i915_active_init(&overlay->last_flip,
NULL, intel_overlay_last_flip_retire);
ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 3bfb720560c2..6a9f322d3fca 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -76,7 +76,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc_params.compression_enable &&
+ WARN_ON(crtc_state->dsc.compression_enable &&
crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -88,48 +88,35 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
-static int edp_psr_shift(enum transcoder cpu_transcoder)
+static void psr_irq_control(struct drm_i915_private *dev_priv)
{
- switch (cpu_transcoder) {
- case TRANSCODER_A:
- return EDP_PSR_TRANSCODER_A_SHIFT;
- case TRANSCODER_B:
- return EDP_PSR_TRANSCODER_B_SHIFT;
- case TRANSCODER_C:
- return EDP_PSR_TRANSCODER_C_SHIFT;
- default:
- MISSING_CASE(cpu_transcoder);
- /* fallthrough */
- case TRANSCODER_EDP:
- return EDP_PSR_TRANSCODER_EDP_SHIFT;
- }
-}
-
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
-{
- u32 debug_mask, mask;
- enum transcoder cpu_transcoder;
- u32 transcoders = BIT(TRANSCODER_EDP);
+ enum transcoder trans_shift;
+ u32 mask, val;
+ i915_reg_t imr_reg;
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
-
- debug_mask = 0;
- mask = 0;
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
-
- mask |= EDP_PSR_ERROR(shift);
- debug_mask |= EDP_PSR_POST_EXIT(shift) |
- EDP_PSR_PRE_ENTRY(shift);
+ /*
+ * gen12+ has registers relative to the transcoder, one per transcoder,
+ * all using the same bit definitions: handle them as TRANSCODER_EDP to
+ * force a 0 shift in the bit definitions
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
}
- if (debug & I915_PSR_DEBUG_IRQ)
- mask |= debug_mask;
+ mask = EDP_PSR_ERROR(trans_shift);
+ if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+ mask |= EDP_PSR_POST_EXIT(trans_shift) |
+ EDP_PSR_PRE_ENTRY(trans_shift);
- I915_WRITE(EDP_PSR_IMR, ~mask);
+ /* Warning: it is masking/setting reserved bits too */
+ val = I915_READ(imr_reg);
+ val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val |= ~mask;
+ I915_WRITE(imr_reg, val);
}
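The new psr_irq_control() keys everything off two values: which IMR register to program and what shift the PSR bit definitions need. A reduced sketch of that selection, with register names as plain strings since the i915 register macros are not reproduced here:

	#include <stdio.h>

	enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

	/* gen12+ has one PSR IMR per transcoder with 0-based bits, while older
	 * gens share EDP_PSR_IMR and shift the bits by transcoder. */
	static void pick_psr_imr(int gen, enum transcoder psr_trans,
				 int *trans_shift, const char **imr_reg)
	{
		if (gen >= 12) {
			*trans_shift = 0; /* per-transcoder register, 0 shift */
			*imr_reg = "TRANS_PSR_IMR(trans)";
		} else {
			*trans_shift = psr_trans; /* shared register, shifted bits */
			*imr_reg = "EDP_PSR_IMR";
		}
	}

	int main(void)
	{
		int shift;
		const char *reg;

		pick_psr_imr(12, TRANSCODER_A, &shift, &reg);
		printf("gen12: reg=%s shift=%d\n", reg, shift);
		pick_psr_imr(9, TRANSCODER_EDP, &shift, &reg);
		printf("gen9:  reg=%s shift=%d\n", reg, shift);
		return 0;
	}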
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -171,60 +158,58 @@ static void psr_event_print(u32 val, bool psr2_enabled)
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
- u32 transcoders = BIT(TRANSCODER_EDP);
- enum transcoder cpu_transcoder;
+ enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+ enum transcoder trans_shift;
+ i915_reg_t imr_reg;
ktime_t time_ns = ktime_get();
- u32 mask = 0;
- if (INTEL_GEN(dev_priv) >= 8)
- transcoders |= BIT(TRANSCODER_A) |
- BIT(TRANSCODER_B) |
- BIT(TRANSCODER_C);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ trans_shift = 0;
+ imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+ } else {
+ trans_shift = dev_priv->psr.transcoder;
+ imr_reg = EDP_PSR_IMR;
+ }
- for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- int shift = edp_psr_shift(cpu_transcoder);
+ if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ dev_priv->psr.last_entry_attempt = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
- if (psr_iir & EDP_PSR_ERROR(shift)) {
- DRM_WARN("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ dev_priv->psr.last_exit = time_ns;
+ DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
- dev_priv->psr.irq_aux_error = true;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = dev_priv->psr.psr2_enabled;
- /*
- * If this interruption is not masked it will keep
- * interrupting so fast that it prevents the scheduled
- * work to run.
- * Also after a PSR error, we don't want to arm PSR
- * again so we don't care about unmask the interruption
- * or unset irq_aux_error.
- */
- mask |= EDP_PSR_ERROR(shift);
- }
-
- if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
- dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ psr_event_print(val, psr2_enabled);
}
+ }
- if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
- dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ u32 val;
- if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = dev_priv->psr.psr2_enabled;
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
- psr_event_print(val, psr2_enabled);
- }
- }
- }
+ dev_priv->psr.irq_aux_error = true;
- if (mask) {
- mask |= I915_READ(EDP_PSR_IMR);
- I915_WRITE(EDP_PSR_IMR, mask);
+ /*
+ * If this interruption is not masked it will keep
+ * interrupting so fast that it prevents the scheduled
+ * work to run.
+ * Also after a PSR error, we don't want to arm PSR
+ * again so we don't care about unmask the interruption
+ * or unset irq_aux_error.
+ */
+ val = I915_READ(imr_reg);
+ val |= EDP_PSR_ERROR(trans_shift);
+ I915_WRITE(imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@@ -283,6 +268,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ if (dev_priv->psr.dp) {
+ DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ return;
+ }
+
drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
@@ -305,7 +295,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- WARN_ON(dev_priv->psr.dp);
dev_priv->psr.dp = intel_dp;
if (INTEL_GEN(dev_priv) >= 9 &&
@@ -390,7 +379,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
+ I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -401,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
+ I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -491,8 +480,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
- I915_WRITE(EDP_PSR_CTL, val);
+ val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -528,9 +518,87 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL, 0);
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
- I915_WRITE(EDP_PSR2_CTL, val);
+static bool
+transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
+{
+ if (INTEL_GEN(dev_priv) < 9)
+ return false;
+ else if (INTEL_GEN(dev_priv) >= 12)
+ return trans == TRANSCODER_A;
+ else
+ return trans == TRANSCODER_EDP;
+}
+
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+{
+ if (!cstate || !cstate->base.active)
+ return 0;
+
+ return DIV_ROUND_UP(1000 * 1000,
+ drm_mode_vrefresh(&cstate->base.adjusted_mode));
+}
+
+static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+ u32 idle_frames)
+{
+ u32 val;
+
+ idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val &= ~EDP_PSR2_IDLE_FRAME_MASK;
+ val |= idle_frames;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+}
+
+static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ psr2_program_idle_frames(dev_priv, 0);
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ int idle_frames;
+
+ intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ /*
+ * When restoring the PSR2 idle frame count, let's use 6 as the minimum
+ * to cover all known cases, including the off-by-one issue that the HW
+ * has in some of them.
+ */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ psr2_program_idle_frames(dev_priv, idle_frames);
+}
+
+static void tgl_dc5_idle_thread(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.idle_work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+ /* If delayed work is pending, it is not idle */
+ if (delayed_work_pending(&dev_priv->psr.idle_work))
+ goto unlock;
+
+ DRM_DEBUG_KMS("DC5/6 idle thread\n");
+ tgl_psr2_disable_dc3co(dev_priv);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->psr.dc3co_enabled)
+ return;
+
+ cancel_delayed_work(&dev_priv->psr.idle_work);
+ /* Before PSR2 exit, disallow DC3CO */
+ tgl_psr2_disable_dc3co(dev_priv);
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
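The idle-frame restore in tgl_psr2_disable_dc3co() combines two lower bounds: 6 frames as the floor for the known HW off-by-one, and the sink's sync latency plus one. A standalone sketch of that computation:

	#include <stdio.h>

	#define MAX(a, b) ((a) > (b) ? (a) : (b))

	/* Mirrors the idle-frame restore: max(6, VBT value), raised further
	 * if the sink sync latency demands it. */
	static int psr2_restore_idle_frames(int vbt_idle_frames, int sink_sync_latency)
	{
		int idle_frames = MAX(6, vbt_idle_frames);

		return MAX(idle_frames, sink_sync_latency + 1);
	}

	int main(void)
	{
		printf("vbt=2, latency=3 -> %d\n", psr2_restore_idle_frames(2, 3));
		printf("vbt=8, latency=1 -> %d\n", psr2_restore_idle_frames(8, 1));
		return 0;
	}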
@@ -544,17 +612,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
+ DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
+ return false;
+ }
+
/*
* DSC and PSR2 cannot be enabled simultaneously. If a requested
* resolution requires DSC to be enabled, priority is given to DSC
* over PSR2.
*/
- if (crtc_state->dsc_params.compression_enable) {
+ if (crtc_state->dsc.compression_enable) {
DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ psr_max_h = 5120;
+ psr_max_v = 3200;
+ } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
} else if (IS_GEN(dev_priv, 9)) {
@@ -606,10 +683,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
/*
* HSW spec explicitly says PSR is tied to port A.
- * BDW+ platforms with DDI implementation of PSR have different
- * PSR registers per transcoder and we only implement transcoder EDP
- * ones. Since by Display design transcoder EDP is tied to port A
- * we can safely escape based on the port A.
+ * BDW+ platforms have an instance of the PSR registers per transcoder,
+ * but for now the driver only supports one instance of PSR, so let's
+ * keep it hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
@@ -648,9 +724,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+ WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -663,25 +740,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
-static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- static const i915_reg_t regs[] = {
- [TRANSCODER_A] = CHICKEN_TRANS_A,
- [TRANSCODER_B] = CHICKEN_TRANS_B,
- [TRANSCODER_C] = CHICKEN_TRANS_C,
- [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
- };
-
- WARN_ON(INTEL_GEN(dev_priv) < 9);
-
- if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
- !regs[cpu_transcoder].reg))
- cpu_transcoder = TRANSCODER_A;
-
- return regs[cpu_transcoder];
-}
-
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -697,8 +755,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
- i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
- cpu_transcoder);
+ i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = I915_READ(reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
@@ -720,19 +777,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG, mask);
+ I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+
+ psr_irq_control(dev_priv);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
+ u32 val;
WARN_ON(dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+ dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
+ dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * Enabling in this situation causes the screen to freeze the first
+ * time that the PSR HW tries to activate, so let's keep PSR disabled
+ * to avoid any rendering problems.
+ */
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val &= EDP_PSR_ERROR(0);
+ } else {
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+ }
+ if (val) {
+ dev_priv->psr.sink_not_reliable = true;
+ DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+ return;
+ }
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -782,20 +866,28 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
u32 val;
if (!dev_priv->psr.active) {
- if (INTEL_GEN(dev_priv) >= 9)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR2_ENABLE);
+ }
+
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
+ WARN_ON(val & EDP_PSR_ENABLE);
+
return;
}
if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+ val &= ~EDP_PSR2_ENABLE;
+ I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ val &= ~EDP_PSR_ENABLE;
+ I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -817,10 +909,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_psr_exit(dev_priv);
if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
+ psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS;
+ psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -859,6 +951,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
+ cancel_delayed_work_sync(&dev_priv->psr.idle_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -963,7 +1056,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder),
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -979,10 +1073,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return false;
if (dev_priv->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS;
+ reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS;
+ reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -1067,7 +1161,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+ /*
+ * Do it right away if it's already enabled, otherwise it will be done
+ * when enabling the source.
+ */
+ if (dev_priv->psr.enabled)
+ psr_irq_control(dev_priv);
mutex_unlock(&dev_priv->psr.lock);
@@ -1159,6 +1259,44 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->psr.lock);
}
+/*
+ * Once we completely rely on PSR2 S/W tracking in the future,
+ * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
+ * events as well, so tgl_dc3co_flush() will need to be changed
+ * accordingly.
+ */
+static void
+tgl_dc3co_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin)
+{
+ u32 delay;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.dc3co_enabled)
+ goto unlock;
+
+ if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+ goto unlock;
+
+ /*
+ * Every frontbuffer flush from a flip event modifies the delay of the
+ * delayed work; when the delayed work finally runs, the display has
+ * been idle for that long.
+ */
+ if (!(frontbuffer_bits &
+ INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+ goto unlock;
+
+ tgl_psr2_enable_dc3co(dev_priv);
+ /* DC5/DC6 required idle frames = 6 */
+ delay = 6 * dev_priv->psr.dc3co_exit_delay;
+ mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
+ usecs_to_jiffies(delay));
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
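dc3co_exit_delay caches the frame time in microseconds (1e6 / vrefresh, per intel_get_frame_time_us() above), and tgl_dc3co_flush() arms the idle work at six of those frames. A standalone sketch of the arithmetic:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Frame time in us, as in intel_get_frame_time_us(). */
	static unsigned int frame_time_us(unsigned int vrefresh)
	{
		return DIV_ROUND_UP(1000 * 1000, vrefresh);
	}

	int main(void)
	{
		unsigned int exit_delay = frame_time_us(60);
		/* DC5/DC6 require 6 idle frames, so the idle work fires after 6x. */
		unsigned int delay_us = 6 * exit_delay;

		printf("frame time: %u us, idle-work delay: %u us\n",
		       exit_delay, delay_us);
		return 0;
	}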
/**
* intel_psr_flush - Flush PSR
* @dev_priv: i915 device
@@ -1178,8 +1316,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
if (!CAN_PSR(dev_priv))
return;
- if (origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP) {
+ tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
return;
+ }
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -1208,45 +1348,34 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
- u32 val;
-
if (!HAS_PSR(dev_priv))
return;
- dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
- HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
-
if (!dev_priv->psr.sink_support)
return;
+ if (IS_HASWELL(dev_priv))
+ /*
+ * HSW doesn't have its PSR registers in the same space as the
+ * transcoder, so set this to a value that, when subtracted from a
+ * register in transcoder space, results in the right offset for HSW
+ */
+ dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
+
if (i915_modparams.enable_psr == -1)
if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
i915_modparams.enable_psr = 0;
- /*
- * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
- * will still keep the error set even after the reset done in the
- * irq_preinstall and irq_uninstall hooks.
- * And enabling in this situation cause the screen to freeze in the
- * first time that PSR HW tries to activate so lets keep PSR disabled
- * to avoid any rendering problems.
- */
- val = I915_READ(EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
- if (val) {
- DRM_DEBUG_KMS("PSR interruption error set\n");
- dev_priv->psr.sink_not_reliable = true;
- }
-
/* Set link_standby x link_off defaults */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- else
- /* For new platforms let's respect VBT back again */
+ else if (INTEL_GEN(dev_priv) < 12)
+ /* For new platforms up to TGL let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
mutex_init(&dev_priv->psr.lock);
}
@@ -1288,7 +1417,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
if (val & DP_PSR_LINK_CRC_ERROR)
- DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+ DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index dc818826f36d..46e4de8b8cd5 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index adeb1c840976..5b7f4baf7348 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2921,7 +2921,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
bytes[0], bytes[1]);
return false;
}
- intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_sdvo->base.pipe_mask = ~0;
return true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index c9e05bcdd141..a66f224aa17d 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
enum pipe;
+enum port;
bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum pipe *pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index cae25e493128..72fda0430062 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,19 +48,6 @@
#include "intel_psr.h"
#include "intel_sprite.h"
-bool is_planar_yuv_format(u32 pixelformat)
-{
- switch (pixelformat) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return true;
- default:
- return false;
- }
-}
-
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
{
@@ -300,10 +287,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- src->x1 = src_x << 16;
- src->x2 = (src_x + src_w) << 16;
- src->y1 = src_y << 16;
- src->y2 = (src_y + src_h) << 16;
+ drm_rect_init(src, src_x << 16, src_y << 16,
+ src_w << 16, src_h << 16);
if (!fb->format->is_yuv)
return 0;
@@ -337,6 +322,55 @@ bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
icl_hdr_plane_mask() & BIT(plane_id);
}
+static void
+skl_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+
+ if (fb->format->cpp[0] == 8) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ *num = 10;
+ *den = 8;
+ } else {
+ *num = 9;
+ *den = 8;
+ }
+ } else {
+ *num = 1;
+ *den = 1;
+ }
+}
+
+static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int num, den;
+
+ skl_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* two pixels per clock on glk+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ den *= 2;
+
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
+ mul_u32_u32(den, dst_w * dst_h));
+}
+
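skl_plane_min_cdclk() scales the pixel rate by the plane ratio and by the area downscale factor, since downscaling forces the plane to fetch more pixels per output pixel. A simplified 64-bit sketch of the same formula:

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified skl_plane_min_cdclk(): pixel rate scaled by num/den and
	 * by src/dst area when downscaling; rounds up like DIV64_U64_ROUND_UP. */
	static uint64_t min_cdclk_khz(uint64_t pixel_rate, unsigned int num,
				      unsigned int den, unsigned int src_w,
				      unsigned int src_h, unsigned int dst_w,
				      unsigned int dst_h)
	{
		uint64_t d;

		/* Downscaling limits the maximum pixel rate. */
		if (dst_w > src_w)
			dst_w = src_w;
		if (dst_h > src_h)
			dst_h = src_h;

		d = (uint64_t)den * dst_w * dst_h;
		return (pixel_rate * num * src_w * src_h + d - 1) / d;
	}

	int main(void)
	{
		/* 2x downscale in each direction at 148500 kHz, 1:1 ratio. */
		printf("%llu kHz\n",
		       (unsigned long long)min_cdclk_khz(148500, 1, 1,
							 3840, 2160, 1920, 1080));
		return 0;
	}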
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -361,6 +395,7 @@ skl_program_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler =
@@ -381,7 +416,7 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ if (drm_format_info_is_yuv_semiplanar(fb->format) &&
!icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -554,7 +589,7 @@ skl_program_plane(struct intel_plane *plane,
u32 y = plane_state->color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
- struct intel_plane *linked = plane_state->linked_plane;
+ struct intel_plane *linked = plane_state->planar_linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
u32 plane_color_ctl = 0;
@@ -653,7 +688,7 @@ skl_update_plane(struct intel_plane *plane,
{
int color_plane = 0;
- if (plane_state->linked_plane) {
+ if (plane_state->planar_linked_plane) {
/* Program the UV plane */
color_plane = 1;
}
@@ -825,6 +860,85 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
+static void
+vlv_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ /*
+ * VLV bspec only considers cases where all three planes are
+ * enabled, and cases where the primary and one sprite are enabled.
+ * Let's assume the case with just two sprites enabled also
+ * maps to the latter case.
+ */
+ if (hweight8(active_planes) == 3) {
+ switch (cpp) {
+ case 8:
+ *num = 11;
+ *den = 8;
+ break;
+ case 4:
+ *num = 18;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ vlv_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
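The vlv_plane_ratio() table above picks the cdclk overhead from the number of active planes (cursor excluded) and this plane's bytes per pixel. A condensed sketch of that lookup:

	#include <stdio.h>

	/* Condensed vlv_plane_ratio(): values from the VLV bspec cases quoted
	 * above; two sprites are assumed to behave like primary + sprite. */
	static void vlv_plane_ratio(int active_planes, int cpp,
				    unsigned int *num, unsigned int *den)
	{
		*num = 1;
		*den = 1;

		if (active_planes == 3) {
			if (cpp == 8) { *num = 11; *den = 8; }
			else if (cpp == 4) { *num = 18; *den = 16; }
		} else if (active_planes == 2) {
			if (cpp == 8) { *num = 10; *den = 8; }
			else if (cpp == 4) { *num = 17; *den = 16; }
		} else if (cpp == 8) {
			*num = 10; *den = 8;
		}
	}

	int main(void)
	{
		unsigned int num, den;

		vlv_plane_ratio(3, 4, &num, &den);
		printf("3 planes, 32bpp: %u/%u\n", num, den);
		return 0;
	}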
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1031,6 +1145,164 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ case 4:
+ *num = 17;
+ *den = 16;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ switch (cpp) {
+ case 8:
+ *num = 12;
+ *den = 8;
+ break;
+ case 4:
+ *num = 19;
+ *den = 16;
+ break;
+ case 2:
+ *num = 33;
+ *den = 32;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+}
+
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
+static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, dst_w, pixel_rate;
+ unsigned int num, den;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+
+ if (src_w != dst_w)
+ ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den);
+ else
+ ivb_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w),
+ den * dst_w);
+}
+
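ivb_sprite_min_cdclk() only lets horizontal downscaling raise the clock requirement, so the rate scales by src_w/dst_w on top of the num/den ratio. A standalone sketch:

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified ivb_sprite_min_cdclk(): num/den ratio plus the horizontal
	 * downscale factor, rounded up. */
	static uint64_t sprite_min_cdclk(uint64_t pixel_rate, unsigned int num,
					 unsigned int den, unsigned int src_w,
					 unsigned int dst_w)
	{
		/* Horizontal downscaling limits the maximum pixel rate. */
		if (dst_w > src_w)
			dst_w = src_w;

		return (pixel_rate * num * src_w + (uint64_t)den * dst_w - 1) /
		       ((uint64_t)den * dst_w);
	}

	int main(void)
	{
		/* 19/16 scaling ratio (32bpp), 2x horizontal downscale. */
		printf("%llu kHz\n",
		       (unsigned long long)sprite_min_cdclk(148500, 19, 16,
							    3840, 1920));
		return 0;
	}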
+static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ unsigned int *num, unsigned int *den)
+{
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ if (hweight8(active_planes) == 2) {
+ switch (cpp) {
+ case 8:
+ *num = 10;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ } else {
+ switch (cpp) {
+ case 8:
+ *num = 9;
+ *den = 8;
+ break;
+ default:
+ *num = 1;
+ *den = 1;
+ break;
+ }
+ }
+}
+
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+ unsigned int num, den;
+
+ hsw_plane_ratio(crtc_state, plane_state, &num, &den);
+
+ return DIV_ROUND_UP(pixel_rate * num, den);
+}
+
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
u32 sprctl = 0;
@@ -1044,6 +1316,16 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
+static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+
+ return fb->format->cpp[0] == 8 &&
+ (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv));
+}
+
static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -1066,6 +1348,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ sprctl |= SPRITE_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
break;
@@ -1083,7 +1371,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
- sprctl |= SPRITE_INT_GAMMA_DISABLE;
+ if (!ivb_need_sprite_gamma(plane_state))
+ sprctl |= SPRITE_INT_GAMMA_DISABLE;
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -1105,12 +1394,26 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
-static void ivb_sprite_linear_gamma(u16 gamma[18])
+static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state,
+ u16 gamma[18])
{
- int i;
+ int scale, i;
- for (i = 0; i < 17; i++)
- gamma[i] = (i << 10) / 16;
+ /*
+ * WaFP16GammaEnabling:ivb,hsw
+ * "Workaround : When using the 64-bit format, the sprite output
+ * on each color channel has one quarter amplitude. It can be
+ * brought up to full amplitude by using sprite internal gamma
+ * correction, pipe gamma correction, or pipe color space
+ * conversion to multiply the sprite output by four."
+ */
+ scale = 4;
+
+ for (i = 0; i < 16; i++)
+ gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1);
+
+ gamma[i] = min((scale * i << 10) / 16, 1 << 10);
+ i++;
gamma[i] = 3 << 10;
i++;
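The x4 scale in the new ramp compensates for the quarter-amplitude FP16 sprite output described in the workaround text, clamping each entry to the 1.0 (1 << 10) range of the gamma unit. A standalone sketch of the resulting LUT:

	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned short gamma[18];
		int scale = 4; /* WaFP16GammaEnabling: x4 undoes the 1/4 amplitude */
		int i;

		/* First 16 entries: a linear ramp scaled by 4, clamped below 1.0. */
		for (i = 0; i < 16; i++)
			gamma[i] = MIN((scale * i << 10) / 16, (1 << 10) - 1);

		gamma[i] = MIN((scale * i << 10) / 16, 1 << 10); /* entry 16 */
		i++;
		gamma[i] = 3 << 10; /* top entry, as in the surrounding hunk */

		for (i = 0; i < 18; i++)
			printf("gamma[%2d] = %u\n", i, gamma[i]);
		return 0;
	}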
@@ -1124,7 +1427,10 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
u16 gamma[18];
int i;
- ivb_sprite_linear_gamma(gamma);
+ if (!ivb_need_sprite_gamma(plane_state))
+ return;
+
+ ivb_sprite_linear_gamma(plane_state, gamma);
/* FIXME these registers are single buffered :( */
for (i = 0; i < 16; i++)
@@ -1257,6 +1563,53 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int hscale, pixel_rate;
+ unsigned int limit, decimate;
+
+ /*
+ * Note that crtc_state->pixel_rate accounts for both
+ * horizontal and vertical panel fitter downscaling factors.
+ * Pre-HSW bspec tells us to only consider the horizontal
+ * downscaling factor here. We ignore that and just consider
+ * both for simplicity.
+ */
+ pixel_rate = crtc_state->pixel_rate;
+
+ /* Horizontal downscaling limits the maximum pixel rate */
+ hscale = drm_rect_calc_hscale(&plane_state->base.src,
+ &plane_state->base.dst,
+ 0, INT_MAX);
+ if (hscale < 0x10000)
+ return pixel_rate;
+
+ /* Decimation steps at 2x,4x,8x,16x */
+ decimate = ilog2(hscale >> 16);
+ hscale >>= decimate;
+
+ /* Starting limit is 90% of cdclk */
+ limit = 9;
+
+ /* -10% per decimation step */
+ limit -= decimate;
+
+ /* -10% for RGB */
+ if (fb->format->cpp[0] >= 4)
+ limit--;
+
+ /*
+ * We should also do -10% if sprite scaling is enabled
+ * on the other pipe, but we can't really check for that,
+ * so we ignore it.
+ */
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale),
+ limit << 16);
+}
+
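The g4x path budgets the sprite in tenths of cdclk: decimation proceeds in power-of-two steps, and each step, as well as an RGB format, costs 10% of the 90% starting budget. A standalone sketch of the whole computation (hscale is .16 fixed point):

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t g4x_min_cdclk(uint64_t pixel_rate, uint32_t hscale, int is_rgb)
	{
		unsigned int limit = 9; /* starting limit is 90% of cdclk */
		unsigned int decimate = 0;
		uint64_t d;

		if (hscale < 0x10000) /* no horizontal downscaling */
			return pixel_rate;

		/* Decimation steps at 2x,4x,8x,16x: integer log2 of the scale. */
		while ((hscale >> 16) >> (decimate + 1))
			decimate++;
		hscale >>= decimate;

		limit -= decimate; /* -10% per decimation step */
		if (is_rgb)
			limit--;   /* -10% for RGB */

		d = (uint64_t)limit << 16;
		return (pixel_rate * 10 * hscale + d - 1) / d;
	}

	int main(void)
	{
		/* 3x horizontal downscale of an RGB sprite. */
		printf("%llu kHz\n",
		       (unsigned long long)g4x_min_cdclk(148500, 3 << 16, 1));
		return 0;
	}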
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
@@ -1300,6 +1653,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
break;
+ case DRM_FORMAT_XBGR16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ dvscntr |= DVS_FORMAT_RGBX161616;
+ break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
break;
@@ -1513,6 +1872,11 @@ static bool intel_fb_scalable(const struct drm_framebuffer *fb)
switch (fb->format->format) {
case DRM_FORMAT_C8:
return false;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return INTEL_GEN(to_i915(fb->dev)) >= 11;
default:
return true;
}
@@ -1791,7 +2155,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
- if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
+ if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
@@ -1801,6 +2165,22 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
return 0;
}
+static int skl_plane_max_scale(struct drm_i915_private *dev_priv,
+ const struct drm_framebuffer *fb)
+{
+ /*
+ * We don't yet know the final source width nor
+ * whether we can use the HQ scaler mode. Assume
+ * the best case.
+ * FIXME need to properly check this later.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ !drm_format_info_is_yuv_semiplanar(fb->format))
+ return 0x30000 - 1;
+ else
+ return 0x20000 - 1;
+}
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
@@ -1818,7 +2198,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
/* use scaler when colorkey is not required */
if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state, fb->format->format);
+ max_scale = skl_plane_max_scale(dev_priv, fb);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1993,8 +2373,10 @@ static const u64 i9xx_plane_format_modifiers[] = {
};
static const u32 snb_plane_formats[] = {
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2024,6 +2406,8 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2039,6 +2423,8 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2055,6 +2441,8 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -2158,6 +2546,13 @@ static const u64 skl_plane_format_modifiers_ccs[] = {
DRM_FORMAT_MOD_INVALID
};
+static const u64 gen12_plane_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -2198,6 +2593,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
@@ -2306,6 +2703,55 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
}
}
+static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
+ u32 format, u64 modifier)
+{
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ return false;
+ }
+
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
static const struct drm_plane_funcs g4x_sprite_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -2342,6 +2788,15 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
+static const struct drm_plane_funcs gen12_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = gen12_plane_format_mod_supported,
+};
+
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2430,6 +2885,7 @@ struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
+ const struct drm_plane_funcs *plane_funcs;
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
@@ -2459,6 +2915,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = skl_disable_plane;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
+ plane->min_cdclk = skl_plane_min_cdclk;
if (icl_is_nv12_y_plane(plane_id))
plane->update_slave = icl_update_slave;
@@ -2472,11 +2929,19 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* TODO: Implement support for gen-12 CCS modifiers */
+ plane->has_ccs = false;
+ modifiers = gen12_plane_format_modifiers_noccs;
+ plane_funcs = &gen12_plane_funcs;
+ } else {
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
+ plane_funcs = &skl_plane_funcs;
+ }
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
@@ -2486,7 +2951,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
possible_crtcs = BIT(pipe);
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, &skl_plane_funcs,
+ possible_crtcs, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -2519,6 +2984,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ drm_plane_create_zpos_immutable_property(&plane->base, plane_id);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
@@ -2540,7 +3007,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
const u64 *modifiers;
const u32 *formats;
int num_formats;
- int ret;
+ int ret, zpos;
if (INTEL_GEN(dev_priv) >= 9)
return skl_universal_plane_create(dev_priv, pipe,
@@ -2556,6 +3023,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = vlv_disable_plane;
plane->get_hw_state = vlv_plane_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->min_cdclk = vlv_plane_min_cdclk;
formats = vlv_plane_formats;
num_formats = ARRAY_SIZE(vlv_plane_formats);
@@ -2569,6 +3037,11 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->get_hw_state = ivb_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ plane->min_cdclk = hsw_plane_min_cdclk;
+ else
+ plane->min_cdclk = ivb_sprite_min_cdclk;
+
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
@@ -2580,6 +3053,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->disable_plane = g4x_disable_plane;
plane->get_hw_state = g4x_plane_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->min_cdclk = g4x_sprite_min_cdclk;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN(dev_priv, 6)) {
@@ -2630,6 +3104,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ zpos = sprite + 1;
+ drm_plane_create_zpos_immutable_property(&plane->base, zpos);
+
drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
return plane;
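[Editor's sketch] The two zpos hunks above encode a fixed stacking order: gen9+ universal planes use the hardware plane_id directly as their zpos, while older sprite planes sit above the primary plane (zpos 0) at sprite + 1. A minimal sketch of that convention, assuming only the DRM core helper drm_plane_create_zpos_immutable_property() (the wrapper name below is hypothetical):

#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

/* Fixed zpos: plane_id on gen9+, sprite + 1 on older platforms. */
static void example_init_zpos(struct drm_plane *plane, bool gen9_plus,
                              unsigned int plane_id, unsigned int sprite)
{
        unsigned int zpos = gen9_plus ? plane_id : sprite + 1;

        drm_plane_create_zpos_immutable_property(plane, zpos);
}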
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 093a2d156f1e..5eeaa92420d1 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -17,7 +17,6 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
-bool is_planar_yuv_format(u32 pixelformat);
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -50,4 +49,11 @@ static inline u8 icl_hdr_plane_mask(void)
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);
+int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
#endif /* __INTEL_SPRITE_H__ */
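[Editor's sketch] The plane->min_cdclk hooks declared above (and wired up in intel_sprite.c) report the minimum CDCLK a given plane state requires; the display core is then expected to take the worst case over all active planes. A generic sketch of that aggregation with hypothetical names (the per-platform formulas live in the hooks themselves and are not shown in this diff):

/* Take the worst-case (largest) per-plane requirement, in kHz. */
static int example_aggregate_min_cdclk(const int *plane_min_cdclk,
                                       int num_planes)
{
        int i, min_cdclk = 0;

        for (i = 0; i < num_planes; i++)
                if (plane_min_cdclk[i] > min_cdclk)
                        min_cdclk = plane_min_cdclk[i];

        return min_cdclk;
}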
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 85743a43bee2..7773169b7331 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,32 +23,38 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static bool has_modular_fia(struct drm_i915_private *i915)
-{
- if (!INTEL_INFO(i915)->display.has_modular_fia)
- return false;
-
- return intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
-}
-
-static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
- enum tc_port tc_port)
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915,
+ struct intel_digital_port *dig_port)
{
- if (!has_modular_fia(i915))
- return FIA1;
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+ u32 modular_fia;
+
+ if (INTEL_INFO(i915)->display.has_modular_fia) {
+ modular_fia = intel_uncore_read(&i915->uncore,
+ PORT_TX_DFLEXDPSP(FIA1));
+ modular_fia &= MODULAR_FIA_MASK;
+ } else {
+ modular_fia = 0;
+ }
/*
 * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
 * than two TC ports, there are multiple instances of Modular FIA.
*/
- return tc_port / 2;
+ if (modular_fia) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
}
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 lane_mask;
@@ -57,8 +63,23 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
WARN_ON(lane_mask == 0xffffffff);
- return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
+ return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
+}
+
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 pin_mask;
+
+ pin_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+
+ WARN_ON(pin_mask == 0xffffffff);
+
+ return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
+ DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
@@ -95,7 +116,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -104,19 +124,21 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
switch (required_lanes) {
case 1:
- val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
break;
case 2:
- val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ val |= lane_reversal ?
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
break;
case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
break;
default:
MISSING_CASE(required_lanes);
@@ -164,9 +186,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return mask;
}
- if (val & TC_LIVE_STATE_TBT(tc_port))
+ if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_TBT_ALT);
- if (val & TC_LIVE_STATE_TC(tc_port))
+ if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
@@ -182,7 +204,6 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -194,14 +215,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return false;
}
- return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
+ return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
bool enable)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -215,9 +235,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
return false;
}
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
if (!enable)
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
@@ -232,7 +252,6 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
@@ -244,7 +263,7 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
return true;
}
- return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
+ return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}
/*
@@ -540,5 +559,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
mutex_init(&dig_port->tc_lock);
dig_port->tc_legacy_port = is_legacy;
dig_port->tc_link_refcount = 0;
- dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
+ tc_port_load_fia_params(i915, dig_port);
}
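[Editor's sketch] The FIA bookkeeping introduced above reduces to a small pure mapping; a minimal sketch, assuming the two-ports-per-FIA layout described in the comment and FIA1 == 0 as in the driver's enum (the helper name is hypothetical):

/*
 * With modular FIA, each instance serves two TC ports: port N lives in
 * FIA (N / 2) at index (N % 2). Without modular FIA, everything is in
 * FIA1 and the index is the TC port number itself.
 */
static void example_fia_params(bool modular_fia, int tc_port,
                               int *fia, int *fia_idx)
{
        if (modular_fia) {
                *fia = tc_port / 2;     /* FIA1 == 0, FIA2 == 1, ... */
                *fia_idx = tc_port % 2;
        } else {
                *fia = 0;               /* FIA1 */
                *fia_idx = tc_port;
        }
}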
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 783d75531435..463f1b3c836f 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -13,6 +13,7 @@ struct intel_digital_port;
bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index b70221f5112a..9983fadf6c28 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -961,11 +961,10 @@ intel_tv_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
/* Ensure TV refresh is close to desired refresh */
- if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
- < 1000)
- return MODE_OK;
+ if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000)
+ return MODE_CLOCK_RANGE;
- return MODE_CLOCK_RANGE;
+ return MODE_OK;
}
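[Editor's note] For reference, tv_mode->refresh is stored in millihertz, so the rewritten check accepts user modes whose vertical refresh is within 1 Hz of the TV mode: an NTSC-M entry (refresh == 59940, i.e. 59.94 Hz) against a 60 Hz mode gives |59940 - 60 * 1000| = 60 < 1000, hence MODE_OK, while a 50 Hz mode gives 9940 >= 1000, hence MODE_CLOCK_RANGE.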
static int
@@ -1702,7 +1701,7 @@ intel_tv_detect(struct drm_connector *connector,
struct intel_load_detect_pipe tmp;
int ret;
- ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx);
+ ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
if (ret < 0)
return ret;
@@ -1948,9 +1947,8 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_encoder->type = INTEL_OUTPUT_TVOUT;
intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ intel_encoder->pipe_mask = ~0;
intel_encoder->cloneable = 0;
- intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
/* BIOS margin values */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index dfcd156b5094..69a7cb1fa121 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -114,6 +114,7 @@ enum bdb_block_id {
BDB_LVDS_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
+ BDB_COMPRESSION_PARAMETERS = 56,
BDB_SKIP = 254, /* VBIOS private block, ignore */
};
@@ -291,6 +292,8 @@ struct bdb_general_features {
#define DVO_PORT_HDMIE 12 /* 193 */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
+#define DVO_PORT_DPG 15
+#define DVO_PORT_HDMIG 16
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
@@ -325,6 +328,7 @@ enum vbt_gmbus_ddi {
#define DP_AUX_D 0x30
#define DP_AUX_E 0x50
#define DP_AUX_F 0x60
+#define DP_AUX_G 0x70
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
@@ -808,4 +812,55 @@ struct bdb_mipi_sequence {
u8 data[0]; /* up to 6 variable length blocks */
} __packed;
+/*
+ * Block 56 - Compression Parameters
+ */
+
+#define VBT_RC_BUFFER_BLOCK_SIZE_1KB 0
+#define VBT_RC_BUFFER_BLOCK_SIZE_4KB 1
+#define VBT_RC_BUFFER_BLOCK_SIZE_16KB 2
+#define VBT_RC_BUFFER_BLOCK_SIZE_64KB 3
+
+#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value) ((vbt_value) + 8) /* bits */
+#define VBT_DSC_MAX_BPP(vbt_value) (6 + (vbt_value) * 2)
+
+struct dsc_compression_parameters_entry {
+ u8 version_major:4;
+ u8 version_minor:4;
+
+ u8 rc_buffer_block_size:2;
+ u8 reserved1:6;
+
+ /*
+ * Buffer size in bytes:
+ *
+ * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes
+ */
+ u8 rc_buffer_size;
+ u32 slices_per_line;
+
+ u8 line_buffer_depth:4;
+ u8 reserved2:4;
+
+ /* Flag Bits 1 */
+ u8 block_prediction_enable:1;
+ u8 reserved3:7;
+
+ u8 max_bpp; /* mapping */
+
+ /* Color depth capabilities */
+ u8 reserved4:1;
+ u8 support_8bpc:1;
+ u8 support_10bpc:1;
+ u8 support_12bpc:1;
+ u8 reserved5:4;
+
+ u16 slice_height;
+} __packed;
+
+struct bdb_compression_parameters {
+ u16 entry_size;
+ struct dsc_compression_parameters_entry data[16];
+} __packed;
+
#endif /* _INTEL_VBT_DEFS_H_ */
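[Editor's sketch] The buffer-size formula spelled out in the rc_buffer_size comment above decodes as follows (the helper name is hypothetical):

/* 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes */
static unsigned int example_rc_buffer_bytes(unsigned int block_size,
                                            unsigned int rc_buffer_size)
{
        return (1u << (2 * block_size)) * 1024 * (rc_buffer_size + 1);
}

For example, VBT_RC_BUFFER_BLOCK_SIZE_4KB (1) with rc_buffer_size == 3 yields 4 * 1024 * 4 = 16384 bytes, and VBT_DSC_MAX_BPP(2) evaluates to 6 + 2 * 2 = 10 bpp.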
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index d4fb7f16f9f6..896b0c334f5e 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -322,8 +322,8 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
- struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
- u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
+ u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
u8 i = 0;
int row_index = 0;
int column_index = 0;
@@ -332,7 +332,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
- pipe_config->dsc_params.slice_count);
+ pipe_config->dsc.slice_count);
/*
* Slice Height of 8 works for all currently available panels. So start
* with that if pic_height is an integral multiple of 8.
@@ -485,13 +485,13 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
u32 rc_range_params_dword[8];
- u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 2 : 1;
int i = 0;
/* Populate PICTURE_PARAMETER_SET_0 registers */
@@ -514,11 +514,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
pps_val);
}
@@ -533,11 +533,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
pps_val);
}
@@ -553,11 +553,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
pps_val);
}
@@ -573,11 +573,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
pps_val);
}
@@ -593,11 +593,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
pps_val);
}
@@ -613,11 +613,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
pps_val);
}
@@ -635,11 +635,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
pps_val);
}
@@ -655,11 +655,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
pps_val);
}
@@ -675,11 +675,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
pps_val);
}
@@ -695,11 +695,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
pps_val);
}
@@ -717,11 +717,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
pps_val);
}
@@ -740,11 +740,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
- if (crtc_state->dsc_params.dsc_split)
+ if (crtc_state->dsc.dsc_split)
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
pps_val);
}
@@ -763,7 +763,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
@@ -782,7 +782,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_buf_thresh_dword[2]);
I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
rc_buf_thresh_dword[3]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
rc_buf_thresh_dword[0]);
I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
@@ -824,7 +824,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
@@ -859,7 +859,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
rc_range_params_dword[6]);
I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
rc_range_params_dword[7]);
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
rc_range_params_dword[0]);
I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
@@ -885,7 +885,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
@@ -909,7 +909,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
u32 dss_ctl1_val = 0;
u32 dss_ctl2_val = 0;
- if (!crtc_state->dsc_params.compression_enable)
+ if (!crtc_state->dsc.compression_enable)
return;
/* Enable Power wells for VDSC/joining */
@@ -928,7 +928,7 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
- if (crtc_state->dsc_params.dsc_split) {
+ if (crtc_state->dsc.dsc_split) {
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
@@ -944,7 +944,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
- if (!old_crtc_state->dsc_params.compression_enable)
+ if (!old_crtc_state->dsc.compression_enable)
return;
if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
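[Editor's sketch] Every dsc.dsc_split hunk in this file follows the same shape: write the PPS value to the first VDSC engine, and again to the second engine only when the stream is split; eDP uses the DSCA/DSCC instances while other transcoders use the per-pipe ICL_DSC0/ICL_DSC1 instances. A minimal sketch of that pattern, with a hypothetical write callback standing in for I915_WRITE:

static void example_write_pps(bool edp, bool dsc_split, unsigned int pps_val,
                              void (*wr)(const char *reg, unsigned int val))
{
        if (edp) {
                wr("DSCA_PPS_n", pps_val);
                if (dsc_split)                  /* second VDSC engine */
                        wr("DSCC_PPS_n", pps_val);
        } else {
                wr("ICL_DSC0_PPS_n(pipe)", pps_val);
                if (dsc_split)
                        wr("ICL_DSC1_PPS_n(pipe)", pps_val);
        }
}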
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
new file mode 100644
index 000000000000..2ff7293986d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/vgaarb.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_vga.h"
+
+static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return VLV_VGACNTRL;
+ else if (INTEL_GEN(i915) >= 5)
+ return CPU_VGACNTRL;
+ else
+ return VGACNTRL;
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+ u8 sr1;
+
+ /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(SR01, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1 << 5, VGA_SR_DATA);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ udelay(300);
+
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ POSTING_READ(vga_reg);
+}
+
+void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
+
+ if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
+ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ intel_vga_disable(dev_priv);
+ }
+}
+
+void intel_vga_redisable(struct drm_i915_private *i915)
+{
+ intel_wakeref_t wakeref;
+
+ /*
+	 * This function can be called either from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA);
+ if (!wakeref)
+ return;
+
+ intel_vga_redisable_power_on(i915);
+
+ intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref);
+}
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /*
+ * After we re-enable the power well, if we touch VGA register 0x3d5
+ * we'll get unclaimed register interrupts. This stops after we write
+ * anything to the VGA MSR register. The vgacon module uses this
+ * register all the time, so if we unbind our driver and, as a
+ * consequence, bind vgacon, we'll get stuck in an infinite loop at
+	 * console_unlock(). So here we touch the VGA MSR register, making
+ * sure vgacon can keep working normally without triggering interrupts
+ * and error messages.
+ */
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
+
+static int
+intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
+ DRM_ERROR("failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
+ DRM_ERROR("failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static unsigned int
+intel_vga_set_decode(void *cookie, bool enable_decode)
+{
+ struct drm_i915_private *i915 = cookie;
+
+ intel_vga_set_state(i915, enable_decode);
+
+ if (enable_decode)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+int intel_vga_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ int ret;
+
+ /*
+ * If we have > 1 VGA cards, then we need to arbitrate access to the
+ * common VGA resources.
+ *
+ * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and
+ * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+void intel_vga_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_client_register(pdev, NULL, NULL, NULL);
+}
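[Editor's sketch] intel_vga_set_state() above reduces to flipping one bit in the bridge's GMCH control word; a minimal standalone sketch of that read-modify-write (the mask is taken as a parameter here, the driver uses INTEL_GMCH_VGA_DISABLE, and the caller writes the result back via PCI config space):

/* Set the disable bit to turn legacy VGA decode off, clear it to turn
 * decode back on. */
static unsigned short example_toggle_vga_decode(unsigned short gmch_ctrl,
                                                unsigned short disable_mask,
                                                bool enable_decode)
{
        if (enable_decode)
                gmch_ctrl &= ~disable_mask;
        else
                gmch_ctrl |= disable_mask;

        return gmch_ctrl;
}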
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
new file mode 100644
index 000000000000..ba5b55b917f0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VGA_H__
+#define __INTEL_VGA_H__
+
+struct drm_i915_private;
+
+void intel_vga_reset_io_mem(struct drm_i915_private *i915);
+void intel_vga_disable(struct drm_i915_private *i915);
+void intel_vga_redisable(struct drm_i915_private *i915);
+void intel_vga_redisable_power_on(struct drm_i915_private *i915);
+int intel_vga_register(struct drm_i915_private *i915);
+void intel_vga_unregister(struct drm_i915_private *i915);
+
+#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index a71b22bdd95b..0ca49b1604c6 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum pipe pipe = intel_crtc->pipe;
enum port port;
u32 val;
bool glk_cold_boot = false;
@@ -1870,11 +1870,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
* port C. BXT isn't limited like this.
*/
if (IS_GEN9_LP(dev_priv))
- intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ intel_encoder->pipe_mask = ~0;
else if (port == PORT_A)
- intel_encoder->crtc_mask = BIT(PIPE_A);
+ intel_encoder->pipe_mask = BIT(PIPE_A);
else
- intel_encoder->crtc_mask = BIT(PIPE_B);
+ intel_encoder->pipe_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index f99920652751..81366aa4812b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
- struct drm_i915_private *i915 = w->ce->engine->i915;
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
@@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work)
obj->read_domains = I915_GEM_GPU_DOMAINS;
obj->write_domain = 0;
- /* XXX: we need to kill this */
- mutex_lock(&i915->drm.struct_mutex);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (unlikely(err))
- goto out_unlock;
+ goto out_signal;
batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
if (IS_ERR(batch)) {
@@ -211,7 +208,7 @@ static void clear_pages_worker(struct work_struct *work)
* keep track of the GPU activity within this vma/request, and
* propagate the signal from the request to w->dma.
*/
- err = i915_active_ref(&vma->active, rq->timeline, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (err)
goto out_request;
@@ -229,8 +226,6 @@ out_batch:
intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
out_signal:
if (unlikely(err)) {
dma_fence_set_error(&w->dma, err);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 755c4542629f..e553ca8d98eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,8 +69,10 @@
#include <drm/i915_drm.h>
-#include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
@@ -167,97 +169,6 @@ lookup_user_engine(struct i915_gem_context *ctx,
return i915_gem_context_get_engine(ctx, idx);
}
-static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
-{
- unsigned int max;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- if (INTEL_GEN(i915) >= 12)
- max = GEN12_MAX_CONTEXT_HW_ID;
- else if (INTEL_GEN(i915) >= 11)
- max = GEN11_MAX_CONTEXT_HW_ID;
- else if (USES_GUC_SUBMISSION(i915))
- /*
- * When using GuC in proxy submission, GuC consumes the
- * highest bit in the context id to indicate proxy submission.
- */
- max = MAX_GUC_CONTEXT_HW_ID;
- else
- max = MAX_CONTEXT_HW_ID;
-
- return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
-}
-
-static int steal_hw_id(struct drm_i915_private *i915)
-{
- struct i915_gem_context *ctx, *cn;
- LIST_HEAD(pinned);
- int id = -ENOSPC;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- list_for_each_entry_safe(ctx, cn,
- &i915->contexts.hw_id_list, hw_id_link) {
- if (atomic_read(&ctx->hw_id_pin_count)) {
- list_move_tail(&ctx->hw_id_link, &pinned);
- continue;
- }
-
- GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */
- list_del_init(&ctx->hw_id_link);
- id = ctx->hw_id;
- break;
- }
-
- /*
-	 * Remember how far we got up on the last repossession scan, so the
- * list is kept in a "least recently scanned" order.
- */
- list_splice_tail(&pinned, &i915->contexts.hw_id_list);
- return id;
-}
-
-static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out)
-{
- int ret;
-
- lockdep_assert_held(&i915->contexts.mutex);
-
- /*
- * We prefer to steal/stall ourselves and our users over that of the
- * entire system. That may be a little unfair to our users, and
- * even hurt high priority clients. The choice is whether to oomkill
- * something else, or steal a context id.
- */
- ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- if (unlikely(ret < 0)) {
- ret = steal_hw_id(i915);
- if (ret < 0) /* once again for the correct errno code */
- ret = new_hw_id(i915, GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- *out = ret;
- return 0;
-}
-
-static void release_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
-
- if (list_empty(&ctx->hw_id_link))
- return;
-
- mutex_lock(&i915->contexts.mutex);
- if (!list_empty(&ctx->hw_id_link)) {
- ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id);
- list_del_init(&ctx->hw_id_link);
- }
- mutex_unlock(&i915->contexts.mutex);
-}
-
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
@@ -294,27 +205,33 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
for_each_engine(engine, gt, id) {
struct intel_context *ce;
+ if (engine->legacy_idx == INVALID_ENGINE)
+ continue;
+
+ GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+ GEM_BUG_ON(e->engines[engine->legacy_idx]);
+
ce = intel_context_create(ctx, engine);
if (IS_ERR(ce)) {
- __free_engines(e, id);
+ __free_engines(e, e->num_engines + 1);
return ERR_CAST(ce);
}
- e->engines[id] = ce;
- e->num_engines = id + 1;
+ e->engines[engine->legacy_idx] = ce;
+ e->num_engines = max(e->num_engines, engine->legacy_idx);
}
+ e->num_engines++;
return e;
}
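[Editor's sketch] The reworked default_engines() above no longer appends contexts in iteration order; it scatters them into slots keyed by engine->legacy_idx, skips engines with no legacy slot, and (mirroring the max()-then-increment in the hunk) ends up with num_engines equal to the highest populated index plus one. A minimal sketch of that bookkeeping with hypothetical types:

/* Returns the required num_engines: max populated slot index + 1. */
static int example_fill_legacy_slots(void **slots, const int *legacy_idx,
                                     void *const *engines, int count)
{
        int i, num_engines = 0;

        for (i = 0; i < count; i++) {
                if (legacy_idx[i] < 0)  /* no legacy slot: skip */
                        continue;
                slots[legacy_idx[i]] = engines[i];
                if (num_engines < legacy_idx[i])
                        num_engines = legacy_idx[i];
        }

        return num_engines + 1;
}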
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- release_hw_id(ctx);
- if (ctx->vm)
- i915_vm_put(ctx->vm);
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
@@ -327,70 +244,202 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree(ctx->name);
put_pid(ctx->pid);
- list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
{
- struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ llist_for_each_entry_safe(ctx, cn, list, free_link)
i915_gem_context_free(ctx);
}
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
{
- struct i915_gem_context *ctx;
- struct llist_node *freed;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- freed = llist_del_first(&i915->contexts.free_list);
- if (!freed)
- return;
-
- ctx = container_of(freed, typeof(*ctx), free_link);
- i915_gem_context_free(ctx);
+ contexts_free_all(llist_del_all(&gc->free_list));
}
static void contexts_free_worker(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), contexts.free_work);
+ struct i915_gem_contexts *gc =
+ container_of(work, typeof(*gc), free_work);
- mutex_lock(&i915->drm.struct_mutex);
- contexts_free(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ contexts_flush_free(gc);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &i915->contexts.free_list))
- queue_work(i915->wq, &i915->contexts.free_work);
+ if (llist_add(&ctx->free_link, &gc->free_list))
+ schedule_work(&gc->free_work);
}
-static void context_close(struct i915_gem_context *ctx)
+static inline struct i915_gem_engines *
+__context_engines_static(const struct i915_gem_context *ctx)
{
- mutex_lock(&ctx->mutex);
+ return rcu_dereference_protected(ctx->engines, true);
+}
- i915_gem_context_set_closed(ctx);
- ctx->file_priv = ERR_PTR(-EBADF);
+static bool __reset_engine(struct intel_engine_cs *engine)
+{
+ struct intel_gt *gt = engine->gt;
+ bool success = false;
+
+ if (!intel_has_reset_engine(gt))
+ return false;
+
+ if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ success = intel_engine_reset(engine, NULL) == 0;
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ }
+
+ return success;
+}
+
+static void __reset_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "context closure in %s", ctx->name);
+}
+
+static bool __cancel_engine(struct intel_engine_cs *engine)
+{
+ /*
+ * Send a "high priority pulse" down the engine to cause the
+ * current request to be momentarily preempted. (If it fails to
+ * be preempted, it will be reset). As we have marked our context
+ * as banned, any incomplete request, including any running, will
+ * be skipped following the preemption.
+ *
+ * If there is no hangchecking (one of the reasons why we try to
+ * cancel the context) and no forced preemption, there may be no
+ * means by which we reset the GPU and evict the persistent hog.
+ * Ergo if we are unable to inject a preemptive pulse that can
+	 * kill the banned context, we fall back to doing a local reset
+ * instead.
+ */
+ if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
+ !intel_engine_pulse(engine))
+ return true;
+
+ /* If we are unable to send a pulse, try resetting this engine. */
+ return __reset_engine(engine);
+}
+
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine, *locked;
+
+ /*
+ * Serialise with __i915_request_submit() so that it sees
+ * is-banned?, or we know the request is already inflight.
+ */
+ locked = READ_ONCE(rq->engine);
+ spin_lock_irq(&locked->active.lock);
+ while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ spin_unlock(&locked->active.lock);
+ spin_lock(&engine->active.lock);
+ locked = engine;
+ }
+
+ engine = NULL;
+ if (i915_request_is_active(rq) && !rq->fence.error)
+ engine = rq->engine;
+
+ spin_unlock_irq(&locked->active.lock);
+
+ return engine;
+}
+
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+ struct intel_engine_cs *engine = NULL;
+ struct i915_request *rq;
+
+ if (!ce->timeline)
+ return NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ /* Check with the backend if the request is inflight */
+ engine = __active_engine(rq);
+ if (engine)
+ break;
+ }
+ rcu_read_unlock();
+
+ return engine;
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ /*
+ * If we are already banned, it was due to a guilty request causing
+ * a reset and the entire context being evicted from the GPU.
+ */
+ if (i915_gem_context_is_banned(ctx))
+ return;
+
+ i915_gem_context_set_banned(ctx);
/*
-	 * This context will never again be assigned to HW, so we can
- * reuse its ID for the next context.
+ * Map the user's engine back to the actual engines; one virtual
+ * engine will be mapped to multiple engines, and using ctx->engine[]
+	 * the same engine may have multiple instances in the user's map.
+ * However, we only care about pending requests, so only include
+ * engines on which there are incomplete requests.
*/
- release_hw_id(ctx);
+ for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ struct intel_engine_cs *engine;
+
+ /*
+ * Check the current active state of this context; if we
+ * are currently executing on the GPU we need to evict
+ * ourselves. On the other hand, if we haven't yet been
+ * submitted to the GPU or if everything is complete,
+ * we have nothing to do.
+ */
+ engine = active_engine(ce);
+
+ /* First attempt to gracefully cancel the context */
+ if (engine && !__cancel_engine(engine))
+ /*
+ * If we are unable to send a preemptive pulse to bump
+ * the context from the GPU, we have to resort to a full
+ * reset. We hope the collateral damage is worth it.
+ */
+ __reset_context(ctx, engine);
+ }
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ i915_gem_context_set_closed(ctx);
+
+ mutex_lock(&ctx->mutex);
+
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ i915_vm_close(vm);
+
+ ctx->file_priv = ERR_PTR(-EBADF);
/*
* The LUT uses the VMA as a backpointer to unref the object,
@@ -400,9 +449,47 @@ static void context_close(struct i915_gem_context *ctx)
lut_close(ctx);
mutex_unlock(&ctx->mutex);
+
+ /*
+	 * If the user has disabled hangchecking, we cannot be sure that
+ * the batches will ever complete after the context is closed,
+ * keeping the context and all resources pinned forever. So in this
+ * case we opt to forcibly kill off all remaining requests on
+ * context close.
+ */
+ if (!i915_gem_context_is_persistent(ctx) ||
+ !i915_modparams.enable_hangcheck)
+ kill_context(ctx);
+
i915_gem_context_put(ctx);
}
+static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+{
+ if (i915_gem_context_is_persistent(ctx) == state)
+ return 0;
+
+ if (state) {
+ /*
+ * Only contexts that are short-lived [that will expire or be
+ * reset] are allowed to survive past termination. We require
+ * hangcheck to ensure that the persistent requests are healthy.
+ */
+ if (!i915_modparams.enable_hangcheck)
+ return -EINVAL;
+
+ i915_gem_context_set_persistence(ctx);
+ } else {
+ /* To cancel a context we use "preempt-to-idle" */
+ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ return -ENODEV;
+
+ i915_gem_context_clear_persistence(ctx);
+ }
+
+ return 0;
+}
+
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
@@ -416,7 +503,6 @@ __create_context(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &i915->contexts.list);
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@@ -430,7 +516,6 @@ __create_context(struct drm_i915_private *i915)
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
@@ -439,6 +524,7 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
+ __context_set_persistence(ctx, true /* cgroup hook? */);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -446,6 +532,10 @@ __create_context(struct drm_i915_private *i915)
ctx->jump_whitelist = NULL;
ctx->jump_whitelist_cmds = 0;
+ spin_lock(&i915->gem.contexts.lock);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ spin_unlock(&i915->gem.contexts.lock);
+
return ctx;
err_free:
@@ -475,11 +565,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
- struct i915_address_space *old = ctx->vm;
+ struct i915_address_space *old = i915_gem_context_vm(ctx);
GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
- ctx->vm = i915_vm_get(vm);
+ rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
context_apply_all(ctx, __apply_ppgtt, vm);
return old;
@@ -488,12 +578,12 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
static void __assign_ppgtt(struct i915_gem_context *ctx,
struct i915_address_space *vm)
{
- if (vm == ctx->vm)
+ if (vm == rcu_access_pointer(ctx->vm))
return;
vm = __set_ppgtt(ctx, vm);
if (vm)
- i915_vm_put(vm);
+ i915_vm_close(vm);
}
static void __set_timeline(struct intel_timeline **dst,
@@ -520,27 +610,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
- !HAS_EXECLISTS(dev_priv))
+ !HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the most stale context */
- contexts_free_first(dev_priv);
+ /* Reap the stale contexts */
+ contexts_flush_free(&i915->gem.contexts);
- ctx = __create_context(dev_priv);
+ ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
- if (HAS_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -548,14 +636,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
return ERR_CAST(ppgtt);
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct intel_timeline *timeline;
- timeline = intel_timeline_create(&dev_priv->gt, NULL);
+ timeline = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@@ -587,19 +678,13 @@ struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
struct i915_gem_context *ctx;
- int err;
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
- err = i915_gem_context_pin_hw_id(ctx);
- if (err) {
- destroy_kernel_context(&ctx);
- return ERR_PTR(err);
- }
-
i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_persistence(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -607,62 +692,42 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
}
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
{
- mutex_init(&i915->contexts.mutex);
- INIT_LIST_HEAD(&i915->contexts.list);
-
- /* Using the simple ida interface, the max is limited by sizeof(int) */
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&i915->contexts.hw_ida);
- INIT_LIST_HEAD(&i915->contexts.hw_id_list);
+ spin_lock_init(&gc->lock);
+ INIT_LIST_HEAD(&gc->list);
- INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
- init_llist_head(&i915->contexts.free_list);
+ INIT_WORK(&gc->free_work, contexts_free_worker);
+ init_llist_head(&gc->free_list);
}
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
- GEM_BUG_ON(dev_priv->kernel_context);
+ GEM_BUG_ON(i915->kernel_context);
- init_contexts(dev_priv);
+ init_contexts(&i915->gem.contexts);
/* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+ ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
return PTR_ERR(ctx);
}
- /*
-	 * For easy recognisability, we want the kernel context to be 0 and then
- * all user contexts will have non-zero hw_id. Kernel contexts are
- * permanently pinned, so that we never suffer a stall and can
- * use them from any allocation context (e.g. for evicting other
- * contexts and from inside the shrinker).
- */
- GEM_BUG_ON(ctx->hw_id);
- GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
- dev_priv->kernel_context = ctx;
+ i915->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
return 0;
}
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->drm.struct_mutex);
-
destroy_kernel_context(&i915->kernel_context);
-
- /* Must free all deferred contexts (via flush_workqueue) first */
- GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list));
- ida_destroy(&i915->contexts.hw_ida);
+ flush_work(&i915->gem.contexts.free_work);
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -680,11 +745,16 @@ static int vm_idr_cleanup(int id, void *p, void *data)
static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv)
{
+ struct i915_address_space *vm;
int ret;
ctx->file_priv = fpriv;
- if (ctx->vm)
- ctx->vm->file = fpriv;
+
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->file, fpriv); /* XXX */
+ mutex_unlock(&ctx->mutex);
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
@@ -721,9 +791,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
idr_init(&file_priv->context_idr);
idr_init_base(&file_priv->vm_idr, 1);
- mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
- mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@@ -751,6 +819,7 @@ err:
void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = file_priv->dev_priv;
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
@@ -759,6 +828,8 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
idr_destroy(&file_priv->vm_idr);
mutex_destroy(&file_priv->vm_idr_lock);
+
+ contexts_flush_free(&i915->gem.contexts);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -851,6 +922,7 @@ struct context_barrier_task {
void *data;
};
+__i915_active_call
static void cb_retire(struct i915_active *base)
{
struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
@@ -870,20 +942,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
void (*task)(void *data),
void *data)
{
- struct drm_i915_private *i915 = ctx->i915;
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
struct intel_context *ce;
int err = 0;
- lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!task);
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
- i915_active_init(i915, &cb->base, NULL, cb_retire);
+ i915_active_init(&cb->base, NULL, cb_retire);
err = i915_active_acquire(&cb->base);
if (err) {
kfree(cb);
@@ -915,7 +985,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (emit)
err = emit(rq, data);
if (err == 0)
- err = i915_active_ref(&cb->base, rq->timeline, rq);
+ err = i915_active_add_request(&cb->base, rq);
i915_request_add(rq);
if (err)
@@ -938,16 +1008,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct i915_address_space *vm;
int ret;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
- /* XXX rcu acquire? */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
+ rcu_read_lock();
vm = i915_vm_get(ctx->vm);
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ rcu_read_unlock();
ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
if (ret)
@@ -958,7 +1024,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (ret < 0)
goto err_unlock;
- i915_vm_get(vm);
+ i915_vm_open(vm);
args->size = 0;
args->value = ret;
@@ -978,7 +1044,7 @@ static void set_ppgtt_barrier(void *data)
if (INTEL_GEN(old->i915) < 8)
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
- i915_vm_put(old);
+ i915_vm_close(old);
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
@@ -1008,12 +1074,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
intel_ring_advance(rq, cs);
} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ int err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
@@ -1050,34 +1122,34 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
if (args->size)
return -EINVAL;
- if (!ctx->vm)
+ if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
if (upper_32_bits(args->value))
return -ENOENT;
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
+ rcu_read_lock();
vm = idr_find(&file_priv->vm_idr, args->value);
- if (vm)
- i915_vm_get(vm);
- mutex_unlock(&file_priv->vm_idr_lock);
+ if (vm && !kref_get_unless_zero(&vm->ref))
+ vm = NULL;
+ rcu_read_unlock();
if (!vm)
return -ENOENT;
- err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ err = mutex_lock_interruptible(&ctx->mutex);
if (err)
goto out;
- if (vm == ctx->vm)
+ if (i915_gem_context_is_closed(ctx)) {
+ err = -ENOENT;
+ goto unlock;
+ }
+
+ if (vm == rcu_access_pointer(ctx->vm))
goto unlock;
	/* Tear down the existing obj:vma cache; it will have to be rebuilt. */
- mutex_lock(&ctx->mutex);
lut_close(ctx);
- mutex_unlock(&ctx->mutex);
old = __set_ppgtt(ctx, vm);
@@ -1092,13 +1164,12 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- i915_vm_put(__set_ppgtt(ctx, old));
- i915_vm_put(old);
+ i915_vm_close(__set_ppgtt(ctx, old));
+ i915_vm_close(old);
}
unlock:
- mutex_unlock(&ctx->i915->drm.struct_mutex);
-
+ mutex_unlock(&ctx->mutex);
out:
i915_vm_put(vm);
return err;
@@ -1117,7 +1188,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) +
LRC_STATE_PN * PAGE_SIZE +
- (CTX_R_PWR_CLK_STATE + 1) * 4;
+ CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
@@ -1160,8 +1231,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
}
static int
-__intel_context_reconfigure_sseu(struct intel_context *ce,
- struct intel_sseu sseu)
+intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
int ret;
@@ -1185,23 +1255,6 @@ unlock:
}
static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- int ret;
-
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ret = __intel_context_reconfigure_sseu(ce, sseu);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
-}
-
-static int
user_to_context_sseu(struct drm_i915_private *i915,
const struct drm_i915_gem_context_param_sseu *user,
struct intel_sseu *context)
@@ -1743,6 +1796,16 @@ err_free:
return err;
}
+static int
+set_persistence(struct i915_gem_context *ctx,
+ const struct drm_i915_gem_context_param *args)
+{
+ if (args->size)
+ return -EINVAL;
+
+ return __context_set_persistence(ctx, args->value);
+}
+
static int ctx_setparam(struct drm_i915_file_private *fpriv,
struct i915_gem_context *ctx,
struct drm_i915_gem_context_param *args)
@@ -1820,6 +1883,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ ret = set_persistence(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1972,10 +2039,11 @@ static int clone_vm(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
struct i915_address_space *vm;
+ int err = 0;
rcu_read_lock();
do {
- vm = READ_ONCE(src->vm);
+ vm = rcu_dereference(src->vm);
if (!vm)
break;
@@ -1997,7 +2065,7 @@ static int clone_vm(struct i915_gem_context *dst,
* it cannot be reallocated elsewhere.
*/
- if (vm == READ_ONCE(src->vm))
+ if (vm == rcu_access_pointer(src->vm))
break;
i915_vm_put(vm);
@@ -2005,11 +2073,16 @@ static int clone_vm(struct i915_gem_context *dst,
rcu_read_unlock();
if (vm) {
- __assign_ppgtt(dst, vm);
+ if (!mutex_lock_interruptible(&dst->mutex)) {
+ __assign_ppgtt(dst, vm);
+ mutex_unlock(&dst->mutex);
+ } else {
+ err = -EINTR;
+ }
i915_vm_put(vm);
}
- return 0;
+ return err;
}
static int create_clone(struct i915_user_extension __user *ext, void *data)
@@ -2099,12 +2172,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ext_data.ctx = i915_gem_create_context(i915, args->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ext_data.ctx))
return PTR_ERR(ext_data.ctx);
@@ -2231,12 +2299,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- if (ctx->vm)
- args->value = ctx->vm->total;
- else if (to_i915(dev)->ggtt.alias)
- args->value = to_i915(dev)->ggtt.alias->vm.total;
+ rcu_read_lock();
+ if (rcu_access_pointer(ctx->vm))
+ args->value = rcu_dereference(ctx->vm)->total;
else
args->value = to_i915(dev)->ggtt.vm.total;
+ rcu_read_unlock();
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2271,6 +2339,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = get_engines(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PERSISTENCE:
+ args->size = 0;
+ args->value = i915_gem_context_is_persistent(ctx);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2302,7 +2375,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_gem_context *ctx;
int ret;
@@ -2324,7 +2397,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
*/
if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ args->reset_count = i915_reset_count(&i915->gpu_error);
else
args->reset_count = 0;
@@ -2337,33 +2410,6 @@ out:
return ret;
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
-{
- struct drm_i915_private *i915 = ctx->i915;
- int err = 0;
-
- mutex_lock(&i915->contexts.mutex);
-
- GEM_BUG_ON(i915_gem_context_is_closed(ctx));
-
- if (list_empty(&ctx->hw_id_link)) {
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count));
-
- err = assign_hw_id(i915, &ctx->hw_id);
- if (err)
- goto out_unlock;
-
- list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list);
- }
-
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u);
- atomic_inc(&ctx->hw_id_pin_count);
-
-out_unlock:
- mutex_unlock(&i915->contexts.mutex);
- return err;
-}
-
/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
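
The set_ppgtt() hunk above drops the vm_idr mutex in favour of a lockless lookup. As a hedged sketch (not part of the patch itself), the idiom it adopts — find under RCU, then take a reference only if the refcount is still live — looks roughly like this, using the vm->ref kref that the hunk itself dereferences:

        struct i915_address_space *lookup_vm_rcu(struct idr *idr, u32 id)
        {
                struct i915_address_space *vm;

                rcu_read_lock();
                vm = idr_find(idr, id);                 /* no reference taken yet */
                if (vm && !kref_get_unless_zero(&vm->ref))
                        vm = NULL;                      /* raced with the final put */
                rcu_read_unlock();

                return vm;                              /* caller does i915_vm_put() */
        }

This is only safe if the address space is freed under RCU, so the pointer returned by idr_find() stays valid for the duration of the read-side critical section.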
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 176978608b6f..18e50a769a6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -11,7 +11,9 @@
#include "gt/intel_context.h"
+#include "i915_drv.h"
#include "i915_gem.h"
+#include "i915_gem_gtt.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"
@@ -74,6 +76,21 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c
clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}
+static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
+{
+ return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
+{
+ set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
+{
+ clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -112,19 +129,22 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
-int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
-static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
+static inline bool
+i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
{
- if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
- return 0;
+ return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
+}
- return __i915_gem_context_pin_hw_id(ctx);
+static inline void
+i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
+{
+ set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}
-static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
+static inline void
+i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
{
- GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
- atomic_dec(&ctx->hw_id_pin_count);
+ clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
}
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
@@ -133,8 +153,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
}
/* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
@@ -173,6 +193,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline struct i915_address_space *
+i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+ return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
+}
+
+static inline struct i915_address_space *
+i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+{
+ struct i915_address_space *vm;
+
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (!vm)
+ vm = &ctx->i915->ggtt.vm;
+ vm = i915_vm_get(vm);
+ rcu_read_unlock();
+
+ return vm;
+}
+
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
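
A hedged usage sketch of the accessors just added (read_vm_total is a hypothetical caller): i915_gem_context_vm() is for owners already holding ctx->mutex, while i915_gem_context_get_vm_rcu() hands back a full reference — never NULL, since it falls back to the global GTT — that must be balanced with i915_vm_put():

        static u64 read_vm_total(struct i915_gem_context *ctx)
        {
                struct i915_address_space *vm;
                u64 total;

                vm = i915_gem_context_get_vm_rcu(ctx);  /* reference held */
                total = vm->total;
                i915_vm_put(vm);

                return total;
        }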
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 00537b9d7006..3870dd5daaa0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -88,7 +88,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_address_space *vm;
+ struct i915_address_space __rcu *vm;
/**
* @pid: process id of creator
@@ -137,6 +137,7 @@ struct i915_gem_context {
#define UCONTEXT_NO_ERROR_CAPTURE 1
#define UCONTEXT_BANNABLE 2
#define UCONTEXT_RECOVERABLE 3
+#define UCONTEXT_PERSISTENCE 4
/**
* @flags: small set of booleans
@@ -146,24 +147,7 @@ struct i915_gem_context {
#define CONTEXT_CLOSED 1
#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
#define CONTEXT_USER_ENGINES 3
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unqiue
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
+#define CONTEXT_NOPREEMPT 4
struct mutex mutex;
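
The __rcu annotation above means sparse now enforces the usual access discipline on ctx->vm. A minimal sketch of the write side, assuming updates are serialized by ctx->mutex as they are elsewhere in this series:

        static void __sketch_set_vm(struct i915_gem_context *ctx,
                                    struct i915_address_space *vm)
        {
                lockdep_assert_held(&ctx->mutex);
                rcu_assign_pointer(ctx->vm, vm);        /* publish to RCU readers */
        }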
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 96ce95c8ac5a..eaea49d08eb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -256,6 +256,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ static struct lock_class_key lock_class;
struct dma_buf_attachment *attach;
struct drm_i915_gem_object *obj;
int ret;
@@ -287,7 +288,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 9c58e8fac1d9..9937b4c341f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -27,7 +27,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
- if (!READ_ONCE(obj->pin_global))
+ if (!i915_gem_object_is_framebuffer(obj))
return;
i915_gem_object_lock(obj);
@@ -288,14 +288,21 @@ restart:
if (!drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
+ /* Wait for an earlier async bind, need to rewrite it */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
+ ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL);
if (ret)
return ret;
}
}
- list_for_each_entry(vma, &obj->vma.list, obj_link)
- vma->node.color = cache_level;
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (i915_vm_has_cache_coloring(vma->vm))
+ vma->node.color = cache_level;
+ }
i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
@@ -389,16 +396,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- goto out;
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret == 0) {
ret = i915_gem_object_set_cache_level(obj, level);
i915_gem_object_unlock(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
out:
i915_gem_object_put(obj);
@@ -422,12 +424,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
assert_object_held(obj);
- /* Mark the global pin early so that we account for the
- * display coherency whilst setting up the cache domains.
- */
- obj->pin_global++;
-
- /* The display engine is not coherent with the LLC cache on gen6. As
+ /*
+ * The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
* chipsets.
@@ -439,12 +437,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
ret = i915_gem_object_set_cache_level(obj,
HAS_WT(to_i915(obj->base.dev)) ?
I915_CACHE_WT : I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err_unpin_global;
- }
+ if (ret)
+ return ERR_PTR(ret);
- /* As the user may map the buffer once pinned in the display plane
+ /*
+ * As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. However,
* it may simply be too big to fit into mappable, in which case
@@ -461,22 +458,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
if (IS_ERR(vma))
- goto err_unpin_global;
+ return vma;
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
__i915_gem_object_flush_for_display(obj);
- /* It should now be out of any other write domains, and we can update
+ /*
+ * It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
obj->read_domains |= I915_GEM_DOMAIN_GTT;
return vma;
-
-err_unpin_global:
- obj->pin_global--;
- return vma;
}
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
@@ -491,6 +485,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
if (!drm_mm_node_allocated(&vma->node))
continue;
+ GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
}
mutex_unlock(&i915->ggtt.vm.mutex);
@@ -500,7 +495,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (obj->mm.madv == I915_MADV_WILLNEED)
+ if (obj->mm.madv == I915_MADV_WILLNEED &&
+ !atomic_read(&obj->mm.shrink_pin))
list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
@@ -514,12 +510,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
assert_object_held(obj);
- if (WARN_ON(obj->pin_global == 0))
- return;
-
- if (--obj->pin_global == 0)
- vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
-
/* Bump the LRU to try and avoid premature eviction whilst flipping */
i915_gem_object_bump_inactive_ggtt(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index e635e1e5f4d3..f0998f1225af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -19,6 +19,7 @@
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -252,6 +253,7 @@ struct i915_execbuffer {
bool has_fence : 1;
bool needs_unfenced : 1;
+ struct intel_context *ce;
struct i915_request *rq;
u32 *rq_cmd;
unsigned int rq_size;
@@ -699,7 +701,9 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
+ mutex_lock(&eb->context->vm->mutex);
err = i915_gem_evict_vm(eb->context->vm);
+ mutex_unlock(&eb->context->vm->mutex);
if (err)
return err;
break;
@@ -727,7 +731,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm)
+ if (rcu_access_pointer(ctx->vm))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
eb->context_flags = 0;
@@ -882,6 +886,9 @@ static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
+ if (eb->reloc_cache.ce)
+ intel_context_put(eb->reloc_cache.ce);
+
if (eb->lut_size > 0)
kfree(eb->buckets);
}
@@ -904,7 +911,8 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
cache->has_fence = cache->gen < 4;
cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
- cache->node.allocated = false;
+ cache->node.flags = 0;
+ cache->ce = NULL;
cache->rq = NULL;
cache->rq_size = 0;
}
@@ -965,11 +973,13 @@ static void reloc_cache_reset(struct reloc_cache *cache)
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.clear_range(&ggtt->vm,
cache->node.start,
cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
} else {
i915_vma_unpin((struct i915_vma *)cache->node.mm);
}
@@ -1044,11 +1054,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
err = drm_mm_insert_node_in_range
(&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@@ -1058,7 +1070,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
}
offset = cache->node.start;
- if (cache->node.allocated) {
+ if (drm_mm_node_allocated(&cache->node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0);
@@ -1147,7 +1159,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
u32 *cmd;
int err;
- pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+ pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -1170,7 +1182,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (err)
goto err_unmap;
- rq = i915_request_create(eb->context);
+ rq = intel_context_create_request(cache->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
@@ -1241,6 +1253,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
+ if (!cache->ce) {
+ struct intel_context *ce;
+
+ /*
+ * The CS pre-parser can pre-fetch commands across
+ * memory sync points and starting gen12 it is able to
+ * pre-fetch across BB_START and BB_END boundaries
+ * (within the same context). We therefore use a
+ * separate context gen12+ to guarantee that the reloc
+ * writes land before the parser gets to the target
+ * memory location.
+ */
+ if (cache->gen >= 12)
+ ce = intel_context_create(eb->context->gem_context,
+ eb->engine);
+ else
+ ce = intel_context_get(eb->context);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
+
+ cache->ce = ce;
+ }
+
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@@ -1390,7 +1425,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
- PIN_GLOBAL);
+ PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
return err;
@@ -1992,7 +2027,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
u64 shadow_batch_start;
int err;
- pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+ pool = intel_engine_get_pool(eb->engine, eb->batch_len);
if (IS_ERR(pool))
return ERR_CAST(pool);
@@ -2099,6 +2134,9 @@ static int eb_submit(struct i915_execbuffer *eb)
if (err)
return err;
+ if (i915_gem_context_nopreempt(eb->gem_context))
+ eb->request->flags |= I915_REQUEST_NOPREEMPT;
+
return 0;
}
@@ -2168,35 +2206,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce)
return i915_request_get(rq);
}
-static int
-__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- int err;
-
- if (likely(atomic_inc_not_zero(&ce->pin_count)))
- return 0;
-
- err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
- if (err)
- return err;
-
- err = __intel_context_do_pin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- return err;
-}
-
-static void
-__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
-{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
- return;
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- intel_context_unpin(ce);
- mutex_unlock(&eb->i915->drm.struct_mutex);
-}
-
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
struct intel_timeline *tl;
@@ -2216,7 +2225,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- err = __eb_pin_context(eb, ce);
+ err = intel_context_pin(ce);
if (err)
return err;
@@ -2260,7 +2269,7 @@ err_exit:
intel_context_exit(ce);
intel_context_timeline_unlock(tl);
err_unpin:
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
return err;
}
@@ -2273,7 +2282,7 @@ static void eb_unpin_engine(struct i915_execbuffer *eb)
intel_context_exit(ce);
mutex_unlock(&tl->mutex);
- __eb_unpin_context(eb, ce);
+ intel_context_unpin(ce);
}
static unsigned int
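
The deleted __eb_pin_context()/__eb_unpin_context() wrappers existed only to take struct_mutex around the slow path; intel_context_pin() now provides the same lockless fast path centrally. A sketch of that idiom, using the identifiers from the removed code:

        static int pin_sketch(struct intel_context *ce)
        {
                /* Fast path: already pinned, just bump the count. */
                if (likely(atomic_inc_not_zero(&ce->pin_count)))
                        return 0;

                /* Slow path: take ce's own locks, 0 -> 1 transition. */
                return __intel_context_do_pin(ce);
        }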
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 0c41e04ab8fa..9cfb0e41ff06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -117,13 +117,6 @@ create_st:
goto err;
}
- /* Mark the pages as dontneed whilst they are still pinned. As soon
- * as they are unpinned they are allowed to be reaped by the shrinker,
- * and the caller is expected to repopulate - the contents of this
- * object are only valid whilst active and pinned.
- */
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
internal_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -172,6 +164,7 @@ struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
phys_addr_t size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -186,7 +179,16 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+ i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
+
+ /*
+ * Mark the object as volatile, such that the pages are marked as
+ * dontneed whilst they are still pinned. As soon as they are unpinned
+ * they are allowed to be reaped by the shrinker, and the caller is
+ * expected to repopulate - the contents of this object are only valid
+ * whilst active and pinned.
+ */
+ i915_gem_object_set_volatile(obj);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
new file mode 100644
index 000000000000..0e2bf6b7e143
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+
+const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+ .flags = I915_GEM_OBJECT_HAS_IOMEM,
+
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+/* XXX: Time to vfunc your life up? */
+void __iomem *
+i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *
+i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
+
+ offset = i915_gem_object_get_dma_address(obj, n);
+ offset -= obj->mm.region->region.start;
+
+ return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+ return obj->ops == &i915_gem_lmem_obj_ops;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ size, flags);
+}
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
new file mode 100644
index 000000000000..7c176b8b7d2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_LMEM_H
+#define __I915_GEM_LMEM_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+
+extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+ unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+ unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+ unsigned long n);
+
+bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_lmem(struct drm_i915_private *i915,
+ resource_size_t size,
+ unsigned int flags);
+
+struct drm_i915_gem_object *
+__i915_gem_lmem_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif /* !__I915_GEM_LMEM_H */
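
A hedged usage sketch for the helpers declared above (peek_lmem_dword is hypothetical): the _io_map_page variant returns a WC iomapping of a single page, which the caller releases with io_mapping_unmap() — the _atomic variant pairs with io_mapping_unmap_atomic() instead:

        static u32 peek_lmem_dword(struct drm_i915_gem_object *obj,
                                   unsigned long n)
        {
                void __iomem *vaddr;
                u32 val;

                vaddr = i915_gem_object_lmem_io_map_page(obj, n);
                val = readl(vaddr);
                io_mapping_unmap(vaddr);

                return val;
        }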
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 05289edbafe3..e3002849844b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -8,6 +8,7 @@
#include <linux/sizes.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
@@ -249,16 +250,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err_rpm;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto err_reset;
-
- /* Access to snoopable pages through the GTT is incoherent. */
- if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
- ret = -EFAULT;
- goto err_unlock;
- }
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -285,10 +276,19 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
}
+
+ /* The entire mappable GGTT is pinned? Unexpected! */
+ GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
- goto err_unlock;
+ goto err_reset;
+ }
+
+ /* Access to snoopable pages through the GTT is incoherent. */
+ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
+ ret = -EFAULT;
+ goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
@@ -312,7 +312,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
mutex_unlock(&i915->ggtt.vm.mutex);
- if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -326,8 +326,6 @@ err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
-err_unlock:
- mutex_unlock(&dev->struct_mutex);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
@@ -335,23 +333,20 @@ err_rpm:
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
- case -EIO:
- /*
- * We eat errors when the gpu is terminally wedged to avoid
- * userspace unduly crashing (gl has no provisions for mmaps to
- * fail). But any other -EIO isn't ours (e.g. swap in failure)
- * and so needs to be reported.
- */
- if (!intel_gt_is_wedged(ggtt->vm.gt))
- return VM_FAULT_SIGBUS;
- /* else, fall through */
- case -EAGAIN:
- /*
- * EAGAIN means the gpu is hung and we'll wait for the error
- * handler to reset everything when re-faulting in
- * i915_mutex_lock_interruptible.
- */
+ default:
+ WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
+ /* fallthrough */
+ case -EIO: /* shmemfs failure from swap device */
+ case -EFAULT: /* purged object */
+ case -ENODEV: /* bad object, how did you get here! */
+ return VM_FAULT_SIGBUS;
+
+ case -ENOSPC: /* shmemfs allocation failure */
+ case -ENOMEM: /* our allocation failure */
+ return VM_FAULT_OOM;
+
case 0:
+ case -EAGAIN:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@@ -360,15 +355,6 @@ err:
* already did the job.
*/
return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -ENOSPC:
- case -EFAULT:
- case -ENODEV: /* bad object, how did you get here! */
- return VM_FAULT_SIGBUS;
- default:
- WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
- return VM_FAULT_SIGBUS;
}
}
@@ -439,6 +425,7 @@ out:
static int create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_gt *gt = &i915->gt;
int err;
err = drm_gem_create_mmap_offset(&obj->base);
@@ -446,21 +433,12 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
return 0;
/* Attempt to reap some mmap space from dead objects */
- do {
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- break;
-
- i915_gem_drain_freed_objects(i915);
- err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
- break;
-
- } while (flush_delayed_work(&i915->gem.retire_work));
+ err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
- return err;
+ i915_gem_drain_freed_objects(i915);
+ return drm_gem_create_mmap_offset(&obj->base);
}
int
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index d7855dc5a5c5..a50296cce0d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -47,9 +47,10 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops)
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key)
{
- mutex_init(&obj->mm.lock);
+ __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
@@ -155,21 +156,30 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
llist_for_each_entry_safe(obj, on, freed, freed) {
- struct i915_vma *vma, *vn;
-
trace_i915_gem_object_destroy(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
- list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
- GEM_BUG_ON(i915_vma_is_active(vma));
- vma->flags &= ~I915_VMA_PIN_MASK;
- i915_vma_destroy(vma);
+ if (!list_empty(&obj->vma.list)) {
+ struct i915_vma *vma;
+
+ /*
+ * Note that the vma keeps an object reference while
+ * it is active, so it *should* not sleep while we
+ * destroy it. Our debug code, however, insists it *might*.
+ * For the moment, play along.
+ */
+ spin_lock(&obj->vma.lock);
+ while ((vma = list_first_entry_or_null(&obj->vma.list,
+ struct i915_vma,
+ obj_link))) {
+ GEM_BUG_ON(vma->obj != obj);
+ spin_unlock(&obj->vma.lock);
+
+ i915_vma_destroy(vma);
+
+ spin_lock(&obj->vma.lock);
+ }
+ spin_unlock(&obj->vma.lock);
}
- GEM_BUG_ON(!list_empty(&obj->vma.list));
- GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
-
- mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
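
Threading a struct lock_class_key through i915_gem_object_init() gives each object type its own lockdep class for obj->mm.lock; that is what the static keys sprinkled through the callers above are for. A sketch of the pattern for a hypothetical new backend (example_ops and example_lock_class are placeholders):

        static const struct drm_i915_gem_object_ops example_ops; /* hypothetical */
        static struct lock_class_key example_lock_class;

        static void example_init(struct drm_i915_gem_object *obj)
        {
                /* A distinct static key per caller lets lockdep tell the
                 * obj->mm.lock instances apart by object type, avoiding
                 * false-positive nesting reports. */
                i915_gem_object_init(obj, &example_ops, &example_lock_class);
        }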
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index ddf3605bea8e..458cd51331f1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -23,12 +23,14 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops);
+ const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key);
struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
- const void *data, size_t size);
+ const void *data, resource_size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
@@ -106,6 +108,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
dma_resv_lock(obj->base.resv, NULL);
}
+static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
+{
+ return dma_resv_trylock(obj->base.resv);
+}
+
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
@@ -135,33 +142,58 @@ i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
+}
+
+static inline bool
+i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_ALLOC_VOLATILE;
+}
+
+static inline void
+i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
+{
+ obj->flags |= I915_BO_ALLOC_VOLATILE;
+}
+
+static inline bool
+i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
+ unsigned long flags)
+{
+ return obj->ops->flags & flags;
+}
+
+static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
}
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
}
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
- return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
}
static inline bool
@@ -412,7 +444,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
- return obj->pin_global; /* currently in use by HW, keep flushed */
+ /* Currently in use by HW (display engine)? Keep flushed. */
+ return i915_gem_object_is_framebuffer(obj);
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -429,6 +462,5 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 6415f9a17e2d..70809d8897cd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -8,6 +8,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
#include "i915_gem_clflush.h"
#include "i915_gem_object_blt.h"
@@ -16,7 +17,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
u32 value)
{
struct drm_i915_private *i915 = ce->vm->i915;
- const u32 block_size = S16_MAX * PAGE_SIZE;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 offset;
@@ -29,10 +30,10 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
- count = div_u64(vma->size, block_size);
+ count = div_u64(round_up(vma->size, block_size), block_size);
size = (1 + 8 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_pool_get(&ce->engine->pool, size);
+ pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
@@ -200,7 +201,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *dst)
{
struct drm_i915_private *i915 = ce->vm->i915;
- const u32 block_size = S16_MAX * PAGE_SIZE;
+ const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
struct intel_engine_pool_node *pool;
struct i915_vma *batch;
u64 src_offset, dst_offset;
@@ -213,10 +214,10 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
intel_engine_pm_get(ce->engine);
- count = div_u64(dst->size, block_size);
+ count = div_u64(round_up(dst->size, block_size), block_size);
size = (1 + 11 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
- pool = intel_engine_pool_get(&ce->engine->pool, size);
+ pool = intel_engine_get_pool(ce->engine, size);
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto out_pm;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 646859fea224..96008374a412 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -8,6 +8,7 @@
#define __I915_GEM_OBJECT_TYPES_H__
#include <drm/drm_gem.h>
+#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
#include "i915_selftest.h"
@@ -30,10 +31,11 @@ struct i915_lut_handle {
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -118,6 +120,11 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
+ unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
@@ -153,17 +160,30 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- /** Count of how many global VMA are currently pinned for use by HW */
- unsigned int pin_global;
struct {
struct mutex lock; /* protects the pages and their use */
atomic_t pages_pin_count;
+ atomic_t shrink_pin;
+
+ /**
+ * Memory region for this object.
+ */
+ struct intel_memory_region *region;
+ /**
+ * List of memory region blocks allocated for this object.
+ */
+ struct list_head blocks;
+ /**
+ * Element within memory_region->objects or region->purgeable
+ * if the object is marked as DONTNEED. Access is protected by
+ * region->obj_lock.
+ */
+ struct list_head region_link;
struct sg_table *pages;
void *mapping;
- /* TODO: whack some of this into the error state */
struct i915_page_sizes {
/**
* The sg mask of the pages sg_table. i.e the mask of
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 18f0ce0135c1..29f4c2850745 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -18,6 +19,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_DONTNEED;
+
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
obj->write_domain = 0;
@@ -71,6 +75,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
list = &i915->mm.shrink_list;
list_add_tail(&obj->mm.link, list);
+ atomic_set(&obj->mm.shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
@@ -150,6 +155,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
rcu_read_unlock();
}
+static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
+{
+ if (i915_gem_object_is_lmem(obj))
+ io_mapping_unmap((void __force __iomem *)ptr);
+ else if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+}
+
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
@@ -159,17 +174,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
if (IS_ERR_OR_NULL(pages))
return pages;
+ if (i915_gem_object_is_volatile(obj))
+ obj->mm.madv = I915_MADV_WILLNEED;
+
i915_gem_object_make_unshrinkable(obj);
if (obj->mm.mapping) {
- void *ptr;
-
- ptr = page_mask_bits(obj->mm.mapping);
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
-
+ unmap_object(obj, page_mask_bits(obj->mm.mapping));
obj->mm.mapping = NULL;
}
@@ -224,7 +235,7 @@ unlock:
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -237,6 +248,16 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
pgprot_t pgprot;
void *addr;
+ if (i915_gem_object_is_lmem(obj)) {
+ void __iomem *io;
+
+ if (type != I915_MAP_WC)
+ return NULL;
+
+ io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+ return (void __force *)io;
+ }
+
/* A single page can always be kmapped */
if (n_pages == 1 && type == I915_MAP_WB)
return kmap(sg_page(sgt->sgl));
@@ -278,11 +299,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
enum i915_map_type has_type;
+ unsigned int flags;
bool pinned;
void *ptr;
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
+ flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+ if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
err = mutex_lock_interruptible(&obj->mm.lock);
@@ -314,10 +337,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- if (is_vmalloc_addr(ptr))
- vunmap(ptr);
- else
- kunmap(kmap_to_page(ptr));
+ unmap_object(obj, ptr);
ptr = obj->mm.mapping = NULL;
}
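
With the HAS_IOMEM branch added above, i915_gem_object_pin_map() serves both struct-page and lmem-backed objects, though only I915_MAP_WC is honoured for iomem. A hedged usage sketch (copy_into_object is hypothetical):

        static int copy_into_object(struct drm_i915_gem_object *obj,
                                    const void *src, size_t len)
        {
                void *vaddr;

                vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
                if (IS_ERR(vaddr))
                        return PTR_ERR(vaddr);  /* e.g. -ENXIO for unmappable types */

                memcpy(vaddr, src, len);        /* WC writes; flushed on unpin */
                i915_gem_object_unpin_map(obj);

                return 0;
        }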
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 768356908160..8043ff63d73f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -16,6 +16,7 @@
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_gem_region.h"
#include "i915_scatterlist.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
@@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages))
+ if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
+ i915_gem_object_release_memory_region(obj);
+ }
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index ad2a63dbcac2..f88ee1317bb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -7,138 +7,9 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
-#include "i915_globals.h"
-
-static void call_idle_barriers(struct intel_engine_cs *engine)
-{
- struct llist_node *node, *next;
-
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- struct i915_active_request *active =
- container_of((struct list_head *)node,
- typeof(*active), link);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, NULL);
- }
-}
-
-static void i915_gem_park(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id)
- call_idle_barriers(engine); /* cleanup after wedging */
-
- i915_vma_parked(i915);
-
- i915_globals_park();
-}
-
-static void idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.idle_work);
- bool park;
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- mutex_lock(&i915->drm.struct_mutex);
-
- intel_wakeref_lock(&i915->gt.wakeref);
- park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
- !work_pending(work));
- intel_wakeref_unlock(&i915->gt.wakeref);
- if (park)
- i915_gem_park(i915);
- else
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
-static void retire_work_handler(struct work_struct *work)
-{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), gem.retire_work.work);
-
- /* Come back later if the device is busy... */
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- i915_retire_requests(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- }
-
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
-static int pm_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct drm_i915_private *i915 =
- container_of(nb, typeof(*i915), gem.pm_notifier);
-
- switch (action) {
- case INTEL_GT_UNPARK:
- i915_globals_unpark();
- queue_delayed_work(i915->wq,
- &i915->gem.retire_work,
- round_jiffies_up_relative(HZ));
- break;
-
- case INTEL_GT_PARK:
- queue_work(i915->wq, &i915->gem.idle_work);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static bool switch_to_kernel_context_sync(struct intel_gt *gt)
-{
- bool result = !intel_gt_is_wedged(gt);
-
- do {
- if (i915_gem_wait_for_idle(gt->i915,
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- I915_GEM_IDLE_TIMEOUT) == -ETIME) {
- /* XXX hide warning from gem_eio */
- if (i915_modparams.reset) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- }
-
- /*
- * Forcibly cancel outstanding work and leave
- * the gpu quiet.
- */
- intel_gt_set_wedged(gt);
- result = false;
- }
- } while (i915_retire_requests(gt->i915) && result);
-
- if (intel_gt_pm_wait_for_idle(gt))
- result = false;
-
- return result;
-}
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915)
-{
- return switch_to_kernel_context_sync(&i915->gt);
-}
void i915_gem_suspend(struct drm_i915_private *i915)
{
@@ -147,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
flush_workqueue(i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
-
/*
* We have to flush all the executing contexts to main memory so
* that they can saved in the hibernation image. To ensure the last
@@ -158,15 +27,9 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- switch_to_kernel_context_sync(&i915->gt);
-
- mutex_unlock(&i915->drm.struct_mutex);
-
- cancel_delayed_work_sync(&i915->gt.hangcheck.work);
+ intel_gt_suspend_prepare(&i915->gt);
i915_gem_drain_freed_objects(i915);
-
- intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -206,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
* machine in an unusable condition.
*/
+ intel_gt_suspend_late(&i915->gt);
+
spin_lock_irqsave(&i915->mm.obj_lock, flags);
for (phase = phases; *phase; phase++) {
LIST_HEAD(keep);
@@ -230,18 +95,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
list_splice_tail(&keep, *phase);
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
- i915_gem_sanitize(i915);
}
void i915_gem_resume(struct drm_i915_private *i915)
{
GEM_TRACE("\n");
- mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- if (i915_gem_init_hw(i915))
+ if (intel_gt_init_hw(&i915->gt))
goto err_wedged;
/*
@@ -252,15 +114,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
if (intel_gt_resume(&i915->gt))
goto err_wedged;
- intel_uc_resume(&i915->gt.uc);
-
- /* Always reload a context for powersaving. */
- if (!i915_gem_load_power_context(i915))
- goto err_wedged;
-
out_unlock:
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
@@ -271,13 +126,3 @@ err_wedged:
}
goto out_unlock;
}
-
-void i915_gem_init__pm(struct drm_i915_private *i915)
-{
- INIT_WORK(&i915->gem.idle_work, idle_work_handler);
- INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
-
- i915->gem.pm_notifier.notifier_call = pm_notifier;
- blocking_notifier_chain_register(&i915->gt.pm_notifications,
- &i915->gem.pm_notifier);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index 6f7d5d11ac3b..26b78dbdc225 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -12,9 +12,6 @@
struct drm_i915_private;
struct work_struct;
-void i915_gem_init__pm(struct drm_i915_private *i915);
-
-bool i915_gem_load_power_context(struct drm_i915_private *i915);
void i915_gem_resume(struct drm_i915_private *i915);
void i915_gem_idle_work_handler(struct work_struct *work);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
new file mode 100644
index 000000000000..2f7bcfb9c964
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+ obj->mm.dirty = false;
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+ struct list_head *blocks = &obj->mm.blocks;
+ resource_size_t size = obj->base.size;
+ resource_size_t prev_end;
+ struct i915_buddy_block *block;
+ unsigned int flags;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ unsigned int sg_page_sizes;
+ int ret;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+ kfree(st);
+ return -ENOMEM;
+ }
+
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
+ if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ flags |= I915_ALLOC_CONTIGUOUS;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+ if (ret)
+ goto err_free_sg;
+
+ GEM_BUG_ON(list_empty(blocks));
+
+ sg = st->sgl;
+ st->nents = 0;
+ sg_page_sizes = 0;
+ prev_end = (resource_size_t)-1;
+
+ list_for_each_entry(block, blocks, link) {
+ u64 block_size, offset;
+
+ block_size = min_t(u64, size,
+ i915_buddy_block_size(&mem->mm, block));
+ offset = i915_buddy_block_offset(block);
+
+ GEM_BUG_ON(overflows_type(block_size, sg->length));
+
+ if (offset != prev_end ||
+ add_overflows_t(typeof(sg->length), sg->length, block_size)) {
+ if (st->nents) {
+ sg_page_sizes |= sg->length;
+ sg = __sg_next(sg);
+ }
+
+ sg_dma_address(sg) = mem->region.start + offset;
+ sg_dma_len(sg) = block_size;
+
+ sg->length = block_size;
+
+ st->nents++;
+ } else {
+ sg->length += block_size;
+ sg_dma_len(sg) += block_size;
+ }
+
+ prev_end = offset + block_size;
+ }
+
+ sg_page_sizes |= sg->length;
+ sg_mark_end(sg);
+ i915_sg_trim(st);
+
+ __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+ return 0;
+
+err_free_sg:
+ sg_free_table(st);
+ kfree(st);
+ return ret;
+}
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&obj->mm.blocks);
+ obj->mm.region = intel_memory_region_get(mem);
+ obj->flags |= flags;
+
+ mutex_lock(&mem->objects.lock);
+
+ if (obj->flags & I915_BO_ALLOC_VOLATILE)
+ list_add(&obj->mm.region_link, &mem->objects.purgeable);
+ else
+ list_add(&obj->mm.region_link, &mem->objects.list);
+
+ mutex_unlock(&mem->objects.lock);
+}
+
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ struct intel_memory_region *mem = obj->mm.region;
+
+ mutex_lock(&mem->objects.lock);
+ list_del(&obj->mm.region_link);
+ mutex_unlock(&mem->objects.lock);
+
+ intel_memory_region_put(mem);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+
+ /*
+ * NB: Our use of resource_size_t for the size stems from using struct
+ * resource for the mem->region. We might need to revisit this in the
+ * future.
+ */
+
+ GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
+ if (!mem)
+ return ERR_PTR(-ENODEV);
+
+ size = round_up(size, mem->min_page_size);
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ */
+
+ if (size >> PAGE_SHIFT > INT_MAX)
+ return ERR_PTR(-E2BIG);
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = mem->ops->create_object(mem, size, flags);
+ if (!IS_ERR(obj))
+ trace_i915_gem_object_create(obj);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
new file mode 100644
index 000000000000..f2ff6f8bff74
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_REGION_H__
+#define __I915_GEM_REGION_H__
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+struct sg_table;
+
+int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
+void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+ struct sg_table *pages);
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+ struct intel_memory_region *mem,
+ unsigned long flags);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..4d69c3fc3439 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -7,7 +7,9 @@
#include <linux/pagevec.h>
#include <linux/swap.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
+#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
@@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec)
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_memory_region *mem = obj->mm.region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (page_count > totalram_pages())
+ if (obj->base.size > resource_size(&mem->region))
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
static void shmem_release(struct drm_i915_gem_object *obj)
{
+ i915_gem_object_release_memory_region(obj);
+
fput(obj->base.filp);
}
@@ -434,9 +439,9 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.release = shmem_release,
};
-static int create_shmem(struct drm_i915_private *i915,
- struct drm_gem_object *obj,
- size_t size)
+static int __create_shmem(struct drm_i915_private *i915,
+ struct drm_gem_object *obj,
+ resource_size_t size)
{
unsigned long flags = VM_NORESERVE;
struct file *filp;
@@ -455,31 +460,24 @@ static int create_shmem(struct drm_i915_private *i915,
return 0;
}
-struct drm_i915_gem_object *
-i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
+static struct drm_i915_gem_object *
+create_shmem(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
unsigned int cache_level;
gfp_t mask;
int ret;
- /* There is a prevalence of the assumption that we fit the object's
- * page count inside a 32bit _signed_ variable. Let's document this and
- * catch if we ever need to fix it. In the meantime, if you do spot
- * such a local variable, please consider fixing!
- */
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
- ret = create_shmem(i915, &obj->base, size);
+ ret = __create_shmem(i915, &obj->base, size);
if (ret)
goto fail;
@@ -494,7 +492,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -518,7 +516,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
i915_gem_object_set_cache_coherency(obj, cache_level);
- trace_i915_gem_object_create(obj);
+ i915_gem_object_init_memory_region(obj, mem, 0);
return obj;
@@ -527,14 +525,22 @@ fail:
return ERR_PTR(ret);
}
+struct drm_i915_gem_object *
+i915_gem_object_create_shmem(struct drm_i915_private *i915,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
+ size, 0);
+}
+
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
- const void *data, size_t size)
+ const void *data, resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct file *file;
- size_t offset;
+ resource_size_t offset;
int err;
obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
@@ -577,3 +583,35 @@ fail:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
+
+static int init_shmem(struct intel_memory_region *mem)
+{
+ int err;
+
+ err = i915_gemfs_init(mem->i915);
+ if (err) {
+ DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
+ err);
+ }
+
+ return 0; /* Don't error, we can simply fall back to the kernel mnt */
+}
+
+static void release_shmem(struct intel_memory_region *mem)
+{
+ i915_gemfs_fini(mem->i915);
+}
+
+static const struct intel_memory_region_ops shmem_region_ops = {
+ .init = init_shmem,
+ .release = release_shmem,
+ .create_object = create_shmem,
+};
+
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915, 0,
+ totalram_pages() << PAGE_SHIFT,
+ PAGE_SIZE, 0,
+ &shmem_region_ops);
+}
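
With this, i915_gem_object_create_shmem() is reduced to a thin wrapper: it
selects the INTEL_REGION_SMEM region and lets the generic
i915_gem_object_create_region() round the size up to the region's minimum
page size before dispatching to the region's create_object() hook. A
stripped-down, self-contained model of that dispatch (the names and the
validation are simplified stand-ins, not the driver's code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct region;

    struct region_ops {
            /* backend hook, like intel_memory_region_ops.create_object */
            int (*create_object)(struct region *r, uint64_t size);
    };

    struct region {
            const struct region_ops *ops;
            uint64_t min_page_size;
            uint64_t total;
    };

    /* generic front end, like i915_gem_object_create_region() */
    static int region_create_object(struct region *r, uint64_t size)
    {
            if (!r)
                    return -ENODEV;

            /* round up to the region's minimum page size */
            size = (size + r->min_page_size - 1) & ~(r->min_page_size - 1);
            if (!size || size > r->total)
                    return -E2BIG;

            return r->ops->create_object(r, size);
    }

    static int shmem_create(struct region *r, uint64_t size)
    {
            (void)r;
            printf("shmem object of %llu bytes\n", (unsigned long long)size);
            return 0;
    }

    static const struct region_ops shmem_ops = { shmem_create };

    int main(void)
    {
            struct region smem = { &shmem_ops, 4096, 1ull << 30 };

            return region_create_object(&smem, 5000); /* rounds up to 8192 */
    }
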
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 1a51b3598d63..f2418a1cfe68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -16,40 +16,6 @@
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915,
- unsigned int flags,
- bool *unlock)
-{
- struct mutex *m = &i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(m)) {
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
-
- case MUTEX_TRYLOCK_FAILED:
- *unlock = false;
- if (flags & I915_SHRINK_ACTIVE &&
- mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
- *unlock = true;
- return *unlock;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
- }
-
- BUG();
-}
-
-static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
-{
- if (!unlock)
- return;
-
- mutex_unlock(&i915->drm.struct_mutex);
-}
-
static bool swap_available(void)
{
return get_nr_swap_pages() > 0;
@@ -61,7 +27,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (!i915_gem_object_is_shrinkable(obj))
return false;
- /* Only report true if by unbinding the object and putting its pages
+ /*
+ * Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
*
@@ -72,16 +39,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
return false;
- /* If any vma are "permanently" pinned, it will prevent us from
- * reclaiming the obj->mm.pages. We only allow scanout objects to claim
- * a permanent pin, along with a few others like the context objects.
- * To simplify the scan, and to avoid walking the list of vma under the
- * object, we just check the count of its permanently pinned.
- */
- if (READ_ONCE(obj->pin_global))
- return false;
-
- /* We can only return physical pages to the system if we can either
+ /*
+ * We can only return physical pages to the system if we can either
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
@@ -162,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
- bool unlock;
-
- if (!shrinker_lock(i915, shrink, &unlock))
- return 0;
/*
* When shrinking the active list, we should also consider active
@@ -275,8 +230,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- shrinker_unlock(i915, unlock);
-
if (nr_scanned)
*nr_scanned += scanned;
return count;
@@ -346,19 +299,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private *i915 =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
unsigned long freed;
- bool unlock;
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, 0, &unlock))
- return SHRINK_STOP;
-
freed = i915_gem_shrink(i915,
sc->nr_to_scan,
&sc->nr_scanned,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_WRITEBACK);
+ I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_wakeref_t wakeref;
@@ -373,8 +321,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
}
}
- shrinker_unlock(i915, unlock);
-
return sc->nr_scanned ? freed : SHRINK_STOP;
}
@@ -391,6 +337,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -426,10 +373,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
intel_wakeref_t wakeref;
- bool unlock;
-
- if (!shrinker_lock(i915, 0, &unlock))
- return NOTIFY_DONE;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
@@ -446,15 +389,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!vma->iomap || i915_vma_is_active(vma))
continue;
- mutex_unlock(&i915->ggtt.vm.mutex);
- if (i915_vma_unbind(vma) == 0)
+ if (__i915_vma_unbind(vma) == 0)
freed_pages += count;
- mutex_lock(&i915->ggtt.vm.mutex);
}
mutex_unlock(&i915->ggtt.vm.mutex);
- shrinker_unlock(i915, unlock);
-
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
@@ -497,22 +436,9 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
fs_reclaim_acquire(GFP_KERNEL);
- /*
- * As we invariably rely on the struct_mutex within the shrinker,
- * but have a complicated recursion dance, taint all the mutexes used
- * within the shrinker with the struct_mutex. For completeness, we
- * taint with all subclass of struct_mutex, even though we should
- * only need tainting by I915_MM_NORMAL to catch possible ABBA
- * deadlocks from using struct_mutex inside @mutex.
- */
- mutex_acquire(&i915->drm.struct_mutex.dep_map,
- I915_MM_SHRINKER, 0, _RET_IP_);
-
mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
mutex_release(&mutex->dep_map, _RET_IP_);
- mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
-
fs_reclaim_release(GFP_KERNEL);
if (unlock)
@@ -523,46 +449,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
/*
* We can only be called while the pages are pinned or when
* the pages are released. If pinned, we should only be called
* from a single caller under controlled conditions; and on release
* only one caller may release us. The two may never cross.
*/
- if (!list_empty(&obj->mm.link)) { /* pinned by caller */
- struct drm_i915_private *i915 = obj_to_i915(obj);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- GEM_BUG_ON(list_empty(&obj->mm.link));
+ if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+ return;
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+ !list_empty(&obj->mm.link)) {
list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
-
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
struct list_head *head)
{
+ struct drm_i915_private *i915 = obj_to_i915(obj);
+ unsigned long flags;
+
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- GEM_BUG_ON(!list_empty(&obj->mm.link));
+ if (!i915_gem_object_is_shrinkable(obj))
+ return;
- if (i915_gem_object_is_shrinkable(obj)) {
- struct drm_i915_private *i915 = obj_to_i915(obj);
- unsigned long flags;
+ if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+ return;
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ GEM_BUG_ON(!kref_read(&obj->base.refcount));
+ if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
list_add_tail(&obj->mm.link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
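
The rework above replaces the list_empty() heuristic with obj->mm.shrink_pin:
the first make_unshrinkable() takes the object off the shrink list, the last
make_shrinkable() puts it back, and atomic_add_unless() gives both directions
a lock-free fast path. A self-contained model of that counter discipline with
C11 atomics (single global counter; printf stands in for the list and
spinlock work, which the real code does under mm.obj_lock):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int shrink_pin; /* 0 == object sits on the shrink list */

    /* atomic_add_unless(v, a, u): add a unless the value is u */
    static int add_unless(atomic_int *v, int a, int u)
    {
            int old = atomic_load(v);

            while (old != u) {
                    if (atomic_compare_exchange_weak(v, &old, old + a))
                            return 1;
            }
            return 0;
    }

    static void make_unshrinkable(void)
    {
            if (add_unless(&shrink_pin, 1, 0))
                    return; /* fast path: already pinned, no lock needed */

            /* slow path: 0 -> 1; the driver takes the spinlock here and
             * removes the object from the shrinker's list */
            if (atomic_fetch_add(&shrink_pin, 1) == 0)
                    printf("removed from shrink list\n");
    }

    static void make_shrinkable(void)
    {
            if (add_unless(&shrink_pin, -1, 1))
                    return; /* fast path: not the last unpin */

            /* slow path: 1 -> 0 puts the object back on the list */
            if (atomic_fetch_sub(&shrink_pin, 1) == 1)
                    printf("back on shrink list\n");
    }

    int main(void)
    {
            make_unshrinkable(); /* removed from shrink list */
            make_unshrinkable(); /* fast path */
            make_shrinkable();   /* fast path */
            make_shrinkable();   /* back on shrink list */
            return 0;
    }

Only the 0 <-> 1 transitions need the lock, which is what lets the shrinker
paths above drop struct_mutex entirely.
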
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index aa533b4ab5f5..a2d49c04e6a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -10,6 +10,7 @@
#include <drm/drm_mm.h>
#include <drm/i915_drm.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
@@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
return 0;
}
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
+static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv)
{
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
}
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
+static int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
resource_size_t reserved_base, stolen_top;
resource_size_t reserved_total, reserved_size;
@@ -425,8 +426,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
bdw_get_stolen_reserved(dev_priv,
&reserved_base, &reserved_size);
break;
- case 11:
default:
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ /* fall-through */
+ case 11:
+ case 12:
icl_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
@@ -536,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
+
+ if (obj->mm.region)
+ i915_gem_object_release_memory_region(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
@@ -545,65 +552,116 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- struct drm_mm_node *stolen)
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ struct drm_mm_node *stolen,
+ struct intel_memory_region *mem)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
+ int err = -ENOMEM;
obj = i915_gem_object_alloc();
- if (obj == NULL)
- return NULL;
+ if (!obj)
+ goto err;
drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+ i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- if (i915_gem_object_pin_pages(obj))
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
goto cleanup;
+ if (mem)
+ i915_gem_object_init_memory_region(obj, mem, 0);
+
return obj;
cleanup:
i915_gem_object_free(obj);
- return NULL;
+err:
+ return ERR_PTR(err);
}
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
- resource_size_t size)
+static struct drm_i915_gem_object *
+_i915_gem_object_create_stolen(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
{
+ struct drm_i915_private *dev_priv = mem->i915;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
+ return ERR_PTR(-ENODEV);
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
if (ret) {
- kfree(stolen);
- return NULL;
+ obj = ERR_PTR(ret);
+ goto err_free;
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj)
- return obj;
+ obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem);
+ if (IS_ERR(obj))
+ goto err_remove;
+ return obj;
+
+err_remove:
i915_gem_stolen_remove_node(dev_priv, stolen);
+err_free:
kfree(stolen);
- return NULL;
+ return obj;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
+{
+ return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN],
+ size, I915_BO_ALLOC_CONTIGUOUS);
+}
+
+static int init_stolen(struct intel_memory_region *mem)
+{
+ /*
+ * Initialise stolen early so that we may reserve preallocated
+ * objects for the BIOS to KMS transition.
+ */
+ return i915_gem_init_stolen(mem->i915);
+}
+
+static void release_stolen(struct intel_memory_region *mem)
+{
+ i915_gem_cleanup_stolen(mem->i915);
+}
+
+static const struct intel_memory_region_ops i915_region_stolen_ops = {
+ .init = init_stolen,
+ .release = release_stolen,
+ .create_object = _i915_gem_object_create_stolen,
+};
+
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
+{
+ return intel_memory_region_create(i915,
+ intel_graphics_stolen_res.start,
+ resource_size(&intel_graphics_stolen_res),
+ PAGE_SIZE, 0,
+ &i915_region_stolen_ops);
}
struct drm_i915_gem_object *
@@ -619,9 +677,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
- return NULL;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ return ERR_PTR(-ENODEV);
DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
&stolen_offset, &gtt_offset, &size);
@@ -630,11 +686,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (WARN_ON(size == 0) ||
WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
- return NULL;
+ return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
- return NULL;
+ return ERR_PTR(-ENOMEM);
stolen->start = stolen_offset;
stolen->size = size;
@@ -644,15 +700,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
kfree(stolen);
- return NULL;
+ return ERR_PTR(ret);
}
- obj = _i915_gem_object_create_stolen(dev_priv, stolen);
- if (obj == NULL) {
+ obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL);
+ if (IS_ERR(obj)) {
DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
- return NULL;
+ return obj;
}
/* Some objects just need physical mem from stolen space */
@@ -674,22 +730,26 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur
* later.
*/
+ mutex_lock(&ggtt->vm.mutex);
ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level,
0);
if (ret) {
DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
+ mutex_unlock(&ggtt->vm.mutex);
goto err_pages;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->pages);
vma->pages = obj->mm.pages;
- vma->flags |= I915_VMA_GLOBAL_BIND;
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+
+ set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
__i915_vma_set_map_and_fenceable(vma);
- mutex_lock(&ggtt->vm.mutex);
- list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+ list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
mutex_unlock(&ggtt->vm.mutex);
GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
@@ -701,5 +761,5 @@ err_pages:
i915_gem_object_unpin_pages(obj);
err:
i915_gem_object_put(obj);
- return NULL;
+ return ERR_PTR(ret);
}
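
A large part of the stolen rework above is converting NULL returns into
ERR_PTR() codes so callers can tell "no stolen region" (-ENODEV) apart from
an invalid size (-EINVAL) or an allocation failure (-ENOMEM). For reference,
the idiom packs a small negative errno into the top of the pointer range; a
user-space model of the helpers (the real ones live in include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            /* errors occupy the last MAX_ERRNO values of the address space */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *create_object(int have_region)
    {
            if (!have_region)
                    return ERR_PTR(-ENODEV); /* backend absent, not an OOM */
            return ERR_PTR(-ENOMEM);         /* pretend the allocation failed */
    }

    int main(void)
    {
            void *obj = create_object(0);

            if (IS_ERR(obj))
                    printf("create failed: %ld\n", PTR_ERR(obj)); /* -19, ENODEV */
            return 0;
    }
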
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index 2289644d8604..c1040627fbf3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -21,8 +21,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
-int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index 1e372420771b..540ef0551789 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- if (target) {
+ if (target && xchg(&target->file_priv, NULL))
list_del(&target->client_link);
- target->file_priv = NULL;
- }
target = request;
}
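
The throttle fix above swaps a plain "target->file_priv = NULL" for xchg(),
so only the caller that actually observes the non-NULL value performs the
list_del() -- a claim-once pattern that closes the race against a concurrent
release of the same request. A minimal stand-alone version with C11 atomics
(single pointer; printf stands in for the unlink):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(void *) file_priv;

    /* Whoever swaps the non-NULL pointer out wins and does the teardown;
     * a concurrent caller sees NULL and does nothing. */
    static void release(void)
    {
            if (atomic_exchange(&file_priv, NULL))
                    printf("unlinked request\n");
    }

    int main(void)
    {
            int dummy;

            atomic_store(&file_priv, &dummy);
            release(); /* prints once */
            release(); /* no-op: already claimed */
            return 0;
    }
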
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ca0c2f451742..1fa592d82af5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -181,22 +181,25 @@ static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
int tiling_mode, unsigned int stride)
{
+ struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
struct i915_vma *vma;
- int ret;
+ int ret = 0;
if (tiling_mode == I915_TILING_NONE)
return 0;
+ mutex_lock(&ggtt->vm.mutex);
for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
if (ret)
- return ret;
+ break;
}
+ mutex_unlock(&ggtt->vm.mutex);
- return 0;
+ return ret;
}
int
@@ -212,7 +215,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
- lockdep_assert_held(&i915->drm.struct_mutex);
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
@@ -233,16 +235,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* whilst executing a fenced command for an untiled object.
*/
- err = i915_gem_object_fence_prepare(obj, tiling, stride);
- if (err)
- return err;
-
i915_gem_object_lock(obj);
if (i915_gem_object_is_framebuffer(obj)) {
i915_gem_object_unlock(obj);
return -EBUSY;
}
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err) {
+ i915_gem_object_unlock(obj);
+ return err;
+ }
+
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them from being swapped out and causing corruption
* due to the change in swizzling.
@@ -313,10 +317,14 @@ int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_gem_object *obj;
int err;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -340,9 +348,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x;
else
- args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
+ args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -364,12 +372,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
}
}
- err = mutex_lock_interruptible(&dev->struct_mutex);
- if (err)
- goto err;
-
err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
- mutex_unlock(&dev->struct_mutex);
/* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
@@ -402,6 +405,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int err = -ENOENT;
+ if (!dev_priv->ggtt.num_fences)
+ return -EOPNOTSUPP;
+
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, args->handle);
if (obj) {
@@ -415,10 +421,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
switch (args->tiling_mode) {
case I915_TILING_X:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y;
break;
default:
case I915_TILING_NONE:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index abfbac49b8e8..4c72d74d6576 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it;
- struct mutex *unlock = NULL;
unsigned long end;
int ret = 0;
@@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
- if (!unlock) {
- unlock = &mn->mm->i915->drm.struct_mutex;
-
- switch (mutex_trylock_recursive(unlock)) {
- default:
- case MUTEX_TRYLOCK_FAILED:
- if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
- i915_gem_object_put(obj);
- return -EINTR;
- }
- /* fall through */
- case MUTEX_TRYLOCK_SUCCESS:
- break;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- unlock = ERR_PTR(-EEXIST);
- break;
- }
- }
-
ret = i915_gem_object_unbind(obj,
I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
if (ret)
- goto unlock;
+ return ret;
spin_lock(&mn->lock);
@@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
spin_unlock(&mn->lock);
-unlock:
- if (!IS_ERR_OR_NULL(unlock))
- mutex_unlock(unlock);
-
return ret;
}
@@ -770,6 +745,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
void *data,
struct drm_file *file)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
@@ -803,7 +779,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
* On almost all of the older hw, we cannot tell the GPU that
* a page is readonly.
*/
- vm = dev_priv->kernel_context->vm;
+ vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
+ true); /* static vm */
if (!vm || !vm->has_read_only)
return -ENODEV;
}
@@ -813,7 +790,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 3c5d17b2b670..892d12db6c49 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -96,6 +96,7 @@ huge_gem_object(struct drm_i915_private *i915,
phys_addr_t phys_size,
dma_addr_t dma_size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
@@ -111,7 +112,7 @@ huge_gem_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops);
+ i915_gem_object_init(obj, &huge_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 8de83c6d81f5..688c49a24f32 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -8,6 +8,8 @@
#include "i915_selftest.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
@@ -17,6 +19,7 @@
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
+#include "selftests/mock_region.h"
#include "selftests/i915_random.h"
static const unsigned int page_sizes[] = {
@@ -113,8 +116,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
- obj->mm.madv = I915_MADV_DONTNEED;
-
GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -135,7 +136,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
huge_pages_free_pages(pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -150,6 +150,7 @@ huge_pages_object(struct drm_i915_private *i915,
u64 size,
unsigned int page_mask)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -166,7 +167,9 @@ huge_pages_object(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops);
+ i915_gem_object_init(obj, &huge_page_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -227,8 +230,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -261,8 +262,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
sg_dma_len(sg) = obj->base.size;
sg_dma_address(sg) = page_size;
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
@@ -281,7 +280,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
{
fake_free_huge_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -299,6 +297,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = {
static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -317,9 +316,11 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
drm_gem_private_object_init(&i915->drm, &obj->base, size);
if (single)
- i915_gem_object_init(obj, &fake_ops_single);
+ i915_gem_object_init(obj, &fake_ops_single, &lock_class);
else
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -333,7 +334,12 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
- int err = 0;
+ int err;
+
+ /* We have to wait for the async bind to complete before our asserts */
+ err = i915_vma_sync(vma);
+ if (err)
+ return err;
if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
@@ -447,6 +453,88 @@ out_device:
return err;
}
+static int igt_mock_memory_region_huge_pages(void *arg)
+{
+ const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
+ struct i915_ppgtt *ppgtt = arg;
+ struct drm_i915_private *i915 = ppgtt->vm.i915;
+ unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct intel_memory_region *mem;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int bit;
+ int err = 0;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("%s failed to create memory region\n", __func__);
+ return PTR_ERR(mem);
+ }
+
+ for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+ unsigned int page_size = BIT(bit);
+ resource_size_t phys;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+ obj = i915_gem_object_create_region(mem, page_size,
+ flags[i]);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_region;
+ }
+
+ vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_close;
+
+ err = igt_check_page_sizes(vma);
+ if (err)
+ goto out_unpin;
+
+ phys = i915_gem_object_get_dma_address(obj, 0);
+ if (!IS_ALIGNED(phys, page_size)) {
+ pr_err("%s addr misaligned(%pa) page_size=%u\n",
+ __func__, &phys, page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ if (vma->page_sizes.gtt != page_size) {
+ pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+ __func__, vma->page_sizes.gtt,
+ page_size);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+ }
+ }
+
+ goto out_region;
+
+out_unpin:
+ i915_vma_unpin(vma);
+out_close:
+ i915_vma_close(vma);
+out_put:
+ i915_gem_object_put(obj);
+out_region:
+ intel_memory_region_put(mem);
+ return err;
+}
+
static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
@@ -879,9 +967,8 @@ out_object_put:
return err;
}
-static int gpu_write(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_write(struct intel_context *ce,
+ struct i915_vma *vma,
u32 dw,
u32 val)
{
@@ -893,11 +980,12 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+ return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
vma->size >> PAGE_SHIFT, val);
}
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int
+__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
unsigned int needs_flush;
unsigned long n;
@@ -929,18 +1017,61 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
return err;
}
-static int __igt_write_huge(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ if (i915_gem_object_has_struct_page(obj))
+ return __cpu_check_shmem(obj, dword, val);
+ else if (i915_gem_object_is_lmem(obj))
+ return __cpu_check_lmem(obj, dword, val);
+
+ return -ENODEV;
+}
+
+static int __igt_write_huge(struct intel_context *ce,
struct drm_i915_gem_object *obj,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -954,7 +1085,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
* The ggtt may have some pages reserved so
* refrain from erroring out.
*/
- if (err == -ENOSPC && i915_is_ggtt(vm))
+ if (err == -ENOSPC && i915_is_ggtt(ce->vm))
err = 0;
goto out_vma_close;
@@ -964,7 +1095,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
if (err)
goto out_vma_unpin;
- err = gpu_write(vma, ctx, engine, dword, val);
+ err = gpu_write(ce, vma, dword, val);
if (err) {
pr_err("gpu-write failed at offset=%llx\n", offset);
goto out_vma_unpin;
@@ -987,14 +1118,13 @@ out_vma_close:
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
- static struct intel_engine_cs *engines[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
unsigned int max_page_size;
- unsigned int id;
+ unsigned int count;
u64 max;
u64 num;
u64 size;
@@ -1008,19 +1138,18 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
- max = div_u64((vm->total - size), max_page_size);
-
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
+ count = 0;
+ max = U64_MAX;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- }
- engines[n++] = engine;
- }
+ max = min(max, ce->vm->total);
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
if (!n)
return 0;
@@ -1029,23 +1158,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
* randomized order, let's also make feeding to the same engine a few
* times in succession a possibility by enlarging the permutation array.
*/
- order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+ order = i915_random_order(count * count, &prng);
if (!order)
return -ENOMEM;
+ max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max = div_u64(max - size, max_page_size);
+
/*
* Try various offsets in an ascending/descending fashion until we
* time out -- we want to avoid issues hidden by effectively always using
* offset = 0.
*/
i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
for_each_prime_number_from(num, 0, max) {
u64 offset_low = num * max_page_size;
u64 offset_high = (max - num) * max_page_size;
u32 dword = offset_in_page(num) / 4;
+ struct intel_context *ce;
- engine = engines[order[i] % n];
- i = (i + 1) % (n * I915_NUM_ENGINES);
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
/*
* In order to utilize 64K pages we need to both pad the vma
@@ -1057,22 +1193,23 @@ static int igt_write_huge(struct i915_gem_context *ctx,
offset_low = round_down(offset_low,
I915_GTT_PAGE_SIZE_2M);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ err = __igt_write_huge(ce, obj, size, offset_low,
dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ err = __igt_write_huge(ce, obj, size, offset_high,
dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
- "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high,
+ "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+ __func__, ce->engine->name, offset_low, offset_high,
max_page_size))
break;
}
+ i915_gem_context_unlock_engines(ctx);
kfree(order);
@@ -1180,131 +1317,235 @@ out_device:
return err;
}
-static int igt_ppgtt_internal_huge(void *arg)
+typedef struct drm_i915_gem_object *
+(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
+
+static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
+{
+ return i915->mm.gemfs && has_transparent_hugepage();
+}
+
+static struct drm_i915_gem_object *
+igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ if (!igt_can_allocate_thp(i915)) {
+ pr_info("%s missing THP support, skipping\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return i915_gem_object_create_shmem(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_internal(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return huge_pages_object(i915, size, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+ return i915_gem_object_create_lmem(i915, size, flags);
+}
+
+static u32 igt_random_size(struct rnd_state *prng,
+ u32 min_page_size,
+ u32 max_page_size)
+{
+ u64 mask;
+ u32 size;
+
+ GEM_BUG_ON(!is_power_of_2(min_page_size));
+ GEM_BUG_ON(!is_power_of_2(max_page_size));
+ GEM_BUG_ON(min_page_size < PAGE_SIZE);
+ GEM_BUG_ON(min_page_size > max_page_size);
+
+ mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
+ size = prandom_u32_state(prng) & mask;
+ if (size < min_page_size)
+ size |= min_page_size;
+
+ return size;
+}
+
+static int igt_ppgtt_smoke_huge(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_64K,
- SZ_128K,
- SZ_256K,
- SZ_512K,
- SZ_1M,
- SZ_2M,
+ I915_RND_STATE(prng);
+ struct {
+ igt_create_fn fn;
+ u32 min;
+ u32 max;
+ } backends[] = {
+ { igt_create_internal, SZ_64K, SZ_2M, },
+ { igt_create_shmem, SZ_64K, SZ_32M, },
+ { igt_create_local, SZ_64K, SZ_1G, },
};
- int i;
int err;
+ int i;
/*
- * Sanity check that the HW uses huge pages correctly through internal
- * -- ensure that our writes land in the right place.
+ * Sanity check that the HW uses huge pages correctly through our
+ * various backends -- ensure that our writes land in the right place.
*/
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ u32 min = backends[i].min;
+ u32 max = backends[i].max;
+ u32 size = max;
+try_again:
+ size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ obj = backends[i].fn(i915, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -E2BIG) {
+ size >>= 1;
+ goto try_again;
+ } else if (err == -ENODEV) {
+ err = 0;
+ continue;
+ }
+
+ return err;
+ }
err = i915_gem_object_pin_pages(obj);
- if (err)
+ if (err) {
+ if (err == -ENXIO) {
+ i915_gem_object_put(obj);
+ size >>= 1;
+ goto try_again;
+ }
goto out_put;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
- pr_info("internal unable to allocate huge-page(s) with size=%u\n",
- size);
+ if (obj->mm.page_sizes.phys < min) {
+ pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
+ __func__, size, i);
+ err = -ENOMEM;
goto out_unpin;
}
err = igt_write_huge(ctx, obj);
if (err) {
- pr_err("internal write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ pr_err("%s write-huge failed with size=%u, i=%d\n",
+ __func__, size, i);
}
-
+out_unpin:
i915_gem_object_unpin_pages(obj);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+out_put:
i915_gem_object_put(obj);
- }
- return 0;
+ if (err == -ENOMEM || err == -ENXIO)
+ err = 0;
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+ if (err)
+ break;
- return err;
-}
+ cond_resched();
+ }
-static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
-{
- return i915->mm.gemfs && has_transparent_hugepage();
+ return err;
}
-static int igt_ppgtt_gemfs_huge(void *arg)
+static int igt_ppgtt_sanity_check(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_2M,
- SZ_4M,
- SZ_8M,
- SZ_16M,
- SZ_32M,
+ unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ struct {
+ igt_create_fn fn;
+ unsigned int flags;
+ } backends[] = {
+ { igt_create_system, 0, },
+ { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
};
- int i;
+ struct {
+ u32 size;
+ u32 pages;
+ } combos[] = {
+ { SZ_64K, SZ_64K },
+ { SZ_2M, SZ_2M },
+ { SZ_2M, SZ_64K },
+ { SZ_2M - SZ_64K, SZ_64K },
+ { SZ_2M - SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_64K | SZ_4K },
+ { SZ_2M + SZ_4K, SZ_2M | SZ_4K },
+ { SZ_2M + SZ_64K, SZ_2M | SZ_64K },
+ };
+ int i, j;
int err;
+ if (supported == I915_GTT_PAGE_SIZE_4K)
+ return 0;
+
/*
- * Sanity check that the HW uses huge pages correctly through gemfs --
- * ensure that our writes land in the right place.
+ * Sanity check that the HW behaves with a limited set of combinations.
+ * We already have a bunch of randomised testing, which should give us
+ * a decent amount of variation between runs; however, we should keep
+ * this to limit the chances of introducing a temporary regression by
+ * testing the most obvious cases that might make something blow up.
*/
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- return 0;
- }
+ for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+ for (j = 0; j < ARRAY_SIZE(combos); ++j) {
+ struct drm_i915_gem_object *obj;
+ u32 size = combos[j].size;
+ u32 pages = combos[j].pages;
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
+ obj = backends[i].fn(i915, size, backends[i].flags);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ if (err == -ENODEV) {
+ pr_info("Device lacks local memory, skipping\n");
+ err = 0;
+ break;
+ }
- obj = i915_gem_object_create_shmem(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return err;
+ }
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
- pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
- size);
- goto out_unpin;
- }
+ GEM_BUG_ON(pages > obj->base.size);
+ pages = pages & supported;
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("gemfs write-huge failed with size=%u\n",
- size);
- goto out_unpin;
+ if (pages)
+ obj->mm.page_sizes.sg = pages;
+
+ err = igt_write_huge(ctx, obj);
+
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ i915_gem_object_put(obj);
+
+ if (err) {
+ pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
+ __func__, size, pages, i, j);
+ goto out;
+ }
}
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
+ cond_resched();
}
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
+out:
+ if (err == -ENOMEM)
+ err = 0;
return err;
}
@@ -1314,15 +1555,15 @@ static int igt_ppgtt_pin_update(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *dev_priv = ctx->i915;
unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_address_space *vm = ctx->vm;
struct drm_i915_gem_object *obj;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
unsigned int n;
int first, last;
- int err;
+ int err = 0;
/*
* Make sure there's no funny business when doing a PIN_UPDATE -- in the
@@ -1332,9 +1573,10 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!vm || !i915_vm_is_4lvl(vm)) {
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_vm_is_4lvl(vm)) {
pr_info("48b PPGTT not supported, skipping\n");
- return 0;
+ goto out_vm;
}
first = ilog2(I915_GTT_PAGE_SIZE_64K);
@@ -1387,7 +1629,7 @@ static int igt_ppgtt_pin_update(void *arg)
goto out_unpin;
}
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
+ err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL);
if (err)
goto out_unpin;
@@ -1419,14 +1661,18 @@ static int igt_ppgtt_pin_update(void *arg)
*/
n = 0;
- for_each_engine(engine, dev_priv, id) {
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
+ i915_gem_context_unlock_engines(ctx);
+ if (err)
+ goto out_unpin;
+
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
@@ -1439,6 +1685,8 @@ out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1448,7 +1696,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1498,6 +1746,7 @@ out_put:
out_restore:
i915->mm.gemfs = gemfs;
+ i915_vm_put(vm);
return err;
}
@@ -1505,14 +1754,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
unsigned int n;
- int err;
+ int err = 0;
/*
* Sanity check shrinking huge-paged object -- make sure nothing blows
@@ -1521,12 +1770,14 @@ static int igt_shrink_thp(void *arg)
if (!igt_can_allocate_thp(i915)) {
pr_info("missing THP support, skipping\n");
- return 0;
+ goto out_vm;
}
obj = i915_gem_object_create_shmem(i915, SZ_2M);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_vm;
+ }
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
@@ -1548,16 +1799,19 @@ static int igt_shrink_thp(void *arg)
goto out_unpin;
n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
- err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ err = gpu_write(ce, vma, n++, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
-
+ i915_gem_context_unlock_engines(ctx);
i915_vma_unpin(vma);
+ if (err)
+ goto out_close;
/*
* Now that the pages are *unpinned* shrink-all should invoke
@@ -1583,16 +1837,17 @@ static int igt_shrink_thp(void *arg)
while (n--) {
err = cpu_check(obj, n, 0xdeadbeaf);
if (err)
- goto out_unpin;
+ break;
}
-
out_unpin:
i915_vma_unpin(vma);
out_close:
i915_vma_close(vma);
out_put:
i915_gem_object_put(obj);
+out_vm:
+ i915_vm_put(vm);
return err;
}
@@ -1601,6 +1856,7 @@ int i915_gem_huge_page_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_mock_exhaust_device_supported_pages),
+ SUBTEST(igt_mock_memory_region_huge_pages),
SUBTEST(igt_mock_ppgtt_misaligned_dma),
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
@@ -1617,7 +1873,6 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
@@ -1643,9 +1898,7 @@ out_close:
i915_vm_put(&ppgtt->vm);
out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
drm_dev_put(&dev_priv->drm);
-
return err;
}
@@ -1656,12 +1909,12 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
SUBTEST(igt_ppgtt_exhaust_huge),
- SUBTEST(igt_ppgtt_gemfs_huge),
- SUBTEST(igt_ppgtt_internal_huge),
+ SUBTEST(igt_ppgtt_smoke_huge),
+ SUBTEST(igt_ppgtt_sanity_check),
};
struct drm_file *file;
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
+ struct i915_address_space *vm;
int err;
if (!HAS_PPGTT(i915)) {
@@ -1676,25 +1929,21 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- if (ctx->vm)
- ctx->vm->scrub_64K = true;
+ mutex_lock(&ctx->mutex);
+ vm = i915_gem_context_vm(ctx);
+ if (vm)
+ WRITE_ONCE(vm->scrub_64K, true);
+ mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
mock_file_free(i915, file);
-
return err;
}
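
The igt_random_size() helper added above draws a page-aligned size in
[min_page_size, 2 * max_page_size): masking the random value keeps it
page-aligned and below twice the maximum, and OR-ing in min_page_size
enforces the lower bound without breaking alignment (both bounds are powers
of two). A quick self-contained check of that arithmetic (user-space
stand-ins for the kernel macros and PRNG):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* mirror of igt_random_size(): random page-aligned size, at least
     * min, below 2 * max (both powers of two) */
    static uint32_t random_size(uint32_t min, uint32_t max)
    {
            uint64_t mask = ((max << 1ULL) - 1) & PAGE_MASK;
            uint32_t size = (uint32_t)rand() & mask;

            if (size < min)
                    size |= min;
            return size;
    }

    int main(void)
    {
            for (int i = 0; i < 100000; i++) {
                    uint32_t sz = random_size(1u << 16, 1u << 21); /* 64K..2M */

                    assert(sz >= (1u << 16));          /* lower bound held */
                    assert(sz < (1u << 22));           /* below 2 * max */
                    assert(!(sz & (PAGE_SIZE - 1)));   /* page aligned */
            }
            printf("ok\n");
            return 0;
    }
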
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index d8804a847945..da8edee4fe0a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,6 +5,7 @@
#include "i915_selftest.h"
+#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
@@ -12,10 +13,9 @@
#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_client_fill(void *arg)
+static int __igt_client_fill(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct intel_context *ce = engine->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -37,7 +37,7 @@ static int igt_client_fill(void *arg)
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
- obj = huge_gem_object(i915, phys_sz, sz);
+ obj = huge_gem_object(engine->i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -103,6 +103,28 @@ err_flush:
return err;
}
+static int igt_client_fill(void *arg)
+{
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ int err;
+
+ engine = intel_engine_lookup_user(arg,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ err = __igt_client_fill(engine);
+ if (err == -ENOMEM)
+ err = 0;
+ if (err)
+ return err;
+ } while (1);
+}
+
int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 0ff7a89aadca..2b29f6b4e1dd 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -7,13 +7,18 @@
#include <linux/prime_numbers.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_ring.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
-static int cpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+struct context {
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+};
+
+static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
@@ -21,11 +26,11 @@ static int cpu_set(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_write(obj, &needs_clflush);
+ err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -38,14 +43,12 @@ static int cpu_set(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(cpu, sizeof(*cpu));
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int cpu_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
@@ -53,11 +56,11 @@ static int cpu_get(struct drm_i915_gem_object *obj,
u32 *cpu;
int err;
- err = i915_gem_object_prepare_read(obj, &needs_clflush);
+ err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
if (err)
return err;
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
map = kmap_atomic(page);
cpu = map + offset_in_page(offset);
@@ -67,136 +70,137 @@ static int cpu_get(struct drm_i915_gem_object *obj,
*v = *cpu;
kunmap_atomic(map);
- i915_gem_object_finish_access(obj);
+ i915_gem_object_finish_access(ctx->obj);
return 0;
}
-static int gtt_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
iowrite32(v, &map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int gtt_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
struct i915_vma *vma;
u32 __iomem *map;
- int err;
+ int err = 0;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
if (IS_ERR(vma))
return PTR_ERR(vma);
+ intel_gt_pm_get(vma->vm->gt);
+
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto out_rpm;
+ }
*v = ioread32(&map[offset / sizeof(*map)]);
i915_vma_unpin_iomap(vma);
- return 0;
+out_rpm:
+ intel_gt_pm_put(vma->vm->gt);
+ return err;
}
-static int wc_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
map[offset / sizeof(*map)] = v;
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int wc_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
+static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
u32 *map;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
if (IS_ERR(map))
return PTR_ERR(map);
*v = map[offset / sizeof(*map)];
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_map(ctx->obj);
return 0;
}
-static int gpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
+static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
+ i915_gem_object_lock(ctx->obj);
+ err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
+ i915_gem_object_unlock(ctx->obj);
if (err)
return err;
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
+ rq = i915_request_create(ctx->engine->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -209,12 +213,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return PTR_ERR(cs);
}
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(ctx->engine->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(ctx->engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
@@ -239,32 +243,34 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return err;
}
-static bool always_valid(struct drm_i915_private *i915)
+static bool always_valid(struct context *ctx)
{
return true;
}
-static bool needs_fence_registers(struct drm_i915_private *i915)
+static bool needs_fence_registers(struct context *ctx)
{
- return !intel_gt_is_wedged(&i915->gt);
-}
+ struct intel_gt *gt = ctx->engine->gt;
-static bool needs_mi_store_dword(struct drm_i915_private *i915)
-{
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(gt))
return false;
- if (!HAS_ENGINE(i915, RCS0))
+ return gt->ggtt->num_fences;
+}
+
+static bool needs_mi_store_dword(struct context *ctx)
+{
+ if (intel_gt_is_wedged(ctx->engine->gt))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS0]);
+ return intel_engine_can_store_dword(ctx->engine);
}
static const struct igt_coherency_mode {
const char *name;
- int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
- int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
- bool (*valid)(struct drm_i915_private *i915);
+ int (*set)(struct context *ctx, unsigned long offset, u32 v);
+ int (*get)(struct context *ctx, unsigned long offset, u32 *v);
+ bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
{ "cpu", cpu_set, cpu_get, always_valid },
{ "gtt", gtt_set, gtt_get, needs_fence_registers },
@@ -273,19 +279,37 @@ static const struct igt_coherency_mode {
{ },
};
+static struct intel_engine_cs *
+random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ for_each_uabi_engine(engine, i915)
+ if (count-- == 0)
+ return engine;
+
+ return NULL;
+}
+
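
random_engine() makes two passes over the uabi engine list: one to count the entries, then a second to walk to a uniformly drawn index. The same count-then-index pick over a toy linked list, with rand_r() standing in for i915_prandom_u32_max_state() (list contents are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static struct node *random_node(struct node *head, unsigned int *seed)
{
	struct node *n;
	unsigned int count = 0, pick;

	for (n = head; n; n = n->next)	/* pass 1: count the entries */
		count++;
	if (!count)
		return NULL;

	pick = rand_r(seed) % count;	/* uniform index in [0, count) */
	for (n = head; n; n = n->next)	/* pass 2: walk to the pick */
		if (pick-- == 0)
			return n;
	return NULL;			/* unreachable */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	unsigned int seed = 42;

	printf("picked %d\n", random_node(&a, &seed)->id);
	return 0;
}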
static int igt_gem_coherency(void *arg)
{
const unsigned int ncachelines = PAGE_SIZE/64;
- I915_RND_STATE(prng);
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
+ I915_RND_STATE(prng);
+ struct context ctx;
int err = 0;
- /* We repeatedly write, overwrite and read from a sequence of
+ /*
+ * We repeatedly write, overwrite and read from a sequence of
* cachelines in order to try and detect incoherency (unflushed writes
* from either the CPU or GPU). Each setter/getter uses our cache
* domain API which should prevent incoherency.
@@ -299,34 +323,36 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ ctx.engine = random_engine(i915, &prng);
+ GEM_BUG_ON(!ctx.engine);
+ pr_info("%s: using %s\n", __func__, ctx.engine->name);
+
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
- if (!over->valid(i915))
+ if (!over->valid(&ctx))
continue;
for (write = igt_coherency_mode; write->name; write++) {
if (!write->set)
continue;
- if (!write->valid(i915))
+ if (!write->valid(&ctx))
continue;
for (read = igt_coherency_mode; read->name; read++) {
if (!read->get)
continue;
- if (!read->valid(i915))
+ if (!read->valid(&ctx))
continue;
for_each_prime_number_from(count, 1, ncachelines) {
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto unlock;
+ ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(ctx.obj)) {
+ err = PTR_ERR(ctx.obj);
+ goto free;
}
i915_random_reorder(offsets, ncachelines, &prng);
@@ -334,7 +360,7 @@ static int igt_gem_coherency(void *arg)
values[n] = prandom_u32_state(&prng);
for (n = 0; n < count; n++) {
- err = over->set(obj, offsets[n], ~values[n]);
+ err = over->set(&ctx, offsets[n], ~values[n]);
if (err) {
pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
n, count, over->name, err);
@@ -343,7 +369,7 @@ static int igt_gem_coherency(void *arg)
}
for (n = 0; n < count; n++) {
- err = write->set(obj, offsets[n], values[n]);
+ err = write->set(&ctx, offsets[n], values[n]);
if (err) {
pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
n, count, write->name, err);
@@ -354,7 +380,7 @@ static int igt_gem_coherency(void *arg)
for (n = 0; n < count; n++) {
u32 found;
- err = read->get(obj, offsets[n], &found);
+ err = read->get(&ctx, offsets[n], &found);
if (err) {
pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
n, count, read->name, err);
@@ -372,20 +398,18 @@ static int igt_gem_coherency(void *arg)
}
}
- i915_gem_object_put(obj);
+ i915_gem_object_put(ctx.obj);
}
}
}
}
-unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+free:
kfree(offsets);
return err;
put_object:
- i915_gem_object_put(obj);
- goto unlock;
+ i915_gem_object_put(ctx.obj);
+ goto free;
}
int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
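
igt_gem_coherency() drives every (stale-write, write, read) triple from a NULL-terminated table of set/get/valid backends, so each cache-domain path is tested against every other. A compact userspace sketch of that function-pointer matrix (the two direct backends here are illustrative; the real table mixes cpu, gtt, wc and gpu paths):

#include <stdbool.h>
#include <stdio.h>

/* One "cache domain" backend, mirroring struct igt_coherency_mode. */
struct mode {
	const char *name;
	void (*set)(unsigned int *store, unsigned int v);
	unsigned int (*get)(const unsigned int *store);
	bool (*valid)(void);
};

static void direct_set(unsigned int *s, unsigned int v) { *s = v; }
static unsigned int direct_get(const unsigned int *s) { return *s; }
static bool always_valid(void) { return true; }

static const struct mode modes[] = {
	{ "cpu", direct_set, direct_get, always_valid },
	{ "alt", direct_set, direct_get, always_valid },
	{ NULL },
};

int main(void)
{
	const struct mode *over, *write, *read;
	unsigned int store;

	/* Exercise every (stale-write, write, read) combination. */
	for (over = modes; over->name; over++) {
		if (!over->valid())
			continue;
		for (write = modes; write->name; write++) {
			if (!write->valid())
				continue;
			for (read = modes; read->name; read++) {
				if (!read->valid())
					continue;
				over->set(&store, ~0u);	/* stale value */
				write->set(&store, 0xdeadbeef);
				if (read->get(&store) != 0xdeadbeef) {
					printf("incoherent: %s/%s/%s\n",
					       over->name, write->name,
					       read->name);
					return 1;
				}
			}
		}
	}
	printf("all combinations coherent\n");
	return 0;
}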
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 3e6f4a65d356..62fabc023a83 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -31,7 +32,6 @@ static int live_nop_switch(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
- enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
unsigned long n;
@@ -52,23 +52,21 @@ static int live_nop_switch(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
err = -ENOMEM;
- goto out_unlock;
+ goto out_file;
}
for (n = 0; n < nctx; n++) {
ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
- goto out_unlock;
+ goto out_file;
}
}
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
unsigned long end_time, prime;
ktime_t times[2] = {};
@@ -78,7 +76,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unlock;
+ goto out_file;
}
i915_request_add(rq);
}
@@ -86,7 +84,7 @@ static int live_nop_switch(void *arg)
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
err = -EIO;
- goto out_unlock;
+ goto out_file;
}
times[1] = ktime_get_raw();
@@ -96,7 +94,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
end_time = jiffies + i915_selftest.timeout_jiffies;
for_each_prime_number_from(prime, 2, 8192) {
@@ -106,7 +104,7 @@ static int live_nop_switch(void *arg)
rq = igt_request_alloc(ctx[n % nctx], engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unlock;
+ goto out_file;
}
/*
@@ -142,7 +140,7 @@ static int live_nop_switch(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ goto out_file;
pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -150,8 +148,235 @@ static int live_nop_switch(void *arg)
prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+struct parallel_switch {
+ struct task_struct *tsk;
+ struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+ struct parallel_switch *arg = data;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq = NULL;
+ int err, n;
+
+ err = 0;
+ for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ }
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+ struct parallel_switch *arg = data;
+ struct i915_request *rq = NULL;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+ int n;
+
+ count = 0;
+ do {
+ for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+ struct i915_request *prev = rq;
+ int err = 0;
+
+ rq = i915_request_create(arg->ce[n]);
+ if (IS_ERR(rq)) {
+ i915_request_put(prev);
+ return PTR_ERR(rq);
+ }
+
+ i915_request_get(rq);
+ if (prev) {
+ err = i915_request_await_dma_fence(rq, &prev->fence);
+ i915_request_put(prev);
+ }
+
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return err;
+ }
+ }
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_request_put(rq);
+
+ pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+ return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_switch1,
+ __live_parallel_switchN,
+ NULL,
+ };
+ struct parallel_switch *data = NULL;
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ int (* const *fn)(void *arg);
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ int n, m, count;
+ int err = 0;
+
+ /*
+ * Check we can process switches on all engines simultaneously.
+ */
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ engines = i915_gem_context_lock_engines(ctx);
+ count = engines->num_engines;
+
+ data = kcalloc(count, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ i915_gem_context_unlock_engines(ctx);
+ err = -ENOMEM;
+ goto out_file;
+ }
+
+ m = 0; /* Use the first context as our template for the engines */
+ for_each_gem_engine(ce, engines, it) {
+ err = intel_context_pin(ce);
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out;
+ }
+ data[m++].ce[0] = intel_context_get(ce);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ /* Clone the same set of engines into the other contexts */
+ for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ for (m = 0; m < count; m++) {
+ if (!data[m].ce[0])
+ continue;
+
+ ce = intel_context_create(ctx, data[m].ce[0]->engine);
+ if (IS_ERR(ce))
+ goto out;
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ goto out;
+ }
+
+ data[m].ce[n] = ce;
+ }
+ }
+
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ int n;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
+
+ for (n = 0; n < count; n++) {
+ if (!data[n].ce[0])
+ continue;
+
+ data[n].tsk = kthread_run(*fn, &data[n],
+ "igt/parallel:%s",
+ data[n].ce[0]->engine->name);
+ if (IS_ERR(data[n].tsk)) {
+ err = PTR_ERR(data[n].tsk);
+ break;
+ }
+ get_task_struct(data[n].tsk);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (n = 0; n < count; n++) {
+ int status;
+
+ if (IS_ERR_OR_NULL(data[n].tsk))
+ continue;
+
+ status = kthread_stop(data[n].tsk);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(data[n].tsk);
+ data[n].tsk = NULL;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+out:
+ for (n = 0; n < count; n++) {
+ for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
+ if (!data[n].ce[m])
+ continue;
+
+ intel_context_unpin(data[n].ce[m]);
+ intel_context_put(data[n].ce[m]);
+ }
+ }
+ kfree(data);
+out_file:
mock_file_free(i915, file);
return err;
}
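
live_parallel_switch() fans out one kthread per engine, yields so they all start, then stops each thread and folds the first failing status into err. A pthread sketch of the same fan-out/join-and-collect shape (the worker payload is illustrative):

#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

static void *worker(void *data)
{
	long id = (long)data;

	printf("worker %ld: switching contexts\n", id);
	return (void *)0;	/* 0 on success, like the kthread's int */
}

int main(void)
{
	pthread_t tsk[NR_WORKERS];
	long n;
	int err = 0;

	for (n = 0; n < NR_WORKERS; n++)
		if (pthread_create(&tsk[n], NULL, worker, (void *)n))
			return 1;

	/* join them all, keeping the first failure, as kthread_stop() does */
	for (n = 0; n < NR_WORKERS; n++) {
		void *status;

		pthread_join(tsk[n], &status);
		if (status && !err)
			err = (int)(long)status;
	}
	return err;
}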
@@ -166,28 +391,20 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}
-static int gpu_fill(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+static int gpu_fill(struct intel_context *ce,
+ struct drm_i915_gem_object *obj,
unsigned int dw)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_vma *vma;
int err;
- GEM_BUG_ON(obj->base.size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(obj->base.size > ce->vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
-
err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
if (err)
return err;
@@ -200,9 +417,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
* whilst checking that each context provides a unique view
* into the object.
*/
- err = igt_gpu_fill_dw(vma,
- ctx,
- engine,
+ err = igt_gpu_fill_dw(ce, vma,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
@@ -305,22 +520,21 @@ static int file_add_object(struct drm_file *file,
}
static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
+create_test_object(struct i915_address_space *vm,
struct drm_file *file,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm;
u64 size;
int err;
/* Keep in GEM's good graces */
- i915_retire_requests(ctx->i915);
+ intel_gt_retire_requests(vm->gt);
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
- obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+ obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
if (IS_ERR(obj))
return obj;
@@ -348,11 +562,49 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
return npages / DW_PER_PAGE;
}
+static void throttle_release(struct i915_request **q, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (IS_ERR_OR_NULL(q[i]))
+ continue;
+
+ i915_request_put(fetch_and_zero(&q[i]));
+ }
+}
+
+static int throttle(struct intel_context *ce,
+ struct i915_request **q, int count)
+{
+ int i;
+
+ if (!IS_ERR_OR_NULL(q[0])) {
+ if (i915_request_wait(q[0],
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ return -EINTR;
+
+ i915_request_put(q[0]);
+ }
+
+ for (i = 0; i < count - 1; i++)
+ q[i] = q[i + 1];
+
+ q[i] = intel_context_create_request(ce);
+ if (IS_ERR(q[i]))
+ return PTR_ERR(q[i]);
+
+ i915_request_get(q[i]);
+ i915_request_add(q[i]);
+
+ return 0;
+}
+
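
throttle() bounds how much work each context can have in flight: wait for the oldest request in q[0] to complete, slide the window down one slot, then create and submit a fresh request into the freed tail. A toy sketch of the queue mechanics, with integer tickets standing in for i915_request pointers:

#include <stdio.h>

#define WINDOW 5

static int next_ticket;

/* i915_request_wait() analogue: block on the oldest submission */
static void wait_for(int ticket)
{
	if (ticket)
		printf("waiting on ticket %d\n", ticket);
}

static void throttle(int *q, int count)
{
	int i;

	wait_for(q[0]);			/* oldest request first */
	for (i = 0; i < count - 1; i++)	/* slide the window down a slot */
		q[i] = q[i + 1];
	q[i] = ++next_ticket;		/* submit a new request at the tail */
}

int main(void)
{
	int q[WINDOW] = { 0 };		/* 0 == empty slot, like the NULLs */
	int n;

	for (n = 0; n < 12; n++)
		throttle(q, WINDOW);
	printf("window now holds tickets %d..%d\n", q[0], q[WINDOW - 1]);
	return 0;
}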
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -364,9 +616,10 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
+ struct i915_request *tq[5] = {};
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
@@ -382,39 +635,53 @@ static int igt_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ goto out_file;
ncontexts = 0;
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
+ struct intel_context *ce;
- ctx = live_context(i915, file);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -424,6 +691,9 @@ static int igt_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+
+ intel_context_put(ce);
+ kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -441,10 +711,10 @@ static int igt_ctx_exec(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
if (err)
@@ -459,9 +729,9 @@ out_unlock:
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
+ struct i915_request *tq[5] = {};
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
int err = 0;
@@ -478,24 +748,22 @@ static int igt_shared_ctx_exec(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
- goto out_unlock;
+ goto out_file;
}
if (!parent->vm) { /* not full-ppgtt; nothing to share */
err = 0;
- goto out_unlock;
+ goto out_file;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
@@ -509,6 +777,7 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
+ struct intel_context *ce;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -516,23 +785,38 @@ static int igt_shared_ctx_exec(void *arg)
goto out_test;
}
+ mutex_lock(&ctx->mutex);
__assign_ppgtt(ctx, parent->vm);
+ mutex_unlock(&ctx->mutex);
+
+ ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(parent, file, &objects);
+ obj = create_test_object(parent->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
+ engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ intel_context_put(ce);
+ kernel_context_close(ctx);
+ goto out_test;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ intel_context_put(ce);
kernel_context_close(ctx);
goto out_test;
}
@@ -545,6 +829,7 @@ static int igt_shared_ctx_exec(void *arg)
ndwords++;
ncontexts++;
+ intel_context_put(ce);
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
@@ -562,16 +847,13 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
}
out_test:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
mock_file_free(i915, file);
return err;
}
@@ -604,6 +886,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -681,10 +965,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (err)
goto skip_request;
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
-
+ i915_vma_unpin_and_release(&batch, 0);
i915_vma_unpin(vma);
*rq_out = i915_request_get(rq);
@@ -698,8 +979,7 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
err_vma:
i915_vma_unpin(vma);
@@ -860,8 +1140,8 @@ out:
igt_spinner_end(spin);
if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(ce->engine->i915,
- 0, MAX_SCHEDULE_TIMEOUT);
+ ret = intel_gt_wait_for_idle(ce->engine->gt,
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -887,7 +1167,7 @@ __sseu_test(const char *name,
if (ret)
return ret;
- ret = __intel_context_reconfigure_sseu(ce, sseu);
+ ret = intel_context_reconfigure_sseu(ce, sseu);
if (ret)
goto out_spin;
@@ -908,106 +1188,97 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
const char *name,
unsigned int flags)
{
- struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
- struct intel_sseu pg_sseu;
- struct drm_file *file;
- int ret;
-
- if (INTEL_GEN(i915) < 9 || !engine)
- return 0;
-
- if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
- return 0;
+ int inst = 0;
+ int ret = 0;
- if (hweight32(engine->sseu.slice_mask) < 2)
+ if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg)
return 0;
- /*
- * Gen11 VME friendly power-gated configuration with half enabled
- * sub-slices.
- */
- pg_sseu = engine->sseu;
- pg_sseu.slice_mask = 1;
- pg_sseu.subslice_mask =
- ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
-
- pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(engine->sseu.slice_mask),
- hweight32(pg_sseu.slice_mask));
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
if (flags & TEST_RESET)
igt_global_reset_lock(&i915->gt);
- mutex_lock(&i915->drm.struct_mutex);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
- i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
-
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
}
- ce = i915_gem_context_get_engine(ctx, RCS0);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto out_put;
- }
+ do {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ struct intel_sseu pg_sseu;
- ret = intel_context_pin(ce);
- if (ret)
- goto out_context;
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_RENDER,
+ inst++);
+ if (!engine)
+ break;
- /* First set the default mask. */
- ret = __sseu_test(name, flags, ce, obj, engine->sseu);
- if (ret)
- goto out_fail;
+ if (hweight32(engine->sseu.slice_mask) < 2)
+ continue;
- /* Then set a power-gated configuration. */
- ret = __sseu_test(name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /*
+ * Gen11 VME friendly power-gated configuration with
+ * half enabled sub-slices.
+ */
+ pg_sseu = engine->sseu;
+ pg_sseu.slice_mask = 1;
+ pg_sseu.subslice_mask =
+ ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
+
+ pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
+ engine->name, name, flags,
+ hweight32(engine->sseu.slice_mask),
+ hweight32(pg_sseu.slice_mask));
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_put;
+ }
- /* Back to defaults. */
- ret = __sseu_test(name, flags, ce, obj, engine->sseu);
- if (ret)
- goto out_fail;
+ ret = intel_context_pin(ce);
+ if (ret)
+ goto out_ce;
- /* One last power-gated configuration for the road. */
- ret = __sseu_test(name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
+ /* First set the default mask. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Then set a power-gated configuration. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+ /* Back to defaults. */
+ ret = __sseu_test(name, flags, ce, obj, engine->sseu);
+ if (ret)
+ goto out_unpin;
-out_fail:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ /* One last power-gated configuration for the road. */
+ ret = __sseu_test(name, flags, ce, obj, pg_sseu);
+ if (ret)
+ goto out_unpin;
+
+out_unpin:
+ intel_context_unpin(ce);
+out_ce:
+ intel_context_put(ce);
+ } while (!ret);
+
+ if (igt_flush_test(i915))
ret = -EIO;
- intel_context_unpin(ce);
-out_context:
- intel_context_put(ce);
out_put:
i915_gem_object_put(obj);
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
if (flags & TEST_RESET)
igt_global_reset_unlock(&i915->gt);
- mock_file_free(i915, file);
-
if (ret)
pr_err("%s: Failed with %d!\n", name, ret);
@@ -1041,6 +1312,7 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_request *tq[5] = {};
struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx, ndwords, dw;
@@ -1061,52 +1333,63 @@ static int igt_ctx_readonly(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
- goto out_unlock;
+ goto out_file;
}
- vm = ctx->vm ?: &i915->ggtt.alias->vm;
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
+ rcu_read_unlock();
err = 0;
- goto out_unlock;
+ goto out_file;
}
+ rcu_read_unlock();
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- unsigned int id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
+ for_each_gem_engine(ce,
+ i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_can_store_dword(ce->engine))
continue;
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(ce->vm, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (prandom_u32_state(&prng) & 1)
i915_gem_object_set_readonly(obj);
}
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(ce, obj, dw);
if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->vm), err);
- goto out_unlock;
+ ce->engine->name,
+ yesno(!!rcu_access_pointer(ctx->vm)),
+ err);
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
+ }
+
+ err = throttle(ce, tq, ARRAY_SIZE(tq));
+ if (err) {
+ i915_gem_context_unlock_engines(ctx);
+ goto out_file;
}
if (++dw == max_dwords(obj)) {
@@ -1115,6 +1398,7 @@ static int igt_ctx_readonly(void *arg)
}
ndwords++;
}
+ i915_gem_context_unlock_engines(ctx);
}
pr_info("Submitted %lu dwords (across %u engines)\n",
ndwords, RUNTIME_INFO(i915)->num_engines);
@@ -1137,19 +1421,19 @@ static int igt_ctx_readonly(void *arg)
dw += rem;
}
-out_unlock:
+out_file:
+ throttle_release(tq, ARRAY_SIZE(tq));
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+static int check_scratch(struct i915_address_space *vm, u64 offset)
{
struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->vm->mm,
+ __drm_mm_interval_first(&vm->mm,
offset, offset + sizeof(u32) - 1);
if (!node || node->start > offset)
return 0;
@@ -1167,6 +1451,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cmd;
@@ -1197,17 +1482,20 @@ static int write_to_scratch(struct i915_gem_context *ctx,
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto err_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1229,12 +1517,11 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto skip_request;
- i915_vma_unpin(vma);
- i915_vma_close(vma);
- i915_vma_put(vma);
+ i915_vma_unpin_and_release(&vma, 0);
i915_request_add(rq);
+ i915_vm_put(vm);
return 0;
skip_request:
@@ -1243,6 +1530,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
+err_vm:
+ i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@@ -1254,6 +1543,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
{
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
@@ -1296,17 +1586,20 @@ static int read_from_scratch(struct i915_gem_context *ctx,
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ intel_gt_chipset_flush(engine->gt);
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err;
+ goto err_vm;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
if (err)
- goto err;
+ goto err_vm;
- err = check_scratch(ctx, offset);
+ err = check_scratch(vm, offset);
if (err)
goto err_unpin;
@@ -1337,12 +1630,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
if (err)
- goto err;
+ goto err_vm;
cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
- goto err;
+ goto err_vm;
}
*value = cmd[result / sizeof(*cmd)];
@@ -1357,6 +1650,8 @@ err_request:
i915_request_add(rq);
err_unpin:
i915_vma_unpin(vma);
+err_vm:
+ i915_vm_put(vm);
err:
i915_gem_object_put(obj);
return err;
@@ -1371,7 +1666,6 @@ static int igt_vm_isolation(void *arg)
struct drm_file *file;
I915_RND_STATE(prng);
unsigned long count;
- unsigned int id;
u64 vm_total;
int err;
@@ -1387,34 +1681,32 @@ static int igt_vm_isolation(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
-
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_file;
ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
- goto out_unlock;
+ goto out_file;
}
ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
- goto out_unlock;
+ goto out_file;
}
/* We can only test vm isolation, if the vm are distinct */
if (ctx_a->vm == ctx_b->vm)
- goto out_unlock;
+ goto out_file;
vm_total = ctx_a->vm->total;
GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
unsigned long this = 0;
@@ -1436,7 +1728,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
- goto out_unlock;
+ goto out_file;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1445,7 +1737,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_unlock;
+ goto out_file;
}
this++;
@@ -1455,30 +1747,13 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %d engines\n",
count, RUNTIME_INFO(i915)->num_engines);
-out_unlock:
+out_file:
if (igt_live_test_end(&t))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
mock_file_free(i915, file);
return err;
}
-static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (engines == ALL_ENGINES)
- return "all";
-
- for_each_engine_masked(engine, i915, engines, tmp)
- return engine->name;
-
- return "none";
-}
-
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
return !ce->state;
@@ -1506,13 +1781,9 @@ static int mock_context_barrier(void *arg)
* a request; useful for retiring old state after loading new.
*/
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = mock_context(i915, "mock");
- if (!ctx) {
- err = -ENOMEM;
- goto unlock;
- }
+ if (!ctx)
+ return -ENOMEM;
counter = 0;
err = context_barrier_task(ctx, 0,
@@ -1585,8 +1856,6 @@ static int mock_context_barrier(void *arg)
out:
mock_context_close(ctx);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
#undef pr_fmt
#define pr_fmt(x) x
@@ -1614,6 +1883,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
+ SUBTEST(live_parallel_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 1d27babff0ce..29b2077b73d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -10,6 +10,7 @@
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
+#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
struct tile {
@@ -76,18 +77,103 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
static int check_partial_mapping(struct drm_i915_gem_object *obj,
const struct tile *tile,
- unsigned long end_time)
+ struct rnd_state *prng)
{
- const unsigned int nreal = obj->scratch / PAGE_SIZE;
const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_ggtt_view view;
struct i915_vma *vma;
unsigned long page;
+ u32 __iomem *io;
+ struct page *p;
+ unsigned int n;
+ u64 offset;
+ u32 *cpu;
int err;
- if (igt_timeout(end_time,
- "%s: timed out before tiling=%d stride=%d\n",
- __func__, tile->tiling, tile->stride))
- return -EINTR;
+ err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
+ if (err) {
+ pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
+ tile->tiling, tile->stride, err);
+ return err;
+ }
+
+ GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
+ GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err) {
+ pr_err("Failed to flush to GTT write domain; err=%d\n", err);
+ return err;
+ }
+
+ page = i915_prandom_u32_max_state(npages, prng);
+ view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
+ }
+
+ n = page - view.partial.offset;
+ GEM_BUG_ON(n >= view.partial.size);
+
+ io = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(io)) {
+ pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
+ page, (int)PTR_ERR(io));
+ err = PTR_ERR(io);
+ goto out;
+ }
+
+ iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
+ i915_vma_unpin_iomap(vma);
+
+ offset = tiled_offset(tile, page << PAGE_SHIFT);
+ if (offset >= obj->base.size)
+ goto out;
+
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
+
+ p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ cpu = kmap(p) + offset_in_page(offset);
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ if (*cpu != (u32)page) {
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ page, n,
+ view.partial.offset,
+ view.partial.size,
+ vma->size >> PAGE_SHIFT,
+ tile->tiling ? tile_row_pages(obj) : 0,
+ vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
+ offset >> PAGE_SHIFT,
+ (unsigned int)offset_in_page(offset),
+ offset,
+ (u32)page, *cpu);
+ err = -EINVAL;
+ }
+ *cpu = 0;
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ kunmap(p);
+
+out:
+ i915_vma_destroy(vma);
+ return err;
+}
+
+static int check_partial_mappings(struct drm_i915_gem_object *obj,
+ const struct tile *tile,
+ unsigned long end_time)
+{
+ const unsigned int nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_vma *vma;
+ unsigned long page;
+ int err;
err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
if (err) {
@@ -170,11 +256,42 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
return err;
i915_vma_destroy(vma);
+
+ if (igt_timeout(end_time,
+ "%s: timed out after tiling=%d stride=%d\n",
+ __func__, tile->tiling, tile->stride))
+ return -EINTR;
}
return 0;
}
+static unsigned int
+setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) <= 2) {
+ tile->height = 16;
+ tile->width = 128;
+ tile->size = 11;
+ } else if (tile->tiling == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(i915)) {
+ tile->height = 32;
+ tile->width = 128;
+ tile->size = 12;
+ } else {
+ tile->height = 8;
+ tile->width = 512;
+ tile->size = 12;
+ }
+
+ if (INTEL_GEN(i915) < 4)
+ return 8192 / tile->width;
+ else if (INTEL_GEN(i915) < 7)
+ return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
+ else
+ return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
+}
+
static int igt_partial_tiling(void *arg)
{
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
@@ -184,6 +301,9 @@ static int igt_partial_tiling(void *arg)
int tiling;
int err;
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
/* We want to check the page mapping and fencing of a large object
* mmapped through the GTT. The object we create is larger than can
* possibly be mmaped as a whole, and so we must use partial GGTT vma.
@@ -205,7 +325,6 @@ static int igt_partial_tiling(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
if (1) {
@@ -219,7 +338,7 @@ static int igt_partial_tiling(void *arg)
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
tile.tiling = I915_TILING_NONE;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err && err != -EINTR)
goto out_unlock;
}
@@ -241,10 +360,10 @@ static int igt_partial_tiling(void *arg)
tile.tiling = tiling;
switch (tiling) {
case I915_TILING_X:
- tile.swizzle = i915->mm.bit_6_swizzle_x;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
break;
case I915_TILING_Y:
- tile.swizzle = i915->mm.bit_6_swizzle_y;
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
break;
}
@@ -253,31 +372,11 @@ static int igt_partial_tiling(void *arg)
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
continue;
- if (INTEL_GEN(i915) <= 2) {
- tile.height = 16;
- tile.width = 128;
- tile.size = 11;
- } else if (tile.tiling == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(i915)) {
- tile.height = 32;
- tile.width = 128;
- tile.size = 12;
- } else {
- tile.height = 8;
- tile.width = 512;
- tile.size = 12;
- }
-
- if (INTEL_GEN(i915) < 4)
- max_pitch = 8192 / tile.width;
- else if (INTEL_GEN(i915) < 7)
- max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
- else
- max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
+ max_pitch = setup_tile_size(&tile, i915);
for (pitch = max_pitch; pitch; pitch >>= 1) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -285,7 +384,7 @@ static int igt_partial_tiling(void *arg)
if (pitch > 2 && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch - 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -294,7 +393,7 @@ static int igt_partial_tiling(void *arg)
if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
tile.stride = tile.width * (pitch + 1);
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -305,7 +404,7 @@ static int igt_partial_tiling(void *arg)
if (INTEL_GEN(i915) >= 4) {
for_each_prime_number(pitch, max_pitch) {
tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
+ err = check_partial_mappings(obj, &tile, end);
if (err == -EINTR)
goto next_tiling;
if (err)
@@ -318,7 +417,100 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_smoke_tiling(void *arg)
+{
+ const unsigned int nreal = 1 << 12; /* largest tile row x2 */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ IGT_TIMEOUT(end);
+ int err;
+
+ if (!i915_ggtt_has_aperture(&i915->ggtt))
+ return 0;
+
+ /*
+	 * igt_partial_tiling() does an exhaustive check of partial tiling
+	 * chunking, but will undoubtedly run out of time. Here, we do a
+	 * randomised search and hope that, over many 1s runs with different
+	 * seeds, we achieve a thorough check.
+ *
+ * Remember to look at the st_seed if we see a flip-flop in BAT!
+ */
+
+ if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ return 0;
+
+ obj = huge_gem_object(i915,
+ nreal << PAGE_SHIFT,
+ (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ count = 0;
+ do {
+ struct tile tile;
+
+ tile.tiling =
+ i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
+ switch (tile.tiling) {
+ case I915_TILING_NONE:
+ tile.height = 1;
+ tile.width = 1;
+ tile.size = 0;
+ tile.stride = 0;
+ tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
+ break;
+
+ case I915_TILING_X:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ tile.swizzle = i915->ggtt.bit_6_swizzle_y;
+ break;
+ }
+
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
+ tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+ continue;
+
+ if (tile.tiling != I915_TILING_NONE) {
+ unsigned int max_pitch = setup_tile_size(&tile, i915);
+
+ tile.stride =
+ i915_prandom_u32_max_state(max_pitch, &prng);
+ tile.stride = (1 + tile.stride) * tile.width;
+ if (INTEL_GEN(i915) < 4)
+ tile.stride = rounddown_pow_of_two(tile.stride);
+ }
+
+ err = check_partial_mapping(obj, &tile, &prng);
+ if (err)
+ break;
+
+ count++;
+ } while (!__igt_timeout(end, NULL));
+
+ pr_info("%s: Completed %lu trials\n", __func__, count);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_put(obj);
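
igt_smoke_tiling() trades the exhaustive sweep of igt_partial_tiling() for a seeded random walk, so any failure can be replayed by re-running with the reported st_seed. A sketch of that log-the-seed discipline (the xorshift PRNG and trial body are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t xorshift32(uint32_t *state)
{
	uint32_t x = *state;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return *state = x;
}

int main(int argc, char **argv)
{
	/* accept a replay seed, otherwise derive one and report it */
	uint32_t seed = argc > 1 ? (uint32_t)strtoul(argv[1], NULL, 0)
				 : ((uint32_t)time(NULL) | 1);
	uint32_t prng = seed;
	unsigned long count;

	printf("st_seed=%#x\n", seed);	/* quote this to replay a failure */
	for (count = 0; count < 1000; count++) {
		uint32_t tiling = xorshift32(&prng) % 3; /* none/X/Y */

		(void)tiling;	/* ... one randomised trial goes here ... */
	}
	printf("completed %lu trials\n", count);
	return 0;
}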
@@ -329,20 +521,19 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct i915_vma *vma;
- int err;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ for_each_uabi_engine(engine, i915) {
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
rq = i915_request_create(engine->kernel_context);
if (IS_ERR(rq)) {
@@ -358,12 +549,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
i915_vma_unlock(vma);
i915_request_add(rq);
+ i915_vma_unpin(vma);
+ if (err)
+ return err;
}
- i915_vma_unpin(vma);
i915_gem_object_put(obj); /* leave it only alive via its active ref */
-
- return err;
+ return 0;
}
static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -386,21 +578,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
-
intel_gt_pm_get(&i915->gt);
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
+ cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
+ igt_flush_test(i915);
intel_gt_pm_put(&i915->gt);
-
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_driver_register__shrinker(i915);
}
@@ -490,9 +675,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = make_obj_busy(obj);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
@@ -515,6 +698,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_partial_tiling),
+ SUBTEST(igt_smoke_tiling),
SUBTEST(igt_mmap_offset_exhaustion),
};
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index c21d747e7d05..e8132aca0bb6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -3,40 +3,241 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/sort.h>
+
#include "gt/intel_gt.h"
+#include "gt/intel_engine_user.h"
#include "i915_selftest.h"
+#include "gem/i915_gem_context.h"
#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"
-static int igt_fill_blt(void *arg)
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static int __perf_fill_blt(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+ int err;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_fill_blt(obj, ce, 0);
+ if (err)
+ return err;
+
+ err = i915_gem_object_wait(obj,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
+ engine->name,
+ obj->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * obj->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
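
__perf_fill_blt() times five fill passes, sorts them, and reports a trimmed, median-weighted throughput: the numerator 4 * size accounts for four passes' worth of bytes, the denominator t[1] + 2 * t[2] + t[3] double-counts the median while discarding the fastest and slowest samples, the factor of 1000000000 converts nanoseconds to seconds, and the final >> 20 converts bytes/s to MiB/s. The same reduction over plain int64 nanosecond samples (sample values are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_i64(const void *A, const void *B)
{
	const int64_t *a = A, *b = B;

	return (*a > *b) - (*a < *b);	/* wrap_ktime_compare() analogue */
}

static uint64_t mib_per_sec(uint64_t size, int64_t t[5])
{
	qsort(t, 5, sizeof(*t), cmp_i64);
	/* 4 passes of `size` bytes over the trimmed, median-weighted time */
	return (4 * size * UINT64_C(1000000000) /
		(uint64_t)(t[1] + 2 * t[2] + t[3])) >> 20;
}

int main(void)
{
	int64_t t[5] = { 900000, 1000000, 1100000, 1050000, 5000000 }; /* ns */

	/* 64 MiB filled per pass, in roughly 1ms each */
	printf("%" PRIu64 " MiB/s\n", mib_per_sec(64 << 20, t));
	return 0;
}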
+static int perf_fill_blt(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
- struct drm_i915_gem_object *obj;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = __perf_fill_blt(obj);
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __perf_copy_blt(struct drm_i915_gem_object *src,
+ struct drm_i915_gem_object *dst)
+{
+ struct drm_i915_private *i915 = to_i915(src->base.dev);
+ int inst = 0;
+
+ do {
+ struct intel_engine_cs *engine;
+ ktime_t t[5];
+ int pass;
+
+ engine = intel_engine_lookup_user(i915,
+ I915_ENGINE_CLASS_COPY,
+ inst++);
+ if (!engine)
+ return 0;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ struct intel_context *ce = engine->kernel_context;
+ ktime_t t0, t1;
+ int err;
+
+ t0 = ktime_get();
+
+ err = i915_gem_object_copy_blt(src, dst, ce);
+ if (err)
+ return err;
+
+ err = i915_gem_object_wait(dst,
+ I915_WAIT_ALL,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
+ engine->name,
+ src->base.size >> 10,
+ div64_u64(mul_u32_u32(4 * src->base.size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+ } while (1);
+}
+
+static int perf_copy_blt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const unsigned long sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_2M,
+ SZ_64M
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+ struct drm_i915_gem_object *src, *dst;
+ int err;
+
+ src = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(src))
+ return PTR_ERR(src);
+
+ dst = i915_gem_object_create_internal(i915, sizes[i]);
+ if (IS_ERR(dst)) {
+ err = PTR_ERR(dst);
+ goto err_src;
+ }
+
+ err = __perf_copy_blt(src, dst);
+
+ i915_gem_object_put(dst);
+err_src:
+ i915_gem_object_put(src);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+struct igt_thread_arg {
+ struct drm_i915_private *i915;
struct rnd_state prng;
+ unsigned int n_cpus;
+};
+
+static int igt_fill_blt_thread(void *arg)
+{
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
- /*
- * XXX: needs some threads to scale all these tests, also maybe throw
- * in submission from higher priority context to see if we are
- * preempted for very large objects...
- */
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
- u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
- u32 phys_sz = sz % (max_block_size + 1);
- u32 val = prandom_u32_state(&prng);
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -65,9 +266,7 @@ static int igt_fill_blt(void *arg)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_fill_blt(obj, ce, val);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -100,28 +299,56 @@ err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+out_file:
+ mock_file_free(i915, file);
return err;
}
-static int igt_copy_blt(void *arg)
+static int igt_copy_blt_thread(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_context *ce = i915->engine[BCS0]->kernel_context;
+ struct igt_thread_arg *thread = arg;
+ struct drm_i915_private *i915 = thread->i915;
+ struct rnd_state *prng = &thread->prng;
struct drm_i915_gem_object *src, *dst;
- struct rnd_state prng;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ unsigned int prio;
IGT_TIMEOUT(end);
- u32 *vaddr;
- int err = 0;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- prandom_seed_state(&prng, i915_selftest.random_seed);
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
+
+ ce = i915_gem_context_get_engine(ctx, BCS0);
+ GEM_BUG_ON(IS_ERR(ce));
do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
- u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
- u32 phys_sz = sz % (max_block_size + 1);
- u32 val = prandom_u32_state(&prng);
+ u32 val = prandom_u32_state(prng);
+ u64 total = ce->vm->total;
+ u32 phys_sz;
+ u32 sz;
+ u32 *vaddr;
u32 i;
+ if (i915_is_ggtt(ce->vm))
+ total = div64_u64(total, thread->n_cpus);
+
+ sz = min_t(u64, total >> 4, prandom_u32_state(prng));
+ phys_sz = sz % (max_block_size + 1);
+
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -166,9 +393,7 @@ static int igt_copy_blt(void *arg)
if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
- mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;
@@ -205,12 +430,85 @@ err_flush:
if (err == -ENOMEM)
err = 0;
+ intel_context_put(ce);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int igt_threaded_blt(struct drm_i915_private *i915,
+ int (*blt_fn)(void *arg))
+{
+ struct igt_thread_arg *thread;
+ struct task_struct **tsk;
+ I915_RND_STATE(prng);
+ unsigned int n_cpus;
+ unsigned int i;
+ int err = 0;
+
+ n_cpus = num_online_cpus() + 1;
+
+ tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL);
+ if (!tsk)
+ return 0;
+
+ thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
+ if (!thread) {
+ kfree(tsk);
+ return 0;
+ }
+
+ for (i = 0; i < n_cpus; ++i) {
+ thread[i].i915 = i915;
+ thread[i].n_cpus = n_cpus;
+ thread[i].prng =
+ I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
+
+ tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i);
+ if (IS_ERR(tsk[i])) {
+ err = PTR_ERR(tsk[i]);
+ break;
+ }
+
+ get_task_struct(tsk[i]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ for (i = 0; i < n_cpus; ++i) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[i]))
+ continue;
+
+ status = kthread_stop(tsk[i]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[i]);
+ }
+
+ kfree(tsk);
+ kfree(thread);
+
return err;
}
+static int igt_fill_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_fill_blt_thread);
+}
+
+static int igt_copy_blt(void *arg)
+{
+ return igt_threaded_blt(arg, igt_copy_blt_thread);
+}
+
int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(perf_fill_blt),
+ SUBTEST(perf_copy_blt),
SUBTEST(igt_fill_blt),
SUBTEST(igt_copy_blt),
};
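igt_threaded_blt() above is an instance of the usual kthread fan-out/join pattern: spawn one thread per online CPU (plus one), take an extra task_struct reference so a thread cannot vanish before it is joined, then kthread_stop() each one and fold its exit code into the overall result. A stripped-down sketch of just that plumbing, assuming a worker that polls kthread_should_stop(); the selftest passes per-thread state instead of NULL:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>

static int worker(void *arg)
{
	/* stand-in body; the selftest loops until an IGT_TIMEOUT instead */
	while (!kthread_should_stop())
		cond_resched();

	return 0;
}

static int run_threads(unsigned int n)
{
	struct task_struct **tsk;
	unsigned int i;
	int err = 0;

	tsk = kcalloc(n, sizeof(*tsk), GFP_KERNEL);
	if (!tsk)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		tsk[i] = kthread_run(worker, NULL, "sketch/%d", i);
		if (IS_ERR(tsk[i])) {
			err = PTR_ERR(tsk[i]);
			break;
		}

		/* hold a reference so the thread can be joined safely */
		get_task_struct(tsk[i]);
	}

	yield(); /* let everyone start before we begin stopping them */

	for (i = 0; i < n; i++) {
		int status;

		if (IS_ERR_OR_NULL(tsk[i]))
			continue;

		/* join: kthread_stop() returns the thread's exit code */
		status = kthread_stop(tsk[i]);
		if (status && !err)
			err = status;

		put_task_struct(tsk[i]);
	}

	kfree(tsk);
	return err;
}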
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 94a15e3f6db8..34932871b3a5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
goto out;
}
- mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 57ece53c1075..6718da20f35d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"
@@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma,
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);
+ intel_gt_chipset_flush(vma->vm->gt);
+
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -101,40 +104,35 @@ err:
return ERR_PTR(err);
}
-int igt_gpu_fill_dw(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset,
- unsigned long count,
- u32 val)
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val)
{
- struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *batch;
unsigned int flags;
int err;
- GEM_BUG_ON(vma->size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+ GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
GEM_BUG_ON(!i915_vma_is_pinned(vma));
batch = igt_emit_store_dw(vma, offset, count, val);
if (IS_ERR(batch))
return PTR_ERR(batch);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(ce->vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = rq->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
if (err)
goto err_request;
@@ -156,9 +154,7 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
i915_request_add(rq);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return 0;
@@ -167,7 +163,6 @@ skip_request:
err_request:
i915_request_add(rq);
err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
+ i915_vma_unpin_and_release(&batch, 0);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 361a7ef866b0..4221cf84d175 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -11,9 +11,11 @@
struct i915_request;
struct i915_gem_context;
-struct intel_engine_cs;
struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
@@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma,
unsigned long count,
u32 val);
-int igt_gpu_fill_dw(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset,
- unsigned long count,
- u32 val);
+int igt_gpu_fill_dw(struct intel_context *ce,
+ struct i915_vma *vma, u64 offset,
+ unsigned long count, u32 val);
#endif /* __IGT_GEM_UTILS_H__ */
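The new prototype moves request construction into the intel_context: callers resolve the context up front instead of handing the helper a (ctx, engine) pair to unpick. A hedged sketch of what a converted call site looks like; the function is hypothetical, the vma is assumed to be already pinned (the helper asserts this), and the offset, count and value are placeholders:

static int fill_object(struct i915_gem_context *ctx, struct i915_vma *vma)
{
	struct intel_context *ce;
	int err;

	/* resolve the blitter context; the helper no longer does this */
	ce = i915_gem_context_get_engine(ctx, BCS0);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* was: igt_gpu_fill_dw(vma, ctx, engine, offset, count, val) */
	err = igt_gpu_fill_dw(ce, vma, 0 /* offset */, 1024 /* count */,
			      0xc0ffee /* val */);

	intel_context_put(ce);
	return err;
}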
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index be8974ccff24..29b8984f0e47 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915,
{
struct i915_gem_context *ctx;
struct i915_gem_engines *e;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -23,6 +22,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ i915_gem_context_set_persistence(ctx);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e))
@@ -30,13 +31,8 @@ mock_context(struct drm_i915_private *i915,
RCU_INIT_POINTER(ctx->engines, e);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->hw_id_link);
mutex_init(&ctx->mutex);
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret < 0)
- goto err_engines;
-
if (name) {
struct i915_ppgtt *ppgtt;
@@ -48,14 +44,15 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_put;
+ mutex_lock(&ctx->mutex);
__set_ppgtt(ctx, &ppgtt->vm);
+ mutex_unlock(&ctx->mutex);
+
i915_vm_put(&ppgtt->vm);
}
return ctx;
-err_engines:
- free_engines(rcu_access_pointer(ctx->engines));
err_free:
kfree(ctx);
return NULL;
@@ -73,7 +70,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
- init_contexts(i915);
+ init_contexts(&i915->gem.contexts);
}
struct i915_gem_context *
@@ -82,8 +79,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
struct i915_gem_context *ctx;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 09c68dda2098..55317081d48b 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -120,7 +120,6 @@ __dma_fence_signal__notify(struct dma_fence *fence,
struct dma_fence_cb *cur, *tmp;
lockdep_assert_held(fence->lock);
- lockdep_assert_irqs_disabled();
list_for_each_entry_safe(cur, tmp, list, node) {
INIT_LIST_HEAD(&cur->node);
@@ -134,9 +133,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
+ unsigned long flags;
LIST_HEAD(signal);
- spin_lock(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed && list_empty(&b->signalers))
__intel_breadcrumbs_disarm_irq(b);
@@ -182,30 +182,23 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
}
}
- spin_unlock(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
list_for_each_safe(pos, next, &signal) {
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
struct list_head cb_list;
- spin_lock(&rq->lock);
+ spin_lock_irqsave(&rq->lock, flags);
list_replace(&rq->fence.cb_list, &cb_list);
__dma_fence_signal__timestamp(&rq->fence, timestamp);
__dma_fence_signal__notify(&rq->fence, &cb_list);
- spin_unlock(&rq->lock);
+ spin_unlock_irqrestore(&rq->lock, flags);
i915_request_put(rq);
}
}
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
-{
- local_irq_disable();
- intel_engine_breadcrumbs_irq(engine);
- local_irq_enable();
-}
-
static void signal_irq_work(struct irq_work *work)
{
struct intel_engine_cs *engine =
@@ -275,7 +268,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
@@ -325,7 +317,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
lockdep_assert_held(&rq->lock);
- lockdep_assert_irqs_disabled();
/*
* We must wait for b->irq_lock so that we know the interrupt handler
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index f55691d151ae..ee9d2bcd2c13 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -13,6 +13,7 @@
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_ring.h"
static struct i915_global_context {
struct i915_global base;
@@ -62,7 +63,7 @@ int __intel_context_do_pin(struct intel_context *ce)
}
err = 0;
- with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
err = ce->ops->pin(ce);
if (err)
goto err;
@@ -134,10 +135,11 @@ static int __context_pin_state(struct i915_vma *vma)
static void __context_unpin_state(struct i915_vma *vma)
{
- __i915_vma_unpin(vma);
i915_vma_make_shrinkable(vma);
+ __i915_vma_unpin(vma);
}
+__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -150,6 +152,7 @@ static void __intel_context_retire(struct i915_active *active)
intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
+
intel_context_put(ce);
}
@@ -219,12 +222,20 @@ intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct i915_address_space *vm;
+
GEM_BUG_ON(!engine->cops);
kref_init(&ce->ref);
ce->gem_context = ctx;
- ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+ rcu_read_lock();
+ vm = rcu_dereference(ctx->vm);
+ if (vm)
+ ce->vm = i915_vm_get(vm);
+ else
+ ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
+ rcu_read_unlock();
if (ctx->timeline)
ce->timeline = intel_timeline_get(ctx->timeline);
@@ -238,7 +249,7 @@ intel_context_init(struct intel_context *ce,
mutex_init(&ce->pin_mutex);
- i915_active_init(ctx->i915, &ce->active,
+ i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire);
}
@@ -298,14 +309,14 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
/* Only suitable for use in remotely modifying this context */
GEM_BUG_ON(rq->hw_context == ce);
- if (rq->timeline != tl) { /* beware timeline sharing */
+ if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
err = mutex_lock_interruptible_nested(&tl->mutex,
SINGLE_DEPTH_NESTING);
if (err)
return err;
/* Queue this switch after current activity by this context. */
- err = i915_active_request_set(&tl->last_request, rq);
+ err = i915_active_fence_set(&tl->last_request, rq);
mutex_unlock(&tl->mutex);
if (err)
return err;
@@ -319,7 +330,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
- return i915_active_ref(&ce->active, rq->timeline, rq);
+ return i915_active_add_request(&ce->active, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index dd742ac2fbdb..68b3d317d959 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
+#include "intel_ring_types.h"
#include "intel_timeline_types.h"
void intel_context_init(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index bf9cedfccbf0..6959b05ae5f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -58,6 +58,7 @@ struct intel_context {
u32 *lrc_reg_state;
u64 lrc_desc;
+ u32 tag; /* cookie passed to HW to track this context on submission */
unsigned int active_count; /* protected by timeline->mutex */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 22aab8593abf..bc3b72bfa9e3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -19,6 +19,7 @@
#include "intel_workarounds.h"
struct drm_printer;
+struct intel_gt;
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -89,38 +90,6 @@ struct drm_printer;
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-enum intel_engine_hangcheck_action {
- ENGINE_IDLE = 0,
- ENGINE_WAIT,
- ENGINE_ACTIVE_SEQNO,
- ENGINE_ACTIVE_HEAD,
- ENGINE_ACTIVE_SUBUNITS,
- ENGINE_WAIT_KICK,
- ENGINE_DEAD,
-};
-
-static inline const char *
-hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
-{
- switch (a) {
- case ENGINE_IDLE:
- return "idle";
- case ENGINE_WAIT:
- return "wait";
- case ENGINE_ACTIVE_SEQNO:
- return "active seqno";
- case ENGINE_ACTIVE_HEAD:
- return "active head";
- case ENGINE_ACTIVE_SUBUNITS:
- return "active subunits";
- case ENGINE_WAIT_KICK:
- return "wait kick";
- case ENGINE_DEAD:
- return "dead";
- }
-
- return "unknown";
-}
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
@@ -206,126 +175,13 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_HWS_CSB_WRITE_INDEX 0x1f
#define CNL_HWS_CSB_WRITE_INDEX 0x2f
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_ring_pin(struct intel_ring *ring);
-void intel_ring_reset(struct intel_ring *ring, u32 tail);
-unsigned int intel_ring_update_space(struct intel_ring *ring);
-void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct kref *ref);
-
-static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
-{
- kref_get(&ring->ref);
- return ring;
-}
-
-static inline void intel_ring_put(struct intel_ring *ring)
-{
- kref_put(&ring->ref, intel_ring_free);
-}
-
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-
-u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
-
-static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
-{
- /* Dummy function.
- *
- * This serves as a placeholder in the code so that the reader
- * can compare against the preceding intel_ring_begin() and
- * check that the number of dwords emitted matches the space
- * reserved for the command packet (i.e. the value passed to
- * intel_ring_begin()).
- */
- GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
-}
-
-static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
-{
- return pos & (ring->size - 1);
-}
-
-static inline bool
-intel_ring_offset_valid(const struct intel_ring *ring,
- unsigned int pos)
-{
- if (pos & -ring->size) /* must be strictly within the ring */
- return false;
-
- if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
- return false;
-
- return true;
-}
-
-static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
-{
- /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - rq->ring->vaddr;
- GEM_BUG_ON(offset > rq->ring->size);
- return intel_ring_wrap(rq->ring, offset);
-}
-
-static inline void
-assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
-{
- GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
-
- /*
- * "Ring Buffer Use"
- * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
- * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
- * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- *
- * We use ring->head as the last known location of the actual RING_HEAD,
- * it may have advanced but in the worst case it is equally the same
- * as ring->head and so we should never program RING_TAIL to advance
- * into the same cacheline as ring->head.
- */
-#define cacheline(a) round_down(a, CACHELINE_BYTES)
- GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
- tail < ring->head);
-#undef cacheline
-}
-
-static inline unsigned int
-intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
-{
- /* Whilst writes to the tail are strictly order, there is no
- * serialisation between readers and the writers. The tail may be
- * read by i915_request_retire() just as it is being updated
- * by execlists, as although the breadcrumb is complete, the context
- * switch hasn't been seen.
- */
- assert_ring_tail_valid(ring, tail);
- ring->tail = tail;
- return tail;
-}
-
-static inline unsigned int
-__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
-{
- /*
- * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
- * same cacheline, the Head Pointer must not be greater than the Tail
- * Pointer."
- */
- GEM_BUG_ON(!is_power_of_2(size));
- return (head - tail - CACHELINE_BYTES) & (size - 1);
-}
-
-int intel_engines_init_mmio(struct drm_i915_private *i915);
-int intel_engines_setup(struct drm_i915_private *i915);
-int intel_engines_init(struct drm_i915_private *i915);
-void intel_engines_cleanup(struct drm_i915_private *i915);
+int intel_engines_init_mmio(struct intel_gt *gt);
+int intel_engines_setup(struct intel_gt *gt);
+int intel_engines_init(struct intel_gt *gt);
+void intel_engines_cleanup(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -349,7 +205,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine);
void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
@@ -422,8 +277,9 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
engine->serial++; /* contexts lost */
}
-bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct intel_gt *gt);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
void intel_engines_reset_default_submission(struct intel_gt *gt);
@@ -434,61 +290,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...);
-static inline void intel_engine_context_in(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- if (engine->stats.active++ == 0)
- engine->stats.start = ktime_get();
- GEM_BUG_ON(engine->stats.active == 0);
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static inline void intel_engine_context_out(struct intel_engine_cs *engine)
-{
- unsigned long flags;
-
- if (READ_ONCE(engine->stats.enabled) == 0)
- return;
-
- write_seqlock_irqsave(&engine->stats.lock, flags);
-
- if (engine->stats.enabled > 0) {
- ktime_t last;
-
- if (engine->stats.active && --engine->stats.active == 0) {
- /*
- * Decrement the active context count and in case GPU
- * is now idle add up to the running total.
- */
- last = ktime_sub(ktime_get(), engine->stats.start);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- } else if (engine->stats.active == 0) {
- /*
- * After turning on engine stats, context out might be
- * the first event in which case we account from the
- * time stats gathering was turned on.
- */
- last = ktime_sub(ktime_get(), engine->stats.enabled_at);
-
- engine->stats.total = ktime_add(engine->stats.total,
- last);
- }
- }
-
- write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);
@@ -525,4 +326,22 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
#define ENGINE_MOCK 1
#define ENGINE_VIRTUAL 2
+static inline bool
+intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return false;
+
+ return intel_engine_has_preemption(engine);
+}
+
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return false;
+
+ return intel_engine_has_semaphores(engine);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4ce8626b140e..5ca3ec911e50 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -37,6 +37,7 @@
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
+#include "intel_ring.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -277,6 +278,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+ if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
+ return -EINVAL;
+
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
@@ -293,6 +297,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
+ engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
engine->i915 = gt->i915;
engine->gt = gt;
@@ -304,6 +309,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->instance = info->instance;
__sprint_engine_name(engine);
+ engine->props.heartbeat_interval_ms =
+ CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.preempt_timeout_ms =
+ CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+ engine->props.stop_timeout_ms =
+ CONFIG_DRM_I915_STOP_TIMEOUT;
+ engine->props.timeslice_duration_ms =
+ CONFIG_DRM_I915_TIMESLICE_DURATION;
+
/*
* To be overridden by the backend on setup. However to facilitate
* cleanup on error during setup, we always provide the destroy vfunc.
@@ -328,6 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
intel_engine_sanitize_mmio(engine);
gt->engine_class[info->class][info->instance] = engine;
+ gt->engine[id] = engine;
intel_engine_add_user(engine);
gt->i915->engine[id] = engine;
@@ -365,38 +380,40 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
}
}
-static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
+static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
__setup_engine_capabilities(engine);
}
/**
* intel_engines_cleanup() - free the resources allocated for Command Streamers
- * @i915: the i915 devic
+ * @gt: pointer to struct intel_gt
*/
-void intel_engines_cleanup(struct drm_i915_private *i915)
+void intel_engines_cleanup(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
engine->destroy(engine);
- i915->engine[id] = NULL;
+ gt->engine[id] = NULL;
+ gt->i915->engine[id] = NULL;
}
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
- * @i915: the i915 device
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init_mmio(struct drm_i915_private *i915)
+int intel_engines_init_mmio(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_device_info *device_info = mkwrite_device_info(i915);
const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
unsigned int mask = 0;
@@ -414,7 +431,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
if (!HAS_ENGINE(i915, i))
continue;
- err = intel_engine_setup(&i915->gt, i);
+ err = intel_engine_setup(gt, i);
if (err)
goto cleanup;
@@ -431,36 +448,36 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- intel_gt_check_and_clear_faults(&i915->gt);
+ intel_gt_check_and_clear_faults(gt);
- intel_setup_engine_capabilities(i915);
+ intel_setup_engine_capabilities(gt);
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
/**
* intel_engines_init() - init the Engine Command Streamers
- * @i915: i915 device private
+ * @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *i915)
+int intel_engines_init(struct intel_gt *gt)
{
int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(i915))
+ if (HAS_EXECLISTS(gt->i915))
init = intel_execlists_submission_init;
else
init = intel_ring_submission_init;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = init(engine);
if (err)
goto cleanup;
@@ -469,7 +486,7 @@ int intel_engines_init(struct drm_i915_private *i915)
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
@@ -513,7 +530,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
unsigned int flags;
flags = PIN_GLOBAL;
- if (!HAS_LLC(engine->i915))
+ if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
@@ -597,7 +614,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_breadcrumbs(engine);
intel_engine_init_execlists(engine);
- intel_engine_init_hangcheck(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
@@ -616,26 +632,26 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
/**
 * intel_engines_setup - setup engine state not requiring hw access
- * @i915: Device to setup.
+ * @gt: pointer to struct intel_gt
*
* Initializes engine structure members shared between legacy and execlists
* submission modes which do not require hardware access.
*
* Typically done early in the submission mode specific engine setup stage.
*/
-int intel_engines_setup(struct drm_i915_private *i915)
+int intel_engines_setup(struct intel_gt *gt)
{
int (*setup)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- if (HAS_EXECLISTS(i915))
+ if (HAS_EXECLISTS(gt->i915))
setup = intel_execlists_submission_setup;
else
setup = intel_ring_submission_setup;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
err = intel_engine_setup_common(engine);
if (err)
goto cleanup;
@@ -653,7 +669,7 @@ int intel_engines_setup(struct drm_i915_private *i915)
return 0;
cleanup:
- intel_engines_cleanup(i915);
+ intel_engines_cleanup(gt);
return err;
}
@@ -680,6 +696,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
engine->status_page.vma))
goto out_frame;
+ mutex_lock(&frame->timeline.mutex);
+
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
@@ -688,18 +706,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- frame->rq.timeline = &frame->timeline;
+ rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
+ spin_lock_irq(&engine->active.lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+ spin_unlock_irq(&engine->active.lock);
+
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
intel_timeline_unpin(&frame->timeline);
out_timeline:
+ mutex_unlock(&frame->timeline.mutex);
intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
@@ -730,6 +752,7 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
+ static struct lock_class_key kernel;
struct intel_context *ce;
int err;
@@ -745,6 +768,14 @@ create_kernel_context(struct intel_engine_cs *engine)
return ERR_PTR(err);
}
+ /*
+ * Give our perma-pinned kernel timelines a separate lockdep class,
+ * so that we can use them from within the normal user timelines
+ * should we need to inject GPU operations during their request
+ * construction.
+ */
+ lockdep_set_class(&ce->timeline->mutex, &kernel);
+
return ce;
}
@@ -814,8 +845,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- intel_context_unpin(engine->kernel_context);
- intel_context_put(engine->kernel_context);
+ if (engine->kernel_context) {
+ intel_context_unpin(engine->kernel_context);
+ intel_context_put(engine->kernel_context);
+ }
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
intel_wa_list_free(&engine->ctx_wa_list);
@@ -851,6 +884,21 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
return bbaddr;
}
+static unsigned long stop_timeout(const struct intel_engine_cs *engine)
+{
+ if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
+ return 0;
+
+ /*
+ * If we are doing a normal GPU reset, we can take our time and allow
+ * the engine to quiesce. We've stopped submission to the engine, and
+ * if we wait long enough an innocent context should complete and
+ * leave the engine idle. So they should not be caught unaware by
+ * the forthcoming GPU reset (which usually follows the stop_cs)!
+ */
+ return READ_ONCE(engine->props.stop_timeout_ms);
+}
+
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
@@ -868,7 +916,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
err = 0;
if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
- 1000, 0,
+ 1000, stop_timeout(engine),
NULL)) {
GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
err = -ETIMEDOUT;
@@ -948,6 +996,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
@@ -965,7 +1014,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
- for_each_instdone_slice_subslice(i915, slice, subslice) {
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
GEN7_SAMPLER_INSTDONE);
@@ -1031,6 +1080,25 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
+{
+ struct tasklet_struct *t = &engine->execlists.tasklet;
+
+ if (__tasklet_is_scheduled(t)) {
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->func(t->data);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
+ }
+
+ /* Otherwise flush the tasklet if it was running on another cpu */
+ tasklet_unlock_wait(t);
+}
+
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
* @engine: the intel_engine_cs
@@ -1049,21 +1117,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Waiting to drain ELSP? */
if (execlists_active(&engine->execlists)) {
- struct tasklet_struct *t = &engine->execlists.tasklet;
-
synchronize_hardirq(engine->i915->drm.pdev->irq);
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->func(t->data);
- tasklet_unlock(t);
- }
- local_bh_enable();
-
- /* Otherwise flush the tasklet if it was on another cpu */
- tasklet_unlock_wait(t);
+ intel_engine_flush_submission(engine);
if (execlists_active(&engine->execlists))
return false;
@@ -1093,7 +1149,7 @@ bool intel_engines_are_idle(struct intel_gt *gt)
if (!READ_ONCE(gt->awake))
return true;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1106,7 +1162,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->set_default_submission(engine);
}
@@ -1118,6 +1174,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
+ case 4:
+ return !IS_I965G(engine->i915); /* who knows! */
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
@@ -1193,6 +1251,38 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
}
}
+static struct intel_timeline *get_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *tl;
+
+ /*
+ * Even though we are holding the engine->active.lock here, there
+ * is no control over the submission queue per se and we are
+ * inspecting the active state at a random point in time, with an
+ * unknown queue. Play safe and make sure the timeline remains valid.
+ * (Only being used for pretty printing, one extra kref shouldn't
+ * cause a camel stampede!)
+ */
+ rcu_read_lock();
+ tl = rcu_dereference(rq->timeline);
+ if (!kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+
+ return tl;
+}
+
+static const char *repr_timer(const struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return "inactive";
+
+ if (timer_pending(t))
+ return "active";
+
+ return "expired";
+}
+
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
@@ -1254,19 +1344,21 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
- ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
- ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
- num_entries);
+ drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
+ yesno(test_bit(TASKLET_STATE_SCHED,
+ &engine->execlists.tasklet.state)),
+ enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+ repr_timer(&engine->execlists.preempt),
+ repr_timer(&engine->execlists.timer));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
- read, write,
- yesno(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)),
- enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ read, write, num_entries);
+
if (read >= num_entries)
read = 0;
if (write >= num_entries)
@@ -1280,33 +1372,45 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
execlists_active_lock_bh(execlists);
+ rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
char hdr[80];
int len;
len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d: ",
+ "\t\tActive[%d]: ",
(int)(port - execlists->active));
- if (!i915_request_signaled(rq))
+ if (!i915_request_signaled(rq)) {
+ struct intel_timeline *tl = get_timeline(rq);
+
len += snprintf(hdr + len, sizeof(hdr) - len,
"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
+ tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
+
+ if (tl)
+ intel_timeline_put(tl);
+ }
snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
+ struct intel_timeline *tl = get_timeline(rq);
char hdr[80];
snprintf(hdr, sizeof(hdr),
"\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
(int)(port - execlists->pending),
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
+ tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq));
print_request(m, rq, hdr);
+
+ if (tl)
+ intel_timeline_put(tl);
}
+ rcu_read_unlock();
execlists_active_unlock_bh(execlists);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
@@ -1372,8 +1476,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
- drm_printf(m, "\tHangcheck: %d ms ago\n",
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ drm_printf(m, "\tHeartbeat: %d ms ago\n",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ rcu_read_unlock();
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
@@ -1383,6 +1492,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
+ struct intel_timeline *tl = get_timeline(rq);
+
print_request(m, rq, "\t\tactive ");
drm_printf(m, "\t\tring->start: 0x%08x\n",
@@ -1395,18 +1506,27 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
- drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
- rq->timeline->hwsp_offset);
+
+ if (tl) {
+ drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
+ tl->hwsp_offset);
+ intel_timeline_put(tl);
+ }
print_request_ring(m, rq);
+
+ if (rq->hw_context->lrc_reg_state) {
+ drm_printf(m, "Logical Ring Context:\n");
+ hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+ }
}
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
- wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
new file mode 100644
index 000000000000..06aa14c7aa8c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -0,0 +1,234 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_request.h"
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
+
+/*
+ * While the engine is active, we send a periodic pulse along the engine
+ * to check on its health and to flush any idle-barriers. If that request
+ * is stuck and we fail to preempt it, we declare the engine hung and
+ * issue a reset -- in the hope that this restores progress.
+ */
+
+static bool next_heartbeat(struct intel_engine_cs *engine)
+{
+ long delay;
+
+ delay = READ_ONCE(engine->props.heartbeat_interval_ms);
+ if (!delay)
+ return false;
+
+ delay = msecs_to_jiffies_timeout(delay);
+ if (delay >= HZ)
+ delay = round_jiffies_up_relative(delay);
+ schedule_delayed_work(&engine->heartbeat.work, delay);
+
+ return true;
+}
+
+static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+ engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
+ i915_request_add_active_barriers(rq);
+}
+
+static void show_heartbeat(const struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct drm_printer p = drm_debug_printer("heartbeat");
+
+ intel_engine_dump(engine, &p,
+ "%s heartbeat {prio:%d} not ticking\n",
+ engine->name,
+ rq->sched.attr.priority);
+}
+
+static void heartbeat(struct work_struct *wrk)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+ };
+ struct intel_engine_cs *engine =
+ container_of(wrk, typeof(*engine), heartbeat.work.work);
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return;
+
+ rq = engine->heartbeat.systole;
+ if (rq && i915_request_completed(rq)) {
+ i915_request_put(rq);
+ engine->heartbeat.systole = NULL;
+ }
+
+ if (intel_gt_is_wedged(engine->gt))
+ goto out;
+
+ if (engine->heartbeat.systole) {
+ if (engine->schedule &&
+ rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+ /*
+ * Gradually raise the priority of the heartbeat to
+ * give high priority work [which presumably desires
+ * low latency and no jitter] the chance to naturally
+ * complete before being preempted.
+ */
+ attr.priority = I915_PRIORITY_MASK;
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+ if (rq->sched.attr.priority >= attr.priority)
+ attr.priority = I915_PRIORITY_BARRIER;
+
+ local_bh_disable();
+ engine->schedule(rq, &attr);
+ local_bh_enable();
+ } else {
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ show_heartbeat(rq, engine);
+
+ intel_gt_handle_error(engine->gt, engine->mask,
+ I915_ERROR_CAPTURE,
+ "stopped heartbeat on %s",
+ engine->name);
+ }
+ goto out;
+ }
+
+ if (engine->wakeref_serial == engine->serial)
+ goto out;
+
+ mutex_lock(&ce->timeline->mutex);
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq))
+ goto unlock;
+
+ idle_pulse(engine, rq);
+ if (i915_modparams.enable_hangcheck)
+ engine->heartbeat.systole = i915_request_get(rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out:
+ if (!next_heartbeat(engine))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+ intel_engine_pm_put(engine);
+}
+
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+{
+ if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ return;
+
+ next_heartbeat(engine);
+}
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+{
+ if (cancel_delayed_work(&engine->heartbeat.work))
+ i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+}
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+{
+ INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+}
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay)
+{
+ int err;
+
+	/* Send one last pulse to clean up persistent hogs */
+ if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
+ err = intel_engine_pulse(engine);
+ if (err)
+ return err;
+ }
+
+ WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+
+ if (intel_engine_pm_get_if_awake(engine)) {
+ if (delay)
+ intel_engine_unpark_heartbeat(engine);
+ else
+ intel_engine_park_heartbeat(engine);
+ intel_engine_pm_put(engine);
+ }
+
+ return 0;
+}
+
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+ struct intel_context *ce = engine->kernel_context;
+ struct i915_request *rq;
+ int err = 0;
+
+ if (!intel_engine_has_preemption(engine))
+ return -ENODEV;
+
+ if (!intel_engine_pm_get_if_awake(engine))
+ return 0;
+
+ if (mutex_lock_interruptible(&ce->timeline->mutex))
+ goto out_rpm;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+ intel_context_exit(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unlock;
+ }
+
+ rq->flags |= I915_REQUEST_SENTINEL;
+ idle_pulse(engine, rq);
+
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+out_unlock:
+ mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ if (llist_empty(&engine->barrier_tasks))
+ return 0;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ idle_pulse(engine, rq);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_heartbeat.c"
+#endif
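The heartbeat() worker escalates one step per interval: a minimum-priority pulse first, then (while the previous systole is still outstanding) successively higher priorities through I915_PRIORITY_MASK, I915_PRIORITY_HEARTBEAT and finally I915_PRIORITY_BARRIER, and only once even the barrier-priority pulse has failed to preempt the hog is intel_gt_handle_error() invoked. A toy userspace simulation of that ladder; the priority names are illustrative stand-ins, not the driver's encodings:

#include <stdio.h>

enum { PRIO_MIN, PRIO_MASK, PRIO_HEARTBEAT, PRIO_BARRIER };

int main(void)
{
	int prio = PRIO_MIN;
	int stuck = 1; /* pretend the GPU never completes the pulse */
	int tick;

	for (tick = 0; stuck; tick++) {
		printf("tick %d: pulse at prio %d\n", tick, prio);

		if (prio == PRIO_BARRIER) {
			/* even a barrier pulse did not preempt: reset */
			printf("tick %d: stopped heartbeat, reset engine\n",
			       tick);
			break;
		}

		prio++; /* not preempted in time: escalate next interval */
	}
	return 0;
}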
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
new file mode 100644
index 000000000000..a7b8c0f9e005
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_HEARTBEAT_H
+#define INTEL_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
+ unsigned long delay);
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
+
+int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_HEARTBEAT_H */
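+
As a usage sketch (the calling function is hypothetical, error handling abbreviated): setting the interval to zero parks the heartbeat, and intel_engine_set_heartbeat() sends one final pulse first when the preempt timeout is built in, so a caller can use it to nudge a persistent hog off an engine before flushing the leftover idle barriers:

static int quiesce_engine(struct intel_engine_cs *engine)
{
	int err;

	/* 0 == off; may send a last pulse to evict persistent work */
	err = intel_engine_set_heartbeat(engine, 0);
	if (err)
		return err;

	/* flush any idle barriers left on the kernel context */
	return intel_engine_flush_barriers(engine);
}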
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 7f647243b3b9..874d82677179 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,10 +7,13 @@
#include "i915_drv.h"
#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_ring.h"
static int __engine_unpark(struct intel_wakeref *wf)
{
@@ -33,7 +36,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
if (engine->unpark)
engine->unpark(engine);
- intel_engine_init_hangcheck(engine);
+ intel_engine_unpark_heartbeat(engine);
return 0;
}
@@ -103,14 +106,14 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
/* Context switch failed, hope for the best! Maybe reset? */
goto out_unlock;
- intel_timeline_enter(rq->timeline);
+ intel_timeline_enter(i915_request_timeline(rq));
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
i915_request_add_active_barriers(rq);
/* Install ourselves as a preemption barrier */
- rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_commit(rq);
/* Release our exclusive hold on the engine */
@@ -123,6 +126,19 @@ out_unlock:
return result;
}
+static void call_idle_barriers(struct intel_engine_cs *engine)
+{
+ struct llist_node *node, *next;
+
+ llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
+ struct dma_fence_cb *cb =
+ container_of((struct list_head *)node,
+ typeof(*cb), node);
+
+ cb->func(NULL, cb);
+ }
+}
+
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
@@ -142,6 +158,9 @@ static int __engine_park(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
+ call_idle_barriers(engine); /* cleanup after wedging */
+
+ intel_engine_park_heartbeat(engine);
intel_engine_disarm_breadcrumbs(engine);
intel_engine_pool_park(&engine->pool);
@@ -169,9 +188,10 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+ struct intel_runtime_pm *rpm = engine->uncore->rpm;
intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+ intel_engine_init_heartbeat(engine);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 379a91780bd4..397186818305 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -61,6 +61,7 @@ static int pool_active(struct i915_active *ref)
return 0;
}
+__i915_active_call
static void pool_retire(struct i915_active *ref)
{
struct intel_engine_pool_node *node =
@@ -94,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return ERR_PTR(-ENOMEM);
node->pool = pool;
- i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+ i915_active_init(&node->active, pool_active, pool_retire);
obj = i915_gem_object_create_internal(engine->i915, sz);
if (IS_ERR(obj)) {
@@ -109,9 +110,19 @@ node_create(struct intel_engine_pool *pool, size_t sz)
return node;
}
+static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine)
+{
+ if (intel_engine_is_virtual(engine))
+ engine = intel_virtual_engine_get_sibling(engine, 0);
+
+ GEM_BUG_ON(!engine);
+ return &engine->pool;
+}
+
struct intel_engine_pool_node *
-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size)
{
+ struct intel_engine_pool *pool = lookup_pool(engine);
struct intel_engine_pool_node *node;
struct list_head *list;
unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
index 8d069efd9457..1bd89cadc3b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -12,13 +12,13 @@
#include "i915_request.h"
struct intel_engine_pool_node *
-intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+intel_engine_get_pool(struct intel_engine_cs *engine, size_t size);
static inline int
intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
struct i915_request *rq)
{
- return i915_active_ref(&node->active, rq->timeline, rq);
+ return i915_active_add_request(&node->active, rq);
}
static inline void
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 9dd8c299cb2d..758f0e8ec672 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include "i915_gem.h"
#include "i915_pmu.h"
@@ -58,6 +59,7 @@ struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_gt;
+struct intel_ring;
struct intel_uncore;
typedef u8 intel_engine_mask_t;
@@ -76,40 +78,6 @@ struct intel_instdone {
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 last_ring;
- u32 last_head;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct kref ref;
- struct i915_vma *vma;
- void *vaddr;
-
- /*
- * As we have two types of rings, one global to the engine used
- * by ringbuffer submission and those that are exclusive to a
- * context used by execlists, we have to play safe and allow
- * atomic updates to the pin_count. However, the actual pinning
- * of the context is either done during initialisation for
- * ringbuffer submission or serialised as part of the context
- * pinning for execlists, and so we do not need a mutex ourselves
- * to serialise intel_ring_pin/intel_ring_unpin.
- */
- atomic_t pin_count;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
/*
* we use a single page to load ctx workarounds so all of these
* values are referred in terms of dwords
@@ -148,6 +116,7 @@ enum intel_engine_id {
VECS1,
#define _VECS(n) (VECS0 + (n))
I915_NUM_ENGINES
+#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
struct st_preempt_hang {
@@ -174,6 +143,11 @@ struct intel_engine_execlists {
struct timer_list timer;
/**
+ * @preempt: reset the current context if it fails to give way
+ */
+ struct timer_list preempt;
+
+ /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
@@ -303,10 +277,12 @@ struct intel_engine_cs {
u8 uabi_class;
u8 uabi_instance;
+ u32 uabi_capabilities;
u32 context_size;
u32 mmio_base;
- u32 uabi_capabilities;
+ unsigned int context_tag;
+#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
struct rb_node uabi_node;
@@ -323,6 +299,11 @@ struct intel_engine_cs {
intel_engine_mask_t saturated; /* submitting semaphores too late? */
+ struct {
+ struct delayed_work work;
+ struct i915_request *systole;
+ } heartbeat;
+
unsigned long serial;
unsigned long wakeref_serial;
@@ -473,14 +454,13 @@ struct intel_engine_cs {
/* status_notifier: list of callbacks for context-switch changes */
struct atomic_notifier_head context_status_notifier;
- struct intel_engine_hangcheck hangcheck;
-
#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
unsigned int flags;
@@ -539,6 +519,13 @@ struct intel_engine_cs {
*/
ktime_t total;
} stats;
+
+ struct {
+ unsigned long heartbeat_interval_ms;
+ unsigned long preempt_timeout_ms;
+ unsigned long stop_timeout_ms;
+ unsigned long timeslice_duration_ms;
+ } props;
};
static inline bool
@@ -583,20 +570,24 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine)
return engine->flags & I915_ENGINE_IS_VIRTUAL;
}
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+static inline bool
+intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
+{
+ return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
+}
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+#define instdone_has_slice(dev_priv___, sseu___, slice___) \
+ ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
+ (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
+ intel_sseu_has_subslice(sseu__, 0, subslice__))
+#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
+ for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
+ (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
+ (slice_) += ((subslice_) == 0)) \
+ for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
+ (instdone_has_subslice(dev_priv_, sseu_, slice_, \
+ subslice_)))
#endif /* __INTEL_ENGINE_TYPES_H__ */
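Editor's note: the reworked for_each_instdone_slice_subslice() above folds a two-level slice/subslice walk into a single for statement — the subslice index advances modulo I915_MAX_SUBSLICES and the slice index is bumped exactly when the subslice wraps to zero, with the sseu masks filtering out fused-off units. A minimal userspace sketch of the same idiom follows; the masks are made-up values and has_slice()/has_subslice() are illustrative stand-ins, not the driver's helpers.

#include <stdio.h>

#define MAX_SLICES	3
#define MAX_SUBSLICES	8

/* Stand-ins for the sseu runtime info the real macros consult. */
static int has_slice(unsigned int slice_mask, int s)
{
	return slice_mask & (1u << s);
}

static int has_subslice(unsigned int subslice_mask, int ss)
{
	return subslice_mask & (1u << ss);
}

int main(void)
{
	unsigned int slice_mask = 0x1, subslice_mask = 0x3; /* made up */
	int s, ss;

	/*
	 * Same shape as for_each_instdone_slice_subslice(): one loop,
	 * the subslice advancing modulo MAX_SUBSLICES and the slice
	 * bumped only when the subslice wraps back to zero.
	 */
	for (s = 0, ss = 0; s < MAX_SLICES;
	     ss = (ss + 1) % MAX_SUBSLICES, s += (ss == 0))
		if (has_slice(slice_mask, s) && has_subslice(subslice_mask, ss))
			printf("read instdone for slice %d subslice %d\n", s, ss);

	return 0;
}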
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 77cd5de83930..7f7150a733f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -160,10 +160,10 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
};
if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
- return -1;
+ return INVALID_ENGINE;
if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
- return -1;
+ return INVALID_ENGINE;
return map[ring->class].base + ring->instance;
}
@@ -171,23 +171,15 @@ static int legacy_ring_idx(const struct legacy_ring *ring)
static void add_legacy_ring(struct legacy_ring *ring,
struct intel_engine_cs *engine)
{
- int idx;
-
if (engine->gt != ring->gt || engine->class != ring->class) {
ring->gt = engine->gt;
ring->class = engine->class;
ring->instance = 0;
}
- idx = legacy_ring_idx(ring);
- if (unlikely(idx == -1))
- return;
-
- GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine));
- ring->gt->engine[idx] = engine;
- ring->instance++;
-
- engine->legacy_idx = idx;
+ engine->legacy_idx = legacy_ring_idx(ring);
+ if (engine->legacy_idx != INVALID_ENGINE)
+ ring->instance++;
}
void intel_engines_driver_register(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 86e00a2db8a4..4294f146f13c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -112,6 +112,7 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */
#define MI_SEMAPHORE_POLL (1 << 15)
#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
@@ -119,6 +120,8 @@
#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
+#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5)
+#define MI_SEMAPHORE_TOKEN_SHIFT 5
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
@@ -132,7 +135,10 @@
* address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
+/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
+#define MI_LRI_CS_MMIO (1<<19)
#define MI_LRI_FORCE_POSTED (1<<12)
+#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -147,6 +153,7 @@
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
+#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
@@ -156,7 +163,8 @@
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
-#define MI_BATCH_RESOURCE_STREAMER (1<<10)
+#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10)
+#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only */
/*
* 3D instructions used by the kernel
@@ -217,6 +225,7 @@
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
+#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -224,7 +233,9 @@
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
+#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5)
@@ -235,6 +246,29 @@
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1)
+#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2))
+/* Opcodes for MI_MATH_INSTR */
+#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0)
+#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2)
+#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2)
+#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1)
+#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1)
+#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0)
+#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0)
+#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0)
+#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0)
+#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0)
+#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2)
+#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2)
+/* Registers used as operands in MI_MATH_INSTR */
+#define MI_MATH_REG(x) (x)
+#define MI_MATH_REG_SRCA 0x20
+#define MI_MATH_REG_SRCB 0x21
+#define MI_MATH_REG_ACCU 0x31
+#define MI_MATH_REG_ZF 0x32
+#define MI_MATH_REG_CF 0x33
+
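Editor's note: the MI_MATH block above exposes the command streamer's ALU — a batch loads two operands into SRCA/SRCB, applies an operation that leaves its result in ACCU, and stores it back to a general-purpose register. A hedged sketch of emitting "GPR2 = GPR0 - GPR1" follows; the emit_gpr_sub() helper and the cs cursor are illustrative, not driver API, and only the macros defined above are assumed.

static u32 *emit_gpr_sub(u32 *cs)
{
	*cs++ = MI_MATH(4);	/* four ALU instruction dwords follow */
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(0));
	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(1));
	*cs++ = MI_MATH_SUB;	/* ACCU = SRCA - SRCB */
	*cs++ = MI_MATH_STORE(MI_MATH_REG(2), MI_MATH_REG_ACCU);

	return cs;
}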
/*
* Commands used only by the command parser
*/
@@ -251,7 +285,6 @@
#define MI_CLFLUSH MI_INSTR(0x27, 0)
#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0)
#define MI_REPORT_PERF_COUNT_GGTT (1<<0)
-#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0)
#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0)
#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0)
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index d48ec9a76ed1..4c26daf7ee46 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -6,7 +6,12 @@
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_mocs.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_uncore.h"
+#include "intel_pm.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -18,15 +23,108 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
- intel_gt_init_hangcheck(gt);
intel_gt_init_reset(gt);
+ intel_gt_init_requests(gt);
intel_gt_pm_init_early(gt);
+
+ intel_rps_init_early(&gt->rps);
intel_uc_init_early(&gt->uc);
}
-void intel_gt_init_hw(struct drm_i915_private *i915)
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+{
+ gt->ggtt = ggtt;
+
+ intel_gt_sanitize(gt, false);
+}
+
+static void init_unused_ring(struct intel_gt *gt, u32 base)
{
- i915->gt.ggtt = &i915->ggtt;
+ struct intel_uncore *uncore = gt->uncore;
+
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
+}
+
+static void init_unused_rings(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
+ }
+}
+
+int intel_gt_init_hw(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ BUG_ON(!i915->kernel_context);
+ ret = intel_gt_terminally_wedged(gt);
+ if (ret)
+ return ret;
+
+ gt->last_init_time = ktime_get();
+
+ /* Double layer security blanket, see i915_gem_init() */
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
+
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(gt);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(gt, "init");
+
+ intel_gt_init_swizzling(gt);
+
+ /*
+ * At least 830 can leave some of the unused rings
+ * "active" (i.e. head != tail) after resume, which
+ * will prevent c3 entry. Make sure all unused rings
+ * are totally idle.
+ */
+ init_unused_rings(gt);
+
+ ret = i915_ppgtt_init_hw(gt);
+ if (ret) {
+ DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
+ goto out;
+ }
+
+ /* We can't enable contexts until all firmware is loaded */
+ ret = intel_uc_init_hw(&gt->uc);
+ if (ret) {
+ i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ goto out;
+ }
+
+ intel_mocs_init(gt);
+
+out:
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ return ret;
}
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
@@ -89,7 +187,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine_masked(engine, i915, engine_mask, id)
+ for_each_engine_masked(engine, gt, engine_mask, id)
gen8_clear_engine_error_register(engine);
}
}
@@ -100,7 +198,7 @@ static void gen6_check_faults(struct intel_gt *gt)
enum intel_engine_id id;
u32 fault;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
@@ -176,7 +274,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
/*
@@ -200,18 +298,18 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
wmb();
- if (INTEL_INFO(i915)->has_coherent_ggtt)
+ if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
return;
intel_gt_chipset_flush(gt);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- struct intel_uncore *uncore = gt->uncore;
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ unsigned long flags;
- spin_lock_irq(&uncore->lock);
+ spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
+ spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@@ -222,7 +320,12 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
intel_gtt_chipset_flush();
}
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+void intel_gt_driver_register(struct intel_gt *gt)
+{
+ intel_rps_driver_register(&gt->rps);
+}
+
+static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
@@ -230,7 +333,7 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
int ret;
obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
+ if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate scratch page\n");
@@ -256,11 +359,40 @@ err_unref:
return ret;
}
-void intel_gt_fini_scratch(struct intel_gt *gt)
+static void intel_gt_fini_scratch(struct intel_gt *gt)
{
i915_vma_unpin_and_release(&gt->scratch, 0);
}
+int intel_gt_init(struct intel_gt *gt)
+{
+ int err;
+
+ err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+ if (err)
+ return err;
+
+ intel_gt_pm_init(gt);
+
+ return 0;
+}
+
+void intel_gt_driver_remove(struct intel_gt *gt)
+{
+ GEM_BUG_ON(gt->awake);
+}
+
+void intel_gt_driver_unregister(struct intel_gt *gt)
+{
+ intel_rps_driver_unregister(&gt->rps);
+}
+
+void intel_gt_driver_release(struct intel_gt *gt)
+{
+ intel_gt_pm_fini(gt);
+ intel_gt_fini_scratch(gt);
+}
+
void intel_gt_driver_late_release(struct intel_gt *gt)
{
intel_uc_driver_late_release(&gt->uc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 4920cb351f10..5436f8c30708 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -28,7 +28,14 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
}
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw(struct drm_i915_private *i915);
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
+int __must_check intel_gt_init_hw(struct intel_gt *gt);
+int intel_gt_init(struct intel_gt *gt);
+void intel_gt_driver_register(struct intel_gt *gt);
+
+void intel_gt_driver_unregister(struct intel_gt *gt);
+void intel_gt_driver_remove(struct intel_gt *gt);
+void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release(struct intel_gt *gt);
@@ -39,11 +46,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);
-void intel_gt_init_hangcheck(struct intel_gt *gt);
-
-int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
-void intel_gt_fini_scratch(struct intel_gt *gt);
-
static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
enum intel_gt_scratch_field field)
{
@@ -55,6 +57,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt)
return __intel_reset_failed(&gt->reset);
}
-void intel_gt_queue_hangcheck(struct intel_gt *gt);
-
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 34a4fb624bf7..973ee7eded64 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -11,6 +11,7 @@
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
+#include "intel_rps.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
@@ -77,7 +78,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
return guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(gt, iir);
+ return gen11_rps_irq_handler(&gt->rps, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -336,7 +337,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(gt->i915, gt_iir[2]);
+ gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index fac75afed35b..6187cdd06646 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -4,17 +4,38 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/suspend.h>
+
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_params.h"
+#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_llc.h"
#include "intel_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
#include "intel_wakeref.h"
-static void pm_notify(struct drm_i915_private *i915, int state)
+static void user_forcewake(struct intel_gt *gt, bool suspend)
{
- blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
+ int count = atomic_read(&gt->user_wakeref);
+
+ /* Inside suspend/resume so single threaded, no races to worry about. */
+ if (likely(!count))
+ return;
+
+ intel_gt_pm_get(gt);
+ if (suspend) {
+ GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+ atomic_sub(count, &gt->wakeref.count);
+ } else {
+ atomic_add(count, &gt->wakeref.count);
+ }
+ intel_gt_pm_put(gt);
}
static int __gt_unpark(struct intel_wakeref *wf)
@@ -24,6 +45,8 @@ static int __gt_unpark(struct intel_wakeref *wf)
GEM_TRACE("\n");
+ i915_globals_unpark();
+
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -41,46 +64,41 @@ static int __gt_unpark(struct intel_wakeref *wf)
if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- intel_enable_gt_powersave(i915);
-
- i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
-
+ intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(i915);
- intel_gt_queue_hangcheck(gt);
-
- pm_notify(i915, INTEL_GT_UNPARK);
+ intel_gt_unpark_requests(gt);
return 0;
}
static int __gt_park(struct intel_wakeref *wf)
{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
- intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
+ struct drm_i915_private *i915 = gt->i915;
GEM_TRACE("\n");
- pm_notify(i915, INTEL_GT_PARK);
+ intel_gt_park_requests(gt);
+ i915_vma_parked(gt);
i915_pmu_gt_parked(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
+ intel_rps_park(&gt->rps);
+
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) {
- i915_rc6_ctx_wa_check(i915);
+ intel_rc6_ctx_wa_check(&i915->gt.rc6);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
}
- /* Everything switched off, flush any residual interrupt just in case */
- intel_synchronize_irq(i915);
-
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ i915_globals_park();
+
return 0;
}
@@ -92,9 +110,18 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+}
- BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
+void intel_gt_pm_init(struct intel_gt *gt)
+{
+ /*
+ * Enabling power-management should be "self-healing". If we cannot
+ * enable a feature, simply leave it disabled with a notice to the
+ * user.
+ */
+ intel_rc6_init(&gt->rc6);
+ intel_rps_init(&gt->rps);
}
static bool reset_engines(struct intel_gt *gt)
@@ -119,16 +146,47 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
- GEM_TRACE("\n");
+ GEM_TRACE("force:%s\n", yesno(force));
+
+ /* Use a raw wakeref to avoid calling intel_display_power_get early */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+ /*
+ * As we have just resumed the machine and woken the device up from
+ * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+ * back to defaults, recovering from whatever wedged state we left it
+ * in, and so it is worth trying to use the device once more.
+ */
+ if (intel_gt_is_wedged(gt))
+ intel_gt_unset_wedged(gt);
intel_uc_sanitize(&gt->uc);
- if (!reset_engines(gt) && !force)
- return;
+ for_each_engine(engine, gt, id)
+ if (engine->reset.prepare)
+ engine->reset.prepare(engine);
- for_each_engine(engine, gt->i915, id)
- __intel_engine_reset(engine, false);
+ intel_uc_reset_prepare(&gt->uc);
+
+ if (reset_engines(gt) || force) {
+ for_each_engine(engine, gt, id)
+ __intel_engine_reset(engine, false);
+ }
+
+ for_each_engine(engine, gt, id)
+ if (engine->reset.finish)
+ engine->reset.finish(engine);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_gt_pm_fini(struct intel_gt *gt)
+{
+ intel_rc6_fini(&gt->rc6);
}
int intel_gt_resume(struct intel_gt *gt)
@@ -137,6 +195,8 @@ int intel_gt_resume(struct intel_gt *gt)
enum intel_engine_id id;
int err = 0;
+ GEM_TRACE("\n");
+
/*
* After resume, we may need to poke into the pinned kernel
* contexts to paper over any damage caused by the sudden suspend.
@@ -144,14 +204,23 @@ int intel_gt_resume(struct intel_gt *gt)
* allowing us to fixup the user contexts on their first pin.
*/
intel_gt_pm_get(gt);
- for_each_engine(engine, gt->i915, id) {
+
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ intel_rc6_sanitize(&gt->rc6);
+
+ intel_rps_enable(&gt->rps);
+ intel_llc_enable(&gt->llc);
+
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
ce = engine->kernel_context;
- if (ce)
+ if (ce) {
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
ce->ops->reset(ce);
+ }
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@@ -164,19 +233,99 @@ int intel_gt_resume(struct intel_gt *gt)
break;
}
}
+
+ intel_rc6_enable(&gt->rc6);
+
+ intel_uc_resume(&gt->uc);
+
+ user_forcewake(gt, false);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
return err;
}
+static void wait_for_suspend(struct intel_gt *gt)
+{
+ if (!intel_gt_pm_is_awake(gt))
+ return;
+
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ /*
+ * Forcibly cancel outstanding work and leave
+ * the gpu quiet.
+ */
+ intel_gt_set_wedged(gt);
+ }
+
+ intel_gt_pm_wait_for_idle(gt);
+}
+
+void intel_gt_suspend_prepare(struct intel_gt *gt)
+{
+ user_forcewake(gt, true);
+ wait_for_suspend(gt);
+
+ intel_uc_suspend(&gt->uc);
+}
+
+static suspend_state_t pm_suspend_target(void)
+{
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+ return pm_suspend_target_state;
+#else
+ return PM_SUSPEND_TO_IDLE;
+#endif
+}
+
+void intel_gt_suspend_late(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+
+ /* We expect to be idle already, but also want to be independent */
+ wait_for_suspend(gt);
+
+ /*
+ * On disabling the device, we want to turn off HW access to memory
+ * that we no longer own.
+ *
+ * However, not all suspend-states disable the device. S0 (s2idle)
+ * is effectively runtime-suspend: the device is left powered on
+ * but needs to be put into a low power state. We need to keep
+ * power management enabled, but we also retain system state and so
+ * it remains safe to keep on using our allocated memory.
+ */
+ if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+ return;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ intel_rps_disable(&gt->rps);
+ intel_rc6_disable(&gt->rc6);
+ intel_llc_disable(&gt->llc);
+ }
+
+ intel_gt_sanitize(gt, false);
+
+ GEM_TRACE("\n");
+}
+
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
+
+ GEM_TRACE("\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
+ GEM_TRACE("\n");
+
intel_gt_init_swizzling(gt);
return intel_uc_runtime_resume(&gt->uc);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_gt_pm.c"
+#endif
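Editor's note: user_forcewake() above hides user-held wakerefs from the autosuspend accounting across suspend — the count is subtracted so the GT can park, then restored on resume, with the surrounding intel_gt_pm_get()/put() pair keeping the counter from passing through zero mid-update. A standalone sketch of that transfer, assuming simplified types and the single-threaded suspend/resume context the real code relies on:

#include <stdatomic.h>

struct gt_sketch {
	atomic_int user_wakeref;	/* refs handed out via debugfs */
	atomic_int wakeref_count;	/* total outstanding wakerefs */
};

static void user_forcewake_sketch(struct gt_sketch *gt, int suspend)
{
	int count = atomic_load(&gt->user_wakeref);

	if (!count)
		return;

	/*
	 * The real code brackets this with intel_gt_pm_get()/put() so
	 * the count never transitions through zero while adjusted.
	 */
	if (suspend)
		atomic_fetch_sub(&gt->wakeref_count, count);
	else
		atomic_fetch_add(&gt->wakeref_count, count);
}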
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index fb39d99cd6ee..b3e17399be9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -12,11 +12,6 @@
#include "intel_gt_types.h"
#include "intel_wakeref.h"
-enum {
- INTEL_GT_UNPARK,
- INTEL_GT_PARK,
-};
-
static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
{
return intel_wakeref_is_active(&gt->wakeref);
@@ -43,10 +38,21 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
}
void intel_gt_pm_init_early(struct intel_gt *gt);
+void intel_gt_pm_init(struct intel_gt *gt);
+void intel_gt_pm_fini(struct intel_gt *gt);
void intel_gt_sanitize(struct intel_gt *gt, bool force);
+
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
+
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
+static inline bool is_mock_gt(const struct intel_gt *gt)
+{
+ return I915_SELFTEST_ONLY(gt->awake == -ENODEV);
+}
+
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
new file mode 100644
index 000000000000..353809ac2754
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -0,0 +1,137 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h" /* for_each_engine() */
+#include "i915_request.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_timeline.h"
+
+static void retire_requests(struct intel_timeline *tl)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (!i915_request_retire(rq))
+ break;
+}
+
+static void flush_submission(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_submission(engine);
+}
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ unsigned long active_count = 0;
+ unsigned long flags;
+ bool interruptible;
+ LIST_HEAD(free);
+
+ interruptible = true;
+ if (unlikely(timeout < 0))
+ timeout = -timeout, interruptible = false;
+
+ flush_submission(gt); /* kick the ksoftirqd tasklets */
+
+ spin_lock_irqsave(&timelines->lock, flags);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++; /* report busy to caller, try again? */
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!tl->active_count);
+ tl->active_count++; /* pin the list element */
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ if (timeout > 0) {
+ struct dma_fence *fence;
+
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ timeout = dma_fence_wait_timeout(fence,
+ interruptible,
+ timeout);
+ dma_fence_put(fence);
+ }
+ }
+
+ retire_requests(tl);
+
+ spin_lock_irqsave(&timelines->lock, flags);
+
+ /* Resume iteration after dropping lock */
+ list_safe_reset_next(tl, tn, link);
+ if (!--tl->active_count)
+ list_del(&tl->link);
+ else
+ active_count += !!rcu_access_pointer(tl->last_request.fence);
+
+ mutex_unlock(&tl->mutex);
+
+ /* Defer the final release to after the spinlock */
+ if (refcount_dec_and_test(&tl->kref.refcount)) {
+ GEM_BUG_ON(tl->active_count);
+ list_add(&tl->link, &free);
+ }
+ }
+ spin_unlock_irqrestore(&timelines->lock, flags);
+
+ list_for_each_entry_safe(tl, tn, &free, link)
+ __intel_timeline_free(&tl->kref);
+
+ return active_count ? timeout : 0;
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+ /* If the device is asleep, we have no requests outstanding */
+ if (!intel_gt_pm_is_awake(gt))
+ return 0;
+
+ while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return timeout;
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), requests.retire_work.work);
+
+ intel_gt_retire_requests(gt);
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_init_requests(struct intel_gt *gt)
+{
+ INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
+}
+
+void intel_gt_park_requests(struct intel_gt *gt)
+{
+ cancel_delayed_work(&gt->requests.retire_work);
+}
+
+void intel_gt_unpark_requests(struct intel_gt *gt)
+{
+ schedule_delayed_work(&gt->requests.retire_work,
+ round_jiffies_up_relative(HZ));
+}
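Editor's note: intel_gt_retire_requests_timeout() above uses a pin-and-drop list walk — each timeline is reference-counted and its active_count raised before the timelines spinlock is released, so retirement can sleep while the walk stays resumable; the next pointer is re-fetched under the lock (list_safe_reset_next) and final frees are deferred until the spinlock is dropped. A simplified userspace sketch of the idiom, with illustrative types rather than the driver's lists:

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct node {
	struct node *next;
	int pin_count;		/* keeps the element linked while unlocked */
};

static void walk(struct node *head, void (*work)(struct node *))
{
	struct node *n, *next;

	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = next) {
		n->pin_count++;			/* pin before dropping the lock */
		pthread_mutex_unlock(&list_lock);

		work(n);			/* may sleep, may mutate the list */

		pthread_mutex_lock(&list_lock);
		next = n->next;			/* re-read under the lock */
		n->pin_count--;			/* real code unlinks/frees at zero */
	}
	pthread_mutex_unlock(&list_lock);
}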
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
new file mode 100644
index 000000000000..bd31cbce47e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_REQUESTS_H
+#define INTEL_GT_REQUESTS_H
+
+struct intel_gt;
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+static inline void intel_gt_retire_requests(struct intel_gt *gt)
+{
+ intel_gt_retire_requests_timeout(gt, 0);
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
+void intel_gt_init_requests(struct intel_gt *gt);
+void intel_gt_park_requests(struct intel_gt *gt);
+void intel_gt_unpark_requests(struct intel_gt *gt);
+
+#endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index dc295c196d11..d4e14dbd172e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -17,7 +17,10 @@
#include "i915_vma.h"
#include "intel_engine_types.h"
+#include "intel_llc_types.h"
#include "intel_reset_types.h"
+#include "intel_rc6_types.h"
+#include "intel_rps_types.h"
#include "intel_wakeref.h"
struct drm_i915_private;
@@ -25,14 +28,6 @@ struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;
-struct intel_hangcheck {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work work;
-};
-
struct intel_gt {
struct drm_i915_private *i915;
struct intel_uncore *uncore;
@@ -49,12 +44,23 @@ struct intel_gt {
struct list_head hwsp_free_list;
} timelines;
+ struct intel_gt_requests {
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+ } requests;
+
struct intel_wakeref wakeref;
+ atomic_t user_wakeref;
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
- struct intel_hangcheck hangcheck;
struct intel_reset reset;
/**
@@ -66,7 +72,9 @@ struct intel_gt {
*/
intel_wakeref_t awake;
- struct blocking_notifier_head pm_notifications;
+ struct intel_llc llc;
+ struct intel_rc6 rc6;
+ struct intel_rps rps;
ktime_t last_init_time;
@@ -89,14 +97,16 @@ enum intel_gt_scratch_field {
INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
/* 8 bytes */
- INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
-
- /* 8 bytes */
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+ /* 6 * 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
+
+ /* 4 bytes */
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
#endif /* __INTEL_GT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
deleted file mode 100644
index 05d042cdefe2..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ /dev/null
@@ -1,360 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "intel_engine.h"
-#include "intel_gt.h"
-#include "intel_reset.h"
-
-struct hangcheck {
- u64 acthd;
- u32 ring;
- u32 head;
- enum intel_engine_hangcheck_action action;
- unsigned long action_timestamp;
- int deadlock;
- struct intel_instdone instdone;
- bool wedged:1;
- bool stalled:1;
-};
-
-static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
-{
- u32 tmp = current_instdone | *old_instdone;
- bool unchanged;
-
- unchanged = tmp == *old_instdone;
- *old_instdone |= tmp;
-
- return unchanged;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_instdone instdone;
- struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
- bool stuck;
- int slice;
- int subslice;
-
- intel_engine_get_instdone(engine, &instdone);
-
- /* There might be unstable subunit states even when
- * actual head is not moving. Filter out the unstable ones by
- * accumulating the undone -> done transitions and only
- * consider those as progress.
- */
- stuck = instdone_unchanged(instdone.instdone,
- &accu_instdone->instdone);
- stuck &= instdone_unchanged(instdone.slice_common,
- &accu_instdone->slice_common);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
- stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
- &accu_instdone->sampler[slice][subslice]);
- stuck &= instdone_unchanged(instdone.row[slice][subslice],
- &accu_instdone->row[slice][subslice]);
- }
-
- return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- if (acthd != engine->hangcheck.acthd) {
-
- /* Clear subunit states on head movement */
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- return ENGINE_ACTIVE_HEAD;
- }
-
- if (!subunits_stuck(engine))
- return ENGINE_ACTIVE_SUBUNITS;
-
- return ENGINE_DEAD;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
- enum intel_engine_hangcheck_action ha;
- u32 tmp;
-
- ha = head_stuck(engine, acthd);
- if (ha != ENGINE_DEAD)
- return ha;
-
- if (IS_GEN(engine->i915, 2))
- return ENGINE_DEAD;
-
- /* Is the chip hanging on a WAIT_FOR_EVENT?
- * If so we can simply poke the RB_WAIT bit
- * and break the hang. This should work on
- * all but the second generation chipsets.
- */
- tmp = ENGINE_READ(engine, RING_CTL);
- if (tmp & RING_WAIT) {
- intel_gt_handle_error(engine->gt, engine->mask, 0,
- "stuck wait on %s", engine->name);
- ENGINE_WRITE(engine, RING_CTL, tmp);
- return ENGINE_WAIT_KICK;
- }
-
- return ENGINE_DEAD;
-}
-
-static void hangcheck_load_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- hc->acthd = intel_engine_get_active_head(engine);
- hc->ring = ENGINE_READ(engine, RING_START);
- hc->head = ENGINE_READ(engine, RING_HEAD);
-}
-
-static void hangcheck_store_sample(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.last_ring = hc->ring;
- engine->hangcheck.last_head = hc->head;
-}
-
-static enum intel_engine_hangcheck_action
-hangcheck_get_action(struct intel_engine_cs *engine,
- const struct hangcheck *hc)
-{
- if (intel_engine_is_idle(engine))
- return ENGINE_IDLE;
-
- if (engine->hangcheck.last_ring != hc->ring)
- return ENGINE_ACTIVE_SEQNO;
-
- if (engine->hangcheck.last_head != hc->head)
- return ENGINE_ACTIVE_SEQNO;
-
- return engine_stuck(engine, hc->acthd);
-}
-
-static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
- struct hangcheck *hc)
-{
- unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
-
- hc->action = hangcheck_get_action(engine, hc);
-
- /* We always increment the progress
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, make it as a progress as the seqno
- * advancement might ensure and if not, it
- * will catch the hanging engine.
- */
-
- switch (hc->action) {
- case ENGINE_IDLE:
- case ENGINE_ACTIVE_SEQNO:
- /* Clear head and subunit states on seqno movement */
- hc->acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
-
- /* Intentional fall through */
- case ENGINE_WAIT_KICK:
- case ENGINE_WAIT:
- engine->hangcheck.action_timestamp = jiffies;
- break;
-
- case ENGINE_ACTIVE_HEAD:
- case ENGINE_ACTIVE_SUBUNITS:
- /*
- * Seqno stuck with still active engine gets leeway,
- * in hopes that it is just a long shader.
- */
- timeout = I915_SEQNO_DEAD_TIMEOUT;
- break;
-
- case ENGINE_DEAD:
- break;
-
- default:
- MISSING_CASE(hc->action);
- }
-
- hc->stalled = time_after(jiffies,
- engine->hangcheck.action_timestamp + timeout);
- hc->wedged = time_after(jiffies,
- engine->hangcheck.action_timestamp +
- I915_ENGINE_WEDGED_TIMEOUT);
-}
-
-static void hangcheck_declare_hang(struct intel_gt *gt,
- intel_engine_mask_t hung,
- intel_engine_mask_t stuck)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
- char msg[80];
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "no progress" : "hang");
- for_each_engine_masked(engine, gt->i915, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void hangcheck_elapsed(struct work_struct *work)
-{
- struct intel_gt *gt =
- container_of(work, typeof(*gt), hangcheck.work.work);
- intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
-
- if (!i915_modparams.enable_hangcheck)
- return;
-
- if (!READ_ONCE(gt->awake))
- return;
-
- if (intel_gt_is_wedged(gt))
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
- if (!wakeref)
- return;
-
- /* As enabling the GPU requires fairly extensive mmio access,
- * periodically arm the mmio checker to see if we are triggering
- * any invalid access.
- */
- intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
-
- for_each_engine(engine, gt->i915, id) {
- struct hangcheck hc;
-
- intel_engine_signal_breadcrumbs(engine);
-
- hangcheck_load_sample(engine, &hc);
- hangcheck_accumulate_sample(engine, &hc);
- hangcheck_store_sample(engine, &hc);
-
- if (hc.stalled) {
- hung |= engine->mask;
- if (hc.action != ENGINE_DEAD)
- stuck |= engine->mask;
- }
-
- if (hc.wedged)
- wedged |= engine->mask;
- }
-
- if (GEM_SHOW_DEBUG() && (hung | stuck)) {
- struct drm_printer p = drm_debug_printer("hangcheck");
-
- for_each_engine(engine, gt->i915, id) {
- if (intel_engine_is_idle(engine))
- continue;
-
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
- }
-
- if (wedged) {
- dev_err(gt->i915->drm.dev,
- "GPU recovery timed out,"
- " cancelling all in-flight rendering.\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- }
-
- if (hung)
- hangcheck_declare_hang(gt, hung, stuck);
-
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
-
- /* Reset timer in case GPU hangs without another request being added */
- intel_gt_queue_hangcheck(gt);
-}
-
-void intel_gt_queue_hangcheck(struct intel_gt *gt)
-{
- unsigned long delay;
-
- if (unlikely(!i915_modparams.enable_hangcheck))
- return;
-
- /*
- * Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
- queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
-}
-
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
- memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
- engine->hangcheck.action_timestamp = jiffies;
-}
-
-void intel_gt_init_hangcheck(struct intel_gt *gt)
-{
- INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_hangcheck.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
new file mode 100644
index 000000000000..ceb785b75c25
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -0,0 +1,161 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/cpufreq.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_llc.h"
+#include "intel_sideband.h"
+
+struct ia_constants {
+ unsigned int min_gpu_freq;
+ unsigned int max_gpu_freq;
+
+ unsigned int min_ring_freq;
+ unsigned int max_ia_freq;
+};
+
+static struct intel_gt *llc_to_gt(struct intel_llc *llc)
+{
+ return container_of(llc, struct intel_gt, llc);
+}
+
+static unsigned int cpu_max_MHz(void)
+{
+ struct cpufreq_policy *policy;
+ unsigned int max_khz;
+
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+ max_khz = policy->cpuinfo.max_freq;
+ cpufreq_cpu_put(policy);
+ } else {
+ /*
+ * Default to measured freq if none found; the PCU will ensure we
+ * don't go over.
+ */
+ max_khz = tsc_khz;
+ }
+
+ return max_khz / 1000;
+}
+
+static bool get_ia_constants(struct intel_llc *llc,
+ struct ia_constants *consts)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ if (rps->max_freq <= rps->min_freq)
+ return false;
+
+ consts->max_ia_freq = cpu_max_MHz();
+
+ consts->min_ring_freq =
+ intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
+ /* convert DDR frequency from units of 266.6MHz to bandwidth */
+ consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
+
+ consts->min_gpu_freq = rps->min_freq;
+ consts->max_gpu_freq = rps->max_freq;
+ if (INTEL_GEN(i915) >= 9) {
+ /* Convert GT frequency to 50 HZ units */
+ consts->min_gpu_freq /= GEN9_FREQ_SCALER;
+ consts->max_gpu_freq /= GEN9_FREQ_SCALER;
+ }
+
+ return true;
+}
+
+static void calc_ia_freq(struct intel_llc *llc,
+ unsigned int gpu_freq,
+ const struct ia_constants *consts,
+ unsigned int *out_ia_freq,
+ unsigned int *out_ring_freq)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ const int diff = consts->max_gpu_freq - gpu_freq;
+ unsigned int ia_freq = 0, ring_freq = 0;
+
+ if (INTEL_GEN(i915) >= 9) {
+ /*
+ * ring_freq = 2 * GT. ring_freq is in 100MHz units
+ * No floor required for ring frequency on SKL.
+ */
+ ring_freq = gpu_freq;
+ } else if (INTEL_GEN(i915) >= 8) {
+ /* max(2 * GT, DDR). NB: GT is 50MHz units */
+ ring_freq = max(consts->min_ring_freq, gpu_freq);
+ } else if (IS_HASWELL(i915)) {
+ ring_freq = mult_frac(gpu_freq, 5, 4);
+ ring_freq = max(consts->min_ring_freq, ring_freq);
+ /* leave ia_freq as the default, chosen by cpufreq */
+ } else {
+ const int min_freq = 15;
+ const int scale = 180;
+
+ /*
+ * On older processors, there is no separate ring
+ * clock domain, so in order to boost the bandwidth
+ * of the ring, we need to upclock the CPU (ia_freq).
+ *
+ * For GPU frequencies less than 750MHz,
+ * just use the lowest ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = consts->max_ia_freq - diff * scale / 2;
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+ }
+
+ *out_ia_freq = ia_freq;
+ *out_ring_freq = ring_freq;
+}
+
+static void gen6_update_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ unsigned int gpu_freq;
+
+ if (!get_ia_constants(llc, &consts))
+ return;
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = consts.max_gpu_freq;
+ gpu_freq >= consts.min_gpu_freq;
+ gpu_freq--) {
+ unsigned int ia_freq, ring_freq;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+ sandybridge_pcode_write(i915,
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
+ ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
+ gpu_freq);
+ }
+}
+
+void intel_llc_enable(struct intel_llc *llc)
+{
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ gen6_update_ring_freq(llc);
+}
+
+void intel_llc_disable(struct intel_llc *llc)
+{
+ /* Currently there is no HW configuration to be done to disable. */
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_llc.c"
+#endif
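Editor's note: in the oldest (pre-Haswell, pre-gen8) branch of calc_ia_freq() above, the CPU is upclocked in step with the GPU. A worked example with made-up numbers — a 3400 MHz CPU and a GPU two 50 MHz bins below its ceiling — showing the arithmetic the driver hands to the PCU; main() and the constants are illustrative only:

#include <stdio.h>

int main(void)
{
	const unsigned int max_ia_freq = 3400;	/* MHz, from cpufreq */
	const unsigned int max_gpu_freq = 22;	/* 50 MHz units */
	const unsigned int gpu_freq = 20;	/* 50 MHz units */
	const int scale = 180;

	int diff = max_gpu_freq - gpu_freq;		/* 2 bins below max */
	unsigned int ia_freq = max_ia_freq - diff * scale / 2;	/* 3220 MHz */

	/* DIV_ROUND_CLOSEST(ia_freq, 100): the PCU takes 100 MHz units */
	ia_freq = (ia_freq + 50) / 100;			/* 32 */

	printf("gpu_freq %u -> ia_freq %u (x100 MHz)\n", gpu_freq, ia_freq);
	return 0;
}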
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h
new file mode 100644
index 000000000000..ef09a890d2b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_H
+#define INTEL_LLC_H
+
+struct intel_llc;
+
+void intel_llc_enable(struct intel_llc *llc);
+void intel_llc_disable(struct intel_llc *llc);
+
+#endif /* INTEL_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h
new file mode 100644
index 000000000000..ecad4687b930
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h
@@ -0,0 +1,13 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_LLC_TYPES_H
+#define INTEL_LLC_TYPES_H
+
+struct intel_llc {
+};
+
+#endif /* INTEL_LLC_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 06a506c29463..0ac3b26674ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -145,6 +145,7 @@
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
@@ -230,17 +231,42 @@ static int __execlists_context_alloc(struct intel_context *ce,
struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring);
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
static void mark_eio(struct i915_request *rq)
{
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+ if (i915_request_completed(rq))
+ return;
+
+ GEM_BUG_ON(i915_request_signaled(rq));
+
+ dma_fence_set_error(&rq->fence, -EIO);
i915_request_mark_complete(rq);
}
+static struct i915_request *
+active_request(const struct intel_timeline * const tl, struct i915_request *rq)
+{
+ struct i915_request *active = rq;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ break;
+
+ active = rq;
+ }
+ rcu_read_unlock();
+
+ return active;
+}
+
static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
@@ -337,10 +363,15 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* However, the priority hint is a mere hint that we may need to
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
+ *
+ * More naturally we would write
+ * prio >= max(0, last);
+ * except that we wish to prevent triggering preemption at the same
+ * priority level: the task that is running should remain running
+ * to preserve FIFO ordering of dependencies.
*/
- last_prio = effective_prio(rq);
- if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
- last_prio))
+ last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
+ if (engine->execlists.queue_priority_hint <= last_prio)
return false;
/*
@@ -429,12 +460,8 @@ assert_priority_queue(const struct i915_request *prev,
static u64
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
- struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
-
desc = INTEL_LEGACY_32B_CONTEXT;
if (i915_vm_is_4lvl(ce->vm))
desc = INTEL_LEGACY_64B_CONTEXT;
@@ -444,33 +471,379 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
if (IS_GEN(engine->i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
- desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
- /* bits 12-31 */
+ desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
/*
* The following 32bits are copied into the OA reports (dword 2).
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
if (INTEL_GEN(engine->i915) >= 11) {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
- /* bits 37-47 */
-
desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
/* bits 48-53 */
- /* TODO: decide what to do with SW counter (bits 55-60) */
-
desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
/* bits 61-63 */
- } else {
- GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
- desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
return desc;
}
+static u32 *set_offsets(u32 *regs,
+ const u8 *data,
+ const struct intel_engine_cs *engine)
+#define NOP(x) (BIT(7) | (x))
+#define LRI(count, flags) ((flags) << 6 | (count))
+#define POSTED BIT(0)
+#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
+#define REG16(x) \
+ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
+ (((x) >> 2) & 0x7f)
+#define END() 0
+{
+ const u32 base = engine->mmio_base;
+
+ while (*data) {
+ u8 count, flags;
+
+ if (*data & BIT(7)) { /* skip */
+ regs += *data++ & ~BIT(7);
+ continue;
+ }
+
+ count = *data & 0x3f;
+ flags = *data >> 6;
+ data++;
+
+ *regs = MI_LOAD_REGISTER_IMM(count);
+ if (flags & POSTED)
+ *regs |= MI_LRI_FORCE_POSTED;
+ if (INTEL_GEN(engine->i915) >= 11)
+ *regs |= MI_LRI_CS_MMIO;
+ regs++;
+
+ GEM_BUG_ON(!count);
+ do {
+ u32 offset = 0;
+ u8 v;
+
+ do {
+ v = *data++;
+ offset <<= 7;
+ offset |= v & ~BIT(7);
+ } while (v & BIT(7));
+
+ *regs = base + (offset << 2);
+ regs += 2;
+ } while (--count);
+ }
+
+ return regs;
+}
+
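Editor's note: set_offsets() above consumes a compact byte-coded table — a zero byte terminates, a byte with bit 7 set skips that many dwords (NOP), and otherwise the low six bits give a register count for an LRI whose dword offsets follow as MSB-first base-128 varints, bit 7 acting as the continuation flag (which is how REG16() spills into two bytes). A hedged standalone decoder assuming that same encoding; decode() is illustrative, not part of the driver:

#include <stdio.h>

static void decode(const unsigned char *data, unsigned int base)
{
	unsigned int reg = 0;	/* current dword index in the image */

	while (*data) {
		unsigned int count, flags;

		if (*data & 0x80) {		/* NOP(x): skip x dwords */
			reg += *data++ & 0x7f;
			continue;
		}

		count = *data & 0x3f;
		flags = *data >> 6;
		data++;
		printf("[%u] LRI of %u regs%s\n",
		       reg, count, (flags & 1) ? " (posted)" : "");
		reg++;				/* the LRI header dword */

		while (count--) {
			unsigned int offset = 0, v;

			do {			/* 7 bits per byte, MSB first */
				v = *data++;
				offset = offset << 7 | (v & 0x7f);
			} while (v & 0x80);

			printf("  [%u] 0x%x\n", reg, base + (offset << 2));
			reg += 2;		/* register + value slots */
		}
	}
}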
+static const u8 gen8_xcs_offsets[] = {
+ NOP(1),
+ LRI(11, 0),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+
+ NOP(9),
+ LRI(9, 0),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(2, 0),
+ REG16(0x200),
+ REG(0x028),
+
+ END(),
+};
+
+static const u8 gen9_xcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, POSTED),
+ REG16(0x200),
+
+ NOP(13),
+ LRI(44, POSTED),
+ REG(0x028),
+ REG(0x09c),
+ REG(0x0c0),
+ REG(0x178),
+ REG(0x17c),
+ REG16(0x358),
+ REG(0x170),
+ REG(0x150),
+ REG(0x154),
+ REG(0x158),
+ REG16(0x41c),
+ REG16(0x600),
+ REG16(0x604),
+ REG16(0x608),
+ REG16(0x60c),
+ REG16(0x610),
+ REG16(0x614),
+ REG16(0x618),
+ REG16(0x61c),
+ REG16(0x620),
+ REG16(0x624),
+ REG16(0x628),
+ REG16(0x62c),
+ REG16(0x630),
+ REG16(0x634),
+ REG16(0x638),
+ REG16(0x63c),
+ REG16(0x640),
+ REG16(0x644),
+ REG16(0x648),
+ REG16(0x64c),
+ REG16(0x650),
+ REG16(0x654),
+ REG16(0x658),
+ REG16(0x65c),
+ REG16(0x660),
+ REG16(0x664),
+ REG16(0x668),
+ REG16(0x66c),
+ REG16(0x670),
+ REG16(0x674),
+ REG16(0x678),
+ REG16(0x67c),
+ REG(0x068),
+
+ END(),
+};
+
+static const u8 gen12_xcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END(),
+};
+
+static const u8 gen8_rcs_offsets[] = {
+ NOP(1),
+ LRI(14, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+
+ NOP(3),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ NOP(13),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+static const u8 gen11_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x11c),
+ REG(0x114),
+ REG(0x118),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(1, POSTED),
+ REG(0x1b0),
+
+ NOP(10),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+static const u8 gen12_rcs_offsets[] = {
+ NOP(1),
+ LRI(13, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+
+ NOP(5),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END(),
+};
+
+#undef END
+#undef REG16
+#undef REG
+#undef LRI
+#undef NOP
+
+static const u8 *reg_offsets(const struct intel_engine_cs *engine)
+{
+ /*
+ * The gen12+ lists only have the registers we program in the basic
+ * default state. We rely on the context image using relative
+ * addressing to automatically fix up the register state between the
+ * physical engines backing a virtual engine.
+ */
+ GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
+ !intel_engine_has_relative_mmio(engine));
+
+ if (engine->class == RENDER_CLASS) {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_rcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 11)
+ return gen11_rcs_offsets;
+ else
+ return gen8_rcs_offsets;
+ } else {
+ if (INTEL_GEN(engine->i915) >= 12)
+ return gen12_xcs_offsets;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return gen9_xcs_offsets;
+ else
+ return gen8_xcs_offsets;
+ }
+}
+
static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -489,7 +862,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn,
&engine->active.requests,
sched.link) {
- struct intel_engine_cs *owner;
if (i915_request_completed(rq))
continue; /* XXX */
@@ -504,8 +876,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* engine so that it can be moved across onto another physical
* engine as load dictates.
*/
- owner = rq->hw_context->engine;
- if (likely(owner == engine)) {
+ if (likely(rq->execution_mask == engine->mask)) {
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
@@ -516,6 +887,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
+ struct intel_engine_cs *owner = rq->hw_context->engine;
+
/*
* Decouple the virtual breadcrumb before moving it
* back to the virtual engine -- we don't want the
@@ -525,7 +898,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
*/
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags)) {
- spin_lock(&rq->lock);
+ spin_lock_nested(&rq->lock,
+ SINGLE_DEPTH_NESTING);
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
@@ -561,6 +935,114 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
+static void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ if (engine->stats.active++ == 0)
+ engine->stats.start = ktime_get();
+ GEM_BUG_ON(engine->stats.active == 0);
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+ unsigned long flags;
+
+ if (READ_ONCE(engine->stats.enabled) == 0)
+ return;
+
+ write_seqlock_irqsave(&engine->stats.lock, flags);
+
+ if (engine->stats.enabled > 0) {
+ ktime_t last;
+
+ if (engine->stats.active && --engine->stats.active == 0) {
+ /*
+ * Decrement the active context count and, in case the GPU
+ * is now idle, add the elapsed time to the running total.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.start);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ } else if (engine->stats.active == 0) {
+ /*
+ * After turning on engine stats, context out might be
+ * the first event, in which case we account from the
+ * time stats gathering was turned on.
+ */
+ last = ktime_sub(ktime_get(), engine->stats.enabled_at);
+
+ engine->stats.total = ktime_add(engine->stats.total,
+ last);
+ }
+ }
+
+ write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
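
The context_in/out pair above only publishes under engine->stats.lock, so a
consumer samples the totals with the read-side retry loop. A hedged sketch of
such a reader, assuming stats.lock is a seqlock_t as the
write_seqlock_irqsave() calls imply (read_busy_time is an illustrative name):

    static ktime_t read_busy_time(const struct intel_engine_cs *engine)
    {
        unsigned int seq;
        ktime_t total;

        do {
            seq = read_seqbegin(&engine->stats.lock);

            total = engine->stats.total;
            /* Fold in the slice still accumulating, if any */
            if (engine->stats.active)
                total = ktime_add(total,
                                  ktime_sub(ktime_get(),
                                            engine->stats.start));
        } while (read_seqretry(&engine->stats.lock, seq));

        return total;
    }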
+
+static void restore_default_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state)
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+
+ execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+}
+
+static void reset_active(struct i915_request *rq,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context * const ce = rq->hw_context;
+ u32 head;
+
+ /*
+ * The executing context has been cancelled. We want to prevent
+ * further execution along this context and propagate the error on
+ * to anything depending on its results.
+ *
+ * In __i915_request_submit(), we apply the -EIO and remove the
+ * requests' payloads for any banned requests. But first, we must
+ * rewind the context back to the start of the incomplete request so
+ * that we do not jump back into the middle of the batch.
+ *
+ * We preserve the breadcrumbs and semaphores of the incomplete
+ * requests so that inter-timeline dependencies (i.e. other timelines)
+ * remain correctly ordered. And we defer to __i915_request_submit()
+ * so that all asynchronous waits are correctly handled.
+ */
+ GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+ __func__, engine->name, rq->fence.context, rq->fence.seqno);
+
+ /* On resubmission of the active request, its payload will be scrubbed */
+ if (i915_request_completed(rq))
+ head = rq->tail;
+ else
+ head = active_request(ce->timeline, rq)->head;
+ ce->ring->head = intel_ring_wrap(ce->ring, head);
+ intel_ring_update_space(ce->ring);
+
+ /* Scrub the context image to prevent replaying the previous batch */
+ restore_default_state(ce, engine);
+ __execlists_update_reg_state(ce, engine);
+
+ /* We've switched away, so this should be a no-op, but intent matters */
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+}
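
reset_active() leans on intel_ring_wrap() to clamp the recovered head back
into the ring. Since ring sizes are powers of two, that reduces to a mask — a
sketch under that assumption, not the driver's definition verbatim:

    static inline u32 ring_wrap(const struct intel_ring *ring, u32 pos)
    {
        return pos & (ring->size - 1); /* size is a power of two */
    }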
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
@@ -569,6 +1051,21 @@ __execlists_schedule_in(struct i915_request *rq)
intel_context_get(ce);
+ if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+ reset_active(rq, engine);
+
+ if (ce->tag) {
+ /* Use a fixed tag for OA and friends */
+ ce->lrc_desc |= (u64)ce->tag << 32;
+ } else {
+ /* We don't need a strict matching tag, just different values */
+ ce->lrc_desc &= ~GENMASK_ULL(47, 37);
+ ce->lrc_desc |=
+ (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
+ GEN11_SW_CTX_ID_SHIFT;
+ BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+ }
+
intel_gt_pm_get(engine->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
@@ -612,6 +1109,12 @@ __execlists_schedule_out(struct i915_request *rq,
{
struct intel_context * const ce = rq->hw_context;
+ /*
+ * NB process_csb() is not under the engine->active.lock and hence
+ * schedule_out can race with schedule_in, meaning that we should
+ * refrain from doing non-trivial work here.
+ */
+
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put(engine->gt);
@@ -654,7 +1157,7 @@ static u64 execlists_update_context(const struct i915_request *rq)
struct intel_context *ce = rq->hw_context;
u64 desc;
- ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+ ce->lrc_reg_state[CTX_RING_TAIL] =
intel_ring_set_tail(rq->ring, rq->tail);
/*
@@ -677,6 +1180,10 @@ static u64 execlists_update_context(const struct i915_request *rq)
desc = ce->lrc_desc;
ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+ /* Wa_1607138340:tgl */
+ if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
+ desc |= CTX_DESC_FORCE_RESTORE;
+
return desc;
}
@@ -699,6 +1206,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
+ if (!ports[0])
+ return;
+
GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
engine->name, msg,
ports[0]->fence.context,
@@ -719,25 +1229,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
trace_ports(execlists, msg, execlists->pending);
- if (!execlists->pending[0])
+ if (!execlists->pending[0]) {
+ GEM_TRACE_ERR("Nothing pending for promotion!\n");
return false;
+ }
- if (execlists->pending[execlists_num_ports(execlists)])
+ if (execlists->pending[execlists_num_ports(execlists)]) {
+ GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
+ execlists_num_ports(execlists));
return false;
+ }
for (port = execlists->pending; (rq = *port); port++) {
- if (ce == rq->hw_context)
+ if (ce == rq->hw_context) {
+ GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
ce = rq->hw_context;
if (i915_request_completed(rq))
continue;
- if (i915_active_is_idle(&ce->active))
+ if (i915_active_is_idle(&ce->active)) {
+ GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+ port - execlists->pending);
+ return false;
+ }
+
+ if (!i915_vma_is_pinned(ce->state)) {
+ GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
- if (!i915_vma_is_pinned(ce->state))
+ if (!i915_vma_is_pinned(ce->ring->vma)) {
+ GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+ port - execlists->pending);
return false;
+ }
}
return ce;
@@ -814,6 +1344,10 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
+ if (unlikely((prev->flags ^ next->flags) &
+ (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
+ return false;
+
if (!can_merge_ctx(prev->hw_context, next->hw_context))
return false;
@@ -823,47 +1357,7 @@ static bool can_merge_rq(const struct i915_request *prev,
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
- u32 base = engine->mmio_base;
-
- /* Must match execlists_init_reg_state()! */
-
- regs[CTX_CONTEXT_CONTROL] =
- i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base));
- regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base));
- regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base));
- regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base));
- regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base));
-
- regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base));
- regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base));
- regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base));
- regs[CTX_SECOND_BB_HEAD_U] =
- i915_mmio_reg_offset(RING_SBBADDR_UDW(base));
- regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base));
- regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base));
-
- regs[CTX_CTX_TIMESTAMP] =
- i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base));
- regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3));
- regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3));
- regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2));
- regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2));
- regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1));
- regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1));
- regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
- regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
-
- if (engine->class == RENDER_CLASS) {
- regs[CTX_RCS_INDIRECT_CTX] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX(base));
- regs[CTX_RCS_INDIRECT_CTX_OFFSET] =
- i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base));
- regs[CTX_BB_PER_CTX_PTR] =
- i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base));
-
- regs[CTX_R_PWR_CLK_STATE] =
- i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
- }
+ set_offsets(regs, reg_offsets(engine), engine);
}
static bool virtual_matches(const struct virtual_engine *ve,
@@ -978,7 +1472,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
{
int hint;
- if (!intel_engine_has_semaphores(engine))
+ if (!intel_engine_has_timeslices(engine))
return false;
if (list_is_last(&rq->sched.link, &engine->active.requests))
@@ -999,15 +1493,32 @@ switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
return rq_prio(list_next_entry(rq, sched.link));
}
-static bool
-enable_timeslice(const struct intel_engine_execlists *execlists)
+static inline unsigned long
+timeslice(const struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static unsigned long
+active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *execlists->active;
+ const struct i915_request *rq = *engine->execlists.active;
if (i915_request_completed(rq))
- return false;
+ return 0;
+
+ if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ return 0;
+
+ return timeslice(engine);
+}
+
+static void set_timeslice(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_timeslices(engine))
+ return;
- return execlists->switch_priority_hint >= effective_prio(rq);
+ set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1015,6 +1526,30 @@ static void record_preemption(struct intel_engine_execlists *execlists)
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = last_active(&engine->execlists);
+ if (!rq)
+ return 0;
+
+ /* Force a fast reset for terminated contexts (ignoring sysfs!) */
+ if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+ return 1;
+
+ return READ_ONCE(engine->props.preempt_timeout_ms);
+}
+
+static void set_preempt_timeout(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_has_preempt_reset(engine))
+ return;
+
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine));
+}
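
Both the timeslice and preempt timers are armed via set_timer_ms(), whose
contract here is that a zero duration disarms the timer — which is how
active_timeslice() and active_preempt_timeout() say "no timeout". A plausible
sketch of that contract (the real helper lives in the driver's utilities;
set_timer_ms_sketch is an illustrative stand-in):

    static void set_timer_ms_sketch(struct timer_list *t, unsigned long ms)
    {
        if (!ms) {
            del_timer(t); /* zero means "no timeout": leave the timer idle */
            return;
        }

        mod_timer(t, jiffies + msecs_to_jiffies(ms));
    }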
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1111,7 +1646,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
- !timer_pending(&engine->execlists.timer)) {
+ timer_expired(&engine->execlists.timer)) {
GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
engine->name,
last->fence.context,
@@ -1147,8 +1682,18 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* submission.
*/
if (!list_is_last(&last->sched.link,
- &engine->active.requests))
+ &engine->active.requests)) {
+ /*
+ * Even if ELSP[1] is occupied and not worthy
+ * of timeslices, our queue might be.
+ */
+ if (!execlists->timer.expires &&
+ need_timeslice(engine, last))
+ set_timer_ms(&execlists->timer,
+ timeslice(engine));
+
return;
+ }
/*
* WaIdleLiteRestore:bdw,skl
@@ -1216,7 +1761,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
unsigned int n;
GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- virtual_update_register_offsets(regs, engine);
+
+ if (!intel_engine_has_relative_mmio(engine))
+ virtual_update_register_offsets(regs,
+ engine);
if (!list_empty(&ve->context.signals))
virtual_xfer_breadcrumbs(ve, engine);
@@ -1299,6 +1847,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last->hw_context == rq->hw_context)
goto done;
+ if (i915_request_has_sentinel(last))
+ goto done;
+
/*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
@@ -1357,11 +1908,28 @@ done:
if (submit) {
*port = execlists_schedule_in(last, port - execlists->pending);
- memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists->switch_priority_hint =
switch_prio(engine, *execlists->pending);
+
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(execlists->active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
+
+ goto skip_submit;
+ }
+
+ memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists_submit_ports(engine);
+
+ set_preempt_timeout(engine);
} else {
+skip_submit:
ring_set_paused(engine, 0);
}
}
@@ -1394,13 +1962,6 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
-enum csb_step {
- CSB_NOP,
- CSB_PROMOTE,
- CSB_PREEMPT,
- CSB_COMPLETE,
-};
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -1427,7 +1988,7 @@ enum csb_step {
* bits 47-57: sw context id of the lrc the GT switched away from
* bits 58-63: sw counter of the lrc the GT switched away from
*/
-static inline enum csb_step
+static inline bool
gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
u32 lower_dw = csb[0];
@@ -1436,9 +1997,6 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw);
bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
- if (!ctx_away_valid && ctx_to_valid)
- return CSB_PROMOTE;
-
/*
* The context switch detail is not guaranteed to be 5 when a preemption
* occurs, so we can't just check for that. The check below works for
@@ -1446,8 +2004,10 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* instructions and lite-restore. Preempt-to-idle via the CTRL register
* would require some extra handling, but we don't support that.
*/
- if (new_queue && ctx_away_valid)
- return CSB_PREEMPT;
+ if (!ctx_away_valid || new_queue) {
+ GEM_BUG_ON(!ctx_to_valid);
+ return true;
+ }
/*
* switch detail = 5 is covered by the case above and we do not expect a
@@ -1455,30 +2015,13 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
* use polling mode.
*/
GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw));
-
- if (*execlists->active) {
- GEM_BUG_ON(!ctx_away_valid);
- return CSB_COMPLETE;
- }
-
- return CSB_NOP;
+ return false;
}
-static inline enum csb_step
+static inline bool
gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
{
- unsigned int status = *csb;
-
- if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
- return CSB_PROMOTE;
-
- if (status & GEN8_CTX_STATUS_PREEMPTED)
- return CSB_PREEMPT;
-
- if (*execlists->active)
- return CSB_COMPLETE;
-
- return CSB_NOP;
+ return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
}
static void process_csb(struct intel_engine_cs *engine)
@@ -1488,7 +2031,14 @@ static void process_csb(struct intel_engine_cs *engine)
const u8 num_entries = execlists->csb_size;
u8 head, tail;
- GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915));
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that only happens inside the reset paths, so access is
+ * serialised.
+ */
+ GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
+ !reset_in_progress(execlists));
+ GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1517,7 +2067,7 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
- enum csb_step csb_step;
+ bool promote;
if (++head == num_entries)
head = 0;
@@ -1545,20 +2095,19 @@ static void process_csb(struct intel_engine_cs *engine)
buf[2 * head + 0], buf[2 * head + 1]);
if (INTEL_GEN(engine->i915) >= 12)
- csb_step = gen12_csb_parse(execlists, buf + 2 * head);
+ promote = gen12_csb_parse(execlists, buf + 2 * head);
else
- csb_step = gen8_csb_parse(execlists, buf + 2 * head);
+ promote = gen8_csb_parse(execlists, buf + 2 * head);
+ if (promote) {
+ if (!inject_preempt_hang(execlists))
+ ring_set_paused(engine, 0);
- switch (csb_step) {
- case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
+ /* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", execlists->active);
-
while (*execlists->active)
execlists_schedule_out(*execlists->active++);
- /* fallthrough */
- case CSB_PROMOTE: /* switch pending to inflight */
- GEM_BUG_ON(*execlists->active);
+ /* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
execlists->active =
memcpy(execlists->inflight,
@@ -1566,16 +2115,13 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_num_ports(execlists) *
sizeof(*execlists->pending));
- if (enable_timeslice(execlists))
- mod_timer(&execlists->timer, jiffies + 1);
-
- if (!inject_preempt_hang(execlists))
- ring_set_paused(engine, 0);
+ set_timeslice(engine);
WRITE_ONCE(execlists->pending[0], NULL);
- break;
+ } else {
+ GEM_BUG_ON(!*execlists->active);
- case CSB_COMPLETE: /* port0 completed, advanced to port1 */
+ /* port0 completed, advanced to port1 */
trace_ports(execlists, "completed", execlists->active);
/*
@@ -1590,10 +2136,6 @@ static void process_csb(struct intel_engine_cs *engine)
GEM_BUG_ON(execlists->active - execlists->inflight >
execlists_num_ports(execlists));
- break;
-
- case CSB_NOP:
- break;
}
} while (head != tail);
@@ -1623,6 +2165,43 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
}
}
+static noinline void preempt_reset(struct intel_engine_cs *engine)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (i915_modparams.reset < 3)
+ return;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ /* Mark this tasklet as disabled to avoid waiting for it to complete */
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+
+ GEM_TRACE("%s: preempt timeout %lu+%ums\n",
+ engine->name,
+ READ_ONCE(engine->props.preempt_timeout_ms),
+ jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+ intel_engine_reset(engine, "preemption time out");
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+ const struct timer_list *t = &engine->execlists.preempt;
+
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+ return false;
+
+ if (!timer_expired(t))
+ return false;
+
+ return READ_ONCE(engine->execlists.pending[0]);
+}
+
/*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
@@ -1630,23 +2209,39 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
static void execlists_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
- unsigned long flags;
+ bool timeout = preempt_timeout(engine);
process_csb(engine);
- if (!READ_ONCE(engine->execlists.pending[0])) {
+ if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
+ unsigned long flags;
+
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
spin_unlock_irqrestore(&engine->active.lock, flags);
+
+ /* Recheck after serialising with direct-submission */
+ if (timeout && preempt_timeout(engine))
+ preempt_reset(engine);
}
}
-static void execlists_submission_timer(struct timer_list *timer)
+static void __execlists_kick(struct intel_engine_execlists *execlists)
{
- struct intel_engine_cs *engine =
- from_timer(engine, timer, execlists.timer);
-
/* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+ __execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+ execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+ execlists_kick(timer, preempt);
}
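
execlists_kick() recovers the execlists struct from whichever embedded timer
fired. Expanded by hand for the preempt timer, it is equivalent to:

    static void execlists_preempt_expanded(struct timer_list *timer)
    {
        struct intel_engine_execlists *execlists =
            container_of(timer, struct intel_engine_execlists, preempt);

        /* Kick the tasklet for interrupt coalescing and reset handling */
        tasklet_hi_schedule(&execlists->tasklet);
    }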
static void queue_request(struct intel_engine_cs *engine,
@@ -1726,7 +2321,6 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr += engine->context_size;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -1738,7 +2332,6 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
vaddr += engine->context_size;
if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE))
@@ -1752,14 +2345,13 @@ static void execlists_context_unpin(struct intel_context *ce)
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE,
ce->engine);
- i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
-__execlists_update_reg_state(struct intel_context *ce,
- struct intel_engine_cs *engine)
+__execlists_update_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
@@ -1767,16 +2359,16 @@ __execlists_update_reg_state(struct intel_context *ce,
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
- regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
- regs[CTX_RING_HEAD + 1] = ring->head;
- regs[CTX_RING_TAIL + 1] = ring->tail;
+ regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma);
+ regs[CTX_RING_HEAD] = ring->head;
+ regs[CTX_RING_TAIL] = ring->tail;
/* RPCS */
if (engine->class == RENDER_CLASS) {
- regs[CTX_R_PWR_CLK_STATE + 1] =
+ regs[CTX_R_PWR_CLK_STATE] =
intel_sseu_make_rpcs(engine->i915, &ce->sseu);
- i915_oa_init_reg_state(engine, ce, regs);
+ i915_oa_init_reg_state(ce, engine);
}
}
@@ -1802,18 +2394,12 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
- ret = i915_gem_context_pin_hw_id(ce->gem_context);
- if (ret)
- goto unpin_map;
-
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
__execlists_update_reg_state(ce, engine);
return 0;
-unpin_map:
- i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
intel_context_active_release(ce);
err:
@@ -1869,7 +2455,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1885,7 +2471,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = MI_NOOP;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_timeline(rq)->hwsp_offset;
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
@@ -1897,60 +2483,6 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
return 0;
}
-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
- int err, i;
- u32 *cs;
-
- GEM_BUG_ON(intel_vgpu_active(rq->i915));
-
- /*
- * Beware ye of the dragons, this sequence is magic!
- *
- * Small changes to this sequence can cause anything from
- * GPU hangs to forcewake errors and machine lockups!
- */
-
- /* Flush any residual operations from the context load */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Magic required to prevent forcewake errors! */
- err = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (err)
- return err;
-
- cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Ensure the LRI have landed before we invalidate & continue */
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- u32 base = engine->mmio_base;
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
- *cs++ = MI_NOOP;
-
- intel_ring_advance(rq, cs);
-
- /* Be doubly sure the LRI have landed before proceeding */
- err = engine->emit_flush(rq, EMIT_FLUSH);
- if (err)
- return err;
-
- /* Re-invalidate the TLB for luck */
- return engine->emit_flush(rq, EMIT_INVALIDATE);
-}
-
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
@@ -1973,10 +2505,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->hw_context->vm))
- ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
- else
- ret = emit_pdps(request);
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
@@ -2028,12 +2557,6 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
return batch;
}
-static u32 slm_offset(struct intel_engine_cs *engine)
-{
- return intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
-}
-
/*
* Typically we only have one indirect_ctx and per_ctx batch buffer which are
* initialized at the beginning and shared across all contexts but this field
@@ -2062,10 +2585,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* Actual scratch location is at 128 bytes offset */
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- slm_offset(engine));
+ LRC_PPHWSP_SCRATCH_ADDR);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2423,27 +2946,29 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
&execlists->csb_status[reset_value]);
}
-static struct i915_request *active_request(struct i915_request *rq)
+static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
- const struct intel_context * const ce = rq->hw_context;
- struct i915_request *active = NULL;
- struct list_head *list;
-
- if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
- return rq;
-
- list = &rq->timeline->requests;
- list_for_each_entry_from_reverse(rq, list, link) {
- if (i915_request_completed(rq))
- break;
+ if (INTEL_GEN(engine->i915) >= 12)
+ return 0x60;
+ else if (INTEL_GEN(engine->i915) >= 9)
+ return 0x54;
+ else if (engine->class == RENDER_CLASS)
+ return 0x58;
+ else
+ return -1;
+}
- if (rq->hw_context != ce)
- break;
+static void __execlists_reset_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
+{
+ u32 *regs = ce->lrc_reg_state;
+ int x;
- active = rq;
+ x = lrc_ring_mi_mode(engine);
+ if (x != -1) {
+ regs[x + 1] &= ~STOP_RING;
+ regs[x + 1] |= STOP_RING << 16;
}
-
- return active;
}
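
The two writes above follow the hardware's masked-register convention: the
high 16 bits of the value select which of the low 16 bits take effect, so a
single write can flip one bit while leaving its neighbours alone. The usual
helper macros capture the pattern — a sketch, modulo the driver's exact type
plumbing:

    #define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a)) /* set bit (a)   */
    #define MASKED_BIT_DISABLE(a)  ((a) << 16)         /* clear bit (a) */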
static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2451,7 +2976,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
struct i915_request *rq;
- u32 *regs;
+
+ mb(); /* paranoia: read the CSB pointers from after the reset */
+ clflush(execlists->csb_write);
+ mb();
process_csb(engine); /* drain preemption events */
@@ -2467,16 +2995,23 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
ce = rq->hw_context;
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
- rq = active_request(rq);
- if (!rq) {
- ce->ring->head = ce->ring->tail;
+
+ if (i915_request_completed(rq)) {
+ /* Idle context; tidy up the ring so we can restart afresh */
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
}
+ /* Context has requests still in-flight; it should not be idle! */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ rq = active_request(ce->timeline, rq);
ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(ce->ring->head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -2516,19 +3051,16 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = ce->lrc_reg_state;
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ restore_default_state(ce, engine);
out_replay:
- GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
+ GEM_TRACE("%s replay {head:%04x, tail:%04x}\n",
engine->name, ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
+ __execlists_reset_reg_state(ce, engine);
__execlists_update_reg_state(ce, engine);
+ ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
/* Push back any incomplete requests for replay after the reset. */
@@ -2749,7 +3281,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
}
*cs++ = cmd;
- *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = LRC_PPHWSP_SCRATCH_ADDR;
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
intel_ring_advance(request, cs);
@@ -2760,10 +3292,6 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
static int gen8_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- u32 scratch_addr =
- intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2785,7 +3313,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
/*
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
@@ -2818,7 +3346,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (dc_flush_wa)
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
@@ -2831,11 +3359,6 @@ static int gen8_emit_flush_render(struct i915_request *request,
static int gen11_emit_flush_render(struct i915_request *request,
u32 mode)
{
- struct intel_engine_cs *engine = request->engine;
- const u32 scratch_addr =
- intel_gt_scratch_offset(engine->gt,
- INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
-
if (mode & EMIT_FLUSH) {
u32 *cs;
u32 flags = 0;
@@ -2848,13 +3371,13 @@ static int gen11_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
cs = intel_ring_begin(request, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(request, cs);
}
@@ -2872,14 +3395,106 @@ static int gen11_emit_flush_render(struct i915_request *request,
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
- flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ return 0;
+}
+
+static u32 preparser_disable(bool state)
+{
+ return MI_ARB_CHECK | 1 << 8 | state;
+}
+
+static int gen12_emit_flush_render(struct i915_request *request,
+ u32 mode)
+{
+ if (mode & EMIT_FLUSH) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /* Wa_1409600907:tgl */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
cs = intel_ring_begin(request, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
- cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
+
+ if (mode & EMIT_INVALIDATE) {
+ u32 flags = 0;
+ u32 *cs;
+
+ flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ flags |= PIPE_CONTROL_CS_STALL;
+
+ cs = intel_ring_begin(request, 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Prevent the pre-parser from skipping past the TLB
+ * invalidate and loading a stale page for the batch
+ * buffer / request payload.
+ */
+ *cs++ = preparser_disable(true);
+
+ cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
+
+ *cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
+
+ /*
+ * Wa_1604544889:tgl
+ */
+ if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ flags = 0;
+ flags |= PIPE_CONTROL_CS_STALL;
+ flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
+
+ flags |= PIPE_CONTROL_STORE_DATA_INDEX;
+ flags |= PIPE_CONTROL_QW_WRITE;
+
+ cs = intel_ring_begin(request, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ cs = gen8_emit_pipe_control(cs, flags,
+ LRC_PPHWSP_SCRATCH_ADDR);
+ intel_ring_advance(request, cs);
+ }
}
return 0;
@@ -2933,7 +3548,7 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
0);
return gen8_emit_fini_breadcrumb_footer(request, cs);
@@ -2941,28 +3556,28 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
- cs = gen8_emit_ggtt_write_rcs(cs,
- request->fence.seqno,
- request->timeline->hwsp_offset,
- PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DC_FLUSH_ENABLE);
-
- /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_pipe_control(cs,
- PIPE_CONTROL_FLUSH_ENABLE |
- PIPE_CONTROL_CS_STALL,
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
+ /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
return gen8_emit_fini_breadcrumb_footer(request, cs);
}
-static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
- u32 *cs)
+static u32 *
+gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write_rcs(cs,
request->fence.seqno,
- request->timeline->hwsp_offset,
+ i915_request_active_timeline(request)->hwsp_offset,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -2973,9 +3588,88 @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request,
return gen8_emit_fini_breadcrumb_footer(request, cs);
}
+/*
+ * Note that the CS instruction pre-parser will not stall on the breadcrumb
+ * flush and will continue pre-fetching the instructions after it before the
+ * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
+ * BB_START/END instructions, so, even though we might pre-fetch the preamble
+ * of the next request before the memory has been flushed, we're guaranteed that
+ * we won't access the batch itself too early.
+ * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
+ * so, if the current request is modifying an instruction in the next request on
+ * the same intel_context, we might pre-fetch and then execute the pre-update
+ * instruction. To avoid this, the users of self-modifying code should either
+ * disable the parser around the code emitting the memory writes, via a new flag
+ * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
+ * the in-kernel use-cases we've opted to use a separate context, see
+ * reloc_gpu() as an example.
+ * All the above applies only to the instructions themselves. Non-inline data
+ * used by the instructions is not pre-fetched.
+ */
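
As a hedged illustration of the first option, using the preparser_disable()
helper added above — rq and the placeholder write are illustrative, and note
the in-kernel users actually take the separate-context route:

    u32 *cs;

    cs = intel_ring_begin(rq, 4);
    if (IS_ERR(cs))
        return PTR_ERR(cs);

    *cs++ = preparser_disable(true);  /* stall pre-fetch past this point */
    *cs++ = MI_NOOP;                  /* stand-in for self-modifying writes */
    *cs++ = preparser_disable(false); /* let the pre-parser run again */
    *cs++ = MI_NOOP;                  /* keep the emission qword-aligned */

    intel_ring_advance(rq, cs);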
+
+static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ return cs;
+}
+
+static __always_inline u32*
+gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_USER_INTERRUPT;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = gen12_emit_preempt_busywait(request, cs);
+
+ request->tail = intel_ring_offset(request, cs);
+ assert_ring_tail_valid(request->ring, request->tail);
+
+ return gen8_emit_wa_tail(request, cs);
+}
+
+static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ 0);
+
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
+
+static u32 *
+gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
+{
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->fence.seqno,
+ i915_request_active_timeline(request)->hwsp_offset,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_TILE_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ /* Wa_1409600907:tgl */
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_HDC_PIPELINE_FLUSH);
+
+ return gen12_emit_fini_breadcrumb_footer(request, cs);
+}
+
static void execlists_park(struct intel_engine_cs *engine)
{
- del_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.timer);
+ cancel_timer(&engine->execlists.preempt);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -2998,6 +3692,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
}
+
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
}
static void execlists_destroy(struct intel_engine_cs *engine)
@@ -3025,6 +3722,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
+ if (INTEL_GEN(engine->i915) >= 12)
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb;
engine->set_default_submission = intel_execlists_set_default_submission;
@@ -3070,6 +3769,9 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
case 12:
+ engine->emit_flush = gen12_emit_flush_render;
+ engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
+ break;
case 11:
engine->emit_flush = gen11_emit_flush_render;
engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
@@ -3085,7 +3787,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
- timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
+ timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+ timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
@@ -3142,7 +3845,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
return 0;
}
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
+static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
@@ -3175,86 +3878,50 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
return indirect_ctx_offset;
}
-static void execlists_init_reg_state(u32 *regs,
- struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_ring *ring)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
- bool rcs = engine->class == RENDER_CLASS;
- u32 base = engine->mmio_base;
- /*
- * A context is actually a big batch buffer with several
- * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
- * values we are setting here are only for the first context restore:
- * on a subsequent save, the GPU will recreate this batchbuffer with new
- * values (including all the missing MI_LOAD_REGISTER_IMM commands that
- * we are not initializing here).
- *
- * Must keep consistent with virtual_update_register_offsets().
- */
- regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
- MI_LRI_FORCE_POSTED;
-
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
+static void init_common_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring)
+{
+ regs[CTX_CONTEXT_CONTROL] =
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(engine->i915) < 11) {
- regs[CTX_CONTEXT_CONTROL + 1] |=
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ if (INTEL_GEN(engine->i915) < 11)
+ regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
- }
- CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
- CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
- CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
- RING_CTL_SIZE(ring->size) | RING_VALID);
- CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
- CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
- CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
- if (rcs) {
- struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
-
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
- CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
- RING_INDIRECT_CTX_OFFSET(base), 0);
- if (wa_ctx->indirect_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
-
- regs[CTX_RCS_INDIRECT_CTX + 1] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset) |
- (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
-
- regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
- intel_lr_indirect_ctx_offset(engine) << 6;
- }
- CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
- if (wa_ctx->per_ctx.size) {
- u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+ regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID;
+ regs[CTX_BB_STATE] = RING_BB_PPGTT;
+}
- regs[CTX_BB_PER_CTX_PTR + 1] =
- (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
- }
+static void init_wa_bb_reg_state(u32 * const regs,
+ const struct intel_engine_cs *engine,
+ u32 pos_bb_per_ctx)
+{
+ const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
+
+ if (wa_ctx->per_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ regs[pos_bb_per_ctx] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
- regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+ if (wa_ctx->indirect_ctx.size) {
+ const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
+
+ regs[pos_bb_per_ctx + 2] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
- CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
- /* PDP values well be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+ regs[pos_bb_per_ctx + 4] =
+ intel_lr_indirect_ctx_offset(engine) << 6;
+ }
+}
+static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt)
+{
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
@@ -3267,15 +3934,47 @@ static void execlists_init_reg_state(u32 *regs,
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
+}
- if (rcs) {
- regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
+static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ return i915_vm_to_ggtt(vm)->alias;
+ else
+ return i915_vm_to_ppgtt(vm);
+}
+
+static void execlists_init_reg_state(u32 *regs,
+ const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const struct intel_ring *ring,
+ bool close)
+{
+ /*
+ * A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
+ *
+ * Must keep consistent with virtual_update_register_offsets().
+ */
+ u32 *bbe = set_offsets(regs, reg_offsets(engine), engine);
+
+ if (close) { /* Close the batch; used mainly by live_lrc_layout() */
+ *bbe = MI_BATCH_BUFFER_END;
+ if (INTEL_GEN(engine->i915) >= 10)
+ *bbe |= BIT(0);
}
- regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(engine->i915) >= 10)
- regs[CTX_END] |= BIT(0);
+ init_common_reg_state(regs, engine, ring);
+ init_ppgtt_reg_state(regs, vm_alias(ce->vm));
+
+ init_wa_bb_reg_state(regs, engine,
+ INTEL_GEN(engine->i915) >= 12 ?
+ GEN12_CTX_BB_PER_CTX_PTR :
+ CTX_BB_PER_CTX_PTR);
}
static int
@@ -3284,6 +3983,7 @@ populate_lr_context(struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
+ bool inhibit = true;
void *vaddr;
u32 *regs;
int ret;
@@ -3298,12 +3998,6 @@ populate_lr_context(struct intel_context *ce,
set_redzone(vaddr, engine);
if (engine->default_state) {
- /*
- * We only want to copy over the template context state;
- * skipping over the headers reserved for GuC communication,
- * leaving those as zero.
- */
- const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
void *defaults;
defaults = i915_gem_object_pin_map(engine->default_state,
@@ -3313,23 +4007,22 @@ populate_lr_context(struct intel_context *ce,
goto err_unpin_ctx;
}
- memcpy(vaddr + start, defaults + start, engine->context_size);
+ memcpy(vaddr, defaults, engine->context_size);
i915_gem_object_unpin_map(engine->default_state);
+ inhibit = false;
}
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ce, engine, ring);
- if (!engine->default_state)
- regs[CTX_CONTEXT_CONTROL + 1] |=
+ execlists_init_reg_state(regs, ce, engine, ring, inhibit);
+ if (inhibit)
+ regs[CTX_CONTEXT_CONTROL] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
ret = 0;
err_unpin_ctx:
- __i915_gem_object_flush_map(ctx_obj,
- LRC_HEADER_PAGES * PAGE_SIZE,
- engine->context_size);
+ __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
@@ -3346,11 +4039,6 @@ static int __execlists_context_alloc(struct intel_context *ce,
GEM_BUG_ON(ce->state);
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
- /*
- * Before the actual start of the context image, we insert a few pages
- * for our own use and for sharing with the GuC.
- */
- context_size += LRC_HEADER_PAGES * PAGE_SIZE;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
@@ -3462,8 +4150,9 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
return;
swap(ve->siblings[swp], ve->siblings[0]);
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- ve->siblings[0]);
+ if (!intel_engine_has_relative_mmio(ve->siblings[0]))
+ virtual_update_register_offsets(ve->context.lrc_reg_state,
+ ve->siblings[0]);
}
static int virtual_context_pin(struct intel_context *ce)
@@ -3714,6 +4403,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.i915 = ctx->i915;
ve->base.gt = siblings[0]->gt;
+ ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
@@ -3737,6 +4427,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
+ intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
@@ -3899,6 +4590,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
return 0;
}
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling)
+{
+ struct virtual_engine *ve = to_virtual_engine(engine);
+
+ if (sibling >= ve->num_siblings)
+ return NULL;
+
+ return ve->siblings[sibling];
+}
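
A caller can walk the physical engines behind a virtual engine by probing
indices until NULL is returned — a minimal usage sketch, with ve standing in
for the virtual engine:

    struct intel_engine_cs *sibling;
    unsigned int n = 0;

    while ((sibling = intel_virtual_engine_get_sibling(ve, n++)))
        pr_info("sibling %u: %s\n", n - 1, sibling->name);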
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -3987,6 +4690,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
u32 head,
bool scrub)
{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
@@ -3995,16 +4700,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- if (scrub) {
- u32 *regs = ce->lrc_reg_state;
-
- if (engine->pinned_default_state) {
- memcpy(regs, /* skip restoring the vanilla PPHWSP */
- engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
- engine->context_size - PAGE_SIZE);
- }
- execlists_init_reg_state(regs, ce, engine, ce->ring);
- }
+ if (scrub)
+ restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
ce->ring->head = head;
@@ -4013,6 +4710,13 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
__execlists_update_reg_state(ce, engine);
}
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
+{
+ return engine->set_default_submission ==
+ intel_execlists_set_default_submission;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index c2bba82bcc16..04511d8ebdc1 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -43,6 +43,7 @@ struct intel_engine_cs;
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8)
#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
@@ -66,6 +67,12 @@ struct intel_engine_cs;
#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
+#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */
+/* in Gen12 ID 0x7FF is reserved to indicate idle */
+#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
+
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
INTEL_CONTEXT_SCHEDULE_OUT,
@@ -79,30 +86,15 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine);
int intel_execlists_submission_init(struct intel_engine_cs *engine);
/* Logical Ring Contexts */
-
-/*
- * We allocate a header at the start of the context image for our own
- * use, therefore the actual location of the logical state is offset
- * from the start of the VMA. The layout is
- *
- * | [guc] | [hwsp] [logical state] |
- * |<- our header ->|<- context image ->|
- *
- */
-/* The first page is used for sharing data with the GuC */
-#define LRC_GUCSHR_PN (0)
-#define LRC_GUCSHR_SZ (1)
/* At the start of the context image is its per-process HWS page */
-#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ)
+#define LRC_PPHWSP_PN (0)
#define LRC_PPHWSP_SZ (1)
-/* Finally we have the logical state for the context */
+/* After the PPHWSP we have the logical state for the context */
#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ)
-/*
- * Currently we include the PPHWSP in __intel_engine_context_size() so
- * the size of the header is synonymous with the start of the PPHWSP.
- */
-#define LRC_HEADER_PAGES LRC_PPHWSP_PN
+/* Space within PPHWSP reserved to be used as scratch */
+#define LRC_PPHWSP_SCRATCH 0x34
+#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32))
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
@@ -131,4 +123,11 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
const struct intel_engine_cs *master,
const struct intel_engine_cs *sibling);
+struct intel_engine_cs *
+intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
+ unsigned int sibling);
+
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+
#endif /* _INTEL_LRC_H_ */
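With the GuC-shared header pages gone, the defines above describe a context image that begins directly at the per-process HWSP. A quick sketch of the byte offsets they resolve to, assuming the usual 4 KiB page size (illustrative values only):

/* Offsets implied by the layout defines above. */
enum {
        PPHWSP_OFFSET  = LRC_PPHWSP_PN * 4096,  /* 0x0000: per-process HWSP */
        STATE_OFFSET   = LRC_STATE_PN * 4096,   /* 0x1000: register state */
        SCRATCH_OFFSET = PPHWSP_OFFSET + LRC_PPHWSP_SCRATCH_ADDR, /* 0x00d0 */
};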
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index b8f20ad71169..06ab0276e10e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,55 +9,41 @@
#include <linux/types.h>
-/* GEN8+ Reg State Context */
-#define CTX_LRI_HEADER_0 0x01
-#define CTX_CONTEXT_CONTROL 0x02
-#define CTX_RING_HEAD 0x04
-#define CTX_RING_TAIL 0x06
-#define CTX_RING_BUFFER_START 0x08
-#define CTX_RING_BUFFER_CONTROL 0x0a
-#define CTX_BB_HEAD_U 0x0c
-#define CTX_BB_HEAD_L 0x0e
-#define CTX_BB_STATE 0x10
-#define CTX_SECOND_BB_HEAD_U 0x12
-#define CTX_SECOND_BB_HEAD_L 0x14
-#define CTX_SECOND_BB_STATE 0x16
-#define CTX_BB_PER_CTX_PTR 0x18
-#define CTX_RCS_INDIRECT_CTX 0x1a
-#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
-#define CTX_LRI_HEADER_1 0x21
-#define CTX_CTX_TIMESTAMP 0x22
-#define CTX_PDP3_UDW 0x24
-#define CTX_PDP3_LDW 0x26
-#define CTX_PDP2_UDW 0x28
-#define CTX_PDP2_LDW 0x2a
-#define CTX_PDP1_UDW 0x2c
-#define CTX_PDP1_LDW 0x2e
-#define CTX_PDP0_UDW 0x30
-#define CTX_PDP0_LDW 0x32
-#define CTX_LRI_HEADER_2 0x41
-#define CTX_R_PWR_CLK_STATE 0x42
-#define CTX_END 0x44
-
-#define CTX_REG(reg_state, pos, reg, val) do { \
- u32 *reg_state__ = (reg_state); \
- const u32 pos__ = (pos); \
- (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \
- (reg_state__)[(pos__) + 1] = (val); \
-} while (0)
+/* GEN8 to GEN11 Reg State Context */
+#define CTX_CONTEXT_CONTROL (0x02 + 1)
+#define CTX_RING_HEAD (0x04 + 1)
+#define CTX_RING_TAIL (0x06 + 1)
+#define CTX_RING_BUFFER_START (0x08 + 1)
+#define CTX_RING_BUFFER_CONTROL (0x0a + 1)
+#define CTX_BB_STATE (0x10 + 1)
+#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_PDP3_UDW (0x24 + 1)
+#define CTX_PDP3_LDW (0x26 + 1)
+#define CTX_PDP2_UDW (0x28 + 1)
+#define CTX_PDP2_LDW (0x2a + 1)
+#define CTX_PDP1_UDW (0x2c + 1)
+#define CTX_PDP1_LDW (0x2e + 1)
+#define CTX_PDP0_UDW (0x30 + 1)
+#define CTX_PDP0_LDW (0x32 + 1)
+#define CTX_R_PWR_CLK_STATE (0x42 + 1)
+
+#define GEN9_CTX_RING_MI_MODE 0x54
+
+/* GEN12+ Reg State Context */
+#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1)
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \
- (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \
} while (0)
#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
u32 *reg_state__ = (reg_state); \
const u64 addr__ = px_dma(ppgtt->pd); \
- (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \
- (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \
+ (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \
} while (0)
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
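Folding the '+ 1' into the CTX_* defines means each constant now indexes the value dword that follows the register offset in the LRI-formatted state, so users such as ASSIGN_CTX_PDP above no longer add one at every call site. A sketch of the 64-bit page-directory write this enables; write_pdp0 is a hypothetical helper mirroring ASSIGN_CTX_PML4:

/* Split a 64-bit page-directory address across the UDW/LDW value
 * slots; the defines already point at the value dwords. */
static void write_pdp0(u32 *reg_state, u64 addr)
{
        reg_state[CTX_PDP0_UDW] = (u32)(addr >> 32); /* upper 32 bits */
        reg_state[CTX_PDP0_LDW] = (u32)addr;         /* lower 32 bits */
}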
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index cea184a7dde9..2b977991b785 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -26,6 +26,7 @@
#include "intel_gt.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
+#include "intel_ring.h"
/* structures required */
struct drm_i915_mocs_entry {
@@ -279,10 +280,9 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
GEN11_MOCS_ENTRIES
};
-static bool get_mocs_settings(struct intel_gt *gt,
+static bool get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
- struct drm_i915_private *i915 = gt->i915;
bool result = false;
if (INTEL_GEN(i915) >= 12) {
@@ -323,9 +323,9 @@ static bool get_mocs_settings(struct intel_gt *gt,
return result;
}
-static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
+static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index)
{
- switch (engine_id) {
+ switch (engine->id) {
case RCS0:
return GEN9_GFX_MOCS(index);
case VCS0:
@@ -339,7 +339,7 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
case VCS2:
return GEN11_MFX2_MOCS(index);
default:
- MISSING_CASE(engine_id);
+ MISSING_CASE(engine->id);
return INVALID_MMIO_REG;
}
}
@@ -357,118 +357,25 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
return table->table[I915_MOCS_PTE].control_value;
}
-/**
- * intel_mocs_init_engine() - emit the mocs control table
- * @engine: The engine for whom to emit the registers.
- *
- * This function simply emits a MI_LOAD_REGISTER_IMM command for the
- * given table starting at the given address.
- */
-void intel_mocs_init_engine(struct intel_engine_cs *engine)
+static void init_mocs_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- struct intel_gt *gt = engine->gt;
- struct intel_uncore *uncore = gt->uncore;
- struct drm_i915_mocs_table table;
- unsigned int index;
- u32 unused_value;
-
- /* Platforms with global MOCS do not need per-engine initialization. */
- if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- return;
-
- /* Called under a blanket forcewake */
- assert_forcewakes_active(uncore, FORCEWAKE_ALL);
-
- if (!get_mocs_settings(gt, &table))
- return;
-
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].control_value;
-
- for (index = 0; index < table.size; index++) {
- u32 value = get_entry_control(&table, index);
+ struct intel_uncore *uncore = engine->uncore;
+ u32 unused_value = table->table[I915_MOCS_PTE].control_value;
+ unsigned int i;
+ for (i = 0; i < table->size; i++)
intel_uncore_write_fw(uncore,
- mocs_register(engine->id, index),
- value);
- }
+ mocs_register(engine, i),
+ get_entry_control(table, i));
- /* All remaining entries are also unused */
- for (; index < table.n_entries; index++)
+ /* All remaining entries are unused */
+ for (; i < table->n_entries; i++)
intel_uncore_write_fw(uncore,
- mocs_register(engine->id, index),
+ mocs_register(engine, i),
unused_value);
}
-static void intel_mocs_init_global(struct intel_gt *gt)
-{
- struct intel_uncore *uncore = gt->uncore;
- struct drm_i915_mocs_table table;
- unsigned int index;
-
- GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
-
- if (!get_mocs_settings(gt, &table))
- return;
-
- if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
- return;
-
- for (index = 0; index < table.size; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[index].control_value);
-
- /*
- * Ok, now set the unused entries to the invalid entry (index 0). These
- * entries are officially undefined and no contract for the contents and
- * settings is given for these entries.
- */
- for (; index < table.n_entries; index++)
- intel_uncore_write(uncore,
- GEN12_GLOBAL_MOCS(index),
- table.table[0].control_value);
-}
-
-static int emit_mocs_control_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
-{
- enum intel_engine_id engine = rq->engine->id;
- unsigned int index;
- u32 unused_value;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].control_value;
-
- cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries);
-
- for (index = 0; index < table->size; index++) {
- u32 value = get_entry_control(table, index);
-
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = value;
- }
-
- /* All remaining entries are also unused */
- for (; index < table->n_entries; index++) {
- *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
- *cs++ = unused_value;
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
/*
* Get l3cc_value from MOCS entry taking into account when it's not used:
* I915_MOCS_PTE's value is returned in this case.
@@ -486,141 +393,99 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
u16 low,
u16 high)
{
- return low | high << 16;
+ return low | (u32)high << 16;
}
-static int emit_mocs_l3cc_table(struct i915_request *rq,
- const struct drm_i915_mocs_table *table)
+static void init_l3cc_table(struct intel_engine_cs *engine,
+ const struct drm_i915_mocs_table *table)
{
- u16 unused_value;
+ struct intel_uncore *uncore = engine->uncore;
+ u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value;
unsigned int i;
- u32 *cs;
-
- if (GEM_WARN_ON(table->size > table->n_entries))
- return -ENODEV;
-
- /* Set unused values to PTE */
- unused_value = table->table[I915_MOCS_PTE].l3cc_value;
-
- cs = intel_ring_begin(rq, 2 + table->n_entries);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2);
for (i = 0; i < table->size / 2; i++) {
u16 low = get_entry_l3cc(table, 2 * i);
u16 high = get_entry_l3cc(table, 2 * i + 1);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, high);
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, low, high));
}
/* Odd table size - 1 left over */
- if (table->size & 0x01) {
+ if (table->size & 1) {
u16 low = get_entry_l3cc(table, 2 * i);
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, low, unused_value);
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, low, unused_value));
i++;
}
/* All remaining entries are also unused */
- for (; i < table->n_entries / 2; i++) {
- *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
- *cs++ = l3cc_combine(table, unused_value, unused_value);
- }
+ for (; i < table->n_entries / 2; i++)
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(table, unused_value,
+ unused_value));
+}
+
+void intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_mocs_table table;
+
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ if (!get_mocs_settings(engine->i915, &table))
+ return;
+
+ /* Platforms with global MOCS do not need per-engine initialization. */
+ if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ init_mocs_table(engine, &table);
- return 0;
+ if (engine->class == RENDER_CLASS)
+ init_l3cc_table(engine, &table);
}
-static void intel_mocs_init_l3cc_table(struct intel_gt *gt)
+static void intel_mocs_init_global(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
- unsigned int i;
- u16 unused_value;
+ unsigned int index;
- if (!get_mocs_settings(gt, &table))
+ /* LLC and eDRAM control values are not applicable to dgfx */
+ if (IS_DGFX(gt->i915))
return;
- /* Set unused values to PTE */
- unused_value = table.table[I915_MOCS_PTE].l3cc_value;
-
- for (i = 0; i < table.size / 2; i++) {
- u16 low = get_entry_l3cc(&table, 2 * i);
- u16 high = get_entry_l3cc(&table, 2 * i + 1);
+ GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915));
- intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, high));
- }
+ if (!get_mocs_settings(gt->i915, &table))
+ return;
- /* Odd table size - 1 left over */
- if (table.size & 0x01) {
- u16 low = get_entry_l3cc(&table, 2 * i);
+ if (GEM_DEBUG_WARN_ON(table.size > table.n_entries))
+ return;
+ for (index = 0; index < table.size; index++)
intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, unused_value));
- i++;
- }
+ GEN12_GLOBAL_MOCS(index),
+ table.table[index].control_value);
- /* All remaining entries are also unused */
- for (; i < table.n_entries / 2; i++)
+ /*
+ * Ok, now set the unused entries to the invalid entry (index 0). These
+ * entries are officially undefined and no contract is given for their
+ * contents or settings.
+ */
+ for (; index < table.n_entries; index++)
intel_uncore_write(uncore,
- GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, unused_value,
- unused_value));
-}
-
-/**
- * intel_mocs_emit() - program the MOCS register.
- * @rq: Request to use to set up the MOCS tables.
- *
- * This function will emit a batch buffer with the values required for
- * programming the MOCS register values for all the currently supported
- * rings.
- *
- * These registers are partially stored in the RCS context, so they are
- * emitted at the same time so that when a context is created these registers
- * are set up. These registers have to be emitted into the start of the
- * context as setting the ELSP will re-init some of these registers back
- * to the hw values.
- *
- * Return: 0 on success, otherwise the error status.
- */
-int intel_mocs_emit(struct i915_request *rq)
-{
- struct drm_i915_mocs_table t;
- int ret;
-
- if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) ||
- rq->engine->class != RENDER_CLASS)
- return 0;
-
- if (get_mocs_settings(rq->engine->gt, &t)) {
- /* Program the RCS control registers */
- ret = emit_mocs_control_table(rq, &t);
- if (ret)
- return ret;
-
- /* Now program the l3cc registers */
- ret = emit_mocs_l3cc_table(rq, &t);
- if (ret)
- return ret;
- }
-
- return 0;
+ GEN12_GLOBAL_MOCS(index),
+ table.table[0].control_value);
}
void intel_mocs_init(struct intel_gt *gt)
{
- intel_mocs_init_l3cc_table(gt);
-
if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
intel_mocs_init_global(gt);
}
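Each GEN9_LNCFCMOCS register packs two 16-bit L3CC entries, the even-indexed entry in bits 15:0 and the odd-indexed one in bits 31:16, which is why init_l3cc_table walks the table in pairs and pads an odd tail with the unused (PTE) value. A self-contained sketch of just the packing arithmetic:

#include <stdint.h>

/* Two 16-bit L3CC entries share one 32-bit register. */
static uint32_t l3cc_pack(uint16_t low, uint16_t high)
{
        return (uint32_t)low | ((uint32_t)high << 16);
}

/* Walk entries two at a time; an odd tail is paired with 'unused'. */
static void pack_l3cc(const uint16_t *e, unsigned int n,
                      uint16_t unused, uint32_t *out)
{
        unsigned int i;

        for (i = 0; i < n / 2; i++)
                out[i] = l3cc_pack(e[2 * i], e[2 * i + 1]);
        if (n & 1)
                out[i] = l3cc_pack(e[n - 1], unused);
}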
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 2ae816b7ca19..83371f3e6ba1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,13 +49,10 @@
* context handling keep the MOCS in step.
*/
-struct i915_request;
struct intel_engine_cs;
struct intel_gt;
void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
-int intel_mocs_emit(struct i915_request *rq);
-
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
new file mode 100644
index 000000000000..700104b90163
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -0,0 +1,787 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_rc6.h"
+#include "intel_sideband.h"
+
+/**
+ * DOC: RC6
+ *
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, drawing as little as 0V in this state. The
+ * stage is entered automatically whenever the GPU is idle and RC6 support
+ * is enabled, and the GPU wakes up automatically as soon as a new
+ * workload arrives.
+ *
+ * Intel GPUs provide several RC6 modes, which differ in the latency
+ * required to enter and leave RC6 and in the voltage the GPU consumes
+ * while in each state.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
+ * RC6pp is the deepest RC6. Hardware support varies with the GPU, BIOS,
+ * chipset and platform. RC6 is usually the safest mode and the one that
+ * brings the most power savings; deeper states save more power but incur
+ * higher latency to enter and to wake from.
+ */
+
+static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
+{
+ return container_of(rc6, struct intel_gt, rc6);
+}
+
+static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->uncore;
+}
+
+static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
+{
+ return rc6_to_gt(rc)->i915;
+}
+
+static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
+static void gen11_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds. */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1));
+
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+}
+
+static void gen9_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6_mode;
+
+ /* 2b: Program RC6 thresholds. */
+ if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+ } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
+ /*
+ * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+ * when CPG is enabled
+ */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ } else {
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+ }
+
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from plane_state is that we
+ * do not want the enable hysteresis to be less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+ /* WaRsUseTimeoutMode:cnl (pre-prod) */
+ if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
+ rc6_mode = GEN7_RC_CTL_TO_MODE;
+ else
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ rc6_mode);
+
+ /*
+ * WaRsDisableCoarsePowerGating:skl,cnl
+ * - Render/Media PG need to be disabled with RC6.
+ */
+ if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
+ set(uncore, GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
+}
+
+static void gen8_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2b: Program RC6 thresholds. */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+
+ /* 3: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ GEN6_RC_CTL_RC6_ENABLE);
+}
+
+static void gen6_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 rc6vids, rc6_mask;
+ int ret;
+
+ set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC_SLEEP, 0);
+ set(uncore, GEN6_RC1e_THRESHOLD, 1000);
+ if (IS_IVYBRIDGE(i915))
+ set(uncore, GEN6_RC6_THRESHOLD, 125000);
+ else
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6p_THRESHOLD, 150000);
+ set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ /* We don't use RC6p/RC6pp on Haswell */
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+ if (HAS_RC6p(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ if (HAS_RC6pp(i915))
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ set(uncore, GEN6_RC_CONTROL,
+ rc6_mask |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ rc6vids = 0;
+ ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
+ &rc6vids, NULL);
+ if (IS_GEN(i915, 6) && ret) {
+ DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+ } else if (IS_GEN(i915, 6) &&
+ (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+ GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+ rc6vids &= 0xffff00;
+ rc6vids |= GEN6_ENCODE_RC6_VID(450);
+ ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ if (ret)
+ DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+ }
+}
+
+/* Check that the PCBR address is not empty. */
+static int chv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+ paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
+
+ pctx_paddr = (paddr & ~4095);
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+ }
+
+ return 0;
+}
+
+static int vlv_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_gem_object *pctx;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24 * SZ_1K;
+ u32 pcbr;
+
+ pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ if (pcbr) {
+ /* BIOS set it up already, grab the pre-alloc'd space */
+ resource_size_t pcbr_offset;
+
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pctx = i915_gem_object_create_stolen_for_preallocated(i915,
+ pcbr_offset,
+ I915_GTT_OFFSET_NONE,
+ pctx_size);
+ if (IS_ERR(pctx))
+ return PTR_ERR(pctx);
+
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
+ /*
+ * From the Gunit register HAS:
+ * The Gfx driver is expected to program this register and ensure
+ * proper allocation within Gfx stolen memory. For example, this
+ * register should be programmed such that the PCBR range does not
+ * overlap with other ranges, such as the frame buffer, protected
+ * memory, or any other relevant ranges.
+ */
+ pctx = i915_gem_object_create_stolen(i915, pctx_size);
+ if (IS_ERR(pctx)) {
+ DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
+ return PTR_ERR(pctx);
+ }
+
+ GEM_BUG_ON(range_overflows_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
+
+out:
+ rc6->pctx = pctx;
+ return 0;
+}
+
+static void chv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 2a: Program RC6 thresholds. */
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+ set(uncore, GEN6_RC_SLEEP, 0);
+
+ /* TO threshold set to 500 us (0x186 * 1.28 us) */
+ set(uncore, GEN6_RC6_THRESHOLD, 0x186);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* 3: Enable RC6 */
+ set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE);
+}
+
+static void vlv_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
+ set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
+ set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for_each_engine(engine, rc6_to_gt(rc6), id)
+ set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+
+ set(uncore, GEN6_RC6_THRESHOLD, 0x557);
+
+ /* Allows RC6 residency counter to work */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ set(uncore, GEN6_RC_CONTROL,
+ GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
+}
+
+static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+{
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ u32 rc6_ctx_base, rc_ctl, rc_sw_target;
+ bool enable_rc6 = true;
+
+ rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
+ rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
+ rc_sw_target &= RC_SW_TARGET_STATE_MASK;
+ rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
+ DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
+
+ if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+ DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ /*
+ * The exact context size is not known for BXT, so assume a page size
+ * for this check.
+ */
+ rc6_ctx_base =
+ intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+ if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
+ enable_rc6 = false;
+ }
+
+ if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 &&
+ (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) {
+ DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
+ !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
+ DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
+ DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
+ DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ return enable_rc6;
+}
+
+static bool rc6_supported(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!HAS_RC6(i915))
+ return false;
+
+ if (intel_vgpu_active(i915))
+ return false;
+
+ if (is_mock_gt(rc6_to_gt(rc6)))
+ return false;
+
+ if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
+ dev_notice(i915->drm.dev,
+ "RC6 and powersaving disabled by BIOS\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void rpm_get(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(rc6->wakeref);
+ pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = true;
+}
+
+static void rpm_put(struct intel_rc6 *rc6)
+{
+ GEM_BUG_ON(!rc6->wakeref);
+ pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev);
+ rc6->wakeref = false;
+}
+
+static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6)
+{
+ return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO);
+}
+
+static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (intel_rc6_ctx_corrupted(rc6)) {
+ DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+ rc6->ctx_corrupted = true;
+ }
+}
+
+/**
+ * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @rc6: rc6 state
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6)
+{
+ if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) {
+ DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+ rc6->ctx_corrupted = false;
+ }
+}
+
+/**
+ * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @rc6: rc6 state
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ */
+void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+
+ if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+ return;
+
+ if (rc6->ctx_corrupted)
+ return;
+
+ if (!intel_rc6_ctx_corrupted(rc6))
+ return;
+
+ DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+
+ intel_rc6_disable(rc6);
+ rc6->ctx_corrupted = true;
+}
+
+static void __intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (INTEL_GEN(i915) >= 9)
+ set(uncore, GEN9_PG_ENABLE, 0);
+ set(uncore, GEN6_RC_CONTROL, 0);
+ set(uncore, GEN6_RC_STATE, 0);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+}
+
+void intel_rc6_init(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ int err;
+
+ /* Disable runtime-pm until we can save the GPU state with rc6 pctx */
+ rpm_get(rc6);
+
+ if (!rc6_supported(rc6))
+ return;
+
+ intel_rc6_ctx_wa_init(rc6);
+
+ if (IS_CHERRYVIEW(i915))
+ err = chv_rc6_init(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ err = vlv_rc6_init(rc6);
+ else
+ err = 0;
+
+ /* Sanitize rc6, ensure it is disabled before we are ready. */
+ __intel_rc6_disable(rc6);
+
+ rc6->supported = err == 0;
+}
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6)
+{
+ if (rc6->enabled) { /* unbalanced suspend/resume */
+ rpm_get(rc6);
+ rc6->enabled = false;
+ }
+
+ if (rc6->supported)
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_enable(struct intel_rc6 *rc6)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+
+ if (!rc6->supported)
+ return;
+
+ GEM_BUG_ON(rc6->enabled);
+
+ if (rc6->ctx_corrupted)
+ return;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rc6_enable(rc6);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 11)
+ gen11_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 9)
+ gen9_rc6_enable(rc6);
+ else if (IS_BROADWELL(i915))
+ gen8_rc6_enable(rc6);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rc6_enable(rc6);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ /* rc6 is ready, runtime-pm is go! */
+ rpm_put(rc6);
+ rc6->enabled = true;
+}
+
+void intel_rc6_disable(struct intel_rc6 *rc6)
+{
+ if (!rc6->enabled)
+ return;
+
+ rpm_get(rc6);
+ rc6->enabled = false;
+
+ __intel_rc6_disable(rc6);
+}
+
+void intel_rc6_fini(struct intel_rc6 *rc6)
+{
+ struct drm_i915_gem_object *pctx;
+
+ intel_rc6_disable(rc6);
+
+ pctx = fetch_and_zero(&rc6->pctx);
+ if (pctx)
+ i915_gem_object_put(pctx);
+
+ if (rc6->wakeref)
+ rpm_put(rc6);
+}
+
+static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
+{
+ u32 lower, upper, tmp;
+ int loop = 2;
+
+ /*
+ * The registers accessed do not need forcewake. We borrow the
+ * uncore lock to prevent concurrent access to the range registers.
+ */
+ lockdep_assert_held(&uncore->lock);
+
+ /*
+ * vlv and chv residency counters are 40 bits in width.
+ * With a control bit, we can choose between the upper or lower
+ * 32-bit window into this counter.
+ *
+ * Although we always use the counter in high-range mode elsewhere,
+ * userspace may attempt to read the value before rc6 is initialised,
+ * before we have set the default VLV_COUNTER_CONTROL value. So always
+ * set the high bit to be safe.
+ */
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ do {
+ tmp = upper;
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ lower = intel_uncore_read_fw(uncore, reg);
+
+ set(uncore, VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = intel_uncore_read_fw(uncore, reg);
+ } while (upper != tmp && --loop);
+
+ /*
+ * Everywhere else we always use VLV_COUNTER_CONTROL with the
+ * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+ * now.
+ */
+
+ return lower | (u64)upper << 8;
+}
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
+{
+ struct drm_i915_private *i915 = rc6_to_i915(rc6);
+ struct intel_uncore *uncore = rc6_to_uncore(rc6);
+ u64 time_hw, prev_hw, overflow_hw;
+ unsigned int fw_domains;
+ unsigned long flags;
+ unsigned int i;
+ u32 mul, div;
+
+ if (!rc6->supported)
+ return 0;
+
+ /*
+ * Store previous hw counter values for counter wrap-around handling.
+ *
+ * There are only four interesting registers and they live next to each
+ * other, so we can use the address relative to the smallest one as
+ * the index into driver storage.
+ */
+ i = (i915_mmio_reg_offset(reg) -
+ i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
+ if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ return 0;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
+
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ mul = 1000000;
+ div = i915->czclk_freq;
+ overflow_hw = BIT_ULL(40);
+ time_hw = vlv_residency_raw(uncore, reg);
+ } else {
+ /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
+ if (IS_GEN9_LP(i915)) {
+ mul = 10000;
+ div = 12;
+ } else {
+ mul = 1280;
+ div = 1;
+ }
+
+ overflow_hw = BIT_ULL(32);
+ time_hw = intel_uncore_read_fw(uncore, reg);
+ }
+
+ /*
+ * Counter wrap handling.
+ *
+ * This relies on a sufficient frequency of queries; otherwise the
+ * counters can still wrap undetected between two samples.
+ */
+ prev_hw = rc6->prev_hw_residency[i];
+ rc6->prev_hw_residency[i] = time_hw;
+
+ /* RC6 delta from last sample. */
+ if (time_hw >= prev_hw)
+ time_hw -= prev_hw;
+ else
+ time_hw += overflow_hw - prev_hw;
+
+ /* Add delta to RC6 extended raw driver copy. */
+ time_hw += rc6->cur_residency[i];
+ rc6->cur_residency[i] = time_hw;
+
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
+
+ return mul_u64_u32_div(time_hw, mul, div);
+}
+
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg)
+{
+ return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000);
+}
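intel_rc6_residency_ns extends the hardware counters in software: they are only 32 bits wide (40 on Valleyview/Cherryview), so each read computes the delta from the previous sample modulo the counter width and folds it into a 64-bit driver copy. A distilled sketch of that accumulation, assuming queries are frequent enough that at most one wrap happens between reads:

#include <stdint.h>

/* Fold a wrapping hardware counter sample into a 64-bit total.
 * width_bits is 32 on most platforms, 40 on VLV/CHV. */
static uint64_t accumulate_residency(uint64_t *prev, uint64_t *total,
                                     uint64_t sample, unsigned int width_bits)
{
        uint64_t overflow = 1ull << width_bits;
        uint64_t delta;

        if (sample >= *prev)
                delta = sample - *prev;
        else
                delta = sample + overflow - *prev; /* exactly one wrap */

        *prev = sample;
        *total += delta;
        return *total;
}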
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h
new file mode 100644
index 000000000000..1370f6834a4c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_H
+#define INTEL_RC6_H
+
+#include "i915_reg.h"
+
+struct intel_engine_cs;
+struct intel_rc6;
+
+void intel_rc6_init(struct intel_rc6 *rc6);
+void intel_rc6_fini(struct intel_rc6 *rc6);
+
+void intel_rc6_sanitize(struct intel_rc6 *rc6);
+void intel_rc6_enable(struct intel_rc6 *rc6);
+void intel_rc6_disable(struct intel_rc6 *rc6);
+
+u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg);
+u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg);
+
+void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6);
+void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6);
+
+#endif /* INTEL_RC6_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
new file mode 100644
index 000000000000..89ad5697a8d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RC6_TYPES_H
+#define INTEL_RC6_TYPES_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "intel_engine_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_rc6 {
+ u64 prev_hw_residency[4];
+ u64 cur_residency[4];
+
+ struct drm_i915_gem_object *pctx;
+
+ bool supported : 1;
+ bool enabled : 1;
+ bool wakeref : 1;
+ bool ctx_corrupted : 1;
+};
+
+#endif /* INTEL_RC6_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 6d05f9c64178..c4edc35e7d89 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
+#include "intel_ring.h"
struct intel_renderstate {
const struct intel_renderstate_rodata *rodata;
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8cea42379dd7..f03e000051c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -282,14 +282,14 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN6_GRDOM_RENDER,
[BCS0] = GEN6_GRDOM_BLT,
[VCS0] = GEN6_GRDOM_MEDIA,
[VCS1] = GEN8_GRDOM_MEDIA2,
[VECS0] = GEN6_GRDOM_VECS,
};
+ struct intel_engine_cs *engine;
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
@@ -298,7 +298,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
@@ -307,7 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt,
return gen6_hw_domain_reset(gt, hw_mask);
}
-static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
+static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
@@ -316,6 +316,7 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
i915_reg_t sfc_usage;
u32 sfc_usage_bit;
u32 sfc_reset_bit;
+ int ret;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
@@ -350,27 +351,33 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
}
/*
- * Tell the engine that a software reset is going to happen. The engine
- * will then try to force lock the SFC (if currently locked, it will
- * remain so until we tell the engine it is safe to unlock; if currently
- * unlocked, it will ignore this and all new lock requests). If SFC
- * ends up being locked to the engine we want to reset, we have to reset
- * it as well (we will unlock it once the reset sequence is completed).
+ * If the engine is using a SFC, tell the engine that a software reset
+ * is going to happen. The engine will then try to force lock the SFC.
+ * If SFC ends up being locked to the engine we want to reset, we have
+ * to reset it as well (we will unlock it once the reset sequence is
+ * completed).
*/
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
+ return 0;
+
rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(uncore,
- sfc_forced_lock_ack,
- sfc_forced_lock_ack_bit,
- sfc_forced_lock_ack_bit,
- 1000, 0, NULL)) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ ret = __intel_wait_for_register_fw(uncore,
+ sfc_forced_lock_ack,
+ sfc_forced_lock_ack_bit,
+ sfc_forced_lock_ack_bit,
+ 1000, 0, NULL);
+
+ /* Was the SFC released while we were trying to lock it? */
+ if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit))
return 0;
- }
- if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
- return sfc_reset_bit;
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ return ret;
+ }
+ *hw_mask |= sfc_reset_bit;
return 0;
}
@@ -406,7 +413,7 @@ static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[] = {
+ static const u32 hw_engine_mask[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
[VCS0] = GEN11_GRDOM_MEDIA,
@@ -425,17 +432,26 @@ static int gen11_reset_engines(struct intel_gt *gt,
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(engine);
+ ret = gen11_lock_sfc(engine, &hw_mask);
+ if (ret)
+ goto sfc_unlock;
}
}
ret = gen6_hw_domain_reset(gt, hw_mask);
+sfc_unlock:
+ /*
+ * We unlock the SFC based on the lock status and not the result of
+ * gen11_lock_sfc to make sure that we clean up properly if something
+ * went wrong during the lock (e.g. the lock was acquired after the
+ * timeout expired).
+ */
if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
@@ -494,7 +510,7 @@ static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t tmp;
int ret;
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
@@ -520,19 +536,30 @@ static int gen8_reset_engines(struct intel_gt *gt,
ret = gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
+static int mock_reset(struct intel_gt *gt,
+ intel_engine_mask_t mask,
+ unsigned int retry)
+{
+ return 0;
+}
+
typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
-static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
- if (INTEL_GEN(i915) >= 8)
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (is_mock_gt(gt))
+ return mock_reset;
+ else if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
return gen6_reset_engines;
@@ -555,7 +582,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
int ret = -ETIMEDOUT;
int retry;
- reset = intel_get_gpu_reset(gt->i915);
+ reset = intel_get_gpu_reset(gt);
if (!reset)
return -ENODEV;
@@ -575,17 +602,20 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return ret;
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915)
+bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!i915_modparams.reset)
return NULL;
- return intel_get_gpu_reset(i915);
+ return intel_get_gpu_reset(gt);
}
-bool intel_has_reset_engine(struct drm_i915_private *i915)
+bool intel_has_reset_engine(const struct intel_gt *gt)
{
- return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+ if (i915_modparams.reset < 2)
+ return false;
+
+ return INTEL_INFO(gt->i915)->has_reset_engine;
}
int intel_reset_guc(struct intel_gt *gt)
@@ -652,7 +682,7 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
@@ -682,10 +712,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
if (err)
return err;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(gt->i915);
+ i915_gem_restore_fences(gt->ggtt);
return err;
}
@@ -695,7 +725,7 @@ static void reset_finish_engine(struct intel_engine_cs *engine)
engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
- intel_engine_signal_breadcrumbs(engine);
+ intel_engine_breadcrumbs_irq(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
@@ -703,7 +733,7 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
@@ -739,7 +769,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
struct drm_printer p = drm_debug_printer(__func__);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
@@ -756,7 +786,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
/*
@@ -768,7 +798,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
engine->cancel_requests(engine);
reset_finish(gt, awake);
@@ -781,7 +811,7 @@ void intel_gt_set_wedged(struct intel_gt *gt)
intel_wakeref_t wakeref;
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
__intel_gt_set_wedged(gt);
mutex_unlock(&gt->reset.mutex);
}
@@ -791,11 +821,13 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
unsigned long flags;
+ bool ok;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
- if (!gt->scratch) /* Never full initialised, recovery impossible */
+ /* Never fully initialised, recovery impossible */
+ if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
return false;
GEM_TRACE("start\n");
@@ -812,10 +844,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
*/
spin_lock_irqsave(&timelines->lock, flags);
list_for_each_entry(tl, &timelines->active_list, link) {
- struct i915_request *rq;
+ struct dma_fence *fence;
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
continue;
spin_unlock_irqrestore(&timelines->lock, flags);
@@ -827,8 +859,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
* (I915_FENCE_TIMEOUT) so this wait should not be unbounded
* in the worst case.
*/
- dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
+ dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
/* Restart iteration after dropping the lock */
spin_lock_irqsave(&timelines->lock, flags);
@@ -836,7 +868,18 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
}
spin_unlock_irqrestore(&timelines->lock, flags);
- intel_gt_sanitize(gt, false);
+ /* We must reset pending GPU events before restoring our submission */
+ ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
+ if (!ok) {
+ /*
+ * Warn CI about the unrecoverable wedged condition.
+ * Time for a reboot.
+ */
+ add_taint_for_CI(TAINT_WARN);
+ return false;
+ }
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -891,7 +934,7 @@ static int resume(struct intel_gt *gt)
enum intel_engine_id id;
int ret;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
ret = engine->resume(engine);
if (ret)
return ret;
@@ -941,7 +984,7 @@ void intel_gt_reset(struct intel_gt *gt,
awake = reset_prepare(gt);
- if (!intel_has_gpu_reset(gt->i915)) {
+ if (!intel_has_gpu_reset(gt)) {
if (i915_modparams.reset)
dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
@@ -970,7 +1013,7 @@ void intel_gt_reset(struct intel_gt *gt,
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(gt->i915);
+ ret = intel_gt_init_hw(gt);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
@@ -981,8 +1024,6 @@ void intel_gt_reset(struct intel_gt *gt,
if (ret)
goto taint;
- intel_gt_queue_hangcheck(gt);
-
finish:
reset_finish(gt, awake);
unlock:
@@ -1149,7 +1190,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
@@ -1162,8 +1203,8 @@ void intel_gt_handle_error(struct intel_gt *gt,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
- for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
+ if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
@@ -1191,7 +1232,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
- for_each_engine(engine, gt->i915, tmp) {
+ for_each_engine(engine, gt, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags,
@@ -1201,7 +1242,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, gt->i915, tmp)
+ for_each_engine(engine, gt, tmp)
clear_bit_unlock(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
@@ -1209,7 +1250,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
@@ -1251,10 +1292,6 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
return -EIO;
- /* XXX intel_reset_finish() still takes struct_mutex!!! */
- if (mutex_is_locked(&gt->i915->drm.struct_mutex))
- return -EAGAIN;
-
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
@@ -1263,6 +1300,14 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
return intel_gt_is_wedged(gt) ? -EIO : 0;
}
+void intel_gt_set_wedged_on_init(struct intel_gt *gt)
+{
+ BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
+ I915_WEDGED_ON_INIT);
+ intel_gt_set_wedged(gt);
+ set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+}
+
void intel_gt_init_reset(struct intel_gt *gt)
{
init_waitqueue_head(&gt->reset.queue);
@@ -1306,4 +1351,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
+#include "selftest_hangcheck.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 52c00199e069..8e8d5f761166 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -14,7 +14,6 @@
#include "intel_engine_types.h"
#include "intel_reset_types.h"
-struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
struct intel_gt;
@@ -45,6 +44,12 @@ void intel_gt_set_wedged(struct intel_gt *gt);
bool intel_gt_unset_wedged(struct intel_gt *gt);
int intel_gt_terminally_wedged(struct intel_gt *gt);
+/*
+ * There's no unset_wedged_on_init paired with this one.
+ * Once we're wedged on init, there's no going back.
+ */
+void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
int intel_reset_guc(struct intel_gt *gt);
@@ -68,10 +73,13 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
static inline bool __intel_reset_failed(const struct intel_reset *reset)
{
+ GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
+ !test_bit(I915_WEDGED, &reset->flags) : false);
+
return unlikely(test_bit(I915_WEDGED, &reset->flags));
}
-bool intel_has_gpu_reset(struct drm_i915_private *i915);
-bool intel_has_reset_engine(struct drm_i915_private *i915);
+bool intel_has_gpu_reset(const struct intel_gt *gt);
+bool intel_has_reset_engine(const struct intel_gt *gt);
#endif /* I915_RESET_H */
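
The GEM_BUG_ON in __intel_reset_failed() above encodes the implication "wedged-on-init implies wedged" as a ternary. A minimal userspace sketch of the same invariant, assuming nothing from the driver beyond the two flag meanings (names below are local, not the driver's):

/* Illustrative sketch, not driver code: the ternary
 * "on_init ? !wedged : false" fires exactly when the init bit is set
 * without the terminal wedge bit, i.e. when the implication is broken. */
#include <assert.h>
#include <stdbool.h>

static void check_wedge_invariant(bool wedged_on_init, bool wedged)
{
	/* equivalent to: assert(!(wedged_on_init ? !wedged : false)) */
	assert(!(wedged_on_init && !wedged));
}

int main(void)
{
	check_wedge_invariant(false, false); /* healthy device: fine */
	check_wedge_invariant(false, true);  /* wedged at runtime: fine */
	check_wedge_invariant(true, true);   /* wedged on init: fine */
	/* check_wedge_invariant(true, false) would trip the assert */
	return 0;
}
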
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 31968356e0c0..f43bc3a0fe4f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -29,11 +29,17 @@ struct intel_reset {
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_request_alloc(), this bit is checked and the sequence
* aborted (with -EIO reported to userspace) if set.
+ *
+ * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no
+ * longer use the GPU - similar to the #I915_WEDGED bit. The difference
+ * is in the way we handle a "forced" unwedge (e.g. through debugfs),
+ * which is not allowed if we failed to initialize.
*/
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_MODESET 1
#define I915_RESET_ENGINE 2
+#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2)
#define I915_WEDGED (BITS_PER_LONG - 1)
struct mutex mutex; /* serialises wedging/unwedging */
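
To see why the BUILD_BUG_ON in intel_gt_set_wedged_on_init() holds, here is the flag layout as a standalone userspace sketch; the engine count is a stand-in for illustration, not the driver's I915_NUM_ENGINES:

/* Sketch of the reset.flags bit layout: per-engine bits grow upward
 * from bit 2 and must never reach the two top-of-word wedge bits. */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define RESET_BACKOFF	0
#define RESET_MODESET	1
#define RESET_ENGINE	2
#define NUM_ENGINES	8	/* assumption for illustration */
#define WEDGED_ON_INIT	(BITS_PER_LONG - 2)
#define WEDGED		(BITS_PER_LONG - 1)

int main(void)
{
	/* the same overlap check as the BUILD_BUG_ON, at runtime */
	assert(RESET_ENGINE + NUM_ENGINES <= WEDGED_ON_INIT);
	printf("engine bits %d..%d, wedged-on-init %zu, wedged %zu\n",
	       RESET_ENGINE, RESET_ENGINE + NUM_ENGINES - 1,
	       (size_t)WEDGED_ON_INIT, (size_t)WEDGED);
	return 0;
}
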
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
new file mode 100644
index 000000000000..ece20504d240
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -0,0 +1,323 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_engine.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
+
+unsigned int intel_ring_update_space(struct intel_ring *ring)
+{
+ unsigned int space;
+
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
+}
+
+int intel_ring_pin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+ unsigned int flags;
+ void *addr;
+ int ret;
+
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
+
+ flags = PIN_GLOBAL;
+
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ if (vma->obj->stolen)
+ flags |= PIN_MAPPABLE;
+ else
+ flags |= PIN_HIGH;
+
+ ret = i915_vma_pin(vma, 0, 0, flags);
+ if (unlikely(ret))
+ goto err_unpin;
+
+ if (i915_vma_is_map_and_fenceable(vma))
+ addr = (void __force *)i915_vma_pin_iomap(vma);
+ else
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_ring;
+ }
+
+ i915_vma_make_unshrinkable(vma);
+
+ GEM_BUG_ON(ring->vaddr);
+ ring->vaddr = addr;
+
+ return 0;
+
+err_ring:
+ i915_vma_unpin(vma);
+err_unpin:
+ atomic_dec(&ring->pin_count);
+ return ret;
+}
+
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ tail = intel_ring_wrap(ring, tail);
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
+void intel_ring_unpin(struct intel_ring *ring)
+{
+ struct i915_vma *vma = ring->vma;
+
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
+
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->emit);
+
+ i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_is_map_and_fenceable(vma))
+ i915_vma_unpin_iomap(vma);
+ else
+ i915_gem_object_unpin_map(vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
+ ring->vaddr = NULL;
+
+ i915_vma_unpin(vma);
+ i915_vma_make_purgeable(vma);
+}
+
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+{
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = ERR_PTR(-ENODEV);
+ if (i915_ggtt_has_aperture(ggtt))
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+ if (vm->has_read_only)
+ i915_gem_object_set_readonly(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return vma;
+}
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct intel_ring *ring;
+ struct i915_vma *vma;
+
+ GEM_BUG_ON(!is_power_of_2(size));
+ GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&ring->ref);
+ ring->size = size;
+
+ /*
+ * Workaround an erratum on the i830 which causes a hang if
+ * the TAIL pointer points to within the last 2 cachelines
+ * of the buffer.
+ */
+ ring->effective_size = size;
+ if (IS_I830(i915) || IS_I845G(i915))
+ ring->effective_size -= 2 * CACHELINE_BYTES;
+
+ intel_ring_update_space(ring);
+
+ vma = create_ring_vma(engine->gt->ggtt, size);
+ if (IS_ERR(vma)) {
+ kfree(ring);
+ return ERR_CAST(vma);
+ }
+ ring->vma = vma;
+
+ return ring;
+}
+
+void intel_ring_free(struct kref *ref)
+{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
+
+ i915_vma_put(ring->vma);
+ kfree(ring);
+}
+
+static noinline int
+wait_for_space(struct intel_ring *ring,
+ struct intel_timeline *tl,
+ unsigned int bytes)
+{
+ struct i915_request *target;
+ long timeout;
+
+ if (intel_ring_update_space(ring) >= bytes)
+ return 0;
+
+ GEM_BUG_ON(list_empty(&tl->requests));
+ list_for_each_entry(target, &tl->requests, link) {
+ if (target->ring != ring)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
+ break;
+ }
+
+ if (GEM_WARN_ON(&target->link == &tl->requests))
+ return -ENOSPC;
+
+ timeout = i915_request_wait(target,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ return timeout;
+
+ i915_request_retire_upto(target);
+
+ intel_ring_update_space(ring);
+ GEM_BUG_ON(ring->space < bytes);
+ return 0;
+}
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
+{
+ struct intel_ring *ring = rq->ring;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
+ u32 *cs;
+
+ /* Packets must be qword aligned. */
+ GEM_BUG_ON(num_dwords & 1);
+
+ total_bytes = bytes + rq->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
+
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
+ need_wrap = remain_actual | 1;
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = rq->reserved_space + remain_actual;
+ }
+ }
+
+ if (unlikely(total_bytes > ring->space)) {
+ int ret;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the
+ * request, as that cannot be allowed to fail. During request
+ * finalisation, reserved_space is set to 0 to stop the
+ * overallocation and the assumption is that then we never need
+ * to wait (which has the risk of failing with EINTR).
+ *
+ * See also i915_request_alloc() and i915_request_add().
+ */
+ GEM_BUG_ON(!rq->reserved_space);
+
+ ret = wait_for_space(ring,
+ i915_request_timeline(rq),
+ total_bytes);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(need_wrap)) {
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
+ GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
+
+ /* Fill the tail with MI_NOOP */
+ memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
+ ring->space -= need_wrap;
+ ring->emit = 0;
+ }
+
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
+ GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ ring->emit += bytes;
+ ring->space -= bytes;
+
+ return cs;
+}
+
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct i915_request *rq)
+{
+ int num_dwords;
+ void *cs;
+
+ num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
+ if (num_dwords == 0)
+ return 0;
+
+ num_dwords = CACHELINE_DWORDS - num_dwords;
+ GEM_BUG_ON(num_dwords & 1);
+
+ cs = intel_ring_begin(rq, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
+ intel_ring_advance(rq, cs + num_dwords);
+
+ GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
+ return 0;
+}
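
intel_ring_begin() above packs the "wrap needed" flag into bit 0 of remain_actual, which is safe because ring byte counts are always qword multiples. A minimal userspace sketch of the trick (names are local, not the driver's):

/* Sketch: bit 0 of a qword-aligned byte count is free to carry a flag,
 * so "count | 1" is nonzero even for count == 0, and "&= ~1" recovers
 * the count before it is used to pad the tail with MI_NOOP. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int remain_actual = 160;		/* bytes to ring end */
	unsigned int need_wrap = remain_actual | 1;	/* flag + count */

	if (need_wrap) {
		need_wrap &= ~1;	/* recover the byte count */
		assert(need_wrap == remain_actual);
		printf("fill %u bytes with MI_NOOP, then wrap to 0\n",
		       need_wrap);
	}
	return 0;
}
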
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
new file mode 100644
index 000000000000..ea2839d9e044
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -0,0 +1,131 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_H
+#define INTEL_RING_H
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "i915_request.h"
+#include "intel_ring_types.h"
+
+struct intel_engine_cs;
+
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+
+u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
+int intel_ring_cacheline_align(struct i915_request *rq);
+
+unsigned int intel_ring_update_space(struct intel_ring *ring);
+
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
+void intel_ring_reset(struct intel_ring *ring, u32 tail);
+
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
+
+static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
+{
+ /* Dummy function.
+ *
+ * This serves as a placeholder in the code so that the reader
+ * can compare against the preceding intel_ring_begin() and
+ * check that the number of dwords emitted matches the space
+ * reserved for the command packet (i.e. the value passed to
+ * intel_ring_begin()).
+ */
+ GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
+}
+
+static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+ unsigned int pos)
+{
+ if (pos & -ring->size) /* must be strictly within the ring */
+ return false;
+
+ if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+ return false;
+
+ return true;
+}
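
The pos & -ring->size test above relies on the size being a power of two: -size has every bit at or above log2(size) set, so the AND is nonzero exactly when pos falls outside the ring. A tiny userspace sketch:

/* Sketch of the power-of-two bounds check used above. */
#include <assert.h>

int main(void)
{
	unsigned int size = 4096;	/* -size == 0xfffff000 */

	assert((4095u & -size) == 0);	/* inside the ring: accepted */
	assert((4096u & -size) != 0);	/* one past the end: rejected */
	assert((8192u & -size) != 0);	/* far out of range: rejected */
	return 0;
}
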
+
+static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
+{
+ /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
+ u32 offset = addr - rq->ring->vaddr;
+ GEM_BUG_ON(offset > rq->ring->size);
+ return intel_ring_wrap(rq->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
+ /*
+ * "Ring Buffer Use"
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6
+ * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
+ * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ *
+ * We use ring->head as the last known location of the actual RING_HEAD,
+ * it may have advanced but in the worst case it is still equal to
+ * ring->head, and so we must never program RING_TAIL to advance
+ * into the same cacheline as ring->head.
+ */
+#define cacheline(a) round_down(a, CACHELINE_BYTES)
+ GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
+ tail < ring->head);
+#undef cacheline
+}
+
+static inline unsigned int
+intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
+{
+ /* Whilst writes to the tail are strictly ordered, there is no
+ * serialisation between readers and the writers. The tail may be
+ * read by i915_request_retire() just as it is being updated
+ * by execlists, as although the breadcrumb is complete, the context
+ * switch hasn't been seen.
+ */
+ assert_ring_tail_valid(ring, tail);
+ ring->tail = tail;
+ return tail;
+}
+
+static inline unsigned int
+__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
+{
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
+}
+
+#endif /* INTEL_RING_H */
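
__intel_ring_space() always subtracts one cacheline of slack, which both disambiguates head == tail (always "empty", never "full") and keeps RING_TAIL out of the head's cacheline per the BSpec note quoted above. A worked userspace example:

/* Worked example of the ring-space arithmetic; unsigned wraparound
 * plus the power-of-two mask makes the subtraction safe. */
#include <assert.h>
#include <stdio.h>

#define CACHELINE_BYTES 64

static unsigned int ring_space(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}

int main(void)
{
	/* empty ring: all but one cacheline is usable */
	assert(ring_space(0, 0, 4096) == 4096 - CACHELINE_BYTES);
	/* tail exactly one cacheline behind head: nothing usable */
	assert(ring_space(128, 64, 4096) == 0);
	printf("empty 4KiB ring has %u usable bytes\n",
	       ring_space(0, 0, 4096));
	return 0;
}
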
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bacaa7bb8c9a..a47d5a7c32c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -40,6 +40,7 @@
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
@@ -47,16 +48,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-unsigned int intel_ring_update_space(struct intel_ring *ring)
-{
- unsigned int space;
-
- space = __intel_ring_space(ring->head, ring->emit, ring->size);
-
- ring->space = space;
- return space;
-}
-
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
@@ -322,7 +313,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -425,7 +417,7 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
- *cs++ = rq->timeline->hwsp_offset;
+ *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
@@ -439,8 +431,8 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -459,8 +451,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
@@ -930,6 +922,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
i915_request_submit(request);
+ wmb(); /* paranoid flush writes out of the WCB before mmio */
ENGINE_WRITE(request->engine, RING_TAIL,
intel_ring_set_tail(request->ring, request->tail));
@@ -937,8 +930,8 @@ static void i9xx_submit_request(struct i915_request *request)
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -960,8 +953,8 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
int i;
- GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
- GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
+ GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
+ GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH;
@@ -1184,167 +1177,9 @@ i915_emit_bb_start(struct i915_request *rq,
return 0;
}
-int intel_ring_pin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
- unsigned int flags;
- void *addr;
- int ret;
-
- if (atomic_fetch_inc(&ring->pin_count))
- return 0;
-
- flags = PIN_GLOBAL;
-
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- if (vma->obj->stolen)
- flags |= PIN_MAPPABLE;
- else
- flags |= PIN_HIGH;
-
- ret = i915_vma_pin(vma, 0, 0, flags);
- if (unlikely(ret))
- goto err_unpin;
-
- if (i915_vma_is_map_and_fenceable(vma))
- addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto err_ring;
- }
-
- i915_vma_make_unshrinkable(vma);
-
- GEM_BUG_ON(ring->vaddr);
- ring->vaddr = addr;
-
- return 0;
-
-err_ring:
- i915_vma_unpin(vma);
-err_unpin:
- atomic_dec(&ring->pin_count);
- return ret;
-}
-
-void intel_ring_reset(struct intel_ring *ring, u32 tail)
-{
- tail = intel_ring_wrap(ring, tail);
- ring->tail = tail;
- ring->head = tail;
- ring->emit = tail;
- intel_ring_update_space(ring);
-}
-
-void intel_ring_unpin(struct intel_ring *ring)
-{
- struct i915_vma *vma = ring->vma;
-
- if (!atomic_dec_and_test(&ring->pin_count))
- return;
-
- /* Discard any unused bytes beyond that submitted to hw. */
- intel_ring_reset(ring, ring->emit);
-
- i915_vma_unset_ggtt_write(vma);
- if (i915_vma_is_map_and_fenceable(vma))
- i915_vma_unpin_iomap(vma);
- else
- i915_gem_object_unpin_map(vma->obj);
-
- GEM_BUG_ON(!ring->vaddr);
- ring->vaddr = NULL;
-
- i915_vma_unpin(vma);
- i915_vma_make_purgeable(vma);
-}
-
-static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
-{
- struct i915_address_space *vm = &ggtt->vm;
- struct drm_i915_private *i915 = vm->i915;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
-
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- /*
- * Mark ring buffers as read-only from GPU side (so no stray overwrites)
- * if supported by the platform's GGTT.
- */
- if (vm->has_read_only)
- i915_gem_object_set_readonly(obj);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return vma;
-}
-
-struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
-{
- struct drm_i915_private *i915 = engine->i915;
- struct intel_ring *ring;
- struct i915_vma *vma;
-
- GEM_BUG_ON(!is_power_of_2(size));
- GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&ring->ref);
-
- ring->size = size;
- /* Workaround an erratum on the i830 which causes a hang if
- * the TAIL pointer points to within the last 2 cachelines
- * of the buffer.
- */
- ring->effective_size = size;
- if (IS_I830(i915) || IS_I845G(i915))
- ring->effective_size -= 2 * CACHELINE_BYTES;
-
- intel_ring_update_space(ring);
-
- vma = create_ring_vma(engine->gt->ggtt, size);
- if (IS_ERR(vma)) {
- kfree(ring);
- return ERR_CAST(vma);
- }
- ring->vma = vma;
-
- return ring;
-}
-
-void intel_ring_free(struct kref *ref)
-{
- struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
-
- i915_vma_close(ring->vma);
- i915_vma_put(ring->vma);
-
- kfree(ring);
-}
-
static void __ring_context_fini(struct intel_context *ce)
{
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void ring_context_destroy(struct kref *ref)
@@ -1609,7 +1444,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *signaller;
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1663,7 +1498,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
i915_reg_t last_reg = {}; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
- for_each_engine(signaller, i915, id) {
+ for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
@@ -1676,7 +1511,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ *cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
@@ -1741,46 +1576,22 @@ static int remap_l3(struct i915_request *rq)
static int switch_context(struct i915_request *rq)
{
- struct intel_engine_cs *engine = rq->engine;
- struct i915_address_space *vm = vm_alias(rq->hw_context);
- unsigned int unwind_mm = 0;
- u32 hw_flags = 0;
+ struct intel_context *ce = rq->hw_context;
+ struct i915_address_space *vm = vm_alias(ce);
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
if (vm) {
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- int loops;
-
- /*
- * Baytail takes a little more convincing that it really needs
- * to reload the PD between contexts. It is not just a little
- * longer, as adding more stalls after the load_pd_dir (i.e.
- * adding a long loop around flush_pd_dir) is not as effective
- * as reloading the PD umpteen times. 32 is derived from
- * experimentation (gem_exec_parallel/fds) and has no good
- * explanation.
- */
- loops = 1;
- if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
- loops = 32;
-
- do {
- ret = load_pd_dir(rq, ppgtt);
- if (ret)
- goto err;
- } while (--loops);
-
- if (ppgtt->pd_dirty_engines & engine->mask) {
- unwind_mm = engine->mask;
- ppgtt->pd_dirty_engines &= ~unwind_mm;
- hw_flags = MI_FORCE_RESTORE;
- }
+ ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
+ if (ret)
+ return ret;
}
- if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS0);
+ if (ce->state) {
+ u32 hw_flags;
+
+ GEM_BUG_ON(rq->engine->id != RCS0);
/*
* The kernel context(s) is treated as pure scratch and is not
@@ -1789,22 +1600,25 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
+ hw_flags = 0;
if (i915_gem_context_is_kernel(rq->gem_context))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
if (ret)
- goto err_mm;
+ return ret;
}
if (vm) {
+ struct intel_engine_cs *engine = rq->engine;
+
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
- goto err_mm;
+ return ret;
ret = flush_pd_dir(rq);
if (ret)
- goto err_mm;
+ return ret;
/*
* Not only do we need a full barrier (post-sync write) after
@@ -1816,24 +1630,18 @@ static int switch_context(struct i915_request *rq)
*/
ret = engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
- goto err_mm;
+ return ret;
ret = engine->emit_flush(rq, EMIT_FLUSH);
if (ret)
- goto err_mm;
+ return ret;
}
ret = remap_l3(rq);
if (ret)
- goto err_mm;
+ return ret;
return 0;
-
-err_mm:
- if (unwind_mm)
- i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
-err:
- return ret;
}
static int ring_request_alloc(struct i915_request *request)
@@ -1841,7 +1649,7 @@ static int ring_request_alloc(struct i915_request *request)
int ret;
GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
- GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1863,146 +1671,6 @@ static int ring_request_alloc(struct i915_request *request)
return 0;
}
-static noinline int
-wait_for_space(struct intel_ring *ring,
- struct intel_timeline *tl,
- unsigned int bytes)
-{
- struct i915_request *target;
- long timeout;
-
- if (intel_ring_update_space(ring) >= bytes)
- return 0;
-
- GEM_BUG_ON(list_empty(&tl->requests));
- list_for_each_entry(target, &tl->requests, link) {
- if (target->ring != ring)
- continue;
-
- /* Would completion of this request free enough space? */
- if (bytes <= __intel_ring_space(target->postfix,
- ring->emit, ring->size))
- break;
- }
-
- if (GEM_WARN_ON(&target->link == &tl->requests))
- return -ENOSPC;
-
- timeout = i915_request_wait(target,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (timeout < 0)
- return timeout;
-
- i915_request_retire_upto(target);
-
- intel_ring_update_space(ring);
- GEM_BUG_ON(ring->space < bytes);
- return 0;
-}
-
-u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
-{
- struct intel_ring *ring = rq->ring;
- const unsigned int remain_usable = ring->effective_size - ring->emit;
- const unsigned int bytes = num_dwords * sizeof(u32);
- unsigned int need_wrap = 0;
- unsigned int total_bytes;
- u32 *cs;
-
- /* Packets must be qword aligned. */
- GEM_BUG_ON(num_dwords & 1);
-
- total_bytes = bytes + rq->reserved_space;
- GEM_BUG_ON(total_bytes > ring->effective_size);
-
- if (unlikely(total_bytes > remain_usable)) {
- const int remain_actual = ring->size - ring->emit;
-
- if (bytes > remain_usable) {
- /*
- * Not enough space for the basic request. So need to
- * flush out the remainder and then wait for
- * base + reserved.
- */
- total_bytes += remain_actual;
- need_wrap = remain_actual | 1;
- } else {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So we don't need an immediate
- * wrap and only need to effectively wait for the
- * reserved size from the start of ringbuffer.
- */
- total_bytes = rq->reserved_space + remain_actual;
- }
- }
-
- if (unlikely(total_bytes > ring->space)) {
- int ret;
-
- /*
- * Space is reserved in the ringbuffer for finalising the
- * request, as that cannot be allowed to fail. During request
- * finalisation, reserved_space is set to 0 to stop the
- * overallocation and the assumption is that then we never need
- * to wait (which has the risk of failing with EINTR).
- *
- * See also i915_request_alloc() and i915_request_add().
- */
- GEM_BUG_ON(!rq->reserved_space);
-
- ret = wait_for_space(ring, rq->timeline, total_bytes);
- if (unlikely(ret))
- return ERR_PTR(ret);
- }
-
- if (unlikely(need_wrap)) {
- need_wrap &= ~1;
- GEM_BUG_ON(need_wrap > ring->space);
- GEM_BUG_ON(ring->emit + need_wrap > ring->size);
- GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
-
- /* Fill the tail with MI_NOOP */
- memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
- ring->space -= need_wrap;
- ring->emit = 0;
- }
-
- GEM_BUG_ON(ring->emit > ring->size - bytes);
- GEM_BUG_ON(ring->space < bytes);
- cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
- ring->emit += bytes;
- ring->space -= bytes;
-
- return cs;
-}
-
-/* Align the ring tail to a cacheline boundary */
-int intel_ring_cacheline_align(struct i915_request *rq)
-{
- int num_dwords;
- void *cs;
-
- num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
- if (num_dwords == 0)
- return 0;
-
- num_dwords = CACHELINE_DWORDS - num_dwords;
- GEM_BUG_ON(num_dwords & 1);
-
- cs = intel_ring_begin(rq, num_dwords);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
- intel_ring_advance(rq, cs);
-
- GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
- return 0;
-}
-
static void gen6_bsd_submit_request(struct i915_request *request)
{
struct intel_uncore *uncore = request->engine->uncore;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
new file mode 100644
index 000000000000..d9f17f38e0cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RING_TYPES_H
+#define INTEL_RING_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+/*
+ * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
+ * but keeps the logic simple. Indeed, the whole purpose of this macro is just
+ * to give some indication of the magic values used in the various
+ * workarounds!
+ */
+#define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
+
+struct i915_vma;
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+#endif /* INTEL_RING_TYPES_H */
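
The pin_count scheme described above reduces to a plain first-pin/last-unpin refcount. A minimal userspace sketch with C11 atomics, standing in for the driver's atomic_fetch_inc()/atomic_dec_and_test() pair:

/* Sketch: only the 0 -> 1 transition does the expensive setup and
 * only the 1 -> 0 transition tears it down; intermediate pins and
 * unpins just move the refcount. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pin_count;

static void ring_pin(void)
{
	if (atomic_fetch_add(&pin_count, 1))
		return;			/* already pinned elsewhere */
	printf("first pin: map the ring\n");
}

static void ring_unpin(void)
{
	if (atomic_fetch_sub(&pin_count, 1) != 1)
		return;			/* still pinned elsewhere */
	printf("last unpin: unmap the ring\n");
}

int main(void)
{
	ring_pin();	/* maps */
	ring_pin();	/* refcount only */
	ring_unpin();	/* refcount only */
	ring_unpin();	/* unmaps */
	return 0;
}
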
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
new file mode 100644
index 000000000000..20d6ee148afc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -0,0 +1,1872 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_irq.h"
+#include "intel_gt_pm_irq.h"
+#include "intel_rps.h"
+#include "intel_sideband.h"
+#include "../../../platform/x86/intel_ips.h"
+
+/*
+ * Lock protecting IPS related data structures
+ */
+static DEFINE_SPINLOCK(mchdev_lock);
+
+static struct intel_gt *rps_to_gt(struct intel_rps *rps)
+{
+ return container_of(rps, struct intel_gt, rps);
+}
+
+static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->i915;
+}
+
+static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
+{
+ return rps_to_gt(rps)->uncore;
+}
+
+static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
+{
+ return mask & ~rps->pm_intrmsk_mbz;
+}
+
+static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
+{
+ u32 mask = 0;
+
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
+ if (val > rps->min_freq_softlimit)
+ mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ if (val < rps->max_freq_softlimit)
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
+
+ mask &= rps->pm_events;
+
+ return rps_pm_sanitize_mask(rps, ~mask);
+}
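
rps_pm_mask() above enables "up" events only while below the max softlimit and "down" events only while above the min, then inverts the result because GEN6_PMINTRMSK masks interrupts. A userspace sketch with stand-in bit values (not the hardware encoding):

/* Sketch of the mask derivation; bit assignments are illustrative. */
#include <stdio.h>

#define UP_EI_EXPIRED	(1u << 0)
#define UP_THRESHOLD	(1u << 1)
#define DOWN_THRESHOLD	(1u << 2)
#define DOWN_TIMEOUT	(1u << 3)

static unsigned int pm_mask(unsigned int val, unsigned int min_sl,
			    unsigned int max_sl, unsigned int events)
{
	unsigned int mask = 0;

	if (val > min_sl)	/* room to go down: enable down events */
		mask |= UP_EI_EXPIRED | DOWN_THRESHOLD | DOWN_TIMEOUT;
	if (val < max_sl)	/* room to go up: enable up events */
		mask |= UP_EI_EXPIRED | UP_THRESHOLD;

	return ~(mask & events);	/* invert: register masks IRQs */
}

int main(void)
{
	unsigned int events = UP_THRESHOLD | DOWN_THRESHOLD | DOWN_TIMEOUT;

	/* at the minimum frequency only "up" events stay unmasked */
	printf("at min: 0x%x\n", pm_mask(3, 3, 10, events));
	/* at the maximum frequency only "down" events stay unmasked */
	printf("at max: 0x%x\n", pm_mask(10, 3, 10, events));
	return 0;
}
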
+
+static void rps_reset_ei(struct intel_rps *rps)
+{
+ memset(&rps->ei, 0, sizeof(rps->ei));
+}
+
+static void rps_enable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps_reset_ei(rps);
+
+ if (IS_VALLEYVIEW(gt->i915))
+ /* WaGsvRC0ResidencyMethod:vlv */
+ rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_enable_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+ rps_pm_mask(rps, rps->cur_freq));
+}
+
+static void gen6_rps_reset_interrupts(struct intel_rps *rps)
+{
+ gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
+}
+
+static void gen11_rps_reset_interrupts(struct intel_rps *rps)
+{
+ while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
+ ;
+}
+
+static void rps_reset_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (INTEL_GEN(gt->i915) >= 11)
+ gen11_rps_reset_interrupts(rps);
+ else
+ gen6_rps_reset_interrupts(rps);
+
+ rps->pm_iir = 0;
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void rps_disable_interrupts(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ rps->pm_events = 0;
+
+ intel_uncore_write(gt->uncore, GEN6_PMINTRMSK,
+ rps_pm_sanitize_mask(rps, ~0u));
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&gt->irq_lock);
+
+ intel_synchronize_irq(gt->i915);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+
+ rps_reset_interrupts(rps);
+}
+
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
+ { 1, 1333, 301, 28664 },
+ { 1, 1066, 294, 24460 },
+ { 1, 800, 294, 25192 },
+ { 0, 1333, 276, 27605 },
+ { 0, 1066, 276, 27605 },
+ { 0, 800, 231, 23784 },
+};
+
+static void gen5_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fmax, fmin, fstart;
+ u32 rgvmodectl;
+ int c_m, i;
+
+ if (i915->fsb_freq <= 3200)
+ c_m = 0;
+ else if (i915->fsb_freq <= 4800)
+ c_m = 1;
+ else
+ c_m = 2;
+
+ for (i = 0; i < ARRAY_SIZE(cparams); i++) {
+ if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
+ rps->ips.m = cparams[i].m;
+ rps->ips.c = cparams[i].c;
+ break;
+ }
+ }
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
+
+ rps->min_freq = fmax;
+ rps->max_freq = fmin;
+
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+}
+
+static unsigned long
+__ips_chipset_val(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ unsigned long now = jiffies_to_msecs(jiffies), dt;
+ unsigned long result;
+ u64 total, delta;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ /*
+ * Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ dt = now - ips->last_time1;
+ if (dt <= 10)
+ return ips->chipset_power;
+
+ /* FIXME: handle per-counter overflow */
+ total = intel_uncore_read(uncore, DMIEC);
+ total += intel_uncore_read(uncore, DDREC);
+ total += intel_uncore_read(uncore, CSIEC);
+
+ delta = total - ips->last_count1;
+
+ result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
+
+ ips->last_count1 = total;
+ ips->last_time1 = now;
+
+ ips->chipset_power = result;
+
+ return result;
+}
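
The chipset power estimate above reduces to ((m * delta / dt) + c) / 10. A worked userspace example using the coefficients from the first cparams[] row; the counter delta and elapsed time are made up for illustration:

/* Worked example of the __ips_chipset_val() arithmetic. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t m = 301, c = 28664;	/* cparams[0]: i=1, t=1333 */
	uint64_t delta = 50000;		/* energy counter ticks */
	uint64_t dt = 100;		/* ms since last sample */
	uint64_t power = ((m * delta / dt) + c) / 10;

	/* (301 * 50000 / 100 + 28664) / 10 == 17916 */
	printf("chipset power estimate: %llu\n",
	       (unsigned long long)power);
	return 0;
}
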
+
+static unsigned long ips_mch_val(struct intel_uncore *uncore)
+{
+ unsigned int m, x, b;
+ u32 tsfs;
+
+ tsfs = intel_uncore_read(uncore, TSFS);
+ x = intel_uncore_read8(uncore, TR1);
+
+ b = tsfs & TSFS_INTR_MASK;
+ m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
+
+ return m * x / 127 - b;
+}
+
+static int _pxvid_to_vd(u8 pxvid)
+{
+ if (pxvid == 0)
+ return 0;
+
+ if (pxvid >= 8 && pxvid < 31)
+ pxvid = 31;
+
+ return (pxvid + 2) * 125;
+}
+
+static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
+{
+ const int vd = _pxvid_to_vd(pxvid);
+
+ if (INTEL_INFO(i915)->is_mobile)
+ return max(vd - 1125, 0);
+
+ return vd;
+}
+
+static void __gen5_ips_update(struct intel_ips *ips)
+{
+ struct intel_uncore *uncore =
+ rps_to_uncore(container_of(ips, struct intel_rps, ips));
+ u64 now, delta, dt;
+ u32 count;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ now = ktime_get_raw_ns();
+ dt = now - ips->last_time2;
+ do_div(dt, NSEC_PER_MSEC);
+
+ /* Don't divide by 0 */
+ if (dt <= 10)
+ return;
+
+ count = intel_uncore_read(uncore, GFXEC);
+ delta = count - ips->last_count2;
+
+ ips->last_count2 = count;
+ ips->last_time2 = now;
+
+ /* More magic constants... */
+ ips->gfx_power = div_u64(delta * 1181, dt * 10);
+}
+
+static void gen5_rps_update(struct intel_rps *rps)
+{
+ spin_lock_irq(&mchdev_lock);
+ __gen5_ips_update(&rps->ips);
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static bool gen5_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_DEBUG("gpu busy, RCS change rejected\n");
+ return false; /* still busy with another command */
+ }
+
+ /* Invert the frequency bin into an ips delay */
+ val = rps->max_freq - val;
+ val = rps->min_freq + val;
+
+ rgvswctl =
+ (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (val << MEMCTL_FREQ_SHIFT) |
+ MEMCTL_SFCAVM;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+ intel_uncore_posting_read16(uncore, MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
+
+ return true;
+}
+
+static unsigned long intel_pxfreq(u32 vidfreq)
+{
+ int div = (vidfreq & 0x3f0000) >> 16;
+ int post = (vidfreq & 0x3000) >> 12;
+ int pre = (vidfreq & 0x7);
+
+ if (!pre)
+ return 0;
+
+ return div * 133333 / (pre << post);
+}
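
A worked userspace example of the intel_pxfreq() field decode; the kHz unit is an assumption read off the 133333 constant (a 133.333 MHz reference), not something the driver states:

/* Worked example of the vidfreq decode above. */
#include <stdio.h>

static unsigned long pxfreq(unsigned int vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = vidfreq & 0x7;

	return pre ? div * 133333ul / (pre << post) : 0;
}

int main(void)
{
	/* div = 20, post = 1, pre = 2 -> 20 * 133333 / 4 = 666665 */
	printf("%lu kHz (assumed unit)\n",
	       pxfreq(20 << 16 | 1 << 12 | 2));
	return 0;
}
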
+
+static unsigned int init_emon(struct intel_uncore *uncore)
+{
+ u8 pxw[16];
+ int i;
+
+ /* Disable to program */
+ intel_uncore_write(uncore, ECR, 0);
+ intel_uncore_posting_read(uncore, ECR);
+
+ /* Program energy weights for various events */
+ intel_uncore_write(uncore, SDEW, 0x15040d00);
+ intel_uncore_write(uncore, CSIEW0, 0x007f0000);
+ intel_uncore_write(uncore, CSIEW1, 0x1e220004);
+ intel_uncore_write(uncore, CSIEW2, 0x04000004);
+
+ for (i = 0; i < 5; i++)
+ intel_uncore_write(uncore, PEW(i), 0);
+ for (i = 0; i < 3; i++)
+ intel_uncore_write(uncore, DEW(i), 0);
+
+ /* Program P-state weights to account for frequency power adjustment */
+ for (i = 0; i < 16; i++) {
+ u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
+ unsigned int freq = intel_pxfreq(pxvidfreq);
+ unsigned int vid =
+ (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+ unsigned int val;
+
+ val = vid * vid * freq / 1000 * 255;
+ val /= 127 * 127 * 900;
+
+ pxw[i] = val;
+ }
+ /* Render standby states get 0 weight */
+ pxw[14] = 0;
+ pxw[15] = 0;
+
+ for (i = 0; i < 4; i++) {
+ intel_uncore_write(uncore, PXW(i),
+ pxw[i * 4 + 0] << 24 |
+ pxw[i * 4 + 1] << 16 |
+ pxw[i * 4 + 2] << 8 |
+ pxw[i * 4 + 3] << 0);
+ }
+
+ /* Adjust magic regs to magic values (more experimental results) */
+ intel_uncore_write(uncore, OGW0, 0);
+ intel_uncore_write(uncore, OGW1, 0);
+ intel_uncore_write(uncore, EG0, 0x00007f00);
+ intel_uncore_write(uncore, EG1, 0x0000000e);
+ intel_uncore_write(uncore, EG2, 0x000e0000);
+ intel_uncore_write(uncore, EG3, 0x68000300);
+ intel_uncore_write(uncore, EG4, 0x42000000);
+ intel_uncore_write(uncore, EG5, 0x00140031);
+ intel_uncore_write(uncore, EG6, 0);
+ intel_uncore_write(uncore, EG7, 0);
+
+ for (i = 0; i < 8; i++)
+ intel_uncore_write(uncore, PXWL(i), 0);
+
+ /* Enable PMON + select events */
+ intel_uncore_write(uncore, ECR, 0x80000019);
+
+ return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
+}
+
+static bool gen5_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u8 fstart, vstart;
+ u32 rgvmodectl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
+
+ /* Enable temp reporting */
+ intel_uncore_write16(uncore, PMMISC,
+ intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
+ intel_uncore_write16(uncore, TSC1,
+ intel_uncore_read16(uncore, TSC1) | TSE);
+
+ /* 100ms RC evaluation intervals */
+ intel_uncore_write(uncore, RCUPEI, 100000);
+ intel_uncore_write(uncore, RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ intel_uncore_write(uncore, RCBMAXAVG, 90000);
+ intel_uncore_write(uncore, RCBMINAVG, 80000);
+
+ intel_uncore_write(uncore, MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+
+ vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
+ PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
+
+ intel_uncore_write(uncore,
+ MEMINTREN,
+ MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ intel_uncore_write(uncore, VIDSTART, vstart);
+ intel_uncore_posting_read(uncore, VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
+
+ if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
+ MEMCTL_CMD_STS) == 0, 10))
+ DRM_ERROR("stuck trying to change perf mode\n");
+ mdelay(1);
+
+ gen5_rps_set(rps, rps->cur_freq);
+
+ rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
+ rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
+ rps->ips.last_time1 = jiffies_to_msecs(jiffies);
+
+ rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
+ rps->ips.last_time2 = ktime_get_raw_ns();
+
+ spin_unlock_irq(&mchdev_lock);
+
+ rps->ips.corr = init_emon(uncore);
+
+ return true;
+}
+
+static void gen5_rps_disable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u16 rgvswctl;
+
+ spin_lock_irq(&mchdev_lock);
+
+ rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
+
+ /* Ack interrupts, disable EFC interrupt */
+ intel_uncore_write(uncore, MEMINTREN,
+ intel_uncore_read(uncore, MEMINTREN) &
+ ~MEMINT_EVAL_CHG_EN);
+ intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ intel_uncore_write(uncore, DEIER,
+ intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
+ intel_uncore_write(uncore, DEIMR,
+ intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ gen5_rps_set(rps, rps->idle_freq);
+ mdelay(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
+ mdelay(1);
+
+ spin_unlock_irq(&mchdev_lock);
+}
+
+static u32 rps_limits(struct intel_rps *rps, u8 val)
+{
+ u32 limits;
+
+ /*
+ * Only set the down limit when we've reached the lowest level to avoid
+ * getting more interrupts, otherwise leave this clear. This prevents a
+ * race in the hw when coming out of rc6: There's a tiny window where
+ * the hw runs at the minimal clock before selecting the desired
+ * frequency, if the down threshold expires in that window we will not
+ * receive a down interrupt.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 9) {
+ limits = rps->max_freq_softlimit << 23;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 14;
+ } else {
+ limits = rps->max_freq_softlimit << 24;
+ if (val <= rps->min_freq_softlimit)
+ limits |= rps->min_freq_softlimit << 16;
+ }
+
+ return limits;
+}
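
rps_limits() packs the softlimits into GEN6_RP_INTERRUPT_LIMITS at gen-dependent bit positions, filling the min field only at the lowest level for the race reason explained above. A userspace sketch:

/* Sketch of the limits packing; field positions mirror the code above. */
#include <stdio.h>

static unsigned int limits(int gen, unsigned int val,
			   unsigned int min_sl, unsigned int max_sl)
{
	unsigned int lim;

	if (gen >= 9) {
		lim = max_sl << 23;
		if (val <= min_sl)
			lim |= min_sl << 14;
	} else {
		lim = max_sl << 24;
		if (val <= min_sl)
			lim |= min_sl << 16;
	}
	return lim;
}

int main(void)
{
	printf("gen9 at min:    0x%08x\n", limits(9, 3, 3, 10));
	printf("gen9 mid-range: 0x%08x\n", limits(9, 6, 3, 10));
	return 0;
}
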
+
+static void rps_set_power(struct intel_rps *rps, int new_power)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 threshold_up = 0, threshold_down = 0; /* in % */
+ u32 ei_up = 0, ei_down = 0;
+
+ lockdep_assert_held(&rps->power.mutex);
+
+ if (new_power == rps->power.mode)
+ return;
+
+ /* Note the units here are not exactly 1us, but 1280ns. */
+ switch (new_power) {
+ case LOW_POWER:
+ /* Upclock if more than 95% busy over 16ms */
+ ei_up = 16000;
+ threshold_up = 95;
+
+ /* Downclock if less than 85% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 85;
+ break;
+
+ case BETWEEN:
+ /* Upclock if more than 90% busy over 13ms */
+ ei_up = 13000;
+ threshold_up = 90;
+
+ /* Downclock if less than 75% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 75;
+ break;
+
+ case HIGH_POWER:
+ /* Upclock if more than 85% busy over 10ms */
+ ei_up = 10000;
+ threshold_up = 85;
+
+ /* Downclock if less than 60% busy over 32ms */
+ ei_down = 32000;
+ threshold_down = 60;
+ break;
+ }
+
+ /* Once byt can survive dynamic sw freq adjustments without
+ * hanging the system, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(i915))
+ goto skip_hw_write;
+
+ intel_uncore_write(uncore, GEN6_RP_UP_EI,
+ GT_INTERVAL_FROM_US(i915, ei_up));
+ intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915,
+ ei_up * threshold_up / 100));
+
+ intel_uncore_write(uncore, GEN6_RP_DOWN_EI,
+ GT_INTERVAL_FROM_US(i915, ei_down));
+ intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD,
+ GT_INTERVAL_FROM_US(i915,
+ ei_down * threshold_down / 100));
+
+ intel_uncore_write(uncore, GEN6_RP_CONTROL,
+ (INTEL_GEN(i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+skip_hw_write:
+ rps->power.mode = new_power;
+ rps->power.up_threshold = threshold_up;
+ rps->power.down_threshold = threshold_down;
+}
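
Taking the 1280 ns unit from the comment above at face value (the real GT_INTERVAL_FROM_US() scales per platform), the LOW_POWER up-interval maths works out as follows; a userspace sketch:

/* Worked example of the evaluation-interval conversion, assuming the
 * 1 unit == 1280 ns figure quoted in the comment above. */
#include <stdio.h>

int main(void)
{
	unsigned int ei_up = 16000;	/* us, LOW_POWER up interval */
	unsigned int threshold_up = 95;	/* %, LOW_POWER up threshold */
	unsigned int unit_ns = 1280;	/* assumed register unit */

	unsigned int ei_reg = ei_up * 1000u / unit_ns;
	unsigned int thresh_reg =
		(ei_up * threshold_up / 100) * 1000u / unit_ns;

	/* 12500 and 11875 respectively */
	printf("GEN6_RP_UP_EI ~ %u, GEN6_RP_UP_THRESHOLD ~ %u\n",
	       ei_reg, thresh_reg);
	return 0;
}
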
+
+static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
+{
+ int new_power;
+
+ new_power = rps->power.mode;
+ switch (rps->power.mode) {
+ case LOW_POWER:
+ if (val > rps->efficient_freq + 1 &&
+ val > rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+
+ case BETWEEN:
+ if (val <= rps->efficient_freq &&
+ val < rps->cur_freq)
+ new_power = LOW_POWER;
+ else if (val >= rps->rp0_freq &&
+ val > rps->cur_freq)
+ new_power = HIGH_POWER;
+ break;
+
+ case HIGH_POWER:
+ if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+ val < rps->cur_freq)
+ new_power = BETWEEN;
+ break;
+ }
+ /* Max/min bins are special */
+ if (val <= rps->min_freq_softlimit)
+ new_power = LOW_POWER;
+ if (val >= rps->max_freq_softlimit)
+ new_power = HIGH_POWER;
+
+ mutex_lock(&rps->power.mutex);
+ if (rps->power.interactive)
+ new_power = HIGH_POWER;
+ rps_set_power(rps, new_power);
+ mutex_unlock(&rps->power.mutex);
+}
+
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
+{
+ mutex_lock(&rps->power.mutex);
+ if (interactive) {
+ if (!rps->power.interactive++ && rps->active)
+ rps_set_power(rps, HIGH_POWER);
+ } else {
+ GEM_BUG_ON(!rps->power.interactive);
+ rps->power.interactive--;
+ }
+ mutex_unlock(&rps->power.mutex);
+}
+
+static int gen6_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 swreq;
+
+ if (INTEL_GEN(i915) >= 9)
+ swreq = GEN9_FREQUENCY(val);
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ swreq = HSW_FREQUENCY(val);
+ else
+ swreq = (GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq);
+
+ return 0;
+}
+
+static int vlv_rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ vlv_punit_get(i915);
+ err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
+ vlv_punit_put(i915);
+
+ return err;
+}
+
+static int rps_set(struct intel_rps *rps, u8 val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ int err;
+
+ if (INTEL_GEN(i915) < 6)
+ return 0;
+
+ if (val == rps->last_freq)
+ return 0;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ err = vlv_rps_set(rps, val);
+ else
+ err = gen6_rps_set(rps, val);
+ if (err)
+ return err;
+
+ gen6_rps_set_thresholds(rps, val);
+ rps->last_freq = val;
+
+ return 0;
+}
+
+void intel_rps_unpark(struct intel_rps *rps)
+{
+ u8 freq;
+
+ if (!rps->enabled)
+ return;
+
+ /*
+ * Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ mutex_lock(&rps->lock);
+ rps->active = true;
+	freq = max(rps->cur_freq, rps->efficient_freq);
+ freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
+ intel_rps_set(rps, freq);
+ rps->last_adj = 0;
+ mutex_unlock(&rps->lock);
+
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6)
+ rps_enable_interrupts(rps);
+
+ if (IS_GEN(rps_to_i915(rps), 5))
+ gen5_rps_update(rps);
+}
+
+void intel_rps_park(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (!rps->enabled)
+ return;
+
+ if (INTEL_GEN(i915) >= 6)
+ rps_disable_interrupts(rps);
+
+ rps->active = false;
+ if (rps->last_freq <= rps->idle_freq)
+ return;
+
+ /*
+ * The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver are now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+ rps_set(rps, rps->idle_freq);
+ intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
+}
+
+void intel_rps_boost(struct i915_request *rq)
+{
+ struct intel_rps *rps = &rq->engine->gt->rps;
+ unsigned long flags;
+
+ if (i915_request_signaled(rq) || !rps->active)
+ return;
+
+ /* Serializes with i915_request_retire() */
+ spin_lock_irqsave(&rq->lock, flags);
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
+ rq->flags |= I915_REQUEST_WAITBOOST;
+
+ if (!atomic_fetch_inc(&rps->num_waiters) &&
+ READ_ONCE(rps->cur_freq) < rps->boost_freq)
+ schedule_work(&rps->work);
+
+ atomic_inc(&rps->boosts);
+ }
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+int intel_rps_set(struct intel_rps *rps, u8 val)
+{
+ int err = 0;
+
+ lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(val > rps->max_freq);
+ GEM_BUG_ON(val < rps->min_freq);
+
+ if (rps->active) {
+ err = rps_set(rps, val);
+
+ /*
+ * Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ if (INTEL_GEN(rps_to_i915(rps)) >= 6) {
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS,
+ rps_limits(rps, val));
+
+ intel_uncore_write(uncore, GEN6_PMINTRMSK,
+ rps_pm_mask(rps, val));
+ }
+ }
+
+ if (err == 0)
+ rps->cur_freq = val;
+
+ return err;
+}
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* All of these values are in units of 50MHz */
+
+ /* static values from HW: RP0 > RP1 > RPn (min_freq) */
+ if (IS_GEN9_LP(i915)) {
+ u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 0) & 0xff;
+ } else {
+ u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+
+ rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ rps->min_freq = (rp_state_cap >> 16) & 0xff;
+ }
+
+ /* hw_max = RP0 until we check for overclocking */
+ rps->max_freq = rps->rp0_freq;
+
+ rps->efficient_freq = rps->rp1_freq;
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
+ IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ u32 ddcc_status = 0;
+
+ if (sandybridge_pcode_read(i915,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ &ddcc_status, NULL) == 0)
+ rps->efficient_freq =
+ clamp_t(u8,
+ (ddcc_status >> 8) & 0xff,
+ rps->min_freq,
+ rps->max_freq);
+ }
+
+ if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+ /* Store the frequency values in 16.66 MHz units, which is
+ * the natural hardware unit for SKL
+ */
+ rps->rp0_freq *= GEN9_FREQ_SCALER;
+ rps->rp1_freq *= GEN9_FREQ_SCALER;
+ rps->min_freq *= GEN9_FREQ_SCALER;
+ rps->max_freq *= GEN9_FREQ_SCALER;
+ rps->efficient_freq *= GEN9_FREQ_SCALER;
+ }
+}
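
A worked userspace example of the GEN9_FREQ_SCALER conversion above; the scaler is 3 because 50 MHz is roughly three 16.66 MHz units:

/* Worked example: the same physical frequency in both encodings. */
#include <stdio.h>

int main(void)
{
	unsigned int rp0 = 18;			/* fused, 50 MHz units */
	unsigned int scaler = 3;		/* GEN9_FREQ_SCALER */
	unsigned int rp0_gen9 = rp0 * scaler;	/* 16.66 MHz units */

	printf("rp0 = %u MHz\n", rp0 * 50);
	printf("gen9 encoding: %u units of ~16.66 MHz (~%u MHz)\n",
	       rp0_gen9, rp0_gen9 * 50 / 3);
	return 0;
}
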
+
+static bool rps_reset(struct intel_rps *rps)
+{
+ /* force a reset */
+ rps->power.mode = -1;
+ rps->last_freq = -1;
+
+ if (rps_set(rps, rps->min_freq)) {
+ DRM_ERROR("Failed to reset RPS to initial values\n");
+ return false;
+ }
+
+ rps->cur_freq = rps->min_freq;
+ return true;
+}
+
+/* See the Gen9_GT_PM_Programming_Guide doc for the below */
+static bool gen9_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Program defaults and thresholds for RPS */
+ if (IS_GEN(i915, 9))
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ GEN9_FREQUENCY(rps->rp1_freq));
+
+ /* 1 second timeout */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ GT_INTERVAL_FROM_US(i915, 1000000));
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
+
+ return rps_reset(rps);
+}
+
+static bool gen8_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
+ HSW_FREQUENCY(rps->rp1_freq));
+
+ /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT,
+ 100000000 / 128); /* 1 second timeout */
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static bool gen6_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ /* Power down if completely idle for over 50ms */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ return rps_reset(rps);
+}
+
+static int chv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ switch (RUNTIME_INFO(i915)->sseu.eu_total) {
+ case 8:
+ /* (2 * 4) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
+ break;
+ case 12:
+ /* (2 * 6) config */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
+ break;
+ case 16:
+ /* (2 * 8) config */
+ default:
+ /* Setting (2 * 8) Min RP0 for any other combination */
+ val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
+ break;
+ }
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static int chv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
+ val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
+
+ return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+}
+
+static int chv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static u32 chv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
+ val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
+
+ return val & FB_GFX_FREQ_FUSE_MASK;
+}
+
+static bool chv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+	/* 1: Program defaults and thresholds for RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* 2: Enable RPS */
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ /* Setting Fixed Bias */
+ vlv_punit_get(i915);
+
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static int vlv_rps_guar_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp1;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
+ rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int vlv_rps_max_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rp0;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
+ /* Clamp to max */
+ rp0 = min_t(u32, rp0, 0xea);
+
+ return rp0;
+}
+
+static int vlv_rps_rpe_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val, rpe;
+
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
+ rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
+ val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
+ rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
+
+ return rpe;
+}
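
The RPe value is split across two fuse registers; this sketch (field values invented, and assuming, as the << 5 implies, a 5-bit low field) shows how the halves reassemble:

/* Reassembly of the split RPe fuse, mirroring vlv_rps_rpe_freq() */
#include <assert.h>

int main(void)
{
	unsigned int lo = 0x0b; /* FB_FMAX_VMIN_FREQ_LO field, post-shift */
	unsigned int hi = 0x02; /* FB_FMAX_VMIN_FREQ_HI field */
	unsigned int rpe = lo | (hi << 5); /* high bits sit above bit 4 */

	assert(rpe == 0x4b);
	return 0;
}
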
+
+static int vlv_rps_min_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
+ /*
+ * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
+ * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
+ * a BYT-M B0 the above register contains 0xbf. Moreover when setting
+	 * a frequency the Punit will not allow values below 0xc0. Clamp to 0xc0
+ * to make sure it matches what Punit accepts.
+ */
+ return max_t(u32, val, 0xc0);
+}
+
+static bool vlv_rps_enable(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
+ intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
+ intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
+
+ vlv_punit_get(i915);
+
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+
+ vlv_punit_put(i915);
+
+ /* RPS code assumes GPLL is used */
+ WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ return rps_reset(rps);
+}
+
+static unsigned long __ips_gfx_val(struct intel_ips *ips)
+{
+ struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned long t, corr, state1, corr2, state2;
+ u32 pxvid, ext_v;
+
+ lockdep_assert_held(&mchdev_lock);
+
+ pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
+ pxvid = (pxvid >> 24) & 0x7f;
+ ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
+
+ state1 = ext_v;
+
+ /* Revel in the empirically derived constants */
+
+ /* Correction factor in 1/100000 units */
+ t = ips_mch_val(uncore);
+ if (t > 80)
+ corr = t * 2349 + 135940;
+ else if (t >= 50)
+ corr = t * 964 + 29317;
+ else /* < 50 */
+ corr = t * 301 + 1004;
+
+ corr = corr * 150142 * state1 / 10000 - 78642;
+ corr /= 100000;
+ corr2 = corr * ips->corr;
+
+ state2 = corr2 * state1 / 10000;
+ state2 /= 100; /* convert to mW */
+
+ __gen5_ips_update(ips);
+
+ return ips->gfx_power + state2;
+}
+
+void intel_rps_enable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+ if (IS_CHERRYVIEW(i915))
+ rps->enabled = chv_rps_enable(rps);
+ else if (IS_VALLEYVIEW(i915))
+ rps->enabled = vlv_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 9)
+ rps->enabled = gen9_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 8)
+ rps->enabled = gen8_rps_enable(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ rps->enabled = gen6_rps_enable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ rps->enabled = gen5_rps_enable(rps);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+ if (!rps->enabled)
+ return;
+
+ WARN_ON(rps->max_freq < rps->min_freq);
+ WARN_ON(rps->idle_freq > rps->max_freq);
+
+ WARN_ON(rps->efficient_freq < rps->min_freq);
+ WARN_ON(rps->efficient_freq > rps->max_freq);
+}
+
+static void gen6_rps_disable(struct intel_rps *rps)
+{
+ intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
+}
+
+void intel_rps_disable(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->enabled = false;
+
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_disable(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_disable(rps);
+}
+
+static int byt_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
+}
+
+static int byt_freq_opcode(struct intel_rps *rps, int val)
+{
+ return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
+}
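
A round-trip through the two BYT helpers, with a made-up GPLL reference (the driver reports it in kHz), shows the opcode <-> MHz mapping:

/* Round-trip check of byt_gpu_freq()/byt_freq_opcode(); gpll_ref is
 * hypothetical and div_round_closest() stands in for the kernel macro.
 */
#include <assert.h>

static int div_round_closest(int a, int b)
{
	return (a + b / 2) / b;
}

int main(void)
{
	int gpll_ref = 25000; /* kHz, invented for illustration */
	int opcode = 0xc8;

	/* MHz = gpll_ref[kHz] * (opcode - 0xb7) / 1000 */
	int mhz = div_round_closest(gpll_ref * (opcode - 0xb7), 1000); /* 425 */

	assert(div_round_closest(1000 * mhz, gpll_ref) + 0xb7 == opcode);
	return 0;
}
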
+
+static int chv_gpu_freq(struct intel_rps *rps, int val)
+{
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
+ return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
+}
+
+static int chv_freq_opcode(struct intel_rps *rps, int val)
+{
+ /* CHV needs even values */
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
+}
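
CHV differs only in that the PUnit accepts even opcodes, hence the round-then-double; the effective step is therefore two opcodes:

/* chv_freq_opcode() rounding sketch: the result is always even, so
 * frequencies quantize to gpll_ref / 2000 MHz steps. Values invented.
 */
#include <stdio.h>

static int div_round_closest(int a, int b)
{
	return (a + b / 2) / b;
}

int main(void)
{
	int gpll_ref = 25000; /* kHz, hypothetical */
	int mhz = 333;
	int opcode = div_round_closest(2 * 1000 * mhz, gpll_ref) * 2; /* 54 */

	/* and back: quantized to 338 MHz by the even-opcode constraint */
	printf("opcode %d -> %d MHz\n", opcode,
	       div_round_closest(gpll_ref * opcode, 2 * 2 * 1000));
	return 0;
}
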
+
+int intel_gpu_freq(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+ GEN9_FREQ_SCALER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_gpu_freq(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_gpu_freq(rps, val);
+ else
+ return val * GT_FREQUENCY_MULTIPLIER;
+}
+
+int intel_freq_opcode(struct intel_rps *rps, int val)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (INTEL_GEN(i915) >= 9)
+ return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+ GT_FREQUENCY_MULTIPLIER);
+ else if (IS_CHERRYVIEW(i915))
+ return chv_freq_opcode(rps, val);
+ else if (IS_VALLEYVIEW(i915))
+ return byt_freq_opcode(rps, val);
+ else
+ return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
+}
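
On Gen9+ both directions simply rescale between the 50 MHz SWREQ multiplier and the 16.66 MHz hardware unit; a round-trip sketch with an example opcode:

/* Gen9+ unit conversion round-trip, mirroring intel_gpu_freq() and
 * intel_freq_opcode(); the opcode is a made-up example value.
 */
#include <assert.h>

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

static int div_round_closest(int a, int b)
{
	return (a + b / 2) / b;
}

int main(void)
{
	int opcode = 66;
	int mhz = div_round_closest(opcode * GT_FREQUENCY_MULTIPLIER,
				    GEN9_FREQ_SCALER); /* 1100 MHz */

	assert(mhz == 1100);
	assert(div_round_closest(mhz * GEN9_FREQ_SCALER,
				 GT_FREQUENCY_MULTIPLIER) == opcode);
	return 0;
}
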
+
+static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ rps->gpll_ref_freq =
+ vlv_get_cck_clock(i915, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ i915->czclk_freq);
+
+ DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq);
+}
+
+static void vlv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ switch ((val >> 6) & 3) {
+ case 0:
+ case 1:
+ i915->mem_freq = 800;
+ break;
+ case 2:
+ i915->mem_freq = 1066;
+ break;
+ case 3:
+ i915->mem_freq = 1333;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = vlv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = vlv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = vlv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = vlv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+}
+
+static void chv_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 val;
+
+ vlv_iosf_sb_get(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ vlv_init_gpll_ref_freq(rps);
+
+ val = vlv_cck_read(i915, CCK_FUSE_REG);
+
+ switch ((val >> 2) & 0x7) {
+ case 3:
+ i915->mem_freq = 2000;
+ break;
+ default:
+ i915->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq);
+
+ rps->max_freq = chv_rps_max_freq(rps);
+ rps->rp0_freq = rps->max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->max_freq),
+ rps->max_freq);
+
+ rps->efficient_freq = chv_rps_rpe_freq(rps);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->efficient_freq),
+ rps->efficient_freq);
+
+ rps->rp1_freq = chv_rps_guar_freq(rps);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->rp1_freq),
+ rps->rp1_freq);
+
+ rps->min_freq = chv_rps_min_freq(rps);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ intel_gpu_freq(rps, rps->min_freq),
+ rps->min_freq);
+
+ vlv_iosf_sb_put(i915,
+ BIT(VLV_IOSF_SB_PUNIT) |
+ BIT(VLV_IOSF_SB_NC) |
+ BIT(VLV_IOSF_SB_CCK));
+
+ WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
+ rps->min_freq) & 1,
+ "Odd GPU freq values\n");
+}
+
+static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
+{
+ ei->ktime = ktime_get_raw();
+ ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
+ ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
+}
+
+static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ const struct intel_rps_ei *prev = &rps->ei;
+ struct intel_rps_ei now;
+ u32 events = 0;
+
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+ return 0;
+
+ vlv_c0_read(uncore, &now);
+
+ if (prev->ktime) {
+ u64 time, c0;
+ u32 render, media;
+
+ time = ktime_us_delta(now.ktime, prev->ktime);
+
+ time *= rps_to_i915(rps)->czclk_freq;
+
+		/*
+		 * Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ render = now.render_c0 - prev->render_c0;
+ media = now.media_c0 - prev->media_c0;
+ c0 = max(render, media);
+ c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+ if (c0 > time * rps->power.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * rps->power.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
+ }
+
+ rps->ei = now;
+ return events;
+}
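
Both sides of the comparison carry the same scaling, so it reduces to busy-percent versus threshold; a sketch with invented numbers (czclk in kHz, as elsewhere in the driver):

/* vlv_wa_c0_ei() comparison with hypothetical numbers: here the C0
 * delta works out to ~96% busy, tripping the up threshold.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long time_us = 10000;    /* 10 ms EI window */
	unsigned long long czclk_khz = 200000; /* hypothetical czclk */
	unsigned long long c0 = 7500;          /* max(render, media) delta */
	unsigned int up = 95, down = 85;       /* power thresholds, in % */

	unsigned long long time = time_us * czclk_khz;

	c0 *= 1000 * 100ull << 8; /* to usecs and scale to threshold% */

	if (c0 > time * up)
		puts("GEN6_PM_RP_UP_THRESHOLD");
	else if (c0 < time * down)
		puts("GEN6_PM_RP_DOWN_THRESHOLD");
	else
		puts("no event");
	return 0;
}
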
+
+static void rps_work(struct work_struct *work)
+{
+ struct intel_rps *rps = container_of(work, typeof(*rps), work);
+ struct intel_gt *gt = rps_to_gt(rps);
+ bool client_boost = false;
+ int new_freq, adj, min, max;
+ u32 pm_iir = 0;
+
+ spin_lock_irq(&gt->irq_lock);
+ pm_iir = fetch_and_zero(&rps->pm_iir);
+ client_boost = atomic_read(&rps->num_waiters);
+ spin_unlock_irq(&gt->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ goto out;
+
+ mutex_lock(&rps->lock);
+
+ pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
+
+ adj = rps->last_adj;
+ new_freq = rps->cur_freq;
+ min = rps->min_freq_softlimit;
+ max = rps->max_freq_softlimit;
+ if (client_boost)
+ max = rps->max_freq;
+ if (client_boost && new_freq < rps->boost_freq) {
+ new_freq = rps->boost_freq;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
+
+ if (new_freq >= rps->max_freq_softlimit)
+ adj = 0;
+ } else if (client_boost) {
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+ if (rps->cur_freq > rps->efficient_freq)
+ new_freq = rps->efficient_freq;
+ else if (rps->cur_freq > rps->min_freq_softlimit)
+ new_freq = rps->min_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+ if (adj < 0)
+ adj *= 2;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
+
+ if (new_freq <= rps->min_freq_softlimit)
+ adj = 0;
+ } else { /* unknown event */
+ adj = 0;
+ }
+
+ rps->last_adj = adj;
+
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
+	/*
+	 * sysfs frequency interfaces may have snuck in while servicing the
+ * interrupt
+ */
+ new_freq += adj;
+ new_freq = clamp_t(int, new_freq, min, max);
+
+ if (intel_rps_set(rps, new_freq)) {
+ DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+ rps->last_adj = 0;
+ }
+
+ mutex_unlock(&rps->lock);
+
+out:
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_unmask_irq(gt, rps->pm_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
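
The adjustment ladder above doubles on repeated events in the same direction and restarts on a reversal, with CHV sticking to even steps; a compact simulation:

/* last_adj ladder from rps_work(), simulated; ignores the soft-limit
 * clamping, boosting and power-mode damping for brevity.
 */
#include <stdio.h>
#include <stdbool.h>

static int next_adj(int adj, bool up, bool is_chv)
{
	if (up)
		return adj > 0 ? adj * 2 : (is_chv ? 2 : 1);
	return adj < 0 ? adj * 2 : (is_chv ? -2 : -1);
}

int main(void)
{
	int adj = 0;

	for (int i = 0; i < 4; i++) {
		adj = next_adj(adj, true, false);
		printf("up   -> adj %+d\n", adj); /* +1, +2, +4, +8 */
	}
	adj = next_adj(adj, false, false);
	printf("down -> adj %+d\n", adj); /* restarts at -1 */
	return 0;
}
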
+
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ const u32 events = rps->pm_events & pm_iir;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_gt_pm_mask_irq(gt, events);
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ if (pm_iir & rps->pm_events) {
+ spin_lock(&gt->irq_lock);
+ gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
+ rps->pm_iir |= pm_iir & rps->pm_events;
+ schedule_work(&rps->work);
+ spin_unlock(&gt->irq_lock);
+ }
+
+ if (INTEL_GEN(gt->i915) >= 8)
+ return;
+
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(gt->engine[VECS0]);
+
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
+}
+
+void gen5_rps_irq_handler(struct intel_rps *rps)
+{
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u8 new_freq;
+
+ spin_lock(&mchdev_lock);
+
+ intel_uncore_write16(uncore,
+ MEMINTRSTS,
+ intel_uncore_read(uncore, MEMINTRSTS));
+
+ intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
+ busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
+ busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
+ max_avg = intel_uncore_read(uncore, RCBMAXAVG);
+ min_avg = intel_uncore_read(uncore, RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ new_freq = rps->cur_freq;
+ if (busy_up > max_avg)
+ new_freq++;
+ else if (busy_down < min_avg)
+ new_freq--;
+ new_freq = clamp(new_freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit);
+
+ if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq))
+ rps->cur_freq = new_freq;
+
+ spin_unlock(&mchdev_lock);
+}
+
+void intel_rps_init_early(struct intel_rps *rps)
+{
+ mutex_init(&rps->lock);
+ mutex_init(&rps->power.mutex);
+
+ INIT_WORK(&rps->work, rps_work);
+
+ atomic_set(&rps->num_waiters, 0);
+}
+
+void intel_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+
+ if (IS_CHERRYVIEW(i915))
+ chv_rps_init(rps);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_rps_init(rps);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_rps_init(rps);
+ else if (IS_IRONLAKE_M(i915))
+ gen5_rps_init(rps);
+
+ /* Derive initial user preferences/limits from the hardware limits */
+ rps->max_freq_softlimit = rps->max_freq;
+ rps->min_freq_softlimit = rps->min_freq;
+
+ /* After setting max-softlimit, find the overclock max freq */
+ if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
+ u32 params = 0;
+
+ sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
+ &params, NULL);
+ if (params & BIT(31)) { /* OC supported */
+ DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
+ (rps->max_freq & 0xff) * 50,
+ (params & 0xff) * 50);
+ rps->max_freq = params & 0xff;
+ }
+ }
+
+ /* Finally allow us to boost to max by default */
+ rps->boost_freq = rps->max_freq;
+ rps->idle_freq = rps->min_freq;
+ rps->cur_freq = rps->idle_freq;
+
+ rps->pm_intrmsk_mbz = 0;
+
+ /*
+	 * SNB, IVB and HSW can hard hang (and VLV, CHV may too) on a looping
+	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_GEN(i915) <= 7)
+ rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_GEN(i915) >= 8)
+ rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
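
The pcode overclock reply packs a support flag in bit 31 and the overclocked ceiling, in 50 MHz units, in the low byte; decoding a fabricated reply:

/* GEN6_READ_OC_PARAMS decode with an invented pcode reply */
#include <stdio.h>

int main(void)
{
	unsigned int params = 0x8000002c; /* hypothetical reply */

	if (params & (1u << 31)) /* OC supported */
		printf("overclock max: %u MHz\n", (params & 0xff) * 50); /* 2200 */
	return 0;
}
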
+
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 cagf;
+
+ if (INTEL_GEN(i915) >= 9)
+ cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+ else
+ cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+
+ return cagf;
+}
+
+/* External interface for intel_ips.ko */
+
+static struct drm_i915_private __rcu *ips_mchdev;
+
+/**
+ * ips_ping_for_i915_load - tell intel_ips that i915 is now loaded
+ *
+ * Tells the intel_ips driver that the i915 driver is now loaded, if
+ * IPS got loaded first.
+ *
+ * This awkward dance is so that neither module has to depend on the
+ * other in order for IPS to do the appropriate communication of
+ * GPU turbo limits to i915.
+ */
+static void
+ips_ping_for_i915_load(void)
+{
+ void (*link)(void);
+
+ link = symbol_get(ips_link_to_i915_driver);
+ if (link) {
+ link();
+ symbol_put(ips_link_to_i915_driver);
+ }
+}
+
+void intel_rps_driver_register(struct intel_rps *rps)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+
+ /*
+ * We only register the i915 ips part with intel-ips once everything is
+ * set up, to avoid intel-ips sneaking in and reading bogus values.
+ */
+ if (IS_GEN(gt->i915, 5)) {
+ rcu_assign_pointer(ips_mchdev, gt->i915);
+ ips_ping_for_i915_load();
+ }
+}
+
+void intel_rps_driver_unregister(struct intel_rps *rps)
+{
+ rcu_assign_pointer(ips_mchdev, NULL);
+}
+
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+	i915 = rcu_dereference(ips_mchdev);
+	/* ips_mchdev is NULL until i915 registers (and after it unregisters) */
+	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
+}
+
+/**
+ * i915_read_mch_val - return value for IPS use
+ *
+ * Calculate and return a value for the IPS driver to use when deciding whether
+ * we have thermal and power headroom to increase CPU or GPU power budget.
+ */
+unsigned long i915_read_mch_val(void)
+{
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_ips *ips = &i915->gt.rps.ips;
+
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __ips_chipset_val(ips);
+ graphics_val = __ips_gfx_val(ips);
+ spin_unlock_irq(&mchdev_lock);
+ }
+
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
+}
+EXPORT_SYMBOL_GPL(i915_read_mch_val);
+
+/**
+ * i915_gpu_raise - raise GPU frequency limit
+ *
+ * Raise the limit; IPS indicates we have thermal headroom.
+ */
+bool i915_gpu_raise(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit < rps->max_freq)
+ rps->max_freq_softlimit++;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_raise);
+
+/**
+ * i915_gpu_lower - lower GPU frequency limit
+ *
+ * IPS indicates we're close to a thermal limit, so throttle back the GPU
+ * frequency maximum.
+ */
+bool i915_gpu_lower(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ if (rps->max_freq_softlimit > rps->min_freq)
+ rps->max_freq_softlimit--;
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return true;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_lower);
+
+/**
+ * i915_gpu_busy - indicate GPU busyness to IPS
+ *
+ * Tell the IPS driver whether or not the GPU is busy.
+ */
+bool i915_gpu_busy(void)
+{
+ struct drm_i915_private *i915;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_busy);
+
+/**
+ * i915_gpu_turbo_disable - disable graphics turbo
+ *
+ * Disable graphics turbo by resetting the max frequency and setting the
+ * current frequency to the default.
+ */
+bool i915_gpu_turbo_disable(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_rps *rps;
+ bool ret;
+
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+
+ rps = &i915->gt.rps;
+
+ spin_lock_irq(&mchdev_lock);
+ rps->max_freq_softlimit = rps->min_freq;
+ ret = gen5_rps_set(&i915->gt.rps, rps->min_freq);
+ spin_unlock_irq(&mchdev_lock);
+
+ drm_dev_put(&i915->drm);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
new file mode 100644
index 000000000000..9518c66c9792
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+
+struct i915_request;
+
+void intel_rps_init_early(struct intel_rps *rps);
+void intel_rps_init(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+#endif /* INTEL_RPS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
new file mode 100644
index 000000000000..c2e279154bd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+ u64 last_count1;
+ unsigned long last_time1;
+ unsigned long chipset_power;
+ u64 last_count2;
+ u64 last_time2;
+ unsigned long gfx_power;
+ u8 corr;
+
+ int c, m;
+};
+
+struct intel_rps_ei {
+ ktime_t ktime;
+ u32 render_c0;
+ u32 media_c0;
+};
+
+struct intel_rps {
+ struct mutex lock; /* protects enabling and the worker */
+
+ /*
+	 * work and pm_iir are protected by gt->irq_lock
+ */
+ struct work_struct work;
+ bool enabled;
+ bool active;
+ u32 pm_iir;
+
+ /* PM interrupt bits that should never be masked */
+ u32 pm_intrmsk_mbz;
+ u32 pm_events;
+
+	/*
+	 * Frequencies are stored in potentially platform-dependent multiples.
+ * In other words, *_freq needs to be multiplied by X to be interesting.
+ * Soft limits are those which are used for the dynamic reclocking done
+ * by the driver (raise frequencies under heavy loads, and lower for
+ * lighter loads). Hard limits are those imposed by the hardware.
+ *
+ * A distinction is made for overclocking, which is never enabled by
+ * default, and is considered to be above the hard limit if it's
+ * possible at all.
+ */
+ u8 cur_freq; /* Current frequency (cached, may not == HW) */
+ u8 last_freq; /* Last SWREQ frequency */
+ u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
+ u8 max_freq_softlimit; /* Max frequency permitted by the driver */
+ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
+ u8 min_freq; /* AKA RPn. Minimum frequency */
+ u8 boost_freq; /* Frequency to request when wait boosting */
+ u8 idle_freq; /* Frequency to request when we are idle */
+ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq; /* "less than" RP0 power/frequency */
+ u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
+
+ int last_adj;
+
+ struct {
+ struct mutex mutex;
+
+ enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+ unsigned int interactive;
+
+	u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+ } power;
+
+ atomic_t num_waiters;
+ atomic_t boosts;
+
+ /* manual wa residency calculations */
+ struct intel_rps_ei ei;
+ struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 6bf2d87da109..74f793423231 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -8,6 +8,19 @@
#include "intel_lrc_reg.h"
#include "intel_sseu.h"
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice)
+{
+ sseu->max_slices = max_slices;
+ sseu->max_subslices = max_subslices;
+ sseu->max_eus_per_subslice = max_eus_per_subslice;
+
+ sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
+ GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE);
+ sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+ GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE);
+}
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
{
@@ -19,10 +32,32 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
return total;
}
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+{
+ int i, offset = slice * sseu->ss_stride;
+ u32 mask = 0;
+
+ GEM_BUG_ON(slice >= sseu->max_slices);
+
+ for (i = 0; i < sseu->ss_stride; i++)
+ mask |= (u32)sseu->subslice_mask[offset + i] <<
+ i * BITS_PER_BYTE;
+
+ return mask;
+}
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride);
+}
+
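
The new accessors round-trip a 32-bit per-slice mask through the byte array; a sketch (the ss_stride of 2 is illustrative, and the memcpy store matches the byte-wise read only on little-endian, as in the driver):

/* Pack/unpack round-trip mirroring intel_sseu_set_subslices() and
 * intel_sseu_get_subslices(); sizes are invented.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t subslice_mask[3 * 2] = { 0 }; /* 3 slices x ss_stride 2 */
	int ss_stride = 2, slice = 1;
	uint32_t in = 0x01ff, out = 0;

	memcpy(&subslice_mask[slice * ss_stride], &in, ss_stride);

	for (int i = 0; i < ss_stride; i++)
		out |= (uint32_t)subslice_mask[slice * ss_stride + i] << (i * 8);

	assert(out == in); /* little-endian assumption */
	return 0;
}
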
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
{
- return hweight8(sseu->subslice_mask[slice]);
+ return hweight32(intel_sseu_get_subslices(sseu, slice));
}
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index b50d0401a4e2..d1d225204f09 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -10,15 +10,21 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include "i915_gem.h"
+
struct drm_i915_private;
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
+#define GEN_MAX_EUS (16) /* TGL upper bound */
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES];
+ u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -33,11 +39,8 @@ struct sseu_dev_info {
u8 max_subslices;
u8 max_eus_per_subslice;
- /* We don't have more than 8 eus per subslice at the moment and as we
- * store eus enabled using bits, no need to multiply by eus per
- * subslice.
- */
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
+ u8 ss_stride;
+ u8 eu_stride;
};
/*
@@ -63,12 +66,34 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
return value;
}
+static inline bool
+intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
+ int subslice)
+{
+ u8 mask;
+ int ss_idx = subslice / BITS_PER_BYTE;
+
+ GEM_BUG_ON(ss_idx >= sseu->ss_stride);
+
+ mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
+
+ return mask & BIT(subslice % BITS_PER_BYTE);
+}
+
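
intel_sseu_has_subslice() indexes the same byte array directly; for example, subslice 10 lives in byte 1, bit 2 of its slice's stride:

/* Bit addressing sketch for intel_sseu_has_subslice(); mask invented */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t mask[2] = { 0x00, 0x04 }; /* only subslice 10 set */
	int subslice = 10;

	assert(mask[subslice / 8] & (1 << (subslice % 8)));
	return 0;
}
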
+void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
+ u8 max_subslices, u8 max_eus_per_subslice);
+
unsigned int
intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
unsigned int
intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
+
+void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
+ u32 ss_mask);
+
u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
const struct intel_sseu *req_sseu);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 9cb01d9828f1..14ad10acd548 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,13 +4,13 @@
* Copyright © 2016-2018 Intel Corporation
*/
-#include "gt/intel_gt_types.h"
-
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "gt/intel_timeline.h"
+#include "intel_gt.h"
+#include "intel_ring.h"
+#include "intel_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
@@ -136,6 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
kfree(cl);
}
+__i915_active_call
static void __cacheline_retire(struct i915_active *active)
{
struct intel_timeline_cacheline *cl =
@@ -177,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
- i915_active_init(hwsp->gt->i915, &cl->active,
- __cacheline_active, __cacheline_retire);
+ i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
return cl;
}
@@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
@@ -442,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
* free it after the current request is retired, which ensures that
* all writes into the cacheline from previous requests are complete.
*/
- err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
+ err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
if (err)
goto err_cacheline;
@@ -493,24 +493,39 @@ int intel_timeline_get_seqno(struct intel_timeline *tl,
static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
- return i915_active_ref(&cl->active, rq->timeline, rq);
+ return i915_active_add_request(&cl->active, rq);
}
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
- struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
- struct intel_timeline *tl = from->timeline;
+ struct intel_timeline *tl;
int err;
- GEM_BUG_ON(to->timeline == tl);
+ rcu_read_lock();
+ tl = rcu_dereference(from->timeline);
+ if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+ if (!tl) /* already completed */
+ return 1;
+
+ GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl);
+
+ err = -EBUSY;
+ if (mutex_trylock(&tl->mutex)) {
+ struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+
+ if (i915_request_completed(from)) {
+ err = 1;
+ goto unlock;
+ }
- mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
- err = i915_request_completed(from);
- if (!err)
err = cacheline_ref(cl, to);
- if (!err) {
+ if (err)
+ goto unlock;
+
if (likely(cl == tl->hwsp_cacheline)) {
*hwsp = tl->hwsp_offset;
} else { /* across a seqno wrap, recover the original offset */
@@ -518,8 +533,11 @@ int intel_timeline_read_hwsp(struct i915_request *from,
ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
CACHELINE_BYTES;
}
+
+unlock:
+ mutex_unlock(&tl->mutex);
}
- mutex_unlock(&tl->mutex);
+ intel_timeline_put(tl);
return err;
}
@@ -541,7 +559,7 @@ void __intel_timeline_free(struct kref *kref)
container_of(kref, typeof(*timeline), kref);
intel_timeline_fini(timeline);
- kfree(timeline);
+ kfree_rcu(timeline, rcu);
}
static void timelines_fini(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 2b1baf2fcc8e..98d9ee166379 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -58,12 +58,13 @@ struct intel_timeline {
*/
struct list_head requests;
- /* Contains an RCU guarded pointer to the last request. No reference is
+ /*
+ * Contains an RCU guarded pointer to the last request. No reference is
* held to the request, users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
+ * the request using i915_active_fence_get(), or manage the RCU
+ * protection themselves (cf the i915_active_fence API).
*/
- struct i915_active_request last_request;
+ struct i915_active_fence last_request;
/**
* We track the most recent seqno that we wait on in every context so
@@ -80,6 +81,7 @@ struct intel_timeline {
struct intel_gt *gt;
struct kref kref;
+ struct rcu_head rcu;
};
#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5f6ec2fd29a0..e4bccc14602f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_ring.h"
#include "intel_workarounds.h"
/**
@@ -567,6 +568,9 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ /* Wa_1409142259:tgl */
+ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+ GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
}
static void
@@ -796,11 +800,10 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
slice = fls(sseu->slice_mask) - 1;
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = fls(l3_en & sseu->subslice_mask[slice]);
+ subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
if (!subslice) {
DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
- sseu->subslice_mask[slice], l3_en);
+ intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
WARN_ON(!subslice);
}
@@ -890,11 +893,27 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+
+ /* Wa_1607087056:icl */
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
+ /* Wa_1409420604:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE2,
+ CPSSUNIT_CLKGATE_DIS);
+
+ /* Wa_1409180338:tgl */
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
}
static void
@@ -1197,6 +1216,26 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ *
+ * This covers 4 registers which are next to one another :
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+ break;
+ default:
+ break;
+ }
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1258,6 +1297,26 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
+ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
+
+ /* Wa_1607138336:tgl */
+ wa_write_or(wal,
+ GEN9_CTX_PREEMPT_REG,
+ GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
+
+ /* Wa_1607030317:tgl */
+ /* Wa_1607186500:tgl */
+ /* Wa_1607297627:tgl */
+ wa_masked_en(wal,
+ GEN6_RC_SLEEP_PSMI_CONTROL,
+ GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
+ GEN8_RC_SEMA_IDLE_MSG_DISABLE);
+ }
+
if (IS_GEN(i915, 11)) {
/* This is not an Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -1452,7 +1511,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff))
+ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
return true;
return false;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5d43cbc3f345..83f549d203a0 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -23,6 +23,7 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "intel_context.h"
@@ -240,6 +241,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
+ GEM_BUG_ON(!i915->gt.uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -248,9 +250,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
engine->base.gt = &i915->gt;
+ engine->base.uncore = i915->gt.uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
+ engine->base.legacy_idx = INVALID_ENGINE;
engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
@@ -265,6 +269,9 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.reset.finish = mock_reset_finish;
engine->base.cancel_requests = mock_cancel_requests;
+ i915->gt.engine[id] = &engine->base;
+ i915->gt.engine_class[0][id] = &engine->base;
+
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 9d1ea26c7a2d..bc720defc6b8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -14,22 +14,28 @@
static int request_sync(struct i915_request *rq)
{
+ struct intel_timeline *tl = i915_request_timeline(rq);
long timeout;
int err = 0;
+ intel_timeline_get(tl);
i915_request_get(rq);
- i915_request_add(rq);
+ /* Opencode i915_request_add() so we can keep the timeline locked. */
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, NULL);
+
timeout = i915_request_wait(rq, 0, HZ / 10);
- if (timeout < 0) {
+ if (timeout < 0)
err = timeout;
- } else {
- mutex_lock(&rq->timeline->mutex);
+ else
i915_request_retire_upto(rq);
- mutex_unlock(&rq->timeline->mutex);
- }
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+ mutex_unlock(&tl->mutex);
i915_request_put(rq);
+ intel_timeline_put(tl);
return err;
}
@@ -41,24 +47,20 @@ static int context_sync(struct intel_context *ce)
mutex_lock(&tl->mutex);
do {
- struct i915_request *rq;
+ struct dma_fence *fence;
long timeout;
- rcu_read_lock();
- rq = rcu_dereference(tl->last_request.request);
- if (rq)
- rq = i915_request_get_rcu(rq);
- rcu_read_unlock();
- if (!rq)
+ fence = i915_active_fence_get(&tl->last_request);
+ if (!fence)
break;
- timeout = i915_request_wait(rq, 0, HZ / 10);
+ timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
if (timeout < 0)
err = timeout;
else
- i915_request_retire_upto(rq);
+ i915_request_retire_upto(to_request(fence));
- i915_request_put(rq);
+ dma_fence_put(fence);
} while (!err);
mutex_unlock(&tl->mutex);
@@ -101,9 +103,6 @@ static int __live_context_size(struct intel_engine_cs *engine,
*
* TLDR; this overlaps with the execlists redzone.
*/
- if (HAS_EXECLISTS(engine->i915))
- vaddr += LRC_HEADER_PAGES * PAGE_SIZE;
-
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
@@ -153,15 +152,11 @@ static int live_context_size(void *arg)
* HW tries to write past the end of one.
*/
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = kernel_context(gt->i915);
- if (IS_ERR(fixme)) {
- err = PTR_ERR(fixme);
- goto unlock;
- }
+ if (IS_ERR(fixme))
+ return PTR_ERR(fixme);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct {
struct drm_i915_gem_object *state;
void *pinned;
@@ -199,8 +194,6 @@ static int live_context_size(void *arg)
}
kernel_context_close(fixme);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -303,26 +296,23 @@ static int live_active_context(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = live_context(gt->i915, file);
if (IS_ERR(fixme)) {
err = PTR_ERR(fixme);
- goto unlock;
+ goto out_file;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = __live_active_context(engine, fixme);
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
mock_file_free(gt->i915, file);
return err;
}
@@ -416,26 +406,23 @@ static int live_remote_context(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
fixme = live_context(gt->i915, file);
if (IS_ERR(fixme)) {
err = PTR_ERR(fixme);
- goto unlock;
+ goto out_file;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = __live_remote_context(engine, fixme);
if (err)
break;
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
+out_file:
mock_file_free(gt->i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
new file mode 100644
index 000000000000..e864406bd2d9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -0,0 +1,350 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "i915_drv.h"
+
+#include "intel_gt_requests.h"
+#include "i915_selftest.h"
+
+struct pulse {
+ struct i915_active active;
+ struct kref kref;
+};
+
+static int pulse_active(struct i915_active *active)
+{
+ kref_get(&container_of(active, struct pulse, active)->kref);
+ return 0;
+}
+
+static void pulse_free(struct kref *kref)
+{
+ kfree(container_of(kref, struct pulse, kref));
+}
+
+static void pulse_put(struct pulse *p)
+{
+ kref_put(&p->kref, pulse_free);
+}
+
+static void pulse_retire(struct i915_active *active)
+{
+ pulse_put(container_of(active, struct pulse, active));
+}
+
+static struct pulse *pulse_create(void)
+{
+ struct pulse *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return p;
+
+ kref_init(&p->kref);
+ i915_active_init(&p->active, pulse_active, pulse_retire);
+
+ return p;
+}
+
+static void pulse_unlock_wait(struct pulse *p)
+{
+ mutex_lock(&p->active.mutex);
+ mutex_unlock(&p->active.mutex);
+ flush_work(&p->active.work);
+}
+
+static int __live_idle_pulse(struct intel_engine_cs *engine,
+ int (*fn)(struct intel_engine_cs *cs))
+{
+ struct pulse *p;
+ int err;
+
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+ p = pulse_create();
+ if (!p)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&p->active);
+ if (err)
+ goto out;
+
+ err = i915_active_acquire_preallocate_barrier(&p->active, engine);
+ if (err) {
+ i915_active_release(&p->active);
+ goto out;
+ }
+
+ i915_active_acquire_barrier(&p->active);
+ i915_active_release(&p->active);
+
+ GEM_BUG_ON(i915_active_is_idle(&p->active));
+ GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
+
+ err = fn(engine);
+ if (err)
+ goto out;
+
+ GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
+
+ if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) {
+ err = -ETIME;
+ goto out;
+ }
+
+ GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
+
+ pulse_unlock_wait(p); /* synchronize with the retirement callback */
+
+ if (!i915_active_is_idle(&p->active)) {
+ struct drm_printer m = drm_err_printer("pulse");
+
+ pr_err("%s: heartbeat pulse did not flush idle tasks\n",
+ engine->name);
+ i915_active_print(&p->active, &m);
+
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ pulse_put(p);
+ return err;
+}
+
+static int live_idle_flush(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_flush_barriers);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int live_idle_pulse(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that heartbeat pulses flush the idle barriers */
+
+ for_each_engine(engine, gt, id) {
+ intel_engine_pm_get(engine);
+ err = __live_idle_pulse(engine, intel_engine_pulse);
+ intel_engine_pm_put(engine);
+ if (err && err != -ENODEV)
+ break;
+
+ err = 0;
+ }
+
+ return err;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+ const u32 *a = _a, *b = _b;
+
+ return *a - *b;
+}
+
+static int __live_heartbeat_fast(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u32 times[5];
+ int err;
+ int i;
+
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ intel_engine_pm_get(engine);
+
+ err = intel_engine_set_heartbeat(engine, 1);
+ if (err)
+ goto err_pm;
+
+ for (i = 0; i < ARRAY_SIZE(times); i++) {
+ /* Manufacture a tick */
+ do {
+ while (READ_ONCE(engine->heartbeat.systole))
+ flush_delayed_work(&engine->heartbeat.work);
+
+ engine->serial++; /* quick, pretend we are not idle! */
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat did not start\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ rcu_read_lock();
+ rq = READ_ONCE(engine->heartbeat.systole);
+ if (rq)
+ rq = i915_request_get_rcu(rq);
+ rcu_read_unlock();
+ } while (!rq);
+
+ t0 = ktime_get();
+ while (rq == READ_ONCE(engine->heartbeat.systole))
+ yield(); /* work is on the local cpu! */
+ t1 = ktime_get();
+
+ i915_request_put(rq);
+ times[i] = ktime_us_delta(t1, t0);
+ }
+
+ sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
+
+ pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ times[0],
+ times[ARRAY_SIZE(times) - 1]);
+
+ /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */
+ if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) {
+ pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
+ engine->name,
+ times[ARRAY_SIZE(times) / 2],
+ jiffies_to_usecs(6));
+ err = -EINVAL;
+ }
+
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_heartbeat_fast(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the heartbeat ticks at the desired rate. */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ err = __live_heartbeat_fast(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __live_heartbeat_off(struct intel_engine_cs *engine)
+{
+ int err;
+
+ intel_engine_pm_get(engine);
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (!delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat not running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_pm;
+ }
+
+ err = intel_engine_set_heartbeat(engine, 0);
+ if (err)
+ goto err_pm;
+
+ engine->serial++;
+ flush_delayed_work(&engine->heartbeat.work);
+ if (delayed_work_pending(&engine->heartbeat.work)) {
+ pr_err("%s: heartbeat still running\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+ if (READ_ONCE(engine->heartbeat.systole)) {
+ pr_err("%s: heartbeat still allocated\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_beat;
+ }
+
+err_beat:
+ intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL);
+err_pm:
+ intel_engine_pm_put(engine);
+ return err;
+}
+
+static int live_heartbeat_off(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that we can turn off heartbeat and not interrupt VIP */
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ err = __live_heartbeat_off(engine);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_idle_flush),
+ SUBTEST(live_idle_pulse),
+ SUBTEST(live_heartbeat_fast),
+ SUBTEST(live_heartbeat_off),
+ };
+ int saved_hangcheck;
+ int err;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ saved_hangcheck = i915_modparams.enable_hangcheck;
+ i915_modparams.enable_hangcheck = INT_MAX;
+
+ err = intel_gt_live_subtests(tests, &i915->gt);
+
+ i915_modparams.enable_hangcheck = saved_hangcheck;
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 3a1419376912..20b9c83f43ad 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -25,7 +25,7 @@ static int live_engine_pm(void *arg)
}
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
const typeof(*igt_atomic_phases) *p;
for (p = igt_atomic_phases; p->name; p++) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
new file mode 100644
index 000000000000..d1752f15702a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -0,0 +1,60 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftest_llc.h"
+
+static int live_gt_resume(void *arg)
+{
+ struct intel_gt *gt = arg;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ /* Do several suspend/resume cycles to check we don't explode! */
+ do {
+ intel_gt_suspend_prepare(gt);
+ intel_gt_suspend_late(gt);
+
+ if (gt->rc6.enabled) {
+ pr_err("rc6 still enabled after suspend!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = intel_gt_resume(gt);
+ if (err)
+ break;
+
+ if (gt->rc6.supported && !gt->rc6.enabled) {
+ pr_err("rc6 not enabled upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ err = -EINVAL;
+ break;
+ }
+
+ err = st_llc_verify(&gt->llc);
+ if (err) {
+ pr_err("llc state not restored upon resume!\n");
+ intel_gt_set_wedged_on_init(gt);
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ return err;
+}
+
+int intel_gt_pm_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_gt_resume),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index a0098fc35921..85e9ccf5c304 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -131,7 +131,7 @@ static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct intel_gt *gt = h->gt;
- struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+ struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
@@ -141,12 +141,15 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
int err;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ i915_vm_put(vm);
return ERR_CAST(obj);
+ }
vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
+ i915_vm_put(vm);
return ERR_CAST(vaddr);
}
@@ -157,16 +160,22 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_vm_put(vm);
return ERR_CAST(vma);
+ }
hws = i915_vma_instance(h->hws, vm, NULL);
- if (IS_ERR(hws))
+ if (IS_ERR(hws)) {
+ i915_vm_put(vm);
return ERR_CAST(hws);
+ }
err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return ERR_PTR(err);
+ }
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
@@ -264,6 +273,7 @@ unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
+ i915_vm_put(vm);
return err ? ERR_PTR(err) : rq;
}
@@ -285,7 +295,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -309,12 +319,11 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_wedge_me w;
long timeout;
@@ -355,8 +364,6 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -383,9 +390,7 @@ static int igt_reset_nop(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
@@ -395,9 +400,7 @@ static int igt_reset_nop(void *arg)
reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&gt->i915->drm.struct_mutex);
-
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
int i;
for (i = 0; i < 16; i++) {
@@ -417,7 +420,6 @@ static int igt_reset_nop(void *arg)
intel_gt_reset(gt, ALL_ENGINES, NULL);
igt_global_reset_unlock(gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
@@ -429,16 +431,13 @@ static int igt_reset_nop(void *arg)
break;
}
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@@ -458,23 +457,21 @@ static int igt_reset_nop_engine(void *arg)
/* Check that we can engine-reset during non-user portions */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
IGT_TIMEOUT(end_time);
@@ -494,7 +491,6 @@ static int igt_reset_nop_engine(void *arg)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
@@ -507,7 +503,6 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
err = intel_engine_reset(engine, NULL);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
@@ -533,15 +528,12 @@ static int igt_reset_nop_engine(void *arg)
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
+ err = igt_flush_test(gt->i915);
out:
mock_file_free(gt->i915, file);
if (intel_gt_is_wedged(gt))
@@ -559,18 +551,16 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
/* Check that we can issue an engine reset on an idle engine (no-op) */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
IGT_TIMEOUT(end_time);
@@ -593,17 +583,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (active) {
struct i915_request *rq;
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -647,7 +634,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (err)
break;
- err = igt_flush_test(gt->i915, 0);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -655,11 +642,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (active) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (active)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
@@ -725,9 +709,7 @@ static int active_engine(void *data)
return PTR_ERR(file);
for (count = 0; count < ARRAY_SIZE(ctx); count++) {
- mutex_lock(&engine->i915->drm.struct_mutex);
ctx[count] = live_context(engine->i915, file);
- mutex_unlock(&engine->i915->drm.struct_mutex);
if (IS_ERR(ctx[count])) {
err = PTR_ERR(ctx[count]);
while (--count)
@@ -741,10 +723,8 @@ static int active_engine(void *data)
struct i915_request *old = rq[idx];
struct i915_request *new;
- mutex_lock(&engine->i915->drm.struct_mutex);
new = igt_request_alloc(ctx[idx], engine);
if (IS_ERR(new)) {
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = PTR_ERR(new);
break;
}
@@ -755,7 +735,6 @@ static int active_engine(void *data)
rq[idx] = i915_request_get(new);
i915_request_add(new);
- mutex_unlock(&engine->i915->drm.struct_mutex);
err = active_request_put(old);
if (err)
@@ -791,13 +770,11 @@ static int __igt_reset_engines(struct intel_gt *gt,
* with any other engine.
*/
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
@@ -805,7 +782,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
h.ctx->sched.priority = 1024;
}
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
@@ -823,7 +800,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
memset(threads, 0, sizeof(threads));
- for_each_engine(other, gt->i915, tmp) {
+ for_each_engine(other, gt, tmp) {
struct task_struct *tsk;
threads[tmp].resets =
@@ -849,23 +826,22 @@ static int __igt_reset_engines(struct intel_gt *gt,
get_task_struct(tsk);
}
+ yield(); /* start all threads before we begin */
+
intel_engine_pm_get(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -940,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
unwind:
- for_each_engine(other, gt->i915, tmp) {
+ for_each_engine(other, gt, tmp) {
int ret;
if (!threads[tmp].task)
@@ -977,9 +953,7 @@ unwind:
if (err)
break;
- mutex_lock(&gt->i915->drm.struct_mutex);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -987,11 +961,8 @@ unwind:
if (intel_gt_is_wedged(gt))
err = -EIO;
- if (flags & TEST_ACTIVE) {
- mutex_lock(&gt->i915->drm.struct_mutex);
+ if (flags & TEST_ACTIVE)
hang_fini(&h);
- mutex_unlock(&gt->i915->drm.struct_mutex);
- }
return err;
}
@@ -1047,7 +1018,7 @@ static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
@@ -1061,7 +1032,6 @@ static int igt_reset_wait(void *arg)
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
@@ -1109,7 +1079,6 @@ out_rq:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -1127,15 +1096,14 @@ static int evict_vma(void *data)
{
struct evict_vma *arg = data;
struct i915_address_space *vm = arg->vma->vm;
- struct drm_i915_private *i915 = vm->i915;
struct drm_mm_node evict = arg->vma->node;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&vm->mutex);
err = i915_gem_evict_for_node(vm, &evict, 0);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&vm->mutex);
return err;
}
@@ -1143,39 +1111,33 @@ static int evict_vma(void *data)
static int evict_fence(void *data)
{
struct evict_vma *arg = data;
- struct drm_i915_private *i915 = arg->vma->vm->i915;
int err;
complete(&arg->completion);
- mutex_lock(&i915->drm.struct_mutex);
-
/* Mark the fence register as dirty to force the mmio update. */
err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
if (err) {
pr_err("Invalid Y-tiling settings; err:%d\n", err);
- goto out_unlock;
+ return err;
}
err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
if (err) {
pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
err = i915_vma_pin_fence(arg->vma);
i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
- goto out_unlock;
+ return err;
}
i915_vma_unpin_fence(arg->vma);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return 0;
}
static int __igt_reset_evict_vma(struct intel_gt *gt,
@@ -1183,23 +1145,26 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
int (*fn)(void *),
unsigned int flags)
{
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
+ unsigned int pin_flags;
int err;
+ if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
+ return 0;
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
- goto unlock;
+ return err;
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
@@ -1227,10 +1192,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
goto out_obj;
}
- err = i915_vma_pin(arg.vma, 0, 0,
- i915_vma_is_ggtt(arg.vma) ?
- PIN_GLOBAL | PIN_MAPPABLE :
- PIN_USER);
+ pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE)
+ pin_flags |= PIN_MAPPABLE;
+
+ err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
goto out_obj;
@@ -1262,8 +1229,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (err)
goto out_rq;
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
@@ -1312,16 +1277,12 @@ out_reset:
put_task_struct(tsk);
}
- mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
-unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
if (intel_gt_is_wedged(gt))
return -EIO;
@@ -1340,6 +1301,7 @@ static int igt_reset_evict_ppgtt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
+ struct i915_address_space *vm;
struct drm_file *file;
int err;
@@ -1347,18 +1309,20 @@ static int igt_reset_evict_ppgtt(void *arg)
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&gt->i915->drm.struct_mutex);
ctx = live_context(gt->i915, file);
- mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
err = 0;
- if (ctx->vm) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(gt, ctx->vm,
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (!i915_is_ggtt(vm)) {
+ /* aliasing == global gtt locking, covered above */
+ err = __igt_reset_evict_vma(gt, vm,
evict_vma, EXEC_OBJECT_WRITE);
+ }
+ i915_vm_put(vm);
out:
mock_file_free(gt->i915, file);
@@ -1379,7 +1343,7 @@ static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine == exclude)
continue;
@@ -1403,12 +1367,11 @@ static int igt_reset_queue(void *arg)
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -1518,7 +1481,7 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
- err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915);
if (err)
break;
}
@@ -1526,7 +1489,6 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -1539,7 +1501,7 @@ static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->i915->engine[RCS0];
+ struct intel_engine_cs *engine = gt->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1547,17 +1509,15 @@ static int igt_handle_error(void *arg)
/* Check that we can issue a global GPU and engine reset */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&gt->i915->drm.struct_mutex);
-
err = hang_init(&h, gt);
if (err)
- goto err_unlock;
+ return err;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
@@ -1581,8 +1541,6 @@ static int igt_handle_error(void *arg)
goto err_request;
}
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
/* Temporarily disable error capture */
error = xchg(&global->first_error, (void *)-1);
@@ -1590,8 +1548,6 @@ static int igt_handle_error(void *arg)
xchg(&global->first_error, error);
- mutex_lock(&gt->i915->drm.struct_mutex);
-
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
err = -EINVAL;
@@ -1602,8 +1558,6 @@ err_request:
i915_request_put(rq);
err_fini:
hang_fini(&h);
-err_unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -1617,7 +1571,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
- tasklet_disable_nosync(t);
+ tasklet_disable(t);
p->critical_section_begin();
err = intel_engine_reset(engine, NULL);
@@ -1689,14 +1643,13 @@ static int igt_reset_engines_atomic(void *arg)
/* Check that the engines resets are usable from atomic context */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (USES_GUC_SUBMISSION(gt->i915))
return 0;
igt_global_reset_lock(gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
@@ -1706,7 +1659,7 @@ static int igt_reset_engines_atomic(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
@@ -1716,9 +1669,7 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
-
unlock:
- mutex_unlock(&gt->i915->drm.struct_mutex);
igt_global_reset_unlock(gt);
return err;
@@ -1743,27 +1694,19 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
};
struct intel_gt *gt = &i915->gt;
intel_wakeref_t wakeref;
- bool saved_hangcheck;
int err;
- if (!intel_has_gpu_reset(gt->i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
- drain_delayed_work(&gt->hangcheck.work); /* flush param */
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = intel_gt_live_subtests(tests, gt);
- mutex_lock(&gt->i915->drm.struct_mutex);
- igt_flush_test(gt->i915, I915_WAIT_LOCKED);
- mutex_unlock(&gt->i915->drm.struct_mutex);
-
- i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
new file mode 100644
index 000000000000..fd3770e48ac7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_pm.h" /* intel_gpu_freq() */
+#include "selftest_llc.h"
+#include "intel_rps.h"
+
+static int gen6_verify_ring_freq(struct intel_llc *llc)
+{
+ struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
+ struct ia_constants consts;
+ intel_wakeref_t wakeref;
+ unsigned int gpu_freq;
+ int err = 0;
+
+ wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
+
+ if (!get_ia_constants(llc, &consts)) {
+ err = -ENODEV;
+ goto out_rpm;
+ }
+
+ for (gpu_freq = consts.min_gpu_freq;
+ gpu_freq <= consts.max_gpu_freq;
+ gpu_freq++) {
+ struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
+ unsigned int ia_freq, ring_freq, found;
+ u32 val;
+
+ calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
+
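+ /* Compare our computed values against pcode's own min-freq table entry */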
+ val = gpu_freq;
+ if (sandybridge_pcode_read(i915,
+ GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ &val, NULL)) {
+ pr_err("Failed to read freq table[%d], range [%d, %d]\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
+ err = -ENXIO;
+ break;
+ }
+
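+ /* Low byte holds the expected IA (CPU) frequency, the next byte the ring frequency */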
+ found = (val >> 0) & 0xff;
+ if (found != ia_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ia_freq);
+ err = -EINVAL;
+ break;
+ }
+
+ found = (val >> 8) & 0xff;
+ if (found != ring_freq) {
+ pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
+ gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
+ intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+ found, ring_freq);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref);
+ return err;
+}
+
+int st_llc_verify(struct intel_llc *llc)
+{
+ int err = 0;
+
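+ /* The ring/IA frequency table is only programmed on platforms with an LLC */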
+ if (HAS_LLC(llc_to_gt(llc)->i915))
+ err = gen6_verify_ring_freq(llc);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h
new file mode 100644
index 000000000000..873f896e72f2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_LLC_H
+#define SELFTEST_LLC_H
+
+struct intel_llc;
+
+int st_llc_verify(struct intel_llc *llc);
+
+#endif /* SELFTEST_LLC_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index d791158988d6..eb71ac2f992c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -19,26 +20,52 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
+#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */
+
+static struct i915_vma *create_scratch(struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
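+ /* Keep the scratch page CPU-coherent so results can be read back */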
+ i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
static int live_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
struct intel_context *ce;
struct igt_spinner spin;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
goto err_spin;
@@ -55,13 +82,13 @@ static int live_sanitycheck(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx;
}
igt_spinner_end(&spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx;
}
@@ -73,12 +100,175 @@ err_ctx:
kernel_context_close(ctx);
err_spin:
igt_spinner_fini(&spin);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
+static int live_unlite_restore(struct intel_gt *gt, int prio)
+{
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we can correctly context switch between 2 instances
+ * on the same engine from the same parent context.
+ */
+
+ if (igt_spinner_init(&spin, gt))
+ return err;
+
+ ctx = kernel_context(gt->i915);
+ if (!ctx)
+ goto err_spin;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce[2] = {};
+ struct i915_request *rq[2];
+ struct igt_live_test t;
+ int n;
+
+ if (prio && !intel_engine_has_preemption(engine))
+ continue;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(ctx, engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err_ce;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err_ce;
+ }
+
+ /*
+ * Setup the pair of contexts such that if we
+ * lite-restore using the RING_TAIL from ce[1] it
+ * will execute garbage from ce[0]->ring.
+ */
+ memset(tmp->ring->vaddr,
+ POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
+ tmp->ring->vma->size);
+
+ ce[n] = tmp;
+ }
+ GEM_BUG_ON(!ce[1]->ring->size);
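+ /* Advance ce[1]'s RING_TAIL into the middle of its poisoned ring */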
+ intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
+ __execlists_update_reg_state(ce[1], engine);
+
+ rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ goto err_ce;
+ }
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
+
+ if (!igt_wait_for_spinner(&spin, rq[0])) {
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ rq[1] = i915_request_create(ce[1]);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ i915_request_put(rq[0]);
+ goto err_ce;
+ }
+
+ if (!prio) {
+ /*
+ * Ensure we do the switch to ce[1] on completion.
+ *
+ * rq[0] is already submitted, so this should reduce
+ * to a no-op (a wait on a request on the same engine
+ * uses the submit fence, not the completion fence),
+ * but it will install a dependency on rq[1] for rq[0]
+ * that will prevent the pair being reordered by
+ * timeslicing.
+ */
+ i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ }
+
+ i915_request_get(rq[1]);
+ i915_request_add(rq[1]);
+ GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
+ i915_request_put(rq[0]);
+
+ if (prio) {
+ struct i915_sched_attr attr = {
+ .priority = prio,
+ };
+
+ /* Alternatively preempt the spinner with ce[1] */
+ engine->schedule(rq[1], &attr);
+ }
+
+ /* And switch back to ce[0] for good measure */
+ rq[0] = i915_request_create(ce[0]);
+ if (IS_ERR(rq[0])) {
+ err = PTR_ERR(rq[0]);
+ i915_request_put(rq[1]);
+ goto err_ce;
+ }
+
+ i915_request_await_dma_fence(rq[0], &rq[1]->fence);
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+
+err_ce:
+ tasklet_kill(&engine->execlists.tasklet); /* flush submission */
+ igt_spinner_end(&spin);
+ for (n = 0; n < ARRAY_SIZE(ce); n++) {
+ if (IS_ERR_OR_NULL(ce[n]))
+ break;
+
+ intel_context_unpin(ce[n]);
+ intel_context_put(ce[n]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ kernel_context_close(ctx);
+err_spin:
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static int live_unlite_switch(void *arg)
+{
+ return live_unlite_restore(arg, 0);
+}
+
+static int live_unlite_preempt(void *arg)
+{
+ return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -131,7 +321,13 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
if (IS_ERR(rq))
goto out_ctx;
- err = emit_semaphore_chain(rq, vma, idx);
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb)
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = emit_semaphore_chain(rq, vma, idx);
+ if (err == 0)
+ i915_request_get(rq);
i915_request_add(rq);
if (err)
rq = ERR_PTR(err);
@@ -144,10 +340,10 @@ out_ctx:
static int
release_queue(struct intel_engine_cs *engine,
struct i915_vma *vma,
- int idx)
+ int idx, int prio)
{
struct i915_sched_attr attr = {
- .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ .priority = prio,
};
struct i915_request *rq;
u32 *cs;
@@ -168,9 +364,15 @@ release_queue(struct intel_engine_cs *engine,
*cs++ = 1;
intel_ring_advance(rq, cs);
+
+ i915_request_get(rq);
i915_request_add(rq);
+ local_bh_disable();
engine->schedule(rq, &attr);
+ local_bh_enable(); /* kick tasklet */
+
+ i915_request_put(rq);
return 0;
}
@@ -189,8 +391,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
if (IS_ERR(head))
return PTR_ERR(head);
- i915_request_get(head);
- for_each_engine(engine, outer->i915, id) {
+ for_each_engine(engine, outer->gt, id) {
for (i = 0; i < count; i++) {
struct i915_request *rq;
@@ -199,15 +400,16 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
err = PTR_ERR(rq);
goto out;
}
+
+ i915_request_put(rq);
}
}
- err = release_queue(outer, vma, n);
+ err = release_queue(outer, vma, n, INT_MAX);
if (err)
goto out;
- if (i915_request_wait(head,
- I915_WAIT_LOCKED,
+ if (i915_request_wait(head, 0,
2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
count, n);
@@ -223,9 +425,8 @@ out:
static int live_timeslice_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
struct i915_vma *vma;
void *vaddr;
int err = 0;
@@ -239,17 +440,14 @@ static int live_timeslice_preempt(void *arg)
* need to preempt the current task and replace it with another
* ready task.
*/
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto err_unlock;
- }
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -269,7 +467,7 @@ static int live_timeslice_preempt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
@@ -279,7 +477,7 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_pin;
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_pin;
}
@@ -292,22 +490,168 @@ err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+}
+
+static void wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
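+ /* Busy-wait until the request has actually been submitted to the HW */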
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+ } while (!i915_request_is_active(rq));
+}
+
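+/* Two full timeslices, plus a jiffy of slack, before we give up */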
+static long timeslice_threshold(const struct intel_engine_cs *engine)
+{
+ return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1;
+}
+
+static int live_timeslice_queue(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+
+ /*
+ * Make sure that even if ELSP[0] and ELSP[1] are filled, with
+ * timeslicing between them disabled, we *do* enable timeslicing
+ * if the queue demands it. (Normally, we do not submit if
+ * ELSP[1] is already occupied, so must rely on timeslicing to
+ * eject ELSP[0] in favour of the queue.)
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, gt, id) {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq, *nop;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /* ELSP[0]: semaphore wait */
+ rq = semaphore_queue(engine, vma, 0);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_pin;
+ }
+ engine->schedule(rq, &attr);
+ wait_for_submit(engine, rq);
+
+ /* ELSP[1]: nop request */
+ nop = nop_request(engine);
+ if (IS_ERR(nop)) {
+ err = PTR_ERR(nop);
+ i915_request_put(rq);
+ goto err_pin;
+ }
+ wait_for_submit(engine, nop);
+ i915_request_put(nop);
+
+ GEM_BUG_ON(i915_request_completed(rq));
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Queue: semaphore signal, matching priority as semaphore */
+ err = release_queue(engine, vma, 1, effective_prio(rq));
+ if (err) {
+ i915_request_put(rq);
+ goto err_pin;
+ }
+
+ intel_engine_flush_submission(engine);
+ if (!READ_ONCE(engine->execlists.timer.expires) &&
+ !i915_request_completed(rq)) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EINVAL;
+ }
+
+ /* Timeslice every jiffy, so within 2 timeslices we should signal */
+ if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) {
+ struct drm_printer p =
+ drm_info_printer(gt->i915->drm.dev);
+
+ pr_err("%s: Failed to timeslice into queue\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ memset(vaddr, 0xff, PAGE_SIZE);
+ err = -EIO;
+ }
+ i915_request_put(rq);
+ if (err)
+ break;
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
return err;
}
static int live_busywait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *map;
@@ -316,22 +660,19 @@ static int live_busywait_preempt(void *arg)
* preempt the busywaits used to synchronise between rings.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
- goto err_unlock;
+ return -ENOMEM;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ctx_lo;
@@ -343,7 +684,7 @@ static int live_busywait_preempt(void *arg)
goto err_obj;
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_map;
@@ -353,7 +694,7 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
u32 *cs;
@@ -364,7 +705,7 @@ static int live_busywait_preempt(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_vma;
}
@@ -444,7 +785,7 @@ static int live_busywait_preempt(void *arg)
i915_request_add(hi);
if (i915_request_wait(lo, 0, HZ / 5) < 0) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
engine->name);
@@ -452,7 +793,7 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
@@ -475,9 +816,6 @@ err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -501,49 +839,45 @@ spinner_create_request(struct igt_spinner *spin,
static int live_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
pr_err("Logical preemption supported, but not exposed\n");
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -559,7 +893,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -576,7 +910,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -599,54 +933,47 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static int live_late_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
ctx_lo->sched.priority = I915_USER_PRIORITY(1);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -705,15 +1032,12 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -723,14 +1047,13 @@ struct preempt_client {
struct i915_gem_context *ctx;
};
-static int preempt_client_init(struct drm_i915_private *i915,
- struct preempt_client *c)
+static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
{
- c->ctx = kernel_context(i915);
+ c->ctx = kernel_context(gt->i915);
if (!c->ctx)
return -ENOMEM;
- if (igt_spinner_init(&c->spin, &i915->gt))
+ if (igt_spinner_init(&c->spin, gt))
goto err_ctx;
return 0;
@@ -748,11 +1071,10 @@ static void preempt_client_fini(struct preempt_client *c)
static int live_nopreempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -760,19 +1082,16 @@ static int live_nopreempt(void *arg)
* that may be being observed and may not want to be interrupted.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
if (!intel_engine_has_preemption(engine))
@@ -832,7 +1151,7 @@ static int live_nopreempt(void *arg)
goto err_wedged;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -841,29 +1160,344 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
+struct live_preempt_cancel {
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP0 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
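+ /* Ban the spinner's context, then use a heartbeat pulse to knock it out */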
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[2] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Preempt cancel of ELSP1 */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* no preemption */
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
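+ /* Queue a preemptible spinner into ELSP[1] behind the non-preemptible hog */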
+ clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+ rq[1] = spinner_create_request(&arg->b.spin,
+ arg->b.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ i915_gem_context_set_banned(arg->b.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ igt_spinner_end(&arg->a.spin);
+ if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != 0) {
+ pr_err("Normal inflight0 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != -EIO) {
+ pr_err("Cancelled inflight1 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq[3] = {};
+ struct igt_live_test t;
+ int err;
+
+ /* Full ELSP and one in the wings */
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ if (igt_live_test_begin(&t, arg->engine->i915,
+ __func__, arg->engine->name))
+ return -EIO;
+
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq[0] = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[0]))
+ return PTR_ERR(rq[0]);
+
+ i915_request_get(rq[0]);
+ i915_request_add(rq[0]);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
+ clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+ rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+ if (IS_ERR(rq[1])) {
+ err = PTR_ERR(rq[1]);
+ goto out;
+ }
+
+ i915_request_get(rq[1]);
+ err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+ i915_request_add(rq[1]);
+ if (err)
+ goto out;
+
+ rq[2] = spinner_create_request(&arg->b.spin,
+ arg->a.ctx, arg->engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq[2])) {
+ err = PTR_ERR(rq[2]);
+ goto out;
+ }
+
+ i915_request_get(rq[2]);
+ err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+ i915_request_add(rq[2]);
+ if (err)
+ goto out;
+
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq[0]->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[1]->fence.error != 0) {
+ pr_err("Normal inflight1 request did not complete\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rq[2]->fence.error != -EIO) {
+ pr_err("Cancelled queued request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq[2]);
+ i915_request_put(rq[1]);
+ i915_request_put(rq[0]);
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ return err;
+}
+
+static int __cancel_hostile(struct live_preempt_cancel *arg)
+{
+ struct i915_request *rq;
+ int err;
+
+ /* Preempt cancel non-preemptible spinner in ELSP0 */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+ clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+ rq = spinner_create_request(&arg->a.spin,
+ arg->a.ctx, arg->engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+ err = -EIO;
+ goto out;
+ }
+
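+ /* With no arbitration point, the pulse must escalate to a forced reset */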
+ i915_gem_context_set_banned(arg->a.ctx);
+ err = intel_engine_pulse(arg->engine); /* force reset */
+ if (err)
+ goto out;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Cancelled inflight0 request did not report -EIO\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ i915_request_put(rq);
+ if (igt_flush_test(arg->engine->i915))
+ err = -EIO;
+ return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct live_preempt_cancel data;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * To cancel an inflight context, we need to first remove it from the
+ * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (preempt_client_init(gt, &data.a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &data.b))
+ goto err_client_a;
+
+ for_each_engine(data.engine, gt, id) {
+ if (!intel_engine_has_preemption(data.engine))
+ continue;
+
+ err = __cancel_active0(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_active1(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_queued(&data);
+ if (err)
+ goto err_wedged;
+
+ err = __cancel_hostile(&data);
+ if (err)
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&data.b);
+err_client_a:
+ preempt_client_fini(&data.a);
+ return err;
+
+err_wedged:
+ GEM_TRACE_DUMP();
+ igt_spinner_end(&data.b.spin);
+ igt_spinner_end(&data.a.spin);
+ intel_gt_set_wedged(gt);
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
};
struct preempt_client a, b;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -873,30 +1507,31 @@ static int live_suppress_self_preempt(void *arg)
* completion event.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0; /* presume black box */
- if (intel_vgpu_active(i915))
+ if (intel_vgpu_active(gt->i915))
return 0; /* GVT forces single port & request submission */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &a))
- goto err_unlock;
- if (preempt_client_init(i915, &b))
+ if (preempt_client_init(gt, &a))
+ return -ENOMEM;
+ if (preempt_client_init(gt, &b))
goto err_client_a;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
int depth;
if (!intel_engine_has_preemption(engine))
continue;
+ if (igt_flush_test(gt->i915))
+ goto err_wedged;
+
+ intel_engine_pm_get(engine);
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
@@ -904,12 +1539,14 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -921,6 +1558,7 @@ static int live_suppress_self_preempt(void *arg)
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
+ intel_engine_pm_put(engine);
goto err_client_b;
}
i915_request_add(rq_b);
@@ -931,6 +1569,7 @@ static int live_suppress_self_preempt(void *arg)
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
+ intel_engine_pm_put(engine);
goto err_wedged;
}
@@ -944,11 +1583,13 @@ static int live_suppress_self_preempt(void *arg)
engine->name,
engine->execlists.preempt_hang.count,
depth);
+ intel_engine_pm_put(engine);
err = -EINVAL;
goto err_client_b;
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
goto err_wedged;
}
@@ -957,15 +1598,12 @@ err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
@@ -984,9 +1622,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
if (!rq)
return NULL;
- INIT_LIST_HEAD(&rq->active_list);
rq->engine = engine;
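+ /* Just enough fence state for __i915_active_fence_set() to latch onto */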
+ spin_lock_init(&rq->lock);
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+ rq->fence.lock = &rq->lock;
+ rq->fence.ops = &i915_fence_ops;
+
i915_sched_node_init(&rq->sched);
/* mark this request as permanently incomplete */
@@ -1021,11 +1663,10 @@ static void dummy_request_free(struct i915_request *dummy)
static int live_suppress_wait_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct preempt_client client[4];
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
int i;
@@ -1035,22 +1676,19 @@ static int live_suppress_wait_preempt(void *arg)
* not needlessly generate preempt-to-idle cycles.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
- goto err_unlock;
- if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ if (preempt_client_init(gt, &client[0])) /* ELSP[0] */
+ return -ENOMEM;
+ if (preempt_client_init(gt, &client[1])) /* ELSP[1] */
goto err_client_0;
- if (preempt_client_init(i915, &client[2])) /* head of queue */
+ if (preempt_client_init(gt, &client[2])) /* head of queue */
goto err_client_1;
- if (preempt_client_init(i915, &client[3])) /* bystander */
+ if (preempt_client_init(gt, &client[3])) /* bystander */
goto err_client_2;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
int depth;
if (!intel_engine_has_preemption(engine))
@@ -1079,8 +1717,8 @@ static int live_suppress_wait_preempt(void *arg)
}
/* Disable NEWCLIENT promotion */
- __i915_active_request_set(&rq[i]->timeline->last_request,
- dummy);
+ __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+ &dummy->fence);
i915_request_add(rq[i]);
}
@@ -1105,7 +1743,7 @@ static int live_suppress_wait_preempt(void *arg)
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
goto err_wedged;
if (engine->execlists.preempt_hang.count) {
@@ -1128,26 +1766,22 @@ err_client_1:
preempt_client_fini(&client[1]);
err_client_0:
preempt_client_fini(&client[0]);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
}
static int live_chain_preempt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client hi, lo;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
/*
@@ -1156,19 +1790,16 @@ static int live_chain_preempt(void *arg)
* the previously submitted spinner in B.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (preempt_client_init(i915, &hi))
- goto err_unlock;
+ if (preempt_client_init(gt, &hi))
+ return -ENOMEM;
- if (preempt_client_init(i915, &lo))
+ if (preempt_client_init(gt, &lo))
goto err_client_hi;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
@@ -1199,7 +1830,7 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
- if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
@@ -1237,7 +1868,7 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&hi.spin);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to preempt over chain of %d\n",
count);
@@ -1253,7 +1884,7 @@ static int live_chain_preempt(void *arg)
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
@@ -1274,57 +1905,50 @@ err_client_lo:
preempt_client_fini(&lo);
err_client_hi:
preempt_client_fini(&hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_lo;
}
static int live_preempt_hang(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- if (igt_spinner_init(&spin_hi, &i915->gt))
- goto err_unlock;
+ if (igt_spinner_init(&spin_hi, gt))
+ return -ENOMEM;
- if (igt_spinner_init(&spin_lo, &i915->gt))
+ if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
- ctx_hi = kernel_context(i915);
+ ctx_hi = kernel_context(gt->i915);
if (!ctx_hi)
goto err_spin_lo;
ctx_hi->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
- ctx_lo = kernel_context(i915);
+ ctx_lo = kernel_context(gt->i915);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority =
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
@@ -1341,7 +1965,7 @@ static int live_preempt_hang(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -1363,28 +1987,28 @@ static int live_preempt_hang(void *arg)
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
- set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_reset(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_ctx_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -1399,9 +2023,105 @@ err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
-err_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_preempt_timeout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct igt_spinner spin_lo;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = -ENOMEM;
+
+ /*
+ * Check that we force preemption to occur by cancelling the previous
+ * context if it refuses to yield the GPU.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ return 0;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ return -ENOMEM;
+
+ ctx_hi = kernel_context(gt->i915);
+ if (!ctx_hi)
+ goto err_spin_lo;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
+
+ ctx_lo = kernel_context(gt->i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
+
+ for_each_engine(engine, gt, id) {
+ unsigned long saved_timeout;
+ struct i915_request *rq;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_NOOP); /* preemption disabled */
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
+ rq = igt_request_alloc(ctx_hi, engine);
+ if (IS_ERR(rq)) {
+ igt_spinner_end(&spin_lo);
+ err = PTR_ERR(rq);
+ goto err_ctx_lo;
+ }
+
+ /* Flush the previous CS ack before changing timeouts */
+ while (READ_ONCE(engine->execlists.pending[0]))
+ cpu_relax();
+
+ saved_timeout = engine->props.preempt_timeout_ms;
+ engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffy */
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ engine->props.preempt_timeout_ms = saved_timeout;
+
+ if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+ intel_gt_set_wedged(gt);
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ctx_lo;
+ }
+
+ igt_spinner_end(&spin_lo);
+ i915_request_put(rq);
+ }
+
+ err = 0;
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
return err;
}
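The core of live_preempt_timeout() above is the timeout override: engine->props.preempt_timeout_ms is dropped to 1 ms (one jiffy) around a single high-priority submission so the non-preemptible spinner is forcibly cancelled, then the old value is restored. A minimal sketch of that save/override/restore pattern, reusing only helpers visible in this patch (the function name here is hypothetical):

/* Submit rq with a forced ~1 jiffy preemption timeout on this engine. */
static int force_quick_preempt(struct intel_engine_cs *engine,
			       struct i915_request *rq)
{
	unsigned long saved = engine->props.preempt_timeout_ms;
	int err = 0;

	engine->props.preempt_timeout_ms = 1;

	i915_request_get(rq);
	i915_request_add(rq);

	/* Ensure submission samples the shortened timeout */
	intel_engine_flush_submission(engine);
	engine->props.preempt_timeout_ms = saved;

	if (i915_request_wait(rq, 0, HZ / 10) < 0)
		err = -ETIME; /* forced preemption never fired */

	i915_request_put(rq);
	return err;
}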
@@ -1416,7 +2136,7 @@ static int random_priority(struct rnd_state *rnd)
}
struct preempt_smoke {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
@@ -1440,7 +2160,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
int err = 0;
if (batch) {
- vma = i915_vma_instance(batch, ctx->vm, NULL);
+ struct i915_address_space *vm;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(batch, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1489,11 +2213,9 @@ static int smoke_crescendo_thread(void *arg)
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
- mutex_lock(&smoke->i915->drm.struct_mutex);
err = smoke_submit(smoke,
ctx, count % I915_PRIORITY_MAX,
smoke->batch);
- mutex_unlock(&smoke->i915->drm.struct_mutex);
if (err)
return err;
@@ -1514,9 +2236,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
unsigned long count;
int err = 0;
- mutex_unlock(&smoke->i915->drm.struct_mutex);
-
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
if (!(flags & BATCH))
@@ -1532,8 +2252,10 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
get_task_struct(tsk[id]);
}
+ yield(); /* start all threads before we kthread_stop() */
+
count = 0;
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(engine, smoke->gt, id) {
int status;
if (IS_ERR_OR_NULL(tsk[id]))
@@ -1548,11 +2270,9 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
put_task_struct(tsk[id]);
}
- mutex_lock(&smoke->i915->drm.struct_mutex);
-
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -1564,7 +2284,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
count = 0;
do {
- for_each_engine(smoke->engine, smoke->i915, id) {
+ for_each_engine(smoke->engine, smoke->gt, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
@@ -1580,25 +2300,24 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
+ RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext);
return 0;
}
static int live_preempt_smoke(void *arg)
{
struct preempt_smoke smoke = {
- .i915 = arg,
+ .gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
- intel_wakeref_t wakeref;
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
return 0;
smoke.contexts = kmalloc_array(smoke.ncontext,
@@ -1607,13 +2326,11 @@ static int live_preempt_smoke(void *arg)
if (!smoke.contexts)
return -ENOMEM;
- mutex_lock(&smoke.i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
-
- smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ smoke.batch =
+ i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err = PTR_ERR(smoke.batch);
- goto err_unlock;
+ goto err_free;
}
cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
@@ -1627,13 +2344,13 @@ static int live_preempt_smoke(void *arg)
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
- smoke.contexts[n] = kernel_context(smoke.i915);
+ smoke.contexts[n] = kernel_context(smoke.gt->i915);
if (!smoke.contexts[n])
goto err_ctx;
}
@@ -1660,15 +2377,13 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
-err_unlock:
- intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
- mutex_unlock(&smoke.i915->drm.struct_mutex);
+err_free:
kfree(smoke.contexts);
return err;
}
-static int nop_virtual_engine(struct drm_i915_private *i915,
+static int nop_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling,
unsigned int nctx,
@@ -1687,7 +2402,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(i915);
+ ctx[n] = kernel_context(gt->i915);
if (!ctx[n]) {
err = -ENOMEM;
nctx = n;
@@ -1712,7 +2427,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
}
}
- err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
if (err)
goto out;
@@ -1759,7 +2474,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
break;
}
}
@@ -1781,7 +2496,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
@@ -1794,25 +2509,22 @@ out:
static int live_virtual_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
- struct intel_gt *gt = &i915->gt;
enum intel_engine_id id;
unsigned int class, inst;
- int err = -ENODEV;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
- for_each_engine(engine, i915, id) {
- err = nop_virtual_engine(i915, &engine, 1, 1, 0);
+ for_each_engine(engine, gt, id) {
+ err = nop_virtual_engine(gt, &engine, 1, 1, 0);
if (err) {
pr_err("Failed to wrap engine %s: err=%d\n",
engine->name, err);
- goto out_unlock;
+ return err;
}
}
@@ -1830,23 +2542,21 @@ static int live_virtual_engine(void *arg)
continue;
for (n = 1; n <= nsibling + 1; n++) {
- err = nop_virtual_engine(i915, siblings, nsibling,
+ err = nop_virtual_engine(gt, siblings, nsibling,
n, 0);
if (err)
- goto out_unlock;
+ return err;
}
- err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
+ err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
if (err)
- goto out_unlock;
+ return err;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
-static int mask_virtual_engine(struct drm_i915_private *i915,
+static int mask_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
@@ -1862,7 +2572,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
return -ENOMEM;
@@ -1876,7 +2586,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
if (err)
goto out_put;
- err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
if (err)
goto out_unpin;
@@ -1907,7 +2617,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out;
}
@@ -1922,11 +2632,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
}
err = igt_live_test_end(&t);
- if (err)
- goto out;
-
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
@@ -1943,17 +2650,14 @@ out_close:
static int live_virtual_mask(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
@@ -1967,17 +2671,166 @@ static int live_virtual_mask(void *arg)
if (nsibling < 2)
continue;
- err = mask_virtual_engine(i915, siblings, nsibling);
+ err = mask_virtual_engine(gt, siblings, nsibling);
if (err)
- goto out_unlock;
+ return err;
+ }
+
+ return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct i915_request *last = NULL;
+ struct i915_gem_context *ctx;
+ struct intel_context *ve;
+ struct i915_vma *scratch;
+ struct igt_live_test t;
+ unsigned int n;
+ int err = 0;
+ u32 *cs;
+
+ ctx = kernel_context(gt->i915);
+ if (!ctx)
+ return -ENOMEM;
+
+ scratch = create_scratch(siblings[0]->gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_scratch;
+ }
+
+ err = intel_context_pin(ve);
+ if (err)
+ goto out_put;
+
+ err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+ if (err)
+ goto out_unpin;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ struct intel_engine_cs *engine = siblings[n % nsibling];
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_end;
+ }
+
+ i915_request_put(last);
+ last = i915_request_get(rq);
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+ *cs++ = n + 1;
+
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ /* Restrict this request to run on a particular engine */
+ rq->execution_mask = engine->mask;
+ i915_request_add(rq);
+ }
+
+ if (i915_request_wait(last, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto out_end;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_end;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n] != n) {
+ pr_err("Incorrect value[%d] found for GPR[%d]\n",
+ cs[n], n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ i915_request_put(last);
+out_unpin:
+ intel_context_unpin(ve);
+out_put:
+ intel_context_put(ve);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(ctx);
return err;
}
-static int bond_virtual_engine(struct drm_i915_private *i915,
+static int live_virtual_preserved(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that the context image retains non-privileged (user) registers
+ * from one engine to the next. For this we check that the CS_GPR
+ * values are preserved.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ /* As we use CS_GPR, we cannot run on platforms before they existed on all engines. */
+ if (INTEL_GEN(gt->i915) < 9)
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = preserved_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings,
unsigned int nsibling,
@@ -1993,13 +2846,13 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (!ctx)
return -ENOMEM;
err = 0;
rq[0] = ERR_PTR(-ENOMEM);
- for_each_engine(master, i915, id) {
+ for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
if (master->class == class)
@@ -2104,7 +2957,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915,
out:
for (n = 0; !IS_ERR(rq[n]); n++)
i915_request_put(rq[n]);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
kernel_context_close(ctx);
@@ -2121,17 +2974,14 @@ static int live_virtual_bond(void *arg)
{ "schedule", BOND_SCHEDULE },
{ },
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
- struct intel_gt *gt = &i915->gt;
unsigned int class, inst;
- int err = 0;
+ int err;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
-
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
const struct phase *p;
int nsibling;
@@ -2148,38 +2998,42 @@ static int live_virtual_bond(void *arg)
continue;
for (p = phases; p->name; p++) {
- err = bond_virtual_engine(i915,
+ err = bond_virtual_engine(gt,
class, siblings, nsibling,
p->flags);
if (err) {
pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
__func__, p->name, class, nsibling, err);
- goto out_unlock;
+ return err;
}
}
}
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_unlite_switch),
+ SUBTEST(live_unlite_preempt),
SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_nopreempt),
+ SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
+ SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
};
@@ -2189,5 +3043,512 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_live_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
+
+static void hexdump(const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ pr_info("*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ pr_info("[%04zx] %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+static int live_lrc_layout(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 *mem;
+ int err;
+
+ /*
+ * Check the registers offsets we use to create the initial reg state
+ * match the layout saved by HW.
+ */
+
+ mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ err = 0;
+ for_each_engine(engine, gt, id) {
+ u32 *hw, *lrc;
+ int dw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ lrc = memset(mem, 0, PAGE_SIZE);
+ execlists_init_reg_state(lrc,
+ engine->kernel_context,
+ engine,
+ engine->kernel_context->ring,
+ true);
+
+ dw = 0;
+ do {
+ u32 lri = hw[dw];
+
+ if (lri == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ pr_err("%s: Expected LRI command at dword %d, found %08x\n",
+ engine->name, dw, lri);
+ err = -EINVAL;
+ break;
+ }
+
+ if (lrc[dw] != lri) {
+ pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
+ engine->name, dw, lri, lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ lri &= 0x7f;
+ lri++;
+ dw++;
+
+ while (lri) {
+ if (hw[dw] != lrc[dw]) {
+ pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
+ engine->name, dw, hw[dw], lrc[dw]);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * Skip over the actual register value as we
+ * expect that to differ.
+ */
+ dw += 2;
+ lri -= 2;
+ }
+ } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ if (err) {
+ pr_info("%s: HW register image:\n", engine->name);
+ hexdump(hw, PAGE_SIZE);
+
+ pr_info("%s: SW register image:\n", engine->name);
+ hexdump(lrc, PAGE_SIZE);
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ if (err)
+ break;
+ }
+
+ kfree(mem);
+ return err;
+}
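The do/while loop in live_lrc_layout() decodes MI_LOAD_REGISTER_IMM packets by hand: the low 7 bits of the header are the DWord length field, which for an LRI carrying x register/value pairs is 2*x - 1, so length + 1 payload dwords follow the header. A sketch of that decode as a standalone helper (hypothetical, matching the arithmetic in the loop above):

/* Return the number of register/value pairs in the LRI at state[dw]. */
static int lri_pairs(const u32 *state, u32 dw)
{
	u32 header = state[dw];

	if ((header & GENMASK(31, 23)) != MI_INSTR(0x22, 0))
		return -EINVAL; /* not an LRI header */

	return ((header & 0x7f) + 1) / 2;
}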
+
+static int find_offset(const u32 *lri, u32 offset)
+{
+ int i;
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ if (lri[i] == offset)
+ return i;
+
+ return -1;
+}
+
+static int live_lrc_fixed(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the assumed register offsets match the actual locations in
+ * the context image.
+ */
+
+ for_each_engine(engine, gt, id) {
+ const struct {
+ u32 reg;
+ u32 offset;
+ const char *name;
+ } tbl[] = {
+ {
+ i915_mmio_reg_offset(RING_START(engine->mmio_base)),
+ CTX_RING_BUFFER_START - 1,
+ "RING_START"
+ },
+ {
+ i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
+ CTX_RING_BUFFER_CONTROL - 1,
+ "RING_CTL"
+ },
+ {
+ i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
+ CTX_RING_HEAD - 1,
+ "RING_HEAD"
+ },
+ {
+ i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
+ CTX_RING_TAIL - 1,
+ "RING_TAIL"
+ },
+ {
+ i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
+ lrc_ring_mi_mode(engine),
+ "RING_MI_MODE"
+ },
+ {
+ engine->mmio_base + 0x110,
+ CTX_BB_STATE - 1,
+ "BB_STATE"
+ },
+ { },
+ }, *t;
+ u32 *hw;
+
+ if (!engine->default_state)
+ continue;
+
+ hw = i915_gem_object_pin_map(engine->default_state,
+ I915_MAP_WB);
+ if (IS_ERR(hw)) {
+ err = PTR_ERR(hw);
+ break;
+ }
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ for (t = tbl; t->name; t++) {
+ int dw = find_offset(hw, t->reg);
+
+ if (dw != t->offset) {
+ pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
+ engine->name,
+ t->name,
+ t->reg,
+ dw,
+ t->offset);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(engine->default_state);
+ }
+
+ return err;
+}
+
+static int __live_lrc_state(struct i915_gem_context *fixme,
+ struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ enum {
+ RING_START_IDX = 0,
+ RING_TAIL_IDX,
+ MAX_IDX
+ };
+ u32 expected[MAX_IDX];
+ u32 *cs;
+ int err;
+ int n;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto err_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ cs = intel_ring_begin(rq, 4 * MAX_IDX);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_unpin;
+ }
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
+ *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
+ *cs++ = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ intel_engine_flush_submission(engine);
+ expected[RING_TAIL_IDX] = ce->ring->tail;
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < MAX_IDX; n++) {
+ if (cs[n] != expected[n]) {
+ pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
+ engine->name, n, cs[n], expected[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_unpin:
+ intel_context_unpin(ce);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
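__live_lrc_state() and the GPR tests that follow share one idiom: a 4-dword GEN8+ SRM that copies a single MMIO register into a GGTT scratch page, which the CPU then pins with i915_gem_object_pin_map() and compares against expected values. The packet layout, as a hypothetical helper:

/* Emit a GEN8+ SRM: store one MMIO register into a GGTT scratch slot. */
static u32 *emit_srm(u32 *cs, u32 reg, u32 ggtt_offset)
{
	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
	*cs++ = reg;         /* source register offset */
	*cs++ = ggtt_offset; /* destination address, low dword */
	*cs++ = 0;           /* destination address, high dword */
	return cs;
}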
+
+static int live_lrc_state(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check the live register state matches what we expect for this
+ * intel_context.
+ */
+
+ fixme = kernel_context(gt->i915);
+ if (!fixme)
+ return -ENOMEM;
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = __live_lrc_state(fixme, engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(fixme);
+ return err;
+}
+
+static int gpr_make_dirty(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int n;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static int __live_gpr_clear(struct i915_gem_context *fixme,
+ struct intel_engine_cs *engine,
+ struct i915_vma *scratch)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
+ return 0; /* GPR only on rcs0 for gen8 */
+
+ err = gpr_make_dirty(engine);
+ if (err)
+ return err;
+
+ ce = intel_context_create(fixme, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_put;
+ }
+
+ cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(rq);
+ goto err_put;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_rq;
+ }
+
+ cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ if (cs[n]) {
+ pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
+ engine->name,
+ n / 2, n & 1 ? "udw" : "ldw",
+ cs[n]);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_rq:
+ i915_request_put(rq);
+err_put:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_gpr_clear(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *fixme;
+ struct i915_vma *scratch;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Check that the GPRs are cleared in new contexts as we need
+ * to avoid leaking any information from previous contexts.
+ */
+
+ fixme = kernel_context(gt->i915);
+ if (!fixme)
+ return -ENOMEM;
+
+ scratch = create_scratch(gt);
+ if (IS_ERR(scratch)) {
+ err = PTR_ERR(scratch);
+ goto out_close;
+ }
+
+ for_each_engine(engine, gt, id) {
+ err = __live_gpr_clear(fixme, engine, scratch);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ i915_vma_unpin_and_release(&scratch, 0);
+out_close:
+ kernel_context_close(fixme);
+ return err;
+}
+
+int intel_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_lrc_layout),
+ SUBTEST(live_lrc_fixed),
+ SUBTEST(live_lrc_state),
+ SUBTEST(live_gpr_clear),
+ };
+
+ if (!HAS_LOGICAL_RING_CONTEXTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
}
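Note the shape every converted selftest now takes: intel_gt_live_subtests() passes struct intel_gt * as the opaque argument, so each subtest starts by recovering the gt and gating on hardware support via gt->i915. A skeleton, for illustration only:

static int live_example(void *arg)
{
	struct intel_gt *gt = arg; /* supplied by intel_gt_live_subtests() */

	if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
		return 0; /* silently skip on unsupported hardware */

	return 0;
}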
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 00a4f60cdfd5..6ad6aca315f6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -17,7 +17,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reset_count = i915_reset_count(&gt->i915->gpu_error);
@@ -28,7 +28,7 @@ static int igt_global_reset(void *arg)
err = -EINVAL;
}
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
@@ -45,14 +45,14 @@ static int igt_wedged_reset(void *arg)
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(gt);
- wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_gt_set_wedged(gt);
GEM_BUG_ON(!intel_gt_is_wedged(gt));
intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
return intel_gt_is_wedged(gt) ? -EIO : 0;
@@ -112,7 +112,7 @@ static int igt_atomic_engine_reset(void *arg)
/* Check that the resets are usable from atomic context */
- if (!intel_has_reset_engine(gt->i915))
+ if (!intel_has_reset_engine(gt))
return 0;
if (USES_GUC_SUBMISSION(gt->i915))
@@ -125,8 +125,8 @@ static int igt_atomic_engine_reset(void *arg)
if (!igt_force_reset(gt))
goto out_unlock;
- for_each_engine(engine, gt->i915, id) {
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ for_each_engine(engine, gt, id) {
+ tasklet_disable(&engine->execlists.tasklet);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
@@ -170,7 +170,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
};
struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(gt->i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 321481403165..f04a59fe5d2c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,8 +6,10 @@
#include <linux/prime_numbers.h>
-#include "gem/i915_gem_pm.h"
+#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_requests.h"
+#include "intel_ring.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
@@ -34,7 +36,7 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl)
#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
struct mock_hwsp_freelist {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct radix_tree_root cachelines;
struct intel_timeline **history;
unsigned long count, max;
@@ -67,7 +69,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = intel_timeline_create(&state->i915->gt, NULL);
+ tl = intel_timeline_create(state->gt, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -105,6 +107,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
static int mock_hwsp_freelist(void *arg)
{
struct mock_hwsp_freelist state;
+ struct drm_i915_private *i915;
const struct {
const char *name;
unsigned int flags;
@@ -116,12 +119,14 @@ static int mock_hwsp_freelist(void *arg)
unsigned int na;
int err = 0;
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
- state.i915 = mock_gem_device();
- if (!state.i915)
- return -ENOMEM;
+ state.gt = &i915->gt;
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
@@ -136,7 +141,6 @@ static int mock_hwsp_freelist(void *arg)
goto err_put;
}
- mutex_lock(&state.i915->drm.struct_mutex);
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
@@ -149,10 +153,9 @@ static int mock_hwsp_freelist(void *arg)
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
kfree(state.history);
err_put:
- drm_dev_put(&state.i915->drm);
+ drm_dev_put(&i915->drm);
return err;
}
@@ -449,8 +452,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
-
err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
@@ -461,10 +462,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
if (IS_ERR(rq))
goto out_unpin;
+ i915_request_get(rq);
+
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
- if (err)
+ if (err) {
+ i915_request_put(rq);
rq = ERR_PTR(err);
+ }
out_unpin:
intel_timeline_unpin(tl);
@@ -475,11 +480,11 @@ out:
}
static struct intel_timeline *
-checked_intel_timeline_create(struct drm_i915_private *i915)
+checked_intel_timeline_create(struct intel_gt *gt)
{
struct intel_timeline *tl;
- tl = intel_timeline_create(&i915->gt, NULL);
+ tl = intel_timeline_create(gt, NULL);
if (IS_ERR(tl))
return tl;
@@ -496,11 +501,10 @@ checked_intel_timeline_create(struct drm_i915_private *i915)
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -515,37 +519,40 @@ static int live_hwsp_engine(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
for (n = 0; n < NUM_TIMELINES; n++) {
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
+
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
}
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
@@ -559,11 +566,7 @@ out:
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
@@ -571,11 +574,10 @@ out:
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count, n;
int err = 0;
@@ -591,25 +593,25 @@ static int live_hwsp_alternate(void *arg)
if (!timelines)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
+ intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
+ intel_engine_pm_get(engine);
rq = tl_write(tl, engine, count);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
@@ -617,11 +619,12 @@ static int live_hwsp_alternate(void *arg)
}
timelines[count++] = tl;
+ i915_request_put(rq);
}
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
@@ -635,22 +638,17 @@ out:
intel_timeline_put(tl);
}
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
kvfree(timelines);
-
return err;
#undef NUM_TIMELINES
}
static int live_hwsp_wrap(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
int err = 0;
/*
@@ -658,14 +656,10 @@ static int live_hwsp_wrap(void *arg)
* foreign GPU references.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ tl = intel_timeline_create(gt, NULL);
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
- tl = intel_timeline_create(&i915->gt, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
@@ -673,7 +667,7 @@ static int live_hwsp_wrap(void *arg)
if (err)
goto out_free;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
const u32 *hwsp_seqno[2];
struct i915_request *rq;
u32 seqno[2];
@@ -681,7 +675,9 @@ static int live_hwsp_wrap(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
rq = i915_request_create(engine->kernel_context);
+ intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
@@ -743,29 +739,24 @@ static int live_hwsp_wrap(void *arg)
goto out;
}
- i915_retire_requests(i915); /* recycle HWSP */
+ intel_gt_retire_requests(gt); /* recycle HWSP */
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
intel_timeline_unpin(tl);
out_free:
intel_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
static int live_hwsp_recycle(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
unsigned long count;
int err = 0;
@@ -775,38 +766,38 @@ static int live_hwsp_recycle(void *arg)
* want to confuse ourselves or the GPU.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine))
continue;
+ intel_engine_pm_get(engine);
+
do {
struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_intel_timeline_create(i915);
+ tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
- goto out;
+ break;
}
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
- goto out;
+ break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
+ i915_request_put(rq);
intel_timeline_put(tl);
err = -EIO;
- goto out;
+ break;
}
if (*tl->hwsp_seqno != count) {
@@ -815,17 +806,18 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
+ i915_request_put(rq);
intel_timeline_put(tl);
count++;
if (err)
- goto out;
+ break;
} while (!__igt_timeout(end_time, NULL));
- }
-out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+ intel_engine_pm_put(engine);
+ if (err)
+ break;
+ }
return err;
}
@@ -842,5 +834,5 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_live_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index d06d68ac2a3b..abce6e4ec9c0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -33,8 +33,32 @@ struct wa_lists {
} engine[I915_NUM_ENGINES];
};
+static int request_add_sync(struct i915_request *rq, int err)
+{
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -EIO;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
+{
+ int err = 0;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (spin && !igt_wait_for_spinner(spin, rq))
+ err = -ETIMEDOUT;
+ i915_request_put(rq);
+
+ return err;
+}
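These two helpers fold the repeated get/add/wait/put sequence into a single call; most of the hunks below are a mechanical conversion to them. A typical call site, sketched from the code in this file:

rq = igt_spinner_create_request(spin, ce, MI_NOOP);
if (IS_ERR(rq))
	return PTR_ERR(rq);

err = request_add_spin(rq, spin); /* -ETIMEDOUT if it never starts */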
+
static void
-reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -42,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
- gt_init_workarounds(i915, &lists->gt_wa_list);
+ gt_init_workarounds(gt->i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
wa_init_start(wal, "REF", engine->name);
@@ -59,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
}
static void
-reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
intel_wa_list_free(&lists->engine[id].wa_list);
intel_wa_list_free(&lists->gt_wa_list);
@@ -191,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
i915_gem_object_lock(results);
- intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
+ intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
- if (intel_gt_is_wedged(&ctx->i915->gt))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
if (err)
goto out_put;
@@ -243,7 +267,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
struct i915_gem_context *ctx;
struct intel_context *ce;
struct i915_request *rq;
- intel_wakeref_t wakeref;
int err = 0;
ctx = kernel_context(engine->i915);
@@ -255,12 +278,9 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
- rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
- rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(spin, ce, MI_NOOP);
intel_context_put(ce);
- kernel_context_close(ctx);
if (IS_ERR(rq)) {
spin = NULL;
@@ -268,17 +288,12 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
goto err;
}
- i915_request_add(rq);
-
- if (spin && !igt_wait_for_spinner(spin, rq)) {
- pr_err("Spinner failed to start\n");
- err = -ETIMEDOUT;
- }
-
+ err = request_add_spin(rq, spin);
err:
if (err && spin)
igt_spinner_end(spin);
+ kernel_context_close(ctx);
return err;
}
@@ -313,7 +328,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out_spin;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(engine->uncore->rpm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
@@ -355,6 +370,7 @@ out_ctx:
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
@@ -362,7 +378,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, ctx->vm, NULL);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -463,12 +481,15 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
0xffff00ff,
0xffffffff,
};
+ struct i915_address_space *vm;
struct i915_vma *scratch;
struct i915_vma *batch;
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
+ i915_vm_put(vm);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -492,6 +513,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
ro_reg = ro_register(reg);
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
@@ -565,6 +589,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
@@ -572,15 +604,11 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
goto err_request;
err_request:
- i915_request_add(rq);
- if (err)
- goto out_batch;
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ err = request_add_sync(rq, err);
+ if (err) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
- intel_gt_set_wedged(&ctx->i915->gt);
- err = -EIO;
+ intel_gt_set_wedged(engine->gt);
goto out_batch;
}
@@ -668,7 +696,7 @@ out_unpin:
break;
}
- if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(ctx->i915))
err = -EIO;
out_batch:
i915_vma_unpin_and_release(&batch, 0);
@@ -679,36 +707,29 @@ out_scratch:
static int live_dirty_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
int err = 0;
/* Can the user write to the whitelisted registers? */
- if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ file = mock_file(gt->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- mutex_unlock(&i915->drm.struct_mutex);
- file = mock_file(i915);
- mutex_lock(&i915->drm.struct_mutex);
- if (IS_ERR(file)) {
- err = PTR_ERR(file);
- goto out_rpm;
- }
-
- ctx = live_context(i915, file);
+ ctx = live_context(gt->i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_file;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (engine->whitelist.count == 0)
continue;
@@ -718,45 +739,43 @@ static int live_dirty_whitelist(void *arg)
}
out_file:
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
- mutex_lock(&i915->drm.struct_mutex);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mock_file_free(gt->i915, file);
return err;
}
static int live_reset_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
+ igt_global_reset_lock(gt);
- if (!engine || engine->whitelist.count == 0)
- return 0;
-
- igt_global_reset_lock(&i915->gt);
+ for_each_engine(engine, gt, id) {
+ if (engine->whitelist.count == 0)
+ continue;
- if (intel_has_reset_engine(i915)) {
- err = check_whitelist_across_reset(engine,
- do_engine_reset,
- "engine");
- if (err)
- goto out;
- }
+ if (intel_has_reset_engine(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_engine_reset,
+ "engine");
+ if (err)
+ goto out;
+ }
- if (intel_has_gpu_reset(i915)) {
- err = check_whitelist_across_reset(engine,
- do_device_reset,
- "device");
- if (err)
- goto out;
+ if (intel_has_gpu_reset(gt)) {
+ err = check_whitelist_across_reset(engine,
+ do_device_reset,
+ "device");
+ if (err)
+ goto out;
+ }
}
out:
- igt_global_reset_unlock(&i915->gt);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -772,6 +791,14 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
if (IS_ERR(rq))
return PTR_ERR(rq);
+ i915_vma_lock(results);
+ err = i915_request_await_object(rq, results->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(results);
+ if (err)
+ goto err_req;
+
srm = MI_STORE_REGISTER_MEM;
if (INTEL_GEN(ctx->i915) >= 8)
srm++;
@@ -786,8 +813,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
u64 offset = results->node.start + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
- /* Clear access permission field */
- reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
*cs++ = srm;
*cs++ = reg;
@@ -797,12 +824,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
intel_ring_advance(rq, cs);
err_req:
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
-
- return err;
+ return request_add_sync(rq, err);
}
static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
@@ -830,6 +852,9 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
if (ro_register(reg))
continue;
+ /* Clear non priv flags */
+ reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
+
*cs++ = reg;
*cs++ = 0xffffffff;
}
@@ -850,13 +875,19 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
goto err_request;
}
+ i915_vma_lock(batch);
+ err = i915_request_await_object(rq, batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, rq, 0);
+ i915_vma_unlock(batch);
+ if (err)
+ goto err_request;
+
/* Perform the writes from an unprivileged "user" batch */
err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
err_request:
- i915_request_add(rq);
- if (i915_request_wait(rq, 0, HZ / 5) < 0)
- err = -EIO;
+ err = request_add_sync(rq, err);
err_unpin:
i915_gem_object_unpin_map(batch->obj);
@@ -973,7 +1004,7 @@ err_a:
static int live_isolated_whitelist(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct {
struct i915_gem_context *ctx;
struct i915_vma *scratch[2];
@@ -987,40 +1018,46 @@ static int live_isolated_whitelist(void *arg)
* invisible to a second context.
*/
- if (!intel_engines_has_context_isolation(i915))
- return 0;
-
- if (!i915->kernel_context->vm)
+ if (!intel_engines_has_context_isolation(gt->i915))
return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_address_space *vm;
struct i915_gem_context *c;
- c = kernel_context(i915);
+ c = kernel_context(gt->i915);
if (IS_ERR(c)) {
err = PTR_ERR(c);
goto err;
}
- client[i].scratch[0] = create_scratch(c->vm, 1024);
+ vm = i915_gem_context_get_vm_rcu(c);
+
+ client[i].scratch[0] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
- client[i].scratch[1] = create_scratch(c->vm, 1024);
+ client[i].scratch[1] = create_scratch(vm, 1024);
if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ i915_vm_put(vm);
kernel_context_close(c);
goto err;
}
client[i].ctx = c;
+ i915_vm_put(vm);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
+ if (!engine->kernel_context->vm)
+ continue;
+
if (!whitelist_writable_count(engine))
continue;
@@ -1074,7 +1111,7 @@ err:
kernel_context_close(client[i].ctx);
}
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
return err;
@@ -1109,16 +1146,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
static int
live_gpu_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
struct wa_lists lists;
bool ok;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1126,25 +1163,25 @@ live_gpu_reset_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
- igt_global_reset_lock(&i915->gt);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok)
goto out;
- intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
+ intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset");
out:
i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(&i915->gt);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
return ok ? 0 : -ESRCH;
}
@@ -1152,7 +1189,7 @@ out:
static int
live_engine_reset_workarounds(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_engines_iter it;
struct i915_gem_context *ctx;
struct intel_context *ce;
@@ -1162,17 +1199,17 @@ live_engine_reset_workarounds(void *arg)
struct wa_lists lists;
int ret = 0;
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt))
return 0;
- ctx = kernel_context(i915);
+ ctx = kernel_context(gt->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- igt_global_reset_lock(&i915->gt);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- reference_lists_init(i915, &lists);
+ reference_lists_init(gt, &lists);
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct intel_engine_cs *engine = ce->engine;
@@ -1205,12 +1242,10 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(&spin, rq)) {
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
pr_err("Spinner failed to start\n");
igt_spinner_fini(&spin);
- ret = -ETIMEDOUT;
goto err;
}
@@ -1227,12 +1262,12 @@ live_engine_reset_workarounds(void *arg)
}
err:
i915_gem_context_unlock_engines(ctx);
- reference_lists_fini(i915, &lists);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(&i915->gt);
+ reference_lists_fini(gt, &lists);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ igt_global_reset_unlock(gt);
kernel_context_close(ctx);
- igt_flush_test(i915, I915_WAIT_LOCKED);
+ igt_flush_test(gt->i915);
return ret;
}
@@ -1246,14 +1281,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_gpu_reset_workarounds),
SUBTEST(live_engine_reset_workarounds),
};
- int err;
if (intel_gt_is_wedged(&i915->gt))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 598170efcaf6..2a77c051f36a 100644
--- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+ INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 249c747e9756..3ee4a4e7689d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -4,11 +4,34 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
+/**
+ * DOC: GuC
+ *
+ * The GuC is a microcontroller inside the GT HW, introduced in gen9. It is
+ * designed to offload some of the functionality usually performed by the host
+ * driver; currently the main operations it can take care of are:
+ *
+ * - Authentication of the HuC, which is required to fully enable HuC usage.
+ * - Low latency graphics context scheduling (a.k.a. GuC submission).
+ * - GT Power management.
+ *
+ * The enable_guc module parameter can be used to select which of those
+ * operations to enable within GuC. Note that not all the operations are
+ * supported on all gen9+ platforms.
+ *
+ * Enabling the GuC is not mandatory and therefore the firmware is only loaded
+ * if at least one of the operations is selected. However, not loading the GuC
+ * might result in the loss of some features that do require the GuC (currently
+ * just the HuC, but more are expected to land in the future).
+ */
+
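For illustration, the selection model described in the DOC block above reduces to a small standalone sketch. The bit assignments below are invented for the example and are not the driver's actual enable_guc encoding:

#include <stdio.h>

#define EXAMPLE_GUC_SUBMISSION (1u << 0)        /* hypothetical bit */
#define EXAMPLE_GUC_LOAD_HUC   (1u << 1)        /* hypothetical bit */

/* the firmware is only worth loading if at least one operation is on */
static int guc_fw_needed(unsigned int enable_guc)
{
        return enable_guc != 0;
}

int main(void)
{
        unsigned int enable_guc = EXAMPLE_GUC_LOAD_HUC;

        printf("load GuC firmware: %s\n",
               guc_fw_needed(enable_guc) ? "yes" : "no");
        printf("GuC submission: %s\n",
               (enable_guc & EXAMPLE_GUC_SUBMISSION) ? "on" : "off");
        printf("HuC authentication: %s\n",
               (enable_guc & EXAMPLE_GUC_LOAD_HUC) ? "on" : "off");
        return 0;
}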
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -56,6 +79,93 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
guc->send_regs.fw_domains = fw_domains;
}
+static void gen9_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
+ guc->interrupts.enabled = true;
+ gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
+ }
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen9_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
+
+ gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen9_reset_guc_interrupts(guc);
+}
+
+static void gen11_reset_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_enable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ if (!guc->interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore,
+ GEN11_GUC_SG_INTR_MASK, ~events);
+ guc->interrupts.enabled = true;
+ }
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+static void gen11_disable_guc_interrupts(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->irq_lock);
+ guc->interrupts.enabled = false;
+
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ gen11_reset_guc_interrupts(guc);
+}
+
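The gen9 and gen11 disable paths above share one ordering rule: clear the enable state under the lock, synchronize any in-flight handlers, then reset stale status. A generic hedged sketch of that shape, where struct example_dev and the hw_* helpers are hypothetical stand-ins:

static void example_irq_teardown(struct example_dev *dev)
{
        spin_lock_irq(&dev->irq_lock);
        dev->irq_enabled = false;
        hw_mask_irq(dev);                /* hypothetical: write the mask reg */
        spin_unlock_irq(&dev->irq_lock);

        synchronize_irq(dev->irq);       /* wait for in-flight handlers */

        hw_clear_stale_status(dev);      /* hypothetical: reset latched IIR */
}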
void intel_guc_init_early(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
@@ -82,32 +192,6 @@ void intel_guc_init_early(struct intel_guc *guc)
}
}
-static int guc_shared_data_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
- }
-
- guc->shared_data = vma;
- guc->shared_data_vaddr = vaddr;
-
- return 0;
-}
-
-static void guc_shared_data_destroy(struct intel_guc *guc)
-{
- i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
-}
-
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
@@ -254,14 +338,9 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_fetch;
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_fw;
- GEM_BUG_ON(!guc->shared_data);
-
ret = intel_guc_log_create(&guc->log);
if (ret)
- goto err_shared;
+ goto err_fw;
ret = intel_guc_ads_create(guc);
if (ret)
@@ -296,8 +375,6 @@ err_ads:
intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
-err_shared:
- guc_shared_data_destroy(guc);
err_fw:
intel_uc_fw_fini(&guc->fw);
err_fetch:
@@ -322,7 +399,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
- guc_shared_data_destroy(guc);
intel_uc_fw_fini(&guc->fw);
intel_uc_fw_cleanup_fetch(&guc->fw);
}
@@ -478,6 +554,13 @@ int intel_guc_suspend(struct intel_guc *guc)
};
/*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to suspend the GuC.
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
+ /*
* The ENTER_S_STATE action queues the save/restore operation in GuC FW
* and then returns, so waiting on the H2G is not enough to guarantee
* GuC is done. When all the processing is done, GuC writes
@@ -518,19 +601,9 @@ int intel_guc_suspend(struct intel_guc *guc)
int intel_guc_reset_engine(struct intel_guc *guc,
struct intel_engine_cs *engine)
{
- u32 data[7];
-
- GEM_BUG_ON(!guc->execbuf_client);
-
- data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
- data[1] = engine->guc_id;
- data[2] = 0;
- data[3] = 0;
- data[4] = 0;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
+ /* XXX: to be implemented with submission interface rework */
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return -ENODEV;
}
/**
@@ -544,13 +617,27 @@ int intel_guc_resume(struct intel_guc *guc)
GUC_POWER_D0,
};
+ /*
+ * If GuC communication is enabled but submission is not supported,
+ * we do not need to resume the GuC but we do need to enable the
+ * GuC communication on resume (above).
+ */
+ if (!intel_guc_submission_is_enabled(guc))
+ return 0;
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
- * DOC: GuC Address Space
+ * DOC: GuC Memory Management
*
- * The layout of GuC address space is shown below:
+ * GuC can't allocate any memory for its own usage, so all the allocations must
+ * be handled by the host driver. GuC accesses the memory via the GGTT, with the
+ * exception of the top and bottom parts of the 4GB address space, which are
+ * instead re-mapped by the GuC HW to the memory location of the FW itself (WOPCM)
+ * or other parts of the HW. The driver must take care not to place objects that
+ * the GuC is going to access in these reserved ranges. The layout of the GuC
+ * address space is shown below:
*
* ::
*
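To make the reserved-range rule above concrete, a self-contained sketch; the window constants are invented for illustration, the real bounds come from the WOPCM layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_GGTT_SIZE       (1ull << 32)
#define EXAMPLE_BOTTOM_RESERVED 0x00080000ull   /* invented WOPCM window */
#define EXAMPLE_TOP_RESERVED    0xFFF00000ull   /* invented HW window base */

static bool guc_range_is_usable(uint64_t start, uint64_t size)
{
        uint64_t end = start + size;

        if (end > EXAMPLE_GGTT_SIZE)
                return false;
        if (start < EXAMPLE_BOTTOM_RESERVED)    /* remapped to WOPCM/FW */
                return false;
        if (end > EXAMPLE_TOP_RESERVED)         /* remapped to other HW */
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", guc_range_is_usable(0x00100000, 0x1000));        /* 1 */
        printf("%d\n", guc_range_is_usable(0x00000000, 0x1000));        /* 0 */
        return 0;
}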
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b2f046d3cc3..e6400204a2bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -47,8 +47,6 @@ struct intel_guc {
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
- struct i915_vma *shared_data;
- void *shared_data_vaddr;
struct intel_guc_client *execbuf_client;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 1d3cdd67ca2f..a26a85d50209 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -548,6 +548,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40,
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
@@ -556,7 +557,6 @@ enum intel_guc_action {
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
- INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 36332064de9c..caed0d57e704 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -226,7 +226,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (WARN_ON(!intel_guc_log_relay_enabled(log)))
+ if (WARN_ON(!intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
@@ -361,6 +361,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
INIT_WORK(&log->relay.flush_work, capture_logs_work);
+ log->relay.started = false;
}
static int guc_log_relay_create(struct intel_guc_log *log)
@@ -546,7 +547,7 @@ out_unlock:
return ret;
}
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log)
+bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
return log->relay.buf_addr;
}
@@ -560,7 +561,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (intel_guc_log_relay_enabled(log)) {
+ if (intel_guc_log_relay_created(log)) {
ret = -EEXIST;
goto out_unlock;
}
@@ -585,6 +586,21 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
mutex_unlock(&log->relay.lock);
+ return 0;
+
+out_relay:
+ guc_log_relay_destroy(log);
+out_unlock:
+ mutex_unlock(&log->relay.lock);
+
+ return ret;
+}
+
+int intel_guc_log_relay_start(struct intel_guc_log *log)
+{
+ if (log->relay.started)
+ return -EEXIST;
+
guc_log_enable_flush_events(log);
/*
@@ -594,47 +610,59 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
*/
queue_work(system_highpri_wq, &log->relay.flush_work);
- return 0;
+ log->relay.started = true;
-out_relay:
- guc_log_relay_destroy(log);
-out_unlock:
- mutex_unlock(&log->relay.lock);
-
- return ret;
+ return 0;
}
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
+ if (!log->relay.started)
+ return;
+
/*
* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&log->relay.flush_work);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
}
-void intel_guc_log_relay_close(struct intel_guc_log *log)
+/*
+ * Stops the relay log. Called from intel_guc_log_relay_close(), so no
+ * possibility of race with start/flush since relay_write cannot race
+ * relay_close.
+ */
+static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ if (!log->relay.started)
+ return;
+
guc_log_disable_flush_events(log);
intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
+ log->relay.started = false;
+}
+
+void intel_guc_log_relay_close(struct intel_guc_log *log)
+{
+ guc_log_relay_stop(log);
+
mutex_lock(&log->relay.lock);
- GEM_BUG_ON(!intel_guc_log_relay_enabled(log));
+ GEM_BUG_ON(!intel_guc_log_relay_created(log));
guc_log_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
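Read together, the hunks above split buffer creation ("open") from event delivery ("start"). A minimal usage sketch of the resulting lifecycle, built only from the functions declared in this patch, with error handling elided:

static int example_relay_session(struct intel_guc_log *log)
{
        int err;

        err = intel_guc_log_relay_open(log);    /* create + map buffers */
        if (err)
                return err;                     /* -EEXIST if already open */

        err = intel_guc_log_relay_start(log);   /* enable flush events */
        if (err)
                goto out_close;

        intel_guc_log_relay_flush(log);         /* force a capture now */

out_close:
        intel_guc_log_relay_close(log);         /* stop, unmap, destroy */
        return err;
}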
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 6f764879acb1..c252c022c5fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -47,6 +47,7 @@ struct intel_guc_log {
struct i915_vma *vma;
struct {
void *buf_addr;
+ bool started;
struct work_struct flush_work;
struct rchan *channel;
struct mutex lock;
@@ -65,8 +66,9 @@ int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
-bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
+bool intel_guc_log_relay_created(const struct intel_guc_log *log);
int intel_guc_log_relay_open(struct intel_guc_log *log);
+int intel_guc_log_relay_start(struct intel_guc_log *log);
void intel_guc_log_relay_flush(struct intel_guc_log *log);
void intel_guc_log_relay_close(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index edf194d23c6b..1949346e714e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -83,6 +83,9 @@
#define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0)
+#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8)
+#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0)
+
#define GUC_ARAT_C6DIS _MMIO(0xA178)
#define GUC_SHIM_CONTROL _MMIO(0xc064)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f325d3dd564f..2498c55e0ea5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,12 +6,13 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
-
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
+
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -29,6 +30,12 @@ enum {
/**
* DOC: GuC-based command submission
*
+ * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
+ * firmware is moving to an updated submission interface and we plan to
+ * turn submission back on when that lands. The below documentation (and related
+ * code) matches the old submission model and will be updated as part of the
+ * upgrade to the new flow.
+ *
* GuC client:
* An intel_guc_client refers to a submission path through GuC. Currently, there
* is only one client, which is charged with all submissions to the GuC. This
@@ -1004,7 +1011,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1014,7 +1021,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
* to GuC
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
@@ -1050,7 +1057,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_rps *rps = &gt->rps;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1062,7 +1069,7 @@ static void guc_interrupts_release(struct intel_gt *gt)
*/
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
@@ -1119,7 +1126,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
enum intel_engine_id id;
int err;
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -1145,7 +1152,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(gt);
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index d4625c97b4f9..32a069841c14 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -9,6 +9,34 @@
#include "intel_huc.h"
#include "i915_drv.h"
+/**
+ * DOC: HuC
+ *
+ * The HuC is a dedicated microcontroller for use in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can directly use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * The kernel driver is only responsible for loading the HuC firmware and
+ * triggering its security authentication, which is performed by the GuC. For
+ * the GuC to correctly perform the authentication, the HuC binary must be
+ * loaded before the GuC one. Loading the HuC is optional; however, not using
+ * the HuC might negatively impact power usage and/or performance of media
+ * workloads, depending on the use-cases.
+ *
+ * See https://github.com/intel/media-driver for the latest details on HuC
+ * functionality.
+ */
+
+/**
+ * DOC: HuC Memory Management
+ *
+ * Similarly to the GuC, the HuC can't do any memory allocations on its own,
+ * with the difference being that the allocations for HuC usage are handled by
+ * the userspace driver instead of the kernel one. The HuC accesses the memory
+ * via the PPGTT belonging to the context loaded on the VCS executing the
+ * HuC-specific commands.
+ */
+
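A hedged sketch of the ordering the two DOC blocks describe, assembled only from functions visible elsewhere in this patch; it illustrates the flow rather than reproducing driver code:

static int example_huc_bringup(struct intel_huc *huc)
{
        int ret;

        /* the HuC image must be loaded before the GuC can authenticate it */
        if (!intel_uc_fw_is_loaded(&huc->fw))
                return -ENOEXEC;

        ret = intel_huc_auth(huc);      /* GuC action + wait for the ACK */
        if (ret)
                return ret;

        /* positive when the status register reports authenticated */
        return intel_huc_check_status(huc);
}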
void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
@@ -35,7 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
void *vaddr;
int err;
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -118,10 +146,9 @@ void intel_huc_fini(struct intel_huc *huc)
*
* Called after HuC and GuC firmware loading during intel_uc_init_hw().
*
- * This function pins HuC firmware image object into GGTT.
- * Then it invokes GuC action to authenticate passing the offset to RSA
- * signature through intel_guc_auth_huc(). It then waits for 50ms for
- * firmware verification ACK and unpins the object.
+ * This function invokes the GuC action to authenticate the HuC firmware,
+ * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
+ * waits for up to 50ms for firmware verification ACK.
*/
int intel_huc_auth(struct intel_huc *huc)
{
@@ -134,7 +161,7 @@ int intel_huc_auth(struct intel_huc *huc)
if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
- ret = i915_inject_load_error(gt->i915, -ENXIO);
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
goto fail;
@@ -185,7 +212,7 @@ int intel_huc_check_status(struct intel_huc *huc)
if (!intel_huc_is_supported(huc))
return -ENODEV;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status.reg);
return (status & huc->status.mask) == huc->status.value;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 74602487ed67..d654340d4d03 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -8,21 +8,6 @@
#include "i915_drv.h"
/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-/**
* intel_huc_fw_init_early() - initializes HuC firmware struct
* @huc: intel_huc struct
*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 71ee7ab035cc..629b19377a29 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -20,7 +20,7 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
int ret;
u32 guc_status;
- ret = i915_inject_load_error(gt->i915, -ENXIO);
+ ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
return ret;
@@ -197,7 +197,7 @@ static int guc_enable_communication(struct intel_guc *guc)
GEM_BUG_ON(guc_communication_enabled(guc));
- ret = i915_inject_load_error(i915, -ENXIO);
+ ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
return ret;
@@ -224,17 +224,7 @@ static int guc_enable_communication(struct intel_guc *guc)
return 0;
}
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-
- guc_clear_mmio_msg(guc);
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
+static void __guc_stop_communication(struct intel_guc *guc)
{
/*
* Events generated during or after CT disable are logged by guc in
@@ -247,6 +237,20 @@ static void guc_disable_communication(struct intel_guc *guc)
guc->send = intel_guc_send_nop;
guc->handler = intel_guc_to_host_event_handler_nop;
+}
+
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ intel_guc_ct_stop(&guc->ct);
+
+ __guc_stop_communication(guc);
+
+ DRM_INFO("GuC communication stopped\n");
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ __guc_stop_communication(guc);
intel_guc_ct_disable(&guc->ct);
@@ -368,7 +372,7 @@ static int uc_init_wopcm(struct intel_uc *uc)
GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
- err = i915_inject_load_error(gt->i915, -ENXIO);
+ err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
@@ -537,7 +541,9 @@ void intel_uc_fini_hw(struct intel_uc *uc)
if (intel_uc_supports_guc_submission(uc))
intel_guc_submission_disable(guc);
- guc_disable_communication(guc);
+ if (guc_communication_enabled(guc))
+ guc_disable_communication(guc);
+
__uc_sanitize(uc);
}
@@ -581,7 +587,7 @@ void intel_uc_suspend(struct intel_uc *uc)
if (!intel_guc_is_running(guc))
return;
- with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+ with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
intel_uc_runtime_suspend(uc);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index bd22bf11adad..66a30ab7044a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -37,27 +37,34 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
/*
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
+ *
+ * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
+ * between 33.0 and 35.2 are only related to new additions to support new Gen12
+ * features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
- fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398))
-
-#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
+ fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0))
+
+#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) name_ \
- __stringify(major_) separator_ \
- __stringify(minor_) separator_ \
+ __stringify(major_) "." \
+ __stringify(minor_) "." \
__stringify(patch_) ".bin"
#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
- __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_)
+ __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
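As a worked check of the new path scheme, the stringify trick can be reproduced outside the driver; for the TGL entries above it yields the expected blob names:

#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

/* mirrors __MAKE_UC_FW_PATH from the hunk above */
#define MAKE_PATH(prefix_, name_, major_, minor_, patch_) \
        "i915/" __stringify(prefix_) name_ \
        __stringify(major_) "." __stringify(minor_) "." \
        __stringify(patch_) ".bin"

int main(void)
{
        puts(MAKE_PATH(tgl, "_guc_", 35, 2, 0)); /* i915/tgl_guc_35.2.0.bin */
        puts(MAKE_PATH(tgl, "_huc_", 7, 0, 3));  /* i915/tgl_huc_7.0.3.bin */
        return 0;
}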
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
@@ -218,29 +225,31 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw,
{
bool user = e == -EINVAL;
- if (i915_inject_load_error(i915, e)) {
+ if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
uc_fw->path = "<invalid>";
uc_fw->user_overridden = user;
- } else if (i915_inject_load_error(i915, e)) {
+ } else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
uc_fw->major_ver_wanted += 1;
uc_fw->minor_ver_wanted = 0;
uc_fw->user_overridden = user;
- } else if (i915_inject_load_error(i915, e)) {
+ } else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
uc_fw->minor_ver_wanted += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) {
+ } else if (uc_fw->major_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
/* require prev major version */
uc_fw->major_ver_wanted -= 1;
uc_fw->minor_ver_wanted = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) {
+ } else if (uc_fw->minor_ver_wanted &&
+ i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
uc_fw->minor_ver_wanted -= 1;
uc_fw->user_overridden = user;
- } else if (user && i915_inject_load_error(i915, e)) {
+ } else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
uc_fw->major_ver_wanted = 0;
uc_fw->minor_ver_wanted = 0;
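The if/else ladder above relies on the fault injector firing at most once per probe, so each branch acts as a distinct injection point. A toy model of that behaviour; the simple countdown rule here is an assumption for illustration, not the i915 implementation:

#include <stdio.h>

static int fail_at = 3, calls;

/* fires exactly once, on the fail_at-th call site reached */
static int inject_error(void)
{
        return ++calls == fail_at;
}

int main(void)
{
        if (inject_error())
                puts("non-existing blob");
        else if (inject_error())
                puts("require next major version");
        else if (inject_error())
                puts("require next minor version");
        else
                puts("no fault injected");
        return 0;
}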
@@ -269,7 +278,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
GEM_BUG_ON(!i915->wopcm.size);
GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
- err = i915_inject_load_error(i915, -ENXIO);
+ err = i915_inject_probe_error(i915, -ENXIO);
if (err)
return err;
@@ -337,25 +346,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
}
/* Get version numbers from the CSS header */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
- css->sw_version);
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
- css->sw_version);
- break;
-
- default:
- MISSING_CASE(uc_fw->type);
- break;
- }
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
@@ -400,7 +394,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
{
struct drm_mm_node *node = &ggtt->uc_fw;
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
@@ -445,7 +439,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
u64 offset;
int ret;
- ret = i915_inject_load_error(gt->i915, -ETIMEDOUT);
+ ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
if (ret)
return ret;
@@ -506,7 +500,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
/* make sure the status was cleared the last time we reset the uc */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
- err = i915_inject_load_error(gt->i915, -ENOEXEC);
+ err = i915_inject_probe_error(gt->i915, -ENOEXEC);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index ae58e8a8c53b..029214cdedd5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -39,9 +39,6 @@
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
* in fw. So driver will load a truncated firmware in this case.
- *
- * The only difference between GuC and HuC firmwares is how the version
- * information is saved.
*/
struct uc_css_header {
@@ -69,11 +66,9 @@ struct uc_css_header {
char username[8];
char buildnumber[12];
u32 sw_version;
-#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
-#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
-#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
u32 reserved[14];
u32 header_info;
} __packed;
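With the unified masks, the version decode is a plain field extraction. A standalone worked example, where field_get() is a simplified stand-in for the kernel's FIELD_GET():

#include <stdint.h>
#include <stdio.h>

#define UC_MAJOR        (0xFFu << 16)
#define UC_MINOR        (0xFFu << 8)
#define UC_PATCH        (0xFFu << 0)

/* shift the masked value down by the mask's lowest set bit */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
        return (val & mask) / (mask & -mask);
}

int main(void)
{
        uint32_t sw_version = (35u << 16) | (2u << 8) | 0u;

        printf("%u.%u.%u\n",                    /* prints 35.2.0 */
               field_get(UC_MAJOR, sw_version),
               field_get(UC_MINOR, sw_version),
               field_get(UC_PATCH, sw_version));
        return 0;
}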
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bba0eafe1cdb..d8a80388bd31 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -108,23 +108,15 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
* validating that the doorbells status expected by the driver matches what the
* GuC/HW have.
*/
-static int igt_guc_clients(void *args)
+static int igt_guc_clients(void *arg)
{
- struct drm_i915_private *dev_priv = args;
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
intel_wakeref_t wakeref;
- struct intel_guc *guc;
int err = 0;
- GEM_BUG_ON(!HAS_GT_UC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->gt.uc.guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = check_all_doorbells(guc);
if (err)
@@ -189,8 +181,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
@@ -201,22 +192,14 @@ unlock:
*/
static int igt_guc_doorbells(void *arg)
{
- struct drm_i915_private *dev_priv = arg;
+ struct intel_gt *gt = arg;
+ struct intel_guc *guc = &gt->uc.guc;
intel_wakeref_t wakeref;
- struct intel_guc *guc;
int i, err = 0;
u16 db_id;
- GEM_BUG_ON(!HAS_GT_UC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->gt.uc.guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = check_all_doorbells(guc);
if (err)
@@ -298,20 +281,19 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
-int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
+int intel_guc_live_selftest(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_guc_clients),
SUBTEST(igt_guc_doorbells),
};
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!USES_GUC_SUBMISSION(i915))
return 0;
- return i915_subtests(tests, dev_priv);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 5ff2437b2998..771420453f82 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
return ret;
}
@@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- mutex_lock(&dev_priv->drm.struct_mutex);
+ mutex_lock(&dev_priv->ggtt.vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&dev_priv->ggtt.vm.mutex);
}
/**
@@ -198,7 +198,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
mutex_lock(&dev_priv->ggtt.vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(dev_priv);
+ reg = i915_reserve_fence(&dev_priv->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index e753b1e706e2..6a3ac8cde95d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -35,7 +35,9 @@
*/
#include <linux/slab.h>
+
#include "i915_drv.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4bfaefdf548d..e451298d11c3 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -152,6 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct intel_vgpu_fb_info *info)
{
+ static struct lock_class_key lock_class;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
@@ -161,7 +162,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
drm_gem_private_object_init(dev, &obj->base,
roundup(info->size, PAGE_SIZE));
- i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f21b8fb5b37e..d6e7a1189bad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
s->ring_scan_buffer[engine->id] = NULL;
s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 25f78196b964..bd12af349123 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -819,13 +819,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
enum intel_gvt_event_type event;
- if (reg == _DPA_AUX_CH_CTL)
+ if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
event = AUX_CHANNEL_A;
- else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+ else if (reg == _PCH_DPB_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
event = AUX_CHANNEL_B;
- else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+ else if (reg == _PCH_DPC_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
event = AUX_CHANNEL_C;
- else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+ else if (reg == _PCH_DPD_AUX_CH_CTL ||
+ reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
WARN_ON(true);
@@ -2796,7 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+ MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
@@ -2872,11 +2875,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
@@ -3417,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
}
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+ /* pvinfo data doesn't come from hw mmio */
+ if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+ continue;
+
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
i915_mmio_reg_offset(block->offset) + j,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 343d79c1cb7e..04a5a0d90823 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1564,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "\n");
}
-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct mdev_device *mdev = mdev_from_dev(dev);
-
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%u\n",
- vgpu->submission.shadow[0]->gem_context->hw_id);
- }
- return sprintf(buf, "\n");
-}
-
static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
&dev_attr_vgpu_id.attr,
- &dev_attr_hw_id.attr,
NULL
};
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 4208e40445b1..aaf15916d29a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6c79d16b381e..5b2a7d072ec9 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -38,6 +38,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "gvt.h"
@@ -194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EFAULT;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
I915_GTT_PAGE_SIZE);
@@ -365,7 +366,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
+ struct i915_ppgtt *ppgtt =
+ i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
int i = 0;
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -378,6 +380,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
+
+ i915_vm_put(&ppgtt->vm);
}
static int
@@ -385,11 +389,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (workload->req)
return 0;
@@ -415,10 +416,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&vgpu->vgpu_lock);
if (workload->shadow)
return 0;
@@ -580,8 +580,6 @@ static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_shadow_bb *bb, *pos;
if (list_empty(&workload->shadow_bb))
@@ -590,8 +588,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
bb = list_first_entry(&workload->shadow_bb,
struct intel_vgpu_shadow_bb, list);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
if (bb->obj) {
if (bb->accessing)
@@ -609,8 +605,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
list_del(&bb->list);
kfree(bb);
}
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -685,7 +679,6 @@ err_unpin_mm:
static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -694,7 +687,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
ring_id, workload);
mutex_lock(&vgpu->vgpu_lock);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
@@ -729,7 +721,6 @@ out:
err_req:
if (ret)
workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
@@ -844,7 +835,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+ page = i915_gem_object_get_page(ctx_obj, i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
I915_GTT_PAGE_SIZE);
@@ -887,7 +878,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
list_for_each_entry_safe(pos, n,
&s->workload_q_head[engine->id], list) {
list_del_init(&pos->list);
@@ -1233,20 +1224,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
+ struct i915_ppgtt *ppgtt;
enum intel_engine_id i;
int ret;
- mutex_lock(&i915->drm.struct_mutex);
-
ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
i915_gem_context_set_force_single_submission(ctx);
- i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+ ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+ i915_context_ppgtt_root_save(s, ppgtt);
for_each_engine(engine, i915, i) {
struct intel_context *ce;
@@ -1291,12 +1280,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
atomic_set(&s->running_workload_num, 0);
bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
+ i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
out_shadow_ctx:
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
+ i915_context_ppgtt_root_restore(s, ppgtt);
for_each_engine(engine, i915, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1304,9 +1293,8 @@ out_shadow_ctx:
intel_context_unpin(s->shadow[i]);
intel_context_put(s->shadow[i]);
}
+ i915_vm_put(&ppgtt->vm);
i915_gem_context_put(ctx);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -1597,9 +1585,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
*/
if (list_empty(workload_q_head(vgpu, ring_id))) {
intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 48e16ad93bbd..3c424cb90702 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,13 +7,12 @@
#include <linux/debugobjects.h>
#include "gt/intel_engine_pm.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
-#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
-
/*
* Active refs memory management
*
@@ -27,35 +26,35 @@ static struct i915_global_active {
} global;
struct active_node {
- struct i915_active_request base;
+ struct i915_active_fence base;
struct i915_active *ref;
struct rb_node node;
u64 timeline;
};
static inline struct active_node *
-node_from_active(struct i915_active_request *active)
+node_from_active(struct i915_active_fence *active)
{
return container_of(active, struct active_node, base);
}
#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
-static inline bool is_barrier(const struct i915_active_request *active)
+static inline bool is_barrier(const struct i915_active_fence *active)
{
- return IS_ERR(rcu_access_pointer(active->request));
+ return IS_ERR(rcu_access_pointer(active->fence));
}
static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
GEM_BUG_ON(!is_barrier(&node->base));
- return (struct llist_node *)&node->base.link;
+ return (struct llist_node *)&node->base.cb.node;
}
static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
- return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
+ return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}
static inline struct intel_engine_cs *
@@ -68,7 +67,7 @@ barrier_to_engine(struct active_node *node)
static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
return container_of((struct list_head *)x,
- struct active_node, base.link);
+ struct active_node, base.cb.node);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
@@ -92,12 +91,17 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
- debug_object_activate(ref, &active_debug_desc);
+ spin_lock_irq(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* before the first inc */
+ debug_object_activate(ref, &active_debug_desc);
+ spin_unlock_irq(&ref->tree_lock);
}
static void debug_active_deactivate(struct i915_active *ref)
{
- debug_object_deactivate(ref, &active_debug_desc);
+ lockdep_assert_held(&ref->tree_lock);
+ if (!atomic_read(&ref->count)) /* after the last dec */
+ debug_object_deactivate(ref, &active_debug_desc);
}
static void debug_active_fini(struct i915_active *ref)
@@ -125,31 +129,46 @@ __active_retire(struct i915_active *ref)
{
struct active_node *it, *n;
struct rb_root root;
- bool retire = false;
+ unsigned long flags;
- lockdep_assert_held(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
/* return the unused nodes to our slabcache -- flushing the allocator */
- if (atomic_dec_and_test(&ref->count)) {
- debug_active_deactivate(ref);
- root = ref->tree;
- ref->tree = RB_ROOT;
- ref->cache = NULL;
- retire = true;
- }
-
- mutex_unlock(&ref->mutex);
- if (!retire)
+ if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
return;
- rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
- GEM_BUG_ON(i915_active_request_isset(&it->base));
- kmem_cache_free(global.slab_cache, it);
- }
+ GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
+ debug_active_deactivate(ref);
+
+ root = ref->tree;
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
/* After the final retire, the entire struct may be freed */
if (ref->retire)
ref->retire(ref);
+
+ /* ... except if you wait on it, you must manage your own references! */
+ wake_up_var(ref);
+
+ rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+ GEM_BUG_ON(i915_active_fence_isset(&it->base));
+ kmem_cache_free(global.slab_cache, it);
+ }
+}
+
+static void
+active_work(struct work_struct *wrk)
+{
+ struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ __active_retire(ref);
}
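__active_retire() above rests on atomic_dec_and_lock_irqsave(): only the thread whose decrement reaches zero returns holding the lock, everyone else bails out immediately. A generic hedged sketch of that idiom, where struct example_obj and the helpers are hypothetical:

static void example_put(struct example_obj *obj)
{
        unsigned long flags;

        if (!atomic_dec_and_lock_irqsave(&obj->count, &obj->lock, &flags))
                return; /* not the last reference, nothing to do */

        /* the final decrement returns with obj->lock held, irqs saved */
        example_detach_locked(obj);             /* hypothetical helper */
        spin_unlock_irqrestore(&obj->lock, flags);

        example_free(obj);      /* hypothetical, done outside the lock */
}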
static void
@@ -159,18 +178,29 @@ active_retire(struct i915_active *ref)
if (atomic_add_unless(&ref->count, -1, 1))
return;
- /* One active may be flushed from inside the acquire of another */
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
+ queue_work(system_unbound_wq, &ref->work);
+ return;
+ }
+
__active_retire(ref);
}
static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- active_retire(node_from_active(base)->ref);
+ i915_active_fence_cb(fence, cb);
+ active_retire(container_of(cb, struct active_node, base.cb)->ref);
}
-static struct i915_active_request *
+static void
+excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ i915_active_fence_cb(fence, cb);
+ active_retire(container_of(cb, struct i915_active, excl.cb));
+}
+
+static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
struct active_node *node, *prealloc;
@@ -193,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
if (!prealloc)
return NULL;
- mutex_lock(&ref->mutex);
+ spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
parent = NULL;
@@ -214,7 +244,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
}
node = prealloc;
- i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
+ __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -223,29 +253,36 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
out:
ref->cache = node;
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
BUILD_BUG_ON(offsetof(typeof(*node), base));
return &node->base;
}
-void __i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
struct lock_class_key *key)
{
+ unsigned long bits;
+
debug_active_init(ref);
- ref->i915 = i915;
ref->flags = 0;
ref->active = active;
- ref->retire = retire;
+ ref->retire = ptr_unpack_bits(retire, &bits, 2);
+ if (bits & I915_ACTIVE_MAY_SLEEP)
+ ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;
+
+ spin_lock_init(&ref->tree_lock);
ref->tree = RB_ROOT;
ref->cache = NULL;
+
init_llist_head(&ref->preallocated_barriers);
atomic_set(&ref->count, 0);
__mutex_init(&ref->mutex, "i915_active", key);
+ __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire);
+ INIT_WORK(&ref->work, active_work);
}
static bool ____active_del_barrier(struct i915_active *ref,
@@ -298,9 +335,9 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node)
int i915_active_ref(struct i915_active *ref,
struct intel_timeline *tl,
- struct i915_request *rq)
+ struct dma_fence *fence)
{
- struct i915_active_request *active;
+ struct i915_active_fence *active;
int err;
lockdep_assert_held(&tl->mutex);
@@ -323,26 +360,44 @@ int i915_active_ref(struct i915_active *ref,
* request that we want to emit on the kernel_context.
*/
__active_del_barrier(ref, node_from_active(active));
- RCU_INIT_POINTER(active->request, NULL);
- INIT_LIST_HEAD(&active->link);
- } else {
- if (!i915_active_request_isset(active))
- atomic_inc(&ref->count);
+ RCU_INIT_POINTER(active->fence, NULL);
+ atomic_dec(&ref->count);
}
- GEM_BUG_ON(!atomic_read(&ref->count));
- __i915_active_request_set(active, rq);
+ if (!__i915_active_fence_set(active, fence))
+ atomic_inc(&ref->count);
out:
i915_active_release(ref);
return err;
}
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+{
+ /* We expect the caller to manage the exclusive timeline ordering */
+ GEM_BUG_ON(i915_active_is_idle(ref));
+
+ /*
+ * As we don't know which mutex the caller is using, we told a small
+ * lie to the debug code that it is using the i915_active.mutex;
+ * and now we must stick to that lie.
+ */
+ mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_);
+ if (!__i915_active_fence_set(&ref->excl, f))
+ atomic_inc(&ref->count);
+ mutex_release(&ref->mutex.dep_map, _THIS_IP_);
+}
+
+bool i915_active_acquire_if_busy(struct i915_active *ref)
+{
+ debug_active_assert(ref);
+ return atomic_add_unless(&ref->count, 1, 0);
+}
+
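i915_active_acquire_if_busy() above is the classic get-unless-zero idiom: atomic_add_unless(&count, 1, 0) takes a reference only while at least one is already held, so an idle object is never revived. A minimal sketch:

/* returns true, holding a new reference, only if one was already held */
static bool example_tryget(atomic_t *count)
{
        return atomic_add_unless(count, 1, 0);
}

This is the same shape as the kernel's kref_get_unless_zero().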
int i915_active_acquire(struct i915_active *ref)
{
int err;
- debug_active_assert(ref);
- if (atomic_add_unless(&ref->count, 1, 0))
+ if (i915_active_acquire_if_busy(ref))
return 0;
err = mutex_lock_interruptible(&ref->mutex);
@@ -367,109 +422,66 @@ void i915_active_release(struct i915_active *ref)
active_retire(ref);
}
-static void __active_ungrab(struct i915_active *ref)
-{
- clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
-}
-
-bool i915_active_trygrab(struct i915_active *ref)
+static void enable_signaling(struct i915_active_fence *active)
{
- debug_active_assert(ref);
-
- if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
- return false;
+ struct dma_fence *fence;
- if (!atomic_add_unless(&ref->count, 1, 0)) {
- __active_ungrab(ref);
- return false;
- }
+ fence = i915_active_fence_get(active);
+ if (!fence)
+ return;
- return true;
-}
-
-void i915_active_ungrab(struct i915_active *ref)
-{
- GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
-
- active_retire(ref);
- __active_ungrab(ref);
+ dma_fence_enable_sw_signaling(fence);
+ dma_fence_put(fence);
}
int i915_active_wait(struct i915_active *ref)
{
struct active_node *it, *n;
- int err;
+ int err = 0;
might_sleep();
- might_lock(&ref->mutex);
- if (i915_active_is_idle(ref))
+ if (!i915_active_acquire_if_busy(ref))
return 0;
- err = mutex_lock_interruptible(&ref->mutex);
- if (err)
- return err;
-
- if (!atomic_add_unless(&ref->count, 1, 0)) {
- mutex_unlock(&ref->mutex);
- return 0;
- }
-
+ /* Flush lazy signals */
+ enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) { /* unconnected idle-barrier */
- err = -EBUSY;
- break;
- }
+ if (is_barrier(&it->base)) /* unconnected idle barrier */
+ continue;
- err = i915_active_request_retire(&it->base, BKL(ref));
- if (err)
- break;
+ enable_signaling(&it->base);
}
+ /* Any fence added after the wait begins will not be auto-signaled */
- __active_retire(ref);
+ i915_active_release(ref);
if (err)
return err;
- if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
+ if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
return -EINTR;
- if (!i915_active_is_idle(ref))
- return -EBUSY;
-
return 0;
}
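/*
 * Illustrative, continuing the my_excl sketch above: draining the tracker.
 * i915_active_wait() flushes the lazy signalling and then sleeps
 * interruptibly until the reference count drops to zero.
 */
static int my_excl_drain(struct my_excl *obj)
{
	int err;

	err = i915_active_wait(&obj->active);
	if (err)
		return err;	/* -EINTR if interrupted */

	GEM_BUG_ON(!i915_active_is_idle(&obj->active));
	return 0;
}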
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active)
-{
- struct i915_request *barrier =
- i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
-
- return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
-}
-
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
- struct active_node *it, *n;
- int err;
-
- if (RB_EMPTY_ROOT(&ref->tree))
- return 0;
+ int err = 0;
- /* await allocates and so we need to avoid hitting the shrinker */
- err = i915_active_acquire(ref);
- if (err)
- return err;
+ if (rcu_access_pointer(ref->excl.fence)) {
+ struct dma_fence *fence;
- mutex_lock(&ref->mutex);
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- err = i915_request_await_active_request(rq, &it->base);
- if (err)
- break;
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&ref->excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
}
- mutex_unlock(&ref->mutex);
- i915_active_release(ref);
+ /* In the future we may choose to await on all fences */
+
return err;
}
@@ -477,15 +489,16 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
void i915_active_fini(struct i915_active *ref)
{
debug_active_fini(ref);
- GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
GEM_BUG_ON(atomic_read(&ref->count));
+ GEM_BUG_ON(work_pending(&ref->work));
+ GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
mutex_destroy(&ref->mutex);
}
#endif
static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
- return node->timeline == idx && !i915_active_request_isset(&node->base);
+ return node->timeline == idx && !i915_active_fence_isset(&node->base);
}
static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
@@ -495,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
if (RB_EMPTY_ROOT(&ref->tree))
return NULL;
- mutex_lock(&ref->mutex);
+ spin_lock_irq(&ref->tree_lock);
GEM_BUG_ON(i915_active_is_idle(ref));
/*
@@ -560,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
goto match;
}
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
return NULL;
@@ -568,7 +581,7 @@ match:
rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
if (p == &ref->cache->node)
ref->cache = NULL;
- mutex_unlock(&ref->mutex);
+ spin_unlock_irq(&ref->tree_lock);
return rb_entry(p, struct active_node, node);
}
@@ -576,11 +589,12 @@ match:
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
intel_engine_mask_t tmp, mask = engine->mask;
+ struct intel_gt *gt = engine->gt;
struct llist_node *pos, *next;
int err;
+ GEM_BUG_ON(i915_active_is_idle(ref));
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
/*
@@ -589,7 +603,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* We can then use the preallocated nodes in
* i915_active_acquire_barrier()
*/
- for_each_engine_masked(engine, i915, mask, tmp) {
+ for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
struct active_node *node;
@@ -605,13 +619,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
node->base.lock =
&engine->kernel_context->timeline->mutex;
#endif
- RCU_INIT_POINTER(node->base.request, NULL);
- node->base.retire = node_retire;
+ RCU_INIT_POINTER(node->base.fence, NULL);
+ node->base.cb.func = node_retire;
node->timeline = idx;
node->ref = ref;
}
- if (!i915_active_request_isset(&node->base)) {
+ if (!i915_active_fence_isset(&node->base)) {
/*
* Mark this as being *our* unconnected proto-node.
*
@@ -621,8 +635,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* and then we can use the rb_node and list pointers
* for our tracking of the pending barrier.
*/
- RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
- node->base.link.prev = (void *)engine;
+ RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
+ node->base.cb.node.prev = (void *)engine;
atomic_inc(&ref->count);
}
@@ -648,6 +662,7 @@ unwind:
void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
+ unsigned long flags;
GEM_BUG_ON(i915_active_is_idle(ref));
@@ -657,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
* populated by i915_request_add_active_barriers() to point to the
* request that will eventually release them.
*/
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
struct active_node *node = barrier_from_ll(pos);
struct intel_engine_cs *engine = barrier_to_engine(node);
@@ -679,54 +694,124 @@ void i915_active_acquire_barrier(struct i915_active *ref)
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
llist_add(barrier_to_ll(node), &engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- mutex_unlock(&ref->mutex);
+ spin_unlock_irqrestore(&ref->tree_lock, flags);
}
void i915_request_add_active_barriers(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct llist_node *node, *next;
+ unsigned long flags;
GEM_BUG_ON(intel_engine_is_virtual(engine));
- GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
+ GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);
+ node = llist_del_all(&engine->barrier_tasks);
+ if (!node)
+ return;
/*
* Attach the list of proto-fences to the in-flight request such
* that the parent i915_active will be released when this request
* is retired.
*/
- llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
- RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
+ spin_lock_irqsave(&rq->lock, flags);
+ llist_for_each_safe(node, next, node) {
+ RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence);
smp_wmb(); /* serialise with reuse_idle_barrier */
- list_add_tail((struct list_head *)node, &rq->active_list);
+ list_add_tail((struct list_head *)node, &rq->fence.cb_list);
}
+ spin_unlock_irqrestore(&rq->lock, flags);
}
-int i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#define active_is_held(active) lockdep_is_held((active)->lock)
+#else
+#define active_is_held(active) true
+#endif
+
+/*
+ * __i915_active_fence_set: Update the last active fence along its timeline
+ * @active: the active tracker
+ * @fence: the new fence (under construction)
+ *
+ * Records the new @fence as the last active fence along its timeline in
+ * this active tracker, moving the tracking callbacks from the previous
+ * fence onto this one. Returns the previous fence (if not already completed),
+ * which the caller must ensure is executed before the new fence. To ensure
+ * that the order of fences within the timeline of the i915_active_fence is
+ * maintained, the timeline must be locked by the caller.
+ */
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence)
{
- int err;
+ struct dma_fence *prev;
+ unsigned long flags;
+
+ /* NB: must be serialised by an outer timeline mutex (active->lock) */
+ spin_lock_irqsave(fence->lock, flags);
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+
+ prev = rcu_dereference_protected(active->fence, active_is_held(active));
+ if (prev) {
+ GEM_BUG_ON(prev == fence);
+ spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+ __list_del_entry(&active->cb.node);
+ spin_unlock(prev->lock); /* serialise with prev->cb_list */
+
+ /*
+ * active->fence is reset by the callback from inside
+ * interrupt context. We need to serialise our list
+ * manipulation with the fence->lock to prevent the prev
+ * being lost inside an interrupt (it can't be replaced as
+ * no other caller is allowed to enter __i915_active_fence_set
+ * as we hold the timeline lock). After serialising with
+ * the callback, we need to double check which ran first,
+ * our list_del() [decoupling prev from the callback] or
+ * the callback...
+ */
+ prev = rcu_access_pointer(active->fence);
+ }
+
+ rcu_assign_pointer(active->fence, fence);
+ list_add_tail(&active->cb.node, &fence->cb_list);
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return prev;
+}
+
+int i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq)
+{
+ struct dma_fence *fence;
+ int err = 0;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
lockdep_assert_held(active->lock);
#endif
- /* Must maintain ordering wrt previous active requests */
- err = i915_request_await_active_request(rq, active);
- if (err)
- return err;
+ /* Must maintain timeline ordering wrt previous active requests */
+ rcu_read_lock();
+ fence = __i915_active_fence_set(active, &rq->fence);
+ if (fence) /* but the previous fence may not belong to that timeline! */
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
- __i915_active_request_set(active, rq);
- return 0;
+ return err;
}
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request)
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- /* Space left intentionally blank */
+ i915_active_fence_cb(fence, cb);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index f95058f99057..44859356ce97 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -12,6 +12,10 @@
#include "i915_active_types.h"
#include "i915_request.h"
+struct i915_request;
+struct intel_engine_cs;
+struct intel_timeline;
+
/*
 * We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
@@ -28,308 +32,108 @@
* write access so that we can perform concurrent read operations between
* the CPU and GPU engines, as well as waiting for all rendering to
* complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_active_request to track the most recent (in
+ * object then embeds a #i915_active_fence to track the most recent (in
* retirement order) request relevant for the desired mode of access.
- * The #i915_active_request is updated with i915_active_request_set() to
+ * The #i915_active_fence is updated with i915_active_fence_set() to
* track the most recent fence request, typically this is done as part of
* i915_vma_move_to_active().
*
- * When the #i915_active_request completes (is retired), it will
+ * When the #i915_active_fence completes (is retired), it will
* signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_active_request.request == NULL). The owner
+ * itself as idle (i915_active_fence.fence == NULL). The owner
* can then perform any action, such as delayed freeing of an active
* resource including itself.
*/
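/*
 * Illustrative sketch of the model described above (my_* names invented):
 * an object embeds one i915_active_fence per mode of access and points it
 * at the fence of the most recent relevant request, here via
 * i915_active_fence_set() (declared below).
 */
struct my_obj {
	struct i915_active_fence last_write;	/* most recent writer */
};

static int my_obj_set_writer(struct my_obj *obj, struct i915_request *rq)
{
	/* caller must hold the timeline lock recorded at init;
	 * debug builds assert this inside i915_active_fence_set()
	 */
	return i915_active_fence_set(&obj->last_write, rq);
}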
-void i915_active_retire_noop(struct i915_active_request *active,
- struct i915_request *request);
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
/**
- * i915_active_request_init - prepares the activity tracker for use
+ * __i915_active_fence_init - prepares the activity tracker for use
* @active - the active tracker
- * @rq - initial request to track, can be NULL
+ * @fence - initial fence to track, can be NULL
 * @fn - a callback invoked when the tracker is retired (becomes idle),
* can be NULL
*
- * i915_active_request_init() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
+ * __i915_active_fence_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active fence
+ * associated with it. When the last fence becomes idle, when it is retired
* after completion, the optional callback @func is invoked.
*/
static inline void
-i915_active_request_init(struct i915_active_request *active,
+__i915_active_fence_init(struct i915_active_fence *active,
struct mutex *lock,
- struct i915_request *rq,
- i915_active_retire_fn retire)
+ void *fence,
+ dma_fence_func_t fn)
{
- RCU_INIT_POINTER(active->request, rq);
- INIT_LIST_HEAD(&active->link);
- active->retire = retire ?: i915_active_retire_noop;
+ RCU_INIT_POINTER(active->fence, fence);
+ active->cb.func = fn ?: i915_active_noop;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
active->lock = lock;
#endif
}
-#define INIT_ACTIVE_REQUEST(name, lock) \
- i915_active_request_init((name), (lock), NULL, NULL)
-
-/**
- * i915_active_request_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * __i915_active_request_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-__i915_active_request_set(struct i915_active_request *active,
- struct i915_request *request)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
- lockdep_assert_held(active->lock);
-#endif
- list_move(&active->link, &request->active_list);
- rcu_assign_pointer(active->request, request);
-}
+#define INIT_ACTIVE_FENCE(A, LOCK) \
+ __i915_active_fence_init((A), (LOCK), NULL, NULL)
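/*
 * Illustrative, continuing the my_obj sketch above: the tracker starts
 * idle, recording the timeline mutex that debug builds will assert
 * against on later updates.
 */
static void my_obj_init(struct my_obj *obj, struct mutex *tl_mutex)
{
	INIT_ACTIVE_FENCE(&obj->last_write, tl_mutex);
}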
-int __must_check
-i915_active_request_set(struct i915_active_request *active,
- struct i915_request *rq);
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+ struct dma_fence *fence);
/**
- * i915_active_request_raw - return the active request
+ * i915_active_fence_set - updates the tracker to watch the current fence
* @active - the active tracker
+ * @rq - the request to watch
*
- * i915_active_request_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
+ * i915_active_fence_set() watches the given @rq for completion. While
+ * that @rq is busy, the @active reports busy. When that @rq is signaled
+ * (or else retired) the @active tracker is updated to report idle.
*/
-static inline struct i915_request *
-i915_active_request_raw(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return rcu_dereference_protected(active->request,
- lockdep_is_held(mutex));
-}
-
-/**
- * i915_active_request_peek - report the active request being monitored
- * @active - the active tracker
- *
- * i915_active_request_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_peek(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- struct i915_request *request;
-
- request = i915_active_request_raw(active, mutex);
- if (!request || i915_request_completed(request))
- return NULL;
-
- return request;
-}
-
-/**
- * i915_active_request_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_active_request_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_get(const struct i915_active_request *active,
- struct mutex *mutex)
-{
- return i915_request_get(i915_active_request_peek(active, mutex));
-}
-
-/**
- * __i915_active_request_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_active_request_get() returns a reference to the active request,
- * or NULL if the active tracker is idle. The caller must hold the RCU read
- * lock, but the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_active_request_get_rcu(const struct i915_active_request *active)
-{
- /*
- * Performing a lockless retrieval of the active request is super
- * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
- * slab of request objects will not be freed whilst we hold the
- * RCU read lock. It does not guarantee that the request itself
- * will not be freed and then *reused*. Viz,
- *
- * Thread A Thread B
- *
- * rq = active.request
- * retire(rq) -> free(rq);
- * (rq is now first on the slab freelist)
- * active.request = NULL
- *
- * rq = new submission on a new object
- * ref(rq)
- *
- * To prevent the request from being reused whilst the caller
- * uses it, we take a reference like normal. Whilst acquiring
- * the reference we check that it is not in a destroyed state
- * (refcnt == 0). That prevents the request being reallocated
- * whilst the caller holds on to it. To check that the request
- * was not reallocated as we acquired the reference we have to
- * check that our request remains the active request across
- * the lookup, in the same manner as a seqlock. The visibility
- * of the pointer versus the reference counting is controlled
- * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
- *
- * In the middle of all that, we inspect whether the request is
- * complete. Retiring is lazy so the request may be completed long
- * before the active tracker is updated. Querying whether the
- * request is complete is far cheaper (as it involves no locked
- * instructions setting cachelines to exclusive) than acquiring
- * the reference, so we do it first. The RCU read lock ensures the
- * pointer dereference is valid, but does not ensure that the
- * seqno nor HWS is the right one! However, if the request was
- * reallocated, that means the active tracker's request was complete.
- * If the new request is also complete, then both are and we can
- * just report the active tracker is idle. If the new request is
- * incomplete, then we acquire a reference on it and check that
- * it remained the active request.
- *
- * It is then imperative that we do not zero the request on
- * reallocation, so that we can chase the dangling pointers!
- * See i915_request_alloc().
- */
- do {
- struct i915_request *request;
-
- request = rcu_dereference(active->request);
- if (!request || i915_request_completed(request))
- return NULL;
-
- /*
- * An especially silly compiler could decide to recompute the
- * result of i915_request_completed, more specifically
- * re-emit the load for request->fence.seqno. A race would catch
- * a later seqno value, which could flip the result from true to
- * false. Which means part of the instructions below might not
- * be executed, while later on instructions are executed. Due to
- * barriers within the refcounting the inconsistency can't reach
- * past the call to i915_request_get_rcu, but not executing
- * that while still executing i915_request_put() creates
- * havoc enough. Prevent this with a compiler barrier.
- */
- barrier();
-
- request = i915_request_get_rcu(request);
-
- /*
- * What stops the following rcu_access_pointer() from occurring
- * before the above i915_request_get_rcu()? If we were
- * to read the value before pausing to get the reference to
- * the request, we may not notice a change in the active
- * tracker.
- *
- * The rcu_access_pointer() is a mere compiler barrier, which
- * means both the CPU and compiler are free to perform the
- * memory read without constraint. The compiler only has to
- * ensure that any operations after the rcu_access_pointer()
- * occur afterwards in program order. This means the read may
- * be performed earlier by an out-of-order CPU, or adventurous
- * compiler.
- *
- * The atomic operation at the heart of
- * i915_request_get_rcu(), see dma_fence_get_rcu(), is
- * atomic_inc_not_zero() which is only a full memory barrier
- * when successful. That is, if i915_request_get_rcu()
- * returns the request (and so with the reference counted
- * incremented) then the following read for rcu_access_pointer()
- * must occur after the atomic operation and so confirm
- * that this request is the one currently being tracked.
- *
- * The corresponding write barrier is part of
- * rcu_assign_pointer().
- */
- if (!request || request == rcu_access_pointer(active->request))
- return rcu_pointer_handoff(request);
-
- i915_request_put(request);
- } while (1);
-}
-
+int __must_check
+i915_active_fence_set(struct i915_active_fence *active,
+ struct i915_request *rq);
/**
- * i915_active_request_get_unlocked - return a reference to the active request
+ * i915_active_fence_get - return a reference to the active fence
* @active - the active tracker
*
- * i915_active_request_get_unlocked() returns a reference to the active request,
+ * i915_active_fence_get() returns a reference to the active fence,
* or NULL if the active tracker is idle. The reference is obtained under RCU,
* so no locking is required by the caller.
*
- * The reference should be freed with i915_request_put().
+ * The reference should be freed with dma_fence_put().
*/
-static inline struct i915_request *
-i915_active_request_get_unlocked(const struct i915_active_request *active)
+static inline struct dma_fence *
+i915_active_fence_get(struct i915_active_fence *active)
{
- struct i915_request *request;
+ struct dma_fence *fence;
rcu_read_lock();
- request = __i915_active_request_get_rcu(active);
+ fence = dma_fence_get_rcu_safe(&active->fence);
rcu_read_unlock();
- return request;
+ return fence;
}
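/*
 * Illustrative, continuing the my_obj sketch: synchronously waiting on
 * whatever was last tracked. dma_fence_wait() returns 0, or -ERESTARTSYS
 * when the interruptible wait is broken.
 */
static long my_obj_wait_last_write(struct my_obj *obj)
{
	struct dma_fence *fence;
	long err = 0;

	fence = i915_active_fence_get(&obj->last_write);
	if (fence) {
		err = dma_fence_wait(fence, true);
		dma_fence_put(fence);
	}

	return err;
}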
/**
- * i915_active_request_isset - report whether the active tracker is assigned
+ * i915_active_fence_isset - report whether the active tracker is assigned
* @active - the active tracker
*
- * i915_active_request_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
+ * i915_active_fence_isset() returns true if the active tracker is currently
+ * assigned to a fence. Due to the lazy retiring, that fence may be idle
* and this may report stale information.
*/
static inline bool
-i915_active_request_isset(const struct i915_active_request *active)
+i915_active_fence_isset(const struct i915_active_fence *active)
{
- return rcu_access_pointer(active->request);
+ return rcu_access_pointer(active->fence);
}
-/**
- * i915_active_request_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_active_request_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_active_request_retire(struct i915_active_request *active,
- struct mutex *mutex)
+static inline void
+i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct i915_request *request;
- long ret;
-
- request = i915_active_request_raw(active, mutex);
- if (!request)
- return 0;
-
- ret = i915_request_wait(request,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0)
- return ret;
+ struct i915_active_fence *active =
+ container_of(cb, typeof(*active), cb);
- list_del_init(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, request);
-
- return 0;
+ RCU_INIT_POINTER(active->fence, NULL);
}
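/*
 * Illustrative: a custom dma_fence callback layered on the helper above,
 * suitable as the fn argument to __i915_active_fence_init(). The helper
 * clears active->fence so the tracker reads as idle; any further
 * bookkeeping is up to the embedding object. Invented name.
 */
static void my_fence_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	i915_active_fence_cb(fence, cb);	/* tracker now reads as idle */
	/* e.g. wake a waiter or queue deferred cleanup of the owner */
}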
/*
@@ -358,34 +162,40 @@ i915_active_request_retire(struct i915_active_request *active,
* synchronisation.
*/
-void __i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
int (*active)(struct i915_active *ref),
void (*retire)(struct i915_active *ref),
struct lock_class_key *key);
-#define i915_active_init(i915, ref, active, retire) do { \
+#define i915_active_init(ref, active, retire) do { \
static struct lock_class_key __key; \
\
- __i915_active_init(i915, ref, active, retire, &__key); \
+ __i915_active_init(ref, active, retire, &__key); \
} while (0)
int i915_active_ref(struct i915_active *ref,
struct intel_timeline *tl,
- struct i915_request *rq);
+ struct dma_fence *fence);
+
+static inline int
+i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+{
+ return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
+}
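/*
 * Illustrative caller pattern (invented name): the tracker must be
 * acquired around adding requests; the request's own timeline mutex,
 * already held while the request is under construction, satisfies the
 * lockdep_assert_held() inside i915_active_ref().
 */
static int my_track_request(struct i915_active *ref, struct i915_request *rq)
{
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	err = i915_active_add_request(ref, rq);
	i915_active_release(ref);

	return err;
}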
+
+void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+
+static inline bool i915_active_has_exclusive(struct i915_active *ref)
+{
+ return rcu_access_pointer(ref->excl.fence);
+}
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq,
- struct i915_active *ref);
-int i915_request_await_active_request(struct i915_request *rq,
- struct i915_active_request *active);
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
int i915_active_acquire(struct i915_active *ref);
+bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
-void __i915_active_release_nested(struct i915_active *ref, int subclass);
-
-bool i915_active_trygrab(struct i915_active *ref);
-void i915_active_ungrab(struct i915_active *ref);
static inline bool
i915_active_is_idle(const struct i915_active *ref)
@@ -404,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);
+void i915_active_print(struct i915_active *ref, struct drm_printer *m);
+
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 1854e7d168c1..96aed0ee700a 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -8,22 +8,18 @@
#define _I915_ACTIVE_TYPES_H_
#include <linux/atomic.h>
+#include <linux/dma-fence.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
+#include "i915_utils.h"
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
- struct i915_request *);
-
-struct i915_active_request {
- struct i915_request __rcu *request;
- struct list_head link;
- i915_active_retire_fn retire;
+struct i915_active_fence {
+ struct dma_fence __rcu *fence;
+ struct dma_fence_cb cb;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/*
* Incorporeal!
@@ -43,20 +39,30 @@ struct i915_active_request {
struct active_node;
+#define I915_ACTIVE_MAY_SLEEP BIT(0)
+
+#define __i915_active_call __aligned(4)
+#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
+
struct i915_active {
- struct drm_i915_private *i915;
+ atomic_t count;
+ struct mutex mutex;
+ spinlock_t tree_lock;
struct active_node *cache;
struct rb_root tree;
- struct mutex mutex;
- atomic_t count;
+
+ /* Preallocated "exclusive" node */
+ struct i915_active_fence excl;
unsigned long flags;
-#define I915_ACTIVE_GRAB_BIT 0
+#define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
+ struct work_struct work;
+
struct llist_head preallocated_barriers;
};
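/*
 * Illustrative lifecycle sketch (my_* names invented): the retire
 * callback is tagged __i915_active_call so its 4-byte-aligned address can
 * carry the MAY_SLEEP bit, making retirement run from ref->work instead
 * of the fence signalling path.
 */
struct my_tracked {
	struct i915_active active;
};

static int my_tracked_active(struct i915_active *ref)
{
	return 0;	/* first-user setup would go here */
}

__i915_active_call
static void my_tracked_retire(struct i915_active *ref)
{
	/* idle: every tracked fence has signalled; sleeping is allowed */
}

static void my_tracked_init(struct my_tracked *t)
{
	i915_active_init(&t->active, my_tracked_active,
			 i915_active_may_sleep(my_tracked_retire));
}

static void my_tracked_fini(struct my_tracked *t)
{
	i915_active_fini(&t->active);	/* must already be idle */
}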
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index fe1871d7c126..e9d4200ce3bc 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -38,6 +38,7 @@ int __init i915_global_buddy_init(void)
if (!global.slab_blocks)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b0f51591f2e4..8016484ebcd3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,7 +41,10 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
@@ -61,11 +64,18 @@ static int i915_capabilities(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
struct drm_printer p = drm_seq_file_printer(m);
+ const char *msg;
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+ msg = "n/a";
+#ifdef CONFIG_INTEL_IOMMU
+ msg = enableddisabled(intel_iommu_gfx_mapped);
+#endif
+ seq_printf(m, "iommu: %s\n", msg);
+
intel_device_info_dump_flags(info, &p);
intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
intel_driver_caps_print(&dev_priv->caps, &p);
@@ -77,11 +87,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_pin_flag(struct drm_i915_gem_object *obj)
-{
- return obj->pin_global ? 'p' : ' ';
-}
-
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (i915_gem_object_get_tiling(obj)) {
@@ -140,9 +145,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
@@ -221,8 +225,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", pin_count);
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_global)
- seq_printf(m, " (global)");
+ if (i915_gem_object_is_framebuffer(obj))
+ seq_printf(m, " (fb)");
engine = i915_gem_object_last_write_engine(obj);
if (engine)
@@ -243,6 +247,9 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ return 0;
+
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
@@ -290,6 +297,7 @@ static int per_file_stats(int id, void *ptr, void *data)
}
spin_unlock(&obj->vma.lock);
+ i915_gem_object_put(obj);
return 0;
}
@@ -309,34 +317,44 @@ static void print_context_stats(struct seq_file *m,
struct drm_i915_private *i915)
{
struct file_stats kstats = {};
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &i915->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
intel_context_lock_pinned(ce);
if (intel_context_is_pinned(ce)) {
+ rcu_read_lock();
if (ce->state)
per_file_stats(0,
ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ rcu_read_unlock();
}
intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- struct file_stats stats = { .vm = ctx->vm, };
+ struct file_stats stats = {
+ .vm = rcu_access_pointer(ctx->vm),
+ };
struct drm_file *file = ctx->file_priv->file;
struct task_struct *task;
char name[80];
- spin_lock(&file->table_lock);
+ rcu_read_lock();
idr_for_each(&file->object_idr, per_file_stats, &stats);
- spin_unlock(&file->table_lock);
+ rcu_read_unlock();
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
@@ -346,7 +364,12 @@ static void print_context_stats(struct seq_file *m,
print_file_stats(m, name, stats);
}
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
print_file_stats(m, "[k]contexts", kstats);
}
@@ -354,7 +377,6 @@ static void print_context_stats(struct seq_file *m,
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- int ret;
seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
i915->mm.shrink_count,
@@ -363,12 +385,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
print_context_stats(m, i915);
- mutex_unlock(&i915->drm.struct_mutex);
return 0;
}
@@ -376,7 +393,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
static void gen8_display_interrupt_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- int pipe;
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
@@ -527,6 +544,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
gen8_display_interrupt_info(m);
} else if (IS_VALLEYVIEW(dev_priv)) {
+ intel_wakeref_t pref;
+
seq_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
seq_printf(m, "Display IIR:\t%08x\n",
@@ -537,7 +556,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
- intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
pref = intel_display_power_get_if_enabled(dev_priv,
@@ -571,12 +589,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "PM IMR:\t\t%08x\n",
I915_READ(GEN6_PMIMR));
+ pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
@@ -772,7 +792,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_uncore *uncore = &dev_priv->uncore;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
int ret = 0;
@@ -808,23 +828,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
seq_printf(m, "actual GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+ intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else if (INTEL_GEN(dev_priv) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
@@ -858,7 +878,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
else
reqf >>= 25;
}
- reqf = intel_gpu_freq(dev_priv, reqf);
+ reqf = intel_gpu_freq(rps, reqf);
rpmodectl = I915_READ(GEN6_RP_CONTROL);
rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -871,8 +891,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_gpu_freq(dev_priv,
- intel_get_cagf(dev_priv, rpstat));
+ cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -949,37 +968,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(dev_priv) ||
INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, max_freq));
+ intel_gpu_freq(rps, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->idle_freq));
+ intel_gpu_freq(rps, rps->idle_freq));
seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->min_freq));
+ intel_gpu_freq(rps, rps->min_freq));
seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
} else {
seq_puts(m, "no P-state info available\n");
}
@@ -992,91 +1011,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
- struct seq_file *m,
- struct intel_instdone *instdone)
-{
- int slice;
- int subslice;
-
- seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
- instdone->instdone);
-
- if (INTEL_GEN(dev_priv) <= 3)
- return;
-
- seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
- instdone->slice_common);
-
- if (INTEL_GEN(dev_priv) <= 6)
- return;
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->sampler[slice][subslice]);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->row[slice][subslice]);
-}
-
-static int i915_hangcheck_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- enum intel_engine_id id;
-
- seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
- if (test_bit(I915_WEDGED, &gt->reset.flags))
- seq_puts(m, "\tWedged\n");
- if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
- seq_puts(m, "\tDevice (global) reset in progress\n");
-
- if (!i915_modparams.enable_hangcheck) {
- seq_puts(m, "Hangcheck disabled\n");
- return 0;
- }
-
- if (timer_pending(&gt->hangcheck.work.timer))
- seq_printf(m, "Hangcheck active, timer fires in %dms\n",
- jiffies_to_msecs(gt->hangcheck.work.timer.expires -
- jiffies));
- else if (delayed_work_pending(&gt->hangcheck.work))
- seq_puts(m, "Hangcheck active, work pending\n");
- else
- seq_puts(m, "Hangcheck inactive\n");
-
- seq_printf(m, "GT active? %s\n", yesno(gt->awake));
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- for_each_engine(engine, i915, id) {
- struct intel_instdone instdone;
-
- seq_printf(m, "%s: %d ms ago\n",
- engine->name,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
-
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)engine->hangcheck.acthd,
- intel_engine_get_active_head(engine));
-
- intel_engine_get_instdone(engine, &instdone);
-
- seq_puts(m, "\tinstdone read =\n");
- i915_instdone_info(i915, m, &instdone);
-
- seq_puts(m, "\tinstdone accu =\n");
- i915_instdone_info(i915, m,
- &engine->hangcheck.instdone);
- }
- }
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1157,11 +1091,13 @@ static void print_rc6_res(struct seq_file *m,
const char *title,
const i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- seq_printf(m, "%s %u (%llu us)\n",
- title, I915_READ(reg),
- intel_rc6_residency_us(dev_priv, reg));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ seq_printf(m, "%s %u (%llu us)\n", title,
+ intel_uncore_read(&i915->uncore, reg),
+ intel_rc6_residency_us(&i915->gt.rc6, reg));
}
static int vlv_drpc_info(struct seq_file *m)
@@ -1439,7 +1375,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
@@ -1464,10 +1400,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
- intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ||
- INTEL_GEN(dev_priv) >= 10 ?
- GEN9_FREQ_SCALER : 1))),
+ intel_gpu_freq(rps,
+ (gpu_freq *
+ (IS_GEN9_BC(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10 ?
+ GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -1478,21 +1415,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_opregion *opregion = &dev_priv->opregion;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
- mutex_unlock(&dev->struct_mutex);
-
-out:
return 0;
}
@@ -1512,11 +1439,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
@@ -1551,7 +1473,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1564,23 +1485,20 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct i915_gem_context *ctx, *cn;
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
seq_puts(m, "HW context ");
- if (!list_empty(&ctx->hw_id_link))
- seq_printf(m, "%x [pin %u]", ctx->hw_id,
- atomic_read(&ctx->hw_id_pin_count));
if (ctx->pid) {
struct task_struct *task;
@@ -1614,9 +1532,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
i915_gem_context_unlock_engines(ctx);
seq_putc(m, '\n');
- }
- mutex_unlock(&dev->struct_mutex);
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock(&i915->gem.contexts.lock);
return 0;
}
@@ -1654,9 +1575,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_x));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
- swizzle_string(dev_priv->mm.bit_6_swizzle_y));
+ swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
@@ -1711,7 +1632,7 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
@@ -1723,7 +1644,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
vlv_punit_put(dev_priv);
act_freq = (act_freq >> 8) & 0xff;
} else {
- act_freq = intel_get_cagf(dev_priv,
+ act_freq = intel_get_cagf(rps,
I915_READ(GEN6_RPSTAT1));
}
}
@@ -1734,17 +1655,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq),
- intel_gpu_freq(dev_priv, act_freq));
+ intel_gpu_freq(rps, rps->cur_freq),
+ intel_gpu_freq(rps, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
- intel_gpu_freq(dev_priv, rps->max_freq));
+ intel_gpu_freq(rps, rps->min_freq),
+ intel_gpu_freq(rps, rps->min_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq_softlimit),
+ intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
- intel_gpu_freq(dev_priv, rps->idle_freq),
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- intel_gpu_freq(dev_priv, rps->boost_freq));
+ intel_gpu_freq(rps, rps->idle_freq),
+ intel_gpu_freq(rps, rps->efficient_freq),
+ intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
@@ -1860,8 +1781,8 @@ static void i915_guc_log_info(struct seq_file *m,
struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
- if (!intel_guc_log_relay_enabled(log)) {
- seq_puts(m, "GuC log relay disabled\n");
+ if (!intel_guc_log_relay_created(log)) {
+ seq_puts(m, "GuC log relay not created\n");
return;
}
@@ -2048,9 +1969,23 @@ i915_guc_log_relay_write(struct file *filp,
loff_t *ppos)
{
struct intel_guc_log *log = filp->private_data;
+ int val;
+ int ret;
- intel_guc_log_relay_flush(log);
- return cnt;
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * A write of 1 enables and starts the GuC log relay;
+ * any other value just flushes the relay.
+ */
+ if (val == 1)
+ ret = intel_guc_log_relay_start(log);
+ else
+ intel_guc_log_relay_flush(log);
+
+ return ret ?: cnt;
}
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
@@ -2133,7 +2068,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = I915_READ(EDP_PSR2_STATUS);
+ val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
EDP_PSR2_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2149,7 +2084,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = I915_READ(EDP_PSR_STATUS);
+ val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
EDP_PSR_STATUS_STATE_SHIFT;
if (status_val < ARRAY_SIZE(live_status))
@@ -2188,14 +2123,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
status = "disabled";
seq_printf(m, "PSR mode: %s\n", status);
- if (!psr->enabled)
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
goto unlock;
+ }
if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
+ val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = I915_READ(EDP_PSR_CTL);
+ val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2208,7 +2147,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 * SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
+ val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance counter: %u\n", val);
}
@@ -2226,8 +2166,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 * Reading all 3 registers beforehand to minimize crossing a
* frame boundary between register reads
*/
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
- su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
+ frame));
+ su_frames_val[frame / 3] = val;
+ }
seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
@@ -2360,8 +2303,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(dev_priv,
- power_domain),
+ intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
}
@@ -2396,6 +2338,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
if (INTEL_GEN(dev_priv) >= 12) {
dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+ /*
+ * NOTE: DMC_DEBUG3 is a general-purpose register.
+ * According to BSpec 49196, the DMC f/w reuses the DC5/6 counter
+ * register for DC3CO debugging and validation, but the TGL DMC f/w
+ * uses the DMC_DEBUG3 register as the DC3CO counter.
+ */
+ seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
} else {
dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
SKL_CSR_DC3_DC5_COUNT;
@@ -3110,8 +3059,9 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
if (!intel_dig_port->dp.can_mst)
continue;
- seq_printf(m, "MST Source Port %c\n",
- port_name(intel_dig_port->base.port));
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_connector_list_iter_end(&conn_iter);
@@ -3573,6 +3523,37 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
i915_wedged_get, i915_wedged_set,
"%llu\n");
+static int
+i915_perf_noa_delay_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
+
+ /*
+ * This would lead to infinite waits as we're doing timestamp
+ * difference on the CS with only 32bits.
+ */
+ if (val > mul_u32_u32(U32_MAX, clk))
+ return -EINVAL;
+
+ atomic64_set(&i915->perf.noa_programming_delay, val);
+ return 0;
+}
+
+static int
+i915_perf_noa_delay_get(void *data, u64 *val)
+{
+ struct drm_i915_private *i915 = data;
+
+ *val = atomic64_read(&i915->perf.noa_programming_delay);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
+ i915_perf_noa_delay_get,
+ i915_perf_noa_delay_set,
+ "%llu\n");
+
#define DROP_UNBOUND BIT(0)
#define DROP_BOUND BIT(1)
#define DROP_RETIRE BIT(2)
@@ -3582,6 +3563,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
#define DROP_IDLE BIT(6)
#define DROP_RESET_ACTIVE BIT(7)
#define DROP_RESET_SEQNO BIT(8)
+#define DROP_RCU BIT(9)
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
@@ -3590,7 +3572,8 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
DROP_SHRINK_ALL |\
DROP_IDLE | \
DROP_RESET_ACTIVE | \
- DROP_RESET_SEQNO)
+ DROP_RESET_SEQNO | \
+ DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -3598,58 +3581,48 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
-
static int
-i915_drop_caches_set(void *data, u64 val)
+gt_drop_caches(struct intel_gt *gt, u64 val)
{
- struct drm_i915_private *i915 = data;
-
- DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
- val, val & DROP_ALL);
+ int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(&i915->gt),
- I915_IDLE_ENGINES_TIMEOUT))
- intel_gt_set_wedged(&i915->gt);
+ wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ intel_gt_set_wedged(gt);
- /* No need to check and wait for gpu resets, only libdrm auto-restarts
- * on ioctls on -EAGAIN. */
- if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
- int ret;
+ if (val & DROP_RETIRE)
+ intel_gt_retire_requests(gt);
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (val & (DROP_IDLE | DROP_ACTIVE)) {
+ ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
+ }
- /*
- * To finish the flush of the idle_worker, we must complete
- * the switch-to-kernel-context, which requires a double
- * pass through wait_for_idle: first queues the switch,
- * second waits for the switch.
- */
- if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_IDLE) {
+ ret = intel_gt_pm_wait_for_idle(gt);
+ if (ret)
+ return ret;
+ }
- if (ret == 0 && val & DROP_IDLE)
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
+ intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
- if (val & DROP_RETIRE)
- i915_retire_requests(i915);
+ return 0;
+}
- mutex_unlock(&i915->drm.struct_mutex);
+static int
+i915_drop_caches_set(void *data, u64 val)
+{
+ struct drm_i915_private *i915 = data;
+ int ret;
- if (ret == 0 && val & DROP_IDLE)
- ret = intel_gt_pm_wait_for_idle(&i915->gt);
- }
+ DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
+ val, val & DROP_ALL);
- if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
- intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
+ ret = gt_drop_caches(&i915->gt, val);
+ if (ret)
+ return ret;
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -3662,10 +3635,8 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_shrink_all(i915);
fs_reclaim_release(GFP_KERNEL);
- if (val & DROP_IDLE) {
- flush_delayed_work(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
- }
+ if (val & DROP_RCU)
+ rcu_barrier();
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
@@ -3721,6 +3692,15 @@ i915_cache_sharing_set(void *data, u64 val)
return 0;
}
+static void
+intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
+ u8 *to_mask)
+{
+ int offset = slice * sseu->ss_stride;
+
+ memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
+}
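/*
 * Illustrative helper, not in the patch: the addressing used by the copy
 * above. subslice_mask[] is slice-major, with ss_stride bytes of mask per
 * slice, so subslice ss of slice s lives at byte
 * s * ss_stride + ss / BITS_PER_BYTE, bit ss % BITS_PER_BYTE.
 */
static inline bool my_subslice_test(const struct sseu_dev_info *sseu,
				    int slice, int subslice)
{
	int offset = slice * sseu->ss_stride + subslice / BITS_PER_BYTE;

	return sseu->subslice_mask[offset] & BIT(subslice % BITS_PER_BYTE);
}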
+
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
@@ -3794,12 +3774,13 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
continue;
sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
- if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ if (info->sseu.has_subslice_pg &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
@@ -3845,18 +3826,21 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(dev_priv))
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
+ u8 ss_idx = s * info->sseu.ss_stride +
+ ss / BITS_PER_BYTE;
if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
- sseu->subslice_mask[s] |= BIT(ss);
+ sseu->subslice_mask[ss_idx] |=
+ BIT(ss % BITS_PER_BYTE);
}
eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
@@ -3873,25 +3857,23 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
int s;
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
- sseu->eu_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
- for (s = 0; s < fls(sseu->slice_mask); s++) {
- sseu->subslice_mask[s] =
- RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
- }
+ sseu->eu_per_subslice = info->sseu.eu_per_subslice;
+ for (s = 0; s < fls(sseu->slice_mask); s++)
+ intel_sseu_copy_subslices(&info->sseu, s,
+ sseu->subslice_mask);
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
- u8 subslice_7eu =
- RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
+ u8 subslice_7eu = info->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
@@ -3938,6 +3920,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
struct sseu_dev_info sseu;
intel_wakeref_t wakeref;
@@ -3945,14 +3928,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
+ i915_print_sseu_info(m, true, &info->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
- sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
- sseu.max_eus_per_subslice =
- RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+ intel_sseu_set_info(&sseu, info->sseu.max_slices,
+ info->sseu.max_subslices,
+ info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(dev_priv))
@@ -3973,13 +3955,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- file->private_data =
- (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_user_get(&i915->uncore);
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
return 0;
}
@@ -3987,13 +3968,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ struct intel_gt *gt = &i915->gt;
- if (INTEL_GEN(i915) < 6)
- return 0;
-
- intel_uncore_forcewake_user_put(&i915->uncore);
- intel_runtime_pm_put(&i915->runtime_pm,
- (intel_wakeref_t)(uintptr_t)file->private_data);
+ if (INTEL_GEN(i915) >= 6)
+ intel_uncore_forcewake_user_put(&i915->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
return 0;
}
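
The open/release pair above is deliberately symmetric: open takes a GT power-management wakeref unconditionally and user forcewake only on gen6+, while release undoes both in reverse order. The gt->user_wakeref counter presumably lets the GT code reinstate user-held forcewake across reset and resume. A condensed sketch of the pairing, as an assumption mirroring the hunks above:

    /* Sketch (assumption): the symmetric get/put pair introduced above. */
    static void user_forcewake_get(struct intel_gt *gt)
    {
            atomic_inc(&gt->user_wakeref);
            intel_gt_pm_get(gt);
            if (INTEL_GEN(gt->i915) >= 6)
                    intel_uncore_forcewake_user_get(gt->uncore);
    }

    static void user_forcewake_put(struct intel_gt *gt)
    {
            if (INTEL_GEN(gt->i915) >= 6)
                    intel_uncore_forcewake_user_put(gt->uncore);
            intel_gt_pm_put(gt);
            atomic_dec(&gt->user_wakeref);
    }
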
@@ -4302,7 +4282,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
@@ -4339,6 +4318,7 @@ static const struct i915_debugfs_files {
const char *name;
const struct file_operations *fops;
} i915_debugfs_files[] = {
+ {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -4528,7 +4508,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc_params.compression_enable));
+ yesno(crtc_state->dsc.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
seq_printf(m, "Force_DSC_Enable: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3d717e282908..3c512c571e60 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -36,7 +36,6 @@
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>
@@ -54,16 +53,17 @@
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
-#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
+#include "display/intel_vga.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -72,10 +72,12 @@
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
+#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
+#include "intel_memory_region.h"
#include "intel_pm.h"
static struct drm_driver driver;
@@ -269,179 +271,97 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-/* true = enable decode, false = disable decode */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = cookie;
-
- intel_modeset_vga_set_state(dev_priv, state);
- if (state)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static int i915_resume_switcheroo(struct drm_i915_private *i915);
-static int i915_suspend_switcheroo(struct drm_i915_private *i915,
- pm_message_t state);
-
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
- struct drm_i915_private *i915 = pdev_to_i915(pdev);
- pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
- if (!i915) {
- dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
- return;
- }
-
- if (state == VGA_SWITCHEROO_ON) {
- pr_info("switched on\n");
- i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- /* i915 resume handler doesn't set to D0 */
- pci_set_power_state(pdev, PCI_D0);
- i915_resume_switcheroo(i915);
- i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
- } else {
- pr_info("switched off\n");
- i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
- i915_suspend_switcheroo(i915, pmm);
- i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
- }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
- struct drm_i915_private *i915 = pdev_to_i915(pdev);
-
- /*
- * FIXME: open_count is protected by drm_global_mutex but that would lead to
- * locking inversion with the driver load path. And the access here is
- * completely racy anyway. So don't bother with locking for now.
- */
- return i915 && i915->drm.open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
- .set_gpu_state = i915_switcheroo_set_state,
- .reprobe = NULL,
- .can_switch = i915_switcheroo_can_switch,
-};
-
-static int i915_driver_modeset_probe(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_probe_failure(dev_priv))
+ if (i915_inject_probe_failure(i915))
return -ENODEV;
- if (HAS_DISPLAY(dev_priv)) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
+ if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
if (ret)
goto out;
}
- intel_bios_init(dev_priv);
+ intel_bios_init(i915);
- /* If we have > 1 VGA cards, then we need to arbitrate access
- * to the common VGA resources.
- *
- * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
- * then we do not take part in VGA arbitration and the
- * vga_client_register() fails with -ENODEV.
- */
- ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
- if (ret && ret != -ENODEV)
+ ret = intel_vga_register(i915);
+ if (ret)
goto out;
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+ ret = i915_switcheroo_register(i915);
if (ret)
goto cleanup_vga_client;
- intel_power_domains_init_hw(dev_priv, false);
+ intel_power_domains_init_hw(i915, false);
- intel_csr_ucode_init(dev_priv);
+ intel_csr_ucode_init(i915);
- ret = intel_irq_install(dev_priv);
+ ret = intel_irq_install(i915);
if (ret)
goto cleanup_csr;
- intel_gmbus_setup(dev_priv);
-
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
- ret = intel_modeset_init(dev);
+ ret = intel_modeset_init(i915);
if (ret)
goto cleanup_irq;
- ret = i915_gem_init(dev_priv);
+ ret = i915_gem_init(i915);
if (ret)
goto cleanup_modeset;
- intel_overlay_setup(dev_priv);
+ intel_overlay_setup(i915);
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
return 0;
- ret = intel_fbdev_init(dev);
+ ret = intel_fbdev_init(&i915->drm);
if (ret)
goto cleanup_gem;
/* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(dev_priv);
+ intel_hpd_init(i915);
- intel_init_ipc(dev_priv);
+ intel_init_ipc(i915);
return 0;
cleanup_gem:
- i915_gem_suspend(dev_priv);
- i915_gem_driver_remove(dev_priv);
- i915_gem_driver_release(dev_priv);
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
cleanup_modeset:
- intel_modeset_driver_remove(dev);
+ intel_modeset_driver_remove(i915);
cleanup_irq:
- intel_irq_uninstall(dev_priv);
- intel_gmbus_teardown(dev_priv);
+ intel_irq_uninstall(i915);
cleanup_csr:
- intel_csr_ucode_fini(dev_priv);
- intel_power_domains_driver_remove(dev_priv);
- vga_switcheroo_unregister_client(pdev);
+ intel_csr_ucode_fini(i915);
+ intel_power_domains_driver_remove(i915);
+ i915_switcheroo_unregister(i915);
cleanup_vga_client:
- vga_client_register(pdev, NULL, NULL, NULL);
+ intel_vga_unregister(i915);
out:
return ret;
}
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
- struct apertures_struct *ap;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool primary;
- int ret;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
+ intel_modeset_driver_remove(i915);
- ap->ranges[0].base = ggtt->gmadr.start;
- ap->ranges[0].size = ggtt->mappable_end;
+ intel_irq_uninstall(i915);
- primary =
- pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ intel_bios_driver_remove(i915);
- ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ i915_switcheroo_unregister(i915);
- kfree(ap);
+ intel_vga_unregister(i915);
- return ret;
+ intel_csr_ucode_fini(i915);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
@@ -598,9 +518,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_gt_init_early(&dev_priv->gt, dev_priv);
- ret = i915_gem_init_early(dev_priv);
- if (ret < 0)
- goto err_gt;
+ i915_gem_init_early(dev_priv);
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
@@ -622,7 +540,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
-err_gt:
intel_gt_driver_late_release(&dev_priv->gt);
vlv_free_s0ix_state(dev_priv);
err_workqueues:
@@ -680,12 +597,10 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
intel_uc_init_mmio(&dev_priv->gt.uc);
- ret = intel_engines_init_mmio(dev_priv);
+ ret = intel_engines_init_mmio(&dev_priv->gt);
if (ret)
goto err_uncore;
- i915_gem_init_mmio(dev_priv);
-
return 0;
err_uncore:
@@ -703,7 +618,7 @@ err_bridge:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_engines_cleanup(dev_priv);
+ intel_engines_cleanup(&dev_priv->gt);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1157,8 +1072,8 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
@@ -1246,32 +1161,24 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
- ret = i915_kick_out_firmware_fb(dev_priv);
- if (ret) {
- DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ if (ret)
goto err_ggtt;
- }
- ret = vga_remove_vgacon(pdev);
- if (ret) {
- DRM_ERROR("failed to remove conflicting VGA console\n");
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
goto err_ggtt;
- }
- ret = i915_ggtt_init_hw(dev_priv);
+ ret = intel_memory_regions_hw_probe(dev_priv);
if (ret)
goto err_ggtt;
- intel_gt_init_hw(dev_priv);
+ intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
pci_set_master(pdev);
@@ -1288,7 +1195,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
@@ -1306,16 +1213,13 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
- goto err_ggtt;
+ goto err_mem_regions;
}
}
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_sanitize_gt_powersave(dev_priv);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1361,6 +1265,8 @@ err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
+err_mem_regions:
+ intel_memory_regions_driver_release(dev_priv);
err_ggtt:
i915_ggtt_driver_release(dev_priv);
err_perf:
@@ -1415,14 +1321,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (HAS_DISPLAY(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
}
- if (IS_GEN(dev_priv, 5))
- intel_gpu_ips_init(dev_priv);
+ intel_gt_driver_register(&dev_priv->gt);
intel_audio_init(dev_priv);
@@ -1439,7 +1344,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (HAS_DISPLAY(dev_priv))
+ if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1465,7 +1370,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
*/
drm_kms_helper_poll_fini(&dev_priv->drm);
- intel_gpu_ips_teardown();
+ intel_gt_driver_unregister(&dev_priv->gt);
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
@@ -1574,6 +1479,23 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ /*
+ * Check if we support fake LMEM -- for now we only unleash this for
+ * the live selftests(test-and-exit).
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
+ if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ i915_modparams.fake_lmem_start) {
+ mkwrite_device_info(dev_priv)->memory_regions =
+ REGION_SMEM | REGION_LMEM | REGION_STOLEN;
+ mkwrite_device_info(dev_priv)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(dev_priv));
+ GEM_BUG_ON(!IS_DGFX(dev_priv));
+ }
+ }
+#endif
+
ret = pci_enable_device(pdev);
if (ret)
goto out_fini;
@@ -1594,7 +1516,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(&dev_priv->drm);
+ ret = i915_driver_modeset_probe(dev_priv);
if (ret < 0)
goto out_cleanup_hw;
@@ -1608,10 +1530,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_cleanup_hw:
i915_driver_hw_remove(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
-
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_sanitize_gt_powersave(dev_priv);
out_cleanup_mmio:
i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
@@ -1627,8 +1547,6 @@ out_fini:
void i915_driver_remove(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = i915->drm.pdev;
-
disable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_unregister(i915);
@@ -1649,19 +1567,9 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_modeset_driver_remove(&i915->drm);
-
- intel_bios_driver_remove(i915);
+ i915_driver_modeset_remove(i915);
- vga_switcheroo_unregister_client(pdev);
- vga_client_register(pdev, NULL, NULL, NULL);
-
- intel_csr_ucode_fini(i915);
-
- /* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_reset_error_state(i915);
-
i915_gem_driver_remove(i915);
intel_power_domains_driver_remove(i915);
@@ -1680,11 +1588,9 @@ static void i915_driver_release(struct drm_device *dev)
i915_gem_driver_release(dev_priv);
+ intel_memory_regions_driver_release(dev_priv);
i915_ggtt_driver_release(dev_priv);
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_sanitize_gt_powersave(dev_priv);
-
i915_driver_mmio_release(dev_priv);
enable_rpm_wakeref_asserts(rpm);
@@ -1728,12 +1634,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
i915_gem_context_close(file);
i915_gem_release(dev, file);
- mutex_unlock(&dev->struct_mutex);
- kfree(file_priv);
+ kfree_rcu(file_priv, rcu);
/* Catch up with all the deferred frees from "this" client */
i915_gem_flush_free_objects(to_i915(dev));
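
The kfree_rcu() conversion above relies on the union added to struct drm_i915_file_private further down in this patch: the rcu_head overlays a field that is dead once the file is closed. A generic sketch of that idiom (standard kernel pattern, not code from this patch):

    struct example {
            union {
                    struct file *owner;     /* valid while published */
                    struct rcu_head rcu;    /* reused once unpublished */
            };
    };

    static void example_close(struct example *e)
    {
            /* Readers under rcu_read_lock() stay safe for a grace period. */
            kfree_rcu(e, rcu);
    }
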
@@ -1847,8 +1751,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
- i915_rc6_ctx_wa_suspend(dev_priv);
-
intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
@@ -1890,8 +1792,7 @@ out:
return ret;
}
-static int
-i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
@@ -1915,18 +1816,17 @@ static int i915_drm_resume(struct drm_device *dev)
int ret;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- intel_sanitize_gt_powersave(dev_priv);
- i915_gem_sanitize(dev_priv);
+ intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6);
+
+ intel_gt_sanitize(&dev_priv->gt, true);
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
DRM_ERROR("failed to re-enable GGTT\n");
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1951,7 +1851,7 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -2048,20 +1948,14 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_display_power_resume_early(dev_priv);
- intel_sanitize_gt_powersave(dev_priv);
-
intel_power_domains_resume(dev_priv);
- i915_rc6_ctx_wa_resume(dev_priv);
-
- intel_gt_sanitize(&dev_priv->gt, true);
-
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
}
-static int i915_resume_switcheroo(struct drm_i915_private *i915)
+int i915_resume_switcheroo(struct drm_i915_private *i915)
{
int ret;
@@ -2594,9 +2488,6 @@ static int intel_runtime_suspend(struct device *kdev)
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
- return -ENODEV;
-
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -2629,7 +2520,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
enable_rpm_wakeref_asserts(rpm);
@@ -2709,7 +2600,7 @@ static int intel_runtime_resume(struct device *kdev)
* we can do is to hope that things will still work (and disable RPM).
*/
intel_gt_runtime_resume(&dev_priv->gt);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
/*
* On VLV/CHV display interrupts are part of the display
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 89b6112bd66b..e29bc137e7ba 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
@@ -84,6 +85,7 @@
#include "intel_device_info.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
+#include "intel_memory_region.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
@@ -92,12 +94,15 @@
#include "i915_gem_fence_reg.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
+#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"
+#include "intel_region_lmem.h"
+
#include "intel_gvt.h"
/* General customization:
@@ -105,8 +110,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190822"
-#define DRIVER_TIMESTAMP 1566477988
+#define DRIVER_DATE "20191101"
+#define DRIVER_TIMESTAMP 1572604873
struct drm_i915_gem_object;
@@ -185,7 +190,11 @@ struct i915_mmu_object;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
- struct drm_file *file;
+
+ union {
+ struct drm_file *file;
+ struct rcu_head rcu;
+ };
struct {
spinlock_t lock;
@@ -272,6 +281,7 @@ struct drm_i915_display_funcs {
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -284,7 +294,8 @@ struct drm_i915_display_funcs {
struct intel_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
struct intel_atomic_state *old_state);
- void (*update_crtcs)(struct intel_atomic_state *state);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+ void (*commit_modeset_disables)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -331,6 +342,7 @@ struct intel_csr {
i915_reg_t mmioaddr[20];
u32 mmiodata[20];
u32 dc_state;
+ u32 target_dc_state;
u32 allowed_dc_mask;
intel_wakeref_t wakeref;
};
@@ -479,6 +491,7 @@ struct i915_psr {
bool enabled;
struct intel_dp *dp;
enum pipe pipe;
+ enum transcoder transcoder;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
@@ -492,6 +505,9 @@ struct i915_psr {
bool sink_not_reliable;
bool irq_aux_error;
u16 su_x_granularity;
+ bool dc3co_enabled;
+ u32 dc3co_exit_delay;
+ struct delayed_work idle_work;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -529,108 +545,6 @@ struct i915_suspend_saved_registers {
struct vlv_s0ix_state;
-struct intel_rps_ei {
- ktime_t ktime;
- u32 render_c0;
- u32 media_c0;
-};
-
-struct intel_rps {
- struct mutex lock; /* protects enabling and the worker */
-
- /*
- * work, interrupts_enabled and pm_iir are protected by
- * dev_priv->irq_lock
- */
- struct work_struct work;
- bool interrupts_enabled;
- u32 pm_iir;
-
- /* PM interrupt bits that should never be masked */
- u32 pm_intrmsk_mbz;
-
- /* Frequencies are stored in potentially platform dependent multiples.
- * In other words, *_freq needs to be multiplied by X to be interesting.
- * Soft limits are those which are used for the dynamic reclocking done
- * by the driver (raise frequencies under heavy loads, and lower for
- * lighter loads). Hard limits are those imposed by the hardware.
- *
- * A distinction is made for overclocking, which is never enabled by
- * default, and is considered to be above the hard limit if it's
- * possible at all.
- */
- u8 cur_freq; /* Current frequency (cached, may not == HW) */
- u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */
- u8 max_freq_softlimit; /* Max frequency permitted by the driver */
- u8 max_freq; /* Maximum frequency, RP0 if not overclocking */
- u8 min_freq; /* AKA RPn. Minimum frequency */
- u8 boost_freq; /* Frequency to request when wait boosting */
- u8 idle_freq; /* Frequency to request when we are idle */
- u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
- u8 rp1_freq; /* "less than" RP0 power/frequency */
- u8 rp0_freq; /* Non-overclocked max frequency. */
- u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
-
- int last_adj;
-
- struct {
- struct mutex mutex;
-
- enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
- unsigned int interactive;
-
- u8 up_threshold; /* Current %busy required to upclock */
- u8 down_threshold; /* Current %busy required to downclock */
- } power;
-
- bool enabled;
- atomic_t num_waiters;
- atomic_t boosts;
-
- /* manual wa residency calculations */
- struct intel_rps_ei ei;
-};
-
-struct intel_rc6 {
- bool enabled;
- bool ctx_corrupted;
- intel_wakeref_t ctx_corrupted_wakeref;
- u64 prev_hw_residency[4];
- u64 cur_residency[4];
-};
-
-struct intel_llc_pstate {
- bool enabled;
-};
-
-struct intel_gen6_power_mgmt {
- struct intel_rps rps;
- struct intel_rc6 rc6;
- struct intel_llc_pstate llc_pstate;
-};
-
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
-
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- u64 last_time2;
- unsigned long gfx_power;
- u8 corr;
-
- int c_m;
- int r_t;
-};
-
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
@@ -679,6 +593,8 @@ struct i915_gem_mm {
*/
struct vfsmount *gemfs;
+ struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
+
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -690,11 +606,6 @@ struct i915_gem_mm {
*/
struct workqueue_struct *userptr_wq;
- /** Bit 6 swizzling required for X tiling */
- u32 bit_6_swizzle_x;
- /** Bit 6 swizzling required for Y tiling */
- u32 bit_6_swizzle_y;
-
/* shrinker accounting, also useful for userland debugging */
u64 shrink_memory;
u32 shrink_count;
@@ -975,305 +886,6 @@ struct intel_wm_config {
bool sprites_scaled;
};
-struct i915_oa_format {
- u32 format;
- int size;
-};
-
-struct i915_oa_reg {
- i915_reg_t addr;
- u32 value;
-};
-
-struct i915_oa_config {
- char uuid[UUID_STRING_LEN + 1];
- int id;
-
- const struct i915_oa_reg *mux_regs;
- u32 mux_regs_len;
- const struct i915_oa_reg *b_counter_regs;
- u32 b_counter_regs_len;
- const struct i915_oa_reg *flex_regs;
- u32 flex_regs_len;
-
- struct attribute_group sysfs_metric;
- struct attribute *attrs[2];
- struct device_attribute sysfs_metric_id;
-
- atomic_t ref_count;
-};
-
-struct i915_perf_stream;
-
-/**
- * struct i915_perf_stream_ops - the OPs to support a specific stream type
- */
-struct i915_perf_stream_ops {
- /**
- * @enable: Enables the collection of HW samples, either in response to
- * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
- * without `I915_PERF_FLAG_DISABLED`.
- */
- void (*enable)(struct i915_perf_stream *stream);
-
- /**
- * @disable: Disables the collection of HW samples, either in response
- * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
- * the stream.
- */
- void (*disable)(struct i915_perf_stream *stream);
-
- /**
- * @poll_wait: Call poll_wait, passing a wait queue that will be woken
- * once there is something ready to read() for the stream
- */
- void (*poll_wait)(struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait);
-
- /**
- * @wait_unlocked: For handling a blocking read, wait until there is
- * something ready to read() for the stream. E.g. wait on the same
- * wait queue that would be passed to poll_wait().
- */
- int (*wait_unlocked)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy buffered metrics as records to userspace
- * **buf**: the userspace, destination buffer
- * **count**: the number of bytes to copy, requested by userspace
- * **offset**: zero at the start of the read, updated as the read
- * proceeds, it represents how many bytes have been copied so far and
- * the buffer offset for copying the next record.
- *
- * Copy as many buffered i915 perf samples and records for this stream
- * to userspace as will fit in the given buffer.
- *
- * Only write complete records; returning -%ENOSPC if there isn't room
- * for a complete record.
- *
- * Return any error condition that results in a short read such as
- * -%ENOSPC or -%EFAULT, even though these may be squashed before
- * returning to userspace.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @destroy: Cleanup any stream specific resources.
- *
- * The stream will always be disabled before this is called.
- */
- void (*destroy)(struct i915_perf_stream *stream);
-};
-
-/**
- * struct i915_perf_stream - state for a single open stream FD
- */
-struct i915_perf_stream {
- /**
- * @dev_priv: i915 drm device
- */
- struct drm_i915_private *dev_priv;
-
- /**
- * @link: Links the stream into ``&drm_i915_private->streams``
- */
- struct list_head link;
-
- /**
- * @wakeref: As we keep the device awake while the perf stream is
- * active, we track our runtime pm reference for later release.
- */
- intel_wakeref_t wakeref;
-
- /**
- * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
- * properties given when opening a stream, representing the contents
- * of a single sample as read() by userspace.
- */
- u32 sample_flags;
-
- /**
- * @sample_size: Considering the configured contents of a sample
- * combined with the required header size, this is the total size
- * of a single sample record.
- */
- int sample_size;
-
- /**
- * @ctx: %NULL if measuring system-wide across all contexts or a
- * specific context that is being monitored.
- */
- struct i915_gem_context *ctx;
-
- /**
- * @enabled: Whether the stream is currently enabled, considering
- * whether the stream was opened in a disabled state and based
- * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
- */
- bool enabled;
-
- /**
- * @ops: The callbacks providing the implementation of this specific
- * type of configured stream.
- */
- const struct i915_perf_stream_ops *ops;
-
- /**
- * @oa_config: The OA configuration used by the stream.
- */
- struct i915_oa_config *oa_config;
-
- /**
- * The OA context specific information.
- */
- struct intel_context *pinned_ctx;
- u32 specific_ctx_id;
- u32 specific_ctx_id_mask;
-
- struct hrtimer poll_check_timer;
- wait_queue_head_t poll_wq;
- bool pollin;
-
- bool periodic;
- int period_exponent;
-
- /**
- * State of the OA buffer.
- */
- struct {
- struct i915_vma *vma;
- u8 *vaddr;
- u32 last_ctx_id;
- int format;
- int format_size;
- int size_exponent;
-
- /**
- * Locks reads and writes to all head/tail state
- *
- * Consider: the head and tail pointer state needs to be read
- * consistently from a hrtimer callback (atomic context) and
- * read() fop (user context) with tail pointer updates happening
- * in atomic context and head updates in user context and the
- * (unlikely) possibility of read() errors needing to reset all
- * head/tail state.
- *
- * Note: Contention/performance aren't currently a significant
- * concern here considering the relatively low frequency of
- * hrtimer callbacks (5ms period) and that reads typically only
- * happen in response to a hrtimer event and likely complete
- * before the next callback.
- *
- * Note: This lock is not held *while* reading and copying data
- * to userspace so the value of head observed in hrtimer
- * callbacks won't represent any partial consumption of data.
- */
- spinlock_t ptr_lock;
-
- /**
- * One 'aging' tail pointer and one 'aged' tail pointer ready to be
- * used for reading.
- *
- * Initial values of 0xffffffff are invalid and imply that an
- * update is required (and should be ignored by an attempted
- * read)
- */
- struct {
- u32 offset;
- } tails[2];
-
- /**
- * Index for the aged tail ready to read() data up to.
- */
- unsigned int aged_tail_idx;
-
- /**
- * A monotonic timestamp for when the current aging tail pointer
- * was read; used to determine when it is old enough to trust.
- */
- u64 aging_timestamp;
-
- /**
- * Although we can always read back the head pointer register,
- * we prefer to avoid trusting the HW state, just to avoid any
- * risk that some hardware condition could somehow bump the
- * head pointer unpredictably and cause us to forward the wrong
- * OA buffer data to userspace.
- */
- u32 head;
- } oa_buffer;
-};
-
-/**
- * struct i915_oa_ops - Gen specific implementation of an OA unit stream
- */
-struct i915_oa_ops {
- /**
- * @is_valid_b_counter_reg: Validates register's address for
- * programming boolean counters for a particular platform.
- */
- bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
- u32 addr);
-
- /**
- * @is_valid_mux_reg: Validates register's address for programming mux
- * for a particular platform.
- */
- bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @is_valid_flex_reg: Validates register's address for programming
- * flex EU filtering for a particular platform.
- */
- bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
-
- /**
- * @enable_metric_set: Selects and applies any MUX configuration to set
- * up the Boolean and Custom (B/C) counters that are part of the
- * counter reports being sampled. May apply system constraints such as
- * disabling EU clock gating as required.
- */
- int (*enable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @disable_metric_set: Remove system constraints associated with using
- * the OA unit.
- */
- void (*disable_metric_set)(struct i915_perf_stream *stream);
-
- /**
- * @oa_enable: Enable periodic sampling
- */
- void (*oa_enable)(struct i915_perf_stream *stream);
-
- /**
- * @oa_disable: Disable periodic sampling
- */
- void (*oa_disable)(struct i915_perf_stream *stream);
-
- /**
- * @read: Copy data from the circular OA buffer into a given userspace
- * buffer.
- */
- int (*read)(struct i915_perf_stream *stream,
- char __user *buf,
- size_t count,
- size_t *offset);
-
- /**
- * @oa_hw_tail_read: read the OA tail pointer register
- *
- * In particular this enables us to share all the fiddly code for
- * handling the OA unit tail pointer race that affects multiple
- * generations.
- */
- u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
-};
-
struct intel_cdclk_state {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
@@ -1333,11 +945,11 @@ struct drm_i915_private {
*/
u32 gpio_mmio_base;
+ u32 hsw_psr_mmio_adjust;
+
/* MMIO base address for MIPI regs */
u32 mipi_mmio_base;
- u32 psr_mmio_base;
-
u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
@@ -1369,7 +981,6 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
- u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1399,13 +1010,14 @@ struct drm_i915_private {
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
+ /*
+ * For reading holding any crtc lock is sufficient,
+ * for writing must hold all of them.
+ */
struct {
/*
* The current logical cdclk state.
* See intel_atomic_state.cdclk.logical
- *
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
*/
struct intel_cdclk_state logical;
/*
@@ -1416,6 +1028,9 @@ struct drm_i915_private {
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
int force_min_cdclk;
} cdclk;
@@ -1430,6 +1045,8 @@ struct drm_i915_private {
/* ordered wq for modesets */
struct workqueue_struct *modeset_wq;
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip_wq;
/* Display functions */
struct drm_i915_display_funcs display;
@@ -1470,7 +1087,11 @@ struct drm_i915_private {
*/
struct mutex dpll_lock;
- unsigned int active_crtcs;
+ /*
+ * For reading active_pipes, min_cdclk, min_voltage_level holding
+ * any crtc lock is sufficient, for writing must hold all of them.
+ */
+ u8 active_pipes;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -1499,13 +1120,6 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- /* gen6+ GT PM state */
- struct intel_gen6_power_mgmt gt_pm;
-
- /* ilk-only ips/rps state. Everything in here is protected by the global
- * mchdev_lock in intel_pm.c */
- struct intel_ilk_power_mgmt ips;
-
struct i915_power_domains power_domains;
struct i915_psr psr;
@@ -1530,25 +1144,7 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
int audio_power_refcount;
-
- struct {
- struct mutex mutex;
- struct list_head list;
- struct llist_head free_list;
- struct work_struct free_work;
-
- /* The hw wants to have a stable context identifier for the
- * lifetime of the context (for OA, PASID, faults, etc).
- * This is limited in execlists to 21 bits.
- */
- struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
-/* in Gen12 ID 0x7FF is reserved to indicate idle */
-#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
- struct list_head hw_id_list;
- } contexts;
+ u32 audio_freq_cntrl;
u32 fdi_rx_config;
@@ -1574,6 +1170,8 @@ struct drm_i915_private {
I915_SAGV_NOT_CONTROLLED
} sagv_status;
+ u32 sagv_block_time_us;
+
struct {
/*
* Raw watermark latency values:
@@ -1644,61 +1242,7 @@ struct drm_i915_private {
struct intel_runtime_pm runtime_pm;
- struct {
- bool initialized;
-
- struct kobject *metrics_kobj;
- struct ctl_table_header *sysctl_header;
-
- /*
- * Lock associated with adding/modifying/removing OA configs
- * in dev_priv->perf.metrics_idr.
- */
- struct mutex metrics_lock;
-
- /*
- * List of dynamic configurations, you need to hold
- * dev_priv->perf.metrics_lock to access it.
- */
- struct idr metrics_idr;
-
- /*
- * Lock associated with anything below within this structure
- * except exclusive_stream.
- */
- struct mutex lock;
- struct list_head streams;
-
- /*
- * The stream currently using the OA unit. If accessed
- * outside a syscall associated to its file
- * descriptor, you need to hold
- * dev_priv->drm.struct_mutex.
- */
- struct i915_perf_stream *exclusive_stream;
-
- /**
- * For rate limiting any notifications of spurious
- * invalid OA reports
- */
- struct ratelimit_state spurious_report_rs;
-
- struct i915_oa_config test_config;
-
- u32 gen7_latched_oastatus1;
- u32 ctx_oactxctrl_offset;
- u32 ctx_flexeu0_offset;
-
- /**
- * The RPT_ID/reason field for Gen8+ includes a bit
- * to determine if the CTX ID in the report is valid
- * but the specific bit differs between Gen 8 and 9
- */
- u32 gen8_valid_ctx_bit;
-
- struct i915_oa_ops ops;
- const struct i915_oa_format *oa_formats;
- } perf;
+ struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct intel_gt gt;
@@ -1706,34 +1250,19 @@ struct drm_i915_private {
struct {
struct notifier_block pm_notifier;
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * When we detect an idle GPU, we want to turn on
- * powersaving features. So once we see that there
- * are no more requests outstanding and no more
- * arrive within a small period of time, we fire
- * off the idle_work.
- */
- struct work_struct idle_work;
+ struct i915_gem_contexts {
+ spinlock_t lock; /* locks list */
+ struct list_head list;
+
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } contexts;
} gem;
u8 pch_ssc_use;
- /* For i945gm vblank irq vs. C3 workaround */
- struct {
- struct work_struct work;
- struct pm_qos_request pm_qos;
- u8 c3_disable_latency;
- u8 enabled;
- } i945gm_vblank;
+ /* For i915gm/i945gm vblank irq workaround */
+ u8 vblank_enabled;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -1796,10 +1325,10 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \
(tmp__) ? \
- ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
#define rb_to_uabi_engine(rb) \
@@ -1855,6 +1384,8 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
INTEL_INFO(dev_priv)->gen == (n))
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -1926,6 +1457,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
}
#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2060,6 +1592,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
+#define TGL_REVID_A0 0x0
+
+#define IS_TGL_REVID(p, since, until) \
+ (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
+
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
@@ -2166,6 +1703,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
+#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
/* Having GuC is not the same as using GuC */
@@ -2189,7 +1729,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
+
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+
+/* Only valid when HAS_DISPLAY() is true */
+#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display)
static inline bool intel_vtd_active(void)
{
@@ -2222,7 +1767,9 @@ extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
+int i915_resume_switcheroo(struct drm_i915_private *i915);
+int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
@@ -2241,12 +1788,13 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_sanitize(struct drm_i915_private *i915);
-int i915_gem_init_early(struct drm_i915_private *dev_priv);
+void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
+
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
/*
@@ -2331,15 +1879,11 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
-void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
@@ -2379,7 +1923,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
@@ -2395,9 +1939,9 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
i915_gem_object_is_tiled(obj);
}
@@ -2501,4 +2045,10 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
+static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
+{
+ return intel_guc_is_submission_supported(guc) &&
+ intel_guc_is_running(guc);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 98305d987ac1..b9eb6b3149b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -45,13 +45,14 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
-#include "gem/i915_gemfs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_renderstate.h"
+#include "gt/intel_rps.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
@@ -62,20 +63,31 @@
#include "intel_pm.h"
static int
-insert_mappable_node(struct i915_ggtt *ggtt,
- struct drm_mm_node *node, u32 size)
+insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
+ int err;
+
+ err = mutex_lock_interruptible(&ggtt->vm.mutex);
+ if (err)
+ return err;
+
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
- size, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
+ err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
+ size, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+
+ mutex_unlock(&ggtt->vm.mutex);
+
+ return err;
}
static void
-remove_mappable_node(struct drm_mm_node *node)
+remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
+ mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(node);
+ mutex_unlock(&ggtt->vm.mutex);
}
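
With ggtt->vm.mutex now taken inside the helpers, callers such as the GTT pread/pwrite paths below no longer need struct_mutex around node setup and teardown. A condensed sketch of the intended call pattern, as an assumption based on the hunks that follow:

    /* Sketch (assumption): typical lifetime of a mappable scratch node. */
    static int example_use_aperture(struct i915_ggtt *ggtt)
    {
            struct drm_mm_node node;
            int err;

            err = insert_mappable_node(ggtt, &node, PAGE_SIZE);
            if (err)
                    return err;

            /* ... ggtt->vm.insert_page() and aperture access go here ... */

            ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
            remove_mappable_node(ggtt, &node);
            return 0;
    }
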
int
@@ -87,7 +99,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma;
u64 pinned;
- mutex_lock(&ggtt->vm.mutex);
+ if (mutex_lock_interruptible(&ggtt->vm.mutex))
+ return -EINTR;
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
@@ -109,20 +122,24 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
LIST_HEAD(still_in_list);
int ret = 0;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
+ struct i915_address_space *vm = vma->vm;
+
+ ret = -EBUSY;
+ if (!i915_vm_tryopen(vm))
+ break;
+
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
- ret = -EBUSY;
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
+ i915_vm_close(vm);
spin_lock(&obj->vma.lock);
}
list_splice(&still_in_list, &obj->vma.list);
@@ -338,10 +355,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
u64 remain, offset;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = ERR_PTR(-ENODEV);
if (!i915_gem_object_is_tiled(obj))
@@ -351,16 +364,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
- goto out_unlock;
- GEM_BUG_ON(!node.allocated);
+ goto out_rpm;
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -393,7 +404,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -414,17 +425,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
-out_unlock:
+out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return ret;
}
@@ -531,10 +539,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
void __user *user_data;
int ret;
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
- if (ret)
- return ret;
-
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
@@ -544,10 +548,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* using the cache bypass of indirect GGTT access.
*/
wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (!wakeref) {
- ret = -EFAULT;
- goto out_unlock;
- }
+ if (!wakeref)
+ return -EFAULT;
} else {
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(rpm);
@@ -561,16 +563,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
PIN_NOEVICT);
if (!IS_ERR(vma)) {
node.start = i915_ggtt_offset(vma);
- node.allocated = false;
+ node.flags = 0;
} else {
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
if (ret)
goto out_rpm;
- GEM_BUG_ON(!node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&node));
}
- mutex_unlock(&i915->drm.struct_mutex);
-
ret = i915_gem_object_lock_interruptible(obj);
if (ret)
goto out_unpin;
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
@@ -634,18 +634,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
- mutex_lock(&i915->drm.struct_mutex);
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- if (node.allocated) {
+ if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
- remove_mappable_node(&node);
+ remove_mappable_node(ggtt, &node);
} else {
i915_vma_unpin(vma);
}
out_rpm:
intel_runtime_pm_put(rpm, wakeref);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
@@ -887,74 +884,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static long
-wait_for_timelines(struct drm_i915_private *i915,
- unsigned int wait, long timeout)
-{
- struct intel_gt_timelines *timelines = &i915->gt.timelines;
- struct intel_timeline *tl;
- unsigned long flags;
-
- spin_lock_irqsave(&timelines->lock, flags);
- list_for_each_entry(tl, &timelines->active_list, link) {
- struct i915_request *rq;
-
- rq = i915_active_request_get_unlocked(&tl->last_request);
- if (!rq)
- continue;
-
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- /*
- * "Race-to-idle".
- *
- * Switching to the kernel context is often used a synchronous
- * step prior to idling, e.g. in suspend for flushing all
- * current operations to memory before sleeping. These we
- * want to complete as quickly as possible to avoid prolonged
- * stalls, so allow the gpu to boost to maximum clocks.
- */
- if (wait & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq);
-
- timeout = i915_request_wait(rq, wait, timeout);
- i915_request_put(rq);
- if (timeout < 0)
- return timeout;
-
- /* restart after reacquiring the lock */
- spin_lock_irqsave(&timelines->lock, flags);
- tl = list_entry(&timelines->active_list, typeof(*tl), link);
- }
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
- unsigned int flags, long timeout)
-{
- /* If the device is asleep, we have no requests outstanding */
- if (!intel_gt_pm_is_awake(&i915->gt))
- return 0;
-
- GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
- flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
-
- timeout = wait_for_timelines(i915, flags, timeout);
- if (timeout < 0)
- return timeout;
-
- if (flags & I915_WAIT_LOCKED) {
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- i915_retire_requests(i915);
- }
-
- return 0;
-}
-
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -981,8 +910,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
if (i915_gem_object_never_bind_ggtt(obj))
return ERR_PTR(-ENODEV);
@@ -1032,13 +959,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
}
- WARN(i915_vma_is_pinned(vma),
- "bo is already pinned in ggtt with incorrect alignment:"
- " offset=%08x, req.alignment=%llx,"
- " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
- i915_ggtt_offset(vma), alignment,
- !!(flags & PIN_MAPPABLE),
- i915_vma_is_map_and_fenceable(vma));
ret = i915_vma_unbind(vma);
if (ret)
return ERR_PTR(ret);
@@ -1133,128 +1053,7 @@ out:
return err;
}
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
- intel_wakeref_t wakeref;
-
- GEM_TRACE("\n");
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
- /*
- * As we have just resumed the machine and woken the device up from
- * deep PCI sleep (presumably D3_cold), assume the HW has been reset
- * back to defaults, recovering from whatever wedged state we left it
- * in and so worth trying to use the device once more.
- */
- if (intel_gt_is_wedged(&i915->gt))
- intel_gt_unset_wedged(&i915->gt);
-
- /*
- * If we inherit context state from the BIOS or earlier occupants
- * of the GPU, the GPU may be in an inconsistent state when we
- * try to take over. The only way to remove the earlier state
- * is by resetting. However, resetting on earlier gen is tricky as
- * it may impact the display and we are uncertain about the stability
- * of the reset, so this could be applied to even earlier gen.
- */
- intel_gt_sanitize(&i915->gt, false);
-
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
-static void init_unused_ring(struct intel_gt *gt, u32 base)
-{
- struct intel_uncore *uncore = gt->uncore;
-
- intel_uncore_write(uncore, RING_CTL(base), 0);
- intel_uncore_write(uncore, RING_HEAD(base), 0);
- intel_uncore_write(uncore, RING_TAIL(base), 0);
- intel_uncore_write(uncore, RING_START(base), 0);
-}
-
-static void init_unused_rings(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
-
- if (IS_I830(i915)) {
- init_unused_ring(gt, PRB1_BASE);
- init_unused_ring(gt, SRB0_BASE);
- init_unused_ring(gt, SRB1_BASE);
- init_unused_ring(gt, SRB2_BASE);
- init_unused_ring(gt, SRB3_BASE);
- } else if (IS_GEN(i915, 2)) {
- init_unused_ring(gt, SRB0_BASE);
- init_unused_ring(gt, SRB1_BASE);
- } else if (IS_GEN(i915, 3)) {
- init_unused_ring(gt, PRB1_BASE);
- init_unused_ring(gt, PRB2_BASE);
- }
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_gt *gt = &i915->gt;
- int ret;
-
- BUG_ON(!i915->kernel_context);
- ret = intel_gt_terminally_wedged(gt);
- if (ret)
- return ret;
-
- gt->last_init_time = ktime_get();
-
- /* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
- intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
-
- if (IS_HASWELL(i915))
- intel_uncore_write(uncore,
- MI_PREDICATE_RESULT_2,
- IS_HSW_GT3(i915) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
-
- /* Apply the GT workarounds... */
- intel_gt_apply_workarounds(gt);
- /* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(gt, "init");
-
- intel_gt_init_swizzling(gt);
-
- /*
- * At least 830 can leave some of the unused rings
- * "active" (ie. head != tail) after resume which
- * will prevent c3 entry. Makes sure all unused rings
- * are totally idle.
- */
- init_unused_rings(gt);
-
- ret = i915_ppgtt_init_hw(gt);
- if (ret) {
- DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
- goto out;
- }
-
- /* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(&gt->uc);
- if (ret) {
- i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
- goto out;
- }
-
- intel_mocs_init(gt);
-
-out:
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
- return ret;
-}
-
-static int __intel_engines_record_defaults(struct drm_i915_private *i915)
+static int __intel_engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
@@ -1270,7 +1069,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
* from the same default HW values.
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
@@ -1278,7 +1077,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
GEM_BUG_ON(!engine->kernel_context);
engine->serial++; /* force the kernel context switch */
- ce = intel_context_create(i915->kernel_context, engine);
+ ce = intel_context_create(engine->kernel_context->gem_context,
+ engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -1295,15 +1095,6 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
if (err)
goto err_rq;
- /*
-	 * Failing to program the MOCS is non-fatal. The system will not
- * run at peak performance. So warn the user and carry on.
- */
- err = intel_mocs_emit(rq);
- if (err)
- dev_notice(i915->drm.dev,
- "Failed to program MOCS registers; expect performance issues.\n");
-
err = intel_renderstate_emit(rq);
if (err)
goto err_rq;
@@ -1316,7 +1107,7 @@ err_rq:
}
/* Flush the default context image to memory, and enable powersaving. */
- if (!i915_gem_load_power_context(i915)) {
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
err = -EIO;
goto out;
}
@@ -1375,7 +1166,7 @@ out:
* this is by declaring ourselves wedged.
*/
if (err)
- intel_gt_set_wedged(&i915->gt);
+ intel_gt_set_wedged(gt);
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct intel_context *ce;
@@ -1392,18 +1183,7 @@ out:
return err;
}
-static int
-i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
-{
- return intel_gt_init_scratch(&i915->gt, size);
-}
-
-static void i915_gem_fini_scratch(struct drm_i915_private *i915)
-{
- intel_gt_fini_scratch(&i915->gt);
-}
-
-static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
+static int intel_engines_verify_workarounds(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1412,7 +1192,7 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
if (intel_engine_verify_workarounds(engine, "load"))
err = -EIO;
}
@@ -1444,7 +1224,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_init_ggtt(dev_priv);
@@ -1453,36 +1232,29 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_init_scratch(dev_priv,
- IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
- if (ret) {
- GEM_BUG_ON(ret == -EIO);
- goto err_ggtt;
- }
+ intel_gt_init(&dev_priv->gt);
- ret = intel_engines_setup(dev_priv);
+ ret = intel_engines_setup(&dev_priv->gt);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_contexts(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_scratch;
}
- ret = intel_engines_init(dev_priv);
+ ret = intel_engines_init(&dev_priv->gt);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_context;
}
- intel_init_gt_powersave(dev_priv);
-
intel_uc_init(&dev_priv->gt.uc);
- ret = i915_gem_init_hw(dev_priv);
+ ret = intel_gt_init_hw(&dev_priv->gt);
if (ret)
goto err_uc_init;
@@ -1502,24 +1274,23 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
*/
intel_init_clock_gating(dev_priv);
- ret = intel_engines_verify_workarounds(dev_priv);
+ ret = intel_engines_verify_workarounds(&dev_priv->gt);
if (ret)
goto err_gt;
- ret = __intel_engines_record_defaults(dev_priv);
+ ret = __intel_engines_record_defaults(&dev_priv->gt);
if (ret)
goto err_gt;
- ret = i915_inject_load_error(dev_priv, -ENODEV);
+ ret = i915_inject_probe_error(dev_priv, -ENODEV);
if (ret)
goto err_gt;
- ret = i915_inject_load_error(dev_priv, -EIO);
+ ret = i915_inject_probe_error(dev_priv, -EIO);
if (ret)
goto err_gt;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@@ -1530,32 +1301,25 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* driver doesn't explode during runtime.
*/
err_gt:
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged_on_init(&dev_priv->gt);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
if (ret != -EIO) {
intel_uc_fini(&dev_priv->gt.uc);
- intel_cleanup_gt_powersave(dev_priv);
- intel_engines_cleanup(dev_priv);
+ intel_engines_cleanup(&dev_priv->gt);
}
err_context:
if (ret != -EIO)
- i915_gem_contexts_fini(dev_priv);
+ i915_gem_driver_release__contexts(dev_priv);
err_scratch:
- i915_gem_fini_scratch(dev_priv);
-err_ggtt:
+ intel_gt_driver_release(&dev_priv->gt);
err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret != -EIO) {
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
@@ -1564,8 +1328,6 @@ err_unlock:
}
if (ret == -EIO) {
- mutex_lock(&dev_priv->drm.struct_mutex);
-
/*
* Allow engines or uC initialisation to fail by marking the GPU
* as wedged. But we only want to do this when the GPU is angry,
@@ -1580,10 +1342,8 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_gem_restore_gtt_mappings(dev_priv);
- i915_gem_restore_fences(dev_priv);
+ i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
i915_gem_drain_freed_objects(dev_priv);
@@ -1604,48 +1364,35 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- GEM_BUG_ON(dev_priv->gt.awake);
-
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
- intel_disable_gt_powersave(dev_priv);
+ intel_gt_driver_remove(&dev_priv->gt);
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(&dev_priv->gt.uc);
intel_uc_fini(&dev_priv->gt.uc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->drm.struct_mutex);
- intel_engines_cleanup(dev_priv);
- i915_gem_contexts_fini(dev_priv);
- i915_gem_fini_scratch(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_engines_cleanup(&dev_priv->gt);
+ i915_gem_driver_release__contexts(dev_priv);
+ intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
- intel_cleanup_gt_powersave(dev_priv);
-
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
-void i915_gem_init_mmio(struct drm_i915_private *i915)
-{
- i915_gem_sanitize(i915);
+ WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1660,20 +1407,11 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
i915_gem_init__objects(i915);
}
-int i915_gem_init_early(struct drm_i915_private *dev_priv)
+void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err;
-
i915_gem_init__mm(dev_priv);
- i915_gem_init__pm(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
-
- err = i915_gemfs_init(dev_priv);
- if (err)
- DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
-
- return 0;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -1682,8 +1420,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
-
- i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 6795f1daa3d5..f6f9675848b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -37,10 +37,8 @@ struct drm_i915_private;
#define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
#define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
- pr_err("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
- GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
- __func__, __LINE__, __stringify(condition)); \
+ GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
+ __func__, __LINE__, __stringify(condition)); \
BUG(); \
} \
} while(0)
@@ -66,11 +64,16 @@ struct drm_i915_private;
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#define GEM_TRACE_ERR(...) do { \
+ pr_err(__VA_ARGS__); \
+ trace_printk(__VA_ARGS__); \
+} while (0)
#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
#define GEM_TRACE_DUMP_ON(expr) \
do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
#else
#define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_ERR(...) do { } while (0)
#define GEM_TRACE_DUMP() do { } while (0)
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
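
The new `GEM_TRACE_ERR` above fans a single message out to both `pr_err()` and `trace_printk()`, so a `GEM_BUG_ON` failure lands in the kernel log and the trace buffer. A minimal userspace sketch of the same dual-sink macro, using hypothetical `demo_err`/`demo_trace` stand-ins for the kernel sinks:

```c
#include <stdio.h>

/* Stand-ins for the kernel's pr_err()/trace_printk() sinks (assumption). */
#define demo_err(...)   fprintf(stderr, __VA_ARGS__)
#define demo_trace(...) fprintf(stdout, __VA_ARGS__)

/*
 * Mirror GEM_TRACE_ERR: emit the same message to both sinks, wrapped
 * in do { } while (0) so the macro behaves as one statement after an
 * unbraced if.
 */
#define DEMO_TRACE_ERR(...) do { \
	demo_err(__VA_ARGS__); \
	demo_trace(__VA_ARGS__); \
} while (0)

int main(void)
{
	if (1)
		DEMO_TRACE_ERR("%s:%d failed\n", __func__, __LINE__);
	return 0;
}
```
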
@@ -83,6 +86,11 @@ static inline void tasklet_lock(struct tasklet_struct *t)
cpu_relax();
}
+static inline bool tasklet_is_locked(const struct tasklet_struct *t)
+{
+ return test_bit(TASKLET_STATE_RUN, &t->state);
+}
+
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
if (!atomic_fetch_inc(&t->count))
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 52c86c6e0673..7e62c310290f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -29,6 +29,7 @@
#include <drm/i915_drm.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
bool fail_if_busy:1;
} igt_evict_ctl;)
-static int ggtt_flush(struct drm_i915_private *i915)
+static int ggtt_flush(struct intel_gt *gt)
{
/*
* Not everything in the GGTT is tracked via vma (otherwise we
@@ -46,10 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- return i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
static bool
@@ -70,7 +68,7 @@ mark_free(struct drm_mm_scan *scan,
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
- * @cache_level: cache_level for the desired space
+ * @color: color for the desired space
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
@@ -91,11 +89,10 @@ mark_free(struct drm_mm_scan *scan,
int
i915_gem_evict_something(struct i915_address_space *vm,
u64 min_size, u64 alignment,
- unsigned cache_level,
+ unsigned long color,
u64 start, u64 end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = vm->i915;
struct drm_mm_scan scan;
struct list_head eviction_list;
struct i915_vma *vma, *next;
@@ -104,7 +101,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct i915_vma *active;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -124,17 +121,10 @@ i915_gem_evict_something(struct i915_address_space *vm,
if (flags & PIN_MAPPABLE)
mode = DRM_MM_INSERT_LOW;
drm_mm_scan_init_with_range(&scan, &vm->mm,
- min_size, alignment, cache_level,
+ min_size, alignment, color,
start, end, mode);
- /*
- * Retire before we search the active list. Although we have
- * reasonable accuracy in our retirement lists, we may have
- * a stray pin (preventing eviction) that can only be resolved by
- * retiring.
- */
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(dev_priv);
+ intel_gt_retire_requests(vm->gt);
search_again:
active = NULL;
@@ -207,7 +197,7 @@ search_again:
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
- ret = ggtt_flush(dev_priv);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
@@ -235,12 +225,12 @@ found:
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
vma = container_of(node, struct i915_vma, node);
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -266,25 +256,23 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
u64 start = target->start;
u64 end = start + target->size;
struct i915_vma *vma, *next;
- bool check_color;
int ret = 0;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
trace_i915_gem_evict_node(vm, target, flags);
- /* Retire before we search the active list. Although we have
+ /*
+ * Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
*/
- if (!(flags & PIN_NONBLOCK))
- i915_retire_requests(vm->i915);
+ intel_gt_retire_requests(vm->gt);
- check_color = vm->mm.color_adjust;
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
if (start)
start -= I915_GTT_PAGE_SIZE;
@@ -301,7 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
- GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
@@ -310,7 +298,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	 * abut and conflict. If they are in conflict, then we evict
* those as well to make room for our guard pages.
*/
- if (check_color) {
+ if (i915_vm_has_cache_coloring(vm)) {
if (node->start + node->size == target->start) {
if (node->color == target->color)
continue;
@@ -351,7 +339,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
@@ -375,7 +363,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
struct i915_vma *vma, *next;
int ret;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
trace_i915_gem_evict_vm(vm);
/* Switch back to the default context in order to unpin
@@ -384,13 +372,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
- ret = ggtt_flush(vm->i915);
+ ret = ggtt_flush(vm->gt);
if (ret)
return ret;
}
INIT_LIST_HEAD(&eviction_list);
- mutex_lock(&vm->mutex);
list_for_each_entry(vma, &vm->bound_list, vm_link) {
if (i915_vma_is_pinned(vma))
continue;
@@ -398,13 +385,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
}
- mutex_unlock(&vm->mutex);
ret = 0;
list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
if (ret == 0)
- ret = i915_vma_unbind(vma);
+ ret = __i915_vma_unbind(vma);
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 615a9f4ef30c..71efccfde122 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -59,6 +59,16 @@
#define pipelined 0
+static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.i915;
+}
+
+static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
+{
+ return fence->ggtt->vm.gt->uncore;
+}
+
static void i965_write_fence_reg(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
@@ -66,7 +76,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
int fence_pitch_shift;
u64 val;
- if (INTEL_GEN(fence->i915) >= 6) {
+ if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -95,7 +105,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
/*
* To w/a incoherency with non-atomic 64-bit register updates,
@@ -132,7 +142,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
GEM_BUG_ON(!is_power_of_2(vma->fence_size));
GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
- if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
+ if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
else
stride /= 512;
@@ -148,7 +158,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -180,7 +190,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
}
if (!pipelined) {
- struct intel_uncore *uncore = &fence->i915->uncore;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
@@ -191,15 +201,17 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence,
static void fence_write(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = fence_to_i915(fence);
+
/*
* Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
- if (IS_GEN(fence->i915, 2))
+ if (IS_GEN(i915, 2))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN(fence->i915, 3))
+ else if (IS_GEN(i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -215,6 +227,8 @@ static void fence_write(struct i915_fence_reg *fence,
static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
+ struct i915_ggtt *ggtt = fence->ggtt;
+ struct intel_uncore *uncore = fence_to_uncore(fence);
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
@@ -230,14 +244,15 @@ static int fence_update(struct i915_fence_reg *fence,
i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
- ret = i915_active_wait(&vma->active);
+ ret = i915_vma_sync(vma);
if (ret)
return ret;
}
old = xchg(&fence->vma, NULL);
if (old) {
- ret = i915_active_wait(&old->active);
+ /* XXX Ideally we would move the waiting to outside the mutex */
+ ret = i915_vma_sync(old);
if (ret) {
fence->vma = old;
return ret;
@@ -255,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence,
old->fence = NULL;
}
- list_move(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move(&fence->link, &ggtt->fence_list);
}
/*
@@ -268,7 +283,7 @@ static int fence_update(struct i915_fence_reg *fence,
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
- wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
@@ -279,10 +294,10 @@ static int fence_update(struct i915_fence_reg *fence,
if (vma) {
vma->fence = fence;
- list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
+ list_move_tail(&fence->link, &ggtt->fence_list);
}
- intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
}
@@ -311,11 +326,11 @@ int i915_vma_revoke_fence(struct i915_vma *vma)
return fence_update(fence, NULL);
}
-static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
+static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *fence;
- list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
+ list_for_each_entry(fence, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (atomic_read(&fence->pin_count))
@@ -325,19 +340,21 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(i915))
+ if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
}
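
`fence_find()` above is a plain LRU scan: return the first fence register on the list whose pin count is zero, and fall back to the `-EAGAIN`/`-EDEADLK` paths when every fence is pinned. A hedged standalone sketch of that walk; `demo_fence` is an invented simplification of `struct i915_fence_reg`:

```c
#include <stddef.h>
#include <stdio.h>

/* Invented simplification of struct i915_fence_reg (assumption). */
struct demo_fence {
	struct demo_fence *next;	/* fence_list in LRU order */
	int pin_count;			/* atomic_t in the kernel */
	int id;
};

/*
 * Mirror fence_find(): walk the LRU list and return the first fence
 * that is not pinned; NULL maps to the kernel's error-pointer paths.
 */
static struct demo_fence *demo_fence_find(struct demo_fence *lru)
{
	struct demo_fence *fence;

	for (fence = lru; fence; fence = fence->next)
		if (fence->pin_count == 0)
			return fence;

	return NULL;	/* all fences pinned */
}

int main(void)
{
	struct demo_fence f2 = { NULL, 0, 2 };
	struct demo_fence f1 = { &f2, 1, 1 };	/* pinned, skipped */
	struct demo_fence *found = demo_fence_find(&f1);

	printf("found fence %d\n", found ? found->id : -1);
	return 0;
}
```
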
-static int __i915_vma_pin_fence(struct i915_vma *vma)
+int __i915_vma_pin_fence(struct i915_vma *vma)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
+ lockdep_assert_held(&vma->vm->mutex);
+
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
@@ -348,7 +365,7 @@ static int __i915_vma_pin_fence(struct i915_vma *vma)
return 0;
}
} else if (set) {
- fence = fence_find(vma->vm->i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return PTR_ERR(fence);
@@ -399,7 +416,7 @@ int i915_vma_pin_fence(struct i915_vma *vma)
* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -415,14 +432,13 @@ int i915_vma_pin_fence(struct i915_vma *vma)
/**
* i915_reserve_fence - Reserve a fence for vGPU
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* This function walks the fence regs looking for a free one and remove
* it from the fence_list. It is used to reserve fence for vGPU to use.
*/
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_fence_reg *fence;
int count;
int ret;
@@ -436,7 +452,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
if (count <= 1)
return ERR_PTR(-ENOSPC);
- fence = fence_find(i915);
+ fence = fence_find(ggtt);
if (IS_ERR(fence))
return fence;
@@ -460,7 +476,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
- struct i915_ggtt *ggtt = &fence->i915->ggtt;
+ struct i915_ggtt *ggtt = fence->ggtt;
lockdep_assert_held(&ggtt->vm.mutex);
@@ -469,19 +485,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence)
/**
* i915_gem_restore_fences - restore fence state
- * @i915: i915 device private
+ * @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
-void i915_gem_restore_fences(struct drm_i915_private *i915)
+void i915_gem_restore_fences(struct i915_ggtt *ggtt)
{
int i;
rcu_read_lock(); /* keep obj alive as we dereference */
- for (i = 0; i < i915->ggtt.num_fences; i++) {
- struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
+ for (i = 0; i < ggtt->num_fences; i++) {
+ struct i915_fence_reg *reg = &ggtt->fence_regs[i];
struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -547,15 +563,16 @@ void i915_gem_restore_fences(struct drm_i915_private *i915)
*/
/**
- * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
- * @i915: i915 device private
+ * detect_bit_6_swizzle - detect bit 6 swizzling pattern
+ * @ggtt: Global GTT
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
-static void detect_bit_6_swizzle(struct drm_i915_private *i915)
+static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -717,8 +734,8 @@ static void detect_bit_6_swizzle(struct drm_i915_private *i915)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
- i915->mm.bit_6_swizzle_x = swizzle_x;
- i915->mm.bit_6_swizzle_y = swizzle_y;
+ i915->ggtt.bit_6_swizzle_x = swizzle_x;
+ i915->ggtt.bit_6_swizzle_y = swizzle_y;
}
/*
@@ -819,17 +836,20 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct intel_uncore *uncore = ggtt->vm.gt->uncore;
int num_fences;
int i;
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);
+ intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
- detect_bit_6_swizzle(i915);
+ detect_bit_6_swizzle(ggtt);
- if (INTEL_GEN(i915) >= 7 &&
- !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
+ if (!i915_ggtt_has_aperture(ggtt))
+ num_fences = 0;
+ else if (INTEL_GEN(i915) >= 7 &&
+ !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (INTEL_GEN(i915) >= 4 ||
IS_I945G(i915) || IS_I945GM(i915) ||
@@ -839,20 +859,20 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
num_fences = 8;
if (intel_vgpu_active(i915))
- num_fences = intel_uncore_read(&i915->uncore,
+ num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
- fence->i915 = i915;
+ fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(ggtt);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
@@ -861,7 +881,7 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
if (INTEL_GEN(i915) < 5 ||
- i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 99866fb9d94f..7bd521cd7cd7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
struct drm_i915_gem_object;
-struct drm_i915_private;
struct i915_ggtt;
struct i915_vma;
struct intel_gt;
@@ -39,7 +38,7 @@ struct sg_table;
struct i915_fence_reg {
struct list_head link;
- struct drm_i915_private *i915;
+ struct i915_ggtt *ggtt;
struct i915_vma *vma;
atomic_t pin_count;
int id;
@@ -55,10 +54,10 @@ struct i915_fence_reg {
};
/* i915_gem_fence_reg.c */
-struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915);
+struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt);
void i915_unreserve_fence(struct i915_fence_reg *fence);
-void i915_gem_restore_fences(struct drm_i915_private *i915);
+void i915_gem_restore_fences(struct i915_ggtt *ggtt);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b1a7a8b9b46a..6239a9adbf14 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -38,6 +38,7 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -132,9 +133,15 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
gen6_ggtt_invalidate(ggtt);
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+
+ if (INTEL_GEN(i915) >= 12)
+ intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ else
+ intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -144,16 +151,18 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
- u32 unused)
+ u32 flags)
{
u32 pte_flags;
int err;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ if (flags & I915_VMA_ALLOC) {
err = vma->vm->allocate_va_range(vma->vm,
vma->node.start, vma->size);
if (err)
return err;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
/* Applicable to VLV, and gen8+ */
@@ -161,14 +170,17 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+ wmb();
return 0;
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
static int ppgtt_set_pages(struct i915_vma *vma)
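
`ppgtt_bind_vma()`/`ppgtt_unbind_vma()` above now pair an allocate-once `I915_VMA_ALLOC` flag on bind with `test_and_clear_bit()` on unbind, so the PTE range is cleared only by a vma that actually allocated it. A single-threaded sketch of that pattern; the kernel's bit ops are atomic, and the `demo_*` types are invented:

```c
#include <stdbool.h>
#include <stdio.h>

#define DEMO_VMA_ALLOC (1u << 0)

struct demo_vma {
	unsigned int flags;
	bool has_ptes;
};

/* Bind: allocate page-table backing only on an ALLOC-flagged bind. */
static int demo_bind(struct demo_vma *vma, unsigned int flags)
{
	if (flags & DEMO_VMA_ALLOC) {
		vma->has_ptes = true;		/* allocate_va_range() stand-in */
		vma->flags |= DEMO_VMA_ALLOC;	/* remember we own the range */
	}
	return 0;
}

/* Unbind: clear the range only if this vma allocated it. */
static void demo_unbind(struct demo_vma *vma)
{
	if (vma->flags & DEMO_VMA_ALLOC) {
		vma->flags &= ~DEMO_VMA_ALLOC;
		vma->has_ptes = false;		/* clear_range() stand-in */
	}
}

int main(void)
{
	struct demo_vma vma = { 0, false };

	demo_bind(&vma, DEMO_VMA_ALLOC);
	demo_unbind(&vma);
	demo_unbind(&vma);	/* second unbind is a no-op */
	printf("ptes live: %d\n", vma.has_ptes);
	return 0;
}
```
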
@@ -496,22 +508,26 @@ static void i915_address_space_fini(struct i915_address_space *vm)
mutex_destroy(&vm->mutex);
}
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
+void __i915_vm_close(struct i915_address_space *vm)
{
- struct list_head *phases[] = {
- &vm->bound_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
+ struct i915_vma *vma, *vn;
+
+ mutex_lock(&vm->mutex);
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
- mutex_lock(&vm->i915->drm.struct_mutex);
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
+ /* Keep the obj (and hence the vma) alive as _we_ destroy it */
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ i915_vma_destroy(vma);
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- i915_vma_destroy(vma);
+ i915_gem_object_put(obj);
}
- mutex_unlock(&vm->i915->drm.struct_mutex);
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ mutex_unlock(&vm->mutex);
}
static void __i915_vm_release(struct work_struct *work)
@@ -519,11 +535,6 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, rcu.work);
- ppgtt_destroy_vma(vm);
-
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -538,7 +549,6 @@ void i915_vm_release(struct kref *kref)
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
- vm->closed = true;
queue_rcu_work(vm->i915->wq, &vm->rcu);
}
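
`i915_vm_release()` above defers teardown through `queue_rcu_work()` rather than destroying the address space inline under the final `kref_put()`, so RCU readers drain before `__i915_vm_release()` runs. A userspace sketch of that refcount-plus-deferred-release shape, with a plain callback standing in for the RCU work item; all `demo_*` names are hypothetical:

```c
#include <stdatomic.h>
#include <stdio.h>

struct demo_vm {
	atomic_int ref;
	void (*release)(struct demo_vm *vm);	/* deferred-work stand-in */
};

static void demo_vm_get(struct demo_vm *vm)
{
	atomic_fetch_add(&vm->ref, 1);
}

/*
 * Mirror i915_vm_put()/i915_vm_release(): the last reference does not
 * tear the vm down inline; it hands cleanup to a deferred context (the
 * kernel queues RCU work so readers finish first).
 */
static void demo_vm_put(struct demo_vm *vm)
{
	if (atomic_fetch_sub(&vm->ref, 1) == 1)
		vm->release(vm);
}

static void demo_release(struct demo_vm *vm)
{
	printf("vm %p released\n", (void *)vm);
}

int main(void)
{
	struct demo_vm vm = { .release = demo_release };

	atomic_init(&vm.ref, 1);
	demo_vm_get(&vm);
	demo_vm_put(&vm);
	demo_vm_put(&vm);	/* final put triggers deferred release */
	return 0;
}
```
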
@@ -546,6 +556,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ atomic_set(&vm->open, 1);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -562,7 +573,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
stash_init(&vm->free_pages);
- INIT_LIST_HEAD(&vm->unbound_list);
INIT_LIST_HEAD(&vm->bound_list);
}
@@ -816,17 +826,6 @@ release_pd_entry(struct i915_page_directory * const pd,
return free;
}
-/*
- * PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
-{
- ppgtt->pd_dirty_engines = ALL_ENGINES;
-}
-
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *dev_priv = ppgtt->vm.i915;
@@ -1367,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (vm->has_read_only &&
vm->i915->kernel_context &&
vm->i915->kernel_context->vm) {
- struct i915_address_space *clone = vm->i915->kernel_context->vm;
+ struct i915_address_space *clone =
+ rcu_dereference_protected(vm->i915->kernel_context->vm,
+ true); /* static */
GEM_BUG_ON(!clone->has_read_only);
@@ -1422,6 +1423,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
set_pd_entry(pd, idx, pde);
atomic_inc(px_used(pde)); /* keep pinned */
}
+ wmb();
return 0;
}
@@ -1489,8 +1491,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
*
* Gen11 has HSDES#:1807136187 unresolved. Disable ro support
* for now.
+ *
+ * Gen12 has inherited the same read-only fault issue from gen11.
*/
- ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
+ ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12);
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1509,13 +1513,12 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
}
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- if (intel_vgpu_active(i915)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_free_pd;
}
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
@@ -1566,7 +1569,7 @@ static void gen7_ppgtt_enable(struct intel_gt *gt)
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
/* GFX_MODE is per-ring on gen7+ */
ENGINE_WRITE(engine,
RING_MODE_GEN7,
@@ -1729,10 +1732,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
}
spin_unlock(&pd->lock);
- if (flush) {
- mark_tlbs_dirty(&ppgtt->base);
+ if (flush)
gen6_ggtt_invalidate(vm->gt->ggtt);
- }
goto out;
@@ -1786,15 +1787,13 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct drm_i915_private *i915 = vm->i915;
- /* FIXME remove the struct_mutex to bring the locking under control */
- mutex_lock(&i915->drm.struct_mutex);
i915_vma_destroy(ppgtt->vma);
- mutex_unlock(&i915->drm.struct_mutex);
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
+
+ mutex_destroy(&ppgtt->pin_mutex);
kfree(ppgtt->base.pd);
}
@@ -1827,7 +1826,6 @@ static int pd_vma_bind(struct i915_vma *vma,
gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
- mark_tlbs_dirty(&ppgtt->base);
gen6_ggtt_invalidate(ggtt);
return 0;
@@ -1866,7 +1864,6 @@ static const struct i915_vma_ops pd_vma_ops = {
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
- struct drm_i915_private *i915 = ppgtt->base.vm.i915;
struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_vma *vma;
@@ -1877,33 +1874,30 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &vma->active, NULL, NULL);
+ i915_active_init(&vma->active, NULL, NULL);
- vma->vm = &ggtt->vm;
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(&ggtt->vm);
vma->ops = &pd_vma_ops;
vma->private = ppgtt;
vma->size = size;
vma->fence_size = size;
- vma->flags = I915_VMA_GGTT;
+ atomic_set(&vma->flags, I915_VMA_GGTT);
vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
INIT_LIST_HEAD(&vma->obj_link);
INIT_LIST_HEAD(&vma->closed_link);
- mutex_lock(&vma->vm->mutex);
- list_add(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
-
return vma;
}
int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- int err;
+ int err = 0;
- GEM_BUG_ON(ppgtt->base.vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -1911,24 +1905,26 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base)
* (When vma->pin_count becomes atomic, I expect we will naturally
* need a larger, unpacked, type and kill this redundancy.)
*/
- if (ppgtt->pin_count++)
+ if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
return 0;
+ if (mutex_lock_interruptible(&ppgtt->pin_mutex))
+ return -EINTR;
+
/*
* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- err = i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
- if (err)
- goto unpin;
-
- return 0;
+ if (!atomic_read(&ppgtt->pin_count)) {
+ err = i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
+ }
+ if (!err)
+ atomic_inc(&ppgtt->pin_count);
+ mutex_unlock(&ppgtt->pin_mutex);
-unpin:
- ppgtt->pin_count = 0;
return err;
}
@@ -1936,22 +1932,20 @@ void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- GEM_BUG_ON(!ppgtt->pin_count);
- if (--ppgtt->pin_count)
- return;
-
- i915_vma_unpin(ppgtt->vma);
+ GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
+ if (atomic_dec_and_test(&ppgtt->pin_count))
+ i915_vma_unpin(ppgtt->vma);
}
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
- if (!ppgtt->pin_count)
+ if (!atomic_read(&ppgtt->pin_count))
return;
- ppgtt->pin_count = 0;
i915_vma_unpin(ppgtt->vma);
+ atomic_set(&ppgtt->pin_count, 0);
}
static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
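
`gen6_ppgtt_pin()` above now uses `atomic_add_unless()` as a lock-free fast path and falls back to `pin_mutex` so the first, expensive pin runs exactly once; unpin drops the count and releases on the final reference. A compilable approximation with C11 atomics and pthreads; `demo_pin`/`demo_unpin` are invented stand-ins:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pin_count;
static pthread_mutex_t pin_mutex = PTHREAD_MUTEX_INITIALIZER;

/* atomic_add_unless(v, 1, 0): increment only if the count is non-zero. */
static int add_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;

	return 0;
}

/*
 * Lock-free fast path when already pinned; mutex-serialised slow path
 * so the expensive first pin (i915_vma_pin() in the kernel) runs once.
 */
static int demo_pin(void)
{
	int err = 0;

	if (add_unless_zero(&pin_count))
		return 0;

	pthread_mutex_lock(&pin_mutex);
	if (atomic_load(&pin_count) == 0)
		err = 0;	/* expensive one-time pin stand-in */
	if (!err)
		atomic_fetch_add(&pin_count, 1);
	pthread_mutex_unlock(&pin_mutex);

	return err;
}

static void demo_unpin(void)
{
	if (atomic_fetch_sub(&pin_count, 1) == 1)
		;		/* last unpin: release stand-in */
}

int main(void)
{
	demo_pin();
	demo_pin();	/* takes the lock-free fast path */
	demo_unpin();
	demo_unpin();
	printf("pin_count=%d\n", atomic_load(&pin_count));
	return 0;
}
```
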
@@ -1964,9 +1958,12 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ppgtt->pin_mutex);
+
ppgtt_init(&ppgtt->base, &i915->gt);
ppgtt->base.vm.top = 1;
+ ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
@@ -2023,7 +2020,7 @@ static void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(i915) >= 9)
+ else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
@@ -2202,7 +2199,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_dma(addr, sgt_iter, vma->pages)
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
/*
@@ -2243,7 +2240,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
struct sgt_iter iter;
dma_addr_t addr;
- for_each_sgt_dma(addr, iter, vma->pages)
+ for_each_sgt_daddr(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
/*
@@ -2448,7 +2445,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
*/
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -2478,14 +2475,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
- if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+ if (flags & I915_VMA_ALLOC) {
ret = alias->vm.allocate_va_range(&alias->vm,
vma->node.start,
vma->size);
if (ret)
return ret;
+
+ set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
}
+ GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
+ __i915_vma_flags(vma)));
alias->vm.insert_entries(&alias->vm, vma,
cache_level, pte_flags);
}
@@ -2506,7 +2507,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- if (vma->flags & I915_VMA_GLOBAL_BIND) {
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
struct i915_address_space *vm = vma->vm;
intel_wakeref_t wakeref;
@@ -2514,7 +2515,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
vm->clear_range(vm, vma->node.start, vma->size);
}
- if (vma->flags & I915_VMA_LOCAL_BIND) {
+ if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
struct i915_address_space *vm =
&i915_vm_to_ggtt(vma->vm)->alias->vm;
@@ -2530,7 +2531,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+ /* XXX This does not prevent more requests being submitted! */
+ if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
+ -MAX_SCHEDULE_TIMEOUT)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -2555,12 +2558,12 @@ static int ggtt_set_pages(struct i915_vma *vma)
return 0;
}
-static void i915_gtt_color_adjust(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
+static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
{
- if (node->allocated && node->color != color)
+ if (i915_node_color_differs(node, color))
*start += I915_GTT_PAGE_SIZE;
/* Also leave a space between the unallocated reserved node after the
@@ -2598,6 +2601,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
ggtt->alias = ppgtt;
+ ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
@@ -2614,22 +2618,16 @@ err_ppgtt:
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_ppgtt *ppgtt;
- mutex_lock(&i915->drm.struct_mutex);
-
ppgtt = fetch_and_zero(&ggtt->alias);
if (!ppgtt)
- goto out;
+ return;
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
-out:
- mutex_unlock(&i915->drm.struct_mutex);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -2661,7 +2659,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
ggtt_release_guc_top(ggtt);
- drm_mm_remove_node(&ggtt->error_capture);
+ if (drm_mm_node_allocated(&ggtt->error_capture))
+ drm_mm_remove_node(&ggtt->error_capture);
}
static int init_ggtt(struct i915_ggtt *ggtt)
@@ -2692,13 +2691,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
+ if (ggtt->mappable_end) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -2746,35 +2747,33 @@ int i915_init_ggtt(struct drm_i915_private *i915)
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_vma *vma, *vn;
- ggtt->vm.closed = true;
+ atomic_set(&ggtt->vm.open, 0);
 	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
- flush_workqueue(i915->wq);
+ flush_workqueue(ggtt->vm.i915->wq);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
- WARN_ON(i915_vma_unbind(vma));
+ WARN_ON(__i915_vma_unbind(vma));
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
ggtt_release_guc_top(ggtt);
-
- if (drm_mm_initialized(&ggtt->vm.mm)) {
- intel_vgt_deballoon(ggtt);
- i915_address_space_fini(&ggtt->vm);
- }
+ intel_vgt_deballoon(ggtt);
ggtt->vm.cleanup(&ggtt->vm);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
+ i915_address_space_fini(&ggtt->vm);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->iomap);
+
+ if (ggtt->iomap.size)
+ io_mapping_fini(&ggtt->iomap);
}
/**
@@ -2794,8 +2793,6 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
-
- i915_gem_cleanup_stolen(i915);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2873,35 +2870,51 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
- I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
- I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
- I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
- I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
-}
-
-static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
+}
+
+static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+{
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(0),
+ GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(1),
+ GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(2),
+ GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(3),
+ GEN8_PPAT_UC);
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(4),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(5),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(6),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ intel_uncore_write(uncore,
+ GEN10_PAT_INDEX(7),
+ GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
@@ -2914,11 +2927,11 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
@@ -2950,8 +2963,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
- I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2962,18 +2975,26 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
-static void setup_private_pat(struct drm_i915_private *dev_priv)
+static void setup_private_pat(struct intel_uncore *uncore)
{
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
+ struct drm_i915_private *i915 = uncore->i915;
- if (INTEL_GEN(dev_priv) >= 12)
- tgl_setup_private_ppat(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ if (INTEL_GEN(i915) >= 12)
+ tgl_setup_private_ppat(uncore);
+ else if (INTEL_GEN(i915) >= 10)
+ cnl_setup_private_ppat(uncore);
+ else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
+ chv_setup_private_ppat(uncore);
else
- bdw_setup_private_ppat(dev_priv);
+ bdw_setup_private_ppat(uncore);
+}
+
+static struct resource pci_resource(struct pci_dev *pdev, int bar)
+{
+ return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -2985,10 +3006,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->gmadr =
- (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ if (!IS_DGFX(dev_priv)) {
+ ggtt->gmadr = pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3029,7 +3050,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.pte_encode = gen8_pte_encode;
- setup_private_pat(dev_priv);
+ setup_private_pat(ggtt->vm.gt->uncore);
return ggtt_probe_common(ggtt, size);
}
@@ -3200,9 +3221,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
- int ret = 0;
-
- mutex_lock(&i915->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
@@ -3212,24 +3230,23 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt)
ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
- ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
-
- if (!io_mapping_init_wc(&ggtt->iomap,
- ggtt->gmadr.start,
- ggtt->mappable_end)) {
- ggtt->vm.cleanup(&ggtt->vm);
- ret = -EIO;
- goto out;
- }
+ ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
+
+ if (ggtt->mappable_end) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
+ return -EIO;
+ }
- ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
+ ggtt->mappable_end);
+ }
i915_ggtt_init_fences(ggtt);
-out:
- mutex_unlock(&i915->drm.struct_mutex);
-
- return ret;
+ return 0;
}
/**
@@ -3251,19 +3268,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- /*
- * Initialise stolen early so that we may reserve preallocated
- * objects for the BIOS to KMS transition.
- */
- ret = i915_gem_init_stolen(dev_priv);
- if (ret)
- goto out_gtt_cleanup;
-
return 0;
-
-out_gtt_cleanup:
- dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
- return ret;
}
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
@@ -3301,6 +3306,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
bool flush = false;
+ int open;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
@@ -3308,33 +3314,31 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
+
+ /* Skip rewriting PTE on VMA unbind. */
+ open = atomic_xchg(&ggtt->vm.open, 0);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
- if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+ if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
- mutex_unlock(&ggtt->vm.mutex);
-
- if (!i915_vma_unbind(vma))
- goto lock;
+ if (!__i915_vma_unbind(vma))
+ continue;
+ clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
- PIN_UPDATE));
+ PIN_GLOBAL, NULL));
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
-
-lock:
- mutex_lock(&ggtt->vm.mutex);
}
- ggtt->vm.closed = false;
+ atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
mutex_unlock(&ggtt->vm.mutex);
@@ -3345,10 +3349,12 @@ lock:
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
{
- ggtt_restore_mappings(&i915->ggtt);
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ ggtt_restore_mappings(ggtt);
if (INTEL_GEN(i915) >= 8)
- setup_private_pat(i915);
+ setup_private_pat(ggtt->vm.gt->uncore);
}
static struct scatterlist *
@@ -3726,7 +3732,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
u64 offset;
int err;
- lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vm->mutex);
+
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b97a47fc7a68..402283ce2864 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -148,8 +148,8 @@ typedef u64 gen8_pte_t;
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
-#define for_each_sgt_dma(__dmap, __iter, __sgt) \
- __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
+#define for_each_sgt_daddr(__dp, __iter, __sgt) \
+ __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
struct intel_remapped_plane_info {
/* in gtt pages */
@@ -305,7 +305,16 @@ struct i915_address_space {
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
- bool closed;
+ unsigned int bind_async_flags;
+
+ /*
+ * Each active user context has its own address space (in full-ppgtt).
+ * Since the vm may be shared between multiple contexts, we count how
+ * many contexts keep us "open". Once open hits zero, we are closed
+ * and do not allow any new attachments, and proceed to shut down our
+ * vma and page directories.
+ */
+ atomic_t open;
struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
@@ -320,11 +329,6 @@ struct i915_address_space {
*/
struct list_head bound_list;
- /**
- * List of vma that are not unbound.
- */
- struct list_head unbound_list;
-
struct pagestash free_pages;
/* Global GTT */
@@ -376,6 +380,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
+static inline bool
+i915_vm_has_cache_coloring(struct i915_address_space *vm)
+{
+ return i915_is_ggtt(vm) && vm->mm.color_adjust;
+}
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -401,6 +411,11 @@ struct i915_ggtt {
int mtrr;
+ /** Bit 6 swizzling required for X tiling */
+ u32 bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ u32 bit_6_swizzle_y;
+
u32 pin_bias;
unsigned int num_fences;
@@ -422,7 +437,6 @@ struct i915_ggtt {
struct i915_ppgtt {
struct i915_address_space vm;
- intel_engine_mask_t pd_dirty_engines;
struct i915_page_directory *pd;
};
@@ -432,7 +446,9 @@ struct gen6_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
- unsigned int pin_count;
+ atomic_t pin_count;
+ struct mutex pin_mutex;
+
bool scan_for_unused_pt;
};
@@ -559,6 +575,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
+static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
+{
+ return ggtt->mappable_end > 0;
+}
+
int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
@@ -577,6 +598,35 @@ static inline void i915_vm_put(struct i915_address_space *vm)
kref_put(&vm->ref, i915_vm_release);
}
+static inline struct i915_address_space *
+i915_vm_open(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ atomic_inc(&vm->open);
+ return i915_vm_get(vm);
+}
+
+static inline bool
+i915_vm_tryopen(struct i915_address_space *vm)
+{
+ if (atomic_add_unless(&vm->open, 1, 0))
+ return i915_vm_get(vm);
+
+ return false;
+}
+
+void __i915_vm_close(struct i915_address_space *vm);
+
+static inline void
+i915_vm_close(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!atomic_read(&vm->open));
+ if (atomic_dec_and_test(&vm->open))
+ __i915_vm_close(vm);
+
+ i915_vm_put(vm);
+}
+
int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);
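[Editor's sketch] The atomic_t open added to i915_address_space above, together with i915_vm_open()/i915_vm_tryopen()/i915_vm_close(), is a second, weaker reference count: lookups can race with the last close, so revival must fail once the count has reached zero, which is what atomic_add_unless(&vm->open, 1, 0) gives i915_vm_tryopen(). A compilable userspace sketch of the pattern using C11 atomics (demo_* names are hypothetical; atomic_add_unless() is emulated with a CAS loop):

#include <stdatomic.h>
#include <stdbool.h>

struct demo_vm {
	atomic_int open;	/* contexts keeping this vm usable */
};

static void demo_vm_open(struct demo_vm *vm)
{
	/* Caller already holds an open reference, so a plain increment
	 * is safe -- mirrors i915_vm_open() after its GEM_BUG_ON(). */
	atomic_fetch_add(&vm->open, 1);
}

static bool demo_vm_tryopen(struct demo_vm *vm)
{
	int old = atomic_load(&vm->open);

	/* Equivalent of atomic_add_unless(&vm->open, 1, 0): bump the
	 * count only if it has not already dropped to zero (closed). */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&vm->open, &old, old + 1))
			return true;
	}
	return false;
}

static void demo_vm_close(struct demo_vm *vm)
{
	/* The last closer runs teardown, mirroring __i915_vm_close(). */
	if (atomic_fetch_sub(&vm->open, 1) == 1)
		;	/* tear down vma and page directories here */
}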
@@ -609,10 +659,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT_ULL(11)
+#define PIN_UPDATE BIT_ULL(9)
+#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 9f1517af5b7f..cf8a8c3ef047 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -5,6 +5,7 @@
#include "gt/intel_engine_user.h"
#include "i915_drv.h"
+#include "i915_perf.h"
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -79,8 +80,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915_modparams.enable_hangcheck &&
- intel_has_gpu_reset(i915);
- if (value && intel_has_reset_engine(i915))
+ intel_has_gpu_reset(&i915->gt);
+ if (value && intel_has_reset_engine(&i915->gt))
value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
@@ -156,6 +157,9 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(i915)->has_coherent_ggtt;
break;
+ case I915_PARAM_PERF_REVISION:
+ value = i915_perf_ioctl_version();
+ break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e284bd76fa86..3c85cb0ee99f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -40,6 +40,7 @@
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
@@ -235,6 +236,7 @@ struct compress {
struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
+ bool wc;
};
static bool compress_init(struct compress *c)
@@ -292,7 +294,7 @@ static int compress_page(struct compress *c,
struct z_stream_s *zstream = &c->zstream;
zstream->next_in = src;
- if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
@@ -367,6 +369,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
struct compress {
struct pagevec pool;
+ bool wc;
};
static bool compress_init(struct compress *c)
@@ -389,7 +392,7 @@ static int compress_page(struct compress *c,
if (!ptr)
return -ENOMEM;
- if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
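[Editor's sketch] Both compress_page() variants above now gate i915_memcpy_from_wc() on the new c->wc flag: the non-temporal fast read is attempted only when the source really is write-combined, and a plain cached memcpy() remains the fallback either way. A small sketch of that try-fast-then-fall-back shape (demo_* names hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

/* Hypothetical stand-in for i915_memcpy_from_wc(): reports failure
 * when the movntdqa fast path is unavailable, forcing the fallback. */
static bool demo_copy_from_wc(void *dst, const void *src, size_t len)
{
	(void)dst; (void)src; (void)len;
	return false;	/* pretend the fast path is unsupported */
}

/* Mirror of the logic above: attempt the WC-safe read only when the
 * source is write-combined, else use an ordinary cached memcpy(). */
static void demo_copy_page(bool wc, void *dst, const void *src)
{
	if (!(wc && demo_copy_from_wc(dst, src, DEMO_PAGE_SIZE)))
		memcpy(dst, src, DEMO_PAGE_SIZE);
}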
@@ -421,6 +424,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu;
int slice;
int subslice;
@@ -436,12 +440,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, slice, subslice)
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
@@ -470,9 +474,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->hw_id,
- ctx->sched_attr.priority, ctx->guilty, ctx->active);
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
+ ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -533,10 +537,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
}
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
- jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
- ee->hangcheck_timestamp,
- ee->hangcheck_timestamp == epoch ? "; epoch" : "");
err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
@@ -574,6 +574,9 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
lower_32_bits(obj->gtt_offset));
}
+ if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K)
+ err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes);
+
err_compression_marker(m);
for (page = 0; page < obj->page_count; page++) {
int i, len;
@@ -675,11 +678,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
ts = ktime_to_timespec64(error->uptime);
err_printf(m, "Uptime: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
- err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
- err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
- error->capture,
- jiffies_to_msecs(jiffies - error->capture),
- jiffies_to_msecs(error->capture - error->epoch));
+ err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
+ error->capture, jiffies_to_msecs(jiffies - error->capture));
for (ee = error->engine; ee; ee = ee->next)
err_printf(m, "Active process (on ring %s): %s [%d]\n",
@@ -734,8 +734,24 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+ if (IS_GEN_RANGE(m->i915, 8, 11))
+ err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
+
+ if (IS_GEN(m->i915, 12))
+ err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err);
+
+ if (INTEL_GEN(m->i915) >= 12) {
+ int i;
+
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
+ err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
+ error->sfc_done[i]);
+
+ err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done);
+ }
+
for (ee = error->engine; ee; ee = ee->next)
- error_print_engine(m, ee, error->epoch);
+ error_print_engine(m, ee, error->capture);
for (ee = error->engine; ee; ee = ee->next) {
const struct drm_i915_error_object *obj;
@@ -763,7 +779,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
&ee->requests[j],
- error->epoch);
+ error->capture);
}
print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
@@ -963,7 +979,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct drm_i915_error_object *dst;
unsigned long num_pages;
struct sgt_iter iter;
- dma_addr_t dma;
int ret;
might_sleep();
@@ -984,21 +999,59 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->gtt_page_sizes = vma->page_sizes.gtt;
dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
+ compress->wc = i915_gem_object_is_lmem(vma->obj) ||
+ drm_mm_node_allocated(&ggtt->error_capture);
+
ret = -EINVAL;
- for_each_sgt_dma(dma, iter, vma->pages) {
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+ dma_addr_t dma;
- ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
- s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
- ret = compress_page(compress, (void __force *)s, dst);
- io_mapping_unmap(s);
- if (ret)
- break;
+ s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else if (i915_gem_object_is_lmem(vma->obj)) {
+ struct intel_memory_region *mem = vma->obj->mm.region;
+ dma_addr_t dma;
+
+ for_each_sgt_daddr(dma, iter, vma->pages) {
+ void __iomem *s;
+
+ s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
+ if (ret)
+ break;
+ }
+ } else {
+ struct page *page;
+
+ for_each_sgt_page(page, iter, vma->pages) {
+ void *s;
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap(page);
+ ret = compress_page(compress, s, dst);
+ kunmap(page); /* kunmap() takes the struct page, not kmap()'s cookie */
+
+ drm_clflush_pages(&page, 1);
+
+ if (ret)
+ break;
+ }
}
if (ret || compress_flush(compress, dst)) {
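[Editor's sketch] The rewritten i915_error_object_create() above now chooses one of three copy strategies, and compress->wc is set for the first two because both read through write-combined mappings. A hypothetical sketch of the selection order (demo_* names invented for illustration):

enum demo_path { DEMO_GGTT_SLOT, DEMO_LMEM_BAR, DEMO_KMAP };

static enum demo_path demo_pick_capture_path(bool slot_reserved,
					     bool object_in_lmem)
{
	if (slot_reserved)
		return DEMO_GGTT_SLOT;	/* bounce through the reserved PTE */
	if (object_in_lmem)
		return DEMO_LMEM_BAR;	/* map device pages directly */
	return DEMO_KMAP;		/* system memory: clflush + kmap */
}

The reserved GGTT slot is preferred since it works for any backing store; the kmap path is the only one that needs explicit clflushes around the copy, as the hunk above shows.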
@@ -1136,8 +1189,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
ee->idle = intel_engine_is_idle(engine);
- if (!ee->idle)
- ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
@@ -1263,7 +1314,6 @@ static bool record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
- e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
@@ -1291,7 +1341,7 @@ capture_vma(struct capture_vma *next,
if (!c)
return next;
- if (!i915_active_trygrab(&vma->active)) {
+ if (!i915_active_acquire_if_busy(&vma->active)) {
kfree(c);
return next;
}
@@ -1431,7 +1481,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
*this->slot =
i915_error_object_create(i915, vma, compress);
- i915_active_ungrab(&vma->active);
+ i915_active_release(&vma->active);
i915_vma_put(vma);
capture = this->next;
@@ -1553,6 +1603,21 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS);
}
+ if (IS_GEN_RANGE(i915, 8, 11))
+ error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN);
+
+ if (IS_GEN(i915, 12))
+ error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
+
+ if (INTEL_GEN(i915) >= 12) {
+ for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ error->sfc_done[i] =
+ intel_uncore_read(uncore, GEN12_SFC_DONE(i));
+ }
+
+ error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
+ }
+
/* 4: Everything else */
if (INTEL_GEN(i915) >= 11) {
error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
@@ -1647,26 +1712,15 @@ static void capture_params(struct i915_gpu_state *error)
i915_params_copy(&error->params, &i915_modparams);
}
-static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
-{
- const struct drm_i915_error_engine *ee;
- unsigned long epoch = error->capture;
-
- for (ee = error->engine; ee; ee = ee->next) {
- if (ee->hangcheck_timestamp &&
- time_before(ee->hangcheck_timestamp, epoch))
- epoch = ee->hangcheck_timestamp;
- }
-
- return epoch;
-}
-
static void capture_finish(struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &error->i915->ggtt;
- const u64 slot = ggtt->error_capture.start;
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
+ const u64 slot = ggtt->error_capture.start;
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ }
}
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
@@ -1712,8 +1766,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
error->overlay = intel_overlay_capture_error_state(i915);
error->display = intel_display_capture_error_state(i915);
- error->epoch = capture_find_epoch(error);
-
capture_finish(error);
compress_fini(&compress);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index df9f57766626..5d2c3372ff99 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -34,7 +34,6 @@ struct i915_gpu_state {
ktime_t boottime;
ktime_t uptime;
unsigned long capture;
- unsigned long epoch;
struct drm_i915_private *i915;
@@ -74,6 +73,10 @@ struct i915_gpu_state {
u32 gam_ecochk;
u32 gab_ctl;
u32 gfx_mode;
+ u32 gtt_cache;
+ u32 aux_err; /* gen12 */
+ u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */
+ u32 gam_done; /* gen12 */
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
@@ -85,7 +88,6 @@ struct i915_gpu_state {
/* Software tracked state */
bool idle;
- unsigned long hangcheck_timestamp;
int num_requests;
u32 reset_count;
@@ -118,7 +120,6 @@ struct i915_gpu_state {
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
- u32 hw_id;
int active;
int guilty;
struct i915_sched_attr sched_attr;
@@ -127,6 +128,7 @@ struct i915_gpu_state {
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
+ u32 gtt_page_sizes;
int num_pages;
int page_count;
int unused;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 37e3dd3c1a9d..dae00f7dd7df 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/circ_buf.h>
-#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
@@ -46,6 +45,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -149,30 +149,24 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
-};
-
-static const u32 hpd_mcc[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
};
static const u32 hpd_tgp[HPD_NUM_PINS] = {
- [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
- [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
- [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP,
- [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP,
- [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP,
- [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP,
- [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP,
- [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP,
- [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
+ [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
+ [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
+ [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
+ [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
+ [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
+ [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
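[Editor's sketch] The reworked hpd_icp/hpd_tgp tables above drop the per-port SDE_*_HOTPLUG defines in favour of macros parameterized on the port, so a single bit-layout expression serves every entry. The shape of that pattern, with hypothetical DEMO_* names:

#define DEMO_DDI_SHIFT(port)	((port) * 4)
#define DEMO_DDI_HOTPLUG(port)	(1u << DEMO_DDI_SHIFT(port))

/* Table entries then become uniform -- DEMO_DDI_HOTPLUG(0),
 * DEMO_DDI_HOTPLUG(1), ... -- instead of N hand-written constants,
 * keeping the register layout in one place. */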
@@ -327,180 +321,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
-{
- WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
-
- return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
-}
-
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
-
- spin_lock_irq(&gt->irq_lock);
-
- while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
- ;
-
- dev_priv->gt_pm.rps.pm_iir = 0;
-
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
-
- spin_lock_irq(&gt->irq_lock);
- gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
- dev_priv->gt_pm.rps.pm_iir = 0;
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_gt *gt = &dev_priv->gt;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&gt->irq_lock);
- WARN_ON_ONCE(rps->pm_iir);
-
- if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
- else
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
- rps->interrupts_enabled = true;
- gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);
-
- spin_unlock_irq(&gt->irq_lock);
-}
-
-u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
-{
- return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_gt *gt = &dev_priv->gt;
-
- if (!READ_ONCE(rps->interrupts_enabled))
- return;
-
- spin_lock_irq(&gt->irq_lock);
- rps->interrupts_enabled = false;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
- gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(dev_priv);
-
- /* Now that we will not be generating any more work, flush any
- * outstanding tasks. As we are called on the RPS idle path,
- * we will reset the GPU to minimum frequencies, so the current
- * state of the worker can be discarded.
- */
- cancel_work_sync(&rps->work);
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-void gen9_reset_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen9_enable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- WARN_ON_ONCE(intel_uncore_read(gt->uncore,
- gen6_pm_iir(gt->i915)) &
- gt->pm_guc_events);
- guc->interrupts.enabled = true;
- gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- }
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen9_disable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- assert_rpm_wakelock_held(&gt->i915->runtime_pm);
-
- spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
-
- gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(gt->i915);
-
- gen9_reset_guc_interrupts(guc);
-}
-
-void gen11_reset_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen11_enable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- if (!guc->interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
- guc->interrupts.enabled = true;
- }
- spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen11_disable_guc_interrupts(struct intel_guc *guc)
-{
- struct intel_gt *gt = guc_to_gt(guc);
-
- spin_lock_irq(&gt->irq_lock);
- guc->interrupts.enabled = false;
-
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-
- spin_unlock_irq(&gt->irq_lock);
- intel_synchronize_irq(gt->i915);
-
- gen11_reset_guc_interrupts(guc);
-}
-
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -942,14 +762,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
unsigned long irqflags;
@@ -992,7 +812,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
- position = __intel_get_crtc_scanline(intel_crtc);
+ position = __intel_get_crtc_scanline(crtc);
} else {
/* Have access to pixelcount since start of frame.
* We can split this into vertical and horizontal
@@ -1072,199 +892,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
return position;
}
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 busy_up, busy_down, max_avg, min_avg;
- u8 new_delay;
-
- spin_lock(&mchdev_lock);
-
- intel_uncore_write16(uncore,
- MEMINTRSTS,
- intel_uncore_read(uncore, MEMINTRSTS));
-
- new_delay = dev_priv->ips.cur_delay;
-
- intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
- busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
- max_avg = intel_uncore_read(uncore, RCBMAXAVG);
- min_avg = intel_uncore_read(uncore, RCBMINAVG);
-
- /* Handle RCS change request from hw */
- if (busy_up > max_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.cur_delay - 1;
- if (new_delay < dev_priv->ips.max_delay)
- new_delay = dev_priv->ips.max_delay;
- } else if (busy_down < min_avg) {
- if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.cur_delay + 1;
- if (new_delay > dev_priv->ips.min_delay)
- new_delay = dev_priv->ips.min_delay;
- }
-
- if (ironlake_set_drps(dev_priv, new_delay))
- dev_priv->ips.cur_delay = new_delay;
-
- spin_unlock(&mchdev_lock);
-
- return;
-}
-
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
- struct intel_rps_ei *ei)
-{
- ei->ktime = ktime_get_raw();
- ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
- ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
-}
-
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
-{
- memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
-}
-
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const struct intel_rps_ei *prev = &rps->ei;
- struct intel_rps_ei now;
- u32 events = 0;
-
- if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
- return 0;
-
- vlv_c0_read(dev_priv, &now);
-
- if (prev->ktime) {
- u64 time, c0;
- u32 render, media;
-
- time = ktime_us_delta(now.ktime, prev->ktime);
-
- time *= dev_priv->czclk_freq;
-
- /* Workload can be split between render + media,
- * e.g. SwapBuffers being blitted in X after being rendered in
- * mesa. To account for this we need to combine both engines
- * into our activity counter.
- */
- render = now.render_c0 - prev->render_c0;
- media = now.media_c0 - prev->media_c0;
- c0 = max(render, media);
- c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
-
- if (c0 > time * rps->power.up_threshold)
- events = GEN6_PM_RP_UP_THRESHOLD;
- else if (c0 < time * rps->power.down_threshold)
- events = GEN6_PM_RP_DOWN_THRESHOLD;
- }
-
- rps->ei = now;
- return events;
-}
-
-static void gen6_pm_rps_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, gt_pm.rps.work);
- struct intel_gt *gt = &dev_priv->gt;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- bool client_boost = false;
- int new_delay, adj, min, max;
- u32 pm_iir = 0;
-
- spin_lock_irq(&gt->irq_lock);
- if (rps->interrupts_enabled) {
- pm_iir = fetch_and_zero(&rps->pm_iir);
- client_boost = atomic_read(&rps->num_waiters);
- }
- spin_unlock_irq(&gt->irq_lock);
-
- /* Make sure we didn't queue anything we're not going to process. */
- WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
- if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- goto out;
-
- mutex_lock(&rps->lock);
-
- pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
-
- adj = rps->last_adj;
- new_delay = rps->cur_freq;
- min = rps->min_freq_softlimit;
- max = rps->max_freq_softlimit;
- if (client_boost)
- max = rps->max_freq;
- if (client_boost && new_delay < rps->boost_freq) {
- new_delay = rps->boost_freq;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (adj > 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
-
- if (new_delay >= rps->max_freq_softlimit)
- adj = 0;
- } else if (client_boost) {
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
- if (rps->cur_freq > rps->efficient_freq)
- new_delay = rps->efficient_freq;
- else if (rps->cur_freq > rps->min_freq_softlimit)
- new_delay = rps->min_freq_softlimit;
- adj = 0;
- } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
- if (adj < 0)
- adj *= 2;
- else /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
-
- if (new_delay <= rps->min_freq_softlimit)
- adj = 0;
- } else { /* unknown event */
- adj = 0;
- }
-
- rps->last_adj = adj;
-
- /*
- * Limit deboosting and boosting to keep ourselves at the extremes
- * when in the respective power modes (i.e. slowly decrease frequencies
- * while in the HIGH_POWER zone and slowly increase frequencies while
- * in the LOW_POWER zone). On idle, we will hit the timeout and drop
- * to the next level quickly, and conversely if busy we expect to
- * hit a waitboost and rapidly switch into max power.
- */
- if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
- (adj > 0 && rps->power.mode == LOW_POWER))
- rps->last_adj = 0;
-
- /* sysfs frequency interfaces may have snuck in while servicing the
- * interrupt
- */
- new_delay += adj;
- new_delay = clamp_t(int, new_delay, min, max);
-
- if (intel_set_rps(dev_priv, new_delay)) {
- DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
- rps->last_adj = 0;
- }
-
- mutex_unlock(&rps->lock);
-
-out:
- /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- spin_lock_irq(&gt->irq_lock);
- if (rps->interrupts_enabled)
- gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
- spin_unlock_irq(&gt->irq_lock);
-}
-
-
/**
* ivybridge_parity_work - Workqueue called when a parity error interrupt
* occurred.
@@ -1401,11 +1028,11 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
case HPD_PORT_C:
- return val & TGP_DDIC_HPD_LONG_DETECT;
+ return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
default:
return false;
}
@@ -1427,20 +1054,6 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
}
}
-static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
-{
- switch (pin) {
- case HPD_PORT_A:
- return val & ICP_DDIA_HPD_LONG_DETECT;
- case HPD_PORT_B:
- return val & ICP_DDIB_HPD_LONG_DETECT;
- case HPD_PORT_C:
- return val & TGP_DDIC_HPD_LONG_DETECT;
- default:
- return false;
- }
-}
-
static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1652,54 +1265,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
res1, res2);
}
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
-{
- struct drm_i915_private *i915 = gt->i915;
- struct intel_rps *rps = &i915->gt_pm.rps;
- const u32 events = i915->pm_rps_events & pm_iir;
-
- lockdep_assert_held(&gt->irq_lock);
-
- if (unlikely(!events))
- return;
-
- gen6_gt_pm_mask_irq(gt, events);
-
- if (!rps->interrupts_enabled)
- return;
-
- rps->pm_iir |= events;
- schedule_work(&rps->work);
-}
-
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- struct intel_gt *gt = &dev_priv->gt;
-
- if (pm_iir & dev_priv->pm_rps_events) {
- spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
- if (rps->interrupts_enabled) {
- rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
- schedule_work(&rps->work);
- }
- spin_unlock(&gt->irq_lock);
- }
-
- if (INTEL_GEN(dev_priv) >= 8)
- return;
-
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
-
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-}
-
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -1716,7 +1281,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- int pipe;
+ enum pipe pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1741,6 +1306,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
status_mask = PIPE_FIFO_UNDERRUN_STATUS;
switch (pipe) {
+ default:
case PIPE_A:
iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
@@ -2009,7 +1575,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (gt_iir)
gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2136,7 +1702,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
@@ -2222,7 +1788,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- int pipe;
+ enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
@@ -2256,19 +1822,35 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
-static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
- const u32 *pins)
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- u32 ddi_hotplug_trigger;
- u32 tc_hotplug_trigger;
+ u32 ddi_hotplug_trigger, tc_hotplug_trigger;
u32 pin_mask = 0, long_mask = 0;
+ bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
+ const u32 *pins;
- if (HAS_PCH_MCC(dev_priv)) {
+ if (HAS_PCH_TGP(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
+ tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_JSP(dev_priv)) {
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
tc_hotplug_trigger = 0;
+ pins = hpd_tgp;
+ } else if (HAS_PCH_MCC(dev_priv)) {
+ ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+ tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
} else {
+ WARN(!HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+ tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
+ pins = hpd_icp;
}
if (ddi_hotplug_trigger) {
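[Editor's sketch] icp_irq_handler() above folds the former per-PCH handlers (tgp_irq_handler() included) into one body by selecting the trigger masks, pin table and long-pulse decoder up front. A hedged sketch of that table-driven dispatch (demo_* names hypothetical, config fields elided):

#include <stdbool.h>

struct demo_hpd_config {
	unsigned int ddi_mask;
	unsigned int tc_mask;
	const unsigned int *pins;
	bool (*tc_long_detect)(int pin, unsigned int val);
};

static const struct demo_hpd_config *
demo_pick_hpd_config(bool tgp_family)
{
	static const struct demo_hpd_config tgp_cfg = { 0 }; /* elided */
	static const struct demo_hpd_config icp_cfg = { 0 }; /* elided */

	return tgp_family ? &tgp_cfg : &icp_cfg;
}

The shared body then only reads the chosen config, which is what lets the JSP and MCC variants collapse into parameter choices (TGP DDI masks with no TC ports, or ICP masks restricted to TC1) instead of copy-pasted handlers.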
@@ -2292,44 +1874,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger,
dig_hotplug_reg, pins,
- icp_tc_port_hotplug_long_detect);
- }
-
- if (pin_mask)
- intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
-
- if (pch_iir & SDE_GMBUS_ICP)
- gmbus_irq_handler(dev_priv);
-}
-
-static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
-{
- u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
- u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
- u32 pin_mask = 0, long_mask = 0;
-
- if (ddi_hotplug_trigger) {
- u32 dig_hotplug_reg;
-
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
- I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
-
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- ddi_hotplug_trigger,
- dig_hotplug_reg, hpd_tgp,
- tgp_ddi_port_hotplug_long_detect);
- }
-
- if (tc_hotplug_trigger) {
- u32 dig_hotplug_reg;
-
- dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
- I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
-
- intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
- tc_hotplug_trigger,
- dig_hotplug_reg, hpd_tgp,
- tgp_tc_port_hotplug_long_detect);
+ tc_port_hotplug_long_detect);
}
if (pin_mask)
@@ -2434,7 +1979,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
}
if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev_priv);
+ gen5_rps_irq_handler(&dev_priv->gt.rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2539,7 +2084,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
}
}
@@ -2616,10 +2161,16 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
u32 mask;
if (INTEL_GEN(dev_priv) >= 12)
- /* TODO: Add AUX entries for USBC */
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
- TGL_DE_PORT_AUX_DDIC;
+ TGL_DE_PORT_AUX_DDIC |
+ TGL_DE_PORT_AUX_USBC1 |
+ TGL_DE_PORT_AUX_USBC2 |
+ TGL_DE_PORT_AUX_USBC3 |
+ TGL_DE_PORT_AUX_USBC4 |
+ TGL_DE_PORT_AUX_USBC5 |
+ TGL_DE_PORT_AUX_USBC6;
+
mask = GEN8_AUX_CHANNEL_A;
if (INTEL_GEN(dev_priv) >= 9)
@@ -2638,7 +2189,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(dev_priv) >= 11)
+ return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
+ else if (INTEL_GEN(dev_priv) >= 9)
return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2655,11 +2208,21 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (iir & GEN8_DE_EDP_PSR) {
- u32 psr_iir = I915_READ(EDP_PSR_IIR);
+ u32 psr_iir;
+ i915_reg_t iir_reg;
+
+ if (INTEL_GEN(dev_priv) >= 12)
+ iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
+ else
+ iir_reg = EDP_PSR_IIR;
+
+ psr_iir = I915_READ(iir_reg);
+ I915_WRITE(iir_reg, psr_iir);
+
+ if (psr_iir)
+ found = true;
intel_psr_irq_handler(dev_priv, psr_iir);
- I915_WRITE(EDP_PSR_IIR, psr_iir);
- found = true;
}
if (!found)
@@ -2780,12 +2343,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- tgp_irq_handler(dev_priv, iir);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC)
- icp_irq_handler(dev_priv, iir, hpd_mcc);
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_handler(dev_priv, iir, hpd_icp);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ icp_irq_handler(dev_priv, iir);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
@@ -2894,9 +2453,11 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}
-static irqreturn_t gen11_irq_handler(int irq, void *arg)
+static __always_inline irqreturn_t
+__gen11_irq_handler(struct drm_i915_private * const i915,
+ u32 (*intr_disable)(void __iomem * const regs),
+ void (*intr_enable)(void __iomem * const regs))
{
- struct drm_i915_private * const i915 = arg;
void __iomem * const regs = i915->uncore.regs;
struct intel_gt *gt = &i915->gt;
u32 master_ctl;
@@ -2905,9 +2466,9 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = gen11_master_intr_disable(regs);
+ master_ctl = intr_disable(regs);
if (!master_ctl) {
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
return IRQ_NONE;
}
@@ -2929,13 +2490,20 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
- gen11_master_intr_enable(regs);
+ intr_enable(regs);
gen11_gu_misc_irq_handler(gt, gu_misc_iir);
return IRQ_HANDLED;
}
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
+{
+ return __gen11_irq_handler(arg,
+ gen11_master_intr_disable,
+ gen11_master_intr_enable);
+}
+
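[Editor's sketch] gen11_irq_handler() is re-expressed above as a thin wrapper around an __always_inline core that takes the master enable/disable hooks as function pointers; because the core is force-inlined into each wrapper, the compiler constant-folds the pointers and no indirect call survives. A compilable userspace sketch of the trick (demo_* names hypothetical):

#define demo_always_inline inline __attribute__((__always_inline__))

static demo_always_inline int
demo_irq_core(unsigned int (*intr_disable)(void),
	      void (*intr_enable)(void))
{
	unsigned int master_ctl = intr_disable();

	if (!master_ctl) {
		intr_enable();
		return 0;	/* IRQ_NONE */
	}

	/* ... dispatch on master_ctl bits ... */
	intr_enable();
	return 1;		/* IRQ_HANDLED */
}

static unsigned int demo_gen11_disable(void) { return 1; }
static void demo_gen11_enable(void) { }

/* Each named handler is straight-line code after inlining. */
static int demo_gen11_handler(void)
{
	return demo_irq_core(demo_gen11_disable, demo_gen11_enable);
}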
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -2952,12 +2520,18 @@ int i8xx_enable_vblank(struct drm_crtc *crtc)
return 0;
}
-int i945gm_enable_vblank(struct drm_crtc *crtc)
+int i915gm_enable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- if (dev_priv->i945gm_vblank.enabled++ == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ /*
+ * Vblank interrupts fail to wake the device up from C2+.
+ * Disabling render clock gating during C-states avoids
+ * the problem. There is a small power cost, so we do this
+ * only when vblank interrupts are actually enabled.
+ */
+ if (dev_priv->vblank_enabled++ == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
return i8xx_enable_vblank(crtc);
}
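[Editor's sketch] The i945gm work-queue/PM-QoS machinery is replaced above by a direct register write guarded by a simple enable count, so the render clock-gating workaround costs power only while someone actually has vblank interrupts enabled. The counting pattern in isolation (demo_* names hypothetical; the driver serializes these calls through its own locking):

static unsigned int demo_vblank_enabled;

static void demo_enable_vblank(void)
{
	if (demo_vblank_enabled++ == 0)
		;	/* first user: write the clock-gating disable bit */
}

static void demo_disable_vblank(void)
{
	if (--demo_vblank_enabled == 0)
		;	/* last user: clear the bit again */
}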
@@ -3030,14 +2604,14 @@ void i8xx_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void i945gm_disable_vblank(struct drm_crtc *crtc)
+void i915gm_disable_vblank(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
i8xx_disable_vblank(crtc);
- if (--dev_priv->i945gm_vblank.enabled == 0)
- schedule_work(&dev_priv->i945gm_vblank.work);
+ if (--dev_priv->vblank_enabled == 0)
+ I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i965_disable_vblank(struct drm_crtc *crtc)
@@ -3076,60 +2650,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_vblank_work_func(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, i945gm_vblank.work);
-
- /*
- * Vblank interrupts fail to wake up the device from C3,
- * hence we want to prevent C3 usage while vblank interrupts
- * are enabled.
- */
- pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
- READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
- dev_priv->i945gm_vblank.c3_disable_latency :
- PM_QOS_DEFAULT_VALUE);
-}
-
-static int cstate_disable_latency(const char *name)
-{
- const struct cpuidle_driver *drv;
- int i;
-
- drv = cpuidle_get_driver();
- if (!drv)
- return 0;
-
- for (i = 0; i < drv->state_count; i++) {
- const struct cpuidle_state *state = &drv->states[i];
-
- if (!strcmp(state->name, name))
- return state->exit_latency ?
- state->exit_latency - 1 : 0;
- }
-
- return 0;
-}
-
-static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
-{
- INIT_WORK(&dev_priv->i945gm_vblank.work,
- i945gm_vblank_work_func);
-
- dev_priv->i945gm_vblank.c3_disable_latency =
- cstate_disable_latency("C3");
- pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-}
-
-static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
-{
- cancel_work_sync(&dev_priv->i945gm_vblank.work);
- pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
-}
-
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3246,7 +2766,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen8_master_intr_disable(dev_priv->uncore.regs);
@@ -3271,7 +2791,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
- int pipe;
+ enum pipe pipe;
gen11_master_intr_disable(dev_priv->uncore.regs);
@@ -3279,8 +2799,23 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
+ intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
+ }
+ } else {
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
+ }
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3431,42 +2966,44 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
}
}
-static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
+ u32 sde_ddi_mask, u32 sde_tc_mask,
+ u32 ddi_enable_mask, u32 tc_enable_mask,
+ const u32 *pins)
{
u32 hotplug_irqs, enabled_irqs;
- hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+ hotplug_irqs = sde_ddi_mask | sde_tc_mask;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
- icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
- ICP_TC_HPD_ENABLE_MASK);
+ icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
}
+/*
+ * EHL doesn't need most of gen11_hpd_irq_setup; it handles only the
+ * equivalent of SDE.
+ */
static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, enabled_irqs;
-
- hotplug_irqs = SDE_DDI_MASK_TGP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
-
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
+ ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
+ hpd_icp);
}
-static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+/*
+ * JSP behaves exactly the same as MCC above except that port C is mapped to
+ * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's
+ * masks & tables rather than ICP's masks & tables.
+ */
+static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, enabled_irqs;
-
- hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
-
- icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
- TGP_TC_HPD_ENABLE_MASK);
+ icp_hpd_irq_setup(dev_priv,
+ SDE_DDI_MASK_TGP, 0,
+ TGP_DDI_HPD_ENABLE_MASK, 0,
+ hpd_tgp);
}
static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3506,9 +3043,13 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
- tgp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
+ TGP_DDI_HPD_ENABLE_MASK,
+ TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_hpd_irq_setup(dev_priv);
+ icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
+ ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE_MASK, hpd_icp);
}
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
@@ -3684,7 +3225,6 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
if (IS_HASWELL(dev_priv)) {
gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3794,8 +3334,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
- intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ enum transcoder trans;
+
+ for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
+ enum intel_display_power_domain domain;
+
+ domain = POWER_DOMAIN_TRANSCODER(trans);
+ if (!intel_display_power_is_enabled(dev_priv, domain))
+ continue;
+
+ gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
+ }
+ } else {
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
+ }
for_each_pipe(dev_priv, pipe) {
dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
@@ -3853,8 +3406,11 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_TGP(dev_priv))
icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
TGP_TC_HPD_ENABLE_MASK);
- else if (HAS_PCH_MCC(dev_priv))
+ else if (HAS_PCH_JSP(dev_priv))
icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
+ else if (HAS_PCH_MCC(dev_priv))
+ icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
+ ICP_TC_HPD_ENABLE(PORT_TC1));
else
icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
ICP_TC_HPD_ENABLE_MASK);
@@ -4317,16 +3873,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
- if (IS_I945GM(dev_priv))
- i945gm_vblank_work_init(dev_priv);
-
intel_hpd_init_work(dev_priv);
- INIT_WORK(&rps->work, gen6_pm_rps_work);
-
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
@@ -4335,33 +3885,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
- /* Let's track the enabled rps events */
- if (IS_VALLEYVIEW(dev_priv))
- /* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
- else
- dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
-
- /* We share the register with other engine */
- if (INTEL_GEN(dev_priv) > 9)
- GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
-
- rps->pm_intrmsk_mbz = 0;
-
- /*
- * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_GEN(dev_priv) <= 7)
- rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_GEN(dev_priv) >= 8)
- rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4387,8 +3910,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (I915_HAS_HOTPLUG(dev_priv))
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
- if (HAS_PCH_MCC(dev_priv))
- /* EHL doesn't need most of gen11_hpd_irq_setup */
+ if (HAS_PCH_JSP(dev_priv))
+ dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
+ else if (HAS_PCH_MCC(dev_priv))
dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
else if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
@@ -4411,9 +3935,6 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
- if (IS_I945GM(i915))
- i945gm_vblank_work_fini(i915);
-
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
@@ -4538,10 +4059,10 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
int irq = dev_priv->drm.pdev->irq;
/*
- * FIXME we can get called twice during driver load
- * error handling due to intel_modeset_cleanup()
- * calling us out of sequence. Would be nice if
- * it didn't do that...
+ * FIXME we can get called twice during driver probe
+ * error handling as well as during driver remove due to
+ * intel_modeset_driver_remove() calling us out of sequence.
+ * Would be nice if it didn't do that...
*/
if (!dev_priv->drm.irq_enabled)
return;
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 8e7e6071777e..812c47a9c2d6 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -17,14 +17,8 @@ struct drm_device;
struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
-struct intel_crtc;
-struct intel_gt;
-struct intel_guc;
struct intel_uncore;
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-
void intel_irq_init(struct drm_i915_private *dev_priv);
void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
@@ -106,12 +100,6 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct intel_guc *guc);
-void gen9_enable_guc_interrupts(struct intel_guc *guc);
-void gen9_disable_guc_interrupts(struct intel_guc *guc);
-void gen11_reset_guc_interrupts(struct intel_guc *guc);
-void gen11_enable_guc_interrupts(struct intel_guc *guc);
-void gen11_disable_guc_interrupts(struct intel_guc *guc);
bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
@@ -122,12 +110,12 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
int i8xx_enable_vblank(struct drm_crtc *crtc);
-int i945gm_enable_vblank(struct drm_crtc *crtc);
+int i915gm_enable_vblank(struct drm_crtc *crtc);
int i965_enable_vblank(struct drm_crtc *crtc);
int ilk_enable_vblank(struct drm_crtc *crtc);
int bdw_enable_vblank(struct drm_crtc *crtc);
void i8xx_disable_vblank(struct drm_crtc *crtc);
-void i945gm_disable_vblank(struct drm_crtc *crtc);
+void i915gm_disable_vblank(struct drm_crtc *crtc);
void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 296452f9efe4..1dd1f3652795 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,8 @@ i915_param_named(modeset, int, 0400,
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
@@ -165,7 +166,7 @@ i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-i915_param_named_unsafe(inject_load_failure, uint, 0400,
+i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
@@ -178,6 +179,11 @@ i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
#endif
+#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
+i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+ "Fake LMEM start offset (default: 0)");
+#endif
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
@@ -189,6 +195,8 @@ static __always_inline void _print_param(struct drm_printer *p,
drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
else if (!__builtin_strcmp(type, "unsigned int"))
drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x);
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
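The hunk above teaches _print_param() to dump the new "unsigned long" parameter class added for fake_lmem_start; dispatch is on the stringified parameter type, which lets __builtin_strcmp() on literals fold away at compile time. A standalone userspace sketch of the same dispatch (illustrative only; the kernel version prints through drm_printf):

#include <stdio.h>
#include <string.h>

static void print_param(const char *name, const char *type, const void *x)
{
	/* dispatch on the stringified parameter type, as above */
	if (!strcmp(type, "int"))
		printf("i915.%s=%d\n", name, *(const int *)x);
	else if (!strcmp(type, "unsigned int"))
		printf("i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!strcmp(type, "unsigned long"))
		printf("i915.%s=%lu\n", name, *(const unsigned long *)x);
	else if (!strcmp(type, "char *"))
		printf("i915.%s=%s\n", name, *(const char **)x);
	else
		printf("i915.%s=<unhandled type '%s'>\n", name, type);
}

int main(void)
{
	unsigned long fake_lmem_start = 0; /* hypothetical value */

	print_param("fake_lmem_start", "unsigned long", &fake_lmem_start);
	return 0;
}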
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d29ade3b7de6..31b88f297fbc 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -61,11 +61,12 @@ struct drm_printer;
param(char *, dmc_firmware_path, NULL) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
param(int, edp_vswing, 0) \
- param(int, reset, 2) \
- param(unsigned int, inject_load_failure, 0) \
+ param(int, reset, 3) \
+ param(unsigned int, inject_probe_failure, 0) \
param(int, fastboot, -1) \
param(int, enable_dpcd_backlight, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
+ param(unsigned long, fake_lmem_start, 0) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
param(bool, enable_hangcheck, true) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1974e4c78a43..1bb701d32a5d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/console.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
@@ -118,6 +117,14 @@
[PIPE_C] = IVB_CURSOR_C_OFFSET, \
}
+#define TGL_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ [PIPE_D] = TGL_CURSOR_D_OFFSET, \
+ }
+
#define I9XX_COLORS \
.color = { .gamma_lut_size = 256 }
#define I965_COLORS \
@@ -144,10 +151,13 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
+#define GEN_DEFAULT_REGIONS \
+ .memory_regions = REGION_SMEM | REGION_STOLEN
+
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -161,11 +171,12 @@
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define I845_FEATURES \
GEN(2), \
- .num_pipes = 1, \
+ .pipe_mask = BIT(PIPE_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -178,7 +189,8 @@
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i830_info = {
I830_FEATURES,
@@ -203,7 +215,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.engine_mask = BIT(RCS0), \
@@ -212,7 +224,8 @@ static const struct intel_device_info intel_i865g_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I9XX_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@@ -287,7 +300,7 @@ static const struct intel_device_info intel_pineview_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
@@ -297,7 +310,8 @@ static const struct intel_device_info intel_pineview_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
I965_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@@ -337,7 +351,7 @@ static const struct intel_device_info intel_gm45_info = {
#define GEN5_FEATURES \
GEN(5), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
@@ -347,7 +361,8 @@ static const struct intel_device_info intel_gm45_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@@ -363,7 +378,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
#define GEN6_FEATURES \
GEN(6), \
- .num_pipes = 2, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -377,7 +392,8 @@ static const struct intel_device_info intel_ironlake_m_info = {
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -411,7 +427,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
@@ -420,12 +436,13 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN_DEFAULT_PAGE_SIZES
+ GEN_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -462,7 +479,7 @@ static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
- .num_pipes = 0, /* legal, last one wins */
+ .pipe_mask = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
@@ -470,13 +487,13 @@ static const struct intel_device_info intel_valleyview_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .num_pipes = 2,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -486,6 +503,7 @@ static const struct intel_device_info intel_valleyview_info = {
I9XX_CURSOR_OFFSETS,
I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define G75_FEATURES \
@@ -560,7 +578,7 @@ static const struct intel_device_info intel_broadwell_gt3_info = {
static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .num_pipes = 3,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
.display.has_hotplug = 1,
.is_lp = 1,
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
@@ -570,7 +588,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_rps = true,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
@@ -580,6 +598,7 @@ static const struct intel_device_info intel_cherryview_info = {
CHV_CURSOR_OFFSETS,
CHV_COLORS,
GEN_DEFAULT_PAGE_SIZES,
+ GEN_DEFAULT_REGIONS,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -593,6 +612,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
.has_gt_uc = 1, \
+ .display.has_hdcp = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
@@ -631,11 +651,12 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.is_lp = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .num_pipes = 3, \
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_fbc = 1, \
+ .display.has_hdcp = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
.display.has_csr = 1, \
@@ -654,7 +675,8 @@ static const struct intel_device_info intel_skylake_gt4_info = {
HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
- GEN9_DEFAULT_PAGE_SIZES
+ GEN9_DEFAULT_PAGE_SIZES, \
+ GEN_DEFAULT_REGIONS
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@@ -715,6 +737,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = {
GEN9_FEATURES, \
GEN(10), \
.ddb_size = 1024, \
+ .display.has_dsc = 1, \
.has_coherent_ggtt = false, \
GLK_COLORS
@@ -787,18 +810,25 @@ static const struct intel_device_info intel_elkhartlake_info = {
[TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
- .has_global_mocs = 1
+ TGL_CURSOR_OFFSETS, \
+ .has_global_mocs = 1, \
+ .display.has_dsb = 1
static const struct intel_device_info intel_tigerlake_12_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
- .num_pipes = 4,
+ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .has_rps = false, /* XXX disabled for debugging */
};
+#define GEN12_DGFX_FEATURES \
+ GEN12_FEATURES, \
+ .is_dgfx = 1
+
#undef GEN
#undef PLATFORM
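The recurring num_pipes to pipe_mask conversion throughout this file swaps a plain pipe count for a bitmask: fused-off or non-contiguous pipe sets stay representable, and the count remains recoverable via a population count. A standalone sketch of the idea (illustrative; __builtin_popcount is the GCC/Clang builtin, where the kernel itself would use hweight8()):

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D, I915_MAX_PIPES };
#define BIT(n) (1u << (n))

int main(void)
{
	/* hypothetical part with PIPE_B fused off */
	unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_C);
	int pipe, num_pipes = __builtin_popcount(pipe_mask);

	printf("num_pipes=%d\n", num_pipes);
	for (pipe = 0; pipe < I915_MAX_PIPES; pipe++)
		if (pipe_mask & BIT(pipe))
			printf("pipe %c enabled\n", 'A' + pipe);
	return 0;
}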
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index e42b86827d6b..65d7c2e599de 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -196,8 +196,11 @@
#include <linux/uuid.h>
#include "gem/i915_gem_context.h"
-#include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_lrc_reg.h"
+#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_perf.h"
@@ -215,6 +218,7 @@
#include "oa/i915_oa_cflgt3.h"
#include "oa/i915_oa_cnl.h"
#include "oa/i915_oa_icl.h"
+#include "oa/i915_oa_tgl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -291,6 +295,7 @@ static u32 i915_perf_stream_paranoid = true;
/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK 0x3f
+#define OAREPORT_REASON_MASK_EXTENDED 0x7f
#define OAREPORT_REASON_SHIFT 19
#define OAREPORT_REASON_TIMER (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
@@ -336,17 +341,24 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
};
+static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = {
+ [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+};
+
#define SAMPLE_OA_REPORT (1<<0)
/**
* struct perf_open_properties - for validated properties given to open a stream
* @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
* @single_context: Whether a single or all gpu contexts should be monitored
+ * @hold_preemption: Whether preemption is disabled for the filtered
+ * context
* @ctx_handle: A gem ctx handle for use with @single_context
* @metrics_set: An ID for an OA unit metric set advertised via sysfs
* @oa_format: An OA unit HW report format
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
+ * @engine: The engine (typically rcs0) being monitored by the OA unit
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -356,6 +368,7 @@ struct perf_open_properties {
u32 sample_flags;
u64 single_context:1;
+ u64 hold_preemption:1;
u64 ctx_handle;
/* OA sampling state */
@@ -363,69 +376,74 @@ struct perf_open_properties {
int oa_format;
bool oa_periodic;
int oa_period_exponent;
+
+ struct intel_engine_cs *engine;
+};
+
+struct i915_oa_config_bo {
+ struct llist_node node;
+
+ struct i915_oa_config *oa_config;
+ struct i915_vma *vma;
};
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
-static void free_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
+void i915_oa_config_release(struct kref *ref)
{
- if (!PTR_ERR(oa_config->flex_regs))
- kfree(oa_config->flex_regs);
- if (!PTR_ERR(oa_config->b_counter_regs))
- kfree(oa_config->b_counter_regs);
- if (!PTR_ERR(oa_config->mux_regs))
- kfree(oa_config->mux_regs);
- kfree(oa_config);
-}
+ struct i915_oa_config *oa_config =
+ container_of(ref, typeof(*oa_config), ref);
-static void put_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
-{
- if (!atomic_dec_and_test(&oa_config->ref_count))
- return;
+ kfree(oa_config->flex_regs);
+ kfree(oa_config->b_counter_regs);
+ kfree(oa_config->mux_regs);
- free_oa_config(dev_priv, oa_config);
+ kfree_rcu(oa_config, rcu);
}
-static int get_oa_config(struct drm_i915_private *dev_priv,
- int metrics_set,
- struct i915_oa_config **out_config)
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
- int ret;
+ struct i915_oa_config *oa_config;
- if (metrics_set == 1) {
- *out_config = &dev_priv->perf.test_config;
- atomic_inc(&dev_priv->perf.test_config.ref_count);
- return 0;
- }
+ rcu_read_lock();
+ if (metrics_set == 1)
+ oa_config = &perf->test_config;
+ else
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ if (oa_config)
+ oa_config = i915_oa_config_get(oa_config);
+ rcu_read_unlock();
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
- if (ret)
- return ret;
+ return oa_config;
+}
- *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
- if (!*out_config)
- ret = -EINVAL;
- else
- atomic_inc(&(*out_config)->ref_count);
+static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
+{
+ i915_oa_config_put(oa_bo->oa_config);
+ i915_vma_put(oa_bo->vma);
+ kfree(oa_bo);
+}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- return ret;
+ return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) &
+ GEN12_OAG_OATAILPTR_MASK;
}
static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
+ return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}
static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}
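The refactor above replaces the mutex-guarded atomic ref_count with kref + RCU: i915_perf_get_oa_config() looks the config up under rcu_read_lock() and takes a reference only if the object is still live, while the final put frees through kfree_rcu(). A userspace sketch of that conditional-get lifetime, with a C11 atomic standing in for the kref and the RCU grace period elided (illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct oa_config {
	atomic_int ref;
};

/* kref_get_unless_zero() analogue: refuse a ref on a dying object */
static int config_get(struct oa_config *c)
{
	int old = atomic_load(&c->ref);

	while (old > 0)
		if (atomic_compare_exchange_weak(&c->ref, &old, old + 1))
			return 1;
	return 0;
}

static void config_put(struct oa_config *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1)
		free(c); /* kfree_rcu() in the real code */
}

int main(void)
{
	struct oa_config *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->ref, 1);
	if (config_get(c))
		printf("got a reference\n");
	config_put(c);
	config_put(c); /* final put frees */
	return 0;
}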
@@ -456,7 +474,6 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
*/
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
int report_size = stream->oa_buffer.format_size;
unsigned long flags;
unsigned int aged_idx;
@@ -479,7 +496,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aged_tail = stream->oa_buffer.tails[aged_idx].offset;
aging_tail = stream->oa_buffer.tails[!aged_idx].offset;
- hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream);
+ hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
/* The tail pointer increases in 64 byte increments,
* not in report_size steps...
@@ -536,7 +553,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
+ DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
hw_tail);
}
}
@@ -655,7 +672,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
int report_size = stream->oa_buffer.format_size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
@@ -738,9 +755,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* it to userspace...
*/
reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- OAREPORT_REASON_MASK);
+ (IS_GEN(stream->perf->i915, 12) ?
+ OAREPORT_REASON_MASK_EXTENDED :
+ OAREPORT_REASON_MASK));
if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
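gen8_append_oa_reports() above now widens the reason-field mask on Gen12, where automatically triggered reports carry one extra reason bit. A standalone sketch of the decode, reusing the defines from earlier in this file:

#include <stdio.h>
#include <stdint.h>

#define OAREPORT_REASON_SHIFT		19
#define OAREPORT_REASON_MASK		0x3f
#define OAREPORT_REASON_MASK_EXTENDED	0x7f

static uint32_t report_reason(uint32_t dword0, int is_gen12)
{
	uint32_t mask = is_gen12 ? OAREPORT_REASON_MASK_EXTENDED
				 : OAREPORT_REASON_MASK;

	return (dword0 >> OAREPORT_REASON_SHIFT) & mask;
}

int main(void)
{
	uint32_t report0 = 1u << OAREPORT_REASON_SHIFT; /* timer report */

	printf("reason=%u\n", report_reason(report0, 0)); /* 1 */
	return 0;
}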
@@ -755,7 +774,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
*/
- if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit))
+ if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) &&
+ INTEL_GEN(stream->perf->i915) <= 11)
ctx_id = report32[2] = INVALID_CTX_ID;
/*
@@ -789,7 +809,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.exclusive_stream->ctx ||
+ if (!stream->perf->exclusive_stream->ctx ||
stream->specific_ctx_id == ctx_id ||
stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
@@ -798,7 +818,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.exclusive_stream->ctx &&
+ if (stream->perf->exclusive_stream->ctx &&
stream->specific_ctx_id != ctx_id) {
report32[2] = INVALID_CTX_ID;
}
@@ -822,6 +842,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
}
if (start_offset != *offset) {
+ i915_reg_t oaheadptr;
+
+ oaheadptr = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR;
+
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
@@ -829,8 +854,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* relative to oa_buf_base so put back here...
*/
head += gtt_offset;
-
- I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
+ intel_uncore_write(uncore, oaheadptr,
+ head & GEN12_OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -864,14 +889,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus;
+ i915_reg_t oastatus_reg;
int ret;
if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
+ GEN12_OAG_OASTATUS : GEN8_OASTATUS;
+
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
/*
* We treat OABUFFER_OVERFLOW as a significant error:
@@ -896,14 +925,14 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
stream->period_exponent);
- dev_priv->perf.ops.oa_disable(stream);
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
* reset GEN8_OASTATUS for us
*/
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
}
if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
@@ -911,8 +940,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- I915_WRITE(GEN8_OASTATUS,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+ intel_uncore_write(uncore, oastatus_reg,
+ oastatus & ~GEN8_OASTATUS_REPORT_LOST);
}
return gen8_append_oa_reports(stream, buf, count, offset);
@@ -943,7 +972,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
int report_size = stream->oa_buffer.format_size;
u8 *oa_buf_base = stream->oa_buffer.vaddr;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
@@ -1017,7 +1046,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.spurious_report_rs))
+ if (__ratelimit(&stream->perf->spurious_report_rs))
DRM_NOTE("Skipping spurious, invalid OA report\n");
continue;
}
@@ -1043,9 +1072,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
*/
head += gtt_offset;
- I915_WRITE(GEN7_OASTATUS2,
- ((head & GEN7_OASTATUS2_HEAD_MASK) |
- GEN7_OASTATUS2_MEM_SELECT_GGTT));
+ intel_uncore_write(uncore, GEN7_OASTATUS2,
+ (head & GEN7_OASTATUS2_HEAD_MASK) |
+ GEN7_OASTATUS2_MEM_SELECT_GGTT);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -1075,21 +1104,21 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus1;
int ret;
if (WARN_ON(!stream->oa_buffer.vaddr))
return -EIO;
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
/* XXX: On Haswell we don't have a safe way to clear oastatus1
* bits while the OA unit is enabled (while the tail pointer
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1;
+ oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1120,10 +1149,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
stream->period_exponent);
- dev_priv->perf.ops.oa_disable(stream);
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
}
if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
@@ -1131,7 +1160,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.gen7_latched_oastatus1 |=
+ stream->perf->gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
@@ -1196,25 +1225,18 @@ static int i915_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- return dev_priv->perf.ops.read(stream, buf, count, offset);
+ return stream->perf->ops.read(stream, buf, count, offset);
}
static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
struct i915_gem_engines_iter it;
- struct drm_i915_private *i915 = stream->dev_priv;
struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
int err;
- err = i915_mutex_lock_interruptible(&i915->drm);
- if (err)
- return ERR_PTR(err);
-
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- if (ce->engine->class != RENDER_CLASS)
+ if (ce->engine != stream->engine) /* first match! */
continue;
/*
@@ -1229,10 +1251,6 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
}
i915_gem_context_unlock_engines(ctx);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err)
- return ERR_PTR(err);
-
return stream->pinned_ctx;
}
@@ -1248,14 +1266,13 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(i915)) {
+ switch (INTEL_GEN(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
@@ -1269,7 +1286,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 8:
case 9:
case 10:
- if (USES_GUC_SUBMISSION(i915)) {
+ if (intel_engine_in_execlists_submission_mode(ce->engine)) {
+ stream->specific_ctx_id_mask =
+ (1U << GEN8_CTX_ID_WIDTH) - 1;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ } else {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1289,31 +1310,23 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
- } else {
- stream->specific_ctx_id_mask =
- (1U << GEN8_CTX_ID_WIDTH) - 1;
- stream->specific_ctx_id =
- upper_32_bits(ce->lrc_desc);
- stream->specific_ctx_id &=
- stream->specific_ctx_id_mask;
}
break;
- case 11: {
+ case 11:
+ case 12: {
stream->specific_ctx_id_mask =
- ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
- ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
- ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- stream->specific_ctx_id = upper_32_bits(ce->lrc_desc);
- stream->specific_ctx_id &=
- stream->specific_ctx_id_mask;
+ ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
break;
}
default:
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
+ ce->tag = stream->specific_ctx_id_mask;
+
DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
stream->specific_ctx_id,
stream->specific_ctx_id_mask);
@@ -1330,69 +1343,76 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- stream->specific_ctx_id = INVALID_CTX_ID;
- stream->specific_ctx_id_mask = 0;
-
ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
- mutex_lock(&dev_priv->drm.struct_mutex);
+ ce->tag = 0; /* recomputed on next submission after parking */
intel_context_unpin(ce);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
+
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
}
static void
free_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
-
- mutex_lock(&i915->drm.struct_mutex);
-
i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
- mutex_unlock(&i915->drm.struct_mutex);
-
stream->oa_buffer.vaddr = NULL;
}
+static void
+free_oa_configs(struct i915_perf_stream *stream)
+{
+ struct i915_oa_config_bo *oa_bo, *tmp;
+
+ i915_oa_config_put(stream->oa_config);
+ llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+ free_oa_config_bo(oa_bo);
+}
+
+static void
+free_noa_wait(struct i915_perf_stream *stream)
+{
+ i915_vma_unpin_and_release(&stream->noa_wait, 0);
+}
+
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- BUG_ON(stream != dev_priv->perf.exclusive_stream);
+ BUG_ON(stream != perf->exclusive_stream);
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.exclusive_stream = NULL;
- dev_priv->perf.ops.disable_metric_set(stream);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
if (stream->ctx)
oa_put_render_ctx_id(stream);
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
+ free_noa_wait(stream);
- if (dev_priv->perf.spurious_report_rs.missed) {
+ if (perf->spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.spurious_report_rs.missed);
+ perf->spurious_report_rs.missed);
}
}
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
@@ -1401,13 +1421,14 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
- I915_WRITE(GEN7_OASTATUS2,
- gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
+ intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
+ gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
- I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+ intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
+ gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
@@ -1419,7 +1440,7 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.gen7_latched_oastatus1 = 0;
+ stream->perf->gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1434,25 +1455,22 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
stream->pollin = false;
}
static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- I915_WRITE(GEN8_OASTATUS, 0);
- I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
+ intel_uncore_write(uncore, GEN8_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
stream->oa_buffer.head = gtt_offset;
- I915_WRITE(GEN8_OABUFFER_UDW, 0);
+ intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
/*
* PRM says:
@@ -1462,9 +1480,9 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
* to enable proper functionality of the overflow
* bit."
*/
- I915_WRITE(GEN8_OABUFFER, gtt_offset |
+ intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
- I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
+ intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
@@ -1493,35 +1511,82 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
*/
memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ stream->pollin = false;
+}
+
+static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+
+ intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR,
+ gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = gtt_offset;
+
+ /*
+ * PRM says:
+ *
+ * "This MMIO must be set before the OATAILPTR
+ * register and after the OAHEADPTR register. This is
+ * to enable proper functionality of the overflow
+ * bit."
+ */
+ intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset |
+ OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
+ intel_uncore_write(uncore, GEN12_OAG_OATAILPTR,
+ gtt_offset & GEN12_OAG_OATAILPTR_MASK);
+
+ /* Mark that we need updated tail pointers to read from... */
+ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+
+ /*
+ * Reset state used to recognise context switches, affecting which
+ * reports we will forward to userspace while filtering for a single
+ * context.
+ */
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
+
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+
/*
- * Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
+ * NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen8_append_oa_reports() that looks at the
+ * reason field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
*/
+ memset(stream->oa_buffer.vaddr, 0,
+ stream->oa_buffer.vma->size);
+
stream->pollin = false;
}
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
struct drm_i915_gem_object *bo;
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct i915_vma *vma;
int ret;
if (WARN_ON(stream->oa_buffer.vma))
return -ENODEV;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
DRM_ERROR("Failed to allocate OA buffer\n");
- ret = PTR_ERR(bo);
- goto unlock;
+ return PTR_ERR(bo);
}
i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
@@ -1541,11 +1606,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
goto err_unpin;
}
- DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(stream->oa_buffer.vma),
- stream->oa_buffer.vaddr);
-
- goto unlock;
+ return 0;
err_unpin:
__i915_vma_unpin(vma);
@@ -1556,55 +1617,389 @@ err_unref:
stream->oa_buffer.vaddr = NULL;
stream->oa_buffer.vma = NULL;
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
-static void config_oa_regs(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg *regs,
- u32 n_regs)
+static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
+ bool save, i915_reg_t reg, u32 offset,
+ u32 dword_count)
+{
+ u32 cmd;
+ u32 d;
+
+ cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(stream->perf->i915) >= 8)
+ cmd++;
+
+ for (d = 0; d < dword_count; d++) {
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
+ *cs++ = intel_gt_scratch_offset(stream->engine->gt,
+ offset) + 4 * d;
+ *cs++ = 0;
+ }
+
+ return cs;
+}
+
+static int alloc_noa_wait(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ const u64 delay_ticks = 0xffffffffffffffff -
+ DIV64_U64_ROUND_UP(
+ atomic64_read(&stream->perf->noa_programming_delay) *
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz,
+ 1000000ull);
+ const u32 base = stream->engine->mmio_base;
+#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
+ u32 *batch, *ts0, *cs, *jump;
+ int ret, i;
+ enum {
+ START_TS,
+ NOW_TS,
+ DELTA_TS,
+ JUMP_PREDICATE,
+ DELTA_TARGET,
+ N_CS_GPR
+ };
+
+ bo = i915_gem_object_create_internal(i915, 4096);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ return PTR_ERR(bo);
+ }
+
+ /*
+ * We pin in GGTT because multiple OA config BOs jump into this
+ * buffer, so its address needs to stay fixed for the lifetime of
+ * the i915/perf stream.
+ */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ ret = PTR_ERR(batch);
+ goto err_unpin;
+ }
+
+ /* Save registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, true /* save */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, true /* save */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* First timestamp snapshot location. */
+ ts0 = cs;
+
+ /*
+ * Initial snapshot of the timestamp register to implement the wait.
+ * We work with 32-bit values, so clear out the top 32 bits of the
+ * register because the ALU operates on 64-bit values.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
+
+ /*
+ * This is the location we're going to jump back into until the
+ * required amount of time has passed.
+ */
+ jump = cs;
+
+ /*
+ * Take another snapshot of the timestamp register. Take care to clear
+ * the top 32 bits of CS_GPR(NOW_TS) as we're using it for other
+ * operations below.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
+
+ /*
+ * Do a diff between the 2 timestamps and store the result back into
+ * CS_GPR(1).
+ */
+ *cs++ = MI_MATH(5);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
+ *cs++ = MI_MATH_SUB;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+ * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
+ * timestamp has rolled over its 32 bits) into the predicate register
+ * to be used for the predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Restart from the beginning if we had timestamps roll over. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
+ *cs++ = 0;
+
+ /*
+ * Now take the diff between the two previous timestamps and add it
+ * to: ((1 << 64) - 1) - delay_ticks
+ *
+ * When the Carry Flag contains 1 this means the elapsed time is
+ * longer than the expected delay, and we can exit the wait loop.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
+ *cs++ = lower_32_bits(delay_ticks);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
+ *cs++ = upper_32_bits(delay_ticks);
+
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+ * Transfer the result into the predicate register to be used for the
+ * predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1);
+
+ /* Predicate the jump. */
+ *cs++ = (INTEL_GEN(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
+ *cs++ = 0;
+
+ /* Restore registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, false /* restore */, CS_GPR(i),
+ INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, false /* restore */, MI_PREDICATE_RESULT_1,
+ INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+
+ /* And return to the ring. */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
+
+ i915_gem_object_flush_map(bo);
+ i915_gem_object_unpin_map(bo);
+
+ stream->noa_wait = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin_and_release(&vma, 0);
+err_unref:
+ i915_gem_object_put(bo);
+ return ret;
+}
+
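The exit condition of the wait loop built above rests on one identity: DELTA_TARGET is preloaded with ((1 << 64) - 1) - delay_ticks, so the MI_MATH add of DELTA_TS overflows (setting the carry flag) exactly once the elapsed ticks exceed the delay. A standalone check of that identity, with unsigned wraparound standing in for the CS ALU carry flag (illustrative):

#include <stdio.h>
#include <stdint.h>

static int delay_elapsed(uint64_t delta_ts, uint64_t delay_ticks)
{
	uint64_t target = UINT64_MAX - delay_ticks;

	/* sum wrapped below target <=> 64-bit carry <=> delta > delay */
	return delta_ts + target < target;
}

int main(void)
{
	printf("%d\n", delay_elapsed(100, 500)); /* 0: keep looping */
	printf("%d\n", delay_elapsed(501, 500)); /* 1: exit the wait */
	return 0;
}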
+static u32 *write_cs_mi_lri(u32 *cs,
+ const struct i915_oa_reg *reg_data,
+ u32 n_regs)
{
u32 i;
for (i = 0; i < n_regs; i++) {
- const struct i915_oa_reg *reg = regs + i;
+ if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
+ u32 n_lri = min_t(u32,
+ n_regs - i,
+ MI_LOAD_REGISTER_IMM_MAX_REGS);
- I915_WRITE(reg->addr, reg->value);
+ *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
+ }
+ *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
+ *cs++ = reg_data[i].value;
}
+
+ return cs;
}
-static void delay_after_mux(void)
+static int num_lri_dwords(int num_regs)
{
+ int count = 0;
+
+ if (num_regs > 0) {
+ count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
+ count += num_regs * 2;
+ }
+
+ return count;
+}
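Sizing recap for num_lri_dwords() above: each MI_LOAD_REGISTER_IMM header covers at most MI_LOAD_REGISTER_IMM_MAX_REGS registers, and every register costs two dwords (offset + value). A standalone version (the MAX_REGS value below is an assumption for the sketch, not necessarily the kernel's):

#include <stdio.h>

#define MI_LOAD_REGISTER_IMM_MAX_REGS 126 /* assumed limit for the sketch */

static int num_lri_dwords(int num_regs)
{
	int count = 0;

	if (num_regs > 0) {
		/* one header dword per batch of MAX_REGS registers */
		count += (num_regs + MI_LOAD_REGISTER_IMM_MAX_REGS - 1) /
			 MI_LOAD_REGISTER_IMM_MAX_REGS;
		count += num_regs * 2; /* offset + value per register */
	}
	return count;
}

int main(void)
{
	printf("%d\n", num_lri_dwords(1));   /* 1 header + 2 = 3 */
	printf("%d\n", num_lri_dwords(200)); /* 2 headers + 400 = 402 */
	return 0;
}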
+
+static struct i915_oa_config_bo *
+alloc_oa_config_buffer(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_oa_config_bo *oa_bo;
+ size_t config_length = 0;
+ u32 *cs;
+ int err;
+
+ oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
+ if (!oa_bo)
+ return ERR_PTR(-ENOMEM);
+
+ config_length += num_lri_dwords(oa_config->mux_regs_len);
+ config_length += num_lri_dwords(oa_config->b_counter_regs_len);
+ config_length += num_lri_dwords(oa_config->flex_regs_len);
+ config_length += 3; /* MI_BATCH_BUFFER_START */
+ config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_free;
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_oa_bo;
+ }
+
+ cs = write_cs_mi_lri(cs,
+ oa_config->mux_regs,
+ oa_config->mux_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->flex_regs,
+ oa_config->flex_regs_len);
+
+ /* Jump into the active wait. */
+ *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8);
+ *cs++ = i915_ggtt_offset(stream->noa_wait);
+ *cs++ = 0;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ oa_bo->vma = i915_vma_instance(obj,
+ &stream->engine->gt->ggtt->vm,
+ NULL);
+ if (IS_ERR(oa_bo->vma)) {
+ err = PTR_ERR(oa_bo->vma);
+ goto err_oa_bo;
+ }
+
+ oa_bo->oa_config = i915_oa_config_get(oa_config);
+ llist_add(&oa_bo->node, &stream->oa_config_bos);
+
+ return oa_bo;
+
+err_oa_bo:
+ i915_gem_object_put(obj);
+err_free:
+ kfree(oa_bo);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *
+get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
+{
+ struct i915_oa_config_bo *oa_bo;
+
/*
- * It apparently takes a fairly long time for a new MUX
- * configuration to be be applied after these register writes.
- * This delay duration was derived empirically based on the
- * render_basic config but hopefully it covers the maximum
- * configuration latency.
- *
- * As a fallback, the checks in _append_oa_reports() to skip
- * invalid OA reports do also seem to work to discard reports
- * generated before this config has completed - albeit not
- * silently.
- *
- * Unfortunately this is essentially a magic number, since we
- * don't currently know of a reliable mechanism for predicting
- * how long the MUX config will take to apply and besides
- * seeing invalid reports we don't know of a reliable way to
- * explicitly check that the MUX config has landed.
- *
- * It's even possible we've miss characterized the underlying
- * problem - it just seems like the simplest explanation why
- * a delay at this location would mitigate any invalid reports.
+ * Look for the buffer in the already allocated BOs attached
+ * to the stream.
*/
- usleep_range(15000, 20000);
+ llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
+ if (oa_bo->oa_config == oa_config &&
+ memcmp(oa_bo->oa_config->uuid,
+ oa_config->uuid,
+ sizeof(oa_config->uuid)) == 0)
+ goto out;
+ }
+
+ oa_bo = alloc_oa_config_buffer(stream, oa_config);
+ if (IS_ERR(oa_bo))
+ return ERR_CAST(oa_bo);
+
+out:
+ return i915_vma_get(oa_bo->vma);
+}
+
+static int emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = get_oa_vma(stream, oa_config);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err_vma_put;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma_unpin;
+ }
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unlock(vma);
+ if (err)
+ goto err_add_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start, 0,
+ I915_DISPATCH_SECURE);
+err_add_request:
+ i915_request_add(rq);
+err_vma_unpin:
+ i915_vma_unpin(vma);
+err_vma_put:
+ i915_vma_put(vma);
+ return err;
+}
+
+static struct intel_context *oa_context(struct i915_perf_stream *stream)
+{
+ return stream->pinned_ctx ?: stream->engine->kernel_context;
}
static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
/*
* PRM:
@@ -1616,31 +2011,24 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* count the events from non-render domain. Unit level clock
* gating for RCS should also be disabled.
*/
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
-
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
- return 0;
+ return emit_oa_config(stream, stream->oa_config, oa_context(stream));
}
static void hsw_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
- ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
- GEN7_DOP_CLOCK_GATE_ENABLE));
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
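hsw_enable_metric_set() and hsw_disable_metric_set() above drop the open-coded I915_READ/I915_WRITE pairs for intel_uncore_rmw(uncore, reg, clear, set), i.e. new = (old & ~clear) | set in a single call. A standalone model of that helper (register and bit values are made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	uint32_t fake_reg = 0xf0f0;

	rmw(&fake_reg, 0x00f0, 0);    /* clear a gate-enable style bit */
	rmw(&fake_reg, 0, 0x0004);    /* set a gate-disable style bit */
	printf("0x%04x\n", fake_reg); /* 0xf004 */
	return 0;
}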
@@ -1672,14 +2060,11 @@ static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
* in the case that the OA unit has been disabled.
*/
static void
-gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
- struct intel_context *ce,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
-{
- struct drm_i915_private *i915 = ce->engine->i915;
- u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
+gen8_update_reg_state_unlocked(const struct intel_context *ce,
+ const struct i915_perf_stream *stream)
+{
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1690,21 +2075,28 @@ gen8_update_reg_state_unlocked(struct i915_perf_stream *stream,
EU_PERF_CNTL5,
EU_PERF_CNTL6,
};
+ u32 *reg_state = ce->lrc_reg_state;
int i;
- CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME);
+ if (IS_GEN(stream->perf->i915, 12)) {
+ u32 format = stream->oa_buffer.format;
- for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i],
- oa_config_flex_reg(oa_config, flex_regs[i]));
+ reg_state[ctx_oactxctrl + 1] =
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
+ } else {
+ reg_state[ctx_oactxctrl + 1] =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
}
- CTX_REG(reg_state,
- CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- intel_sseu_make_rpcs(i915, &ce->sseu));
+ for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++)
+ reg_state[ctx_flexeu0 + i * 2 + 1] =
+ oa_config_flex_reg(stream->oa_config, flex_regs[i]);
+
+ reg_state[CTX_R_PWR_CLK_STATE] =
+ intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu);
}
struct flex {
@@ -1728,7 +2120,7 @@ gen8_store_flex(struct i915_request *rq,
offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
do {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = offset + (flex->offset + 1) * sizeof(u32);
+ *cs++ = offset + flex->offset * sizeof(u32);
*cs++ = 0;
*cs++ = flex->value;
} while (flex++, --count);
@@ -1832,6 +2224,36 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
return err;
}
+static int gen12_emit_oar_config(struct intel_context *ce, bool enable)
+{
+ struct i915_request *rq;
+ u32 *cs;
+ int err = 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base));
+ *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+out:
+ i915_request_add(rq);
+
+ return err;
+}
+
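gen12_emit_oar_config() above writes RING_CONTEXT_CONTROL with _MASKED_FIELD(), the usual i915 encoding for masked registers: the upper 16 bits select which bits the write may touch and the lower 16 carry the new values, so unrelated bits keep their state. A standalone model of the encoding (illustrative):

#include <stdio.h>
#include <stdint.h>

#define MASKED_FIELD(mask, value) (((uint32_t)(mask) << 16) | (value))

static void masked_write(uint16_t *reg, uint32_t v)
{
	uint16_t mask = v >> 16;

	*reg = (*reg & ~mask) | ((uint16_t)v & mask);
}

int main(void)
{
	uint16_t ctx_ctrl = 0x00ff;

	masked_write(&ctx_ctrl, MASKED_FIELD(0x0100, 0x0100)); /* set bit 8 */
	masked_write(&ctx_ctrl, MASKED_FIELD(0x0001, 0x0000)); /* clear bit 0 */
	printf("0x%04x\n", ctx_ctrl); /* 0x01fe */
	return 0;
}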
/*
* Manages updating the per-context aspects of the OA stream
* configuration across all contexts.
@@ -1856,24 +2278,22 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
*
* Note: it's only the RCS/Render context that has any OA state.
*/
-static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config)
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *i915 = stream->dev_priv;
+ struct drm_i915_private *i915 = stream->perf->i915;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset;
-#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
+ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
struct flex regs[] = {
{
GEN8_R_PWR_CLK_STATE,
CTX_R_PWR_CLK_STATE,
},
{
- GEN8_OACTXCONTROL,
- i915->perf.ctx_oactxctrl_offset,
- ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME)
+ IS_GEN(i915, 12) ?
+ GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL,
+ stream->perf->ctx_oactxctrl_offset + 1,
},
{ EU_PERF_CNTL0, ctx_flexeuN(0) },
{ EU_PERF_CNTL1, ctx_flexeuN(1) },
@@ -1885,13 +2305,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
};
#undef ctx_flexeuN
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- int i;
+ struct i915_gem_context *ctx, *cn;
+ size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs);
+ int i, err;
- for (i = 2; i < ARRAY_SIZE(regs); i++)
+ if (IS_GEN(i915, 12)) {
+ u32 format = stream->oa_buffer.format;
+
+ regs[1].value =
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
+ } else {
+ regs[1].value =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
+ }
+
+ for (i = 2; !!ctx_flexeu0 && i < array_size; i++)
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ lockdep_assert_held(&stream->perf->lock);
/*
* The OA register config is setup through the context image. This image
@@ -1909,16 +2343,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
* context. Contexts idle at the time of reconfiguration are not
* trapped behind the barrier.
*/
- list_for_each_entry(ctx, &i915->contexts.list, link) {
- int err;
-
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
if (ctx == i915->kernel_context)
continue;
- err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
- if (err)
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ spin_unlock(&i915->gem.contexts.lock);
+
+ err = gen8_configure_context(ctx, regs, array_size);
+ if (err) {
+ i915_gem_context_put(ctx);
return err;
+ }
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
/*
* After updating all other contexts, we need to modify ourselves.
@@ -1927,14 +2372,13 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
*/
for_each_uabi_engine(engine, i915) {
struct intel_context *ce = engine->kernel_context;
- int err;
if (engine->class != RENDER_CLASS)
continue;
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
- err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
+ err = gen8_modify_self(ce, regs, array_size);
if (err)
return err;
}
@@ -1944,8 +2388,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1971,10 +2415,10 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN_RANGE(dev_priv, 9, 11)) {
- I915_WRITE(GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) {
+ intel_uncore_write(uncore, GEN8_OA_DEBUG,
+ _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -1982,45 +2426,102 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(stream, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- delay_after_mux();
+ return emit_oa_config(stream, oa_config, oa_context(stream));
+}
+
+static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ int ret;
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG,
+ /* Disable clk ratio reports, like previous Gens. */
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ /*
+ * If the user didn't require OA reports, instruct the
+ * hardware not to emit ctx switch reports.
+ */
+ (!(stream->sample_flags & SAMPLE_OA_REPORT) ?
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) :
+ _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS)));
+
+ intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ?
+ (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
+ (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
+ : 0);
- return 0;
+ /*
+ * Update all contexts prior to writing the mux configurations as we need
+ * to make sure all slices/subslices are ON before writing to NOA
+ * registers.
+ */
+ ret = lrc_configure_all_contexts(stream, oa_config);
+ if (ret)
+ return ret;
+
+ /*
+ * For Gen12, performance counters are saved/restored as part
+ * of the context image. Only enable them for the context that
+ * requested this.
+ */
+ if (stream->ctx) {
+ ret = gen12_emit_oar_config(stream->pinned_ctx,
+ oa_config != NULL);
+ if (ret)
+ return ret;
+ }
+
+ return emit_oa_config(stream, oa_config, oa_context(stream));
}
static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
static void gen10_disable_metric_set(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
+
+ /* Reset all contexts' slices/subslices configurations. */
+ lrc_configure_all_contexts(stream, NULL);
+
+ /* Make sure we disable noa to save power. */
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+}
+
+static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(stream, NULL);
+ lrc_configure_all_contexts(stream, NULL);
+
+ /* disable the context save/restore or OAR counters */
+ if (stream->ctx)
+ gen12_emit_oar_config(stream->pinned_ctx, false);
/* Make sure we disable noa to save power. */
- I915_WRITE(RPM_CONFIG1,
- I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = stream->specific_ctx_id;
bool periodic = stream->periodic;
@@ -2038,19 +2539,19 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
*/
gen7_init_oa_buffer(stream);
- I915_WRITE(GEN7_OACONTROL,
- (ctx_id & GEN7_OACONTROL_CTX_MASK) |
- (period_exponent <<
- GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
- (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
- (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
- (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
- GEN7_OACONTROL_ENABLE);
+ intel_uncore_write(uncore, GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 report_format = stream->oa_buffer.format;
/*
@@ -2069,9 +2570,28 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* filtering and instead filter on the cpu based on the context-id
* field of reports
*/
- I915_WRITE(GEN8_OACONTROL, (report_format <<
- GEN8_OA_REPORT_FORMAT_SHIFT) |
- GEN8_OA_COUNTER_ENABLE);
+ intel_uncore_write(uncore, GEN8_OACONTROL,
+ (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
+ GEN8_OA_COUNTER_ENABLE);
+}
+
+static void gen12_oa_enable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 report_format = stream->oa_buffer.format;
+
+ /*
+ * If we don't want OA reports from the OA buffer, then we don't even
+ * need to program the OAG unit.
+ */
+ if (!(stream->sample_flags & SAMPLE_OA_REPORT))
+ return;
+
+ gen12_init_oa_buffer(stream);
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL,
+ (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) |
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE);
}
/**
@@ -2085,9 +2605,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.ops.oa_enable(stream);
+ stream->perf->ops.oa_enable(stream);
if (stream->periodic)
hrtimer_start(&stream->poll_check_timer,
@@ -2097,7 +2615,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN7_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -2108,7 +2626,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct intel_uncore *uncore = &stream->dev_priv->uncore;
+ struct intel_uncore *uncore = stream->uncore;
intel_uncore_write(uncore, GEN8_OACONTROL, 0);
if (intel_wait_for_register(uncore,
@@ -2117,6 +2635,18 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
+static void gen12_oa_disable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
+ GEN12_OAG_OACONTROL,
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
+ 50))
+ DRM_ERROR("wait for OA to be disabled timed out\n");
+}
+
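gen12_oa_disable() follows the same shape as its gen7/gen8 counterparts: write zero, then poll until the enable bit reads back clear. A standalone sketch of such a bounded poll, with read_reg() standing in for the MMIO read that intel_wait_for_register() performs:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint32_t read_reg(void); /* stand-in for the MMIO read */

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
        return (b->tv_sec - a->tv_sec) * 1000 +
               (b->tv_nsec - a->tv_nsec) / 1000000;
}

static bool wait_for_clear(uint32_t mask, long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if ((read_reg() & mask) == 0)
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while (elapsed_ms(&start, &now) < timeout_ms);

        return (read_reg() & mask) == 0;        /* one last sample */
}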
/**
* i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
* @stream: An i915 perf stream opened for OA metrics
@@ -2127,9 +2657,7 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.ops.oa_disable(stream);
+ stream->perf->ops.oa_disable(stream);
if (stream->periodic)
hrtimer_cancel(&stream->poll_check_timer);
@@ -2166,15 +2694,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
int format_size;
int ret;
- /* If the sysfs metrics/ directory wasn't registered for some
+ if (!props->engine) {
+ DRM_DEBUG("OA engine not specified\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If the sysfs metrics/ directory wasn't registered for some
* reason then don't let userspace try their luck with config
* IDs
*/
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -2184,16 +2718,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.ops.enable_metric_set) {
+ if (!perf->ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
- /* To avoid the complexity of having to accurately filter
+ /*
+ * To avoid the complexity of having to accurately filter
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.exclusive_stream) {
+ if (perf->exclusive_stream) {
DRM_DEBUG("OA unit already in use\n");
return -EBUSY;
}
@@ -2203,9 +2738,12 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
+ stream->engine = props->engine;
+ stream->uncore = stream->engine->gt->uncore;
+
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa_formats[props->oa_format].size;
+ format_size = perf->oa_formats[props->oa_format].size;
stream->sample_flags |= SAMPLE_OA_REPORT;
stream->sample_size += format_size;
@@ -2214,8 +2752,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (WARN_ON(stream->oa_buffer.format_size == 0))
return -EINVAL;
+ stream->hold_preemption = props->hold_preemption;
+
stream->oa_buffer.format =
- dev_priv->perf.oa_formats[props->oa_format].format;
+ perf->oa_formats[props->oa_format].format;
stream->periodic = props->oa_periodic;
if (stream->periodic)
@@ -2229,9 +2769,16 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
}
}
- ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+ ret = alloc_noa_wait(stream);
if (ret) {
+ DRM_DEBUG("Unable to allocate NOA wait batch buffer\n");
+ goto err_noa_wait_alloc;
+ }
+
+ stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
+ if (!stream->oa_config) {
DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ ret = -EINVAL;
goto err_config;
}
@@ -2247,27 +2794,24 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_get(stream->engine);
+ intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
-
stream->ops = &i915_oa_stream_ops;
- dev_priv->perf.exclusive_stream = stream;
+ perf->exclusive_stream = stream;
- ret = dev_priv->perf.ops.enable_metric_set(stream);
+ ret = perf->ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ DRM_DEBUG("opening stream oa config uuid=%s\n",
+ stream->oa_config->uuid);
hrtimer_init(&stream->poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -2278,38 +2822,40 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- dev_priv->perf.exclusive_stream = NULL;
- dev_priv->perf.ops.disable_metric_set(stream);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ perf->exclusive_stream = NULL;
+ perf->ops.disable_metric_set(stream);
-err_lock:
free_oa_buffer(stream);
err_oa_buf_alloc:
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
- intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
err_config:
+ free_noa_wait(stream);
+
+err_noa_wait_alloc:
if (stream->ctx)
oa_put_render_ctx_id(stream);
return ret;
}
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *regs)
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct i915_perf_stream *stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+
if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, stream);
}
/**
@@ -2379,7 +2925,7 @@ static ssize_t i915_perf_read(struct file *file,
loff_t *ppos)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
ssize_t ret;
/* To ensure it's handled consistently we simply treat all reads of a
@@ -2402,15 +2948,15 @@ static ssize_t i915_perf_read(struct file *file,
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file,
buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
} while (ret == -EAGAIN);
} else {
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_read_locked(stream, file, buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -2448,7 +2994,6 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
/**
* i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
- * @dev_priv: i915 device instance
* @stream: An i915 perf stream
* @file: An i915 perf stream file
* @wait: poll() state table
@@ -2457,15 +3002,14 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
* &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
* will be woken for new stream data.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: any poll events that are ready without sleeping
*/
-static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
- struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait)
+static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
{
__poll_t events = 0;
@@ -2499,12 +3043,12 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
__poll_t ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_poll_locked(stream, file, wait);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2529,6 +3073,9 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
if (stream->ops->enable)
stream->ops->enable(stream);
+
+ if (stream->hold_preemption)
+ i915_gem_context_set_nopreempt(stream->ctx);
}
/**
@@ -2553,17 +3100,54 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
/* Allow stream->ops->disable() to refer to this */
stream->enabled = false;
+ if (stream->hold_preemption)
+ i915_gem_context_clear_nopreempt(stream->ctx);
+
if (stream->ops->disable)
stream->ops->disable(stream);
}
+static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ unsigned long metrics_set)
+{
+ struct i915_oa_config *config;
+ long ret = stream->oa_config->id;
+
+ config = i915_perf_get_oa_config(stream->perf, metrics_set);
+ if (!config)
+ return -EINVAL;
+
+ if (config != stream->oa_config) {
+ int err;
+
+ /*
+ * If OA is bound to a specific context, emit the
+ * reconfiguration inline from that context. The update
+ * will then be ordered with respect to submission on that
+ * context.
+ *
+ * When set globally, we use a low priority kernel context,
+ * so the update effectively takes effect when idle.
+ */
+ err = emit_oa_config(stream, config, oa_context(stream));
+ if (err == 0)
+ config = xchg(&stream->oa_config, config);
+ else
+ ret = err;
+ }
+
+ i915_oa_config_put(config);
+
+ return ret;
+}
+
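From userspace, the new ioctl takes the target metrics-set id directly as its argument and, on success, returns the id of the previously active config. A minimal sketch, assuming stream_fd came from DRM_IOCTL_I915_PERF_OPEN and metrics_set was read from sysfs:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns the previous config id, or -1 with errno set. */
static long perf_stream_set_config(int stream_fd, uint64_t metrics_set)
{
        return ioctl(stream_fd, I915_PERF_IOCTL_CONFIG,
                     (unsigned long)metrics_set);
}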
/**
* i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
* @stream: An i915 perf stream
* @cmd: the ioctl request
* @arg: the ioctl data
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*
* Returns: zero on success or a negative error code. Returns -EINVAL for
@@ -2580,6 +3164,8 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
case I915_PERF_IOCTL_DISABLE:
i915_perf_disable_locked(stream);
return 0;
+ case I915_PERF_IOCTL_CONFIG:
+ return i915_perf_config_locked(stream, arg);
}
return -EINVAL;
@@ -2601,12 +3187,12 @@ static long i915_perf_ioctl(struct file *file,
unsigned long arg)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
long ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
ret = i915_perf_ioctl_locked(stream, cmd, arg);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
return ret;
}
@@ -2618,7 +3204,7 @@ static long i915_perf_ioctl(struct file *file,
* Frees all resources associated with the given i915 perf @stream, disabling
* any associated data capture in the process.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &perf->lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*/
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
@@ -2629,8 +3215,6 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
if (stream->ops->destroy)
stream->ops->destroy(stream);
- list_del(&stream->link);
-
if (stream->ctx)
i915_gem_context_put(stream->ctx);
@@ -2651,14 +3235,14 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
i915_perf_destroy_locked(stream);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
/* Release the reference the perf stream kept on the driver. */
- drm_dev_put(&dev_priv->drm);
+ drm_dev_put(&perf->i915->drm);
return 0;
}
@@ -2680,7 +3264,7 @@ static const struct file_operations fops = {
/**
* i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
* @props: individually validated u64 property value pairs
* @file: drm file
@@ -2688,7 +3272,7 @@ static const struct file_operations fops = {
* See i915_perf_ioctl_open() for interface details.
*
* Implements further stream config validation and stream initialization on
- * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * behalf of i915_perf_open_ioctl() with the &perf->lock mutex
* taken to serialize with any non-file-operation driver hooks.
*
* Note: at this point the @props have only been validated in isolation and
@@ -2703,7 +3287,7 @@ static const struct file_operations fops = {
* Returns: zero on success or a negative error code.
*/
static int
-i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+i915_perf_open_ioctl_locked(struct i915_perf *perf,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props,
struct drm_file *file)
@@ -2734,17 +3318,34 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
* rest of the system, which we consider acceptable for a
* non-privileged client.
*
- * For Gen8+ the OA unit no longer supports clock gating off for a
+ * For Gen8->11 the OA unit no longer supports clock gating off for a
* specific context and the kernel can't securely stop the counters
* from updating as system-wide / global values. Even though we can
* filter reports based on the included context ID we can't block
* clients from seeing the raw / global counter values via
* MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
* enable the OA unit by default.
+ *
+ * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
+ * per context basis. So we can relax requirements there if the user
+ * doesn't request global stream access (i.e. query-based sampling
+ * using MI_REPORT_PERF_COUNT).
*/
- if (IS_HASWELL(dev_priv) && specific_ctx)
+ if (IS_HASWELL(perf->i915) && specific_ctx)
+ privileged_op = false;
+ else if (IS_GEN(perf->i915, 12) && specific_ctx &&
+ (props->sample_flags & SAMPLE_OA_REPORT) == 0)
privileged_op = false;
+ if (props->hold_preemption) {
+ if (!props->single_context) {
+ DRM_DEBUG("preemption disable with no context\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ privileged_op = true;
+ }
+
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
@@ -2752,7 +3353,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
*/
if (privileged_op &&
i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
}
@@ -2763,7 +3364,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_ctx;
}
- stream->dev_priv = dev_priv;
+ stream->perf = perf;
stream->ctx = specific_ctx;
ret = i915_oa_stream_init(stream, param, props);
@@ -2779,8 +3380,6 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_flags;
}
- list_add(&stream->link, &dev_priv->perf.streams);
-
if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
@@ -2789,7 +3388,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
if (stream_fd < 0) {
ret = stream_fd;
- goto err_open;
+ goto err_flags;
}
if (!(param->flags & I915_PERF_FLAG_DISABLED))
@@ -2798,12 +3397,10 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
/* Take a reference on the driver that will be kept with stream_fd
* until its release.
*/
- drm_dev_get(&dev_priv->drm);
+ drm_dev_get(&perf->i915->drm);
return stream_fd;
-err_open:
- list_del(&stream->link);
err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
@@ -2816,15 +3413,15 @@ err:
return ret;
}
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz);
}
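The conversion above makes the OA timer period (2^(exponent + 1)) / cs_timestamp_frequency. A standalone sketch with a worked value; the 12000 kHz timestamp frequency is only an example, not a claim about any particular platform:

#include <stdint.h>
#include <stdio.h>

static uint64_t oa_exponent_to_ns(uint64_t freq_khz, int exponent)
{
        return 1000000000ULL * (2ULL << exponent) / (1000ULL * freq_khz);
}

int main(void)
{
        /* exponent 16 at 12000 kHz -> ~10.9 ms between OA reports */
        printf("%llu ns\n",
               (unsigned long long)oa_exponent_to_ns(12000, 16));
        return 0;
}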
/**
* read_properties_unlocked - validate + copy userspace stream open properties
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @uprops: The array of u64 key value pairs given by userspace
* @n_props: The number of key value pairs expected in @uprops
* @props: The stream configuration built up while validating properties
@@ -2837,7 +3434,7 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
* we shouldn't validate or assume anything about ordering here. This doesn't
* rule out defining new properties with ordering requirements in the future.
*/
-static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+static int read_properties_unlocked(struct i915_perf *perf,
u64 __user *uprops,
u32 n_props,
struct perf_open_properties *props)
@@ -2852,6 +3449,15 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
return -EINVAL;
}
+ /* At the moment we only support using i915-perf on the RCS. */
+ props->engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0);
+ if (!props->engine) {
+ DRM_DEBUG("No RENDER-capable engines\n");
+ return -EINVAL;
+ }
+
/* Considering that ID = 0 is reserved and assuming that we don't
* (currently) expect any configurations to ever specify duplicate
* values for a particular property ID then the last _PROP_MAX value is
@@ -2903,7 +3509,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa_formats[value].size) {
+ if (!perf->oa_formats[value].size) {
DRM_DEBUG("Unsupported OA report format %llu\n",
value);
return -EINVAL;
@@ -2924,7 +3530,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
*/
BUILD_BUG_ON(sizeof(oa_period) != 8);
- oa_period = oa_exponent_to_ns(dev_priv, value);
+ oa_period = oa_exponent_to_ns(perf, value);
/* This check is primarily to ensure that oa_period <=
* UINT32_MAX (before passing to do_div which only
@@ -2949,6 +3555,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
+ case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
+ props->hold_preemption = !!value;
+ break;
case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
return -EINVAL;
@@ -2978,7 +3587,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
* mutex to avoid an awkward lockdep with mmap_sem.
*
* Most of the implementation details are handled by
- * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * i915_perf_open_ioctl_locked() after taking the &perf->lock
* mutex for serializing with any non-file-operation driver hooks.
*
* Return: A newly opened i915 Perf stream file descriptor or negative
@@ -2987,13 +3596,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_open_param *param = data;
struct perf_open_properties props;
u32 known_open_flags;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3006,124 +3615,130 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = read_properties_unlocked(dev_priv,
+ ret = read_properties_unlocked(perf,
u64_to_user_ptr(param->properties_ptr),
param->num_properties,
&props);
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
+ ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
+ mutex_unlock(&perf->lock);
return ret;
}
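For reference, a userspace sketch of opening a stream that exercises the new property: stream properties are passed as a flat array of (key, value) u64 pairs. The metrics-set id of 1 is a placeholder (real ids are read from the metrics/<uuid>/id files in sysfs), and note that the kernel treats hold-preemption as a privileged request:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int open_oa_stream(int drm_fd, uint32_t ctx_id)
{
        uint64_t props[] = {
                DRM_I915_PERF_PROP_CTX_HANDLE,          ctx_id,
                DRM_I915_PERF_PROP_SAMPLE_OA,           1,
                DRM_I915_PERF_PROP_OA_METRICS_SET,      1,      /* placeholder */
                DRM_I915_PERF_PROP_OA_FORMAT,
                        I915_OA_FORMAT_A32u40_A4u32_B8_C8,
                DRM_I915_PERF_PROP_OA_EXPONENT,         16,
                DRM_I915_PERF_PROP_HOLD_PREEMPTION,     1,
        };
        struct drm_i915_perf_open_param param = {
                .flags = I915_PERF_FLAG_FD_CLOEXEC,
                .num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
                .properties_ptr = (uintptr_t)props,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}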
/**
* i915_perf_register - exposes i915-perf to userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* In particular OA metric sets are advertised under a sysfs metrics/
* directory allowing userspace to enumerate valid IDs that can be
* used to open an i915-perf stream.
*/
-void i915_perf_register(struct drm_i915_private *dev_priv)
+void i915_perf_register(struct drm_i915_private *i915)
{
+ struct i915_perf *perf = &i915->perf;
int ret;
- if (!dev_priv->perf.initialized)
+ if (!perf->i915)
return;
/* To be sure we're synchronized with an attempted
* i915_perf_open_ioctl(); considering that we register after
* being exposed to userspace.
*/
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&perf->lock);
- dev_priv->perf.metrics_kobj =
+ perf->metrics_kobj =
kobject_create_and_add("metrics",
- &dev_priv->drm.primary->kdev->kobj);
- if (!dev_priv->perf.metrics_kobj)
+ &i915->drm.primary->kdev->kobj);
+ if (!perf->metrics_kobj)
goto exit;
- sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr);
-
- if (INTEL_GEN(dev_priv) >= 11) {
- i915_perf_load_test_config_icl(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv))
- i915_perf_load_test_config_sklgt2(dev_priv);
- else if (IS_SKL_GT3(dev_priv))
- i915_perf_load_test_config_sklgt3(dev_priv);
- else if (IS_SKL_GT4(dev_priv))
- i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
-}
-
- if (dev_priv->perf.test_config.id == 0)
+ sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr);
+
+ if (IS_TIGERLAKE(i915)) {
+ i915_perf_load_test_config_tgl(i915);
+ } else if (INTEL_GEN(i915) >= 11) {
+ i915_perf_load_test_config_icl(i915);
+ } else if (IS_CANNONLAKE(i915)) {
+ i915_perf_load_test_config_cnl(i915);
+ } else if (IS_COFFEELAKE(i915)) {
+ if (IS_CFL_GT2(i915))
+ i915_perf_load_test_config_cflgt2(i915);
+ if (IS_CFL_GT3(i915))
+ i915_perf_load_test_config_cflgt3(i915);
+ } else if (IS_GEMINILAKE(i915)) {
+ i915_perf_load_test_config_glk(i915);
+ } else if (IS_KABYLAKE(i915)) {
+ if (IS_KBL_GT2(i915))
+ i915_perf_load_test_config_kblgt2(i915);
+ else if (IS_KBL_GT3(i915))
+ i915_perf_load_test_config_kblgt3(i915);
+ } else if (IS_BROXTON(i915)) {
+ i915_perf_load_test_config_bxt(i915);
+ } else if (IS_SKYLAKE(i915)) {
+ if (IS_SKL_GT2(i915))
+ i915_perf_load_test_config_sklgt2(i915);
+ else if (IS_SKL_GT3(i915))
+ i915_perf_load_test_config_sklgt3(i915);
+ else if (IS_SKL_GT4(i915))
+ i915_perf_load_test_config_sklgt4(i915);
+ } else if (IS_CHERRYVIEW(i915)) {
+ i915_perf_load_test_config_chv(i915);
+ } else if (IS_BROADWELL(i915)) {
+ i915_perf_load_test_config_bdw(i915);
+ } else if (IS_HASWELL(i915)) {
+ i915_perf_load_test_config_hsw(i915);
+ }
+
+ if (perf->test_config.id == 0)
goto sysfs_error;
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.test_config.sysfs_metric);
+ ret = sysfs_create_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
if (ret)
goto sysfs_error;
- atomic_set(&dev_priv->perf.test_config.ref_count, 1);
+ perf->test_config.perf = perf;
+ kref_init(&perf->test_config.ref);
goto exit;
sysfs_error:
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
exit:
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&perf->lock);
}
/**
* i915_perf_unregister - hide i915-perf from userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* i915-perf state cleanup is split up into an 'unregister' and
* 'deinit' phase where the interface is first hidden from
* userspace by i915_perf_unregister() before cleaning up
* remaining state in i915_perf_fini().
*/
-void i915_perf_unregister(struct drm_i915_private *dev_priv)
+void i915_perf_unregister(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.metrics_kobj)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj)
return;
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.test_config.sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj,
+ &perf->test_config.sysfs_metric);
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
}
-static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
static const i915_reg_t flex_eu_regs[] = {
EU_PERF_CNTL0,
@@ -3143,56 +3758,80 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
return false;
}
-static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+#define ADDR_IN_RANGE(addr, start, end) \
+ ((addr) >= (start) && \
+ (addr) <= (end))
+
+#define REG_IN_RANGE(addr, start, end) \
+ ((addr) >= i915_mmio_reg_offset(start) && \
+ (addr) <= i915_mmio_reg_offset(end))
+
+#define REG_EQUAL(addr, mmio) \
+ ((addr) == i915_mmio_reg_offset(mmio))
+
+static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, OACEC0_0, OACEC7_1);
+}
+
+static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+{
+ return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) ||
+ REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) ||
+ REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) ||
+ REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI);
+}
+
+static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
- addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
- addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OACEC0_0) &&
- addr <= i915_mmio_reg_offset(OACEC7_1));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
}
-static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
- (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
- addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
+ return gen8_is_valid_mux_addr(perf, addr) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI);
}
-static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
- (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
- addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) ||
+ REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) ||
+ REG_EQUAL(addr, HSW_MBVID2_MISR0);
}
-static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen8_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
+ return gen7_is_valid_mux_addr(perf, addr) ||
+ ADDR_IN_RANGE(addr, 0x182300, 0x1823A4);
}
-static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x25100 && addr <= 0x2FF90) ||
- (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
- addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
- addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
+ return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) ||
+ REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) ||
+ REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) ||
+ REG_EQUAL(addr, GEN12_OAA_DBG_REG) ||
+ REG_EQUAL(addr, GEN12_OAG_OA_PESS) ||
+ REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF);
}
-static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x182300 && addr <= 0x1823A4);
+ return REG_EQUAL(addr, NOA_WRITE) ||
+ REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
+ REG_EQUAL(addr, GDT_CHICKEN_BITS) ||
+ REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) ||
+ REG_EQUAL(addr, RPM_CONFIG0) ||
+ REG_EQUAL(addr, RPM_CONFIG1) ||
+ REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8));
}
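These per-gen validators implement an allowlist: every register offset in a userspace-supplied OA config must fall in a known-safe range before the driver will ever program it. A standalone illustration of the pattern; the offsets below are made up purely for the example:

#include <stdbool.h>
#include <stdint.h>

#define ADDR_IN_RANGE(addr, start, end) \
        ((addr) >= (start) && (addr) <= (end))

static bool example_is_valid_mux_addr(uint32_t addr)
{
        return ADDR_IN_RANGE(addr, 0x9800, 0x9888) ||   /* fictitious NOA block */
               addr == 0x20cc;                          /* fictitious chicken reg */
}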
static u32 mask_reg_value(u32 reg, u32 val)
@@ -3201,21 +3840,21 @@ static u32 mask_reg_value(u32 reg, u32 val)
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
+ if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
+ if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
-static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
- bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
+ bool (*is_valid)(struct i915_perf *perf, u32 addr),
u32 __user *regs,
u32 n_regs)
{
@@ -3245,7 +3884,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (err)
goto addr_err;
- if (!is_valid(dev_priv, addr)) {
+ if (!is_valid(perf, addr)) {
DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
err = -EINVAL;
goto addr_err;
@@ -3278,7 +3917,7 @@ static ssize_t show_dynamic_id(struct device *dev,
return sprintf(buf, "%d\n", oa_config->id);
}
-static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
struct i915_oa_config *oa_config)
{
sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
@@ -3293,7 +3932,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
oa_config->sysfs_metric.name = oa_config->uuid;
oa_config->sysfs_metric.attrs = oa_config->attrs;
- return sysfs_create_group(dev_priv->perf.metrics_kobj,
+ return sysfs_create_group(perf->metrics_kobj,
&oa_config->sysfs_metric);
}
@@ -3313,17 +3952,18 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
+ struct i915_oa_reg *regs;
int err, id;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
- if (!dev_priv->perf.metrics_kobj) {
+ if (!perf->metrics_kobj) {
DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
@@ -3346,7 +3986,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- atomic_set(&oa_config->ref_count, 1);
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
if (!uuid_is_valid(args->uuid)) {
DRM_DEBUG("Invalid uuid format for OA config\n");
@@ -3360,59 +4001,59 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
oa_config->mux_regs_len = args->n_mux_regs;
- oa_config->mux_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_mux_reg,
- u64_to_user_ptr(args->mux_regs_ptr),
- args->n_mux_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_mux_reg,
+ u64_to_user_ptr(args->mux_regs_ptr),
+ args->n_mux_regs);
- if (IS_ERR(oa_config->mux_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for mux_regs\n");
- err = PTR_ERR(oa_config->mux_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->mux_regs = regs;
oa_config->b_counter_regs_len = args->n_boolean_regs;
- oa_config->b_counter_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_b_counter_reg,
- u64_to_user_ptr(args->boolean_regs_ptr),
- args->n_boolean_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_b_counter_reg,
+ u64_to_user_ptr(args->boolean_regs_ptr),
+ args->n_boolean_regs);
- if (IS_ERR(oa_config->b_counter_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
- err = PTR_ERR(oa_config->b_counter_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->b_counter_regs = regs;
- if (INTEL_GEN(dev_priv) < 8) {
+ if (INTEL_GEN(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
}
} else {
oa_config->flex_regs_len = args->n_flex_regs;
- oa_config->flex_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.ops.is_valid_flex_reg,
- u64_to_user_ptr(args->flex_regs_ptr),
- args->n_flex_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_flex_reg,
+ u64_to_user_ptr(args->flex_regs_ptr),
+ args->n_flex_regs);
- if (IS_ERR(oa_config->flex_regs)) {
+ if (IS_ERR(regs)) {
DRM_DEBUG("Failed to create OA config for flex_regs\n");
- err = PTR_ERR(oa_config->flex_regs);
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->flex_regs = regs;
}
- err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ err = mutex_lock_interruptible(&perf->metrics_lock);
if (err)
goto reg_err;
/* We shouldn't have too many configs, so this iteration shouldn't be
* too costly.
*/
- idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
if (!strcmp(tmp->uuid, oa_config->uuid)) {
DRM_DEBUG("OA config already exists with this uuid\n");
err = -EADDRINUSE;
@@ -3420,14 +4061,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
}
}
- err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+ err = create_dynamic_oa_sysfs_entry(perf, oa_config);
if (err) {
DRM_DEBUG("Failed to create sysfs entry for OA config\n");
goto sysfs_err;
}
/* Config id 0 is invalid, id 1 for kernel stored test config. */
- oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+ oa_config->id = idr_alloc(&perf->metrics_idr,
oa_config, 2,
0, GFP_KERNEL);
if (oa_config->id < 0) {
@@ -3436,16 +4077,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
goto sysfs_err;
}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
return oa_config->id;
sysfs_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
reg_err:
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
DRM_DEBUG("Failed to add new OA config\n");
return err;
}
@@ -3464,12 +4105,12 @@ reg_err:
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
u64 *arg = data;
struct i915_oa_config *oa_config;
int ret;
- if (!dev_priv->perf.initialized) {
+ if (!perf->i915) {
DRM_DEBUG("i915 perf interface not available for this system\n");
return -ENOTSUPP;
}
@@ -3479,31 +4120,33 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -EACCES;
}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ ret = mutex_lock_interruptible(&perf->metrics_lock);
if (ret)
- goto lock_err;
+ return ret;
- oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+ oa_config = idr_find(&perf->metrics_idr, *arg);
if (!oa_config) {
DRM_DEBUG("Failed to remove unknown OA config\n");
ret = -ENOENT;
- goto config_err;
+ goto err_unlock;
}
GEM_BUG_ON(*arg != oa_config->id);
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &oa_config->sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
+
+ idr_remove(&perf->metrics_idr, *arg);
- idr_remove(&dev_priv->perf.metrics_idr, *arg);
+ mutex_unlock(&perf->metrics_lock);
DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
-config_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
-lock_err:
+ return 0;
+
+err_unlock:
+ mutex_unlock(&perf->metrics_lock);
return ret;
}
@@ -3551,103 +4194,126 @@ static struct ctl_table dev_root[] = {
/**
* i915_perf_init - initialize i915-perf state on module load
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
*
* Note: i915-perf initialization is split into an 'init' and 'register'
* phase with the i915_perf_register() exposing state to userspace.
*/
-void i915_perf_init(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
- hsw_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.ops.read = gen7_oa_read;
- dev_priv->perf.ops.oa_hw_tail_read =
- gen7_oa_hw_tail_read;
-
- dev_priv->perf.oa_formats = hsw_oa_formats;
- } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
+void i915_perf_init(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ /* XXX const struct i915_perf_ops! */
+
+ if (IS_HASWELL(i915)) {
+ perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg = NULL;
+ perf->ops.enable_metric_set = hsw_enable_metric_set;
+ perf->ops.disable_metric_set = hsw_disable_metric_set;
+ perf->ops.oa_enable = gen7_oa_enable;
+ perf->ops.oa_disable = gen7_oa_disable;
+ perf->ops.read = gen7_oa_read;
+ perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
+
+ perf->oa_formats = hsw_oa_formats;
+ } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
/* Note: although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa_formats = gen8_plus_oa_formats;
+ perf->ops.read = gen8_oa_read;
- dev_priv->perf.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.ops.read = gen8_oa_read;
- dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ if (IS_GEN_RANGE(i915, 8, 9)) {
+ perf->oa_formats = gen8_plus_oa_formats;
- if (IS_GEN_RANGE(dev_priv, 8, 9)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.ops.is_valid_mux_reg =
+ if (IS_CHERRYVIEW(i915)) {
+ perf->ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 8)) {
- dev_priv->perf.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.ctx_flexeu0_offset = 0x2ce;
+ if (IS_GEN(i915, 8)) {
+ perf->ctx_oactxctrl_offset = 0x120;
+ perf->ctx_flexeu0_offset = 0x2ce;
- dev_priv->perf.gen8_valid_ctx_bit = BIT(25);
+ perf->gen8_valid_ctx_bit = BIT(25);
} else {
- dev_priv->perf.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.ctx_flexeu0_offset = 0x3de;
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
+ perf->gen8_valid_ctx_bit = BIT(16);
}
- } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
- dev_priv->perf.ops.is_valid_b_counter_reg =
+ } else if (IS_GEN_RANGE(i915, 10, 11)) {
+ perf->oa_formats = gen8_plus_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen10_is_valid_mux_addr;
- dev_priv->perf.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen10_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN(dev_priv, 10)) {
- dev_priv->perf.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.ctx_flexeu0_offset = 0x3de;
+ if (IS_GEN(i915, 10)) {
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
} else {
- dev_priv->perf.ctx_oactxctrl_offset = 0x124;
- dev_priv->perf.ctx_flexeu0_offset = 0x78e;
+ perf->ctx_oactxctrl_offset = 0x124;
+ perf->ctx_flexeu0_offset = 0x78e;
}
- dev_priv->perf.gen8_valid_ctx_bit = BIT(16);
+ perf->gen8_valid_ctx_bit = BIT(16);
+ } else if (IS_GEN(i915, 12)) {
+ perf->oa_formats = gen12_oa_formats;
+
+ perf->ops.is_valid_b_counter_reg =
+ gen12_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg =
+ gen12_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ perf->ops.oa_enable = gen12_oa_enable;
+ perf->ops.oa_disable = gen12_oa_disable;
+ perf->ops.enable_metric_set = gen12_enable_metric_set;
+ perf->ops.disable_metric_set = gen12_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
+
+ perf->ctx_flexeu0_offset = 0;
+ perf->ctx_oactxctrl_offset = 0x144;
}
}
- if (dev_priv->perf.ops.enable_metric_set) {
- INIT_LIST_HEAD(&dev_priv->perf.streams);
- mutex_init(&dev_priv->perf.lock);
+ if (perf->ops.enable_metric_set) {
+ mutex_init(&perf->lock);
oa_sample_rate_hard_limit = 1000 *
- (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
- dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+ (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2);
+ perf->sysctl_header = register_sysctl_table(dev_root);
- mutex_init(&dev_priv->perf.metrics_lock);
- idr_init(&dev_priv->perf.metrics_idr);
+ mutex_init(&perf->metrics_lock);
+ idr_init(&perf->metrics_idr);
/* We set up some ratelimit state to potentially throttle any
* _NOTES about spurious, invalid OA reports which we don't
@@ -3659,44 +4325,70 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*
* Using the same limiting factors as printk_ratelimit()
*/
- ratelimit_state_init(&dev_priv->perf.spurious_report_rs,
- 5 * HZ, 10);
+ ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
/* Since we use a DRM_NOTE for spurious reports it would be
* inconsistent to let __ratelimit() automatically print a
* warning for throttling.
*/
- ratelimit_set_flags(&dev_priv->perf.spurious_report_rs,
+ ratelimit_set_flags(&perf->spurious_report_rs,
RATELIMIT_MSG_ON_RELEASE);
- dev_priv->perf.initialized = true;
+ atomic64_set(&perf->noa_programming_delay,
+ 500 * 1000 /* 500us */);
+
+ perf->i915 = i915;
}
}
static int destroy_config(int id, void *p, void *data)
{
- struct drm_i915_private *dev_priv = data;
- struct i915_oa_config *oa_config = p;
-
- put_oa_config(dev_priv, oa_config);
-
+ i915_oa_config_put(p);
return 0;
}
/**
* i915_perf_fini - Counter part to i915_perf_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*/
-void i915_perf_fini(struct drm_i915_private *dev_priv)
+void i915_perf_fini(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.initialized)
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->i915)
return;
- idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
- idr_destroy(&dev_priv->perf.metrics_idr);
+ idr_for_each(&perf->metrics_idr, destroy_config, perf);
+ idr_destroy(&perf->metrics_idr);
- unregister_sysctl_table(dev_priv->perf.sysctl_header);
+ unregister_sysctl_table(perf->sysctl_header);
- memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops));
+ memset(&perf->ops, 0, sizeof(perf->ops));
+ perf->i915 = NULL;
+}
- dev_priv->perf.initialized = false;
+/**
+ * i915_perf_ioctl_version - Version of the i915-perf subsystem
+ *
+ * This version number is used by userspace to detect available features.
+ */
+int i915_perf_ioctl_version(void)
+{
+ /*
+ * 1: Initial version
+ * I915_PERF_IOCTL_ENABLE
+ * I915_PERF_IOCTL_DISABLE
+ *
+ * 2: Added runtime modification of OA config.
+ * I915_PERF_IOCTL_CONFIG
+ *
+ * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
+ * preemption on a particular context so that performance data is
+ * accessible from a delta of MI_RPC reports without looking at the
+ * OA buffer.
+ */
+ return 3;
}
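Userspace can gate its feature use on this revision before relying on I915_PERF_IOCTL_CONFIG (revision >= 2) or DRM_I915_PERF_PROP_HOLD_PREEMPTION (revision >= 3). A sketch, assuming the revision is exposed through the I915_PARAM_PERF_REVISION getparam added alongside this interface:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int i915_perf_revision(int drm_fd)
{
        int value = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_PERF_REVISION,
                .value = &value,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return 1;       /* older kernels: only ENABLE/DISABLE */

        return value;
}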
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_perf.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h
index a412b16d9ffc..4ceebce72060 100644
--- a/drivers/gpu/drm/i915/i915_perf.h
+++ b/drivers/gpu/drm/i915/i915_perf.h
@@ -6,11 +6,15 @@
#ifndef __I915_PERF_H__
#define __I915_PERF_H__
+#include <linux/kref.h>
#include <linux/types.h>
+#include "i915_perf_types.h"
+
struct drm_device;
struct drm_file;
struct drm_i915_private;
+struct i915_oa_config;
struct intel_context;
struct intel_engine_cs;
@@ -18,6 +22,7 @@ void i915_perf_init(struct drm_i915_private *i915);
void i915_perf_fini(struct drm_i915_private *i915);
void i915_perf_register(struct drm_i915_private *i915);
void i915_perf_unregister(struct drm_i915_private *i915);
+int i915_perf_ioctl_version(void);
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -25,8 +30,29 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce,
- u32 *reg_state);
+
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
+
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set);
+
+static inline struct i915_oa_config *
+i915_oa_config_get(struct i915_oa_config *oa_config)
+{
+ if (kref_get_unless_zero(&oa_config->ref))
+ return oa_config;
+ else
+ return NULL;
+}
+
+void i915_oa_config_release(struct kref *ref);
+static inline void i915_oa_config_put(struct i915_oa_config *oa_config)
+{
+ if (!oa_config)
+ return;
+
+ kref_put(&oa_config->ref, i915_oa_config_release);
+}
#endif /* __I915_PERF_H__ */
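
The get/put pair above is what makes lockless lookups of OA configs safe: kref_get_unless_zero() refuses to resurrect a config whose last reference is already gone, and i915_oa_config_release() (paired with the rcu_head in the config) frees it after a grace period. A minimal sketch of the lookup pattern these helpers enable, approximating what i915_perf_get_oa_config() likely does rather than quoting it:

static struct i915_oa_config *
lookup_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	/* idr_find() returns a pointer without taking a reference... */
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	/* ...so try to pin it; NULL means it is already being destroyed. */
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config; /* balance with i915_oa_config_put() */
}
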
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
new file mode 100644
index 000000000000..74ddc20a0d37
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -0,0 +1,435 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_PERF_TYPES_H_
+#define _I915_PERF_TYPES_H_
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/llist.h>
+#include <linux/poll.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/wait.h>
+
+#include "i915_reg.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct file;
+struct i915_gem_context;
+struct i915_perf;
+struct i915_vma;
+struct intel_context;
+struct intel_engine_cs;
+
+struct i915_oa_format {
+ u32 format;
+ int size;
+};
+
+struct i915_oa_reg {
+ i915_reg_t addr;
+ u32 value;
+};
+
+struct i915_oa_config {
+ struct i915_perf *perf;
+
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ struct kref ref;
+ struct rcu_head rcu;
+};
+
+struct i915_perf_stream;
+
+/**
+ * struct i915_perf_stream_ops - the OPs to support a specific stream type
+ */
+struct i915_perf_stream_ops {
+ /**
+ * @enable: Enables the collection of HW samples, either in response to
+ * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
+ * without `I915_PERF_FLAG_DISABLED`.
+ */
+ void (*enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable: Disables the collection of HW samples, either in response
+ * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
+ * the stream.
+ */
+ void (*disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @poll_wait: Call poll_wait, passing a wait queue that will be woken
+ * once there is something ready to read() for the stream
+ */
+ void (*poll_wait)(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait);
+
+ /**
+ * @wait_unlocked: For handling a blocking read, wait until there is
+ * something ready to read() for the stream. E.g. wait on the same
+ * wait queue that would be passed to poll_wait().
+ */
+ int (*wait_unlocked)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy buffered metrics as records to userspace
+ * **buf**: the userspace destination buffer
+ * **count**: the number of bytes to copy, requested by userspace
+ * **offset**: zero at the start of the read, updated as the read
+ * proceeds; it represents how many bytes have been copied so far and
+ * the buffer offset for copying the next record.
+ *
+ * Copy as many buffered i915 perf samples and records for this stream
+ * to userspace as will fit in the given buffer.
+ *
+ * Only write complete records; return -%ENOSPC if there isn't room
+ * for a complete record.
+ *
+ * Return any error condition that results in a short read such as
+ * -%ENOSPC or -%EFAULT, even though these may be squashed before
+ * returning to userspace.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @destroy: Cleanup any stream specific resources.
+ *
+ * The stream will always be disabled before this is called.
+ */
+ void (*destroy)(struct i915_perf_stream *stream);
+};
+
+/**
+ * struct i915_perf_stream - state for a single open stream FD
+ */
+struct i915_perf_stream {
+ /**
+ * @perf: i915_perf backpointer
+ */
+ struct i915_perf *perf;
+
+ /**
+ * @uncore: mmio access path
+ */
+ struct intel_uncore *uncore;
+
+ /**
+ * @engine: Engine associated with this performance stream.
+ */
+ struct intel_engine_cs *engine;
+
+ /**
+ * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties given when opening a stream, representing the contents
+ * of a single sample as read() by userspace.
+ */
+ u32 sample_flags;
+
+ /**
+ * @sample_size: Considering the configured contents of a sample
+ * combined with the required header size, this is the total size
+ * of a single sample record.
+ */
+ int sample_size;
+
+ /**
+ * @ctx: %NULL if measuring system-wide across all contexts or a
+ * specific context that is being monitored.
+ */
+ struct i915_gem_context *ctx;
+
+ /**
+ * @enabled: Whether the stream is currently enabled, considering
+ * whether the stream was opened in a disabled state and based
+ * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
+ */
+ bool enabled;
+
+ /**
+ * @hold_preemption: Whether preemption is put on hold for command
+ * submissions done on the @ctx. This is useful for some drivers that
+ * cannot easily post-process the OA buffer contents to subtract the
+ * delta of performance counters not associated with @ctx.
+ */
+ bool hold_preemption;
+
+ /**
+ * @ops: The callbacks providing the implementation of this specific
+ * type of configured stream.
+ */
+ const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
+
+ /**
+ * @oa_config_bos: A list of struct i915_oa_config_bo allocated lazily
+ * each time @oa_config changes.
+ */
+ struct llist_head oa_config_bos;
+
+ /**
+ * @pinned_ctx: The OA context-specific information.
+ */
+ struct intel_context *pinned_ctx;
+
+ /**
+ * @specific_ctx_id: The id of the specific context.
+ */
+ u32 specific_ctx_id;
+
+ /**
+ * @specific_ctx_id_mask: The mask used to mask the specific_ctx_id bits.
+ */
+ u32 specific_ctx_id_mask;
+
+ /**
+ * @poll_check_timer: High resolution timer that will periodically
+ * check for data in the circular OA buffer for notifying userspace
+ * (e.g. during a read() or poll()).
+ */
+ struct hrtimer poll_check_timer;
+
+ /**
+ * @poll_wq: The wait queue that the hrtimer callback wakes when it
+ * sees data ready to read in the circular OA buffer.
+ */
+ wait_queue_head_t poll_wq;
+
+ /**
+ * @pollin: Whether there is data available to read.
+ */
+ bool pollin;
+
+ /**
+ * @periodic: Whether periodic sampling is currently enabled.
+ */
+ bool periodic;
+
+ /**
+ * @period_exponent: The OA unit sampling frequency is derived from this.
+ */
+ int period_exponent;
+
+ /**
+ * @oa_buffer: State of the OA buffer.
+ */
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ u32 last_ctx_id;
+ int format;
+ int format_size;
+ int size_exponent;
+
+ /**
+ * @ptr_lock: Locks reads and writes to all head/tail state
+ *
+ * Consider: the head and tail pointer state needs to be read
+ * consistently from a hrtimer callback (atomic context) and
+ * read() fop (user context) with tail pointer updates happening
+ * in atomic context and head updates in user context and the
+ * (unlikely) possibility of read() errors needing to reset all
+ * head/tail state.
+ *
+ * Note: Contention/performance aren't currently a significant
+ * concern here considering the relatively low frequency of
+ * hrtimer callbacks (5ms period) and that reads typically only
+ * happen in response to a hrtimer event and likely complete
+ * before the next callback.
+ *
+ * Note: This lock is not held *while* reading and copying data
+ * to userspace, so the value of head observed in hrtimer
+ * callbacks won't represent any partial consumption of data.
+ */
+ spinlock_t ptr_lock;
+
+ /**
+ * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready
+ * to be used for reading.
+ *
+ * Initial values of 0xffffffff are invalid and imply that an
+ * update is required (and should be ignored by an attempted
+ * read)
+ */
+ struct {
+ u32 offset;
+ } tails[2];
+
+ /**
+ * @aged_tail_idx: Index of the aged tail that read() may consume data up to.
+ */
+ unsigned int aged_tail_idx;
+
+ /**
+ * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer
+ * was read; used to determine when it is old enough to trust.
+ */
+ u64 aging_timestamp;
+
+ /**
+ * @head: Although we can always read back the head pointer register,
+ * we prefer to avoid trusting the HW state, just to avoid any
+ * risk that some hardware condition could somehow bump the
+ * head pointer unpredictably and cause us to forward the wrong
+ * OA buffer data to userspace.
+ */
+ u32 head;
+ } oa_buffer;
+
+ /**
+ * @noa_wait: A batch buffer doing a wait on the GPU for the NOA logic to be
+ * reprogrammed.
+ */
+ struct i915_vma *noa_wait;
+};
+
+/**
+ * struct i915_oa_ops - Gen specific implementation of an OA unit stream
+ */
+struct i915_oa_ops {
+ /**
+ * @is_valid_b_counter_reg: Validates register's address for
+ * programming boolean counters for a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates register's address for programming mux
+ * for a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates register's address for programming
+ * flex EU filtering for a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct i915_perf *perf, u32 addr);
+
+ /**
+ * @enable_metric_set: Selects and applies any MUX configuration to set
+ * up the Boolean and Custom (B/C) counters that are part of the
+ * counter reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+ * the OA unit.
+ */
+ void (*disable_metric_set)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_enable: Enable periodic sampling
+ */
+ void (*oa_enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @oa_disable: Disable periodic sampling
+ */
+ void (*oa_disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy data from the circular OA buffer into a given userspace
+ * buffer.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @oa_hw_tail_read: read the OA tail pointer register
+ *
+ * In particular this enables us to share all the fiddly code for
+ * handling the OA unit tail pointer race that affects multiple
+ * generations.
+ */
+ u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream);
+};
+
+struct i915_perf {
+ struct drm_i915_private *i915;
+
+ struct kobject *metrics_kobj;
+ struct ctl_table_header *sysctl_header;
+
+ /*
+ * Lock associated with adding/modifying/removing OA configs
+ * in perf->metrics_idr.
+ */
+ struct mutex metrics_lock;
+
+ /*
+ * List of dynamic configurations (struct i915_oa_config);
+ * hold perf->metrics_lock to access it.
+ */
+ struct idr metrics_idr;
+
+ /*
+ * Lock associated with anything below within this structure
+ * except exclusive_stream.
+ */
+ struct mutex lock;
+
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated with its file
+ * descriptor, you need to hold dev_priv->drm.struct_mutex.
+ */
+ struct i915_perf_stream *exclusive_stream;
+
+ /**
+ * For rate limiting any notifications of spurious,
+ * invalid OA reports
+ */
+ struct ratelimit_state spurious_report_rs;
+
+ struct i915_oa_config test_config;
+
+ u32 gen7_latched_oastatus1;
+ u32 ctx_oactxctrl_offset;
+ u32 ctx_flexeu0_offset;
+
+ /**
+ * The RPT_ID/reason field for Gen8+ includes a bit
+ * to determine if the CTX ID in the report is valid,
+ * but the specific bit differs between Gen 8 and 9.
+ */
+ u32 gen8_valid_ctx_bit;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+
+ atomic64_t noa_programming_delay;
+};
+
+#endif /* _I915_PERF_TYPES_H_ */
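
The tails[2]/aged_tail_idx/aging_timestamp trio above exists because the OA unit's tail register can run ahead of the report data actually landing in memory. A simplified model of the two-stage scheme follows; it is a sketch, not the driver's exact code, it assumes oa_buffer.ptr_lock is held by the caller, TAIL_AGE_NSEC is a hypothetical margin, and the real logic additionally sanity-checks report timestamps.

#define TAIL_AGE_NSEC 100000 /* hypothetical aging margin */

static u32 update_oa_tails(struct i915_perf_stream *stream,
			   u32 hw_tail, u64 now)
{
	unsigned int aged = stream->oa_buffer.aged_tail_idx;
	unsigned int aging = (aged + 1) % 2;

	/* Promote the aging tail once it has been stable long enough
	 * for the reports behind it to have landed in memory. */
	if (stream->oa_buffer.tails[aging].offset != 0xffffffff &&
	    now - stream->oa_buffer.aging_timestamp > TAIL_AGE_NSEC) {
		stream->oa_buffer.aged_tail_idx = aged = aging;
		aging = (aged + 1) % 2;
		stream->oa_buffer.tails[aging].offset = 0xffffffff;
	}

	/* Start aging a newly observed hardware tail. */
	if (stream->oa_buffer.tails[aging].offset == 0xffffffff &&
	    hw_tail != stream->oa_buffer.tails[aged].offset) {
		stream->oa_buffer.tails[aging].offset = hw_tail;
		stream->oa_buffer.aging_timestamp = now;
	}

	/* read() may safely consume data up to the aged tail only. */
	return stream->oa_buffer.tails[aged].offset;
}
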
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 212acaef581e..0d40dccd1409 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -11,6 +11,8 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_pmu.h"
@@ -116,22 +118,124 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
return enable;
}
-void i915_pmu_gt_parked(struct drm_i915_private *i915)
+static u64 __get_rc6(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct drm_i915_private *i915 = gt->i915;
+ u64 val;
- if (!pmu->base.event_init)
- return;
+ val = intel_rc6_residency_ns(&gt->rc6,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+#if IS_ENABLED(CONFIG_PM)
+
+static inline s64 ktime_since(const ktime_t kt)
+{
+ return ktime_to_ns(ktime_sub(ktime_get(), kt));
+}
+
+static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
+{
+ u64 val;
- spin_lock_irq(&pmu->lock);
/*
- * Signal sampling timer to stop if only engine events are enabled and
- * GPU went idle.
+ * We think we are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
*/
- pmu->timer_enabled = pmu_needs_timer(pmu, false);
- spin_unlock_irq(&pmu->lock);
+ val = ktime_since(pmu->sleep_last);
+ val += pmu->sample[__I915_SAMPLE_RC6].cur;
+
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ return val;
}
+static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
+{
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a larger value than returned
+ * previously.
+ */
+ if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ return val;
+}
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct i915_pmu *pmu = &i915->pmu;
+ unsigned long flags;
+ u64 val;
+
+ val = 0;
+ if (intel_gt_pm_get_if_awake(gt)) {
+ val = __get_rc6(gt);
+ intel_gt_pm_put(gt);
+ }
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ if (val)
+ val = __pmu_update_rc6(pmu, val);
+ else
+ val = __pmu_estimate_rc6(pmu);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return val;
+}
+
+static void park_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ __pmu_update_rc6(pmu, __get_rc6(&i915->gt));
+
+ pmu->sleep_last = ktime_get();
+}
+
+static void unpark_rc6(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ /* Estimate how long we slept and accumulate that into rc6 counters */
+ if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+ __pmu_estimate_rc6(pmu);
+}
+
+#else
+
+static u64 get_rc6(struct intel_gt *gt)
+{
+ return __get_rc6(gt);
+}
+
+static void park_rc6(struct drm_i915_private *i915) {}
+static void unpark_rc6(struct drm_i915_private *i915) {}
+
+#endif
+
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
@@ -143,6 +247,26 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
}
}
+void i915_pmu_gt_parked(struct drm_i915_private *i915)
+{
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (!pmu->base.event_init)
+ return;
+
+ spin_lock_irq(&pmu->lock);
+
+ park_rc6(i915);
+
+ /*
+ * Signal sampling timer to stop if only engine events are enabled and
+ * GPU went idle.
+ */
+ pmu->timer_enabled = pmu_needs_timer(pmu, false);
+
+ spin_unlock_irq(&pmu->lock);
+}
+
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
@@ -151,10 +275,14 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
return;
spin_lock_irq(&pmu->lock);
+
/*
* Re-enable sampling timer when GPU goes active.
*/
__i915_pmu_maybe_start_timer(pmu);
+
+ unpark_rc6(i915);
+
spin_unlock_irq(&pmu->lock);
}
@@ -174,7 +302,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct intel_engine_pmu *pmu = &engine->pmu;
unsigned long flags;
bool busy;
@@ -194,6 +322,10 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
if (val & RING_WAIT_SEMAPHORE)
add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ goto skip;
+
/*
* While waiting on a semaphore or event, MI_MODE reports the
* ring as idle. However, previously using the seqno, and with
@@ -227,25 +359,26 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct i915_pmu *pmu = &i915->pmu;
+ struct intel_rps *rps = &gt->rps;
if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
u32 val;
- val = i915->gt_pm.rps.cur_freq;
+ val = rps->cur_freq;
if (intel_gt_pm_get_if_awake(gt)) {
val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
- val = intel_get_cagf(i915, val);
+ val = intel_get_cagf(rps, val);
intel_gt_pm_put(gt);
}
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
- intel_gpu_freq(i915, val),
+ intel_gpu_freq(rps, val),
period_ns / 1000);
}
if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
- intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
+ intel_gpu_freq(rps, rps->cur_freq),
period_ns / 1000);
}
}
@@ -426,104 +559,6 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __get_rc6(struct intel_gt *gt)
-{
- struct drm_i915_private *i915 = gt->i915;
- u64 val;
-
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
-
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
-
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
-
- return val;
-}
-
-static u64 get_rc6(struct intel_gt *gt)
-{
-#if IS_ENABLED(CONFIG_PM)
- struct drm_i915_private *i915 = gt->i915;
- struct intel_runtime_pm *rpm = &i915->runtime_pm;
- struct i915_pmu *pmu = &i915->pmu;
- intel_wakeref_t wakeref;
- unsigned long flags;
- u64 val;
-
- wakeref = intel_runtime_pm_get_if_in_use(rpm);
- if (wakeref) {
- val = __get_rc6(gt);
- intel_runtime_pm_put(rpm, wakeref);
-
- /*
- * If we are coming back from being runtime suspended we must
- * be careful not to report a larger value than returned
- * previously.
- */
-
- spin_lock_irqsave(&pmu->lock, flags);
-
- if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- }
-
- spin_unlock_irqrestore(&pmu->lock, flags);
- } else {
- struct device *kdev = rpm->kdev;
-
- /*
- * We are runtime suspended.
- *
- * Report the delta from when the device was suspended to now,
- * on top of the last known real value, as the approximated RC6
- * counter value.
- */
- spin_lock_irqsave(&pmu->lock, flags);
-
- /*
- * After the above branch intel_runtime_pm_get_if_in_use failed
- * to get the runtime PM reference we cannot assume we are in
- * runtime suspend since we can either: a) race with coming out
- * of it before we took the power.lock, or b) there are other
- * states than suspended which can bring us here.
- *
- * We need to double-check that we are indeed currently runtime
- * suspended and if not we cannot do better than report the last
- * known RC6 value.
- */
- if (pm_runtime_status_suspended(kdev)) {
- val = pm_runtime_suspended_time(kdev);
-
- if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- pmu->suspended_time_last = val;
-
- val -= pmu->suspended_time_last;
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
-
- pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
- val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
- } else {
- val = pmu->sample[__I915_SAMPLE_RC6].cur;
- }
-
- spin_unlock_irqrestore(&pmu->lock, flags);
- }
-
- return val;
-#else
- return __get_rc6(gt);
-#endif
-}
-
static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -1047,21 +1082,43 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
cpuhp_remove_multi_state(cpuhp_slot);
}
+static bool is_igp(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ /* IGP is 0000:00:02.0 */
+ return pci_domain_nr(pdev->bus) == 0 &&
+ pdev->bus->number == 0 &&
+ PCI_SLOT(pdev->devfn) == 2 &&
+ PCI_FUNC(pdev->devfn) == 0;
+}
+
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
- int ret;
+ int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
dev_info(i915->drm.dev, "PMU not supported for this GPU.");
return;
}
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs) {
- ret = -ENOMEM;
+ spin_lock_init(&pmu->lock);
+ hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pmu->timer.function = i915_sample;
+
+ if (!is_igp(i915))
+ pmu->name = kasprintf(GFP_KERNEL,
+ "i915-%s",
+ dev_name(i915->drm.dev));
+ else
+ pmu->name = "i915";
+ if (!pmu->name)
goto err;
- }
+
+ i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
+ if (!i915_pmu_events_attr_group.attrs)
+ goto err_name;
pmu->base.attr_groups = i915_pmu_attr_groups;
pmu->base.task_ctx_nr = perf_invalid_context;
@@ -1073,13 +1130,9 @@ void i915_pmu_register(struct drm_i915_private *i915)
pmu->base.read = i915_pmu_event_read;
pmu->base.event_idx = i915_pmu_event_event_idx;
- spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
-
- ret = perf_pmu_register(&pmu->base, "i915", -1);
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err;
+ goto err_attr;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1089,10 +1142,14 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
-err:
+err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
- DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+err_name:
+ if (!is_igp(i915))
+ kfree(pmu->name);
+err:
+ dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
@@ -1110,5 +1167,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ if (!is_igp(i915))
+ kfree(pmu->name);
free_event_attributes(pmu);
}
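
The park/unpark split above hinges on one invariant: the RC6 counter exposed through perf must never go backwards, even when an estimate made while suspended overshoots the real counter read on wakeup. A standalone sketch of that clamp in plain C, mirroring the __pmu_update_rc6() semantics with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t rc6_real, rc6_estimated;

static uint64_t update_rc6(uint64_t val)
{
	if (val >= rc6_estimated) {
		rc6_estimated = 0;   /* real counter caught up */
		rc6_real = val;
	} else {
		val = rc6_estimated; /* never report a smaller value */
	}
	return val;
}

int main(void)
{
	rc6_real = 1000;
	rc6_estimated = 1500; /* estimate made while runtime suspended */
	printf("%llu\n", (unsigned long long)update_rc6(1400)); /* 1500 */
	printf("%llu\n", (unsigned long long)update_rc6(1600)); /* 1600 */
	return 0;
}
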
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 4fc4f2478301..bf52e3983631 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -47,6 +47,10 @@ struct i915_pmu {
*/
struct pmu base;
/**
+ * @name: Name as registered with perf core.
+ */
+ const char *name;
+ /**
* @lock: Lock protecting enable mask and ref count handling.
*/
spinlock_t lock;
@@ -97,9 +101,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_time_last: Cached suspend time from PM core.
+ * @sleep_last: Last time GT parked for RC6 estimation.
*/
- u64 suspended_time_last;
+ ktime_t sleep_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 21037a2e2038..732aad148881 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -16,6 +16,12 @@ enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ /* A preemptive pulse used to monitor the health of each engine */
+ I915_PRIORITY_HEARTBEAT,
+
+ /* Interactive workload, scheduled for immediate pageflipping */
+ I915_PRIORITY_DISPLAY,
};
#define I915_USER_PRIORITY_SHIFT 2
@@ -39,6 +45,7 @@ enum {
* active request.
*/
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+#define I915_PRIORITY_BARRIER INT_MAX
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
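
For context on I915_USER_PRIORITY_SHIFT above: user priority levels are scaled up so the low bits remain free for internal adjustments such as I915_PRIORITY_WAIT (__NO_PREEMPTION). A hedged sketch of the assumed encoding; the real helper macros live alongside these defines but are not part of this hunk:

/* Assumed encoding: effective = (user << I915_USER_PRIORITY_SHIFT) | bumps */
static inline int effective_priority(int user_prio, unsigned int bumps)
{
	return (user_prio << 2 /* I915_USER_PRIORITY_SHIFT */) | bumps;
}
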
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index ad9240a0817a..c27cfef9281c 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -7,6 +7,7 @@
#include <linux/nospec.h>
#include "i915_drv.h"
+#include "i915_perf.h"
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
@@ -37,8 +38,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
- u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices);
- u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
int ret;
if (query_item->flags != 0)
@@ -50,8 +49,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
slice_length = sizeof(sseu->slice_mask);
- subslice_length = sseu->max_slices * subslice_stride;
- eu_length = sseu->max_slices * sseu->max_subslices * eu_stride;
+ subslice_length = sseu->max_slices * sseu->ss_stride;
+ eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
@@ -69,9 +68,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.max_eus_per_subslice = sseu->max_eus_per_subslice;
topo.subslice_offset = slice_length;
- topo.subslice_stride = subslice_stride;
+ topo.subslice_stride = sseu->ss_stride;
topo.eu_offset = slice_length + subslice_length;
- topo.eu_stride = eu_stride;
+ topo.eu_stride = sseu->eu_stride;
if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
@@ -142,10 +141,305 @@ query_engine_info(struct drm_i915_private *i915,
return len;
}
+static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
+ u64 user_regs_ptr,
+ u32 kernel_n_regs)
+{
+ /*
+ * We'll just report the number of registers, and won't copy the
+ * registers themselves.
+ */
+ if (user_n_regs == 0)
+ return 0;
+
+ if (user_n_regs < kernel_n_regs)
+ return -EINVAL;
+
+ if (!access_ok(u64_to_user_ptr(user_regs_ptr),
+ 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs,
+ u32 kernel_n_regs,
+ u64 user_regs_ptr,
+ u32 *user_n_regs)
+{
+ u32 r;
+
+ if (*user_n_regs == 0) {
+ *user_n_regs = kernel_n_regs;
+ return 0;
+ }
+
+ *user_n_regs = kernel_n_regs;
+
+ for (r = 0; r < kernel_n_regs; r++) {
+ u32 __user *user_reg_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
+ u32 __user *user_val_ptr =
+ u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
+ sizeof(u32));
+ int ret;
+
+ ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ user_reg_ptr);
+ if (ret)
+ return -EFAULT;
+
+ ret = __put_user(kernel_regs[r].value, user_val_ptr);
+ if (ret)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int query_perf_config_data(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item,
+ bool use_uuid)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct drm_i915_perf_oa_config __user *user_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr +
+ sizeof(struct drm_i915_query_perf_config));
+ struct drm_i915_perf_oa_config user_config;
+ struct i915_perf *perf = &i915->perf;
+ struct i915_oa_config *oa_config;
+ char uuid[UUID_STRING_LEN + 1];
+ u64 config_id;
+ u32 flags, total_size;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ total_size =
+ sizeof(struct drm_i915_query_perf_config) +
+ sizeof(struct drm_i915_perf_oa_config);
+
+ if (query_item->length == 0)
+ return total_size;
+
+ if (query_item->length < total_size) {
+ DRM_DEBUG("Invalid query config data item size=%u expected=%u\n",
+ query_item->length, total_size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_query_config_ptr, total_size))
+ return -EFAULT;
+
+ if (__get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ if (use_uuid) {
+ struct i915_oa_config *tmp;
+ int id;
+
+ BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
+
+ memset(&uuid, 0, sizeof(uuid));
+ if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ sizeof(user_query_config_ptr->uuid)))
+ return -EFAULT;
+
+ oa_config = NULL;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, uuid)) {
+ oa_config = i915_oa_config_get(tmp);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ } else {
+ if (__get_user(config_id, &user_query_config_ptr->config))
+ return -EFAULT;
+
+ oa_config = i915_perf_get_oa_config(perf, config_id);
+ }
+ if (!oa_config)
+ return -ENOENT;
+
+ if (__copy_from_user(&user_config, user_config_ptr,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs,
+ user_config.boolean_regs_ptr,
+ oa_config->b_counter_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs,
+ user_config.flex_regs_ptr,
+ oa_config->flex_regs_len);
+ if (ret)
+ goto out;
+
+ ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs,
+ user_config.mux_regs_ptr,
+ oa_config->mux_regs_len);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len,
+ user_config.boolean_regs_ptr,
+ &user_config.n_boolean_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->flex_regs,
+ oa_config->flex_regs_len,
+ user_config.flex_regs_ptr,
+ &user_config.n_flex_regs);
+ if (ret)
+ goto out;
+
+ ret = copy_perf_config_registers_or_number(oa_config->mux_regs,
+ oa_config->mux_regs_len,
+ user_config.mux_regs_ptr,
+ &user_config.n_mux_regs);
+ if (ret)
+ goto out;
+
+ memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
+
+ if (__copy_to_user(user_config_ptr, &user_config,
+ sizeof(user_config))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = total_size;
+
+out:
+ i915_oa_config_put(oa_config);
+ return ret;
+}
+
+static size_t sizeof_perf_config_list(size_t count)
+{
+ return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count;
+}
+
+static size_t sizeof_perf_metrics(struct i915_perf *perf)
+{
+ struct i915_oa_config *tmp;
+ size_t i;
+ int id;
+
+ i = 1;
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id)
+ i++;
+ rcu_read_unlock();
+
+ return sizeof_perf_config_list(i);
+}
+
+static int query_perf_config_list(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct drm_i915_query_perf_config __user *user_query_config_ptr =
+ u64_to_user_ptr(query_item->data_ptr);
+ struct i915_perf *perf = &i915->perf;
+ u64 *oa_config_ids = NULL;
+ int alloc, n_configs;
+ u32 flags;
+ int ret;
+
+ if (!perf->i915)
+ return -ENODEV;
+
+ if (query_item->length == 0)
+ return sizeof_perf_metrics(perf);
+
+ if (get_user(flags, &user_query_config_ptr->flags))
+ return -EFAULT;
+
+ if (flags != 0)
+ return -EINVAL;
+
+ n_configs = 1;
+ do {
+ struct i915_oa_config *tmp;
+ u64 *ids;
+ int id;
+
+ ids = krealloc(oa_config_ids,
+ n_configs * sizeof(*oa_config_ids),
+ GFP_KERNEL);
+ if (!ids)
+ return -ENOMEM;
+
+ alloc = fetch_and_zero(&n_configs);
+
+ ids[n_configs++] = 1ull; /* reserved for test_config */
+ rcu_read_lock();
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
+ if (n_configs < alloc)
+ ids[n_configs] = id;
+ n_configs++;
+ }
+ rcu_read_unlock();
+
+ oa_config_ids = ids;
+ } while (n_configs > alloc);
+
+ if (query_item->length < sizeof_perf_config_list(n_configs)) {
+ DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n",
+ query_item->length,
+ sizeof_perf_config_list(n_configs));
+ kfree(oa_config_ids);
+ return -EINVAL;
+ }
+
+ if (put_user(n_configs, &user_query_config_ptr->config)) {
+ kfree(oa_config_ids);
+ return -EFAULT;
+ }
+
+ ret = copy_to_user(user_query_config_ptr + 1,
+ oa_config_ids,
+ n_configs * sizeof(*oa_config_ids));
+ kfree(oa_config_ids);
+ if (ret)
+ return -EFAULT;
+
+ return sizeof_perf_config_list(n_configs);
+}
+
+static int query_perf_config(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ switch (query_item->flags) {
+ case DRM_I915_QUERY_PERF_CONFIG_LIST:
+ return query_perf_config_list(i915, query_item);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID:
+ return query_perf_config_data(i915, query_item, true);
+ case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID:
+ return query_perf_config_data(i915, query_item, false);
+ default:
+ return -EINVAL;
+ }
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
+ query_perf_config,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
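
query_perf_config_list() above follows the usual i915_query convention of a two-pass protocol: a zero length asks the kernel for the required size, and a second call fills the buffer with an id count followed by u64 config ids. A hedged userspace sketch of that flow, assuming DRM_I915_QUERY_PERF_CONFIG from the matching uapi change and with error handling abbreviated:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <i915_drm.h>

static struct drm_i915_query_perf_config *perf_config_list(int drm_fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_PERF_CONFIG,
		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_perf_config *data;

	/* Pass 1: length == 0 makes the kernel report the needed size. */
	if (drmIoctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	data = calloc(1, item.length);
	if (!data)
		return NULL;
	item.data_ptr = (uintptr_t)data;

	/* Pass 2: the kernel fills the id count followed by the u64 ids. */
	if (drmIoctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {
		free(data);
		return NULL;
	}
	return data;
}

On return, data->config holds the number of ids and the ids themselves follow the struct, matching what query_perf_config_list() writes above.
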
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f8ee9aba3955..73079b503724 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -413,6 +413,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
+#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE_MAX 4
+
#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
@@ -547,7 +550,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4)
#define MI_PREDICATE_SRC1 _MMIO(0x2408)
#define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4)
-
+#define MI_PREDICATE_DATA _MMIO(0x2410)
+#define MI_PREDICATE_RESULT _MMIO(0x2418)
+#define MI_PREDICATE_RESULT_1 _MMIO(0x241c)
#define MI_PREDICATE_RESULT_2 _MMIO(0x2214)
#define LOWER_SLICE_ENABLED (1 << 0)
#define LOWER_SLICE_DISABLED (0 << 0)
@@ -688,6 +693,45 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OABUFFER_SIZE_8M (6 << 3)
#define OABUFFER_SIZE_16M (7 << 3)
+/* Gen12 OAR unit */
+#define GEN12_OAR_OACONTROL _MMIO(0x2960)
+#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1
+#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OACTXCONTROL _MMIO(0x2360)
+#define GEN12_OAR_OASTATUS _MMIO(0x2968)
+
+/* Gen12 OAG unit */
+#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00)
+#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0
+#define GEN12_OAG_OATAILPTR _MMIO(0xdb04)
+#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0
+
+#define GEN12_OAG_OABUFFER _MMIO(0xdb08)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7)
+#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3)
+#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */
+
+#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28)
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2
+#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1)
+#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0)
+
+#define GEN12_OAG_OACONTROL _MMIO(0xdaf4)
+#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2
+#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0)
+
+#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8)
+#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5)
+#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2)
+#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1)
+
+#define GEN12_OAG_OASTATUS _MMIO(0xdafc)
+#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2)
+#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1)
+#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0)
+
/*
* Flexible, Aggregate EU Counter Registers.
* Note: these aren't contiguous
@@ -924,6 +968,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+/* Same layout as OASTARTTRIGX */
+#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900)
+#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904)
+#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908)
+#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c)
+#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910)
+#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914)
+#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918)
+#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c)
+
+/* Same layout as OAREPORTTRIGX */
+#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920)
+#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924)
+#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928)
+#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c)
+#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930)
+#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934)
+#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938)
+#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c)
+
/* CECX_0 */
#define OACEC_COMPARE_LESS_OR_EQUAL 6
#define OACEC_COMPARE_NOT_EQUAL 5
@@ -940,6 +1004,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC_SELECT_PREV (1 << 19)
#define OACEC_SELECT_BOOLEAN (2 << 19)
+/* 11-bit array 0: pass-through, 1: negated */
+#define GEN12_OASCEC_NEGATE_MASK 0x7ff
+#define GEN12_OASCEC_NEGATE_SHIFT 21
+
/* CECX_1 */
#define OACEC_MASK_MASK 0xffff
#define OACEC_CONSIDERATIONS_MASK 0xffff
@@ -962,6 +1030,42 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC7_0 _MMIO(0x27a8)
#define OACEC7_1 _MMIO(0x27ac)
+/* Same layout as CECX_Y */
+#define GEN12_OAG_CEC0_0 _MMIO(0xd940)
+#define GEN12_OAG_CEC0_1 _MMIO(0xd944)
+#define GEN12_OAG_CEC1_0 _MMIO(0xd948)
+#define GEN12_OAG_CEC1_1 _MMIO(0xd94c)
+#define GEN12_OAG_CEC2_0 _MMIO(0xd950)
+#define GEN12_OAG_CEC2_1 _MMIO(0xd954)
+#define GEN12_OAG_CEC3_0 _MMIO(0xd958)
+#define GEN12_OAG_CEC3_1 _MMIO(0xd95c)
+#define GEN12_OAG_CEC4_0 _MMIO(0xd960)
+#define GEN12_OAG_CEC4_1 _MMIO(0xd964)
+#define GEN12_OAG_CEC5_0 _MMIO(0xd968)
+#define GEN12_OAG_CEC5_1 _MMIO(0xd96c)
+#define GEN12_OAG_CEC6_0 _MMIO(0xd970)
+#define GEN12_OAG_CEC6_1 _MMIO(0xd974)
+#define GEN12_OAG_CEC7_0 _MMIO(0xd978)
+#define GEN12_OAG_CEC7_1 _MMIO(0xd97c)
+
+/* Same layout as CECX_Y + negate 11-bit array */
+#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00)
+#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04)
+#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08)
+#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c)
+#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10)
+#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14)
+#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18)
+#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c)
+#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20)
+#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24)
+#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28)
+#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c)
+#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30)
+#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34)
+#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38)
+#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c)
+
/* OA perf counters */
#define OA_PERFCNT1_LO _MMIO(0x91B8)
#define OA_PERFCNT1_HI _MMIO(0x91BC)
@@ -1042,6 +1146,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
+#define GEN12_OAA_DBG_REG _MMIO(0xdc44)
+#define GEN12_OAG_OA_PESS _MMIO(0x2b2c)
+#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40)
+
#define GDT_CHICKEN_BITS _MMIO(0x9840)
#define GT_NOA_ENABLE 0x00000080
@@ -1962,8 +2070,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
-#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
- _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
+#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \
+ _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C
@@ -1973,10 +2081,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX1LN1_PORT1)
+#define MG_TX1_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX1LN1_PORT1)
#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC
@@ -1986,10 +2094,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
- MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
- MG_TX_LINK_PARAMS_TX2LN1_PORT1)
+#define MG_TX2_LINK_PARAMS(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
+ MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C
@@ -2000,10 +2108,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX1LN1_PORT1)
+#define MG_TX1_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX1LN1_PORT1)
#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC
@@ -2013,10 +2121,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
- MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
- MG_TX_PISO_READLOAD_TX2LN1_PORT1)
+#define MG_TX2_PISO_READLOAD(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
+ MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148
@@ -2027,10 +2135,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
- MG_TX_SWINGCTRL_TX1LN0_PORT2, \
- MG_TX_SWINGCTRL_TX1LN1_PORT1)
+#define MG_TX1_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX1LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX1LN1_PORT1)
#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8
@@ -2040,10 +2148,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
- MG_TX_SWINGCTRL_TX2LN0_PORT2, \
- MG_TX_SWINGCTRL_TX2LN1_PORT1)
+#define MG_TX2_SWINGCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+ MG_TX_SWINGCTRL_TX2LN0_PORT2, \
+ MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0)
@@ -2055,10 +2163,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
- MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
- MG_TX_DRVCTRL_TX1LN1_TXPORT1)
+#define MG_TX1_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
+ MG_TX_DRVCTRL_TX1LN1_TXPORT1)
#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4
#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4
@@ -2068,10 +2176,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
- MG_TX_DRVCTRL_TX2LN0_PORT2, \
- MG_TX_DRVCTRL_TX2LN1_PORT1)
+#define MG_TX2_DRVCTRL(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+ MG_TX_DRVCTRL_TX2LN0_PORT2, \
+ MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24)
#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22)
@@ -2088,10 +2196,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
- MG_CLKHUB_LN0_PORT2, \
- MG_CLKHUB_LN1_PORT1)
+#define MG_CLKHUB(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \
+ MG_CLKHUB_LN0_PORT2, \
+ MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
#define MG_TX_DCC_TX1LN0_PORT1 0x168110
@@ -2102,10 +2210,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
- MG_TX_DCC_TX1LN0_PORT2, \
- MG_TX_DCC_TX1LN1_PORT1)
+#define MG_TX1_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \
+ MG_TX_DCC_TX1LN0_PORT2, \
+ MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
#define MG_TX_DCC_TX2LN1_PORT1 0x168490
#define MG_TX_DCC_TX2LN0_PORT2 0x169090
@@ -2114,10 +2222,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
- MG_TX_DCC_TX2LN0_PORT2, \
- MG_TX_DCC_TX2LN1_PORT1)
+#define MG_TX2_DCC(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \
+ MG_TX_DCC_TX2LN0_PORT2, \
+ MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25)
#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24)
@@ -2130,10 +2238,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(ln, port) \
- MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
- MG_DP_MODE_LN0_ACU_PORT2, \
- MG_DP_MODE_LN1_ACU_PORT1)
+#define MG_DP_MODE(ln, tc_port) \
+ MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \
+ MG_DP_MODE_LN0_ACU_PORT2, \
+ MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6)
#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5)
@@ -2172,13 +2280,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
-#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
+#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
@@ -2459,6 +2567,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
#define RING_FAULT_VALID (1 << 0)
#define DONE_REG _MMIO(0x40b0)
+#define GEN12_GAM_DONE _MMIO(0xcf68)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
@@ -2489,7 +2598,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
+#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
+#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4)
+
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
+#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2)
#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
@@ -2602,6 +2716,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
+
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1 << 31)
@@ -2711,6 +2827,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
@@ -2884,6 +3001,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050)
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10)
@@ -2962,6 +3080,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
+#define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
+
#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -3564,6 +3684,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _PALETTE_A 0xa000
#define _PALETTE_B 0xa800
#define _CHV_PALETTE_C 0xc000
+#define PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
@@ -4044,10 +4167,15 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
#define MSCUNIT_CLKGATE_DIS (1 << 10)
+#define L3_CLKGATE_DIS REG_BIT(16)
+#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524)
#define GWUNIT_CLKGATE_DIS (1 << 16)
+#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528)
+#define CPSSUNIT_CLKGATE_DIS REG_BIT(9)
+
#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
#define VFUNIT_CLKGATE_DIS (1 << 20)
@@ -4141,6 +4269,7 @@ enum {
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
+#define _EXITLINE_A 0x60018
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
@@ -4192,10 +4321,22 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* HSW+ eDP PSR registers */
-#define HSW_EDP_PSR_BASE 0x64800
-#define BDW_EDP_PSR_BASE 0x6f800
-#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
+#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A)
+#define EXITLINE_ENABLE REG_BIT(31)
+#define EXITLINE_MASK REG_GENMASK(12, 0)
+#define EXITLINE_SHIFT 0
+
+/*
+ * HSW+ eDP PSR registers
+ *
+ * On HSW the PSR registers are relative to DDIA (_DDI_BUF_CTL_A + 0x800),
+ * with just one instance of them.
+ */
+#define _HSW_EDP_PSR_BASE 0x64800
+#define _SRD_CTL_A 0x60800
+#define _SRD_CTL_EDP 0x6f800
+#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust)
+#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A))
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
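
The _PSR_ADJ() indirection above deserves a worked example. Assuming hsw_psr_mmio_adjust is set up elsewhere in this series as _SRD_CTL_EDP - _HSW_EDP_PSR_BASE (0x6f800 - 0x64800 = 0xb000) on HSW and 0 otherwise: EDP_PSR_CTL(TRANSCODER_EDP) on HSW resolves via _TRANS2(TRANSCODER_EDP, _SRD_CTL_A) to 0x6f800, and subtracting 0xb000 yields 0x64800, the single legacy DDIA-relative instance. On hardware with per-transcoder PSR the adjustment is 0, so for example EDP_PSR_CTL(TRANSCODER_A) is simply 0x60800.
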
@@ -4221,27 +4362,40 @@ enum {
#define EDP_PSR_TP1_TIME_0us (3 << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
-/* Bspec claims those aren't shifted but stay at 0x64800 */
+/*
+ * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ these registers are
+ * relative to the transcoder, with the bits for each one defined as if no
+ * shift were applied (i.e. as they are for TRANSCODER_EDP).
+ */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
-#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
-#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
-#define EDP_PSR_TRANSCODER_C_SHIFT 24
-#define EDP_PSR_TRANSCODER_B_SHIFT 16
-#define EDP_PSR_TRANSCODER_A_SHIFT 8
-#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
-
-#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
+#define _PSR_IMR_A 0x60814
+#define _PSR_IIR_A 0x60818
+#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A)
+#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
+#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
+ 0 : ((trans) - TRANSCODER_A + 1) * 8)
+#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+
+#define _SRD_AUX_CTL_A 0x60810
+#define _SRD_AUX_CTL_EDP 0x6f810
+#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A))
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
-#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
+#define _SRD_AUX_DATA_A 0x60814
+#define _SRD_AUX_DATA_EDP 0x6f814
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
+#define _SRD_STATUS_A 0x60840
+#define _SRD_STATUS_EDP 0x6f840
+#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A))
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -4266,10 +4420,15 @@ enum {
#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
#define EDP_PSR_STATUS_IDLE_MASK 0xf
-#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
+#define _SRD_PERF_CNT_A 0x60844
+#define _SRD_PERF_CNT_EDP 0x6f844
+#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A))
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
+/* PSR_MASK on SKL+ */
+#define _SRD_DEBUG_A 0x60860
+#define _SRD_DEBUG_EDP 0x6f860
+#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A))
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -4277,7 +4436,9 @@ enum {
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
-#define EDP_PSR2_CTL _MMIO(0x6f900)
+#define _PSR2_CTL_A 0x60900
+#define _PSR2_CTL_EDP 0x6f900
+#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
#define EDP_PSR2_ENABLE (1 << 31)
#define EDP_SU_TRACK_ENABLE (1 << 30)
#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */
@@ -4299,8 +4460,8 @@ enum {
#define _PSR_EVENT_TRANS_B 0x61848
#define _PSR_EVENT_TRANS_C 0x62848
#define _PSR_EVENT_TRANS_D 0x63848
-#define _PSR_EVENT_TRANS_EDP 0x6F848
-#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define _PSR_EVENT_TRANS_EDP 0x6f848
+#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
#define PSR_EVENT_PSR2_DISABLED (1 << 16)
#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
@@ -4318,15 +4479,16 @@ enum {
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
-#define EDP_PSR2_STATUS _MMIO(0x6f940)
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
-#define _PSR2_SU_STATUS_0 0x6F914
-#define _PSR2_SU_STATUS_1 0x6F918
-#define _PSR2_SU_STATUS_2 0x6F91C
-#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1))
-#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3))
+#define _PSR2_SU_STATUS_A 0x60914
+#define _PSR2_SU_STATUS_EDP 0x6f914
+#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4)
+#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3))
#define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10)
#define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
#define PSR2_SU_STATUS_FRAMES 8
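The packing is three 10-bit selective-update block counters per dword, eight frames spread across three dwords. A minimal read helper, as a sketch only (the helper name is hypothetical, and I915_READ is assumed available as elsewhere in this era of the driver):

	static inline u32
	psr2_su_status_frame(struct drm_i915_private *dev_priv,
			     enum transcoder tran, int frame)
	{
		/* frame n lives in dword n/3, in a 10-bit field at bit (n % 3) * 10 */
		u32 val = I915_READ(PSR2_SU_STATUS(tran, frame));

		return (val & PSR2_SU_STATUS_MASK(frame)) >>
		       PSR2_SU_STATUS_SHIFT(frame);
	}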
@@ -4652,6 +4814,7 @@ enum {
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_GMP_DATA_SIZE 36
#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
@@ -5488,45 +5651,9 @@ enum {
*/
#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
-#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
-#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
-#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
-#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
-#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
-
-#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
-#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
-#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
-#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
-#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
-
-#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
-#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
-#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
-#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
-#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
-
-#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
-#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
-#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
-#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
-#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
-
-#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
-#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
-#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
-#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
-#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
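The per-port DATA2..DATA5 defines (and the whole DPC..DPF blocks) can be deleted because every AUX channel is spaced a uniform 0x100 apart, so _MMIO_PORT()/_PORT() can extrapolate from the first two instances:

	/*
	 * base(aux_ch) == _DPA_AUX_CH_CTL +
	 *                 (aux_ch) * (_DPB_AUX_CH_CTL - _DPA_AUX_CH_CTL)
	 *
	 * e.g. DP_AUX_CH_DATA(AUX_CH_D, 2)
	 *        == 0x64314 + 2 * 4 == 0x6431c, the deleted _DPD_AUX_CH_DATA3
	 */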
@@ -5658,6 +5785,11 @@ enum {
#define PIPECONF_CXSR_DOWNCLOCK (1 << 16)
#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
+#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */
+#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0 << 5)
#define PIPECONF_10BPC (1 << 5)
@@ -5745,12 +5877,13 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
-#define PIPEMISC_YUV420_ENABLE (1 << 27)
-#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
@@ -6207,6 +6340,7 @@ enum {
#define CHV_CURSOR_C_OFFSET 0x700e0
#define IVB_CURSOR_B_OFFSET 0x71080
#define IVB_CURSOR_C_OFFSET 0x72080
+#define TGL_CURSOR_D_OFFSET 0x73080
/* Display A control */
#define _DSPACNTR 0x70180
@@ -7177,11 +7311,17 @@ enum {
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE_RED_MASK REG_GENMASK(23, 16)
+#define LGC_PALETTE_GREEN_MASK REG_GENMASK(15, 8)
+#define LGC_PALETTE_BLUE_MASK REG_GENMASK(7, 0)
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
/* ilk/snb precision palette */
#define _PREC_PALETTE_A 0x4b000
#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PALETTE_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PALETTE_BLUE_MASK REG_GENMASK(9, 0)
#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
#define _PREC_PIPEAGCMAX 0x4d000
@@ -7217,6 +7357,8 @@ enum {
#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084)
#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088)
+#define DMC_DEBUG3 _MMIO(0x101090)
+
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
#define MMIO_TIMEOUT_US(us) ((us) << 0)
@@ -7332,6 +7474,9 @@ enum {
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN11_PIPE_PLANE7_FAULT (1 << 22)
+#define GEN11_PIPE_PLANE6_FAULT (1 << 21)
+#define GEN11_PIPE_PLANE5_FAULT (1 << 20)
#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
@@ -7351,6 +7496,11 @@ enum {
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
+#define GEN11_DE_PIPE_IRQ_FAULT_ERRORS \
+ (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
+ GEN11_PIPE_PLANE7_FAULT | \
+ GEN11_PIPE_PLANE6_FAULT | \
+ GEN11_PIPE_PLANE5_FAULT)
#define GEN8_DE_PORT_ISR _MMIO(0x44440)
#define GEN8_DE_PORT_IMR _MMIO(0x44444)
@@ -7370,6 +7520,12 @@ enum {
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
+#define TGL_DE_PORT_AUX_USBC6 (1 << 13)
+#define TGL_DE_PORT_AUX_USBC5 (1 << 12)
+#define TGL_DE_PORT_AUX_USBC4 (1 << 11)
+#define TGL_DE_PORT_AUX_USBC3 (1 << 10)
+#define TGL_DE_PORT_AUX_USBC2 (1 << 9)
+#define TGL_DE_PORT_AUX_USBC1 (1 << 8)
#define TGL_DE_PORT_AUX_DDIC (1 << 2)
#define TGL_DE_PORT_AUX_DDIB (1 << 1)
#define TGL_DE_PORT_AUX_DDIA (1 << 0)
@@ -7558,10 +7714,17 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A _MMIO(0x420c0)
-#define CHICKEN_TRANS_B _MMIO(0x420c4)
-#define CHICKEN_TRANS_C _MMIO(0x420c8)
-#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
+#define _CHICKEN_TRANS_A 0x420c0
+#define _CHICKEN_TRANS_B 0x420c4
+#define _CHICKEN_TRANS_C 0x420c8
+#define _CHICKEN_TRANS_EDP 0x420cc
+#define _CHICKEN_TRANS_D 0x420d8
+#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \
+ [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \
+ [TRANSCODER_A] = _CHICKEN_TRANS_A, \
+ [TRANSCODER_B] = _CHICKEN_TRANS_B, \
+ [TRANSCODER_C] = _CHICKEN_TRANS_C, \
+ [TRANSCODER_D] = _CHICKEN_TRANS_D))
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
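The CHICKEN_TRANS() table above uses _PICK() with designated initializers because the spacing is no longer uniform: transcoders A..C and EDP step by 4 bytes, but the new TRANSCODER_D entry jumps to 0x420d8. Conceptually it indexes a lookup table; a sketch rather than the literal expansion:

	/* roughly what CHICKEN_TRANS(trans) resolves through */
	static const u32 chicken_trans_table[] = {
		[TRANSCODER_EDP] = _CHICKEN_TRANS_EDP,
		[TRANSCODER_A] = _CHICKEN_TRANS_A,
		[TRANSCODER_B] = _CHICKEN_TRANS_B,
		[TRANSCODER_C] = _CHICKEN_TRANS_C,
		[TRANSCODER_D] = _CHICKEN_TRANS_D,
	};
	/* reg = _MMIO(chicken_trans_table[trans]); */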
@@ -7594,15 +7757,19 @@ enum {
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
#define SKL_DFSM _MMIO(0x51000)
-#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
-#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
-#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
-#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
-#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
-#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27)
+#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25)
+#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23)
+#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23)
+#define ICL_DFSM_DMC_DISABLE (1 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
+#define CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
@@ -7619,7 +7786,10 @@ enum {
#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
+#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11)
+
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0)
#define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1))
@@ -7644,6 +7814,7 @@ enum {
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
+ #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9)
#define HIZ_CHICKEN _MMIO(0x7018)
# define CHV_HZ_8X8_MODE_IN_1X (1 << 15)
@@ -7828,29 +7999,24 @@ enum {
SDE_FDI_RXA_CPT)
/* south display engine interrupt: ICP/TGP */
-#define SDE_TC6_HOTPLUG_TGP (1 << 29)
-#define SDE_TC5_HOTPLUG_TGP (1 << 28)
-#define SDE_TC4_HOTPLUG_ICP (1 << 27)
-#define SDE_TC3_HOTPLUG_ICP (1 << 26)
-#define SDE_TC2_HOTPLUG_ICP (1 << 25)
-#define SDE_TC1_HOTPLUG_ICP (1 << 24)
#define SDE_GMBUS_ICP (1 << 23)
-#define SDE_DDIC_HOTPLUG_TGP (1 << 18)
-#define SDE_DDIB_HOTPLUG_ICP (1 << 17)
-#define SDE_DDIA_HOTPLUG_ICP (1 << 16)
#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24))
#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16))
-#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \
- SDE_DDIA_HOTPLUG_ICP)
-#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \
- SDE_TC3_HOTPLUG_ICP | \
- SDE_TC2_HOTPLUG_ICP | \
- SDE_TC1_HOTPLUG_ICP)
-#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \
- SDE_DDI_MASK_ICP)
-#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \
- SDE_TC5_HOTPLUG_TGP | \
- SDE_TC_MASK_ICP)
+#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
+#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_B) | \
+ SDE_DDI_HOTPLUG_ICP(PORT_A))
+#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC5) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
+ SDE_TC_HOTPLUG_ICP(PORT_TC1))
#define SDEISR _MMIO(0xc4000)
#define SDEIMR _MMIO(0xc4004)
@@ -7917,26 +8083,13 @@ enum {
* SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
*/
-#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
-#define TGP_DDIC_HPD_ENABLE (1 << 11)
-#define TGP_DDIC_HPD_STATUS_MASK (3 << 8)
-#define TGP_DDIC_HPD_NO_DETECT (0 << 8)
-#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8)
-#define TGP_DDIC_HPD_LONG_DETECT (2 << 8)
-#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8)
-#define ICP_DDIB_HPD_ENABLE (1 << 7)
-#define ICP_DDIB_HPD_STATUS_MASK (3 << 4)
-#define ICP_DDIB_HPD_NO_DETECT (0 << 4)
-#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4)
-#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
-#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
-#define ICP_DDIA_HPD_ENABLE (1 << 3)
-#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
-#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
-#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
-#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
-#define ICP_DDIA_HPD_LONG_DETECT (2 << 0)
-#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0)
+#define SHOTPLUG_CTL_DDI _MMIO(0xc4030)
+#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port)))
+#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port)))
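Each DDI port owns one 4-bit nibble of SHOTPLUG_CTL_DDI, which is what makes the parametrized form equivalent to the per-port defines it replaces:

	/*
	 * nibble for port N is bits (4 * N + 3)..(4 * N):
	 *   bit 3    HPD enable (PORT_A -> bit 3, PORT_B -> bit 7, ...)
	 *   bits 1:0 detect status (0 none, 1 short, 2 long, 3 both)
	 *
	 * e.g. SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) == 0x8 << 4 == 1 << 7,
	 * the old ICP_DDIB_HPD_ENABLE.
	 */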
#define SHOTPLUG_CTL_TC _MMIO(0xc4034)
#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4)
@@ -8047,14 +8200,15 @@ enum {
#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4)
#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4)
-#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \
- ICP_DDIA_HPD_ENABLE)
+#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \
ICP_TC_HPD_ENABLE(PORT_TC3) | \
ICP_TC_HPD_ENABLE(PORT_TC2) | \
ICP_TC_HPD_ENABLE(PORT_TC1))
-#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \
- ICP_DDI_HPD_ENABLE_MASK)
+#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
+ SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \
ICP_TC_HPD_ENABLE(PORT_TC5) | \
ICP_TC_HPD_ENABLE_MASK)
@@ -8604,6 +8758,10 @@ enum {
#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0)
#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1)
+#define POWERGATE_ENABLE _MMIO(0xa210)
+#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3)
+#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4)
+
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
@@ -8841,6 +8999,7 @@ enum {
#define GEN9_SAGV_DISABLE 0x0
#define GEN9_SAGV_IS_DISABLED 0x1
#define GEN9_SAGV_ENABLE 0x3
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -9104,6 +9263,10 @@ enum {
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
+#define AUD_FREQ_CNTRL _MMIO(0x65900)
+#define AUD_PIN_BUF_CTL _MMIO(0x48414)
+#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+
/*
* HSW - ICL power wells
*
@@ -9266,12 +9429,20 @@ enum skl_power_gate {
/* HDCP Repeater Registers */
#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT BIT(31)
+#define HDCP_TRANSB_REP_PRESENT BIT(30)
+#define HDCP_TRANSC_REP_PRESENT BIT(29)
+#define HDCP_TRANSD_REP_PRESENT BIT(28)
#define HDCP_DDIB_REP_PRESENT BIT(30)
#define HDCP_DDIA_REP_PRESENT BIT(29)
#define HDCP_DDIC_REP_PRESENT BIT(28)
#define HDCP_DDID_REP_PRESENT BIT(27)
#define HDCP_DDIF_REP_PRESENT BIT(26)
#define HDCP_DDIE_REP_PRESENT BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
#define HDCP_DDIB_SHA1_M0 (1 << 20)
#define HDCP_DDIA_SHA1_M0 (2 << 20)
#define HDCP_DDIC_SHA1_M0 (3 << 20)
@@ -9311,15 +9482,92 @@ enum skl_power_gate {
_PORTE_HDCP_AUTHENC, \
_PORTF_HDCP_AUTHENC) + (x))
#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
#define HDCP_CONF_CAPTURE_AN BIT(0)
#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
#define HDCP_STATUS_STREAM_A_ENC BIT(31)
#define HDCP_STATUS_STREAM_B_ENC BIT(30)
#define HDCP_STATUS_STREAM_C_ENC BIT(29)
@@ -9346,23 +9594,44 @@ enum skl_power_gate {
_PORTD_HDCP2_BASE, \
_PORTE_HDCP2_BASE, \
_PORTF_HDCP2_BASE) + (x))
-
-#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
#define AUTH_LINK_AUTHENTICATED BIT(31)
#define AUTH_LINK_TYPE BIT(30)
#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
#define AUTH_CLR_KEYS BIT(18)
-
-#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-
-#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define STREAM_ENCRYPTION_STATUS_A BIT(31)
-#define STREAM_ENCRYPTION_STATUS_B BIT(30)
-#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
#define LINK_TYPE_STATUS BIT(22)
#define LINK_AUTH_STATUS BIT(21)
#define LINK_ENCRYPTION_STATUS BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (INTEL_GEN(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
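Every HDCP register now comes in a port-based (pre-gen12) and a transcoder-based (gen12+) flavour, with the *(dev_priv, trans, port) selectors hiding the choice from callers. A hedged usage sketch:

	/* kick off authentication on whichever instance applies */
	I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
		   HDCP_CONF_AUTH_AND_ENC);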
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -9402,6 +9671,9 @@ enum skl_power_gate {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12)
+#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10)
+#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \
+ REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans)
#define TRANS_DDI_HDCP_SIGNALLING (1 << 9)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
@@ -9429,7 +9701,9 @@ enum skl_power_gate {
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
+#define _TGL_DP_TP_CTL_A 0x60540
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
+#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A)
#define DP_TP_CTL_ENABLE (1 << 31)
#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
@@ -9449,7 +9723,9 @@ enum skl_power_gate {
/* DisplayPort Transport Status */
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
+#define _TGL_DP_TP_STATUS_A 0x60544
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A)
#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
@@ -9604,17 +9880,7 @@ enum skl_power_gate {
#define _TRANSC_MSA_MISC 0x62410
#define _TRANS_EDP_MSA_MISC 0x6f410
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
-
-#define TRANS_MSA_SYNC_CLK (1 << 0)
-#define TRANS_MSA_SAMPLING_444 (2 << 1)
-#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
-#define TRANS_MSA_6_BPC (0 << 5)
-#define TRANS_MSA_8_BPC (1 << 5)
-#define TRANS_MSA_10_BPC (2 << 5)
-#define TRANS_MSA_12_BPC (3 << 5)
-#define TRANS_MSA_16_BPC (4 << 5)
-#define TRANS_MSA_CEA_RANGE (1 << 3)
-#define TRANS_MSA_USE_VSC_SDP (1 << 14)
+/* See DP_MSA_MISC_* for the bit definitions */
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
@@ -9655,7 +9921,10 @@ enum skl_power_gate {
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
+#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19)
#define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19)
+#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe)
+#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
@@ -9976,6 +10245,166 @@ enum skl_power_gate {
_TGL_DPLL1_CFGCR1, \
_TGL_TBTPLL_CFGCR1)
+#define _DKL_PHY1_BASE 0x168000
+#define _DKL_PHY2_BASE 0x169000
+#define _DKL_PHY3_BASE 0x16A000
+#define _DKL_PHY4_BASE 0x16B000
+#define _DKL_PHY5_BASE 0x16C000
+#define _DKL_PHY6_BASE 0x16D000
+
+/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
+#define _DKL_PLL_DIV0 0x200
+#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16)
+#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16)
+#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12)
+#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12)
+#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8)
+#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT)
+#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0)
+#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0)
+#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV0)
+
+#define _DKL_PLL_DIV1 0x204
+#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16)
+#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0)
+#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0)
+#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_DIV1)
+
+#define _DKL_PLL_SSC 0x210
+#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29)
+#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29)
+#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16)
+#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16)
+#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11)
+#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11)
+#define DKL_PLL_SSC_EN (1 << 9)
+#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_SSC)
+
+#define _DKL_PLL_BIAS 0x214
+#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30)
+#define DKL_PLL_BIAS_FBDIV_SHIFT (8)
+#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT)
+#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_BIAS)
+
+#define _DKL_PLL_TDC_COLDST_BIAS 0x218
+#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8)
+#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8)
+#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0)
+#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0)
+#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_PLL_TDC_COLDST_BIAS)
+
+#define _DKL_REFCLKIN_CTL 0x12C
+/* Bits are the same as MG_REFCLKIN_CTL */
+#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_REFCLKIN_CTL)
+
+#define _DKL_CLKTOP2_HSCLKCTL 0xD4
+/* Bits are the same as MG_CLKTOP2_HSCLKCTL */
+#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_HSCLKCTL)
+
+#define _DKL_CLKTOP2_CORECLKCTL1 0xD8
+/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */
+#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CLKTOP2_CORECLKCTL1)
+
+#define _DKL_TX_DPCNTL0 0x2C0
+#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13)
+#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13)
+#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8)
+#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8)
+#define DKL_TX_VSWING_CONTROL(x) ((x) << 0)
+#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0)
+#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL0)
+
+#define _DKL_TX_DPCNTL1 0x2C4
+/* Bits are the same as DKL_TX_DPCNTL0 */
+#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL1)
+
+#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_DP20BITMODE (1 << 2)
+#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DPCNTL2)
+
+#define _DKL_TX_FW_CALIB 0x2F8
+#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7)
+#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_FW_CALIB)
+
+#define _DKL_TX_PMD_LANE_SUS 0xD00
+#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_PMD_LANE_SUS)
+
+#define _DKL_TX_DW17 0xDC4
+#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW17)
+
+#define _DKL_TX_DW18 0xDC8
+#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_TX_DW18)
+
+#define _DKL_DP_MODE 0xA0
+#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_DP_MODE)
+
+#define _DKL_CMN_UC_DW27 0x36C
+#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15)
+#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \
+ _DKL_PHY1_BASE, \
+ _DKL_PHY2_BASE) + \
+ _DKL_CMN_UC_DW27)
+
+/*
+ * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than
+ * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0
+ * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address
+ * bits that point the 4KB window into the full PHY register space.
+ */
+#define _HIP_INDEX_REG0 0x1010A0
+#define _HIP_INDEX_REG1 0x1010A4
+#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \
+ : _HIP_INDEX_REG1)
+#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4))
+#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port))
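A sketch of the windowed access the comment above describes; the function name is illustrative, the driver wraps this pattern in its own helpers, and a careful version would read-modify-write the shared index register instead of overwriting it:

	static u32 dkl_phy_read(struct drm_i915_private *dev_priv,
				enum tc_port tc_port, u32 addr)
	{
		/* select which 4KB window of the PHY the aperture exposes */
		I915_WRITE(HIP_INDEX_REG(tc_port),
			   HIP_INDEX_VAL(tc_port, addr >> 12));

		/* then go through the per-PHY 4KB aperture */
		return I915_READ(_MMIO(_PORT(tc_port, _DKL_PHY1_BASE,
					     _DKL_PHY2_BASE) + (addr & 0xfff)));
	}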
+
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -9990,6 +10419,8 @@ enum skl_power_gate {
/* GEN9 DC */
#define DC_STATE_EN _MMIO(0x45504)
#define DC_STATE_DISABLE 0
+#define DC_STATE_EN_DC3CO REG_BIT(30)
+#define DC_STATE_DC3CO_STATUS REG_BIT(29)
#define DC_STATE_EN_UPTO_DC5 (1 << 0)
#define DC_STATE_EN_DC9 (1 << 3)
#define DC_STATE_EN_UPTO_DC6 (2 << 0)
@@ -10118,11 +10549,11 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
-#define ICL_CSC_ENABLE (1 << 31)
-#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31) /* icl+ */
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30) /* icl+ */
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2) /* ilk/snb */
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1) /* pre-glk */
+#define CSC_MODE_YUV_TO_RGB (1 << 0) /* ilk/snb */
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
@@ -10238,6 +10669,9 @@ enum skl_power_gate {
#define _PAL_PREC_GC_MAX_A 0x4A410
#define _PAL_PREC_GC_MAX_B 0x4AC10
#define _PAL_PREC_GC_MAX_C 0x4B410
+#define PREC_PAL_DATA_RED_MASK REG_GENMASK(29, 20)
+#define PREC_PAL_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define PREC_PAL_DATA_BLUE_MASK REG_GENMASK(9, 0)
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
@@ -10290,6 +10724,9 @@ enum skl_power_gate {
#define CGM_PIPE_MODE_GAMMA (1 << 2)
#define CGM_PIPE_MODE_CSC (1 << 1)
#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
+#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0)
+#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
+#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0)
#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
@@ -11537,16 +11974,31 @@ enum skl_power_gate {
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
#define MODULAR_FIA_MASK (1 << 4)
-#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
-#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
-#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
-#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
-#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
+#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6))
+#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5))
+#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8)
+#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8))
+#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8))
#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
-#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
+#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx))
#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
-#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
+#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx))
+
+#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880)
+#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
+#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
+#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+
+/* This register controls the Display State Buffer (DSB) engines. */
+#define _DSBSL_INSTANCE_BASE 0x70B00
+#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \
+ (pipe) * 0x1000 + (id) * 0x100)
+#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
+#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
+#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
+#define DSB_ENABLE (1 << 31)
+#define DSB_STATUS (1 << 0)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index bc828a9ace84..bbd71af00a91 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,8 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/intel_rps.h"
#include "i915_active.h"
#include "i915_drv.h"
@@ -169,16 +171,17 @@ remove_from_client(struct i915_request *request)
{
struct drm_i915_file_private *file_priv;
- file_priv = READ_ONCE(request->file_priv);
- if (!file_priv)
+ if (!READ_ONCE(request->file_priv))
return;
- spin_lock(&file_priv->mm.lock);
- if (request->file_priv) {
+ rcu_read_lock();
+ file_priv = xchg(&request->file_priv, NULL);
+ if (file_priv) {
+ spin_lock(&file_priv->mm.lock);
list_del(&request->client_link);
- request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
}
- spin_unlock(&file_priv->mm.lock);
+ rcu_read_unlock();
}
static void free_capture_list(struct i915_request *request)
@@ -205,21 +208,18 @@ static void remove_from_engine(struct i915_request *rq)
* check that the rq still belongs to the newly locked engine.
*/
locked = READ_ONCE(rq->engine);
- spin_lock(&locked->active.lock);
+ spin_lock_irq(&locked->active.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->active.lock);
spin_lock(&engine->active.lock);
locked = engine;
}
list_del(&rq->sched.link);
- spin_unlock(&locked->active.lock);
+ spin_unlock_irq(&locked->active.lock);
}
-static bool i915_request_retire(struct i915_request *rq)
+bool i915_request_retire(struct i915_request *rq)
{
- struct i915_active_request *active, *next;
-
- lockdep_assert_held(&rq->timeline->mutex);
if (!i915_request_completed(rq))
return false;
@@ -240,41 +240,11 @@ static bool i915_request_retire(struct i915_request *rq)
* Note this requires that we are always called in request
* completion order.
*/
- GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+ GEM_BUG_ON(!list_is_first(&rq->link,
+ &i915_request_timeline(rq)->requests));
rq->ring->head = rq->postfix;
/*
- * Walk through the active list, calling retire on each. This allows
- * objects to track their GPU activity and mark themselves as idle
- * when their *last* active request is completed (updating state
- * tracking lists for eviction, active references for GEM, etc).
- *
- * As the ->retire() may free the node, we decouple it first and
- * pass along the auxiliary information (to avoid dereferencing
- * the node after the callback).
- */
- list_for_each_entry_safe(active, next, &rq->active_list, link) {
- /*
- * In microbenchmarks or focusing upon time inside the kernel,
- * we may spend an inordinate amount of time simply handling
- * the retirement of requests and processing their callbacks.
- * Of which, this loop itself is particularly hot due to the
- * cache misses when jumping around the list of
- * i915_active_request. So we try to keep this loop as
- * streamlined as possible and also prefetch the next
- * i915_active_request to try and hide the likely cache miss.
- */
- prefetchw(next);
-
- INIT_LIST_HEAD(&active->link);
- RCU_INIT_POINTER(active->request, NULL);
-
- active->retire(active, rq);
- }
-
- local_irq_disable();
-
- /*
* We only loosely track inflight requests across preemption,
* and so we may find ourselves attempting to retire a _completed_
* request that we have removed from the HW and put back on a run
@@ -282,24 +252,22 @@ static bool i915_request_retire(struct i915_request *rq)
*/
remove_from_engine(rq);
- spin_lock(&rq->lock);
+ spin_lock_irq(&rq->lock);
i915_request_mark_complete(rq);
if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
if (i915_request_has_waitboost(rq)) {
- GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
- atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+ atomic_dec(&rq->engine->gt->rps.num_waiters);
}
if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
__notify_execute_cb(rq);
}
GEM_BUG_ON(!list_empty(&rq->execute_cb));
- spin_unlock(&rq->lock);
-
- local_irq_enable();
+ spin_unlock_irq(&rq->lock);
remove_from_client(rq);
list_del(&rq->link);
@@ -316,7 +284,7 @@ static bool i915_request_retire(struct i915_request *rq)
void i915_request_retire_upto(struct i915_request *rq)
{
- struct intel_timeline * const tl = rq->timeline;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -324,7 +292,6 @@ void i915_request_retire_upto(struct i915_request *rq)
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq));
- lockdep_assert_held(&tl->mutex);
GEM_BUG_ON(!i915_request_completed(rq));
do {
@@ -680,9 +647,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->gem_context = ce->gem_context;
rq->engine = ce->engine;
rq->ring = ce->ring;
- rq->timeline = tl;
+ rq->execution_mask = ce->engine->mask;
+
+ rcu_assign_pointer(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno;
rq->hwsp_cacheline = tl->hwsp_cacheline;
+
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
@@ -700,9 +670,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->batch = NULL;
rq->capture_list = NULL;
rq->flags = 0;
- rq->execution_mask = ALL_ENGINES;
- INIT_LIST_HEAD(&rq->active_list);
INIT_LIST_HEAD(&rq->execute_cb);
/*
@@ -741,7 +709,6 @@ err_unwind:
ce->ring->emit = rq->head;
/* Make sure we didn't add ourselves to external state before freeing */
- GEM_BUG_ON(!list_empty(&rq->active_list));
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
@@ -786,16 +753,43 @@ err_unlock:
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
- if (list_is_first(&signal->link, &signal->timeline->requests))
- return 0;
+ struct intel_timeline *tl;
+ struct dma_fence *fence;
+ int err;
- signal = list_prev_entry(signal, link);
- if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
+ GEM_BUG_ON(i915_request_timeline(rq) ==
+ rcu_access_pointer(signal->timeline));
+
+ rcu_read_lock();
+ tl = rcu_dereference(signal->timeline);
+ if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
+ tl = NULL;
+ rcu_read_unlock();
+ if (!tl) /* already started or maybe even completed */
return 0;
- return i915_sw_fence_await_dma_fence(&rq->submit,
- &signal->fence, 0,
- I915_FENCE_GFP);
+ fence = ERR_PTR(-EBUSY);
+ if (mutex_trylock(&tl->mutex)) {
+ fence = NULL;
+ if (!i915_request_started(signal) &&
+ !list_is_first(&signal->link, &tl->requests)) {
+ signal = list_prev_entry(signal, link);
+ fence = dma_fence_get(&signal->fence);
+ }
+ mutex_unlock(&tl->mutex);
+ }
+ intel_timeline_put(tl);
+ if (IS_ERR_OR_NULL(fence))
+ return PTR_ERR_OR_ZERO(fence);
+
+ err = 0;
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ err = i915_sw_fence_await_dma_fence(&rq->submit,
+ fence, 0,
+ I915_FENCE_GFP);
+ dma_fence_put(fence);
+
+ return err;
}
static intel_engine_mask_t
@@ -821,34 +815,33 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const int has_token = INTEL_GEN(to->i915) >= 12;
u32 hwsp_offset;
+ int len;
u32 *cs;
- int err;
- GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* Just emit the first semaphore we see as request space is limited. */
if (already_busywaiting(to) & from->engine->mask)
- return i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
+ goto await_fence;
- err = i915_request_await_start(to, from);
- if (err < 0)
- return err;
+ if (i915_request_await_start(to, from) < 0)
+ goto await_fence;
/* Only submit our spinner after the signaler is running! */
- err = __i915_request_await_execution(to, from, NULL, gfp);
- if (err)
- return err;
+ if (__i915_request_await_execution(to, from, NULL, gfp))
+ goto await_fence;
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
- if (err)
- return err;
+ if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
+ goto await_fence;
+
+ len = 4;
+ if (has_token)
+ len += 2;
- cs = intel_ring_begin(to, 4);
+ cs = intel_ring_begin(to, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -860,18 +853,28 @@ emit_semaphore_wait(struct i915_request *to,
* (post-wrap) values than they were expecting (and so wait
* forever).
*/
- *cs++ = MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
- MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD) +
+ has_token;
*cs++ = from->fence.seqno;
*cs++ = hwsp_offset;
*cs++ = 0;
+ if (has_token) {
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ }
intel_ring_advance(to, cs);
to->sched.semaphores |= from->engine->mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
+
+await_fence:
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
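The "+ has_token" on the opcode dword is doing real work: an MI command encodes its total dword length in the low bits of its first dword, and gen12's MI_SEMAPHORE_WAIT grows by a token dword, so adding one selects the longer encoding. The emission above lays out as:

	/*
	 * dw0: MI_SEMAPHORE_WAIT | GGTT | POLL | SAD_GTE_SDD (+1 on gen12)
	 * dw1: value to poll for (the signaler's fence seqno)
	 * dw2: GGTT offset of the signaler's HWSP seqno slot
	 * dw3: 0
	 * dw4: 0       (token, gen12 only)
	 * dw5: MI_NOOP (gen12 only, keeps the emission an even dword count)
	 */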
static int
@@ -955,21 +958,23 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Squash repeated waits to the same timelines */
if (fence->context &&
- intel_timeline_sync_is_later(rq->timeline, fence))
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
continue;
if (dma_fence_is_i915(fence))
ret = i915_request_await_request(rq, to_request(fence));
else
ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
- I915_FENCE_TIMEOUT,
+ fence->context ? I915_FENCE_TIMEOUT : 0,
I915_FENCE_GFP);
if (ret < 0)
return ret;
/* Record the latest fence used against each timeline */
if (fence->context)
- intel_timeline_sync_set(rq->timeline, fence);
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
@@ -1111,7 +1116,7 @@ void i915_request_skip(struct i915_request *rq, int error)
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct intel_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
/*
@@ -1134,8 +1139,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* precludes optimising to use semaphores serialisation of a single
* timeline across engines.
*/
- prev = rcu_dereference_protected(timeline->last_request.request,
- lockdep_is_held(&timeline->mutex));
+ prev = to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
if (prev && !i915_request_completed(prev)) {
if (is_power_of_2(prev->engine->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1160,7 +1165,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* us, the timeline will hold its seqno which is later than ours.
*/
GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
- __i915_active_request_set(&timeline->last_request, rq);
return prev;
}
@@ -1224,7 +1228,7 @@ void __i915_request_queue(struct i915_request *rq,
void i915_request_add(struct i915_request *rq)
{
struct i915_sched_attr attr = rq->gem_context->sched;
- struct intel_timeline * const tl = rq->timeline;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_request *prev;
lockdep_assert_held(&tl->mutex);
@@ -1279,7 +1283,9 @@ void i915_request_add(struct i915_request *rq)
* work on behalf of others -- but instead we should benefit from
* improved resource management. (Well, that's the theory at least.)
*/
- if (prev && i915_request_completed(prev) && prev->timeline == tl)
+ if (prev &&
+ i915_request_completed(prev) &&
+ rcu_access_pointer(prev->timeline) == tl)
i915_request_retire_upto(prev);
mutex_unlock(&tl->mutex);
@@ -1442,7 +1448,7 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (CONFIG_DRM_I915_SPIN_REQUEST &&
+ if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
__i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
dma_fence_signal(&rq->fence);
goto out;
@@ -1462,7 +1468,7 @@ long i915_request_wait(struct i915_request *rq,
*/
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq);
+ intel_rps_boost(rq);
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
}
@@ -1488,6 +1494,7 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1500,48 +1507,6 @@ out:
return timeout;
}
-bool i915_retire_requests(struct drm_i915_private *i915)
-{
- struct intel_gt_timelines *timelines = &i915->gt.timelines;
- struct intel_timeline *tl, *tn;
- unsigned long flags;
- LIST_HEAD(free);
-
- spin_lock_irqsave(&timelines->lock, flags);
- list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
- if (!mutex_trylock(&tl->mutex))
- continue;
-
- intel_timeline_get(tl);
- GEM_BUG_ON(!tl->active_count);
- tl->active_count++; /* pin the list element */
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- retire_requests(tl);
-
- spin_lock_irqsave(&timelines->lock, flags);
-
- /* Resume iteration after dropping lock */
- list_safe_reset_next(tl, tn, link);
- if (!--tl->active_count)
- list_del(&tl->link);
-
- mutex_unlock(&tl->mutex);
-
- /* Defer the final release to after the spinlock */
- if (refcount_dec_and_test(&tl->kref.refcount)) {
- GEM_BUG_ON(tl->active_count);
- list_add(&tl->link, &free);
- }
- }
- spin_unlock_irqrestore(&timelines->lock, flags);
-
- list_for_each_entry_safe(tl, tn, &free, link)
- __intel_timeline_free(&tl->kref);
-
- return !list_empty(&timelines->active_list);
-}
-
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e4dd013761e8..96991d64759c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -113,7 +113,7 @@ struct i915_request {
struct intel_engine_cs *engine;
struct intel_context *hw_context;
struct intel_ring *ring;
- struct intel_timeline *timeline;
+ struct intel_timeline __rcu *timeline;
struct list_head signal_link;
/*
@@ -211,14 +211,14 @@ struct i915_request {
* on the active_list (of their final request).
*/
struct i915_capture_list *capture_list;
- struct list_head active_list;
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
unsigned long flags;
-#define I915_REQUEST_WAITBOOST BIT(0)
-#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_SENTINEL BIT(2)
/** timeline->request entry for this request */
struct list_head link;
@@ -251,6 +251,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
+bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);
static inline struct i915_request *
@@ -309,10 +310,8 @@ long i915_request_wait(struct i915_request *rq,
long timeout)
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
-#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
-#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
static inline bool i915_request_signaled(const struct i915_request *rq)
{
@@ -442,6 +441,29 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
}
-bool i915_retire_requests(struct drm_i915_private *i915);
+static inline bool i915_request_has_sentinel(const struct i915_request *rq)
+{
+ return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+}
+
+static inline struct intel_timeline *
+i915_request_timeline(struct i915_request *rq)
+{
+ /* Valid only while the request is being constructed (or retired). */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
+}
+
+static inline struct intel_timeline *
+i915_request_active_timeline(struct i915_request *rq)
+{
+ /*
+ * When in use during submission, we are protected by a guarantee that
+ * the context/timeline is pinned and must remain pinned until after
+ * this submission.
+ */
+ return rcu_dereference_protected(rq->timeline,
+ lockdep_is_held(&rq->engine->active.lock));
+}
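These accessors make explicit which lock legitimises the RCU-protected timeline pointer: the timeline mutex during construction and retirement, engine->active.lock during submission. A caller holding neither must use the plain RCU pattern instead; a minimal sketch:

	struct intel_timeline *tl;

	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	/* tl is only stable inside the read-side section */
	rcu_read_unlock();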
#endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h
index 6617963df9ed..b7b59328cb76 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.h
+++ b/drivers/gpu/drm/i915/i915_scatterlist.h
@@ -67,15 +67,15 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
}
/**
- * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
- * @__dmap: DMA address (output)
+ * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
+ * @__dp: Device address (output)
* @__iter: 'struct sgt_iter' (iterator state, internal)
* @__sgt: sg_table to iterate over (input)
* @__step: step size
*/
-#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \
+#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
- ((__dmap) = (__iter).dma + (__iter).curr); \
+ ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \
(((__iter).curr += (__step)) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
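The appended "(__iter).sgp" test is what now terminates the walk; the old form relied on the computed address evaluating to zero, which a legitimate device address of 0 would defeat. A usage sketch with a hypothetical helper, assuming I915_GTT_PAGE_SIZE as the step:

	static inline bool sgt_contains_daddr(struct sg_table *sgt,
					      dma_addr_t what)
	{
		struct sgt_iter iter;
		dma_addr_t daddr;

		__for_each_sgt_daddr(daddr, iter, sgt, I915_GTT_PAGE_SIZE)
			if (daddr == what)
				return true;

		return false;
	}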
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 3eba8a2b39c2..010d67f48ad9 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -211,10 +211,7 @@ static void kick_submission(struct intel_engine_cs *engine,
/*
* If we are already the currently executing context, don't
- * bother evaluating if we should preempt ourselves, or if
- * we expect nothing to change as a result of running the
- * tasklet, i.e. we have not change the priority queue
- * sufficiently to oust the running context.
+ * bother evaluating if we should preempt ourselves.
*/
if (inflight->hw_context == rq->hw_context)
goto unlock;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 7eefccff39bf..07d243acf553 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -52,22 +52,4 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
-static inline bool i915_scheduler_need_preempt(int prio, int active)
-{
- /*
- * Allow preemption of low -> normal -> high, but we do
- * not allow low priority tasks to preempt other low priority
- * tasks under the impression that latency for low priority
- * tasks does not matter (as much as background throughput),
- * so kiss.
- *
- * More naturally we would write
- * prio >= max(0, last);
- * except that we wish to prevent triggering preemption at the same
- * priority level: the task that is running should remain running
- * to preserve FIFO ordering of dependencies.
- */
- return prio > max(I915_PRIORITY_NORMAL - 1, active);
-}
-
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index aad81acba9dc..d18e70550054 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -49,6 +49,15 @@ struct i915_sched_attr {
* DAG of each request, we are able to insert it into a sorted queue when it
* is ready, and are able to reorder its portion of the graph to accommodate
* dynamic priority changes.
+ *
+ * Ok, there is now one active element to the "scheduler" in the backends.
+ * We let a new context run for a small amount of time before re-evaluating
+ * the run order. As we re-evaluate, we maintain the strict ordering of
+ * dependencies, but attempt to rotate the active contexts (the current context
+ * is put to the back of its priority queue, and its dependents are reshuffled
+ * after it).
+ * This provides minimal timeslicing and prevents a userspace hog (e.g.
+ * something waiting on a user semaphore [VkEvent]) from denying service to
+ * others.
*/
struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8508a01ad8b9..8812cdd9007f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -28,6 +28,7 @@
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
+#include "display/intel_vga.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -57,7 +58,7 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
- i915_redisable_vga(dev_priv);
+ intel_vga_redisable(dev_priv);
}
int i915_save_state(struct drm_i915_private *dev_priv)
@@ -65,8 +66,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
i915_save_display(dev_priv);
if (IS_GEN(dev_priv, 4))
@@ -100,8 +99,6 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
return 0;
}
@@ -110,8 +107,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
@@ -145,8 +140,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
intel_gmbus_reset(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
new file mode 100644
index 000000000000..39c79e1c5b52
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+#include "i915_switcheroo.h"
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (!i915) {
+ dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
+ return;
+ }
+
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set the device to D0 */
+ pci_set_power_state(pdev, PCI_D0);
+ i915_resume_switcheroo(i915);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_info("switched off\n");
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(i915, pmm);
+ i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_i915_private *i915 = pdev_to_i915(pdev);
+
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return i915 && i915->drm.open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+
+int i915_switcheroo_register(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false);
+}
+
+void i915_switcheroo_unregister(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ vga_switcheroo_unregister_client(pdev);
+}
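The new file reduces switcheroo support to a two-call interface. Wiring it up follows the usual probe/remove pattern; a sketch under that assumption (the example_* names are illustrative, not the driver's actual call sites):

static int example_driver_probe(struct drm_i915_private *i915)
{
	int ret;

	ret = i915_switcheroo_register(i915);
	if (ret)
		return ret;

	/* ... remainder of probe; unregister on any later failure ... */
	return 0;
}

static void example_driver_remove(struct drm_i915_private *i915)
{
	i915_switcheroo_unregister(i915);
}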
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.h b/drivers/gpu/drm/i915/i915_switcheroo.h
new file mode 100644
index 000000000000..59b6c1e07d75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_switcheroo.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_SWITCHEROO__
+#define __I915_SWITCHEROO__
+
+struct drm_i915_private;
+
+int i915_switcheroo_register(struct drm_i915_private *i915);
+void i915_switcheroo_unregister(struct drm_i915_private *i915);
+
+#endif /* __I915_SWITCHEROO__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index d8a3b180c084..65476909d1bf 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,9 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
@@ -49,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
u64 res = 0;
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(dev_priv, reg);
+ res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -142,12 +145,12 @@ static const struct attribute_group media_rc6_attr_group = {
};
#endif
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
- if (!HAS_L3_DPF(dev_priv))
+ if (!HAS_L3_DPF(i915))
return -EPERM;
- if (offset % 4 != 0)
+ if (!IS_ALIGNED(offset, sizeof(u32)))
return -EINVAL;
if (offset >= GEN7_L3LOG_SIZE)
@@ -162,31 +165,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
int ret;
- count = round_down(count, 4);
-
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
+ count = round_down(count, sizeof(u32));
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+ memset(buf, 0, count);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
- if (dev_priv->l3_parity.remap_info[slice])
+ spin_lock(&i915->gem.contexts.lock);
+ if (i915->l3_parity.remap_info[slice])
memcpy(buf,
- dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
count);
- else
- memset(buf, 0, count);
-
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock(&i915->gem.contexts.lock);
return count;
}
@@ -197,46 +193,49 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_gem_context *ctx;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
- u32 **remap_info;
+ u32 *remap_info, *freeme = NULL;
+ struct i915_gem_context *ctx;
int ret;
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ if (count < sizeof(u32))
+ return -EINVAL;
- remap_info = &dev_priv->l3_parity.remap_info[slice];
- if (!*remap_info) {
- *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
- if (!*remap_info) {
- ret = -ENOMEM;
- goto out;
- }
+ remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!remap_info)
+ return -ENOMEM;
+
+ spin_lock(&i915->gem.contexts.lock);
+
+ if (i915->l3_parity.remap_info[slice]) {
+ freeme = remap_info;
+ remap_info = i915->l3_parity.remap_info[slice];
+ } else {
+ i915->l3_parity.remap_info[slice] = remap_info;
}
- /* TODO: Ideally we really want a GPU reset here to make sure errors
- * aren't propagated. Since I cannot find a stable way to reset the GPU
- * at this point it is left as a TODO.
- */
- memcpy(*remap_info + (offset/4), buf, count);
+ count = round_down(count, sizeof(u32));
+ memcpy(remap_info + offset / sizeof(u32), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- ctx->remap_slice |= (1<<slice);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+ ctx->remap_slice |= BIT(slice);
- ret = count;
+ spin_unlock(&i915->gem.contexts.lock);
+ kfree(freeme);
-out:
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
- return ret;
+ return count;
}
static const struct bin_attribute dpf_attrs = {
@@ -261,6 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
intel_wakeref_t wakeref;
u32 freq;
@@ -273,31 +273,31 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
freq = (freq >> 8) & 0xff;
} else {
- freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
+ freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
}
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+ return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
}
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.cur_freq));
+ intel_gpu_freq(rps, rps->cur_freq));
}
static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.boost_freq));
+ intel_gpu_freq(rps, rps->boost_freq));
}
static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -305,7 +305,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
bool boost = false;
ssize_t ret;
u32 val;
@@ -315,7 +315,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
return ret;
/* Validate against (static) hardware limits */
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
@@ -335,19 +335,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.efficient_freq));
+ intel_gpu_freq(rps, rps->efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.max_freq_softlimit));
+ intel_gpu_freq(rps, rps->max_freq_softlimit));
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -355,19 +355,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
@@ -377,7 +375,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (val > rps->rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
- intel_gpu_freq(dev_priv, val));
+ intel_gpu_freq(rps, val));
rps->max_freq_softlimit = val;
@@ -385,14 +383,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new max_delay and
+ /*
+ * We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * the frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -400,10 +399,10 @@ unlock:
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ struct intel_rps *rps = &dev_priv->gt.rps;
return snprintf(buf, PAGE_SIZE, "%d\n",
- intel_gpu_freq(dev_priv,
- dev_priv->gt_pm.rps.min_freq_softlimit));
+ intel_gpu_freq(rps, rps->min_freq_softlimit));
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -411,19 +410,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
const char *buf, size_t count)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_wakeref_t wakeref;
- u32 val;
+ struct intel_rps *rps = &dev_priv->gt.rps;
ssize_t ret;
+ u32 val;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&rps->lock);
- val = intel_freq_opcode(dev_priv, val);
+ val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
@@ -437,14 +434,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
- /* We still need *_set_rps to process the new min_delay and
+ /*
+ * We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
- * frequency request may be unchanged. */
- ret = intel_set_rps(dev_priv, val);
+ * the frequency request may be unchanged.
+ */
+ intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return ret ?: count;
}
@@ -466,15 +464,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &dev_priv->gt.rps;
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+ val = intel_gpu_freq(rps, rps->rp0_freq);
else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+ val = intel_gpu_freq(rps, rps->rp1_freq);
else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_gpu_freq(dev_priv, rps->min_freq);
+ val = intel_gpu_freq(rps, rps->min_freq);
else
BUG();
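The i915_l3_write() rework above swaps a sleeping allocation under struct_mutex for a pattern worth noting on its own: allocate before taking the spinlock, publish under the lock only if the slot is still empty, and free the losing copy after unlocking. A generic restatement (hypothetical helper, not a driver API):

#include <linux/slab.h>
#include <linux/spinlock.h>

static u32 *get_or_install(u32 **slot, spinlock_t *lock, size_t size)
{
	u32 *fresh, *freeme = NULL;

	fresh = kzalloc(size, GFP_KERNEL);	/* may sleep: no lock held */
	if (!fresh)
		return NULL;

	spin_lock(lock);
	if (*slot) {
		freeme = fresh;		/* lost the race; keep the winner */
		fresh = *slot;
	} else {
		*slot = fresh;		/* first writer installs the buffer */
	}
	spin_unlock(lock);

	kfree(freeme);			/* kfree(NULL) is a no-op */
	return fresh;
}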
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 24f2944da09d..7ef7a1e1664c 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -675,7 +674,6 @@ TRACE_EVENT(i915_request_queue,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -683,10 +681,9 @@ TRACE_EVENT(i915_request_queue,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->flags)
+ __entry->ctx, __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_request,
@@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno)
+ __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -749,7 +743,6 @@ TRACE_EVENT(i915_request_in,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -758,9 +751,9 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
);
@@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -780,7 +772,6 @@ TRACE_EVENT(i915_request_out,
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -788,10 +779,9 @@ TRACE_EVENT(i915_request_out,
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->completed)
+ __entry->ctx, __entry->seqno, __entry->completed)
);
#else
@@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, hw_id)
__field(u64, ctx)
__field(u16, class)
__field(u16, instance)
@@ -845,7 +834,6 @@ TRACE_EVENT(i915_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = rq->i915->drm.primary->index;
- __entry->hw_id = rq->gem_context->hw_id;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
__entry->ctx = rq->fence.context;
@@ -853,9 +841,9 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
+ __entry->ctx, __entry->seqno,
__entry->flags)
);
@@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context,
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_gem_context *, ctx)
- __field(u32, hw_id)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
- __entry->hw_id = ctx->hw_id;
- __entry->vm = ctx->vm;
+ __entry->vm = rcu_access_pointer(ctx->vm);
),
- TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
- __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+ __entry->dev, __entry->ctx, __entry->vm)
)
DEFINE_EVENT(i915_context, i915_context_create,
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 16acdf7bdbe6..0348c6d0ef5f 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -54,25 +54,54 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static unsigned int i915_probe_fail_count;
-int __i915_inject_load_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line)
{
- if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
+ if (i915_probe_fail_count >= i915_modparams.inject_probe_failure)
return 0;
- if (++i915_probe_fail_count < i915_modparams.inject_load_failure)
+ if (++i915_probe_fail_count < i915_modparams.inject_probe_failure)
return 0;
__i915_printk(i915, KERN_INFO,
"Injecting failure %d at checkpoint %u [%s:%d]\n",
- err, i915_modparams.inject_load_failure, func, line);
- i915_modparams.inject_load_failure = 0;
+ err, i915_modparams.inject_probe_failure, func, line);
+ i915_modparams.inject_probe_failure = 0;
return err;
}
bool i915_error_injected(void)
{
- return i915_probe_fail_count && !i915_modparams.inject_load_failure;
+ return i915_probe_fail_count && !i915_modparams.inject_probe_failure;
}
#endif
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!READ_ONCE(t->expires))
+ return;
+
+ del_timer(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies_timeout(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffy.
+ */
+ barrier();
+
+ mod_timer(t, jiffies + timeout);
+}
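The helpers added above encode "expires == 0" as the disabled state, so both calls are idempotent and a timeout of 0 disables the timer outright. A usage sketch (assuming a timer initialised elsewhere with timer_setup()):

static void example_rearm(struct timer_list *t, bool enable)
{
	if (enable)
		set_timer_ms(t, 10);	/* fire in ~10ms, at least one tick */
	else
		cancel_timer(t);	/* safe on an already-disabled timer */
}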
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 562f756da421..04139ba1191e 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -32,6 +32,7 @@
#include <linux/workqueue.h>
struct drm_i915_private;
+struct timer_list;
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -60,20 +61,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-int __i915_inject_load_error(struct drm_i915_private *i915, int err,
- const char *func, int line);
-#define i915_inject_load_error(_i915, _err) \
- __i915_inject_load_error((_i915), (_err), __func__, __LINE__)
+int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
+ const char *func, int line);
+#define i915_inject_probe_error(_i915, _err) \
+ __i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);
#else
-#define i915_inject_load_error(_i915, _err) 0
+#define i915_inject_probe_error(_i915, _err) 0
#define i915_error_injected() false
#endif
-#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV)
+#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)
#define i915_probe_error(i915, fmt, ...) \
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
@@ -421,4 +422,25 @@ static inline void add_taint_for_CI(unsigned int taint)
add_taint(taint, LOCKDEP_STILL_OK);
}
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires) && !timer_pending(t);
+}
+
+/*
+ * This is a lookalike for IS_ENABLED() that takes a kconfig value,
+ * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero
+ * i.e. whether the configuration is active. Wrapping up the config inside
+ * a boolean context prevents clang and smatch from complaining about potential
+ * issues in confusing logical-&& with bitwise-& for constants.
+ *
+ * Sadly IS_ENABLED() itself does not work with kconfig values.
+ *
+ * Returns 0 if @config is 0, and 1 if it is set to any non-zero value.
+ */
+#define IS_ACTIVE(config) ((config) != 0)
+
#endif /* !__I915_UTILS_H */
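A usage sketch for IS_ACTIVE(): unlike IS_ENABLED(), it accepts integer kconfig values, so a configuration set to 0 lets the compiler drop the whole branch (busywait() is a hypothetical helper standing in for the guarded work):

static bool maybe_spin(struct i915_request *rq)
{
	/* With CONFIG_DRM_I915_SPIN_REQUEST=0 this folds to constant false */
	return IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
	       busywait(rq, CONFIG_DRM_I915_SPIN_REQUEST);
}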
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e677b2a3a9..e5512f26e20a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_globals.h"
+#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
@@ -90,6 +91,7 @@ static int __i915_vma_active(struct i915_active *ref)
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}
+__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
@@ -104,21 +106,21 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
+ GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- vma->vm = vm;
+ mutex_init(&vma->pages_mutex);
+ vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->resv = obj->base.resv;
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(vm->i915, &vma->active,
- __i915_vma_active, __i915_vma_retire);
+ i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
/* Declare ourselves safe for use inside shrinkers */
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -171,7 +173,7 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
- vma->flags |= I915_VMA_GGTT;
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
spin_lock(&obj->vma.lock);
@@ -218,10 +220,6 @@ vma_create(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
- mutex_lock(&vm->mutex);
- list_add(&vma->vm_link, &vm->unbound_list);
- mutex_unlock(&vm->mutex);
-
return vma;
err_vma:
@@ -265,8 +263,6 @@ vma_lookup(struct drm_i915_gem_object *obj,
* Once created, the VMA is kept until either the object is freed, or the
* address space is closed.
*
- * Must be called with struct_mutex held.
- *
* Returns the vma, or an error pointer.
*/
struct i915_vma *
@@ -277,7 +273,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(vm->closed);
+ GEM_BUG_ON(!atomic_read(&vm->open));
spin_lock(&obj->vma.lock);
vma = vma_lookup(obj, vm, view);
@@ -291,18 +287,63 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
return vma;
}
+struct i915_vma_work {
+ struct dma_fence_work base;
+ struct i915_vma *vma;
+ enum i915_cache_level cache_level;
+ unsigned int flags;
+};
+
+static int __vma_bind(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+ struct i915_vma *vma = vw->vma;
+ int err;
+
+ err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
+ if (err)
+ atomic_or(I915_VMA_ERROR, &vma->flags);
+
+ if (vma->obj)
+ __i915_gem_object_unpin_pages(vma->obj);
+
+ return err;
+}
+
+static const struct dma_fence_work_ops bind_ops = {
+ .name = "bind",
+ .work = __vma_bind,
+};
+
+struct i915_vma_work *i915_vma_work(void)
+{
+ struct i915_vma_work *vw;
+
+ vw = kzalloc(sizeof(*vw), GFP_KERNEL);
+ if (!vw)
+ return NULL;
+
+ dma_fence_work_init(&vw->base, &bind_ops);
+ vw->base.dma.error = -EAGAIN; /* disable the worker by default */
+
+ return vw;
+}
+
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
* @cache_level: mapping cache level
* @flags: flags like global or local mapping
+ * @work: preallocated worker for allocating and binding the PTE
*
* DMA addresses are taken from the scatter-gather table of this object (or of
* this VMA in case of non-default GGTT views) and PTE entries set up.
* Note that DMA addresses are also the only part of the SG table we care about.
*/
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags)
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work)
{
u32 bind_flags;
u32 vma_flags;
@@ -319,13 +360,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
- bind_flags = 0;
- if (flags & PIN_GLOBAL)
- bind_flags |= I915_VMA_GLOBAL_BIND;
- if (flags & PIN_USER)
- bind_flags |= I915_VMA_LOCAL_BIND;
+ bind_flags = flags;
+ bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ vma_flags = atomic_read(&vma->flags);
+ vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
if (flags & PIN_UPDATE)
bind_flags |= vma_flags;
else
@@ -336,11 +375,34 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!vma->pages);
trace_i915_vma_bind(vma, bind_flags);
- ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
- if (ret)
- return ret;
+ if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ work->vma = vma;
+ work->cache_level = cache_level;
+ work->flags = bind_flags | I915_VMA_ALLOC;
- vma->flags |= bind_flags;
+ /*
+ * Note we only want to chain up to the migration fence on
+ * the pages (not the object itself). As we don't track that,
+ * yet, we have to use the exclusive fence instead.
+ *
+ * Also note that we do not want to track the async vma as
+ * part of the obj->resv->excl_fence as it only affects
+ * execution and not the content or the object's backing store lifetime.
+ */
+ GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
+ i915_active_set_exclusive(&vma->active, &work->base.dma);
+ work->base.dma.error = 0; /* enable the queue_work() */
+
+ if (vma->obj)
+ __i915_gem_object_pin_pages(vma->obj);
+ } else {
+ GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
+ ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+ }
+
+ atomic_or(bind_flags, &vma->flags);
return 0;
}
@@ -350,18 +412,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);
-
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+ assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
+ if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
}
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
+ GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
- ptr = vma->iomap;
+ ptr = READ_ONCE(vma->iomap);
if (ptr == NULL) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
@@ -371,7 +431,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err;
}
- vma->iomap = ptr;
+ if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
+ io_mapping_unmap(ptr);
+ ptr = vma->iomap;
+ }
}
__i915_vma_pin(vma);
@@ -391,18 +454,12 @@ err:
void i915_vma_flush_writes(struct i915_vma *vma)
{
- if (!i915_vma_has_ggtt_write(vma))
- return;
-
- intel_gt_flush_ggtt_writes(vma->vm->gt);
-
- i915_vma_unset_ggtt_write(vma);
+ if (i915_vma_unset_ggtt_write(vma))
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
}
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_flush_writes(vma);
@@ -438,6 +495,9 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
if (!drm_mm_node_allocated(&vma->node))
return false;
+ if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
+ return true;
+
if (vma->node.size < size)
return true;
@@ -472,17 +532,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
- vma->flags |= I915_VMA_CAN_FENCE;
+ set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
else
- vma->flags &= ~I915_VMA_CAN_FENCE;
-}
-
-static bool color_differs(struct drm_mm_node *node, unsigned long color)
-{
- return node->allocated && node->color != color;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
@@ -494,7 +549,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
- if (vma->vm->mm.color_adjust == NULL)
+ if (!i915_vm_has_cache_coloring(vma->vm))
return true;
/* Only valid to be called on an already inserted vma */
@@ -502,11 +557,13 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
GEM_BUG_ON(list_empty(&node->node_list));
other = list_prev_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(other))
return false;
other = list_next_entry(node, node_list);
- if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
+ if (i915_node_color_differs(other, color) &&
+ !drm_mm_hole_follows(node))
return false;
return true;
@@ -541,13 +598,12 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = vma->vm->i915;
- unsigned int cache_level;
+ unsigned long color;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_closed(vma));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
@@ -567,7 +623,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+ end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
@@ -583,35 +639,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
return -ENOSPC;
}
- if (vma->obj) {
- ret = i915_gem_object_pin_pages(vma->obj);
- if (ret)
- return ret;
-
- cache_level = vma->obj->cache_level;
- } else {
- cache_level = 0;
- }
-
- GEM_BUG_ON(vma->pages);
-
- ret = vma->ops->set_pages(vma);
- if (ret)
- goto err_unpin;
+ color = 0;
+ if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+ color = vma->obj->cache_level;
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
if (!IS_ALIGNED(offset, alignment) ||
- range_overflows(offset, size, end)) {
- ret = -EINVAL;
- goto err_clear;
- }
+ range_overflows(offset, size, end))
+ return -EINVAL;
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, cache_level,
+ size, offset, color,
flags);
if (ret)
- goto err_clear;
+ return ret;
} else {
/*
* We only support huge gtt pages through the 48b PPGTT,
@@ -647,116 +689,259 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
ret = i915_gem_gtt_insert(vma->vm, &vma->node,
- size, alignment, cache_level,
+ size, alignment, color,
start, end, flags);
if (ret)
- goto err_clear;
+ return ret;
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
-
- mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
if (vma->obj) {
- atomic_inc(&vma->obj->bind_count);
- assert_bind_count(vma->obj);
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ atomic_inc(&obj->bind_count);
+ assert_bind_count(obj);
}
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
-
-err_clear:
- vma->ops->clear_pages(vma);
-err_unpin:
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
- return ret;
}
static void
-i915_vma_remove(struct i915_vma *vma)
+i915_vma_detach(struct i915_vma *vma)
{
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
-
- vma->ops->clear_pages(vma);
-
- mutex_lock(&vma->vm->mutex);
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
- mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
/*
- * Since the unbound list is global, only move to that list if
- * no more VMAs exist.
+ * Now that the object is completely decoupled from this vma, we
+ * can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
*/
+ list_del(&vma->vm_link);
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
+ assert_bind_count(obj);
atomic_dec(&obj->bind_count);
+ }
+}
- /*
- * And finally now the object is completely decoupled from this
- * vma, we can drop its hold on the backing storage and allow
- * it to be reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- assert_bind_count(obj);
+static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
+{
+ unsigned int bound;
+ bool pinned = true;
+
+ bound = atomic_read(&vma->flags);
+ do {
+ if (unlikely(flags & ~bound))
+ return false;
+
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
+ return false;
+
+ if (!(bound & I915_VMA_PIN_MASK))
+ goto unpinned;
+
+ GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+
+ return true;
+
+unpinned:
+ /*
+ * If pin_count==0, but we are bound, check under the lock to avoid
+ * racing with a concurrent i915_vma_unbind().
+ */
+ mutex_lock(&vma->vm->mutex);
+ do {
+ if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
+ pinned = false;
+ break;
+ }
+
+ if (unlikely(flags & ~bound)) {
+ pinned = false;
+ break;
+ }
+ } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
+ mutex_unlock(&vma->vm->mutex);
+
+ return pinned;
+}
+
+static int vma_get_pages(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (atomic_add_unless(&vma->pages_count, 1, 0))
+ return 0;
+
+ /* Allocations ahoy! */
+ if (mutex_lock_interruptible(&vma->pages_mutex))
+ return -EINTR;
+
+ if (!atomic_read(&vma->pages_count)) {
+ if (vma->obj) {
+ err = i915_gem_object_pin_pages(vma->obj);
+ if (err)
+ goto unlock;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err) {
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ goto unlock;
+ }
}
+ atomic_inc(&vma->pages_count);
+
+unlock:
+ mutex_unlock(&vma->pages_mutex);
+
+ return err;
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags)
+static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
- const unsigned int bound = vma->flags;
- int ret;
+ /* We allocate under vma_get_pages, so beware the shrinker */
+ mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
+ GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
+ if (atomic_sub_return(count, &vma->pages_count) == 0) {
+ vma->ops->clear_pages(vma);
+ GEM_BUG_ON(vma->pages);
+ if (vma->obj)
+ i915_gem_object_unpin_pages(vma->obj);
+ }
+ mutex_unlock(&vma->pages_mutex);
+}
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
- GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
- GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
+static void vma_put_pages(struct i915_vma *vma)
+{
+ if (atomic_add_unless(&vma->pages_count, -1, 1))
+ return;
- if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
- ret = -EBUSY;
- goto err_unpin;
+ __vma_put_pages(vma, 1);
+}
+
+static void vma_unbind_pages(struct i915_vma *vma)
+{
+ unsigned int count;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ /* The upper portion of pages_count is the number of bindings */
+ count = atomic_read(&vma->pages_count);
+ count >>= I915_VMA_PAGES_BIAS;
+ GEM_BUG_ON(!count);
+
+ __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
+}
+
+int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+ struct i915_vma_work *work = NULL;
+ unsigned int bound;
+ int err;
+
+ BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+ BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+ GEM_BUG_ON(flags & PIN_UPDATE);
+ GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
+
+ /* First try and grab the pin without rebinding the vma */
+ if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
+ return 0;
+
+ err = vma_get_pages(vma);
+ if (err)
+ return err;
+
+ if (flags & vma->vm->bind_async_flags) {
+ work = i915_vma_work();
+ if (!work) {
+ err = -ENOMEM;
+ goto err_pages;
+ }
}
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- ret = i915_vma_insert(vma, size, alignment, flags);
- if (ret)
- goto err_unpin;
+ /* No more allocations allowed once we hold vm->mutex */
+ err = mutex_lock_interruptible(&vma->vm->mutex);
+ if (err)
+ goto err_fence;
+
+ bound = atomic_read(&vma->flags);
+ if (unlikely(bound & I915_VMA_ERROR)) {
+ err = -ENOMEM;
+ goto err_unlock;
}
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
- if (ret)
- goto err_remove;
+ if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
+ err = -EAGAIN; /* pins are meant to be fairly temporary */
+ goto err_unlock;
+ }
- GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+ if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
+ __i915_vma_pin(vma);
+ goto err_unlock;
+ }
- if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
- __i915_vma_set_map_and_fenceable(vma);
+ err = i915_active_acquire(&vma->active);
+ if (err)
+ goto err_unlock;
+
+ if (!(bound & I915_VMA_BIND_MASK)) {
+ err = i915_vma_insert(vma, size, alignment, flags);
+ if (err)
+ goto err_active;
+
+ if (i915_is_ggtt(vma->vm))
+ __i915_vma_set_map_and_fenceable(vma);
+ }
+
+ GEM_BUG_ON(!vma->pages);
+ err = i915_vma_bind(vma,
+ vma->obj ? vma->obj->cache_level : 0,
+ flags, work);
+ if (err)
+ goto err_remove;
+
+ /* There should be at most 2 active bindings (user, global) */
+ GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
+ atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ __i915_vma_pin(vma);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
err_remove:
- if ((bound & I915_VMA_BIND_MASK) == 0) {
- i915_vma_remove(vma);
- GEM_BUG_ON(vma->pages);
- GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
+ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
+ i915_vma_detach(vma);
+ drm_mm_remove_node(&vma->node);
}
-err_unpin:
- __i915_vma_unpin(vma);
- return ret;
+err_active:
+ i915_active_release(&vma->active);
+err_unlock:
+ mutex_unlock(&vma->vm->mutex);
+err_fence:
+ if (work)
+ dma_fence_work_commit(&work->base);
+err_pages:
+ vma_put_pages(vma);
+ return err;
}
void i915_vma_close(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
+ struct intel_gt *gt = vma->vm->gt;
unsigned long flags;
GEM_BUG_ON(i915_vma_is_closed(vma));
@@ -773,79 +958,87 @@ void i915_vma_close(struct i915_vma *vma)
* causing us to rebind the VMA once more. This ends up being a lot
* of wasted work for the steady state.
*/
- spin_lock_irqsave(&i915->gt.closed_lock, flags);
- list_add(&vma->closed_link, &i915->gt.closed_vma);
- spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
+ spin_lock_irqsave(&gt->closed_lock, flags);
+ list_add(&vma->closed_link, &gt->closed_vma);
+ spin_unlock_irqrestore(&gt->closed_lock, flags);
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
- if (!i915_vma_is_closed(vma))
- return;
+ struct intel_gt *gt = vma->vm->gt;
- spin_lock_irq(&i915->gt.closed_lock);
+ spin_lock_irq(&gt->closed_lock);
list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
void i915_vma_reopen(struct i915_vma *vma)
{
- __i915_vma_remove_closed(vma);
+ if (i915_vma_is_closed(vma))
+ __i915_vma_remove_closed(vma);
}
-static void __i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_destroy(struct i915_vma *vma)
{
- GEM_BUG_ON(vma->node.allocated);
- GEM_BUG_ON(vma->fence);
-
- mutex_lock(&vma->vm->mutex);
- list_del(&vma->vm_link);
- mutex_unlock(&vma->vm->mutex);
+ if (drm_mm_node_allocated(&vma->node)) {
+ mutex_lock(&vma->vm->mutex);
+ atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
+ WARN_ON(__i915_vma_unbind(vma));
+ mutex_unlock(&vma->vm->mutex);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+ }
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (vma->obj) {
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->vma.lock);
list_del(&vma->obj_link);
- rb_erase(&vma->obj_node, &vma->obj->vma.tree);
+ rb_erase(&vma->obj_node, &obj->vma.tree);
spin_unlock(&obj->vma.lock);
}
- i915_active_fini(&vma->active);
+ __i915_vma_remove_closed(vma);
+ i915_vm_put(vma->vm);
+ i915_active_fini(&vma->active);
i915_vma_free(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_parked(struct intel_gt *gt)
{
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ struct i915_vma *vma, *next;
- GEM_BUG_ON(i915_vma_is_pinned(vma));
+ spin_lock_irq(&gt->closed_lock);
+ list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
- __i915_vma_remove_closed(vma);
+ /* XXX All to avoid keeping a reference on i915_vma itself */
- WARN_ON(i915_vma_unbind(vma));
- GEM_BUG_ON(i915_vma_is_active(vma));
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
- __i915_vma_destroy(vma);
-}
+ if (!i915_vm_tryopen(vm)) {
+ i915_gem_object_put(obj);
+ obj = NULL;
+ }
-void i915_vma_parked(struct drm_i915_private *i915)
-{
- struct i915_vma *vma, *next;
+ spin_unlock_irq(&gt->closed_lock);
- spin_lock_irq(&i915->gt.closed_lock);
- list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
- list_del_init(&vma->closed_link);
- spin_unlock_irq(&i915->gt.closed_lock);
+ if (obj) {
+ i915_vma_destroy(vma);
+ i915_gem_object_put(obj);
+ }
- i915_vma_destroy(vma);
+ i915_vm_close(vm);
- spin_lock_irq(&i915->gt.closed_lock);
+ /* Restart after dropping lock */
+ spin_lock_irq(&gt->closed_lock);
+ next = list_first_entry(&gt->closed_vma,
+ typeof(*next), closed_link);
}
- spin_unlock_irq(&i915->gt.closed_lock);
+ spin_unlock_irq(&gt->closed_lock);
}
static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -883,6 +1076,20 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link);
}
+int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
+{
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+ /* Wait for the vma to be bound before we start! */
+ err = i915_request_await_active(rq, &vma->active);
+ if (err)
+ return err;
+
+ return i915_active_add_request(&vma->active, rq);
+}
+
int i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags)
@@ -890,27 +1097,15 @@ int i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
int err;
- assert_vma_held(vma);
assert_object_held(obj);
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- /*
- * Add a reference if we're newly entering the active list.
- * The order in which we add operations to the retirement queue is
- * vital here: mark_active adds to the start of the callback list,
- * such that subsequent callbacks are called first. Therefore we
- * add the active reference first and queue for it to be dropped
- * *last*.
- */
- err = i915_active_ref(&vma->active, rq->timeline, rq);
+ err = __i915_vma_move_to_active(vma, rq);
if (unlikely(err))
return err;
if (flags & EXEC_OBJECT_WRITE) {
if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
- i915_active_ref(&obj->frontbuffer->write,
- rq->timeline,
- rq);
+ i915_active_add_request(&obj->frontbuffer->write, rq);
dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -930,44 +1125,31 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-int i915_vma_unbind(struct i915_vma *vma)
+int __i915_vma_unbind(struct i915_vma *vma)
{
int ret;
- lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+ lockdep_assert_held(&vma->vm->mutex);
/*
* First wait upon any activity as retiring the request may
* have side-effects such as unpinning or even unbinding this vma.
+ *
+ * XXX Actually waiting under the vm->mutex is a hindrance and
+ * should be pipelined wherever possible. In cases where that is
+ * unavoidable, we should lift the wait to before the mutex.
*/
- might_sleep();
- if (i915_vma_is_active(vma)) {
- /*
- * When a closed VMA is retired, it is unbound - eek.
- * In order to prevent it from being recursively closed,
- * take a pin on the vma so that the second unbind is
- * aborted.
- *
- * Even more scary is that the retire callback may free
- * the object (last active vma). To prevent the explosion
- * we defer the actual object free to a worker that can
- * only proceed once it acquires the struct_mutex (which
- * we currently hold, therefore it cannot free this object
- * before we are finished).
- */
- __i915_vma_pin(vma);
- ret = i915_active_wait(&vma->active);
- __i915_vma_unpin(vma);
- if (ret)
- return ret;
- }
- GEM_BUG_ON(i915_vma_is_active(vma));
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EBUSY;
}
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (!drm_mm_node_allocated(&vma->node))
return 0;
@@ -982,34 +1164,47 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
- mutex_lock(&vma->vm->mutex);
ret = i915_vma_revoke_fence(vma);
- mutex_unlock(&vma->vm->mutex);
if (ret)
return ret;
/* Force a pagefault for domain tracking on next user access */
- mutex_lock(&vma->vm->mutex);
i915_vma_revoke_mmap(vma);
- mutex_unlock(&vma->vm->mutex);
__i915_vma_iounmap(vma);
- vma->flags &= ~I915_VMA_CAN_FENCE;
+ clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(!vma->vm->closed)) {
+ if (likely(atomic_read(&vma->vm->open))) {
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
- i915_vma_remove(vma);
+ i915_vma_detach(vma);
+ vma_unbind_pages(vma);
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
return 0;
}
+int i915_vma_unbind(struct i915_vma *vma)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err)
+ return err;
+
+ err = __i915_vma_unbind(vma);
+ mutex_unlock(&vm->mutex);
+
+ return err;
+}
+
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
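The try_qad_pin() fast path in i915_vma.c above keeps the pin count in the low bits of the atomic flags word and only increments while the count is already non-zero, deferring everything else to vm->mutex. Stripped to that essential cmpxchg loop (hypothetical helper, int-sized mask):

#include <linux/atomic.h>

static bool try_pin_fast(atomic_t *flags, int pin_mask)
{
	int old = atomic_read(flags);

	do {
		if (!(old & pin_mask))
			return false;	/* unpinned: take the locked slow path */
	} while (!atomic_try_cmpxchg(flags, &old, old + 1));

	return true;			/* pinned without touching vm->mutex */
}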
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 889fc7cb910a..465932813bc5 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -72,7 +72,7 @@ struct i915_vma {
* that exist in the ctx->handle_vmas LUT for this vma.
*/
atomic_t open_count;
- unsigned long flags;
+ atomic_t flags;
/**
* How many users have pinned this object in GTT space.
*
@@ -96,22 +96,41 @@ struct i915_vma {
* exclusive cachelines of a single page, so a maximum of 64 possible
* users.
*/
-#define I915_VMA_PIN_MASK 0xff
-#define I915_VMA_PIN_OVERFLOW BIT(8)
+#define I915_VMA_PIN_MASK 0x3ff
+#define I915_VMA_OVERFLOW 0x200
/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND BIT(9)
-#define I915_VMA_LOCAL_BIND BIT(10)
-#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
+#define I915_VMA_GLOBAL_BIND_BIT 10
+#define I915_VMA_LOCAL_BIND_BIT 11
-#define I915_VMA_GGTT BIT(11)
-#define I915_VMA_CAN_FENCE BIT(12)
-#define I915_VMA_USERFAULT_BIT 13
-#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
-#define I915_VMA_GGTT_WRITE BIT(14)
+#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
+#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))
+
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)
+
+#define I915_VMA_ALLOC_BIT 12
+#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT))
+
+#define I915_VMA_ERROR_BIT 13
+#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))
+
+#define I915_VMA_GGTT_BIT 14
+#define I915_VMA_CAN_FENCE_BIT 15
+#define I915_VMA_USERFAULT_BIT 16
+#define I915_VMA_GGTT_WRITE_BIT 17
+
+#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
+#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
+#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
+#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))
struct i915_active active;
+#define I915_VMA_PAGES_BIAS 24
+#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
+ atomic_t pages_count; /* number of active binds to the pages */
+ struct mutex pages_mutex; /* protect acquire/release of backing pages */
+
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
@@ -158,52 +177,57 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
return !i915_active_is_idle(&vma->active);
}
+int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq);
int __must_check i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq,
unsigned int flags);
+#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+
static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT;
+ return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_GGTT_WRITE;
+ return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- vma->flags |= I915_VMA_GGTT_WRITE;
+ set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
}
-static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma)
{
- vma->flags &= ~I915_VMA_GGTT_WRITE;
+ return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT,
+ __i915_vma_flags(vma));
}
void i915_vma_flush_writes(struct i915_vma *vma);
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_CAN_FENCE;
+ return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
- return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline void i915_vma_unset_userfault(struct i915_vma *vma)
{
- return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+ return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
static inline bool i915_vma_is_closed(const struct i915_vma *vma)
@@ -214,7 +238,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
- GEM_BUG_ON(!vma->node.allocated);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(upper_32_bits(vma->node.start));
GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
return lower_32_bits(vma->node.start);
@@ -293,13 +317,18 @@ i915_vma_compare(struct i915_vma *vma,
return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
-int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
- u32 flags);
-bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
+struct i915_vma_work *i915_vma_work(void);
+int i915_vma_bind(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags,
+ struct i915_vma_work *work);
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color);
bool i915_vma_misplaced(const struct i915_vma *vma,
u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
void i915_vma_revoke_mmap(struct i915_vma *vma);
+int __i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
@@ -318,30 +347,12 @@ static inline void i915_vma_unlock(struct i915_vma *vma)
dma_resv_unlock(vma->resv);
}
-int __i915_vma_do_pin(struct i915_vma *vma,
- u64 size, u64 alignment, u64 flags);
-static inline int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
-{
- BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
- BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
- BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
-
- /* Pin early to prevent the shrinker/eviction logic from destroying
- * our vma as we insert and bind.
- */
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
- return 0;
- }
-
- return __i915_vma_do_pin(vma, size, alignment, flags);
-}
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
static inline int i915_vma_pin_count(const struct i915_vma *vma)
{
- return vma->flags & I915_VMA_PIN_MASK;
+ return atomic_read(&vma->flags) & I915_VMA_PIN_MASK;
}
static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -351,18 +362,18 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
static inline void __i915_vma_pin(struct i915_vma *vma)
{
- vma->flags++;
- GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
+ atomic_inc(&vma->flags);
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
}
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- vma->flags--;
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
+ atomic_dec(&vma->flags);
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
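Note that i915_vma_pin() stops being an inline fast path: the pin count now lives in the low bits of the same atomic flags word (I915_VMA_PIN_MASK), so __i915_vma_pin()/__i915_vma_unpin() become bare atomic_inc()/atomic_dec(). A standalone sketch of that packing, with made-up bit positions and mask width:

#include <stdatomic.h>
#include <stdio.h>

#define PIN_MASK	0xffu		/* low byte: pin count (assumed width) */
#define GLOBAL_BIND	(1u << 8)	/* hypothetical bind bits above it */
#define LOCAL_BIND	(1u << 9)

static atomic_uint flags;

static unsigned int pin_count(void) { return atomic_load(&flags) & PIN_MASK; }
static void pin(void)   { atomic_fetch_add(&flags, 1); }
static void unpin(void) { atomic_fetch_sub(&flags, 1); }

int main(void)
{
	atomic_fetch_or(&flags, GLOBAL_BIND);
	pin(); pin();
	printf("pinned %u times, bound=%d\n",
	       pin_count(), !!(atomic_load(&flags) & GLOBAL_BIND)); /* 2, 1 */
	unpin(); unpin();
	printf("pin count now %u\n", pin_count());                  /* 0 */
	return 0;
}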
@@ -370,7 +381,13 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
static inline bool i915_vma_is_bound(const struct i915_vma *vma,
unsigned int where)
{
- return vma->flags & where;
+ return atomic_read(&vma->flags) & where;
+}
+
+static inline bool i915_node_color_differs(const struct drm_mm_node *node,
+ unsigned long color)
+{
+ return drm_mm_node_allocated(node) && node->color != color;
}
/**
@@ -382,8 +399,6 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma,
* the caller must call i915_vma_unpin_iomap to relinquish the pinning
* after the iomapping is no longer required.
*
- * Callers must hold the struct_mutex.
- *
* Returns a valid iomapped pointer or ERR_PTR.
*/
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
@@ -395,8 +410,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*
* Unpins the previously iomapped VMA from i915_vma_pin_iomap().
*
- * Callers must hold the struct_mutex. This function is only valid to be
- * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ * This function is only valid to be called on a VMA previously
+ * iomapped by the caller with i915_vma_pin_iomap().
*/
void i915_vma_unpin_iomap(struct i915_vma *vma);
@@ -424,6 +439,8 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
int __must_check i915_vma_pin_fence(struct i915_vma *vma);
int __must_check i915_vma_revoke_fence(struct i915_vma *vma);
+int __i915_vma_pin_fence(struct i915_vma *vma);
+
static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
{
GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0);
@@ -441,12 +458,11 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
if (vma->fence)
__i915_vma_unpin_fence(vma);
}
-void i915_vma_parked(struct drm_i915_private *i915);
+void i915_vma_parked(struct intel_gt *gt);
#define for_each_until(cond) if (cond) break; else
@@ -470,4 +486,10 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
+static inline int i915_vma_sync(struct i915_vma *vma)
+{
+ /* Wait for the asynchronous bindings and pending GPU reads */
+ return i915_active_wait(&vma->active);
+}
+
#endif
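Tying the header changes together: i915_vma_bind() now takes an optional i915_vma_work so the actual binding can be queued, and the new i915_vma_sync() waits for those asynchronous binds via i915_active_wait(). A toy userspace analogue of that queue-then-wait shape — pthreads only, none of this is i915 API:

#include <pthread.h>
#include <stdio.h>

static pthread_t worker;
static int bound;

static void *do_bind(void *arg)
{
	bound = 1;		/* stands in for the deferred PTE setup */
	return NULL;
}

static void vma_bind_async(void)
{
	pthread_create(&worker, NULL, do_bind, NULL);
}

static int vma_sync(void)	/* cf. i915_vma_sync(): wait for async binds */
{
	return pthread_join(worker, NULL);
}

int main(void)
{
	vma_bind_async();
	vma_sync();
	printf("bound=%d\n", bound);	/* prints 1 */
	return 0;
}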
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 546577e39b4e..09870a31b4f0 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,8 +44,8 @@
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
-#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9)
#define ICL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d0ed44d33484..a5b571364cf6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -93,9 +93,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
hweight8(sseu->slice_mask), sseu->slice_mask);
drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
+ drm_printf(p, "slice%d: %u subslices, mask=%08x\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
}
drm_printf(p, "EU total: %u\n", sseu->eu_total);
drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
@@ -118,10 +118,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
int subslice)
{
- int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
- int slice_stride = sseu->max_subslices * subslice_stride;
+ int slice_stride = sseu->max_subslices * sseu->eu_stride;
- return slice * slice_stride + subslice * subslice_stride;
+ return slice * slice_stride + subslice * sseu->eu_stride;
}
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
@@ -130,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
int i, offset = sseu_eu_idx(sseu, slice, subslice);
u16 eu_mask = 0;
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
(i * BITS_PER_BYTE);
}
@@ -143,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
{
int i, offset = sseu_eu_idx(sseu, slice, subslice);
- for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+ for (i = 0; i < sseu->eu_stride; i++) {
sseu->eu_mask[offset + i] =
(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
}
@@ -160,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
}
for (s = 0; s < sseu->max_slices; s++) {
- drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
+ drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
- sseu->subslice_mask[s]);
+ intel_sseu_get_subslices(sseu, s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
u16 enabled_eus = sseu_get_eus(sseu, s, ss);
@@ -183,44 +182,80 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
+ u8 s_en, u32 ss_en, u16 eu_en)
+{
+ int s, ss;
+
+ /* ss_en represents entire subslice mask across all slices */
+ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
+ sizeof(ss_en) * BITS_PER_BYTE);
+
+ for (s = 0; s < sseu->max_slices; s++) {
+ if ((s_en & BIT(s)) == 0)
+ continue;
+
+ sseu->slice_mask |= BIT(s);
+
+ intel_sseu_set_subslices(sseu, s, ss_en);
+
+ for (ss = 0; ss < sseu->max_subslices; ss++)
+ if (intel_sseu_has_subslice(sseu, s, ss))
+ sseu_set_eus(sseu, s, ss, eu_en);
+ }
+ sseu->eu_per_subslice = hweight16(eu_en);
+ sseu->eu_total = compute_eu_total(sseu);
+}
+
+static void gen12_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ u8 s_en;
+ u32 dss_en;
+ u16 eu_en = 0;
+ u8 eu_en_fuse;
+ int eu;
+
+ /*
+ * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
+ * Instead of splitting these, provide userspace with an array
+ * of DSS to more closely represent the hardware resource.
+ */
+ intel_sseu_set_info(sseu, 1, 6, 16);
+
+ s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
+
+ dss_en = I915_READ(GEN12_GT_DSS_ENABLE);
+
+ /* one bit per pair of EUs */
+ eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
+ for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
+ if (eu_en_fuse & BIT(eu))
+ eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
+
+ gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+
+ /* TGL only supports slice-level power gating */
+ sseu->has_slice_pg = 1;
+}
+
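The gen12 EU fuse is one bit per pair of EUs, so the loop above doubles each fuse bit into two adjacent bits of the EU mask. A standalone check of that expansion (the fuse value is made up):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int eu_en_fuse = 0x5;	/* pairs 0 and 2 enabled */
	unsigned int eu_en = 0;
	int max_eus_per_subslice = 16;

	for (int eu = 0; eu < max_eus_per_subslice / 2; eu++)
		if (eu_en_fuse & BIT(eu))
			eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);

	printf("eu_en = 0x%04x\n", eu_en);	/* prints 0x0033: EUs 0,1,4,5 */
	return 0;
}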
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
- u32 ss_en, ss_en_mask;
+ u32 ss_en;
u8 eu_en;
- int s;
- if (IS_ELKHARTLAKE(dev_priv)) {
- sseu->max_slices = 1;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
- } else {
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
- }
+ if (IS_ELKHARTLAKE(dev_priv))
+ intel_sseu_set_info(sseu, 1, 4, 8);
+ else
+ intel_sseu_set_info(sseu, 1, 8, 8);
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
- ss_en_mask = BIT(sseu->max_subslices) - 1;
eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);
- for (s = 0; s < sseu->max_slices; s++) {
- if (s_en & BIT(s)) {
- int ss_idx = sseu->max_subslices * s;
- int ss;
-
- sseu->slice_mask |= BIT(s);
- sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
- for (ss = 0; ss < sseu->max_subslices; ss++) {
- if (sseu->subslice_mask[s] & BIT(ss))
- sseu_set_eus(sseu, s, ss, eu_en);
- }
- }
- }
- sseu->eu_per_subslice = hweight8(eu_en);
- sseu->eu_total = compute_eu_total(sseu);
+ gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -236,23 +271,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
const int eu_mask = 0xff;
u32 subslice_mask, eu_en;
+ intel_sseu_set_info(sseu, 6, 4, 8);
+
sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
GEN10_F2_S_ENA_SHIFT;
- sseu->max_slices = 6;
- sseu->max_subslices = 4;
- sseu->max_eus_per_subslice = 8;
-
- subslice_mask = (1 << 4) - 1;
- subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
- GEN10_F2_SS_DIS_SHIFT);
-
- /*
- * Slice0 can have up to 3 subslices, but there are only 2 in
- * slice1/2.
- */
- sseu->subslice_mask[0] = subslice_mask;
- for (s = 1; s < sseu->max_slices; s++)
- sseu->subslice_mask[s] = subslice_mask & 0x3;
/* Slice0 */
eu_en = ~I915_READ(GEN8_EU_DISABLE0);
@@ -277,14 +299,25 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
eu_en = ~I915_READ(GEN10_EU_DISABLE3);
sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
- /* Do a second pass where we mark the subslices disabled if all their
- * eus are off.
- */
+ subslice_mask = (1 << 4) - 1;
+ subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
+ GEN10_F2_SS_DIS_SHIFT);
+
for (s = 0; s < sseu->max_slices; s++) {
+ u32 subslice_mask_with_eus = subslice_mask;
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
if (sseu_get_eus(sseu, s, ss) == 0)
- sseu->subslice_mask[s] &= ~BIT(ss);
+ subslice_mask_with_eus &= ~BIT(ss);
}
+
+ /*
+ * Slice0 can have up to 3 subslices, but there are only 2 in
+ * slice1/2.
+ */
+ intel_sseu_set_subslices(sseu, s, s == 0 ?
+ subslice_mask_with_eus :
+ subslice_mask_with_eus & 0x3);
}
sseu->eu_total = compute_eu_total(sseu);
@@ -310,13 +343,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
+ u8 subslice_mask = 0;
fuse = I915_READ(CHV_FUSE_GT);
sseu->slice_mask = BIT(0);
- sseu->max_slices = 1;
- sseu->max_subslices = 2;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 1, 2, 8);
if (!(fuse & CHV_FGT_DISABLE_SS0)) {
u8 disabled_mask =
@@ -325,7 +357,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(0);
+ subslice_mask |= BIT(0);
sseu_set_eus(sseu, 0, 0, ~disabled_mask);
}
@@ -336,10 +368,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);
- sseu->subslice_mask[0] |= BIT(1);
+ subslice_mask |= BIT(1);
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
+ intel_sseu_set_subslices(sseu, 0, subslice_mask);
+
sseu->eu_total = compute_eu_total(sseu);
/*
@@ -372,9 +406,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
/* BXT has a single slice and at most 3 subslices. */
- sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
- sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3,
+ IS_GEN9_LP(dev_priv) ? 3 : 4, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -393,14 +426,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
eu_disable = I915_READ(GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
int eu_per_ss;
u8 eu_disabled_mask;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -473,9 +506,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
fuse2 = I915_READ(GEN8_FUSE2);
sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
- sseu->max_slices = 3;
- sseu->max_subslices = 3;
- sseu->max_eus_per_subslice = 8;
+ intel_sseu_set_info(sseu, 3, 3, 8);
/*
* The subslice disable field is global, i.e. it applies
@@ -502,13 +533,13 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
/* skip disabled slice */
continue;
- sseu->subslice_mask[s] = subslice_mask;
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
u32 n_disabled;
- if (!(sseu->subslice_mask[s] & BIT(ss)))
+ if (!intel_sseu_has_subslice(sseu, s, ss))
/* skip disabled subslice */
continue;
@@ -552,6 +583,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
+ u8 subslice_mask = 0;
int s, ss;
/*
@@ -564,22 +596,18 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0);
+ subslice_mask = BIT(0);
break;
case 2:
sseu->slice_mask = BIT(0);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
case 3:
sseu->slice_mask = BIT(0) | BIT(1);
- sseu->subslice_mask[0] = BIT(0) | BIT(1);
- sseu->subslice_mask[1] = BIT(0) | BIT(1);
+ subslice_mask = BIT(0) | BIT(1);
break;
}
- sseu->max_slices = hweight8(sseu->slice_mask);
- sseu->max_subslices = hweight8(sseu->subslice_mask[0]);
-
fuse1 = I915_READ(HSW_PAVP_FUSE1);
switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
default:
@@ -596,9 +624,14 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
sseu->eu_per_subslice = 6;
break;
}
- sseu->max_eus_per_subslice = sseu->eu_per_subslice;
+
+ intel_sseu_set_info(sseu, hweight8(sseu->slice_mask),
+ hweight8(subslice_mask),
+ sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
+ intel_sseu_set_subslices(sseu, s, subslice_mask);
+
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
(1UL << sseu->eu_per_subslice) - 1);
@@ -900,12 +933,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_sprites[pipe] = 1;
}
- if (i915_modparams.disable_display) {
- DRM_INFO("Display disabled (module parameter)\n");
- info->num_pipes = 0;
- } else if (HAS_DISPLAY(dev_priv) &&
- (IS_GEN_RANGE(dev_priv, 7, 8)) &&
- HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) &&
+ HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -923,14 +952,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
- info->num_pipes = 0;
+ info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
DRM_INFO("PipeC fused off\n");
- info->num_pipes -= 1;
+ info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 enabled_mask = BIT(info->num_pipes) - 1;
+ u8 enabled_mask = info->pipe_mask;
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
enabled_mask &= ~BIT(PIPE_A);
@@ -951,7 +980,20 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
enabled_mask);
else
- info->num_pipes = hweight8(enabled_mask);
+ info->pipe_mask = enabled_mask;
+
+ if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
+ info->display.has_hdcp = 0;
+
+ if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
+ info->display.has_fbc = 0;
+
+ if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+ info->display.has_csr = 0;
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
+ info->display.has_dsc = 0;
}
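The switch from num_pipes to pipe_mask matters because fusing can knock out a middle pipe, leaving a non-contiguous set that a bare count cannot describe. A standalone illustration, with PIPE_* values assumed to be 0/1/2 as in the driver:

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };
#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);

	pipe_mask &= ~BIT(PIPE_B);	/* pipe B fused off */

	/* two pipes remain, but they are A and C -- a count loses that */
	printf("%d pipes left, mask 0x%x\n",
	       __builtin_popcount(pipe_mask), pipe_mask);	/* 2, 0x5 */
	return 0;
}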
/* Initialize slice/subslice/EU info */
@@ -965,8 +1007,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
gen9_sseu_info_init(dev_priv);
else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
+ else if (IS_GEN(dev_priv, 11))
gen11_sseu_info_init(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 12)
+ gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
@@ -1010,8 +1054,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEN11_GT_VEBOX_DISABLE_SHIFT;
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(dev_priv, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
@@ -1032,8 +1078,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(dev_priv, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
continue;
+ }
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 92e0c2e0954c..4bdf8a6cfb47 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -107,6 +107,7 @@ enum intel_ppgtt_type {
func(is_mobile); \
func(is_lp); \
func(require_force_probe); \
+ func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
func(gpu_reset_clobbers_display); \
@@ -135,8 +136,11 @@ enum intel_ppgtt_type {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_dsb); \
+ func(has_dsc); \
func(has_fbc); \
func(has_gmch); \
+ func(has_hdcp); \
func(has_hotplug); \
func(has_ipc); \
func(has_modular_fia); \
@@ -159,9 +163,11 @@ struct intel_device_info {
unsigned int page_sizes; /* page sizes supported by the HW */
+ u32 memory_regions; /* regions supported by the HW */
+
u32 display_mmio_offset;
- u8 num_pipes;
+ u8 pipe_mask;
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
new file mode 100644
index 000000000000..baaeaecc64af
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_drv.h"
+
+/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
+#define REGION_MAP(type, inst) \
+ BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)
+
+const u32 intel_region_map[] = {
+ [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
+ [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
+ [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
+};
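A quick decode check for the encoding above: REGION_MAP() packs BIT(type + 16) | BIT(inst), and the header's MEMORY_TYPE_FROM_REGION()/MEMORY_INSTANCE_FROM_REGION() macros recover both fields with ilog2(). Standalone sketch:

#include <stdio.h>

#define BIT(n) (1u << (n))
#define INTEL_MEMORY_TYPE_SHIFT 16
#define REGION_MAP(type, inst) \
	(BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst))

static unsigned int ilog2u(unsigned int x) { return 31 - __builtin_clz(x); }

int main(void)
{
	unsigned int r = REGION_MAP(1, 0);	/* INTEL_MEMORY_LOCAL, inst 0 */

	printf("encoded 0x%05x -> type %u, instance %u\n",
	       r,
	       ilog2u(r >> INTEL_MEMORY_TYPE_SHIFT),	/* 1 */
	       ilog2u(r & 0xffff));			/* 0 */
	return 0;
}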
+
+static u64
+intel_memory_region_free_pages(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ struct i915_buddy_block *block, *on;
+ u64 size = 0;
+
+ list_for_each_entry_safe(block, on, blocks, link) {
+ size += i915_buddy_block_size(&mem->mm, block);
+ i915_buddy_free(&mem->mm, block);
+ }
+ INIT_LIST_HEAD(blocks);
+
+ return size;
+}
+
+void
+__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks)
+{
+ mutex_lock(&mem->mm_lock);
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+}
+
+void
+__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+{
+ struct list_head blocks;
+
+ INIT_LIST_HEAD(&blocks);
+ list_add(&block->link, &blocks);
+ __intel_memory_region_put_pages_buddy(block->private, &blocks);
+}
+
+int
+__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks)
+{
+ unsigned int min_order = 0;
+ unsigned long n_pages;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
+ GEM_BUG_ON(!list_empty(blocks));
+
+ if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
+ min_order = ilog2(mem->min_page_size) -
+ ilog2(mem->mm.chunk_size);
+ }
+
+ if (flags & I915_ALLOC_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+ }
+
+ n_pages = size >> ilog2(mem->mm.chunk_size);
+
+ mutex_lock(&mem->mm_lock);
+
+ do {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ order = fls(n_pages) - 1;
+ GEM_BUG_ON(order > mem->mm.max_order);
+ GEM_BUG_ON(order < min_order);
+
+ do {
+ block = i915_buddy_alloc(&mem->mm, order);
+ if (!IS_ERR(block))
+ break;
+
+ if (order-- == min_order)
+ goto err_free_blocks;
+ } while (1);
+
+ n_pages -= BIT(order);
+
+ block->private = mem;
+ list_add(&block->link, blocks);
+
+ if (!n_pages)
+ break;
+ } while (1);
+
+ mutex_unlock(&mem->mm_lock);
+ return 0;
+
+err_free_blocks:
+ intel_memory_region_free_pages(mem, blocks);
+ mutex_unlock(&mem->mm_lock);
+ return -ENXIO;
+}
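The allocation loop above serves a request greedily from the largest power-of-two order downward, retrying at smaller orders when a block is unavailable. Ignoring the failure path, here is how a 48-chunk request splits, assuming 4KiB chunks:

#include <stdio.h>

int main(void)
{
	unsigned long n_pages = 48;	/* 192KiB / 4KiB chunk_size */

	while (n_pages) {
		int order = (8 * sizeof(n_pages) - 1) -
			    __builtin_clzl(n_pages);	/* fls(n) - 1 */
		printf("alloc order %d (%lu pages)\n", order, 1ul << order);
		n_pages -= 1ul << order;
	}
	/* prints: order 5 (32 pages), then order 4 (16 pages) */
	return 0;
}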
+
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ struct i915_buddy_block *block;
+ LIST_HEAD(blocks);
+ int ret;
+
+ ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
+ if (ret)
+ return ERR_PTR(ret);
+
+ block = list_first_entry(&blocks, typeof(*block), link);
+ list_del_init(&block->link);
+ return block;
+}
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem)
+{
+ return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+ PAGE_SIZE);
+}
+
+void intel_memory_region_release_buddy(struct intel_memory_region *mem)
+{
+ i915_buddy_fini(&mem->mm);
+}
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops)
+{
+ struct intel_memory_region *mem;
+ int err;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ mem->i915 = i915;
+ mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->io_start = io_start;
+ mem->min_page_size = min_page_size;
+ mem->ops = ops;
+
+ mutex_init(&mem->objects.lock);
+ INIT_LIST_HEAD(&mem->objects.list);
+ INIT_LIST_HEAD(&mem->objects.purgeable);
+
+ mutex_init(&mem->mm_lock);
+
+ if (ops->init) {
+ err = ops->init(mem);
+ if (err)
+ goto err_free;
+ }
+
+ kref_init(&mem->kref);
+ return mem;
+
+err_free:
+ kfree(mem);
+ return ERR_PTR(err);
+}
+
+static void __intel_memory_region_destroy(struct kref *kref)
+{
+ struct intel_memory_region *mem =
+ container_of(kref, typeof(*mem), kref);
+
+ if (mem->ops->release)
+ mem->ops->release(mem);
+
+ mutex_destroy(&mem->mm_lock);
+ mutex_destroy(&mem->objects.lock);
+ kfree(mem);
+}
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem)
+{
+ kref_get(&mem->kref);
+ return mem;
+}
+
+void intel_memory_region_put(struct intel_memory_region *mem)
+{
+ kref_put(&mem->kref, __intel_memory_region_destroy);
+}
+
+/* Global memory region registration -- only slight layer inversions! */
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *mem = ERR_PTR(-ENODEV);
+ u32 type;
+
+ if (!HAS_REGION(i915, BIT(i)))
+ continue;
+
+ type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ mem = i915_gem_shmem_setup(i915);
+ break;
+ case INTEL_MEMORY_STOLEN:
+ mem = i915_gem_stolen_setup(i915);
+ break;
+ case INTEL_MEMORY_LOCAL:
+ mem = intel_setup_fake_lmem(i915);
+ break;
+ }
+
+ if (IS_ERR(mem)) {
+ err = PTR_ERR(mem);
+ DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ goto out_cleanup;
+ }
+
+ mem->id = intel_region_map[i];
+ mem->type = type;
+ mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);
+
+ i915->mm.regions[i] = mem;
+ }
+
+ return 0;
+
+out_cleanup:
+ intel_memory_regions_driver_release(i915);
+ return err;
+}
+
+void intel_memory_regions_driver_release(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
+ struct intel_memory_region *region =
+ fetch_and_zero(&i915->mm.regions[i]);
+
+ if (region)
+ intel_memory_region_put(region);
+ }
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_memory_region.c"
+#include "selftests/mock_region.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
new file mode 100644
index 000000000000..238722009677
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_MEMORY_REGION_H__
+#define __INTEL_MEMORY_REGION_H__
+
+#include <linux/kref.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/io-mapping.h>
+#include <drm/drm_mm.h>
+
+#include "i915_buddy.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * Base memory type
+ */
+enum intel_memory_type {
+ INTEL_MEMORY_SYSTEM = 0,
+ INTEL_MEMORY_LOCAL,
+ INTEL_MEMORY_STOLEN,
+};
+
+enum intel_region_id {
+ INTEL_REGION_SMEM = 0,
+ INTEL_REGION_LMEM,
+ INTEL_REGION_STOLEN,
+ INTEL_REGION_UNKNOWN, /* Should be last */
+};
+
+#define REGION_SMEM BIT(INTEL_REGION_SMEM)
+#define REGION_LMEM BIT(INTEL_REGION_LMEM)
+#define REGION_STOLEN BIT(INTEL_REGION_STOLEN)
+
+#define INTEL_MEMORY_TYPE_SHIFT 16
+
+#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
+#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
+
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS BIT(1)
+
+/**
+ * Memory regions encoded as type | instance
+ */
+extern const u32 intel_region_map[];
+
+struct intel_memory_region_ops {
+ unsigned int flags;
+
+ int (*init)(struct intel_memory_region *mem);
+ void (*release)(struct intel_memory_region *mem);
+
+ struct drm_i915_gem_object *
+ (*create_object)(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+};
+
+struct intel_memory_region {
+ struct drm_i915_private *i915;
+
+ const struct intel_memory_region_ops *ops;
+
+ struct io_mapping iomap;
+ struct resource region;
+
+ /* For fake LMEM */
+ struct drm_mm_node fake_mappable;
+
+ struct i915_buddy_mm mm;
+ struct mutex mm_lock;
+
+ struct kref kref;
+
+ resource_size_t io_start;
+ resource_size_t min_page_size;
+
+ unsigned int type;
+ unsigned int instance;
+ unsigned int id;
+
+ dma_addr_t remap_addr;
+
+ struct {
+ struct mutex lock; /* Protects access to objects */
+ struct list_head list;
+ struct list_head purgeable;
+ } objects;
+};
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem);
+void intel_memory_region_release_buddy(struct intel_memory_region *mem);
+
+int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags,
+ struct list_head *blocks);
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags);
+void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+ struct list_head *blocks);
+void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start,
+ const struct intel_memory_region_ops *ops);
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem);
+void intel_memory_region_put(struct intel_memory_region *mem);
+
+int intel_memory_regions_hw_probe(struct drm_i915_private *i915);
+void intel_memory_regions_driver_release(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 15f8bff141f9..8fd92b9130a7 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -52,7 +52,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
@@ -74,12 +75,16 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
WARN_ON(!IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
+ case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* Comet Lake V PCH is based on KBP, which is SPT compatible */
+ return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
- case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
@@ -87,6 +92,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
WARN_ON(!IS_TIGERLAKE(dev_priv));
return PCH_TGP;
+ case INTEL_PCH_JSP_DEVICE_ID_TYPE:
+ case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Jasper Lake PCH\n");
+ WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ return PCH_JSP;
default:
return PCH_NONE;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index c29c81ec7971..d26c25dd8d54 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -23,6 +23,7 @@ enum intel_pch {
PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
+ PCH_JSP, /* Jasper Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
PCH_TGP, /* Tiger Lake PCH */
};
@@ -42,16 +43,19 @@ enum intel_pch {
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
+#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
-#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
+#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
+#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2efe1d12d5a9..809bff955b5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,7 +25,6 @@
*
*/
-#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -38,6 +37,8 @@
#include "display/intel_fbc.h"
#include "display/intel_sprite.h"
+#include "gt/intel_llc.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
@@ -45,26 +46,6 @@
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
-/**
- * DOC: RC6
- *
- * RC6 is a special power stage which allows the GPU to enter a very
- * low-voltage mode when idle, using down to 0V while at this stage. This
- * stage is entered automatically when the GPU is idle and RC6 support is
- * enabled; as soon as a new workload arises, the GPU wakes up automatically
- * as well.
- *
- * There are different RC6 modes available in Intel GPUs, which differ from
- * each other in the latency required to enter and leave RC6, and in the
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags defines which states the GPU is
- * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is the deepest RC6. Their support by hardware varies according to
- * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
- * the one which brings the most power savings; deeper states save more
- * power, but require higher latency to switch to and wake up.
- */
-
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -224,8 +205,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
break;
}
- dev_priv->ips.r_t = dev_priv->mem_freq;
-
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
@@ -254,14 +233,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq = 0;
break;
}
-
- if (dev_priv->fsb_freq == 3200) {
- dev_priv->ips.c_m = 0;
- } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
- dev_priv->ips.c_m = 1;
- } else {
- dev_priv->ips.c_m = 2;
- }
}
static const struct cxsr_latency cxsr_latency_table[] = {
@@ -1145,10 +1116,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
- if (plane->id == PLANE_CURSOR)
- width = plane_state->base.crtc_w;
- else
- width = drm_rect_width(&plane_state->base.dst);
+ width = drm_rect_width(&plane_state->base.dst);
if (plane->id == PLANE_CURSOR) {
wm = intel_wm_method2(clock, htotal, width, cpp, latency);
@@ -1335,8 +1303,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1498,7 +1466,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->cxsr = true;
wm->hpll_en = true;
@@ -1517,10 +1485,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->fbc_en)
wm->fbc_en = false;
- num_active_crtcs++;
+ num_active_pipes++;
}
- if (num_active_crtcs != 1) {
+ if (num_active_pipes != 1) {
wm->cxsr = false;
wm->hpll_en = false;
wm->fbc_en = false;
@@ -1667,7 +1635,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
- int num_active_planes = hweight32(active_planes);
+ int num_active_planes = hweight8(active_planes);
const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
int sprite0_fifo_extra = 0;
@@ -1856,8 +1824,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
- int num_active_planes = hweight32(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ int num_active_planes = hweight8(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1917,7 +1885,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 0; level < wm_state->num_levels; level++) {
const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+ const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
break;
@@ -2106,7 +2074,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
- int num_active_crtcs = 0;
+ int num_active_pipes = 0;
wm->level = dev_priv->wm.max_level;
wm->cxsr = true;
@@ -2120,14 +2088,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
if (!wm_state->cxsr)
wm->cxsr = false;
- num_active_crtcs++;
+ num_active_pipes++;
wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
}
- if (num_active_crtcs != 1)
+ if (num_active_pipes != 1)
wm->cxsr = false;
- if (num_active_crtcs > 1)
+ if (num_active_pipes > 1)
wm->level = VLV_WM_LEVEL_PM2;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2577,7 +2545,8 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
return ilk_wm_method2(crtc_state->pixel_rate,
crtc_state->base.adjusted_mode.crtc_htotal,
- plane_state->base.crtc_w, cpp, mem_value);
+ drm_rect_width(&plane_state->base.dst),
+ cpp, mem_value);
}
/* Only for WM_LP. */
@@ -2656,7 +2625,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
/* HSW allows LP1+ watermarks even with multiple pipes */
if (level == 0 || config->num_pipes_active > 1) {
- fifo_size /= INTEL_INFO(dev_priv)->num_pipes;
+ fifo_size /= INTEL_NUM_PIPES(dev_priv);
/*
* For some reason the non self refresh
@@ -3117,8 +3086,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_plane *plane;
- const struct drm_plane_state *plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
const struct intel_plane_state *pristate = NULL;
const struct intel_plane_state *sprstate = NULL;
const struct intel_plane_state *curstate = NULL;
@@ -3127,15 +3096,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
pipe_wm = &crtc_state->wm.ilk.optimal;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) {
- const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- pristate = ps;
- else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = ps;
- else if (plane->type == DRM_PLANE_TYPE_CURSOR)
- curstate = ps;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ pristate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ sprstate = plane_state;
+ else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ curstate = plane_state;
}
pipe_wm->pipe_enabled = crtc_state->base.active;
@@ -3662,10 +3629,47 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
+ /* HACK! */
+ if (IS_GEN(dev_priv, 12))
+ return false;
+
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
+static void
+skl_setup_sagv_block_time(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = sandybridge_pcode_read(dev_priv,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (!ret) {
+ dev_priv->sagv_block_time_us = val;
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n");
+ } else if (IS_GEN(dev_priv, 11)) {
+ dev_priv->sagv_block_time_us = 10;
+ return;
+ } else if (IS_GEN(dev_priv, 10)) {
+ dev_priv->sagv_block_time_us = 20;
+ return;
+ } else if (IS_GEN(dev_priv, 9)) {
+ dev_priv->sagv_block_time_us = 30;
+ return;
+ } else {
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ }
+
+ /* Default to an unusable block time */
+ dev_priv->sagv_block_time_us = -1;
+}
+
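With the block time cached at init, the SAGV decision later in this file reduces to checking that the relevant watermark latency is at least sagv_block_time_us. A simplified standalone sketch of that rule — the latencies are made up, and the real code only checks the highest enabled watermark level:

#include <stdbool.h>
#include <stdio.h>

static bool can_enable_sagv(const int *latency_us, int levels,
			    int block_time_us)
{
	for (int level = 0; level < levels; level++)
		if (latency_us[level] < block_time_us)
			return false;
	return true;
}

int main(void)
{
	int latencies[] = { 35, 40, 60 };	/* per-level, illustrative */

	printf("30us block time: %d\n", can_enable_sagv(latencies, 3, 30)); /* 1 */
	printf("50us block time: %d\n", can_enable_sagv(latencies, 3, 50)); /* 0 */
	return 0;
}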
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
@@ -3754,33 +3758,25 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
struct intel_crtc_state *crtc_state;
enum pipe pipe;
int level, latency;
- int sagv_block_time_us;
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN(dev_priv, 9))
- sagv_block_time_us = 30;
- else if (IS_GEN(dev_priv, 10))
- sagv_block_time_us = 20;
- else
- sagv_block_time_us = 10;
-
/*
* If there are no active CRTCs, no additional checks need be performed
*/
- if (hweight32(state->active_crtcs) == 0)
+ if (hweight8(state->active_pipes) == 0)
return true;
/*
* SKL+ workaround: bspec recommends we disable SAGV when we have
* more than one pipe enabled
*/
- if (hweight32(state->active_crtcs) > 1)
+ if (hweight8(state->active_pipes) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(state->active_crtcs) - 1;
+ pipe = ffs(state->active_pipes) - 1;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
@@ -3812,7 +3808,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
* incur memory latencies higher than sagv_block_time_us we
* can't enable SAGV.
*/
- if (latency < sagv_block_time_us)
+ if (latency < dev_priv->sagv_block_time_us)
return false;
}
@@ -3875,14 +3871,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
if (WARN_ON(!state) || !crtc_state->base.active) {
alloc->start = 0;
alloc->end = 0;
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
return;
}
if (intel_state->active_pipe_changes)
- *num_active = hweight32(intel_state->active_crtcs);
+ *num_active = hweight8(intel_state->active_pipes);
else
- *num_active = hweight32(dev_priv->active_crtcs);
+ *num_active = hweight8(dev_priv->active_pipes);
ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
*num_active, ddb);
@@ -4013,7 +4009,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (is_planar_yuv_format(fourcc))
+ if (fourcc &&
+ drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4071,7 +4068,6 @@ static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4079,27 +4075,17 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
- /* n.b., src is 16.16 fixed point, dst is whole integer */
- if (plane->id == PLANE_CURSOR) {
- /*
- * Cursors only support 0/180 degree rotation,
- * hence no need to account for rotation here.
- */
- src_w = plane_state->base.src_w >> 16;
- src_h = plane_state->base.src_h >> 16;
- dst_w = plane_state->base.crtc_w;
- dst_h = plane_state->base.crtc_h;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&plane_state->base.src) >> 16;
- src_h = drm_rect_height(&plane_state->base.src) >> 16;
- dst_w = drm_rect_width(&plane_state->base.dst);
- dst_h = drm_rect_height(&plane_state->base.dst);
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ *
+ * n.b., src is 16.16 fixed point, dst is whole integer.
+ */
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
fp_w_ratio = div_fixed16(src_w, dst_w);
fp_h_ratio = div_fixed16(src_h, dst_h);
@@ -4109,117 +4095,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
return mul_fixed16(downscale_w, downscale_h);
}
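A numeric check of the 16.16 fixed-point math above: a 3840x2160 source on a 1920x1080 destination gives a 2.0 x 2.0 = 4.0 downscale. div_fixed16()/mul_fixed16() are re-derived here so the sketch builds standalone; the kernel versions round differently, and only ratios above 1.0 count:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t fixed16;	/* 16.16 fixed point */

static fixed16 div_fixed16(uint32_t a, uint32_t b)
{
	return ((uint64_t)a << 16) / b;
}

static fixed16 mul_fixed16(fixed16 a, fixed16 b)
{
	return ((uint64_t)a * b) >> 16;
}

int main(void)
{
	fixed16 w = div_fixed16(3840, 1920);	/* 0x20000 == 2.0 */
	fixed16 h = div_fixed16(2160, 1080);	/* 0x20000 == 2.0 */
	fixed16 d = mul_fixed16(w, h);		/* 0x40000 == 4.0 */

	printf("downscale = %u.%04u\n",
	       d >> 16, (d & 0xffff) * 10000 / 65536);	/* 4.0000 */
	return 0;
}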
-static uint_fixed_16_16_t
-skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
-{
- uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return pipe_downscale;
-
- if (crtc_state->pch_pfit.enabled) {
- u32 src_w, src_h, dst_w, dst_h;
- u32 pfit_size = crtc_state->pch_pfit.size;
- uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
- uint_fixed_16_16_t downscale_h, downscale_w;
-
- src_w = crtc_state->pipe_src_w;
- src_h = crtc_state->pipe_src_h;
- dst_w = pfit_size >> 16;
- dst_h = pfit_size & 0xffff;
-
- if (!dst_w || !dst_h)
- return pipe_downscale;
-
- fp_w_ratio = div_fixed16(src_w, dst_w);
- fp_h_ratio = div_fixed16(src_h, dst_h);
- downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
- downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
- pipe_downscale = mul_fixed16(downscale_w, downscale_h);
- }
-
- return pipe_downscale;
-}
-
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
- int crtc_clock, dotclk;
- u32 pipe_max_pixel_rate;
- uint_fixed_16_16_t pipe_downscale;
- uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
-
- if (!crtc_state->base.enable)
- return 0;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- uint_fixed_16_16_t plane_downscale;
- uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
- int bpp;
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- continue;
-
- if (WARN_ON(!plane_state->base.fb))
- return -EINVAL;
-
- plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
- bpp = plane_state->base.fb->format->cpp[0] * 8;
- if (bpp == 64)
- plane_downscale = mul_fixed16(plane_downscale,
- fp_9_div_8);
-
- max_downscale = max_fixed16(plane_downscale, max_downscale);
- }
- pipe_downscale = skl_pipe_downscale_amount(crtc_state);
-
- pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
-
- crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
- dotclk *= 2;
-
- pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
-
- if (pipe_max_pixel_rate < crtc_clock) {
- DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
- const int plane)
+ int color_plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
u32 data_rate;
u32 width = 0, height = 0;
- struct drm_framebuffer *fb;
- u32 format;
uint_fixed_16_16_t down_scale_amount;
u64 rate;
if (!plane_state->base.visible)
return 0;
- fb = plane_state->base.fb;
- format = fb->format->format;
-
- if (intel_plane->id == PLANE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && !is_planar_yuv_format(format))
+
+ if (color_plane == 1 &&
+ !drm_format_info_is_yuv_semiplanar(fb->format))
return 0;
/*
@@ -4231,7 +4126,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
height = drm_rect_height(&plane_state->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && is_planar_yuv_format(format)) {
+ if (color_plane == 1) {
width /= 2;
height /= 2;
}
@@ -4242,7 +4137,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
- rate *= fb->format->cpp[plane];
+ rate *= fb->format->cpp[color_plane];
return rate;
}
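A back-of-envelope version of the data-rate formula above: visible source width x height x bytes per pixel, with the UV color plane of a semiplanar format sub-sampled 2x2 (the downscale factor is omitted here). Figures are illustrative only:

#include <stdint.h>
#include <stdio.h>

static uint64_t data_rate(uint32_t w, uint32_t h, uint32_t cpp,
			  int color_plane)
{
	if (color_plane == 1) {	/* UV plane: 1/2 pixel sub-sampling each way */
		w /= 2;
		h /= 2;
	}
	return (uint64_t)w * h * cpp;
}

int main(void)
{
	/* NV12 3840x2160: Y plane 1 B/px, UV plane 2 B/px at quarter size */
	printf("Y  plane: %llu bytes\n",
	       (unsigned long long)data_rate(3840, 2160, 1, 0)); /* 8294400 */
	printf("UV plane: %llu bytes\n",
	       (unsigned long long)data_rate(3840, 2160, 2, 1)); /* 4147200 */
	return 0;
}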
@@ -4252,18 +4147,16 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *uv_plane_data_rate)
{
struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
/* packed/y */
@@ -4284,21 +4177,19 @@ static u64
icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate)
{
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!crtc_state->base.state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
+ enum plane_id plane_id = plane->id;
u64 rate;
- if (!plane_state->linked_plane) {
+ if (!plane_state->planar_linked_plane) {
rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
@@ -4307,17 +4198,17 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
/*
* The slave plane might not iterate in
- * drm_atomic_crtc_state_for_each_plane_state(),
+ * intel_atomic_crtc_state_for_each_plane_state(),
* and needs the master plane state which may be
* NULL if we try get_new_plane_state(), so we
* always calculate from the master.
*/
- if (plane_state->slave)
+ if (plane_state->planar_slave)
continue;
/* Y plane rate is calculated on the slave */
rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
- y_plane_id = plane_state->linked_plane->id;
+ y_plane_id = plane_state->planar_linked_plane->id;
plane_data_rate[y_plane_id] = rate;
total_data_rate += rate;
@@ -4647,7 +4538,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u32 interm_pbpl;
/* only planar format has two planes */
- if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
+ if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) {
DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
@@ -4659,7 +4550,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = is_planar_yuv_format(format->format);
+ wp->is_planar = drm_format_info_is_yuv_semiplanar(format);
wp->width = width;
if (color_plane == 1 && wp->is_planar)
@@ -4731,20 +4622,15 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
struct skl_wm_params *wp, int color_plane)
{
- struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
const struct drm_framebuffer *fb = plane_state->base.fb;
int width;
- if (plane->id == PLANE_CURSOR) {
- width = plane_state->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->base.src) >> 16;
- }
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->base.src) >> 16;
return skl_compute_wm_params(crtc_state, width,
fb->format, fb->modifier,
@@ -5056,12 +4942,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
int ret;
/* Watermarks calculated in master */
- if (plane_state->slave)
+ if (plane_state->planar_slave)
return 0;
- if (plane_state->linked_plane) {
+ if (plane_state->planar_linked_plane) {
const struct drm_framebuffer *fb = plane_state->base.fb;
- enum plane_id y_plane_id = plane_state->linked_plane->id;
+ enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
WARN_ON(!fb->format->is_yuv ||
@@ -5090,8 +4976,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- struct drm_plane *plane;
- const struct drm_plane_state *drm_plane_state;
+ struct intel_plane *plane;
+ const struct intel_plane_state *plane_state;
int ret;
/*
@@ -5100,10 +4986,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state,
- &crtc_state->base) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(drm_plane_state);
+ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state,
+ crtc_state) {
if (INTEL_GEN(dev_priv) >= 11)
ret = icl_build_plane_wm(crtc_state, plane_state);
@@ -5263,19 +5147,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static u32
-pipes_modified(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 i, ret = 0;
-
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- ret |= drm_crtc_mask(&crtc->base);
-
- return ret;
-}
-
static int
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
@@ -5451,36 +5322,27 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
}
-static int
-skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
+static int intel_add_all_pipes(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->base.dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- u32 realloc_pipes = pipes_modified(state);
- int ret, i;
- /*
- * When we distrust bios wm we always need to recompute to set the
- * expected DDB allocations for each CRTC.
- */
- if (dev_priv->wm.distrust_bios_wm)
- (*changed) = true;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
- /*
- * If this transaction isn't actually touching any CRTC's, don't
- * bother with watermark calculation. Note that if we pass this
- * test, we're guaranteed to hold at least one CRTC state mutex,
- * which means we can safely use values like dev_priv->active_crtcs
- * since any racing commits that want to update them would need to
- * hold _all_ CRTC state mutexes.
- */
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
- (*changed) = true;
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- if (!*changed)
- return 0;
+ return 0;
+}
+
+static int
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
/*
* If this is our first atomic update following hardware readout,
@@ -5489,7 +5351,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* ensure a full DDB recompute.
*/
if (dev_priv->wm.distrust_bios_wm) {
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
state->base.acquire_ctx);
if (ret)
return ret;
@@ -5497,13 +5359,13 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
state->active_pipe_changes = ~0;
/*
- * We usually only initialize state->active_crtcs if
+ * We usually only initialize state->active_pipes if
 * we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
if (!state->modeset)
- state->active_crtcs = dev_priv->active_crtcs;
+ state->active_pipes = dev_priv->active_pipes;
}
/*
@@ -5520,18 +5382,11 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- realloc_pipes = ~0;
state->wm_results.dirty_pipes = ~0;
- }
- /*
- * We're not recomputing for the pipes not included in the commit, so
- * make sure we start with the current state.
- */
- for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ ret = intel_add_all_pipes(state);
+ if (ret)
+ return ret;
}
return 0;
@@ -5604,14 +5459,13 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- bool changed = false;
int ret, i;
/* Clear all dirty flags */
results->dirty_pipes = 0;
- ret = skl_ddb_add_affected_pipes(state, &changed);
- if (ret || !changed)
+ ret = skl_ddb_add_affected_pipes(state);
+ if (ret)
return ret;
/*
@@ -5633,7 +5487,7 @@ skl_compute_wm(struct intel_atomic_state *state)
if (!skl_pipe_wm_equals(crtc,
&old_crtc_state->wm.skl.optimal,
&new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
@@ -5653,7 +5507,7 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
- if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
+ if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
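Throughout this patch, dirty_pipes changes from a mask of DRM CRTC indices (drm_crtc_mask()) to a mask of hardware pipes, so a membership test becomes a plain bit test. A sketch with a hypothetical pipe:

if (state->wm_results.dirty_pipes & BIT(PIPE_B))
	; /* pipe B's watermarks need reprogramming */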
@@ -5662,12 +5516,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
static void skl_initial_wm(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_values *results = &state->wm_results;
- if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
+ if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
@@ -5816,10 +5669,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
if (crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
+ hw->dirty_pipes |= BIT(crtc->pipe);
}
- if (dev_priv->active_crtcs) {
+ if (dev_priv->active_pipes) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
}
@@ -6397,2488 +6250,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
intel_enable_ipc(dev_priv);
}
-/*
- * Lock protecting IPS related data structures
- */
-DEFINE_SPINLOCK(mchdev_lock);
-
-bool ironlake_set_drps(struct drm_i915_private *i915, u8 val)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- lockdep_assert_held(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
- if (rgvswctl & MEMCTL_CMD_STS) {
- DRM_DEBUG("gpu busy, RCS change rejected\n");
- return false; /* still busy with another command */
- }
-
- rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
- (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
- intel_uncore_posting_read16(uncore, MEMSWCTL);
-
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
-
- return true;
-}
-
-static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u32 rgvmodectl;
- u8 fmax, fmin, fstart, vstart;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
-
- /* Enable temp reporting */
- intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
- intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE);
-
- /* 100ms RC evaluation intervals */
- intel_uncore_write(uncore, RCUPEI, 100000);
- intel_uncore_write(uncore, RCDNEI, 100000);
-
- /* Set max/min thresholds to 90ms and 80ms respectively */
- intel_uncore_write(uncore, RCBMAXAVG, 90000);
- intel_uncore_write(uncore, RCBMINAVG, 80000);
-
- intel_uncore_write(uncore, MEMIHYST, 1);
-
- /* Set up min, max, and cur for interrupt handling */
- fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
- fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
- fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
- MEMMODE_FSTART_SHIFT;
-
- vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
- PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
-
- dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
- dev_priv->ips.fstart = fstart;
-
- dev_priv->ips.max_delay = fstart;
- dev_priv->ips.min_delay = fmin;
- dev_priv->ips.cur_delay = fstart;
-
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
- fmax, fmin, fstart);
-
- intel_uncore_write(uncore,
- MEMINTREN,
- MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
-
- /*
- * Interrupts will be enabled in ironlake_irq_postinstall
- */
-
- intel_uncore_write(uncore, VIDSTART, vstart);
- intel_uncore_posting_read(uncore, VIDSTART);
-
- rgvmodectl |= MEMMODE_SWMODE_EN;
- intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
-
- if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
- MEMCTL_CMD_STS) == 0, 10))
- DRM_ERROR("stuck trying to change perf mode\n");
- mdelay(1);
-
- ironlake_set_drps(dev_priv, fstart);
-
- dev_priv->ips.last_count1 =
- intel_uncore_read(uncore, DMIEC) +
- intel_uncore_read(uncore, DDREC) +
- intel_uncore_read(uncore, CSIEC);
- dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
- dev_priv->ips.last_time2 = ktime_get_raw_ns();
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-static void ironlake_disable_drps(struct drm_i915_private *i915)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u16 rgvswctl;
-
- spin_lock_irq(&mchdev_lock);
-
- rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
-
- /* Ack interrupts, disable EFC interrupt */
- intel_uncore_write(uncore,
- MEMINTREN,
- intel_uncore_read(uncore, MEMINTREN) &
- ~MEMINT_EVAL_CHG_EN);
- intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
- intel_uncore_write(uncore,
- DEIER,
- intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
- intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
- intel_uncore_write(uncore,
- DEIMR,
- intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
-
- /* Go back to the starting frequency */
- ironlake_set_drps(i915, i915->ips.fstart);
- mdelay(1);
- rgvswctl |= MEMCTL_CMD_STS;
- intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
- mdelay(1);
-
- spin_unlock_irq(&mchdev_lock);
-}
-
-/* There's a funny hw issue where the hw returns all 0 when reading from
- * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
- * ourselves, instead of doing a rmw cycle (which might result in us clearing
- * all limits and leaving the GPU stuck at whatever frequency it is at).
- */
-static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 limits;
-
- /* Only set the down limit when we've reached the lowest level to avoid
- * getting more interrupts, otherwise leave this clear. This prevents a
- * race in the hw when coming out of rc6: There's a tiny window where
- * the hw runs at the minimal clock before selecting the desired
- * frequency; if the down threshold expires in that window we will not
- * receive a down interrupt. */
- if (INTEL_GEN(dev_priv) >= 9) {
- limits = (rps->max_freq_softlimit) << 23;
- if (val <= rps->min_freq_softlimit)
- limits |= (rps->min_freq_softlimit) << 14;
- } else {
- limits = rps->max_freq_softlimit << 24;
- if (val <= rps->min_freq_softlimit)
- limits |= rps->min_freq_softlimit << 16;
- }
-
- return limits;
-}
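The comment above is the key to this helper: because GEN6_RP_INTERRUPT_LIMITS reads back as all zeroes, a conventional read-modify-write would silently destroy the other limit. A hypothetical sketch of the pattern being avoided:

/* What a naive RMW would do here (sketch only, never do this): */
u32 limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS); /* always reads 0 */
limits |= min_freq_softlimit << 14;               /* gen9 down limit */
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);     /* up limit now lost */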
-
-static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 threshold_up = 0, threshold_down = 0; /* in % */
- u32 ei_up = 0, ei_down = 0;
-
- lockdep_assert_held(&rps->power.mutex);
-
- if (new_power == rps->power.mode)
- return;
-
- /* Note the units here are not exactly 1us, but 1280ns. */
- switch (new_power) {
- case LOW_POWER:
- /* Upclock if more than 95% busy over 16ms */
- ei_up = 16000;
- threshold_up = 95;
-
- /* Downclock if less than 85% busy over 32ms */
- ei_down = 32000;
- threshold_down = 85;
- break;
-
- case BETWEEN:
- /* Upclock if more than 90% busy over 13ms */
- ei_up = 13000;
- threshold_up = 90;
-
- /* Downclock if less than 75% busy over 32ms */
- ei_down = 32000;
- threshold_down = 75;
- break;
-
- case HIGH_POWER:
- /* Upclock if more than 85% busy over 10ms */
- ei_up = 10000;
- threshold_up = 85;
-
- /* Downclock if less than 60% busy over 32ms */
- ei_down = 32000;
- threshold_down = 60;
- break;
- }
-
- /* Once BYT can survive dynamic sw freq adjustments without
- * hanging the system, this restriction can be lifted.
- */
- if (IS_VALLEYVIEW(dev_priv))
- goto skip_hw_write;
-
- I915_WRITE(GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_up));
- I915_WRITE(GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_up * threshold_up / 100));
-
- I915_WRITE(GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_down));
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv,
- ei_down * threshold_down / 100));
-
- I915_WRITE(GEN6_RP_CONTROL,
- (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
-skip_hw_write:
- rps->power.mode = new_power;
- rps->power.up_threshold = threshold_up;
- rps->power.down_threshold = threshold_down;
-}
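The evaluation intervals and thresholds above are specified in microseconds and converted by GT_INTERVAL_FROM_US() into GT clock ticks (hence the "1280ns, not 1us" note). A rough sketch of that conversion, assuming a simple ticks-per-microsecond rate rather than the macro's real platform-dependent scaling:

static u32 us_to_gt_interval(u32 us, u32 ticks_per_us)
{
	return us * ticks_per_us;
}

/* e.g. LOW_POWER up threshold:
 * us_to_gt_interval(16000 * 95 / 100, ticks_per_us) */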
-
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int new_power;
-
- new_power = rps->power.mode;
- switch (rps->power.mode) {
- case LOW_POWER:
- if (val > rps->efficient_freq + 1 &&
- val > rps->cur_freq)
- new_power = BETWEEN;
- break;
-
- case BETWEEN:
- if (val <= rps->efficient_freq &&
- val < rps->cur_freq)
- new_power = LOW_POWER;
- else if (val >= rps->rp0_freq &&
- val > rps->cur_freq)
- new_power = HIGH_POWER;
- break;
-
- case HIGH_POWER:
- if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
- val < rps->cur_freq)
- new_power = BETWEEN;
- break;
- }
- /* Max/min bins are special */
- if (val <= rps->min_freq_softlimit)
- new_power = LOW_POWER;
- if (val >= rps->max_freq_softlimit)
- new_power = HIGH_POWER;
-
- mutex_lock(&rps->power.mutex);
- if (rps->power.interactive)
- new_power = HIGH_POWER;
- rps_set_power(dev_priv, new_power);
- mutex_unlock(&rps->power.mutex);
-}
-
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
-{
- struct intel_rps *rps = &i915->gt_pm.rps;
-
- if (INTEL_GEN(i915) < 6)
- return;
-
- mutex_lock(&rps->power.mutex);
- if (interactive) {
- if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
- rps_set_power(i915, HIGH_POWER);
- } else {
- GEM_BUG_ON(!rps->power.interactive);
- rps->power.interactive--;
- }
- mutex_unlock(&rps->power.mutex);
-}
-
-static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 mask = 0;
-
- /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
- if (val > rps->min_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
- if (val < rps->max_freq_softlimit)
- mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
-
- mask &= dev_priv->pm_rps_events;
-
- return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
-}
-
-/* gen6_set_rps is called to update the frequency request, but should also be
- * called when the range (min_delay and max_delay) is modified so that we can
- * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* min/max delay may still have been modified so be sure to
- * write the limits value.
- */
- if (val != rps->cur_freq) {
- gen6_set_rps_thresholds(dev_priv, val);
-
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN6_RPNSWREQ,
- GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(val));
- else
- I915_WRITE(GEN6_RPNSWREQ,
- GEN6_FREQUENCY(val) |
- GEN6_OFFSET(0) |
- GEN6_AGGRESSIVE_TURBO);
- }
-
- /* Make sure we continue to get interrupts
- * until we hit the minimum or maximum frequencies.
- */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- rps->cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- int err;
-
- if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
- "Odd GPU freq value\n"))
- val &= ~1;
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
- if (val != dev_priv->gt_pm.rps.cur_freq) {
- vlv_punit_get(dev_priv);
- err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- vlv_punit_put(dev_priv);
- if (err)
- return err;
-
- gen6_set_rps_thresholds(dev_priv, val);
- }
-
- dev_priv->gt_pm.rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
- return 0;
-}
-
-/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * If Gfx is idle, then:
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
- */
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val = rps->idle_freq;
- int err;
-
- if (rps->cur_freq <= val)
- return;
-
- /* The punit delays the write of the frequency and voltage until it
- * determines the GPU is awake. During normal usage we don't want to
- * waste power changing the frequency if the GPU is sleeping (rc6).
- * However, the GPU and driver are now idle and we do not want to delay
- * switching to minimum voltage (reducing power whilst idle) as we do
- * not expect to be woken in the near future and so must flush the
- * change by waking the device.
- *
- * We choose to take the media powerwell (either would do to trick the
- * punit into committing the voltage change) as that takes a lot less
- * power than the render powerwell.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
- err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
-
- if (err)
- DRM_ERROR("Failed to set RPS for idle\n");
-}
-
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- u8 freq;
-
- if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
- gen6_rps_reset_ei(dev_priv);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, rps->cur_freq));
-
- gen6_enable_rps_interrupts(dev_priv);
-
- /* Use the user's desired frequency as a guide, but for better
- * performance, jump directly to RPe as our starting frequency.
- */
- freq = max(rps->cur_freq,
- rps->efficient_freq);
-
- if (intel_set_rps(dev_priv,
- clamp(freq,
- rps->min_freq_softlimit,
- rps->max_freq_softlimit)))
- DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* Flush our bottom-half so that it does not race with us
- * setting the idle frequency and so that it is bounded by
- * our rpm wakeref. And then disable the interrupts to stop any
- * further RPS reclocking whilst we are asleep.
- */
- gen6_disable_rps_interrupts(dev_priv);
-
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_set_rps_idle(dev_priv);
- else
- gen6_set_rps(dev_priv, rps->idle_freq);
- rps->last_adj = 0;
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_sanitize_rps_pm_mask(dev_priv, ~0));
- }
- mutex_unlock(&rps->lock);
-}
-
-void gen6_rps_boost(struct i915_request *rq)
-{
- struct intel_rps *rps = &rq->i915->gt_pm.rps;
- unsigned long flags;
- bool boost;
-
- /* This is intentionally racy! We peek at the state here, then
- * validate inside the RPS worker.
- */
- if (!rps->enabled)
- return;
-
- if (i915_request_signaled(rq))
- return;
-
- /* Serializes with i915_request_retire() */
- boost = false;
- spin_lock_irqsave(&rq->lock, flags);
- if (!i915_request_has_waitboost(rq) &&
- !dma_fence_is_signaled_locked(&rq->fence)) {
- boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->flags |= I915_REQUEST_WAITBOOST;
- }
- spin_unlock_irqrestore(&rq->lock, flags);
- if (!boost)
- return;
-
- if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
- schedule_work(&rps->work);
-
- atomic_inc(&rps->boosts);
-}
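Stripped of the request locking above, the boost logic is the classic first-waiter pattern: only the 0 -> 1 transition of the waiter count pays for kicking the worker. As an isolated sketch:

/* atomic_fetch_inc() returns the old value, so only the first
 * waiter (old value 0) schedules the RPS worker. */
if (!atomic_fetch_inc(&rps->num_waiters))
	schedule_work(&rps->work);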
-
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- int err;
-
- lockdep_assert_held(&rps->lock);
- GEM_BUG_ON(val > rps->max_freq);
- GEM_BUG_ON(val < rps->min_freq);
-
- if (!rps->enabled) {
- rps->cur_freq = val;
- return 0;
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = valleyview_set_rps(dev_priv, val);
- else
- err = gen6_set_rps(dev_priv, val);
-
- return err;
-}
-
-static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN9_PG_ENABLE, 0);
-}
-
-static void gen9_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void gen6_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RC_CONTROL, 0);
-}
-
-static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
-{
- /* We take forcewake before disabling RC6; this is what
- * the BIOS expects when going into suspend. */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(GEN6_RP_CONTROL, 0);
-}
-
-static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
-{
- bool enable_rc6 = true;
- unsigned long rc6_ctx_base;
- u32 rc_ctl;
- int rc_sw_target;
-
- rc_ctl = I915_READ(GEN6_RC_CONTROL);
- rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
- RC_SW_TARGET_STATE_SHIFT;
- DRM_DEBUG_DRIVER("BIOS enabled RC states: "
- "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
- onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
- onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
- rc_sw_target);
-
- if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
- enable_rc6 = false;
- }
-
- /*
- * The exact context size is not known for BXT, so assume a page size
- * for this check.
- */
- rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
- (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
- DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
- enable_rc6 = false;
- }
-
- if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
- ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
- DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
- !I915_READ(GEN8_PUSHBUS_ENABLE) ||
- !I915_READ(GEN8_PUSHBUS_SHIFT)) {
- DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN6_GFXPAUSE)) {
- DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
- enable_rc6 = false;
- }
-
- if (!I915_READ(GEN8_MISC_CTRL0)) {
- DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
- enable_rc6 = false;
- }
-
- return enable_rc6;
-}
-
-static bool sanitize_rc6(struct drm_i915_private *i915)
-{
- struct intel_device_info *info = mkwrite_device_info(i915);
-
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(i915)) {
- info->has_rc6 = 0;
- info->has_rps = false;
- }
-
- if (info->has_rc6 &&
- IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
- DRM_INFO("RC6 disabled by BIOS\n");
- info->has_rc6 = 0;
- }
-
- /*
- * We assume that we do not have any deep rc6 levels if we don't
- * have the previous rc6 level supported, i.e. we use HAS_RC6()
- * as the initial coarse check for rc6 in general, moving on to
- * progressively finer/deeper levels.
- */
- if (!info->has_rc6 && info->has_rc6p)
- info->has_rc6p = 0;
-
- return info->has_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* All of these values are in units of 50MHz */
-
- /* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_GEN9_LP(dev_priv)) {
- u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 0) & 0xff;
- } else {
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 16) & 0xff;
- }
- /* hw_max = RP0 until we check for overclocking */
- rps->max_freq = rps->rp0_freq;
-
- rps->efficient_freq = rps->rp1_freq;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- u32 ddcc_status = 0;
-
- if (sandybridge_pcode_read(dev_priv,
- HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
- &ddcc_status, NULL) == 0)
- rps->efficient_freq =
- clamp_t(u8,
- ((ddcc_status >> 8) & 0xff),
- rps->min_freq,
- rps->max_freq);
- }
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Store the frequency values in 16.66 MHZ units, which is
- * the natural hardware unit for SKL
- */
- rps->rp0_freq *= GEN9_FREQ_SCALER;
- rps->rp1_freq *= GEN9_FREQ_SCALER;
- rps->min_freq *= GEN9_FREQ_SCALER;
- rps->max_freq *= GEN9_FREQ_SCALER;
- rps->efficient_freq *= GEN9_FREQ_SCALER;
- }
-}
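The RP fields above are 8-bit ratios in 50MHz units; on GEN9_BC and newer the driver rescales them by GEN9_FREQ_SCALER (3 in this era of the driver) so they are stored in the hardware's native 16.66MHz units. A worked example with a hypothetical fuse value:

u8 field = 0x16;                    /* 22: hypothetical RP0 ratio */
unsigned int hsw_mhz = field * 50;  /* 1100 MHz on HSW/BDW */
unsigned int skl_units = field * 3; /* 66 units of 16.66MHz, still 1100 MHz */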
-
-static void reset_rps(struct drm_i915_private *dev_priv,
- int (*set)(struct drm_i915_private *, u8))
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u8 freq = rps->cur_freq;
-
- /* force a reset */
- rps->power.mode = -1;
- rps->cur_freq = -1;
-
- if (set(dev_priv, freq))
- DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
-/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_i915_private *dev_priv)
-{
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Program defaults and thresholds for RPS */
- if (IS_GEN(dev_priv, 9))
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
-
- /* 1 second timeout */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
- GT_INTERVAL_FROM_US(dev_priv, 1000000));
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);
-
- /* Leaning on the below call to gen6_set_rps to program/setup the
- * Up/Down EI & threshold registers, as well as the RP_CONTROL,
- * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /*
- * 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GT_UC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time which the GPU is idle waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from plane_state is that we
- * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_EI_MODE(1));
-
- /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE |
- GEN9_MEDIA_PG_ENABLE |
- GEN11_MEDIA_SAMPLER_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6_mode;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- if (INTEL_GEN(dev_priv) >= 10) {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
- I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
- } else if (IS_SKYLAKE(dev_priv)) {
- /*
- * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
- * when CPG is enabled
- */
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
- } else {
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
- }
-
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- if (HAS_GT_UC(dev_priv))
- I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /*
- * 2c: Program Coarse Power Gating Policies.
- *
- * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
- * use instead is a more conservative estimate for the maximum time
- * it takes us to service a CS interrupt and submit a new ELSP - that
- * is the time for which the GPU is idle, waiting for the CPU to select the
- * next request to execute. If the idle hysteresis is less than that
- * interrupt service latency, the hardware will automatically gate
- * the power well and we will then incur the wake up cost on top of
- * the service latency. A similar guide from plane_state is that we
- * do not want the enable hysteresis to be less than the wakeup latency.
- *
- * igt/gem_exec_nop/sequential provides a rough estimate for the
- * service latency, and puts it around 10us for Broadwell (and other
- * big core) and around 40us for Broxton (and other low power cores).
- * [Note that for legacy ringbuffer submission, this is less than 1us!]
- * However, the wakeup latency on Broxton is closer to 100us. To be
- * conservative, we have to factor in a context switch on top (due
- * to ksoftirqd).
- */
- I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
- I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
-
- /* 3a: Enable RC6 */
- I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
- /* WaRsUseTimeoutMode:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- else
- rc6_mode = GEN6_RC_CTL_EI_MODE(1);
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE |
- rc6_mode);
-
- /*
- * 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
- */
- if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- I915_WRITE(GEN9_PG_ENABLE, 0);
- else
- I915_WRITE(GEN9_PG_ENABLE,
- GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* 1a: Software RC state - RC0 */
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 2a: Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
-
- /* 3: Enable RC6 */
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_RC6_ENABLE);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen8_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1 Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- I915_WRITE(GEN6_RC_VIDEO_FREQ,
- HSW_FREQUENCY(rps->rp1_freq));
- /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
-
- /* Docs recommend 900MHz, and 300 MHz respectively */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- rps->max_freq_softlimit << 24 |
- rps->min_freq_softlimit << 16);
-
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
- I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
- I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 rc6vids, rc6_mask;
- u32 gtfifodbg;
- int ret;
-
- I915_WRITE(GEN6_RC_STATE, 0);
-
- /* Clear the DBG now so we don't confuse earlier errors */
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* disable the counters and set deterministic thresholds */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
- I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
- else
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
- I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
- I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
-
- /* We don't use those on Haswell */
- rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- if (HAS_RC6p(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (HAS_RC6pp(dev_priv))
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
- I915_WRITE(GEN6_RC_CONTROL,
- rc6_mask |
- GEN6_RC_CTL_EI_MODE(1) |
- GEN6_RC_CTL_HW_ENABLE);
-
- rc6vids = 0;
- ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
- if (IS_GEN(dev_priv, 6) && ret) {
- DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
- DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
- GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
- rc6vids &= 0xffff00;
- rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
- if (ret)
- DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
- }
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_enable_rps(struct drm_i915_private *dev_priv)
-{
- /* Here begins a magic sequence of register writes to enable
- * auto-downclocking.
- *
- * Perhaps there might be some value in exposing these to
- * userspace...
- */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Power down if completely idle for over 50ms */
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- reset_rps(dev_priv, gen6_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- const int min_freq = 15;
- const int scaling_factor = 180;
- unsigned int gpu_freq;
- unsigned int max_ia_freq, min_ring_freq;
- unsigned int max_gpu_freq, min_gpu_freq;
- struct cpufreq_policy *policy;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->max_freq <= rps->min_freq)
- return;
-
- policy = cpufreq_cpu_get(0);
- if (policy) {
- max_ia_freq = policy->cpuinfo.max_freq;
- cpufreq_cpu_put(policy);
- } else {
- /*
- * Default to measured freq if none found, PCU will ensure we
- * don't go over
- */
- max_ia_freq = tsc_khz;
- }
-
- /* Convert from kHz to MHz */
- max_ia_freq /= 1000;
-
- min_ring_freq = I915_READ(DCLK) & 0xf;
- /* convert DDR frequency from units of 266.6MHz to bandwidth */
- min_ring_freq = mult_frac(min_ring_freq, 8, 3);
-
- min_gpu_freq = rps->min_freq;
- max_gpu_freq = rps->max_freq;
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /* Convert GT frequency to 50 MHz units */
- min_gpu_freq /= GEN9_FREQ_SCALER;
- max_gpu_freq /= GEN9_FREQ_SCALER;
- }
-
- /*
- * For each potential GPU frequency, load a ring frequency we'd like
- * to use for memory access. We do this by specifying the IA frequency
- * the PCU should use as a reference to determine the ring frequency.
- */
- for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
- const int diff = max_gpu_freq - gpu_freq;
- unsigned int ia_freq = 0, ring_freq = 0;
-
- if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- /*
- * ring_freq = 2 * GT. ring_freq is in 100MHz units
- * No floor required for ring frequency on SKL.
- */
- ring_freq = gpu_freq;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- /* max(2 * GT, DDR). NB: GT is 50MHz units */
- ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev_priv)) {
- ring_freq = mult_frac(gpu_freq, 5, 4);
- ring_freq = max(min_ring_freq, ring_freq);
- /* leave ia_freq as the default, chosen by cpufreq */
- } else {
- /* On older processors, there is no separate ring
- * clock domain, so in order to boost the bandwidth
- * of the ring, we need to upclock the CPU (ia_freq).
- *
- * For GPU frequencies less than 750MHz,
- * just use the lowest ring freq.
- */
- if (gpu_freq < min_freq)
- ia_freq = 800;
- else
- ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
- ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
- }
-
- sandybridge_pcode_write(dev_priv,
- GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
- ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
- ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
- gpu_freq);
- }
-}
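For the legacy (pre-HSW) branch above, the IA frequency is scaled down linearly from the CPU maximum as the GPU frequency drops, then rounded to the 100MHz units pcode expects. A worked example with hypothetical numbers:

/* max_ia_freq = 3400 MHz, diff = 4, scaling_factor = 180:
 *   ia_freq = 3400 - (4 * 180) / 2 = 3040
 *   DIV_ROUND_CLOSEST(3040, 100) = 30, i.e. 3.0 GHz in 100MHz units */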
-
-static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-
- switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
- case 8:
- /* (2 * 4) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
- break;
- case 12:
- /* (2 * 6) config */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
- break;
- case 16:
- /* (2 * 8) config */
- default:
- /* Setting (2 * 8) Min RP0 for any other combination */
- rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
- break;
- }
-
- rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
-
- return rp0;
-}
-
-static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
- rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
-
- return rpe;
-}
-
-static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
-
- return rp1;
-}
-
-static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpn;
-
- val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
- rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
- FB_GFX_FREQ_FUSE_MASK);
-
- return rpn;
-}
-
-static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp1;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
-
- return rp1;
-}
-
-static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rp0;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
-
- rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
- /* Clamp to max */
- rp0 = min_t(u32, rp0, 0xea);
-
- return rp0;
-}
-
-static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
-{
- u32 val, rpe;
-
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
- rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
- val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
- rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
-
- return rpe;
-}
-
-static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
- /*
- * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
- * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
- * a BYT-M B0 the above register contains 0xbf. Moreover when setting
- * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
- * to make sure it matches what Punit accepts.
- */
- return max_t(u32, val, 0xc0);
-}
-
-/* Check that the pctx buffer wasn't moved under us. */
-static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON(pctx_addr != dev_priv->dsm.start +
- dev_priv->vlv_pctx->stolen->start);
-}
-
-
-/* Check that the pcbr address is not empty. */
-static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
-{
- unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
-
- WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
-}
-
-static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- resource_size_t pctx_paddr, paddr;
- resource_size_t pctx_size = 32*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = dev_priv->dsm.end + 1 - pctx_size;
- GEM_BUG_ON(paddr > U32_MAX);
-
- pctx_paddr = (paddr & (~4095));
- I915_WRITE(VLV_PCBR, pctx_paddr);
- }
-
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
-}
-
-static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
- resource_size_t pctx_paddr;
- resource_size_t pctx_size = 24*1024;
- u32 pcbr;
-
- pcbr = I915_READ(VLV_PCBR);
- if (pcbr) {
- /* BIOS set it up already, grab the pre-alloc'd space */
- resource_size_t pcbr_offset;
-
- pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
- pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- pcbr_offset,
- I915_GTT_OFFSET_NONE,
- pctx_size);
- goto out;
- }
-
- DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
-
- /*
- * From the Gunit register HAS:
- * The Gfx driver is expected to program this register and ensure
- * proper allocation within Gfx stolen memory. For example, this
- * register should be programmed such that the PCBR range does not
- * overlap with other ranges, such as the frame buffer, protected
- * memory, or any other relevant ranges.
- */
- pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
- if (!pctx) {
- DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
- goto out;
- }
-
- GEM_BUG_ON(range_overflows_t(u64,
- dev_priv->dsm.start,
- pctx->stolen->start,
- U32_MAX));
- pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
- I915_WRITE(VLV_PCBR, pctx_paddr);
-
-out:
- DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
- dev_priv->vlv_pctx = pctx;
-}
-
-static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
-{
- struct drm_i915_gem_object *pctx;
-
- pctx = fetch_and_zero(&dev_priv->vlv_pctx);
- if (pctx)
- i915_gem_object_put(pctx);
-}
-
-static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.gpll_ref_freq =
- vlv_get_cck_clock(dev_priv, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- dev_priv->czclk_freq);
-
- DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
- dev_priv->gt_pm.rps.gpll_ref_freq);
-}
-
-static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- valleyview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- switch ((val >> 6) & 3) {
- case 0:
- case 1:
- dev_priv->mem_freq = 800;
- break;
- case 2:
- dev_priv->mem_freq = 1066;
- break;
- case 3:
- dev_priv->mem_freq = 1333;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = valleyview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = valleyview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-}
-
-static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
- u32 val;
-
- cherryview_setup_pctx(dev_priv);
-
- vlv_iosf_sb_get(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- vlv_init_gpll_ref_freq(dev_priv);
-
- val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
-
- switch ((val >> 2) & 0x7) {
- case 3:
- dev_priv->mem_freq = 2000;
- break;
- default:
- dev_priv->mem_freq = 1600;
- break;
- }
- DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
-
- rps->max_freq = cherryview_rps_max_freq(dev_priv);
- rps->rp0_freq = rps->max_freq;
- DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->max_freq),
- rps->max_freq);
-
- rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
- DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->efficient_freq),
- rps->efficient_freq);
-
- rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
- DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->rp1_freq),
- rps->rp1_freq);
-
- rps->min_freq = cherryview_rps_min_freq(dev_priv);
- DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, rps->min_freq),
- rps->min_freq);
-
- vlv_iosf_sb_put(dev_priv,
- BIT(VLV_IOSF_SB_PUNIT) |
- BIT(VLV_IOSF_SB_NC) |
- BIT(VLV_IOSF_SB_CCK));
-
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
-}
-
-static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- valleyview_cleanup_pctx(dev_priv);
-}
-
-static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg, rc6_mode, pcbr;
-
- gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
- GT_FIFO_FREE_ENTRIES_CHV);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- cherryview_check_pctx(dev_priv);
-
- /* 1a & 1b: Get forcewake during program sequence. Although the driver
- * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- /* 2a: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- I915_WRITE(GEN6_RC_SLEEP, 0);
-
- /* TO threshold set to 500 us (0x186 * 1.28 us) */
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- /* For now we assume BIOS is allocating and populating the PCBR */
- pcbr = I915_READ(VLV_PCBR);
-
- /* 3: Enable RC6 */
- rc6_mode = 0;
- if (pcbr >> VLV_PCBR_ADDR_SHIFT)
- rc6_mode = GEN7_RC_CTL_TO_MODE;
- I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* 1: Program defaults and thresholds for RPS*/
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- /* 2: Enable RPS */
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
-
- /* Setting Fixed Bias */
- vlv_punit_get(dev_priv);
-
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 gtfifodbg;
-
- valleyview_check_pctx(dev_priv);
-
- gtfifodbg = I915_READ(GTFIFODBG);
- if (gtfifodbg) {
- DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
- gtfifodbg);
- I915_WRITE(GTFIFODBG, gtfifodbg);
- }
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- /* Disable RC states. */
- I915_WRITE(GEN6_RC_CONTROL, 0);
-
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
- I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
- I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
-
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
-
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
-
- /* Allows RC6 residency counter to work */
- I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
-
- I915_WRITE(GEN6_RC_CONTROL,
- GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
- I915_WRITE(GEN6_RP_UP_EI, 66000);
- I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
- I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
-
- vlv_punit_get(dev_priv);
-
- /* Setting Fixed Bias */
- val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
- vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
-
- val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
- vlv_punit_put(dev_priv);
-
- /* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
-
- DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
- DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
-
- reset_rps(dev_priv, valleyview_set_rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-}
-
-static unsigned long intel_pxfreq(u32 vidfreq)
-{
- unsigned long freq;
- int div = (vidfreq & 0x3f0000) >> 16;
- int post = (vidfreq & 0x3000) >> 12;
- int pre = (vidfreq & 0x7);
-
- if (!pre)
- return 0;
-
- freq = ((div * 133333) / ((1<<post) * pre));
-
- return freq;
-}
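The decode above splits the PX vid/freq word into a 6-bit divider (bits 21:16), a 2-bit post divider (bits 13:12) and a 3-bit pre divider (bits 2:0) around the 133333 base (133.333 MHz, expressed in kHz). A worked example with a hypothetical encoding:

u32 vidfreq = (6 << 16) | (1 << 12) | 1; /* div = 6, post = 1, pre = 1 */
/* freq = (6 * 133333) / ((1 << 1) * 1) = 399999 kHz */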
-
-static const struct cparams {
- u16 i;
- u16 t;
- u16 m;
- u16 c;
-} cparams[] = {
- { 1, 1333, 301, 28664 },
- { 1, 1066, 294, 24460 },
- { 1, 800, 294, 25192 },
- { 0, 1333, 276, 27605 },
- { 0, 1066, 276, 27605 },
- { 0, 800, 231, 23784 },
-};
-
-static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- u64 total_count, diff, ret;
- u32 count1, count2, count3, m = 0, c = 0;
- unsigned long now = jiffies_to_msecs(jiffies), diff1;
- int i;
-
- lockdep_assert_held(&mchdev_lock);
-
- diff1 = now - dev_priv->ips.last_time1;
-
- /* Prevent division-by-zero if we are asking too fast.
- * Also, we don't get interesting results if we are polling
- * faster than once in 10ms, so just return the saved value
- * in such cases.
- */
- if (diff1 <= 10)
- return dev_priv->ips.chipset_power;
-
- count1 = I915_READ(DMIEC);
- count2 = I915_READ(DDREC);
- count3 = I915_READ(CSIEC);
-
- total_count = count1 + count2 + count3;
-
- /* FIXME: handle per-counter overflow */
- if (total_count < dev_priv->ips.last_count1) {
- diff = ~0UL - dev_priv->ips.last_count1;
- diff += total_count;
- } else {
- diff = total_count - dev_priv->ips.last_count1;
- }
-
- for (i = 0; i < ARRAY_SIZE(cparams); i++) {
- if (cparams[i].i == dev_priv->ips.c_m &&
- cparams[i].t == dev_priv->ips.r_t) {
- m = cparams[i].m;
- c = cparams[i].c;
- break;
- }
- }
-
- diff = div_u64(diff, diff1);
- ret = ((m * diff) + c);
- ret = div_u64(ret, 10);
-
- dev_priv->ips.last_count1 = total_count;
- dev_priv->ips.last_time1 = now;
-
- dev_priv->ips.chipset_power = ret;
-
- return ret;
-}
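In other words, the chipset estimate is power = (m * (count delta per ms) + c) / 10, with m and c looked up from the cparams table. A worked example, assuming the { .i = 1, .t = 1333, .m = 301, .c = 28664 } row applies:

/* 6020 counts over diff1 = 20 ms:
 *   diff = 6020 / 20 = 301
 *   ret  = (301 * 301 + 28664) / 10 = 11926 */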
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_chipset_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-unsigned long i915_mch_val(struct drm_i915_private *i915)
-{
- unsigned long m, x, b;
- u32 tsfs;
-
- tsfs = intel_uncore_read(&i915->uncore, TSFS);
-
- m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
- x = intel_uncore_read8(&i915->uncore, TR1);
-
- b = tsfs & TSFS_INTR_MASK;
-
- return ((m * x) / 127) - b;
-}
-
-static int _pxvid_to_vd(u8 pxvid)
-{
- if (pxvid == 0)
- return 0;
-
- if (pxvid >= 8 && pxvid < 31)
- pxvid = 31;
-
- return (pxvid + 2) * 125;
-}
-
-static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
-{
- const int vd = _pxvid_to_vd(pxvid);
- const int vm = vd - 1125;
-
- if (INTEL_INFO(dev_priv)->is_mobile)
- return vm > 0 ? vm : 0;
-
- return vd;
-}
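Putting the two helpers together: PXVID 0 maps to 0, the dead band [8, 31) is clamped up to 31, and everything else maps linearly via (pxvid + 2) * 125; mobile parts then subtract the 1125 offset, floored at 0. Worked examples (hypothetical inputs):

/* pxvid = 16 -> clamped to 31 -> vd = (31 + 2) * 125 = 4125
 * pxvid = 40 ->                  vd = (40 + 2) * 125 = 5250
 * mobile: vm = max(vd - 1125, 0), e.g. 4125 - 1125 = 3000 */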
-
-static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- u64 now, diff, diffms;
- u32 count;
-
- lockdep_assert_held(&mchdev_lock);
-
- now = ktime_get_raw_ns();
- diffms = now - dev_priv->ips.last_time2;
- do_div(diffms, NSEC_PER_MSEC);
-
- /* Don't divide by 0 */
- if (!diffms)
- return;
-
- count = I915_READ(GFXEC);
-
- if (count < dev_priv->ips.last_count2) {
- diff = ~0UL - dev_priv->ips.last_count2;
- diff += count;
- } else {
- diff = count - dev_priv->ips.last_count2;
- }
-
- dev_priv->ips.last_count2 = count;
- dev_priv->ips.last_time2 = now;
-
- /* More magic constants... */
- diff = diff * 1181;
- diff = div_u64(diff, diffms * 10);
- dev_priv->ips.gfx_power = diff;
-}
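Putting numbers to the magic constants above (the sample delta is invented): a GFXEC delta of 2000 counts over diffms = 100 gives gfx_power = 2000 * 1181 / (100 * 10) = 2362000 / 1000 = 2362, in the same empirical units as the chipset estimate.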
-
-void i915_update_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- if (!IS_GEN(dev_priv, 5))
- return;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- __i915_update_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-}
-
-static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- unsigned long t, corr, state1, corr2, state2;
- u32 pxvid, ext_v;
-
- lockdep_assert_held(&mchdev_lock);
-
- pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
- pxvid = (pxvid >> 24) & 0x7f;
- ext_v = pvid_to_extvid(dev_priv, pxvid);
-
- state1 = ext_v;
-
- t = i915_mch_val(dev_priv);
-
- /* Revel in the empirically derived constants */
-
- /* Correction factor in 1/100000 units */
- if (t > 80)
- corr = ((t * 2349) + 135940);
- else if (t >= 50)
- corr = ((t * 964) + 29317);
- else /* < 50 */
- corr = ((t * 301) + 1004);
-
- corr = corr * ((150142 * state1) / 10000 - 78642);
- corr /= 100000;
- corr2 = (corr * dev_priv->ips.corr);
-
- state2 = (corr2 * state1) / 10000;
- state2 /= 100; /* convert to mW */
-
- __i915_update_gfx_val(dev_priv);
-
- return dev_priv->ips.gfx_power + state2;
-}
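To make the piecewise correction concrete (temperatures invented): t = 85 takes the first branch, corr = 85 * 2349 + 135940 = 335605; t = 60 takes the middle one, corr = 60 * 964 + 29317 = 87157; t = 40 takes the last, corr = 40 * 301 + 1004 = 13044. Each value is in the 1/100000 units noted above, before the voltage-derived term and the LCFUSE-derived ips.corr factor scale it down.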
-
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
- unsigned long val = 0;
-
- if (!IS_GEN(dev_priv, 5))
- return 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- val = __i915_gfx_val(dev_priv);
- spin_unlock_irq(&mchdev_lock);
- }
-
- return val;
-}
-
-static struct drm_i915_private __rcu *i915_mch_dev;
-
-static struct drm_i915_private *mchdev_get(void)
-{
- struct drm_i915_private *i915;
-
- rcu_read_lock();
- i915 = rcu_dereference(i915_mch_dev);
- if (!kref_get_unless_zero(&i915->drm.ref))
- i915 = NULL;
- rcu_read_unlock();
-
- return i915;
-}
-
-/**
- * i915_read_mch_val - return value for IPS use
- *
- * Calculate and return a value for the IPS driver to use when deciding whether
- * we have thermal and power headroom to increase CPU or GPU power budget.
- */
-unsigned long i915_read_mch_val(void)
-{
- struct drm_i915_private *i915;
- unsigned long chipset_val = 0;
- unsigned long graphics_val = 0;
- intel_wakeref_t wakeref;
-
- i915 = mchdev_get();
- if (!i915)
- return 0;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- spin_lock_irq(&mchdev_lock);
- chipset_val = __i915_chipset_val(i915);
- graphics_val = __i915_gfx_val(i915);
- spin_unlock_irq(&mchdev_lock);
- }
-
- drm_dev_put(&i915->drm);
- return chipset_val + graphics_val;
-}
-EXPORT_SYMBOL_GPL(i915_read_mch_val);
-
-/**
- * i915_gpu_raise - raise GPU frequency limit
- *
- * Raise the limit; IPS indicates we have thermal headroom.
- */
-bool i915_gpu_raise(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay > i915->ips.fmax)
- i915->ips.max_delay--;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_raise);
-
-/**
- * i915_gpu_lower - lower GPU frequency limit
- *
- * IPS indicates we're close to a thermal limit, so throttle back the GPU
- * frequency maximum.
- */
-bool i915_gpu_lower(void)
-{
- struct drm_i915_private *i915;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- if (i915->ips.max_delay < i915->ips.min_delay)
- i915->ips.max_delay++;
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return true;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_lower);
-
-/**
- * i915_gpu_busy - indicate GPU busyness to IPS
- *
- * Tell the IPS driver whether or not the GPU is busy.
- */
-bool i915_gpu_busy(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- ret = i915->gt.awake;
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_busy);
-
-/**
- * i915_gpu_turbo_disable - disable graphics turbo
- *
- * Disable graphics turbo by resetting the max frequency and setting the
- * current frequency to the default.
- */
-bool i915_gpu_turbo_disable(void)
-{
- struct drm_i915_private *i915;
- bool ret;
-
- i915 = mchdev_get();
- if (!i915)
- return false;
-
- spin_lock_irq(&mchdev_lock);
- i915->ips.max_delay = i915->ips.fstart;
- ret = ironlake_set_drps(i915, i915->ips.fstart);
- spin_unlock_irq(&mchdev_lock);
-
- drm_dev_put(&i915->drm);
- return ret;
-}
-EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
-
-/**
- * ips_ping_for_i915_load - tell the intel_ips driver that the i915
- * driver is now loaded, if IPS got loaded first.
- *
- * This awkward dance is so that neither module has to depend on the
- * other in order for IPS to do the appropriate communication of
- * GPU turbo limits to i915.
- */
-static void
-ips_ping_for_i915_load(void)
-{
- void (*link)(void);
-
- link = symbol_get(ips_link_to_i915_driver);
- if (link) {
- link();
- symbol_put(ips_link_to_i915_driver);
- }
-}
-
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
-{
- /* We only register the i915 ips part with intel-ips once everything is
- * set up, to avoid intel-ips sneaking in and reading bogus values. */
- rcu_assign_pointer(i915_mch_dev, dev_priv);
-
- ips_ping_for_i915_load();
-}
-
-void intel_gpu_ips_teardown(void)
-{
- rcu_assign_pointer(i915_mch_dev, NULL);
-}
-
-static void intel_init_emon(struct drm_i915_private *dev_priv)
-{
- u32 lcfuse;
- u8 pxw[16];
- int i;
-
- /* Disable PMON while we program the event weights */
- I915_WRITE(ECR, 0);
- POSTING_READ(ECR);
-
- /* Program energy weights for various events */
- I915_WRITE(SDEW, 0x15040d00);
- I915_WRITE(CSIEW0, 0x007f0000);
- I915_WRITE(CSIEW1, 0x1e220004);
- I915_WRITE(CSIEW2, 0x04000004);
-
- for (i = 0; i < 5; i++)
- I915_WRITE(PEW(i), 0);
- for (i = 0; i < 3; i++)
- I915_WRITE(DEW(i), 0);
-
- /* Program P-state weights to account for frequency power adjustment */
- for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ(i));
- unsigned long freq = intel_pxfreq(pxvidfreq);
- unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
- PXVFREQ_PX_SHIFT;
- unsigned long val;
-
- val = vid * vid;
- val *= (freq / 1000);
- val *= 255;
- val /= (127*127*900);
- if (val > 0xff)
- DRM_ERROR("bad pxval: %ld\n", val);
- pxw[i] = val;
- }
- /* Render standby states get 0 weight */
- pxw[14] = 0;
- pxw[15] = 0;
-
- for (i = 0; i < 4; i++) {
- u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
- (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW(i), val);
- }
-
- /* Adjust magic regs to magic values (more experimental results) */
- I915_WRITE(OGW0, 0);
- I915_WRITE(OGW1, 0);
- I915_WRITE(EG0, 0x00007f00);
- I915_WRITE(EG1, 0x0000000e);
- I915_WRITE(EG2, 0x000e0000);
- I915_WRITE(EG3, 0x68000300);
- I915_WRITE(EG4, 0x42000000);
- I915_WRITE(EG5, 0x00140031);
- I915_WRITE(EG6, 0);
- I915_WRITE(EG7, 0);
-
- for (i = 0; i < 8; i++)
- I915_WRITE(PXWL(i), 0);
-
- /* Enable PMON + select events */
- I915_WRITE(ECR, 0x80000019);
-
- lcfuse = I915_READ(LCFUSE02);
-
- dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
-}
-
-static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
-{
- return !I915_READ(GEN8_RC6_CTX_INFO);
-}
-
-static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
-{
- if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return;
-
- if (i915_rc6_ctx_corrupted(i915)) {
- DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
- i915->gt_pm.rc6.ctx_corrupted = true;
- i915->gt_pm.rc6.ctx_corrupted_wakeref =
- intel_runtime_pm_get(&i915->runtime_pm);
- }
-}
-
-static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
-{
- if (i915->gt_pm.rc6.ctx_corrupted) {
- intel_runtime_pm_put(&i915->runtime_pm,
- i915->gt_pm.rc6.ctx_corrupted_wakeref);
- i915->gt_pm.rc6.ctx_corrupted = false;
- }
-}
-
-/**
- * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
- * @i915: i915 device
- *
- * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
- */
-void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
-{
- if (i915->gt_pm.rc6.ctx_corrupted)
- intel_runtime_pm_put(&i915->runtime_pm,
- i915->gt_pm.rc6.ctx_corrupted_wakeref);
-}
-
-/**
- * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
- * @i915: i915 device
- *
- * Perform any steps needed to re-init the RC6 CTX WA after system resume.
- */
-void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
-{
- if (!i915->gt_pm.rc6.ctx_corrupted)
- return;
-
- if (i915_rc6_ctx_corrupted(i915)) {
- i915->gt_pm.rc6.ctx_corrupted_wakeref =
- intel_runtime_pm_get(&i915->runtime_pm);
- return;
- }
-
- DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
- i915->gt_pm.rc6.ctx_corrupted = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv);
-
-/**
- * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
- * @i915: i915 device
- *
- * Check if an RC6 CTX corruption has happened since the last check and if so
- * disable RC6 and runtime power management.
- *
- * Return false if no context corruption has happened since the last call of
- * this function, true otherwise.
- */
-bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
-{
- if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
- return false;
-
- if (i915->gt_pm.rc6.ctx_corrupted)
- return false;
-
- if (!i915_rc6_ctx_corrupted(i915))
- return false;
-
- DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
-
- intel_disable_rc6(i915);
- i915->gt_pm.rc6.ctx_corrupted = true;
- i915->gt_pm.rc6.ctx_corrupted_wakeref =
- intel_runtime_pm_get_noresume(&i915->runtime_pm);
-
- return true;
-}
-
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
- * requirement.
- */
- if (!sanitize_rc6(dev_priv)) {
- DRM_INFO("RC6 disabled, disabling runtime PM support\n");
- pm_runtime_get(&dev_priv->drm.pdev->dev);
- }
-
- i915_rc6_ctx_wa_init(dev_priv);
-
- /* Initialize RPS limits (for userspace) */
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_init_gt_powersave(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_init_gt_powersave(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_init_rps_frequencies(dev_priv);
-
- /* Derive initial user preferences/limits from the hardware limits */
- rps->max_freq_softlimit = rps->max_freq;
- rps->min_freq_softlimit = rps->min_freq;
-
- /* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN(dev_priv, 6) ||
- IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- u32 params = 0;
-
- sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS,
- &params, NULL);
- if (params & BIT(31)) { /* OC supported */
- DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
- (rps->max_freq & 0xff) * 50,
- (params & 0xff) * 50);
- rps->max_freq = params & 0xff;
- }
- }
-
- /* Finally allow us to boost to max by default */
- rps->boost_freq = rps->max_freq;
- rps->idle_freq = rps->min_freq;
- rps->cur_freq = rps->idle_freq;
-}
-
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (IS_VALLEYVIEW(dev_priv))
- valleyview_cleanup_gt_powersave(dev_priv);
-
- i915_rc6_ctx_wa_cleanup(dev_priv);
-
- if (!HAS_RC6(dev_priv))
- pm_runtime_put(&dev_priv->drm.pdev->dev);
-}
-
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
-{
- dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
- dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
- intel_disable_gt_powersave(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 11)
- gen11_reset_rps_interrupts(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_reset_rps_interrupts(dev_priv);
-}
-
-static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (!i915->gt_pm.llc_pstate.enabled)
- return;
-
- /* Currently there is no HW configuration to be done to disable. */
-
- i915->gt_pm.llc_pstate.enabled = false;
-}
-
-static void __intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rc6(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = false;
-}
-
-static void intel_disable_rc6(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- mutex_lock(&rps->lock);
- __intel_disable_rc6(dev_priv);
- mutex_unlock(&rps->lock);
-}
-
-static void intel_disable_rps(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (!dev_priv->gt_pm.rps.enabled)
- return;
-
- if (INTEL_GEN(dev_priv) >= 9)
- gen9_disable_rps(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- cherryview_disable_rps(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_disable_rps(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_disable_rps(dev_priv);
- else if (IS_IRONLAKE_M(dev_priv))
- ironlake_disable_drps(dev_priv);
-
- dev_priv->gt_pm.rps.enabled = false;
-}
-
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- __intel_disable_rc6(dev_priv);
- intel_disable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_disable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
-static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (i915->gt_pm.llc_pstate.enabled)
- return;
-
- gen6_update_ring_freq(i915);
-
- i915->gt_pm.llc_pstate.enabled = true;
-}
-
-static void intel_enable_rc6(struct drm_i915_private *dev_priv)
-{
- lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
-
- if (dev_priv->gt_pm.rc6.enabled)
- return;
-
- if (dev_priv->gt_pm.rc6.ctx_corrupted)
- return;
-
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_enable_rc6(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv))
- valleyview_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 11)
- gen11_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 9)
- gen9_enable_rc6(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- gen8_enable_rc6(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_enable_rc6(dev_priv);
-
- dev_priv->gt_pm.rc6.enabled = true;
-}
-
-static void intel_enable_rps(struct drm_i915_private *dev_priv)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- lockdep_assert_held(&rps->lock);
-
- if (rps->enabled)
- return;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_enable_rps(dev_priv);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- valleyview_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 9) {
- gen9_enable_rps(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- gen8_enable_rps(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- gen6_enable_rps(dev_priv);
- } else if (IS_IRONLAKE_M(dev_priv)) {
- ironlake_enable_drps(dev_priv);
- intel_init_emon(dev_priv);
- }
-
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
-
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
-
- rps->enabled = true;
-}
-
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
-{
- /* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev_priv))
- return;
-
- mutex_lock(&dev_priv->gt_pm.rps.lock);
-
- if (HAS_RC6(dev_priv))
- intel_enable_rc6(dev_priv);
- if (HAS_RPS(dev_priv))
- intel_enable_rps(dev_priv);
- if (HAS_LLC(dev_priv))
- intel_enable_llc_pstate(dev_priv);
-
- mutex_unlock(&dev_priv->gt_pm.rps.lock);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -8976,7 +6347,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
- int pipe;
+ enum pipe pipe;
u32 val;
/*
@@ -9196,6 +6567,22 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
_MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
+static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 vd_pg_enable = 0;
+ unsigned int i;
+
+ /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (HAS_ENGINE(dev_priv, _VCS(i)))
+ vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) |
+ VDN_MFX_POWERGATE_ENABLE(i);
+ }
+
+ I915_WRITE(POWERGATE_ENABLE,
+ I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+}
+
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
@@ -9716,7 +7103,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 12))
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->display.init_clock_gating = tgl_init_clock_gating;
else if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
@@ -9772,6 +7159,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
else if (IS_GEN(dev_priv, 5))
i915_ironlake_get_mem_freq(dev_priv);
+ if (intel_has_sagv(dev_priv))
+ skl_setup_sagv_block_time(dev_priv);
+
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
@@ -9830,7 +7220,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
} else if (IS_GEN(dev_priv, 2)) {
- if (INTEL_INFO(dev_priv)->num_pipes == 1) {
+ if (INTEL_NUM_PIPES(dev_priv) == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
} else {
@@ -9842,217 +7232,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
}
}
-static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val - 0xb7
- * Slow = Fast = GPLL ref * N
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
-}
-
-static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
-}
-
-static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /*
- * N = val / 2
- * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
- */
- return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
-}
-
-static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- /* CHV needs even values */
- return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
-}
-
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
- GEN9_FREQ_SCALER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_gpu_freq(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_gpu_freq(dev_priv, val);
- else
- return val * GT_FREQUENCY_MULTIPLIER;
-}
-
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
-{
- if (INTEL_GEN(dev_priv) >= 9)
- return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
- GT_FREQUENCY_MULTIPLIER);
- else if (IS_CHERRYVIEW(dev_priv))
- return chv_freq_opcode(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv))
- return byt_freq_opcode(dev_priv, val);
- else
- return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
-}
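The conversions above are exact inverses up to rounding. A standalone sketch of the Gen9 round trip, assuming the driver's usual GT_FREQUENCY_MULTIPLIER = 50 and GEN9_FREQ_SCALER = 3 definitions (the opcode is invented):

	#include <stdio.h>

	#define GT_FREQUENCY_MULTIPLIER	50	/* assumed from the i915 headers */
	#define GEN9_FREQ_SCALER	3	/* assumed from the i915 headers */
	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	int main(void)
	{
		int opcode = 66;	/* hypothetical RPS opcode */
		int mhz = DIV_ROUND_CLOSEST(opcode * GT_FREQUENCY_MULTIPLIER,
					    GEN9_FREQ_SCALER);
		int back = DIV_ROUND_CLOSEST(mhz * GEN9_FREQ_SCALER,
					     GT_FREQUENCY_MULTIPLIER);

		printf("opcode %d -> %d MHz -> opcode %d\n", opcode, mhz, back);
		return 0;	/* opcode 66 -> 1100 MHz -> opcode 66 */
	}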
-
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->gt_pm.rps.lock);
- mutex_init(&dev_priv->gt_pm.rps.power.mutex);
-
- atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
-
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- u32 lower, upper, tmp;
- int loop = 2;
-
- /*
- * The registers accessed do not need forcewake. We borrow the
- * uncore lock to prevent concurrent access to the range register.
- */
- lockdep_assert_held(&dev_priv->uncore.lock);
-
- /*
- * vlv and chv residency counters are 40 bits in width.
- * With a control bit, we can choose between the upper and lower
- * 32-bit windows into this counter.
- *
- * Although we always use the counter in high-range mode elsewhere,
- * userspace may attempt to read the value before rc6 is initialised,
- * before we have set the default VLV_COUNTER_CONTROL value. So always
- * set the high bit to be safe.
- */
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- do {
- tmp = upper;
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
- lower = I915_READ_FW(reg);
-
- I915_WRITE_FW(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
- upper = I915_READ_FW(reg);
- } while (upper != tmp && --loop);
-
- /*
- * Everywhere else we always use VLV_COUNTER_CONTROL with the
- * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
- * now.
- */
-
- return lower | (u64)upper << 8;
-}
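The lower | upper << 8 combine above only makes sense once you notice that the two 32-bit windows overlap: the low window exposes bits [31:0] and, on the reading of the hardware this code implies, the high window exposes bits [39:8]. A standalone sketch with an invented 40-bit counter value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t counter = 0x123456789aull;	/* hypothetical 40-bit value */
		uint32_t lower = counter & 0xffffffff;	/* low-range window */
		uint32_t upper = counter >> 8;		/* high-range window */

		/*
		 * Bits [31:8] appear in both windows, so OR-ing them is only
		 * consistent if both reads saw the same counter value; that
		 * is exactly what the upper/lower/upper retry loop verifies.
		 */
		printf("%#llx\n",
		       (unsigned long long)(lower | (uint64_t)upper << 8));
		return 0;	/* prints 0x123456789a */
	}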
-
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
- const i915_reg_t reg)
-{
- struct intel_uncore *uncore = &dev_priv->uncore;
- u64 time_hw, prev_hw, overflow_hw;
- unsigned int fw_domains;
- unsigned long flags;
- unsigned int i;
- u32 mul, div;
-
- if (!HAS_RC6(dev_priv))
- return 0;
-
- /*
- * Store previous hw counter values for counter wrap-around handling.
- *
- * There are only four interesting registers and they live next to each
- * other, so we can use the offset relative to the smallest one as the
- * index into driver storage.
- */
- i = (i915_mmio_reg_offset(reg) -
- i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
- return 0;
-
- fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
-
- spin_lock_irqsave(&uncore->lock, flags);
- intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
- /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- mul = 1000000;
- div = dev_priv->czclk_freq;
- overflow_hw = BIT_ULL(40);
- time_hw = vlv_residency_raw(dev_priv, reg);
- } else {
- /* 833.33ns units on Gen9LP, 1.28us elsewhere. */
- if (IS_GEN9_LP(dev_priv)) {
- mul = 10000;
- div = 12;
- } else {
- mul = 1280;
- div = 1;
- }
-
- overflow_hw = BIT_ULL(32);
- time_hw = intel_uncore_read_fw(uncore, reg);
- }
-
- /*
- * Counter wrap handling.
- *
- * This relies on a sufficient frequency of queries, otherwise the
- * counters can still wrap undetected.
- */
- prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
- dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;
-
- /* RC6 delta from last sample. */
- if (time_hw >= prev_hw)
- time_hw -= prev_hw;
- else
- time_hw += overflow_hw - prev_hw;
-
- /* Add delta to RC6 extended raw driver copy. */
- time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
- dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
-
- intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irqrestore(&uncore->lock, flags);
-
- return mul_u64_u32_div(time_hw, mul, div);
-}
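Putting numbers to the unit selection above (tick counts invented): on Gen9LP a raw delta of 12000 ticks becomes 12000 * 10000 / 12 = 10000000 ns, i.e. 10 ms at 833.33 ns per tick; on the other gens a tick is 1.28 us, so the same delta would be 12000 * 1280 = 15360000 ns; and on VLV/CHV each CZ-clock tick is 10^6 / czclk_freq ns, consistent with czclk_freq being kept in kHz.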
-
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
-}
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
-{
- u32 cagf;
-
- if (INTEL_GEN(dev_priv) >= 9)
- cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
- else
- cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
-
- return cagf;
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 0f7390c850ec..b579c724b915 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -29,19 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
-void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
-void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -68,26 +55,9 @@ void skl_write_plane_wm(struct intel_plane *plane,
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
void intel_init_ipc(struct drm_i915_private *dev_priv);
void intel_enable_ipc(struct drm_i915_private *dev_priv);
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg);
-u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
new file mode 100644
index 000000000000..583118095635
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "intel_region_lmem.h"
+
+static int init_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ unsigned long n;
+ int ret;
+
+ /* We want to 1:1 map the mappable aperture to our reserved region */
+
+ mem->fake_mappable.start = 0;
+ mem->fake_mappable.size = resource_size(&mem->region);
+ mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;
+
+ ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
+ if (ret)
+ return ret;
+
+ mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev,
+ mem->region.start,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+ if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) {
+ drm_mm_remove_node(&mem->fake_mappable);
+ return -EINVAL;
+ }
+
+ for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ mem->remap_addr + (n << PAGE_SHIFT),
+ n << PAGE_SHIFT,
+ I915_CACHE_NONE, 0);
+ }
+
+ mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
+ mem->fake_mappable.size);
+
+ return 0;
+}
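Spelling out the 1:1 mapping the loop above builds (PAGE_SHIFT = 12 assumed for the example): page n = 3 inserts the DMA address remap_addr + 0x3000 at GGTT offset 0x3000, so every offset into the fake mappable region resolves to the same offset into the remapped backing range.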
+
+static void release_fake_lmem_bar(struct intel_memory_region *mem)
+{
+ if (drm_mm_node_allocated(&mem->fake_mappable))
+ drm_mm_remove_node(&mem->fake_mappable);
+
+ dma_unmap_resource(&mem->i915->drm.pdev->dev,
+ mem->remap_addr,
+ mem->fake_mappable.size,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+}
+
+static void
+region_lmem_release(struct intel_memory_region *mem)
+{
+ release_fake_lmem_bar(mem);
+ io_mapping_fini(&mem->iomap);
+ intel_memory_region_release_buddy(mem);
+}
+
+static int
+region_lmem_init(struct intel_memory_region *mem)
+{
+ int ret;
+
+ if (i915_modparams.fake_lmem_start) {
+ ret = init_fake_lmem_bar(mem);
+ GEM_BUG_ON(ret);
+ }
+
+ if (!io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ resource_size(&mem->region)))
+ return -EIO;
+
+ ret = intel_memory_region_init_buddy(mem);
+ if (ret)
+ io_mapping_fini(&mem->iomap);
+
+ return ret;
+}
+
+const struct intel_memory_region_ops intel_region_lmem_ops = {
+ .init = region_lmem_init,
+ .release = region_lmem_release,
+ .create_object = __i915_gem_lmem_object_create,
+};
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_memory_region *mem;
+ resource_size_t mappable_end;
+ resource_size_t io_start;
+ resource_size_t start;
+
+ GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
+ GEM_BUG_ON(!i915_modparams.fake_lmem_start);
+
+ /* Your mappable aperture belongs to me now! */
+ mappable_end = pci_resource_len(pdev, 2);
+ io_start = pci_resource_start(pdev, 2);
+ start = i915_modparams.fake_lmem_start;
+
+ mem = intel_memory_region_create(i915,
+ start,
+ mappable_end,
+ PAGE_SIZE,
+ io_start,
+ &intel_region_lmem_ops);
+ if (!IS_ERR(mem)) {
+ DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
+ DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
+ (u64)mem->io_start);
+ DRM_INFO("Intel graphics fake LMEM size: %llx\n",
+ (u64)resource_size(&mem->region));
+ }
+
+ return mem;
+}
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h
new file mode 100644
index 000000000000..213def7c7b8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_region_lmem.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_REGION_LMEM_H
+#define __INTEL_REGION_LMEM_H
+
+struct drm_i915_private;
+
+extern const struct intel_memory_region_ops intel_region_lmem_ops;
+
+struct intel_memory_region *
+intel_setup_fake_lmem(struct drm_i915_private *i915);
+
+#endif /* !__INTEL_REGION_LMEM_H */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 2fd3c097e1f5..ad719c9602af 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -27,7 +27,6 @@
*/
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9e583f13a9e4..94a97bf8c021 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -805,9 +805,6 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
-#define GEN11_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-
#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
@@ -903,12 +900,10 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
})
#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
+ find_fw_domain(uncore, offset)
+
+#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
+ find_fw_domain(uncore, offset)
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
@@ -935,6 +930,20 @@ static const i915_reg_t gen11_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
+static const i915_reg_t gen12_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
+ GEN6_RPNSWREQ, /* 0xA008 */
+ GEN6_RC_VIDEO_FREQ, /* 0xA00C */
+ RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+ RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
+ RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
+ RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
+ RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
+ RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
+ RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
+ /* TODO: Other registers are not yet used */
+};
+
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -957,6 +966,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
+__is_genX_shadowed(12)
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
@@ -1005,8 +1015,18 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(uncore, offset); \
+ const u32 __offset = (offset); \
+ if (!is_gen11_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
+ __fwd; \
+})
+
+#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ const u32 __offset = (offset); \
+ if (!is_gen12_shadowed(__offset)) \
+ __fwd = find_fw_domain(uncore, __offset); \
__fwd; \
})
@@ -1065,9 +1085,51 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x40000, 0x1bffff, 0),
+ GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
+ GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
+ GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
+ GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
+ GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
+ GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
+};
+
+/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
+static const struct intel_forcewake_range __gen12_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
+ GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
+ GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
- GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
+ GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
+ GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
GEN_FW_RANGE(0x40000, 0x1bffff, 0),
@@ -1228,6 +1290,7 @@ __gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)
+__gen_reg_read_funcs(gen12_fwtable);
__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);
@@ -1319,6 +1382,7 @@ __gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)
+__gen_reg_write_funcs(gen12_fwtable);
__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);
@@ -1690,10 +1754,14 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
- } else {
+ } else if (IS_GEN(i915, 11)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
}
uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 414fc2cb0459..dcfa243892c6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -378,23 +378,23 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
static inline void intel_uncore_rmw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write(uncore, reg, val);
+ old = intel_uncore_read(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write(uncore, reg, val);
}
static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
i915_reg_t reg, u32 clear, u32 set)
{
- u32 val;
+ u32 old, val;
- val = intel_uncore_read_fw(uncore, reg);
- val &= ~clear;
- val |= set;
- intel_uncore_write_fw(uncore, reg, val);
+ old = intel_uncore_read_fw(uncore, reg);
+ val = (old & ~clear) | set;
+ if (val != old)
+ intel_uncore_write_fw(uncore, reg, val);
}
static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
new file mode 100644
index 000000000000..a29d93707345
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_tgl.h"
+
+static const struct i915_oa_reg b_counter_config_test_oa[] = {
+ { _MMIO(0xD920), 0x00000000 },
+ { _MMIO(0xD900), 0x00000000 },
+ { _MMIO(0xD904), 0xF0800000 },
+ { _MMIO(0xD910), 0x00000000 },
+ { _MMIO(0xD914), 0xF0800000 },
+ { _MMIO(0xDC40), 0x00FF0000 },
+ { _MMIO(0xD940), 0x00000004 },
+ { _MMIO(0xD944), 0x0000FFFF },
+ { _MMIO(0xDC00), 0x00000004 },
+ { _MMIO(0xDC04), 0x0000FFFF },
+ { _MMIO(0xD948), 0x00000003 },
+ { _MMIO(0xD94C), 0x0000FFFF },
+ { _MMIO(0xDC08), 0x00000003 },
+ { _MMIO(0xDC0C), 0x0000FFFF },
+ { _MMIO(0xD950), 0x00000007 },
+ { _MMIO(0xD954), 0x0000FFFF },
+ { _MMIO(0xDC10), 0x00000007 },
+ { _MMIO(0xDC14), 0x0000FFFF },
+ { _MMIO(0xD958), 0x00100002 },
+ { _MMIO(0xD95C), 0x0000FFF7 },
+ { _MMIO(0xDC18), 0x00100002 },
+ { _MMIO(0xDC1C), 0x0000FFF7 },
+ { _MMIO(0xD960), 0x00100002 },
+ { _MMIO(0xD964), 0x0000FFCF },
+ { _MMIO(0xDC20), 0x00100002 },
+ { _MMIO(0xDC24), 0x0000FFCF },
+ { _MMIO(0xD968), 0x00100082 },
+ { _MMIO(0xD96C), 0x0000FFEF },
+ { _MMIO(0xDC28), 0x00100082 },
+ { _MMIO(0xDC2C), 0x0000FFEF },
+ { _MMIO(0xD970), 0x001000C2 },
+ { _MMIO(0xD974), 0x0000FFE7 },
+ { _MMIO(0xDC30), 0x001000C2 },
+ { _MMIO(0xDC34), 0x0000FFE7 },
+ { _MMIO(0xD978), 0x00100001 },
+ { _MMIO(0xD97C), 0x0000FFE7 },
+ { _MMIO(0xDC38), 0x00100001 },
+ { _MMIO(0xDC3C), 0x0000FFE7 },
+};
+
+static const struct i915_oa_reg flex_eu_config_test_oa[] = {
+};
+
+static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x0D04), 0x00000200 },
+ { _MMIO(0x9840), 0x00000000 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x280E0000 },
+ { _MMIO(0x9888), 0x1E0E0147 },
+ { _MMIO(0x9888), 0x180E0000 },
+ { _MMIO(0x9888), 0x160E0000 },
+ { _MMIO(0x9888), 0x1E0F1000 },
+ { _MMIO(0x9888), 0x1E104000 },
+ { _MMIO(0x9888), 0x2E020100 },
+ { _MMIO(0x9888), 0x2C030004 },
+ { _MMIO(0x9888), 0x38003000 },
+ { _MMIO(0x9888), 0x1E0A8000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x49110000 },
+ { _MMIO(0x9888), 0x5D101400 },
+ { _MMIO(0x9888), 0x1D140020 },
+ { _MMIO(0x9888), 0x1D1103A3 },
+ { _MMIO(0x9888), 0x01110000 },
+ { _MMIO(0x9888), 0x61111000 },
+ { _MMIO(0x9888), 0x1F128000 },
+ { _MMIO(0x9888), 0x17100000 },
+ { _MMIO(0x9888), 0x55100630 },
+ { _MMIO(0x9888), 0x57100000 },
+ { _MMIO(0x9888), 0x31100000 },
+ { _MMIO(0x9884), 0x00000003 },
+ { _MMIO(0x9888), 0x65100002 },
+ { _MMIO(0x9884), 0x00000000 },
+ { _MMIO(0x9888), 0x42000001 },
+};
+
+static ssize_t
+show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+
+void
+i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv)
+{
+ strlcpy(dev_priv->perf.test_config.uuid,
+ "80a833f0-2504-4321-8894-e9277844ce7b",
+ sizeof(dev_priv->perf.test_config.uuid));
+ dev_priv->perf.test_config.id = 1;
+
+ dev_priv->perf.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
+
+ dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
+
+ dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
+
+ dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b";
+ dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs;
+
+ dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr;
+
+ dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id;
+}
diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
new file mode 100644
index 000000000000..4c25f0be825c
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
+ */
+
+#ifndef __I915_OA_TGL_H__
+#define __I915_OA_TGL_H__
+
+struct drm_i915_private;
+
+void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 77d844ac8b71..260b0ee5d1e3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915)
return NULL;
kref_init(&active->ref);
- i915_active_init(i915, &active->base, __live_active, __live_retire);
+ i915_active_init(&active->base, __live_active, __live_retire);
return active;
}
@@ -79,7 +79,6 @@ __live_active_setup(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
struct live_active *active;
- enum intel_engine_id id;
unsigned int count = 0;
int err = 0;
@@ -97,7 +96,7 @@ __live_active_setup(struct drm_i915_private *i915)
if (err)
goto out;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
struct i915_request *rq;
rq = i915_request_create(engine->kernel_context);
@@ -110,7 +109,7 @@ __live_active_setup(struct drm_i915_private *i915)
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base, rq->timeline, rq);
+ err = i915_active_add_request(&active->base, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
@@ -121,7 +120,7 @@ __live_active_setup(struct drm_i915_private *i915)
}
i915_active_release(&active->base);
- if (active->retired && count) {
+ if (READ_ONCE(active->retired) && count) {
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
@@ -146,35 +145,25 @@ static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
struct live_active *active;
- intel_wakeref_t wakeref;
int err = 0;
/* Check that we get a callback when requests retire upon waiting */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
active = __live_active_setup(i915);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
- goto err;
- }
+ if (IS_ERR(active))
+ return PTR_ERR(active);
i915_active_wait(&active->base);
- if (!active->retired) {
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after waiting!\n");
err = -EINVAL;
}
__live_put(active);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
-err:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
@@ -182,35 +171,25 @@ static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
struct live_active *active;
- intel_wakeref_t wakeref;
int err = 0;
/* Check that we get a callback when requests are indirectly retired */
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
active = __live_active_setup(i915);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
- goto err;
- }
+ if (IS_ERR(active))
+ return PTR_ERR(active);
/* waits for & retires all requests */
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- if (!active->retired) {
+ if (!READ_ONCE(active->retired)) {
pr_err("i915_active not retired after flushing!\n");
err = -EINVAL;
}
__live_put(active);
-err:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
return err;
}
@@ -226,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
return i915_subtests(tests, i915);
}
+
+static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+{
+ struct intel_engine_cs *engine;
+
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
+ return NULL;
+
+ return engine;
+}
+
+void i915_active_print(struct i915_active *ref, struct drm_printer *m)
+{
+ drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire);
+ drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
+ drm_printf(m, "\tpreallocated barriers? %s\n",
+ yesno(!llist_empty(&ref->preallocated_barriers)));
+
+ if (i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ struct intel_engine_cs *engine;
+
+ engine = node_to_barrier(it);
+ if (engine) {
+ drm_printf(m, "\tbarrier: %s\n", engine->name);
+ continue;
+ }
+
+ if (i915_active_fence_isset(&it->base)) {
+ drm_printf(m,
+ "\ttimeline: %llx\n", it->timeline);
+ continue;
+ }
+ }
+
+ i915_active_release(ref);
+ }
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 23f784eae1e7..1b856bae67b5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -375,6 +375,8 @@ retry:
if (err)
break;
+
+ cond_resched();
}
if (err == -ENOMEM)
@@ -687,6 +689,8 @@ static int igt_buddy_alloc_range(void *arg)
rem -= size;
if (!rem)
break;
+
+ cond_resched();
}
if (err == -ENOMEM)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 37593831b539..d83f6bf6d9d4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -15,23 +15,26 @@
#include "igt_flush_test.h"
#include "mock_drm.h"
-static int switch_to_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static int switch_to_context(struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
- for_each_engine(engine, i915, id) {
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
i915_request_add(rq);
}
+ i915_gem_context_unlock_engines(ctx);
- return 0;
+ return err;
}
static void trash_stolen(struct drm_i915_private *i915)
@@ -42,6 +45,10 @@ static void trash_stolen(struct drm_i915_private *i915)
unsigned long page;
u32 prng = 0x12345678;
+ /* XXX: fsck. needs some more thought... */
+ if (!i915_ggtt_has_aperture(ggtt))
+ return;
+
for (page = 0; page < size; page += PAGE_SIZE) {
const dma_addr_t dma = i915->dsm.start + page;
u32 __iomem *s;
@@ -117,12 +124,9 @@ static void pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(&i915->gt, false);
- i915_gem_sanitize(i915);
- mutex_lock(&i915->drm.struct_mutex);
i915_gem_restore_gtt_mappings(i915);
- i915_gem_restore_fences(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_restore_fences(&i915->ggtt);
i915_gem_resume(i915);
}
@@ -140,11 +144,9 @@ static int igt_gem_suspend(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -159,9 +161,7 @@ static int igt_gem_suspend(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
mock_file_free(i915, file);
return err;
@@ -179,11 +179,9 @@ static int igt_gem_hibernate(void *arg)
return PTR_ERR(file);
err = -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
ctx = live_context(i915, file);
if (!IS_ERR(ctx))
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
if (err)
goto out;
@@ -198,9 +196,7 @@ static int igt_gem_hibernate(void *arg)
pm_resume(i915);
- mutex_lock(&i915->drm.struct_mutex);
- err = switch_to_context(i915, ctx);
- mutex_unlock(&i915->drm.struct_mutex);
+ err = switch_to_context(ctx);
out:
mock_file_free(i915, file);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index cb30c669b1b7..42e948144f1b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -43,8 +43,7 @@ static void quirk_add(struct drm_i915_gem_object *obj,
list_add(&obj->st_link, objects);
}
-static int populate_ggtt(struct drm_i915_private *i915,
- struct list_head *objects)
+static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
@@ -53,7 +52,8 @@ static int populate_ggtt(struct drm_i915_private *i915,
do {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -70,7 +70,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
count++;
} while (1);
pr_debug("Filled GGTT with %lu pages [%llu total]\n",
- count, i915->ggtt.vm.total / PAGE_SIZE);
+ count, ggtt->vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
@@ -96,7 +96,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
return -EINVAL;
}
- if (list_empty(&i915->ggtt.vm.bound_list)) {
+ if (list_empty(&ggtt->vm.bound_list)) {
pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL;
}
@@ -104,20 +104,16 @@ static int populate_ggtt(struct drm_i915_private *i915,
return 0;
}
-static void unpin_ggtt(struct drm_i915_private *i915)
+static void unpin_ggtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
- mutex_lock(&ggtt->vm.mutex);
- list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (vma->obj->mm.quirked)
i915_vma_unpin(vma);
- mutex_unlock(&ggtt->vm.mutex);
}
-static void cleanup_objects(struct drm_i915_private *i915,
- struct list_head *list)
+static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
struct drm_i915_gem_object *obj, *on;
@@ -127,44 +123,44 @@ static void cleanup_objects(struct drm_i915_private *i915,
i915_gem_object_put(obj);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(ggtt->vm.i915);
}
static int igt_evict_something(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict one. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict something */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
err);
@@ -172,13 +168,14 @@ static int igt_evict_something(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_overcommit(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
LIST_HEAD(objects);
@@ -188,11 +185,11 @@ static int igt_overcommit(void *arg)
* We expect it to fail.
*/
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -208,14 +205,14 @@ static int igt_overcommit(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_for_vma(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_mm_node target = {
.start = 0,
.size = 4096,
@@ -225,22 +222,26 @@ static int igt_evict_for_vma(void *arg)
/* Fill the GGTT with pinned objects and try to evict a range. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
/* Everything is unpinned, we should be able to evict the node */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n",
err);
@@ -248,7 +249,7 @@ static int igt_evict_for_vma(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
@@ -261,8 +262,8 @@ static void mock_color_adjust(const struct drm_mm_node *node,
static int igt_evict_for_cache_color(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
const unsigned long flags = PIN_OFFSET_FIXED;
struct drm_mm_node target = {
.start = I915_GTT_PAGE_SIZE * 2,
@@ -274,14 +275,16 @@ static int igt_evict_for_cache_color(void *arg)
LIST_HEAD(objects);
int err;
- /* Currently the use of color_adjust is limited to cache domains within
- * the ggtt, and so the presence of mm.color_adjust is assumed to be
- * i915_gtt_color_adjust throughout our driver, so using a mock color
- * adjust will work just fine for our purposes.
+ /*
+ * Currently the use of color_adjust for the GGTT is limited to cache
+ * coloring and guard pages, and so the presence of mm.color_adjust for
+ * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
+ * color adjust will work just fine for our purposes.
*/
ggtt->vm.mm.color_adjust = mock_color_adjust;
+ GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -297,7 +300,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto cleanup;
@@ -317,7 +320,9 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma);
/* Remove just the second vma */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup;
@@ -328,7 +333,9 @@ static int igt_evict_for_cache_color(void *arg)
*/
target.color = I915_CACHE_L3_LLC;
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
+ mutex_unlock(&ggtt->vm.mutex);
if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL;
@@ -338,36 +345,40 @@ static int igt_evict_for_cache_color(void *arg)
err = 0;
cleanup:
- unpin_ggtt(i915);
- cleanup_objects(i915, &objects);
+ unpin_ggtt(ggtt);
+ cleanup_objects(ggtt, &objects);
ggtt->vm.mm.color_adjust = NULL;
return err;
}
static int igt_evict_vm(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict everything. */
- err = populate_ggtt(i915, &objects);
+ err = populate_ggtt(ggtt, &objects);
if (err)
goto cleanup;
/* Everything is pinned, nothing should happen */
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
goto cleanup;
}
- unpin_ggtt(i915);
+ unpin_ggtt(ggtt);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_evict_vm(&ggtt->vm);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err);
@@ -375,14 +386,16 @@ static int igt_evict_vm(void *arg)
}
cleanup:
- cleanup_objects(i915, &objects);
+ cleanup_objects(ggtt, &objects);
return err;
}
static int igt_evict_contexts(void *arg)
{
const u64 PRETEND_GGTT_SIZE = 16ull << 20;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct reserved {
@@ -408,14 +421,14 @@ static int igt_evict_contexts(void *arg)
if (!HAS_FULL_PPGTT(i915))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
+ mutex_lock(&ggtt->vm.mutex);
+ err = i915_gem_gtt_insert(&ggtt->vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT);
if (err)
goto out_locked;
@@ -425,15 +438,17 @@ static int igt_evict_contexts(void *arg)
do {
struct reserved *r;
+ mutex_unlock(&ggtt->vm.mutex);
r = kcalloc(1, sizeof(*r), GFP_KERNEL);
+ mutex_lock(&ggtt->vm.mutex);
if (!r) {
err = -ENOMEM;
goto out_locked;
}
- if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
+ if (i915_gem_gtt_insert(&ggtt->vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
PIN_NOEVICT)) {
kfree(r);
break;
@@ -445,11 +460,11 @@ static int igt_evict_contexts(void *arg)
count++;
} while (1);
drm_mm_remove_node(&hole);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&ggtt->vm.mutex);
pr_info("Filled GGTT with %lu 1MiB nodes\n", count);
/* Overfill the GGTT with context objects and so try to evict one. */
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
struct i915_sw_fence fence;
struct drm_file *file;
@@ -460,7 +475,6 @@ static int igt_evict_contexts(void *arg)
}
count = 0;
- mutex_lock(&i915->drm.struct_mutex);
onstack_fence_init(&fence);
do {
struct i915_request *rq;
@@ -478,8 +492,8 @@ static int igt_evict_contexts(void *arg)
if (IS_ERR(rq)) {
/* When full, fail_if_busy will trigger EBUSY */
if (PTR_ERR(rq) != -EBUSY) {
- pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n",
- ctx->hw_id, engine->name,
+ pr_err("Unexpected error from request alloc (on %s): %d\n",
+ engine->name,
(int)PTR_ERR(rq));
err = PTR_ERR(rq);
}
@@ -497,8 +511,6 @@ static int igt_evict_contexts(void *arg)
count++;
err = 0;
} while(1);
- mutex_unlock(&i915->drm.struct_mutex);
-
onstack_fence_fini(&fence);
pr_info("Submitted %lu contexts/requests on %s\n",
count, engine->name);
@@ -508,9 +520,9 @@ static int igt_evict_contexts(void *arg)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&ggtt->vm.mutex);
out_locked:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -522,8 +534,8 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
+ mutex_unlock(&ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -545,11 +557,8 @@ int i915_gem_evict_mock_selftests(void)
if (!i915)
return -ENOMEM;
- mutex_lock(&i915->drm.struct_mutex);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- mutex_unlock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, &i915->gt);
drm_dev_put(&i915->drm);
return err;
@@ -564,5 +573,5 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
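The hunks above retire the device-global struct_mutex in favour of taking the per-address-space vm.mutex directly around the eviction calls, so the caller rather than the helper owns the lock. A minimal userspace sketch of that convention, assuming made-up struct names and pthreads in place of the kernel mutex API; it is an analogy, not the i915 interface:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct vm {
	pthread_mutex_t mutex;	/* stand-in for ggtt->vm.mutex */
	int evictable;		/* how many unpinned objects remain */
};

/* Assumed convention after this series: called with vm->mutex held. */
static int evict_something(struct vm *vm)
{
	if (vm->evictable == 0)
		return -ENOSPC;	/* everything pinned, nothing to evict */
	vm->evictable--;
	return 0;
}

int main(void)
{
	struct vm vm = { PTHREAD_MUTEX_INITIALIZER, 1 };
	int err;

	pthread_mutex_lock(&vm.mutex);	/* the caller, not the helper, locks */
	err = evict_something(&vm);
	pthread_mutex_unlock(&vm.mutex);

	printf("evict returned %d\n", err);
	return 0;
}

Pushing the lock out to the caller is what lets the tests wrap several back-to-back eviction attempts, such as the full-then-unpinned pairs above, in explicit lock/unlock sections.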
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 31a51ca1ddcb..3f7e80fb3bbd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,26 +25,20 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_context.h"
#include "i915_random.h"
#include "i915_selftest.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
+#include "igt_flush_test.h"
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
- /*
- * As we may hold onto the struct_mutex for inordinate lengths of
- * time, the NMI khungtaskd detector may fire for the free objects
- * worker.
- */
- mutex_unlock(&i915->drm.struct_mutex);
-
i915_gem_drain_freed_objects(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
}
static void fake_free_pages(struct drm_i915_gem_object *obj,
@@ -88,8 +82,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
}
GEM_BUG_ON(rem);
- obj->mm.madv = I915_MADV_DONTNEED;
-
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
return 0;
@@ -101,7 +93,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj,
{
fake_free_pages(obj, pages);
obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -113,6 +104,7 @@ static const struct drm_i915_gem_object_ops fake_ops = {
static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
+ static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!size);
@@ -126,7 +118,9 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &fake_ops);
+ i915_gem_object_init(obj, &fake_ops, &lock_class);
+
+ i915_gem_object_set_volatile(obj);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -293,18 +287,20 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vm->insert_entries(vm, &mock_vma,
+ I915_CACHE_NONE, 0);
}
count = n;
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
- vm->clear_range(vm, addr, BIT_ULL(size));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ vm->clear_range(vm, addr, BIT_ULL(size));
}
i915_gem_object_unpin_pages(obj);
@@ -875,6 +871,15 @@ static int __shrink_hole(struct drm_i915_private *i915,
i915_vma_unpin(vma);
addr += size;
+ /*
+ * Since we are injecting allocation faults at random intervals,
+ * wait for this allocation to complete before we change the
+ * fault injection.
+ */
+ err = i915_vma_sync(vma);
+ if (err)
+ break;
+
if (igt_timeout(end_time,
"%s timed out at ofset %llx [%llx - %llx]\n",
__func__, addr, hole_start, hole_end)) {
@@ -1008,21 +1013,19 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
- goto out_unlock;
+ goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(ppgtt->vm.closed);
+ GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_vm_put(&ppgtt->vm);
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+out_free:
mock_file_free(dev_priv, file);
return err;
}
@@ -1085,7 +1088,6 @@ static int exercise_ggtt(struct drm_i915_private *i915,
IGT_TIMEOUT(end_time);
int err = 0;
- mutex_lock(&i915->drm.struct_mutex);
restart:
list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
@@ -1106,7 +1108,6 @@ restart:
last = hole_end;
goto restart;
}
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1148,13 +1149,12 @@ static int igt_ggtt_page(void *arg)
unsigned int *order, n;
int err;
- mutex_lock(&i915->drm.struct_mutex);
+ if (!i915_ggtt_has_aperture(ggtt))
+ return 0;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_unlock;
- }
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
err = i915_gem_object_pin_pages(obj);
if (err)
@@ -1222,8 +1222,6 @@ out_unpin:
i915_gem_object_unpin_pages(obj);
out_free:
i915_gem_object_put(obj);
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1234,10 +1232,13 @@ static void track_vma_bind(struct i915_vma *vma)
atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
+ GEM_BUG_ON(vma->pages);
+ atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
+ __i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
mutex_lock(&vma->vm->mutex);
- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_add_tail(&vma->vm_link, &vma->vm->bound_list);
mutex_unlock(&vma->vm->mutex);
}
@@ -1248,6 +1249,7 @@ static int exercise_mock(struct drm_i915_private *i915,
unsigned long end_time))
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
IGT_TIMEOUT(end_time);
int err;
@@ -1256,7 +1258,9 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ err = func(i915, vm, 0, min(vm->total, limit), end_time);
+ i915_vm_put(vm);
mock_context_close(ctx);
return err;
@@ -1294,6 +1298,7 @@ static int igt_gtt_reserve(void *arg)
{
struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
+ I915_RND_STATE(prng);
LIST_HEAD(objects);
u64 total;
int err = -ENODEV;
@@ -1330,11 +1335,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1380,11 +1387,13 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1420,15 +1429,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, ggtt->vm.total,
- 2*I915_GTT_PAGE_SIZE,
- I915_GTT_MIN_ALIGNMENT);
+ offset = igt_random_offset(&prng,
+ 0, ggtt->vm.total,
+ 2 * I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1497,11 +1509,13 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err != -ENOSPC) {
pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
ii->size, ii->alignment, ii->start, ii->end,
@@ -1537,10 +1551,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
i915_gem_object_put(obj);
@@ -1595,10 +1611,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1642,10 +1660,12 @@ static int igt_gtt_insert(void *arg)
goto out;
}
+ mutex_lock(&ggtt->vm.mutex);
err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
0, ggtt->vm.total,
0);
+ mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, ggtt->vm.total, err);
@@ -1689,13 +1709,10 @@ int i915_gem_gtt_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -1703,6 +1720,312 @@ out_put:
return err;
}
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ long timeout;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ timeout = i915_request_wait(rq, 0, HZ / 5);
+ i915_request_put(rq);
+
+ return timeout < 0 ? -EIO : 0;
+}
+
+static struct i915_request *
+submit_batch(struct intel_context *ce, u64 addr)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = 0;
+ if (rq->engine->emit_init_breadcrumb) /* detect a hang */
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err == 0)
+ err = rq->engine->emit_bb_start(rq, addr, 0, 0);
+
+ if (err == 0)
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return err ? ERR_PTR(err) : rq;
+}
+
+static u32 *spinner(u32 *batch, int i)
+{
+ return batch + i * 64 / sizeof(*batch) + 4;
+}
+
+static void end_spin(u32 *batch, int i)
+{
+ *spinner(batch, i) = MI_BATCH_BUFFER_END;
+ wmb();
+}
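spinner() and end_spin() above carve a page into PAGE_SIZE / 64 mini-batches; each one spins by branching back to itself with an MI_BATCH_BUFFER_START at dword 5 of its 64-byte slot, and is released when dword 4 is overwritten with MI_BATCH_BUFFER_END. A compilable userspace sketch of just the slot arithmetic; the MI_* encoding is simplified and the wmb() is elided, so treat it as illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define SLOT_BYTES 64
#define MI_BATCH_BUFFER_END (0x0au << 23)	/* simplified encoding */

static uint32_t *mock_spinner(uint32_t *batch, int i)
{
	/* dword 4 of slot i is where the "spin" gets terminated */
	return batch + i * SLOT_BYTES / sizeof(*batch) + 4;
}

static void mock_end_spin(uint32_t *batch, int i)
{
	/* replace the loop with a terminator; the kernel adds a wmb() here */
	*mock_spinner(batch, i) = MI_BATCH_BUFFER_END;
}

int main(void)
{
	uint32_t batch[PAGE_SIZE / sizeof(uint32_t)];

	memset(batch, 0, sizeof(batch));
	mock_end_spin(batch, 3);
	printf("slot 3 terminated at dword %ld\n",
	       (long)(mock_spinner(batch, 3) - batch));	/* 3 * 16 + 4 = 52 */
	return 0;
}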
+
+static int igt_cs_tlb(void *arg)
+{
+ const unsigned int count = PAGE_SIZE / 64;
+ const unsigned int chunk_size = count * PAGE_SIZE;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *bbe, *act, *out;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct drm_file *file;
+ struct i915_vma *vma;
+ I915_RND_STATE(prng);
+ unsigned int i;
+ u32 *result;
+ u32 *batch;
+ int err = 0;
+
+ /*
+ * Our mission here is to fool the hardware into executing something
+ * from scratch, as it has not seen the batch move (due to the
+ * missing TLB invalidate).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ if (i915_is_ggtt(vm))
+ goto out_vm;
+
+ /* Create two pages; a dummy with which we prefill the TLB, and the intended target */
+ bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(bbe)) {
+ err = PTR_ERR(bbe);
+ goto out_vm;
+ }
+
+ batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_bbe;
+ }
+ memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
+ i915_gem_object_flush_map(bbe);
+ i915_gem_object_unpin_map(bbe);
+
+ act = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
+ goto out_put_bbe;
+ }
+
+ /* Track the execution of each request by writing into a different slot */
+ batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_put_act;
+ }
+ for (i = 0; i < count; i++) {
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 6);
+ cs[0] = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ cs[1] = lower_32_bits(addr);
+ cs[2] = upper_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START_GEN8;
+ } else {
+ cs[1] = 0;
+ cs[2] = lower_32_bits(addr);
+ cs[3] = i;
+ cs[4] = MI_NOOP;
+ cs[5] = MI_BATCH_BUFFER_START;
+ }
+ }
+
+ out = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(out)) {
+ err = PTR_ERR(out);
+ goto out_put_batch;
+ }
+ i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
+
+ vma = i915_vma_instance(out, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_put_batch;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ PIN_USER |
+ PIN_OFFSET_FIXED |
+ (vm->total - PAGE_SIZE));
+ if (err)
+ goto out_put_out;
+ GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
+
+ result = i915_gem_object_pin_map(out, I915_MAP_WB);
+ if (IS_ERR(result)) {
+ err = PTR_ERR(result);
+ goto out_put_out;
+ }
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ IGT_TIMEOUT(end_time);
+ unsigned long pass = 0;
+
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ struct i915_request *rq;
+ u64 offset;
+
+ offset = igt_random_offset(&prng,
+ 0, vm->total - PAGE_SIZE,
+ chunk_size, PAGE_SIZE);
+
+ err = vm->allocate_va_range(vm, offset, chunk_size);
+ if (err)
+ goto end;
+
+ memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
+
+ vma = i915_vma_instance(bbe, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Prime the TLB with the dummy pages */
+ for (i = 0; i < count; i++) {
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ rq = submit_batch(ce, vma->node.start);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+ i915_request_put(rq);
+ }
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: dummy setup timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ vma = i915_vma_instance(act, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto end;
+ }
+
+ err = vma->ops->set_pages(vma);
+ if (err)
+ goto end;
+
+ /* Replace the TLB with target batches */
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+ u32 *cs = batch + i * 64 / sizeof(*cs);
+ u64 addr;
+
+ vma->node.start = offset + i * PAGE_SIZE;
+ vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+
+ addr = vma->node.start + i * 64;
+ cs[4] = MI_NOOP;
+ cs[6] = lower_32_bits(addr);
+ cs[7] = upper_32_bits(addr);
+ wmb();
+
+ rq = submit_batch(ce, addr);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto end;
+ }
+
+ /* Wait until the context chain has started */
+ if (i == 0) {
+ while (READ_ONCE(result[i]) &&
+ !i915_request_completed(rq))
+ cond_resched();
+ } else {
+ end_spin(batch, i - 1);
+ }
+
+ i915_request_put(rq);
+ }
+ end_spin(batch, count - 1);
+
+ vma->ops->clear_pages(vma);
+
+ err = context_sync(ce);
+ if (err) {
+ pr_err("%s: writes timed out\n",
+ ce->engine->name);
+ goto end;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (result[i] != i) {
+ pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
+ ce->engine->name, pass,
+ offset, i, result[i], i);
+ err = -EINVAL;
+ goto end;
+ }
+ }
+
+ vm->clear_range(vm, offset, chunk_size);
+ pass++;
+ }
+ }
+end:
+ if (igt_flush_test(i915))
+ err = -EIO;
+ i915_gem_context_unlock_engines(ctx);
+ i915_gem_object_unpin_map(out);
+out_put_out:
+ i915_gem_object_put(out);
+out_put_batch:
+ i915_gem_object_unpin_map(act);
+out_put_act:
+ i915_gem_object_put(act);
+out_put_bbe:
+ i915_gem_object_put(bbe);
+out_vm:
+ i915_vm_put(vm);
+out_unlock:
+ mock_file_free(i915, file);
+ return err;
+}
+
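igt_cs_tlb() first executes dummy batches at a set of addresses to warm the TLB, then rebinds those addresses to the real batches without an intervening invalidate; if the stale translations win, the stores are lost and the result check fails. A generic software analog of that hazard, assuming nothing more than a va->pa table and a fill-on-miss cache (this is the shape of the bug, not the i915 mechanism):

#include <stdio.h>

#define NPAGES 4

static int page_table[NPAGES];	/* va -> pa, the authoritative mapping */
static int tlb[NPAGES];		/* cached va -> pa, -1 == invalid */

static int translate(int va)
{
	if (tlb[va] < 0)
		tlb[va] = page_table[va];	/* fill on miss */
	return tlb[va];				/* on a hit, may be stale */
}

int main(void)
{
	for (int va = 0; va < NPAGES; va++) {
		page_table[va] = va;	/* the "dummy" pages */
		tlb[va] = -1;
	}

	translate(2);			/* prime the TLB for va 2 */
	page_table[2] = 42;		/* rebind, but do NOT invalidate */

	/* the stale cached translation wins: prints 2, not 42 */
	printf("va 2 -> pa %d (page table says %d)\n",
	       translate(2), page_table[2]);
	return 0;
}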
int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -1720,6 +2043,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_pot),
SUBTEST(igt_ggtt_fill),
SUBTEST(igt_ggtt_page),
+ SUBTEST(igt_cs_tlb),
};
GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 1ccf0f731ac0..4b3cac73e291 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,6 +15,9 @@ selftest(workarounds, intel_workarounds_live_selftests)
selftest(gt_engines, intel_engine_live_selftests)
selftest(gt_timelines, intel_timeline_live_selftests)
selftest(gt_contexts, intel_context_live_selftests)
+selftest(gt_lrc, intel_lrc_live_selftests)
+selftest(gt_pm, intel_gt_pm_live_selftests)
+selftest(gt_heartbeat, intel_heartbeat_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -30,6 +33,8 @@ selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
+selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
selftest(guc, intel_guc_live_selftest)
+selftest(perf, i915_perf_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index b88084fe3269..aa5a0e7f5d9e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -26,3 +26,4 @@ selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
selftest(buddy, i915_buddy_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
new file mode 100644
index 000000000000..aabd07f67e49
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -0,0 +1,217 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kref.h>
+
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
+#include "lib_sw_fence.h"
+
+static struct i915_perf_stream *
+test_stream(struct i915_perf *perf)
+{
+ struct drm_i915_perf_open_param param = {};
+ struct perf_open_properties props = {
+ .engine = intel_engine_lookup_user(perf->i915,
+ I915_ENGINE_CLASS_RENDER,
+ 0),
+ .sample_flags = SAMPLE_OA_REPORT,
+ .oa_format = IS_GEN(perf->i915, 12) ?
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
+ .metrics_set = 1,
+ };
+ struct i915_perf_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return NULL;
+
+ stream->perf = perf;
+
+ mutex_lock(&perf->lock);
+ if (i915_oa_stream_init(stream, &param, &props)) {
+ kfree(stream);
+ stream = NULL;
+ }
+ mutex_unlock(&perf->lock);
+
+ return stream;
+}
+
+static void stream_destroy(struct i915_perf_stream *stream)
+{
+ struct i915_perf *perf = stream->perf;
+
+ mutex_lock(&perf->lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&perf->lock);
+}
+
+static int live_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+
+ /* Quick check we can create a perf stream */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -EINVAL;
+
+ stream_destroy(stream);
+ return 0;
+}
+
+static int write_timestamp(struct i915_request *rq, int slot)
+{
+ u32 *cs;
+ int len;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ len = 5;
+ if (INTEL_GEN(rq->i915) >= 8)
+ len++;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(len);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_WRITE_TIMESTAMP;
+ *cs++ = slot * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static ktime_t poll_status(struct i915_request *rq, int slot)
+{
+ while (!intel_read_status_page(rq->engine, slot) &&
+ !i915_request_completed(rq))
+ cpu_relax();
+
+ return ktime_get();
+}
+
+static int live_noa_delay(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_perf_stream *stream;
+ struct i915_request *rq;
+ ktime_t t0, t1;
+ u64 expected;
+ u32 delay;
+ int err;
+ int i;
+
+ /* Check that the GPU delay matches expectations */
+
+ stream = test_stream(&i915->perf);
+ if (!stream)
+ return -ENOMEM;
+
+ expected = atomic64_read(&stream->perf->noa_programming_delay);
+
+ if (stream->engine->class != RENDER_CLASS) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < 4; i++)
+ intel_write_status_page(stream->engine, 0x100 + i, 0);
+
+ rq = i915_request_create(stream->engine->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb &&
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ err = write_timestamp(rq, 0x100);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = rq->engine->emit_bb_start(rq,
+ i915_ggtt_offset(stream->noa_wait), 0,
+ I915_DISPATCH_SECURE);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ err = write_timestamp(rq, 0x102);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ preempt_disable();
+ t0 = poll_status(rq, 0x100);
+ t1 = poll_status(rq, 0x102);
+ preempt_enable();
+
+ pr_info("CPU delay: %lluns, expected %lluns\n",
+ ktime_sub(t1, t0), expected);
+
+ delay = intel_read_status_page(stream->engine, 0x102);
+ delay -= intel_read_status_page(stream->engine, 0x100);
+ delay = div_u64(mul_u32_u32(delay, 1000 * 1000),
+ RUNTIME_INFO(i915)->cs_timestamp_frequency_khz);
+ pr_info("GPU delay: %uns, expected %lluns\n",
+ delay, expected);
+
+ if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
+ pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
+ delay / 1000,
+ div_u64(3 * expected, 4000),
+ div_u64(3 * expected, 2000));
+ err = -EINVAL;
+ }
+
+ i915_request_put(rq);
+out:
+ stream_destroy(stream);
+ return err;
+}
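The GPU-side delay above is measured in command-streamer timestamp ticks and converted to nanoseconds as ns = ticks * 1,000,000 / freq_khz, matching the mul_u32_u32()/div_u64() pair in live_noa_delay(). A short sketch of that arithmetic; the 12000 kHz frequency is an assumed value for illustration, whereas the driver reads the real one from RUNTIME_INFO(i915)->cs_timestamp_frequency_khz:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t freq_khz = 12000;	/* assumed CS timestamp frequency */
	uint32_t t0 = 100, t1 = 1300;	/* raw timestamps read from the slots */
	uint64_t ticks = t1 - t0;
	uint64_t ns = ticks * 1000000ull / freq_khz;

	/* 1200 ticks at 12MHz -> 100000ns, i.e. 100us */
	printf("%llu ticks -> %lluns (%lluus)\n",
	       (unsigned long long)ticks,
	       (unsigned long long)ns,
	       (unsigned long long)(ns / 1000));
	return 0;
}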
+
+int i915_perf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_sanitycheck),
+ SUBTEST(live_noa_delay),
+ };
+ struct i915_perf *perf = &i915->perf;
+
+ if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
+ return 0;
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
index 716a3f19f030..abdfadcf626b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.c
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "i915_random.h"
+#include "i915_utils.h"
u64 i915_prandom_u64_state(struct rnd_state *rnd)
{
@@ -87,3 +88,22 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
i915_random_reorder(order, count, state);
return order;
}
+
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align)
+{
+ u64 range, addr;
+
+ BUG_ON(range_overflows(start, len, end));
+ BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ addr = i915_prandom_u64_state(state);
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
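igt_random_offset() returns a uniformly distributed offset r with start <= r, r + len <= end and r aligned to align, by shrinking the interval to its aligned interior before taking the remainder. A standalone restatement of that range arithmetic with a fixed "random" input; round_up64()/round_down64() are local helpers standing in for the kernel macros:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_up64(uint64_t x, uint64_t a)
{
	return (x + a - 1) / a * a;
}

static uint64_t round_down64(uint64_t x, uint64_t a)
{
	return x / a * a;
}

static uint64_t random_offset(uint64_t rnd, uint64_t start, uint64_t end,
			      uint64_t len, uint64_t align)
{
	uint64_t range = round_down64(end - len, align) -
			 round_up64(start, align);

	if (range)
		start += rnd % range;	/* kernel uses div64_u64_rem() */
	return round_up64(start, align);
}

int main(void)
{
	/* e.g. place a 2-page node in a 1MiB GTT at page alignment */
	uint64_t r = random_offset(123456789ull, 0, 1ull << 20,
				   2 * 4096, 4096);

	printf("offset = 0x%llx\n", (unsigned long long)r);
	return 0;
}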
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
index 8e1ff9c105b6..35cc69a3a1b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_random.h
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -57,4 +57,8 @@ void i915_random_reorder(unsigned int *order,
void i915_prandom_shuffle(void *arr, size_t elsz, size_t count,
struct rnd_state *state);
+u64 igt_random_offset(struct rnd_state *state,
+ u64 start, u64 end,
+ u64 len, u64 align);
+
#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index b3688543ed7d..8618a4dc0701 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -37,25 +37,32 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for_each_uabi_engine(engine, i915)
+ count++;
+
+ return count;
+}
+
static int igt_add_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_request *request;
- int err = -ENOMEM;
/* Basic preliminary test to create a request and let it loose! */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
- goto out_unlock;
+ return -ENOMEM;
i915_request_add(request);
- err = 0;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
+ return 0;
}
static int igt_wait_request(void *arg)
@@ -67,12 +74,10 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ if (!request)
+ return -ENOMEM;
+
i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
@@ -125,9 +130,7 @@ static int igt_wait_request(void *arg)
err = 0;
out_request:
i915_request_put(request);
-out_unlock:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -140,52 +143,45 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
- mutex_lock(&i915->drm.struct_mutex);
request = mock_request(i915->engine[RCS0]->kernel_context, T);
- if (!request) {
- err = -ENOMEM;
- goto out_locked;
- }
+ if (!request)
+ return -ENOMEM;
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_locked;
+ goto out;
}
i915_request_add(request);
- mutex_unlock(&i915->drm.struct_mutex);
if (dma_fence_is_signaled(&request->fence)) {
pr_err("fence signaled immediately!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
pr_err("fence wait success after submit (expected timeout)!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out (expected success)!\n");
- goto out_device;
+ goto out;
}
if (!dma_fence_is_signaled(&request->fence)) {
pr_err("fence unsignaled after waiting!\n");
- goto out_device;
+ goto out;
}
if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
pr_err("fence wait timed out when complete (expected success)!\n");
- goto out_device;
+ goto out;
}
err = 0;
-out_device:
- mutex_lock(&i915->drm.struct_mutex);
-out_locked:
+out:
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -197,8 +193,8 @@ static int igt_request_rewind(void *arg)
struct intel_context *ce;
int err = -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
+
ce = i915_gem_context_get_engine(ctx[0], RCS0);
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
@@ -212,6 +208,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
+
ce = i915_gem_context_get_engine(ctx[1], RCS0);
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
@@ -233,7 +230,6 @@ static int igt_request_rewind(void *arg)
request->engine->submit_request(request);
rcu_read_unlock();
- mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
pr_err("timed out waiting for high priority request\n");
@@ -248,14 +244,12 @@ static int igt_request_rewind(void *arg)
err = 0;
err:
i915_request_put(vip);
- mutex_lock(&i915->drm.struct_mutex);
err_context_1:
mock_context_close(ctx[1]);
i915_request_put(request);
err_context_0:
mock_context_close(ctx[0]);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -282,7 +276,6 @@ __live_request_alloc(struct intel_context *ce)
static int __igt_breadcrumbs_smoketest(void *arg)
{
struct smoketest *t = arg;
- struct mutex * const BKL = &t->engine->i915->drm.struct_mutex;
const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1;
const unsigned int total = 4 * t->ncontexts + 1;
unsigned int num_waits = 0, num_fences = 0;
@@ -300,7 +293,7 @@ static int __igt_breadcrumbs_smoketest(void *arg)
* that the fences were marked as signaled.
*/
- requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL);
+ requests = kcalloc(total, sizeof(*requests), GFP_KERNEL);
if (!requests)
return -ENOMEM;
@@ -337,14 +330,11 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_request *rq;
struct intel_context *ce;
- mutex_lock(BKL);
-
ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
GEM_BUG_ON(IS_ERR(ce));
rq = t->request_alloc(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
- mutex_unlock(BKL);
err = PTR_ERR(rq);
count = n;
break;
@@ -357,8 +347,6 @@ static int __igt_breadcrumbs_smoketest(void *arg)
requests[n] = i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(BKL);
-
if (err >= 0)
err = i915_sw_fence_await_dma_fence(wait,
&rq->fence,
@@ -446,18 +434,16 @@ static int mock_breadcrumbs_smoketest(void *arg)
* See __igt_breadcrumbs_smoketest();
*/
- threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
- t.contexts =
- kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
+ t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL);
if (!t.contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < t.ncontexts; n++) {
t.contexts[n] = mock_context(t.engine->i915, "mock");
if (!t.contexts[n]) {
@@ -465,7 +451,6 @@ static int mock_breadcrumbs_smoketest(void *arg)
goto out_contexts;
}
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
for (n = 0; n < ncpus; n++) {
threads[n] = kthread_run(__igt_breadcrumbs_smoketest,
@@ -479,6 +464,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
get_task_struct(threads[n]);
}
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
for (n = 0; n < ncpus; n++) {
@@ -495,18 +481,15 @@ static int mock_breadcrumbs_smoketest(void *arg)
atomic_long_read(&t.num_fences),
ncpus);
- mutex_lock(&t.engine->i915->drm.struct_mutex);
out_contexts:
for (n = 0; n < t.ncontexts; n++) {
if (!t.contexts[n])
break;
mock_context_close(t.contexts[n]);
}
- mutex_unlock(&t.engine->i915->drm.struct_mutex);
kfree(t.contexts);
out_threads:
kfree(threads);
-
return ret;
}
@@ -539,40 +522,37 @@ static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
int err = -ENODEV;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- for_each_engine(engine, i915, id) {
- struct i915_request *request = NULL;
+ for_each_uabi_engine(engine, i915) {
unsigned long n, prime;
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
- goto out_unlock;
+ return err;
for_each_prime_number_from(prime, 1, 8192) {
+ struct i915_request *request = NULL;
+
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = i915_request_create(engine->kernel_context);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- goto out_unlock;
- }
+ if (IS_ERR(request))
+ return PTR_ERR(request);
- /* This space is left intentionally blank.
+ /*
+ * This space is left intentionally blank.
*
* We do not actually want to perform any
* action with this request, we just want
@@ -585,9 +565,11 @@ static int live_nop_request(void *arg)
* for latency.
*/
+ i915_request_get(request);
i915_request_add(request);
}
i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(request);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -599,7 +581,7 @@ static int live_nop_request(void *arg)
err = igt_live_test_end(&t);
if (err)
- goto out_unlock;
+ return err;
pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
engine->name,
@@ -607,9 +589,6 @@ static int live_nop_request(void *arg)
prime, div64_u64(ktime_to_ns(times[1]), prime));
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -647,8 +626,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
+ /* Force the wait now to avoid including it in the benchmark */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
return vma;
+err_pin:
+ i915_vma_unpin(vma);
err:
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -672,6 +658,7 @@ empty_request(struct intel_engine_cs *engine,
if (err)
goto out_request;
+ i915_request_get(request);
out_request:
i915_request_add(request);
return err ? ERR_PTR(err) : request;
@@ -681,27 +668,21 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
int err = 0;
- /* Submit various sized batches of empty requests, to each engine
+ /*
+ * Submit various sized batches of empty requests, to each engine
* (individually), and wait for the batch to complete. We can check
* the overhead of submitting requests to the hardware.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
batch = empty_batch(i915);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto out_unlock;
- }
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
IGT_TIMEOUT(end_time);
struct i915_request *request;
unsigned long n, prime;
@@ -723,6 +704,7 @@ static int live_empty_request(void *arg)
times[1] = ktime_get_raw();
for (n = 0; n < prime; n++) {
+ i915_request_put(request);
request = empty_request(engine, batch);
if (IS_ERR(request)) {
err = PTR_ERR(request);
@@ -738,6 +720,7 @@ static int live_empty_request(void *arg)
if (__igt_timeout(end_time, NULL))
break;
}
+ i915_request_put(request);
err = igt_live_test_end(&t);
if (err)
@@ -752,18 +735,15 @@ static int live_empty_request(void *arg)
out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
return err;
}
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
+ struct i915_address_space *vm;
struct i915_vma *vma;
u32 *cmd;
int err;
@@ -772,7 +752,9 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = i915_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -832,67 +814,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
static int live_all_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
+ const unsigned int nengines = num_uabi_engines(i915);
struct intel_engine_cs *engine;
- struct i915_request *request[I915_NUM_ENGINES];
- intel_wakeref_t wakeref;
+ struct i915_request **request;
struct igt_live_test t;
struct i915_vma *batch;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines simultaneously. We
+ /*
+ * Check we can submit requests to all engines simultaneously. We
* send a recursive batch to each engine - checking that we don't
* block doing so, and that they don't complete too soon.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
batch = recursive_batch(i915);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
- goto out_unlock;
+ goto out_free;
}
- for_each_engine(engine, i915, id) {
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ request[idx] = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed with err=%d\n",
__func__, err);
goto out_request;
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_request_await_object(request[id], batch->obj, 0);
+ err = i915_request_await_object(request[idx], batch->obj, 0);
if (err == 0)
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
+ idx++;
}
- for_each_engine(engine, i915, id) {
- if (i915_request_completed(request[id])) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
+ idx++;
}
err = recursive_batch_resolve(batch);
@@ -901,10 +889,11 @@ static int live_all_engines(void *arg)
goto out_request;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -913,50 +902,56 @@ static int live_all_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
- i915_request_put(request[id]);
- request[id] = NULL;
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ i915_request_put(request[idx]);
+ request[idx] = NULL;
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id)
- if (request[id])
- i915_request_put(request[id]);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ if (request[idx])
+ i915_request_put(request[idx]);
+ idx++;
+ }
i915_vma_unpin(batch);
i915_vma_put(batch);
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
return err;
}
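live_all_engines() and live_sequential_engines() now size their request arrays at runtime from num_uabi_engines() with kcalloc() instead of a fixed I915_NUM_ENGINES array, relying on the zeroed slots so the cleanup paths can skip entries that were never populated. A userspace sketch of that allocation pattern; the struct names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct engine { const char *name; };		/* hypothetical */
struct request { struct engine *engine; };	/* hypothetical */

int main(void)
{
	struct engine engines[] = { { "rcs0" }, { "bcs0" }, { "vcs0" } };
	unsigned int nengines = sizeof(engines) / sizeof(engines[0]);

	/* calloc, like kcalloc, zeroes: unset slots stay NULL for cleanup */
	struct request **request = calloc(nengines, sizeof(*request));
	if (!request)
		return 1;

	for (unsigned int idx = 0; idx < nengines; idx++)
		printf("slot %u (%s) starts as %p\n", idx, engines[idx].name,
		       (void *)request[idx]);

	free(request);
	return 0;
}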
static int live_sequential_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct i915_request *request[I915_NUM_ENGINES] = {};
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct i915_request **request;
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
- unsigned int id;
+ unsigned int idx;
int err;
- /* Check we can submit requests to all engines sequentially, such
+ /*
+ * Check we can submit requests to all engines sequentially, such
* that each successive request waits for the earlier ones. This
* tests that we don't execute requests out of order, even though
* they are running on independent engines.
*/
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
- goto out_unlock;
+ goto out_free;
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
struct i915_vma *batch;
batch = recursive_batch(i915);
@@ -964,66 +959,69 @@ static int live_sequential_engines(void *arg)
err = PTR_ERR(batch);
pr_err("%s: Unable to create batch for %s, err=%d\n",
__func__, engine->name, err);
- goto out_unlock;
+ goto out_free;
}
- request[id] = i915_request_create(engine->kernel_context);
- if (IS_ERR(request[id])) {
- err = PTR_ERR(request[id]);
+ request[idx] = i915_request_create(engine->kernel_context);
+ if (IS_ERR(request[idx])) {
+ err = PTR_ERR(request[idx]);
pr_err("%s: Request allocation failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
if (prev) {
- err = i915_request_await_dma_fence(request[id],
+ err = i915_request_await_dma_fence(request[idx],
&prev->fence);
if (err) {
- i915_request_add(request[id]);
+ i915_request_add(request[idx]);
pr_err("%s: Request await failed for %s with err=%d\n",
__func__, engine->name, err);
goto out_request;
}
}
- err = engine->emit_bb_start(request[id],
+ err = engine->emit_bb_start(request[idx],
batch->node.start,
batch->node.size,
0);
GEM_BUG_ON(err);
- request[id]->batch = batch;
+ request[idx]->batch = batch;
i915_vma_lock(batch);
- err = i915_request_await_object(request[id], batch->obj, false);
+ err = i915_request_await_object(request[idx],
+ batch->obj, false);
if (err == 0)
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_vma_move_to_active(batch, request[idx], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_request_get(request[id]);
- i915_request_add(request[id]);
+ i915_request_get(request[idx]);
+ i915_request_add(request[idx]);
- prev = request[id];
+ prev = request[idx];
+ idx++;
}
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
long timeout;
- if (i915_request_completed(request[id])) {
+ if (i915_request_completed(request[idx])) {
pr_err("%s(%s): request completed too early!\n",
__func__, engine->name);
err = -EINVAL;
goto out_request;
}
- err = recursive_batch_resolve(request[id]->batch);
+ err = recursive_batch_resolve(request[idx]->batch);
if (err) {
pr_err("%s: failed to resolve batch, err=%d\n",
__func__, err);
goto out_request;
}
- timeout = i915_request_wait(request[id], 0,
+ timeout = i915_request_wait(request[idx], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1032,33 +1030,156 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- GEM_BUG_ON(!i915_request_completed(request[id]));
+ GEM_BUG_ON(!i915_request_completed(request[idx]));
+ idx++;
}
err = igt_live_test_end(&t);
out_request:
- for_each_engine(engine, i915, id) {
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
u32 *cmd;
- if (!request[id])
+ if (!request[idx])
break;
- cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+ cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
- i915_gem_object_unpin_map(request[id]->batch->obj);
+ i915_gem_object_unpin_map(request[idx]->batch->obj);
}
- i915_vma_put(request[id]->batch);
- i915_request_put(request[id]);
+ i915_vma_put(request[idx]->batch);
+ i915_request_put(request[idx]);
+ idx++;
}
-out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
+out_free:
+ kfree(request);
+ return err;
+}
+
+static int __live_parallel_engine1(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ err = 0;
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu request + sync\n", engine->name, count);
+ return 0;
+}
+
+static int __live_parallel_engineN(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_add(rq);
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("%s: %lu requests\n", engine->name, count);
+ return 0;
+}
+
+static int live_parallel_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static int (* const func[])(void *arg) = {
+ __live_parallel_engine1,
+ __live_parallel_engineN,
+ NULL,
+ };
+ const unsigned int nengines = num_uabi_engines(i915);
+ struct intel_engine_cs *engine;
+ int (* const *fn)(void *arg);
+ struct task_struct **tsk;
+ int err = 0;
+
+ /*
+ * Check we can submit requests to all engines concurrently. This
+ * tests that we load up the system maximally.
+ */
+
+ tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
+ if (!tsk)
+ return -ENOMEM;
+
+ for (fn = func; !err && *fn; fn++) {
+ struct igt_live_test t;
+ unsigned int idx;
+
+ err = igt_live_test_begin(&t, i915, __func__, "");
+ if (err)
+ break;
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ tsk[idx] = kthread_run(*fn, engine,
+ "igt/parallel:%s",
+ engine->name);
+ if (IS_ERR(tsk[idx])) {
+ err = PTR_ERR(tsk[idx]);
+ break;
+ }
+ get_task_struct(tsk[idx++]);
+ }
+
+ yield(); /* start all threads before we kthread_stop() */
+
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ int status;
+
+ if (IS_ERR(tsk[idx]))
+ break;
+
+ status = kthread_stop(tsk[idx]);
+ if (status && !err)
+ err = status;
+
+ put_task_struct(tsk[idx++]);
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ }
+
+ kfree(tsk);
return err;
}
@@ -1102,16 +1223,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
static int live_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct smoketest t[I915_NUM_ENGINES];
- unsigned int ncpus = num_online_cpus();
+ const unsigned int nengines = num_uabi_engines(i915);
+ const unsigned int ncpus = num_online_cpus();
unsigned long num_waits, num_fences;
struct intel_engine_cs *engine;
struct task_struct **threads;
struct igt_live_test live;
- enum intel_engine_id id;
intel_wakeref_t wakeref;
struct drm_file *file;
- unsigned int n;
+ struct smoketest *smoke;
+ unsigned int n, idx;
int ret = 0;
/*
@@ -1130,29 +1251,31 @@ static int live_breadcrumbs_smoketest(void *arg)
goto out_rpm;
}
- threads = kcalloc(ncpus * I915_NUM_ENGINES,
- sizeof(*threads),
- GFP_KERNEL);
- if (!threads) {
+ smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+ if (!smoke) {
ret = -ENOMEM;
goto out_file;
}
- memset(&t[0], 0, sizeof(t[0]));
- t[0].request_alloc = __live_request_alloc;
- t[0].ncontexts = 64;
- t[0].contexts = kmalloc_array(t[0].ncontexts,
- sizeof(*t[0].contexts),
- GFP_KERNEL);
- if (!t[0].contexts) {
+ threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ ret = -ENOMEM;
+ goto out_smoke;
+ }
+
+ smoke[0].request_alloc = __live_request_alloc;
+ smoke[0].ncontexts = 64;
+ smoke[0].contexts = kcalloc(smoke[0].ncontexts,
+ sizeof(*smoke[0].contexts),
+ GFP_KERNEL);
+ if (!smoke[0].contexts) {
ret = -ENOMEM;
goto out_threads;
}
- mutex_lock(&i915->drm.struct_mutex);
- for (n = 0; n < t[0].ncontexts; n++) {
- t[0].contexts[n] = live_context(i915, file);
- if (!t[0].contexts[n]) {
+ for (n = 0; n < smoke[0].ncontexts; n++) {
+ smoke[0].contexts[n] = live_context(i915, file);
+ if (!smoke[0].contexts[n]) {
ret = -ENOMEM;
goto out_contexts;
}
@@ -1162,45 +1285,48 @@ static int live_breadcrumbs_smoketest(void *arg)
if (ret)
goto out_contexts;
- for_each_engine(engine, i915, id) {
- t[id] = t[0];
- t[id].engine = engine;
- t[id].max_batch = max_batches(t[0].contexts[0], engine);
- if (t[id].max_batch < 0) {
- ret = t[id].max_batch;
- mutex_unlock(&i915->drm.struct_mutex);
+ idx = 0;
+ for_each_uabi_engine(engine, i915) {
+ smoke[idx] = smoke[0];
+ smoke[idx].engine = engine;
+ smoke[idx].max_batch =
+ max_batches(smoke[0].contexts[0], engine);
+ if (smoke[idx].max_batch < 0) {
+ ret = smoke[idx].max_batch;
goto out_flush;
}
/* One ring interleaved between requests from all cpus */
- t[id].max_batch /= num_online_cpus() + 1;
+ smoke[idx].max_batch /= num_online_cpus() + 1;
pr_debug("Limiting batches to %d requests on %s\n",
- t[id].max_batch, engine->name);
+ smoke[idx].max_batch, engine->name);
for (n = 0; n < ncpus; n++) {
struct task_struct *tsk;
tsk = kthread_run(__igt_breadcrumbs_smoketest,
- &t[id], "igt/%d.%d", id, n);
+ &smoke[idx], "igt/%d.%d", idx, n);
if (IS_ERR(tsk)) {
ret = PTR_ERR(tsk);
- mutex_unlock(&i915->drm.struct_mutex);
goto out_flush;
}
get_task_struct(tsk);
- threads[id * ncpus + n] = tsk;
+ threads[idx * ncpus + n] = tsk;
}
+
+ idx++;
}
- mutex_unlock(&i915->drm.struct_mutex);
+ yield(); /* start all threads before we begin */
msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
out_flush:
+ idx = 0;
num_waits = 0;
num_fences = 0;
- for_each_engine(engine, i915, id) {
+ for_each_uabi_engine(engine, i915) {
for (n = 0; n < ncpus; n++) {
- struct task_struct *tsk = threads[id * ncpus + n];
+ struct task_struct *tsk = threads[idx * ncpus + n];
int err;
if (!tsk)
@@ -1213,19 +1339,20 @@ out_flush:
put_task_struct(tsk);
}
- num_waits += atomic_long_read(&t[id].num_waits);
- num_fences += atomic_long_read(&t[id].num_fences);
+ num_waits += atomic_long_read(&smoke[idx].num_waits);
+ num_fences += atomic_long_read(&smoke[idx].num_fences);
+ idx++;
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
- mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
out_contexts:
- mutex_unlock(&i915->drm.struct_mutex);
- kfree(t[0].contexts);
+ kfree(smoke[0].contexts);
out_threads:
kfree(threads);
+out_smoke:
+ kfree(smoke);
out_file:
mock_file_free(i915, file);
out_rpm:
@@ -1240,6 +1367,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_nop_request),
SUBTEST(live_all_engines),
SUBTEST(live_sequential_engines),
+ SUBTEST(live_parallel_engines),
SUBTEST(live_empty_request),
SUBTEST(live_breadcrumbs_smoketest),
};
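For reference, SUBTEST() only pairs a function with its stringified name; the harness shape is roughly this sketch of the i915 selftest convention, not a verbatim copy:

	struct i915_subtest {
		int (*func)(void *data);
		const char *name;
	};
	#define SUBTEST(x) { .func = x, .name = #x }

The live suite is then driven by loading the module with the selftest parameter enabled (e.g. i915.live_selftests=-1 to run everything), passing through the __i915_live_setup()/__i915_live_teardown() hooks modified below.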
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 438ea0eaa416..a6cca4ad96f6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -23,13 +23,14 @@
#include <linux/random.h>
-#include "../i915_drv.h"
-#include "../i915_selftest.h"
+#include "gt/intel_gt_pm.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
- .timeout_ms = 1000,
+ .timeout_ms = 500,
};
int i915_mock_sanitycheck(void)
@@ -256,6 +257,10 @@ int __i915_live_setup(void *data)
{
struct drm_i915_private *i915 = data;
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(&i915->gt))
+ return -EIO;
+
return intel_gt_terminally_wedged(&i915->gt);
}
@@ -263,10 +268,8 @@ int __i915_live_teardown(int err, void *data)
{
struct drm_i915_private *i915 = data;
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
@@ -277,6 +280,10 @@ int __intel_gt_live_setup(void *data)
{
struct intel_gt *gt = data;
+ /* The selftests expect an idle system */
+ if (intel_gt_pm_wait_for_idle(gt))
+ return -EIO;
+
return intel_gt_terminally_wedged(gt);
}
@@ -284,10 +291,8 @@ int __intel_gt_live_teardown(int err, void *data)
{
struct intel_gt *gt = data;
- mutex_lock(&gt->i915->drm.struct_mutex);
- if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(gt->i915))
err = -EIO;
- mutex_unlock(&gt->i915->drm.struct_mutex);
i915_gem_drain_freed_objects(gt->i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index a5bec0a4cdcc..58b5f40a07dd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,6 +24,7 @@
#include <linux/prime_numbers.h>
+#include "gem/i915_gem_context.h"
#include "gem/selftests/mock_context.h"
#include "i915_scatterlist.h"
@@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != ctx->vm) {
+ if (vma->vm != rcu_access_pointer(ctx->vm)) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = ctx->vm;
+ struct i915_address_space *vm;
struct i915_vma *vma;
int err;
+ vm = i915_gem_context_get_vm_rcu(ctx);
vma = checked_vma_instance(obj, vm, NULL);
+ i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -170,7 +173,7 @@ static int igt_vma_create(void *arg)
}
nc = 0;
- for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+ for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
for (; nc < num_ctx; nc++) {
ctx = mock_context(i915, "mock");
if (!ctx)
@@ -623,7 +626,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
struct sgt_iter sgt;
dma_addr_t dma;
- for_each_sgt_dma(dma, sgt, vma->pages) {
+ for_each_sgt_daddr(dma, sgt, vma->pages) {
dma_addr_t src;
if (!size) {
@@ -831,13 +834,10 @@ int i915_vma_mock_selftests(void)
}
mock_init_ggtt(i915, ggtt);
- mutex_lock(&i915->drm.struct_mutex);
err = i915_subtests(tests, ggtt);
- mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
i915_gem_drain_freed_objects(i915);
-
mock_fini_ggtt(ggtt);
kfree(ggtt);
out_put:
@@ -879,8 +879,6 @@ static int igt_vma_remapped_gtt(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- mutex_lock(&i915->drm.struct_mutex);
-
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
@@ -976,7 +974,6 @@ static int igt_vma_remapped_gtt(void *arg)
out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
return err;
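The i915_vma.c changes above track ctx->vm becoming RCU-protected and reference-counted: readers may no longer dereference it bare, hence rcu_access_pointer() for the mere comparison and i915_gem_context_get_vm_rcu()/i915_vm_put() where the vm is actually used. The underlying acquire pattern looks roughly like this sketch, assuming the kref embedded in i915_address_space:

	struct i915_address_space *vm;

	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;	/* raced with the final put */
	rcu_read_unlock();

	/* ... use vm ... */
	if (vm)
		i915_vm_put(vm);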
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index d3b5eb402d33..7b0939e3f007 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,39 +4,32 @@
* Copyright © 2018 Intel Corporation
*/
-#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_selftest.h"
#include "igt_flush_test.h"
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
+int igt_flush_test(struct drm_i915_private *i915)
{
- int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
- int repeat = !!(flags & I915_WAIT_LOCKED);
+ struct intel_gt *gt = &i915->gt;
+ int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
cond_resched();
- do {
- if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- GEM_TRACE("%pS timed out.\n",
- __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
- intel_gt_set_wedged(&i915->gt);
- repeat = 0;
- ret = -EIO;
- }
-
- /* Ensure we also flush after wedging. */
- if (flags & I915_WAIT_LOCKED)
- i915_retire_requests(i915);
- } while (repeat--);
+ intel_gt_set_wedged(gt);
+ ret = -EIO;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
index 63e009927c43..7541fa74e641 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.h
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h
@@ -9,6 +9,6 @@
struct drm_i915_private;
-int igt_flush_test(struct drm_i915_private *i915, unsigned int flags);
+int igt_flush_test(struct drm_i915_private *i915);
#endif /* IGT_FLUSH_TEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 3e902761cd16..c130010a7033 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "i915_drv.h"
+#include "gt/intel_gt_requests.h"
#include "../i915_selftest.h"
#include "igt_flush_test.h"
@@ -15,20 +16,16 @@ int igt_live_test_begin(struct igt_live_test *t,
const char *func,
const char *name)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
t->i915 = i915;
t->func = func;
t->name = name;
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
if (err) {
pr_err("%s(%s): failed to idle before, with err=%d!",
func, name, err);
@@ -37,7 +34,7 @@ int igt_live_test_begin(struct igt_live_test *t,
t->reset_global = i915_reset_count(&i915->gpu_error);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
t->reset_engine[id] =
i915_reset_engine_count(&i915->gpu_error, engine);
@@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (igt_flush_test(i915))
return -EIO;
if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
@@ -62,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t)
return -EIO;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, &i915->gt, id) {
if (t->reset_engine[id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 7ec8f8b049c6..9f8590b868a9 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -22,7 +22,7 @@ void igt_global_reset_lock(struct intel_gt *gt)
wait_event(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, gt->i915, id) {
+ for_each_engine(engine, gt, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
@@ -35,7 +35,7 @@ void igt_global_reset_unlock(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, gt->i915, id)
+ for_each_engine(engine, gt, id)
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
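The reset flags here double as tiny bit-locks: I915_RESET_ENGINE + id is taken with test_and_set_bit() and contention sleeps via wait_on_bit(). The generic idiom, as a sketch (BIT_NR is a placeholder; the waking side lives in the reset core, outside this hunk):

	/* acquire */
	while (test_and_set_bit(BIT_NR, &flags))
		wait_on_bit(&flags, BIT_NR, TASK_UNINTERRUPTIBLE);

	/* release */
	clear_bit_unlock(BIT_NR, &flags);
	wake_up_bit(&flags, BIT_NR);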
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 11f04ad48e68..ee8450b871da 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -147,7 +147,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
- rq->timeline->has_initial_breadcrumb) {
+ i915_request_timeline(rq)->has_initial_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
new file mode 100644
index 000000000000..19e1cca8f143
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_region.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/i915_random.h"
+
+static void close_objects(struct intel_memory_region *mem,
+ struct list_head *objects)
+{
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+ /* Don't pollute the memory region between tests */
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+
+ cond_resched();
+
+ i915_gem_drain_freed_objects(i915);
+}
+
+static int igt_mock_fill(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ resource_size_t total = resource_size(&mem->region);
+ resource_size_t page_size;
+ resource_size_t rem;
+ unsigned long max_pages;
+ unsigned long page_num;
+ LIST_HEAD(objects);
+ int err = 0;
+
+ page_size = mem->mm.chunk_size;
+ max_pages = div64_u64(total, page_size);
+ rem = total;
+
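+ /*
+ * Stepping the object size over successive primes samples many
+ * distinct sizes cheaply without aliasing on power-of-two
+ * boundaries (rationale inferred, not stated by this patch).
+ */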
+ for_each_prime_number_from(page_num, 1, max_pages) {
+ resource_size_t size = page_num * page_size;
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_create_region(mem, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+ rem -= size;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+ if (err == -ENXIO) {
+ if (page_num * page_size <= rem) {
+ pr_err("%s failed, space still left in region\n",
+ __func__);
+ err = -EINVAL;
+ } else {
+ err = 0;
+ }
+ }
+
+ close_objects(mem, &objects);
+
+ return err;
+}
+
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+ struct list_head *objects,
+ u64 size,
+ unsigned int flags)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_region(mem, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto put;
+
+ list_add(&obj->st_link, objects);
+ return obj;
+
+put:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
+ __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned long n_objects;
+ LIST_HEAD(objects);
+ LIST_HEAD(holes);
+ I915_RND_STATE(prng);
+ resource_size_t total;
+ resource_size_t min;
+ u64 target;
+ int err = 0;
+
+ total = resource_size(&mem->region);
+
+ /* Min size */
+ obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s min object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Max size */
+ obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s max object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /* Internal fragmentation should not bleed into the object size */
+ target = i915_prandom_u64_state(&prng);
+ div64_u64_rem(target, total, &target);
+ target = round_up(target, PAGE_SIZE);
+ target = max_t(u64, PAGE_SIZE, target);
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ if (obj->base.size != target) {
+ pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
+ obj->base.size, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ if (obj->mm.pages->nents != 1) {
+ pr_err("%s object spans multiple sg entries\n", __func__);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Try to fragment the address space, such that half of it is free, but
+ * the max contiguous block size is SZ_64K.
+ */
+
+ target = SZ_64K;
+ n_objects = div64_u64(total, target);
+
+ while (n_objects--) {
+ struct list_head *list;
+
+ if (n_objects % 2)
+ list = &holes;
+ else
+ list = &objects;
+
+ obj = igt_object_create(mem, list, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+ }
+
+ close_objects(mem, &holes);
+
+ min = target;
+ target = total >> 1;
+
+ /* Make sure we can still allocate all the fragmented space */
+ obj = igt_object_create(mem, &objects, target, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_close_objects;
+ }
+
+ igt_object_release(obj);
+
+ /*
+ * Even though we have enough free space, we don't have a big enough
+ * contiguous block. Make sure that holds true.
+ */
+
+ do {
+ bool should_fail = target > min;
+
+ obj = igt_object_create(mem, &objects, target,
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (should_fail != IS_ERR(obj)) {
+ pr_err("%s target allocation(%llx) mismatch\n",
+ __func__, target);
+ err = -EINVAL;
+ goto err_close_objects;
+ }
+
+ target >>= 1;
+ } while (target >= mem->mm.chunk_size);
+
+err_close_objects:
+ list_splice_tail(&holes, &objects);
+ close_objects(mem, &objects);
+ return err;
+}
+
+static int igt_gpu_write_dw(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dword,
+ u32 value)
+{
+ return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+ vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ struct i915_vma *vma;
+ int *order;
+ int i, n;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ n = 0;
+ count = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ vm = ce->vm;
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ if (!n)
+ return 0;
+
+ order = i915_random_order(count * count, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_free;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_free;
+
+ i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
+ do {
+ u32 rng = prandom_u32_state(&prng);
+ u32 dword = offset_in_page(rng) / 4;
+
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = igt_gpu_write_dw(ce, vma, dword, rng);
+ if (err)
+ break;
+
+ err = igt_cpu_check(obj, dword, rng);
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_gem_context_unlock_engines(ctx);
+
+out_free:
+ kfree(order);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
+static int igt_lmem_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+static int igt_lmem_write_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ u32 sz;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+ obj = i915_gem_object_create_lmem(i915, sz, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_file;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ err = igt_gpu_write(ctx, obj);
+ if (err)
+ pr_err("igt_gpu_write failed(%d)\n", err);
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
+static struct intel_engine_cs *
+random_engine_class(struct drm_i915_private *i915,
+ unsigned int class,
+ struct rnd_state *prng)
+{
+ struct intel_engine_cs *engine;
+ unsigned int count;
+
+ count = 0;
+ for (engine = intel_engine_lookup_user(i915, class, 0);
+ engine && engine->uabi_class == class;
+ engine = rb_entry_safe(rb_next(&engine->uabi_node),
+ typeof(*engine), uabi_node))
+ count++;
+
+ count = i915_prandom_u32_max_state(count, prng);
+ return intel_engine_lookup_user(i915, class, count);
+}
+
+static int igt_lmem_write_cpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ u32 bytes[] = {
+ 0, /* rng placeholder */
+ sizeof(u32),
+ sizeof(u64),
+ 64, /* cl */
+ PAGE_SIZE,
+ PAGE_SIZE - sizeof(u32),
+ PAGE_SIZE - sizeof(u64),
+ PAGE_SIZE - 64,
+ };
+ struct intel_engine_cs *engine;
+ u32 *vaddr;
+ u32 sz;
+ u32 i;
+ int *order;
+ int count;
+ int err;
+
+ engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
+ if (!engine)
+ return 0;
+
+ pr_info("%s: using %s\n", __func__, engine->name);
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+ sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+ obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto out_put;
+ }
+
+ /* Put the pages into a known state -- from the gpu for added fun */
+ err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ goto out_unpin;
+
+ count = ARRAY_SIZE(bytes);
+ order = i915_random_order(count * count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_unpin;
+ }
+
+ /* We want to throw in a random width/align */
+ bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+ sizeof(u32));
+
+ i = 0;
+ do {
+ u32 offset;
+ u32 align;
+ u32 dword;
+ u32 size;
+ u32 val;
+
+ size = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = bytes[order[i] % count];
+ i = (i + 1) % (count * count);
+
+ align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+ offset = igt_random_offset(&prng, 0, obj->base.size,
+ size, align);
+
+ val = prandom_u32_state(&prng);
+ memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+ size / sizeof(u32));
+
+ /*
+ * Sample random dw -- don't waste precious time reading every
+ * single dw.
+ */
+ dword = igt_random_offset(&prng, offset,
+ offset + size,
+ sizeof(u32), sizeof(u32));
+ dword /= sizeof(u32);
+ if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+ pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+ __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+ size, align, offset);
+ err = -EINVAL;
+ break;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+ i915_gem_object_unpin_map(obj);
+out_put:
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_mock_contiguous),
+ };
+ struct intel_memory_region *mem;
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+ if (IS_ERR(mem)) {
+ pr_err("failed to create memory region\n");
+ err = PTR_ERR(mem);
+ goto out_unref;
+ }
+
+ err = i915_subtests(tests, mem);
+
+ intel_memory_region_put(mem);
+out_unref:
+ drm_dev_put(&i915->drm);
+ return err;
+}
+
+int intel_memory_region_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_lmem_create),
+ SUBTEST(igt_lmem_write_cpu),
+ SUBTEST(igt_lmem_write_gpu),
+ };
+
+ if (!HAS_LMEM(i915)) {
+ pr_info("device lacks LMEM support, skipping\n");
+ return 0;
+ }
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
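To make igt_mock_contiguous()'s fragmentation trick concrete: after the alternating SZ_64K allocations, the region is laid out as

	[obj][hole][obj][hole] ... [obj][hole]

so freeing the holes leaves total/2 bytes free while the largest contiguous run is only SZ_64K. The final loop therefore expects every I915_BO_ALLOC_CONTIGUOUS request with target > min (== SZ_64K) to fail and every request with target <= min to succeed, halving target each pass down to the chunk size.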
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 86815c6072a1..0e4e6be0101d 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -67,6 +67,7 @@ static int intel_shadow_table_check(void)
} reg_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
+ { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
};
const i915_reg_t *reg;
unsigned int i, j;
@@ -101,6 +102,7 @@ int intel_uncore_mock_selftests(void)
{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
+ { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
};
int err, i;
@@ -138,19 +140,19 @@ static int live_forcewake_ops(void *arg)
}
};
const struct reg *r;
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct intel_uncore_forcewake_domain *domain;
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
unsigned int tmp;
int err = 0;
- GEM_BUG_ON(i915->gt.awake);
+ GEM_BUG_ON(gt->awake);
/* vlv/chv with their pcu behave differently wrt reads */
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
pr_debug("PCU fakes forcewake badly; skipping\n");
return 0;
}
@@ -168,15 +170,15 @@ static int live_forcewake_ops(void *arg)
/* We have to pick carefully to get the exact behaviour we need */
for (r = registers; r->name; r++)
- if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ if (r->platforms & INTEL_INFO(gt->i915)->gen_mask)
break;
if (!r->name) {
pr_debug("Forcewaked register not known for %s; skipping\n",
- intel_platform_name(INTEL_INFO(i915)->platform));
+ intel_platform_name(INTEL_INFO(gt->i915)->platform));
return 0;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -186,7 +188,7 @@ static int live_forcewake_ops(void *arg)
intel_uncore_fw_release_timer(&domain->timer);
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt, id) {
i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
enum forcewake_domains fw_domains;
@@ -247,22 +249,22 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return err;
}
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
- struct drm_i915_private *dev_priv = arg;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = arg;
+ struct intel_uncore *uncore = gt->uncore;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
- !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
+ !IS_VALLEYVIEW(gt->i915) &&
+ !IS_CHERRYVIEW(gt->i915))
return 0;
/*
@@ -281,7 +283,7 @@ static int live_forcewake_domains(void *arg)
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
- (void)I915_READ_FW(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
@@ -298,7 +300,7 @@ static int live_forcewake_domains(void *arg)
check_for_unclaimed_mmio(uncore);
- (void)I915_READ(reg);
+ intel_uncore_posting_read_fw(uncore, reg);
if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
@@ -310,21 +312,23 @@ static int live_forcewake_domains(void *arg)
return err;
}
+static int live_fw_table(void *arg)
+{
+ struct intel_gt *gt = arg;
+
+ /* Confirm the table we load is still valid */
+ return intel_fw_table_check(gt->uncore->fw_domains_table,
+ gt->uncore->fw_domains_table_entries,
+ INTEL_GEN(gt->i915) >= 9);
+}
+
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_fw_table),
SUBTEST(live_forcewake_ops),
SUBTEST(live_forcewake_domains),
};
- int err;
-
- /* Confirm the table we load is still valid */
- err = intel_fw_table_check(i915->uncore.fw_domains_table,
- i915->uncore.fw_domains_table_entries,
- INTEL_GEN(i915) >= 9);
- if (err)
- return err;
-
- return i915_subtests(tests, i915);
+ return intel_gt_live_subtests(tests, &i915->gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 01a89c071bf5..27ed3cee6a9b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -26,27 +26,29 @@
#include <linux/pm_runtime.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "intel_memory_region.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "mock_region.h"
#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"
void mock_device_flush(struct drm_i915_private *i915)
{
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
do {
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt, id)
mock_engine_flush(engine);
- } while (i915_retire_requests(i915));
+ } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
}
static void mock_device_release(struct drm_device *dev)
@@ -55,31 +57,23 @@ static void mock_device_release(struct drm_device *dev)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- flush_work(&i915->gem.idle_work);
i915_gem_drain_workqueue(i915);
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, &i915->gt, id)
mock_engine_free(engine);
- i915_gem_contexts_fini(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_driver_release__contexts(i915);
intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
- mutex_lock(&i915->drm.struct_mutex);
mock_fini_ggtt(&i915->ggtt);
- mutex_unlock(&i915->drm.struct_mutex);
-
destroy_workqueue(i915->wq);
- i915_gemfs_fini(i915);
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -103,14 +97,6 @@ static void release_dev(struct device *dev)
kfree(pdev);
}
-static void mock_retire_work_handler(struct work_struct *work)
-{
-}
-
-static void mock_idle_work_handler(struct work_struct *work)
-{
-}
-
static int pm_domain_resume(struct device *dev)
{
return pm_generic_runtime_resume(dev);
@@ -178,10 +164,15 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(&i915->uncore);
+ mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ intel_memory_regions_hw_probe(i915);
+
+ mock_uncore_init(&i915->uncore, i915);
+
i915_gem_init__mm(i915);
intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
+ i915->gt.awake = -ENODEV;
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
@@ -189,15 +180,8 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_contexts(i915);
- INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
- INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler);
-
- i915->gt.awake = true;
-
intel_timelines_init(i915);
- mutex_lock(&i915->drm.struct_mutex);
-
mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->engine_mask = BIT(0);
@@ -214,21 +198,18 @@ struct drm_i915_private *mock_gem_device(void)
goto err_context;
intel_engines_driver_register(i915);
- mutex_unlock(&i915->drm.struct_mutex);
-
- WARN_ON(i915_gemfs_init(i915));
return i915;
err_context:
- i915_gem_contexts_fini(i915);
+ i915_gem_driver_release__contexts(i915);
err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
+ intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
drm_dev_fini(&i915->drm);
put_device:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index e62a67e0f79c..20ac3844edec 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma,
u32 flags)
{
GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
- vma->flags |= I915_VMA_LOCAL_BIND;
+ set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));
return 0;
}
@@ -63,6 +63,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
if (!ppgtt)
return NULL;
+ ppgtt->vm.gt = &i915->gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
@@ -86,7 +87,7 @@ static int mock_bind_ggtt(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
return 0;
}
@@ -117,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
- intel_gt_init_hw(i915);
+ i915->gt.ggtt = ggtt;
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
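The mock_gtt.c hunks follow a wider change in this series: vma->flags becomes atomic, so plain read-modify-write updates are replaced with atomic bitops. As a sketch of the equivalences (using the __i915_vma_flags() accessor seen above):

	/* old, non-atomic RMW: */
	vma->flags |= I915_VMA_LOCAL_BIND;

	/* new, multi-bit: */
	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);

	/* new, single-bit: */
	set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma));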
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
new file mode 100644
index 000000000000..b2ad41c27e67
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_region.h"
+#include "intel_memory_region.h"
+
+#include "mock_region.h"
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+ .get_pages = i915_gem_object_get_pages_buddy,
+ .put_pages = i915_gem_object_put_pages_buddy,
+ .release = i915_gem_object_release_memory_region,
+};
+
+static struct drm_i915_gem_object *
+mock_object_create(struct intel_memory_region *mem,
+ resource_size_t size,
+ unsigned int flags)
+{
+ static struct lock_class_key lock_class;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+
+ if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc();
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+
+ obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+ i915_gem_object_init_memory_region(obj, mem, flags);
+
+ return obj;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+ .init = intel_memory_region_init_buddy,
+ .release = intel_memory_region_release_buddy,
+ .create_object = mock_object_create,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start)
+{
+ return intel_memory_region_create(i915, start, size, min_page_size,
+ io_start, &mock_region_ops);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h
new file mode 100644
index 000000000000..24608089d833
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t min_page_size,
+ resource_size_t io_start);
+
+#endif /* !__MOCK_REGION_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 49585f16d4a2..ca57e4008701 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,11 @@ __nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct intel_uncore *uncore)
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
+ intel_uncore_init_early(uncore, i915);
+
ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index dacb36b5ffcd..8a2cc553f466 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,7 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct intel_uncore *uncore);
+void mock_uncore_init(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 695f307f36b2..208069faf183 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index e7ce17503ae1..35518e5de356 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -13,6 +13,7 @@
#include <video/of_display_timing.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 2e2ed653e9c6..ec32e1c67335 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -676,8 +677,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
}
if (panel)
- bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
&priv->dma_hwdesc_phys,
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index bb4ddc6bb0a6..571dc369a7e9 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -9,5 +9,6 @@ config DRM_LIMA
depends on COMMON_CLK
depends on OF
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index 38cc70281ba5..a85444b0a1d4 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -13,9 +13,7 @@ lima-y := \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
- lima_gem_prime.o \
lima_dlbu.o \
- lima_bcast.o \
- lima_object.o
+ lima_bcast.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index d86b8d81a483..19829b543024 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -105,7 +105,8 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
if (err != -EPROBE_DEFER)
@@ -313,7 +314,7 @@ int lima_device_init(struct lima_device *ldev)
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
- &ldev->dlbu_dma, GFP_KERNEL);
+ &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 75ec703d22e0..124efe4fa97b 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -12,7 +12,6 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
@@ -240,16 +239,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};
-static const struct file_operations lima_drm_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .mmap = lima_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -258,10 +248,6 @@ static struct drm_driver lima_drm_driver = {
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
- .gem_free_object_unlocked = lima_gem_free_object,
- .gem_open_object = lima_gem_object_open,
- .gem_close_object = lima_gem_object_close,
- .gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
@@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = {
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = lima_gem_create_object,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
- .gem_prime_mmap = lima_gem_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
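DEFINE_DRM_GEM_FOPS() swaps the hand-rolled table for the canonical GEM file_operations; in this era the macro expands to roughly:

	static const struct file_operations lima_drm_driver_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.compat_ioctl	= drm_compat_ioctl,
		.poll		= drm_poll,
		.read		= drm_read,
		.llseek		= noop_llseek,
		.mmap		= drm_gem_mmap,
	};

Note that .mmap becomes drm_gem_mmap, which dispatches to the per-object funcs (here the shmem helpers) in place of the deleted lima_gem_mmap().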
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4da21353c3a2..d0059d8c97d8 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -3,7 +3,7 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
-#include <linux/pfn_t.h>
+#include <linux/pagemap.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -13,40 +13,55 @@
#include "lima_drv.h"
#include "lima_gem.h"
-#include "lima_gem_prime.h"
#include "lima_vm.h"
-#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
- struct lima_bo *bo;
- struct lima_device *ldev = to_lima_dev(dev);
+ gfp_t mask;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ struct sg_table *sgt;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ obj = &shmem->base;
- bo = lima_bo_create(ldev, size, flags, NULL);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ /* The Mali Utgard GPU only supports a 32-bit address space */
+ mask = mapping_gfp_mask(obj->filp->f_mapping);
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- err = drm_gem_handle_create(file, &bo->gem, handle);
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
+
+ err = drm_gem_handle_create(file, obj, handle);
+out:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(obj);
return err;
}
-void lima_gem_free_object(struct drm_gem_object *obj)
+static void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
- lima_bo_destroy(bo);
+ drm_gem_shmem_free_object(obj);
}
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
return lima_vm_bo_add(vm, bo, true);
}
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
@@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
lima_vm_bo_del(vm, bo);
}
+static const struct drm_gem_object_funcs lima_gem_funcs = {
+ .free = lima_gem_free_object,
+ .open = lima_gem_object_open,
+ .close = lima_gem_object_close,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct lima_bo *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+
+ bo->base.base.funcs = &lima_gem_funcs;
+
+ return &bo->base.base;
+}
+
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
- int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
@@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
*va = lima_vm_get_va(vm, bo);
- err = drm_gem_create_mmap_offset(obj);
- if (!err)
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
- return err;
-}
-
-static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct lima_bo *bo = to_lima_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
-
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
-
- return vmf_insert_mixed(vma, vmf->address, pfn);
-}
-
-const struct vm_operations_struct lima_gem_vm_ops = {
- .fault = lima_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-void lima_set_vma_flags(struct vm_area_struct *vma)
-{
- pgprot_t prot = vm_get_page_prot(vma->vm_flags);
-
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_page_prot = pgprot_writecombine(prot);
-}
-
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
return 0;
}
@@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
int err = 0;
if (!write) {
- err = dma_resv_reserve_shared(bo->gem.resv, 1);
+ err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
if (err)
return err;
}
@@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
-}
-
-static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i, ret = 0, contended, slow_locked = -1;
-
- ww_acquire_init(ctx, &reservation_ww_class);
-
-retry:
- for (i = 0; i < nr_bos; i++) {
- if (i == slow_locked) {
- slow_locked = -1;
- continue;
- }
-
- ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
- if (ret < 0) {
- contended = i;
- goto err;
- }
- }
-
- ww_acquire_done(ctx);
- return 0;
-
-err:
- for (i--; i >= 0; i--)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
-
- if (slow_locked >= 0)
- ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
-
- if (ret == -EDEADLK) {
- /* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(
- &bos[contended]->gem.resv->lock, ctx);
- if (!ret) {
- slow_locked = contended;
- goto retry;
- }
- }
- ww_acquire_fini(ctx);
-
- return ret;
-}
-
-static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
- struct ww_acquire_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < nr_bos; i++)
- ww_mutex_unlock(&bos[i]->gem.resv->lock);
- ww_acquire_fini(ctx);
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
bos[i] = bo;
}
- err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
if (err)
goto err_out0;
@@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
else
- dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
+ dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
}
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
@@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err_out2:
lima_sched_task_fini(submit->task);
err_out1:
- lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+ submit->nr_bos, &ctx);
err_out0:
for (i = 0; i < submit->nr_bos; i++) {
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
- drm_gem_object_put_unlocked(&bos[i]->gem);
+ drm_gem_object_put_unlocked(&bos[i]->base.base);
}
if (out_sync)
drm_syncobj_put(out_sync);
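With lima_gem_funcs attached in lima_gem_create_object(), the DRM core dispatches through the per-object ops, and the driver-wide callbacks removed from lima_drm_driver are no longer consulted. Conceptually, the core's fallback of this era looks like this sketch:

	/* e.g. on final unref, in drm_gem_object_free(): */
	if (obj->funcs)
		obj->funcs->free(obj);
	else if (dev->driver->gem_free_object_unlocked)
		dev->driver->gem_free_object_unlocked(obj);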
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 556111a01135..1800feb3e47f 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -4,19 +4,37 @@
#ifndef __LIMA_GEM_H__
#define __LIMA_GEM_H__
-struct lima_bo;
+#include <drm/drm_gem_shmem_helper.h>
+
struct lima_submit;
-extern const struct vm_operations_struct lima_gem_vm_ops;
+struct lima_bo {
+ struct drm_gem_shmem_object base;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base);
+}
+
+static inline size_t lima_bo_size(struct lima_bo *bo)
+{
+ return bo->base.base.size;
+}
+
+static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
+{
+ return bo->base.base.resv;
+}
-struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
-void lima_gem_free_object(struct drm_gem_object *obj);
-int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
-void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
-int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
deleted file mode 100644
index e3eb251e0a12..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <linux/dma-buf.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-
-#include "lima_device.h"
-#include "lima_object.h"
-#include "lima_gem.h"
-#include "lima_gem_prime.h"
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct lima_device *ldev = to_lima_dev(dev);
- struct lima_bo *bo;
-
- bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
-
- return &bo->gem;
-}
-
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct lima_bo *bo = to_lima_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
-
- return drm_prime_pages_to_sg(bo->pages, npages);
-}
-
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret)
- return ret;
-
- lima_set_vma_flags(vma);
- return 0;
-}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
deleted file mode 100644
index 34b4d35c21e3..000000000000
--- a/drivers/gpu/drm/lima/lima_gem_prime.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_GEM_PRIME_H__
-#define __LIMA_GEM_PRIME_H__
-
-struct drm_gem_object *lima_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
-int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 8e1651d6a61f..97ec09dee572 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -8,7 +8,6 @@
#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
-#include "lima_object.h"
#include "lima_regs.h"
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
deleted file mode 100644
index 87123b1d083c..000000000000
--- a/drivers/gpu/drm/lima/lima_object.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#include <drm/drm_prime.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-
-#include "lima_object.h"
-
-void lima_bo_destroy(struct lima_bo *bo)
-{
- if (bo->sgt) {
- kfree(bo->pages);
- drm_prime_gem_destroy(&bo->gem, bo->sgt);
- } else {
- if (bo->pages_dma_addr) {
- int i, npages = bo->gem.size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (bo->pages_dma_addr[i])
- dma_unmap_page(bo->gem.dev->dev,
- bo->pages_dma_addr[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
- }
-
- if (bo->pages)
- drm_gem_put_pages(&bo->gem, bo->pages, true, true);
- }
-
- kfree(bo->pages_dma_addr);
- drm_gem_object_release(&bo->gem);
- kfree(bo);
-}
-
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags)
-{
- struct lima_bo *bo;
- int err;
-
- size = PAGE_ALIGN(size);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&bo->lock);
- INIT_LIST_HEAD(&bo->va);
-
- err = drm_gem_object_init(dev->ddev, &bo->gem, size);
- if (err) {
- kfree(bo);
- return ERR_PTR(err);
- }
-
- return bo;
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt)
-{
- int i, err;
- size_t npages;
- struct lima_bo *bo, *ret;
-
- bo = lima_bo_create_struct(dev, size, flags);
- if (IS_ERR(bo))
- return bo;
-
- npages = bo->gem.size >> PAGE_SHIFT;
-
- bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bo->pages_dma_addr) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- if (sgt) {
- bo->sgt = sgt;
-
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages) {
- ret = ERR_PTR(-ENOMEM);
- goto err_out;
- }
-
- err = drm_prime_sg_to_page_addr_arrays(
- sgt, bo->pages, bo->pages_dma_addr, npages);
- if (err) {
- ret = ERR_PTR(err);
- goto err_out;
- }
- } else {
- mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
- bo->pages = drm_gem_get_pages(&bo->gem);
- if (IS_ERR(bo->pages)) {
- ret = ERR_CAST(bo->pages);
- bo->pages = NULL;
- goto err_out;
- }
-
- for (i = 0; i < npages; i++) {
- dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = ERR_PTR(-EFAULT);
- goto err_out;
- }
- bo->pages_dma_addr[i] = addr;
- }
-
- }
-
- return bo;
-
-err_out:
- lima_bo_destroy(bo);
- return ret;
-}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
deleted file mode 100644
index 31ca2d8dc0a1..000000000000
--- a/drivers/gpu/drm/lima/lima_object.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
-
-#ifndef __LIMA_OBJECT_H__
-#define __LIMA_OBJECT_H__
-
-#include <drm/drm_gem.h>
-
-#include "lima_device.h"
-
-struct lima_bo {
- struct drm_gem_object gem;
-
- struct page **pages;
- dma_addr_t *pages_dma_addr;
- struct sg_table *sgt;
- void *vaddr;
-
- struct mutex lock;
- struct list_head va;
-};
-
-static inline struct lima_bo *
-to_lima_bo(struct drm_gem_object *obj)
-{
- return container_of(obj, struct lima_bo, gem);
-}
-
-struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt);
-void lima_bo_destroy(struct lima_bo *bo);
-void *lima_bo_vmap(struct lima_bo *bo);
-void lima_bo_vunmap(struct lima_bo *bo);
-
-#endif
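
Note on the lima refactor above: lima_object.{c,h} are removed outright, taking the hand-rolled pages[]/pages_dma_addr[] bookkeeping with them. The hunks that follow reference bos[i]->base.base and bo->base.sgt, which suggests struct lima_bo now embeds a helper-managed GEM object (presumably drm_gem_shmem_object) and is declared in lima_gem.h. A minimal sketch of that embedding, under that assumption (field layout illustrative, not taken from this diff):

    struct lima_bo {
            struct drm_gem_shmem_object base;  /* base.base is the struct drm_gem_object */

            struct mutex lock;
            struct list_head va;
    };

    static inline struct lima_bo *to_lima_bo(struct drm_gem_object *obj)
    {
            return container_of(obj, struct lima_bo, base.base);
    }

    static inline size_t lima_bo_size(struct lima_bo *bo)
    {
            return bo->base.base.size;  /* would back the lima_bo_size() used below */
    }
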
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 4127cacac454..f522c5f99729 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -10,7 +10,7 @@
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
-#include "lima_object.h"
+#include "lima_gem.h"
struct lima_fence {
struct dma_fence base;
@@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task,
return -ENOMEM;
for (i = 0; i < num_bos; i++)
- drm_gem_object_get(&bos[i]->gem);
+ drm_gem_object_get(&bos[i]->base.base);
err = drm_sched_job_init(&task->base, &context->base, vm);
if (err) {
@@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task)
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
- drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ drm_gem_object_put_unlocked(&task->bos[i]->base.base);
kfree(task->bos);
}
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 19e88ca16527..840e2350d872 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -6,7 +6,7 @@
#include "lima_device.h"
#include "lima_vm.h"
-#include "lima_object.h"
+#include "lima_gem.h"
#include "lima_regs.h"
struct lima_bo_va {
@@ -32,7 +32,7 @@ struct lima_bo_va {
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
-static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
u32 addr;
@@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
}
}
-static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
- u32 start, u32 end)
+static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
- u64 addr;
- int i = 0;
-
- for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
- u32 pbe = LIMA_PBE(addr);
- u32 bte = LIMA_BTE(addr);
-
- if (!vm->bts[pbe].cpu) {
- dma_addr_t pts;
- u32 *pd;
- int j;
-
- vm->bts[pbe].cpu = dma_alloc_wc(
- vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
- &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
- if (!vm->bts[pbe].cpu) {
- if (addr != start)
- lima_vm_unmap_page_table(vm, start, addr - 1);
- return -ENOMEM;
- }
-
- pts = vm->bts[pbe].dma;
- pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
- for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
- pd[j] = pts | LIMA_VM_FLAG_PRESENT;
- pts += LIMA_PAGE_SIZE;
- }
+ u32 pbe = LIMA_PBE(va);
+ u32 bte = LIMA_BTE(va);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu)
+ return -ENOMEM;
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
}
-
- vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
}
+ vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;
+
return 0;
}
@@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
struct lima_bo_va *bo_va;
- int err;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
mutex_lock(&bo->lock);
@@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
mutex_lock(&vm->lock);
- err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
if (err)
goto err_out1;
- err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
- if (err)
- goto err_out2;
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ bo_va->node.start + offset);
+ if (err)
+ goto err_out2;
+
+ offset += PAGE_SIZE;
+ }
mutex_unlock(&vm->lock);
@@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
return 0;
err_out2:
+ if (offset)
+ lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1);
drm_mm_remove_node(&bo_va->node);
err_out1:
mutex_unlock(&vm->lock);
@@ -168,8 +166,8 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
- lima_vm_unmap_page_table(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ lima_vm_unmap_range(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev)
kref_init(&vm->refcount);
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!vm->pd.cpu)
goto err_out0;
if (dev->dlbu_cpu) {
- int err = lima_vm_map_page_table(
- vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
- LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ int err = lima_vm_map_page(
+ vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
if (err)
goto err_out1;
}
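
On the lima_vm.c rework: lima_vm_map_page_table() consumed a flat dma_addr_t array and rolled back internally on failure, whereas the new lima_vm_map_page() maps exactly one page and leaves rollback to the caller (the lima_vm_unmap_range() call under err_out2). Walking the BO's sg_table covers imported dma-bufs and shmem-backed objects with a single code path, which is what makes pages_dma_addr[] redundant. Condensed shape of the caller-side walk from the hunk above:

    struct sg_dma_page_iter sg_iter;
    u32 offset = 0;

    for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) {
            err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
                                   bo_va->node.start + offset);
            if (err)
                    break;          /* caller unmaps [start, start + offset) */
            offset += PAGE_SIZE;
    }
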
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9a09eba53182..5649887d2b90 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev)
}
if (!match) {
dev_err(dev, "no matching components\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto clk_disable;
}
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
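
The mcde_probe() hunk is a plain error-unwind fix: returning -ENODEV directly after the clocks were enabled would leak them, so the failure now routes through the existing cleanup label. Skeleton of the pattern (label name taken from the hunk; the surrounding code is assumed, not shown in this diff):

    ret = clk_prepare_enable(clk);
    if (ret)
            return ret;

    if (!match) {
            dev_err(dev, "no matching components\n");
            ret = -ENODEV;
            goto clk_disable;       /* undo clk_prepare_enable() */
    }
    ...
    return 0;

    clk_disable:
            clk_disable_unprepare(clk);
            return ret;
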
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index f9c9e32b299c..d6214d3c8b33 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -946,8 +946,8 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
}
}
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_DSI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge)) {
dev_err(dev, "error adding panel bridge\n");
return PTR_ERR(bridge);
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 82ae49c64221..8067a4be8311 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,6 +12,8 @@ mediatek-drm-y := mtk_disp_color.o \
mtk_drm_plane.o \
mtk_dsi.o \
mtk_mipi_tx.o \
+ mtk_mt8173_mipi_tx.o \
+ mtk_mt8183_mipi_tx.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 21851756c579..4a55bb6e2213 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <drm/drm_fourcc.h>
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
@@ -19,6 +21,8 @@
#define DISP_REG_OVL_EN 0x000c
#define DISP_REG_OVL_RST 0x0014
#define DISP_REG_OVL_ROI_SIZE 0x0020
+#define DISP_REG_OVL_DATAPATH_CON 0x0024
+#define OVL_BGCLR_SEL_IN BIT(2)
#define DISP_REG_OVL_ROI_BGCLR 0x0028
#define DISP_REG_OVL_SRC_CON 0x002c
#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
@@ -31,7 +35,9 @@
#define DISP_REG_OVL_ADDR_MT8173 0x0f40
#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
-#define OVL_RDMA_MEM_GMC 0x40402020
+#define GMC_THRESHOLD_BITS 16
+#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
+#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
#define OVL_CON_BYTE_SWAP BIT(24)
#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
@@ -46,9 +52,13 @@
OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+#define OVL_CON_VIRT_FLIP BIT(9)
+#define OVL_CON_HORZ_FLIP BIT(10)
struct mtk_disp_ovl_data {
unsigned int addr;
+ unsigned int gmc_bits;
+ unsigned int layer_nr;
bool fmt_rgb565_is_0;
};
@@ -126,15 +136,65 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
{
- return 4;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
+
+ return ovl->data->layer_nr;
+}
+
+static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp)
+{
+ return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
+}
+
+static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *mtk_state)
+{
+ struct drm_plane_state *state = &mtk_state->base;
+ unsigned int rotation = 0;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ rotation &= ~DRM_MODE_ROTATE_0;
+
+ /* We can only do reflection, not rotation */
+ if ((rotation & DRM_MODE_ROTATE_MASK) != 0)
+ return -EINVAL;
+
+ /*
+ * TODO: Rotating/reflecting YUV buffers is not supported at this time.
+ * Only RGB[AX] variants are supported.
+ */
+ if (state->fb->format->is_yuv && rotation != 0)
+ return -EINVAL;
+
+ state->rotation = rotation;
+
+ return 0;
}
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
+ unsigned int gmc_thrshd_l;
+ unsigned int gmc_thrshd_h;
+ unsigned int gmc_value;
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
- writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
+ gmc_thrshd_l = GMC_THRESHOLD_LOW >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
+ (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
+ if (ovl->data->gmc_bits == 10)
+ gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
+ else
+ gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
+ gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
+ writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
reg = reg | BIT(idx);
@@ -207,6 +267,16 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
if (idx != 0)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
+ if (pending->rotation & DRM_MODE_REFLECT_Y) {
+ con |= OVL_CON_VIRT_FLIP;
+ addr += (pending->height - 1) * pending->pitch;
+ }
+
+ if (pending->rotation & DRM_MODE_REFLECT_X) {
+ con |= OVL_CON_HORZ_FLIP;
+ addr += pending->pitch - 1;
+ }
+
writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
@@ -217,16 +287,38 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
mtk_ovl_layer_on(comp, idx);
}
+static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg | OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
+static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON);
+ reg = reg & ~OVL_BGCLR_SEL_IN;
+ writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.config = mtk_ovl_config,
.start = mtk_ovl_start,
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .supported_rotations = mtk_ovl_supported_rotations,
.layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
+ .layer_check = mtk_ovl_layer_check,
.layer_config = mtk_ovl_layer_config,
+ .bgclr_in_on = mtk_ovl_bgclr_in_on,
+ .bgclr_in_off = mtk_ovl_bgclr_in_off,
};
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
@@ -276,7 +368,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ priv->data = of_device_get_match_data(dev);
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node,
+ priv->data->layer_nr == 4 ?
+ MTK_DISP_OVL :
+ MTK_DISP_OVL_2L);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
return comp_id;
@@ -289,8 +386,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
- priv->data = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -316,11 +411,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT2701,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = false,
};
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
.addr = DISP_REG_OVL_ADDR_MT8173,
+ .gmc_bits = 8,
+ .layer_nr = 4,
.fmt_rgb565_is_0 = true,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index be6d95c5ff25..01fa8b8d763d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -17,6 +17,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 34a731755791..f80a8ba75977 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -207,6 +207,28 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
+static
+struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
+ struct drm_plane *plane,
+ unsigned int *local_layer)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *comp;
+ int i, count = 0;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ comp = mtk_crtc->ddp_comp[i];
+ if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
+ *local_layer = plane->index - count;
+ return comp;
+ }
+ count += mtk_ddp_comp_layer_nr(comp);
+ }
+
+ WARN(1, "Failed to find component for plane %d\n", plane->index);
+ return NULL;
+}
+
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
struct drm_crtc *crtc = &mtk_crtc->base;
@@ -272,6 +294,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_on(comp);
+
mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
mtk_ddp_comp_start(comp);
}
@@ -280,10 +305,12 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
+ struct mtk_ddp_comp *comp;
+ unsigned int local_layer;
plane_state = to_mtk_plane_state(plane->state);
- mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
- plane_state);
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
}
return 0;
@@ -301,8 +328,12 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
int i;
DRM_DEBUG_DRIVER("%s\n", __func__);
- for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+ if (i == 1)
+ mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
+ }
+
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
@@ -327,6 +358,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
+ unsigned int local_layer;
/*
* TODO: instead of updating the registers here, we should prepare
@@ -348,15 +380,30 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
plane_state = to_mtk_plane_state(plane->state);
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(comp, i, plane_state);
- plane_state->pending.config = false;
- }
+ if (!plane_state->pending.config)
+ continue;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+ &local_layer);
+
+ mtk_ddp_comp_layer_config(comp, local_layer,
+ plane_state);
+ plane_state->pending.config = false;
}
mtk_crtc->pending_planes = false;
}
}
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state)
+{
+ unsigned int local_layer;
+ struct mtk_ddp_comp *comp;
+
+ comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+ return mtk_ddp_comp_layer_check(comp, local_layer, state);
+}
+
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -518,14 +565,65 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
mtk_drm_finish_page_flip(mtk_crtc);
}
+static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx)
+{
+ struct mtk_ddp_comp *comp;
+
+ if (comp_idx > 1)
+ return 0;
+
+ comp = mtk_crtc->ddp_comp[comp_idx];
+ if (!comp->funcs)
+ return 0;
+
+ if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
+ return 0;
+
+ return mtk_ddp_comp_layer_nr(comp);
+}
+
+static inline
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+{
+ if (plane_idx == 0)
+ return DRM_PLANE_TYPE_PRIMARY;
+ else if (plane_idx == 1)
+ return DRM_PLANE_TYPE_CURSOR;
+ else
+ return DRM_PLANE_TYPE_OVERLAY;
+
+}
+
+static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
+ struct mtk_drm_crtc *mtk_crtc,
+ int comp_idx, int pipe)
+{
+ int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
+ int i, ret;
+
+ for (i = 0; i < num_planes; i++) {
+ ret = mtk_plane_init(drm_dev,
+ &mtk_crtc->planes[mtk_crtc->layer_nr],
+ BIT(pipe),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_ddp_comp_supported_rotations(comp));
+ if (ret)
+ return ret;
+
+ mtk_crtc->layer_nr++;
+ }
+ return 0;
+}
+
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
struct mtk_drm_private *priv = drm_dev->dev_private;
struct device *dev = drm_dev->dev;
struct mtk_drm_crtc *mtk_crtc;
- enum drm_plane_type type;
- unsigned int zpos;
+ unsigned int num_comp_planes = 0;
int pipe = priv->num_pipes;
int ret;
int i;
@@ -581,17 +679,15 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
- sizeof(struct drm_plane),
- GFP_KERNEL);
-
- for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
- type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
- (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
- DRM_PLANE_TYPE_OVERLAY;
- ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
- BIT(pipe), type);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+
+ mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+ sizeof(struct drm_plane), GFP_KERNEL);
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+ pipe);
if (ret)
return ret;
}
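
With a second overlay in the path, mtk_drm_crtc can no longer pin every plane to ddp_comp[0]; mtk_drm_ddp_comp_for_plane() splits the global plane index into (component, local layer) with a running prefix sum over per-component layer counts. Worked example for a 4-layer OVL followed by an OVL_2L (2 layers, going by the name and the layer_nr == 4 test in the probe hunk above):

    planes 0..3 -> OVL,    local layers 0..3
    planes 4..5 -> OVL_2L, local layers 0..1   (plane 5: count = 4, so 5 - 4 = 1)
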
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index fcc134eb00c9..6afe1c19557a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -19,5 +19,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+ struct mtk_plane_state *state);
#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 8106a71a7404..13035c906035 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -33,12 +33,15 @@
#define DISP_REG_CONFIG_DSI_SEL 0x050
#define DISP_REG_CONFIG_DPI_SEL 0x064
-#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
-#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
-#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
-#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
+#define MT2701_DISP_MUTEX0_MOD0 0x2c
+#define MT2701_DISP_MUTEX0_SOF0 0x30
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
#define INT_MUTEX BIT(1)
@@ -139,12 +142,30 @@ struct mtk_disp_mutex {
bool claimed;
};
+enum mtk_ddp_mutex_sof_id {
+ DDP_MUTEX_SOF_SINGLE_MODE,
+ DDP_MUTEX_SOF_DSI0,
+ DDP_MUTEX_SOF_DSI1,
+ DDP_MUTEX_SOF_DPI0,
+ DDP_MUTEX_SOF_DPI1,
+ DDP_MUTEX_SOF_DSI2,
+ DDP_MUTEX_SOF_DSI3,
+};
+
+struct mtk_ddp_data {
+ const unsigned int *mutex_mod;
+ const unsigned int *mutex_sof;
+ const unsigned int mutex_mod_reg;
+ const unsigned int mutex_sof_reg;
+ const bool no_clk;
+};
+
struct mtk_ddp {
struct device *dev;
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
- const unsigned int *mutex_mod;
+ const struct mtk_ddp_data *data;
};
static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
@@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
+static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = {
+ [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+ [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1,
+ [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2,
+ [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
+};
+
+static const struct mtk_ddp_data mt2701_ddp_driver_data = {
+ .mutex_mod = mt2701_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt2712_ddp_driver_data = {
+ .mutex_mod = mt2712_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
+static const struct mtk_ddp_data mt8173_ddp_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt2712_mutex_sof,
+ .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
+};
+
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next,
unsigned int *addr)
@@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
mutex[mutex->id]);
unsigned int reg;
+ unsigned int sof_id;
unsigned int offset;
WARN_ON(&ddp->mutex[mutex->id] != mutex);
switch (id) {
case DDP_COMPONENT_DSI0:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI1:
- reg = MUTEX_SOF_DSI0;
+ sof_id = DDP_MUTEX_SOF_DSI0;
break;
case DDP_COMPONENT_DSI2:
- reg = MUTEX_SOF_DSI2;
+ sof_id = DDP_MUTEX_SOF_DSI2;
break;
case DDP_COMPONENT_DSI3:
- reg = MUTEX_SOF_DSI3;
+ sof_id = DDP_MUTEX_SOF_DSI3;
break;
case DDP_COMPONENT_DPI0:
- reg = MUTEX_SOF_DPI0;
+ sof_id = DDP_MUTEX_SOF_DPI0;
break;
case DDP_COMPONENT_DPI1:
- reg = MUTEX_SOF_DPI1;
+ sof_id = DDP_MUTEX_SOF_DPI1;
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << ddp->mutex_mod[id];
+ reg |= 1 << ddp->data->mutex_mod[id];
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg |= 1 << (ddp->mutex_mod[id] - 32);
+ reg |= 1 << (ddp->data->mutex_mod[id] - 32);
writel_relaxed(reg, ddp->regs + offset);
}
return;
}
- writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ writel_relaxed(ddp->data->mutex_sof[sof_id],
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id));
}
void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
@@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
case DDP_COMPONENT_DPI0:
case DDP_COMPONENT_DPI1:
writel_relaxed(MUTEX_SOF_SINGLE_MODE,
- ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ ddp->regs +
+ DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg,
+ mutex->id));
break;
default:
- if (ddp->mutex_mod[id] < 32) {
- offset = DISP_REG_MUTEX_MOD(mutex->id);
+ if (ddp->data->mutex_mod[id] < 32) {
+ offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
+ mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << ddp->mutex_mod[id]);
+ reg &= ~(1 << ddp->data->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + offset);
} else {
offset = DISP_REG_MUTEX_MOD2(mutex->id);
reg = readl_relaxed(ddp->regs + offset);
- reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+ reg &= ~(1 << (ddp->data->mutex_mod[id] - 32));
writel_relaxed(reg, ddp->regs + offset);
}
break;
@@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev)
for (i = 0; i < 10; i++)
ddp->mutex[i].id = i;
- ddp->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddp->clk);
+ ddp->data = of_device_get_match_data(dev);
+
+ if (!ddp->data->no_clk) {
+ ddp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddp->clk);
+ }
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
- ddp->mutex_mod = of_device_get_match_data(dev);
-
platform_set_drvdata(pdev, ddp);
return 0;
@@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
- { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
- { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
+ { .compatible = "mediatek,mt2701-disp-mutex",
+ .data = &mt2701_ddp_driver_data},
+ { .compatible = "mediatek,mt2712-disp-mutex",
+ .data = &mt2712_ddp_driver_data},
+ { .compatible = "mediatek,mt8173-disp-mutex",
+ .data = &mt8173_ddp_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
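
The mutex driver gets the same treatment as the OVL: its OF match data grows from a bare mutex_mod[] table into a struct mtk_ddp_data bundle, so register offsets (and, via .no_clk, SoCs without a mutex clock, presumably to be added later) can vary per compatible. Consumers read it once in probe and index through it:

    ddp->data = of_device_get_match_data(dev);    /* e.g. &mt8173_ddp_driver_data */
    ...
    offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, mutex->id);

All three compatibles still share MT2701_DISP_MUTEX0_MOD0/SOF0 and the mt2712 SOF table, so behaviour is unchanged here; only the indirection is new.
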
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index efa85973e46b..7f21307cda75 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -33,6 +33,18 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
+#define DISP_CCORR_EN 0x0000
+#define CCORR_EN BIT(0)
+#define DISP_CCORR_CFG 0x0020
+#define CCORR_RELAY_MODE BIT(0)
+#define DISP_CCORR_SIZE 0x0030
+
+#define DISP_DITHER_EN 0x0000
+#define DITHER_EN BIT(0)
+#define DISP_DITHER_CFG 0x0020
+#define DITHER_RELAY_MODE BIT(0)
+#define DISP_DITHER_SIZE 0x0030
+
#define DISP_GAMMA_EN 0x0000
#define DISP_GAMMA_CFG 0x0020
#define DISP_GAMMA_SIZE 0x0030
@@ -123,6 +135,42 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp)
writel_relaxed(0x0, comp->regs + DISP_AAL_EN);
}
+static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE);
+ writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG);
+}
+
+static void mtk_ccorr_start(struct mtk_ddp_comp *comp)
+{
+ writel(CCORR_EN, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_ccorr_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_CCORR_EN);
+}
+
+static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh,
+ unsigned int bpc)
+{
+ writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE);
+ writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG);
+}
+
+static void mtk_dither_start(struct mtk_ddp_comp *comp)
+{
+ writel(DITHER_EN, comp->regs + DISP_DITHER_EN);
+}
+
+static void mtk_dither_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_DITHER_EN);
+}
+
static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
@@ -171,6 +219,18 @@ static const struct mtk_ddp_comp_funcs ddp_aal = {
.stop = mtk_aal_stop,
};
+static const struct mtk_ddp_comp_funcs ddp_ccorr = {
+ .config = mtk_ccorr_config,
+ .start = mtk_ccorr_start,
+ .stop = mtk_ccorr_stop,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_dither = {
+ .config = mtk_dither_config,
+ .start = mtk_dither_start,
+ .stop = mtk_dither_stop,
+};
+
static const struct mtk_ddp_comp_funcs ddp_gamma = {
.gamma_set = mtk_gamma_set,
.config = mtk_gamma_config,
@@ -189,11 +249,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = {
static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_OVL] = "ovl",
+ [MTK_DISP_OVL_2L] = "ovl_2l",
[MTK_DISP_RDMA] = "rdma",
[MTK_DISP_WDMA] = "wdma",
[MTK_DISP_COLOR] = "color",
+ [MTK_DISP_CCORR] = "ccorr",
[MTK_DISP_AAL] = "aal",
[MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_DITHER] = "dither",
[MTK_DISP_UFOE] = "ufoe",
[MTK_DSI] = "dsi",
[MTK_DPI] = "dpi",
@@ -213,8 +276,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal },
[DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal },
[DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
+ [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL },
+ [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
[DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL },
[DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
@@ -226,6 +291,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od },
[DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
[DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
+ [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL },
+ [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL },
[DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
[DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL },
[DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL },
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 0ad287f427cc..2f1e9e75b8da 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -17,9 +17,12 @@ struct drm_crtc_state;
enum mtk_ddp_comp_type {
MTK_DISP_OVL,
+ MTK_DISP_OVL_2L,
MTK_DISP_RDMA,
MTK_DISP_WDMA,
MTK_DISP_COLOR,
+ MTK_DISP_CCORR,
+ MTK_DISP_DITHER,
MTK_DISP_AAL,
MTK_DISP_GAMMA,
MTK_DISP_UFOE,
@@ -36,8 +39,10 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
+ DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
@@ -48,6 +53,8 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL_2L0,
+ DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
@@ -70,13 +77,19 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp);
unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
+ int (*layer_check)(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state);
void (*gamma_set)(struct mtk_ddp_comp *comp,
struct drm_crtc_state *state);
+ void (*bgclr_in_on)(struct mtk_ddp_comp *comp);
+ void (*bgclr_in_off)(struct mtk_ddp_comp *comp);
};
struct mtk_ddp_comp {
@@ -121,6 +134,15 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline
+unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->supported_rotations)
+ return comp->funcs->supported_rotations(comp);
+
+ return 0;
+}
+
static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
{
if (comp->funcs && comp->funcs->layer_nr)
@@ -143,6 +165,15 @@ static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
comp->funcs->layer_off(comp, idx);
}
+static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ if (comp->funcs && comp->funcs->layer_check)
+ return comp->funcs->layer_check(comp, idx, state);
+ return 0;
+}
+
static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
unsigned int idx,
struct mtk_plane_state *state)
@@ -158,6 +189,18 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp,
comp->funcs->gamma_set(comp, state);
}
+static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_on)
+ comp->funcs->bgclr_in_on(comp);
+}
+
+static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->bgclr_in_off)
+ comp->funcs->bgclr_in_off(comp);
+}
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 352b81a7a670..84d14213d992 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -547,6 +547,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
*/
if (comp_type == MTK_DISP_COLOR ||
comp_type == MTK_DISP_OVL ||
+ comp_type == MTK_DISP_OVL_2L ||
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
@@ -669,8 +670,8 @@ static struct platform_driver * const mtk_drm_drivers[] = {
&mtk_disp_rdma_driver,
&mtk_dpi_driver,
&mtk_drm_platform_driver,
- &mtk_dsi_driver,
&mtk_mipi_tx_driver,
+ &mtk_dsi_driver,
};
static int __init mtk_drm_init(void)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index ca672f1d140d..b04a3c2b111e 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
pgprot_writecombine(PAGE_KERNEL));
out:
- kfree((void *)sgt);
+ kfree(sgt);
return mtk_gem->kvaddr;
}
@@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
vunmap(vaddr);
mtk_gem->kvaddr = 0;
- kfree((void *)mtk_gem->pages);
+ kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 584a9ecadce6..3b0cc91c7023 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -20,6 +20,12 @@
static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_RGB565,
DRM_FORMAT_UYVY,
DRM_FORMAT_YUYV,
@@ -84,6 +90,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc_state *crtc_state;
+ int ret;
if (!fb)
return 0;
@@ -91,6 +98,11 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
if (!state->crtc)
return 0;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -132,6 +144,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.y = plane->state->dst.y1;
state->pending.width = drm_rect_width(&plane->state->dst);
state->pending.height = drm_rect_height(&plane->state->dst);
+ state->pending.rotation = plane->state->rotation;
wmb(); /* Make sure the above parameters are set before update */
state->pending.dirty = true;
}
@@ -154,7 +167,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
};
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type)
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations)
{
int err;
@@ -166,6 +180,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ if (supported_rotations & ~DRM_MODE_ROTATE_0) {
+ err = drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+ if (err)
+ DRM_INFO("Failed to create rotation property\n");
+ }
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
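
On the rotation plumbing wired up here: the OVL hardware only has flip bits (OVL_CON_VIRT_FLIP/OVL_CON_HORZ_FLIP), yet mtk_ovl_supported_rotations() also advertises DRM_MODE_ROTATE_180. That is sound because a 180-degree rotation equals reflecting in both axes, and drm_rotation_simplify() in mtk_ovl_layer_check() rewrites ROTATE_180 into REFLECT_X | REFLECT_Y before any leftover rotate bits are rejected:

    rotation = drm_rotation_simplify(state->rotation,
                                     DRM_MODE_ROTATE_0 |
                                     DRM_MODE_REFLECT_X |
                                     DRM_MODE_REFLECT_Y);
    rotation &= ~DRM_MODE_ROTATE_0;
    if (rotation & DRM_MODE_ROTATE_MASK)    /* only pure reflection survives */
            return -EINVAL;
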
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
index 6f842df722c7..760885e35b27 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -20,6 +20,7 @@ struct mtk_plane_pending_state {
unsigned int y;
unsigned int width;
unsigned int height;
+ unsigned int rotation;
bool dirty;
};
@@ -35,6 +36,7 @@ to_mtk_plane_state(struct drm_plane_state *state)
}
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
- unsigned long possible_crtcs, enum drm_plane_type type);
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int supported_rotations);
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 224afb666881..e9931bbbe846 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -16,6 +16,7 @@
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -39,6 +40,7 @@
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
+#define DPHY_RESET BIT(2)
#define DSI_MODE_CTRL 0x14
#define MODE (3)
@@ -72,6 +74,7 @@
#define DSI_VBP_NL 0x24
#define DSI_VFP_NL 0x28
#define DSI_VACT_NL 0x2C
+#define DSI_SIZE_CON 0x38
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
@@ -125,7 +128,10 @@
#define VM_CMD_EN BIT(0)
#define TS_VFP_EN BIT(5)
-#define DSI_CMDQ0 0x180
+#define DSI_SHADOW_DEBUG 0x190U
+#define FORCE_COMMIT BIT(0)
+#define BYPASS_SHADOW BIT(1)
+
#define CONFIG (0xff << 0)
#define SHORT_PACKET 0
#define LONG_PACKET 2
@@ -134,12 +140,6 @@
#define DATA_0 (0xff << 16)
#define DATA_1 (0xff << 24)
-#define T_LPX 5
-#define T_HS_PREP 6
-#define T_HS_TRAIL 8
-#define T_HS_EXIT 7
-#define T_HS_ZERO 10
-
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
#define MTK_DSI_HOST_IS_READ(type) \
@@ -148,8 +148,33 @@
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
+struct mtk_phy_timing {
+ u32 lpx;
+ u32 da_hs_prepare;
+ u32 da_hs_zero;
+ u32 da_hs_trail;
+
+ u32 ta_go;
+ u32 ta_sure;
+ u32 ta_get;
+ u32 da_hs_exit;
+
+ u32 clk_hs_zero;
+ u32 clk_hs_trail;
+
+ u32 clk_hs_prepare;
+ u32 clk_hs_post;
+ u32 clk_hs_exit;
+};
+
struct phy;
+struct mtk_dsi_driver_data {
+ const u32 reg_cmdq_off;
+ bool has_shadow_ctl;
+ bool has_size_ctl;
+};
+
struct mtk_dsi {
struct mtk_ddp_comp ddp_comp;
struct device *dev;
@@ -172,10 +197,12 @@ struct mtk_dsi {
enum mipi_dsi_pixel_format format;
unsigned int lanes;
struct videomode vm;
+ struct mtk_phy_timing phy_timing;
int refcount;
bool enabled;
u32 irq_data;
wait_queue_head_t irq_wait_queue;
+ const struct mtk_dsi_driver_data *driver_data;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -204,17 +231,36 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
+
+ ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
+ cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+
+ timing->lpx = NS_TO_CYCLE(60, cycle_time);
+ timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
+ timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
+ timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+
+ timing->ta_go = 4 * timing->lpx;
+ timing->ta_sure = 3 * timing->lpx / 2;
+ timing->ta_get = 5 * timing->lpx;
+ timing->da_hs_exit = 2 * timing->lpx;
- ui = 1000 / dsi->data_rate + 0x01;
- cycle_time = 8000 / dsi->data_rate + 0x01;
+ timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
+ timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
- timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
- timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
- T_HS_EXIT << 24;
- timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
- (NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
- NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
+ timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
+ timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
+ timing->clk_hs_exit = 2 * timing->lpx;
+
+ timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
+ timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
+ timcon1 = timing->ta_go | timing->ta_sure << 8 |
+ timing->ta_get << 16 | timing->da_hs_exit << 24;
+ timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
+ timing->clk_hs_trail << 24;
+ timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
+ timing->clk_hs_exit << 16;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -238,6 +284,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
+static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
+}
+
static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
@@ -401,7 +453,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
u32 horizontal_frontporch_byte;
- u32 dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp, data_phy_cycles;
+ struct mtk_phy_timing *timing = &dsi->phy_timing;
struct videomode *vm = &dsi->vm;
@@ -415,6 +468,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+ if (dsi->driver_data->has_size_ctl)
+ writel(vm->vactive << 16 | vm->hactive,
+ dsi->regs + DSI_SIZE_CON);
+
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -424,7 +481,34 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
dsi_tmp_buf_bpp - 10);
- horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+ data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+ timing->da_hs_zero + timing->da_hs_exit + 2;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 18) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 18;
+ } else {
+ DRM_WARN("HFP less than D-PHY overhead, frame rate will drop below 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ } else {
+ if (vm->hfront_porch * dsi_tmp_buf_bpp >
+ data_phy_cycles * dsi->lanes + 12) {
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp -
+ data_phy_cycles *
+ dsi->lanes - 12;
+ } else {
+ DRM_WARN("HFP less than D-PHY overhead, frame rate will drop below 60Hz\n");
+ horizontal_frontporch_byte = vm->hfront_porch *
+ dsi_tmp_buf_bpp;
+ }
+ }
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
@@ -522,10 +606,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
- struct device *dev = dsi->dev;
+ struct device *dev = dsi->host.dev;
int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+ u32 bit_per_pixel;
if (++dsi->refcount != 1)
return 0;
@@ -544,24 +627,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
break;
}
- /**
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
+ dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
+ dsi->lanes);
ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
@@ -584,10 +651,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
}
mtk_dsi_enable(dsi);
+
+ if (dsi->driver_data->has_shadow_ctl)
+ writel(FORCE_COMMIT | BYPASS_SHADOW,
+ dsi->regs + DSI_SHADOW_DEBUG);
+
mtk_dsi_reset_engine(dsi);
mtk_dsi_phy_timconfig(dsi);
mtk_dsi_rxtx_control(dsi);
+ usleep_range(30, 100);
+ mtk_dsi_reset_dphy(dsi);
mtk_dsi_ps_control_vact(dsi);
mtk_dsi_set_vm_cmd(dsi);
mtk_dsi_config_vdo_timing(dsi);
@@ -938,6 +1012,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
const char *tx_buf = msg->tx_buf;
u8 config, cmdq_size, cmdq_off, type = msg->type;
u32 reg_val, cmdq_mask, i;
+ u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;
if (MTK_DSI_HOST_IS_READ(type))
config = BTA;
@@ -957,9 +1032,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
}
for (i = 0; i < msg->tx_len; i++)
- writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+ mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
+ (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
+ tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));
- mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}
@@ -1049,12 +1126,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = mipi_dsi_host_register(&dsi->host);
- if (ret < 0) {
- dev_err(dev, "failed to register DSI host: %d\n", ret);
- goto err_ddp_comp_unregister;
- }
-
ret = mtk_dsi_create_conn_enc(drm, dsi);
if (ret) {
DRM_ERROR("Encoder create failed with %d\n", ret);
@@ -1064,8 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_unregister:
- mipi_dsi_host_unregister(&dsi->host);
-err_ddp_comp_unregister:
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
return ret;
}
@@ -1077,7 +1146,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master,
struct mtk_dsi *dsi = dev_get_drvdata(dev);
mtk_dsi_destroy_conn_enc(dsi);
- mipi_dsi_host_unregister(&dsi->host);
mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}
@@ -1101,31 +1169,38 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->host.ops = &mtk_dsi_ops;
dsi->host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ return ret;
+ }
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&dsi->panel, &dsi->bridge);
if (ret)
- return ret;
+ goto err_unregister_host;
+
+ dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
dev_err(dev, "Failed to get engine clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
dev_err(dev, "Failed to get digital clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(dsi->hs_clk)) {
ret = PTR_ERR(dsi->hs_clk);
dev_err(dev, "Failed to get hs clock: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1133,33 +1208,35 @@ static int mtk_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->regs)) {
ret = PTR_ERR(dsi->regs);
dev_err(dev, "Failed to ioremap memory: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(dsi->phy)) {
ret = PTR_ERR(dsi->phy);
dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
- return comp_id;
+ ret = comp_id;
+ goto err_unregister_host;
}
ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
&mtk_dsi_funcs);
if (ret) {
dev_err(dev, "Failed to initialize component: %d\n", ret);
- return ret;
+ goto err_unregister_host;
}
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
- dev_err(&pdev->dev, "failed to request dsi irq resource\n");
- return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num);
+ ret = irq_num;
+ goto err_unregister_host;
}
irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -1167,14 +1244,24 @@ static int mtk_dsi_probe(struct platform_device *pdev)
IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
if (ret) {
dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
- return -EPROBE_DEFER;
+ goto err_unregister_host;
}
init_waitqueue_head(&dsi->irq_wait_queue);
platform_set_drvdata(pdev, dsi);
- return component_add(&pdev->dev, &mtk_dsi_component_ops);
+ ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add component: %d\n", ret);
+ goto err_unregister_host;
+ }
+
+ return 0;
+
+err_unregister_host:
+ mipi_dsi_host_unregister(&dsi->host);
+ return ret;
}
static int mtk_dsi_remove(struct platform_device *pdev)
@@ -1183,13 +1270,32 @@ static int mtk_dsi_remove(struct platform_device *pdev)
mtk_output_dsi_disable(dsi);
component_del(&pdev->dev, &mtk_dsi_component_ops);
+ mipi_dsi_host_unregister(&dsi->host);
return 0;
}
+static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+};
+
+static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
+ .reg_cmdq_off = 0x180,
+};
+
+static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
+ .reg_cmdq_off = 0x200,
+ .has_shadow_ctl = true,
+ .has_size_ctl = true,
+};
+
static const struct of_device_id mtk_dsi_of_match[] = {
- { .compatible = "mediatek,mt2701-dsi" },
- { .compatible = "mediatek,mt8173-dsi" },
+ { .compatible = "mediatek,mt2701-dsi",
+ .data = &mt2701_dsi_driver_data },
+ { .compatible = "mediatek,mt8173-dsi",
+ .data = &mt8173_dsi_driver_data },
+ { .compatible = "mediatek,mt8183-dsi",
+ .data = &mt8183_dsi_driver_data },
{ },
};
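
Two points worth calling out in the DSI rework. First, the per-lane bit rate loses its htotal/overhead scaling and becomes simply pixel_clock * bpp / lanes; second, the D-PHY timings are now derived from that rate instead of the old fixed T_* cycle counts, with the HFP then shrunk by the data_phy_cycles overhead. Rough numbers for a common mode, assuming vm.pixelclock is in Hz as struct videomode uses elsewhere (1080p@60, RGB888, 4 lanes):

    data_rate     = DIV_ROUND_UP_ULL(148500000ULL * 24, 4); /* 891 MHz per lane */
    ui            = DIV_ROUND_UP(1000000000, data_rate);    /* 2 ns, rounded up */
    cycle_time    = div_u64(8000000000ULL, data_rate);      /* 8 ns per byte clock */
    lpx           = NS_TO_CYCLE(60, cycle_time);            /* ceil(60/8) = 8 cycles */
    da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);   /* ceil(60/8) = 8 */
    da_hs_trail   = NS_TO_CYCLE(77 + 4 * ui, cycle_time);   /* ceil(85/8) = 11 */
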
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ce91b61364eb..c79b1f855d89 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -23,6 +23,7 @@
#include <sound/hdmi-codec.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1842dc2caae9..e4d34484ecc8 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -3,292 +3,39 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-
-#define MIPITX_DSI_CON 0x00
-#define RG_DSI_LDOCORE_EN BIT(0)
-#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
-#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
-#define RG_DSI_LPTX_CLMP_EN BIT(11)
-
-#define MIPITX_DSI_CLOCK_LANE 0x04
-#define MIPITX_DSI_DATA_LANE0 0x08
-#define MIPITX_DSI_DATA_LANE1 0x0c
-#define MIPITX_DSI_DATA_LANE2 0x10
-#define MIPITX_DSI_DATA_LANE3 0x14
-#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
-#define RG_DSI_LNTx_CKLANE_EN BIT(1)
-#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
-#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
-#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
-#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
-#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
-
-#define MIPITX_DSI_TOP_CON 0x40
-#define RG_DSI_LNT_INTR_EN BIT(0)
-#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
-#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
-#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
-#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
-#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
-
-#define MIPITX_DSI_BG_CON 0x44
-#define RG_DSI_BG_CORE_EN BIT(0)
-#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
-#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
-
-#define MIPITX_DSI_PLL_CON0 0x50
-#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
-#define RG_DSI_MPPLL_MONVC_EN BIT(10)
-#define RG_DSI_MPPLL_MONREF_EN BIT(11)
-#define RG_DSI_MPPLL_VOD_EN BIT(12)
-
-#define MIPITX_DSI_PLL_CON1 0x54
-#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
-#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
-#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
-
-#define MIPITX_DSI_PLL_CON2 0x58
-
-#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
-
-#define MIPITX_DSI_PLL_PWR 0x68
-#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
-#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
-#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
-
-#define MIPITX_DSI_SW_CTRL 0x80
-#define SW_CTRL_EN BIT(0)
-
-#define MIPITX_DSI_SW_CTRL_CON0 0x84
-#define SW_LNTC_LPTX_PRE_OE BIT(0)
-#define SW_LNTC_LPTX_OE BIT(1)
-#define SW_LNTC_LPTX_P BIT(2)
-#define SW_LNTC_LPTX_N BIT(3)
-#define SW_LNTC_HSTX_PRE_OE BIT(4)
-#define SW_LNTC_HSTX_OE BIT(5)
-#define SW_LNTC_HSTX_ZEROCLK BIT(6)
-#define SW_LNT0_LPTX_PRE_OE BIT(7)
-#define SW_LNT0_LPTX_OE BIT(8)
-#define SW_LNT0_LPTX_P BIT(9)
-#define SW_LNT0_LPTX_N BIT(10)
-#define SW_LNT0_HSTX_PRE_OE BIT(11)
-#define SW_LNT0_HSTX_OE BIT(12)
-#define SW_LNT0_LPRX_EN BIT(13)
-#define SW_LNT1_LPTX_PRE_OE BIT(14)
-#define SW_LNT1_LPTX_OE BIT(15)
-#define SW_LNT1_LPTX_P BIT(16)
-#define SW_LNT1_LPTX_N BIT(17)
-#define SW_LNT1_HSTX_PRE_OE BIT(18)
-#define SW_LNT1_HSTX_OE BIT(19)
-#define SW_LNT2_LPTX_PRE_OE BIT(20)
-#define SW_LNT2_LPTX_OE BIT(21)
-#define SW_LNT2_LPTX_P BIT(22)
-#define SW_LNT2_LPTX_N BIT(23)
-#define SW_LNT2_HSTX_PRE_OE BIT(24)
-#define SW_LNT2_HSTX_OE BIT(25)
-
-struct mtk_mipitx_data {
- const u32 mppll_preserve;
-};
-
-struct mtk_mipi_tx {
- struct device *dev;
- void __iomem *regs;
- u32 data_rate;
- const struct mtk_mipitx_data *driver_data;
- struct clk_hw pll_hw;
- struct clk *pll;
-};
+#include "mtk_mipi_tx.h"
-static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
{
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp & ~bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
{
u32 temp = readl(mipi_tx->regs + offset);
writel(temp | bits, mipi_tx->regs + offset);
}
-static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 mask, u32 data)
{
u32 temp = readl(mipi_tx->regs + offset);
writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
}
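
These three helpers are the classic MMIO read-modify-write pattern: read the register, touch only the masked bits, write the result back. A runnable userspace model, using a plain variable in place of readl()/writel():

    #include <stdint.h>
    #include <stdio.h>

    static void update_bits(uint32_t *reg, uint32_t mask, uint32_t data)
    {
        uint32_t temp = *reg;                    /* read */
        *reg = (temp & ~mask) | (data & mask);   /* modify, write back */
    }

    int main(void)
    {
        uint32_t reg = 0xffff0000;

        update_bits(&reg, 0x0000ff00, 0x00001200); /* only masked bits change */
        printf("reg = 0x%08x\n", reg);             /* prints 0xffff1200 */
        return 0;
    }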
-static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- u8 txdiv, txdiv0, txdiv1;
- u64 pcw;
-
- dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
-
- if (mipi_tx->data_rate >= 500000000) {
- txdiv = 1;
- txdiv0 = 0;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 250000000) {
- txdiv = 2;
- txdiv0 = 1;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate >= 125000000) {
- txdiv = 4;
- txdiv0 = 2;
- txdiv1 = 0;
- } else if (mipi_tx->data_rate > 62000000) {
- txdiv = 8;
- txdiv0 = 2;
- txdiv1 = 1;
- } else if (mipi_tx->data_rate >= 50000000) {
- txdiv = 16;
- txdiv0 = 2;
- txdiv1 = 2;
- } else {
- return -EINVAL;
- }
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- usleep_range(30, 100);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
-
- /*
- * PLL PCW config
- * PCW bit 24~30 = integer part of pcw
- * PCW bit 0~23 = fractional part of pcw
- * pcw = data_Rate*4*txdiv/(Ref_clk*2);
- * Post DIV =4, so need data_Rate*4
- * Ref_clk is 26MHz
- */
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
-
- usleep_range(20, 100);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
-
- return 0;
-}
-
-static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
-{
- struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
-
- dev_dbg(mipi_tx->dev, "unprepare\n");
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
-
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
-}
-
-static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- return clamp_val(rate, 50000000, 1250000000);
-}
-
-static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
@@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
return mipi_tx->data_rate;
}
-static const struct clk_ops mtk_mipi_tx_pll_ops = {
- .prepare = mtk_mipi_tx_pll_prepare,
- .unprepare = mtk_mipi_tx_pll_unprepare,
- .round_rate = mtk_mipi_tx_pll_round_rate,
- .set_rate = mtk_mipi_tx_pll_set_rate,
- .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
-};
-
-static int mtk_mipi_tx_power_on_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- return 0;
-}
-
static int mtk_mipi_tx_power_on(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
return ret;
/* Enable DSI Lane LDO outputs, disable pad tie low */
- mtk_mipi_tx_power_on_signal(phy);
-
+ mipi_tx->driver_data->mipi_tx_enable_signal(phy);
return 0;
}
-static void mtk_mipi_tx_power_off_signal(struct phy *phy)
-{
- struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- u32 reg;
-
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
-
- for (reg = MIPITX_DSI_CLOCK_LANE;
- reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
-}
-
static int mtk_mipi_tx_power_off(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
/* Enable pad tie low, disable DSI Lane LDO outputs */
- mtk_mipi_tx_power_off_signal(phy);
+ mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
clk_disable_unprepare(mipi_tx->pll);
@@ -383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mtk_mipi_tx *mipi_tx;
struct resource *mem;
- struct clk *ref_clk;
const char *ref_clk_name;
+ struct clk *ref_clk;
struct clk_init_data clk_init = {
- .ops = &mtk_mipi_tx_pll_ops,
.num_parents = 1,
.parent_names = (const char * const *)&ref_clk_name,
.flags = CLK_SET_RATE_GATE,
@@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get reference clock: %d\n", ret);
return ret;
}
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
+
mipi_tx->pll_hw.init = &clk_init;
mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
if (IS_ERR(mipi_tx->pll)) {
@@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
-static const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8)
-};
-
-static const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8)
-};
-
static const struct of_device_id mtk_mipi_tx_match[] = {
{ .compatible = "mediatek,mt2701-mipi-tx",
.data = &mt2701_mipitx_data },
{ .compatible = "mediatek,mt8173-mipi-tx",
.data = &mt8173_mipitx_data },
- {},
+ { .compatible = "mediatek,mt8183-mipi-tx",
+ .data = &mt8183_mipitx_data },
+ { },
};
struct platform_driver mtk_mipi_tx_driver = {
@@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = {
.of_match_table = mtk_mipi_tx_match,
},
};
+

diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
new file mode 100644
index 000000000000..413f35d86219
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Jitao Shi <jitao.shi@mediatek.com>
+ */
+
+#ifndef _MTK_MIPI_TX_H
+#define _MTK_MIPI_TX_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+ const struct clk_ops *mipi_tx_clk_ops;
+ void (*mipi_tx_enable_signal)(struct phy *phy);
+ void (*mipi_tx_disable_signal)(struct phy *phy);
+};
+
+struct mtk_mipi_tx {
+ struct device *dev;
+ void __iomem *regs;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+};
+
+struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
+void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
+void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
+ u32 data);
+int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+extern const struct mtk_mipitx_data mt2701_mipitx_data;
+extern const struct mtk_mipitx_data mt8173_mipitx_data;
+extern const struct mtk_mipitx_data mt8183_mipitx_data;
+
+#endif
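
The new header turns the per-SoC differences into an ops table: a const struct of function pointers (clock ops plus enable/disable hooks) that each SoC file fills in and the common code dereferences. A runnable model of the pattern, with illustrative names:

    #include <stdio.h>

    struct ops {
        void (*enable)(void);
        void (*disable)(void);
    };

    static void v1_enable(void)  { puts("v1 enable");  }
    static void v1_disable(void) { puts("v1 disable"); }

    /* One const table per variant; the core only sees struct ops. */
    static const struct ops v1_ops = {
        .enable  = v1_enable,
        .disable = v1_disable,
    };

    int main(void)
    {
        const struct ops *ops = &v1_ops;  /* selected by compatible match */

        ops->enable();
        ops->disable();
        return 0;
    }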
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
new file mode 100644
index 000000000000..f18db14d8b63
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_DSI_CON 0x00
+#define RG_DSI_LDOCORE_EN BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN BIT(1)
+#define RG_DSI_BCLK_SEL (3 << 2)
+#define RG_DSI_LD_IDX_SEL (7 << 4)
+#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
+#define RG_DSI_LPTX_CLMP_EN BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE 0x04
+#define MIPITX_DSI_DATA_LANE0 0x08
+#define MIPITX_DSI_DATA_LANE1 0x0c
+#define MIPITX_DSI_DATA_LANE2 0x10
+#define MIPITX_DSI_DATA_LANE3 0x14
+#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
+#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+
+#define MIPITX_DSI_TOP_CON 0x40
+#define RG_DSI_LNT_INTR_EN BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
+#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN BIT(12)
+#define RG_DSI_PRESERVE (7 << 13)
+
+#define MIPITX_DSI_BG_CON 0x44
+#define RG_DSI_BG_CORE_EN BIT(0)
+#define RG_DSI_BG_CKEN BIT(1)
+#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE BIT(4)
+#define RG_DSI_VOUT_MSK (0x3ffff << 5)
+#define RG_DSI_V12_SEL (7 << 5)
+#define RG_DSI_V10_SEL (7 << 8)
+#define RG_DSI_V072_SEL (7 << 11)
+#define RG_DSI_V04_SEL (7 << 14)
+#define RG_DSI_V032_SEL (7 << 17)
+#define RG_DSI_V02_SEL (7 << 20)
+#define RG_DSI_BG_R1_TRIM (0xf << 24)
+#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0 0x50
+#define RG_DSI_MPPLL_PLL_EN BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV (3 << 1)
+#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
+#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
+#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN BIT(11)
+#define RG_DSI_MPPLL_VOD_EN BIT(12)
+
+#define MIPITX_DSI_PLL_CON1 0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2 0x58
+
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
+#define MIPITX_DSI_PLL_PWR 0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
+
+#define MIPITX_DSI_SW_CTRL 0x80
+#define SW_CTRL_EN BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0 0x84
+#define SW_LNTC_LPTX_PRE_OE BIT(0)
+#define SW_LNTC_LPTX_OE BIT(1)
+#define SW_LNTC_LPTX_P BIT(2)
+#define SW_LNTC_LPTX_N BIT(3)
+#define SW_LNTC_HSTX_PRE_OE BIT(4)
+#define SW_LNTC_HSTX_OE BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK BIT(6)
+#define SW_LNT0_LPTX_PRE_OE BIT(7)
+#define SW_LNT0_LPTX_OE BIT(8)
+#define SW_LNT0_LPTX_P BIT(9)
+#define SW_LNT0_LPTX_N BIT(10)
+#define SW_LNT0_HSTX_PRE_OE BIT(11)
+#define SW_LNT0_HSTX_OE BIT(12)
+#define SW_LNT0_LPRX_EN BIT(13)
+#define SW_LNT1_LPTX_PRE_OE BIT(14)
+#define SW_LNT1_LPTX_OE BIT(15)
+#define SW_LNT1_LPTX_P BIT(16)
+#define SW_LNT1_LPTX_N BIT(17)
+#define SW_LNT1_HSTX_PRE_OE BIT(18)
+#define SW_LNT1_HSTX_OE BIT(19)
+#define SW_LNT2_LPTX_PRE_OE BIT(20)
+#define SW_LNT2_LPTX_OE BIT(21)
+#define SW_LNT2_LPTX_P BIT(22)
+#define SW_LNT2_LPTX_N BIT(23)
+#define SW_LNT2_HSTX_PRE_OE BIT(24)
+#define SW_LNT2_HSTX_OE BIT(25)
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ u8 txdiv, txdiv0, txdiv1;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 250000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate > 62000000) {
+ txdiv = 8;
+ txdiv0 = 2;
+ txdiv1 = 1;
+ } else if (mipi_tx->data_rate >= 50000000) {
+ txdiv = 16;
+ txdiv0 = 2;
+ txdiv1 = 2;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+ (4 << 20) | (4 << 17) | (4 << 14) |
+ (4 << 11) | (4 << 8) | (4 << 5) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ usleep_range(30, 100);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON |
+ RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ (txdiv0 << 3) | (txdiv1 << 5));
+
+ /*
+ * PLL PCW config
+ * PCW bit 24~30 = integer part of pcw
+ * PCW bit 0~23 = fractional part of pcw
+ * pcw = data_Rate*4*txdiv/(Ref_clk*2);
+ * Post DIV = 4, so we need data_Rate * 4
+ * Ref_clk is 26MHz
+ */
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+ 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_FRA_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+ usleep_range(20, 100);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_SSC_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
+ return 0;
+}
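
The PCW written to MIPITX_DSI_PLL_CON2 is a Q7.24 fixed-point multiplier: the integer part lands in bits 24..30 and the fraction in bits 0..23, per the comment above. A runnable check of the arithmetic for one sample rate (the 450 MHz input is an arbitrary example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t data_rate = 450000000;  /* 450 MHz: >= 250 MHz rung, txdiv = 2 */
        uint64_t txdiv = 2;
        /* pcw = data_rate * 2 * txdiv / 26 MHz, shifted into Q7.24 */
        uint64_t pcw = (data_rate * 2 * txdiv << 24) / 26000000;

        printf("pcw = 0x%llx (integer %llu, fraction 0x%06llx)\n",
               (unsigned long long)pcw,
               (unsigned long long)(pcw >> 24),
               (unsigned long long)(pcw & 0xffffff));
        return 0;
    }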
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "unprepare\n");
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN |
+ RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1250000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .prepare = mtk_mipi_tx_pll_prepare,
+ .unprepare = mtk_mipi_tx_pll_unprepare,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ u32 reg;
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
+
+const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8),
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
new file mode 100644
index 000000000000..91f08a351fd0
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: jitao.shi <jitao.shi@mediatek.com>
+ */
+
+#include "mtk_mipi_tx.h"
+
+#define MIPITX_LANE_CON 0x000c
+#define RG_DSI_CPHY_T1DRV_EN BIT(0)
+#define RG_DSI_ANA_CK_SEL BIT(1)
+#define RG_DSI_PHY_CK_SEL BIT(2)
+#define RG_DSI_CPHY_EN BIT(3)
+#define RG_DSI_PHYCK_INV_EN BIT(4)
+#define RG_DSI_PWR04_EN BIT(5)
+#define RG_DSI_BG_LPF_EN BIT(6)
+#define RG_DSI_BG_CORE_EN BIT(7)
+#define RG_DSI_PAD_TIEL_SEL BIT(8)
+
+#define MIPITX_PLL_PWR 0x0028
+#define MIPITX_PLL_CON0 0x002c
+#define MIPITX_PLL_CON1 0x0030
+#define MIPITX_PLL_CON2 0x0034
+#define MIPITX_PLL_CON3 0x0038
+#define MIPITX_PLL_CON4 0x003c
+#define RG_DSI_PLL_IBIAS (3 << 10)
+
+#define MIPITX_D2_SW_CTL_EN 0x0144
+#define MIPITX_D0_SW_CTL_EN 0x0244
+#define MIPITX_CK_CKMODE_EN 0x0328
+#define DSI_CK_CKMODE_EN BIT(0)
+#define MIPITX_CK_SW_CTL_EN 0x0344
+#define MIPITX_D1_SW_CTL_EN 0x0444
+#define MIPITX_D3_SW_CTL_EN 0x0544
+#define DSI_SW_CTL_EN BIT(0)
+#define AD_DSI_PLL_SDM_PWR_ON BIT(0)
+#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
+
+#define RG_DSI_PLL_EN BIT(4)
+#define RG_DSI_PLL_POSDIV (0x7 << 8)
+
+static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ unsigned int txdiv, txdiv0;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 2000000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ } else if (mipi_tx->data_rate >= 1000000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ } else if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ } else if (mipi_tx->data_rate > 250000000) {
+ txdiv = 8;
+ txdiv0 = 3;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 16;
+ txdiv0 = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ udelay(1);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
+ txdiv0 << 8);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ return 0;
+}
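
In the MT8183 ladder, txdiv is always a power of two and txdiv0 is its exponent, which the code programs into RG_DSI_PLL_POSDIV. A runnable model of the divider selection (the sample rate is arbitrary):

    #include <stdio.h>

    static int pick_txdiv0(unsigned long long rate)
    {
        if (rate >= 2000000000ULL) return 0;  /* txdiv 1  */
        if (rate >= 1000000000ULL) return 1;  /* txdiv 2  */
        if (rate >=  500000000ULL) return 2;  /* txdiv 4  */
        if (rate >   250000000ULL) return 3;  /* txdiv 8  */
        if (rate >=  125000000ULL) return 4;  /* txdiv 16 */
        return -1;                            /* -EINVAL in the driver */
    }

    int main(void)
    {
        unsigned long long rate = 700000000ULL;
        int txdiv0 = pick_txdiv0(rate);

        if (txdiv0 < 0)
            return 1;
        printf("rate %llu -> txdiv0 %d, txdiv %d\n", rate, txdiv0, 1 << txdiv0);
        return 0;
    }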
+
+static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1600000000);
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .enable = mtk_mipi_tx_pll_enable,
+ .disable = mtk_mipi_tx_pll_disable,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static void mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* BG_LPF_EN / BG_CORE_EN */
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ usleep_range(30, 100);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+
+ /* Switch OFF each Lane */
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* Switch ON each Lane */
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
+ mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+}
+
+const struct mtk_mipitx_data mt8183_mipitx_data = {
+ .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
+ .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
+ .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
+};
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index a24f8dec5adc..397c33182f4f 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -372,6 +372,33 @@ static const struct component_master_ops meson_drv_master_ops = {
.unbind = meson_drv_unbind,
};
+static int __maybe_unused meson_drv_pm_suspend(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int __maybe_unused meson_drv_pm_resume(struct device *dev)
+{
+ struct meson_drm *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return 0;
+
+ meson_vpu_init(priv);
+ meson_venc_init(priv);
+ meson_vpp_init(priv);
+ meson_viu_init(priv);
+
+ drm_mode_config_helper_resume(priv->drm);
+
+ return 0;
+}
+
static int compare_of(struct device *dev, void *data)
{
DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -467,11 +494,16 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops meson_drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume)
+};
+
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
+ .pm = &meson_drv_pm_ops,
},
};
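
SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is disabled, which is why the callbacks above are tagged __maybe_unused rather than wrapped in #ifdef. A minimal sketch of the hookup (the my_* names are placeholders):

    #include <linux/pm.h>

    static int __maybe_unused my_suspend(struct device *dev) { return 0; }
    static int __maybe_unused my_resume(struct device *dev)  { return 0; }

    /*
     * With CONFIG_PM_SLEEP off, the macro adds no members and the two
     * callbacks would otherwise trigger unused-function warnings.
     */
    static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
    };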
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 68bbd987147b..3bb7ffe5fc39 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -802,6 +802,47 @@ static bool meson_hdmi_connector_is_available(struct device *dev)
return false;
}
+static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
+{
+ struct meson_drm *priv = meson_dw_hdmi->priv;
+
+ /* Enable clocks */
+ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
+
+ /* Bring HDMITX MEM out of power down */
+ regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
+
+ /* Reset HDMITX APB & TX & PHY */
+ reset_control_reset(meson_dw_hdmi->hdmitx_apb);
+ reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
+ reset_control_reset(meson_dw_hdmi->hdmitx_phy);
+
+ /* Enable APB3 fail on error */
+ if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
+
+ /* Bring out of reset */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ msleep(20);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
+
+ /* Enable HDMI-TX Interrupt */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
+
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
+}
+
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -925,40 +966,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
DRM_DEBUG_DRIVER("encoder initialized\n");
- /* Enable clocks */
- regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
-
- /* Bring HDMITX MEM output of power down */
- regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
-
- /* Reset HDMITX APB & TX & PHY */
- reset_control_reset(meson_dw_hdmi->hdmitx_apb);
- reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
- reset_control_reset(meson_dw_hdmi->hdmitx_phy);
-
- /* Enable APB3 fail on error */
- if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
- }
-
- /* Bring out of reset */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_SW_RESET, 0);
-
- msleep(20);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi,
- HDMITX_TOP_CLK_CNTL, 0xff);
-
- /* Enable HDMI-TX Interrupt */
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
-
- meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi_init(meson_dw_hdmi);
/* Bridge / Connector */
@@ -969,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
+ dw_plat_data->use_drm_infoframe = true;
+
platform_set_drvdata(pdev, meson_dw_hdmi);
meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
@@ -994,6 +1007,34 @@ static const struct component_ops meson_dw_hdmi_ops = {
.unbind = meson_dw_hdmi_unbind,
};
+static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ /* Reset TOP */
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
+
+ return 0;
+}
+
+static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev)
+{
+ struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
+
+ if (!meson_dw_hdmi)
+ return 0;
+
+ meson_dw_hdmi_init(meson_dw_hdmi);
+
+ dw_hdmi_resume(meson_dw_hdmi->hdmi);
+
+ return 0;
+}
+
static int meson_dw_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &meson_dw_hdmi_ops);
@@ -1006,6 +1047,11 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend,
+ meson_dw_hdmi_pm_resume)
+};
+
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
@@ -1025,6 +1071,7 @@ static struct platform_driver meson_dw_hdmi_platform_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
+ .pm = &meson_dw_hdmi_pm_ops,
},
};
module_platform_driver(meson_dw_hdmi_platform_driver);
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ac491a781952..f690793ae2d5 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -638,13 +638,18 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
- meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
if (frac >= HDMI_FRAC_MAX_GXL)
return false;
+ } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
+ /* Empiric supported min/max dividers */
+ if (m < 106 || m > 247)
+ return false;
+ if (frac >= HDMI_FRAC_MAX_G12A)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 76fee0fbdcae..aed11f4f4c55 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -4,6 +4,8 @@ config DRM_MGAG200
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
help
This is a KMS driver for the MGA G200 server chips, it
does not support the original MGA G200 or any of the desktop
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 289ce3e29032..79711dbb5b03 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -12,35 +12,10 @@
static bool warn_transparent = true;
static bool warn_palette = true;
-/*
- Hide the cursor off screen. We can't disable the cursor hardware because it
- takes too long to re-activate and causes momentary corruption
-*/
-static void mga_hide_cursor(struct mga_device *mdev)
-{
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- if (mdev->cursor.pixels_current)
- drm_gem_vram_unpin(mdev->cursor.pixels_current);
- mdev->cursor.pixels_current = NULL;
-}
-
-int mga_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height)
+static int mgag200_cursor_update(struct mga_device *mdev, void *dst, void *src,
+ unsigned int width, unsigned int height)
{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
- struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1;
- struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2;
- struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current;
- struct drm_gem_vram_object *pixels_next;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo = NULL;
- int ret = 0;
- u8 *src, *dst;
+ struct drm_device *dev = mdev->dev;
unsigned int i, row, col;
uint32_t colour_set[16];
uint32_t *next_space = &colour_set[0];
@@ -48,79 +23,9 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t this_colour;
bool found = false;
int colour_count = 0;
- s64 gpu_addr;
- u64 dst_gpu;
u8 reg_index;
u8 this_row[48];
- if (!pixels_1 || !pixels_2) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -ENOTSUPP; /* Didn't allocate space for cursors */
- }
-
- if (WARN_ON(pixels_current &&
- pixels_1 != pixels_current &&
- pixels_2 != pixels_current)) {
- return -ENOTSUPP; /* inconsistent state */
- }
-
- if (!handle || !file_priv) {
- mga_hide_cursor(mdev);
- return 0;
- }
-
- if (width != 64 || height != 64) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -EINVAL;
- }
-
- if (pixels_current == pixels_1)
- pixels_next = pixels_2;
- else
- pixels_next = pixels_1;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj)
- return -ENOENT;
- gbo = drm_gem_vram_of_gem(obj);
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- dev_err(&dev->pdev->dev, "failed to lock user bo\n");
- goto err_drm_gem_object_put_unlocked;
- }
- src = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- dev_err(&dev->pdev->dev,
- "failed to kmap user buffer updates\n");
- goto err_drm_gem_vram_unpin_src;
- }
-
- /* Pin and map up-coming buffer to write colour indices */
- ret = drm_gem_vram_pin(pixels_next, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret) {
- dev_err(&dev->pdev->dev,
- "failed to pin cursor buffer: %d\n", ret);
- goto err_drm_gem_vram_kunmap_src;
- }
- dst = drm_gem_vram_kmap(pixels_next, true, NULL);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- dev_err(&dev->pdev->dev,
- "failed to kmap cursor updates: %d\n", ret);
- goto err_drm_gem_vram_unpin_dst;
- }
- gpu_addr = drm_gem_vram_offset(pixels_next);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
- dev_err(&dev->pdev->dev,
- "failed to get cursor scanout address: %d\n", ret);
- goto err_drm_gem_vram_kunmap_dst;
- }
- dst_gpu = (u64)gpu_addr;
-
memset(&colour_set[0], 0, sizeof(uint32_t)*16);
/* width*height*4 = 16384 */
for (i = 0; i < 16384; i += 4) {
@@ -133,8 +38,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_transparent = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
/* Don't need to store transparent pixels as colours */
if (this_colour>>24 == 0x0)
@@ -155,8 +59,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
warn_palette = false; /* Only tell the user once. */
}
- ret = -EINVAL;
- goto err_drm_gem_vram_kunmap_dst;
+ return -EINVAL;
}
*next_space = this_colour;
next_space++;
@@ -200,54 +103,218 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
memcpy_toio(dst + row*48, &this_row[0], 48);
}
+ return 0;
+}
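
The palette pass above collects at most 16 distinct colours, skips fully transparent pixels, and bails out with -EINVAL when the icon needs more (the driver additionally rejects partially transparent pixels). A runnable userspace model of the collection loop:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pixels[] = { 0xff0000ff, 0x00000000, 0xff0000ff, 0xff00ff00 };
        uint32_t palette[16];
        int count = 0;

        for (unsigned i = 0; i < sizeof(pixels) / sizeof(pixels[0]); i++) {
            uint32_t c = pixels[i];
            int found = 0;

            if (c >> 24 == 0x0)       /* transparent: no palette slot */
                continue;
            for (int j = 0; j < count; j++)
                if (palette[j] == c) { found = 1; break; }
            if (found)
                continue;
            if (count == 16)
                return 1;             /* too many colours: -EINVAL above */
            palette[count++] = c;
        }
        printf("%d palette entries\n", count);  /* prints 2 */
        return 0;
    }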
+
+static void mgag200_cursor_set_base(struct mga_device *mdev, u64 address)
+{
+ u8 addrl = (address >> 10) & 0xff;
+ u8 addrh = (address >> 18) & 0x3f;
+
/* Program gpu address of cursor buffer */
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((dst_gpu>>10) & 0xff));
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((dst_gpu>>18) & 0x3f));
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, addrl);
+ WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, addrh);
+}
+
+static int mgag200_show_cursor(struct mga_device *mdev, void *src,
+ unsigned int width, unsigned int height)
+{
+ struct drm_device *dev = mdev->dev;
+ struct drm_gem_vram_object *gbo;
+ void *dst;
+ s64 off;
+ int ret;
+
+ gbo = mdev->cursor.gbo[mdev->cursor.next_index];
+ if (!gbo) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -ENOTSUPP; /* Didn't allocate space for cursors */
+ }
+ dst = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ dev_err(&dev->pdev->dev,
+ "failed to map cursor updates: %d\n", ret);
+ return ret;
+ }
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = (int)off;
+ dev_err(&dev->pdev->dev,
+ "failed to get cursor scanout address: %d\n", ret);
+ goto err_drm_gem_vram_vunmap;
+ }
+
+ ret = mgag200_cursor_update(mdev, dst, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+ mgag200_cursor_set_base(mdev, off);
/* Adjust cursor control register to turn on the cursor */
WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palletized cursor mode */
- /* Now update internal buffer pointers */
- if (pixels_current)
- drm_gem_vram_unpin(pixels_current);
- mdev->cursor.pixels_current = pixels_next;
+ drm_gem_vram_vunmap(gbo, dst);
- drm_gem_vram_kunmap(pixels_next);
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
- drm_gem_object_put_unlocked(obj);
+ ++mdev->cursor.next_index;
+ mdev->cursor.next_index %= ARRAY_SIZE(mdev->cursor.gbo);
+
+ return 0;
+
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, dst);
+ return ret;
+}
+
+/*
+ * Hide the cursor off screen. We can't disable the cursor hardware because
+ * it takes too long to re-activate and causes momentary corruption.
+ */
+static void mgag200_hide_cursor(struct mga_device *mdev)
+{
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+}
+
+static void mgag200_move_cursor(struct mga_device *mdev, int x, int y)
+{
+ if (WARN_ON(x <= 0))
+ return;
+ if (WARN_ON(y <= 0))
+ return;
+ if (WARN_ON(x & ~0xffff))
+ return;
+ if (WARN_ON(y & ~0xffff))
+ return;
+
+ WREG8(MGA_CURPOSXL, x & 0xff);
+ WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
+
+ WREG8(MGA_CURPOSYL, y & 0xff);
+ WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
+}
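
The cursor position registers are 8 bits wide, so the 16-bit coordinate is split into low and high bytes exactly as the WREG8() pairs above do. A runnable model of the split:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int x = 300 + 64;               /* origin offset, as in the code */
        uint8_t xl = x & 0xff;          /* goes to MGA_CURPOSXL */
        uint8_t xh = (x >> 8) & 0xff;   /* goes to MGA_CURPOSXH */

        printf("x=%d -> low 0x%02x, high 0x%02x\n", x, xl, xh);
        return 0;
    }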
+
+int mgag200_cursor_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ size_t ncursors = ARRAY_SIZE(mdev->cursor.gbo);
+ size_t size;
+ int ret;
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ size = roundup(64 * 48, PAGE_SIZE);
+ if (size * ncursors > mdev->vram_fb_available)
+ return -ENOMEM;
+
+ for (i = 0; i < ncursors; ++i) {
+ gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
+ size, 0, false);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_drm_gem_vram_put;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret) {
+ drm_gem_vram_put(gbo);
+ goto err_drm_gem_vram_put;
+ }
+
+ mdev->cursor.gbo[i] = gbo;
+ }
+
+ /*
+ * At the high end of video memory, we reserve space for
+ * buffer objects. The cursor plane uses this memory to store
+ * a double-buffered image of the current cursor. Hence, it's
+ * not available for framebuffers.
+ */
+ mdev->vram_fb_available -= ncursors * size;
return 0;
-err_drm_gem_vram_kunmap_dst:
- drm_gem_vram_kunmap(pixels_next);
-err_drm_gem_vram_unpin_dst:
- drm_gem_vram_unpin(pixels_next);
-err_drm_gem_vram_kunmap_src:
- drm_gem_vram_kunmap(gbo);
-err_drm_gem_vram_unpin_src:
- drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_put:
+ while (i) {
+ --i;
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ mdev->cursor.gbo[i] = NULL;
+ }
+ return ret;
+}
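
The err_drm_gem_vram_put path unwinds only the loop iterations that completed, walking i back down in reverse order. A runnable model of that rollback pattern (alloc_one()/free_one() are stand-ins):

    #include <stdio.h>

    static int alloc_one(size_t i) { return i == 1; }  /* pretend #1 fails */
    static void free_one(size_t i) { printf("freed %zu\n", i); }

    int main(void)
    {
        size_t i;

        for (i = 0; i < 4; ++i)
            if (alloc_one(i))
                goto err;
        return 0;
    err:
        while (i) {            /* unwind only what succeeded, in reverse */
            --i;
            free_one(i);
        }
        return 1;
    }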
+
+void mgag200_cursor_fini(struct mga_device *mdev)
+{
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+
+ for (i = 0; i < ARRAY_SIZE(mdev->cursor.gbo); ++i) {
+ gbo = mdev->cursor.gbo[i];
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
+}
+
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = (struct mga_device *)dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_gem_vram_object *gbo = NULL;
+ int ret;
+ u8 *src;
+
+ if (!handle || !file_priv) {
+ mgag200_hide_cursor(mdev);
+ return 0;
+ }
+
+ if (width != 64 || height != 64) {
+ WREG8(MGA_CURPOSXL, 0);
+ WREG8(MGA_CURPOSXH, 0);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+ gbo = drm_gem_vram_of_gem(obj);
+ src = drm_gem_vram_vmap(gbo);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ dev_err(&dev->pdev->dev,
+ "failed to map user buffer updates\n");
+ goto err_drm_gem_object_put_unlocked;
+ }
+
+ ret = mgag200_show_cursor(mdev, src, width, height);
+ if (ret)
+ goto err_drm_gem_vram_vunmap;
+
+ /* Now update internal buffer pointers */
+ drm_gem_vram_vunmap(gbo, src);
+ drm_gem_object_put_unlocked(obj);
+
+ return 0;
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, src);
err_drm_gem_object_put_unlocked:
drm_gem_object_put_unlocked(obj);
return ret;
}
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
+
/* Our origin is at (64,64) */
x += 64;
y += 64;
- BUG_ON(x <= 0);
- BUG_ON(y <= 0);
- BUG_ON(x & ~0xffff);
- BUG_ON(y & ~0xffff);
+ mgag200_move_cursor(mdev, x, y);
- WREG8(MGA_CURPOSXL, x & 0xff);
- WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
-
- WREG8(MGA_CURPOSYL, y & 0xff);
- WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index afd9119b6cf1..397f8b0a9af8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb");
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -58,10 +58,7 @@ static void mga_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static const struct file_operations mgag200_driver_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 1c93f8dc08c7..0ea9a525e57d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -19,7 +19,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
#include "mgag200_reg.h"
@@ -130,16 +129,8 @@ struct mga_connector {
};
struct mga_cursor {
- /*
- We have to have 2 buffers for the cursor to avoid occasional
- corruption while switching cursor icons.
- If either of these is NULL, then don't do hardware cursors, and
- fall back to software.
- */
- struct drm_gem_vram_object *pixels_1;
- struct drm_gem_vram_object *pixels_2;
- /* The currently displayed icon, this points to one of pixels_1, or pixels_2 */
- struct drm_gem_vram_object *pixels_current;
+ struct drm_gem_vram_object *gbo[2];
+ unsigned int next_index;
};
struct mga_mc {
@@ -174,6 +165,8 @@ struct mga_device {
struct mga_cursor cursor;
+ size_t vram_fb_available;
+
bool suspended;
int num_crtc;
enum mga_type type;
@@ -204,8 +197,10 @@ int mgag200_mm_init(struct mga_device *mdev);
void mgag200_mm_fini(struct mga_device *mdev);
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
-int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height);
-int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+int mgag200_cursor_init(struct mga_device *mdev);
+void mgag200_cursor_fini(struct mga_device *mdev);
+int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+ uint32_t handle, uint32_t width, uint32_t height);
+int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index a9773334dedf..5f74aabcd3df 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -159,7 +159,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
dev->mode_config.preferred_depth = 16;
else
dev->mode_config.preferred_depth = 32;
@@ -171,20 +171,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
goto err_modeset;
}
- /* Make small buffers to store a hardware cursor (double buffered icon updates) */
- mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
- roundup(48*64, PAGE_SIZE),
- 0, 0);
- if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) {
- mdev->cursor.pixels_1 = NULL;
- mdev->cursor.pixels_2 = NULL;
+ r = mgag200_cursor_init(mdev);
+ if (r)
dev_warn(&dev->pdev->dev,
- "Could not allocate space for cursors. Not doing hardware cursors.\n");
- }
- mdev->cursor.pixels_current = NULL;
+ "Could not initialize cursors. Not doing hardware cursors.\n");
r = drm_fbdev_generic_setup(mdev->dev, 0);
if (r)
@@ -194,6 +184,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
err_modeset:
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
err_mm:
dev->dev_private = NULL;
@@ -209,6 +200,7 @@ void mgag200_driver_unload(struct drm_device *dev)
return;
mgag200_modeset_fini(mdev);
drm_mode_config_cleanup(dev);
+ mgag200_cursor_fini(mdev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 5e778b5f1a10..5ec697148fc1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1413,8 +1413,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
- .cursor_set = mga_crtc_cursor_set,
- .cursor_move = mga_crtc_cursor_move,
+ .cursor_set = mgag200_crtc_cursor_set,
+ .cursor_move = mgag200_crtc_cursor_move,
.gamma_set = mga_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = mga_crtc_destroy,
@@ -1629,7 +1629,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
bpp = connector->cmdline_mode.bpp;
}
- if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+ if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) {
if (connector->cmdline_mode.specified)
connector->cmdline_mode.specified = false;
return MODE_BAD;
@@ -1638,16 +1638,6 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static void mga_connector_destroy(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1659,7 +1649,6 @@ static void mga_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
.get_modes = mga_vga_get_modes,
.mode_valid = mga_vga_mode_valid,
- .best_encoder = mga_connector_best_encoder,
};
static const struct drm_connector_funcs mga_vga_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 73a6b848601c..99997d737362 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -37,8 +37,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- mdev->mc.vram_size,
- &drm_gem_vram_mm_funcs);
+ mdev->mc.vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
@@ -51,6 +50,8 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
+ mdev->vram_fb_available = mdev->mc.vram_size;
+
return 0;
}
@@ -58,6 +59,8 @@ void mgag200_mm_fini(struct mga_device *mdev)
{
struct drm_device *dev = mdev->dev;
+ mdev->vram_fb_available = 0;
+
drm_vram_helper_release_mm(dev);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index e686331fa089..691c1a277d91 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
cxdbg = ioremap(res->start, resource_size(res));
if (cxdbg) {
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
0x76543210);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
0xFEDCBA98);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
- cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
}
a6xx_state->debugbus = state_kcalloc(a6xx_state,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 4c889aabdaf9..959d03e007fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -31,7 +31,7 @@
*/
#define DPU_DEBUG(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_KMS)) \
+ if (drm_debug_enabled(DRM_UT_KMS)) \
DRM_DEBUG(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
@@ -43,7 +43,7 @@
*/
#define DPU_DEBUG_DRIVER(fmt, ...) \
do { \
- if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ if (drm_debug_enabled(DRM_UT_DRIVER)) \
DRM_ERROR(fmt, ##__VA_ARGS__); \
else \
pr_debug(fmt, ##__VA_ARGS__); \
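
Both macros gate their output on drm_debug_enabled() inside a do { ... } while (0) wrapper, which lets a multi-statement body expand safely in an un-braced if/else. A runnable model of such a category-gated debug macro (names are illustrative):

    #include <stdio.h>

    #define UT_KMS (1u << 0)

    static unsigned int debug_mask = UT_KMS;

    #define MY_DEBUG(cat, fmt, ...)            \
        do {                                   \
            if (debug_mask & (cat))            \
                printf(fmt, ##__VA_ARGS__);    \
        } while (0)

    int main(void)
    {
        if (1)
            MY_DEBUG(UT_KMS, "kms path: %d\n", 42);  /* expands safely */
        else
            printf("never\n");
        return 0;
    }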
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 0da8a4e428ad..eff1a4c61258 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -9,6 +9,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0f312ac5b624..ad4e963ccd9b 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,7 +178,9 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- encoder->bridge = edp->bridge;
+ ret = drm_bridge_attach(encoder, edp->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = edp->bridge;
priv->connectors[priv->num_connectors++] = edp->connector;
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index f2c17858a703..eb34243dad53 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 7f3dd3ffe2c9..0d9657cc70db 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -89,7 +89,6 @@ struct edp_ctrl {
/* edid raw data */
struct edid *edid;
- struct drm_dp_link dp_link;
struct drm_dp_aux *drm_aux;
/* dpcd raw data */
@@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
u32 prate;
u32 lrate;
u32 bpp;
- u8 max_lane = ctrl->dp_link.num_lanes;
+ u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
u8 lane;
prate = ctrl->pixel_rate;
@@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
* By default, use the maximum link rate and minimum lane count,
* so that we can do rate down shift during link training.
*/
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
prate *= bpp;
prate /= 8; /* in kByte */
@@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl)
data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
- if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
depth = EDP_6BIT;
@@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
rate = ctrl->link_rate;
lane = ctrl->lane_cnt;
- max_lane = ctrl->dp_link.num_lanes;
+ max_lane = drm_dp_max_lane_count(ctrl->dpcd);
bpp = ctrl->color_depth * 3;
prate = ctrl->pixel_rate;
@@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
static int edp_do_link_train(struct edp_ctrl *ctrl)
{
+ u8 values[2];
int ret;
- struct drm_dp_link dp_link;
DBG("");
/*
* Set the current link rate and lane cnt to panel. They may have been
* adjusted and the values are different from them in DPCD CAP
*/
- dp_link.num_lanes = ctrl->lane_cnt;
- dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate);
- dp_link.capabilities = ctrl->dp_link.capabilities;
- if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0)
+ values[0] = ctrl->lane_cnt;
+ values[1] = ctrl->link_rate;
+
+ if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
+ sizeof(values)) < 0)
return EDP_TRAIN_FAIL;
ctrl->v_level = 0; /* start from default level */
@@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work)
{
struct edp_ctrl *ctrl = container_of(
work, struct edp_ctrl, on_work);
+ u8 value;
int ret;
mutex_lock(&ctrl->dev_mutex);
@@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work)
edp_ctrl_link_enable(ctrl, 1);
edp_ctrl_irq_enable(ctrl, 1);
- ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link);
- if (ret)
- goto fail;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret < 0)
+ goto fail;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must
+ * exit the power saving state within 1 ms" (Section 2.5.3.1,
+ * Table 5-52, "Sink Control Field" (register 0x600)).
+ */
+ usleep_range(1000, 2000);
+ }
ctrl->power_on = true;
@@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work)
edp_state_ctrl(ctrl, 0);
- drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link);
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
+ u8 value;
+ int ret;
+
+ ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
+ if (ret > 0) {
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
+ }
+ }
edp_ctrl_irq_enable(ctrl, 0);
@@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
edp_ctrl_irq_enable(ctrl, 1);
}
- ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link);
- if (ret) {
- pr_err("%s: read dpcd cap failed, %d\n", __func__, ret);
- goto disable_ret;
- }
-
/* Initialize link rate as panel max link rate */
- ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate);
+ ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
if (!ctrl->edid) {
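With the drm_dp_link_*() helpers gone, edp_ctrl talks to the sink's DPCD registers directly over the AUX channel. The power handling added above reduces to a read-modify-write of DP_SET_POWER (DPCD address 0x600), which only exists from DPCD revision 1.1 on. A condensed sketch, assuming "aux" stands for the driver's struct drm_dp_aux and "dpcd" for its cached receiver-capability block:

    u8 value;
    int ret;

    /* Wake the sink by programming D0 into DP_SET_POWER (DPCD 1.1+). */
    if (dpcd[DP_DPCD_REV] >= 0x11) {
            ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
            if (ret < 0)
                    return ret;

            value &= ~DP_SET_POWER_MASK;
            value |= DP_SET_POWER_D0;

            ret = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
            if (ret < 0)
                    return ret;

            /* Per DP 1.1, the sink must leave power-save within 1 ms. */
            usleep_range(1000, 2000);
    }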
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 355afb936401..1a9b6289637d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,7 +327,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- encoder->bridge = hdmi->bridge;
+ ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
+ if (ret)
+ goto fail;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
priv->connectors[priv->num_connectors++] = hdmi->connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bdac452b00fb..d0b84f0abee1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -14,6 +14,8 @@
#include <linux/gpio/consumer.h>
#include <linux/hdmi.h>
+#include <drm/drm_bridge.h>
+
#include "msm_drv.h"
#include "hdmi.xml.h"
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 6be879578140..1c74381a4fc9 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- int ret;
-
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
- if (ret)
- return ret;
+ mutex_lock(&show_priv->dev->struct_mutex);
gpu->funcs->gpu_state_put(show_priv->state);
mutex_unlock(&show_priv->dev->struct_mutex);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 12421567af89..b69ace8bf526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -95,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
reg = readl(mxsfb->base + LCDC_CTRL);
- if (mxsfb->connector.display_info.num_bus_formats)
- bus_format = mxsfb->connector.display_info.bus_formats[0];
+ if (mxsfb->connector->display_info.num_bus_formats)
+ bus_format = mxsfb->connector->display_info.bus_formats[0];
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
+ bus_format);
reg &= ~CTRL_BUS_WIDTH_MASK;
switch (bus_format) {
@@ -204,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb)
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{
+ struct drm_device *drm = mxsfb->pipe.crtc.dev;
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
- const u32 bus_flags = mxsfb->connector.display_info.bus_flags;
+ u32 bus_flags = mxsfb->connector->display_info.bus_flags;
u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;
int err;
@@ -229,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);
+ if (mxsfb->bridge && mxsfb->bridge->timings)
+ bus_flags = mxsfb->bridge->timings->input_bus_flags;
+
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
+ m->crtc_clock,
+ (int)(clk_get_rate(mxsfb->clk) / 1000));
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ bus_flags);
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
+
writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
mxsfb->base + mxsfb->devdata->transfer_count);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index e8506335cd15..497cf443a9af 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -101,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_connector *connector;
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
struct drm_device *drm = pipe->plane.dev;
+ if (!mxsfb->connector) {
+ list_for_each_entry(connector,
+ &drm->mode_config.connector_list,
+ head)
+ if (connector->encoder == &mxsfb->pipe.encoder) {
+ mxsfb->connector = connector;
+ break;
+ }
+ }
+
+ if (!mxsfb->connector) {
+ dev_warn(drm->dev, "No connector attached, using default\n");
+ mxsfb->connector = &mxsfb->panel_connector;
+ }
+
pm_runtime_get_sync(drm->dev);
drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
@@ -129,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&drm->event_lock);
+
+ if (mxsfb->connector != &mxsfb->panel_connector)
+ mxsfb->connector = NULL;
}
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -226,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
- &mxsfb->connector);
+ mxsfb->connector);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
goto err_vblank;
}
- ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
- if (ret) {
- dev_err(drm->dev, "Cannot connect panel\n");
- goto err_vblank;
+ /*
+ * Attach panel only if there is one.
+ * If no panel is attached, it must be a bridge. In this case, we
+ * need a reference to its connector for proper initialization.
+ * We will do this check in pipe->enable(), since the connector won't
+ * be attached to an encoder until then.
+ */
+
+ if (mxsfb->panel) {
+ ret = drm_panel_attach(mxsfb->panel, mxsfb->connector);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect panel: %d\n", ret);
+ goto err_vblank;
+ }
+ } else if (mxsfb->bridge) {
+ ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe,
+ mxsfb->bridge);
+ if (ret) {
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ goto err_vblank;
+ }
}
drm->mode_config.min_width = MXSFB_MIN_XRES;
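mxsfb now drives either a panel (with a locally created connector) or an external bridge (whose connector only appears once the bridge has attached), so the enable hook resolves the connector lazily by walking the connector list for the one bound to the pipe's encoder. A simplified sketch of that lookup; the helper name is hypothetical and, like the patch, it assumes a simple non-hotpluggable setup:

    static struct drm_connector *
    find_connector_for_encoder(struct drm_device *drm, struct drm_encoder *encoder)
    {
            struct drm_connector *connector;

            list_for_each_entry(connector, &drm->mode_config.connector_list, head)
                    if (connector->encoder == encoder)
                            return connector;

            return NULL;    /* no connector attached (yet) */
    }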
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
index d975300dca05..0b65b5194a9c 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
@@ -27,8 +27,10 @@ struct mxsfb_drm_private {
struct clk *clk_disp_axi;
struct drm_simple_display_pipe pipe;
- struct drm_connector connector;
+ struct drm_connector panel_connector;
+ struct drm_connector *connector;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
int mxsfb_setup_crtc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index be36f4d6cc96..4eb94744c526 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -21,7 +21,8 @@
static struct mxsfb_drm_private *
drm_connector_to_mxsfb_drm_private(struct drm_connector *connector)
{
- return container_of(connector, struct mxsfb_drm_private, connector);
+ return container_of(connector, struct mxsfb_drm_private,
+ panel_connector);
}
static int mxsfb_panel_get_modes(struct drm_connector *connector)
@@ -76,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
int mxsfb_create_output(struct drm_device *drm)
{
struct mxsfb_drm_private *mxsfb = drm->dev_private;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
+ &mxsfb->panel, &mxsfb->bridge);
if (ret)
return ret;
- mxsfb->connector.dpms = DRM_MODE_DPMS_OFF;
- mxsfb->connector.polled = 0;
- drm_connector_helper_add(&mxsfb->connector,
- &mxsfb_panel_connector_helper_funcs);
- ret = drm_connector_init(drm, &mxsfb->connector,
- &mxsfb_panel_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (!ret)
- mxsfb->panel = panel;
+ if (mxsfb->panel) {
+ mxsfb->connector = &mxsfb->panel_connector;
+ mxsfb->connector->dpms = DRM_MODE_DPMS_OFF;
+ mxsfb->connector->polled = 0;
+ drm_connector_helper_add(mxsfb->connector,
+ &mxsfb_panel_connector_helper_funcs);
+ ret = drm_connector_init(drm, mxsfb->connector,
+ &mxsfb_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ }
return ret;
}
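The rewritten output setup keys off drm_of_find_panel_or_bridge(), which walks the OF graph from the given port/endpoint and fills in at most one of the two out-pointers; the caller branches on whichever is non-NULL. A sketch of the usual call pattern (port 0, endpoint 0, as above; "np" stands in for the device's OF node):

    struct drm_panel *panel = NULL;
    struct drm_bridge *bridge = NULL;
    int ret;

    ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
    if (ret)
            return ret;     /* typically -EPROBE_DEFER if the remote isn't bound yet */

    if (panel) {
            /* create a local connector and attach the panel to it */
    } else if (bridge) {
            /* let the bridge provide the connector */
    }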
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index dc64863b5fd8..44ee82d0c9b6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -256,7 +256,7 @@ nv04_display_create(struct drm_device *dev)
list_for_each_entry_safe(connector, ct,
&dev->mode_config.connector_list, head) {
- if (!connector->encoder_ids[0]) {
+ if (!connector->possible_encoders) {
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index b46be8a091e9..549486f1d937 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
-static const struct drm_connector_helper_funcs
-nv50_mstc_help = {
- .get_modes = nv50_mstc_get_modes,
- .mode_valid = nv50_mstc_mode_valid,
- .best_encoder = nv50_mstc_best_encoder,
- .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
- .atomic_check = nv50_mstc_atomic_check,
-};
-
-static enum drm_connector_status
-nv50_mstc_detect(struct drm_connector *connector, bool force)
+static int
+nv50_mstc_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
- enum drm_connector_status conn_status;
int ret;
if (drm_connector_is_unregistered(connector))
@@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
- conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
- mstc->port);
+ ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
+ mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
- return conn_status;
+ return ret;
}
+static const struct drm_connector_helper_funcs
+nv50_mstc_help = {
+ .get_modes = nv50_mstc_get_modes,
+ .mode_valid = nv50_mstc_mode_valid,
+ .best_encoder = nv50_mstc_best_encoder,
+ .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
+ .detect_ctx = nv50_mstc_detect,
+};
+
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
@@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
- .detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
@@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_init(struct nv50_mstm *mstm)
+nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{
int ret;
if (!mstm || !mstm->mgr.mst_state)
return;
- ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev);
@@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
- nv50_mstm_init(nv_encoder->dp.mstm);
+ nv50_mstm_init(nv_encoder->dp.mstm, runtime);
}
}
@@ -2392,7 +2392,7 @@ nv50_display_create(struct drm_device *dev)
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->encoder_ids[0])
+ if (connector->possible_encoders)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 94dfa2e5a9ab..5b413588b823 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -365,9 +365,8 @@ find_encoder(struct drm_connector *connector, int type)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *enc;
- int i;
- drm_connector_for_each_possible_encoder(connector, enc, i) {
+ drm_connector_for_each_possible_encoder(connector, enc) {
nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
@@ -414,10 +413,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
- int i, ret;
+ int ret;
bool switcheroo_ddc = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
nv_encoder = nouveau_encoder(encoder);
switch (nv_encoder->dcb->type) {
@@ -1131,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
int ret;
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
+ NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
+ nv50_mstm_service(nv_encoder->dp.mstm);
+
+ return NVIF_NOTIFY_KEEP;
+ }
ret = pm_runtime_get(drm->dev->dev);
if (ret == 0) {
@@ -1151,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
return NVIF_NOTIFY_DROP;
}
- if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
- NV_DEBUG(drm, "service %s\n", name);
- drm_dp_cec_irq(&nv_connector->aux);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
- nv50_mstm_service(nv_encoder->dp.mstm);
- } else {
- bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
-
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
+ NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
- drm_dp_cec_unset_edid(&nv_connector->aux);
- NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
- if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
- if (!plugged)
- nv50_mstm_remove(nv_encoder->dp.mstm);
- }
-
- drm_helper_hpd_irq_event(connector->dev);
+ nv50_mstm_remove(nv_encoder->dp.mstm);
}
+ drm_helper_hpd_irq_event(connector->dev);
+
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
@@ -1415,8 +1415,7 @@ nouveau_connector_create(struct drm_device *dev,
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- drm_dp_cec_register_connector(&nv_connector->aux,
- connector->name, dev->dev);
+ drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
}
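Alongside the hotplug rework, nouveau picks up the connector->possible_encoders conversion seen in find_encoder() above: the fixed encoder_ids[] array of object IDs becomes an encoder bitmask, and drm_connector_for_each_possible_encoder() drops its index parameter. The new iteration shape, sketched:

    struct drm_encoder *encoder;

    /* Two-argument form: the mask is read from connector->possible_encoders. */
    drm_connector_for_each_possible_encoder(connector, encoder) {
            if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
                    return encoder;
    }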
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6f038511a03a..53f9bceaf17a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_connector_list_iter conn_iter;
int ret;
+ /*
+ * Enable hotplug interrupts (done as early as possible, since we need
+ * them for MST)
+ */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nvif_notify_get(&conn->hpd);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
ret = disp->init(dev, resume, runtime);
if (ret)
return ret;
@@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
*/
drm_kms_helper_poll_enable(dev);
- /* enable hotplug interrupts */
- drm_connector_list_iter_begin(dev, &conn_iter);
- nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
- struct nouveau_connector *conn = nouveau_connector(connector);
- nvif_notify_get(&conn->hpd);
- }
- drm_connector_list_iter_end(&conn_iter);
-
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 668d4bd0c118..df9bf1fd1bc0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -88,6 +88,7 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
}
struct nouveau_svmm {
+ struct mmu_notifier notifier;
struct nouveau_vmm *vmm;
struct {
unsigned long start;
@@ -95,9 +96,6 @@ struct nouveau_svmm {
} unmanaged;
struct mutex mutex;
-
- struct mm_struct *mm;
- struct hmm_mirror mirror;
};
#define SVMM_DBG(s,f,a...) \
@@ -251,10 +249,11 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
}
static int
-nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update)
+nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *update)
{
- struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
+ struct nouveau_svmm *svmm =
+ container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
@@ -264,6 +263,9 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
+ if (unlikely(!svmm->vmm))
+ goto out;
+
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
@@ -273,19 +275,20 @@ nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
}
nouveau_svmm_invalidate(svmm, start, limit);
+
+out:
mutex_unlock(&svmm->mutex);
return 0;
}
-static void
-nouveau_svmm_release(struct hmm_mirror *mirror)
+static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
+ kfree(container_of(mn, struct nouveau_svmm, notifier));
}
-static const struct hmm_mirror_ops
-nouveau_svmm = {
- .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
- .release = nouveau_svmm_release,
+static const struct mmu_notifier_ops nouveau_mn_ops = {
+ .invalidate_range_start = nouveau_svmm_invalidate_range_start,
+ .free_notifier = nouveau_svmm_free_notifier,
};
void
@@ -293,8 +296,10 @@ nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
- hmm_mirror_unregister(&svmm->mirror);
- kfree(*psvmm);
+ mutex_lock(&svmm->mutex);
+ svmm->vmm = NULL;
+ mutex_unlock(&svmm->mutex);
+ mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
@@ -320,7 +325,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
- goto done;
+ goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
@@ -335,24 +340,26 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
- goto done;
-
- /* Enable HMM mirroring of CPU address-space to VMM. */
- svmm->mm = get_task_mm(current);
- down_write(&svmm->mm->mmap_sem);
- svmm->mirror.ops = &nouveau_svmm;
- ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
- if (ret == 0) {
- cli->svm.svmm = svmm;
- cli->svm.cli = cli;
- }
- up_write(&svmm->mm->mmap_sem);
- mmput(svmm->mm);
+ goto out_free;
-done:
+ down_write(&current->mm->mmap_sem);
+ svmm->notifier.ops = &nouveau_mn_ops;
+ ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
- nouveau_svmm_fini(&svmm);
+ goto out_mm_unlock;
+ /* Note: ownership of svmm transfers to the mmu_notifier */
+
+ cli->svm.svmm = svmm;
+ cli->svm.cli = cli;
+ up_write(&current->mm->mmap_sem);
mutex_unlock(&cli->mutex);
+ return 0;
+
+out_mm_unlock:
+ up_write(&current->mm->mmap_sem);
+out_free:
+ mutex_unlock(&cli->mutex);
+ kfree(svmm);
return ret;
}
@@ -475,43 +482,90 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
-static inline bool
-nouveau_range_done(struct hmm_range *range)
+struct svm_notifier {
+ struct mmu_interval_notifier notifier;
+ struct nouveau_svmm *svmm;
+};
+
+static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- bool ret = hmm_range_valid(range);
+ struct svm_notifier *sn =
+ container_of(mni, struct svm_notifier, notifier);
- hmm_range_unregister(range);
- return ret;
+ /*
+ * Serializes the update to mni->invalidate_seq done by the caller and
+ * prevents invalidation of the PTE from progressing while HW is being
+ * programmed. This is very hacky and only works because the normal
+ * notifier that does invalidation is always called after the range
+ * notifier.
+ */
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&sn->svmm->mutex);
+ else if (!mutex_trylock(&sn->svmm->mutex))
+ return false;
+ mmu_interval_set_seq(mni, cur_seq);
+ mutex_unlock(&sn->svmm->mutex);
+ return true;
}
-static int
-nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
+static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
+ .invalidate = nouveau_svm_range_invalidate,
+};
+
+static int nouveau_range_fault(struct nouveau_svmm *svmm,
+ struct nouveau_drm *drm, void *data, u32 size,
+ u64 *pfns, struct svm_notifier *notifier)
{
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ /* Have HMM fault pages within the fault window to the GPU. */
+ struct hmm_range range = {
+ .notifier = &notifier->notifier,
+ .start = notifier->notifier.interval_tree.start,
+ .end = notifier->notifier.interval_tree.last + 1,
+ .pfns = pfns,
+ .flags = nouveau_svm_pfn_flags,
+ .values = nouveau_svm_pfn_values,
+ .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ };
+ struct mm_struct *mm = notifier->notifier.mm;
long ret;
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
+ while (true) {
+ if (time_after(jiffies, timeout))
+ return -EBUSY;
+
+ range.notifier_seq = mmu_interval_read_begin(range.notifier);
+ range.default_flags = 0;
+ range.pfn_flags_mask = -1UL;
+ down_read(&mm->mmap_sem);
+ ret = hmm_range_fault(&range, 0);
+ up_read(&mm->mmap_sem);
+ if (ret <= 0) {
+ if (ret == 0 || ret == -EBUSY)
+ continue;
+ return ret;
+ }
- ret = hmm_range_register(range, &svmm->mirror);
- if (ret) {
- up_read(&svmm->mm->mmap_sem);
- return (int)ret;
+ mutex_lock(&svmm->mutex);
+ if (mmu_interval_read_retry(range.notifier,
+ range.notifier_seq)) {
+ mutex_unlock(&svmm->mutex);
+ continue;
+ }
+ break;
}
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- up_read(&svmm->mm->mmap_sem);
- return -EBUSY;
- }
+ nouveau_dmem_convert_pfn(drm, &range);
- ret = hmm_range_fault(range, 0);
- if (ret <= 0) {
- if (ret == 0)
- ret = -EBUSY;
- up_read(&svmm->mm->mmap_sem);
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
+ svmm->vmm->vmm.object.client->super = false;
+ mutex_unlock(&svmm->mutex);
+
+ return ret;
}
static int
@@ -531,7 +585,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
- struct hmm_range range;
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -587,6 +640,9 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
+ struct svm_notifier notifier;
+ struct mm_struct *mm;
+
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
@@ -606,24 +662,32 @@ nouveau_svm_fault(struct nvif_notify *notify)
start = max_t(u64, start, svmm->unmanaged.limit);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
+ mm = svmm->notifier.mm;
+ if (!mmget_not_zero(mm)) {
+ nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
+ continue;
+ }
+
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&svmm->mm->mmap_sem);
- vma = find_vma_intersection(svmm->mm, start, limit);
+ down_read(&mm->mmap_sem);
+ vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&svmm->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
+ up_read(&mm->mmap_sem);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
- up_read(&svmm->mm->mmap_sem);
+ mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
@@ -679,33 +743,19 @@ nouveau_svm_fault(struct nvif_notify *notify)
args.i.p.addr,
args.i.p.addr + args.i.p.size, fn - fi);
- /* Have HMM fault pages within the fault window to the GPU. */
- range.start = args.i.p.addr;
- range.end = args.i.p.addr + args.i.p.size;
- range.pfns = args.phys;
- range.flags = nouveau_svm_pfn_flags;
- range.values = nouveau_svm_pfn_values;
- range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
-again:
- ret = nouveau_range_fault(svmm, &range);
- if (ret == 0) {
- mutex_lock(&svmm->mutex);
- if (!nouveau_range_done(&range)) {
- mutex_unlock(&svmm->mutex);
- goto again;
- }
-
- nouveau_dmem_convert_pfn(svm->drm, &range);
-
- svmm->vmm->vmm.object.client->super = true;
- ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
- &args, sizeof(args.i) +
- pi * sizeof(args.phys[0]),
- NULL);
- svmm->vmm->vmm.object.client->super = false;
- mutex_unlock(&svmm->mutex);
- up_read(&svmm->mm->mmap_sem);
+ notifier.svmm = svmm;
+ ret = mmu_interval_notifier_insert(&notifier.notifier,
+ svmm->notifier.mm,
+ args.i.p.addr, args.i.p.size,
+ &nouveau_svm_mni_ops);
+ if (!ret) {
+ ret = nouveau_range_fault(
+ svmm, svm->drm, &args,
+ sizeof(args.i) + pi * sizeof(args.phys[0]),
+ args.phys, &notifier);
+ mmu_interval_notifier_remove(&notifier.notifier);
}
+ mmput(mm);
/* Cancel any faults in the window whose pages didn't manage
* to keep their valid bit, or stay writeable when required.
@@ -714,10 +764,10 @@ again:
*/
while (fi < fn) {
struct nouveau_svm_fault *fault = buffer->fault[fi++];
- pi = (fault->addr - range.start) >> PAGE_SHIFT;
+ pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT;
if (ret ||
- !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
- (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
+ !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) ||
+ (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) &&
fault->access != 0 && fault->access != 3)) {
nouveau_svm_fault_cancel_fault(svm, fault);
continue;
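The SVM fault path above replaces hmm_mirror with a per-fault mmu_interval_notifier: insert a notifier over the fault window, sample a sequence count, fault the pages, and only program the GPU if no invalidation raced in, looping otherwise. The core begin/fault/retry pattern, condensed from nouveau_range_fault() above (timeout handling elided):

    while (true) {
            range.notifier_seq = mmu_interval_read_begin(range.notifier);

            down_read(&mm->mmap_sem);
            ret = hmm_range_fault(&range, 0);
            up_read(&mm->mmap_sem);
            if (ret <= 0) {
                    if (ret == 0 || ret == -EBUSY)
                            continue;       /* collided with an invalidation */
                    return ret;
            }

            mutex_lock(&svmm->mutex);
            if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
                    mutex_unlock(&svmm->mutex);
                    continue;               /* invalidated meanwhile, re-fault */
            }
            break;  /* still valid: program the GPU under svmm->mutex */
    }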
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index f0daf958e03a..77a0c6ad3cef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -236,6 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
+ dev->vma_offset_manager,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 904101c5e79d..5950c3f52c2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -6,7 +6,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
-omapdss-y := core.o dss.o dispc.o dispc_coefs.o \
+omapdss-y := dss.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
deleted file mode 100644
index 6ac497b63711..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * Some code and ideas taken from drivers/video/omap/ driver
- * by Imre Deak.
- */
-
-#define DSS_SUBSYS_NAME "CORE"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "omapdss.h"
-#include "dss.h"
-
-/* INIT */
-static struct platform_driver * const omap_dss_drivers[] = {
- &omap_dsshw_driver,
- &omap_dispchw_driver,
-#ifdef CONFIG_OMAP2_DSS_DSI
- &omap_dsihw_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- &omap_venchw_driver,
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- &omapdss_hdmi4hw_driver,
-#endif
-#ifdef CONFIG_OMAP5_DSS_HDMI
- &omapdss_hdmi5hw_driver,
-#endif
-};
-
-static int __init omap_dss_init(void)
-{
- return platform_register_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-static void __exit omap_dss_exit(void)
-{
- platform_unregister_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-}
-
-module_init(omap_dss_init);
-module_exit(omap_dss_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
-MODULE_LICENSE("GPL v2");
-
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index ed0ccbeed70f..413dbdd1771e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -114,6 +114,7 @@ struct dispc_features {
const unsigned int num_reg_fields;
const enum omap_overlay_caps *overlay_caps;
const u32 **supported_color_modes;
+ const u32 *supported_scaler_color_modes;
unsigned int num_mgrs;
unsigned int num_ovls;
unsigned int buffer_size_unit;
@@ -184,9 +185,6 @@ struct dispc_device {
struct regmap *syscon_pol;
u32 syscon_pol_offset;
-
- /* DISPC_CONTROL & DISPC_CONFIG lock*/
- spinlock_t control_lock;
};
enum omap_color_component {
@@ -368,25 +366,17 @@ static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- return REG_GET(dispc, rfld.reg, rfld.high, rfld.low);
+ return REG_GET(dispc, rfld->reg, rfld->high, rfld->low);
}
static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
enum mgr_reg_fields regfld, int val)
{
- const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
- const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
- unsigned long flags;
+ const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld];
- if (need_lock) {
- spin_lock_irqsave(&dispc->control_lock, flags);
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- spin_unlock_irqrestore(&dispc->control_lock, flags);
- } else {
- REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
- }
+ REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low);
}
static int dispc_get_num_ovls(struct dispc_device *dispc)
@@ -2510,6 +2500,19 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
if (width == out_width && height == out_height)
return 0;
+ if (dispc->feat->supported_scaler_color_modes) {
+ const u32 *modes = dispc->feat->supported_scaler_color_modes;
+ unsigned int i;
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ break;
+ }
+
+ if (modes[i] == 0)
+ return -EINVAL;
+ }
+
if (plane == OMAP_DSS_WB) {
switch (fourcc) {
case DRM_FORMAT_NV12:
@@ -4225,6 +4228,12 @@ static const u32 *omap4_dispc_supported_color_modes[] = {
DRM_FORMAT_RGBX8888),
};
+static const u32 omap3_dispc_supported_scaler_color_modes[] = {
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+ 0,
+};
+
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
@@ -4253,6 +4262,7 @@ static const struct dispc_features omap24xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
.overlay_caps = omap2_dispc_overlay_caps,
.supported_color_modes = omap2_dispc_supported_color_modes,
+ .supported_scaler_color_modes = COLOR_ARRAY(DRM_FORMAT_XRGB8888),
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4287,6 +4297,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4321,6 +4332,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4355,6 +4367,7 @@ static const struct dispc_features omap36xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3630_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 2,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4389,6 +4402,7 @@ static const struct dispc_features am43xx_dispc_feats = {
.num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
.overlay_caps = omap3430_dispc_overlay_caps,
.supported_color_modes = omap3_dispc_supported_color_modes,
+ .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes,
.num_mgrs = 1,
.num_ovls = 3,
.buffer_size_unit = 1,
@@ -4768,8 +4782,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, dispc);
dispc->dss = dss;
- spin_lock_init(&dispc->control_lock);
-
/*
* The OMAP3-based models can't be told apart using the compatible
* string, use SoC device matching.
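The new supported_scaler_color_modes field is a zero-terminated array of fourcc codes, and dispc_ovl_calc_scaling() rejects any format not on the list before computing scaling factors. The sentinel-scan idiom, for reference:

    /* Check a fourcc against a 0-terminated allow-list. */
    static bool fourcc_in_list(const u32 *modes, u32 fourcc)
    {
            unsigned int i;

            for (i = 0; modes[i]; i++)
                    if (modes[i] == fourcc)
                            return true;

            return false;
    }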
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b30fcaa2d0f5..da16ea095f13 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi)
static void dsi_proto_timings(struct dsi_data *dsi)
{
- unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tlpx, tclk_zero, tclk_prepare;
unsigned int tclk_pre, tclk_post;
unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
unsigned int ths_trail, ths_exit;
@@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi)
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
- tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 4bdd63b57100..225ec808b01a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1598,3 +1598,40 @@ struct platform_driver omap_dsshw_driver = {
.suppress_bind_attrs = true,
},
};
+
+/* INIT */
+static struct platform_driver * const omap_dss_drivers[] = {
+ &omap_dsshw_driver,
+ &omap_dispchw_driver,
+#ifdef CONFIG_OMAP2_DSS_DSI
+ &omap_dsihw_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ &omap_venchw_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ &omapdss_hdmi4hw_driver,
+#endif
+#ifdef CONFIG_OMAP5_DSS_HDMI
+ &omapdss_hdmi5hw_driver,
+#endif
+};
+
+static int __init omap_dss_init(void)
+{
+ return platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+static void __exit omap_dss_exit(void)
+{
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+}
+
+module_init(omap_dss_init);
+module_exit(omap_dss_exit);
+
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
+MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 5d5d5588ebc1..ea5d5c228534 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -542,8 +542,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core,
}
/* Set ACR clock divisor */
- REG_FLD_MOD(av_base,
- HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0);
+ if (cfg->use_mclk)
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL,
+ cfg->mclk_mode, 2, 0);
r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL);
/*
@@ -675,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config acore;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -737,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
/* Audio clock regeneration settings */
acore.n = n;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 7400fb99d453..ff4d35c8771f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,24 +23,12 @@
#include "hdmi5_core.h"
-/* only 24 bit color depth used for now */
-static const struct csc_table csc_table_deepcolor[] = {
- /* HDMI_DEEP_COLOR_24BIT */
- [0] = { 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32, },
- /* HDMI_DEEP_COLOR_30BIT */
- [1] = { 7015, 0, 0, 128, 0, 7015, 0, 128, 0, 0, 7015, 128, },
- /* HDMI_DEEP_COLOR_36BIT */
- [2] = { 7010, 0, 0, 512, 0, 7010, 0, 512, 0, 0, 7010, 512, },
- /* FULL RANGE */
- [3] = { 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0, },
-};
-
static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned int ss_scl_high = 4600; /* ns */
- const unsigned int ss_scl_low = 5400; /* ns */
+ const unsigned int ss_scl_high = 4700; /* ns */
+ const unsigned int ss_scl_low = 5500; /* ns */
const unsigned int fs_scl_high = 600; /* ns */
const unsigned int fs_scl_low = 1300; /* ns */
const unsigned int sda_hold = 1000; /* ns */
@@ -397,14 +385,6 @@ static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 2, 1, 0);
}
-static void hdmi_core_config_csc(struct hdmi_core_data *core)
-{
- int clr_depth = 0; /* 24 bit color depth */
-
- /* CSC_COLORDEPTH */
- REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, clr_depth, 7, 4);
-}
-
static void hdmi_core_config_video_sampler(struct hdmi_core_data *core)
{
int video_mapping = 1; /* for 24 bit color depth */
@@ -469,47 +449,67 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0);
}
-static void hdmi_core_csc_config(struct hdmi_core_data *core,
- struct csc_table csc_coeff)
+static void hdmi_core_write_csc(struct hdmi_core_data *core,
+ const struct csc_table *csc_coeff)
{
void __iomem *base = core->base;
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff.a1 >> 8 , 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff.a1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff.a2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff.a2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff.a3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff.a3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff.a4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff.a4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff.b1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff.b1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff.b2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff.b2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff.b3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff.b3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff.b4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff.b4, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff.c1 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff.c1, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff.c2 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff.c2, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff.c3 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff.c3, 7, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff.c4 >> 8, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff.c4, 7, 0);
-
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff->a1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff->a1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff->a2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff->a2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff->a3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff->a3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff->a4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff->a4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff->b1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff->b1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff->b2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff->b2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff->b3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff->b3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff->b4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff->b4, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff->c1 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff->c1, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff->c2 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff->c2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff->c3 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff->c3, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff->c4 >> 8, 6, 0);
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff->c4, 7, 0);
+
+ /* enable CSC */
REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0);
}
-static void hdmi_core_configure_range(struct hdmi_core_data *core)
+static void hdmi_core_configure_range(struct hdmi_core_data *core,
+ enum hdmi_quantization_range range)
{
- struct csc_table csc_coeff = { 0 };
+ static const struct csc_table csc_limited_range = {
+ 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32
+ };
+ static const struct csc_table csc_full_range = {
+ 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0
+ };
+ const struct csc_table *csc_coeff;
+
+ /* CSC_COLORDEPTH = 24 bits */
+ REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, 0, 7, 4);
+
+ switch (range) {
+ case HDMI_QUANTIZATION_RANGE_FULL:
+ csc_coeff = &csc_full_range;
+ break;
- /* support limited range with 24 bit color depth for now */
- csc_coeff = csc_table_deepcolor[0];
+ case HDMI_QUANTIZATION_RANGE_DEFAULT:
+ case HDMI_QUANTIZATION_RANGE_LIMITED:
+ default:
+ csc_coeff = &csc_limited_range;
+ break;
+ }
- hdmi_core_csc_config(core, csc_coeff);
+ hdmi_core_write_csc(core, csc_coeff);
}
static void hdmi_core_enable_video_path(struct hdmi_core_data *core)
@@ -600,9 +600,20 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct videomode vm;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
+ enum hdmi_quantization_range range;
hdmi_core_mask_interrupts(core);
+ if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
+ char vic = cfg->infoframe.video_code;
+
+ /* All CEA modes other than VIC 1 use limited quantization range. */
+ range = vic > 1 ? HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL;
+ } else {
+ range = HDMI_QUANTIZATION_RANGE_FULL;
+ }
+
hdmi_core_init(&v_core_cfg, cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg);
@@ -616,9 +627,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_wp_video_config_interface(wp, &vm);
- /* support limited range with 24 bit color depth for now */
- hdmi_core_configure_range(core);
- cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+ hdmi_core_configure_range(core, range);
+ cfg->infoframe.quantization_range = range;
/*
* configure core video part, set software reset in the core
@@ -628,7 +638,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_video_config(core, &v_core_cfg);
hdmi_core_config_video_packetizer(core);
- hdmi_core_config_csc(core);
hdmi_core_config_video_sampler(core);
if (cfg->hdmi_dvi_mode == HDMI_HDMI)
@@ -798,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
- int err, n, cts, channel_count;
+ int n, cts, channel_count;
unsigned int fs_nr;
bool word_length_16b = false;
@@ -841,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
return -EINVAL;
}
- err = hdmi_compute_acr(pclk, fs_nr, &n, &cts);
+ hdmi_compute_acr(pclk, fs_nr, &n, &cts);
core_cfg.n = n;
core_cfg.cts = cts;
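The quantization-range selection added earlier in this file mirrors the CTA-861 rule that every CEA mode except VIC 1 (640x480) uses limited RGB range. For reference, the DRM core encodes the same rule in a helper keyed on the display mode, which a driver holding a struct drm_display_mode could use instead (a sketch; "mode" and "is_hdmi" are assumptions here):

    #include <drm/drm_modes.h>

    enum hdmi_quantization_range range =
            is_hdmi ? drm_default_rgb_quant_range(mode)
                    : HDMI_QUANTIZATION_RANGE_FULL;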
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 14b41de44ebc..0693d34fca1b 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>
#include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 835e6654fa82..43c1d096b021 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
static inline enum tiler_fmt gem2fmt(u32 flags)
{
- switch (flags & OMAP_BO_TILED) {
+ switch (flags & OMAP_BO_TILED_MASK) {
case OMAP_BO_TILED_8:
return TILFMT_8BIT;
case OMAP_BO_TILED_16:
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 2983c003698e..b3e22c890c51 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 6fe14111cd95..24bbe9f2a32e 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -6,6 +6,7 @@
#include <linux/list.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 1b8b5108caf8..9aeab81dfb90 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb,
bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
{
- return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
+ return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK;
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
{
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
- struct plane *plane = &omap_fb->planes[0];
u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
x = state->src_x >> 16;
y = state->src_y >> 16;
- if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
+ if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) {
u32 w = state->src_w >> 16;
u32 h = state->src_h >> 16;
@@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
info->screen_width /= format->cpp[0];
if (fb->format->format == DRM_FORMAT_NV12) {
- plane = &omap_fb->planes[1];
-
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
- WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+ WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK));
omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
&info->p_uv_addr);
} else {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 08f539efddfb..e518d93ca6df 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -67,7 +67,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- u32 dma_addr_cnt;
+ refcount_t dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -196,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
struct omap_drm_private *priv = obj->dev->dev_private;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;
@@ -324,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
struct omap_gem_object *omap_obj = to_omap_bo(obj);
size_t size = obj->size;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
/* for tiled buffers, the virtual size has stride rounded up
* to 4kb.. (to hide the fact that row n+1 might start 16kb or
* 32kb later!). But we don't back the entire buffer with
@@ -513,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
* probably trigger put_pages()?
*/
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = omap_gem_fault_2d(obj, vma, vmf);
else
ret = omap_gem_fault_1d(obj, vma, vmf);
@@ -773,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
mutex_lock(&omap_obj->lock);
if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
- if (omap_obj->dma_addr_cnt == 0) {
+ if (refcount_read(&omap_obj->dma_addr_cnt) == 0) {
u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
BUG_ON(omap_obj->block);
+ refcount_set(&omap_obj->dma_addr_cnt, 1);
+
ret = omap_gem_attach_pages(obj);
if (ret)
goto fail;
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
block = tiler_reserve_2d(fmt,
omap_obj->width,
omap_obj->height, 0);
@@ -813,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
omap_obj->block = block;
DBG("got dma address: %pad", &omap_obj->dma_addr);
+ } else {
+ refcount_inc(&omap_obj->dma_addr_cnt);
}
- omap_obj->dma_addr_cnt++;
-
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else if (omap_gem_is_contiguous(omap_obj)) {
- *dma_addr = omap_obj->dma_addr;
+ if (dma_addr)
+ *dma_addr = omap_obj->dma_addr;
} else {
ret = -EINVAL;
goto fail;
@@ -832,38 +836,46 @@ fail:
}
/**
+ * omap_gem_unpin_locked() - Unpin a GEM object from memory
+ * @obj: the GEM object
+ *
+ * omap_gem_unpin() without locking.
+ */
+static void omap_gem_unpin_locked(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret;
+
+ if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->dma_addr = 0;
+ omap_obj->block = NULL;
+ }
+}
+
+/**
* omap_gem_unpin() - Unpin a GEM object from memory
* @obj: the GEM object
*
* Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
- * reference-counted, the actualy unpin will only be performed when the number
+ * reference-counted, the actual unpin will only be performed when the number
* of calls to this function matches the number of calls to omap_gem_pin().
*/
void omap_gem_unpin(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- int ret;
mutex_lock(&omap_obj->lock);
-
- if (omap_obj->dma_addr_cnt > 0) {
- omap_obj->dma_addr_cnt--;
- if (omap_obj->dma_addr_cnt == 0) {
- ret = tiler_unpin(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not unpin pages: %d\n", ret);
- }
- ret = tiler_release(omap_obj->block);
- if (ret) {
- dev_err(obj->dev->dev,
- "could not release unmap: %d\n", ret);
- }
- omap_obj->dma_addr = 0;
- omap_obj->block = NULL;
- }
- }
-
+ omap_gem_unpin_locked(obj);
mutex_unlock(&omap_obj->lock);
}
@@ -879,8 +891,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
mutex_lock(&omap_obj->lock);
- if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
- (omap_obj->flags & OMAP_BO_TILED)) {
+ if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block &&
+ (omap_obj->flags & OMAP_BO_TILED_MASK)) {
*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
ret = 0;
}
@@ -895,7 +907,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
- if (omap_obj->flags & OMAP_BO_TILED)
+ if (omap_obj->flags & OMAP_BO_TILED_MASK)
ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
return ret;
}
@@ -1030,10 +1042,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, kref_read(&obj->refcount),
- off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
+ off, &omap_obj->dma_addr,
+ refcount_read(&omap_obj->dma_addr_cnt),
omap_obj->vaddr, omap_obj->roll);
- if (omap_obj->flags & OMAP_BO_TILED) {
+ if (omap_obj->flags & OMAP_BO_TILED_MASK) {
seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
if (omap_obj->block) {
struct tcm_area *area = &omap_obj->block->area;
@@ -1093,7 +1106,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
mutex_lock(&omap_obj->lock);
/* The object should not be pinned. */
- WARN_ON(omap_obj->dma_addr_cnt > 0);
+ WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0);
if (omap_obj->pages) {
if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
@@ -1120,6 +1133,38 @@ void omap_gem_free_object(struct drm_gem_object *obj)
kfree(omap_obj);
}
+static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+
+ switch (flags & OMAP_BO_CACHE_MASK) {
+ case OMAP_BO_CACHED:
+ case OMAP_BO_WC:
+ case OMAP_BO_CACHE_MASK:
+ break;
+
+ default:
+ return false;
+ }
+
+ if (flags & OMAP_BO_TILED_MASK) {
+ if (!priv->usergart)
+ return false;
+
+ switch (flags & OMAP_BO_TILED_MASK) {
+ case OMAP_BO_TILED_8:
+ case OMAP_BO_TILED_16:
+ case OMAP_BO_TILED_32:
+ break;
+
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
@@ -1131,18 +1176,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size_t size;
int ret;
- /* Validate the flags and compute the memory and cache flags. */
- if (flags & OMAP_BO_TILED) {
- if (!priv->usergart) {
- dev_err(dev->dev, "Tiled buffers require DMM\n");
- return NULL;
- }
+ if (!omap_gem_validate_flags(dev, flags))
+ return NULL;
+ /* Validate the flags and compute the memory and cache flags. */
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* Tiled buffers are always shmem paged backed. When they are
* scanned out, they are remapped into DMM/TILER.
*/
- flags &= ~OMAP_BO_SCANOUT;
flags |= OMAP_BO_MEM_SHMEM;
/*
@@ -1153,9 +1195,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
flags |= tiler_get_cpu_cache_flags();
} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
/*
- * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
- * tiled. However, to lower the pressure on memory allocation,
- * use contiguous memory only if no TILER is available.
+ * If we don't have DMM, we must allocate scanout buffers
+ * from contiguous DMA memory.
*/
flags |= OMAP_BO_MEM_DMA_API;
} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
@@ -1174,7 +1215,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->flags = flags;
mutex_init(&omap_obj->lock);
- if (flags & OMAP_BO_TILED) {
+ if (flags & OMAP_BO_TILED_MASK) {
/*
* For tiled buffers align dimensions to slot boundaries and
* calculate size based on aligned dimensions.
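The conversion above replaces the hand-rolled dma_addr_cnt counter with a refcount_t. A minimal sketch of the resulting pin/unpin shape, with illustrative names rather than the real omap_gem ones; note refcount_inc() warns on a zero count, which is why the first pin seeds the count with refcount_set(1):

#include <linux/mutex.h>
#include <linux/refcount.h>

struct demo_bo {
	struct mutex lock;
	refcount_t pin_cnt;	/* number of outstanding pins */
};

static int demo_pin(struct demo_bo *bo)
{
	mutex_lock(&bo->lock);
	if (refcount_read(&bo->pin_cnt) == 0) {
		/* First pin: refcount_inc() would warn on 0, so seed with 1. */
		refcount_set(&bo->pin_cnt, 1);
		/* ... reserve the TILER block / map the pages here ... */
	} else {
		refcount_inc(&bo->pin_cnt);
	}
	mutex_unlock(&bo->lock);
	return 0;
}

static void demo_unpin_locked(struct demo_bo *bo)
{
	/* Only the final unpin tears the mapping down. */
	if (refcount_dec_and_test(&bo->pin_cnt)) {
		/* ... tiler_unpin()/tiler_release() equivalent here ... */
	}
}

static void demo_unpin(struct demo_bo *bo)
{
	mutex_lock(&bo->lock);
	demo_unpin_locked(bo);
	mutex_unlock(&bo->lock);
}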
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index e8c3ae7ac77e..7344bb61936c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -67,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
{
struct drm_gem_object *obj = buffer->priv;
struct page **pages;
- if (omap_gem_flags(obj) & OMAP_BO_TILED) {
+ if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
/* TODO we would need to pin at least part of the buffer to
* get de-tiled view. For now just reject it.
*/
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 5f72c922a04b..a0574dc03e16 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -350,9 +350,8 @@ static int versatile_panel_probe(struct platform_device *pdev)
dev_info(dev, "panel mounted on IB2 daughterboard\n");
}
- drm_panel_init(&vpanel->panel);
- vpanel->panel.dev = dev;
- vpanel->panel.funcs = &versatile_panel_drm_funcs;
+ drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&vpanel->panel);
}
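All of the panel conversions in this series follow the same shape: drm_panel_init() now takes the device, the funcs table and a connector type in one call, replacing the three open-coded member assignments. A hedged sketch of the pattern, with my_panel and my_panel_funcs as placeholders:

#include <drm/drm_panel.h>

struct my_panel {
	struct drm_panel panel;
};

static const struct drm_panel_funcs my_panel_funcs = {
	/* .prepare/.enable/.get_modes/... as before */
};

static int my_panel_probe(struct device *dev, struct my_panel *ctx)
{
	/* One call now sets dev, funcs and the connector type. */
	drm_panel_init(&ctx->panel, dev, &my_panel_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	return drm_panel_add(&ctx->panel);
}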
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index dabf59e0f56f..98f184b81187 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -204,9 +204,8 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &feiyang_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd)) {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 3c58f63adbf7..24955bec1958 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -895,9 +895,8 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel);
- ili->panel.dev = dev;
- ili->panel.funcs = &ili9322_drm_funcs;
+ drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ili->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 3ad4a46c4e94..e8789e460a16 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -433,9 +433,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &ili9881c_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power)) {
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d92d1c98878c..83df1ac4211f 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -487,9 +487,8 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
if (IS_ERR(innolux->backlight))
return PTR_ERR(innolux->backlight);
- drm_panel_init(&innolux->base);
- innolux->base.funcs = &innolux_panel_funcs;
- innolux->base.dev = dev;
+ drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
err = drm_panel_add(&innolux->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index ff3e89e61e3f..56364a93f0b8 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -437,9 +437,8 @@ static int jdi_panel_add(struct jdi_panel *jdi)
return ret;
}
- drm_panel_init(&jdi->base);
- jdi->base.funcs = &jdi_panel_funcs;
- jdi->base.dev = &jdi->dsi->dev;
+ drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&jdi->base);
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 3ac04eb8d0fe..45f96556ec8c 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -391,9 +391,8 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay)
if (IS_ERR(kingdisplay->backlight))
return PTR_ERR(kingdisplay->backlight);
- drm_panel_init(&kingdisplay->base);
- kingdisplay->base.funcs = &kingdisplay_panel_funcs;
- kingdisplay->base.dev = &kingdisplay->link->dev;
+ drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev,
+ &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&kingdisplay->base);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index ee4379729a5b..7a1385e834f0 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -196,9 +196,8 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &lb035q02_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 41bf02d122a1..db4865a4c2b9 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -259,9 +259,8 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &lg4573_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ad47cc95459e..2405f26e5d31 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
static int panel_lvds_probe(struct platform_device *pdev)
{
struct panel_lvds *lvds;
- struct device_node *np;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev)
return ret;
}
- np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
- if (np) {
- lvds->backlight = of_find_backlight_by_node(np);
- of_node_put(np);
-
- if (!lvds->backlight)
- return -EPROBE_DEFER;
- }
+ lvds->backlight = devm_of_find_backlight(lvds->dev);
+ if (IS_ERR(lvds->backlight))
+ return PTR_ERR(lvds->backlight);
/*
* TODO: Handle all power supplies specified in the DT node in a generic
@@ -260,20 +254,15 @@ static int panel_lvds_probe(struct platform_device *pdev)
*/
/* Register the panel. */
- drm_panel_init(&lvds->panel);
- lvds->panel.dev = lvds->dev;
- lvds->panel.funcs = &panel_lvds_funcs;
+ drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
ret = drm_panel_add(&lvds->panel);
if (ret < 0)
- goto error;
+ return ret;
dev_set_drvdata(lvds->dev, lvds);
return 0;
-
-error:
- put_device(&lvds->backlight->dev);
- return ret;
}
static int panel_lvds_remove(struct platform_device *pdev)
@@ -284,9 +273,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
panel_lvds_disable(&lvds->panel);
- if (lvds->backlight)
- put_device(&lvds->backlight->dev);
-
return 0;
}
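The panel-lvds change swaps the manual of_parse_phandle()/of_find_backlight_by_node() lookup, and its put_device() cleanup on every exit path, for the devres-managed helper. A sketch of the resulting probe fragment, assuming a driver struct with a backlight pointer:

#include <linux/backlight.h>

/* inside probe(), after the DT node has been parsed: */
lvds->backlight = devm_of_find_backlight(dev);
if (IS_ERR(lvds->backlight))
	return PTR_ERR(lvds->backlight);	/* includes -EPROBE_DEFER */
/*
 * NULL just means the node has no "backlight" phandle; the reference is
 * dropped automatically on driver detach, so remove() needs no cleanup,
 * which is why the error label and the put_device() calls go away.
 */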
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 20f17e46e65d..fd593532ab23 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -205,9 +205,8 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &nl8048_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 2ad1063b068d..60ccedce530c 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -292,9 +292,8 @@ static int nt39016_probe(struct spi_device *spi)
return err;
}
- drm_panel_init(&panel->drm_panel);
- panel->drm_panel.dev = dev;
- panel->drm_panel.funcs = &nt39016_funcs;
+ drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->drm_panel);
if (err < 0) {
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 2bae1db3ff34..f2a72ee6ee07 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,9 +288,8 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
if (IS_ERR(lcd->backlight))
return PTR_ERR(lcd->backlight);
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = dev;
- lcd->panel.funcs = &lcd_olinuxino_funcs;
+ drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c7b48df8869a..bf1f928b215f 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -455,9 +455,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &otm8009a_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dsi->host->dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index e0e20ecff916..2b40913899d8 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -166,9 +166,8 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587)
if (IS_ERR(osd101t2587->backlight))
return PTR_ERR(osd101t2587->backlight);
- drm_panel_init(&osd101t2587->base);
- osd101t2587->base.funcs = &osd101t2587_panel_funcs;
- osd101t2587->base.dev = &osd101t2587->dsi->dev;
+ drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev,
+ &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&osd101t2587->base);
}
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3dff0b3f73c2..664605071d34 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -223,9 +223,8 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt)
return -EPROBE_DEFER;
}
- drm_panel_init(&wuxga_nt->base);
- wuxga_nt->base.funcs = &wuxga_nt_panel_funcs;
- wuxga_nt->base.dev = &wuxga_nt->dsi->dev;
+ drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev,
+ &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&wuxga_nt->base);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index b5b14aa059ea..09824e92fc78 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -426,8 +426,8 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return PTR_ERR(ts->dsi);
}
- ts->base.dev = dev;
- ts->base.funcs = &rpi_touchscreen_funcs;
+ drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/* This appears last, as it's what will unblock the DSI host
* driver's component bind function.
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 6a5d37006103..fd67fc6185c4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -606,9 +606,8 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel);
- panel->panel.funcs = &rad_panel_funcs;
- panel->panel.dev = dev;
+ drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
ret = drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index ba889625ad43..994e855721f4 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -404,9 +404,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &rm68200_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index b9109922397f..31234b79d3b1 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -343,9 +343,8 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &jh057n_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 3c15764f0c03..170a5cda21b9 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -173,9 +173,8 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = &dsi->dev;
- ctx->panel.funcs = &rb070d30_panel_funcs;
+ drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3be902dcedc0..250809ba37c7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -351,9 +351,8 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &ld9040_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&ctx->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index f75bef24e050..e3a0397e953e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -215,9 +215,8 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel);
- s6->panel.dev = dev;
- s6->panel.funcs = &s6d16d0_drm_funcs;
+ drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&s6->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index b923de23ed65..938ab72c5540 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -732,9 +732,8 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e3ha2_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index cd90fa700c49..a60635e9226d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -466,9 +466,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63j0x03_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 142d395ea512..ba01af0b14fd 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -473,9 +473,8 @@ static int s6e63m0_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e63m0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = s6e63m0_backlight_register(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 81858267723a..dbced6501204 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1017,9 +1017,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &s6e8aa0_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
ret = drm_panel_add(&ctx->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 18b22b1294fb..b3619ba443bd 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -274,9 +274,8 @@ static int seiko_panel_probe(struct device *dev,
return -EPROBE_DEFER;
}
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &seiko_panel_funcs;
+ drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
err = drm_panel_add(&panel->base);
if (err < 0)
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index e910b4ad1310..5e136c3ba185 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -329,9 +329,8 @@ static int sharp_panel_add(struct sharp_panel *sharp)
if (IS_ERR(sharp->backlight))
return PTR_ERR(sharp->backlight);
- drm_panel_init(&sharp->base);
- sharp->base.funcs = &sharp_panel_funcs;
- sharp->base.dev = &sharp->link1->dev;
+ drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp->base);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 46cd9a250129..eeab7998c7de 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -185,9 +185,8 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &pdev->dev;
- lcd->panel.funcs = &ls037v7dw01_funcs;
+ drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index c39abde9f9f1..b963ba4ab589 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -264,9 +264,8 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt)
if (IS_ERR(sharp_nt->backlight))
return PTR_ERR(sharp_nt->backlight);
- drm_panel_init(&sharp_nt->base);
- sharp_nt->base.funcs = &sharp_nt_panel_funcs;
- sharp_nt->base.dev = &sharp_nt->dsi->dev;
+ drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev,
+ &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI);
return drm_panel_add(&sharp_nt->base);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 28fa6ba7b767..5d487686d25c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -94,6 +94,7 @@ struct panel_desc {
u32 bus_format;
u32 bus_flags;
+ int connector_type;
};
struct panel_simple {
@@ -464,9 +465,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
- drm_panel_init(&panel->base);
- panel->base.dev = dev;
- panel->base.funcs = &panel_simple_funcs;
+ drm_panel_init(&panel->base, dev, &panel_simple_funcs,
+ desc->connector_type);
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -833,6 +833,7 @@ static const struct panel_desc auo_g133han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_g185han01_timings = {
@@ -862,6 +863,7 @@ static const struct panel_desc auo_g185han01 = {
.unprepare = 1000,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing auo_p320hvn03_timings = {
@@ -890,6 +892,7 @@ static const struct panel_desc auo_p320hvn03 = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -1205,6 +1208,7 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing dlc_dlc1010gig_timing = {
@@ -1235,6 +1239,7 @@ static const struct panel_desc dlc_dlc1010gig = {
.unprepare = 60,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode edt_et035012dm6_mode = {
@@ -1501,6 +1506,7 @@ static const struct panel_desc hannstar_hsd070pww1 = {
.height = 94,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing hannstar_hsd100pxn1_timing = {
@@ -1525,6 +1531,7 @@ static const struct panel_desc hannstar_hsd100pxn1 = {
.height = 152,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
@@ -1631,6 +1638,7 @@ static const struct panel_desc innolux_g070y2_l01 = {
.unprepare = 800,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g101ice_l01_timing = {
@@ -1659,6 +1667,7 @@ static const struct panel_desc innolux_g101ice_l01 = {
.disable = 200,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g121i1_l01_timing = {
@@ -1686,6 +1695,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.disable = 20,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode innolux_g121x1_l03_mode = {
@@ -1869,6 +1879,7 @@ static const struct panel_desc koe_tx31d200vm0baa = {
.height = 109,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing kyo_tcg121xglp_timing = {
@@ -1893,6 +1904,7 @@ static const struct panel_desc kyo_tcg121xglp = {
.height = 184,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
@@ -1941,6 +1953,7 @@ static const struct panel_desc lg_lb070wv8 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
@@ -2063,6 +2076,7 @@ static const struct panel_desc mitsubishi_aa070mc01 = {
.disable = 400,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
@@ -2091,6 +2105,7 @@ static const struct panel_desc nec_nl12880bc20_05 = {
.disable = 50,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
@@ -2193,6 +2208,7 @@ static const struct panel_desc nlt_nl192108ac18_02d = {
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode nvd_9128_mode = {
@@ -2216,6 +2232,7 @@ static const struct panel_desc nvd_9128 = {
.height = 88,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -2381,6 +2398,7 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2628,6 +2646,7 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.height = 136,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing sharp_lq123p1jx31_timing = {
@@ -2807,6 +2826,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
.height = 95,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
@@ -2831,6 +2851,7 @@ static const struct panel_desc tianma_tm070rvhg71 = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
@@ -2913,6 +2934,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2983,6 +3005,7 @@ static const struct panel_desc urt_umsh_8596md_lvds = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct panel_desc urt_umsh_8596md_parallel = {
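panel-simple gains a connector_type field in struct panel_desc, which probe passes straight through to drm_panel_init(); descriptors driving LVDS links set DRM_MODE_CONNECTOR_LVDS. A sketch of a descriptor with placeholder timings, not a real panel:

static const struct drm_display_mode demo_mode = {
	.clock = 33300,
	.hdisplay = 800,
	.hsync_start = 800 + 40,
	.hsync_end = 800 + 40 + 128,
	.htotal = 800 + 40 + 128 + 88,
	.vdisplay = 480,
	.vsync_start = 480 + 10,
	.vsync_end = 480 + 10 + 2,
	.vtotal = 480 + 10 + 2 + 33,
};

static const struct panel_desc demo_panel = {
	.modes = &demo_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 152,	/* mm */
		.height = 91,
	},
	.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	.connector_type = DRM_MODE_CONNECTOR_LVDS,
};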
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 09c5d9a6f9fa..ee3f23f45755 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -369,7 +369,8 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(st7701->backlight))
return PTR_ERR(st7701->backlight);
- drm_panel_init(&st7701->panel);
+ drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs,
+ DRM_MODE_CONNECTOR_DSI);
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
@@ -381,8 +382,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
* ts8550b and there is no valid documentation for that.
*/
st7701->sleep_delay = 120 + desc->panel_sleep_delay;
- st7701->panel.funcs = &st7701_funcs;
- st7701->panel.dev = &dsi->dev;
ret = drm_panel_add(&st7701->panel);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 5e3e92ea9ea6..108a85bb6667 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -381,8 +381,8 @@ static int st7789v_probe(struct spi_device *spi)
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
- ctx->panel.dev = &spi->dev;
- ctx->panel.funcs = &st7789v_drm_funcs;
+ drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ctx->power = devm_regulator_get(&spi->dev, "power");
if (IS_ERR(ctx->power))
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 3d5b9c4f68d9..d6387d8f88a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -648,9 +648,8 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &acx565akm_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index f2baff827f50..c44d6a65c0aa 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -347,9 +347,8 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td028ttec1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
return drm_panel_add(&lcd->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index ba163c779084..621b65feec07 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -458,9 +458,8 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel);
- lcd->panel.dev = &lcd->spi->dev;
- lcd->panel.funcs = &td043mtea1_funcs;
+ drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_add(&lcd->panel);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 71591e5f5938..1a5418ae2ccf 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -457,9 +457,8 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel);
- tpg->panel.dev = dev;
- tpg->panel.funcs = &tpg110_drm_funcs;
+ drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
spi_set_drvdata(spi, tpg);
return drm_panel_add(&tpg->panel);
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 77e1311b7c69..0feea2456e14 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -518,9 +518,8 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
/* dual port */
gpiod_set_value(ctx->mode_gpio, 0);
- drm_panel_init(&ctx->panel);
- ctx->panel.dev = dev;
- ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index 536a0d4f8d29..8c811a9e683b 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -10,3 +10,5 @@
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
+
+- Support core dump on job failure
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 12ff77dacc95..4c4e8a30a1ac 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -13,97 +13,42 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- struct dev_pm_opp *opp;
- unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
- unsigned long target_volt, target_rate;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
int err;
- opp = devfreq_recommended_opp(dev, freq, flags);
- if (IS_ERR(opp))
- return PTR_ERR(opp);
-
- target_rate = dev_pm_opp_get_freq(opp);
- target_volt = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (old_clk_rate == target_rate)
- return 0;
-
- /*
- * If frequency scaling from low to high, adjust voltage first.
- * If frequency scaling from high to low, adjust frequency first.
- */
- if (old_clk_rate < target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err) {
- dev_err(dev, "Cannot set voltage %lu uV\n",
- target_volt);
- return err;
- }
- }
-
- err = clk_set_rate(pfdev->clock, target_rate);
- if (err) {
- dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
- err);
- regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
return err;
- }
- if (old_clk_rate > target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err)
- dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
- }
-
- pfdev->devfreq.cur_freq = target_rate;
- pfdev->devfreq.cur_volt = target_volt;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
- ktime_t now = ktime_get();
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- pfdev->devfreq.slot[i].busy_time = 0;
- pfdev->devfreq.slot[i].idle_time = 0;
- pfdev->devfreq.slot[i].time_last_update = now;
- }
+ pfdev->devfreq.busy_time = 0;
+ pfdev->devfreq.idle_time = 0;
+ pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- int i;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- panfrost_devfreq_update_utilization(pfdev, i);
- }
+ panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
- pfdev->devfreq.slot[0].idle_time));
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
+ pfdev->devfreq.idle_time));
- status->busy_time = 0;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
- }
-
- /* We're scheduling only to one core atm, so don't divide for now */
- /* status->busy_time /= NUM_JOB_SLOTS; */
+ status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
@@ -119,7 +64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- *freq = pfdev->devfreq.cur_freq;
+ *freq = clk_get_rate(pfdev->clock);
return 0;
}
@@ -135,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
+ unsigned long cur_freq;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
@@ -144,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
- pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+ cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
- panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
@@ -174,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
- int i;
-
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@@ -194,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@@ -204,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
- last = pfdev->devfreq.slot[slot].time_last_update;
+ last = pfdev->devfreq.time_last_update;
- /* If we last recorded a transition to busy, we have been idle since */
- if (devfreq_slot->busy)
- pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ if (atomic_read(&pfdev->devfreq.busy_count) > 0)
+ pfdev->devfreq.busy_time += ktime_sub(now, last);
else
- pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+ pfdev->devfreq.idle_time += ktime_sub(now, last);
- pfdev->devfreq.slot[slot].time_last_update = now;
+ pfdev->devfreq.time_last_update = now;
+}
+
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
+{
+ panfrost_devfreq_update_utilization(pfdev);
+ atomic_inc(&pfdev->devfreq.busy_count);
}
-/* The job scheduler is expected to call this at every transition busy <-> idle */
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ int count;
- panfrost_devfreq_update_utilization(pfdev, slot);
- devfreq_slot->busy = !devfreq_slot->busy;
+ panfrost_devfreq_update_utilization(pfdev);
+ count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
+ WARN_ON(count < 0);
}
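The target callback shrinks to a single dev_pm_opp_set_rate() call, which internally handles the regulator-before-clock / clock-before-regulator ordering that was previously open-coded. A sketch, with demo_device standing in for the driver state:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

struct demo_device {
	struct clk *clock;
};

static int demo_devfreq_target(struct device *dev, unsigned long *freq,
			       u32 flags)
{
	struct demo_device *ddev = dev_get_drvdata(dev);
	int err;

	/* The OPP core picks the voltage/frequency ordering for us. */
	err = dev_pm_opp_set_rate(dev, *freq);
	if (err)
		return err;

	/* Report the rate the clock actually settled on. */
	*freq = clk_get_rate(ddev->clock);
	return 0;
}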
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index e3bc63e82843..0611beffc8d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 9c39b9794811..06713811b92c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -51,13 +51,6 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
-struct panfrost_devfreq_slot {
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- bool busy;
-};
-
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -93,9 +86,10 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
- unsigned long cur_freq;
- unsigned long cur_volt;
- struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ atomic_t busy_count;
} devfreq;
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f21bc8a7ee3a..9458dc6c750c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -470,7 +470,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
/*
* Panfrost driver version:
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index acb07fe06580..deca0c30bbd4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -112,7 +112,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
index cec6dcdadb5c..8e59d765bf19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_issues.h
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -13,37 +13,118 @@
* to care about.
*/
enum panfrost_hw_issue {
+ /* Need a way to guarantee that all previously-translated memory accesses
+ * are committed */
HW_ISSUE_6367,
+
+ /* On job complete with non-done the cache is not flushed */
HW_ISSUE_6787,
+
+ /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes an
+ * instrumentation dump if PRFCNT_TILER_EN is enabled */
HW_ISSUE_8186,
+
+ /* TIB: Reports faults from a vtile which has not yet been allocated */
HW_ISSUE_8245,
+
+ /* uTLB deadlock could occur when writing to an invalid page at the
+ * same time as access to a valid page in the same uTLB cache line ( ==
+ * 4 PTEs == 16K block of mapping) */
HW_ISSUE_8316,
+
+ /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is
+ * still executing */
HW_ISSUE_8394,
+
+ /* CSE: Sends a TERMINATED response for a task that should not be
+ * terminated */
HW_ISSUE_8401,
+
+ /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader,
+ * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */
HW_ISSUE_8408,
+
+ /* Disable the Pause Buffer in the LS pipe. */
HW_ISSUE_8443,
+
+ /* Change in RMUs in use causes problems related with the core's SDC */
HW_ISSUE_8987,
+
+ /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop
+ * won't complete until all 4 tasks have completed */
HW_ISSUE_9435,
+
+ /* HT: Tiler returns TERMINATED for non-terminated command */
HW_ISSUE_9510,
+
+ /* Occasionally the GPU will issue multiple page faults for the same
+ * address before the MMU page table has been read by the GPU */
HW_ISSUE_9630,
+
+ /* RA DCD load request to SDC returns invalid load ignore causing
+ * colour buffer mismatch */
HW_ISSUE_10327,
+
+ /* MMU TLB invalidation hazards */
HW_ISSUE_10649,
+
+ /* Missing cache flush in multi core-group configuration */
HW_ISSUE_10676,
+
+ /* Chicken bit on T72X for a hardware workaround in compiler */
HW_ISSUE_10797,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */
HW_ISSUE_10817,
+
+ /* Intermittent missing interrupt on job completion */
HW_ISSUE_10883,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR
+ * (similar to issue 10817) and can use #10817 workaround */
HW_ISSUE_10959,
+
+ /* Soft-stopped fragment shader job can restart with an out-of-bounds
+ * restart index */
HW_ISSUE_10969,
+
+ /* Race condition can cause tile list corruption */
HW_ISSUE_11020,
+
+ /* Write buffer can cause tile list corruption */
HW_ISSUE_11024,
+
+ /* Pause buffer can cause a fragment job hang */
HW_ISSUE_11035,
+
+ /* Dynamic Core Scaling not supported due to errata */
HW_ISSUE_11056,
+
+ /* Clear encoder state for a hard stopped fragment job which is AFBC
+ * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and
+ * r0p1_50rel0 */
HW_ISSUE_T76X_3542,
+
+ /* Keep tiler module clock on to prevent GPU stall */
HW_ISSUE_T76X_3953,
+
+ /* Must ensure L2 is not transitioning when we reset. Workaround with a
+ * busy wait until L2 completes transition; ensure there is a maximum
+ * loop count as she may never complete her transition. (On chips
+ * without this errata, it's totally okay if L2 transitions.) */
HW_ISSUE_TMIX_8463,
+
+ /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */
GPUCORE_1619,
+
+ /* When a hard-stop follows close after a soft-stop, the completion
+ * code for the terminated job may be incorrectly set to STOPPED */
HW_ISSUE_TMIX_8438,
+
+ /* "Protected mode" is buggy on Mali-G31 some Bifrost chips, so the
+ * kernel must fiddle with L2 caches to prevent data leakage */
HW_ISSUE_TGOX_R1_1234,
+
HW_ISSUE_END
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 21f34d44aac2..d411eb6c8eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -404,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- /* panfrost_core_dump(pfdev); */
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
@@ -469,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_transition(pfdev, j);
+ panfrost_devfreq_record_idle(pfdev);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -570,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
+ /* Check whether the hardware is idle */
+ if (atomic_read(&pfdev->devfreq.busy_count))
+ return false;
+
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
-
- /* Check whether the hardware is idle */
- if (pfdev->devfreq.slot[i].busy)
- return false;
}
return true;
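The per-slot busy flag is replaced by one device-wide atomic counter: utilization is sampled before every transition, and the scheduler calls record_busy()/record_idle() instead of toggling per-slot state. A minimal sketch of that accounting, with demo_* names:

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/ktime.h>

struct demo_devfreq {
	ktime_t busy_time, idle_time, time_last_update;
	atomic_t busy_count;
};

static void demo_update_utilization(struct demo_devfreq *df)
{
	ktime_t now = ktime_get();
	ktime_t delta = ktime_sub(now, df->time_last_update);

	/* Attribute the elapsed interval to whichever state we were in. */
	if (atomic_read(&df->busy_count) > 0)
		df->busy_time = ktime_add(df->busy_time, delta);
	else
		df->idle_time = ktime_add(df->idle_time, delta);

	df->time_last_update = now;
}

static void demo_record_busy(struct demo_devfreq *df)
{
	demo_update_utilization(df);
	atomic_inc(&df->busy_count);
}

static void demo_record_idle(struct demo_devfreq *df)
{
	demo_update_utilization(df);
	/* atomic_dec_if_positive() returns the new value, -1 on underflow. */
	WARN_ON(atomic_dec_if_positive(&df->busy_count) < 0);
}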
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 024771a4083e..703ddc803c55 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data)
}
static enum drm_mode_status
-pl111_mode_valid(struct drm_crtc *crtc,
+pl111_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
- struct drm_device *drm = crtc->dev;
+ struct drm_device *drm = pipe->crtc.dev;
struct pl111_drm_dev_private *priv = drm->dev_private;
u32 cpp = priv->variant->fb_bpp / 8;
u64 bw;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 276b53473a84..63dfcda04147 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -150,8 +150,8 @@ static int pl111_modeset_init(struct drm_device *dev)
return -EPROBE_DEFER;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_config;
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d0d691b31f4a..ca3f51c2a8fe 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -4,6 +4,7 @@ config DRM_QXL
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
select DRM_TTM
+ select DRM_TTM_HELPER
select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 265bfe9f8016..1d601f57a6ba 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -88,7 +88,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto free_dev;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
goto disable_pci;
@@ -150,15 +150,7 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_dev_put(dev);
}
-static const struct file_operations qxl_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .mmap = qxl_mmap,
-};
+DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
@@ -276,16 +268,8 @@ static struct drm_driver qxl_driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = qxl_gem_prime_pin,
- .gem_prime_unpin = qxl_gem_prime_unpin,
- .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
- .gem_prime_vmap = qxl_gem_prime_vmap,
- .gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object_unlocked = qxl_gem_object_free,
- .gem_open_object = qxl_gem_object_open,
- .gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.irq_handler = qxl_irq_handler,
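qxl, like panfrost above, drops its hand-rolled file_operations in favor of the DEFINE_DRM_GEM_FOPS() macro, which expands to a complete table wired to the generic DRM/GEM entry points. A sketch:

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>

/* Expands to a struct file_operations with drm_open, drm_release,
 * drm_ioctl, drm_poll, drm_read and drm_gem_mmap filled in. */
DEFINE_DRM_GEM_FOPS(demo_fops);

static struct drm_driver demo_driver = {
	.fops = &demo_fops,
	/* ... */
};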
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 9e034c5fa87d..27e45a2d6b52 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -38,6 +38,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
@@ -354,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 548dfe6f3b26..ab72dc3476e9 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
+ u32 pflag = 0;
unsigned int i;
+ if (pinned)
+ pflag |= TTM_PL_FLAG_NO_EVICT;
+ if (qbo->tbo.base.size <= PAGE_SIZE)
+ pflag |= TTM_PL_FLAG_TOPDOWN;
+
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
@@ -77,6 +82,19 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
}
}
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+ .free = qxl_gem_object_free,
+ .open = qxl_gem_object_open,
+ .close = qxl_gem_object_close,
+ .pin = qxl_gem_prime_pin,
+ .unpin = qxl_gem_prime_unpin,
+ .get_sg_table = qxl_gem_prime_get_sg_table,
+ .vmap = qxl_gem_prime_vmap,
+ .vunmap = qxl_gem_prime_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
+
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
@@ -100,6 +118,7 @@ int qxl_bo_create(struct qxl_device *qdev,
kfree(bo);
return r;
}
+ bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
@@ -148,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
@@ -160,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
+ ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
@@ -193,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
-
if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
(bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
-
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);
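
[editor's note] The qxl_object.c hunks above move qxl from the deprecated driver-level GEM callbacks to a per-object drm_gem_object_funcs table assigned at creation time. A minimal sketch of the same pattern for a hypothetical driver (the foo_* names are assumptions, not qxl code):

#include <drm/drm_gem.h>

/* Hypothetical free callback, invoked when the last reference drops. */
static void foo_gem_free(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);
	kfree(obj);
}

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free = foo_gem_free,
};

/* Point each object at its funcs table at creation time, so the DRM
 * core no longer falls back to the drm_driver-level callbacks that
 * the hunk above removes. */
static int foo_gem_create(struct drm_device *dev, size_t size,
			  struct drm_gem_object **out)
{
	struct drm_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int ret;

	if (!obj)
		return -ENOMEM;
	ret = drm_gem_object_init(dev, obj, size);
	if (ret) {
		kfree(obj);
		return ret;
	}
	obj->funcs = &foo_gem_funcs;
	*out = obj;
	return 0;
}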
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 312216caeea2..2feca734c7b1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL, true);
+ !no_intr, NULL);
if (ret)
return ret;
@@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- glob = bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
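
[editor's note] The qxl_release.c hunk reflects the removal of the bdev->glob indirection: TTM now exposes one global instance, so the post-fence LRU bump takes ttm_bo_glob.lru_lock directly and uses ttm_bo_move_to_lru_tail() instead of the dropped ttm_bo_add_to_lru(). A sketch of the pattern in isolation (the bo is assumed to be reserved by the caller; this is not qxl code):

#include <drm/ttm/ttm_bo_driver.h>

/* After attaching a fence, move the BO to the LRU tail under the
 * single global TTM lock. */
static void foo_bump_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
}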
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 9b24514c75aa..16a5e903533d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static struct vm_operations_struct qxl_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- vm_fault_t ret;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- ret = ttm_vm_ops->fault(vmf);
- return ret;
-}
-
-int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int r;
- struct drm_file *file_priv = filp->private_data;
- struct qxl_device *qdev = file_priv->minor->dev->dev_private;
-
- if (qdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
- filp->private_data, vma->vm_pgoff);
-
- r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- qxl_ttm_vm_ops = *ttm_vm_ops;
- qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
- }
- vma->vm_ops = &qxl_ttm_vm_ops;
- return 0;
-}
-
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -151,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct qxl_bo *qbo = to_qxl_bo(bo);
-
- return drm_vma_node_verify_access(&qbo->tbo.base.vma_node,
- filp->private_data);
-}
-
-static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -310,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
- .verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
@@ -325,6 +275,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
+ qdev->ddev.vma_offset_manager,
false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -368,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct qxl_device *rdev = dev->dev_private;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
#endif
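
[editor's note] With qxl_mmap() and verify_access gone, the mmap path runs entirely through generic helpers: drm_gem_mmap() resolves the fake offset to a GEM object via the vma_offset_manager now passed to ttm_bo_device_init(), then forwards to obj->funcs->mmap(). A hedged sketch of the two pieces a driver still declares (foo_* names are assumptions):

#include <drm/drm_gem.h>
#include <drm/drm_gem_ttm_helper.h>

/* Generic fops: .mmap is drm_gem_mmap(), which looks the object up
 * by its vma offset and calls obj->funcs->mmap(). */
DEFINE_DRM_GEM_FOPS(foo_fops);

/* Per-object hooks; drm_gem_ttm_mmap() provides the TTM fault
 * handling that the removed qxl_ttm_fault() wrapper used to set up. */
static const struct drm_gem_object_funcs foo_gem_funcs = {
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};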
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 62eab82a64f9..40a7e702c2a9 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev)
else
actual_temp = temp & 0x1ff;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/* get temperature in millidegrees */
@@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev)
else
actual_temp = 0;
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
+ return actual_temp * 1000;
}
/*
@@ -6969,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* XXX this should actually be a bus address, not an MC address. same on older asics */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -9504,7 +9500,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -9546,12 +9541,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -9561,14 +9551,17 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9586,15 +9579,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -9607,26 +9608,45 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -9640,15 +9660,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
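
[editor's note] The cik.c hunks above replace hand-rolled capability offset arithmetic (pci_pcie_cap() + PCI_EXP_LNKCTL) with the pcie_capability_*() accessors, which also cope with devices whose registers are emulated rather than physically present. Where a read is immediately followed by a masked write, the kernel additionally offers a combined helper; a minimal sketch forcing gen3 as the target link speed (foo_force_gen3 is an assumed name, not part of this patch):

#include <linux/pci.h>

/* Equivalent of: read LNKCTL2, clear the target-link-speed field,
 * set 8.0 GT/s, write it back - in one accessor call. */
static void foo_force_gen3(struct pci_dev *pdev)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL2,
					   PCI_EXP_LNKCTL2_TLS,
					   PCI_EXP_LNKCTL2_TLS_8_0GT);
}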
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e937cc01910d..033bc466a862 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d59b004f6695..30e32adc1fc6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -68,6 +68,10 @@
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
+
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -509,8 +513,9 @@ struct radeon_bo {
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
- struct radeon_mn *mn;
- struct list_head mn_list;
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index b9aea5776d3d..72db2b41e96d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
return;
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b684cd719612..c07427d3c199 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -249,11 +249,10 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -269,9 +268,8 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
@@ -380,10 +378,9 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
- int i;
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -428,14 +425,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
struct drm_encoder *enc;
- int i;
if (conflict == connector)
continue;
radeon_conflict = to_radeon_connector(conflict);
- drm_connector_for_each_possible_encoder(conflict, enc, i) {
+ drm_connector_for_each_possible_encoder(conflict, enc) {
/* if the IDs match */
if (enc == encoder) {
if (conflict->status != connector_status_connected)
@@ -1363,9 +1359,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (radeon_connector->dac_load_detect) {
- int i;
-
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1443,9 +1437,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1460,7 +1453,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- drm_connector_for_each_possible_encoder(connector, encoder, i)
+ drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
@@ -1603,9 +1596,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1624,10 +1616,9 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
- int i;
bool found = false;
- drm_connector_for_each_possible_encoder(connector, encoder, i) {
+ drm_connector_for_each_possible_encoder(connector, encoder) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
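
[editor's note] Context for the radeon_connectors.c hunks: drm_connector_for_each_possible_encoder() dropped its third (index) argument and now walks the connector's encoder mask directly, so every caller loses the scratch `int i`. A sketch of a caller after the change (foo_find_tmds is a made-up example):

#include <drm/drm_connector.h>

/* Pick the first TMDS encoder wired to this connector; the iterator
 * no longer needs (or accepts) an index variable. */
static struct drm_encoder *foo_find_tmds(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	drm_connector_for_each_possible_encoder(connector, encoder) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
			return encoder;
	}
	return NULL;
}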
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2994f07fbad9..ee28f5b3785e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
return &radeon_connector->mst_encoder->base;
}
+static int
+radeon_dp_mst_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct radeon_connector *radeon_connector =
+ to_radeon_connector(connector);
+ struct radeon_connector *master = radeon_connector->mst_port;
+
+ return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+ radeon_connector->port);
+}
+
static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
.get_modes = radeon_dp_mst_get_modes,
.mode_valid = radeon_dp_mst_mode_valid,
.best_encoder = radeon_mst_best_encoder,
+ .detect_ctx = radeon_dp_mst_detect,
};
-static enum drm_connector_status
-radeon_dp_mst_detect(struct drm_connector *connector, bool force)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port);
-}
-
static void
radeon_dp_mst_connector_destroy(struct drm_connector *connector)
{
@@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
};
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4528f4dc0b2d..fd74e2611185 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -361,7 +361,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb");
if (ret)
return ret;
@@ -379,10 +379,6 @@ radeon_pci_remove(struct pci_dev *pdev)
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
-#ifdef CONFIG_PPC64
- struct drm_device *ddev = pci_get_drvdata(pdev);
-#endif
-
/* if we are running in a VM, make sure the device
* is torn down properly on reboot/shutdown
*/
@@ -390,13 +386,14 @@ radeon_pci_shutdown(struct pci_dev *pdev)
radeon_pci_remove(pdev);
#ifdef CONFIG_PPC64
- /* Some adapters need to be suspended before a
+ /*
+ * Some adapters need to be suspended before a
* shutdown occurs in order to prevent an error
* during kexec.
* Make this power specific because it breaks
* some non-power boards.
*/
- radeon_suspend_kms(ddev, true, true, false);
+ radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false);
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index b2b076606f54..67298a0739cb 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index dbab9a3a969b..f93829f08a4d 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -36,131 +36,51 @@
#include "radeon.h"
-struct radeon_mn {
- struct mmu_notifier mn;
-
- /* objects protected by lock */
- struct mutex lock;
- struct rb_root_cached objects;
-};
-
-struct radeon_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
/**
- * radeon_mn_invalidate_range_start - callback to notify about mm change
+ * radeon_mn_invalidate - callback to notify about mm change
*
* @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: the mm range under invalidation
+ * @cur_seq: the notifier sequence at invalidation time
*
* We block for all BOs between start and end to be idle and
* unmap them by moving them into the system domain again.
*/
-static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
+static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
struct ttm_operation_ctx ctx = { false, false };
- struct interval_tree_node *it;
- unsigned long end;
- int ret = 0;
-
- /* notification is exclusive, but interval is inclusive */
- end = range->end - 1;
-
- /* TODO we should be able to split locking for interval tree and
- * the tear down.
- */
- if (mmu_notifier_range_blockable(range))
- mutex_lock(&rmn->lock);
- else if (!mutex_trylock(&rmn->lock))
- return -EAGAIN;
-
- it = interval_tree_iter_first(&rmn->objects, range->start, end);
- while (it) {
- struct radeon_mn_node *node;
- struct radeon_bo *bo;
- long r;
-
- if (!mmu_notifier_range_blockable(range)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- node = container_of(it, struct radeon_mn_node, it);
- it = interval_tree_iter_next(it, range->start, end);
+ long r;
- list_for_each_entry(bo, &node->bos, mn_list) {
+ if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ return true;
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
- continue;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- r = radeon_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- radeon_bo_unreserve(bo);
- }
+ r = radeon_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ return true;
}
-
-out_unlock:
- mutex_unlock(&rmn->lock);
-
- return ret;
-}
-
-static void radeon_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-{
- struct mmu_notifier_range range = {
- .mm = mm,
- .start = 0,
- .end = ULONG_MAX,
- .flags = 0,
- .event = MMU_NOTIFY_UNMAP,
- };
-
- radeon_mn_invalidate_range_start(mn, &range);
-}
-
-static struct mmu_notifier *radeon_mn_alloc_notifier(struct mm_struct *mm)
-{
- struct radeon_mn *rmn;
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn)
- return ERR_PTR(-ENOMEM);
+ r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT_CACHED;
- return &rmn->mn;
-}
+ radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
-static void radeon_mn_free_notifier(struct mmu_notifier *mn)
-{
- kfree(container_of(mn, struct radeon_mn, mn));
+ radeon_bo_unreserve(bo);
+ return true;
}
-static const struct mmu_notifier_ops radeon_mn_ops = {
- .release = radeon_mn_release,
- .invalidate_range_start = radeon_mn_invalidate_range_start,
- .alloc_notifier = radeon_mn_alloc_notifier,
- .free_notifier = radeon_mn_free_notifier,
+static const struct mmu_interval_notifier_ops radeon_mn_ops = {
+ .invalidate = radeon_mn_invalidate,
};
/**
@@ -174,51 +94,20 @@ static const struct mmu_notifier_ops radeon_mn_ops = {
*/
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
- unsigned long end = addr + radeon_bo_size(bo) - 1;
- struct mmu_notifier *mn;
- struct radeon_mn *rmn;
- struct radeon_mn_node *node = NULL;
- struct list_head bos;
- struct interval_tree_node *it;
-
- mn = mmu_notifier_get(&radeon_mn_ops, current->mm);
- if (IS_ERR(mn))
- return PTR_ERR(mn);
- rmn = container_of(mn, struct radeon_mn, mn);
-
- INIT_LIST_HEAD(&bos);
-
- mutex_lock(&rmn->lock);
-
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct radeon_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node) {
- node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
- if (!node) {
- mutex_unlock(&rmn->lock);
- return -ENOMEM;
- }
- }
-
- bo->mn = rmn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &rmn->objects);
-
- mutex_unlock(&rmn->lock);
-
+ int ret;
+
+ ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ radeon_bo_size(bo), &radeon_mn_ops);
+ if (ret)
+ return ret;
+
+ /*
+ * FIXME: radeon appears to allow get_user_pages to run during
+ * invalidate_range_start/end, which is not a safe way to read the
+ * PTEs. It should use the mmu_interval_read_begin() scheme around the
+ * get_user_pages to ensure that the PTEs are read properly.
+ */
+ mmu_interval_read_begin(&bo->notifier);
return 0;
}
@@ -231,27 +120,8 @@ int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
*/
void radeon_mn_unregister(struct radeon_bo *bo)
{
- struct radeon_mn *rmn = bo->mn;
- struct list_head *head;
-
- if (!rmn)
+ if (!bo->notifier.mm)
return;
-
- mutex_lock(&rmn->lock);
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- list_del(&bo->mn_list);
-
- if (list_empty(head)) {
- struct radeon_mn_node *node;
- node = container_of(head, struct radeon_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
- kfree(node);
- }
-
- mutex_unlock(&rmn->lock);
-
- mmu_notifier_put(&rmn->mn);
- bo->mn = NULL;
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
}
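
[editor's note] The FIXME in radeon_mn_register() refers to the sequence-count retry scheme that mmu_interval_notifier users are expected to follow: sample a sequence before faulting pages, then re-check it under the driver lock and retry if an invalidation raced. A hedged sketch of that loop (the foo_* names and the page-pinning step are placeholders, not radeon code):

#include <linux/mmu_notifier.h>

/* Canonical mmu_interval_read_begin()/read_retry() loop: any
 * invalidation landing between begin and retry forces another pass,
 * so the PTEs captured in between are known-stable when committed. */
static int foo_fill_user_pages(struct mmu_interval_notifier *notifier,
			       struct mutex *driver_lock)
{
	unsigned long seq;

	do {
		seq = mmu_interval_read_begin(notifier);

		/* ... get_user_pages() / hmm_range_fault() here,
		 * without holding driver_lock ... */

		mutex_lock(driver_lock);
		if (!mmu_interval_read_retry(notifier, seq))
			break;	/* no invalidation raced us */
		mutex_unlock(driver_lock);
	} while (true);

	/* ... still under driver_lock: publish the pages ... */
	mutex_unlock(driver_lock);
	return 0;
}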
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2abe1eab471f..140d94cc080d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a05e10724d46..098bc9f40b98 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -794,6 +794,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
+ rdev->ddev->vma_offset_manager,
dma_addressing_limited(&rdev->pdev->dev));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 05894d198a79..d7eea75b2c27 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
+ }
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
@@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev)
}
/* setup interrupt control */
- /* set dummy read address to ring address */
- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -7087,7 +7087,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
enum pci_bus_speed speed_cap;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -7129,12 +7128,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
return;
if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -7144,14 +7138,17 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+ &gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+ tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7169,15 +7166,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
@@ -7190,26 +7195,46 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL,
+ tmp16);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL,
+ tmp16);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(root,
+ PCI_EXP_LNKCTL2,
+ tmp16);
+
+ pcie_capability_read_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ &tmp16);
+ tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN);
+ tmp16 |= (gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_write_word(rdev->pdev,
+ PCI_EXP_LNKCTL2,
+ tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
@@ -7223,15 +7248,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (speed_cap == PCIE_SPEED_8_0GT)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (speed_cap == PCIE_SPEED_5_0GT)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 9c93eb4fad8b..f266c17b907a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.dpll_mask = BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(3) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A774B1 has one RGB output, one LVDS output and one HDMI
+ * output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 2,
+ },
+ },
+ .num_lvds = 1,
+ .dpll_mask = BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
@@ -416,6 +445,7 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
{ .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info },
+ { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info },
{ .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 0f00bdfe2366..3cd83a030a04 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,6 +9,7 @@
#include <linux/export.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
@@ -84,8 +85,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
goto done;
}
- bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto done;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 2dc9caee8767..0d59f390de19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -585,7 +585,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
vsps[j].crtcs_mask |= BIT(i);
- /* Store the VSP pointer and pipe index in the CRTC. */
+ /*
+ * Store the VSP pointer and pipe index in the CRTC. If the
+ * second cell of the 'vsps' specifier isn't present, default
+ * to 0 to remain compatible with older DT bindings.
+ */
rcdu->crtcs[i].vsp = &rcdu->vsps[j];
rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3fc7e6899cab..8c6c172bbf2e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -16,6 +16,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds)
return 0;
}
+static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = {
+ .gen = 2,
+ .quirks = RCAR_LVDS_QUIRK_LANES,
+ .pll_setup = rcar_lvds_pll_setup_gen2,
+};
+
+static const struct soc_device_attribute lvds_quirk_matches[] = {
+ {
+ .soc_id = "r8a7790", .revision = "ES1.*",
+ .data = &rcar_lvds_r8a7790es1_info,
+ },
+ { /* sentinel */ }
+};
+
static int rcar_lvds_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
struct resource *mem;
int ret;
@@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->dev = &pdev->dev;
lvds->info = of_device_get_match_data(&pdev->dev);
+ attr = soc_device_match(lvds_quirk_matches);
+ if (attr)
+ lvds->info = attr->data;
+
ret = rcar_lvds_parse_dt(lvds);
if (ret < 0)
return ret;
@@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = {
.pll_setup = rcar_lvds_pll_setup_gen2,
};
-static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = {
- .gen = 2,
- .quirks = RCAR_LVDS_QUIRK_LANES,
- .pll_setup = rcar_lvds_pll_setup_gen2,
-};
-
static const struct rcar_lvds_device_info rcar_lvds_gen3_info = {
.gen = 3,
.quirks = RCAR_LVDS_QUIRK_PWD,
@@ -929,8 +943,9 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
- { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
+ { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index d505ea7d5384..eed594bd38d3 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -477,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp)
cdn_dp_set_firmware_active(dp, false);
cdn_dp_clk_disable(dp);
dp->active = false;
- dp->link.rate = 0;
- dp->link.num_lanes = 0;
+ dp->max_lanes = 0;
+ dp->max_rate = 0;
if (!dp->connected) {
kfree(dp->edid);
dp->edid = NULL;
@@ -570,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
struct cdn_dp_port *port = cdn_dp_connected_port(dp);
u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd);
- if (!port || !dp->link.rate || !dp->link.num_lanes)
+ if (!port || !dp->max_rate || !dp->max_lanes)
return false;
if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
@@ -952,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* Enabled and connected with a sink, re-train if requested */
} else if (!cdn_dp_check_link_status(dp)) {
- unsigned int rate = dp->link.rate;
- unsigned int lanes = dp->link.num_lanes;
+ unsigned int rate = dp->max_rate;
+ unsigned int lanes = dp->max_lanes;
struct drm_display_mode *mode = &dp->mode;
DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n");
@@ -966,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work)
/* If training result is changed, update the video config */
if (mode->clock &&
- (rate != dp->link.rate || lanes != dp->link.num_lanes)) {
+ (rate != dp->max_rate || lanes != dp->max_lanes)) {
ret = cdn_dp_config_video(dp);
if (ret) {
dp->connected = false;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index b85ea89eb60b..83c4586665b4 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -92,9 +92,10 @@ struct cdn_dp_device {
struct reset_control *core_rst;
struct audio_info audio_info;
struct video_info video_info;
- struct drm_dp_link link;
struct cdn_dp_port *port[MAX_PHY];
u8 ports;
+ u8 max_lanes;
+ u8 max_rate;
u8 lanes;
int active_port;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 077c87021908..7361c07cb4a7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp)
if (ret)
goto err_get_training_status;
- dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]);
- dp->link.num_lanes = status[1];
+ dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]);
+ dp->max_lanes = status[1];
err_get_training_status:
if (ret)
@@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
return ret;
}
- DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
- dp->link.num_lanes);
+ DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate,
+ dp->max_lanes);
return ret;
}
@@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ?
(video->color_depth * 2) : (video->color_depth * 3);
- link_rate = dp->link.rate / 1000;
+ link_rate = dp->max_rate / 1000;
ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE);
if (ret)
@@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
do {
tu_size_reg += 2;
symbol = tu_size_reg * mode->clock * bit_per_pix;
- do_div(symbol, dp->link.num_lanes * link_rate * 8);
+ do_div(symbol, dp->max_lanes * link_rate * 8);
rem = do_div(symbol, 1000);
if (tu_size_reg > 64) {
ret = -EINVAL;
DRM_DEV_ERROR(dp->dev,
"tu error, clk:%d, lanes:%d, rate:%d\n",
- mode->clock, dp->link.num_lanes,
- link_rate);
+ mode->clock, dp->max_lanes, link_rate);
goto err_config_video;
}
} while ((symbol <= 1) || (tu_size_reg - symbol < 4) ||
@@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
/* set the FIFO Buffer size */
val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate;
- val /= (dp->link.num_lanes * link_rate);
+ val /= (dp->max_lanes * link_rate);
val = div_u64(8 * (symbol + 1), bit_per_pix) - val;
val += 2;
ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val);
@@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp,
u32 val;
if (audio->channels == 2) {
- if (dp->link.num_lanes == 1)
+ if (dp->max_lanes == 1)
sub_pckt_num = 2;
else
sub_pckt_num = 4;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 906891b03a38..7f56d8c3491d 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -450,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
.phy_ops = &rk3328_hdmi_phy_ops,
.phy_name = "inno_dw_hdmi_phy2",
.phy_force_vendor = true,
+ .use_drm_infoframe = true,
};
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
@@ -464,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
.cur_ctr = rockchip_cur_ctr,
.phy_config = rockchip_phy_config,
.phy_data = &rk3399_chip_data,
+ .use_drm_infoframe = true,
};
static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index 85fc5f01f761..cdb401f4283d 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -743,7 +743,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct rk3066_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -753,12 +752,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
-
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENXIO;
-
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 291e89b4045f..7582d0e6a60a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
kfree(rk_obj);
}
-struct rockchip_gem_object *
+static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
struct rockchip_gem_object *rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 613404f86668..d04b3492bdac 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -139,6 +139,7 @@ struct vop {
uint32_t *regsbak;
void __iomem *regs;
+ void __iomem *lut_regs;
/* physical map length of vop register */
uint32_t len;
@@ -1040,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct vop *vop = to_vop(crtc);
+ unsigned long rate;
- adjusted_mode->clock =
- DIV_ROUND_UP(clk_round_rate(vop->dclk,
- adjusted_mode->clock * 1000), 1000);
+ /*
+ * Clock craziness.
+ *
+ * Key points:
+ *
+ * - DRM works in kHz.
+ * - Clock framework works in Hz.
+ * - Rockchip's clock driver picks the clock rate that is the
+ * same _OR LOWER_ than the one requested.
+ *
+ * Action plan:
+ *
+ * 1. When DRM gives us a mode, we should add 999 Hz to it. That way
+ * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to
+ * make 60000 kHz then the clock framework will actually give us
+ * the right clock.
+ *
+ * NOTE: if the PLL (maybe through a divider) could actually make
+ * a clock rate 999 Hz higher instead of the one we want then this
+ * could be a problem. Unfortunately there's not much we can do
+ * since it's baked into DRM to use kHz. It shouldn't matter in
+ * practice since Rockchip PLLs are controlled by tables and
+ * even if there is a divider in the middle I wouldn't expect PLL
+ * rates in the table that are just a few kHz different.
+ *
+ * 2. Get the clock framework to round the rate for us to tell us
+ * what it will actually make.
+ *
+ * 3. Store the rounded up rate so that we don't need to worry about
+ * this in the actual clk_set_rate().
+ */
+ rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999);
+ adjusted_mode->clock = DIV_ROUND_UP(rate, 1000);
return true;
}
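
[editor's note] Worked numbers for the comment above: if the PLL can only produce 60 000 001 Hz and DRM requests 60000 kHz, over-asking by 999 Hz (60 000 999) lets clk_round_rate() return 60 000 001, which DIV_ROUND_UP() stores back as 60001 kHz; the later clk_set_rate(60 001 000) then rounds down to exactly the reachable rate. The arithmetic as a standalone sketch (foo_fixup_pixel_clock is an assumed helper, not part of this patch):

#include <linux/clk.h>
#include <linux/kernel.h>

/* DRM hands us kHz; the clock tree wants Hz and rounds DOWN.
 * Over-ask by 999 Hz so a rate just above a whole kHz is still
 * reachable, then store the result rounded UP back into kHz. */
static void foo_fixup_pixel_clock(struct clk *dclk, int *clock_khz)
{
	long rate = clk_round_rate(dclk, *clock_khz * 1000 + 999);

	*clock_khz = DIV_ROUND_UP(rate, 1000);
}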
+static bool vop_dsp_lut_is_enabled(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
+}
+
+static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
+{
+ struct drm_color_lut *lut = crtc->state->gamma_lut->data;
+ unsigned int i;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ u32 word;
+
+ word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ writel(word, vop->lut_regs + i * 4);
+ }
+}
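
[editor's note] Each LUT word written above packs three 10-bit channels into one 32-bit register: red in bits 29:20, green in 19:10, blue in 9:0, with drm_color_lut_extract() scaling the 16-bit DRM values down to hardware precision. For example red 0xffff, green 0x8000, blue 0x0 becomes (0x3ff << 20) | (0x200 << 10) | 0x0 = 0x3ff80000. The packing as an assumed standalone helper:

#include <drm/drm_color_mgmt.h>

/* Pack one drm_color_lut entry into the VOP's 10:10:10 LUT word,
 * mirroring the loop in vop_crtc_write_gamma_lut() above. */
static u32 foo_pack_lut_word(const struct drm_color_lut *e)
{
	return (drm_color_lut_extract(e->red, 10) << 20) |
	       (drm_color_lut_extract(e->green, 10) << 10) |
		drm_color_lut_extract(e->blue, 10);
}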
+
+static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_crtc_state *state = crtc->state;
+ unsigned int idle;
+ int ret;
+
+ if (!vop->lut_regs)
+ return;
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+
+ spin_lock(&vop->reg_lock);
+ vop_crtc_write_gamma_lut(vop, crtc);
+ VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+}
+
+static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ /*
+ * Only update GAMMA if the 'active' flag is not changed,
+ * otherwise it's updated by .atomic_enable.
+ */
+ if (crtc->state->color_mgmt_changed &&
+ !crtc->state->active_changed)
+ vop_crtc_gamma_set(vop, crtc, old_crtc_state);
+}
+
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1075,6 +1180,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
+
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
@@ -1085,9 +1198,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
-
- pin_pol = BIT(DCLK_INVERT);
- pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
+ pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
BIT(HSYNC_POSITIVE) : 0;
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
@@ -1096,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
- VOP_REG_SET(vop, output, rgb_en, 1);
+ VOP_REG_SET(vop, output, rgb_dclk_pol, 1);
VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, rgb_en, 1);
break;
case DRM_MODE_CONNECTOR_eDP:
+ VOP_REG_SET(vop, output, edp_dclk_pol, 1);
VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_REG_SET(vop, output, hdmi_dclk_pol, 1);
VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
+ VOP_REG_SET(vop, output, mipi_dclk_pol, 1);
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
VOP_REG_SET(vop, output, mipi_dual_channel_en,
!!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
- pin_pol &= ~BIT(DCLK_INVERT);
+ VOP_REG_SET(vop, output, dp_dclk_pol, 0);
VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
VOP_REG_SET(vop, output, dp_en, 1);
break;
@@ -1191,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop)
synchronize_irq(vop->irq);
}
+static int vop_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct vop *vop = to_vop(crtc);
+
+ if (vop->lut_regs && crtc_state->color_mgmt_changed &&
+ crtc_state->gamma_lut) {
+ unsigned int len;
+
+ len = drm_color_lut_size(crtc_state->gamma_lut);
+ if (len != crtc->gamma_size) {
+ DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
+ len, crtc->gamma_size);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -1243,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
.mode_fixup = vop_crtc_mode_fixup,
+ .atomic_check = vop_crtc_atomic_check,
+ .atomic_begin = vop_crtc_atomic_begin,
.atomic_flush = vop_crtc_atomic_flush,
.atomic_enable = vop_crtc_atomic_enable,
.atomic_disable = vop_crtc_atomic_disable,
@@ -1361,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
.verify_crc_source = vop_crtc_verify_crc_source,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1518,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop)
goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+ if (vop->lut_regs) {
+ drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size);
+ }
/*
* Create drm_planes for overlay windows with possible_crtcs restricted
@@ -1822,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop->regs))
return PTR_ERR(vop->regs);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ if (!vop_data->lut_size) {
+ DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ return -EINVAL;
+ }
+ vop->lut_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vop->lut_regs))
+ return PTR_ERR(vop->lut_regs);
+ }
+
vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
if (!vop->regsbak)
return -ENOMEM;
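
[editor's note] The LUT window added in vop_bind() is an optional second MMIO resource: its absence is legitimate, but if it is present the vop_data must also declare a LUT size. The optional-resource idiom, reduced to a sketch (foo_map_optional is an assumed name; callers must still IS_ERR()-check the result):

#include <linux/platform_device.h>
#include <linux/io.h>

/* Map an optional register window; only a present-but-unmappable
 * resource is treated as an error. */
static void __iomem *foo_map_optional(struct platform_device *pdev, int idx)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, idx);

	if (!res)
		return NULL;		/* legitimately absent */
	return devm_ioremap_resource(&pdev->dev, res);
}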
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 2149a889c29d..0b3d18c457b2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -46,10 +46,15 @@ struct vop_modeset {
struct vop_output {
struct vop_reg pin_pol;
struct vop_reg dp_pin_pol;
+ struct vop_reg dp_dclk_pol;
struct vop_reg edp_pin_pol;
+ struct vop_reg edp_dclk_pol;
struct vop_reg hdmi_pin_pol;
+ struct vop_reg hdmi_dclk_pol;
struct vop_reg mipi_pin_pol;
+ struct vop_reg mipi_dclk_pol;
struct vop_reg rgb_pin_pol;
+ struct vop_reg rgb_dclk_pol;
struct vop_reg dp_en;
struct vop_reg edp_en;
struct vop_reg hdmi_en;
@@ -67,6 +72,7 @@ struct vop_common {
struct vop_reg dither_down_mode;
struct vop_reg dither_down_en;
struct vop_reg dither_up;
+ struct vop_reg dsp_lut_en;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
@@ -170,6 +176,7 @@ struct vop_data {
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
+ unsigned int lut_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
#define VOP_FEATURE_INTERNAL_RGB BIT(1)
@@ -294,8 +301,7 @@ enum dither_down_mode_sel {
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
- DEN_NEGATIVE = 2,
- DCLK_INVERT = 3
+ DEN_NEGATIVE = 2
};
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 64aefa856896..8a4c9af0ba73 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -16,6 +16,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 89e0bb0fe0ab..ae730275a34f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -135,7 +136,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
if (panel) {
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_LVDS);
if (IS_ERR(bridge))
return ERR_CAST(bridge);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d1494be14471..7a9d979c8d5d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -16,6 +16,7 @@
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
+#include "rockchip_drm_drv.h"
#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
{ \
@@ -214,9 +215,11 @@ static const struct vop_modeset px30_modeset = {
};
static const struct vop_output px30_output = {
- .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
- .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1),
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2),
.rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26),
.mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
};
@@ -598,6 +601,7 @@ static const struct vop_common rk3288_common = {
.dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
@@ -646,6 +650,7 @@ static const struct vop_data rk3288_vop = {
.output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
+ .lut_size = 1024,
};
static const int rk3368_vop_intrs[] = {
@@ -717,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = {
};
static const struct vop_output rk3368_output = {
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
@@ -764,11 +773,16 @@ static const struct vop_data rk3366_vop = {
};
static const struct vop_output rk3399_output = {
- .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19),
+ .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31),
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28),
.dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
.rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
@@ -872,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = {
};
static const struct vop_output rk3328_output = {
+ .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19),
+ .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23),
+ .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27),
+ .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31),
.rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
.hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
- .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28),
+ .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16),
+ .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20),
+ .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24),
+ .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28),
};
static const struct vop_misc rk3328_misc = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 1a5153197fe9..461a7a8129f4 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/slab.h>
+#include <linux/completion.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
@@ -68,6 +69,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
if (!entity->rq_list)
return -ENOMEM;
+ init_completion(&entity->entity_idle);
+
for (i = 0; i < num_rq_list; ++i)
entity->rq_list[i] = rq_list[i];
@@ -286,11 +289,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
*/
if (spsc_queue_count(&entity->job_queue)) {
if (sched) {
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
+ /*
+ * Wait for thread to idle to make sure it isn't processing
+ * this entity.
*/
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
+ wait_for_completion(&entity->entity_idle);
+
}
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency,
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 54977408f574..8b45c3a1b84e 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -128,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops drm_sched_fence_ops_finished = {
+static const struct dma_fence_ops drm_sched_fence_ops_finished = {
.get_driver_name = drm_sched_fence_get_driver_name,
.get_timeline_name = drm_sched_fence_get_timeline_name,
.release = drm_sched_fence_release_finished,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f39b97ed4ade..3c57e84222ca 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -47,6 +47,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/completion.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
@@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
list_for_each_entry_continue(entity, &rq->entities, list) {
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
+ reinit_completion(&entity->entity_idle);
spin_unlock(&rq->lock);
return entity;
}
@@ -496,8 +499,10 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
fence = sched->ops->run_job(s_job);
if (IS_ERR_OR_NULL(fence)) {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+
s_job->s_fence->parent = NULL;
- dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
} else {
s_job->s_fence->parent = fence;
}
@@ -632,43 +637,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
*
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * that is ready to be destroyed.
*/
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
+ struct drm_sched_job *job;
unsigned long flags;
- /* Don't destroy jobs while the timeout worker is running */
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr))
- return;
-
+ /*
+ * Don't destroy jobs while the timeout worker is running, or while the
+ * thread is being parked and hence assumed not to touch ring_mirror_list.
+ */
+ if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !cancel_delayed_work(&sched->work_tdr)) ||
+ __kthread_should_park(sched->thread))
+ return NULL;
- while (!list_empty(&sched->ring_mirror_list)) {
- struct drm_sched_job *job;
+ spin_lock_irqsave(&sched->job_list_lock, flags);
- job = list_first_entry(&sched->ring_mirror_list,
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- if (!dma_fence_is_signaled(&job->s_fence->finished))
- break;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from ring_mirror_list */
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- sched->ops->free_job(job);
+ } else {
+ job = NULL;
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
}
- /* queue timeout for next job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- drm_sched_start_timeout(sched);
spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ return job;
}
/**
@@ -708,17 +715,27 @@ static int drm_sched_main(void *param)
struct drm_sched_fence *s_fence;
struct drm_sched_job *sched_job;
struct dma_fence *fence;
+ struct drm_sched_job *cleanup_job = NULL;
wait_event_interruptible(sched->wake_up_worker,
- (drm_sched_cleanup_jobs(sched),
+ (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
(!drm_sched_blocked(sched) &&
(entity = drm_sched_select_entity(sched))) ||
- kthread_should_stop()));
+ kthread_should_stop());
+
+ if (cleanup_job) {
+ sched->ops->free_job(cleanup_job);
+ /* queue timeout for next job */
+ drm_sched_start_timeout(sched);
+ }
if (!entity)
continue;
sched_job = drm_sched_entity_pop_job(entity);
+
+ complete(&entity->entity_idle);
+
if (!sched_job)
continue;
@@ -741,8 +758,9 @@ static int drm_sched_main(void *param)
r);
dma_fence_put(fence);
} else {
+ if (IS_ERR(fence))
+ dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
- dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
drm_sched_process_job(NULL, &sched_job->cb);
}
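
The scheduler changes above replace the kthread_park()/unpark() cycle with a per-entity completion: the worker re-arms the completion when it selects an entity and signals it once the entity's job has been popped, so teardown can wait for the thread to be done with that one entity instead of parking the whole kthread. A minimal sketch of the handshake, with illustrative names (not the scheduler code itself):

#include <linux/completion.h>

struct example_entity {
	struct completion idle;
};

static void example_entity_init(struct example_entity *e)
{
	init_completion(&e->idle);
	complete(&e->idle);		/* nothing in flight yet */
}

/* Worker side: runs in the scheduler thread. */
static void example_worker_step(struct example_entity *e)
{
	reinit_completion(&e->idle);	/* entity is now in flight */
	/* ... pop and hand off one job from the entity ... */
	complete(&e->idle);		/* worker no longer touches it */
}

/* Teardown side: safe even while the worker is mid-step. */
static void example_entity_fini(struct example_entity *e)
{
	wait_for_completion(&e->idle);
}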
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index aae88f8a016c..d2137342b371 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o
+ test-drm_damage_helper.o test-drm_dp_mst_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
index 464753746013..1898de0b4a4d 100644
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -32,3 +32,5 @@ selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
+selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode)
+selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode)
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
new file mode 100644
index 000000000000..af2b2de65316
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for the DRM DP MST helpers
+ */
+
+#define PREFIX_STR "[drm_dp_mst_helper]"
+
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_print.h>
+
+#include "../drm_dp_mst_topology_internal.h"
+#include "test-drm_modeset_common.h"
+
+int igt_dp_mst_calc_pbn_mode(void *ignored)
+{
+ int pbn, i;
+ const struct {
+ int rate;
+ int bpp;
+ int expected;
+ } test_params[] = {
+ { 154000, 30, 689 },
+ { 234000, 30, 1047 },
+ { 297000, 24, 1063 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_params); i++) {
+ pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
+ test_params[i].bpp);
+ FAIL(pbn != test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
+ test_params[i].expected, test_params[i].rate,
+ test_params[i].bpp, pbn);
+ }
+
+ return 0;
+}
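+
+/*
+ * Sanity check for the expected values above (an approximation for
+ * checking the table, not the helper's exact integer math): PBN units
+ * are 54/64 MB/s with the usual 1.006 DP margin factor, so
+ * PBN ~= ceil(clock * bpp / 8 / 1000 * 64 / 54 * 1.006). First case:
+ * 154000 * 30 / 8 = 577500 kB/s = 577.5 MB/s, and
+ * 577.5 * 64 / 54 * 1.006 ~= 688.6, which rounds up to 689.
+ */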
+
+static bool
+sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
+ const struct drm_dp_sideband_msg_req_body *out)
+{
+ const struct drm_dp_remote_i2c_read_tx *txin, *txout;
+ int i;
+
+ if (in->req_type != out->req_type)
+ return false;
+
+ switch (in->req_type) {
+ /*
+ * Compare struct members manually for request types which can't be
+ * compared simply using memcmp(). This is because said request types
+ * contain pointers to other allocated structs.
+ */
+ case DP_REMOTE_I2C_READ:
+#define IN in->u.i2c_read
+#define OUT out->u.i2c_read
+ if (IN.num_bytes_read != OUT.num_bytes_read ||
+ IN.num_transactions != OUT.num_transactions ||
+ IN.port_number != OUT.port_number ||
+ IN.read_i2c_device_id != OUT.read_i2c_device_id)
+ return false;
+
+ for (i = 0; i < IN.num_transactions; i++) {
+ txin = &IN.transactions[i];
+ txout = &OUT.transactions[i];
+
+ if (txin->i2c_dev_id != txout->i2c_dev_id ||
+ txin->no_stop_bit != txout->no_stop_bit ||
+ txin->num_bytes != txout->num_bytes ||
+ txin->i2c_transaction_delay !=
+ txout->i2c_transaction_delay)
+ return false;
+
+ if (memcmp(txin->bytes, txout->bytes,
+ txin->num_bytes) != 0)
+ return false;
+ }
+ break;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_DPCD_WRITE:
+#define IN in->u.dpcd_write
+#define OUT out->u.dpcd_write
+ if (IN.dpcd_address != OUT.dpcd_address ||
+ IN.num_bytes != OUT.num_bytes ||
+ IN.port_number != OUT.port_number)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_I2C_WRITE:
+#define IN in->u.i2c_write
+#define OUT out->u.i2c_write
+ if (IN.port_number != OUT.port_number ||
+ IN.write_i2c_device_id != OUT.write_i2c_device_id ||
+ IN.num_bytes != OUT.num_bytes)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ default:
+ return memcmp(in, out, sizeof(*in)) == 0;
+ }
+
+ return true;
+}
+
+static bool
+sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
+{
+ struct drm_dp_sideband_msg_req_body out = {0};
+ struct drm_printer p = drm_err_printer(PREFIX_STR);
+ struct drm_dp_sideband_msg_tx txmsg;
+ int i, ret;
+
+ drm_dp_encode_sideband_req(in, &txmsg);
+ ret = drm_dp_decode_sideband_req(&txmsg, &out);
+ if (ret < 0) {
+ drm_printf(&p, "Failed to decode sideband request: %d\n",
+ ret);
+ return false;
+ }
+
+ if (!sideband_msg_req_equal(in, &out)) {
+ drm_printf(&p, "Encode/decode failed, expected:\n");
+ drm_dp_dump_sideband_msg_req_body(in, 1, &p);
+ drm_printf(&p, "Got:\n");
+ drm_dp_dump_sideband_msg_req_body(&out, 1, &p);
+ return false;
+ }
+
+ switch (in->req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(out.u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < out.u.i2c_read.num_transactions; i++)
+ kfree(out.u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(out.u.i2c_write.bytes);
+ break;
+ }
+
+ /* Clear everything but the req_type for the input */
+ memset(&in->u, 0, sizeof(in->u));
+
+ return true;
+}
+
+int igt_dp_mst_sideband_msg_req_decode(void *unused)
+{
+ struct drm_dp_sideband_msg_req_body in = { 0 };
+ u8 data[] = { 0xff, 0x0, 0xdd };
+ int i;
+
+#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in))
+
+ in.req_type = DP_ENUM_PATH_RESOURCES;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_UP_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_POWER_DOWN_PHY;
+ in.u.port_num.port_number = 5;
+ DO_TEST();
+
+ in.req_type = DP_ALLOCATE_PAYLOAD;
+ in.u.allocate_payload.number_sdp_streams = 3;
+ for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
+ in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
+ DO_TEST();
+ in.u.allocate_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.allocate_payload.vcpi = 0x7f;
+ DO_TEST();
+ in.u.allocate_payload.pbn = U16_MAX;
+ DO_TEST();
+
+ in.req_type = DP_QUERY_PAYLOAD;
+ in.u.query_payload.port_number = 0xf;
+ DO_TEST();
+ in.u.query_payload.vcpi = 0x7f;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_READ;
+ in.u.dpcd_read.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_read.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_read.num_bytes = U8_MAX;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_DPCD_WRITE;
+ in.u.dpcd_write.port_number = 0xf;
+ DO_TEST();
+ in.u.dpcd_write.dpcd_address = 0xfedcb;
+ DO_TEST();
+ in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
+ in.u.dpcd_write.bytes = data;
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_READ;
+ in.u.i2c_read.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_read.read_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_read.num_transactions = 3;
+ in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
+ for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
+ in.u.i2c_read.transactions[i].bytes = data;
+ in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
+ in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
+ }
+ DO_TEST();
+
+ in.req_type = DP_REMOTE_I2C_WRITE;
+ in.u.i2c_write.port_number = 0xf;
+ DO_TEST();
+ in.u.i2c_write.write_i2c_device_id = 0x7f;
+ DO_TEST();
+ in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_write.bytes = data;
+ DO_TEST();
+
+#undef DO_TEST
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index 74d5561a862b..2d29ea6f92e2 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -126,7 +126,7 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
.handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
}
},
-{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 388f9844f4ba..9aabe82dcd3a 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node before start: node=%llx+%llu, start=%llx\n",
node->start, node->size, start);
return false;
@@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
- if (node->allocated) {
+ if (drm_mm_node_allocated(node)) {
pr_err("node after end: node=%llx+%llu, end=%llx\n",
node->start, node->size, end);
return false;
@@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count)
struct drm_mm_node *next = list_next_entry(hole, node_list);
const char *node1 = NULL, *node2 = NULL;
- if (hole->allocated)
+ if (drm_mm_node_allocated(hole))
node1 = kasprintf(GFP_KERNEL,
"[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
- if (next->allocated)
+ if (drm_mm_node_allocated(next))
node2 = kasprintf(GFP_KERNEL,
", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
@@ -1900,18 +1900,18 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
++*start;
node = list_next_entry(node, node_list);
- if (node->allocated && node->color != color)
+ if (drm_mm_node_allocated(node) && node->color != color)
--*end;
}
static bool colors_abutt(const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
- list_next_entry(node, node_list)->allocated) {
+ drm_mm_node_allocated(list_next_entry(node, node_list))) {
pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
index 8c76f09c12d1..0fcb8bbc6a1b 100644
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -39,5 +39,7 @@ int igt_damage_iter_damage_one_intersect(void *ignored);
int igt_damage_iter_damage_one_outside(void *ignored);
int igt_damage_iter_damage_src_moved(void *ignored);
int igt_damage_iter_damage_not_visible(void *ignored);
+int igt_dp_mst_calc_pbn_mode(void *ignored);
+int igt_dp_mst_sideband_msg_req_decode(void *ignored);
#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 0bf7c332cf0b..ea64c1dcaf63 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -47,7 +47,7 @@ struct dma_pixmap {
void *base;
};
-/**
+/*
* STI Cursor structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index e55870190bf5..68289b0b063a 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@@ -65,7 +66,7 @@ static struct dvo_config rgb_24bit_de_cfg = {
.awg_fwgen_fct = sti_awg_generate_code_data_enable_mode,
};
-/**
+/*
* STI digital video output structure
*
* @dev: driver device
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 8e926cd6a1c8..11595c748844 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -103,7 +103,7 @@ struct sti_gdp_node_list {
dma_addr_t btm_field_paddr;
};
-/**
+/*
* STI GDP structure
*
* @sti_plane: sti_plane structure
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 94e404f13234..8f7bf33815fd 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -12,6 +12,7 @@
#include <linux/seq_file.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
@@ -230,7 +231,7 @@ static const struct sti_hda_video_config hda_supported_modes[] = {
AWGi_720x480p_60, NN_720x480p_60, VID_ED}
};
-/**
+/*
* STI hd analog structure
*
* @dev: driver device
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 9862c322f0c4..814560ead4e1 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -9,11 +9,12 @@
#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -333,7 +334,6 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
* Helper to concatenate infoframe in 32 bits word
*
* @ptr: pointer on the hdmi internal structure
- * @data: infoframe to write
* @size: size to write
*/
static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
@@ -543,13 +543,14 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
return 0;
}
+#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
+
/**
* Software reset of the hdmi subsystem
*
* @hdmi: pointer on the hdmi internal structure
*
*/
-#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
static void hdmi_swreset(struct sti_hdmi *hdmi)
{
u32 val;
@@ -1256,6 +1257,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct drm_encoder *encoder;
struct sti_hdmi_connector *connector;
+ struct cec_connector_info conn_info;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
int err;
@@ -1318,6 +1320,14 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ cec_fill_conn_info_from_drm(&conn_info, drm_connector);
+ hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL,
+ &conn_info);
+ if (!hdmi->notifier) {
+ hdmi->drm_connector = NULL;
+ return -ENOMEM;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
@@ -1331,6 +1341,9 @@ err_sysfs:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ cec_notifier_conn_unregister(hdmi->notifier);
}
static const struct component_ops sti_hdmi_ops = {
@@ -1436,10 +1449,6 @@ static int sti_hdmi_probe(struct platform_device *pdev)
goto release_adapter;
}
- hdmi->notifier = cec_notifier_get(&pdev->dev);
- if (!hdmi->notifier)
- goto release_adapter;
-
hdmi->reset = devm_reset_control_get(dev, "hdmi");
/* Take hdmi out of reset */
if (!IS_ERR(hdmi->reset))
@@ -1459,14 +1468,11 @@ static int sti_hdmi_remove(struct platform_device *pdev)
{
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
- cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
-
i2c_put_adapter(hdmi->ddc_adapt);
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
- cec_notifier_put(hdmi->notifier);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index aba79c172512..5767e93dd1cd 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -157,9 +157,9 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
*
* @tvout: tvout structure
* @reg: register to set
- * @cr_r:
- * @y_g:
- * @cb_b:
+ * @cr_r: red chroma or red order
+ * @y_g: y or green order
+ * @cb_b: blue chroma or blue order
*/
static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
@@ -214,7 +214,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
* @tvout: tvout structure
* @reg: register to set
* @main_path: main or auxiliary path
- * @sel_input: selected_input (main/aux + conv)
+ * @video_out: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
int reg,
@@ -251,7 +251,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
*
* @tvout: tvout structure
* @reg: register to set
- * @in_vid_signed: used video input format
+ * @in_vid_fmt: used video input format
*/
static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
int reg, u32 in_vid_fmt)
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index ef4009f11396..0b17ac8a3faa 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -121,7 +121,7 @@ struct sti_vtg_sync_params {
u32 vsync_off_bot;
};
-/**
+/*
* STI VTG structure
*
* @regs: register mapping
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a03a642c147c..514efefb0016 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -260,8 +260,11 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
/* Compute requested pll out */
bpp = mipi_dsi_pixel_format_to_bpp(format);
pll_out_khz = mode->clock * bpp / lanes;
+
/* Add 20% to pll out to be higher than pixel bw (burst mode only) */
- pll_out_khz = (pll_out_khz * 12) / 10;
+ if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ pll_out_khz = (pll_out_khz * 12) / 10;
+
if (pll_out_khz > dsi->lane_max_kbps) {
pll_out_khz = dsi->lane_max_kbps;
DRM_WARN("Warning max phy mbps is used\n");
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 3ab4fbf8eb0d..5b51298921cf 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -1040,6 +1041,36 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static void ltdc_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Set the pinctrl to its sleep state regardless of the encoder type */
+ pinctrl_pm_select_sleep_state(ddev->dev);
+}
+
+static void ltdc_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_device *ddev = encoder->dev;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /*
+ * Set the pinctrl to its default state only for the DPI encoder type.
+ * Other types, such as DSI, don't need pinctrl because they use an
+ * internal bridge (the signals do not come out of the chipset).
+ */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPI)
+ pinctrl_pm_select_default_state(ddev->dev);
+}
+
+static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = {
+ .disable = ltdc_encoder_disable,
+ .enable = ltdc_encoder_enable,
+};
+
static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct drm_encoder *encoder;
@@ -1055,6 +1086,8 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
+ drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
+
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret) {
drm_encoder_cleanup(encoder);
@@ -1236,8 +1269,8 @@ int ltdc_load(struct drm_device *ddev)
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
- bridge[i] = drm_panel_bridge_add(panel[i],
- DRM_MODE_CONNECTOR_DPI);
+ bridge[i] = drm_panel_bridge_add_typed(panel[i],
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge[i])) {
DRM_ERROR("panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge[i]);
@@ -1280,6 +1313,8 @@ int ltdc_load(struct drm_device *ddev)
clk_disable_unprepare(ldev->pixel_clk);
+ pinctrl_pm_select_sleep_state(ddev->dev);
+
pm_runtime_enable(ddev->dev);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index eb8071a4d6d0..a7c4654445c7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -490,6 +490,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+ struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
struct resource *res;
@@ -629,8 +630,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
- hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
+ hdmi, "sun4i", CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_cleanup_connector;
@@ -649,6 +649,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
"Couldn't initialise the HDMI connector\n");
goto err_cleanup_connector;
}
+ cec_fill_conn_info_from_drm(&conn_info, &hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
/* There is no HPD interrupt, so we need to poll the controller */
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 7fbf425acb55..25ab2ef6d545 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index aac56983f208..e74b9eddca01 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index b89439ed210d..42651d737c55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -16,6 +16,7 @@
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 1636344ba9ec..c958ca9bae63 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -365,8 +366,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
- u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+ u16 delay = mode->vtotal - (mode->vsync_start - mode->vdisplay) + 1;
if (delay > mode->vtotal)
delay = delay % mode->vtotal;
@@ -437,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
- } else if ((mode->hsync_end - mode->hdisplay) > 20) {
+ } else if ((mode->hsync_start - mode->hdisplay) > 20) {
/* Maaaaaagic */
- u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
+ u16 drq = (mode->hsync_start - mode->hdisplay) - 20;
drq *= mipi_dsi_pixel_format_to_bpp(device->format);
drq /= 32;
@@ -569,11 +569,12 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
(mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
/*
- * The frontporch is set using a blanking packet (4
- * bytes + payload + 2 bytes). Its minimal size is
- * therefore 6 bytes
+ * The frontporch is set using a sync event (4 bytes)
+ * and two blanking packets (each one is 4 bytes +
+ * payload + 2 bytes). Its minimal size is therefore
+ * 16 bytes
*/
-#define HFP_PACKET_OVERHEAD 6
+#define HFP_PACKET_OVERHEAD 16
hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
(mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
@@ -831,8 +832,8 @@ static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
u32 pkt = msg->type;
if (msg->type == MIPI_DSI_DCS_LONG_WRITE) {
- pkt |= ((msg->tx_len + 1) & 0xffff) << 8;
- pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16;
+ pkt |= ((msg->tx_len) & 0xffff) << 8;
+ pkt |= (((msg->tx_len) >> 8) & 0xffff) << 16;
} else {
pkt |= (((u8 *)msg->tx_buf)[0] << 8);
if (msg->tx_len > 1)
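/*
 * Note on the tx_len fix above: a MIPI DSI long packet header is four
 * bytes (data identifier, 16-bit word count, ECC), where the word count
 * is the payload length in bytes. Encoding tx_len + 1, as the old code
 * did, declared one more payload byte than was actually transmitted.
 */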
@@ -1100,6 +1101,12 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
+ dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
+ if (IS_ERR(dsi->regulator)) {
+ dev_err(dev, "Couldn't get VCC-DSI supply\n");
+ return PTR_ERR(dsi->regulator);
+ }
+
dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base,
&sun6i_dsi_regmap_config);
if (IS_ERR(dsi->regs)) {
@@ -1173,6 +1180,13 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->regulator);
+ if (err) {
+ dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+ return err;
+ }
reset_control_deassert(dsi->reset);
clk_prepare_enable(dsi->mod_clk);
@@ -1205,6 +1219,7 @@ static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
clk_disable_unprepare(dsi->mod_clk);
reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 5c3ad5be0690..3f4846f581ef 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -23,6 +23,7 @@ struct sun6i_dsi {
struct clk *bus_clk;
struct clk *mod_clk;
struct regmap *regs;
+ struct regulator *regulator;
struct reset_control *reset;
struct phy *dphy;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index a44dca4b0219..e8a317d5ba19 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -226,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
plat_data->mode_valid = hdmi->quirks->mode_valid;
+ plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe;
sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -300,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+ .use_drm_infoframe = true,
};
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d707c9171824..8e64945167e9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,6 +179,7 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
+ unsigned int use_drm_infoframe : 1;
};
struct sun8i_dw_hdmi {
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 1d1269fde3c1..5043dcaf1cf9 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -9,7 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 33c463e8d49f..d6cf202414f0 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -5,6 +5,7 @@ tegra-drm-y := \
drm.o \
gem.o \
fb.o \
+ dp.o \
hub.o \
plane.o \
dc.o \
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fbf57bc3cdab..5b1f9ff97576 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.swap = state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
- struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
-
- window.base[i] = bo->paddr + fb->offsets[i];
+ window.base[i] = state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
@@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
@@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
return;
}
- value |= (bo->paddr >> 10) & 0x3fffff;
+ value |= (bo->iova >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (bo->paddr >> 32) & 0x3;
+ value = (bo->iova >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
@@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client)
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
- dc->group = host1x_client_iommu_attach(client, true);
- if (IS_ERR(dc->group)) {
- err = PTR_ERR(dc->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
@@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
cleanup:
@@ -2083,7 +2090,7 @@ cleanup:
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return err;
@@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
@@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
- host1x_client_iommu_detach(client, dc->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 0c4d17851f47..3d8ddccd758f 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -90,8 +90,6 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
-
- struct iommu_group *group;
};
static inline struct tegra_dc *
diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c
new file mode 100644
index 000000000000..70dfb7d1dec5
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp.h"
+
+static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
+
+static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+ caps->alternate_scrambler_reset = false;
+}
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src)
+{
+ dest->enhanced_framing = src->enhanced_framing;
+ dest->tps3_supported = src->tps3_supported;
+ dest->fast_training = src->fast_training;
+ dest->channel_coding = src->channel_coding;
+ dest->alternate_scrambler_reset = src->alternate_scrambler_reset;
+}
+
+static void drm_dp_link_reset(struct drm_dp_link *link)
+{
+ unsigned int i;
+
+ if (!link)
+ return;
+
+ link->revision = 0;
+ link->max_rate = 0;
+ link->max_lanes = 0;
+
+ drm_dp_link_caps_reset(&link->caps);
+ link->aux_rd_interval.cr = 0;
+ link->aux_rd_interval.ce = 0;
+ link->edp = 0;
+
+ link->rate = 0;
+ link->lanes = 0;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = 0;
+}
+
+/**
+ * drm_dp_link_add_rate() - add a rate to the list of supported rates
+ * @link: the link to add the rate to
+ * @rate: the rate to add
+ *
+ * Add a link rate to the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - ENOSPC if the maximum number of supported rates has been reached
+ * - EEXISTS if the link already supports this rate
+ *
+ * See also:
+ * drm_dp_link_remove_rate()
+ */
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i, pivot;
+
+ if (link->num_rates == DP_MAX_SUPPORTED_RATES)
+ return -ENOSPC;
+
+ for (pivot = 0; pivot < link->num_rates; pivot++)
+ if (rate <= link->rates[pivot])
+ break;
+
+ if (pivot != link->num_rates && rate == link->rates[pivot])
+ return -EEXIST;
+
+ for (i = link->num_rates; i > pivot; i--)
+ link->rates[i] = link->rates[i - 1];
+
+ link->rates[pivot] = rate;
+ link->num_rates++;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
+ * @link: the link from which to remove the rate
+ * @rate: the rate to remove
+ *
+ * Removes a link rate from the list of supported link rates.
+ *
+ * Returns:
+ * 0 on success or one of the following negative error codes on failure:
+ * - EINVAL if the specified rate is not among the supported rates
+ *
+ * See also:
+ * drm_dp_link_add_rate()
+ */
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
+{
+ unsigned int i;
+
+ for (i = 0; i < link->num_rates; i++)
+ if (rate == link->rates[i])
+ break;
+
+ if (i == link->num_rates)
+ return -EINVAL;
+
+ link->num_rates--;
+
+ while (i < link->num_rates) {
+ link->rates[i] = link->rates[i + 1];
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_update_rates() - normalize the supported link rates array
+ * @link: the link for which to normalize the supported link rates
+ *
+ * Users should call this function after they've manually modified the array
+ * of supported link rates. This function removes any stale entries, compacts
+ * the array and updates the supported link rate count. Note that calling the
+ * drm_dp_link_remove_rate() function already does this janitorial work.
+ *
+ * See also:
+ * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
+ */
+void drm_dp_link_update_rates(struct drm_dp_link *link)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < link->num_rates; i++) {
+ if (link->rates[i] != 0)
+ link->rates[count++] = link->rates[i];
+ }
+
+ for (i = count; i < link->num_rates; i++)
+ link->rates[i] = 0;
+
+ link->num_rates = count;
+}
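+
+/*
+ * Usage sketch for the helpers above (illustrative, error handling
+ * elided): insertions keep the array sorted and reject duplicates.
+ *
+ *	drm_dp_link_add_rate(link, 270000);
+ *	drm_dp_link_add_rate(link, 162000);
+ *	drm_dp_link_add_rate(link, 270000);	returns -EEXIST
+ *
+ * This leaves link->rates[] == { 162000, 270000 } and
+ * link->num_rates == 2.
+ */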
+
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
+ unsigned int rd_interval;
+ int err;
+
+ drm_dp_link_reset(link);
+
+ err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
+ if (err < 0)
+ return err;
+
+ link->revision = dpcd[DP_DPCD_REV];
+ link->max_rate = drm_dp_max_link_rate(dpcd);
+ link->max_lanes = drm_dp_max_lane_count(dpcd);
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
+
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ link->caps.alternate_scrambler_reset = true;
+
+ err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
+ if (err < 0)
+ return err;
+
+ if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
+ DRM_ERROR("unsupported eDP version: %02x\n", value);
+ else
+ link->edp = drm_dp_edp_revisions[value];
+ }
+
+ /*
+ * The DPCD stores the AUX read interval in units of 4 ms. There are
+ * two special cases:
+ *
+ * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
+ * and channel equalization should use 100 us or 400 us AUX read
+ * intervals, respectively
+ *
+ * 2) for DP v1.4 and above, clock recovery should always use 100 us
+ * AUX read intervals
+ */
+ rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4) {
+ DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
+ rd_interval);
+ rd_interval = 4;
+ }
+
+ rd_interval *= 4 * USEC_PER_MSEC;
+
+ if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
+ link->aux_rd_interval.cr = 100;
+
+ if (rd_interval == 0)
+ link->aux_rd_interval.ce = 400;
+
+ link->rate = link->max_rate;
+ link->lanes = link->max_lanes;
+
+ /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
+ if (link->edp >= 0x14) {
+ u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
+ unsigned int i;
+ u16 rate;
+
+ err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
+ supported_rates,
+ sizeof(supported_rates));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
+ rate = supported_rates[i * 2 + 1] << 8 |
+ supported_rates[i * 2 + 0];
+
+ drm_dp_link_add_rate(link, rate * 200);
+ }
+ }
+
+ return 0;
+}
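+
+/*
+ * Worked example for the AUX read interval decoding above (illustrative):
+ * a non-zero TRAINING_AUX_RD_INTERVAL field value n corresponds to
+ * n * 4 ms. A zero field selects the spec defaults of 100 us for clock
+ * recovery and 400 us for channel equalization, and on DPCD 1.4+ clock
+ * recovery uses 100 us regardless of the field value.
+ */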
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ /*
+ * According to the DP 1.1 specification, a "Sink Device must exit the
+ * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+ * Control Field" (register 0x600).
+ */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 value;
+ int err;
+
+ /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+ if (link->revision < 0x11)
+ return 0;
+
+ err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+ if (err < 0)
+ return err;
+
+ value &= ~DP_SET_POWER_MASK;
+ value |= DP_SET_POWER_D3;
+
+ err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+ u8 values[2], value;
+ int err;
+
+ if (link->ops && link->ops->configure) {
+ err = link->ops->configure(link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+ }
+
+ values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ values[1] = link->lanes;
+
+ if (link->caps.enhanced_framing)
+ values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+ err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+ if (err < 0)
+ return err;
+
+ if (link->caps.channel_coding)
+ value = DP_SET_ANSI_8B10B;
+ else
+ value = 0;
+
+ err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+ if (err < 0)
+ return err;
+
+ if (link->caps.alternate_scrambler_reset) {
+ err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_link_choose() - choose the lowest possible configuration for a mode
+ * @link: DRM DP link object
+ * @mode: DRM display mode
+ * @info: DRM display information
+ *
+ * According to the eDP specification, a source should select a configuration
+ * with the lowest number of lanes and the lowest possible link rate that can
+ * match the bitrate requirements of a video mode. However, it must ensure
+ * not to exceed the capabilities of the sink.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info)
+{
+ /* available link symbol clock rates */
+ static const unsigned int rates[3] = { 162000, 270000, 540000 };
+ /* available number of lanes */
+ static const unsigned int lanes[3] = { 1, 2, 4 };
+ unsigned long requirement, capacity;
+ unsigned int rate = link->max_rate;
+ unsigned int i, j;
+
+ /* bandwidth requirement */
+ requirement = mode->clock * info->bpc * 3;
+
+ for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
+ for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
+ /*
+ * Capacity for this combination of lanes and rate,
+ * factoring in the ANSI 8B/10B encoding.
+ *
+ * Link rates in the DRM DP helpers are really link
+ * symbol frequencies, so a tenth of the actual rate
+ * of the link.
+ */
+ capacity = lanes[i] * (rates[j] * 10) * 8 / 10;
+
+ if (capacity >= requirement) {
+ DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
+ lanes[i], rates[j], requirement,
+ capacity);
+ link->lanes = lanes[i];
+ link->rate = rates[j];
+ return 0;
+ }
+ }
+ }
+
+ return -ERANGE;
+}
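+
+/*
+ * Worked example for drm_dp_link_choose() (illustrative numbers): a
+ * 1920x1080@60 mode with a 148500 kHz pixel clock at 8 bpc requires
+ * 148500 * 8 * 3 = 3564000 kbps. After 8b/10b coding, one lane carries
+ * 1296000 kbps at 162000, 2160000 kbps at 270000 and 4320000 kbps at
+ * 540000, so the loop settles on a single lane at 540000 before it
+ * ever considers two lanes.
+ */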
+
+/**
+ * DOC: Link training
+ *
+ * These functions contain common logic and helpers to implement DisplayPort
+ * link training.
+ */
+
+/**
+ * drm_dp_link_train_init() - initialize DisplayPort link training state
+ * @train: DisplayPort link training state
+ */
+void drm_dp_link_train_init(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = 0;
+ adjust->voltage_swing[i] = 0;
+
+ request->pre_emphasis[i] = 0;
+ adjust->pre_emphasis[i] = 0;
+
+ request->post_cursor[i] = 0;
+ adjust->post_cursor[i] = 0;
+ }
+
+ train->pattern = DP_TRAINING_PATTERN_DISABLE;
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int drm_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct drm_dp_link_train_set *request = &link->train.request;
+ unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
+ struct drm_dp_aux *aux = link->aux;
+ u8 values[4], pattern = 0;
+ int err;
+
+ err = link->ops->apply_training(link);
+ if (err < 0) {
+ DRM_ERROR("failed to apply link training: %d\n", err);
+ return err;
+ }
+
+ vs = request->voltage_swing;
+ pe = request->pre_emphasis;
+ pc = request->post_cursor;
+
+ /* write currently selected voltage-swing and pre-emphasis levels */
+ for (i = 0; i < lanes; i++)
+ values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
+ DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
+ if (err < 0) {
+ DRM_ERROR("failed to set training parameters: %d\n", err);
+ return err;
+ }
+
+ /* write currently selected post-cursor level (if supported) */
+ if (link->revision >= 0x12 && link->rate == 540000) {
+ values[0] = values[1] = 0;
+
+ for (i = 0; i < lanes; i++)
+ values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);
+
+ err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
+ DIV_ROUND_UP(lanes, 2));
+ if (err < 0) {
+ DRM_ERROR("failed to set post-cursor: %d\n", err);
+ return err;
+ }
+ }
+
+ /* write link pattern */
+ if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
+ pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ pattern |= link->train.pattern;
+
+ err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
+ if (err < 0) {
+ DRM_ERROR("failed to set training pattern: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
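+/*
+ * Wait out the AUX read interval requested by the sink (see the
+ * aux_rd_interval fields in struct drm_dp_link) before reading back the
+ * link status: the clock recovery interval applies while training pattern
+ * 1 is transmitted, the channel equalization interval while training
+ * patterns 2 or 3 are transmitted.
+ */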
+static void drm_dp_link_train_wait(struct drm_dp_link *link)
+{
+ unsigned long min = 0;
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_1:
+ min = link->aux_rd_interval.cr;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ case DP_TRAINING_PATTERN_3:
+ min = link->aux_rd_interval.ce;
+ break;
+
+ default:
+ break;
+ }
+
+ if (min > 0)
+ usleep_range(min, 2 * min);
+}
+
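+/*
+ * Extract the per-lane voltage swing, pre-emphasis and post-cursor levels
+ * requested by the sink from the adjustment request fields of the link
+ * status and store them as the new adjustment settings.
+ */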
+static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct drm_dp_link_train_set *adjust = &link->train.adjust;
+ unsigned int i;
+
+ for (i = 0; i < link->lanes; i++) {
+ adjust->voltage_swing[i] =
+ drm_dp_get_adjust_request_voltage(status, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+ adjust->pre_emphasis[i] =
+ drm_dp_get_adjust_request_pre_emphasis(status, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ adjust->post_cursor[i] =
+ drm_dp_get_adjust_request_post_cursor(status, i);
+ }
+}
+
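+/*
+ * Promote the adjustments requested by the sink to the settings that will
+ * be requested in the next training iteration.
+ */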
+static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
+{
+ struct drm_dp_link_train_set *request = &train->request;
+ struct drm_dp_link_train_set *adjust = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ request->voltage_swing[i] = adjust->voltage_swing[i];
+ request->pre_emphasis[i] = adjust->pre_emphasis[i];
+ request->post_cursor[i] = adjust->post_cursor[i];
+ }
+}
+
+static int drm_dp_link_recover_clock(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.clock_recovered = true;
+
+ return 0;
+}
+
+static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start clock recovery using training pattern 1 */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_recover_clock(link);
+ if (err < 0) {
+ DRM_ERROR("failed to recover clock: %d\n", err);
+ return err;
+ }
+
+ if (link->train.clock_recovered)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
+static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
+{
+ struct drm_dp_aux *aux = link->aux;
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ return err;
+
+ drm_dp_link_train_wait(link);
+
+ err = drm_dp_dpcd_read_link_status(aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ return err;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ return 0;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes))
+ drm_dp_link_get_adjustments(link, status);
+ else
+ link->train.channel_equalized = true;
+
+ return 0;
+}
+
+static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
+{
+ unsigned int repeat;
+ int err;
+
+ /* start channel equalization using pattern 2 or 3 */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ for (repeat = 1; repeat < 5; repeat++) {
+ err = drm_dp_link_equalize_channel(link);
+ if (err < 0) {
+ DRM_ERROR("failed to equalize channel: %d\n", err);
+ return err;
+ }
+
+ if (link->train.channel_equalized)
+ break;
+
+ drm_dp_link_train_adjust(&link->train);
+ }
+
+ return 0;
+}
+
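+/*
+ * Fall back to the next lower link rate. At the lowest rate (162000 kHz)
+ * no further downgrade is possible and -EINVAL is returned.
+ */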
+static int drm_dp_link_downgrade(struct drm_dp_link *link)
+{
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+
+ case 270000:
+ link->rate = 162000;
+ break;
+
+ case 540000:
+ link->rate = 270000;
+ break;
+ }
+
+ return 0;
+}
+
+static void drm_dp_link_train_disable(struct drm_dp_link *link)
+{
+ int err;
+
+ link->train.pattern = DP_TRAINING_PATTERN_DISABLE;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ DRM_ERROR("failed to disable link training: %d\n", err);
+}
+
+static int drm_dp_link_train_full(struct drm_dp_link *link)
+{
+ int err;
+
+retry:
+ DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ err = drm_dp_link_clock_recovery(link);
+ if (err < 0) {
+ DRM_ERROR("clock recovery failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ DRM_ERROR("clock recovery failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("clock recovery succeeded\n");
+
+ err = drm_dp_link_channel_equalization(link);
+ if (err < 0) {
+ DRM_ERROR("channel equalization failed: %d\n", err);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ DRM_ERROR("channel equalization failed, downgrading link\n");
+
+ err = drm_dp_link_downgrade(link);
+ if (err < 0)
+ goto out;
+
+ goto retry;
+ }
+
+ DRM_DEBUG_KMS("channel equalization succeeded\n");
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+static int drm_dp_link_train_fast(struct drm_dp_link *link)
+{
+ u8 status[DP_LINK_STATUS_SIZE];
+ int err;
+
+ DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "",
+ link->rate / 100);
+
+ err = drm_dp_link_configure(link->aux, link);
+ if (err < 0) {
+ DRM_ERROR("failed to configure DP link: %d\n", err);
+ return err;
+ }
+
+ /* transmit training pattern 1 for 500 microseconds */
+ link->train.pattern = DP_TRAINING_PATTERN_1;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ /* transmit training pattern 2 or 3 for 500 microseconds */
+ if (link->caps.tps3_supported)
+ link->train.pattern = DP_TRAINING_PATTERN_3;
+ else
+ link->train.pattern = DP_TRAINING_PATTERN_2;
+
+ err = drm_dp_link_apply_training(link);
+ if (err < 0)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ err = drm_dp_dpcd_read_link_status(link->aux, status);
+ if (err < 0) {
+ DRM_ERROR("failed to read link status: %d\n", err);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ DRM_ERROR("clock recovery failed\n");
+ err = -EIO;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ DRM_ERROR("channel equalization failed\n");
+ err = -EIO;
+ }
+
+out:
+ drm_dp_link_train_disable(link);
+ return err;
+}
+
+/**
+ * drm_dp_link_train() - perform DisplayPort link training
+ * @link: a DP link object
+ *
+ * Uses the context stored in the DP link object to perform link training. It
+ * is expected that drivers will call drm_dp_link_probe() to obtain the link
+ * capabilities before performing link training.
+ *
+ * If the sink supports fast link training (no AUX CH handshake) and valid
+ * training settings are available, this function will try to perform fast
+ * link training and fall back to full link training on failure.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_train(struct drm_dp_link *link)
+{
+ int err;
+
+ drm_dp_link_train_init(&link->train);
+
+ if (link->caps.fast_training) {
+ if (drm_dp_link_train_valid(&link->train)) {
+ err = drm_dp_link_train_fast(link);
+ if (err < 0)
+ DRM_ERROR("fast link training failed: %d\n",
+ err);
+ else
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("training parameters not available\n");
+ }
+ } else {
+ DRM_DEBUG_KMS("fast link training not supported\n");
+ }
+
+ err = drm_dp_link_train_full(link);
+ if (err < 0)
+ DRM_ERROR("full link training failed: %d\n", err);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h
new file mode 100644
index 000000000000..cb12ed0c54e7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dp.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2019 NVIDIA Corporation.
+ * Copyright (C) 2015 Rob Clark
+ */
+
+#ifndef DRM_TEGRA_DP_H
+#define DRM_TEGRA_DP_H 1
+
+#include <linux/types.h>
+
+struct drm_display_info;
+struct drm_display_mode;
+struct drm_dp_aux;
+struct drm_dp_link;
+
+/**
+ * struct drm_dp_link_caps - DP link capabilities
+ */
+struct drm_dp_link_caps {
+ /**
+ * @enhanced_framing:
+ *
+ * enhanced framing capability (mandatory as of DP 1.2)
+ */
+ bool enhanced_framing;
+
+ /**
+ * @tps3_supported:
+ *
+ * training pattern sequence 3 supported for equalization
+ */
+ bool tps3_supported;
+
+ /**
+ * @fast_training:
+ *
+ * AUX CH handshake not required for link training
+ */
+ bool fast_training;
+
+ /**
+ * @channel_coding:
+ *
+ * ANSI 8B/10B channel coding capability
+ */
+ bool channel_coding;
+
+ /**
+ * @alternate_scrambler_reset:
+ *
+ * eDP alternate scrambler reset capability
+ */
+ bool alternate_scrambler_reset;
+};
+
+void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
+ const struct drm_dp_link_caps *src);
+
+/**
+ * struct drm_dp_link_ops - DP link operations
+ */
+struct drm_dp_link_ops {
+ /**
+ * @apply_training:
+ *
+ * Driver hook invoked while applying training settings so that the
+ * encoder can program the currently requested voltage swing,
+ * pre-emphasis and training pattern into the hardware.
+ */
+ int (*apply_training)(struct drm_dp_link *link);
+
+ /**
+ * @configure:
+ *
+ * Driver hook that configures the encoder hardware for the currently
+ * selected link rate and lane count.
+ */
+ int (*configure)(struct drm_dp_link *link);
+};
+
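+/*
+ * Helpers to assemble the per-lane DPCD training values: voltage swing in
+ * bits 1:0, pre-emphasis in bits 4:3 of the same byte, and two post-cursor
+ * values packed per byte (even lanes in bits 1:0, odd lanes in bits 5:4).
+ */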
+#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
+#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
+#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))
+
+/**
+ * struct drm_dp_link_train_set - link training settings
+ * @voltage_swing: per-lane voltage swing
+ * @pre_emphasis: per-lane pre-emphasis
+ * @post_cursor: per-lane post-cursor
+ */
+struct drm_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ unsigned int post_cursor[4];
+};
+
+/**
+ * struct drm_dp_link_train - link training state information
+ * @request: currently requested settings
+ * @adjust: adjustments requested by sink
+ * @pattern: currently requested training pattern
+ * @clock_recovered: flag to track if clock recovery has completed
+ * @channel_equalized: flag to track if channel equalization has completed
+ */
+struct drm_dp_link_train {
+ struct drm_dp_link_train_set request;
+ struct drm_dp_link_train_set adjust;
+
+ unsigned int pattern;
+
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+/**
+ * struct drm_dp_link - DP link capabilities and configuration
+ * @revision: DP specification revision supported on the link
+ * @max_rate: maximum clock rate supported on the link
+ * @max_lanes: maximum number of lanes supported on the link
+ * @caps: capabilities supported on the link (see &drm_dp_link_caps)
+ * @aux_rd_interval: AUX read interval to use for training (in microseconds)
+ * @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
+ * @rate: currently configured link rate
+ * @lanes: currently configured number of lanes
+ * @rates: additional supported link rates in kHz (eDP 1.4)
+ * @num_rates: number of additional supported link rates (eDP 1.4)
+ */
+struct drm_dp_link {
+ unsigned char revision;
+ unsigned int max_rate;
+ unsigned int max_lanes;
+
+ struct drm_dp_link_caps caps;
+
+ /**
+ * @cr: clock recovery read interval
+ * @ce: channel equalization read interval
+ */
+ struct {
+ unsigned int cr;
+ unsigned int ce;
+ } aux_rd_interval;
+
+ unsigned char edp;
+
+ unsigned int rate;
+ unsigned int lanes;
+
+ unsigned long rates[DP_MAX_SUPPORTED_RATES];
+ unsigned int num_rates;
+
+ /**
+ * @ops: DP link operations
+ */
+ const struct drm_dp_link_ops *ops;
+
+ /**
+ * @aux: DP AUX channel
+ */
+ struct drm_dp_aux *aux;
+
+ /**
+ * @train: DP link training state
+ */
+ struct drm_dp_link_train train;
+};
+
+int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
+int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
+void drm_dp_link_update_rates(struct drm_dp_link *link);
+
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
+int drm_dp_link_choose(struct drm_dp_link *link,
+ const struct drm_display_mode *mode,
+ const struct drm_display_info *info);
+
+void drm_dp_link_train_init(struct drm_dp_link_train *train);
+int drm_dp_link_train(struct drm_dp_link *link);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index a0f6f9b0d258..622cdf1ad246 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
@@ -22,6 +23,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
+#include "dp.h"
#include "dpaux.h"
#include "drm.h"
#include "trace.h"
@@ -29,10 +31,18 @@
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
+struct tegra_dpaux_soc {
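+ /*
+ * Per-SoC AUX pad control values, programmed into the
+ * DPAUX_HYBRID_PADCTL_AUX_{CMH,DRVZ,DRVI} register fields.
+ */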
+ unsigned int cmh;
+ unsigned int drvz;
+ unsigned int drvi;
+};
+
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
+ const struct tegra_dpaux_soc *soc;
+
void __iomem *regs;
int irq;
@@ -120,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
+ u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
@@ -214,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
- msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
- msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
- msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+ reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
- msg->reply = DP_AUX_I2C_REPLY_NACK;
+ reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
- msg->reply = DP_AUX_I2C_REPLY_DEFER;
+ reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
@@ -238,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
- if (WARN_ON(count != msg->size))
- count = min_t(size_t, count, msg->size);
+ /*
+ * There might be a smarter way to do this, but since
+ * the DP helpers will already retry transactions for
+ * an -EBUSY return value, simply reuse that instead.
+ */
+ if (count != msg->size) {
+ ret = -EBUSY;
+ goto out;
+ }
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
+ msg->reply = reply;
+
+out:
return ret;
}
@@ -310,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
@@ -320,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
@@ -436,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
+ dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
@@ -493,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->vdd);
}
+
+ dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
@@ -641,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
+static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x18,
+};
+
+static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x30,
+};
+
+static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
+ .cmh = 0x02,
+ .drvz = 0x04,
+ .drvi = 0x2c,
+};
+
static const struct of_device_id tegra_dpaux_of_match[] = {
- { .compatible = "nvidia,tegra194-dpaux", },
- { .compatible = "nvidia,tegra186-dpaux", },
- { .compatible = "nvidia,tegra210-dpaux", },
- { .compatible = "nvidia,tegra124-dpaux", },
+ { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
+ { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
+ { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
@@ -686,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
- err = regulator_enable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_enable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_connected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_connected) {
- enable_irq(dpaux->irq);
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_connected)
+ return -ETIMEDOUT;
}
- return -ETIMEDOUT;
+ enable_irq(dpaux->irq);
+ return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
@@ -715,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
disable_irq(dpaux->irq);
- err = regulator_disable(dpaux->vdd);
- if (err < 0)
- return err;
+ if (dpaux->output->panel) {
+ enum drm_connector_status status;
- timeout = jiffies + msecs_to_jiffies(250);
+ if (dpaux->vdd) {
+ err = regulator_disable(dpaux->vdd);
+ if (err < 0)
+ return err;
+ }
- while (time_before(jiffies, timeout)) {
- enum drm_connector_status status;
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ status = drm_dp_aux_detect(aux);
+
+ if (status == connector_status_disconnected)
+ break;
- status = drm_dp_aux_detect(aux);
- if (status == connector_status_disconnected) {
- dpaux->output = NULL;
- return 0;
+ usleep_range(1000, 2000);
}
- usleep_range(1000, 2000);
+ if (status != connector_status_disconnected)
+ return -ETIMEDOUT;
+
+ dpaux->output = NULL;
}
- return -ETIMEDOUT;
+ return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
@@ -764,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
return 0;
}
-
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
-{
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern)
-{
- u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
- u8 status[DP_LINK_STATUS_SIZE], values[4];
- unsigned int i;
- int err;
-
- err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
- if (err < 0)
- return err;
-
- if (tp == DP_TRAINING_PATTERN_DISABLE)
- return 0;
-
- for (i = 0; i < link->num_lanes; i++)
- values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
- DP_TRAIN_PRE_EMPH_LEVEL_0 |
- DP_TRAIN_MAX_SWING_REACHED |
- DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
- err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
- link->num_lanes);
- if (err < 0)
- return err;
-
- usleep_range(500, 1000);
-
- err = drm_dp_dpcd_read_link_status(aux, status);
- if (err < 0)
- return err;
-
- switch (tp) {
- case DP_TRAINING_PATTERN_1:
- if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- case DP_TRAINING_PATTERN_2:
- if (!drm_dp_channel_eq_ok(status, link->num_lanes))
- return -EAGAIN;
-
- break;
-
- default:
- dev_err(aux->dev, "unsupported training pattern %u\n", tp);
- return -EINVAL;
- }
-
- err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
- if (err < 0)
- return err;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 6fb7d74ff553..56e5e7a5c108 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -20,10 +20,6 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "drm.h"
#include "gem.h"
@@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra;
- int err;
-
- tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- if (iommu_present(&platform_bus_type)) {
- tegra->domain = iommu_domain_alloc(&platform_bus_type);
- if (!tegra->domain) {
- err = -ENOMEM;
- goto free;
- }
-
- err = iova_cache_get();
- if (err < 0)
- goto domain;
- }
-
- mutex_init(&tegra->clients_lock);
- INIT_LIST_HEAD(&tegra->clients);
-
- drm->dev_private = tegra;
- tegra->drm = drm;
-
- drm_mode_config_init(drm);
-
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
-
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
-
- drm->mode_config.allow_fb_modifiers = true;
-
- drm->mode_config.normalize_zpos = true;
-
- drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
- drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
-
- err = tegra_drm_fb_prepare(drm);
- if (err < 0)
- goto config;
-
- drm_kms_helper_poll_init(drm);
-
- err = host1x_device_init(device);
- if (err < 0)
- goto fbdev;
-
- if (tegra->domain) {
- u64 carveout_start, carveout_end, gem_start, gem_end;
- u64 dma_mask = dma_get_mask(&device->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- start = tegra->domain->geometry.aperture_start & dma_mask;
- end = tegra->domain->geometry.aperture_end & dma_mask;
-
- gem_start = start;
- gem_end = end - CARVEOUT_SZ;
- carveout_start = gem_end + 1;
- carveout_end = end;
-
- order = __ffs(tegra->domain->pgsize_bitmap);
- init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order);
-
- tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
- tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
-
- drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
- mutex_init(&tegra->mm_lock);
-
- DRM_DEBUG("IOMMU apertures:\n");
- DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
- DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
- carveout_end);
- }
-
- if (tegra->hub) {
- err = tegra_display_hub_prepare(tegra->hub);
- if (err < 0)
- goto device;
- }
-
- /*
- * We don't use the drm_irq_install() helpers provided by the DRM
- * core, so we need to set this manually in order to allow the
- * DRM_IOCTL_WAIT_VBLANK to operate correctly.
- */
- drm->irq_enabled = true;
-
- /* syncpoints are used for full 32-bit hardware VBLANK counters */
- drm->max_vblank_count = 0xffffffff;
-
- err = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (err < 0)
- goto hub;
-
- drm_mode_config_reset(drm);
-
- err = tegra_drm_fb_init(drm);
- if (err < 0)
- goto hub;
-
- return 0;
-
-hub:
- if (tegra->hub)
- tegra_display_hub_cleanup(tegra->hub);
-device:
- host1x_device_exit(device);
-fbdev:
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_free(drm);
-config:
- drm_mode_config_cleanup(drm);
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- }
-domain:
- if (tegra->domain)
- iommu_domain_free(tegra->domain);
-free:
- kfree(tegra);
- return err;
-}
-
-static void tegra_drm_unload(struct drm_device *drm)
-{
- struct host1x_device *device = to_host1x_device(drm->dev);
- struct tegra_drm *tegra = drm->dev_private;
- int err;
-
- drm_kms_helper_poll_fini(drm);
- tegra_drm_fb_exit(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
-
- err = host1x_device_exit(device);
- if (err < 0)
- return;
-
- if (tegra->domain) {
- mutex_destroy(&tegra->mm_lock);
- drm_mm_takedown(&tegra->mm);
- put_iova_domain(&tegra->carveout.domain);
- iova_cache_put();
- iommu_domain_free(tegra->domain);
- }
-
- kfree(tegra);
-}
-
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
@@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
+ dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
+
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
@@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
- .load = tegra_drm_load,
- .unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
@@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared)
+int host1x_client_iommu_attach(struct host1x_client *client)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
+ /*
+ * If the host1x client is already attached to an IOMMU domain that is
+ * not the shared IOMMU domain, don't try to attach it to a different
+ * domain. This allows using the IOMMU-backed DMA API.
+ */
+ if (domain && domain != tegra->domain)
+ return 0;
+
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group) {
dev_err(client->dev, "failed to get IOMMU group\n");
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
- if (!shared || (shared && (group != tegra->group))) {
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (client->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(client->dev);
- arm_iommu_detach_device(client->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
+ if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
- return ERR_PTR(err);
+ return err;
}
-
- if (shared && !tegra->group)
- tegra->group = group;
}
+
+ tegra->use_explicit_iommu = true;
}
- return group;
+ client->group = group;
+
+ return 0;
}
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group)
+void host1x_client_iommu_detach(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
+ struct iommu_domain *domain;
- if (group) {
- if (group == tegra->group) {
- iommu_detach_group(tegra->domain, group);
- tegra->group = NULL;
- }
+ if (client->group) {
+ /*
+ * Devices that are part of the same group may no longer be
+ * attached to a domain at this point because their group may
+ * have been detached by an earlier client.
+ */
+ domain = iommu_get_domain_for_dev(client->dev);
+ if (domain)
+ iommu_detach_group(tegra->domain, client->group);
- iommu_group_put(group);
+ iommu_group_put(client->group);
+ client->group = NULL;
}
}
@@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
+ struct iommu_domain *domain;
+ struct tegra_drm *tegra;
struct drm_device *drm;
int err;
@@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (IS_ERR(drm))
return PTR_ERR(drm);
+ tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+ if (!tegra) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /*
+ * If the Tegra DRM clients are backed by an IOMMU, push buffers are
+ * likely to be allocated beyond the 32-bit boundary if sufficient
+ * system memory is available. This is problematic on earlier Tegra
+ * generations where host1x supports a maximum of 32 address bits in
+ * the GATHER opcode. In this case, unless host1x is behind an IOMMU
+ * as well, it won't be able to process buffers allocated beyond the
+ * 32-bit boundary.
+ *
+ * The DMA API will use bounce buffers in this case, so that could
+ * perhaps still be made to work, even if less efficient, but there
+ * is another catch: in order to perform cache maintenance on pages
+ * allocated for discontiguous buffers we need to map and unmap the
+ * SG table representing these buffers. This is fine for something
+ * small like a push buffer, but it exhausts the bounce buffer pool
+ * (typically on the order of a few MiB) for framebuffers (many MiB
+ * for any modern resolution).
+ *
+ * Work around this by making sure that Tegra DRM clients only use
+ * an IOMMU if the parent host1x also uses an IOMMU.
+ *
+ * Note that there's still a small gap here that we don't cover: if
+ * the DMA API is backed by an IOMMU there's no way to control which
+ * device is attached to an IOMMU and which isn't, except via wiring
+ * up the device tree appropriately. This is considered a problem
+ * of integration, so care must be taken for the DT to be consistent.
+ */
+ domain = iommu_get_domain_for_dev(drm->dev->parent);
+
+ if (domain && iommu_present(&platform_bus_type)) {
+ tegra->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!tegra->domain) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto domain;
+ }
+
+ mutex_init(&tegra->clients_lock);
+ INIT_LIST_HEAD(&tegra->clients);
+
dev_set_drvdata(&dev->dev, drm);
+ drm->dev_private = tegra;
+ tegra->drm = drm;
+
+ drm_mode_config_init(drm);
- err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+
+ drm->mode_config.allow_fb_modifiers = true;
+
+ drm->mode_config.normalize_zpos = true;
+
+ drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
+
+ err = tegra_drm_fb_prepare(drm);
if (err < 0)
- goto put;
+ goto config;
+
+ drm_kms_helper_poll_init(drm);
+
+ err = host1x_device_init(dev);
+ if (err < 0)
+ goto fbdev;
+
+ if (tegra->use_explicit_iommu) {
+ u64 carveout_start, carveout_end, gem_start, gem_end;
+ u64 dma_mask = dma_get_mask(&dev->dev);
+ dma_addr_t start, end;
+ unsigned long order;
+
+ start = tegra->domain->geometry.aperture_start & dma_mask;
+ end = tegra->domain->geometry.aperture_end & dma_mask;
+
+ gem_start = start;
+ gem_end = end - CARVEOUT_SZ;
+ carveout_start = gem_end + 1;
+ carveout_end = end;
+
+ order = __ffs(tegra->domain->pgsize_bitmap);
+ init_iova_domain(&tegra->carveout.domain, 1UL << order,
+ carveout_start >> order);
+
+ tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
+ tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
+
+ drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
+ mutex_init(&tegra->mm_lock);
+
+ DRM_DEBUG_DRIVER("IOMMU apertures:\n");
+ DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
+ DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
+ carveout_end);
+ } else if (tegra->domain) {
+ iommu_domain_free(tegra->domain);
+ tegra->domain = NULL;
+ iova_cache_put();
+ }
+
+ if (tegra->hub) {
+ err = tegra_display_hub_prepare(tegra->hub);
+ if (err < 0)
+ goto device;
+ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+
+ /* syncpoints are used for full 32-bit hardware VBLANK counters */
+ drm->max_vblank_count = 0xffffffff;
+
+ err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (err < 0)
+ goto hub;
+
+ drm_mode_config_reset(drm);
+
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
+ false);
+ if (err < 0)
+ goto hub;
+
+ err = tegra_drm_fb_init(drm);
+ if (err < 0)
+ goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
- goto put;
+ goto fb;
return 0;
+fb:
+ tegra_drm_fb_exit(drm);
+hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+device:
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
+
+ host1x_device_exit(dev);
+fbdev:
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_free(drm);
+config:
+ drm_mode_config_cleanup(drm);
+domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+free:
+ kfree(tegra);
put:
drm_dev_put(drm);
return err;
@@ -1229,8 +1233,29 @@ put:
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
drm_dev_unregister(drm);
+
+ drm_kms_helper_poll_fini(drm);
+ tegra_drm_fb_exit(drm);
+ drm_atomic_helper_shutdown(drm);
+ drm_mode_config_cleanup(drm);
+
+ err = host1x_device_exit(dev);
+ if (err < 0)
+ dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
+
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ iommu_domain_free(tegra->domain);
+ }
+
+ kfree(tegra);
drm_dev_put(drm);
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 29911eff9ceb..d941553f7a3d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -36,7 +36,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
- struct iommu_group *group;
+ bool use_explicit_iommu;
struct mutex mm_lock;
struct drm_mm mm;
@@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
-struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
- bool shared);
-void host1x_client_iommu_detach(struct host1x_client *client,
- struct iommu_group *group);
+int host1x_client_iommu_attach(struct host1x_client *client);
+void host1x_client_iommu_detach(struct host1x_client *client);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
@@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
-struct drm_dp_link;
-
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
int drm_dp_aux_detach(struct drm_dp_aux *aux);
int drm_dp_aux_enable(struct drm_dp_aux *aux);
int drm_dp_aux_disable(struct drm_dp_aux *aux);
-int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
-int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
- u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f49ad36e24db..56edef06c48e 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
- u32 *firmware_vaddr = falcon->firmware.vaddr;
- dma_addr_t daddr;
+ u32 *virt = falcon->firmware.virt;
size_t i;
- int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
-
- /* ensure that caches are flushed and falcon can see the firmware */
- daddr = dma_map_single(falcon->dev, firmware_vaddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- err = dma_mapping_error(falcon->dev, daddr);
- if (err) {
- dev_err(falcon->dev, "failed to map firmware: %d\n", err);
- return;
- }
- dma_sync_single_for_device(falcon->dev, daddr,
- falcon->firmware.size, DMA_TO_DEVICE);
- dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
- DMA_TO_DEVICE);
+ virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
- struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
+ struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
@@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
return -EINVAL;
}
- os = falcon->firmware.vaddr + bin->os_header_offset;
+ os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
+ falcon->firmware.size = falcon->firmware.firmware->size;
+
return 0;
}
@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
- falcon->firmware.size = firmware->size;
-
- /* allocate iova space for the firmware */
- falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
- &falcon->firmware.paddr);
- if (IS_ERR(falcon->firmware.vaddr)) {
- dev_err(falcon->dev, "DMA memory mapping failed\n");
- return PTR_ERR(falcon->firmware.vaddr);
- }
-
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
@@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
- goto err_setup_firmware_image;
+ return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
-
-err_setup_firmware_image:
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr, falcon->firmware.vaddr);
-
- return err;
}
int falcon_init(struct falcon *falcon)
{
- /* check mandatory ops */
- if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
- return -EINVAL;
-
- falcon->firmware.vaddr = NULL;
+ falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
- if (falcon->firmware.firmware) {
+ if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
- falcon->firmware.firmware = NULL;
- }
-
- if (falcon->firmware.vaddr) {
- falcon->ops->free(falcon, falcon->firmware.size,
- falcon->firmware.paddr,
- falcon->firmware.vaddr);
- falcon->firmware.vaddr = NULL;
- }
}
int falcon_boot(struct falcon *falcon)
@@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
u32 value;
int err;
- if (!falcon->firmware.vaddr)
+ if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
@@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
- falcon_writel(falcon, (falcon->firmware.paddr +
+ falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h
index 3d1243217410..c56ee32d92ee 100644
--- a/drivers/gpu/drm/tegra/falcon.h
+++ b/drivers/gpu/drm/tegra/falcon.h
@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
-struct falcon;
-
-struct falcon_ops {
- void *(*alloc)(struct falcon *falcon, size_t size,
- dma_addr_t *paddr);
- void (*free)(struct falcon *falcon, size_t size,
- dma_addr_t paddr, void *vaddr);
-};
-
struct falcon_firmware_section {
unsigned long offset;
size_t size;
@@ -93,8 +84,9 @@ struct falcon_firmware {
const struct firmware *firmware;
/* Raw firmware data */
- dma_addr_t paddr;
- void *vaddr;
+ dma_addr_t iova;
+ dma_addr_t phys;
+ void *virt;
size_t size;
/* Parsed firmware information */
@@ -107,8 +99,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
- const struct falcon_ops *ops;
- void *data;
struct falcon_firmware firmware;
};
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e34325c83d28..7cea89f29a5c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
- drm->mode_config.fb_base = (resource_size_t)bo->paddr;
+ drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
- info->fix.smem_start = (unsigned long)(bo->paddr + offset);
+ info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index fb7667c8dd4c..746dae32c484 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
-static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
+static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct sg_table *sgt;
+ int err;
+
+ /*
+ * If we've manually mapped the buffer object through the IOMMU, make
+ * sure to return the IOVA address of our mapping.
+ */
+ if (phys && obj->mm) {
+ *phys = obj->iova;
+ return NULL;
+ }
+
+ /*
+ * If we don't have a mapping for this buffer yet, return an SG table
+ * so that host1x can do the mapping for us via the DMA API.
+ */
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- *sgt = obj->sgt;
+ if (obj->pages) {
+ err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
+ 0, obj->gem.size, GFP_KERNEL);
+ if (err < 0)
+ goto free;
+ } else {
+ err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
+ obj->gem.size);
+ if (err < 0)
+ goto free;
+ }
- return obj->paddr;
+ return sgt;
+
+free:
+ kfree(sgt);
+ return ERR_PTR(err);
}
-static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
@@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
goto unlock;
}
- bo->paddr = bo->mm->start;
+ bo->iova = bo->mm->start;
- bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+ bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
bo->sgt->nents, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
@@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
mutex_lock(&tegra->mm_lock);
- iommu_unmap(tegra->domain, bo->paddr, bo->size);
+ iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
@@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
@@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
@@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
- bo->paddr = sg_dma_address(bo->sgt->sgl);
+ bo->iova = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
@@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
@@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return NULL;
if (bo->pages) {
- struct scatterlist *sg;
- unsigned int i;
-
- if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
- goto free;
-
- for_each_sg(sgt->sgl, sg, bo->num_pages, i)
- sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
-
- if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
+ 0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
- if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+ if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
+ gem->size) < 0)
goto free;
-
- sg_dma_address(sgt->sgl) = bo->paddr;
- sg_dma_len(sgt->sgl) = gem->size;
}
+ if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+ goto free;
+
return sgt;
free:
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 83ffb1e14ca3..fafb5724499b 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -31,7 +31,7 @@ struct tegra_bo {
struct host1x_bo base;
unsigned long flags;
struct sg_table *sgt;
- dma_addr_t paddr;
+ dma_addr_t iova;
void *vaddr;
struct drm_mm_node *mm;
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 641299cc85b8..1fc4e56c7cc5 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -17,7 +17,6 @@ struct gr2d_soc {
};
struct gr2d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
@@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client)
struct gr2d *gr2d = to_gr2d(drm);
int err;
- gr2d->channel = host1x_channel_request(client->dev);
+ gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
@@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
goto put;
}
- gr2d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr2d->group)) {
- err = PTR_ERR(gr2d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr2d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 8b9a35b1cbb3..24fae0f64032 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -23,7 +23,6 @@ struct gr3d_soc {
};
struct gr3d {
- struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
@@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client)
struct gr3d *gr3d = to_gr3d(drm);
int err;
- gr3d->channel = host1x_channel_request(client->dev);
+ gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
@@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
goto put;
}
- gr3d->group = host1x_client_iommu_attach(client, false);
- if (IS_ERR(gr3d->group)) {
- err = PTR_ERR(gr3d->group);
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
@@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
return 0;
detach:
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
@@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
- host1x_client_iommu_detach(client, gr3d->group);
+ host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 839b49c40e51..2b4082d0bc9e 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = plane->state->normalized_zpos;
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
- struct tegra_bo *bo;
dma_addr_t base;
u32 value;
@@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- bo = tegra_fb_get_plane(fb, 0);
- base = bo->paddr;
+ base = state->iova[0] + fb->offsets[0];
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
@@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
+ .prepare_fb = tegra_plane_prepare_fb,
+ .cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index bdcaa4c7168c..34373734ff68 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
void tegra_output_connector_destroy(struct drm_connector *connector)
{
+ struct tegra_output *output = connector_to_output(connector);
+
+ if (output->cec)
+ cec_notifier_conn_unregister(output->cec);
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
disable_irq(output->hpd_irq);
}
- output->cec = cec_notifier_get(output->dev);
- if (!output->cec)
- return -ENOMEM;
-
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
- if (output->cec)
- cec_notifier_put(output->cec);
-
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
@@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
+ int connector_type;
int err;
if (output->panel) {
@@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
+ connector_type = output->connector.connector_type;
+ /*
+ * Create a CEC notifier for HDMI connectors.
+ */
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ struct cec_connector_info conn_info;
+
+ cec_fill_conn_info_from_drm(&conn_info, &output->connector);
+ output->cec = cec_notifier_conn_register(output->dev, NULL,
+ &conn_info);
+ if (!output->cec)
+ return -ENOMEM;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 6bab71d6e81d..163b590be224 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
@@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
+ unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
@@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
+
+ for (i = 0; i < 3; i++)
+ state->iova[i] = DMA_MAPPING_ERROR;
}
}
@@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
+ for (i = 0; i < 3; i++) {
+ copy->iova[i] = DMA_MAPPING_ERROR;
+ copy->sgt[i] = NULL;
+ }
+
return &copy->base;
}
@@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.format_mod_supported = tegra_plane_format_mod_supported,
};
+static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
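+ /*
+ * If the client is not explicitly attached to the shared IOMMU
+ * domain (no IOMMU group), map the buffer through the DMA API;
+ * otherwise the IOVA of the existing IOMMU mapping can be used
+ * directly.
+ */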
+ if (!dc->client.group) {
+ struct sg_table *sgt;
+
+ sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (err == 0) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ state->iova[i] = sg_dma_address(sgt->sgl);
+ state->sgt[i] = sgt;
+ } else {
+ state->iova[i] = bo->iova;
+ }
+ }
+
+ return 0;
+
+unpin:
+ dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
+
+ while (i--) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+ struct sg_table *sgt = state->sgt[i];
+
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+
+ return err;
+}
+
+static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < state->base.fb->format->num_planes; i++) {
+ struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
+
+ if (!dc->client.group) {
+ struct sg_table *sgt = state->sgt[i];
+
+ if (sgt) {
+ dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ host1x_bo_unpin(dc->dev, &bo->base, sgt);
+ }
+ }
+
+ state->iova[i] = DMA_MAPPING_ERROR;
+ state->sgt[i] = NULL;
+ }
+}
+
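+/*
+ * prepare_fb plane helper: pin the framebuffer's buffer objects into the
+ * display controller's address space and record the resulting I/O virtual
+ * addresses in the plane state; cleanup_fb below releases them again.
+ */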
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (!state->fb)
+ return 0;
+
+ drm_gem_fb_prepare_fb(plane, state);
+
+ return tegra_dc_pin(dc, to_tegra_plane_state(state));
+}
+
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(state->crtc);
+
+ if (dc)
+ tegra_dc_unpin(dc, to_tegra_plane_state(state));
+}
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h
index 510c394e6d9a..a158a915109a 100644
--- a/drivers/gpu/drm/tegra/plane.h
+++ b/drivers/gpu/drm/tegra/plane.h
@@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
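+ /* per-plane mappings: up to three entries for planar YUV framebuffers */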
+ struct sg_table *sgt[3];
+ dma_addr_t iova[3];
+
struct tegra_bo_tiling tiling;
u32 format;
u32 swap;
@@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
extern const struct drm_plane_funcs tegra_plane_funcs;
+int tegra_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void tegra_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index e1669ada0a40..615cb319fa8b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -25,6 +25,7 @@
#include <drm/drm_scdc_helper.h>
#include "dc.h"
+#include "dp.h"
#include "drm.h"
#include "hda.h"
#include "sor.h"
@@ -370,10 +371,11 @@ struct tegra_sor_regs {
};
struct tegra_sor_soc {
- bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
+ bool supports_audio;
+ bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
@@ -382,6 +384,12 @@ struct tegra_sor_soc {
unsigned int num_settings;
const u8 *xbar_cfg;
+ const u8 *lane_map;
+
+ const u8 (*voltage_swing)[4][4];
+ const u8 (*pre_emphasis)[4][4];
+ const u8 (*post_cursor)[4][4];
+ const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
@@ -390,6 +398,8 @@ struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
int (*remove)(struct tegra_sor *sor);
+ void (*audio_enable)(struct tegra_sor *sor);
+ void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
@@ -412,6 +422,7 @@ struct tegra_sor {
u8 xbar_cfg[5];
+ struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
@@ -514,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_pad_parents[] = {
- "pll_d2_out0", "pll_dp"
+static const char * const tegra_clk_sor_pad_parents[2][2] = {
+ { "pll_d_out0", "pll_dp" },
+ { "pll_d2_out0", "pll_dp" },
};
+/*
+ * Implementing ->set_parent() here isn't really required because the parent
+ * will be explicitly selected in the driver code via the DP_CLK_SEL mux in
+ * the SOR_CLK_CNTRL register. This is primarily for compatibility with the
+ * Tegra186 and later SoC generations where the BPMP implements this clock
+ * and doesn't expose the mux via the common clock framework.
+ */
+
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
@@ -586,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_pad_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.parent_names = tegra_clk_sor_pad_parents[sor->index];
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
@@ -597,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
return clk;
}
-static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
- struct drm_dp_link *link)
+static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
+ struct drm_dp_link *link = &sor->link;
unsigned int i;
- u8 pattern;
+
+ /* Tegra only supports RBR, HBR and HBR2 */
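+ /* unsupported rates are zeroed here and expected to be compacted by drm_dp_link_update_rates() below */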
+ for (i = 0; i < link->num_rates; i++) {
+ switch (link->rates[i]) {
+ case 1620000:
+ case 2700000:
+ case 5400000:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
+ link->rates[i]);
+ link->rates[i] = 0;
+ break;
+ }
+ }
+
+ drm_dp_link_update_rates(link);
+}
+
+static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
+{
+ unsigned long timeout;
u32 value;
- int err;
- /* setup lane parameters */
- value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
- SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
- tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
+ /*
+ * Clear or set the PD_TXD bit corresponding to each lane, depending
+ * on whether it is used or not.
+ */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
- SOR_LANE_PREEMPHASIS_LANE0(0x0f);
- tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
- value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
- SOR_LANE_POSTCURSOR_LANE2(0x00) |
- SOR_LANE_POSTCURSOR_LANE1(0x00) |
- SOR_LANE_POSTCURSOR_LANE0(0x00);
- tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
- /* disable LVDS mode */
- tegra_sor_writel(sor, 0, SOR_LVDS);
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
+
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
+ SOR_LANE_SEQ_CTL_POWER_STATE_UP;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(250, 1000);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
+{
+ unsigned long timeout;
+ u32 value;
+
+ /* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_TX_PU_ENABLE;
- value &= ~SOR_DP_PADCTL_TX_PU_MASK;
- value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
+ value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
+ SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+ /* start lane sequencer */
+ value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
+ SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
+ tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
+
+ timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
+ break;
+
+ usleep_range(25, 100);
+ }
+
+ if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
+{
+ u32 value;
+
+ /* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
- SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
+
+ if (lanes <= 2)
+ value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
+ SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
+
+ if (lanes <= 1)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
+
+ if (lanes == 0)
+ value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+ else
+ value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
+
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
- usleep_range(10, 100);
+ usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
- if (err < 0)
- return err;
+static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
+{
+ u32 mask = 0x08, adj = 0, value;
+
+ /* enable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value |= SOR_PLL1_TMDS_TERM;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
+
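+ /* successive approximation: probe each TERMADJ bit from MSB to LSB, keeping it only if TERM_COMPOUT stays low */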
+ while (mask) {
+ adj |= mask;
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN1;
- value = (value << 8) | lane;
+ usleep_range(100, 200);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ if (value & SOR_PLL1_TERM_COMPOUT)
+ adj &= ~mask;
+
+ mask >>= 1;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
+ value |= SOR_PLL1_TMDS_TERMADJ(adj);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_1;
+ /* disable pad calibration logic */
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value |= SOR_DP_PADCTL_PAD_CAL_PD;
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+}
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
+ const struct tegra_sor_soc *soc = sor->soc;
+ u32 pattern = 0, tx_pu = 0, value;
+ unsigned int i;
- value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- value |= SOR_DP_SPARE_SEQ_ENABLE;
- value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
- value |= SOR_DP_SPARE_MACRO_SOR_CLK;
- tegra_sor_writel(sor, value, SOR_DP_SPARE0);
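+ /* each lane's drive settings occupy one byte, placed according to the physical lane mapping */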
+ for (value = 0, i = 0; i < link->lanes; i++) {
+ u8 vs = link->train.request.voltage_swing[i];
+ u8 pe = link->train.request.pre_emphasis[i];
+ u8 pc = link->train.request.post_cursor[i];
+ u8 shift = sor->soc->lane_map[i] << 3;
+
+ voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
+ pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
+ post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
+
+ if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
+ tx_pu = sor->soc->tx_pu[pc][vs][pe];
+
+ switch (link->train.pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ value = SOR_DP_TPG_SCRAMBLER_GALIOS |
+ SOR_DP_TPG_PATTERN_NONE;
+ break;
+
+ case DP_TRAINING_PATTERN_1:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN1;
+ break;
+
+ case DP_TRAINING_PATTERN_2:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN2;
+ break;
+
+ case DP_TRAINING_PATTERN_3:
+ value = SOR_DP_TPG_SCRAMBLER_NONE |
+ SOR_DP_TPG_PATTERN_TRAIN3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (link->caps.channel_coding)
+ value |= SOR_DP_TPG_CHANNEL_CODING;
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_NONE |
- SOR_DP_TPG_PATTERN_TRAIN2;
- value = (value << 8) | lane;
+ pattern = pattern << 8 | value;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
+ tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
- pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
+ if (link->caps.tps3_supported)
+ tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
- return err;
+ tegra_sor_writel(sor, pattern, SOR_DP_TPG);
- for (i = 0, value = 0; i < link->num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
+ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
+ value &= ~SOR_DP_PADCTL_TX_PU_MASK;
+ value |= SOR_DP_PADCTL_TX_PU_ENABLE;
+ value |= SOR_DP_PADCTL_TX_PU(tx_pu);
+ tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
+{
+ struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
+ unsigned int rate, lanes;
+ u32 value;
+ int err;
+
+ rate = drm_dp_link_rate_to_bw_code(link->rate);
+ lanes = link->lanes;
+
+ /* configure link speed and lane count */
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
+ value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
+ value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
+
+ if (link->caps.enhanced_framing)
+ value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
+
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ usleep_range(400, 1000);
+
+ /* configure load pulse position adjustment */
+ value = tegra_sor_readl(sor, sor->soc->regs->pll1);
+ value &= ~SOR_PLL1_LOADADJ_MASK;
+
+ switch (rate) {
+ case DP_LINK_BW_1_62:
+ value |= SOR_PLL1_LOADADJ(0x3);
+ break;
+
+ case DP_LINK_BW_2_7:
+ value |= SOR_PLL1_LOADADJ(0x4);
+ break;
+
+ case DP_LINK_BW_5_4:
+ value |= SOR_PLL1_LOADADJ(0x6);
+ break;
}
- tegra_sor_writel(sor, value, SOR_DP_TPG);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll1);
- pattern = DP_TRAINING_PATTERN_DISABLE;
+ /* use alternate scrambler reset for eDP */
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
- err = drm_dp_aux_train(sor->aux, link, pattern);
- if (err < 0)
+ if (link->edp == 0)
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ err = tegra_sor_power_down_lanes(sor);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power down lanes: %d\n", err);
+ return err;
+ }
+
+ /* power up and pre-charge lanes */
+ err = tegra_sor_power_up_lanes(sor, lanes);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
+ lanes, (lanes != 1) ? "s" : "", err);
return err;
+ }
+
+ tegra_sor_dp_precharge(sor, lanes);
return 0;
}
+static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
+ .apply_training = tegra_sor_dp_link_apply_training,
+ .configure = tegra_sor_dp_link_configure,
+};
+
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
@@ -912,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
u32 num_syms_per_line;
unsigned int i;
- if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
+ if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
- output = link_rate * 8 * link->num_lanes;
input = pclk * config->bits_per_pixel;
+ output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
@@ -959,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
- (link->num_lanes * 8);
+ (link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
@@ -976,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
- if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
- config->hblank_symbols -= 12 / link->num_lanes;
+ config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
- config->vblank_symbols -= 36 / link->num_lanes + 4;
+ config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
@@ -1200,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
- SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* stop lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
- SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- timeout = jiffies + msecs_to_jiffies(250);
-
- while (time_before(jiffies, timeout)) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(25, 100);
- }
-
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
- return -ETIMEDOUT;
-
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
@@ -1584,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
-static void tegra_sor_edp_disable(struct drm_encoder *encoder)
-{
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- u32 value;
- int err;
-
- if (output->panel)
- drm_panel_disable(output->panel);
-
- err = tegra_sor_detach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to detach SOR: %d\n", err);
-
- tegra_sor_writel(sor, 0, SOR_STATE1);
- tegra_sor_update(sor);
-
- /*
- * The following accesses registers of the display controller, so make
- * sure it's only executed when the output is attached to one.
- */
- if (dc) {
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value &= ~SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
- }
-
- err = tegra_sor_power_down(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to power down SOR: %d\n", err);
-
- if (sor->aux) {
- err = drm_dp_aux_disable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to disable DP: %d\n", err);
- }
-
- err = tegra_io_pad_power_disable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
-
- if (output->panel)
- drm_panel_unprepare(output->panel);
-
- pm_runtime_put(sor->dev);
-}
-
-#if 0
-static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
- unsigned int *value)
-{
- unsigned int hfp, hsw, hbp, a = 0, b;
-
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
-
- b = hfp - 1;
-
- pr_info("a: %u, b: %u\n", a, b);
- pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
-
- if (a + hsw + hbp <= 11) {
- a = 1 + 11 - hsw - hbp;
- pr_info("a: %u\n", a);
- }
-
- if (a > b)
- return -EINVAL;
-
- if (hsw < 1)
- return -EINVAL;
-
- if (mode->hdisplay < 16)
- return -EINVAL;
-
- if (value) {
- if (b > a && a % 2)
- *value = a + 1;
- else
- *value = a;
- }
-
- return 0;
-}
-#endif
-
-static void tegra_sor_edp_enable(struct drm_encoder *encoder)
-{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct tegra_output *output = encoder_to_output(encoder);
- struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct tegra_sor *sor = to_sor(output);
- struct tegra_sor_config config;
- struct tegra_sor_state *state;
- struct drm_dp_link link;
- u8 rate, lanes;
- unsigned int i;
- int err = 0;
- u32 value;
-
- state = to_sor_state(output->connector.state);
-
- pm_runtime_get_sync(sor->dev);
-
- if (output->panel)
- drm_panel_prepare(output->panel);
-
- err = drm_dp_aux_enable(sor->aux);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DP: %d\n", err);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0) {
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
- return;
- }
-
- /* switch to safe parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
- dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
-
- memset(&config, 0, sizeof(config));
- config.bits_per_pixel = state->bpc * 3;
-
- err = tegra_sor_compute_config(sor, mode, &config, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to compute configuration: %d\n", err);
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
- value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
- usleep_range(20, 100);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll3);
- value |= SOR_PLL3_PLL_VDD_MODE_3V3;
- tegra_sor_writel(sor, value, sor->soc->regs->pll3);
-
- value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
- SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD;
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- value |= SOR_PLL2_LVDS_ENABLE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
- tegra_sor_writel(sor, value, sor->soc->regs->pll1);
-
- while (true) {
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /*
- * power up
- */
-
- /* set safe link bandwidth (1.62 Gbps) */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- /* step 1 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
- SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- /* step 2 */
- err = tegra_io_pad_power_enable(sor->pad);
- if (err < 0)
- dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
-
- usleep_range(5, 100);
-
- /* step 3 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(20, 100);
-
- /* step 4 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll0);
- value &= ~SOR_PLL0_VCOPD;
- value &= ~SOR_PLL0_PWR;
- tegra_sor_writel(sor, value, sor->soc->regs->pll0);
-
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- usleep_range(200, 1000);
-
- /* step 5 */
- value = tegra_sor_readl(sor, sor->soc->regs->pll2);
- value &= ~SOR_PLL2_PORT_POWERDOWN;
- tegra_sor_writel(sor, value, sor->soc->regs->pll2);
-
- /* XXX not in TRM */
- for (value = 0, i = 0; i < 5; i++)
- value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
- SOR_XBAR_CTRL_LINK1_XSEL(i, i);
-
- tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
-
- /* switch to DP parent clock */
- err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
- if (err < 0)
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
-
- /* power DP lanes */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
-
- if (link.num_lanes <= 2)
- value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
- else
- value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
-
- if (link.num_lanes <= 1)
- value &= ~SOR_DP_PADCTL_PD_TXD_1;
- else
- value |= SOR_DP_PADCTL_PD_TXD_1;
-
- if (link.num_lanes == 0)
- value &= ~SOR_DP_PADCTL_PD_TXD_0;
- else
- value |= SOR_DP_PADCTL_PD_TXD_0;
-
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* start lane sequencer */
- value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
- SOR_LANE_SEQ_CTL_POWER_STATE_UP;
- tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
-
- while (true) {
- value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
- if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
- break;
-
- usleep_range(250, 1000);
- }
-
- /* set link bandwidth */
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- tegra_sor_apply_config(sor, &config);
-
- /* enable link */
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value |= SOR_DP_LINKCTL_ENABLE;
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- for (i = 0, value = 0; i < 4; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- /* enable pad calibration logic */
- value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
- value |= SOR_DP_PADCTL_PAD_CAL_PD;
- tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
-
- err = drm_dp_link_probe(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
-
- err = drm_dp_link_power_up(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
-
- err = drm_dp_link_configure(sor->aux, &link);
- if (err < 0)
- dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
-
- rate = drm_dp_link_rate_to_bw_code(link.rate);
- lanes = link.num_lanes;
-
- value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
- value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
- value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
- tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
-
- value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
- value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
- value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
-
- if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
- value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
-
- tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
-
- /* disable training pattern generator */
-
- for (i = 0; i < link.num_lanes; i++) {
- unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
- SOR_DP_TPG_SCRAMBLER_GALIOS |
- SOR_DP_TPG_PATTERN_NONE;
- value = (value << 8) | lane;
- }
-
- tegra_sor_writel(sor, value, SOR_DP_TPG);
-
- err = tegra_sor_dp_train_fast(sor, &link);
- if (err < 0)
- dev_err(sor->dev, "DP fast link training failed: %d\n", err);
-
- dev_dbg(sor->dev, "fast link training succeeded\n");
-
- err = tegra_sor_power_up(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to power up SOR: %d\n", err);
-
- /* CSTM (LVDS, link A/B, upper) */
- value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
- SOR_CSTM_UPPER;
- tegra_sor_writel(sor, value, SOR_CSTM);
-
- /* use DP-A protocol */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
- value |= SOR_STATE_ASY_PROTOCOL_DP_A;
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- tegra_sor_mode_set(sor, mode, state);
-
- /* PWM setup */
- err = tegra_sor_setup_pwm(sor, 250);
- if (err < 0)
- dev_err(sor->dev, "failed to setup PWM: %d\n", err);
-
- tegra_sor_update(sor);
-
- value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
- value |= SOR_ENABLE(0);
- tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
-
- tegra_dc_commit(dc);
-
- err = tegra_sor_attach(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to attach SOR: %d\n", err);
-
- err = tegra_sor_wakeup(sor);
- if (err < 0)
- dev_err(sor->dev, "failed to enable DC: %d\n", err);
-
- if (output->panel)
- drm_panel_enable(output->panel);
-}
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -2030,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
-static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
- .disable = tegra_sor_edp_disable,
- .enable = tegra_sor_edp_enable,
- .atomic_check = tegra_sor_encoder_atomic_check,
-};
-
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
@@ -2160,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 and SCRATCH1 register
+ * interrupts. These are used for interoperability between the HDA
+ * codec driver and the HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
tegra_sor_write_eld(sor);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
@@ -2169,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor)
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+}
+
+static void tegra_sor_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->format.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
@@ -2206,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
- value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
-
- /* select HDA audio input */
- value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
- value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
-
- /* inject null samples */
- if (sor->format.channels != 2)
- value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
- else
- value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
-
- value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
-
- tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
-
- /* enable advertising HBR capability */
- tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+ tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
@@ -2399,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
- else
- value &= ~SOR_ENABLE(sor->index);
+ value &= ~SOR1_TIMING_CYA;
+
+ value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2559,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- /* switch to parent clock */
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
- dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
return;
}
+#endif
+ /* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
- dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
return;
}
@@ -2774,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
- value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
- else
- value |= SOR_ENABLE(sor->index);
+ value |= SOR1_TIMING_CYA;
+
+ value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
@@ -2803,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.atomic_check = tegra_sor_encoder_atomic_check,
};
+static void tegra_sor_dp_disable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ u32 value;
+ int err;
+
+ if (output->panel)
+ drm_panel_disable(output->panel);
+
+ /*
+ * Do not attempt to power down a DP link if we're not connected since
+ * the AUX transactions would just time out.
+ */
+ if (output->connector.status != connector_status_disconnected) {
+ err = drm_dp_link_power_down(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down link: %d\n",
+ err);
+ }
+
+ err = tegra_sor_detach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to detach SOR: %d\n", err);
+
+ tegra_sor_writel(sor, 0, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value &= ~SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+ tegra_dc_commit(dc);
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+ tegra_sor_update(sor);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe clock: %d\n", err);
+
+ err = tegra_sor_power_down(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power down SOR: %d\n", err);
+
+ err = tegra_io_pad_power_disable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
+
+ err = drm_dp_aux_disable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed disable DPAUX: %d\n", err);
+
+ if (output->panel)
+ drm_panel_unprepare(output->panel);
+
+ pm_runtime_put(sor->dev);
+}
+
+static void tegra_sor_dp_enable(struct drm_encoder *encoder)
+{
+ struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_config config;
+ struct tegra_sor_state *state;
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+ unsigned int i;
+ u32 value;
+ int err;
+
+ state = to_sor_state(output->connector.state);
+ mode = &encoder->crtc->state->adjusted_mode;
+ info = &output->connector.display_info;
+
+ pm_runtime_get_sync(sor->dev);
+
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+
+ err = tegra_io_pad_power_enable(sor->pad);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
+
+ usleep_range(20, 100);
+
+ err = drm_dp_aux_enable(sor->aux);
+ if (err < 0)
+ dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
+
+ err = drm_dp_link_probe(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to probe DP link: %d\n", err);
+
+ tegra_sor_filter_rates(sor);
+
+ err = drm_dp_link_choose(&sor->link, mode, info);
+ if (err < 0)
+ dev_err(sor->dev, "failed to choose link: %d\n", err);
+
+ if (output->panel)
+ drm_panel_prepare(output->panel);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(20, 40);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll3);
+ value |= SOR_PLL3_PLL_VDD_MODE_3V3;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll3);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
+ value |= SOR_PLL2_SEQ_PLLCAPPD;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll2);
+ value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
+ value &= ~SOR_PLL2_PORT_POWERDOWN;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll2);
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ if (output->panel)
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ else
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ usleep_range(200, 400);
+
+ value = tegra_sor_readl(sor, SOR_DP_SPARE0);
+ /* XXX not in TRM */
+ if (output->panel)
+ value |= SOR_DP_SPARE_PANEL_INTERNAL;
+ else
+ value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
+
+ value |= SOR_DP_SPARE_SEQ_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_SPARE0);
+
+ /* XXX not in TRM */
+ tegra_sor_writel(sor, 0, SOR_LVDS);
+
+ value = tegra_sor_readl(sor, sor->soc->regs->pll0);
+ value &= ~SOR_PLL0_ICHPMP_MASK;
+ value &= ~SOR_PLL0_VCOCAP_MASK;
+ value |= SOR_PLL0_ICHPMP(0x1);
+ value |= SOR_PLL0_VCOCAP(0x3);
+ value |= SOR_PLL0_RESISTOR_EXT;
+ tegra_sor_writel(sor, value, sor->soc->regs->pll0);
+
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /*
+ * Switch the pad clock to the DP clock. Note that we cannot actually
+ * do this because Tegra186 and later don't support clk_set_parent()
+ * on the sorX_pad_clkout clocks. We already do the equivalent above
+ * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
+ */
+#if 0
+ err = clk_set_parent(sor->clk_pad, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select pad parent clock: %d\n",
+ err);
+ return;
+ }
+#endif
+
+ /* switch the SOR clock to the pad clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* switch the output clock to the parent pixel clock */
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to select output parent clock: %d\n",
+ err);
+ return;
+ }
+
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /* enable port */
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value |= SOR_DP_LINKCTL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ tegra_sor_dp_term_calibrate(sor);
+
+ err = drm_dp_link_train(&sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "link training failed: %d\n", err);
+ else
+ dev_dbg(sor->dev, "link training succeeded\n");
+
+ err = drm_dp_link_power_up(sor->aux, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up DP link: %d\n", err);
+
+ /* compute configuration */
+ memset(&config, 0, sizeof(config));
+ config.bits_per_pixel = state->bpc * 3;
+
+ err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
+ if (err < 0)
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
+
+ tegra_sor_apply_config(sor, &config);
+ tegra_sor_mode_set(sor, mode, state);
+
+ if (output->panel) {
+ /* CSTM (LVDS, link A/B, upper) */
+ value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
+ SOR_CSTM_UPPER;
+ tegra_sor_writel(sor, value, SOR_CSTM);
+
+ /* PWM setup */
+ err = tegra_sor_setup_pwm(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to setup PWM: %d\n", err);
+ }
+
+ tegra_sor_update(sor);
+
+ err = tegra_sor_power_up(sor, 250);
+ if (err < 0)
+ dev_err(sor->dev, "failed to power up SOR: %d\n", err);
+
+ /* attach and wake up */
+ err = tegra_sor_attach(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to attach SOR: %d\n", err);
+
+ value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
+ value |= SOR_ENABLE(sor->index);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
+
+ tegra_dc_commit(dc);
+
+ err = tegra_sor_wakeup(sor);
+ if (err < 0)
+ dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
+
+ if (output->panel)
+ drm_panel_enable(output->panel);
+}
+
+static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
+ .disable = tegra_sor_dp_disable,
+ .enable = tegra_sor_dp_enable,
+ .atomic_check = tegra_sor_encoder_atomic_check,
+};
+
+static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
+ if (IS_ERR(sor->avdd_io_supply)) {
+ dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
+ PTR_ERR(sor->avdd_io_supply));
+ return PTR_ERR(sor->avdd_io_supply);
+ }
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
+ if (IS_ERR(sor->vdd_pll_supply)) {
+ dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
+ PTR_ERR(sor->vdd_pll_supply));
+ return PTR_ERR(sor->vdd_pll_supply);
+ }
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
+ err);
+ return err;
+ }
+
+ sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
+ if (IS_ERR(sor->hdmi_supply)) {
+ dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
+ PTR_ERR(sor->hdmi_supply));
+ return PTR_ERR(sor->hdmi_supply);
+ }
+
+ err = regulator_enable(sor->hdmi_supply);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
+
+ return 0;
+}
+
+static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->hdmi_supply);
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
+ .name = "HDMI",
+ .probe = tegra_sor_hdmi_probe,
+ .remove = tegra_sor_hdmi_remove,
+ .audio_enable = tegra_sor_hdmi_audio_enable,
+ .audio_disable = tegra_sor_hdmi_audio_disable,
+};
+
+static int tegra_sor_dp_probe(struct tegra_sor *sor)
+{
+ int err;
+
+ sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
+ if (IS_ERR(sor->avdd_io_supply))
+ return PTR_ERR(sor->avdd_io_supply);
+
+ err = regulator_enable(sor->avdd_io_supply);
+ if (err < 0)
+ return err;
+
+ sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
+ if (IS_ERR(sor->vdd_pll_supply))
+ return PTR_ERR(sor->vdd_pll_supply);
+
+ err = regulator_enable(sor->vdd_pll_supply);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_sor_dp_remove(struct tegra_sor *sor)
+{
+ regulator_disable(sor->vdd_pll_supply);
+ regulator_disable(sor->avdd_io_supply);
+
+ return 0;
+}
+
+static const struct tegra_sor_ops tegra_sor_dp_ops = {
+ .name = "DP",
+ .probe = tegra_sor_dp_probe,
+ .remove = tegra_sor_dp_remove,
+};
+
static int tegra_sor_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -2810,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
- u32 value;
int err;
if (!sor->aux) {
- if (sor->soc->supports_hdmi) {
+ if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
@@ -2823,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client)
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
- if (sor->soc->supports_edp) {
+ if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
- helpers = &tegra_sor_edp_helpers;
- } else if (sor->soc->supports_dp) {
+ helpers = &tegra_sor_dp_helpers;
+ } else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
+ helpers = &tegra_sor_dp_helpers;
}
+
+ sor->link.ops = &tegra_sor_dp_link_ops;
+ sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
@@ -2913,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
- /*
- * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
- * is used for interoperability between the HDA codec driver and the
- * HDMI/DP driver.
- */
- value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
- tegra_sor_writel(sor, value, SOR_INT_ENABLE);
- tegra_sor_writel(sor, value, SOR_INT_MASK);
-
return 0;
}
@@ -2930,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
- tegra_sor_writel(sor, 0, SOR_INT_MASK);
- tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
-
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -2955,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
-static const struct tegra_sor_ops tegra_sor_edp_ops = {
- .name = "eDP",
-};
-
-static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
-{
- int err;
-
- sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
- if (IS_ERR(sor->avdd_io_supply)) {
- dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
- PTR_ERR(sor->avdd_io_supply));
- return PTR_ERR(sor->avdd_io_supply);
- }
-
- err = regulator_enable(sor->avdd_io_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
- err);
- return err;
- }
-
- sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
- if (IS_ERR(sor->vdd_pll_supply)) {
- dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
- PTR_ERR(sor->vdd_pll_supply));
- return PTR_ERR(sor->vdd_pll_supply);
- }
-
- err = regulator_enable(sor->vdd_pll_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
- err);
- return err;
- }
-
- sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
- if (IS_ERR(sor->hdmi_supply)) {
- dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
- PTR_ERR(sor->hdmi_supply));
- return PTR_ERR(sor->hdmi_supply);
- }
-
- err = regulator_enable(sor->hdmi_supply);
- if (err < 0) {
- dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
- return err;
- }
-
- INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
-
- return 0;
-}
-
-static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
-{
- regulator_disable(sor->hdmi_supply);
- regulator_disable(sor->vdd_pll_supply);
- regulator_disable(sor->avdd_io_supply);
-
- return 0;
-}
-
-static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
- .name = "HDMI",
- .probe = tegra_sor_hdmi_probe,
- .remove = tegra_sor_hdmi_remove,
-};
-
static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
@@ -3043,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = {
.dp_padctl2 = 0x73,
};
+/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
+static const u8 tegra124_sor_lane_map[4] = {
+ 2, 1, 0, 3,
+};
+
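+/* the training parameter tables are indexed as [post-cursor][voltage-swing][pre-emphasis] */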
+static const u8 tegra124_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x3c, },
+ }, {
+ { 0x12, 0x17, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x39, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x36, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
+
+static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x09, 0x13, 0x25 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x0a, 0x14, 0x28 },
+ { 0x00, 0x0f, 0x1e, },
+ { 0x00, 0x14, },
+ { 0x00, },
+ },
+};
+
+static const u8 tegra124_sor_post_cursor[4][4][4] = {
+ {
+ { 0x00, 0x00, 0x00, 0x00 },
+ { 0x00, 0x00, 0x00, },
+ { 0x00, 0x00, },
+ { 0x00, },
+ }, {
+ { 0x02, 0x02, 0x04, 0x05 },
+ { 0x02, 0x04, 0x05, },
+ { 0x04, 0x05, },
+ { 0x05, },
+ }, {
+ { 0x04, 0x05, 0x08, 0x0b },
+ { 0x05, 0x09, 0x0b, },
+ { 0x08, 0x0a, },
+ { 0x0b, },
+ }, {
+ { 0x05, 0x09, 0x0b, 0x12 },
+ { 0x09, 0x0d, 0x12, },
+ { 0x0b, 0x0f, },
+ { 0x12, },
+ },
+};
+
+static const u8 tegra124_sor_tx_pu[4][4][4] = {
+ {
+ { 0x20, 0x30, 0x40, 0x60 },
+ { 0x30, 0x40, 0x60, },
+ { 0x40, 0x60, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x50 },
+ { 0x30, 0x40, 0x50, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x30, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x50, },
+ { 0x60, },
+ }, {
+ { 0x20, 0x20, 0x20, 0x40, },
+ { 0x30, 0x30, 0x40, },
+ { 0x40, 0x40, },
+ { 0x60, },
+ },
+};
+
static const struct tegra_sor_soc tegra124_sor = {
- .supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
+};
+
+static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
+};
+
+static const struct tegra_sor_soc tegra132_sor = {
+ .supports_lvds = true,
+ .supports_hdmi = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+ .regs = &tegra124_sor_regs,
+ .has_nvdisplay = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra132_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
@@ -3068,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = {
.dp_padctl2 = 0x73,
};
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
+};
+
+static const u8 tegra210_sor_lane_map[4] = {
+ 0, 1, 2, 3,
+};
+
static const struct tegra_sor_soc tegra210_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
- .supports_dp = false,
+ .supports_dp = true,
+ .supports_audio = false,
+ .supports_hdcp = false,
+
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
- .xbar_cfg = tegra124_sor_xbar_cfg,
-};
-static const u8 tegra210_sor_xbar_cfg[5] = {
- 2, 1, 0, 3, 4
+ .xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
- .supports_edp = false,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
-
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra210_sor_lane_map,
+ .voltage_swing = tegra124_sor_voltage_swing,
+ .pre_emphasis = tegra124_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
@@ -3112,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = {
.dp_padctl2 = 0x16a,
};
-static const struct tegra_sor_soc tegra186_sor = {
- .supports_edp = false,
- .supports_lvds = false,
- .supports_hdmi = false,
- .supports_dp = true,
-
- .regs = &tegra186_sor_regs,
- .has_nvdisplay = true,
+static const u8 tegra186_sor_voltage_swing[4][4][4] = {
+ {
+ { 0x13, 0x19, 0x1e, 0x28 },
+ { 0x1e, 0x25, 0x2d, },
+ { 0x28, 0x32, },
+ { 0x39, },
+ }, {
+ { 0x12, 0x16, 0x1b, 0x25 },
+ { 0x1c, 0x23, 0x2a, },
+ { 0x25, 0x2f, },
+ { 0x37, }
+ }, {
+ { 0x12, 0x16, 0x1a, 0x22 },
+ { 0x1b, 0x20, 0x27, },
+ { 0x24, 0x2d, },
+ { 0x35, },
+ }, {
+ { 0x11, 0x14, 0x17, 0x1f },
+ { 0x19, 0x1e, 0x24, },
+ { 0x22, 0x2a, },
+ { 0x32, },
+ },
+};
- .xbar_cfg = tegra124_sor_xbar_cfg,
+static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
+ {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x01, 0x0e, 0x1d, },
+ { 0x01, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00 },
+ }, {
+ { 0x00, 0x08, 0x14, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ }, {
+ { 0x00, 0x08, 0x12, 0x24 },
+ { 0x00, 0x0e, 0x1d, },
+ { 0x00, 0x13, },
+ { 0x00, },
+ },
};
-static const struct tegra_sor_soc tegra186_sor1 = {
- .supports_edp = false,
+static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
-
.xbar_cfg = tegra124_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
@@ -3155,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = {
};
static const struct tegra_sor_soc tegra194_sor = {
- .supports_edp = true,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
+ .supports_audio = true,
+ .supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
@@ -3167,14 +3543,19 @@ static const struct tegra_sor_soc tegra194_sor = {
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
+ .lane_map = tegra124_sor_lane_map,
+ .voltage_swing = tegra186_sor_voltage_swing,
+ .pre_emphasis = tegra186_sor_pre_emphasis,
+ .post_cursor = tegra124_sor_post_cursor,
+ .tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
- { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
+ { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
@@ -3200,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
+ } else {
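+ /* derive the instance from SoC capabilities: only SOR1 supports audio on these chips (cf. Tegra210) */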
+ if (!sor->soc->supports_audio)
+ sor->index = 0;
+ else
+ sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
@@ -3234,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
tegra_hda_parse_format(format, &sor->format);
- tegra_sor_hdmi_audio_enable(sor);
+ if (sor->ops->audio_enable)
+ sor->ops->audio_enable(sor);
} else {
- tegra_sor_hdmi_audio_disable(sor);
+ if (sor->ops->audio_disable)
+ sor->ops->audio_disable(sor);
}
}
@@ -3273,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
+
+ sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
@@ -3287,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -ENODEV;
}
} else {
- if (sor->soc->supports_edp) {
- sor->ops = &tegra_sor_edp_ops;
- sor->pad = TEGRA_IO_PAD_LVDS;
- } else if (sor->soc->supports_dp) {
- dev_err(&pdev->dev, "DisplayPort not supported yet\n");
- return -ENODEV;
- } else {
- dev_err(&pdev->dev, "unknown (DP) support\n");
- return -ENODEV;
- }
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
+ /*
+ * No need to keep this around since we only use it as a check
+ * to see if a panel is connected (eDP) or not (DP).
+ */
+ of_node_put(np);
+
+ sor->ops = &tegra_sor_dp_ops;
+ sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
@@ -3451,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
* pad output clock.
*/
if (!sor->clk_pad) {
+ char *name;
+
err = pm_runtime_get_sync(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
@@ -3458,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->clk_pad = tegra_clk_sor_pad_register(sor,
- "sor1_pad_clkout");
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
+ if (!name) {
+ err = -ENOMEM;
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
pm_runtime_put(&pdev->dev);
}
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index f8efd8be4b7c..00e09d5dca30 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -39,6 +39,7 @@
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
+#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
@@ -283,10 +284,12 @@
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
+#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
+#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
#define SOR_DP_PADCTL1 0x5d
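Note: the two parameterized macros generalize the per-lane CM_TXD/PD_TXD defines above. The historical per-lane bits are not in ascending order (PD_TXD_3 is bit 3 while PD_TXD_0 is bit 2), so code using the new form is presumably expected to index by physical lane, e.g. through the soc->lane_map table added elsewhere in this series. A minimal sketch of building a lane mask with the new macro; sor_dp_pd_txd_mask() is a hypothetical helper, not part of the patch:

        static u32 sor_dp_pd_txd_mask(unsigned int lanes)
        {
                unsigned int i;
                u32 mask = 0;

                /* one power-down bit per physical lane, counting from bit 0 */
                for (i = 0; i < lanes; i++)
                        mask |= SOR_DP_PADCTL_PD_TXD(i);

                return mask;
        }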
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index cd0399fd8c63..9444ba183990 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -34,7 +34,6 @@ struct vic {
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
- struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
@@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
static int vic_boot(struct vic *vic)
{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+#endif
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
@@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic)
return 0;
#ifdef CONFIG_IOMMU_API
- if (vic->config->supports_sid) {
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
+ if (vic->config->supports_sid && spec) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
- if (spec && spec->num_ids > 0) {
+ if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
vic_writel(vic, value, VIC_THI_STREAMID0);
@@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic)
if (err < 0)
return err;
- hdr = vic->falcon.firmware.vaddr;
+ hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
- hdr = vic->falcon.firmware.vaddr +
+ hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
@@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic)
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
- (vic->falcon.firmware.paddr + fce_bin_data_offset)
+ (vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
@@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic)
return 0;
}
-static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
- dma_addr_t *iova)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_alloc(tegra, size, iova);
-}
-
-static void vic_falcon_free(struct falcon *falcon, size_t size,
- dma_addr_t iova, void *va)
-{
- struct tegra_drm *tegra = falcon->data;
-
- return tegra_drm_free(tegra, size, va, iova);
-}
-
-static const struct falcon_ops vic_falcon_ops = {
- .alloc = vic_falcon_alloc,
- .free = vic_falcon_free
-};
-
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
- if (group && tegra->domain) {
- err = iommu_attach_group(tegra->domain, group);
- if (err < 0) {
- dev_err(vic->dev, "failed to attach to domain: %d\n",
- err);
- return err;
- }
-
- vic->domain = tegra->domain;
+ err = host1x_client_iommu_attach(client);
+ if (err < 0) {
+ dev_err(vic->dev, "failed to attach to domain: %d\n", err);
+ return err;
}
- vic->channel = host1x_channel_request(client->dev);
+ vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
@@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client)
if (err < 0)
goto free_syncpt;
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent device.
+ */
+ client->dev->dma_parms = client->parent->dma_parms;
+
return 0;
free_syncpt:
@@ -221,8 +201,7 @@ free_syncpt:
free_channel:
host1x_channel_put(vic->channel);
detach:
- if (group && tegra->domain)
- iommu_detach_group(tegra->domain, group);
+ host1x_client_iommu_detach(client);
return err;
}
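Sharing the parent's dma_parms means that DMA segment limits configured on the host1x device are now visible on the client device as well. A hedged illustration (SZ_4M is an arbitrary example value, not something the patch sets):

        /* illustration only: a limit set on the parent is seen by the client */
        dma_set_max_seg_size(client->parent, SZ_4M);
        WARN_ON(dma_get_max_seg_size(client->dev) != SZ_4M);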
@@ -230,22 +209,32 @@ detach:
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
- struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(vic->channel);
-
- if (vic->domain) {
- iommu_detach_group(vic->domain, group);
- vic->domain = NULL;
+ host1x_client_iommu_detach(client);
+
+ if (client->group) {
+ dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
+ vic->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(vic->dev, vic->falcon.firmware.size,
+ vic->falcon.firmware.virt,
+ vic->falcon.firmware.iova);
}
return 0;
@@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
+ struct host1x_client *client = &vic->client.base;
+ struct tegra_drm *tegra = vic->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
int err;
- if (vic->falcon.data)
+ if (vic->falcon.firmware.virt)
return 0;
- vic->falcon.data = vic->client.drm;
-
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
- goto cleanup;
+ return err;
+
+ size = vic->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
+
+ err = dma_mapping_error(vic->dev, iova);
+ if (err < 0)
+ return err;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ }
+
+ vic->falcon.firmware.virt = virt;
+ vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(vic->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ vic->falcon.firmware.phys = phys;
+ }
+
return 0;
cleanup:
- vic->falcon.data = NULL;
+ if (!client->group)
+ dma_free_coherent(vic->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
return err;
}
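The rewritten vic_load_firmware() replaces the old falcon alloc/free callbacks with two explicit strategies: without an IOMMU group, the DMA API allocates and maps the firmware in one step; with a shared domain, the IOVA comes from the Tegra DRM allocator and an extra dma_map_single() supplies the physical address the DMA API needs for cache maintenance. Condensed sketch of the decision, declarations omitted and names as in the patch:

        if (!client->group) {
                /* standalone device: coherent allocation, mapping done by the DMA API */
                virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
        } else {
                /* shared IOMMU domain: IOVA comes from the domain allocator ... */
                virt = tegra_drm_alloc(tegra, size, &iova);
                /* ... so map separately to get a physical address for cache flushes */
                phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
        }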
@@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev)
struct vic *vic;
int err;
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
@@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
- vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 43d756b7810e..51d034e095f4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -8,6 +8,7 @@
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include "tilcdc_drv.h"
@@ -139,8 +140,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
}
if (panel) {
- bridge = devm_drm_panel_bridge_add(ddev->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
+ bridge = devm_drm_panel_bridge_add_typed(ddev->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 3abb9641f212..e2090020b3a0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -11,7 +11,7 @@
#include "tilcdc_drv.h"
-static struct drm_plane_funcs tilcdc_plane_funcs = {
+static const struct drm_plane_funcs tilcdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 03d0e2df6774..94fb1f593564 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -649,7 +649,7 @@ static void gm12u320_driver_release(struct drm_device *dev)
kfree(gm12u320);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops);
+DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 01fc670ce7a2..caea2a099496 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,8 +4,8 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \
- ttm_page_alloc_dma.o
+ ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
+ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb8966..6050dc846894 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@ struct ttm_agp_backend {
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+ struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98819462f025..8d91b0428af1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_bo_glob_use_count;
struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref)
{
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
- struct ttm_bo_device *bdev = bo->bdev;
size_t acc_size = bo->acc_size;
BUG_ON(kref_read(&bo->list_kref));
BUG_ON(kref_read(&bo->kref));
- BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
ttm_tt_destroy(bo->ttm);
- atomic_dec(&bo->bdev->glob->bo_count);
+ atomic_dec(&ttm_bo_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -188,23 +187,17 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
kref_get(&bo->list_kref);
}
}
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
}
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
bool notify = false;
@@ -224,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
bdev->driver->del_from_lru_notify(bo);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
struct ttm_buffer_object *bo)
{
@@ -248,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
- ttm_bo_add_to_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
switch (bo->mem.mem_type) {
@@ -311,7 +294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- lru = &pos->first->bdev->glob->swap_lru[i];
+ lru = &ttm_bo_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -475,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
int ret;
ret = ttm_bo_individualize_resv(bo);
@@ -485,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
goto error;
}
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (bo->base.resv != &bo->base._resv)
dma_resv_unlock(&bo->base._resv);
@@ -512,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
}
dma_resv_unlock(bo->base.resv);
@@ -523,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -546,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct ttm_bo_global *glob = bo->bdev->glob;
struct dma_resv *resv;
int ret;
@@ -565,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true,
interruptible,
@@ -576,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@@ -586,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
ret = 0;
@@ -595,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -603,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@@ -618,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
*/
static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct list_head removed;
bool empty;
@@ -676,7 +657,7 @@ static void ttm_bo_release(struct kref *kref)
if (bo->bdev->driver->release_notify)
bo->bdev->driver->release_notify(bo);
- drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -842,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ww_acquire_ctx *ticket)
{
struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bool locked = false;
unsigned i;
int ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@@ -880,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo)
kref_get(&busy_bo->list_kref);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -896,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
@@ -926,7 +900,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+ struct ttm_mem_reg *mem,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
int ret;
@@ -935,19 +910,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
- if (fence) {
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ if (!fence)
+ return 0;
- ret = dma_resv_reserve_shared(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
+ if (no_wait_gpu)
+ return -EBUSY;
- dma_fence_put(bo->moving);
- bo->moving = fence;
+ dma_resv_add_shared_fence(bo->base.resv, fence);
+
+ ret = dma_resv_reserve_shared(bo->base.resv, 1);
+ if (unlikely(ret)) {
+ dma_fence_put(fence);
+ return ret;
}
+ dma_fence_put(bo->moving);
+ bo->moving = fence;
return 0;
}
@@ -978,7 +956,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
} while (1);
- return ttm_bo_add_move_fence(bo, man, mem);
+ return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1068,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return 0;
}
@@ -1120,14 +1096,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (unlikely(ret))
goto error;
- if (mem->mm_node) {
- ret = ttm_bo_add_move_fence(bo, man, mem);
- if (unlikely(ret)) {
- (*man->func->put_node)(man, mem);
- goto error;
- }
- return 0;
+ if (!mem->mm_node)
+ continue;
+
+ ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ if (ret == -EBUSY)
+ continue;
+
+ goto error;
}
+ return 0;
}
for (i = 0; i < placement->num_busy_placement; ++i) {
@@ -1160,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
error:
if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
return ret;
@@ -1286,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
int ret = 0;
unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1315,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
- atomic_set(&bo->cpu_writers, 0);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1349,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1357,7 +1336,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
+ ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
bo->mem.num_pages);
/* passed reservation objects should already be locked,
@@ -1379,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
return ret;
}
@@ -1481,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
.flags = TTM_OPT_FLAG_FORCE_ALLOC
};
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct ttm_bo_global *glob = bdev->glob;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct dma_fence *fence;
int ret;
unsigned i;
@@ -1650,8 +1627,6 @@ static int ttm_bo_global_init(void)
goto out;
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = &ttm_mem_glob;
- glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1675,10 +1650,10 @@ out:
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
unsigned i = TTM_NUM_MEM_TYPES;
struct ttm_mem_type_manager *man;
- struct ttm_bo_global *glob = bdev->glob;
while (i--) {
man = &bdev->man[i];
@@ -1708,8 +1683,6 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
- drm_vma_offset_manager_destroy(&bdev->vma_manager);
-
if (!ret)
ttm_bo_global_release();
@@ -1720,11 +1693,15 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
+ if (WARN_ON(vma_manager == NULL))
+ return -EINVAL;
+
ret = ttm_bo_global_init();
if (ret)
return ret;
@@ -1741,13 +1718,10 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager,
- DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
- bdev->glob = glob;
bdev->need_dma32 = need_dma32;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
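With this change TTM no longer embeds its own offset manager; the caller must pass one in, and NULL is rejected with -EINVAL. A hedged call-site sketch, assuming the common case of a driver reusing the DRM core's manager (priv and priv_bo_driver are hypothetical driver names):

        ret = ttm_bo_device_init(&priv->bdev, &priv_bo_driver,
                                 drm->anon_inode->i_mapping,
                                 drm->vma_offset_manager, /* no longer embedded */
                                 true /* need_dma32 */);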
@@ -1827,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
- int ret = 0;
-
- /*
- * Using ttm_bo_reserve makes sure the lru lists are updated.
- */
-
- ret = ttm_bo_reserve(bo, true, no_wait, NULL);
- if (unlikely(ret != 0))
- return ret;
- ret = ttm_bo_wait(bo, true, no_wait);
- if (likely(ret == 0))
- atomic_inc(&bo->cpu_writers);
- ttm_bo_unreserve(bo);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
- atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
/**
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
@@ -1951,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
.no_wait_gpu = false
};
- while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
- ;
+ while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..6b0883a1776e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -153,7 +151,6 @@ retry:
}
return ret;
}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
@@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&bo->bdev->glob->bo_count);
+ atomic_inc(&ttm_bo_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.base.resv = &fbo->base.base._resv;
- dma_resv_init(fbo->base.base.resv);
- ret = dma_resv_trylock(fbo->base.base.resv);
+ if (bo->base.resv == &bo->base._resv)
+ fbo->base.base.resv = &fbo->base.base._resv;
+
+ dma_resv_init(&fbo->base.base._resv);
+ ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
}
@@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
+ dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
else
bo->ttm = NULL;
- ttm_bo_unreserve(ghost_obj);
+ dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
@@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
+ ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
@@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
- ttm_bo_unreserve(ghost);
+ dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 46dc3de7e81b..11863fbdd5d6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,8 +42,6 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
-#define TTM_BO_VM_NUM_PREFAULT 16
-
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
@@ -106,25 +104,30 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+/**
+ * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
+ * @bo: The buffer object
+ * @vmf: The fault structure handed to the callback
+ *
+ * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * during long waits, and after the wait the callback will be restarted. This
+ * is to allow other threads using the same virtual memory space to
+ * concurrently map() and unmap() completely unrelated buffer objects. TTM
+ * buffer object reservations sometimes wait for the GPU and should therefore
+ * be considered long waits. This function reserves the buffer object
+ * interruptibly, taking this into account. Starvation is avoided by the vm
+ * system not allowing too many repeated restarts.
+ * This function is intended to be used in customized fault() and _mkwrite()
+ * handlers.
+ *
+ * Return:
+ * 0 on success and the bo was reserved.
+ * VM_FAULT_RETRY if blocking wait.
+ * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
+ */
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf)
{
- struct vm_area_struct *vma = vmf->vma;
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct page *page;
- int err;
- int i;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
- struct vm_area_struct cvma;
-
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
@@ -151,14 +154,54 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vm_reserve);
+
+/**
+ * ttm_bo_vm_fault_reserved - TTM fault helper
+ * @vmf: The struct vm_fault given as argument to the fault callback
+ * @prot: The page protection to be used for this memory area.
+ * @num_prefault: Maximum number of prefault pages. The caller may want to
+ * specify this based on madvise settings and the size of the GPU object
+ * backed by the memory.
+ *
+ * This function inserts one or more page table entries pointing to the
+ * memory backing the buffer object, and then returns a return code
+ * instructing the caller to retry the page access.
+ *
+ * Return:
+ * VM_FAULT_NOPAGE on success or pending signal
+ * VM_FAULT_SIGBUS on unspecified error
+ * VM_FAULT_OOM on out-of-memory
+ * VM_FAULT_RETRY if retryable wait
+ */
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vm_area_struct cvma = *vma;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int err;
+ pgoff_t i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address = vmf->address;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+
/*
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
+ return VM_FAULT_SIGBUS;
if (bdev->driver->fault_reserve_notify) {
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -169,17 +212,15 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
break;
case -EBUSY:
case -ERESTARTSYS:
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
+ return VM_FAULT_NOPAGE;
default:
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
+ return VM_FAULT_SIGBUS;
}
if (bo->moving != moving) {
- spin_lock(&bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bdev->glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
}
dma_fence_put(moving);
}
@@ -189,21 +230,12 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* move.
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
- if (unlikely(ret != 0)) {
- if (ret == VM_FAULT_RETRY &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* The BO has already been unreserved. */
- return ret;
- }
-
- goto out_unlock;
- }
+ if (unlikely(ret != 0))
+ return ret;
err = ttm_mem_io_lock(man, true);
- if (unlikely(err != 0)) {
- ret = VM_FAULT_NOPAGE;
- goto out_unlock;
- }
+ if (unlikely(err != 0))
+ return VM_FAULT_NOPAGE;
err = ttm_mem_io_reserve_vm(bo);
if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
@@ -220,18 +252,8 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
goto out_io_unlock;
}
- /*
- * Make a local vma copy to modify the page_prot member
- * and vm_flags if necessary. The vma parameter is protected
- * by mmap_sem in write mode.
- */
- cvma = *vma;
- cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
- if (bo->mem.bus.is_iomem) {
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
- } else {
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+ if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -240,24 +262,21 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
};
ttm = bo->ttm;
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm_tt_populate(ttm, &ctx)) {
+ if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
+ } else {
+ /* Iomem should not be marked encrypted */
+ cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
- for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+ for (i = 0; i < num_prefault; ++i) {
if (bo->mem.bus.is_iomem) {
- /* Iomem should not be marked encrypted */
- cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -293,28 +312,49 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
-out_unlock:
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
+
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ pgprot_t prot;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
dma_resv_unlock(bo->base.resv);
+
return ret;
}
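The split into ttm_bo_vm_reserve() and ttm_bo_vm_fault_reserved() exists so drivers can build customized fault handlers; the rewritten ttm_bo_vm_fault() above doubles as the reference usage. A hedged sketch of a driver-private variant (my_fault() is hypothetical; a real driver might choose a different page protection or prefault count):

        static vm_fault_t my_fault(struct vm_fault *vmf)
        {
                struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
                pgprot_t prot = vm_get_page_prot(vmf->vma->vm_flags);
                vm_fault_t ret;

                ret = ttm_bo_vm_reserve(bo, vmf);
                if (ret)
                        return ret;

                /* a driver may pick prot/num_prefault differently here */
                ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
                if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                        return ret; /* reservation already dropped for the wait */

                dma_resv_unlock(bo->base.resv);
                return ret;
        }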
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
+void ttm_bo_vm_open(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
ttm_bo_get(bo);
}
+EXPORT_SYMBOL(ttm_bo_vm_open);
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
+void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
ttm_bo_put(bo);
vma->vm_private_data = NULL;
}
+EXPORT_SYMBOL(ttm_bo_vm_close);
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
unsigned long offset,
@@ -407,16 +447,16 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
struct drm_vma_offset_node *node;
struct ttm_buffer_object *bo = NULL;
- drm_vma_offset_lock_lookup(&bdev->vma_manager);
+ drm_vma_offset_lock_lookup(bdev->vma_manager);
- node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
if (likely(node)) {
bo = container_of(node, struct ttm_buffer_object,
base.vma_node);
bo = ttm_bo_get_unless_zero(bo);
}
- drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+ drm_vma_offset_unlock_lookup(bdev->vma_manager);
if (!bo)
pr_err("Could not find buffer object to map\n");
@@ -424,6 +464,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &ttm_bo_vm_ops;
+
+ /*
+ * Note: We're transferring the bo reference to
+ * vma->vm_private_data here.
+ */
+
+ vma->vm_private_data = bo;
+
+ /*
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
+ */
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -447,24 +509,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = &ttm_bo_vm_ops;
-
- /*
- * Note: We're transferring the bo reference to
- * vma->vm_private_data here.
- */
-
- vma->vm_private_data = bo;
-
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
out_unref:
ttm_bo_put(bo);
@@ -472,17 +517,17 @@ out_unref:
}
EXPORT_SYMBOL(ttm_bo_mmap);
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
ttm_bo_get(bo);
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = bo;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ /*
+ * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset
+ * removed. Add it back here until the rest of TTM works without it.
+ */
+ vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node);
+
+ ttm_bo_mmap_vma_setup(bo, vma);
return 0;
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
+EXPORT_SYMBOL(ttm_bo_mmap_obj);
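The rename from ttm_fbdev_mmap() reflects the helper's new role as a &drm_gem_object_funcs.mmap backend, where the core calls in with the fake offset already stripped and this helper (for now) re-adds it. A hedged sketch of such a callback; my_gem_to_bo() stands in for a driver's container_of() wrapper:

        static int my_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
        {
                /* ttm_bo_mmap_obj() takes its own reference on the bo */
                return ttm_bo_mmap_obj(vma, my_gem_to_bo(obj));
        }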
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 131dae8f4170..1797f04c0534 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
}
}
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_del_from_lru(bo);
- }
-}
-
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru)
+ struct list_head *dups)
{
- struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
if (list_empty(list))
return 0;
- entry = list_first_entry(list, struct ttm_validate_buffer, head);
- glob = entry->bo->bdev->glob;
-
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
@@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo = entry->bo;
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- dma_resv_unlock(bo->base.resv);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
+ if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
@@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_add(&entry->head, list);
}
- if (del_lru) {
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
- }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
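Callers drop the old del_lru argument: buffers now stay on the LRU while reserved and are simply moved to the tail when fenced or backed off. Hedged caller-side sketch of the updated signature:

        /* previously: ttm_eu_reserve_buffers(&ticket, &list, intr, &dups, true); */
        ret = ttm_eu_reserve_buffers(&ticket, &list, true /* intr */, &dups);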
@@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct dma_fence *fence)
{
struct ttm_validate_buffer *entry;
- struct ttm_buffer_object *bo;
- struct ttm_bo_global *glob;
if (list_empty(list))
return;
- bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- glob = bo->bdev->glob;
-
- spin_lock(&glob->lru_lock);
-
+ spin_lock(&ttm_bo_glob.lru_lock);
list_for_each_entry(entry, list, head) {
- bo = entry->bo;
+ struct ttm_buffer_object *bo = entry->bo;
+
if (entry->num_shared)
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
+ ttm_bo_move_to_lru_tail(bo, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&ttm_bo_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6..acd63b70d814 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(glob->bo_glob, ctx);
+ ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0e..b40a4678c296 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void)
static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@ put_pages:
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
unsigned i;
int ret;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 7d78e6deac89..bf876faea592 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -33,7 +33,6 @@
* when freed).
*/
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/dma-mapping.h>
@@ -886,8 +885,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -991,8 +990,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
+ struct ttm_mem_global *mem_glob = &ttm_mem_glob;
struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
-
-#endif
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 416f24823c0a..954b09c948eb 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -80,8 +80,8 @@ static int tve200_modeset_init(struct drm_device *dev)
if (ret && ret != -ENODEV)
return ret;
if (panel) {
- bridge = drm_panel_bridge_add(panel,
- DRM_MODE_CONNECTOR_Unknown);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_bridge;
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ddb61a60c610..b4ae3e89a7b4 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -90,13 +90,6 @@ udl_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static struct drm_encoder*
-udl_best_single_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- return drm_encoder_find(connector->dev, NULL, enc_id);
-}
-
static int udl_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
@@ -120,7 +113,6 @@ static void udl_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
- .best_encoder = udl_best_single_encoder,
};
static const struct drm_connector_funcs udl_connector_funcs = {
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a22b75a3a533..edd299ab53d8 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/* gem_create_object function for allocating a BO struct and doing
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 3506ae2723ae..1a07462b4528 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -126,6 +126,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_CSD:
args->value = v3d_has_csd(v3d);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -169,7 +172,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
+DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 19c092d75266..549dde83408b 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -530,13 +530,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_v3d_submit_cl *args = data;
struct v3d_bin_job *bin = NULL;
struct v3d_render_job *render;
+ struct v3d_job *clean_job = NULL;
+ struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0) {
- DRM_INFO("pad must be zero: %d\n", args->pad);
+ if (args->flags != 0 &&
+ args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
@@ -565,6 +568,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
if (ret) {
+ kfree(bin);
v3d_job_put(&render->base);
kfree(bin);
return ret;
@@ -578,12 +582,31 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
bin->render = render;
}
- ret = v3d_lookup_bos(dev, file_priv, &render->base,
+ if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
+ if (!clean_job) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
+ if (ret) {
+ kfree(clean_job);
+ clean_job = NULL;
+ goto fail;
+ }
+
+ last_job = clean_job;
+ } else {
+ last_job = &render->base;
+ }
+
+ ret = v3d_lookup_bos(dev, file_priv, last_job,
args->bo_handles, args->bo_handle_count);
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
if (ret)
goto fail;
@@ -602,28 +625,44 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
+
+ if (clean_job) {
+ struct dma_fence *render_fence =
+ dma_fence_get(render->base.done_fence);
+ ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ if (ret)
+ goto fail_unreserve;
+ ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail_unreserve;
+ }
+
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
- &render->base,
+ last_job,
&acquire_ctx,
args->out_sync,
- render->base.done_fence);
+ last_job->done_fence);
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(render->base.bo,
- render->base.bo_count, &acquire_ctx);
+ drm_gem_unlock_reservations(last_job->bo,
+ last_job->bo_count, &acquire_ctx);
fail:
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
+ if (clean_job)
+ v3d_job_put(clean_job);
return ret;
}
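Userspace can probe the new DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH parameter before setting the flag, since older kernels reject any non-zero flags with -EINVAL. A hypothetical libdrm-style snippet (fd is an open render node; filling in the CL ranges and BO handles is elided):

        struct drm_v3d_submit_cl submit;

        memset(&submit, 0, sizeof(submit));
        /* ... fill in bcl/rcl ranges and BO handles as usual ... */
        submit.flags = DRM_V3D_SUBMIT_CL_FLUSH_CACHE;

        if (drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit) && errno == EINVAL) {
                /* kernel predates the flag; a caller would resubmit without it */
                submit.flags = 0;
        }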
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 56ba510f21a2..45fe135d6e43 100644
--- a/drivers/gpu/drm/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -4,6 +4,8 @@ config DRM_VBOXVIDEO
depends on DRM && X86 && PCI
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
+ select DRM_TTM
+ select DRM_TTM_HELPER
select GENERIC_ALLOCATOR
help
This is a KMS driver for the virtual Graphics Card used in
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 55d798c76b21..f2e968b5ffa6 100644
--- a/drivers/gpu/drm/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
- vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
vbox_mode.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 862db495d111..8512d970a09f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
- .fb_probe = vboxfb_create,
-};
-
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct vbox_private *vbox;
@@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
- &vbox_fb_helper_funcs, 32,
- vbox->num_crtcs);
+ ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
if (ret)
goto err_irq_fini;
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
- goto err_fbdev_fini;
+ goto err_irq_fini;
return 0;
-err_fbdev_fini:
- vbox_fbdev_fini(vbox);
err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
@@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
struct vbox_private *vbox = pci_get_drvdata(pdev);
drm_dev_unregister(&vbox->ddev);
- vbox_fbdev_fini(vbox);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
@@ -189,10 +181,7 @@ static struct pci_driver vbox_pci_driver = {
#endif
};
-static const struct file_operations vbox_fops = {
- .owner = THIS_MODULE,
- DRM_VRAM_MM_FILE_OPERATIONS
-};
+DEFINE_DRM_GEM_FOPS(vbox_fops);
static struct drm_driver driver = {
.driver_features =
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index e8cb9efc6088..87421903816c 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -16,12 +16,9 @@
#include <linux/string.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
-
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_ch_setup.h"
@@ -48,16 +45,9 @@
sizeof(struct hgsmi_host_flags))
#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
-struct vbox_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
struct vbox_private {
/* Must be first; or we must define our own release callback */
struct drm_device ddev;
- struct drm_fb_helper fb_helper;
- struct vbox_framebuffer afb;
u8 __iomem *guest_heap;
u8 __iomem *vbva_buffers;
@@ -137,7 +127,6 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
-#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
@@ -148,25 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects);
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-void vbox_fbdev_fini(struct vbox_private *vbox);
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj);
-
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
deleted file mode 100644
index 8f74bcffc034..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_fb.c
+++ /dev/null
@@ -1,149 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2013-2017 Oracle Corporation
- * This file is based on ast_fb.c
- * Copyright 2012 Red Hat Inc.
- * Authors: Dave Airlie <airlied@redhat.com>
- * Michael Thayer <michael.thayer@oracle.com>
- */
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "vbox_drv.h"
-#include "vboxvideo.h"
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
-static struct fb_deferred_io vbox_defio = {
- .delay = HZ / 30,
- .deferred_io = drm_fb_helper_deferred_io,
-};
-#endif
-
-static struct fb_ops vboxfb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
-};
-
-int vboxfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct vbox_private *vbox =
- container_of(helper, struct vbox_private, fb_helper);
- struct pci_dev *pdev = vbox->ddev.pdev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- struct drm_gem_object *gobj;
- struct drm_gem_vram_object *gbo;
- int size, ret;
- s64 gpu_addr;
- u32 pitch;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- mode_cmd.pitches[0] = pitch;
-
- size = pitch * mode_cmd.height;
-
- ret = vbox_gem_create(vbox, size, true, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
- if (ret)
- return ret;
-
- gbo = drm_gem_vram_of_gem(gobj);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
-
- info->screen_size = size;
- info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(info->screen_base))
- return PTR_ERR(info->screen_base);
-
- fb = &vbox->afb.base;
- helper->fb = fb;
-
- info->fbops = &vboxfb_ops;
-
- /*
- * This seems to be done for safety checking that the framebuffer
- * is not registered twice by different drivers.
- */
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
-
- drm_fb_helper_fill_info(info, helper, sizes);
-
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0)
- return (int)gpu_addr;
- info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
- info->fix.smem_len = vbox->available_vram_size - gpu_addr;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- info->fbdefio = &vbox_defio;
- fb_deferred_io_init(info);
-#endif
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
-
- return 0;
-}
-
-void vbox_fbdev_fini(struct vbox_private *vbox)
-{
- struct vbox_framebuffer *afb = &vbox->afb;
-
-#ifdef CONFIG_DRM_KMS_FB_HELPER
- if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
- fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
-#endif
-
- drm_fb_helper_unregister_fbi(&vbox->fb_helper);
-
- if (afb->obj) {
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj);
-
- drm_gem_vram_kunmap(gbo);
- drm_gem_vram_unpin(gbo);
-
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&vbox->fb_helper);
-
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
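
Nothing replaces vbox_fb.c one-for-one: the drm_fbdev_generic_setup() call in the vbox_drv.c hunk registers the generic fbdev emulation as an in-kernel DRM client, which allocates its own framebuffer, handles deferred I/O, and tears itself down from drm_dev_unregister(). That is why both the err_fbdev_fini unwind label and the vbox_fbdev_fini() call in the remove path could be dropped. For reference, the whole setup now reduces to:

	/*
	 * Sketch: generic fbdev emulation in a probe path. 32 is the
	 * preferred bpp. There is no matching teardown call; cleanup
	 * happens automatically on drm_dev_unregister().
	 */
	ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
	if (ret)
		goto err_irq_fini;
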
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 02fa8277ff1e..9dcab115a261 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,22 +11,12 @@
#include <linux/vbox_err.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
-static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
-
- if (vbox_fb->obj)
- drm_gem_object_put_unlocked(vbox_fb->obj);
-
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
void vbox_report_caps(struct vbox_private *vbox)
{
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
@@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox)
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
-/* Send information about dirty rectangles to VBVA. */
-void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- struct vbox_private *vbox = fb->dev->dev_private;
- struct drm_display_mode *mode;
- struct drm_crtc *crtc;
- int crtc_x, crtc_y;
- unsigned int i;
-
- mutex_lock(&vbox->hw_mutex);
- list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
- if (crtc->primary->state->fb != fb)
- continue;
-
- mode = &crtc->state->mode;
- crtc_x = crtc->primary->state->src_x >> 16;
- crtc_y = crtc->primary->state->src_y >> 16;
-
- for (i = 0; i < num_rects; ++i) {
- struct vbva_cmd_hdr cmd_hdr;
- unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
-
- if (rects[i].x1 > crtc_x + mode->hdisplay ||
- rects[i].y1 > crtc_y + mode->vdisplay ||
- rects[i].x2 < crtc_x ||
- rects[i].y2 < crtc_y)
- continue;
-
- cmd_hdr.x = (s16)rects[i].x1;
- cmd_hdr.y = (s16)rects[i].y1;
- cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
- cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
-
- if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
- vbox->guest_pool))
- continue;
-
- vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
- &cmd_hdr, sizeof(cmd_hdr));
- vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
- }
- }
- mutex_unlock(&vbox->hw_mutex);
-}
-
-static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int flags, unsigned int color,
- struct drm_clip_rect *rects,
- unsigned int num_rects)
-{
- vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs vbox_fb_funcs = {
- .destroy = vbox_user_framebuffer_destroy,
- .dirty = vbox_user_framebuffer_dirty,
-};
-
-int vbox_framebuffer_init(struct vbox_private *vbox,
- struct vbox_framebuffer *vbox_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
- vbox_fb->obj = obj;
- ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
static int vbox_accel_init(struct vbox_private *vbox)
{
struct vbva_buffer *vbva;
@@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox)
gen_pool_destroy(vbox->guest_pool);
pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
-
-int vbox_gem_create(struct vbox_private *vbox,
- u32 size, bool iskernel, struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
- size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
-
- *obj = &gbo->bo.base;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index e1e48ba919eb..19612132c8a3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -13,7 +13,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
if (!fb1) {
fb1 = fb;
- if (to_vbox_framebuffer(fb1) == &vbox->afb)
+ if (fb1 == vbox->ddev.fb_helper->fb)
break;
} else if (fb != fb1) {
single_framebuffer = false;
@@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips, i;
vbox_crtc_set_base_and_mode(crtc, fb,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
+
+ /* Send information about dirty rectangles to VBVA. */
+
+ clips = drm_plane_get_damage_clips(plane->state);
+ num_clips = drm_plane_get_damage_clips_count(plane->state);
+
+ if (!num_clips)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ for (i = 0; i < num_clips; ++i, ++clips) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
+ cmd_hdr.x = (s16)clips->x1;
+ cmd_hdr.y = (s16)clips->y1;
+ cmd_hdr.w = (u16)clips->x2 - clips->x1;
+ cmd_hdr.h = (u16)clips->y2 - clips->y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+
+ mutex_unlock(&vbox->hw_mutex);
}
static void vbox_primary_atomic_disable(struct drm_plane *plane,
@@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
old_state->src_y >> 16);
}
-static int vbox_primary_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
-
- return ret;
-}
-
-static void vbox_primary_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!old_state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static int vbox_cursor_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj);
+ struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
u32 width = plane->state->crtc_w;
u32 height = plane->state->crtc_h;
size_t data_size, mask_size;
@@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
mutex_unlock(&vbox->hw_mutex);
}
-static int vbox_cursor_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!new_state->fb)
- return 0;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj);
- return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-}
-
-static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct drm_gem_vram_object *gbo;
-
- if (!plane->state->fb)
- return;
-
- gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj);
- drm_gem_vram_unpin(gbo);
-}
-
static const u32 vbox_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
- .prepare_fb = vbox_cursor_prepare_fb,
- .cleanup_fb = vbox_cursor_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
@@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
.atomic_check = vbox_primary_atomic_check,
.atomic_update = vbox_primary_atomic_update,
.atomic_disable = vbox_primary_atomic_disable,
- .prepare_fb = vbox_primary_prepare_fb,
- .cleanup_fb = vbox_primary_cleanup_fb,
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
};
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
@@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *vbox_user_framebuffer_create(
- struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct vbox_private *vbox =
- container_of(dev, struct vbox_private, ddev);
- struct drm_gem_object *obj;
- struct vbox_framebuffer *vbox_fb;
- int ret = -ENOMEM;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
- if (!vbox_fb)
- goto err_unref_obj;
-
- ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
- if (ret)
- goto err_free_vbox_fb;
-
- return &vbox_fb->base;
-
-err_free_vbox_fb:
- kfree(vbox_fb);
-err_unref_obj:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
static const struct drm_mode_config_funcs vbox_mode_funcs = {
- .fb_create = vbox_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
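
With the custom framebuffer struct gone, dirty tracking runs entirely through the core: drm_gem_fb_create_with_dirty() creates GEM-backed framebuffers whose .dirty hook is drm_atomic_helper_dirtyfb(), which converts userspace DIRTYFB clip rects into per-plane damage, and vbox_primary_atomic_update() above reads that damage back with drm_plane_get_damage_clips(). Drivers that also want the damage clipped against the visible plane can use the iterator from <drm/drm_damage_helper.h>; a minimal consumer sketch (flush_rect() is hypothetical):

static void example_primary_atomic_update(struct drm_plane *plane,
					  struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect rect;

	/* Iterate damage, clipped to the visible part of the plane. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &rect)
		flush_rect(state->fb, &rect);	/* hypothetical upload */
}
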
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index b82595a9ed0f..976423d0c3cc 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -17,8 +17,7 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- vbox->available_vram_size,
- &drm_gem_vram_mm_funcs);
+ vbox->available_vram_size);
if (IS_ERR(vmm)) {
ret = PTR_ERR(vmm);
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index f1f0a7c87771..b00e20f5ce05 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -994,7 +994,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(crtc->dev);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
- if (vc4_state->mm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->mm)) {
unsigned long flags;
spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
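
The vc4 hunks in this series (vc4_crtc.c here, vc4_hvs.c and vc4_plane.c below) stop reading the private drm_mm_node.allocated bitfield directly and use the drm_mm_node_allocated() accessor instead; behaviour is identical, but drivers no longer depend on how drm_mm stores its node flags internally. The resulting check-then-remove idiom:

	/* Sketch: only remove a node that was actually allocated. */
	if (drm_mm_node_allocated(&vc4_state->mm)) {
		spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
		drm_mm_remove_node(&vc4_state->mm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
	}
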
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 8a27a6acee61..c586325de2a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -249,7 +249,8 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
}
if (panel)
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
return drm_bridge_attach(dpi->encoder, bridge, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index c78fa8144776..c9ba83ed49b9 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1575,8 +1576,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
}
if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
+ dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
+ DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
}
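
drm_panel_bridge_add() and devm_drm_panel_bridge_add() lost their connector-type argument in this cycle; callers that must force a type because the panel does not report one, as in the vc4 DPI and DSI hunks, switch to the _typed variants. The call shape:

	/* Sketch: wrap a panel in a bridge with an explicit type. */
	dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
						      DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(dsi->bridge))
		return PTR_ERR(dsi->bridge);
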
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ee7d4e7b0ee3..1c62c6c9244b 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -398,10 +398,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- frame.avi.right_bar = cstate->tv.margins.right;
- frame.avi.left_bar = cstate->tv.margins.left;
- frame.avi.top_bar = cstate->tv.margins.top;
- frame.avi.bottom_bar = cstate->tv.margins.bottom;
+ drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
}
@@ -1285,6 +1282,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+ struct cec_connector_info conn_info;
+#endif
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
@@ -1403,13 +1403,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_DRM_VC4_HDMI_CEC
hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
vc4, "vc4",
- CEC_CAP_TRANSMIT |
- CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH |
- CEC_CAP_RC, 1);
+ CEC_CAP_DEFAULTS |
+ CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
if (ret < 0)
goto err_destroy_conn;
+
+ cec_fill_conn_info_from_drm(&conn_info, hdmi->connector);
+ cec_s_conn_info(hdmi->cec_adap, &conn_info);
+
HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 9936b15d0bf1..5a43659da319 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -315,7 +315,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = drm->dev_private;
- if (vc4->hvs->mitchell_netravali_filter.allocated)
+ if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
drm_mm_takedown(&vc4->hvs->dlist_mm);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 5e5f90810aca..4934127f0d76 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -178,7 +178,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane,
struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
- if (vc4_state->lbm.allocated) {
+ if (drm_mm_node_allocated(&vc4_state->lbm)) {
unsigned long irqflags;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
@@ -557,7 +557,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
/* Allocate the LBM memory that the HVS will use for temporary
* storage due to our scaling/format conversion.
*/
- if (!vc4_state->lbm.allocated) {
+ if (!drm_mm_node_allocated(&vc4_state->lbm)) {
int ret;
spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index ba36e933bb49..eff3047052d4 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -3,7 +3,7 @@ config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 458e606a936f..92aa2b3d349d 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
- virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
+ virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 0fc32fa0b3c0..8dee698c90ff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -56,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
dev->pdev = pdev;
if (vga)
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- 0,
"virtiodrmfb");
/*
@@ -185,17 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-static const struct file_operations virtio_gpu_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = virtio_gpu_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
@@ -210,15 +199,10 @@ static struct drm_driver driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
- .gem_prime_vmap = virtgpu_gem_prime_vmap,
- .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
- .gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object_unlocked = virtio_gpu_gem_free_object,
- .gem_open_object = virtio_gpu_gem_object_open,
- .gem_close_object = virtio_gpu_gem_object_close,
+ .gem_create_object = virtio_gpu_create_object,
.fops = &virtio_gpu_driver_fops,
.ioctls = virtio_gpu_ioctls,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e28829661724..0b56ba005e25 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,12 +35,9 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_placement.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -68,21 +65,23 @@ struct virtio_gpu_object_params {
};
struct virtio_gpu_object {
- struct drm_gem_object gem_base;
+ struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
struct sg_table *pages;
uint32_t mapped;
- void *vmap;
bool dumb;
- struct ttm_place placement_code;
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
- container_of((gobj), struct virtio_gpu_object, gem_base)
+ container_of((gobj), struct virtio_gpu_object, base.base)
+
+struct virtio_gpu_object_array {
+ struct ww_acquire_ctx ticket;
+ struct list_head next;
+ u32 nents, total;
+ struct drm_gem_object *objs[];
+};
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;
@@ -115,9 +114,9 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
-
virtio_gpu_resp_cb resp_cb;
+ struct virtio_gpu_object_array *objs;
struct list_head list;
};
@@ -147,10 +146,6 @@ struct virtio_gpu_framebuffer {
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
-struct virtio_gpu_mman {
- struct ttm_bo_device bdev;
-};
-
struct virtio_gpu_queue {
struct virtqueue *vq;
spinlock_t qlock;
@@ -179,8 +174,6 @@ struct virtio_gpu_device {
struct virtio_device *vdev;
- struct virtio_gpu_mman mman;
-
struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
uint32_t num_scanouts;
@@ -205,6 +198,10 @@ struct virtio_gpu_device {
struct work_struct config_changed_work;
+ struct work_struct obj_free_work;
+ spinlock_t obj_free_lock;
+ struct list_head obj_free_list;
+
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
@@ -217,9 +214,6 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head);
-void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -240,10 +234,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -251,20 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj);
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence);
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs);
+void virtio_gpu_array_put_free_work(struct work_struct *work);
+
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
@@ -295,28 +300,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id);
+ struct virtio_gpu_object_array *objs);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence);
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
@@ -339,11 +348,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_ttm.c */
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
-
/* virtio_gpu_fence.c */
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
@@ -355,70 +359,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo);
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-
-static inline struct virtio_gpu_object*
-virtio_gpu_object_ref(struct virtio_gpu_object *bo)
-{
- ttm_bo_get(&bo->tbo);
- return bo;
-}
-
-static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
- tbo = &((*bo)->tbo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
-}
-
-static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- struct virtio_gpu_device *qdev =
- bo->gem_base.dev->dev_private;
- dev_err(qdev->dev, "%p reserve failed\n", bo);
- }
- return r;
- }
- return 0;
-}
-
-static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo)
-{
- ttm_bo_unreserve(&bo->tbo);
+ return drm_vma_node_offset_addr(&bo->base.base.vma_node);
}
/* virgl debugfs */
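
virtio_gpu_object_array, declared above, is virtio-gpu's replacement for TTM validation lists: a ww-acquire ticket plus a flexible array of GEM object pointers that follows a command from submission until the host signals completion. The allocator in virtgpu_gem.c sizes the trailing array by hand; on kernels that provide it, struct_size() from <linux/overflow.h> expresses the same computation with overflow checking (a sketch of an alternative, not what this patch uses):

	struct virtio_gpu_object_array *objs;

	/* sizeof(*objs) plus nents trailing pointers, overflow-checked. */
	objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL);
	if (!objs)
		return NULL;
	objs->nents = 0;
	objs->total = nents;
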
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index a0514f5bd006..a4b9881ca1d3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -41,6 +41,10 @@ bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
+ if (WARN_ON_ONCE(fence->f.seqno == 0))
+ /* fence was leaked outside the driver before
+ * virtio_gpu_fence_emit() finished initializing it */
+ return false;
if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
return true;
return false;
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 292566146814..4c1f579edfb3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -28,54 +28,31 @@
#include "virtgpu_drv.h"
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);
-
- if (obj)
- virtio_gpu_object_unref(&obj);
-}
-
-struct virtio_gpu_object*
-virtio_gpu_alloc_object(struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct virtio_gpu_fence *fence)
-{
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_object *obj;
- int ret;
-
- ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
- if (ret)
- return ERR_PTR(ret);
-
- return obj;
-}
-
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, params, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
+ if (ret < 0)
+ return ret;
- ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
+ ret = drm_gem_handle_create(file, &obj->base.base, &handle);
if (ret) {
- drm_gem_object_release(&obj->gem_base);
+ drm_gem_object_release(&obj->base.base);
return ret;
}
- *obj_p = &obj->gem_base;
+ *obj_p = &obj->base.base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&obj->gem_base);
+ drm_gem_object_put_unlocked(&obj->base.base);
*handle_p = handle;
return 0;
@@ -136,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return 0;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
- return r;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
return 0;
}
@@ -157,17 +133,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
{
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
- int r;
+ struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
return;
- r = virtio_gpu_object_reserve(qobj, false);
- if (r)
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
return;
+ virtio_gpu_array_add_obj(objs, obj);
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
- qobj->hw_res_handle);
- virtio_gpu_object_unreserve(qobj);
+ objs);
+}
+
+struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;
+
+ objs = kmalloc(size, GFP_KERNEL);
+ if (!objs)
+ return NULL;
+
+ objs->nents = 0;
+ objs->total = nents;
+ return objs;
+}
+
+static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
+{
+ kfree(objs);
+}
+
+struct virtio_gpu_object_array*
+virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
+{
+ struct virtio_gpu_object_array *objs;
+ u32 i;
+
+ objs = virtio_gpu_array_alloc(nents);
+ if (!objs)
+ return NULL;
+
+ for (i = 0; i < nents; i++) {
+ objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
+ if (!objs->objs[i]) {
+ objs->nents = i;
+ virtio_gpu_array_put_free(objs);
+ return NULL;
+ }
+ }
+ objs->nents = i;
+ return objs;
+}
+
+void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
+ struct drm_gem_object *obj)
+{
+ if (WARN_ON_ONCE(objs->nents == objs->total))
+ return;
+
+ drm_gem_object_get(obj);
+ objs->objs[objs->nents] = obj;
+ objs->nents++;
+}
+
+int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
+{
+ int ret;
+
+ if (objs->nents == 1) {
+ ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
+ } else {
+ ret = drm_gem_lock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+ return ret;
+}
+
+void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
+{
+ if (objs->nents == 1) {
+ dma_resv_unlock(objs->objs[0]->resv);
+ } else {
+ drm_gem_unlock_reservations(objs->objs, objs->nents,
+ &objs->ticket);
+ }
+}
+
+void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < objs->nents; i++)
+ dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+}
+
+void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+{
+ u32 i;
+
+ for (i = 0; i < objs->nents; i++)
+ drm_gem_object_put_unlocked(objs->objs[i]);
+ virtio_gpu_array_free(objs);
+}
+
+void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs)
+{
+ spin_lock(&vgdev->obj_free_lock);
+ list_add_tail(&objs->next, &vgdev->obj_free_list);
+ spin_unlock(&vgdev->obj_free_lock);
+ schedule_work(&vgdev->obj_free_work);
+}
+
+void virtio_gpu_array_put_free_work(struct work_struct *work)
+{
+ struct virtio_gpu_device *vgdev =
+ container_of(work, struct virtio_gpu_device, obj_free_work);
+ struct virtio_gpu_object_array *objs;
+
+ spin_lock(&vgdev->obj_free_lock);
+ while (!list_empty(&vgdev->obj_free_list)) {
+ objs = list_first_entry(&vgdev->obj_free_list,
+ struct virtio_gpu_object_array, next);
+ list_del(&objs->next);
+ spin_unlock(&vgdev->obj_free_lock);
+ virtio_gpu_array_put_free(objs);
+ spin_lock(&vgdev->obj_free_lock);
+ }
+ spin_unlock(&vgdev->obj_free_lock);
}
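
The last three helpers implement deferred release: virtio_gpu_array_put_free_delayed() queues an array on obj_free_list under the spinlock and schedules obj_free_work, whose worker drains the list and drops the GEM references in process context, where the final unref (which may sleep while tearing down backing storage) is safe. Note how the worker deliberately drops obj_free_lock around each virtio_gpu_array_put_free() call for the same reason. A hypothetical call site; the real one lives in the virtqueue completion path in virtgpu_vq.c, outside this excerpt:

	/* Retire a completed command's buffer list without dropping
	 * GEM references directly in the reclaim path. */
	if (entry->objs) {
		virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		entry->objs = NULL;
	}
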
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0a88ef11b9d3..9af1ec62434f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -29,7 +29,6 @@
#include <linux/sync_file.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
@@ -56,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
-{
- struct ttm_operation_ctx ctx = { false, false };
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
- int ret;
-
- ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
- if (ret != 0)
- return ret;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
- ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
- if (ret) {
- ttm_eu_backoff_reservation(ticket, head);
- return ret;
- }
- }
- return 0;
-}
-
-void virtio_gpu_unref_list(struct list_head *head)
-{
- struct ttm_validate_buffer *buf;
- struct ttm_buffer_object *bo;
- struct virtio_gpu_object *qobj;
-
- list_for_each_entry(buf, head, head) {
- bo = buf->bo;
- qobj = container_of(bo, struct virtio_gpu_object, tbo);
-
- drm_gem_object_put_unlocked(&qobj->gem_base);
- }
-}
-
/*
* Usage of execbuffer:
* Relocations need to take into account the full VIRTIO_GPUDrawable size.
@@ -107,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
- struct drm_gem_object *gobj;
struct virtio_gpu_fence *out_fence;
- struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
void __user *user_bo_handles = NULL;
- struct list_head validate_list;
- struct ttm_validate_buffer *buflist = NULL;
- int i;
- struct ww_acquire_ctx ticket;
+ struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
@@ -157,15 +112,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
return out_fence_fd;
}
- INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
-
bo_handles = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(uint32_t), GFP_KERNEL);
- buflist = kvmalloc_array(exbuf->num_bo_handles,
- sizeof(struct ttm_validate_buffer),
- GFP_KERNEL | __GFP_ZERO);
- if (!bo_handles || !buflist) {
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!bo_handles) {
ret = -ENOMEM;
goto out_unused_fd;
}
@@ -177,27 +127,23 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- for (i = 0; i < exbuf->num_bo_handles; i++) {
- gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
- if (!gobj) {
- ret = -ENOENT;
- goto out_unused_fd;
- }
-
- qobj = gem_to_virtio_gpu_obj(gobj);
- buflist[i].bo = &qobj->tbo;
-
- list_add(&buflist[i].head, &validate_list);
+ buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ exbuf->num_bo_handles);
+ if (!buflist) {
+ ret = -ENOENT;
+ goto out_unused_fd;
}
kvfree(bo_handles);
bo_handles = NULL;
}
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret)
- goto out_free;
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_unused_fd;
+ }
- buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
+ buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unresv;
@@ -222,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
}
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, out_fence);
-
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
-
- /* fence the command bo */
- virtio_gpu_unref_list(&validate_list);
- kvfree(buflist);
+ vfpriv->ctx_id, buflist, out_fence);
return 0;
out_memdup:
- kfree(buf);
+ kvfree(buf);
out_unresv:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-out_free:
- virtio_gpu_unref_list(&validate_list);
+ if (buflist)
+ virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
kvfree(bo_handles);
- kvfree(buflist);
+ if (buflist)
+ virtio_gpu_array_put_free(buflist);
if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd);
@@ -316,11 +256,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence)
return -ENOMEM;
- qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
dma_fence_put(&fence->f);
- if (IS_ERR(qobj))
- return PTR_ERR(qobj);
- obj = &qobj->gem_base;
+ if (ret < 0)
+ return ret;
+ obj = &qobj->base.base;
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
@@ -347,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
qobj = gem_to_virtio_gpu_obj(gobj);
- ri->size = qobj->gem_base.size;
+ ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
drm_gem_object_put_unlocked(gobj);
return 0;
@@ -360,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
u32 offset = args->offset;
@@ -371,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
convert_to_hw_box(&box, &args->box);
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
- goto out_unres;
+ goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, qobj->hw_res_handle,
- vfpriv->ctx_id, offset, args->level,
- &box, fence);
- dma_resv_add_excl_fence(qobj->tbo.base.resv,
- &fence->f);
-
+ (vgdev, vfpriv->ctx_id, offset, args->level,
+ &box, objs, fence);
dma_fence_put(&fence->f);
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+ return 0;
+
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
@@ -413,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
- struct ttm_operation_ctx ctx = { true, false };
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
struct virtio_gpu_box box;
int ret;
u32 offset = args->offset;
- gobj = drm_gem_object_lookup(file, args->bo_handle);
- if (gobj == NULL)
+ objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
+ if (objs == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- ret = virtio_gpu_object_reserve(qobj, false);
- if (ret)
- goto out;
-
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
- if (unlikely(ret))
- goto out_unres;
-
convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, qobj, offset,
- box.w, box.h, box.x, box.y, NULL);
+ (vgdev, offset,
+ box.w, box.h, box.x, box.y,
+ objs, NULL);
} else {
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_free;
+
+ ret = -ENOMEM;
fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto out_unres;
- }
+ if (!fence)
+ goto err_unlock;
+
virtio_gpu_cmd_transfer_to_host_3d
- (vgdev, qobj,
+ (vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, fence);
- dma_resv_add_excl_fence(qobj->tbo.base.resv,
- &fence->f);
+ args->level, &box, objs, fence);
dma_fence_put(&fence->f);
}
+ return 0;
-out_unres:
- virtio_gpu_object_unreserve(qobj);
-out:
- drm_gem_object_put_unlocked(gobj);
+err_unlock:
+ virtio_gpu_array_unlock_resv(objs);
+err_put_free:
+ virtio_gpu_array_put_free(objs);
return ret;
}
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+ struct drm_file *file)
{
struct drm_virtgpu_3d_wait *args = data;
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
+ struct drm_gem_object *obj;
+ long timeout = 15 * HZ;
int ret;
- bool nowait = false;
- gobj = drm_gem_object_lookup(file, args->handle);
- if (gobj == NULL)
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (obj == NULL)
return -ENOENT;
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (args->flags & VIRTGPU_WAIT_NOWAIT)
- nowait = true;
- ret = virtio_gpu_object_wait(qobj, nowait);
+ if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+ ret = dma_resv_test_signaled_rcu(obj->resv, true);
+ } else {
+ ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+ timeout);
+ }
+ if (ret == 0)
+ ret = -EBUSY;
+ else if (ret > 0)
+ ret = 0;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
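
The rewritten wait ioctl leans on the reservation object directly: dma_resv_test_signaled_rcu() / dma_resv_wait_timeout_rcu() return zero (or a zero remaining timeout) while fences are still pending and a positive value once they have signalled, so the handler folds that convention into the ioctl's errno contract. Spelled out:

	/*
	 * dma_resv wait result -> ioctl return value:
	 *   ret == 0  fences still pending         -> -EBUSY
	 *   ret  > 0  signalled / time remaining   -> 0
	 *   ret  < 0  error from the wait          -> returned as-is
	 */
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;
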
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index c190702fab72..2f5773e43557 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -147,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev)
INIT_WORK(&vgdev->config_changed_work,
virtio_gpu_config_changed_work_func);
+ INIT_WORK(&vgdev->obj_free_work,
+ virtio_gpu_array_put_free_work);
+ INIT_LIST_HEAD(&vgdev->obj_free_list);
+ spin_lock_init(&vgdev->obj_free_lock);
+
#ifdef __LITTLE_ENDIAN
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
- DRM_INFO("virgl 3d acceleration %s\n",
- vgdev->has_virgl_3d ? "enabled" : "not supported by host");
-#else
- DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
- DRM_INFO("EDID support available.\n");
}
+ DRM_INFO("features: %cvirgl %cedid\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
@@ -173,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev)
goto err_vbufs;
}
- ret = virtio_gpu_ttm_init(vgdev);
- if (ret) {
- DRM_ERROR("failed to init ttm %d\n", ret);
- goto err_ttm;
- }
-
/* get display info */
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
num_scanouts, &num_scanouts);
@@ -210,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev)
return 0;
err_scanouts:
- virtio_gpu_ttm_fini(vgdev);
-err_ttm:
virtio_gpu_free_vbufs(vgdev);
err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
@@ -234,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
+ flush_work(&vgdev->obj_free_work);
vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
@@ -242,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev)
vgdev->vdev->config->del_vqs(vgdev->vdev);
virtio_gpu_modeset_fini(vgdev);
- virtio_gpu_ttm_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
kfree(vgdev->capsets);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 09b526518f5a..017a9e0fc3bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,73 +23,83 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
+static int virtio_gpu_virglrenderer_workaround = 1;
+module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
+
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
uint32_t *resid)
{
-#if 0
- int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
-#else
- static int handle;
-
- /*
- * FIXME: dirty hack to avoid re-using IDs, virglrenderer
- * can't deal with that. Needs fixing in virglrenderer, also
- * should figure a better way to handle that in the guest.
- */
- handle++;
-#endif
-
- *resid = handle + 1;
+ if (virtio_gpu_virglrenderer_workaround) {
+ /*
+ * Hack to avoid re-using resource IDs.
+ *
+ * virglrenderer versions up to (and including) 0.7.0
+ * can't deal with that. virglrenderer commit
+ * "f91a9dd35715 Fix unlinking resources from hash
+ * table." (Feb 2019) fixes the bug.
+ */
+ static int handle;
+ handle++;
+ *resid = handle + 1;
+ } else {
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+ if (handle < 0)
+ return handle;
+ *resid = handle + 1;
+ }
return 0;
}
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
-#if 0
- ida_free(&vgdev->resource_ida, id - 1);
-#endif
+ if (!virtio_gpu_virglrenderer_workaround) {
+ ida_free(&vgdev->resource_ida, id - 1);
+ }
}
-static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ if (bo->pages)
+ virtio_gpu_object_detach(vgdev, bo);
if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
- if (bo->vmap)
- virtio_gpu_object_kunmap(bo);
- drm_gem_object_release(&bo->gem_base);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
+
+ drm_gem_shmem_free_object(obj);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
+static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+ .free = virtio_gpu_free_object,
+ .open = virtio_gpu_gem_object_open,
+ .close = virtio_gpu_gem_object_close,
+
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .mmap = &drm_gem_shmem_mmap,
+};
+
+struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
+ size_t size)
{
- u32 c = 1;
-
- vgbo->placement.placement = &vgbo->placement_code;
- vgbo->placement.busy_placement = &vgbo->placement_code;
- vgbo->placement_code.fpfn = 0;
- vgbo->placement_code.lpfn = 0;
- vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_NO_EVICT;
- vgbo->placement.num_placement = c;
- vgbo->placement.num_busy_placement = c;
+ struct virtio_gpu_object *bo;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ return &bo->base.base;
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
@@ -97,157 +107,59 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object_array *objs = NULL;
+ struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
- size_t acc_size;
int ret;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
- sizeof(struct virtio_gpu_object));
+ params->size = roundup(params->size, PAGE_SIZE);
+ shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+ bo = gem_to_virtio_gpu_obj(&shmem_obj->base);
- bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
- if (bo == NULL)
- return -ENOMEM;
ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
- if (ret < 0) {
- kfree(bo);
- return ret;
- }
- params->size = roundup(params->size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
- if (ret != 0) {
- virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
- kfree(bo);
- return ret;
- }
+ if (ret < 0)
+ goto err_free_gem;
+
bo->dumb = params->dumb;
+ if (fence) {
+ ret = -ENOMEM;
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ goto err_put_id;
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+
+ ret = virtio_gpu_array_lock_resv(objs);
+ if (ret != 0)
+ goto err_put_objs;
+ }
+
if (params->virgl) {
- virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
} else {
- virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ virtio_gpu_cmd_create_resource(vgdev, bo, params,
+ objs, fence);
}
- virtio_gpu_init_ttm_placement(bo);
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
- ttm_bo_type_device, &bo->placement, 0,
- true, acc_size, NULL, NULL,
- &virtio_gpu_ttm_bo_destroy);
- /* ttm_bo_init failure will call the destroy */
- if (ret != 0)
+ ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
return ret;
-
- if (fence) {
- struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct ww_acquire_ctx ticket;
- unsigned long irq_flags;
- bool signaled;
-
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&bo->gem_base);
- mainbuf.bo = &bo->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret == 0) {
- spin_lock_irqsave(&drv->lock, irq_flags);
- signaled = virtio_fence_signaled(&fence->f);
- if (!signaled)
- /* virtio create command still in flight */
- ttm_eu_fence_buffer_objects(&ticket, &validate_list,
- &fence->f);
- spin_unlock_irqrestore(&drv->lock, irq_flags);
- if (signaled)
- /* virtio create command finished */
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- }
- virtio_gpu_unref_list(&validate_list);
}
*bo_ptr = bo;
return 0;
-}
-
-void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
-{
- bo->vmap = NULL;
- ttm_bo_kunmap(&bo->kmap);
-}
-
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
-{
- bool is_iomem;
- int r;
-
- WARN_ON(bo->vmap);
-
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
- if (r)
- return r;
- bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
- return 0;
-}
-int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
- struct virtio_gpu_object *bo)
-{
- int ret;
- struct page **pages = bo->tbo.ttm->pages;
- int nr_pages = bo->tbo.num_pages;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
- size_t max_segment;
-
- /* wtf swapping */
- if (bo->pages)
- return 0;
-
- if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
- bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!bo->pages)
- goto out;
-
- max_segment = virtio_max_dma_size(qdev->vdev);
- max_segment &= PAGE_MASK;
- if (max_segment > SCATTERLIST_MAX_SEGMENT)
- max_segment = SCATTERLIST_MAX_SEGMENT;
- ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
- max_segment, GFP_KERNEL);
- if (ret)
- goto out;
- return 0;
-out:
- kfree(bo->pages);
- bo->pages = NULL;
- return -ENOMEM;
-}
-
-void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
-{
- sg_free_table(bo->pages);
- kfree(bo->pages);
- bo->pages = NULL;
-}
-
-int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0))
- return r;
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
+err_put_objs:
+ virtio_gpu_array_put_free(objs);
+err_put_id:
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+err_free_gem:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ret;
}
-
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a492ac3f4a7e..390524143139 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -84,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- return 0;
+ bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->fb || !state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
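+ /* No scaling is supported; only cursor planes may be positioned freely. */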
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ is_cursor, true);
+ return ret;
}
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
@@ -109,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
if (bo->dumb) {
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->src_w >> 16),
- cpu_to_le32(plane->state->src_h >> 16),
- cpu_to_le32(plane->state->src_x >> 16),
- cpu_to_le32(plane->state->src_y >> 16), NULL);
+ (vgdev, 0,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ objs, NULL);
}
} else {
handle = 0;
@@ -186,7 +208,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
- int ret = 0;
if (plane->state->crtc)
output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
@@ -205,20 +226,20 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
/* new cursor -- update & wait */
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return;
+ virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, bo, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- 0, 0, vgfb->fence);
- ret = virtio_gpu_object_reserve(bo, false);
- if (!ret) {
- dma_resv_add_excl_fence(bo->tbo.base.resv,
- &vgfb->fence->f);
- dma_fence_put(&vgfb->fence->f);
- vgfb->fence = NULL;
- virtio_gpu_object_unreserve(bo);
- virtio_gpu_object_wait(bo, false);
- }
+ (vgdev, 0,
+ plane->state->crtc_w,
+ plane->state->crtc_h,
+ 0, 0, objs, vgfb->fence);
+ dma_fence_wait(&vgfb->fence->f, true);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
}
if (plane->state->fb != old_state->fb) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index dc642a884b88..050d24c39a8f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -30,43 +30,9 @@
* device that might share buffers with virtgpu
*/
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
- /* should not happen */
- return ERR_PTR(-EINVAL);
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
- bo->tbo.ttm->num_pages);
-}
-
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *table)
{
return ERR_PTR(-ENODEV);
}
-
-void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- int ret;
-
- ret = virtio_gpu_object_kmap(bo);
- if (ret)
- return NULL;
- return bo->vmap;
-}
-
-void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
-}
-
-int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(obj, vma);
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
deleted file mode 100644
index f87903641847..000000000000
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Authors:
- * Dave Airlie
- * Alon Levy
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/delay.h>
-
-#include <drm/drm.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/virtgpu_drm.h>
-
-#include "virtgpu_drv.h"
-
-static struct
-virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
-{
- struct virtio_gpu_mman *mman;
- struct virtio_gpu_device *vgdev;
-
- mman = container_of(bdev, struct virtio_gpu_mman, bdev);
- vgdev = container_of(mman, struct virtio_gpu_device, mman);
- return vgdev;
-}
-
-int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct virtio_gpu_device *vgdev;
- int r;
-
- file_priv = filp->private_data;
- vgdev = file_priv->minor->dev->dev_private;
- if (vgdev == NULL) {
- DRM_ERROR(
- "filp->private_data->minor->dev->dev_private == NULL\n");
- return -EINVAL;
- }
- r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-
- return r;
-}
-
-static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
- uint32_t flags)
-{
- return 0;
-}
-
-static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *bo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)1;
- return 0;
-}
-
-static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
-{
- mem->mm_node = (void *)NULL;
-}
-
-static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
-{
- return 0;
-}
-
-static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
-{
- return 0;
-}
-
-static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- struct drm_printer *printer)
-{
-}
-
-static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
- .init = ttm_bo_man_init,
- .takedown = ttm_bo_man_takedown,
- .get_node = ttm_bo_man_get_node,
- .put_node = ttm_bo_man_put_node,
- .debug = ttm_bo_man_debug
-};
-
-static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- man->func = &virtio_gpu_bo_manager_func;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *placement)
-{
- static const struct ttm_place placements = {
- .fpfn = 0,
- .lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
- };
-
- placement->placement = &placements;
- placement->busy_placement = &placements;
- placement->num_placement = 1;
- placement->num_busy_placement = 1;
-}
-
-static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
- struct file *filp)
-{
- return 0;
-}
-
-static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
- switch (mem->mem_type) {
- case TTM_PL_SYSTEM:
- case TTM_PL_TT:
- /* system memory */
- return 0;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
-{
-}
-
-/*
- * TTM backend functions.
- */
-struct virtio_gpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct virtio_gpu_object *obj;
-};
-
-static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
- return 0;
-}
-
-static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
- struct virtio_gpu_device *vgdev =
- virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
-
- virtio_gpu_object_detach(vgdev, gtt->obj);
- return 0;
-}
-
-static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
-{
- struct virtio_gpu_ttm_tt *gtt =
- container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
-
- ttm_dma_tt_fini(&gtt->ttm);
- kfree(gtt);
-}
-
-static struct ttm_backend_func virtio_gpu_tt_func = {
- .bind = &virtio_gpu_ttm_tt_bind,
- .unbind = &virtio_gpu_ttm_tt_unbind,
- .destroy = &virtio_gpu_ttm_tt_destroy,
-};
-
-static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
- uint32_t page_flags)
-{
- struct virtio_gpu_device *vgdev;
- struct virtio_gpu_ttm_tt *gtt;
-
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
- return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_tt_func;
- gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
- return NULL;
- }
- return &gtt->ttm.ttm;
-}
-
-static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
-{
- struct virtio_gpu_object *bo;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
-
- if (bo->pages)
- virtio_gpu_object_free_sg_table(bo);
-}
-
-static struct ttm_bo_driver virtio_gpu_bo_driver = {
- .ttm_tt_create = &virtio_gpu_ttm_tt_create,
- .invalidate_caches = &virtio_gpu_invalidate_caches,
- .init_mem_type = &virtio_gpu_init_mem_type,
- .eviction_valuable = ttm_bo_eviction_valuable,
- .evict_flags = &virtio_gpu_evict_flags,
- .verify_access = &virtio_gpu_verify_access,
- .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
- .io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .swap_notify = &virtio_gpu_bo_swap_notify,
-};
-
-int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
-{
- int r;
-
- /* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&vgdev->mman.bdev,
- &virtio_gpu_bo_driver,
- vgdev->ddev->anon_inode->i_mapping,
- false);
- if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
- goto err_dev_init;
- }
-
- r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
- if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
- goto err_mm_init;
- }
- return 0;
-
-err_mm_init:
- ttm_bo_device_release(&vgdev->mman.bdev);
-err_dev_init:
- return r;
-}
-
-void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
-{
- ttm_bo_device_release(&vgdev->mman.bdev);
- DRM_INFO("virtio_gpu: ttm finalized\n");
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 7ac20490e1b4..74ad3bc3ebe8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -155,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
{
if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
kfree(vbuf->resp_buf);
- kfree(vbuf->data_buf);
+ kvfree(vbuf->data_buf);
kmem_cache_free(vgdev->vbufs, vbuf);
}
@@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
spin_unlock(&vgdev->ctrlq.qlock);
- list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
@@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
-
- list_del(&entry->list);
- free_vbuf(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
if (fence_id)
virtio_gpu_fence_event_process(vgdev, fence_id);
+
+ list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ if (entry->objs)
+ virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
+ list_del(&entry->list);
+ free_vbuf(vgdev, entry);
+ }
}
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
@@ -252,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
wake_up(&vgdev->cursorq.ack_queue);
}
-static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+/* Create sg_table from a vmalloc'd buffer. */
+static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
+{
+ int ret, s, i;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ struct page *pg;
+
+ if (WARN_ON(!PAGE_ALIGNED(data)))
+ return NULL;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
+ ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return NULL;
+ }
+
+ for_each_sg(sgt->sgl, sg, *sg_ents, i) {
+ pg = vmalloc_to_page(data);
+ if (!pg) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+ }
+
+ s = min_t(int, PAGE_SIZE, size);
+ sg_set_page(sg, pg, s, 0);
+
+ size -= s;
+ data += s;
+ }
+
+ return sgt;
+}
+
+static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct scatterlist *vout)
__releases(&vgdev->ctrlq.qlock)
__acquires(&vgdev->ctrlq.qlock)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vout, vresp;
+ struct scatterlist *sgs[3], vcmd, vresp;
int outcnt = 0, incnt = 0;
+ bool notify = false;
int ret;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return notify;
sg_init_one(&vcmd, vbuf->buf, vbuf->size);
sgs[outcnt + incnt] = &vcmd;
outcnt++;
- if (vbuf->data_size) {
- sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
- sgs[outcnt + incnt] = &vout;
+ if (vout) {
+ sgs[outcnt + incnt] = vout;
outcnt++;
}
@@ -292,32 +337,35 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
-
- if (!ret)
- ret = vq->num_free;
- return ret;
-}
-
-static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
-{
- int rc;
-
- spin_lock(&vgdev->ctrlq.qlock);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
- spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ return notify;
}
-static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence *fence)
+static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_ctrl_hdr *hdr,
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- int rc;
+ struct scatterlist *vout = NULL, sg;
+ struct sg_table *sgt = NULL;
+ bool notify;
+ int outcnt = 0;
+
+ if (vbuf->data_size) {
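+ /* vmalloc'd buffers can't be described by sg_init_one(); build a
+  * page-by-page sg_table for them instead. */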
+ if (is_vmalloc_addr(vbuf->data_buf)) {
+ sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
+ &outcnt);
+ if (!sgt)
+ return;
+ vout = sgt->sgl;
+ } else {
+ sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
+ vout = &sg;
+ outcnt = 1;
+ }
+ }
again:
spin_lock(&vgdev->ctrlq.qlock);
@@ -330,29 +378,47 @@ again:
* to wait for free space, which can result in fence ids being
* submitted out-of-order.
*/
- if (vq->num_free < 3) {
+ if (vq->num_free < 2 + outcnt) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 2 + outcnt);
goto again;
}
- if (fence)
+ if (hdr && fence) {
virtio_gpu_fence_emit(vgdev, hdr, fence);
- rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
+ }
+ notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
spin_unlock(&vgdev->ctrlq.qlock);
- return rc;
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
+
+ if (sgt) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
+static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}
-static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf)
+static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ bool notify;
int ret;
int outcnt;
if (!vgdev->vqs_ready)
- return -ENODEV;
+ return;
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -370,14 +436,13 @@ retry:
trace_virtio_gpu_cmd_queue(vq,
(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
- virtqueue_kick(vq);
+ notify = virtqueue_kick_prepare(vq);
}
spin_unlock(&vgdev->cursorq.qlock);
- if (!ret)
- ret = vq->num_free;
- return ret;
+ if (notify)
+ virtqueue_notify(vq);
}
/* just create gem objects for userspace and long lived objects,
@@ -388,6 +453,7 @@ retry:
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
@@ -395,6 +461,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -481,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint64_t offset,
- __le32 width, __le32 height,
- __le32 x, __le32 y,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -498,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->offset = cpu_to_le64(offset);
- cmd_p->r.width = width;
- cmd_p->r.height = height;
- cmd_p->r.x = x;
- cmd_p->r.y = y;
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
@@ -826,34 +895,38 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
- uint32_t resource_id)
+ struct virtio_gpu_object_array *objs)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_ctx_resource *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
@@ -861,6 +934,7 @@ void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
@@ -868,6 +942,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -888,12 +963,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
@@ -906,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
@@ -917,20 +995,24 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
- uint32_t resource_id, uint32_t ctx_id,
+ uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
+ struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
+ vbuf->objs = objs;
+
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->box = *box;
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ -940,7 +1022,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence *fence)
+ uint32_t ctx_id,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -950,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
vbuf->data_buf = data;
vbuf->data_size = data_size;
+ vbuf->objs = objs;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
@@ -965,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si, nents;
+ int si, nents, ret;
if (WARN_ON_ONCE(!obj->created))
return -EINVAL;
+ if (WARN_ON_ONCE(obj->pages))
+ return -EINVAL;
- if (!obj->pages) {
- int ret;
+ ret = drm_gem_shmem_pin(&obj->base.base);
+ if (ret < 0)
+ return -EINVAL;
- ret = virtio_gpu_object_get_sg_table(vgdev, obj);
- if (ret)
- return ret;
+ obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
+ if (obj->pages == NULL) {
+ drm_gem_shmem_unpin(&obj->base.base);
+ return -EINVAL;
}
if (use_dma_api) {
@@ -1014,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ if (WARN_ON_ONCE(!obj->pages))
+ return;
+
if (use_dma_api && obj->mapped) {
struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host process it ... */
@@ -1029,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
} else {
virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
}
+
+ sg_free_table(obj->pages);
+ obj->pages = NULL;
+
+ drm_gem_shmem_unpin(&obj->base.base);
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 927dafaebc76..74f703b8d22a 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -16,17 +16,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
u64 ret_overrun;
bool ret;
- spin_lock(&output->lock);
-
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
state = output->composer_state;
+ spin_unlock(&output->lock);
+
if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
@@ -48,8 +49,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
DRM_DEBUG_DRIVER("Composer worker already queued\n");
}
- spin_unlock(&output->lock);
-
return HRTIMER_RESTART;
}
@@ -85,7 +84,7 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- *vblank_time = output->vblank_hrtimer.node.expires;
+ *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
if (WARN_ON(*vblank_time == vblank->time))
return true;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 44ab9f8ef8be..d1fe144aa289 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -11,13 +11,14 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
@@ -83,7 +84,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
struct vkms_crtc_state *vkms_state =
@@ -103,6 +104,8 @@ static struct drm_driver vkms_driver = {
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = vkms_prime_import_sg_table,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -157,6 +160,14 @@ static int __init vkms_init(void)
if (ret)
goto out_unregister;
+ ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+ DMA_BIT_MASK(64));
+
+ if (ret) {
+ DRM_ERROR("Could not initialize DMA support\n");
+ goto out_fini;
+ }
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 5a95100fa18b..7d52e24564db 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -137,6 +137,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* Prime */
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
+
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 6489bfe0a149..2e01186fb943 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/dma-buf.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>
+#include <drm/drm_prime.h>
#include "vkms_drv.h"
@@ -218,3 +220,28 @@ out:
mutex_unlock(&vkms_obj->pages_lock);
return ret;
}
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
+{
+ struct vkms_gem_object *obj;
+ int npages;
+
+ obj = __vkms_gem_create(dev, attach->dmabuf->size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+ DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+ obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!obj->pages) {
+ vkms_gem_free_object(&obj->gem);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
+ return &obj->gem;
+}
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 6b28a326f8bb..15acdf2a7c0f 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -8,6 +8,7 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
select FB
+ select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 8841bd30e1e5..c877a21a0739 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
- vmwgfx_validation.o \
+ vmwgfx_validation.o vmwgfx_page_dirty.o \
ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index f2bfd3d80598..61414f105c67 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1280,7 +1280,6 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
return offset;
}
-
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
surf_size_struct baseLevelSize,
@@ -1375,4 +1374,236 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
return svga3dsurface_is_dx_screen_target_format(format);
}
+/**
+ * struct svga3dsurface_mip - Mipmap level information
+ * @bytes: Bytes required in the backing store of this mipmap level.
+ * @img_stride: Byte stride per image.
+ * @row_stride: Byte stride per block row.
+ * @size: The size of the mipmap.
+ */
+struct svga3dsurface_mip {
+ size_t bytes;
+ size_t img_stride;
+ size_t row_stride;
+ struct drm_vmw_size size;
+};
+
+/**
+ * struct svga3dsurface_cache - Cached surface information
+ * @desc: Pointer to the surface descriptor
+ * @mip: Array of mipmap level information. Valid size is @num_mip_levels.
+ * @mip_chain_bytes: Bytes required in the backing store for the whole chain
+ * of mip levels.
+ * @sheet_bytes: Bytes required in the backing store for a sheet
+ * representing a single sample.
+ * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in
+ * a chain.
+ * @num_layers: Number of slices in an array texture or number of faces in
+ * a cubemap texture.
+ */
+struct svga3dsurface_cache {
+ const struct svga3d_surface_desc *desc;
+ struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS];
+ size_t mip_chain_bytes;
+ size_t sheet_bytes;
+ u32 num_mip_levels;
+ u32 num_layers;
+};
+
+/**
+ * struct svga3dsurface_loc - Surface location
+ * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
+ * mip_level.
+ * @x: X coordinate.
+ * @y: Y coordinate.
+ * @z: Z coordinate.
+ */
+struct svga3dsurface_loc {
+ u32 sub_resource;
+ u32 x, y, z;
+};
+
+/**
+ * svga3dsurface_subres - Compute the subresource from layer and mipmap.
+ * @cache: Surface layout data.
+ * @mip_level: The mipmap level.
+ * @layer: The surface layer (face or array slice).
+ *
+ * Return: The subresource.
+ */
+static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache,
+ u32 mip_level, u32 layer)
+{
+ return cache->num_mip_levels * layer + mip_level;
+}
+
+/**
+ * svga3dsurface_setup_cache - Build a surface cache entry
+ * @size: The surface base level dimensions.
+ * @format: The surface format.
+ * @num_mip_levels: Number of mipmap levels.
+ * @num_layers: Number of layers.
+ * @num_samples: Number of samples.
+ * @cache: Pointer to a struct svga3dsurface_cache object to be filled in.
+ *
+ * Return: Zero on success, -EINVAL on invalid surface layout.
+ */
+static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size,
+ SVGA3dSurfaceFormat format,
+ u32 num_mip_levels,
+ u32 num_layers,
+ u32 num_samples,
+ struct svga3dsurface_cache *cache)
+{
+ const struct svga3d_surface_desc *desc;
+ u32 i;
+
+ memset(cache, 0, sizeof(*cache));
+ cache->desc = desc = svga3dsurface_get_desc(format);
+ cache->num_mip_levels = num_mip_levels;
+ cache->num_layers = num_layers;
+ for (i = 0; i < cache->num_mip_levels; i++) {
+ struct svga3dsurface_mip *mip = &cache->mip[i];
+
+ mip->size = svga3dsurface_get_mip_size(*size, i);
+ mip->bytes = svga3dsurface_get_image_buffer_size
+ (desc, &mip->size, 0);
+ mip->row_stride =
+ __KERNEL_DIV_ROUND_UP(mip->size.width,
+ desc->block_size.width) *
+ desc->bytes_per_block * num_samples;
+ if (!mip->row_stride)
+ goto invalid_dim;
+
+ mip->img_stride =
+ __KERNEL_DIV_ROUND_UP(mip->size.height,
+ desc->block_size.height) *
+ mip->row_stride;
+ if (!mip->img_stride)
+ goto invalid_dim;
+
+ cache->mip_chain_bytes += mip->bytes;
+ }
+ cache->sheet_bytes = cache->mip_chain_bytes * num_layers;
+ if (!cache->sheet_bytes)
+ goto invalid_dim;
+
+ return 0;
+
+invalid_dim:
+ VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n");
+ return -EINVAL;
+}
+
+/**
+ * svga3dsurface_get_loc - Get a surface location from an offset into the
+ * backing store
+ * @cache: Surface layout data.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ * @offset: Offset into the surface backing store.
+ */
+static inline void
+svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
+ struct svga3dsurface_loc *loc,
+ size_t offset)
+{
+ const struct svga3dsurface_mip *mip = &cache->mip[0];
+ const struct svga3d_surface_desc *desc = cache->desc;
+ u32 layer;
+ int i;
+
+ if (offset >= cache->sheet_bytes)
+ offset %= cache->sheet_bytes;
+
+ layer = offset / cache->mip_chain_bytes;
+ offset -= layer * cache->mip_chain_bytes;
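+ /* Find the mip level the remaining offset falls into. */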
+ for (i = 0; i < cache->num_mip_levels; ++i, ++mip) {
+ if (mip->bytes > offset)
+ break;
+ offset -= mip->bytes;
+ }
+
+ loc->sub_resource = svga3dsurface_subres(cache, i, layer);
+ loc->z = offset / mip->img_stride;
+ offset -= loc->z * mip->img_stride;
+ loc->z *= desc->block_size.depth;
+ loc->y = offset / mip->row_stride;
+ offset -= loc->y * mip->row_stride;
+ loc->y *= desc->block_size.height;
+ loc->x = offset / desc->bytes_per_block;
+ loc->x *= desc->block_size.width;
+}
+
+/**
+ * svga3dsurface_inc_loc - Clamp-increment a surface location by one block
+ * size in each dimension.
+ * @cache: Surface layout data.
+ * @loc: Pointer to a struct svga3dsurface_loc to be incremented.
+ *
+ * When computing the size of a range as size = end - start, the range does not
+ * include the end element. However a location representing the last byte
+ * of a touched region in the backing store *is* included in the range.
+ * This function modifies such a location to match the end definition
+ * given as start + size which is the one used in a SVGA3dBox.
+ */
+static inline void
+svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache,
+ struct svga3dsurface_loc *loc)
+{
+ const struct svga3d_surface_desc *desc = cache->desc;
+ u32 mip = loc->sub_resource % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+
+ loc->sub_resource++;
+ loc->x += desc->block_size.width;
+ if (loc->x > size->width)
+ loc->x = size->width;
+ loc->y += desc->block_size.height;
+ if (loc->y > size->height)
+ loc->y = size->height;
+ loc->z += desc->block_size.depth;
+ if (loc->z > size->depth)
+ loc->z = size->depth;
+}
+
+/**
+ * svga3dsurface_min_loc - The start location in a subresource
+ * @cache: Surface layout data.
+ * @sub_resource: The subresource.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ */
+static inline void
+svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
+ u32 sub_resource,
+ struct svga3dsurface_loc *loc)
+{
+ loc->sub_resource = sub_resource;
+ loc->x = loc->y = loc->z = 0;
+}
+
+/**
+ * svga3dsurface_max_loc - The end location in a subresource
+ * @cache: Surface layout data.
+ * @sub_resource: The subresource.
+ * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
+ *
+ * Following the end definition given in svga3dsurface_inc_loc(), compute
+ * the end location of a surface subresource.
+ */
+static inline void
+svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
+ u32 sub_resource,
+ struct svga3dsurface_loc *loc)
+{
+ const struct drm_vmw_size *size;
+ u32 mip;
+
+ loc->sub_resource = sub_resource + 1;
+ mip = sub_resource % cache->num_mip_levels;
+ size = &cache->mip[mip].size;
+ loc->x = size->width;
+ loc->y = size->height;
+ loc->z = size->depth;
+}
+
#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aad8d8140259..8b71bf6b58ef 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -462,6 +462,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+ WARN_ON(vmw_bo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -475,8 +477,11 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+ struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
- vmw_bo_unmap(&vmw_user_bo->vbo);
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_unmap(vbo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -511,8 +516,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
memset(vmw_bo, 0, sizeof(*vmw_bo));
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->base.priority = 3;
-
- INIT_LIST_HEAD(&vmw_bo->res_list);
+ vmw_bo->res_tree = RB_ROOT;
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
@@ -566,7 +570,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
switch (ref_type) {
case TTM_REF_SYNCCPU_WRITE:
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
break;
default:
WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -682,12 +686,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
struct ttm_object_file *tfile,
uint32_t flags)
{
+ bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
struct ttm_buffer_object *bo = &user_bo->vbo.base;
bool existed;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
- bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
lret = dma_resv_wait_timeout_rcu
@@ -700,15 +704,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
return 0;
}
- ret = ttm_bo_synccpu_write_grab
- (bo, !!(flags & drm_vmw_synccpu_dontblock));
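+ /* Open-coded ttm_bo_synccpu_write_grab(): reserve, wait for idle,
+  * then take a cpu_writers reference. */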
+ ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_bo_wait(bo, true, nonblock);
+ if (likely(ret == 0))
+ atomic_inc(&user_bo->vbo.cpu_writers);
+
+ ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
- ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+ atomic_dec(&user_bo->vbo.cpu_writers);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b38bcb032c99..e962048f65d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -576,8 +576,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
else
dev_priv->map_mode = vmw_dma_map_populate;
- /* No TTM coherent page pool? FIXME: Ask TTM instead! */
- if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
(dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
@@ -827,9 +826,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_no_fman;
}
+ drm_vma_offset_manager_init(&dev_priv->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
+ &dev_priv->vma_manager,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -986,6 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
(void) ttm_bo_device_release(&dev_priv->bdev);
+ drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5eb73ded8e07..a31e726d6d71 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -56,9 +56,9 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180704"
+#define VMWGFX_DRIVER_DATE "20190328"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_MINOR 16
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -100,21 +100,26 @@ struct vmw_fpriv {
/**
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
- * @res_list: List of resources using this buffer object as a backing MOB
+ * @res_tree: RB tree of resources using this buffer object as a backing MOB
* @pin_count: pin depth
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
* @map: Kmap object for semi-persistent mappings
* @res_prios: Eviction priority counts for attached resources
+ * @dirty: structure for user-space dirty-tracking
*/
struct vmw_buffer_object {
struct ttm_buffer_object base;
- struct list_head res_list;
+ struct rb_root res_tree;
s32 pin_count;
+ atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
u32 res_prios[TTM_MAX_BO_PRIORITY];
+ struct vmw_bo_dirty *dirty;
};
/**
@@ -145,7 +150,8 @@ struct vmw_res_func;
* @res_dirty: Resource contains data not yet in the backup buffer. Protected
* by resource reserved.
* @backup_dirty: Backup buffer contains data not yet in the HW resource.
- * Protecte by resource reserved.
+ * Protected by resource reserved.
+ * @coherent: Emulate coherency by tracking vm accesses.
* @backup: The backup buffer if any. Protected by resource reserved.
* @backup_offset: Offset into the backup buffer if any. Protected by resource
* reserved. Note that only a few resource types can have a @backup_offset
@@ -154,29 +160,32 @@ struct vmw_res_func;
* pin-count greater than zero. It is not on the resource LRU lists and its
* backup buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
+ * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
- * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
* @res_free: The resource destructor.
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
+struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
u32 used_prio;
unsigned long backup_size;
- bool res_dirty;
- bool backup_dirty;
+ u32 res_dirty : 1;
+ u32 backup_dirty : 1;
+ u32 coherent : 1;
struct vmw_buffer_object *backup;
unsigned long backup_offset;
unsigned long pin_count;
const struct vmw_res_func *func;
+ struct rb_node mob_node;
struct list_head lru_head;
- struct list_head mob_head;
struct list_head binding_head;
+ struct vmw_resource_dirty *dirty;
void (*res_free) (struct vmw_resource *res);
void (*hw_destroy) (struct vmw_resource *res);
};
@@ -438,6 +447,7 @@ struct vmw_private {
struct vmw_fifo_state fifo;
struct drm_device *dev;
+ struct drm_vma_offset_manager vma_manager;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
@@ -674,7 +684,8 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
-extern int vmw_resource_validate(struct vmw_resource *res, bool intr);
+extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -716,6 +727,10 @@ extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end);
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault);
/**
* vmw_resource_mob_attached - Whether a resource currently has a mob attached
@@ -725,7 +740,7 @@ void vmw_resource_mob_detach(struct vmw_resource *res);
*/
static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
{
- return !list_empty(&res->mob_head);
+ return !RB_EMPTY_NODE(&res->mob_node);
}
/**
@@ -1403,6 +1418,17 @@ int vmw_host_log(const char *log);
#define VMW_DEBUG_USER(fmt, ...) \
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+/* Resource dirtying - vmwgfx_page_dirty.c */
+void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
+int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
+void vmw_bo_dirty_clear_res(struct vmw_resource *res);
+void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end);
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
+
/**
* VMW_DEBUG_KMS - Debug output for kernel mode-setting
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index ff86d49dc5e8..934ad7c0c342 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2560,7 +2560,6 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
offsetof(typeof(*cmd), sid));
cmd = container_of(header, typeof(*cmd), header);
-
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
new file mode 100644
index 000000000000..f07aa857587c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#include "vmwgfx_drv.h"
+
+/*
+ * Different methods for tracking dirty:
+ * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
+ * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
+ * accesses in the VM mkwrite() callback
+ */
+enum vmw_bo_dirty_method {
+ VMW_BO_DIRTY_PAGETABLE,
+ VMW_BO_DIRTY_MKWRITE,
+};
+
+/*
+ * A scan finding no dirtied pages triggers a transition to the _MKWRITE
+ * method; a scan finding more than a certain percentage of dirty pages
+ * triggers a transition to the _PAGETABLE method. This is the number of
+ * consecutive triggers needed before the method actually changes.
+ */
+#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2
+
+/* Percentage to trigger a transition to the _PAGETABLE method */
+#define VMW_DIRTY_PERCENTAGE 10
+
+/**
+ * struct vmw_bo_dirty - Dirty information for buffer objects
+ * @start: First currently dirty bit
+ * @end: Last currently dirty bit + 1
+ * @method: The currently used dirty method
+ * @change_count: Number of consecutive method change triggers
+ * @ref_count: Reference count for this structure
+ * @bitmap_size: The size of the bitmap in bits. Typically equal to the
+ * number of pages in the bo.
+ * @size: The accounting size for this struct.
+ * @bitmap: A bitmap where each bit represents a page. A set bit means a
+ * dirty page.
+ */
+struct vmw_bo_dirty {
+ unsigned long start;
+ unsigned long end;
+ enum vmw_bo_dirty_method method;
+ unsigned int change_count;
+ unsigned int ref_count;
+ unsigned long bitmap_size;
+ size_t size;
+ unsigned long bitmap[0];
+};
+
+/**
+ * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
+ * @vbo: The buffer object to scan
+ *
+ * Scans the pagetable for dirty bits. Clear those bits and modify the
+ * dirty structure with the results. This function may change the
+ * dirty-tracking method.
+ */
+static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t num_marked;
+
+ num_marked = clean_record_shared_mapping_range
+ (mapping,
+ offset, dirty->bitmap_size,
+ offset, &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ if (num_marked == 0)
+ dirty->change_count++;
+ else
+ dirty->change_count = 0;
+
+ if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
+ dirty->change_count = 0;
+ dirty->method = VMW_BO_DIRTY_MKWRITE;
+ wp_shared_mapping_range(mapping,
+ offset, dirty->bitmap_size);
+ clean_record_shared_mapping_range(mapping,
+ offset, dirty->bitmap_size,
+ offset, &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ }
+}
+
+/**
+ * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
+ * @vbo: The buffer object to scan
+ *
+ * Write-protect pages written to so that subsequent write accesses will
+ * trigger a call to mkwrite.
+ *
+ * This function may change the dirty-tracking method.
+ */
+static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t num_marked;
+
+ if (dirty->end <= dirty->start)
+ return;
+
+ num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
+ dirty->start + offset,
+ dirty->end - dirty->start);
+
+ if (100UL * num_marked / dirty->bitmap_size >
+ VMW_DIRTY_PERCENTAGE) {
+ dirty->change_count++;
+ } else {
+ dirty->change_count = 0;
+ }
+
+ if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
+ pgoff_t start = 0;
+ pgoff_t end = dirty->bitmap_size;
+
+ dirty->method = VMW_BO_DIRTY_PAGETABLE;
+ clean_record_shared_mapping_range(mapping, offset, end, offset,
+ &dirty->bitmap[0],
+ &start, &end);
+ bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
+ if (dirty->start < dirty->end)
+ bitmap_set(&dirty->bitmap[0], dirty->start,
+ dirty->end - dirty->start);
+ dirty->change_count = 0;
+ }
+}
+
+/**
+ * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
+ * tracking structure
+ * @vbo: The buffer object to scan
+ *
+ * This function may change the dirty tracking method.
+ */
+void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
+ vmw_bo_dirty_scan_pagetable(vbo);
+ else
+ vmw_bo_dirty_scan_mkwrite(vbo);
+}
+
+/**
+ * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
+ * an unmap_mapping_range operation.
+ * @vbo: The buffer object.
+ * @start: First page of the range within the buffer object.
+ * @end: Last page of the range within the buffer object + 1.
+ *
+ * If we're using the _PAGETABLE scan method, we may leak dirty pages
+ * when calling unmap_mapping_range(). This function makes sure we pick
+ * up all dirty pages.
+ */
+static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+
+ if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
+ return;
+
+ wp_shared_mapping_range(mapping, start + offset, end - start);
+ clean_record_shared_mapping_range(mapping, start + offset,
+ end - start, offset,
+ &dirty->bitmap[0], &dirty->start,
+ &dirty->end);
+}
+
+/**
+ * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
+ * @vbo: The buffer object.
+ * @start: First page of the range within the buffer object.
+ * @end: Last page of the range within the buffer object + 1.
+ *
+ * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
+ */
+void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+ pgoff_t start, pgoff_t end)
+{
+ unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+
+ vmw_bo_dirty_pre_unmap(vbo, start, end);
+ unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
+ (loff_t) (end - start) << PAGE_SHIFT);
+}
+
+/**
+ * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
+ * @vbo: The buffer object
+ *
+ * This function registers a dirty-tracking user with a buffer object.
+ * A user can be, for example, a resource or a vma in a special user-space
+ * mapping.
+ *
+ * Return: Zero on success, -ENOMEM on memory allocation failure.
+ */
+int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t num_pages = vbo->base.num_pages;
+ size_t size, acc_size;
+ int ret;
+ static struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
+ if (dirty) {
+ dirty->ref_count++;
+ return 0;
+ }
+
+ size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
+ acc_size = ttm_round_pot(size);
+ ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+ if (ret) {
+ VMW_DEBUG_USER("Out of graphics memory for buffer object "
+ "dirty tracker.\n");
+ return ret;
+ }
+ dirty = kvzalloc(size, GFP_KERNEL);
+ if (!dirty) {
+ ret = -ENOMEM;
+ goto out_no_dirty;
+ }
+
+ dirty->size = acc_size;
+ dirty->bitmap_size = num_pages;
+ dirty->start = dirty->bitmap_size;
+ dirty->end = 0;
+ dirty->ref_count = 1;
+ if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
+ dirty->method = VMW_BO_DIRTY_PAGETABLE;
+ } else {
+ struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+
+ dirty->method = VMW_BO_DIRTY_MKWRITE;
+
+ /* Write-protect and then pick up already dirty bits */
+ wp_shared_mapping_range(mapping, offset, num_pages);
+ clean_record_shared_mapping_range(mapping, offset, num_pages,
+ offset,
+ &dirty->bitmap[0],
+ &dirty->start, &dirty->end);
+ }
+
+ vbo->dirty = dirty;
+
+ return 0;
+
+out_no_dirty:
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+ return ret;
+}
+
+/**
+ * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
+ * @vbo: The buffer object
+ *
+ * This function releases a dirty-tracking user from a buffer object.
+ * If the reference count reaches zero, then the dirty-tracking object is
+ * freed and the pointer to it cleared.
+ */
+void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
+{
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ if (dirty && --dirty->ref_count == 0) {
+ size_t acc_size = dirty->size;
+
+ kvfree(dirty);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+ vbo->dirty = NULL;
+ }
+}
+
+/**
+ * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
+ * its backing mob.
+ * @res: The resource
+ *
+ * This function will pick up all dirty ranges affecting the resource from
+ * its backup mob, and call vmw_resource_dirty_update() once for each
+ * range. The transferred ranges will be cleared from the backing mob's
+ * dirty tracking.
+ */
+void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
+{
+ struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+ pgoff_t start, cur, end;
+ unsigned long res_start = res->backup_offset;
+ unsigned long res_end = res->backup_offset + res->backup_size;
+
+ WARN_ON_ONCE(res_start & ~PAGE_MASK);
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ cur = max(res_start, dirty->start);
+ res_end = min(res_end, dirty->end);
+ while (cur < res_end) {
+ unsigned long num;
+
+ start = find_next_bit(&dirty->bitmap[0], res_end, cur);
+ if (start >= res_end)
+ break;
+
+ end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
+ cur = end + 1;
+ num = end - start;
+ bitmap_clear(&dirty->bitmap[0], start, num);
+ vmw_resource_dirty_update(res, start, end);
+ }
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
+/**
+ * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
+ * its backing mob.
+ * @res: The resource
+ *
+ * This function will clear all dirty ranges affecting the resource from
+ * its backup mob's dirty tracking.
+ */
+void vmw_bo_dirty_clear_res(struct vmw_resource *res)
+{
+ unsigned long res_start = res->backup_offset;
+ unsigned long res_end = res->backup_offset + res->backup_size;
+ struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ res_start >>= PAGE_SHIFT;
+ res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);
+
+ if (res_start >= dirty->end || res_end <= dirty->start)
+ return;
+
+ res_start = max(res_start, dirty->start);
+ res_end = min(res_end, dirty->end);
+ bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
+
+ if (res_start <= dirty->start && res_end > dirty->start)
+ dirty->start = res_end;
+ if (res_start < dirty->end && res_end >= dirty->end)
+ dirty->end = res_start;
+}
+
+vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ vm_fault_t ret;
+ unsigned long page_offset;
+ unsigned int save_flags;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, typeof(*vbo), base);
+
+ /*
+ * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly;
+ * clear FAULT_FLAG_ALLOW_RETRY so the TTM helpers never return it.
+ */
+ save_flags = vmf->flags;
+ vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ vmf->flags = save_flags;
+ if (ret)
+ return ret;
+
+ page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
+ if (unlikely(page_offset >= bo->num_pages)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
+ !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
+ struct vmw_bo_dirty *dirty = vbo->dirty;
+
+ __set_bit(page_offset, &dirty->bitmap[0]);
+ dirty->start = min(dirty->start, page_offset);
+ dirty->end = max(dirty->end, page_offset + 1);
+ }
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct vmw_buffer_object *vbo =
+ container_of(bo, struct vmw_buffer_object, base);
+ pgoff_t num_prefault;
+ pgprot_t prot;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
+ TTM_BO_VM_NUM_PREFAULT;
+
+ if (vbo->dirty) {
+ pgoff_t allowed_prefault;
+ unsigned long page_offset;
+
+ page_offset = vmf->pgoff -
+ drm_vma_node_start(&bo->base.vma_node);
+ if (page_offset >= bo->num_pages ||
+ vmw_resources_clean(vbo, page_offset,
+ page_offset + PAGE_SIZE,
+ &allowed_prefault)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ num_prefault = min(num_prefault, allowed_prefault);
+ }
+
+ /*
+ * If we don't track dirty using the MKWRITE method, make sure
+ * the page protection is write-enabled so we don't get
+ * a lot of unnecessary write faults.
+ */
+ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
+ prot = vma->vm_page_prot;
+ else
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+out_unlock:
+ dma_resv_unlock(bo->base.resv);
+
+ return ret;
+}
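The new file keeps all of its dirty state in a per-page bitmap plus a conservative [start, end) page window: marking a page sets its bit and widens the window, and the transfer loop walks maximal runs of set bits and clears them as it goes. Below is a self-contained C sketch of that bookkeeping, with plain loops standing in for the kernel's find_next_bit()/find_next_zero_bit() helpers; all names in it are illustrative, not part of the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative model of the driver's per-page dirty bookkeeping. */
struct dirty_model {
        unsigned long start;            /* first possibly-dirty page */
        unsigned long end;              /* last possibly-dirty page + 1 */
        unsigned long num_pages;
        unsigned char *bits;            /* one byte per page, for simplicity */
};

static void dirty_mark(struct dirty_model *d, unsigned long page)
{
        d->bits[page] = 1;
        if (page < d->start)
                d->start = page;
        if (page + 1 > d->end)
                d->end = page + 1;
}

/* Walk maximal dirty runs in [start, end), as the transfer loop does. */
static void dirty_walk(struct dirty_model *d)
{
        unsigned long cur = d->start;

        while (cur < d->end) {
                unsigned long run_start, run_end;

                while (cur < d->end && !d->bits[cur])
                        cur++;          /* find_next_bit() equivalent */
                if (cur >= d->end)
                        break;
                run_start = cur;
                while (cur < d->end && d->bits[cur])
                        cur++;          /* find_next_zero_bit() equivalent */
                run_end = cur;
                memset(&d->bits[run_start], 0, run_end - run_start);
                printf("dirty range: pages [%lu, %lu)\n", run_start, run_end);
        }
        /* Reset the window to "empty": start > end, as the driver does. */
        d->start = d->num_pages;
        d->end = 0;
}

int main(void)
{
        struct dirty_model d = {
                .start = 8, .end = 0, .num_pages = 8,
                .bits = calloc(8, 1),
        };

        if (!d.bits)
                return 1;
        dirty_mark(&d, 2);
        dirty_mark(&d, 3);
        dirty_mark(&d, 6);
        dirty_walk(&d);         /* prints [2, 4) and [6, 7) */
        free(d.bits);
        return 0;
}

Keeping the window next to the bitmap is what lets an empty scan bail out early (dirty->end <= dirty->start) without touching the bitmap at all.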
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5581a7826b4c..c8441030637a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -40,11 +40,24 @@
void vmw_resource_mob_attach(struct vmw_resource *res)
{
struct vmw_buffer_object *backup = res->backup;
+ struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
dma_resv_assert_held(res->backup->base.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio;
- list_add_tail(&res->mob_head, &backup->res_list);
+
+ while (*new) {
+ struct vmw_resource *this =
+ container_of(*new, struct vmw_resource, mob_node);
+
+ parent = *new;
+ new = (res->backup_offset < this->backup_offset) ?
+ &((*new)->rb_left) : &((*new)->rb_right);
+ }
+
+ rb_link_node(&res->mob_node, parent, new);
+ rb_insert_color(&res->mob_node, &backup->res_tree);
+
vmw_bo_prio_add(backup, res->used_prio);
}
@@ -58,7 +71,8 @@ void vmw_resource_mob_detach(struct vmw_resource *res)
dma_resv_assert_held(backup->base.base.resv);
if (vmw_resource_mob_attached(res)) {
- list_del_init(&res->mob_head);
+ rb_erase(&res->mob_node, &backup->res_tree);
+ RB_CLEAR_NODE(&res->mob_node);
vmw_bo_prio_del(backup, res->used_prio);
}
}
@@ -119,6 +133,10 @@ static void vmw_resource_release(struct kref *kref)
}
res->backup_dirty = false;
vmw_resource_mob_detach(res);
+ if (res->dirty)
+ res->func->dirty_free(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
ttm_bo_unreserve(bo);
vmw_bo_unreference(&res->backup);
}
@@ -200,15 +218,17 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
res->res_free = res_free;
res->dev_priv = dev_priv;
res->func = func;
+ RB_CLEAR_NODE(&res->mob_node);
INIT_LIST_HEAD(&res->lru_head);
- INIT_LIST_HEAD(&res->mob_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
res->backup = NULL;
res->backup_offset = 0;
res->backup_dirty = false;
res->res_dirty = false;
+ res->coherent = false;
res->used_prio = 3;
+ res->dirty = NULL;
if (delay_id)
return 0;
else
@@ -373,7 +393,8 @@ out_no_bo:
* should be retried once resources have been freed up.
*/
static int vmw_resource_do_validate(struct vmw_resource *res,
- struct ttm_validate_buffer *val_buf)
+ struct ttm_validate_buffer *val_buf,
+ bool dirtying)
{
int ret = 0;
const struct vmw_res_func *func = res->func;
@@ -395,6 +416,39 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
vmw_resource_mob_attach(res);
}
+ /*
+ * Handle the case where the backup mob is marked coherent but
+ * the resource isn't.
+ */
+ if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
+ !res->coherent) {
+ if (res->backup->dirty && !res->dirty) {
+ ret = func->dirty_alloc(res);
+ if (ret)
+ return ret;
+ } else if (!res->backup->dirty && res->dirty) {
+ func->dirty_free(res);
+ }
+ }
+
+ /*
+ * Transfer the dirty regions to the resource and update
+ * the resource.
+ */
+ if (res->dirty) {
+ if (dirtying && !res->res_dirty) {
+ pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP
+ (res->backup_offset + res->backup_size,
+ PAGE_SIZE);
+
+ vmw_bo_dirty_unmap(res->backup, start, end);
+ }
+
+ vmw_bo_dirty_transfer_to_res(res);
+ return func->dirty_sync(res);
+ }
+
return 0;
out_bind_failed:
@@ -433,16 +487,28 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup && new_backup != res->backup) {
if (res->backup) {
vmw_resource_mob_detach(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->backup);
vmw_bo_unreference(&res->backup);
}
if (new_backup) {
res->backup = vmw_bo_reference(new_backup);
+
+ /*
+ * The validation code should already have added a
+ * dirty tracker here.
+ */
+ WARN_ON(res->coherent && !new_backup->dirty);
+
vmw_resource_mob_attach(res);
} else {
res->backup = NULL;
}
+ } else if (switch_backup && res->coherent) {
+ vmw_bo_dirty_release(res->backup);
}
+
if (switch_backup)
res->backup_offset = new_backup_offset;
@@ -492,8 +558,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
- true);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
@@ -623,6 +688,7 @@ out_no_unbind:
* to the device.
* @res: The resource to make visible to the device.
* @intr: Perform waits interruptible if possible.
+ * @dirtying: Pending GPU operation will dirty the resource
*
* On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
@@ -632,7 +698,8 @@ out_no_unbind:
* Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
* on failure.
*/
-int vmw_resource_validate(struct vmw_resource *res, bool intr)
+int vmw_resource_validate(struct vmw_resource *res, bool intr,
+ bool dirtying)
{
int ret;
struct vmw_resource *evict_res;
@@ -649,7 +716,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
if (res->backup)
val_buf.bo = &res->backup->base;
do {
- ret = vmw_resource_do_validate(res, &val_buf);
+ ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
break;
@@ -712,19 +779,20 @@ out_no_validate:
*/
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
-
- struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
.num_shared = 0
};
dma_resv_assert_held(vbo->base.base.resv);
- list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
- if (!res->func->unbind)
- continue;
+ while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
+ struct rb_node *node = vbo->res_tree.rb_node;
+ struct vmw_resource *res =
+ container_of(node, struct vmw_resource, mob_node);
+
+ if (!WARN_ON_ONCE(!res->func->unbind))
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
- (void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
vmw_resource_mob_detach(res);
@@ -948,7 +1016,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
/* Do we really need to pin the MOB as well? */
vmw_bo_pin_reserved(vbo, true);
}
- ret = vmw_resource_validate(res, interruptible);
+ ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
ttm_bo_unreserve(&vbo->base);
if (ret)
@@ -1008,3 +1076,101 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
return res->func->res_type;
}
+
+/**
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
+ * sequential range of touched backing store memory.
+ * @res: The resource.
+ * @start: The first page touched.
+ * @end: The last page touched + 1.
+ */
+void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
+ pgoff_t end)
+{
+ if (res->dirty)
+ res->func->dirty_range_add(res, start << PAGE_SHIFT,
+ end << PAGE_SHIFT);
+}
+
+/**
+ * vmw_resources_clean - Clean resources intersecting a mob range
+ * @vbo: The mob buffer object
+ * @start: The mob page offset starting the range
+ * @end: The mob page offset ending the range
+ * @num_prefault: Returns how many pages including the first have been
+ * cleaned and are ok to prefault
+ */
+int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+ pgoff_t end, pgoff_t *num_prefault)
+{
+ struct rb_node *cur = vbo->res_tree.rb_node;
+ struct vmw_resource *found = NULL;
+ unsigned long res_start = start << PAGE_SHIFT;
+ unsigned long res_end = end << PAGE_SHIFT;
+ unsigned long last_cleaned = 0;
+
+ /*
+ * Find the resource with the lowest backup_offset that intersects the
+ * range.
+ */
+ while (cur) {
+ struct vmw_resource *cur_res =
+ container_of(cur, struct vmw_resource, mob_node);
+
+ if (cur_res->backup_offset >= res_end) {
+ cur = cur->rb_left;
+ } else if (cur_res->backup_offset + cur_res->backup_size <=
+ res_start) {
+ cur = cur->rb_right;
+ } else {
+ found = cur_res;
+ cur = cur->rb_left;
+ /* Continue to look for resources with lower offsets */
+ }
+ }
+
+ /*
+ * In order of increasing backup_offset, clean dirty resources
+ * intersecting the range.
+ */
+ while (found) {
+ if (found->res_dirty) {
+ int ret;
+
+ if (!found->func->clean)
+ return -EINVAL;
+
+ ret = found->func->clean(found);
+ if (ret)
+ return ret;
+
+ found->res_dirty = false;
+ }
+ last_cleaned = found->backup_offset + found->backup_size;
+ cur = rb_next(&found->mob_node);
+ if (!cur)
+ break;
+
+ found = container_of(cur, struct vmw_resource, mob_node);
+ if (found->backup_offset >= res_end)
+ break;
+ }
+
+ /*
+ * Set the number of pages allowed for prefaulting, and fence the buffer object
+ */
+ *num_prefault = 1;
+ if (last_cleaned > res_start) {
+ struct ttm_buffer_object *bo = &vbo->base;
+
+ *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
+ PAGE_SIZE);
+ vmw_bo_fence_single(bo, NULL);
+ if (bo->moving)
+ dma_fence_put(bo->moving);
+ bo->moving = dma_fence_get
+ (dma_resv_get_excl(bo->base.resv));
+ }
+
+ return 0;
+}
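The search loop in vmw_resources_clean() above finds the intersecting resource with the lowest backup_offset by remembering every intersecting node it meets and then continuing down the left subtree. A standalone sketch of the same search over a plain binary search tree ordered by offset (illustrative types; the driver uses the kernel rb-tree API):

#include <stdio.h>

/* Illustrative node: an interval [offset, offset + size), ordered by offset. */
struct node {
        unsigned long offset, size;
        struct node *left, *right;
};

/*
 * Find the node with the lowest offset whose interval intersects
 * [start, end): remember each intersecting node and keep descending
 * left, exactly like the search loop in vmw_resources_clean().
 */
static struct node *lowest_intersect(struct node *cur,
                                     unsigned long start, unsigned long end)
{
        struct node *found = NULL;

        while (cur) {
                if (cur->offset >= end) {
                        cur = cur->left;
                } else if (cur->offset + cur->size <= start) {
                        cur = cur->right;
                } else {
                        found = cur;
                        cur = cur->left; /* a lower intersecting offset may exist */
                }
        }
        return found;
}

int main(void)
{
        /* Tree:        [40,60)
         *             /       \
         *        [10,30)     [70,90)
         */
        struct node a = { 10, 20, NULL, NULL };
        struct node c = { 70, 20, NULL, NULL };
        struct node root = { 40, 20, &a, &c };
        struct node *n = lowest_intersect(&root, 25, 80);

        if (n)
                printf("lowest intersecting interval: [%lu, %lu)\n",
                       n->offset, n->offset + n->size);
        return 0;
}

Because the nodes are ordered by offset only, a left subtree can still hold an intersecting interval, which is why the loop keeps descending after a hit instead of returning immediately.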
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index 984e588c62ca..3b7438b2d289 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -71,6 +71,13 @@ struct vmw_user_resource_conv {
* @commit_notify: If the resource is a command buffer managed resource,
* callback to notify that a define or remove command
* has been committed to the device.
+ * @dirty_alloc: Allocate a dirty tracker. NULL if dirty-tracking is not
+ * supported.
+ * @dirty_free: Free the dirty tracker.
+ * @dirty_sync: Upload the dirty mob contents to the resource.
+ * @dirty_range_add: Add a sequential dirty range to the resource
+ * dirty tracker.
+ * @clean: Clean the resource.
*/
struct vmw_res_func {
enum vmw_res_type res_type;
@@ -90,6 +97,12 @@ struct vmw_res_func {
struct ttm_validate_buffer *val_buf);
void (*commit_notify)(struct vmw_resource *res,
enum vmw_cmdbuf_res_state state);
+ int (*dirty_alloc)(struct vmw_resource *res);
+ void (*dirty_free)(struct vmw_resource *res);
+ int (*dirty_sync)(struct vmw_resource *res);
+ void (*dirty_range_add)(struct vmw_resource *res, size_t start,
+ size_t end);
+ int (*clean)(struct vmw_resource *res);
};
/**
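Taken together, the new callbacks give every resource type the same dirty lifecycle: dirty_alloc() and dirty_free() bracket the tracker, dirty_range_add() accumulates ranges, and dirty_sync() flushes them to the device. A minimal userspace model of such an ops table, with a single pending byte range standing in for the real per-type state (all names illustrative):

#include <stdio.h>

struct res;

/* Illustrative per-type ops table with a dirty lifecycle. */
struct res_ops {
        int  (*dirty_alloc)(struct res *r);
        void (*dirty_free)(struct res *r);
        void (*dirty_range_add)(struct res *r, size_t start, size_t end);
        int  (*dirty_sync)(struct res *r);
};

struct res {
        const struct res_ops *ops;
        size_t pending_start, pending_end;      /* stand-in dirty state */
};

static int demo_alloc(struct res *r)
{
        r->pending_start = (size_t)-1;
        r->pending_end = 0;
        return 0;
}

static void demo_free(struct res *r) { (void)r; }

static void demo_range_add(struct res *r, size_t start, size_t end)
{
        if (start < r->pending_start)
                r->pending_start = start;
        if (end > r->pending_end)
                r->pending_end = end;
}

static int demo_sync(struct res *r)
{
        printf("sync bytes [%zu, %zu)\n", r->pending_start, r->pending_end);
        r->pending_start = (size_t)-1;
        r->pending_end = 0;
        return 0;
}

static const struct res_ops demo_ops = {
        .dirty_alloc = demo_alloc,
        .dirty_free = demo_free,
        .dirty_range_add = demo_range_add,
        .dirty_sync = demo_sync,
};

int main(void)
{
        struct res r = { .ops = &demo_ops };

        r.ops->dirty_alloc(&r);
        r.ops->dirty_range_add(&r, 4096, 8192);
        r.ops->dirty_range_add(&r, 0, 4096);
        r.ops->dirty_sync(&r);  /* prints [0, 8192) */
        r.ops->dirty_free(&r);
        return 0;
}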
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 29d8794f0421..32b9131b2bae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -68,6 +68,20 @@ struct vmw_surface_offset {
uint32_t bo_offset;
};
+/**
+ * vmw_surface_dirty - Surface dirty-tracker
+ * @cache: Cached layout information of the surface.
+ * @size: Accounting size for the struct vmw_surface_dirty.
+ * @num_subres: Number of subresources.
+ * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
+ */
+struct vmw_surface_dirty {
+ struct svga3dsurface_cache cache;
+ size_t size;
+ u32 num_subres;
+ SVGA3dBox boxes[0];
+};
+
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
@@ -96,6 +110,13 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct drm_vmw_gb_surface_ref_ext_rep *rep,
struct drm_file *file_priv);
+static void vmw_surface_dirty_free(struct vmw_resource *res);
+static int vmw_surface_dirty_alloc(struct vmw_resource *res);
+static int vmw_surface_dirty_sync(struct vmw_resource *res);
+static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
+ size_t end);
+static int vmw_surface_clean(struct vmw_resource *res);
+
static const struct vmw_user_resource_conv user_surface_conv = {
.object_type = VMW_RES_SURFACE,
.base_obj_to_res = vmw_user_surface_base_to_res,
@@ -133,7 +154,12 @@ static const struct vmw_res_func vmw_gb_surface_func = {
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
- .unbind = vmw_gb_surface_unbind
+ .unbind = vmw_gb_surface_unbind,
+ .dirty_alloc = vmw_surface_dirty_alloc,
+ .dirty_free = vmw_surface_dirty_free,
+ .dirty_sync = vmw_surface_dirty_sync,
+ .dirty_range_add = vmw_surface_dirty_range_add,
+ .clean = vmw_surface_clean,
};
/**
@@ -336,7 +362,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
struct vmw_private *dev_priv = res->dev_priv;
- struct vmw_surface *srf;
void *cmd;
if (res->func->destroy == vmw_gb_surface_destroy) {
@@ -360,7 +385,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- srf = vmw_res_to_srf(res);
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
@@ -641,6 +665,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
struct vmw_private *dev_priv = srf->res.dev_priv;
uint32_t size = user_srf->size;
+ WARN_ON_ONCE(res->dirty);
if (user_srf->master)
drm_master_put(&user_srf->master);
kfree(srf->offsets);
@@ -1168,10 +1193,16 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
cmd2->body.sid = res->id;
- res->backup_dirty = false;
}
vmw_fifo_commit(dev_priv, submit_size);
+ if (res->backup->dirty && res->backup_dirty) {
+ /* We've just made a full upload. Clear dirty regions. */
+ vmw_bo_dirty_clear_res(res);
+ }
+
+ res->backup_dirty = false;
+
return 0;
}
@@ -1636,7 +1667,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
}
} else if (req->base.drm_surface_flags &
- drm_vmw_surface_flag_create_buffer)
+ (drm_vmw_surface_flag_create_buffer |
+ drm_vmw_surface_flag_coherent))
ret = vmw_user_bo_alloc(dev_priv, tfile,
res->backup_size,
req->base.drm_surface_flags &
@@ -1650,6 +1682,26 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
goto out_unlock;
}
+ if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
+ struct vmw_buffer_object *backup = res->backup;
+
+ ttm_bo_reserve(&backup->base, false, false, NULL);
+ if (!res->func->dirty_alloc)
+ ret = -EINVAL;
+ if (!ret)
+ ret = vmw_bo_dirty_add(backup);
+ if (!ret) {
+ res->coherent = true;
+ ret = res->func->dirty_alloc(res);
+ }
+ ttm_bo_unreserve(&backup->base);
+ if (ret) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+
+ }
+
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->base.drm_surface_flags &
@@ -1758,3 +1810,338 @@ out_bad_resource:
return ret;
}
+
+/**
+ * vmw_subres_dirty_add - Add a dirty region to a subresource
+ * @dirty: The surface's dirty tracker.
+ * @loc_start: The location corresponding to the start of the region.
+ * @loc_end: The location corresponding to the end of the region.
+ *
+ * As we are assuming that @loc_start and @loc_end represent a sequential
+ * range of backing store memory, if the region spans multiple lines then
+ * regardless of the x coordinate, the full lines are dirtied.
+ * Correspondingly if the region spans multiple z slices, then full rather
+ * than partial z slices are dirtied.
+ */
+static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
+ const struct svga3dsurface_loc *loc_start,
+ const struct svga3dsurface_loc *loc_end)
+{
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
+ u32 mip = loc_start->sub_resource % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+ u32 box_c2 = box->z + box->d;
+
+ if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
+ return;
+
+ if (box->d == 0 || box->z > loc_start->z)
+ box->z = loc_start->z;
+ if (box_c2 < loc_end->z)
+ box->d = loc_end->z - box->z;
+
+ if (loc_start->z + 1 == loc_end->z) {
+ box_c2 = box->y + box->h;
+ if (box->h == 0 || box->y > loc_start->y)
+ box->y = loc_start->y;
+ if (box_c2 < loc_end->y)
+ box->h = loc_end->y - box->y;
+
+ if (loc_start->y + 1 == loc_end->y) {
+ box_c2 = box->x + box->w;
+ if (box->w == 0 || box->x > loc_start->x)
+ box->x = loc_start->x;
+ if (box_c2 < loc_end->x)
+ box->w = loc_end->x - box->x;
+ } else {
+ box->x = 0;
+ box->w = size->width;
+ }
+ } else {
+ box->y = 0;
+ box->h = size->height;
+ box->x = 0;
+ box->w = size->width;
+ }
+}
+
+/**
+ * vmw_subres_dirty_full - Mark a full subresource as dirty
+ * @dirty: The surface's dirty tracker.
+ * @subres: The subresource
+ */
+static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
+{
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ u32 mip = subres % cache->num_mip_levels;
+ const struct drm_vmw_size *size = &cache->mip[mip].size;
+ SVGA3dBox *box = &dirty->boxes[subres];
+
+ box->x = 0;
+ box->y = 0;
+ box->z = 0;
+ box->w = size->width;
+ box->h = size->height;
+ box->d = size->depth;
+}
+
+/*
+ * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
+ * surfaces.
+ */
+static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
+ size_t start, size_t end)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t backup_end = res->backup_offset + res->backup_size;
+ struct svga3dsurface_loc loc1, loc2;
+ const struct svga3dsurface_cache *cache;
+
+ start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
+ end = min(end, backup_end) - res->backup_offset;
+ cache = &dirty->cache;
+ svga3dsurface_get_loc(cache, &loc1, start);
+ svga3dsurface_get_loc(cache, &loc2, end - 1);
+ svga3dsurface_inc_loc(cache, &loc2);
+
+ if (loc1.sub_resource + 1 == loc2.sub_resource) {
+ /* Dirty range covers a single sub-resource */
+ vmw_subres_dirty_add(dirty, &loc1, &loc2);
+ } else {
+ /* Dirty range covers multiple sub-resources */
+ struct svga3dsurface_loc loc_min, loc_max;
+ u32 sub_res;
+
+ svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max);
+ vmw_subres_dirty_add(dirty, &loc1, &loc_max);
+ svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
+ vmw_subres_dirty_add(dirty, &loc_min, &loc2);
+ for (sub_res = loc1.sub_resource + 1;
+ sub_res < loc2.sub_resource - 1; ++sub_res)
+ vmw_subres_dirty_full(dirty, sub_res);
+ }
+}
+
+/*
+ * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
+ * surfaces.
+ */
+static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
+ size_t start, size_t end)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
+ SVGA3dBox *box = &dirty->boxes[0];
+ u32 box_c2;
+
+ box->h = box->d = 1;
+ start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
+ end = min(end, backup_end) - res->backup_offset;
+ box_c2 = box->x + box->w;
+ if (box->w == 0 || box->x > start)
+ box->x = start;
+ if (box_c2 < end)
+ box->w = end - box->x;
+}
+
+/*
+ * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
+ */
+static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
+ size_t end)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+
+ if (WARN_ON(end <= res->backup_offset ||
+ start >= res->backup_offset + res->backup_size))
+ return;
+
+ if (srf->format == SVGA3D_BUFFER)
+ vmw_surface_buf_dirty_range_add(res, start, end);
+ else
+ vmw_surface_tex_dirty_range_add(res, start, end);
+}
+
+/*
+ * vmw_surface_dirty_sync - The surface's dirty_sync callback.
+ */
+static int vmw_surface_dirty_sync(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ bool has_dx = false;
+ u32 i, num_dirty;
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t alloc_size;
+ const struct svga3dsurface_cache *cache = &dirty->cache;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXUpdateSubResource body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd2;
+ void *cmd;
+
+ num_dirty = 0;
+ for (i = 0; i < dirty->num_subres; ++i) {
+ const SVGA3dBox *box = &dirty->boxes[i];
+
+ if (box->d)
+ num_dirty++;
+ }
+
+ if (!num_dirty)
+ goto out;
+
+ alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
+ cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd1 = cmd;
+ cmd2 = cmd;
+
+ for (i = 0; i < dirty->num_subres; ++i) {
+ const SVGA3dBox *box = &dirty->boxes[i];
+
+ if (!box->d)
+ continue;
+
+ /*
+ * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
+ * UPDATE_GB_IMAGE is not.
+ */
+ if (has_dx) {
+ cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.sid = res->id;
+ cmd1->body.subResource = i;
+ cmd1->body.box = *box;
+ cmd1++;
+ } else {
+ cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.image.sid = res->id;
+ cmd2->body.image.face = i / cache->num_mip_levels;
+ cmd2->body.image.mipmap = i -
+ (cache->num_mip_levels * cmd2->body.image.face);
+ cmd2->body.box = *box;
+ cmd2++;
+ }
+
+ }
+ vmw_fifo_commit(dev_priv, alloc_size);
+ out:
+ memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
+ dirty->num_subres);
+
+ return 0;
+}
+
+/*
+ * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
+ */
+static int vmw_surface_dirty_alloc(struct vmw_resource *res)
+{
+ struct vmw_surface *srf = vmw_res_to_srf(res);
+ struct vmw_surface_dirty *dirty;
+ u32 num_layers = 1;
+ u32 num_mip;
+ u32 num_subres;
+ u32 num_samples;
+ size_t dirty_size, acc_size;
+ static struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ if (srf->array_size)
+ num_layers = srf->array_size;
+ else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
+ num_layers *= SVGA3D_MAX_SURFACE_FACES;
+
+ num_mip = srf->mip_levels[0];
+ if (!num_mip)
+ num_mip = 1;
+
+ num_subres = num_layers * num_mip;
+ dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
+ acc_size = ttm_round_pot(dirty_size);
+ ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
+ acc_size, &ctx);
+ if (ret) {
+ VMW_DEBUG_USER("Out of graphics memory for surface "
+ "dirty tracker.\n");
+ return ret;
+ }
+
+ dirty = kvzalloc(dirty_size, GFP_KERNEL);
+ if (!dirty) {
+ ret = -ENOMEM;
+ goto out_no_dirty;
+ }
+
+ num_samples = max_t(u32, 1, srf->multisample_count);
+ ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
+ num_layers, num_samples, &dirty->cache);
+ if (ret)
+ goto out_no_cache;
+
+ dirty->num_subres = num_subres;
+ dirty->size = acc_size;
+ res->dirty = (struct vmw_resource_dirty *) dirty;
+
+ return 0;
+
+out_no_cache:
+ kvfree(dirty);
+out_no_dirty:
+ ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
+ return ret;
+}
+
+/*
+ * vmw_surface_dirty_free - The surface's dirty_free callback
+ */
+static void vmw_surface_dirty_free(struct vmw_resource *res)
+{
+ struct vmw_surface_dirty *dirty =
+ (struct vmw_surface_dirty *) res->dirty;
+ size_t acc_size = dirty->size;
+
+ kvfree(dirty);
+ ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
+ res->dirty = NULL;
+}
+
+/*
+ * vmw_surface_clean - The surface's clean callback
+ */
+static int vmw_surface_clean(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ size_t alloc_size;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdReadbackGBSurface body;
+ } *cmd;
+
+ alloc_size = sizeof(*cmd);
+ cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.sid = res->id;
+ vmw_fifo_commit(dev_priv, alloc_size);
+
+ return 0;
+}
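vmw_subres_dirty_add() and vmw_surface_buf_dirty_range_add() above both grow a box, axis by axis, until it covers a newly dirtied region. A compact 1-D sketch of that interval-union arithmetic (illustrative, not the driver's code; this version also re-derives the right edge when the left edge moves, so the union is exact for ranges arriving in any order):

#include <stdio.h>

/* Illustrative 1-D version of the per-axis box-grow arithmetic:
 * extend [x, x + w) so that it also covers [start, end). */
struct box1d { unsigned int x, w; };

static void box_grow(struct box1d *box, unsigned int start, unsigned int end)
{
        unsigned int c2 = box->x + box->w;      /* current right edge */

        if (box->w == 0) {                      /* empty box: adopt the range */
                box->x = start;
                c2 = end;
        } else {
                if (start < box->x)
                        box->x = start;
                if (end > c2)
                        c2 = end;
        }
        box->w = c2 - box->x;
}

int main(void)
{
        struct box1d box = { 0, 0 };    /* w == 0 means "empty" */

        box_grow(&box, 100, 200);
        box_grow(&box, 50, 120);
        printf("box covers [%u, %u)\n", box.x, box.x + box.w);
        /* prints: box covers [50, 200) */
        return 0;
}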
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 5a7b8bb420de..ce288756531b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -29,10 +29,23 @@
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
+ static const struct vm_operations_struct vmw_vm_ops = {
+ .pfn_mkwrite = vmw_bo_vm_mkwrite,
+ .page_mkwrite = vmw_bo_vm_mkwrite,
+ .fault = vmw_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close
+ };
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
+ int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
- return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+ if (ret)
+ return ret;
+
+ vma->vm_ops = &vmw_vm_ops;
+
+ return 0;
}
/* struct vmw_validation_mem callback */
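The vmw_mmap() change above is a common driver pattern: let the generic helper (ttm_bo_mmap() here) complete its setup, then install a driver-local vm_operations_struct that keeps the TTM open/close hooks but overrides fault and mkwrite. A sketch of that shape outside the kernel, with a plain function-pointer table in place of vm_operations_struct (all names illustrative):

#include <stdio.h>

struct vm_ops {
        void (*fault)(void);
        void (*open)(void);
};

static void default_fault(void) { printf("default fault\n"); }
static void default_open(void)  { printf("default open\n"); }
static void my_fault(void)      { printf("driver fault with dirty tracking\n"); }

/* Driver table: reuse the default open hook, override fault. */
static const struct vm_ops my_ops = {
        .fault = my_fault,
        .open = default_open,
};

struct mapping { const struct vm_ops *ops; };

static int generic_mmap(struct mapping *m)
{
        static const struct vm_ops default_ops = {
                .fault = default_fault,
                .open = default_open,
        };

        m->ops = &default_ops;  /* generic helper installs defaults */
        return 0;
}

int main(void)
{
        struct mapping m;

        if (generic_mmap(&m))
                return 1;
        m.ops = &my_ops;        /* driver swaps in its own table afterwards */
        m.ops->open();
        m.ops->fault();
        return 0;
}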
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f611b2290a1b..e69bc373ae2e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -33,6 +33,8 @@
* struct vmw_validation_bo_node - Buffer object validation metadata.
* @base: Metadata used for TTM reservation- and validation.
* @hash: A hash entry used for the duplicate detection hash table.
+ * @coherent_count: If switching backup buffers, number of new coherent
+ * resources that will have this buffer as a backup buffer.
* @as_mob: Validate as mob.
* @cpu_blit: Validate for cpu blit access.
*
@@ -42,6 +44,7 @@
struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
struct drm_hash_item hash;
+ unsigned int coherent_count;
u32 as_mob : 1;
u32 cpu_blit : 1;
};
@@ -459,6 +462,19 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
if (ret)
goto out_unreserve;
}
+
+ if (val->switching_backup && val->new_backup &&
+ res->coherent) {
+ struct vmw_validation_bo_node *bo_node =
+ vmw_validation_find_bo_dup(ctx,
+ val->new_backup);
+
+ if (WARN_ON(!bo_node)) {
+ ret = -EINVAL;
+ goto out_unreserve;
+ }
+ bo_node->coherent_count++;
+ }
}
return 0;
@@ -521,6 +537,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
};
int ret;
+ if (atomic_read(&vbo->cpu_writers))
+ return -EBUSY;
+
if (vbo->pin_count > 0)
return 0;
@@ -562,6 +581,9 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ struct vmw_buffer_object *vbo =
+ container_of(entry->base.bo, typeof(*vbo), base);
+
if (entry->cpu_blit) {
struct ttm_operation_ctx ctx = {
.interruptible = intr,
@@ -576,6 +598,27 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
}
if (ret)
return ret;
+
+ /*
+ * Rather than having the resource code allocate the bo
+ * dirty tracker in resource_unreserve(), where we can't fail,
+ * do it here when validating the buffer object.
+ */
+ if (entry->coherent_count) {
+ unsigned int coherent_count = entry->coherent_count;
+
+ while (coherent_count) {
+ ret = vmw_bo_dirty_add(vbo);
+ if (ret)
+ return ret;
+
+ coherent_count--;
+ }
+ entry->coherent_count -= coherent_count;
+ }
+
+ if (vbo->dirty)
+ vmw_bo_dirty_scan(vbo);
}
return 0;
}
@@ -601,7 +644,8 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
struct vmw_resource *res = val->res;
struct vmw_buffer_object *backup = res->backup;
- ret = vmw_resource_validate(res, intr);
+ ret = vmw_resource_validate(res, intr, val->dirty_set &&
+ val->dirty);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
@@ -828,3 +872,34 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
ctx->mem_size_left += size;
return 0;
}
+
+/**
+ * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve. It's typically used as part of an error path
+ */
+void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
+{
+ struct vmw_validation_bo_node *entry;
+
+ /*
+ * Switching coherent resource backup buffers failed.
+ * Release corresponding buffer object dirty trackers.
+ */
+ list_for_each_entry(entry, &ctx->bo_list, base.head) {
+ if (entry->coherent_count) {
+ unsigned int coherent_count = entry->coherent_count;
+ struct vmw_buffer_object *vbo =
+ container_of(entry->base.bo, typeof(*vbo),
+ base);
+
+ while (coherent_count--)
+ vmw_bo_dirty_release(vbo);
+ }
+ }
+
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+}
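The coherent_count bookkeeping spans three functions: reservation counts how many dirty-tracker references each buffer will need, validation takes them one vmw_bo_dirty_add() call at a time, and vmw_validation_bo_backoff() releases them again on the error path. A small model of that take-N-or-roll-back shape (illustrative names; the real add call can fail with -ENOMEM):

#include <stdio.h>

struct tracker { unsigned int refs; };

static int tracker_add(struct tracker *t)
{
        t->refs++;
        return 0;       /* the real call can fail with -ENOMEM */
}

static void tracker_release(struct tracker *t)
{
        if (t->refs)
                t->refs--;
}

/* Take @count references, or release whatever was taken on failure. */
static int take_refs(struct tracker *t, unsigned int count)
{
        unsigned int taken;

        for (taken = 0; taken < count; taken++) {
                if (tracker_add(t)) {
                        while (taken--)
                                tracker_release(t);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct tracker t = { 0 };

        if (!take_refs(&t, 3))
                printf("tracker refs: %u\n", t.refs);   /* 3 */
        return 0;
}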
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 0e063743dd86..739906d1b3eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -170,21 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL, true);
-}
-
-/**
- * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
- * validation context
- * @ctx: The validation context
- *
- * This function unreserves the buffer objects previously reserved using
- * vmw_validation_bo_reserve. It's typically used as part of an error path
- */
-static inline void
-vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
-{
- ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+ NULL);
}
/**
@@ -269,4 +255,6 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
void *val_private, u32 dirty);
+void vmw_validation_bo_backoff(struct vmw_validation_context *ctx);
+
#endif
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 21ad1c359b61..ff506bc99414 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe,
}
static enum drm_mode_status
-display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+display_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
{
struct xen_drm_front_drm_pipeline *pipeline =
- container_of(crtc, struct xen_drm_front_drm_pipeline,
- pipe.crtc);
+ container_of(pipe, struct xen_drm_front_drm_pipeline,
+ pipe);
if (mode->hdisplay != pipeline->width)
return MODE_ERROR;
diff --git a/drivers/gpu/host1x/Kconfig b/drivers/gpu/host1x/Kconfig
index cf987a317a55..6dab94adf25e 100644
--- a/drivers/gpu/host1x/Kconfig
+++ b/drivers/gpu/host1x/Kconfig
@@ -2,7 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
- select IOMMU_IOVA if IOMMU_SUPPORT
+ select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 742aa9ff21b8..2c8559ff3481 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x,
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
- dma_set_max_seg_size(&device->dev, SZ_4M);
+ dma_set_max_seg_size(&device->dev, UINT_MAX);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 48c84c48299c..e8d3fda91d8a 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
*
* Must be called with the cdma lock held.
*/
-int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
- struct host1x_cdma *cdma,
- unsigned int needed)
+static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
+ struct host1x_cdma *cdma,
+ unsigned int needed)
{
while (true) {
struct push_buffer *pb = &cdma->push_buffer;
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index 1436295aa450..4cd212bb570d 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
/**
* host1x_channel_request() - Allocate a channel
- * @device: Host1x unit this channel will be used to send commands to
+ * @client: Host1x client this channel will be used to send commands to
*
- * Allocates a new host1x channel for @device. May return NULL if CDMA
+ * Allocates a new host1x channel for @client. May return NULL if CDMA
* initialization fails.
*/
-struct host1x_channel *host1x_channel_request(struct device *dev)
+struct host1x_channel *host1x_channel_request(struct host1x_client *client)
{
- struct host1x *host = dev_get_drvdata(dev->parent);
+ struct host1x *host = dev_get_drvdata(client->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
struct host1x_channel *channel;
int err;
@@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
kref_init(&channel->refcount);
mutex_init(&channel->submitlock);
- channel->dev = dev;
+ channel->client = client;
+ channel->dev = client->dev;
err = host1x_hw_channel_init(host, channel, channel->id);
if (err < 0)
@@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
fail:
clear_bit(channel->id, chlist->allocated_channels);
- dev_err(dev, "failed to initialize channel\n");
+ dev_err(client->dev, "failed to initialize channel\n");
return NULL;
}
diff --git a/drivers/gpu/host1x/channel.h b/drivers/gpu/host1x/channel.h
index 4fd694834f74..39044ff6c3aa 100644
--- a/drivers/gpu/host1x/channel.h
+++ b/drivers/gpu/host1x/channel.h
@@ -26,6 +26,7 @@ struct host1x_channel {
unsigned int id;
struct mutex submitlock;
void __iomem *regs;
+ struct host1x_client *client;
struct device *dev;
struct host1x_cdma cdma;
};
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 5a3f797240d4..a738ea55e407 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -18,10 +18,6 @@
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-#endif
-
#include "bus.h"
#include "channel.h"
#include "debug.h"
@@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = {
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x02_info = {
@@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = {
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x04_info = {
@@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = {
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_info host1x05_info = {
@@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = {
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
+ .has_wide_gather = false,
+ .has_hypervisor = false,
+ .num_sid_entries = 0,
+ .sid_table = NULL,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = {
.init = host1x06_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
@@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = {
.init = host1x07_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
+ .has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
@@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
+ int err;
+
+ /*
+ * If the host1x firewall is enabled, there's no need to enable IOMMU
+ * support. Similarly, if host1x is already attached to an IOMMU (via
+ * the DMA API), don't try to attach again.
+ */
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ return domain;
+
+ host->group = iommu_group_get(host->dev);
+ if (host->group) {
+ struct iommu_domain_geometry *geometry;
+ dma_addr_t start, end;
+ unsigned long order;
+
+ err = iova_cache_get();
+ if (err < 0)
+ goto put_group;
+
+ host->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!host->domain) {
+ err = -ENOMEM;
+ goto put_cache;
+ }
+
+ err = iommu_attach_group(host->domain, host->group);
+ if (err) {
+ if (err == -ENODEV)
+ err = 0;
+
+ goto free_domain;
+ }
+
+ geometry = &host->domain->geometry;
+ start = geometry->aperture_start & host->info->dma_mask;
+ end = geometry->aperture_end & host->info->dma_mask;
+
+ order = __ffs(host->domain->pgsize_bitmap);
+ init_iova_domain(&host->iova, 1UL << order, start >> order);
+ host->iova_end = end;
+
+ domain = host->domain;
+ }
+
+ return domain;
+
+free_domain:
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+put_cache:
+ iova_cache_put();
+put_group:
+ iommu_group_put(host->group);
+ host->group = NULL;
+
+ return ERR_PTR(err);
+}
+
+static int host1x_iommu_init(struct host1x *host)
+{
+ u64 mask = host->info->dma_mask;
+ struct iommu_domain *domain;
+ int err;
+
+ domain = host1x_iommu_attach(host);
+ if (IS_ERR(domain)) {
+ err = PTR_ERR(domain);
+ dev_err(host->dev, "failed to attach to IOMMU: %d\n", err);
+ return err;
+ }
+
+ /*
+ * If we're not behind an IOMMU make sure we don't get push buffers
+ * that are allocated outside of the range addressable by the GATHER
+ * opcode.
+ *
+ * Newer generations of Tegra (Tegra186 and later) support a wide
+ * variant of the GATHER opcode that allows addressing more bits.
+ */
+ if (!domain && !host->info->has_wide_gather)
+ mask = DMA_BIT_MASK(32);
+
+ err = dma_coerce_mask_and_coherent(host->dev, mask);
+ if (err < 0) {
+ dev_err(host->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void host1x_iommu_exit(struct host1x *host)
+{
+ if (host->domain) {
+ put_iova_domain(&host->iova);
+ iommu_detach_group(host->domain, host->group);
+
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+
+ iova_cache_put();
+
+ iommu_group_put(host->group);
+ host->group = NULL;
+ }
+}
+
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
@@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev)
return PTR_ERR(host->hv_regs);
}
- dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+ host->dev->dma_parms = &host->dma_parms;
+ dma_set_max_seg_size(host->dev, UINT_MAX);
if (host->info->init) {
err = host->info->init(host);
@@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
-#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
- if (host->dev->archdata.mapping) {
- struct dma_iommu_mapping *mapping =
- to_dma_iommu_mapping(host->dev);
- arm_iommu_detach_device(host->dev);
- arm_iommu_release_mapping(mapping);
- }
-#endif
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
- goto skip_iommu;
-
- host->group = iommu_group_get(&pdev->dev);
- if (host->group) {
- struct iommu_domain_geometry *geometry;
- u64 mask = dma_get_mask(host->dev);
- dma_addr_t start, end;
- unsigned long order;
-
- err = iova_cache_get();
- if (err < 0)
- goto put_group;
-
- host->domain = iommu_domain_alloc(&platform_bus_type);
- if (!host->domain) {
- err = -ENOMEM;
- goto put_cache;
- }
- err = iommu_attach_group(host->domain, host->group);
- if (err) {
- if (err == -ENODEV) {
- iommu_domain_free(host->domain);
- host->domain = NULL;
- iova_cache_put();
- iommu_group_put(host->group);
- host->group = NULL;
- goto skip_iommu;
- }
-
- goto fail_free_domain;
- }
-
- geometry = &host->domain->geometry;
- start = geometry->aperture_start & mask;
- end = geometry->aperture_end & mask;
-
- order = __ffs(host->domain->pgsize_bitmap);
- init_iova_domain(&host->iova, 1UL << order, start >> order);
- host->iova_end = end;
+ err = host1x_iommu_init(host);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
+ return err;
}
-skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
- goto fail_detach_device;
+ goto iommu_exit;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
- goto fail_free_channels;
+ goto free_channels;
}
err = reset_control_deassert(host->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
- goto fail_unprepare_disable;
+ goto unprepare_disable;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
- goto fail_reset_assert;
+ goto reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
- goto fail_deinit_syncpt;
+ goto deinit_syncpt;
}
host1x_debug_init(host);
@@ -351,33 +432,22 @@ skip_iommu:
err = host1x_register(host);
if (err < 0)
- goto fail_deinit_intr;
+ goto deinit_intr;
return 0;
-fail_deinit_intr:
+deinit_intr:
host1x_intr_deinit(host);
-fail_deinit_syncpt:
+deinit_syncpt:
host1x_syncpt_deinit(host);
-fail_reset_assert:
+reset_assert:
reset_control_assert(host->rst);
-fail_unprepare_disable:
+unprepare_disable:
clk_disable_unprepare(host->clk);
-fail_free_channels:
+free_channels:
host1x_channel_list_free(&host->channel_list);
-fail_detach_device:
- if (host->group && host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- }
-fail_free_domain:
- if (host->domain)
- iommu_domain_free(host->domain);
-put_cache:
- if (host->group)
- iova_cache_put();
-put_group:
- iommu_group_put(host->group);
+iommu_exit:
+ host1x_iommu_exit(host);
return err;
}
@@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev)
struct host1x *host = platform_get_drvdata(pdev);
host1x_unregister(host);
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
-
- if (host->domain) {
- put_iova_domain(&host->iova);
- iommu_detach_group(host->domain, host->group);
- iommu_domain_free(host->domain);
- iova_cache_put();
- iommu_group_put(host->group);
- }
+ host1x_iommu_exit(host);
return 0;
}
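host1x_iommu_attach() above sizes its IOVA domain from the IOMMU's reported geometry: the allocation granule is the smallest page size the domain supports (the lowest set bit of pgsize_bitmap, located with __ffs()), and the aperture bounds are masked by the device DMA mask and shifted into granule-sized frames. A small arithmetic sketch, assuming a 64-bit unsigned long and using the GCC/Clang __builtin_ctzl() builtin as a stand-in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
        /* Illustrative pgsize bitmap: 4 KiB, 2 MiB and 1 GiB pages. */
        unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
        /* Index of the lowest set bit, like the kernel's __ffs(). */
        unsigned long order = __builtin_ctzl(pgsize_bitmap);
        unsigned long granule = 1UL << order;
        unsigned long dma_mask = (1UL << 34) - 1;       /* e.g. DMA_BIT_MASK(34) */
        unsigned long aperture_start = 0xc0000000UL;

        printf("granule: %lu bytes (order %lu)\n", granule, order);
        printf("first iova frame: %#lx\n",
               (aperture_start & dma_mask) >> order);
        return 0;
}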
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index ff56f5e23a02..f781a9b0f39d 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -97,6 +97,7 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
+ bool has_wide_gather; /* supports GATHER_W opcode */
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
@@ -140,6 +141,8 @@ struct host1x {
struct list_head devices;
struct list_head list;
+
+ struct device_dma_parameters dma_parms;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 26f3c741d085..9245add23b5d 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
/* Add nr_completed to trace */
trace_host1x_channel_submit_complete(dev_name(channel->dev),
waiter->count, waiter->thresh);
-
}
static void action_wakeup(struct host1x_waitlist *waiter)
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index eaa5c3352c13..25ca54de8fc5 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
+ struct host1x_client *client = job->client;
+ struct device *dev = client->dev;
unsigned int i;
int err;
@@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
+ dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
- dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
@@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
+ if (client->group)
+ phys = &phys_addr;
+ else
+ phys = NULL;
+
+ sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
+
+ if (sgt) {
+ unsigned long mask = HOST1X_RELOC_READ |
+ HOST1X_RELOC_WRITE;
+ enum dma_data_direction dir;
+
+ switch (reloc->flags & mask) {
+ case HOST1X_RELOC_READ:
+ dir = DMA_TO_DEVICE;
+ break;
+
+ case HOST1X_RELOC_WRITE:
+ dir = DMA_FROM_DEVICE;
+ break;
+
+ case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
+ dir = DMA_BIDIRECTIONAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto unpin;
+ }
+
+ err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = dev;
+ job->unpins[job->num_unpins].dir = dir;
+ phys_addr = sg_dma_address(sgt->sgl);
+ }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- phys_addr = host1x_bo_pin(g->bo, &sgt);
+ sgt = host1x_bo_pin(host->dev, g->bo, NULL);
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto unpin;
+ }
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
@@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
- job->addr_phys[job->num_unpins] =
- iova_dma_addr(&host->iova, alloc);
job->unpins[job->num_unpins].size = gather_size;
+ phys_addr = iova_dma_addr(&host->iova, alloc);
} else {
- job->addr_phys[job->num_unpins] = phys_addr;
+ err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
+ DMA_TO_DEVICE);
+ if (!err) {
+ err = -ENOMEM;
+ goto unpin;
+ }
+
+ job->unpins[job->num_unpins].dev = host->dev;
+ phys_addr = sg_dma_address(sgt->sgl);
}
- job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
+ job->addr_phys[job->num_unpins] = phys_addr;
+ job->gather_addr_phys[i] = phys_addr;
+ job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
@@ -436,7 +494,8 @@ out:
return err;
}
-static inline int copy_gathers(struct host1x_job *job, struct device *dev)
+static inline int copy_gathers(struct device *host, struct host1x_job *job,
+ struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
@@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from a higher-priority pool first,
* as waiting for the allocation here is a major performance hit.
*/
- job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
- job->gather_copy_mapped = dma_alloc_wc(dev, size,
+ job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
@@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
- err = copy_gathers(job, dev);
+ err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
@@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+ struct device *dev = unpin->dev ?: host->dev;
+ struct sg_table *sgt = unpin->sgt;
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
unpin->size && host->domain) {
@@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job)
iova_pfn(&host->iova, job->addr_phys[i]));
}
- host1x_bo_unpin(unpin->bo, unpin->sgt);
+ if (unpin->dev && sgt)
+ dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
+ unpin->dir);
+
+ host1x_bo_unpin(dev, unpin->bo, sgt);
host1x_bo_put(unpin->bo);
}
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_wc(job->channel->dev, job->gather_copy_size,
+ dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
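
The reloc-pinning code in pin_job() above derives the DMA mapping direction from the relocation's read/write flags before calling dma_map_sg(). The same mapping, pulled out as a standalone helper for clarity; a sketch using only the names visible in the hunk:

#include <linux/dma-direction.h>

/* Sketch: map host1x reloc flags onto a dma_data_direction. */
static int host1x_reloc_dir(unsigned long flags, enum dma_data_direction *dir)
{
    switch (flags & (HOST1X_RELOC_READ | HOST1X_RELOC_WRITE)) {
    case HOST1X_RELOC_READ:
        *dir = DMA_TO_DEVICE;      /* the engine only reads the buffer */
        return 0;
    case HOST1X_RELOC_WRITE:
        *dir = DMA_FROM_DEVICE;    /* the engine only writes the buffer */
        return 0;
    case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
        *dir = DMA_BIDIRECTIONAL;  /* the engine reads and writes */
        return 0;
    default:
        return -EINVAL;            /* a reloc with neither flag is invalid */
    }
}

The direction stored in the unpin data is replayed in host1x_job_unpin(), so every dma_map_sg() gets a matching dma_unmap_sg().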
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 62b8805e6b35..94bc2e4ae241 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -8,6 +8,8 @@
#ifndef __HOST1X_JOB_H
#define __HOST1X_JOB_H
+#include <linux/dma-direction.h>
+
struct host1x_job_gather {
unsigned int words;
dma_addr_t base;
@@ -19,7 +21,9 @@ struct host1x_job_gather {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
+ struct device *dev;
size_t size;
+ enum dma_data_direction dir;
};
/*
diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c
index fc8f57f97ce6..e3799a53a193 100644
--- a/drivers/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -361,9 +361,6 @@ static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
if (connection->mode_switch)
peer_space += sizeof(struct gb_operation_msg_hdr);
- if (!hd->driver->cport_quiesce)
- return 0;
-
ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
peer_space,
GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 1ecb5124421c..494a39e74939 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -525,6 +525,7 @@ config HID_LENOVO
config HID_LOGITECH
tristate "Logitech devices"
depends on HID
+ depends on LEDS_CLASS
default !EXPERT
---help---
Support for Logitech devices that are not fully compliant with HID standard.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0c03308cfb08..bfefa365b1ce 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
+obj-$(CONFIG_HID_LOGITECH) += hid-lg-g15.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
obj-$(CONFIG_HID_MACALLY) += hid-macally.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 63fdbf09b044..e0b241bd3070 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -212,6 +212,18 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
}
/*
+ * Concatenate a usage which defines 16 bits or less with the
+ * currently defined usage page to form a 32-bit usage
+ */
+
+static void complete_usage(struct hid_parser *parser, unsigned int index)
+{
+ parser->local.usage[index] &= 0xFFFF;
+ parser->local.usage[index] |=
+ (parser->global.usage_page & 0xFFFF) << 16;
+}
+
+/*
* Add a usage to the temporary parser table.
*/
@@ -222,6 +234,14 @@ static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+
+ /*
+ * If the Usage item only includes a usage ID, concatenate it
+ * with the currently defined usage page
+ */
+ if (size <= 2)
+ complete_usage(parser, parser->local.usage_index);
+
parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
@@ -543,13 +563,32 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
* usage value."
*/
-static void hid_concatenate_usage_page(struct hid_parser *parser)
+static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{
int i;
+ unsigned int usage_page;
+ unsigned int current_page;
+
+ if (!parser->local.usage_index)
+ return;
- for (i = 0; i < parser->local.usage_index; i++)
- if (parser->local.usage_size[i] <= 2)
- parser->local.usage[i] += parser->global.usage_page << 16;
+ usage_page = parser->global.usage_page;
+
+ /*
+ * Concatenate the usage page again only if the last declared Usage
+ * Page has not already been used in a previous usage concatenation
+ */
+ for (i = parser->local.usage_index - 1; i >= 0; i--) {
+ if (parser->local.usage_size[i] > 2)
+ /* Ignore extended usages */
+ continue;
+
+ current_page = parser->local.usage[i] >> 16;
+ if (current_page == usage_page)
+ break;
+
+ complete_usage(parser, i);
+ }
}
/*
@@ -561,7 +600,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
- hid_concatenate_usage_page(parser);
+ hid_concatenate_last_usage_page(parser);
data = item_udata(item);
@@ -742,6 +781,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
+
+ if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
+ parser->global.report_size == 8)
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
static void hid_scan_collection(struct hid_parser *parser, unsigned type)
@@ -772,7 +815,7 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
- hid_concatenate_usage_page(parser);
+ hid_concatenate_last_usage_page(parser);
data = item_udata(item);
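
complete_usage() above implements the HID rule that a short Usage item (16 bits or less) is interpreted relative to the current Usage Page: the page fills the high 16 bits and the usage ID the low 16 bits of the 32-bit usage. A self-contained userspace illustration of the same arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t complete_usage(uint32_t usage, uint16_t usage_page)
{
    return (usage & 0xFFFF) | ((uint32_t)usage_page << 16);
}

int main(void)
{
    /* Usage Page (Generic Desktop, 0x01) + Usage (Keyboard, 0x06) */
    printf("0x%08x\n", (unsigned)complete_usage(0x0006, 0x0001)); /* 0x00010006 */
    return 0;
}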
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index d86a9189e88f..2aa4ed157aec 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -35,6 +35,7 @@ struct cbas_ec {
struct device *dev; /* The platform device (EC) */
struct input_dev *input;
bool base_present;
+ bool base_folded;
struct notifier_block notifier;
};
@@ -208,7 +209,14 @@ static int __cbas_ec_probe(struct platform_device *pdev)
return error;
}
- input_report_switch(input, SW_TABLET_MODE, !cbas_ec.base_present);
+ if (!cbas_ec.base_present)
+ cbas_ec.base_folded = false;
+
+ dev_dbg(&pdev->dev, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, cbas_ec.base_folded);
+
+ input_report_switch(input, SW_TABLET_MODE,
+ !cbas_ec.base_present || cbas_ec.base_folded);
cbas_ec_set_input(input);
@@ -322,10 +330,9 @@ static int hammer_kbd_brightness_set_blocking(struct led_classdev *cdev,
static int hammer_register_leds(struct hid_device *hdev)
{
struct hammer_kbd_leds *kbd_backlight;
+ int error;
- kbd_backlight = devm_kzalloc(&hdev->dev,
- sizeof(*kbd_backlight),
- GFP_KERNEL);
+ kbd_backlight = kzalloc(sizeof(*kbd_backlight), GFP_KERNEL);
if (!kbd_backlight)
return -ENOMEM;
@@ -339,12 +346,31 @@ static int hammer_register_leds(struct hid_device *hdev)
/* Set backlight to 0% initially. */
hammer_kbd_brightness_set_blocking(&kbd_backlight->cdev, 0);
- return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
+ error = led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
+ if (error)
+ goto err_free_mem;
+
+ hid_set_drvdata(hdev, kbd_backlight);
+ return 0;
+
+err_free_mem:
+ kfree(kbd_backlight);
+ return error;
+}
+
+static void hammer_unregister_leds(struct hid_device *hdev)
+{
+ struct hammer_kbd_leds *kbd_backlight = hid_get_drvdata(hdev);
+
+ if (kbd_backlight) {
+ led_classdev_unregister(&kbd_backlight->cdev);
+ kfree(kbd_backlight);
+ }
}
#define HID_UP_GOOGLEVENDOR 0xffd10000
#define HID_VD_KBD_FOLDED 0x00000019
-#define WHISKERS_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
+#define HID_USAGE_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
/* HID usage for keyboard backlight (Alphanumeric display brightness) */
#define HID_AD_BRIGHTNESS 0x00140046
@@ -354,8 +380,7 @@ static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
- usage->hid == WHISKERS_KBD_FOLDED) {
+ if (usage->hid == HID_USAGE_KBD_FOLDED) {
/*
* We do not want to have this usage mapped as it will get
* mixed in with "base attached" signal and delivered over
@@ -372,19 +397,19 @@ static int hammer_event(struct hid_device *hid, struct hid_field *field,
{
unsigned long flags;
- if (hid->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
- usage->hid == WHISKERS_KBD_FOLDED) {
+ if (usage->hid == HID_USAGE_KBD_FOLDED) {
spin_lock_irqsave(&cbas_ec_lock, flags);
- hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
- cbas_ec.base_present, value);
-
/*
- * We should not get event if base is detached, but in case
- * we happen to service HID and EC notifications out of order
- * let's still check the "base present" flag.
+ * If we are getting events from Whiskers, that means it is
+ * attached to the lid.
*/
- if (cbas_ec.input && cbas_ec.base_present) {
+ cbas_ec.base_present = true;
+ cbas_ec.base_folded = value;
+ hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, cbas_ec.base_folded);
+
+ if (cbas_ec.input) {
input_report_switch(cbas_ec.input,
SW_TABLET_MODE, value);
input_sync(cbas_ec.input);
@@ -397,33 +422,22 @@ static int hammer_event(struct hid_device *hid, struct hid_field *field,
return 0;
}
-static bool hammer_is_keyboard_interface(struct hid_device *hdev)
+static bool hammer_has_usage(struct hid_device *hdev, unsigned int report_type,
+ unsigned application, unsigned usage)
{
- struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
- struct hid_report *report;
-
- list_for_each_entry(report, &re->report_list, list)
- if (report->application == HID_GD_KEYBOARD)
- return true;
-
- return false;
-}
-
-static bool hammer_has_backlight_control(struct hid_device *hdev)
-{
- struct hid_report_enum *re = &hdev->report_enum[HID_OUTPUT_REPORT];
+ struct hid_report_enum *re = &hdev->report_enum[report_type];
struct hid_report *report;
int i, j;
list_for_each_entry(report, &re->report_list, list) {
- if (report->application != HID_GD_KEYBOARD)
+ if (report->application != application)
continue;
for (i = 0; i < report->maxfield; i++) {
struct hid_field *field = report->field[i];
for (j = 0; j < field->maxusage; j++)
- if (field->usage[j].hid == HID_AD_BRIGHTNESS)
+ if (field->usage[j].hid == usage)
return true;
}
}
@@ -431,21 +445,23 @@ static bool hammer_has_backlight_control(struct hid_device *hdev)
return false;
}
+static bool hammer_has_folded_event(struct hid_device *hdev)
+{
+ return hammer_has_usage(hdev, HID_INPUT_REPORT,
+ HID_GD_KEYBOARD, HID_USAGE_KBD_FOLDED);
+}
+
+static bool hammer_has_backlight_control(struct hid_device *hdev)
+{
+ return hammer_has_usage(hdev, HID_OUTPUT_REPORT,
+ HID_GD_KEYBOARD, HID_AD_BRIGHTNESS);
+}
+
static int hammer_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
int error;
- /*
- * We always want to poll for, and handle tablet mode events from
- * Whiskers, even when nobody has opened the input device. This also
- * prevents the hid core from dropping early tablet mode events from
- * the device.
- */
- if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
- hammer_is_keyboard_interface(hdev))
- hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
-
error = hid_parse(hdev);
if (error)
return error;
@@ -454,6 +470,19 @@ static int hammer_probe(struct hid_device *hdev,
if (error)
return error;
+ /*
+ * We always want to poll for, and handle, tablet mode events from
+ * devices that have the folded usage, even when nobody has opened
+ * the input device. This also prevents the hid core from dropping
+ * early tablet mode events from the device.
+ */
+ if (hammer_has_folded_event(hdev)) {
+ hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
+ error = hid_hw_open(hdev);
+ if (error)
+ return error;
+ }
+
if (hammer_has_backlight_control(hdev)) {
error = hammer_register_leds(hdev);
if (error)
@@ -465,6 +494,36 @@ static int hammer_probe(struct hid_device *hdev,
return 0;
}
+static void hammer_remove(struct hid_device *hdev)
+{
+ unsigned long flags;
+
+ if (hammer_has_folded_event(hdev)) {
+ hid_hw_close(hdev);
+
+ /*
+ * If we are disconnecting then most likely Whiskers is
+ * being removed. Even if it is not removed, without a proper
+ * keyboard we should not stay in clamshell mode.
+ *
+ * The reason for doing this here, rather than waiting for a
+ * signal from the EC, is that on some devices there is high
+ * leakage on the Whiskers pins and we do not detect disconnects
+ * reliably, resulting in devices being stuck in clamshell mode.
+ */
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+ if (cbas_ec.input && cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, 1);
+ input_sync(cbas_ec.input);
+ }
+ cbas_ec.base_present = false;
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ }
+
+ hammer_unregister_leds(hdev);
+
+ hid_hw_stop(hdev);
+}
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
@@ -487,6 +546,7 @@ static struct hid_driver hammer_driver = {
.name = "hammer",
.id_table = hammer_devices,
.probe = hammer_probe,
+ .remove = hammer_remove,
.input_mapping = hammer_input_mapping,
.event = hammer_event,
};
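
The probe path above reports SW_TABLET_MODE from two inputs: whether the base is attached at all, and whether an attached base is folded behind the lid. Restated as a one-line predicate matching the expression passed to input_report_switch(); the helper name is illustrative only, the driver open-codes it:

/* Tablet mode is active when the base is gone, or present but folded. */
static bool cbas_in_tablet_mode(bool base_present, bool base_folded)
{
    return !base_present || base_folded;
}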
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 79a28fc91521..dddfca555df9 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -192,6 +192,9 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
if (desc->bLength == 0)
goto cleanup;
+ /* The pointer is not NULL when we resume from hibernation */
+ if (input_device->hid_desc != NULL)
+ kfree(input_device->hid_desc);
input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC);
if (!input_device->hid_desc)
@@ -203,6 +206,9 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
}
+ /* The pointer is not NULL when we resume from hibernation */
+ if (input_device->report_desc != NULL)
+ kfree(input_device->report_desc);
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
@@ -342,6 +348,8 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
+ reinit_completion(&input_dev->wait_event);
+
request = &input_dev->protocol_req;
memset(request, 0, sizeof(struct mousevsc_prt_msg));
@@ -541,6 +549,30 @@ static int mousevsc_remove(struct hv_device *dev)
return 0;
}
+static int mousevsc_suspend(struct hv_device *dev)
+{
+ vmbus_close(dev->channel);
+
+ return 0;
+}
+
+static int mousevsc_resume(struct hv_device *dev)
+{
+ int ret;
+
+ ret = vmbus_open(dev->channel,
+ INPUTVSC_SEND_RING_BUFFER_SIZE,
+ INPUTVSC_RECV_RING_BUFFER_SIZE,
+ NULL, 0,
+ mousevsc_on_channel_callback,
+ dev);
+ if (ret)
+ return ret;
+
+ ret = mousevsc_connect_to_vsp(dev);
+ return ret;
+}
+
static const struct hv_vmbus_device_id id_table[] = {
/* Mouse guid */
{ HV_MOUSE_GUID, },
@@ -554,6 +586,8 @@ static struct hv_driver mousevsc_drv = {
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
+ .suspend = mousevsc_suspend,
+ .resume = mousevsc_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
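
mousevsc_connect_to_vsp() above re-arms its completion before use because the resume path now calls it a second time: a struct completion is one-shot, and a wait after an earlier complete() would return immediately unless reinit_completion() resets it. The generic pattern, with hypothetical helpers standing in for the VMBus specifics:

#include <linux/completion.h>

/* Sketch only: my_dev, send_request() and the 5s timeout are assumptions. */
static int do_handshake(struct my_dev *dev)
{
    int ret;

    reinit_completion(&dev->wait_event);  /* re-arm before every handshake */

    ret = send_request(dev);
    if (ret)
        return ret;

    if (!wait_for_completion_timeout(&dev->wait_event, 5 * HZ))
        return -ETIMEDOUT;

    return 0;
}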
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 447e8db21174..7e1689ef35f5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -573,6 +573,7 @@
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a 0x1f4a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -749,6 +750,10 @@
#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION 0xc216
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
+#define USB_DEVICE_ID_LOGITECH_G15_LCD 0xc222
+#define USB_DEVICE_ID_LOGITECH_G15_V2_LCD 0xc227
+#define USB_DEVICE_ID_LOGITECH_G510 0xc22d
+#define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO 0xc22e
#define USB_DEVICE_ID_LOGITECH_G29_WHEEL 0xc24f
#define USB_DEVICE_ID_LOGITECH_G920_WHEEL 0xc262
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
@@ -959,6 +964,7 @@
#define I2C_VENDOR_ID_RAYDIUM 0x2386
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
+#define I2C_PRODUCT_ID_RAYDIUM_3118 0x3118
#define USB_VENDOR_ID_RAZER 0x1532
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
new file mode 100644
index 000000000000..8a9268a5c66a
--- /dev/null
+++ b/drivers/hid/hid-lg-g15.c
@@ -0,0 +1,899 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for gaming keys on Logitech gaming keyboards (such as the G15)
+ *
+ * Copyright (c) 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/usb.h>
+#include <linux/wait.h>
+
+#include "hid-ids.h"
+
+#define LG_G15_TRANSFER_BUF_SIZE 20
+
+#define LG_G15_FEATURE_REPORT 0x02
+
+#define LG_G510_FEATURE_M_KEYS_LEDS 0x04
+#define LG_G510_FEATURE_BACKLIGHT_RGB 0x05
+#define LG_G510_FEATURE_POWER_ON_RGB 0x06
+
+enum lg_g15_model {
+ LG_G15,
+ LG_G15_V2,
+ LG_G510,
+ LG_G510_USB_AUDIO,
+};
+
+enum lg_g15_led_type {
+ LG_G15_KBD_BRIGHTNESS,
+ LG_G15_LCD_BRIGHTNESS,
+ LG_G15_BRIGHTNESS_MAX,
+ LG_G15_MACRO_PRESET1 = 2,
+ LG_G15_MACRO_PRESET2,
+ LG_G15_MACRO_PRESET3,
+ LG_G15_MACRO_RECORD,
+ LG_G15_LED_MAX
+};
+
+struct lg_g15_led {
+ struct led_classdev cdev;
+ enum led_brightness brightness;
+ enum lg_g15_led_type led;
+ u8 red, green, blue;
+};
+
+struct lg_g15_data {
+ /* Must be first for proper dma alignment */
+ u8 transfer_buf[LG_G15_TRANSFER_BUF_SIZE];
+ /* Protects the transfer_buf and led brightness */
+ struct mutex mutex;
+ struct work_struct work;
+ struct input_dev *input;
+ struct hid_device *hdev;
+ enum lg_g15_model model;
+ struct lg_g15_led leds[LG_G15_LED_MAX];
+ bool game_mode_enabled;
+};
+
+/******** G15 and G15 v2 LED functions ********/
+
+static int lg_g15_update_led_brightness(struct lg_g15_data *g15)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G15_FEATURE_REPORT,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 4) {
+ hid_err(g15->hdev, "Error getting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ g15->leds[LG_G15_KBD_BRIGHTNESS].brightness = g15->transfer_buf[1];
+ g15->leds[LG_G15_LCD_BRIGHTNESS].brightness = g15->transfer_buf[2];
+
+ g15->leds[LG_G15_MACRO_PRESET1].brightness =
+ !(g15->transfer_buf[3] & 0x01);
+ g15->leds[LG_G15_MACRO_PRESET2].brightness =
+ !(g15->transfer_buf[3] & 0x02);
+ g15->leds[LG_G15_MACRO_PRESET3].brightness =
+ !(g15->transfer_buf[3] & 0x04);
+ g15->leds[LG_G15_MACRO_RECORD].brightness =
+ !(g15->transfer_buf[3] & 0x08);
+
+ return 0;
+}
+
+static enum led_brightness lg_g15_led_get(struct led_classdev *led_cdev)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ enum led_brightness brightness;
+
+ mutex_lock(&g15->mutex);
+ lg_g15_update_led_brightness(g15);
+ brightness = g15->leds[g15_led->led].brightness;
+ mutex_unlock(&g15->mutex);
+
+ return brightness;
+}
+
+static int lg_g15_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ u8 val, mask = 0;
+ int i, ret;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ mutex_lock(&g15->mutex);
+
+ g15->transfer_buf[0] = LG_G15_FEATURE_REPORT;
+ g15->transfer_buf[3] = 0;
+
+ if (g15_led->led < LG_G15_BRIGHTNESS_MAX) {
+ g15->transfer_buf[1] = g15_led->led + 1;
+ g15->transfer_buf[2] = brightness << (g15_led->led * 4);
+ } else {
+ for (i = LG_G15_MACRO_PRESET1; i < LG_G15_LED_MAX; i++) {
+ if (i == g15_led->led)
+ val = brightness;
+ else
+ val = g15->leds[i].brightness;
+
+ if (val)
+ mask |= 1 << (i - LG_G15_MACRO_PRESET1);
+ }
+
+ g15->transfer_buf[1] = 0x04;
+ g15->transfer_buf[2] = ~mask;
+ }
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G15_FEATURE_REPORT,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret == 4) {
+ /* Success */
+ g15_led->brightness = brightness;
+ ret = 0;
+ } else {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ ret = (ret < 0) ? ret : -EIO;
+ }
+
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+static void lg_g15_leds_changed_work(struct work_struct *work)
+{
+ struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
+ enum led_brightness old_brightness[LG_G15_BRIGHTNESS_MAX];
+ enum led_brightness brightness[LG_G15_BRIGHTNESS_MAX];
+ int i, ret;
+
+ mutex_lock(&g15->mutex);
+ for (i = 0; i < LG_G15_BRIGHTNESS_MAX; i++)
+ old_brightness[i] = g15->leds[i].brightness;
+
+ ret = lg_g15_update_led_brightness(g15);
+
+ for (i = 0; i < LG_G15_BRIGHTNESS_MAX; i++)
+ brightness[i] = g15->leds[i].brightness;
+ mutex_unlock(&g15->mutex);
+
+ if (ret)
+ return;
+
+ for (i = 0; i < LG_G15_BRIGHTNESS_MAX; i++) {
+ if (brightness[i] == old_brightness[i])
+ continue;
+
+ led_classdev_notify_brightness_hw_changed(&g15->leds[i].cdev,
+ brightness[i]);
+ }
+}
+
+/******** G510 LED functions ********/
+
+static int lg_g510_get_initial_led_brightness(struct lg_g15_data *g15, int i)
+{
+ int ret, high;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_FEATURE_BACKLIGHT_RGB + i,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 4) {
+ hid_err(g15->hdev, "Error getting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ high = max3(g15->transfer_buf[1], g15->transfer_buf[2],
+ g15->transfer_buf[3]);
+
+ if (high) {
+ g15->leds[i].red =
+ DIV_ROUND_CLOSEST(g15->transfer_buf[1] * 255, high);
+ g15->leds[i].green =
+ DIV_ROUND_CLOSEST(g15->transfer_buf[2] * 255, high);
+ g15->leds[i].blue =
+ DIV_ROUND_CLOSEST(g15->transfer_buf[3] * 255, high);
+ g15->leds[i].brightness = high;
+ } else {
+ g15->leds[i].red = 255;
+ g15->leds[i].green = 255;
+ g15->leds[i].blue = 255;
+ g15->leds[i].brightness = 0;
+ }
+
+ return 0;
+}
+
+/* Must be called with g15->mutex locked */
+static int lg_g510_kbd_led_write(struct lg_g15_data *g15,
+ struct lg_g15_led *g15_led,
+ enum led_brightness brightness)
+{
+ int ret;
+
+ g15->transfer_buf[0] = 5 + g15_led->led;
+ g15->transfer_buf[1] =
+ DIV_ROUND_CLOSEST(g15_led->red * brightness, 255);
+ g15->transfer_buf[2] =
+ DIV_ROUND_CLOSEST(g15_led->green * brightness, 255);
+ g15->transfer_buf[3] =
+ DIV_ROUND_CLOSEST(g15_led->blue * brightness, 255);
+
+ ret = hid_hw_raw_request(g15->hdev,
+ LG_G510_FEATURE_BACKLIGHT_RGB + g15_led->led,
+ g15->transfer_buf, 4,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret == 4) {
+ /* Success */
+ g15_led->brightness = brightness;
+ ret = 0;
+ } else {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ ret = (ret < 0) ? ret : -EIO;
+ }
+
+ return ret;
+}
+
+static int lg_g510_kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ int ret;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ mutex_lock(&g15->mutex);
+ ret = lg_g510_kbd_led_write(g15, g15_led, brightness);
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+static enum led_brightness lg_g510_kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+
+ return g15_led->brightness;
+}
+
+static ssize_t color_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ unsigned long value;
+ int ret;
+
+ if (count < 7 || (count == 8 && buf[7] != '\n') || count > 8)
+ return -EINVAL;
+
+ if (buf[0] != '#')
+ return -EINVAL;
+
+ ret = kstrtoul(buf + 1, 16, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&g15->mutex);
+ g15_led->red = (value & 0xff0000) >> 16;
+ g15_led->green = (value & 0x00ff00) >> 8;
+ g15_led->blue = (value & 0x0000ff);
+ ret = lg_g510_kbd_led_write(g15, g15_led, g15_led->brightness);
+ mutex_unlock(&g15->mutex);
+
+ return (ret < 0) ? ret : count;
+}
+
+static ssize_t color_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ ssize_t ret;
+
+ mutex_lock(&g15->mutex);
+ ret = sprintf(buf, "#%02x%02x%02x\n",
+ g15_led->red, g15_led->green, g15_led->blue);
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(color);
+
+static struct attribute *lg_g510_kbd_led_attrs[] = {
+ &dev_attr_color.attr,
+ NULL,
+};
+
+static const struct attribute_group lg_g510_kbd_led_group = {
+ .attrs = lg_g510_kbd_led_attrs,
+};
+
+static const struct attribute_group *lg_g510_kbd_led_groups[] = {
+ &lg_g510_kbd_led_group,
+ NULL,
+};
+
+static void lg_g510_leds_sync_work(struct work_struct *work)
+{
+ struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
+
+ mutex_lock(&g15->mutex);
+ lg_g510_kbd_led_write(g15, &g15->leds[LG_G15_KBD_BRIGHTNESS],
+ g15->leds[LG_G15_KBD_BRIGHTNESS].brightness);
+ mutex_unlock(&g15->mutex);
+}
+
+static int lg_g510_update_mkey_led_brightness(struct lg_g15_data *g15)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_FEATURE_M_KEYS_LEDS,
+ g15->transfer_buf, 2,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 2) {
+ hid_err(g15->hdev, "Error getting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ g15->leds[LG_G15_MACRO_PRESET1].brightness =
+ !!(g15->transfer_buf[1] & 0x80);
+ g15->leds[LG_G15_MACRO_PRESET2].brightness =
+ !!(g15->transfer_buf[1] & 0x40);
+ g15->leds[LG_G15_MACRO_PRESET3].brightness =
+ !!(g15->transfer_buf[1] & 0x20);
+ g15->leds[LG_G15_MACRO_RECORD].brightness =
+ !!(g15->transfer_buf[1] & 0x10);
+
+ return 0;
+}
+
+static enum led_brightness lg_g510_mkey_led_get(struct led_classdev *led_cdev)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ enum led_brightness brightness;
+
+ mutex_lock(&g15->mutex);
+ lg_g510_update_mkey_led_brightness(g15);
+ brightness = g15->leds[g15_led->led].brightness;
+ mutex_unlock(&g15->mutex);
+
+ return brightness;
+}
+
+static int lg_g510_mkey_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ u8 val, mask = 0;
+ int i, ret;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ mutex_lock(&g15->mutex);
+
+ for (i = LG_G15_MACRO_PRESET1; i < LG_G15_LED_MAX; i++) {
+ if (i == g15_led->led)
+ val = brightness;
+ else
+ val = g15->leds[i].brightness;
+
+ if (val)
+ mask |= 0x80 >> (i - LG_G15_MACRO_PRESET1);
+ }
+
+ g15->transfer_buf[0] = LG_G510_FEATURE_M_KEYS_LEDS;
+ g15->transfer_buf[1] = mask;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_FEATURE_M_KEYS_LEDS,
+ g15->transfer_buf, 2,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret == 2) {
+ /* Success */
+ g15_led->brightness = brightness;
+ ret = 0;
+ } else {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ ret = (ret < 0) ? ret : -EIO;
+ }
+
+ mutex_unlock(&g15->mutex);
+
+ return ret;
+}
+
+/******** Generic LED functions ********/
+static int lg_g15_get_initial_led_brightness(struct lg_g15_data *g15)
+{
+ int ret;
+
+ switch (g15->model) {
+ case LG_G15:
+ case LG_G15_V2:
+ return lg_g15_update_led_brightness(g15);
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ ret = lg_g510_get_initial_led_brightness(g15, 0);
+ if (ret)
+ return ret;
+
+ ret = lg_g510_get_initial_led_brightness(g15, 1);
+ if (ret)
+ return ret;
+
+ return lg_g510_update_mkey_led_brightness(g15);
+ }
+ return -EINVAL; /* Never reached */
+}
+
+/******** Input functions ********/
+
+/* On the G15 Mark I, Logitech has been quite creative with which bit is what */
+static int lg_g15_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ int i, val;
+
+ /* G1 - G6 */
+ for (i = 0; i < 6; i++) {
+ val = data[i + 1] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO1 + i, val);
+ }
+ /* G7 - G12 */
+ for (i = 0; i < 6; i++) {
+ val = data[i + 2] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO7 + i, val);
+ }
+ /* G13 - G17 */
+ for (i = 0; i < 5; i++) {
+ val = data[i + 1] & (4 << i);
+ input_report_key(g15->input, KEY_MACRO13 + i, val);
+ }
+ /* G18 */
+ input_report_key(g15->input, KEY_MACRO18, data[8] & 0x40);
+
+ /* M1 - M3 */
+ for (i = 0; i < 3; i++) {
+ val = data[i + 6] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO_PRESET1 + i, val);
+ }
+ /* MR */
+ input_report_key(g15->input, KEY_MACRO_RECORD_START, data[7] & 0x40);
+
+ /* Most left (round) button below the LCD */
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1, data[8] & 0x80);
+ /* 4 other buttons below the LCD */
+ for (i = 0; i < 4; i++) {
+ val = data[i + 2] & 0x80;
+ input_report_key(g15->input, KEY_KBD_LCD_MENU2 + i, val);
+ }
+
+ /* Backlight cycle button pressed? */
+ if (data[1] & 0x80)
+ schedule_work(&g15->work);
+
+ input_sync(g15->input);
+ return 0;
+}
+
+static int lg_g15_v2_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ int i, val;
+
+ /* G1 - G6 */
+ for (i = 0; i < 6; i++) {
+ val = data[1] & (1 << i);
+ input_report_key(g15->input, KEY_MACRO1 + i, val);
+ }
+
+ /* M1 - M3 + MR */
+ input_report_key(g15->input, KEY_MACRO_PRESET1, data[1] & 0x40);
+ input_report_key(g15->input, KEY_MACRO_PRESET2, data[1] & 0x80);
+ input_report_key(g15->input, KEY_MACRO_PRESET3, data[2] & 0x20);
+ input_report_key(g15->input, KEY_MACRO_RECORD_START, data[2] & 0x40);
+
+ /* Round button to the left of the LCD */
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1, data[2] & 0x80);
+ /* 4 buttons below the LCD */
+ for (i = 0; i < 4; i++) {
+ val = data[2] & (2 << i);
+ input_report_key(g15->input, KEY_KBD_LCD_MENU2 + i, val);
+ }
+
+ /* Backlight cycle button pressed? */
+ if (data[2] & 0x01)
+ schedule_work(&g15->work);
+
+ input_sync(g15->input);
+ return 0;
+}
+
+static int lg_g510_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ bool game_mode_enabled;
+ int i, val;
+
+ /* G1 - G18 */
+ for (i = 0; i < 18; i++) {
+ val = data[i / 8 + 1] & (1 << (i % 8));
+ input_report_key(g15->input, KEY_MACRO1 + i, val);
+ }
+
+ /* Game mode on/off slider */
+ game_mode_enabled = data[3] & 0x04;
+ if (game_mode_enabled != g15->game_mode_enabled) {
+ if (game_mode_enabled)
+ hid_info(g15->hdev, "Game Mode enabled, Windows (super) key is disabled\n");
+ else
+ hid_info(g15->hdev, "Game Mode disabled\n");
+ g15->game_mode_enabled = game_mode_enabled;
+ }
+
+ /* M1 - M3 */
+ for (i = 0; i < 3; i++) {
+ val = data[3] & (0x10 << i);
+ input_report_key(g15->input, KEY_MACRO_PRESET1 + i, val);
+ }
+ /* MR */
+ input_report_key(g15->input, KEY_MACRO_RECORD_START, data[3] & 0x80);
+
+ /* LCD menu keys */
+ for (i = 0; i < 5; i++) {
+ val = data[4] & (1 << i);
+ input_report_key(g15->input, KEY_KBD_LCD_MENU1 + i, val);
+ }
+
+ /* Headphone Mute */
+ input_report_key(g15->input, KEY_MUTE, data[4] & 0x20);
+ /* Microphone Mute */
+ input_report_key(g15->input, KEY_F20, data[4] & 0x40);
+
+ input_sync(g15->input);
+ return 0;
+}
+
+static int lg_g510_leds_event(struct lg_g15_data *g15, u8 *data, int size)
+{
+ bool backlight_disabled;
+
+ /*
+ * The G510 ignores backlight updates while the backlight is turned off
+ * through the light toggle button on the keyboard; to work around this
+ * we queue a work item to sync the values when it is turned back on.
+ */
+ backlight_disabled = data[1] & 0x04;
+ if (!backlight_disabled)
+ schedule_work(&g15->work);
+
+ return 0;
+}
+
+static int lg_g15_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct lg_g15_data *g15 = hid_get_drvdata(hdev);
+
+ if (!g15)
+ return 0;
+
+ switch (g15->model) {
+ case LG_G15:
+ if (data[0] == 0x02 && size == 9)
+ return lg_g15_event(g15, data, size);
+ break;
+ case LG_G15_V2:
+ if (data[0] == 0x02 && size == 5)
+ return lg_g15_v2_event(g15, data, size);
+ break;
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ if (data[0] == 0x03 && size == 5)
+ return lg_g510_event(g15, data, size);
+ if (data[0] == 0x04 && size == 2)
+ return lg_g510_leds_event(g15, data, size);
+ break;
+ }
+
+ return 0;
+}
+
+static int lg_g15_input_open(struct input_dev *dev)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+
+ return hid_hw_open(hdev);
+}
+
+static void lg_g15_input_close(struct input_dev *dev)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+
+ hid_hw_close(hdev);
+}
+
+static int lg_g15_register_led(struct lg_g15_data *g15, int i)
+{
+ const char * const led_names[] = {
+ "g15::kbd_backlight",
+ "g15::lcd_backlight",
+ "g15::macro_preset1",
+ "g15::macro_preset2",
+ "g15::macro_preset3",
+ "g15::macro_record",
+ };
+
+ g15->leds[i].led = i;
+ g15->leds[i].cdev.name = led_names[i];
+
+ switch (g15->model) {
+ case LG_G15:
+ case LG_G15_V2:
+ g15->leds[i].cdev.brightness_set_blocking = lg_g15_led_set;
+ g15->leds[i].cdev.brightness_get = lg_g15_led_get;
+ if (i < LG_G15_BRIGHTNESS_MAX) {
+ g15->leds[i].cdev.flags = LED_BRIGHT_HW_CHANGED;
+ g15->leds[i].cdev.max_brightness = 2;
+ } else {
+ g15->leds[i].cdev.max_brightness = 1;
+ }
+ break;
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ switch (i) {
+ case LG_G15_LCD_BRIGHTNESS:
+ /*
+ * The G510 does not have a separate LCD brightness,
+ * but it does have a separate power-on (reset) value.
+ */
+ g15->leds[i].cdev.name = "g15::power_on_backlight_val";
+ /* fall through */
+ case LG_G15_KBD_BRIGHTNESS:
+ g15->leds[i].cdev.brightness_set_blocking =
+ lg_g510_kbd_led_set;
+ g15->leds[i].cdev.brightness_get =
+ lg_g510_kbd_led_get;
+ g15->leds[i].cdev.max_brightness = 255;
+ g15->leds[i].cdev.groups = lg_g510_kbd_led_groups;
+ break;
+ default:
+ g15->leds[i].cdev.brightness_set_blocking =
+ lg_g510_mkey_led_set;
+ g15->leds[i].cdev.brightness_get =
+ lg_g510_mkey_led_get;
+ g15->leds[i].cdev.max_brightness = 1;
+ }
+ break;
+ }
+
+ return devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
+}
+
+static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ u8 gkeys_settings_output_report = 0;
+ u8 gkeys_settings_feature_report = 0;
+ struct hid_report_enum *rep_enum;
+ unsigned int connect_mask = 0;
+ bool has_ff000000 = false;
+ struct lg_g15_data *g15;
+ struct input_dev *input;
+ struct hid_report *rep;
+ int ret, i, gkeys = 0;
+
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Some models have multiple interfaces; we want the interface with
+ * the ff00.0000 application input report.
+ */
+ rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xff000000)
+ has_ff000000 = true;
+ }
+ if (!has_ff000000)
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ g15 = devm_kzalloc(&hdev->dev, sizeof(*g15), GFP_KERNEL);
+ if (!g15)
+ return -ENOMEM;
+
+ mutex_init(&g15->mutex);
+
+ input = devm_input_allocate_device(&hdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ g15->hdev = hdev;
+ g15->model = id->driver_data;
+ hid_set_drvdata(hdev, (void *)g15);
+
+ switch (g15->model) {
+ case LG_G15:
+ INIT_WORK(&g15->work, lg_g15_leds_changed_work);
+ /*
+ * The G15 and G15 v2 use a separate USB device (on a builtin
+ * hub) which emulates a keyboard for the F1 - F12 emulation on
+ * the G-keys. We disable that emulation, rendering the emulated
+ * kbd non-functional, so we do not let hid-input connect.
+ */
+ connect_mask = HID_CONNECT_HIDRAW;
+ gkeys_settings_output_report = 0x02;
+ gkeys = 18;
+ break;
+ case LG_G15_V2:
+ INIT_WORK(&g15->work, lg_g15_leds_changed_work);
+ connect_mask = HID_CONNECT_HIDRAW;
+ gkeys_settings_output_report = 0x02;
+ gkeys = 6;
+ break;
+ case LG_G510:
+ case LG_G510_USB_AUDIO:
+ INIT_WORK(&g15->work, lg_g510_leds_sync_work);
+ connect_mask = HID_CONNECT_HIDINPUT | HID_CONNECT_HIDRAW;
+ gkeys_settings_feature_report = 0x01;
+ gkeys = 18;
+ break;
+ }
+
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret)
+ return ret;
+
+ /* Tell the keyboard to stop sending F1-F12 + 1-6 for G1 - G18 */
+ if (gkeys_settings_output_report) {
+ g15->transfer_buf[0] = gkeys_settings_output_report;
+ memset(g15->transfer_buf + 1, 0, gkeys);
+ /*
+ * The kbd ignores our output report if we do not queue
+ * an URB on the USB input endpoint first...
+ */
+ ret = hid_hw_open(hdev);
+ if (ret)
+ goto error_hw_stop;
+ ret = hid_hw_output_report(hdev, g15->transfer_buf, gkeys + 1);
+ hid_hw_close(hdev);
+ }
+
+ if (gkeys_settings_feature_report) {
+ g15->transfer_buf[0] = gkeys_settings_feature_report;
+ memset(g15->transfer_buf + 1, 0, gkeys);
+ ret = hid_hw_raw_request(g15->hdev,
+ gkeys_settings_feature_report,
+ g15->transfer_buf, gkeys + 1,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ }
+
+ if (ret < 0) {
+ hid_err(hdev, "Error disabling keyboard emulation for the G-keys\n");
+ goto error_hw_stop;
+ }
+
+ /* Get initial brightness levels */
+ ret = lg_g15_get_initial_led_brightness(g15);
+ if (ret)
+ goto error_hw_stop;
+
+ /* Setup and register input device */
+ input->name = "Logitech Gaming Keyboard Gaming Keys";
+ input->phys = hdev->phys;
+ input->uniq = hdev->uniq;
+ input->id.bustype = hdev->bus;
+ input->id.vendor = hdev->vendor;
+ input->id.product = hdev->product;
+ input->id.version = hdev->version;
+ input->dev.parent = &hdev->dev;
+ input->open = lg_g15_input_open;
+ input->close = lg_g15_input_close;
+
+ /* G-keys */
+ for (i = 0; i < gkeys; i++)
+ input_set_capability(input, EV_KEY, KEY_MACRO1 + i);
+
+ /* M1 - M3 and MR keys */
+ for (i = 0; i < 3; i++)
+ input_set_capability(input, EV_KEY, KEY_MACRO_PRESET1 + i);
+ input_set_capability(input, EV_KEY, KEY_MACRO_RECORD_START);
+
+ /* Keys below the LCD, intended for controlling a menu on the LCD */
+ for (i = 0; i < 5; i++)
+ input_set_capability(input, EV_KEY, KEY_KBD_LCD_MENU1 + i);
+
+ /*
+ * On the G510 only report headphone and mic mute keys when *not* using
+ * the builtin USB audio device. When the builtin audio is used these
+ * keys directly toggle mute (and the LEDs) on/off.
+ */
+ if (g15->model == LG_G510) {
+ input_set_capability(input, EV_KEY, KEY_MUTE);
+ /* Userspace expects F20 for micmute */
+ input_set_capability(input, EV_KEY, KEY_F20);
+ }
+
+ g15->input = input;
+ input_set_drvdata(input, hdev);
+
+ ret = input_register_device(input);
+ if (ret)
+ goto error_hw_stop;
+
+ /* Register LED devices */
+ for (i = 0; i < LG_G15_LED_MAX; i++) {
+ ret = lg_g15_register_led(g15, i);
+ if (ret)
+ goto error_hw_stop;
+ }
+
+ return 0;
+
+error_hw_stop:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static const struct hid_device_id lg_g15_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G15_LCD),
+ .driver_data = LG_G15 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G15_V2_LCD),
+ .driver_data = LG_G15_V2 },
+ /* G510 without a headset plugged in */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G510),
+ .driver_data = LG_G510 },
+ /* G510 with headset plugged in / with extra USB audio interface */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO),
+ .driver_data = LG_G510_USB_AUDIO },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, lg_g15_devices);
+
+static struct hid_driver lg_g15_driver = {
+ .name = "lg-g15",
+ .id_table = lg_g15_devices,
+ .raw_event = lg_g15_raw_event,
+ .probe = lg_g15_probe,
+};
+module_hid_driver(lg_g15_driver);
+
+MODULE_LICENSE("GPL");
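
The color attribute registered above accepts a '#rrggbb' string (7 characters, or 8 with a trailing newline). A hypothetical userspace example; the sysfs path is an assumption and the actual LED name may differ per system:

#include <stdio.h>

int main(void)
{
    /* Assumed path; check /sys/class/leds/ for the actual LED name. */
    const char *path = "/sys/class/leds/g15::kbd_backlight/color";
    FILE *f = fopen(path, "w");

    if (!f) {
        perror("fopen");
        return 1;
    }
    fprintf(f, "#ff0000\n"); /* 8 bytes incl. '\n': accepted by color_store() */
    fclose(f);
    return 0;
}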
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 8e91e2f06cb4..cd9193078525 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1102,6 +1102,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
NULL, 0, &response);
+ /* Ignore these intermittent errors */
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ return -EIO;
if (ret > 0) {
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
__func__, ret);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index c50bcd967d99..d1b39c29e353 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -419,13 +420,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_LCPOWER)
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
#endif
-#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
-#endif
#if IS_ENABLED(CONFIG_HID_LENOVO)
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 7c6abd7e0979..9ce22acdfaca 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -744,7 +744,8 @@ static void rmi_remove(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
- if (hdata->device_flags & RMI_DEVICE) {
+ if ((hdata->device_flags & RMI_DEVICE)
+ && test_bit(RMI_STARTED, &hdata->flags)) {
clear_bit(RMI_STARTED, &hdata->flags);
cancel_work_sync(&hdata->reset_work);
rmi_unregister_transport_device(&hdata->xport);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index bbc6ec1aa5cb..c3fc0ceb8096 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -197,15 +197,15 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
}
if (count > HID_MAX_BUFFER_SIZE) {
- printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
- task_pid_nr(current));
+ hid_warn(dev, "pid %d passed too large report\n",
+ task_pid_nr(current));
ret = -EINVAL;
goto out;
}
if (count < 2) {
- printk(KERN_WARNING "hidraw: pid %d passed too short report\n",
- task_pid_nr(current));
+ hid_warn(dev, "pid %d passed too short report\n",
+ task_pid_nr(current));
ret = -EINVAL;
goto out;
}
@@ -468,9 +468,7 @@ static const struct file_operations hidraw_ops = {
.release = hidraw_release,
.unlocked_ioctl = hidraw_ioctl,
.fasync = hidraw_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hidraw_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -597,7 +595,7 @@ int __init hidraw_init(void)
if (result < 0)
goto error_class;
- printk(KERN_INFO "hidraw: raw HID events driver (C) Jiri Kosina\n");
+ pr_info("raw HID events driver (C) Jiri Kosina\n");
out:
return result;
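
Switching to compat_ptr_ioctl above works because every hidraw ioctl argument is a pointer: the helper converts the 32-bit user pointer and forwards to the native handler, which is what the removed hiddev_compat_ioctl() further down did by hand. In sketch form:

/* Roughly what compat_ptr_ioctl() does for pointer-only ioctl ABIs;
 * compat_ptr() and -ENOIOCTLCMD come from <linux/compat.h>. */
static long example_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
    if (!file->f_op->unlocked_ioctl)
        return -ENOIOCTLCMD;

    return file->f_op->unlocked_ioctl(file, cmd,
                                      (unsigned long)compat_ptr(arg));
}

Drivers whose ioctl structs have different 32-bit and 64-bit layouts cannot take this shortcut and still need a real translation handler.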
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 04c088131e04..a358e61fbc82 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -48,6 +48,7 @@
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
+#define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5)
/* flags */
#define I2C_HID_STARTED 0
@@ -157,8 +158,6 @@ struct i2c_hid {
bool irq_wake_enabled;
struct mutex reset_lock;
-
- unsigned long sleep_delay;
};
static const struct i2c_hid_quirks {
@@ -170,8 +169,12 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_3118,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
+ { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
+ I2C_HID_QUIRK_RESET_ON_RESUME },
{ 0, 0 }
};
@@ -1212,8 +1215,15 @@ static int i2c_hid_resume(struct device *dev)
* solves "incomplete reports" on Raydium devices 2386:3118 and
* 2386:4B33 and fixes various SIS touchscreens no longer sending
* data after a suspend/resume.
+ *
+ * However, some ALPS touchpads generate an IRQ storm without a reset, so
+ * let's still reset them here.
*/
- ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ihid->quirks & I2C_HID_QUIRK_RESET_ON_RESUME)
+ ret = i2c_hid_hwreset(client);
+ else
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
if (ret)
return ret;
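
The quirk table above is matched with a wildcard: HID_ANY_ID as the product ID applies the quirk to every device from that vendor, which is how the ELAN and ALPS entries cover whole product lines. A sketch of such a lookup; the field names are assumptions, since the struct i2c_hid_quirks layout is not shown in this hunk:

/* Sketch of a wildcard quirk lookup; a zero idVendor terminates the table. */
static u32 lookup_quirks(const struct i2c_hid_quirks *tbl,
                         u16 vendor, u16 product)
{
    u32 quirks = 0;
    int n;

    for (n = 0; tbl[n].idVendor; n++)
        if (tbl[n].idVendor == vendor &&
            (tbl[n].idProduct == (u16)HID_ANY_ID ||
             tbl[n].idProduct == product))
            quirks = tbl[n].quirks;

    return quirks;
}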
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index c6c9ac09dac3..30a91d068306 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -402,7 +402,7 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
* @dev: ISHTP device instance
* @disconnect_req: disconnect request structure
*
- * Disconnect request bus message from the fw. Send diconnect response.
+ * Disconnect request bus message from the fw. Send disconnect response.
*/
static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
struct hbm_client_connect_request *disconnect_req)
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 1f9bc4483465..e421cdf2d1a4 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -854,13 +854,6 @@ ret_unlock:
return r;
}
-#ifdef CONFIG_COMPAT
-static long hiddev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return hiddev_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations hiddev_fops = {
.owner = THIS_MODULE,
.read = hiddev_read,
@@ -870,9 +863,7 @@ static const struct file_operations hiddev_fops = {
.release = hiddev_release,
.unlocked_ioctl = hiddev_ioctl,
.fasync = hiddev_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hiddev_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index a1eec7177c2d..94daf8240c95 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -9,4 +9,5 @@ CFLAGS_hv_balloon.o = -I$(src)
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o hv_trace.o
+hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 6e4c015783ff..74e77de89b4f 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
@@ -40,29 +41,30 @@ EXPORT_SYMBOL_GPL(vmbus_connection);
__u32 vmbus_proto_version;
EXPORT_SYMBOL_GPL(vmbus_proto_version);
-static __u32 vmbus_get_next_version(__u32 current_version)
-{
- switch (current_version) {
- case (VERSION_WIN7):
- return VERSION_WS2008;
-
- case (VERSION_WIN8):
- return VERSION_WIN7;
-
- case (VERSION_WIN8_1):
- return VERSION_WIN8;
+/*
+ * Table of VMBus versions listed from newest to oldest.
+ */
+static __u32 vmbus_versions[] = {
+ VERSION_WIN10_V5_2,
+ VERSION_WIN10_V5_1,
+ VERSION_WIN10_V5,
+ VERSION_WIN10_V4_1,
+ VERSION_WIN10,
+ VERSION_WIN8_1,
+ VERSION_WIN8,
+ VERSION_WIN7,
+ VERSION_WS2008
+};
- case (VERSION_WIN10):
- return VERSION_WIN8_1;
+/*
+ * Maximal VMBus protocol version guests can negotiate. Useful to cap the
+ * VMBus version for testing and debugging purposes.
+ */
+static uint max_version = VERSION_WIN10_V5_2;
- case (VERSION_WIN10_V5):
- return VERSION_WIN10;
-
- case (VERSION_WS2008):
- default:
- return VERSION_INVAL;
- }
-}
+module_param(max_version, uint, S_IRUGO);
+MODULE_PARM_DESC(max_version,
+ "Maximal VMBus protocol version which can be negotiated");
int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
{
@@ -80,12 +82,12 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
msg->vmbus_version_requested = version;
/*
- * VMBus protocol 5.0 (VERSION_WIN10_V5) requires that we must use
- * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
+ * VMBus protocol 5.0 (VERSION_WIN10_V5) and higher require that we must
+ * use VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
* and for subsequent messages, we must use the Message Connection ID
* field in the host-returned Version Response Message. And, with
- * VERSION_WIN10_V5, we don't use msg->interrupt_page, but we tell
- * the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
+ * VERSION_WIN10_V5 and higher, we don't use msg->interrupt_page, but we
+ * tell the host explicitly that we still use VMBUS_MESSAGE_SINT(2) for
* compatibility.
*
* On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
@@ -169,8 +171,8 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
*/
int vmbus_connect(void)
{
- int ret = 0;
struct vmbus_channel_msginfo *msginfo = NULL;
+ int i, ret = 0;
__u32 version;
/* Initialize the vmbus connection */
@@ -206,7 +208,7 @@ int vmbus_connect(void)
* abstraction stuff
*/
vmbus_connection.int_page =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
+ (void *)hv_alloc_hyperv_zeroed_page();
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;
@@ -215,14 +217,14 @@ int vmbus_connect(void)
vmbus_connection.recv_int_page = vmbus_connection.int_page;
vmbus_connection.send_int_page =
(void *)((unsigned long)vmbus_connection.int_page +
- (PAGE_SIZE >> 1));
+ (HV_HYP_PAGE_SIZE >> 1));
/*
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
- vmbus_connection.monitor_pages[0] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
- vmbus_connection.monitor_pages[1] = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 0);
+ vmbus_connection.monitor_pages[0] = (void *)hv_alloc_hyperv_zeroed_page();
+ vmbus_connection.monitor_pages[1] = (void *)hv_alloc_hyperv_zeroed_page();
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;
@@ -244,21 +246,21 @@ int vmbus_connect(void)
* version.
*/
- version = VERSION_CURRENT;
+ for (i = 0; ; i++) {
+ if (i == ARRAY_SIZE(vmbus_versions))
+ goto cleanup;
+
+ version = vmbus_versions[i];
+ if (version > max_version)
+ continue;
- do {
ret = vmbus_negotiate_version(msginfo, version);
if (ret == -ETIMEDOUT)
goto cleanup;
if (vmbus_connection.conn_state == CONNECTED)
break;
-
- version = vmbus_get_next_version(version);
- } while (version != VERSION_INVAL);
-
- if (version == VERSION_INVAL)
- goto cleanup;
+ }
vmbus_proto_version = version;
pr_info("Vmbus version:%d.%d\n",
@@ -295,12 +297,12 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);
if (vmbus_connection.int_page) {
- free_pages((unsigned long)vmbus_connection.int_page, 0);
+ hv_free_hyperv_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}
- free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0);
- free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0);
+ hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
+ hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[0] = NULL;
vmbus_connection.monitor_pages[1] = NULL;
}
@@ -361,6 +363,7 @@ void vmbus_on_event(unsigned long data)
trace_vmbus_on_event(channel);
+ hv_debug_delay_test(channel, INTERRUPT_DELAY);
do {
void (*callback_fn)(void *);
@@ -413,7 +416,7 @@ int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
case HV_STATUS_INVALID_CONNECTION_ID:
/*
* See vmbus_negotiate_version(): VMBus protocol 5.0
- * requires that we must use
+ * and higher require that we use
* VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate
* Contact message, but on old hosts that only
* support VMBus protocol 4.0 or lower, here we get
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 34bd73526afd..b155d0052981 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -23,6 +23,9 @@
#include <linux/percpu_counter.h>
#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
+
+#include <asm/mshyperv.h>
#define CREATE_TRACE_POINTS
#include "hv_trace_balloon.h"
@@ -341,8 +344,6 @@ struct dm_unballoon_response {
*
* mem_range: Memory range to hot add.
*
- * On Linux we currently don't support this since we cannot hot add
- * arbitrary granularity of memory.
*/
struct dm_hot_add {
@@ -457,6 +458,7 @@ struct hot_add_wrk {
struct work_struct wrk;
};
+static bool allow_hibernation;
static bool hot_add = true;
static bool do_hot_add;
/*
@@ -477,7 +479,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
-static int dm_ring_size = (5 * PAGE_SIZE);
+static int dm_ring_size = 20 * 1024;
/*
* Driver specific state.
@@ -493,10 +495,10 @@ enum hv_dm_state {
};
-static __u8 recv_buffer[PAGE_SIZE];
-static __u8 balloon_up_send_buffer[PAGE_SIZE];
-#define PAGES_IN_2M 512
-#define HA_CHUNK (32 * 1024)
+static __u8 recv_buffer[HV_HYP_PAGE_SIZE];
+static __u8 balloon_up_send_buffer[HV_HYP_PAGE_SIZE];
+#define PAGES_IN_2M (2 * 1024 * 1024 / PAGE_SIZE)
+#define HA_CHUNK (128 * 1024 * 1024 / PAGE_SIZE)
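A quick standalone check (not part of the patch) that the new PAGE_SIZE-relative definitions reduce to the old hard-coded values under the common 4 KiB page size; MY_PAGE_SIZE stands in for the kernel's PAGE_SIZE:

#include <assert.h>

#define MY_PAGE_SIZE	4096UL
#define PAGES_IN_2M	(2 * 1024 * 1024 / MY_PAGE_SIZE)	/* was 512 */
#define HA_CHUNK	(128 * 1024 * 1024 / MY_PAGE_SIZE)	/* was 32 * 1024 */

int main(void)
{
	assert(PAGES_IN_2M == 512);
	assert(HA_CHUNK == 32 * 1024);
	return 0;
}

On 64 KiB pages the same formulas yield 32 and 2048, keeping the hot-add chunk at 128 MiB of memory either way.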
struct hv_dynmem_device {
struct hv_device *dev;
@@ -680,9 +682,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__ClearPageOffline(pg);
/* This frame is currently backed; online the page. */
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
+ generic_online_page(pg, 0);
lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
@@ -1053,8 +1053,12 @@ static void hot_add_req(struct work_struct *dummy)
else
resp.result = 0;
- if (!do_hot_add || (resp.page_count == 0))
- pr_err("Memory hot add failed\n");
+ if (!do_hot_add || resp.page_count == 0) {
+ if (!allow_hibernation)
+ pr_err("Memory hot add failed\n");
+ else
+ pr_info("Ignore hot-add request!\n");
+ }
dm->state = DM_INITIALIZED;
resp.hdr.trans_id = atomic_inc_return(&trans_id);
@@ -1076,7 +1080,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
__u64 *max_page_count = (__u64 *)&info_hdr[1];
pr_info("Max. dynamic memory size: %llu MB\n",
- (*max_page_count) >> (20 - PAGE_SHIFT));
+ (*max_page_count) >> (20 - HV_HYP_PAGE_SHIFT));
}
break;
@@ -1218,7 +1222,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
for (i = 0; (i * alloc_unit) < num_pages; i++) {
if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
- PAGE_SIZE)
+ HV_HYP_PAGE_SIZE)
return i * alloc_unit;
/*
@@ -1274,9 +1278,9 @@ static void balloon_up(struct work_struct *dummy)
/*
* We will attempt 2M allocations. However, if we fail to
- * allocate 2M chunks, we will go back to 4k allocations.
+ * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
*/
- alloc_unit = 512;
+ alloc_unit = PAGES_IN_2M;
avail_pages = si_mem_available();
floor = compute_balloon_floor();
@@ -1292,7 +1296,7 @@ static void balloon_up(struct work_struct *dummy)
}
while (!done) {
- memset(balloon_up_send_buffer, 0, PAGE_SIZE);
+ memset(balloon_up_send_buffer, 0, HV_HYP_PAGE_SIZE);
bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
@@ -1491,7 +1495,7 @@ static void balloon_onchannelcallback(void *context)
memset(recv_buffer, 0, sizeof(recv_buffer));
vmbus_recvpacket(dev->channel, recv_buffer,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
dm_msg = (struct dm_message *)recv_buffer;
@@ -1509,6 +1513,11 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_BALLOON_REQUEST:
+ if (allow_hibernation) {
+ pr_info("Ignore balloon-up request!\n");
+ break;
+ }
+
if (dm->state == DM_BALLOON_UP)
pr_warn("Currently ballooning\n");
bal_msg = (struct dm_balloon *)recv_buffer;
@@ -1518,6 +1527,11 @@ static void balloon_onchannelcallback(void *context)
break;
case DM_UNBALLOON_REQUEST:
+ if (allow_hibernation) {
+ pr_info("Ignore balloon-down request!\n");
+ break;
+ }
+
dm->state = DM_BALLOON_DOWN;
balloon_down(dm,
(struct dm_unballoon_request *)recv_buffer);
@@ -1623,6 +1637,11 @@ static int balloon_connect_vsp(struct hv_device *dev)
cap_msg.hdr.size = sizeof(struct dm_capabilities);
cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
+ /*
+ * When hibernation (i.e. virtual ACPI S4 state) is enabled, the host
+ * currently still requires the bits to be set, so we have to add code
+ * to fail the host's hot-add and balloon up/down requests, if any.
+ */
cap_msg.caps.cap_bits.balloon = 1;
cap_msg.caps.cap_bits.hot_add = 1;
@@ -1672,6 +1691,10 @@ static int balloon_probe(struct hv_device *dev,
{
int ret;
+ allow_hibernation = hv_is_hibernation_supported();
+ if (allow_hibernation)
+ hot_add = false;
+
#ifdef CONFIG_MEMORY_HOTPLUG
do_hot_add = hot_add;
#else
@@ -1711,6 +1734,8 @@ static int balloon_probe(struct hv_device *dev,
return 0;
probe_error:
+ dm_device.state = DM_INIT_ERROR;
+ dm_device.thread = NULL;
vmbus_close(dev->channel);
#ifdef CONFIG_MEMORY_HOTPLUG
unregister_memory_notifier(&hv_memory_nb);
@@ -1752,6 +1777,59 @@ static int balloon_remove(struct hv_device *dev)
return 0;
}
+static int balloon_suspend(struct hv_device *hv_dev)
+{
+ struct hv_dynmem_device *dm = hv_get_drvdata(hv_dev);
+
+ tasklet_disable(&hv_dev->channel->callback_event);
+
+ cancel_work_sync(&dm->balloon_wrk.wrk);
+ cancel_work_sync(&dm->ha_wrk.wrk);
+
+ if (dm->thread) {
+ kthread_stop(dm->thread);
+ dm->thread = NULL;
+ vmbus_close(hv_dev->channel);
+ }
+
+ tasklet_enable(&hv_dev->channel->callback_event);
+
+ return 0;
+}
+
+static int balloon_resume(struct hv_device *dev)
+{
+ int ret;
+
+ dm_device.state = DM_INITIALIZING;
+
+ ret = balloon_connect_vsp(dev);
+
+ if (ret != 0)
+ goto out;
+
+ dm_device.thread =
+ kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+ if (IS_ERR(dm_device.thread)) {
+ ret = PTR_ERR(dm_device.thread);
+ dm_device.thread = NULL;
+ goto close_channel;
+ }
+
+ dm_device.state = DM_INITIALIZED;
+ return 0;
+close_channel:
+ vmbus_close(dev->channel);
+out:
+ dm_device.state = DM_INIT_ERROR;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ unregister_memory_notifier(&hv_memory_nb);
+ restore_online_page_callback(&hv_online_page);
+#endif
+ return ret;
+}
+
static const struct hv_vmbus_device_id id_table[] = {
/* Dynamic Memory Class ID */
/* 525074DC-8985-46e2-8057-A307DC18A502 */
@@ -1766,6 +1844,8 @@ static struct hv_driver balloon_drv = {
.id_table = id_table,
.probe = balloon_probe,
.remove = balloon_remove,
+ .suspend = balloon_suspend,
+ .resume = balloon_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/hv/hv_debugfs.c b/drivers/hv/hv_debugfs.c
new file mode 100644
index 000000000000..8a2878573582
--- /dev/null
+++ b/drivers/hv/hv_debugfs.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Authors:
+ * Branden Bonaby <brandonbonaby94@gmail.com>
+ */
+
+#include <linux/hyperv.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include "hyperv_vmbus.h"
+
+struct dentry *hv_debug_root;
+
+static int hv_debugfs_delay_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int hv_debugfs_delay_set(void *data, u64 val)
+{
+ if (val > 1000)
+ return -EINVAL;
+ *(u32 *)data = val;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_delay_fops, hv_debugfs_delay_get,
+ hv_debugfs_delay_set, "%llu\n");
+
+static int hv_debugfs_state_get(void *data, u64 *val)
+{
+ *val = *(bool *)data;
+ return 0;
+}
+
+static int hv_debugfs_state_set(void *data, u64 val)
+{
+ if (val == 1)
+ *(bool *)data = true;
+ else if (val == 0)
+ *(bool *)data = false;
+ else
+ return -EINVAL;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hv_debugfs_state_fops, hv_debugfs_state_get,
+ hv_debugfs_state_set, "%llu\n");
+
+/* Set up delay files to store test values */
+static int hv_debug_delay_files(struct hv_device *dev, struct dentry *root)
+{
+ struct vmbus_channel *channel = dev->channel;
+ char *buffer = "fuzz_test_buffer_interrupt_delay";
+ char *message = "fuzz_test_message_delay";
+ int *buffer_val = &channel->fuzz_testing_interrupt_delay;
+ int *message_val = &channel->fuzz_testing_message_delay;
+ struct dentry *buffer_file, *message_file;
+
+ buffer_file = debugfs_create_file(buffer, 0644, root,
+ buffer_val,
+ &hv_debugfs_delay_fops);
+ if (IS_ERR(buffer_file)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", buffer);
+ return PTR_ERR(buffer_file);
+ }
+
+ message_file = debugfs_create_file(message, 0644, root,
+ message_val,
+ &hv_debugfs_delay_fops);
+ if (IS_ERR(message_file)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", message);
+ return PTR_ERR(message_file);
+ }
+
+ return 0;
+}
+
+/* Set up the test state value for a vmbus device */
+static int hv_debug_set_test_state(struct hv_device *dev, struct dentry *root)
+{
+ struct vmbus_channel *channel = dev->channel;
+ bool *state = &channel->fuzz_testing_state;
+ char *status = "fuzz_test_state";
+ struct dentry *test_state;
+
+ test_state = debugfs_create_file(status, 0644, root,
+ state,
+ &hv_debugfs_state_fops);
+ if (IS_ERR(test_state)) {
+ pr_debug("debugfs_hyperv: file %s not created\n", status);
+ return PTR_ERR(test_state);
+ }
+
+ return 0;
+}
+
+/* Bind hv device to a dentry for debugfs */
+static void hv_debug_set_dir_dentry(struct hv_device *dev, struct dentry *root)
+{
+ if (hv_debug_root)
+ dev->debug_dir = root;
+}
+
+/* Create all test dentries and names for fuzz testing */
+int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+ const char *device = dev_name(&dev->device);
+ char *delay_name = "delay";
+ struct dentry *delay, *dev_root;
+ int ret;
+
+ if (!IS_ERR(hv_debug_root)) {
+ dev_root = debugfs_create_dir(device, hv_debug_root);
+ if (IS_ERR(dev_root)) {
+ pr_debug("debugfs_hyperv: hyperv/%s/ not created\n",
+ device);
+ return PTR_ERR(dev_root);
+ }
+ hv_debug_set_test_state(dev, dev_root);
+ hv_debug_set_dir_dentry(dev, dev_root);
+ delay = debugfs_create_dir(delay_name, dev_root);
+
+ if (IS_ERR(delay)) {
+ pr_debug("debugfs_hyperv: hyperv/%s/%s/ not created\n",
+ device, delay_name);
+ return PTR_ERR(delay);
+ }
+ ret = hv_debug_delay_files(dev, delay);
+
+ return ret;
+ }
+ pr_debug("debugfs_hyperv: hyperv/ not in root debugfs path\n");
+ return PTR_ERR(hv_debug_root);
+}
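Given the layout created above (hyperv/<device>/fuzz_test_state plus hyperv/<device>/delay/*), a fuzz run can be driven from userspace. A minimal sketch; the device directory name below is made up, and debugfs is assumed mounted at /sys/kernel/debug:

#include <stdio.h>

/* Hypothetical device name; real names come from dev_name(&dev->device). */
#define DEV_DIR "/sys/kernel/debug/hyperv/deadbeef-0000-0000-0000-000000000000"

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Enable fuzz testing, then request a 500 us buffer-interrupt delay. */
	if (write_attr(DEV_DIR "/fuzz_test_state", "1"))
		return 1;
	return write_attr(DEV_DIR "/delay/fuzz_test_buffer_interrupt_delay",
			  "500") ? 1 : 0;
}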
+
+/* Remove dentry associated with released hv device */
+void hv_debug_rm_dev_dir(struct hv_device *dev)
+{
+ if (!IS_ERR(hv_debug_root))
+ debugfs_remove_recursive(dev->debug_dir);
+}
+
+/* Remove all dentries associated with vmbus testing */
+void hv_debug_rm_all_dir(void)
+{
+ debugfs_remove_recursive(hv_debug_root);
+}
+
+/* Delay buffer/message reads on a vmbus channel */
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type)
+{
+ struct vmbus_channel *test_channel = channel->primary_channel ?
+ channel->primary_channel :
+ channel;
+ bool state = test_channel->fuzz_testing_state;
+
+ if (state) {
+ if (delay_type == 0)
+ udelay(test_channel->fuzz_testing_interrupt_delay);
+ else
+ udelay(test_channel->fuzz_testing_message_delay);
+ }
+}
+
+/* Initialize top dentry for vmbus testing */
+int hv_debug_init(void)
+{
+ hv_debug_root = debugfs_create_dir("hyperv", NULL);
+ if (IS_ERR(hv_debug_root)) {
+ pr_debug("debugfs_hyperv: hyperv/ not created\n");
+ return PTR_ERR(hv_debug_root);
+ }
+ return 0;
+}
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 7e30ae0635cc..08fa4a5de644 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <linux/sched.h>
+#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -234,7 +235,7 @@ void hv_fcopy_onchannelcallback(void *context)
if (fcopy_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
&requestid);
if (recvlen <= 0)
return;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5054d1105236..ae7c028dc5a8 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -27,6 +27,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -661,7 +662,7 @@ void hv_kvp_onchannelcallback(void *context)
if (kvp_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 4, &recvlen,
&requestid);
if (recvlen > 0) {
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 20ba95b75a94..03b6454268b3 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -12,6 +12,7 @@
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
@@ -297,7 +298,7 @@ void hv_vss_onchannelcallback(void *context)
if (vss_transaction.state > HVUTIL_READY)
return;
- vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+ vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
&requestid);
if (recvlen > 0) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index e32681ee7b9f..766bd8457346 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -136,7 +136,7 @@ static void shutdown_onchannelcallback(void *context)
struct icmsg_hdr *icmsghdrp;
vmbus_recvpacket(channel, shut_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[
@@ -284,7 +284,7 @@ static void timesync_onchannelcallback(void *context)
u8 *time_txf_buf = util_timesynch.recv_buffer;
vmbus_recvpacket(channel, time_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
@@ -346,7 +346,7 @@ static void heartbeat_onchannelcallback(void *context)
while (1) {
vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ HV_HYP_PAGE_SIZE, &recvlen, &requestid);
if (!recvlen)
break;
@@ -390,7 +390,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
- srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+ srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
srv->channel = dev->channel;
@@ -413,8 +413,9 @@ static int util_probe(struct hv_device *dev,
hv_set_drvdata(dev, srv);
- ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
- srv->util_cb, dev->channel);
+ ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
+ 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
+ dev->channel);
if (ret)
goto error;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index af9379a3bf89..20edcfd3b96c 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -385,4 +385,35 @@ enum hvutil_device_state {
HVUTIL_DEVICE_DYING, /* driver unload is in progress */
};
+enum delay {
+ INTERRUPT_DELAY = 0,
+ MESSAGE_DELAY = 1,
+};
+
+#ifdef CONFIG_HYPERV_TESTING
+
+int hv_debug_add_dev_dir(struct hv_device *dev);
+void hv_debug_rm_dev_dir(struct hv_device *dev);
+void hv_debug_rm_all_dir(void);
+int hv_debug_init(void);
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);
+
+#else /* CONFIG_HYPERV_TESTING */
+
+static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {}
+static inline void hv_debug_rm_all_dir(void) {}
+static inline void hv_debug_delay_test(struct vmbus_channel *channel,
+				       enum delay delay_type) {}
+static inline int hv_debug_init(void)
+{
+ return -1;
+}
+
+static inline int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+ return -1;
+}
+
+#endif /* CONFIG_HYPERV_TESTING */
+
#endif /* _HYPERV_VMBUS_H */
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 9a03b163cbbd..356e22159e83 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -396,6 +396,7 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
struct hv_ring_buffer_info *rbi = &channel->inbound;
struct vmpacket_descriptor *desc;
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
return NULL;
@@ -421,6 +422,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
u32 packetlen = desc->len8 << 3;
u32 dsize = rbi->ring_datasize;
+ hv_debug_delay_test(channel, MESSAGE_DELAY);
/* bump offset to next potential packet */
rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
if (rbi->priv_read_index >= dsize)
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8c06b3361c27..4ef5a66df680 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -79,7 +79,7 @@ static struct notifier_block hyperv_panic_block = {
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
-static DEFINE_SEMAPHORE(hyperv_mmio_lock);
+static DEFINE_MUTEX(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -960,6 +960,8 @@ static void vmbus_device_release(struct device *device)
struct hv_device *hv_dev = device_to_hv_device(device);
struct vmbus_channel *channel = hv_dev->channel;
+ hv_debug_rm_dev_dir(hv_dev);
+
mutex_lock(&vmbus_connection.channel_mutex);
hv_process_channel_removal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
@@ -1273,7 +1275,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
- kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
+ kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
if (bytes_written)
hyperv_report_panic_msg(panic_pa, bytes_written);
@@ -1373,7 +1375,7 @@ static int vmbus_bus_init(void)
*/
hv_get_crash_ctl(hyperv_crash_ctl);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
- hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
+ hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
if (hv_panic_page) {
ret = kmsg_dump_register(&hv_kmsg_dumper);
if (ret)
@@ -1401,7 +1403,7 @@ err_alloc:
hv_remove_vmbus_irq();
bus_unregister(&hv_bus);
- free_page((unsigned long)hv_panic_page);
+ hv_free_hyperv_page((unsigned long)hv_panic_page);
unregister_sysctl_table(hv_ctl_table_hdr);
hv_ctl_table_hdr = NULL;
return ret;
@@ -1809,6 +1811,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
pr_err("Unable to register primary channeln");
goto err_kset_unregister;
}
+ hv_debug_add_dev_dir(child_device_obj);
return 0;
@@ -2010,7 +2013,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
int retval;
retval = -ENXIO;
- down(&hyperv_mmio_lock);
+ mutex_lock(&hyperv_mmio_lock);
/*
* If overlaps with frame buffers are allowed, then first attempt to
@@ -2057,7 +2060,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
}
exit:
- up(&hyperv_mmio_lock);
+ mutex_unlock(&hyperv_mmio_lock);
return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
@@ -2074,7 +2077,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
struct resource *iter;
- down(&hyperv_mmio_lock);
+ mutex_lock(&hyperv_mmio_lock);
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
@@ -2082,7 +2085,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
__release_region(iter, start, size);
}
release_mem_region(start, size);
- up(&hyperv_mmio_lock);
+ mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
@@ -2215,8 +2218,7 @@ static int vmbus_bus_resume(struct device *dev)
* We only use the 'vmbus_proto_version', which was in use before
* hibernation, to re-negotiate with the host.
*/
- if (vmbus_proto_version == VERSION_INVAL ||
- vmbus_proto_version == 0) {
+ if (!vmbus_proto_version) {
pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
return -EINVAL;
}
@@ -2303,7 +2305,7 @@ static void hv_crash_handler(struct pt_regs *regs)
vmbus_connection.conn_state = DISCONNECTED;
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
- hv_synic_cleanup(cpu);
+ hv_synic_disable_regs(cpu);
hyperv_cleanup();
};
@@ -2373,6 +2375,7 @@ static int __init hv_acpi_init(void)
ret = -ETIMEDOUT;
goto cleanup;
}
+ hv_debug_init();
ret = vmbus_bus_init();
if (ret)
@@ -2409,6 +2412,8 @@ static void __exit vmbus_exit(void)
tasklet_kill(&hv_cpu->msg_dpc);
}
+ hv_debug_rm_all_dir();
+
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 13a6b4afb4b3..23dfe848979a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -40,7 +40,8 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
- depends on AB8500_GPADC && AB8500_BM
+ depends on AB8500_GPADC && AB8500_BM && (IIO = y)
+ default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -309,7 +310,6 @@ config SENSORS_APPLESMC
depends on INPUT && X86
select NEW_LEDS
select LEDS_CLASS
- select INPUT_POLLDEV
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -727,6 +727,33 @@ config SENSORS_LTC2945
This driver can also be built as a module. If so, the module will
be called ltc2945.
+config SENSORS_LTC2947
+ tristate
+
+config SENSORS_LTC2947_I2C
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over I2C"
+ depends on I2C
+ select REGMAP_I2C
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for the Linear Technology LTC2947
+ high precision power and energy monitor over I2C.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-i2c.
+
+config SENSORS_LTC2947_SPI
+ tristate "Analog Devices LTC2947 High Precision Power and Energy Monitor over SPI"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select SENSORS_LTC2947
+ help
+ If you say yes here you get support for the Linear Technology LTC2947
+ high precision power and energy monitor over SPI.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2947-spi.
+
config SENSORS_LTC2990
tristate "Linear Technology LTC2990"
depends on I2C
@@ -1709,6 +1736,16 @@ config SENSORS_TMP421
This driver can also be built as a module. If so, the module
will be called tmp421.
+config SENSORS_TMP513
+ tristate "Texas Instruments TMP513 and compatibles"
+ depends on I2C
+ help
+ If you say yes here you get support for the Texas Instruments TMP512
+ and TMP513 temperature and power supply sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called tmp513.
+
config SENSORS_VEXPRESS
tristate "Versatile Express"
depends on VEXPRESS_CONFIG
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 40c036ea45e6..6db5db9cdc29 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -106,6 +106,9 @@ obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2947) += ltc2947-core.o
+obj-$(CONFIG_SENSORS_LTC2947_I2C) += ltc2947-i2c.o
+obj-$(CONFIG_SENSORS_LTC2947_SPI) += ltc2947-spi.o
obj-$(CONFIG_SENSORS_LTC2990) += ltc2990.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
@@ -166,6 +169,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP108) += tmp108.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
+obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
index 207f77f85a40..53f3379d799d 100644
--- a/drivers/hwmon/ab8500.c
+++ b/drivers/hwmon/ab8500.c
@@ -17,20 +17,24 @@
#include <linux/hwmon-sysfs.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power/ab8500.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include <linux/iio/consumer.h>
#include "abx500.h"
#define DEFAULT_POWER_OFF_DELAY (HZ * 10)
#define THERMAL_VCC 1800
#define PULL_UP_RESISTOR 47000
-/* Number of monitored sensors should not greater than NUM_SENSORS */
-#define NUM_MONITORED_SENSORS 4
+
+#define AB8500_SENSOR_AUX1 0
+#define AB8500_SENSOR_AUX2 1
+#define AB8500_SENSOR_BTEMP_BALL 2
+#define AB8500_SENSOR_BAT_CTRL 3
+#define NUM_MONITORED_SENSORS 4
struct ab8500_gpadc_cfg {
const struct abx500_res_to_temp *temp_tbl;
@@ -40,7 +44,8 @@ struct ab8500_gpadc_cfg {
};
struct ab8500_temp {
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *aux1;
+ struct iio_channel *aux2;
struct ab8500_btemp *btemp;
struct delayed_work power_off_work;
struct ab8500_gpadc_cfg cfg;
@@ -82,15 +87,21 @@ static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp)
int voltage, ret;
struct ab8500_temp *ab8500_data = data->plat_data;
- if (sensor == BAT_CTRL) {
- *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
- } else if (sensor == BTEMP_BALL) {
+ if (sensor == AB8500_SENSOR_BTEMP_BALL) {
*temp = ab8500_btemp_get_temp(ab8500_data->btemp);
- } else {
- voltage = ab8500_gpadc_convert(ab8500_data->gpadc, sensor);
- if (voltage < 0)
- return voltage;
-
+ } else if (sensor == AB8500_SENSOR_BAT_CTRL) {
+ *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
+ } else if (sensor == AB8500_SENSOR_AUX1) {
+ ret = iio_read_channel_processed(ab8500_data->aux1, &voltage);
+ if (ret < 0)
+ return ret;
+ ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
+ if (ret < 0)
+ return ret;
+ } else if (sensor == AB8500_SENSOR_AUX2) {
+ ret = iio_read_channel_processed(ab8500_data->aux2, &voltage);
+ if (ret < 0)
+ return ret;
ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
if (ret < 0)
return ret;
@@ -164,10 +175,6 @@ int abx500_hwmon_init(struct abx500_temp *data)
if (!ab8500_data)
return -ENOMEM;
- ab8500_data->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- if (IS_ERR(ab8500_data->gpadc))
- return PTR_ERR(ab8500_data->gpadc);
-
ab8500_data->btemp = ab8500_btemp_get();
if (IS_ERR(ab8500_data->btemp))
return PTR_ERR(ab8500_data->btemp);
@@ -181,15 +188,25 @@ int abx500_hwmon_init(struct abx500_temp *data)
ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size;
data->plat_data = ab8500_data;
+ ab8500_data->aux1 = devm_iio_channel_get(&data->pdev->dev, "aux1");
+ if (IS_ERR(ab8500_data->aux1)) {
+ if (PTR_ERR(ab8500_data->aux1) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&data->pdev->dev, "failed to get AUX1 ADC channel\n");
+ return PTR_ERR(ab8500_data->aux1);
+ }
+ ab8500_data->aux2 = devm_iio_channel_get(&data->pdev->dev, "aux2");
+ if (IS_ERR(ab8500_data->aux2)) {
+ if (PTR_ERR(ab8500_data->aux2) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&data->pdev->dev, "failed to get AUX2 ADC channel\n");
+ return PTR_ERR(ab8500_data->aux2);
+ }
- /*
- * ADC_AUX1 and ADC_AUX2, connected to external NTC
- * BTEMP_BALL and BAT_CTRL, fixed usage
- */
- data->gpadc_addr[0] = ADC_AUX1;
- data->gpadc_addr[1] = ADC_AUX2;
- data->gpadc_addr[2] = BTEMP_BALL;
- data->gpadc_addr[3] = BAT_CTRL;
+ data->gpadc_addr[0] = AB8500_SENSOR_AUX1;
+ data->gpadc_addr[1] = AB8500_SENSOR_AUX2;
+ data->gpadc_addr[2] = AB8500_SENSOR_BTEMP_BALL;
+ data->gpadc_addr[3] = AB8500_SENSOR_BAT_CTRL;
data->monitored_sensors = NUM_MONITORED_SENSORS;
data->ops.read_sensor = ab8500_read_sensor;
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a5cf6b2a6e49..681f0623868f 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1264,7 +1264,7 @@ static int abituguru_probe(struct platform_device *pdev)
* El weirdo probe order, to keep the sysfs order identical to the
* BIOS and window-application listing order.
*/
- const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
+ static const u8 probe_order[ABIT_UGURU_MAX_BANK1_SENSORS] = {
0x00, 0x01, 0x03, 0x04, 0x0A, 0x08, 0x0E, 0x02,
0x09, 0x06, 0x05, 0x0B, 0x0F, 0x0D, 0x07, 0x0C };
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 183ff3d25129..ec93b8d673f5 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -140,7 +140,7 @@ static s16 rest_y;
static u8 backlight_state[2];
static struct device *hwmon_dev;
-static struct input_polled_dev *applesmc_idev;
+static struct input_dev *applesmc_idev;
/*
* Last index written to key_at_index sysfs file, and value to use for all other
@@ -681,9 +681,8 @@ static void applesmc_calibrate(void)
rest_x = -rest_x;
}
-static void applesmc_idev_poll(struct input_polled_dev *dev)
+static void applesmc_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s16 x, y;
if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x))
@@ -1134,7 +1133,6 @@ out:
/* Create accelerometer resources */
static int applesmc_create_accelerometer(void)
{
- struct input_dev *idev;
int ret;
if (!smcreg.has_accelerometer)
@@ -1144,37 +1142,38 @@ static int applesmc_create_accelerometer(void)
if (ret)
goto out;
- applesmc_idev = input_allocate_polled_device();
+ applesmc_idev = input_allocate_device();
if (!applesmc_idev) {
ret = -ENOMEM;
goto out_sysfs;
}
- applesmc_idev->poll = applesmc_idev_poll;
- applesmc_idev->poll_interval = APPLESMC_POLL_INTERVAL;
-
/* initial calibrate for the input device */
applesmc_calibrate();
/* initialize the input device */
- idev = applesmc_idev->input;
- idev->name = "applesmc";
- idev->id.bustype = BUS_HOST;
- idev->dev.parent = &pdev->dev;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X,
+ applesmc_idev->name = "applesmc";
+ applesmc_idev->id.bustype = BUS_HOST;
+ applesmc_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(applesmc_idev, ABS_X,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- input_set_abs_params(idev, ABS_Y,
+ input_set_abs_params(applesmc_idev, ABS_Y,
-256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT);
- ret = input_register_polled_device(applesmc_idev);
+ ret = input_setup_polling(applesmc_idev, applesmc_idev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL);
+
+ ret = input_register_device(applesmc_idev);
if (ret)
goto out_idev;
return 0;
out_idev:
- input_free_polled_device(applesmc_idev);
+ input_free_device(applesmc_idev);
out_sysfs:
applesmc_destroy_nodes(accelerometer_group);
@@ -1189,8 +1188,7 @@ static void applesmc_release_accelerometer(void)
{
if (!smcreg.has_accelerometer)
return;
- input_unregister_polled_device(applesmc_idev);
- input_free_polled_device(applesmc_idev);
+ input_unregister_device(applesmc_idev);
applesmc_destroy_nodes(accelerometer_group);
}
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 40c489be62ea..33fb54845bf6 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -891,17 +891,12 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
struct device_node *np, *child;
struct aspeed_pwm_tacho_data *priv;
void __iomem *regs;
- struct resource *res;
struct device *hwmon;
struct clk *clk;
int ret;
np = dev->of_node;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 4212d022d253..17583bf8c2dc 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -68,6 +68,8 @@ static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
static bool disallow_fan_support;
+static unsigned int manual_fan;
+static unsigned int auto_fan;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -301,6 +303,20 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
}
/*
+ * Enable or disable automatic BIOS fan control support
+ */
+static int i8k_enable_fan_auto_mode(bool enable)
+{
+ struct smm_regs regs = { };
+
+ if (disallow_fan_support)
+ return -EINVAL;
+
+ regs.eax = enable ? auto_fan : manual_fan;
+ return i8k_smm(&regs);
+}
+
+/*
* Set the fan speed (off, low, high). Returns the new fan status.
*/
static int i8k_set_fan(int fan, int speed)
@@ -726,6 +742,35 @@ static ssize_t i8k_hwmon_pwm_store(struct device *dev,
return err < 0 ? -EIO : count;
}
+static ssize_t i8k_hwmon_pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ bool enable;
+ unsigned long val;
+
+ if (!auto_fan)
+ return -ENODEV;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val == 1)
+ enable = false;
+ else if (val == 2)
+ enable = true;
+ else
+ return -EINVAL;
+
+ mutex_lock(&i8k_mutex);
+ err = i8k_enable_fan_auto_mode(enable);
+ mutex_unlock(&i8k_mutex);
+
+ return err ? err : count;
+}
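Per the mapping above, the store follows the usual hwmon pwm[1-*]_enable convention: writing 1 selects manual fan control and 2 restores automatic BIOS control. Switching a whitelisted machine back to automatic mode could look like this from userspace (a sketch; hwmon0 is an assumption, the right index varies per system):

#include <stdio.h>

int main(void)
{
	/* Locate the dell_smm device under /sys/class/hwmon/ on a real box. */
	FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");

	if (!f)
		return 1;
	fputs("2", f);	/* 2 = automatic (BIOS) fan control */
	return fclose(f);
}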
+
static SENSOR_DEVICE_ATTR_RO(temp1_input, i8k_hwmon_temp, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_label, i8k_hwmon_temp_label, 0);
static SENSOR_DEVICE_ATTR_RO(temp2_input, i8k_hwmon_temp, 1);
@@ -749,6 +794,7 @@ static SENSOR_DEVICE_ATTR_RO(temp10_label, i8k_hwmon_temp_label, 9);
static SENSOR_DEVICE_ATTR_RO(fan1_input, i8k_hwmon_fan, 0);
static SENSOR_DEVICE_ATTR_RO(fan1_label, i8k_hwmon_fan_label, 0);
static SENSOR_DEVICE_ATTR_RW(pwm1, i8k_hwmon_pwm, 0);
+static SENSOR_DEVICE_ATTR_WO(pwm1_enable, i8k_hwmon_pwm_enable, 0);
static SENSOR_DEVICE_ATTR_RO(fan2_input, i8k_hwmon_fan, 1);
static SENSOR_DEVICE_ATTR_RO(fan2_label, i8k_hwmon_fan_label, 1);
static SENSOR_DEVICE_ATTR_RW(pwm2, i8k_hwmon_pwm, 1);
@@ -780,12 +826,13 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr, /* 20 */
&sensor_dev_attr_fan1_label.dev_attr.attr, /* 21 */
&sensor_dev_attr_pwm1.dev_attr.attr, /* 22 */
- &sensor_dev_attr_fan2_input.dev_attr.attr, /* 23 */
- &sensor_dev_attr_fan2_label.dev_attr.attr, /* 24 */
- &sensor_dev_attr_pwm2.dev_attr.attr, /* 25 */
- &sensor_dev_attr_fan3_input.dev_attr.attr, /* 26 */
- &sensor_dev_attr_fan3_label.dev_attr.attr, /* 27 */
- &sensor_dev_attr_pwm3.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr, /* 23 */
+ &sensor_dev_attr_fan2_input.dev_attr.attr, /* 24 */
+ &sensor_dev_attr_fan2_label.dev_attr.attr, /* 25 */
+ &sensor_dev_attr_pwm2.dev_attr.attr, /* 26 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 27 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 28 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 29 */
NULL
};
@@ -828,16 +875,19 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP10))
return 0;
- if (index >= 20 && index <= 22 &&
+ if (index >= 20 && index <= 23 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN1))
return 0;
- if (index >= 23 && index <= 25 &&
+ if (index >= 24 && index <= 26 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
- if (index >= 26 && index <= 28 &&
+ if (index >= 27 && index <= 29 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
return 0;
+ if (index == 23 && !auto_fan)
+ return 0;
+
return attr->mode;
}
@@ -1135,12 +1185,48 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
{ }
};
+struct i8k_fan_control_data {
+ unsigned int manual_fan;
+ unsigned int auto_fan;
+};
+
+enum i8k_fan_controls {
+ I8K_FAN_34A3_35A3,
+};
+
+static const struct i8k_fan_control_data i8k_fan_control_data[] = {
+ [I8K_FAN_34A3_35A3] = {
+ .manual_fan = 0x34a3,
+ .auto_fan = 0x35a3,
+ },
+};
+
+static struct dmi_system_id i8k_whitelist_fan_control[] __initdata = {
+ {
+ .ident = "Dell Precision 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Precision 5530"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ {
+ .ident = "Dell Latitude E6440",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ { }
+};
+
/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
{
- const struct dmi_system_id *id;
+ const struct dmi_system_id *id, *fan_control;
int fan, ret;
/*
@@ -1200,6 +1286,15 @@ static int __init i8k_probe(void)
i8k_fan_max = fan_max ? : I8K_FAN_HIGH; /* Must not be 0 */
i8k_pwm_mult = DIV_ROUND_UP(255, i8k_fan_max);
+ fan_control = dmi_first_match(i8k_whitelist_fan_control);
+ if (fan_control && fan_control->driver_data) {
+ const struct i8k_fan_control_data *data = fan_control->driver_data;
+
+ manual_fan = data->manual_fan;
+ auto_fan = data->auto_fan;
+ pr_info("enabling support for setting automatic/manual fan control\n");
+ }
+
if (!fan_mult) {
/*
* Autodetect fan multiplier based on nominal rpm
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index fa0c2f1fb443..4136643d8e0c 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -954,6 +954,7 @@ static const struct file_operations watchdog_fops = {
.release = watchdog_release,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 8a51dcf055ea..f335d0cb0c77 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -31,6 +31,8 @@
#define INA3221_WARN2 0x0a
#define INA3221_CRIT3 0x0b
#define INA3221_WARN3 0x0c
+#define INA3221_SHUNT_SUM 0x0d
+#define INA3221_CRIT_SUM 0x0e
#define INA3221_MASK_ENABLE 0x0f
#define INA3221_CONFIG_MODE_MASK GENMASK(2, 0)
@@ -50,6 +52,8 @@
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
+#define INA3221_MASK_ENABLE_SCC_MASK GENMASK(14, 12)
+
#define INA3221_CONFIG_DEFAULT 0x7127
#define INA3221_RSHUNT_DEFAULT 10000
@@ -60,9 +64,11 @@ enum ina3221_fields {
/* Status Flags */
F_CVRF,
- /* Alert Flags */
+ /* Warning Flags */
F_WF3, F_WF2, F_WF1,
- F_CF3, F_CF2, F_CF1,
+
+ /* Alert Flags: SF is the summation-alert flag */
+ F_SF, F_CF3, F_CF2, F_CF1,
/* sentinel */
F_MAX_FIELDS
@@ -75,6 +81,7 @@ static const struct reg_field ina3221_reg_fields[] = {
[F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
[F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
[F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+ [F_SF] = REG_FIELD(INA3221_MASK_ENABLE, 6, 6),
[F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
[F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
[F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
@@ -107,6 +114,7 @@ struct ina3221_input {
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
* @reg_config: Register value of INA3221_CONFIG
+ * @summation_shunt_resistor: equivalent shunt resistor value for summation
* @single_shot: running in single-shot operating mode
*/
struct ina3221_data {
@@ -116,16 +124,51 @@ struct ina3221_data {
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
u32 reg_config;
+ int summation_shunt_resistor;
bool single_shot;
};
static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel)
{
+ /* Summation channel checks shunt resistor values */
+ if (channel > INA3221_CHANNEL3)
+ return ina->summation_shunt_resistor != 0;
+
return pm_runtime_active(ina->pm_dev) &&
(ina->reg_config & INA3221_CONFIG_CHx_EN(channel));
}
+/**
+ * Helper function to return the resistor value for current summation.
+ *
+ * Current summation is only meaningful when all the shunt resistor
+ * values are the same, so that the result simply fits the formula:
+ * current summation = shunt voltage summation / shunt resistor
+ *
+ * Returns the equivalent shunt resistor value on success or 0 on failure
+ */
+static inline int ina3221_summation_shunt_resistor(struct ina3221_data *ina)
+{
+ struct ina3221_input *input = ina->inputs;
+ int i, shunt_resistor = 0;
+
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
+ if (input[i].disconnected || !input[i].shunt_resistor)
+ continue;
+ if (!shunt_resistor) {
+ /* Found the reference shunt resistor value */
+ shunt_resistor = input[i].shunt_resistor;
+ } else {
+ /* No summation if resistor values are different */
+ if (shunt_resistor != input[i].shunt_resistor)
+ return 0;
+ }
+ }
+
+ return shunt_resistor;
+}
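A standalone illustration of the equal-resistor rule with made-up values: three populated channels at 10000 uOhm yield 10000, while any mismatch disables summation by returning 0:

#include <assert.h>

struct input { int disconnected; int shunt_resistor; };

static int summation_shunt_resistor(const struct input *in, int n)
{
	int i, r = 0;

	for (i = 0; i < n; i++) {
		if (in[i].disconnected || !in[i].shunt_resistor)
			continue;
		if (!r)
			r = in[i].shunt_resistor;	/* reference value */
		else if (r != in[i].shunt_resistor)
			return 0;			/* mismatch: no summation */
	}
	return r;
}

int main(void)
{
	struct input same[3]  = { {0, 10000}, {0, 10000}, {0, 10000} };
	struct input mixed[3] = { {0, 10000}, {0, 5000},  {0, 10000} };

	assert(summation_shunt_resistor(same, 3) == 10000);
	assert(summation_shunt_resistor(mixed, 3) == 0);
	return 0;
}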
+
/* Lookup table for Bus and Shunt conversion times in usec */
static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
@@ -183,7 +226,14 @@ static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
if (ret)
return ret;
- *val = sign_extend32(regval >> 3, 12);
+ /*
+ * Shunt Voltage Sum register has 14-bit value with 1-bit shift
+ * Other Shunt Voltage registers have 12 bits with 3-bit shift
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ *val = sign_extend32(regval >> 1, 14);
+ else
+ *val = sign_extend32(regval >> 3, 12);
return 0;
}
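sign_extend32(value, index) treats bit 'index' as the sign bit. A standalone equivalent, worked through a full-scale negative SHUNT_SUM sample (raw 0x8000: after the 1-bit shift, bit 14 is the sign bit):

#include <stdio.h>
#include <stdint.h>

/* Standalone equivalent of the kernel helper. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t regval = 0x8000;	/* raw SHUNT_SUM register sample */

	printf("%d\n", sign_extend32(regval >> 1, 14));	/* prints -16384 */
	return 0;
}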
@@ -195,6 +245,7 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT1,
INA3221_SHUNT2,
INA3221_SHUNT3,
+ INA3221_SHUNT_SUM,
};
static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
@@ -224,8 +275,12 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
u8 reg = ina3221_in_reg[channel];
int regval, ret;
- /* Translate shunt channel index to sensor channel index */
- channel %= INA3221_NUM_CHANNELS;
+ /*
+ * Translate shunt channel index to sensor channel index, except for
+ * the 7th channel (index 6, as channels are 0-based), which is the
+ * summation channel.
+ */
+ if (channel != 6)
+ channel %= INA3221_NUM_CHANNELS;
switch (attr) {
case hwmon_in_input:
@@ -259,22 +314,29 @@ static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
}
}
-static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS] = {
- [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2, INA3221_SHUNT3 },
- [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3 },
- [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2, INA3221_CRIT3 },
- [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3 },
- [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3 },
+static const u8 ina3221_curr_reg[][INA3221_NUM_CHANNELS + 1] = {
+ [hwmon_curr_input] = { INA3221_SHUNT1, INA3221_SHUNT2,
+ INA3221_SHUNT3, INA3221_SHUNT_SUM },
+ [hwmon_curr_max] = { INA3221_WARN1, INA3221_WARN2, INA3221_WARN3, 0 },
+ [hwmon_curr_crit] = { INA3221_CRIT1, INA3221_CRIT2,
+ INA3221_CRIT3, INA3221_CRIT_SUM },
+ [hwmon_curr_max_alarm] = { F_WF1, F_WF2, F_WF3, 0 },
+ [hwmon_curr_crit_alarm] = { F_CF1, F_CF2, F_CF3, F_SF },
};
static int ina3221_read_curr(struct device *dev, u32 attr,
int channel, long *val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, voltage_nv, ret;
+ int resistance_uo, voltage_nv;
+ int regval, ret;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
switch (attr) {
case hwmon_curr_input:
@@ -293,6 +355,9 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
/* fall through */
case hwmon_curr_crit:
case hwmon_curr_max:
+ if (!resistance_uo)
+ return -ENODATA;
+
ret = ina3221_read_value(ina, reg, &regval);
if (ret)
return ret;
@@ -366,10 +431,18 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
- struct ina3221_input *input = &ina->inputs[channel];
- int resistance_uo = input->shunt_resistor;
+ struct ina3221_input *input = ina->inputs;
u8 reg = ina3221_curr_reg[attr][channel];
- int regval, current_ma, voltage_uv;
+ int resistance_uo, current_ma, voltage_uv;
+ int regval;
+
+ if (channel > INA3221_CHANNEL3)
+ resistance_uo = ina->summation_shunt_resistor;
+ else
+ resistance_uo = input[channel].shunt_resistor;
+
+ if (!resistance_uo)
+ return -EOPNOTSUPP;
/* clamp current */
current_ma = clamp_val(val,
@@ -381,8 +454,21 @@ static int ina3221_write_curr(struct device *dev, u32 attr,
/* clamp voltage */
voltage_uv = clamp_val(voltage_uv, -163800, 163800);
- /* 1 / 40uV(scale) << 3(register shift) = 5 */
- regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+ /*
+ * Formula to convert voltage_uv to register value:
+ * regval = (voltage_uv / scale) << shift
+ * Note:
+ * The scale is 40uV for all shunt voltage registers
+ * Shunt Voltage Sum register left-shifts 1 bit
+ * All other Shunt Voltage registers shift 3 bits
+ * Results:
+ * SHUNT_SUM: (1 / 40uV) << 1 = 1 / 20uV
+ * SHUNT[1-3]: (1 / 40uV) << 3 = 1 / 5uV
+ */
+ if (reg == INA3221_SHUNT_SUM)
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 20) & 0xfffe;
+ else
+ regval = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
return regmap_write(ina->regmap, reg, regval);
}
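Spot-checking the formula above at the +163800 uV clamp ceiling (standalone arithmetic; the division is exact here, so plain / matches DIV_ROUND_CLOSEST): the per-channel registers encode 163800 / 5 = 32760 (0x7ff8, already aligned to the 0xfff8 mask), and SHUNT_SUM encodes 163800 / 20 = 8190 (0x1ffe, aligned to 0xfffe):

#include <assert.h>

int main(void)
{
	int voltage_uv = 163800;	/* clamp ceiling from the code above */

	assert(((voltage_uv / 5) & 0xfff8) == 0x7ff8);	/* SHUNT[1-3] */
	assert(((voltage_uv / 20) & 0xfffe) == 0x1ffe);	/* SHUNT_SUM */
	return 0;
}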
@@ -499,7 +585,10 @@ static int ina3221_read_string(struct device *dev, enum hwmon_sensor_types type,
struct ina3221_data *ina = dev_get_drvdata(dev);
int index = channel - 1;
- *str = ina->inputs[index].label;
+ if (channel == 7)
+ *str = "sum of shunt voltages";
+ else
+ *str = ina->inputs[index].label;
return 0;
}
@@ -529,6 +618,8 @@ static umode_t ina3221_is_visible(const void *drvdata,
case hwmon_in_label:
if (channel - 1 <= INA3221_CHANNEL3)
input = &ina->inputs[channel - 1];
+ else if (channel == 7)
+ return 0444;
/* Hide label node if label is not provided */
return (input && input->label) ? 0444 : 0;
case hwmon_in_input:
@@ -573,11 +664,16 @@ static const struct hwmon_channel_info *ina3221_info[] = {
/* 4-6: shunt voltage Channels */
HWMON_I_INPUT,
HWMON_I_INPUT,
- HWMON_I_INPUT),
+ HWMON_I_INPUT,
+ /* 7: summation of shunt voltage channels */
+ HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(curr,
+ /* 1-3: current channels */
+ INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG),
+ /* 4: summation of current channels */
+ HWMON_C_INPUT | HWMON_C_CRIT | HWMON_C_CRIT_ALARM),
NULL
};
@@ -624,6 +720,9 @@ static ssize_t ina3221_shunt_store(struct device *dev,
input->shunt_resistor = val;
+ /* Update summation_shunt_resistor for summation channel */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
return count;
}
@@ -642,6 +741,7 @@ ATTRIBUTE_GROUPS(ina3221);
static const struct regmap_range ina3221_yes_ranges[] = {
regmap_reg_range(INA3221_CONFIG, INA3221_BUS3),
+ regmap_reg_range(INA3221_SHUNT_SUM, INA3221_SHUNT_SUM),
regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
};
@@ -772,6 +872,9 @@ static int ina3221_probe(struct i2c_client *client,
ina->reg_config &= ~INA3221_CONFIG_CHx_EN(i);
}
+ /* Initialize summation_shunt_resistor for summation channel control */
+ ina->summation_shunt_resistor = ina3221_summation_shunt_resistor(ina);
+
ina->pm_dev = dev;
mutex_init(&ina->lock);
dev_set_drvdata(dev, ina);
@@ -875,6 +978,22 @@ static int __maybe_unused ina3221_resume(struct device *dev)
if (ret)
return ret;
+ /* Initialize summation channel control */
+ if (ina->summation_shunt_resistor) {
+ /*
+ * Take all three channels into summation by default
+ * Shunt measurements of disconnected channels should
+ * be 0, so it does not matter for summation.
+ */
+ ret = regmap_update_bits(ina->regmap, INA3221_MASK_ENABLE,
+ INA3221_MASK_ENABLE_SCC_MASK,
+ INA3221_MASK_ENABLE_SCC_MASK);
+ if (ret) {
+ dev_err(dev, "Unable to control summation channel\n");
+ return ret;
+ }
+ }
+
return 0;
}
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
new file mode 100644
index 000000000000..bb3f7749a0b0
--- /dev/null
+++ b/drivers/hwmon/ltc2947-core.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+/* registers */
+#define LTC2947_REG_PAGE_CTRL 0xFF
+#define LTC2947_REG_CTRL 0xF0
+#define LTC2947_REG_TBCTL 0xE9
+#define LTC2947_CONT_MODE_MASK BIT(3)
+#define LTC2947_CONT_MODE(x) FIELD_PREP(LTC2947_CONT_MODE_MASK, x)
+#define LTC2947_PRE_MASK GENMASK(2, 0)
+#define LTC2947_PRE(x) FIELD_PREP(LTC2947_PRE_MASK, x)
+#define LTC2947_DIV_MASK GENMASK(7, 3)
+#define LTC2947_DIV(x) FIELD_PREP(LTC2947_DIV_MASK, x)
+#define LTC2947_SHUTDOWN_MASK BIT(0)
+#define LTC2947_REG_ACCUM_POL 0xE1
+#define LTC2947_ACCUM_POL_1_MASK GENMASK(1, 0)
+#define LTC2947_ACCUM_POL_1(x) FIELD_PREP(LTC2947_ACCUM_POL_1_MASK, x)
+#define LTC2947_ACCUM_POL_2_MASK GENMASK(3, 2)
+#define LTC2947_ACCUM_POL_2(x) FIELD_PREP(LTC2947_ACCUM_POL_2_MASK, x)
+#define LTC2947_REG_ACCUM_DEADBAND 0xE4
+#define LTC2947_REG_GPIOSTATCTL 0x67
+#define LTC2947_GPIO_EN_MASK BIT(0)
+#define LTC2947_GPIO_EN(x) FIELD_PREP(LTC2947_GPIO_EN_MASK, x)
+#define LTC2947_GPIO_FAN_EN_MASK BIT(6)
+#define LTC2947_GPIO_FAN_EN(x) FIELD_PREP(LTC2947_GPIO_FAN_EN_MASK, x)
+#define LTC2947_GPIO_FAN_POL_MASK BIT(7)
+#define LTC2947_GPIO_FAN_POL(x) FIELD_PREP(LTC2947_GPIO_FAN_POL_MASK, x)
+#define LTC2947_REG_GPIO_ACCUM 0xE3
+/* 200 kHz */
+#define LTC2947_CLK_MIN 200000
+/* 25 MHz */
+#define LTC2947_CLK_MAX 25000000
+#define LTC2947_PAGE0 0
+#define LTC2947_PAGE1 1
+/* Voltage registers */
+#define LTC2947_REG_VOLTAGE 0xA0
+#define LTC2947_REG_VOLTAGE_MAX 0x50
+#define LTC2947_REG_VOLTAGE_MIN 0x52
+#define LTC2947_REG_VOLTAGE_THRE_H 0x90
+#define LTC2947_REG_VOLTAGE_THRE_L 0x92
+#define LTC2947_REG_DVCC 0xA4
+#define LTC2947_REG_DVCC_MAX 0x58
+#define LTC2947_REG_DVCC_MIN 0x5A
+#define LTC2947_REG_DVCC_THRE_H 0x98
+#define LTC2947_REG_DVCC_THRE_L 0x9A
+#define LTC2947_VOLTAGE_GEN_CHAN 0
+#define LTC2947_VOLTAGE_DVCC_CHAN 1
+/* in mV */
+#define VOLTAGE_MAX 15500
+#define VOLTAGE_MIN -300
+#define VDVCC_MAX 15000
+#define VDVCC_MIN 4750
+/* Current registers */
+#define LTC2947_REG_CURRENT 0x90
+#define LTC2947_REG_CURRENT_MAX 0x40
+#define LTC2947_REG_CURRENT_MIN 0x42
+#define LTC2947_REG_CURRENT_THRE_H 0x80
+#define LTC2947_REG_CURRENT_THRE_L 0x82
+/* in mA */
+#define CURRENT_MAX 30000
+#define CURRENT_MIN -30000
+/* Power registers */
+#define LTC2947_REG_POWER 0x93
+#define LTC2947_REG_POWER_MAX 0x44
+#define LTC2947_REG_POWER_MIN 0x46
+#define LTC2947_REG_POWER_THRE_H 0x84
+#define LTC2947_REG_POWER_THRE_L 0x86
+/* in uW */
+#define POWER_MAX 450000000
+#define POWER_MIN -450000000
+/* Temperature registers */
+#define LTC2947_REG_TEMP 0xA2
+#define LTC2947_REG_TEMP_MAX 0x54
+#define LTC2947_REG_TEMP_MIN 0x56
+#define LTC2947_REG_TEMP_THRE_H 0x94
+#define LTC2947_REG_TEMP_THRE_L 0x96
+#define LTC2947_REG_TEMP_FAN_THRE_H 0x9C
+#define LTC2947_REG_TEMP_FAN_THRE_L 0x9E
+#define LTC2947_TEMP_FAN_CHAN 1
+/* in millidegrees Celsius */
+#define TEMP_MAX 85000
+#define TEMP_MIN -40000
+/* Energy registers */
+#define LTC2947_REG_ENERGY1 0x06
+#define LTC2947_REG_ENERGY2 0x16
+/* Status/Alarm/Overflow registers */
+#define LTC2947_REG_STATUS 0x80
+#define LTC2947_REG_STATVT 0x81
+#define LTC2947_REG_STATIP 0x82
+#define LTC2947_REG_STATVDVCC 0x87
+
+#define LTC2947_ALERTS_SIZE (LTC2947_REG_STATVDVCC - LTC2947_REG_STATUS)
+#define LTC2947_MAX_VOLTAGE_MASK BIT(0)
+#define LTC2947_MIN_VOLTAGE_MASK BIT(1)
+#define LTC2947_MAX_CURRENT_MASK BIT(0)
+#define LTC2947_MIN_CURRENT_MASK BIT(1)
+#define LTC2947_MAX_POWER_MASK BIT(2)
+#define LTC2947_MIN_POWER_MASK BIT(3)
+#define LTC2947_MAX_TEMP_MASK BIT(2)
+#define LTC2947_MIN_TEMP_MASK BIT(3)
+#define LTC2947_MAX_TEMP_FAN_MASK BIT(4)
+#define LTC2947_MIN_TEMP_FAN_MASK BIT(5)
+
+struct ltc2947_data {
+ struct regmap *map;
+ struct device *dev;
+ /*
+ * The mutex is needed because the device has 2 memory pages. When
+ * reading/writing, the correct page needs to be selected first, so
+ * the complete select_page->read/write sequence must be protected.
+ */
+ struct mutex lock;
+ u32 lsb_energy;
+ bool gpio_out;
+};
+
+static int __ltc2947_val_read16(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be16 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(__val);
+
+ return 0;
+}
+
+static int __ltc2947_val_read24(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be32 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 3);
+ if (ret)
+ return ret;
+
+ *val = be32_to_cpu(__val) >> 8;
+
+ return 0;
+}
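The 3-byte bulk read lands in the first three bytes of a zeroed __be32; interpreting that buffer as big-endian puts the device bytes in the most significant positions, and the final >> 8 drops the zero padding. A standalone illustration, using ntohl() as a portable stand-in for be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* Three big-endian device bytes followed by the padding byte. */
	uint8_t raw[4] = { 0x12, 0x34, 0x56, 0x00 };
	uint32_t be;

	memcpy(&be, raw, sizeof(be));
	printf("0x%06X\n", ntohl(be) >> 8);	/* prints 0x123456 */
	return 0;
}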
+
+static int __ltc2947_val_read64(const struct ltc2947_data *st, const u8 reg,
+ u64 *val)
+{
+ __be64 __val = 0;
+ int ret;
+
+ ret = regmap_bulk_read(st->map, reg, &__val, 6);
+ if (ret)
+ return ret;
+
+ *val = be64_to_cpu(__val) >> 16;
+
+ return 0;
+}
+
+static int ltc2947_val_read(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, s64 *val)
+{
+ int ret;
+ u64 __val = 0;
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Read val, reg:%02X, p:%d sz:%zu\n", reg, page,
+ size);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_read16(st, reg, &__val);
+ break;
+ case 3:
+ ret = __ltc2947_val_read24(st, reg, &__val);
+ break;
+ case 6:
+ ret = __ltc2947_val_read64(st, reg, &__val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ if (ret)
+ return ret;
+
+ *val = sign_extend64(__val, (8 * size) - 1);
+
+ dev_dbg(st->dev, "Got s:%lld, u:%016llX\n", *val, __val);
+
+ return 0;
+}
+
+static int __ltc2947_val_write64(const struct ltc2947_data *st, const u8 reg,
+ const u64 val)
+{
+ __be64 __val;
+
+ __val = cpu_to_be64(val << 16);
+ return regmap_bulk_write(st->map, reg, &__val, 6);
+}
+
+static int __ltc2947_val_write16(const struct ltc2947_data *st, const u8 reg,
+ const u16 val)
+{
+ __be16 __val;
+
+ __val = cpu_to_be16(val);
+ return regmap_bulk_write(st->map, reg, &__val, 2);
+}
+
+static int ltc2947_val_write(struct ltc2947_data *st, const u8 reg,
+ const u8 page, const size_t size, const u64 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ /* set device on correct page */
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
+ if (ret) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ dev_dbg(st->dev, "Write val, r:%02X, p:%d, sz:%zu, val:%016llX\n",
+ reg, page, size, val);
+
+ switch (size) {
+ case 2:
+ ret = __ltc2947_val_write16(st, reg, val);
+ break;
+ case 6:
+ ret = __ltc2947_val_write64(st, reg, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ltc2947_reset_history(struct ltc2947_data *st, const u8 reg_h,
+ const u8 reg_l)
+{
+ int ret;
+ /*
+ * Let's reset the tracking registers. They are all 2 bytes in size:
+ * the MAX register is preset to the most negative value (0x8000) and
+ * the MIN register to the most positive one (0x7FFF), so the next
+ * sample overwrites both.
+ */
+ ret = ltc2947_val_write(st, reg_h, LTC2947_PAGE0, 2, 0x8000U);
+ if (ret)
+ return ret;
+
+ return ltc2947_val_write(st, reg_l, LTC2947_PAGE0, 2, 0x7FFFU);
+}
+
+static int ltc2947_alarm_read(struct ltc2947_data *st, const u8 reg,
+ const u32 mask, long *val)
+{
+ u8 offset = reg - LTC2947_REG_STATUS;
+ /* +1 to include status reg */
+ char alarms[LTC2947_ALERTS_SIZE + 1];
+ int ret = 0;
+
+ memset(alarms, 0, sizeof(alarms));
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, LTC2947_PAGE0);
+ if (ret)
+ goto unlock;
+
+ dev_dbg(st->dev, "Read alarm, reg:%02X, mask:%02X\n", reg, mask);
+ /*
+ * As stated in the datasheet, when Threshold and Overflow registers
+ * are used, the status and all alert registers must be read in one
+ * multi-byte transaction.
+ */
+ ret = regmap_bulk_read(st->map, LTC2947_REG_STATUS, alarms,
+ sizeof(alarms));
+ if (ret)
+ goto unlock;
+
+ /* get the alarm */
+ *val = !!(alarms[offset] & mask);
+unlock:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static ssize_t ltc2947_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int ret;
+ s64 val = 0;
+
+ ret = ltc2947_val_read(st, attr->index, LTC2947_PAGE0, 6, &val);
+ if (ret)
+ return ret;
+
+ /* value in microjoules; st->lsb_energy was multiplied by 1e9 */
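+ /*
+ * E.g. with the internal clock (lsb_energy = 19890) a raw count of
+ * 1000 reads back as 1000 * 19890 / 1000 = 19890 microjoules.
+ */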
+ val = div_s64(val * st->lsb_energy, 1000);
+
+ return sprintf(buf, "%lld\n", val);
+}
+
+static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ int ret;
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_temp_max_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_TEMP_MASK, val);
+ case hwmon_temp_min_alarm:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_FAN_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_TEMP_MASK, val);
+ case hwmon_temp_max:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_temp_min:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ else
+ ret = ltc2947_val_read(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* temp, in millidegrees Celsius, is given by: */
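+ /* e.g. a raw reading of 1000 reports (1000 * 204) + 550 = 204550 */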
+ *val = (__val * 204) + 550;
+
+ return 0;
+}
+
+static int ltc2947_read_power(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u32 lsb = 200000; /* in uW */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER, LTC2947_PAGE0,
+ 3, &__val);
+ lsb = 50000;
+ break;
+ case hwmon_power_input_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MAX, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_input_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_MIN, LTC2947_PAGE0,
+ 2, &__val);
+ break;
+ case hwmon_power_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_POWER_MASK, val);
+ case hwmon_power_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_POWER_MASK, val);
+ case hwmon_power_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_power_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_curr(struct device *dev, const u32 attr, long *val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 12; /* in mA */
+ s64 __val = 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT,
+ LTC2947_PAGE0, 3, &__val);
+ lsb = 3;
+ break;
+ case hwmon_curr_highest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_lowest:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ break;
+ case hwmon_curr_max_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MAX_CURRENT_MASK, val);
+ case hwmon_curr_min_alarm:
+ return ltc2947_alarm_read(st, LTC2947_REG_STATIP,
+ LTC2947_MIN_CURRENT_MASK, val);
+ case hwmon_curr_max:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ case hwmon_curr_min:
+ ret = ltc2947_val_read(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read_in(struct device *dev, const u32 attr, long *val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ int ret;
+ u8 lsb = 2; /* in mV */
+ s64 __val = 0;
+
+ if (channel < 0 || channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for voltage", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_input:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_highest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_lowest:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_MIN,
+ LTC2947_PAGE0, 2, &__val);
+ }
+ break;
+ case hwmon_in_max_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MAX_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MAX_VOLTAGE_MASK, val);
+ case hwmon_in_min_alarm:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVDVCC,
+ LTC2947_MIN_VOLTAGE_MASK,
+ val);
+
+ return ltc2947_alarm_read(st, LTC2947_REG_STATVT,
+ LTC2947_MIN_VOLTAGE_MASK, val);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ ret = ltc2947_val_read(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ lsb = 145;
+ } else {
+ ret = ltc2947_val_read(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2, &__val);
+ }
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = __val * lsb;
+
+ return 0;
+}
+
+static int ltc2947_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_read_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_read_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_read_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_read_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_temp(struct device *dev, const u32 attr,
+ long val, const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel < 0 || channel > LTC2947_TEMP_FAN_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for temperature", channel);
+ return -EINVAL;
+ }
+
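+ /*
+ * The threshold writes below invert the read-path scaling, so values
+ * quantize to the 204-millidegree LSB: e.g. writing 25000 stores
+ * round((25000 - 550) / 204) = 120, which reads back as
+ * 120 * 204 + 550 = 25030.
+ */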
+ switch (attr) {
+ case hwmon_temp_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_TEMP_MAX,
+ LTC2947_REG_TEMP_MIN);
+ case hwmon_temp_max:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ case hwmon_temp_min:
+ val = clamp_val(val, TEMP_MIN, TEMP_MAX);
+ if (channel == LTC2947_TEMP_FAN_CHAN) {
+ if (!st->gpio_out)
+ return -ENOTSUPP;
+
+ return ltc2947_val_write(st,
+ LTC2947_REG_TEMP_FAN_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ }
+
+ return ltc2947_val_write(st, LTC2947_REG_TEMP_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val - 550, 204));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_power(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_power_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_POWER_MAX,
+ LTC2947_REG_POWER_MIN);
+ case hwmon_power_max:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ case hwmon_power_min:
+ val = clamp_val(val, POWER_MIN, POWER_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 200000));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_curr(struct device *dev, const u32 attr,
+ long val)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_curr_reset_history:
+ if (val != 1)
+ return -EINVAL;
+ return ltc2947_reset_history(st, LTC2947_REG_CURRENT_MAX,
+ LTC2947_REG_CURRENT_MIN);
+ case hwmon_curr_max:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ case hwmon_curr_min:
+ val = clamp_val(val, CURRENT_MIN, CURRENT_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_CURRENT_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 12));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write_in(struct device *dev, const u32 attr, long val,
+ const int channel)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ if (channel > LTC2947_VOLTAGE_DVCC_CHAN) {
+ dev_err(st->dev, "Invalid chan%d for voltage", channel);
+ return -EINVAL;
+ }
+
+ switch (attr) {
+ case hwmon_in_reset_history:
+ if (val != 1)
+ return -EINVAL;
+
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ return ltc2947_reset_history(st, LTC2947_REG_DVCC_MAX,
+ LTC2947_REG_DVCC_MIN);
+
+ return ltc2947_reset_history(st, LTC2947_REG_VOLTAGE_MAX,
+ LTC2947_REG_VOLTAGE_MIN);
+ case hwmon_in_max:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_H,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ case hwmon_in_min:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN) {
+ val = clamp_val(val, VDVCC_MIN, VDVCC_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_DVCC_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 145));
+ }
+
+ val = clamp_val(val, VOLTAGE_MIN, VOLTAGE_MAX);
+ return ltc2947_val_write(st, LTC2947_REG_VOLTAGE_THRE_L,
+ LTC2947_PAGE1, 2,
+ DIV_ROUND_CLOSEST(val, 2));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_write_in(dev, attr, val, channel);
+ case hwmon_curr:
+ return ltc2947_write_curr(dev, attr, val);
+ case hwmon_power:
+ return ltc2947_write_power(dev, attr, val);
+ case hwmon_temp:
+ return ltc2947_write_temp(dev, attr, val, channel);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_read_labels(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ if (channel == LTC2947_VOLTAGE_DVCC_CHAN)
+ *str = "DVCC";
+ else
+ *str = "VP-VM";
+ return 0;
+ case hwmon_curr:
+ *str = "IP-IM";
+ return 0;
+ case hwmon_temp:
+ if (channel == LTC2947_TEMP_FAN_CHAN)
+ *str = "TEMPFAN";
+ else
+ *str = "Ambient";
+ return 0;
+ case hwmon_power:
+ *str = "Power";
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ltc2947_in_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_highest:
+ case hwmon_in_lowest:
+ case hwmon_in_max_alarm:
+ case hwmon_in_min_alarm:
+ case hwmon_in_label:
+ return 0444;
+ case hwmon_in_reset_history:
+ return 0200;
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_curr_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_highest:
+ case hwmon_curr_lowest:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_label:
+ return 0444;
+ case hwmon_curr_reset_history:
+ return 0200;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_power_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ case hwmon_power_input_lowest:
+ case hwmon_power_label:
+ case hwmon_power_max_alarm:
+ case hwmon_power_min_alarm:
+ return 0444;
+ case hwmon_power_reset_history:
+ return 0200;
+ case hwmon_power_max:
+ case hwmon_power_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ltc2947_temp_is_visible(const u32 attr)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_highest:
+ case hwmon_temp_lowest:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_min_alarm:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_reset_history:
+ return 0200;
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static umode_t ltc2947_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ return ltc2947_in_is_visible(attr);
+ case hwmon_curr:
+ return ltc2947_curr_is_visible(attr);
+ case hwmon_power:
+ return ltc2947_power_is_visible(attr);
+ case hwmon_temp:
+ return ltc2947_temp_is_visible(attr);
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_channel_info *ltc2947_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LOWEST | HWMON_I_HIGHEST |
+ HWMON_I_MAX | HWMON_I_MIN | HWMON_I_RESET_HISTORY |
+ HWMON_I_MIN_ALARM | HWMON_I_MAX_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LOWEST | HWMON_C_HIGHEST |
+ HWMON_C_MAX | HWMON_C_MIN | HWMON_C_RESET_HISTORY |
+ HWMON_C_MIN_ALARM | HWMON_C_MAX_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_INPUT_LOWEST |
+ HWMON_P_INPUT_HIGHEST | HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_RESET_HISTORY | HWMON_P_MAX_ALARM |
+ HWMON_P_MIN_ALARM | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LOWEST | HWMON_T_HIGHEST |
+ HWMON_T_MAX | HWMON_T_MIN | HWMON_T_RESET_HISTORY |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM |
+ HWMON_T_LABEL,
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | HWMON_T_MAX |
+ HWMON_T_MIN | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops ltc2947_hwmon_ops = {
+ .is_visible = ltc2947_is_visible,
+ .read = ltc2947_read,
+ .write = ltc2947_write,
+ .read_string = ltc2947_read_labels,
+};
+
+static const struct hwmon_chip_info ltc2947_chip_info = {
+ .ops = &ltc2947_hwmon_ops,
+ .info = ltc2947_info,
+};
+
+/* energy attributes are 6 bytes wide, so we need u64 */
+static SENSOR_DEVICE_ATTR(energy1_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY1);
+static SENSOR_DEVICE_ATTR(energy2_input, 0444, ltc2947_show_value, NULL,
+ LTC2947_REG_ENERGY2);
+
+static struct attribute *ltc2947_attrs[] = {
+ &sensor_dev_attr_energy1_input.dev_attr.attr,
+ &sensor_dev_attr_energy2_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2947);
+
+static void ltc2947_clk_disable(void *data)
+{
+ struct clk *extclk = data;
+
+ clk_disable_unprepare(extclk);
+}
+
+static int ltc2947_setup(struct ltc2947_data *st)
+{
+ int ret;
+ struct clk *extclk;
+ u32 dummy, deadband, pol;
+ u32 accum[2];
+
+ /* clear status register by reading it */
+ ret = regmap_read(st->map, LTC2947_REG_STATUS, &dummy);
+ if (ret)
+ return ret;
+ /*
+ * Set max/min for power here since the default values times the
+ * scale would overflow on a 32-bit arch
+ */
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_H, LTC2947_PAGE1, 2,
+ POWER_MAX / 200000);
+ if (ret)
+ return ret;
+
+ ret = ltc2947_val_write(st, LTC2947_REG_POWER_THRE_L, LTC2947_PAGE1, 2,
+ POWER_MIN / 200000);
+ if (ret)
+ return ret;
+
+ /* check external clock presence */
+ extclk = devm_clk_get(st->dev, NULL);
+ if (!IS_ERR(extclk)) {
+ unsigned long rate_hz;
+ u8 pre = 0, div, tbctl;
+ u64 aux;
+
+ /* let's calculate and set the right values in TBCTL */
+ rate_hz = clk_get_rate(extclk);
+ if (rate_hz < LTC2947_CLK_MIN || rate_hz > LTC2947_CLK_MAX) {
+ dev_err(st->dev, "Invalid rate:%lu for external clock",
+ rate_hz);
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(extclk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(st->dev, ltc2947_clk_disable,
+ extclk);
+ if (ret)
+ return ret;
+ /* as in table 1 of the datasheet */
+ if (rate_hz >= LTC2947_CLK_MIN && rate_hz <= 1000000)
+ pre = 0;
+ else if (rate_hz > 1000000 && rate_hz <= 2000000)
+ pre = 1;
+ else if (rate_hz > 2000000 && rate_hz <= 4000000)
+ pre = 2;
+ else if (rate_hz > 4000000 && rate_hz <= 8000000)
+ pre = 3;
+ else if (rate_hz > 8000000 && rate_hz <= 16000000)
+ pre = 4;
+ else if (rate_hz > 16000000 && rate_hz <= LTC2947_CLK_MAX)
+ pre = 5;
+ /*
+ * Div is given by:
+ * floor(fref / (2^PRE * 32768))
+ */
+ div = rate_hz / ((1 << pre) * 32768);
+ tbctl = LTC2947_PRE(pre) | LTC2947_DIV(div);
+
+ ret = regmap_write(st->map, LTC2947_REG_TBCTL, tbctl);
+ if (ret)
+ return ret;
+ /*
+ * The energy lsb is given by (in W*s):
+ * 0.6416 * (1/fref) * 2^PRE * (DIV + 1)
+ * The value is multiplied by 1e9
+ */
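+ /*
+ * E.g. with a 4MHz external clock: pre = 2, div =
+ * 4000000 / (4 * 32768) = 30, so lsb_energy =
+ * 31 * 4 * 641600000 / 4000000 = 19890, i.e. ~19.89uJ per count.
+ */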
+ aux = (div + 1) * ((1 << pre) * 641600000ULL);
+ st->lsb_energy = DIV_ROUND_CLOSEST_ULL(aux, rate_hz);
+ } else {
+ /* 19.89e-6 * 1e9 */
+ st->lsb_energy = 19890;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node,
+ "adi,accumulator-ctl-pol", accum,
+ ARRAY_SIZE(accum));
+ if (!ret) {
+ u32 accum_reg = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_POL, accum_reg);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32(st->dev->of_node,
+ "adi,accumulation-deadband-microamp",
+ &deadband);
+ if (!ret) {
+ /* the LSB is the same as for the current, i.e. 3mA */
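+ /* e.g. a 30000uA deadband maps to a register value of 10 */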
+ ret = regmap_write(st->map, LTC2947_REG_ACCUM_DEADBAND,
+ deadband / (1000 * 3));
+ if (ret)
+ return ret;
+ }
+ /* check gpio cfg */
+ ret = of_property_read_u32(st->dev->of_node, "adi,gpio-out-pol", &pol);
+ if (!ret) {
+ /* setup GPIO as output */
+ u32 gpio_ctl = LTC2947_GPIO_EN(1) | LTC2947_GPIO_FAN_EN(1) |
+ LTC2947_GPIO_FAN_POL(pol);
+
+ st->gpio_out = true;
+ ret = regmap_write(st->map, LTC2947_REG_GPIOSTATCTL, gpio_ctl);
+ if (ret)
+ return ret;
+ }
+ ret = of_property_read_u32_array(st->dev->of_node, "adi,gpio-in-accum",
+ accum, ARRAY_SIZE(accum));
+ if (!ret) {
+ /*
+ * Set up the accumulator options. The gpioctl is already defined as
+ * input by default.
+ */
+ u32 accum_val = LTC2947_ACCUM_POL_1(accum[0]) |
+ LTC2947_ACCUM_POL_2(accum[1]);
+
+ if (st->gpio_out) {
+ dev_err(st->dev,
+ "Cannot have input gpio config if already configured as output");
+ return -EINVAL;
+ }
+
+ ret = regmap_write(st->map, LTC2947_REG_GPIO_ACCUM, accum_val);
+ if (ret)
+ return ret;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+int ltc2947_core_probe(struct regmap *map, const char *name)
+{
+ struct ltc2947_data *st;
+ struct device *dev = regmap_get_device(map);
+ struct device *hwmon;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->map = map;
+ st->dev = dev;
+ dev_set_drvdata(dev, st);
+ mutex_init(&st->lock);
+
+ ret = ltc2947_setup(st);
+ if (ret)
+ return ret;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, name, st,
+ &ltc2947_chip_info,
+ ltc2947_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+EXPORT_SYMBOL_GPL(ltc2947_core_probe);
+
+static int __maybe_unused ltc2947_resume(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ u32 ctrl = 0;
+ int ret;
+
+ /* dummy read to wake the device */
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /*
+ * Wait for the device. It takes 100ms to wake up, so 10ms extra
+ * should be enough.
+ */
+ msleep(110);
+ ret = regmap_read(st->map, LTC2947_REG_CTRL, &ctrl);
+ if (ret)
+ return ret;
+ /* ctrl should be 0 */
+ if (ctrl != 0) {
+ dev_err(st->dev, "Device failed to wake up, ctl:%02X\n", ctrl);
+ return -ETIMEDOUT;
+ }
+
+ /* set continuous mode */
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
+}
+
+static int __maybe_unused ltc2947_suspend(struct device *dev)
+{
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+
+ return regmap_update_bits(st->map, LTC2947_REG_CTRL,
+ LTC2947_SHUTDOWN_MASK, 1);
+}
+
+SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
+EXPORT_SYMBOL_GPL(ltc2947_pm_ops);
+
+const struct of_device_id ltc2947_of_match[] = {
+ { .compatible = "adi,ltc2947" },
+ {}
+};
+EXPORT_SYMBOL_GPL(ltc2947_of_match);
+MODULE_DEVICE_TABLE(of, ltc2947_of_match);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 power and energy monitor core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
new file mode 100644
index 000000000000..cf6074b110ae
--- /dev/null
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over I2C
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ltc2947_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_i2c(i2c, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, i2c->name);
+}
+
+static const struct i2c_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2947_id);
+
+static struct i2c_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_i2c_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 I2C power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947-spi.c b/drivers/hwmon/ltc2947-spi.c
new file mode 100644
index 000000000000..c24ca569db1b
--- /dev/null
+++ b/drivers/hwmon/ltc2947-spi.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2947 high precision power and energy monitor over SPI
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ltc2947.h"
+
+static const struct regmap_config ltc2947_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BIT(0),
+};
+
+static int ltc2947_probe(struct spi_device *spi)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_spi(spi, &ltc2947_regmap_config);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return ltc2947_core_probe(map, spi_get_device_id(spi)->name);
+}
+
+static const struct spi_device_id ltc2947_id[] = {
+ {"ltc2947", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ltc2947_id);
+
+static struct spi_driver ltc2947_driver = {
+ .driver = {
+ .name = "ltc2947",
+ .of_match_table = ltc2947_of_match,
+ .pm = &ltc2947_pm_ops,
+ },
+ .probe = ltc2947_probe,
+ .id_table = ltc2947_id,
+};
+module_spi_driver(ltc2947_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("LTC2947 SPI power and energy monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc2947.h b/drivers/hwmon/ltc2947.h
new file mode 100644
index 000000000000..5b8ff81a3dba
--- /dev/null
+++ b/drivers/hwmon/ltc2947.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LTC2947_H
+#define _LINUX_LTC2947_H
+
+struct regmap;
+
+extern const struct of_device_id ltc2947_of_match[];
+extern const struct dev_pm_ops ltc2947_pm_ops;
+
+int ltc2947_core_probe(struct regmap *map, const char *name);
+
+#endif
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index d62d69bb7e49..59859979571d 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -36,6 +36,15 @@ config SENSORS_ADM1275
This driver can also be built as a module. If so, the module will
be called adm1275.
+config SENSORS_BEL_PFE
+ tristate "Bel PFE Compatible Power Supplies"
+ help
+ If you say yes here you get hardware monitoring support for BEL
+ PFE1100 and PFE3000 Power Supplies.
+
+ This driver can also be built as a module. If so, the module will
+ be called bel-pfe.
+
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
depends on LEDS_CLASS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 03bacfcfd660..3f8c1014938b 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_BEL_PFE) += bel-pfe.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
new file mode 100644
index 000000000000..f236e18f45a5
--- /dev/null
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for BEL PFE family power supplies.
+ *
+ * Copyright (c) 2019 Facebook Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+
+#include "pmbus.h"
+
+enum chips {pfe1100, pfe3000};
+
+/*
+ * Disable the status check for pfe3000 devices, because some devices report
+ * a communication error (invalid command) for the VOUT_MODE command (0x20)
+ * even though the correct VOUT_MODE value (0x16) is returned: the error
+ * leads to an incorrect exponent in linear mode.
+ */
+static struct pmbus_platform_data pfe3000_plat_data = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
+static struct pmbus_driver_info pfe_driver_info[] = {
+ [pfe1100] = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_FAN12,
+ },
+
+ [pfe3000] = {
+ .pages = 7,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_FAN] = linear,
+
+ /* Page 0: V1. */
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_POUT | PMBUS_HAVE_FAN12 |
+ PMBUS_HAVE_VIN | PMBUS_HAVE_IIN |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 |
+ PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_VCAP,
+
+ /* Page 1: Vsb. */
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_POUT,
+
+ /*
+ * Page 2: V1 Ishare.
+ * Page 3: Reserved.
+ * Page 4: V1 Cathode.
+ * Page 5: Vsb Cathode.
+ * Page 6: V1 Sense.
+ */
+ .func[2] = PMBUS_HAVE_VOUT,
+ .func[4] = PMBUS_HAVE_VOUT,
+ .func[5] = PMBUS_HAVE_VOUT,
+ .func[6] = PMBUS_HAVE_VOUT,
+ },
+};
+
+static int pfe_pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int model;
+
+ model = (int)id->driver_data;
+
+ /*
+ * PFE3000-12-069RA devices may not stay in page 0 during device
+ * probe, which leads to probe failure (reading the status word fails).
+ * So let's set the device to page 0 at the beginning.
+ */
+ if (model == pfe3000) {
+ client->dev.platform_data = &pfe3000_plat_data;
+ i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ }
+
+ return pmbus_do_probe(client, id, &pfe_driver_info[model]);
+}
+
+static const struct i2c_device_id pfe_device_id[] = {
+ {"pfe1100", pfe1100},
+ {"pfe3000", pfe3000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pfe_device_id);
+
+static struct i2c_driver pfe_pmbus_driver = {
+ .driver = {
+ .name = "bel-pfe",
+ },
+ .probe = pfe_pmbus_probe,
+ .remove = pmbus_do_remove,
+ .id_table = pfe_device_id,
+};
+
+module_i2c_driver(pfe_pmbus_driver);
+
+MODULE_AUTHOR("Tao Ren <rentao.bupt@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for BEL PFE Family Power Supplies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index d44745e498e7..d359b76bcb36 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -3,6 +3,7 @@
* Copyright 2017 IBM Corp.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -29,6 +30,10 @@
#define CFFPS_INPUT_HISTORY_CMD 0xD6
#define CFFPS_INPUT_HISTORY_SIZE 100
+#define CFFPS_CCIN_VERSION GENMASK(15, 8)
+#define CFFPS_CCIN_VERSION_1 0x2b
+#define CFFPS_CCIN_VERSION_2 0x2e
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -39,9 +44,13 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+/*
+ * LED off state actually relinquishes LED control to PSU firmware, so it can
+ * turn on the LED for faults.
+ */
+#define CFFPS_LED_OFF 0
#define CFFPS_LED_BLINK BIT(0)
#define CFFPS_LED_ON BIT(1)
-#define CFFPS_LED_OFF BIT(2)
#define CFFPS_BLINK_RATE_MS 250
enum {
@@ -54,7 +63,7 @@ enum {
CFFPS_DEBUGFS_NUM_ENTRIES
};
-enum versions { cffps1, cffps2 };
+enum versions { cffps1, cffps2, cffps_unknown };
struct ibm_cffps_input_history {
struct mutex update_lock;
@@ -292,28 +301,38 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
-static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
+static int ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
int rc;
+ u8 next_led_state;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
if (brightness == LED_OFF) {
- psu->led_state = CFFPS_LED_OFF;
+ next_led_state = CFFPS_LED_OFF;
} else {
brightness = LED_FULL;
+
if (psu->led_state != CFFPS_LED_BLINK)
- psu->led_state = CFFPS_LED_ON;
+ next_led_state = CFFPS_LED_ON;
+ else
+ next_led_state = CFFPS_LED_BLINK;
}
+ dev_dbg(&psu->client->dev, "LED brightness set: %d. Command: %d.\n",
+ brightness, next_led_state);
+
pmbus_set_page(psu->client, 0);
rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
- psu->led_state);
+ next_led_state);
if (rc < 0)
- return;
+ return rc;
+ psu->led_state = next_led_state;
led_cdev->brightness = brightness;
+
+ return 0;
}
static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
@@ -323,10 +342,7 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
int rc;
struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
- psu->led_state = CFFPS_LED_BLINK;
-
- if (led_cdev->brightness == LED_OFF)
- return 0;
+ dev_dbg(&psu->client->dev, "LED blink set.\n");
pmbus_set_page(psu->client, 0);
@@ -335,6 +351,8 @@ static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
if (rc < 0)
return rc;
+ psu->led_state = CFFPS_LED_BLINK;
+ led_cdev->brightness = LED_FULL;
*delay_on = CFFPS_BLINK_RATE_MS;
*delay_off = CFFPS_BLINK_RATE_MS;
@@ -351,7 +369,7 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
client->addr);
psu->led.name = psu->led_name;
psu->led.max_brightness = LED_FULL;
- psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.brightness_set_blocking = ibm_cffps_led_brightness_set;
psu->led.blink_set = ibm_cffps_led_blink_set;
rc = devm_led_classdev_register(dev, &psu->led);
@@ -395,7 +413,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i, rc;
- enum versions vs;
+ enum versions vs = cffps_unknown;
struct dentry *debugfs;
struct dentry *ibm_cffps_dir;
struct ibm_cffps *psu;
@@ -405,8 +423,27 @@ static int ibm_cffps_probe(struct i2c_client *client,
vs = (enum versions)md;
else if (id)
vs = (enum versions)id->driver_data;
- else
- vs = cffps1;
+
+ if (vs == cffps_unknown) {
+ u16 ccin_version = CFFPS_CCIN_VERSION_1;
+ int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
+
+ if (ccin > 0)
+ ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
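+ /* e.g. a CCIN word of 0x2E33 yields version 0x2E -> cffps2 */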
+
+ switch (ccin_version) {
+ default:
+ case CFFPS_CCIN_VERSION_1:
+ vs = cffps1;
+ break;
+ case CFFPS_CCIN_VERSION_2:
+ vs = cffps2;
+ break;
+ }
+
+ /* Set the client name to include the version number. */
+ snprintf(client->name, I2C_NAME_SIZE, "cffps%d", vs + 1);
+ }
client->dev.platform_data = &ibm_cffps_pdata;
rc = pmbus_do_probe(client, id, &ibm_cffps_info[vs]);
@@ -465,6 +502,7 @@ static int ibm_cffps_probe(struct i2c_client *client,
static const struct i2c_device_id ibm_cffps_id[] = {
{ "ibm_cffps1", cffps1 },
{ "ibm_cffps2", cffps2 },
+ { "ibm_cffps", cffps_unknown },
{}
};
MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
@@ -478,6 +516,10 @@ static const struct of_device_id ibm_cffps_of_match[] = {
.compatible = "ibm,cffps2",
.data = (void *)cffps2
},
+ {
+ .compatible = "ibm,cffps",
+ .data = (void *)cffps_unknown
+ },
{}
};
MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index a94e35cff3e5..83a4fab151d2 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -127,7 +127,8 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
+ !data->valid) {
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
new file mode 100644
index 000000000000..df66e0bc1253
--- /dev/null
+++ b/drivers/hwmon/tmp513.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Texas Instruments TMP512, TMP513 power monitor chips
+ *
+ * TMP513:
+ * Thermal/Power Management with Triple Remote and
+ * Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp513
+ *
+ * TMP512:
+ * Thermal/Power Management with Dual Remote
+ * and Local Temperature Sensor and Current Shunt Monitor
+ * Datasheet: http://www.ti.com/lit/gpn/tmp512
+ *
+ * Copyright (C) 2019 Eric Tremblay <etremblay@distech-controls.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+// Common register definition
+#define TMP51X_SHUNT_CONFIG 0x00
+#define TMP51X_TEMP_CONFIG 0x01
+#define TMP51X_STATUS 0x02
+#define TMP51X_SMBUS_ALERT 0x03
+#define TMP51X_SHUNT_CURRENT_RESULT 0x04
+#define TMP51X_BUS_VOLTAGE_RESULT 0x05
+#define TMP51X_POWER_RESULT 0x06
+#define TMP51X_BUS_CURRENT_RESULT 0x07
+#define TMP51X_LOCAL_TEMP_RESULT 0x08
+#define TMP51X_REMOTE_TEMP_RESULT_1 0x09
+#define TMP51X_REMOTE_TEMP_RESULT_2 0x0A
+#define TMP51X_SHUNT_CURRENT_H_LIMIT 0x0C
+#define TMP51X_SHUNT_CURRENT_L_LIMIT 0x0D
+#define TMP51X_BUS_VOLTAGE_H_LIMIT 0x0E
+#define TMP51X_BUS_VOLTAGE_L_LIMIT 0x0F
+#define TMP51X_POWER_LIMIT 0x10
+#define TMP51X_LOCAL_TEMP_LIMIT 0x11
+#define TMP51X_REMOTE_TEMP_LIMIT_1 0x12
+#define TMP51X_REMOTE_TEMP_LIMIT_2 0x13
+#define TMP51X_SHUNT_CALIBRATION 0x15
+#define TMP51X_N_FACTOR_AND_HYST_1 0x16
+#define TMP51X_N_FACTOR_2 0x17
+#define TMP51X_MAN_ID_REG 0xFE
+#define TMP51X_DEVICE_ID_REG 0xFF
+
+// TMP513 specific register definition
+#define TMP513_REMOTE_TEMP_RESULT_3 0x0B
+#define TMP513_REMOTE_TEMP_LIMIT_3 0x14
+#define TMP513_N_FACTOR_3 0x18
+
+// Manufacturer and device ID values
+#define TMP51X_MANUFACTURER_ID 0x55FF
+
+#define TMP512_DEVICE_ID 0x22FF
+#define TMP513_DEVICE_ID 0x23FF
+
+// Default config
+#define TMP51X_SHUNT_CONFIG_DEFAULT 0x399F
+#define TMP51X_SHUNT_VALUE_DEFAULT 1000
+#define TMP51X_VBUS_RANGE_DEFAULT TMP51X_VBUS_RANGE_32V
+#define TMP51X_PGA_DEFAULT 8
+#define TMP51X_MAX_REGISTER_ADDR 0xFF
+
+#define TMP512_TEMP_CONFIG_DEFAULT 0xBF80
+#define TMP513_TEMP_CONFIG_DEFAULT 0xFF80
+
+// Mask and shift
+#define CURRENT_SENSE_VOLTAGE_320_MASK 0x1800
+#define CURRENT_SENSE_VOLTAGE_160_MASK 0x1000
+#define CURRENT_SENSE_VOLTAGE_80_MASK 0x0800
+#define CURRENT_SENSE_VOLTAGE_40_MASK 0
+
+#define TMP51X_BUS_VOLTAGE_MASK 0x2000
+#define TMP51X_NFACTOR_MASK 0xFF00
+#define TMP51X_HYST_MASK 0x00FF
+
+#define TMP51X_BUS_VOLTAGE_SHIFT 3
+#define TMP51X_TEMP_SHIFT 3
+
+// Alarms
+#define TMP51X_SHUNT_CURRENT_H_LIMIT_POS 15
+#define TMP51X_SHUNT_CURRENT_L_LIMIT_POS 14
+#define TMP51X_BUS_VOLTAGE_H_LIMIT_POS 13
+#define TMP51X_BUS_VOLTAGE_L_LIMIT_POS 12
+#define TMP51X_POWER_LIMIT_POS 11
+#define TMP51X_LOCAL_TEMP_LIMIT_POS 10
+#define TMP51X_REMOTE_TEMP_LIMIT_1_POS 9
+#define TMP51X_REMOTE_TEMP_LIMIT_2_POS 8
+#define TMP513_REMOTE_TEMP_LIMIT_3_POS 7
+
+#define TMP51X_VBUS_RANGE_32V 32000000
+#define TMP51X_VBUS_RANGE_16V 16000000
+
+// Max and Min value
+#define MAX_BUS_VOLTAGE_32_LIMIT 32764
+#define MAX_BUS_VOLTAGE_16_LIMIT 16382
+
+// Max possible value is -256 to +256 but the datasheet indicates -40 to 125.
+#define MAX_TEMP_LIMIT 125000
+#define MIN_TEMP_LIMIT -40000
+
+#define MAX_TEMP_HYST 127500
+
+static const u8 TMP51X_TEMP_INPUT[4] = {
+ TMP51X_LOCAL_TEMP_RESULT,
+ TMP51X_REMOTE_TEMP_RESULT_1,
+ TMP51X_REMOTE_TEMP_RESULT_2,
+ TMP513_REMOTE_TEMP_RESULT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT,
+ TMP51X_REMOTE_TEMP_LIMIT_1,
+ TMP51X_REMOTE_TEMP_LIMIT_2,
+ TMP513_REMOTE_TEMP_LIMIT_3
+};
+
+static const u8 TMP51X_TEMP_CRIT_ALARM[4] = {
+ TMP51X_LOCAL_TEMP_LIMIT_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_1_POS,
+ TMP51X_REMOTE_TEMP_LIMIT_2_POS,
+ TMP513_REMOTE_TEMP_LIMIT_3_POS
+};
+
+static const u8 TMP51X_TEMP_CRIT_HYST[4] = {
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_N_FACTOR_AND_HYST_1
+};
+
+static const u8 TMP51X_CURR_INPUT[2] = {
+ TMP51X_SHUNT_CURRENT_RESULT,
+ TMP51X_BUS_CURRENT_RESULT
+};
+
+static struct regmap_config tmp51x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP51X_MAX_REGISTER_ADDR,
+};
+
+enum tmp51x_ids {
+ tmp512, tmp513
+};
+
+struct tmp51x_data {
+ u16 shunt_config;
+ u16 pga_gain;
+ u32 vbus_range_uvolt;
+
+ u16 temp_config;
+ u32 nfactor[3];
+
+ u32 shunt_uohms;
+
+ u32 curr_lsb_ua;
+ u32 pwr_lsb_uw;
+
+ enum tmp51x_ids id;
+ struct regmap *regmap;
+};
+
+// Set the shift based on the gain: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
+static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
+{
+ return 5 - ffs(data->pga_gain);
+}
+
+static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
+ unsigned int regval, long *val)
+{
+ switch (reg) {
+ case TMP51X_STATUS:
+ *val = (regval >> pos) & 1;
+ break;
+ case TMP51X_SHUNT_CURRENT_RESULT:
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+ /*
+ * The value is read as a voltage in the chip but reported as a
+ * current to the user.
+ * Two's complement number shifted by one to four bits depending
+ * on the PGA gain setting. 1lsb = 10uV
+ */
+ *val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
+ *val = DIV_ROUND_CLOSEST(*val * 10000, data->shunt_uohms);
+ break;
+ case TMP51X_BUS_VOLTAGE_RESULT:
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
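+ // e.g. regval 0x1F40: (0x1F40 >> 3) * 4 = 4000 mV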
+ *val = (regval >> TMP51X_BUS_VOLTAGE_SHIFT) * 4;
+ break;
+ case TMP51X_POWER_RESULT:
+ case TMP51X_POWER_LIMIT:
+ // Power = (current * BusVoltage) / 5000
+ *val = regval * data->pwr_lsb_uw;
+ break;
+ case TMP51X_BUS_CURRENT_RESULT:
+ // Current = (ShuntVoltage * CalibrationRegister) / 4096
+ *val = sign_extend32(regval, 16) * data->curr_lsb_ua;
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ break;
+ case TMP51X_LOCAL_TEMP_RESULT:
+ case TMP51X_REMOTE_TEMP_RESULT_1:
+ case TMP51X_REMOTE_TEMP_RESULT_2:
+ case TMP513_REMOTE_TEMP_RESULT_3:
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+ // 1lsb = 0.0625 degrees centigrade
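+ // e.g. regval 0x0C80: (3200 >> 3) = 400, 400 * 625 / 10 = 25000 mdegC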
+ *val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
+ *val = DIV_ROUND_CLOSEST(*val * 625, 10);
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+ // 1lsb = 0.5 degrees centigrade
+ *val = (regval & TMP51X_HYST_MASK) * 500;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ *val = 0;
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
+{
+ int regval, max_val;
+ u32 mask = 0;
+
+ switch (reg) {
+ case TMP51X_SHUNT_CURRENT_H_LIMIT:
+ case TMP51X_SHUNT_CURRENT_L_LIMIT:
+ /*
+ * The user enters a current value which we convert to a
+ * voltage. 1lsb = 10uV
+ */
+ val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10000);
+ max_val = U16_MAX >> tmp51x_get_pga_shift(data);
+ regval = clamp_val(val, -max_val, max_val);
+ break;
+ case TMP51X_BUS_VOLTAGE_H_LIMIT:
+ case TMP51X_BUS_VOLTAGE_L_LIMIT:
+ // 1lsb = 4mV
+ max_val = (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) ?
+ MAX_BUS_VOLTAGE_32_LIMIT : MAX_BUS_VOLTAGE_16_LIMIT;
+
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 4), 0, max_val);
+ regval = val << TMP51X_BUS_VOLTAGE_SHIFT;
+ break;
+ case TMP51X_POWER_LIMIT:
+ regval = clamp_val(DIV_ROUND_CLOSEST(val, data->pwr_lsb_uw), 0,
+ U16_MAX);
+ break;
+ case TMP51X_LOCAL_TEMP_LIMIT:
+ case TMP51X_REMOTE_TEMP_LIMIT_1:
+ case TMP51X_REMOTE_TEMP_LIMIT_2:
+ case TMP513_REMOTE_TEMP_LIMIT_3:
+ // 1lsb = 0.0625 degrees centigrade
+ val = clamp_val(val, MIN_TEMP_LIMIT, MAX_TEMP_LIMIT);
+ regval = DIV_ROUND_CLOSEST(val * 10, 625) << TMP51X_TEMP_SHIFT;
+ break;
+ case TMP51X_N_FACTOR_AND_HYST_1:
+ // 1lsb = 0.5 degrees centigrade
+ val = clamp_val(val, 0, MAX_TEMP_HYST);
+ regval = DIV_ROUND_CLOSEST(val, 500);
+ mask = TMP51X_HYST_MASK;
+ break;
+ default:
+ // Programmer goofed
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+ }
+
+ if (mask == 0)
+ return regmap_write(data->regmap, reg, regval);
+ else
+ return regmap_update_bits(data->regmap, reg, mask, regval);
+}
+
+static u8 tmp51x_get_reg(enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return TMP51X_TEMP_INPUT[channel];
+ case hwmon_temp_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_temp_crit:
+ return TMP51X_TEMP_CRIT[channel];
+ case hwmon_temp_crit_hyst:
+ return TMP51X_TEMP_CRIT_HYST[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return TMP51X_BUS_VOLTAGE_RESULT;
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_in_lcrit:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT;
+ case hwmon_in_crit:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return TMP51X_CURR_INPUT[channel];
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_curr_lcrit:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT;
+ case hwmon_curr_crit:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return TMP51X_POWER_RESULT;
+ case hwmon_power_crit_alarm:
+ return TMP51X_STATUS;
+ case hwmon_power_crit:
+ return TMP51X_POWER_LIMIT;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static u8 tmp51x_get_status_pos(enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_crit_alarm:
+ return TMP51X_TEMP_CRIT_ALARM[channel];
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_lcrit_alarm:
+ return TMP51X_BUS_VOLTAGE_L_LIMIT_POS;
+ case hwmon_in_crit_alarm:
+ return TMP51X_BUS_VOLTAGE_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_lcrit_alarm:
+ return TMP51X_SHUNT_CURRENT_L_LIMIT_POS;
+ case hwmon_curr_crit_alarm:
+ return TMP51X_SHUNT_CURRENT_H_LIMIT_POS;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_crit_alarm:
+ return TMP51X_POWER_LIMIT_POS;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tmp51x_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct tmp51x_data *data = dev_get_drvdata(dev);
+ int ret;
+ u32 regval;
+ u8 pos = 0, reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ if (reg == TMP51X_STATUS)
+ pos = tmp51x_get_status_pos(type, attr, channel);
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ return tmp51x_get_value(data, reg, pos, regval, val);
+}
+
+static int tmp51x_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ u8 reg = 0;
+
+ reg = tmp51x_get_reg(type, attr, channel);
+ if (reg == 0)
+ return -EOPNOTSUPP;
+
+ return tmp51x_set_value(dev_get_drvdata(dev), reg, val);
+}
+
+static umode_t tmp51x_is_visible(const void *_data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct tmp51x_data *data = _data;
+
+ switch (type) {
+ case hwmon_temp:
+ if (data->id == tmp512 && channel == 4)
+ return 0;
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_crit:
+ return 0644;
+ case hwmon_temp_crit_hyst:
+ if (channel == 0)
+ return 0644;
+ return 0444;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ case hwmon_in_lcrit_alarm:
+ case hwmon_in_crit_alarm:
+ return 0444;
+ case hwmon_in_lcrit:
+ case hwmon_in_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_curr:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_curr_input:
+ case hwmon_curr_lcrit_alarm:
+ case hwmon_curr_crit_alarm:
+ return 0444;
+ case hwmon_curr_lcrit:
+ case hwmon_curr_crit:
+ return 0644;
+ }
+ break;
+ case hwmon_power:
+ if (!data->shunt_uohms)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_crit_alarm:
+ return 0444;
+ case hwmon_power_crit:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct hwmon_channel_info *tmp51x_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM |
+ HWMON_T_CRIT_HYST),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LCRIT | HWMON_I_LCRIT_ALARM |
+ HWMON_I_CRIT | HWMON_I_CRIT_ALARM),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LCRIT | HWMON_C_LCRIT_ALARM |
+ HWMON_C_CRIT | HWMON_C_CRIT_ALARM,
+ HWMON_C_INPUT),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops tmp51x_hwmon_ops = {
+ .is_visible = tmp51x_is_visible,
+ .read = tmp51x_read,
+ .write = tmp51x_write,
+};
+
+static const struct hwmon_chip_info tmp51x_chip_info = {
+ .ops = &tmp51x_hwmon_ops,
+ .info = tmp51x_info,
+};
+
+/*
+ * Calibrate the tmp51x following the datasheet method
+ */
+static int tmp51x_calibrate(struct tmp51x_data *data)
+{
+ int vshunt_max = data->pga_gain * 40;
+ u64 max_curr_ma;
+ u32 div;
+
+ /*
+ * If shunt_uohms is equal to 0, the calibration should be set to 0.
+ * The consequence will be that the current and power measurement engine
+ * of the sensor will not work. Temperature and voltage sensing will
+ * continue to work.
+ */
+ if (data->shunt_uohms == 0)
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, 0);
+
+ max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * 1000 * 1000,
+ data->shunt_uohms);
+
+ /*
+ * Calculate the minimal bit resolution for the current and the power.
+ * Those values will be used during register interpretation.
+ */
+ data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * 1000, 32767);
+ data->pwr_lsb_uw = 20 * data->curr_lsb_ua;
+
+ div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms,
+ 1000 * 1000);
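+ /*
+ * E.g. with the defaults (1000uOhm shunt, PGA gain 8):
+ * max_curr_ma = 320 * 1000000 / 1000 = 320000, curr_lsb_ua =
+ * 320000000 / 32767 = 9766, pwr_lsb_uw = 195320, div = 10 and the
+ * calibration register is written with 40960 / 10 = 4096.
+ */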
+
+ return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION,
+ DIV_ROUND_CLOSEST(40960, div));
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int tmp51x_init(struct tmp51x_data *data)
+{
+ unsigned int regval;
+ int ret = regmap_write(data->regmap, TMP51X_SHUNT_CONFIG,
+ data->shunt_config);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_TEMP_CONFIG, data->temp_config);
+ if (ret < 0)
+ return ret;
+
+ // nFactor configuration
+ ret = regmap_update_bits(data->regmap, TMP51X_N_FACTOR_AND_HYST_1,
+ TMP51X_NFACTOR_MASK, data->nfactor[0] << 8);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, TMP51X_N_FACTOR_2,
+ data->nfactor[1] << 8);
+ if (ret < 0)
+ return ret;
+
+ if (data->id == tmp513) {
+ ret = regmap_write(data->regmap, TMP513_N_FACTOR_3,
+ data->nfactor[2] << 8);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = tmp51x_calibrate(data);
+ if (ret < 0)
+ return ret;
+
+ // Read the status register before use, as the datasheet proposes
+ return regmap_read(data->regmap, TMP51X_STATUS, &regval);
+}
+
+static const struct i2c_device_id tmp51x_id[] = {
+ { "tmp512", tmp512 },
+ { "tmp513", tmp513 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp51x_id);
+
+static const struct of_device_id tmp51x_of_match[] = {
+ {
+ .compatible = "ti,tmp512",
+ .data = (void *)tmp512
+ },
+ {
+ .compatible = "ti,tmp513",
+ .data = (void *)tmp513
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tmp51x_of_match);
+
+static int tmp51x_vbus_range_to_reg(struct device *dev,
+ struct tmp51x_data *data)
+{
+ if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_32V) {
+ data->shunt_config |= TMP51X_BUS_VOLTAGE_MASK;
+ } else if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_16V) {
+ data->shunt_config &= ~TMP51X_BUS_VOLTAGE_MASK;
+ } else {
+ dev_err(dev, "ti,bus-range-microvolt is invalid: %u\n",
+ data->vbus_range_uvolt);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_pga_gain_to_reg(struct device *dev, struct tmp51x_data *data)
+{
+ if (data->pga_gain == 8) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_320_MASK;
+ } else if (data->pga_gain == 4) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_160_MASK;
+ } else if (data->pga_gain == 2) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_80_MASK;
+ } else if (data->pga_gain == 1) {
+ data->shunt_config |= CURRENT_SENSE_VOLTAGE_40_MASK;
+ } else {
+ dev_err(dev, "ti,pga-gain is invalid: %u\n", data->pga_gain);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tmp51x_read_properties(struct device *dev, struct tmp51x_data *data)
+{
+ int ret;
+ u32 nfactor[3];
+ u32 val;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms", &val);
+ data->shunt_uohms = (ret >= 0) ? val : TMP51X_SHUNT_VALUE_DEFAULT;
+
+ ret = device_property_read_u32(dev, "ti,bus-range-microvolt", &val);
+ data->vbus_range_uvolt = (ret >= 0) ? val : TMP51X_VBUS_RANGE_DEFAULT;
+ ret = tmp51x_vbus_range_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32(dev, "ti,pga-gain", &val);
+ data->pga_gain = (ret >= 0) ? val : TMP51X_PGA_DEFAULT;
+ ret = tmp51x_pga_gain_to_reg(dev, data);
+ if (ret < 0)
+ return ret;
+
+ ret = device_property_read_u32_array(dev, "ti,nfactor", nfactor,
+ (data->id == tmp513) ? 3 : 2);
+ if (ret >= 0)
+ memcpy(data->nfactor, nfactor,
+ sizeof(u32) * ((data->id == tmp513) ? 3 : 2));
+
+ // Check if shunt value is compatible with pga-gain
+ if (data->shunt_uohms > data->pga_gain * 40 * 1000 * 1000) {
+ dev_err(dev, "shunt-resistor: %u too big for pga_gain: %u\n",
+ data->shunt_uohms, data->pga_gain);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void tmp51x_use_default(struct tmp51x_data *data)
+{
+ data->vbus_range_uvolt = TMP51X_VBUS_RANGE_DEFAULT;
+ data->pga_gain = TMP51X_PGA_DEFAULT;
+ data->shunt_uohms = TMP51X_SHUNT_VALUE_DEFAULT;
+}
+
+static int tmp51x_configure(struct device *dev, struct tmp51x_data *data)
+{
+ data->shunt_config = TMP51X_SHUNT_CONFIG_DEFAULT;
+ data->temp_config = (data->id == tmp513) ?
+ TMP513_TEMP_CONFIG_DEFAULT : TMP512_TEMP_CONFIG_DEFAULT;
+
+ if (dev->of_node)
+ return tmp51x_read_properties(dev, data);
+
+ tmp51x_use_default(data);
+
+ return 0;
+}
+
+static int tmp51x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tmp51x_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (client->dev.of_node)
+ data->id = (enum tmp51x_ids)device_get_match_data(&client->dev);
+ else
+ data->id = id->driver_data;
+
+ ret = tmp51x_configure(dev, data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return ret;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client, &tmp51x_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = tmp51x_init(data);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &tmp51x_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_dbg(dev, "power monitor %s\n", id->name);
+
+ return 0;
+}
+
+static struct i2c_driver tmp51x_driver = {
+ .driver = {
+ .name = "tmp51x",
+ .of_match_table = of_match_ptr(tmp51x_of_match),
+ },
+ .probe = tmp51x_probe,
+ .id_table = tmp51x_id,
+};
+
+module_i2c_driver(tmp51x_driver);
+
+MODULE_AUTHOR("Eric Tremblay <etremblay@distechcontrols.com>");
+MODULE_DESCRIPTION("tmp51x driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9df48b70c70c..3f59f2a1a5e3 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1458,6 +1458,7 @@ static const struct file_operations watchdog_fops = {
.release = watchdog_close,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/*
@@ -2096,7 +2097,7 @@ END:
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
struct w83793_data *data = i2c_get_clientdata(client);
- u8 res = 0xff;
+ u8 res;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
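
The w83793 watchdog change is part of a tree-wide cleanup: drivers whose ioctl arguments are all pointers (or unused) can point .compat_ioctl at the generic compat_ptr_ioctl() helper instead of carrying a hand-rolled wrapper. A hedged sketch of the resulting file_operations, with a hypothetical foo_ioctl():

#include <linux/fs.h>
#include <linux/module.h>

/* hypothetical handler for pointer-based ioctl commands */
static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
	/* applies compat_ptr() to 'arg', then calls ->unlocked_ioctl() */
	.compat_ioctl	= compat_ptr_ioctl,
};

The same substitution appears below in drivers/hwtracing/stm/core.c, where it deletes an entire CONFIG_COMPAT wrapper.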
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 8862445aa858..fd5f5c5a5244 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -92,8 +92,8 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
int ret;
- BUG_ON(!hwlock);
- BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+ if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+ return -EINVAL;
/*
* This spin_lock{_irq, _irqsave} serves three purposes:
@@ -264,8 +264,8 @@ EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
*/
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
- BUG_ON(!hwlock);
- BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
+ if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
+ return;
/*
* We must make sure that memory operations (both reads and writes),
@@ -657,13 +657,15 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
/* notify PM core that power is now needed */
ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ if (ret < 0 && ret != -EACCES) {
dev_err(dev, "%s: can't power on device\n", __func__);
pm_runtime_put_noidle(dev);
module_put(dev->driver->owner);
return ret;
}
+ ret = 0;
+
/* mark hwspinlock as used, should not fail */
tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
@@ -820,9 +822,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
}
/* notify the underlying device that power is not needed */
- ret = pm_runtime_put(dev);
- if (ret < 0)
- goto out;
+ pm_runtime_put(dev);
/* mark this hwspinlock as available */
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
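
Replacing BUG_ON() with WARN_ON() plus an error return turns API misuse from a guaranteed panic into a loud diagnostic the caller can survive. A minimal sketch of the guard pattern, using hypothetical foo_* names:

#include <linux/bug.h>
#include <linux/errno.h>

#define FOO_IRQSTATE	1	/* hypothetical mode flag */

struct foo_lock;		/* opaque, hypothetical */

int __foo_trylock(struct foo_lock *lock, int mode, unsigned long *flags);

int foo_trylock(struct foo_lock *lock, int mode, unsigned long *flags)
{
	/* WARN_ON() prints a backtrace, then we bail out instead of panicking */
	if (WARN_ON(!lock || (!flags && mode == FOO_IRQSTATE)))
		return -EINVAL;

	return __foo_trylock(lock, mode, flags);
}

For void functions such as __hwspin_unlock() above, the same check simply returns early after the splat.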
diff --git a/drivers/hwspinlock/sprd_hwspinlock.c b/drivers/hwspinlock/sprd_hwspinlock.c
index dc42bf51f3e6..36dc8038bbb4 100644
--- a/drivers/hwspinlock/sprd_hwspinlock.c
+++ b/drivers/hwspinlock/sprd_hwspinlock.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "hwspinlock_internal.h"
@@ -79,11 +78,17 @@ static const struct hwspinlock_ops sprd_hwspinlock_ops = {
.relax = sprd_hwspinlock_relax,
};
+static void sprd_hwspinlock_disable(void *data)
+{
+ struct sprd_hwspinlock_dev *sprd_hwlock = data;
+
+ clk_disable_unprepare(sprd_hwlock->clk);
+}
+
static int sprd_hwspinlock_probe(struct platform_device *pdev)
{
struct sprd_hwspinlock_dev *sprd_hwlock;
struct hwspinlock *lock;
- struct resource *res;
int i, ret;
if (!pdev->dev.of_node)
@@ -96,8 +101,7 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
if (!sprd_hwlock)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sprd_hwlock->base = devm_ioremap_resource(&pdev->dev, res);
+ sprd_hwlock->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sprd_hwlock->base))
return PTR_ERR(sprd_hwlock->base);
@@ -107,7 +111,17 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
return PTR_ERR(sprd_hwlock->clk);
}
- clk_prepare_enable(sprd_hwlock->clk);
+ ret = clk_prepare_enable(sprd_hwlock->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev, sprd_hwspinlock_disable,
+ sprd_hwlock);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to add hwspinlock disable action\n");
+ return ret;
+ }
/* set the hwspinlock to record user id to identify subsystems */
writel(HWSPINLOCK_USER_BITS, sprd_hwlock->base + HWSPINLOCK_RECCTRL);
@@ -118,27 +132,10 @@ static int sprd_hwspinlock_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, sprd_hwlock);
- pm_runtime_enable(&pdev->dev);
- ret = hwspin_lock_register(&sprd_hwlock->bank, &pdev->dev,
- &sprd_hwspinlock_ops, 0, SPRD_HWLOCKS_NUM);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(sprd_hwlock->clk);
- return ret;
- }
-
- return 0;
-}
-
-static int sprd_hwspinlock_remove(struct platform_device *pdev)
-{
- struct sprd_hwspinlock_dev *sprd_hwlock = platform_get_drvdata(pdev);
-
- hwspin_lock_unregister(&sprd_hwlock->bank);
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(sprd_hwlock->clk);
- return 0;
+ return devm_hwspin_lock_register(&pdev->dev, &sprd_hwlock->bank,
+ &sprd_hwspinlock_ops, 0,
+ SPRD_HWLOCKS_NUM);
}
static const struct of_device_id sprd_hwspinlock_of_match[] = {
@@ -149,7 +146,6 @@ MODULE_DEVICE_TABLE(of, sprd_hwspinlock_of_match);
static struct platform_driver sprd_hwspinlock_driver = {
.probe = sprd_hwspinlock_probe,
- .remove = sprd_hwspinlock_remove,
.driver = {
.name = "sprd_hwspinlock",
.of_match_table = of_match_ptr(sprd_hwspinlock_of_match),
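
The sprd conversion hinges on devm_add_action_or_reset(): once the clock is running, a disable action is queued on the devres list, so both the error paths in probe and the now-deleted remove callback get clk_disable_unprepare() for free, in the correct reverse order. A sketch of the pattern, assuming a hypothetical foo driver:

#include <linux/clk.h>
#include <linux/device.h>

static void foo_clk_disable(void *data)
{
	clk_disable_unprepare(data);
}

static int foo_enable_clk(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * Queue the undo on the devres list; if the registration itself
	 * fails, the action runs immediately, so no error path leaks an
	 * enabled clock.
	 */
	return devm_add_action_or_reset(dev, foo_clk_disable, clk);
}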
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 572ca79d77e8..67845c0c9701 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
@@ -88,21 +87,16 @@ static int u8500_hsem_probe(struct platform_device *pdev)
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
- struct resource *res;
void __iomem *io_base;
- int i, ret, num_locks = U8500_MAX_SEMAPHORE;
+ int i, num_locks = U8500_MAX_SEMAPHORE;
ulong val;
if (!pdata)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- io_base = ioremap(res->start, resource_size(res));
- if (!io_base)
- return -ENOMEM;
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
/* make sure protocol 1 is selected */
val = readl(io_base + HSEM_CTRL_REG);
@@ -111,54 +105,29 @@ static int u8500_hsem_probe(struct platform_device *pdev)
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
- bank = kzalloc(struct_size(bank, lock, num_locks), GFP_KERNEL);
- if (!bank) {
- ret = -ENOMEM;
- goto iounmap_base;
- }
+ bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
+ GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
platform_set_drvdata(pdev, bank);
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i;
- /* no pm needed for HSem but required to comply with hwspilock core */
- pm_runtime_enable(&pdev->dev);
-
- ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops,
- pdata->base_id, num_locks);
- if (ret)
- goto reg_fail;
-
- return 0;
-
-reg_fail:
- pm_runtime_disable(&pdev->dev);
- kfree(bank);
-iounmap_base:
- iounmap(io_base);
- return ret;
+ return devm_hwspin_lock_register(&pdev->dev, bank,
+ &u8500_hwspinlock_ops,
+ pdata->base_id, num_locks);
}
static int u8500_hsem_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
- int ret;
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
- ret = hwspin_lock_unregister(bank);
- if (ret) {
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return ret;
- }
-
- pm_runtime_disable(&pdev->dev);
- iounmap(io_base);
- kfree(bank);
-
return 0;
}
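
Both hwspinlock conversions also fold platform_get_resource() plus ioremap() into devm_platform_ioremap_resource(), which looks up, requests and maps the memory resource in one step and unmaps it automatically on detach. A minimal probe fragment, with a hypothetical foo_probe():

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* look up, request and map IORESOURCE_MEM index 0 in one call */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the device through 'base' ... */
	return 0;
}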
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 7a9f5fb08330..6ff30e25af55 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
#
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
+ depends on ARM || ARM64
depends on OF || ACPI
select ARM_AMBA
select PERF_EVENTS
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 219c10eb752c..ce41482431f9 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -217,6 +217,7 @@ static ssize_t reset_store(struct device *dev,
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
+ config->vipcssctlr = 0x0;
/* Disable seq events */
for (i = 0; i < drvdata->nrseqstate-1; i++)
@@ -238,6 +239,7 @@ static ssize_t reset_store(struct device *dev,
for (i = 0; i < drvdata->nr_resource; i++)
config->res_ctrl[i] = 0x0;
+ config->ss_idx = 0x0;
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
config->ss_ctrl[i] = 0x0;
config->ss_pe_cmp[i] = 0x0;
@@ -296,8 +298,6 @@ static ssize_t mode_store(struct device *dev,
spin_lock(&drvdata->spinlock);
config->mode = val & ETMv4_MODE_ALL;
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
if (drvdata->instrp0 == true) {
/* start by clearing instruction P0 field */
@@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
if (kstrtoul(buf, 16, &val))
return -EINVAL;
+
+ /* mask off max threshold before checking min value */
+ val &= ETM_CYC_THRESHOLD_MASK;
if (val < drvdata->ccitmin)
return -EINVAL;
- config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ config->ccctlr = val;
return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
@@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
return -EINVAL;
if (!drvdata->nr_addr_cmp)
return -EINVAL;
+
/*
- * Bit[7:0] selects which address range comparator is used for
- * branch broadcast control.
+ * Bit[8] controls include(1) / exclude(0), bits[0-7] select
+ * individual range comparators. In include mode, at least one
+ * range must be selected.
*/
- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
return -EINVAL;
- config->bb_ctrl = val;
+ config->bb_ctrl = val & GENMASK(8, 0);
return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
@@ -738,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
- val = BMVAL(config->vinst_ctrl, 16, 19);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -754,8 +759,8 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
- config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+ /* clear all EXLEVEL_S bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->s_ex_level;
config->vinst_ctrl |= (val << 16);
@@ -773,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
struct etmv4_config *config = &drvdata->config;
/* EXLEVEL_NS, bits[23:20] */
- val = BMVAL(config->vinst_ctrl, 20, 23);
+ val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
@@ -789,8 +794,8 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,
return -EINVAL;
spin_lock(&drvdata->spinlock);
- /* clear EXLEVEL_NS bits (bit[23] is never implemented */
- config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+ /* clear EXLEVEL_NS bits */
+ config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
/* enable instruction tracing for corresponding exception level */
val &= drvdata->ns_ex_level;
config->vinst_ctrl |= (val << 20);
@@ -966,8 +971,12 @@ static ssize_t addr_range_store(struct device *dev,
unsigned long val1, val2;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int elements, exclude;
+
+ elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* exclude is optional, but we need at least two parameters */
+ if (elements < 2)
return -EINVAL;
/* lower address comparator cannot have a higher address value */
if (val1 > val2)
@@ -995,9 +1004,11 @@ static ssize_t addr_range_store(struct device *dev,
/*
* Program include or exclude control bits for vinst or vdata
* whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+ * - use the supplied value, or default to the bit set in 'mode'
*/
- etm4_set_mode_exclude(drvdata,
- config->mode & ETM_MODE_EXCLUDE ? true : false);
+ if (elements != 3)
+ exclude = config->mode & ETM_MODE_EXCLUDE;
+ etm4_set_mode_exclude(drvdata, exclude ? true : false);
spin_unlock(&drvdata->spinlock);
return size;
@@ -1054,8 +1065,6 @@ static ssize_t addr_start_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_START;
config->vissctlr |= BIT(idx);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1111,8 +1120,6 @@ static ssize_t addr_stop_store(struct device *dev,
config->addr_val[idx] = (u64)val;
config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
config->vissctlr |= BIT(idx + 16);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- config->vinst_ctrl |= BIT(9);
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1228,6 +1235,131 @@ static ssize_t addr_context_store(struct device *dev,
}
static DEVICE_ATTR_RW(addr_context);
+static ssize_t addr_exlevel_s_ns_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ val = BMVAL(config->addr_acc[idx], 8, 14);
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_exlevel_s_ns_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val & ~((GENMASK(14, 8) >> 8)))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
+ config->addr_acc[idx] &= ~(GENMASK(14, 8));
+ config->addr_acc[idx] |= (val << 8);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_exlevel_s_ns);
+
+static const char * const addr_type_names[] = {
+ "unused",
+ "single",
+ "range",
+ "start",
+ "stop"
+};
+
+static ssize_t addr_cmp_view_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx, addr_type;
+ unsigned long addr_v, addr_v2, addr_ctrl;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+ int size = 0;
+ bool exclude = false;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ addr_v = config->addr_val[idx];
+ addr_ctrl = config->addr_acc[idx];
+ addr_type = config->addr_type[idx];
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ if (idx & 0x1) {
+ idx -= 1;
+ addr_v2 = addr_v;
+ addr_v = config->addr_val[idx];
+ } else {
+ addr_v2 = config->addr_val[idx + 1];
+ }
+ exclude = config->viiectlr & BIT(idx / 2 + 16);
+ }
+ spin_unlock(&drvdata->spinlock);
+ if (addr_type) {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
+ addr_type_names[addr_type], addr_v);
+ if (addr_type == ETM_ADDR_TYPE_RANGE) {
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " %#lx %s", addr_v2,
+ exclude ? "exclude" : "include");
+ }
+ size += scnprintf(buf + size, PAGE_SIZE - size,
+ " ctrl(%#lx)\n", addr_ctrl);
+ } else {
+ size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
+ }
+ return size;
+}
+static DEVICE_ATTR_RO(addr_cmp_view);
+
+static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+ val = config->vipcssctlr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!drvdata->nr_pe_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->vipcssctlr = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
+
static ssize_t seq_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1324,8 +1456,8 @@ static ssize_t seq_event_store(struct device *dev,
spin_lock(&drvdata->spinlock);
idx = config->seq_idx;
- /* RST, bits[7:0] */
- config->seq_ctrl[idx] = val & 0xFF;
+ /* Seq control has two masks B[15:8] F[7:0] */
+ config->seq_ctrl[idx] = val & 0xFFFF;
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -1580,12 +1712,129 @@ static ssize_t res_ctrl_store(struct device *dev,
if (idx % 2 != 0)
/* PAIRINV, bit[21] */
val &= ~BIT(21);
- config->res_ctrl[idx] = val;
+ config->res_ctrl[idx] = val & GENMASK(21, 0);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(res_ctrl);
+static ssize_t sshot_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ss_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_ss_cmp)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->ss_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_idx);
+
+static ssize_t sshot_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_ctrl[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_ctrl[idx] = val & GENMASK(24, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_ctrl);
+
+static ssize_t sshot_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_status[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(sshot_status);
+
+static ssize_t sshot_pe_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ss_pe_cmp[config->ss_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t sshot_pe_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ss_idx;
+ config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
+ /* must clear bit 31 in related status register on programming */
+ config->ss_status[idx] &= ~BIT(31);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(sshot_pe_ctrl);
+
static ssize_t ctxid_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1714,6 +1963,7 @@ static ssize_t ctxid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* Don't use contextID tracing if coming from a PID namespace. See
@@ -1729,7 +1979,9 @@ static ssize_t ctxid_masks_store(struct device *dev,
*/
if (!drvdata->ctxid_size || !drvdata->numcidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numcidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -1903,6 +2155,7 @@ static ssize_t vmid_masks_store(struct device *dev,
unsigned long val1, val2, mask;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etmv4_config *config = &drvdata->config;
+ int nr_inputs;
/*
* only implemented when vmid tracing is enabled, i.e. at least one
@@ -1910,7 +2163,9 @@ static ssize_t vmid_masks_store(struct device *dev,
*/
if (!drvdata->vmid_size || !drvdata->numvmidc)
return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ /* one mask if <= 4 comparators, two for up to 8 */
+ nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
+ if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
return -EINVAL;
spin_lock(&drvdata->spinlock);
@@ -2033,6 +2288,13 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_addr_stop.attr,
&dev_attr_addr_ctxtype.attr,
&dev_attr_addr_context.attr,
+ &dev_attr_addr_exlevel_s_ns.attr,
+ &dev_attr_addr_cmp_view.attr,
+ &dev_attr_vinst_pe_cmp_start_stop.attr,
+ &dev_attr_sshot_idx.attr,
+ &dev_attr_sshot_ctrl.attr,
+ &dev_attr_sshot_pe_ctrl.attr,
+ &dev_attr_sshot_status.attr,
&dev_attr_seq_idx.attr,
&dev_attr_seq_state.attr,
&dev_attr_seq_event.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a128b5063f46..dc3f507e7562 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -18,6 +18,7 @@
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
@@ -26,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <asm/sections.h>
#include <asm/local.h>
#include <asm/virt.h>
@@ -37,6 +39,15 @@ static int boot_enable;
module_param(boot_enable, int, 0444);
MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
+#define PARAM_PM_SAVE_FIRMWARE 0 /* save self-hosted state as per firmware */
+#define PARAM_PM_SAVE_NEVER 1 /* never save any state */
+#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
+
+static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
+module_param(pm_save_enable, int, 0444);
+MODULE_PARM_DESC(pm_save_enable,
+ "Save/restore state on power down: 1 = never, 2 = self-hosted");
+
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
@@ -54,6 +65,14 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
isb();
}
+static void etm4_os_lock(struct etmv4_drvdata *drvdata)
+{
+ /* Writing 0x1 to TRCOSLAR locks the trace registers */
+ writel_relaxed(0x1, drvdata->base + TRCOSLAR);
+ drvdata->os_unlock = false;
+ isb();
+}
+
static bool etm4_arch_supported(u8 arch)
{
/* Mask out the minor version number */
@@ -149,6 +168,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
drvdata->base + TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ /* always clear status bit on restart if using single-shot */
+ if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
+ config->ss_status[i] &= ~BIT(31);
writel_relaxed(config->ss_ctrl[i],
drvdata->base + TRCSSCCRn(i));
writel_relaxed(config->ss_status[i],
@@ -448,6 +470,9 @@ static void etm4_disable_hw(void *info)
{
u32 control;
struct etmv4_drvdata *drvdata = info;
+ struct etmv4_config *config = &drvdata->config;
+ struct device *etm_dev = &drvdata->csdev->dev;
+ int i;
CS_UNLOCK(drvdata->base);
@@ -470,6 +495,18 @@ static void etm4_disable_hw(void *info)
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ /* wait for TRCSTATR.PMSTABLE to go to '1' */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1))
+ dev_err(etm_dev,
+ "timeout while waiting for PM stable Trace Status\n");
+
+ /* read the status of the single shot comparators */
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ config->ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
+
coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
@@ -576,6 +613,7 @@ static void etm4_init_arch_data(void *info)
u32 etmidr4;
u32 etmidr5;
struct etmv4_drvdata *drvdata = info;
+ int i;
/* Make sure all registers are accessible */
etm4_os_unlock(drvdata);
@@ -629,6 +667,7 @@ static void etm4_init_arch_data(void *info)
* TRCARCHMAJ, bits[11:8] architecture major version number
*/
drvdata->arch = BMVAL(etmidr1, 4, 11);
+ drvdata->config.arch = drvdata->arch;
/* maximum size of resources */
etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
@@ -698,9 +737,14 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
/*
* NUMSSCC, bits[23:20] the number of single-shot
- * comparator control for tracing
+ * comparator control for tracing. Read any status regs as these
+ * also contain RO capability data.
*/
drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ drvdata->config.ss_status[i] =
+ readl_relaxed(drvdata->base + TRCSSCSRn(i));
+ }
/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
drvdata->numcidc = BMVAL(etmidr4, 24, 27);
/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
@@ -780,6 +824,7 @@ static u64 etm4_get_ns_access_type(struct etmv4_config *config)
static u64 etm4_get_access_type(struct etmv4_config *config)
{
u64 access_type = etm4_get_ns_access_type(config);
+ u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
@@ -787,7 +832,8 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
*/
access_type |= (ETM_EXLEVEL_S_APP |
ETM_EXLEVEL_S_OS |
- ETM_EXLEVEL_S_HYP);
+ s_hyp |
+ ETM_EXLEVEL_S_MON);
return access_type;
}
@@ -865,6 +911,7 @@ static void etm4_set_default_filter(struct etmv4_config *config)
* in the started state
*/
config->vinst_ctrl |= BIT(9);
+ config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* No start-stop filtering for ViewInst */
config->vissctlr = 0x0;
@@ -1085,6 +1132,288 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
}
+#ifdef CONFIG_CPU_PM
+static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+{
+ int i, ret = 0;
+ struct etmv4_save_state *state;
+ struct device *etm_dev = &drvdata->csdev->dev;
+
+ /*
+ * As recommended by 3.4.1 ("The procedure when powering down the PE")
+ * of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Lock the OS lock to disable trace and external debugger access */
+ etm4_os_lock(drvdata);
+
+ /* wait for TRCSTATR.PMSTABLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR,
+ TRCSTATR_PMSTABLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for PM Stable Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ state = drvdata->save_state;
+
+ state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
+ state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
+ state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
+ state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
+ state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
+ state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
+ state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
+ state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
+ state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
+ state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
+ state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
+ state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
+ state->trcqctlr = readl(drvdata->base + TRCQCTLR);
+
+ state->trcvictlr = readl(drvdata->base + TRCVICTLR);
+ state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
+ state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
+ state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
+ state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
+ state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
+ state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
+
+ state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
+ state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
+ state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
+ state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
+ state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
+ state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
+ state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ state->trcacvr[i] = readl(drvdata->base + TRCACVRn(i));
+ state->trcacatr[i] = readl(drvdata->base + TRCACATRn(i));
+ }
+
+ /*
+ * Data trace stream is architecturally prohibited for A profile cores
+ * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
+ * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
+ * unit") of ARM IHI 0064D.
+ */
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ state->trccidcvr[i] = readl(drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ state->trcvmidcvr[i] = readl(drvdata->base + TRCVMIDCVRn(i));
+
+ state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
+ state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
+
+ state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
+ state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
+
+ state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
+
+ state->trcpdcr = readl(drvdata->base + TRCPDCR);
+
+ /* wait for TRCSTATR.IDLE to go up */
+ if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
+ dev_err(etm_dev,
+ "timeout while waiting for Idle Trace Status\n");
+ etm4_os_unlock(drvdata);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ drvdata->state_needs_restore = true;
+
+ /*
+ * Power can be removed from the trace unit now. We do this to
+ * potentially save power on systems that respect the TRCPDCR_PU
+ * despite requesting software to save/restore state.
+ */
+ writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
+ drvdata->base + TRCPDCR);
+
+out:
+ CS_LOCK(drvdata->base);
+ return ret;
+}
+
+static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+{
+ int i;
+ struct etmv4_save_state *state = drvdata->save_state;
+
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
+ writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
+ writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
+ writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
+ writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
+ writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
+ writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
+ writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
+ writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
+ writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
+ writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
+
+ writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
+ writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
+ writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
+ writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
+ writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
+ writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
+ writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
+
+ for (i = 0; i < drvdata->nrseqstate; i++)
+ writel_relaxed(state->trcseqevr[i],
+ drvdata->base + TRCSEQEVRn(i));
+
+ writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
+ writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
+ writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ writel_relaxed(state->trccntrldvr[i],
+ drvdata->base + TRCCNTRLDVRn(i));
+ writel_relaxed(state->trccntctlr[i],
+ drvdata->base + TRCCNTCTLRn(i));
+ writel_relaxed(state->trccntvr[i],
+ drvdata->base + TRCCNTVRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ writel_relaxed(state->trcrsctlr[i],
+ drvdata->base + TRCRSCTLRn(i));
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ writel_relaxed(state->trcssccr[i],
+ drvdata->base + TRCSSCCRn(i));
+ writel_relaxed(state->trcsscsr[i],
+ drvdata->base + TRCSSCSRn(i));
+ writel_relaxed(state->trcsspcicr[i],
+ drvdata->base + TRCSSPCICRn(i));
+ }
+
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ writel_relaxed(state->trcacvr[i],
+ drvdata->base + TRCACVRn(i));
+ writel_relaxed(state->trcacatr[i],
+ drvdata->base + TRCACATRn(i));
+ }
+
+ for (i = 0; i < drvdata->numcidc; i++)
+ writel_relaxed(state->trccidcvr[i],
+ drvdata->base + TRCCIDCVRn(i));
+
+ for (i = 0; i < drvdata->numvmidc; i++)
+ writel_relaxed(state->trcvmidcvr[i],
+ drvdata->base + TRCVMIDCVRn(i));
+
+ writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
+ writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
+
+ writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
+ writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
+
+ writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
+
+ writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
+
+ drvdata->state_needs_restore = false;
+
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using the
+ * memory-mapped interface") of ARM IHI 0064D
+ */
+ dsb(sy);
+ isb();
+
+ /* Unlock the OS lock to re-enable trace and external debug access */
+ etm4_os_unlock(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
+static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct etmv4_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+
+ if (!etmdrvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = etmdrvdata[cpu];
+
+ if (!drvdata->save_state)
+ return NOTIFY_OK;
+
+ if (WARN_ON_ONCE(drvdata->cpu != cpu))
+ return NOTIFY_BAD;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* save the state if self-hosted coresight is in use */
+ if (local_read(&drvdata->mode))
+ if (etm4_cpu_save(drvdata))
+ return NOTIFY_BAD;
+ break;
+ case CPU_PM_EXIT:
+ /* fallthrough */
+ case CPU_PM_ENTER_FAILED:
+ if (drvdata->state_needs_restore)
+ etm4_cpu_restore(drvdata);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block etm4_cpu_pm_nb = {
+ .notifier_call = etm4_cpu_pm_notify,
+};
+
+static int etm4_cpu_pm_register(void)
+{
+ return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+}
+
+static void etm4_cpu_pm_unregister(void)
+{
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+}
+#else
+static int etm4_cpu_pm_register(void) { return 0; }
+static void etm4_cpu_pm_unregister(void) { }
+#endif
+
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -1101,6 +1430,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
dev_set_drvdata(dev, drvdata);
+ if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
+ pm_save_enable = coresight_loses_context_with_cpu(dev) ?
+ PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
+
+ if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
+ drvdata->save_state = devm_kmalloc(dev,
+ sizeof(struct etmv4_save_state), GFP_KERNEL);
+ if (!drvdata->save_state)
+ return -ENOMEM;
+ }
+
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
@@ -1135,6 +1475,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
if (ret < 0)
goto err_arch_supported;
hp_online = ret;
+
+ ret = etm4_cpu_pm_register();
+ if (ret)
+ goto err_arch_supported;
}
cpus_read_unlock();
@@ -1185,6 +1529,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
if (--etm4_count == 0) {
+ etm4_cpu_pm_unregister();
+
cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
if (hp_online)
cpuhp_remove_state_nocalls(hp_online);
@@ -1211,6 +1557,7 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
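
The new save/restore support hangs off the CPU PM notifier chain: CPU_PM_ENTER means the core (and its ETM) may lose power, while CPU_PM_EXIT and CPU_PM_ENTER_FAILED mean any saved context must be restored. A skeleton of that wiring, assuming hypothetical foo_save_context()/foo_restore_context() helpers:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int foo_save_context(void)
{
	/* hypothetical: read hardware state into a RAM buffer */
	return 0;
}

static void foo_restore_context(void)
{
	/* hypothetical: write the buffered state back */
}

static int foo_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			     void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		/* veto the low-power entry if the state cannot be saved */
		if (foo_save_context())
			return NOTIFY_BAD;
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		foo_restore_context();
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_pm_nb = {
	.notifier_call = foo_cpu_pm_notify,
};

static int foo_pm_register(void)
{
	return cpu_pm_register_notifier(&foo_cpu_pm_nb);
}

These callbacks run on the CPU entering the low-power state with interrupts disabled, which is why smp_processor_id() is safe in etm4_cpu_pm_notify() above.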
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4523f10ddd0f..4a695bf90582 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -175,22 +175,28 @@
ETM_MODE_EXCL_USER)
#define TRCSTATR_IDLE_BIT 0
+#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
/* PowerDown Control Register bits */
#define TRCPDCR_PU BIT(3)
-/* secure state access levels */
+/* secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_S_APP BIT(8)
#define ETM_EXLEVEL_S_OS BIT(9)
-#define ETM_EXLEVEL_S_NA BIT(10)
-#define ETM_EXLEVEL_S_HYP BIT(11)
-/* non-secure state access levels */
+#define ETM_EXLEVEL_S_HYP BIT(10)
+#define ETM_EXLEVEL_S_MON BIT(11)
+/* non-secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_NS_APP BIT(12)
#define ETM_EXLEVEL_NS_OS BIT(13)
#define ETM_EXLEVEL_NS_HYP BIT(14)
#define ETM_EXLEVEL_NS_NA BIT(15)
+/* secure / non-secure masks - TRCVICTLR, IDR3 */
+#define ETM_EXLEVEL_S_VICTLR_MASK GENMASK(19, 16)
+/* NS MON (EL3) mode never implemented */
+#define ETM_EXLEVEL_NS_VICTLR_MASK GENMASK(22, 20)
+
/**
* struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
@@ -221,6 +227,7 @@
* @cntr_val: Sets or returns the value for a counter.
* @res_idx: Resource index selector.
* @res_ctrl: Controls the selection of the resources in the trace unit.
+ * @ss_idx: Single-shot index selector.
* @ss_ctrl: Controls the corresponding single-shot comparator resource.
* @ss_status: The status of the corresponding single-shot comparator.
* @ss_pe_cmp: Selects the PE comparator inputs for Single-shot control.
@@ -237,6 +244,7 @@
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
* @ext_inp: External input selection.
+ * @arch: ETM architecture version (for arch dependent config).
*/
struct etmv4_config {
u32 mode;
@@ -263,6 +271,7 @@ struct etmv4_config {
u32 cntr_val[ETMv4_MAX_CNTR];
u8 res_idx;
u32 res_ctrl[ETM_MAX_RES_SEL];
+ u8 ss_idx;
u32 ss_ctrl[ETM_MAX_SS_CMP];
u32 ss_status[ETM_MAX_SS_CMP];
u32 ss_pe_cmp[ETM_MAX_SS_CMP];
@@ -279,6 +288,66 @@ struct etmv4_config {
u32 vmid_mask0;
u32 vmid_mask1;
u32 ext_inp;
+ u8 arch;
+};
+
+/**
+ * struct etmv4_save_state - state to be preserved when the ETM is without power
+ */
+struct etmv4_save_state {
+ u32 trcprgctlr;
+ u32 trcprocselr;
+ u32 trcconfigr;
+ u32 trcauxctlr;
+ u32 trceventctl0r;
+ u32 trceventctl1r;
+ u32 trcstallctlr;
+ u32 trctsctlr;
+ u32 trcsyncpr;
+ u32 trcccctlr;
+ u32 trcbbctlr;
+ u32 trctraceidr;
+ u32 trcqctlr;
+
+ u32 trcvictlr;
+ u32 trcviiectlr;
+ u32 trcvissctlr;
+ u32 trcvipcssctlr;
+ u32 trcvdctlr;
+ u32 trcvdsacctlr;
+ u32 trcvdarcctlr;
+
+ u32 trcseqevr[ETM_MAX_SEQ_STATES];
+ u32 trcseqrstevr;
+ u32 trcseqstr;
+ u32 trcextinselr;
+ u32 trccntrldvr[ETMv4_MAX_CNTR];
+ u32 trccntctlr[ETMv4_MAX_CNTR];
+ u32 trccntvr[ETMv4_MAX_CNTR];
+
+ u32 trcrsctlr[ETM_MAX_RES_SEL * 2];
+
+ u32 trcssccr[ETM_MAX_SS_CMP];
+ u32 trcsscsr[ETM_MAX_SS_CMP];
+ u32 trcsspcicr[ETM_MAX_SS_CMP];
+
+ u64 trcacvr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trcacatr[ETM_MAX_SINGLE_ADDR_CMP];
+ u64 trccidcvr[ETMv4_MAX_CTXID_CMP];
+ u32 trcvmidcvr[ETM_MAX_VMID_CMP];
+ u32 trccidcctlr0;
+ u32 trccidcctlr1;
+ u32 trcvmidcctlr0;
+ u32 trcvmidcctlr1;
+
+ u32 trcclaimset;
+
+ u32 cntr_val[ETMv4_MAX_CNTR];
+ u32 seq_state;
+ u32 vinst_ctrl;
+ u32 ss_status[ETM_MAX_SS_CMP];
+
+ u32 trcpdcr;
};
/**
@@ -336,6 +405,8 @@ struct etmv4_config {
* @atbtrig: If the implementation can support ATB triggers
* @lpoverride: If the implementation can support low-power state over.
* @config: structure holding configuration parameters.
+ * @save_state: State to be preserved across power loss
+ * @state_needs_restore: True when there is context to restore after PM exit
*/
struct etmv4_drvdata {
void __iomem *base;
@@ -381,6 +452,8 @@ struct etmv4_drvdata {
bool atbtrig;
bool lpoverride;
struct etmv4_config config;
+ struct etmv4_save_state *save_state;
+ bool state_needs_restore;
};
/* Address comparator access types */
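
The header cleanup above replaces ad-hoc BIT() lists with named GENMASK() masks, so the .c file can extract and rewrite the EXLEVEL fields with plain mask-and-shift instead of per-bit surgery. A generic illustration (hypothetical FOO_* names; bits [19:16] chosen to mirror ETM_EXLEVEL_S_VICTLR_MASK):

#include <linux/bits.h>
#include <linux/types.h>

#define FOO_FIELD_MASK	GENMASK(19, 16)
#define FOO_FIELD_SHIFT	16

static inline u32 foo_field_get(u32 reg)
{
	return (reg & FOO_FIELD_MASK) >> FOO_FIELD_SHIFT;
}

static inline u32 foo_field_set(u32 reg, u32 val)
{
	/* clear the field, then insert the new value, clamped to the mask */
	return (reg & ~FOO_FIELD_MASK) |
	       ((val << FOO_FIELD_SHIFT) & FOO_FIELD_MASK);
}

linux/bitfield.h offers FIELD_GET()/FIELD_PREP() for the same job when the mask is a compile-time constant.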
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 05f7896c3a01..900690a9f7f0 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -38,12 +38,14 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
* @atclk: optional clock for the core parts of the funnel.
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
+ * @spinlock: serialize enable/disable operations.
*/
struct funnel_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
unsigned long priority;
+ spinlock_t spinlock;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
@@ -76,11 +78,21 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_funnel_enable_hw(drvdata, inport);
-
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_funnel_enable_hw(drvdata, inport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[inport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
@@ -107,11 +119,19 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[inport]) == 0) {
+ if (drvdata->base)
+ dynamic_funnel_disable_hw(drvdata, inport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_funnel_disable_hw(drvdata, inport);
-
- dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
+ if (last_disable)
+ dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
@@ -233,6 +253,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b29ba640eb25..e7dc1c31d20d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -31,11 +31,13 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* whether this one is programmable or not.
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
+ * @spinlock: serialize enable/disable operations.
*/
struct replicator_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
+ spinlock_t spinlock;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -97,10 +99,22 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (drvdata->base)
- rc = dynamic_replicator_enable(drvdata, inport, outport);
+ unsigned long flags;
+ bool first_enable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_read(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ rc = dynamic_replicator_enable(drvdata, inport,
+ outport);
+ if (!rc)
+ first_enable = true;
+ }
if (!rc)
+ atomic_inc(&csdev->refcnt[outport]);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
return rc;
}
@@ -137,10 +151,19 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
int outport)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ unsigned long flags;
+ bool last_disable = false;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (atomic_dec_return(&csdev->refcnt[outport]) == 0) {
+ if (drvdata->base)
+ dynamic_replicator_disable(drvdata, inport, outport);
+ last_disable = true;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (drvdata->base)
- dynamic_replicator_disable(drvdata, inport, outport);
- dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -225,6 +248,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
dev->platform_data = pdata;
+ spin_lock_init(&drvdata->spinlock);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 807416b75ecc..d0cc3985b72a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -334,9 +334,10 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
static int tmc_enable_etf_link(struct coresight_device *csdev,
int inport, int outport)
{
- int ret;
+ int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool first_enable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -344,12 +345,18 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- ret = tmc_etf_enable_hw(drvdata);
+ if (atomic_read(&csdev->refcnt[0]) == 0) {
+ ret = tmc_etf_enable_hw(drvdata);
+ if (!ret) {
+ drvdata->mode = CS_MODE_SYSFS;
+ first_enable = true;
+ }
+ }
if (!ret)
- drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(&csdev->refcnt[0]);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (!ret)
+ if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
return ret;
}
@@ -359,6 +366,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
@@ -366,11 +374,15 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- tmc_etf_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
+ tmc_etf_disable_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ last_disable = true;
+ }
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
+ if (last_disable)
+ dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 6453c67a4d01..ef20f74c85fa 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -253,9 +253,9 @@ static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int ret;
+ int ret = 0;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return -EINVAL;
@@ -264,29 +264,17 @@ static int coresight_enable_link(struct coresight_device *csdev,
outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
- refport = inport;
- else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
- refport = outport;
- else
- refport = 0;
-
- if (refport < 0)
- return refport;
-
- if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
- if (link_ops(csdev)->enable) {
- ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret) {
- atomic_dec(&csdev->refcnt[refport]);
- return ret;
- }
- }
- }
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && inport < 0)
+ return inport;
+ if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && outport < 0)
+ return outport;
- csdev->enable = true;
+ if (link_ops(csdev)->enable)
+ ret = link_ops(csdev)->enable(csdev, inport, outport);
+ if (!ret)
+ csdev->enable = true;
- return 0;
+ return ret;
}
static void coresight_disable_link(struct coresight_device *csdev,
@@ -295,7 +283,7 @@ static void coresight_disable_link(struct coresight_device *csdev,
{
int i, nr_conns;
int link_subtype;
- int refport, inport, outport;
+ int inport, outport;
if (!parent || !child)
return;
@@ -305,20 +293,15 @@ static void coresight_disable_link(struct coresight_device *csdev,
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- refport = inport;
nr_conns = csdev->pdata->nr_inport;
} else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- refport = outport;
nr_conns = csdev->pdata->nr_outport;
} else {
- refport = 0;
nr_conns = 1;
}
- if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
- if (link_ops(csdev)->disable)
- link_ops(csdev)->disable(csdev, inport, outport);
- }
+ if (link_ops(csdev)->disable)
+ link_ops(csdev)->disable(csdev, inport, outport);
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->refcnt[i]) != 0)
@@ -1308,6 +1291,12 @@ static inline int coresight_search_device_idx(struct coresight_dev_list *dict,
return -ENOENT;
}
+bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return fwnode_property_present(dev_fwnode(dev),
+ "arm,coresight-loses-context-with-cpu");
+}
+
/*
* coresight_alloc_device_name - Get an index for a given device in the
* device index list specific to a driver. An index is allocated for a
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index d5c1821b31c6..0dfd97bbde9e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -649,10 +649,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = intel_th_device_add_resources(thdev, res, subdev->nres);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_put_device;
- }
if (subdev->type == INTEL_TH_OUTPUT) {
if (subdev->mknode)
@@ -667,10 +665,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
}
err = device_add(&thdev->dev);
- if (err) {
- put_device(&thdev->dev);
+ if (err)
goto fail_free_res;
- }
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 03ca5b1bef9f..ebf3e30e989a 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -210,6 +210,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Ice Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8a29),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Tiger Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9a33),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Tiger Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 603b83ac5085..2712e699ba08 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -832,23 +832,13 @@ stm_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
}
-#ifdef CONFIG_COMPAT
-static long
-stm_char_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return stm_char_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#else
-#define stm_char_compat_ioctl NULL
-#endif
-
static const struct file_operations stm_fops = {
.open = stm_char_open,
.release = stm_char_release,
.write = stm_char_write,
.mmap = stm_char_mmap,
.unlocked_ioctl = stm_char_ioctl,
- .compat_ioctl = stm_char_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 4b9e44b227d8..4f932a419752 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -345,7 +345,11 @@ void stp_policy_unbind(struct stp_policy *policy)
stm->policy = NULL;
policy->stm = NULL;
+ /*
+ * Drop the reference on the protocol driver and lose the link.
+ */
stm_put_protocol(stm->pdrv);
+ stm->pdrv = NULL;
stm_put_device(stm);
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 146ce40d8e0a..6a0aa76859f3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -145,6 +145,7 @@ config I2C_I801
Comet Lake (PCH)
Elkhart Lake (PCH)
Tiger Lake (PCH)
+ Jasper Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -292,7 +293,7 @@ config I2C_VIA
select I2C_ALGOBIT
help
If you say yes to this option, support will be included for the VIA
- 82C586B I2C interface
+ 82C586B I2C interface
This driver can also be built as a module. If so, the module
will be called i2c-via.
@@ -677,11 +678,11 @@ config I2C_IMX_LPI2C
tristate "IMX Low Power I2C interface"
depends on ARCH_MXC || COMPILE_TEST
help
- Say Y here if you want to use the Low Power IIC bus controller
- on the Freescale i.MX processors.
+ Say Y here if you want to use the Low Power IIC bus controller
+ on the Freescale i.MX processors.
- This driver can also be built as a module. If so, the module
- will be called i2c-imx-lpi2c.
+ This driver can also be built as a module. If so, the module
+ will be called i2c-imx-lpi2c.
config I2C_IOP3XX
tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
@@ -874,6 +875,7 @@ config I2C_PXA_PCI
config I2C_PXA_SLAVE
bool "Intel PXA2XX I2C Slave comms support"
depends on I2C_PXA && !X86_32
+ select I2C_SLAVE
help
Support I2C slave mode communications on the PXA I2C bus. This
is necessary for systems where the PXA may be a target on the
@@ -1182,9 +1184,9 @@ config I2C_DIOLAN_U2C
will be called i2c-diolan-u2c.
config I2C_DLN2
- tristate "Diolan DLN-2 USB I2C adapter"
- depends on MFD_DLN2
- help
+ tristate "Diolan DLN-2 USB I2C adapter"
+ depends on MFD_DLN2
+ help
If you say yes to this option, support will be included for Diolan
DLN2, a USB to I2C interface.
@@ -1283,9 +1285,9 @@ config I2C_VIPERBOARD
help
Say yes here to access the I2C part of the Nano River
Technologies Viperboard as I2C master.
- See viperboard API specification and Nano
- River Tech's viperboard.h for detailed meaning
- of the module parameters.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
comment "Other I2C/SMBus bus drivers"
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 7b098ff5f5dd..a7be6f24450b 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -952,6 +952,10 @@ static const struct of_device_id aspeed_i2c_bus_of_table[] = {
.compatible = "aspeed,ast2500-i2c-bus",
.data = aspeed_i2c_25xx_get_clk_reg_val,
},
+ {
+ .compatible = "aspeed,ast2600-i2c-bus",
+ .data = aspeed_i2c_25xx_get_clk_reg_val,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 435c7d7377a3..e13af4874976 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -68,6 +68,9 @@ static struct at91_twi_pdata at91rm9200_config = {
.has_unre_flag = true,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9261_config = {
@@ -76,6 +79,9 @@ static struct at91_twi_pdata at91sam9261_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9260_config = {
@@ -84,6 +90,9 @@ static struct at91_twi_pdata at91sam9260_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9g20_config = {
@@ -92,6 +101,9 @@ static struct at91_twi_pdata at91sam9g20_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata at91sam9g10_config = {
@@ -100,6 +112,9 @@ static struct at91_twi_pdata at91sam9g10_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static const struct platform_device_id at91_twi_devtypes[] = {
@@ -130,6 +145,9 @@ static struct at91_twi_pdata at91sam9x5_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = false,
+ .has_dig_filtr = false,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata sama5d4_config = {
@@ -138,6 +156,9 @@ static struct at91_twi_pdata sama5d4_config = {
.has_unre_flag = false,
.has_alt_cmd = false,
.has_hold_field = true,
+ .has_dig_filtr = true,
+ .has_adv_dig_filtr = false,
+ .has_ana_filtr = false,
};
static struct at91_twi_pdata sama5d2_config = {
@@ -146,6 +167,20 @@ static struct at91_twi_pdata sama5d2_config = {
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
+ .has_dig_filtr = true,
+ .has_adv_dig_filtr = true,
+ .has_ana_filtr = true,
+};
+
+static struct at91_twi_pdata sam9x60_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = true,
+ .has_alt_cmd = true,
+ .has_hold_field = true,
+ .has_dig_filtr = true,
+ .has_adv_dig_filtr = true,
+ .has_ana_filtr = true,
};
static const struct of_device_id atmel_twi_dt_ids[] = {
@@ -174,6 +209,9 @@ static const struct of_device_id atmel_twi_dt_ids[] = {
.compatible = "atmel,sama5d2-i2c",
.data = &sama5d2_config,
}, {
+ .compatible = "microchip,sam9x60-i2c",
+ .data = &sam9x60_config,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index a3fcc35ffd3b..7a862e00b475 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -31,12 +31,32 @@
void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
+ struct at91_twi_pdata *pdata = dev->pdata;
+ u32 filtr = 0;
+
/* FIFO should be enabled immediately after the software reset */
if (dev->fifo_size)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
+
+ /* enable digital filter */
+ if (pdata->has_dig_filtr && dev->enable_dig_filt)
+ filtr |= AT91_TWI_FILTR_FILT;
+
+ /* enable advanced digital filter */
+ if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
+ filtr |= AT91_TWI_FILTR_FILT |
+ (AT91_TWI_FILTR_THRES(dev->filter_width) &
+ AT91_TWI_FILTR_THRES_MASK);
+
+ /* enable analog filter */
+ if (pdata->has_ana_filtr && dev->enable_ana_filt)
+ filtr |= AT91_TWI_FILTR_PADFEN;
+
+ if (filtr)
+ at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}
/*
@@ -45,7 +65,7 @@ void at91_init_twi_bus_master(struct at91_twi_dev *dev)
*/
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
- int ckdiv, cdiv, div, hold = 0;
+ int ckdiv, cdiv, div, hold = 0, filter_width = 0;
struct at91_twi_pdata *pdata = dev->pdata;
int offset = pdata->clk_offset;
int max_ckdiv = pdata->clk_max_div;
@@ -84,11 +104,29 @@ static void at91_calc_twi_clock(struct at91_twi_dev *dev)
}
}
+ if (pdata->has_adv_dig_filtr) {
+ /*
+ * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
+ * peripheral clocks
+ */
+ filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
+ * (clk_get_rate(dev->clk) / 1000), 1000000);
+ if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
+ dev_warn(dev->dev,
+ "Filter threshold set to its maximum value (%d instead of %d)\n",
+ AT91_TWI_FILTR_THRES_MAX, filter_width);
+ filter_width = AT91_TWI_FILTR_THRES_MAX;
+ }
+ }
+
dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
| AT91_TWI_CWGR_HOLD(hold);
- dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns)\n",
- cdiv, ckdiv, hold, t->sda_hold_ns);
+ dev->filter_width = filter_width;
+
+ dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
+ cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
+ t->digital_filter_width_ns);
}
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
@@ -720,14 +758,14 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
slave_config.dst_maxburst = 1;
slave_config.device_fc = false;
- dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
+ dma->chan_tx = dma_request_chan(dev->dev, "tx");
if (IS_ERR(dma->chan_tx)) {
ret = PTR_ERR(dma->chan_tx);
dma->chan_tx = NULL;
goto error;
}
- dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
+ dma->chan_rx = dma_request_chan(dev->dev, "rx");
if (IS_ERR(dma->chan_rx)) {
ret = PTR_ERR(dma->chan_rx);
dma->chan_rx = NULL;
@@ -793,6 +831,11 @@ int at91_twi_probe_master(struct platform_device *pdev,
dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
}
+ dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
+ "i2c-digital-filter");
+
+ dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
+ "i2c-analog-filter");
at91_calc_twi_clock(dev);
dev->adapter.algo = &at91_twi_algorithm;
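
The width calculation in the hunks above converts a requested digital filter width in nanoseconds into peripheral clock periods. A worked example with assumed numbers (the 41 MHz clock and 100 ns width are illustrative only):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static int example_filter_width(unsigned long rate, u32 width_ns)
{
	/*
	 * e.g. rate = 41000000 Hz, width_ns = 100:
	 * 100 * 41000 = 4100000; DIV_ROUND_UP(4100000, 1000000) = 5
	 * clock periods, later clamped to AT91_TWI_FILTR_THRES_MAX (7)
	 * when larger.
	 */
	return DIV_ROUND_UP(width_ns * (rate / 1000), 1000000);
}
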
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
index 499b506f6128..977a67bc0f88 100644
--- a/drivers/i2c/busses/i2c-at91.h
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -84,6 +84,13 @@
#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
#define AT91_TWI_ACR_DIR BIT(8)
+#define AT91_TWI_FILTR 0x0044
+#define AT91_TWI_FILTR_FILT BIT(0)
+#define AT91_TWI_FILTR_PADFEN BIT(1)
+#define AT91_TWI_FILTR_THRES(v) ((v) << 8)
+#define AT91_TWI_FILTR_THRES_MAX 7
+#define AT91_TWI_FILTR_THRES_MASK GENMASK(10, 8)
+
#define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
#define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
#define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
@@ -108,6 +115,9 @@ struct at91_twi_pdata {
bool has_unre_flag;
bool has_alt_cmd;
bool has_hold_field;
+ bool has_dig_filtr;
+ bool has_adv_dig_filtr;
+ bool has_ana_filtr;
struct at_dma_slave dma_slave;
};
@@ -145,6 +155,9 @@ struct at91_twi_dev {
unsigned smr;
struct i2c_client *slave;
#endif
+ bool enable_dig_filt;
+ bool enable_ana_filt;
+ u32 filter_width;
};
unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg);
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 9ffdffaf6141..30efb7913b2e 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -81,6 +81,7 @@
#define M_CMD_PROTOCOL_MASK 0xf
#define M_CMD_PROTOCOL_BLK_WR 0x7
#define M_CMD_PROTOCOL_BLK_RD 0x8
+#define M_CMD_PROTOCOL_PROCESS 0xa
#define M_CMD_PEC_SHIFT 8
#define M_CMD_RD_CNT_SHIFT 0
#define M_CMD_RD_CNT_MASK 0xff
@@ -675,13 +676,20 @@ static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
return 0;
}
-static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
- struct i2c_msg *msg)
+/*
+ * If 'process_call' is true, then this is a multi-msg transfer that requires
+ * a repeated start between the messages.
+ * More specifically, it must be a write (reg) followed by a read (data).
+ * The i2c quirks are set to enforce this rule.
+ */
+static int bcm_iproc_i2c_xfer_internal(struct bcm_iproc_i2c_dev *iproc_i2c,
+ struct i2c_msg *msgs, bool process_call)
{
int i;
u8 addr;
u32 val, tmp, val_intr_en;
unsigned int tx_bytes;
+ struct i2c_msg *msg = &msgs[0];
/* check if bus is busy */
if (!!(iproc_i2c_rd_reg(iproc_i2c,
@@ -707,14 +715,29 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
val = msg->buf[i];
/* mark the last byte */
- if (i == msg->len - 1)
- val |= BIT(M_TX_WR_STATUS_SHIFT);
+ if (!process_call && (i == msg->len - 1))
+ val |= 1 << M_TX_WR_STATUS_SHIFT;
iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
iproc_i2c->tx_bytes = tx_bytes;
}
+ /* Process the read message if this is process call */
+ if (process_call) {
+ msg++;
+ iproc_i2c->msg = msg; /* point to second msg */
+
+ /*
+ * The last byte to be sent out should be a slave
+ * address with read operation
+ */
+ addr = i2c_8bit_addr_from_msg(msg);
+ /* mark it the last byte out */
+ val = addr | (1 << M_TX_WR_STATUS_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
+ }
+
/* mark as incomplete before starting the transaction */
if (iproc_i2c->irq)
reinit_completion(&iproc_i2c->done);
@@ -733,7 +756,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
* underrun interrupt, which will be triggered when the TX FIFO is
* empty. When that happens we can then pump more data into the FIFO
*/
- if (!(msg->flags & I2C_M_RD) &&
+ if (!process_call && !(msg->flags & I2C_M_RD) &&
msg->len > iproc_i2c->tx_bytes)
val_intr_en |= BIT(IE_M_TX_UNDERRUN_SHIFT);
@@ -743,6 +766,8 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
val = BIT(M_CMD_START_BUSY_SHIFT);
if (msg->flags & I2C_M_RD) {
+ u32 protocol;
+
iproc_i2c->rx_bytes = 0;
if (msg->len > M_RX_FIFO_MAX_THLD_VALUE)
iproc_i2c->thld_bytes = M_RX_FIFO_THLD_VALUE;
@@ -758,7 +783,10 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
/* enable the RX threshold interrupt */
val_intr_en |= BIT(IE_M_RX_THLD_SHIFT);
- val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
+ protocol = process_call ?
+ M_CMD_PROTOCOL_PROCESS : M_CMD_PROTOCOL_BLK_RD;
+
+ val |= (protocol << M_CMD_PROTOCOL_SHIFT) |
(msg->len << M_CMD_RD_CNT_SHIFT);
} else {
val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
@@ -774,17 +802,24 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
struct i2c_msg msgs[], int num)
{
struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
- int ret, i;
+ bool process_call = false;
+ int ret;
- /* go through all messages */
- for (i = 0; i < num; i++) {
- ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
- if (ret) {
- dev_dbg(iproc_i2c->device, "xfer failed\n");
- return ret;
+ if (num == 2) {
+ /* Repeated start, use process call */
+ process_call = true;
+ if (msgs[1].flags & I2C_M_NOSTART) {
+ dev_err(iproc_i2c->device, "Invalid repeated start\n");
+ return -EOPNOTSUPP;
}
}
+ ret = bcm_iproc_i2c_xfer_internal(iproc_i2c, msgs, process_call);
+ if (ret) {
+ dev_dbg(iproc_i2c->device, "xfer failed\n");
+ return ret;
+ }
+
return num;
}
@@ -809,6 +844,8 @@ static struct i2c_algorithm bcm_iproc_algo = {
};
static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_comb_1st_msg_len = M_TX_RX_FIFO_SIZE,
.max_read_len = M_RX_MAX_READ_LEN,
};
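
With I2C_AQ_COMB_WRITE_THEN_READ set, the core rejects any two-message transfer that is not a write followed by a read, which is exactly the shape the process-call protocol needs. A hedged caller-side sketch (example_read_reg is hypothetical):

#include <linux/i2c.h>

static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(client->adapter, msgs, 2);

	/* i2c_transfer() returns the number of messages transferred */
	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}
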
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index c551aa96a2e3..958161c71985 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -3,6 +3,7 @@
//
// Copyright (C) 2013 Google, Inc.
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -240,7 +241,6 @@ static const struct i2c_algorithm ec_i2c_algorithm = {
static int ec_i2c_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct ec_i2c_device *bus = NULL;
@@ -256,7 +256,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
if (bus == NULL)
return -ENOMEM;
- err = of_property_read_u32(np, "google,remote-bus", &remote_bus);
+ err = device_property_read_u32(dev, "google,remote-bus", &remote_bus);
if (err) {
dev_err(dev, "Couldn't read remote-bus property\n");
return err;
@@ -271,7 +271,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
- bus->adap.dev.of_node = np;
+ bus->adap.dev.of_node = pdev->dev.of_node;
bus->adap.retries = I2C_MAX_RETRIES;
err = i2c_add_adapter(&bus->adap);
@@ -291,19 +291,24 @@ static int ec_i2c_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id cros_ec_i2c_of_match[] = {
{ .compatible = "google,cros-ec-i2c-tunnel" },
{},
};
MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
-#endif
+
+static const struct acpi_device_id cros_ec_i2c_tunnel_acpi_id[] = {
+ { "GOOG001A", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_i2c_tunnel_acpi_id);
static struct platform_driver ec_i2c_tunnel_driver = {
.probe = ec_i2c_probe,
.remove = ec_i2c_remove,
.driver = {
.name = "cros-ec-i2c-tunnel",
+ .acpi_match_table = ACPI_PTR(cros_ec_i2c_tunnel_acpi_id),
.of_match_table = of_match_ptr(cros_ec_i2c_of_match),
},
};
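
Switching to device_property_read_u32() is what lets one probe path serve both the DT match table and the new ACPI ID: the unified property API resolves against whichever firmware node enumerated the device. A minimal sketch (example_get_remote_bus is hypothetical):

#include <linux/property.h>

static int example_get_remote_bus(struct device *dev, u32 *remote_bus)
{
	/* resolves against DT or ACPI _DSD, whichever enumerated dev */
	return device_property_read_u32(dev, "google,remote-bus",
					remote_bus);
}
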
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f1c714acc280..f5e69fe56532 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -64,8 +64,10 @@
* Cedar Fork (PCH) 0x18df 32 hard yes yes yes
* Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
* Comet Lake (PCH) 0x02a3 32 hard yes yes yes
+ * Comet Lake-H (PCH) 0x06a3 32 hard yes yes yes
* Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes
* Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes
+ * Jasper Lake (SOC) 0x4da3 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -205,6 +207,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS 0x06a3
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
@@ -223,6 +226,7 @@
#define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
+#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
@@ -1069,8 +1073,10 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
{ 0, }
};
@@ -1750,8 +1756,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
+ case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
priv->features |= FEATURE_BLOCK_PROC;
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c
index 8382eb64b424..271470f4d8a9 100644
--- a/drivers/i2c/busses/i2c-icy.c
+++ b/drivers/i2c/busses/i2c-icy.c
@@ -122,7 +122,6 @@ static int icy_probe(struct zorro_dev *z,
struct fwnode_handle *new_fwnode;
struct i2c_board_info ltc2990_info = {
.type = "ltc2990",
- .addr = 0x4c,
};
i2c = devm_kzalloc(&z->dev, sizeof(*i2c), GFP_KERNEL);
@@ -188,10 +187,10 @@ static int icy_probe(struct zorro_dev *z,
ltc2990_info.fwnode = new_fwnode;
i2c->ltc2990_client =
- i2c_new_probed_device(&i2c->adapter,
- &ltc2990_info,
- icy_ltc2990_addresses,
- NULL);
+ i2c_new_scanned_device(&i2c->adapter,
+ &ltc2990_info,
+ icy_ltc2990_addresses,
+ NULL);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 2c3c3d6935c0..466e4f681d7a 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/i2c-pxa.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -180,7 +179,7 @@ struct pxa_i2c {
struct i2c_adapter adap;
struct clk *clk;
#ifdef CONFIG_I2C_PXA_SLAVE
- struct i2c_slave_client *slave;
+ struct i2c_client *slave;
#endif
unsigned int irqlogidx;
@@ -544,22 +543,23 @@ static void i2c_pxa_slave_txempty(struct pxa_i2c *i2c, u32 isr)
if (isr & ISR_BED) {
/* what should we do here? */
} else {
- int ret = 0;
+ u8 byte = 0;
if (i2c->slave != NULL)
- ret = i2c->slave->read(i2c->slave->data);
+ i2c_slave_event(i2c->slave, I2C_SLAVE_READ_PROCESSED,
+ &byte);
- writel(ret, _IDBR(i2c));
+ writel(byte, _IDBR(i2c));
writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c)); /* allow next byte */
}
}
static void i2c_pxa_slave_rxfull(struct pxa_i2c *i2c, u32 isr)
{
- unsigned int byte = readl(_IDBR(i2c));
+ u8 byte = readl(_IDBR(i2c));
if (i2c->slave != NULL)
- i2c->slave->write(i2c->slave->data, byte);
+ i2c_slave_event(i2c->slave, I2C_SLAVE_WRITE_RECEIVED, &byte);
writel(readl(_ICR(i2c)) | ICR_TB, _ICR(i2c));
}
@@ -572,9 +572,18 @@ static void i2c_pxa_slave_start(struct pxa_i2c *i2c, u32 isr)
dev_dbg(&i2c->adap.dev, "SAD, mode is slave-%cx\n",
(isr & ISR_RWM) ? 'r' : 't');
- if (i2c->slave != NULL)
- i2c->slave->event(i2c->slave->data,
- (isr & ISR_RWM) ? I2C_SLAVE_EVENT_START_READ : I2C_SLAVE_EVENT_START_WRITE);
+ if (i2c->slave != NULL) {
+ if (isr & ISR_RWM) {
+ u8 byte = 0;
+
+ i2c_slave_event(i2c->slave, I2C_SLAVE_READ_REQUESTED,
+ &byte);
+ writel(byte, _IDBR(i2c));
+ } else {
+ i2c_slave_event(i2c->slave, I2C_SLAVE_WRITE_REQUESTED,
+ NULL);
+ }
+ }
/*
* slave could interrupt in the middle of us generating a
@@ -607,7 +616,7 @@ static void i2c_pxa_slave_stop(struct pxa_i2c *i2c)
dev_dbg(&i2c->adap.dev, "ISR: SSD (Slave Stop)\n");
if (i2c->slave != NULL)
- i2c->slave->event(i2c->slave->data, I2C_SLAVE_EVENT_STOP);
+ i2c_slave_event(i2c->slave, I2C_SLAVE_STOP, NULL);
if (i2c_debug > 2)
dev_dbg(&i2c->adap.dev, "ISR: SSD (Slave Stop) acked\n");
@@ -619,6 +628,38 @@ static void i2c_pxa_slave_stop(struct pxa_i2c *i2c)
if (i2c->msg)
i2c_pxa_master_complete(i2c, I2C_RETRY);
}
+
+static int i2c_pxa_slave_reg(struct i2c_client *slave)
+{
+ struct pxa_i2c *i2c = slave->adapter->algo_data;
+
+ if (i2c->slave)
+ return -EBUSY;
+
+ if (!i2c->reg_isar)
+ return -EAFNOSUPPORT;
+
+ i2c->slave = slave;
+ i2c->slave_addr = slave->addr;
+
+ writel(i2c->slave_addr, _ISAR(i2c));
+
+ return 0;
+}
+
+static int i2c_pxa_slave_unreg(struct i2c_client *slave)
+{
+ struct pxa_i2c *i2c = slave->adapter->algo_data;
+
+ WARN_ON(!i2c->slave);
+
+ i2c->slave_addr = I2C_PXA_SLAVE_ADDR;
+ writel(i2c->slave_addr, _ISAR(i2c));
+
+ i2c->slave = NULL;
+
+ return 0;
+}
#else
static void i2c_pxa_slave_txempty(struct pxa_i2c *i2c, u32 isr)
{
@@ -1141,11 +1182,19 @@ static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
static const struct i2c_algorithm i2c_pxa_algorithm = {
.master_xfer = i2c_pxa_xfer,
.functionality = i2c_pxa_functionality,
+#ifdef CONFIG_I2C_PXA_SLAVE
+ .reg_slave = i2c_pxa_slave_reg,
+ .unreg_slave = i2c_pxa_slave_unreg,
+#endif
};
static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
.master_xfer = i2c_pxa_pio_xfer,
.functionality = i2c_pxa_functionality,
+#ifdef CONFIG_I2C_PXA_SLAVE
+ .reg_slave = i2c_pxa_slave_reg,
+ .unreg_slave = i2c_pxa_slave_unreg,
+#endif
};
static const struct of_device_id i2c_pxa_dt_ids[] = {
@@ -1270,10 +1319,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->highmode_enter = false;
if (plat) {
-#ifdef CONFIG_I2C_PXA_SLAVE
- i2c->slave_addr = plat->slave_addr;
- i2c->slave = plat->slave;
-#endif
i2c->adap.class = plat->class;
}
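
The driver now speaks the generic I2C slave framework: instead of the old pxa-private callbacks, slave backends receive i2c_slave_event() notifications. A minimal sketch of a backend on the other side of this interface (example_slave_cb is hypothetical; i2c_slave_register() is the framework's registration entry point):

#include <linux/i2c.h>

static int example_slave_cb(struct i2c_client *client,
			    enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		/* *val holds the byte the remote master just wrote */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = 0xa5;	/* next byte to hand back to the master */
		break;
	default:
		break;
	}

	return 0;
}
/* registered with: i2c_slave_register(client, example_slave_cb); */
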
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index e09cd0775ae9..2d7dabe12723 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -628,7 +628,7 @@ static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
int err;
if (!qup->btx.dma) {
- qup->btx.dma = dma_request_slave_channel_reason(qup->dev, "tx");
+ qup->btx.dma = dma_request_chan(qup->dev, "tx");
if (IS_ERR(qup->btx.dma)) {
err = PTR_ERR(qup->btx.dma);
qup->btx.dma = NULL;
@@ -638,7 +638,7 @@ static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
}
if (!qup->brx.dma) {
- qup->brx.dma = dma_request_slave_channel_reason(qup->dev, "rx");
+ qup->brx.dma = dma_request_chan(qup->dev, "rx");
if (IS_ERR(qup->brx.dma)) {
dev_err(qup->dev, "\n rx channel not available");
err = PTR_ERR(qup->brx.dma);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 531c01100b56..879f0e61a496 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -317,7 +317,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv, struct i2c_timin
scgd_find:
dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
- scl, t->bus_freq_hz, clk_get_rate(priv->clk), round, cdf, scgd);
+ scl, t->bus_freq_hz, rate, round, cdf, scgd);
/* keep icccr value */
priv->icccr = scgd << cdf_width | cdf;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8777af4c695e..82b3b795e0bd 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -486,7 +486,7 @@ static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
int ret;
- chan = dma_request_slave_channel_reason(dev, chan_name);
+ chan = dma_request_chan(dev, chan_name);
if (IS_ERR(chan)) {
dev_dbg(dev, "request_channel failed for %s (%ld)\n", chan_name,
PTR_ERR(chan));
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index 07d5dfce68d4..1da347e6a358 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -20,13 +20,13 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
if (!dma)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Request and configure I2C TX dma channel */
- dma->chan_tx = dma_request_slave_channel(dev, "tx");
- if (!dma->chan_tx) {
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
dev_dbg(dev, "can't request DMA tx channel\n");
- ret = -EINVAL;
+ ret = PTR_ERR(dma->chan_tx);
goto fail_al;
}
@@ -42,10 +42,10 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
}
/* Request and configure I2C RX dma channel */
- dma->chan_rx = dma_request_slave_channel(dev, "rx");
- if (!dma->chan_rx) {
+ dma->chan_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(dma->chan_rx)) {
dev_err(dev, "can't request DMA rx channel\n");
- ret = -EINVAL;
+ ret = PTR_ERR(dma->chan_rx);
goto fail_tx;
}
@@ -75,7 +75,7 @@ fail_al:
devm_kfree(dev, dma);
dev_info(dev, "can't use DMA\n");
- return NULL;
+ return ERR_PTR(ret);
}
void stm32_i2c_dma_free(struct stm32_i2c_dma *dma)
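
The DMA conversions in this series all follow from the same API difference: dma_request_slave_channel() returned NULL on failure, while dma_request_chan() returns ERR_PTR() codes, so -EPROBE_DEFER can propagate out of probe instead of being folded into a generic error. The canonical request pattern, as a hedged sketch:

#include <linux/dmaengine.h>

static int example_request_tx(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* e.g. -EPROBE_DEFER or -ENODEV */

	*out = chan;
	return 0;
}
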
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b24e7b937f21..b2634afe066d 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1267,8 +1267,8 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
* slave[0] supports 7-bit and 10-bit slave address
* slave[1] supports 7-bit slave address only
*/
- for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) {
- if (i == 1 && (slave->flags & I2C_CLIENT_PEC))
+ for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) {
+ if (i == 1 && (slave->flags & I2C_CLIENT_TEN))
continue;
if (!i2c_dev->slave[i]) {
*id = i;
@@ -1955,6 +1955,15 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr,
STM32F7_I2C_TXDR,
STM32F7_I2C_RXDR);
+ if (PTR_ERR(i2c_dev->dma) == -ENODEV)
+ i2c_dev->dma = NULL;
+ else if (IS_ERR(i2c_dev->dma)) {
+ ret = PTR_ERR(i2c_dev->dma);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request dma error %i\n", ret);
+ goto clk_free;
+ }
platform_set_drvdata(pdev, i2c_dev);
@@ -1985,6 +1994,11 @@ pm_disable:
pm_runtime_set_suspended(i2c_dev->dev);
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+ if (i2c_dev->dma) {
+ stm32_i2c_dma_free(i2c_dev->dma);
+ i2c_dev->dma = NULL;
+ }
+
clk_free:
clk_disable_unprepare(i2c_dev->clk);
@@ -1995,21 +2009,21 @@ static int stm32f7_i2c_remove(struct platform_device *pdev)
{
struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
- if (i2c_dev->dma) {
- stm32_i2c_dma_free(i2c_dev->dma);
- i2c_dev->dma = NULL;
- }
-
i2c_del_adapter(&i2c_dev->adap);
pm_runtime_get_sync(i2c_dev->dev);
- clk_disable_unprepare(i2c_dev->clk);
-
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);
pm_runtime_set_suspended(i2c_dev->dev);
pm_runtime_dont_use_autosuspend(i2c_dev->dev);
+ if (i2c_dev->dma) {
+ stm32_i2c_dma_free(i2c_dev->dma);
+ i2c_dev->dma = NULL;
+ }
+
+ clk_disable_unprepare(i2c_dev->clk);
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index c1683f9338b4..a98bf31d0e5c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -413,7 +413,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
return 0;
}
- chan = dma_request_slave_channel_reason(i2c_dev->dev, "rx");
+ chan = dma_request_chan(i2c_dev->dev, "rx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_out;
@@ -421,7 +421,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
i2c_dev->rx_dma_chan = chan;
- chan = dma_request_slave_channel_reason(i2c_dev->dev, "tx");
+ chan = dma_request_chan(i2c_dev->dev, "tx");
if (IS_ERR(chan)) {
err = PTR_ERR(chan);
goto err_out;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 37b3b9307d07..d8d49f1814c7 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -46,6 +46,7 @@ enum xiic_endian {
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
+ * @dev: Pointer to device structure
* @base: Memory base of the HW registers
* @wait: Wait queue for callers
* @adap: Kernel adapter representation
@@ -57,6 +58,7 @@ enum xiic_endian {
* @rx_msg: Current RX message
* @rx_pos: Position within current RX message
* @endianness: big/little-endian byte order
+ * @clk: Pointer to AXI4-lite input clock
*/
struct xiic_i2c {
struct device *dev;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5f6a4985f2bc..9333c865d4a9 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1656,6 +1656,12 @@ void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_de
t->sda_fall_ns = t->scl_fall_ns;
device_property_read_u32(dev, "i2c-sda-hold-time-ns", &t->sda_hold_ns);
+
+ device_property_read_u32(dev, "i2c-digital-filter-width-ns",
+ &t->digital_filter_width_ns);
+
+ device_property_read_u32(dev, "i2c-analog-filter-cutoff-frequency",
+ &t->analog_filter_cutoff_freq_hz);
}
EXPORT_SYMBOL_GPL(i2c_parse_fw_timings);
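
A bus driver picks the two new firmware properties up through struct i2c_timings after parsing; a hedged consumer-side sketch (example_get_timings is hypothetical):

#include <linux/i2c.h>

static void example_get_timings(struct device *dev)
{
	struct i2c_timings t = {};

	i2c_parse_fw_timings(dev, &t, true);
	/*
	 * t.digital_filter_width_ns and t.analog_filter_cutoff_freq_hz
	 * now reflect "i2c-digital-filter-width-ns" and
	 * "i2c-analog-filter-cutoff-frequency", or stay 0 when absent.
	 */
}
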
@@ -1737,38 +1743,6 @@ EXPORT_SYMBOL(i2c_del_driver);
/* ------------------------------------------------------------------------- */
-/**
- * i2c_use_client - increments the reference count of the i2c client structure
- * @client: the client being referenced
- *
- * Each live reference to a client should be refcounted. The driver model does
- * that automatically as part of driver binding, so that most drivers don't
- * need to do this explicitly: they hold a reference until they're unbound
- * from the device.
- *
- * A pointer to the client with the incremented reference counter is returned.
- */
-struct i2c_client *i2c_use_client(struct i2c_client *client)
-{
- if (client && get_device(&client->dev))
- return client;
- return NULL;
-}
-EXPORT_SYMBOL(i2c_use_client);
-
-/**
- * i2c_release_client - release a use of the i2c client structure
- * @client: the client being no longer referenced
- *
- * Must be called when a user of a client is finished with it.
- */
-void i2c_release_client(struct i2c_client *client)
-{
- if (client)
- put_device(&client->dev);
-}
-EXPORT_SYMBOL(i2c_release_client);
-
struct i2c_cmd_arg {
unsigned cmd;
void *arg;
@@ -2271,10 +2245,10 @@ int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr)
EXPORT_SYMBOL_GPL(i2c_probe_func_quick_read);
struct i2c_client *
-i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *adap, unsigned short addr))
+i2c_new_scanned_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr))
{
int i;
@@ -2304,11 +2278,24 @@ i2c_new_probed_device(struct i2c_adapter *adap,
if (addr_list[i] == I2C_CLIENT_END) {
dev_dbg(&adap->dev, "Probing failed, no device found\n");
- return NULL;
+ return ERR_PTR(-ENODEV);
}
info->addr = addr_list[i];
- return i2c_new_device(adap, info);
+ return i2c_new_client_device(adap, info);
+}
+EXPORT_SYMBOL_GPL(i2c_new_scanned_device);
+
+struct i2c_client *
+i2c_new_probed_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr))
+{
+ struct i2c_client *client;
+
+ client = i2c_new_scanned_device(adap, info, addr_list, probe);
+ return IS_ERR(client) ? NULL : client;
}
EXPORT_SYMBOL_GPL(i2c_new_probed_device);
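
Callers migrating to the new name must switch from NULL checks to the ERR_PTR() convention; a minimal sketch (example_attach is hypothetical):

#include <linux/i2c.h>

static int example_attach(struct i2c_adapter *adap,
			  struct i2c_board_info *info,
			  const unsigned short *addr_list)
{
	struct i2c_client *client;

	client = i2c_new_scanned_device(adap, info, addr_list, NULL);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* e.g. -ENODEV: nothing answered */

	return 0;
}
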
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 7eb41990bd6d..e4d296b40baa 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -50,6 +50,7 @@ int of_i2c_get_board_info(struct device *dev, struct device_node *node,
info->addr = addr;
info->of_node = node;
+ info->fwnode = of_fwnode_handle(node);
if (of_property_read_bool(node, "host-notify"))
info->flags |= I2C_CLIENT_HOST_NOTIFY;
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 03096f47e6ab..7e2f5d0eacdb 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -66,7 +66,6 @@ static irqreturn_t smbus_alert(int irq, void *d)
{
struct i2c_smbus_alert *alert = d;
struct i2c_client *ara;
- unsigned short prev_addr = 0; /* Not a valid address */
ara = alert->ara;
@@ -90,18 +89,12 @@ static irqreturn_t smbus_alert(int irq, void *d)
data.addr = status >> 1;
data.type = I2C_PROTOCOL_SMBUS_ALERT;
- if (data.addr == prev_addr) {
- dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
- "0x%02x, skipping\n", data.addr);
- break;
- }
dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
data.addr, data.data);
/* Notify driver for the device which issued the alert */
device_for_each_child(&ara->adapter->dev, &data,
smbus_do_alert);
- prev_addr = data.addr;
}
return IRQ_HANDLED;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index c6040aa839ac..1708b1a82da2 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -109,14 +109,14 @@ config I2C_DEMUX_PINCTRL
want to change the I2C master at run-time depending on features.
config I2C_MUX_MLXCPLD
- tristate "Mellanox CPLD based I2C multiplexer"
- help
- If you say yes to this option, support will be included for a
- CPLD based I2C multiplexer. This driver provides access to
- I2C busses connected through a MUX, which is controlled
- by a CPLD register.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-mux-mlxcpld.
+ tristate "Mellanox CPLD based I2C multiplexer"
+ help
+ If you say yes to this option, support will be included for a
+ CPLD based I2C multiplexer. This driver provides access to
+ I2C busses connected through a MUX, which is controlled
+ by a CPLD register.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mux-mlxcpld.
endmenu
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index db1a65f4b490..3e7482695f77 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -19,6 +19,7 @@
#define IDETAPE_VERSION "1.20"
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -1407,14 +1408,10 @@ static long do_idetape_chrdev_ioctl(struct file *file,
if (tape->drv_write_prot)
mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
- if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
- return -EFAULT;
- return 0;
+ return put_user_mtget(argp, &mtget);
case MTIOCPOS:
mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
- if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
- return -EFAULT;
- return 0;
+ return put_user_mtpos(argp, &mtpos);
default:
if (tape->chrdev_dir == IDETAPE_DIR_READ)
ide_tape_discard_merge_buffer(drive, 1);
@@ -1432,6 +1429,22 @@ static long idetape_chrdev_ioctl(struct file *file,
return ret;
}
+static long idetape_chrdev_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (cmd == MTIOCPOS32)
+ cmd = MTIOCPOS;
+ else if (cmd == MTIOCGET32)
+ cmd = MTIOCGET;
+
+ mutex_lock(&ide_tape_mutex);
+ ret = do_idetape_chrdev_ioctl(file, cmd, arg);
+ mutex_unlock(&ide_tape_mutex);
+ return ret;
+}
+
/*
* Do a mode sense page 0 with block descriptor and if it succeeds set the tape
* block size with the reported value.
@@ -1886,6 +1899,8 @@ static const struct file_operations idetape_fops = {
.read = idetape_chrdev_read,
.write = idetape_chrdev_write,
.unlocked_ioctl = idetape_chrdev_ioctl,
+ .compat_ioctl = IS_ENABLED(CONFIG_COMPAT) ?
+ idetape_chrdev_compat_ioctl : NULL,
.open = idetape_chrdev_open,
.release = idetape_chrdev_release,
.llseek = noop_llseek,
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index fcc3f999e482..65f85faf6f31 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -163,16 +163,10 @@ static const struct iio_chan_spec cros_ec_accel_legacy_channels[] = {
static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_sensors_core_state *state;
int ret;
- if (!ec || !ec->ec_dev) {
- dev_warn(&pdev->dev, "No EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 2e37f8a6d8cf..7b837641f166 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f0af3a42f53c..5d8540b7b427 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -6,6 +6,16 @@
menu "Analog to digital converters"
+config AB8500_GPADC
+ bool "ST-Ericsson AB8500 GPADC driver"
+ depends on AB8500_CORE && REGULATOR_AB8500
+ default y
+ help
+ AB8500 Analog Baseband, mixed signal integrated circuit GPADC
+ (General Purpose Analog to Digital Converter) driver, used to monitor
+ internal voltages and to convert accessory, battery, AC (charger,
+ mains) and USB voltages integral to the U8500 platform.
+
config AD_SIGMA_DELTA
tristate
select IIO_BUFFER
@@ -45,6 +55,16 @@ config AD7291
To compile this driver as a module, choose M here: the
module will be called ad7291.
+config AD7292
+ tristate "Analog Devices AD7292 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7292
+ 8 Channel ADC with temperature sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7292.
+
config AD7298
tristate "Analog Devices AD7298 ADC driver"
depends on SPI
@@ -432,6 +452,17 @@ config INGENIC_ADC
This driver can also be built as a module. If so, the module will be
called ingenic_adc.
+config INTEL_MRFLD_ADC
+ tristate "Intel Merrifield Basin Cove ADC driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ help
+ Say yes here to have support for the Basin Cove power management IC
+ (PMIC) ADC device. Depending on platform configuration, this general
+ purpose ADC can be used for sampling sensors such as thermal resistors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_mrfld_adc.
+
config IMX7D_ADC
tristate "Freescale IMX7D ADC driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -508,8 +539,8 @@ config MAX1027
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Maxim SPI ADC models
- max1027, max1029 and max1031.
+ Say yes here to build support for Maxim SPI {10,12}-bit ADC models:
+ max1027, max1029, max1031, max1227, max1229 and max1231.
To compile this driver as a module, choose M here: the module will be
called max1027.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ef9cc485fb67..a1f1fbec0f87 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -4,10 +4,12 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7291) += ad7291.o
+obj-$(CONFIG_AD7292) += ad7292.o
obj-$(CONFIG_AD7298) += ad7298.o
obj-$(CONFIG_AD7923) += ad7923.o
obj-$(CONFIG_AD7476) += ad7476.o
@@ -42,6 +44,7 @@ obj-$(CONFIG_HX711) += hx711.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
+obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
new file mode 100644
index 000000000000..fd5b18d7f0c2
--- /dev/null
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -0,0 +1,1218 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: Johan Palsson <johan.palsson@stericsson.com>
+ * Author: M'boumba Cedric Madianga
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * AB8500 General Purpose ADC driver. The AB8500 uses reference voltages:
+ * VinVADC, and VADC relative to GND to do its job. It monitors main and backup
+ * battery voltages, AC (mains) voltage, USB cable voltage, as well as voltages
+ * representing the temperature of the chip die and battery, accessory
+ * detection by resistance measurements using relative voltages and GSM burst
+ * information.
+ *
+ * Some of the voltages are measured on external pins on the IC, such as
+ * battery temperature or "ADC aux" 1 and 2. Other voltages are internal rails
+ * from other parts of the ASIC such as main charger voltage, main and battery
+ * backup voltage or USB VBUS voltage. For this reason drivers for other
+ * parts of the system are required to obtain handles to the ADC to do work
+ * for them and the IIO driver provides arbitration among these consumers.
+ */
+#include <linux/init.h>
+#include <linux/bits.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/random.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+
+/* GPADC register offsets and bit definitions */
+
+#define AB8500_GPADC_CTRL1_REG 0x00
+/* GPADC control register 1 bits */
+#define AB8500_GPADC_CTRL1_DISABLE 0x00
+#define AB8500_GPADC_CTRL1_ENABLE BIT(0)
+#define AB8500_GPADC_CTRL1_TRIG_ENA BIT(1)
+#define AB8500_GPADC_CTRL1_START_SW_CONV BIT(2)
+#define AB8500_GPADC_CTRL1_BTEMP_PULL_UP BIT(3)
+/* 0 = use rising edge, 1 = use falling edge */
+#define AB8500_GPADC_CTRL1_TRIG_EDGE BIT(4)
+/* 0 = use VTVOUT, 1 = use VRTC as pull-up supply for battery temp NTC */
+#define AB8500_GPADC_CTRL1_PUPSUPSEL BIT(5)
+#define AB8500_GPADC_CTRL1_BUF_ENA BIT(6)
+#define AB8500_GPADC_CTRL1_ICHAR_ENA BIT(7)
+
+#define AB8500_GPADC_CTRL2_REG 0x01
+#define AB8500_GPADC_CTRL3_REG 0x02
+/*
+ * GPADC control register 2 and 3 bits
+ * the bit layout is the same for SW and HW conversion set-up
+ */
+#define AB8500_GPADC_CTRL2_AVG_1 0x00
+#define AB8500_GPADC_CTRL2_AVG_4 BIT(5)
+#define AB8500_GPADC_CTRL2_AVG_8 BIT(6)
+#define AB8500_GPADC_CTRL2_AVG_16 (BIT(5) | BIT(6))
+
+enum ab8500_gpadc_channel {
+ AB8500_GPADC_CHAN_UNUSED = 0x00,
+ AB8500_GPADC_CHAN_BAT_CTRL = 0x01,
+ AB8500_GPADC_CHAN_BAT_TEMP = 0x02,
+ /* This is not used on AB8505 */
+ AB8500_GPADC_CHAN_MAIN_CHARGER = 0x03,
+ AB8500_GPADC_CHAN_ACC_DET_1 = 0x04,
+ AB8500_GPADC_CHAN_ACC_DET_2 = 0x05,
+ AB8500_GPADC_CHAN_ADC_AUX_1 = 0x06,
+ AB8500_GPADC_CHAN_ADC_AUX_2 = 0x07,
+ AB8500_GPADC_CHAN_VBAT_A = 0x08,
+ AB8500_GPADC_CHAN_VBUS = 0x09,
+ AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT = 0x0a,
+ AB8500_GPADC_CHAN_USB_CHARGER_CURRENT = 0x0b,
+ AB8500_GPADC_CHAN_BACKUP_BAT = 0x0c,
+ /* Only on AB8505 */
+ AB8505_GPADC_CHAN_DIE_TEMP = 0x0d,
+ AB8500_GPADC_CHAN_ID = 0x0e,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_1 = 0x0f,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_2 = 0x10,
+ AB8500_GPADC_CHAN_INTERNAL_TEST_3 = 0x11,
+ /* FIXME: Applicable to all ASIC variants? */
+ AB8500_GPADC_CHAN_XTAL_TEMP = 0x12,
+ AB8500_GPADC_CHAN_VBAT_TRUE_MEAS = 0x13,
+ /* FIXME: Doesn't seem to work with pure AB8500 */
+ AB8500_GPADC_CHAN_BAT_CTRL_AND_IBAT = 0x1c,
+ AB8500_GPADC_CHAN_VBAT_MEAS_AND_IBAT = 0x1d,
+ AB8500_GPADC_CHAN_VBAT_TRUE_MEAS_AND_IBAT = 0x1e,
+ AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT = 0x1f,
+ /*
+ * Virtual channel used only for ibat conversion to ampere.
+ * Battery current conversion (ibat) cannot be requested as a
+ * single conversion but it is always requested in combination
+ * with other input requests.
+ */
+ AB8500_GPADC_CHAN_IBAT_VIRTUAL = 0xFF,
+};
+
+#define AB8500_GPADC_AUTO_TIMER_REG 0x03
+
+#define AB8500_GPADC_STAT_REG 0x04
+#define AB8500_GPADC_STAT_BUSY BIT(0)
+
+#define AB8500_GPADC_MANDATAL_REG 0x05
+#define AB8500_GPADC_MANDATAH_REG 0x06
+#define AB8500_GPADC_AUTODATAL_REG 0x07
+#define AB8500_GPADC_AUTODATAH_REG 0x08
+#define AB8500_GPADC_MUX_CTRL_REG 0x09
+#define AB8540_GPADC_MANDATA2L_REG 0x09
+#define AB8540_GPADC_MANDATA2H_REG 0x0A
+#define AB8540_GPADC_APEAAX_REG 0x10
+#define AB8540_GPADC_APEAAT_REG 0x11
+#define AB8540_GPADC_APEAAM_REG 0x12
+#define AB8540_GPADC_APEAAH_REG 0x13
+#define AB8540_GPADC_APEAAL_REG 0x14
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_GPADC_CAL_1 0x0F
+#define AB8500_GPADC_CAL_2 0x10
+#define AB8500_GPADC_CAL_3 0x11
+#define AB8500_GPADC_CAL_4 0x12
+#define AB8500_GPADC_CAL_5 0x13
+#define AB8500_GPADC_CAL_6 0x14
+#define AB8500_GPADC_CAL_7 0x15
+/* New calibration for 8540 */
+#define AB8540_GPADC_OTP4_REG_7 0x38
+#define AB8540_GPADC_OTP4_REG_6 0x39
+#define AB8540_GPADC_OTP4_REG_5 0x3A
+
+#define AB8540_GPADC_DIS_ZERO 0x00
+#define AB8540_GPADC_EN_VBIAS_XTAL_TEMP 0x02
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define AB8500_ADC_RESOLUTION 1024
+#define AB8500_ADC_CH_BTEMP_MIN 0
+#define AB8500_ADC_CH_BTEMP_MAX 1350
+#define AB8500_ADC_CH_DIETEMP_MIN 0
+#define AB8500_ADC_CH_DIETEMP_MAX 1350
+#define AB8500_ADC_CH_CHG_V_MIN 0
+#define AB8500_ADC_CH_CHG_V_MAX 20030
+#define AB8500_ADC_CH_ACCDET2_MIN 0
+#define AB8500_ADC_CH_ACCDET2_MAX 2500
+#define AB8500_ADC_CH_VBAT_MIN 2300
+#define AB8500_ADC_CH_VBAT_MAX 4800
+#define AB8500_ADC_CH_CHG_I_MIN 0
+#define AB8500_ADC_CH_CHG_I_MAX 1500
+#define AB8500_ADC_CH_BKBAT_MIN 0
+#define AB8500_ADC_CH_BKBAT_MAX 3200
+
+/* GPADC constants from AB8540 spec */
+#define AB8500_ADC_CH_IBAT_MIN (-6000) /* mA range measured by ADC for ibat */
+#define AB8500_ADC_CH_IBAT_MAX 6000
+#define AB8500_ADC_CH_IBAT_MIN_V (-60) /* mV range measured by ADC for ibat */
+#define AB8500_ADC_CH_IBAT_MAX_V 60
+#define AB8500_GPADC_IBAT_VDROP_L (-56) /* mV */
+#define AB8500_GPADC_IBAT_VDROP_H 56
+
+/* This is used to not lose precision when dividing to get gain and offset */
+#define AB8500_GPADC_CALIB_SCALE 1000
+/*
+ * Number of bits to shift by in order not to lose precision
+ * when dividing to get the ibat gain.
+ */
+#define AB8500_GPADC_CALIB_SHIFT_IBAT 20
+
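+/*
+ * Worked example with assumed numbers: a real gain of 4.561 is stored
+ * as 4561 (real gain * AB8500_GPADC_CALIB_SCALE), so a raw sample of
+ * 500 converts as (500 * 4561 + offset) / AB8500_GPADC_CALIB_SCALE,
+ * keeping three decimal digits of gain precision in integer math.
+ */
+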
+/* Time in ms before disabling regulator */
+#define AB8500_GPADC_AUTOSUSPEND_DELAY 1
+
+#define AB8500_GPADC_CONVERSION_TIME 500 /* ms */
+
+enum ab8500_cal_channels {
+ AB8500_CAL_VMAIN = 0,
+ AB8500_CAL_BTEMP,
+ AB8500_CAL_VBAT,
+ AB8500_CAL_IBAT,
+ AB8500_CAL_NR,
+};
+
+/**
+ * struct ab8500_adc_cal_data - Table for storing gain and offset for the
+ * calibrated ADC channels
+ * @gain: Gain of the ADC channel
+ * @offset: Offset of the ADC channel
+ * @otp_calib_hi: Calibration from OTP
+ * @otp_calib_lo: Calibration from OTP
+ */
+struct ab8500_adc_cal_data {
+ s64 gain;
+ s64 offset;
+ u16 otp_calib_hi;
+ u16 otp_calib_lo;
+};
+
+/**
+ * struct ab8500_gpadc_chan_info - per-channel GPADC info
+ * @name: name of the channel
+ * @id: the internal AB8500 ID number for the channel
+ * @hardware_control: indicate that we want to use hardware ADC control
+ * on this channel, the default is software ADC control. Hardware control
+ * is normally only used to test the battery voltage during GSM bursts
+ * and needs a hardware trigger on the GPADCTrig pin of the ASIC.
+ * @falling_edge: indicate that we want to trigger on falling edge
+ * rather than rising edge, rising edge is the default
+ * @avg_sample: how many samples to average: must be 1, 4, 8 or 16.
+ * @trig_timer: how long to wait for the trigger, in 32kHz periods:
+ * 0 .. 255 periods
+ */
+struct ab8500_gpadc_chan_info {
+ const char *name;
+ u8 id;
+ bool hardware_control;
+ bool falling_edge;
+ u8 avg_sample;
+ u8 trig_timer;
+};
+
+/**
+ * struct ab8500_gpadc - AB8500 GPADC device information
+ * @dev: pointer to the containing device
+ * @ab8500: pointer to the parent AB8500 device
+ * @chans: internal per-channel information container
+ * @nchans: number of channels
+ * @complete: pointer to the completion that indicates
+ * the completion of a gpadc conversion cycle
+ * @vddadc: pointer to the regulator supplying VDDADC
+ * @irq_sw: interrupt number that is used by gpadc for software ADC conversion
+ * @irq_hw: interrupt number that is used by gpadc for hardware ADC conversion
+ * @cal_data: array of ADC calibration data structs
+ */
+struct ab8500_gpadc {
+ struct device *dev;
+ struct ab8500 *ab8500;
+ struct ab8500_gpadc_chan_info *chans;
+ unsigned int nchans;
+ struct completion complete;
+ struct regulator *vddadc;
+ int irq_sw;
+ int irq_hw;
+ struct ab8500_adc_cal_data cal_data[AB8500_CAL_NR];
+};
+
+static struct ab8500_gpadc_chan_info *
+ab8500_gpadc_get_channel(struct ab8500_gpadc *gpadc, u8 chan)
+{
+ struct ab8500_gpadc_chan_info *ch;
+ int i;
+
+ for (i = 0; i < gpadc->nchans; i++) {
+ ch = &gpadc->chans[i];
+ if (ch->id == chan)
+ break;
+ }
+ if (i == gpadc->nchans)
+ return NULL;
+
+ return ch;
+}
+
+/**
+ * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
+ * @gpadc: GPADC instance
+ * @ch: the sampled channel this raw value is coming from
+ * @ad_value: the raw value
+ */
+static int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
+ enum ab8500_gpadc_channel ch,
+ int ad_value)
+{
+ int res;
+
+ switch (ch) {
+ case AB8500_GPADC_CHAN_MAIN_CHARGER:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_VMAIN].gain) {
+ res = AB8500_ADC_CH_CHG_V_MIN + (AB8500_ADC_CH_CHG_V_MAX -
+ AB8500_ADC_CH_CHG_V_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_VMAIN].gain +
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8500_GPADC_CHAN_BAT_CTRL:
+ case AB8500_GPADC_CHAN_BAT_TEMP:
+ case AB8500_GPADC_CHAN_ACC_DET_1:
+ case AB8500_GPADC_CHAN_ADC_AUX_1:
+ case AB8500_GPADC_CHAN_ADC_AUX_2:
+ case AB8500_GPADC_CHAN_XTAL_TEMP:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_BTEMP].gain) {
+ res = AB8500_ADC_CH_BTEMP_MIN + (AB8500_ADC_CH_BTEMP_MAX -
+ AB8500_ADC_CH_BTEMP_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_BTEMP].gain +
+ gpadc->cal_data[AB8500_CAL_BTEMP].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8500_GPADC_CHAN_VBAT_A:
+ case AB8500_GPADC_CHAN_VBAT_TRUE_MEAS:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_VBAT].gain) {
+ res = AB8500_ADC_CH_VBAT_MIN + (AB8500_ADC_CH_VBAT_MAX -
+ AB8500_ADC_CH_VBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_VBAT].gain +
+ gpadc->cal_data[AB8500_CAL_VBAT].offset) / AB8500_GPADC_CALIB_SCALE;
+ break;
+
+ case AB8505_GPADC_CHAN_DIE_TEMP:
+ res = AB8500_ADC_CH_DIETEMP_MIN +
+ (AB8500_ADC_CH_DIETEMP_MAX - AB8500_ADC_CH_DIETEMP_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_ACC_DET_2:
+ res = AB8500_ADC_CH_ACCDET2_MIN +
+ (AB8500_ADC_CH_ACCDET2_MAX - AB8500_ADC_CH_ACCDET2_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_VBUS:
+ res = AB8500_ADC_CH_CHG_V_MIN +
+ (AB8500_ADC_CH_CHG_V_MAX - AB8500_ADC_CH_CHG_V_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT:
+ case AB8500_GPADC_CHAN_USB_CHARGER_CURRENT:
+ res = AB8500_ADC_CH_CHG_I_MIN +
+ (AB8500_ADC_CH_CHG_I_MAX - AB8500_ADC_CH_CHG_I_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_BACKUP_BAT:
+ res = AB8500_ADC_CH_BKBAT_MIN +
+ (AB8500_ADC_CH_BKBAT_MAX - AB8500_ADC_CH_BKBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+
+ case AB8500_GPADC_CHAN_IBAT_VIRTUAL:
+ /* No calibration data available: just interpolate */
+ if (!gpadc->cal_data[AB8500_CAL_IBAT].gain) {
+ res = AB8500_ADC_CH_IBAT_MIN + (AB8500_ADC_CH_IBAT_MAX -
+ AB8500_ADC_CH_IBAT_MIN) * ad_value /
+ AB8500_ADC_RESOLUTION;
+ break;
+ }
+ /* Here we can use calibration */
+ res = (int) (ad_value * gpadc->cal_data[AB8500_CAL_IBAT].gain +
+ gpadc->cal_data[AB8500_CAL_IBAT].offset)
+ >> AB8500_GPADC_CALIB_SHIFT_IBAT;
+ break;
+
+ default:
+ dev_err(gpadc->dev,
+ "unknown channel ID: %d, not possible to convert\n",
+ ch);
+ res = -EINVAL;
+ break;
+
+ }
+
+ return res;
+}
+
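+/*
+ * Worked example (hypothetical sample): with no VBAT calibration data,
+ * the interpolation above maps the 10-bit range linearly onto
+ * 2300..4800 mV, so ad_value = 512 yields
+ * 2300 + (4800 - 2300) * 512 / 1024 = 3550 mV.
+ */
+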
+static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc,
+ const struct ab8500_gpadc_chan_info *ch,
+ int *ibat)
+{
+ int ret;
+ int looplimit = 0;
+ unsigned long completion_timeout;
+ u8 val;
+ u8 low_data, high_data, low_data2, high_data2;
+ u8 ctrl1;
+ u8 ctrl23;
+ unsigned int delay_min = 0;
+ unsigned int delay_max = 0;
+ u8 data_low_addr, data_high_addr;
+
+ if (!gpadc)
+ return -ENODEV;
+
+ /* check if conversion is supported */
+ if ((gpadc->irq_sw <= 0) && !ch->hardware_control)
+ return -ENOTSUPP;
+ if ((gpadc->irq_hw <= 0) && ch->hardware_control)
+ return -ENOTSUPP;
+
+ /* Enable vddadc by grabbing PM runtime */
+ pm_runtime_get_sync(gpadc->dev);
+
+ /* Check if ADC is not busy, lock and proceed */
+ do {
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & AB8500_GPADC_STAT_BUSY))
+ break;
+ msleep(20);
+ } while (++looplimit < 10);
+ if (looplimit >= 10 && (val & AB8500_GPADC_STAT_BUSY)) {
+ dev_err(gpadc->dev, "gpadc_conversion: GPADC busy\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Enable GPADC */
+ ctrl1 = AB8500_GPADC_CTRL1_ENABLE;
+
+ /* Select the channel source and set average samples */
+ switch (ch->avg_sample) {
+ case 1:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_1;
+ break;
+ case 4:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_4;
+ break;
+ case 8:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_8;
+ break;
+ default:
+ ctrl23 = ch->id | AB8500_GPADC_CTRL2_AVG_16;
+ break;
+ }
+
+ if (ch->hardware_control) {
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL3_REG, ctrl23);
+ ctrl1 |= AB8500_GPADC_CTRL1_TRIG_ENA;
+ if (ch->falling_edge)
+ ctrl1 |= AB8500_GPADC_CTRL1_TRIG_EDGE;
+ } else {
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL2_REG, ctrl23);
+ }
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set avg samples failed\n");
+ goto out;
+ }
+
+ /*
+ * Enable ADC, buffering, select rising edge and enable the ADC path
+ * charging current sense if needed; ABB 3.0 needs some special
+ * treatment too.
+ */
+ switch (ch->id) {
+ case AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT:
+ case AB8500_GPADC_CHAN_USB_CHARGER_CURRENT:
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA |
+ AB8500_GPADC_CTRL1_ICHAR_ENA;
+ break;
+ case AB8500_GPADC_CHAN_BAT_TEMP:
+ if (!is_ab8500_2p0_or_earlier(gpadc->ab8500)) {
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA |
+ AB8500_GPADC_CTRL1_BTEMP_PULL_UP;
+ /*
+ * Delay might be needed for AB8500 cut 3.0; if not,
+ * remove it once hardware is available.
+ */
+ delay_min = 1000; /* Delay in micro seconds */
+ delay_max = 10000; /* large range optimises sleepmode */
+ break;
+ }
+ /* Fall through */
+ default:
+ ctrl1 |= AB8500_GPADC_CTRL1_BUF_ENA;
+ break;
+ }
+
+ /* Write configuration to control register 1 */
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ctrl1);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: set Control register failed\n");
+ goto out;
+ }
+
+ if (delay_min != 0)
+ usleep_range(delay_min, delay_max);
+
+ if (ch->hardware_control) {
+ /* Set trigger delay timer */
+ ret = abx500_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG,
+ ch->trig_timer);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: trig timer failed\n");
+ goto out;
+ }
+ completion_timeout = 2 * HZ;
+ data_low_addr = AB8500_GPADC_AUTODATAL_REG;
+ data_high_addr = AB8500_GPADC_AUTODATAH_REG;
+ } else {
+ /* Start SW conversion */
+ ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+ AB8500_GPADC_CTRL1_START_SW_CONV,
+ AB8500_GPADC_CTRL1_START_SW_CONV);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: start s/w conv failed\n");
+ goto out;
+ }
+ completion_timeout = msecs_to_jiffies(AB8500_GPADC_CONVERSION_TIME);
+ data_low_addr = AB8500_GPADC_MANDATAL_REG;
+ data_high_addr = AB8500_GPADC_MANDATAH_REG;
+ }
+
+ /* Wait for completion of conversion */
+ if (!wait_for_completion_timeout(&gpadc->complete,
+ completion_timeout)) {
+ dev_err(gpadc->dev,
+ "timeout didn't receive GPADC conv interrupt\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Read the converted RAW data */
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, data_low_addr, &low_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read low data failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, data_high_addr, &high_data);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read high data failed\n");
+ goto out;
+ }
+
+ /* Check if double conversion is required */
+ if ((ch->id == AB8500_GPADC_CHAN_BAT_CTRL_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_VBAT_MEAS_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_VBAT_TRUE_MEAS_AND_IBAT) ||
+ (ch->id == AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT)) {
+
+ if (ch->hardware_control) {
+ /* not supported */
+ ret = -ENOTSUPP;
+ dev_err(gpadc->dev,
+ "gpadc_conversion: only SW double conversion supported\n");
+ goto out;
+ } else {
+ /* Read the converted RAW data 2 */
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
+ &low_data2);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read sw low data 2 failed\n");
+ goto out;
+ }
+
+ ret = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
+ &high_data2);
+ if (ret < 0) {
+ dev_err(gpadc->dev,
+ "gpadc_conversion: read sw high data 2 failed\n");
+ goto out;
+ }
+ if (ibat != NULL) {
+ *ibat = (high_data2 << 8) | low_data2;
+ } else {
+ dev_warn(gpadc->dev,
+ "gpadc_conversion: ibat not stored\n");
+ }
+
+ }
+ }
+
+ /* Disable GPADC */
+ ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, AB8500_GPADC_CTRL1_DISABLE);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
+ goto out;
+ }
+
+ /* This eventually drops the regulator */
+ pm_runtime_mark_last_busy(gpadc->dev);
+ pm_runtime_put_autosuspend(gpadc->dev);
+
+ return (high_data << 8) | low_data;
+
+out:
+ /*
+ * It has been shown necessary to turn off the GPADC if an error
+ * occurs, otherwise we might have problems when waiting for the busy
+ * bit in the GPADC status register to go low. In V1.1 the
+ * wait_for_completion seems to time out when waiting for an
+ * interrupt. Not seen in V2.0.
+ */
+ (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
+ AB8500_GPADC_CTRL1_REG, AB8500_GPADC_CTRL1_DISABLE);
+ pm_runtime_put(gpadc->dev);
+ dev_err(gpadc->dev,
+ "gpadc_conversion: Failed to AD convert channel %d\n", ch->id);
+
+ return ret;
+}
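+
+/*
+ * A minimal sketch of how the returned sample is assembled from the
+ * two data registers read above; the helper name is illustrative and
+ * not part of the driver proper:
+ *
+ * static inline int ab8500_gpadc_sample(u8 high_data, u8 low_data)
+ * {
+ * return (high_data << 8) | low_data;
+ * }
+ *
+ * The optional IBAT result is paired the same way, with
+ * (high_data2 << 8) | low_data2 stored through the ibat pointer.
+ */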
+
+/**
+ * ab8500_bm_gpadcconvend_handler() - ISR for GPADC conversion completion
+ * @irq: irq number
+ * @data: pointer to the data passed during request irq
+ *
+ * This is an interrupt service routine for GPADC conversion completion.
+ * It signals that the conversion is complete and that the converted raw
+ * value can be read from the registers.
+ * Returns IRQ status (IRQ_HANDLED).
+ */
+static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *data)
+{
+ struct ab8500_gpadc *gpadc = data;
+
+ complete(&gpadc->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int otp_cal_regs[] = {
+ AB8500_GPADC_CAL_1,
+ AB8500_GPADC_CAL_2,
+ AB8500_GPADC_CAL_3,
+ AB8500_GPADC_CAL_4,
+ AB8500_GPADC_CAL_5,
+ AB8500_GPADC_CAL_6,
+ AB8500_GPADC_CAL_7,
+};
+
+static int otp4_cal_regs[] = {
+ AB8540_GPADC_OTP4_REG_7,
+ AB8540_GPADC_OTP4_REG_6,
+ AB8540_GPADC_OTP4_REG_5,
+};
+
+static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
+{
+ int i;
+ int ret[ARRAY_SIZE(otp_cal_regs)];
+ u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
+ int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
+ u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
+ int vmain_high, vmain_low;
+ int btemp_high, btemp_low;
+ int vbat_high, vbat_low;
+ int ibat_high, ibat_low;
+ s64 V_gain, V_offset, V2A_gain, V2A_offset;
+
+ /* First we read all OTP registers and store the error code */
+ for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
+ ret[i] = abx500_get_register_interruptible(gpadc->dev,
+ AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
+ if (ret[i] < 0) {
+ /* Continue anyway: maybe the other registers are OK */
+ dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
+ __func__, otp_cal_regs[i]);
+ } else {
+ /* Put this in the entropy pool as device-unique */
+ add_device_randomness(&ret[i], sizeof(ret[i]));
+ }
+ }
+
+ /*
+ * The ADC calibration data is stored in OTP registers.
+ * The layout of the calibration data is outlined below and a more
+ * detailed description can be found in UM0836
+ *
+ * vm_h/l = vmain_high/low
+ * bt_h/l = btemp_high/low
+ * vb_h/l = vbat_high/low
+ *
+ * Data bits 8500/9540:
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h9 | vm_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ * Data bits 8540:
+ * OTP2
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ *
+ * Data bits 8540:
+ * OTP4
+ * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | | ib_h9 | ib_h8 | ib_h7
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
+ * |.......|.......|.......|.......|.......|.......|.......|.......
+ * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
+ *
+ *
+ * The ideal output ADC codes corresponding to the input voltages
+ * injected during manufacturing are:
+ *
+ * vmain_high: Vin = 19500mV / ADC ideal code = 997
+ * vmain_low: Vin = 315mV / ADC ideal code = 16
+ * btemp_high: Vin = 1300mV / ADC ideal code = 985
+ * btemp_low: Vin = 21mV / ADC ideal code = 16
+ * vbat_high: Vin = 4700mV / ADC ideal code = 982
+ * vbat_low: Vin = 2380mV / ADC ideal code = 33
+ */
+
+ if (is_ab8540(gpadc->ab8500)) {
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_hi =
+ (u16)vmain_high;
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_lo =
+ (u16)vmain_low;
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = AB8500_GPADC_CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset = AB8500_GPADC_CALIB_SCALE *
+ 19500 - (AB8500_GPADC_CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = 0;
+ }
+
+ /* Read IBAT calibration Data */
+ for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
+ ret_otp4[i] = abx500_get_register_interruptible(
+ gpadc->dev, AB8500_OTP_EMUL,
+ otp4_cal_regs[i], &gpadc_otp4[i]);
+ if (ret_otp4[i] < 0)
+ dev_err(gpadc->dev,
+ "%s: read otp4 reg 0x%02x failed\n",
+ __func__, otp4_cal_regs[i]);
+ }
+
+ /* Calculate gain and offset for IBAT if all reads succeeded */
+ if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
+ ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
+ ((gpadc_otp4[1] & 0xFE) >> 1));
+ ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
+ ((gpadc_otp4[2] & 0xF8) >> 3));
+
+ gpadc->cal_data[AB8500_CAL_IBAT].otp_calib_hi =
+ (u16)ibat_high;
+ gpadc->cal_data[AB8500_CAL_IBAT].otp_calib_lo =
+ (u16)ibat_low;
+
+ V_gain = ((AB8500_GPADC_IBAT_VDROP_H - AB8500_GPADC_IBAT_VDROP_L)
+ << AB8500_GPADC_CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
+
+ V_offset = (AB8500_GPADC_IBAT_VDROP_H << AB8500_GPADC_CALIB_SHIFT_IBAT) -
+ (((AB8500_GPADC_IBAT_VDROP_H - AB8500_GPADC_IBAT_VDROP_L) <<
+ AB8500_GPADC_CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
+ * ibat_high;
+ /*
+ * Result obtained is in mV (at a scale factor),
+ * we need to calculate gain and offset to get mA
+ */
+ V2A_gain = (AB8500_ADC_CH_IBAT_MAX - AB8500_ADC_CH_IBAT_MIN)/
+ (AB8500_ADC_CH_IBAT_MAX_V - AB8500_ADC_CH_IBAT_MIN_V);
+ V2A_offset = ((AB8500_ADC_CH_IBAT_MAX_V * AB8500_ADC_CH_IBAT_MIN -
+ AB8500_ADC_CH_IBAT_MAX * AB8500_ADC_CH_IBAT_MIN_V)
+ << AB8500_GPADC_CALIB_SHIFT_IBAT)
+ / (AB8500_ADC_CH_IBAT_MAX_V - AB8500_ADC_CH_IBAT_MIN_V);
+
+ gpadc->cal_data[AB8500_CAL_IBAT].gain =
+ V_gain * V2A_gain;
+ gpadc->cal_data[AB8500_CAL_IBAT].offset =
+ V_offset * V2A_gain + V2A_offset;
+ } else {
+ gpadc->cal_data[AB8500_CAL_IBAT].gain = 0;
+ }
+ } else {
+ /* Calculate gain and offset for VMAIN if all reads succeeded */
+ if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
+ vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
+ ((gpadc_cal[1] & 0x3F) << 2) |
+ ((gpadc_cal[2] & 0xC0) >> 6));
+ vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_hi =
+ (u16)vmain_high;
+ gpadc->cal_data[AB8500_CAL_VMAIN].otp_calib_lo =
+ (u16)vmain_low;
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = AB8500_GPADC_CALIB_SCALE *
+ (19500 - 315) / (vmain_high - vmain_low);
+
+ gpadc->cal_data[AB8500_CAL_VMAIN].offset = AB8500_GPADC_CALIB_SCALE *
+ 19500 - (AB8500_GPADC_CALIB_SCALE * (19500 - 315) /
+ (vmain_high - vmain_low)) * vmain_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VMAIN].gain = 0;
+ }
+ }
+
+ /* Calculate gain and offset for BTEMP if all reads succeeded */
+ if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
+ btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
+ (gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
+ btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
+
+ gpadc->cal_data[AB8500_CAL_BTEMP].otp_calib_hi = (u16)btemp_high;
+ gpadc->cal_data[AB8500_CAL_BTEMP].otp_calib_lo = (u16)btemp_low;
+
+ gpadc->cal_data[AB8500_CAL_BTEMP].gain =
+ AB8500_GPADC_CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
+ gpadc->cal_data[AB8500_CAL_BTEMP].offset = AB8500_GPADC_CALIB_SCALE * 1300 -
+ (AB8500_GPADC_CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
+ * btemp_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_BTEMP].gain = 0;
+ }
+
+ /* Calculate gain and offset for VBAT if all reads succeeded */
+ if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
+ vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
+ vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
+
+ gpadc->cal_data[AB8500_CAL_VBAT].otp_calib_hi = (u16)vbat_high;
+ gpadc->cal_data[AB8500_CAL_VBAT].otp_calib_lo = (u16)vbat_low;
+
+ gpadc->cal_data[AB8500_CAL_VBAT].gain = AB8500_GPADC_CALIB_SCALE *
+ (4700 - 2380) / (vbat_high - vbat_low);
+ gpadc->cal_data[AB8500_CAL_VBAT].offset = AB8500_GPADC_CALIB_SCALE * 4700 -
+ (AB8500_GPADC_CALIB_SCALE * (4700 - 2380) /
+ (vbat_high - vbat_low)) * vbat_high;
+ } else {
+ gpadc->cal_data[AB8500_CAL_VBAT].gain = 0;
+ }
+}
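+
+/*
+ * An illustrative recap of the two-point calibration computed above.
+ * With injected voltages V_hi/V_lo (in mV) and their OTP codes
+ * c_hi/c_lo, the function effectively computes:
+ *
+ * gain = AB8500_GPADC_CALIB_SCALE * (V_hi - V_lo) / (c_hi - c_lo)
+ * offset = AB8500_GPADC_CALIB_SCALE * V_hi - gain * c_hi
+ *
+ * so a later raw sample converts back as
+ * mV = (gain * code + offset) / AB8500_GPADC_CALIB_SCALE.
+ * For VBAT, using the ideal codes listed above (4700 mV -> 982,
+ * 2380 mV -> 33), the gain works out to
+ * AB8500_GPADC_CALIB_SCALE * 2320 / 949; the variable names here are
+ * illustrative, not part of this patch.
+ */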
+
+static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+ const struct ab8500_gpadc_chan_info *ch;
+ int raw_val;
+ int processed;
+
+ ch = ab8500_gpadc_get_channel(gpadc, chan->address);
+ if (!ch) {
+ dev_err(gpadc->dev, "no such channel %lu\n",
+ chan->address);
+ return -EINVAL;
+ }
+
+ raw_val = ab8500_gpadc_read(gpadc, ch, NULL);
+ if (raw_val < 0)
+ return raw_val;
+
+ if (mask == IIO_CHAN_INFO_RAW) {
+ *val = raw_val;
+ return IIO_VAL_INT;
+ }
+
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+ processed = ab8500_gpadc_ad_to_voltage(gpadc, ch->id, raw_val);
+ if (processed < 0)
+ return processed;
+
+ /* Return millivolts, milliamps or millidegrees Celsius */
+ *val = processed * 1000;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == iiospec->args[0])
+ return i;
+
+ return -EINVAL;
+}
+
+static const struct iio_info ab8500_gpadc_info = {
+ .of_xlate = ab8500_gpadc_of_xlate,
+ .read_raw = ab8500_gpadc_read_raw,
+};
+
+#ifdef CONFIG_PM
+static int ab8500_gpadc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+
+ regulator_disable(gpadc->vddadc);
+
+ return 0;
+}
+
+static int ab8500_gpadc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(gpadc->vddadc);
+ if (ret)
+ dev_err(dev, "Failed to enable vddadc: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+/**
+ * ab8500_gpadc_parse_channel() - process devicetree channel configuration
+ * @dev: pointer to containing device
+ * @np: device tree node for the channel to configure
+ * @ch: channel info to fill in
+ * @iio_chan: IIO channel specification to fill in
+ *
+ * The devicetree sets up the channel for use with the specific device,
+ * and defines usage for things like AUX GPADC inputs more precisely.
+ */
+static int ab8500_gpadc_parse_channel(struct device *dev,
+ struct device_node *np,
+ struct ab8500_gpadc_chan_info *ch,
+ struct iio_chan_spec *iio_chan)
+{
+ const char *name = np->name;
+ u32 chan;
+ int ret;
+
+ ret = of_property_read_u32(np, "reg", &chan);
+ if (ret) {
+ dev_err(dev, "invalid channel number %s\n", name);
+ return ret;
+ }
+ if (chan > AB8500_GPADC_CHAN_BAT_TEMP_AND_IBAT) {
+ dev_err(dev, "%s channel number out of range %d\n", name, chan);
+ return -EINVAL;
+ }
+
+ iio_chan->channel = chan;
+ iio_chan->datasheet_name = name;
+ iio_chan->indexed = 1;
+ iio_chan->address = chan;
+ iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED);
+ /* Most are voltages (also temperatures), some are currents */
+ if ((chan == AB8500_GPADC_CHAN_MAIN_CHARGER_CURRENT) ||
+ (chan == AB8500_GPADC_CHAN_USB_CHARGER_CURRENT))
+ iio_chan->type = IIO_CURRENT;
+ else
+ iio_chan->type = IIO_VOLTAGE;
+
+ ch->id = chan;
+
+ /* Sensible defaults */
+ ch->avg_sample = 16;
+ ch->hardware_control = false;
+ ch->falling_edge = false;
+ ch->trig_timer = 0;
+
+ return 0;
+}
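+
+/*
+ * A hedged sketch of the devicetree layout this parser expects, based
+ * only on the properties handled above (one child node per channel,
+ * with a "reg" property holding the channel number); the node names
+ * and values are illustrative:
+ *
+ * gpadc {
+ * channel@2 {
+ * reg = <0x02>;
+ * };
+ * };
+ */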
+
+/**
+ * ab8500_gpadc_parse_channels() - Parse the GPADC channels from DT
+ * @gpadc: the GPADC to configure the channels for
+ * @np: device tree node containing the channel configurations
+ * @chans_parsed: the IIO channels we parsed
+ * @nchans_parsed: the number of IIO channels we parsed
+ */
+static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
+ struct device_node *np,
+ struct iio_chan_spec **chans_parsed,
+ unsigned int *nchans_parsed)
+{
+ struct device_node *child;
+ struct ab8500_gpadc_chan_info *ch;
+ struct iio_chan_spec *iio_chans;
+ unsigned int nchans;
+ int i;
+
+ nchans = of_get_available_child_count(np);
+ if (!nchans) {
+ dev_err(gpadc->dev, "no channel children\n");
+ return -ENODEV;
+ }
+ dev_info(gpadc->dev, "found %d ADC channels\n", nchans);
+
+ iio_chans = devm_kcalloc(gpadc->dev, nchans,
+ sizeof(*iio_chans), GFP_KERNEL);
+ if (!iio_chans)
+ return -ENOMEM;
+
+ gpadc->chans = devm_kcalloc(gpadc->dev, nchans,
+ sizeof(*gpadc->chans), GFP_KERNEL);
+ if (!gpadc->chans)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ struct iio_chan_spec *iio_chan;
+ int ret;
+
+ ch = &gpadc->chans[i];
+ iio_chan = &iio_chans[i];
+
+ ret = ab8500_gpadc_parse_channel(gpadc->dev, child, ch,
+ iio_chan);
+ if (ret) {
+ of_node_put(child);
+ return ret;
+ }
+ i++;
+ }
+ gpadc->nchans = nchans;
+ *chans_parsed = iio_chans;
+ *nchans_parsed = nchans;
+
+ return 0;
+}
+
+static int ab8500_gpadc_probe(struct platform_device *pdev)
+{
+ struct ab8500_gpadc *gpadc;
+ struct iio_dev *indio_dev;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iio_chan_spec *iio_chans;
+ unsigned int n_iio_chans;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ gpadc = iio_priv(indio_dev);
+
+ gpadc->dev = dev;
+ gpadc->ab8500 = dev_get_drvdata(dev->parent);
+
+ ret = ab8500_gpadc_parse_channels(gpadc, np, &iio_chans, &n_iio_chans);
+ if (ret)
+ return ret;
+
+ gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
+ if (gpadc->irq_sw < 0) {
+ dev_err(dev, "failed to get platform sw_conv_end irq\n");
+ return gpadc->irq_sw;
+ }
+
+ gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
+ if (gpadc->irq_hw < 0) {
+ dev_err(dev, "failed to get platform hw_conv_end irq\n");
+ return gpadc->irq_hw;
+ }
+
+ /* Initialize completion used to notify completion of conversion */
+ init_completion(&gpadc->complete);
+
+ /* Request interrupts */
+ ret = devm_request_threaded_irq(dev, gpadc->irq_sw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-sw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "failed to request sw conversion irq %d\n",
+ gpadc->irq_sw);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, gpadc->irq_hw, NULL,
+ ab8500_bm_gpadcconvend_handler, IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "ab8500-gpadc-hw", gpadc);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to request hw conversion irq: %d\n",
+ gpadc->irq_hw);
+ return ret;
+ }
+
+ /* The VTVout LDO is used to power the AB8500 GPADC */
+ gpadc->vddadc = devm_regulator_get(dev, "vddadc");
+ if (IS_ERR(gpadc->vddadc)) {
+ ret = PTR_ERR(gpadc->vddadc);
+ dev_err(dev, "failed to get vddadc\n");
+ return ret;
+ }
+
+ ret = regulator_enable(gpadc->vddadc);
+ if (ret) {
+ dev_err(dev, "failed to enable vddadc: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, AB8500_GPADC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+
+ ab8500_gpadc_read_calibration_data(gpadc);
+
+ pm_runtime_put(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->dev.of_node = np;
+ indio_dev->name = "ab8500-gpadc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ab8500_gpadc_info;
+ indio_dev->channels = iio_chans;
+ indio_dev->num_channels = n_iio_chans;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ goto out_dis_pm;
+
+ return 0;
+
+out_dis_pm:
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ regulator_disable(gpadc->vddadc);
+
+ return ret;
+}
+
+static int ab8500_gpadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct ab8500_gpadc *gpadc = iio_priv(indio_dev);
+
+ pm_runtime_get_sync(gpadc->dev);
+ pm_runtime_put_noidle(gpadc->dev);
+ pm_runtime_disable(gpadc->dev);
+ regulator_disable(gpadc->vddadc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
+ ab8500_gpadc_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver ab8500_gpadc_driver = {
+ .probe = ab8500_gpadc_probe,
+ .remove = ab8500_gpadc_remove,
+ .driver = {
+ .name = "ab8500-gpadc",
+ .pm = &ab8500_gpadc_pm_ops,
+ },
+};
+builtin_platform_driver(ab8500_gpadc_driver);
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
new file mode 100644
index 000000000000..a6798f7dfdb8
--- /dev/null
+++ b/drivers/iio/adc/ad7292.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices AD7292 SPI ADC driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+
+#define ADI_VENDOR_ID 0x0018
+
+/* AD7292 registers definition */
+#define AD7292_REG_VENDOR_ID 0x00
+#define AD7292_REG_CONF_BANK 0x05
+#define AD7292_REG_CONV_COMM 0x0E
+#define AD7292_REG_ADC_CH(x) (0x10 + (x))
+
+/* AD7292 configuration bank subregisters definition */
+#define AD7292_BANK_REG_VIN_RNG0 0x10
+#define AD7292_BANK_REG_VIN_RNG1 0x11
+#define AD7292_BANK_REG_SAMP_MODE 0x12
+
+#define AD7292_RD_FLAG_MSK(x) (BIT(7) | ((x) & 0x3F))
+
+/* AD7292_REG_ADC_CONVERSION */
+#define AD7292_ADC_DATA_MASK GENMASK(15, 6)
+#define AD7292_ADC_DATA(x) FIELD_GET(AD7292_ADC_DATA_MASK, x)
+
+/* AD7292_CHANNEL_SAMPLING_MODE */
+#define AD7292_CH_SAMP_MODE(reg, ch) (((reg) >> 8) & BIT(ch))
+
+/* AD7292_CHANNEL_VIN_RANGE */
+#define AD7292_CH_VIN_RANGE(reg, ch) ((reg) & BIT(ch))
+
+#define AD7292_VOLTAGE_CHAN(_chan) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = 1, \
+ .channel = _chan, \
+}
+
+static const struct iio_chan_spec ad7292_channels[] = {
+ AD7292_VOLTAGE_CHAN(0),
+ AD7292_VOLTAGE_CHAN(1),
+ AD7292_VOLTAGE_CHAN(2),
+ AD7292_VOLTAGE_CHAN(3),
+ AD7292_VOLTAGE_CHAN(4),
+ AD7292_VOLTAGE_CHAN(5),
+ AD7292_VOLTAGE_CHAN(6),
+ AD7292_VOLTAGE_CHAN(7)
+};
+
+static const struct iio_chan_spec ad7292_channels_diff[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .indexed = 1,
+ .differential = 1,
+ .channel = 0,
+ .channel2 = 1,
+ },
+ AD7292_VOLTAGE_CHAN(2),
+ AD7292_VOLTAGE_CHAN(3),
+ AD7292_VOLTAGE_CHAN(4),
+ AD7292_VOLTAGE_CHAN(5),
+ AD7292_VOLTAGE_CHAN(6),
+ AD7292_VOLTAGE_CHAN(7)
+};
+
+struct ad7292_state {
+ struct spi_device *spi;
+ struct regulator *reg;
+ unsigned short vref_mv;
+
+ __be16 d16 ____cacheline_aligned;
+ u8 d8[2];
+};
+
+static int ad7292_spi_reg_read(struct ad7292_state *st, unsigned int addr)
+{
+ int ret;
+
+ st->d8[0] = AD7292_RD_FLAG_MSK(addr);
+
+ ret = spi_write_then_read(st->spi, st->d8, 1, &st->d16, 2);
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16);
+}
+
+static int ad7292_spi_subreg_read(struct ad7292_state *st, unsigned int addr,
+ unsigned int sub_addr, unsigned int len)
+{
+ unsigned int shift = 16 - (8 * len);
+ int ret;
+
+ st->d8[0] = AD7292_RD_FLAG_MSK(addr);
+ st->d8[1] = sub_addr;
+
+ ret = spi_write_then_read(st->spi, st->d8, 2, &st->d16, len);
+ if (ret < 0)
+ return ret;
+
+ return (be16_to_cpu(st->d16) >> shift);
+}
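+
+/*
+ * The shift above right-aligns sub-registers narrower than the 16-bit
+ * receive buffer: for len == 1 the single received byte lands in the
+ * most significant byte of the __be16, so shift == 8; for len == 2
+ * both bytes are valid and shift == 0. For example (value
+ * illustrative), a one-byte read of 0xAB yields
+ * be16_to_cpu(st->d16) >> 8 == 0xAB.
+ */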
+
+static int ad7292_single_conversion(struct ad7292_state *st,
+ unsigned int chan_addr)
+{
+ int ret;
+
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->d8,
+ .len = 4,
+ .delay_usecs = 6,
+ }, {
+ .rx_buf = &st->d16,
+ .len = 2,
+ },
+ };
+
+ st->d8[0] = chan_addr;
+ st->d8[1] = AD7292_RD_FLAG_MSK(AD7292_REG_CONV_COMM);
+
+ ret = spi_sync_transfer(st->spi, t, ARRAY_SIZE(t));
+
+ if (ret < 0)
+ return ret;
+
+ return be16_to_cpu(st->d16);
+}
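+
+/*
+ * The two transfers above first clock out the channel address and the
+ * conversion command, pausing for the 6 us delay_usecs programmed on
+ * the first transfer, and then clock the big-endian 16-bit result
+ * back in; AD7292_ADC_DATA() later extracts the 10 data bits from
+ * bits 15:6 of that word.
+ */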
+
+static int ad7292_vin_range_multiplier(struct ad7292_state *st, int channel)
+{
+ int samp_mode, range0, range1, factor = 1;
+
+ /*
+ * Every AD7292 ADC channel may have its input range adjusted according
+ * to the settings in the ADC sampling mode and VIN range subregisters.
+ * For a given channel, the minimum input range is equal to Vref, and it
+ * may be increased by a multiplier factor of 2 or 4 according to the
+ * following rules:
+ * If the channel is sampled with respect to AGND:
+ * factor = 4 if VIN range0 and VIN range1 both equal 0
+ * factor = 2 if only one of the VIN ranges equals 1
+ * factor = 1 if VIN range0 and VIN range1 both equal 1
+ * If the channel is sampled with respect to AVDD:
+ * factor = 4 if VIN range0 and VIN range1 both equal 0
+ * Behavior is undefined if either VIN range doesn't equal 0.
+ */
+
+ samp_mode = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_SAMP_MODE, 2);
+
+ if (samp_mode < 0)
+ return samp_mode;
+
+ range0 = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_VIN_RNG0, 2);
+
+ if (range0 < 0)
+ return range0;
+
+ range1 = ad7292_spi_subreg_read(st, AD7292_REG_CONF_BANK,
+ AD7292_BANK_REG_VIN_RNG1, 2);
+
+ if (range1 < 0)
+ return range1;
+
+ if (AD7292_CH_SAMP_MODE(samp_mode, channel)) {
+ /* Sampling with respect to AGND */
+ if (!AD7292_CH_VIN_RANGE(range0, channel))
+ factor *= 2;
+
+ if (!AD7292_CH_VIN_RANGE(range1, channel))
+ factor *= 2;
+
+ } else {
+ /* Sampling with respect to AVDD */
+ if (AD7292_CH_VIN_RANGE(range0, channel) ||
+ AD7292_CH_VIN_RANGE(range1, channel))
+ return -EPERM;
+
+ factor = 4;
+ }
+
+ return factor;
+}
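+
+/*
+ * A worked example of the rules above (bit values illustrative): a
+ * channel sampled with respect to AGND whose VIN range0 bit is 0 and
+ * VIN range1 bit is 1 gets a single doubling, so factor == 2; with
+ * both bits 0 the factor is 4, and with both bits 1 the input range
+ * stays at Vref (factor == 1).
+ */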
+
+static int ad7292_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7292_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ch_addr = AD7292_REG_ADC_CH(chan->channel);
+ ret = ad7292_single_conversion(st, ch_addr);
+ if (ret < 0)
+ return ret;
+
+ *val = AD7292_ADC_DATA(ret);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * To convert a raw value to standard units, the IIO core defines
+ * this formula: scaled value = (raw + offset) * scale.
+ * For the scale to be a correct multiplier for (raw + offset),
+ * it must be calculated as the input range divided by the
+ * number of possible distinct input values. Since the ADC data
+ * is 10 bits long, it can assume 2^10 distinct values.
+ * Hence, scale = range / 2^10. The IIO_VAL_FRACTIONAL_LOG2
+ * return type tells the IIO core to divide *val by 2 to
+ * the power of *val2 when returning from read_raw.
+ */
+
+ ret = ad7292_vin_range_multiplier(st, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ *val = st->vref_mv * ret;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
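+
+/*
+ * A worked example of the scale path above, assuming the internal
+ * 1250 mV reference and a x4 input range: *val = 1250 * 4 = 5000 and
+ * *val2 = 10, so the reported scale is 5000 / 2^10, roughly
+ * 4.883 mV per LSB; a raw code of 512 then corresponds to about
+ * 2500 mV.
+ */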
+
+static const struct iio_info ad7292_info = {
+ .read_raw = ad7292_read_raw,
+};
+
+static void ad7292_regulator_disable(void *data)
+{
+ struct ad7292_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static int ad7292_probe(struct spi_device *spi)
+{
+ struct ad7292_state *st;
+ struct iio_dev *indio_dev;
+ struct device_node *child;
+ bool diff_channels = false;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = ad7292_spi_reg_read(st, AD7292_REG_VENDOR_ID);
+ if (ret != ADI_VENDOR_ID) {
+ dev_err(&spi->dev, "Wrong vendor id 0x%x\n", ret);
+ return -EINVAL;
+ }
+
+ spi_set_drvdata(spi, indio_dev);
+
+ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable external vref supply\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ ad7292_regulator_disable, st);
+ if (ret) {
+ regulator_disable(st->reg);
+ return ret;
+ }
+
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ return ret;
+
+ st->vref_mv = ret / 1000;
+ } else {
+ /* Use the internal voltage reference. */
+ st->vref_mv = 1250;
+ }
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad7292_info;
+
+ for_each_available_child_of_node(spi->dev.of_node, child) {
+ diff_channels = of_property_read_bool(child, "diff-channels");
+ if (diff_channels) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ if (diff_channels) {
+ indio_dev->num_channels = ARRAY_SIZE(ad7292_channels_diff);
+ indio_dev->channels = ad7292_channels_diff;
+ } else {
+ indio_dev->num_channels = ARRAY_SIZE(ad7292_channels);
+ indio_dev->channels = ad7292_channels;
+ }
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad7292_id_table[] = {
+ { "ad7292", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7292_id_table);
+
+static const struct of_device_id ad7292_of_match[] = {
+ { .compatible = "adi,ad7292" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ad7292_of_match);
+
+static struct spi_driver ad7292_driver = {
+ .driver = {
+ .name = "ad7292",
+ .of_match_table = ad7292_of_match,
+ },
+ .probe = ad7292_probe,
+ .id_table = ad7292_id_table,
+};
+module_spi_driver(ad7292_driver);
+
+MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt1@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD7292 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index ac0ffff6c5ae..5c2b3446fa4a 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -54,38 +54,20 @@ struct ad7949_adc_chip {
u8 resolution;
u16 cfg;
unsigned int current_channel;
- u32 buffer ____cacheline_aligned;
+ u16 buffer ____cacheline_aligned;
};
-static bool ad7949_spi_cfg_is_read_back(struct ad7949_adc_chip *ad7949_adc)
-{
- if (!(ad7949_adc->cfg & AD7949_CFG_READ_BACK))
- return true;
-
- return false;
-}
-
-static int ad7949_spi_bits_per_word(struct ad7949_adc_chip *ad7949_adc)
-{
- int ret = ad7949_adc->resolution;
-
- if (ad7949_spi_cfg_is_read_back(ad7949_adc))
- ret += AD7949_CFG_REG_SIZE_BITS;
-
- return ret;
-}
-
static int ad7949_spi_write_cfg(struct ad7949_adc_chip *ad7949_adc, u16 val,
u16 mask)
{
int ret;
- int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc);
+ int bits_per_word = ad7949_adc->resolution;
int shift = bits_per_word - AD7949_CFG_REG_SIZE_BITS;
struct spi_message msg;
struct spi_transfer tx[] = {
{
.tx_buf = &ad7949_adc->buffer,
- .len = 4,
+ .len = 2,
.bits_per_word = bits_per_word,
},
};
@@ -107,13 +89,13 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
unsigned int channel)
{
int ret;
- int bits_per_word = ad7949_spi_bits_per_word(ad7949_adc);
+ int bits_per_word = ad7949_adc->resolution;
int mask = GENMASK(ad7949_adc->resolution, 0);
struct spi_message msg;
struct spi_transfer tx[] = {
{
.rx_buf = &ad7949_adc->buffer,
- .len = 4,
+ .len = 2,
.bits_per_word = bits_per_word,
},
};
@@ -138,10 +120,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
ad7949_adc->current_channel = channel;
- if (ad7949_spi_cfg_is_read_back(ad7949_adc))
- *val = (ad7949_adc->buffer >> AD7949_CFG_REG_SIZE_BITS) & mask;
- else
- *val = ad7949_adc->buffer & mask;
+ *val = ad7949_adc->buffer & mask;
return 0;
}
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 2640b75fb774..8ba90486c787 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -205,7 +205,7 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
}
EXPORT_SYMBOL_GPL(ad_sd_reset);
-static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
int ret;
@@ -242,6 +242,7 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(ad_sd_calibrate);
/**
* ad_sd_calibrate_all() - Performs channel calibration
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index d3fc39df535d..1e5375235cfe 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -173,7 +173,6 @@ static int aspeed_adc_probe(struct platform_device *pdev)
struct iio_dev *indio_dev;
struct aspeed_adc_data *data;
const struct aspeed_adc_model_data *model_data;
- struct resource *res;
const char *clk_parent_name;
int ret;
u32 adc_engine_control_reg_val;
@@ -185,8 +184,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
data = iio_priv(indio_dev);
data->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index a2837a0e7cba..e1850f3d5cf3 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1483,7 +1483,7 @@ dma_free_area:
st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
dma_chan_disable:
dma_release_channel(st->dma_st.dma_chan);
- st->dma_st.dma_chan = 0;
+ st->dma_st.dma_chan = NULL;
dma_exit:
dev_info(&pdev->dev, "continuing without DMA support\n");
}
@@ -1506,7 +1506,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
dma_free_coherent(st->dma_st.dma_chan->device->dev, pages * PAGE_SIZE,
st->dma_st.rx_buf, st->dma_st.rx_dma_buf);
dma_release_channel(st->dma_st.dma_chan);
- st->dma_st.dma_chan = 0;
+ st->dma_st.dma_chan = NULL;
dev_info(&pdev->dev, "continuing without DMA support\n");
}
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 646ebdc0a8b4..5e396104ac86 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -308,7 +308,7 @@ static int iproc_adc_do_read(struct iio_dev *indio_dev,
"IntMask set failed. Read will likely fail.");
read_len = -EIO;
goto adc_err;
- };
+ }
}
regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index f93f1d93b80d..fe9257624f16 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -310,7 +310,6 @@ static int cc10001_adc_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct cc10001_adc_device *adc_dev;
unsigned long adc_clk_rate;
- struct resource *res;
struct iio_dev *indio_dev;
unsigned long channel_map;
int ret;
@@ -340,8 +339,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
indio_dev->info = &cc10001_adc_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc_dev->reg_base)) {
ret = PTR_ERR(adc_dev->reg_base);
goto err_disable_reg;
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 2d616cafe75f..5086a337f4c9 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -1008,7 +1008,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
cpcap_adc_irq_thread,
- IRQF_TRIGGER_NONE,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"cpcap-adc", indio_dev);
if (error) {
dev_err(&pdev->dev, "could not get irq: %i\n",
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 5fa78c273a25..65c7c9329b1c 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -524,6 +524,10 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
u16 conflict;
unsigned int trigger_chan;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&dln2->mutex);
/* Enable ADC */
@@ -537,6 +541,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
(int)conflict);
ret = -EBUSY;
}
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
@@ -550,6 +555,7 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
if (ret < 0) {
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ iio_triggered_buffer_predisable(indio_dev);
return ret;
}
} else {
@@ -557,12 +563,12 @@ static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
mutex_unlock(&dln2->mutex);
}
- return iio_triggered_buffer_postenable(indio_dev);
+ return 0;
}
static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- int ret;
+ int ret, ret2;
struct dln2_adc *dln2 = iio_priv(indio_dev);
mutex_lock(&dln2->mutex);
@@ -577,12 +583,14 @@ static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
ret = dln2_adc_set_port_enabled(dln2, false, NULL);
mutex_unlock(&dln2->mutex);
- if (ret < 0) {
+ if (ret < 0)
dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
- return ret;
- }
- return iio_triggered_buffer_predisable(indio_dev);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
}
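
/*
 * The reordering above follows the usual IIO buffer ordering: on
 * enable, attach the core's triggered-buffer machinery first and
 * unwind it if the device-specific setup fails; on disable, tear the
 * device down first, then detach the core, preserving the first error
 * seen. A hedged sketch of the enable side (enable_hw is an
 * illustrative placeholder):
 *
 *	ret = iio_triggered_buffer_postenable(indio_dev);
 *	if (ret)
 *		return ret;
 *	ret = enable_hw(indio_dev);
 *	if (ret)
 *		iio_triggered_buffer_predisable(indio_dev);
 *	return ret;
 */
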
static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 42a3ced11fbd..2df7d057b249 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -651,7 +651,7 @@ static irqreturn_t exynos_ts_isr(int irq, void *dev_id)
input_sync(info->input);
usleep_range(1000, 1100);
- };
+ }
writel(0, ADC_V1_CLRINTPNDNUP(info->regs));
@@ -769,7 +769,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c2410_ts_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = NULL;
- struct resource *mem;
bool has_ts = false;
int ret = -ENODEV;
int irq;
@@ -788,8 +787,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
return -EINVAL;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 62e6c8badd22..c8686558429b 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -23,6 +23,7 @@
/* gain to pulse and scale conversion */
#define HX711_GAIN_MAX 3
+#define HX711_RESET_GAIN 128
struct hx711_gain_to_scale {
int gain;
@@ -185,8 +186,7 @@ static int hx711_wait_for_ready(struct hx711_data *hx711_data)
static int hx711_reset(struct hx711_data *hx711_data)
{
- int ret;
- int val = gpiod_get_value(hx711_data->gpiod_dout);
+ int val = hx711_wait_for_ready(hx711_data);
if (val) {
/*
@@ -202,22 +202,10 @@ static int hx711_reset(struct hx711_data *hx711_data)
msleep(10);
gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
- ret = hx711_wait_for_ready(hx711_data);
- if (ret)
- return ret;
- /*
- * after a reset the gain is 128 so we do a dummy read
- * to set the gain for the next read
- */
- ret = hx711_read(hx711_data);
- if (ret < 0)
- return ret;
-
- /*
- * after a dummy read we need to wait vor readiness
- * for not mixing gain pulses with the clock
- */
val = hx711_wait_for_ready(hx711_data);
+
+ /* after a reset the gain is 128 */
+ hx711_data->gain_set = HX711_RESET_GAIN;
}
return val;
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index e234970b7150..39c0a609fc94 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -25,9 +25,13 @@
#define JZ_ADC_REG_ADSDAT 0x20
#define JZ_ADC_REG_ADCLK 0x28
+#define JZ_ADC_REG_ENABLE_PD BIT(7)
+#define JZ_ADC_REG_CFG_AUX_MD (BIT(0) | BIT(1))
#define JZ_ADC_REG_CFG_BAT_MD BIT(4)
#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
-#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+#define JZ4725B_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+#define JZ4770_ADC_REG_ADCLK_CLKDIV10US_LSB 8
+#define JZ4770_ADC_REG_ADCLK_CLKDIVMS_LSB 16
#define JZ_ADC_AUX_VREF 3300
#define JZ_ADC_AUX_VREF_BITS 12
@@ -37,6 +41,8 @@
#define JZ4725B_ADC_BATTERY_HIGH_VREF_BITS 10
#define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
#define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+#define JZ4770_ADC_BATTERY_VREF 6600
+#define JZ4770_ADC_BATTERY_VREF_BITS 12
struct ingenic_adc;
@@ -47,6 +53,8 @@ struct ingenic_adc_soc_data {
size_t battery_raw_avail_size;
const int *battery_scale_avail;
size_t battery_scale_avail_size;
+ unsigned int battery_vref_mode: 1;
+ unsigned int has_aux2: 1;
int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
};
@@ -54,6 +62,7 @@ struct ingenic_adc {
void __iomem *base;
struct clk *clk;
struct mutex lock;
+ struct mutex aux_lock;
const struct ingenic_adc_soc_data *soc_data;
bool low_vref_mode;
};
@@ -120,6 +129,8 @@ static int ingenic_adc_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->channel) {
case INGENIC_ADC_BATTERY:
+ if (!adc->soc_data->battery_vref_mode)
+ return -EINVAL;
if (val > JZ_ADC_BATTERY_LOW_VREF) {
ingenic_adc_set_config(adc,
JZ_ADC_REG_CFG_BAT_MD,
@@ -158,6 +169,14 @@ static const int jz4740_adc_battery_scale_avail[] = {
JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
};
+static const int jz4770_adc_battery_raw_avail[] = {
+ 0, 1, (1 << JZ4770_ADC_BATTERY_VREF_BITS) - 1,
+};
+
+static const int jz4770_adc_battery_scale_avail[] = {
+ JZ4770_ADC_BATTERY_VREF, JZ4770_ADC_BATTERY_VREF_BITS,
+};
+
static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
{
struct clk *parent_clk;
@@ -187,7 +206,45 @@ static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
/* We also need a divider that produces a 10us clock. */
div_10us = DIV_ROUND_UP(rate, 100000);
- writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ writel(((div_10us - 1) << JZ4725B_ADC_REG_ADCLK_CLKDIV10US_LSB) |
+ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
+ adc->base + JZ_ADC_REG_ADCLK);
+
+ return 0;
+}
+
+static int jz4770_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
+{
+ struct clk *parent_clk;
+ unsigned long parent_rate, rate;
+ unsigned int div_main, div_ms, div_10us;
+
+ parent_clk = clk_get_parent(adc->clk);
+ if (!parent_clk) {
+ dev_err(dev, "ADC clock has no parent\n");
+ return -ENODEV;
+ }
+ parent_rate = clk_get_rate(parent_clk);
+
+ /*
+ * The JZ4770 ADC works at 20 kHz to 200 kHz.
+ * We pick the highest rate possible.
+ */
+ div_main = DIV_ROUND_UP(parent_rate, 200000);
+ div_main = clamp(div_main, 1u, 256u);
+ rate = parent_rate / div_main;
+ if (rate < 20000 || rate > 200000) {
+ dev_err(dev, "No valid divider for ADC main clock\n");
+ return -EINVAL;
+ }
+
+ /* We also need a divider that produces a 10us clock. */
+ div_10us = DIV_ROUND_UP(rate, 10000);
+ /* And another, which produces a 1ms clock. */
+ div_ms = DIV_ROUND_UP(rate, 1000);
+
+ writel(((div_ms - 1) << JZ4770_ADC_REG_ADCLK_CLKDIVMS_LSB) |
+ ((div_10us - 1) << JZ4770_ADC_REG_ADCLK_CLKDIV10US_LSB) |
(div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
adc->base + JZ_ADC_REG_ADCLK);
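+
+/*
+ * Worked divider numbers for the code above, assuming an illustrative
+ * 12 MHz parent clock: div_main = DIV_ROUND_UP(12000000, 200000) = 60,
+ * giving rate = 200 kHz; then div_10us = DIV_ROUND_UP(200000, 10000) =
+ * 20 and div_ms = DIV_ROUND_UP(200000, 1000) = 200, and each
+ * (divider - 1) value is packed into JZ_ADC_REG_ADCLK at its LSB
+ * offset.
+ */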
@@ -201,6 +258,8 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
.battery_scale_avail = jz4725b_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
+ .battery_vref_mode = true,
+ .has_aux2 = false,
.init_clk_div = jz4725b_adc_init_clk_div,
};
@@ -211,9 +270,23 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
.battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
.battery_scale_avail = jz4740_adc_battery_scale_avail,
.battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
+ .battery_vref_mode = true,
+ .has_aux2 = false,
.init_clk_div = NULL, /* no ADCLK register on JZ4740 */
};
+static const struct ingenic_adc_soc_data jz4770_adc_soc_data = {
+ .battery_high_vref = JZ4770_ADC_BATTERY_VREF,
+ .battery_high_vref_bits = JZ4770_ADC_BATTERY_VREF_BITS,
+ .battery_raw_avail = jz4770_adc_battery_raw_avail,
+ .battery_raw_avail_size = ARRAY_SIZE(jz4770_adc_battery_raw_avail),
+ .battery_scale_avail = jz4770_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4770_adc_battery_scale_avail),
+ .battery_vref_mode = false,
+ .has_aux2 = true,
+ .init_clk_div = jz4770_adc_init_clk_div,
+};
+
static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
const int **vals,
@@ -239,6 +312,42 @@ static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
};
}
+static int ingenic_adc_read_chan_info_raw(struct ingenic_adc *adc,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ int bit, ret, engine = (chan->channel == INGENIC_ADC_BATTERY);
+
+ /* We cannot sample AUX/AUX2 in parallel. */
+ mutex_lock(&adc->aux_lock);
+ if (adc->soc_data->has_aux2 && engine == 0) {
+ bit = BIT(chan->channel == INGENIC_ADC_AUX2);
+ ingenic_adc_set_config(adc, JZ_ADC_REG_CFG_AUX_MD, bit);
+ }
+
+ clk_enable(adc->clk);
+ ret = ingenic_adc_capture(adc, engine);
+ if (ret)
+ goto out;
+
+ switch (chan->channel) {
+ case INGENIC_ADC_AUX:
+ case INGENIC_ADC_AUX2:
+ *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
+ break;
+ case INGENIC_ADC_BATTERY:
+ *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
+ break;
+ }
+
+ ret = IIO_VAL_INT;
+out:
+ clk_disable(adc->clk);
+ mutex_unlock(&adc->aux_lock);
+
+ return ret;
+}
+
static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -246,32 +355,14 @@ static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
long m)
{
struct ingenic_adc *adc = iio_priv(iio_dev);
- int ret;
switch (m) {
case IIO_CHAN_INFO_RAW:
- clk_enable(adc->clk);
- ret = ingenic_adc_capture(adc, chan->channel);
- if (ret) {
- clk_disable(adc->clk);
- return ret;
- }
-
- switch (chan->channel) {
- case INGENIC_ADC_AUX:
- *val = readw(adc->base + JZ_ADC_REG_ADSDAT);
- break;
- case INGENIC_ADC_BATTERY:
- *val = readw(adc->base + JZ_ADC_REG_ADBDAT);
- break;
- }
-
- clk_disable(adc->clk);
-
- return IIO_VAL_INT;
+ return ingenic_adc_read_chan_info_raw(adc, chan, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->channel) {
case INGENIC_ADC_AUX:
+ case INGENIC_ADC_AUX2:
*val = JZ_ADC_AUX_VREF;
*val2 = JZ_ADC_AUX_VREF_BITS;
break;
@@ -322,6 +413,14 @@ static const struct iio_chan_spec ingenic_channels[] = {
.indexed = 1,
.channel = INGENIC_ADC_BATTERY,
},
+ { /* Must always be last in the array. */
+ .extend_name = "aux2",
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+ .channel = INGENIC_ADC_AUX2,
+ },
};
static int ingenic_adc_probe(struct platform_device *pdev)
@@ -329,7 +428,6 @@ static int ingenic_adc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iio_dev *iio_dev;
struct ingenic_adc *adc;
- struct resource *mem_base;
const struct ingenic_adc_soc_data *soc_data;
int ret;
@@ -343,10 +441,10 @@ static int ingenic_adc_probe(struct platform_device *pdev)
adc = iio_priv(iio_dev);
mutex_init(&adc->lock);
+ mutex_init(&adc->aux_lock);
adc->soc_data = soc_data;
- mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->base = devm_ioremap_resource(dev, mem_base);
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
@@ -374,6 +472,7 @@ static int ingenic_adc_probe(struct platform_device *pdev)
/* Put hardware in a known passive state. */
writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
+ usleep_range(2000, 3000); /* Must wait at least 2ms. */
clk_disable(adc->clk);
ret = devm_add_action_or_reset(dev, ingenic_adc_clk_cleanup, adc->clk);
@@ -387,6 +486,9 @@ static int ingenic_adc_probe(struct platform_device *pdev)
iio_dev->modes = INDIO_DIRECT_MODE;
iio_dev->channels = ingenic_channels;
iio_dev->num_channels = ARRAY_SIZE(ingenic_channels);
+ /* Remove AUX2 from the list of supported channels. */
+ if (!adc->soc_data->has_aux2)
+ iio_dev->num_channels -= 1;
iio_dev->info = &ingenic_adc_info;
ret = devm_iio_device_register(dev, iio_dev);
@@ -400,6 +502,7 @@ static int ingenic_adc_probe(struct platform_device *pdev)
static const struct of_device_id ingenic_adc_of_match[] = {
{ .compatible = "ingenic,jz4725b-adc", .data = &jz4725b_adc_soc_data, },
{ .compatible = "ingenic,jz4740-adc", .data = &jz4740_adc_soc_data, },
+ { .compatible = "ingenic,jz4770-adc", .data = &jz4770_adc_soc_data, },
{ },
};
MODULE_DEVICE_TABLE(of, ingenic_adc_of_match);
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
new file mode 100644
index 000000000000..67d096f8180d
--- /dev/null
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADC driver for Basin Cove PMIC
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Bin Yang <bin.yang@intel.com>
+ *
+ * Rewritten for upstream by:
+ * Vincent Pelletier <plr.vincent@gmail.com>
+ * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+
+#include <asm/unaligned.h>
+
+#define BCOVE_GPADCREQ 0xDC
+#define BCOVE_GPADCREQ_BUSY BIT(0)
+#define BCOVE_GPADCREQ_IRQEN BIT(1)
+
+#define BCOVE_ADCIRQ_ALL ( \
+ BCOVE_ADCIRQ_BATTEMP | \
+ BCOVE_ADCIRQ_SYSTEMP | \
+ BCOVE_ADCIRQ_BATTID | \
+ BCOVE_ADCIRQ_VIBATT | \
+ BCOVE_ADCIRQ_CCTICK)
+
+#define BCOVE_ADC_TIMEOUT msecs_to_jiffies(1000)
+
+static const u8 mrfld_adc_requests[] = {
+ BCOVE_ADCIRQ_VIBATT,
+ BCOVE_ADCIRQ_BATTID,
+ BCOVE_ADCIRQ_VIBATT,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_BATTEMP,
+ BCOVE_ADCIRQ_BATTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+ BCOVE_ADCIRQ_SYSTEMP,
+};
+
+struct mrfld_adc {
+ struct regmap *regmap;
+ struct completion completion;
+ /* Lock to protect the IPC transfers */
+ struct mutex lock;
+};
+
+static irqreturn_t mrfld_adc_thread_isr(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+
+ complete(&adc->completion);
+ return IRQ_HANDLED;
+}
+
+static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *result)
+{
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+ struct regmap *regmap = adc->regmap;
+ unsigned int req;
+ long timeout;
+ u8 buf[2];
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0);
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0);
+
+ ret = regmap_read_poll_timeout(regmap, BCOVE_GPADCREQ, req,
+ !(req & BCOVE_GPADCREQ_BUSY),
+ 2000, 1000000);
+ if (ret)
+ goto done;
+
+ req = mrfld_adc_requests[chan->channel];
+ ret = regmap_write(regmap, BCOVE_GPADCREQ, BCOVE_GPADCREQ_IRQEN | req);
+ if (ret)
+ goto done;
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ BCOVE_ADC_TIMEOUT);
+ if (timeout < 0) {
+ ret = timeout;
+ goto done;
+ }
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ ret = regmap_bulk_read(regmap, chan->address, buf, 2);
+ if (ret)
+ goto done;
+
+ *result = get_unaligned_be16(buf);
+ ret = IIO_VAL_INT;
+
+done:
+ regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_ADC, 0xff);
+ regmap_update_bits(regmap, BCOVE_MADCIRQ, BCOVE_ADCIRQ_ALL, 0xff);
+
+ return ret;
+}
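+
+/*
+ * The result registers hold a big-endian 16-bit sample, so the
+ * get_unaligned_be16() above is equivalent to (buf[0] << 8) | buf[1]
+ * while remaining safe for byte buffers without 16-bit alignment.
+ */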
+
+static int mrfld_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mrfld_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ ret = mrfld_adc_single_conv(indio_dev, chan, val);
+ mutex_unlock(&adc->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mrfld_adc_iio_info = {
+ .read_raw = &mrfld_adc_read_raw,
+};
+
+#define BCOVE_ADC_CHANNEL(_type, _channel, _datasheet_name, _address) \
+ { \
+ .indexed = 1, \
+ .type = _type, \
+ .channel = _channel, \
+ .address = _address, \
+ .datasheet_name = _datasheet_name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ }
+
+static const struct iio_chan_spec mrfld_adc_channels[] = {
+ BCOVE_ADC_CHANNEL(IIO_VOLTAGE, 0, "CH0", 0xE9),
+ BCOVE_ADC_CHANNEL(IIO_RESISTANCE, 1, "CH1", 0xEB),
+ BCOVE_ADC_CHANNEL(IIO_CURRENT, 2, "CH2", 0xED),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 3, "CH3", 0xCC),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 4, "CH4", 0xC8),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 5, "CH5", 0xCA),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 6, "CH6", 0xC2),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 7, "CH7", 0xC4),
+ BCOVE_ADC_CHANNEL(IIO_TEMP, 8, "CH8", 0xC6),
+};
+
+static struct iio_map iio_maps[] = {
+ IIO_MAP("CH0", "bcove-battery", "VBATRSLT"),
+ IIO_MAP("CH1", "bcove-battery", "BATTID"),
+ IIO_MAP("CH2", "bcove-battery", "IBATRSLT"),
+ IIO_MAP("CH3", "bcove-temp", "PMICTEMP"),
+ IIO_MAP("CH4", "bcove-temp", "BATTEMP0"),
+ IIO_MAP("CH5", "bcove-temp", "BATTEMP1"),
+ IIO_MAP("CH6", "bcove-temp", "SYSTEMP0"),
+ IIO_MAP("CH7", "bcove-temp", "SYSTEMP1"),
+ IIO_MAP("CH8", "bcove-temp", "SYSTEMP2"),
+ {}
+};
+
+static int mrfld_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct iio_dev *indio_dev;
+ struct mrfld_adc *adc;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+
+ mutex_init(&adc->lock);
+ init_completion(&adc->completion);
+ adc->regmap = pmic->regmap;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_adc_thread_isr,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = pdev->name;
+
+ indio_dev->channels = mrfld_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mrfld_adc_channels);
+ indio_dev->info = &mrfld_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_map_array_register(indio_dev, iio_maps);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ goto err_array_unregister;
+
+ return 0;
+
+err_array_unregister:
+ iio_map_array_unregister(indio_dev);
+ return ret;
+}
+
+static int mrfld_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_map_array_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id mrfld_adc_id_table[] = {
+ { .name = "mrfld_bcove_adc" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_adc_id_table);
+
+static struct platform_driver mrfld_adc_driver = {
+ .driver = {
+ .name = "mrfld_bcove_adc",
+ },
+ .probe = mrfld_adc_probe,
+ .remove = mrfld_adc_remove,
+ .id_table = mrfld_adc_id_table,
+};
+module_platform_driver(mrfld_adc_driver);
+
+MODULE_AUTHOR("Bin Yang <bin.yang@intel.com>");
+MODULE_AUTHOR("Vincent Pelletier <plr.vincent@gmail.com>");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("ADC driver for Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index e400a95f553d..4c6ac6644dc0 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -119,7 +119,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct lpc18xx_adc *adc;
- struct resource *res;
unsigned int clkdiv;
unsigned long rate;
int ret;
@@ -133,8 +132,7 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
adc->dev = &pdev->dev;
mutex_init(&adc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->base = devm_ioremap_resource(&pdev->dev, res);
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 214883458582..e171db20c04a 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -63,12 +63,18 @@ enum max1027_id {
max1027,
max1029,
max1031,
+ max1227,
+ max1229,
+ max1231,
};
static const struct spi_device_id max1027_id[] = {
{"max1027", max1027},
{"max1029", max1029},
{"max1031", max1031},
+ {"max1227", max1227},
+ {"max1229", max1229},
+ {"max1231", max1231},
{}
};
MODULE_DEVICE_TABLE(spi, max1027_id);
@@ -78,12 +84,15 @@ static const struct of_device_id max1027_adc_dt_ids[] = {
{ .compatible = "maxim,max1027" },
{ .compatible = "maxim,max1029" },
{ .compatible = "maxim,max1031" },
+ { .compatible = "maxim,max1227" },
+ { .compatible = "maxim,max1229" },
+ { .compatible = "maxim,max1231" },
{},
};
MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
#endif
-#define MAX1027_V_CHAN(index) \
+#define MAX1027_V_CHAN(index, depth) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -93,7 +102,7 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
.scan_index = index + 1, \
.scan_type = { \
.sign = 'u', \
- .realbits = 10, \
+ .realbits = depth, \
.storagebits = 16, \
.shift = 2, \
.endianness = IIO_BE, \
@@ -115,52 +124,54 @@ MODULE_DEVICE_TABLE(of, max1027_adc_dt_ids);
}, \
}
+#define MAX1X27_CHANNELS(depth) \
+ MAX1027_T_CHAN, \
+ MAX1027_V_CHAN(0, depth), \
+ MAX1027_V_CHAN(1, depth), \
+ MAX1027_V_CHAN(2, depth), \
+ MAX1027_V_CHAN(3, depth), \
+ MAX1027_V_CHAN(4, depth), \
+ MAX1027_V_CHAN(5, depth), \
+ MAX1027_V_CHAN(6, depth), \
+ MAX1027_V_CHAN(7, depth)
+
+#define MAX1X29_CHANNELS(depth) \
+ MAX1X27_CHANNELS(depth), \
+ MAX1027_V_CHAN(8, depth), \
+ MAX1027_V_CHAN(9, depth), \
+ MAX1027_V_CHAN(10, depth), \
+ MAX1027_V_CHAN(11, depth)
+
+#define MAX1X31_CHANNELS(depth) \
+ MAX1X27_CHANNELS(depth), \
+ MAX1X29_CHANNELS(depth), \
+ MAX1027_V_CHAN(12, depth), \
+ MAX1027_V_CHAN(13, depth), \
+ MAX1027_V_CHAN(14, depth), \
+ MAX1027_V_CHAN(15, depth)
+
static const struct iio_chan_spec max1027_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7)
+ MAX1X27_CHANNELS(10),
};
static const struct iio_chan_spec max1029_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7),
- MAX1027_V_CHAN(8),
- MAX1027_V_CHAN(9),
- MAX1027_V_CHAN(10),
- MAX1027_V_CHAN(11)
+ MAX1X29_CHANNELS(10),
};
static const struct iio_chan_spec max1031_channels[] = {
- MAX1027_T_CHAN,
- MAX1027_V_CHAN(0),
- MAX1027_V_CHAN(1),
- MAX1027_V_CHAN(2),
- MAX1027_V_CHAN(3),
- MAX1027_V_CHAN(4),
- MAX1027_V_CHAN(5),
- MAX1027_V_CHAN(6),
- MAX1027_V_CHAN(7),
- MAX1027_V_CHAN(8),
- MAX1027_V_CHAN(9),
- MAX1027_V_CHAN(10),
- MAX1027_V_CHAN(11),
- MAX1027_V_CHAN(12),
- MAX1027_V_CHAN(13),
- MAX1027_V_CHAN(14),
- MAX1027_V_CHAN(15)
+ MAX1X31_CHANNELS(10),
+};
+
+static const struct iio_chan_spec max1227_channels[] = {
+ MAX1X27_CHANNELS(12),
+};
+
+static const struct iio_chan_spec max1229_channels[] = {
+ MAX1X29_CHANNELS(12),
+};
+
+static const struct iio_chan_spec max1231_channels[] = {
+ MAX1X31_CHANNELS(12),
};
static const unsigned long max1027_available_scan_masks[] = {
@@ -200,6 +211,21 @@ static const struct max1027_chip_info max1027_chip_info_tbl[] = {
.num_channels = ARRAY_SIZE(max1031_channels),
.available_scan_masks = max1031_available_scan_masks,
},
+ [max1227] = {
+ .channels = max1227_channels,
+ .num_channels = ARRAY_SIZE(max1227_channels),
+ .available_scan_masks = max1027_available_scan_masks,
+ },
+ [max1229] = {
+ .channels = max1229_channels,
+ .num_channels = ARRAY_SIZE(max1229_channels),
+ .available_scan_masks = max1029_available_scan_masks,
+ },
+ [max1231] = {
+ .channels = max1231_channels,
+ .num_channels = ARRAY_SIZE(max1231_channels),
+ .available_scan_masks = max1031_available_scan_masks,
+ },
};
struct max1027_state {
@@ -284,7 +310,7 @@ static int max1027_read_raw(struct iio_dev *indio_dev,
break;
case IIO_VOLTAGE:
*val = 2500;
- *val2 = 10;
+ *val2 = chan->scan_type.realbits;
ret = IIO_VAL_FRACTIONAL_LOG2;
break;
default:
@@ -309,8 +335,11 @@ static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
struct max1027_state *st = iio_priv(indio_dev);
u8 *val = (u8 *)st->buffer;
- if (readval != NULL)
- return -EINVAL;
+ if (readval) {
+ int ret = spi_read(st->spi, val, 2);
+ *readval = be16_to_cpu(st->buffer[0]);
+ return ret;
+ }
*val = (u8)writeval;
return spi_write(st->spi, val, 1);
@@ -427,34 +456,47 @@ static int max1027_probe(struct spi_device *spi)
return -ENOMEM;
}
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- &iio_pollfunc_store_time,
- &max1027_trigger_handler, NULL);
- if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to setup buffer\n");
- return ret;
- }
+ if (spi->irq) {
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &max1027_trigger_handler,
+ NULL);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to setup buffer\n");
+ return ret;
+ }
- st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
- indio_dev->name);
- if (st->trig == NULL) {
- ret = -ENOMEM;
- dev_err(&indio_dev->dev, "Failed to allocate iio trigger\n");
- return ret;
- }
+ st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-trigger",
+ indio_dev->name);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ dev_err(&indio_dev->dev,
+ "Failed to allocate iio trigger\n");
+ return ret;
+ }
- st->trig->ops = &max1027_trigger_ops;
- st->trig->dev.parent = &spi->dev;
- iio_trigger_set_drvdata(st->trig, indio_dev);
- iio_trigger_register(st->trig);
+ st->trig->ops = &max1027_trigger_ops;
+ st->trig->dev.parent = &spi->dev;
+ iio_trigger_set_drvdata(st->trig, indio_dev);
+ iio_trigger_register(st->trig);
+
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL,
+ IRQF_TRIGGER_FALLING,
+ spi->dev.driver->name,
+ st->trig);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ return ret;
+ }
+ }
- ret = devm_request_threaded_irq(&spi->dev, spi->irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
- IRQF_TRIGGER_FALLING,
- spi->dev.driver->name, st->trig);
+ /* Internal reset */
+ st->reg = MAX1027_RST_REG;
+ ret = spi_write(st->spi, &st->reg, 1);
if (ret < 0) {
- dev_err(&indio_dev->dev, "Failed to allocate IRQ.\n");
+ dev_err(&indio_dev->dev, "Failed to reset the ADC\n");
return ret;
}
@@ -480,5 +522,5 @@ static struct spi_driver max1027_driver = {
module_spi_driver(max1027_driver);
MODULE_AUTHOR("Philippe Reynes <tremyfr@yahoo.fr>");
-MODULE_DESCRIPTION("MAX1027/MAX1029/MAX1031 ADC");
+MODULE_DESCRIPTION("MAX1X27/MAX1X29/MAX1X31 ADC");
MODULE_LICENSE("GPL v2");
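
The scale change above is what lets one read_raw() path serve both families: with IIO_VAL_FRACTIONAL_LOG2 the reported scale is val / 2^val2, so feeding chan->scan_type.realbits into val2 yields 2500 mV / 2^10, about 2.44 mV per LSB, for the 10-bit MAX1027/29/31, and 2500 mV / 2^12, about 0.61 mV per LSB, for the 12-bit MAX1227/29/31. A stand-alone arithmetic check (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
        int val = 2500;                 /* vref in mV, as in read_raw() */
        int realbits[] = { 10, 12 };    /* MAX1027 vs. MAX1227 families */

        for (int i = 0; i < 2; i++)
                printf("%d-bit: %.3f mV/LSB\n", realbits[i],
                       (double)val / (1 << realbits[i]));
        return 0;
}
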
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 38bf10085696..465c7625a55a 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -164,7 +164,7 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
case mcp3550_60:
case mcp3551:
case mcp3553: {
- u32 raw = be32_to_cpup((u32 *)adc->rx_buf);
+ u32 raw = be32_to_cpup((__be32 *)adc->rx_buf);
if (!(adc->spi->mode & SPI_CPOL))
raw <<= 1; /* strip Data Ready bit in SPI mode 0,0 */
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 3b2fbb7ce431..196c8226381e 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -167,3 +167,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IIO ADC driver for MEN 16z188 ADC Core");
MODULE_ALIAS("mcb:16z188");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 7b27306330a3..22a470db9ef8 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1187,7 +1187,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
const struct meson_sar_adc_data *match_data;
struct meson_sar_adc_priv *priv;
struct iio_dev *indio_dev;
- struct resource *res;
void __iomem *base;
int irq, ret;
@@ -1214,8 +1213,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &meson_sar_adc_iio_info;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 7bbb64ca3b32..a4776d924f3a 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -237,7 +237,6 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
{
struct mt6577_auxadc_device *adc_dev;
unsigned long adc_clk_rate;
- struct resource *res;
struct iio_dev *indio_dev;
int ret;
@@ -253,8 +252,7 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
indio_dev->channels = mt6577_auxadc_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(mt6577_auxadc_iio_channels);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ adc_dev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc_dev->reg_base)) {
dev_err(&pdev->dev, "failed to get auxadc base address\n");
return PTR_ERR(adc_dev->reg_base);
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index 910f3585fa54..a6170a37ebe8 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -183,7 +183,6 @@ static int npcm_adc_probe(struct platform_device *pdev)
int irq;
u32 div;
u32 reg_con;
- struct resource *res;
struct npcm_adc *info;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
@@ -196,8 +195,7 @@ static int npcm_adc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, res);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index c37f201294b2..63ce743ee7af 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -481,7 +481,6 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rcar_gyroadc *priv;
struct iio_dev *indio_dev;
- struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -491,8 +490,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
priv = iio_priv(indio_dev);
priv->dev = dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, mem);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index a6c046575ec3..66b387f9b36d 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -477,13 +477,6 @@ static void sc27xx_adc_disable(void *_data)
SC27XX_MODULE_ADC_EN, 0);
}
-static void sc27xx_adc_free_hwlock(void *_data)
-{
- struct hwspinlock *hwlock = _data;
-
- hwspin_lock_free(hwlock);
-}
-
static int sc27xx_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -520,19 +513,12 @@ static int sc27xx_adc_probe(struct platform_device *pdev)
return ret;
}
- sc27xx_data->hwlock = hwspin_lock_request_specific(ret);
+ sc27xx_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
if (!sc27xx_data->hwlock) {
dev_err(dev, "failed to request hwspinlock\n");
return -ENXIO;
}
- ret = devm_add_action_or_reset(dev, sc27xx_adc_free_hwlock,
- sc27xx_data->hwlock);
- if (ret) {
- dev_err(dev, "failed to add hwspinlock action\n");
- return ret;
- }
-
sc27xx_data->dev = dev;
ret = sc27xx_adc_enable(sc27xx_data);
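
The sc27xx hunk above can delete its hand-rolled cleanup action because a managed request helper now exists. The deleted pattern remains the standard fallback whenever an allocation lacks a devm_* variant; as a general sketch (illustrative names, not sc27xx code):

static void example_free_hwlock(void *data)
{
        hwspin_lock_free(data);
}

static int example_request_hwlock(struct device *dev, unsigned int id,
                                  struct hwspinlock **out)
{
        struct hwspinlock *lock = hwspin_lock_request_specific(id);

        if (!lock)
                return -ENXIO;

        *out = lock;
        return devm_add_action_or_reset(dev, example_free_hwlock, lock);
}
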
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index 592b97c464da..0ad536494e8f 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -260,7 +260,6 @@ static int spear_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct spear_adc_state *st;
- struct resource *res;
struct iio_dev *indio_dev = NULL;
int ret = -ENODEV;
int irq;
@@ -279,8 +278,7 @@ static int spear_adc_probe(struct platform_device *pdev)
* (e.g. SPEAr3xx). Let's provide two register base addresses
* to support multi-arch kernels.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- st->adc_base_spear6xx = devm_ioremap_resource(&pdev->dev, res);
+ st->adc_base_spear6xx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(st->adc_base_spear6xx))
return PTR_ERR(st->adc_base_spear6xx);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 93a096a91f8c..6537f4f776c5 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -38,12 +38,12 @@
#define HAS_ANASWVDD BIT(1)
/**
- * stm32_adc_common_regs - stm32 common registers, compatible dependent data
+ * struct stm32_adc_common_regs - stm32 common registers
* @csr: common status register offset
* @ccr: common control register offset
- * @eoc1: adc1 end of conversion flag in @csr
- * @eoc2: adc2 end of conversion flag in @csr
- * @eoc3: adc3 end of conversion flag in @csr
+ * @eoc1_msk: adc1 end of conversion flag in @csr
+ * @eoc2_msk: adc2 end of conversion flag in @csr
+ * @eoc3_msk: adc3 end of conversion flag in @csr
* @ier: interrupt enable register offset for each adc
* @eocie_msk: end of conversion interrupt enable mask in @ier
*/
@@ -60,7 +60,7 @@ struct stm32_adc_common_regs {
struct stm32_adc_priv;
/**
- * stm32_adc_priv_cfg - stm32 core compatible configuration data
+ * struct stm32_adc_priv_cfg - stm32 core compatible configuration data
* @regs: common registers for all instances
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
@@ -79,6 +79,7 @@ struct stm32_adc_priv_cfg {
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
+ * @max_clk_rate: desired maximum clock rate
* @booster: booster supply reference
* @vdd: vdd supply reference
* @vdda: vdda analog supply reference
@@ -95,6 +96,7 @@ struct stm32_adc_priv {
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
+ u32 max_clk_rate;
struct regulator *booster;
struct regulator *vdd;
struct regulator *vdda;
@@ -117,6 +119,7 @@ static int stm32f4_pclk_div[] = {2, 4, 6, 8};
/**
* stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @pdev: platform device
* @priv: stm32 ADC core private data
* Select clock prescaler used for analog conversions, before using ADC.
*/
@@ -140,7 +143,7 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
}
for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
- if ((rate / stm32f4_pclk_div[i]) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / stm32f4_pclk_div[i]) <= priv->max_clk_rate)
break;
}
if (i >= ARRAY_SIZE(stm32f4_pclk_div)) {
@@ -229,7 +232,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (ckmode)
continue;
- if ((rate / div) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / div) <= priv->max_clk_rate)
goto out;
}
}
@@ -249,7 +252,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
if (!ckmode)
continue;
- if ((rate / div) <= priv->cfg->max_clk_rate_hz)
+ if ((rate / div) <= priv->max_clk_rate)
goto out;
}
@@ -654,6 +657,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct resource *res;
+ u32 max_rate;
int ret;
if (!pdev->dev.of_node)
@@ -730,6 +734,13 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->common.vref_mv = ret / 1000;
dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+ ret = of_property_read_u32(pdev->dev.of_node, "st,max-clk-rate-hz",
+ &max_rate);
+ if (!ret)
+ priv->max_clk_rate = min(max_rate, priv->cfg->max_clk_rate_hz);
+ else
+ priv->max_clk_rate = priv->cfg->max_clk_rate_hz;
+
ret = priv->cfg->clk_sel(pdev, priv);
if (ret < 0)
goto err_hw_stop;
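
The new st,max-clk-rate-hz handling above means the optional DT property can only lower the datasheet ceiling, never raise it. Reduced to a one-liner (sketch, illustrative name; 0 stands for "property absent"):

static u32 effective_max_clk_rate(u32 dt_rate, u32 datasheet_hz)
{
        return dt_rate ? min(dt_rate, datasheet_hz) : datasheet_hz;
}
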
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 73aee5949b6b..3b291d72701c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -102,7 +102,7 @@ struct stm32_adc_calib {
};
/**
- * stm32_adc_regs - stm32 ADC misc registers & bitfield desc
+ * struct stm32_adc_regs - stm32 ADC misc registers & bitfield desc
* @reg: register offset
* @mask: bitfield mask
* @shift: left shift
@@ -114,7 +114,7 @@ struct stm32_adc_regs {
};
/**
- * stm32_adc_regspec - stm32 registers definition, compatible dependent data
+ * struct stm32_adc_regspec - stm32 registers definition
* @dr: data register offset
* @ier_eoc: interrupt enable register & eocie bitfield
* @isr_eoc: interrupt status register & eoc bitfield
@@ -140,7 +140,7 @@ struct stm32_adc_regspec {
struct stm32_adc;
/**
- * stm32_adc_cfg - stm32 compatible configuration data
+ * struct stm32_adc_cfg - stm32 compatible configuration data
* @regs: registers descriptions
* @adc_info: per instance input channels definitions
* @trigs: external trigger sources
@@ -183,8 +183,8 @@ struct stm32_adc_cfg {
* @rx_buf: dma rx buffer cpu address
* @rx_dma_buf: dma rx buffer bus address
* @rx_buf_sz: dma rx buffer size
- * @difsel bitmask to set single-ended/differential channel
- * @pcsel bitmask to preselect channels on some devices
+ * @difsel: bitmask to set single-ended/differential channel
+ * @pcsel: bitmask to preselect channels on some devices
* @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
* @cal: optional calibration data on some devices
* @chan_name: channel name array
@@ -254,7 +254,7 @@ static const struct stm32_adc_info stm32h7_adc_info = {
.num_res = ARRAY_SIZE(stm32h7_adc_resolutions),
};
-/**
+/*
* stm32f4_sq - describe regular sequence registers
* - L: sequence len (register & bit field)
* - SQ1..SQ16: sequence entries (register & bit field)
@@ -301,7 +301,7 @@ static struct stm32_adc_trig_info stm32f4_adc_trigs[] = {
{}, /* sentinel */
};
-/**
+/*
* stm32f4_smp_bits[] - describe sampling time register index & bit fields
* Sorted so it can be indexed by channel number.
*/
@@ -392,7 +392,7 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{},
};
-/**
+/*
* stm32h7_smp_bits - describe sampling time register index & bit fields
* Sorted so it can be indexed by channel number.
*/
@@ -994,6 +994,7 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
/**
* stm32_adc_get_trig_extsel() - Get external trigger selection
+ * @indio_dev: IIO device structure
* @trig: trigger
*
* Returns trigger extsel value, if trig matches, -EINVAL otherwise.
@@ -1297,6 +1298,10 @@ static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
/**
* stm32_adc_debugfs_reg_access - read or write register value
+ * @indio_dev: IIO device structure
+ * @reg: register offset
+ * @writeval: value to write
+ * @readval: value to read
*
* To read a value from an ADC register:
* echo [ADC reg offset] > direct_reg_access
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index bd72727fc417..0f88048ea48f 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -175,7 +175,7 @@ static int stmpe_read_raw(struct iio_dev *indio_dev,
static irqreturn_t stmpe_adc_isr(int irq, void *dev_id)
{
struct stmpe_adc *info = (struct stmpe_adc *)dev_id;
- u16 data;
+ __be16 data;
if (info->channel <= STMPE_ADC_LAST_NR) {
int int_sta;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 55c5119fe575..472b08f37fea 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -495,7 +495,7 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
ret = twl4030_madc_disable_irq(madc, i);
if (ret < 0)
dev_dbg(madc->dev, "Disable interrupt failed %d\n", i);
- madc->requests[i].result_pending = 1;
+ madc->requests[i].result_pending = true;
}
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
r = &madc->requests[i];
@@ -507,8 +507,8 @@ static irqreturn_t twl4030_madc_threaded_irq_handler(int irq, void *_madc)
len = twl4030_madc_read_channels(madc, method->rbase,
r->channels, r->rbuf, r->raw);
/* Free request */
- r->result_pending = 0;
- r->active = 0;
+ r->result_pending = false;
+ r->active = false;
}
mutex_unlock(&madc->lock);
@@ -521,15 +521,15 @@ err_i2c:
*/
for (i = 0; i < TWL4030_MADC_NUM_METHODS; i++) {
r = &madc->requests[i];
- if (r->active == 0)
+ if (!r->active)
continue;
method = &twl4030_conversion_methods[r->method];
/* Read results */
len = twl4030_madc_read_channels(madc, method->rbase,
r->channels, r->rbuf, r->raw);
/* Free request */
- r->result_pending = 0;
- r->active = 0;
+ r->result_pending = false;
+ r->active = false;
}
mutex_unlock(&madc->lock);
@@ -652,16 +652,16 @@ static int twl4030_madc_conversion(struct twl4030_madc_request *req)
ret = twl4030_madc_start_conversion(twl4030_madc, req->method);
if (ret < 0)
goto out;
- twl4030_madc->requests[req->method].active = 1;
+ twl4030_madc->requests[req->method].active = true;
/* Wait until conversion is ready (ctrl register returns EOC) */
ret = twl4030_madc_wait_conversion_ready(twl4030_madc, 5, method->ctrl);
if (ret) {
- twl4030_madc->requests[req->method].active = 0;
+ twl4030_madc->requests[req->method].active = false;
goto out;
}
ret = twl4030_madc_read_channels(twl4030_madc, method->rbase,
req->channels, req->rbuf, req->raw);
- twl4030_madc->requests[req->method].active = 0;
+ twl4030_madc->requests[req->method].active = false;
out:
mutex_unlock(&twl4030_madc->lock);
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 98b30475bbc6..cb7380bf07ca 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -802,7 +802,6 @@ static int vf610_adc_probe(struct platform_device *pdev)
{
struct vf610_adc *info;
struct iio_dev *indio_dev;
- struct resource *mem;
int irq;
int ret;
@@ -815,8 +814,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 4fd389678dba..ec227b358cd6 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1150,7 +1150,6 @@ static int xadc_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
- struct resource *mem;
unsigned int conf0;
struct xadc *xadc;
int ret;
@@ -1180,8 +1179,7 @@ static int xadc_probe(struct platform_device *pdev)
spin_lock_init(&xadc->lock);
INIT_DELAYED_WORK(&xadc->zynq_unmask_work, xadc_zynq_unmask_worker);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xadc->base = devm_ioremap_resource(&pdev->dev, mem);
+ xadc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xadc->base))
return PTR_ERR(xadc->base);
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 3a20cb5d9bff..6c175eb1c7a7 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -323,16 +323,16 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
struct atlas_data *data = iio_priv(indio_dev);
int ret;
- ret = iio_triggered_buffer_predisable(indio_dev);
+ ret = atlas_set_interrupt(data, false);
if (ret)
return ret;
- ret = atlas_set_interrupt(data, false);
+ pm_runtime_mark_last_busy(&data->client->dev);
+ ret = pm_runtime_put_autosuspend(&data->client->dev);
if (ret)
return ret;
- pm_runtime_mark_last_busy(&data->client->dev);
- return pm_runtime_put_autosuspend(&data->client->dev);
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 8cc8fe5e356d..403e8803471a 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -483,7 +483,7 @@ static void sgp_init(struct sgp_data *data)
data->iaq_defval_skip_jiffies =
43 * data->measure_interval_jiffies;
break;
- };
+ }
}
static const struct iio_info sgp_info = {
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index edbb956e81e8..acb9f8ecbb3d 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -117,7 +117,7 @@ static int sps30_do_cmd(struct sps30_state *state, u16 cmd, u8 *data, int size)
break;
case SPS30_READ_AUTO_CLEANING_PERIOD:
buf[0] = SPS30_AUTO_CLEANING_PERIOD >> 8;
- buf[1] = (u8)SPS30_AUTO_CLEANING_PERIOD;
+ buf[1] = (u8)(SPS30_AUTO_CLEANING_PERIOD & 0xff);
/* fall through */
case SPS30_READ_DATA_READY_FLAG:
case SPS30_READ_DATA:
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
index cdbb29cfb907..fefad9572790 100644
--- a/drivers/iio/common/cros_ec_sensors/Kconfig
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -4,7 +4,7 @@
#
config IIO_CROS_EC_SENSORS_CORE
tristate "ChromeOS EC Sensors Core"
- depends on SYSFS && CROS_EC
+ depends on SYSFS && CROS_EC_SENSORHUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index a6987726eeb8..7dce04473467 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -222,17 +222,11 @@ static const struct iio_info ec_sensors_info = {
static int cros_ec_sensors_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_sensors_state *state;
struct iio_chan_spec *channel;
int ret, i;
- if (!ec_dev || !ec_dev->ec_dev) {
- dev_warn(&pdev->dev, "No CROS EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index d2609e6feda4..81a7f692de2f 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
static char *cros_ec_loc[] = {
@@ -88,7 +89,8 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
- struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_sensorhub *sensor_hub = dev_get_drvdata(dev->parent);
+ struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
u32 ver_mask;
int ret, i;
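
With the Kconfig and probe changes above, the IIO sensor devices no longer sit directly under cros-ec-dev; their parent is the new cros-ec-sensorhub MFD cell, and the EC handle is reached through its drvdata. A sketch of the new chain (illustrative function name):

#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>

static struct cros_ec_dev *example_get_ec(struct platform_device *pdev)
{
        /* Parent is now the cros-ec-sensorhub device. */
        struct cros_ec_sensorhub *hub = dev_get_drvdata(pdev->dev.parent);

        return hub->ec;
}
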
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index cc42219a64f7..979070196da9 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -60,8 +60,8 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
- AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
+ AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
+ AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
To compile this driver as a module, choose M here: the
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 7df8b4cc295d..61c670f7fc5f 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -327,6 +327,7 @@ enum ad5446_supported_spi_device_ids {
ID_AD5541A,
ID_AD5512A,
ID_AD5553,
+ ID_AD5600,
ID_AD5601,
ID_AD5611,
ID_AD5621,
@@ -381,6 +382,10 @@ static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
.channel = AD5446_CHANNEL(14, 16, 0),
.write = ad5446_write,
},
+ [ID_AD5600] = {
+ .channel = AD5446_CHANNEL(16, 16, 0),
+ .write = ad5446_write,
+ },
[ID_AD5601] = {
.channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
.write = ad5446_write,
@@ -448,6 +453,7 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
{"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
{"ad5553", ID_AD5553},
+ {"ad5600", ID_AD5600},
{"ad5601", ID_AD5601},
{"ad5611", ID_AD5611},
{"ad5621", ID_AD5621},
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 8de9f40226e6..14bbac6bee98 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -41,6 +41,7 @@ struct ad7303_state {
struct regulator *vdd_reg;
struct regulator *vref_reg;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -79,7 +80,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (pwr_down)
st->config |= AD7303_CFG_POWER_DOWN(chan->channel);
@@ -90,7 +91,7 @@ static ssize_t ad7303_write_dac_powerdown(struct iio_dev *indio_dev,
* mode, so just write one of the DAC channels again */
ad7303_write(st, chan->channel, st->dac_cache[chan->channel]);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return len;
}
@@ -116,7 +117,9 @@ static int ad7303_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
*val = st->dac_cache[chan->channel];
+ mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
vref_uv = ad7303_get_vref(st, chan);
@@ -144,11 +147,11 @@ static int ad7303_write_raw(struct iio_dev *indio_dev,
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad7303_write(st, chan->address, val);
if (ret == 0)
st->dac_cache[chan->channel] = val;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -211,6 +214,8 @@ static int ad7303_probe(struct spi_device *spi)
st->spi = spi;
+ mutex_init(&st->lock);
+
st->vdd_reg = devm_regulator_get(&spi->dev, "Vdd");
if (IS_ERR(st->vdd_reg))
return PTR_ERR(st->vdd_reg);
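
The ad7303 conversion above follows the ongoing effort to keep indio_dev->mlock private to the IIO core: the driver state owns a mutex covering both the config shadow and the per-channel cache. The shape, reduced to a sketch (illustrative names):

struct example_state {
        struct mutex lock;              /* protects config + dac_cache */
        unsigned int config;
        unsigned int dac_cache[2];
};

static int example_read_cached(struct example_state *st, int chan)
{
        int val;

        mutex_lock(&st->lock);
        val = st->dac_cache[chan];
        mutex_unlock(&st->lock);

        return val;
}
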
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 883e84e96609..0ab357bd3633 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -106,7 +106,6 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct lpc18xx_dac *dac;
- struct resource *res;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
@@ -117,8 +116,7 @@ static int lpc18xx_dac_probe(struct platform_device *pdev)
dac = iio_priv(indio_dev);
mutex_init(&dac->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dac->base = devm_ioremap_resource(&pdev->dev, res);
+ dac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dac->base))
return PTR_ERR(dac->base);
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index d0fb3124de07..9e6b4cd0a5cc 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -50,6 +51,41 @@ static const struct regmap_config stm32_dac_regmap_cfg = {
.max_register = 0x3fc,
};
+static int stm32_dac_core_hw_start(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+ int ret;
+
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(dev, "vref enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret < 0) {
+ dev_err(dev, "pclk enable failed: %d\n", ret);
+ goto err_regulator_disable;
+ }
+
+ return 0;
+
+err_regulator_disable:
+ regulator_disable(priv->vref);
+
+ return ret;
+}
+
+static void stm32_dac_core_hw_stop(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
+ struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+
+ clk_disable_unprepare(priv->pclk);
+ regulator_disable(priv->vref);
+}
+
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -66,6 +102,8 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, &priv->common);
+
cfg = (const struct stm32_dac_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
@@ -74,11 +112,19 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (IS_ERR(mmio))
return PTR_ERR(mmio);
- regmap = devm_regmap_init_mmio(dev, mmio, &stm32_dac_regmap_cfg);
+ regmap = devm_regmap_init_mmio_clk(dev, "pclk", mmio,
+ &stm32_dac_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
priv->common.regmap = regmap;
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ dev_err(dev, "pclk get failed\n");
+ return ret;
+ }
+
priv->vref = devm_regulator_get(dev, "vref");
if (IS_ERR(priv->vref)) {
ret = PTR_ERR(priv->vref);
@@ -86,33 +132,22 @@ static int stm32_dac_probe(struct platform_device *pdev)
return ret;
}
- ret = regulator_enable(priv->vref);
- if (ret < 0) {
- dev_err(dev, "vref enable failed\n");
- return ret;
- }
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = stm32_dac_core_hw_start(dev);
+ if (ret)
+ goto err_pm_stop;
ret = regulator_get_voltage(priv->vref);
if (ret < 0) {
dev_err(dev, "vref get voltage failed, %d\n", ret);
- goto err_vref;
+ goto err_hw_stop;
}
priv->common.vref_mv = ret / 1000;
dev_dbg(dev, "vref+=%dmV\n", priv->common.vref_mv);
- priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- dev_err(dev, "pclk get failed\n");
- goto err_vref;
- }
-
- ret = clk_prepare_enable(priv->pclk);
- if (ret < 0) {
- dev_err(dev, "pclk enable failed\n");
- goto err_vref;
- }
-
priv->rst = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
@@ -128,39 +163,79 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv->common.hfsel ?
STM32H7_DAC_CR_HFSEL : 0);
if (ret)
- goto err_pclk;
+ goto err_hw_stop;
}
- platform_set_drvdata(pdev, &priv->common);
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, dev);
if (ret < 0) {
dev_err(dev, "failed to populate DT children\n");
- goto err_pclk;
+ goto err_hw_stop;
}
+ pm_runtime_put(dev);
+
return 0;
-err_pclk:
- clk_disable_unprepare(priv->pclk);
-err_vref:
- regulator_disable(priv->vref);
+err_hw_stop:
+ stm32_dac_core_hw_stop(dev);
+err_pm_stop:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
static int stm32_dac_remove(struct platform_device *pdev)
{
- struct stm32_dac_common *common = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
+ of_platform_depopulate(&pdev->dev);
+ stm32_dac_core_hw_stop(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dac_core_resume(struct device *dev)
+{
+ struct stm32_dac_common *common = dev_get_drvdata(dev);
struct stm32_dac_priv *priv = to_stm32_dac_priv(common);
+ int ret;
- of_platform_depopulate(&pdev->dev);
- clk_disable_unprepare(priv->pclk);
- regulator_disable(priv->vref);
+ if (priv->common.hfsel) {
+ /* restore hfsel (maybe lost under low power state) */
+ ret = regmap_update_bits(priv->common.regmap, STM32_DAC_CR,
+ STM32H7_DAC_CR_HFSEL,
+ STM32H7_DAC_CR_HFSEL);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_force_resume(dev);
+}
+
+static int __maybe_unused stm32_dac_core_runtime_suspend(struct device *dev)
+{
+ stm32_dac_core_hw_stop(dev);
return 0;
}
+static int __maybe_unused stm32_dac_core_runtime_resume(struct device *dev)
+{
+ return stm32_dac_core_hw_start(dev);
+}
+
+static const struct dev_pm_ops stm32_dac_core_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, stm32_dac_core_resume)
+ SET_RUNTIME_PM_OPS(stm32_dac_core_runtime_suspend,
+ stm32_dac_core_runtime_resume,
+ NULL)
+};
+
static const struct stm32_dac_cfg stm32h7_dac_cfg = {
.has_hfsel = true,
};
@@ -182,6 +257,7 @@ static struct platform_driver stm32_dac_driver = {
.driver = {
.name = "stm32-dac-core",
.of_match_table = stm32_dac_of_match,
+ .pm = &stm32_dac_core_pm_ops,
},
};
module_platform_driver(stm32_dac_driver);
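
The probe() rework above uses the usual bring-up idiom for runtime PM: take a no-resume reference, declare the device active, enable runtime PM, and drop the reference only once setup has finished, so the device cannot autosuspend mid-probe. The skeleton, with a hypothetical example_hw_start() standing in for stm32_dac_core_hw_start():

static int example_hw_start(struct device *dev);        /* hypothetical */

static int example_probe_pm(struct device *dev)
{
        int ret;

        pm_runtime_get_noresume(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        ret = example_hw_start(dev);
        if (ret) {
                pm_runtime_disable(dev);
                pm_runtime_set_suspended(dev);
                pm_runtime_put_noidle(dev);
                return ret;
        }

        pm_runtime_put(dev);            /* allow autosuspend from here */
        return 0;
}
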
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index cce26a3a6627..f22c1d9129b2 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "stm32-dac-core.h"
@@ -20,6 +21,8 @@
#define STM32_DAC_CHANNEL_2 2
#define STM32_DAC_IS_CHAN_1(ch) ((ch) & STM32_DAC_CHANNEL_1)
+#define STM32_DAC_AUTO_SUSPEND_DELAY_MS 2000
+
/**
* struct stm32_dac - private data of DAC driver
* @common: reference to DAC common data
@@ -49,15 +52,34 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
bool enable)
{
struct stm32_dac *dac = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
u32 msk = STM32_DAC_IS_CHAN_1(ch) ? STM32_DAC_CR_EN1 : STM32_DAC_CR_EN2;
u32 en = enable ? msk : 0;
int ret;
+ /* Already enabled or disabled? */
+ mutex_lock(&indio_dev->mlock);
+ ret = stm32_dac_is_enabled(indio_dev, ch);
+ if (ret < 0 || enable == !!ret) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret < 0 ? ret : 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ }
+
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
+ mutex_unlock(&indio_dev->mlock);
if (ret < 0) {
dev_err(&indio_dev->dev, "%s failed\n", en ?
"Enable" : "Disable");
- return ret;
+ goto err_put_pm;
}
/*
@@ -68,7 +90,20 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
if (en && dac->common->hfsel)
udelay(1);
+ if (!enable) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
return 0;
+
+err_put_pm:
+ if (enable) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
}
static int stm32_dac_get_value(struct stm32_dac *dac, int channel, int *val)
@@ -272,6 +307,7 @@ static int stm32_dac_chan_of_init(struct iio_dev *indio_dev)
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct stm32_dac *dac;
int ret;
@@ -296,9 +332,61 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- return devm_iio_device_register(&pdev->dev, indio_dev);
+ /* Get stm32-dac-core PM online */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, STM32_DAC_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_pm_put;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ return ret;
}
+static int stm32_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_dac_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int channel = indio_dev->channels[0].channel;
+ int ret;
+
+ /* Ensure DAC is disabled before suspend */
+ ret = stm32_dac_is_enabled(indio_dev, channel);
+ if (ret)
+ return ret < 0 ? ret : -EBUSY;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static const struct dev_pm_ops stm32_dac_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_dac_suspend, pm_runtime_force_resume)
+};
+
static const struct of_device_id stm32_dac_of_match[] = {
{ .compatible = "st,stm32-dac", },
{},
@@ -307,9 +395,11 @@ MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
static struct platform_driver stm32_dac_driver = {
.probe = stm32_dac_probe,
+ .remove = stm32_dac_remove,
.driver = {
.name = "stm32-dac",
.of_match_table = stm32_dac_of_match,
+ .pm = &stm32_dac_pm_ops,
},
};
module_platform_driver(stm32_dac_driver);
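
The per-channel enable path above pairs one runtime-PM reference with the enabled state: taken with pm_runtime_get_sync() on enable, dropped through autosuspend on disable and on the enable error path. Stripped of the regmap work, the pairing is (sketch):

static int example_set_enable(struct device *dev, bool enable)
{
        int ret;

        if (enable) {
                ret = pm_runtime_get_sync(dev);
                if (ret < 0) {
                        /* get_sync takes the ref even on failure */
                        pm_runtime_put_noidle(dev);
                        return ret;
                }
                return 0;
        }

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
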
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 0ec4d2609ef9..71f8a5c471c4 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -172,7 +172,6 @@ static int vf610_dac_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct vf610_dac *info;
- struct resource *mem;
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev,
@@ -185,8 +184,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index 236220d6de02..1b84b8e112fe 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -38,10 +38,12 @@ struct adis16080_chip_info {
* @us: actual spi_device to write data
* @info: chip specific parameters
* @buf: transmit or receive buffer
+ * @lock: lock to protect buffer during reads
**/
struct adis16080_state {
struct spi_device *us;
const struct adis16080_chip_info *info;
+ struct mutex lock;
__be16 buf ____cacheline_aligned;
};
@@ -82,9 +84,9 @@ static int adis16080_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = adis16080_read_sample(indio_dev, chan->address, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
@@ -196,6 +198,8 @@ static int adis16080_probe(struct spi_device *spi)
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
+ mutex_init(&st->lock);
+
/* Allocate the comms buffers */
st->us = spi;
st->info = &adis16080_chip_info[id->driver_data];
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index de3f66f89496..79e63c8a2ea8 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -76,9 +76,7 @@ static int adis16130_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
/* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
ret = adis16130_spi_read(indio_dev, chan->address, &temp);
- mutex_unlock(&indio_dev->mlock);
if (ret)
return ret;
*val = temp;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 5bec7ad53d8b..d637d52d051a 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -80,19 +80,19 @@ static ssize_t adis16136_show_serial(struct file *file,
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT1, &lot1);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT2, &lot2);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_LOT3, &lot3);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.4x%.4x%.4x-%.4x\n", lot1, lot2,
@@ -116,7 +116,7 @@ static int adis16136_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -134,7 +134,7 @@ static int adis16136_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -191,7 +191,7 @@ static int adis16136_get_freq(struct adis16136 *adis16136, unsigned int *freq)
int ret;
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
*freq = 32768 / (t + 1);
@@ -228,7 +228,7 @@ static ssize_t adis16136_read_frequency(struct device *dev,
int ret;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
return ret;
return sprintf(buf, "%d\n", freq);
@@ -256,7 +256,7 @@ static int adis16136_set_filter(struct iio_dev *indio_dev, int val)
int i, ret;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
return ret;
for (i = ARRAY_SIZE(adis16136_3db_divisors) - 1; i >= 1; i--) {
@@ -277,11 +277,11 @@ static int adis16136_get_filter(struct iio_dev *indio_dev, int *val)
mutex_lock(&indio_dev->mlock);
ret = adis_read_reg_16(&adis16136->adis, ADIS16136_REG_AVG_CNT, &val16);
- if (ret < 0)
+ if (ret)
goto err_unlock;
ret = adis16136_get_freq(adis16136, &freq);
- if (ret < 0)
+ if (ret)
goto err_unlock;
*val = freq / adis16136_3db_divisors[val16 & 0x07];
@@ -318,7 +318,7 @@ static int adis16136_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBBIAS:
ret = adis_read_reg_32(&adis16136->adis,
ADIS16136_REG_GYRO_OFF2, &val32);
- if (ret < 0)
+ if (ret)
return ret;
*val = sign_extend32(val32, 31);
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 998fb8d66fe3..981ae2291505 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -154,7 +154,7 @@ static int itg3200_write_raw(struct iio_dev *indio_dev,
t);
mutex_unlock(&indio_dev->mlock);
- return ret;
+ return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 80154bca18b6..8e908a749f95 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -543,7 +543,7 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
toread = bytes_per_datum;
offset = 1;
/* Put in some dummy value */
- fifo_values[0] = 0xAAAA;
+ fifo_values[0] = cpu_to_be16(0xAAAA);
}
ret = regmap_bulk_read(mpu3050->map,
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index c0acbb5d2ffb..57be68b291fa 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index bfe1cdb16846..963ff043eecf 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -278,31 +278,34 @@ static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
struct hdc100x_data *data = iio_priv(indio_dev);
int ret;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
/* Buffer is enabled. First set ACQ Mode, then attach poll func */
mutex_lock(&data->lock);
ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
HDC100X_REG_CONFIG_ACQ_MODE);
mutex_unlock(&data->lock);
if (ret)
- return ret;
+ iio_triggered_buffer_predisable(indio_dev);
- return iio_triggered_buffer_postenable(indio_dev);
+ return ret;
}
static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
{
struct hdc100x_data *data = iio_priv(indio_dev);
- int ret;
-
- /* First detach poll func, then reset ACQ mode. OK to disable buffer */
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret)
- return ret;
+ int ret, ret2;
mutex_lock(&data->lock);
ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
mutex_unlock(&data->lock);
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret2;
+
return ret;
}
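
The two hdc100x hunks above make enable and disable true mirrors: postenable attaches the poll function first and detaches it again if the ACQ-mode write fails; predisable reverses the order and still runs every step, reporting the first error. The skeleton, with hypothetical example_hw_start()/example_hw_stop() standing in for the hdc100x_update_config() calls:

static int example_hw_start(struct iio_dev *indio_dev); /* hypothetical */
static int example_hw_stop(struct iio_dev *indio_dev);  /* hypothetical */

static int example_postenable(struct iio_dev *indio_dev)
{
        int ret;

        ret = iio_triggered_buffer_postenable(indio_dev);
        if (ret)
                return ret;

        ret = example_hw_start(indio_dev);
        if (ret)
                iio_triggered_buffer_predisable(indio_dev);     /* unwind */

        return ret;
}

static int example_predisable(struct iio_dev *indio_dev)
{
        int ret = example_hw_stop(indio_dev);
        int ret2 = iio_triggered_buffer_predisable(indio_dev);

        return ret ? ret : ret2;        /* first failure wins */
}
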
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index f3c7282321a8..60bb1029e759 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -40,6 +40,33 @@ config ADIS16480
source "drivers/iio/imu/bmi160/Kconfig"
+config FXOS8700
+ tristate
+
+config FXOS8700_I2C
+ tristate "NXP FXOS8700 I2C driver"
+ depends on I2C
+ select FXOS8700
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the NXP FXOS8700 m+g combo
+ sensor on I2C.
+
+ This driver can also be built as a module. If so, the module will be
+ called fxos8700_i2c.
+
+config FXOS8700_SPI
+ tristate "NXP FXOS8700 SPI driver"
+ depends on SPI
+ select FXOS8700
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the NXP FXOS8700 m+g combo
+ sensor on SPI.
+
+ This driver can also be built as a module. If so, the module will be
+ called fxos8700_spi.
+
config KMX61
tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
depends on I2C
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 4a6958865504..5237fd4bc384 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -14,6 +14,11 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+
+obj-$(CONFIG_FXOS8700) += fxos8700_core.o
+obj-$(CONFIG_FXOS8700_I2C) += fxos8700_i2c.o
+obj-$(CONFIG_FXOS8700_SPI) += fxos8700_spi.o
+
obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 2cd2cc2316c6..e14c8536fd09 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -229,7 +229,8 @@ int adis_debugfs_reg_access(struct iio_dev *indio_dev,
int ret;
ret = adis_read_reg_16(adis, reg, &val16);
- *readval = val16;
+ if (ret == 0)
+ *readval = val16;
return ret;
} else {
@@ -286,7 +287,7 @@ int adis_check_status(struct adis *adis)
int i;
ret = adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
- if (ret < 0)
+ if (ret)
return ret;
status &= adis->data->status_error_mask;
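
The adis.c hunks above, and the long run of `ret < 0` to `ret` conversions in adis16136/16400/16460/16480 that follow, rely on the adis register helpers returning 0 on success and a negative errno on failure, never a positive count. Checking `if (ret)` states that contract directly and lets the output be written only on success; as a sketch (illustrative name):

static int example_read_checked(struct adis *adis, unsigned int reg,
                                u16 *out)
{
        u16 val16;
        int ret;

        ret = adis_read_reg_16(adis, reg, &val16);
        if (ret)        /* 0 on success, negative errno otherwise */
                return ret;

        *out = val16;   /* store only on success, as in adis.c above */
        return 0;
}
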
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 0575ff706bd4..44e46dc96e00 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -217,16 +217,16 @@ static ssize_t adis16400_show_serial_number(struct file *file,
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID1, &lot1);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_LOT_ID2, &lot2);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&st->adis, ADIS16334_SERIAL_NUMBER,
&serial_number);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.4x-%.4x-%.4x\n", lot1, lot2,
@@ -249,7 +249,7 @@ static int adis16400_show_product_id(void *arg, u64 *val)
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16400_PRODUCT_ID, &prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -266,7 +266,7 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16400_FLASH_CNT, &flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -327,7 +327,7 @@ static int adis16334_get_freq(struct adis16400_state *st)
uint16_t t;
ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
t >>= ADIS16334_RATE_DIV_SHIFT;
@@ -359,7 +359,7 @@ static int adis16400_get_freq(struct adis16400_state *st)
uint16_t t;
ret = adis_read_reg_16(&st->adis, ADIS16400_SMPL_PRD, &t);
- if (ret < 0)
+ if (ret)
return ret;
sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 52851 : 1638404;
@@ -416,7 +416,7 @@ static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
}
ret = adis_read_reg_16(&st->adis, ADIS16400_SENS_AVG, &val16);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_write_reg_16(&st->adis, ADIS16400_SENS_AVG,
@@ -615,7 +615,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
ret = adis_read_reg_16(&st->adis,
ADIS16400_SENS_AVG,
&val16);
- if (ret < 0) {
+ if (ret) {
mutex_unlock(&indio_dev->mlock);
return ret;
}
@@ -626,12 +626,12 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val2 = (ret % 1000) * 1000;
}
mutex_unlock(&indio_dev->mlock);
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = st->variant->get_freq(st);
- if (ret < 0)
+ if (ret)
return ret;
*val = ret / 1000;
*val2 = (ret % 1000) * 1000;
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 6aed9e84abbf..b55812521537 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -80,7 +80,7 @@ static int adis16460_show_serial_number(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
*val = serial;
@@ -98,7 +98,7 @@ static int adis16460_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16460->adis, ADIS16460_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -116,7 +116,7 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_32(&adis16460->adis, ADIS16460_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -176,7 +176,7 @@ static int adis16460_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
unsigned int freq;
ret = adis_read_reg_16(&st->adis, ADIS16460_REG_DEC_RATE, &t);
- if (ret < 0)
+ if (ret)
return ret;
freq = 2048000 / (t + 1);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8743b2f376e2..748f8bbf184d 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -181,7 +181,7 @@ static ssize_t adis16480_show_firmware_revision(struct file *file,
int ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_REV, &rev);
- if (ret < 0)
+ if (ret)
return ret;
len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
@@ -206,11 +206,11 @@ static ssize_t adis16480_show_firmware_date(struct file *file,
int ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_Y, &year);
- if (ret < 0)
+ if (ret)
return ret;
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_FIRM_DM, &md);
- if (ret < 0)
+ if (ret)
return ret;
len = snprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n",
@@ -234,7 +234,7 @@ static int adis16480_show_serial_number(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_SERIAL_NUM,
&serial);
- if (ret < 0)
+ if (ret)
return ret;
*val = serial;
@@ -252,7 +252,7 @@ static int adis16480_show_product_id(void *arg, u64 *val)
ret = adis_read_reg_16(&adis16480->adis, ADIS16480_REG_PROD_ID,
&prod_id);
- if (ret < 0)
+ if (ret)
return ret;
*val = prod_id;
@@ -270,7 +270,7 @@ static int adis16480_show_flash_count(void *arg, u64 *val)
ret = adis_read_reg_32(&adis16480->adis, ADIS16480_REG_FLASH_CNT,
&flash_count);
- if (ret < 0)
+ if (ret)
return ret;
*val = flash_count;
@@ -353,7 +353,7 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
struct adis16480 *st = iio_priv(indio_dev);
uint16_t t;
int ret;
- unsigned freq;
+ unsigned int freq;
unsigned int reg;
if (st->clk_mode == ADIS16480_CLK_PPS)
@@ -362,7 +362,7 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
reg = ADIS16480_REG_DEC_RATE;
ret = adis_read_reg_16(&st->adis, reg, &t);
- if (ret < 0)
+ if (ret)
return ret;
/*
@@ -454,18 +454,20 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev,
case IIO_MAGN:
case IIO_PRESSURE:
ret = adis_read_reg_16(&st->adis, reg, &val16);
- *bias = sign_extend32(val16, 15);
+ if (ret == 0)
+ *bias = sign_extend32(val16, 15);
break;
case IIO_ANGL_VEL:
case IIO_ACCEL:
ret = adis_read_reg_32(&st->adis, reg, &val32);
- *bias = sign_extend32(val32, 31);
+ if (ret == 0)
+ *bias = sign_extend32(val32, 31);
break;
default:
- ret = -EINVAL;
+ ret = -EINVAL;
}
- if (ret < 0)
+ if (ret)
return ret;
return IIO_VAL_INT;
@@ -492,7 +494,7 @@ static int adis16480_get_calibscale(struct iio_dev *indio_dev,
int ret;
ret = adis_read_reg_16(&st->adis, reg, &val16);
- if (ret < 0)
+ if (ret)
return ret;
*scale = sign_extend32(val16, 15);
@@ -538,7 +540,7 @@ static int adis16480_get_filter_freq(struct iio_dev *indio_dev,
enable_mask = BIT(offset + 2);
ret = adis_read_reg_16(&st->adis, reg, &val);
- if (ret < 0)
+ if (ret)
return ret;
if (!(val & enable_mask))
@@ -564,7 +566,7 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
enable_mask = BIT(offset + 2);
ret = adis_read_reg_16(&st->adis, reg, &val);
- if (ret < 0)
+ if (ret)
return ret;
if (freq == 0) {
@@ -623,9 +625,13 @@ static int adis16480_read_raw(struct iio_dev *indio_dev,
*val2 = (st->chip_info->temp_scale % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_PRESSURE:
- *val = 0;
- *val2 = 4000; /* 40ubar = 0.004 kPa */
- return IIO_VAL_INT_PLUS_MICRO;
+ /*
+ * max scale is 1310 mbar
+ * max raw value is 32767, shifted to 32 bits
+ */
+ *val = 131; /* 1310mbar = 131 kPa */
+ *val2 = 32767 << 16;
+ return IIO_VAL_FRACTIONAL;
default:
return -EINVAL;
}
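
A minimal sketch of how the IIO_VAL_FRACTIONAL pair above reaches user space, relying on the fractional format meaning val / val2 (standalone arithmetic, not driver code):

#include <stdio.h>

int main(void)
{
	/* values taken from the hunk above */
	long long val = 131;			/* 1310 mbar = 131 kPa */
	long long val2 = 32767LL << 16;		/* max raw value, shifted */

	/* scale reported via sysfs: kPa per LSB of the shifted raw value */
	printf("pressure scale = %.12f\n", (double)val / (double)val2);
	return 0;
}
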
@@ -786,13 +792,14 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
/*
- * storing the value in rad/degree and the scale in degree
- * gives us the result in rad and better precession than
- * storing the scale directly in rad.
+ * Typically we put IIO_RAD_TO_DEGREE in the denominator, which is
+ * equivalent to IIO_DEGREE_TO_RAD in the numerator and gives a
+ * better approximation. We cannot do that here, however, since the
+ * result would not fit in a 32bit variable.
*/
- .gyro_max_val = IIO_RAD_TO_DEGREE(22887),
- .gyro_max_scale = 300,
- .accel_max_val = IIO_M_S_2_TO_G(21973),
+ .gyro_max_val = 22887 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(300),
+ .accel_max_val = IIO_M_S_2_TO_G(21973 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
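
A back-of-the-envelope check of the resolution implied by the pre-shifted gyro_max_val above, assuming the read path reports gyro_max_scale / gyro_max_val as a fraction; DEG2RAD below is a plain stand-in for the kernel's IIO_DEGREE_TO_RAD macro:

#include <stdio.h>

#define DEG2RAD(d)	((d) * 3.14159265358979 / 180.0)

int main(void)
{
	/* ADIS16485 entry above: 300 deg/s over 22887 << 16 counts */
	double max_scale = DEG2RAD(300.0);
	double max_val = 22887.0 * 65536.0;

	printf("gyro scale ~= %.3e rad/s per LSB\n", max_scale / max_val);
	return 0;
}
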
@@ -802,9 +809,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16480] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(12500),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(12500 << 16),
.accel_max_scale = 10,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -814,9 +821,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16485] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(20000),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(20000 << 16),
.accel_max_scale = 5,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -826,9 +833,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16488] = {
.channels = adis16480_channels,
.num_channels = ARRAY_SIZE(adis16480_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(22500),
+ .gyro_max_val = 22500 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(22500 << 16),
.accel_max_scale = 18,
.temp_scale = 5650, /* 5.65 milli degree Celsius */
.int_clk = 2460000,
@@ -838,9 +845,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_1] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 125,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -851,9 +858,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_2] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -864,9 +871,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16495_3] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 2000,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 8,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -877,9 +884,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_1] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 125,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(125),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -890,9 +897,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_2] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(18000),
- .gyro_max_scale = 450,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 18000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(450),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -903,9 +910,9 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
[ADIS16497_3] = {
.channels = adis16485_channels,
.num_channels = ARRAY_SIZE(adis16485_channels),
- .gyro_max_val = IIO_RAD_TO_DEGREE(20000),
- .gyro_max_scale = 2000,
- .accel_max_val = IIO_M_S_2_TO_G(32000),
+ .gyro_max_val = 20000 << 16,
+ .gyro_max_scale = IIO_DEGREE_TO_RAD(2000),
+ .accel_max_val = IIO_M_S_2_TO_G(32000 << 16),
.accel_max_scale = 40,
.temp_scale = 12500, /* 12.5 milli degree Celsius */
.int_clk = 4250000,
@@ -919,6 +926,7 @@ static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
.update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
};
static int adis16480_stop_device(struct iio_dev *indio_dev)
@@ -940,7 +948,7 @@ static int adis16480_enable_irq(struct adis *adis, bool enable)
int ret;
ret = adis_read_reg_16(adis, ADIS16480_REG_FNCTIO_CTRL, &val);
- if (ret < 0)
+ if (ret)
return ret;
val &= ~ADIS16480_DRDY_EN_MSK;
@@ -1118,7 +1126,7 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
int ret;
ret = adis_read_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, &val);
- if (ret < 0)
+ if (ret)
return ret;
pin = adis16480_of_get_ext_clk_pin(st, of_node);
@@ -1144,7 +1152,7 @@ static int adis16480_ext_clk_config(struct adis16480 *st,
val |= mode;
ret = adis_write_reg_16(&st->adis, ADIS16480_REG_FNCTIO_CTRL, val);
- if (ret < 0)
+ if (ret)
return ret;
return clk_prepare_enable(st->ext_clk);
diff --git a/drivers/iio/imu/fxos8700.h b/drivers/iio/imu/fxos8700.h
new file mode 100644
index 000000000000..6dfb8d7099e4
--- /dev/null
+++ b/drivers/iio/imu/fxos8700.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FXOS8700_H_
+#define FXOS8700_H_
+
+extern const struct regmap_config fxos8700_regmap_config;
+
+int fxos8700_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi);
+
+#endif /* FXOS8700_H_ */
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
new file mode 100644
index 000000000000..7b47be44ea59
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU (accelerometer plus magnetometer)
+ *
+ * IIO core driver for FXOS8700, with support for I2C/SPI busses
+ *
+ * TODO: Buffer, trigger, and IRQ support
+ */
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include "fxos8700.h"
+
+/* Register Definitions */
+#define FXOS8700_STATUS 0x00
+#define FXOS8700_OUT_X_MSB 0x01
+#define FXOS8700_OUT_X_LSB 0x02
+#define FXOS8700_OUT_Y_MSB 0x03
+#define FXOS8700_OUT_Y_LSB 0x04
+#define FXOS8700_OUT_Z_MSB 0x05
+#define FXOS8700_OUT_Z_LSB 0x06
+#define FXOS8700_F_SETUP 0x09
+#define FXOS8700_TRIG_CFG 0x0a
+#define FXOS8700_SYSMOD 0x0b
+#define FXOS8700_INT_SOURCE 0x0c
+#define FXOS8700_WHO_AM_I 0x0d
+#define FXOS8700_XYZ_DATA_CFG 0x0e
+#define FXOS8700_HP_FILTER_CUTOFF 0x0f
+#define FXOS8700_PL_STATUS 0x10
+#define FXOS8700_PL_CFG 0x11
+#define FXOS8700_PL_COUNT 0x12
+#define FXOS8700_PL_BF_ZCOMP 0x13
+#define FXOS8700_PL_THS_REG 0x14
+#define FXOS8700_A_FFMT_CFG 0x15
+#define FXOS8700_A_FFMT_SRC 0x16
+#define FXOS8700_A_FFMT_THS 0x17
+#define FXOS8700_A_FFMT_COUNT 0x18
+#define FXOS8700_TRANSIENT_CFG 0x1d
+#define FXOS8700_TRANSIENT_SRC 0x1e
+#define FXOS8700_TRANSIENT_THS 0x1f
+#define FXOS8700_TRANSIENT_COUNT 0x20
+#define FXOS8700_PULSE_CFG 0x21
+#define FXOS8700_PULSE_SRC 0x22
+#define FXOS8700_PULSE_THSX 0x23
+#define FXOS8700_PULSE_THSY 0x24
+#define FXOS8700_PULSE_THSZ 0x25
+#define FXOS8700_PULSE_TMLT 0x26
+#define FXOS8700_PULSE_LTCY 0x27
+#define FXOS8700_PULSE_WIND 0x28
+#define FXOS8700_ASLP_COUNT 0x29
+#define FXOS8700_CTRL_REG1 0x2a
+#define FXOS8700_CTRL_REG2 0x2b
+#define FXOS8700_CTRL_REG3 0x2c
+#define FXOS8700_CTRL_REG4 0x2d
+#define FXOS8700_CTRL_REG5 0x2e
+#define FXOS8700_OFF_X 0x2f
+#define FXOS8700_OFF_Y 0x30
+#define FXOS8700_OFF_Z 0x31
+#define FXOS8700_M_DR_STATUS 0x32
+#define FXOS8700_M_OUT_X_MSB 0x33
+#define FXOS8700_M_OUT_X_LSB 0x34
+#define FXOS8700_M_OUT_Y_MSB 0x35
+#define FXOS8700_M_OUT_Y_LSB 0x36
+#define FXOS8700_M_OUT_Z_MSB 0x37
+#define FXOS8700_M_OUT_Z_LSB 0x38
+#define FXOS8700_CMP_X_MSB 0x39
+#define FXOS8700_CMP_X_LSB 0x3a
+#define FXOS8700_CMP_Y_MSB 0x3b
+#define FXOS8700_CMP_Y_LSB 0x3c
+#define FXOS8700_CMP_Z_MSB 0x3d
+#define FXOS8700_CMP_Z_LSB 0x3e
+#define FXOS8700_M_OFF_X_MSB 0x3f
+#define FXOS8700_M_OFF_X_LSB 0x40
+#define FXOS8700_M_OFF_Y_MSB 0x41
+#define FXOS8700_M_OFF_Y_LSB 0x42
+#define FXOS8700_M_OFF_Z_MSB 0x43
+#define FXOS8700_M_OFF_Z_LSB 0x44
+#define FXOS8700_MAX_X_MSB 0x45
+#define FXOS8700_MAX_X_LSB 0x46
+#define FXOS8700_MAX_Y_MSB 0x47
+#define FXOS8700_MAX_Y_LSB 0x48
+#define FXOS8700_MAX_Z_MSB 0x49
+#define FXOS8700_MAX_Z_LSB 0x4a
+#define FXOS8700_MIN_X_MSB 0x4b
+#define FXOS8700_MIN_X_LSB 0x4c
+#define FXOS8700_MIN_Y_MSB 0x4d
+#define FXOS8700_MIN_Y_LSB 0x4e
+#define FXOS8700_MIN_Z_MSB 0x4f
+#define FXOS8700_MIN_Z_LSB 0x50
+#define FXOS8700_TEMP 0x51
+#define FXOS8700_M_THS_CFG 0x52
+#define FXOS8700_M_THS_SRC 0x53
+#define FXOS8700_M_THS_X_MSB 0x54
+#define FXOS8700_M_THS_X_LSB 0x55
+#define FXOS8700_M_THS_Y_MSB 0x56
+#define FXOS8700_M_THS_Y_LSB 0x57
+#define FXOS8700_M_THS_Z_MSB 0x58
+#define FXOS8700_M_THS_Z_LSB 0x59
+#define FXOS8700_M_THS_COUNT 0x5a
+#define FXOS8700_M_CTRL_REG1 0x5b
+#define FXOS8700_M_CTRL_REG2 0x5c
+#define FXOS8700_M_CTRL_REG3 0x5d
+#define FXOS8700_M_INT_SRC 0x5e
+#define FXOS8700_A_VECM_CFG 0x5f
+#define FXOS8700_A_VECM_THS_MSB 0x60
+#define FXOS8700_A_VECM_THS_LSB 0x61
+#define FXOS8700_A_VECM_CNT 0x62
+#define FXOS8700_A_VECM_INITX_MSB 0x63
+#define FXOS8700_A_VECM_INITX_LSB 0x64
+#define FXOS8700_A_VECM_INITY_MSB 0x65
+#define FXOS8700_A_VECM_INITY_LSB 0x66
+#define FXOS8700_A_VECM_INITZ_MSB 0x67
+#define FXOS8700_A_VECM_INITZ_LSB 0x68
+#define FXOS8700_M_VECM_CFG 0x69
+#define FXOS8700_M_VECM_THS_MSB 0x6a
+#define FXOS8700_M_VECM_THS_LSB 0x6b
+#define FXOS8700_M_VECM_CNT 0x6c
+#define FXOS8700_M_VECM_INITX_MSB 0x6d
+#define FXOS8700_M_VECM_INITX_LSB 0x6e
+#define FXOS8700_M_VECM_INITY_MSB 0x6f
+#define FXOS8700_M_VECM_INITY_LSB 0x70
+#define FXOS8700_M_VECM_INITZ_MSB 0x71
+#define FXOS8700_M_VECM_INITZ_LSB 0x72
+#define FXOS8700_A_FFMT_THS_X_MSB 0x73
+#define FXOS8700_A_FFMT_THS_X_LSB 0x74
+#define FXOS8700_A_FFMT_THS_Y_MSB 0x75
+#define FXOS8700_A_FFMT_THS_Y_LSB 0x76
+#define FXOS8700_A_FFMT_THS_Z_MSB 0x77
+#define FXOS8700_A_FFMT_THS_Z_LSB 0x78
+#define FXOS8700_A_TRAN_INIT_MSB 0x79
+#define FXOS8700_A_TRAN_INIT_LSB_X 0x7a
+#define FXOS8700_A_TRAN_INIT_LSB_Y 0x7b
+#define FXOS8700_A_TRAN_INIT_LSB_Z 0x7d
+#define FXOS8700_TM_NVM_LOCK 0x7e
+#define FXOS8700_NVM_DATA0_35 0x80
+#define FXOS8700_NVM_DATA_BNK3 0xa4
+#define FXOS8700_NVM_DATA_BNK2 0xa5
+#define FXOS8700_NVM_DATA_BNK1 0xa6
+#define FXOS8700_NVM_DATA_BNK0 0xa7
+
+/* Bit definitions for FXOS8700_CTRL_REG1 */
+#define FXOS8700_CTRL_ODR_MSK 0x38
+#define FXOS8700_CTRL_ODR_MAX 0x00
+#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3)
+
+/* Bit definitions for FXOS8700_M_CTRL_REG1 */
+#define FXOS8700_HMS_MASK GENMASK(1, 0)
+#define FXOS8700_OS_MASK GENMASK(4, 2)
+
+/* Bit definitions for FXOS8700_M_CTRL_REG2 */
+#define FXOS8700_MAXMIN_RST BIT(2)
+#define FXOS8700_MAXMIN_DIS_THS BIT(3)
+#define FXOS8700_MAXMIN_DIS BIT(4)
+
+#define FXOS8700_ACTIVE 0x01
+#define FXOS8700_ACTIVE_MIN_USLEEP 4000 /* from table 6 in datasheet */
+
+#define FXOS8700_DEVICE_ID 0xC7
+#define FXOS8700_PRE_DEVICE_ID 0xC4
+#define FXOS8700_DATA_BUF_SIZE 3
+
+struct fxos8700_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+ __be16 buf[FXOS8700_DATA_BUF_SIZE] ____cacheline_aligned;
+};
+
+/* Regmap info */
+static const struct regmap_range read_range[] = {
+ {
+ .range_min = FXOS8700_STATUS,
+ .range_max = FXOS8700_A_FFMT_COUNT,
+ }, {
+ .range_min = FXOS8700_TRANSIENT_CFG,
+ .range_max = FXOS8700_A_FFMT_THS_Z_LSB,
+ },
+};
+
+static const struct regmap_range write_range[] = {
+ {
+ .range_min = FXOS8700_F_SETUP,
+ .range_max = FXOS8700_TRIG_CFG,
+ }, {
+ .range_min = FXOS8700_XYZ_DATA_CFG,
+ .range_max = FXOS8700_HP_FILTER_CUTOFF,
+ }, {
+ .range_min = FXOS8700_PL_CFG,
+ .range_max = FXOS8700_A_FFMT_CFG,
+ }, {
+ .range_min = FXOS8700_A_FFMT_THS,
+ .range_max = FXOS8700_TRANSIENT_CFG,
+ }, {
+ .range_min = FXOS8700_TRANSIENT_THS,
+ .range_max = FXOS8700_PULSE_CFG,
+ }, {
+ .range_min = FXOS8700_PULSE_THSX,
+ .range_max = FXOS8700_OFF_Z,
+ }, {
+ .range_min = FXOS8700_M_OFF_X_MSB,
+ .range_max = FXOS8700_M_OFF_Z_LSB,
+ }, {
+ .range_min = FXOS8700_M_THS_CFG,
+ .range_max = FXOS8700_M_THS_CFG,
+ }, {
+ .range_min = FXOS8700_M_THS_X_MSB,
+ .range_max = FXOS8700_M_CTRL_REG3,
+ }, {
+ .range_min = FXOS8700_A_VECM_CFG,
+ .range_max = FXOS8700_A_FFMT_THS_Z_LSB,
+ },
+};
+
+static const struct regmap_access_table driver_read_table = {
+ .yes_ranges = read_range,
+ .n_yes_ranges = ARRAY_SIZE(read_range),
+};
+
+static const struct regmap_access_table driver_write_table = {
+ .yes_ranges = write_range,
+ .n_yes_ranges = ARRAY_SIZE(write_range),
+};
+
+const struct regmap_config fxos8700_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = FXOS8700_NVM_DATA_BNK0,
+ .rd_table = &driver_read_table,
+ .wr_table = &driver_write_table,
+};
+EXPORT_SYMBOL(fxos8700_regmap_config);
+
+#define FXOS8700_CHANNEL(_type, _axis) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+enum fxos8700_accel_scale_bits {
+ MODE_2G = 0,
+ MODE_4G,
+ MODE_8G,
+};
+
+/* scan indexes follow DATA register order */
+enum fxos8700_scan_axis {
+ FXOS8700_SCAN_ACCEL_X = 0,
+ FXOS8700_SCAN_ACCEL_Y,
+ FXOS8700_SCAN_ACCEL_Z,
+ FXOS8700_SCAN_MAGN_X,
+ FXOS8700_SCAN_MAGN_Y,
+ FXOS8700_SCAN_MAGN_Z,
+ FXOS8700_SCAN_RHALL,
+ FXOS8700_SCAN_TIMESTAMP,
+};
+
+enum fxos8700_sensor {
+ FXOS8700_ACCEL = 0,
+ FXOS8700_MAGN,
+ FXOS8700_NUM_SENSORS /* must be last */
+};
+
+enum fxos8700_int_pin {
+ FXOS8700_PIN_INT1,
+ FXOS8700_PIN_INT2
+};
+
+struct fxos8700_scale {
+ u8 bits;
+ int uscale;
+};
+
+struct fxos8700_odr {
+ u8 bits;
+ int odr;
+ int uodr;
+};
+
+static const struct fxos8700_scale fxos8700_accel_scale[] = {
+ { MODE_2G, 244},
+ { MODE_4G, 488},
+ { MODE_8G, 976},
+};
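
The uscale entries above line up with the in_accel_scale_available string exported later in this file; a small sketch of how user space applies the exported scale to a raw sample (example values only):

#include <stdio.h>

int main(void)
{
	int raw = 1024;		/* example raw accelerometer reading */
	int uscale = 488;	/* MODE_4G entry in the table above */

	/* sysfs reports uscale / 1e6; user space multiplies it in */
	printf("value = %f\n", raw * (uscale / 1e6));
	return 0;
}
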
+
+/*
+ * Accelerometer and magnetometer have the same ODR options, set in the
+ * CTRL_REG1 register. ODR is halved when using both sensors at once in
+ * hybrid mode.
+ */
+static const struct fxos8700_odr fxos8700_odr[] = {
+ {0x00, 800, 0},
+ {0x01, 400, 0},
+ {0x02, 200, 0},
+ {0x03, 100, 0},
+ {0x04, 50, 0},
+ {0x05, 12, 500000},
+ {0x06, 6, 250000},
+ {0x07, 1, 562500},
+};
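
Per the comment above the table, the rate halves when both sensors run at once; a sketch of that arithmetic, with an illustrative helper that is not part of the driver:

/* Effective output data rate for a table entry, mirroring the hybrid
 * mode halving described above. Illustrative helper only.
 */
static void effective_odr(int odr_hz, int uodr_hz, int hybrid,
			  int *out_hz, int *out_uhz)
{
	long long uhz = (long long)odr_hz * 1000000 + uodr_hz;

	if (hybrid)
		uhz /= 2;	/* ODR is halved in hybrid mode */

	*out_hz = uhz / 1000000;
	*out_uhz = uhz % 1000000;
}
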
+
+static const struct iio_chan_spec fxos8700_channels[] = {
+ FXOS8700_CHANNEL(IIO_ACCEL, X),
+ FXOS8700_CHANNEL(IIO_ACCEL, Y),
+ FXOS8700_CHANNEL(IIO_ACCEL, Z),
+ FXOS8700_CHANNEL(IIO_MAGN, X),
+ FXOS8700_CHANNEL(IIO_MAGN, Y),
+ FXOS8700_CHANNEL(IIO_MAGN, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(FXOS8700_SCAN_TIMESTAMP),
+};
+
+static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
+{
+ switch (iio_type) {
+ case IIO_ACCEL:
+ return FXOS8700_ACCEL;
+ case IIO_MAGN:
+ return FXOS8700_MAGN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxos8700_set_active_mode(struct fxos8700_data *data,
+ enum fxos8700_sensor t, bool mode)
+{
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_ACTIVE, mode);
+ if (ret)
+ return ret;
+
+ usleep_range(FXOS8700_ACTIVE_MIN_USLEEP,
+ FXOS8700_ACTIVE_MIN_USLEEP + 1000);
+
+ return 0;
+}
+
+static int fxos8700_set_scale(struct fxos8700_data *data,
+ enum fxos8700_sensor t, int uscale)
+{
+ int i;
+ static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+ struct device *dev = regmap_get_device(data->regmap);
+
+ if (t == FXOS8700_MAGN) {
+ dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < scale_num; i++)
+ if (fxos8700_accel_scale[i].uscale == uscale)
+ break;
+
+ if (i == scale_num)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ fxos8700_accel_scale[i].bits);
+}
+
+static int fxos8700_get_scale(struct fxos8700_data *data,
+ enum fxos8700_sensor t, int *uscale)
+{
+ int i, ret, val;
+ static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
+
+ if (t == FXOS8700_MAGN) {
+ *uscale = 1200; /* Magnetometer is locked at 1200uT */
+ return 0;
+ }
+
+ ret = regmap_read(data->regmap, FXOS8700_XYZ_DATA_CFG, &val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < scale_num; i++) {
+ if (fxos8700_accel_scale[i].bits == (val & 0x3)) {
+ *uscale = fxos8700_accel_scale[i].uscale;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
+ int axis, int *val)
+{
+ u8 base, reg;
+ int ret;
+ enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
+
+ base = (type == FXOS8700_MAGN) ? FXOS8700_M_OUT_X_MSB : FXOS8700_OUT_X_MSB;
+
+ /* Block read 6 bytes of device output registers to avoid data loss */
+ ret = regmap_bulk_read(data->regmap, base, data->buf,
+ sizeof(data->buf));
+ if (ret)
+ return ret;
+
+ /* Convert axis to buffer index */
+ reg = axis - IIO_MOD_X;
+
+ /* Convert to native endianness */
+ *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+
+ return 0;
+}
+
+static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ int odr, int uodr)
+{
+ int i, ret, val;
+ bool active_mode;
+ static const int odr_num = ARRAY_SIZE(fxos8700_odr);
+
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ active_mode = val & FXOS8700_ACTIVE;
+
+ for (i = 0; i < odr_num; i++)
+ if (fxos8700_odr[i].odr == odr && fxos8700_odr[i].uodr == uodr)
+ break;
+
+ if (i >= odr_num)
+ return -EINVAL;
+
+ if (active_mode) {
+ /*
+ * The device must be in standby mode to change any of the
+ * other fields within CTRL_REG1
+ */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ val & ~FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_update_bits(data->regmap,
+ FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
+ fxos8700_odr[i].bits << 3 | active_mode);
+}
+
+static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
+ int *odr, int *uodr)
+{
+ int i, val, ret;
+ static const int odr_num = ARRAY_SIZE(fxos8700_odr);
+
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ val &= FXOS8700_CTRL_ODR_MSK;
+
+ for (i = 0; i < odr_num; i++)
+ if (val == fxos8700_odr[i].bits)
+ break;
+
+ if (i >= odr_num)
+ return -EINVAL;
+
+ *odr = fxos8700_odr[i].odr;
+ *uodr = fxos8700_odr[i].uodr;
+
+ return 0;
+}
+
+static int fxos8700_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct fxos8700_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = fxos8700_get_data(data, chan->type, chan->channel2, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ ret = fxos8700_get_scale(data, fxos8700_to_sensor(chan->type),
+ val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = fxos8700_get_odr(data, fxos8700_to_sensor(chan->type),
+ val, val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fxos8700_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct fxos8700_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return fxos8700_set_scale(data, fxos8700_to_sensor(chan->type),
+ val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return fxos8700_set_odr(data, fxos8700_to_sensor(chan->type),
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+ "1.5625 6.25 12.5 50 100 200 400 800");
+static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
+ "1.5625 6.25 12.5 50 100 200 400 800");
+static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+
+static struct attribute *fxos8700_attrs[] = {
+ &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_magn_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_in_magn_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fxos8700_attrs_group = {
+ .attrs = fxos8700_attrs,
+};
+
+static const struct iio_info fxos8700_info = {
+ .read_raw = fxos8700_read_raw,
+ .write_raw = fxos8700_write_raw,
+ .attrs = &fxos8700_attrs_group,
+};
+
+static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
+{
+ int ret;
+ unsigned int val;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_read(data->regmap, FXOS8700_WHO_AM_I, &val);
+ if (ret) {
+ dev_err(dev, "Error reading chip id\n");
+ return ret;
+ }
+ if (val != FXOS8700_DEVICE_ID && val != FXOS8700_PRE_DEVICE_ID) {
+ dev_err(dev, "Wrong chip id, got %x expected %x or %x\n",
+ val, FXOS8700_DEVICE_ID, FXOS8700_PRE_DEVICE_ID);
+ return -ENODEV;
+ }
+
+ ret = fxos8700_set_active_mode(data, FXOS8700_ACCEL, true);
+ if (ret)
+ return ret;
+
+ ret = fxos8700_set_active_mode(data, FXOS8700_MAGN, true);
+ if (ret)
+ return ret;
+
+ /*
+ * The device must be in standby mode to change any of the other fields
+ * within CTRL_REG1
+ */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1, 0x00);
+ if (ret)
+ return ret;
+
+ /* Set max oversample ratio (OSR) and both devices active */
+ ret = regmap_write(data->regmap, FXOS8700_M_CTRL_REG1,
+ FXOS8700_HMS_MASK | FXOS8700_OS_MASK);
+ if (ret)
+ return ret;
+
+ /* Disable and reset min/max measurements and thresholds */
+ ret = regmap_write(data->regmap, FXOS8700_M_CTRL_REG2,
+ FXOS8700_MAXMIN_RST | FXOS8700_MAXMIN_DIS_THS |
+ FXOS8700_MAXMIN_DIS);
+ if (ret)
+ return ret;
+
+ /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+
+ /* Set for max full-scale range (+/-8G) */
+ return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+}
+
+static void fxos8700_chip_uninit(void *data)
+{
+ struct fxos8700_data *fxos8700_data = data;
+
+ fxos8700_set_active_mode(fxos8700_data, FXOS8700_ACCEL, false);
+ fxos8700_set_active_mode(fxos8700_data, FXOS8700_MAGN, false);
+}
+
+int fxos8700_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi)
+{
+ struct iio_dev *indio_dev;
+ struct fxos8700_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+
+ ret = fxos8700_chip_init(data, use_spi);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, fxos8700_chip_uninit, data);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = fxos8700_channels;
+ indio_dev->num_channels = ARRAY_SIZE(fxos8700_channels);
+ indio_dev->name = name ? name : "fxos8700";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &fxos8700_info;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_GPL(fxos8700_core_probe);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 6-Axis Acc and Mag Combo Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
new file mode 100644
index 000000000000..3ceb76366313
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU, I2C bits
+ *
+ * The 7-bit I2C slave address is determined by the SA1 and SA0 logic
+ * level inputs, as shown in the following table:
+ * SA1 | SA0 | Slave Address
+ * 0 | 0 | 0x1E
+ * 0 | 1 | 0x1D
+ * 1 | 0 | 0x1C
+ * 1 | 1 | 0x1F
+ */
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#include "fxos8700.h"
+
+static int fxos8700_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &fxos8700_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return fxos8700_core_probe(&client->dev, regmap, name, false);
+}
+
+static const struct i2c_device_id fxos8700_i2c_id[] = {
+ {"fxos8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fxos8700_i2c_id);
+
+static const struct acpi_device_id fxos8700_acpi_match[] = {
+ {"FXOS8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fxos8700_acpi_match);
+
+static const struct of_device_id fxos8700_of_match[] = {
+ { .compatible = "nxp,fxos8700" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxos8700_of_match);
+
+static struct i2c_driver fxos8700_i2c_driver = {
+ .driver = {
+ .name = "fxos8700_i2c",
+ .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .of_match_table = fxos8700_of_match,
+ },
+ .probe = fxos8700_i2c_probe,
+ .id_table = fxos8700_i2c_id,
+};
+module_i2c_driver(fxos8700_i2c_driver);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/fxos8700_spi.c b/drivers/iio/imu/fxos8700_spi.c
new file mode 100644
index 000000000000..57e7bb6444e7
--- /dev/null
+++ b/drivers/iio/imu/fxos8700_spi.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FXOS8700 - NXP IMU, SPI bits
+ */
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "fxos8700.h"
+
+static int fxos8700_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &fxos8700_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return fxos8700_core_probe(&spi->dev, regmap, id->name, true);
+}
+
+static const struct spi_device_id fxos8700_spi_id[] = {
+ {"fxos8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, fxos8700_spi_id);
+
+static const struct acpi_device_id fxos8700_acpi_match[] = {
+ {"FXOS8700", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fxos8700_acpi_match);
+
+static const struct of_device_id fxos8700_of_match[] = {
+ { .compatible = "nxp,fxos8700" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, fxos8700_of_match);
+
+static struct spi_driver fxos8700_spi_driver = {
+ .probe = fxos8700_spi_probe,
+ .id_table = fxos8700_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .of_match_table = fxos8700_of_match,
+ .name = "fxos8700_spi",
+ },
+};
+module_spi_driver(fxos8700_spi_driver);
+
+MODULE_AUTHOR("Robert Jones <rjones@gateworks.com>");
+MODULE_DESCRIPTION("FXOS8700 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index 70ffe0d13d8c..c103441a906b 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -4,10 +4,11 @@
#
obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
-inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
+inv-mpu6050-y := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o \
+ inv_mpu_aux.o inv_mpu_magn.o
obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o
-inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o
+inv-mpu6050-i2c-y := inv_mpu_i2c.o inv_mpu_acpi.o
obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o
-inv-mpu6050-spi-objs := inv_mpu_spi.o
+inv-mpu6050-spi-y := inv_mpu_spi.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
new file mode 100644
index 000000000000..7327e5723f96
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "inv_mpu_aux.h"
+#include "inv_mpu_iio.h"
+
+/*
+ * i2c master auxiliary bus transfer function.
+ * Requires the i2c operations to be correctly set up beforehand.
+ */
+static int inv_mpu_i2c_master_xfer(const struct inv_mpu6050_state *st)
+{
+ /* use 50hz frequency for xfer */
+ const unsigned int freq = 50;
+ const unsigned int period_ms = 1000 / freq;
+ uint8_t d;
+ unsigned int user_ctrl;
+ int ret;
+
+ /* set sample rate */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(freq);
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* start i2c master */
+ user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ goto error_restore_rate;
+
+ /* wait for xfer: 1 period + half-period margin */
+ msleep(period_ms + period_ms / 2);
+
+ /* stop i2c master */
+ user_ctrl = st->chip_config.user_ctrl;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ goto error_stop_i2c;
+
+ /* restore sample rate */
+ d = st->chip_config.divider;
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ goto error_restore_rate;
+
+ return 0;
+
+error_stop_i2c:
+ regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
+error_restore_rate:
+ regmap_write(st->map, st->reg->sample_rate_div, st->chip_config.divider);
+ return ret;
+}
+
+/**
+ * inv_mpu_aux_init() - init i2c auxiliary bus
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_init(const struct inv_mpu6050_state *st)
+{
+ unsigned int val;
+ int ret;
+
+ /* configure i2c master */
+ val = INV_MPU6050_BITS_I2C_MST_CLK_400KHZ |
+ INV_MPU6050_BIT_WAIT_FOR_ES;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_MST_CTRL, val);
+ if (ret)
+ return ret;
+
+ /* configure i2c master delay */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, 0);
+ if (ret)
+ return ret;
+
+ val = INV_MPU6050_BIT_I2C_SLV0_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV1_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV2_DLY_EN |
+ INV_MPU6050_BIT_I2C_SLV3_DLY_EN |
+ INV_MPU6050_BIT_DELAY_ES_SHADOW;
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_MST_DELAY_CTRL, val);
+}
+
+/**
+ * inv_mpu_aux_read() - read register function for i2c auxiliary bus
+ * @st: driver internal state.
+ * @addr: chip i2c Address
+ * @reg: chip register address
+ * @val: buffer for storing read bytes
+ * @size: number of bytes to read
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_read(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t *val, size_t size)
+{
+ unsigned int status;
+ int ret;
+
+ if (size > 0x0F)
+ return -EINVAL;
+
+ /* setup i2c SLV0 control: i2c addr, register, enable + size */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
+ INV_MPU6050_BIT_I2C_SLV_RNW | addr);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN | size);
+ if (ret)
+ return ret;
+
+ /* do i2c xfer */
+ ret = inv_mpu_i2c_master_xfer(st);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* disable i2c slave */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* check i2c status */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK)
+ return -EIO;
+
+ /* read data in registers */
+ return regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
+ val, size);
+
+error_disable_i2c:
+ regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ return ret;
+}
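
A usage sketch of this helper, mirroring what inv_magn_init() does later in this patch to identify the AK8963 behind the auxiliary bus (the address and expected value come from inv_mpu_magn.c):

static int example_check_magn_id(const struct inv_mpu6050_state *st)
{
	uint8_t val;
	int ret;

	/* read the who-am-i register (0x00) at aux i2c address 0x0C */
	ret = inv_mpu_aux_read(st, 0x0C, 0x00, &val, sizeof(val));
	if (ret)
		return ret;

	return (val == 0x48) ? 0 : -ENODEV;	/* 0x48 = AK8963 WIA */
}
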
+
+/**
+ * inv_mpu_aux_write() - write register function for i2c auxiliary bus
+ * @st: driver internal state.
+ * @addr: chip i2c Address
+ * @reg: chip register address
+ * @val: 1 byte value to write
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int inv_mpu_aux_write(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t val)
+{
+ unsigned int status;
+ int ret;
+
+ /* setup i2c SLV0 control: i2c addr, register, value, enable + size */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0), addr);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0), reg);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(0), val);
+ if (ret)
+ return ret;
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN | 1);
+ if (ret)
+ return ret;
+
+ /* do i2c xfer */
+ ret = inv_mpu_i2c_master_xfer(st);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* disable i2c slave */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ if (ret)
+ goto error_disable_i2c;
+
+ /* check i2c status */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK)
+ return -EIO;
+
+ return 0;
+
+error_disable_i2c:
+ regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0), 0);
+ return ret;
+}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h
new file mode 100644
index 000000000000..b66997545762
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_aux.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#ifndef INV_MPU_AUX_H_
+#define INV_MPU_AUX_H_
+
+#include "inv_mpu_iio.h"
+
+int inv_mpu_aux_init(const struct inv_mpu6050_state *st);
+
+int inv_mpu_aux_read(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t *val, size_t size);
+
+int inv_mpu_aux_write(const struct inv_mpu6050_state *st, uint8_t addr,
+ uint8_t reg, uint8_t val);
+
+#endif /* INV_MPU_AUX_H_ */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 868281b8adb0..45e77b308238 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "inv_mpu_iio.h"
+#include "inv_mpu_magn.h"
/*
* this is the gyro scale translated from dynamic range plus/minus
@@ -103,6 +104,7 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.divider = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU6050_INIT_FIFO_RATE),
.gyro_fifo_enable = false,
.accl_fifo_enable = false,
+ .magn_fifo_enable = false,
.accl_fs = INV_MPU6050_FS_02G,
.user_ctrl = 0,
};
@@ -341,6 +343,11 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
*/
st->chip_period = NSEC_PER_MSEC;
+ /* magn chip init, noop if not present in the chip */
+ result = inv_mpu_magn_probe(st);
+ if (result)
+ goto error_power_off;
+
return inv_mpu6050_set_power_itg(st, false);
error_power_off:
@@ -420,6 +427,9 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
IIO_MOD_X, val);
break;
+ case IIO_MAGN:
+ ret = inv_mpu_magn_read(st, chan->channel2, val);
+ break;
default:
ret = -EINVAL;
break;
@@ -478,6 +488,8 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
*val2 = INV_MPU6050_TEMP_SCALE;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MAGN:
+ return inv_mpu_magn_get_scale(st, chan, val, val2);
default:
return -EINVAL;
}
@@ -719,6 +731,11 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_power_off;
+ /* update rate for magn, noop if not present in chip */
+ result = inv_mpu_magn_set_rate(st, fifo_rate);
+ if (result)
+ goto fifo_rate_fail_power_off;
+
fifo_rate_fail_power_off:
result |= inv_mpu6050_set_power_itg(st, false);
fifo_rate_fail_unlock:
@@ -804,8 +821,14 @@ inv_get_mount_matrix(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct inv_mpu6050_state *data = iio_priv(indio_dev);
+ const struct iio_mount_matrix *matrix;
+
+ if (chan->type == IIO_MAGN)
+ matrix = &data->magn_orient;
+ else
+ matrix = &data->orientation;
- return &data->orientation;
+ return matrix;
}
static const struct iio_chan_spec_ext_info inv_ext_info[] = {
@@ -873,6 +896,98 @@ static const unsigned long inv_mpu_scan_masks[] = {
0,
};
+#define INV_MPU9X50_MAGN_CHAN(_chan2, _bits, _index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ .shift = 0, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = inv_ext_info, \
+ }
+
+static const struct iio_chan_spec inv_mpu9250_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_MPU9X50_SCAN_TIMESTAMP),
+ /*
+ * Note that temperature should only be read via polling,
+ * not as part of the final scan elements output.
+ */
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ | BIT(IIO_CHAN_INFO_OFFSET)
+ | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ },
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_X, INV_MPU6050_SCAN_GYRO_X),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Y, INV_MPU6050_SCAN_GYRO_Y),
+ INV_MPU6050_CHAN(IIO_ANGL_VEL, IIO_MOD_Z, INV_MPU6050_SCAN_GYRO_Z),
+
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_X, INV_MPU6050_SCAN_ACCL_X),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Y, INV_MPU6050_SCAN_ACCL_Y),
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+
+ /* Magnetometer resolution is 16 bits */
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_X, 16, INV_MPU9X50_SCAN_MAGN_X),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Y, 16, INV_MPU9X50_SCAN_MAGN_Y),
+ INV_MPU9X50_MAGN_CHAN(IIO_MOD_Z, 16, INV_MPU9X50_SCAN_MAGN_Z),
+};
+
+static const unsigned long inv_mpu9x50_scan_masks[] = {
+ /* 3-axis accel */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z),
+ /* 3-axis gyro */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 3-axis magn */
+ BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 6-axis accel + gyro */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z),
+ /* 6-axis accel + magn */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 6-axis gyro + magn */
+ BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ /* 9-axis accel + gyro + magn */
+ BIT(INV_MPU6050_SCAN_ACCL_X)
+ | BIT(INV_MPU6050_SCAN_ACCL_Y)
+ | BIT(INV_MPU6050_SCAN_ACCL_Z)
+ | BIT(INV_MPU6050_SCAN_GYRO_X)
+ | BIT(INV_MPU6050_SCAN_GYRO_Y)
+ | BIT(INV_MPU6050_SCAN_GYRO_Z)
+ | BIT(INV_MPU9X50_SCAN_MAGN_X)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Y)
+ | BIT(INV_MPU9X50_SCAN_MAGN_Z),
+ 0,
+};
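
The IIO core walks available_scan_masks in order and uses the first entry that covers every channel the user enabled, so a request such as accel_x plus magn_z lands on the 6-axis accel + magn mask above and the unused axes are demuxed away. A minimal single-word sketch of that matching rule (real scan masks are bitmaps; one unsigned long is enough here):

/* Sketch: the first mask that is a superset of the wanted channels wins. */
static const unsigned long *match_scan_mask(const unsigned long *masks,
					    unsigned long wanted)
{
	for (; *masks; masks++)
		if ((*masks & wanted) == wanted)
			return masks;

	return NULL;	/* no available mask covers the request */
}
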
+
static const struct iio_chan_spec inv_icm20602_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
{
@@ -1034,14 +1149,14 @@ error_power_off:
return result;
}
-static int inv_mpu_core_enable_regulator(struct inv_mpu6050_state *st)
+static int inv_mpu_core_enable_regulator_vddio(struct inv_mpu6050_state *st)
{
int result;
result = regulator_enable(st->vddio_supply);
if (result) {
dev_err(regmap_get_device(st->map),
- "Failed to enable regulator: %d\n", result);
+ "Failed to enable vddio regulator: %d\n", result);
} else {
/* Give the device a little bit of time to start up. */
usleep_range(35000, 70000);
@@ -1050,21 +1165,29 @@ static int inv_mpu_core_enable_regulator(struct inv_mpu6050_state *st)
return result;
}
-static int inv_mpu_core_disable_regulator(struct inv_mpu6050_state *st)
+static int inv_mpu_core_disable_regulator_vddio(struct inv_mpu6050_state *st)
{
int result;
result = regulator_disable(st->vddio_supply);
if (result)
dev_err(regmap_get_device(st->map),
- "Failed to disable regulator: %d\n", result);
+ "Failed to disable vddio regulator: %d\n", result);
return result;
}
static void inv_mpu_core_disable_regulator_action(void *_data)
{
- inv_mpu_core_disable_regulator(_data);
+ struct inv_mpu6050_state *st = _data;
+ int result;
+
+ result = regulator_disable(st->vdd_supply);
+ if (result)
+ dev_err(regmap_get_device(st->map),
+ "Failed to disable vdd regulator: %d\n", result);
+
+ inv_mpu_core_disable_regulator_vddio(st);
}
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
@@ -1133,6 +1256,15 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return -EINVAL;
}
+ st->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(st->vdd_supply)) {
+ if (PTR_ERR(st->vdd_supply) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get vdd regulator %d\n",
+ (int)PTR_ERR(st->vdd_supply));
+
+ return PTR_ERR(st->vdd_supply);
+ }
+
st->vddio_supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(st->vddio_supply)) {
if (PTR_ERR(st->vddio_supply) != -EPROBE_DEFER)
@@ -1142,9 +1274,17 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return PTR_ERR(st->vddio_supply);
}
- result = inv_mpu_core_enable_regulator(st);
- if (result)
+ result = regulator_enable(st->vdd_supply);
+ if (result) {
+ dev_err(dev, "Failed to enable vdd regulator: %d\n", result);
+ return result;
+ }
+
+ result = inv_mpu_core_enable_regulator_vddio(st);
+ if (result) {
+ regulator_disable(st->vdd_supply);
return result;
+ }
result = devm_add_action_or_reset(dev, inv_mpu_core_disable_regulator_action,
st);
@@ -1154,6 +1294,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
+ /* fill magnetometer orientation */
+ result = inv_mpu_magn_set_orient(st);
+ if (result)
+ return result;
+
/* power is turned on inside check chip type*/
result = inv_check_and_setup_chip(st);
if (result)
@@ -1165,9 +1310,6 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
return result;
}
- if (inv_mpu_bus_setup)
- inv_mpu_bus_setup(indio_dev);
-
dev_set_drvdata(dev, indio_dev);
indio_dev->dev.parent = dev;
/* name will be NULL when enumerated via ACPI */
@@ -1176,14 +1318,37 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
else
indio_dev->name = dev_name(dev);
- if (chip_type == INV_ICM20602) {
+ /* requires parent device set in indio_dev */
+ if (inv_mpu_bus_setup)
+ inv_mpu_bus_setup(indio_dev);
+
+ switch (chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /*
+ * Use magnetometer inside the chip only if there is no i2c
+ * auxiliary device in use.
+ */
+ if (!st->magn_disabled) {
+ indio_dev->channels = inv_mpu9250_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu9250_channels);
+ indio_dev->available_scan_masks = inv_mpu9x50_scan_masks;
+ } else {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
+ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
+ break;
+ case INV_ICM20602:
indio_dev->channels = inv_icm20602_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
indio_dev->available_scan_masks = inv_icm20602_scan_masks;
- } else {
+ break;
+ default:
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ break;
}
indio_dev->info = &mpu_info;
@@ -1221,7 +1386,7 @@ static int inv_mpu_resume(struct device *dev)
int result;
mutex_lock(&st->lock);
- result = inv_mpu_core_enable_regulator(st);
+ result = inv_mpu_core_enable_regulator_vddio(st);
if (result)
goto out_unlock;
@@ -1239,7 +1404,7 @@ static int inv_mpu_suspend(struct device *dev)
mutex_lock(&st->lock);
result = inv_mpu6050_set_power_itg(st, false);
- inv_mpu_core_disable_regulator(st);
+ inv_mpu_core_disable_regulator_vddio(st);
mutex_unlock(&st->lock);
return result;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 4b8b5a87398c..389cc8505e0e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -68,6 +68,56 @@ static const char *inv_mpu_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static bool inv_mpu_i2c_aux_bus(struct device *dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(dev));
+
+ switch (st->chip_type) {
+ case INV_ICM20608:
+ case INV_ICM20602:
+ /* no i2c auxiliary bus on the chip */
+ return false;
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return st->magn_disabled;
+ default:
+ return true;
+ }
+}
+
+/*
+ * MPU9xxx magnetometer support requires disabling the i2c auxiliary bus.
+ * To ensure backward compatibility with existing setups, do not disable
+ * the i2c auxiliary bus if it is used.
+ * Check for an i2c-gate node in the devicetree and, if present, mark the
+ * magnetometer as disabled.
+ * Only MPU6500 is supported by ACPI, no need to check.
+ */
+static int inv_mpu_magn_disable(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct device *dev = indio_dev->dev.parent;
+ struct device_node *mux_node;
+
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ mux_node = of_get_child_by_name(dev->of_node, "i2c-gate");
+ if (mux_node != NULL) {
+ st->magn_disabled = true;
+ dev_warn(dev, "disable internal use of magnetometer\n");
+ }
+ of_node_put(mux_node);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/**
* inv_mpu_probe() - probe function.
* @client: i2c client.
@@ -112,17 +162,12 @@ static int inv_mpu_probe(struct i2c_client *client,
}
result = inv_mpu_core_probe(regmap, client->irq, name,
- NULL, chip_type);
+ inv_mpu_magn_disable, chip_type);
if (result < 0)
return result;
st = iio_priv(dev_get_drvdata(&client->dev));
- switch (st->chip_type) {
- case INV_ICM20608:
- case INV_ICM20602:
- /* no i2c auxiliary bus on the chip */
- break;
- default:
+ if (inv_mpu_i2c_aux_bus(&client->dev)) {
/* declare i2c auxiliary bus */
st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
@@ -137,7 +182,6 @@ static int inv_mpu_probe(struct i2c_client *client,
result = inv_mpu_acpi_create_mux_client(client);
if (result)
goto out_del_mux;
- break;
}
return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 51235677c534..f1fb7b6bdab1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -2,6 +2,10 @@
/*
* Copyright (C) 2012 Invensense, Inc.
*/
+
+#ifndef INV_MPU_IIO_H_
+#define INV_MPU_IIO_H_
+
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/mutex.h>
@@ -82,6 +86,7 @@ enum inv_devices {
* @accl_fs: accel full scale range.
* @accl_fifo_enable: enable accel data output
* @gyro_fifo_enable: enable gyro data output
+ * @magn_fifo_enable: enable magn data output
* @divider: chip sample rate divider (sample rate divider - 1)
*/
struct inv_mpu6050_chip_config {
@@ -90,6 +95,7 @@ struct inv_mpu6050_chip_config {
unsigned int accl_fs:2;
unsigned int accl_fifo_enable:1;
unsigned int gyro_fifo_enable:1;
+ unsigned int magn_fifo_enable:1;
u8 divider;
u8 user_ctrl;
};
@@ -126,7 +132,11 @@ struct inv_mpu6050_hw {
* @chip_period: chip internal period estimation (~1kHz).
* @it_timestamp: timestamp from previous interrupt.
* @data_timestamp: timestamp for next data sample.
- * @vddio_supply voltage regulator for the chip.
+ * @vdd_supply: VDD voltage regulator for the chip.
+ * @vddio_supply: I/O voltage regulator for the chip.
+ * @magn_disabled: magnetometer disabled for backward compatibility reasons.
+ * @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
+ * @magn_orient: magnetometer sensor chip orientation if available.
*/
struct inv_mpu6050_state {
struct mutex lock;
@@ -147,7 +157,11 @@ struct inv_mpu6050_state {
s64 chip_period;
s64 it_timestamp;
s64 data_timestamp;
+ struct regulator *vdd_supply;
struct regulator *vddio_supply;
+ bool magn_disabled;
+ s32 magn_raw_to_gauss[3];
+ struct iio_mount_matrix magn_orient;
};
/*register and associated bit definition*/
@@ -160,9 +174,41 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_ACCEL_CONFIG 0x1C
#define INV_MPU6050_REG_FIFO_EN 0x23
+#define INV_MPU6050_BIT_SLAVE_0 0x01
+#define INV_MPU6050_BIT_SLAVE_1 0x02
+#define INV_MPU6050_BIT_SLAVE_2 0x04
#define INV_MPU6050_BIT_ACCEL_OUT 0x08
#define INV_MPU6050_BITS_GYRO_OUT 0x70
+#define INV_MPU6050_REG_I2C_MST_CTRL 0x24
+#define INV_MPU6050_BITS_I2C_MST_CLK_400KHZ 0x0D
+#define INV_MPU6050_BIT_I2C_MST_P_NSR 0x10
+#define INV_MPU6050_BIT_SLV3_FIFO_EN 0x20
+#define INV_MPU6050_BIT_WAIT_FOR_ES 0x40
+#define INV_MPU6050_BIT_MULT_MST_EN 0x80
+
+/* control I2C slaves from 0 to 3 */
+#define INV_MPU6050_REG_I2C_SLV_ADDR(_x) (0x25 + 3 * (_x))
+#define INV_MPU6050_BIT_I2C_SLV_RNW 0x80
+
+#define INV_MPU6050_REG_I2C_SLV_REG(_x) (0x26 + 3 * (_x))
+
+#define INV_MPU6050_REG_I2C_SLV_CTRL(_x) (0x27 + 3 * (_x))
+#define INV_MPU6050_BIT_SLV_GRP 0x10
+#define INV_MPU6050_BIT_SLV_REG_DIS 0x20
+#define INV_MPU6050_BIT_SLV_BYTE_SW 0x40
+#define INV_MPU6050_BIT_SLV_EN 0x80
+
+/* I2C master delay register */
+#define INV_MPU6050_REG_I2C_SLV4_CTRL 0x34
+#define INV_MPU6050_BITS_I2C_MST_DLY(_x) ((_x) & 0x1F)
+
+#define INV_MPU6050_REG_I2C_MST_STATUS 0x36
+#define INV_MPU6050_BIT_I2C_SLV0_NACK 0x01
+#define INV_MPU6050_BIT_I2C_SLV1_NACK 0x02
+#define INV_MPU6050_BIT_I2C_SLV2_NACK 0x04
+#define INV_MPU6050_BIT_I2C_SLV3_NACK 0x08
+
#define INV_MPU6050_REG_INT_ENABLE 0x38
#define INV_MPU6050_BIT_DATA_RDY_EN 0x01
#define INV_MPU6050_BIT_DMP_INT_EN 0x02
@@ -175,6 +221,18 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_FIFO_OVERFLOW_INT 0x10
#define INV_MPU6050_BIT_RAW_DATA_RDY_INT 0x01
+#define INV_MPU6050_REG_EXT_SENS_DATA 0x49
+
+/* I2C slaves data output from 0 to 3 */
+#define INV_MPU6050_REG_I2C_SLV_DO(_x) (0x63 + (_x))
+
+#define INV_MPU6050_REG_I2C_MST_DELAY_CTRL 0x67
+#define INV_MPU6050_BIT_I2C_SLV0_DLY_EN 0x01
+#define INV_MPU6050_BIT_I2C_SLV1_DLY_EN 0x02
+#define INV_MPU6050_BIT_I2C_SLV2_DLY_EN 0x04
+#define INV_MPU6050_BIT_I2C_SLV3_DLY_EN 0x08
+#define INV_MPU6050_BIT_DELAY_ES_SHADOW 0x80
+
#define INV_MPU6050_REG_USER_CTRL 0x6A
#define INV_MPU6050_BIT_FIFO_RST 0x04
#define INV_MPU6050_BIT_DMP_RST 0x08
@@ -202,6 +260,9 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
+/* MPU9X50 9-axis magnetometer */
+#define INV_MPU9X50_BYTES_MAGN 7
+
/* ICM20602 FIFO samples include temperature readings */
#define INV_ICM20602_BYTES_PER_TEMP_SENSOR 2
@@ -229,8 +290,8 @@ struct inv_mpu6050_state {
#define INV_ICM20602_TEMP_OFFSET 8170
#define INV_ICM20602_TEMP_SCALE 3060
-/* 6 + 6 round up and plus 8 */
-#define INV_MPU6050_OUTPUT_DATA_SIZE 24
+/* 6 + 6 + 7 (for MPU9x50) = 19, rounded up to 24, plus 8 */
+#define INV_MPU6050_OUTPUT_DATA_SIZE 32
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
#define INV_MPU6050_ACTIVE_HIGH 0x00
@@ -279,6 +340,11 @@ enum inv_mpu6050_scan {
INV_MPU6050_SCAN_GYRO_Y,
INV_MPU6050_SCAN_GYRO_Z,
INV_MPU6050_SCAN_TIMESTAMP,
+
+ INV_MPU9X50_SCAN_MAGN_X = INV_MPU6050_SCAN_GYRO_Z + 1,
+ INV_MPU9X50_SCAN_MAGN_Y,
+ INV_MPU9X50_SCAN_MAGN_Z,
+ INV_MPU9X50_SCAN_TIMESTAMP,
};
/* scan element definition for ICM20602, which includes temperature */
@@ -344,3 +410,5 @@ void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type);
extern const struct dev_pm_ops inv_mpu_pmops;
+
+#endif
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
new file mode 100644
index 000000000000..02735af152c8
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+#include "inv_mpu_aux.h"
+#include "inv_mpu_iio.h"
+#include "inv_mpu_magn.h"
+
+/*
+ * MPU9250 magnetometer is an AKM AK8963 chip on I2C aux bus
+ */
+#define INV_MPU_MAGN_I2C_ADDR 0x0C
+
+#define INV_MPU_MAGN_REG_WIA 0x00
+#define INV_MPU_MAGN_BITS_WIA 0x48
+
+#define INV_MPU_MAGN_REG_ST1 0x02
+#define INV_MPU_MAGN_BIT_DRDY 0x01
+#define INV_MPU_MAGN_BIT_DOR 0x02
+
+#define INV_MPU_MAGN_REG_DATA 0x03
+
+#define INV_MPU_MAGN_REG_ST2 0x09
+#define INV_MPU_MAGN_BIT_HOFL 0x08
+#define INV_MPU_MAGN_BIT_BITM 0x10
+
+#define INV_MPU_MAGN_REG_CNTL1 0x0A
+#define INV_MPU_MAGN_BITS_MODE_PWDN 0x00
+#define INV_MPU_MAGN_BITS_MODE_SINGLE 0x01
+#define INV_MPU_MAGN_BITS_MODE_FUSE 0x0F
+#define INV_MPU_MAGN_BIT_OUTPUT_BIT 0x10
+
+#define INV_MPU_MAGN_REG_CNTL2 0x0B
+#define INV_MPU_MAGN_BIT_SRST 0x01
+
+#define INV_MPU_MAGN_REG_ASAX 0x10
+#define INV_MPU_MAGN_REG_ASAY 0x11
+#define INV_MPU_MAGN_REG_ASAZ 0x12
+
+/* Magnetometer maximum frequency */
+#define INV_MPU_MAGN_FREQ_HZ_MAX 50
+
+static bool inv_magn_supported(const struct inv_mpu6050_state *st)
+{
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* init magnetometer chip */
+static int inv_magn_init(struct inv_mpu6050_state *st)
+{
+ uint8_t val;
+ uint8_t asa[3];
+ int ret;
+
+ /* check whoami */
+ ret = inv_mpu_aux_read(st, INV_MPU_MAGN_I2C_ADDR, INV_MPU_MAGN_REG_WIA,
+ &val, sizeof(val));
+ if (ret)
+ return ret;
+ if (val != INV_MPU_MAGN_BITS_WIA)
+ return -ENODEV;
+
+ /* reset chip */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL2,
+ INV_MPU_MAGN_BIT_SRST);
+ if (ret)
+ return ret;
+
+ /* read fuse ROM data */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL1,
+ INV_MPU_MAGN_BITS_MODE_FUSE);
+ if (ret)
+ return ret;
+
+ ret = inv_mpu_aux_read(st, INV_MPU_MAGN_I2C_ADDR, INV_MPU_MAGN_REG_ASAX,
+ asa, sizeof(asa));
+ if (ret)
+ return ret;
+
+ /* switch back to power-down */
+ ret = inv_mpu_aux_write(st, INV_MPU_MAGN_I2C_ADDR,
+ INV_MPU_MAGN_REG_CNTL1,
+ INV_MPU_MAGN_BITS_MODE_PWDN);
+ if (ret)
+ return ret;
+
+ /*
+ * Sensitivity adjustment and scale to Gauss
+ *
+ * Hadj = H * (((ASA - 128) * 0.5 / 128) + 1)
+ * Factor simplification:
+ * Hadj = H * ((ASA + 128) / 256)
+ *
+ * Sensor sensitivity is 0.15 uT per LSB in 16 bits mode.
+ * 1 uT = 0.01 G and the value is stored in micro units (1e6):
+ * sensitivity = 0.15 uT * 0.01 * 1e6 = 1500
+ *
+ * raw_to_gauss = Hadj * 1500
+ */
+ st->magn_raw_to_gauss[0] = (((int32_t)asa[0] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[1] = (((int32_t)asa[1] + 128) * 1500) / 256;
+ st->magn_raw_to_gauss[2] = (((int32_t)asa[2] + 128) * 1500) / 256;
+
+ return 0;
+}
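As a quick sanity check of the factor simplification above, a minimal standalone sketch (illustration only, not part of the patch; the ASA value is made up):

#include <stdio.h>

int main(void)
{
	int asa = 140;	/* hypothetical fuse-ROM value */

	/* Hadj = H * ((ASA + 128) / 256), scaled by 0.15 uT * 0.01 * 1e6 */
	int raw_to_gauss = ((asa + 128) * 1500) / 256;

	printf("raw_to_gauss = %d\n", raw_to_gauss);	/* 1570 -> 0.001570 G/LSB */
	return 0;
}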
+
+/**
+ * inv_mpu_magn_probe() - probe and setup magnetometer chip
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Probe the chip and set up all the needed i2c transfers.
+ * Noop if there is no magnetometer in the chip.
+ */
+int inv_mpu_magn_probe(struct inv_mpu6050_state *st)
+{
+ int ret;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return 0;
+
+ /* configure i2c master aux port */
+ ret = inv_mpu_aux_init(st);
+ if (ret)
+ return ret;
+
+ /* check and init mag chip */
+ ret = inv_magn_init(st);
+ if (ret)
+ return ret;
+
+ /*
+ * configure mpu i2c master accesses
+ * i2c SLV0: read sensor data, 7 bytes: data (6) + ST2
+ * Byte swap data to store them big-endian in odd address groups
+ */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(0),
+ INV_MPU6050_BIT_I2C_SLV_RNW | INV_MPU_MAGN_I2C_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(0),
+ INV_MPU_MAGN_REG_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(0),
+ INV_MPU6050_BIT_SLV_EN |
+ INV_MPU6050_BIT_SLV_BYTE_SW |
+ INV_MPU6050_BIT_SLV_GRP |
+ INV_MPU9X50_BYTES_MAGN);
+ if (ret)
+ return ret;
+
+ /* i2c SLV1: launch single measurement */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_ADDR(1),
+ INV_MPU_MAGN_I2C_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_REG(1),
+ INV_MPU_MAGN_REG_CNTL1);
+ if (ret)
+ return ret;
+
+ /* add 16 bits mode */
+ ret = regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_DO(1),
+ INV_MPU_MAGN_BITS_MODE_SINGLE |
+ INV_MPU_MAGN_BIT_OUTPUT_BIT);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV_CTRL(1),
+ INV_MPU6050_BIT_SLV_EN | 1);
+}
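The SLV0 configuration above makes the chip copy the 7 magnetometer bytes (6 data + ST2) into EXT_SENS_DATA with the 16-bit words byte-swapped to big-endian. A hedged standalone sketch of how such a buffer decodes (the sample bytes are invented):

#include <stdint.h>
#include <stdio.h>

static int16_t be16(const uint8_t *p)
{
	return (int16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* 6 data bytes stored big-endian by SLV_BYTE_SW/SLV_GRP, then ST2 */
	uint8_t ext_sens_data[7] = { 0x01, 0x23, 0xff, 0x9c, 0x00, 0x64, 0x10 };

	printf("x=%d y=%d z=%d st2=0x%02x\n",
	       be16(&ext_sens_data[0]), be16(&ext_sens_data[2]),
	       be16(&ext_sens_data[4]), ext_sens_data[6]);
	return 0;
}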
+
+/**
+ * inv_mpu_magn_set_rate() - set magnetometer sampling rate
+ * @st: driver internal state
+ * @fifo_rate: mpu set fifo rate
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Limit the sampling frequency to the maximum value supported by the
+ * magnetometer chip; this results in duplicated data at higher frequencies.
+ * Noop if there is no magnetometer in the chip.
+ */
+int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate)
+{
+ uint8_t d;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return 0;
+
+ /*
+ * update i2c master delay to limit mag sampling to max frequency
+ * compute fifo_rate divider d: rate = fifo_rate / (d + 1)
+ */
+ if (fifo_rate > INV_MPU_MAGN_FREQ_HZ_MAX)
+ d = fifo_rate / INV_MPU_MAGN_FREQ_HZ_MAX - 1;
+ else
+ d = 0;
+
+ return regmap_write(st->map, INV_MPU6050_REG_I2C_SLV4_CTRL, d);
+}
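Worked example of the divider formula above, as a standalone sketch (the fifo rate is an arbitrary example):

#include <stdio.h>

#define INV_MPU_MAGN_FREQ_HZ_MAX	50

int main(void)
{
	int fifo_rate = 200;	/* example MPU fifo rate in Hz */
	int d = 0;

	/* rate = fifo_rate / (d + 1), capped at the magnetometer maximum */
	if (fifo_rate > INV_MPU_MAGN_FREQ_HZ_MAX)
		d = fifo_rate / INV_MPU_MAGN_FREQ_HZ_MAX - 1;

	printf("divider=%d -> mag rate=%d Hz\n", d, fifo_rate / (d + 1));
	return 0;
}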
+
+/**
+ * inv_mpu_magn_set_orient() - fill magnetometer mounting matrix
+ * @st: driver internal state
+ *
+ * Returns 0 on success, a negative error code otherwise
+ *
+ * Fill magnetometer mounting matrix using the provided chip matrix.
+ */
+int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st)
+{
+ const char *orient;
+ char *str;
+ int i;
+
+ /* fill magnetometer orientation */
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ /* x <- y */
+ st->magn_orient.rotation[0] = st->orientation.rotation[3];
+ st->magn_orient.rotation[1] = st->orientation.rotation[4];
+ st->magn_orient.rotation[2] = st->orientation.rotation[5];
+ /* y <- x */
+ st->magn_orient.rotation[3] = st->orientation.rotation[0];
+ st->magn_orient.rotation[4] = st->orientation.rotation[1];
+ st->magn_orient.rotation[5] = st->orientation.rotation[2];
+ /* z <- -z */
+ for (i = 0; i < 3; ++i) {
+ orient = st->orientation.rotation[6 + i];
+ /* length + 2: room for a minus sign and the trailing NUL */
+ str = devm_kzalloc(regmap_get_device(st->map),
+ strlen(orient) + 2, GFP_KERNEL);
+ if (str == NULL)
+ return -ENOMEM;
+ if (strcmp(orient, "0") == 0) {
+ strcpy(str, orient);
+ } else if (orient[0] == '-') {
+ strcpy(str, &orient[1]);
+ } else {
+ str[0] = '-';
+ strcpy(&str[1], orient);
+ }
+ st->magn_orient.rotation[6 + i] = str;
+ }
+ break;
+ default:
+ st->magn_orient = st->orientation;
+ break;
+ }
+
+ return 0;
+}
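To illustrate the axis remapping above (x and y swapped, z negated), a small user-space sketch applying the same transform to a hypothetical identity mounting matrix:

#include <stdio.h>
#include <string.h>

/* toggle the sign of a matrix element given as a string ("0" is kept) */
static void negate(const char *in, char *out)
{
	if (strcmp(in, "0") == 0)
		strcpy(out, in);
	else if (in[0] == '-')
		strcpy(out, in + 1);
	else
		sprintf(out, "-%s", in);
}

int main(void)
{
	const char *chip[9] = { "1", "0", "0", "0", "1", "0", "0", "0", "1" };
	char z[3][8];
	int i;

	for (i = 0; i < 3; i++)
		negate(chip[6 + i], z[i]);

	/* magn row x <- chip row y, magn row y <- chip row x, z negated */
	printf("%s %s %s\n", chip[3], chip[4], chip[5]);
	printf("%s %s %s\n", chip[0], chip[1], chip[2]);
	printf("%s %s %s\n", z[0], z[1], z[2]);
	return 0;
}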
+
+/**
+ * inv_mpu_magn_read() - read magnetometer data
+ * @st: driver internal state
+ * @axis: IIO modifier axis value
+ * @val: store corresponding axis value
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val)
+{
+ unsigned int user_ctrl, status;
+ __be16 data[3];
+ uint8_t addr;
+ uint8_t d;
+ unsigned int period_ms;
+ int ret;
+
+ /* quit if chip is not supported */
+ if (!inv_magn_supported(st))
+ return -ENODEV;
+
+ /* Mag data: X - Y - Z */
+ switch (axis) {
+ case IIO_MOD_X:
+ addr = 0;
+ break;
+ case IIO_MOD_Y:
+ addr = 1;
+ break;
+ case IIO_MOD_Z:
+ addr = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set sample rate to max mag freq */
+ d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(INV_MPU_MAGN_FREQ_HZ_MAX);
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* start i2c master, wait for xfer, stop */
+ user_ctrl = st->chip_config.user_ctrl | INV_MPU6050_BIT_I2C_MST_EN;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+
+ /* need to wait 2 periods + half-period margin */
+ period_ms = 1000 / INV_MPU_MAGN_FREQ_HZ_MAX;
+ msleep(period_ms * 2 + period_ms / 2);
+ user_ctrl = st->chip_config.user_ctrl;
+ ret = regmap_write(st->map, st->reg->user_ctrl, user_ctrl);
+ if (ret)
+ return ret;
+
+ /* restore sample rate */
+ d = st->chip_config.divider;
+ ret = regmap_write(st->map, st->reg->sample_rate_div, d);
+ if (ret)
+ return ret;
+
+ /* check i2c status and read raw data */
+ ret = regmap_read(st->map, INV_MPU6050_REG_I2C_MST_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (status & INV_MPU6050_BIT_I2C_SLV0_NACK ||
+ status & INV_MPU6050_BIT_I2C_SLV1_NACK)
+ return -EIO;
+
+ ret = regmap_bulk_read(st->map, INV_MPU6050_REG_EXT_SENS_DATA,
+ data, sizeof(data));
+ if (ret)
+ return ret;
+
+ *val = (int16_t)be16_to_cpu(data[addr]);
+
+ return IIO_VAL_INT;
+}
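The wait in the read path above works out to fixed numbers; a one-liner sketch (illustration only):

#include <stdio.h>

#define INV_MPU_MAGN_FREQ_HZ_MAX	50

int main(void)
{
	unsigned int period_ms = 1000 / INV_MPU_MAGN_FREQ_HZ_MAX;	/* 20 ms */

	/* two full periods plus a half-period margin */
	printf("wait %u ms\n", period_ms * 2 + period_ms / 2);	/* 50 ms */
	return 0;
}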
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
new file mode 100644
index 000000000000..b41bd0578478
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_magn.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 TDK-InvenSense, Inc.
+ */
+
+#ifndef INV_MPU_MAGN_H_
+#define INV_MPU_MAGN_H_
+
+#include <linux/kernel.h>
+
+#include "inv_mpu_iio.h"
+
+int inv_mpu_magn_probe(struct inv_mpu6050_state *st);
+
+/**
+ * inv_mpu_magn_get_scale() - get magnetometer scale value
+ * @st: driver internal state
+ * @chan: IIO channel
+ * @val: store the integer part of the scale
+ * @val2: store the micro part of the scale
+ *
+ * Returns IIO data format.
+ */
+static inline int inv_mpu_magn_get_scale(const struct inv_mpu6050_state *st,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2)
+{
+ *val = 0;
+ *val2 = st->magn_raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
+}
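With the default ASA of 128, magn_raw_to_gauss is 1500, so the scale reported to IIO reads as follows (sketch, assuming that default):

#include <stdio.h>

int main(void)
{
	int val = 0, val2 = 1500;	/* IIO_VAL_INT_PLUS_MICRO pair */

	printf("in_magn_scale = %d.%06d\n", val, val2);	/* 0.001500 G/LSB */
	return 0;
}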
+
+int inv_mpu_magn_set_rate(const struct inv_mpu6050_state *st, int fifo_rate);
+
+int inv_mpu_magn_set_orient(struct inv_mpu6050_state *st);
+
+int inv_mpu_magn_read(const struct inv_mpu6050_state *st, int axis, int *val);
+
+#endif /* INV_MPU_MAGN_H_ */
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 72d8c5790076..10d16ec5104b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -124,7 +124,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
- st->chip_config.gyro_fifo_enable) {
+ st->chip_config.gyro_fifo_enable ||
+ st->chip_config.magn_fifo_enable) {
result = regmap_write(st->map, st->reg->int_enable,
INV_MPU6050_BIT_DATA_RDY_EN);
if (result)
@@ -141,6 +142,8 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
+ if (st->chip_config.magn_fifo_enable)
+ d |= INV_MPU6050_BIT_SLAVE_0;
result = regmap_write(st->map, st->reg->fifo_en, d);
if (result)
goto reset_fifo_fail;
@@ -187,7 +190,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
}
if (!(st->chip_config.accl_fifo_enable |
- st->chip_config.gyro_fifo_enable))
+ st->chip_config.gyro_fifo_enable |
+ st->chip_config.magn_fifo_enable))
goto end_session;
bytes_per_datum = 0;
if (st->chip_config.accl_fifo_enable)
@@ -199,6 +203,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
if (st->chip_type == INV_ICM20602)
bytes_per_datum += INV_ICM20602_BYTES_PER_TEMP_SENSOR;
+ if (st->chip_config.magn_fifo_enable)
+ bytes_per_datum += INV_MPU9X50_BYTES_MAGN;
+
/*
* read fifo_count register to know how many bytes are inside the FIFO
* right now
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index dd55e70b6f77..d7d951927a44 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -5,7 +5,7 @@
#include "inv_mpu_iio.h"
-static void inv_scan_query(struct iio_dev *indio_dev)
+static void inv_scan_query_mpu6050(struct iio_dev *indio_dev)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -26,6 +26,60 @@ static void inv_scan_query(struct iio_dev *indio_dev)
indio_dev->active_scan_mask);
}
+static void inv_scan_query_mpu9x50(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ inv_scan_query_mpu6050(indio_dev);
+
+ /* no magnetometer if i2c auxiliary bus is used */
+ if (st->magn_disabled)
+ return;
+
+ st->chip_config.magn_fifo_enable =
+ test_bit(INV_MPU9X50_SCAN_MAGN_X,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU9X50_SCAN_MAGN_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU9X50_SCAN_MAGN_Z,
+ indio_dev->active_scan_mask);
+}
+
+static void inv_scan_query(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ switch (st->chip_type) {
+ case INV_MPU9250:
+ case INV_MPU9255:
+ return inv_scan_query_mpu9x50(indio_dev);
+ default:
+ return inv_scan_query_mpu6050(indio_dev);
+ }
+}
+
+static unsigned int inv_compute_skip_samples(const struct inv_mpu6050_state *st)
+{
+ unsigned int gyro_skip = 0;
+ unsigned int magn_skip = 0;
+ unsigned int skip_samples;
+
+ /* gyro first sample is out of specs, skip it */
+ if (st->chip_config.gyro_fifo_enable)
+ gyro_skip = 1;
+
+ /* mag first sample is never ready, skip it */
+ if (st->chip_config.magn_fifo_enable)
+ magn_skip = 1;
+
+ /* compute first samples to skip */
+ skip_samples = gyro_skip;
+ if (magn_skip > skip_samples)
+ skip_samples = magn_skip;
+
+ return skip_samples;
+}
+
/**
* inv_mpu6050_set_enable() - enable chip functions.
* @indio_dev: Device driver instance.
@@ -34,6 +88,7 @@ static void inv_scan_query(struct iio_dev *indio_dev)
static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ uint8_t d;
int result;
if (enable) {
@@ -41,14 +96,11 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
return result;
inv_scan_query(indio_dev);
- st->skip_samples = 0;
if (st->chip_config.gyro_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
goto error_power_off;
- /* gyro first sample is out of specs, skip it */
- st->skip_samples = 1;
}
if (st->chip_config.accl_fifo_enable) {
result = inv_mpu6050_switch_engine(st, true,
@@ -56,22 +108,32 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
goto error_gyro_off;
}
+ if (st->chip_config.magn_fifo_enable) {
+ d = st->chip_config.user_ctrl |
+ INV_MPU6050_BIT_I2C_MST_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
+ if (result)
+ goto error_accl_off;
+ st->chip_config.user_ctrl = d;
+ }
+ st->skip_samples = inv_compute_skip_samples(st);
result = inv_reset_fifo(indio_dev);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
} else {
result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
result = regmap_write(st->map, st->reg->int_enable, 0);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
- result = regmap_write(st->map, st->reg->user_ctrl,
- st->chip_config.user_ctrl);
+ d = st->chip_config.user_ctrl & ~INV_MPU6050_BIT_I2C_MST_EN;
+ result = regmap_write(st->map, st->reg->user_ctrl, d);
if (result)
- goto error_accl_off;
+ goto error_magn_off;
+ st->chip_config.user_ctrl = d;
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_ACCL_STBY);
@@ -90,6 +152,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
return 0;
+error_magn_off:
+ /* always restore user_ctrl to disable fifo properly */
+ st->chip_config.user_ctrl &= ~INV_MPU6050_BIT_I2C_MST_EN;
+ regmap_write(st->map, st->reg->user_ctrl, st->chip_config.user_ctrl);
error_accl_off:
if (st->chip_config.accl_fifo_enable)
inv_mpu6050_switch_engine(st, false,
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 77aa0e77212d..28f59d09208a 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -12,7 +12,8 @@ config IIO_ST_LSM6DSX
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
ism330dlc, lsm6dso, lsm6dsox, asm330lhh, lsm6dsr, lsm6ds3tr-c,
- ism330dhcx and the accelerometer/gyroscope of lsm9ds1.
+ ism330dhcx, lsm6dsrx, lsm6ds0 and the accelerometer/gyroscope
+ of lsm9ds1.
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 0fe6999b8257..c605b153be41 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -12,6 +12,7 @@
#define ST_LSM6DSX_H
#include <linux/device.h>
+#include <linux/iio/iio.h>
#define ST_LSM6DS3_DEV_NAME "lsm6ds3"
#define ST_LSM6DS3H_DEV_NAME "lsm6ds3h"
@@ -25,6 +26,8 @@
#define ST_LSM6DS3TRC_DEV_NAME "lsm6ds3tr-c"
#define ST_ISM330DHCX_DEV_NAME "ism330dhcx"
#define ST_LSM9DS1_DEV_NAME "lsm9ds1-imu"
+#define ST_LSM6DS0_DEV_NAME "lsm6ds0"
+#define ST_LSM6DSRX_DEV_NAME "lsm6dsrx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -39,6 +42,8 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DS3TRC_ID,
ST_ISM330DHCX_ID,
ST_LSM9DS1_ID,
+ ST_LSM6DS0_ID,
+ ST_LSM6DSRX_ID,
ST_LSM6DSX_MAX_ID,
};
@@ -54,6 +59,26 @@ enum st_lsm6dsx_hw_id {
* ST_LSM6DSX_TAGGED_SAMPLE_SIZE)
#define ST_LSM6DSX_SHIFT_VAL(val, mask) (((val) << __ffs(mask)) & (mask))
+#define ST_LSM6DSX_CHANNEL_ACC(chan_type, addr, mod, scan_idx) \
+{ \
+ .type = chan_type, \
+ .address = addr, \
+ .modified = 1, \
+ .channel2 = mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = scan_idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .event_spec = &st_lsm6dsx_event, \
+ .num_event_specs = 1, \
+}
+
#define ST_LSM6DSX_CHANNEL(chan_type, addr, mod, scan_idx) \
{ \
.type = chan_type, \
@@ -81,14 +106,16 @@ struct st_lsm6dsx_sensor;
struct st_lsm6dsx_hw;
struct st_lsm6dsx_odr {
- u16 hz;
+ u32 milli_hz;
u8 val;
};
#define ST_LSM6DSX_ODR_LIST_SIZE 6
struct st_lsm6dsx_odr_table_entry {
struct st_lsm6dsx_reg reg;
+
struct st_lsm6dsx_odr odr_avl[ST_LSM6DSX_ODR_LIST_SIZE];
+ int odr_len;
};
struct st_lsm6dsx_fs {
@@ -132,12 +159,14 @@ struct st_lsm6dsx_fifo_ops {
* @hr_timer: Hw timer resolution register info (addr + mask).
* @fifo_en: Hw timer FIFO enable register info (addr + mask).
* @decimator: Hw timer FIFO decimator register info (addr + mask).
+ * @freq_fine: Difference in % of ODR with respect to the typical value.
*/
struct st_lsm6dsx_hw_ts_settings {
struct st_lsm6dsx_reg timer_en;
struct st_lsm6dsx_reg hr_timer;
struct st_lsm6dsx_reg fifo_en;
struct st_lsm6dsx_reg decimator;
+ u8 freq_fine;
};
/**
@@ -164,6 +193,16 @@ struct st_lsm6dsx_shub_settings {
u8 batch_en;
};
+struct st_lsm6dsx_event_settings {
+ struct st_lsm6dsx_reg enable_reg;
+ struct st_lsm6dsx_reg wakeup_reg;
+ u8 wakeup_src_reg;
+ u8 wakeup_src_status_mask;
+ u8 wakeup_src_z_mask;
+ u8 wakeup_src_y_mask;
+ u8 wakeup_src_x_mask;
+};
+
enum st_lsm6dsx_ext_sensor_id {
ST_LSM6DSX_ID_MAGN,
};
@@ -207,12 +246,14 @@ struct st_lsm6dsx_ext_dev_settings {
/**
* struct st_lsm6dsx_settings - ST IMU sensor settings
* @wai: Sensor WhoAmI default value.
- * @int1_addr: Control Register address for INT1
- * @int2_addr: Control Register address for INT2
- * @reset_addr: register address for reset/reboot
+ * @reset: register address for reset.
+ * @boot: register address for boot.
+ * @bdu: register address for Block Data Update.
* @max_fifo_size: Sensor max fifo length in FIFO words.
* @id: List of hw id/device name supported by the driver configuration.
* @channels: IIO channels supported by the device.
+ * @irq_config: interrupts related registers.
+ * @drdy_mask: register info for data-ready mask (addr + mask).
* @odr_table: Hw sensors odr table (Hz + val).
* @fs_table: Hw sensors gain table (gain + val).
* @decimator: List of decimator register info (addr + mask).
@@ -223,9 +264,9 @@ struct st_lsm6dsx_ext_dev_settings {
*/
struct st_lsm6dsx_settings {
u8 wai;
- u8 int1_addr;
- u8 int2_addr;
- u8 reset_addr;
+ struct st_lsm6dsx_reg reset;
+ struct st_lsm6dsx_reg boot;
+ struct st_lsm6dsx_reg bdu;
u16 max_fifo_size;
struct {
enum st_lsm6dsx_hw_id hw_id;
@@ -235,6 +276,17 @@ struct st_lsm6dsx_settings {
const struct iio_chan_spec *chan;
int len;
} channels[2];
+ struct {
+ struct st_lsm6dsx_reg irq1;
+ struct st_lsm6dsx_reg irq2;
+ struct st_lsm6dsx_reg irq1_func;
+ struct st_lsm6dsx_reg irq2_func;
+ struct st_lsm6dsx_reg lir;
+ struct st_lsm6dsx_reg clear_on_read;
+ struct st_lsm6dsx_reg hla;
+ struct st_lsm6dsx_reg od;
+ } irq_config;
+ struct st_lsm6dsx_reg drdy_mask;
struct st_lsm6dsx_odr_table_entry odr_table[2];
struct st_lsm6dsx_fs_table_entry fs_table[2];
struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
@@ -242,6 +294,7 @@ struct st_lsm6dsx_settings {
struct st_lsm6dsx_fifo_ops fifo_ops;
struct st_lsm6dsx_hw_ts_settings ts_settings;
struct st_lsm6dsx_shub_settings shub_settings;
+ struct st_lsm6dsx_event_settings event_settings;
};
enum st_lsm6dsx_sensor_id {
@@ -277,7 +330,7 @@ struct st_lsm6dsx_sensor {
struct st_lsm6dsx_hw *hw;
u32 gain;
- u16 odr;
+ u32 odr;
u16 watermark;
u8 sip;
@@ -301,9 +354,13 @@ struct st_lsm6dsx_sensor {
* @fifo_mode: FIFO operating mode supported by the device.
* @suspend_mask: Suspended sensor bitmask.
* @enable_mask: Enabled sensor bitmask.
+ * @ts_gain: Hw timestamp rate after internal calibration.
* @ts_sip: Total number of timestamp samples in a given pattern.
* @sip: Total number of samples (acc/gyro/ts) in a given pattern.
* @buff: Device read buffer.
+ * @irq_routing: pointer to interrupt routing configuration.
+ * @event_threshold: wakeup event threshold.
+ * @enable_event: enabled event bitmask.
* @iio_devs: Pointers to acc/gyro iio_dev instances.
* @settings: Pointer to the specific sensor settings in use.
*/
@@ -319,9 +376,14 @@ struct st_lsm6dsx_hw {
enum st_lsm6dsx_fifo_mode fifo_mode;
u8 suspend_mask;
u8 enable_mask;
+ s64 ts_gain;
u8 ts_sip;
u8 sip;
+ const struct st_lsm6dsx_reg *irq_routing;
+ u8 event_threshold;
+ u8 enable_event;
+
u8 *buff;
struct iio_dev *iio_devs[ST_LSM6DSX_ID_MAX];
@@ -329,6 +391,13 @@ struct st_lsm6dsx_hw {
const struct st_lsm6dsx_settings *settings;
};
+static const struct iio_event_spec st_lsm6dsx_event = {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE)
+};
+
static const unsigned long st_lsm6dsx_available_scan_masks[] = {0x7, 0x0};
extern const struct dev_pm_ops st_lsm6dsx_pm_ops;
@@ -346,7 +415,7 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode);
int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw);
int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw);
-int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val);
+int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val);
int st_lsm6dsx_shub_probe(struct st_lsm6dsx_hw *hw, const char *name);
int st_lsm6dsx_shub_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable);
int st_lsm6dsx_set_page(struct st_lsm6dsx_hw *hw, bool enable);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index b0f3da1976e4..d416990ae309 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -14,10 +14,10 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX: The FIFO buffer can be
- * configured to store data from gyroscope and accelerometer. Each sample
- * is queued with a tag (1B) indicating data source (gyroscope, accelerometer,
- * hw timer).
+ * LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/LSM6DSRX/ISM330DHCX:
+ * The FIFO buffer can be configured to store data from gyroscope and
+ * accelerometer. Each sample is queued with a tag (1B) indicating data
+ * source (gyroscope, accelerometer, hw timer).
*
* FIFO supported modes:
* - BYPASS: FIFO disabled
@@ -30,8 +30,6 @@
* Denis Ciocca <denis.ciocca@st.com>
*/
#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -42,10 +40,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12
-#define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5)
-#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12
-#define ST_LSM6DSX_REG_PP_OD_MASK BIT(4)
#define ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a
#define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0)
#define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3)
@@ -56,7 +50,6 @@
#define ST_LSM6DSX_MAX_FIFO_ODR_VAL 0x08
-#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
#define ST_LSM6DSX_TS_RESET_VAL 0xaa
struct st_lsm6dsx_decimator_entry {
@@ -98,7 +91,7 @@ static int st_lsm6dsx_get_decimator_val(u8 val)
}
static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
- u16 *max_odr, u16 *min_odr)
+ u32 *max_odr, u32 *min_odr)
{
struct st_lsm6dsx_sensor *sensor;
int i;
@@ -113,16 +106,17 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
- *max_odr = max_t(u16, *max_odr, sensor->odr);
- *min_odr = min_t(u16, *min_odr, sensor->odr);
+ *max_odr = max_t(u32, *max_odr, sensor->odr);
+ *min_odr = min_t(u32, *min_odr, sensor->odr);
}
}
static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
{
- u16 max_odr, min_odr, sip = 0, ts_sip = 0;
const struct st_lsm6dsx_reg *ts_dec_reg;
struct st_lsm6dsx_sensor *sensor;
+ u16 sip = 0, ts_sip = 0;
+ u32 max_odr, min_odr;
int err = 0, i;
u8 data;
@@ -429,7 +423,7 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
*/
if (!reset_ts && ts >= 0xff0000)
reset_ts = true;
- ts *= ST_LSM6DSX_TS_SENSITIVITY;
+ ts *= hw->ts_gain;
offset += ST_LSM6DSX_SAMPLE_SIZE;
}
@@ -456,13 +450,19 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
return read_len;
}
+#define ST_LSM6DSX_INVALID_SAMPLE 0x7ffd
static int
st_lsm6dsx_push_tagged_data(struct st_lsm6dsx_hw *hw, u8 tag,
u8 *data, s64 ts)
{
+ s16 val = le16_to_cpu(*(__le16 *)data);
struct st_lsm6dsx_sensor *sensor;
struct iio_dev *iio_dev;
+ /* invalid sample during bootstrap phase */
+ if (val >= ST_LSM6DSX_INVALID_SAMPLE)
+ return -EINVAL;
+
/*
* EXT_TAG are managed in FIFO fashion so ST_LSM6DSX_EXT0_TAG
* corresponds to the first enabled channel, ST_LSM6DSX_EXT1_TAG
@@ -572,7 +572,7 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
*/
if (!reset_ts && ts >= 0xffff0000)
reset_ts = true;
- ts *= ST_LSM6DSX_TS_SENSITIVITY;
+ ts *= hw->ts_gain;
} else {
st_lsm6dsx_push_tagged_data(hw, tag, iio_buff,
ts);
@@ -592,6 +592,9 @@ int st_lsm6dsx_flush_fifo(struct st_lsm6dsx_hw *hw)
{
int err;
+ if (!hw->settings->fifo_ops.read_fifo)
+ return -ENOTSUPP;
+
mutex_lock(&hw->fifo_lock);
hw->settings->fifo_ops.read_fifo(hw);
@@ -654,25 +657,6 @@ out:
return err;
}
-static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private)
-{
- struct st_lsm6dsx_hw *hw = private;
-
- return hw->sip > 0 ? IRQ_WAKE_THREAD : IRQ_NONE;
-}
-
-static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
-{
- struct st_lsm6dsx_hw *hw = private;
- int count;
-
- mutex_lock(&hw->fifo_lock);
- count = hw->settings->fifo_ops.read_fifo(hw);
- mutex_unlock(&hw->fifo_lock);
-
- return count ? IRQ_HANDLED : IRQ_NONE;
-}
-
static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
@@ -702,59 +686,8 @@ static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = {
int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
{
- struct device_node *np = hw->dev->of_node;
- struct st_sensors_platform_data *pdata;
struct iio_buffer *buffer;
- unsigned long irq_type;
- bool irq_active_low;
- int i, err;
-
- irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
-
- switch (irq_type) {
- case IRQF_TRIGGER_HIGH:
- case IRQF_TRIGGER_RISING:
- irq_active_low = false;
- break;
- case IRQF_TRIGGER_LOW:
- case IRQF_TRIGGER_FALLING:
- irq_active_low = true;
- break;
- default:
- dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
- return -EINVAL;
- }
-
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_HLACTIVE_ADDR,
- ST_LSM6DSX_REG_HLACTIVE_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_HLACTIVE_MASK,
- irq_active_low));
- if (err < 0)
- return err;
-
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
- (pdata && pdata->open_drain)) {
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_PP_OD_ADDR,
- ST_LSM6DSX_REG_PP_OD_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_PP_OD_MASK,
- 1));
- if (err < 0)
- return err;
-
- irq_type |= IRQF_SHARED;
- }
-
- err = devm_request_threaded_irq(hw->dev, hw->irq,
- st_lsm6dsx_handler_irq,
- st_lsm6dsx_handler_thread,
- irq_type | IRQF_ONESHOT,
- "lsm6dsx", hw);
- if (err) {
- dev_err(hw->dev, "failed to request trigger irq %d\n",
- hw->irq);
- return err;
- }
+ int i;
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
if (!hw->iio_devs[i])
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index fd5ebe1e1594..11b2c7bc8041 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -32,7 +32,7 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM9DS1:
+ * - LSM9DS1/LSM6DS0:
* - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported ODR [Hz]: 15, 60, 119, 238, 476, 952
@@ -48,8 +48,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
@@ -58,17 +61,14 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK BIT(3)
#define ST_LSM6DSX_REG_WHOAMI_ADDR 0x0f
-#define ST_LSM6DSX_REG_RESET_MASK BIT(0)
-#define ST_LSM6DSX_REG_BOOT_MASK BIT(7)
-#define ST_LSM6DSX_REG_BDU_ADDR 0x12
-#define ST_LSM6DSX_REG_BDU_MASK BIT(6)
+
+#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
static const struct iio_chan_spec st_lsm6dsx_acc_channels[] = {
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
- ST_LSM6DSX_CHANNEL(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
+ ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2c, IIO_MOD_Z, 2),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -89,14 +89,26 @@ static const struct iio_chan_spec st_lsm6ds0_gyro_channels[] = {
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
.wai = 0x68,
- .int1_addr = 0x0c,
- .int2_addr = 0x0d,
- .reset_addr = 0x22,
+ .reset = {
+ .addr = 0x22,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x22,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x22,
+ .mask = BIT(6),
+ },
.max_fifo_size = 32,
.id = {
{
.hw_id = ST_LSM9DS1_ID,
.name = ST_LSM9DS1_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DS0_ID,
+ .name = ST_LSM6DS0_DEV_NAME,
},
},
.channels = {
@@ -115,24 +127,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x20,
.mask = GENMASK(7, 5),
},
- .odr_avl[0] = { 10, 0x01 },
- .odr_avl[1] = { 50, 0x02 },
- .odr_avl[2] = { 119, 0x03 },
- .odr_avl[3] = { 238, 0x04 },
- .odr_avl[4] = { 476, 0x05 },
- .odr_avl[5] = { 952, 0x06 },
+ .odr_avl[0] = { 10000, 0x01 },
+ .odr_avl[1] = { 50000, 0x02 },
+ .odr_avl[2] = { 119000, 0x03 },
+ .odr_avl[3] = { 238000, 0x04 },
+ .odr_avl[4] = { 476000, 0x05 },
+ .odr_avl[5] = { 952000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 5),
},
- .odr_avl[0] = { 15, 0x01 },
- .odr_avl[1] = { 60, 0x02 },
- .odr_avl[2] = { 119, 0x03 },
- .odr_avl[3] = { 238, 0x04 },
- .odr_avl[4] = { 476, 0x05 },
- .odr_avl[5] = { 952, 0x06 },
+ .odr_avl[0] = { 14900, 0x01 },
+ .odr_avl[1] = { 59500, 0x02 },
+ .odr_avl[2] = { 119000, 0x03 },
+ .odr_avl[3] = { 238000, 0x04 },
+ .odr_avl[4] = { 476000, 0x05 },
+ .odr_avl[5] = { 952000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -152,18 +166,46 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(4, 3),
},
- .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
- .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
- .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+
+ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
+ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
+ .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
.fs_len = 3,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0c,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .hla = {
+ .addr = 0x22,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x22,
+ .mask = BIT(4),
+ },
+ },
},
{
.wai = 0x69,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 1365,
.id = {
{
@@ -187,24 +229,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -231,6 +275,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -272,12 +346,32 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x69,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 682,
.id = {
{
@@ -301,24 +395,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -345,6 +441,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -386,12 +512,32 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6a,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 682,
.id = {
{
@@ -424,24 +570,26 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -468,6 +616,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x58,
+ .mask = BIT(0),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.decimator = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x08,
@@ -509,12 +687,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(5, 3),
},
},
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6c,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -535,30 +737,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -585,6 +793,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -617,6 +859,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
},
.shub_settings = {
.page_mux = {
@@ -643,13 +886,37 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
- }
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5b,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
+ },
},
{
.wai = 0x6b,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -667,30 +934,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -717,6 +990,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -749,13 +1056,38 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
},
},
{
.wai = 0x6b,
- .int1_addr = 0x0d,
- .int2_addr = 0x0e,
- .reset_addr = 0x12,
+ .reset = {
+ .addr = 0x12,
+ .mask = BIT(0),
+ },
+ .boot = {
+ .addr = 0x12,
+ .mask = BIT(7),
+ },
+ .bdu = {
+ .addr = 0x12,
+ .mask = BIT(6),
+ },
.max_fifo_size = 512,
.id = {
{
@@ -764,6 +1096,9 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
}, {
.hw_id = ST_ISM330DHCX_ID,
.name = ST_ISM330DHCX_DEV_NAME,
+ }, {
+ .hw_id = ST_LSM6DSRX_ID,
+ .name = ST_LSM6DSRX_DEV_NAME,
},
},
.channels = {
@@ -776,30 +1111,36 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.len = ARRAY_SIZE(st_lsm6dsx_gyro_channels),
},
},
+ .drdy_mask = {
+ .addr = 0x13,
+ .mask = BIT(3),
+ },
.odr_table = {
[ST_LSM6DSX_ID_ACC] = {
.reg = {
.addr = 0x10,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
.addr = 0x11,
.mask = GENMASK(7, 4),
},
- .odr_avl[0] = { 13, 0x01 },
- .odr_avl[1] = { 26, 0x02 },
- .odr_avl[2] = { 52, 0x03 },
- .odr_avl[3] = { 104, 0x04 },
- .odr_avl[4] = { 208, 0x05 },
- .odr_avl[5] = { 416, 0x06 },
+ .odr_avl[0] = { 12500, 0x01 },
+ .odr_avl[1] = { 26000, 0x02 },
+ .odr_avl[2] = { 52000, 0x03 },
+ .odr_avl[3] = { 104000, 0x04 },
+ .odr_avl[4] = { 208000, 0x05 },
+ .odr_avl[5] = { 416000, 0x06 },
+ .odr_len = 6,
},
},
.fs_table = {
@@ -826,6 +1167,40 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.fs_len = 4,
},
},
+ .irq_config = {
+ .irq1 = {
+ .addr = 0x0d,
+ .mask = BIT(3),
+ },
+ .irq2 = {
+ .addr = 0x0e,
+ .mask = BIT(3),
+ },
+ .lir = {
+ .addr = 0x56,
+ .mask = BIT(0),
+ },
+ .clear_on_read = {
+ .addr = 0x56,
+ .mask = BIT(6),
+ },
+ .irq1_func = {
+ .addr = 0x5e,
+ .mask = BIT(5),
+ },
+ .irq2_func = {
+ .addr = 0x5f,
+ .mask = BIT(5),
+ },
+ .hla = {
+ .addr = 0x12,
+ .mask = BIT(5),
+ },
+ .od = {
+ .addr = 0x12,
+ .mask = BIT(4),
+ },
+ },
.batch = {
[ST_LSM6DSX_ID_ACC] = {
.addr = 0x09,
@@ -858,6 +1233,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.addr = 0x0a,
.mask = GENMASK(7, 6),
},
+ .freq_fine = 0x63,
},
.shub_settings = {
.page_mux = {
@@ -884,6 +1260,21 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.slv0_addr = 0x15,
.dw_slv0_addr = 0x21,
.batch_en = BIT(3),
+ },
+ .event_settings = {
+ .enable_reg = {
+ .addr = 0x58,
+ .mask = BIT(7),
+ },
+ .wakeup_reg = {
+ .addr = 0x5B,
+ .mask = GENMASK(5, 0),
+ },
+ .wakeup_src_reg = 0x1b,
+ .wakeup_src_status_mask = BIT(3),
+ .wakeup_src_z_mask = BIT(0),
+ .wakeup_src_y_mask = BIT(1),
+ .wakeup_src_x_mask = BIT(2),
}
},
};
@@ -967,36 +1358,37 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
return 0;
}
-int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr, u8 *val)
+int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u32 odr, u8 *val)
{
const struct st_lsm6dsx_odr_table_entry *odr_table;
int i;
odr_table = &sensor->hw->settings->odr_table[sensor->id];
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
+ for (i = 0; i < odr_table->odr_len; i++) {
/*
* ext devices can run at a different odr with respect
* to the accel sensor
*/
- if (odr_table->odr_avl[i].hz >= odr)
+ if (odr_table->odr_avl[i].milli_hz >= odr)
break;
+ }
- if (i == ST_LSM6DSX_ODR_LIST_SIZE)
+ if (i == odr_table->odr_len)
return -EINVAL;
*val = odr_table->odr_avl[i].val;
-
- return 0;
+ return odr_table->odr_avl[i].milli_hz;
}
-static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr,
- enum st_lsm6dsx_sensor_id id)
+static int
+st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u32 odr,
+ enum st_lsm6dsx_sensor_id id)
{
struct st_lsm6dsx_sensor *ref = iio_priv(hw->iio_devs[id]);
if (odr > 0) {
if (hw->enable_mask & BIT(id))
- return max_t(u16, ref->odr, odr);
+ return max_t(u32, ref->odr, odr);
else
return odr;
} else {
@@ -1004,7 +1396,8 @@ static u16 st_lsm6dsx_check_odr_dependency(struct st_lsm6dsx_hw *hw, u16 odr,
}
}
-static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
+static int
+st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u32 req_odr)
{
struct st_lsm6dsx_sensor *ref_sensor = sensor;
struct st_lsm6dsx_hw *hw = sensor->hw;
@@ -1018,7 +1411,7 @@ static int st_lsm6dsx_set_odr(struct st_lsm6dsx_sensor *sensor, u16 req_odr)
case ST_LSM6DSX_ID_EXT1:
case ST_LSM6DSX_ID_EXT2:
case ST_LSM6DSX_ID_ACC: {
- u16 odr;
+ u32 odr;
int i;
/*
@@ -1058,7 +1451,7 @@ int st_lsm6dsx_sensor_set_enable(struct st_lsm6dsx_sensor *sensor,
bool enable)
{
struct st_lsm6dsx_hw *hw = sensor->hw;
- u16 odr = enable ? sensor->odr : 0;
+ u32 odr = enable ? sensor->odr : 0;
int err;
err = st_lsm6dsx_set_odr(sensor, odr);
@@ -1084,14 +1477,15 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000 / sensor->odr;
+ delay = 1000000000 / sensor->odr;
usleep_range(delay, 2 * delay);
err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
if (err < 0)
return err;
- st_lsm6dsx_sensor_set_enable(sensor, false);
+ if (!hw->enable_event)
+ st_lsm6dsx_sensor_set_enable(sensor, false);
*val = (s16)le16_to_cpu(data);
@@ -1115,8 +1509,9 @@ static int st_lsm6dsx_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr;
- ret = IIO_VAL_INT;
+ *val = sensor->odr / 1000;
+ *val2 = (sensor->odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -1149,8 +1544,11 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ: {
u8 data;
- err = st_lsm6dsx_check_odr(sensor, val, &data);
- if (!err)
+ val = val * 1000 + val2 / 1000;
+ val = st_lsm6dsx_check_odr(sensor, val, &data);
+ if (val < 0)
+ err = val;
+ else
sensor->odr = val;
break;
}
@@ -1164,6 +1562,144 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
return err;
}
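The milli-Hz bookkeeping used by read_raw/write_raw above, as a standalone round-trip sketch (the userspace value 12.5 Hz is an arbitrary example):

#include <stdio.h>

int main(void)
{
	int val = 12, val2 = 500000;	/* 12.5 Hz as IIO_VAL_INT_PLUS_MICRO */
	unsigned int milli_hz = val * 1000 + val2 / 1000;	/* 12500 */

	/* read_raw path: back to the integer/micro pair */
	printf("odr = %u milli-Hz -> %u.%06u Hz\n", milli_hz,
	       milli_hz / 1000, (milli_hz % 1000) * 1000);
	return 0;
}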
+static int st_lsm6dsx_event_setup(struct st_lsm6dsx_hw *hw, int state)
+{
+ const struct st_lsm6dsx_reg *reg;
+ unsigned int data;
+ int err;
+
+ if (!hw->settings->irq_config.irq1_func.addr)
+ return -ENOTSUPP;
+
+ reg = &hw->settings->event_settings.enable_reg;
+ if (reg->addr) {
+ data = ST_LSM6DSX_SHIFT_VAL(state, reg->mask);
+ err = st_lsm6dsx_update_bits_locked(hw, reg->addr,
+ reg->mask, data);
+ if (err < 0)
+ return err;
+ }
+
+ /* Enable wakeup interrupt */
+ data = ST_LSM6DSX_SHIFT_VAL(state, hw->irq_routing->mask);
+ return st_lsm6dsx_update_bits_locked(hw, hw->irq_routing->addr,
+ hw->irq_routing->mask, data);
+}
+
+static int st_lsm6dsx_read_event(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ *val2 = 0;
+ *val = hw->event_threshold;
+
+ return IIO_VAL_INT;
+}
+
+static int
+st_lsm6dsx_write_event(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_reg *reg;
+ unsigned int data;
+ int err;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (val < 0 || val > 31)
+ return -EINVAL;
+
+ reg = &hw->settings->event_settings.wakeup_reg;
+ data = ST_LSM6DSX_SHIFT_VAL(val, reg->mask);
+ err = st_lsm6dsx_update_bits_locked(hw, reg->addr,
+ reg->mask, data);
+ if (err < 0)
+ return -EINVAL;
+
+ hw->event_threshold = val;
+
+ return 0;
+}
+
+static int
+st_lsm6dsx_read_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ return !!(hw->enable_event & BIT(chan->channel2));
+}
+
+static int
+st_lsm6dsx_write_event_config(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ u8 enable_event;
+ int err = 0;
+
+ if (type != IIO_EV_TYPE_THRESH)
+ return -EINVAL;
+
+ if (state) {
+ enable_event = hw->enable_event | BIT(chan->channel2);
+
+ /* do not reconfigure hw if events are already enabled */
+ if (hw->enable_event)
+ goto out;
+ } else {
+ enable_event = hw->enable_event & ~BIT(chan->channel2);
+
+ /* only turn off the sensor if no events are enabled */
+ if (enable_event)
+ goto out;
+ }
+
+ /* stop here if no changes have been made */
+ if (hw->enable_event == enable_event)
+ return 0;
+
+ err = st_lsm6dsx_event_setup(hw, state);
+ if (err < 0)
+ return err;
+
+ mutex_lock(&hw->conf_lock);
+ err = st_lsm6dsx_sensor_set_enable(sensor, state);
+ mutex_unlock(&hw->conf_lock);
+ if (err < 0)
+ return err;
+
+out:
+ hw->enable_event = enable_event;
+
+ return 0;
+}
+
int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
@@ -1193,13 +1729,14 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
char *buf)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
- enum st_lsm6dsx_sensor_id id = sensor->id;
- struct st_lsm6dsx_hw *hw = sensor->hw;
+ const struct st_lsm6dsx_odr_table_entry *odr_table;
int i, len = 0;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- hw->settings->odr_table[id].odr_avl[i].hz);
+ odr_table = &sensor->hw->settings->odr_table[sensor->id];
+ for (i = 0; i < odr_table->odr_len; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
+ odr_table->odr_avl[i].milli_hz / 1000,
+ odr_table->odr_avl[i].milli_hz % 1000);
buf[len - 1] = '\n';
return len;
@@ -1243,6 +1780,10 @@ static const struct iio_info st_lsm6dsx_acc_info = {
.attrs = &st_lsm6dsx_acc_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
+ .read_event_value = st_lsm6dsx_read_event,
+ .write_event_value = st_lsm6dsx_write_event,
+ .read_event_config = st_lsm6dsx_read_event_config,
+ .write_event_config = st_lsm6dsx_write_event_config,
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
};
@@ -1273,7 +1814,9 @@ static int st_lsm6dsx_of_get_drdy_pin(struct st_lsm6dsx_hw *hw, int *drdy_pin)
return of_property_read_u32(np, "st,drdy-int-pin", drdy_pin);
}
-static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
+static int
+st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw,
+ const struct st_lsm6dsx_reg **drdy_reg)
{
int err = 0, drdy_pin;
@@ -1287,10 +1830,12 @@ static int st_lsm6dsx_get_drdy_reg(struct st_lsm6dsx_hw *hw, u8 *drdy_reg)
switch (drdy_pin) {
case 1:
- *drdy_reg = hw->settings->int1_addr;
+ hw->irq_routing = &hw->settings->irq_config.irq1_func;
+ *drdy_reg = &hw->settings->irq_config.irq1;
break;
case 2:
- *drdy_reg = hw->settings->int2_addr;
+ hw->irq_routing = &hw->settings->irq_config.irq2_func;
+ *drdy_reg = &hw->settings->irq_config.irq2;
break;
default:
dev_err(hw->dev, "unsupported data ready pin\n");
@@ -1381,51 +1926,95 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
}
+
+ /* calibrate timestamp sensitivity */
+ hw->ts_gain = ST_LSM6DSX_TS_SENSITIVITY;
+ if (ts_settings->freq_fine) {
+ err = regmap_read(hw->regmap, ts_settings->freq_fine, &val);
+ if (err < 0)
+ return err;
+
+ /*
+ * linearize the AN5192 formula:
+ * 1 / (1 + x) ~= 1 - x (Taylor series)
+ * ttrim[s] = 1 / (40000 * (1 + 0.0015 * val))
+ * ttrim[ns] ~= 25000 - 37.5 * val
+ * ttrim[ns] ~= 25000 - (37500 * val) / 1000
+ */
+ hw->ts_gain -= ((s8)val * 37500) / 1000;
+ }
+
return 0;
}
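Worked example of the linearized AN5192 trimming formula above (the FREQ_FINE value is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int8_t freq_fine = 10;	/* hypothetical register content */
	long ts_gain = 25000;	/* ST_LSM6DSX_TS_SENSITIVITY, 25 us in ns */

	/* ttrim[ns] ~= 25000 - (37500 * val) / 1000 */
	ts_gain -= ((long)freq_fine * 37500) / 1000;

	printf("ts_gain = %ld ns per timestamp LSB\n", ts_gain);	/* 24625 */
	return 0;
}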
static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
{
- u8 drdy_int_reg;
+ const struct st_lsm6dsx_reg *reg;
int err;
/* device sw reset */
- err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
- ST_LSM6DSX_REG_RESET_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_RESET_MASK, 1));
+ reg = &hw->settings->reset;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
msleep(50);
/* reload trimming parameter */
- err = regmap_update_bits(hw->regmap, hw->settings->reset_addr,
- ST_LSM6DSX_REG_BOOT_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_BOOT_MASK, 1));
+ reg = &hw->settings->boot;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
msleep(50);
/* enable Block Data Update */
- err = regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_BDU_ADDR,
- ST_LSM6DSX_REG_BDU_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_BDU_MASK, 1));
+ reg = &hw->settings->bdu;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
/* enable FIFO watermark interrupt */
- err = st_lsm6dsx_get_drdy_reg(hw, &drdy_int_reg);
+ err = st_lsm6dsx_get_drdy_reg(hw, &reg);
if (err < 0)
return err;
- err = regmap_update_bits(hw->regmap, drdy_int_reg,
- ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
- FIELD_PREP(ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK,
- 1));
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
if (err < 0)
return err;
+ /* enable Latched interrupts for device events */
+ if (hw->settings->irq_config.lir.addr) {
+ reg = &hw->settings->irq_config.lir;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+
+ /* enable clear on read for latched interrupts */
+ if (hw->settings->irq_config.clear_on_read.addr) {
+ reg = &hw->settings->irq_config.clear_on_read;
+ err = regmap_update_bits(hw->regmap,
+ reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+ }
+ }
+
+	/* enable drdy-mask if available */
+ if (hw->settings->drdy_mask.addr) {
+ reg = &hw->settings->drdy_mask;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+ }
+
err = st_lsm6dsx_init_shub(hw);
if (err < 0)
return err;
@@ -1453,7 +2042,7 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = hw->settings->odr_table[id].odr_avl[0].hz;
+ sensor->odr = hw->settings->odr_table[id].odr_avl[0].milli_hz;
sensor->gain = hw->settings->fs_table[id].fs_avl[0].gain;
sensor->watermark = 1;
@@ -1476,10 +2065,138 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
return iio_dev;
}
+static bool
+st_lsm6dsx_report_motion_event(struct st_lsm6dsx_hw *hw)
+{
+ const struct st_lsm6dsx_event_settings *event_settings;
+ int err, data;
+ s64 timestamp;
+
+ if (!hw->enable_event)
+ return false;
+
+ event_settings = &hw->settings->event_settings;
+ err = st_lsm6dsx_read_locked(hw, event_settings->wakeup_src_reg,
+ &data, sizeof(data));
+ if (err < 0)
+ return false;
+
+ timestamp = iio_get_time_ns(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
+ if ((data & hw->settings->event_settings.wakeup_src_z_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_Z)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ if ((data & hw->settings->event_settings.wakeup_src_y_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_Y)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ if ((data & hw->settings->event_settings.wakeup_src_x_mask) &&
+ (hw->enable_event & BIT(IIO_MOD_X)))
+ iio_push_event(hw->iio_devs[ST_LSM6DSX_ID_ACC],
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
+
+ return data & event_settings->wakeup_src_status_mask;
+}
+
+static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private)
+{
+ struct st_lsm6dsx_hw *hw = private;
+ bool event;
+ int count;
+
+ event = st_lsm6dsx_report_motion_event(hw);
+
+ if (!hw->settings->fifo_ops.read_fifo)
+ return event ? IRQ_HANDLED : IRQ_NONE;
+
+ mutex_lock(&hw->fifo_lock);
+ count = hw->settings->fifo_ops.read_fifo(hw);
+ mutex_unlock(&hw->fifo_lock);
+
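+	/* on a shared line, claim the IRQ only if an event or FIFO data was seen */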
+ return count || event ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int st_lsm6dsx_irq_setup(struct st_lsm6dsx_hw *hw)
+{
+ struct device_node *np = hw->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+ const struct st_lsm6dsx_reg *reg;
+ unsigned long irq_type;
+ bool irq_active_low;
+ int err;
+
+ irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ irq_active_low = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ irq_active_low = true;
+ break;
+ default:
+ dev_info(hw->dev, "mode %lx unsupported\n", irq_type);
+ return -EINVAL;
+ }
+
+ reg = &hw->settings->irq_config.hla;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(irq_active_low,
+ reg->mask));
+ if (err < 0)
+ return err;
+
+ pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
+ if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ (pdata && pdata->open_drain)) {
+ reg = &hw->settings->irq_config.od;
+ err = regmap_update_bits(hw->regmap, reg->addr, reg->mask,
+ ST_LSM6DSX_SHIFT_VAL(1, reg->mask));
+ if (err < 0)
+ return err;
+
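+		/* an open-drain interrupt line can be shared with other devices */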
+ irq_type |= IRQF_SHARED;
+ }
+
+ err = devm_request_threaded_irq(hw->dev, hw->irq,
+ NULL,
+ st_lsm6dsx_handler_thread,
+ irq_type | IRQF_ONESHOT,
+ "lsm6dsx", hw);
+ if (err) {
+ dev_err(hw->dev, "failed to request trigger irq %d\n",
+ hw->irq);
+ return err;
+ }
+
+ return 0;
+}
+
int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
struct regmap *regmap)
{
+ struct st_sensors_platform_data *pdata = dev->platform_data;
const struct st_lsm6dsx_shub_settings *hub_settings;
+ struct device_node *np = dev->of_node;
struct st_lsm6dsx_hw *hw;
const char *name = NULL;
int i, err;
@@ -1524,6 +2241,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
}
if (hw->irq > 0) {
+ err = st_lsm6dsx_irq_setup(hw);
+ if (err < 0)
+ return err;
+
err = st_lsm6dsx_fifo_setup(hw);
if (err < 0)
return err;
@@ -1538,6 +2259,10 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
}
+ if ((np && of_property_read_bool(np, "wakeup-source")) ||
+ (pdata && pdata->wakeup_source))
+ device_init_wakeup(dev, true);
+
return 0;
}
EXPORT_SYMBOL(st_lsm6dsx_probe);
@@ -1556,6 +2281,13 @@ static int __maybe_unused st_lsm6dsx_suspend(struct device *dev)
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
+ if (device_may_wakeup(dev) &&
+ sensor->id == ST_LSM6DSX_ID_ACC && hw->enable_event) {
+ /* Enable wake from IRQ */
+ enable_irq_wake(hw->irq);
+ continue;
+ }
+
if (sensor->id == ST_LSM6DSX_ID_EXT0 ||
sensor->id == ST_LSM6DSX_ID_EXT1 ||
sensor->id == ST_LSM6DSX_ID_EXT2)
@@ -1585,6 +2317,10 @@ static int __maybe_unused st_lsm6dsx_resume(struct device *dev)
continue;
sensor = iio_priv(hw->iio_devs[i]);
+ if (device_may_wakeup(dev) &&
+ sensor->id == ST_LSM6DSX_ID_ACC && hw->enable_event)
+ disable_irq_wake(hw->irq);
+
if (!(hw->suspend_mask & BIT(sensor->id)))
continue;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index f52511059545..cd47ec1fedcb 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -87,6 +87,14 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,lsm9ds1-imu",
.data = (void *)ST_LSM9DS1_ID,
},
+ {
+ .compatible = "st,lsm6ds0",
+ .data = (void *)ST_LSM6DS0_ID,
+ },
+ {
+ .compatible = "st,lsm6dsrx",
+ .data = (void *)ST_LSM6DSRX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -104,6 +112,8 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
{ ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
{ ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
+ { ST_LSM6DS0_DEV_NAME, ST_LSM6DS0_ID },
+ { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index ea472cf6db7b..fa5d1001a46c 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -51,10 +51,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.addr = 0x60,
.mask = GENMASK(3, 2),
},
- .odr_avl[0] = { 10, 0x0 },
- .odr_avl[1] = { 20, 0x1 },
- .odr_avl[2] = { 50, 0x2 },
- .odr_avl[3] = { 100, 0x3 },
+ .odr_avl[0] = { 10000, 0x0 },
+ .odr_avl[1] = { 20000, 0x1 },
+ .odr_avl[2] = { 50000, 0x2 },
+ .odr_avl[3] = { 100000, 0x3 },
+ .odr_len = 4,
},
.fs_table = {
.fs_avl[0] = {
@@ -93,11 +94,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
{
struct st_lsm6dsx_sensor *sensor;
- u16 odr;
+ u32 odr;
sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
- odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
- msleep((2000U / odr) + 1);
+ odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 12500;
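+	/* odr is in milli-Hz, so this waits for roughly two ODR periods */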
+ msleep((2000000U / odr) + 1);
}
/**
@@ -317,17 +318,18 @@ st_lsm6dsx_shub_write_with_mask(struct st_lsm6dsx_sensor *sensor,
static int
st_lsm6dsx_shub_get_odr_val(struct st_lsm6dsx_sensor *sensor,
- u16 odr, u16 *val)
+ u32 odr, u16 *val)
{
const struct st_lsm6dsx_ext_dev_settings *settings;
int i;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++)
- if (settings->odr_table.odr_avl[i].hz == odr)
+ for (i = 0; i < settings->odr_table.odr_len; i++) {
+ if (settings->odr_table.odr_avl[i].milli_hz == odr)
break;
+ }
- if (i == ST_LSM6DSX_ODR_LIST_SIZE)
+ if (i == settings->odr_table.odr_len)
return -EINVAL;
*val = settings->odr_table.odr_avl[i].val;
@@ -335,7 +337,7 @@ st_lsm6dsx_shub_get_odr_val(struct st_lsm6dsx_sensor *sensor,
}
static int
-st_lsm6dsx_shub_set_odr(struct st_lsm6dsx_sensor *sensor, u16 odr)
+st_lsm6dsx_shub_set_odr(struct st_lsm6dsx_sensor *sensor, u32 odr)
{
const struct st_lsm6dsx_ext_dev_settings *settings;
u16 val;
@@ -440,7 +442,7 @@ st_lsm6dsx_shub_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
- delay = 1000000 / sensor->odr;
+ delay = 1000000000 / sensor->odr;
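+	/* sensor->odr is in milli-Hz: delay is one ODR period in us */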
usleep_range(delay, 2 * delay);
len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3);
@@ -480,8 +482,9 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
iio_device_release_direct_mode(iio_dev);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
- *val = sensor->odr;
- ret = IIO_VAL_INT;
+ *val = sensor->odr / 1000;
+ *val2 = (sensor->odr % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -512,6 +515,7 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ: {
u16 data;
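+		/* convert IIO_VAL_INT_PLUS_MICRO (Hz) to milli-Hz */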
+ val = val * 1000 + val2 / 1000;
err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data);
if (!err)
sensor->odr = val;
@@ -537,12 +541,11 @@ st_lsm6dsx_shub_sampling_freq_avail(struct device *dev,
int i, len = 0;
settings = sensor->ext_info.settings;
- for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++) {
- u16 val = settings->odr_table.odr_avl[i].hz;
+ for (i = 0; i < settings->odr_table.odr_len; i++) {
+ u32 val = settings->odr_table.odr_avl[i].milli_hz;
- if (val > 0)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- val);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
+ val / 1000, val % 1000);
}
buf[len - 1] = '\n';
@@ -607,7 +610,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor = iio_priv(iio_dev);
sensor->id = id;
sensor->hw = hw;
- sensor->odr = info->odr_table.odr_avl[0].hz;
+ sensor->odr = info->odr_table.odr_avl[0].milli_hz;
sensor->gain = info->fs_table.fs_avl[0].gain;
sensor->ext_info.settings = info;
sensor->ext_info.addr = i2c_addr;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 344b28dddebb..67ff36eac247 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -87,6 +87,14 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,lsm9ds1-imu",
.data = (void *)ST_LSM9DS1_ID,
},
+ {
+ .compatible = "st,lsm6ds0",
+ .data = (void *)ST_LSM6DS0_ID,
+ },
+ {
+ .compatible = "st,lsm6dsrx",
+ .data = (void *)ST_LSM6DSRX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -104,6 +112,8 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DS3TRC_DEV_NAME, ST_LSM6DS3TRC_ID },
{ ST_ISM330DHCX_DEV_NAME, ST_ISM330DHCX_ID },
{ ST_LSM9DS1_DEV_NAME, ST_LSM9DS1_ID },
+ { ST_LSM6DS0_DEV_NAME, ST_LSM6DS0_ID },
+ { ST_LSM6DSRX_DEV_NAME, ST_LSM6DSRX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 524a686077ca..a46cdf2d8833 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1238,6 +1238,16 @@ static ssize_t iio_show_dev_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+static ssize_t iio_show_dev_label(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ return snprintf(buf, PAGE_SIZE, "%s\n", indio_dev->label);
+}
+
+static DEVICE_ATTR(label, S_IRUGO, iio_show_dev_label, NULL);
+
static ssize_t iio_show_timestamp_clock(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1354,6 +1364,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
if (indio_dev->name)
attrcount++;
+ if (indio_dev->label)
+ attrcount++;
if (clk)
attrcount++;
@@ -1376,6 +1388,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
if (indio_dev->name)
indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+ if (indio_dev->label)
+ indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
if (clk)
indio_dev->chan_attr_group.attrs[attrn++] = clk;
@@ -1610,7 +1624,7 @@ static const struct file_operations iio_buffer_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = iio_ioctl,
- .compat_ioctl = iio_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
@@ -1647,6 +1661,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
+ indio_dev->label = of_get_property(indio_dev->dev.of_node, "label",
+ NULL);
+
ret = iio_check_unique_scan_index(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 4a1a883dc061..9968f982fbc7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -32,6 +32,17 @@ config ADJD_S311
This driver can also be built as a module. If so, the module
will be called adjd_s311.
+config ADUX1020
+ tristate "ADUX1020 photometric sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Analog Devices
+ ADUX1020 photometric sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adux1020.
+
config AL3320A
tristate "AL3320A ambient light sensor"
depends on I2C
@@ -496,6 +507,17 @@ config VCNL4035
To compile this driver as a module, choose M here: the
module will be called vcnl4035.
+config VEML6030
+ tristate "VEML6030 ambient light sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6030
+ ambient light sensor (ALS).
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6030.
+
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 00d1f9b98f39..c98d1cefb861 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ACPI_ALS) += acpi-als.o
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
+obj-$(CONFIG_ADUX1020) += adux1020.o
obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
@@ -48,6 +49,7 @@ obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
obj-$(CONFIG_VCNL4035) += vcnl4035.o
+obj-$(CONFIG_VEML6030) += veml6030.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VL6180) += vl6180.o
obj-$(CONFIG_ZOPT2201) += zopt2201.o
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
new file mode 100644
index 000000000000..b07797ac10d7
--- /dev/null
+++ b/drivers/iio/light/adux1020.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adux1020.c - Support for Analog Devices ADUX1020 photometric sensor
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: Triggered buffer support
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+#define ADUX1020_REGMAP_NAME "adux1020_regmap"
+#define ADUX1020_DRV_NAME "adux1020"
+
+/* System registers */
+#define ADUX1020_REG_CHIP_ID 0x08
+#define ADUX1020_REG_SLAVE_ADDRESS 0x09
+
+#define ADUX1020_REG_SW_RESET 0x0f
+#define ADUX1020_REG_INT_ENABLE 0x1c
+#define ADUX1020_REG_INT_POLARITY 0x1d
+#define ADUX1020_REG_PROX_TH_ON1 0x2a
+#define ADUX1020_REG_PROX_TH_OFF1 0x2b
+#define ADUX1020_REG_PROX_TYPE 0x2f
+#define ADUX1020_REG_TEST_MODES_3 0x32
+#define ADUX1020_REG_FORCE_MODE 0x33
+#define ADUX1020_REG_FREQUENCY 0x40
+#define ADUX1020_REG_LED_CURRENT 0x41
+#define ADUX1020_REG_OP_MODE 0x45
+#define ADUX1020_REG_INT_MASK 0x48
+#define ADUX1020_REG_INT_STATUS 0x49
+#define ADUX1020_REG_DATA_BUFFER 0x60
+
+/* Chip ID bits */
+#define ADUX1020_CHIP_ID_MASK GENMASK(11, 0)
+#define ADUX1020_CHIP_ID 0x03fc
+
+#define ADUX1020_SW_RESET BIT(1)
+#define ADUX1020_FIFO_FLUSH BIT(15)
+#define ADUX1020_OP_MODE_MASK GENMASK(3, 0)
+#define ADUX1020_DATA_OUT_MODE_MASK GENMASK(7, 4)
+#define ADUX1020_DATA_OUT_PROX_I FIELD_PREP(ADUX1020_DATA_OUT_MODE_MASK, 1)
+
+#define ADUX1020_MODE_INT_MASK GENMASK(7, 0)
+#define ADUX1020_INT_ENABLE 0x2094
+#define ADUX1020_INT_DISABLE 0x2090
+#define ADUX1020_PROX_INT_ENABLE 0x00f0
+#define ADUX1020_PROX_ON1_INT BIT(0)
+#define ADUX1020_PROX_OFF1_INT BIT(1)
+#define ADUX1020_FIFO_INT_ENABLE 0x7f
+#define ADUX1020_MODE_INT_DISABLE 0xff
+#define ADUX1020_MODE_INT_STATUS_MASK GENMASK(7, 0)
+#define ADUX1020_FIFO_STATUS_MASK GENMASK(15, 8)
+#define ADUX1020_INT_CLEAR 0xff
+#define ADUX1020_PROX_TYPE BIT(15)
+
+#define ADUX1020_INT_PROX_ON1 BIT(0)
+#define ADUX1020_INT_PROX_OFF1 BIT(1)
+
+#define ADUX1020_FORCE_CLOCK_ON 0x0f4f
+#define ADUX1020_FORCE_CLOCK_RESET 0x0040
+#define ADUX1020_ACTIVE_4_STATE 0x0008
+
+#define ADUX1020_PROX_FREQ_MASK GENMASK(7, 4)
+#define ADUX1020_PROX_FREQ(x) FIELD_PREP(ADUX1020_PROX_FREQ_MASK, x)
+
+#define ADUX1020_LED_CURRENT_MASK GENMASK(3, 0)
+#define ADUX1020_LED_PIREF_EN BIT(12)
+
+/* Operating modes */
+enum adux1020_op_modes {
+ ADUX1020_MODE_STANDBY,
+ ADUX1020_MODE_PROX_I,
+ ADUX1020_MODE_PROX_XY,
+ ADUX1020_MODE_GEST,
+ ADUX1020_MODE_SAMPLE,
+ ADUX1020_MODE_FORCE = 0x0e,
+ ADUX1020_MODE_IDLE = 0x0f,
+};
+
+struct adux1020_data {
+ struct i2c_client *client;
+ struct iio_dev *indio_dev;
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+struct adux1020_mode_data {
+ u8 bytes;
+ u8 buf_len;
+ u16 int_en;
+};
+
+static const struct adux1020_mode_data adux1020_modes[] = {
+ [ADUX1020_MODE_PROX_I] = {
+ .bytes = 2,
+ .buf_len = 1,
+ .int_en = ADUX1020_PROX_INT_ENABLE,
+ },
+};
+
+static const struct regmap_config adux1020_regmap_config = {
+ .name = ADUX1020_REGMAP_NAME,
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = 0x6F,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct reg_sequence adux1020_def_conf[] = {
+ { 0x000c, 0x000f },
+ { 0x0010, 0x1010 },
+ { 0x0011, 0x004c },
+ { 0x0012, 0x5f0c },
+ { 0x0013, 0xada5 },
+ { 0x0014, 0x0080 },
+ { 0x0015, 0x0000 },
+ { 0x0016, 0x0600 },
+ { 0x0017, 0x0000 },
+ { 0x0018, 0x2693 },
+ { 0x0019, 0x0004 },
+ { 0x001a, 0x4280 },
+ { 0x001b, 0x0060 },
+ { 0x001c, 0x2094 },
+ { 0x001d, 0x0020 },
+ { 0x001e, 0x0001 },
+ { 0x001f, 0x0100 },
+ { 0x0020, 0x0320 },
+ { 0x0021, 0x0A13 },
+ { 0x0022, 0x0320 },
+ { 0x0023, 0x0113 },
+ { 0x0024, 0x0000 },
+ { 0x0025, 0x2412 },
+ { 0x0026, 0x2412 },
+ { 0x0027, 0x0022 },
+ { 0x0028, 0x0000 },
+ { 0x0029, 0x0300 },
+ { 0x002a, 0x0700 },
+ { 0x002b, 0x0600 },
+ { 0x002c, 0x6000 },
+ { 0x002d, 0x4000 },
+ { 0x002e, 0x0000 },
+ { 0x002f, 0x0000 },
+ { 0x0030, 0x0000 },
+ { 0x0031, 0x0000 },
+ { 0x0032, 0x0040 },
+ { 0x0033, 0x0008 },
+ { 0x0034, 0xE400 },
+ { 0x0038, 0x8080 },
+ { 0x0039, 0x8080 },
+ { 0x003a, 0x2000 },
+ { 0x003b, 0x1f00 },
+ { 0x003c, 0x2000 },
+ { 0x003d, 0x2000 },
+ { 0x003e, 0x0000 },
+ { 0x0040, 0x8069 },
+ { 0x0041, 0x1f2f },
+ { 0x0042, 0x4000 },
+ { 0x0043, 0x0000 },
+ { 0x0044, 0x0008 },
+ { 0x0046, 0x0000 },
+ { 0x0048, 0x00ef },
+ { 0x0049, 0x0000 },
+ { 0x0045, 0x0000 },
+};
+
+static const int adux1020_rates[][2] = {
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 500000 },
+ { 1, 0 },
+ { 2, 0 },
+ { 5, 0 },
+ { 10, 0 },
+ { 20, 0 },
+ { 50, 0 },
+ { 100, 0 },
+ { 190, 0 },
+ { 450, 0 },
+ { 820, 0 },
+ { 1400, 0 },
+};
+
+static const int adux1020_led_currents[][2] = {
+ { 0, 25000 },
+ { 0, 40000 },
+ { 0, 55000 },
+ { 0, 70000 },
+ { 0, 85000 },
+ { 0, 100000 },
+ { 0, 115000 },
+ { 0, 130000 },
+ { 0, 145000 },
+ { 0, 160000 },
+ { 0, 175000 },
+ { 0, 190000 },
+ { 0, 205000 },
+ { 0, 220000 },
+ { 0, 235000 },
+ { 0, 250000 },
+};
+
+static int adux1020_flush_fifo(struct adux1020_data *data)
+{
+ int ret;
+
+ /* Force Idle mode */
+ ret = regmap_write(data->regmap, ADUX1020_REG_FORCE_MODE,
+ ADUX1020_ACTIVE_4_STATE);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK, ADUX1020_MODE_FORCE);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK, ADUX1020_MODE_IDLE);
+ if (ret < 0)
+ return ret;
+
+ /* Flush FIFO */
+ ret = regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_STATUS,
+ ADUX1020_FIFO_FLUSH);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_RESET);
+}
+
+static int adux1020_read_fifo(struct adux1020_data *data, u16 *buf, u8 buf_len)
+{
+ unsigned int regval;
+ int i, ret;
+
+ /* Enable 32MHz clock */
+ ret = regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_ON);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < buf_len; i++) {
+ ret = regmap_read(data->regmap, ADUX1020_REG_DATA_BUFFER,
+ &regval);
+ if (ret < 0)
+ return ret;
+
+ buf[i] = regval;
+ }
+
+ /* Set 32MHz clock to be controlled by internal state machine */
+ return regmap_write(data->regmap, ADUX1020_REG_TEST_MODES_3,
+ ADUX1020_FORCE_CLOCK_RESET);
+}
+
+static int adux1020_set_mode(struct adux1020_data *data,
+ enum adux1020_op_modes mode)
+{
+ int ret;
+
+ /* Switch to standby mode before changing the mode */
+ ret = regmap_write(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_MODE_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ /* Set data out and switch to the desired mode */
+ switch (mode) {
+ case ADUX1020_MODE_PROX_I:
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_DATA_OUT_MODE_MASK,
+ ADUX1020_DATA_OUT_PROX_I);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_OP_MODE,
+ ADUX1020_OP_MODE_MASK,
+ ADUX1020_MODE_PROX_I);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adux1020_measure(struct adux1020_data *data,
+ enum adux1020_op_modes mode,
+ u16 *val)
+{
+ unsigned int status;
+ int ret, tries = 50;
+
+ /* Disable INT pin as polling is going to be used */
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_ENABLE,
+ ADUX1020_INT_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ /* Enable mode interrupt */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK,
+ adux1020_modes[mode].int_en);
+ if (ret < 0)
+ return ret;
+
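+	/* poll the interrupt status until enough FIFO bytes are available (~1s) */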
+ while (tries--) {
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ status &= ADUX1020_FIFO_STATUS_MASK;
+ if (status >= adux1020_modes[mode].bytes)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0)
+ return -EIO;
+
+ ret = adux1020_read_fifo(data, val, adux1020_modes[mode].buf_len);
+ if (ret < 0)
+ return ret;
+
+ /* Clear mode interrupt */
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_STATUS,
+ (~adux1020_modes[mode].int_en));
+ if (ret < 0)
+ return ret;
+
+ /* Disable mode interrupts */
+ return regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK,
+ ADUX1020_MODE_INT_DISABLE);
+}
+
+static int adux1020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u16 buf[3];
+ int ret = -EINVAL;
+ unsigned int regval;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = adux1020_set_mode(data, ADUX1020_MODE_PROX_I);
+ if (ret < 0)
+ goto fail;
+
+ ret = adux1020_measure(data, ADUX1020_MODE_PROX_I, buf);
+ if (ret < 0)
+ goto fail;
+
+ *val = buf[0];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ ret = regmap_read(data->regmap,
+ ADUX1020_REG_LED_CURRENT, &regval);
+ if (ret < 0)
+ goto fail;
+
+ regval = regval & ADUX1020_LED_CURRENT_MASK;
+
+ *val = adux1020_led_currents[regval][0];
+ *val2 = adux1020_led_currents[regval][1];
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = regmap_read(data->regmap, ADUX1020_REG_FREQUENCY,
+ &regval);
+ if (ret < 0)
+ goto fail;
+
+ regval = FIELD_GET(ADUX1020_PROX_FREQ_MASK, regval);
+
+ *val = adux1020_rates[regval][0];
+ *val2 = adux1020_rates[regval][1];
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+};
+
+static inline int adux1020_find_index(const int array[][2], int count, int val,
+ int val2)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (val == array[i][0] && val2 == array[i][1])
+ return i;
+
+ return -EINVAL;
+}
+
+static int adux1020_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int i, ret = -EINVAL;
+
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type == IIO_PROXIMITY) {
+ i = adux1020_find_index(adux1020_rates,
+ ARRAY_SIZE(adux1020_rates),
+ val, val2);
+ if (i < 0) {
+ ret = i;
+ goto fail;
+ }
+
+ ret = regmap_update_bits(data->regmap,
+ ADUX1020_REG_FREQUENCY,
+ ADUX1020_PROX_FREQ_MASK,
+ ADUX1020_PROX_FREQ(i));
+ }
+ break;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type == IIO_CURRENT) {
+ i = adux1020_find_index(adux1020_led_currents,
+ ARRAY_SIZE(adux1020_led_currents),
+ val, val2);
+ if (i < 0) {
+ ret = i;
+ goto fail;
+ }
+
+ ret = regmap_update_bits(data->regmap,
+ ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_CURRENT_MASK, i);
+ }
+ break;
+ default:
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int adux1020_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, mask;
+
+ mutex_lock(&data->lock);
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_ENABLE,
+ ADUX1020_INT_ENABLE);
+ if (ret < 0)
+ goto fail;
+
+ ret = regmap_write(data->regmap, ADUX1020_REG_INT_POLARITY, 0);
+ if (ret < 0)
+ goto fail;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = ADUX1020_PROX_ON1_INT;
+ else
+ mask = ADUX1020_PROX_OFF1_INT;
+
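+		/* a cleared bit in INT_MASK enables the event (cf. read_event_config) */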
+ if (state)
+ state = 0;
+ else
+ state = mask;
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ mask, state);
+ if (ret < 0)
+ goto fail;
+
+ /*
+		 * Trigger the proximity interrupt when the intensity is
+		 * above or below the threshold
+ */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_PROX_TYPE,
+ ADUX1020_PROX_TYPE,
+ ADUX1020_PROX_TYPE);
+ if (ret < 0)
+ goto fail;
+
+ /* Set proximity mode */
+ ret = adux1020_set_mode(data, ADUX1020_MODE_PROX_I);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+fail:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int adux1020_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, mask;
+ unsigned int regval;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ mask = ADUX1020_PROX_ON1_INT;
+ else
+ mask = ADUX1020_PROX_OFF1_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_MASK, &regval);
+ if (ret < 0)
+ return ret;
+
+ return !(regval & mask);
+}
+
+static int adux1020_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u8 reg;
+ int ret;
+ unsigned int regval;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = ADUX1020_REG_PROX_TH_ON1;
+ else
+ reg = ADUX1020_REG_PROX_TH_OFF1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = regval;
+
+ return IIO_VAL_INT;
+}
+
+static int adux1020_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct adux1020_data *data = iio_priv(indio_dev);
+ u8 reg;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ if (dir == IIO_EV_DIR_RISING)
+ reg = ADUX1020_REG_PROX_TH_ON1;
+ else
+ reg = ADUX1020_REG_PROX_TH_OFF1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Full scale threshold value is 0-65535 */
+ if (val < 0 || val > 65535)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, reg, val);
+}
+
+static const struct iio_event_spec adux1020_proximity_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec adux1020_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = adux1020_proximity_event,
+ .num_event_specs = ARRAY_SIZE(adux1020_proximity_event),
+ },
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .extend_name = "led",
+ .output = 1,
+ },
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "0.1 0.2 0.5 1 2 5 10 20 50 100 190 450 820 1400");
+
+static struct attribute *adux1020_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adux1020_attribute_group = {
+ .attrs = adux1020_attributes,
+};
+
+static const struct iio_info adux1020_info = {
+ .attrs = &adux1020_attribute_group,
+ .read_raw = adux1020_read_raw,
+ .write_raw = adux1020_write_raw,
+ .read_event_config = adux1020_read_event_config,
+ .write_event_config = adux1020_write_event_config,
+ .read_event_value = adux1020_read_thresh,
+ .write_event_value = adux1020_write_thresh,
+};
+
+static irqreturn_t adux1020_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct adux1020_data *data = iio_priv(indio_dev);
+ int ret, status;
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_INT_STATUS, &status);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ status &= ADUX1020_MODE_INT_STATUS_MASK;
+
+ if (status & ADUX1020_INT_PROX_ON1) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (status & ADUX1020_INT_PROX_OFF1) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ regmap_update_bits(data->regmap, ADUX1020_REG_INT_STATUS,
+ ADUX1020_MODE_INT_MASK, ADUX1020_INT_CLEAR);
+
+ return IRQ_HANDLED;
+}
+
+static int adux1020_chip_init(struct adux1020_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ADUX1020_REG_CHIP_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if ((val & ADUX1020_CHIP_ID_MASK) != ADUX1020_CHIP_ID) {
+ dev_err(&client->dev, "invalid chip id 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ dev_dbg(&client->dev, "Detected ADUX1020 with chip id: 0x%04x\n", val);
+
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_SW_RESET,
+ ADUX1020_SW_RESET, ADUX1020_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Load default configuration */
+ ret = regmap_multi_reg_write(data->regmap, adux1020_def_conf,
+ ARRAY_SIZE(adux1020_def_conf));
+ if (ret < 0)
+ return ret;
+
+ ret = adux1020_flush_fifo(data);
+ if (ret < 0)
+ return ret;
+
+ /* Use LED_IREF for proximity mode */
+ ret = regmap_update_bits(data->regmap, ADUX1020_REG_LED_CURRENT,
+ ADUX1020_LED_PIREF_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Mask all interrupts */
+ return regmap_update_bits(data->regmap, ADUX1020_REG_INT_MASK,
+ ADUX1020_MODE_INT_MASK, ADUX1020_MODE_INT_DISABLE);
+}
+
+static int adux1020_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adux1020_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &adux1020_info;
+ indio_dev->name = ADUX1020_DRV_NAME;
+ indio_dev->channels = adux1020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adux1020_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data = iio_priv(indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &adux1020_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap initialization failed.\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ data->client = client;
+ data->indio_dev = indio_dev;
+ mutex_init(&data->lock);
+
+ ret = adux1020_chip_init(data);
+ if (ret)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, adux1020_interrupt_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ ADUX1020_DRV_NAME, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ return ret;
+ }
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id adux1020_id[] = {
+ { "adux1020", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adux1020_id);
+
+static const struct of_device_id adux1020_of_match[] = {
+ { .compatible = "adi,adux1020" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adux1020_of_match);
+
+static struct i2c_driver adux1020_driver = {
+ .driver = {
+ .name = ADUX1020_DRV_NAME,
+ .of_match_table = adux1020_of_match,
+ },
+ .probe = adux1020_probe,
+ .id_table = adux1020_id,
+};
+module_i2c_driver(adux1020_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("ADUX1020 photometric sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 28347df78cff..adb5ab9e3439 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -59,9 +59,9 @@ struct bh1750_chip_info {
u16 int_time_low_mask;
u16 int_time_high_mask;
-}
+};
-static const bh1750_chip_info_tbl[] = {
+static const struct bh1750_chip_info bh1750_chip_info_tbl[] = {
[BH1710] = { 140, 1022, 300, 400, 250000000, 2, 0x001F, 0x03E0 },
[BH1721] = { 140, 1020, 300, 400, 250000000, 2, 0x0010, 0x03E0 },
[BH1750] = { 31, 254, 69, 1740, 57500000, 1, 0x001F, 0x00E0 },
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 1019d625adb1..90e38fcc974b 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -532,7 +532,7 @@ static int cm36651_write_prox_event_config(struct iio_dev *indio_dev,
int state)
{
struct cm36651_data *cm36651 = iio_priv(indio_dev);
- int cmd, ret = -EINVAL;
+ int cmd, ret;
mutex_lock(&cm36651->lock);
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index c5263b563fc1..d85a391e50c5 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -169,17 +169,11 @@ static const struct iio_info cros_ec_light_prox_info = {
static int cros_ec_light_prox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
struct iio_dev *indio_dev;
struct cros_ec_light_prox_state *state;
struct iio_chan_spec *channel;
int ret;
- if (!ec_dev || !ec_dev->ec_dev) {
- dev_warn(dev, "No CROS EC device found.\n");
- return -EINVAL;
- }
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 7c0291c5fe76..b542e5619ead 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -240,32 +240,42 @@ static const struct iio_info tcs3414_info = {
.attrs = &tcs3414_attribute_group,
};
-static int tcs3414_buffer_preenable(struct iio_dev *indio_dev)
+static int tcs3414_buffer_postenable(struct iio_dev *indio_dev)
{
struct tcs3414_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
data->control |= TCS3414_CONTROL_ADC_EN;
- return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
data->control);
+ if (ret)
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
}
static int tcs3414_buffer_predisable(struct iio_dev *indio_dev)
{
struct tcs3414_data *data = iio_priv(indio_dev);
- int ret;
-
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- return ret;
+ int ret, ret2;
data->control &= ~TCS3414_CONTROL_ADC_EN;
- return i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
+ ret = i2c_smbus_write_byte_data(data->client, TCS3414_CONTROL,
data->control);
+
+ ret2 = iio_triggered_buffer_predisable(indio_dev);
+ if (!ret)
+ ret = ret2;
+
+ return ret;
}
static const struct iio_buffer_setup_ops tcs3414_buffer_setup_ops = {
- .preenable = tcs3414_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
+ .postenable = tcs3414_buffer_postenable,
.predisable = tcs3414_buffer_predisable,
};
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
new file mode 100644
index 000000000000..aa25b87fca8f
--- /dev/null
+++ b/drivers/iio/light/veml6030.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VEML6030 Ambient Light Sensor
+ *
+ * Copyright (c) 2019, Rishi Gupta <gupt21@gmail.com>
+ *
+ * Datasheet: https://www.vishay.com/docs/84366/veml6030.pdf
+ * Appnote-84367: https://www.vishay.com/docs/84367/designingveml6030.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+/* Device registers */
+#define VEML6030_REG_ALS_CONF 0x00
+#define VEML6030_REG_ALS_WH 0x01
+#define VEML6030_REG_ALS_WL 0x02
+#define VEML6030_REG_ALS_PSM 0x03
+#define VEML6030_REG_ALS_DATA 0x04
+#define VEML6030_REG_WH_DATA 0x05
+#define VEML6030_REG_ALS_INT 0x06
+
+/* Bit masks for specific functionality */
+#define VEML6030_ALS_IT GENMASK(9, 6)
+#define VEML6030_PSM GENMASK(2, 1)
+#define VEML6030_ALS_PERS GENMASK(5, 4)
+#define VEML6030_ALS_GAIN GENMASK(12, 11)
+#define VEML6030_PSM_EN BIT(0)
+#define VEML6030_INT_TH_LOW BIT(15)
+#define VEML6030_INT_TH_HIGH BIT(14)
+#define VEML6030_ALS_INT_EN BIT(1)
+#define VEML6030_ALS_SD BIT(0)
+
+/*
+ * The resolution depends on both gain and integration time. The
+ * cur_resolution stores one of the resolutions listed in the
+ * table at startup and gets updated whenever the integration time
+ * or the gain is changed.
+ *
+ * Table 'resolution and maximum detection range' in appnote 84367
+ * is visualized as a 2D array. cur_gain stores the index of the
+ * gain in this table (0-3) while cur_integration_time holds the
+ * index of the integration time (0-5).
+ */
+struct veml6030_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int cur_resolution;
+ int cur_gain;
+ int cur_integration_time;
+};
+
+/* Integration time available in seconds */
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+ "0.025 0.05 0.1 0.2 0.4 0.8");
+
+/*
+ * Scale is 1/gain. Value 0.125 is ALS gain x (1/8), 0.25 is
+ * ALS gain x (1/4), 1.0 = ALS gain x 1 and 2.0 is ALS gain x 2.
+ */
+static IIO_CONST_ATTR(in_illuminance_scale_available,
+ "0.125 0.25 1.0 2.0");
+
+static struct attribute *veml6030_attributes[] = {
+ &iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group veml6030_attr_group = {
+ .attrs = veml6030_attributes,
+};
+
+/*
+ * Persistence = 1/2/4/8 x integration time
+ * This is the minimum time for which the light reading must stay
+ * above the configured threshold before the interrupt is asserted.
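+ * The rows below are indexed by integration time: 100, 200, 400,
+ * 800, 50 and 25 ms respectively.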
+ */
+static const char * const period_values[] = {
+ "0.1 0.2 0.4 0.8",
+ "0.2 0.4 0.8 1.6",
+ "0.4 0.8 1.6 3.2",
+ "0.8 1.6 3.2 6.4",
+ "0.05 0.1 0.2 0.4",
+ "0.025 0.050 0.1 0.2"
+};
+
+/*
+ * Return list of valid period values in seconds corresponding to
+ * the currently active integration time.
+ */
+static ssize_t in_illuminance_period_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret, reg, x;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ ret = ((reg >> 6) & 0xF);
+ switch (ret) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ x = ret;
+ break;
+ case 8:
+ x = 4;
+ break;
+ case 12:
+ x = 5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", period_values[x]);
+}
+
+static IIO_DEVICE_ATTR_RO(in_illuminance_period_available, 0);
+
+static struct attribute *veml6030_event_attributes[] = {
+ &iio_dev_attr_in_illuminance_period_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group veml6030_event_attr_group = {
+ .attrs = veml6030_event_attributes,
+};
+
+static int veml6030_als_pwr_on(struct veml6030_data *data)
+{
+ return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD, 0);
+}
+
+static int veml6030_als_shut_down(struct veml6030_data *data)
+{
+ return regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_SD, 1);
+}
+
+static void veml6030_als_shut_down_action(void *data)
+{
+ veml6030_als_shut_down(data);
+}
+
+static const struct iio_event_spec veml6030_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* Channel number */
+enum veml6030_chan {
+ CH_ALS,
+ CH_WHITE,
+};
+
+static const struct iio_chan_spec veml6030_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .channel = CH_ALS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = veml6030_event_spec,
+ .num_event_specs = ARRAY_SIZE(veml6030_event_spec),
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel = CH_WHITE,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static const struct regmap_config veml6030_regmap_config = {
+ .name = "veml6030_regmap",
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = VEML6030_REG_ALS_INT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int veml6030_get_intgrn_tm(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ switch ((reg >> 6) & 0xF) {
+ case 0:
+ *val2 = 100000;
+ break;
+ case 1:
+ *val2 = 200000;
+ break;
+ case 2:
+ *val2 = 400000;
+ break;
+ case 3:
+ *val2 = 800000;
+ break;
+ case 8:
+ *val2 = 50000;
+ break;
+ case 12:
+ *val2 = 25000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_set_intgrn_tm(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, new_int_time, int_idx;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val)
+ return -EINVAL;
+
+ switch (val2) {
+ case 25000:
+ new_int_time = 0x300;
+ int_idx = 5;
+ break;
+ case 50000:
+ new_int_time = 0x200;
+ int_idx = 4;
+ break;
+ case 100000:
+ new_int_time = 0x00;
+ int_idx = 3;
+ break;
+ case 200000:
+ new_int_time = 0x40;
+ int_idx = 2;
+ break;
+ case 400000:
+ new_int_time = 0x80;
+ int_idx = 1;
+ break;
+ case 800000:
+ new_int_time = 0xC0;
+ int_idx = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_IT, new_int_time);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't update als integration time %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Cache current integration time and update resolution. For every
+ * increase in integration time to next level, resolution is halved
+ * and vice-versa.
+ */
+ if (data->cur_integration_time < int_idx)
+ data->cur_resolution <<= int_idx - data->cur_integration_time;
+ else if (data->cur_integration_time > int_idx)
+ data->cur_resolution >>= data->cur_integration_time - int_idx;
+
+ data->cur_integration_time = int_idx;
+
+ return ret;
+}
+
+static int veml6030_read_persistence(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg, period, x, y;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ if (ret < 0)
+ return ret;
+
+	ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+	if (ret) {
+		dev_err(&data->client->dev,
+			"can't read als conf register %d\n", ret);
+		return ret;
+	}
+
+ /* integration time multiplied by 1/2/4/8 */
+ period = y * (1 << ((reg >> 4) & 0x03));
+
+ *val = period / 1000000;
+ *val2 = period % 1000000;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_write_persistence(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, period, x, y;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ if (ret < 0)
+ return ret;
+
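+	/* the requested period must be 1, 2, 4 or 8 integration times */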
+ if (!val) {
+ period = val2 / y;
+ } else {
+ if ((val == 1) && (val2 == 600000))
+ period = 1600000 / y;
+ else if ((val == 3) && (val2 == 200000))
+ period = 3200000 / y;
+ else if ((val == 6) && (val2 == 400000))
+ period = 6400000 / y;
+ else
+ period = -1;
+ }
+
+ if (period <= 0 || period > 8 || hweight8(period) != 1)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_PERS, (ffs(period) - 1) << 4);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set persistence value %d\n", ret);
+
+ return ret;
+}
+
+static int veml6030_set_als_gain(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ int ret, new_gain, gain_idx;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val == 0 && val2 == 125000) {
+ new_gain = 0x1000; /* 0x02 << 11 */
+ gain_idx = 3;
+ } else if (val == 0 && val2 == 250000) {
+ new_gain = 0x1800;
+ gain_idx = 2;
+ } else if (val == 1 && val2 == 0) {
+ new_gain = 0x00;
+ gain_idx = 1;
+ } else if (val == 2 && val2 == 0) {
+ new_gain = 0x800;
+ gain_idx = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_GAIN, new_gain);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't set als gain %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Cache currently set gain & update resolution. For every
+ * increase in the gain to next level, resolution is halved
+ * and vice-versa.
+ */
+ if (data->cur_gain < gain_idx)
+ data->cur_resolution <<= gain_idx - data->cur_gain;
+ else if (data->cur_gain > gain_idx)
+ data->cur_resolution >>= data->cur_gain - gain_idx;
+
+ data->cur_gain = gain_idx;
+
+ return ret;
+}
+
+static int veml6030_get_als_gain(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ switch ((reg >> 11) & 0x03) {
+ case 0:
+ *val = 1;
+ *val2 = 0;
+ break;
+ case 1:
+ *val = 2;
+ *val2 = 0;
+ break;
+ case 2:
+ *val = 0;
+ *val2 = 125000;
+ break;
+ case 3:
+ *val = 0;
+ *val2 = 250000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6030_read_thresh(struct iio_dev *indio_dev,
+ int *val, int *val2, int dir)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (dir == IIO_EV_DIR_RISING)
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_WH, &reg);
+ else
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_WL, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als threshold value %d\n", ret);
+ return ret;
+ }
+
+ *val = reg & 0xffff;
+ return IIO_VAL_INT;
+}
+
+static int veml6030_write_thresh(struct iio_dev *indio_dev,
+ int val, int val2, int dir)
+{
+ int ret;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (val > 0xFFFF || val < 0 || val2)
+ return -EINVAL;
+
+ if (dir == IIO_EV_DIR_RISING) {
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WH, val);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set high threshold %d\n", ret);
+ } else {
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WL, val);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't set low threshold %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Provide both the raw reading and the processed light value in lux.
+ * light (in lux) = resolution * raw reading
+ */
+static int veml6030_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct regmap *regmap = data->regmap;
+ struct device *dev = &data->client->dev;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = regmap_read(regmap, VEML6030_REG_ALS_DATA, &reg);
+ if (ret < 0) {
+ dev_err(dev, "can't read als data %d\n", ret);
+ return ret;
+ }
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+				/* cur_resolution is in 1/10000 lux: scale the remainder to micro-lux */
+				*val = (reg * data->cur_resolution) / 10000;
+				*val2 = ((reg * data->cur_resolution) % 10000) * 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ ret = regmap_read(regmap, VEML6030_REG_WH_DATA, &reg);
+ if (ret < 0) {
+ dev_err(dev, "can't read white data %d\n", ret);
+ return ret;
+ }
+ if (mask == IIO_CHAN_INFO_PROCESSED) {
+				*val = (reg * data->cur_resolution) / 10000;
+				*val2 = ((reg * data->cur_resolution) % 10000) * 100;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ *val = reg;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ if (chan->type == IIO_LIGHT)
+ return veml6030_get_intgrn_tm(indio_dev, val, val2);
+ return -EINVAL;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_LIGHT)
+ return veml6030_get_als_gain(indio_dev, val, val2);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return veml6030_set_intgrn_tm(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return veml6030_set_als_gain(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_read_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int *val, int *val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ case IIO_EV_DIR_FALLING:
+ return veml6030_read_thresh(indio_dev, val, val2, dir);
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_EV_INFO_PERIOD:
+ return veml6030_read_persistence(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_event_val(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info,
+ int val, int val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return veml6030_write_thresh(indio_dev, val, val2, dir);
+ case IIO_EV_INFO_PERIOD:
+ return veml6030_write_persistence(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_read_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ int ret, reg;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als conf register %d\n", ret);
+ return ret;
+ }
+
+ if (reg & VEML6030_ALS_INT_EN)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * The sensor should not be measuring light while the interrupt is
+ * being configured, so the correct sequence is:
+ * shut down -> enable/disable interrupt -> power on
+ *
+ * state = 1 enables interrupt, state = 0 disables interrupt
+ */
+static int veml6030_write_interrupt_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ int ret;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ if (state < 0 || state > 1)
+ return -EINVAL;
+
+ ret = veml6030_als_shut_down(data);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "can't disable als to configure interrupt %d\n", ret);
+ return ret;
+ }
+
+	/* set INT_EN as requested and clear SD to power the sensor back on */
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
+ VEML6030_ALS_INT_EN | VEML6030_ALS_SD, state << 1);
+ if (ret)
+ dev_err(&data->client->dev,
+ "can't enable interrupt & poweron als %d\n", ret);
+
+ return ret;
+}
+
+static const struct iio_info veml6030_info = {
+ .read_raw = veml6030_read_raw,
+ .write_raw = veml6030_write_raw,
+ .read_event_value = veml6030_read_event_val,
+ .write_event_value = veml6030_write_event_val,
+ .read_event_config = veml6030_read_interrupt_config,
+ .write_event_config = veml6030_write_interrupt_config,
+ .attrs = &veml6030_attr_group,
+ .event_attrs = &veml6030_event_attr_group,
+};
+
+static const struct iio_info veml6030_info_no_irq = {
+ .read_raw = veml6030_read_raw,
+ .write_raw = veml6030_write_raw,
+ .attrs = &veml6030_attr_group,
+};
+
+static irqreturn_t veml6030_event_handler(int irq, void *private)
+{
+ int ret, reg, evtdir;
+ struct iio_dev *indio_dev = private;
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_INT, &reg);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "can't read als interrupt register %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ /* Spurious interrupt handling */
+ if (!(reg & (VEML6030_INT_TH_HIGH | VEML6030_INT_TH_LOW)))
+ return IRQ_NONE;
+
+ if (reg & VEML6030_INT_TH_HIGH)
+ evtdir = IIO_EV_DIR_RISING;
+ else
+ evtdir = IIO_EV_DIR_FALLING;
+
+ iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
+ 0, IIO_EV_TYPE_THRESH, evtdir),
+ iio_get_time_ns(indio_dev));
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set ALS gain to 1/8, integration time to 100 ms, PSM to mode 2,
+ * persistence to 1 x integration time and the threshold
+ * interrupt disabled by default. First shut down the sensor,
+ * update the registers and then power the sensor back on.
+ */
+static int veml6030_hw_init(struct iio_dev *indio_dev)
+{
+ int ret, val;
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = veml6030_als_shut_down(data);
+ if (ret) {
+ dev_err(&client->dev, "can't shutdown als %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_CONF, 0x1001);
+ if (ret) {
+ dev_err(&client->dev, "can't setup als configs %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_PSM,
+ VEML6030_PSM | VEML6030_PSM_EN, 0x03);
+ if (ret) {
+ dev_err(&client->dev, "can't setup default PSM %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WH, 0xFFFF);
+ if (ret) {
+ dev_err(&client->dev, "can't setup high threshold %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(data->regmap, VEML6030_REG_ALS_WL, 0x0000);
+ if (ret) {
+ dev_err(&client->dev, "can't setup low threshold %d\n", ret);
+ return ret;
+ }
+
+ ret = veml6030_als_pwr_on(data);
+ if (ret) {
+ dev_err(&client->dev, "can't poweron als %d\n", ret);
+ return ret;
+ }
+
+ /* Wait 4 ms to let processor & oscillator start correctly */
+ usleep_range(4000, 4002);
+
+ /* Clear stale interrupt status bits if any during start */
+ ret = regmap_read(data->regmap, VEML6030_REG_ALS_INT, &val);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "can't clear als interrupt status %d\n", ret);
+ return ret;
+ }
+
+ /* Cache currently active measurement parameters */
+ data->cur_gain = 3;
+ data->cur_resolution = 4608;
+ data->cur_integration_time = 3;
+
+ return ret;
+}
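For reference, the 0x1001 written to the configuration register above decodes as below. The field layout follows the VEML6030 datasheet as best understood here and should be read as an assumption, not something this patch states:

	/*
	 * 0x1001 = 0b0001 0000 0000 0001
	 *   bits 12:11 = 10   -> ALS gain 1/8
	 *   bits  9:6  = 0000 -> integration time 100 ms
	 *   bits  5:4  = 00   -> persistence 1 x integration time
	 *   bit   1    = 0    -> threshold interrupt disabled
	 *   bit   0    = 1    -> shut down (cleared by veml6030_als_pwr_on())
	 */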
+
+static int veml6030_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct veml6030_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c adapter doesn't support plain i2c\n");
+ return -EOPNOTSUPP;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &veml6030_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "can't setup regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = "veml6030";
+ indio_dev->channels = veml6030_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6030_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, veml6030_event_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "veml6030", indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "irq %d request failed\n", client->irq);
+ return ret;
+ }
+ indio_dev->info = &veml6030_info;
+ } else {
+ indio_dev->info = &veml6030_info_no_irq;
+ }
+
+ ret = veml6030_hw_init(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ veml6030_als_shut_down_action, data);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused veml6030_runtime_suspend(struct device *dev)
+{
+ int ret;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_als_shut_down(data);
+ if (ret < 0)
+ dev_err(&data->client->dev, "can't suspend als %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused veml6030_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct veml6030_data *data = iio_priv(indio_dev);
+
+ ret = veml6030_als_pwr_on(data);
+ if (ret < 0)
+ dev_err(&data->client->dev, "can't resume als %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops veml6030_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(veml6030_runtime_suspend,
+ veml6030_runtime_resume, NULL)
+};
+
+static const struct of_device_id veml6030_of_match[] = {
+ { .compatible = "vishay,veml6030" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, veml6030_of_match);
+
+static const struct i2c_device_id veml6030_id[] = {
+ { "veml6030", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6030_id);
+
+static struct i2c_driver veml6030_driver = {
+ .driver = {
+ .name = "veml6030",
+ .of_match_table = veml6030_of_match,
+ .pm = &veml6030_pm_ops,
+ },
+ .probe = veml6030_probe,
+ .id_table = veml6030_id,
+};
+module_i2c_driver(veml6030_driver);
+
+MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
+MODULE_DESCRIPTION("VEML6030 Ambient Light Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index a3a268ee2896..e68184a93a6d 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 8d0f15f27dc5..29c209cc1108 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -74,6 +74,12 @@ struct bmp280_calib {
s8 H6;
};
+static const char *const bmp280_supply_names[] = {
+ "vddd", "vdda"
+};
+
+#define BMP280_NUM_SUPPLIES ARRAY_SIZE(bmp280_supply_names)
+
struct bmp280_data {
struct device *dev;
struct mutex lock;
@@ -85,8 +91,7 @@ struct bmp280_data {
struct bmp180_calib bmp180;
struct bmp280_calib bmp280;
} calib;
- struct regulator *vddd;
- struct regulator *vdda;
+ struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
unsigned int start_up_time; /* in microseconds */
/* log of base 2 of oversampling rate */
@@ -148,6 +153,8 @@ static int bmp280_read_calib(struct bmp280_data *data,
{
int ret;
unsigned int tmp;
+ __le16 l16;
+ __be16 b16;
struct device *dev = data->dev;
__le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
__le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
@@ -207,12 +214,12 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H1 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &l16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
- calib->H2 = sign_extend32(le16_to_cpu(tmp), 15);
+ calib->H2 = sign_extend32(le16_to_cpu(l16), 15);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
if (ret < 0) {
@@ -221,20 +228,20 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H3 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &b16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
- calib->H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
- (be16_to_cpu(tmp) & 0xf), 11);
+ calib->H4 = sign_extend32(((be16_to_cpu(b16) >> 4) & 0xff0) |
+ (be16_to_cpu(b16) & 0xf), 11);
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &l16, 2);
if (ret < 0) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- calib->H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+ calib->H5 = sign_extend32(((le16_to_cpu(l16) >> 4) & 0xfff), 11);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
if (ret < 0) {
@@ -979,6 +986,22 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
return 0;
}
+static void bmp280_pm_disable(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+}
+
+static void bmp280_regulators_disable(void *data)
+{
+ struct regulator_bulk_data *supplies = data;
+
+ regulator_bulk_disable(BMP280_NUM_SUPPLIES, supplies);
+}
+
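These two callbacks replace the explicit remove() path deleted further down: devres releases resources in reverse order of registration, so on unbind the IIO device is unregistered first, then bmp280_pm_disable() winds down runtime PM, and finally bmp280_regulators_disable() switches the supplies off. A minimal sketch of the registration order in probe(), using the names from this patch:

	ret = devm_add_action_or_reset(dev, bmp280_regulators_disable,
				       data->supplies);
	/* ... runtime PM setup ... */
	ret = devm_add_action_or_reset(dev, bmp280_pm_disable, dev);
	/* ... */
	return devm_iio_device_register(dev, indio_dev);
	/* unwind on detach: IIO unregister -> PM disable -> regulators off */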
int bmp280_common_probe(struct device *dev,
struct regmap *regmap,
unsigned int chip,
@@ -1033,27 +1056,28 @@ int bmp280_common_probe(struct device *dev,
}
/* Bring up regulators */
- data->vddd = devm_regulator_get(dev, "vddd");
- if (IS_ERR(data->vddd)) {
- dev_err(dev, "failed to get VDDD regulator\n");
- return PTR_ERR(data->vddd);
- }
- ret = regulator_enable(data->vddd);
+ regulator_bulk_set_supply_names(data->supplies,
+ bmp280_supply_names,
+ BMP280_NUM_SUPPLIES);
+
+ ret = devm_regulator_bulk_get(dev,
+ BMP280_NUM_SUPPLIES, data->supplies);
if (ret) {
- dev_err(dev, "failed to enable VDDD regulator\n");
+ dev_err(dev, "failed to get regulators\n");
return ret;
}
- data->vdda = devm_regulator_get(dev, "vdda");
- if (IS_ERR(data->vdda)) {
- dev_err(dev, "failed to get VDDA regulator\n");
- ret = PTR_ERR(data->vdda);
- goto out_disable_vddd;
- }
- ret = regulator_enable(data->vdda);
+
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret) {
- dev_err(dev, "failed to enable VDDA regulator\n");
- goto out_disable_vddd;
+ dev_err(dev, "failed to enable regulators\n");
+ return ret;
}
+
+ ret = devm_add_action_or_reset(dev, bmp280_regulators_disable,
+ data->supplies);
+ if (ret)
+ return ret;
+
/* Wait to make sure we started up properly */
usleep_range(data->start_up_time, data->start_up_time + 100);
@@ -1068,17 +1092,16 @@ int bmp280_common_probe(struct device *dev,
data->regmap = regmap;
ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
if (ret < 0)
- goto out_disable_vdda;
+ return ret;
if (chip_id != chip) {
dev_err(dev, "bad chip id: expected %x got %x\n",
chip, chip_id);
- ret = -EINVAL;
- goto out_disable_vdda;
+ return -EINVAL;
}
ret = data->chip_info->chip_config(data);
if (ret < 0)
- goto out_disable_vdda;
+ return ret;
dev_set_drvdata(dev, indio_dev);
@@ -1092,14 +1115,14 @@ int bmp280_common_probe(struct device *dev,
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
- goto out_disable_vdda;
+ return ret;
}
} else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
if (ret < 0) {
dev_err(data->dev,
"failed to read calibration coefficients\n");
- goto out_disable_vdda;
+ return ret;
}
}
@@ -1111,7 +1134,7 @@ int bmp280_common_probe(struct device *dev,
if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
if (ret)
- goto out_disable_vdda;
+ return ret;
}
/* Enable runtime PM */
@@ -1126,51 +1149,21 @@ int bmp280_common_probe(struct device *dev,
pm_runtime_use_autosuspend(dev);
pm_runtime_put(dev);
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(dev, bmp280_pm_disable, dev);
if (ret)
- goto out_runtime_pm_disable;
-
-
- return 0;
+ return ret;
-out_runtime_pm_disable:
- pm_runtime_get_sync(data->dev);
- pm_runtime_put_noidle(data->dev);
- pm_runtime_disable(data->dev);
-out_disable_vdda:
- regulator_disable(data->vdda);
-out_disable_vddd:
- regulator_disable(data->vddd);
- return ret;
+ return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL(bmp280_common_probe);
-int bmp280_common_remove(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct bmp280_data *data = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- pm_runtime_get_sync(data->dev);
- pm_runtime_put_noidle(data->dev);
- pm_runtime_disable(data->dev);
- regulator_disable(data->vdda);
- regulator_disable(data->vddd);
- return 0;
-}
-EXPORT_SYMBOL(bmp280_common_remove);
-
#ifdef CONFIG_PM
static int bmp280_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmp280_data *data = iio_priv(indio_dev);
- int ret;
- ret = regulator_disable(data->vdda);
- if (ret)
- return ret;
- return regulator_disable(data->vddd);
+ return regulator_bulk_disable(BMP280_NUM_SUPPLIES, data->supplies);
}
static int bmp280_runtime_resume(struct device *dev)
@@ -1179,10 +1172,7 @@ static int bmp280_runtime_resume(struct device *dev)
struct bmp280_data *data = iio_priv(indio_dev);
int ret;
- ret = regulator_enable(data->vddd);
- if (ret)
- return ret;
- ret = regulator_enable(data->vdda);
+ ret = regulator_bulk_enable(BMP280_NUM_SUPPLIES, data->supplies);
if (ret)
return ret;
usleep_range(data->start_up_time, data->start_up_time + 100);
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index acd9a3784fb4..3109c8e2cc11 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -38,11 +38,6 @@ static int bmp280_i2c_probe(struct i2c_client *client,
client->irq);
}
-static int bmp280_i2c_remove(struct i2c_client *client)
-{
- return bmp280_common_remove(&client->dev);
-}
-
static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
{"BMP0280", BMP280_CHIP_ID },
{"BMP0180", BMP180_CHIP_ID },
@@ -82,7 +77,6 @@ static struct i2c_driver bmp280_i2c_driver = {
.pm = &bmp280_dev_pm_ops,
},
.probe = bmp280_i2c_probe,
- .remove = bmp280_i2c_remove,
.id_table = bmp280_i2c_id,
};
module_i2c_driver(bmp280_i2c_driver);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 9d57b7a3b134..625b86878ad8 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -86,11 +86,6 @@ static int bmp280_spi_probe(struct spi_device *spi)
spi->irq);
}
-static int bmp280_spi_remove(struct spi_device *spi)
-{
- return bmp280_common_remove(&spi->dev);
-}
-
static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp085", },
{ .compatible = "bosch,bmp180", },
@@ -118,7 +113,6 @@ static struct spi_driver bmp280_spi_driver = {
},
.id_table = bmp280_spi_id,
.probe = bmp280_spi_probe,
- .remove = bmp280_spi_remove,
};
module_spi_driver(bmp280_spi_driver);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index eda50ef65706..57ba0e85db91 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -112,7 +112,6 @@ int bmp280_common_probe(struct device *dev,
unsigned int chip,
const char *name,
int irq);
-int bmp280_common_remove(struct device *dev);
/* PM ops */
extern const struct dev_pm_ops bmp280_dev_pm_ops;
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 2354302375de..52f53f3123b1 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -114,6 +114,7 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_baro_info = {
.read_raw = &cros_ec_baro_read,
.write_raw = &cros_ec_baro_write,
+ .read_avail = &cros_ec_sensors_core_read_avail,
};
static int cros_ec_baro_probe(struct platform_device *pdev)
@@ -149,6 +150,8 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_FREQUENCY);
+ channel->info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
channel->scan_type.shift = 0;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index ca6863b32a5f..bd972cec4830 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -14,7 +14,6 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 9d0d07930236..99dfe33ee402 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -1243,6 +1243,11 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
const struct zpa2326_private *priv = iio_priv(indio_dev);
int err;
+ /* Plug our own trigger event handler. */
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ goto err;
+
if (!priv->waken) {
/*
* We were already power supplied. Just clear hardware FIFO to
@@ -1250,7 +1255,7 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
*/
err = zpa2326_clear_fifo(indio_dev, 0);
if (err)
- goto err;
+ goto err_buffer_predisable;
}
if (!iio_trigger_using_own(indio_dev) && priv->waken) {
@@ -1260,16 +1265,13 @@ static int zpa2326_postenable_buffer(struct iio_dev *indio_dev)
*/
err = zpa2326_config_oneshot(indio_dev, priv->irq);
if (err)
- goto err;
+ goto err_buffer_predisable;
}
- /* Plug our own trigger event handler. */
- err = iio_triggered_buffer_postenable(indio_dev);
- if (err)
- goto err;
-
return 0;
+err_buffer_predisable:
+ iio_triggered_buffer_predisable(indio_dev);
err:
zpa2326_err(indio_dev, "failed to enable buffering (%d)", err);
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 47af54f14756..5b369645ef49 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -136,12 +136,13 @@ static inline int lidar_write_power(struct lidar_data *data, int val)
static int lidar_read_measurement(struct lidar_data *data, u16 *reg)
{
+ __be16 value;
int ret = data->xfer(data, LIDAR_REG_DATA_HBYTE |
(data->i2c_enabled ? LIDAR_REG_DATA_WORD_READ : 0),
- (u8 *) reg, 2);
+ (u8 *) &value, 2);
if (!ret)
- *reg = be16_to_cpu(*reg);
+ *reg = be16_to_cpu(value);
return ret;
}
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 612f79c53cfc..287d288e40c2 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -675,11 +675,15 @@ out:
return IRQ_HANDLED;
}
-static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
+static int sx9500_buffer_postenable(struct iio_dev *indio_dev)
{
struct sx9500_data *data = iio_priv(indio_dev);
int ret = 0, i;
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&data->mutex);
for (i = 0; i < SX9500_NUM_CHANNELS; i++)
@@ -696,6 +700,9 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
mutex_unlock(&data->mutex);
+ if (ret)
+ iio_triggered_buffer_predisable(indio_dev);
+
return ret;
}
@@ -704,8 +711,6 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
struct sx9500_data *data = iio_priv(indio_dev);
int ret = 0, i;
- iio_triggered_buffer_predisable(indio_dev);
-
mutex_lock(&data->mutex);
for (i = 0; i < SX9500_NUM_CHANNELS; i++)
@@ -722,12 +727,13 @@ static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
mutex_unlock(&data->mutex);
+ iio_triggered_buffer_predisable(indio_dev);
+
return ret;
}
static const struct iio_buffer_setup_ops sx9500_buffer_setup_ops = {
- .preenable = sx9500_buffer_preenable,
- .postenable = iio_triggered_buffer_postenable,
+ .postenable = sx9500_buffer_postenable,
.predisable = sx9500_buffer_predisable,
};
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 737faa0901fe..e1ccb4003015 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -4,6 +4,17 @@
#
menu "Temperature sensors"
+config LTC2983
+ tristate "Analog Devices Multi-Sensor Digital Temperature Measurement System"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the LTC2983 Multi-Sensor
+ high accuracy digital temperature measurement system.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ltc2983.
+
config MAXIM_THERMOCOUPLE
tristate "Maxim thermocouple sensors"
depends on SPI
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index baca4776ca0d..d6b850b0cf63 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -3,6 +3,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_LTC2983) += ltc2983.o
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
obj-$(CONFIG_MAX31856) += max31856.o
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
new file mode 100644
index 000000000000..ddf47023364b
--- /dev/null
+++ b/drivers/iio/temperature/ltc2983.c
@@ -0,0 +1,1557 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Analog Devices LTC2983 Multi-Sensor Digital Temperature Measurement System
+ * driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+/* register map */
+#define LTC2983_STATUS_REG 0x0000
+#define LTC2983_TEMP_RES_START_REG 0x0010
+#define LTC2983_TEMP_RES_END_REG 0x005F
+#define LTC2983_GLOBAL_CONFIG_REG 0x00F0
+#define LTC2983_MULT_CHANNEL_START_REG 0x00F4
+#define LTC2983_MULT_CHANNEL_END_REG 0x00F7
+#define LTC2983_MUX_CONFIG_REG 0x00FF
+#define LTC2983_CHAN_ASSIGN_START_REG 0x0200
+#define LTC2983_CHAN_ASSIGN_END_REG 0x024F
+#define LTC2983_CUST_SENS_TBL_START_REG 0x0250
+#define LTC2983_CUST_SENS_TBL_END_REG 0x03CF
+
+#define LTC2983_DIFFERENTIAL_CHAN_MIN 2
+#define LTC2983_MAX_CHANNELS_NR 20
+#define LTC2983_MIN_CHANNELS_NR 1
+#define LTC2983_SLEEP 0x97
+#define LTC2983_CUSTOM_STEINHART_SIZE 24
+#define LTC2983_CUSTOM_SENSOR_ENTRY_SZ 6
+#define LTC2983_CUSTOM_STEINHART_ENTRY_SZ 4
+
+#define LTC2983_CHAN_START_ADDR(chan) \
+ (((chan - 1) * 4) + LTC2983_CHAN_ASSIGN_START_REG)
+#define LTC2983_CHAN_RES_ADDR(chan) \
+ (((chan - 1) * 4) + LTC2983_TEMP_RES_START_REG)
+#define LTC2983_THERMOCOUPLE_DIFF_MASK BIT(3)
+#define LTC2983_THERMOCOUPLE_SGL(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_DIFF_MASK, x)
+#define LTC2983_THERMOCOUPLE_OC_CURR_MASK GENMASK(1, 0)
+#define LTC2983_THERMOCOUPLE_OC_CURR(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_OC_CURR_MASK, x)
+#define LTC2983_THERMOCOUPLE_OC_CHECK_MASK BIT(2)
+#define LTC2983_THERMOCOUPLE_OC_CHECK(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_OC_CHECK_MASK, x)
+
+#define LTC2983_THERMISTOR_DIFF_MASK BIT(2)
+#define LTC2983_THERMISTOR_SGL(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_DIFF_MASK, x)
+#define LTC2983_THERMISTOR_R_SHARE_MASK BIT(1)
+#define LTC2983_THERMISTOR_R_SHARE(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_R_SHARE_MASK, x)
+#define LTC2983_THERMISTOR_C_ROTATE_MASK BIT(0)
+#define LTC2983_THERMISTOR_C_ROTATE(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_C_ROTATE_MASK, x)
+
+#define LTC2983_DIODE_DIFF_MASK BIT(2)
+#define LTC2983_DIODE_SGL(x) \
+ FIELD_PREP(LTC2983_DIODE_DIFF_MASK, x)
+#define LTC2983_DIODE_3_CONV_CYCLE_MASK BIT(1)
+#define LTC2983_DIODE_3_CONV_CYCLE(x) \
+ FIELD_PREP(LTC2983_DIODE_3_CONV_CYCLE_MASK, x)
+#define LTC2983_DIODE_AVERAGE_ON_MASK BIT(0)
+#define LTC2983_DIODE_AVERAGE_ON(x) \
+ FIELD_PREP(LTC2983_DIODE_AVERAGE_ON_MASK, x)
+
+#define LTC2983_RTD_4_WIRE_MASK BIT(3)
+#define LTC2983_RTD_ROTATION_MASK BIT(1)
+#define LTC2983_RTD_C_ROTATE(x) \
+ FIELD_PREP(LTC2983_RTD_ROTATION_MASK, x)
+#define LTC2983_RTD_KELVIN_R_SENSE_MASK GENMASK(3, 2)
+#define LTC2983_RTD_N_WIRES_MASK GENMASK(3, 2)
+#define LTC2983_RTD_N_WIRES(x) \
+ FIELD_PREP(LTC2983_RTD_N_WIRES_MASK, x)
+#define LTC2983_RTD_R_SHARE_MASK BIT(0)
+#define LTC2983_RTD_R_SHARE(x) \
+ FIELD_PREP(LTC2983_RTD_R_SHARE_MASK, 1)
+
+#define LTC2983_COMMON_HARD_FAULT_MASK GENMASK(31, 30)
+#define LTC2983_COMMON_SOFT_FAULT_MASK GENMASK(27, 25)
+
+#define LTC2983_STATUS_START_MASK BIT(7)
+#define LTC2983_STATUS_START(x) FIELD_PREP(LTC2983_STATUS_START_MASK, x)
+
+#define LTC2983_STATUS_CHAN_SEL_MASK GENMASK(4, 0)
+#define LTC2983_STATUS_CHAN_SEL(x) \
+ FIELD_PREP(LTC2983_STATUS_CHAN_SEL_MASK, x)
+
+#define LTC2983_TEMP_UNITS_MASK BIT(2)
+#define LTC2983_TEMP_UNITS(x) FIELD_PREP(LTC2983_TEMP_UNITS_MASK, x)
+
+#define LTC2983_NOTCH_FREQ_MASK GENMASK(1, 0)
+#define LTC2983_NOTCH_FREQ(x) FIELD_PREP(LTC2983_NOTCH_FREQ_MASK, x)
+
+#define LTC2983_RES_VALID_MASK BIT(24)
+#define LTC2983_DATA_MASK GENMASK(23, 0)
+#define LTC2983_DATA_SIGN_BIT 23
+
+#define LTC2983_CHAN_TYPE_MASK GENMASK(31, 27)
+#define LTC2983_CHAN_TYPE(x) FIELD_PREP(LTC2983_CHAN_TYPE_MASK, x)
+
+/* cold junction for thermocouples and rsense for rtd's and thermistor's */
+#define LTC2983_CHAN_ASSIGN_MASK GENMASK(26, 22)
+#define LTC2983_CHAN_ASSIGN(x) FIELD_PREP(LTC2983_CHAN_ASSIGN_MASK, x)
+
+#define LTC2983_CUSTOM_LEN_MASK GENMASK(5, 0)
+#define LTC2983_CUSTOM_LEN(x) FIELD_PREP(LTC2983_CUSTOM_LEN_MASK, x)
+
+#define LTC2983_CUSTOM_ADDR_MASK GENMASK(11, 6)
+#define LTC2983_CUSTOM_ADDR(x) FIELD_PREP(LTC2983_CUSTOM_ADDR_MASK, x)
+
+#define LTC2983_THERMOCOUPLE_CFG_MASK GENMASK(21, 18)
+#define LTC2983_THERMOCOUPLE_CFG(x) \
+ FIELD_PREP(LTC2983_THERMOCOUPLE_CFG_MASK, x)
+#define LTC2983_THERMOCOUPLE_HARD_FAULT_MASK GENMASK(31, 29)
+#define LTC2983_THERMOCOUPLE_SOFT_FAULT_MASK GENMASK(28, 25)
+
+#define LTC2983_RTD_CFG_MASK GENMASK(21, 18)
+#define LTC2983_RTD_CFG(x) FIELD_PREP(LTC2983_RTD_CFG_MASK, x)
+#define LTC2983_RTD_EXC_CURRENT_MASK GENMASK(17, 14)
+#define LTC2983_RTD_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_RTD_EXC_CURRENT_MASK, x)
+#define LTC2983_RTD_CURVE_MASK GENMASK(13, 12)
+#define LTC2983_RTD_CURVE(x) FIELD_PREP(LTC2983_RTD_CURVE_MASK, x)
+
+#define LTC2983_THERMISTOR_CFG_MASK GENMASK(21, 19)
+#define LTC2983_THERMISTOR_CFG(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_CFG_MASK, x)
+#define LTC2983_THERMISTOR_EXC_CURRENT_MASK GENMASK(18, 15)
+#define LTC2983_THERMISTOR_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_THERMISTOR_EXC_CURRENT_MASK, x)
+
+#define LTC2983_DIODE_CFG_MASK GENMASK(26, 24)
+#define LTC2983_DIODE_CFG(x) FIELD_PREP(LTC2983_DIODE_CFG_MASK, x)
+#define LTC2983_DIODE_EXC_CURRENT_MASK GENMASK(23, 22)
+#define LTC2983_DIODE_EXC_CURRENT(x) \
+ FIELD_PREP(LTC2983_DIODE_EXC_CURRENT_MASK, x)
+#define LTC2983_DIODE_IDEAL_FACTOR_MASK GENMASK(21, 0)
+#define LTC2983_DIODE_IDEAL_FACTOR(x) \
+ FIELD_PREP(LTC2983_DIODE_IDEAL_FACTOR_MASK, x)
+
+#define LTC2983_R_SENSE_VAL_MASK GENMASK(26, 0)
+#define LTC2983_R_SENSE_VAL(x) FIELD_PREP(LTC2983_R_SENSE_VAL_MASK, x)
+
+#define LTC2983_ADC_SINGLE_ENDED_MASK BIT(26)
+#define LTC2983_ADC_SINGLE_ENDED(x) \
+ FIELD_PREP(LTC2983_ADC_SINGLE_ENDED_MASK, x)
+
+enum {
+ LTC2983_SENSOR_THERMOCOUPLE = 1,
+ LTC2983_SENSOR_THERMOCOUPLE_CUSTOM = 9,
+ LTC2983_SENSOR_RTD = 10,
+ LTC2983_SENSOR_RTD_CUSTOM = 18,
+ LTC2983_SENSOR_THERMISTOR = 19,
+ LTC2983_SENSOR_THERMISTOR_STEINHART = 26,
+ LTC2983_SENSOR_THERMISTOR_CUSTOM = 27,
+ LTC2983_SENSOR_DIODE = 28,
+ LTC2983_SENSOR_SENSE_RESISTOR = 29,
+ LTC2983_SENSOR_DIRECT_ADC = 30,
+};
+
+#define to_thermocouple(_sensor) \
+ container_of(_sensor, struct ltc2983_thermocouple, sensor)
+
+#define to_rtd(_sensor) \
+ container_of(_sensor, struct ltc2983_rtd, sensor)
+
+#define to_thermistor(_sensor) \
+ container_of(_sensor, struct ltc2983_thermistor, sensor)
+
+#define to_diode(_sensor) \
+ container_of(_sensor, struct ltc2983_diode, sensor)
+
+#define to_rsense(_sensor) \
+ container_of(_sensor, struct ltc2983_rsense, sensor)
+
+#define to_adc(_sensor) \
+ container_of(_sensor, struct ltc2983_adc, sensor)
+
+struct ltc2983_data {
+ struct regmap *regmap;
+ struct spi_device *spi;
+ struct mutex lock;
+ struct completion completion;
+ struct iio_chan_spec *iio_chan;
+ struct ltc2983_sensor **sensors;
+ u32 mux_delay_config;
+ u32 filter_notch_freq;
+ u16 custom_table_size;
+ u8 num_channels;
+ u8 iio_channels;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ * This buffer holds the converted temperature.
+ */
+ __be32 temp ____cacheline_aligned;
+};
+
+struct ltc2983_sensor {
+ int (*fault_handler)(const struct ltc2983_data *st, const u32 result);
+ int (*assign_chan)(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor);
+ /* specifies the sensor channel */
+ u32 chan;
+ /* sensor type */
+ u32 type;
+};
+
+struct ltc2983_custom_sensor {
+ /* raw table sensor data */
+ u8 *table;
+ size_t size;
+ /* address offset */
+ s8 offset;
+ bool is_steinhart;
+};
+
+struct ltc2983_thermocouple {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 cold_junction_chan;
+};
+
+struct ltc2983_rtd {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 r_sense_chan;
+ u32 excitation_current;
+ u32 rtd_curve;
+};
+
+struct ltc2983_thermistor {
+ struct ltc2983_sensor sensor;
+ struct ltc2983_custom_sensor *custom;
+ u32 sensor_config;
+ u32 r_sense_chan;
+ u32 excitation_current;
+};
+
+struct ltc2983_diode {
+ struct ltc2983_sensor sensor;
+ u32 sensor_config;
+ u32 excitation_current;
+ u32 ideal_factor_value;
+};
+
+struct ltc2983_rsense {
+ struct ltc2983_sensor sensor;
+ u32 r_sense_val;
+};
+
+struct ltc2983_adc {
+ struct ltc2983_sensor sensor;
+ bool single_ended;
+};
+
+/*
+ * Convert to Q-format numbers. These numbers are integers where
+ * the number of integer and fractional bits is specified. The resolution
+ * is given by 1/@resolution and tells us the number of fractional bits. For
+ * instance, a resolution of 2^-10 means we have 10 fractional bits.
+ */
+static u32 __convert_to_raw(const u64 val, const u32 resolution)
+{
+ u64 __res = val * resolution;
+
+ /* all values are multiplied by 1000000 to remove the fraction */
+ do_div(__res, 1000000);
+
+ return __res;
+}
+
+static u32 __convert_to_raw_sign(const u64 val, const u32 resolution)
+{
+ s64 __res = -(s32)val;
+
+ __res = __convert_to_raw(__res, resolution);
+
+ return (u32)-__res;
+}
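As a worked example of the Q-format helpers above, here is a minimal user-space sketch; the kernel's do_div() is replaced by a plain 64-bit division and the function name is illustrative:

	#include <stdint.h>
	#include <stdio.h>

	/* Model of __convert_to_raw(): inputs arrive scaled by 1000000. */
	static uint32_t convert_to_raw(uint64_t val, uint32_t resolution)
	{
		return (uint32_t)((val * resolution) / 1000000);
	}

	int main(void)
	{
		/* 25.5 (passed as 25500000) at 2^-10 resolution: 25.5 * 1024 */
		printf("%u\n", convert_to_raw(25500000ULL, 1024)); /* 26112 */
		return 0;
	}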
+
+static int __ltc2983_fault_handler(const struct ltc2983_data *st,
+ const u32 result, const u32 hard_mask,
+ const u32 soft_mask)
+{
+ const struct device *dev = &st->spi->dev;
+
+ if (result & hard_mask) {
+ dev_err(dev, "Invalid conversion: Sensor HARD fault\n");
+ return -EIO;
+ } else if (result & soft_mask) {
+ /* just print a warning */
+ dev_warn(dev, "Suspicious conversion: Sensor SOFT fault\n");
+ }
+
+ return 0;
+}
+
+static int __ltc2983_chan_assign_common(const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor,
+ u32 chan_val)
+{
+ u32 reg = LTC2983_CHAN_START_ADDR(sensor->chan);
+ __be32 __chan_val;
+
+ chan_val |= LTC2983_CHAN_TYPE(sensor->type);
+ dev_dbg(&st->spi->dev, "Assign reg:0x%04X, val:0x%08X\n", reg,
+ chan_val);
+ __chan_val = cpu_to_be32(chan_val);
+ return regmap_bulk_write(st->regmap, reg, &__chan_val,
+ sizeof(__chan_val));
+}
+
+static int __ltc2983_chan_custom_sensor_assign(struct ltc2983_data *st,
+ struct ltc2983_custom_sensor *custom,
+ u32 *chan_val)
+{
+ u32 reg;
+ u8 mult = custom->is_steinhart ? LTC2983_CUSTOM_STEINHART_ENTRY_SZ :
+ LTC2983_CUSTOM_SENSOR_ENTRY_SZ;
+ const struct device *dev = &st->spi->dev;
+ /*
+ * custom->size holds the raw size of the table. However, when
+ * configuring the sensor channel, we must write the number of
+ * entries of the table minus 1. For steinhart sensors, 0 is written
+ * since the size is constant.
+ */
+ const u8 len = custom->is_steinhart ? 0 :
+ (custom->size / LTC2983_CUSTOM_SENSOR_ENTRY_SZ) - 1;
+ /*
+ * Check if the offset was assigned already. It should be for steinhart
+ * sensors. When coming from sleep, it should be assigned for all.
+ */
+ if (custom->offset < 0) {
+ /*
+ * The size check needs to be redone here because a steinhart
+ * sensor might have been added after this custom sensor first
+ * passed it, growing custom_table_size in the meantime.
+ */
+ if (st->custom_table_size + custom->size >
+ (LTC2983_CUST_SENS_TBL_END_REG -
+ LTC2983_CUST_SENS_TBL_START_REG) + 1) {
+ dev_err(dev,
+ "Not space left(%d) for new custom sensor(%zu)",
+ st->custom_table_size,
+ custom->size);
+ return -EINVAL;
+ }
+
+ custom->offset = st->custom_table_size /
+ LTC2983_CUSTOM_SENSOR_ENTRY_SZ;
+ st->custom_table_size += custom->size;
+ }
+
+ reg = (custom->offset * mult) + LTC2983_CUST_SENS_TBL_START_REG;
+
+ *chan_val |= LTC2983_CUSTOM_LEN(len);
+ *chan_val |= LTC2983_CUSTOM_ADDR(custom->offset);
+ dev_dbg(dev, "Assign custom sensor, reg:0x%04X, off:%d, sz:%zu",
+ reg, custom->offset,
+ custom->size);
+ /* write custom sensor table */
+ return regmap_bulk_write(st->regmap, reg, custom->table, custom->size);
+}
+
+static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
+ struct ltc2983_data *st,
+ const struct device_node *np,
+ const char *propname,
+ const bool is_steinhart,
+ const u32 resolution,
+ const bool has_signed)
+{
+ struct ltc2983_custom_sensor *new_custom;
+ u8 index, n_entries, tbl = 0;
+ struct device *dev = &st->spi->dev;
+ /*
+ * For custom steinhart, the full u32 is taken. For all the others
+ * the MSB is discarded.
+ */
+ const u8 n_size = is_steinhart ? 4 : 3;
+ const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
+
+ n_entries = of_property_count_elems_of_size(np, propname, e_size);
+ /* n_entries must be an even number */
+ if (!n_entries || (n_entries % 2) != 0) {
+ dev_err(dev, "Number of entries either 0 or not even\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ new_custom = devm_kzalloc(dev, sizeof(*new_custom), GFP_KERNEL);
+ if (!new_custom)
+ return ERR_PTR(-ENOMEM);
+
+ new_custom->size = n_entries * n_size;
+ /* check Steinhart size */
+ if (is_steinhart && new_custom->size != LTC2983_CUSTOM_STEINHART_SIZE) {
+ dev_err(dev, "Steinhart sensors size(%zu) must be 24",
+ new_custom->size);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Check space on the table. */
+ if (st->custom_table_size + new_custom->size >
+ (LTC2983_CUST_SENS_TBL_END_REG -
+ LTC2983_CUST_SENS_TBL_START_REG) + 1) {
+ dev_err(dev, "No space left(%d) for new custom sensor(%zu)",
+ st->custom_table_size, new_custom->size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* allocate the table */
+ new_custom->table = devm_kzalloc(dev, new_custom->size, GFP_KERNEL);
+ if (!new_custom->table)
+ return ERR_PTR(-ENOMEM);
+
+ for (index = 0; index < n_entries; index++) {
+ u64 temp = 0, j;
+ /*
+ * Steinhart sensors are configured with raw values in the
+ * devicetree. For the other sensors we must convert the
+ * value to raw. The odd indexes correspond to temperatures
+ * and always have a resolution of 1/1024. Temperatures also
+ * come in kelvin, so signed values are not possible.
+ */
+ if (!is_steinhart) {
+ of_property_read_u64_index(np, propname, index, &temp);
+
+ if ((index % 2) != 0)
+ temp = __convert_to_raw(temp, 1024);
+ else if (has_signed && (s64)temp < 0)
+ temp = __convert_to_raw_sign(temp, resolution);
+ else
+ temp = __convert_to_raw(temp, resolution);
+ } else {
+ of_property_read_u32_index(np, propname, index,
+ (u32 *)&temp);
+ }
+
+ for (j = 0; j < n_size; j++)
+ new_custom->table[tbl++] =
+ temp >> (8 * (n_size - j - 1));
+ }
+
+ new_custom->is_steinhart = is_steinhart;
+ /*
+ * This is done to first add all the steinhart sensors to the table,
+ * in order to maximize the table usage. If we mix adding steinhart
+ * with the other sensors, we might have to do some roundup to make
+ * sure that sensor_addr - 0x250(start address) is a multiple of 4
+ * (for steinhart), and a multiple of 6 for all the other sensors.
+ * Since steinhart sensors have a constant size of 24 bytes, and 24
+ * is also a multiple of 6, we guarantee that the first non-steinhart
+ * sensor will sit at a correct address without any padding.
+ */
+ if (is_steinhart) {
+ new_custom->offset = st->custom_table_size /
+ LTC2983_CUSTOM_STEINHART_ENTRY_SZ;
+ st->custom_table_size += new_custom->size;
+ } else {
+ /* mark as unset. This is checked later on the assign phase */
+ new_custom->offset = -1;
+ }
+
+ return new_custom;
+}
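The packing strategy described above can be made concrete with a short worked example based on the entry sizes defined earlier (illustrative arithmetic only):

	/*
	 * Two steinhart tables first, offsets counted in 4-byte entries:
	 *   table A: offset 0        -> 0x250 + 0 * 4 = 0x250
	 *   table B: offset 24/4 = 6 -> 0x250 + 6 * 4 = 0x268
	 * The first non-steinhart table then starts at byte 48, a multiple
	 * of LTC2983_CUSTOM_SENSOR_ENTRY_SZ (6), so its offset 48/6 = 8
	 * lands on a valid 6-byte boundary with no padding required.
	 */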
+
+static int ltc2983_thermocouple_fault_handler(const struct ltc2983_data *st,
+ const u32 result)
+{
+ return __ltc2983_fault_handler(st, result,
+ LTC2983_THERMOCOUPLE_HARD_FAULT_MASK,
+ LTC2983_THERMOCOUPLE_SOFT_FAULT_MASK);
+}
+
+static int ltc2983_common_fault_handler(const struct ltc2983_data *st,
+ const u32 result)
+{
+ return __ltc2983_fault_handler(st, result,
+ LTC2983_COMMON_HARD_FAULT_MASK,
+ LTC2983_COMMON_SOFT_FAULT_MASK);
+}
+
+static int ltc2983_thermocouple_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermocouple *thermo = to_thermocouple(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(thermo->cold_junction_chan);
+ chan_val |= LTC2983_THERMOCOUPLE_CFG(thermo->sensor_config);
+
+ if (thermo->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st, thermo->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_rtd_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rtd *rtd = to_rtd(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(rtd->r_sense_chan);
+ chan_val |= LTC2983_RTD_CFG(rtd->sensor_config);
+ chan_val |= LTC2983_RTD_EXC_CURRENT(rtd->excitation_current);
+ chan_val |= LTC2983_RTD_CURVE(rtd->rtd_curve);
+
+ if (rtd->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st, rtd->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_thermistor_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermistor *thermistor = to_thermistor(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_CHAN_ASSIGN(thermistor->r_sense_chan);
+ chan_val |= LTC2983_THERMISTOR_CFG(thermistor->sensor_config);
+ chan_val |=
+ LTC2983_THERMISTOR_EXC_CURRENT(thermistor->excitation_current);
+
+ if (thermistor->custom) {
+ int ret;
+
+ ret = __ltc2983_chan_custom_sensor_assign(st,
+ thermistor->custom,
+ &chan_val);
+ if (ret)
+ return ret;
+ }
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_diode_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_diode *diode = to_diode(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_DIODE_CFG(diode->sensor_config);
+ chan_val |= LTC2983_DIODE_EXC_CURRENT(diode->excitation_current);
+ chan_val |= LTC2983_DIODE_IDEAL_FACTOR(diode->ideal_factor_value);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_r_sense_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rsense *rsense = to_rsense(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_R_SENSE_VAL(rsense->r_sense_val);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static int ltc2983_adc_assign_chan(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_adc *adc = to_adc(sensor);
+ u32 chan_val;
+
+ chan_val = LTC2983_ADC_SINGLE_ENDED(adc->single_ended);
+
+ return __ltc2983_chan_assign_common(st, sensor, chan_val);
+}
+
+static struct ltc2983_sensor *ltc2983_thermocouple_new(
+ const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermocouple *thermo;
+ struct device_node *phandle;
+ u32 oc_current;
+ int ret;
+
+ thermo = devm_kzalloc(&st->spi->dev, sizeof(*thermo), GFP_KERNEL);
+ if (!thermo)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ thermo->sensor_config = LTC2983_THERMOCOUPLE_SGL(1);
+
+ ret = of_property_read_u32(child, "adi,sensor-oc-current-microamp",
+ &oc_current);
+ if (!ret) {
+ switch (oc_current) {
+ case 10:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(0);
+ break;
+ case 100:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(1);
+ break;
+ case 500:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(2);
+ break;
+ case 1000:
+ thermo->sensor_config |=
+ LTC2983_THERMOCOUPLE_OC_CURR(3);
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid open circuit current:%u", oc_current);
+ return ERR_PTR(-EINVAL);
+ }
+
+ thermo->sensor_config |= LTC2983_THERMOCOUPLE_OC_CHECK(1);
+ }
+ /* validate channel index */
+ if (!(thermo->sensor_config & LTC2983_THERMOCOUPLE_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for differential thermocouple",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ phandle = of_parse_phandle(child, "adi,cold-junction-handle", 0);
+ if (phandle) {
+ int ret;
+
+ ret = of_property_read_u32(phandle, "reg",
+ &thermo->cold_junction_chan);
+ if (ret) {
+ /*
+ * This would be caught later, but we can just return
+ * the error right away.
+ */
+ dev_err(&st->spi->dev, "Property reg must be given\n");
+ of_node_put(phandle);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* check custom sensor */
+ if (sensor->type == LTC2983_SENSOR_THERMOCOUPLE_CUSTOM) {
+ const char *propname = "adi,custom-thermocouple";
+
+ thermo->custom = __ltc2983_custom_sensor_new(st, child,
+ propname, false,
+ 16384, true);
+ if (IS_ERR(thermo->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(thermo->custom);
+ }
+ }
+
+ /* set common parameters */
+ thermo->sensor.fault_handler = ltc2983_thermocouple_fault_handler;
+ thermo->sensor.assign_chan = ltc2983_thermocouple_assign_chan;
+
+ of_node_put(phandle);
+ return &thermo->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_rtd_new(const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rtd *rtd;
+ int ret = 0;
+ struct device *dev = &st->spi->dev;
+ struct device_node *phandle;
+ u32 excitation_current = 0, n_wires = 0;
+
+ rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ return ERR_PTR(-ENOMEM);
+
+ phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
+ if (!phandle) {
+ dev_err(dev, "Property adi,rsense-handle missing or invalid");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(phandle, "reg", &rtd->r_sense_chan);
+ if (ret) {
+ dev_err(dev, "Property reg must be given\n");
+ goto fail;
+ }
+
+ ret = of_property_read_u32(child, "adi,number-of-wires", &n_wires);
+ if (!ret) {
+ switch (n_wires) {
+ case 2:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(0);
+ break;
+ case 3:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(1);
+ break;
+ case 4:
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(2);
+ break;
+ case 5:
+ /* 4 wires, Kelvin Rsense */
+ rtd->sensor_config = LTC2983_RTD_N_WIRES(3);
+ break;
+ default:
+ dev_err(dev, "Invalid number of wires:%u\n", n_wires);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (of_property_read_bool(child, "adi,rsense-share")) {
+ /* Current rotation is only available with rsense sharing */
+ if (of_property_read_bool(child, "adi,current-rotate")) {
+ if (n_wires == 2 || n_wires == 3) {
+ dev_err(dev,
+ "Rotation not allowed for 2/3 Wire RTDs");
+ ret = -EINVAL;
+ goto fail;
+ }
+ rtd->sensor_config |= LTC2983_RTD_C_ROTATE(1);
+ } else {
+ rtd->sensor_config |= LTC2983_RTD_R_SHARE(1);
+ }
+ }
+ /*
+ * RTD channel indexes are a bit more complicated to validate.
+ * For a 4-wire RTD with rotation, the channel selection cannot be
+ * >= 19 since channel + 1 is used in this configuration.
+ * For 4-wire RTDs with kelvin rsense, the rsense channel cannot be
+ * <= 1 since channel - 1 and channel - 2 are used.
+ */
+ if (rtd->sensor_config & LTC2983_RTD_4_WIRE_MASK) {
+ /* 4-wire */
+ u8 min = LTC2983_DIFFERENTIAL_CHAN_MIN,
+ max = LTC2983_MAX_CHANNELS_NR;
+
+ if (rtd->sensor_config & LTC2983_RTD_ROTATION_MASK)
+ max = LTC2983_MAX_CHANNELS_NR - 1;
+
+ if (((rtd->sensor_config & LTC2983_RTD_KELVIN_R_SENSE_MASK)
+ == LTC2983_RTD_KELVIN_R_SENSE_MASK) &&
+ (rtd->r_sense_chan <= min)) {
+ /* kelvin rsense */
+ dev_err(dev,
+ "Invalid rsense chann:%d to use in kelvin rsense",
+ rtd->r_sense_chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (sensor->chan < min || sensor->chan > max) {
+ dev_err(dev, "Invalid chann:%d for the rtd config",
+ sensor->chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /* same as differential case */
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for RTD", sensor->chan);
+
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* check custom sensor */
+ if (sensor->type == LTC2983_SENSOR_RTD_CUSTOM) {
+ rtd->custom = __ltc2983_custom_sensor_new(st, child,
+ "adi,custom-rtd",
+ false, 2048, false);
+ if (IS_ERR(rtd->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(rtd->custom);
+ }
+ }
+
+ /* set common parameters */
+ rtd->sensor.fault_handler = ltc2983_common_fault_handler;
+ rtd->sensor.assign_chan = ltc2983_rtd_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
+ if (ret) {
+ /* default to 5uA */
+ rtd->excitation_current = 1;
+ } else {
+ switch (excitation_current) {
+ case 5:
+ rtd->excitation_current = 0x01;
+ break;
+ case 10:
+ rtd->excitation_current = 0x02;
+ break;
+ case 25:
+ rtd->excitation_current = 0x03;
+ break;
+ case 50:
+ rtd->excitation_current = 0x04;
+ break;
+ case 100:
+ rtd->excitation_current = 0x05;
+ break;
+ case 250:
+ rtd->excitation_current = 0x06;
+ break;
+ case 500:
+ rtd->excitation_current = 0x07;
+ break;
+ case 1000:
+ rtd->excitation_current = 0x08;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ of_property_read_u32(child, "adi,rtd-curve", &rtd->rtd_curve);
+
+ of_node_put(phandle);
+ return &rtd->sensor;
+fail:
+ of_node_put(phandle);
+ return ERR_PTR(ret);
+}
+
+static struct ltc2983_sensor *ltc2983_thermistor_new(
+ const struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_thermistor *thermistor;
+ struct device *dev = &st->spi->dev;
+ struct device_node *phandle;
+ u32 excitation_current = 0;
+ int ret = 0;
+
+ thermistor = devm_kzalloc(dev, sizeof(*thermistor), GFP_KERNEL);
+ if (!thermistor)
+ return ERR_PTR(-ENOMEM);
+
+ phandle = of_parse_phandle(child, "adi,rsense-handle", 0);
+ if (!phandle) {
+ dev_err(dev, "Property adi,rsense-handle missing or invalid");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(phandle, "reg", &thermistor->r_sense_chan);
+ if (ret) {
+ dev_err(dev, "rsense channel must be configured...\n");
+ goto fail;
+ }
+
+ if (of_property_read_bool(child, "adi,single-ended")) {
+ thermistor->sensor_config = LTC2983_THERMISTOR_SGL(1);
+ } else if (of_property_read_bool(child, "adi,rsense-share")) {
+ /* rotation is only possible if sharing rsense */
+ if (of_property_read_bool(child, "adi,current-rotate"))
+ thermistor->sensor_config =
+ LTC2983_THERMISTOR_C_ROTATE(1);
+ else
+ thermistor->sensor_config =
+ LTC2983_THERMISTOR_R_SHARE(1);
+ }
+ /* validate channel index */
+ if (!(thermistor->sensor_config & LTC2983_THERMISTOR_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for differential thermistor",
+ sensor->chan);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* check custom sensor */
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ bool steinhart = false;
+ const char *propname;
+
+ if (sensor->type == LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ steinhart = true;
+ propname = "adi,custom-steinhart";
+ } else {
+ propname = "adi,custom-thermistor";
+ }
+
+ thermistor->custom = __ltc2983_custom_sensor_new(st, child,
+ propname,
+ steinhart,
+ 64, false);
+ if (IS_ERR(thermistor->custom)) {
+ of_node_put(phandle);
+ return ERR_CAST(thermistor->custom);
+ }
+ }
+ /* set common parameters */
+ thermistor->sensor.fault_handler = ltc2983_common_fault_handler;
+ thermistor->sensor.assign_chan = ltc2983_thermistor_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-nanoamp",
+ &excitation_current);
+ if (ret) {
+ /* Auto range is not allowed for custom sensors */
+ if (sensor->type >= LTC2983_SENSOR_THERMISTOR_STEINHART)
+ /* default to 1uA */
+ thermistor->excitation_current = 0x03;
+ else
+ /* default to auto-range */
+ thermistor->excitation_current = 0x0c;
+ } else {
+ switch (excitation_current) {
+ case 0:
+ /* auto range */
+ if (sensor->type >=
+ LTC2983_SENSOR_THERMISTOR_STEINHART) {
+ dev_err(&st->spi->dev,
+ "Auto Range not allowed for custom sensors\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ thermistor->excitation_current = 0x0c;
+ break;
+ case 250:
+ thermistor->excitation_current = 0x01;
+ break;
+ case 500:
+ thermistor->excitation_current = 0x02;
+ break;
+ case 1000:
+ thermistor->excitation_current = 0x03;
+ break;
+ case 5000:
+ thermistor->excitation_current = 0x04;
+ break;
+ case 10000:
+ thermistor->excitation_current = 0x05;
+ break;
+ case 25000:
+ thermistor->excitation_current = 0x06;
+ break;
+ case 50000:
+ thermistor->excitation_current = 0x07;
+ break;
+ case 100000:
+ thermistor->excitation_current = 0x08;
+ break;
+ case 250000:
+ thermistor->excitation_current = 0x09;
+ break;
+ case 500000:
+ thermistor->excitation_current = 0x0a;
+ break;
+ case 1000000:
+ thermistor->excitation_current = 0x0b;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ of_node_put(phandle);
+ return &thermistor->sensor;
+fail:
+ of_node_put(phandle);
+ return ERR_PTR(ret);
+}
+
+static struct ltc2983_sensor *ltc2983_diode_new(
+ const struct device_node *child,
+ const struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_diode *diode;
+ u32 temp = 0, excitation_current = 0;
+ int ret;
+
+ diode = devm_kzalloc(&st->spi->dev, sizeof(*diode), GFP_KERNEL);
+ if (!diode)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ diode->sensor_config = LTC2983_DIODE_SGL(1);
+
+ if (of_property_read_bool(child, "adi,three-conversion-cycles"))
+ diode->sensor_config |= LTC2983_DIODE_3_CONV_CYCLE(1);
+
+ if (of_property_read_bool(child, "adi,average-on"))
+ diode->sensor_config |= LTC2983_DIODE_AVERAGE_ON(1);
+
+ /* validate channel index */
+ if (!(diode->sensor_config & LTC2983_DIODE_DIFF_MASK) &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev,
+ "Invalid chann:%d for differential thermistor",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+ /* set common parameters */
+ diode->sensor.fault_handler = ltc2983_common_fault_handler;
+ diode->sensor.assign_chan = ltc2983_diode_assign_chan;
+
+ ret = of_property_read_u32(child, "adi,excitation-current-microamp",
+ &excitation_current);
+ if (!ret) {
+ switch (excitation_current) {
+ case 10:
+ diode->excitation_current = 0x00;
+ break;
+ case 20:
+ diode->excitation_current = 0x01;
+ break;
+ case 40:
+ diode->excitation_current = 0x02;
+ break;
+ case 80:
+ diode->excitation_current = 0x03;
+ break;
+ default:
+ dev_err(&st->spi->dev,
+ "Invalid value for excitation current(%u)",
+ excitation_current);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ of_property_read_u32(child, "adi,ideal-factor-value", &temp);
+
+ /* 2^20 resolution */
+ diode->ideal_factor_value = __convert_to_raw(temp, 1048576);
+
+ return &diode->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_r_sense_new(struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_rsense *rsense;
+ int ret;
+ u32 temp;
+
+ rsense = devm_kzalloc(&st->spi->dev, sizeof(*rsense), GFP_KERNEL);
+ if (!rsense)
+ return ERR_PTR(-ENOMEM);
+
+ /* validate channel index */
+ if (sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev, "Invalid chann:%d for r_sense",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(child, "adi,rsense-val-milli-ohms", &temp);
+ if (ret) {
+ dev_err(&st->spi->dev, "Property adi,rsense-val-milli-ohms missing\n");
+ return ERR_PTR(-EINVAL);
+ }
+ /*
+ * Times 1000 because we have milli-ohms and __convert_to_raw
+ * expects scales of 1000000 which are used for all other
+ * properties.
+ * 2^10 resolution
+ */
+ rsense->r_sense_val = __convert_to_raw((u64)temp * 1000, 1024);
+
+ /* set common parameters */
+ rsense->sensor.assign_chan = ltc2983_r_sense_assign_chan;
+
+ return &rsense->sensor;
+}
+
+static struct ltc2983_sensor *ltc2983_adc_new(struct device_node *child,
+ struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor)
+{
+ struct ltc2983_adc *adc;
+
+ adc = devm_kzalloc(&st->spi->dev, sizeof(*adc), GFP_KERNEL);
+ if (!adc)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_bool(child, "adi,single-ended"))
+ adc->single_ended = true;
+
+ if (!adc->single_ended &&
+ sensor->chan < LTC2983_DIFFERENTIAL_CHAN_MIN) {
+ dev_err(&st->spi->dev, "Invalid chan:%d for differential adc\n",
+ sensor->chan);
+ return ERR_PTR(-EINVAL);
+ }
+ /* set common parameters */
+ adc->sensor.assign_chan = ltc2983_adc_assign_chan;
+ adc->sensor.fault_handler = ltc2983_common_fault_handler;
+
+ return &adc->sensor;
+}
+
+static int ltc2983_chan_read(struct ltc2983_data *st,
+ const struct ltc2983_sensor *sensor, int *val)
+{
+ u32 start_conversion = 0;
+ int ret;
+ unsigned long time;
+
+ start_conversion = LTC2983_STATUS_START(true);
+ start_conversion |= LTC2983_STATUS_CHAN_SEL(sensor->chan);
+ dev_dbg(&st->spi->dev, "Start conversion on chan:%d, status:%02X\n",
+ sensor->chan, start_conversion);
+ /* start conversion */
+ ret = regmap_write(st->regmap, LTC2983_STATUS_REG, start_conversion);
+ if (ret)
+ return ret;
+
+ reinit_completion(&st->completion);
+ /*
+ * wait for conversion to complete.
+ * 300 ms should be more than enough to complete the conversion:
+ * depending on the sensor configuration, there are 2 or 3 conversion
+ * cycles of 82 ms each.
+ */
+ time = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(300));
+ if (!time) {
+ dev_warn(&st->spi->dev, "Conversion timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* read the converted data */
+ ret = regmap_bulk_read(st->regmap, LTC2983_CHAN_RES_ADDR(sensor->chan),
+ &st->temp, sizeof(st->temp));
+ if (ret)
+ return ret;
+
+ *val = __be32_to_cpu(st->temp);
+
+ if (!(LTC2983_RES_VALID_MASK & *val)) {
+ dev_err(&st->spi->dev, "Invalid conversion detected\n");
+ return -EIO;
+ }
+
+ ret = sensor->fault_handler(st, *val);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32((*val) & LTC2983_DATA_MASK, LTC2983_DATA_SIGN_BIT);
+ return 0;
+}
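A worked decode of the result word read above, as a standalone sketch; the sample value is hypothetical and the masks mirror LTC2983_RES_VALID_MASK, LTC2983_DATA_MASK and LTC2983_DATA_SIGN_BIT:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t raw = 0x01ffe000;	/* hypothetical result word */
		int32_t val;

		if (!(raw & (1u << 24)))	/* valid bit (bit 24) */
			return 1;

		/* sign-extend bits [23:0], like sign_extend32() above */
		val = (int32_t)((raw & 0x00ffffff) << 8) >> 8;
		printf("%d\n", val);		/* prints -8192 */
		return 0;
	}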
+
+static int ltc2983_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ltc2983_data *st = iio_priv(indio_dev);
+ int ret;
+
+ /* sanity check */
+ if (chan->address >= st->num_channels) {
+ dev_err(&st->spi->dev, "Invalid chan address:%ld",
+ chan->address);
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ ret = ltc2983_chan_read(st, st->sensors[chan->address], val);
+ mutex_unlock(&st->lock);
+ return ret ?: IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /* value in milli degrees */
+ *val = 1000;
+ /* 2^10 */
+ *val2 = 1024;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_VOLTAGE:
+ /* value in millivolt */
+ *val = 1000;
+ /* 2^21 */
+ *val2 = 2097152;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
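Given the SCALE values above, user space converts raw readings as follows; the 26112 sample ties back to the Q-format example earlier:

	/*
	 * temperature: milli-degrees = raw * 1000 / 1024
	 *              e.g. 26112 * 1000 / 1024 = 25500 -> 25.5 degrees
	 * voltage:     millivolts    = raw * 1000 / 2097152 (2^21)
	 */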
+
+static int ltc2983_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltc2983_data *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ else
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static irqreturn_t ltc2983_irq_handler(int irq, void *data)
+{
+ struct ltc2983_data *st = data;
+
+ complete(&st->completion);
+ return IRQ_HANDLED;
+}
+
+#define LTC2983_CHAN(__type, index, __address) ({ \
+ struct iio_chan_spec __chan = { \
+ .type = __type, \
+ .indexed = 1, \
+ .channel = index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .address = __address, \
+ }; \
+ __chan; \
+})
+
+static int ltc2983_parse_dt(struct ltc2983_data *st)
+{
+ struct device_node *child;
+ struct device *dev = &st->spi->dev;
+ int ret = 0, chan = 0, channel_avail_mask = 0;
+
+ of_property_read_u32(dev->of_node, "adi,mux-delay-config-us",
+ &st->mux_delay_config);
+
+ of_property_read_u32(dev->of_node, "adi,filter-notch-freq",
+ &st->filter_notch_freq);
+
+ st->num_channels = of_get_available_child_count(dev->of_node);
+ st->sensors = devm_kcalloc(dev, st->num_channels, sizeof(*st->sensors),
+ GFP_KERNEL);
+ if (!st->sensors)
+ return -ENOMEM;
+
+ st->iio_channels = st->num_channels;
+ for_each_available_child_of_node(dev->of_node, child) {
+ struct ltc2983_sensor sensor;
+
+ ret = of_property_read_u32(child, "reg", &sensor.chan);
+ if (ret) {
+ dev_err(dev, "reg property must given for child nodes\n");
+ return ret;
+ }
+
+ /* check if we have a valid channel */
+ if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
+ sensor.chan > LTC2983_MAX_CHANNELS_NR) {
+ dev_err(dev,
+ "chan:%d must be from 1 to 20\n", sensor.chan);
+ return -EINVAL;
+ } else if (channel_avail_mask & BIT(sensor.chan)) {
+ dev_err(dev, "chan:%d already in use\n", sensor.chan);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(child, "adi,sensor-type",
+ &sensor.type);
+ if (ret) {
+ dev_err(dev,
+ "adi,sensor-type property must given for child nodes\n");
+ return ret;
+ }
+
+		dev_dbg(dev, "Create new sensor, type %u, chan %u",
+ sensor.type,
+ sensor.chan);
+
+ if (sensor.type >= LTC2983_SENSOR_THERMOCOUPLE &&
+ sensor.type <= LTC2983_SENSOR_THERMOCOUPLE_CUSTOM) {
+ st->sensors[chan] = ltc2983_thermocouple_new(child, st,
+ &sensor);
+ } else if (sensor.type >= LTC2983_SENSOR_RTD &&
+ sensor.type <= LTC2983_SENSOR_RTD_CUSTOM) {
+ st->sensors[chan] = ltc2983_rtd_new(child, st, &sensor);
+ } else if (sensor.type >= LTC2983_SENSOR_THERMISTOR &&
+ sensor.type <= LTC2983_SENSOR_THERMISTOR_CUSTOM) {
+ st->sensors[chan] = ltc2983_thermistor_new(child, st,
+ &sensor);
+ } else if (sensor.type == LTC2983_SENSOR_DIODE) {
+ st->sensors[chan] = ltc2983_diode_new(child, st,
+ &sensor);
+ } else if (sensor.type == LTC2983_SENSOR_SENSE_RESISTOR) {
+ st->sensors[chan] = ltc2983_r_sense_new(child, st,
+ &sensor);
+ /* don't add rsense to iio */
+ st->iio_channels--;
+ } else if (sensor.type == LTC2983_SENSOR_DIRECT_ADC) {
+ st->sensors[chan] = ltc2983_adc_new(child, st, &sensor);
+ } else {
+ dev_err(dev, "Unknown sensor type %d\n", sensor.type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(st->sensors[chan])) {
+ dev_err(dev, "Failed to create sensor %ld",
+ PTR_ERR(st->sensors[chan]));
+ return PTR_ERR(st->sensors[chan]);
+ }
+ /* set generic sensor parameters */
+ st->sensors[chan]->chan = sensor.chan;
+ st->sensors[chan]->type = sensor.type;
+
+ channel_avail_mask |= BIT(sensor.chan);
+ chan++;
+ }
+
+ return 0;
+}
+
+static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
+{
+ u32 iio_chan_t = 0, iio_chan_v = 0, chan, iio_idx = 0;
+ int ret;
+ unsigned long time;
+
+ /* make sure the device is up */
+ time = wait_for_completion_timeout(&st->completion,
+ msecs_to_jiffies(250));
+
+ if (!time) {
+ dev_err(&st->spi->dev, "Device startup timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ st->iio_chan = devm_kzalloc(&st->spi->dev,
+ st->iio_channels * sizeof(*st->iio_chan),
+ GFP_KERNEL);
+
+ if (!st->iio_chan)
+ return -ENOMEM;
+
+ ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG,
+ LTC2983_NOTCH_FREQ_MASK,
+ LTC2983_NOTCH_FREQ(st->filter_notch_freq));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, LTC2983_MUX_CONFIG_REG,
+ st->mux_delay_config);
+ if (ret)
+ return ret;
+
+ for (chan = 0; chan < st->num_channels; chan++) {
+ u32 chan_type = 0, *iio_chan;
+
+ ret = st->sensors[chan]->assign_chan(st, st->sensors[chan]);
+ if (ret)
+ return ret;
+ /*
+ * The assign_iio flag is necessary for when the device is
+ * coming out of sleep. In that case, we just need to
+ * re-configure the device channels.
+ * We also don't assign iio channels for rsense.
+ */
+ if (st->sensors[chan]->type == LTC2983_SENSOR_SENSE_RESISTOR ||
+ !assign_iio)
+ continue;
+
+ /* assign iio channel */
+ if (st->sensors[chan]->type != LTC2983_SENSOR_DIRECT_ADC) {
+ chan_type = IIO_TEMP;
+ iio_chan = &iio_chan_t;
+ } else {
+ chan_type = IIO_VOLTAGE;
+ iio_chan = &iio_chan_v;
+ }
+
+ /*
+		 * add chan as the iio .address so that we can directly
+ * reference the sensor given the iio_chan_spec
+ */
+ st->iio_chan[iio_idx++] = LTC2983_CHAN(chan_type, (*iio_chan)++,
+ chan);
+ }
+
+ return 0;
+}
+
+static const struct regmap_range ltc2983_reg_ranges[] = {
+ regmap_reg_range(LTC2983_STATUS_REG, LTC2983_STATUS_REG),
+ regmap_reg_range(LTC2983_TEMP_RES_START_REG, LTC2983_TEMP_RES_END_REG),
+ regmap_reg_range(LTC2983_GLOBAL_CONFIG_REG, LTC2983_GLOBAL_CONFIG_REG),
+ regmap_reg_range(LTC2983_MULT_CHANNEL_START_REG,
+ LTC2983_MULT_CHANNEL_END_REG),
+ regmap_reg_range(LTC2983_MUX_CONFIG_REG, LTC2983_MUX_CONFIG_REG),
+ regmap_reg_range(LTC2983_CHAN_ASSIGN_START_REG,
+ LTC2983_CHAN_ASSIGN_END_REG),
+ regmap_reg_range(LTC2983_CUST_SENS_TBL_START_REG,
+ LTC2983_CUST_SENS_TBL_END_REG),
+};
+
+static const struct regmap_access_table ltc2983_reg_table = {
+ .yes_ranges = ltc2983_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltc2983_reg_ranges),
+};
+
+/*
+ * The reg_bits are actually 12 but the device needs the first *complete*
+ * byte for the command (R/W).
+ */
+static const struct regmap_config ltc2983_regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 8,
+ .wr_table = &ltc2983_reg_table,
+ .rd_table = &ltc2983_reg_table,
+ .read_flag_mask = GENMASK(1, 0),
+ .write_flag_mask = BIT(1),
+};
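A sketch of the on-wire framing this config implies (an assumption derived from the flag masks above, where regmap ORs the read/write flag into the top byte of the 24-bit register field):

  /*
   * Assumed SPI transaction layout (sketch):
   *
   *   write: 0x02 | addr[15:8] | addr[7:0] | data ...   (BIT(1))
   *   read:  0x03 | addr[15:8] | addr[7:0] | data ...   (GENMASK(1, 0))
   *
   * Hence reg_bits = 24: 8 command bits + 16 address bits, even though
   * the device only uses 12 address bits.
   */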
+
+static const struct iio_info ltc2983_iio_info = {
+ .read_raw = ltc2983_read_raw,
+ .debugfs_reg_access = ltc2983_reg_access,
+};
+
+static int ltc2983_probe(struct spi_device *spi)
+{
+ struct ltc2983_data *st;
+ struct iio_dev *indio_dev;
+ const char *name = spi_get_device_id(spi)->name;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->regmap = devm_regmap_init_spi(spi, &ltc2983_regmap_config);
+ if (IS_ERR(st->regmap)) {
+ dev_err(&spi->dev, "Failed to initialize regmap\n");
+ return PTR_ERR(st->regmap);
+ }
+
+ mutex_init(&st->lock);
+ init_completion(&st->completion);
+ st->spi = spi;
+ spi_set_drvdata(spi, st);
+
+ ret = ltc2983_parse_dt(st);
+ if (ret)
+ return ret;
+ /*
+ * let's request the irq now so it is used to sync the device
+ * startup in ltc2983_setup()
+ */
+ ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
+ IRQF_TRIGGER_RISING, name, st);
+ if (ret) {
+ dev_err(&spi->dev, "failed to request an irq, %d", ret);
+ return ret;
+ }
+
+ ret = ltc2983_setup(st, true);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = name;
+ indio_dev->num_channels = st->iio_channels;
+ indio_dev->channels = st->iio_chan;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ltc2983_iio_info;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static int __maybe_unused ltc2983_resume(struct device *dev)
+{
+ struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
+ int dummy;
+
+ /* dummy read to bring the device out of sleep */
+ regmap_read(st->regmap, LTC2983_STATUS_REG, &dummy);
+ /* we need to re-assign the channels */
+ return ltc2983_setup(st, false);
+}
+
+static int __maybe_unused ltc2983_suspend(struct device *dev)
+{
+ struct ltc2983_data *st = spi_get_drvdata(to_spi_device(dev));
+
+ return regmap_write(st->regmap, LTC2983_STATUS_REG, LTC2983_SLEEP);
+}
+
+static SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend, ltc2983_resume);
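A note on the __maybe_unused annotations above (general kernel convention, assumed rather than stated by the patch): SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is enabled, so without the annotation a !CONFIG_PM_SLEEP build would warn about defined-but-unused functions:

  /* Sketch: with !CONFIG_PM_SLEEP this roughly reduces to an empty table. */
  static const struct dev_pm_ops ltc2983_pm_ops = { };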
+
+static const struct spi_device_id ltc2983_id_table[] = {
+ { "ltc2983" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, ltc2983_id_table);
+
+static const struct of_device_id ltc2983_of_match[] = {
+ { .compatible = "adi,ltc2983" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltc2983_of_match);
+
+static struct spi_driver ltc2983_driver = {
+ .driver = {
+ .name = "ltc2983",
+ .of_match_table = ltc2983_of_match,
+ .pm = &ltc2983_pm_ops,
+ },
+ .probe = ltc2983_probe,
+ .id_table = ltc2983_id_table,
+};
+
+module_spi_driver(ltc2983_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices LTC2983 SPI Temperature sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index f184ba5601d9..73ed550e3fc9 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -284,6 +284,8 @@ static int max31856_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->info = &max31856_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = id->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = max31856_channels;
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 2ab68282d0b6..d1360605209c 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -194,7 +194,7 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
default:
*val = 250; /* 1000 * 0.25 */
ret = IIO_VAL_INT;
- };
+ }
break;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index b44b1c322ec8..ade86388434f 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -83,7 +83,6 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/i40iw/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 09881bd5f12d..9a8871e21545 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- nldev.o restrack.o counters.o
+ nldev.o restrack.o counters.o ib_core_uverbs.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 00fb3eacda19..d535995711c3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -819,22 +819,16 @@ static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
struct ib_gid_table *table)
{
int i;
- bool deleted = false;
if (!table)
return;
mutex_lock(&table->lock);
for (i = 0; i < table->sz; ++i) {
- if (is_gid_entry_valid(table->data_vec[i])) {
+ if (is_gid_entry_valid(table->data_vec[i]))
del_gid(ib_dev, port, table, i);
- deleted = true;
- }
}
mutex_unlock(&table->lock);
-
- if (deleted)
- dispatch_gid_change_event(ib_dev, port);
}
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 5920c0085d35..455b3659d84b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1,36 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/completion.h>
@@ -246,7 +220,7 @@ struct cm_work {
};
struct cm_timewait_info {
- struct cm_work work; /* Must be first. */
+ struct cm_work work;
struct list_head list;
struct rb_node remote_qp_node;
struct rb_node remote_id_node;
@@ -263,7 +237,7 @@ struct cm_id_private {
struct rb_node sidr_id_node;
spinlock_t lock; /* Do not acquire inside cm.lock */
struct completion comp;
- atomic_t refcount;
+ refcount_t refcount;
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
* Protected by the cm.lock spinlock. */
int listen_sharecount;
@@ -308,7 +282,7 @@ static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
- if (atomic_dec_and_test(&cm_id_priv->refcount))
+ if (refcount_dec_and_test(&cm_id_priv->refcount))
complete(&cm_id_priv->comp);
}
@@ -365,7 +339,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m->ah = ah;
m->retries = cm_id_priv->max_cm_retries;
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
m->context[0] = cm_id_priv;
*msg = m;
@@ -626,7 +600,7 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -883,7 +857,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
INIT_LIST_HEAD(&cm_id_priv->prim_list);
INIT_LIST_HEAD(&cm_id_priv->altr_list);
atomic_set(&cm_id_priv->work_count, -1);
- atomic_set(&cm_id_priv->refcount, 1);
+ refcount_set(&cm_id_priv->refcount, 1);
return &cm_id_priv->id;
error:
@@ -1230,7 +1204,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
spin_unlock_irqrestore(&cm.lock, flags);
return ERR_PTR(-EINVAL);
}
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
++cm_id_priv->listen_sharecount;
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1525,14 +1499,6 @@ static int cm_issue_rej(struct cm_port *port,
return ret;
}
-static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
- __be32 local_qpn, __be32 remote_qpn)
-{
- return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
- ((local_ca_guid == remote_ca_guid) &&
- (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
-}
-
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
return ((req_msg->alt_local_lid) ||
@@ -1895,8 +1861,8 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
NULL, 0);
goto out;
}
- atomic_inc(&listen_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&listen_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
@@ -2052,7 +2018,7 @@ static int cm_req_handler(struct cm_work *work)
return 0;
rejected:
- atomic_dec(&cm_id_priv->refcount);
+ refcount_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
free_timeinfo:
kfree(cm_id_priv->timewait_info);
@@ -2826,7 +2792,7 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
cm_local_id(timewait_info->work.local_id));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
@@ -3434,7 +3400,7 @@ static int cm_timewait_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
int ret;
- timewait_info = (struct cm_timewait_info *)work;
+ timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
@@ -3596,8 +3562,8 @@ static int cm_sidr_req_handler(struct cm_work *work)
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
- atomic_inc(&cur_cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->refcount);
+ refcount_inc(&cur_cm_id_priv->refcount);
+ refcount_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 3d16d614aff6..92d7260ac913 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING the madirectory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use source and binary forms, with or
- * withmodification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retathe above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHWARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(CM_MSGS_H)
+#ifndef CM_MSGS_H
#define CM_MSGS_H
#include <rdma/ib_mad.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d78f67623f24..25f2b70fd8ef 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1,36 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2005 Voltaire Inc. All rights reserved.
* Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
- * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
+ * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
* Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/completion.h>
@@ -2531,7 +2504,9 @@ EXPORT_SYMBOL(rdma_set_service_type);
* This function should be called before rdma_connect() on active side,
* and on passive side before rdma_accept(). It is applicable to primary
* path only. The timeout will affect the local side of the QP, it is not
- * negotiated with remote side and zero disables the timer.
+ * negotiated with remote side and zero disables the timer. In case it is
+ * set before rdma_resolve_route, the value will also be used to determine
+ * PacketLifeTime for RoCE.
*
* Return: 0 for success
*/
@@ -2828,22 +2803,65 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
return 0;
}
-static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
{
- int prio;
struct net_device *dev;
- prio = rt_tos2priority(tos);
- dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
+ dev = vlan_dev_real_dev(vlan_ndev);
if (dev->num_tc)
return netdev_get_prio_tc_map(dev, prio);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+struct iboe_prio_tc_map {
+ int input_prio;
+ int output_tc;
+ bool found;
+};
+
+static int get_lower_vlan_dev_tc(struct net_device *dev, void *data)
+{
+ struct iboe_prio_tc_map *map = data;
+
+ if (is_vlan_dev(dev))
+ map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
+ else if (dev->num_tc)
+ map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
+ else
+ map->output_tc = 0;
+	/* We are interested only in the first-level VLAN device, so always
+	 * return 1 to stop iterating over lower-level devices.
+ */
+ map->found = true;
+ return 1;
+}
+
+static int iboe_tos_to_sl(struct net_device *ndev, int tos)
+{
+ struct iboe_prio_tc_map prio_tc_map = {};
+ int prio = rt_tos2priority(tos);
+
+ /* If VLAN device, get it directly from the VLAN netdev */
if (is_vlan_dev(ndev))
- return (vlan_dev_get_egress_qos_mask(ndev, prio) &
- VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-#endif
- return 0;
+ return get_vlan_ndev_tc(ndev, prio);
+
+ prio_tc_map.input_prio = prio;
+ rcu_read_lock();
+ netdev_walk_all_lower_dev_rcu(ndev,
+ get_lower_vlan_dev_tc,
+ &prio_tc_map);
+ rcu_read_unlock();
+	/* If a map was found on a lower device, use it; otherwise continue
+	 * with the current netdevice to get the priority-to-TC map.
+ */
+ if (prio_tc_map.found)
+ return prio_tc_map.output_tc;
+ else if (ndev->num_tc)
+ return netdev_get_prio_tc_map(ndev, prio);
+ else
+ return 0;
}
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
@@ -2897,7 +2915,16 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->path_rec->rate = iboe_get_rate(ndev);
dev_put(ndev);
route->path_rec->packet_life_time_selector = IB_SA_EQ;
- route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+ /* In case ACK timeout is set, use this value to calculate
+ * PacketLifeTime. As per IBTA 12.7.34,
+ * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
+ * Assuming a negligible local ACK delay, we can use
+ * PacketLifeTime = local ACK timeout/2
+ * as a reasonable approximation for RoCE networks.
+ */
+ route->path_rec->packet_life_time = id_priv->timeout_set ?
+ id_priv->timeout - 1 : CMA_IBOE_PACKET_LIFETIME;
+
if (!route->path_rec->mtu) {
ret = -EINVAL;
goto err2;
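A worked note on the `timeout - 1` above (a sketch relying on the standard IB encoding, where both the local ACK timeout and PacketLifeTime fields are 5-bit exponents with time = 4.096 us * 2^value): halving a power of two subtracts one from the exponent, hence with a negligible CA ACK delay

  /*
   * local ACK timeout = 2 * PacketLifeTime
   * => 2^timeout = 2 * 2^plt = 2^(plt + 1)
   * => plt = timeout - 1
   */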
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 9d07378b5b42..3645e092e1c7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -388,4 +388,15 @@ int ib_device_set_netns_put(struct sk_buff *skb,
int rdma_nl_net_init(struct rdma_dev_net *rnet);
void rdma_nl_net_exit(struct rdma_dev_net *rnet);
+
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+ struct rdma_user_mmap_entry *entry;
+};
+
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 680ad27f497d..8434ec082c3a 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,11 +149,18 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
struct auto_mode_param *param = &counter->mode.param;
bool match = true;
- if (!rdma_is_visible_in_pid_ns(&qp->res))
- return false;
-
- /* Ensure that counter belongs to the right PID */
- if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
+ /*
+	 * Ensure that the counter belongs to the right PID. This operation
+	 * can race with user space, which kills the process and leaves the
+	 * QP and counters as orphans.
+	 *
+	 * It is not a big deal because an exited task will leave both the QP
+	 * and the counter in the same bucket of the zombie process. Just
+	 * ensure that the process is still alive before proceeding.
+	 */
+ if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task) ||
+ !task_pid_nr(qp->res.task))
return false;
if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -229,9 +236,6 @@ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp,
rt = &dev->res[RDMA_RESTRACK_COUNTER];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
counter = container_of(res, struct rdma_counter, res);
if ((counter->device != qp->device) || (counter->port != port))
goto next;
@@ -412,9 +416,6 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res))
- goto err;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
goto err;
@@ -445,11 +446,6 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
if (IS_ERR(res))
return NULL;
- if (!rdma_is_visible_in_pid_ns(res)) {
- rdma_restrack_put(res);
- return NULL;
- }
-
counter = container_of(res, struct rdma_counter, res);
kref_get(&counter->kref);
rdma_restrack_put(res);
@@ -463,10 +459,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
u32 qp_num, u32 counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
+ port_counter = &dev->port_data[port].port_counter;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
@@ -503,6 +504,7 @@ err:
int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
u32 qp_num, u32 *counter_id)
{
+ struct rdma_port_counter *port_counter;
struct rdma_counter *counter;
struct ib_qp *qp;
int ret;
@@ -510,9 +512,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
if (!rdma_is_port_valid(dev, port))
return -EINVAL;
- if (!dev->port_data[port].port_counter.hstats)
+ port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
return -EOPNOTSUPP;
+ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
+ return -EINVAL;
+
qp = rdma_counter_get_qp(dev, qp_num);
if (!qp)
return -ENOENT;
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 50a92442c4f7..84dd74fe13b8 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -128,17 +128,14 @@ module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
"Share device among net namespaces; default=1 (shared)");
/**
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
+ * rdma_dev_access_netns() - Return whether an rdma device can be accessed
* from a specified net namespace or not.
- * @device: Pointer to rdma device which needs to be checked
+ * @dev: Pointer to rdma device which needs to be checked
 * @net: Pointer to net namespace for which access is to be checked
*
- * rdma_dev_access_netns() - Return whether a rdma device can be accessed
- * from a specified net namespace or not. When
- * rdma device is in shared mode, it ignores the
- * net namespace. When rdma device is exclusive
- * to a net namespace, rdma device net namespace is
- * checked against the specified one.
+ * When the rdma device is in shared mode, it ignores the net namespace.
+ * When the rdma device is exclusive to a net namespace, rdma device net
+ * namespace is checked against the specified one.
*/
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
@@ -1199,9 +1196,21 @@ static void setup_dma_device(struct ib_device *device)
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
- /* Setup default max segment size for all IB devices */
- dma_set_max_seg_size(device->dma_device, SZ_2G);
+ if (!device->dev.dma_parms) {
+ if (parent) {
+ /*
+ * The caller did not provide DMA parameters, so
+ * 'parent' probably represents a PCI device. The PCI
+ * core sets the maximum segment size to 64
+ * KB. Increase this parameter to 2 GB.
+ */
+ device->dev.dma_parms = parent->dma_parms;
+ dma_set_max_seg_size(device->dma_device, SZ_2G);
+ } else {
+ WARN_ON_ONCE(true);
+ }
+ }
}
/*
@@ -1317,7 +1326,9 @@ out:
/**
* ib_register_device - Register an IB device with IB core
- * @device:Device to register
+ * @device: Device to register
+ * @name: unique string device name. This may include a '%' which will
+ * cause a unique index to be added to the passed device name.
*
* Low-level drivers use ib_register_device() to register their
* devices with the IB core. All registered clients will receive a
@@ -1444,7 +1455,7 @@ out:
/**
* ib_unregister_device - Unregister an IB device
- * @device: The device to unregister
+ * @ib_dev: The device to unregister
*
* Unregister an IB device. All clients will receive a remove callback.
*
@@ -1466,7 +1477,7 @@ EXPORT_SYMBOL(ib_unregister_device);
/**
* ib_unregister_device_and_put - Unregister a device while holding a 'get'
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This is the same as ib_unregister_device(), except it includes an internal
* ib_device_put() that should match a 'get' obtained by the caller.
@@ -1536,7 +1547,7 @@ static void ib_unregister_work(struct work_struct *work)
/**
* ib_unregister_device_queued - Unregister a device using a work queue
- * device: The device to unregister
+ * @ib_dev: The device to unregister
*
* This schedules an asynchronous unregistration using a WQ for the device. A
* driver should use this to avoid holding locks while doing unregistration,
@@ -2366,7 +2377,7 @@ int ib_modify_device(struct ib_device *device,
struct ib_device_modify *device_modify)
{
if (!device->ops.modify_device)
- return -ENOSYS;
+ return -EOPNOTSUPP;
return device->ops.modify_device(device, device_modify_mask,
device_modify);
@@ -2397,8 +2408,12 @@ int ib_modify_port(struct ib_device *device,
rc = device->ops.modify_port(device, port_num,
port_modify_mask,
port_modify);
+ else if (rdma_protocol_roce(device, port_num) &&
+ ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
+ (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
+ rc = 0;
else
- rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
+ rc = -EOPNOTSUPP;
return rc;
}
EXPORT_SYMBOL(ib_modify_port);
@@ -2607,6 +2622,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, drain_sq);
SET_DEVICE_OP(dev_ops, enable_driver);
SET_DEVICE_OP(dev_ops, fill_res_entry);
+ SET_DEVICE_OP(dev_ops, fill_stat_entry);
SET_DEVICE_OP(dev_ops, get_dev_fw_str);
SET_DEVICE_OP(dev_ops, get_dma_mr);
SET_DEVICE_OP(dev_ops, get_hw_stats);
@@ -2615,9 +2631,9 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, get_port_immutable);
SET_DEVICE_OP(dev_ops, get_vector_affinity);
SET_DEVICE_OP(dev_ops, get_vf_config);
+ SET_DEVICE_OP(dev_ops, get_vf_guid);
SET_DEVICE_OP(dev_ops, get_vf_stats);
SET_DEVICE_OP(dev_ops, init_port);
- SET_DEVICE_OP(dev_ops, invalidate_range);
SET_DEVICE_OP(dev_ops, iw_accept);
SET_DEVICE_OP(dev_ops, iw_add_ref);
SET_DEVICE_OP(dev_ops, iw_connect);
@@ -2630,6 +2646,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
+ SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
diff --git a/drivers/infiniband/core/ib_core_uverbs.c b/drivers/infiniband/core/ib_core_uverbs.c
new file mode 100644
index 000000000000..f509c478b469
--- /dev/null
+++ b/drivers/infiniband/core/ib_core_uverbs.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2019 Marvell. All rights reserved.
+ */
+#include <linux/xarray.h>
+#include "uverbs.h"
+#include "core_priv.h"
+
+/**
+ * rdma_umap_priv_init() - Initialize the private data of a vma
+ *
+ * @priv: The already allocated private data
+ * @vma: The vm area struct that needs private data
+ * @entry: entry into the mmap_xa that needs to be linked with
+ * this vma
+ *
+ * Each time we map IO memory into user space this keeps track of the
+ * mapping. When the device is hot-unplugged we 'zap' the mmaps in user space
+ * to point to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and access to it from userspace could MCE.
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ *
+ */
+void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ if (entry) {
+ kref_get(&entry->ref);
+ priv->entry = entry;
+ }
+ vma->vm_private_data = priv;
+ /* vm_ops is setup in ib_uverbs_mmap() to avoid module dependencies */
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+EXPORT_SYMBOL(rdma_umap_priv_init);
+
+/**
+ * rdma_user_mmap_io() - Map IO memory into a process
+ *
+ * @ucontext: associated user context
+ * @vma: the vma related to the current mmap call
+ * @pfn: pfn to map
+ * @size: size to map
+ * @prot: pgprot to use in remap call
+ * @entry: mmap_entry retrieved from rdma_user_mmap_entry_get(), or NULL
+ * if mmap_entry is not used by the driver
+ *
+ * This is to be called by drivers as part of their mmap() functions if they
+ * wish to send something like PCI-E BAR memory to userspace.
+ *
+ * Return -EINVAL on wrong flags or size, -EAGAIN on failure to map. 0 on
+ * success.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return -EINVAL;
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return -EINVAL;
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma, entry);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/**
+ * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @pgoff: The mmap offset >> PAGE_SHIFT
+ *
+ * This function is called when a user tries to mmap with an offset (returned
+ * by rdma_user_mmap_get_offset()) it initially received from the driver. The
+ * rdma_user_mmap_entry was created by the function
+ * rdma_user_mmap_entry_insert(). This function increases the refcnt of the
+ * entry so that it won't be deleted from the xarray in the meantime.
+ *
+ * Return a reference to the entry if it exists, or NULL if there is no
+ * match. rdma_user_mmap_entry_put() must be called to put the reference.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (pgoff > U32_MAX)
+ return NULL;
+
+ xa_lock(&ucontext->mmap_xa);
+
+ entry = xa_load(&ucontext->mmap_xa, pgoff);
+
+ /*
+	 * If the refcount is zero, the entry is already being deleted;
+	 * driver_removed indicates that no further mmaps are possible and
+	 * we are waiting for the active VMAs to be closed.
+ */
+ if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
+ !kref_get_unless_zero(&entry->ref))
+ goto err;
+
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
+ pgoff, entry->npages);
+
+ return entry;
+
+err:
+ xa_unlock(&ucontext->mmap_xa);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
+
+/**
+ * rdma_user_mmap_entry_get() - Get an entry from the mmap_xa
+ *
+ * @ucontext: associated user context
+ * @vma: the vma being mmap'd into
+ *
+ * This function is like rdma_user_mmap_entry_get_pgoff() except that it also
+ * checks that the VMA is correct.
+ */
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *entry;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return NULL;
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
+ if (!entry)
+ return NULL;
+ if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
+ rdma_user_mmap_entry_put(entry);
+ return NULL;
+ }
+ return entry;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_get);
+
+static void rdma_user_mmap_entry_free(struct kref *kref)
+{
+ struct rdma_user_mmap_entry *entry =
+ container_of(kref, struct rdma_user_mmap_entry, ref);
+ struct ib_ucontext *ucontext = entry->ucontext;
+ unsigned long i;
+
+ /*
+	 * Erase all entries occupied by this single entry; this is deferred
+	 * until all VMAs are closed so that the mmap offsets remain unique.
+ */
+ xa_lock(&ucontext->mmap_xa);
+ for (i = 0; i < entry->npages; i++)
+ __xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
+ xa_unlock(&ucontext->mmap_xa);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
+ entry->start_pgoff, entry->npages);
+
+ if (ucontext->device->ops.mmap_free)
+ ucontext->device->ops.mmap_free(entry);
+}
+
+/**
+ * rdma_user_mmap_entry_put() - Drop reference to the mmap entry
+ *
+ * @entry: an entry in the mmap_xa
+ *
+ * This function is called when the mapping is closed if it was
+ * an io mapping or when the driver is done with the entry for
+ * some other reason.
+ * Should be called after rdma_user_mmap_entry_get was called
+ * and entry is no longer needed. This function will erase the
+ * entry and free it if its refcnt reaches zero.
+ */
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
+{
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_put);
+
+/**
+ * rdma_user_mmap_entry_remove() - Drop reference to entry and
+ * mark it as unmmapable
+ *
+ * @entry: the entry to insert into the mmap_xa
+ *
+ * Drivers can call this to prevent userspace from creating more mappings for
+ * entry, however existing mmaps continue to exist and ops->mmap_free() will
+ * not be called until all user mmaps are destroyed.
+ */
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->driver_removed = true;
+ kref_put(&entry->ref, rdma_user_mmap_entry_free);
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for implementing their mmap syscall. A database of mmap offsets is
+ * handled in the core and helper functions are provided to insert entries
+ * into the database and extract entries when the user calls mmap with the
+ * given offset. The function allocates a unique page offset that should be
+ * provided to the user; the user will use the offset to retrieve information
+ * such as the address to be mapped and how it should be mapped.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ XA_STATE(xas, &ucontext->mmap_xa, 0);
+ u32 xa_first, xa_last, npages;
+ int err;
+ u32 i;
+
+ if (!entry)
+ return -EINVAL;
+
+ kref_init(&entry->ref);
+ entry->ucontext = ucontext;
+
+ /*
+ * We want the whole allocation to be done without interruption from a
+ * different thread. The allocation requires finding a free range and
+ * storing. During the xa_insert the lock could be released, possibly
+ * allowing another thread to choose the same range.
+ */
+ mutex_lock(&ufile->umap_lock);
+
+ xa_lock(&ucontext->mmap_xa);
+
+ /* We want to find an empty range */
+ npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
+ entry->npages = npages;
+ while (true) {
+ /* First find an empty index */
+ xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ goto err_unlock;
+
+ xa_first = xas.xa_index;
+
+ /* Is there enough room to have the range? */
+ if (check_add_overflow(xa_first, npages, &xa_last))
+ goto err_unlock;
+
+ /*
+ * Now look for the next present entry. If an entry doesn't
+ * exist, we found an empty range and can proceed.
+ */
+ xas_next_entry(&xas, xa_last - 1);
+ if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
+ break;
+ }
+
+ for (i = xa_first; i < xa_last; i++) {
+ err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
+ if (err)
+ goto err_undo;
+ }
+
+ /*
+ * Internally the kernel uses a page offset, in libc this is a byte
+ * offset. Drivers should not return pgoff to userspace.
+ */
+ entry->start_pgoff = xa_first;
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+
+ ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
+ entry->start_pgoff, npages);
+
+ return 0;
+
+err_undo:
+ for (; i > xa_first; i--)
+ __xa_erase(&ucontext->mmap_xa, i - 1);
+
+err_unlock:
+ xa_unlock(&ucontext->mmap_xa);
+ mutex_unlock(&ufile->umap_lock);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
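For context, a minimal sketch of the driver-side flow these helpers enable. The names drv_alloc_uar(), drv_mmap(), struct drv_entry and pfn_of() are hypothetical; only the rdma_user_mmap_* calls and the start_pgoff field come from this file:

  /* Allocation path: reserve an offset and hand it to user space. */
  static int drv_alloc_uar(struct ib_ucontext *uctx, struct drv_entry *e,
                           u64 *mmap_offset)
  {
          int ret;

          ret = rdma_user_mmap_entry_insert(uctx, &e->rdma_entry, PAGE_SIZE);
          if (ret)
                  return ret;
          /* user space passes this byte offset back via mmap() */
          *mmap_offset = (u64)e->rdma_entry.start_pgoff << PAGE_SHIFT;
          return 0;
  }

  /* mmap path: look the entry up by vma->vm_pgoff and map it. */
  static int drv_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
  {
          struct rdma_user_mmap_entry *entry;
          int ret;

          entry = rdma_user_mmap_entry_get(uctx, vma);
          if (!entry)
                  return -EINVAL;
          ret = rdma_user_mmap_io(uctx, vma, pfn_of(entry), PAGE_SIZE,
                                  pgprot_noncached(vma->vm_page_prot), entry);
          rdma_user_mmap_entry_put(entry);
          return ret;
  }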
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 7e2bcc72f66c..1bf87d9fd0bd 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -210,8 +210,10 @@ int iwpm_mapinfo_available(void);
/**
* iwpm_compare_sockaddr - Compare two sockaddr storage structs
+ * @a_sockaddr: first sockaddr to compare
+ * @b_sockaddr: second sockaddr to compare
*
- * Returns 0 if they are holding the same ip/tcp address info,
+ * Return: 0 if they are holding the same ip/tcp address info,
* otherwise returns 1
*/
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
@@ -272,6 +274,7 @@ void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg);
* iwpm_send_hello - Send hello response to iwpmd
*
* @nl_client: The index of the netlink client
+ * @iwpm_pid: The pid of the user space port mapper
* @abi_version: The kernel's abi_version
*
* Returns 0 on success or a negative error code
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9947d16edef2..c54db13fa9b0 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -913,9 +913,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
/* No GRH for DR SMP */
ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
- (const struct ib_mad_hdr *)smp, mad_size,
- (struct ib_mad_hdr *)mad_priv->mad,
- &mad_size, &out_mad_pkey_index);
+ (const struct ib_mad *)smp,
+ (struct ib_mad *)mad_priv->mad, &mad_size,
+ &out_mad_pkey_index);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -1397,25 +1397,6 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
}
EXPORT_SYMBOL(ib_free_recv_mad);
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- return ERR_PTR(-EINVAL); /* XXX: for now */
-}
-EXPORT_SYMBOL(ib_redirect_mad_qp);
-
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc)
-{
- dev_err(&mad_agent->device->dev,
- "ib_process_mad_wc() not implemented yet\n");
- return 0;
-}
-EXPORT_SYMBOL(ib_process_mad_wc);
-
static int method_in_use(struct ib_mad_mgmt_method_table **method,
struct ib_mad_reg_req *mad_reg_req)
{
@@ -2340,9 +2321,9 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (port_priv->device->ops.process_mad) {
ret = port_priv->device->ops.process_mad(
port_priv->device, 0, port_priv->port_num, wc,
- &recv->grh, (const struct ib_mad_hdr *)recv->mad,
- recv->mad_size, (struct ib_mad_hdr *)response->mad,
- &mad_size, &resp_mad_pkey_index);
+ &recv->grh, (const struct ib_mad *)recv->mad,
+ (struct ib_mad *)response->mad, &mad_size,
+ &resp_mad_pkey_index);
if (opa)
wc->pkey_index = resp_mad_pkey_index;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index c03af08b80e7..cbf6041a5d4a 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -42,6 +42,9 @@
#include "cma_priv.h"
#include "restrack.h"
+typedef int (*res_fill_func_t)(struct sk_buff*, bool,
+ struct rdma_restrack_entry*, uint32_t);
+
/*
* Sort array elements by the netlink attribute name
*/
@@ -180,6 +183,19 @@ static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name,
return 0;
}
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str)
+{
+ if (put_driver_name_print_type(msg, name,
+ RDMA_NLDEV_PRINT_TYPE_UNSPEC))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str))
+ return -EMSGSIZE;
+
+ return 0;
+}
+EXPORT_SYMBOL(rdma_nl_put_driver_string);
+
int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value)
{
return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC,
@@ -399,20 +415,34 @@ err:
static int fill_res_name_pid(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
+ int err = 0;
+
/*
	 * For user resources, the user should read /proc/PID/comm to get the
	 * name of the task.
*/
if (rdma_is_kernel_res(res)) {
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
- res->kern_name))
- return -EMSGSIZE;
+ err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
+ res->kern_name);
} else {
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
- task_pid_vnr(res->task)))
- return -EMSGSIZE;
+ pid_t pid;
+
+ pid = task_pid_vnr(res->task);
+ /*
+ * Task is dead and in zombie state.
+ * There is no need to print PID anymore.
+ */
+ if (pid)
+ /*
+ * This part is racy, task can be killed and PID will
+ * be zero right here but it is ok, next query won't
+ * return PID. We don't promise real-time reflection
+ * of SW objects.
+ */
+ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid);
}
- return 0;
+
+ return err ? -EMSGSIZE : 0;
}
static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
@@ -423,6 +453,14 @@ static bool fill_res_entry(struct ib_device *dev, struct sk_buff *msg,
return dev->ops.fill_res_entry(msg, res);
}
+static bool fill_stat_entry(struct ib_device *dev, struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (!dev->ops.fill_stat_entry)
+ return false;
+ return dev->ops.fill_stat_entry(msg, res);
+}
+
static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin,
struct rdma_restrack_entry *res, uint32_t port)
{
@@ -698,9 +736,6 @@ static int fill_stat_counter_qps(struct sk_buff *msg,
rt = &counter->device->res[RDMA_RESTRACK_QP];
xa_lock(&rt->xa);
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
qp = container_of(res, struct ib_qp, res);
if (qp->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
continue;
@@ -723,8 +758,8 @@ err:
return ret;
}
-static int fill_stat_hwcounter_entry(struct sk_buff *msg,
- const char *name, u64 value)
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value)
{
struct nlattr *entry_attr;
@@ -746,6 +781,25 @@ err:
nla_nest_cancel(msg, entry_attr);
return -EMSGSIZE;
}
+EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry);
+
+static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
+ struct rdma_restrack_entry *res, uint32_t port)
+{
+ struct ib_mr *mr = container_of(res, struct ib_mr, res);
+ struct ib_device *dev = mr->pd->device;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id))
+ goto err;
+
+ if (fill_stat_entry(dev, msg, res))
+ goto err;
+
+ return 0;
+
+err:
+ return -EMSGSIZE;
+}
static int fill_stat_counter_hwcounters(struct sk_buff *msg,
struct rdma_counter *counter)
@@ -759,7 +813,7 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg,
return -EMSGSIZE;
for (i = 0; i < st->num_counters; i++)
- if (fill_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
+ if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
goto err;
nla_nest_end(msg, table_attr);
@@ -1117,8 +1171,6 @@ static int nldev_res_get_dumpit(struct sk_buff *skb,
}
struct nldev_fill_res_entry {
- int (*fill_res_func)(struct sk_buff *msg, bool has_cap_net_admin,
- struct rdma_restrack_entry *res, u32 port);
enum rdma_nldev_attr nldev_attr;
enum rdma_nldev_command nldev_cmd;
u8 flags;
@@ -1132,21 +1184,18 @@ enum nldev_res_flags {
static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_QP] = {
- .fill_res_func = fill_res_qp_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
.entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_LQPN,
},
[RDMA_RESTRACK_CM_ID] = {
- .fill_res_func = fill_res_cm_id_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
.entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY,
.id = RDMA_NLDEV_ATTR_RES_CM_IDN,
},
[RDMA_RESTRACK_CQ] = {
- .fill_res_func = fill_res_cq_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
.flags = NLDEV_PER_DEV,
@@ -1154,7 +1203,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_CQN,
},
[RDMA_RESTRACK_MR] = {
- .fill_res_func = fill_res_mr_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
.flags = NLDEV_PER_DEV,
@@ -1162,7 +1210,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_MRN,
},
[RDMA_RESTRACK_PD] = {
- .fill_res_func = fill_res_pd_entry,
.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
.flags = NLDEV_PER_DEV,
@@ -1170,7 +1217,6 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
.id = RDMA_NLDEV_ATTR_RES_PDN,
},
[RDMA_RESTRACK_COUNTER] = {
- .fill_res_func = fill_res_counter_entry,
.nldev_cmd = RDMA_NLDEV_CMD_STAT_GET,
.nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER,
.entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY,
@@ -1180,7 +1226,8 @@ static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1222,11 +1269,6 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err;
}
- if (!rdma_is_visible_in_pid_ns(res)) {
- ret = -ENOENT;
- goto err_get;
- }
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
@@ -1243,7 +1285,9 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
}
has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
- ret = fe->fill_res_func(msg, has_cap_net_admin, res, port);
+
+ ret = fill_func(msg, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret)
goto err_free;
@@ -1263,7 +1307,8 @@ err:
static int res_get_common_dumpit(struct sk_buff *skb,
struct netlink_callback *cb,
- enum rdma_restrack_type res_type)
+ enum rdma_restrack_type res_type,
+ res_fill_func_t fill_func)
{
const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
@@ -1334,9 +1379,6 @@ static int res_get_common_dumpit(struct sk_buff *skb,
* objects.
*/
xa_for_each(&rt->xa, id, res) {
- if (!rdma_is_visible_in_pid_ns(res))
- continue;
-
if (idx < start || !rdma_restrack_get(res))
goto next;
@@ -1351,7 +1393,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto msg_full;
}
- ret = fe->fill_res_func(skb, has_cap_net_admin, res, port);
+ ret = fill_func(skb, has_cap_net_admin, res, port);
+
rdma_restrack_put(res);
if (ret) {
@@ -1394,17 +1437,19 @@ err_index:
return ret;
}
-#define RES_GET_FUNCS(name, type) \
- static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
+#define RES_GET_FUNCS(name, type) \
+ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \
struct netlink_callback *cb) \
- { \
- return res_get_common_dumpit(skb, cb, type); \
- } \
- static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
- struct nlmsghdr *nlh, \
+ { \
+ return res_get_common_dumpit(skb, cb, type, \
+ fill_res_##name##_entry); \
+ } \
+ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \
+ struct nlmsghdr *nlh, \
struct netlink_ext_ack *extack) \
- { \
- return res_get_common_doit(skb, nlh, extack, type); \
+ { \
+ return res_get_common_doit(skb, nlh, extack, type, \
+ fill_res_##name##_entry); \
}
RES_GET_FUNCS(qp, RDMA_RESTRACK_QP);
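For readability, roughly what RES_GET_FUNCS(qp, RDMA_RESTRACK_QP) now expands to after the change above (whitespace simplified):

  static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
                                     struct netlink_callback *cb)
  {
          return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP,
                                       fill_res_qp_entry);
  }

  static int nldev_res_get_qp_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                                   struct netlink_ext_ack *extack)
  {
          return res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_QP,
                                     fill_res_qp_entry);
  }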
@@ -1880,7 +1925,7 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
for (i = 0; i < num_cnts; i++) {
v = stats->value[i] +
rdma_counter_get_hwstat_value(device, port, i);
- if (fill_stat_hwcounter_entry(msg, stats->names[i], v)) {
+ if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
ret = -EMSGSIZE;
goto err_table;
}
@@ -1989,7 +2034,10 @@ static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
case RDMA_NLDEV_ATTR_RES_QP:
ret = stat_get_doit_qp(skb, nlh, extack, tb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
@@ -2013,7 +2061,10 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
case RDMA_NLDEV_ATTR_RES_QP:
ret = nldev_res_get_counter_dumpit(skb, cb);
break;
-
+ case RDMA_NLDEV_ATTR_RES_MR:
+ ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR,
+ fill_stat_mr_entry);
+ break;
default:
ret = -EINVAL;
break;
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index ccf4d069c25c..6c72773faf29 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -817,6 +817,7 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
rdma_restrack_del(&ucontext->res);
ib_dev->ops.dealloc_ucontext(ucontext);
+ WARN_ON(!xa_empty(&ucontext->mmap_xa));
kfree(ucontext);
ufile->ucontext = NULL;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index a07665f7ef8c..62fbb0ae9cb4 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -116,11 +116,8 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
u32 cnt = 0;
xa_lock(&rt->xa);
- xas_for_each(&xas, e, U32_MAX) {
- if (!rdma_is_visible_in_pid_ns(e))
- continue;
+ xas_for_each(&xas, e, U32_MAX)
cnt++;
- }
xa_unlock(&rt->xa);
return cnt;
}
@@ -346,18 +343,3 @@ out:
}
}
EXPORT_SYMBOL(rdma_restrack_del);
-
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
-{
- /*
- * 1. Kern resources should be visible in init
- * namespace only
- * 2. Present only resources visible in the current
- * namespace
- */
- if (rdma_is_kernel_res(res))
- return task_active_pid_ns(current) == &init_pid_ns;
-
- /* PID 0 means that resource is not found in current namespace */
- return task_pid_vnr(res->task);
-}
diff --git a/drivers/infiniband/core/restrack.h b/drivers/infiniband/core/restrack.h
index 7bd177cc0a61..d084e5f89849 100644
--- a/drivers/infiniband/core/restrack.h
+++ b/drivers/infiniband/core/restrack.h
@@ -27,5 +27,4 @@ int rdma_restrack_init(struct ib_device *dev);
void rdma_restrack_clean(struct ib_device *dev);
void rdma_restrack_attach_task(struct rdma_restrack_entry *res,
struct task_struct *task);
-bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res);
#endif /* _RDMA_CORE_RESTRACK_H_ */
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5337393d4dfe..4fad732f9b3c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -20,14 +20,17 @@ module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
/*
- * Check if the device might use memory registration. This is currently only
- * true for iWarp devices. In the future we can hopefully fine tune this based
- * on HCA driver input.
+ * Report whether memory registration should be used. Memory registration must
+ * be used for iWarp devices because of iWARP-specific limitations. Memory
+ * registration is also enabled if registering memory might yield better
+ * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
*/
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
{
if (rdma_protocol_iwarp(dev, port_num))
return true;
+ if (dev->attrs.max_sgl_rd)
+ return true;
if (unlikely(rdma_rw_force_mr))
return true;
return false;
@@ -35,17 +38,19 @@ static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
/*
* Check if the device will use memory registration for this RW operation.
- * We currently always use memory registrations for iWarp RDMA READs, and
- * have a debug option to force usage of MRs.
- *
- * XXX: In the future we can hopefully fine tune this based on HCA driver
- * input.
+ * For RDMA READs we must use MRs on iWarp and can optionally use them as an
+ * optimization otherwise. Additionally we have a debug option to force usage
+ * of MRs to help testing this code path.
*/
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
enum dma_data_direction dir, int dma_nents)
{
- if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
- return true;
+ if (dir == DMA_FROM_DEVICE) {
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
+ return true;
+ }
if (unlikely(rdma_rw_force_mr))
return true;
return false;
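Taken together, the two predicates now say: always use an MR on iWarp, and otherwise fall back to an MR for READs only when the scatter list is longer than the device's advertised max_sgl_rd. A hedged user-space model of that decision; the struct below is an illustrative stand-in for the device attributes, not struct ib_device_attr itself:

#include <stdbool.h>
#include <stdio.h>

struct dev_attrs { unsigned int max_sgl_rd; };

/* Mirrors the rdma_rw_io_needs_mr() READ path: iWarp always needs an
 * MR; otherwise use one only when the SGE count exceeds the device's
 * advertised read limit (0 means "no limit, never prefer an MR"). */
static bool io_needs_mr(bool is_iwarp, const struct dev_attrs *attrs,
                        int dma_nents)
{
        if (is_iwarp)
                return true;
        if (attrs->max_sgl_rd && dma_nents > (int)attrs->max_sgl_rd)
                return true;
        return false;
}

int main(void)
{
        struct dev_attrs a = { .max_sgl_rd = 30 };

        printf("%d\n", io_needs_mr(false, &a, 16)); /* 0: SGEs suffice */
        printf("%d\n", io_needs_mr(false, &a, 64)); /* 1: past the limit */
        printf("%d\n", io_needs_mr(true,  &a, 1));  /* 1: iWarp */
        return 0;
}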
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 17fc2936c077..8917125ea16d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1246,7 +1246,7 @@ static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
* @port_num: Port on the specified device.
* @rec: path record entry to use for ah attributes initialization.
* @ah_attr: address handle attributes to initialize from path record.
- * @sgid_attr: SGID attribute to consider during initialization.
+ * @gid_attr: SGID attribute to consider during initialization.
*
* When ib_init_ah_attr_from_path() returns success,
* (a) for IB link layer it optionally contains a reference to SGID attribute
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7a50cedcef1f..087682e6969e 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -481,8 +481,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (!dev->ops.process_mad)
return -ENOSYS;
- in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
- out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kzalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad) {
ret = -ENOMEM;
goto out;
@@ -497,10 +497,8 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
if (attr != IB_PMA_CLASS_PORT_INFO)
in_mad->data[41] = port_num; /* PortSelect field */
- if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY,
- port_num, NULL, NULL,
- (const struct ib_mad_hdr *)in_mad, mad_size,
- (struct ib_mad_hdr *)out_mad, &mad_size,
+ if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, port_num, NULL, NULL,
+ in_mad, out_mad, &mad_size,
&out_mad_pkey_index) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
@@ -1268,7 +1266,7 @@ static ssize_t node_desc_store(struct device *device,
int ret;
if (!dev->ops.modify_device)
- return -EIO;
+ return -EOPNOTSUPP;
memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
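The node_desc_store() change reports a missing modify_device callback as -EOPNOTSUPP ("operation not supported") instead of -EIO, which had suggested a device I/O failure. A tiny runnable sketch of the optional-callback convention, with illustrative names:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct dev_ops { int (*modify_device)(const char *desc); };

static int set_node_desc(const struct dev_ops *ops, const char *desc)
{
        if (!ops->modify_device)
                return -EOPNOTSUPP;     /* unsupported, not broken */
        return ops->modify_device(desc);
}

int main(void)
{
        struct dev_ops none = { .modify_device = NULL };

        printf("%d\n", set_node_desc(&none, "host1")); /* -EOPNOTSUPP */
        return 0;
}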
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 24244a2f68cc..7a3b99597ead 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -185,10 +185,9 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
* @addr: userspace virtual address to start at
* @size: length of region to pin
* @access: IB_ACCESS_xxx flags for memory being pinned
- * @dmasync: flush in-flight DMA when the memory region is written
*/
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync)
+ size_t size, int access)
{
struct ib_ucontext *context;
struct ib_umem *umem;
@@ -199,7 +198,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
struct mm_struct *mm;
unsigned long npages;
int ret;
- unsigned long dma_attrs = 0;
struct scatterlist *sg;
unsigned int gup_flags = FOLL_WRITE;
@@ -211,9 +209,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
if (!context)
return ERR_PTR(-EIO);
- if (dmasync)
- dma_attrs |= DMA_ATTR_WRITE_BARRIER;
-
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
@@ -294,11 +289,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
sg_mark_end(sg);
- umem->nmap = ib_dma_map_sg_attrs(context->device,
+ umem->nmap = ib_dma_map_sg(context->device,
umem->sg_head.sgl,
umem->sg_nents,
- DMA_BIDIRECTIONAL,
- dma_attrs);
+ DMA_BIDIRECTIONAL);
if (!umem->nmap) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 163ff7ba92b7..e42d44e501fd 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -48,197 +48,33 @@
#include "uverbs.h"
-static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
+static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+ const struct mmu_interval_notifier_ops *ops)
{
- mutex_lock(&umem_odp->umem_mutex);
- if (umem_odp->notifiers_count++ == 0)
- /*
- * Initialize the completion object for waiting on
- * notifiers. Since notifier_count is zero, no one should be
- * waiting right now.
- */
- reinit_completion(&umem_odp->notifier_completion);
- mutex_unlock(&umem_odp->umem_mutex);
-}
-
-static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
-{
- mutex_lock(&umem_odp->umem_mutex);
- /*
- * This sequence increase will notify the QP page fault that the page
- * that is going to be mapped in the spte could have been freed.
- */
- ++umem_odp->notifiers_seq;
- if (--umem_odp->notifiers_count == 0)
- complete_all(&umem_odp->notifier_completion);
- mutex_unlock(&umem_odp->umem_mutex);
-}
-
-static void ib_umem_notifier_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
- struct rb_node *node;
-
- down_read(&per_mm->umem_rwsem);
- if (!per_mm->mn.users)
- goto out;
-
- for (node = rb_first_cached(&per_mm->umem_tree); node;
- node = rb_next(node)) {
- struct ib_umem_odp *umem_odp =
- rb_entry(node, struct ib_umem_odp, interval_tree.rb);
-
- /*
- * Increase the number of notifiers running, to prevent any
- * further fault handling on this MR.
- */
- ib_umem_notifier_start_account(umem_odp);
- complete_all(&umem_odp->notifier_completion);
- umem_odp->umem.ibdev->ops.invalidate_range(
- umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- }
-
-out:
- up_read(&per_mm->umem_rwsem);
-}
-
-static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
- u64 start, u64 end, void *cookie)
-{
- ib_umem_notifier_start_account(item);
- item->umem.ibdev->ops.invalidate_range(item, start, end);
- return 0;
-}
-
-static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
- int rc;
-
- if (mmu_notifier_range_blockable(range))
- down_read(&per_mm->umem_rwsem);
- else if (!down_read_trylock(&per_mm->umem_rwsem))
- return -EAGAIN;
-
- if (!per_mm->mn.users) {
- up_read(&per_mm->umem_rwsem);
- /*
- * At this point users is permanently zero and visible to this
- * CPU without a lock, that fact is relied on to skip the unlock
- * in range_end.
- */
- return 0;
- }
-
- rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
- range->end,
- invalidate_range_start_trampoline,
- mmu_notifier_range_blockable(range),
- NULL);
- if (rc)
- up_read(&per_mm->umem_rwsem);
- return rc;
-}
-
-static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
- u64 end, void *cookie)
-{
- ib_umem_notifier_end_account(item);
- return 0;
-}
-
-static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
-
- if (unlikely(!per_mm->mn.users))
- return;
-
- rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
- range->end,
- invalidate_range_end_trampoline, true, NULL);
- up_read(&per_mm->umem_rwsem);
-}
-
-static struct mmu_notifier *ib_umem_alloc_notifier(struct mm_struct *mm)
-{
- struct ib_ucontext_per_mm *per_mm;
-
- per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
- if (!per_mm)
- return ERR_PTR(-ENOMEM);
-
- per_mm->umem_tree = RB_ROOT_CACHED;
- init_rwsem(&per_mm->umem_rwsem);
-
- WARN_ON(mm != current->mm);
- rcu_read_lock();
- per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- return &per_mm->mn;
-}
-
-static void ib_umem_free_notifier(struct mmu_notifier *mn)
-{
- struct ib_ucontext_per_mm *per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
-
- WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
-
- put_pid(per_mm->tgid);
- kfree(per_mm);
-}
-
-static const struct mmu_notifier_ops ib_umem_notifiers = {
- .release = ib_umem_notifier_release,
- .invalidate_range_start = ib_umem_notifier_invalidate_range_start,
- .invalidate_range_end = ib_umem_notifier_invalidate_range_end,
- .alloc_notifier = ib_umem_alloc_notifier,
- .free_notifier = ib_umem_free_notifier,
-};
-
-static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
-{
- struct ib_ucontext_per_mm *per_mm;
- struct mmu_notifier *mn;
int ret;
umem_odp->umem.is_odp = 1;
+ mutex_init(&umem_odp->umem_mutex);
+
if (!umem_odp->is_implicit_odp) {
size_t page_size = 1UL << umem_odp->page_shift;
+ unsigned long start;
+ unsigned long end;
size_t pages;
- umem_odp->interval_tree.start =
- ALIGN_DOWN(umem_odp->umem.address, page_size);
+ start = ALIGN_DOWN(umem_odp->umem.address, page_size);
if (check_add_overflow(umem_odp->umem.address,
(unsigned long)umem_odp->umem.length,
- &umem_odp->interval_tree.last))
+ &end))
return -EOVERFLOW;
- umem_odp->interval_tree.last =
- ALIGN(umem_odp->interval_tree.last, page_size);
- if (unlikely(umem_odp->interval_tree.last < page_size))
+ end = ALIGN(end, page_size);
+ if (unlikely(end < page_size))
return -EOVERFLOW;
- pages = (umem_odp->interval_tree.last -
- umem_odp->interval_tree.start) >>
- umem_odp->page_shift;
+ pages = (end - start) >> umem_odp->page_shift;
if (!pages)
return -EINVAL;
- /*
- * Note that the representation of the intervals in the
- * interval tree considers the ending point as contained in
- * the interval.
- */
- umem_odp->interval_tree.last--;
-
umem_odp->page_list = kvcalloc(
pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
if (!umem_odp->page_list)
@@ -250,26 +86,13 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
ret = -ENOMEM;
goto out_page_list;
}
- }
- mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);
- if (IS_ERR(mn)) {
- ret = PTR_ERR(mn);
- goto out_dma_list;
+ ret = mmu_interval_notifier_insert(&umem_odp->notifier,
+ umem_odp->umem.owning_mm,
+ start, end - start, ops);
+ if (ret)
+ goto out_dma_list;
}
- umem_odp->per_mm = per_mm =
- container_of(mn, struct ib_ucontext_per_mm, mn);
-
- mutex_init(&umem_odp->umem_mutex);
- init_completion(&umem_odp->notifier_completion);
-
- if (!umem_odp->is_implicit_odp) {
- down_write(&per_mm->umem_rwsem);
- interval_tree_insert(&umem_odp->interval_tree,
- &per_mm->umem_tree);
- up_write(&per_mm->umem_rwsem);
- }
- mmgrab(umem_odp->umem.owning_mm);
return 0;
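The reworked ib_init_umem_odp() computes the page-aligned [start, end) range in local variables, rejecting address overflow before sizing the page and DMA arrays and before inserting the interval notifier. A runnable sketch of that arithmetic using the compiler builtin that check_add_overflow() wraps; the ALIGN macros and helper here are local to the example:

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

/* Compute how many pages back a user range, rejecting address overflow
 * the same way the diff does (check_add_overflow + post-ALIGN check). */
static int range_pages(unsigned long addr, unsigned long length,
                       unsigned int page_shift, size_t *pages)
{
        unsigned long page_size = 1UL << page_shift;
        unsigned long start, end;

        start = ALIGN_DOWN(addr, page_size);
        if (__builtin_add_overflow(addr, length, &end))
                return -1;              /* -EOVERFLOW in the kernel */
        end = ALIGN_UP(end, page_size);
        if (end < page_size)
                return -1;              /* the ALIGN itself wrapped */
        *pages = (end - start) >> page_shift;
        return *pages ? 0 : -1;         /* -EINVAL for empty ranges */
}

int main(void)
{
        size_t pages;

        if (range_pages(0x1234, 10000, 12, &pages) == 0)
                printf("%zu pages\n", pages);       /* 3 pages */
        if (range_pages(~0UL - 100, 4096, 12, &pages) != 0)
                printf("overflow rejected\n");
        return 0;
}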
@@ -305,8 +128,6 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
if (!context)
return ERR_PTR(-EIO);
- if (WARN_ON_ONCE(!context->device->ops.invalidate_range))
- return ERR_PTR(-EINVAL);
umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
if (!umem_odp)
@@ -318,8 +139,10 @@ struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
- ret = ib_init_umem_odp(umem_odp);
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ ret = ib_init_umem_odp(umem_odp, NULL);
if (ret) {
+ put_pid(umem_odp->tgid);
kfree(umem_odp);
return ERR_PTR(ret);
}
@@ -336,8 +159,10 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
* @addr: The starting userspace VA
* @size: The length of the userspace VA
*/
-struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
- unsigned long addr, size_t size)
+struct ib_umem_odp *
+ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
+ size_t size,
+ const struct mmu_interval_notifier_ops *ops)
{
/*
* Caller must ensure that root cannot be freed during the call to
@@ -360,9 +185,12 @@ struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root,
umem->writable = root->umem.writable;
umem->owning_mm = root->umem.owning_mm;
odp_data->page_shift = PAGE_SHIFT;
+ odp_data->notifier.ops = ops;
- ret = ib_init_umem_odp(odp_data);
+ odp_data->tgid = get_pid(root->tgid);
+ ret = ib_init_umem_odp(odp_data, ops);
if (ret) {
+ put_pid(odp_data->tgid);
kfree(odp_data);
return ERR_PTR(ret);
}
@@ -383,7 +211,8 @@ EXPORT_SYMBOL(ib_umem_odp_alloc_child);
* conjunction with MMU notifiers.
*/
struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access)
+ size_t size, int access,
+ const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
struct ib_ucontext *context;
@@ -398,8 +227,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
if (!context)
return ERR_PTR(-EIO);
- if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)) ||
- WARN_ON_ONCE(!context->device->ops.invalidate_range))
+ if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
return ERR_PTR(-EINVAL);
umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
@@ -411,6 +239,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
umem_odp->umem.owning_mm = mm = current->mm;
+ umem_odp->notifier.ops = ops;
umem_odp->page_shift = PAGE_SHIFT;
if (access & IB_ACCESS_HUGETLB) {
@@ -429,11 +258,14 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
up_read(&mm->mmap_sem);
}
- ret = ib_init_umem_odp(umem_odp);
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ ret = ib_init_umem_odp(umem_odp, ops);
if (ret)
- goto err_free;
+ goto err_put_pid;
return umem_odp;
+err_put_pid:
+ put_pid(umem_odp->tgid);
err_free:
kfree(umem_odp);
return ERR_PTR(ret);
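Note how the error path pairs each acquisition with one unwind label: the tgid reference taken just before ib_init_umem_odp() is dropped at err_put_pid, and the earlier allocation is freed at err_free, in reverse order of acquisition. A minimal user-space sketch of that goto-ladder shape, with mallocs standing in for the pid reference and umem allocation:

#include <stdio.h>
#include <stdlib.h>

struct res { void *mem; void *pid_ref; };

static int init_step(int fail) { return fail ? -1 : 0; }

/* goto-ladder unwind: release in reverse order of acquisition. */
static struct res *get_resource(int fail_init)
{
        struct res *r = malloc(sizeof(*r));

        if (!r)
                return NULL;
        r->mem = malloc(16);            /* umem_odp allocation analog */
        if (!r->mem)
                goto err_free_r;
        r->pid_ref = malloc(16);        /* get_task_pid() analog */
        if (!r->pid_ref)
                goto err_free_mem;
        if (init_step(fail_init))       /* ib_init_umem_odp() analog */
                goto err_put_pid;
        return r;

err_put_pid:
        free(r->pid_ref);               /* put_pid() analog */
err_free_mem:
        free(r->mem);
err_free_r:
        free(r);
        return NULL;
}

int main(void)
{
        printf("%s\n", get_resource(1) ? "ok" : "unwound cleanly");
        return 0;
}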
@@ -442,8 +274,6 @@ EXPORT_SYMBOL(ib_umem_odp_get);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
-
/*
* Ensure that no more pages are mapped in the umem.
*
@@ -455,28 +285,11 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
ib_umem_end(umem_odp));
mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
kvfree(umem_odp->dma_list);
kvfree(umem_odp->page_list);
+ put_pid(umem_odp->tgid);
}
-
- down_write(&per_mm->umem_rwsem);
- if (!umem_odp->is_implicit_odp) {
- interval_tree_remove(&umem_odp->interval_tree,
- &per_mm->umem_tree);
- complete_all(&umem_odp->notifier_completion);
- }
- /*
- * NOTE! mmu_notifier_unregister() can happen between a start/end
- * callback, resulting in a missing end, and thus an unbalanced
- * lock. This doesn't really matter to us since we are about to kfree
- * the memory that holds the lock, however LOCKDEP doesn't like this.
- * Thus we call the mmu_notifier_put under the rwsem and test the
- * internal users count to reliably see if we are past this point.
- */
- mmu_notifier_put(&per_mm->mn);
- up_write(&per_mm->umem_rwsem);
-
- mmdrop(umem_odp->umem.owning_mm);
kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);
@@ -501,22 +314,16 @@ EXPORT_SYMBOL(ib_umem_odp_release);
*/
static int ib_umem_odp_map_dma_single_page(
struct ib_umem_odp *umem_odp,
- int page_index,
+ unsigned int page_index,
struct page *page,
u64 access_mask,
unsigned long current_seq)
{
struct ib_device *dev = umem_odp->umem.ibdev;
dma_addr_t dma_addr;
- int remove_existing_mapping = 0;
int ret = 0;
- /*
- * Note: we avoid writing if seq is different from the initial seq, to
- * handle case of a racing notifier. This check also allows us to bail
- * early if we have a notifier running in parallel with us.
- */
- if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
+ if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
ret = -EAGAIN;
goto out;
}
@@ -534,28 +341,29 @@ static int ib_umem_odp_map_dma_single_page(
} else if (umem_odp->page_list[page_index] == page) {
umem_odp->dma_list[page_index] |= access_mask;
} else {
- pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem_odp->page_list[page_index], page);
- /* Better remove the mapping now, to prevent any further
- * damage. */
- remove_existing_mapping = 1;
+ /*
+ * There is a race here where we could have done:
+ *
+ * CPU0 CPU1
+ * get_user_pages()
+ * invalidate()
+ * page_fault()
+ * mutex_lock(umem_mutex)
+ * page from GUP != page in ODP
+ *
+ * It should be prevented by the retry test above as reading
+ * the seq number should be reliable under the
+ * umem_mutex. Thus something is really not working right if
+ * things get here.
+ */
+ WARN(true,
+ "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
+ umem_odp->page_list[page_index], page);
+ ret = -EAGAIN;
}
out:
put_user_page(page);
-
- if (remove_existing_mapping) {
- ib_umem_notifier_start_account(umem_odp);
- dev->ops.invalidate_range(
- umem_odp,
- ib_umem_start(umem_odp) +
- (page_index << umem_odp->page_shift),
- ib_umem_start(umem_odp) +
- ((page_index + 1) << umem_odp->page_shift));
- ib_umem_notifier_end_account(umem_odp);
- ret = -EAGAIN;
- }
-
return ret;
}
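The page-mismatch branch can WARN rather than patch things up because the retry protocol already excludes the race: the fault path samples the interval notifier's sequence before get_user_pages() and, under umem_mutex, returns -EAGAIN if mmu_interval_check_retry() sees the sequence moved. A single-threaded model of that protocol, with a plain counter standing in for the notifier's sequence and the locking elided:

#include <stdio.h>

/* Stand-in for the interval notifier's invalidation sequence counter. */
static unsigned long invalidate_seq;

static unsigned long read_begin(void)   { return invalidate_seq; }
static int check_retry(unsigned long s) { return invalidate_seq != s; }
static void invalidate(void)            { invalidate_seq++; }

/* Fault path: retry the whole fault if an invalidation raced with us. */
static int map_page(int racing_invalidate)
{
        unsigned long seq = read_begin();

        /* ... get_user_pages() would run here, without the lock ... */
        if (racing_invalidate)
                invalidate();

        /* ... now under umem_mutex ... */
        if (check_retry(seq))
                return -1;      /* -EAGAIN: caller refaults from scratch */
        return 0;               /* safe to install the mapping */
}

int main(void)
{
        printf("quiet:  %d\n", map_page(0));   /* 0 */
        printf("racing: %d\n", map_page(1));   /* -1, retried by caller */
        return 0;
}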
@@ -618,7 +426,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
* existing beyond the lifetime of the originating process. Presumably
* mmget_not_zero will fail in this case.
*/
- owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
+ owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
if (!owning_process || !mmget_not_zero(owning_mm)) {
ret = -EINVAL;
goto out_put_task;
@@ -762,32 +570,3 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
-
-/* @last is not a part of the interval. See comment for function
- * node_last.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
- u64 start, u64 last,
- umem_call_back cb,
- bool blockable,
- void *cookie)
-{
- int ret_val = 0;
- struct interval_tree_node *node, *next;
- struct ib_umem_odp *umem;
-
- if (unlikely(start == last))
- return ret_val;
-
- for (node = interval_tree_iter_first(root, start, last - 1);
- node; node = next) {
- /* TODO move the blockable decision up to the callback */
- if (!blockable)
- return -EAGAIN;
- next = interval_tree_iter_next(node, start, last - 1);
- umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem, start, last, cookie) || ret_val;
- }
-
- return ret_val;
-}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 14a80fd9f464..06ed32c8662f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -252,6 +252,8 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
ucontext->closing = false;
ucontext->cleanup_retryable = false;
+ xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC);
+
ret = get_unused_fd_flags(O_CLOEXEC);
if (ret < 0)
goto err_free;
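The new mmap_xa is initialized here when the ucontext is created and, in the rdma_core.c hunk above, checked empty with WARN_ON(!xa_empty(...)) when the ucontext is torn down, so every tracked mmap entry must have been removed by then. A toy sketch of that create/drain/destroy invariant, with a simple counter standing in for the XArray:

#include <assert.h>
#include <stdio.h>

struct ucontext { int mmap_entries; };

static void create(struct ucontext *c)       { c->mmap_entries = 0; }
static void add_entry(struct ucontext *c)    { c->mmap_entries++; }
static void remove_entry(struct ucontext *c) { c->mmap_entries--; }

/* Teardown asserts the map drained, mirroring WARN_ON(!xa_empty(...)). */
static void destroy(struct ucontext *c)
{
        assert(c->mmap_entries == 0);
}

int main(void)
{
        struct ucontext c;

        create(&c);
        add_entry(&c);
        remove_entry(&c);
        destroy(&c);
        printf("clean teardown\n");
        return 0;
}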
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 61758201d9b2..269938f59d3f 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -795,6 +795,9 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
{
const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
if (size < attr->ptr_attr.len) {
if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
attr->ptr_attr.len - size))
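Besides the new IS_ERR() guard on the attribute lookup, the helper's contract is worth spelling out: it copies at most the caller's struct size to the user attribute and zeroes any user-visible tail beyond it, so userspace never sees stale bytes. A user-space analog of that copy-then-zero-tail behavior, using memcpy/memset in place of copy_to_user()/clear_user():

#include <stdio.h>
#include <string.h>

/* Copy at most dst_size bytes; if the user buffer is larger, zero the
 * tail so stale bytes are never left visible (clear_user analog). */
static int copy_to_struct_or_zero(void *user_buf, size_t user_len,
                                  const void *src, size_t dst_size)
{
        size_t n = dst_size < user_len ? dst_size : user_len;

        memcpy(user_buf, src, n);
        if (user_len > dst_size)
                memset((char *)user_buf + dst_size, 0, user_len - dst_size);
        return 0;
}

int main(void)
{
        char resp[4] = { 1, 2, 3, 4 };
        char user[8];

        memset(user, 0xff, sizeof(user));
        copy_to_struct_or_zero(user, sizeof(user), resp, sizeof(resp));
        for (size_t i = 0; i < sizeof(user); i++)
                printf("%02x ", (unsigned char)user[i]);
        printf("\n");   /* 01 02 03 04 00 00 00 00 */
        return 0;
}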
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index db98111b47f4..970d8e31dd65 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -772,6 +772,8 @@ out_unlock:
return (ret) ? : count;
}
+static const struct vm_operations_struct rdma_umap_ops;
+
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct ib_uverbs_file *file = filp->private_data;
@@ -785,7 +787,7 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
ret = PTR_ERR(ucontext);
goto out;
}
-
+ vma->vm_ops = &rdma_umap_ops;
ret = ucontext->device->ops.mmap(ucontext, vma);
out:
srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
@@ -793,38 +795,6 @@ out:
}
/*
- * Each time we map IO memory into user space this keeps track of the mapping.
- * When the device is hot-unplugged we 'zap' the mmaps in user space to point
- * to the zero page and allow the hot unplug to proceed.
- *
- * This is necessary for cases like PCI physical hot unplug as the actual BAR
- * memory may vanish after this and access to it from userspace could MCE.
- *
- * RDMA drivers supporting disassociation must have their user space designed
- * to cope in some way with their IO pages going to the zero page.
- */
-struct rdma_umap_priv {
- struct vm_area_struct *vma;
- struct list_head list;
-};
-
-static const struct vm_operations_struct rdma_umap_ops;
-
-static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
- struct vm_area_struct *vma)
-{
- struct ib_uverbs_file *ufile = vma->vm_file->private_data;
-
- priv->vma = vma;
- vma->vm_private_data = priv;
- vma->vm_ops = &rdma_umap_ops;
-
- mutex_lock(&ufile->umap_lock);
- list_add(&priv->list, &ufile->umaps);
- mutex_unlock(&ufile->umap_lock);
-}
-
-/*
* The VMA has been dup'd, initialize the vm_private_data with a new tracking
* struct
*/
@@ -849,7 +819,7 @@ static void rdma_umap_open(struct vm_area_struct *vma)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto out_unlock;
- rdma_umap_priv_init(priv, vma);
+ rdma_umap_priv_init(priv, vma, opriv->entry);
up_read(&ufile->hw_destroy_rwsem);
return;
@@ -880,6 +850,9 @@ static void rdma_umap_close(struct vm_area_struct *vma)
* this point.
*/
mutex_lock(&ufile->umap_lock);
+ if (priv->entry)
+ rdma_user_mmap_entry_put(priv->entry);
+
list_del(&priv->list);
mutex_unlock(&ufile->umap_lock);
kfree(priv);
@@ -931,44 +904,6 @@ static const struct vm_operations_struct rdma_umap_ops = {
.fault = rdma_umap_fault,
};
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
- */
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- struct ib_uverbs_file *ufile = ucontext->ufile;
- struct rdma_umap_priv *priv;
-
- if (!(vma->vm_flags & VM_SHARED))
- return -EINVAL;
-
- if (vma->vm_end - vma->vm_start != size)
- return -EINVAL;
-
- /* Driver is using this wrong, must be called by ib_uverbs_mmap */
- if (WARN_ON(!vma->vm_file ||
- vma->vm_file->private_data != ufile))
- return -EINVAL;
- lockdep_assert_held(&ufile->device->disassociate_srcu);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- vma->vm_page_prot = prot;
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
- kfree(priv);
- return -EAGAIN;
- }
-
- rdma_umap_priv_init(priv, vma);
- return 0;
-}
-EXPORT_SYMBOL(rdma_user_mmap_io);
-
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
struct rdma_umap_priv *priv, *next_priv;
@@ -1018,6 +953,11 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
zap_vma_ptes(vma, vma->vm_start,
vma->vm_end - vma->vm_start);
+
+ if (priv->entry) {
+ rdma_user_mmap_entry_put(priv->entry);
+ priv->entry = NULL;
+ }
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
@@ -1139,7 +1079,7 @@ static const struct file_operations uverbs_fops = {
.release = ib_uverbs_close,
.llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
- .compat_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static const struct file_operations uverbs_mmap_fops = {
@@ -1150,7 +1090,7 @@ static const struct file_operations uverbs_mmap_fops = {
.release = ib_uverbs_close,
.llseek = no_llseek,
.unlocked_ioctl = ib_uverbs_ioctl,
- .compat_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
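compat_ptr_ioctl() is the generic shim for drivers whose ioctl argument is a pointer: for 32-bit callers it runs the argument through compat_ptr() before invoking the native unlocked_ioctl, which is why both uverbs file_operations can now share one handler. A toy model of the shim; the zero-extension shown is the common case, not every architecture's exact conversion rule:

#include <stdint.h>
#include <stdio.h>

static long native_ioctl(void *argp)
{
        printf("arg %p\n", argp);
        return 0;
}

/* compat_ptr() analog: widen a 32-bit user pointer to the native
 * width before handing it to the regular handler. */
static long compat_ptr_ioctl_like(uint32_t arg32)
{
        return native_ioctl((void *)(uintptr_t)arg32);
}

int main(void)
{
        return (int)compat_ptr_ioctl_like(0xdeadbeefu);
}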
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 35c2841a569e..dd765e176cdd 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -244,6 +244,8 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
/**
* ib_alloc_pd - Allocates an unused protection domain.
* @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ * @caller: caller's build-time module name
*
* A protection domain object provides an association between QPs, shared
* receive queues, address handles, memory regions, and memory windows.
@@ -2459,6 +2461,16 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
}
EXPORT_SYMBOL(ib_set_vf_guid);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ if (!device->ops.get_vf_guid)
+ return -EOPNOTSUPP;
+
+ return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
+}
+EXPORT_SYMBOL(ib_get_vf_guid);
/**
* ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
* information) and set an appropriate memory region for registration.
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 433fca59febd..0aeccd984889 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
-obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index ab8779d23382..b83f1cc38c52 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_BNXT_RE
- tristate "Broadcom Netxtreme HCA support"
- depends on 64BIT
- depends on ETHERNET && NETDEVICES && PCI && INET && DCB
- select NET_VENDOR_BROADCOM
- select BNXT
- ---help---
+ tristate "Broadcom Netxtreme HCA support"
+ depends on 64BIT
+ depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ select NET_VENDOR_BROADCOM
+ select BNXT
+ ---help---
This driver supports Broadcom NetXtreme-E 10/25/40/50 gigabit
RoCE HCAs. To compile this driver as a module, choose M here:
the module will be called bnxt_re.
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index e55a1666c0cd..725b2350e349 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -108,6 +108,7 @@ struct bnxt_re_sqp_entries {
#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
+#define BNXT_RE_GEN_P5_MAX_VF 64
struct bnxt_re_dev {
struct ib_device ibdev;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index b4149dc9e824..9b6ca15a183c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -191,24 +191,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
return 0;
}
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- switch (device_modify_mask) {
- case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
- /* Modify the GUID requires the modification of the GID table */
- /* GUID should be made as READ-ONLY */
- break;
- case IB_DEVICE_MODIFY_NODE_DESC:
- /* Node Desc should be made as READ-ONLY */
- break;
- default:
- break;
- }
- return 0;
-}
-
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr)
@@ -855,7 +837,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes += (qplib_qp->sq.max_wqe * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -869,7 +851,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(udata, ureq.qprva, bytes,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
qp->rumem = umem;
@@ -1322,7 +1304,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
+ umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -2565,7 +2547,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->umem = ib_umem_get(udata, req.cq_va,
entries * sizeof(struct cq_base),
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
rc = PTR_ERR(cq->umem);
goto fail;
@@ -3530,7 +3512,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 31662b1ee35a..23d972da5652 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -145,9 +145,6 @@ struct bnxt_re_ucontext {
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
-int bnxt_re_modify_device(struct ib_device *ibdev,
- int device_modify_mask,
- struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 30a54f8aa42c..e7e8a0f49464 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -119,61 +119,76 @@ static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
* reserved for the function. The driver may choose to allocate fewer
* resources than the firmware maximum.
*/
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
- u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
- u32 i;
- u32 vf_pct;
- u32 num_vfs;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *ctx;
+ int i;
- rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
- dev_attr->max_qp);
+ attr = &rdev->dev_attr;
+ ctx = &rdev->qplib_ctx;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ attr->max_qp);
+ ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
/* Use max_mr from fw since max_mrw does not get set */
- rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
- dev_attr->max_mr);
- rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
- dev_attr->max_srq);
- rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
- dev_attr->max_cq);
-
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-
- if (rdev->num_vfs) {
- /*
- * Reserve a set of resources for the PF. Divide the remaining
- * resources among the VFs
- */
- vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
- num_vfs = 100 * rdev->num_vfs;
- vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
- vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
- vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
- /*
- * The driver allows many more MRs than other resources. If the
- * firmware does also, then reserve a fixed amount for the PF
- * and divide the rest among VFs. VFs may use many MRs for NFS
- * mounts, ISER, NVME applications, etc. If the firmware
- * severely restricts the number of MRs, then let PF have
- * half and divide the rest among VFs, as for the other
- * resource types.
- */
- if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
- vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
- else
- vf_mrws = (rdev->qplib_ctx.mrw_count -
- BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
- vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
+{
+ struct bnxt_qplib_vf_res *vf_res;
+ u32 mrws = 0;
+ u32 vf_pct;
+ u32 nvfs;
+
+ vf_res = &qplib_ctx->vf_res;
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ nvfs = num_vf;
+ num_vf = 100 * num_vf;
+ vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
+ vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
+ vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF and
+ * divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, ISER, NVME applications, etc. If the firmware severely
+ * restricts the number of MRs, then let PF have half and divide
+ * the rest among VFs, as for the other resource types.
+ */
+ if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
+ mrws = qplib_ctx->mrw_count * vf_pct;
+ nvfs = num_vf;
+ } else {
+ mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
}
- rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
- rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
- rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
- rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
- rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+ vf_res->max_mrw_per_vf = (mrws / nvfs);
+ vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
+}
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 num_vfs;
+
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
/* for handling bnxt_en callbacks later */
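bnxt_re_limit_vf_res() keeps the old policy in its new home: reserve BNXT_RE_PCT_RSVD_FOR_PF percent of each pool for the PF and split the remainder evenly across the VFs, with the MR pool handled specially depending on how generous the firmware was. A runnable sketch of the percentage split; the reserved percentage below is an illustrative stand-in, not the driver's constant:

#include <stdio.h>

#define PCT_RSVD_FOR_PF 35u     /* illustrative, not the driver's value */

/* Give the VFs (100 - reserved)% of the pool, split evenly. */
static unsigned int per_vf_share(unsigned int pool, unsigned int num_vf)
{
        unsigned int vf_pct = 100u - PCT_RSVD_FOR_PF;

        return (pool * vf_pct) / (100u * num_vf);
}

int main(void)
{
        printf("qps/vf: %u\n", per_vf_share(64 * 1024, 16));
        printf("cqs/vf: %u\n", per_vf_share(128 * 1024, 16));
        return 0;
}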
@@ -193,9 +208,11 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
return;
rdev->num_vfs = num_vfs;
- bnxt_re_set_resource_limits(rdev);
- bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx);
+ if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+ }
}
static void bnxt_re_shutdown(void *p)
@@ -477,6 +494,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@ -625,7 +643,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
.modify_ah = bnxt_re_modify_ah,
- .modify_device = bnxt_re_modify_device,
.modify_qp = bnxt_re_modify_qp,
.modify_srq = bnxt_re_modify_srq,
.poll_cq = bnxt_re_poll_cq,
@@ -895,10 +912,14 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
return 0;
}
+#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
+#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
- 0x10000 : rdev->msix_entries[indx].db_offset;
+ (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
+ BNXT_RE_GEN_P5_PF_NQ_DB) :
+ rdev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -1270,10 +1291,10 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
return;
}
rdev->qplib_ctx.hwrm_intf_ver =
- (u64)resp.hwrm_intf_major << 48 |
- (u64)resp.hwrm_intf_minor << 32 |
- (u64)resp.hwrm_intf_build << 16 |
- resp.hwrm_intf_patch;
+ (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
+ (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
+ (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(resp.hwrm_intf_patch);
}
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
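The hwrm_intf_ver fix converts each little-endian 16-bit response field with le16_to_cpu() before shifting it into the 64-bit version word; without the conversion the composed value is wrong on big-endian hosts. A runnable sketch of the composition, assuming the fields have already been converted to CPU order:

#include <stdint.h>
#include <stdio.h>

/* Compose a 64-bit interface version from four 16-bit fields, each
 * already converted from wire (little-endian) order to CPU order. */
static uint64_t hwrm_ver(uint16_t major, uint16_t minor,
                         uint16_t build, uint16_t patch)
{
        return (uint64_t)major << 48 | (uint64_t)minor << 32 |
               (uint64_t)build << 16 | patch;
}

int main(void)
{
        /* 1.10.2.68, le16_to_cpu() assumed done upstream */
        printf("0x%016llx\n",
               (unsigned long long)hwrm_ver(1, 10, 2, 68));
        return 0;
}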
@@ -1408,8 +1429,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- if (!rdev->is_virtfn)
- bnxt_re_set_resource_limits(rdev);
+
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 60c8f76aab33..5cdfa84faf85 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -494,8 +494,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ if (is_virtfn)
goto skip_ctx_setup;
+ if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
+ goto config_vf_res;
level = ctx->qpc_tbl.level;
req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
@@ -540,6 +542,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
+config_vf_res:
req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index fbda11a7ab1a..aaa76d792185 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -186,7 +186,9 @@ struct bnxt_qplib_chip_ctx {
u8 chip_metal;
};
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
struct bnxt_qplib_res {
struct pci_dev *pdev;
@@ -203,7 +205,9 @@ struct bnxt_qplib_res {
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
- return (cctx->chip_num == CHIP_NUM_57500);
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+ cctx->chip_num == CHIP_NUM_57504 ||
+ cctx->chip_num == CHIP_NUM_57502);
}
static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
deleted file mode 100644
index 8c1a72bff447..000000000000
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_CXGB3
- tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3
- select GENERIC_ALLOCATOR
- ---help---
- This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
- 10GbE adapters.
-
- For general information about Chelsio and our products, visit
- our website at <http://www.chelsio.com>.
-
- For customer support, please visit our customer support page at
- <http://www.chelsio.com/support.html>.
-
- Please send feedback to <linux-bugs@chelsio.com>.
-
- To compile this driver as a module, choose M here: the module
- will be called iw_cxgb3.
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
deleted file mode 100644
index 34bb86a6ae3a..000000000000
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb3
-
-obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
-
-iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
- iwch_provider.o iwch.o cxio_hal.o cxio_resource.o
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
deleted file mode 100644
index 95b22a651673..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ /dev/null
@@ -1,1312 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <asm/delay.h>
-
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <net/net_namespace.h>
-
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-#include "sge_defs.h"
-
-static LIST_HEAD(rdev_list);
-static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (!strcmp(rdev->dev_name, dev_name))
- return rdev;
- return NULL;
-}
-
-static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
-{
- struct cxio_rdev *rdev;
-
- list_for_each_entry(rdev, &rdev_list, entry)
- if (rdev->t3cdev_p == tdev)
- return rdev;
- return NULL;
-}
-
-int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit)
-{
- int ret;
- struct t3_cqe *cqe;
- u32 rptr;
-
- struct rdma_cq_op setup;
- setup.id = cq->cqid;
- setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
- setup.op = op;
- ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);
-
- if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
- return ret;
-
- /*
- * If the rearm returned an index other than our current index,
- * then there might be CQE's in flight (being DMA'd). We must wait
- * here for them to complete or the consumer can miss a notification.
- */
- if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
- int i=0;
-
- rptr = cq->rptr;
-
- /*
- * Keep the generation correct by bumping rptr until it
- * matches the index returned by the rearm - 1.
- */
- while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
- rptr++;
-
- /*
- * Now rptr is the index for the (last) cqe that was
- * in-flight at the time the HW rearmed the CQ. We
- * spin until that CQE is valid.
- */
- cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
- while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
- udelay(1);
- if (i++ > 1000000) {
- pr_err("%s: stalled rnic\n", rdev_p->dev_name);
- return -EIO;
- }
- }
-
- return 1;
- }
-
- return 0;
-}
-
-static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
-{
- struct rdma_cq_setup setup;
- setup.id = cqid;
- setup.base_addr = 0; /* NULL address */
- setup.size = 0; /* disable the CQ */
- setup.credits = 0;
- setup.credit_thres = 0;
- setup.ovfl_mode = 0;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
-{
- u64 sge_cmd;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
- T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
- T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = qpid << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
-{
- struct rdma_cq_setup setup;
- int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
-
- size += 1; /* one extra page for storing cq-in-err state */
- cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
- if (!cq->cqid)
- return -ENOMEM;
- if (kernel) {
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- }
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
- &(cq->dma_addr), GFP_KERNEL);
- if (!cq->queue) {
- kfree(cq->sw_queue);
- return -ENOMEM;
- }
- dma_unmap_addr_set(cq, mapping, cq->dma_addr);
- setup.id = cq->cqid;
- setup.base_addr = (u64) (cq->dma_addr);
- setup.size = 1UL << cq->size_log2;
- setup.credits = 65535;
- setup.credit_thres = 1;
- if (rdev_p->t3cdev_p->type != T3A)
- setup.ovfl_mode = 0;
- else
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
- u32 qpid;
- int i;
-
- mutex_lock(&uctx->lock);
- if (!list_empty(&uctx->qpids)) {
- entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
- entry);
- list_del(&entry->entry);
- qpid = entry->qpid;
- kfree(entry);
- } else {
- qpid = cxio_hal_get_qpid(rdev_p->rscp);
- if (!qpid)
- goto out;
- for (i = qpid+1; i & rdev_p->qpmask; i++) {
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- break;
- entry->qpid = i;
- list_add_tail(&entry->entry, &uctx->qpids);
- }
- }
-out:
- mutex_unlock(&uctx->lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
- struct cxio_ucontext *uctx)
-{
- struct cxio_qpid_list *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return;
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- entry->qpid = qpid;
- mutex_lock(&uctx->lock);
- list_add_tail(&entry->entry, &uctx->qpids);
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- struct list_head *pos, *nxt;
- struct cxio_qpid_list *entry;
-
- mutex_lock(&uctx->lock);
- list_for_each_safe(pos, nxt, &uctx->qpids) {
- entry = list_entry(pos, struct cxio_qpid_list, entry);
- list_del_init(&entry->entry);
- if (!(entry->qpid & rdev_p->qpmask))
- cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
- kfree(entry);
- }
- mutex_unlock(&uctx->lock);
-}
-
-void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
-{
- INIT_LIST_HEAD(&uctx->qpids);
- mutex_init(&uctx->lock);
-}
-
-int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
- struct t3_wq *wq, struct cxio_ucontext *uctx)
-{
- int depth = 1UL << wq->size_log2;
- int rqsize = 1UL << wq->rq_size_log2;
-
- wq->qpid = get_qpid(rdev_p, uctx);
- if (!wq->qpid)
- return -ENOMEM;
-
- wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
- if (!wq->rq)
- goto err1;
-
- wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
- if (!wq->rq_addr)
- goto err2;
-
- wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
- if (!wq->sq)
- goto err3;
-
- wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- depth * sizeof(union t3_wr),
- &(wq->dma_addr), GFP_KERNEL);
- if (!wq->queue)
- goto err4;
-
- dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
- if (!kernel_domain)
- wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
- (wq->qpid << rdev_p->qpshift);
- wq->rdev = rdev_p;
- pr_debug("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n",
- __func__, wq->qpid, wq->doorbell, (unsigned long long)wq->udb);
- return 0;
-err4:
- kfree(wq->sq);
-err3:
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
-err2:
- kfree(wq->rq);
-err1:
- put_qpid(rdev_p, wq->qpid, uctx);
- return -ENOMEM;
-}
-
-void cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
-{
- cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
- kfree(cq->sw_queue);
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2))
- * sizeof(struct t3_cqe) + 1, cq->queue,
- dma_unmap_addr(cq, mapping));
- cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
-}
-
-int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
- struct cxio_ucontext *uctx)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (wq->size_log2))
- * sizeof(union t3_wr), wq->queue,
- dma_unmap_addr(wq, mapping));
- kfree(wq->sq);
- cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
- kfree(wq->rq);
- put_qpid(rdev_p, wq->qpid, uctx);
- return 0;
-}
-
-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(T3_SEND) |
- V_CQE_TYPE(0) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- u32 ptr;
- int flushed = 0;
-
- pr_debug("%s wq %p cq %p\n", __func__, wq, cq);
-
- /* flush RQ */
- pr_debug("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
- wq->rq_rptr, wq->rq_wptr, count);
- ptr = wq->rq_rptr + count;
- while (ptr++ != wq->rq_wptr) {
- insert_recv_cqe(wq, cq);
- flushed++;
- }
- return flushed;
-}
-
-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
- struct t3_swsq *sqp)
-{
- struct t3_cqe cqe;
-
- pr_debug("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
- wq, cq, cq->sw_rptr, cq->sw_wptr);
- memset(&cqe, 0, sizeof(cqe));
- cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
- V_CQE_OPCODE(sqp->opcode) |
- V_CQE_TYPE(1) |
- V_CQE_SWCQE(1) |
- V_CQE_QPID(wq->qpid) |
- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
- cq->size_log2)));
- cqe.u.scqe.wrid_hi = sqp->sq_wptr;
-
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
- cq->sw_wptr++;
-}
-
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
-{
- __u32 ptr = wq->sq_rptr + count;
- int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
-
- while (ptr != wq->sq_wptr) {
- sqp->signaled = 0;
- insert_sq_cqe(wq, cq, sqp);
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- flushed++;
- }
- return flushed;
-}
-
-/*
- * Move all CQEs from the HWCQ into the SWCQ.
- */
-void cxio_flush_hw_cq(struct t3_cq *cq)
-{
- struct t3_cqe *cqe, *swcqe;
-
- pr_debug("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
- cqe = cxio_next_hw_cqe(cq);
- while (cqe) {
- pr_debug("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
- __func__, cq->rptr, cq->sw_wptr);
- swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
- *swcqe = *cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
- cq->sw_wptr++;
- cq->rptr++;
- cqe = cxio_next_hw_cqe(cq);
- }
-}
-
-static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
-{
- if (CQE_OPCODE(*cqe) == T3_TERMINATE)
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
- return 0;
-
- if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
- return 0;
-
- if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
- return 0;
-
- return 1;
-}
-
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if ((SQ_TYPE(*cqe) ||
- ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
- (CQE_QPID(*cqe) == wq->qpid))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
-{
- struct t3_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- pr_debug("%s count zero %d\n", __func__, *count);
- ptr = cq->sw_rptr;
- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
- if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
- (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
- (*count)++;
- ptr++;
- }
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
-}
-
-static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
-{
- struct rdma_cq_setup setup;
- setup.id = 0;
- setup.base_addr = 0; /* NULL address */
- setup.size = 1; /* enable the CQ */
- setup.credits = 0;
-
- /* force SGE to redirect to RspQ and interrupt */
- setup.credit_thres = 0;
- setup.ovfl_mode = 1;
- return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
-}
-
-static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- int err;
- u64 sge_cmd, ctx0, ctx1;
- u64 base_addr;
- struct t3_modify_qp_wr *wqe;
- struct sk_buff *skb;
-
- skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
- if (!skb) {
- pr_debug("%s alloc_skb failed\n", __func__);
- return -ENOMEM;
- }
- err = cxio_hal_init_ctrl_cq(rdev_p);
- if (err) {
- pr_debug("%s err %d initializing ctrl_cq\n", __func__, err);
- goto err;
- }
- rdev_p->ctrl_qp.workq = dma_alloc_coherent(
- &(rdev_p->rnic_info.pdev->dev),
- (1 << T3_CTRL_QP_SIZE_LOG2) *
- sizeof(union t3_wr),
- &(rdev_p->ctrl_qp.dma_addr),
- GFP_KERNEL);
- if (!rdev_p->ctrl_qp.workq) {
- pr_debug("%s dma_alloc_coherent failed\n", __func__);
- err = -ENOMEM;
- goto err;
- }
- dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
- rdev_p->ctrl_qp.dma_addr);
- rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
-
- mutex_init(&rdev_p->ctrl_qp.lock);
- init_waitqueue_head(&rdev_p->ctrl_qp.waitq);
-
- /* update HW Ctrl QP context */
- base_addr = rdev_p->ctrl_qp.dma_addr;
- base_addr >>= 12;
- ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
- V_EC_BASE_LO((u32) base_addr & 0xffff));
- ctx0 <<= 32;
- ctx0 |= V_EC_CREDITS(FW_WR_NUM);
- base_addr >>= 16;
- ctx1 = (u32) base_addr;
- base_addr >>= 32;
- ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
- V_EC_TYPE(0) | V_EC_GEN(1) |
- V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
- wqe = skb_put_zero(skb, sizeof(*wqe));
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
- T3_CTL_QP_TID, 7, T3_SOPEOP);
- wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
- sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
- wqe->sge_cmd = cpu_to_be64(sge_cmd);
- wqe->ctx1 = cpu_to_be64(ctx1);
- wqe->ctx0 = cpu_to_be64(ctx0);
- pr_debug("CtrlQP dma_addr %pad workq %p size %d\n",
- &rdev_p->ctrl_qp.dma_addr, rdev_p->ctrl_qp.workq,
- 1 << T3_CTRL_QP_SIZE_LOG2);
- skb->priority = CPL_PRIORITY_CONTROL;
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-err:
- kfree_skb(skb);
- return err;
-}
-
-static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
-{
- dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << T3_CTRL_QP_SIZE_LOG2)
- * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
- dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
- return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
-}
-
-/* Write len bytes of data into addr (32B aligned address).
- * If data is NULL, clear len bytes of memory to zero.
- * The caller must hold the ctrl_qp lock across the call.
- */
-static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
- u32 len, void *data)
-{
- u32 i, nr_wqe, copy_len;
- u8 *copy_data;
- u8 wr_len, utx_len; /* wr_len in 8-byte flits, utx_len in 32-byte units */
- enum t3_wr_flags flag;
- __be64 *wqe;
- u64 utx_cmd;
- addr &= 0x7FFFFFF;
- nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
- pr_debug("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
- __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
- nr_wqe, data, addr);
- utx_len = 3; /* in 32B unit */
- for (i = 0; i < nr_wqe; i++) {
- if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2)) {
- pr_debug("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, wait for more space i %d\n",
- __func__,
- rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- !Q_FULL(rdev_p->ctrl_qp.rptr,
- rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2))) {
- pr_debug("%s ctrl_qp workq interrupted\n",
- __func__);
- return -ERESTARTSYS;
- }
- pr_debug("%s ctrl_qp wakeup, continue posting work request i %d\n",
- __func__, i);
- }
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
- flag = 0;
- if (i == (nr_wqe - 1)) {
- /* last WQE */
- flag = T3_COMPLETION_FLAG;
- if (len % 32)
- utx_len = len / 32 + 1;
- else
- utx_len = len / 32;
- }
-
- /*
- * Force a CQE to return the credit to the workq in case
- * we posted more than half the max QP size of WRs
- */
- if ((i != 0) &&
- (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
- flag = T3_COMPLETION_FLAG;
- pr_debug("%s force completion at i %d\n", __func__, i);
- }
-
- /* build the utx mem command */
- wqe += (sizeof(struct t3_bypass_wr) >> 3);
- utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
- utx_cmd <<= 32;
- utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
- *wqe = cpu_to_be64(utx_cmd);
- wqe++;
- copy_data = (u8 *) data + i * 96;
- copy_len = len > 96 ? 96 : len;
-
- /* clear memory content if data is NULL */
- if (data)
- memcpy(wqe, copy_data, copy_len);
- else
- memset(wqe, 0, copy_len);
- if (copy_len % 32)
- memset(((u8 *) wqe) + copy_len, 0,
- 32 - (copy_len % 32));
- wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
- (utx_len << 2);
- wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
- (1 << T3_CTRL_QP_SIZE_LOG2)));
-
- /* wptr in the WRID[31:0] */
- ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
-
- /*
- * This must be the last write with a memory barrier
- * for the genbit
- */
- build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
- Q_GENBIT(rdev_p->ctrl_qp.wptr,
- T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
- wr_len, T3_SOPEOP);
- if (flag == T3_COMPLETION_FLAG)
- ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
- len -= 96;
- rdev_p->ctrl_qp.wptr++;
- }
- return 0;
-}
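
The chunking arithmetic above is easy to misread: each bypass WQE carries at
most 96 bytes of payload, while the embedded UTX command counts its length in
32-byte flits, rounded up on the final WQE. A minimal userspace sketch of just
that math (the helper names and test values are illustrative, not part of the
driver):

    #include <stdio.h>

    /* WQE sizing rules from cxio_hal_ctrl_qp_write_mem(): payload is
     * split into 96-byte chunks, one chunk per bypass WQE, and the UTX
     * write command encodes the last chunk's length in 32-byte units. */
    static unsigned int nr_wqe_for(unsigned int len)
    {
            return len % 96 ? len / 96 + 1 : len / 96;
    }

    static unsigned int last_utx_len(unsigned int remaining)
    {
            return remaining % 32 ? remaining / 32 + 1 : remaining / 32;
    }

    int main(void)
    {
            /* A 200-byte write needs 3 WQEs; the last carries 8 bytes,
             * rounded up to one 32-byte flit. */
            printf("%u WQEs, last utx_len %u\n",
                   nr_wqe_for(200), last_utx_len(200 - 2 * 96));
            return 0;
    }
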
-
-/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
- * OUT: stag index
- * TBD: shared memory region support
- */
-static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
- u32 *stag, u8 stag_state, u32 pdid,
- enum tpt_mem_type type, enum tpt_mem_perm perm,
- u32 zbva, u64 to, u32 len, u8 page_size,
- u32 pbl_size, u32 pbl_addr)
-{
- int err;
- struct tpt_entry tpt;
- u32 stag_idx;
- u32 wptr;
-
- if (cxio_fatal_error(rdev_p))
- return -EIO;
-
- stag_state = stag_state > 0;
- stag_idx = (*stag) >> 8;
-
- if (!reset_tpt_entry && *stag == T3_STAG_UNSET) {
- stag_idx = cxio_hal_get_stag(rdev_p->rscp);
- if (!stag_idx)
- return -ENOMEM;
- *stag = (stag_idx << 8) | ((*stag) & 0xFF);
- }
- pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
-
- /* write TPT entry */
- if (reset_tpt_entry)
- memset(&tpt, 0, sizeof(tpt));
- else {
- tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
- V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
- V_TPT_STAG_STATE(stag_state) |
- V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
- BUG_ON(page_size >= 28);
- tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
- ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
- V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
- V_TPT_PAGE_SIZE(page_size));
- tpt.rsvd_pbl_addr = cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
- tpt.len = cpu_to_be32(len);
- tpt.va_hi = cpu_to_be32((u32) (to >> 32));
- tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
- tpt.rsvd_bind_cnt_or_pstag = 0;
- tpt.rsvd_pbl_size = cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
- }
- err = cxio_hal_ctrl_qp_write_mem(rdev_p,
- stag_idx +
- (rdev_p->rnic_info.tpt_base >> 5),
- sizeof(tpt), &tpt);
-
- /* release the stag index to free pool */
- if (reset_tpt_entry)
- cxio_hal_put_stag(rdev_p->rscp, stag_idx);
-
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (!err)
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
- return err;
-}
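
__cxio_tpt_op() above packs a stag as (index << 8) | key: the low byte is the
caller-supplied key, the upper 24 bits index the TPT, and each 32-byte TPT slot
lands at tpt_base plus one index, both expressed in 32-byte units (hence the
>> 5 scaling of tpt_base). A worked split, with a fabricated stag value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t stag = (0x1234u << 8) | 0xABu;   /* fabricated stag */

            /* Same decomposition as __cxio_tpt_op(): index above bit 8,
             * key in the low byte. */
            printf("stag 0x%08x -> idx 0x%x key 0x%02x\n",
                   stag, stag >> 8, stag & 0xFFu);
            return 0;
    }
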
-
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
-{
- u32 wptr;
- int err;
-
- pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
- pbl_size);
-
- mutex_lock(&rdev_p->ctrl_qp.lock);
- err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
- pbl);
- wptr = rdev_p->ctrl_qp.wptr;
- mutex_unlock(&rdev_p->ctrl_qp.lock);
- if (err)
- return err;
-
- if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
- SEQ32_GE(rdev_p->ctrl_qp.rptr,
- wptr)))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
- zbva, to, len, page_size, pbl_size, pbl_addr);
-}
-
-int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
- u32 pbl_addr)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- pbl_size, pbl_addr);
-}
-
-int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
-{
- return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
- 0, 0);
-}
-
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
-{
- *stag = T3_STAG_UNSET;
- return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
- 0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
-}
-
-int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
-{
- struct t3_rdma_init_wr *wqe;
- struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
- pr_debug("%s rdev_p %p\n", __func__, rdev_p);
- wqe = __skb_put(skb, sizeof(*wqe));
- wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
- wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
- V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
- wqe->wrid.id1 = 0;
- wqe->qpid = cpu_to_be32(attr->qpid);
- wqe->pdid = cpu_to_be32(attr->pdid);
- wqe->scqid = cpu_to_be32(attr->scqid);
- wqe->rcqid = cpu_to_be32(attr->rcqid);
- wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
- wqe->rq_size = cpu_to_be32(attr->rq_size);
- wqe->mpaattrs = attr->mpaattrs;
- wqe->qpcaps = attr->qpcaps;
- wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
- wqe->rqe_count = cpu_to_be16(attr->rqe_count);
- wqe->flags_rtr_type = cpu_to_be16(attr->flags |
- V_RTR_TYPE(attr->rtr_type) |
- V_CHAN(attr->chan));
- wqe->ord = cpu_to_be32(attr->ord);
- wqe->ird = cpu_to_be32(attr->ird);
- wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
- wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
- wqe->irs = cpu_to_be32(attr->irs);
- skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */
- return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
-}
-
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = ev_cb;
-}
-
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
-{
- cxio_ev_cb = NULL;
-}
-
-static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
-{
- static int cnt;
- struct cxio_rdev *rdev_p = NULL;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- pr_debug("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x se %0x notify %0x cqbranch %0x creditth %0x\n",
- cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
- RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
- RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
- RSPQ_CREDIT_THRESH(rsp_msg));
- pr_debug("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
- rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
- if (!rdev_p) {
- pr_debug("%s called by t3cdev %p with null ulp\n", __func__,
- t3cdev_p);
- return 0;
- }
- if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
- rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
- wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
- dev_kfree_skb_irq(skb);
- } else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
- dev_kfree_skb_irq(skb);
- else if (cxio_ev_cb)
- (*cxio_ev_cb) (rdev_p, skb);
- else
- dev_kfree_skb_irq(skb);
- cnt++;
- return 0;
-}
-
-/* Caller takes care of locking if needed */
-int cxio_rdev_open(struct cxio_rdev *rdev_p)
-{
- struct net_device *netdev_p = NULL;
- int err = 0;
- if (strlen(rdev_p->dev_name)) {
- if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
- return -EBUSY;
- }
- netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
- if (!netdev_p) {
- return -EINVAL;
- }
- dev_put(netdev_p);
- } else if (rdev_p->t3cdev_p) {
- if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
- return -EBUSY;
- }
- netdev_p = rdev_p->t3cdev_p->lldev;
- strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
- T3_MAX_DEV_NAME_LEN);
- } else {
- pr_debug("%s t3cdev_p or dev_name must be set\n", __func__);
- return -EINVAL;
- }
-
- list_add_tail(&rdev_p->entry, &rdev_list);
-
- pr_debug("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
- memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
- if (!rdev_p->t3cdev_p)
- rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
- rdev_p->t3cdev_p->ulp = (void *) rdev_p;
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
- &(rdev_p->fw_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
- pr_err("fatal firmware version mismatch: need version %u but adapter has version %u\n",
- CXIO_FW_MAJ,
- G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
- err = -EINVAL;
- goto err1;
- }
-
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
- &(rdev_p->rnic_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
- err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
- &(rdev_p->port_info));
- if (err) {
- pr_err("%s t3cdev_p(%p)->ctl returned error %d\n",
- __func__, rdev_p->t3cdev_p, err);
- goto err1;
- }
-
- /*
- * qpshift is the number of bits to shift the qpid left in order
- * to get the correct address of the doorbell for that qp.
- */
- cxio_init_ucontext(rdev_p, &rdev_p->uctx);
- rdev_p->qpshift = PAGE_SHIFT -
- ilog2(65536 >>
- ilog2(rdev_p->rnic_info.udbell_len >>
- PAGE_SHIFT));
- rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
- rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
- pr_debug("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
- __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
- rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
- rdev_p->rnic_info.pbl_base,
- rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
- rdev_p->rnic_info.rqt_top);
- pr_debug("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu qpnr %d qpmask 0x%x\n",
- rdev_p->rnic_info.udbell_len,
- rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
- rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);
-
- err = cxio_hal_init_ctrl_qp(rdev_p);
- if (err) {
- pr_err("%s error %d initializing ctrl_qp\n", __func__, err);
- goto err1;
- }
- err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
- 0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
- T3_MAX_NUM_PD);
- if (err) {
- pr_err("%s error %d initializing hal resources\n",
- __func__, err);
- goto err2;
- }
- err = cxio_hal_pblpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing pbl mem pool\n",
- __func__, err);
- goto err3;
- }
- err = cxio_hal_rqtpool_create(rdev_p);
- if (err) {
- pr_err("%s error %d initializing rqt mem pool\n",
- __func__, err);
- goto err4;
- }
- return 0;
-err4:
- cxio_hal_pblpool_destroy(rdev_p);
-err3:
- cxio_hal_destroy_resource(rdev_p->rscp);
-err2:
- cxio_hal_destroy_ctrl_qp(rdev_p);
-err1:
- rdev_p->t3cdev_p->ulp = NULL;
- list_del(&rdev_p->entry);
- return err;
-}
-
-void cxio_rdev_close(struct cxio_rdev *rdev_p)
-{
- if (rdev_p) {
- cxio_hal_pblpool_destroy(rdev_p);
- cxio_hal_rqtpool_destroy(rdev_p);
- list_del(&rdev_p->entry);
- cxio_hal_destroy_ctrl_qp(rdev_p);
- cxio_hal_destroy_resource(rdev_p->rscp);
- rdev_p->t3cdev_p->ulp = NULL;
- }
-}
-
-int __init cxio_hal_init(void)
-{
- if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
- return -ENOMEM;
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
- return 0;
-}
-
-void __exit cxio_hal_exit(void)
-{
- struct cxio_rdev *rdev, *tmp;
-
- t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
- list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
- cxio_rdev_close(rdev);
- cxio_hal_destroy_rhdl_resource();
-}
-
-static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
-{
- struct t3_swsq *sqp;
- __u32 ptr = wq->sq_rptr;
- int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
-
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- while (count--)
- if (!sqp->signaled) {
- ptr++;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- } else if (sqp->complete) {
-
- /*
- * Insert this completed cqe into the swcq.
- */
- pr_debug("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
- __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
- Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
- sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
- = sqp->cqe;
- cq->sw_wptr++;
- sqp->signaled = 0;
- break;
- } else
- break;
-}
-
-static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
- struct t3_cqe *read_cqe)
-{
- read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
- read_cqe->len = wq->oldest_read->read_len;
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
- V_CQE_OPCODE(T3_READ_REQ) |
- V_CQE_TYPE(1));
-}
-
-/*
- * Advance wq->oldest_read to the next read wr in the SWSQ, or set it to NULL.
- */
-static void advance_oldest_read(struct t3_wq *wq)
-{
-
- u32 rptr = wq->oldest_read - wq->sq + 1;
- u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
-
- while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
- wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
-
- if (wq->oldest_read->opcode == T3_READ_REQ)
- return;
- rptr++;
- }
- wq->oldest_read = NULL;
-}
-
-/*
- * cxio_poll_cq
- *
- * Caller must:
- * check the validity of the first CQE,
- * supply the wq associated with the qpid.
- *
- * credit: cq credit to return to sge.
- * cqe_flushed: 1 iff the CQE is flushed.
- * cqe: copy of the polled CQE.
- *
- * return value:
- * 0 CQE returned,
- * -1 CQE skipped, try again.
- */
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit)
-{
- int ret = 0;
- struct t3_cqe *hw_cqe, read_cqe;
-
- *cqe_flushed = 0;
- *credit = 0;
- hw_cqe = cxio_next_cqe(cq);
-
- pr_debug("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
- CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
- CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
- CQE_WRID_LOW(*hw_cqe));
-
- /*
- * skip cqe's not affiliated with a QP.
- */
- if (wq == NULL) {
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Gotta tweak READ completions:
- * 1) the cqe doesn't contain the sq_wptr from the wr.
- * 2) opcode not reflected from the wr.
- * 3) read_len not reflected from the wr.
- * 4) cq_type is RQ_TYPE not SQ_TYPE.
- */
- if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
-
- /*
- * If this is an unsolicited read response, then the read
- * was generated by the kernel driver as part of peer-2-peer
- * connection setup. So ignore the completion.
- */
- if (!wq->oldest_read) {
- if (CQE_STATUS(*hw_cqe))
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- /*
- * Don't write to the HWCQ, so create a new read req CQE
- * in local memory.
- */
- create_read_req_cqe(wq, hw_cqe, &read_cqe);
- hw_cqe = &read_cqe;
- advance_oldest_read(wq);
- }
-
- /*
- * T3A: Discard TERMINATE CQEs.
- */
- if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
- ret = -1;
- wq->error = 1;
- goto skip_cqe;
- }
-
- if (CQE_STATUS(*hw_cqe) || wq->error) {
- *cqe_flushed = wq->error;
- wq->error = 1;
-
- /*
- * T3A inserts errors into the CQE. We cannot return
- * these as work completions.
- */
- /* incoming write failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
- && RQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
- /* incoming read request failures */
- if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
- ret = -1;
- goto skip_cqe;
- }
-
- /* incoming SEND with no receive posted failures */
- if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
- Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- ret = -1;
- goto skip_cqe;
- }
- BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
- goto proc_cqe;
- }
-
- /*
- * RECV completion.
- */
- if (RQ_TYPE(*hw_cqe)) {
-
- /*
- * HW only validates 4 bits of MSN. So we must validate that
- * the MSN in the SEND is the next expected MSN. If it's not,
- * then we complete this with TPT_ERR_MSN and mark the wq in
- * error.
- */
-
- if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
- wq->error = 1;
- ret = -1;
- goto skip_cqe;
- }
-
- if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
- wq->error = 1;
- hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
- goto proc_cqe;
- }
- goto proc_cqe;
- }
-
- /*
- * If we get here it's a send completion.
- *
- * Handle out of order completion. These get stuffed
- * in the SW SQ. Then the SW SQ is walked to move any
- * now in-order completions into the SW CQ. This handles
- * 2 cases:
- * 1) reaping unsignaled WRs when the first subsequent
- * signaled WR is completed.
- * 2) out of order read completions.
- */
- if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
- struct t3_swsq *sqp;
-
- pr_debug("%s out of order completion going in swsq at idx %ld\n",
- __func__,
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe),
- wq->sq_size_log2));
- sqp = wq->sq +
- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
- sqp->cqe = *hw_cqe;
- sqp->complete = 1;
- ret = -1;
- goto flush_wq;
- }
-
-proc_cqe:
- *cqe = *hw_cqe;
-
- /*
- * Reap the associated WR(s) that are freed up with this
- * completion.
- */
- if (SQ_TYPE(*hw_cqe)) {
- wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
- pr_debug("%s completing sq idx %ld\n", __func__,
- Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
- *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
- wq->sq_rptr++;
- } else {
- pr_debug("%s completing rq idx %ld\n", __func__,
- Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
- *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
- if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
- cxio_hal_pblpool_free(wq->rdev,
- wq->rq[Q_PTR2IDX(wq->rq_rptr,
- wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
- BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
- wq->rq_rptr++;
- }
-
-flush_wq:
- /*
- * Flush any completed cqes that are now in-order.
- */
- flush_completed_wrs(wq, cq);
-
-skip_cqe:
- if (SW_CQE(*hw_cqe)) {
- pr_debug("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->sw_rptr);
- ++cq->sw_rptr;
- } else {
- pr_debug("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
- __func__, cq, cq->cqid, cq->rptr);
- ++cq->rptr;
-
- /*
- * T3A: compute credits.
- */
- if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
- || ((cq->rptr - cq->wptr) >= 128)) {
- *credit = cq->rptr - cq->wptr;
- cq->wptr = cq->rptr;
- }
- }
- return ret;
-}
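
The skip_cqe path of cxio_poll_cq() hands CQ credits back to the SGE only in
batches: nothing is reported until the read pointer has advanced at least half
the CQ size, or 128 entries, past the point last recorded in cq->wptr. That
rule in isolation (a sketch, with illustrative names):

    #include <stdint.h>

    /* Lazy credit return as in cxio_poll_cq(): report (rptr - wptr)
     * credits once the gap reaches half the CQ or 128 entries, then
     * advance wptr to rptr. Unsigned subtraction keeps this correct
     * across 32-bit wraparound of the free-running pointers. */
    static uint32_t maybe_return_credit(uint32_t *wptr, uint32_t rptr,
                                        unsigned int size_log2)
    {
            uint32_t gap = rptr - *wptr;
            uint32_t credit = 0;

            if (gap > (1u << (size_log2 - 1)) || gap >= 128) {
                    credit = gap;
                    *wptr = rptr;
            }
            return credit;
    }
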
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
deleted file mode 100644
index 40c029ffa425..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_HAL_H__
-#define __CXIO_HAL_H__
-
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/kfifo.h>
-
-#include "t3_cpl.h"
-#include "t3cdev.h"
-#include "cxgb3_ctl_defs.h"
-#include "cxio_wr.h"
-
-#define T3_CTRL_QP_ID FW_RI_SGEEC_START
-#define T3_CTL_QP_TID FW_RI_TID_START
-#define T3_CTRL_QP_SIZE_LOG2 8
-#define T3_CTRL_CQ_ID 0
-
-#define T3_MAX_NUM_RI (1<<15)
-#define T3_MAX_NUM_QP (1<<15)
-#define T3_MAX_NUM_CQ (1<<15)
-#define T3_MAX_NUM_PD (1<<15)
-#define T3_MAX_PBL_SIZE 256
-#define T3_MAX_RQ_SIZE 1024
-#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 65536
-#define T3_MAX_NUM_STAG (1<<15)
-#define T3_MAX_MR_SIZE 0x100000000ULL
-#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
-
-#define T3_STAG_UNSET 0xffffffff
-
-#define T3_MAX_DEV_NAME_LEN 32
-
-#define CXIO_FW_MAJ 7
-
-struct cxio_hal_ctrl_qp {
- u32 wptr;
- u32 rptr;
- struct mutex lock; /* for the wptr, can sleep */
- wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
- union t3_wr *workq; /* the work request queue */
- dma_addr_t dma_addr; /* pci bus address of the workq */
- DEFINE_DMA_UNMAP_ADDR(mapping);
- void __iomem *doorbell;
-};
-
-struct cxio_hal_resource {
- struct kfifo tpt_fifo;
- spinlock_t tpt_fifo_lock;
- struct kfifo qpid_fifo;
- spinlock_t qpid_fifo_lock;
- struct kfifo cqid_fifo;
- spinlock_t cqid_fifo_lock;
- struct kfifo pdid_fifo;
- spinlock_t pdid_fifo_lock;
-};
-
-struct cxio_qpid_list {
- struct list_head entry;
- u32 qpid;
-};
-
-struct cxio_ucontext {
- struct list_head qpids;
- struct mutex lock;
-};
-
-struct cxio_rdev {
- char dev_name[T3_MAX_DEV_NAME_LEN];
- struct t3cdev *t3cdev_p;
- struct rdma_info rnic_info;
- struct adap_ports port_info;
- struct cxio_hal_resource *rscp;
- struct cxio_hal_ctrl_qp ctrl_qp;
- void *ulp;
- unsigned long qpshift;
- u32 qpnr;
- u32 qpmask;
- struct cxio_ucontext uctx;
- struct gen_pool *pbl_pool;
- struct gen_pool *rqt_pool;
- struct list_head entry;
- struct ch_embedded_info fw_info;
- u32 flags;
-#define CXIO_ERROR_FATAL 1
-};
-
-static inline int cxio_fatal_error(struct cxio_rdev *rdev_p)
-{
- return rdev_p->flags & CXIO_ERROR_FATAL;
-}
-
-static inline int cxio_num_stags(struct cxio_rdev *rdev_p)
-{
- return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5));
-}
-
-typedef void (*cxio_hal_ev_callback_func_t) (struct cxio_rdev * rdev_p,
- struct sk_buff * skb);
-
-#define RSPQ_CQID(rsp) (be32_to_cpu(rsp->cq_ptrid) & 0xffff)
-#define RSPQ_CQPTR(rsp) ((be32_to_cpu(rsp->cq_ptrid) >> 16) & 0xffff)
-#define RSPQ_GENBIT(rsp) ((be32_to_cpu(rsp->flags) >> 16) & 1)
-#define RSPQ_OVERFLOW(rsp) ((be32_to_cpu(rsp->flags) >> 17) & 1)
-#define RSPQ_AN(rsp) ((be32_to_cpu(rsp->flags) >> 18) & 1)
-#define RSPQ_SE(rsp) ((be32_to_cpu(rsp->flags) >> 19) & 1)
-#define RSPQ_NOTIFY(rsp) ((be32_to_cpu(rsp->flags) >> 20) & 1)
-#define RSPQ_CQBRANCH(rsp) ((be32_to_cpu(rsp->flags) >> 21) & 1)
-#define RSPQ_CREDIT_THRESH(rsp) ((be32_to_cpu(rsp->flags) >> 22) & 1)
-
-struct respQ_msg_t {
- __be32 flags; /* flit 0 */
- __be32 cq_ptrid;
- __be64 rsvd; /* flit 1 */
- struct t3_cqe cqe; /* flits 2-3 */
-};
-
-enum t3_cq_opcode {
- CQ_ARM_AN = 0x2,
- CQ_ARM_SE = 0x6,
- CQ_FORCE_AN = 0x3,
- CQ_CREDIT_UPDATE = 0x7
-};
-
-int cxio_rdev_open(struct cxio_rdev *rdev);
-void cxio_rdev_close(struct cxio_rdev *rdev);
-int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
- enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
-void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
-void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
-int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
- struct cxio_ucontext *uctx);
-int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size);
-int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_reregister_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
- enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
- u8 page_size, u32 pbl_size, u32 pbl_addr);
-int cxio_dereg_mem(struct cxio_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr);
-int cxio_allocate_window(struct cxio_rdev *rdev, u32 * stag, u32 pdid);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr);
-int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
-int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
-void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
-int __init cxio_hal_init(void);
-void __exit cxio_hal_exit(void);
-int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
-void cxio_flush_hw_cq(struct t3_cq *cq);
-int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
- u8 *cqe_flushed, u64 *cookie, u32 *credit);
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb);
-
-#ifdef pr_fmt
-#undef pr_fmt
-#endif
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#endif
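
The RSPQ_* accessors in the header above all follow one pattern: byte-swap the
big-endian flags flit once, then shift and mask out single-bit fields starting
at bit 16. The same decode, open-coded against fabricated test data (ntohl()
stands in for be32_to_cpu()):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
            /* Fabricated flags flit with genbit (bit 16) and the credit
             * threshold flag (bit 22) set, matching what RSPQ_GENBIT()
             * and RSPQ_CREDIT_THRESH() extract. */
            uint32_t wire = htonl((1u << 16) | (1u << 22));
            uint32_t host = ntohl(wire);

            printf("genbit %u credit_thresh %u\n",
                   (host >> 16) & 1u, (host >> 22) & 1u);
            return 0;
    }
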
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
deleted file mode 100644
index c6e7bc4420b6..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include "cxio_resource.h"
-#include "cxio_hal.h"
-
-static struct kfifo rhdl_fifo;
-static spinlock_t rhdl_fifo_lock;
-
-#define RANDOM_SIZE 16
-
-static int __cxio_init_resource_fifo(struct kfifo *fifo,
- spinlock_t *fifo_lock,
- u32 nr, u32 skip_low,
- u32 skip_high,
- int random)
-{
- u32 i, j, entry = 0, idx;
- u32 random_bytes;
- u32 rarray[16];
- spin_lock_init(fifo_lock);
-
- if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 0; i < skip_low + skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
- if (random) {
- j = 0;
- random_bytes = prandom_u32();
- for (i = 0; i < RANDOM_SIZE; i++)
- rarray[i] = i + skip_low;
- for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
- if (j >= RANDOM_SIZE) {
- j = 0;
- random_bytes = prandom_u32();
- }
- idx = (random_bytes >> (j * 2)) & 0xF;
- kfifo_in(fifo,
- (unsigned char *) &rarray[idx],
- sizeof(u32));
- rarray[idx] = i;
- j++;
- }
- for (i = 0; i < RANDOM_SIZE; i++)
- kfifo_in(fifo,
- (unsigned char *) &rarray[i],
- sizeof(u32));
- } else
- for (i = skip_low; i < nr - skip_high; i++)
- kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
- for (i = 0; i < skip_low + skip_high; i++)
- if (kfifo_out_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), fifo_lock) != sizeof(u32))
- break;
- return 0;
-}
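
__cxio_init_resource_fifo() randomizes allocation order with a small reservoir
shuffle: a 16-slot window of pending IDs, from which a few random bits pick the
next ID to emit while the incoming ID takes the vacated slot. The shuffle in
miniature (rand4() is a stand-in for the bits peeled off prandom_u32()):

    #include <stdio.h>
    #include <stdlib.h>

    #define WINDOW 16

    static unsigned int rand4(void)
    {
            return rand() & 0xFu;   /* stand-in for prandom_u32() bits */
    }

    int main(void)
    {
            unsigned int window[WINDOW], i, idx;

            for (i = 0; i < WINDOW; i++)      /* prime the window */
                    window[i] = i;
            for (i = WINDOW; i < 32; i++) {   /* emit one, absorb one */
                    idx = rand4();
                    printf("%u ", window[idx]);
                    window[idx] = i;
            }
            for (i = 0; i < WINDOW; i++)      /* drain the window */
                    printf("%u ", window[i]);
            printf("\n");
            return 0;
    }
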
-
-static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 0));
-}
-
-static int cxio_init_resource_fifo_random(struct kfifo *fifo,
- spinlock_t * fifo_lock,
- u32 nr, u32 skip_low, u32 skip_high)
-{
-
- return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
- skip_high, 1));
-}
-
-static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p)
-{
- u32 i;
-
- spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
-
- if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
- GFP_KERNEL))
- return -ENOMEM;
-
- for (i = 16; i < T3_MAX_NUM_QP; i++)
- if (!(i & rdev_p->qpmask))
- kfifo_in(&rdev_p->rscp->qpid_fifo,
- (unsigned char *) &i, sizeof(u32));
- return 0;
-}
-
-int cxio_hal_init_rhdl_resource(u32 nr_rhdl)
-{
- return cxio_init_resource_fifo(&rhdl_fifo, &rhdl_fifo_lock, nr_rhdl, 1,
- 0);
-}
-
-void cxio_hal_destroy_rhdl_resource(void)
-{
- kfifo_free(&rhdl_fifo);
-}
-
-/* each nr_* must be a power of 2 */
-int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid, u32 nr_pdid)
-{
- int err = 0;
- struct cxio_hal_resource *rscp;
-
- rscp = kmalloc(sizeof(*rscp), GFP_KERNEL);
- if (!rscp)
- return -ENOMEM;
- rdev_p->rscp = rscp;
- err = cxio_init_resource_fifo_random(&rscp->tpt_fifo,
- &rscp->tpt_fifo_lock,
- nr_tpt, 1, 0);
- if (err)
- goto tpt_err;
- err = cxio_init_qpid_fifo(rdev_p);
- if (err)
- goto qpid_err;
- err = cxio_init_resource_fifo(&rscp->cqid_fifo, &rscp->cqid_fifo_lock,
- nr_cqid, 1, 0);
- if (err)
- goto cqid_err;
- err = cxio_init_resource_fifo(&rscp->pdid_fifo, &rscp->pdid_fifo_lock,
- nr_pdid, 1, 0);
- if (err)
- goto pdid_err;
- return 0;
-pdid_err:
- kfifo_free(&rscp->cqid_fifo);
-cqid_err:
- kfifo_free(&rscp->qpid_fifo);
-qpid_err:
- kfifo_free(&rscp->tpt_fifo);
-tpt_err:
- return -ENOMEM;
-}
-
-/*
- * returns 0 if no resource is available
- */
-static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock)
-{
- u32 entry;
- if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
- return entry;
- else
- return 0; /* fifo empty */
-}
-
-static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock,
- u32 entry)
-{
- BUG_ON(kfifo_in_locked(fifo, (unsigned char *) &entry,
- sizeof(u32), lock) == 0);
-}
-
-u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock);
-}
-
-void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
-{
- cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag);
-}
-
-u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
-{
- u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo,
- &rscp->qpid_fifo_lock);
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- return qpid;
-}
-
-void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
-{
- pr_debug("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid);
-}
-
-u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock);
-}
-
-void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
-{
- cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid);
-}
-
-u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
-{
- return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock);
-}
-
-void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
-{
- cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid);
-}
-
-void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
-{
- kfifo_free(&rscp->tpt_fifo);
- kfifo_free(&rscp->cqid_fifo);
- kfifo_free(&rscp->qpid_fifo);
- kfifo_free(&rscp->pdid_fifo);
- kfree(rscp);
-}
-
-/*
- * PBL Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
-
-u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- return (u32)addr;
-}
-
-void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
- gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
-}
-
-int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned pbl_start, pbl_chunk;
-
- rdev_p->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
- if (!rdev_p->pbl_pool)
- return -ENOMEM;
-
- pbl_start = rdev_p->rnic_info.pbl_base;
- pbl_chunk = rdev_p->rnic_info.pbl_top - pbl_start + 1;
-
- while (pbl_start < rdev_p->rnic_info.pbl_top) {
- pbl_chunk = min(rdev_p->rnic_info.pbl_top - pbl_start + 1,
- pbl_chunk);
- if (gen_pool_add(rdev_p->pbl_pool, pbl_start, pbl_chunk, -1)) {
- pr_debug("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
- pr_warn("%s: Failed to add all PBL chunks (%x/%x)\n",
- __func__, pbl_start,
- rdev_p->rnic_info.pbl_top - pbl_start);
- return 0;
- }
- pbl_chunk >>= 1;
- } else {
- pr_debug("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
- pbl_start += pbl_chunk;
- }
- }
-
- return 0;
-}
-
-void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->pbl_pool);
-}
-
-/*
- * RQT Memory Manager. Uses Linux generic allocator.
- */
-
-#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
-#define RQT_CHUNK 2*1024*1024
-
-u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
-{
- unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- return (u32)addr;
-}
-
-void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
-{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
- gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
-}
-
-int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p)
-{
- unsigned long i;
- rdev_p->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
- if (rdev_p->rqt_pool)
- for (i = rdev_p->rnic_info.rqt_base;
- i <= rdev_p->rnic_info.rqt_top - RQT_CHUNK + 1;
- i += RQT_CHUNK)
- gen_pool_add(rdev_p->rqt_pool, i, RQT_CHUNK, -1);
- return rdev_p->rqt_pool ? 0 : -ENOMEM;
-}
-
-void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p)
-{
- gen_pool_destroy(rdev_p->rqt_pool);
-}
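
cxio_hal_pblpool_create() above populates its pool greedily: try the whole
remaining PBL range as one chunk, halve the chunk on failure, and give up once
chunks shrink below 1024 << MIN_PBL_SHIFT, accepting a partially filled pool.
The loop shape, abstracted away from gen_pool (add_chunk() is a hypothetical
stand-in for gen_pool_add()):

    /* Halving loop from cxio_hal_pblpool_create(); add_chunk() stands
     * in for gen_pool_add() and returns 0 on success. */
    static void populate(unsigned int start, unsigned int top,
                         unsigned int min_chunk,
                         int (*add_chunk)(unsigned int addr,
                                          unsigned int len))
    {
            unsigned int chunk = top - start + 1;

            while (start < top) {
                    if (top - start + 1 < chunk)
                            chunk = top - start + 1;
                    if (add_chunk(start, chunk)) {
                            if (chunk <= min_chunk)
                                    return;   /* keep what was added */
                            chunk >>= 1;      /* retry smaller */
                    } else {
                            start += chunk;
                    }
            }
    }
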
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.h b/drivers/infiniband/hw/cxgb3/cxio_resource.h
deleted file mode 100644
index a2703a3d882d..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_RESOURCE_H__
-#define __CXIO_RESOURCE_H__
-
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/genalloc.h>
-#include "cxio_hal.h"
-
-extern int cxio_hal_init_rhdl_resource(u32 nr_rhdl);
-extern void cxio_hal_destroy_rhdl_resource(void);
-extern int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
- u32 nr_tpt, u32 nr_pbl,
- u32 nr_rqt, u32 nr_qpid, u32 nr_cqid,
- u32 nr_pdid);
-extern u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag);
-extern u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid);
-extern u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp);
-extern void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid);
-extern void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp);
-
-#define PBL_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.pbl_base )
-extern int cxio_hal_pblpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-
-#define RQT_OFF(rdev_p, a) ( (a) - (rdev_p)->rnic_info.rqt_base )
-extern int cxio_hal_rqtpool_create(struct cxio_rdev *rdev_p);
-extern void cxio_hal_rqtpool_destroy(struct cxio_rdev *rdev_p);
-extern u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size);
-extern void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size);
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
deleted file mode 100644
index 53aa5c36247a..000000000000
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ /dev/null
@@ -1,802 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __CXIO_WR_H__
-#define __CXIO_WR_H__
-
-#include <asm/io.h>
-#include <linux/pci.h>
-#include <linux/timer.h>
-#include "firmware_exports.h"
-
-#define T3_MAX_SGE 4
-#define T3_MAX_INLINE 64
-#define T3_STAG0_PBL_SIZE (2 * T3_MAX_SGE << 3)
-#define T3_STAG0_MAX_PBE_LEN (128 * 1024 * 1024)
-#define T3_STAG0_PAGE_SHIFT 15
-
-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
-#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
- ((rptr)!=(wptr)) )
-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
-
-static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
-{
- writel(((1<<31) | qpid), doorbell);
-}
-
-#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
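
The Q_* macros and SEQ32_GE above lean on free-running 32-bit pointers that are
only masked at indexing time: counts and emptiness fall out of plain unsigned
subtraction, and SEQ32_GE(x, y) stays correct across wraparound because
(u32)(x - y) has its top bit set exactly when x trails y modulo 2^32. A small
self-check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define SEQ32_GE(x, y) (!((uint32_t)((x) - (y)) & 0x80000000u))

    int main(void)
    {
            /* Free-running pointers: the count survives wraparound. */
            uint32_t rptr = 0xfffffffeu, wptr = rptr + 5;

            assert(wptr - rptr == 5);      /* Q_COUNT across the wrap */
            assert(SEQ32_GE(wptr, rptr));  /* wptr at or ahead of rptr */
            assert(!SEQ32_GE(rptr, wptr));
            return 0;
    }
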
-
-enum t3_wr_flags {
- T3_COMPLETION_FLAG = 0x01,
- T3_NOTIFY_FLAG = 0x02,
- T3_SOLICITED_EVENT_FLAG = 0x04,
- T3_READ_FENCE_FLAG = 0x08,
- T3_LOCAL_FENCE_FLAG = 0x10
-} __packed;
-
-enum t3_wr_opcode {
- T3_WR_BP = FW_WROPCODE_RI_BYPASS,
- T3_WR_SEND = FW_WROPCODE_RI_SEND,
- T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
- T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
- T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
- T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
- T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
- T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
- T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP,
- T3_WR_FASTREG = FW_WROPCODE_RI_FASTREGISTER_MR
-} __packed;
-
-enum t3_rdma_opcode {
- T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
- T3_READ_REQ,
- T3_READ_RESP,
- T3_SEND,
- T3_SEND_WITH_INV,
- T3_SEND_WITH_SE,
- T3_SEND_WITH_SE_INV,
- T3_TERMINATE,
- T3_RDMA_INIT, /* CHELSIO RI specific ... */
- T3_BIND_MW,
- T3_FAST_REGISTER,
- T3_LOCAL_INV,
- T3_QP_MOD,
- T3_BYPASS,
- T3_RDMA_READ_REQ_WITH_INV,
-} __packed;
-
-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
-{
- switch (wrop) {
- case T3_WR_BP: return T3_BYPASS;
- case T3_WR_SEND: return T3_SEND;
- case T3_WR_WRITE: return T3_RDMA_WRITE;
- case T3_WR_READ: return T3_READ_REQ;
- case T3_WR_INV_STAG: return T3_LOCAL_INV;
- case T3_WR_BIND: return T3_BIND_MW;
- case T3_WR_INIT: return T3_RDMA_INIT;
- case T3_WR_QP_MOD: return T3_QP_MOD;
- case T3_WR_FASTREG: return T3_FAST_REGISTER;
- default: break;
- }
- return -1;
-}
-
-
-/* Work request id */
-union t3_wrid {
- struct {
- u32 hi;
- u32 low;
- } id0;
- u64 id1;
-};
-
-#define WRID(wrid) (wrid.id1)
-#define WRID_GEN(wrid) (wrid.id0.wr_gen)
-#define WRID_IDX(wrid) (wrid.id0.wr_idx)
-#define WRID_LO(wrid) (wrid.id0.wr_lo)
-
-struct fw_riwrh {
- __be32 op_seop_flags;
- __be32 gen_tid_len;
-};
-
-#define S_FW_RIWR_OP 24
-#define M_FW_RIWR_OP 0xff
-#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
-#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
-
-#define S_FW_RIWR_SOPEOP 22
-#define M_FW_RIWR_SOPEOP 0x3
-#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
-
-#define S_FW_RIWR_FLAGS 8
-#define M_FW_RIWR_FLAGS 0x3fffff
-#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
-#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
-
-#define S_FW_RIWR_TID 8
-#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
-
-#define S_FW_RIWR_LEN 0
-#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
-
-#define S_FW_RIWR_GEN 31
-#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
-
-struct t3_sge {
- __be32 stag;
- __be32 len;
- __be64 to;
-};
-
-/* If num_sgle is zero, flit 5+ contains immediate data. */
-struct t3_send_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
-
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 rem_stag;
- __be32 plen; /* 3 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
-};
-
-#define T3_MAX_FASTREG_DEPTH 10
-#define T3_MAX_FASTREG_FRAG 10
-
-struct t3_fastreg_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 len;
- __be32 va_base_hi; /* 3 */
- __be32 va_base_lo_fbo;
- __be32 page_type_perms; /* 4 */
- __be32 reserved1;
- __be64 pbl_addrs[0]; /* 5+ */
-};
-
-/*
- * If a fastreg wr spans multiple wqes, then the 2nd fragment looks like this.
- */
-struct t3_pbl_frag {
- struct fw_riwrh wrh; /* 0 */
- __be64 pbl_addrs[14]; /* 1..14 */
-};
-
-#define S_FR_PAGE_COUNT 24
-#define M_FR_PAGE_COUNT 0xff
-#define V_FR_PAGE_COUNT(x) ((x) << S_FR_PAGE_COUNT)
-#define G_FR_PAGE_COUNT(x) ((((x) >> S_FR_PAGE_COUNT)) & M_FR_PAGE_COUNT)
-
-#define S_FR_PAGE_SIZE 16
-#define M_FR_PAGE_SIZE 0x1f
-#define V_FR_PAGE_SIZE(x) ((x) << S_FR_PAGE_SIZE)
-#define G_FR_PAGE_SIZE(x) ((((x) >> S_FR_PAGE_SIZE)) & M_FR_PAGE_SIZE)
-
-#define S_FR_TYPE 8
-#define M_FR_TYPE 0x1
-#define V_FR_TYPE(x) ((x) << S_FR_TYPE)
-#define G_FR_TYPE(x) ((((x) >> S_FR_TYPE)) & M_FR_TYPE)
-
-#define S_FR_PERMS 0
-#define M_FR_PERMS 0xff
-#define V_FR_PERMS(x) ((x) << S_FR_PERMS)
-#define G_FR_PERMS(x) ((((x) >> S_FR_PERMS)) & M_FR_PERMS)
-
-struct t3_local_inv_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 stag; /* 2 */
- __be32 reserved;
-};
-
-struct t3_rdma_write_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 reserved[3];
- __be32 stag_sink;
- __be64 to_sink; /* 3 */
- __be32 plen; /* 4 */
- __be32 num_sgle;
- struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
-};
-
-struct t3_rdma_read_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 rdmaop; /* 2 */
- u8 local_inv;
- u8 reserved[2];
- __be32 rem_stag;
- __be64 rem_to; /* 3 */
- __be32 local_stag; /* 4 */
- __be32 local_len;
- __be64 local_to; /* 5 */
-};
-
-struct t3_bind_mw_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u16 reserved; /* 2 */
- u8 type;
- u8 perms;
- __be32 mr_stag;
- __be32 mw_stag; /* 3 */
- __be32 mw_len;
- __be64 mw_va; /* 4 */
- __be32 mr_pbl_addr; /* 5 */
- u8 reserved2[3];
- u8 mr_pagesz;
-};
-
-struct t3_receive_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- u8 pagesz[T3_MAX_SGE];
- __be32 num_sgle; /* 2 */
- struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
- __be32 pbl_addr[T3_MAX_SGE];
-};
-
-struct t3_bypass_wr {
- struct fw_riwrh wrh;
- union t3_wrid wrid; /* 1 */
-};
-
-struct t3_modify_qp_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 flags; /* 2 */
- __be32 quiesce; /* 2 */
- __be32 max_ird; /* 3 */
- __be32 max_ord; /* 3 */
- __be64 sge_cmd; /* 4 */
- __be64 ctx1; /* 5 */
- __be64 ctx0; /* 6 */
-};
-
-enum t3_modify_qp_flags {
- MODQP_QUIESCE = 0x01,
- MODQP_MAX_IRD = 0x02,
- MODQP_MAX_ORD = 0x04,
- MODQP_WRITE_EC = 0x08,
- MODQP_READ_EC = 0x10,
-};
-
-
-enum t3_mpa_attrs {
- uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
- uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
- uP_RI_MPA_CRC_ENABLE = 0x4,
- uP_RI_MPA_IETF_ENABLE = 0x8
-} __packed;
-
-enum t3_qp_caps {
- uP_RI_QP_RDMA_READ_ENABLE = 0x01,
- uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
- uP_RI_QP_BIND_ENABLE = 0x04,
- uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
- uP_RI_QP_STAG0_ENABLE = 0x10
-} __packed;
-
-enum rdma_init_rtr_types {
- RTR_READ = 1,
- RTR_WRITE = 2,
- RTR_SEND = 3,
-};
-
-#define S_RTR_TYPE 2
-#define M_RTR_TYPE 0x3
-#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
-#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
-
-#define S_CHAN 4
-#define M_CHAN 0x3
-#define V_CHAN(x) ((x) << S_CHAN)
-#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
-
-struct t3_rdma_init_attr {
- u32 tid;
- u32 qpid;
- u32 pdid;
- u32 scqid;
- u32 rcqid;
- u32 rq_addr;
- u32 rq_size;
- enum t3_mpa_attrs mpaattrs;
- enum t3_qp_caps qpcaps;
- u16 tcp_emss;
- u32 ord;
- u32 ird;
- u64 qp_dma_addr;
- u32 qp_dma_size;
- enum rdma_init_rtr_types rtr_type;
- u16 flags;
- u16 rqe_count;
- u32 irs;
- u32 chan;
-};
-
-struct t3_rdma_init_wr {
- struct fw_riwrh wrh; /* 0 */
- union t3_wrid wrid; /* 1 */
- __be32 qpid; /* 2 */
- __be32 pdid;
- __be32 scqid; /* 3 */
- __be32 rcqid;
- __be32 rq_addr; /* 4 */
- __be32 rq_size;
- u8 mpaattrs; /* 5 */
- u8 qpcaps;
- __be16 ulpdu_size;
- __be16 flags_rtr_type;
- __be16 rqe_count;
- __be32 ord; /* 6 */
- __be32 ird;
- __be64 qp_dma_addr; /* 7 */
- __be32 qp_dma_size; /* 8 */
- __be32 irs;
-};
-
-struct t3_genbit {
- u64 flit[15];
- __be64 genbit;
-};
-
-struct t3_wq_in_err {
- u64 flit[13];
- u64 err;
-};
-
-enum rdma_init_wr_flags {
- MPA_INITIATOR = (1<<0),
- PRIV_QP = (1<<1),
-};
-
-union t3_wr {
- struct t3_send_wr send;
- struct t3_rdma_write_wr write;
- struct t3_rdma_read_wr read;
- struct t3_receive_wr recv;
- struct t3_fastreg_wr fastreg;
- struct t3_pbl_frag pbl_frag;
- struct t3_local_inv_wr local_inv;
- struct t3_bind_mw_wr bind;
- struct t3_bypass_wr bypass;
- struct t3_rdma_init_wr init;
- struct t3_modify_qp_wr qp_mod;
- struct t3_genbit genbit;
- struct t3_wq_in_err wq_in_err;
- __be64 flit[16];
-};
-
-#define T3_SQ_CQE_FLIT 13
-#define T3_SQ_COOKIE_FLIT 14
-
-#define T3_RQ_COOKIE_FLIT 13
-#define T3_RQ_CQE_FLIT 14
-
-static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
-{
- return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
-}
-
-enum t3_wr_hdr_bits {
- T3_EOP = 1,
- T3_SOP = 2,
- T3_SOPEOP = T3_EOP|T3_SOP,
-};
-
-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
- enum t3_wr_flags flags, u8 genbit, u32 tid,
- u8 len, u8 sopeop)
-{
- wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
- V_FW_RIWR_SOPEOP(sopeop) |
- V_FW_RIWR_FLAGS(flags));
- wmb();
- wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
- V_FW_RIWR_TID(tid) |
- V_FW_RIWR_LEN(len));
- /* 2nd gen bit... */
- ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
-}
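
Q_GENBIT() above and the trailing genbit store in build_fw_riwrh() form the
usual generation-bit handshake: the bit, derived from a free-running pointer,
flips on every lap around the queue, so the consumer can tell a freshly written
entry from a stale one left by the previous lap; writing it last, behind the
wmb(), ensures the rest of the WQE is visible first. The flip is literally one
bit of the pointer:

    #include <assert.h>

    #define Q_GENBIT(ptr, size_log2) (!(((ptr) >> (size_log2)) & 0x1))

    int main(void)
    {
            unsigned int log2 = 8;  /* 256 entries, as T3_CTRL_QP_SIZE_LOG2 */

            /* Same slot on successive laps: index repeats, genbit flips. */
            assert(Q_GENBIT(5u, log2) == 1);         /* lap 0 */
            assert(Q_GENBIT(5u + 256u, log2) == 0);  /* lap 1 */
            assert(Q_GENBIT(5u + 512u, log2) == 1);  /* lap 2 */
            return 0;
    }
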
-
-/*
- * T3 ULP2_TX commands
- */
-enum t3_utx_mem_op {
- T3_UTX_MEM_READ = 2,
- T3_UTX_MEM_WRITE = 3
-};
-
-/* T3 MC7 RDMA TPT entry format */
-
-enum tpt_mem_type {
- TPT_NON_SHARED_MR = 0x0,
- TPT_SHARED_MR = 0x1,
- TPT_MW = 0x2,
- TPT_MW_RELAXED_PROTECTION = 0x3
-};
-
-enum tpt_addr_type {
- TPT_ZBTO = 0,
- TPT_VATO = 1
-};
-
-enum tpt_mem_perm {
- TPT_MW_BIND = 0x10,
- TPT_LOCAL_READ = 0x8,
- TPT_LOCAL_WRITE = 0x4,
- TPT_REMOTE_READ = 0x2,
- TPT_REMOTE_WRITE = 0x1
-};
-
-struct tpt_entry {
- __be32 valid_stag_pdid;
- __be32 flags_pagesize_qpid;
-
- __be32 rsvd_pbl_addr;
- __be32 len;
- __be32 va_hi;
- __be32 va_low_or_fbo;
-
- __be32 rsvd_bind_cnt_or_pstag;
- __be32 rsvd_pbl_size;
-};
-
-#define S_TPT_VALID 31
-#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
-#define F_TPT_VALID V_TPT_VALID(1U)
-
-#define S_TPT_STAG_KEY 23
-#define M_TPT_STAG_KEY 0xFF
-#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
-#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
-
-#define S_TPT_STAG_STATE 22
-#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
-#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
-
-#define S_TPT_STAG_TYPE 20
-#define M_TPT_STAG_TYPE 0x3
-#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
-#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
-
-#define S_TPT_PDID 0
-#define M_TPT_PDID 0xFFFFF
-#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
-#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
-
-#define S_TPT_PERM 28
-#define M_TPT_PERM 0xF
-#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
-#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
-
-#define S_TPT_REM_INV_DIS 27
-#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
-#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
-
-#define S_TPT_ADDR_TYPE 26
-#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
-#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
-
-#define S_TPT_MW_BIND_ENABLE 25
-#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
-#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
-
-#define S_TPT_PAGE_SIZE 20
-#define M_TPT_PAGE_SIZE 0x1F
-#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
-#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
-
-#define S_TPT_PBL_ADDR 0
-#define M_TPT_PBL_ADDR 0x1FFFFFFF
-#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
-#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
-
-#define S_TPT_QPID 0
-#define M_TPT_QPID 0xFFFFF
-#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
-#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
-
-#define S_TPT_PSTAG 0
-#define M_TPT_PSTAG 0xFFFFFF
-#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
-#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
-
-#define S_TPT_PBL_SIZE 0
-#define M_TPT_PBL_SIZE 0xFFFFF
-#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
-#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
-
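-/*
- * A quick key to the macro convention above (illustrative): S_* is a
- * field's bit shift, M_* its width mask, V_*(x) positions a value in
- * the field and G_*(x) extracts it again. For example:
- *
- *	u32 w = V_TPT_PDID(5) | F_TPT_VALID;
- *	... G_TPT_PDID(w) evaluates to 5 ...
- */
-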
-/*
- * CQE defs
- */
-struct t3_cqe {
- __be32 header;
- __be32 len;
- union {
- struct {
- __be32 stag;
- __be32 msn;
- } rcqe;
- struct {
- u32 wrid_hi;
- u32 wrid_low;
- } scqe;
- } u;
-};
-
-#define S_CQE_OOO 31
-#define M_CQE_OOO 0x1
-#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
-#define V_CQE_OOO(x) ((x)<<S_CQE_OOO)
-
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0x7FFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_GENBIT 10
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x).header)))
-#define CQE_OOO(x) (G_CQE_OOO(be32_to_cpu((x).header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x).header)))
-#define CQE_GENBIT(x) (G_CQE_GENBIT(be32_to_cpu((x).header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x).header)))
-#define SQ_TYPE(x) (CQE_TYPE((x)))
-#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x).header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x).header)))
-
-#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x).header)) == T3_SEND_WITH_SE_INV))
-
-#define CQE_LEN(x) (be32_to_cpu((x).len))
-
-/* used for RQ completion processing */
-#define CQE_WRID_STAG(x) (be32_to_cpu((x).u.rcqe.stag))
-#define CQE_WRID_MSN(x) (be32_to_cpu((x).u.rcqe.msn))
-
-/* used for SQ completion processing */
-#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_WPTR(x) ((x).u.scqe.wrid_low)
-
-/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x).u.scqe.wrid_hi)
-#define CQE_WRID_LOW(x) ((x).u.scqe.wrid_low)
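-
-/*
- * Illustrative decode of a polled CQE with the accessors above
- * (a sketch; cqe is a caller-held struct t3_cqe):
- *
- *	if (CQE_TYPE(cqe))
- *		wptr = CQE_WRID_SQ_WPTR(cqe);	(SQ completion)
- *	else
- *		msn = CQE_WRID_MSN(cqe);	(RQ completion)
- */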
-
-#define TPT_ERR_SUCCESS 0x0
-#define TPT_ERR_STAG 0x1 /* STAG invalid: the STAG is */
- /* out of range, is 0, or the */
- /* STAG_key mismatches */
-#define TPT_ERR_PDID 0x2 /* PDID mismatch */
-#define TPT_ERR_QPID 0x3 /* QPID mismatch */
-#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
-#define TPT_ERR_WRAP 0x5 /* Wrap error */
-#define TPT_ERR_BOUND 0x6 /* base and bounds violation */
-#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
- /* shared memory region */
-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
- /* MR with a bound MW */
-#define TPT_ERR_ECC 0x9 /* ECC error detected */
-#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
- /* reading PSTAG for a MW */
- /* Invalidate */
-#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
- /* software error */
-#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
-#define TPT_ERR_CRC 0x10 /* CRC error */
-#define TPT_ERR_MARKER 0x11 /* Marker error */
-#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
-#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
-#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
-#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
-#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
-#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
-#define TPT_ERR_MSN 0x18 /* MSN error */
-#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
-#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
- /* or READ_REQ */
-#define TPT_ERR_MSN_GAP 0x1B
-#define TPT_ERR_MSN_RANGE 0x1C
-#define TPT_ERR_IRD_OVERFLOW 0x1D
-#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
- /* software error */
-#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
- /* mismatch) */
-
-struct t3_swsq {
- __u64 wr_id;
- struct t3_cqe cqe;
- __u32 sq_wptr;
- __be32 read_len;
- int opcode;
- int complete;
- int signaled;
-};
-
-struct t3_swrq {
- __u64 wr_id;
- __u32 pbl_addr;
-};
-
-/*
- * A T3 WQ implements both the SQ and RQ.
- */
-struct t3_wq {
- union t3_wr *queue; /* DMA accessible memory */
- dma_addr_t dma_addr; /* DMA address for HW */
- DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap cruft */
- u32 error; /* 1 once we go to ERROR */
- u32 qpid;
- u32 wptr; /* idx to next available WR slot */
- u32 size_log2; /* total wq size */
- struct t3_swsq *sq; /* SW SQ */
- struct t3_swsq *oldest_read; /* tracks oldest pending read */
- u32 sq_wptr; /* sq_wptr - sq_rptr == count of */
- u32 sq_rptr; /* pending wrs */
- u32 sq_size_log2; /* sq size */
- struct t3_swrq *rq; /* SW RQ (holds consumer wr_ids */
- u32 rq_wptr; /* rq_wptr - rq_rptr == count of */
- u32 rq_rptr; /* pending wrs */
- struct t3_swrq *rq_oldest_wr; /* oldest wr on the SW RQ */
- u32 rq_size_log2; /* rq size */
- u32 rq_addr; /* rq adapter address */
- void __iomem *doorbell; /* kernel db */
- u64 udb; /* user db if any */
- struct cxio_rdev *rdev;
-};
-
-struct t3_cq {
- u32 cqid;
- u32 rptr;
- u32 wptr;
- u32 size_log2;
- dma_addr_t dma_addr;
- DEFINE_DMA_UNMAP_ADDR(mapping);
- struct t3_cqe *queue;
- struct t3_cqe *sw_queue;
- u32 sw_rptr;
- u32 sw_wptr;
-};
-
-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
- CQE_GENBIT(*cqe))
-
-struct t3_cq_status_page {
- u32 cq_err;
-};
-
-static inline int cxio_cq_in_error(struct t3_cq *cq)
-{
- return ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err;
-}
-
-static inline void cxio_set_cq_in_error(struct t3_cq *cq)
-{
- ((struct t3_cq_status_page *)
- &cq->queue[1 << cq->size_log2])->cq_err = 1;
-}
-
-static inline void cxio_set_wq_in_error(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 1;
-}
-
-static inline void cxio_disable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err |= 2;
-}
-
-static inline void cxio_enable_wq_db(struct t3_wq *wq)
-{
- wq->queue->wq_in_err.err &= ~2;
-}
-
-static inline int cxio_wq_db_enabled(struct t3_wq *wq)
-{
- return !(wq->queue->wq_in_err.err & 2);
-}
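-
-/*
- * Convention used by the four helpers above: bit 0 of the err flit
- * marks the WQ as in error, bit 1 suppresses doorbell rings while
- * the adapter's doorbell FIFO is congested. A typical guard
- * (illustrative):
- *
- *	if (cxio_wq_db_enabled(wq))
- *		ring_doorbell(wq->doorbell, wq->qpid);
- */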
-
-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- return NULL;
-}
-
-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
-{
- struct t3_cqe *cqe;
-
- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
- return cqe;
- }
- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
- return cqe;
- return NULL;
-}
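-
-/*
- * A minimal consumer loop built on the helper above (a sketch;
- * consume_cqe() is a hypothetical callback):
- *
- *	while ((cqe = cxio_next_cqe(cq)) != NULL) {
- *		consume_cqe(cqe);
- *		if (SW_CQE(*cqe))
- *			cq->sw_rptr++;
- *		else
- *			cq->rptr++;
- *	}
- */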
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
deleted file mode 100644
index 56a8ab6210cf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-#include <rdma/cxgb3-abi.h>
-#include "iwch.h"
-#include "iwch_cm.h"
-
-#define DRV_VERSION "1.1"
-
-MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
-MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static void open_rnic_dev(struct t3cdev *);
-static void close_rnic_dev(struct t3cdev *);
-static void iwch_event_handler(struct t3cdev *, u32, u32);
-
-struct cxgb3_client t3c_client = {
- .name = "iw_cxgb3",
- .add = open_rnic_dev,
- .remove = close_rnic_dev,
- .handlers = t3c_handlers,
- .redirect = iwch_ep_redirect,
- .event_handler = iwch_event_handler
-};
-
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(dev_mutex);
-
-static void disable_dbs(struct iwch_dev *rnicp)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp)
- cxio_disable_wq_db(&qhp->wq);
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
-{
- unsigned long index;
- struct iwch_qp *qhp;
-
- xa_lock_irq(&rnicp->qps);
- xa_for_each(&rnicp->qps, index, qhp) {
- if (ring_db)
- ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell,
- qhp->wq.qpid);
- cxio_enable_wq_db(&qhp->wq);
- }
- xa_unlock_irq(&rnicp->qps);
-}
-
-static void iwch_db_drop_task(struct work_struct *work)
-{
- struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
- db_drop_task.work);
- enable_dbs(rnicp, 1);
-}
-
-static void rnic_init(struct iwch_dev *rnicp)
-{
- pr_debug("%s iwch_dev %p\n", __func__, rnicp);
- xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
- xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
- INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
-
- rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
- rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
- rnicp->attr.max_sge_per_wr = T3_MAX_SGE;
- rnicp->attr.max_sge_per_rdma_write_wr = T3_MAX_SGE;
- rnicp->attr.max_cqs = T3_MAX_NUM_CQ - 1;
- rnicp->attr.max_cqes_per_cq = T3_MAX_CQ_DEPTH;
- rnicp->attr.max_mem_regs = cxio_num_stags(&rnicp->rdev);
- rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
- rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
- rnicp->attr.mem_pgsizes_bitmask = T3_PAGESIZE_MASK;
- rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
- rnicp->attr.can_resize_wq = 0;
- rnicp->attr.max_rdma_reads_per_qp = 8;
- rnicp->attr.max_rdma_read_resources =
- rnicp->attr.max_rdma_reads_per_qp * rnicp->attr.max_qps;
- rnicp->attr.max_rdma_read_qp_depth = 8; /* IRD */
- rnicp->attr.max_rdma_read_depth =
- rnicp->attr.max_rdma_read_qp_depth * rnicp->attr.max_qps;
- rnicp->attr.rq_overflow_handled = 0;
- rnicp->attr.can_modify_ird = 0;
- rnicp->attr.can_modify_ord = 0;
- rnicp->attr.max_mem_windows = rnicp->attr.max_mem_regs - 1;
- rnicp->attr.stag0_value = 1;
- rnicp->attr.zbva_support = 1;
- rnicp->attr.local_invalidate_fence = 1;
- rnicp->attr.cq_overflow_detection = 1;
- return;
-}
-
-static void open_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *rnicp;
-
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- pr_info_once("Chelsio T3 RDMA Driver - version %s\n", DRV_VERSION);
- rnicp = ib_alloc_device(iwch_dev, ibdev);
- if (!rnicp) {
- pr_err("Cannot allocate ib device\n");
- return;
- }
- rnicp->rdev.ulp = rnicp;
- rnicp->rdev.t3cdev_p = tdev;
-
- mutex_lock(&dev_mutex);
-
- if (cxio_rdev_open(&rnicp->rdev)) {
- mutex_unlock(&dev_mutex);
- pr_err("Unable to open CXIO rdev\n");
- ib_dealloc_device(&rnicp->ibdev);
- return;
- }
-
- rnic_init(rnicp);
-
- list_add_tail(&rnicp->entry, &dev_list);
- mutex_unlock(&dev_mutex);
-
- if (iwch_register_device(rnicp)) {
- pr_err("Unable to register device\n");
- close_rnic_dev(tdev);
- }
- pr_info("Initialized device %s\n",
- pci_name(rnicp->rdev.rnic_info.pdev));
- return;
-}
-
-static void close_rnic_dev(struct t3cdev *tdev)
-{
- struct iwch_dev *dev, *tmp;
- pr_debug("%s t3cdev %p\n", __func__, tdev);
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
- if (dev->rdev.t3cdev_p == tdev) {
- dev->rdev.flags = CXIO_ERROR_FATAL;
- synchronize_net();
- cancel_delayed_work_sync(&dev->db_drop_task);
- list_del(&dev->entry);
- iwch_unregister_device(dev);
- cxio_rdev_close(&dev->rdev);
- WARN_ON(!xa_empty(&dev->cqs));
- WARN_ON(!xa_empty(&dev->qps));
- WARN_ON(!xa_empty(&dev->mrs));
- ib_dealloc_device(&dev->ibdev);
- break;
- }
- }
- mutex_unlock(&dev_mutex);
-}
-
-static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
-{
- struct cxio_rdev *rdev = tdev->ulp;
- struct iwch_dev *rnicp;
- struct ib_event event;
- u32 portnum = port_id + 1;
- int dispatch = 0;
-
- if (!rdev)
- return;
- rnicp = rdev_to_iwch_dev(rdev);
- switch (evt) {
- case OFFLOAD_STATUS_DOWN: {
- rdev->flags = CXIO_ERROR_FATAL;
- synchronize_net();
- event.event = IB_EVENT_DEVICE_FATAL;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_DOWN: {
- event.event = IB_EVENT_PORT_ERR;
- dispatch = 1;
- break;
- }
- case OFFLOAD_PORT_UP: {
- event.event = IB_EVENT_PORT_ACTIVE;
- dispatch = 1;
- break;
- }
- case OFFLOAD_DB_FULL: {
- disable_dbs(rnicp);
- break;
- }
- case OFFLOAD_DB_EMPTY: {
- enable_dbs(rnicp, 1);
- break;
- }
- case OFFLOAD_DB_DROP: {
- unsigned long delay = 1000;
- unsigned short r;
-
- disable_dbs(rnicp);
- get_random_bytes(&r, 2);
- delay += r & 1023;
-
- /*
- * delay is between 1000-2023 usecs.
- */
- schedule_delayed_work(&rnicp->db_drop_task,
- usecs_to_jiffies(delay));
- break;
- }
- }
-
- if (dispatch) {
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
- }
-
- return;
-}
-
-static int __init iwch_init_module(void)
-{
- int err;
-
- err = cxio_hal_init();
- if (err)
- return err;
- err = iwch_cm_init();
- if (err)
- return err;
- cxio_register_ev_cb(iwch_ev_dispatch);
- cxgb3_register_client(&t3c_client);
- return 0;
-}
-
-static void __exit iwch_exit_module(void)
-{
- cxgb3_unregister_client(&t3c_client);
- cxio_unregister_ev_cb(iwch_ev_dispatch);
- iwch_cm_term();
- cxio_hal_exit();
-}
-
-module_init(iwch_init_module);
-module_exit(iwch_exit_module);
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
deleted file mode 100644
index 310a937bffcf..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_H__
-#define __IWCH_H__
-
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/xarray.h>
-#include <linux/workqueue.h>
-
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxgb3_offload.h"
-
-struct iwch_pd;
-struct iwch_cq;
-struct iwch_qp;
-struct iwch_mr;
-
-struct iwch_rnic_attributes {
- u32 max_qps;
- u32 max_wrs; /* Max for any SQ/RQ */
- u32 max_sge_per_wr;
- u32 max_sge_per_rdma_write_wr; /* for RDMA Write WR */
- u32 max_cqs;
- u32 max_cqes_per_cq;
- u32 max_mem_regs;
- u32 max_phys_buf_entries; /* for phys buf list */
- u32 max_pds;
-
- /*
- * The memory page sizes supported by this RNIC.
- * Bit position i in the bitmap indicates support for a page
- * size of (4KB << i). Phys block list mode unsupported.
- */
- u32 mem_pgsizes_bitmask;
- u64 max_mr_size;
- u8 can_resize_wq;
-
- /*
- * The maximum number of RDMA Reads that can be outstanding
- * per QP with this RNIC as the target.
- */
- u32 max_rdma_reads_per_qp;
-
- /*
- * The maximum number of resources used for incoming RDMA Reads
- * across all QPs, with this RNIC as the target.
- */
- u32 max_rdma_read_resources;
-
- /*
- * The max depth per QP for initiation of RDMA Read
- * by this RNIC.
- */
- u32 max_rdma_read_qp_depth;
-
- /*
- * The maximum depth for initiation of RDMA Read
- * operations by this RNIC on all QPs
- */
- u32 max_rdma_read_depth;
- u8 rq_overflow_handled;
- u32 can_modify_ird;
- u32 can_modify_ord;
- u32 max_mem_windows;
- u32 stag0_value;
- u8 zbva_support;
- u8 local_invalidate_fence;
- u32 cq_overflow_detection;
-};
-
-struct iwch_dev {
- struct ib_device ibdev;
- struct cxio_rdev rdev;
- u32 device_cap_flags;
- struct iwch_rnic_attributes attr;
- struct xarray cqs;
- struct xarray qps;
- struct xarray mrs;
- struct list_head entry;
- struct delayed_work db_drop_task;
-};
-
-static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct iwch_dev, ibdev);
-}
-
-static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
-{
- return container_of(rdev, struct iwch_dev, rdev);
-}
-
-static inline int t3b_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3B;
-}
-
-static inline int t3a_device(const struct iwch_dev *rhp)
-{
- return rhp->rdev.t3cdev_p->type == T3A;
-}
-
-static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
-{
- return xa_load(&rhp->cqs, cqid);
-}
-
-static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
-{
- return xa_load(&rhp->qps, qpid);
-}
-
-static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
-{
- return xa_load(&rhp->mrs, mmid);
-}
-
-extern struct cxgb3_client t3c_client;
-extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
deleted file mode 100644
index 0bca72cb4d9a..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ /dev/null
@@ -1,2258 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/skbuff.h>
-#include <linux/timer.h>
-#include <linux/notifier.h>
-#include <linux/inetdevice.h>
-
-#include <net/neighbour.h>
-#include <net/netevent.h>
-#include <net/route.h>
-
-#include "tcb.h"
-#include "cxgb3_offload.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-
-static char *states[] = {
- "idle",
- "listen",
- "connecting",
- "mpa_wait_req",
- "mpa_req_sent",
- "mpa_req_rcvd",
- "mpa_rep_sent",
- "fpdu_mode",
- "aborting",
- "closing",
- "moribund",
- "dead",
- NULL,
-};
-
-int peer2peer = 0;
-module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
-
-static int ep_timeout_secs = 60;
-module_param(ep_timeout_secs, int, 0644);
-MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
- "in seconds (default=60)");
-
-static int mpa_rev = 1;
-module_param(mpa_rev, int, 0644);
-MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is spec compliant. (default=1)");
-
-static int markers_enabled = 0;
-module_param(markers_enabled, int, 0644);
-MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
-
-static int crc_enabled = 1;
-module_param(crc_enabled, int, 0644);
-MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
-
-static int rcv_win = 256 * 1024;
-module_param(rcv_win, int, 0644);
-MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
-
-static int snd_win = 32 * 1024;
-module_param(snd_win, int, 0644);
-MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
-
-static unsigned int nocong = 0;
-module_param(nocong, uint, 0644);
-MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
-
-static unsigned int cong_flavor = 1;
-module_param(cong_flavor, uint, 0644);
-MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
-
-static struct workqueue_struct *workq;
-
-static struct sk_buff_head rxq;
-
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(struct timer_list *t);
-static void connect_reply_upcall(struct iwch_ep *ep, int status);
-
-static void start_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (timer_pending(&ep->timer)) {
- pr_debug("%s stopped / restarted timer ep %p\n", __func__, ep);
- del_timer_sync(&ep->timer);
- } else
- get_ep(&ep->com);
- ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- add_timer(&ep->timer);
-}
-
-static void stop_ep_timer(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p\n", __func__, ep);
- if (!timer_pending(&ep->timer)) {
- WARN(1, "%s timer stopped when it's not running! ep %p state %u\n",
- __func__, ep, ep->com.state);
- return;
- }
- del_timer_sync(&ep->timer);
- put_ep(&ep->com);
-}
-
-static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = l2t_send(tdev, skb, l2e);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
-{
- int error = 0;
- struct cxio_rdev *rdev;
-
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- kfree_skb(skb);
- return -EIO;
- }
- error = cxgb3_ofld_send(tdev, skb);
- if (error < 0)
- kfree_skb(skb);
- return error < 0 ? error : 0;
-}
-
-static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
-{
- struct cpl_tid_release *req;
-
- skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
- if (!skb)
- return;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_cxgb3_ofld_send(tdev, skb);
- return;
-}
-
-int iwch_quiesce_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-int iwch_resume_tid(struct iwch_ep *ep)
-{
- struct cpl_set_tcb_field *req;
- struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
-
- if (!skb)
- return -ENOMEM;
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
- req->reply = 0;
- req->cpu_idx = 0;
- req->word = htons(W_TCB_RX_QUIESCE);
- req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
- req->val = 0;
-
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static void set_emss(struct iwch_ep *ep, u16 opt)
-{
- pr_debug("%s ep %p opt %u\n", __func__, ep, opt);
- ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
- if (G_TCPOPT_TSTAMP(opt))
- ep->emss -= 12;
- if (ep->emss < 128)
- ep->emss = 128;
- pr_debug("emss=%d\n", ep->emss);
-}
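-
-/*
- * Worked example (illustrative): for a selected table MTU of 1500,
- * emss = 1500 - 40 (IP + TCP headers) = 1460, minus a further 12
- * bytes if TCP timestamps are in use, giving 1448. The 128-byte
- * floor only matters for pathologically small MTUs.
- */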
-
-static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
-{
- unsigned long flags;
- enum iwch_ep_state state;
-
- spin_lock_irqsave(&epc->lock, flags);
- state = epc->state;
- spin_unlock_irqrestore(&epc->lock, flags);
- return state;
-}
-
-static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- epc->state = new;
-}
-
-static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&epc->lock, flags);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
- __state_set(epc, new);
- spin_unlock_irqrestore(&epc->lock, flags);
- return;
-}
-
-static void *alloc_ep(int size, gfp_t gfp)
-{
- struct iwch_ep_common *epc;
-
- epc = kzalloc(size, gfp);
- if (epc) {
- kref_init(&epc->kref);
- spin_lock_init(&epc->lock);
- init_waitqueue_head(&epc->waitq);
- }
- pr_debug("%s alloc ep %p\n", __func__, epc);
- return epc;
-}
-
-void __free_ep(struct kref *kref)
-{
- struct iwch_ep *ep;
- ep = container_of(container_of(kref, struct iwch_ep_common, kref),
- struct iwch_ep, com);
- pr_debug("%s ep %p state %s\n",
- __func__, ep, states[state_read(&ep->com)]);
- if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
- cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- }
- kfree(ep);
-}
-
-static void release_ep_resources(struct iwch_ep *ep)
-{
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- set_bit(RELEASE_RESOURCES, &ep->com.flags);
- put_ep(&ep->com);
-}
-
-static int status2errno(int status)
-{
- switch (status) {
- case CPL_ERR_NONE:
- return 0;
- case CPL_ERR_CONN_RESET:
- return -ECONNRESET;
- case CPL_ERR_ARP_MISS:
- return -EHOSTUNREACH;
- case CPL_ERR_CONN_TIMEDOUT:
- return -ETIMEDOUT;
- case CPL_ERR_TCAM_FULL:
- return -ENOMEM;
- case CPL_ERR_CONN_EXIST:
- return -EADDRINUSE;
- default:
- return -EIO;
- }
-}
-
-/*
- * Try to reuse an already-allocated skb where possible.
- */
-static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
-{
- if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
- skb_trim(skb, 0);
- skb_get(skb);
- } else {
- skb = alloc_skb(len, gfp);
- }
- return skb;
-}
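-
-/*
- * Sketch of the reuse rule above: a linear, uncloned skb (such as
- * one just handed up by the hardware) is trimmed to zero length and
- * re-referenced instead of being freed and reallocated, e.g.
- *
- *	skb = get_skb(rx_skb, sizeof(struct cpl_abort_req), GFP_ATOMIC);
- *
- * where rx_skb may be NULL, which forces a fresh allocation.
- */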
-
-static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- return rt;
-}
-
-static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
-{
- int i = 0;
-
- while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
- ++i;
- return i;
-}
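-
-/*
- * Illustrative example (table values hypothetical): with
- * mtus[] = { 576, 1492, 1500, 9000 }, a path MTU of 1500 yields
- * index 2 and a path MTU of 1400 yields index 0: the result is the
- * largest table entry that does not exceed the given MTU.
- */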
-
-static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p\n", __func__, dev);
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for an active open.
- */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- pr_err("ARP failure during connect\n");
- kfree_skb(skb);
-}
-
-/*
- * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
- * and send it along.
- */
-static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
-{
- struct cpl_abort_req *req = cplhdr(skb);
-
- pr_debug("%s t3cdev %p\n", __func__, dev);
- req->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(dev, skb);
-}
-
-static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
-{
- struct cpl_close_con_req *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- struct cpl_abort_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(skb, sizeof(*req), gfp);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, abort_arp_failure);
- req = skb_put_zero(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
- req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_connect(struct iwch_ep *ep)
-{
- struct cpl_act_open_req *req;
- struct sk_buff *skb;
- u32 opt0h, opt0l, opt2;
- unsigned int mtu_idx;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
- skb->priority = CPL_PRIORITY_SETUP;
- set_arp_failure_handler(skb, act_open_req_arp_failure);
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0h = htonl(opt0h);
- req->opt0l = htonl(opt0l);
- req->params = 0;
- req->opt2 = htonl(opt2);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
-
- pr_debug("%s ep %p pd_len %d\n", __func__, ep, ep->plen);
-
- BUG_ON(skb_cloned(skb));
-
- mpalen = sizeof(*mpa) + ep->plen;
- if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
- kfree_skb(skb);
- skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- connect_reply_upcall(ep, -ENOMEM);
- return;
- }
- }
- skb_trim(skb, 0);
- skb_reserve(skb, sizeof(*req));
- skb_put(skb, mpalen);
- skb->priority = CPL_PRIORITY_DATA;
- mpa = (struct mpa_message *) skb->data;
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->private_data_size = htons(ep->plen);
- mpa->revision = mpa_rev;
-
- if (ep->plen)
- memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
- start_ep_timer(ep);
- state_set(&ep->com, MPA_REQ_SENT);
- return;
-}
-
-static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = MPA_REJECT;
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb again. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- skb->priority = CPL_PRIORITY_DATA;
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(mpalen);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- BUG_ON(ep->mpa_skb);
- ep->mpa_skb = skb;
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
-{
- int mpalen;
- struct tx_data_wr *req;
- struct mpa_message *mpa;
- int len;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p plen %d\n", __func__, ep, plen);
-
- mpalen = sizeof(*mpa) + plen;
-
- skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - cannot alloc skb!\n", __func__);
- return -ENOMEM;
- }
- skb->priority = CPL_PRIORITY_DATA;
- skb_reserve(skb, sizeof(*req));
- mpa = skb_put(skb, mpalen);
- memset(mpa, 0, sizeof(*mpa));
- memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
- mpa->revision = mpa_rev;
- mpa->private_data_size = htons(plen);
- if (plen)
- memcpy(mpa->private_data, pdata, plen);
-
- /*
- * Reference the mpa skb. This ensures the data area
- * will remain in memory until the hw acks the tx.
- * Function tx_ack() will deref it.
- */
- skb_get(skb);
- set_arp_failure_handler(skb, arp_failure_discard);
- skb_reset_transport_header(skb);
- len = skb->len;
- req = skb_push(skb, sizeof(*req));
- req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
- req->wr_lo = htonl(V_WR_TID(ep->hwtid));
- req->len = htonl(len);
- req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
- V_TX_SNDBUF(snd_win>>15));
- req->flags = htonl(F_TX_INIT);
- req->sndseq = htonl(ep->snd_seq);
- ep->mpa_skb = skb;
- state_set(&ep->com, MPA_REP_SENT);
- return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-}
-
-static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_establish *req = cplhdr(skb);
- unsigned int tid = GET_TID(req);
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, tid);
-
- dst_confirm(ep->dst);
-
- /* setup the hwtid for this connection */
- ep->hwtid = tid;
- cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);
-
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- /* dealloc the atid */
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-
- /* start MPA negotiation */
- send_mpa_req(ep, skb);
-
- return 0;
-}
-
-static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- pr_debug("%s ep %p\n", __FILE__, ep);
- state_set(&ep->com, ABORTING);
- send_abort(ep, skb, gfp);
-}
-
-static void close_complete_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- if (ep->com.cm_id) {
- pr_debug("close complete delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void peer_close_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_DISCONNECT;
- if (ep->com.cm_id) {
- pr_debug("peer close delivered ep %p cm_id %p tid %d\n",
- ep, ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static void peer_abort_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CLOSE;
- event.status = -ECONNRESET;
- if (ep->com.cm_id) {
- pr_debug("abort delivered ep %p cm_id %p tid %d\n", ep,
- ep->com.cm_id, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_reply_upcall(struct iwch_ep *ep, int status)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p status %d\n", __func__, ep, status);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REPLY;
- event.status = status;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-
- if ((status == 0) || (status == -ECONNREFUSED)) {
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- }
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d status %d\n", __func__, ep,
- ep->hwtid, status);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
- if (ep->com.cm_id && status < 0) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- }
-}
-
-static void connect_request_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_CONNECT_REQUEST;
- memcpy(&event.local_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&event.remote_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
- event.private_data_len = ep->plen;
- event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
- event.provider_data = ep;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (state_read(&ep->parent_ep->com) != DEAD) {
- get_ep(&ep->com);
- ep->parent_ep->com.cm_id->event_handler(
- ep->parent_ep->com.cm_id,
- &event);
- }
- put_ep(&ep->parent_ep->com);
- ep->parent_ep = NULL;
-}
-
-static void established_upcall(struct iwch_ep *ep)
-{
- struct iw_cm_event event;
-
- pr_debug("%s ep %p\n", __func__, ep);
- memset(&event, 0, sizeof(event));
- event.event = IW_CM_EVENT_ESTABLISHED;
- /*
- * Until ird/ord negotiation via MPAv2 support is added, send max
- * supported values
- */
- event.ird = event.ord = 8;
- if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
- ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- }
-}
-
-static int update_rx_credits(struct iwch_ep *ep, u32 credits)
-{
- struct cpl_rx_data_ack *req;
- struct sk_buff *skb;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("update_rx_credits - cannot alloc skb!\n");
- return 0;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
- req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
- skb->priority = CPL_PRIORITY_ACK;
- iwch_cxgb3_ofld_send(ep->com.tdev, skb);
- return credits;
-}
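-
-/*
- * Illustrative flow: rx_data() below returns every byte it consumed
- * via update_rx_credits(ep, dlen), keeping the peer's TCP receive
- * window open while MPA negotiation data is absorbed.
- */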
-
-static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- int err;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_SENT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- err = -EINVAL;
- goto err;
- }
-
- /*
- * copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * if we don't even have the mpa message, then bail.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /* Validate MPA header. */
- if (mpa->revision != mpa_rev) {
- err = -EPROTO;
- goto err;
- }
- if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
- err = -EPROTO;
- goto err;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- err = -EPROTO;
- goto err;
- }
-
- /*
- * Fail if plen does not account for the amount of data received.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- err = -EPROTO;
- goto err;
- }
-
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- if (mpa->flags & MPA_REJECT) {
- err = -ECONNREFUSED;
- goto err;
- }
-
- /*
- * If we get here we have accumulated the entire mpa
- * start reply message including private data. And
- * the MPA header is valid.
- */
- state_set(&ep->com, FPDU_MODE);
- ep->mpa_attr.initiator = 1;
- ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;
-
- /* bind QP and TID with INIT_WR */
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err;
-
- if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
- iwch_post_zb_read(ep);
- }
-
- goto out;
-err:
- abort_connection(ep, skb, GFP_KERNEL);
-out:
- connect_reply_upcall(ep, err);
- return;
-}
-
-static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
-{
- struct mpa_message *mpa;
- u16 plen;
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- /*
- * Stop mpa timer. If it expired, then the state has
- * changed and we bail since ep_timeout already aborted
- * the connection.
- */
- stop_ep_timer(ep);
- if (state_read(&ep->com) != MPA_REQ_WAIT)
- return;
-
- /*
- * If we get more than the supported amount of private data
- * then we must fail this connection.
- */
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
-
- /*
- * Copy the new data into our accumulation buffer.
- */
- skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
- skb->len);
- ep->mpa_pkt_len += skb->len;
-
- /*
- * If we don't even have the mpa message, then bail.
- * We'll continue processing when more data arrives.
- */
- if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
- mpa = (struct mpa_message *) ep->mpa_pkt;
-
- /*
- * Validate MPA Header.
- */
- if (mpa->revision != mpa_rev) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- plen = ntohs(mpa->private_data_size);
-
- /*
- * Fail if there's too much private data.
- */
- if (plen > MPA_MAX_PRIVATE_DATA) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
-
- /*
- * Fail if plen does not account for the amount of data received.
- */
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
- ep->plen = (u8) plen;
-
- /*
- * If we don't have all the pdata yet, then bail.
- */
- if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
-
- /*
- * If we get here we have accumulated the entire mpa
- * start request message including private data.
- */
- ep->mpa_attr.initiator = 0;
- ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
- ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
- ep->mpa_attr.version = mpa_rev;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d\n",
- __func__,
- ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
- ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
-
- state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- connect_request_upcall(ep);
- return;
-}
-
-static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_rx_data *hdr = cplhdr(skb);
- unsigned int dlen = ntohs(hdr->len);
-
- pr_debug("%s ep %p dlen %u\n", __func__, ep, dlen);
-
- skb_pull(skb, sizeof(*hdr));
- skb_trim(skb, dlen);
-
- ep->rcv_seq += dlen;
- BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
- switch (state_read(&ep->com)) {
- case MPA_REQ_SENT:
- process_mpa_reply(ep, skb);
- break;
- case MPA_REQ_WAIT:
- process_mpa_request(ep, skb);
- break;
- case MPA_REP_SENT:
- break;
- default:
- pr_err("%s Unexpected streaming data. ep %p state %d tid %d\n",
- __func__, ep, state_read(&ep->com), ep->hwtid);
-
- /*
- * The ep will timeout and inform the ULP of the failure.
- * See ep_timeout().
- */
- break;
- }
-
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Upcall from the adapter indicating data has been transmitted.
- * For us it's just the single MPA request or reply. We can now free
- * the skb holding the mpa message.
- */
-static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_wr_ack *hdr = cplhdr(skb);
- unsigned int credits = ntohs(hdr->credits);
- unsigned long flags;
- int post_zb = 0;
-
- pr_debug("%s ep %p credits %u\n", __func__, ep, credits);
-
- if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p state %u\n",
- __func__, ep, state_read(&ep->com));
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- BUG_ON(credits != 1);
- dst_confirm(ep->dst);
- if (!ep->mpa_skb) {
- pr_debug("%s rdma_init wr_ack ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->mpa_attr.initiator) {
- pr_debug("%s initiator ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (peer2peer && ep->com.state == FPDU_MODE)
- post_zb = 1;
- } else {
- pr_debug("%s responder ep %p state %u\n",
- __func__, ep, ep->com.state);
- if (ep->com.state == MPA_REQ_RCVD) {
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- }
- }
- } else {
- pr_debug("%s lsm ack ep %p state %u freeing skb\n",
- __func__, ep, ep->com.state);
- kfree_skb(ep->mpa_skb);
- ep->mpa_skb = NULL;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (post_zb)
- iwch_post_zb_read(ep);
- return CPL_RET_BUF_DONE;
-}
-
-static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /*
- * We get 2 abort replies from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case ABORTING:
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- default:
- pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Return whether a failed active open has allocated a TID
- */
-static inline int act_open_has_tid(int status)
-{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
-}
-
-static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_act_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
- status2errno(rpl->status));
- connect_reply_upcall(ep, status2errno(rpl->status));
- state_set(&ep->com, DEAD);
- if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
- release_tid(ep->com.tdev, GET_TID(rpl), NULL);
- cxgb3_free_atid(ep->com.tdev, ep->atid);
- dst_release(ep->dst);
- l2t_release(ep->com.tdev, ep->l2t);
- put_ep(&ep->com);
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_start(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_pass_open_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("t3c_listen_start failed to alloc skb!\n");
- return -ENOMEM;
- }
-
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
- req->local_port = ep->com.local_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_port = 0;
- req->peer_ip = 0;
- req->peer_netmask = 0;
- req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
- req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
- req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
-
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
- rpl->status, status2errno(rpl->status));
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int listen_stop(struct iwch_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_close_listserv_req *req;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- pr_err("%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- req = skb_put(skb, sizeof(*req));
- req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- req->cpu_idx = 0;
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
- skb->priority = 1;
- return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
-}
-
-static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
- void *ctx)
-{
- struct iwch_listen_ep *ep = ctx;
- struct cpl_close_listserv_rpl *rpl = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.rpl_err = status2errno(rpl->status);
- ep->com.rpl_done = 1;
- wake_up(&ep->com.waitq);
- return CPL_RET_BUF_DONE;
-}
-
-static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
-{
- struct cpl_pass_accept_rpl *rpl;
- unsigned int mtu_idx;
- u32 opt0h, opt0l, opt2;
- int wscale;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(*rpl));
- skb_get(skb);
- mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
- wscale = compute_wscale(rcv_win);
- opt0h = V_NAGLE(0) |
- V_NO_CONG(nocong) |
- V_KEEP_ALIVE(1) |
- F_TCAM_BYPASS |
- V_WND_SCALE(wscale) |
- V_MSS_IDX(mtu_idx) |
- V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
- opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
- opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
- V_CONG_CONTROL_FLAVOR(cong_flavor);
-
- rpl = cplhdr(skb);
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(opt0h);
- rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
- rpl->opt2 = htonl(opt2);
- rpl->rsvd = rpl->opt2; /* workaround for HW bug */
- skb->priority = CPL_PRIORITY_SETUP;
- iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
-
- return;
-}
-
-static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
- struct sk_buff *skb)
-{
- pr_debug("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
- peer_ip);
- BUG_ON(skb_cloned(skb));
- skb_trim(skb, sizeof(struct cpl_tid_release));
- skb_get(skb);
-
- if (tdev->type != T3A)
- release_tid(tdev, hwtid, skb);
- else {
- struct cpl_pass_accept_rpl *rpl;
-
- rpl = cplhdr(skb);
- skb->priority = CPL_PRIORITY_SETUP;
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
- hwtid));
- rpl->peer_ip = peer_ip;
- rpl->opt0h = htonl(F_TCAM_BYPASS);
- rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
- rpl->opt2 = 0;
- rpl->rsvd = rpl->opt2;
- iwch_cxgb3_ofld_send(tdev, skb);
- }
-}
-
-static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *child_ep, *parent_ep = ctx;
- struct cpl_pass_accept_req *req = cplhdr(skb);
- unsigned int hwtid = GET_TID(req);
- struct dst_entry *dst;
- struct l2t_entry *l2t;
- struct rtable *rt;
- struct iff_mac tim;
-
- pr_debug("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
- if (state_read(&parent_ep->com) != LISTEN) {
- pr_err("%s - listening ep not in LISTEN\n", __func__);
- goto reject;
- }
-
- /*
- * Find the netdev for this connection request.
- */
- tim.mac_addr = req->dst_mac;
- tim.vlan_tag = ntohs(req->vlan_tag);
- if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- pr_err("%s bad dst mac %pM\n", __func__, req->dst_mac);
- goto reject;
- }
-
- /* Find output route */
- rt = find_route(tdev,
- req->local_ip,
- req->peer_ip,
- req->local_port,
- req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
- if (!rt) {
- pr_err("%s - failed to find dst entry!\n", __func__);
- goto reject;
- }
- dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
- if (!l2t) {
- pr_err("%s - failed to allocate l2t entry!\n", __func__);
- dst_release(dst);
- goto reject;
- }
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- pr_err("%s - failed to allocate ep entry!\n", __func__);
- l2t_release(tdev, l2t);
- dst_release(dst);
- goto reject;
- }
- state_set(&child_ep->com, CONNECTING);
- child_ep->com.tdev = tdev;
- child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = AF_INET;
- child_ep->com.local_addr.sin_port = req->local_port;
- child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = AF_INET;
- child_ep->com.remote_addr.sin_port = req->peer_port;
- child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
- get_ep(&parent_ep->com);
- child_ep->parent_ep = parent_ep;
- child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
- child_ep->l2t = l2t;
- child_ep->dst = dst;
- child_ep->hwtid = hwtid;
- timer_setup(&child_ep->timer, ep_timeout, 0);
- cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
- accept_cr(child_ep, req->peer_ip, skb);
- goto out;
-reject:
- reject_cr(tdev, hwtid, req->peer_ip, skb);
-out:
- return CPL_RET_BUF_DONE;
-}
-
-static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct cpl_pass_establish *req = cplhdr(skb);
-
- pr_debug("%s ep %p\n", __func__, ep);
- ep->snd_seq = ntohl(req->snd_isn);
- ep->rcv_seq = ntohl(req->rcv_isn);
-
- set_emss(ep, ntohs(req->tcp_opt));
-
- dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
- start_ep_timer(ep);
-
- return CPL_RET_BUF_DONE;
-}
-
-static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int disconnect = 1;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- dst_confirm(ep->dst);
-
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- __state_set(&ep->com, CLOSING);
- break;
- case MPA_REQ_SENT:
- __state_set(&ep->com, CLOSING);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REP_SENT:
- __state_set(&ep->com, CLOSING);
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case FPDU_MODE:
- start_ep_timer(ep);
- __state_set(&ep->com, CLOSING);
- attrs.next_state = IWCH_QP_STATE_CLOSING;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- break;
- case ABORTING:
- disconnect = 0;
- break;
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- disconnect = 0;
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- disconnect = 0;
- break;
- case DEAD:
- disconnect = 0;
- break;
- default:
- BUG_ON(1);
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (disconnect)
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
-static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct iwch_ep *ep = ctx;
- struct cpl_abort_rpl *rpl;
- struct sk_buff *rpl_skb;
- struct iwch_qp_attributes attrs;
- int ret;
- int release = 0;
- unsigned long flags;
-
- if (is_neg_adv_abort(req->status)) {
- pr_debug("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
- ep->hwtid);
- t3_l2t_send_event(ep->com.tdev, ep->l2t);
- return CPL_RET_BUF_DONE;
- }
-
- /*
- * We get 2 peer aborts from the HW. The first one must
- * be ignored except for scribbling that we need one more.
- */
- if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
- return CPL_RET_BUF_DONE;
- }
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p state %u\n", __func__, ep, ep->com.state);
- switch (ep->com.state) {
- case CONNECTING:
- break;
- case MPA_REQ_WAIT:
- stop_ep_timer(ep);
- break;
- case MPA_REQ_SENT:
- stop_ep_timer(ep);
- connect_reply_upcall(ep, -ECONNRESET);
- break;
- case MPA_REP_SENT:
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MPA_REQ_RCVD:
-
- /*
- * We're gonna mark this puppy DEAD, but keep
- * the reference on it until the ULP accepts or
- * rejects the CR. Also wake up anyone waiting
- * in rdma connection migration (see iwch_accept_cr()).
- */
- ep->com.rpl_done = 1;
- ep->com.rpl_err = -ECONNRESET;
- pr_debug("waking up ep %p\n", ep);
- wake_up(&ep->com.waitq);
- break;
- case MORIBUND:
- case CLOSING:
- stop_ep_timer(ep);
-		/* fall through */
- case FPDU_MODE:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- ret = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (ret)
- pr_err("%s - qp <- error failed!\n", __func__);
- }
- peer_abort_upcall(ep);
- break;
- case ABORTING:
- break;
- case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
- spin_unlock_irqrestore(&ep->com.lock, flags);
- return CPL_RET_BUF_DONE;
- default:
- BUG_ON(1);
- break;
- }
- dst_confirm(ep->dst);
- if (ep->com.state != ABORTING) {
- __state_set(&ep->com, DEAD);
- release = 1;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
-
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- pr_err("%s - cannot allocate skb!\n", __func__);
- release = 1;
- goto out;
- }
- rpl_skb->priority = CPL_PRIORITY_DATA;
- rpl = skb_put(rpl_skb, sizeof(*rpl));
- rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
- rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
- OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
- iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
-out:
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
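
The test_and_set_bit() guard near the top of peer_abort() is what swallows the first of the two hardware aborts the comment describes: the first caller finds the bit clear, sets it, and bails out; only the second proceeds. A minimal userspace sketch of that first-event-wins idiom, using a C11 atomic flag in place of the kernel's bitops (all names here are illustrative, not driver code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag peer_abort_seen = ATOMIC_FLAG_INIT;

/* Mirrors !test_and_set_bit(PEER_ABORT_IN_PROGRESS, ...): returns 1
 * only for the first event, which the handler then ignores. */
static int is_first_abort(void)
{
	return !atomic_flag_test_and_set(&peer_abort_seen);
}

int main(void)
{
	printf("first abort ignored: %d\n", is_first_abort());   /* 1 */
	printf("second abort handled: %d\n", !is_first_abort()); /* 1 */
	return 0;
}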
-
-static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int release = 0;
-
- pr_debug("%s ep %p\n", __func__, ep);
- BUG_ON(!ep);
-
- /* The cm_id may be null if we failed to connect */
- spin_lock_irqsave(&ep->com.lock, flags);
- switch (ep->com.state) {
- case CLOSING:
- __state_set(&ep->com, MORIBUND);
- break;
- case MORIBUND:
- stop_ep_timer(ep);
- if ((ep->com.cm_id) && (ep->com.qp)) {
- attrs.next_state = IWCH_QP_STATE_IDLE;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp,
- IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- close_complete_upcall(ep);
- __state_set(&ep->com, DEAD);
- release = 1;
- break;
- case ABORTING:
- case DEAD:
- break;
- default:
- BUG_ON(1);
- break;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (release)
- release_ep_resources(ep);
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * T3A does 3 things when a TERM is received:
- * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
- * 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcode cqe into the associated CQ.
- *
- * For (1), we save the message in the qp for later consumer consumption.
- * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
- * For (3), we toss the CQE in cxio_poll_cq().
- *
- * terminate() handles case (1)...
- */
-static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep *ep = ctx;
-
- if (state_read(&ep->com) != FPDU_MODE)
- return CPL_RET_BUF_DONE;
-
- pr_debug("%s ep %p\n", __func__, ep);
- skb_pull(skb, sizeof(struct cpl_rdma_terminate));
- pr_debug("%s saving %d bytes of term msg\n", __func__, skb->len);
- skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
- skb->len);
- ep->com.qp->attr.terminate_msg_len = skb->len;
- ep->com.qp->attr.is_terminate_local = 0;
- return CPL_RET_BUF_DONE;
-}
-
-static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_rdma_ec_status *rep = cplhdr(skb);
- struct iwch_ep *ep = ctx;
-
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
- rep->status);
- if (rep->status) {
- struct iwch_qp_attributes attrs;
-
- pr_err("%s BAD CLOSE - Aborting tid %u\n",
- __func__, ep->hwtid);
- stop_ep_timer(ep);
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- abort_connection(ep, NULL, GFP_KERNEL);
- }
- return CPL_RET_BUF_DONE;
-}
-
-static void ep_timeout(struct timer_list *t)
-{
- struct iwch_ep *ep = from_timer(ep, t, timer);
- struct iwch_qp_attributes attrs;
- unsigned long flags;
- int abort = 1;
-
- spin_lock_irqsave(&ep->com.lock, flags);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
- switch (ep->com.state) {
- case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
- connect_reply_upcall(ep, -ETIMEDOUT);
- break;
- case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
- break;
- case CLOSING:
- case MORIBUND:
- if (ep->com.cm_id && ep->com.qp) {
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- }
- __state_set(&ep->com, ABORTING);
- break;
- default:
- WARN(1, "%s unexpected state ep %p state %u\n",
- __func__, ep, ep->com.state);
- abort = 0;
- }
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (abort)
- abort_connection(ep, NULL, GFP_ATOMIC);
- put_ep(&ep->com);
-}
-
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
- struct iwch_ep *ep = to_ep(cm_id);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-
- if (state_read(&ep->com) == DEAD) {
- put_ep(&ep->com);
- return -ECONNRESET;
- }
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
- else {
- send_mpa_reject(ep, pdata, pdata_len);
- iwch_ep_disconnect(ep, 0, GFP_KERNEL);
- }
- put_ep(&ep->com);
- return 0;
-}
-
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- int err;
- struct iwch_qp_attributes attrs;
- enum iwch_qp_attr_mask mask;
- struct iwch_ep *ep = to_ep(cm_id);
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
-
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- if (state_read(&ep->com) == DEAD) {
- err = -ECONNRESET;
- goto err;
- }
-
- BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
- BUG_ON(!qp);
-
- if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
- (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
- abort_connection(ep, NULL, GFP_KERNEL);
- err = -EINVAL;
- goto err;
- }
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = qp;
-
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ird == 0)
- ep->ird = 1;
-
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
-
- /* bind QP to EP and move to RTS */
- attrs.mpa_attr = ep->mpa_attr;
- attrs.max_ird = ep->ird;
- attrs.max_ord = ep->ord;
- attrs.llp_stream_handle = ep;
- attrs.next_state = IWCH_QP_STATE_RTS;
-
- /* bind QP and TID with INIT_WR */
- mask = IWCH_QP_ATTR_NEXT_STATE |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_MAX_ORD;
-
- err = iwch_modify_qp(ep->com.qp->rhp,
- ep->com.qp, mask, &attrs, 1);
- if (err)
- goto err1;
-
- /* if needed, wait for wr_ack */
- if (iwch_rqes_posted(qp)) {
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (err)
- goto err1;
- }
-
- err = send_mpa_reply(ep, conn_param->private_data,
- conn_param->private_data_len);
- if (err)
- goto err1;
-
- state_set(&ep->com, FPDU_MODE);
- established_upcall(ep);
- put_ep(&ep->com);
- return 0;
-err1:
- ep->com.cm_id = NULL;
- ep->com.qp = NULL;
- cm_id->rem_ref(cm_id);
-err:
- put_ep(&ep->com);
- return err;
-}
-
-static int is_loopback_dst(struct iw_cm_id *cm_id)
-{
- struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
- if (!dev)
- return 0;
- dev_put(dev);
- return 1;
-}
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
-{
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_ep *ep;
- struct rtable *rt;
- int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
-
- if (cm_id->m_remote_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto out;
- }
-
- if (is_loopback_dst(cm_id)) {
- err = -ENOSYS;
- goto out;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto out;
- }
- timer_setup(&ep->timer, ep_timeout, 0);
- ep->plen = conn_param->private_data_len;
- if (ep->plen)
- memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
- conn_param->private_data, ep->plen);
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
-
- if (peer2peer && ep->ord == 0)
- ep->ord = 1;
-
- ep->com.tdev = h->rdev.t3cdev_p;
-
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->com.qp = get_qhp(h, conn_param->qpn);
- BUG_ON(!ep->com.qp);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
- ep->com.qp, cm_id);
-
- /*
- * Allocate an active TID to initiate a TCP connection.
- */
- ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->atid == -1) {
- pr_err("%s - cannot alloc atid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- /* find a route */
- rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, IPTOS_LOWDELAY);
- if (!rt) {
- pr_err("%s - cannot find route\n", __func__);
- err = -EHOSTUNREACH;
- goto fail3;
- }
- ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
- &raddr->sin_addr.s_addr);
- if (!ep->l2t) {
- pr_err("%s - cannot alloc l2e\n", __func__);
- err = -ENOMEM;
- goto fail4;
- }
-
- state_set(&ep->com, CONNECTING);
- ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
- sizeof(ep->com.remote_addr));
-
- /* send connect request to rnic */
- err = send_connect(ep);
- if (!err)
- goto out;
-
- l2t_release(h->rdev.t3cdev_p, ep->l2t);
-fail4:
- dst_release(ep->dst);
-fail3:
- cxgb3_free_atid(ep->com.tdev, ep->atid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-out:
- return err;
-}
-
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
-{
- int err = 0;
- struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct iwch_listen_ep *ep;
-
- might_sleep();
-
- if (cm_id->m_local_addr.ss_family != PF_INET) {
- err = -ENOSYS;
- goto fail1;
- }
-
- ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- pr_err("%s - cannot alloc ep\n", __func__);
- err = -ENOMEM;
- goto fail1;
- }
- pr_debug("%s ep %p\n", __func__, ep);
- ep->com.tdev = h->rdev.t3cdev_p;
- cm_id->add_ref(cm_id);
- ep->com.cm_id = cm_id;
- ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
- sizeof(ep->com.local_addr));
-
- /*
- * Allocate a server TID.
- */
- ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
- if (ep->stid == -1) {
-		pr_err("%s - cannot alloc stid\n", __func__);
- err = -ENOMEM;
- goto fail2;
- }
-
- state_set(&ep->com, LISTEN);
- err = listen_start(ep);
- if (err)
- goto fail3;
-
- /* wait for pass_open_rpl */
- wait_event(ep->com.waitq, ep->com.rpl_done);
- err = ep->com.rpl_err;
- if (!err) {
- cm_id->provider_data = ep;
- goto out;
- }
-fail3:
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-fail2:
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
-fail1:
-out:
- return err;
-}
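
iwch_create_listen() blocks in wait_event() until pass_open_rpl() sets rpl_done and wakes the wait queue. A self-contained pthread sketch of that request/reply handshake, with a condition variable standing in for the kernel waitqueue (the names and plumbing are illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static int rpl_done;
static int rpl_err;

/* Stand-in for pass_open_rpl(): record the result, then wake the waiter. */
static void *reply_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	rpl_err = 0;		/* status2errno(rpl->status) in the driver */
	rpl_done = 1;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reply_side, NULL);

	/* Stand-in for wait_event(ep->com.waitq, ep->com.rpl_done). */
	pthread_mutex_lock(&lock);
	while (!rpl_done)
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("listen reply received, err %d\n", rpl_err);
	return rpl_err;
}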
-
-int iwch_destroy_listen(struct iw_cm_id *cm_id)
-{
- int err;
- struct iwch_listen_ep *ep = to_listen_ep(cm_id);
-
- pr_debug("%s ep %p\n", __func__, ep);
-
- might_sleep();
- state_set(&ep->com, DEAD);
- ep->com.rpl_done = 0;
- ep->com.rpl_err = 0;
- err = listen_stop(ep);
- if (err)
- goto done;
- wait_event(ep->com.waitq, ep->com.rpl_done);
- cxgb3_free_stid(ep->com.tdev, ep->stid);
-done:
- err = ep->com.rpl_err;
- cm_id->rem_ref(cm_id);
- put_ep(&ep->com);
- return err;
-}
-
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
-{
-	int ret = 0;
- unsigned long flags;
- int close = 0;
- int fatal = 0;
- struct t3cdev *tdev;
- struct cxio_rdev *rdev;
-
- spin_lock_irqsave(&ep->com.lock, flags);
-
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
- states[ep->com.state], abrupt);
-
- tdev = (struct t3cdev *)ep->com.tdev;
- rdev = (struct cxio_rdev *)tdev->ulp;
- if (cxio_fatal_error(rdev)) {
- fatal = 1;
- close_complete_upcall(ep);
- ep->com.state = DEAD;
- }
- switch (ep->com.state) {
- case MPA_REQ_WAIT:
- case MPA_REQ_SENT:
- case MPA_REQ_RCVD:
- case MPA_REP_SENT:
- case FPDU_MODE:
- close = 1;
- if (abrupt)
- ep->com.state = ABORTING;
- else {
- ep->com.state = CLOSING;
- start_ep_timer(ep);
- }
- set_bit(CLOSE_SENT, &ep->com.flags);
- break;
- case CLOSING:
- if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
- close = 1;
- if (abrupt) {
- stop_ep_timer(ep);
- ep->com.state = ABORTING;
- } else
- ep->com.state = MORIBUND;
- }
- break;
- case MORIBUND:
- case ABORTING:
- case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
- break;
- default:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&ep->com.lock, flags);
- if (close) {
- if (abrupt)
- ret = send_abort(ep, NULL, gfp);
- else
- ret = send_halfclose(ep, gfp);
- if (ret)
- fatal = 1;
- }
- if (fatal)
- release_ep_resources(ep);
- return ret;
-}
-
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t)
-{
- struct iwch_ep *ep = ctx;
-
- if (ep->dst != old)
- return 0;
-
- pr_debug("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
- l2t);
- dst_hold(new);
- l2t_release(ep->com.tdev, ep->l2t);
- ep->l2t = l2t;
- dst_release(old);
- ep->dst = new;
- return 1;
-}
-
-/*
- * All the CM events are handled on a work queue to have a safe context.
- * These are the real handlers that are called from the work queue.
- */
-static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = act_establish,
- [CPL_ACT_OPEN_RPL] = act_open_rpl,
- [CPL_RX_DATA] = rx_data,
- [CPL_TX_DMA_ACK] = tx_ack,
- [CPL_ABORT_RPL_RSS] = abort_rpl,
- [CPL_ABORT_RPL] = abort_rpl,
- [CPL_PASS_OPEN_RPL] = pass_open_rpl,
- [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
- [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
- [CPL_PASS_ESTABLISH] = pass_establish,
- [CPL_PEER_CLOSE] = peer_close,
- [CPL_ABORT_REQ_RSS] = peer_abort,
- [CPL_CLOSE_CON_RPL] = close_con_rpl,
- [CPL_RDMA_TERMINATE] = terminate,
- [CPL_RDMA_EC_STATUS] = ec_status,
-};
-
-static void process_work(struct work_struct *work)
-{
- struct sk_buff *skb = NULL;
- void *ep;
- struct t3cdev *tdev;
- int ret;
-
- while ((skb = skb_dequeue(&rxq))) {
- ep = *((void **) (skb->cb));
- tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
- ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
- if (ret & CPL_RET_BUF_DONE)
- kfree_skb(skb);
-
- /*
- * ep was referenced in sched(), and is freed here.
- */
- put_ep((struct iwch_ep_common *)ep);
- }
-}
-
-static DECLARE_WORK(skb_work, process_work);
-
-static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct iwch_ep_common *epc = ctx;
-
- get_ep(epc);
-
- /*
- * Save ctx and tdev in the skb->cb area.
- */
- *((void **) skb->cb) = ctx;
- *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;
-
- /*
- * Queue the skb and schedule the worker thread.
- */
- skb_queue_tail(&rxq, skb);
- queue_work(workq, &skb_work);
- return 0;
-}
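
sched() and process_work() split each message across two contexts: the interrupt path stashes the endpoint pointer in the skb's control-block scratch area, and the worker later recovers it and dispatches through the opcode-indexed work_handlers[] table. A compact userspace sketch of the same stash-and-dispatch pattern (the struct, opcodes, and handler names are invented for illustration):

#include <stdio.h>
#include <string.h>

struct msg {
	int opcode;
	char cb[16];			/* per-message scratch, like skb->cb */
};

typedef int (*handler_fn)(void *ctx);

static int on_open(void *ctx)  { printf("open ctx=%s\n", (char *)ctx); return 0; }
static int on_close(void *ctx) { printf("close ctx=%s\n", (char *)ctx); return 0; }

static const handler_fn handlers[] = { on_open, on_close };

/* "IRQ side": stash the context pointer inside the message itself. */
static void sched_msg(struct msg *m, void *ctx)
{
	memcpy(m->cb, &ctx, sizeof(ctx));
}

/* "Worker side": recover the context and dispatch by opcode. */
static void process_msg(struct msg *m)
{
	void *ctx;

	memcpy(&ctx, m->cb, sizeof(ctx));
	handlers[m->opcode](ctx);
}

int main(void)
{
	struct msg m = { .opcode = 1 };

	sched_msg(&m, "ep0");
	process_msg(&m);		/* prints: close ctx=ep0 */
	return 0;
}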
-
-static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
-{
- struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
- if (rpl->status != CPL_ERR_NONE) {
- pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
- rpl->status, GET_TID(rpl));
- }
- return CPL_RET_BUF_DONE;
-}
-
-/*
- * All upcalls from the T3 Core go to sched() to schedule the
- * processing on a work queue.
- */
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
- [CPL_ACT_ESTABLISH] = sched,
- [CPL_ACT_OPEN_RPL] = sched,
- [CPL_RX_DATA] = sched,
- [CPL_TX_DMA_ACK] = sched,
- [CPL_ABORT_RPL_RSS] = sched,
- [CPL_ABORT_RPL] = sched,
- [CPL_PASS_OPEN_RPL] = sched,
- [CPL_CLOSE_LISTSRV_RPL] = sched,
- [CPL_PASS_ACCEPT_REQ] = sched,
- [CPL_PASS_ESTABLISH] = sched,
- [CPL_PEER_CLOSE] = sched,
- [CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
- [CPL_RDMA_TERMINATE] = sched,
- [CPL_RDMA_EC_STATUS] = sched,
- [CPL_SET_TCB_RPL] = set_tcb_rpl,
-};
-
-int __init iwch_cm_init(void)
-{
- skb_queue_head_init(&rxq);
-
- workq = alloc_ordered_workqueue("iw_cxgb3", WQ_MEM_RECLAIM);
- if (!workq)
- return -ENOMEM;
-
- return 0;
-}
-
-void __exit iwch_cm_term(void)
-{
- flush_workqueue(workq);
- destroy_workqueue(workq);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
deleted file mode 100644
index cc7fe644d260..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _IWCH_CM_H_
-#define _IWCH_CM_H_
-
-#include <linux/inet.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/kref.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/iw_cm.h>
-
-#include "cxgb3_offload.h"
-#include "iwch_provider.h"
-
-#define MPA_KEY_REQ "MPA ID Req Frame"
-#define MPA_KEY_REP "MPA ID Rep Frame"
-
-#define MPA_MAX_PRIVATE_DATA 256
-#define MPA_REV 0 /* XXX - amso1100 uses rev 0 ! */
-#define MPA_REJECT 0x20
-#define MPA_CRC 0x40
-#define MPA_MARKERS 0x80
-#define MPA_FLAGS_MASK 0xE0
-
-#define put_ep(ep) { \
- pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- WARN_ON(kref_read(&((ep)->kref)) < 1); \
- kref_put(&((ep)->kref), __free_ep); \
-}
-
-#define get_ep(ep) { \
- pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
- __func__, __LINE__, ep, kref_read(&((ep)->kref))); \
- kref_get(&((ep)->kref)); \
-}
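
These macros wrap a kref so that every context holding an ep pointer pins it, and the last put runs __free_ep(). A plain-counter userspace sketch of that discipline (single-threaded and illustrative; the real kref is atomic and the WARN_ON catches underflow):

#include <stdio.h>
#include <stdlib.h>

struct ep {
	int refcnt;
};

static void free_ep(struct ep *ep)
{
	printf("freeing ep\n");
	free(ep);
}

static void get_ep(struct ep *ep)
{
	ep->refcnt++;
}

static void put_ep(struct ep *ep)
{
	if (--ep->refcnt == 0)	/* last reference gone: destructor runs */
		free_ep(ep);
}

int main(void)
{
	struct ep *ep = calloc(1, sizeof(*ep));

	if (!ep)
		return 1;
	ep->refcnt = 1;		/* creation reference */
	get_ep(ep);		/* e.g. taken before queueing to the worker */
	put_ep(ep);		/* worker side done */
	put_ep(ep);		/* creator done: freed here */
	return 0;
}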
-
-struct mpa_message {
- u8 key[16];
- u8 flags;
- u8 revision;
- __be16 private_data_size;
- u8 private_data[0];
-};
-
-struct terminate_message {
- u8 layer_etype;
- u8 ecode;
- __be16 hdrct_rsvd;
- u8 len_hdrs[0];
-};
-
-#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
-
-enum iwch_layers_types {
- LAYER_RDMAP = 0x00,
- LAYER_DDP = 0x10,
- LAYER_MPA = 0x20,
- RDMAP_LOCAL_CATA = 0x00,
- RDMAP_REMOTE_PROT = 0x01,
- RDMAP_REMOTE_OP = 0x02,
- DDP_LOCAL_CATA = 0x00,
- DDP_TAGGED_ERR = 0x01,
- DDP_UNTAGGED_ERR = 0x02,
- DDP_LLP = 0x03
-};
-
-enum iwch_rdma_ecodes {
- RDMAP_INV_STAG = 0x00,
- RDMAP_BASE_BOUNDS = 0x01,
- RDMAP_ACC_VIOL = 0x02,
- RDMAP_STAG_NOT_ASSOC = 0x03,
- RDMAP_TO_WRAP = 0x04,
- RDMAP_INV_VERS = 0x05,
- RDMAP_INV_OPCODE = 0x06,
- RDMAP_STREAM_CATA = 0x07,
- RDMAP_GLOBAL_CATA = 0x08,
- RDMAP_CANT_INV_STAG = 0x09,
- RDMAP_UNSPECIFIED = 0xff
-};
-
-enum iwch_ddp_ecodes {
- DDPT_INV_STAG = 0x00,
- DDPT_BASE_BOUNDS = 0x01,
- DDPT_STAG_NOT_ASSOC = 0x02,
- DDPT_TO_WRAP = 0x03,
- DDPT_INV_VERS = 0x04,
- DDPU_INV_QN = 0x01,
- DDPU_INV_MSN_NOBUF = 0x02,
- DDPU_INV_MSN_RANGE = 0x03,
- DDPU_INV_MO = 0x04,
- DDPU_MSG_TOOBIG = 0x05,
- DDPU_INV_VERS = 0x06
-};
-
-enum iwch_mpa_ecodes {
- MPA_CRC_ERR = 0x02,
- MPA_MARKER_ERR = 0x03
-};
-
-enum iwch_ep_state {
- IDLE = 0,
- LISTEN,
- CONNECTING,
- MPA_REQ_WAIT,
- MPA_REQ_SENT,
- MPA_REQ_RCVD,
- MPA_REP_SENT,
- FPDU_MODE,
- ABORTING,
- CLOSING,
- MORIBUND,
- DEAD,
-};
-
-enum iwch_ep_flags {
- PEER_ABORT_IN_PROGRESS = 0,
- ABORT_REQ_IN_PROGRESS = 1,
- RELEASE_RESOURCES = 2,
- CLOSE_SENT = 3,
-};
-
-struct iwch_ep_common {
- struct iw_cm_id *cm_id;
- struct iwch_qp *qp;
- struct t3cdev *tdev;
- enum iwch_ep_state state;
- struct kref kref;
- spinlock_t lock;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
- wait_queue_head_t waitq;
- int rpl_done;
- int rpl_err;
- unsigned long flags;
-};
-
-struct iwch_listen_ep {
- struct iwch_ep_common com;
- unsigned int stid;
- int backlog;
-};
-
-struct iwch_ep {
- struct iwch_ep_common com;
- struct iwch_ep *parent_ep;
- struct timer_list timer;
- unsigned int atid;
- u32 hwtid;
- u32 snd_seq;
- u32 rcv_seq;
- struct l2t_entry *l2t;
- struct dst_entry *dst;
- struct sk_buff *mpa_skb;
- struct iwch_mpa_attributes mpa_attr;
- unsigned int mpa_pkt_len;
- u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
- u8 tos;
- u16 emss;
- u16 plen;
- u32 ird;
- u32 ord;
-};
-
-static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
-{
- return cm_id->provider_data;
-}
-
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
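
compute_wscale() returns the smallest TCP window-scale shift for which 65535 << wscale covers the requested receive window, capped at the RFC 1323 maximum of 14. A quick stand-alone check of its behavior:

#include <assert.h>

static int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

int main(void)
{
	assert(compute_wscale(65535) == 0);	/* fits without scaling */
	assert(compute_wscale(262144) == 3);	/* 65535 << 2 still falls short */
	assert(compute_wscale(1 << 30) == 14);	/* clamped at the RFC 1323 max */
	return 0;
}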
-
-/* CM prototypes */
-
-int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
-int iwch_destroy_listen(struct iw_cm_id *cm_id);
-int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
-int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
-int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
-int iwch_quiesce_tid(struct iwch_ep *ep);
-int iwch_resume_tid(struct iwch_ep *ep);
-void __free_ep(struct kref *kref);
-void iwch_rearp(struct iwch_ep *ep);
-int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, struct l2t_entry *l2t);
-
-int __init iwch_cm_init(void);
-void __exit iwch_cm_term(void);
-extern int peer2peer;
-
-#endif /* _IWCH_CM_H_ */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cq.c b/drivers/infiniband/hw/cxgb3/iwch_cq.c
deleted file mode 100644
index a098c0140580..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_cq.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "iwch_provider.h"
-#include "iwch.h"
-
-static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct iwch_qp *qhp, struct ib_wc *wc)
-{
- struct t3_wq *wq = qhp ? &qhp->wq : NULL;
- struct t3_cqe cqe;
- u32 credit = 0;
- u8 cqe_flushed;
- u64 cookie;
- int ret = 1;
-
- ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
- &credit);
- if (t3a_device(chp->rhp) && credit) {
- pr_debug("%s updating %d cq credits on id %d\n", __func__,
- credit, chp->cq.cqid);
- cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
- }
-
- if (ret) {
- ret = -EAGAIN;
- goto out;
- }
- ret = 1;
-
- wc->wr_id = cookie;
- wc->qp = qhp ? &qhp->ibqp : NULL;
- wc->vendor_err = CQE_STATUS(cqe);
- wc->wc_flags = 0;
-
- pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
- __func__,
- CQE_QPID(cqe), CQE_TYPE(cqe),
- CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
- CQE_WRID_LOW(cqe), (unsigned long long)cookie);
-
- if (CQE_TYPE(cqe) == 0) {
- if (!CQE_STATUS(cqe))
- wc->byte_len = CQE_LEN(cqe);
- else
- wc->byte_len = 0;
- wc->opcode = IB_WC_RECV;
- if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
- CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
- wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
- wc->wc_flags |= IB_WC_WITH_INVALIDATE;
- }
- } else {
- switch (CQE_OPCODE(cqe)) {
- case T3_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case T3_READ_REQ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = CQE_LEN(cqe);
- break;
- case T3_SEND:
- case T3_SEND_WITH_SE:
- case T3_SEND_WITH_INV:
- case T3_SEND_WITH_SE_INV:
- wc->opcode = IB_WC_SEND;
- break;
- case T3_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case T3_FAST_REGISTER:
- wc->opcode = IB_WC_REG_MR;
- break;
- default:
- pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
- CQE_OPCODE(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- goto out;
- }
- }
-
- if (cqe_flushed)
- wc->status = IB_WC_WR_FLUSH_ERR;
- else {
-
- switch (CQE_STATUS(cqe)) {
- case TPT_ERR_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case TPT_ERR_STAG:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_PDID:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case TPT_ERR_WRAP:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- case TPT_ERR_BOUND:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- case TPT_ERR_OPCODE:
- wc->status = IB_WC_FATAL_ERR;
- break;
- case TPT_ERR_SWFLUSH:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- default:
- pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
- CQE_STATUS(cqe), CQE_QPID(cqe));
- ret = -EINVAL;
- }
- }
-out:
- return ret;
-}
-
-/*
- * Get one cq entry from cxio and map it to openib.
- *
- * Returns:
- *	0	CQ empty
- *	1	CQE returned
- *	-EAGAIN	caller must try again
- *	-errno	any other fatal error
- */
-static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
- struct ib_wc *wc)
-{
- struct iwch_qp *qhp;
- struct t3_cqe *rd_cqe;
- int ret;
-
- rd_cqe = cxio_next_cqe(&chp->cq);
-
- if (!rd_cqe)
- return 0;
-
- qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
- if (qhp) {
- spin_lock(&qhp->lock);
- ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
- spin_unlock(&qhp->lock);
- } else {
- ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
- }
- return ret;
-}
-
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- unsigned long flags;
- int npolled;
- int err = 0;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
-
- spin_lock_irqsave(&chp->lock, flags);
- for (npolled = 0; npolled < num_entries; ++npolled) {
-
- /*
- * Because T3 can post CQEs that are _not_ associated
- * with a WR, we might have to poll again after removing
- * one of these.
- */
- do {
- err = iwch_poll_cq_one(rhp, chp, wc + npolled);
- } while (err == -EAGAIN);
- if (err <= 0)
- break;
- }
- spin_unlock_irqrestore(&chp->lock, flags);
-
-	if (err < 0)
-		return err;
-	return npolled;
-}
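
The comment above iwch_poll_cq_one() fixes the contract: 0 means empty, 1 returns an entry, -EAGAIN means a WR-less CQE was consumed and the caller must retry, anything else is fatal. A stand-alone sketch of the consuming loop against that contract, driven by a scripted fake poller (poll_one() is not the driver function):

#include <errno.h>
#include <stdio.h>

static int script[] = { -EAGAIN, 1, -EAGAIN, -EAGAIN, 1, 0 };
static unsigned int pos;

/* Fake poller following the contract above. */
static int poll_one(void)
{
	return script[pos++];
}

int main(void)
{
	int npolled = 0, err;

	for (;;) {
		do {
			err = poll_one();  /* -EAGAIN: WR-less CQE, retry */
		} while (err == -EAGAIN);
		if (err <= 0)
			break;		   /* 0: empty, < 0: fatal */
		npolled++;
	}
	printf("polled %d completions, err %d\n", npolled, err);
	return err < 0 ? 1 : 0;
}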
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c
deleted file mode 100644
index 9d356c1301c7..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_ev.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/gfp.h>
-#include <linux/mman.h>
-#include <net/sock.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_wr.h"
-
-static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
- struct respQ_msg_t *rsp_msg,
- enum ib_event_type ib_event,
- int send_term)
-{
- struct ib_event event;
- struct iwch_qp_attributes attrs;
- struct iwch_qp *qhp;
- unsigned long flag;
-
- xa_lock(&rnicp->qps);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
-
- if (!qhp) {
- pr_err("%s unaffiliated error 0x%x qpid 0x%x\n",
- __func__, CQE_STATUS(rsp_msg->cqe),
- CQE_QPID(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
- (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
- pr_debug("%s AE received after RTS - qp state %d qpid 0x%x status 0x%x\n",
- __func__,
- qhp->attr.state, qhp->wq.qpid,
- CQE_STATUS(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- return;
- }
-
- pr_err("%s - AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- __func__,
- CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
- CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
- CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
-
- atomic_inc(&qhp->refcnt);
- xa_unlock(&rnicp->qps);
-
- if (qhp->attr.state == IWCH_QP_STATE_RTS) {
- attrs.next_state = IWCH_QP_STATE_TERMINATE;
- iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
- &attrs, 1);
- if (send_term)
- iwch_post_terminate(qhp, rsp_msg);
- }
-
- event.event = ib_event;
- event.device = chp->ibcq.device;
- if (ib_event == IB_EVENT_CQ_ERR)
- event.element.cq = &chp->ibcq;
- else
- event.element.qp = &qhp->ibqp;
-
- if (qhp->ibqp.event_handler)
- (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
-
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-}
-
-void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
-{
- struct iwch_dev *rnicp;
- struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
- struct iwch_cq *chp;
- struct iwch_qp *qhp;
- u32 cqid = RSPQ_CQID(rsp_msg);
- unsigned long flag;
-
- rnicp = (struct iwch_dev *) rdev_p->ulp;
- xa_lock(&rnicp->qps);
- chp = get_chp(rnicp, cqid);
- qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
- if (!chp || !qhp) {
- pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- cqid, CQE_QPID(rsp_msg->cqe),
- CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
- CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
- CQE_WRID_LOW(rsp_msg->cqe));
- xa_unlock(&rnicp->qps);
- goto out;
- }
- iwch_qp_add_ref(&qhp->ibqp);
- atomic_inc(&chp->refcnt);
- xa_unlock(&rnicp->qps);
-
- /*
- * 1) completion of our sending a TERMINATE.
- * 2) incoming TERMINATE message.
- */
- if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
- (CQE_STATUS(rsp_msg->cqe) == 0)) {
- if (SQ_TYPE(rsp_msg->cqe)) {
- pr_debug("%s QPID 0x%x ep %p disconnecting\n",
- __func__, qhp->wq.qpid, qhp->ep);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- } else {
- pr_debug("%s post REQ_ERR AE QPID 0x%x\n", __func__,
- qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg,
- IB_EVENT_QP_REQ_ERR, 0);
- iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
- }
- goto done;
- }
-
- /* Bad incoming Read request */
- if (SQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- /* Bad incoming write */
- if (RQ_TYPE(rsp_msg->cqe) &&
- (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
- goto done;
- }
-
- switch (CQE_STATUS(rsp_msg->cqe)) {
-
- /* Completion Events */
- case TPT_ERR_SUCCESS:
-
- /*
- * Confirm the destination entry if this is a RECV completion.
- */
- if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
- dst_confirm(qhp->ep->dst);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- break;
-
- case TPT_ERR_STAG:
- case TPT_ERR_PDID:
- case TPT_ERR_QPID:
- case TPT_ERR_ACCESS:
- case TPT_ERR_WRAP:
- case TPT_ERR_BOUND:
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
- break;
-
- /* Device Fatal Errors */
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
- break;
-
- /* QP Fatal Errors */
- case TPT_ERR_OUT_OF_RQE:
- case TPT_ERR_PBL_ADDR_BOUND:
- case TPT_ERR_CRC:
- case TPT_ERR_MARKER:
- case TPT_ERR_PDU_LEN_ERR:
- case TPT_ERR_DDP_VERSION:
- case TPT_ERR_RDMA_VERSION:
- case TPT_ERR_OPCODE:
- case TPT_ERR_DDP_QUEUE_NUM:
- case TPT_ERR_MSN:
- case TPT_ERR_TBIT:
- case TPT_ERR_MO:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_RQE_ADDR_BOUND:
- case TPT_ERR_IRD_OVERFLOW:
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
-
- default:
- pr_err("Unknown T3 status 0x%x QPID 0x%x\n",
- CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
- post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
- break;
- }
-done:
- if (atomic_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
- iwch_qp_rem_ref(&qhp->ibqp);
-out:
- dev_kfree_skb_irq(skb);
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
deleted file mode 100644
index ce0f2741821d..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-
-static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
-{
- u32 mmid;
-
- mhp->attr.state = 1;
- mhp->attr.stag = stag;
- mmid = stag >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
- return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
-}
-
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift)
-{
- u32 stag;
- int ret;
-
- if (cxio_register_phys_mem(&rhp->rdev,
- &stag, mhp->attr.pdid,
- mhp->attr.perms,
- mhp->attr.zbva,
- mhp->attr.va_fbo,
- mhp->attr.len,
- shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr))
- return -ENOMEM;
-
- ret = iwch_finish_mem_reg(mhp, stag);
- if (ret)
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- return ret;
-}
-
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
-{
- mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
- npages << 3);
-
- if (!mhp->attr.pbl_addr)
- return -ENOMEM;
-
- mhp->attr.pbl_size = npages;
-
- return 0;
-}
-
-void iwch_free_pbl(struct iwch_mr *mhp)
-{
- cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
- mhp->attr.pbl_size << 3);
-}
-
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
-{
- return cxio_write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (offset << 3), npages);
-}
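
The pool calls above size the PBL as npages << 3 because each page-list entry is a single 64-bit DMA address, and iwch_write_pbl() places a write at pbl_addr + (offset << 3) for the same reason. A trivial self-check of that packing arithmetic (not driver code; the values are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pbl_addr = 0x10000;	/* base address from the pool */
	int npages = 512, offset = 128;

	/* Pool size: one 64-bit entry per page, hence npages << 3 bytes. */
	assert(((size_t)npages << 3) == npages * sizeof(uint64_t));

	/* Write placement: skipping `offset` entries advances by
	 * offset << 3 bytes, matching pbl_addr + (offset << 3) above. */
	assert(pbl_addr + (offset << 3) ==
	       pbl_addr + offset * (int)sizeof(uint64_t));
	return 0;
}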
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
deleted file mode 100644
index dcf02ec02810..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ /dev/null
@@ -1,1321 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/sched/mm.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/rtnetlink.h>
-#include <linux/inetdevice.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/iw_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/uverbs_ioctl.h>
-
-#include "cxio_hal.h"
-#include "iwch.h"
-#include "iwch_provider.h"
-#include "iwch_cm.h"
-#include <rdma/cxgb3-abi.h>
-#include "common.h"
-
-static void iwch_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct iwch_dev *rhp = to_iwch_dev(context->device);
- struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
- struct iwch_mm_entry *mm, *tmp;
-
- pr_debug("%s context %p\n", __func__, context);
- list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
- kfree(mm);
- cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
-}
-
-static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ucontext->device;
- struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
- struct iwch_dev *rhp = to_iwch_dev(ibdev);
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- cxio_init_ucontext(&rhp->rdev, &context->uctx);
- INIT_LIST_HEAD(&context->mmaps);
- spin_lock_init(&context->mmap_lock);
- return 0;
-}
-
-static void iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
-{
- struct iwch_cq *chp;
-
- pr_debug("%s ib_cq %p\n", __func__, ib_cq);
- chp = to_iwch_cq(ib_cq);
-
- xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- atomic_dec(&chp->refcnt);
- wait_event(chp->wait, !atomic_read(&chp->refcnt));
-
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
-}
-
-static int iwch_create_cq(struct ib_cq *ibcq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
-{
- struct ib_device *ibdev = ibcq->device;
- int entries = attr->cqe;
- struct iwch_dev *rhp = to_iwch_dev(ibcq->device);
- struct iwch_cq *chp = to_iwch_cq(ibcq);
- struct iwch_create_cq_resp uresp;
- struct iwch_create_cq_req ureq;
- static int warned;
- size_t resplen;
-
- pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
- if (attr->flags)
- return -EINVAL;
-
- if (udata) {
- if (!t3a_device(rhp)) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
-
- chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
- }
- }
-
- if (t3a_device(rhp)) {
-
- /*
- * T3A: Add some fluff to handle extra CQEs inserted
- * for various errors.
- * Additional CQE possibilities:
- * TERMINATE,
- * incoming RDMA WRITE Failures
- * incoming RDMA READ REQUEST FAILUREs
- * NOTE: We cannot ensure the CQ won't overflow.
- */
- entries += 16;
- }
- entries = roundup_pow_of_two(entries);
- chp->cq.size_log2 = ilog2(entries);
-
- if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
- return -ENOMEM;
-
- chp->rhp = rhp;
- chp->ibcq.cqe = 1 << chp->cq.size_log2;
- spin_lock_init(&chp->lock);
- spin_lock_init(&chp->comp_handler_lock);
- atomic_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
- if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
- cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
- return -ENOMEM;
- }
-
- if (udata) {
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct iwch_ucontext, ibucontext);
-
- mm = kmalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- iwch_destroy_cq(&chp->ibcq, udata);
- return -ENOMEM;
- }
- uresp.cqid = chp->cq.cqid;
- uresp.size_log2 = chp->cq.size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- mm->key = uresp.key;
- mm->addr = virt_to_phys(chp->cq.queue);
- if (udata->outlen < sizeof(uresp)) {
- if (!warned++)
- pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
- mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
- sizeof(struct t3_cqe));
- resplen = sizeof(struct iwch_create_cq_resp_v0);
- } else {
- mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
- sizeof(struct t3_cqe));
- uresp.memsize = mm->len;
- uresp.reserved = 0;
- resplen = sizeof(uresp);
- }
- if (ib_copy_to_udata(udata, &uresp, resplen)) {
- kfree(mm);
- iwch_destroy_cq(&chp->ibcq, udata);
- return -EFAULT;
- }
- insert_mmap(ucontext, mm);
- }
- pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr %pad\n",
- chp->cq.cqid, chp, (1 << chp->cq.size_log2),
- &chp->cq.dma_addr);
- return 0;
-}
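
iwch_create_cq() pads T3A requests, rounds the total up to a power of two, and keeps only the log2 in cq.size_log2; the depth is reconstructed later as 1 << size_log2. A small stand-alone check of that sizing, with plain C loops standing in for the kernel's roundup_pow_of_two() and ilog2():

#include <assert.h>

/* Plain-C stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int entries = 100 + 16;	/* request plus the T3A fluff */
	unsigned int size = roundup_pow2(entries);
	unsigned int size_log2 = 0;

	while ((1u << size_log2) < size)
		size_log2++;			/* stand-in for ilog2() */

	assert(size == 128 && size_log2 == 7);
	assert((1u << size_log2) == size);	/* what chp->ibcq.cqe stores */
	return 0;
}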
-
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
-{
- struct iwch_dev *rhp;
- struct iwch_cq *chp;
- enum t3_cq_opcode cq_op;
- int err;
- unsigned long flag;
- u32 rptr;
-
- chp = to_iwch_cq(ibcq);
- rhp = chp->rhp;
- if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
- cq_op = CQ_ARM_SE;
- else
- cq_op = CQ_ARM_AN;
- if (chp->user_rptr_addr) {
- if (get_user(rptr, chp->user_rptr_addr))
- return -EFAULT;
- spin_lock_irqsave(&chp->lock, flag);
- chp->cq.rptr = rptr;
- } else
- spin_lock_irqsave(&chp->lock, flag);
- pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
- err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
- spin_unlock_irqrestore(&chp->lock, flag);
- if (err < 0)
- pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
- if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- err = 0;
- return err;
-}
-
-static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- int len = vma->vm_end - vma->vm_start;
- u32 key = vma->vm_pgoff << PAGE_SHIFT;
- struct cxio_rdev *rdev_p;
- int ret = 0;
- struct iwch_mm_entry *mm;
- struct iwch_ucontext *ucontext;
- u64 addr;
-
- pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
- key, len);
-
- if (vma->vm_start & (PAGE_SIZE-1)) {
- return -EINVAL;
- }
-
- rdev_p = &(to_iwch_dev(context->device)->rdev);
- ucontext = to_iwch_ucontext(context);
-
- mm = remove_mmap(ucontext, key, len);
- if (!mm)
- return -EINVAL;
- addr = mm->addr;
- kfree(mm);
-
- if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
- (addr < (rdev_p->rnic_info.udbell_physbase +
- rdev_p->rnic_info.udbell_len))) {
-
- /*
- * Map T3 DB register.
- */
- if (vma->vm_flags & VM_READ) {
- return -EPERM;
- }
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- vma->vm_flags &= ~VM_MAYREAD;
- ret = io_remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- } else {
-
- /*
- * Map WQ or CQ contig dma memory...
- */
- ret = remap_pfn_range(vma, vma->vm_start,
- addr >> PAGE_SHIFT,
- len, vma->vm_page_prot);
- }
-
- return ret;
-}
-
-static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
- cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
-}
-
-static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
-{
- struct iwch_pd *php = to_iwch_pd(pd);
- struct ib_device *ibdev = pd->device;
- u32 pdid;
- struct iwch_dev *rhp;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-	rhp = to_iwch_dev(ibdev);
- pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
- if (!pdid)
- return -EINVAL;
-
- php->pdid = pdid;
- php->rhp = rhp;
- if (udata) {
- struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};
-
- if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- iwch_deallocate_pd(&php->ibpd, udata);
- return -EFAULT;
- }
- }
- pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
- return 0;
-}
-
-static int iwch_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_mr *mhp;
- u32 mmid;
-
- pr_debug("%s ib_mr %p\n", __func__, ib_mr);
-
- mhp = to_iwch_mr(ib_mr);
- kfree(mhp->pages);
- rhp = mhp->rhp;
- mmid = mhp->attr.stag >> 8;
- cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
- iwch_free_pbl(mhp);
- xa_erase_irq(&rhp->mrs, mmid);
- if (mhp->kva)
- kfree((void *) (unsigned long) mhp->kva);
- ib_umem_release(mhp->umem);
- pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
-{
- const u64 total_size = 0xffffffff;
- const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
- struct iwch_pd *php = to_iwch_pd(pd);
- struct iwch_dev *rhp = php->rhp;
- struct iwch_mr *mhp;
- __be64 *page_list;
- int shift = 26, npages, ret, i;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- /*
- * T3 only supports 32 bits of size.
- */
- if (sizeof(phys_addr_t) > 4) {
- pr_warn_once("Cannot support dma_mrs on this platform\n");
- return ERR_PTR(-ENOTSUPP);
- }
-
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- npages = (total_size + (1ULL << shift) - 1) >> shift;
- if (!npages) {
- ret = -EINVAL;
- goto err;
- }
-
- page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
- if (!page_list) {
- ret = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < npages; i++)
- page_list[i] = cpu_to_be64((u64)i << shift);
-
- pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
- __func__, mask, shift, total_size, npages);
-
- ret = iwch_alloc_pbl(mhp, npages);
- if (ret) {
- kfree(page_list);
- goto err_pbl;
- }
-
- ret = iwch_write_pbl(mhp, page_list, npages, 0);
- kfree(page_list);
- if (ret)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
-
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = 0;
- mhp->attr.page_size = shift - 12;
-
- mhp->attr.len = (u32) total_size;
- mhp->attr.pbl_size = npages;
- ret = iwch_register_mem(rhp, php, mhp, shift);
- if (ret)
- goto err_pbl;
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- kfree(mhp);
- return ERR_PTR(ret);
-}
-
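A quick stand-alone check of the sizing logic above: with shift = 26 the hardware page is 64 MiB, so covering the 32-bit total_size of 0xffffffff takes exactly 64 page-list entries.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t total_size = 0xffffffffULL;
		const int shift = 26;	/* 64 MiB pages, as in the driver */
		uint64_t npages = (total_size + (1ULL << shift) - 1) >> shift;

		printf("npages = %llu\n", (unsigned long long)npages);	/* 64 */
		return 0;
	}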
-static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
-{
- __be64 *pages;
- int shift, n, i;
- int err = 0;
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- struct iwch_reg_user_mr_resp uresp;
- struct sg_dma_page_iter sg_iter;
- pr_debug("%s ib_pd %p\n", __func__, pd);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
-
- mhp->rhp = rhp;
-
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
- }
-
- shift = PAGE_SHIFT;
-
- n = ib_umem_num_pages(mhp->umem);
-
- err = iwch_alloc_pbl(mhp, n);
- if (err)
- goto err;
-
- pages = (__be64 *) __get_free_page(GFP_KERNEL);
- if (!pages) {
- err = -ENOMEM;
- goto err_pbl;
- }
-
- i = n = 0;
-
- for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
- pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
- if (i == PAGE_SIZE / sizeof(*pages)) {
- err = iwch_write_pbl(mhp, pages, i, n);
- if (err)
- goto pbl_done;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- err = iwch_write_pbl(mhp, pages, i, n);
-
-pbl_done:
- free_page((unsigned long) pages);
- if (err)
- goto err_pbl;
-
- mhp->attr.pdid = php->pdid;
- mhp->attr.zbva = 0;
- mhp->attr.perms = iwch_ib_to_tpt_access(acc);
- mhp->attr.va_fbo = virt;
- mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
-
- err = iwch_register_mem(rhp, php, mhp, shift);
- if (err)
- goto err_pbl;
-
- if (udata && !t3a_device(rhp)) {
- uresp.pbl_addr = (mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3;
- pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
- uresp.pbl_addr);
-
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- iwch_dereg_mr(&mhp->ibmr, udata);
- err = -EFAULT;
- goto err;
- }
- }
-
- return &mhp->ibmr;
-
-err_pbl:
- iwch_free_pbl(mhp);
-
-err:
- ib_umem_release(mhp->umem);
- kfree(mhp);
- return ERR_PTR(err);
-}
-
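The copy loop above batches page addresses through one scratch page, flushing to the PBL whenever i reaches PAGE_SIZE / sizeof(*pages) entries. A stand-alone arithmetic check (the 4 KiB page size is an assumption):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int page_size = 4096;	/* assumed PAGE_SIZE */
		const unsigned int per_chunk = page_size / sizeof(unsigned long long);
		const unsigned int npages = 1300;	/* example user region */
		unsigned int writes = (npages + per_chunk - 1) / per_chunk;

		/* 512 entries per chunk -> 3 PBL writes for 1300 pages */
		printf("%u entries/chunk, %u writes\n", per_chunk, writes);
		return 0;
	}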
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mw *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret;
-
- if (type != IB_MW_TYPE_1)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- return ERR_PTR(-ENOMEM);
- ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
- }
- mhp->rhp = rhp;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_MW;
- mhp->attr.stag = stag;
- mmid = (stag) >> 8;
- mhp->ibmw.rkey = stag;
- if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmw);
-}
-
-static int iwch_dealloc_mw(struct ib_mw *mw)
-{
- struct iwch_dev *rhp;
- struct iwch_mw *mhp;
- u32 mmid;
-
- mhp = to_iwch_mw(mw);
- rhp = mhp->rhp;
- mmid = (mw->rkey) >> 8;
- cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
- xa_erase_irq(&rhp->mrs, mmid);
- pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
- kfree(mhp);
- return 0;
-}
-
-static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_pd *php;
- struct iwch_mr *mhp;
- u32 mmid;
- u32 stag = 0;
- int ret = -ENOMEM;
-
- if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > T3_MAX_FASTREG_DEPTH)
- return ERR_PTR(-EINVAL);
-
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
- if (!mhp)
- goto err;
-
- mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages)
- goto pl_err;
-
- mhp->rhp = rhp;
- ret = iwch_alloc_pbl(mhp, max_num_sg);
- if (ret)
- goto err1;
- mhp->attr.pbl_size = max_num_sg;
- ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
- if (ret)
- goto err2;
- mhp->attr.pdid = php->pdid;
- mhp->attr.type = TPT_NON_SHARED_MR;
- mhp->attr.stag = stag;
- mhp->attr.state = 1;
- mmid = (stag) >> 8;
- mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- ret = xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL);
- if (ret)
- goto err3;
-
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
- return &(mhp->ibmr);
-err3:
- cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
-err2:
- iwch_free_pbl(mhp);
-err1:
- kfree(mhp->pages);
-pl_err:
- kfree(mhp);
-err:
- return ERR_PTR(ret);
-}
-
-static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- if (unlikely(mhp->npages == mhp->attr.pbl_size))
- return -ENOMEM;
-
- mhp->pages[mhp->npages++] = addr;
-
- return 0;
-}
-
-static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
-{
- struct iwch_mr *mhp = to_iwch_mr(ibmr);
-
- mhp->npages = 0;
-
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
-}
-
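iwch_map_mr_sg() relies on the core's ib_sg_to_pages() driving a per-page callback; the driver only supplies storage and a bounds check. A toy model of that callback contract (the collector type is made up, not a verbs structure):

	#include <stdio.h>

	struct toy_mr {
		unsigned long pages[8];
		unsigned int npages, pbl_size;
	};

	static int toy_set_page(struct toy_mr *mr, unsigned long addr)
	{
		if (mr->npages == mr->pbl_size)
			return -1;	/* mirrors the -ENOMEM bound above */
		mr->pages[mr->npages++] = addr;
		return 0;
	}

	int main(void)
	{
		struct toy_mr mr = { .pbl_size = 8 };

		for (unsigned long a = 0x1000; a < 0x5000; a += 0x1000)
			if (toy_set_page(&mr, a))
				break;
		printf("collected %u pages\n", mr.npages);	/* 4 */
		return 0;
	}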
-static int iwch_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_qp_attributes attrs;
- struct iwch_ucontext *ucontext;
-
- qhp = to_iwch_qp(ib_qp);
- rhp = qhp->rhp;
-
- attrs.next_state = IWCH_QP_STATE_ERROR;
- iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
- wait_event(qhp->wait, !qhp->ep);
-
- xa_erase_irq(&rhp->qps, qhp->wq.qpid);
-
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
-
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
- pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
- ib_qp, qhp->wq.qpid, qhp);
- kfree(qhp);
- return 0;
-}
-
-static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *attrs,
- struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- struct iwch_pd *php;
- struct iwch_cq *schp;
- struct iwch_cq *rchp;
- struct iwch_create_qp_resp uresp;
- int wqsize, sqsize, rqsize;
- struct iwch_ucontext *ucontext;
-
- pr_debug("%s ib_pd %p\n", __func__, pd);
- if (attrs->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
- php = to_iwch_pd(pd);
- rhp = php->rhp;
- schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
- rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
- if (!schp || !rchp)
- return ERR_PTR(-EINVAL);
-
- /* The RQT size must be # of entries + 1 rounded up to a power of two */
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
- if (rqsize == attrs->cap.max_recv_wr)
- rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
-
- /* T3 doesn't support RQT depth < 16 */
- if (rqsize < 16)
- rqsize = 16;
-
- if (rqsize > T3_MAX_RQ_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (attrs->cap.max_inline_data > T3_MAX_INLINE)
- return ERR_PTR(-EINVAL);
-
- /*
- * NOTE: The SQ and total WQ sizes don't need to be
- * a power of two. However, all the code assumes
- * they are, e.g. Q_FREECNT() and friends.
- */
- sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
- wqsize = roundup_pow_of_two(rqsize + sqsize);
-
- /*
- * Kernel users need more wq space for fastreg WRs which can take
- * 2 WR fragments.
- */
- ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
- ibucontext);
- if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
- wqsize = roundup_pow_of_two(rqsize +
- roundup_pow_of_two(attrs->cap.max_send_wr * 2));
- pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
- wqsize, sqsize, rqsize);
- qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
- if (!qhp)
- return ERR_PTR(-ENOMEM);
- qhp->wq.size_log2 = ilog2(wqsize);
- qhp->wq.rq_size_log2 = ilog2(rqsize);
- qhp->wq.sq_size_log2 = ilog2(sqsize);
- if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- attrs->cap.max_recv_wr = rqsize - 1;
- attrs->cap.max_send_wr = sqsize;
- attrs->cap.max_inline_data = T3_MAX_INLINE;
-
- qhp->rhp = rhp;
- qhp->attr.pd = php->pdid;
- qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
- qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
- qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
- qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
- qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
- qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
- qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.next_state = IWCH_QP_STATE_IDLE;
-
- /*
- * XXX - These don't get passed in from the openib user
- * at create time. The CM sets them via a QP modify.
- * Need to fix... I think the CM should
- */
- qhp->attr.enable_rdma_read = 1;
- qhp->attr.enable_rdma_write = 1;
- qhp->attr.enable_bind = 1;
- qhp->attr.max_ord = 1;
- qhp->attr.max_ird = 1;
-
- spin_lock_init(&qhp->lock);
- init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
-
- if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
- cxio_destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
- kfree(qhp);
- return ERR_PTR(-ENOMEM);
- }
-
- if (udata) {
-
- struct iwch_mm_entry *mm1, *mm2;
-
- mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
- if (!mm1) {
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
- if (!mm2) {
- kfree(mm1);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-ENOMEM);
- }
-
- uresp.qpid = qhp->wq.qpid;
- uresp.size_log2 = qhp->wq.size_log2;
- uresp.sq_size_log2 = qhp->wq.sq_size_log2;
- uresp.rq_size_log2 = qhp->wq.rq_size_log2;
- spin_lock(&ucontext->mmap_lock);
- uresp.key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- uresp.db_key = ucontext->key;
- ucontext->key += PAGE_SIZE;
- spin_unlock(&ucontext->mmap_lock);
- if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
- kfree(mm1);
- kfree(mm2);
- iwch_destroy_qp(&qhp->ibqp, udata);
- return ERR_PTR(-EFAULT);
- }
- mm1->key = uresp.key;
- mm1->addr = virt_to_phys(qhp->wq.queue);
- mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.db_key;
- mm2->addr = qhp->wq.udb & PAGE_MASK;
- mm2->len = PAGE_SIZE;
- insert_mmap(ucontext, mm2);
- }
- qhp->ibqp.qp_num = qhp->wq.qpid;
- pr_debug(
- "%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr %pad size %d rq_addr 0x%x\n",
- __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
- qhp->wq.qpid, qhp, &qhp->wq.dma_addr, 1 << qhp->wq.size_log2,
- qhp->wq.rq_addr);
- return &qhp->ibqp;
-}
-
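The RQ sizing above guarantees strictly more slots than requested WRs: if max_recv_wr already lands on a power of two, it rounds up from max_recv_wr + 1 instead. A stand-alone check with a minimal roundup helper (the kernel's roundup_pow_of_two() is reimplemented here purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t rup2(uint32_t n)	/* stand-in for roundup_pow_of_two */
	{
		uint32_t p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		uint32_t max_recv_wr = 16;
		uint32_t rqsize = rup2(max_recv_wr);

		if (rqsize == max_recv_wr)	/* need entries + 1 */
			rqsize = rup2(max_recv_wr + 1);
		if (rqsize < 16)		/* T3 minimum RQT depth */
			rqsize = 16;
		printf("rqsize = %u\n", rqsize);	/* 32 */
		return 0;
	}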
-static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
-{
- struct iwch_dev *rhp;
- struct iwch_qp *qhp;
- enum iwch_qp_attr_mask mask = 0;
- struct iwch_qp_attributes attrs = {};
-
- pr_debug("%s ib_qp %p\n", __func__, ibqp);
-
- /* iwarp does not support the RTR state */
- if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
- attr_mask &= ~IB_QP_STATE;
-
- /* Make sure we still have something left to do */
- if (!attr_mask)
- return 0;
-
- qhp = to_iwch_qp(ibqp);
- rhp = qhp->rhp;
-
- attrs.next_state = iwch_convert_state(attr->qp_state);
- attrs.enable_rdma_read = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ) ? 1 : 0;
- attrs.enable_rdma_write = (attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
- attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
-
-
- mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
- mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
- (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;
-
- return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_iwch_qp(qp)->refcnt));
-}
-
-void iwch_qp_rem_ref(struct ib_qp *qp)
-{
- pr_debug("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
- wake_up(&(to_iwch_qp(qp)->wait));
-}
-
-static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
-{
- pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
- return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-}
-
-
-static int iwch_query_pkey(struct ib_device *ibdev,
- u8 port, u16 index, u16 * pkey)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- *pkey = 0;
- return 0;
-}
-
-static int iwch_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
- dev = to_iwch_dev(ibdev);
- BUG_ON(port == 0 || port > 2);
- memset(&(gid->raw[0]), 0, sizeof(gid->raw));
- memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
- return 0;
-}
-
-static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
-{
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
- char *cp, *next;
- unsigned fw_maj, fw_min, fw_mic;
-
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
-
- next = info.fw_version + 1;
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_maj);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_min);
- cp = strsep(&next, ".");
- sscanf(cp, "%i", &fw_mic);
-
- return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
- (fw_mic & 0xffff);
-}
-
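The parser above skips the leading product letter of the firmware string, tokenizes on dots, and packs major/minor/micro into 16-bit fields of a u64. A stand-alone check of the same scheme (the version string "7.11.0" is hypothetical, shown with the letter already stripped):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint64_t pack_fw_ver(unsigned int maj, unsigned int min,
				    unsigned int mic)
	{
		return (((uint64_t)maj & 0xffff) << 32) |
		       ((min & 0xffff) << 16) |
		       (mic & 0xffff);
	}

	int main(void)
	{
		char buf[] = "7.11.0";	/* hypothetical firmware version */
		int v[3] = { 0, 0, 0 };
		char *next = buf, *cp;

		for (int i = 0; i < 3 && (cp = strsep(&next, ".")); i++)
			sscanf(cp, "%i", &v[i]);
		/* 7.11.0 -> 0x7000b0000 */
		printf("0x%llx\n",
		       (unsigned long long)pack_fw_ver(v[0], v[1], v[2]));
		return 0;
	}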
-static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
-
- struct iwch_dev *dev;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
-
- dev = to_iwch_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- props->hw_ver = dev->rdev.t3cdev_p->type;
- props->fw_ver = fw_vers_string_to_u64(dev);
- props->device_cap_flags = dev->device_cap_flags;
- props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
- props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
- props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
- props->max_mr_size = dev->attr.max_mr_size;
- props->max_qp = dev->attr.max_qps;
- props->max_qp_wr = dev->attr.max_wrs;
- props->max_send_sge = dev->attr.max_sge_per_wr;
- props->max_recv_sge = dev->attr.max_sge_per_wr;
- props->max_sge_rd = 1;
- props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
- props->max_cq = dev->attr.max_cqs;
- props->max_cqe = dev->attr.max_cqes_per_cq;
- props->max_mr = dev->attr.max_mem_regs;
- props->max_pd = dev->attr.max_pds;
- props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
-
- return 0;
-}
-
-static int iwch_query_port(struct ib_device *ibdev,
- u8 port, struct ib_port_attr *props)
-{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
-
- props->port_cap_flags =
- IB_PORT_CM_SUP |
- IB_PORT_SNMP_TUNNEL_SUP |
- IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP |
- IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
- props->gid_tbl_len = 1;
- props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
- props->max_msg_sz = -1;
-
- return 0;
-}
-
-static ssize_t hw_rev_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t hca_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.driver);
-}
-static DEVICE_ATTR_RO(hca_type);
-
-static ssize_t board_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev =
- rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
-
- pr_debug("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
- iwch_dev->rdev.rnic_info.pdev->device);
-}
-static DEVICE_ATTR_RO(board_id);
-
-enum counters {
- IPINRECEIVES,
- IPINHDRERRORS,
- IPINADDRERRORS,
- IPINUNKNOWNPROTOS,
- IPINDISCARDS,
- IPINDELIVERS,
- IPOUTREQUESTS,
- IPOUTDISCARDS,
- IPOUTNOROUTES,
- IPREASMTIMEOUT,
- IPREASMREQDS,
- IPREASMOKS,
- IPREASMFAILS,
- TCPACTIVEOPENS,
- TCPPASSIVEOPENS,
- TCPATTEMPTFAILS,
- TCPESTABRESETS,
- TCPCURRESTAB,
- TCPINSEGS,
- TCPOUTSEGS,
- TCPRETRANSSEGS,
- TCPINERRS,
- TCPOUTRSTS,
- TCPRTOMIN,
- TCPRTOMAX,
- NR_COUNTERS
-};
-
-static const char * const names[] = {
- [IPINRECEIVES] = "ipInReceives",
- [IPINHDRERRORS] = "ipInHdrErrors",
- [IPINADDRERRORS] = "ipInAddrErrors",
- [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
- [IPINDISCARDS] = "ipInDiscards",
- [IPINDELIVERS] = "ipInDelivers",
- [IPOUTREQUESTS] = "ipOutRequests",
- [IPOUTDISCARDS] = "ipOutDiscards",
- [IPOUTNOROUTES] = "ipOutNoRoutes",
- [IPREASMTIMEOUT] = "ipReasmTimeout",
- [IPREASMREQDS] = "ipReasmReqds",
- [IPREASMOKS] = "ipReasmOKs",
- [IPREASMFAILS] = "ipReasmFails",
- [TCPACTIVEOPENS] = "tcpActiveOpens",
- [TCPPASSIVEOPENS] = "tcpPassiveOpens",
- [TCPATTEMPTFAILS] = "tcpAttemptFails",
- [TCPESTABRESETS] = "tcpEstabResets",
- [TCPCURRESTAB] = "tcpCurrEstab",
- [TCPINSEGS] = "tcpInSegs",
- [TCPOUTSEGS] = "tcpOutSegs",
- [TCPRETRANSSEGS] = "tcpRetransSegs",
- [TCPINERRS] = "tcpInErrs",
- [TCPOUTRSTS] = "tcpOutRsts",
- [TCPRTOMIN] = "tcpRtoMin",
- [TCPRTOMAX] = "tcpRtoMax",
-};
-
-static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
- u8 port_num)
-{
- BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
-
- /* Our driver only supports device level stats */
- if (port_num != 0)
- return NULL;
-
- return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
-}
-
-static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u8 port, int index)
-{
- struct iwch_dev *dev;
- struct tp_mib_stats m;
- int ret;
-
- if (port != 0 || !stats)
- return -ENOSYS;
-
- pr_debug("%s ibdev %p\n", __func__, ibdev);
- dev = to_iwch_dev(ibdev);
- ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
- if (ret)
- return -ENOSYS;
-
- stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
- stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
- stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
- stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
- stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
- stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
- stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
- stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
- stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
- stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
- stats->value[IPREASMREQDS] = m.ipReasmReqds;
- stats->value[IPREASMOKS] = m.ipReasmOKs;
- stats->value[IPREASMFAILS] = m.ipReasmFails;
- stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
- stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
- stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
- stats->value[TCPESTABRESETS] = m.tcpEstabResets;
- stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
- stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
- stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
- stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
- stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
- stats->value[TCPOUTRSTS] = m.tcpOutRsts;
- stats->value[TCPRTOMIN] = m.tcpRtoMin;
- stats->value[TCPRTOMAX] = m.tcpRtoMax;
-
- return stats->num_counters;
-}
-
-static struct attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- NULL
-};
-
-static const struct attribute_group iwch_attr_group = {
- .attrs = iwch_class_attributes,
-};
-
-static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
-{
- struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
-}
-
-static const struct ib_device_ops iwch_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_CXGB3,
- .uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION,
- .uverbs_no_driver_id_binding = 1,
-
- .alloc_hw_stats = iwch_alloc_stats,
- .alloc_mr = iwch_alloc_mr,
- .alloc_mw = iwch_alloc_mw,
- .alloc_pd = iwch_allocate_pd,
- .alloc_ucontext = iwch_alloc_ucontext,
- .create_cq = iwch_create_cq,
- .create_qp = iwch_create_qp,
- .dealloc_mw = iwch_dealloc_mw,
- .dealloc_pd = iwch_deallocate_pd,
- .dealloc_ucontext = iwch_dealloc_ucontext,
- .dereg_mr = iwch_dereg_mr,
- .destroy_cq = iwch_destroy_cq,
- .destroy_qp = iwch_destroy_qp,
- .get_dev_fw_str = get_dev_fw_ver_str,
- .get_dma_mr = iwch_get_dma_mr,
- .get_hw_stats = iwch_get_mib,
- .get_port_immutable = iwch_port_immutable,
- .iw_accept = iwch_accept_cr,
- .iw_add_ref = iwch_qp_add_ref,
- .iw_connect = iwch_connect,
- .iw_create_listen = iwch_create_listen,
- .iw_destroy_listen = iwch_destroy_listen,
- .iw_get_qp = iwch_get_qp,
- .iw_reject = iwch_reject_cr,
- .iw_rem_ref = iwch_qp_rem_ref,
- .map_mr_sg = iwch_map_mr_sg,
- .mmap = iwch_mmap,
- .modify_qp = iwch_ib_modify_qp,
- .poll_cq = iwch_poll_cq,
- .post_recv = iwch_post_receive,
- .post_send = iwch_post_send,
- .query_device = iwch_query_device,
- .query_gid = iwch_query_gid,
- .query_pkey = iwch_query_pkey,
- .query_port = iwch_query_port,
- .reg_user_mr = iwch_reg_user_mr,
- .req_notify_cq = iwch_arm_cq,
- INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
- INIT_RDMA_OBJ_SIZE(ib_cq, iwch_cq, ibcq),
- INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
-};
-
-static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
-{
- int ret;
- int i;
-
- for (i = 0; i < rdev->port_info.nports; i++) {
- ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
- i + 1);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-int iwch_register_device(struct iwch_dev *dev)
-{
- int err;
-
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
- dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
- IB_DEVICE_MEM_WINDOW |
- IB_DEVICE_MEM_MGT_EXTENSIONS;
-
- /* cxgb3 supports STag 0. */
- dev->ibdev.local_dma_lkey = 0;
-
- dev->ibdev.uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV);
- dev->ibdev.node_type = RDMA_NODE_RNIC;
- BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
- memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
- dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
- dev->ibdev.num_comp_vectors = 1;
- dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
-
- memcpy(dev->ibdev.iw_ifname, dev->rdev.t3cdev_p->lldev->name,
- sizeof(dev->ibdev.iw_ifname));
-
- rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
- ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
- err = set_netdevs(&dev->ibdev, &dev->rdev);
- if (err)
- return err;
-
- return ib_register_device(&dev->ibdev, "cxgb3_%d");
-}
-
-void iwch_unregister_device(struct iwch_dev *dev)
-{
- pr_debug("%s iwch_dev %p\n", __func__, dev);
- ib_unregister_device(&dev->ibdev);
- return;
-}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
deleted file mode 100644
index 8adbe9658935..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IWCH_PROVIDER_H__
-#define __IWCH_PROVIDER_H__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <rdma/ib_verbs.h>
-#include <asm/types.h>
-#include "t3cdev.h"
-#include "iwch.h"
-#include "cxio_wr.h"
-#include "cxio_hal.h"
-
-struct iwch_pd {
- struct ib_pd ibpd;
- u32 pdid;
- struct iwch_dev *rhp;
-};
-
-static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct iwch_pd, ibpd);
-}
-
-struct tpt_attributes {
- u32 stag;
- u32 state:1;
- u32 type:2;
- u32 rsvd:1;
- enum tpt_mem_perm perms;
- u32 remote_invaliate_disable:1;
- u32 zbva:1;
- u32 mw_bind_enable:1;
- u32 page_size:5;
-
- u32 pdid;
- u32 qpid;
- u32 pbl_addr;
- u32 len;
- u64 va_fbo;
- u32 pbl_size;
-};
-
-struct iwch_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
- u64 *pages;
- u32 npages;
-};
-
-typedef struct iwch_mw iwch_mw_handle;
-
-static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct iwch_mr, ibmr);
-}
-
-struct iwch_mw {
- struct ib_mw ibmw;
- struct iwch_dev *rhp;
- u64 kva;
- struct tpt_attributes attr;
-};
-
-static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct iwch_mw, ibmw);
-}
-
-struct iwch_cq {
- struct ib_cq ibcq;
- struct iwch_dev *rhp;
- struct t3_cq cq;
- spinlock_t lock;
- spinlock_t comp_handler_lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- u32 __user *user_rptr_addr;
-};
-
-static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct iwch_cq, ibcq);
-}
-
-enum IWCH_QP_FLAGS {
- QP_QUIESCED = 0x01
-};
-
-struct iwch_mpa_attributes {
- u8 initiator;
- u8 recv_marker_enabled;
- u8 xmit_marker_enabled; /* insert MPA markers on transmit */
- u8 crc_enabled;
- u8 version; /* 0 or 1 */
-};
-
-struct iwch_qp_attributes {
- u32 scq;
- u32 rcq;
- u32 sq_num_entries;
- u32 rq_num_entries;
- u32 sq_max_sges;
- u32 sq_max_sges_rdma_write;
- u32 rq_max_sges;
- u32 state;
- u8 enable_rdma_read;
- u8 enable_rdma_write; /* enable inbound Read Resp. */
- u8 enable_bind;
- u8 enable_mmid0_fastreg; /* Enable STAG0 + Fast-register */
- /*
- * Next QP state. If you specify the current state, only the
- * QP attributes will be modified.
- */
- u32 max_ord;
- u32 max_ird;
- u32 pd; /* IN */
- u32 next_state;
- char terminate_buffer[52];
- u32 terminate_msg_len;
- u8 is_terminate_local;
- struct iwch_mpa_attributes mpa_attr; /* IN-OUT */
- struct iwch_ep *llp_stream_handle;
- char *stream_msg_buf; /* Last stream msg. before Idle -> RTS */
- u32 stream_msg_buf_len; /* Only on Idle -> RTS */
-};
-
-struct iwch_qp {
- struct ib_qp ibqp;
- struct iwch_dev *rhp;
- struct iwch_ep *ep;
- struct iwch_qp_attributes attr;
- struct t3_wq wq;
- spinlock_t lock;
- atomic_t refcnt;
- wait_queue_head_t wait;
- enum IWCH_QP_FLAGS flags;
-};
-
-static inline int qp_quiesced(struct iwch_qp *qhp)
-{
- return qhp->flags & QP_QUIESCED;
-}
-
-static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct iwch_qp, ibqp);
-}
-
-void iwch_qp_add_ref(struct ib_qp *qp);
-void iwch_qp_rem_ref(struct ib_qp *qp);
-
-struct iwch_ucontext {
- struct ib_ucontext ibucontext;
- struct cxio_ucontext uctx;
- u32 key;
- spinlock_t mmap_lock;
- struct list_head mmaps;
-};
-
-static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
-{
- return container_of(c, struct iwch_ucontext, ibucontext);
-}
-
-struct iwch_mm_entry {
- struct list_head entry;
- u64 addr;
- u32 key;
- unsigned len;
-};
-
-static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
- u32 key, unsigned len)
-{
- struct list_head *pos, *nxt;
- struct iwch_mm_entry *mm;
-
- spin_lock(&ucontext->mmap_lock);
- list_for_each_safe(pos, nxt, &ucontext->mmaps) {
-
- mm = list_entry(pos, struct iwch_mm_entry, entry);
- if (mm->key == key && mm->len == len) {
- list_del_init(&mm->entry);
- spin_unlock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, key,
- (unsigned long long)mm->addr, mm->len);
- return mm;
- }
- }
- spin_unlock(&ucontext->mmap_lock);
- return NULL;
-}
-
-static inline void insert_mmap(struct iwch_ucontext *ucontext,
- struct iwch_mm_entry *mm)
-{
- spin_lock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, mm->key, (unsigned long long)mm->addr, mm->len);
- list_add_tail(&mm->entry, &ucontext->mmaps);
- spin_unlock(&ucontext->mmap_lock);
-}
-
-enum iwch_qp_attr_mask {
- IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
- IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
- IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
- IWCH_QP_ATTR_MAX_ORD = 1 << 11,
- IWCH_QP_ATTR_MAX_IRD = 1 << 12,
- IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
- IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
- IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
- IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
- IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
- IWCH_QP_ATTR_MAX_ORD |
- IWCH_QP_ATTR_MAX_IRD |
- IWCH_QP_ATTR_LLP_STREAM_HANDLE |
- IWCH_QP_ATTR_STREAM_MSG_BUFFER |
- IWCH_QP_ATTR_MPA_ATTR |
- IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
-};
-
-int iwch_modify_qp(struct iwch_dev *rhp,
- struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal);
-
-enum iwch_qp_state {
- IWCH_QP_STATE_IDLE,
- IWCH_QP_STATE_RTS,
- IWCH_QP_STATE_ERROR,
- IWCH_QP_STATE_TERMINATE,
- IWCH_QP_STATE_CLOSING,
- IWCH_QP_STATE_TOT
-};
-
-static inline int iwch_convert_state(enum ib_qp_state ib_state)
-{
- switch (ib_state) {
- case IB_QPS_RESET:
- case IB_QPS_INIT:
- return IWCH_QP_STATE_IDLE;
- case IB_QPS_RTS:
- return IWCH_QP_STATE_RTS;
- case IB_QPS_SQD:
- return IWCH_QP_STATE_CLOSING;
- case IB_QPS_SQE:
- return IWCH_QP_STATE_TERMINATE;
- case IB_QPS_ERR:
- return IWCH_QP_STATE_ERROR;
- default:
- return -1;
- }
-}
-
-static inline u32 iwch_ib_to_tpt_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
- (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
- TPT_LOCAL_READ;
-}
-
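iwch_ib_to_tpt_access() above is a pure bitmask translation, with TPT_LOCAL_READ always granted. A toy illustration with made-up bit values standing in for the IB_ACCESS_* and TPT_* constants:

	#include <stdio.h>

	enum { ACC_LW = 1, ACC_RW = 2, ACC_RR = 4 };		/* hypothetical */
	enum { TPT_LR = 1, TPT_LW = 2, TPT_RR = 4, TPT_RW = 8 };

	static unsigned int to_tpt(int acc)
	{
		return (acc & ACC_RW ? TPT_RW : 0) |
		       (acc & ACC_RR ? TPT_RR : 0) |
		       (acc & ACC_LW ? TPT_LW : 0) |
		       TPT_LR;			/* local read always on */
	}

	int main(void)
	{
		printf("0x%x\n", to_tpt(ACC_LW | ACC_RR));	/* 0x7 */
		return 0;
	}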
-static inline u32 iwch_ib_to_tpt_bind_access(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
-}
-
-enum iwch_mmid_state {
- IWCH_STAG_STATE_VALID,
- IWCH_STAG_STATE_INVALID
-};
-
-enum iwch_qp_query_flags {
- IWCH_QP_QUERY_CONTEXT_NONE = 0x0, /* No ctx; Only attrs */
- IWCH_QP_QUERY_CONTEXT_GET = 0x1, /* Get ctx + attrs */
- IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2, /* Not Supported */
-
- /*
- * Quiesce QP context; Consumer
- * will NOT replay outstanding WR
- */
- IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
- IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
- IWCH_QP_QUERY_TEST_USERWRITE = 0x32 /* Test special */
-};
-
-u16 iwch_rqes_posted(struct iwch_qp *qhp);
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
-int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_ep *ep);
-int iwch_register_device(struct iwch_dev *dev);
-void iwch_unregister_device(struct iwch_dev *dev);
-void stop_read_rep_timer(struct iwch_qp *qhp);
-int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
- struct iwch_mr *mhp, int shift);
-int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
-void iwch_free_pbl(struct iwch_mr *mhp);
-int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
-
-#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"
-
-#endif
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
deleted file mode 100644
index c649faad63f9..000000000000
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ /dev/null
@@ -1,1082 +0,0 @@
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include "iwch_provider.h"
-#include "iwch.h"
-#include "iwch_cm.h"
-#include "cxio_hal.h"
-#include "cxio_resource.h"
-
-#define NO_SUPPORT -1
-
-static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
-
- switch (wr->opcode) {
- case IB_WR_SEND:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE;
- else
- wqe->send.rdmaop = T3_SEND;
- wqe->send.rem_stag = 0;
- break;
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_SOLICITED)
- wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
- else
- wqe->send.rdmaop = T3_SEND_WITH_INV;
- wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
- break;
- default:
- return -EINVAL;
- }
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->send.reserved[0] = 0;
- wqe->send.reserved[1] = 0;
- wqe->send.reserved[2] = 0;
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen)
- return -EMSGSIZE;
-
- plen += wr->sg_list[i].length;
- wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 4 + ((wr->num_sge) << 1);
- wqe->send.plen = cpu_to_be32(plen);
- return 0;
-}
-
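The flit accounting above appears to follow from the WR layout: a flit is 8 bytes, each SGE (stag, length, 64-bit address) occupies two flits, and the send header accounts for the remaining four, hence flit_cnt = 4 + 2 * num_sge. A one-line check:

	#include <stdio.h>

	int main(void)
	{
		for (int num_sge = 1; num_sge <= 4; num_sge++)
			printf("num_sge=%d -> flit_cnt=%d\n",
			       num_sge, 4 + (num_sge << 1));
		return 0;
	}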
-static int build_rdma_write(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- int i;
- u32 plen;
- if (wr->num_sge > T3_MAX_SGE)
- return -EINVAL;
- wqe->write.rdmaop = T3_RDMA_WRITE;
- wqe->write.reserved[0] = 0;
- wqe->write.reserved[1] = 0;
- wqe->write.reserved[2] = 0;
- wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
-
- if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
- plen = 4;
- wqe->write.sgl[0].stag = wr->ex.imm_data;
- wqe->write.sgl[0].len = cpu_to_be32(0);
- wqe->write.num_sgle = cpu_to_be32(0);
- *flit_cnt = 6;
- } else {
- plen = 0;
- for (i = 0; i < wr->num_sge; i++) {
- if ((plen + wr->sg_list[i].length) < plen) {
- return -EMSGSIZE;
- }
- plen += wr->sg_list[i].length;
- wqe->write.sgl[i].stag =
- cpu_to_be32(wr->sg_list[i].lkey);
- wqe->write.sgl[i].len =
- cpu_to_be32(wr->sg_list[i].length);
- wqe->write.sgl[i].to =
- cpu_to_be64(wr->sg_list[i].addr);
- }
- wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
- *flit_cnt = 5 + ((wr->num_sge) << 1);
- }
- wqe->write.plen = cpu_to_be32(plen);
- return 0;
-}
-
-static int build_rdma_read(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- if (wr->num_sge > 1)
- return -EINVAL;
- wqe->read.rdmaop = T3_READ_REQ;
- if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
- wqe->read.local_inv = 1;
- else
- wqe->read.local_inv = 0;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
- wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
- wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
- wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
- wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
- *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
- return 0;
-}
-
-static int build_memreg(union t3_wr *wqe, const struct ib_reg_wr *wr,
- u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
-{
- struct iwch_mr *mhp = to_iwch_mr(wr->mr);
- int i;
- __be64 *p;
-
- if (mhp->npages > T3_MAX_FASTREG_DEPTH)
- return -EINVAL;
- *wr_cnt = 1;
- wqe->fastreg.stag = cpu_to_be32(wr->key);
- wqe->fastreg.len = cpu_to_be32(mhp->ibmr.length);
- wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
- wqe->fastreg.va_base_lo_fbo =
- cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
- wqe->fastreg.page_type_perms = cpu_to_be32(
- V_FR_PAGE_COUNT(mhp->npages) |
- V_FR_PAGE_SIZE(ilog2(wr->mr->page_size) - 12) |
- V_FR_TYPE(TPT_VATO) |
- V_FR_PERMS(iwch_ib_to_tpt_access(wr->access)));
- p = &wqe->fastreg.pbl_addrs[0];
- for (i = 0; i < mhp->npages; i++, p++) {
-
- /* If we need a 2nd WR, then set it up */
- if (i == T3_MAX_FASTREG_FRAG) {
- *wr_cnt = 2;
- wqe = (union t3_wr *)(wq->queue +
- Q_PTR2IDX((wq->wptr+1), wq->size_log2));
- build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
- Q_GENBIT(wq->wptr + 1, wq->size_log2),
- 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG,
- T3_EOP);
-
- p = &wqe->pbl_frag.pbl_addrs[0];
- }
- *p = cpu_to_be64((u64)mhp->pages[i]);
- }
- *flit_cnt = 5 + mhp->npages;
- if (*flit_cnt > 15)
- *flit_cnt = 15;
- return 0;
-}
-
-static int build_inv_stag(union t3_wr *wqe, const struct ib_send_wr *wr,
- u8 *flit_cnt)
-{
- wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
- wqe->local_inv.reserved = 0;
- *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
- return 0;
-}
-
-static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
- u32 num_sgle, u32 * pbl_addr, u8 * page_size)
-{
- int i;
- struct iwch_mr *mhp;
- u64 offset;
- for (i = 0; i < num_sgle; i++) {
-
- mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
- if (!mhp) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (!mhp->attr.state) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
- if (mhp->attr.zbva) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EIO;
- }
-
- if (sg_list[i].addr < mhp->attr.va_fbo) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) <
- sg_list[i].addr) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- if (sg_list[i].addr + ((u64) sg_list[i].length) >
- mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
- pr_debug("%s %d\n", __func__, __LINE__);
- return -EINVAL;
- }
- offset = sg_list[i].addr - mhp->attr.va_fbo;
- offset += mhp->attr.va_fbo &
- ((1UL << (12 + mhp->attr.page_size)) - 1);
- pbl_addr[i] = ((mhp->attr.pbl_addr -
- rhp->rdev.rnic_info.pbl_base) >> 3) +
- (offset >> (12 + mhp->attr.page_size));
- page_size[i] = mhp->attr.page_size;
- }
- return 0;
-}
-
-static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i, err = 0;
- u32 pbl_addr[T3_MAX_SGE];
- u8 page_size[T3_MAX_SGE];
-
- err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
- page_size);
- if (err)
- return err;
- wqe->recv.pagesz[0] = page_size[0];
- wqe->recv.pagesz[1] = page_size[1];
- wqe->recv.pagesz[2] = page_size[2];
- wqe->recv.pagesz[3] = page_size[3];
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
- for (i = 0; i < wr->num_sge; i++) {
- wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
- /* to in the WQE == the offset into the page */
- wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
- ((1UL << (12 + page_size[i])) - 1));
-
- /* pbl_addr is the adapter's address in the PBL */
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = 0;
- return 0;
-}
-
-static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
- const struct ib_recv_wr *wr)
-{
- int i;
- u32 pbl_addr;
- u32 pbl_offset;
-
-
- /*
- * The T3 HW requires the PBL in the HW recv descriptor to reference
- * a PBL entry. So we allocate the max needed PBL memory here and pass
- * it to the uP in the recv WR. The uP will build the PBL and setup
- * the HW recv descriptor.
- */
- pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
- if (!pbl_addr)
- return -ENOMEM;
-
- /*
- * Compute the 8B aligned offset.
- */
- pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
-
- wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
-
- for (i = 0; i < wr->num_sge; i++) {
-
- /*
- * Use a 128MB page size. This and an imposed 128MB
- * sge length limit allows us to require only a 2-entry HW
- * PBL for each SGE. This restriction is acceptable since
- * it is not possible to allocate 128MB of contiguous
- * DMA coherent memory!
- */
- if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
- return -EINVAL;
- wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
-
- /*
- * T3 restricts a recv to all zero-stag or all non-zero-stag.
- */
- if (wr->sg_list[i].lkey != 0)
- return -EINVAL;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
- wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
- wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
- pbl_offset += 2;
- }
- for (; i < T3_MAX_SGE; i++) {
- wqe->recv.pagesz[i] = 0;
- wqe->recv.sgl[i].stag = 0;
- wqe->recv.sgl[i].len = 0;
- wqe->recv.sgl[i].to = 0;
- wqe->recv.pbl_addr[i] = 0;
- }
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
- return 0;
-}
-
-int iwch_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- int err = 0;
- u8 uninitialized_var(t3_wr_flit_cnt);
- enum t3_wr_opcode t3_wr_opcode = 0;
- enum t3_wr_flags t3_wr_flags;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
- struct t3_swsq *sqp;
- int wr_cnt = 1;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
- qhp->wq.sq_size_log2);
- if (num_wrs == 0) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (num_wrs == 0) {
- err = -ENOMEM;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- t3_wr_flags = 0;
- if (wr->send_flags & IB_SEND_SOLICITED)
- t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
- t3_wr_flags |= T3_COMPLETION_FLAG;
- sqp = qhp->wq.sq +
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
- switch (wr->opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_READ_FENCE_FLAG;
- t3_wr_opcode = T3_WR_SEND;
- err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- t3_wr_opcode = T3_WR_WRITE;
- err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
- break;
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_READ_WITH_INV:
- t3_wr_opcode = T3_WR_READ;
- t3_wr_flags = 0; /* T3 reads are always signaled */
- err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
- if (err)
- break;
- sqp->read_len = wqe->read.local_len;
- if (!qhp->wq.oldest_read)
- qhp->wq.oldest_read = sqp;
- break;
- case IB_WR_REG_MR:
- t3_wr_opcode = T3_WR_FASTREG;
- err = build_memreg(wqe, reg_wr(wr), &t3_wr_flit_cnt,
- &wr_cnt, &qhp->wq);
- break;
- case IB_WR_LOCAL_INV:
- if (wr->send_flags & IB_SEND_FENCE)
- t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
- t3_wr_opcode = T3_WR_INV_STAG;
- err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
- break;
- default:
- pr_debug("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
- err = -EINVAL;
- }
- if (err)
- break;
- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
- sqp->wr_id = wr->wr_id;
- sqp->opcode = wr2opcode(t3_wr_opcode);
- sqp->sq_wptr = qhp->wq.sq_wptr;
- sqp->complete = 0;
- sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);
-
- build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, t3_wr_flit_cnt,
- (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
- pr_debug("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
- __func__, (unsigned long long)wr->wr_id, idx,
- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
- sqp->opcode);
- wr = wr->next;
- num_wrs--;
- qhp->wq.wptr += wr_cnt;
- ++(qhp->wq.sq_wptr);
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
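The post path above leans on the power-of-two queue invariant called out in iwch_create_qp(): producer and consumer pointers increment without wrapping, free slots are size - (wptr - rptr), and Q_PTR2IDX() masks a pointer down to a ring index. A toy model (macro names mimic the driver's; the values are illustrative):

	#include <stdio.h>

	#define SIZE_LOG2	4
	#define SIZE		(1u << SIZE_LOG2)

	static unsigned int q_freecnt(unsigned int rptr, unsigned int wptr)
	{
		return SIZE - (wptr - rptr);	/* valid because SIZE is 2^n */
	}

	static unsigned int q_ptr2idx(unsigned int ptr)
	{
		return ptr & (SIZE - 1);
	}

	int main(void)
	{
		unsigned int rptr = 13, wptr = 22;	/* wrap-free counters */

		/* 16-deep ring, 9 in flight: 7 free, wptr lands on slot 6 */
		printf("free=%u idx=%u\n",
		       q_freecnt(rptr, wptr), q_ptr2idx(wptr));
		return 0;
	}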
-int iwch_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- int err = 0;
- struct iwch_qp *qhp;
- u32 idx;
- union t3_wr *wqe;
- u32 num_wrs;
- unsigned long flag;
-
- qhp = to_iwch_qp(ibqp);
- spin_lock_irqsave(&qhp->lock, flag);
- if (qhp->attr.state > IWCH_QP_STATE_RTS) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -EINVAL;
- goto out;
- }
- num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
- qhp->wq.rq_size_log2) - 1;
- if (!wr) {
- spin_unlock_irqrestore(&qhp->lock, flag);
- err = -ENOMEM;
- goto out;
- }
- while (wr) {
- if (wr->num_sge > T3_MAX_SGE) {
- err = -EINVAL;
- break;
- }
- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
- wqe = (union t3_wr *) (qhp->wq.queue + idx);
- if (num_wrs)
- if (wr->sg_list[0].lkey)
- err = build_rdma_recv(qhp, wqe, wr);
- else
- err = build_zero_stag_recv(qhp, wqe, wr);
- else
- err = -ENOMEM;
-
- if (err)
- break;
-
- build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
- 0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
- pr_debug("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x wqe %p\n",
- __func__, (unsigned long long)wr->wr_id,
- idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
- ++(qhp->wq.rq_wptr);
- ++(qhp->wq.wptr);
- wr = wr->next;
- num_wrs--;
- }
- spin_unlock_irqrestore(&qhp->lock, flag);
- if (cxio_wq_db_enabled(&qhp->wq))
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
-
-out:
- if (err)
- *bad_wr = wr;
- return err;
-}
-
-static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
- u8 *layer_type, u8 *ecode)
-{
- int status = TPT_ERR_INTERNAL_ERR;
- int tagged = 0;
- int opcode = -1;
- int rqtype = 0;
- int send_inv = 0;
-
- if (rsp_msg) {
- status = CQE_STATUS(rsp_msg->cqe);
- opcode = CQE_OPCODE(rsp_msg->cqe);
- rqtype = RQ_TYPE(rsp_msg->cqe);
- send_inv = (opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV);
- tagged = (opcode == T3_RDMA_WRITE) ||
- (rqtype && (opcode == T3_READ_RESP));
- }
-
- switch (status) {
- case TPT_ERR_STAG:
- if (send_inv) {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_INV_STAG;
- }
- break;
- case TPT_ERR_PDID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- if ((opcode == T3_SEND_WITH_INV) ||
- (opcode == T3_SEND_WITH_SE_INV))
- *ecode = RDMAP_CANT_INV_STAG;
- else
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_QPID:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_STAG_NOT_ASSOC;
- break;
- case TPT_ERR_ACCESS:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_ACC_VIOL;
- break;
- case TPT_ERR_WRAP:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_TO_WRAP;
- break;
- case TPT_ERR_BOUND:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- } else {
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
- *ecode = RDMAP_BASE_BOUNDS;
- }
- break;
- case TPT_ERR_INVALIDATE_SHARED_MR:
- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_CANT_INV_STAG;
- break;
- case TPT_ERR_ECC:
- case TPT_ERR_ECC_PSTAG:
- case TPT_ERR_INTERNAL_ERR:
- *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_OUT_OF_RQE:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_NOBUF;
- break;
- case TPT_ERR_PBL_ADDR_BOUND:
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_BASE_BOUNDS;
- break;
- case TPT_ERR_CRC:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_CRC_ERR;
- break;
- case TPT_ERR_MARKER:
- *layer_type = LAYER_MPA|DDP_LLP;
- *ecode = MPA_MARKER_ERR;
- break;
- case TPT_ERR_PDU_LEN_ERR:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_MSG_TOOBIG;
- break;
- case TPT_ERR_DDP_VERSION:
- if (tagged) {
- *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
- *ecode = DDPT_INV_VERS;
- } else {
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_VERS;
- }
- break;
- case TPT_ERR_RDMA_VERSION:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_VERS;
- break;
- case TPT_ERR_OPCODE:
- *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
- *ecode = RDMAP_INV_OPCODE;
- break;
- case TPT_ERR_DDP_QUEUE_NUM:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_QN;
- break;
- case TPT_ERR_MSN:
- case TPT_ERR_MSN_GAP:
- case TPT_ERR_MSN_RANGE:
- case TPT_ERR_IRD_OVERFLOW:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MSN_RANGE;
- break;
- case TPT_ERR_TBIT:
- *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- case TPT_ERR_MO:
- *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
- *ecode = DDPU_INV_MO;
- break;
- default:
- *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
- *ecode = 0;
- break;
- }
-}
-
-int iwch_post_zb_read(struct iwch_ep *ep)
-{
- union t3_wr *wqe;
- struct sk_buff *skb;
- u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
-
- pr_debug("%s enter\n", __func__);
- skb = alloc_skb(40, GFP_KERNEL);
- if (!skb) {
- pr_err("%s cannot send zb_read!!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, sizeof(struct t3_rdma_read_wr));
- wqe->read.rdmaop = T3_READ_REQ;
- wqe->read.reserved[0] = 0;
- wqe->read.reserved[1] = 0;
- wqe->read.rem_stag = cpu_to_be32(1);
- wqe->read.rem_to = cpu_to_be64(1);
- wqe->read.local_stag = cpu_to_be32(1);
- wqe->read.local_len = cpu_to_be32(0);
- wqe->read.local_to = cpu_to_be64(1);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
- V_FW_RIWR_LEN(flit_cnt));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * This posts a TERMINATE with layer=RDMA, type=catastrophic.
- */
-int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
-{
- union t3_wr *wqe;
- struct terminate_message *term;
- struct sk_buff *skb;
-
- pr_debug("%s %d\n", __func__, __LINE__);
- skb = alloc_skb(40, GFP_ATOMIC);
- if (!skb) {
- pr_err("%s cannot send TERMINATE!\n", __func__);
- return -ENOMEM;
- }
- wqe = skb_put_zero(skb, 40);
- wqe->send.rdmaop = T3_TERMINATE;
-
- /* immediate data length */
- wqe->send.plen = htonl(4);
-
- /* immediate data starts here. */
- term = (struct terminate_message *)wqe->send.sgl;
- build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
- wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
- V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
- wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
- skb->priority = CPL_PRIORITY_DATA;
- return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
-}
-
-/*
- * Assumes qhp lock is held.
- */
-static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
- struct iwch_cq *schp)
- __releases(&qhp->lock)
- __acquires(&qhp->lock)
-{
- int count;
- int flushed;
-
- lockdep_assert_held(&qhp->lock);
-
- pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
- /* take a ref on the qhp since we must release the lock */
- atomic_inc(&qhp->refcnt);
- spin_unlock(&qhp->lock);
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&rchp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&rchp->cq);
- cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&rchp->lock);
- if (flushed) {
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- }
-
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock(&schp->lock);
- spin_lock(&qhp->lock);
- cxio_flush_hw_cq(&schp->cq);
- cxio_count_scqes(&schp->cq, &qhp->wq, &count);
- flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock(&schp->lock);
- if (flushed) {
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
-
- /* deref */
- if (atomic_dec_and_test(&qhp->refcnt))
- wake_up(&qhp->wait);
-
- spin_lock(&qhp->lock);
-}
-
-static void flush_qp(struct iwch_qp *qhp)
-{
- struct iwch_cq *rchp, *schp;
-
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
-
- if (qhp->ibqp.uobject) {
- cxio_set_wq_in_error(&qhp->wq);
- cxio_set_cq_in_error(&rchp->cq);
- spin_lock(&rchp->comp_handler_lock);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock(&rchp->comp_handler_lock);
- if (schp != rchp) {
- cxio_set_cq_in_error(&schp->cq);
- spin_lock(&schp->comp_handler_lock);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock(&schp->comp_handler_lock);
- }
- return;
- }
- __flush_qp(qhp, rchp, schp);
-}
-
-
-/*
- * Return count of RECV WRs posted
- */
-u16 iwch_rqes_posted(struct iwch_qp *qhp)
-{
- union t3_wr *wqe = qhp->wq.queue;
- u16 count = 0;
-
- while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
- count++;
- wqe++;
- }
- pr_debug("%s qhp %p count %u\n", __func__, qhp, count);
- return count;
-}
-
-static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs)
-{
- struct t3_rdma_init_attr init_attr;
- int ret;
-
- init_attr.tid = qhp->ep->hwtid;
- init_attr.qpid = qhp->wq.qpid;
- init_attr.pdid = qhp->attr.pd;
- init_attr.scqid = qhp->attr.scq;
- init_attr.rcqid = qhp->attr.rcq;
- init_attr.rq_addr = qhp->wq.rq_addr;
- init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
- init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
- qhp->attr.mpa_attr.recv_marker_enabled |
- (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
- (qhp->attr.mpa_attr.crc_enabled << 2);
-
- init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
- uP_RI_QP_RDMA_WRITE_ENABLE |
- uP_RI_QP_BIND_ENABLE;
- if (!qhp->ibqp.uobject)
- init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
- uP_RI_QP_FAST_REGISTER_ENABLE;
-
- init_attr.tcp_emss = qhp->ep->emss;
- init_attr.ord = qhp->attr.max_ord;
- init_attr.ird = qhp->attr.max_ird;
- init_attr.qp_dma_addr = qhp->wq.dma_addr;
- init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
- init_attr.rqe_count = iwch_rqes_posted(qhp);
- init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
- init_attr.chan = qhp->ep->l2t->smt_idx;
- if (peer2peer) {
- init_attr.rtr_type = RTR_READ;
- if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
- init_attr.ord = 1;
- if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
- init_attr.ird = 1;
- } else
- init_attr.rtr_type = 0;
- init_attr.irs = qhp->ep->rcv_seq;
- pr_debug("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d flags 0x%x qpcaps 0x%x\n",
- __func__,
- init_attr.rq_addr, init_attr.rq_size,
- init_attr.flags, init_attr.qpcaps);
- ret = cxio_rdma_init(&rhp->rdev, &init_attr);
- pr_debug("%s ret %d\n", __func__, ret);
- return ret;
-}
-
-int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
- enum iwch_qp_attr_mask mask,
- struct iwch_qp_attributes *attrs,
- int internal)
-{
- int ret = 0;
- struct iwch_qp_attributes newattr = qhp->attr;
- unsigned long flag;
- int disconnect = 0;
- int terminate = 0;
- int abort = 0;
- int free = 0;
- struct iwch_ep *ep = NULL;
-
- pr_debug("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
- qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
- (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
-
- spin_lock_irqsave(&qhp->lock, flag);
-
- /* Process attr changes if in IDLE */
- if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
- if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
- ret = -EIO;
- goto out;
- }
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
- newattr.enable_rdma_read = attrs->enable_rdma_read;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
- newattr.enable_rdma_write = attrs->enable_rdma_write;
- if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
- newattr.enable_bind = attrs->enable_bind;
- if (mask & IWCH_QP_ATTR_MAX_ORD) {
- if (attrs->max_ord >
- rhp->attr.max_rdma_read_qp_depth) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ord = attrs->max_ord;
- }
- if (mask & IWCH_QP_ATTR_MAX_IRD) {
- if (attrs->max_ird >
- rhp->attr.max_rdma_reads_per_qp) {
- ret = -EINVAL;
- goto out;
- }
- newattr.max_ird = attrs->max_ird;
- }
- qhp->attr = newattr;
- }
-
- if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
- goto out;
- if (qhp->attr.state == attrs->next_state)
- goto out;
-
- switch (qhp->attr.state) {
- case IWCH_QP_STATE_IDLE:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_RTS:
- if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
- ret = -EINVAL;
- goto out;
- }
- if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.mpa_attr = attrs->mpa_attr;
- qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
- qhp->ep = qhp->attr.llp_stream_handle;
- qhp->attr.state = IWCH_QP_STATE_RTS;
-
- /*
- * Ref the endpoint here and deref when we
- * disassociate the endpoint from the QP. This
- * happens in CLOSING->IDLE transition or *->ERROR
- * transition.
- */
- get_ep(&qhp->ep->com);
- spin_unlock_irqrestore(&qhp->lock, flag);
- ret = rdma_init(rhp, qhp, mask, attrs);
- spin_lock_irqsave(&qhp->lock, flag);
- if (ret)
- goto err;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- flush_qp(qhp);
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_RTS:
- switch (attrs->next_state) {
- case IWCH_QP_STATE_CLOSING:
- BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
- qhp->attr.state = IWCH_QP_STATE_CLOSING;
- if (!internal) {
-				abort = 0;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- break;
- case IWCH_QP_STATE_TERMINATE:
- qhp->attr.state = IWCH_QP_STATE_TERMINATE;
- if (qhp->ibqp.uobject)
- cxio_set_wq_in_error(&qhp->wq);
- if (!internal)
- terminate = 1;
- break;
- case IWCH_QP_STATE_ERROR:
- qhp->attr.state = IWCH_QP_STATE_ERROR;
- if (!internal) {
-				abort = 1;
- disconnect = 1;
- ep = qhp->ep;
- get_ep(&ep->com);
- }
- goto err;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- break;
- case IWCH_QP_STATE_CLOSING:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- switch (attrs->next_state) {
- case IWCH_QP_STATE_IDLE:
- flush_qp(qhp);
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- qhp->attr.llp_stream_handle = NULL;
- put_ep(&qhp->ep->com);
- qhp->ep = NULL;
- wake_up(&qhp->wait);
- break;
- case IWCH_QP_STATE_ERROR:
- goto err;
- default:
- ret = -EINVAL;
- goto err;
- }
- break;
- case IWCH_QP_STATE_ERROR:
- if (attrs->next_state != IWCH_QP_STATE_IDLE) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
- !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
- ret = -EINVAL;
- goto out;
- }
- qhp->attr.state = IWCH_QP_STATE_IDLE;
- break;
- case IWCH_QP_STATE_TERMINATE:
- if (!internal) {
- ret = -EINVAL;
- goto out;
- }
- goto err;
- break;
- default:
- pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
- ret = -EINVAL;
- goto err;
- break;
- }
- goto out;
-err:
- pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
- qhp->wq.qpid);
-
- /* disassociate the LLP connection */
- qhp->attr.llp_stream_handle = NULL;
- ep = qhp->ep;
- qhp->ep = NULL;
- qhp->attr.state = IWCH_QP_STATE_ERROR;
-	free = 1;
- wake_up(&qhp->wait);
- BUG_ON(!ep);
- flush_qp(qhp);
-out:
- spin_unlock_irqrestore(&qhp->lock, flag);
-
- if (terminate)
- iwch_post_terminate(qhp, NULL);
-
- /*
- * If disconnect is 1, then we need to initiate a disconnect
- * on the EP. This can be a normal close (RTS->CLOSING) or
- * an abnormal close (RTS/CLOSING->ERROR).
- */
- if (disconnect) {
- iwch_ep_disconnect(ep, abort, GFP_KERNEL);
- put_ep(&ep->com);
- }
-
- /*
- * If free is 1, then we've disassociated the EP from the QP
- * and we need to dereference the EP.
- */
- if (free)
- put_ep(&ep->com);
-
- pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
- return ret;
-}
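The __flush_qp() routine removed above encodes a locking rule worth keeping in mind when reading the flush paths: the CQ lock is always taken before the QP lock, so the function must drop the QP lock (after pinning the QP with a reference) and re-acquire both in order. A minimal sketch of that pattern follows, using a hypothetical demo_qp type rather than the driver's own structures:

/* Sketch only: hypothetical names; mirrors the drop/reacquire
 * pattern __flush_qp() uses to honor the cq-before-qp lock order. */
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct demo_qp {
	spinlock_t lock;
	atomic_t refcnt;
};

static void demo_flush(struct demo_qp *qp, spinlock_t *cq_lock)
	__releases(&qp->lock) __acquires(&qp->lock)
{
	/* Caller holds qp->lock; pin the QP so it cannot be freed
	 * while the lock is dropped. */
	atomic_inc(&qp->refcnt);
	spin_unlock(&qp->lock);

	/* Locking hierarchy: cq lock first, then qp lock. */
	spin_lock(cq_lock);
	spin_lock(&qp->lock);
	/* ... flush pending work requests into the CQ here ... */
	spin_unlock(&qp->lock);
	spin_unlock(cq_lock);

	atomic_dec(&qp->refcnt);	/* the real code wakes waiters on the last put */

	spin_lock(&qp->lock);		/* return with qp->lock held, as annotated */
}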
diff --git a/drivers/infiniband/hw/cxgb3/tcb.h b/drivers/infiniband/hw/cxgb3/tcb.h
deleted file mode 100644
index c702dc199e18..000000000000
--- a/drivers/infiniband/hw/cxgb3/tcb.h
+++ /dev/null
@@ -1,632 +0,0 @@
-/*
- * Copyright (c) 2007 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _TCB_DEFS_H
-#define _TCB_DEFS_H
-
-#define W_TCB_T_STATE 0
-#define S_TCB_T_STATE 0
-#define M_TCB_T_STATE 0xfULL
-#define V_TCB_T_STATE(x) ((x) << S_TCB_T_STATE)
-
-#define W_TCB_TIMER 0
-#define S_TCB_TIMER 4
-#define M_TCB_TIMER 0x1ULL
-#define V_TCB_TIMER(x) ((x) << S_TCB_TIMER)
-
-#define W_TCB_DACK_TIMER 0
-#define S_TCB_DACK_TIMER 5
-#define M_TCB_DACK_TIMER 0x1ULL
-#define V_TCB_DACK_TIMER(x) ((x) << S_TCB_DACK_TIMER)
-
-#define W_TCB_DEL_FLAG 0
-#define S_TCB_DEL_FLAG 6
-#define M_TCB_DEL_FLAG 0x1ULL
-#define V_TCB_DEL_FLAG(x) ((x) << S_TCB_DEL_FLAG)
-
-#define W_TCB_L2T_IX 0
-#define S_TCB_L2T_IX 7
-#define M_TCB_L2T_IX 0x7ffULL
-#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
-
-#define W_TCB_SMAC_SEL 0
-#define S_TCB_SMAC_SEL 18
-#define M_TCB_SMAC_SEL 0x3ULL
-#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
-
-#define W_TCB_TOS 0
-#define S_TCB_TOS 20
-#define M_TCB_TOS 0x3fULL
-#define V_TCB_TOS(x) ((x) << S_TCB_TOS)
-
-#define W_TCB_MAX_RT 0
-#define S_TCB_MAX_RT 26
-#define M_TCB_MAX_RT 0xfULL
-#define V_TCB_MAX_RT(x) ((x) << S_TCB_MAX_RT)
-
-#define W_TCB_T_RXTSHIFT 0
-#define S_TCB_T_RXTSHIFT 30
-#define M_TCB_T_RXTSHIFT 0xfULL
-#define V_TCB_T_RXTSHIFT(x) ((x) << S_TCB_T_RXTSHIFT)
-
-#define W_TCB_T_DUPACKS 1
-#define S_TCB_T_DUPACKS 2
-#define M_TCB_T_DUPACKS 0xfULL
-#define V_TCB_T_DUPACKS(x) ((x) << S_TCB_T_DUPACKS)
-
-#define W_TCB_T_MAXSEG 1
-#define S_TCB_T_MAXSEG 6
-#define M_TCB_T_MAXSEG 0xfULL
-#define V_TCB_T_MAXSEG(x) ((x) << S_TCB_T_MAXSEG)
-
-#define W_TCB_T_FLAGS1 1
-#define S_TCB_T_FLAGS1 10
-#define M_TCB_T_FLAGS1 0xffffffffULL
-#define V_TCB_T_FLAGS1(x) ((x) << S_TCB_T_FLAGS1)
-
-#define W_TCB_T_MIGRATION 1
-#define S_TCB_T_MIGRATION 20
-#define M_TCB_T_MIGRATION 0x1ULL
-#define V_TCB_T_MIGRATION(x) ((x) << S_TCB_T_MIGRATION)
-
-#define W_TCB_T_FLAGS2 2
-#define S_TCB_T_FLAGS2 10
-#define M_TCB_T_FLAGS2 0x7fULL
-#define V_TCB_T_FLAGS2(x) ((x) << S_TCB_T_FLAGS2)
-
-#define W_TCB_SND_SCALE 2
-#define S_TCB_SND_SCALE 17
-#define M_TCB_SND_SCALE 0xfULL
-#define V_TCB_SND_SCALE(x) ((x) << S_TCB_SND_SCALE)
-
-#define W_TCB_RCV_SCALE 2
-#define S_TCB_RCV_SCALE 21
-#define M_TCB_RCV_SCALE 0xfULL
-#define V_TCB_RCV_SCALE(x) ((x) << S_TCB_RCV_SCALE)
-
-#define W_TCB_SND_UNA_RAW 2
-#define S_TCB_SND_UNA_RAW 25
-#define M_TCB_SND_UNA_RAW 0x7ffffffULL
-#define V_TCB_SND_UNA_RAW(x) ((x) << S_TCB_SND_UNA_RAW)
-
-#define W_TCB_SND_NXT_RAW 3
-#define S_TCB_SND_NXT_RAW 20
-#define M_TCB_SND_NXT_RAW 0x7ffffffULL
-#define V_TCB_SND_NXT_RAW(x) ((x) << S_TCB_SND_NXT_RAW)
-
-#define W_TCB_RCV_NXT 4
-#define S_TCB_RCV_NXT 15
-#define M_TCB_RCV_NXT 0xffffffffULL
-#define V_TCB_RCV_NXT(x) ((x) << S_TCB_RCV_NXT)
-
-#define W_TCB_RCV_ADV 5
-#define S_TCB_RCV_ADV 15
-#define M_TCB_RCV_ADV 0xffffULL
-#define V_TCB_RCV_ADV(x) ((x) << S_TCB_RCV_ADV)
-
-#define W_TCB_SND_MAX_RAW 5
-#define S_TCB_SND_MAX_RAW 31
-#define M_TCB_SND_MAX_RAW 0x7ffffffULL
-#define V_TCB_SND_MAX_RAW(x) ((x) << S_TCB_SND_MAX_RAW)
-
-#define W_TCB_SND_CWND 6
-#define S_TCB_SND_CWND 26
-#define M_TCB_SND_CWND 0x7ffffffULL
-#define V_TCB_SND_CWND(x) ((x) << S_TCB_SND_CWND)
-
-#define W_TCB_SND_SSTHRESH 7
-#define S_TCB_SND_SSTHRESH 21
-#define M_TCB_SND_SSTHRESH 0x7ffffffULL
-#define V_TCB_SND_SSTHRESH(x) ((x) << S_TCB_SND_SSTHRESH)
-
-#define W_TCB_T_RTT_TS_RECENT_AGE 8
-#define S_TCB_T_RTT_TS_RECENT_AGE 16
-#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
-#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
-
-#define W_TCB_T_RTSEQ_RECENT 9
-#define S_TCB_T_RTSEQ_RECENT 16
-#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
-#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
-
-#define W_TCB_T_SRTT 10
-#define S_TCB_T_SRTT 16
-#define M_TCB_T_SRTT 0xffffULL
-#define V_TCB_T_SRTT(x) ((x) << S_TCB_T_SRTT)
-
-#define W_TCB_T_RTTVAR 11
-#define S_TCB_T_RTTVAR 0
-#define M_TCB_T_RTTVAR 0xffffULL
-#define V_TCB_T_RTTVAR(x) ((x) << S_TCB_T_RTTVAR)
-
-#define W_TCB_TS_LAST_ACK_SENT_RAW 11
-#define S_TCB_TS_LAST_ACK_SENT_RAW 16
-#define M_TCB_TS_LAST_ACK_SENT_RAW 0x7ffffffULL
-#define V_TCB_TS_LAST_ACK_SENT_RAW(x) ((x) << S_TCB_TS_LAST_ACK_SENT_RAW)
-
-#define W_TCB_DIP 12
-#define S_TCB_DIP 11
-#define M_TCB_DIP 0xffffffffULL
-#define V_TCB_DIP(x) ((x) << S_TCB_DIP)
-
-#define W_TCB_SIP 13
-#define S_TCB_SIP 11
-#define M_TCB_SIP 0xffffffffULL
-#define V_TCB_SIP(x) ((x) << S_TCB_SIP)
-
-#define W_TCB_DP 14
-#define S_TCB_DP 11
-#define M_TCB_DP 0xffffULL
-#define V_TCB_DP(x) ((x) << S_TCB_DP)
-
-#define W_TCB_SP 14
-#define S_TCB_SP 27
-#define M_TCB_SP 0xffffULL
-#define V_TCB_SP(x) ((x) << S_TCB_SP)
-
-#define W_TCB_TIMESTAMP 15
-#define S_TCB_TIMESTAMP 11
-#define M_TCB_TIMESTAMP 0xffffffffULL
-#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
-
-#define W_TCB_TIMESTAMP_OFFSET 16
-#define S_TCB_TIMESTAMP_OFFSET 11
-#define M_TCB_TIMESTAMP_OFFSET 0xfULL
-#define V_TCB_TIMESTAMP_OFFSET(x) ((x) << S_TCB_TIMESTAMP_OFFSET)
-
-#define W_TCB_TX_MAX 16
-#define S_TCB_TX_MAX 15
-#define M_TCB_TX_MAX 0xffffffffULL
-#define V_TCB_TX_MAX(x) ((x) << S_TCB_TX_MAX)
-
-#define W_TCB_TX_HDR_PTR_RAW 17
-#define S_TCB_TX_HDR_PTR_RAW 15
-#define M_TCB_TX_HDR_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_HDR_PTR_RAW(x) ((x) << S_TCB_TX_HDR_PTR_RAW)
-
-#define W_TCB_TX_LAST_PTR_RAW 18
-#define S_TCB_TX_LAST_PTR_RAW 0
-#define M_TCB_TX_LAST_PTR_RAW 0x1ffffULL
-#define V_TCB_TX_LAST_PTR_RAW(x) ((x) << S_TCB_TX_LAST_PTR_RAW)
-
-#define W_TCB_TX_COMPACT 18
-#define S_TCB_TX_COMPACT 17
-#define M_TCB_TX_COMPACT 0x1ULL
-#define V_TCB_TX_COMPACT(x) ((x) << S_TCB_TX_COMPACT)
-
-#define W_TCB_RX_COMPACT 18
-#define S_TCB_RX_COMPACT 18
-#define M_TCB_RX_COMPACT 0x1ULL
-#define V_TCB_RX_COMPACT(x) ((x) << S_TCB_RX_COMPACT)
-
-#define W_TCB_RCV_WND 18
-#define S_TCB_RCV_WND 19
-#define M_TCB_RCV_WND 0x7ffffffULL
-#define V_TCB_RCV_WND(x) ((x) << S_TCB_RCV_WND)
-
-#define W_TCB_RX_HDR_OFFSET 19
-#define S_TCB_RX_HDR_OFFSET 14
-#define M_TCB_RX_HDR_OFFSET 0x7ffffffULL
-#define V_TCB_RX_HDR_OFFSET(x) ((x) << S_TCB_RX_HDR_OFFSET)
-
-#define W_TCB_RX_FRAG0_START_IDX_RAW 20
-#define S_TCB_RX_FRAG0_START_IDX_RAW 9
-#define M_TCB_RX_FRAG0_START_IDX_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG0_START_IDX_RAW(x) ((x) << S_TCB_RX_FRAG0_START_IDX_RAW)
-
-#define W_TCB_RX_FRAG1_START_IDX_OFFSET 21
-#define S_TCB_RX_FRAG1_START_IDX_OFFSET 4
-#define M_TCB_RX_FRAG1_START_IDX_OFFSET 0x7ffffffULL
-#define V_TCB_RX_FRAG1_START_IDX_OFFSET(x) ((x) << S_TCB_RX_FRAG1_START_IDX_OFFSET)
-
-#define W_TCB_RX_FRAG0_LEN 21
-#define S_TCB_RX_FRAG0_LEN 31
-#define M_TCB_RX_FRAG0_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG0_LEN(x) ((x) << S_TCB_RX_FRAG0_LEN)
-
-#define W_TCB_RX_FRAG1_LEN 22
-#define S_TCB_RX_FRAG1_LEN 26
-#define M_TCB_RX_FRAG1_LEN 0x7ffffffULL
-#define V_TCB_RX_FRAG1_LEN(x) ((x) << S_TCB_RX_FRAG1_LEN)
-
-#define W_TCB_NEWRENO_RECOVER 23
-#define S_TCB_NEWRENO_RECOVER 21
-#define M_TCB_NEWRENO_RECOVER 0x7ffffffULL
-#define V_TCB_NEWRENO_RECOVER(x) ((x) << S_TCB_NEWRENO_RECOVER)
-
-#define W_TCB_PDU_HAVE_LEN 24
-#define S_TCB_PDU_HAVE_LEN 16
-#define M_TCB_PDU_HAVE_LEN 0x1ULL
-#define V_TCB_PDU_HAVE_LEN(x) ((x) << S_TCB_PDU_HAVE_LEN)
-
-#define W_TCB_PDU_LEN 24
-#define S_TCB_PDU_LEN 17
-#define M_TCB_PDU_LEN 0xffffULL
-#define V_TCB_PDU_LEN(x) ((x) << S_TCB_PDU_LEN)
-
-#define W_TCB_RX_QUIESCE 25
-#define S_TCB_RX_QUIESCE 1
-#define M_TCB_RX_QUIESCE 0x1ULL
-#define V_TCB_RX_QUIESCE(x) ((x) << S_TCB_RX_QUIESCE)
-
-#define W_TCB_RX_PTR_RAW 25
-#define S_TCB_RX_PTR_RAW 2
-#define M_TCB_RX_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_PTR_RAW(x) ((x) << S_TCB_RX_PTR_RAW)
-
-#define W_TCB_CPU_NO 25
-#define S_TCB_CPU_NO 19
-#define M_TCB_CPU_NO 0x7fULL
-#define V_TCB_CPU_NO(x) ((x) << S_TCB_CPU_NO)
-
-#define W_TCB_ULP_TYPE 25
-#define S_TCB_ULP_TYPE 26
-#define M_TCB_ULP_TYPE 0xfULL
-#define V_TCB_ULP_TYPE(x) ((x) << S_TCB_ULP_TYPE)
-
-#define W_TCB_RX_FRAG1_PTR_RAW 25
-#define S_TCB_RX_FRAG1_PTR_RAW 30
-#define M_TCB_RX_FRAG1_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 26
-#define S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 15
-#define M_TCB_RX_FRAG2_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG2_START_IDX_OFFSET_RAW)
-
-#define W_TCB_RX_FRAG2_PTR_RAW 27
-#define S_TCB_RX_FRAG2_PTR_RAW 10
-#define M_TCB_RX_FRAG2_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG2_PTR_RAW(x) ((x) << S_TCB_RX_FRAG2_PTR_RAW)
-
-#define W_TCB_RX_FRAG2_LEN_RAW 27
-#define S_TCB_RX_FRAG2_LEN_RAW 27
-#define M_TCB_RX_FRAG2_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG2_LEN_RAW(x) ((x) << S_TCB_RX_FRAG2_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_PTR_RAW 28
-#define S_TCB_RX_FRAG3_PTR_RAW 22
-#define M_TCB_RX_FRAG3_PTR_RAW 0x1ffffULL
-#define V_TCB_RX_FRAG3_PTR_RAW(x) ((x) << S_TCB_RX_FRAG3_PTR_RAW)
-
-#define W_TCB_RX_FRAG3_LEN_RAW 29
-#define S_TCB_RX_FRAG3_LEN_RAW 7
-#define M_TCB_RX_FRAG3_LEN_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_LEN_RAW(x) ((x) << S_TCB_RX_FRAG3_LEN_RAW)
-
-#define W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 30
-#define S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 2
-#define M_TCB_RX_FRAG3_START_IDX_OFFSET_RAW 0x7ffffffULL
-#define V_TCB_RX_FRAG3_START_IDX_OFFSET_RAW(x) ((x) << S_TCB_RX_FRAG3_START_IDX_OFFSET_RAW)
-
-#define W_TCB_PDU_HDR_LEN 30
-#define S_TCB_PDU_HDR_LEN 29
-#define M_TCB_PDU_HDR_LEN 0xffULL
-#define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
-
-#define W_TCB_SLUSH1 31
-#define S_TCB_SLUSH1 5
-#define M_TCB_SLUSH1 0x7ffffULL
-#define V_TCB_SLUSH1(x) ((x) << S_TCB_SLUSH1)
-
-#define W_TCB_ULP_RAW 31
-#define S_TCB_ULP_RAW 24
-#define M_TCB_ULP_RAW 0xffULL
-#define V_TCB_ULP_RAW(x) ((x) << S_TCB_ULP_RAW)
-
-#define W_TCB_DDP_RDMAP_VERSION 25
-#define S_TCB_DDP_RDMAP_VERSION 30
-#define M_TCB_DDP_RDMAP_VERSION 0x1ULL
-#define V_TCB_DDP_RDMAP_VERSION(x) ((x) << S_TCB_DDP_RDMAP_VERSION)
-
-#define W_TCB_MARKER_ENABLE_RX 25
-#define S_TCB_MARKER_ENABLE_RX 31
-#define M_TCB_MARKER_ENABLE_RX 0x1ULL
-#define V_TCB_MARKER_ENABLE_RX(x) ((x) << S_TCB_MARKER_ENABLE_RX)
-
-#define W_TCB_MARKER_ENABLE_TX 26
-#define S_TCB_MARKER_ENABLE_TX 0
-#define M_TCB_MARKER_ENABLE_TX 0x1ULL
-#define V_TCB_MARKER_ENABLE_TX(x) ((x) << S_TCB_MARKER_ENABLE_TX)
-
-#define W_TCB_CRC_ENABLE 26
-#define S_TCB_CRC_ENABLE 1
-#define M_TCB_CRC_ENABLE 0x1ULL
-#define V_TCB_CRC_ENABLE(x) ((x) << S_TCB_CRC_ENABLE)
-
-#define W_TCB_IRS_ULP 26
-#define S_TCB_IRS_ULP 2
-#define M_TCB_IRS_ULP 0x1ffULL
-#define V_TCB_IRS_ULP(x) ((x) << S_TCB_IRS_ULP)
-
-#define W_TCB_ISS_ULP 26
-#define S_TCB_ISS_ULP 11
-#define M_TCB_ISS_ULP 0x1ffULL
-#define V_TCB_ISS_ULP(x) ((x) << S_TCB_ISS_ULP)
-
-#define W_TCB_TX_PDU_LEN 26
-#define S_TCB_TX_PDU_LEN 20
-#define M_TCB_TX_PDU_LEN 0x3fffULL
-#define V_TCB_TX_PDU_LEN(x) ((x) << S_TCB_TX_PDU_LEN)
-
-#define W_TCB_TX_PDU_OUT 27
-#define S_TCB_TX_PDU_OUT 2
-#define M_TCB_TX_PDU_OUT 0x1ULL
-#define V_TCB_TX_PDU_OUT(x) ((x) << S_TCB_TX_PDU_OUT)
-
-#define W_TCB_CQ_IDX_SQ 27
-#define S_TCB_CQ_IDX_SQ 3
-#define M_TCB_CQ_IDX_SQ 0xffffULL
-#define V_TCB_CQ_IDX_SQ(x) ((x) << S_TCB_CQ_IDX_SQ)
-
-#define W_TCB_CQ_IDX_RQ 27
-#define S_TCB_CQ_IDX_RQ 19
-#define M_TCB_CQ_IDX_RQ 0xffffULL
-#define V_TCB_CQ_IDX_RQ(x) ((x) << S_TCB_CQ_IDX_RQ)
-
-#define W_TCB_QP_ID 28
-#define S_TCB_QP_ID 3
-#define M_TCB_QP_ID 0xffffULL
-#define V_TCB_QP_ID(x) ((x) << S_TCB_QP_ID)
-
-#define W_TCB_PD_ID 28
-#define S_TCB_PD_ID 19
-#define M_TCB_PD_ID 0xffffULL
-#define V_TCB_PD_ID(x) ((x) << S_TCB_PD_ID)
-
-#define W_TCB_STAG 29
-#define S_TCB_STAG 3
-#define M_TCB_STAG 0xffffffffULL
-#define V_TCB_STAG(x) ((x) << S_TCB_STAG)
-
-#define W_TCB_RQ_START 30
-#define S_TCB_RQ_START 3
-#define M_TCB_RQ_START 0x3ffffffULL
-#define V_TCB_RQ_START(x) ((x) << S_TCB_RQ_START)
-
-#define W_TCB_RQ_MSN 30
-#define S_TCB_RQ_MSN 29
-#define M_TCB_RQ_MSN 0x3ffULL
-#define V_TCB_RQ_MSN(x) ((x) << S_TCB_RQ_MSN)
-
-#define W_TCB_RQ_MAX_OFFSET 31
-#define S_TCB_RQ_MAX_OFFSET 7
-#define M_TCB_RQ_MAX_OFFSET 0xfULL
-#define V_TCB_RQ_MAX_OFFSET(x) ((x) << S_TCB_RQ_MAX_OFFSET)
-
-#define W_TCB_RQ_WRITE_PTR 31
-#define S_TCB_RQ_WRITE_PTR 11
-#define M_TCB_RQ_WRITE_PTR 0x3ffULL
-#define V_TCB_RQ_WRITE_PTR(x) ((x) << S_TCB_RQ_WRITE_PTR)
-
-#define W_TCB_INB_WRITE_PERM 31
-#define S_TCB_INB_WRITE_PERM 21
-#define M_TCB_INB_WRITE_PERM 0x1ULL
-#define V_TCB_INB_WRITE_PERM(x) ((x) << S_TCB_INB_WRITE_PERM)
-
-#define W_TCB_INB_READ_PERM 31
-#define S_TCB_INB_READ_PERM 22
-#define M_TCB_INB_READ_PERM 0x1ULL
-#define V_TCB_INB_READ_PERM(x) ((x) << S_TCB_INB_READ_PERM)
-
-#define W_TCB_ORD_L_BIT_VLD 31
-#define S_TCB_ORD_L_BIT_VLD 23
-#define M_TCB_ORD_L_BIT_VLD 0x1ULL
-#define V_TCB_ORD_L_BIT_VLD(x) ((x) << S_TCB_ORD_L_BIT_VLD)
-
-#define W_TCB_RDMAP_OPCODE 31
-#define S_TCB_RDMAP_OPCODE 24
-#define M_TCB_RDMAP_OPCODE 0xfULL
-#define V_TCB_RDMAP_OPCODE(x) ((x) << S_TCB_RDMAP_OPCODE)
-
-#define W_TCB_TX_FLUSH 31
-#define S_TCB_TX_FLUSH 28
-#define M_TCB_TX_FLUSH 0x1ULL
-#define V_TCB_TX_FLUSH(x) ((x) << S_TCB_TX_FLUSH)
-
-#define W_TCB_TX_OOS_RXMT 31
-#define S_TCB_TX_OOS_RXMT 29
-#define M_TCB_TX_OOS_RXMT 0x1ULL
-#define V_TCB_TX_OOS_RXMT(x) ((x) << S_TCB_TX_OOS_RXMT)
-
-#define W_TCB_TX_OOS_TXMT 31
-#define S_TCB_TX_OOS_TXMT 30
-#define M_TCB_TX_OOS_TXMT 0x1ULL
-#define V_TCB_TX_OOS_TXMT(x) ((x) << S_TCB_TX_OOS_TXMT)
-
-#define W_TCB_SLUSH_AUX2 31
-#define S_TCB_SLUSH_AUX2 31
-#define M_TCB_SLUSH_AUX2 0x1ULL
-#define V_TCB_SLUSH_AUX2(x) ((x) << S_TCB_SLUSH_AUX2)
-
-#define W_TCB_RX_FRAG1_PTR_RAW2 25
-#define S_TCB_RX_FRAG1_PTR_RAW2 30
-#define M_TCB_RX_FRAG1_PTR_RAW2 0x1ffffULL
-#define V_TCB_RX_FRAG1_PTR_RAW2(x) ((x) << S_TCB_RX_FRAG1_PTR_RAW2)
-
-#define W_TCB_RX_DDP_FLAGS 26
-#define S_TCB_RX_DDP_FLAGS 15
-#define M_TCB_RX_DDP_FLAGS 0x3ffULL
-#define V_TCB_RX_DDP_FLAGS(x) ((x) << S_TCB_RX_DDP_FLAGS)
-
-#define W_TCB_SLUSH_AUX3 26
-#define S_TCB_SLUSH_AUX3 31
-#define M_TCB_SLUSH_AUX3 0x1ffULL
-#define V_TCB_SLUSH_AUX3(x) ((x) << S_TCB_SLUSH_AUX3)
-
-#define W_TCB_RX_DDP_BUF0_OFFSET 27
-#define S_TCB_RX_DDP_BUF0_OFFSET 8
-#define M_TCB_RX_DDP_BUF0_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF0_OFFSET)
-
-#define W_TCB_RX_DDP_BUF0_LEN 27
-#define S_TCB_RX_DDP_BUF0_LEN 30
-#define M_TCB_RX_DDP_BUF0_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF0_LEN(x) ((x) << S_TCB_RX_DDP_BUF0_LEN)
-
-#define W_TCB_RX_DDP_BUF1_OFFSET 28
-#define S_TCB_RX_DDP_BUF1_OFFSET 20
-#define M_TCB_RX_DDP_BUF1_OFFSET 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_OFFSET(x) ((x) << S_TCB_RX_DDP_BUF1_OFFSET)
-
-#define W_TCB_RX_DDP_BUF1_LEN 29
-#define S_TCB_RX_DDP_BUF1_LEN 10
-#define M_TCB_RX_DDP_BUF1_LEN 0x3fffffULL
-#define V_TCB_RX_DDP_BUF1_LEN(x) ((x) << S_TCB_RX_DDP_BUF1_LEN)
-
-#define W_TCB_RX_DDP_BUF0_TAG 30
-#define S_TCB_RX_DDP_BUF0_TAG 0
-#define M_TCB_RX_DDP_BUF0_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF0_TAG(x) ((x) << S_TCB_RX_DDP_BUF0_TAG)
-
-#define W_TCB_RX_DDP_BUF1_TAG 31
-#define S_TCB_RX_DDP_BUF1_TAG 0
-#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
-#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-
-#define S_TF_DACK 10
-#define V_TF_DACK(x) ((x) << S_TF_DACK)
-
-#define S_TF_NAGLE 11
-#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
-
-#define S_TF_RECV_SCALE 12
-#define V_TF_RECV_SCALE(x) ((x) << S_TF_RECV_SCALE)
-
-#define S_TF_RECV_TSTMP 13
-#define V_TF_RECV_TSTMP(x) ((x) << S_TF_RECV_TSTMP)
-
-#define S_TF_RECV_SACK 14
-#define V_TF_RECV_SACK(x) ((x) << S_TF_RECV_SACK)
-
-#define S_TF_TURBO 15
-#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
-
-#define S_TF_KEEPALIVE 16
-#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
-
-#define S_TF_TCAM_BYPASS 17
-#define V_TF_TCAM_BYPASS(x) ((x) << S_TF_TCAM_BYPASS)
-
-#define S_TF_CORE_FIN 18
-#define V_TF_CORE_FIN(x) ((x) << S_TF_CORE_FIN)
-
-#define S_TF_CORE_MORE 19
-#define V_TF_CORE_MORE(x) ((x) << S_TF_CORE_MORE)
-
-#define S_TF_MIGRATING 20
-#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
-
-#define S_TF_ACTIVE_OPEN 21
-#define V_TF_ACTIVE_OPEN(x) ((x) << S_TF_ACTIVE_OPEN)
-
-#define S_TF_ASK_MODE 22
-#define V_TF_ASK_MODE(x) ((x) << S_TF_ASK_MODE)
-
-#define S_TF_NON_OFFLOAD 23
-#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
-
-#define S_TF_MOD_SCHD 24
-#define V_TF_MOD_SCHD(x) ((x) << S_TF_MOD_SCHD)
-
-#define S_TF_MOD_SCHD_REASON0 25
-#define V_TF_MOD_SCHD_REASON0(x) ((x) << S_TF_MOD_SCHD_REASON0)
-
-#define S_TF_MOD_SCHD_REASON1 26
-#define V_TF_MOD_SCHD_REASON1(x) ((x) << S_TF_MOD_SCHD_REASON1)
-
-#define S_TF_MOD_SCHD_RX 27
-#define V_TF_MOD_SCHD_RX(x) ((x) << S_TF_MOD_SCHD_RX)
-
-#define S_TF_CORE_PUSH 28
-#define V_TF_CORE_PUSH(x) ((x) << S_TF_CORE_PUSH)
-
-#define S_TF_RCV_COALESCE_ENABLE 29
-#define V_TF_RCV_COALESCE_ENABLE(x) ((x) << S_TF_RCV_COALESCE_ENABLE)
-
-#define S_TF_RCV_COALESCE_PUSH 30
-#define V_TF_RCV_COALESCE_PUSH(x) ((x) << S_TF_RCV_COALESCE_PUSH)
-
-#define S_TF_RCV_COALESCE_LAST_PSH 31
-#define V_TF_RCV_COALESCE_LAST_PSH(x) ((x) << S_TF_RCV_COALESCE_LAST_PSH)
-
-#define S_TF_RCV_COALESCE_HEARTBEAT 32
-#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((x) << S_TF_RCV_COALESCE_HEARTBEAT)
-
-#define S_TF_HALF_CLOSE 33
-#define V_TF_HALF_CLOSE(x) ((x) << S_TF_HALF_CLOSE)
-
-#define S_TF_DACK_MSS 34
-#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
-
-#define S_TF_CCTRL_SEL0 35
-#define V_TF_CCTRL_SEL0(x) ((x) << S_TF_CCTRL_SEL0)
-
-#define S_TF_CCTRL_SEL1 36
-#define V_TF_CCTRL_SEL1(x) ((x) << S_TF_CCTRL_SEL1)
-
-#define S_TF_TCP_NEWRENO_FAST_RECOVERY 37
-#define V_TF_TCP_NEWRENO_FAST_RECOVERY(x) ((x) << S_TF_TCP_NEWRENO_FAST_RECOVERY)
-
-#define S_TF_TX_PACE_AUTO 38
-#define V_TF_TX_PACE_AUTO(x) ((x) << S_TF_TX_PACE_AUTO)
-
-#define S_TF_PEER_FIN_HELD 39
-#define V_TF_PEER_FIN_HELD(x) ((x) << S_TF_PEER_FIN_HELD)
-
-#define S_TF_CORE_URG 40
-#define V_TF_CORE_URG(x) ((x) << S_TF_CORE_URG)
-
-#define S_TF_RDMA_ERROR 41
-#define V_TF_RDMA_ERROR(x) ((x) << S_TF_RDMA_ERROR)
-
-#define S_TF_SSWS_DISABLED 42
-#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
-
-#define S_TF_DUPACK_COUNT_ODD 43
-#define V_TF_DUPACK_COUNT_ODD(x) ((x) << S_TF_DUPACK_COUNT_ODD)
-
-#define S_TF_TX_CHANNEL 44
-#define V_TF_TX_CHANNEL(x) ((x) << S_TF_TX_CHANNEL)
-
-#define S_TF_RX_CHANNEL 45
-#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
-
-#define S_TF_TX_PACE_FIXED 46
-#define V_TF_TX_PACE_FIXED(x) ((x) << S_TF_TX_PACE_FIXED)
-
-#define S_TF_RDMA_FLM_ERROR 47
-#define V_TF_RDMA_FLM_ERROR(x) ((x) << S_TF_RDMA_FLM_ERROR)
-
-#define S_TF_RX_FLOW_CONTROL_DISABLE 48
-#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
-
-#endif /* _TCB_DEFS_H */
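The header deleted above follows one naming convention throughout: for each TCB field, W_* gives the 32-bit word index within the connection's TCB, S_* the bit offset, M_* the unshifted mask, and V_*(x) the value shifted into position. The masks are ULL because a field may straddle the 32-bit word boundary (e.g. PDU_LEN, a 16-bit field starting at bit 17). A hedged sketch of how such quadruples are typically used to update a field in a 64-bit TCB word image; the helper names are illustrative, not part of the driver:

#include <linux/types.h>

/* Same shape as the definitions above, shown for a PDU_LEN-like field:
 * 16-bit value at bit offset 17, spilling past bit 31. */
#define S_DEMO_PDU_LEN		17
#define M_DEMO_PDU_LEN		0xffffULL
#define V_DEMO_PDU_LEN(x)	((x) << S_DEMO_PDU_LEN)

/* Illustrative helpers operating on a 64-bit snapshot of the TCB words. */
static inline u64 demo_set_pdu_len(u64 word, u64 len)
{
	word &= ~V_DEMO_PDU_LEN(M_DEMO_PDU_LEN);	/* clear the old field */
	return word | V_DEMO_PDU_LEN(len & M_DEMO_PDU_LEN);
}

static inline u64 demo_get_pdu_len(u64 word)
{
	return (word >> S_DEMO_PDU_LEN) & M_DEMO_PDU_LEN;
}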
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 347dc242fb88..ee1182f9b627 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3379,7 +3379,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
@@ -3401,7 +3401,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail2;
+ goto fail3;
}
/* find a route */
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 35c284af574d..fe3a7e8561df 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp->rhp = rhp;
- mhp->umem = ib_umem_get(udata, start, length, acc, 0);
+ mhp->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mhp->umem))
goto err_free_skb;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index d373ac0fe2cb..ba83d942997c 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -305,7 +305,10 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
+ int ret = 0;
pr_debug("ibdev %p\n", ibdev);
+ ret = ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
props->port_cap_flags =
IB_PORT_CM_SUP |
@@ -315,11 +318,9 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->active_width = 2;
- props->active_speed = IB_SPEED_DDR;
props->max_msg_sz = -1;
- return 0;
+ return ret;
}
static ssize_t hw_rev_show(struct device *dev,
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 2283e432693e..aa7396a1588a 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -60,8 +60,6 @@ struct efa_dev {
u64 mem_bar_len;
u64 db_bar_addr;
u64 db_bar_len;
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
int admin_msix_vector_idx;
struct efa_irq admin_irq;
@@ -71,8 +69,6 @@ struct efa_dev {
struct efa_ucontext {
struct ib_ucontext ibucontext;
- struct xarray mmap_xa;
- u32 mmap_xa_page;
u16 uarn;
};
@@ -91,6 +87,7 @@ struct efa_cq {
struct efa_ucontext *ucontext;
dma_addr_t dma_addr;
void *cpu_addr;
+ struct rdma_user_mmap_entry *mmap_entry;
size_t size;
u16 cq_idx;
};
@@ -101,6 +98,13 @@ struct efa_qp {
void *rq_cpu_addr;
size_t rq_size;
enum ib_qp_state state;
+
+	/* Used for saving user mmap entries */
+ struct rdma_user_mmap_entry *sq_db_mmap_entry;
+ struct rdma_user_mmap_entry *llq_desc_mmap_entry;
+ struct rdma_user_mmap_entry *rq_db_mmap_entry;
+ struct rdma_user_mmap_entry *rq_mmap_entry;
+
u32 qp_handle;
u32 max_send_wr;
u32 max_recv_wr;
@@ -147,6 +151,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
struct rdma_ah_attr *ah_attr,
u32 flags,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 2be0469d545f..e96bcb16bd2b 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -362,9 +362,13 @@ struct efa_admin_reg_mr_cmd {
/*
* permissions
- * 0 : local_write_enable - Write permissions: value
- * of 1 needed for RQ buffers and for RDMA write
- * 7:1 : reserved1 - remote access flags, etc
+ * 0 : local_write_enable - Local write permissions:
+ * must be set for RQ buffers and buffers posted for
+ * RDMA Read requests
+ * 1 : reserved1 - MBZ
+ * 2 : remote_read_enable - Remote read permissions:
+ * must be set to enable RDMA read from the region
+ * 7:3 : reserved2 - MBZ
*/
u8 permissions;
@@ -558,6 +562,16 @@ struct efa_admin_feature_device_attr_desc {
 	/* Indicates how many bits are used for virtual address access */
u8 virt_addr_width;
+
+ /*
+ * 0 : rdma_read - If set, RDMA Read is supported on
+ * TX queues
+ * 31:1 : reserved - MBZ
+ */
+ u32 device_caps;
+
+ /* Max RDMA transfer size in bytes */
+ u32 max_rdma_size;
};
struct efa_admin_feature_queue_attr_desc {
@@ -604,6 +618,9 @@ struct efa_admin_feature_queue_attr_desc {
/* The maximum size of LLQ in bytes */
u32 max_llq_size;
+
+ /* Maximum number of SGEs for a single RDMA read WQE */
+ u16 max_wr_rdma_sges;
};
struct efa_admin_feature_aenq_desc {
@@ -618,6 +635,7 @@ struct efa_admin_feature_network_attr_desc {
/* Raw address data in network byte order */
u8 addr[16];
+ /* max packet payload size in bytes */
u32 mtu;
};
@@ -780,6 +798,8 @@ struct efa_admin_mmio_req_read_less_resp {
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT 7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK BIT(0)
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT 2
+#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK BIT(2)
/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
@@ -791,4 +811,7 @@ struct efa_admin_mmio_req_read_less_resp {
/* get_set_feature_common_desc */
#define EFA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK GENMASK(1, 0)
+/* feature_device_attr_desc */
+#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
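The reworked permissions comment above defines a byte layout: bit 0 local write, bit 1 reserved (MBZ), bit 2 remote read, bits 7:3 reserved (MBZ). These positions line up with IB_ACCESS_LOCAL_WRITE and IB_ACCESS_REMOTE_READ, which appears to be why efa_reg_mr can now pass access_flags through unmasked. A short sketch of composing the byte from masks of the same shape as the header's (the helper is illustrative, not part of the driver):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_LOCAL_WRITE_ENABLE_MASK	BIT(0)
#define DEMO_REMOTE_READ_ENABLE_MASK	BIT(2)

/* Build the reg_mr permissions byte; reserved bits stay zero (MBZ). */
static inline u8 demo_mr_permissions(bool local_write, bool remote_read)
{
	u8 perms = 0;

	if (local_write)	/* needed for RQ buffers and RDMA Read targets */
		perms |= DEMO_LOCAL_WRITE_ENABLE_MASK;
	if (remote_read)	/* allows RDMA Read from the region */
		perms |= DEMO_REMOTE_READ_ENABLE_MASK;

	return perms;
}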
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 3c412bc5b94f..0778f4f7dccd 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -317,6 +317,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
struct efa_admin_acq_entry *comp,
size_t comp_size_in_bytes)
{
+ struct efa_admin_aq_entry *aqe;
struct efa_comp_ctx *comp_ctx;
u16 queue_size_mask;
u16 cmd_id;
@@ -350,7 +351,9 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
reinit_completion(&comp_ctx->wait_event);
- memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);
+ aqe = &aq->sq.entries[pi];
+ memset(aqe, 0, sizeof(*aqe));
+ memcpy(aqe, cmd, cmd_size_in_bytes);
aq->sq.pc++;
atomic64_inc(&aq->stats.submitted_cmd);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index c079f1332082..e20bd84a1014 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -230,8 +230,7 @@ int efa_com_register_mr(struct efa_com_dev *edev,
mr_cmd.flags |= params->page_shift &
EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
mr_cmd.iova = params->iova;
- mr_cmd.permissions |= params->permissions &
- EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK;
+ mr_cmd.permissions = params->permissions;
if (params->inline_pbl) {
memcpy(mr_cmd.pbl.inline_pbl_array,
@@ -423,28 +422,6 @@ static int efa_com_get_feature(struct efa_com_dev *edev,
return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
}
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result)
-{
- struct efa_admin_get_feature_resp resp;
- int err;
-
- err = efa_com_get_feature(edev, &resp,
- EFA_ADMIN_NETWORK_ATTR);
- if (err) {
- ibdev_err_ratelimited(edev->efa_dev,
- "Failed to get network attributes %d\n",
- err);
- return err;
- }
-
- memcpy(result->addr, resp.u.network_attr.addr,
- sizeof(resp.u.network_attr.addr));
- result->mtu = resp.u.network_attr.mtu;
-
- return 0;
-}
-
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result)
{
@@ -467,6 +444,8 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->phys_addr_width = resp.u.device_attr.phys_addr_width;
result->virt_addr_width = resp.u.device_attr.virt_addr_width;
result->db_bar = resp.u.device_attr.db_bar;
+ result->max_rdma_size = resp.u.device_attr.max_rdma_size;
+ result->device_caps = resp.u.device_attr.device_caps;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
@@ -500,6 +479,19 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
result->max_ah = resp.u.queue_attr.max_ah;
result->max_llq_size = resp.u.queue_attr.max_llq_size;
result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
+ result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
+
+ err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to get network attributes %d\n",
+ err);
+ return err;
+ }
+
+ memcpy(result->addr, resp.u.network_attr.addr,
+ sizeof(resp.u.network_attr.addr));
+ result->mtu = resp.u.network_attr.mtu;
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 7f6c13052f49..31db5a0cbd5b 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -100,14 +100,11 @@ struct efa_com_destroy_ah_params {
u16 pdn;
};
-struct efa_com_get_network_attr_result {
- u8 addr[EFA_GID_SIZE];
- u32 mtu;
-};
-
struct efa_com_get_device_attr_result {
+ u8 addr[EFA_GID_SIZE];
u64 page_size_cap;
u64 max_mr_pages;
+ u32 mtu;
u32 fw_version;
u32 admin_api_version;
u32 device_version;
@@ -124,9 +121,12 @@ struct efa_com_get_device_attr_result {
u32 max_pd;
u32 max_ah;
u32 max_llq_size;
+ u32 max_rdma_size;
+ u32 device_caps;
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
+ u16 max_wr_rdma_sge;
u8 db_bar;
};
@@ -181,12 +181,7 @@ struct efa_com_reg_mr_params {
* address mapping
*/
u8 page_shift;
- /*
- * permissions
- * 0: local_write_enable - Write permissions: value of 1 needed
- * for RQ buffers and for RDMA write:1: reserved1 - remote
- * access flags, etc
- */
+ /* see permissions field of struct efa_admin_reg_mr_cmd */
u8 permissions;
u8 inline_pbl;
u8 indirect;
@@ -271,8 +266,6 @@ int efa_com_create_ah(struct efa_com_dev *edev,
struct efa_com_create_ah_result *result);
int efa_com_destroy_ah(struct efa_com_dev *edev,
struct efa_com_destroy_ah_params *params);
-int efa_com_get_network_attr(struct efa_com_dev *edev,
- struct efa_com_get_network_attr_result *result);
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 83858f7e83d0..faf3ff1bca2a 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -30,15 +30,6 @@ MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-static void efa_update_network_attr(struct efa_dev *dev,
- struct efa_com_get_network_attr_result *network_attr)
-{
- memcpy(dev->addr, network_attr->addr, sizeof(network_attr->addr));
- dev->mtu = network_attr->mtu;
-
- dev_dbg(&dev->pdev->dev, "Full address %pI6\n", dev->addr);
-}
-
 /* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
struct efa_admin_aenq_entry *aenq_e)
@@ -217,6 +208,7 @@ static const struct ib_device_ops efa_dev_ops = {
.get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap,
+ .mmap_free = efa_mmap_free,
.modify_qp = efa_modify_qp,
.query_device = efa_query_device,
.query_gid = efa_query_gid,
@@ -233,7 +225,6 @@ static const struct ib_device_ops efa_dev_ops = {
static int efa_ib_device_add(struct efa_dev *dev)
{
- struct efa_com_get_network_attr_result network_attr;
struct efa_com_get_hw_hints_result hw_hints;
struct pci_dev *pdev = dev->pdev;
int err;
@@ -249,12 +240,6 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
return err;
- err = efa_com_get_network_attr(&dev->edev, &network_attr);
- if (err)
- goto err_release_doorbell_bar;
-
- efa_update_network_attr(dev, &network_attr);
-
err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
if (err)
goto err_release_doorbell_bar;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 4edae89e8e3c..c9d294caa27a 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -13,10 +13,6 @@
#include "efa.h"
-#define EFA_MMAP_FLAG_SHIFT 56
-#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
-#define EFA_MMAP_INVALID U64_MAX
-
enum {
EFA_MMAP_DMA_PAGE = 0,
EFA_MMAP_IO_WC,
@@ -27,20 +23,12 @@ enum {
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
-struct efa_mmap_entry {
- void *obj;
+struct efa_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
u64 address;
- u64 length;
- u32 mmap_page;
u8 mmap_flag;
};
-static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
-{
- return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
- ((u64)efa->mmap_page << PAGE_SHIFT);
-}
-
#define EFA_DEFINE_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \
@@ -82,8 +70,6 @@ static const char *const efa_stats_names[] = {
#define EFA_CHUNK_USED_SIZE \
((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
-#define EFA_SUPPORTED_ACCESS_FLAGS IB_ACCESS_LOCAL_WRITE
-
struct pbl_chunk {
dma_addr_t dma_addr;
u64 *buf;
@@ -147,6 +133,17 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
return container_of(ibah, struct efa_ah, ibah);
}
+static inline struct efa_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
+}
+
+static inline bool is_rdma_read_cap(struct efa_dev *dev)
+{
+ return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
+}
+
#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
FIELD_SIZEOF(typeof(x), fld) <= (sz))
@@ -172,106 +169,6 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
return addr;
}
-/*
- * This is only called when the ucontext is destroyed and there can be no
- * concurrent query via mmap or allocate on the xarray, thus we can be sure no
- * other thread is using the entry pointer. We also know that all the BAR
- * pages have either been zap'd or munmapped at this point. Normal pages are
- * refcounted and will be freed at the proper time.
- */
-static void mmap_entries_remove_free(struct efa_dev *dev,
- struct efa_ucontext *ucontext)
-{
- struct efa_mmap_entry *entry;
- unsigned long mmap_page;
-
- xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
- xa_erase(&ucontext->mmap_xa, mmap_page);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, get_mmap_key(entry), entry->address,
- entry->length);
- if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
- /* DMA mapping is already gone, now free the pages */
- free_pages_exact(phys_to_virt(entry->address),
- entry->length);
- kfree(entry);
- }
-}
-
-static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
- struct efa_ucontext *ucontext,
- u64 key, u64 len)
-{
- struct efa_mmap_entry *entry;
- u64 mmap_page;
-
- mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
- if (mmap_page > U32_MAX)
- return NULL;
-
- entry = xa_load(&ucontext->mmap_xa, mmap_page);
- if (!entry || get_mmap_key(entry) != key || entry->length != len)
- return NULL;
-
- ibdev_dbg(&dev->ibdev,
- "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
- entry->obj, key, entry->address, entry->length);
-
- return entry;
-}
-
-/*
- * Note this locking scheme cannot support removal of entries, except during
- * ucontext destruction when the core code guarantees no concurrency.
- */
-static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
- void *obj, u64 address, u64 length, u8 mmap_flag)
-{
- struct efa_mmap_entry *entry;
- u32 next_mmap_page;
- int err;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return EFA_MMAP_INVALID;
-
- entry->obj = obj;
- entry->address = address;
- entry->length = length;
- entry->mmap_flag = mmap_flag;
-
- xa_lock(&ucontext->mmap_xa);
- if (check_add_overflow(ucontext->mmap_xa_page,
- (u32)(length >> PAGE_SHIFT),
- &next_mmap_page))
- goto err_unlock;
-
- entry->mmap_page = ucontext->mmap_xa_page;
- ucontext->mmap_xa_page = next_mmap_page;
- err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
- GFP_KERNEL);
- if (err)
- goto err_unlock;
-
- xa_unlock(&ucontext->mmap_xa);
-
- ibdev_dbg(
- &dev->ibdev,
- "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
- entry->obj, entry->address, entry->length, get_mmap_key(entry));
-
- return get_mmap_key(entry);
-
-err_unlock:
- xa_unlock(&ucontext->mmap_xa);
- kfree(entry);
- return EFA_MMAP_INVALID;
-
-}
-
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
@@ -306,12 +203,17 @@ int efa_query_device(struct ib_device *ibdev,
dev_attr->max_rq_depth);
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
+ props->max_sge_rd = dev_attr->max_wr_rdma_sge;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
resp.max_rq_sge = dev_attr->max_rq_sge;
resp.max_sq_wr = dev_attr->max_sq_depth;
resp.max_rq_wr = dev_attr->max_rq_depth;
+ resp.max_rdma_size = dev_attr->max_rdma_size;
+
+ if (is_rdma_read_cap(dev))
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
@@ -338,9 +240,9 @@ int efa_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->active_speed = IB_SPEED_EDR;
props->active_width = IB_WIDTH_4X;
- props->max_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->active_mtu = ib_mtu_int_to_enum(dev->mtu);
- props->max_msg_sz = dev->mtu;
+ props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
+ props->max_msg_sz = dev->dev_attr.mtu;
props->max_vl_num = 1;
return 0;
@@ -401,7 +303,7 @@ int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct efa_dev *dev = to_edev(ibdev);
- memcpy(gid->raw, dev->addr, sizeof(dev->addr));
+ memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
return 0;
}
@@ -485,8 +387,19 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
return efa_com_destroy_qp(&dev->edev, &params);
}
+static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
+ struct efa_qp *qp)
+{
+ rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
+ rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
+}
+
int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
+ struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct efa_ucontext, ibucontext);
struct efa_dev *dev = to_edev(ibqp->pd->device);
struct efa_qp *qp = to_eqp(ibqp);
int err;
@@ -505,61 +418,101 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
DMA_TO_DEVICE);
}
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
kfree(qp);
return 0;
}
+static struct rdma_user_mmap_entry*
+efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ u64 address, size_t length,
+ u8 mmap_flag, u64 *offset)
+{
+ struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int err;
+
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_flag = mmap_flag;
+
+ err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
+ length);
+ if (err) {
+ kfree(entry);
+ return NULL;
+ }
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
static int qp_mmap_entries_setup(struct efa_qp *qp,
struct efa_dev *dev,
struct efa_ucontext *ucontext,
struct efa_com_create_qp_params *params,
struct efa_ibv_create_qp_resp *resp)
{
- /*
- * Once an entry is inserted it might be mmapped, hence cannot be
- * cleaned up until dealloc_ucontext.
- */
- resp->sq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->sq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
+ size_t length;
+ u64 address;
+
+ address = dev->db_bar_addr + resp->sq_db_offset;
+ qp->sq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->sq_db_mmap_key);
+ if (!qp->sq_db_mmap_entry)
return -ENOMEM;
resp->sq_db_offset &= ~PAGE_MASK;
- resp->llq_desc_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->mem_bar_addr + resp->llq_desc_offset,
- PAGE_ALIGN(params->sq_ring_size_in_bytes +
- (resp->llq_desc_offset & ~PAGE_MASK)),
- EFA_MMAP_IO_WC);
- if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->mem_bar_addr + resp->llq_desc_offset;
+ length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
+ (resp->llq_desc_offset & ~PAGE_MASK));
+
+ qp->llq_desc_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, length,
+ EFA_MMAP_IO_WC,
+ &resp->llq_desc_mmap_key);
+ if (!qp->llq_desc_mmap_entry)
+ goto err_remove_mmap;
resp->llq_desc_offset &= ~PAGE_MASK;
if (qp->rq_size) {
- resp->rq_db_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- dev->db_bar_addr + resp->rq_db_offset,
- PAGE_SIZE, EFA_MMAP_IO_NC);
- if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = dev->db_bar_addr + resp->rq_db_offset;
+
+ qp->rq_db_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, PAGE_SIZE,
+ EFA_MMAP_IO_NC,
+ &resp->rq_db_mmap_key);
+ if (!qp->rq_db_mmap_entry)
+ goto err_remove_mmap;
resp->rq_db_offset &= ~PAGE_MASK;
- resp->rq_mmap_key =
- mmap_entry_insert(dev, ucontext, qp,
- virt_to_phys(qp->rq_cpu_addr),
- qp->rq_size, EFA_MMAP_DMA_PAGE);
- if (resp->rq_mmap_key == EFA_MMAP_INVALID)
- return -ENOMEM;
+ address = virt_to_phys(qp->rq_cpu_addr);
+ qp->rq_mmap_entry =
+ efa_user_mmap_entry_insert(&ucontext->ibucontext,
+ address, qp->rq_size,
+ EFA_MMAP_DMA_PAGE,
+ &resp->rq_mmap_key);
+ if (!qp->rq_mmap_entry)
+ goto err_remove_mmap;
resp->rq_mmap_size = qp->rq_size;
}
return 0;
+
+err_remove_mmap:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
+
+ return -ENOMEM;
}
static int efa_qp_validate_cap(struct efa_dev *dev,
@@ -634,7 +587,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_ibv_create_qp_resp resp = {};
struct efa_ibv_create_qp cmd = {};
- bool rq_entry_inserted = false;
struct efa_ucontext *ucontext;
struct efa_qp *qp;
int err;
@@ -742,7 +694,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
if (err)
goto err_destroy_qp;
- rq_entry_inserted = true;
qp->qp_handle = create_qp_resp.qp_handle;
qp->ibqp.qp_num = create_qp_resp.qp_num;
qp->ibqp.qp_type = init_attr->qp_type;
@@ -759,7 +710,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
ibdev_dbg(&dev->ibdev,
"Failed to copy udata for qp[%u]\n",
create_qp_resp.qp_num);
- goto err_destroy_qp;
+ goto err_remove_mmap_entries;
}
}
@@ -767,13 +718,16 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
return &qp->ibqp;
+err_remove_mmap_entries:
+ efa_qp_user_mmap_entries_remove(ucontext, qp);
err_destroy_qp:
efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
if (qp->rq_size) {
dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
DMA_TO_DEVICE);
- if (!rq_entry_inserted)
+
+ if (!qp->rq_mmap_entry)
free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
}
err_free_qp:
@@ -897,16 +851,18 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
efa_destroy_cq_idx(dev, cq->cq_idx);
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
}
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
struct efa_ibv_create_cq_resp *resp)
{
resp->q_mmap_size = cq->size;
- resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
- virt_to_phys(cq->cpu_addr),
- cq->size, EFA_MMAP_DMA_PAGE);
- if (resp->q_mmap_key == EFA_MMAP_INVALID)
+ cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ virt_to_phys(cq->cpu_addr),
+ cq->size, EFA_MMAP_DMA_PAGE,
+ &resp->q_mmap_key);
+ if (!cq->mmap_entry)
return -ENOMEM;
return 0;
@@ -924,7 +880,6 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_dev *dev = to_edev(ibdev);
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
- bool cq_entry_inserted = false;
int entries = attr->cqe;
int err;
@@ -1013,15 +968,13 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_destroy_cq;
}
- cq_entry_inserted = true;
-
if (udata->outlen) {
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy udata for create_cq\n");
- goto err_destroy_cq;
+ goto err_remove_mmap;
}
}
@@ -1030,13 +983,16 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
+err_remove_mmap:
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
- if (!cq_entry_inserted)
+ if (!cq->mmap_entry)
free_pages_exact(cq->cpu_addr, cq->size);
+
err_out:
atomic64_inc(&dev->stats.sw_stats.create_cq_err);
return err;
@@ -1396,6 +1352,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
struct efa_com_reg_mr_params params = {};
struct efa_com_reg_mr_result result = {};
struct pbl_context pbl;
+ int supp_access_flags;
unsigned int pg_sz;
struct efa_mr *mr;
int inline_size;
@@ -1409,10 +1366,14 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) {
+ supp_access_flags =
+ IB_ACCESS_LOCAL_WRITE |
+ (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
+
+ if (access_flags & ~supp_access_flags) {
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
- access_flags, EFA_SUPPORTED_ACCESS_FLAGS);
+ access_flags, supp_access_flags);
err = -EOPNOTSUPP;
goto err_out;
}
@@ -1423,7 +1384,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_out;
}
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
@@ -1434,7 +1395,7 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
params.mr_length_in_bytes = length;
- params.permissions = access_flags & 0x1;
+ params.permissions = access_flags;
pg_sz = ib_umem_find_best_pgsz(mr->umem,
dev->dev_attr.page_size_cap,
@@ -1556,7 +1517,6 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
goto err_out;
ucontext->uarn = result.uarn;
- xa_init(&ucontext->mmap_xa);
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
@@ -1585,38 +1545,56 @@ void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- mmap_entries_remove_free(dev, ucontext);
efa_dealloc_uar(dev, ucontext->uarn);
}
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+ /* DMA mapping is already gone, now free the pages */
+ if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
+ free_pages_exact(phys_to_virt(entry->address),
+ entry->rdma_entry.npages * PAGE_SIZE);
+ kfree(entry);
+}
+
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
- struct vm_area_struct *vma, u64 key, u64 length)
+ struct vm_area_struct *vma)
{
- struct efa_mmap_entry *entry;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct efa_user_mmap_entry *entry;
unsigned long va;
+ int err = 0;
u64 pfn;
- int err;
- entry = mmap_entry_get(dev, ucontext, key, length);
- if (!entry) {
- ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
- key);
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev,
+ "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
+ entry = to_emmap(rdma_entry);
ibdev_dbg(&dev->ibdev,
- "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
- entry->address, length, entry->mmap_flag);
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag);
pfn = entry->address >> PAGE_SHIFT;
switch (entry->mmap_flag) {
case EFA_MMAP_IO_NC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_noncached(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_IO_WC:
- err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
- pgprot_writecombine(vma->vm_page_prot));
+ err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+ entry->rdma_entry.npages * PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
break;
case EFA_MMAP_DMA_PAGE:
for (va = vma->vm_start; va < vma->vm_end;
@@ -1633,12 +1611,13 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
if (err) {
ibdev_dbg(
&dev->ibdev,
- "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
- entry->address, length, entry->mmap_flag, err);
- return err;
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->address, rdma_entry->npages * PAGE_SIZE,
+ entry->mmap_flag, err);
}
- return 0;
+ rdma_user_mmap_entry_put(rdma_entry);
+ return err;
}
int efa_mmap(struct ib_ucontext *ibucontext,
@@ -1646,26 +1625,13 @@ int efa_mmap(struct ib_ucontext *ibucontext,
{
struct efa_ucontext *ucontext = to_eucontext(ibucontext);
struct efa_dev *dev = to_edev(ibucontext->device);
- u64 length = vma->vm_end - vma->vm_start;
- u64 key = vma->vm_pgoff << PAGE_SHIFT;
+ size_t length = vma->vm_end - vma->vm_start;
ibdev_dbg(&dev->ibdev,
- "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
- vma->vm_start, vma->vm_end, length, key);
-
- if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
- ibdev_dbg(&dev->ibdev,
- "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
- length, PAGE_SIZE, vma->vm_flags);
- return -EINVAL;
- }
-
- if (vma->vm_flags & VM_EXEC) {
- ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
- return -EPERM;
- }
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- return __efa_mmap(dev, ucontext, vma, key, length);
+ return __efa_mmap(dev, ucontext, vma);
}
static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
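A minimal sketch, not part of this patch, of the rdma_user_mmap_entry lifecycle the EFA hunks above adopt: insert an entry at object-creation time, hand the pgoff-based key to userspace, resolve and refcount it in mmap, and free it in the mmap_free callback. The my_* names are illustrative assumptions, not this driver's code:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u64 address;
};

/* Insert at object-creation time; the returned key goes to userspace. */
static int my_insert(struct ib_ucontext *uctx, u64 addr, size_t size, u64 *key)
{
        struct my_mmap_entry *entry;
        int err;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
        entry->address = addr;

        /* On success the core owns the entry; it is released later through
         * the ib_device_ops.mmap_free callback (efa_mmap_free() above). */
        err = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, size);
        if (err) {
                kfree(entry);
                return err;
        }
        *key = rdma_user_mmap_get_offset(&entry->rdma_entry);
        return 0;
}

static int my_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
        struct rdma_user_mmap_entry *rdma_entry;
        struct my_mmap_entry *entry;
        int err;

        /* Looks the entry up by vma->vm_pgoff and takes a reference. */
        rdma_entry = rdma_user_mmap_entry_get(uctx, vma);
        if (!rdma_entry)
                return -EINVAL;
        entry = container_of(rdma_entry, struct my_mmap_entry, rdma_entry);

        err = rdma_user_mmap_io(uctx, vma, entry->address >> PAGE_SHIFT,
                                rdma_entry->npages * PAGE_SIZE,
                                pgprot_noncached(vma->vm_page_prot),
                                rdma_entry);

        rdma_user_mmap_entry_put(rdma_entry);
        return err;
}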
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f9a7e9d29c8b..7c5e3fb22413 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1138,7 +1138,7 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
/* adjust flag if this fd is not able to cache */
- if (!fd->handler)
+ if (!fd->use_mn)
cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
cinfo.num_active = hfi1_count_active_units();
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index fa45350a9a1d..fc10d65fc3e1 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1444,7 +1444,7 @@ struct hfi1_filedata {
/* for cpu affinity; -1 if none */
int rec_cpu_num;
u32 tid_n_pinned;
- struct mmu_rb_handler *handler;
+ bool use_mn;
struct tid_rb_node **entry_to_rb;
spinlock_t tid_lock; /* protect tid_[limit,used] counters */
u32 tid_limit;
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index d8ff063a5419..a51bcd2b4391 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4915,16 +4915,11 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
*/
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
- switch (in_mad->base_version) {
+ switch (in_mad->mad_hdr.base_version) {
case OPA_MGMT_BASE_VERSION:
- if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
- dev_err(ibdev->dev.parent, "invalid in_mad_size\n");
- return IB_MAD_RESULT_FAILURE;
- }
return hfi1_process_opa_mad(ibdev, mad_flags, port,
in_wc, in_grh,
(struct opa_mad *)in_mad,
@@ -4932,10 +4927,8 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
out_mad_size,
out_mad_pkey_index);
case IB_MGMT_BASE_VERSION:
- return hfi1_process_ib_mad(ibdev, mad_flags, port,
- in_wc, in_grh,
- (const struct ib_mad *)in_mad,
- (struct ib_mad *)out_mad);
+ return hfi1_process_ib_mad(ibdev, mad_flags, port, in_wc,
+ in_grh, in_mad, out_mad);
default:
break;
}
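The mad.c hunk above switches hfi1_process_mad() from raw struct ib_mad_hdr pointers plus an explicit in_mad_size to fully typed struct ib_mad parameters, which is why the per-driver size check could be dropped. A minimal sketch of the resulting dispatch shape, with hypothetical handlers standing in for the driver's OPA and IB paths:

#include <rdma/ib_mad.h>

/* Hypothetical handlers standing in for hfi1_process_opa_mad() and
 * hfi1_process_ib_mad(). */
static int handle_opa_mad(const struct opa_mad *in_mad)
{
        return IB_MAD_RESULT_SUCCESS;
}

static int handle_ib_mad(const struct ib_mad *in_mad)
{
        return IB_MAD_RESULT_SUCCESS;
}

static int process_mad_sketch(const struct ib_mad *in_mad)
{
        switch (in_mad->mad_hdr.base_version) {
        case OPA_MGMT_BASE_VERSION:
                /* The core now always hands over a buffer large enough for
                 * an OPA MAD, so the old in_mad_size check is redundant. */
                return handle_opa_mad((const struct opa_mad *)in_mad);
        case IB_MGMT_BASE_VERSION:
                return handle_ib_mad(in_mad);
        default:
                return IB_MAD_RESULT_FAILURE;
        }
}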
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index cbf7faa5038c..36593f2efe26 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -634,7 +634,7 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
u32 config_data, const char *message)
{
u8 i;
- int ret = HCMD_SUCCESS;
+ int ret;
for (i = 0; i < 4; i++) {
ret = load_8051_config(ppd->dd, field_id, i, config_data);
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 3592a9ec155e..f05742ac0949 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -59,11 +59,11 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
struct tid_user_buf *tbuf,
u32 rcventry, struct tid_group *grp,
u16 pageidx, unsigned int npages);
-static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
struct tid_rb_node *tnode);
-static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
-static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
+static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
struct tid_group *grp,
unsigned int start, u16 count,
@@ -73,10 +73,8 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
-static struct mmu_rb_ops tid_rb_ops = {
- .insert = tid_rb_insert,
- .remove = tid_rb_remove,
- .invalidate = tid_rb_invalidate
+static const struct mmu_interval_notifier_ops tid_mn_ops = {
+ .invalidate = tid_rb_invalidate,
};
/*
@@ -87,7 +85,6 @@ static struct mmu_rb_ops tid_rb_ops = {
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
spin_lock_init(&fd->tid_lock);
@@ -109,20 +106,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
fd->entry_to_rb = NULL;
return -ENOMEM;
}
-
- /*
- * Register MMU notifier callbacks. If the registration
- * fails, continue without TID caching for this context.
- */
- ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
- dd->pport->hfi1_wq,
- &fd->handler);
- if (ret) {
- dd_dev_info(dd,
- "Failed MMU notifier registration %d\n",
- ret);
- ret = 0;
- }
+ fd->use_mn = true;
}
/*
@@ -139,7 +123,7 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
* init.
*/
spin_lock(&fd->tid_lock);
- if (uctxt->subctxt_cnt && fd->handler) {
+ if (uctxt->subctxt_cnt && fd->use_mn) {
u16 remainder;
fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
@@ -158,18 +142,10 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- /*
- * The notifier would have been removed when the process'es mm
- * was freed.
- */
- if (fd->handler) {
- hfi1_mmu_rb_unregister(fd->handler);
- } else {
- if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
- unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
- if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
- unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
- }
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
kfree(fd->invalid_tids);
fd->invalid_tids = NULL;
@@ -201,7 +177,7 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
if (mapped) {
pci_unmap_single(dd->pcidev, node->dma_addr,
- node->mmu.len, PCI_DMA_FROMDEVICE);
+ node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
pages = &node->pages[idx];
} else {
pages = &tidbuf->pages[idx];
@@ -777,8 +753,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
return -EFAULT;
}
- node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
- node->mmu.len = npages * PAGE_SIZE;
+ node->fdata = fd;
node->phys = page_to_phys(pages[0]);
node->npages = npages;
node->rcventry = rcventry;
@@ -787,23 +762,35 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
node->freed = false;
memcpy(node->pages, pages, sizeof(struct page *) * npages);
- if (!fd->handler)
- ret = tid_rb_insert(fd, &node->mmu);
- else
- ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);
-
- if (ret) {
- hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
- node->rcventry, node->mmu.addr, node->phys, ret);
- pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- kfree(node);
- return -EFAULT;
+ if (fd->use_mn) {
+ ret = mmu_interval_notifier_insert(
+ &node->notifier, fd->mm,
+ tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
+ &tid_mn_ops);
+ if (ret)
+ goto out_unmap;
+ /*
+ * FIXME: This is in the wrong order, the notifier should be
+ * established before the pages are pinned by pin_rcv_pages.
+ */
+ mmu_interval_read_begin(&node->notifier);
}
+ fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
+
hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
- node->mmu.addr, node->phys, phys);
+ node->notifier.interval_tree.start, node->phys,
+ phys);
return 0;
+
+out_unmap:
+ hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
+ node->rcventry, node->notifier.interval_tree.start,
+ node->phys, ret);
+ pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ kfree(node);
+ return -EFAULT;
}
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
@@ -833,10 +820,9 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
if (grp)
*grp = node->grp;
- if (!fd->handler)
- cacheless_tid_rb_remove(fd, node);
- else
- hfi1_mmu_rb_remove(fd->handler, &node->mmu);
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(&node->notifier);
+ cacheless_tid_rb_remove(fd, node);
return 0;
}
@@ -847,7 +833,8 @@ static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
struct hfi1_devdata *dd = uctxt->dd;
trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
- node->npages, node->mmu.addr, node->phys,
+ node->npages,
+ node->notifier.interval_tree.start, node->phys,
node->dma_addr);
/*
@@ -894,30 +881,29 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
if (!node || node->rcventry != rcventry)
continue;
+ if (fd->use_mn)
+ mmu_interval_notifier_remove(
+ &node->notifier);
cacheless_tid_rb_remove(fd, node);
}
}
}
}
-/*
- * Always return 0 from this function. A non-zero return indicates that the
- * remove operation will be called and that memory should be unpinned.
- * However, the driver cannot unpin out from under PSM. Instead, retain the
- * memory (by returning 0) and inform PSM that the memory is going away. PSM
- * will call back later when it has removed the memory from its list.
- */
-static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
+static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
- struct hfi1_filedata *fdata = arg;
- struct hfi1_ctxtdata *uctxt = fdata->uctxt;
struct tid_rb_node *node =
- container_of(mnode, struct tid_rb_node, mmu);
+ container_of(mni, struct tid_rb_node, notifier);
+ struct hfi1_filedata *fdata = node->fdata;
+ struct hfi1_ctxtdata *uctxt = fdata->uctxt;
if (node->freed)
- return 0;
+ return true;
- trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
+ trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
+ node->notifier.interval_tree.start,
node->rcventry, node->npages, node->dma_addr);
node->freed = true;
@@ -946,18 +932,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
fdata->invalid_tid_idx++;
}
spin_unlock(&fdata->invalid_lock);
- return 0;
-}
-
-static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
-{
- struct hfi1_filedata *fdata = arg;
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
- u32 base = fdata->uctxt->expected_base;
-
- fdata->entry_to_rb[tnode->rcventry - base] = tnode;
- return 0;
+ return true;
}
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
@@ -968,12 +943,3 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
fdata->entry_to_rb[tnode->rcventry - base] = NULL;
clear_tid_node(fdata, tnode);
}
-
-static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
-{
- struct hfi1_filedata *fdata = arg;
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
-
- cacheless_tid_rb_remove(fdata, tnode);
-}
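The conversion above replaces the driver's private mmu_rb interval tree (and its insert/remove callbacks) with the core mmu_interval_notifier: each pinned TID range registers its own notifier, and invalidation only marks the node stale and notifies PSM rather than unpinning. A minimal sketch of the pattern, assuming a hypothetical tracked-range struct:

#include <linux/mmu_notifier.h>

/* Hypothetical tracked range; tid_rb_node above plays this role. */
struct my_range {
        struct mmu_interval_notifier notifier;
        bool freed;
};

static bool my_invalidate(struct mmu_interval_notifier *mni,
                          const struct mmu_notifier_range *range,
                          unsigned long cur_seq)
{
        struct my_range *r = container_of(mni, struct my_range, notifier);

        /* Like tid_rb_invalidate() above: the pages cannot be unpinned out
         * from under the consumer, so only mark the range stale here and
         * let the consumer release it later. */
        r->freed = true;
        return true;
}

static const struct mmu_interval_notifier_ops my_mn_ops = {
        .invalidate = my_invalidate,
};

static int my_track(struct my_range *r, struct mm_struct *mm,
                    unsigned long start, unsigned long len)
{
        int ret;

        ret = mmu_interval_notifier_insert(&r->notifier, mm, start, len,
                                           &my_mn_ops);
        if (ret)
                return ret;
        /* Pair with mmu_interval_notifier_remove() on teardown; as the
         * FIXME above notes, pages should ideally be pinned only after
         * this point. */
        mmu_interval_read_begin(&r->notifier);
        return 0;
}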
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 43b105de1d54..6257eee083a1 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -65,7 +65,8 @@ struct tid_user_buf {
};
struct tid_rb_node {
- struct mmu_rb_node mmu;
+ struct mmu_interval_notifier notifier;
+ struct hfi1_filedata *fdata;
unsigned long phys;
struct tid_group *grp;
u32 rcventry;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index ae9582ddbc8f..b0e9bf7cd150 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -330,9 +330,8 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
/*
* The PSN_MASK and PSN_SHIFT allow for
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index d602b698b57e..4921c1e40ccd 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,23 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- bool "HNS RoCE Driver"
+ tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on (HNS_DSAF && HNS_ENET) || HNS3
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hip06 and later ICT SoCs based on
platform devices.
+ To compile the HIP06 or HIP08 driver as a module, choose M here.
+
config INFINIBAND_HNS_HIP06
- tristate "Hisilicon Hip06 Family RoCE support"
+ bool "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+ depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.
+ To compile this driver, choose Y here; if INFINIBAND_HNS is m, the
+ module will be called hns-roce-hw-v1.
+
config INFINIBAND_HNS_HIP08
- tristate "Hisilicon Hip08 Family RoCE support"
+ bool "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
+ depends on INFINIBAND_HNS=m || HNS3=y
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
+
+ To compile this driver, choose Y here; if INFINIBAND_HNS is m, the
+ module will be called hns-roce-hw-v2.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 449a2d81319d..e105945b94a1 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
+ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
+endif
+ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
+endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 90e08c0c332d..8a522e14ef62 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -46,32 +46,32 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
- u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u16 vlan_id = 0xffff;
bool vlan_en = false;
int ret;
gid_attr = ah_attr->grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan_tag, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
/* Get mac address */
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
- if (vlan_tag < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
vlan_en = true;
- vlan_tag |= (rdma_ah_get_sl(ah_attr) &
+ vlan_id |= (rdma_ah_get_sl(ah_attr) &
HNS_ROCE_VLAN_SL_BIT_MASK) <<
HNS_ROCE_VLAN_SL_SHIFT;
}
ah->av.port = rdma_ah_get_port_num(ah_attr);
ah->av.gid_index = grh->sgid_index;
- ah->av.vlan = vlan_tag;
+ ah->av.vlan_id = vlan_id;
ah->av.vlan_en = vlan_en;
- dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
- ah->av.vlan);
+ dev_dbg(dev, "gid_index = 0x%x,vlan_id = 0x%x\n", ah->av.gid_index,
+ ah->av.vlan_id);
if (rdma_ah_get_static_rate(ah_attr))
ah->av.stat_rate = IB_RATE_10_GBPS;
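The rename above also tightens the validity test: rdma_read_gid_l2_fields() reports 0xffff when no VLAN is resolved, and only IDs below VLAN_N_VID (4096) are real VLAN IDs, so the old VLAN_CFI_MASK comparison was the wrong bound. A small sketch of the resulting logic; the SL mask and shift values here are illustrative stand-ins for the driver's HNS_ROCE_VLAN_SL_* defines:

#include <linux/if_vlan.h>      /* VLAN_N_VID == 4096 */
#include <linux/types.h>

/* Illustrative stand-ins for HNS_ROCE_VLAN_SL_BIT_MASK/_SHIFT. */
#define MY_SL_MASK  0x7
#define MY_SL_SHIFT 13

static u16 build_av_vlan_id(u16 resolved_id, u8 sl, bool *vlan_en)
{
        /* rdma_read_gid_l2_fields() reports 0xffff when no VLAN is set. */
        if (resolved_id < VLAN_N_VID) {
                *vlan_en = true;
                /* VLAN IDs occupy bits 0-11, so the SL fits above them. */
                return resolved_id | ((sl & MY_SL_MASK) << MY_SL_SHIFT);
        }
        *vlan_en = false;
        return 0xffff;
}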
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 8c063c598d2a..da574c26e063 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -55,7 +55,7 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
bitmap->last = 0;
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
@@ -100,7 +100,7 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
}
*obj |= bitmap->top;
} else {
- ret = -1;
+ ret = -EINVAL;
}
spin_unlock(&bitmap->lock);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 2b6ac646ca9a..1915bacaded0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -115,12 +115,12 @@ enum {
enum {
/* TPT commands */
- HNS_ROCE_CMD_SW2HW_MPT = 0xd,
- HNS_ROCE_CMD_HW2SW_MPT = 0xf,
+ HNS_ROCE_CMD_CREATE_MPT = 0xd,
+ HNS_ROCE_CMD_DESTROY_MPT = 0xf,
/* CQ commands */
- HNS_ROCE_CMD_SW2HW_CQ = 0x16,
- HNS_ROCE_CMD_HW2SW_CQ = 0x17,
+ HNS_ROCE_CMD_CREATE_CQC = 0x16,
+ HNS_ROCE_CMD_DESTROY_CQC = 0x17,
/* QP/EE commands */
HNS_ROCE_CMD_RST2INIT_QP = 0x19,
@@ -129,14 +129,14 @@ enum {
HNS_ROCE_CMD_RTS2RTS_QP = 0x1c,
HNS_ROCE_CMD_2ERR_QP = 0x1e,
HNS_ROCE_CMD_RTS2SQD_QP = 0x1f,
- HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
HNS_ROCE_CMD_SQD2RTS_QP = 0x20,
HNS_ROCE_CMD_2RST_QP = 0x21,
HNS_ROCE_CMD_QUERY_QP = 0x22,
- HNS_ROCE_CMD_SW2HW_SRQ = 0x70,
+ HNS_ROCE_CMD_SQD2SQD_QP = 0x38,
+ HNS_ROCE_CMD_CREATE_SRQ = 0x70,
HNS_ROCE_CMD_MODIFY_SRQC = 0x72,
HNS_ROCE_CMD_QUERY_SRQC = 0x73,
- HNS_ROCE_CMD_HW2SW_SRQ = 0x74,
+ HNS_ROCE_CMD_DESTROY_SRQ = 0x74,
};
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 22541d19cd09..af1d8823b3f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,8 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
-{
- struct ib_cq *ibcq = &hr_cq->ib_cq;
-
- ibcq->comp_handler(ibcq, ibcq->cq_context);
-}
-
-static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
- enum hns_roce_event event_type)
-{
- struct hns_roce_dev *hr_dev;
- struct ib_event event;
- struct ib_cq *ibcq;
-
- ibcq = &hr_cq->ib_cq;
- hr_dev = to_hr_dev(ibcq->device);
-
- if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
- event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(hr_dev->dev,
- "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
- event_type, hr_cq->cqn);
- return;
- }
-
- if (ibcq->event_handler) {
- event.device = ibcq->device;
- event.event = IB_EVENT_CQ_ERR;
- event.element.cq = ibcq;
- ibcq->event_handler(&event, ibcq->cq_context);
- }
-}
-
-static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
- HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
- struct hns_roce_mtt *hr_mtt,
- struct hns_roce_cq *hr_cq, int vector)
+static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_hem_table *mtt_table;
@@ -101,35 +58,32 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
else
mtt_table = &hr_dev->mr_table.mtt_table;
- mtts = hns_roce_table_find(hr_dev, mtt_table,
- hr_mtt->first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
- return -EINVAL;
- }
+ mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
+ &dma_handle);
- if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "CQ alloc.Invalid vector.\n");
+ if (!mtts) {
+ dev_err(dev, "Failed to find mtt for CQ buf.\n");
return -EINVAL;
}
- hr_cq->vector = vector;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
- if (ret == -1) {
- dev_err(dev, "CQ alloc.Failed to alloc index.\n");
- return -ENOMEM;
+ if (ret) {
+ dev_err(dev, "Num of CQ out of range.\n");
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to get context mem.\n");
+ dev_err(dev,
+ "Get context mem failed(%d) when allocating CQ(0x%lx).\n",
+ ret, hr_cq->cqn);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "CQ alloc failed xa_store.\n");
+ dev_err(dev, "Failed to xa_store CQ.\n");
goto err_put;
}
@@ -140,14 +94,16 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
goto err_xa;
}
- hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
- nent, vector);
+ hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
/* Send mailbox to hw */
- ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,
+ HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev, "CQ alloc.Failed to cmd mailbox.\n");
+ dev_err(dev,
+ "Send cmd mailbox failed(%d) when allocating CQ(0x%lx).\n",
+ ret, hr_cq->cqn);
goto err_xa;
}
@@ -170,24 +126,17 @@ err_out:
return ret;
}
-static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long cq_num)
-{
- return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-}
-
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
int ret;
- ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_DESTROY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
+ dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
xa_erase(&cq_table->array, hr_cq->cqn);
@@ -204,103 +153,91 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
- struct ib_udata *udata,
- struct hns_roce_cq_buf *buf,
- struct ib_umem **umem, u64 buf_addr, int cqe)
+static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct hns_roce_ib_create_cq ucmd,
+ struct ib_udata *udata)
{
- int ret;
- u32 page_shift;
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
+ struct ib_umem **umem = &hr_cq->umem;
u32 npages;
+ int ret;
- *umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
- IB_ACCESS_LOCAL_WRITE, 1);
+ *umem = ib_umem_get(udata, ucmd.buf_addr, buf->size,
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
-
- if (hr_dev->caps.cqe_buf_pg_sz) {
- npages = (ib_umem_page_count(*umem) +
- (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.cqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
- &buf->hr_mtt);
- } else {
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- PAGE_SHIFT, &buf->hr_mtt);
- }
+ mtt->mtt_type = MTT_TYPE_WQE;
+
+ npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
+ 1 << hr_dev->caps.cqe_buf_pg_sz);
+ ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
+ ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
ib_umem_release(*umem);
return ret;
}
-static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, u32 nent)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
+ struct hns_roce_buf *buf = &hr_cq->buf;
+ struct hns_roce_mtt *mtt = &hr_cq->mtt;
int ret;
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
- ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- (1 << page_shift) * 2, &buf->hr_buf,
- page_shift);
+ ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
+ buf, buf->page_shift);
if (ret)
goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ mtt->mtt_type = MTT_TYPE_CQE;
else
- buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+ mtt->mtt_type = MTT_TYPE_WQE;
- ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
- buf->hr_buf.page_shift, &buf->hr_mtt);
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
if (ret)
goto err_buf;
- ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
+ ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
if (ret)
goto err_mtt;
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, mtt);
err_buf:
- hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, buf->size, buf);
+
out:
return ret;
}
-static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq_buf *buf, int cqe)
+static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
- &buf->hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
static int create_user_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq,
struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp,
- int cq_entries)
+ struct hns_roce_ib_create_cq_resp *resp)
{
struct hns_roce_ib_create_cq ucmd;
struct device *dev = hr_dev->dev;
@@ -314,9 +251,7 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
}
/* Get user space address, write it into mtt table */
- ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
- &hr_cq->umem, ucmd.buf_addr,
- cq_entries);
+ ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
if (ret) {
dev_err(dev, "Failed to get_cq_umem.\n");
return ret;
@@ -337,17 +272,16 @@ static int create_user_cq(struct hns_roce_dev *hr_dev,
return 0;
err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
return ret;
}
static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, int cq_entries)
+ struct hns_roce_cq *hr_cq)
{
struct device *dev = hr_dev->dev;
- struct hns_roce_uar *uar;
int ret;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
@@ -361,15 +295,14 @@ static int create_kernel_cq(struct hns_roce_dev *hr_dev,
}
/* Init mtt table and write buff address to mtt table */
- ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf, cq_entries);
+ ret = alloc_cq_buf(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
goto err_db;
}
- uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * uar->index;
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
return 0;
@@ -392,64 +325,69 @@ static void destroy_user_cq(struct hns_roce_dev *hr_dev,
(udata->outlen >= sizeof(*resp)))
hns_roce_db_unmap_user(context, &hr_cq->db);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
}
static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq)
{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct device *dev = hr_dev->dev;
int vector = attr->comp_vector;
- int cq_entries = attr->cqe;
+ u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
+ dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
- if (hr_dev->caps.min_cqes)
- cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+ dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
- cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
- hr_cq->ib_cq.cqe = cq_entries - 1;
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
+ cq_entries = roundup_pow_of_two(cq_entries);
+ hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
+ hr_cq->cq_depth = cq_entries;
+ hr_cq->vector = vector;
+ hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp, cq_entries);
+ ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
if (ret) {
dev_err(dev, "Create cq failed in user mode!\n");
goto err_cq;
}
} else {
- ret = create_kernel_cq(hr_dev, hr_cq, cq_entries);
+ ret = create_kernel_cq(hr_dev, hr_cq);
if (ret) {
dev_err(dev, "Create cq failed in kernel mode!\n");
goto err_cq;
}
}
- /* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- hr_cq, vector);
+ ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
if (ret) {
- dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
+ dev_err(dev, "Alloc CQ failed(%d).\n", ret);
goto err_dbmap;
}
@@ -462,11 +400,6 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
- /* Get created cq handler and carry out event */
- hr_cq->comp = hns_roce_ib_cq_comp;
- hr_cq->event = hns_roce_ib_cq_event;
- hr_cq->cq_depth = cq_entries;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -477,7 +410,7 @@ int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
return 0;
err_cqc:
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
err_dbmap:
if (udata)
@@ -489,7 +422,7 @@ err_cq:
return ret;
}
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -499,8 +432,8 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
return;
}
- hns_roce_free_cq(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_free_cqc(hr_dev, hr_cq);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (udata) {
@@ -512,7 +445,7 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
&hr_cq->db);
} else {
/* Free the buff of stored cq */
- hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+ free_cq_buf(hr_dev, hr_cq);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_cq->db);
}
@@ -520,38 +453,57 @@ void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_cq *ibcq;
- cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1));
- if (!cq) {
- dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ cqn);
return;
}
- ++cq->arm_sn;
- cq->comp(cq);
+ ++hr_cq->arm_sn;
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->comp_handler)
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
- struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
- struct hns_roce_cq *cq;
+ struct hns_roce_cq *hr_cq;
+ struct ib_event event;
+ struct ib_cq *ibcq;
- cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (!hr_cq) {
+ dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
- if (!cq) {
- dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
+ event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
+ dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ event_type, cqn);
return;
}
- cq->event(cq, (enum hns_roce_event)event_type);
+ atomic_inc(&hr_cq->refcount);
+
+ ibcq = &hr_cq->ib_cq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ event.event = IB_EVENT_CQ_ERR;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
}
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
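hns_roce_create_cq() above clamps the requested depth to min_cqes, rounds it up to a power of two, and keeps both cq_depth and ib_cq.cqe = depth - 1; the power-of-two depth is what lets get_sw_cqe()/get_sw_cqe_v2() later in this series test CQE ownership with a single AND against cq_depth. A short worked sketch of the arithmetic, with a made-up cap for illustration:

#include <linux/kernel.h>
#include <linux/log2.h>

/* Worked example of the sizing in hns_roce_create_cq(), assuming a
 * min_cqes cap of 64. */
static u32 size_cq_example(u32 requested)
{
        u32 depth;

        depth = max(requested, 64u);        /* e.g. 100 -> 100, 10 -> 64 */
        depth = roundup_pow_of_two(depth);  /* 100 -> 128 */

        /* ib_cq.cqe stores depth - 1 and doubles as an index mask
         * (128 -> 0x7f), while (n & depth) gives the wrap parity of
         * consumer index n that get_sw_cqe()/get_sw_cqe_v2() compare
         * against the CQE owner bit. */
        return depth;
}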
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index c00714c2f16a..10af6958ab69 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -31,7 +31,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
refcount_set(&page->refcount, 1);
page->user_virt = page_addr;
- page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 96d1302abde1..5617434cbfb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -45,7 +45,7 @@
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
-#define HNS_ROCE_ALOGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
+#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
@@ -53,8 +53,6 @@
#define BA_BYTE_LEN 8
-#define BITS_PER_BYTE 8
-
/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20
@@ -426,7 +424,6 @@ struct hns_roce_wq {
u64 *wrid; /* Work request ID */
spinlock_t lock;
int wqe_cnt; /* WQE num */
- u32 max_post;
int max_gs;
int offset;
int wqe_shift; /* WQE size */
@@ -451,6 +448,7 @@ struct hns_roce_buf {
struct hns_roce_buf_list *page_list;
int nbufs;
u32 npages;
+ u32 size;
int page_shift;
};
@@ -482,22 +480,14 @@ struct hns_roce_db {
int order;
};
-struct hns_roce_cq_buf {
- struct hns_roce_buf hr_buf;
- struct hns_roce_mtt hr_mtt;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_cq_buf hr_buf;
+ struct hns_roce_buf buf;
+ struct hns_roce_mtt mtt;
struct hns_roce_db db;
u8 db_en;
spinlock_t lock;
struct ib_umem *umem;
- void (*comp)(struct hns_roce_cq *cq);
- void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type);
-
- struct hns_roce_uar *uar;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -521,9 +511,8 @@ struct hns_roce_idx_que {
struct hns_roce_srq {
struct ib_srq ibsrq;
- void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
unsigned long srqn;
- int max;
+ u32 wqe_cnt;
int max_gs;
int wqe_shift;
void __iomem *db_reg_l;
@@ -539,8 +528,8 @@ struct hns_roce_srq {
spinlock_t lock;
int head;
int tail;
- u16 wqe_ctr;
struct mutex mutex;
+ void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
struct hns_roce_uar_table {
@@ -582,7 +571,7 @@ struct hns_roce_av {
u8 tclass;
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[ETH_ALEN];
- u16 vlan;
+ u16 vlan_id;
bool vlan_en;
};
@@ -695,10 +684,6 @@ struct hns_roce_qp {
struct hns_roce_rinl_buf rq_inl_buf;
};
-struct hns_roce_sqp {
- struct hns_roce_qp hr_qp;
-};
-
struct hns_roce_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
@@ -821,8 +806,8 @@ struct hns_roce_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int num_cqs;
- int max_cqes;
- int min_cqes;
+ u32 max_cqes;
+ u32 min_cqes;
u32 min_wqes;
int reserved_cqs;
int reserved_srqs;
@@ -953,7 +938,7 @@ struct hns_roce_hw {
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
- dma_addr_t dma_handle, int nent, u32 vector);
+ dma_addr_t dma_handle);
int (*set_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
@@ -1092,11 +1077,6 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
-static inline struct hns_roce_sqp *hr_to_hr_sqp(struct hns_roce_qp *hr_qp)
-{
- return container_of(hr_qp, struct hns_roce_sqp, hr_qp);
-}
-
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
__raw_writeq(*(u64 *) val, dest);
@@ -1198,9 +1178,9 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index);
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
@@ -1257,12 +1237,11 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);
-int hns_roce_ib_create_cq(struct ib_cq *ib_cq,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
+int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
-void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
+void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
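The header diff above fixes the HNS_ROCE_ALOGN_UP spelling; the macro rounds a up to the next multiple of b using plain integer division, so it works for any b, not only powers of two. A few compile-time worked examples (illustrative only):

#include <linux/build_bug.h>

#define HNS_ROCE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))

/* Compile-time worked examples of the align-up arithmetic. */
static inline void align_up_examples(void)
{
        BUILD_BUG_ON(HNS_ROCE_ALIGN_UP(100, 64) != 128);
        BUILD_BUG_ON(HNS_ROCE_ALIGN_UP(128, 64) != 128);
        BUILD_BUG_ON(HNS_ROCE_ALIGN_UP(100, 24) != 120);
}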
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5f74bf55f471..2a2b2112f886 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -732,7 +732,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
if (!cq)
return -ENOMEM;
- ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
+ ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
if (ret) {
dev_err(dev, "Create cq for reserved loop qp failed!");
goto alloc_cq_failed;
@@ -868,7 +868,7 @@ alloc_pd_failed:
kfree(pd);
alloc_mem_failed:
- hns_roce_ib_destroy_cq(cq, NULL);
+ hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
kfree(cq);
return ret;
@@ -897,7 +897,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
i, ret);
}
- hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
+ hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
kfree(&free_mr->mr_free_cq->ib_cq);
hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
kfree(&free_mr->mr_free_pd->ibpd);
@@ -1114,9 +1114,10 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
free_mr = &priv->free_mr;
if (mr->enabled) {
- if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "HW2SW_MPT failed!\n");
+ if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1)))
+ dev_warn(dev, "DESTROY_MPT failed!\n");
}
mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
@@ -1979,8 +1980,7 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -1989,7 +1989,7 @@ static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
/* Get the CQE when its owner bit is opposite to the MSB of cons_idx */
return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
}
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
@@ -2072,8 +2072,7 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_cq_context *cq_context = NULL;
struct hns_roce_buf_list *tptr_buf;
@@ -2108,9 +2107,9 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->cqc_byte_12,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2((unsigned int)nent));
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
+ CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
@@ -3644,10 +3643,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
- if (hr_qp->ibqp.qp_type == IB_QPT_RC)
- kfree(hr_qp);
- else
- kfree(hr_to_hr_sqp(hr_qp));
+ kfree(hr_qp);
return 0;
}
@@ -3658,10 +3654,9 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct device *dev = &hr_dev->pdev->dev;
u32 cqe_cnt_ori;
u32 cqe_cnt_cur;
- u32 cq_buf_size;
int wait_time = 0;
- hns_roce_free_cq(hr_dev, hr_cq);
+ hns_roce_free_cqc(hr_dev, hr_cq);
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
@@ -3686,13 +3681,12 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
wait_time++;
}
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+ hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
ib_umem_release(hr_cq->umem);
if (!udata) {
/* Free the buff of stored cq */
- cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
- hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+ hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index e82567fcdeb7..cb8071a3e0d5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -389,7 +389,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_VLAN_M,
V2_UD_SEND_WQE_BYTE_36_VLAN_S,
- le16_to_cpu(ah->av.vlan));
+ ah->av.vlan_id);
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
@@ -2447,8 +2447,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
- n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2457,7 +2456,7 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
/* Get the CQE when its owner bit is opposite to the MSB of cons_idx */
return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
- !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+ !!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
@@ -2550,8 +2549,7 @@ static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle, int nent,
- u32 vector)
+ u64 *mtts, dma_addr_t dma_handle)
{
struct hns_roce_v2_cq_context *cq_context;
@@ -2563,9 +2561,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
- V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+ V2_CQC_BYTE_4_SHIFT_S,
+ ilog2(hr_cq->cq_depth));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
- V2_CQC_BYTE_4_CEQN_S, vector);
+ V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
@@ -4061,8 +4060,8 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
+ u16 vlan_id = 0xffff;
bool is_udp = false;
- u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
int ret;
@@ -4074,7 +4073,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
if (is_roce_protocol) {
gid_attr = attr->ah_attr.grh.sgid_attr;
- ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
+ ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
if (ret)
return ret;
@@ -4083,7 +4082,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
IB_GID_TYPE_ROCE_UDP_ENCAP);
}
- if (vlan < VLAN_CFI_MASK) {
+ if (vlan_id < VLAN_N_VID) {
roce_set_bit(context->byte_76_srqn_op_en,
V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en,
@@ -4095,7 +4094,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
- V2_QPC_BYTE_24_VLAN_ID_S, vlan);
+ V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
@@ -4650,16 +4649,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
{
struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev;
- int ret;
+ int ret = 0;
if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
- if (ret) {
+ if (ret)
ibdev_err(ibdev, "modify QP to Reset failed.\n");
- return ret;
- }
}
send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
@@ -4715,7 +4712,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
kfree(hr_qp->rq_inl_buf.wqe_list);
}
- return 0;
+ return ret;
}
static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
@@ -4725,16 +4722,11 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
int ret;
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
- if (ret) {
+ if (ret)
ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
- return ret;
- }
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- kfree(hr_to_hr_sqp(hr_qp));
- else
- kfree(hr_qp);
+ kfree(hr_qp);
return 0;
}
@@ -4951,10 +4943,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
{
struct hns_roce_dev *hr_dev = eq->hr_dev;
- __le32 doorbell[2];
-
- doorbell[0] = 0;
- doorbell[1] = 0;
+ __le32 doorbell[2] = {};
if (eq->type_flag == HNS_ROCE_AEQ) {
roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
@@ -6047,7 +6036,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
hr_dev->caps.srqwqe_hop_num));
roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->max));
+ ilog2(srq->wqe_cnt));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
SRQC_BYTE_4_SRQN_S, srq->srqn);
@@ -6092,11 +6081,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz);
+ hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz);
+ hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
srq_context->idx_nxt_blk_addr =
cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
@@ -6133,7 +6122,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->max)
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
@@ -6193,7 +6182,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl;
- attr->max_wr = srq->max - 1;
+ attr->max_wr = srq->wqe_cnt - 1;
attr->max_sge = srq->max_gs;
memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
@@ -6246,7 +6235,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
spin_lock_irqsave(&srq->lock, flags);
- ind = srq->head & (srq->max - 1);
+ ind = srq->head & (srq->wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) {
@@ -6261,7 +6250,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
break;
}
- wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
if (wqe_idx < 0) {
ret = -ENOMEM;
*bad_wr = wr;
@@ -6285,7 +6274,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
}
srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->max - 1);
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
}
if (likely(nreq)) {
@@ -6380,12 +6369,14 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
-static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int i;
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
@@ -6410,8 +6401,6 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
priv->handle = handle;
-
- return 0;
}
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
@@ -6429,14 +6418,7 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_kzalloc;
}
- hr_dev->pci_dev = handle->pdev;
- hr_dev->dev = &handle->pdev->dev;
-
- ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
- if (ret) {
- dev_err(hr_dev->dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
+ hns_roce_hw_v2_get_cfg(hr_dev, handle);
ret = hns_roce_init(hr_dev);
if (ret) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 43219d2f7de0..76a14db7028d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -87,8 +87,8 @@
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
-#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
-#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
+#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x100
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b5d196c119ee..854ef6e74788 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -111,7 +111,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "port(%d) can't find netdev\n", port);
+ dev_err(dev, "Can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -253,7 +253,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "find netdev %d failed!\r\n", port);
+ dev_err(dev, "Find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -301,12 +301,6 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}
-static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -359,7 +353,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
/* vm_pgoff: 1 -- TPTR */
case 1:
@@ -372,7 +367,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
return rdma_user_mmap_io(context, vma,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
hr_dev->tptr_size,
- vma->vm_page_prot);
+ vma->vm_page_prot,
+ NULL);
default:
return -EINVAL;
@@ -423,14 +419,14 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
.create_ah = hns_roce_create_ah,
- .create_cq = hns_roce_ib_create_cq,
+ .create_cq = hns_roce_create_cq,
.create_qp = hns_roce_create_qp,
.dealloc_pd = hns_roce_dealloc_pd,
.dealloc_ucontext = hns_roce_dealloc_ucontext,
.del_gid = hns_roce_del_gid,
.dereg_mr = hns_roce_dereg_mr,
.destroy_ah = hns_roce_destroy_ah,
- .destroy_cq = hns_roce_ib_destroy_cq,
+ .destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
.fill_res_entry = hns_roce_fill_res_entry,
.get_dma_mr = hns_roce_get_dma_mr,
@@ -438,7 +434,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
.modify_device = hns_roce_modify_device,
- .modify_port = hns_roce_modify_port,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
.query_device = hns_roce_query_device,
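
Several hunks in this file pass a new trailing NULL to rdma_user_mmap_io(), which now accepts an optional struct rdma_user_mmap_entry for mappings tracked by the core. Drivers such as hns that do not use mmap entries simply pass NULL. A hedged caller sketch (driver names hypothetical):

	/* Hypothetical mmap handler; 'uar_pfn' is assumed to be the doorbell
	 * page frame. Passing NULL keeps the old untracked behaviour. */
	static int drv_mmap_doorbell(struct ib_ucontext *ctx,
				     struct vm_area_struct *vma,
				     unsigned long uar_pfn)
	{
		return rdma_user_mmap_io(ctx, vma, uar_pfn, PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL /* no rdma_user_mmap_entry */);
	}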
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 5f8416ba09a9..9ad19170c3f9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -48,21 +48,21 @@ unsigned long key_to_hw_index(u32 key)
return (key << 24) | (key >> 8);
}
-static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
- HNS_ROCE_CMD_SW2HW_MPT,
+ HNS_ROCE_CMD_CREATE_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long mpt_index)
+int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long mpt_index)
{
return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
- mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
+ mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -83,7 +83,7 @@ static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
}
}
spin_unlock(&buddy->lock);
- return -1;
+ return -EINVAL;
found:
clear_bit(*seg, buddy->bits[o]);
@@ -206,13 +206,14 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
}
ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret == -1)
- return -1;
+ if (ret)
+ return ret;
- if (hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1)) {
+ ret = hns_roce_table_get_range(hr_dev, table, *seg,
+ *seg + (1 << order) - 1);
+ if (ret) {
hns_roce_buddy_free(buddy, *seg, order);
- return -1;
+ return ret;
}
return 0;
@@ -578,7 +579,7 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
mr->iova = iova; /* MR va starting addr */
@@ -707,10 +708,11 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
int ret;
if (mr->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mr->key) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
}
if (mr->size != ~0ULL) {
@@ -763,10 +765,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
@@ -1143,7 +1145,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
goto err_free;
@@ -1228,7 +1230,7 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
}
ib_umem_release(mr->umem);
- mr->umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ mr->umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
@@ -1308,9 +1310,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
if (ret)
goto free_cmd_mbox;
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
mr->enabled = 0;
@@ -1332,9 +1334,9 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
goto free_cmd_mbox;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
ib_umem_release(mr->umem);
goto free_cmd_mbox;
}
@@ -1448,10 +1450,11 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
int ret;
if (mw->enabled) {
- ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
- & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
+ key_to_hw_index(mw->rkey) &
+ (hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+ dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mw->rkey));
@@ -1487,10 +1490,10 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
goto err_page;
}
- ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
if (ret) {
- dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
goto err_page;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 912b89b4da34..780c780fdb22 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -96,7 +96,7 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
	/* Using bitmap to manage UAR index */
ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
- if (ret == -1)
+ if (ret)
return -ENOMEM;
if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index bd78ff90d998..a6565b674801 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -318,7 +318,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
* hr_qp->rq.max_gs);
}
- cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+ cap->max_recv_wr = hr_qp->rq.wqe_cnt;
cap->max_recv_sge = hr_qp->rq.max_gs;
return 0;
@@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
u8 max_sq_stride = ilog2(roundup_sq_stride);
/* Sanity check SQ size before proceeding */
- if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
- ucmd->log_sq_stride > max_sq_stride ||
- ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ if (ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
return -EINVAL;
}
@@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
u32 max_cnt;
int ret;
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
+ hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ return -EINVAL;
+
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
return ret;
}
- hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
max_cnt = max(1U, cap->max_send_sge);
@@ -391,37 +393,37 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
	/* Get buf size; SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sge.sge_cnt = ex_sge_num ?
max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->buff_size = HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift), page_size) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
if (ex_sge_num) {
- hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->sge.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
hr_qp->rq.offset = hr_qp->sge.offset +
- HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ HNS_ROCE_ALIGN_UP((hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift),
page_size);
} else {
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+ hr_qp->rq.offset = HNS_ROCE_ALIGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
page_size);
@@ -591,24 +593,24 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+ size = HNS_ROCE_ALIGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
(u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
- size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+ size += HNS_ROCE_ALIGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
}
hr_qp->rq.offset = size;
- size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+ size += HNS_ROCE_ALIGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
page_size);
hr_qp->buff_size = size;
	/* Get the number of send WRs and SGEs */
- cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+ cap->max_send_wr = hr_qp->sq.wqe_cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -743,7 +745,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
- hr_qp->buff_size, 0, 0);
+ hr_qp->buff_size, 0);
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
@@ -1017,7 +1019,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -1030,7 +1031,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
hr_qp);
if (ret) {
- ibdev_err(ibdev, "Create RC QP 0x%06lx failed(%d)\n",
+ ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
hr_qp->qpn, ret);
kfree(hr_qp);
return ERR_PTR(ret);
@@ -1047,11 +1048,10 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
- hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
- if (!hr_sqp)
+ hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
+ if (!hr_qp)
return ERR_PTR(-ENOMEM);
- hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1066,7 +1066,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
ibdev_err(ibdev, "Create GSI QP failed!\n");
- kfree(hr_sqp);
+ kfree(hr_qp);
return ERR_PTR(ret);
}
@@ -1289,7 +1289,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
u32 cur;
cur = hr_wq->head - hr_wq->tail;
- if (likely(cur + nreq < hr_wq->max_post))
+ if (likely(cur + nreq < hr_wq->wqe_cnt))
return false;
hr_cq = to_hr_cq(ib_cq);
@@ -1297,7 +1297,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
cur = hr_wq->head - hr_wq->tail;
spin_unlock(&hr_cq->lock);
- return cur + nreq >= hr_wq->max_post;
+ return cur + nreq >= hr_wq->wqe_cnt;
}
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
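
The buffer-size hunks above (where only the misspelled HNS_ROCE_ALOGN_UP macro name changes) pack the SQ, extended SGE and RQ regions back to back, each rounded up to the buffer page size. The same arithmetic, with the align-up macro spelled out for the sketch:

	/* ALIGN_UP(a, b): round a up to a multiple of b (b a power of two) */
	#define ALIGN_UP(a, b)	(((a) + (b) - 1) & ~((b) - 1))

	static unsigned int qp_buf_size(unsigned int sq_bytes,
					unsigned int sge_bytes,
					unsigned int rq_bytes,
					unsigned int page_size)
	{
		unsigned int size = ALIGN_UP(sq_bytes, page_size);  /* SQ first  */

		size += ALIGN_UP(sge_bytes, page_size);             /* then SGEs */
		size += ALIGN_UP(rq_bytes, page_size);              /* RQ last   */
		return size;
	}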
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 0a31d0a3d657..06871731ac43 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -98,11 +98,15 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr)
+ if (!table_attr) {
+ ret = -EMSGSIZE;
goto err;
+ }
- if (hns_roce_fill_cq(msg, context))
+ if (hns_roce_fill_cq(msg, context)) {
+ ret = -EMSGSIZE;
goto err_cancel_table;
+ }
nla_nest_end(msg, table_attr);
kfree(context);
@@ -113,7 +117,7 @@ err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
kfree(context);
- return -EMSGSIZE;
+ return ret;
}
int hns_roce_fill_res_entry(struct sk_buff *msg,
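
The restrack fix above stops collapsing every failure into -EMSGSIZE: ret is now set at each failing site, so the err label returns whatever actually went wrong. The general shape of that pattern, with stand-in helpers:

	/* Stand-ins for nla_nest_start()/hns_roce_fill_cq() in the hunk above. */
	static void *step_one(void) { return (void *)1; }
	static int step_two(void) { return 0; }

	static int fill_entry(void)
	{
		int ret = 0;

		if (!step_one()) {
			ret = -EMSGSIZE;	/* pick the errno at the failing site */
			goto err;
		}
		if (step_two()) {
			ret = -EMSGSIZE;
			goto err;
		}
		return 0;
	err:
		return ret;			/* no longer hard-coded -EMSGSIZE */
	}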
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 43ea2c13b212..7113ebfdb4f0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -59,21 +59,21 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
}
}
-static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
- HNS_ROCE_CMD_SW2HW_SRQ,
+ HNS_ROCE_CMD_CREATE_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
- struct hns_roce_cmd_mailbox *mailbox,
- unsigned long srq_num)
+static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
+ struct hns_roce_cmd_mailbox *mailbox,
+ unsigned long srq_num)
{
return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
- mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
+ mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
@@ -95,8 +95,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
srq->mtt.first_seg,
&dma_handle_wqe);
if (!mtts_wqe) {
- dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find srq buf addr.\n");
+ dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
return -EINVAL;
}
@@ -106,13 +105,14 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
&dma_handle_idx);
if (!mtts_idx) {
dev_err(hr_dev->dev,
- "SRQ alloc.Failed to find idx que buf addr.\n");
+ "Failed to find mtt for srq idx queue buf.\n");
return -EINVAL;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
- if (ret == -1) {
- dev_err(hr_dev->dev, "SRQ alloc.Failed to alloc index.\n");
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "Failed to alloc a bit from srq bitmap.\n");
return -ENOMEM;
}
@@ -134,7 +134,7 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
mtts_wqe, mtts_idx, dma_handle_wqe,
dma_handle_idx);
- ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
+ ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret)
goto err_xa;
@@ -160,9 +160,9 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
- ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
+ ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
if (ret)
- dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for CQN %06lx\n",
+ dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
xa_erase(&srq_table->xa, srq->srqn);
@@ -180,22 +180,23 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_ib_create_srq ucmd;
- u32 page_shift;
- u32 npages;
+ struct hns_roce_buf *buf;
int ret;
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
- npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, npages, page_shift, &srq->mtt);
+ buf = &srq->buf;
+ buf->npages = (ib_umem_page_count(srq->umem) +
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.srqwqe_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->mtt);
if (ret)
goto err_user_buf;
@@ -205,16 +206,19 @@ static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
/* config index queue BA */
srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
- srq->idx_que.buf_size, 0, 0);
+ srq->idx_que.buf_size, 0);
if (IS_ERR(srq->idx_que.umem)) {
dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
ret = PTR_ERR(srq->idx_que.umem);
goto err_user_srq_mtt;
}
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(srq->idx_que.umem),
- PAGE_SHIFT, &srq->idx_que.mtt);
-
+ buf = &srq->idx_que.idx_buf;
+ buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
+ 1 << hr_dev->caps.idx_buf_pg_sz);
+ buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
+ &srq->idx_que.mtt);
if (ret) {
dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
goto err_user_idx_mtt;
@@ -251,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap)
return -ENOMEM;
@@ -277,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
return -ENOMEM;
srq->head = 0;
- srq->tail = srq->max - 1;
+ srq->tail = srq->wqe_cnt - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt);
@@ -308,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
if (ret)
goto err_kernel_idx_buf;
- srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
ret = -ENOMEM;
goto err_kernel_idx_buf;
@@ -354,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
- struct ib_srq_init_attr *srq_init_attr,
+ struct ib_srq_init_attr *init_attr,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
@@ -366,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
u32 cqn;
	/* Check the requested SRQ WQE and SGE numbers */
- if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
- srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
+ if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
+ init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return -EINVAL;
mutex_init(&srq->mutex);
spin_lock_init(&srq->lock);
- srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
- srq->max_gs = srq_init_attr->attr.max_sge;
+ srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->max_gs = init_attr->attr.max_sge;
srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
srq->wqe_shift = ilog2(srq_desc_size);
- srq_buf_size = srq->max * srq_desc_size;
+ srq_buf_size = srq->wqe_cnt * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
+ srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
@@ -401,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
}
}
- cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
- to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;
+ cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_hr_cq(init_attr->ext.cq)->cqn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
@@ -449,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
} else {
kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
+ hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
&srq->buf);
}
ib_umem_release(srq->idx_que.umem);
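
The renamed srq->wqe_cnt is sized as roundup_pow_of_two(max_wr + 1): one slot is reserved, and the driver reports wqe_cnt - 1 back to the user as the usable maximum (see the query_srq hunk earlier). A standalone sketch of that sizing, with a portable round-up in place of the kernel helper:

	/* Round the user's max_wr up to a power of two, reserving one slot. */
	static unsigned int srq_wqe_cnt(unsigned int max_wr)
	{
		unsigned int n = max_wr + 1;

		/* portable roundup_pow_of_two() for the sketch (32-bit) */
		n--;
		n |= n >> 1; n |= n >> 2; n |= n >> 4;
		n |= n >> 8; n |= n >> 16;
		return n + 1;
	}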
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 2d6a378e8560..bb78d3280acc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2079,9 +2079,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
if (!dst || dst->error) {
if (dst) {
- dst_release(dst);
i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return rc;
}
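
The i40iw hunk is a use-after-free fix: dst->error was read after dst_release() could have dropped the last reference. The safe ordering is to consume the fields first, then release:

	/* Sketch of the corrected ordering (kernel types assumed). */
	static void report_and_put(struct dst_entry *dst)
	{
		int err = dst->error;	/* read while the reference is held */

		pr_err("ip6_route_output returned dst->error = %d\n", err);
		dst_release(dst);	/* 'dst' may be freed from here on */
	}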
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index cd9ee1664a69..86375947bc67 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1763,7 +1763,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
if (length > I40IW_MAX_MR_SIZE)
return ERR_PTR(-EINVAL);
- region = ib_umem_get(udata, start, length, acc, 0);
+ region = ib_umem_get(udata, start, length, acc);
if (IS_ERR(region))
return (struct ib_mr *)region;
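
Throughout this series ib_umem_get() loses its final dmasync argument, so every caller drops the trailing 0/1. Assuming the new four-argument form, a caller reduces to:

	/* Hypothetical MR registration path using the new signature. */
	static struct ib_umem *pin_user_buf(struct ib_udata *udata, u64 start,
					    u64 length, int access_flags)
	{
		/* old: ib_umem_get(udata, start, length, access_flags, 0); */
		return ib_umem_get(udata, start, length, access_flags);
	}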
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a7d238d312f0..306b21281fa2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -145,7 +145,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
int n;
*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 0f390351cef0..714f9df5bf39 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -64,7 +64,7 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57079110af9b..abe68708d6d6 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -966,7 +966,6 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
mutex_unlock(&dev->counters_table[port_num - 1].mutex);
if (stats_avail) {
- memset(out_mad->data, 0, sizeof out_mad->data);
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(&counter_stats,
@@ -984,38 +983,31 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
-
	/* iboe_process_mad(), which uses the HCA flow counters to implement IB PMA
	 * queries, should be called only by VFs and for that specific purpose
*/
if (link == IB_LINK_LAYER_INFINIBAND) {
if (mlx4_is_slave(dev->dev) &&
- (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
- in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
- in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
- return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+ in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+ in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
+ return iboe_process_mad(ibdev, mad_flags, port_num,
+ in_wc, in_grh, in, out);
- return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in, out);
}
if (link == IB_LINK_LAYER_ETHERNET)
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
- in_grh, in_mad, out_mad);
+ in_grh, in, out);
return -EINVAL;
}
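
With process_mad now taking typed struct ib_mad pointers, the per-driver size WARN_ONs and ib_mad_hdr casts disappear; the core is assumed to guarantee the buffer sizes. What a driver handler no longer needs, sketched:

	/* Typed parameters: use the MAD header directly, no casts or
	 * in_mad_size/out_mad_size checks. */
	static int handle_mad(const struct ib_mad *in, struct ib_mad *out)
	{
		if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT)
			return 1;	/* route to the PMA path, as above */
		return 0;
	}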
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2f1e38b891..0b5dc1d5928f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -256,6 +256,8 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
int hw_update = 0;
int i;
struct gid_entry *gids = NULL;
+ u16 vlan_id = 0xffff;
+ u8 mac[ETH_ALEN];
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -266,12 +268,16 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
if (!context)
return -EINVAL;
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
port_gid_table = &iboe->gids[attr->port_num - 1];
spin_lock_bh(&iboe->lock);
for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
if (!memcmp(&port_gid_table->gids[i].gid,
&attr->gid, sizeof(attr->gid)) &&
- port_gid_table->gids[i].gid_type == attr->gid_type) {
+ port_gid_table->gids[i].gid_type == attr->gid_type &&
+ port_gid_table->gids[i].vlan_id == vlan_id) {
found = i;
break;
}
@@ -291,6 +297,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
memcpy(&port_gid_table->gids[free].gid,
&attr->gid, sizeof(attr->gid));
port_gid_table->gids[free].gid_type = attr->gid_type;
+ port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
port_gid_table->gids[free].ctx->refcount = 1;
hw_update = 1;
@@ -1146,7 +1153,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
return rdma_user_mmap_io(context, vma,
to_mucontext(context)->uar.pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case 1:
if (dev->dev->caps.bf_reg_size == 0)
@@ -1155,7 +1163,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
context, vma,
to_mucontext(context)->uar.pfn +
dev->dev->caps.num_uars,
- PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
+ NULL);
case 3: {
struct mlx4_clock_params params;
@@ -1171,7 +1180,8 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
params.bar) +
params.offset) >>
PAGE_SHIFT,
- PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
+ NULL);
}
default:
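
The mlx4 add_gid change extends the GID cache match key: an entry now matches on the (gid, gid_type, vlan_id) tuple, so GIDs that differ only by VLAN get separate slots. The comparison, reduced to a sketch:

	/* A GID cache entry matches on the full tuple, not just gid+type. */
	struct gid_key {
		union ib_gid gid;
		int gid_type;
		u16 vlan_id;	/* 0xffff when untagged */
	};

	static bool gid_key_eq(const struct gid_key *a, const struct gid_key *b)
	{
		return !memcmp(&a->gid, &b->gid, sizeof(a->gid)) &&
		       a->gid_type == b->gid_type &&
		       a->vlan_id == b->vlan_id;
	}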
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index eb53bb4c0c91..d188573187fa 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -508,6 +508,7 @@ struct gid_entry {
union ib_gid gid;
enum ib_gid_type gid_type;
struct gid_cache_context *ctx;
+ u16 vlan_id;
};
struct mlx4_port_gid_table {
@@ -786,11 +787,10 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const void *in_mad, void *response_mad);
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 6ae503cfc526..dfa17bcdcdbc 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -398,7 +398,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_udata *udata, u64 start,
up_read(&current->mm->mmap_sem);
}
- return ib_umem_get(udata, start, length, access_flags, 0);
+ return ib_umem_get(udata, start, length, access_flags);
}
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bd4aa04416c6..85f57b76e446 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -916,7 +916,7 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
- qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -1110,8 +1110,7 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (err)
goto err;
- qp->umem =
- ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0, 0);
+ qp->umem = ib_umem_get(udata, ucmd.buf_addr, qp->buf_size, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 848db7264cc9..8dcf6e3d9ae2 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -110,7 +110,7 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
return -EFAULT;
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem))
return PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 9924be8384d8..d0a043ccbe58 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
- cong.o
+ cong.o restrack.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 45f48cde6b9d..dd8d24ee8e1d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -423,9 +423,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_cqe64 *cqe64;
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
- struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
u16 wqe_ctr;
@@ -519,27 +516,29 @@ repoll:
}
}
break;
- case MLX5_CQE_SIG_ERR:
- sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+ case MLX5_CQE_SIG_ERR: {
+ struct mlx5_sig_err_cqe *sig_err_cqe =
+ (struct mlx5_sig_err_cqe *)cqe64;
+ struct mlx5_core_sig_ctx *sig;
- xa_lock(&dev->mdev->priv.mkey_table);
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ xa_lock(&dev->sig_mrs);
+ sig = xa_load(&dev->sig_mrs,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- mr = to_mibmr(mmkey);
- get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
- mr->sig->sig_err_exists = true;
- mr->sig->sigerr_count++;
+ get_sig_err_item(sig_err_cqe, &sig->err_item);
+ sig->sig_err_exists = true;
+ sig->sigerr_count++;
mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
- cq->mcq.cqn, mr->sig->err_item.key,
- mr->sig->err_item.err_type,
- mr->sig->err_item.sig_err_offset,
- mr->sig->err_item.expected,
- mr->sig->err_item.actual);
+ cq->mcq.cqn, sig->err_item.key,
+ sig->err_item.err_type,
+ sig->err_item.sig_err_offset,
+ sig->err_item.expected,
+ sig->err_item.actual);
- xa_unlock(&dev->mdev->priv.mkey_table);
+ xa_unlock(&dev->sig_mrs);
goto repoll;
}
+ }
return 0;
}
@@ -710,7 +709,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->buf.umem =
ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->buf.umem)) {
err = PTR_ERR(cq->buf.umem);
return err;
@@ -1111,7 +1110,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
umem = ib_umem_get(udata, ucmd.buf_addr,
(size_t)ucmd.cqe_size * entries,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
return err;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index d609f4659afb..9d0a18cf9e5e 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -100,6 +100,7 @@ struct devx_obj {
struct mlx5_ib_devx_mr devx_mr;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
+ u32 flow_counter_bulk_size;
};
struct list_head event_sub; /* holds devx_event_subscription entries */
};
@@ -192,15 +193,20 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
struct devx_obj *devx_obj = obj;
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
+
+ if (offset && offset >= devx_obj->flow_counter_bulk_size)
+ return false;
+
*counter_id = MLX5_GET(dealloc_flow_counter_in,
devx_obj->dinbox,
flow_counter_id);
+ *counter_id += offset;
return true;
}
@@ -1265,8 +1271,8 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
mkey->pd = MLX5_GET(mkc, mkc, pd);
devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
- return xa_err(xa_store(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
+ GFP_KERNEL));
}
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
@@ -1345,9 +1351,9 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+ xa_erase(&obj->ib_dev->odp_mkeys,
mlx5_base_mkey(obj->devx_mr.mmkey.key));
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
@@ -1463,6 +1469,13 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (err)
goto obj_free;
+ if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+ u8 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = 128UL * bulk;
+ }
+
uobj->object = obj;
INIT_LIST_HEAD(&obj->event_sub);
obj->ib_dev = dev;
@@ -2121,7 +2134,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
if (err)
return err;
- obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
+ obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access);
if (IS_ERR(obj->umem))
return PTR_ERR(obj->umem);
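
A DEVX flow counter object can be allocated in bulk (128 counters per bulk unit, recorded at creation above), and the new offset attribute selects one counter inside that bulk; a non-zero offset at or past the bulk size is rejected. The selection logic, isolated:

	/* counter_id selection with bounds check, mirroring the hunks above. */
	static int pick_counter(u32 base_id, u32 bulk_size, u32 offset, u32 *id)
	{
		/* the driver allows offset 0 unconditionally (single counter) */
		if (offset && offset >= bulk_size)
			return -EINVAL;
		*id = base_id + offset;
		return 0;
	}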
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 8f4e5f22b84c..12737c509aa2 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -64,7 +64,7 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
page->user_virt = (virt & PAGE_MASK);
page->refcnt = 0;
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0);
if (IS_ERR(page->umem)) {
err = PTR_ERR(page->umem);
kfree(page);
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index b198ff10cde9..dbee17d22d50 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -85,6 +85,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
int len, ret, i;
u32 counter_id = 0;
+ u32 *offset_attr;
+ u32 offset = 0;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -151,8 +153,27 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
if (len) {
devx_obj = arr_flow_actions[0]->object;
- if (!mlx5_ib_devx_is_flow_counter(devx_obj, &counter_id))
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
+
+ int num_offsets = uverbs_attr_ptr_get_array_size(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ sizeof(u32));
+
+ if (num_offsets != 1)
+ return -EINVAL;
+
+ offset_attr = uverbs_attr_get_alloced_ptr(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
+ offset = *offset_attr;
+ }
+
+ if (!mlx5_ib_devx_is_flow_counter(devx_obj, offset,
+ &counter_id))
return -EINVAL;
+
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
@@ -598,7 +619,11 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
UVERBS_ACCESS_READ, 1, 1,
- UA_OPTIONAL));
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 4950df3f71b6..ac4d8d1b9a07 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -263,7 +263,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
},
.sq_sig_type = gsi->sq_sig_type,
.qp_type = IB_QPT_UD,
- .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+ .create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
};
return ib_create_qp(pd, &init_attr);
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index 649a3364f838..4f0edd4832bd 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -201,3 +201,27 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
return -EINVAL;
}
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf+1, rep);
+ if (err)
+ goto ex;
+
+ port_guid->guid = rep->port_guid;
+ node_guid->guid = rep->node_guid;
+ex:
+ kfree(rep);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 348c1df69cdc..14e0c17de6a9 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -74,58 +74,6 @@ static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
port);
}
-static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
- u16 slid;
- int err;
-
- slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
- return IB_MAD_RESULT_SUCCESS;
-
- /* Don't process SMInfo queries -- the SMA can't handle them.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
- return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
- return IB_MAD_RESULT_SUCCESS;
- } else {
- return IB_MAD_RESULT_SUCCESS;
- }
-
- err = mlx5_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
- if (err)
- return IB_MAD_RESULT_FAILURE;
-
- /* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
-
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
- /* no response for trap repress */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
void *out)
{
@@ -271,30 +219,66 @@ done:
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
- int ret;
+ u8 mgmt_class = in->mad_hdr.mgmt_class;
+ u8 method = in->mad_hdr.method;
+ u16 slid;
+ int err;
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
+ be16_to_cpu(IB_LID_PERMISSIVE);
- memset(out_mad->data, 0, sizeof(out_mad->data));
+ if (method == IB_MGMT_METHOD_TRAP && !slid)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
- } else {
- ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
- in_mad, out_mad);
+ switch (mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /* Don't process SMInfo queries -- the SMA can't handle them.
+ */
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ method == IB_MGMT_METHOD_GET)
+ return process_pma_cmd(dev, port_num, in, out);
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS1:
+ /* fallthrough */
+ case MLX5_IB_VENDOR_CLASS2:
+ case IB_MGMT_CLASS_CONG_MGMT: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ default:
+ return IB_MAD_RESULT_SUCCESS;
}
- return ret;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* set return bit in status of directed route responses */
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
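
The refactor folds the old process_mad() helper into one switch in mlx5_ib_process_mad(); note the deliberate fallthrough from IB_MGMT_CLASS_PERF_MGMT into the vendor classes when the PMA fast path does not apply. The control-flow skeleton, with placeholder constants:

	/* Skeleton of the dispatch; enum values stand in for the real classes. */
	enum { CLS_SUBN, CLS_PERF, CLS_VENDOR, CLS_OTHER };

	static int dispatch(int mgmt_class, int pma_capable)
	{
		switch (mgmt_class) {
		case CLS_PERF:
			if (pma_capable)
				return 1;	/* handled by process_pma_cmd() */
			/* fallthrough */
		case CLS_VENDOR:
			return 2;		/* forwarded to MAD_IFC */
		default:
			return 0;		/* silently succeed, as before */
		}
	}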
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 46ea4f0b9b51..51100350b688 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -67,6 +67,7 @@
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -693,21 +694,6 @@ static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
get_atomic_caps(dev, atomic_size_qp, props);
}
-static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
- struct ib_device_attr *props)
-{
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
-
- get_atomic_caps(dev, atomic_size_qp, props);
-}
-
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
-{
- struct ib_device_attr props = {};
-
- get_atomic_caps_dc(dev, &props);
- return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
-}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -844,8 +830,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
if (uhw->outlen && uhw->outlen < resp_len)
return -EINVAL;
- else
- resp.response_length = resp_len;
+
+ resp.response_length = resp_len;
if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
@@ -1011,6 +997,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
props->max_pi_fast_reg_page_list_len =
props->max_fast_reg_page_list_len / 2;
+ props->max_sgl_rd =
+ MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -1161,8 +1149,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
- resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ else
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
resp.striding_rq_caps.supported_qpts =
@@ -1808,7 +1802,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
return -EINVAL;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ if (dev->wc_support)
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
@@ -2168,7 +2162,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
- prot);
+ prot, NULL);
if (err) {
mlx5_ib_err(dev,
"rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
@@ -2210,7 +2204,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
PAGE_SHIFT) +
page_idx;
return rdma_user_mmap_io(context, vma, pfn, map_size,
- pgprot_writecombine(vma->vm_page_prot));
+ pgprot_writecombine(vma->vm_page_prot),
+ NULL);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2248,7 +2243,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
PAGE_SHIFT;
return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
case MLX5_IB_MMAP_CLOCK_INFO:
return mlx5_ib_mmap_clock_info_page(dev, vma, context);
@@ -5705,11 +5701,10 @@ static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
- if (!dev->delay_drop.dbg)
+ if (!dev->delay_drop.dir_debugfs)
return;
- debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
- kfree(dev->delay_drop.dbg);
- dev->delay_drop.dbg = NULL;
+ debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
+ dev->delay_drop.dir_debugfs = NULL;
}
static void cancel_delay_drop(struct mlx5_ib_dev *dev)
@@ -5760,52 +5755,22 @@ static const struct file_operations fops_delay_drop_timeout = {
.read = delay_drop_timeout_read,
};
-static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
+static void delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
{
- struct mlx5_ib_dbg_delay_drop *dbg;
+ struct dentry *root;
if (!mlx5_debugfs_root)
- return 0;
-
- dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
- if (!dbg)
- return -ENOMEM;
-
- dev->delay_drop.dbg = dbg;
-
- dbg->dir_debugfs =
- debugfs_create_dir("delay_drop",
- dev->mdev->priv.dbg_root);
- if (!dbg->dir_debugfs)
- goto out_debugfs;
-
- dbg->events_cnt_debugfs =
- debugfs_create_atomic_t("num_timeout_events", 0400,
- dbg->dir_debugfs,
- &dev->delay_drop.events_cnt);
- if (!dbg->events_cnt_debugfs)
- goto out_debugfs;
-
- dbg->rqs_cnt_debugfs =
- debugfs_create_atomic_t("num_rqs", 0400,
- dbg->dir_debugfs,
- &dev->delay_drop.rqs_cnt);
- if (!dbg->rqs_cnt_debugfs)
- goto out_debugfs;
-
- dbg->timeout_debugfs =
- debugfs_create_file("timeout", 0600,
- dbg->dir_debugfs,
- &dev->delay_drop,
- &fops_delay_drop_timeout);
- if (!dbg->timeout_debugfs)
- goto out_debugfs;
+ return;
- return 0;
+ root = debugfs_create_dir("delay_drop", dev->mdev->priv.dbg_root);
+ dev->delay_drop.dir_debugfs = root;
-out_debugfs:
- delay_drop_debugfs_cleanup(dev);
- return -ENOMEM;
+ debugfs_create_atomic_t("num_timeout_events", 0400, root,
+ &dev->delay_drop.events_cnt);
+ debugfs_create_atomic_t("num_rqs", 0400, root,
+ &dev->delay_drop.rqs_cnt);
+ debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
+ &fops_delay_drop_timeout);
}
static void init_delay_drop(struct mlx5_ib_dev *dev)
@@ -5821,8 +5786,7 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
atomic_set(&dev->delay_drop.rqs_cnt, 0);
atomic_set(&dev->delay_drop.events_cnt, 0);
- if (delay_drop_debugfs_init(dev))
- mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
+ delay_drop_debugfs_init(dev);
}
static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
@@ -6140,11 +6104,10 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- srcu_barrier(&dev->mr_srcu);
- cleanup_srcu_struct(&dev->mr_srcu);
- }
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ cleanup_srcu_struct(&dev->odp_srcu);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
}
@@ -6196,15 +6159,15 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
+ xa_init(&dev->odp_mkeys);
+ xa_init(&dev->sig_mrs);
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- err = init_srcu_struct(&dev->mr_srcu);
- if (err)
- goto err_mp;
- }
+ err = init_srcu_struct(&dev->odp_srcu);
+ if (err)
+ goto err_mp;
return 0;
@@ -6264,6 +6227,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
+ .enable_driver = mlx5_ib_enable_driver,
+ .fill_res_entry = mlx5_ib_fill_res_entry,
+ .fill_stat_entry = mlx5_ib_fill_stat_entry,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
.get_link_layer = mlx5_ib_port_link_layer,
@@ -6310,6 +6276,7 @@ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
.get_vf_config = mlx5_ib_get_vf_config,
+ .get_vf_guid = mlx5_ib_get_vf_guid,
.get_vf_stats = mlx5_ib_get_vf_stats,
.set_vf_guid = mlx5_ib_set_vf_guid,
.set_vf_link_state = mlx5_ib_set_vf_link_state,
@@ -6705,6 +6672,18 @@ static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
}
}
+int mlx5_ib_enable_driver(struct ib_device *dev)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(dev);
+ int ret;
+
+ ret = mlx5_ib_test_wc(mdev);
+ mlx5_ib_dbg(mdev, "Write-Combining %s",
+ mdev->wc_support ? "supported" : "not supported");
+
+ return ret;
+}
+
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage)
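
delay_drop_debugfs_init() can return void because the debugfs_create_*() helpers are fire-and-forget: on failure they hand back an error pointer that is safe to pass on as a parent, so callers need not check anything. The minimal shape:

	/* debugfs setup without error handling; failures just leave the
	 * files absent, which is acceptable for debug-only state. */
	static void drv_debugfs_init(struct dentry *parent, atomic_t *cnt)
	{
		struct dentry *root = debugfs_create_dir("delay_drop", parent);

		debugfs_create_atomic_t("num_timeout_events", 0400, root, cnt);
	}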
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index b5aece786b36..048f4e974a61 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -34,6 +34,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
+#include <linux/jiffies.h>
/* @umem: umem object to scan
* @addr: ib virtual address requested by the user
@@ -216,3 +217,201 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
*offset = buf_off >> ilog2(off_size);
return 0;
}
+
+#define WR_ID_BF 0xBF
+#define WR_ID_END 0xBAD
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ bool signaled)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_bf *bf = &qp->bf;
+ __be32 mmio_wqe[16] = {};
+ unsigned long flags;
+ unsigned int idx;
+ int i;
+
+ if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+
+ memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
+ ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
+ (qp->trans_qp.base.mqp.qpn << 8));
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
+ qp->sq.wqe_head[idx] = qp->sq.head + 1;
+ qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+ qp->sq.head++;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure the doorbell record is visible to the HCA before
+ * we ring the doorbell
+ */
+ wmb();
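+
+ /*
+ * Push the WQE image through the BlueFlame register as eight
+ * 64-bit writes; with a working write-combining mapping these
+ * should reach the device as a single 64-byte write.
+ */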
+ for (i = 0; i < 8; i++)
+ mlx5_write64(&mmio_wqe[i * 2],
+ bf->bfreg->map + bf->offset + i * 8);
+
+ bf->offset ^= bf->buf_size;
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return 0;
+}
+
+static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
+{
+ int ret;
+ struct ib_wc wc = {};
+ unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+
+ do {
+ ret = ib_poll_cq(cq, 1, &wc);
+ if (ret < 0 || wc.status)
+ return ret < 0 ? ret : -EINVAL;
+ if (ret)
+ break;
+ } while (!time_after(jiffies, end));
+
+ if (!ret)
+ return -ETIMEDOUT;
+
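+ /*
+ * Only the BlueFlame copies of the WR_ID_BF NOPs request a
+ * completion, so a first CQE carrying WR_ID_BF means a
+ * write-combined doorbell reached the HCA intact. Seeing the
+ * conventionally signaled WR_ID_END NOP first means none did,
+ * and 0 (not supported) is reported instead.
+ */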
+ if (wc.wr_id != WR_ID_BF)
+ ret = 0;
+
+ return ret;
+}
+
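+/*
+ * Post TEST_WC_NUM_WQES NOPs that depend on the BlueFlame doorbell,
+ * then one conventionally signaled NOP so that the CQ is guaranteed
+ * to produce at least one completion even if no BlueFlame copy lands.
+ */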
+static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ int err, i;
+
+ for (i = 0; i < TEST_WC_NUM_WQES; i++) {
+ err = post_send_nop(dev, qp, WR_ID_BF, false);
+ if (err)
+ return err;
+ }
+
+ return post_send_nop(dev, qp, WR_ID_END, true);
+}
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
+{
+ struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+ struct ib_qp_init_attr qp_init_attr = {
+ .cap = { .max_send_wr = TEST_WC_NUM_WQES },
+ .qp_type = IB_QPT_UD,
+ .sq_sig_type = IB_SIGNAL_REQ_WR,
+ .create_flags = MLX5_IB_QP_CREATE_WC_TEST,
+ };
+ struct ib_qp_attr qp_attr = { .port_num = 1 };
+ struct ib_device *ibdev = &dev->ib_dev;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, bf))
+ return 0;
+
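+ /*
+ * When RoCE is disabled on an Ethernet port the test cannot run;
+ * a PF is then assumed to support write-combining instead of
+ * being measured.
+ */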
+ if (!dev->mdev->roce.roce_en &&
+ port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
+ if (mlx5_core_is_pf(dev->mdev))
+ dev->wc_support = true;
+ return 0;
+ }
+
+ ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
+ if (ret)
+ goto print_err;
+
+ if (!dev->wc_bfreg.wc)
+ goto out1;
+
+ pd = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto out1;
+ }
+
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out2;
+ }
+
+ qp_init_attr.recv_cq = cq;
+ qp_init_attr.send_cq = cq;
+ qp = ib_create_qp(pd, &qp_init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out3;
+ }
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_modify_qp(qp, &qp_attr,
+ IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
+ IB_QP_QKEY);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret)
+ goto out4;
+
+ ret = test_wc_do_send(dev, qp);
+ if (ret < 0)
+ goto out4;
+
+ ret = test_wc_poll_cq_result(dev, cq);
+ if (ret > 0) {
+ dev->wc_support = true;
+ ret = 0;
+ }
+
+out4:
+ ib_destroy_qp(qp);
+out3:
+ ib_destroy_cq(cq);
+out2:
+ ib_dealloc_pd(pd);
+out1:
+ mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
+print_err:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Error %d while trying to test write-combining support\n",
+ ret);
+ return ret;
+}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 1a98ee2e01c4..5986953ec2fa 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -247,12 +247,8 @@ struct mlx5_ib_flow_db {
* These flags are intended for internal use by the mlx5_ib driver, and they
* rely on the range reserved for that use in the ib_qp_create_flags enum.
*/
-
-/* Create a UD QP whose source QP number is 1 */
-static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
-{
- return IB_QP_CREATE_RESERVED_START;
-}
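+/* Create a UD QP whose source QP number is 1 */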
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
struct wr_list {
u16 opcode;
@@ -295,6 +291,7 @@ enum mlx5_ib_wq_flags {
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
struct mlx5_ib_rwq {
struct ib_wq ibwq;
@@ -585,6 +582,9 @@ struct mlx5_ib_dm {
IB_ACCESS_REMOTE_READ |\
IB_ZERO_BASED)
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+ atomic64_add(value, &((mr)->odp_stats.counter_name))
+
struct mlx5_ib_mr {
struct ib_mr ibmr;
void *descs;
@@ -606,7 +606,6 @@ struct mlx5_ib_mr {
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig;
- unsigned int live;
void *descs_alloc;
int access_flags; /* Needed for rereg MR */
@@ -618,10 +617,18 @@ struct mlx5_ib_mr {
u64 data_iova;
u64 pi_iova;
- atomic_t num_leaf_free;
- wait_queue_head_t q_leaf_free;
+ /* For ODP and implicit MRs */
+ atomic_t num_deferred_work;
+ struct xarray implicit_children;
+ union {
+ struct rcu_head rcu;
+ struct list_head elm;
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+
struct mlx5_async_work cb_work;
- atomic_t num_pending_prefetch;
};
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -792,13 +799,6 @@ enum {
MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};
-struct mlx5_ib_dbg_delay_drop {
- struct dentry *dir_debugfs;
- struct dentry *rqs_cnt_debugfs;
- struct dentry *events_cnt_debugfs;
- struct dentry *timeout_debugfs;
-};
-
struct mlx5_ib_delay_drop {
struct mlx5_ib_dev *dev;
struct work_struct delay_drop_work;
@@ -808,7 +808,7 @@ struct mlx5_ib_delay_drop {
bool activate;
atomic_t events_cnt;
atomic_t rqs_cnt;
- struct mlx5_ib_dbg_delay_drop *dbg;
+ struct dentry *dir_debugfs;
};
enum mlx5_ib_stages {
@@ -957,7 +957,11 @@ struct mlx5_ib_dev {
/* serialize update of capability mask
*/
struct mutex cap_mask_mutex;
- bool ib_active;
+ u8 ib_active:1;
+ u8 fill_delay:1;
+ u8 is_rep:1;
+ u8 lag_active:1;
+ u8 wc_support:1;
struct umr_common umrc;
/* sync used page count stats
*/
@@ -966,7 +970,6 @@ struct mlx5_ib_dev {
struct timer_list delay_timer;
/* Prevents soft lock on massive reg MRs */
struct mutex slow_path_mutex;
- int fill_delay;
struct ib_odp_caps odp_caps;
u64 odp_max_size;
struct mlx5_ib_pf_eq odp_pf_eq;
@@ -975,7 +978,9 @@ struct mlx5_ib_dev {
* Sleepable RCU that prevents destruction of MRs while they are still
* being used by a page fault handler.
*/
- struct srcu_struct mr_srcu;
+ struct srcu_struct odp_srcu;
+ struct xarray odp_mkeys;
+
u32 null_mkey;
struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
@@ -984,11 +989,10 @@ struct mlx5_ib_dev {
/* Array with num_ports elements */
struct mlx5_ib_port *port;
struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg wc_bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
- bool is_rep;
- int lag_active;
struct mlx5_ib_lb_state lb;
u8 umr_fence;
@@ -999,6 +1003,8 @@ struct mlx5_ib_dev {
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
struct mlx5_devx_event_table devx_event_table;
+
+ struct xarray sig_mrs;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1162,6 +1168,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
u64 length, u64 virt_addr, int access_flags,
struct ib_pd *pd, struct ib_udata *udata);
@@ -1179,9 +1186,8 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
@@ -1223,6 +1229,8 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr);
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
@@ -1235,7 +1243,6 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
@@ -1251,8 +1258,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
- unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
size_t nentries, struct mlx5_ib_mr *mr, int flags);
@@ -1282,11 +1287,10 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
return -EOPNOTSUPP;
}
-static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,
- unsigned long start,
- unsigned long end){};
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
+
/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
@@ -1300,6 +1304,9 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
@@ -1334,6 +1341,10 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
u8 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
@@ -1349,7 +1360,7 @@ struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_flow_act *flow_act, u32 counter_id,
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
-bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id);
+bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
@@ -1491,4 +1502,7 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
return true;
}
+
+int mlx5_ib_enable_driver(struct ib_device *dev);
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 7019c12005f4..ea8bfc3e2d8d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -50,7 +50,6 @@ enum {
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -59,13 +58,9 @@ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- /* Wait until all page fault handlers using the mr complete. */
- synchronize_srcu(&dev->mr_srcu);
-
- return err;
+ return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
@@ -94,8 +89,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct xarray *mkeys = &dev->mdev->priv.mkey_table;
- int err;
spin_lock_irqsave(&ent->lock, flags);
ent->pending--;
@@ -122,13 +115,6 @@ static void reg_mr_callback(int status, struct mlx5_async_work *context)
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
- xa_lock_irqsave(mkeys, flags);
- err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_ATOMIC));
- xa_unlock_irqrestore(mkeys, flags);
- if (err)
- pr_err("Error inserting to mkey tree. 0x%x\n", -err);
-
if (!completion_done(&ent->compl))
complete(&ent->compl);
}
@@ -218,9 +204,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- synchronize_srcu(&dev->mr_srcu);
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -428,7 +411,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
ent = &cache->ent[entry];
@@ -511,7 +494,7 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
c = order2idx(dev, mr->order);
WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr)) {
+ if (mlx5_mr_cache_invalidate(mr)) {
mr->allocated_from_cache = false;
destroy_mkey(dev, mr);
ent = &cache->ent[c];
@@ -555,10 +538,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- synchronize_srcu(&dev->mr_srcu);
-#endif
-
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
list_del(&mr->list);
kfree(mr);
@@ -679,6 +658,20 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
return 0;
}
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -702,16 +695,8 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET(mkc, mkc, length64, 1);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, 0);
+ set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -764,7 +749,8 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (access_flags & IB_ACCESS_ON_DEMAND) {
struct ib_umem_odp *odp;
- odp = ib_umem_odp_get(udata, start, length, access_flags);
+ odp = ib_umem_odp_get(udata, start, length, access_flags,
+ &mlx5_mn_ops);
if (IS_ERR(odp)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
PTR_ERR(odp));
@@ -779,7 +765,7 @@ static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
if (order)
*order = ilog2(roundup_pow_of_two(*ncont));
} else {
- u = ib_umem_get(udata, start, length, access_flags, 0);
+ u = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(u)) {
mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
return PTR_ERR(u);
@@ -1169,16 +1155,8 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
- MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
- MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
- MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
- MLX5_SET(mkc, mkc, lr, 1);
-
MLX5_SET64(mkc, mkc, len, length);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr, start_addr);
+ set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
@@ -1337,10 +1315,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
- atomic_set(&mr->num_pending_prefetch, 0);
+ atomic_set(&mr->num_deferred_work, 0);
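+ /*
+ * Storing the mkey in odp_mkeys is what makes the MR reachable
+ * from the page fault handler; it takes over the role of the
+ * old mr->live flag.
+ */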
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
+ GFP_KERNEL));
+ if (err) {
+ dereg_mr(dev, mr);
+ return ERR_PTR(err);
+ }
}
- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
- smp_store_release(&mr->live, 1);
return &mr->ibmr;
error:
@@ -1348,22 +1331,29 @@ error:
return ERR_PTR(err);
}
-static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+/**
+ * mlx5_mr_cache_invalidate - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA in progress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
- struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_umr_wr umrwr = {};
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
- umrwr.pd = dev->umrc.pd;
+ umrwr.pd = mr->dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
umrwr.ignore_free_state = 1;
- return mlx5_ib_post_send_wait(dev, &umrwr);
+ return mlx5_ib_post_send_wait(mr->dev, &umrwr);
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
@@ -1447,7 +1437,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* UMR can't be used - MKey needs to be replaced.
*/
if (mr->allocated_from_cache)
- err = unreg_umr(dev, mr);
+ err = mlx5_mr_cache_invalidate(mr);
else
err = destroy_mkey(dev, mr);
if (err)
@@ -1560,6 +1550,7 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
+ xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
kfree(mr->sig);
mr->sig = NULL;
}
@@ -1575,54 +1566,20 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int npages = mr->npages;
struct ib_umem *umem = mr->umem;
- if (is_odp_mr(mr)) {
- struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
-
- /* Prevent new page faults and
- * prefetch requests from succeeding
- */
- WRITE_ONCE(mr->live, 0);
-
- /* Wait for all running page-fault handlers to finish. */
- synchronize_srcu(&dev->mr_srcu);
-
- /* dequeue pending prefetch requests for the mr */
- if (atomic_read(&mr->num_pending_prefetch))
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&mr->num_pending_prefetch));
-
- /* Destroy all page mappings */
- if (!umem_odp->is_implicit_odp)
- mlx5_ib_invalidate_range(umem_odp,
- ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
- else
- mlx5_ib_free_implicit_mr(mr);
- /*
- * We kill the umem before the MR for ODP,
- * so that there will not be any invalidations in
- * flight, looking at the *mr struct.
- */
- ib_umem_odp_release(umem_odp);
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
-
- /* Avoid double-freeing the umem. */
- umem = NULL;
- }
+ /* Stop all DMA */
+ if (is_odp_mr(mr))
+ mlx5_ib_fence_odp_mr(mr);
+ else
+ clean_mr(dev, mr);
- clean_mr(dev, mr);
+ if (mr->allocated_from_cache)
+ mlx5_mr_cache_free(dev, mr);
+ else
+ kfree(mr);
- /*
- * We should unregister the DMA address from the HCA before
- * remove the DMA mapping.
- */
- mlx5_mr_cache_free(dev, mr);
ib_umem_release(umem);
- if (umem)
- atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
- if (!mr->allocated_from_cache)
- kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1634,6 +1591,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
}
+ if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
+ mlx5_ib_free_implicit_mr(mmr);
+ return 0;
+ }
+
dereg_mr(to_mdev(ibmr->device), mmr);
return 0;
@@ -1797,8 +1759,15 @@ static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
if (err)
goto err_free_mtt_mr;
+ err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, GFP_KERNEL));
+ if (err)
+ goto err_free_descs;
return 0;
+err_free_descs:
+ destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
err_free_mtt_mr:
dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
mr->mtt_mr = NULL;
@@ -1951,9 +1920,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
}
}
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = xa_err(xa_store(&dev->odp_mkeys,
+ mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
+ GFP_KERNEL));
+ if (err)
+ goto free_mkey;
+ }
+
kfree(in);
return &mw->ibmw;
+free_mkey:
+ mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
kfree(mw);
kfree(in);
@@ -1967,13 +1946,12 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
int err;
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
- xa_erase_irq(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(mmw->mmkey.key));
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
/*
* pagefault_single_data_segment() may be accessing mmw under
* SRCU if the user bound an ODP MR to this MW.
*/
- synchronize_srcu(&dev->mr_srcu);
+ synchronize_srcu(&dev->odp_srcu);
}
err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3f9478d19376..f924250f80c2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -93,182 +93,185 @@ struct mlx5_pagefault {
static u64 mlx5_imr_ksm_entries;
-static int check_parent(struct ib_umem_odp *odp,
- struct mlx5_ib_mr *parent)
+void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
{
- struct mlx5_ib_mr *mr = odp->private;
-
- return mr && mr->parent == parent && !odp->dying;
-}
-
-static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
-{
- if (WARN_ON(!mr || !is_odp_mr(mr)))
- return NULL;
-
- return to_ib_umem_odp(mr->umem)->per_mm;
-}
-
-static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
-{
- struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext_per_mm *per_mm = odp->per_mm;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- while (1) {
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (check_parent(odp, parent))
- goto end;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
- struct mlx5_ib_mr *parent)
-{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
- struct ib_umem_odp *odp;
- struct rb_node *rb;
-
- down_read(&per_mm->umem_rwsem);
- odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
- if (!odp)
- goto end;
-
- while (1) {
- if (check_parent(odp, parent))
- goto end;
- rb = rb_next(&odp->interval_tree.rb);
- if (!rb)
- goto not_found;
- odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp) > start + length)
- goto not_found;
- }
-not_found:
- odp = NULL;
-end:
- up_read(&per_mm->umem_rwsem);
- return odp;
-}
-
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
- size_t nentries, struct mlx5_ib_mr *mr, int flags)
-{
- struct ib_pd *pd = mr->ibmr.pd;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct ib_umem_odp *odp;
- unsigned long va;
- int i;
+ struct mlx5_klm *end = pklm + nentries;
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (i = 0; i < nentries; i++, pklm++) {
+ for (; pklm != end; pklm++, idx++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
pklm->va = 0;
}
return;
}
/*
- * The locking here is pretty subtle. Ideally the implicit children
- * list would be protected by the umem_mutex, however that is not
+ * The locking here is pretty subtle. Ideally the implicit_children
+ * xarray would be protected by the umem_mutex, however that is not
* possible. Instead this uses a weaker update-then-lock pattern:
*
* srcu_read_lock()
- * <change children list>
+ * xa_store()
* mutex_lock(umem_mutex)
* mlx5_ib_update_xlt()
* mutex_unlock(umem_mutex)
* destroy lkey
*
- * ie any change the children list must be followed by the locked
- * update_xlt before destroying.
+ * i.e. any change to the xarray must be followed by the locked update_xlt
+ * before destroying.
*
* The umem_mutex provides the acquire/release semantic needed to make
- * the children list visible to a racing thread. While SRCU is not
+ * the xa_store() visible to a racing thread. While SRCU is not
* technically required, using it gives consistent use of the SRCU
- * locking around the children list.
+ * locking around the xarray.
*/
- lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
- lockdep_assert_held(&mr->dev->mr_srcu);
+ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
+ lockdep_assert_held(&imr->dev->odp_srcu);
- odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ for (; pklm != end; pklm++, idx++) {
+ struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
- for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && ib_umem_start(odp) == va) {
- struct mlx5_ib_mr *mtt = odp->private;
-
+ if (mtt) {
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
- odp = odp_next(odp);
+ pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = cpu_to_be32(dev->null_mkey);
+ pklm->key = cpu_to_be32(imr->dev->null_mkey);
+ pklm->va = 0;
}
- mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
- i, va, be32_to_cpu(pklm->key));
}
}
-static void mr_leaf_free_action(struct work_struct *work)
+static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ /* Ensure mlx5_ib_invalidate_range() will not touch the MR any more */
+ mutex_lock(&odp->umem_mutex);
+ if (odp->npages) {
+ mlx5_mr_cache_invalidate(mr);
+ ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),
+ ib_umem_end(odp));
+ WARN_ON(odp->npages);
+ }
+ odp->private = NULL;
+ mutex_unlock(&odp->umem_mutex);
+
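+ /*
+ * A cache-allocated mkey is left alive here and is returned to
+ * the MR cache by the caller; only non-cache mkeys are destroyed
+ * immediately.
+ */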
+ if (!mr->allocated_from_cache) {
+ mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey);
+ WARN_ON(mr->descs);
+ }
+}
+
+/*
+ * This must be called after the mr has been removed from implicit_children
+ * and SRCU has been synchronized. NOTE: the MR is not necessarily empty
+ * here; parallel page faults may have raced with the free process and
+ * added pages to it.
+ */
+static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
{
- struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
- struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+ struct mlx5_ib_mr *imr = mr->parent;
struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
int srcu_key;
- mr->parent = NULL;
- synchronize_srcu(&mr->dev->mr_srcu);
+ /* implicit_child_mr's are not allowed to have deferred work */
+ WARN_ON(atomic_read(&mr->num_deferred_work));
- if (smp_load_acquire(&imr->live)) {
- srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+ if (need_imr_xlt) {
+ srcu_key = srcu_read_lock(&mr->dev->odp_srcu);
mutex_lock(&odp_imr->umem_mutex);
- mlx5_ib_update_xlt(imr, idx, 1, 0,
+ mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
- srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&mr->dev->odp_srcu, srcu_key);
}
- ib_umem_odp_release(odp);
+
+ dma_fence_odp_mr(mr);
+
+ mr->parent = NULL;
mlx5_mr_cache_free(mr->dev, mr);
+ ib_umem_odp_release(odp);
+ atomic_dec(&imr->num_deferred_work);
+}
+
+static void free_implicit_child_mr_work(struct work_struct *work)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
- if (atomic_dec_and_test(&imr->num_leaf_free))
- wake_up(&imr->q_leaf_free);
+ free_implicit_child_mr(mr, true);
}
-void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
- unsigned long end)
+static void free_implicit_child_mr_rcu(struct rcu_head *head)
{
+ struct mlx5_ib_mr *mr =
+ container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
+
+ /* Freeing an MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
+}
+
+static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ struct mlx5_ib_mr *imr = mr->parent;
+
+ xa_lock(&imr->implicit_children);
+ /*
+ * This can race with mlx5_ib_free_implicit_mr(), the first one to
+ * reach the xa lock wins the race and destroys the MR.
+ */
+ if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
+ mr)
+ goto out_unlock;
+
+ atomic_inc(&imr->num_deferred_work);
+ call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu,
+ free_implicit_child_mr_rcu);
+
+out_unlock:
+ xa_unlock(&imr->implicit_children);
+}
+
+static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(mni, struct ib_umem_odp, notifier);
struct mlx5_ib_mr *mr;
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ u64 invalidations = 0;
+ unsigned long start;
+ unsigned long end;
int in_block = 0;
u64 addr;
- if (!umem_odp) {
- pr_err("invalidation called on NULL umem or non-ODP umem\n");
- return;
- }
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+ mutex_lock(&umem_odp->umem_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+ /*
+ * If npages is zero then umem_odp->private may not be set up yet. This
+ * does not complete until after the first page is mapped for DMA.
+ */
+ if (!umem_odp->npages)
+ goto out;
mr = umem_odp->private;
- if (!mr || !mr->ibmr.pd)
- return;
-
- start = max_t(u64, ib_umem_start(umem_odp), start);
- end = min_t(u64, ib_umem_end(umem_odp), end);
+ start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ end = min_t(u64, ib_umem_end(umem_odp), range->end);
/*
* Iteration one - zap the HW's MTTs. The notifiers_count ensures that
@@ -276,7 +279,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
* overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
- mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -291,6 +293,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
blk_start_idx = idx;
in_block = 1;
}
+
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
} else {
u64 umr_offset = idx & umr_block_mask;
@@ -308,6 +313,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+
+ mlx5_update_odp_stats(mr, invalidations, invalidations);
+
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -316,16 +324,17 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
- if (unlikely(!umem_odp->npages && mr->parent &&
- !umem_odp->dying)) {
- WRITE_ONCE(mr->live, 0);
- umem_odp->dying = 1;
- atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem_odp->work);
- }
+ if (unlikely(!umem_odp->npages && mr->parent))
+ destroy_unused_implicit_child_mr(mr);
+out:
mutex_unlock(&umem_odp->umem_mutex);
+ return true;
}
+const struct mmu_interval_notifier_ops mlx5_mn_ops = {
+ .invalidate = mlx5_ib_invalidate_range,
+};
+
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
struct ib_odp_caps *caps = &dev->odp_caps;
@@ -390,8 +399,6 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
!MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
-
- return;
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
@@ -416,257 +423,226 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
wq_num, err);
}
-static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
- struct ib_umem_odp *umem_odp,
- bool ksm, int access_flags)
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ unsigned long idx)
{
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem_odp *odp;
struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *ret;
int err;
- mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
- MLX5_IMR_MTT_CACHE_ENTRY);
+ odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+ idx * MLX5_IMR_MTT_SIZE,
+ MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+ ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
if (IS_ERR(mr))
- return mr;
-
- mr->ibmr.pd = pd;
-
- mr->dev = dev;
- mr->access_flags = access_flags;
- mr->mmkey.iova = 0;
- mr->umem = &umem_odp->umem;
-
- if (ksm) {
- err = mlx5_ib_update_xlt(mr, 0,
- mlx5_imr_ksm_entries,
- MLX5_KSM_PAGE_SHIFT,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE);
-
- } else {
- err = mlx5_ib_update_xlt(mr, 0,
- MLX5_IMR_MTT_ENTRIES,
- PAGE_SHIFT,
- MLX5_IB_UPD_XLT_ZAP |
- MLX5_IB_UPD_XLT_ENABLE |
- MLX5_IB_UPD_XLT_ATOMIC);
- }
-
- if (err)
- goto fail;
+ goto out_umem;
+ mr->ibmr.pd = imr->ibmr.pd;
+ mr->access_flags = imr->access_flags;
+ mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
-
- mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
- mr->mmkey.key, dev->mdev, mr);
-
- return mr;
-
-fail:
- mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
- mlx5_mr_cache_free(dev, mr);
-
- return ERR_PTR(err);
-}
-
-static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt)
-{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
- struct ib_umem_odp *odp, *result = NULL;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
- u64 addr = io_virt & MLX5_IMR_MTT_MASK;
- int nentries = 0, start_idx = 0, ret;
- struct mlx5_ib_mr *mtt;
-
- mutex_lock(&odp_mr->umem_mutex);
- odp = odp_lookup(addr, 1, mr);
-
- mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
- io_virt, bcnt, addr, odp);
-
-next_mr:
- if (likely(odp)) {
- if (nentries)
- nentries++;
- } else {
- odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(odp)) {
- mutex_unlock(&odp_mr->umem_mutex);
- return ERR_CAST(odp);
- }
-
- mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
- mr->access_flags);
- if (IS_ERR(mtt)) {
- mutex_unlock(&odp_mr->umem_mutex);
- ib_umem_odp_release(odp);
- return ERR_CAST(mtt);
- }
-
- odp->private = mtt;
- mtt->umem = &odp->umem;
- mtt->mmkey.iova = addr;
- mtt->parent = mr;
- INIT_WORK(&odp->work, mr_leaf_free_action);
-
- smp_store_release(&mtt->live, 1);
-
- if (!nentries)
- start_idx = addr >> MLX5_IMR_MTT_SHIFT;
- nentries++;
- }
-
- /* Return first odp if region not covered by single one */
- if (likely(!result))
- result = odp;
-
- addr += MLX5_IMR_MTT_SIZE;
- if (unlikely(addr < io_virt + bcnt)) {
- odp = odp_next(odp);
- if (odp && ib_umem_start(odp) != addr)
- odp = NULL;
- goto next_mr;
+ mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->parent = imr;
+ odp->private = mr;
+
+ err = mlx5_ib_update_xlt(mr, 0,
+ MLX5_IMR_MTT_ENTRIES,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out_mr;
}
- if (unlikely(nentries)) {
- ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
- MLX5_IB_UPD_XLT_INDIRECT |
- MLX5_IB_UPD_XLT_ATOMIC);
- if (ret) {
- mlx5_ib_err(dev, "Failed to update PAS\n");
- result = ERR_PTR(ret);
+ /*
+ * Once the store to either xarray completes, any error unwind has to
+ * use synchronize_srcu(); avoid this with xa_reserve().
+ */
+ ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
+ if (unlikely(ret)) {
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto out_mr;
}
+ /*
+ * Another thread beat us to creating the child mr, use
+ * theirs.
+ */
+ goto out_mr;
}
- mutex_unlock(&odp_mr->umem_mutex);
- return result;
+ mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
+ return mr;
+
+out_mr:
+ mlx5_mr_cache_free(imr->dev, mr);
+out_umem:
+ ib_umem_odp_release(odp);
+ return ret;
}
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct ib_udata *udata,
int access_flags)
{
- struct mlx5_ib_mr *imr;
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *imr;
+ int err;
umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
+ imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
if (IS_ERR(imr)) {
- ib_umem_odp_release(umem_odp);
- return ERR_CAST(imr);
+ err = PTR_ERR(imr);
+ goto out_umem;
}
+ imr->ibmr.pd = &pd->ibpd;
+ imr->access_flags = access_flags;
+ imr->mmkey.iova = 0;
imr->umem = &umem_odp->umem;
- init_waitqueue_head(&imr->q_leaf_free);
- atomic_set(&imr->num_leaf_free, 0);
- atomic_set(&imr->num_pending_prefetch, 0);
- smp_store_release(&imr->live, 1);
+ imr->ibmr.lkey = imr->mmkey.key;
+ imr->ibmr.rkey = imr->mmkey.key;
+ imr->is_odp_implicit = true;
+ atomic_set(&imr->num_deferred_work, 0);
+ xa_init(&imr->implicit_children);
+
+ err = mlx5_ib_update_xlt(imr, 0,
+ mlx5_imr_ksm_entries,
+ MLX5_KSM_PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err)
+ goto out_mr;
+ err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
+ &imr->mmkey, GFP_KERNEL));
+ if (err)
+ goto out_mr;
+
+ mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
return imr;
+out_mr:
+ mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+ mlx5_mr_cache_free(dev, imr);
+out_umem:
+ ib_umem_odp_release(umem_odp);
+ return ERR_PTR(err);
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- struct rb_node *node;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct mlx5_ib_dev *dev = imr->dev;
+ struct list_head destroy_list;
+ struct mlx5_ib_mr *mtt;
+ struct mlx5_ib_mr *tmp;
+ unsigned long idx;
- down_read(&per_mm->umem_rwsem);
- for (node = rb_first_cached(&per_mm->umem_tree); node;
- node = rb_next(node)) {
- struct ib_umem_odp *umem_odp =
- rb_entry(node, struct ib_umem_odp, interval_tree.rb);
- struct mlx5_ib_mr *mr = umem_odp->private;
+ INIT_LIST_HEAD(&destroy_list);
- if (mr->parent != imr)
- continue;
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
+ /*
+ * This stops the SRCU protected page fault path from touching either
+ * the imr or any children. The page fault path can only reach the
+ * children xarray via the imr.
+ */
+ synchronize_srcu(&dev->odp_srcu);
- mutex_lock(&umem_odp->umem_mutex);
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
- ib_umem_end(umem_odp));
+ xa_lock(&imr->implicit_children);
+ xa_for_each (&imr->implicit_children, idx, mtt) {
+ __xa_erase(&imr->implicit_children, idx);
+ list_add(&mtt->odp_destroy.elm, &destroy_list);
+ }
+ xa_unlock(&imr->implicit_children);
- if (umem_odp->dying) {
- mutex_unlock(&umem_odp->umem_mutex);
- continue;
- }
+ /*
+ * num_deferred_work can only be incremented inside the odp_srcu, or
+ * under xa_lock while the child is in the xarray. Thus at this point
+ * it is only decreasing, and all work holding it is now on the wq.
+ */
+ if (atomic_read(&imr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&imr->num_deferred_work));
+ }
+
+ /*
+ * Fence the imr before we destroy the children. This allows us to
+ * skip updating the XLT of the imr when destroying the child mkeys
+ * that the imr points to.
+ */
+ mlx5_mr_cache_invalidate(imr);
- umem_odp->dying = 1;
- atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem_odp->work);
- mutex_unlock(&umem_odp->umem_mutex);
+ list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
+ free_implicit_child_mr(mtt, false);
+
+ mlx5_mr_cache_free(dev, imr);
+ ib_umem_odp_release(odp_imr);
+}
+
+/**
+ * mlx5_ib_fence_odp_mr - Stop all access to the ODP MR
+ * @mr: The MR to fence
+ *
+ * On return no parallel threads will be touching this MR and no DMA will be
+ * active.
+ */
+void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
+{
+ /* Prevent new page faults and prefetch requests from succeeding */
+ xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
+
+ /* Wait for all running page-fault handlers to finish. */
+ synchronize_srcu(&mr->dev->odp_srcu);
+
+ if (atomic_read(&mr->num_deferred_work)) {
+ flush_workqueue(system_unbound_wq);
+ WARN_ON(atomic_read(&mr->num_deferred_work));
}
- up_read(&per_mm->umem_rwsem);
- wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
+ dma_fence_odp_mr(mr);
}
-#define MLX5_PF_FLAGS_PREFETCH BIT(0)
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
-static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 io_virt, size_t bcnt, u32 *bytes_mapped,
- u32 flags)
+static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ u64 user_va, size_t bcnt, u32 *bytes_mapped,
+ u32 flags)
{
- int npages = 0, current_seq, page_shift, ret, np;
- struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
+ int page_shift, ret, np;
bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
+ unsigned long current_seq;
u64 access_mask;
u64 start_idx, page_mask;
- struct ib_umem_odp *odp;
- size_t size;
-
- if (odp_mr->is_implicit_odp) {
- odp = implicit_mr_get_data(mr, io_virt, bcnt);
-
- if (IS_ERR(odp))
- return PTR_ERR(odp);
- mr = odp->private;
- } else {
- odp = odp_mr;
- }
-
-next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
page_shift = odp->page_shift;
page_mask = ~(BIT(page_shift) - 1);
- start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
+ start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
- if (prefetch && !downgrade && !odp->umem.writable) {
- /* prefetch with write-access must
- * be supported by the MR
- */
- ret = -EINVAL;
- goto out;
- }
-
if (odp->umem.writable && !downgrade)
access_mask |= ODP_WRITE_ALLOWED_BIT;
- current_seq = READ_ONCE(odp->notifiers_seq);
- /*
- * Ensure the sequence number is valid for some time before we call
- * gup.
- */
- smp_rmb();
+ current_seq = mmu_interval_read_begin(&odp->notifier);
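+ /*
+ * If an invalidation runs between mmu_interval_read_begin() and
+ * the mmu_interval_read_retry() check below (taken under
+ * umem_mutex), the XLT update is abandoned and the fault is
+ * retried with a fresh sequence number.
+ */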
- ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
- current_seq);
-
- if (ret < 0)
- goto out;
-
- np = ret;
+ np = ib_umem_odp_map_dma_pages(odp, user_va, bcnt, access_mask,
+ current_seq);
+ if (np < 0)
+ return np;
mutex_lock(&odp->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
+ if (!mmu_interval_read_retry(&odp->notifier, current_seq)) {
/*
* No need to check whether the MTTs really belong to
* this MR, since ib_umem_odp_map_dma_pages already
@@ -681,53 +657,127 @@ next_mr:
if (ret < 0) {
if (ret != -EAGAIN)
- mlx5_ib_err(dev, "Failed to update mkey page tables\n");
+ mlx5_ib_err(mr->dev,
+ "Failed to update mkey page tables\n");
goto out;
}
if (bytes_mapped) {
u32 new_mappings = (np << page_shift) -
- (io_virt - round_down(io_virt, 1 << page_shift));
- *bytes_mapped += min_t(u32, new_mappings, size);
+ (user_va - round_down(user_va, 1 << page_shift));
+
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
}
- npages += np << (page_shift - PAGE_SHIFT);
- bcnt -= size;
+ return np << (page_shift - PAGE_SHIFT);
+
+out:
+ return ret;
+}
+
+static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
+ struct ib_umem_odp *odp_imr, u64 user_va,
+ size_t bcnt, u32 *bytes_mapped, u32 flags)
+{
+ unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long upd_start_idx = end_idx + 1;
+ unsigned long upd_len = 0;
+ unsigned long npages = 0;
+ int err;
+ int ret;
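+
+ /*
+ * upd_start_idx starts one past end_idx so that upd_len stays zero
+ * unless a new child MR is created below; the final XLT update then
+ * covers exactly the range of newly created children.
+ */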
- if (unlikely(bcnt)) {
- struct ib_umem_odp *next;
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
+ mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ return -EFAULT;
- io_virt += size;
- next = odp_next(odp);
- if (unlikely(!next || ib_umem_start(next) != io_virt)) {
- mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
- io_virt, next);
- return -EAGAIN;
+ /* Fault each child mr that intersects with our interval. */
+ while (bcnt) {
+ unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *mtt;
+ u64 len;
+
+ mtt = xa_load(&imr->implicit_children, idx);
+ if (unlikely(!mtt)) {
+ mtt = implicit_get_child_mr(imr, idx);
+ if (IS_ERR(mtt)) {
+ ret = PTR_ERR(mtt);
+ goto out;
+ }
+ upd_start_idx = min(upd_start_idx, idx);
+ upd_len = idx - upd_start_idx + 1;
}
- odp = next;
- mr = odp->private;
- goto next_mr;
+
+ umem_odp = to_ib_umem_odp(mtt->umem);
+ len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
+ user_va;
+
+ ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
+ bytes_mapped, flags);
+ if (ret < 0)
+ goto out;
+ user_va += len;
+ bcnt -= len;
+ npages += ret;
}
- return npages;
+ ret = npages;
+ /*
+ * Any time the implicit_children are changed we must perform an
+ * update of the xlt before exiting to ensure the HW and the
+ * implicit_children remain synchronized.
+ */
out:
- if (ret == -EAGAIN) {
- unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
-
- if (!wait_for_completion_timeout(&odp->notifier_completion,
- timeout)) {
- mlx5_ib_warn(
- dev,
- "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
- current_seq, odp->notifiers_seq,
- odp->notifiers_count);
- }
- }
+ if (likely(!upd_len))
+ return ret;
+ /*
+ * Note this is not strictly ordered correctly: the KSM is updated after
+ * the implicit_children xarray is updated, so a parallel page fault could
+ * see an MR that is not yet visible in the KSM. This is similar to a
+ * parallel page fault seeing a MR that is being concurrently removed
+ * from the KSM. Both of these improbable situations are resolved
+ * safely by resuming the HW and then taking another page fault. The
+ * next pagefault handler will see the new information.
+ */
+ mutex_lock(&odp_imr->umem_mutex);
+ err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ if (err) {
+ mlx5_ib_err(imr->dev, "Failed to update PAS\n");
+ return err;
+ }
return ret;
}
+/*
+ * Returns:
+ * -EFAULT: The range [io_virt, io_virt + bcnt) is not within the MR, it
+ * covers pages that are not accessible, or the MR is no longer valid.
+ * -EAGAIN/-ENOMEM: The operation should be retried
+ *
+ * -EINVAL/others: General internal malfunction
+ * >0: Number of pages mapped
+ */
+static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ if (!odp->is_implicit_odp) {
+ if (unlikely(io_virt < ib_umem_start(odp) ||
+ ib_umem_end(odp) - io_virt < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+}
+
struct pf_frame {
struct pf_frame *next;
u32 key;
@@ -775,10 +825,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
struct ib_pd *pd, u32 key,
u64 io_virt, size_t bcnt,
u32 *bytes_committed,
- u32 *bytes_mapped, u32 flags)
+ u32 *bytes_mapped)
{
int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
- bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
struct pf_frame *head = NULL, *frame;
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
@@ -787,58 +836,49 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
size_t offset;
int ndescs;
- srcu_key = srcu_read_lock(&dev->mr_srcu);
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
next_mr:
- mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+ * The user could specify an SGL with multiple lkeys and only
+ * some of them are ODP. Treat the non-ODP ones as fully
+ * faulted.
+ */
+ ret = 0;
+ goto srcu_unlock;
+ }
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
goto srcu_unlock;
}
- if (prefetch && mmkey->type != MLX5_MKEY_MR) {
- mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
- ret = -EINVAL;
- goto srcu_unlock;
- }
-
switch (mmkey->type) {
case MLX5_MKEY_MR:
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
- mlx5_ib_dbg(dev, "got dead MR\n");
- ret = -EFAULT;
- goto srcu_unlock;
- }
-
- if (prefetch) {
- if (!is_odp_mr(mr) ||
- mr->ibmr.pd != pd) {
- mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
- is_odp_mr(mr) ? "MR is not ODP" :
- "PD is not of the MR");
- ret = -EINVAL;
- goto srcu_unlock;
- }
- }
-
- if (!is_odp_mr(mr)) {
- mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
- key);
- if (bytes_mapped)
- *bytes_mapped += bcnt;
- ret = 0;
- goto srcu_unlock;
- }
- ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
if (ret < 0)
goto srcu_unlock;
+ /*
+ * Prefetching a page generates a page fault in order to bring
+ * the page into main memory, so prefetches are counted here
+ * together with ordinary page faults.
+ */
+ mlx5_update_odp_stats(mr, faults, ret);
+
npages += ret;
ret = 0;
break;
@@ -928,7 +968,7 @@ srcu_unlock:
}
kfree(out);
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
*bytes_committed = 0;
return ret ? ret : npages;
}
@@ -1009,7 +1049,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, key,
io_virt, bcnt,
&pfault->bytes_committed,
- bytes_mapped, 0);
+ bytes_mapped);
if (ret < 0)
break;
npages += ret;
@@ -1292,8 +1332,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
}
ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
- &pfault->bytes_committed, NULL,
- 0);
+ &pfault->bytes_committed, NULL);
if (ret == -EAGAIN) {
/* We're racing with an invalidation, don't prefetch */
prefetch_activated = 0;
@@ -1320,8 +1359,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
ret = pagefault_single_data_segment(dev, NULL, rkey, address,
prefetch_len,
- &bytes_committed, NULL,
- 0);
+ &bytes_committed, NULL);
if (ret < 0 && ret != -EAGAIN) {
mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
ret, pfault->token, address, prefetch_len);
@@ -1581,7 +1619,6 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
.advise_mr = mlx5_ib_advise_mr,
- .invalidate_range = mlx5_ib_invalidate_range,
};
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
@@ -1624,114 +1661,128 @@ int mlx5_ib_odp_init(void)
struct prefetch_mr_work {
struct work_struct work;
- struct ib_pd *pd;
u32 pf_flags;
u32 num_sge;
- struct ib_sge sg_list[0];
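+ /* Each frag pins its MR via an elevated num_deferred_work */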
+ struct {
+ u64 io_virt;
+ struct mlx5_ib_mr *mr;
+ size_t length;
+ } frags[];
};
-static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
- struct ib_sge *sg_list, u32 num_sge,
- u32 from)
+static void destroy_prefetch_work(struct prefetch_mr_work *work)
{
u32 i;
- int srcu_key;
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
- for (i = from; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
-
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- atomic_dec(&mr->num_pending_prefetch);
- }
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
+ for (i = 0; i < work->num_sge; ++i)
+ atomic_dec(&work->frags[i].mr->num_deferred_work);
+ kvfree(work);
}
-static bool num_pending_prefetch_inc(struct ib_pd *pd,
- struct ib_sge *sg_list, u32 num_sge)
+static struct mlx5_ib_mr *
+get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- bool ret = true;
- u32 i;
+ struct mlx5_core_mkey *mmkey;
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
- for (i = 0; i < num_sge; ++i) {
- struct mlx5_core_mkey *mmkey;
- struct mlx5_ib_mr *mr;
+ lockdep_assert_held(&dev->odp_srcu);
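+
+ /*
+ * The returned MR is only guaranteed to stay valid while the
+ * caller holds odp_srcu; to use it after the unlock, a
+ * num_deferred_work reference must be taken first (see
+ * init_prefetch_work()).
+ */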
- mmkey = xa_load(&dev->mdev->priv.mkey_table,
- mlx5_base_mkey(sg_list[i].lkey));
- if (!mmkey || mmkey->key != sg_list[i].lkey) {
- ret = false;
- break;
- }
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
+ if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ return NULL;
- if (mmkey->type != MLX5_MKEY_MR) {
- ret = false;
- break;
- }
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+ if (mr->ibmr.pd != pd)
+ return NULL;
- if (!smp_load_acquire(&mr->live)) {
- ret = false;
- break;
- }
+ odp = to_ib_umem_odp(mr->umem);
- if (mr->ibmr.pd != pd) {
- ret = false;
- break;
- }
+ /* prefetch with write-access must be supported by the MR */
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !odp->umem.writable)
+ return NULL;
- atomic_inc(&mr->num_pending_prefetch);
- }
+ return mr;
+}
- if (!ret)
- num_pending_prefetch_dec(dev, sg_list, i, 0);
+static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ u32 bytes_mapped = 0;
+ u32 i;
- return ret;
+ for (i = 0; i < work->num_sge; ++i)
+ pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+ work->pf_flags);
+
+ destroy_prefetch_work(work);
}
-static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
- struct ib_sge *sg_list, u32 num_sge)
+static bool init_prefetch_work(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct prefetch_mr_work *work,
+ struct ib_sge *sg_list, u32 num_sge)
{
u32 i;
- int ret = 0;
- struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- struct ib_sge *sg = &sg_list[i];
- int bytes_committed = 0;
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr =
+ get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!work->frags[i].mr) {
+ /* i MRs were pinned above; unpin them all and free work */
+ work->num_sge = i;
+ destroy_prefetch_work(work);
+ return false;
+ }
- ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
- sg->length,
- &bytes_committed, NULL,
- pf_flags);
- if (ret < 0)
- break;
+ /* Keep the MR pointer valid after the SRCU read lock is dropped */
+ atomic_inc(&work->frags[i].mr->num_deferred_work);
}
-
- return ret < 0 ? ret : 0;
+ work->num_sge = num_sge;
+ return true;
}
-static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
{
- struct prefetch_mr_work *w =
- container_of(work, struct prefetch_mr_work, work);
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 bytes_mapped = 0;
+ int srcu_key;
+ int ret = 0;
+ u32 i;
+
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
- if (ib_device_try_get(w->pd->device)) {
- mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
- w->num_sge);
- ib_device_put(w->pd->device);
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (!mr) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+ &bytes_mapped, pf_flags);
+ if (ret < 0)
+ goto out;
}
+ ret = 0;
- num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
- w->num_sge, 0);
- kvfree(w);
+out:
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return ret;
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1739,43 +1790,27 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
+ u32 pf_flags = 0;
struct prefetch_mr_work *work;
- bool valid_req;
int srcu_key;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
- return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
+ return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
num_sge);
- work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
if (!work)
return -ENOMEM;
- memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
-
- /* It is guaranteed that the pd when work is executed is the pd when
- * work was queued since pd can't be destroyed while it holds MRs and
- * destroying a MR leads to flushing the workquque
- */
- work->pd = pd;
- work->pf_flags = pf_flags;
- work->num_sge = num_sge;
-
- INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
-
- srcu_key = srcu_read_lock(&dev->mr_srcu);
-
- valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
- if (valid_req)
- queue_work(system_unbound_wq, &work->work);
- else
- kvfree(work);
-
- srcu_read_unlock(&dev->mr_srcu, srcu_key);
-
- return valid_req ? 0 : -EINVAL;
+ srcu_key = srcu_read_lock(&dev->odp_srcu);
+ if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return -EINVAL;
+ }
+ queue_work(system_unbound_wq, &work->work);
+ srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ return 0;
}
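
The reworked prefetch path above repeats one pattern: resolve the MR under the odp_srcu read lock, pin it by bumping num_deferred_work, and only then hand it to the unbound workqueue. A minimal self-contained sketch of that lifetime pattern follows; the obj/table names are hypothetical, and only the srcu, xarray, atomic and workqueue calls mirror the patch:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>

struct obj {
	atomic_t num_deferred_work;	/* pins the object for deferred work */
};

struct deferred {
	struct work_struct work;
	struct obj *obj;
};

static void deferred_fn(struct work_struct *w)
{
	struct deferred *d = container_of(w, struct deferred, work);

	/* ... act on d->obj without holding the SRCU read lock ... */

	atomic_dec(&d->obj->num_deferred_work);	/* unpin */
	kfree(d);
}

static int queue_deferred(struct srcu_struct *srcu, struct xarray *xa,
			  unsigned long key)
{
	struct deferred *d;
	struct obj *o;
	int srcu_key;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	srcu_key = srcu_read_lock(srcu);
	o = xa_load(xa, key);			/* only valid under SRCU */
	if (!o) {
		srcu_read_unlock(srcu, srcu_key);
		kfree(d);
		return -ENOENT;
	}
	atomic_inc(&o->num_deferred_work);	/* pin before dropping SRCU */
	d->obj = o;
	INIT_WORK(&d->work, deferred_fn);
	queue_work(system_unbound_wq, &d->work);
	srcu_read_unlock(srcu, srcu_key);
	return 0;
}

The teardown side then removes the object from the xarray, synchronizes SRCU, and waits for the deferred-work counter to drain before freeing.
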
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5fd071c05944..7e51870e9e01 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -749,7 +749,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
{
int err;
- *umem = ib_umem_get(udata, addr, size, 0, 0);
+ *umem = ib_umem_get(udata, addr, size, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
@@ -806,7 +806,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (!ucmd->buf_addr)
return -EINVAL;
- rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0, 0);
+ rwq->umem = ib_umem_get(udata, ucmd->buf_addr, rwq->buf_size, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
@@ -1041,11 +1041,14 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
IB_QP_CREATE_NETIF_QP |
- mlx5_ib_create_qp_sqpn_qp1()))
+ MLX5_IB_QP_CREATE_SQPN_QP1 |
+ MLX5_IB_QP_CREATE_WC_TEST))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
+ else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1104,7 +1107,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+ if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
@@ -2140,7 +2143,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EINVAL;
}
if (init_attr->create_flags &
- mlx5_ib_create_qp_sqpn_qp1()) {
+ MLX5_IB_QP_CREATE_SQPN_QP1) {
mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
return -EINVAL;
}
@@ -5330,7 +5333,6 @@ out:
* we hit doorbell */
wmb();
- /* currently we support only regular doorbells */
mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
@@ -5825,7 +5827,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
+ qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -5957,12 +5959,21 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ /*
+ * In firmware, the number of strides in each WQE is
+ * 512 * 2^single_wqe_log_num_of_strides. Input values
+ * 3 to 8 are encoded as 10 to 15, and values 9 to 18
+ * are encoded as 0 to 9.
+ */
+ static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9 };
MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
MLX5_SET(wq, wq, log_wqe_stride_size,
rwq->single_stride_log_num_of_bytes -
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
- MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ fw_map[rwq->log_num_strides -
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
}
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
@@ -6037,6 +6048,19 @@ static int set_user_rq_size(struct mlx5_ib_dev *dev,
return 0;
}
+static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
+{
+ if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
+ (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ return true;
+}
+
static int prepare_user_rq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata,
@@ -6084,14 +6108,16 @@ static int prepare_user_rq(struct ib_pd *pd,
MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
return -EINVAL;
}
- if ((ucmd.single_wqe_log_num_of_strides >
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
- (ucmd.single_wqe_log_num_of_strides <
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
- mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
- ucmd.single_wqe_log_num_of_strides,
- MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
- MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ if (!log_of_strides_valid(dev,
+ ucmd.single_wqe_log_num_of_strides)) {
+ mlx5_ib_dbg(
+ dev,
+ "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
return -EINVAL;
}
rwq->single_stride_log_num_of_bytes =
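
The fw_map table and log_of_strides_valid() above implement the extended striding-RQ encoding. A small worked sketch of the translation, assuming MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES is 3 as implied by the 16-entry table:

#include <linux/types.h>

/* Mirrors fw_map in create_rq() above */
static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
			     2, 3, 4, 5, 6, 7, 8, 9 };

static u8 encode_log_num_strides(u32 log_num_strides)
{
	/* index 0 corresponds to log_num_strides == 3 */
	return fw_map[log_num_strides - 3];
}

/*
 * encode_log_num_strides(3)  == 10  (extended range, needs the
 *                                    ext_stride_num_range capability)
 * encode_log_num_strides(9)  == 0   (legacy minimum)
 * encode_log_num_strides(18) == 9   (maximum)
 */
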
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
new file mode 100644
index 000000000000..8f6c04f12531
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <uapi/rdma/rdma_netlink.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/restrack.h>
+#include "mlx5_ib.h"
+
+static int fill_stat_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+
+ if (!table_attr)
+ goto err;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
+ atomic64_read(&mr->odp_stats.faults)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations",
+ atomic64_read(&mr->odp_stats.invalidations)))
+ goto err_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ struct ib_mr *ibmr = container_of(res, struct ib_mr, res);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (mr->is_odp_implicit) {
+ if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
+ goto err;
+ } else {
+ if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+int mlx5_ib_fill_res_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_res_mr_entry(msg, res);
+
+ return 0;
+}
+
+int mlx5_ib_fill_stat_entry(struct sk_buff *msg,
+ struct rdma_restrack_entry *res)
+{
+ if (res->type == RDMA_RESTRACK_MR)
+ return fill_stat_mr_entry(msg, res);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4e7fde86c96b..62939df3c692 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -80,7 +80,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0);
if (IS_ERR(srq->umem)) {
mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
err = PTR_ERR(srq->umem);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index bfd4eebc1182..599794c5a78f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -576,14 +576,10 @@ enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7ad517da4917..99aa8183a7f2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -196,30 +196,19 @@ static void forward_trap(struct mthca_dev *dev,
}
}
-int mthca_process_mad(struct ib_device *ibdev,
- int mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+int mthca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int err;
u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
- slid == 0) {
- forward_trap(to_mdev(ibdev), port_num, in_mad);
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP && !slid) {
+ forward_trap(to_mdev(ibdev), port_num, in);
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
@@ -229,40 +218,39 @@ int mthca_process_mad(struct ib_device *ibdev,
* Only handle PMA and Mellanox vendor-specific class gets and
* sets for other classes.
*/
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
return IB_MAD_RESULT_SUCCESS;
/*
* Don't process SMInfo queries or vendor-specific
* MADs -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+ ((in->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
IB_SMP_ATTR_VENDOR_MASK))
return IB_MAD_RESULT_SUCCESS;
- } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
- in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
- if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
- in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ } else if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
+ in->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
+ if (in->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in->mad_hdr.method != IB_MGMT_METHOD_SET)
return IB_MAD_RESULT_SUCCESS;
} else
return IB_MAD_RESULT_SUCCESS;
- if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
- in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
- in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
+ if ((in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
+ in->mad_hdr.method == IB_MGMT_METHOD_SET &&
+ in->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
prev_lid = ib_lid_cpu16(pattr.lid);
- err = mthca_MAD_IFC(to_mdev(ibdev),
- mad_flags & IB_MAD_IGNORE_MKEY,
- mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad);
+ err = mthca_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
else if (err) {
@@ -270,16 +258,16 @@ int mthca_process_mad(struct ib_device *ibdev,
return IB_MAD_RESULT_FAILURE;
}
- if (!out_mad->mad_hdr.status) {
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
- node_desc_override(ibdev, out_mad);
+ if (!out->mad_hdr.status) {
+ smp_snoop(ibdev, port_num, in, prev_lid);
+ node_desc_override(ibdev, out);
}
/* set return bit in status of directed route responses */
- if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
- if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ if (in->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
/* no response for trap repress */
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 23554d8bf241..33002530fee7 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -880,9 +880,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(udata, start, length, acc,
- ucmd.mr_attrs & MTHCA_MR_DMASYNC);
-
+ mr->umem = ib_umem_get(udata, start, length, acc);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err;
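
This hunk shows the series-wide API change: ib_umem_get() no longer takes a dmasync argument (of the callers in this diff, mthca was the only one deriving it from user input). A minimal sketch of the updated calling convention, with a hypothetical wrapper name:

#include <linux/err.h>
#include <rdma/ib_umem.h>

static int pin_user_buffer(struct ib_udata *udata, unsigned long addr,
			   size_t len, int access, struct ib_umem **umem)
{
	*umem = ib_umem_get(udata, addr, len, access);	/* no dmasync */
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);	/* nothing was pinned */
	return 0;
}
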
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 8d3e36d548aa..2b7f00ac41b0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -247,35 +247,20 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
}
-int ocrdma_process_mad(struct ib_device *ibdev,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index)
{
- int status;
+ int status = IB_MAD_RESULT_SUCCESS;
struct ocrdma_dev *dev;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_PERF_MGMT:
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
dev = get_ocrdma_dev(ibdev);
- if (!ocrdma_pma_counters(dev, out_mad))
- status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
- else
- status = IB_MAD_RESULT_SUCCESS;
- break;
- default:
- status = IB_MAD_RESULT_SUCCESS;
- break;
+ ocrdma_pma_counters(dev, out);
+ status |= IB_MAD_RESULT_REPLY;
}
+
return status;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 64cb82c08664..9780afcde780 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -56,12 +56,9 @@ int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
-int ocrdma_process_mad(struct ib_device *,
- int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
#endif /* __OCRDMA_AH_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c15cfc6cef81..d8c47d24d6d6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -166,7 +166,6 @@ static const struct ib_device_ops ocrdma_dev_ops = {
.get_port_immutable = ocrdma_port_immutable,
.map_mr_sg = ocrdma_map_mr_sg,
.mmap = ocrdma_mmap,
- .modify_port = ocrdma_modify_port,
.modify_qp = ocrdma_modify_qp,
.poll_cq = ocrdma_poll_cq,
.post_recv = ocrdma_post_recv,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6ef89c226ad8..c2e0d0fa44be 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -2034,7 +2034,7 @@ struct ocrdma_rx_stats {
};
struct ocrdma_rx_qp_err_stats {
- u32 nak_invalid_requst_errors;
+ u32 nak_invalid_request_errors;
u32 nak_remote_operation_errors;
u32 nak_count_remote_access_errors;
u32 local_length_errors;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index a902942adb5d..5f831e3bdbad 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -423,8 +423,8 @@ static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
pcur = stats;
- pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
- (u64)rx_qp_err_stats->nak_invalid_requst_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
+ (u64)rx_qp_err_stats->nak_invalid_request_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
(u64)rx_qp_err_stats->nak_remote_operation_errors);
pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
@@ -670,12 +670,10 @@ err:
return -EFAULT;
}
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad)
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
{
struct ib_pma_portcounters *pma_cnt;
- memset(out_mad->data, 0, sizeof out_mad->data);
pma_cnt = (void *)(out_mad->data + 40);
ocrdma_update_stats(dev);
@@ -683,7 +681,6 @@ int ocrdma_pma_counters(struct ocrdma_dev *dev,
pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
- return 0;
}
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index bba1fec4f11f..98feca26ac55 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -69,7 +69,6 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
-int ocrdma_pma_counters(struct ocrdma_dev *dev,
- struct ib_mad *out_mad);
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad);
#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e8267e590772..9bc1ca6f6f9e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -190,12 +190,6 @@ int ocrdma_query_port(struct ib_device *ibdev,
return 0;
}
-int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
unsigned long len)
{
@@ -875,7 +869,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 32488da1b752..3a5010881be5 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -54,8 +54,6 @@ int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
struct ib_udata *uhw);
int ocrdma_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int ocrdma_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u8 port_num);
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dc71b6e16a07..dcdc85a1ab25 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -212,7 +212,7 @@ static const struct ib_device_ops qedr_dev_ops = {
.get_link_layer = qedr_link_layer,
.map_mr_sg = qedr_map_mr_sg,
.mmap = qedr_mmap,
- .modify_port = qedr_modify_port,
+ .mmap_free = qedr_mmap_free,
.modify_qp = qedr_modify_qp,
.modify_srq = qedr_modify_srq,
.poll_cq = qedr_poll_cq,
@@ -357,9 +357,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
return -ENOMEM;
spin_lock_init(&dev->sgid_lock);
+ xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
if (IS_IWARP(dev)) {
- xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
+ xa_init(&dev->qps);
dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
}
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 0cfd849b13d6..5488dbd59d3c 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -40,6 +40,7 @@
#include <linux/qed/qed_rdma_if.h>
#include <linux/qed/qede_rdma.h>
#include <linux/qed/roce_common.h>
+#include <linux/completion.h>
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
@@ -230,14 +231,16 @@ struct qedr_ucontext {
struct qedr_dev *dev;
struct qedr_pd *pd;
void __iomem *dpi_addr;
+ struct rdma_user_mmap_entry *db_mmap_entry;
u64 dpi_phys_addr;
u32 dpi_size;
u16 dpi;
+ bool db_rec;
+};
- struct list_head mm_head;
-
- /* Lock to protect mm list */
- struct mutex mm_list_lock;
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
};
union db_prod64 {
@@ -265,6 +268,13 @@ struct qedr_userq {
struct qedr_pbl *pbl_tbl;
u64 buf_addr;
size_t buf_len;
+
+ /* doorbell recovery */
+ void __iomem *db_addr;
+ struct qedr_user_db_rec *db_rec_data;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ void __iomem *db_rec_db2_addr;
+ union db_prod32 db_rec_db2_data;
};
struct qedr_cq {
@@ -300,19 +310,6 @@ struct qedr_pd {
struct qedr_ucontext *uctx;
};
-struct qedr_mm {
- struct {
- u64 phy_addr;
- unsigned long len;
- } key;
- struct list_head entry;
-};
-
-union db_prod32 {
- struct rdma_pwm_val16_data data;
- u32 raw;
-};
-
struct qedr_qp_hwq_info {
/* WQE Elements */
struct qed_chain pbl;
@@ -377,10 +374,20 @@ enum qedr_qp_err_bitmap {
QEDR_QP_ERR_RQ_PBL_FULL = 32,
};
+enum qedr_qp_create_type {
+ QEDR_QP_CREATE_NONE,
+ QEDR_QP_CREATE_USER,
+ QEDR_QP_CREATE_KERNEL,
+};
+
+enum qedr_iwarp_cm_flags {
+ QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0),
+ QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
+};
+
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
- struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@@ -395,6 +402,7 @@ struct qedr_qp {
u32 id;
struct qedr_pd *pd;
enum ib_qp_type qp_type;
+ enum qedr_qp_create_type create_type;
struct qed_rdma_qp *qed_qp;
u32 qp_id;
u16 icid;
@@ -437,8 +445,11 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
- atomic_t refcnt;
- bool destroyed;
+
+ /* synchronization objects used with iwarp ep */
+ struct kref refcnt;
+ struct completion iwarp_cm_comp;
+ unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
struct qedr_ah {
@@ -476,6 +487,18 @@ struct qedr_mr {
u32 npages;
};
+struct qedr_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ struct qedr_dev *dev;
+ union {
+ u64 io_address;
+ void *address;
+ };
+ size_t length;
+ u16 dpi;
+ u8 mmap_flag;
+};
+
#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
@@ -531,7 +554,7 @@ struct qedr_iw_ep {
struct iw_cm_id *cm_id;
struct qedr_qp *qp;
void *qed_context;
- u8 during_connect;
+ struct kref refcnt;
};
static inline
@@ -574,4 +597,11 @@ static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
{
return container_of(ibsrq, struct qedr_srq, ibsrq);
}
+
+static inline struct qedr_user_mmap_entry *
+get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct qedr_user_mmap_entry,
+ rdma_entry);
+}
#endif
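
The new refcnt/iwarp_cm_comp/iwarp_cm_flags trio in struct qedr_qp replaces the old atomic_t refcnt and during_connect/destroyed flags. A condensed, hypothetical sketch of the destroy-side handshake these fields enable (the connect side is in qedr_iw_cm.c below; real error handling omitted):

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "qedr.h"

static void qedr_qp_release(struct kref *ref)
{
	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);

	kfree(qp);
}

static void qedr_destroy_side(struct qedr_qp *qp)
{
	/*
	 * Claim each CM slot; if the CM path claimed it first, wait
	 * for it to signal completion before tearing the QP down.
	 */
	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags))
		wait_for_completion(&qp->iwarp_cm_comp);

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
			     &qp->iwarp_cm_flags))
		wait_for_completion(&qp->iwarp_cm_comp);

	kref_put(&qp->refcnt, qedr_qp_release);	/* drop creation ref */
}
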
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 22881d4442b9..792eecd206b6 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
}
}
+static void qedr_iw_free_qp(struct kref *ref)
+{
+ struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
+
+ kfree(qp);
+}
+
+static void
+qedr_iw_free_ep(struct kref *ref)
+{
+ struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
+
+ if (ep->qp)
+ kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
+
+ if (ep->cm_id)
+ ep->cm_id->rem_ref(ep->cm_id);
+
+ kfree(ep);
+}
+
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
ep->dev = dev;
ep->qed_context = params->ep_context;
+ kref_init(&ep->refcnt);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
@@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
- if (ep->cm_id) {
+ if (ep->cm_id)
qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
struct qedr_qp *qp = ep->qp;
struct iw_cm_event event;
- if (qp->destroyed) {
- kfree(dwork);
- qedr_iw_qp_rem_ref(&qp->ibqp);
- return;
- }
+ /* The QP won't be released until we release the ep;
+ * the ep's refcount was taken before this function was
+ * scheduled, so it is safe to access the QP here.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ goto out;
memset(&event, 0, sizeof(event));
event.status = dwork->status;
@@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
else
qp_params.new_state = QED_ROCE_QP_STATE_SQD;
- kfree(dwork);
if (ep->cm_id)
ep->cm_id->event_handler(ep->cm_id, &event);
@@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
- qedr_iw_qp_rem_ref(&qp->ibqp);
+ complete(&ep->qp->iwarp_cm_comp);
+out:
+ kfree(dwork);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
@@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context,
struct qedr_discon_work *work;
struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
struct qedr_dev *dev = ep->dev;
- struct qedr_qp *qp = ep->qp;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
- qedr_iw_qp_add_ref(&qp->ibqp);
+ /* We can't get a close event before disconnect, but since
+ * we are deferring to a work queue we must make sure close
+ * doesn't free the ep, so take a reference on it.
+ */
+ kref_get(&ep->refcnt);
+
work->ep = ep;
work->event = params->event;
work->status = params->status;
@@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context,
if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
DP_DEBUG(dev, QEDR_MSG_IWARP,
"PASSIVE connection refused releasing ep...\n");
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return;
}
+ complete(&ep->qp->iwarp_cm_comp);
qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
if (params->status < 0)
qedr_iw_close_event(context, params);
}
+static void
+qedr_iw_active_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ complete(&ep->qp->iwarp_cm_comp);
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
+
+ if (params->status < 0)
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+}
+
static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
@@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
qedr_iw_mpa_reply(context, params);
break;
case QED_IWARP_EVENT_PASSIVE_COMPLETE:
- ep->during_connect = 0;
qedr_iw_passive_complete(context, params);
break;
-
case QED_IWARP_EVENT_ACTIVE_COMPLETE:
- ep->during_connect = 0;
- qedr_iw_issue_event(context,
- params,
- IW_CM_EVENT_CONNECT_REPLY);
- if (params->status < 0) {
- struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
-
- ep->cm_id->rem_ref(ep->cm_id);
- ep->cm_id = NULL;
- }
+ qedr_iw_active_complete(context, params);
break;
case QED_IWARP_EVENT_DISCONNECT:
qedr_iw_disconnect_event(context, params);
break;
case QED_IWARP_EVENT_CLOSE:
- ep->during_connect = 0;
qedr_iw_close_event(context, params);
break;
case QED_IWARP_EVENT_RQ_EMPTY:
@@ -451,10 +481,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
if ((!dst) || dst->error) {
if (dst) {
- dst_release(dst);
DP_ERR(dev,
"ip6_route_output returned dst->error = %d\n",
dst->error);
+ dst_release(dst);
}
return -EINVAL;
}
@@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev,
return rc;
}
+static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
+{
+ struct qedr_qp *qp;
+
+ xa_lock(&dev->qps);
+ qp = xa_load(&dev->qps, qpn);
+ if (qp)
+ kref_get(&qp->refcnt);
+ xa_unlock(&dev->qps);
+
+ return qp;
+}
+
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct qedr_dev *dev = get_qedr_dev(cm_id->device);
@@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int rc = 0;
int i;
- qp = xa_load(&dev->qps, conn_param->qpn);
- if (unlikely(!qp))
- return -EINVAL;
-
laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
@@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return -ENOMEM;
ep->dev = dev;
+ kref_init(&ep->refcnt);
+
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
+ if (!qp) {
+ rc = -EINVAL;
+ goto err;
+ }
+
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
in_params.qp = qp->qed_qp;
memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already being destroyed */
+
rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
err:
- cm_id->rem_ref(cm_id);
- kfree(ep);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
return rc;
}
@@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct qedr_dev *dev = ep->dev;
struct qedr_qp *qp;
struct qed_iwarp_accept_in params;
- int rc;
+ int rc = 0;
DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
- qp = xa_load(&dev->qps, conn_param->qpn);
+ qp = qedr_iw_load_qp(dev, conn_param->qpn);
if (!qp) {
DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
return -EINVAL;
}
ep->qp = qp;
- qp->ep = ep;
cm_id->add_ref(cm_id);
ep->cm_id = cm_id;
@@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
params.ird = conn_param->ird;
params.ord = conn_param->ord;
- ep->during_connect = 1;
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ goto err; /* QP already destroyed */
+
rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
- if (rc)
+ if (rc) {
+ complete(&qp->iwarp_cm_comp);
goto err;
+ }
return rc;
+
err:
- ep->during_connect = 0;
- cm_id->rem_ref(cm_id);
+ kref_put(&ep->refcnt, qedr_iw_free_ep);
+
return rc;
}
@@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- atomic_inc(&qp->refcnt);
+ kref_get(&qp->refcnt);
}
void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
struct qedr_qp *qp = get_qedr_qp(ibqp);
- if (atomic_dec_and_test(&qp->refcnt)) {
- xa_erase_irq(&qp->dev->qps, qp->qp_id);
- kfree(qp);
- }
+ kref_put(&qp->refcnt, qedr_iw_free_qp);
}
struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
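
Both qedr_iw_connect() and qedr_iw_accept() above follow the same contract against destroy: claim the connect bit before touching hardware, and signal iwarp_cm_comp if the hardware call fails so a concurrent destroy never waits forever. A condensed, hypothetical wrapper showing just that contract:

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include "qedr.h"

static int qedr_cm_op(struct qedr_qp *qp, unsigned long bit,
		      int (*hw_op)(struct qedr_qp *qp))
{
	int rc;

	if (test_and_set_bit(bit, &qp->iwarp_cm_flags))
		return -EINVAL;		/* QP already being destroyed */

	rc = hw_op(qp);
	if (rc)
		complete(&qp->iwarp_cm_comp);	/* nothing left in flight */

	/* on success the CM event handler completes iwarp_cm_comp later */
	return rc;
}
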
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6f3ce86019b7..4cd292966aa9 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -51,6 +51,7 @@
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"
+#include "qedr_iw_cm.h"
#define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
#define RDMA_MAX_SGE_PER_SRQ (4)
@@ -58,6 +59,11 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+enum {
+ QEDR_USER_MMAP_IO_WC = 0,
+ QEDR_USER_MMAP_PHYS_PAGE,
+};
+
static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
size_t len)
{
@@ -250,78 +256,31 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
return 0;
}
-int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
- struct ib_port_modify *props)
-{
- return 0;
-}
-
-static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- struct qedr_mm *mm;
-
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm)
- return -ENOMEM;
-
- mm->key.phy_addr = phy_addr;
- /* This function might be called with a length which is not a multiple
- * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
- * forces this granularity by increasing the requested size if needed.
- * When qedr_mmap is called, it will search the list with the updated
- * length as a key. To prevent search failures, the length is rounded up
- * in advance to PAGE_SIZE.
- */
- mm->key.len = roundup(len, PAGE_SIZE);
- INIT_LIST_HEAD(&mm->entry);
-
- mutex_lock(&uctx->mm_list_lock);
- list_add(&mm->entry, &uctx->mm_head);
- mutex_unlock(&uctx->mm_list_lock);
-
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- (unsigned long long)mm->key.phy_addr,
- (unsigned long)mm->key.len, uctx);
-
- return 0;
-}
-
-static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
- unsigned long len)
-{
- bool found = false;
- struct qedr_mm *mm;
-
- mutex_lock(&uctx->mm_list_lock);
- list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
- continue;
-
- found = true;
- break;
- }
- mutex_unlock(&uctx->mm_list_lock);
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
- mm->key.phy_addr, mm->key.len, uctx, found);
-
- return found;
-}
-
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
struct ib_device *ibdev = uctx->device;
int rc;
struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
struct qedr_alloc_ucontext_resp uresp = {};
+ struct qedr_alloc_ucontext_req ureq = {};
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_add_user_out_params oparams;
+ struct qedr_user_mmap_entry *entry;
if (!udata)
return -EFAULT;
+ if (udata->inlen) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen));
+ if (rc) {
+ DP_ERR(dev, "Problem copying data from user space\n");
+ return -EFAULT;
+ }
+
+ ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
+ }
+
rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
if (rc) {
DP_ERR(dev,
@@ -334,13 +293,29 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
ctx->dpi_addr = oparams.dpi_addr;
ctx->dpi_phys_addr = oparams.dpi_phys_addr;
ctx->dpi_size = oparams.dpi_size;
- INIT_LIST_HEAD(&ctx->mm_head);
- mutex_init(&ctx->mm_list_lock);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ entry->io_address = ctx->dpi_phys_addr;
+ entry->length = ctx->dpi_size;
+ entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
+ entry->dpi = ctx->dpi;
+ entry->dev = dev;
+ rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
+ ctx->dpi_size);
+ if (rc) {
+ kfree(entry);
+ goto err;
+ }
+ ctx->db_mmap_entry = &entry->rdma_entry;
uresp.dpm_enabled = dev->user_dpm_enabled;
uresp.wids_enabled = 1;
uresp.wid_count = oparams.wid_count;
- uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
uresp.db_size = ctx->dpi_size;
uresp.max_send_wr = dev->attr.max_sqe;
uresp.max_recv_wr = dev->attr.max_rqe;
@@ -352,82 +327,92 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
- return rc;
+ goto err;
ctx->dev = dev;
- rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
- if (rc)
- return rc;
-
DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
&ctx->ibucontext);
return 0;
+
+err:
+ if (!ctx->db_mmap_entry)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
+ else
+ rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
+
+ return rc;
}
void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
- struct qedr_mm *mm, *tmp;
DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
uctx);
- uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
- list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
- "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
- mm->key.phy_addr, mm->key.len, uctx);
- list_del(&mm->entry);
- kfree(mm);
- }
+ rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
}
-int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
- struct qedr_dev *dev = get_qedr_dev(context->device);
- unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long len = (vma->vm_end - vma->vm_start);
- unsigned long dpi_start;
+ struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
+ struct qedr_dev *dev = entry->dev;
- dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
+ if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
+ free_page((unsigned long)entry->address);
+ else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
+ dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
- DP_DEBUG(dev, QEDR_MSG_INIT,
- "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
- (void *)vma->vm_start, (void *)vma->vm_end,
- (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
+ kfree(entry);
+}
- if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
- DP_ERR(dev,
- "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
- (void *)vma->vm_start, (void *)vma->vm_end);
- return -EINVAL;
- }
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
+{
+ struct ib_device *dev = ucontext->device;
+ size_t length = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct qedr_user_mmap_entry *entry;
+ int rc = 0;
+ u64 pfn;
- if (!qedr_search_mmap(ucontext, phys_addr, len)) {
- DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
- vma->vm_pgoff);
- return -EINVAL;
- }
+ ibdev_dbg(dev,
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
- if (phys_addr < dpi_start ||
- ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
- DP_ERR(dev,
- "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
- (void *)phys_addr, (void *)dpi_start,
- ucontext->dpi_size);
+ rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
+ vma->vm_pgoff);
return -EINVAL;
}
-
- if (vma->vm_flags & VM_READ) {
- DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
- return -EINVAL;
+ entry = get_qedr_mmap_entry(rdma_entry);
+ ibdev_dbg(dev,
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+ entry->io_address, length, entry->mmap_flag);
+
+ switch (entry->mmap_flag) {
+ case QEDR_USER_MMAP_IO_WC:
+ pfn = entry->io_address >> PAGE_SHIFT;
+ rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case QEDR_USER_MMAP_PHYS_PAGE:
+ rc = vm_insert_page(vma, vma->vm_start,
+ virt_to_page(entry->address));
+ break;
+ default:
+ rc = -EINVAL;
}
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
- vma->vm_page_prot);
+ if (rc)
+ ibdev_dbg(dev,
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+ entry->io_address, length, entry->mmap_flag, rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
}
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
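
With the rdma_user_mmap_entry conversion above, uresp.db_pa is no longer a raw physical address: it is the opaque offset returned by rdma_user_mmap_get_offset(), which userspace must pass back verbatim as the mmap() offset so qedr_mmap() can look the entry up by pgoff. A hedged userspace-side sketch (provider-library details vary):

#include <stdint.h>
#include <sys/mman.h>

/* cmd_fd is the open uverbs device fd; db_pa/db_size come from
 * the alloc_ucontext response. */
static void *map_doorbell(int cmd_fd, uint64_t db_pa, uint32_t db_size)
{
	/* doorbell BARs are write-only from userspace */
	return mmap(NULL, db_size, PROT_WRITE, MAP_SHARED, cmd_fd,
		    (off_t)db_pa);
}
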
@@ -657,16 +642,50 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
}
}
+static int qedr_db_recovery_add(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return 0;
+ }
+
+ return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
+ db_width, db_space);
+}
+
+static void qedr_db_recovery_del(struct qedr_dev *dev,
+ void __iomem *db_addr,
+ void *db_data)
+{
+ if (!db_data) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
+ return;
+ }
+
+ /* Ignore the return code; there is not much we can do
+ * about a failure here, and an error is logged inside.
+ */
+ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
+}
+
static int qedr_copy_cq_uresp(struct qedr_dev *dev,
- struct qedr_cq *cq, struct ib_udata *udata)
+ struct qedr_cq *cq, struct ib_udata *udata,
+ u32 db_offset)
{
struct qedr_create_cq_uresp uresp;
int rc;
memset(&uresp, 0, sizeof(uresp));
- uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.db_offset = db_offset;
uresp.icid = cq->icid;
+ if (cq->q.db_mmap_entry)
+ uresp.db_rec_addr =
+ rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
@@ -694,10 +713,58 @@ static inline int qedr_align_cq_entries(int entries)
return aligned_size / QEDR_CQE_SIZE;
}
+static int qedr_init_user_db_rec(struct ib_udata *udata,
+ struct qedr_dev *dev, struct qedr_userq *q,
+ bool requires_db_rec)
+{
+ struct qedr_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ struct qedr_user_mmap_entry *entry;
+ int rc;
+
+ /* Skip user queues without a doorbell (SRQ) and libs without recovery support */
+ if (requires_db_rec == 0 || !uctx->db_rec)
+ return 0;
+
+ /* Allocate a page for doorbell recovery, add to mmap */
+ q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
+ if (!q->db_rec_data) {
+ DP_ERR(dev, "get_zeroed_page failed\n");
+ return -ENOMEM;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto err_free_db_data;
+
+ entry->address = q->db_rec_data;
+ entry->length = PAGE_SIZE;
+ entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
+ rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
+ &entry->rdma_entry,
+ PAGE_SIZE);
+ if (rc)
+ goto err_free_entry;
+
+ q->db_mmap_entry = &entry->rdma_entry;
+
+ return 0;
+
+err_free_entry:
+ kfree(entry);
+
+err_free_db_data:
+ free_page((unsigned long)q->db_rec_data);
+ q->db_rec_data = NULL;
+ return -ENOMEM;
+}
+
static inline int qedr_init_user_queue(struct ib_udata *udata,
struct qedr_dev *dev,
struct qedr_userq *q, u64 buf_addr,
- size_t buf_len, int access, int dmasync,
+ size_t buf_len, bool requires_db_rec,
+ int access,
int alloc_and_init)
{
u32 fw_pages;
@@ -705,7 +772,7 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
q->buf_addr = buf_addr;
q->buf_len = buf_len;
- q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync);
+ q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access);
if (IS_ERR(q->umem)) {
DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
PTR_ERR(q->umem));
@@ -735,7 +802,8 @@ static inline int qedr_init_user_queue(struct ib_udata *udata,
}
}
- return 0;
+ /* mmap the user address used to store doorbell data for recovery */
+ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
err0:
ib_umem_release(q->umem);
@@ -821,6 +889,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int entries = attr->cqe;
struct qedr_cq *cq = get_qedr_cq(ibcq);
int chain_entries;
+ u32 db_offset;
int page_cnt;
u64 pbl_ptr;
u16 icid;
@@ -840,8 +909,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ /* Calculate the db offset: userspace adds the DPI base, the kernel adds the db address */
+ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create cq: problem copying data from user space\n");
goto err0;
@@ -856,7 +929,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE, 1,
+ ureq.len, true, IB_ACCESS_LOCAL_WRITE,
1);
if (rc)
goto err0;
@@ -865,6 +938,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
page_cnt = cq->q.pbl_info.num_pbes;
cq->ibcq.cqe = chain_entries;
+ cq->q.db_addr = ctx->dpi_addr + db_offset;
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
@@ -876,7 +950,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
sizeof(union rdma_cqe),
&cq->pbl, NULL);
if (rc)
- goto err1;
+ goto err0;
page_cnt = qed_chain_get_page_cnt(&cq->pbl);
pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
@@ -888,21 +962,28 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
if (rc)
- goto err2;
+ goto err1;
cq->icid = icid;
cq->sig = QEDR_CQ_MAGIC_NUMBER;
spin_lock_init(&cq->cq_lock);
if (udata) {
- rc = qedr_copy_cq_uresp(dev, cq, udata);
+ rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
+ if (rc)
+ goto err2;
+
+ rc = qedr_db_recovery_add(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data,
+ DB_REC_WIDTH_64B,
+ DB_REC_USER);
if (rc)
- goto err3;
+ goto err2;
+
} else {
/* Generate doorbell address. */
- cq->db_addr = dev->db_addr +
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
cq->db.data.icid = cq->icid;
+ cq->db_addr = dev->db_addr + db_offset;
cq->db.data.params = DB_AGG_CMD_SET <<
RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
@@ -912,6 +993,11 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->latest_cqe = NULL;
consume_cqe(cq);
cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ if (rc)
+ goto err2;
}
DP_DEBUG(dev, QEDR_MSG_CQ,
@@ -920,18 +1006,19 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
-err3:
+err2:
destroy_iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
&destroy_oparams);
-err2:
- if (udata)
- qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
- else
- dev->ops->common->chain_free(dev->cdev, &cq->pbl);
err1:
- if (udata)
+ if (udata) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+ if (cq->q.db_mmap_entry)
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ } else {
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
err0:
return -EINVAL;
}
@@ -962,8 +1049,10 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->destroyed = 1;
/* GSIs CQs are handled by driver, so they don't exist in the FW */
- if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
return;
+ }
iparams.icid = cq->icid;
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
@@ -972,6 +1061,14 @@ void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
if (udata) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
+
+ if (cq->q.db_rec_data) {
+ qedr_db_recovery_del(dev, cq->q.db_addr,
+ &cq->q.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
+ }
+ } else {
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
}
/* We don't want the IRQ handler to handle a non-existing CQ so we
@@ -1136,8 +1233,8 @@ static int qedr_copy_srq_uresp(struct qedr_dev *dev,
}
static void qedr_copy_rq_uresp(struct qedr_dev *dev,
- struct qedr_create_qp_uresp *uresp,
- struct qedr_qp *qp)
+ struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
{
/* iWARP requires two doorbells per RQ. */
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
@@ -1150,6 +1247,9 @@ static void qedr_copy_rq_uresp(struct qedr_dev *dev,
}
uresp->rq_icid = qp->icid;
+ if (qp->urq.db_mmap_entry)
+ uresp->rq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
}
static void qedr_copy_sq_uresp(struct qedr_dev *dev,
@@ -1163,22 +1263,26 @@ static void qedr_copy_sq_uresp(struct qedr_dev *dev,
uresp->sq_icid = qp->icid;
else
uresp->sq_icid = qp->icid + 1;
+
+ if (qp->usq.db_mmap_entry)
+ uresp->sq_db_rec_addr =
+ rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
- struct qedr_qp *qp, struct ib_udata *udata)
+ struct qedr_qp *qp, struct ib_udata *udata,
+ struct qedr_create_qp_uresp *uresp)
{
- struct qedr_create_qp_uresp uresp;
int rc;
- memset(&uresp, 0, sizeof(uresp));
- qedr_copy_sq_uresp(dev, &uresp, qp);
- qedr_copy_rq_uresp(dev, &uresp, qp);
+ memset(uresp, 0, sizeof(*uresp));
+ qedr_copy_sq_uresp(dev, uresp, qp);
+ qedr_copy_rq_uresp(dev, uresp, qp);
- uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
- uresp.qp_id = qp->qp_id;
+ uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp->qp_id = qp->qp_id;
- rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
@@ -1193,7 +1297,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
- atomic_set(&qp->refcnt, 1);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ kref_init(&qp->refcnt);
+ init_completion(&qp->iwarp_cm_comp);
+ }
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1222,16 +1329,35 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->sq.max_sges, qp->sq_cq->icid);
}
-static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid + 1;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
if (!qp->srq) {
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ qedr_db_recovery_del(dev, qp->sq.db,
+ &qp->sq.db_data);
}
+
+ return rc;
}
static int qedr_check_srq_params(struct qedr_dev *dev,
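
The hunk above turns qedr_set_roce_db_info() into a function that can fail, so that a failed RQ-doorbell registration rolls back the already-registered SQ entry before the error is propagated. A minimal standalone sketch of that unwind discipline, with db_add()/db_del() as hypothetical stand-ins for qedr_db_recovery_add()/qedr_db_recovery_del():

#include <stdio.h>

/* hypothetical stand-ins for qedr_db_recovery_add()/_del() */
static int db_add(const char *name)
{
	printf("register %s doorbell\n", name);
	return 0;
}

static void db_del(const char *name)
{
	printf("unregister %s doorbell\n", name);
}

/* mirrors the control flow above: if the second registration
 * fails, the first one must be rolled back before returning */
static int set_db_info(int have_rq)
{
	int rc = db_add("sq");

	if (rc)
		return rc;

	if (have_rq) {
		rc = db_add("rq");
		if (rc)
			db_del("sq");
	}
	return rc;
}

int main(void)
{
	return set_db_info(1);
}
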
@@ -1279,19 +1405,19 @@ static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
static int qedr_init_srq_user_params(struct ib_udata *udata,
struct qedr_srq *srq,
struct qedr_create_srq_ureq *ureq,
- int access, int dmasync)
+ int access)
{
struct scatterlist *sg;
int rc;
rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
- ureq->srq_len, access, dmasync, 1);
+ ureq->srq_len, false, access, 1);
if (rc)
return rc;
srq->prod_umem =
ib_umem_get(udata, ureq->prod_pair_addr,
- sizeof(struct rdma_srq_producers), access, dmasync);
+ sizeof(struct rdma_srq_producers), access);
if (IS_ERR(srq->prod_umem)) {
qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@@ -1381,13 +1507,14 @@ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
hw_srq->max_sges = init_attr->attr.max_sge;
if (udata) {
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
+ udata->inlen))) {
DP_ERR(dev,
"create srq: problem copying data from user space\n");
goto err0;
}
- rc = qedr_init_srq_user_params(udata, srq, &ureq, 0, 0);
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
if (rc)
goto err0;
@@ -1570,13 +1697,39 @@ qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
&qp->urq.pbl_info, FW_PAGE_SHIFT);
}
-static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
+static void qedr_cleanup_user(struct qedr_dev *dev,
+ struct qedr_ucontext *ctx,
+ struct qedr_qp *qp)
{
ib_umem_release(qp->usq.umem);
qp->usq.umem = NULL;
ib_umem_release(qp->urq.umem);
qp->urq.umem = NULL;
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ kfree(qp->urq.pbl_tbl);
+ }
+
+ if (qp->usq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
+ }
+
+ if (qp->urq.db_rec_data) {
+ qedr_db_recovery_del(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data);
+ rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
+ }
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data);
}
static int qedr_create_user_qp(struct qedr_dev *dev,
@@ -1588,27 +1741,30 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_in_params in_params;
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_create_qp_uresp uresp;
+ struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
+ qp->create_type = QEDR_QP_CREATE_USER;
memset(&ureq, 0, sizeof(ureq));
- rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
if (rc) {
DP_ERR(dev, "Problem copying data from user space\n");
return rc;
}
- /* SQ - read access only (0), dma sync not required (0) */
+ /* SQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, 0, 0, alloc_and_init);
+ ureq.sq_len, true, 0, alloc_and_init);
if (rc)
return rc;
if (!qp->srq) {
- /* RQ - read access only (0), dma sync not required (0) */
+ /* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0, alloc_and_init);
+ ureq.rq_len, true, 0, alloc_and_init);
if (rc)
return rc;
}
@@ -1638,29 +1794,76 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- rc = qedr_copy_qp_uresp(dev, qp, udata);
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
+ if (rc)
+ goto err;
+
+ /* db offset was calculated in copy_qp_uresp, now set in the user q */
+ ctx = pd->uctx;
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
+
+ /* calculate the db_rec_db2 data here since it is constant, so
+ * there is no need to reflect it from user space
+ */
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
+ qp->urq.db_rec_db2_data.data.value =
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
+ }
+
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
+ &qp->usq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
if (rc)
goto err;
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
+ &qp->urq.db_rec_data->db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
+ &qp->urq.db_rec_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_USER);
+ if (rc)
+ goto err;
+ }
qedr_qp_user_print(dev, qp);
- return 0;
+ return rc;
err:
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
if (rc)
DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
err1:
- qedr_cleanup_user(dev, qp);
+ qedr_cleanup_user(dev, ctx, qp);
return rc;
}
-static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
{
+ int rc;
+
qp->sq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
qp->sq.db_data.data.icid = qp->icid;
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
+ &qp->sq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
qp->rq.db = dev->db_addr +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
qp->rq.db_data.data.icid = qp->icid;
@@ -1668,6 +1871,19 @@ static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
qp->rq.iwarp_db2_data.data.icid = qp->icid;
qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
+ &qp->rq.db_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data,
+ DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
}
static int
@@ -1715,8 +1931,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_roce_db_info(dev, qp);
- return rc;
+ return qedr_set_roce_db_info(dev, qp);
}
static int
@@ -1774,8 +1989,7 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
- qedr_set_iwarp_db_info(dev, qp);
- return rc;
+ return qedr_set_iwarp_db_info(dev, qp);
err:
dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -1790,6 +2004,20 @@ static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
kfree(qp->rqe_wr_id);
+
+ /* GSI qp is not registered to db mechanism so no need to delete */
+ if (qp->qp_type == IB_QPT_GSI)
+ return;
+
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
+
+ if (!qp->srq) {
+ qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
+
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
+ &qp->rq.iwarp_db2_data);
+ }
}
static int qedr_create_kernel_qp(struct qedr_dev *dev,
@@ -1805,6 +2033,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_entries;
memset(&in_params, 0, sizeof(in_params));
+ qp->create_type = QEDR_QP_CREATE_KERNEL;
/* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
* the ring. The ring should allow at least a single WR, even if the
@@ -1918,7 +2147,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
- rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
+ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
if (rc)
goto err;
}
@@ -2429,7 +2658,10 @@ err:
static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
struct ib_udata *udata)
{
- int rc = 0;
+ struct qedr_ucontext *ctx =
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
+ ibucontext);
+ int rc;
if (qp->qp_type != IB_QPT_GSI) {
rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
@@ -2437,8 +2669,8 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
return rc;
}
- if (udata)
- qedr_cleanup_user(dev, qp);
+ if (qp->create_type == QEDR_QP_CREATE_USER)
+ qedr_cleanup_user(dev, ctx, qp);
else
qedr_cleanup_kernel(dev, qp);
@@ -2467,34 +2699,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
}
} else {
- /* Wait for the connect/accept to complete */
- if (qp->ep) {
- int wait_count = 1;
-
- while (qp->ep->during_connect) {
- DP_DEBUG(dev, QEDR_MSG_QP,
- "Still in during connect/accept\n");
-
- msleep(100);
- if (wait_count++ > 200) {
- DP_NOTICE(dev,
- "during connect timeout\n");
- break;
- }
- }
- }
+ /* If connection establishment started, the WAIT_FOR_CONNECT
+ * bit will be on and we need to wait for the establishment
+ * to complete before destroying the qp.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
+
+ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
+ * bit will be on, and we need to wait for the disconnect to
+ * complete before continuing. We can use the same completion,
+ * iwarp_cm_comp, since this is the only place that waits for
+ * this completion and it is sequential. In addition,
+ * disconnect can't occur before the connection is fully
+ * established; therefore, if WAIT_FOR_DISCONNECT is on, it
+ * means WAIT_FOR_CONNECT is also on and the completion for
+ * CONNECT already occurred.
+ */
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
+ &qp->iwarp_cm_flags))
+ wait_for_completion(&qp->iwarp_cm_comp);
}
if (qp->qp_type == IB_QPT_GSI)
qedr_destroy_gsi_qp(dev);
+ /* We need to remove the entry from the xarray before we release the
+ * qp_id to avoid a race where the qp_id is reallocated and the
+ * subsequent xa_insert fails.
+ */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ xa_erase(&dev->qps, qp->qp_id);
+
qedr_free_qp_resources(dev, qp, udata);
- if (atomic_dec_and_test(&qp->refcnt) &&
- rdma_protocol_iwarp(&dev->ibdev, 1)) {
- xa_erase_irq(&dev->qps, qp->qp_id);
- kfree(qp);
- }
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+
return 0;
}
@@ -2597,7 +2839,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->type = QEDR_MR_USER;
- mr->umem = ib_umem_get(udata, start, len, acc, 0);
+ mr->umem = ib_umem_get(udata, start, len, acc);
if (IS_ERR(mr->umem)) {
rc = -EFAULT;
goto err0;
@@ -2673,8 +2915,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
- if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
- qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+ if (mr->type != QEDR_MR_DMA)
+ free_mr_info(dev, &mr->info);
/* it could be user registered memory. */
ib_umem_release(mr->umem);
@@ -4106,19 +4348,10 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
- u8 port_num,
- const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *mad_hdr,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index)
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
{
- struct qedr_dev *dev = get_qedr_dev(ibdev);
-
- DP_DEBUG(dev, QEDR_MSG_GSI,
- "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
- mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
- mad_hdr->class_specific, mad_hdr->class_version,
- mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
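
The qedr destroy path above replaces the old msleep() polling loop with a bit-plus-completion handshake: whichever of the connection-manager path and qedr_destroy_qp() reaches test_and_set_bit() second knows the other side is active and waits on iwarp_cm_comp. A minimal userspace sketch of the same handshake, assuming C11 atomics and a POSIX semaphore as stand-ins for test_and_set_bit() and struct completion (all names here are illustrative, not the driver's):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag wait_for_connect = ATOMIC_FLAG_INIT;
static sem_t cm_comp;	/* plays the role of qp->iwarp_cm_comp */

/* connection-manager side: claim the bit when establishment
 * starts, signal the completion when it is done (the real driver
 * bails out here if destroy won the race for the bit) */
static void *cm_thread(void *arg)
{
	(void)atomic_flag_test_and_set(&wait_for_connect);
	/* ... connection establishment work ... */
	sem_post(&cm_comp);	/* complete(&qp->iwarp_cm_comp) */
	return arg;
}

/* destroy side: if the bit was already set, establishment is in
 * flight and we must wait for it before tearing the QP down */
static void destroy_qp(void)
{
	if (atomic_flag_test_and_set(&wait_for_connect))
		sem_wait(&cm_comp);	/* wait_for_completion() */
	printf("QP torn down after CM settled\n");
}

int main(void)
{
	pthread_t t;

	sem_init(&cm_comp, 0, 0);
	pthread_create(&t, NULL, cm_thread, NULL);
	destroy_qp();
	pthread_join(t, NULL);
	sem_destroy(&cm_comp);
	return 0;
}
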
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 9aaa90283d6e..18027844eb87 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -35,8 +35,6 @@
int qedr_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_udata *udata);
int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
-int qedr_modify_port(struct ib_device *, u8 port, int mask,
- struct ib_port_modify *props);
int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *gid);
@@ -46,7 +44,8 @@ int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
-int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma);
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
@@ -93,10 +92,9 @@ int qedr_post_recv(struct ib_qp *, const struct ib_recv_wr *,
const struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
- const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad,
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
- size_t *out_mad_size, u16 *out_mad_pkey_index);
+ const struct ib_grh *in_grh, const struct ib_mad *in_mad,
+ struct ib_mad *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 531d8a1db2c3..ca5ea734e3d0 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1417,7 +1417,6 @@ static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
*
* The exact combo of LEDs if on is true is determined by looking
* at the ibcstatus.
-
* These LEDs indicate the physical and logical state of IB link.
* For this chip (at least with recommended board pinouts), LED1
* is Yellow (logical state) and LED2 is Green (physical state),
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index f92faf5ec369..79bb83222e8d 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2098,8 +2098,6 @@ static int cc_get_classportinfo(struct ib_cc_mad *ccp,
struct ib_cc_classportinfo_attr *p =
(struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->base_version = 1;
p->class_version = 1;
p->cap_mask = 0;
@@ -2120,8 +2118,6 @@ static int cc_get_congestion_info(struct ib_cc_mad *ccp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
@@ -2138,8 +2134,6 @@ static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_cc_congestion_entry_shadow *entries;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
entries = ppd->congestion_entries_shadow->entries;
@@ -2176,8 +2170,6 @@ static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
goto bail;
- memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
-
spin_lock(&ppd->cc_shadow_lock);
max_cct_block =
@@ -2296,18 +2288,11 @@ bail:
return reply_failure((struct ib_smp *) ccp);
}
-static int check_cc_key(struct qib_ibport *ibp,
- struct ib_cc_mad *ccp, int mad_flags)
-{
- return 0;
-}
-
static int process_cc(struct ib_device *ibdev, int mad_flags,
u8 port, const struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
int ret;
*out_mad = *in_mad;
@@ -2318,10 +2303,6 @@ static int process_cc(struct ib_device *ibdev, int mad_flags,
goto bail;
}
- ret = check_cc_key(ibp, ccp, mad_flags);
- if (ret)
- goto bail;
-
switch (ccp->method) {
case IB_MGMT_METHOD_GET:
switch (ccp->attr_id) {
@@ -2405,28 +2386,21 @@ bail:
*/
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
{
int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
- switch (in_mad->mad_hdr.mgmt_class) {
+ switch (in->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_subn(ibdev, mad_flags, port, in, out);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in_mad, out_mad);
+ ret = process_perf(ibdev, port, in, out);
goto bail;
case IB_MGMT_CLASS_CONG_MGMT:
@@ -2435,7 +2409,7 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
- ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+ ret = process_cc(ibdev, mad_flags, port, in, out);
goto bail;
default:
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3926be78036e..568b21eb6ea1 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->show)
+ return -EIO;
+
return pattr->show(ppd, buf);
}
@@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, pport_kobj);
+ if (!pattr->store)
+ return -EIO;
+
return pattr->store(ppd, buf, len);
}
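
The two qib_sysfs.c hunks above guard the sysfs dispatchers against attributes that define only one of show/store; without the check, reading a write-only attribute would jump through a NULL function pointer. A small sketch of the same dispatch guard, with struct port_attr as a hypothetical stand-in for the driver's attribute type:

#include <errno.h>
#include <stdio.h>

struct port_attr {
	long (*show)(char *buf);
	long (*store)(const char *buf, long len);
};

/* mirrors qib_portattr_show(): a missing handler fails with
 * -EIO instead of dereferencing a NULL function pointer */
static long attr_show(const struct port_attr *pattr, char *buf)
{
	if (!pattr->show)
		return -EIO;
	return pattr->show(buf);
}

int main(void)
{
	struct port_attr write_only = { .show = NULL };
	char buf[16];

	printf("%ld\n", attr_show(&write_only, buf)); /* -5 */
	return 0;
}
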
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 17bdf8acee2f..8bf414b47b96 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -245,9 +245,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 7800e6930502..a26a4fd86bf4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -136,7 +136,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
- IB_ACCESS_LOCAL_WRITE, 1);
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->umem)) {
ret = PTR_ERR(cq->umem);
goto err_cq;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index 8f9749d54688..86a6c054ea26 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -58,7 +58,8 @@
#define PVRDMA_ROCEV1_VERSION 17
#define PVRDMA_ROCEV2_VERSION 18
#define PVRDMA_PPN64_VERSION 19
-#define PVRDMA_VERSION PVRDMA_PPN64_VERSION
+#define PVRDMA_QPHANDLE_VERSION 20
+#define PVRDMA_VERSION PVRDMA_QPHANDLE_VERSION
#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1
@@ -581,6 +582,17 @@ struct pvrdma_cmd_create_qp_resp {
u32 max_inline_data;
};
+struct pvrdma_cmd_create_qp_resp_v2 {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 qpn;
+ u32 qp_handle;
+ u32 max_send_wr;
+ u32 max_recv_wr;
+ u32 max_send_sge;
+ u32 max_recv_sge;
+ u32 max_inline_data;
+};
+
struct pvrdma_cmd_modify_qp {
struct pvrdma_cmd_hdr hdr;
u32 qp_handle;
@@ -663,6 +675,7 @@ union pvrdma_cmd_resp {
struct pvrdma_cmd_create_cq_resp create_cq_resp;
struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
struct pvrdma_cmd_create_qp_resp create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 create_qp_resp_v2;
struct pvrdma_cmd_query_qp_resp query_qp_resp;
struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
struct pvrdma_cmd_create_srq_resp create_srq_resp;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index f3a3d22ee8d7..c61e665ff261 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -126,7 +126,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(-EINVAL);
}
- umem = ib_umem_get(udata, start, length, access_flags, 0);
+ umem = ib_umem_get(udata, start, length, access_flags);
if (IS_ERR(umem)) {
dev_warn(&dev->pdev->dev,
"could not get umem for mem region\n");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index bca6a58a442e..f15809c28f67 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -52,6 +52,9 @@
#include "pvrdma.h"
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp);
+
static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
struct pvrdma_cq **recv_cq)
{
@@ -195,7 +198,9 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+ struct pvrdma_cmd_create_qp_resp_v2 *resp_v2 = &rsp.create_qp_resp_v2;
struct pvrdma_create_qp ucmd;
+ struct pvrdma_create_qp_resp qp_resp = {};
unsigned long flags;
int ret;
bool is_srq = !!init_attr->srq;
@@ -260,10 +265,19 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
+ /* Userspace supports qpn and qp handles? */
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION &&
+ udata->outlen < sizeof(qp_resp)) {
+ dev_warn(&dev->pdev->dev,
+ "create queuepair not supported\n");
+ ret = -EOPNOTSUPP;
+ goto err_qp;
+ }
+
if (!is_srq) {
/* set qp->sq.wqe_cnt, shift, buf_size.. */
qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
+ ucmd.rbuf_size, 0);
if (IS_ERR(qp->rumem)) {
ret = PTR_ERR(qp->rumem);
goto err_qp;
@@ -275,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
- ucmd.sbuf_size, 0, 0);
+ ucmd.sbuf_size, 0);
if (IS_ERR(qp->sumem)) {
if (!is_srq)
ib_umem_release(qp->rumem);
@@ -379,13 +393,33 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
}
/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
- qp->qp_handle = resp->qpn;
qp->port = init_attr->port_num;
- qp->ibqp.qp_num = resp->qpn;
+
+ if (dev->dsr_version >= PVRDMA_QPHANDLE_VERSION) {
+ qp->ibqp.qp_num = resp_v2->qpn;
+ qp->qp_handle = resp_v2->qp_handle;
+ } else {
+ qp->ibqp.qp_num = resp->qpn;
+ qp->qp_handle = resp->qpn;
+ }
+
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+ if (udata) {
+ qp_resp.qpn = qp->ibqp.qp_num;
+ qp_resp.qp_handle = qp->qp_handle;
+
+ if (ib_copy_to_udata(udata, &qp_resp,
+ min(udata->outlen, sizeof(qp_resp)))) {
+ dev_warn(&dev->pdev->dev,
+ "failed to copy back udata\n");
+ __pvrdma_destroy_qp(dev, qp);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
return &qp->ibqp;
err_pdir:
@@ -400,27 +434,15 @@ err_qp:
return ERR_PTR(ret);
}
-static void pvrdma_free_qp(struct pvrdma_qp *qp)
+static void _pvrdma_free_qp(struct pvrdma_qp *qp)
{
+ unsigned long flags;
struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
- struct pvrdma_cq *scq;
- struct pvrdma_cq *rcq;
- unsigned long flags, scq_flags, rcq_flags;
-
- /* In case cq is polling */
- get_cqs(qp, &scq, &rcq);
- pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
- _pvrdma_flush_cqe(qp, scq);
- if (scq != rcq)
- _pvrdma_flush_cqe(qp, rcq);
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
dev->qp_tbl[qp->qp_handle] = NULL;
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
- pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
-
if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
@@ -435,34 +457,71 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
atomic_dec(&dev->num_qps);
}
-/**
- * pvrdma_destroy_qp - destroy a queue pair
- * @qp: the queue pair to destroy
- * @udata: user data or null for kernel object
- *
- * @return: 0 on success.
- */
-int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+ struct pvrdma_cq *scq;
+ struct pvrdma_cq *rcq;
+ unsigned long scq_flags, rcq_flags;
+
+ /* In case cq is polling */
+ get_cqs(qp, &scq, &rcq);
+ pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_flush_cqe(qp, scq);
+ if (scq != rcq)
+ _pvrdma_flush_cqe(qp, rcq);
+
+ /*
+ * We're now unlocking the CQs before clearing out the qp handle;
+ * this should still be safe. We have destroyed the backend QP and
+ * the CQEs so there should be no other completions for this QP.
+ */
+ pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+ _pvrdma_free_qp(qp);
+}
+
+static inline void _pvrdma_destroy_qp_work(struct pvrdma_dev *dev,
+ u32 qp_handle)
{
- struct pvrdma_qp *vqp = to_vqp(qp);
union pvrdma_cmd_req req;
struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
int ret;
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
- cmd->qp_handle = vqp->qp_handle;
+ cmd->qp_handle = qp_handle;
- ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
if (ret < 0)
- dev_warn(&to_vdev(qp->device)->pdev->dev,
+ dev_warn(&dev->pdev->dev,
"destroy queuepair failed, error: %d\n", ret);
+}
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ * @udata: user data or null for kernel object
+ *
+ * @return: always 0.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct pvrdma_qp *vqp = to_vqp(qp);
+
+ _pvrdma_destroy_qp_work(to_vdev(qp->device), vqp->qp_handle);
pvrdma_free_qp(vqp);
return 0;
}
+static void __pvrdma_destroy_qp(struct pvrdma_dev *dev,
+ struct pvrdma_qp *qp)
+{
+ _pvrdma_destroy_qp_work(dev, qp->qp_handle);
+ _pvrdma_free_qp(qp);
+}
+
/**
* pvrdma_modify_qp - modify queue pair attributes
* @ibqp: the queue pair
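
The pvrdma create path above handles ABI versioning in both directions: it rejects old userspace whose response buffer is too small for the v2 layout, and it copies back only min(udata->outlen, sizeof(qp_resp)) bytes. A standalone sketch of that truncating copy-back, with resp_v2 as an illustrative layout rather than the real pvrdma uapi struct:

#include <stdio.h>
#include <string.h>

/* illustrative response layout, not the pvrdma uapi struct */
struct resp_v2 {
	unsigned int qpn;
	unsigned int qp_handle;
};

/* copy back no more than the consumer asked for, so a caller
 * compiled against a shorter (older) layout still works */
static size_t copy_resp(void *dst, size_t outlen,
			const struct resp_v2 *resp)
{
	size_t n = outlen < sizeof(*resp) ? outlen : sizeof(*resp);

	memcpy(dst, resp, n);
	return n;
}

int main(void)
{
	struct resp_v2 resp = { .qpn = 42, .qp_handle = 7 };
	unsigned int old_layout;	/* v1 consumer: qpn only */

	copy_resp(&old_layout, sizeof(old_layout), &resp);
	printf("qpn=%u\n", old_layout);
	return 0;
}
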
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 36cdfbdbd325..98c8be71d91d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -146,7 +146,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
goto err_srq;
}
- srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0, 0);
+ srq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size, 0);
if (IS_ERR(srq->umem)) {
ret = PTR_ERR(srq->umem);
goto err_srq;
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index fe99da0ff060..ee02c6176007 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -129,7 +129,6 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
 * rvt_destroy_ah - Destroy an address handle
* @ibah: address handle
* @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
- * @udata: user data or NULL for kernel object
*
* Return: 0 on success
*/
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index a85571a4cf57..13d7f66eadab 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -552,7 +552,6 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
/**
* rvt_driver_cq_init - Init cq resources on behalf of driver
- * @rdi: rvt dev structure
*
* Return: 0 on success
*/
@@ -568,7 +567,6 @@ int rvt_driver_cq_init(void)
/**
* rvt_cq_exit - tear down cq reources
- * @rdi: rvt dev structure
*/
void rvt_cq_exit(void)
{
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index a6a39f01dca3..b9a76bf74857 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -390,7 +390,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (length == 0)
return ERR_PTR(-EINVAL);
- umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
+ umem = ib_umem_get(udata, start, length, mr_access_flags);
if (IS_ERR(umem))
return (void *)umem;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0b0a241c57ff..3cdf75d0c7a4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2563,10 +2563,9 @@ void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
- * rvt_add_rnr_timer - add/start an rnr timer
- * @qp - the QP
- * @aeth - aeth of RNR timeout, simulated aeth for loopback
- * add an rnr timer on the QP
+ * rvt_add_rnr_timer - add/start an rnr timer on the QP
+ * @qp: the QP
+ * @aeth: aeth of RNR timeout, simulated aeth for loopback
*/
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
@@ -2583,7 +2582,7 @@ EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
* rvt_stop_rc_timers - stop all timers
- * @qp - the QP
+ * @qp: the QP
* stop any pending timers
*/
void rvt_stop_rc_timers(struct rvt_qp *qp)
@@ -2617,7 +2616,7 @@ static void rvt_stop_rnr_timer(struct rvt_qp *qp)
/**
* rvt_del_timers_sync - wait for any timeout routines to exit
- * @qp - the QP
+ * @qp: the QP
*/
void rvt_del_timers_sync(struct rvt_qp *qp)
{
@@ -2626,7 +2625,7 @@ void rvt_del_timers_sync(struct rvt_qp *qp)
}
EXPORT_SYMBOL(rvt_del_timers_sync);
-/**
+/*
* This is called from s_timer for missing responses.
*/
static void rvt_rc_timeout(struct timer_list *t)
@@ -2676,12 +2675,13 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
* rvt_qp_iter_init - initial for QP iteration
* @rdi: rvt devinfo
* @v: u64 value
+ * @cb: user-defined callback
*
* This returns an iterator suitable for iterating QPs
* in the system.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* @cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
@@ -2712,7 +2712,7 @@ EXPORT_SYMBOL(rvt_qp_iter_init);
/**
* rvt_qp_iter_next - return the next QP in iter
- * @iter - the iterator
+ * @iter: the iterator
*
* Fine grained QP iterator suitable for use
* with debugfs seq_file mechanisms.
@@ -2775,14 +2775,14 @@ EXPORT_SYMBOL(rvt_qp_iter_next);
/**
* rvt_qp_iter - iterate all QPs
- * @rdi - rvt devinfo
- * @v - a 64 bit value
- * @cb - a callback
+ * @rdi: rvt devinfo
+ * @v: a 64-bit value
+ * @cb: a callback
*
* This provides a way for iterating all QPs.
*
- * The @cb is a user defined callback and @v is a 64
- * bit value passed to and relevant for processing in the
+ * The @cb is a user-defined callback and @v is a 64-bit
+ * value passed to and relevant for processing in the
* cb. An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp.
*
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 18da1e1ea979..986265ad6e79 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -683,9 +683,10 @@ EXPORT_SYMBOL(rvt_unregister_device);
/**
* rvt_init_port - init internal data for driver port
- * @rdi: rvt dev strut
+ * @rdi: rvt_dev_info struct
* @port: rvt port
* @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey_table for @port
*
* Keep track of a list of ports. No need to have a detach port.
* They persist until the driver goes away.
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index a8c11b5e1e94..0946a301a5c5 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
- rxe->attr.fw_ver = RXE_FW_VER;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
- rxe->attr.vendor_id = RXE_VENDOR_ID;
- rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
- rxe->attr.hw_ver = RXE_HW_VER;
rxe->attr.max_qp = RXE_MAX_QP;
rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
@@ -94,22 +90,13 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_mr = RXE_MAX_MR;
rxe->attr.max_pd = RXE_MAX_PD;
rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
- rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
- rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
rxe->attr.atomic_cap = IB_ATOMIC_HCA;
- rxe->attr.max_ee = RXE_MAX_EE;
- rxe->attr.max_rdd = RXE_MAX_RDD;
- rxe->attr.max_mw = RXE_MAX_MW;
- rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
- rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
rxe->attr.max_ah = RXE_MAX_AH;
- rxe->attr.max_fmr = RXE_MAX_FMR;
- rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
rxe->attr.max_srq = RXE_MAX_SRQ;
rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index ea6a819b7167..35a2baf2f364 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -169,7 +169,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
void *vaddr;
int err;
- umem = ib_umem_get(udata, start, length, access, 0);
+ umem = ib_umem_get(udata, start, length, access);
if (IS_ERR(umem)) {
pr_warn("err %d from rxe_umem_get\n",
(int)PTR_ERR(umem));
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index fe5207386700..353c6668249e 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -60,12 +60,8 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
/* default/initial rxe device parameter settings */
enum rxe_device_param {
- RXE_FW_VER = 0,
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_VENDOR_ID = 0,
- RXE_VENDOR_PART_ID = 0,
- RXE_HW_VER = 0,
RXE_MAX_QP = 0x10000,
RXE_MAX_QP_WR = 0x4000,
RXE_MAX_INLINE_DATA = 400,
@@ -87,21 +83,12 @@ enum rxe_device_param {
RXE_MAX_MR = 256 * 1024,
RXE_MAX_PD = 0x7ffc,
RXE_MAX_QP_RD_ATOM = 128,
- RXE_MAX_EE_RD_ATOM = 0,
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
- RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_MAX_EE = 0,
- RXE_MAX_RDD = 0,
- RXE_MAX_MW = 0,
- RXE_MAX_RAW_IPV6_QP = 0,
- RXE_MAX_RAW_ETHY_QP = 0,
RXE_MAX_MCAST_GRP = 8192,
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
RXE_MAX_AH = 100,
- RXE_MAX_FMR = 0,
- RXE_MAX_MAP_PER_FMR = 0,
RXE_MAX_SRQ = 960,
RXE_MAX_SRQ_WR = 0x4000,
RXE_MIN_SRQ_WR = 1,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 623129f27f5a..9dd4bd7aea92 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -106,6 +106,10 @@ static int rxe_modify_device(struct ib_device *dev,
{
struct rxe_dev *rxe = to_rdev(dev);
+ if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+ IB_DEVICE_MODIFY_NODE_DESC))
+ return -EOPNOTSUPP;
+
if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -1171,6 +1175,9 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dev->dev.dma_parms = &rxe->dma_parms;
+ rxe->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
dma_coerce_mask_and_coherent(&dev->dev,
dma_get_required_mask(&dev->dev));
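
Both rxe here and siw (further below) point dev.dma_parms at driver-owned storage and assign it with a C99 compound literal; that form zero-initializes every member not named explicitly. A minimal illustration of the idiom, with struct dma_params as a stand-in for struct device_dma_parameters:

#include <stdio.h>

struct dma_params {
	unsigned long max_segment_size;
	unsigned long segment_boundary_mask;
};

int main(void)
{
	struct dma_params p;

	/* compound-literal assignment: unnamed members (here
	 * segment_boundary_mask) are zeroed */
	p = (struct dma_params){ .max_segment_size = 1UL << 31 };
	printf("%lu %lu\n", p.max_segment_size,
	       p.segment_boundary_mask);
	return 0;
}
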
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5c4b2239129c..95834206c80c 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -384,6 +384,7 @@ struct rxe_port {
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
+ struct device_dma_parameters dma_parms;
int max_ucontext;
int max_inline_data;
struct mutex usdev_lock;
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index dba4535494ab..b939f489cd46 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -70,6 +70,7 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
+ struct device_dma_parameters dma_parms;
struct net_device *netdev;
struct siw_dev_cap attrs;
@@ -98,18 +99,9 @@ struct siw_device {
struct work_struct netdev_down;
};
-struct siw_uobj {
- void *addr;
- u32 size;
-};
-
struct siw_ucontext {
struct ib_ucontext base_ucontext;
struct siw_device *sdev;
-
- /* xarray of user mappable objects */
- struct xarray xa;
- u32 uobj_nextkey;
};
/*
@@ -149,8 +141,6 @@ struct siw_pbl {
struct siw_pble pbe[1];
};
-struct siw_mr;
-
/*
* Generic memory representation for registered siw memory.
* Memory lookup always via higher 24 bit of STag (STag index).
@@ -220,7 +210,7 @@ struct siw_cq {
u32 cq_get;
u32 num_cqe;
bool kernel_verbs;
- u32 xa_cq_index; /* mmap information for CQE array */
+ struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
u32 id; /* For debugging only */
};
@@ -263,7 +253,7 @@ struct siw_srq {
u32 rq_put;
u32 rq_get;
u32 num_rqe; /* max # of wqe's allowed */
- u32 xa_srq_index; /* mmap information for SRQ array */
+ struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
char armed; /* inform user if limit hit */
char kernel_verbs; /* '1' if kernel client */
};
@@ -477,8 +467,8 @@ struct siw_qp {
u8 layer : 4, etype : 4;
u8 ecode;
} term_info;
- u32 xa_sq_index; /* mmap information for SQE array */
- u32 xa_rq_index; /* mmap information for RQE array */
+ struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
+ struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
struct rcu_head rcu;
};
@@ -503,6 +493,11 @@ struct iwarp_msg_info {
int (*rx_data)(struct siw_qp *qp);
};
+struct siw_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ void *address;
+};
+
/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
@@ -607,6 +602,12 @@ static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
return container_of(base_mr, struct siw_mr, base_mr);
}
+static inline struct siw_user_mmap_entry *
+to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
+{
+ return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
+}
+
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
struct siw_qp *qp;
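
to_siw_mmap_entry() above is the usual container_of() pattern: the generic rdma_user_mmap_entry is embedded in the driver struct, and the wrapper is recovered by subtracting the member's offset. A self-contained sketch of that recovery, with container_of reimplemented locally for illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rdma_user_mmap_entry {
	unsigned long pgoff;
};

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry; /* embedded base */
	void *address;
};

int main(void)
{
	struct siw_user_mmap_entry e = { .address = &e };
	struct rdma_user_mmap_entry *base = &e.rdma_entry;

	/* recover the wrapper from the embedded member */
	struct siw_user_mmap_entry *back =
		container_of(base, struct siw_user_mmap_entry,
			     rdma_entry);

	printf("match: %d\n", back == &e);
	return 0;
}
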
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 8c1931a57f4a..3bccfef40e7e 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1373,22 +1373,8 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
rv = -EINVAL;
goto error;
}
- if (v4)
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
- pd_len,
- &((struct sockaddr_in *)(laddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(laddr))->sin_port),
- &((struct sockaddr_in *)(raddr))->sin_addr,
- ntohs(((struct sockaddr_in *)(raddr))->sin_port));
- else
- siw_dbg_qp(qp,
- "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
- pd_len,
- &((struct sockaddr_in6 *)(laddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
- &((struct sockaddr_in6 *)(raddr))->sin6_addr,
- ntohs(((struct sockaddr_in6 *)(raddr))->sin6_port));
+ siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
+ raddr);
rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
@@ -1867,14 +1853,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
- if (addr_family == AF_INET)
- siw_dbg(id->device, "Listen at laddr %pI4 %u\n",
- &(((struct sockaddr_in *)laddr)->sin_addr),
- ((struct sockaddr_in *)laddr)->sin_port);
- else
- siw_dbg(id->device, "Listen at laddr %pI6 %u\n",
- &(((struct sockaddr_in6 *)laddr)->sin6_addr),
- ((struct sockaddr_in6 *)laddr)->sin6_port);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
return 0;
@@ -1935,7 +1914,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
/*
* siw_create_listen - Create resources for a listener's IWCM ID @id
*
- * Listens on the socket addresses id->local_addr and id->remote_addr.
+ * Listens on the socket address id->local_addr.
*
* If the listener's @id provides a specific local IP address, at most one
* listening socket is created and associated with @id.
@@ -1959,7 +1938,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
*/
if (id->local_addr.ss_family == AF_INET) {
struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr, *s_raddr;
+ struct sockaddr_in s_laddr;
const struct in_ifaddr *ifa;
if (!in_dev) {
@@ -1967,12 +1946,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
goto out;
}
memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
- s_raddr = (struct sockaddr_in *)&id->remote_addr;
- siw_dbg(id->device,
- "laddr %pI4:%d, raddr %pI4:%d\n",
- &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
- &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
+ siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
rtnl_lock();
in_dev_for_each_ifa_rtnl(ifa, in_dev) {
@@ -1992,17 +1967,13 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
} else if (id->local_addr.ss_family == AF_INET6) {
struct inet6_dev *in6_dev = in6_dev_get(dev);
struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
- *s_raddr = &to_sockaddr_in6(id->remote_addr);
+ struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
if (!in6_dev) {
rv = -ENODEV;
goto out;
}
- siw_dbg(id->device,
- "laddr %pI6:%d, raddr %pI6:%d\n",
- &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
- &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
+ siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
rtnl_lock();
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
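
The siw_cm.c hunks above collapse the duplicated IPv4/IPv6 debug branches into the kernel's %pISp printk specifier, which renders a struct sockaddr with its port for either address family. Userspace has no %pISp; the closest portable analogue is getnameinfo(), sketched here:

#include <arpa/inet.h>
#include <netdb.h>
#include <stdio.h>
#include <sys/socket.h>

/* one code path for both address families, as %pISp gives the
 * kernel; NI_NUMERIC* avoids DNS lookups */
static void print_sockaddr(const struct sockaddr *sa, socklen_t len)
{
	char host[64], serv[16];

	if (!getnameinfo(sa, len, host, sizeof(host),
			 serv, sizeof(serv),
			 NI_NUMERICHOST | NI_NUMERICSERV))
		printf("laddr %s:%s\n", host, serv);
}

int main(void)
{
	struct sockaddr_in v4 = { .sin_family = AF_INET };

	v4.sin_port = htons(4791);
	inet_pton(AF_INET, "192.0.2.1", &v4.sin_addr);
	print_sockaddr((struct sockaddr *)&v4, sizeof(v4));
	return 0;
}
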
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 05a92f997f60..c147f0613d95 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/rdma_netlink.h>
@@ -248,24 +249,6 @@ static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
return NULL;
}
-static void siw_verbs_sq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_sq_flush(qp);
- up_write(&qp->state_lock);
-}
-
-static void siw_verbs_rq_flush(struct ib_qp *base_qp)
-{
- struct siw_qp *qp = to_siw_qp(base_qp);
-
- down_write(&qp->state_lock);
- siw_rq_flush(qp);
- up_write(&qp->state_lock);
-}
-
static const struct ib_device_ops siw_device_ops = {
.owner = THIS_MODULE,
.uverbs_abi_ver = SIW_ABI_VERSION,
@@ -284,8 +267,6 @@ static const struct ib_device_ops siw_device_ops = {
.destroy_cq = siw_destroy_cq,
.destroy_qp = siw_destroy_qp,
.destroy_srq = siw_destroy_srq,
- .drain_rq = siw_verbs_rq_flush,
- .drain_sq = siw_verbs_sq_flush,
.get_dma_mr = siw_get_dma_mr,
.get_port_immutable = siw_get_port_immutable,
.iw_accept = siw_accept,
@@ -298,6 +279,7 @@ static const struct ib_device_ops siw_device_ops = {
.iw_rem_ref = siw_qp_put_ref,
.map_mr_sg = siw_map_mr_sg,
.mmap = siw_mmap,
+ .mmap_free = siw_mmap_free,
.modify_qp = siw_verbs_modify_qp,
.modify_srq = siw_modify_srq,
.poll_cq = siw_poll_cq,
@@ -350,15 +332,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->netdev = netdev;
if (netdev->type != ARPHRD_LOOPBACK) {
- memcpy(&base_dev->node_guid, netdev->dev_addr, 6);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ netdev->dev_addr);
} else {
/*
* The loopback device does not have a HW address,
 * but connection management lib expects gid != 0
*/
- size_t gidlen = min_t(size_t, strlen(base_dev->name), 6);
+ size_t len = min_t(size_t, strlen(base_dev->name), 6);
+ char addr[6] = { };
- memcpy(&base_dev->node_guid, base_dev->name, gidlen);
+ memcpy(addr, base_dev->name, len);
+ addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
+ addr);
}
base_dev->uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -397,6 +383,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
base_dev->phys_port_cnt = 1;
base_dev->dev.parent = parent;
base_dev->dev.dma_ops = &dma_virt_ops;
+ base_dev->dev.dma_parms = &sdev->dma_parms;
+ sdev->dma_parms = (struct device_dma_parameters)
+ { .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
ib_set_device_ops(base_dev, &siw_device_ops);
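
siw now derives its node GUID with addrconf_addr_eui48(), i.e. the standard MAC-to-modified-EUI-64 expansion (insert ff:fe in the middle, flip the universal/local bit), instead of a raw 6-byte memcpy that left two GUID bytes as zero. A sketch of that transform, written from the EUI-64 definition; consult the kernel helper for the authoritative version:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* expand a 48-bit MAC into a modified EUI-64 */
static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	memcpy(eui, mac, 3);
	eui[3] = 0xff;
	eui[4] = 0xfe;
	memcpy(eui + 5, mac + 3, 3);
	eui[0] ^= 0x02;	/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	uint8_t eui[8];
	int i;

	mac_to_eui64(eui, mac);
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	return 0;
}
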
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index b18a677832e1..5fd6d6499b3d 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -34,44 +34,19 @@ static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
[IB_QPS_ERR] = "ERR"
};
-static u32 siw_create_uobj(struct siw_ucontext *uctx, void *vaddr, u32 size)
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
- struct siw_uobj *uobj;
- struct xa_limit limit = XA_LIMIT(0, SIW_UOBJ_MAX_KEY);
- u32 key;
+ struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);
- uobj = kzalloc(sizeof(*uobj), GFP_KERNEL);
- if (!uobj)
- return SIW_INVAL_UOBJ_KEY;
-
- if (xa_alloc_cyclic(&uctx->xa, &key, uobj, limit, &uctx->uobj_nextkey,
- GFP_KERNEL) < 0) {
- kfree(uobj);
- return SIW_INVAL_UOBJ_KEY;
- }
- uobj->size = PAGE_ALIGN(size);
- uobj->addr = vaddr;
-
- return key;
-}
-
-static struct siw_uobj *siw_get_uobj(struct siw_ucontext *uctx,
- unsigned long off, u32 size)
-{
- struct siw_uobj *uobj = xa_load(&uctx->xa, off);
-
- if (uobj && uobj->size == size)
- return uobj;
-
- return NULL;
+ kfree(entry);
}
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
struct siw_ucontext *uctx = to_siw_ctx(ctx);
- struct siw_uobj *uobj;
- unsigned long off = vma->vm_pgoff;
- int size = vma->vm_end - vma->vm_start;
+ size_t size = vma->vm_end - vma->vm_start;
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct siw_user_mmap_entry *entry;
int rv = -EINVAL;
/*
@@ -79,18 +54,25 @@ int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
*/
if (vma->vm_start & (PAGE_SIZE - 1)) {
pr_warn("siw: mmap not page aligned\n");
- goto out;
+ return -EINVAL;
}
- uobj = siw_get_uobj(uctx, off, size);
- if (!uobj) {
- siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %u\n",
- off, size);
+ rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
+ if (!rdma_entry) {
+ siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
+ vma->vm_pgoff, size);
+ return -EINVAL;
+ }
+ entry = to_siw_mmap_entry(rdma_entry);
+
+ rv = remap_vmalloc_range(vma, entry->address, 0);
+ if (rv) {
+ pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
+ size);
goto out;
}
- rv = remap_vmalloc_range(vma, uobj->addr, 0);
- if (rv)
- pr_warn("remap_vmalloc_range failed: %lu, %u\n", off, size);
out:
+ rdma_user_mmap_entry_put(rdma_entry);
+
return rv;
}
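
The rewritten siw_mmap() above pairs every successful rdma_user_mmap_entry_get() with exactly one rdma_user_mmap_entry_put(), on the success path and on the remap-failure path alike. A toy model of that balance, with entry_get()/entry_put() as stand-ins for the rdma_user_mmap_entry refcounting:

#include <stdio.h>

struct entry {
	int refs;
};

static struct entry *entry_get(struct entry *e)
{
	e->refs++;	/* rdma_user_mmap_entry_get() */
	return e;
}

static void entry_put(struct entry *e)
{
	e->refs--;	/* rdma_user_mmap_entry_put() */
}

/* mirrors the new siw_mmap() control flow: once the lookup has
 * succeeded, both outcomes drop the reference exactly once */
static int do_mmap(struct entry *e, int remap_ok)
{
	struct entry *got = entry_get(e);
	int rv = remap_ok ? 0 : -1;

	entry_put(got);
	return rv;
}

int main(void)
{
	struct entry e = { .refs = 0 };

	do_mmap(&e, 1);
	do_mmap(&e, 0);
	printf("refs=%d\n", e.refs);	/* 0: balanced */
	return 0;
}
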
@@ -105,8 +87,6 @@ int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
rv = -ENOMEM;
goto err_out;
}
- xa_init_flags(&ctx->xa, XA_FLAGS_ALLOC);
- ctx->uobj_nextkey = 0;
ctx->sdev = sdev;
uresp.dev_id = sdev->vendor_part_id;
@@ -135,19 +115,7 @@ err_out:
void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
- void *entry;
- unsigned long index;
- /*
- * Make sure all user mmap objects are gone. Since QP, CQ
- * and SRQ destroy routines destroy related objects, nothing
- * should be found here.
- */
- xa_for_each(&uctx->xa, index, entry) {
- kfree(xa_erase(&uctx->xa, index));
- pr_warn("siw: dropping orphaned uobj at %lu\n", index);
- }
- xa_destroy(&uctx->xa);
atomic_dec(&uctx->sdev->num_ctx);
}
@@ -293,6 +261,33 @@ void siw_qp_put_ref(struct ib_qp *base_qp)
siw_qp_put(to_siw_qp(base_qp));
}
+static struct rdma_user_mmap_entry *
+siw_mmap_entry_insert(struct siw_ucontext *uctx,
+ void *address, size_t length,
+ u64 *offset)
+{
+ struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int rv;
+
+ *offset = SIW_INVAL_UOBJ_KEY;
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+
+ rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
+ &entry->rdma_entry,
+ length);
+ if (rv) {
+ kfree(entry);
+ return NULL;
+ }
+
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
/*
* siw_create_qp()
*
@@ -317,6 +312,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
+ size_t length;
siw_dbg(base_dev, "create new QP\n");
@@ -380,8 +376,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->orq_lock);
qp->kernel_verbs = !udata;
- qp->xa_sq_index = SIW_INVAL_UOBJ_KEY;
- qp->xa_rq_index = SIW_INVAL_UOBJ_KEY;
rv = siw_qp_add(sdev, qp);
if (rv)
@@ -458,22 +452,27 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
uresp.qp_id = qp_id(qp);
if (qp->sendq) {
- qp->xa_sq_index =
- siw_create_uobj(uctx, qp->sendq,
- num_sqe * sizeof(struct siw_sqe));
+ length = num_sqe * sizeof(struct siw_sqe);
+ qp->sq_entry =
+ siw_mmap_entry_insert(uctx, qp->sendq,
+ length, &uresp.sq_key);
+ if (!qp->sq_entry) {
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
+
if (qp->recvq) {
- qp->xa_rq_index =
- siw_create_uobj(uctx, qp->recvq,
- num_rqe * sizeof(struct siw_rqe));
- }
- if (qp->xa_sq_index == SIW_INVAL_UOBJ_KEY ||
- qp->xa_rq_index == SIW_INVAL_UOBJ_KEY) {
- rv = -ENOMEM;
- goto err_out_xa;
+ length = num_rqe * sizeof(struct siw_rqe);
+ qp->rq_entry =
+ siw_mmap_entry_insert(uctx, qp->recvq,
+ length, &uresp.rq_key);
+ if (!qp->rq_entry) {
+ uresp.sq_key = SIW_INVAL_UOBJ_KEY;
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
}
- uresp.sq_key = qp->xa_sq_index << PAGE_SHIFT;
- uresp.rq_key = qp->xa_rq_index << PAGE_SHIFT;
if (udata->outlen < sizeof(uresp)) {
rv = -EINVAL;
@@ -501,11 +500,10 @@ err_out:
kfree(siw_base_qp);
if (qp) {
- if (qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
-
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
vfree(qp->sendq);
vfree(qp->recvq);
kfree(qp);
@@ -618,10 +616,10 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->attrs.flags |= SIW_QP_IN_DESTROY;
qp->rx_stream.rx_suspend = 1;
- if (uctx && qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
- if (uctx && qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&uctx->xa, qp->xa_rq_index));
+ if (uctx) {
+ rdma_user_mmap_entry_remove(qp->sq_entry);
+ rdma_user_mmap_entry_remove(qp->rq_entry);
+ }
down_write(&qp->state_lock);
@@ -685,6 +683,47 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
return bytes;
}
+/* Complete SQ WRs without processing */
+static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct siw_sqe sqe = {};
+ int rv = 0;
+
+ while (wr) {
+ sqe.id = wr->wr_id;
+ sqe.opcode = wr->opcode;
+ rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
+/* Complete RQ WRs without processing */
+static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct siw_rqe rqe = {};
+ int rv = 0;
+
+ while (wr) {
+ rqe.id = wr->wr_id;
+ rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
+ if (rv) {
+ if (bad_wr)
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ return rv;
+}
+
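
siw_sq_flush_wr()/siw_rq_flush_wr() above let a QP in ERROR state complete posted WRs immediately with SIW_WC_WR_FLUSH_ERR instead of refusing them; ib_drain_sq()/ib_drain_rq() depend on that final completion arriving. The helpers are just a walk over the singly linked WR chain, as in this reduced sketch with siw types replaced by minimal stand-ins:

#include <stdio.h>

struct send_wr {
	unsigned long wr_id;
	struct send_wr *next;
};

static int complete_flushed(unsigned long wr_id)
{
	printf("cqe: wr_id=%lu status=FLUSH_ERR\n", wr_id);
	return 0;	/* siw_sqe_complete() may fail instead */
}

/* complete each WR with a flush status; stop and report the
 * first WR that could not be completed */
static int flush_wr_chain(const struct send_wr *wr,
			  const struct send_wr **bad_wr)
{
	int rv = 0;

	while (wr) {
		rv = complete_flushed(wr->wr_id);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

int main(void)
{
	struct send_wr b = { .wr_id = 2, .next = NULL };
	struct send_wr a = { .wr_id = 1, .next = &b };

	return flush_wr_chain(&a, NULL);
}
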
/*
* siw_post_send()
*
@@ -703,26 +742,54 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
unsigned long flags;
int rv = 0;
+ if (wr && !qp->kernel_verbs) {
+ siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state);
- return -ENOTCONN;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_sq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. SQ is guaranteed to
+ * be empty, so the WR completes in order.
+ *
+ * Typically triggered by ib_drain_sq().
+ */
+ rv = siw_sq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state);
- return -ENOTCONN;
- }
- if (wr && !qp->kernel_verbs) {
- siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
spin_lock_irqsave(&qp->sq_lock, flags);
@@ -917,24 +984,54 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
*bad_wr = wr;
return -EOPNOTSUPP; /* what else from errno.h? */
}
+ if (!qp->kernel_verbs) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
/*
* Try to acquire QP state lock. Must be non-blocking
* to accommodate kernel clients needs.
*/
if (!down_read_trylock(&qp->state_lock)) {
- *bad_wr = wr;
- return -ENOTCONN;
- }
- if (!qp->kernel_verbs) {
- siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
- up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * ERROR state is final, so we can be sure
+ * this state will not change as long as the QP
+ * exists.
+ *
+ * This handles an ib_drain_rq() call with
+ * a concurrent request to set the QP state
+ * to ERROR.
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP locked, state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
+ return rv;
}
if (qp->attrs.state > SIW_QP_STATE_RTS) {
+ if (qp->attrs.state == SIW_QP_STATE_ERROR) {
+ /*
+ * Immediately flush this WR to CQ, if QP
+ * is in ERROR state. RQ is guaranteed to
+			 * be empty, so WR completes in order.
+ *
+ * Typically triggered by ib_drain_rq().
+ */
+ rv = siw_rq_flush_wr(qp, wr, bad_wr);
+ } else {
+ siw_dbg_qp(qp, "QP out of state %d\n",
+ qp->attrs.state);
+ *bad_wr = wr;
+ rv = -ENOTCONN;
+ }
up_read(&qp->state_lock);
- *bad_wr = wr;
- return -EINVAL;
+ return rv;
}
/*
* Serialize potentially multiple producers.
@@ -991,8 +1088,8 @@ void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
siw_cq_flush(cq);
- if (ctx && cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
atomic_dec(&sdev->num_cq);
@@ -1029,7 +1126,6 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
size = roundup_pow_of_two(size);
cq->base_cq.cqe = size;
cq->num_cqe = size;
- cq->xa_cq_index = SIW_INVAL_UOBJ_KEY;
if (!udata) {
cq->kernel_verbs = 1;
@@ -1055,16 +1151,17 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
+ size_t length = size * sizeof(struct siw_cqe) +
+ sizeof(struct siw_cq_ctrl);
- cq->xa_cq_index =
- siw_create_uobj(ctx, cq->queue,
- size * sizeof(struct siw_cqe) +
- sizeof(struct siw_cq_ctrl));
- if (cq->xa_cq_index == SIW_INVAL_UOBJ_KEY) {
+ cq->cq_entry =
+ siw_mmap_entry_insert(ctx, cq->queue,
+ length, &uresp.cq_key);
+ if (!cq->cq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.cq_key = cq->xa_cq_index << PAGE_SHIFT;
+
uresp.cq_id = cq->id;
uresp.num_cqe = size;
@@ -1085,8 +1182,8 @@ err_out:
struct siw_ucontext *ctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(cq->cq_entry);
vfree(cq->queue);
}
atomic_dec(&sdev->num_cq);
@@ -1490,7 +1587,6 @@ int siw_create_srq(struct ib_srq *base_srq,
}
srq->max_sge = attrs->max_sge;
srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
- srq->xa_srq_index = SIW_INVAL_UOBJ_KEY;
srq->limit = attrs->srq_limit;
if (srq->limit)
srq->armed = 1;
@@ -1509,15 +1605,16 @@ int siw_create_srq(struct ib_srq *base_srq,
}
if (udata) {
struct siw_uresp_create_srq uresp = {};
+ size_t length = srq->num_rqe * sizeof(struct siw_rqe);
- srq->xa_srq_index = siw_create_uobj(
- ctx, srq->recvq, srq->num_rqe * sizeof(struct siw_rqe));
-
- if (srq->xa_srq_index == SIW_INVAL_UOBJ_KEY) {
+ srq->srq_entry =
+ siw_mmap_entry_insert(ctx, srq->recvq,
+ length, &uresp.srq_key);
+ if (!srq->srq_entry) {
rv = -ENOMEM;
goto err_out;
}
- uresp.srq_key = srq->xa_srq_index;
+
uresp.num_rqe = srq->num_rqe;
if (udata->outlen < sizeof(uresp)) {
@@ -1536,8 +1633,8 @@ int siw_create_srq(struct ib_srq *base_srq,
err_out:
if (srq->recvq) {
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
}
atomic_dec(&sdev->num_srq);
@@ -1623,9 +1720,8 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
- kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
-
+ if (ctx)
+ rdma_user_mmap_entry_remove(srq->srq_entry);
vfree(srq->recvq);
atomic_dec(&sdev->num_srq);
}
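For context, these flush helpers exist to support ib_drain_sq() and ib_drain_rq(): the core drain helpers move the QP to ERROR, post one final marker WR, and wait for its flush completion. A minimal caller-side sketch of that pattern, hedged (the real core helper also wires up a CQE callback; example_drain_sq is hypothetical):

#include <rdma/ib_verbs.h>

static void example_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_rdma_wr swr = {};
	const struct ib_send_wr *bad_swr;

	ib_modify_qp(qp, &attr, IB_QP_STATE);	/* ERROR state is final */

	swr.wr.opcode = IB_WR_RDMA_WRITE;	/* marker WR, never executed */
	swr.wr.send_flags = IB_SEND_SIGNALED;

	/*
	 * The driver must accept this post even in ERROR state and flush
	 * it to the CQ with a WR_FLUSH_ERR status, which is exactly what
	 * siw_sq_flush_wr() above implements; the caller then waits for
	 * that completion on the send CQ.
	 */
	ib_post_send(qp, &swr.wr, &bad_swr);
}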
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 1910869281cb..1a731989fad6 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -83,6 +83,7 @@ void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata);
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
+void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void siw_qp_event(struct siw_qp *qp, enum ib_event_type type);
void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
void siw_srq_event(struct siw_srq *srq, enum ib_event_type type);
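The siw_mmap_free() hook declared above belongs to the core rdma_user_mmap_entry machinery that replaces the driver-private xarray keys removed throughout this patch. A hedged sketch of the insertion side, assuming a wrapper struct that embeds the core entry (example_ names are hypothetical; the exact layout is the driver's choice):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct example_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;			/* queue memory to hand to userspace */
};

static struct rdma_user_mmap_entry *
example_mmap_entry_insert(struct ib_ucontext *uctx, void *address,
			  size_t length, u64 *offset)
{
	struct example_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return NULL;

	entry->address = address;
	if (rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, length)) {
		kfree(entry);
		return NULL;
	}
	/* userspace passes this offset to mmap(2), cf. uresp.cq_key above */
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
	return &entry->rdma_entry;
}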
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac0583ff280d..e5f438ab716c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2019,6 +2019,15 @@ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}
+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
static int ipoib_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
@@ -2045,6 +2054,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_vf_link_state = ipoib_set_vf_link_state,
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_get_vf_guid = ipoib_get_vf_guid,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2e72fc5af157..3690e28cc7ea 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -646,13 +646,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
if (ib_conn->pi_support) {
u32 sig_caps = ib_dev->attrs.sig_prot_cap;
+ shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- shost->virt_boundary_mask = ~MASK_4K;
+ shost->virt_boundary_mask = SZ_4K - 1;
if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -785,7 +786,7 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* iscsi_iser_ep_connect() - Initiate iSER connection establishment
* @shost: scsi_host
* @dst_addr: destination address
- * @non-blocking: indicate if routine can block
+ * @non_blocking: indicate if routine can block
*
* Allocate an iscsi endpoint, an iser_conn structure and bind them.
* After that start RDMA connection establishment via rdma_cm. We
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 52ce63592dcf..029c00163442 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -96,16 +96,12 @@
#define iser_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define SHIFT_4K 12
-#define SIZE_4K (1ULL << SHIFT_4K)
-#define MASK_4K (~(SIZE_4K-1))
-
/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS 1024
#define ISCSI_ISER_DEF_SG_TABLESIZE \
- ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> SHIFT_4K)
+ ((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
/* Maximum support is 16MB I/O size */
-#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> SHIFT_4K)
+#define ISCSI_ISER_MAX_SG_TABLESIZE ((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
#define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
@@ -232,15 +228,16 @@ enum iser_desc_type {
* @iser_header: iser header
* @iscsi_header: iscsi header
* @type: command/control/dataout
- * @dam_addr: header buffer dma_address
+ * @dma_addr: header buffer dma_address
* @tx_sg: sg[0] points to iser/iscsi headers
 *             sg[1] optionally points to either immediate data,
 *             unsolicited data-out or control
 * @num_sge: number of sges used on this TX task
+ * @cqe: completion handler
* @mapped: Is the task header mapped
- * reg_wr: registration WR
- * send_wr: send WR
- * inv_wr: invalidate WR
+ * @reg_wr: registration WR
+ * @send_wr: send WR
+ * @inv_wr: invalidate WR
*/
struct iser_tx_desc {
struct iser_ctrl iser_header;
@@ -267,6 +264,7 @@ struct iser_tx_desc {
* @data: received data segment
* @dma_addr: receive buffer dma address
* @rx_sg: ib_sge of receive buffer
+ * @cqe: completion handler
* @pad: for sense data TODO: Modify to maximum sense length supported
*/
struct iser_rx_desc {
@@ -283,9 +281,9 @@ struct iser_rx_desc {
* struct iser_login_desc - iSER login descriptor
*
* @req: pointer to login request buffer
- * @resp: pointer to login response buffer
+ * @rsp: pointer to login response buffer
* @req_dma: DMA address of login request buffer
- * @rsp_dma: DMA address of login response buffer
+ * @rsp_dma: DMA address of login response buffer
* @sge: IB sge for login post recv
* @cqe: completion handler
*/
@@ -315,12 +313,12 @@ struct iser_comp {
};
/**
- * struct iser_device - Memory registration operations
+ * struct iser_reg_ops - Memory registration operations
* per-device registration schemes
*
* @alloc_reg_res: Allocate registration resources
* @free_reg_res: Free registration resources
- * @fast_reg_mem: Register memory buffers
+ * @reg_mem: Register memory buffers
* @unreg_mem: Un-register memory buffers
* @reg_desc_get: Get a registration descriptor for pool
 * @reg_desc_put: Put a registration descriptor back to pool
@@ -369,7 +367,7 @@ struct iser_device {
};
/**
- * struct iser_reg_resources - Fast registration recources
+ * struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
* @fmr_pool: pool of fmrs
@@ -402,7 +400,7 @@ struct iser_fr_desc {
};
/**
- * struct iser_fr_pool: connection fast registration pool
+ * struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
* @lock: protects fmr/fastreg pool
@@ -427,6 +425,7 @@ struct iser_fr_pool {
* @comp: iser completion context
 * @fr_pool: connection fast registration pool
* @pi_support: Indicate device T10-PI support
+ * @reg_cqe: completion handler
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -467,6 +466,7 @@ struct ib_conn {
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
* @pages_per_mr: maximum pages available for registration
+ * @snd_w_inv: connection uses remote invalidation
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -525,7 +525,7 @@ struct iser_page_vec {
};
/**
- * struct iser_global: iSER global context
+ * struct iser_global - iSER global context
*
* @device_list_mutex: protects device_list
* @device_list: iser devices global list
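The SHIFT_4K/SIZE_4K/MASK_4K removals in this header and the virt_boundary_mask change in iscsi_iser.c are value-preserving; a quick check with the generic constants:

/*
 * ilog2(SZ_4K) == 12 == old SHIFT_4K, and with SECTOR_SIZE == 512:
 *   ISCSI_ISER_DEF_SG_TABLESIZE = (1024  * 512) >> 12 =  128   (512 KB I/O)
 *   ISCSI_ISER_MAX_SG_TABLESIZE = (32768 * 512) >> 12 = 4096   (16 MB I/O)
 *
 * Likewise MASK_4K was ~(SIZE_4K - 1), so the old and new boundary masks
 * are identical in value:
 *   old: ~MASK_4K  ==  ~(~(SIZE_4K - 1))  ==  SIZE_4K - 1  ==  0xfff
 *   new: SZ_4K - 1                                         ==  0xfff
 */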
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 5cbb4b3a0566..4a7045bb0831 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -358,6 +358,8 @@ static inline bool iser_signal_comp(u8 sig_count)
/**
* iser_send_command - send command PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
*/
int iser_send_command(struct iscsi_conn *conn,
struct iscsi_task *task)
@@ -429,6 +431,9 @@ send_command_error:
/**
* iser_send_data_out - send data out PDU
+ * @conn: link to matching iscsi connection
+ * @task: SCSI command task
+ * @hdr: pointer to the LLD's iSCSI message header
*/
int iser_send_data_out(struct iscsi_conn *conn,
struct iscsi_task *task,
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2cc89a9b9e9b..0f74dc6d12fa 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -170,7 +170,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
- if (data->dma_nents == 0) {
+ if (unlikely(data->dma_nents == 0)) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
@@ -237,7 +237,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
int ret, plen;
page_vec->npages = 0;
- page_vec->fake_mr.page_size = SIZE_4K;
+ page_vec->fake_mr.page_size = SZ_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
mem->dma_nents, NULL, iser_set_page);
if (unlikely(plen < mem->dma_nents)) {
@@ -451,7 +451,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
+ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->dma_nents);
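The unlikely() annotations added in this file are pure branch-layout hints; they change generated code placement, not behavior:

/*
 * unlikely(x) tells the compiler the condition is cold so the error path
 * can be moved out of line, e.g.:
 *
 *	if (unlikely(ret < 0))
 *		return ret;
 *
 * Semantically identical to the unannotated form.
 */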
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a6548de0e218..1f4a37a3c2b3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -58,12 +58,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
dev_name(&event->device->dev), event->element.port_num);
}
-/**
+/*
* iser_create_device_ib_res - creates Protection Domain (PD), Completion
* Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
- * the adapator.
+ * the adaptor.
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
@@ -124,9 +124,9 @@ comps_err:
return -1;
}
-/**
+/*
* iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
- * CQ and PD created with the device associated with the adapator.
+ * CQ and PD created with the device associated with the adaptor.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
@@ -149,8 +149,11 @@ static void iser_free_device_ib_res(struct iser_device *device)
/**
* iser_alloc_fmr_pool - Creates FMR pool and page_vector
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
*
- * returns 0 on success, or errno code on failure
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -180,7 +183,7 @@ int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
page_vec->pages = (u64 *)(page_vec + 1);
- params.page_shift = SHIFT_4K;
+ params.page_shift = ilog2(SZ_4K);
params.max_pages_per_fmr = size;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
@@ -215,6 +218,7 @@ err_frpl:
/**
* iser_free_fmr_pool - releases the FMR pool and page vec
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
@@ -295,7 +299,11 @@ static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
/**
* iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
- * returns 0 on success, or errno code on failure
+ * @ib_conn: connection RDMA resources
+ * @cmds_max: max number of SCSI commands for this connection
+ * @size: max number of pages per map request
+ *
+ * Return: 0 on success, or errno code on failure
*/
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
@@ -332,6 +340,7 @@ err:
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
+ * @ib_conn: connection RDMA resources
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
@@ -355,10 +364,10 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
fr_pool->size - i);
}
-/**
+/*
* iser_create_ib_conn_res - Queue-Pair (QP)
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
@@ -436,7 +445,7 @@ out_err:
return ret;
}
-/**
+/*
 * based on the resolved device node GUID, see if there is an already
 * allocated device for it; if there is none, create one.
*/
@@ -487,9 +496,9 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
enum iser_conn_state comp,
enum iser_conn_state exch)
@@ -561,7 +570,8 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
}
/**
- * Frees all conn objects and deallocs conn descriptor
+ * iser_conn_release - Frees all conn objects and deallocs conn descriptor
+ * @iser_conn: iSER connection context
*/
void iser_conn_release(struct iser_conn *iser_conn)
{
@@ -595,7 +605,10 @@ void iser_conn_release(struct iser_conn *iser_conn)
}
/**
- * triggers start of the disconnect procedures and wait for them to be done
+ * iser_conn_terminate - triggers start of the disconnect procedures and
+ * waits for them to be done
+ * @iser_conn: iSER connection context
+ *
* Called with state mutex held
*/
int iser_conn_terminate(struct iser_conn *iser_conn)
@@ -632,9 +645,9 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
return 1;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
@@ -670,7 +683,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
else
max_num_sg = attr->max_fast_reg_page_list_len;
- sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
+ sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
sup_sg_tablesize =
min_t(
@@ -684,9 +697,9 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
@@ -732,9 +745,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
}
}
-/**
+/*
* Called with state mutex held
- **/
+ */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
@@ -1019,7 +1032,7 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
ib_conn->post_recv_buf_count += count;
ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (ib_ret) {
+ if (unlikely(ib_ret)) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
ib_conn->post_recv_buf_count -= count;
} else
@@ -1030,9 +1043,12 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
/**
- * iser_start_send - Initiate a Send DTO operation
+ * iser_post_send - Initiate a Send DTO operation
+ * @ib_conn: connection RDMA resources
+ * @tx_desc: iSER TX descriptor
+ * @signal: true to send work request as SIGNALED
*
- * returns 0 on success, -1 on failure
+ * Return: 0 on success, -1 on failure
*/
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal)
@@ -1060,7 +1076,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
first_wr = wr;
ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (ib_ret)
+ if (unlikely(ib_ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ib_ret, wr->opcode);
@@ -1081,7 +1097,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
- pr_err("ib_check_mr_status failed, ret %d\n", ret);
+ iser_err("ib_check_mr_status failed, ret %d\n", ret);
/* Not a lot we can do, return ambiguous guard error */
*sector = 0;
return 0x1;
@@ -1093,7 +1109,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
sector_div(sector_off, sector_size + 8);
*sector = scsi_get_lba(iser_task->sc) + sector_off;
- pr_err("PI error found type %d at sector %llx "
+ iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
mr_status.sig_err.err_type,
(unsigned long long)*sector,
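Most of the comment churn in this file follows one kernel-doc rule: a block opened with "/**" must document every parameter with an @name: line and use a "Return:" section, while plain implementation comments open with "/*". The canonical shape, for reference:

/**
 * example_fn - one-line summary
 * @arg: every parameter gets an @name: line
 *
 * Optional longer description.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int example_fn(int arg)
{
	return arg >= 0 ? 0 : -EINVAL;
}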
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 43ac61ffef4a..6dbc08e1a6a6 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -70,7 +70,7 @@
struct opa_vnic_adapter;
-/**
+/*
* struct __opa_vesw_info - OPA vnic virtual switch info
*
* Same as opa_vesw_info without bitwise attribute.
@@ -96,7 +96,7 @@ struct __opa_vesw_info {
u8 rsvd4[2];
} __packed;
-/**
+/*
* struct __opa_per_veswport_info - OPA vnic per port info
*
* Same as opa_per_veswport_info without bitwise attribute.
@@ -136,7 +136,7 @@ struct __opa_per_veswport_info {
u8 rsvd3[8];
} __packed;
-/**
+/*
* struct __opa_veswport_info - OPA vnic port info
*
* Same as opa_veswport_info without bitwise attribute.
@@ -146,7 +146,7 @@ struct __opa_veswport_info {
struct __opa_per_veswport_info vport;
};
-/**
+/*
* struct __opa_veswport_trap - OPA vnic trap info
*
* Same as opa_veswport_trap without bitwise attribute.
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b5960351bec0..b7f7a5f7bd98 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -174,9 +174,9 @@ static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
int tmo = *(int *)kp->arg;
if (tmo >= 0)
- return sprintf(buffer, "%d", tmo);
+ return sprintf(buffer, "%d\n", tmo);
else
- return sprintf(buffer, "off");
+ return sprintf(buffer, "off\n");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
@@ -352,11 +352,11 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
init_completion(&ch->done);
ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
- (struct sockaddr *)&target->rdma_cm.src : NULL,
- (struct sockaddr *)&target->rdma_cm.dst,
+ &target->rdma_cm.src.sa : NULL,
+ &target->rdma_cm.dst.sa,
SRP_PATH_REC_TIMEOUT_MS);
if (ret) {
- pr_err("No route available from %pIS to %pIS (%d)\n",
+ pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
&target->rdma_cm.src, &target->rdma_cm.dst, ret);
goto out;
}
@@ -366,7 +366,7 @@ static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret) {
- pr_err("Resolving address %pIS failed (%d)\n",
+ pr_err("Resolving address %pISpsc failed (%d)\n",
&target->rdma_cm.dst, ret);
goto out;
}
@@ -552,6 +552,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
+ const struct ib_device_attr *attr = &dev->dev->attrs;
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
@@ -583,12 +584,14 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size + 1;
init_attr->cap.max_recv_sge = 1;
- init_attr->cap.max_send_sge = SRP_MAX_SGE;
+ init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr->qp_type = IB_QPT_RC;
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
+ ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
+
if (target->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
qp = ch->rdma_cm.cm_id->qp;
@@ -1362,7 +1365,8 @@ static void srp_terminate_io(struct srp_rport *rport)
}
/* Calculate maximum initiator to target information unit length. */
-static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
+static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
+ uint32_t max_it_iu_size)
{
uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
sizeof(struct srp_indirect_buf) +
@@ -1372,6 +1376,11 @@ static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data)
max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
srp_max_imm_data);
+ if (max_it_iu_size)
+ max_iu_len = min(max_iu_len, max_it_iu_size);
+
+ pr_debug("max_iu_len = %d\n", max_iu_len);
+
return max_iu_len;
}
@@ -1389,7 +1398,8 @@ static int srp_rport_reconnect(struct srp_rport *rport)
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- srp_use_imm_data);
+ srp_use_imm_data,
+ target->max_it_iu_size);
int i, j, ret = 0;
bool multich = false;
@@ -1838,7 +1848,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
return -EIO;
if (ch->use_imm_data &&
- count <= SRP_MAX_IMM_SGE &&
+ count <= ch->max_imm_sge &&
SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
scmnd->sc_data_direction == DMA_TO_DEVICE) {
struct srp_imm_buf *buf;
@@ -2538,7 +2548,8 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
ch->use_imm_data = lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP;
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
- ch->use_imm_data);
+ ch->use_imm_data,
+ target->max_it_iu_size);
WARN_ON_ONCE(ch->max_it_iu_len >
be32_to_cpu(lrsp->max_it_iu_len));
@@ -3411,6 +3422,7 @@ enum {
SRP_OPT_IP_SRC = 1 << 15,
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
+ SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3443,6 +3455,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
+ { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
{ SRP_OPT_ERR, NULL }
};
@@ -3736,6 +3749,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->tl_retry_count = token;
break;
+ case SRP_OPT_MAX_IT_IU_SIZE:
+ if (match_int(args, &token) || token < 0) {
+ pr_warn("bad maximum initiator to target IU size '%s'\n", p);
+ goto out;
+ }
+ target->max_it_iu_size = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3887,7 +3908,9 @@ static ssize_t srp_create_target(struct device *dev,
target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
- max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, srp_use_imm_data);
+ max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
+ srp_use_imm_data,
+ target->max_it_iu_size);
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
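In outline, how the new max_it_iu_size option composes with the existing IU length computation (symbolic sketch; the tail of the base expression is completed from context):

/*
 * Base length without immediate data:
 *   max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
 *                sizeof(struct srp_indirect_buf) +
 *                cmd_sg_cnt * sizeof(struct srp_direct_buf);
 *
 * With immediate data it is raised to at least
 *   SRP_IMM_DATA_OFFSET + srp_max_imm_data,
 *
 * and the new login option only ever lowers the result:
 *   if (max_it_iu_size)
 *           max_iu_len = min(max_iu_len, max_it_iu_size);
 */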
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index b2861cd2087a..5359ece561ca 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -161,6 +161,7 @@ struct srp_rdma_ch {
};
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
+ u8 max_imm_sge;
bool use_imm_data;
/* Everything above this point is used in the hot path of
@@ -209,6 +210,7 @@ struct srp_target_port {
u32 ch_count;
u32 lkey;
enum srp_target_state state;
+ uint32_t max_it_iu_size;
unsigned int cmd_sg_cnt;
unsigned int indirect_size;
bool allow_ext_sg;
@@ -245,11 +247,13 @@ struct srp_target_port {
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} src;
union {
struct sockaddr_in ip4;
struct sockaddr_in6 ip6;
+ struct sockaddr sa;
struct sockaddr_storage ss;
} dst;
bool src_specified;
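The new 'sa' union member exists purely to drop casts at the rdma_resolve_addr() call sites changed earlier: all members alias the same storage, so taking &addr.sa is well-defined. A hedged sketch (example_resolve is hypothetical):

static int example_resolve(struct rdma_cm_id *id,
			   struct srp_target_port *target)
{
	/* previously: (struct sockaddr *)&target->rdma_cm.dst */
	return rdma_resolve_addr(id, NULL, &target->rdma_cm.dst.sa,
				 SRP_PATH_REC_TIMEOUT_MS);
}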
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e25c70a56be6..23c782e3d49a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -556,34 +556,41 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_port_attr port_attr;
int ret;
- memset(&port_modify, 0, sizeof(port_modify));
- port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- port_modify.clr_port_cap_mask = 0;
-
- ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
- if (ret)
- goto err_mod_port;
-
ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
if (ret)
- goto err_query_port;
+ return ret;
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
- goto err_query_port;
+ return ret;
- sport->port_guid_wwn.priv = sport;
- srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ sport->port_guid_id.wwn.priv = sport;
+ srpt_format_guid(sport->port_guid_id.name,
+ sizeof(sport->port_guid_id.name),
&sport->gid.global.interface_id);
- sport->port_gid_wwn.priv = sport;
- snprintf(sport->port_gid, sizeof(sport->port_gid),
+ sport->port_gid_id.wwn.priv = sport;
+ snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
+ if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
+ return 0;
+
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret) {
+ pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port, ret);
+ return 0;
+ }
+
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -599,23 +606,14 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(sport->mad_agent)) {
- ret = PTR_ERR(sport->mad_agent);
+ pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
- goto err_query_port;
}
}
return 0;
-
-err_query_port:
-
- port_modify.set_port_cap_mask = 0;
- port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
- ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
-
-err_mod_port:
-
- return ret;
}
/**
@@ -1364,9 +1362,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx, u64 tag,
int status)
{
+ struct se_cmd *cmd = &ioctx->cmd;
struct srp_rsp *srp_rsp;
const u8 *sense_data;
int sense_data_len, max_sense_len;
+ u32 resid = cmd->residual_count;
/*
* The lowest bit of all SAM-3 status codes is zero (see also
@@ -1388,6 +1388,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->tag = tag;
srp_rsp->status = status;
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
+ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
+ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
+ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
+ }
+ }
+
if (sense_data_len) {
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
@@ -1931,41 +1953,22 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
-{
- struct srpt_nexus *nexus;
- struct srpt_rdma_ch *ch2;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry) {
- list_for_each_entry(ch2, &nexus->ch_list, list) {
- if (ch2 == ch) {
- res = false;
- goto done;
- }
- }
- }
-done:
- rcu_read_unlock();
-
- return res;
-}
-
/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
+ DECLARE_COMPLETION_ONSTACK(closed);
struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
+ ch->closed = &closed;
+
mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
- 5 * HZ) == 0)
+ while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
@@ -2045,10 +2048,17 @@ static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
__srpt_close_all_ch(sport);
}
+static void srpt_drop_sport_ref(struct srpt_port *sport)
+{
+ if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
+ complete(sport->freed_channels);
+}
+
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+ srpt_drop_sport_ref(ch->sport);
kfree_rcu(ch, rcu);
}
@@ -2092,6 +2102,9 @@ static void srpt_release_channel_work(struct work_struct *w)
list_del_rcu(&ch->list);
mutex_unlock(&sport->mutex);
+ if (ch->closed)
+ complete(ch->closed);
+
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
@@ -2106,8 +2119,6 @@ static void srpt_release_channel_work(struct work_struct *w)
kmem_cache_destroy(ch->req_buf_cache);
- wake_up(&sport->ch_releaseQ);
-
kref_put(&ch->kref, srpt_free_ch);
}
@@ -2144,6 +2155,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
char i_port_id[36];
u32 it_iu_len;
int i, tag_num, tag_size, ret;
+ struct srpt_tpg *stpg;
WARN_ON_ONCE(irqs_disabled());
@@ -2296,23 +2308,38 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
- pr_debug("registering session %s\n", ch->sess_name);
+ pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
+ i_port_id);
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
- if (sport->port_guid_tpg.se_tpg_wwn)
- ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
+
+ mutex_lock(&sport->port_guid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ }
+ mutex_unlock(&sport->port_guid_id.mutex);
+
+ mutex_lock(&sport->port_gid_id.mutex);
+ list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) {
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
- /* Retry without leading "0x" */
- if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
- ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
+ if (!IS_ERR_OR_NULL(ch->sess))
+ break;
+ /* Retry without leading "0x" */
+ ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
+ }
+ mutex_unlock(&sport->port_gid_id.mutex);
+
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
@@ -2325,6 +2352,12 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto destroy_ib;
}
+ /*
+	 * Once a session has been created, destruction of srpt_rdma_ch objects
+ * will decrement sport->refcount. Hence increment sport->refcount now.
+ */
+ atomic_inc(&sport->refcount);
+
mutex_lock(&sport->mutex);
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
@@ -2505,6 +2538,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
struct srpt_device *sdev;
struct srp_login_req req;
const struct srp_login_req_rdma *req_rdma;
+ struct sa_path_rec *path_rec = cm_id->route.path_rec;
char src_addr[40];
sdev = ib_get_client_data(cm_id->device, &srpt_client);
@@ -2530,7 +2564,7 @@ static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
&cm_id->route.addr.src_addr);
return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
- cm_id->route.path_rec->pkey, &req, src_addr);
+ path_rec ? path_rec->pkey : 0, &req, src_addr);
}
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
@@ -2906,39 +2940,29 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
-static bool srpt_ch_list_empty(struct srpt_port *sport)
-{
- struct srpt_nexus *nexus;
- bool res = true;
-
- rcu_read_lock();
- list_for_each_entry(nexus, &sport->nexus_list, entry)
- if (!list_empty(&nexus->ch_list))
- res = false;
- rcu_read_unlock();
-
- return res;
-}
-
/**
* srpt_release_sport - disable login and wait for associated channels
* @sport: SRPT HCA port.
*/
static int srpt_release_sport(struct srpt_port *sport)
{
+ DECLARE_COMPLETION_ONSTACK(c);
struct srpt_nexus *nexus, *next_n;
struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
+ sport->freed_channels = &c;
+
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, false);
mutex_unlock(&sport->mutex);
- while (wait_event_timeout(sport->ch_releaseQ,
- srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
- pr_info("%s_%d: waiting for session unregistration ...\n",
- dev_name(&sport->sdev->device->dev), sport->port);
+ while (atomic_read(&sport->refcount) > 0 &&
+ wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
+ dev_name(&sport->sdev->device->dev), sport->port,
+ atomic_read(&sport->refcount));
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2975,10 +2999,10 @@ static struct se_wwn *__srpt_lookup_wwn(const char *name)
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
- if (strcmp(sport->port_guid, name) == 0)
- return &sport->port_guid_wwn;
- if (strcmp(sport->port_gid, name) == 0)
- return &sport->port_gid_wwn;
+ if (strcmp(sport->port_guid_id.name, name) == 0)
+ return &sport->port_guid_id.wwn;
+ if (strcmp(sport->port_gid_id.name, name) == 0)
+ return &sport->port_gid_id.wwn;
}
}
@@ -3147,7 +3171,6 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
- init_waitqueue_head(&sport->ch_releaseQ);
mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
@@ -3156,6 +3179,10 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
+ mutex_init(&sport->port_guid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
+ mutex_init(&sport->port_gid_id.mutex);
+ INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
@@ -3258,14 +3285,23 @@ static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
return tpg->se_tpg_wwn->priv;
}
+static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = wwn->priv;
+
+ if (wwn == &sport->port_guid_id.wwn)
+ return &sport->port_guid_id;
+ if (wwn == &sport->port_gid_id.wwn)
+ return &sport->port_gid_id;
+ WARN_ON_ONCE(true);
+ return NULL;
+}
+
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
- struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
- WARN_ON_ONCE(tpg != &sport->port_guid_tpg &&
- tpg != &sport->port_gid_tpg);
- return tpg == &sport->port_guid_tpg ? sport->port_guid :
- sport->port_gid;
+ return stpg->sport_id->name;
}
static u16 srpt_get_tag(struct se_portal_group *tpg)
@@ -3721,19 +3757,25 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
const char *name)
{
- struct srpt_port *sport = wwn->priv;
- struct se_portal_group *tpg;
- int res;
-
- WARN_ON_ONCE(wwn != &sport->port_guid_wwn &&
- wwn != &sport->port_gid_wwn);
- tpg = wwn == &sport->port_guid_wwn ? &sport->port_guid_tpg :
- &sport->port_gid_tpg;
- res = core_tpg_register(wwn, tpg, SCSI_PROTOCOL_SRP);
- if (res)
+ struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
+ struct srpt_tpg *stpg;
+ int res = -ENOMEM;
+
+ stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
+ if (!stpg)
return ERR_PTR(res);
+ stpg->sport_id = sport_id;
+ res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
+ if (res) {
+ kfree(stpg);
+ return ERR_PTR(res);
+ }
- return tpg;
+ mutex_lock(&sport_id->mutex);
+ list_add_tail(&stpg->entry, &sport_id->tpg_list);
+ mutex_unlock(&sport_id->mutex);
+
+ return &stpg->tpg;
}
/**
@@ -3742,10 +3784,17 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
+ struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
+ struct srpt_port_id *sport_id = stpg->sport_id;
struct srpt_port *sport = srpt_tpg_to_sport(tpg);
+ mutex_lock(&sport_id->mutex);
+ list_del(&stpg->entry);
+ mutex_unlock(&sport_id->mutex);
+
sport->enabled = false;
core_tpg_deregister(tpg);
+ kfree(stpg);
}
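On the srpt_disconnect_ch_sync() and srpt_release_sport() rework above: an on-stack completion replaces the waitqueue plus list scan. The waiter publishes a pointer before triggering teardown, and the release path completes it exactly once. A reduced sketch with hypothetical example_ names:

#include <linux/completion.h>

struct example_ch {
	struct completion *closed;
};

/* release side, runs once per channel from the teardown work */
static void example_release(struct example_ch *ch)
{
	if (ch->closed)
		complete(ch->closed);
}

static void example_wait_until_closed(struct example_ch *ch)
{
	DECLARE_COMPLETION_ONSTACK(closed);

	ch->closed = &closed;	/* publish before starting teardown */
	/* ...send DREQ, schedule the release work... */

	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
		pr_info("still waiting for channel release ...\n");
}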
/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index ee9f20e9177a..2e1a69840857 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -264,6 +264,8 @@ enum rdma_ch_state {
* @zw_cqe: Zero-length write CQE.
* @rcu: RCU head.
* @kref: kref for this channel.
+ * @closed: Completion object that will be signaled as soon as a new
+ * channel object with the same identity can be created.
* @rq_size: IB receive queue size.
* @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
@@ -306,6 +308,7 @@ struct srpt_rdma_ch {
struct ib_cqe zw_cqe;
struct rcu_head rcu;
struct kref kref;
+ struct completion *closed;
int rq_size;
u32 max_rsp_size;
atomic_t sq_wr_avail;
@@ -361,24 +364,52 @@ struct srpt_port_attrib {
};
/**
+ * struct srpt_tpg - information about a single "target portal group"
+ * @entry: Entry in @sport_id->tpg_list.
+ * @sport_id: Port name this TPG is associated with.
+ * @tpg: LIO TPG data structure.
+ *
+ * Zero or more target portal groups are associated with each port name
+ * (srpt_port_id). With each TPG an ACL list is associated.
+ */
+struct srpt_tpg {
+ struct list_head entry;
+ struct srpt_port_id *sport_id;
+ struct se_portal_group tpg;
+};
+
+/**
+ * struct srpt_port_id - information about an RDMA port name
+ * @mutex: Protects @tpg_list changes.
+ * @tpg_list: TPGs associated with the RDMA port name.
+ * @wwn: WWN associated with the RDMA port name.
+ * @name: ASCII representation of the port name.
+ *
+ * Multiple sysfs directories can be associated with a single RDMA port. This
+ * data structure represents a single (port, name) pair.
+ */
+struct srpt_port_id {
+ struct mutex mutex;
+ struct list_head tpg_list;
+ struct se_wwn wwn;
+ char name[64];
+};
+
+/**
* struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
- * @port_guid: ASCII representation of Port GUID
- * @port_gid: ASCII representation of Port GID
* @port: one-based port number.
* @sm_lid: cached value of the port's sm_lid.
* @lid: cached value of the port's lid.
* @gid: cached value of the port's gid.
- * @port_acl_lock spinlock for port_acl_list:
* @work: work structure for refreshing the aforementioned cached values.
- * @port_guid_tpg: TPG associated with target port GUID.
- * @port_guid_wwn: WWN associated with target port GUID.
- * @port_gid_tpg: TPG associated with target port GID.
- * @port_gid_wwn: WWN associated with target port GID.
+ * @port_guid_id: target port GUID
+ * @port_gid_id: target port GID
* @port_attrib: Port attributes that can be accessed through configfs.
- * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @refcount: Number of objects associated with this port.
+ * @freed_channels: Completion that will be signaled once @refcount becomes 0.
* @mutex: Protects nexus_list.
* @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
@@ -386,19 +417,16 @@ struct srpt_port {
struct srpt_device *sdev;
struct ib_mad_agent *mad_agent;
bool enabled;
- u8 port_guid[24];
- u8 port_gid[64];
u8 port;
u32 sm_lid;
u32 lid;
union ib_gid gid;
struct work_struct work;
- struct se_portal_group port_guid_tpg;
- struct se_wwn port_guid_wwn;
- struct se_portal_group port_gid_tpg;
- struct se_wwn port_gid_wwn;
+ struct srpt_port_id port_guid_id;
+ struct srpt_port_id port_gid_id;
struct srpt_port_attrib port_attrib;
- wait_queue_head_t ch_releaseQ;
+ atomic_t refcount;
+ struct completion *freed_channels;
struct mutex mutex;
struct list_head nexus_list;
};
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 1b3d28964bb2..7d6b4e8879f1 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -123,6 +123,15 @@ void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval)
}
EXPORT_SYMBOL(input_set_max_poll_interval);
+int input_get_poll_interval(struct input_dev *dev)
+{
+ if (!dev->poller)
+ return -EINVAL;
+
+ return dev->poller->poll_interval;
+}
+EXPORT_SYMBOL(input_get_poll_interval);
+
/* SYSFS interface */
static ssize_t input_dev_get_poll_interval(struct device *dev,
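A hedged usage sketch for the input_get_poll_interval() accessor added above; the -EINVAL return covers devices where polling was never set up (example_apply_interval is hypothetical):

static void example_apply_interval(struct input_dev *dev)
{
	int interval = input_get_poll_interval(dev);

	if (interval < 0)
		return;		/* not a polled device */

	/* convert 'interval' (ms) to hardware ticks and program the device */
}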
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 312b854b5506..940b744639c7 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -334,7 +334,6 @@ config JOYSTICK_MAPLE
config JOYSTICK_PSXPAD_SPI
tristate "PlayStation 1/2 joypads via SPI interface"
depends on SPI
- select INPUT_POLLDEV
help
Say Y here if you wish to connect PlayStation 1/2 joypads
via SPI interface.
diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
index 7eee1b0e360f..a32656064f39 100644
--- a/drivers/input/joystick/psxpad-spi.c
+++ b/drivers/input/joystick/psxpad-spi.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
@@ -60,7 +59,7 @@ static const u8 PSX_CMD_ENABLE_MOTOR[] = {
struct psxpad {
struct spi_device *spi;
- struct input_polled_dev *pdev;
+ struct input_dev *idev;
char phys[0x20];
bool motor1enable;
bool motor2enable;
@@ -140,8 +139,7 @@ static void psxpad_set_motor_level(struct psxpad *pad,
static int psxpad_spi_play_effect(struct input_dev *idev,
void *data, struct ff_effect *effect)
{
- struct input_polled_dev *pdev = input_get_drvdata(idev);
- struct psxpad *pad = pdev->private;
+ struct psxpad *pad = input_get_drvdata(idev);
switch (effect->type) {
case FF_RUMBLE:
@@ -158,10 +156,9 @@ static int psxpad_spi_init_ff(struct psxpad *pad)
{
int err;
- input_set_capability(pad->pdev->input, EV_FF, FF_RUMBLE);
+ input_set_capability(pad->idev, EV_FF, FF_RUMBLE);
- err = input_ff_create_memless(pad->pdev->input, NULL,
- psxpad_spi_play_effect);
+ err = input_ff_create_memless(pad->idev, NULL, psxpad_spi_play_effect);
if (err) {
dev_err(&pad->spi->dev,
"input_ff_create_memless() failed: %d\n", err);
@@ -189,24 +186,25 @@ static inline int psxpad_spi_init_ff(struct psxpad *pad)
}
#endif /* CONFIG_JOYSTICK_PSXPAD_SPI_FF */
-static void psxpad_spi_poll_open(struct input_polled_dev *pdev)
+static int psxpad_spi_poll_open(struct input_dev *input)
{
- struct psxpad *pad = pdev->private;
+ struct psxpad *pad = input_get_drvdata(input);
pm_runtime_get_sync(&pad->spi->dev);
+
+ return 0;
}
-static void psxpad_spi_poll_close(struct input_polled_dev *pdev)
+static void psxpad_spi_poll_close(struct input_dev *input)
{
- struct psxpad *pad = pdev->private;
+ struct psxpad *pad = input_get_drvdata(input);
pm_runtime_put_sync(&pad->spi->dev);
}
-static void psxpad_spi_poll(struct input_polled_dev *pdev)
+static void psxpad_spi_poll(struct input_dev *input)
{
- struct psxpad *pad = pdev->private;
- struct input_dev *input = pdev->input;
+ struct psxpad *pad = input_get_drvdata(input);
u8 b_rsp3, b_rsp4;
int err;
@@ -284,7 +282,6 @@ static void psxpad_spi_poll(struct input_polled_dev *pdev)
static int psxpad_spi_probe(struct spi_device *spi)
{
struct psxpad *pad;
- struct input_polled_dev *pdev;
struct input_dev *idev;
int err;
@@ -292,31 +289,26 @@ static int psxpad_spi_probe(struct spi_device *spi)
if (!pad)
return -ENOMEM;
- pdev = input_allocate_polled_device();
- if (!pdev) {
+ idev = devm_input_allocate_device(&spi->dev);
+ if (!idev) {
dev_err(&spi->dev, "failed to allocate input device\n");
return -ENOMEM;
}
/* input poll device settings */
- pad->pdev = pdev;
+ pad->idev = idev;
pad->spi = spi;
- pdev->private = pad;
- pdev->open = psxpad_spi_poll_open;
- pdev->close = psxpad_spi_poll_close;
- pdev->poll = psxpad_spi_poll;
- /* poll interval is about 60fps */
- pdev->poll_interval = 16;
- pdev->poll_interval_min = 8;
- pdev->poll_interval_max = 32;
-
/* input device settings */
- idev = pdev->input;
+ input_set_drvdata(idev, pad);
+
idev->name = "PlayStation 1/2 joypad";
snprintf(pad->phys, sizeof(pad->phys), "%s/input", dev_name(&spi->dev));
idev->id.bustype = BUS_SPI;
+ idev->open = psxpad_spi_poll_open;
+ idev->close = psxpad_spi_poll_close;
+
/* key/value map settings */
input_set_abs_params(idev, ABS_X, 0, 255, 0, 0);
input_set_abs_params(idev, ABS_Y, 0, 255, 0, 0);
@@ -354,11 +346,23 @@ static int psxpad_spi_probe(struct spi_device *spi)
/* pad settings */
psxpad_set_motor_level(pad, 0, 0);
+
+ err = input_setup_polling(idev, psxpad_spi_poll);
+ if (err) {
+ dev_err(&spi->dev, "failed to set up polling: %d\n", err);
+ return err;
+ }
+
+ /* poll interval is about 60fps */
+ input_set_poll_interval(idev, 16);
+ input_set_min_poll_interval(idev, 8);
+ input_set_max_poll_interval(idev, 32);
+
/* register input poll device */
- err = input_register_polled_device(pdev);
+ err = input_register_device(idev);
if (err) {
dev_err(&spi->dev,
- "failed to register input poll device: %d\n", err);
+ "failed to register input device: %d\n", err);
return err;
}
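The psxpad conversion above, like adc-keys below, follows the standard recipe for retiring input_polled_dev: allocate a plain input_dev, attach driver data with input_set_drvdata(), register the poll handler with input_setup_polling(), set the intervals, then register normally. A minimal probe sketch under those assumptions (example_ names are hypothetical):

#include <linux/input.h>
#include <linux/spi/spi.h>

static void example_poll(struct input_dev *input)
{
	/* read the hardware, call input_report_*(), then: */
	input_sync(input);
}

static int example_probe(struct spi_device *spi)
{
	struct input_dev *idev;
	int err;

	idev = devm_input_allocate_device(&spi->dev);	/* not *_polled_* */
	if (!idev)
		return -ENOMEM;

	input_set_drvdata(idev, spi);		/* replaces pdev->private */

	err = input_setup_polling(idev, example_poll);
	if (err)
		return err;

	input_set_poll_interval(idev, 16);	/* ~60 Hz */
	input_set_min_poll_interval(idev, 8);
	input_set_max_poll_interval(idev, 32);

	return input_register_device(idev);	/* plain register call */
}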
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 8911bc2ec42a..61f4eb63eec1 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -16,7 +16,6 @@ if INPUT_KEYBOARD
config KEYBOARD_ADC
tristate "ADC Ladder Buttons"
depends on IIO
- select INPUT_POLLDEV
help
This driver implements support for buttons connected
to an ADC using a resistor ladder.
@@ -168,14 +167,14 @@ config KEYBOARD_QT1050
the module will be called qt1050
config KEYBOARD_QT1070
- tristate "Atmel AT42QT1070 Touch Sensor Chip"
- depends on I2C
- help
- Say Y here if you want to use Atmel AT42QT1070 QTouch
- Sensor chip as input device.
+ tristate "Atmel AT42QT1070 Touch Sensor Chip"
+ depends on I2C
+ help
+ Say Y here if you want to use Atmel AT42QT1070 QTouch
+ Sensor chip as input device.
- To compile this driver as a module, choose M here:
- the module will be called qt1070
+ To compile this driver as a module, choose M here:
+ the module will be called qt1070
config KEYBOARD_QT2160
tristate "Atmel AT42QT2160 Touch Sensor Chip"
@@ -191,7 +190,6 @@ config KEYBOARD_CLPS711X
tristate "CLPS711X Keypad support"
depends on OF_GPIO && (ARCH_CLPS711X || COMPILE_TEST)
select INPUT_MATRIXKMAP
- select INPUT_POLLDEV
help
Say Y here to enable the matrix keypad on the Cirrus Logic
CLPS711X CPUs.
@@ -250,7 +248,6 @@ config KEYBOARD_GPIO
config KEYBOARD_GPIO_POLLED
tristate "Polled GPIO buttons"
depends on GPIOLIB
- select INPUT_POLLDEV
help
This driver implements support for buttons connected
to GPIO pins that are not capable of generating interrupts.
@@ -342,7 +339,6 @@ config KEYBOARD_HIL
config KEYBOARD_HP6XX
tristate "HP Jornada 6xx keyboard"
depends on SH_HP6XX
- select INPUT_POLLDEV
help
Say Y here if you have a HP Jornada 620/660/680/690 and want to
support the built-in keyboard.
@@ -469,6 +465,16 @@ config KEYBOARD_IMX
To compile this driver as a module, choose M here: the
module will be called imx_keypad.
+config KEYBOARD_IMX_SC_KEY
+ tristate "IMX SCU Key Driver"
+ depends on IMX_SCU
+ help
+ This is the system controller key driver for NXP i.MX SoCs with
+	  a system controller inside.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx_sc_key.
+
config KEYBOARD_NEWTON
tristate "Newton keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 9510325c0c5d..f5b17524adf2 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
+obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
obj-$(CONFIG_KEYBOARD_HP7XX) += jornada720_kbd.o
obj-$(CONFIG_KEYBOARD_LKKBD) += lkkbd.o
diff --git a/drivers/input/keyboard/adc-keys.c b/drivers/input/keyboard/adc-keys.c
index 9885fd56f5f9..6d5be48d1b3d 100644
--- a/drivers/input/keyboard/adc-keys.c
+++ b/drivers/input/keyboard/adc-keys.c
@@ -9,7 +9,6 @@
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -30,9 +29,9 @@ struct adc_keys_state {
const struct adc_keys_button *map;
};
-static void adc_keys_poll(struct input_polled_dev *dev)
+static void adc_keys_poll(struct input_dev *input)
{
- struct adc_keys_state *st = dev->private;
+ struct adc_keys_state *st = input_get_drvdata(input);
int i, value, ret;
u32 diff, closest = 0xffffffff;
int keycode = 0;
@@ -55,12 +54,12 @@ static void adc_keys_poll(struct input_polled_dev *dev)
keycode = 0;
if (st->last_key && st->last_key != keycode)
- input_report_key(dev->input, st->last_key, 0);
+ input_report_key(input, st->last_key, 0);
if (keycode)
- input_report_key(dev->input, keycode, 1);
+ input_report_key(input, keycode, 1);
- input_sync(dev->input);
+ input_sync(input);
st->last_key = keycode;
}
@@ -108,7 +107,6 @@ static int adc_keys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct adc_keys_state *st;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
enum iio_chan_type type;
int i, value;
@@ -145,19 +143,13 @@ static int adc_keys_probe(struct platform_device *pdev)
if (error)
return error;
- poll_dev = devm_input_allocate_polled_device(dev);
- if (!poll_dev) {
+ input = devm_input_allocate_device(dev);
+ if (!input) {
dev_err(dev, "failed to allocate input device\n");
return -ENOMEM;
}
- if (!device_property_read_u32(dev, "poll-interval", &value))
- poll_dev->poll_interval = value;
-
- poll_dev->poll = adc_keys_poll;
- poll_dev->private = st;
-
- input = poll_dev->input;
+ input_set_drvdata(input, st);
input->name = pdev->name;
input->phys = "adc-keys/input0";
@@ -174,7 +166,17 @@ static int adc_keys_probe(struct platform_device *pdev)
if (device_property_read_bool(dev, "autorepeat"))
__set_bit(EV_REP, input->evbit);
- error = input_register_polled_device(poll_dev);
+
+ error = input_setup_polling(input, adc_keys_poll);
+ if (error) {
+ dev_err(dev, "Unable to set up polling: %d\n", error);
+ return error;
+ }
+
+ if (!device_property_read_u32(dev, "poll-interval", &value))
+ input_set_poll_interval(input, value);
+
+ error = input_register_device(input);
if (error) {
dev_err(dev, "Unable to register input device: %d\n", error);
return error;
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4f96a4a99e5b..e7d58e7f0257 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -857,70 +857,35 @@ static void adp5589_report_switch_state(struct adp5589_kpad *kpad)
input_sync(kpad->input);
}
-static int adp5589_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid)
{
- struct adp5589_kpad *kpad;
+ struct i2c_client *client = kpad->client;
const struct adp5589_kpad_platform_data *pdata =
dev_get_platdata(&client->dev);
struct input_dev *input;
- unsigned int revid;
- int ret, i;
+ unsigned int i;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
- return -EIO;
- }
-
- if (!pdata) {
- dev_err(&client->dev, "no platform data?\n");
- return -EINVAL;
- }
-
- kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
- if (!kpad)
- return -ENOMEM;
-
- switch (id->driver_data) {
- case ADP5585_02:
- kpad->support_row5 = true;
- /* fall through */
- case ADP5585_01:
- kpad->is_adp5585 = true;
- kpad->var = &const_adp5585;
- break;
- case ADP5589:
- kpad->support_row5 = true;
- kpad->var = &const_adp5589;
- break;
- }
-
if (!((pdata->keypad_en_mask & kpad->var->row_mask) &&
(pdata->keypad_en_mask >> kpad->var->col_shift)) ||
!pdata->keymap) {
dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if (pdata->keymapsize != kpad->var->keymapsize) {
dev_err(&client->dev, "invalid keymapsize\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if (!pdata->gpimap && pdata->gpimapsize) {
dev_err(&client->dev, "invalid gpimap from pdata\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if (pdata->gpimapsize > kpad->var->gpimapsize_max) {
dev_err(&client->dev, "invalid gpimapsize\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
for (i = 0; i < pdata->gpimapsize; i++) {
@@ -929,41 +894,27 @@ static int adp5589_probe(struct i2c_client *client,
if (pin < kpad->var->gpi_pin_base ||
pin > kpad->var->gpi_pin_end) {
dev_err(&client->dev, "invalid gpi pin data\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
if ((1 << (pin - kpad->var->gpi_pin_row_base)) &
pdata->keypad_en_mask) {
dev_err(&client->dev, "invalid gpi row/col data\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
}
if (!client->irq) {
dev_err(&client->dev, "no IRQ?\n");
- error = -EINVAL;
- goto err_free_mem;
+ return -EINVAL;
}
input = input_allocate_device();
- if (!input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ if (!input)
+ return -ENOMEM;
- kpad->client = client;
kpad->input = input;
- ret = adp5589_read(client, ADP5589_5_ID);
- if (ret < 0) {
- error = ret;
- goto err_free_input;
- }
-
- revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK;
-
input->name = client->name;
input->phys = "adp5589-keys/input0";
input->dev.parent = &client->dev;
@@ -1015,30 +966,99 @@ static int adp5589_probe(struct i2c_client *client,
goto err_unreg_dev;
}
+ device_init_wakeup(&client->dev, 1);
+
+ return 0;
+
+err_unreg_dev:
+ input_unregister_device(input);
+ input = NULL;
+err_free_input:
+ input_free_device(input);
+
+ return error;
+}
+
+static void adp5589_keypad_remove(struct adp5589_kpad *kpad)
+{
+ if (kpad->input) {
+ free_irq(kpad->client->irq, kpad);
+ input_unregister_device(kpad->input);
+ }
+}
+
+static int adp5589_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5589_kpad *kpad;
+ const struct adp5589_kpad_platform_data *pdata =
+ dev_get_platdata(&client->dev);
+ unsigned int revid;
+ int error, ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
+ kpad = kzalloc(sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ kpad->client = client;
+
+ switch (id->driver_data) {
+ case ADP5585_02:
+ kpad->support_row5 = true;
+ /* fall through */
+ case ADP5585_01:
+ kpad->is_adp5585 = true;
+ kpad->var = &const_adp5585;
+ break;
+ case ADP5589:
+ kpad->support_row5 = true;
+ kpad->var = &const_adp5589;
+ break;
+ }
+
+ ret = adp5589_read(client, ADP5589_5_ID);
+ if (ret < 0) {
+ error = ret;
+ goto err_free_mem;
+ }
+
+ revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK;
+
+ if (pdata->keymapsize) {
+ error = adp5589_keypad_add(kpad, revid);
+ if (error)
+ goto err_free_mem;
+ }
+
error = adp5589_setup(kpad);
if (error)
- goto err_free_irq;
+ goto err_keypad_remove;
if (kpad->gpimapsize)
adp5589_report_switch_state(kpad);
error = adp5589_gpio_add(kpad);
if (error)
- goto err_free_irq;
+ goto err_keypad_remove;
- device_init_wakeup(&client->dev, 1);
i2c_set_clientdata(client, kpad);
dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
return 0;
-err_free_irq:
- free_irq(client->irq, kpad);
-err_unreg_dev:
- input_unregister_device(input);
- input = NULL;
-err_free_input:
- input_free_device(input);
+err_keypad_remove:
+ adp5589_keypad_remove(kpad);
err_free_mem:
kfree(kpad);
@@ -1050,8 +1070,7 @@ static int adp5589_remove(struct i2c_client *client)
struct adp5589_kpad *kpad = i2c_get_clientdata(client);
adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
- free_irq(client->irq, kpad);
- input_unregister_device(kpad->input);
+ adp5589_keypad_remove(kpad);
adp5589_gpio_remove(kpad);
kfree(kpad);
@@ -1064,6 +1083,9 @@ static int adp5589_suspend(struct device *dev)
struct adp5589_kpad *kpad = dev_get_drvdata(dev);
struct i2c_client *client = kpad->client;
+ if (!kpad->input)
+ return 0;
+
disable_irq(client->irq);
if (device_may_wakeup(&client->dev))
@@ -1077,6 +1099,9 @@ static int adp5589_resume(struct device *dev)
struct adp5589_kpad *kpad = dev_get_drvdata(dev);
struct i2c_client *client = kpad->client;
+ if (!kpad->input)
+ return 0;
+
if (device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
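
A second theme in the adp5589 rework, separate from the polling series: probe is split so the keypad becomes an optional sub-function. The revision ID is read up front, adp5589_keypad_add() runs only when pdata->keymapsize is non-zero, and suspend/resume return early when no input device was created. The guard is the usual one for an optional sub-device (a hypothetical sketch, not patch code):

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>

struct my_chip {
	struct i2c_client *client;
	struct input_dev *input;	/* NULL if the keypad is not configured */
};

static int my_suspend(struct device *dev)
{
	struct my_chip *chip = dev_get_drvdata(dev);

	if (!chip->input)	/* sub-function was never instantiated */
		return 0;

	disable_irq(chip->client->irq);
	return 0;
}
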
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index c4a5c07a4b98..019dd6ed2c29 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -6,7 +6,6 @@
*/
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
@@ -30,10 +29,10 @@ struct clps711x_keypad_data {
struct clps711x_gpio_data *gpio_data;
};
-static void clps711x_keypad_poll(struct input_polled_dev *dev)
+static void clps711x_keypad_poll(struct input_dev *input)
{
- const unsigned short *keycodes = dev->input->keycode;
- struct clps711x_keypad_data *priv = dev->private;
+ const unsigned short *keycodes = input->keycode;
+ struct clps711x_keypad_data *priv = input_get_drvdata(input);
bool sync = false;
int col, row;
@@ -61,14 +60,14 @@ static void clps711x_keypad_poll(struct input_polled_dev *dev)
if (state) {
set_bit(col, data->last_state);
- input_event(dev->input, EV_MSC,
- MSC_SCAN, code);
+ input_event(input,
+ EV_MSC, MSC_SCAN, code);
} else {
clear_bit(col, data->last_state);
}
if (keycodes[code])
- input_report_key(dev->input,
+ input_report_key(input,
keycodes[code], state);
sync = true;
}
@@ -80,7 +79,7 @@ static void clps711x_keypad_poll(struct input_polled_dev *dev)
}
if (sync)
- input_sync(dev->input);
+ input_sync(input);
}
static int clps711x_keypad_probe(struct platform_device *pdev)
@@ -88,7 +87,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
struct clps711x_keypad_data *priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
u32 poll_interval;
int i, err;
@@ -125,53 +124,43 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
if (err)
return err;
- poll_dev = input_allocate_polled_device();
- if (!poll_dev)
+ input = devm_input_allocate_device(dev);
+ if (!input)
return -ENOMEM;
- poll_dev->private = priv;
- poll_dev->poll = clps711x_keypad_poll;
- poll_dev->poll_interval = poll_interval;
- poll_dev->input->name = pdev->name;
- poll_dev->input->dev.parent = dev;
- poll_dev->input->id.bustype = BUS_HOST;
- poll_dev->input->id.vendor = 0x0001;
- poll_dev->input->id.product = 0x0001;
- poll_dev->input->id.version = 0x0100;
+ input_set_drvdata(input, priv);
+
+ input->name = pdev->name;
+ input->dev.parent = dev;
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
err = matrix_keypad_build_keymap(NULL, NULL, priv->row_count,
CLPS711X_KEYPAD_COL_COUNT,
- NULL, poll_dev->input);
+ NULL, input);
if (err)
- goto out_err;
+ return err;
- input_set_capability(poll_dev->input, EV_MSC, MSC_SCAN);
+ input_set_capability(input, EV_MSC, MSC_SCAN);
if (of_property_read_bool(np, "autorepeat"))
- __set_bit(EV_REP, poll_dev->input->evbit);
-
- platform_set_drvdata(pdev, poll_dev);
+ __set_bit(EV_REP, input->evbit);
/* Set all columns to low */
regmap_update_bits(priv->syscon, SYSCON_OFFSET, SYSCON1_KBDSCAN_MASK,
SYSCON1_KBDSCAN(1));
- err = input_register_polled_device(poll_dev);
- if (err)
- goto out_err;
-
- return 0;
-out_err:
- input_free_polled_device(poll_dev);
- return err;
-}
+ err = input_setup_polling(input, clps711x_keypad_poll);
+ if (err)
+ return err;
-static int clps711x_keypad_remove(struct platform_device *pdev)
-{
- struct input_polled_dev *poll_dev = platform_get_drvdata(pdev);
+ input_set_poll_interval(input, poll_interval);
- input_unregister_polled_device(poll_dev);
- input_free_polled_device(poll_dev);
+ err = input_register_device(input);
+ if (err)
+ return err;
return 0;
}
@@ -188,7 +177,6 @@ static struct platform_driver clps711x_keypad_driver = {
.of_match_table = clps711x_keypad_of_match,
},
.probe = clps711x_keypad_probe,
- .remove = clps711x_keypad_remove,
};
module_platform_driver(clps711x_keypad_driver);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 8d4d9786cc74..2b71c5a51f90 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -226,8 +226,6 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
{
struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb,
notifier);
- uint8_t mkbp_event_type = ckdev->ec->event_data.event_type &
- EC_MKBP_EVENT_TYPE_MASK;
u32 val;
unsigned int ev_type;
@@ -239,7 +237,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
if (queued_during_suspend && !device_may_wakeup(ckdev->dev))
return NOTIFY_OK;
- switch (mkbp_event_type) {
+ switch (ckdev->ec->event_data.event_type) {
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);
@@ -266,7 +264,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_SWITCH:
pm_wakeup_event(ckdev->dev, 0);
- if (mkbp_event_type == EC_MKBP_EVENT_BUTTON) {
+ if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
val = get_unaligned_le32(
&ckdev->ec->event_data.data.buttons);
ev_type = EV_KEY;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 1373dc5b0765..1f56d53454b2 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -494,10 +494,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
spin_lock_init(&bdata->lock);
if (child) {
- bdata->gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL,
- child,
- GPIOD_IN,
- desc);
+ bdata->gpiod = devm_fwnode_gpiod_get(dev, child,
+ NULL, GPIOD_IN, desc);
if (IS_ERR(bdata->gpiod)) {
error = PTR_ERR(bdata->gpiod);
if (error == -ENOENT) {
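
The one-liner above is part of a tree-wide rename: devm_fwnode_get_gpiod_from_child() becomes devm_fwnode_gpiod_get(), with the con_id argument moving after the fwnode, as the before/after call in the hunk shows. A usage sketch with the new helper (the function name and "my-button" label are illustrative only):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

/* Look up the first GPIO of a child firmware node; a NULL con_id matches
 * the unnamed "gpios" property, as in gpio_keys above. */
static struct gpio_desc *my_get_button_gpio(struct device *dev,
					    struct fwnode_handle *child)
{
	return devm_fwnode_gpiod_get(dev, child, NULL, GPIOD_IN,
				     "my-button");
}
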
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 465eecfa6b3f..6eb0a2f3f9de 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
@@ -34,7 +33,7 @@ struct gpio_keys_button_data {
};
struct gpio_keys_polled_dev {
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct device *dev;
const struct gpio_keys_platform_data *pdata;
unsigned long rel_axis_seen[BITS_TO_LONGS(REL_CNT)];
@@ -42,12 +41,11 @@ struct gpio_keys_polled_dev {
struct gpio_keys_button_data data[0];
};
-static void gpio_keys_button_event(struct input_polled_dev *dev,
+static void gpio_keys_button_event(struct input_dev *input,
const struct gpio_keys_button *button,
int state)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
- struct input_dev *input = dev->input;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
unsigned int type = button->type ?: EV_KEY;
if (type == EV_REL) {
@@ -66,7 +64,7 @@ static void gpio_keys_button_event(struct input_polled_dev *dev,
}
}
-static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
+static void gpio_keys_polled_check_state(struct input_dev *input,
const struct gpio_keys_button *button,
struct gpio_keys_button_data *bdata)
{
@@ -74,10 +72,10 @@ static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
state = gpiod_get_value_cansleep(bdata->gpiod);
if (state < 0) {
- dev_err(dev->input->dev.parent,
+ dev_err(input->dev.parent,
"failed to get gpio state: %d\n", state);
} else {
- gpio_keys_button_event(dev, button, state);
+ gpio_keys_button_event(input, button, state);
if (state != bdata->last_state) {
bdata->count = 0;
@@ -86,11 +84,10 @@ static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
}
}
-static void gpio_keys_polled_poll(struct input_polled_dev *dev)
+static void gpio_keys_polled_poll(struct input_dev *input)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
- struct input_dev *input = dev->input;
int i;
memset(bdev->rel_axis_seen, 0, sizeof(bdev->rel_axis_seen));
@@ -101,10 +98,10 @@ static void gpio_keys_polled_poll(struct input_polled_dev *dev)
if (bdata->count < bdata->threshold) {
bdata->count++;
- gpio_keys_button_event(dev, &pdata->buttons[i],
+ gpio_keys_button_event(input, &pdata->buttons[i],
bdata->last_state);
} else {
- gpio_keys_polled_check_state(dev, &pdata->buttons[i],
+ gpio_keys_polled_check_state(input, &pdata->buttons[i],
bdata);
}
}
@@ -122,18 +119,20 @@ static void gpio_keys_polled_poll(struct input_polled_dev *dev)
input_sync(input);
}
-static void gpio_keys_polled_open(struct input_polled_dev *dev)
+static int gpio_keys_polled_open(struct input_dev *input)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
if (pdata->enable)
pdata->enable(bdev->dev);
+
+ return 0;
}
-static void gpio_keys_polled_close(struct input_polled_dev *dev)
+static void gpio_keys_polled_close(struct input_dev *input)
{
- struct gpio_keys_polled_dev *bdev = dev->private;
+ struct gpio_keys_polled_dev *bdev = input_get_drvdata(input);
const struct gpio_keys_platform_data *pdata = bdev->pdata;
if (pdata->disable)
@@ -232,7 +231,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
struct fwnode_handle *child = NULL;
const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
struct gpio_keys_polled_dev *bdev;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
int error;
int i;
@@ -255,19 +253,13 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
return -ENOMEM;
}
- poll_dev = devm_input_allocate_polled_device(dev);
- if (!poll_dev) {
- dev_err(dev, "no memory for polled device\n");
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "no memory for input device\n");
return -ENOMEM;
}
- poll_dev->private = bdev;
- poll_dev->poll = gpio_keys_polled_poll;
- poll_dev->poll_interval = pdata->poll_interval;
- poll_dev->open = gpio_keys_polled_open;
- poll_dev->close = gpio_keys_polled_close;
-
- input = poll_dev->input;
+ input_set_drvdata(input, bdev);
input->name = pdata->name ?: pdev->name;
input->phys = DRV_NAME"/input0";
@@ -277,6 +269,9 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0100;
+ input->open = gpio_keys_polled_open;
+ input->close = gpio_keys_polled_close;
+
__set_bit(EV_KEY, input->evbit);
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
@@ -300,10 +295,9 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
return -EINVAL;
}
- bdata->gpiod = devm_fwnode_get_gpiod_from_child(dev,
- NULL, child,
- GPIOD_IN,
- button->desc);
+ bdata->gpiod = devm_fwnode_gpiod_get(dev, child,
+ NULL, GPIOD_IN,
+ button->desc);
if (IS_ERR(bdata->gpiod)) {
error = PTR_ERR(bdata->gpiod);
if (error != -EPROBE_DEFER)
@@ -353,11 +347,19 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
fwnode_handle_put(child);
- bdev->poll_dev = poll_dev;
+ bdev->input = input;
bdev->dev = dev;
bdev->pdata = pdata;
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, gpio_keys_polled_poll);
+ if (error) {
+ dev_err(dev, "unable to set up polling, err=%d\n", error);
+ return error;
+ }
+
+ input_set_poll_interval(input, pdata->poll_interval);
+
+ error = input_register_device(input);
if (error) {
dev_err(dev, "unable to register polled device, err=%d\n",
error);
@@ -366,7 +368,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
/* report initial state of the buttons */
for (i = 0; i < pdata->nbuttons; i++)
- gpio_keys_polled_check_state(poll_dev, &pdata->buttons[i],
+ gpio_keys_polled_check_state(input, &pdata->buttons[i],
&bdev->data[i]);
input_sync(input);
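
Note the signature change to the open handler above: input_dev->open returns int, unlike input_polled_dev's void open hook, so a polled driver can now fail the first open cleanly. The input core also only runs the poll function while the device has users, so open/close remain the right place to power the hardware up and down. A sketch with placeholder names:

#include <linux/input.h>

struct my_priv {
	int (*power_on)(void);		/* hypothetical hardware hooks */
	void (*power_off)(void);
};

static int my_open(struct input_dev *input)
{
	struct my_priv *priv = input_get_drvdata(input);

	/* A non-zero return here aborts the open. */
	return priv->power_on ? priv->power_on() : 0;
}

static void my_close(struct input_dev *input)
{
	struct my_priv *priv = input_get_drvdata(input);

	if (priv->power_off)
		priv->power_off();
}

	/* In probe, before input_register_device():
	 *   input->open  = my_open;
	 *   input->close = my_close;
	 */
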
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
new file mode 100644
index 000000000000..53799527dc75
--- /dev/null
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define DEBOUNCE_TIME 30
+#define REPEAT_INTERVAL 60
+
+#define SC_IRQ_BUTTON 1
+#define SC_IRQ_GROUP_WAKE 3
+
+#define IMX_SC_MISC_FUNC_GET_BUTTON_STATUS 18
+
+struct imx_key_drv_data {
+ u32 keycode;
+ bool keystate; /* true: pressed, false: released */
+ struct delayed_work check_work;
+ struct input_dev *input;
+ struct imx_sc_ipc *key_ipc_handle;
+ struct notifier_block key_notifier;
+};
+
+struct imx_sc_msg_key {
+ struct imx_sc_rpc_msg hdr;
+ u32 state;
+};
+
+static int imx_sc_key_notify(struct notifier_block *nb,
+ unsigned long event, void *group)
+{
+ struct imx_key_drv_data *priv =
+ container_of(nb,
+ struct imx_key_drv_data,
+ key_notifier);
+
+ if ((event & SC_IRQ_BUTTON) && (*(u8 *)group == SC_IRQ_GROUP_WAKE)) {
+ schedule_delayed_work(&priv->check_work,
+ msecs_to_jiffies(DEBOUNCE_TIME));
+ pm_wakeup_event(priv->input->dev.parent, 0);
+ }
+
+ return 0;
+}
+
+static void imx_sc_check_for_events(struct work_struct *work)
+{
+ struct imx_key_drv_data *priv =
+ container_of(work,
+ struct imx_key_drv_data,
+ check_work.work);
+ struct input_dev *input = priv->input;
+ struct imx_sc_msg_key msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ bool state;
+ int error;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_BUTTON_STATUS;
+ hdr->size = 1;
+
+ error = imx_scu_call_rpc(priv->key_ipc_handle, &msg, true);
+ if (error) {
+ dev_err(&input->dev, "read imx sc key failed, error %d\n", error);
+ return;
+ }
+
+ state = (bool)msg.state;
+
+ if (state ^ priv->keystate) {
+ priv->keystate = state;
+ input_event(input, EV_KEY, priv->keycode, state);
+ input_sync(input);
+ if (!priv->keystate)
+ pm_relax(priv->input->dev.parent);
+ }
+
+ if (state)
+ schedule_delayed_work(&priv->check_work,
+ msecs_to_jiffies(REPEAT_INTERVAL));
+}
+
+static int imx_sc_key_probe(struct platform_device *pdev)
+{
+ struct imx_key_drv_data *priv;
+ struct input_dev *input;
+ int error;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ error = imx_scu_get_handle(&priv->key_ipc_handle);
+ if (error)
+ return error;
+
+ if (device_property_read_u32(&pdev->dev, "linux,keycodes",
+ &priv->keycode)) {
+ dev_err(&pdev->dev, "missing linux,keycodes property\n");
+ return -EINVAL;
+ }
+
+ INIT_DELAYED_WORK(&priv->check_work, imx_sc_check_for_events);
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed to allocate the input device\n");
+ return -ENOMEM;
+ }
+
+ input->name = pdev->name;
+ input->phys = "imx-sc-key/input0";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_KEY, priv->keycode);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ return error;
+ }
+
+ priv->input = input;
+ platform_set_drvdata(pdev, priv);
+
+ error = imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
+ true);
+ if (error) {
+ dev_err(&pdev->dev, "failed to enable scu group irq\n");
+ return error;
+ }
+
+ priv->key_notifier.notifier_call = imx_sc_key_notify;
+ error = imx_scu_irq_register_notifier(&priv->key_notifier);
+ if (error) {
+ imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
+ false);
+ dev_err(&pdev->dev, "failed to register scu notifier\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static int imx_sc_key_remove(struct platform_device *pdev)
+{
+ struct imx_key_drv_data *priv = platform_get_drvdata(pdev);
+
+ imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
+ imx_scu_irq_unregister_notifier(&priv->key_notifier);
+ cancel_delayed_work_sync(&priv->check_work);
+
+ return 0;
+}
+
+static const struct of_device_id imx_sc_key_ids[] = {
+ { .compatible = "fsl,imx-sc-key" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_sc_key_ids);
+
+static struct platform_driver imx_sc_key_driver = {
+ .driver = {
+ .name = "imx-sc-key",
+ .of_match_table = imx_sc_key_ids,
+ },
+ .probe = imx_sc_key_probe,
+ .remove = imx_sc_key_remove,
+};
+module_platform_driver(imx_sc_key_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("i.MX System Controller Key Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 4232aa876d2e..7e35081393be 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -15,7 +15,6 @@
#include <linux/device.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -64,7 +63,7 @@ static const unsigned short jornada_scancodes[] = {
#define JORNADA_SCAN_SIZE 18
struct jornadakbd {
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
unsigned short keymap[ARRAY_SIZE(jornada_scancodes)];
unsigned char length;
unsigned char old_scan[JORNADA_SCAN_SIZE];
@@ -73,7 +72,7 @@ struct jornadakbd {
static void jornada_parse_kbd(struct jornadakbd *jornadakbd)
{
- struct input_dev *input_dev = jornadakbd->poll_dev->input;
+ struct input_dev *input_dev = jornadakbd->input;
unsigned short *keymap = jornadakbd->keymap;
unsigned int sync_me = 0;
unsigned int i, j;
@@ -167,9 +166,9 @@ static void jornada_scan_keyb(unsigned char *s)
*s++ = __raw_readb(PHDR);
}
-static void jornadakbd680_poll(struct input_polled_dev *dev)
+static void jornadakbd680_poll(struct input_dev *input)
{
- struct jornadakbd *jornadakbd = dev->private;
+ struct jornadakbd *jornadakbd = input_get_drvdata(input);
jornada_scan_keyb(jornadakbd->new_scan);
jornada_parse_kbd(jornadakbd);
@@ -179,7 +178,6 @@ static void jornadakbd680_poll(struct input_polled_dev *dev)
static int jornada680kbd_probe(struct platform_device *pdev)
{
struct jornadakbd *jornadakbd;
- struct input_polled_dev *poll_dev;
struct input_dev *input_dev;
int i, error;
@@ -188,29 +186,24 @@ static int jornada680kbd_probe(struct platform_device *pdev)
if (!jornadakbd)
return -ENOMEM;
- poll_dev = devm_input_allocate_polled_device(&pdev->dev);
- if (!poll_dev) {
- dev_err(&pdev->dev, "failed to allocate polled input device\n");
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
return -ENOMEM;
}
- jornadakbd->poll_dev = poll_dev;
+ jornadakbd->input = input_dev;
memcpy(jornadakbd->keymap, jornada_scancodes,
sizeof(jornadakbd->keymap));
- poll_dev->private = jornadakbd;
- poll_dev->poll = jornadakbd680_poll;
- poll_dev->poll_interval = 50; /* msec */
-
- input_dev = poll_dev->input;
+ input_set_drvdata(input_dev, jornadakbd);
input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
input_dev->name = "HP Jornada 680 keyboard";
input_dev->phys = "jornadakbd/input0";
input_dev->keycode = jornadakbd->keymap;
input_dev->keycodesize = sizeof(unsigned short);
input_dev->keycodemax = ARRAY_SIZE(jornada_scancodes);
- input_dev->dev.parent = &pdev->dev;
input_dev->id.bustype = BUS_HOST;
for (i = 0; i < 128; i++)
@@ -220,9 +213,17 @@ static int jornada680kbd_probe(struct platform_device *pdev)
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- error = input_register_polled_device(jornadakbd->poll_dev);
+ error = input_setup_polling(input_dev, jornadakbd680_poll);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set up polling\n");
+ return error;
+ }
+
+ input_set_poll_interval(input_dev, 50 /* msec */);
+
+ error = input_register_device(input_dev);
if (error) {
- dev_err(&pdev->dev, "failed to register polled input device\n");
+ dev_err(&pdev->dev, "failed to register input device\n");
return error;
}
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index ee80de44ce3f..40d6e5087cde 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -54,6 +54,9 @@
/* MPR121 has 12 keys */
#define MPR121_MAX_KEY_COUNT 12
+#define MPR121_MIN_POLL_INTERVAL 10
+#define MPR121_MAX_POLL_INTERVAL 200
+
struct mpr121_touchkey {
struct i2c_client *client;
struct input_dev *input_dev;
@@ -115,11 +118,11 @@ static struct regulator *mpr121_vdd_supply_init(struct device *dev)
return vdd_supply;
}
-static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+static void mpr_touchkey_report(struct input_dev *dev)
{
- struct mpr121_touchkey *mpr121 = dev_id;
- struct i2c_client *client = mpr121->client;
+ struct mpr121_touchkey *mpr121 = input_get_drvdata(dev);
struct input_dev *input = mpr121->input_dev;
+ struct i2c_client *client = mpr121->client;
unsigned long bit_changed;
unsigned int key_num;
int reg;
@@ -127,14 +130,14 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
if (reg < 0) {
dev_err(&client->dev, "i2c read error [%d]\n", reg);
- goto out;
+ return;
}
reg <<= 8;
reg |= i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_0_ADDR);
if (reg < 0) {
dev_err(&client->dev, "i2c read error [%d]\n", reg);
- goto out;
+ return;
}
reg &= TOUCH_STATUS_MASK;
@@ -155,8 +158,14 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
}
input_sync(input);
+}
+
+static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+{
+ struct mpr121_touchkey *mpr121 = dev_id;
+
+ mpr_touchkey_report(mpr121->input_dev);
-out:
return IRQ_HANDLED;
}
@@ -229,14 +238,10 @@ static int mpr_touchkey_probe(struct i2c_client *client,
int vdd_uv;
struct mpr121_touchkey *mpr121;
struct input_dev *input_dev;
+ u32 poll_interval = 0;
int error;
int i;
- if (!client->irq) {
- dev_err(dev, "irq number should not be zero\n");
- return -EINVAL;
- }
-
vdd_supply = mpr121_vdd_supply_init(dev);
if (IS_ERR(vdd_supply))
return PTR_ERR(vdd_supply);
@@ -274,6 +279,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
if (device_property_read_bool(dev, "autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ input_set_drvdata(input_dev, mpr121);
input_dev->keycode = mpr121->keycodes;
input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
@@ -288,13 +294,40 @@ static int mpr_touchkey_probe(struct i2c_client *client,
return error;
}
- error = devm_request_threaded_irq(dev, client->irq, NULL,
- mpr_touchkey_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev->driver->name, mpr121);
- if (error) {
- dev_err(dev, "Failed to register interrupt\n");
- return error;
+ device_property_read_u32(dev, "poll-interval", &poll_interval);
+
+ if (client->irq) {
+ error = devm_request_threaded_irq(dev, client->irq, NULL,
+ mpr_touchkey_interrupt,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev->driver->name, mpr121);
+ if (error) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return error;
+ }
+ } else if (poll_interval) {
+ if (poll_interval < MPR121_MIN_POLL_INTERVAL)
+ return -EINVAL;
+
+ if (poll_interval > MPR121_MAX_POLL_INTERVAL)
+ return -EINVAL;
+
+ error = input_setup_polling(input_dev, mpr_touchkey_report);
+ if (error) {
+ dev_err(dev, "Failed to setup polling\n");
+ return error;
+ }
+
+ input_set_poll_interval(input_dev, poll_interval);
+ input_set_min_poll_interval(input_dev,
+ MPR121_MIN_POLL_INTERVAL);
+ input_set_max_poll_interval(input_dev,
+ MPR121_MAX_POLL_INTERVAL);
+ } else {
+ dev_err(dev,
+ "invalid IRQ number and polling not configured\n");
+ return -EINVAL;
}
error = input_register_device(input_dev);
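
What makes the MPR121 fallback above cheap is the refactoring that precedes it: the report logic moves out of the IRQ handler into mpr_touchkey_report(), whose void fn(struct input_dev *) prototype is exactly what input_setup_polling() expects, so the same routine backs both the threaded interrupt and the poll loop. The shared-report shape in miniature (placeholder names):

#include <linux/input.h>
#include <linux/interrupt.h>

struct my_priv {
	struct input_dev *input;
};

/* One report routine, usable directly as a poll handler... */
static void my_report(struct input_dev *input)
{
	/* read the hardware and emit input events here */
}

/* ...and equally from the threaded IRQ path. */
static irqreturn_t my_irq_thread(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	my_report(priv->input);
	return IRQ_HANDLED;
}

Probe then chooses between the two: request the interrupt when client->irq is set, otherwise validate a firmware-supplied poll-interval against the 10-200 ms bounds and hand my_report to input_setup_polling().
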
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7d9ae394e597..7e2e658d551c 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -100,7 +100,6 @@ config INPUT_ATMEL_CAPTOUCH
config INPUT_BMA150
tristate "BMA150/SMB380 acceleration sensor support"
depends on I2C
- select INPUT_POLLDEV
help
Say Y here if you have Bosch Sensortec's BMA150 or SMB380
acceleration sensor hooked to an I2C bus.
@@ -246,7 +245,6 @@ config INPUT_MC13783_PWRBUTTON
config INPUT_MMA8450
tristate "MMA8450 - Freescale's 3-Axis, 8/12-bit Digital Accelerometer"
depends on I2C
- select INPUT_POLLDEV
help
Say Y here if you want to support Freescale's MMA8450 Accelerometer
through I2C interface.
@@ -257,7 +255,6 @@ config INPUT_MMA8450
config INPUT_APANEL
tristate "Fujitsu Lifebook Application Panel buttons"
depends on X86 && I2C && LEDS_CLASS
- select INPUT_POLLDEV
select CHECK_SIGNATURE
help
Say Y here for support of the Application Panel buttons, used on
@@ -291,7 +288,6 @@ config INPUT_GPIO_BEEPER
config INPUT_GPIO_DECODER
tristate "Polled GPIO Decoder Input driver"
depends on GPIOLIB || COMPILE_TEST
- select INPUT_POLLDEV
help
Say Y here if you want driver to read status of multiple GPIO
lines and report the encoded value as an absolute integer to
@@ -327,7 +323,6 @@ config INPUT_IXP4XX_BEEPER
config INPUT_COBALT_BTNS
tristate "Cobalt button interface"
depends on MIPS_COBALT
- select INPUT_POLLDEV
help
Say Y here if you want to support MIPS Cobalt button interface.
@@ -347,7 +342,6 @@ config INPUT_CPCAP_PWRBUTTON
config INPUT_WISTRON_BTNS
tristate "x86 Wistron laptop button interface"
depends on X86_32
- select INPUT_POLLDEV
select INPUT_SPARSEKMAP
select NEW_LEDS
select LEDS_CLASS
@@ -410,13 +404,6 @@ config INPUT_KXTJ9
To compile this driver as a module, choose M here: the module will
be called kxtj9.
-config INPUT_KXTJ9_POLLED_MODE
- bool "Enable polling mode support"
- depends on INPUT_KXTJ9
- select INPUT_POLLDEV
- help
- Say Y here if you need accelerometer to work in polling mode.
-
config INPUT_POWERMATE
tristate "Griffin PowerMate and Contour Jog support"
depends on USB_ARCH_HAS_HCD
@@ -546,7 +533,6 @@ config INPUT_UINPUT
config INPUT_SGI_BTNS
tristate "SGI Indy/O2 volume button interface"
depends on SGI_IP22 || SGI_IP32
- select INPUT_POLLDEV
help
Say Y here if you want to support SGI Indy/O2 volume button interface.
@@ -637,7 +623,6 @@ config INPUT_RB532_BUTTON
tristate "Mikrotik Routerboard 532 button interface"
depends on MIKROTIK_RB532
depends on GPIOLIB
- select INPUT_POLLDEV
help
Say Y here if you want support for the S1 button built into
Mikrotik's Routerboard 532.
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 53ec40d1b90d..7276657ad7ca 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/leds.h>
@@ -51,19 +51,28 @@ static enum apanel_chip device_chip[APANEL_DEV_MAX];
#define MAX_PANEL_KEYS 12
struct apanel {
- struct input_polled_dev *ipdev;
+ struct input_dev *idev;
struct i2c_client *client;
unsigned short keymap[MAX_PANEL_KEYS];
- u16 nkeys;
+ u16 nkeys;
struct led_classdev mail_led;
};
+static const unsigned short apanel_keymap[MAX_PANEL_KEYS] = {
+ [0] = KEY_MAIL,
+ [1] = KEY_WWW,
+ [2] = KEY_PROG2,
+ [3] = KEY_PROG1,
-static int apanel_probe(struct i2c_client *, const struct i2c_device_id *);
+ [8] = KEY_FORWARD,
+ [9] = KEY_REWIND,
+ [10] = KEY_STOPCD,
+ [11] = KEY_PLAYPAUSE,
+};
static void report_key(struct input_dev *input, unsigned keycode)
{
- pr_debug(APANEL ": report key %#x\n", keycode);
+ dev_dbg(input->dev.parent, "report key %#x\n", keycode);
input_report_key(input, keycode, 1);
input_sync(input);
@@ -79,10 +88,9 @@ static void report_key(struct input_dev *input, unsigned keycode)
* CD keys:
* Forward (0x100), Rewind (0x200), Stop (0x400), Pause (0x800)
*/
-static void apanel_poll(struct input_polled_dev *ipdev)
+static void apanel_poll(struct input_dev *idev)
{
- struct apanel *ap = ipdev->private;
- struct input_dev *idev = ipdev->input;
+ struct apanel *ap = input_get_drvdata(idev);
u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
s32 data;
int i;
@@ -112,126 +120,93 @@ static int mail_led_set(struct led_classdev *led,
return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
}
-static int apanel_remove(struct i2c_client *client)
-{
- struct apanel *ap = i2c_get_clientdata(client);
-
- if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
- led_classdev_unregister(&ap->mail_led);
-
- input_unregister_polled_device(ap->ipdev);
- input_free_polled_device(ap->ipdev);
-
- return 0;
-}
-
-static void apanel_shutdown(struct i2c_client *client)
-{
- apanel_remove(client);
-}
-
-static const struct i2c_device_id apanel_id[] = {
- { "fujitsu_apanel", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, apanel_id);
-
-static struct i2c_driver apanel_driver = {
- .driver = {
- .name = APANEL,
- },
- .probe = &apanel_probe,
- .remove = &apanel_remove,
- .shutdown = &apanel_shutdown,
- .id_table = apanel_id,
-};
-
-static struct apanel apanel = {
- .keymap = {
- [0] = KEY_MAIL,
- [1] = KEY_WWW,
- [2] = KEY_PROG2,
- [3] = KEY_PROG1,
-
- [8] = KEY_FORWARD,
- [9] = KEY_REWIND,
- [10] = KEY_STOPCD,
- [11] = KEY_PLAYPAUSE,
-
- },
- .mail_led = {
- .name = "mail:blue",
- .brightness_set_blocking = mail_led_set,
- },
-};
-
-/* NB: Only one panel on the i2c. */
static int apanel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct apanel *ap;
- struct input_polled_dev *ipdev;
struct input_dev *idev;
u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
- int i, err = -ENOMEM;
+ int i, err;
- ap = &apanel;
+ ap = devm_kzalloc(&client->dev, sizeof(*ap), GFP_KERNEL);
+ if (!ap)
+ return -ENOMEM;
- ipdev = input_allocate_polled_device();
- if (!ipdev)
- goto out1;
+ idev = devm_input_allocate_device(&client->dev);
+ if (!idev)
+ return -ENOMEM;
- ap->ipdev = ipdev;
+ ap->idev = idev;
ap->client = client;
i2c_set_clientdata(client, ap);
err = i2c_smbus_write_word_data(client, cmd, 0);
if (err) {
- dev_warn(&client->dev, APANEL ": smbus write error %d\n",
- err);
- goto out3;
+ dev_warn(&client->dev, "smbus write error %d\n", err);
+ return err;
}
- ipdev->poll = apanel_poll;
- ipdev->poll_interval = POLL_INTERVAL_DEFAULT;
- ipdev->private = ap;
+ input_set_drvdata(idev, ap);
- idev = ipdev->input;
idev->name = APANEL_NAME " buttons";
idev->phys = "apanel/input0";
idev->id.bustype = BUS_HOST;
- idev->dev.parent = &client->dev;
-
- set_bit(EV_KEY, idev->evbit);
+ memcpy(ap->keymap, apanel_keymap, sizeof(apanel_keymap));
idev->keycode = ap->keymap;
idev->keycodesize = sizeof(ap->keymap[0]);
idev->keycodemax = (device_chip[APANEL_DEV_CDBTN] != CHIP_NONE) ? 12 : 4;
+ set_bit(EV_KEY, idev->evbit);
for (i = 0; i < idev->keycodemax; i++)
if (ap->keymap[i])
set_bit(ap->keymap[i], idev->keybit);
- err = input_register_polled_device(ipdev);
+ err = input_setup_polling(idev, apanel_poll);
if (err)
- goto out3;
+ return err;
+
+ input_set_poll_interval(idev, POLL_INTERVAL_DEFAULT);
+
+ err = input_register_device(idev);
+ if (err)
+ return err;
if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
- err = led_classdev_register(&client->dev, &ap->mail_led);
+ ap->mail_led.name = "mail:blue";
+ ap->mail_led.brightness_set_blocking = mail_led_set;
+ err = devm_led_classdev_register(&client->dev, &ap->mail_led);
if (err)
- goto out4;
+ return err;
}
return 0;
-out4:
- input_unregister_polled_device(ipdev);
-out3:
- input_free_polled_device(ipdev);
-out1:
- return err;
}
+static void apanel_shutdown(struct i2c_client *client)
+{
+ struct apanel *ap = i2c_get_clientdata(client);
+
+ if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
+ led_set_brightness(&ap->mail_led, LED_OFF);
+}
+
+static const struct i2c_device_id apanel_id[] = {
+ { "fujitsu_apanel", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, apanel_id);
+
+static struct i2c_driver apanel_driver = {
+ .driver = {
+ .name = APANEL,
+ },
+ .probe = apanel_probe,
+ .shutdown = apanel_shutdown,
+ .id_table = apanel_id,
+};
+
/* Scan the system ROM for the signature "FJKEYINF" */
static __init const void __iomem *bios_signature(const void __iomem *bios)
{
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 735d3a46f44b..a9d984da95f3 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -123,7 +122,6 @@
struct bma150_data {
struct i2c_client *client;
- struct input_polled_dev *input_polled;
struct input_dev *input;
u8 mode;
};
@@ -336,13 +334,16 @@ static irqreturn_t bma150_irq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static void bma150_poll(struct input_polled_dev *dev)
+static void bma150_poll(struct input_dev *input)
{
- bma150_report_xyz(dev->private);
+ struct bma150_data *bma150 = input_get_drvdata(input);
+
+ bma150_report_xyz(bma150);
}
-static int bma150_open(struct bma150_data *bma150)
+static int bma150_open(struct input_dev *input)
{
+ struct bma150_data *bma150 = input_get_drvdata(input);
int error;
error = pm_runtime_get_sync(&bma150->client->dev);
@@ -362,44 +363,18 @@ static int bma150_open(struct bma150_data *bma150)
return 0;
}
-static void bma150_close(struct bma150_data *bma150)
+static void bma150_close(struct input_dev *input)
{
+ struct bma150_data *bma150 = input_get_drvdata(input);
+
pm_runtime_put_sync(&bma150->client->dev);
if (bma150->mode != BMA150_MODE_SLEEP)
bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static int bma150_irq_open(struct input_dev *input)
-{
- struct bma150_data *bma150 = input_get_drvdata(input);
-
- return bma150_open(bma150);
-}
-
-static void bma150_irq_close(struct input_dev *input)
-{
- struct bma150_data *bma150 = input_get_drvdata(input);
-
- bma150_close(bma150);
-}
-
-static void bma150_poll_open(struct input_polled_dev *ipoll_dev)
-{
- struct bma150_data *bma150 = ipoll_dev->private;
-
- bma150_open(bma150);
-}
-
-static void bma150_poll_close(struct input_polled_dev *ipoll_dev)
-{
- struct bma150_data *bma150 = ipoll_dev->private;
-
- bma150_close(bma150);
-}
-
static int bma150_initialize(struct bma150_data *bma150,
- const struct bma150_cfg *cfg)
+ const struct bma150_cfg *cfg)
{
int error;
@@ -439,84 +414,14 @@ static int bma150_initialize(struct bma150_data *bma150,
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static void bma150_init_input_device(struct bma150_data *bma150,
- struct input_dev *idev)
-{
- idev->name = BMA150_DRIVER;
- idev->phys = BMA150_DRIVER "/input0";
- idev->id.bustype = BUS_I2C;
- idev->dev.parent = &bma150->client->dev;
-
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
- input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
- input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
-}
-
-static int bma150_register_input_device(struct bma150_data *bma150)
-{
- struct input_dev *idev;
- int error;
-
- idev = input_allocate_device();
- if (!idev)
- return -ENOMEM;
-
- bma150_init_input_device(bma150, idev);
-
- idev->open = bma150_irq_open;
- idev->close = bma150_irq_close;
- input_set_drvdata(idev, bma150);
-
- bma150->input = idev;
-
- error = input_register_device(idev);
- if (error) {
- input_free_device(idev);
- return error;
- }
-
- return 0;
-}
-
-static int bma150_register_polled_device(struct bma150_data *bma150)
-{
- struct input_polled_dev *ipoll_dev;
- int error;
-
- ipoll_dev = input_allocate_polled_device();
- if (!ipoll_dev)
- return -ENOMEM;
-
- ipoll_dev->private = bma150;
- ipoll_dev->open = bma150_poll_open;
- ipoll_dev->close = bma150_poll_close;
- ipoll_dev->poll = bma150_poll;
- ipoll_dev->poll_interval = BMA150_POLL_INTERVAL;
- ipoll_dev->poll_interval_min = BMA150_POLL_MIN;
- ipoll_dev->poll_interval_max = BMA150_POLL_MAX;
-
- bma150_init_input_device(bma150, ipoll_dev->input);
-
- bma150->input_polled = ipoll_dev;
- bma150->input = ipoll_dev->input;
-
- error = input_register_polled_device(ipoll_dev);
- if (error) {
- input_free_polled_device(ipoll_dev);
- return error;
- }
-
- return 0;
-}
-
static int bma150_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
const struct bma150_platform_data *pdata =
dev_get_platdata(&client->dev);
const struct bma150_cfg *cfg;
struct bma150_data *bma150;
+ struct input_dev *idev;
int chip_id;
int error;
@@ -531,7 +436,7 @@ static int bma150_probe(struct i2c_client *client,
return -EINVAL;
}
- bma150 = kzalloc(sizeof(struct bma150_data), GFP_KERNEL);
+ bma150 = devm_kzalloc(&client->dev, sizeof(*bma150), GFP_KERNEL);
if (!bma150)
return -ENOMEM;
@@ -544,7 +449,7 @@ static int bma150_probe(struct i2c_client *client,
dev_err(&client->dev,
"IRQ GPIO conf. error %d, error %d\n",
client->irq, error);
- goto err_free_mem;
+ return error;
}
}
cfg = &pdata->cfg;
@@ -554,14 +459,42 @@ static int bma150_probe(struct i2c_client *client,
error = bma150_initialize(bma150, cfg);
if (error)
- goto err_free_mem;
+ return error;
- if (client->irq > 0) {
- error = bma150_register_input_device(bma150);
+ idev = devm_input_allocate_device(&bma150->client->dev);
+ if (!idev)
+ return -ENOMEM;
+
+ input_set_drvdata(idev, bma150);
+ bma150->input = idev;
+
+ idev->name = BMA150_DRIVER;
+ idev->phys = BMA150_DRIVER "/input0";
+ idev->id.bustype = BUS_I2C;
+
+ idev->open = bma150_open;
+ idev->close = bma150_close;
+
+ input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+ input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+ input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0);
+
+ if (client->irq <= 0) {
+ error = input_setup_polling(idev, bma150_poll);
if (error)
- goto err_free_mem;
+ return error;
+
+ input_set_poll_interval(idev, BMA150_POLL_INTERVAL);
+ input_set_min_poll_interval(idev, BMA150_POLL_MIN);
+ input_set_max_poll_interval(idev, BMA150_POLL_MAX);
+ }
+
+ error = input_register_device(idev);
+ if (error)
+ return error;
- error = request_threaded_irq(client->irq,
+ if (client->irq > 0) {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, bma150_irq_thread,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
BMA150_DRIVER, bma150);
@@ -569,13 +502,8 @@ static int bma150_probe(struct i2c_client *client,
dev_err(&client->dev,
"irq request failed %d, error %d\n",
client->irq, error);
- input_unregister_device(bma150->input);
- goto err_free_mem;
+ return error;
}
- } else {
- error = bma150_register_polled_device(bma150);
- if (error)
- goto err_free_mem;
}
i2c_set_clientdata(client, bma150);
@@ -583,33 +511,16 @@ static int bma150_probe(struct i2c_client *client,
pm_runtime_enable(&client->dev);
return 0;
-
-err_free_mem:
- kfree(bma150);
- return error;
}
static int bma150_remove(struct i2c_client *client)
{
- struct bma150_data *bma150 = i2c_get_clientdata(client);
-
pm_runtime_disable(&client->dev);
- if (client->irq > 0) {
- free_irq(client->irq, bma150);
- input_unregister_device(bma150->input);
- } else {
- input_unregister_polled_device(bma150->input_polled);
- input_free_polled_device(bma150->input_polled);
- }
-
- kfree(bma150);
-
return 0;
}
-#ifdef CONFIG_PM
-static int bma150_suspend(struct device *dev)
+static int __maybe_unused bma150_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bma150_data *bma150 = i2c_get_clientdata(client);
@@ -617,14 +528,13 @@ static int bma150_suspend(struct device *dev)
return bma150_set_mode(bma150, BMA150_MODE_SLEEP);
}
-static int bma150_resume(struct device *dev)
+static int __maybe_unused bma150_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bma150_data *bma150 = i2c_get_clientdata(client);
return bma150_set_mode(bma150, BMA150_MODE_NORMAL);
}
-#endif
static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
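
The #ifdef CONFIG_PM brackets around the bma150 PM callbacks are traded for __maybe_unused, the idiom preferred for new code: the functions always compile (so they cannot bit-rot in !CONFIG_PM builds), and when the UNIVERSAL_DEV_PM_OPS table expands to empty the compiler silently drops them instead of warning about unused statics. In sketch form, using the simpler sleep-only macro:

#include <linux/pm.h>

static int __maybe_unused my_suspend(struct device *dev)
{
	/* put the hardware into its low-power state */
	return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/* References suspend/resume only when CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
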
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index bcf6174bbd5d..b1624f5414ee 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -4,7 +4,8 @@
*
* Copyright (C) 2007-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*/
-#include <linux/input-polldev.h>
+#include <linux/input.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,16 +27,14 @@ static const unsigned short cobalt_map[] = {
};
struct buttons_dev {
- struct input_polled_dev *poll_dev;
unsigned short keymap[ARRAY_SIZE(cobalt_map)];
int count[ARRAY_SIZE(cobalt_map)];
void __iomem *reg;
};
-static void handle_buttons(struct input_polled_dev *dev)
+static void handle_buttons(struct input_dev *input)
{
- struct buttons_dev *bdev = dev->private;
- struct input_dev *input = dev->input;
+ struct buttons_dev *bdev = input_get_drvdata(input);
uint32_t status;
int i;
@@ -62,29 +61,33 @@ static void handle_buttons(struct input_polled_dev *dev)
static int cobalt_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
struct resource *res;
int error, i;
- bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
- poll_dev = input_allocate_polled_device();
- if (!bdev || !poll_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EBUSY;
+
+ bdev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!bdev->reg)
+ return -ENOMEM;
memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));
- poll_dev->private = bdev;
- poll_dev->poll = handle_buttons;
- poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input_set_drvdata(input, bdev);
- input = poll_dev->input;
input->name = "Cobalt buttons";
input->phys = "cobalt/input0";
input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
input->keycode = bdev->keymap;
input->keycodemax = ARRAY_SIZE(bdev->keymap);
@@ -96,39 +99,16 @@ static int cobalt_buttons_probe(struct platform_device *pdev)
__set_bit(bdev->keymap[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- error = -EBUSY;
- goto err_free_mem;
- }
-
- bdev->poll_dev = poll_dev;
- bdev->reg = ioremap(res->start, resource_size(res));
- dev_set_drvdata(&pdev->dev, bdev);
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, handle_buttons);
if (error)
- goto err_iounmap;
-
- return 0;
+ return error;
- err_iounmap:
- iounmap(bdev->reg);
- err_free_mem:
- input_free_polled_device(poll_dev);
- kfree(bdev);
- return error;
-}
+ input_set_poll_interval(input, BUTTONS_POLL_INTERVAL);
-static int cobalt_buttons_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct buttons_dev *bdev = dev_get_drvdata(dev);
-
- input_unregister_polled_device(bdev->poll_dev);
- input_free_polled_device(bdev->poll_dev);
- iounmap(bdev->reg);
- kfree(bdev);
+ error = input_register_device(input);
+ if (error)
+ return error;
return 0;
}
@@ -141,7 +121,6 @@ MODULE_ALIAS("platform:Cobalt buttons");
static struct platform_driver cobalt_buttons_driver = {
.probe = cobalt_buttons_probe,
- .remove = cobalt_buttons_remove,
.driver = {
.name = "Cobalt buttons",
},
diff --git a/drivers/input/misc/gpio_decoder.c b/drivers/input/misc/gpio_decoder.c
index 1dca526e6f1a..145826a1a9a1 100644
--- a/drivers/input/misc/gpio_decoder.c
+++ b/drivers/input/misc/gpio_decoder.c
@@ -17,14 +17,12 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
struct gpio_decoder {
- struct input_polled_dev *poll_dev;
struct gpio_descs *input_gpios;
struct device *dev;
u32 axis;
@@ -53,15 +51,15 @@ static int gpio_decoder_get_gpios_state(struct gpio_decoder *decoder)
return ret;
}
-static void gpio_decoder_poll_gpios(struct input_polled_dev *poll_dev)
+static void gpio_decoder_poll_gpios(struct input_dev *input)
{
- struct gpio_decoder *decoder = poll_dev->private;
+ struct gpio_decoder *decoder = input_get_drvdata(input);
int state;
state = gpio_decoder_get_gpios_state(decoder);
if (state >= 0 && state != decoder->last_stable) {
- input_report_abs(poll_dev->input, decoder->axis, state);
- input_sync(poll_dev->input);
+ input_report_abs(input, decoder->axis, state);
+ input_sync(input);
decoder->last_stable = state;
}
}
@@ -70,20 +68,23 @@ static int gpio_decoder_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_decoder *decoder;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
u32 max;
int err;
- decoder = devm_kzalloc(dev, sizeof(struct gpio_decoder), GFP_KERNEL);
+ decoder = devm_kzalloc(dev, sizeof(*decoder), GFP_KERNEL);
if (!decoder)
return -ENOMEM;
+ decoder->dev = dev;
device_property_read_u32(dev, "linux,axis", &decoder->axis);
+
decoder->input_gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
if (IS_ERR(decoder->input_gpios)) {
dev_err(dev, "unable to acquire input gpios\n");
return PTR_ERR(decoder->input_gpios);
}
+
if (decoder->input_gpios->ndescs < 2) {
dev_err(dev, "not enough gpios found\n");
return -EINVAL;
@@ -92,22 +93,25 @@ static int gpio_decoder_probe(struct platform_device *pdev)
if (device_property_read_u32(dev, "decoder-max-value", &max))
max = (1U << decoder->input_gpios->ndescs) - 1;
- decoder->dev = dev;
- poll_dev = devm_input_allocate_polled_device(decoder->dev);
- if (!poll_dev)
+ input = devm_input_allocate_device(dev);
+ if (!input)
return -ENOMEM;
- poll_dev->private = decoder;
- poll_dev->poll = gpio_decoder_poll_gpios;
- decoder->poll_dev = poll_dev;
+ input_set_drvdata(input, decoder);
- poll_dev->input->name = pdev->name;
- poll_dev->input->id.bustype = BUS_HOST;
- input_set_abs_params(poll_dev->input, decoder->axis, 0, max, 0, 0);
+ input->name = pdev->name;
+ input->id.bustype = BUS_HOST;
+ input_set_abs_params(input, decoder->axis, 0, max, 0, 0);
+
+ err = input_setup_polling(input, gpio_decoder_poll_gpios);
+ if (err) {
+ dev_err(dev, "failed to set up polling\n");
+ return err;
+ }
- err = input_register_polled_device(poll_dev);
+ err = input_register_device(input);
if (err) {
- dev_err(dev, "failed to register polled device\n");
+ dev_err(dev, "failed to register input device\n");
return err;
}
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index abca895a6156..199bc17ddb1d 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -53,28 +53,10 @@ MODULE_LICENSE("Dual BSD/GPL");
#define RTC_VERSION "1.10d"
-static DEFINE_MUTEX(hp_sdc_rtc_mutex);
static unsigned long epoch = 2000;
static struct semaphore i8042tregs;
-static hp_sdc_irqhook hp_sdc_rtc_isr;
-
-static struct fasync_struct *hp_sdc_rtc_async_queue;
-
-static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
-
-static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-
-static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg);
-
-static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait);
-
-static int hp_sdc_rtc_open(struct inode *inode, struct file *file);
-static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on);
-
static void hp_sdc_rtc_isr (int irq, void *dev_id,
uint8_t status, uint8_t data)
{
@@ -283,151 +265,6 @@ static inline int hp_sdc_rtc_read_ct(struct timespec64 *res) {
return 0;
}
-
-#if 0 /* not used yet */
-/* Set the i8042 real-time clock */
-static int hp_sdc_rtc_set_rt (struct timeval *setto)
-{
- uint32_t tenms;
- unsigned int days;
- hp_sdc_transaction t;
- uint8_t tseq[11] = {
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- HP_SDC_CMD_SET_RTMS, 3, 0, 0, 0,
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- HP_SDC_CMD_SET_RTD, 2, 0, 0
- };
-
- t.endidx = 10;
-
- if (0xffff < setto->tv_sec / 86400) return -1;
- days = setto->tv_sec / 86400;
- if (0xffff < setto->tv_usec / 1000000 / 86400) return -1;
- days += ((setto->tv_sec % 86400) + setto->tv_usec / 1000000) / 86400;
- if (days > 0xffff) return -1;
-
- if (0xffffff < setto->tv_sec) return -1;
- tenms = setto->tv_sec * 100;
- if (0xffffff < setto->tv_usec / 10000) return -1;
- tenms += setto->tv_usec / 10000;
- if (tenms > 0xffffff) return -1;
-
- tseq[3] = (uint8_t)(tenms & 0xff);
- tseq[4] = (uint8_t)((tenms >> 8) & 0xff);
- tseq[5] = (uint8_t)((tenms >> 16) & 0xff);
-
- tseq[9] = (uint8_t)(days & 0xff);
- tseq[10] = (uint8_t)((days >> 8) & 0xff);
-
- t.seq = tseq;
-
- if (hp_sdc_enqueue_transaction(&t)) return -1;
- return 0;
-}
-
-/* Set the i8042 fast handshake timer */
-static int hp_sdc_rtc_set_fhs (struct timeval *setto)
-{
- uint32_t tenms;
- hp_sdc_transaction t;
- uint8_t tseq[5] = {
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- HP_SDC_CMD_SET_FHS, 2, 0, 0
- };
-
- t.endidx = 4;
-
- if (0xffff < setto->tv_sec) return -1;
- tenms = setto->tv_sec * 100;
- if (0xffff < setto->tv_usec / 10000) return -1;
- tenms += setto->tv_usec / 10000;
- if (tenms > 0xffff) return -1;
-
- tseq[3] = (uint8_t)(tenms & 0xff);
- tseq[4] = (uint8_t)((tenms >> 8) & 0xff);
-
- t.seq = tseq;
-
- if (hp_sdc_enqueue_transaction(&t)) return -1;
- return 0;
-}
-
-
-/* Set the i8042 match timer (a.k.a. alarm) */
-#define hp_sdc_rtc_set_mt (setto) \
- hp_sdc_rtc_set_i8042timer(setto, HP_SDC_CMD_SET_MT)
-
-/* Set the i8042 delay timer */
-#define hp_sdc_rtc_set_dt (setto) \
- hp_sdc_rtc_set_i8042timer(setto, HP_SDC_CMD_SET_DT)
-
-/* Set the i8042 cycle timer (a.k.a. periodic) */
-#define hp_sdc_rtc_set_ct (setto) \
- hp_sdc_rtc_set_i8042timer(setto, HP_SDC_CMD_SET_CT)
-
-/* Set one of the i8042 3-byte wide timers */
-static int hp_sdc_rtc_set_i8042timer (struct timeval *setto, uint8_t setcmd)
-{
- uint32_t tenms;
- hp_sdc_transaction t;
- uint8_t tseq[6] = {
- HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAOUT,
- 0, 3, 0, 0, 0
- };
-
- t.endidx = 6;
-
- if (0xffffff < setto->tv_sec) return -1;
- tenms = setto->tv_sec * 100;
- if (0xffffff < setto->tv_usec / 10000) return -1;
- tenms += setto->tv_usec / 10000;
- if (tenms > 0xffffff) return -1;
-
- tseq[1] = setcmd;
- tseq[3] = (uint8_t)(tenms & 0xff);
- tseq[4] = (uint8_t)((tenms >> 8) & 0xff);
- tseq[5] = (uint8_t)((tenms >> 16) & 0xff);
-
- t.seq = tseq;
-
- if (hp_sdc_enqueue_transaction(&t)) {
- return -1;
- }
- return 0;
-}
-#endif
-
-static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos) {
- ssize_t retval;
-
- if (count < sizeof(unsigned long))
- return -EINVAL;
-
- retval = put_user(68, (unsigned long __user *)buf);
- return retval;
-}
-
-static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait)
-{
- unsigned long l;
-
- l = 0;
- if (l != 0)
- return EPOLLIN | EPOLLRDNORM;
- return 0;
-}
-
-static int hp_sdc_rtc_open(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int hp_sdc_rtc_fasync (int fd, struct file *filp, int on)
-{
- return fasync_helper (fd, filp, on, &hp_sdc_rtc_async_queue);
-}
-
static int hp_sdc_rtc_proc_show(struct seq_file *m, void *v)
{
#define YN(bit) ("no")
@@ -507,182 +344,6 @@ static int hp_sdc_rtc_proc_show(struct seq_file *m, void *v)
#undef NY
}
-static int hp_sdc_rtc_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
-#if 1
- return -EINVAL;
-#else
-
- struct rtc_time wtime;
- struct timeval ttime;
- int use_wtime = 0;
-
- /* This needs major work. */
-
- switch (cmd) {
-
- case RTC_AIE_OFF: /* Mask alarm int. enab. bit */
- case RTC_AIE_ON: /* Allow alarm interrupts. */
- case RTC_PIE_OFF: /* Mask periodic int. enab. bit */
- case RTC_PIE_ON: /* Allow periodic ints */
- case RTC_UIE_ON: /* Allow ints for RTC updates. */
- case RTC_UIE_OFF: /* Allow ints for RTC updates. */
- {
- /* We cannot mask individual user timers and we
- cannot tell them apart when they occur, so it
- would be disingenuous to succeed these IOCTLs */
- return -EINVAL;
- }
- case RTC_ALM_READ: /* Read the present alarm time */
- {
- if (hp_sdc_rtc_read_mt(&ttime)) return -EFAULT;
- if (hp_sdc_rtc_read_bbrtc(&wtime)) return -EFAULT;
-
- wtime.tm_hour = ttime.tv_sec / 3600; ttime.tv_sec %= 3600;
- wtime.tm_min = ttime.tv_sec / 60; ttime.tv_sec %= 60;
- wtime.tm_sec = ttime.tv_sec;
-
- break;
- }
- case RTC_IRQP_READ: /* Read the periodic IRQ rate. */
- {
- return put_user(hp_sdc_rtc_freq, (unsigned long *)arg);
- }
- case RTC_IRQP_SET: /* Set periodic IRQ rate. */
- {
- /*
- * The max we can do is 100Hz.
- */
-
- if ((arg < 1) || (arg > 100)) return -EINVAL;
- ttime.tv_sec = 0;
- ttime.tv_usec = 1000000 / arg;
- if (hp_sdc_rtc_set_ct(&ttime)) return -EFAULT;
- hp_sdc_rtc_freq = arg;
- return 0;
- }
- case RTC_ALM_SET: /* Store a time into the alarm */
- {
- /*
- * This expects a struct hp_sdc_rtc_time. Writing 0xff means
- * "don't care" or "match all" for PC timers. The HP SDC
- * does not support that perk, but it could be emulated fairly
- * easily. Only the tm_hour, tm_min and tm_sec are used.
- * We could do it with 10ms accuracy with the HP SDC, if the
- * rtc interface left us a way to do that.
- */
- struct hp_sdc_rtc_time alm_tm;
-
- if (copy_from_user(&alm_tm, (struct hp_sdc_rtc_time*)arg,
- sizeof(struct hp_sdc_rtc_time)))
- return -EFAULT;
-
- if (alm_tm.tm_hour > 23) return -EINVAL;
- if (alm_tm.tm_min > 59) return -EINVAL;
- if (alm_tm.tm_sec > 59) return -EINVAL;
-
- ttime.sec = alm_tm.tm_hour * 3600 +
- alm_tm.tm_min * 60 + alm_tm.tm_sec;
- ttime.usec = 0;
- if (hp_sdc_rtc_set_mt(&ttime)) return -EFAULT;
- return 0;
- }
- case RTC_RD_TIME: /* Read the time/date from RTC */
- {
- if (hp_sdc_rtc_read_bbrtc(&wtime)) return -EFAULT;
- break;
- }
- case RTC_SET_TIME: /* Set the RTC */
- {
- struct rtc_time hp_sdc_rtc_tm;
- unsigned char mon, day, hrs, min, sec, leap_yr;
- unsigned int yrs;
-
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
- if (copy_from_user(&hp_sdc_rtc_tm, (struct rtc_time *)arg,
- sizeof(struct rtc_time)))
- return -EFAULT;
-
- yrs = hp_sdc_rtc_tm.tm_year + 1900;
- mon = hp_sdc_rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
- day = hp_sdc_rtc_tm.tm_mday;
- hrs = hp_sdc_rtc_tm.tm_hour;
- min = hp_sdc_rtc_tm.tm_min;
- sec = hp_sdc_rtc_tm.tm_sec;
-
- if (yrs < 1970)
- return -EINVAL;
-
- leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
- if ((mon > 12) || (day == 0))
- return -EINVAL;
- if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- return -EINVAL;
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
- if ((yrs -= epoch) > 255) /* They are unsigned */
- return -EINVAL;
-
-
- return 0;
- }
- case RTC_EPOCH_READ: /* Read the epoch. */
- {
- return put_user (epoch, (unsigned long *)arg);
- }
- case RTC_EPOCH_SET: /* Set the epoch. */
- {
- /*
- * There were no RTC clocks before 1900.
- */
- if (arg < 1900)
- return -EINVAL;
- if (!capable(CAP_SYS_TIME))
- return -EACCES;
-
- epoch = arg;
- return 0;
- }
- default:
- return -EINVAL;
- }
- return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
-#endif
-}
-
-static long hp_sdc_rtc_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&hp_sdc_rtc_mutex);
- ret = hp_sdc_rtc_ioctl(file, cmd, arg);
- mutex_unlock(&hp_sdc_rtc_mutex);
-
- return ret;
-}
-
-
-static const struct file_operations hp_sdc_rtc_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = hp_sdc_rtc_read,
- .poll = hp_sdc_rtc_poll,
- .unlocked_ioctl = hp_sdc_rtc_unlocked_ioctl,
- .open = hp_sdc_rtc_open,
- .fasync = hp_sdc_rtc_fasync,
-};
-
-static struct miscdevice hp_sdc_rtc_dev = {
- .minor = RTC_MINOR,
- .name = "rtc_HIL",
- .fops = &hp_sdc_rtc_fops
-};
-
static int __init hp_sdc_rtc_init(void)
{
int ret;
@@ -696,8 +357,6 @@ static int __init hp_sdc_rtc_init(void)
if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
return ret;
- if (misc_register(&hp_sdc_rtc_dev) != 0)
- printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n");
proc_create_single("driver/rtc", 0, NULL, hp_sdc_rtc_proc_show);
@@ -710,7 +369,6 @@ static int __init hp_sdc_rtc_init(void)
static void __exit hp_sdc_rtc_exit(void)
{
remove_proc_entry ("driver/rtc", NULL);
- misc_deregister(&hp_sdc_rtc_dev);
hp_sdc_release_timer_irq(hp_sdc_rtc_isr);
printk(KERN_INFO "HP i8042 SDC + MSM-58321 RTC support unloaded\n");
}
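All of the removed hp_sdc_rtc_set_* helpers above share one encoding: a struct timeval is collapsed into a count of 10 ms ticks, bounds-checked against the width of the target timer (16 or 24 bits), and packed least-significant byte first into the command sequence. A standalone sketch of that encoding for the 24-bit case — pack_tenms is a hypothetical name, not from the driver:

	#include <stdint.h>

	/* Returns 0 on success, -1 if the value does not fit in 24 bits. */
	static int pack_tenms(long sec, long usec, uint8_t out[3])
	{
		uint32_t tenms;

		if (sec < 0 || usec < 0 || usec >= 1000000)
			return -1;
		if (sec > 0xffffff / 100)
			return -1;

		/* One tick is 10 ms: seconds * 100 plus microseconds / 10000. */
		tenms = (uint32_t)sec * 100 + (uint32_t)(usec / 10000);
		if (tenms > 0xffffff)
			return -1;

		out[0] = tenms & 0xff;		/* least-significant byte first */
		out[1] = (tenms >> 8) & 0xff;
		out[2] = (tenms >> 16) & 0xff;
		return 0;
	}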
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index db01c4a33914..52313c6e3fb3 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/kxtj9.h>
-#include <linux/input-polldev.h>
#define NAME "kxtj9"
#define G_MAX 8000
@@ -71,9 +70,6 @@ struct kxtj9_data {
struct i2c_client *client;
struct kxtj9_platform_data pdata;
struct input_dev *input_dev;
-#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
- struct input_polled_dev *poll_dev;
-#endif
unsigned int last_poll_interval;
u8 shift;
u8 ctrl_reg1;
@@ -282,50 +278,6 @@ static void kxtj9_input_close(struct input_dev *dev)
kxtj9_disable(tj9);
}
-static void kxtj9_init_input_device(struct kxtj9_data *tj9,
- struct input_dev *input_dev)
-{
- __set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
- input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
- input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
-
- input_dev->name = "kxtj9_accel";
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &tj9->client->dev;
-}
-
-static int kxtj9_setup_input_device(struct kxtj9_data *tj9)
-{
- struct input_dev *input_dev;
- int err;
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(&tj9->client->dev, "input device allocate failed\n");
- return -ENOMEM;
- }
-
- tj9->input_dev = input_dev;
-
- input_dev->open = kxtj9_input_open;
- input_dev->close = kxtj9_input_close;
- input_set_drvdata(input_dev, tj9);
-
- kxtj9_init_input_device(tj9, input_dev);
-
- err = input_register_device(tj9->input_dev);
- if (err) {
- dev_err(&tj9->client->dev,
- "unable to register input polled device %s: %d\n",
- tj9->input_dev->name, err);
- input_free_device(tj9->input_dev);
- return err;
- }
-
- return 0;
-}
-
/*
* When IRQ mode is selected, we need to provide an interface to allow the user
* to change the output data rate of the part. For consistency, we are using
@@ -391,12 +343,10 @@ static struct attribute_group kxtj9_attribute_group = {
.attrs = kxtj9_attributes
};
-
-#ifdef CONFIG_INPUT_KXTJ9_POLLED_MODE
-static void kxtj9_poll(struct input_polled_dev *dev)
+static void kxtj9_poll(struct input_dev *input)
{
- struct kxtj9_data *tj9 = dev->private;
- unsigned int poll_interval = dev->poll_interval;
+ struct kxtj9_data *tj9 = input_get_drvdata(input);
+ unsigned int poll_interval = input_get_poll_interval(input);
kxtj9_report_acceleration_data(tj9);
@@ -406,72 +356,14 @@ static void kxtj9_poll(struct input_polled_dev *dev)
}
}
-static void kxtj9_polled_input_open(struct input_polled_dev *dev)
-{
- struct kxtj9_data *tj9 = dev->private;
-
- kxtj9_enable(tj9);
-}
-
-static void kxtj9_polled_input_close(struct input_polled_dev *dev)
-{
- struct kxtj9_data *tj9 = dev->private;
-
- kxtj9_disable(tj9);
-}
-
-static int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
-{
- int err;
- struct input_polled_dev *poll_dev;
- poll_dev = input_allocate_polled_device();
-
- if (!poll_dev) {
- dev_err(&tj9->client->dev,
- "Failed to allocate polled device\n");
- return -ENOMEM;
- }
-
- tj9->poll_dev = poll_dev;
- tj9->input_dev = poll_dev->input;
-
- poll_dev->private = tj9;
- poll_dev->poll = kxtj9_poll;
- poll_dev->open = kxtj9_polled_input_open;
- poll_dev->close = kxtj9_polled_input_close;
-
- kxtj9_init_input_device(tj9, poll_dev->input);
-
- err = input_register_polled_device(poll_dev);
- if (err) {
- dev_err(&tj9->client->dev,
- "Unable to register polled device, err=%d\n", err);
- input_free_polled_device(poll_dev);
- return err;
- }
-
- return 0;
-}
-
-static void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
-{
- input_unregister_polled_device(tj9->poll_dev);
- input_free_polled_device(tj9->poll_dev);
-}
-
-#else
-
-static inline int kxtj9_setup_polled_device(struct kxtj9_data *tj9)
+static void kxtj9_platform_exit(void *data)
{
- return -ENOSYS;
-}
+ struct kxtj9_data *tj9 = data;
-static inline void kxtj9_teardown_polled_device(struct kxtj9_data *tj9)
-{
+ if (tj9->pdata.exit)
+ tj9->pdata.exit();
}
-#endif
-
static int kxtj9_verify(struct kxtj9_data *tj9)
{
int retval;
@@ -494,11 +386,12 @@ out:
}
static int kxtj9_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
const struct kxtj9_platform_data *pdata =
dev_get_platdata(&client->dev);
struct kxtj9_data *tj9;
+ struct input_dev *input_dev;
int err;
if (!i2c_check_functionality(client->adapter,
@@ -512,7 +405,7 @@ static int kxtj9_probe(struct i2c_client *client,
return -EINVAL;
}
- tj9 = kzalloc(sizeof(*tj9), GFP_KERNEL);
+ tj9 = devm_kzalloc(&client->dev, sizeof(*tj9), GFP_KERNEL);
if (!tj9) {
dev_err(&client->dev,
"failed to allocate memory for module data\n");
@@ -525,13 +418,17 @@ static int kxtj9_probe(struct i2c_client *client,
if (pdata->init) {
err = pdata->init();
if (err < 0)
- goto err_free_mem;
+ return err;
}
+ err = devm_add_action_or_reset(&client->dev, kxtj9_platform_exit, tj9);
+ if (err)
+ return err;
+
err = kxtj9_verify(tj9);
if (err < 0) {
dev_err(&client->dev, "device not recognized\n");
- goto err_pdata_exit;
+ return err;
}
i2c_set_clientdata(client, tj9);
@@ -539,67 +436,63 @@ static int kxtj9_probe(struct i2c_client *client,
tj9->ctrl_reg1 = tj9->pdata.res_12bit | tj9->pdata.g_range;
tj9->last_poll_interval = tj9->pdata.init_interval;
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "input device allocate failed\n");
+ return -ENOMEM;
+ }
+
+ input_set_drvdata(input_dev, tj9);
+ tj9->input_dev = input_dev;
+
+ input_dev->name = "kxtj9_accel";
+ input_dev->id.bustype = BUS_I2C;
+
+ input_dev->open = kxtj9_input_open;
+ input_dev->close = kxtj9_input_close;
+
+ input_set_abs_params(input_dev, ABS_X, -G_MAX, G_MAX, FUZZ, FLAT);
+ input_set_abs_params(input_dev, ABS_Y, -G_MAX, G_MAX, FUZZ, FLAT);
+ input_set_abs_params(input_dev, ABS_Z, -G_MAX, G_MAX, FUZZ, FLAT);
+
+ if (client->irq <= 0) {
+ err = input_setup_polling(input_dev, kxtj9_poll);
+ if (err)
+ return err;
+ }
+
+ err = input_register_device(input_dev);
+ if (err) {
+ dev_err(&client->dev,
+ "unable to register input polled device %s: %d\n",
+ input_dev->name, err);
+ return err;
+ }
+
if (client->irq) {
/* If in irq mode, populate INT_CTRL_REG1 and enable DRDY. */
tj9->int_ctrl |= KXTJ9_IEN | KXTJ9_IEA | KXTJ9_IEL;
tj9->ctrl_reg1 |= DRDYE;
- err = kxtj9_setup_input_device(tj9);
- if (err)
- goto err_pdata_exit;
-
- err = request_threaded_irq(client->irq, NULL, kxtj9_isr,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "kxtj9-irq", tj9);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, kxtj9_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ "kxtj9-irq", tj9);
if (err) {
dev_err(&client->dev, "request irq failed: %d\n", err);
- goto err_destroy_input;
+ return err;
}
- err = sysfs_create_group(&client->dev.kobj, &kxtj9_attribute_group);
+ err = devm_device_add_group(&client->dev,
+ &kxtj9_attribute_group);
if (err) {
dev_err(&client->dev, "sysfs create failed: %d\n", err);
- goto err_free_irq;
+ return err;
}
-
- } else {
- err = kxtj9_setup_polled_device(tj9);
- if (err)
- goto err_pdata_exit;
}
return 0;
-
-err_free_irq:
- free_irq(client->irq, tj9);
-err_destroy_input:
- input_unregister_device(tj9->input_dev);
-err_pdata_exit:
- if (tj9->pdata.exit)
- tj9->pdata.exit();
-err_free_mem:
- kfree(tj9);
- return err;
-}
-
-static int kxtj9_remove(struct i2c_client *client)
-{
- struct kxtj9_data *tj9 = i2c_get_clientdata(client);
-
- if (client->irq) {
- sysfs_remove_group(&client->dev.kobj, &kxtj9_attribute_group);
- free_irq(client->irq, tj9);
- input_unregister_device(tj9->input_dev);
- } else {
- kxtj9_teardown_polled_device(tj9);
- }
-
- if (tj9->pdata.exit)
- tj9->pdata.exit();
-
- kfree(tj9);
-
- return 0;
}
static int __maybe_unused kxtj9_suspend(struct device *dev)
@@ -647,7 +540,6 @@ static struct i2c_driver kxtj9_driver = {
.pm = &kxtj9_pm_ops,
},
.probe = kxtj9_probe,
- .remove = kxtj9_remove,
.id_table = kxtj9_id,
};
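The kxtj9 conversion above is the template for the rest of this series: the input-polldev shim goes away, the driver allocates an ordinary (devm-managed) input device, and polling is attached with input_setup_polling() before registration. A minimal sketch of that shape for a hypothetical platform driver — all foo_* names are illustrative, only the input_* calls are the real API used by this patch:

	#include <linux/input.h>
	#include <linux/platform_device.h>

	static void foo_poll(struct input_dev *input)
	{
		/* Sample the hardware here and report its state. */
		input_report_key(input, KEY_POWER, 0);
		input_sync(input);
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct input_dev *input;
		int error;

		/* devm-managed: no remove() callback or unwind labels needed. */
		input = devm_input_allocate_device(&pdev->dev);
		if (!input)
			return -ENOMEM;

		input->name = "foo button";
		input->id.bustype = BUS_HOST;
		input_set_capability(input, EV_KEY, KEY_POWER);

		/* Attach the poll handler before registering the device. */
		error = input_setup_polling(input, foo_poll);
		if (error)
			return error;

		input_set_poll_interval(input, 50);	/* milliseconds */

		return input_register_device(input);
	}

Because every allocation is device-managed and the input core unregisters the device on unbind, the .remove callback disappears entirely — which is exactly what happens to kxtj9_remove() above.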
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 49f5242bc54c..1b5a5e19230a 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -10,7 +10,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/of_device.h>
#define MMA8450_DRV_NAME "mma8450"
@@ -39,15 +39,8 @@
#define MMA8450_CTRL_REG1 0x38
#define MMA8450_CTRL_REG2 0x39
-/* mma8450 status */
-struct mma8450 {
- struct i2c_client *client;
- struct input_polled_dev *idev;
-};
-
-static int mma8450_read(struct mma8450 *m, unsigned off)
+static int mma8450_read(struct i2c_client *c, unsigned int off)
{
- struct i2c_client *c = m->client;
int ret;
ret = i2c_smbus_read_byte_data(c, off);
@@ -59,9 +52,8 @@ static int mma8450_read(struct mma8450 *m, unsigned off)
return ret;
}
-static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
+static int mma8450_write(struct i2c_client *c, unsigned int off, u8 v)
{
- struct i2c_client *c = m->client;
int error;
error = i2c_smbus_write_byte_data(c, off, v);
@@ -75,10 +67,9 @@ static int mma8450_write(struct mma8450 *m, unsigned off, u8 v)
return 0;
}
-static int mma8450_read_block(struct mma8450 *m, unsigned off,
+static int mma8450_read_block(struct i2c_client *c, unsigned int off,
u8 *buf, size_t size)
{
- struct i2c_client *c = m->client;
int err;
err = i2c_smbus_read_i2c_block_data(c, off, size, buf);
@@ -92,21 +83,21 @@ static int mma8450_read_block(struct mma8450 *m, unsigned off,
return 0;
}
-static void mma8450_poll(struct input_polled_dev *dev)
+static void mma8450_poll(struct input_dev *input)
{
- struct mma8450 *m = dev->private;
+ struct i2c_client *c = input_get_drvdata(input);
int x, y, z;
int ret;
u8 buf[6];
- ret = mma8450_read(m, MMA8450_STATUS);
+ ret = mma8450_read(c, MMA8450_STATUS);
if (ret < 0)
return;
if (!(ret & MMA8450_STATUS_ZXYDR))
return;
- ret = mma8450_read_block(m, MMA8450_OUT_X_LSB, buf, sizeof(buf));
+ ret = mma8450_read_block(c, MMA8450_OUT_X_LSB, buf, sizeof(buf));
if (ret < 0)
return;
@@ -114,41 +105,42 @@ static void mma8450_poll(struct input_polled_dev *dev)
y = ((int)(s8)buf[3] << 4) | (buf[2] & 0xf);
z = ((int)(s8)buf[5] << 4) | (buf[4] & 0xf);
- input_report_abs(dev->input, ABS_X, x);
- input_report_abs(dev->input, ABS_Y, y);
- input_report_abs(dev->input, ABS_Z, z);
- input_sync(dev->input);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_Z, z);
+ input_sync(input);
}
/* Initialize the MMA8450 chip */
-static void mma8450_open(struct input_polled_dev *dev)
+static int mma8450_open(struct input_dev *input)
{
- struct mma8450 *m = dev->private;
+ struct i2c_client *c = input_get_drvdata(input);
int err;
/* enable all events from X/Y/Z, no FIFO */
- err = mma8450_write(m, MMA8450_XYZ_DATA_CFG, 0x07);
+ err = mma8450_write(c, MMA8450_XYZ_DATA_CFG, 0x07);
if (err)
- return;
+ return err;
/*
* Sleep mode poll rate - 50Hz
* System output data rate - 400Hz
* Full scale selection - Active, +/- 2G
*/
- err = mma8450_write(m, MMA8450_CTRL_REG1, 0x01);
- if (err < 0)
- return;
+ err = mma8450_write(c, MMA8450_CTRL_REG1, 0x01);
+ if (err)
+ return err;
msleep(MODE_CHANGE_DELAY_MS);
+ return 0;
}
-static void mma8450_close(struct input_polled_dev *dev)
+static void mma8450_close(struct input_dev *input)
{
- struct mma8450 *m = dev->private;
+ struct i2c_client *c = input_get_drvdata(input);
- mma8450_write(m, MMA8450_CTRL_REG1, 0x00);
- mma8450_write(m, MMA8450_CTRL_REG2, 0x01);
+ mma8450_write(c, MMA8450_CTRL_REG1, 0x00);
+ mma8450_write(c, MMA8450_CTRL_REG2, 0x01);
}
/*
@@ -157,38 +149,37 @@ static void mma8450_close(struct input_polled_dev *dev)
static int mma8450_probe(struct i2c_client *c,
const struct i2c_device_id *id)
{
- struct input_polled_dev *idev;
- struct mma8450 *m;
+ struct input_dev *input;
int err;
- m = devm_kzalloc(&c->dev, sizeof(*m), GFP_KERNEL);
- if (!m)
+ input = devm_input_allocate_device(&c->dev);
+ if (!input)
return -ENOMEM;
- idev = devm_input_allocate_polled_device(&c->dev);
- if (!idev)
- return -ENOMEM;
+ input_set_drvdata(input, c);
+
+ input->name = MMA8450_DRV_NAME;
+ input->id.bustype = BUS_I2C;
+
+ input->open = mma8450_open;
+ input->close = mma8450_close;
- m->client = c;
- m->idev = idev;
+ input_set_abs_params(input, ABS_X, -2048, 2047, 32, 32);
+ input_set_abs_params(input, ABS_Y, -2048, 2047, 32, 32);
+ input_set_abs_params(input, ABS_Z, -2048, 2047, 32, 32);
- idev->private = m;
- idev->input->name = MMA8450_DRV_NAME;
- idev->input->id.bustype = BUS_I2C;
- idev->poll = mma8450_poll;
- idev->poll_interval = POLL_INTERVAL;
- idev->poll_interval_max = POLL_INTERVAL_MAX;
- idev->open = mma8450_open;
- idev->close = mma8450_close;
+ err = input_setup_polling(input, mma8450_poll);
+ if (err) {
+ dev_err(&c->dev, "failed to set up polling\n");
+ return err;
+ }
- __set_bit(EV_ABS, idev->input->evbit);
- input_set_abs_params(idev->input, ABS_X, -2048, 2047, 32, 32);
- input_set_abs_params(idev->input, ABS_Y, -2048, 2047, 32, 32);
- input_set_abs_params(idev->input, ABS_Z, -2048, 2047, 32, 32);
+ input_set_poll_interval(input, POLL_INTERVAL);
+ input_set_max_poll_interval(input, POLL_INTERVAL_MAX);
- err = input_register_polled_device(idev);
+ err = input_register_device(input);
if (err) {
- dev_err(&c->dev, "failed to register polled input device\n");
+ dev_err(&c->dev, "failed to register input device\n");
return err;
}
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index 4412055f8761..190a80e1e2c1 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -5,7 +5,7 @@
* Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org>
*/
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
@@ -46,56 +46,42 @@ static bool rb532_button_pressed(void)
return !val;
}
-static void rb532_button_poll(struct input_polled_dev *poll_dev)
+static void rb532_button_poll(struct input_dev *input)
{
- input_report_key(poll_dev->input, RB532_BTN_KSYM,
- rb532_button_pressed());
- input_sync(poll_dev->input);
+ input_report_key(input, RB532_BTN_KSYM, rb532_button_pressed());
+ input_sync(input);
}
static int rb532_button_probe(struct platform_device *pdev)
{
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
int error;
- poll_dev = input_allocate_polled_device();
- if (!poll_dev)
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
return -ENOMEM;
- poll_dev->poll = rb532_button_poll;
- poll_dev->poll_interval = RB532_BTN_RATE;
+ input->name = "rb532 button";
+ input->phys = "rb532/button0";
+ input->id.bustype = BUS_HOST;
- poll_dev->input->name = "rb532 button";
- poll_dev->input->phys = "rb532/button0";
- poll_dev->input->id.bustype = BUS_HOST;
- poll_dev->input->dev.parent = &pdev->dev;
+ input_set_capability(input, EV_KEY, RB532_BTN_KSYM);
- dev_set_drvdata(&pdev->dev, poll_dev);
-
- input_set_capability(poll_dev->input, EV_KEY, RB532_BTN_KSYM);
-
- error = input_register_polled_device(poll_dev);
- if (error) {
- input_free_polled_device(poll_dev);
+ error = input_setup_polling(input, rb532_button_poll);
+ if (error)
return error;
- }
- return 0;
-}
+ input_set_poll_interval(input, RB532_BTN_RATE);
-static int rb532_button_remove(struct platform_device *pdev)
-{
- struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev);
-
- input_unregister_polled_device(poll_dev);
- input_free_polled_device(poll_dev);
+ error = input_register_device(input);
+ if (error)
+ return error;
return 0;
}
static struct platform_driver rb532_button_driver = {
.probe = rb532_button_probe,
- .remove = rb532_button_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 0fee6ddf3602..0657d785b3cc 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2008 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*/
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -45,15 +45,13 @@ static const unsigned short sgi_map[] = {
};
struct buttons_dev {
- struct input_polled_dev *poll_dev;
unsigned short keymap[ARRAY_SIZE(sgi_map)];
int count[ARRAY_SIZE(sgi_map)];
};
-static void handle_buttons(struct input_polled_dev *dev)
+static void handle_buttons(struct input_dev *input)
{
- struct buttons_dev *bdev = dev->private;
- struct input_dev *input = dev->input;
+ struct buttons_dev *bdev = input_get_drvdata(input);
u8 status;
int i;
@@ -80,28 +78,24 @@ static void handle_buttons(struct input_polled_dev *dev)
static int sgi_buttons_probe(struct platform_device *pdev)
{
struct buttons_dev *bdev;
- struct input_polled_dev *poll_dev;
struct input_dev *input;
int error, i;
- bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
- poll_dev = input_allocate_polled_device();
- if (!bdev || !poll_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
memcpy(bdev->keymap, sgi_map, sizeof(bdev->keymap));
- poll_dev->private = bdev;
- poll_dev->poll = handle_buttons;
- poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;
+ input_set_drvdata(input, bdev);
- input = poll_dev->input;
input->name = "SGI buttons";
input->phys = "sgi/input0";
input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
input->keycode = bdev->keymap;
input->keycodemax = ARRAY_SIZE(bdev->keymap);
@@ -113,35 +107,21 @@ static int sgi_buttons_probe(struct platform_device *pdev)
__set_bit(bdev->keymap[i], input->keybit);
__clear_bit(KEY_RESERVED, input->keybit);
- bdev->poll_dev = poll_dev;
- platform_set_drvdata(pdev, bdev);
-
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, handle_buttons);
if (error)
- goto err_free_mem;
+ return error;
- return 0;
+ input_set_poll_interval(input, BUTTONS_POLL_INTERVAL);
- err_free_mem:
- input_free_polled_device(poll_dev);
- kfree(bdev);
- return error;
-}
-
-static int sgi_buttons_remove(struct platform_device *pdev)
-{
- struct buttons_dev *bdev = platform_get_drvdata(pdev);
-
- input_unregister_polled_device(bdev->poll_dev);
- input_free_polled_device(bdev->poll_dev);
- kfree(bdev);
+ error = input_register_device(input);
+ if (error)
+ return error;
return 0;
}
static struct platform_driver sgi_buttons_driver = {
.probe = sgi_buttons_probe,
- .remove = sgi_buttons_remove,
.driver = {
.name = "sgibtns",
},
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 7ce6cc60d4d2..80dfd72a02d3 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -8,7 +8,7 @@
#include <linux/io.h>
#include <linux/dmi.h>
#include <linux/init.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -1030,7 +1030,7 @@ static int __init select_keymap(void)
/* Input layer interface */
-static struct input_polled_dev *wistron_idev;
+static struct input_dev *wistron_idev;
static unsigned long jiffies_last_press;
static bool wifi_enabled;
static bool bluetooth_enabled;
@@ -1114,7 +1114,7 @@ static inline void wistron_led_resume(void)
static void handle_key(u8 code)
{
const struct key_entry *key =
- sparse_keymap_entry_from_scancode(wistron_idev->input, code);
+ sparse_keymap_entry_from_scancode(wistron_idev, code);
if (key) {
switch (key->type) {
@@ -1133,14 +1133,14 @@ static void handle_key(u8 code)
break;
default:
- sparse_keymap_report_entry(wistron_idev->input,
- key, 1, true);
+ sparse_keymap_report_entry(wistron_idev, key, 1, true);
break;
}
jiffies_last_press = jiffies;
- } else
+ } else {
printk(KERN_NOTICE
"wistron_btns: Unknown key code %02X\n", code);
+ }
}
static void poll_bios(bool discard)
@@ -1158,21 +1158,23 @@ static void poll_bios(bool discard)
}
}
-static void wistron_flush(struct input_polled_dev *dev)
+static int wistron_flush(struct input_dev *dev)
{
/* Flush stale event queue */
poll_bios(true);
+
+ return 0;
}
-static void wistron_poll(struct input_polled_dev *dev)
+static void wistron_poll(struct input_dev *dev)
{
poll_bios(false);
/* Increase poll frequency if user is currently pressing keys (< 2s ago) */
if (time_before(jiffies, jiffies_last_press + 2 * HZ))
- dev->poll_interval = POLL_INTERVAL_BURST;
+ input_set_poll_interval(dev, POLL_INTERVAL_BURST);
else
- dev->poll_interval = POLL_INTERVAL_DEFAULT;
+ input_set_poll_interval(dev, POLL_INTERVAL_DEFAULT);
}
static int wistron_setup_keymap(struct input_dev *dev,
@@ -1208,35 +1210,37 @@ static int wistron_setup_keymap(struct input_dev *dev,
static int setup_input_dev(void)
{
- struct input_dev *input_dev;
int error;
- wistron_idev = input_allocate_polled_device();
+ wistron_idev = input_allocate_device();
if (!wistron_idev)
return -ENOMEM;
+ wistron_idev->name = "Wistron laptop buttons";
+ wistron_idev->phys = "wistron/input0";
+ wistron_idev->id.bustype = BUS_HOST;
+ wistron_idev->dev.parent = &wistron_device->dev;
+
wistron_idev->open = wistron_flush;
- wistron_idev->poll = wistron_poll;
- wistron_idev->poll_interval = POLL_INTERVAL_DEFAULT;
- input_dev = wistron_idev->input;
- input_dev->name = "Wistron laptop buttons";
- input_dev->phys = "wistron/input0";
- input_dev->id.bustype = BUS_HOST;
- input_dev->dev.parent = &wistron_device->dev;
+ error = sparse_keymap_setup(wistron_idev, keymap, wistron_setup_keymap);
+ if (error)
+ goto err_free_dev;
- error = sparse_keymap_setup(input_dev, keymap, wistron_setup_keymap);
+ error = input_setup_polling(wistron_idev, wistron_poll);
if (error)
goto err_free_dev;
- error = input_register_polled_device(wistron_idev);
+ input_set_poll_interval(wistron_idev, POLL_INTERVAL_DEFAULT);
+
+ error = input_register_device(wistron_idev);
if (error)
goto err_free_dev;
return 0;
err_free_dev:
- input_free_polled_device(wistron_idev);
+ input_free_device(wistron_idev);
return error;
}
@@ -1285,8 +1289,7 @@ static int wistron_probe(struct platform_device *dev)
static int wistron_remove(struct platform_device *dev)
{
wistron_led_remove();
- input_unregister_polled_device(wistron_idev);
- input_free_polled_device(wistron_idev);
+ input_unregister_device(wistron_idev);
bios_detach();
return 0;
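One detail worth noting in the wistron conversion: where the old input-polldev interface let the driver mutate dev->poll_interval directly, the new API makes the same burst/idle switching an explicit input_set_poll_interval() call from inside the poll handler. A reduced sketch of that pattern — the 2-second window matches wistron_poll above, while the bar_* names and interval values are illustrative:

	#include <linux/input.h>
	#include <linux/jiffies.h>

	#define BAR_POLL_BURST_MS	100	/* while keys are active */
	#define BAR_POLL_IDLE_MS	500	/* otherwise */

	static unsigned long bar_last_press;	/* updated when a key is seen */

	static void bar_poll(struct input_dev *dev)
	{
		/* ... scan the hardware, update bar_last_press on activity ... */

		/* Poll faster for two seconds after the last key press. */
		if (time_before(jiffies, bar_last_press + 2 * HZ))
			input_set_poll_interval(dev, BAR_POLL_BURST_MS);
		else
			input_set_poll_interval(dev, BAR_POLL_IDLE_MS);
	}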
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 652c38e3c0b5..d8b6a5dab190 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -92,14 +92,14 @@ config MOUSE_PS2_SYNAPTICS_SMBUS
If unsure, say Y.
config MOUSE_PS2_CYPRESS
- bool "Cypress PS/2 mouse protocol extension" if EXPERT
- default y
- depends on MOUSE_PS2
- help
- Say Y here if you have a Cypress PS/2 Trackpad connected to
- your system.
+ bool "Cypress PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a Cypress PS/2 Trackpad connected to
+ your system.
- If unsure, say Y.
+ If unsure, say Y.
config MOUSE_PS2_LIFEBOOK
bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
@@ -381,7 +381,6 @@ config MOUSE_VSXXXAA
config MOUSE_GPIO
tristate "GPIO mouse"
depends on GPIOLIB || COMPILE_TEST
- select INPUT_POLLDEV
help
This driver simulates a mouse on GPIO lines of various CPUs (and some
other chips).
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 461436f6f087..23507fce3a2b 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/gpio/consumer.h>
#include <linux/property.h>
#include <linux/of.h>
@@ -43,10 +43,9 @@ struct gpio_mouse {
* Timer function which is run every scan_ms ms when the device is opened.
* The dev input variable is set to the input_dev pointer.
*/
-static void gpio_mouse_scan(struct input_polled_dev *dev)
+static void gpio_mouse_scan(struct input_dev *input)
{
- struct gpio_mouse *gpio = dev->private;
- struct input_dev *input = dev->input;
+ struct gpio_mouse *gpio = input_get_drvdata(input);
int x, y;
if (gpio->bleft)
@@ -71,18 +70,17 @@ static int gpio_mouse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_mouse *gmouse;
- struct input_polled_dev *input_poll;
struct input_dev *input;
- int ret;
+ int error;
gmouse = devm_kzalloc(dev, sizeof(*gmouse), GFP_KERNEL);
if (!gmouse)
return -ENOMEM;
/* Assign some default scanning time */
- ret = device_property_read_u32(dev, "scan-interval-ms",
- &gmouse->scan_ms);
- if (ret || gmouse->scan_ms == 0) {
+ error = device_property_read_u32(dev, "scan-interval-ms",
+ &gmouse->scan_ms);
+ if (error || gmouse->scan_ms == 0) {
dev_warn(dev, "invalid scan time, set to 50 ms\n");
gmouse->scan_ms = 50;
}
@@ -112,23 +110,14 @@ static int gpio_mouse_probe(struct platform_device *pdev)
if (IS_ERR(gmouse->bright))
return PTR_ERR(gmouse->bright);
- input_poll = devm_input_allocate_polled_device(dev);
- if (!input_poll) {
- dev_err(dev, "not enough memory for input device\n");
+ input = devm_input_allocate_device(dev);
+ if (!input)
return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, input_poll);
-
- /* set input-polldev handlers */
- input_poll->private = gmouse;
- input_poll->poll = gpio_mouse_scan;
- input_poll->poll_interval = gmouse->scan_ms;
- input = input_poll->input;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
- input->dev.parent = &pdev->dev;
+
+ input_set_drvdata(input, gmouse);
input_set_capability(input, EV_REL, REL_X);
input_set_capability(input, EV_REL, REL_Y);
@@ -139,10 +128,16 @@ static int gpio_mouse_probe(struct platform_device *pdev)
if (gmouse->bright)
input_set_capability(input, EV_KEY, BTN_RIGHT);
- ret = input_register_polled_device(input_poll);
- if (ret) {
+ error = input_setup_polling(input, gpio_mouse_scan);
+ if (error)
+ return error;
+
+ input_set_poll_interval(input, gmouse->scan_ms);
+
+ error = input_register_device(input);
+ if (error) {
dev_err(dev, "could not register input device\n");
- return ret;
+ return error;
}
dev_dbg(dev, "%d ms scan time, buttons: %s%s%s\n",
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 56fae3472114..1ae6f8bba9ae 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -172,6 +172,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
+ "LEN0091", /* X1 Carbon 6 */
"LEN0092", /* X1 Carbon 6 */
"LEN0093", /* T480 */
"LEN0096", /* X280 */
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index 897105b9a98b..0bc01cfc2b51 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -81,11 +81,6 @@ static const char * const rmi_f54_report_type_names[] = {
= "Full Raw Capacitance RX Offset Removed",
};
-struct rmi_f54_reports {
- int start;
- int size;
-};
-
struct f54_data {
struct rmi_function *fn;
@@ -98,7 +93,6 @@ struct f54_data {
enum rmi_f54_report_type report_type;
u8 *report_data;
int report_size;
- struct rmi_f54_reports standard_report[2];
bool is_busy;
struct mutex status_mutex;
@@ -116,6 +110,7 @@ struct f54_data {
struct video_device vdev;
struct vb2_queue queue;
struct mutex lock;
+ u32 sequence;
int input;
enum rmi_f54_report_type inputs[F54_MAX_REPORT_TYPE];
};
@@ -290,6 +285,7 @@ static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct f54_data *f54 = vb2_get_drv_priv(vb->vb2_queue);
u16 *ptr;
enum vb2_buffer_state state;
@@ -298,6 +294,7 @@ static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
mutex_lock(&f54->status_mutex);
+ vb2_set_plane_payload(vb, 0, 0);
reptype = rmi_f54_get_reptype(f54, f54->input);
if (reptype == F54_REPORT_NONE) {
state = VB2_BUF_STATE_ERROR;
@@ -344,14 +341,25 @@ static void rmi_f54_buffer_queue(struct vb2_buffer *vb)
data_done:
mutex_unlock(&f54->data_mutex);
done:
+ vb->timestamp = ktime_get_ns();
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = f54->sequence++;
vb2_buffer_done(vb, state);
mutex_unlock(&f54->status_mutex);
}
+static void rmi_f54_stop_streaming(struct vb2_queue *q)
+{
+ struct f54_data *f54 = vb2_get_drv_priv(q);
+
+ f54->sequence = 0;
+}
+
/* V4L2 structures */
static const struct vb2_ops rmi_f54_queue_ops = {
.queue_setup = rmi_f54_queue_setup,
.buf_queue = rmi_f54_buffer_queue,
+ .stop_streaming = rmi_f54_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
@@ -363,7 +371,6 @@ static const struct vb2_queue rmi_f54_queue = {
.ops = &rmi_f54_queue_ops,
.mem_ops = &vb2_vmalloc_memops,
.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC,
- .min_buffers_needed = 1,
};
static int rmi_f54_vidioc_querycap(struct file *file, void *priv,
@@ -516,13 +523,10 @@ static void rmi_f54_work(struct work_struct *work)
struct f54_data *f54 = container_of(work, struct f54_data, work.work);
struct rmi_function *fn = f54->fn;
u8 fifo[2];
- struct rmi_f54_reports *report;
int report_size;
u8 command;
- u8 *data;
int error;
- data = f54->report_data;
report_size = rmi_f54_get_report_size(f54);
if (report_size == 0) {
dev_err(&fn->dev, "Bad report size, report type=%d\n",
@@ -530,8 +534,6 @@ static void rmi_f54_work(struct work_struct *work)
error = -EINVAL;
goto error; /* retry won't help */
}
- f54->standard_report[0].size = report_size;
- report = f54->standard_report;
mutex_lock(&f54->data_mutex);
@@ -556,28 +558,23 @@ static void rmi_f54_work(struct work_struct *work)
rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
- report_size = 0;
- for (; report->size; report++) {
- fifo[0] = report->start & 0xff;
- fifo[1] = (report->start >> 8) & 0xff;
- error = rmi_write_block(fn->rmi_dev,
- fn->fd.data_base_addr + F54_FIFO_OFFSET,
- fifo, sizeof(fifo));
- if (error) {
- dev_err(&fn->dev, "Failed to set fifo start offset\n");
- goto abort;
- }
+ fifo[0] = 0;
+ fifo[1] = 0;
+ error = rmi_write_block(fn->rmi_dev,
+ fn->fd.data_base_addr + F54_FIFO_OFFSET,
+ fifo, sizeof(fifo));
+ if (error) {
+ dev_err(&fn->dev, "Failed to set fifo start offset\n");
+ goto abort;
+ }
- error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
- F54_REPORT_DATA_OFFSET, data,
- report->size);
- if (error) {
- dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
- __func__, report->size, error);
- goto abort;
- }
- data += report->size;
- report_size += report->size;
+ error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+ F54_REPORT_DATA_OFFSET, f54->report_data,
+ report_size);
+ if (error) {
+ dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+ __func__, report_size, error);
+ goto abort;
}
abort:
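The rmi_f54 hunks above do two independent things: they flatten the report read into a single FIFO write plus block read, and they add the buffer metadata the V4L2 framework expects drivers to fill in — a monotonic timestamp, a field setting, and a sequence number that restarts when streaming stops. A sketch of just the metadata half, under assumed quux_* names:

	#include <linux/ktime.h>
	#include <linux/types.h>
	#include <media/videobuf2-v4l2.h>

	struct quux_ctx {
		u32 sequence;
	};

	static void quux_complete_buffer(struct quux_ctx *ctx,
					 struct vb2_buffer *vb,
					 enum vb2_buffer_state state)
	{
		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

		vb->timestamp = ktime_get_ns();
		vbuf->field = V4L2_FIELD_NONE;
		vbuf->sequence = ctx->sequence++;
		vb2_buffer_done(vb, state);
	}

	static void quux_stop_streaming(struct vb2_queue *q)
	{
		struct quux_ctx *ctx = vb2_get_drv_priv(q);

		/* Numbering restarts from zero on the next stream start. */
		ctx->sequence = 0;
	}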
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index e4c0d9a055b9..51c339182017 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -39,16 +39,16 @@ config TABLET_USB_AIPTEK
module will be called aiptek.
config TABLET_USB_GTCO
- tristate "GTCO CalComp/InterWrite USB Support"
- depends on USB && INPUT
- help
- Say Y here if you want to use the USB version of the GTCO
- CalComp/InterWrite Tablet. Make sure to say Y to "Mouse support"
- (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
- (CONFIG_INPUT_EVDEV) as well.
-
- To compile this driver as a module, choose M here: the
- module will be called gtco.
+ tristate "GTCO CalComp/InterWrite USB Support"
+ depends on USB && INPUT
+ help
+ Say Y here if you want to use the USB version of the GTCO
+ CalComp/InterWrite Tablet. Make sure to say Y to "Mouse support"
+ (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
+ (CONFIG_INPUT_EVDEV) as well.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gtco.
config TABLET_USB_HANWANG
tristate "Hanwang Art Master III tablet support (USB)"
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 46ad9090493b..c071f7c407b6 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -633,7 +633,7 @@ config TOUCHSCREEN_HP600
depends on SH_HP6XX && SH_ADC
help
Say Y here if you have a HP Jornada 620/660/680/690 and want to
- support the built-in touchscreen.
+ support the built-in touchscreen.
To compile this driver as a module, choose M here: the
module will be called hp680_ts_input.
@@ -700,7 +700,6 @@ config TOUCHSCREEN_EDT_FT5X06
config TOUCHSCREEN_RASPBERRYPI_FW
tristate "Raspberry Pi's firmware base touch screen support"
depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
- select INPUT_POLLDEV
help
Say Y here if you have the official Raspberry Pi 7 inch screen on
your system.
@@ -1038,7 +1037,6 @@ config TOUCHSCREEN_TS4800
depends on HAS_IOMEM && OF
depends on SOC_IMX51 || COMPILE_TEST
select MFD_SYSCON
- select INPUT_POLLDEV
help
Say Y here if you have a touchscreen on a TS-4800 board.
@@ -1210,7 +1208,6 @@ config TOUCHSCREEN_SUR40
tristate "Samsung SUR40 (Surface 2.0/PixelSense) touchscreen"
depends on USB && MEDIA_USB_SUPPORT && HAS_DMA
depends on VIDEO_V4L2
- select INPUT_POLLDEV
select VIDEOBUF2_DMA_SG
help
Say Y here if you want support for the Samsung SUR40 touchscreen
@@ -1246,7 +1243,6 @@ config TOUCHSCREEN_SX8654
config TOUCHSCREEN_TPS6507X
tristate "TPS6507x based touchscreens"
depends on I2C
- select INPUT_POLLDEV
help
Say Y here if you have a TPS6507x based touchscreen
controller.
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index 28644f372bd8..c0d5c2413356 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -13,7 +13,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
-#define AR1021_TOCUH_PKG_SIZE 5
+#define AR1021_TOUCH_PKG_SIZE 5
#define AR1021_MAX_X 4095
#define AR1021_MAX_Y 4095
@@ -25,7 +25,7 @@
struct ar1021_i2c {
struct i2c_client *client;
struct input_dev *input;
- u8 data[AR1021_TOCUH_PKG_SIZE];
+ u8 data[AR1021_TOUCH_PKG_SIZE];
};
static irqreturn_t ar1021_i2c_irq(int irq, void *dev_id)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 24c4b691b1c9..ae60442efda0 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3156,6 +3156,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
mutex_unlock(&input_dev->mutex);
+ disable_irq(data->irq);
+
return 0;
}
@@ -3168,6 +3170,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
if (!input_dev)
return 0;
+ enable_irq(data->irq);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 0e40897949bb..aa829725ded7 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -9,7 +9,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5525f1fb1526..d61731c0037d 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -28,6 +28,7 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <asm/unaligned.h>
+#include <linux/regulator/consumer.h>
#define WORK_REGISTER_THRESHOLD 0x00
#define WORK_REGISTER_REPORT_RATE 0x08
@@ -88,6 +89,7 @@ struct edt_ft5x06_ts_data {
struct touchscreen_properties prop;
u16 num_x;
u16 num_y;
+ struct regulator *vcc;
struct gpio_desc *reset_gpio;
struct gpio_desc *wake_gpio;
@@ -1036,6 +1038,13 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
}
}
+static void edt_ft5x06_disable_regulator(void *arg)
+{
+ struct edt_ft5x06_ts_data *data = arg;
+
+ regulator_disable(data->vcc);
+}
+
static int edt_ft5x06_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1064,6 +1073,27 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->max_support_points = chip_data->max_support_points;
+ tsdata->vcc = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(tsdata->vcc)) {
+ error = PTR_ERR(tsdata->vcc);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to request regulator: %d\n", error);
+ return error;
+ }
+
+ error = regulator_enable(tsdata->vcc);
+ if (error < 0) {
+ dev_err(&client->dev, "failed to enable vcc: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&client->dev,
+ edt_ft5x06_disable_regulator,
+ tsdata);
+ if (error)
+ return error;
+
tsdata->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_HIGH);
if (IS_ERR(tsdata->reset_gpio)) {
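The regulator handling added to edt-ft5x06 above leans on devm_add_action_or_reset(): the disable callback is registered immediately after a successful regulator_enable(), so the supply is shut off both on any later probe failure and on device unbind, with no explicit error unwinding. The same idiom covers pdata->exit() in the kxtj9 change earlier. A condensed sketch, with baz_* names as placeholders:

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static void baz_disable_regulator(void *arg)
	{
		regulator_disable(arg);
	}

	static int baz_power_on(struct device *dev, struct regulator *vcc)
	{
		int error;

		error = regulator_enable(vcc);
		if (error)
			return error;

		/*
		 * Runs baz_disable_regulator() immediately if registration
		 * fails, otherwise automatically on unbind.
		 */
		return devm_add_action_or_reset(dev, baz_disable_regulator, vcc);
	}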
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index e9006407c9bc..4a17096e83e1 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -1,54 +1,54 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
#include <asm/unaligned.h>
-#define ILI210X_TOUCHES 2
-#define ILI251X_TOUCHES 10
-#define DEFAULT_POLL_PERIOD 20
+#define ILI2XXX_POLL_PERIOD 20
+
+#define ILI210X_DATA_SIZE 64
+#define ILI211X_DATA_SIZE 43
+#define ILI251X_DATA_SIZE1 31
+#define ILI251X_DATA_SIZE2 20
/* Touchscreen commands */
#define REG_TOUCHDATA 0x10
#define REG_PANEL_INFO 0x20
-#define REG_FIRMWARE_VERSION 0x40
#define REG_CALIBRATE 0xcc
-struct firmware_version {
- u8 id;
- u8 major;
- u8 minor;
-} __packed;
-
-enum ili2xxx_model {
- MODEL_ILI210X,
- MODEL_ILI251X,
+struct ili2xxx_chip {
+ int (*read_reg)(struct i2c_client *client, u8 reg,
+ void *buf, size_t len);
+ int (*get_touch_data)(struct i2c_client *client, u8 *data);
+ bool (*parse_touch_data)(const u8 *data, unsigned int finger,
+ unsigned int *x, unsigned int *y);
+ bool (*continue_polling)(const u8 *data, bool touch);
+ unsigned int max_touches;
+ unsigned int resolution;
+ bool has_calibrate_reg;
};
struct ili210x {
struct i2c_client *client;
struct input_dev *input;
- unsigned int poll_period;
- struct delayed_work dwork;
struct gpio_desc *reset_gpio;
struct touchscreen_properties prop;
- enum ili2xxx_model model;
- unsigned int max_touches;
+ const struct ili2xxx_chip *chip;
+ bool stop;
};
-static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
- size_t len)
+static int ili210x_read_reg(struct i2c_client *client,
+ u8 reg, void *buf, size_t len)
{
- struct ili210x *priv = i2c_get_clientdata(client);
- struct i2c_msg msg[2] = {
+ struct i2c_msg msg[] = {
{
.addr = client->addr,
.flags = 0,
@@ -62,151 +62,223 @@ static int ili210x_read_reg(struct i2c_client *client, u8 reg, void *buf,
.buf = buf,
}
};
+ int error, ret;
- if (priv->model == MODEL_ILI251X) {
- if (i2c_transfer(client->adapter, msg, 1) != 1) {
- dev_err(&client->dev, "i2c transfer failed\n");
- return -EIO;
- }
-
- usleep_range(5000, 5500);
-
- if (i2c_transfer(client->adapter, msg + 1, 1) != 1) {
- dev_err(&client->dev, "i2c transfer failed\n");
- return -EIO;
- }
- } else {
- if (i2c_transfer(client->adapter, msg, 2) != 2) {
- dev_err(&client->dev, "i2c transfer failed\n");
- return -EIO;
- }
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return error;
}
return 0;
}
-static int ili210x_read(struct i2c_client *client, void *buf, size_t len)
+static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
{
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- };
+ return ili210x_read_reg(client, REG_TOUCHDATA,
+ data, ILI210X_DATA_SIZE);
+}
+
+static bool ili210x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+ unsigned int *x, unsigned int *y)
+{
+ if (touchdata[0] & BIT(finger))
+ return false;
+
+ *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
+ *y = get_unaligned_be16(touchdata + 1 + (finger * 4) + 2);
- if (i2c_transfer(client->adapter, &msg, 1) != 1) {
- dev_err(&client->dev, "i2c transfer failed\n");
+ return true;
+}
+
+static bool ili210x_check_continue_polling(const u8 *data, bool touch)
+{
+ return data[0] & 0xf3;
+}
+
+static const struct ili2xxx_chip ili210x_chip = {
+ .read_reg = ili210x_read_reg,
+ .get_touch_data = ili210x_read_touch_data,
+ .parse_touch_data = ili210x_touchdata_to_coords,
+ .continue_polling = ili210x_check_continue_polling,
+ .max_touches = 2,
+ .has_calibrate_reg = true,
+};
+
+static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
+{
+ s16 sum = 0;
+ int error;
+ int ret;
+ int i;
+
+ ret = i2c_master_recv(client, data, ILI211X_DATA_SIZE);
+ if (ret != ILI211X_DATA_SIZE) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return error;
+ }
+
+ /* This chip uses custom checksum at the end of data */
+ for (i = 0; i < ILI211X_DATA_SIZE - 1; i++)
+ sum = (sum + data[i]) & 0xff;
+
+ if ((-sum & 0xff) != data[ILI211X_DATA_SIZE - 1]) {
+ dev_err(&client->dev,
+ "CRC error (crc=0x%02x expected=0x%02x)\n",
+ sum, data[ILI211X_DATA_SIZE - 1]);
return -EIO;
}
return 0;
}
-static bool ili210x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
+static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
unsigned int *x, unsigned int *y)
{
- if (finger >= ILI210X_TOUCHES)
- return false;
+ u32 data;
- if (touchdata[0] & BIT(finger))
+ data = get_unaligned_be32(touchdata + 1 + (finger * 4) + 0);
+ if (data == 0xffffffff) /* Finger up */
return false;
- *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
- *y = get_unaligned_be16(touchdata + 1 + (finger * 4) + 2);
+ *x = ((touchdata[1 + (finger * 4) + 0] & 0xf0) << 4) |
+ touchdata[1 + (finger * 4) + 1];
+ *y = ((touchdata[1 + (finger * 4) + 0] & 0x0f) << 8) |
+ touchdata[1 + (finger * 4) + 2];
return true;
}
-static bool ili251x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
+static bool ili211x_decline_polling(const u8 *data, bool touch)
+{
+ return false;
+}
+
+static const struct ili2xxx_chip ili211x_chip = {
+ .read_reg = ili210x_read_reg,
+ .get_touch_data = ili211x_read_touch_data,
+ .parse_touch_data = ili211x_touchdata_to_coords,
+ .continue_polling = ili211x_decline_polling,
+ .max_touches = 10,
+ .resolution = 2048,
+};
+
+static int ili251x_read_reg(struct i2c_client *client,
+ u8 reg, void *buf, size_t len)
+{
+ int error;
+ int ret;
+
+ ret = i2c_master_send(client, &reg, 1);
+ if (ret == 1) {
+ usleep_range(5000, 5500);
+
+ ret = i2c_master_recv(client, buf, len);
+ if (ret == len)
+ return 0;
+ }
+
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, error);
+ return ret;
+}
+
+static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
+{
+ int error;
+
+ error = ili251x_read_reg(client, REG_TOUCHDATA,
+ data, ILI251X_DATA_SIZE1);
+ if (!error && data[0] == 2) {
+ error = i2c_master_recv(client, data + ILI251X_DATA_SIZE1,
+ ILI251X_DATA_SIZE2);
+ if (error >= 0 && error != ILI251X_DATA_SIZE2)
+ error = -EIO;
+ }
+
+ return error;
+}
+
+static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
unsigned int *x, unsigned int *y)
{
- if (finger >= ILI251X_TOUCHES)
- return false;
+ u16 val;
- *x = get_unaligned_be16(touchdata + 1 + (finger * 5) + 0);
- if (!(*x & BIT(15))) /* Touch indication */
+ val = get_unaligned_be16(touchdata + 1 + (finger * 5) + 0);
+ if (!(val & BIT(15))) /* Touch indication */
return false;
- *x &= 0x3fff;
+ *x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
return true;
}
+static bool ili251x_check_continue_polling(const u8 *data, bool touch)
+{
+ return touch;
+}
+
+static const struct ili2xxx_chip ili251x_chip = {
+ .read_reg = ili251x_read_reg,
+ .get_touch_data = ili251x_read_touch_data,
+ .parse_touch_data = ili251x_touchdata_to_coords,
+ .continue_polling = ili251x_check_continue_polling,
+ .max_touches = 10,
+ .has_calibrate_reg = true,
+};
+
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
{
struct input_dev *input = priv->input;
int i;
- bool contact = false, touch = false;
+ bool contact = false, touch;
unsigned int x = 0, y = 0;
- for (i = 0; i < priv->max_touches; i++) {
- if (priv->model == MODEL_ILI210X) {
- touch = ili210x_touchdata_to_coords(priv, touchdata,
- i, &x, &y);
- } else if (priv->model == MODEL_ILI251X) {
- touch = ili251x_touchdata_to_coords(priv, touchdata,
- i, &x, &y);
- if (touch)
- contact = true;
- }
+ for (i = 0; i < priv->chip->max_touches; i++) {
+ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
- if (!touch)
- continue;
- touchscreen_report_pos(input, &priv->prop, x, y,
- true);
+ if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
+ touchscreen_report_pos(input, &priv->prop, x, y, true);
+ contact = true;
+ }
}
input_mt_report_pointer_emulation(input, false);
input_sync(input);
- if (priv->model == MODEL_ILI210X)
- contact = touchdata[0] & 0xf3;
-
return contact;
}
-static void ili210x_work(struct work_struct *work)
+static irqreturn_t ili210x_irq(int irq, void *irq_data)
{
- struct ili210x *priv = container_of(work, struct ili210x,
- dwork.work);
+ struct ili210x *priv = irq_data;
struct i2c_client *client = priv->client;
- u8 touchdata[64] = { 0 };
+ const struct ili2xxx_chip *chip = priv->chip;
+ u8 touchdata[ILI210X_DATA_SIZE] = { 0 };
+ bool keep_polling;
bool touch;
- int error = -EINVAL;
-
- if (priv->model == MODEL_ILI210X) {
- error = ili210x_read_reg(client, REG_TOUCHDATA,
- touchdata, sizeof(touchdata));
- } else if (priv->model == MODEL_ILI251X) {
- error = ili210x_read_reg(client, REG_TOUCHDATA,
- touchdata, 31);
- if (!error && touchdata[0] == 2)
- error = ili210x_read(client, &touchdata[31], 20);
- }
-
- if (error) {
- dev_err(&client->dev,
- "Unable to get touchdata, err = %d\n", error);
- return;
- }
-
- touch = ili210x_report_events(priv, touchdata);
-
- if (touch)
- schedule_delayed_work(&priv->dwork,
- msecs_to_jiffies(priv->poll_period));
-}
+ int error;
-static irqreturn_t ili210x_irq(int irq, void *irq_data)
-{
- struct ili210x *priv = irq_data;
+ do {
+ error = chip->get_touch_data(client, touchdata);
+ if (error) {
+ dev_err(&client->dev,
+ "Unable to get touch data: %d\n", error);
+ break;
+ }
- schedule_delayed_work(&priv->dwork, 0);
+ touch = ili210x_report_events(priv, touchdata);
+ keep_polling = chip->continue_polling(touchdata, touch);
+ if (keep_polling)
+ msleep(ILI2XXX_POLL_PERIOD);
+ } while (!priv->stop && keep_polling);
return IRQ_HANDLED;
}
@@ -242,8 +314,19 @@ static struct attribute *ili210x_attributes[] = {
NULL,
};
+static umode_t ili210x_calibrate_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ili210x *priv = i2c_get_clientdata(client);
+
+ return priv->chip->has_calibrate_reg;
+}
+
static const struct attribute_group ili210x_attr_group = {
.attrs = ili210x_attributes,
+ .is_visible = ili210x_calibrate_visible,
};
static void ili210x_power_down(void *data)
@@ -253,28 +336,35 @@ static void ili210x_power_down(void *data)
gpiod_set_value_cansleep(reset_gpio, 1);
}
-static void ili210x_cancel_work(void *data)
+static void ili210x_stop(void *data)
{
struct ili210x *priv = data;
- cancel_delayed_work_sync(&priv->dwork);
+ /* Tell ISR to quit even if there is a contact. */
+ priv->stop = true;
}
static int ili210x_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ const struct ili2xxx_chip *chip;
struct ili210x *priv;
struct gpio_desc *reset_gpio;
struct input_dev *input;
- struct firmware_version firmware;
- enum ili2xxx_model model;
int error;
-
- model = (enum ili2xxx_model)id->driver_data;
+ unsigned int max_xy;
dev_dbg(dev, "Probing for ILI210X I2C Touschreen driver");
+ chip = device_get_match_data(dev);
+ if (!chip && id)
+ chip = (const struct ili2xxx_chip *)id->driver_data;
+ if (!chip) {
+ dev_err(&client->dev, "unknown device model\n");
+ return -ENODEV;
+ }
+
if (client->irq <= 0) {
dev_err(dev, "No IRQ!\n");
return -EINVAL;
@@ -305,49 +395,39 @@ static int ili210x_i2c_probe(struct i2c_client *client,
priv->client = client;
priv->input = input;
- priv->poll_period = DEFAULT_POLL_PERIOD;
- INIT_DELAYED_WORK(&priv->dwork, ili210x_work);
priv->reset_gpio = reset_gpio;
- priv->model = model;
- if (model == MODEL_ILI210X)
- priv->max_touches = ILI210X_TOUCHES;
- if (model == MODEL_ILI251X)
- priv->max_touches = ILI251X_TOUCHES;
-
+ priv->chip = chip;
i2c_set_clientdata(client, priv);
- /* Get firmware version */
- error = ili210x_read_reg(client, REG_FIRMWARE_VERSION,
- &firmware, sizeof(firmware));
- if (error) {
- dev_err(dev, "Failed to get firmware version, err: %d\n",
- error);
- return error;
- }
-
/* Setup input device */
input->name = "ILI210x Touchscreen";
input->id.bustype = BUS_I2C;
- input->dev.parent = dev;
/* Multi touch */
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, 0xffff, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 0xffff, 0, 0);
+ max_xy = (chip->resolution ?: SZ_64K) - 1;
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
- input_mt_init_slots(input, priv->max_touches, INPUT_MT_DIRECT);
- error = devm_add_action(dev, ili210x_cancel_work, priv);
- if (error)
+ error = input_mt_init_slots(input, priv->chip->max_touches,
+ INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(dev, "Unable to set up slots, err: %d\n", error);
return error;
+ }
- error = devm_request_irq(dev, client->irq, ili210x_irq, 0,
- client->name, priv);
+ error = devm_request_threaded_irq(dev, client->irq, NULL, ili210x_irq,
+ IRQF_ONESHOT, client->name, priv);
if (error) {
dev_err(dev, "Unable to request touchscreen IRQ, err: %d\n",
error);
return error;
}
+ error = devm_add_action_or_reset(dev, ili210x_stop, priv);
+ if (error)
+ return error;
+
error = devm_device_add_group(dev, &ili210x_attr_group);
if (error) {
dev_err(dev, "Unable to create sysfs attributes, err: %d\n",
@@ -361,56 +441,28 @@ static int ili210x_i2c_probe(struct i2c_client *client,
return error;
}
- device_init_wakeup(dev, 1);
-
- dev_dbg(dev,
- "ILI210x initialized (IRQ: %d), firmware version %d.%d.%d",
- client->irq, firmware.id, firmware.major, firmware.minor);
-
return 0;
}
-static int __maybe_unused ili210x_i2c_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- if (device_may_wakeup(&client->dev))
- enable_irq_wake(client->irq);
-
- return 0;
-}
-
-static int __maybe_unused ili210x_i2c_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
-
- if (device_may_wakeup(&client->dev))
- disable_irq_wake(client->irq);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(ili210x_i2c_pm,
- ili210x_i2c_suspend, ili210x_i2c_resume);
-
static const struct i2c_device_id ili210x_i2c_id[] = {
- { "ili210x", MODEL_ILI210X },
- { "ili251x", MODEL_ILI251X },
+ { "ili210x", (long)&ili210x_chip },
+ { "ili2117", (long)&ili211x_chip },
+ { "ili251x", (long)&ili251x_chip },
{ }
};
MODULE_DEVICE_TABLE(i2c, ili210x_i2c_id);
static const struct of_device_id ili210x_dt_ids[] = {
- { .compatible = "ilitek,ili210x", .data = (void *)MODEL_ILI210X },
- { .compatible = "ilitek,ili251x", .data = (void *)MODEL_ILI251X },
- { },
+ { .compatible = "ilitek,ili210x", .data = &ili210x_chip },
+ { .compatible = "ilitek,ili2117", .data = &ili211x_chip },
+ { .compatible = "ilitek,ili251x", .data = &ili251x_chip },
+ { }
};
MODULE_DEVICE_TABLE(of, ili210x_dt_ids);
static struct i2c_driver ili210x_ts_driver = {
.driver = {
.name = "ili210x_i2c",
- .pm = &ili210x_i2c_pm,
.of_match_table = ili210x_dt_ids,
},
.id_table = ili210x_i2c_id,
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index a5ab774da4cc..69c6d559eeb0 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -446,8 +446,7 @@ static int mms114_probe(struct i2c_client *client,
data->client = client;
data->input_dev = input_dev;
- /* FIXME: switch to device_get_match_data() when available */
- match_data = of_device_get_match_data(&client->dev);
+ match_data = device_get_match_data(&client->dev);
if (!match_data)
return -EINVAL;
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index e146dfa257b1..9aa098577350 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -5,22 +5,73 @@
* Copyright (C) 2010-2011 Pixcir, Inc.
*/
+#include <asm/unaligned.h>
#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
#include <linux/of_device.h>
-#include <linux/platform_data/pixcir_i2c_ts.h>
-#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
+/*
+ * Register map
+ */
+#define PIXCIR_REG_POWER_MODE 51
+#define PIXCIR_REG_INT_MODE 52
+
+/*
+ * Power modes:
+ * active: max scan speed
+ * idle: lower scan speed with automatic transition to active on touch
+ * halt: datasheet says sleep but this is more like halt as the chip
+ * clocks are cut and it can only be brought out of this mode
+ * using the RESET pin.
+ */
+enum pixcir_power_mode {
+ PIXCIR_POWER_ACTIVE,
+ PIXCIR_POWER_IDLE,
+ PIXCIR_POWER_HALT,
+};
+
+#define PIXCIR_POWER_MODE_MASK 0x03
+#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
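For reference, a minimal sketch — not part of the patch — of how a power-mode update combines these bits, assuming the read-modify-write sequence that pixcir_set_power_mode() performs further down (pixcir_compose_power_mode is an illustrative helper name; the driver does this inline):

static int pixcir_compose_power_mode(int reg_val, enum pixcir_power_mode mode)
{
	/* Replace the two mode bits, keep the remaining flags intact. */
	reg_val &= ~PIXCIR_POWER_MODE_MASK;
	reg_val |= mode;

	/* Idle is only entered automatically when the allow-idle flag is set. */
	if (mode == PIXCIR_POWER_IDLE)
		reg_val |= PIXCIR_POWER_ALLOW_IDLE;

	return reg_val;
}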
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * diff coordinates: interrupt is asserted when coordinates change
+ * level on touch: interrupt level asserted during touch
+ * pulse on touch: interrupt pulse asserted during touch
+ *
+ */
+enum pixcir_int_mode {
+ PIXCIR_INT_PERIODICAL,
+ PIXCIR_INT_DIFF_COORD,
+ PIXCIR_INT_LEVEL_TOUCH,
+ PIXCIR_INT_PULSE_TOUCH,
+};
+
+#define PIXCIR_INT_MODE_MASK 0x03
+#define PIXCIR_INT_ENABLE (1UL << 3)
+#define PIXCIR_INT_POL_HIGH (1UL << 2)
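Similarly, a sketch of composing the interrupt-mode register (illustrative helper name, not from the patch); PIXCIR_INT_ENABLE is toggled separately, see pixcir_int_enable() below:

static int pixcir_compose_int_mode(int reg_val, enum pixcir_int_mode mode,
				   bool polarity_high)
{
	/* Select one of the four reporting modes. */
	reg_val &= ~PIXCIR_INT_MODE_MASK;
	reg_val |= mode;

	/* Interrupt polarity is an independent flag bit. */
	if (polarity_high)
		reg_val |= PIXCIR_INT_POL_HIGH;
	else
		reg_val &= ~PIXCIR_INT_POL_HIGH;

	return reg_val;
}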
+
+/**
+ * struct pixcir_i2c_chip_data - chip related data
+ * @max_fingers: Max number of fingers reported simultaneously by h/w
+ * @has_hw_ids: Hardware supports finger tracking IDs
+ *
+ */
+struct pixcir_i2c_chip_data {
+ u8 max_fingers;
+ bool has_hw_ids;
+};
+
struct pixcir_i2c_ts_data {
struct i2c_client *client;
struct input_dev *input;
@@ -30,7 +81,6 @@ struct pixcir_i2c_ts_data {
struct gpio_desc *gpio_wake;
const struct pixcir_i2c_chip_data *chip;
struct touchscreen_properties prop;
- int max_fingers; /* Max fingers supported in this instance */
bool running;
};
@@ -54,7 +104,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
memset(report, 0, sizeof(struct pixcir_report_data));
i = chip->has_hw_ids ? 1 : 0;
- readsize = 2 + tsdata->max_fingers * (4 + i);
+ readsize = 2 + tsdata->chip->max_fingers * (4 + i);
if (readsize > sizeof(rdbuf))
readsize = sizeof(rdbuf);
@@ -75,8 +125,8 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
}
touch = rdbuf[0] & 0x7;
- if (touch > tsdata->max_fingers)
- touch = tsdata->max_fingers;
+ if (touch > tsdata->chip->max_fingers)
+ touch = tsdata->chip->max_fingers;
report->num_touches = touch;
bufptr = &rdbuf[2];
@@ -192,7 +242,7 @@ static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_POWER_MODE);
if (ret < 0) {
- dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't read reg %d : %d\n",
__func__, PIXCIR_REG_POWER_MODE, ret);
return ret;
}
@@ -205,7 +255,7 @@ static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_POWER_MODE, ret);
if (ret < 0) {
- dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't write reg %d : %d\n",
__func__, PIXCIR_REG_POWER_MODE, ret);
return ret;
}
@@ -231,7 +281,7 @@ static int pixcir_set_int_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_INT_MODE);
if (ret < 0) {
- dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't read reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -246,7 +296,7 @@ static int pixcir_set_int_mode(struct pixcir_i2c_ts_data *ts,
ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_INT_MODE, ret);
if (ret < 0) {
- dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't write reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -264,7 +314,7 @@ static int pixcir_int_enable(struct pixcir_i2c_ts_data *ts, bool enable)
ret = i2c_smbus_read_byte_data(ts->client, PIXCIR_REG_INT_MODE);
if (ret < 0) {
- dev_err(dev, "%s: can't read reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't read reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -276,7 +326,7 @@ static int pixcir_int_enable(struct pixcir_i2c_ts_data *ts, bool enable)
ret = i2c_smbus_write_byte_data(ts->client, PIXCIR_REG_INT_MODE, ret);
if (ret < 0) {
- dev_err(dev, "%s: can't write reg 0x%x : %d\n",
+ dev_err(dev, "%s: can't write reg %d : %d\n",
__func__, PIXCIR_REG_INT_MODE, ret);
return ret;
}
@@ -412,31 +462,9 @@ unlock:
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
-#ifdef CONFIG_OF
-static const struct of_device_id pixcir_of_match[];
-
-static int pixcir_parse_dt(struct device *dev,
- struct pixcir_i2c_ts_data *tsdata)
-{
- tsdata->chip = of_device_get_match_data(dev);
- if (!tsdata->chip)
- return -EINVAL;
-
- return 0;
-}
-#else
-static int pixcir_parse_dt(struct device *dev,
- struct pixcir_i2c_ts_data *tsdata)
-{
- return -EINVAL;
-}
-#endif
-
static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct pixcir_ts_platform_data *pdata =
- dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
@@ -446,19 +474,11 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
if (!tsdata)
return -ENOMEM;
- if (pdata) {
- tsdata->chip = &pdata->chip;
- } else if (dev->of_node) {
- error = pixcir_parse_dt(dev, tsdata);
- if (error)
- return error;
- } else {
- dev_err(dev, "platform data not defined\n");
- return -EINVAL;
- }
-
- if (!tsdata->chip->max_fingers) {
- dev_err(dev, "Invalid max_fingers in chip data\n");
+ tsdata->chip = device_get_match_data(dev);
+ if (!tsdata->chip && id)
+ tsdata->chip = (const void *)id->driver_data;
+ if (!tsdata->chip) {
+ dev_err(dev, "can't locate chip data\n");
return -EINVAL;
}
@@ -475,30 +495,17 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input->id.bustype = BUS_I2C;
input->open = pixcir_input_open;
input->close = pixcir_input_close;
- input->dev.parent = dev;
-
- if (pdata) {
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
- } else {
- input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
- input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
- touchscreen_parse_properties(input, true, &tsdata->prop);
- if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
- !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
- dev_err(dev, "Touchscreen size is not specified\n");
- return -EINVAL;
- }
- }
- tsdata->max_fingers = tsdata->chip->max_fingers;
- if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
- tsdata->max_fingers = PIXCIR_MAX_SLOTS;
- dev_info(dev, "Limiting maximum fingers to %d\n",
- tsdata->max_fingers);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ touchscreen_parse_properties(input, true, &tsdata->prop);
+ if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+ !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+ dev_err(dev, "Touchscreen size is not specified\n");
+ return -EINVAL;
}
- error = input_mt_init_slots(input, tsdata->max_fingers,
+ error = input_mt_init_slots(input, tsdata->chip->max_fingers,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
dev_err(dev, "Error initializing Multi-Touch slots\n");
@@ -510,7 +517,9 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
if (IS_ERR(tsdata->gpio_attb)) {
error = PTR_ERR(tsdata->gpio_attb);
- dev_err(dev, "Failed to request ATTB gpio: %d\n", error);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request ATTB gpio: %d\n",
+ error);
return error;
}
@@ -518,7 +527,9 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
GPIOD_OUT_LOW);
if (IS_ERR(tsdata->gpio_reset)) {
error = PTR_ERR(tsdata->gpio_reset);
- dev_err(dev, "Failed to request RESET gpio: %d\n", error);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request RESET gpio: %d\n",
+ error);
return error;
}
@@ -574,14 +585,6 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
return 0;
}
-static const struct i2c_device_id pixcir_i2c_ts_id[] = {
- { "pixcir_ts", 0 },
- { "pixcir_tangoc", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
-
-#ifdef CONFIG_OF
static const struct pixcir_i2c_chip_data pixcir_ts_data = {
.max_fingers = 2,
/* no hw id support */
@@ -592,6 +595,14 @@ static const struct pixcir_i2c_chip_data pixcir_tangoc_data = {
.has_hw_ids = true,
};
+static const struct i2c_device_id pixcir_i2c_ts_id[] = {
+ { "pixcir_ts", (unsigned long) &pixcir_ts_data },
+ { "pixcir_tangoc", (unsigned long) &pixcir_tangoc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+
+#ifdef CONFIG_OF
static const struct of_device_id pixcir_of_match[] = {
{ .compatible = "pixcir,pixcir_ts", .data = &pixcir_ts_data },
{ .compatible = "pixcir,pixcir_tangoc", .data = &pixcir_tangoc_data },
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 69881265d121..0e2e08f3f433 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/input/mt.h>
-#include <linux/input-polldev.h>
#include <linux/input/touchscreen.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
@@ -34,7 +33,7 @@
struct rpi_ts {
struct platform_device *pdev;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct touchscreen_properties prop;
void __iomem *fw_regs_va;
@@ -57,10 +56,9 @@ struct rpi_ts_regs {
} point[RPI_TS_MAX_SUPPORTED_POINTS];
};
-static void rpi_ts_poll(struct input_polled_dev *dev)
+static void rpi_ts_poll(struct input_dev *input)
{
- struct input_dev *input = dev->input;
- struct rpi_ts *ts = dev->private;
+ struct rpi_ts *ts = input_get_drvdata(input);
struct rpi_ts_regs regs;
int modified_ids = 0;
long released_ids;
@@ -123,10 +121,9 @@ static int rpi_ts_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct device_node *fw_node;
struct rpi_firmware *fw;
- struct input_dev *input;
struct rpi_ts *ts;
u32 touchbuf;
int error;
@@ -160,7 +157,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
return error;
}
-
touchbuf = (u32)ts->fw_regs_phys;
error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
&touchbuf, sizeof(touchbuf));
@@ -170,19 +166,17 @@ static int rpi_ts_probe(struct platform_device *pdev)
return error;
}
- poll_dev = devm_input_allocate_polled_device(dev);
- if (!poll_dev) {
+ input = devm_input_allocate_device(dev);
+ if (!input) {
dev_err(dev, "Failed to allocate input device\n");
return -ENOMEM;
}
- ts->poll_dev = poll_dev;
- input = poll_dev->input;
+
+ ts->input = input;
+ input_set_drvdata(input, ts);
input->name = "raspberrypi-ts";
input->id.bustype = BUS_HOST;
- poll_dev->poll_interval = RPI_TS_POLL_INTERVAL;
- poll_dev->poll = rpi_ts_poll;
- poll_dev->private = ts;
input_set_abs_params(input, ABS_MT_POSITION_X, 0,
RPI_TS_DEFAULT_WIDTH, 0, 0);
@@ -197,7 +191,15 @@ static int rpi_ts_probe(struct platform_device *pdev)
return error;
}
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input, rpi_ts_poll);
+ if (error) {
+ dev_err(dev, "could not set up polling mode, %d\n", error);
+ return error;
+ }
+
+ input_set_poll_interval(input, RPI_TS_POLL_INTERVAL);
+
+ error = input_register_device(input);
if (error) {
dev_err(dev, "could not register input device, %d\n", error);
return error;
@@ -214,10 +216,10 @@ MODULE_DEVICE_TABLE(of, rpi_ts_match);
static struct platform_driver rpi_ts_driver = {
.driver = {
- .name = "raspberrypi-ts",
+ .name = "raspberrypi-ts",
.of_match_table = rpi_ts_match,
},
- .probe = rpi_ts_probe,
+ .probe = rpi_ts_probe,
};
module_platform_driver(rpi_ts_driver);
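The raspberrypi-ts hunks above show the full input_polled_dev to input_setup_polling() conversion; the sur40, tps6507x and ts4800 diffs below repeat the same recipe. A condensed sketch of the pattern (example_register_polling_input and its parameters are illustrative, not from the patch):

static int example_register_polling_input(struct device *dev, void *priv,
					  void (*poll)(struct input_dev *),
					  unsigned int interval_ms)
{
	struct input_dev *input;
	int error;

	input = devm_input_allocate_device(dev);
	if (!input)
		return -ENOMEM;

	/* replaces poll_dev->private */
	input_set_drvdata(input, priv);

	/* replaces poll_dev->poll */
	error = input_setup_polling(input, poll);
	if (error)
		return error;

	/* replaces poll_dev->poll_interval */
	input_set_poll_interval(input, interval_ms);

	/* replaces input_register_polled_device() */
	return input_register_device(input);
}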
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index b346e7cafd62..82920ff46f72 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 1139714e72e2..63b29c7279e2 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -14,23 +14,19 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/input/touchscreen.h>
#define ST1232_TS_NAME "st1232-ts"
#define ST1633_TS_NAME "st1633-ts"
-struct st1232_ts_finger {
- u16 x;
- u16 y;
- u8 t;
- bool is_valid;
-};
+#define ST_TS_MAX_FINGERS 10
struct st_chip_info {
bool have_z;
@@ -50,81 +46,89 @@ struct st1232_ts_data {
const struct st_chip_info *chip_info;
int read_buf_len;
u8 *read_buf;
- struct st1232_ts_finger *finger;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
{
- struct st1232_ts_finger *finger = ts->finger;
struct i2c_client *client = ts->client;
- struct i2c_msg msg[2];
- int error;
- int i, y;
u8 start_reg = ts->chip_info->start_reg;
- u8 *buf = ts->read_buf;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(start_reg),
+ .buf = &start_reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD | I2C_M_DMA_SAFE,
+ .len = ts->read_buf_len,
+ .buf = ts->read_buf,
+ }
+ };
+ int ret;
- /* read touchscreen data */
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &start_reg;
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg))
+ return ret < 0 ? ret : -EIO;
- msg[1].addr = ts->client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = ts->read_buf_len;
- msg[1].buf = buf;
+ return 0;
+}
- error = i2c_transfer(client->adapter, msg, 2);
- if (error < 0)
- return error;
+static int st1232_ts_parse_and_report(struct st1232_ts_data *ts)
+{
+ struct input_dev *input = ts->input_dev;
+ struct input_mt_pos pos[ST_TS_MAX_FINGERS];
+ u8 z[ST_TS_MAX_FINGERS];
+ int slots[ST_TS_MAX_FINGERS];
+ int n_contacts = 0;
+ int i;
+
+ for (i = 0; i < ts->chip_info->max_fingers; i++) {
+ u8 *buf = &ts->read_buf[i * 4];
+
+ if (buf[0] & BIT(7)) {
+ unsigned int x = ((buf[0] & 0x70) << 4) | buf[1];
+ unsigned int y = ((buf[0] & 0x07) << 8) | buf[2];
- for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
- finger[i].is_valid = buf[i + y] >> 7;
- if (finger[i].is_valid) {
- finger[i].x = ((buf[i + y] & 0x0070) << 4) |
- buf[i + y + 1];
- finger[i].y = ((buf[i + y] & 0x0007) << 8) |
- buf[i + y + 2];
+ touchscreen_set_mt_pos(&pos[n_contacts],
+ &ts->prop, x, y);
/* st1232 includes a z-axis / touch strength */
if (ts->chip_info->have_z)
- finger[i].t = buf[i + 6];
+ z[n_contacts] = ts->read_buf[i + 6];
+
+ n_contacts++;
}
}
- return 0;
+ input_mt_assign_slots(input, slots, pos, n_contacts, 0);
+ for (i = 0; i < n_contacts; i++) {
+ input_mt_slot(input, slots[i]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
+ input_report_abs(input, ABS_MT_POSITION_Y, pos[i].y);
+ if (ts->chip_info->have_z)
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, z[i]);
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+
+ return n_contacts;
}
static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
{
struct st1232_ts_data *ts = dev_id;
- struct st1232_ts_finger *finger = ts->finger;
- struct input_dev *input_dev = ts->input_dev;
- int count = 0;
- int i, ret;
-
- ret = st1232_ts_read_data(ts);
- if (ret < 0)
- goto end;
-
- /* multi touch protocol */
- for (i = 0; i < ts->chip_info->max_fingers; i++) {
- if (!finger[i].is_valid)
- continue;
-
- if (ts->chip_info->have_z)
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
- finger[i].t);
+ int count;
+ int error;
- touchscreen_report_pos(input_dev, &ts->prop,
- finger[i].x, finger[i].y, true);
- input_mt_sync(input_dev);
- count++;
- }
+ error = st1232_ts_read_data(ts);
+ if (error)
+ goto out;
- /* SYN_MT_REPORT only if no contact */
+ count = st1232_ts_parse_and_report(ts);
if (!count) {
- input_mt_sync(input_dev);
if (ts->low_latency_req.dev) {
dev_pm_qos_remove_request(&ts->low_latency_req);
ts->low_latency_req.dev = NULL;
@@ -136,10 +140,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
DEV_PM_QOS_RESUME_LATENCY, 100);
}
- /* SYN_REPORT */
- input_sync(input_dev);
-
-end:
+out:
return IRQ_HANDLED;
}
@@ -149,6 +150,11 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
gpiod_set_value_cansleep(ts->reset_gpio, !poweron);
}
+static void st1232_ts_power_off(void *data)
+{
+ st1232_ts_power(data, false);
+}
+
static const struct st_chip_info st1232_chip_info = {
.have_z = true,
.max_x = 0x31f, /* 800 - 1 */
@@ -172,7 +178,6 @@ static int st1232_ts_probe(struct i2c_client *client,
{
const struct st_chip_info *match;
struct st1232_ts_data *ts;
- struct st1232_ts_finger *finger;
struct input_dev *input_dev;
int error;
@@ -199,11 +204,6 @@ static int st1232_ts_probe(struct i2c_client *client,
return -ENOMEM;
ts->chip_info = match;
- ts->finger = devm_kcalloc(&client->dev,
- ts->chip_info->max_fingers, sizeof(*finger),
- GFP_KERNEL);
- if (!ts->finger)
- return -ENOMEM;
/* allocate a buffer according to the number of registers to read */
ts->read_buf_len = ts->chip_info->max_fingers * 4;
@@ -229,14 +229,15 @@ static int st1232_ts_probe(struct i2c_client *client,
st1232_ts_power(ts, true);
+ error = devm_add_action_or_reset(&client->dev, st1232_ts_power_off, ts);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to install power off action: %d\n", error);
+ return error;
+ }
+
input_dev->name = "st1232-touchscreen";
input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
-
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
- __set_bit(EV_SYN, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(EV_ABS, input_dev->evbit);
if (ts->chip_info->have_z)
input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
@@ -249,6 +250,14 @@ static int st1232_ts_probe(struct i2c_client *client,
touchscreen_parse_properties(input_dev, true, &ts->prop);
+ error = input_mt_init_slots(input_dev, ts->chip_info->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_TRACK |
+ INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev, "failed to initialize MT slots\n");
+ return error;
+ }
+
error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, st1232_ts_irq_handler,
IRQF_ONESHOT,
@@ -266,16 +275,6 @@ static int st1232_ts_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, ts);
- device_init_wakeup(&client->dev, 1);
-
- return 0;
-}
-
-static int st1232_ts_remove(struct i2c_client *client)
-{
- struct st1232_ts_data *ts = i2c_get_clientdata(client);
-
- st1232_ts_power(ts, false);
return 0;
}
@@ -285,12 +284,10 @@ static int __maybe_unused st1232_ts_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
- if (device_may_wakeup(&client->dev)) {
- enable_irq_wake(client->irq);
- } else {
- disable_irq(client->irq);
+ disable_irq(client->irq);
+
+ if (!device_may_wakeup(&client->dev))
st1232_ts_power(ts, false);
- }
return 0;
}
@@ -300,12 +297,10 @@ static int __maybe_unused st1232_ts_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct st1232_ts_data *ts = i2c_get_clientdata(client);
- if (device_may_wakeup(&client->dev)) {
- disable_irq_wake(client->irq);
- } else {
+ if (!device_may_wakeup(&client->dev))
st1232_ts_power(ts, true);
- enable_irq(client->irq);
- }
+
+ enable_irq(client->irq);
return 0;
}
@@ -329,7 +324,6 @@ MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
- .remove = st1232_ts_remove,
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 3fd3e862269b..1dd47dda71cd 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -27,7 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/printk.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/usb/input.h>
#include <linux/videodev2.h>
@@ -206,7 +206,7 @@ struct sur40_state {
struct usb_device *usbdev;
struct device *dev;
- struct input_polled_dev *input;
+ struct input_dev *input;
struct v4l2_device v4l2;
struct video_device vdev;
@@ -370,6 +370,10 @@ static int sur40_init(struct sur40_state *dev)
goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
+ if (result < 0)
+ goto error;
+
+ result = 0;
/*
* Discard the result buffer - no known data inside except
@@ -381,22 +385,22 @@ error:
}
/*
- * Callback routines from input_polled_dev
+ * Callback routines from input_dev
*/
/* Enable the device, polling will now start. */
-static void sur40_open(struct input_polled_dev *polldev)
+static int sur40_open(struct input_dev *input)
{
- struct sur40_state *sur40 = polldev->private;
+ struct sur40_state *sur40 = input_get_drvdata(input);
dev_dbg(sur40->dev, "open\n");
- sur40_init(sur40);
+ return sur40_init(sur40);
}
/* Disable device, polling has stopped. */
-static void sur40_close(struct input_polled_dev *polldev)
+static void sur40_close(struct input_dev *input)
{
- struct sur40_state *sur40 = polldev->private;
+ struct sur40_state *sur40 = input_get_drvdata(input);
dev_dbg(sur40->dev, "close\n");
/*
@@ -448,10 +452,9 @@ static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
}
/* core function: poll for new input data */
-static void sur40_poll(struct input_polled_dev *polldev)
+static void sur40_poll(struct input_dev *input)
{
- struct sur40_state *sur40 = polldev->private;
- struct input_dev *input = polldev->input;
+ struct sur40_state *sur40 = input_get_drvdata(input);
int result, bulk_read, need_blobs, packet_blobs, i;
u32 uninitialized_var(packet_id);
@@ -613,10 +616,9 @@ err_poll:
}
/* Initialize input device parameters. */
-static void sur40_input_setup(struct input_dev *input_dev)
+static int sur40_input_setup_events(struct input_dev *input_dev)
{
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(EV_ABS, input_dev->evbit);
+ int error;
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, SENSOR_RES_X, 0, 0);
@@ -637,8 +639,14 @@ static void sur40_input_setup(struct input_dev *input_dev)
input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
- input_mt_init_slots(input_dev, MAX_CONTACTS,
- INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ error = input_mt_init_slots(input_dev, MAX_CONTACTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(input_dev->dev.parent, "failed to set up slots\n");
+ return error;
+ }
+
+ return 0;
}
/* Check candidate USB interface. */
@@ -649,7 +657,7 @@ static int sur40_probe(struct usb_interface *interface,
struct sur40_state *sur40;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
int error;
/* Check if we really have the right interface. */
@@ -670,8 +678,8 @@ static int sur40_probe(struct usb_interface *interface,
if (!sur40)
return -ENOMEM;
- poll_dev = input_allocate_polled_device();
- if (!poll_dev) {
+ input = input_allocate_device();
+ if (!input) {
error = -ENOMEM;
goto err_free_dev;
}
@@ -681,26 +689,33 @@ static int sur40_probe(struct usb_interface *interface,
spin_lock_init(&sur40->qlock);
mutex_init(&sur40->lock);
- /* Set up polled input device control structure */
- poll_dev->private = sur40;
- poll_dev->poll_interval = POLL_INTERVAL;
- poll_dev->open = sur40_open;
- poll_dev->poll = sur40_poll;
- poll_dev->close = sur40_close;
-
/* Set up regular input device structure */
- sur40_input_setup(poll_dev->input);
-
- poll_dev->input->name = DRIVER_LONG;
- usb_to_input_id(usbdev, &poll_dev->input->id);
+ input->name = DRIVER_LONG;
+ usb_to_input_id(usbdev, &input->id);
usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys));
strlcat(sur40->phys, "/input0", sizeof(sur40->phys));
- poll_dev->input->phys = sur40->phys;
- poll_dev->input->dev.parent = &interface->dev;
+ input->phys = sur40->phys;
+ input->dev.parent = &interface->dev;
+
+ input->open = sur40_open;
+ input->close = sur40_close;
+
+ error = sur40_input_setup_events(input);
+ if (error)
+ goto err_free_input;
+
+ input_set_drvdata(input, sur40);
+ error = input_setup_polling(input, sur40_poll);
+ if (error) {
+ dev_err(&interface->dev, "failed to set up polling");
+ goto err_free_input;
+ }
+
+ input_set_poll_interval(input, POLL_INTERVAL);
sur40->usbdev = usbdev;
sur40->dev = &interface->dev;
- sur40->input = poll_dev;
+ sur40->input = input;
/* use the bulk-in endpoint tested above */
sur40->bulk_in_size = usb_endpoint_maxp(endpoint);
@@ -709,11 +724,11 @@ static int sur40_probe(struct usb_interface *interface,
if (!sur40->bulk_in_buffer) {
dev_err(&interface->dev, "Unable to allocate input buffer.");
error = -ENOMEM;
- goto err_free_polldev;
+ goto err_free_input;
}
/* register the polled input device */
- error = input_register_polled_device(poll_dev);
+ error = input_register_device(input);
if (error) {
dev_err(&interface->dev,
"Unable to register polled input device.");
@@ -796,8 +811,8 @@ err_unreg_v4l2:
v4l2_device_unregister(&sur40->v4l2);
err_free_buffer:
kfree(sur40->bulk_in_buffer);
-err_free_polldev:
- input_free_polled_device(sur40->input);
+err_free_input:
+ input_free_device(input);
err_free_dev:
kfree(sur40);
@@ -813,8 +828,7 @@ static void sur40_disconnect(struct usb_interface *interface)
video_unregister_device(&sur40->vdev);
v4l2_device_unregister(&sur40->v4l2);
- input_unregister_polled_device(sur40->input);
- input_free_polled_device(sur40->input);
+ input_unregister_device(sur40->input);
kfree(sur40->bulk_in_buffer);
kfree(sur40);
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 75170a7439b1..357a3108f2e5 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps6507x.h>
#include <linux/input/tps6507x-ts.h>
@@ -40,7 +39,7 @@ struct ts_event {
struct tps6507x_ts {
struct device *dev;
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct tps6507x_dev *mfd;
char phys[32];
struct ts_event tc;
@@ -148,10 +147,9 @@ static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
return ret;
}
-static void tps6507x_ts_poll(struct input_polled_dev *poll_dev)
+static void tps6507x_ts_poll(struct input_dev *input_dev)
{
- struct tps6507x_ts *tsc = poll_dev->private;
- struct input_dev *input_dev = poll_dev->input;
+ struct tps6507x_ts *tsc = input_get_drvdata(input_dev);
bool pendown;
s32 ret;
@@ -205,7 +203,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
const struct tps6507x_board *tps_board;
const struct touchscreen_init_data *init_data;
struct tps6507x_ts *tsc;
- struct input_polled_dev *poll_dev;
struct input_dev *input_dev;
int error;
@@ -240,23 +237,16 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
snprintf(tsc->phys, sizeof(tsc->phys),
"%s/input0", dev_name(tsc->dev));
- poll_dev = devm_input_allocate_polled_device(&pdev->dev);
- if (!poll_dev) {
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev) {
dev_err(tsc->dev, "Failed to allocate polled input device.\n");
return -ENOMEM;
}
- tsc->poll_dev = poll_dev;
-
- poll_dev->private = tsc;
- poll_dev->poll = tps6507x_ts_poll;
- poll_dev->poll_interval = init_data ?
- init_data->poll_period : TSC_DEFAULT_POLL_PERIOD;
-
- input_dev = poll_dev->input;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ tsc->input = input_dev;
+ input_set_drvdata(input_dev, tsc);
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
@@ -275,7 +265,15 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
if (error)
return error;
- error = input_register_polled_device(poll_dev);
+ error = input_setup_polling(input_dev, tps6507x_ts_poll);
+ if (error)
+ return error;
+
+ input_set_poll_interval(input_dev,
+ init_data ? init_data->poll_period :
+ TSC_DEFAULT_POLL_PERIOD);
+
+ error = input_register_device(input_dev);
if (error)
return error;
diff --git a/drivers/input/touchscreen/ts4800-ts.c b/drivers/input/touchscreen/ts4800-ts.c
index 5b4f5362c67b..6cf66aadc10e 100644
--- a/drivers/input/touchscreen/ts4800-ts.c
+++ b/drivers/input/touchscreen/ts4800-ts.c
@@ -10,7 +10,6 @@
#include <linux/bitops.h>
#include <linux/input.h>
-#include <linux/input-polldev.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -33,7 +32,7 @@
#define Y_OFFSET 0x2
struct ts4800_ts {
- struct input_polled_dev *poll_dev;
+ struct input_dev *input;
struct device *dev;
char phys[32];
@@ -46,22 +45,26 @@ struct ts4800_ts {
int debounce;
};
-static void ts4800_ts_open(struct input_polled_dev *dev)
+static int ts4800_ts_open(struct input_dev *input_dev)
{
- struct ts4800_ts *ts = dev->private;
- int ret;
+ struct ts4800_ts *ts = input_get_drvdata(input_dev);
+ int error;
ts->pendown = false;
ts->debounce = DEBOUNCE_COUNT;
- ret = regmap_update_bits(ts->regmap, ts->reg, ts->bit, ts->bit);
- if (ret)
- dev_warn(ts->dev, "Failed to enable touchscreen\n");
+ error = regmap_update_bits(ts->regmap, ts->reg, ts->bit, ts->bit);
+ if (error) {
+ dev_warn(ts->dev, "Failed to enable touchscreen: %d\n", error);
+ return error;
+ }
+
+ return 0;
}
-static void ts4800_ts_close(struct input_polled_dev *dev)
+static void ts4800_ts_close(struct input_dev *input_dev)
{
- struct ts4800_ts *ts = dev->private;
+ struct ts4800_ts *ts = input_get_drvdata(input_dev);
int ret;
ret = regmap_update_bits(ts->regmap, ts->reg, ts->bit, 0);
@@ -70,10 +73,9 @@ static void ts4800_ts_close(struct input_polled_dev *dev)
}
-static void ts4800_ts_poll(struct input_polled_dev *dev)
+static void ts4800_ts_poll(struct input_dev *input_dev)
{
- struct input_dev *input_dev = dev->input;
- struct ts4800_ts *ts = dev->private;
+ struct ts4800_ts *ts = input_get_drvdata(input_dev);
u16 last_x = readw(ts->base + X_OFFSET);
u16 last_y = readw(ts->base + Y_OFFSET);
bool pendown = last_x & PENDOWN_MASK;
@@ -146,7 +148,7 @@ static int ts4800_parse_dt(struct platform_device *pdev,
static int ts4800_ts_probe(struct platform_device *pdev)
{
- struct input_polled_dev *poll_dev;
+ struct input_dev *input_dev;
struct ts4800_ts *ts;
int error;
@@ -162,32 +164,38 @@ static int ts4800_ts_probe(struct platform_device *pdev)
if (IS_ERR(ts->base))
return PTR_ERR(ts->base);
- poll_dev = devm_input_allocate_polled_device(&pdev->dev);
- if (!poll_dev)
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
return -ENOMEM;
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&pdev->dev));
- ts->poll_dev = poll_dev;
+ ts->input = input_dev;
ts->dev = &pdev->dev;
- poll_dev->private = ts;
- poll_dev->poll_interval = POLL_INTERVAL;
- poll_dev->open = ts4800_ts_open;
- poll_dev->close = ts4800_ts_close;
- poll_dev->poll = ts4800_ts_poll;
+ input_set_drvdata(input_dev, ts);
+
+ input_dev->name = "TS-4800 Touchscreen";
+ input_dev->phys = ts->phys;
+
+ input_dev->open = ts4800_ts_open;
+ input_dev->close = ts4800_ts_close;
+
+ input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
- poll_dev->input->name = "TS-4800 Touchscreen";
- poll_dev->input->phys = ts->phys;
+ error = input_setup_polling(input_dev, ts4800_ts_poll);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to set up polling: %d\n", error);
+ return error;
+ }
- input_set_capability(poll_dev->input, EV_KEY, BTN_TOUCH);
- input_set_abs_params(poll_dev->input, ABS_X, 0, MAX_12BIT, 0, 0);
- input_set_abs_params(poll_dev->input, ABS_Y, 0, MAX_12BIT, 0, 0);
+ input_set_poll_interval(input_dev, POLL_INTERVAL);
- error = input_register_polled_device(poll_dev);
+ error = input_register_device(input_dev);
if (error) {
dev_err(&pdev->dev,
- "Unabled to register polled input device (%d)\n",
- error);
+ "Unable to register input device: %d\n", error);
return error;
}
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index f017af8c2aa3..1afc6bde2891 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <asm/unaligned.h>
#define WACOM_CMD_QUERY0 0x04
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 6ab4012a059a..c49afbea3458 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -5,6 +5,15 @@ config INTERCONNECT_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
+config INTERCONNECT_QCOM_MSM8974
+ tristate "Qualcomm MSM8974 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8974-based
+ platforms.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 67dafb783dec..9adf9e380545 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+qnoc-msm8974-objs := msm8974.o
qnoc-qcs404-objs := qcs404.o
qnoc-sdm845-objs := sdm845.o
icc-smd-rpm-objs := smd-rpm.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
new file mode 100644
index 000000000000..ce599a0c83d9
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ *
+ * Based on MSM bus code from downstream MSM kernel sources.
+ * Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
+ *
+ * Based on qcs404.c
+ * Copyright (C) 2019 Linaro Ltd
+ *
+ * Here's a rough representation that shows the various buses that form the
+ * Network On Chip (NOC) for the msm8974:
+ *
+ * Multimedia Subsystem (MMSS)
+ * |----------+-----------------------------------+-----------|
+ * | |
+ * | |
+ * Config | Bus Interface | Memory Controller
+ * |------------+-+-----------| |------------+-+-----------|
+ * | |
+ * | |
+ * | System |
+ * |--------------+-+---------------------------------+-+-------------|
+ * | |
+ * | |
+ * Peripheral | On Chip | Memory (OCMEM)
+ * |------------+-------------| |------------+-------------|
+ */
+
+#include <dt-bindings/interconnect/qcom,msm8974.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "smd-rpm.h"
+
+enum {
+ MSM8974_BIMC_MAS_AMPSS_M0 = 1,
+ MSM8974_BIMC_MAS_AMPSS_M1,
+ MSM8974_BIMC_MAS_MSS_PROC,
+ MSM8974_BIMC_TO_MNOC,
+ MSM8974_BIMC_TO_SNOC,
+ MSM8974_BIMC_SLV_EBI_CH0,
+ MSM8974_BIMC_SLV_AMPSS_L2,
+ MSM8974_CNOC_MAS_RPM_INST,
+ MSM8974_CNOC_MAS_RPM_DATA,
+ MSM8974_CNOC_MAS_RPM_SYS,
+ MSM8974_CNOC_MAS_DEHR,
+ MSM8974_CNOC_MAS_QDSS_DAP,
+ MSM8974_CNOC_MAS_SPDM,
+ MSM8974_CNOC_MAS_TIC,
+ MSM8974_CNOC_SLV_CLK_CTL,
+ MSM8974_CNOC_SLV_CNOC_MSS,
+ MSM8974_CNOC_SLV_SECURITY,
+ MSM8974_CNOC_SLV_TCSR,
+ MSM8974_CNOC_SLV_TLMM,
+ MSM8974_CNOC_SLV_CRYPTO_0_CFG,
+ MSM8974_CNOC_SLV_CRYPTO_1_CFG,
+ MSM8974_CNOC_SLV_IMEM_CFG,
+ MSM8974_CNOC_SLV_MESSAGE_RAM,
+ MSM8974_CNOC_SLV_BIMC_CFG,
+ MSM8974_CNOC_SLV_BOOT_ROM,
+ MSM8974_CNOC_SLV_PMIC_ARB,
+ MSM8974_CNOC_SLV_SPDM_WRAPPER,
+ MSM8974_CNOC_SLV_DEHR_CFG,
+ MSM8974_CNOC_SLV_MPM,
+ MSM8974_CNOC_SLV_QDSS_CFG,
+ MSM8974_CNOC_SLV_RBCPR_CFG,
+ MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG,
+ MSM8974_CNOC_TO_SNOC,
+ MSM8974_CNOC_SLV_CNOC_ONOC_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG,
+ MSM8974_CNOC_SLV_CNOC_MNOC_CFG,
+ MSM8974_CNOC_SLV_PNOC_CFG,
+ MSM8974_CNOC_SLV_SNOC_MPU_CFG,
+ MSM8974_CNOC_SLV_SNOC_CFG,
+ MSM8974_CNOC_SLV_EBI1_DLL_CFG,
+ MSM8974_CNOC_SLV_PHY_APU_CFG,
+ MSM8974_CNOC_SLV_EBI1_PHY_CFG,
+ MSM8974_CNOC_SLV_RPM,
+ MSM8974_CNOC_SLV_SERVICE_CNOC,
+ MSM8974_MNOC_MAS_GRAPHICS_3D,
+ MSM8974_MNOC_MAS_JPEG,
+ MSM8974_MNOC_MAS_MDP_PORT0,
+ MSM8974_MNOC_MAS_VIDEO_P0,
+ MSM8974_MNOC_MAS_VIDEO_P1,
+ MSM8974_MNOC_MAS_VFE,
+ MSM8974_MNOC_TO_CNOC,
+ MSM8974_MNOC_TO_BIMC,
+ MSM8974_MNOC_SLV_CAMERA_CFG,
+ MSM8974_MNOC_SLV_DISPLAY_CFG,
+ MSM8974_MNOC_SLV_OCMEM_CFG,
+ MSM8974_MNOC_SLV_CPR_CFG,
+ MSM8974_MNOC_SLV_CPR_XPU_CFG,
+ MSM8974_MNOC_SLV_MISC_CFG,
+ MSM8974_MNOC_SLV_MISC_XPU_CFG,
+ MSM8974_MNOC_SLV_VENUS_CFG,
+ MSM8974_MNOC_SLV_GRAPHICS_3D_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_CFG,
+ MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG,
+ MSM8974_MNOC_SLV_MNOC_MPU_CFG,
+ MSM8974_MNOC_SLV_ONOC_MPU_CFG,
+ MSM8974_MNOC_SLV_SERVICE_MNOC,
+ MSM8974_OCMEM_NOC_TO_OCMEM_VNOC,
+ MSM8974_OCMEM_MAS_JPEG_OCMEM,
+ MSM8974_OCMEM_MAS_MDP_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM,
+ MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM,
+ MSM8974_OCMEM_MAS_VFE_OCMEM,
+ MSM8974_OCMEM_MAS_CNOC_ONOC_CFG,
+ MSM8974_OCMEM_SLV_SERVICE_ONOC,
+ MSM8974_OCMEM_VNOC_TO_SNOC,
+ MSM8974_OCMEM_VNOC_TO_OCMEM_NOC,
+ MSM8974_OCMEM_VNOC_MAS_GFX3D,
+ MSM8974_OCMEM_SLV_OCMEM,
+ MSM8974_PNOC_MAS_PNOC_CFG,
+ MSM8974_PNOC_MAS_SDCC_1,
+ MSM8974_PNOC_MAS_SDCC_3,
+ MSM8974_PNOC_MAS_SDCC_4,
+ MSM8974_PNOC_MAS_SDCC_2,
+ MSM8974_PNOC_MAS_TSIF,
+ MSM8974_PNOC_MAS_BAM_DMA,
+ MSM8974_PNOC_MAS_BLSP_2,
+ MSM8974_PNOC_MAS_USB_HSIC,
+ MSM8974_PNOC_MAS_BLSP_1,
+ MSM8974_PNOC_MAS_USB_HS,
+ MSM8974_PNOC_TO_SNOC,
+ MSM8974_PNOC_SLV_SDCC_1,
+ MSM8974_PNOC_SLV_SDCC_3,
+ MSM8974_PNOC_SLV_SDCC_2,
+ MSM8974_PNOC_SLV_SDCC_4,
+ MSM8974_PNOC_SLV_TSIF,
+ MSM8974_PNOC_SLV_BAM_DMA,
+ MSM8974_PNOC_SLV_BLSP_2,
+ MSM8974_PNOC_SLV_USB_HSIC,
+ MSM8974_PNOC_SLV_BLSP_1,
+ MSM8974_PNOC_SLV_USB_HS,
+ MSM8974_PNOC_SLV_PDM,
+ MSM8974_PNOC_SLV_PERIPH_APU_CFG,
+ MSM8974_PNOC_SLV_PNOC_MPU_CFG,
+ MSM8974_PNOC_SLV_PRNG,
+ MSM8974_PNOC_SLV_SERVICE_PNOC,
+ MSM8974_SNOC_MAS_LPASS_AHB,
+ MSM8974_SNOC_MAS_QDSS_BAM,
+ MSM8974_SNOC_MAS_SNOC_CFG,
+ MSM8974_SNOC_TO_BIMC,
+ MSM8974_SNOC_TO_CNOC,
+ MSM8974_SNOC_TO_PNOC,
+ MSM8974_SNOC_TO_OCMEM_VNOC,
+ MSM8974_SNOC_MAS_CRYPTO_CORE0,
+ MSM8974_SNOC_MAS_CRYPTO_CORE1,
+ MSM8974_SNOC_MAS_LPASS_PROC,
+ MSM8974_SNOC_MAS_MSS,
+ MSM8974_SNOC_MAS_MSS_NAV,
+ MSM8974_SNOC_MAS_OCMEM_DMA,
+ MSM8974_SNOC_MAS_WCSS,
+ MSM8974_SNOC_MAS_QDSS_ETR,
+ MSM8974_SNOC_MAS_USB3,
+ MSM8974_SNOC_SLV_AMPSS,
+ MSM8974_SNOC_SLV_LPASS,
+ MSM8974_SNOC_SLV_USB3,
+ MSM8974_SNOC_SLV_WCSS,
+ MSM8974_SNOC_SLV_OCIMEM,
+ MSM8974_SNOC_SLV_SNOC_OCMEM,
+ MSM8974_SNOC_SLV_SERVICE_SNOC,
+ MSM8974_SNOC_SLV_QDSS_STM,
+};
+
+#define RPM_BUS_MASTER_REQ 0x73616d62
+#define RPM_BUS_SLAVE_REQ 0x766c7362
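(The two request-type constants appear to be little-endian ASCII tags — 0x73616d62 stores the bytes "bmas" and 0x766c7362 the bytes "bslv" — matching the string-style resource identifiers used by the SMD RPM interface.)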
+
+#define to_msm8974_icc_provider(_provider) \
+ container_of(_provider, struct msm8974_icc_provider, provider)
+
+static const struct clk_bulk_data msm8974_icc_bus_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+};
+
+/**
+ * struct msm8974_icc_provider - Qualcomm specific interconnect provider
+ * @provider: generic interconnect provider
+ * @bus_clks: the clk_bulk_data table of bus clocks
+ * @num_clks: the total number of clk_bulk_data entries
+ */
+struct msm8974_icc_provider {
+ struct icc_provider provider;
+ struct clk_bulk_data *bus_clks;
+ int num_clks;
+};
+
+#define MSM8974_ICC_MAX_LINKS 3
+
+/**
+ * struct msm8974_icc_node - Qualcomm specific interconnect nodes
+ * @name: the node name used in debugfs
+ * @id: a unique node identifier
+ * @links: an array of nodes where we can go next while traversing
+ * @num_links: the total number of @links
+ * @buswidth: width of the interconnect between a node and the bus (bytes)
+ * @mas_rpm_id: RPM ID for devices that are bus masters
+ * @slv_rpm_id: RPM ID for devices that are bus slaves
+ * @rate: current bus clock rate in Hz
+ */
+struct msm8974_icc_node {
+ unsigned char *name;
+ u16 id;
+ u16 links[MSM8974_ICC_MAX_LINKS];
+ u16 num_links;
+ u16 buswidth;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ u64 rate;
+};
+
+struct msm8974_icc_desc {
+ struct msm8974_icc_node **nodes;
+ size_t num_nodes;
+};
+
+#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
+ ...) \
+ static struct msm8974_icc_node _name = { \
+ .name = #_name, \
+ .id = _id, \
+ .buswidth = _buswidth, \
+ .mas_rpm_id = _mas_rpm_id, \
+ .slv_rpm_id = _slv_rpm_id, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
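For illustration (not part of the patch), the bimc_to_mnoc entry below expands to the following — the compound-literal ARRAY_SIZE() trick counts however many trailing link IDs were passed:

static struct msm8974_icc_node bimc_to_mnoc = {
	.name = "bimc_to_mnoc",
	.id = MSM8974_BIMC_TO_MNOC,
	.buswidth = 8,
	.mas_rpm_id = 2,
	.slv_rpm_id = -1,
	.num_links = 1,	/* ARRAY_SIZE((int[]){ MSM8974_BIMC_SLV_EBI_CH0 }) */
	.links = { MSM8974_BIMC_SLV_EBI_CH0 },
};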
+
+DEFINE_QNODE(mas_ampss_m0, MSM8974_BIMC_MAS_AMPSS_M0, 8, 0, -1);
+DEFINE_QNODE(mas_ampss_m1, MSM8974_BIMC_MAS_AMPSS_M1, 8, 0, -1);
+DEFINE_QNODE(mas_mss_proc, MSM8974_BIMC_MAS_MSS_PROC, 8, 1, -1);
+DEFINE_QNODE(bimc_to_mnoc, MSM8974_BIMC_TO_MNOC, 8, 2, -1, MSM8974_BIMC_SLV_EBI_CH0);
+DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC, MSM8974_BIMC_SLV_EBI_CH0, MSM8974_BIMC_MAS_AMPSS_M0);
+DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
+DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
+
+static struct msm8974_icc_node *msm8974_bimc_nodes[] = {
+ [BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
+ [BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
+ [BIMC_MAS_MSS_PROC] = &mas_mss_proc,
+ [BIMC_TO_MNOC] = &bimc_to_mnoc,
+ [BIMC_TO_SNOC] = &bimc_to_snoc,
+ [BIMC_SLV_EBI_CH0] = &slv_ebi_ch0,
+ [BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
+};
+
+static struct msm8974_icc_desc msm8974_bimc = {
+ .nodes = msm8974_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
+};
+
+DEFINE_QNODE(mas_rpm_inst, MSM8974_CNOC_MAS_RPM_INST, 8, 45, -1);
+DEFINE_QNODE(mas_rpm_data, MSM8974_CNOC_MAS_RPM_DATA, 8, 46, -1);
+DEFINE_QNODE(mas_rpm_sys, MSM8974_CNOC_MAS_RPM_SYS, 8, 47, -1);
+DEFINE_QNODE(mas_dehr, MSM8974_CNOC_MAS_DEHR, 8, 48, -1);
+DEFINE_QNODE(mas_qdss_dap, MSM8974_CNOC_MAS_QDSS_DAP, 8, 49, -1);
+DEFINE_QNODE(mas_spdm, MSM8974_CNOC_MAS_SPDM, 8, 50, -1);
+DEFINE_QNODE(mas_tic, MSM8974_CNOC_MAS_TIC, 8, 51, -1);
+DEFINE_QNODE(slv_clk_ctl, MSM8974_CNOC_SLV_CLK_CTL, 8, -1, 47);
+DEFINE_QNODE(slv_cnoc_mss, MSM8974_CNOC_SLV_CNOC_MSS, 8, -1, 48);
+DEFINE_QNODE(slv_security, MSM8974_CNOC_SLV_SECURITY, 8, -1, 49);
+DEFINE_QNODE(slv_tcsr, MSM8974_CNOC_SLV_TCSR, 8, -1, 50);
+DEFINE_QNODE(slv_tlmm, MSM8974_CNOC_SLV_TLMM, 8, -1, 51);
+DEFINE_QNODE(slv_crypto_0_cfg, MSM8974_CNOC_SLV_CRYPTO_0_CFG, 8, -1, 52);
+DEFINE_QNODE(slv_crypto_1_cfg, MSM8974_CNOC_SLV_CRYPTO_1_CFG, 8, -1, 53);
+DEFINE_QNODE(slv_imem_cfg, MSM8974_CNOC_SLV_IMEM_CFG, 8, -1, 54);
+DEFINE_QNODE(slv_message_ram, MSM8974_CNOC_SLV_MESSAGE_RAM, 8, -1, 55);
+DEFINE_QNODE(slv_bimc_cfg, MSM8974_CNOC_SLV_BIMC_CFG, 8, -1, 56);
+DEFINE_QNODE(slv_boot_rom, MSM8974_CNOC_SLV_BOOT_ROM, 8, -1, 57);
+DEFINE_QNODE(slv_pmic_arb, MSM8974_CNOC_SLV_PMIC_ARB, 8, -1, 59);
+DEFINE_QNODE(slv_spdm_wrapper, MSM8974_CNOC_SLV_SPDM_WRAPPER, 8, -1, 60);
+DEFINE_QNODE(slv_dehr_cfg, MSM8974_CNOC_SLV_DEHR_CFG, 8, -1, 61);
+DEFINE_QNODE(slv_mpm, MSM8974_CNOC_SLV_MPM, 8, -1, 62);
+DEFINE_QNODE(slv_qdss_cfg, MSM8974_CNOC_SLV_QDSS_CFG, 8, -1, 63);
+DEFINE_QNODE(slv_rbcpr_cfg, MSM8974_CNOC_SLV_RBCPR_CFG, 8, -1, 64);
+DEFINE_QNODE(slv_rbcpr_qdss_apu_cfg, MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG, 8, -1, 65);
+DEFINE_QNODE(cnoc_to_snoc, MSM8974_CNOC_TO_SNOC, 8, 52, 75);
+DEFINE_QNODE(slv_cnoc_onoc_cfg, MSM8974_CNOC_SLV_CNOC_ONOC_CFG, 8, -1, 68);
+DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG, 8, -1, 58);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_CFG, 8, -1, 66);
+DEFINE_QNODE(slv_pnoc_cfg, MSM8974_CNOC_SLV_PNOC_CFG, 8, -1, 69);
+DEFINE_QNODE(slv_snoc_mpu_cfg, MSM8974_CNOC_SLV_SNOC_MPU_CFG, 8, -1, 67);
+DEFINE_QNODE(slv_snoc_cfg, MSM8974_CNOC_SLV_SNOC_CFG, 8, -1, 70);
+DEFINE_QNODE(slv_ebi1_dll_cfg, MSM8974_CNOC_SLV_EBI1_DLL_CFG, 8, -1, 71);
+DEFINE_QNODE(slv_phy_apu_cfg, MSM8974_CNOC_SLV_PHY_APU_CFG, 8, -1, 72);
+DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
+DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
+DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
+
+static struct msm8974_icc_node *msm8974_cnoc_nodes[] = {
+ [CNOC_MAS_RPM_INST] = &mas_rpm_inst,
+ [CNOC_MAS_RPM_DATA] = &mas_rpm_data,
+ [CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
+ [CNOC_MAS_DEHR] = &mas_dehr,
+ [CNOC_MAS_QDSS_DAP] = &mas_qdss_dap,
+ [CNOC_MAS_SPDM] = &mas_spdm,
+ [CNOC_MAS_TIC] = &mas_tic,
+ [CNOC_SLV_CLK_CTL] = &slv_clk_ctl,
+ [CNOC_SLV_CNOC_MSS] = &slv_cnoc_mss,
+ [CNOC_SLV_SECURITY] = &slv_security,
+ [CNOC_SLV_TCSR] = &slv_tcsr,
+ [CNOC_SLV_TLMM] = &slv_tlmm,
+ [CNOC_SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [CNOC_SLV_CRYPTO_1_CFG] = &slv_crypto_1_cfg,
+ [CNOC_SLV_IMEM_CFG] = &slv_imem_cfg,
+ [CNOC_SLV_MESSAGE_RAM] = &slv_message_ram,
+ [CNOC_SLV_BIMC_CFG] = &slv_bimc_cfg,
+ [CNOC_SLV_BOOT_ROM] = &slv_boot_rom,
+ [CNOC_SLV_PMIC_ARB] = &slv_pmic_arb,
+ [CNOC_SLV_SPDM_WRAPPER] = &slv_spdm_wrapper,
+ [CNOC_SLV_DEHR_CFG] = &slv_dehr_cfg,
+ [CNOC_SLV_MPM] = &slv_mpm,
+ [CNOC_SLV_QDSS_CFG] = &slv_qdss_cfg,
+ [CNOC_SLV_RBCPR_CFG] = &slv_rbcpr_cfg,
+ [CNOC_SLV_RBCPR_QDSS_APU_CFG] = &slv_rbcpr_qdss_apu_cfg,
+ [CNOC_TO_SNOC] = &cnoc_to_snoc,
+ [CNOC_SLV_CNOC_ONOC_CFG] = &slv_cnoc_onoc_cfg,
+ [CNOC_SLV_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
+ [CNOC_SLV_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [CNOC_SLV_PNOC_CFG] = &slv_pnoc_cfg,
+ [CNOC_SLV_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
+ [CNOC_SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [CNOC_SLV_EBI1_DLL_CFG] = &slv_ebi1_dll_cfg,
+ [CNOC_SLV_PHY_APU_CFG] = &slv_phy_apu_cfg,
+ [CNOC_SLV_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
+ [CNOC_SLV_RPM] = &slv_rpm,
+ [CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
+};
+
+static struct msm8974_icc_desc msm8974_cnoc = {
+ .nodes = msm8974_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
+};
+
+DEFINE_QNODE(mas_graphics_3d, MSM8974_MNOC_MAS_GRAPHICS_3D, 16, 6, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_jpeg, MSM8974_MNOC_MAS_JPEG, 16, 7, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_mdp_port0, MSM8974_MNOC_MAS_MDP_PORT0, 16, 8, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mas_video_p0, MSM8974_MNOC_MAS_VIDEO_P0, 16, 9, -1);
+DEFINE_QNODE(mas_video_p1, MSM8974_MNOC_MAS_VIDEO_P1, 16, 10, -1);
+DEFINE_QNODE(mas_vfe, MSM8974_MNOC_MAS_VFE, 16, 11, -1, MSM8974_MNOC_TO_BIMC);
+DEFINE_QNODE(mnoc_to_cnoc, MSM8974_MNOC_TO_CNOC, 16, 4, -1);
+DEFINE_QNODE(mnoc_to_bimc, MSM8974_MNOC_TO_BIMC, 16, -1, 16, MSM8974_BIMC_TO_MNOC);
+DEFINE_QNODE(slv_camera_cfg, MSM8974_MNOC_SLV_CAMERA_CFG, 16, -1, 3);
+DEFINE_QNODE(slv_display_cfg, MSM8974_MNOC_SLV_DISPLAY_CFG, 16, -1, 4);
+DEFINE_QNODE(slv_ocmem_cfg, MSM8974_MNOC_SLV_OCMEM_CFG, 16, -1, 5);
+DEFINE_QNODE(slv_cpr_cfg, MSM8974_MNOC_SLV_CPR_CFG, 16, -1, 6);
+DEFINE_QNODE(slv_cpr_xpu_cfg, MSM8974_MNOC_SLV_CPR_XPU_CFG, 16, -1, 7);
+DEFINE_QNODE(slv_misc_cfg, MSM8974_MNOC_SLV_MISC_CFG, 16, -1, 8);
+DEFINE_QNODE(slv_misc_xpu_cfg, MSM8974_MNOC_SLV_MISC_XPU_CFG, 16, -1, 9);
+DEFINE_QNODE(slv_venus_cfg, MSM8974_MNOC_SLV_VENUS_CFG, 16, -1, 10);
+DEFINE_QNODE(slv_graphics_3d_cfg, MSM8974_MNOC_SLV_GRAPHICS_3D_CFG, 16, -1, 11);
+DEFINE_QNODE(slv_mmss_clk_cfg, MSM8974_MNOC_SLV_MMSS_CLK_CFG, 16, -1, 12);
+DEFINE_QNODE(slv_mmss_clk_xpu_cfg, MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG, 16, -1, 13);
+DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
+DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
+DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
+
+static struct msm8974_icc_node *msm8974_mnoc_nodes[] = {
+ [MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
+ [MNOC_MAS_JPEG] = &mas_jpeg,
+ [MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
+ [MNOC_MAS_VIDEO_P0] = &mas_video_p0,
+ [MNOC_MAS_VIDEO_P1] = &mas_video_p1,
+ [MNOC_MAS_VFE] = &mas_vfe,
+ [MNOC_TO_CNOC] = &mnoc_to_cnoc,
+ [MNOC_TO_BIMC] = &mnoc_to_bimc,
+ [MNOC_SLV_CAMERA_CFG] = &slv_camera_cfg,
+ [MNOC_SLV_DISPLAY_CFG] = &slv_display_cfg,
+ [MNOC_SLV_OCMEM_CFG] = &slv_ocmem_cfg,
+ [MNOC_SLV_CPR_CFG] = &slv_cpr_cfg,
+ [MNOC_SLV_CPR_XPU_CFG] = &slv_cpr_xpu_cfg,
+ [MNOC_SLV_MISC_CFG] = &slv_misc_cfg,
+ [MNOC_SLV_MISC_XPU_CFG] = &slv_misc_xpu_cfg,
+ [MNOC_SLV_VENUS_CFG] = &slv_venus_cfg,
+ [MNOC_SLV_GRAPHICS_3D_CFG] = &slv_graphics_3d_cfg,
+ [MNOC_SLV_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
+ [MNOC_SLV_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
+ [MNOC_SLV_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [MNOC_SLV_ONOC_MPU_CFG] = &slv_onoc_mpu_cfg,
+ [MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
+};
+
+static struct msm8974_icc_desc msm8974_mnoc = {
+ .nodes = msm8974_mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
+};
+
+DEFINE_QNODE(ocmem_noc_to_ocmem_vnoc, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC, 16, 54, 78, MSM8974_OCMEM_SLV_OCMEM);
+DEFINE_QNODE(mas_jpeg_ocmem, MSM8974_OCMEM_MAS_JPEG_OCMEM, 16, 13, -1);
+DEFINE_QNODE(mas_mdp_ocmem, MSM8974_OCMEM_MAS_MDP_OCMEM, 16, 14, -1);
+DEFINE_QNODE(mas_video_p0_ocmem, MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM, 16, 15, -1);
+DEFINE_QNODE(mas_video_p1_ocmem, MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM, 16, 16, -1);
+DEFINE_QNODE(mas_vfe_ocmem, MSM8974_OCMEM_MAS_VFE_OCMEM, 16, 17, -1);
+DEFINE_QNODE(mas_cnoc_onoc_cfg, MSM8974_OCMEM_MAS_CNOC_ONOC_CFG, 16, 12, -1);
+DEFINE_QNODE(slv_service_onoc, MSM8974_OCMEM_SLV_SERVICE_ONOC, 16, -1, 19);
+DEFINE_QNODE(slv_ocmem, MSM8974_OCMEM_SLV_OCMEM, 16, -1, 18);
+
+/* Virtual NoC is needed for connection to OCMEM */
+DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
+DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+
+static struct msm8974_icc_node *msm8974_onoc_nodes[] = {
+ [OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
+ [OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
+ [OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
+ [OCMEM_MAS_VIDEO_P0_OCMEM] = &mas_video_p0_ocmem,
+ [OCMEM_MAS_VIDEO_P1_OCMEM] = &mas_video_p1_ocmem,
+ [OCMEM_MAS_VFE_OCMEM] = &mas_vfe_ocmem,
+ [OCMEM_MAS_CNOC_ONOC_CFG] = &mas_cnoc_onoc_cfg,
+ [OCMEM_SLV_SERVICE_ONOC] = &slv_service_onoc,
+ [OCMEM_VNOC_TO_SNOC] = &ocmem_vnoc_to_snoc,
+ [OCMEM_VNOC_TO_OCMEM_NOC] = &ocmem_vnoc_to_onoc,
+ [OCMEM_VNOC_MAS_GFX3D] = &mas_v_ocmem_gfx3d,
+ [OCMEM_SLV_OCMEM] = &slv_ocmem,
+};
+
+static struct msm8974_icc_desc msm8974_onoc = {
+ .nodes = msm8974_onoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
+};
+
+DEFINE_QNODE(mas_pnoc_cfg, MSM8974_PNOC_MAS_PNOC_CFG, 8, 43, -1);
+DEFINE_QNODE(mas_sdcc_1, MSM8974_PNOC_MAS_SDCC_1, 8, 33, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_3, MSM8974_PNOC_MAS_SDCC_3, 8, 34, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_4, MSM8974_PNOC_MAS_SDCC_4, 8, 36, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_sdcc_2, MSM8974_PNOC_MAS_SDCC_2, 8, 35, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_tsif, MSM8974_PNOC_MAS_TSIF, 8, 37, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_bam_dma, MSM8974_PNOC_MAS_BAM_DMA, 8, 38, -1);
+DEFINE_QNODE(mas_blsp_2, MSM8974_PNOC_MAS_BLSP_2, 8, 39, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hsic, MSM8974_PNOC_MAS_USB_HSIC, 8, 40, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_blsp_1, MSM8974_PNOC_MAS_BLSP_1, 8, 41, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(mas_usb_hs, MSM8974_PNOC_MAS_USB_HS, 8, 42, -1, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(pnoc_to_snoc, MSM8974_PNOC_TO_SNOC, 8, 44, 45, MSM8974_SNOC_TO_PNOC, MSM8974_PNOC_SLV_PRNG);
+DEFINE_QNODE(slv_sdcc_1, MSM8974_PNOC_SLV_SDCC_1, 8, -1, 31);
+DEFINE_QNODE(slv_sdcc_3, MSM8974_PNOC_SLV_SDCC_3, 8, -1, 32);
+DEFINE_QNODE(slv_sdcc_2, MSM8974_PNOC_SLV_SDCC_2, 8, -1, 33);
+DEFINE_QNODE(slv_sdcc_4, MSM8974_PNOC_SLV_SDCC_4, 8, -1, 34);
+DEFINE_QNODE(slv_tsif, MSM8974_PNOC_SLV_TSIF, 8, -1, 35);
+DEFINE_QNODE(slv_bam_dma, MSM8974_PNOC_SLV_BAM_DMA, 8, -1, 36);
+DEFINE_QNODE(slv_blsp_2, MSM8974_PNOC_SLV_BLSP_2, 8, -1, 37);
+DEFINE_QNODE(slv_usb_hsic, MSM8974_PNOC_SLV_USB_HSIC, 8, -1, 38);
+DEFINE_QNODE(slv_blsp_1, MSM8974_PNOC_SLV_BLSP_1, 8, -1, 39);
+DEFINE_QNODE(slv_usb_hs, MSM8974_PNOC_SLV_USB_HS, 8, -1, 40);
+DEFINE_QNODE(slv_pdm, MSM8974_PNOC_SLV_PDM, 8, -1, 41);
+DEFINE_QNODE(slv_periph_apu_cfg, MSM8974_PNOC_SLV_PERIPH_APU_CFG, 8, -1, 42);
+DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
+DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
+
+static struct msm8974_icc_node *msm8974_pnoc_nodes[] = {
+ [PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
+ [PNOC_MAS_SDCC_1] = &mas_sdcc_1,
+ [PNOC_MAS_SDCC_3] = &mas_sdcc_3,
+ [PNOC_MAS_SDCC_4] = &mas_sdcc_4,
+ [PNOC_MAS_SDCC_2] = &mas_sdcc_2,
+ [PNOC_MAS_TSIF] = &mas_tsif,
+ [PNOC_MAS_BAM_DMA] = &mas_bam_dma,
+ [PNOC_MAS_BLSP_2] = &mas_blsp_2,
+ [PNOC_MAS_USB_HSIC] = &mas_usb_hsic,
+ [PNOC_MAS_BLSP_1] = &mas_blsp_1,
+ [PNOC_MAS_USB_HS] = &mas_usb_hs,
+ [PNOC_TO_SNOC] = &pnoc_to_snoc,
+ [PNOC_SLV_SDCC_1] = &slv_sdcc_1,
+ [PNOC_SLV_SDCC_3] = &slv_sdcc_3,
+ [PNOC_SLV_SDCC_2] = &slv_sdcc_2,
+ [PNOC_SLV_SDCC_4] = &slv_sdcc_4,
+ [PNOC_SLV_TSIF] = &slv_tsif,
+ [PNOC_SLV_BAM_DMA] = &slv_bam_dma,
+ [PNOC_SLV_BLSP_2] = &slv_blsp_2,
+ [PNOC_SLV_USB_HSIC] = &slv_usb_hsic,
+ [PNOC_SLV_BLSP_1] = &slv_blsp_1,
+ [PNOC_SLV_USB_HS] = &slv_usb_hs,
+ [PNOC_SLV_PDM] = &slv_pdm,
+ [PNOC_SLV_PERIPH_APU_CFG] = &slv_periph_apu_cfg,
+ [PNOC_SLV_PNOC_MPU_CFG] = &slv_pnoc_mpu_cfg,
+ [PNOC_SLV_PRNG] = &slv_prng,
+ [PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
+};
+
+static struct msm8974_icc_desc msm8974_pnoc = {
+ .nodes = msm8974_pnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
+};
+
+DEFINE_QNODE(mas_lpass_ahb, MSM8974_SNOC_MAS_LPASS_AHB, 8, 18, -1);
+DEFINE_QNODE(mas_qdss_bam, MSM8974_SNOC_MAS_QDSS_BAM, 8, 19, -1);
+DEFINE_QNODE(mas_snoc_cfg, MSM8974_SNOC_MAS_SNOC_CFG, 8, 20, -1);
+DEFINE_QNODE(snoc_to_bimc, MSM8974_SNOC_TO_BIMC, 8, 21, 24, MSM8974_BIMC_TO_SNOC);
+DEFINE_QNODE(snoc_to_cnoc, MSM8974_SNOC_TO_CNOC, 8, 22, 25);
+DEFINE_QNODE(snoc_to_pnoc, MSM8974_SNOC_TO_PNOC, 8, 29, 28, MSM8974_PNOC_TO_SNOC);
+DEFINE_QNODE(snoc_to_ocmem_vnoc, MSM8974_SNOC_TO_OCMEM_VNOC, 8, 53, 77, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
+DEFINE_QNODE(mas_crypto_core0, MSM8974_SNOC_MAS_CRYPTO_CORE0, 8, 23, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(mas_crypto_core1, MSM8974_SNOC_MAS_CRYPTO_CORE1, 8, 24, -1);
+DEFINE_QNODE(mas_lpass_proc, MSM8974_SNOC_MAS_LPASS_PROC, 8, 25, -1, MSM8974_SNOC_TO_OCMEM_VNOC);
+DEFINE_QNODE(mas_mss, MSM8974_SNOC_MAS_MSS, 8, 26, -1);
+DEFINE_QNODE(mas_mss_nav, MSM8974_SNOC_MAS_MSS_NAV, 8, 27, -1);
+DEFINE_QNODE(mas_ocmem_dma, MSM8974_SNOC_MAS_OCMEM_DMA, 8, 28, -1);
+DEFINE_QNODE(mas_wcss, MSM8974_SNOC_MAS_WCSS, 8, 30, -1);
+DEFINE_QNODE(mas_qdss_etr, MSM8974_SNOC_MAS_QDSS_ETR, 8, 31, -1);
+DEFINE_QNODE(mas_usb3, MSM8974_SNOC_MAS_USB3, 8, 32, -1, MSM8974_SNOC_TO_BIMC);
+DEFINE_QNODE(slv_ampss, MSM8974_SNOC_SLV_AMPSS, 8, -1, 20);
+DEFINE_QNODE(slv_lpass, MSM8974_SNOC_SLV_LPASS, 8, -1, 21);
+DEFINE_QNODE(slv_usb3, MSM8974_SNOC_SLV_USB3, 8, -1, 22);
+DEFINE_QNODE(slv_wcss, MSM8974_SNOC_SLV_WCSS, 8, -1, 23);
+DEFINE_QNODE(slv_ocimem, MSM8974_SNOC_SLV_OCIMEM, 8, -1, 26);
+DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
+DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
+DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
+
+static struct msm8974_icc_node *msm8974_snoc_nodes[] = {
+ [SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
+ [SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
+ [SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
+ [SNOC_TO_BIMC] = &snoc_to_bimc,
+ [SNOC_TO_CNOC] = &snoc_to_cnoc,
+ [SNOC_TO_PNOC] = &snoc_to_pnoc,
+ [SNOC_TO_OCMEM_VNOC] = &snoc_to_ocmem_vnoc,
+ [SNOC_MAS_CRYPTO_CORE0] = &mas_crypto_core0,
+ [SNOC_MAS_CRYPTO_CORE1] = &mas_crypto_core1,
+ [SNOC_MAS_LPASS_PROC] = &mas_lpass_proc,
+ [SNOC_MAS_MSS] = &mas_mss,
+ [SNOC_MAS_MSS_NAV] = &mas_mss_nav,
+ [SNOC_MAS_OCMEM_DMA] = &mas_ocmem_dma,
+ [SNOC_MAS_WCSS] = &mas_wcss,
+ [SNOC_MAS_QDSS_ETR] = &mas_qdss_etr,
+ [SNOC_MAS_USB3] = &mas_usb3,
+ [SNOC_SLV_AMPSS] = &slv_ampss,
+ [SNOC_SLV_LPASS] = &slv_lpass,
+ [SNOC_SLV_USB3] = &slv_usb3,
+ [SNOC_SLV_WCSS] = &slv_wcss,
+ [SNOC_SLV_OCIMEM] = &slv_ocimem,
+ [SNOC_SLV_SNOC_OCMEM] = &slv_snoc_ocmem,
+ [SNOC_SLV_SERVICE_SNOC] = &slv_service_snoc,
+ [SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
+};
+
+static struct msm8974_icc_desc msm8974_snoc = {
+ .nodes = msm8974_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
+};
+
+static int msm8974_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
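The aggregate callback follows the usual interconnect-framework convention: average bandwidth requests add up, while the peak takes the maximum across all requests. A minimal illustration (not part of the patch; values are made up) — two consumers asking for (avg=100, peak=200) and (avg=50, peak=300) units leave agg_avg = 150 and agg_peak = 300:

	u32 agg_avg = 0, agg_peak = 0;

	/* Illustrative only: aggregate two hypothetical requests. */
	msm8974_icc_aggregate(node, 0, 100, 200, &agg_avg, &agg_peak);
	msm8974_icc_aggregate(node, 0,  50, 300, &agg_avg, &agg_peak);
	/* agg_avg == 150, agg_peak == 300 */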
+
+static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
+ char *name, int id, u64 val)
+{
+ int ret;
+
+ if (id == -1)
+ return;
+
+ /*
+ * Setting the bandwidth requests for some nodes fails, and the same
+ * behavior occurs on the downstream MSM 3.4 kernel sources, which log
+ * errors like these:
+ *
+ * msm_rpm_get_error_from_ack(): RPM NACK Unsupported resource
+ * AXI: msm_bus_rpm_req(): RPM: Ack failed
+ * AXI: msm_bus_rpm_commit_arb(): RPM: Req fail: mas:32, bw:240000000
+ *
+ * Since there's no publicly available documentation for this hardware,
+ * and the bandwidth for some nodes in the path can still be set
+ * properly, don't treat a failed request as an error.
+ */
+ ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, rsc_type, id,
+ val);
+ if (ret)
+ dev_dbg(dev, "Cannot set bandwidth for node %s (%d): %d\n",
+ name, id, ret);
+}
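A hedged usage sketch of this helper (illustrative only; the master id 33 is taken from the mas_sdcc_1 table entry above, and the bandwidth matches the downstream log quoted in the comment):

	/* Request 240 Mbps on the sdcc_1 master; a NACK from the RPM is
	 * only logged at debug level, per the rationale above. */
	msm8974_icc_rpm_smd_send(dev, RPM_BUS_MASTER_REQ, "mas_sdcc_1",
				 33, 240000000ULL);

Nodes without an RPM id simply pass -1 and the helper returns early.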
+
+static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct msm8974_icc_node *src_qn, *dst_qn;
+ struct msm8974_icc_provider *qp;
+ u64 sum_bw, max_peak_bw, rate;
+ u32 agg_avg = 0, agg_peak = 0;
+ struct icc_provider *provider;
+ struct icc_node *n;
+ int ret, i;
+
+ src_qn = src->data;
+ dst_qn = dst->data;
+ provider = src->provider;
+ qp = to_msm8974_icc_provider(provider);
+
+ list_for_each_entry(n, &provider->nodes, node_list)
+ msm8974_icc_aggregate(n, 0, n->avg_bw, n->peak_bw,
+ &agg_avg, &agg_peak);
+
+ sum_bw = icc_units_to_bps(agg_avg);
+ max_peak_bw = icc_units_to_bps(agg_peak);
+
+ /* Set bandwidth on source node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ src_qn->name, src_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ src_qn->name, src_qn->slv_rpm_id, sum_bw);
+
+ /* Set bandwidth on destination node */
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
+ dst_qn->name, dst_qn->mas_rpm_id, sum_bw);
+
+ msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
+ dst_qn->name, dst_qn->slv_rpm_id, sum_bw);
+
+ rate = max(sum_bw, max_peak_bw);
+
+ do_div(rate, src_qn->buswidth);
+
+ if (src_qn->rate == rate)
+ return 0;
+
+ for (i = 0; i < qp->num_clks; i++) {
+ ret = clk_set_rate(qp->bus_clks[i].clk, rate);
+ if (ret) {
+ dev_err(provider->dev, "%s clk_set_rate error: %d\n",
+ qp->bus_clks[i].id, ret);
+ ret = 0;
+ }
+ }
+
+ src_qn->rate = rate;
+
+ return 0;
+}
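Before touching the clocks, the aggregated bandwidth is turned into a frequency: the bus moves buswidth bytes per cycle, so (with illustrative numbers)

	rate = max(sum_bw, max_peak_bw) / buswidth
	     = max(2400000000, 1600000000) / 8
	     = 300000000 Hz (300 MHz),

and the new rate is only propagated to the bus clocks when it differs from the one already cached in src_qn->rate.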
+
+static int msm8974_icc_probe(struct platform_device *pdev)
+{
+ const struct msm8974_icc_desc *desc;
+ struct msm8974_icc_node **qnodes;
+ struct msm8974_icc_provider *qp;
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ /* wait for the RPM proxy */
+ if (!qcom_icc_rpm_smd_available())
+ return -EPROBE_DEFER;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ qp->bus_clks = devm_kmemdup(dev, msm8974_icc_bus_clocks,
+ sizeof(msm8974_icc_bus_clocks), GFP_KERNEL);
+ if (!qp->bus_clks)
+ return -ENOMEM;
+
+ qp->num_clks = ARRAY_SIZE(msm8974_icc_bus_clocks);
+ ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
+ if (ret)
+ return ret;
+
+ provider = &qp->provider;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->dev = dev;
+ provider->set = msm8974_icc_set;
+ provider->aggregate = msm8974_icc_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ goto err_disable_clks;
+ }
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err_del_icc;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ dev_dbg(dev, "registered node %s\n", node->name);
+
+ /* populate links */
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+
+err_del_icc:
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+ }
+ icc_provider_del(provider);
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return ret;
+}
+
+static int msm8974_icc_remove(struct platform_device *pdev)
+{
+ struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ struct icc_provider *provider = &qp->provider;
+ struct icc_node *n;
+
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ icc_node_del(n);
+ icc_node_destroy(n->id);
+ }
+ clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+
+ return icc_provider_del(provider);
+}
+
+static const struct of_device_id msm8974_noc_of_match[] = {
+ { .compatible = "qcom,msm8974-bimc", .data = &msm8974_bimc},
+ { .compatible = "qcom,msm8974-cnoc", .data = &msm8974_cnoc},
+ { .compatible = "qcom,msm8974-mmssnoc", .data = &msm8974_mnoc},
+ { .compatible = "qcom,msm8974-ocmemnoc", .data = &msm8974_onoc},
+ { .compatible = "qcom,msm8974-pnoc", .data = &msm8974_pnoc},
+ { .compatible = "qcom,msm8974-snoc", .data = &msm8974_snoc},
+ { },
+};
+MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
+
+static struct platform_driver msm8974_noc_driver = {
+ .probe = msm8974_icc_probe,
+ .remove = msm8974_icc_remove,
+ .driver = {
+ .name = "qnoc-msm8974",
+ .of_match_table = msm8974_noc_of_match,
+ },
+};
+module_platform_driver(msm8974_noc_driver);
+MODULE_DESCRIPTION("Qualcomm MSM8974 NoC driver");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_LICENSE("GPL v2");
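For context, a consumer driver would exercise this provider through the generic interconnect API. A minimal consumer-side sketch (not part of this patch; assumes the device tree carries an "interconnects" property and the bandwidth values are hypothetical):

	#include <linux/interconnect.h>

	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, NULL);	/* first "interconnects" entry */
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* avg/peak are in kbps; the provider converts them with
	 * icc_units_to_bps() before talking to the RPM. */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		dev_warn(dev, "icc_set_bw failed: %d\n", ret);

	icc_put(path);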
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e3842eabcfdd..0b9d78a0f3ac 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -3,6 +3,10 @@
config IOMMU_IOVA
tristate
+# The IOASID library may also be used by non-IOMMU_API users
+config IOASID
+ tristate
+
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
@@ -138,6 +142,7 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
+ select IOMMU_DMA
depends on X86_64 && PCI && ACPI
---help---
With this option you can enable support for AMD IOMMU hardware in
@@ -207,6 +212,7 @@ config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
select PCI_PASID
+ select PCI_PRI
select MMU_NOTIFIER
help
Shared Virtual Memory (SVM) provides a facility for devices
@@ -467,7 +473,7 @@ config QCOM_IOMMU
config HYPERV_IOMMU
bool "Hyper-V x2APIC IRQ Handling"
- depends on HYPERV
+ depends on HYPERV && X86
select IOMMU_API
default HYPERV
help
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4f405f926e73..97814cc861ea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dd555078258c..bd25674ee4db 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
+#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
@@ -88,8 +89,6 @@ const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
-static const struct dma_map_ops amd_iommu_dma_ops;
-
/*
* general struct to manage commands send to an IOMMU
*/
@@ -102,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
-static void iova_domain_flush_tlb(struct iova_domain *iovad);
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
- /* generic protection domain information */
- struct protection_domain domain;
-
- /* IOVA RB-Tree */
- struct iova_domain iovad;
-};
-
-static struct iova_domain reserved_iova_ranges;
-static struct lock_class_key reserved_rbtree_key;
/****************************************************************************
*
@@ -124,30 +108,6 @@ static struct lock_class_key reserved_rbtree_key;
*
****************************************************************************/
-static inline int match_hid_uid(struct device *dev,
- struct acpihid_map_entry *entry)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *hid, *uid;
-
- if (!adev)
- return -ENODEV;
-
- hid = acpi_device_hid(adev);
- uid = acpi_device_uid(adev);
-
- if (!hid || !(*hid))
- return -ENODEV;
-
- if (!uid || !(*uid))
- return strcmp(hid, entry->hid);
-
- if (!(*entry->uid))
- return strcmp(hid, entry->hid);
-
- return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
-}
-
static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -158,10 +118,14 @@ static inline u16 get_pci_device_id(struct device *dev)
static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry)
{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpihid_map_entry *p;
+ if (!adev)
+ return -ENODEV;
+
list_for_each_entry(p, &acpihid_map, list) {
- if (!match_hid_uid(dev, p)) {
+ if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
if (entry)
*entry = p;
return p->devid;
@@ -187,12 +151,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
-{
- BUG_ON(domain->flags != PD_DMA_OPS_MASK);
- return container_of(domain, struct dma_ops_domain, domain);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -226,71 +184,61 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
return NULL;
}
-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
- *(u16 *)data = alias;
- return 0;
-}
-
-static u16 get_alias(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid, ivrs_alias, pci_alias;
-
- /* The callers make sure that get_device_id() does not fail here */
- devid = get_device_id(dev);
+ u16 devid = pci_dev_id(pdev);
- /* For ACPI HID devices, we simply return the devid as such */
- if (!dev_is_pci(dev))
- return devid;
+ if (devid == alias)
+ return 0;
- ivrs_alias = amd_iommu_alias_table[devid];
+ amd_iommu_rlookup_table[alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[alias].data));
- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+ return 0;
+}
- if (ivrs_alias == pci_alias)
- return ivrs_alias;
+static void clone_aliases(struct pci_dev *pdev)
+{
+ if (!pdev)
+ return;
/*
- * DMA alias showdown
- *
- * The IVRS is fairly reliable in telling us about aliases, but it
- * can't know about every screwy device. If we don't have an IVRS
- * reported alias, use the PCI reported alias. In that case we may
- * still need to initialize the rlookup and dev_table entries if the
- * alias is to a non-existent device.
+ * The IVRS alias stored in the alias table may not be
+ * part of the PCI DMA aliases if its bus differs
+ * from the original device's bus.
*/
- if (ivrs_alias == devid) {
- if (!amd_iommu_rlookup_table[pci_alias]) {
- amd_iommu_rlookup_table[pci_alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[pci_alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[pci_alias].data));
- }
+ clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
- return pci_alias;
- }
+ pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}
- pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d "
- "for device [%04x:%04x], kernel reported alias "
- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device,
- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
- PCI_FUNC(pci_alias));
+static struct pci_dev *setup_aliases(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 ivrs_alias;
+
+ /* For ACPI HID devices, there are no aliases */
+ if (!dev_is_pci(dev))
+ return NULL;
/*
- * If we don't have a PCI DMA alias and the IVRS alias is on the same
- * bus, then the IVRS table may know about a quirk that we don't.
+ * Add the IVRS alias to the PCI DMA aliases if it is on the same
+ * bus. The IVRS table may know about a quirk that we don't.
*/
- if (pci_alias == devid &&
+ ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
pci_add_dma_alias(pdev, ivrs_alias & 0xff);
pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
}
- return ivrs_alias;
+ clone_aliases(pdev);
+
+ return pdev;
}
static struct iommu_dev_data *find_dev_data(u16 devid)
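The alias rework above leans on pci_for_each_dma_alias(), which invokes a callback for the device itself and for every requester ID it may present on the bus (bridge aliases, quirks, and so on) — the same traversal clone_aliases() uses to mirror device-table entries. A hedged sketch of the traversal (hypothetical helper, illustrative only):

	/* Print every DMA alias of a device; not part of the patch. */
	static int print_alias(struct pci_dev *pdev, u16 alias, void *data)
	{
		pci_info(pdev, "DMA alias %02x:%02x.%d\n",
			 PCI_BUS_NUM(alias), PCI_SLOT(alias),
			 PCI_FUNC(alias));
		return 0;	/* non-zero would abort the walk */
	}

	pci_for_each_dma_alias(pdev, print_alias, NULL);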
@@ -428,7 +376,7 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
- dev_data->alias = get_alias(dev);
+ dev_data->pdev = setup_aliases(dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -453,20 +401,16 @@ static int iommu_init_device(struct device *dev)
static void iommu_ignore_device(struct device *dev)
{
- u16 alias;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
- alias = get_alias(dev);
-
+ amd_iommu_rlookup_table[devid] = NULL;
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
- amd_iommu_rlookup_table[devid] = NULL;
- amd_iommu_rlookup_table[alias] = NULL;
+ setup_aliases(dev);
}
static void iommu_uninit_device(struct device *dev)
@@ -640,8 +584,7 @@ retry:
pasid, address, flags);
break;
case EVENT_TYPE_INV_PPR_REQ:
- pasid = ((event[0] >> 16) & 0xFFFF)
- | ((event[1] << 6) & 0xF0000);
+ pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -876,17 +819,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
struct iommu_cmd *cmd)
{
u8 *target;
-
- target = iommu->cmd_buf + iommu->cmd_buf_tail;
-
- iommu->cmd_buf_tail += sizeof(*cmd);
- iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;
+ u32 tail;
/* Copy command to buffer */
+ tail = iommu->cmd_buf_tail;
+ target = iommu->cmd_buf + tail;
memcpy(target, cmd, sizeof(*cmd));
+ tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+ iommu->cmd_buf_tail = tail;
+
/* Tell the IOMMU about it */
- writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
@@ -1236,6 +1180,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
return iommu_queue_command(iommu, &cmd);
}
+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct amd_iommu *iommu = data;
+
+ return iommu_flush_dte(iommu, alias);
+}
+
/*
* Command send function for invalidating a device table entry
*/
@@ -1246,14 +1197,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
- ret = iommu_flush_dte(iommu, dev_data->devid);
- if (!ret && alias != dev_data->devid)
- ret = iommu_flush_dte(iommu, alias);
+ if (dev_data->pdev)
+ ret = pci_for_each_dma_alias(dev_data->pdev,
+ device_flush_dte_alias, iommu);
+ else
+ ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ ret = iommu_flush_dte(iommu, alias);
+ if (ret)
+ return ret;
+ }
+
if (dev_data->ats.enabled)
ret = device_flush_iotlb(dev_data, 0, ~0UL);
@@ -1302,12 +1261,6 @@ static void domain_flush_pages(struct protection_domain *domain,
__domain_flush_pages(domain, address, size, 0);
}
-/* Flush the whole IO/TLB for a given protection domain */
-static void domain_flush_tlb(struct protection_domain *domain)
-{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
@@ -1755,43 +1708,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
/****************************************************************************
*
- * The next functions belong to the address allocator for the dma_ops
- * interface functions.
- *
- ****************************************************************************/
-
-
-static unsigned long dma_ops_alloc_iova(struct device *dev,
- struct dma_ops_domain *dma_dom,
- unsigned int pages, u64 dma_mask)
-{
- unsigned long pfn = 0;
-
- pages = __roundup_pow_of_two(pages);
-
- if (dma_mask > DMA_BIT_MASK(32))
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(DMA_BIT_MASK(32)), false);
-
- if (!pfn)
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(dma_mask), true);
-
- return (pfn << PAGE_SHIFT);
-}
-
-static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
- unsigned long address,
- unsigned int pages)
-{
- pages = __roundup_pow_of_two(pages);
- address >>= PAGE_SHIFT;
-
- free_iova_fast(&dma_dom->iovad, address, pages);
-}
-
-/****************************************************************************
- *
* The next functions belong to the domain allocation. A domain is
* allocated for every IOMMU as the default domain. If device isolation
is enabled, every device gets its own domain. The most important thing
@@ -1866,42 +1782,23 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dom->domain.lock, flags);
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- spin_unlock_irqrestore(&dom->domain.lock, flags);
-}
-
-static void iova_domain_flush_tlb(struct iova_domain *iovad)
-{
- struct dma_ops_domain *dom;
-
- dom = container_of(iovad, struct dma_ops_domain, iovad);
-
- dma_ops_domain_flush_tlb(dom);
-}
-
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
*/
-static void dma_ops_domain_free(struct dma_ops_domain *dom)
+static void dma_ops_domain_free(struct protection_domain *domain)
{
- if (!dom)
+ if (!domain)
return;
- put_iova_domain(&dom->iovad);
+ iommu_put_dma_cookie(&domain->domain);
- free_pagetable(&dom->domain);
+ free_pagetable(domain);
- if (dom->domain.id)
- domain_id_free(dom->domain.id);
+ if (domain->id)
+ domain_id_free(domain->id);
- kfree(dom);
+ kfree(domain);
}
/*
@@ -1909,35 +1806,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
* It also initializes the page table and the address allocator data
* structures required for the dma_ops interface
*/
-static struct dma_ops_domain *dma_ops_domain_alloc(void)
+static struct protection_domain *dma_ops_domain_alloc(void)
{
- struct dma_ops_domain *dma_dom;
+ struct protection_domain *domain;
- dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
- if (!dma_dom)
+ domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
+ if (!domain)
return NULL;
- if (protection_domain_init(&dma_dom->domain))
- goto free_dma_dom;
-
- dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
- dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- dma_dom->domain.flags = PD_DMA_OPS_MASK;
- if (!dma_dom->domain.pt_root)
- goto free_dma_dom;
-
- init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
+ if (protection_domain_init(domain))
+ goto free_domain;
- if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
- goto free_dma_dom;
+ domain->mode = PAGE_MODE_3_LEVEL;
+ domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ domain->flags = PD_DMA_OPS_MASK;
+ if (!domain->pt_root)
+ goto free_domain;
- /* Initialize reserved ranges */
- copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
+ if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
- return dma_dom;
+ return domain;
-free_dma_dom:
- dma_ops_domain_free(dma_dom);
+free_domain:
+ dma_ops_domain_free(domain);
return NULL;
}
@@ -2035,11 +1927,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu;
- u16 alias;
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2052,8 +1942,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
- if (alias != dev_data->devid)
- set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
device_flush_dte(dev_data);
}
@@ -2062,17 +1951,14 @@ static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
clear_dte_entry(dev_data->devid);
- if (alias != dev_data->devid)
- clear_dte_entry(alias);
+ clone_aliases(dev_data->pdev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -2305,8 +2191,8 @@ static int amd_iommu_add_device(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
- else
- dev->dma_ops = &amd_iommu_dma_ops;
+ else if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
out:
iommu_completion_wait(iommu);
@@ -2340,43 +2226,32 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
return acpihid_device_group(dev);
}
+static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ return -ENODEV;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = !amd_iommu_unmap_flush;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+}
+
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
*
*****************************************************************************/
-/*
- * In the dma_ops path we only have the struct device. This function
- * finds the corresponding IOMMU, the protection domain and the
- * requestor id for a given device.
- * If the device is not yet associated with a domain this is also done
- * in this function.
- */
-static struct protection_domain *get_domain(struct device *dev)
-{
- struct protection_domain *domain;
- struct iommu_domain *io_domain;
-
- if (!check_device(dev))
- return ERR_PTR(-EINVAL);
-
- domain = get_dev_data(dev)->domain;
- if (domain == NULL && get_dev_data(dev)->defer_attach) {
- get_dev_data(dev)->defer_attach = false;
- io_domain = iommu_get_domain_for_dev(dev);
- domain = to_pdomain(io_domain);
- attach_device(dev, domain);
- }
- if (domain == NULL)
- return ERR_PTR(-EBUSY);
-
- if (!dma_ops_domain(domain))
- return ERR_PTR(-EBUSY);
-
- return domain;
-}
-
static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
@@ -2384,13 +2259,7 @@ static void update_device_table(struct protection_domain *domain)
list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
dev_data->iommu_v2);
-
- if (dev_data->devid == dev_data->alias)
- continue;
-
- /* There is an alias, update device table entry for it */
- set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
- dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
}
}
@@ -2402,458 +2271,6 @@ static void update_domain(struct protection_domain *domain)
domain_flush_tlb_pde(domain);
}
-static int dir2prot(enum dma_data_direction direction)
-{
- if (direction == DMA_TO_DEVICE)
- return IOMMU_PROT_IR;
- else if (direction == DMA_FROM_DEVICE)
- return IOMMU_PROT_IW;
- else if (direction == DMA_BIDIRECTIONAL)
- return IOMMU_PROT_IW | IOMMU_PROT_IR;
- else
- return 0;
-}
-
-/*
- * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is used by all
- * mapping functions provided with this IOMMU driver.
- * Must be called with the domain lock held.
- */
-static dma_addr_t __map_single(struct device *dev,
- struct dma_ops_domain *dma_dom,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction direction,
- u64 dma_mask)
-{
- dma_addr_t offset = paddr & ~PAGE_MASK;
- dma_addr_t address, start, ret;
- unsigned long flags;
- unsigned int pages;
- int prot = 0;
- int i;
-
- pages = iommu_num_pages(paddr, size, PAGE_SIZE);
- paddr &= PAGE_MASK;
-
- address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (!address)
- goto out;
-
- prot = dir2prot(direction);
-
- start = address;
- for (i = 0; i < pages; ++i) {
- ret = iommu_map_page(&dma_dom->domain, start, paddr,
- PAGE_SIZE, prot, GFP_ATOMIC);
- if (ret)
- goto out_unmap;
-
- paddr += PAGE_SIZE;
- start += PAGE_SIZE;
- }
- address += offset;
-
- domain_flush_np_cache(&dma_dom->domain, address, size);
-
-out:
- return address;
-
-out_unmap:
-
- for (--i; i >= 0; --i) {
- start -= PAGE_SIZE;
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- }
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
-
- dma_ops_free_iova(dma_dom, address, pages);
-
- return DMA_MAPPING_ERROR;
-}
-
-/*
- * Does the reverse of the __map_single function. Must be called with
- * the domain lock held too
- */
-static void __unmap_single(struct dma_ops_domain *dma_dom,
- dma_addr_t dma_addr,
- size_t size,
- int dir)
-{
- dma_addr_t i, start;
- unsigned int pages;
-
- pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- dma_addr &= PAGE_MASK;
- start = dma_addr;
-
- for (i = 0; i < pages; ++i) {
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- start += PAGE_SIZE;
- }
-
- if (amd_iommu_unmap_flush) {
- unsigned long flags;
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
- dma_ops_free_iova(dma_dom, dma_addr, pages);
- } else {
- pages = __roundup_pow_of_two(pages);
- queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
- }
-}
-
-/*
- * The exported map_single function for dma_ops.
- */
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = page_to_phys(page) + offset;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- u64 dma_mask;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return (dma_addr_t)paddr;
- else if (IS_ERR(domain))
- return DMA_MAPPING_ERROR;
-
- dma_mask = *dev->dma_mask;
- dma_dom = to_dma_ops_domain(domain);
-
- return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
-}
-
-/*
- * The exported unmap_single function for dma_ops.
- */
-static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, dir);
-}
-
-static int sg_num_pages(struct device *dev,
- struct scatterlist *sglist,
- int nelems)
-{
- unsigned long mask, boundary_size;
- struct scatterlist *s;
- int i, npages = 0;
-
- mask = dma_get_seg_boundary(dev);
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for_each_sg(sglist, s, nelems, i) {
- int p, n;
-
- s->dma_address = npages << PAGE_SHIFT;
- p = npages % boundary_size;
- n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- if (p + n > boundary_size)
- npages += boundary_size - p;
- npages += n;
- }
-
- return npages;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int mapped_pages = 0, npages = 0, prot = 0, i;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct scatterlist *s;
- unsigned long address;
- u64 dma_mask;
- int ret;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return 0;
-
- dma_dom = to_dma_ops_domain(domain);
- dma_mask = *dev->dma_mask;
-
- npages = sg_num_pages(dev, sglist, nelems);
-
- address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (!address)
- goto out_err;
-
- prot = dir2prot(direction);
-
- /* Map all sg entries */
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr, phys_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
- ret = iommu_map_page(domain, bus_addr, phys_addr,
- PAGE_SIZE, prot,
- GFP_ATOMIC | __GFP_NOWARN);
- if (ret)
- goto out_unmap;
-
- mapped_pages += 1;
- }
- }
-
- /* Everything is mapped - write the right values into s->dma_address */
- for_each_sg(sglist, s, nelems, i) {
- /*
- * Add in the remaining piece of the scatter-gather offset that
- * was masked out when we were determining the physical address
- * via (sg_phys(s) & PAGE_MASK) earlier.
- */
- s->dma_address += address + (s->offset & ~PAGE_MASK);
- s->dma_length = s->length;
- }
-
- if (s)
- domain_flush_np_cache(domain, s->dma_address, s->dma_length);
-
- return nelems;
-
-out_unmap:
- dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n",
- npages, ret);
-
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
-
- if (--mapped_pages == 0)
- goto out_free_iova;
- }
- }
-
-out_free_iova:
- free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
-
-out_err:
- return 0;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static void unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- unsigned long startaddr;
- int npages;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- startaddr = sg_dma_address(sglist) & PAGE_MASK;
- dma_dom = to_dma_ops_domain(domain);
- npages = sg_num_pages(dev, sglist, nelems);
-
- __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
-}
-
-/*
- * The exported alloc_coherent function for dma_ops.
- */
-static void *alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
-{
- u64 dma_mask = dev->coherent_dma_mask;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL) {
- page = alloc_pages(flag, get_order(size));
- *dma_addr = page_to_phys(page);
- return page_address(page);
- } else if (IS_ERR(domain))
- return NULL;
-
- dma_dom = to_dma_ops_domain(domain);
- size = PAGE_ALIGN(size);
- dma_mask = dev->coherent_dma_mask;
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
-
- page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
- if (!page) {
- if (!gfpflags_allow_blocking(flag))
- return NULL;
-
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag & __GFP_NOWARN);
- if (!page)
- return NULL;
- }
-
- if (!dma_mask)
- dma_mask = *dev->dma_mask;
-
- *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
- size, DMA_BIDIRECTIONAL, dma_mask);
-
- if (*dma_addr == DMA_MAPPING_ERROR)
- goto out_free;
-
- return page_address(page);
-
-out_free:
-
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-
- return NULL;
-}
-
-/*
- * The exported free_coherent function for dma_ops.
- */
-static void free_coherent(struct device *dev, size_t size,
- void *virt_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- page = virt_to_page(virt_addr);
- size = PAGE_ALIGN(size);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- goto free_mem;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
-
-free_mem:
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-}
-
-/*
- * This function is called by the DMA layer to find out if we can handle a
- * particular device. It is part of the dma_ops.
- */
-static int amd_iommu_dma_supported(struct device *dev, u64 mask)
-{
- if (!dma_direct_supported(dev, mask))
- return 0;
- return check_device(dev);
-}
-
-static const struct dma_map_ops amd_iommu_dma_ops = {
- .alloc = alloc_coherent,
- .free = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
-};
-
-static int init_reserved_iova_ranges(void)
-{
- struct pci_dev *pdev = NULL;
- struct iova *val;
-
- init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
-
- lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
- &reserved_rbtree_key);
-
- /* MSI memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
- if (!val) {
- pr_err("Reserving MSI range failed\n");
- return -ENOMEM;
- }
-
- /* HT memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
- if (!val) {
- pr_err("Reserving HT range failed\n");
- return -ENOMEM;
- }
-
- /*
- * Memory used for PCI resources
- * FIXME: Check whether we can reserve the PCI-hole completly
- */
- for_each_pci_dev(pdev) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
- struct resource *r = &pdev->resource[i];
-
- if (!(r->flags & IORESOURCE_MEM))
- continue;
-
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!val) {
- pci_err(pdev, "Reserve pci-resource range %pR failed\n", r);
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
int __init amd_iommu_init_api(void)
{
int ret, err = 0;
@@ -2862,10 +2279,6 @@ int __init amd_iommu_init_api(void)
if (ret)
return ret;
- ret = init_reserved_iova_ranges();
- if (ret)
- return ret;
-
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
@@ -2936,7 +2349,6 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
- mutex_init(&domain->api_lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
@@ -2967,7 +2379,6 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
- struct dma_ops_domain *dma_domain;
switch (type) {
case IOMMU_DOMAIN_UNMANAGED:
@@ -2988,12 +2399,11 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
break;
case IOMMU_DOMAIN_DMA:
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain) {
+ pdomain = dma_ops_domain_alloc();
+ if (!pdomain) {
pr_err("Failed to allocate\n");
return NULL;
}
- pdomain = &dma_domain->domain;
break;
case IOMMU_DOMAIN_IDENTITY:
pdomain = protection_domain_alloc();
@@ -3012,7 +2422,6 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -3027,8 +2436,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
switch (dom->type) {
case IOMMU_DOMAIN_DMA:
/* Now release the domain */
- dma_dom = to_dma_ops_domain(domain);
- dma_ops_domain_free(dma_dom);
+ dma_ops_domain_free(domain);
break;
default:
if (domain->mode != PAGE_MODE_NONE)
@@ -3084,6 +2492,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return -EINVAL;
dev_data = dev->archdata.iommu;
+ dev_data->defer_attach = false;
iommu = amd_iommu_rlookup_table[dev_data->devid];
if (!iommu)
@@ -3109,7 +2518,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot,
+ gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
@@ -3123,9 +2533,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
- mutex_unlock(&domain->api_lock);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
domain_flush_np_cache(domain, iova, page_size);
@@ -3137,16 +2545,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
return 0;
- mutex_lock(&domain->api_lock);
- unmap_size = iommu_unmap_page(domain, iova, page_size);
- mutex_unlock(&domain->api_lock);
-
- return unmap_size;
+ return iommu_unmap_page(domain, iova, page_size);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3247,19 +2650,6 @@ static void amd_iommu_put_resv_regions(struct device *dev,
kfree(entry);
}
-static void amd_iommu_apply_resv_region(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region)
-{
- struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
- unsigned long start, end;
-
- start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length - 1);
-
- WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
-}
-
static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
@@ -3296,9 +2686,9 @@ const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
+ .domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = amd_iommu_put_resv_regions,
- .apply_resv_region = amd_iommu_apply_resv_region,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -3610,9 +3000,23 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *pdomain;
+ struct iommu_domain *io_domain;
+ struct device *dev = &pdev->dev;
- pdomain = get_domain(&pdev->dev);
- if (IS_ERR(pdomain))
+ if (!check_device(dev))
+ return NULL;
+
+ pdomain = get_dev_data(dev)->domain;
+ if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
+ get_dev_data(dev)->defer_attach = false;
+ io_domain = iommu_get_domain_for_dev(dev);
+ pdomain = to_pdomain(io_domain);
+ attach_device(dev, pdomain);
+ }
+ if (pdomain == NULL)
+ return NULL;
+
+ if (!dma_ops_domain(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
@@ -3752,7 +3156,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
iommu_flush_dte(iommu, devid);
}
-static struct irq_remap_table *alloc_irq_table(u16 devid)
+static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
+ void *data)
+{
+ struct irq_remap_table *table = data;
+
+ irq_lookup_table[alias] = table;
+ set_dte_irq_entry(alias, table);
+
+ iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+
+ return 0;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
@@ -3798,7 +3215,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
table = new_table;
new_table = NULL;
- set_remap_table_entry(iommu, devid, table);
+ if (pdev)
+ pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
+ table);
+ else
+ set_remap_table_entry(iommu, devid, table);
+
if (devid != alias)
set_remap_table_entry(iommu, alias, table);
@@ -3815,7 +3237,8 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align)
+static int alloc_irq_index(u16 devid, int count, bool align,
+ struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
@@ -3825,7 +3248,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, pdev);
if (!table)
return -ENODEV;
@@ -4258,7 +3681,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_remap_table *table;
struct amd_iommu *iommu;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -4275,11 +3698,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
} else {
index = -ENOMEM;
}
- } else {
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
- index = alloc_irq_index(devid, nr_irqs, align);
+ index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+ } else {
+ index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
+
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
ret = index;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 17bd5a349119..f52f59d5c6bd 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -468,7 +468,6 @@ struct protection_domain {
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
- struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
@@ -639,8 +638,8 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
- u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index 5c87a38620c4..b2fe72a8f019 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
-static int arm_mmu500_reset(struct arm_smmu_device *smmu)
+int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
int i;
@@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
"calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
+ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
+ return qcom_smmu_impl_init(smmu);
+
return smmu;
}
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
new file mode 100644
index 000000000000..24c071c1d8b0
--- /dev/null
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/qcom_scm.h>
+
+#include "arm-smmu.h"
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+};
+
+static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ arm_mmu500_reset(smmu);
+
+ /*
+ * To address performance degradation in non-real-time clients,
+ * such as USB and UFS, turn off wait-for-safe on sdm845-based boards,
+ * such as MTP and db845, whose firmware implements secure monitor
+ * call handlers to turn the wait-for-safe logic on and off.
+ */
+ ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
+ if (ret)
+ dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
+
+ return ret;
+}
+
+static const struct arm_smmu_impl qcom_smmu_impl = {
+ .reset = qcom_sdm845_smmu500_reset,
+};
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct qcom_smmu *qsmmu;
+
+ qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ if (!qsmmu)
+ return ERR_PTR(-ENOMEM);
+
+ qsmmu->smmu = *smmu;
+
+ qsmmu->smmu.impl = &qcom_smmu_impl;
+ devm_kfree(smmu->dev, smmu);
+
+ return &qsmmu->smmu;
+}
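qcom_smmu_impl_init() shows the "subclassing" pattern used for SMMU implementation quirks: embed the generic arm_smmu_device in a larger struct, copy the base state in, point ->impl at the specialised ops, and free the original allocation. Another SoC could hook in the same way from arm_smmu_impl_init(); a hypothetical sketch (the compatible string and impl name below are made up):

	/* Reuses arm_mmu500_reset(), made non-static by the hunk above. */
	static const struct arm_smmu_impl example_smmu_impl = {
		.reset = arm_mmu500_reset,
	};

	if (of_device_is_compatible(smmu->dev->of_node,
				    "vendor,example-smmu-500"))
		smmu->impl = &example_smmu_impl;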
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8da93e730d6f..effe72eb89e7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
cfg->cd.asid = (u16)asid;
cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
return 0;
out_free_asid:
@@ -2448,7 +2448,7 @@ out_unlock:
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Interrupt lines */
- irq = platform_get_irq_byname(pdev, "combined");
+ irq = platform_get_irq_byname_optional(pdev, "combined");
if (irq > 0)
smmu->combined_irq = irq;
else {
- irq = platform_get_irq_byname(pdev, "eventq");
+ irq = platform_get_irq_byname_optional(pdev, "eventq");
if (irq > 0)
smmu->evtq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "priq");
+ irq = platform_get_irq_byname_optional(pdev, "priq");
if (irq > 0)
smmu->priq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "gerror");
+ irq = platform_get_irq_byname_optional(pdev, "gerror");
if (irq > 0)
smmu->gerr_irq = irq;
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7c503a6bc585..4f1a350d9529 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
@@ -122,7 +123,7 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
if (pm_runtime_enabled(smmu->dev))
- pm_runtime_put(smmu->dev);
+ pm_runtime_put_autosuspend(smmu->dev);
}
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -244,6 +245,9 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
unsigned int spin_cnt, delay;
u32 reg;
+ if (smmu->impl && unlikely(smmu->impl->tlb_sync))
+ return smmu->impl->tlb_sync(smmu, page, sync, status);
+
arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
@@ -268,9 +272,8 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
-static void arm_smmu_tlb_sync_context(void *cookie)
+static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
unsigned long flags;
@@ -280,13 +283,6 @@ static void arm_smmu_tlb_sync_context(void *cookie)
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
-static void arm_smmu_tlb_sync_vmid(void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
-
- arm_smmu_tlb_sync_global(smmu_domain->smmu);
-}
-
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
@@ -297,7 +293,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
wmb();
arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
- arm_smmu_tlb_sync_context(cookie);
+ arm_smmu_tlb_sync_context(smmu_domain);
}
static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -312,18 +308,16 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
}
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- int reg, idx = cfg->cbndx;
+ int idx = cfg->cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova = (iova >> 12) << 12;
iova |= cfg->asid;
@@ -342,16 +336,15 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
}
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- int reg, idx = smmu_domain->cfg.cbndx;
+ int idx = smmu_domain->cfg.cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
do {
if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
@@ -362,85 +355,98 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
} while (size -= granule);
}
-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- wmb();
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVA);
+ arm_smmu_tlb_sync_context(cookie);
+}
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+}
- ops->tlb_inv_range(iova, size, granule, false, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
+ arm_smmu_tlb_sync_context(cookie);
+}
- ops->tlb_inv_range(iova, size, granule, true, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
}
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_context_s2(cookie);
+}
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
- ops->tlb_inv_range(iova, granule, granule, true, cookie);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
-static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s1,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s1,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s2,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -472,6 +478,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
struct arm_smmu_device *smmu = dev;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
@@ -481,11 +489,19 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
if (!gfsr)
return IRQ_NONE;
- dev_err_ratelimited(smmu->dev,
- "Unexpected global fault, this could be serious\n");
- dev_err_ratelimited(smmu->dev,
- "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
- gfsr, gfsynr0, gfsynr1, gfsynr2);
+ if (__ratelimit(&rs)) {
+ if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
+ (gfsr & sGFSR_USF))
+ dev_err(smmu->dev,
+ "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
+ (u16)gfsynr1);
+ else
+ dev_err(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+ }
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
return IRQ_HANDLED;
@@ -536,8 +552,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
} else {
- cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
- cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+ cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
}
}
}
@@ -770,7 +786,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
- .tlb = &smmu_domain->flush_ops->tlb,
+ .tlb = smmu_domain->flush_ops,
.iommu_dev = smmu->dev,
};
@@ -1039,8 +1055,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (!group)
- group = ERR_PTR(-ENOMEM);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_err;
@@ -1154,13 +1168,27 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+ /*
+ * Set up an autosuspend delay to avoid bouncing runpm state.
+ * Otherwise, if a driver for a suspended consumer device
+ * unmaps buffers, it will runpm resume/suspend for each one.
+ *
+ * For example, when used by a GPU device, when an application
+ * or game exits, it can trigger unmapping 100s or 1000s of
+ * buffers. With a runpm cycle for each buffer, that adds up
+ * to 5-10sec worth of reprogramming the context bank, while
+ * the system appears to be locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1200,7 +1228,7 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
+ smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
@@ -1211,11 +1239,16 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->flush_ops) {
- arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb_sync(smmu_domain);
- arm_smmu_rpm_put(smmu);
- }
+ if (!smmu)
+ return;
+
+ arm_smmu_rpm_get(smmu);
+ if (smmu->version == ARM_SMMU_V2 ||
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_tlb_sync_context(smmu_domain);
+ else
+ arm_smmu_tlb_sync_global(smmu);
+ arm_smmu_rpm_put(smmu);
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2062,10 +2095,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
for (i = 0; i < num_irqs; ++i) {
int irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq index %d\n", i);
+ if (irq < 0)
return -ENODEV;
- }
smmu->irqs[i] = irq;
}
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index b19b6cae9b5e..62b9f0cec49b 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -79,6 +79,8 @@
#define ID7_MINOR GENMASK(3, 0)
#define ARM_SMMU_GR0_sGFSR 0x48
+#define sGFSR_USF BIT(1)
+
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
@@ -304,17 +306,10 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_BYPASS,
};
-struct arm_smmu_flush_ops {
- struct iommu_flush_ops tlb;
- void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
- void (*tlb_sync)(void *cookie);
-};
-
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- const struct arm_smmu_flush_ops *flush_ops;
+ const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
bool non_strict;
@@ -335,6 +330,8 @@ struct arm_smmu_impl {
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain);
+ void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
+ int status);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -398,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+
+int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f321279baf9e..0cc702a70a96 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
+static int iommu_dma_deferred_attach(struct device *dev,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = domain->ops;
+
+ if (!is_kdump_kernel())
+ return 0;
+
+ if (unlikely(ops->is_attach_deferred &&
+ ops->is_attach_deferred(domain, dev)))
+ return iommu_attach_device(domain, dev);
+
+ return 0;
+}
+
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -405,8 +421,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
iova_len = roundup_pow_of_two(iova_len);
- if (dev->bus_dma_mask)
- dma_limit &= dev->bus_dma_mask;
+ dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
@@ -462,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, dma_addr_t dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -470,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return DMA_MAPPING_ERROR;
+
size = iova_align(iovad, size + iova_off);
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR;
}
@@ -579,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return NULL;
+
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
@@ -611,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
@@ -659,7 +680,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_cpu(dev, phys, size, dir);
+ arch_sync_dma_for_cpu(phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -671,7 +692,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -685,7 +706,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -699,7 +720,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return;
for_each_sg(sgl, sg, nelems, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -711,10 +732,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
- dma_handle =__iommu_dma_map(dev, phys, size, prot);
+ dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_handle;
}
@@ -821,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return 0;
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -871,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+ if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
@@ -911,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1017,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr)
return NULL;
- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+ dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
return NULL;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index eecd6a421667..3acfa6a25fa2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void)
}
#ifdef CONFIG_X86
- if (!ret)
+ if (!ret) {
x86_init.iommu.iommu_init = intel_iommu_init;
+ x86_platform.iommu_shutdown = intel_iommu_shutdown;
+ }
+
#endif
if (dmar_tbl) {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 9c94e16fb127..186ff5cc975c 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
- int prot)
+ int prot, gfp_t gfp)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6db6d969e31c..0c8d81f56a30 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-/*
- * find_domain
- * Note: we use struct device->archdata.iommu stores the info
- */
static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
+ dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
+ return NULL;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return NULL;
+}
+
+static struct dmar_domain *deferred_attach_domain(struct device *dev)
+{
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
struct iommu_domain *domain;
@@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev)
intel_iommu_attach_device(domain, dev);
}
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
-
- if (likely(info))
- return info->domain;
- return NULL;
+ return find_domain(dev);
}
static inline struct device_domain_info *
@@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return 0;
@@ -3827,7 +3832,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0;
int ret;
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR;
@@ -4314,13 +4319,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
+ int ret;
+
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ ret = arch_rmrr_sanity_check(rmrr);
+ if (ret)
+ return ret;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
goto out;
rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
+
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
@@ -4759,6 +4770,26 @@ static void intel_disable_iommus(void)
iommu_disable_translation(iommu);
}
+void intel_iommu_shutdown(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ if (no_iommu || dmar_disabled)
+ return;
+
+ down_write(&dmar_global_lock);
+
+ /* Disable PMRs explicitly here. */
+ for_each_iommu(iommu, drhd)
+ iommu_disable_protect_mem_regions(iommu);
+
+ /* Make sure the IOMMUs are switched off */
+ intel_disable_iommus();
+
+ up_write(&dmar_global_lock);
+}
+
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -5440,7 +5471,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+ size_t size, int iommu_prot, gfp_t gfp)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 4cb394937700..7c3bd2c3cdca 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
-static const struct iommu_flush_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index ca51036aa53c..bdf47f745268 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -32,39 +32,31 @@
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
- * For consistency with the architecture, we always consider
- * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
- */
-#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
-
-/*
* Calculate the right shift amount to get to the portion describing level l
* in a virtual address mapped by the pagetable in d.
*/
#define ARM_LPAE_LVL_SHIFT(l,d) \
- ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
- * (d)->bits_per_level) + (d)->pg_shift)
+ (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \
+ ilog2(sizeof(arm_lpae_iopte)))
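+/*
+ * A worked example (illustrative, not part of the interface): with 4KiB
+ * granules, sizeof(arm_lpae_iopte) is 8 so the ilog2() term is 3 and
+ * bits_per_level is 12 - 3 = 9. Level 3 then shifts by (4 - 3) * 9 + 3 = 12,
+ * level 2 by 21 and level 1 by 30, matching the IA bit ranges [20:12],
+ * [29:21] and [38:30] of the AArch64 4KiB translation granule.
+ */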
-#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
-
-#define ARM_LPAE_PAGES_PER_PGD(d) \
- DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
+#define ARM_LPAE_GRANULE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
+#define ARM_LPAE_PGD_SIZE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
/*
* Calculate the index at level l used to map virtual address a using the
* pagetable in d.
*/
#define ARM_LPAE_PGD_IDX(l,d) \
- ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+ ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
#define ARM_LPAE_LVL_IDX(a,l,d) \
(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
-#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
- ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d))
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT 0
@@ -180,10 +172,9 @@
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
- int levels;
- size_t pgd_size;
- unsigned long pg_shift;
- unsigned long bits_per_level;
+ int pgd_bits;
+ int start_level;
+ int bits_per_level;
void *pgd;
};
@@ -213,7 +204,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
{
u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
- if (data->pg_shift < 16)
+ if (ARM_LPAE_GRANULE(data) < SZ_64K)
return paddr;
/* Rotate the packed high-order bits back to the top */
@@ -392,7 +383,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size && (size & cfg->pgsize_bitmap))
+ if (size == block_size)
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
@@ -464,7 +455,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_QCOM_SYS_CACHE)
+ else if (prot & IOMMU_SYS_CACHE_ONLY)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
@@ -479,16 +470,19 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int ret, lvl = ARM_LPAE_START_LVL(data);
+ int ret, lvl = data->start_level;
arm_lpae_iopte prot;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
- paddr >= (1ULL << data->iop.cfg.oas)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return -EINVAL;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -508,8 +502,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *start, *end;
unsigned long table_size;
- if (lvl == ARM_LPAE_START_LVL(data))
- table_size = data->pgd_size;
+ if (lvl == data->start_level)
+ table_size = ARM_LPAE_PGD_SIZE(data);
else
table_size = ARM_LPAE_GRANULE(data);
@@ -537,7 +531,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, data->start_level, data->pgd);
kfree(data);
}
@@ -652,13 +646,16 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return 0;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias))
return 0;
- return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -666,7 +663,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
+ int lvl = data->start_level;
do {
/* Valid IOPTE pointer? */
@@ -743,8 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
- unsigned long va_bits, pgd_bits;
struct arm_lpae_io_pgtable *data;
+ int levels, va_bits, pg_shift;
arm_lpae_restrict_pgsizes(cfg);
@@ -766,15 +763,15 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
if (!data)
return NULL;
- data->pg_shift = __ffs(cfg->pgsize_bitmap);
- data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
- va_bits = cfg->ias - data->pg_shift;
- data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ va_bits = cfg->ias - pg_shift;
+ levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ data->start_level = ARM_LPAE_MAX_LEVELS - levels;
/* Calculate the actual size of our pgd (without concatenation) */
- pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
- data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+ data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_lpae_map,
@@ -864,11 +861,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_INC_OWBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
- cfg->arm_lpae_s1_cfg.mair[0] = reg;
- cfg->arm_lpae_s1_cfg.mair[1] = 0;
+ cfg->arm_lpae_s1_cfg.mair = reg;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -903,13 +900,13 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
* Concatenate PGDs at level 1 if possible in order to reduce
* the depth of the stage-2 walk.
*/
- if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ if (data->start_level == 0) {
unsigned long pgd_pages;
- pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_size = pgd_pages << data->pg_shift;
- data->levels--;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
}
@@ -919,7 +916,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
- sl = ARM_LPAE_START_LVL(data);
+ sl = data->start_level;
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -965,7 +962,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -1034,9 +1032,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* Mali seems to need a full 4-level table regardless of IAS */
- if (data->levels < ARM_LPAE_MAX_LEVELS) {
- data->levels = ARM_LPAE_MAX_LEVELS;
- data->pgd_size = sizeof(arm_lpae_iopte);
+ if (data->start_level > 0) {
+ data->start_level = 0;
+ data->pgd_bits = 0;
}
/*
* MEMATTR: Mali has no actual notion of a non-cacheable type, so the
@@ -1053,7 +1051,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
+ cfg);
if (!data->pgd)
goto out_free_data;
@@ -1097,22 +1096,23 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
@@ -1131,9 +1131,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
cfg->pgsize_bitmap, cfg->ias);
- pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
- data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
+ ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
+ ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i) ({ \
@@ -1145,7 +1145,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
- static const enum io_pgtable_fmt fmts[] = {
+ static const enum io_pgtable_fmt fmts[] __initconst = {
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
};
@@ -1244,13 +1244,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
static int __init arm_lpae_do_selftests(void)
{
- static const unsigned long pgsize[] = {
+ static const unsigned long pgsize[] __initconst = {
SZ_4K | SZ_2M | SZ_1G,
SZ_16K | SZ_32M,
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] = {
+ static const unsigned int ias[] __initconst = {
32, 36, 40, 42, 44, 48,
};
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
new file mode 100644
index 000000000000..0f8dd377aada
--- /dev/null
+++ b/drivers/iommu/ioasid.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O Address Space ID allocator. There is one global IOASID space, split into
+ * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
+ * free IOASIDs with ioasid_alloc and ioasid_free.
+ */
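+/*
+ * A minimal usage sketch (the set name, ID range and private pointer below
+ * are illustrative only):
+ *
+ *	DECLARE_IOASID_SET(my_set);
+ *
+ *	ioasid_t id = ioasid_alloc(&my_set, 1, 1 << 20, my_data);
+ *	if (id == INVALID_IOASID)
+ *		return -ENOSPC;
+ *	...
+ *	ioasid_free(id);
+ */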
+#include <linux/ioasid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+struct ioasid_data {
+ ioasid_t id;
+ struct ioasid_set *set;
+ void *private;
+ struct rcu_head rcu;
+};
+
+/*
+ * struct ioasid_allocator_data - Internal data structure to hold information
+ * about an allocator. There are two types of allocators:
+ *
+ * - Default allocator always has its own XArray to track the IOASIDs allocated.
+ * - Custom allocators may share allocation helpers with different private data.
+ * Custom allocators that share the same helper functions also share the same
+ * XArray.
+ * Rules:
+ * 1. The default allocator is always available, not dynamically registered.
+ * This prevents race conditions with early boot code that wants to register
+ * custom allocators or allocate IOASIDs.
+ * 2. Custom allocators take precedence over the default allocator.
+ * 3. When all custom allocators sharing the same helper functions are
+ * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
+ * freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
+ * 4. When switching between custom allocators sharing the same helper
+ * functions, outstanding IOASIDs are preserved.
+ * 5. When switching between a custom allocator and the default allocator,
+ * all IOASIDs must be freed to ensure unadulterated space for the new
+ * allocator.
+ *
+ * @ops: allocator helper functions and its data
+ * @list: registered custom allocators
+ * @slist: allocators that share the same ops but have different data
+ * @flags: attributes of the allocator
+ * @xa: xarray holds the IOASID space
+ * @rcu: used for kfree_rcu when unregistering allocator
+ */
+struct ioasid_allocator_data {
+ struct ioasid_allocator_ops *ops;
+ struct list_head list;
+ struct list_head slist;
+#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
+ unsigned long flags;
+ struct xarray xa;
+ struct rcu_head rcu;
+};
+
+static DEFINE_SPINLOCK(ioasid_allocator_lock);
+static LIST_HEAD(allocators_list);
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
+static void default_free(ioasid_t ioasid, void *opaque);
+
+static struct ioasid_allocator_ops default_ops = {
+ .alloc = default_alloc,
+ .free = default_free,
+};
+
+static struct ioasid_allocator_data default_allocator = {
+ .ops = &default_ops,
+ .flags = 0,
+ .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
+};
+
+static struct ioasid_allocator_data *active_allocator = &default_allocator;
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
+{
+ ioasid_t id;
+
+ if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
+ pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
+ return INVALID_IOASID;
+ }
+
+ return id;
+}
+
+static void default_free(ioasid_t ioasid, void *opaque)
+{
+ struct ioasid_data *ioasid_data;
+
+ ioasid_data = xa_erase(&default_allocator.xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+}
+
+/* Allocate and initialize a new custom allocator with its helper functions */
+static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+
+ ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
+ if (!ia_data)
+ return NULL;
+
+ xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&ia_data->slist);
+ ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
+ ia_data->ops = ops;
+
+ /* For tracking custom allocators that share the same ops */
+ list_add_tail(&ops->list, &ia_data->slist);
+
+ return ia_data;
+}
+
+static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
+{
+ return (a->free == b->free) && (a->alloc == b->alloc);
+}
+
+/**
+ * ioasid_register_allocator - register a custom allocator
+ * @ops: the custom allocator ops to be registered
+ *
+ * Custom allocators take precedence over the default xarray-based allocator.
+ * Private data associated with IOASIDs allocated by a custom allocator is
+ * managed by the IOASID framework in the same way as data stored in the
+ * xarray by the default allocator.
+ *
+ * There can be multiple allocators registered but only one is active. In case
+ * of runtime removal of a custom allocator, the next one is activated based
+ * on the registration ordering.
+ *
+ * Multiple allocators can share the same alloc() function; in this case the
+ * IOASID space is shared.
+ */
+int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+ struct ioasid_allocator_data *pallocator;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+
+ ia_data = ioasid_alloc_allocator(ops);
+ if (!ia_data) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /*
+ * No particular preference; we activate the first one registered and keep
+ * any later registered allocators in a list in case the first one gets
+ * removed due to hotplug.
+ */
+ if (list_empty(&allocators_list)) {
+ WARN_ON(active_allocator != &default_allocator);
+ /* Use this new allocator if default is not active */
+ if (xa_empty(&active_allocator->xa)) {
+ rcu_assign_pointer(active_allocator, ia_data);
+ list_add_tail(&ia_data->list, &allocators_list);
+ goto out_unlock;
+ }
+ pr_warn("Default allocator active with outstanding IOASID\n");
+ ret = -EAGAIN;
+ goto out_free;
+ }
+
+ /* Check if the allocator is already registered */
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (pallocator->ops == ops) {
+ pr_err("IOASID allocator already registered\n");
+ ret = -EEXIST;
+ goto out_free;
+ } else if (use_same_ops(pallocator->ops, ops)) {
+ /*
+ * If the new allocator shares the same ops,
+ * then they will share the same IOASID space.
+ * We should put them under the same xarray.
+ */
+ list_add_tail(&ops->list, &pallocator->slist);
+ goto out_free;
+ }
+ }
+ list_add_tail(&ia_data->list, &allocators_list);
+
+ spin_unlock(&ioasid_allocator_lock);
+ return 0;
+out_free:
+ kfree(ia_data);
+out_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_register_allocator);
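+
+/*
+ * A registration sketch (the my_* names and the backend call are assumptions
+ * for illustration, not an existing API):
+ *
+ *	static ioasid_t my_alloc(ioasid_t min, ioasid_t max, void *pdata)
+ *	{
+ *		return my_backend_alloc(pdata, min, max);
+ *	}
+ *
+ *	static struct ioasid_allocator_ops my_ops = {
+ *		.alloc	= my_alloc,
+ *		.free	= my_free,
+ *		.pdata	= my_backend,
+ *	};
+ *
+ *	ioasid_register_allocator(&my_ops);
+ */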
+
+/**
+ * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
+ * @ops: the custom allocator to be removed
+ *
+ * Remove an allocator from the list and activate the next allocator in
+ * registration order, or revert to the default allocator once all custom
+ * allocators are unregistered and no IOASIDs are outstanding.
+ */
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *pallocator;
+ struct ioasid_allocator_ops *sops;
+
+ spin_lock(&ioasid_allocator_lock);
+ if (list_empty(&allocators_list)) {
+ pr_warn("No custom IOASID allocators active!\n");
+ goto exit_unlock;
+ }
+
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (!use_same_ops(pallocator->ops, ops))
+ continue;
+
+ if (list_is_singular(&pallocator->slist)) {
+ /* No shared helper functions */
+ list_del(&pallocator->list);
+ /*
+ * All IOASIDs should have been freed before
+ * the last allocator that shares the same ops
+ * is unregistered.
+ */
+ WARN_ON(!xa_empty(&pallocator->xa));
+ if (list_empty(&allocators_list)) {
+ pr_info("No custom IOASID allocators, switch to default.\n");
+ rcu_assign_pointer(active_allocator, &default_allocator);
+ } else if (pallocator == active_allocator) {
+ rcu_assign_pointer(active_allocator,
+ list_first_entry(&allocators_list,
+ struct ioasid_allocator_data, list));
+ pr_info("IOASID allocator changed");
+ }
+ kfree_rcu(pallocator, rcu);
+ break;
+ }
+ /*
+ * Find the matching shared ops to delete,
+ * but keep outstanding IOASIDs
+ */
+ list_for_each_entry(sops, &pallocator->slist, list) {
+ if (sops == ops) {
+ list_del(&ops->list);
+ break;
+ }
+ }
+ break;
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
+
+/**
+ * ioasid_set_data - Set private data for an allocated ioasid
+ * @ioasid: the ID to set data
+ * @data: the private data
+ *
+ * For an IOASID that is already allocated, private data can be set
+ * via this API. Future lookups can be done via ioasid_find().
+ */
+int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ struct ioasid_data *ioasid_data;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (ioasid_data)
+ rcu_assign_pointer(ioasid_data->private, data);
+ else
+ ret = -ENOENT;
+ spin_unlock(&ioasid_allocator_lock);
+
+ /*
+ * Wait for readers to stop accessing the old private data, so the
+ * caller can free it.
+ */
+ if (!ret)
+ synchronize_rcu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_set_data);
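+
+/*
+ * A usage sketch (illustrative): a caller that allocated an IOASID with a
+ * NULL private pointer can attach state later and look it up again:
+ *
+ *	ioasid_set_data(id, my_state);
+ *	...
+ *	state = ioasid_find(&my_set, id, NULL);
+ */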
+
+/**
+ * ioasid_alloc - Allocate an IOASID
+ * @set: the IOASID set
+ * @min: the minimum ID (inclusive)
+ * @max: the maximum ID (inclusive)
+ * @private: data private to the caller
+ *
+ * Allocate an ID between @min and @max. The @private pointer is stored
+ * internally and can be retrieved with ioasid_find().
+ *
+ * Return: the allocated ID on success, or %INVALID_IOASID on failure.
+ */
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private)
+{
+ struct ioasid_data *data;
+ void *adata;
+ ioasid_t id;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return INVALID_IOASID;
+
+ data->set = set;
+ data->private = private;
+
+ /*
+ * A custom allocator needs its allocator data to perform platform-specific
+ * operations.
+ */
+ spin_lock(&ioasid_allocator_lock);
+ adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
+ id = active_allocator->ops->alloc(min, max, adata);
+ if (id == INVALID_IOASID) {
+ pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
+ goto exit_free;
+ }
+
+ if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
+ xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
+ /* Custom allocator needs framework to store and track allocation results */
+ pr_err("Failed to alloc ioasid from %d\n", id);
+ active_allocator->ops->free(id, active_allocator->ops->pdata);
+ goto exit_free;
+ }
+ data->id = id;
+
+ spin_unlock(&ioasid_allocator_lock);
+ return id;
+exit_free:
+ spin_unlock(&ioasid_allocator_lock);
+ kfree(data);
+ return INVALID_IOASID;
+}
+EXPORT_SYMBOL_GPL(ioasid_alloc);
+
+/**
+ * ioasid_free - Free an IOASID
+ * @ioasid: the ID to remove
+ */
+void ioasid_free(ioasid_t ioasid)
+{
+ struct ioasid_data *ioasid_data;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (!ioasid_data) {
+ pr_err("Trying to free unknown IOASID %u\n", ioasid);
+ goto exit_unlock;
+ }
+
+ active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
+ /* Custom allocator needs additional steps to free the xa element */
+ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
+ ioasid_data = xa_erase(&active_allocator->xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_free);
+
+/**
+ * ioasid_find - Find IOASID data
+ * @set: the IOASID set
+ * @ioasid: the IOASID to find
+ * @getter: function to call on the found object
+ *
+ * The optional getter function allows the caller to take a reference to the
+ * found object under the RCU read lock. The getter can also check whether the
+ * object is still valid: if @getter returns false, the object is considered
+ * invalid and NULL is returned.
+ *
+ * If the IOASID exists, return the private pointer passed to ioasid_alloc.
+ * Private data can be NULL if not set. Return an error if the IOASID is not
+ * found, or if @set is not NULL and the IOASID does not belong to the set.
+ */
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ void *priv;
+ struct ioasid_data *ioasid_data;
+ struct ioasid_allocator_data *idata;
+
+ rcu_read_lock();
+ idata = rcu_dereference(active_allocator);
+ ioasid_data = xa_load(&idata->xa, ioasid);
+ if (!ioasid_data) {
+ priv = ERR_PTR(-ENOENT);
+ goto unlock;
+ }
+ if (set && ioasid_data->set != set) {
+ /* data found but does not belong to the set */
+ priv = ERR_PTR(-EACCES);
+ goto unlock;
+ }
+ /* Now that the IOASID and its set are verified, we can return the private data */
+ priv = rcu_dereference(ioasid_data->private);
+ if (getter && !getter(priv))
+ priv = NULL;
+unlock:
+ rcu_read_unlock();
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(ioasid_find);
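+
+/*
+ * A getter sketch (illustrative, assuming the private data is refcounted):
+ * the getter runs under the RCU read lock and can refuse an object that is
+ * going away:
+ *
+ *	static bool get_ctx(void *priv)
+ *	{
+ *		struct my_ctx *ctx = priv;
+ *
+ *		return refcount_inc_not_zero(&ctx->refs);
+ *	}
+ *
+ *	ctx = ioasid_find(&my_set, id, get_ctx);
+ */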
+
+MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d658c7c6a2ab..db7bfd4f2d20 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1665,6 +1665,36 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
+int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ if (unlikely(!domain->ops->cache_invalidate))
+ return -ENODEV;
+
+ return domain->ops->cache_invalidate(domain, dev, inv_info);
+}
+EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
+
+int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ if (unlikely(!domain->ops->sva_bind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_bind_gpasid(domain, dev, data);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
+
+int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ if (unlikely(!domain->ops->sva_unbind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_unbind_gpasid(dev, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
+
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1854,8 +1884,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -1892,8 +1922,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
+ ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
- ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1913,8 +1943,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ might_sleep();
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map);
+int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_atomic);
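+
+/*
+ * A usage sketch (illustrative): callers that may sleep keep using
+ * iommu_map(), while callers in atomic context (for example, mapping under
+ * a spinlock) use the new variant:
+ *
+ *	spin_lock_irqsave(&lock, flags);
+ *	ret = iommu_map_atomic(domain, iova, paddr, SZ_4K,
+ *			       IOMMU_READ | IOMMU_WRITE);
+ *	spin_unlock_irqrestore(&lock, flags);
+ */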
+
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -1991,8 +2035,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
@@ -2003,7 +2048,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = iommu_map(domain, iova + mapped, start, len, prot);
+ ret = __iommu_map(domain, iova + mapped, start,
+ len, prot, gfp);
+
if (ret)
goto out_err;
@@ -2031,8 +2078,22 @@ out_err:
return 0;
}
+
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ might_sleep();
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map_sg);
+size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
+
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2639fc718117..d02edd2751f3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -50,6 +50,9 @@ struct ipmmu_features {
bool twobit_imttbcr_sl0;
bool reserved_context;
bool cache_snoop;
+ unsigned int ctx_offset_base;
+ unsigned int ctx_offset_stride;
+ unsigned int utlb_offset_base;
};
struct ipmmu_vmsa_device {
@@ -99,125 +102,49 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IM_NS_ALIAS_OFFSET 0x800
-#define IM_CTX_SIZE 0x40
-
-#define IMCTR 0x0000
-#define IMCTR_TRE (1 << 17)
-#define IMCTR_AFE (1 << 16)
-#define IMCTR_RTSEL_MASK (3 << 4)
-#define IMCTR_RTSEL_SHIFT 4
-#define IMCTR_TREN (1 << 3)
-#define IMCTR_INTEN (1 << 2)
-#define IMCTR_FLUSH (1 << 1)
-#define IMCTR_MMUEN (1 << 0)
-
-#define IMCAAR 0x0004
-
-#define IMTTBCR 0x0008
-#define IMTTBCR_EAE (1 << 31)
-#define IMTTBCR_PMB (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_TSZ1_MASK (7 << 16)
-#define IMTTBCR_TSZ1_SHIFT 16
-#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
+/* MMU "context" registers */
+#define IMCTR 0x0000 /* R-Car Gen2/3 */
+#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */
+#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
+
+#define IMTTBCR 0x0008 /* R-Car Gen2/3 */
+#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_LVL_2 (0 << 4)
-#define IMTTBCR_SL0_LVL_1 (1 << 4)
-#define IMTTBCR_TSZ0_MASK (7 << 0)
-#define IMTTBCR_TSZ0_SHIFT O
-
-#define IMBUSCR 0x000c
-#define IMBUSCR_DVM (1 << 2)
-#define IMBUSCR_BUSSEL_SYS (0 << 0)
-#define IMBUSCR_BUSSEL_CCI (1 << 0)
-#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
-#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
-#define IMBUSCR_BUSSEL_MASK (3 << 0)
-
-#define IMTTLBR0 0x0010
-#define IMTTUBR0 0x0014
-#define IMTTLBR1 0x0018
-#define IMTTUBR1 0x001c
-
-#define IMSTR 0x0020
-#define IMSTR_ERRLVL_MASK (3 << 12)
-#define IMSTR_ERRLVL_SHIFT 12
-#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
-#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
-#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
-#define IMSTR_ERRCODE_MASK (7 << 8)
-#define IMSTR_MHIT (1 << 4)
-#define IMSTR_ABORT (1 << 2)
-#define IMSTR_PF (1 << 1)
-#define IMSTR_TF (1 << 0)
-
-#define IMMAIR0 0x0028
-#define IMMAIR1 0x002c
-#define IMMAIR_ATTR_MASK 0xff
-#define IMMAIR_ATTR_DEVICE 0x04
-#define IMMAIR_ATTR_NC 0x44
-#define IMMAIR_ATTR_WBRWA 0xff
-#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
-#define IMMAIR_ATTR_IDX_NC 0
-#define IMMAIR_ATTR_IDX_WBRWA 1
-#define IMMAIR_ATTR_IDX_DEV 2
-
-#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
-#define IMEUAR 0x0034 /* R-Car Gen3 only */
-
-#define IMPCTR 0x0200
-#define IMPSTR 0x0208
-#define IMPEAR 0x020c
-#define IMPMBA(n) (0x0280 + ((n) * 4))
-#define IMPMBD(n) (0x02c0 + ((n) * 4))
+#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */
+
+#define IMBUSCR 0x000c /* R-Car Gen2 only */
+#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */
+#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */
+
+#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */
+#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */
+
+#define IMSTR 0x0020 /* R-Car Gen2/3 */
+#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */
+#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */
+#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */
+#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */
+#define IMMAIR0 0x0028 /* R-Car Gen2/3 */
+
+#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
+
+/* uTLB registers */
#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
-#define IMUCTR0(n) (0x0300 + ((n) * 16))
-#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16))
-#define IMUCTR_FIXADDEN (1 << 31)
-#define IMUCTR_FIXADD_MASK (0xff << 16)
-#define IMUCTR_FIXADD_SHIFT 16
-#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
-#define IMUCTR_TTSEL_PMB (8 << 4)
-#define IMUCTR_TTSEL_MASK (15 << 4)
-#define IMUCTR_FLUSH (1 << 1)
-#define IMUCTR_MMUEN (1 << 0)
+#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */
+#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */
+#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
-#define IMUASID0(n) (0x0308 + ((n) * 16))
-#define IMUASID32(n) (0x0608 + (((n) - 32) * 16))
-#define IMUASID_ASID8_MASK (0xff << 8)
-#define IMUASID_ASID8_SHIFT 8
-#define IMUASID_ASID0_MASK (0xff << 0)
-#define IMUASID_ASID0_SHIFT 0
+#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */
/* -----------------------------------------------------------------------------
* Root device handling
@@ -264,29 +191,61 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
iowrite32(data, mmu->base + offset);
}
+static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return mmu->features->ctx_offset_base +
+ context_id * mmu->features->ctx_offset_stride + reg;
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
+}
+
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg)
{
- return ipmmu_read(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg);
+ return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}
static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
if (domain->mmu != domain->mmu->root)
- ipmmu_write(domain->mmu,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
+}
+
+static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
+{
+ return mmu->features->utlb_offset_base + reg;
+}
+
+static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
+}
+
+static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}
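
Annotation: the accessors added above reduce every context and uTLB register access to base + index * stride + reg arithmetic driven by the per-SoC feature struct. A minimal userspace sketch of that computation, assuming the 0x40 context stride both feature structs in this patch use (offsets for any future SoC are an assumption, not taken from this diff):

#include <stdio.h>

/* toy mirror of the fields added to struct ipmmu_features */
struct features {
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

static unsigned int ctx_reg(const struct features *f,
			    unsigned int context_id, unsigned int reg)
{
	/* same formula as ipmmu_ctx_reg() in the hunk above */
	return f->ctx_offset_base + context_id * f->ctx_offset_stride + reg;
}

int main(void)
{
	struct features gen3 = { .ctx_offset_base = 0,
				 .ctx_offset_stride = 0x40,
				 .utlb_offset_base = 0 };

	/* IMCTR (reg 0x0000) of context 3: 0 + 3 * 0x40 + 0 = 0xc0 */
	printf("ctx 3 IMCTR at 0x%x\n", ctx_reg(&gen3, 3, 0x0000));
	return 0;
}
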
/* -----------------------------------------------------------------------------
@@ -334,11 +293,10 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
*/
/* TODO: What should we set the ASID to ? */
- ipmmu_write(mmu, IMUASID(utlb), 0);
+ ipmmu_imuasid_write(mmu, utlb, 0);
/* TODO: Do we need to flush the microTLB ? */
- ipmmu_write(mmu, IMUCTR(utlb),
- IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
- IMUCTR_MMUEN);
+ ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
+ IMUCTR_FLUSH | IMUCTR_MMUEN);
mmu->utlb_ctx[utlb] = domain->context_id;
}
@@ -350,7 +308,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
{
struct ipmmu_vmsa_device *mmu = domain->mmu;
- ipmmu_write(mmu, IMUCTR(utlb), 0);
+ ipmmu_imuctr_write(mmu, utlb, 0);
mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}
@@ -438,7 +396,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
/* MAIR0 */
ipmmu_ctx_write_root(domain, IMMAIR0,
- domain->cfg.arm_lpae_s1_cfg.mair[0]);
+ domain->cfg.arm_lpae_s1_cfg.mair);
/* IMBUSCR */
if (domain->mmu->features->setup_imbuscr)
@@ -724,7 +682,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -783,6 +741,7 @@ static int ipmmu_init_platform_device(struct device *dev,
static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", },
{ .soc_id = "r8a7796", },
@@ -794,6 +753,7 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
};
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
{ .soc_id = "r8a77965", },
@@ -985,7 +945,7 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
/* Disable all contexts. */
for (i = 0; i < mmu->num_ctx; ++i)
- ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+ ipmmu_ctx_write(mmu, i, IMCTR, 0);
}
static const struct ipmmu_features ipmmu_features_default = {
@@ -997,6 +957,9 @@ static const struct ipmmu_features ipmmu_features_default = {
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
.cache_snoop = true,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -1008,6 +971,9 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
.cache_snoop = false,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct of_device_id ipmmu_of_ids[] = {
@@ -1018,6 +984,9 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a774a1",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a774b1",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index be99d408cf35..93f14bca26ee 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -504,7 +504,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t len, int prot)
+ phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 67a483c1a935..6fc1f5ecf91e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -101,8 +101,6 @@
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
struct mtk_iommu_domain {
- spinlock_t pgtlock; /* lock for page table */
-
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -173,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
}
}
-static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- void *cookie)
+static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
struct mtk_iommu_data *data = cookie;
+ unsigned long flags;
+ int ret;
+ u32 tmp;
for_each_m4u(data) {
+ spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
@@ -188,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE);
- data->tlb_flush_active = true;
- }
-}
-
-static void mtk_iommu_tlb_sync(void *cookie)
-{
- struct mtk_iommu_data *data = cookie;
- int ret;
- u32 tmp;
-
- for_each_m4u(data) {
- /* Avoid timing out if there's nothing to wait for */
- if (!data->tlb_flush_active)
- return;
+ /* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
- tmp, tmp != 0, 10, 100000);
+ tmp, tmp != 0, 10, 1000);
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
@@ -212,35 +200,24 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- data->tlb_flush_active = false;
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
}
}
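
Annotation: the reworked range flush serializes each invalidate under tlb_lock and polls REG_MMU_CPE_DONE with the reduced 1 ms budget, falling back to a full flush on timeout. A freestanding sketch of that poll-then-fallback shape (the fake register and the iteration budget are assumptions, not MTK hardware behaviour):

#include <stdio.h>

static volatile unsigned int cpe_done;	/* stands in for REG_MMU_CPE_DONE */

/* poll until the done flag is set or the iteration budget runs out */
static int poll_cpe_done(int budget)
{
	while (budget--) {
		if (cpe_done)
			return 0;
	}
	return -1;	/* timed out */
}

int main(void)
{
	cpe_done = 0;	/* pretend the range invalidate never completes */
	if (poll_cpe_done(100))
		printf("partial flush timed out, doing full TLB flush\n");

	cpe_done = 1;	/* now pretend it completes immediately */
	if (poll_cpe_done(100) == 0)
		printf("range flush completed\n");
	return 0;
}
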
-static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
-static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
{
- mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+ struct mtk_iommu_data *data = cookie;
+ struct iommu_domain *domain = &data->m4u_dom->domain;
+
+ iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
- .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
+ .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
+ .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
@@ -316,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- spin_lock_init(&dom->pgtlock);
-
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
@@ -412,22 +387,17 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
- int ret;
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
if (data->enable_4GB)
paddr |= BIT_ULL(32);
- spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
- return ret;
+ /* Synchronize with the tlb_lock */
+ return dom->iop->map(dom->iop, iova, paddr, size, prot);
}
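
Annotation: the "4GB mode" handling kept above is a plain address alias: on affected M4Us, DRAM below 4 GiB is reached through bit 32, so map() sets the bit and iova_to_phys() (further down in this file) clears it again once the result is above the remap base. Sketched with an assumed remap base value:

#include <stdint.h>
#include <stdio.h>

#define REMAP_BASE (1ULL << 32)	/* assumed MTK_IOMMU_4GB_MODE_REMAP_BASE */

int main(void)
{
	uint64_t pa = 0x40001000ULL;		/* CPU-visible physical address */
	uint64_t mapped = pa | (1ULL << 32);	/* what mtk_iommu_map() programs */

	/* reverse translation, as in mtk_iommu_iova_to_phys() */
	uint64_t back = mapped;
	if (back >= REMAP_BASE)
		back &= ~(1ULL << 32);

	printf("pa=%#llx mapped=%#llx back=%#llx\n",
	       (unsigned long long)pa, (unsigned long long)mapped,
	       (unsigned long long)back);
	return 0;
}
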
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -435,25 +405,26 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- unsigned long flags;
- size_t unmapsz;
-
- spin_lock_irqsave(&dom->pgtlock, flags);
- unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
- return unmapsz;
+ return dom->iop->unmap(dom->iop, iova, size, gather);
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ size_t length = gather->end - gather->start;
+
+ if (gather->start == ULONG_MAX)
+ return;
+
+ mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
+ data);
}
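
Annotation: iotlb_sync now flushes exactly the span the core accumulated in the iommu_iotlb_gather; start == ULONG_MAX marks an empty gather, otherwise end - start is the length to invalidate. A toy model of how such a gather grows as pages are queued (a simplified min/max merge; the real helper also flushes early on discontiguous ranges):

#include <limits.h>
#include <stdio.h>

struct gather {
	unsigned long start, end;
	unsigned long pgsize;
};

static void gather_add_page(struct gather *g, unsigned long iova,
			    unsigned long size)
{
	/* simplified: widen the tracked range to cover the new page */
	if (iova < g->start)
		g->start = iova;
	if (iova + size > g->end)
		g->end = iova + size;
	g->pgsize = size;
}

int main(void)
{
	struct gather g = { .start = ULONG_MAX, .end = 0 };

	gather_add_page(&g, 0x1000, 0x1000);
	gather_add_page(&g, 0x2000, 0x1000);

	if (g.start != ULONG_MAX)	/* same emptiness test as above */
		printf("flush [%#lx, %#lx), length %#lx\n",
		       g.start, g.end, g.end - g.start);
	return 0;
}
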
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -461,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
phys_addr_t pa;
- spin_lock_irqsave(&dom->pgtlock, flags);
pa = dom->iop->iova_to_phys(dom->iop, iova);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
@@ -733,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ spin_lock_init(&data->tlb_lock);
list_add_tail(&data->list, &m4ulist);
if (!iommu_present(&platform_bus_type))
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index fc0f16eabacd..ea949a324e33 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -57,7 +57,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
bool enable_4GB;
- bool tlb_flush_active;
+ spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b5efd6dac953..e93b94ecac45 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 614a93aa5305..026ad2b29dcd 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -8,6 +8,8 @@
#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 09c6e1c680db..be551cc34be4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c31e7bc4ccbe..52f38292df5b 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* MAIRs (stage-1 only) */
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[0]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair);
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[1]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
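
Annotation: with arm_lpae_s1_cfg.mair now a single u64, stage-1 init splits it into the two 32-bit MAIR registers by truncation and a 32-bit shift. For example (the attribute encodings below are illustrative values only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative: attr index 0 = 0xff (WB RWA), index 1 = 0x04 (device) */
	uint64_t mair = 0x04ffULL;

	uint32_t mair0 = (uint32_t)mair;	 /* attribute indices 0-3 */
	uint32_t mair1 = (uint32_t)(mair >> 32); /* attribute indices 4-7 */

	printf("MAIR0=%#010x MAIR1=%#010x\n", mair0, mair1);
	return 0;
}
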
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
unsigned long flags;
@@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (IS_ERR_OR_NULL(group))
- return PTR_ERR_OR_ZERO(group);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
iommu_group_put(group);
iommu_device_link(&qcom_iommu->iommu, dev);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4dcbf68dfda4..b33cdd5aad81 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -527,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
int i, err;
err = pm_runtime_get_if_in_use(iommu->dev);
- if (WARN_ON_ONCE(err <= 0))
+ if (!err || WARN_ON_ONCE(err < 0))
return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
@@ -758,7 +758,7 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!dma_dev)
return NULL;
- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return NULL;
if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- return NULL;
+ goto err_free_domain;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -1021,6 +1021,8 @@ err_free_dt:
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+err_free_domain:
+ kfree(rk_domain);
return NULL;
}
@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+ kfree(rk_domain);
}
static int rk_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b0b18e23187..1137f3ddcb85 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -265,7 +265,7 @@ undo_cpu_trans:
}
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3924f7c05544..3fb7ba72507d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct gart_device *gart = gart_handle;
int ret;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7293fc3f796d..63a147b623e6 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
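
Annotation: the smmu_pde_to_dma() fix matters twice: without the pfn_mask, the PDE's attribute bits would leak into the address, and without the dma_addr_t cast the 32-bit shift would truncate addresses at 4 GiB. Demonstrated with an assumed 22-bit PFN field (the real width comes from smmu->pfn_mask and varies per SoC):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pfn_mask = 0x3fffff;		/* assumed 22-bit PFN field */
	uint32_t pde = (7u << 29) | 0x123456;	/* attribute bits | PFN */

	uint32_t broken = pde << 12;		/* old code: attrs leak, high bits lost */
	uint64_t fixed = (uint64_t)(pde & pfn_mask) << 12;

	printf("broken=%#010x fixed=%#llx\n",
	       broken, (unsigned long long)fixed);
	return 0;
}
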
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
static inline void smmu_flush(struct tegra_smmu *smmu)
{
- smmu_readl(smmu, SMMU_CONFIG);
+ smmu_readl(smmu, SMMU_PTB_ASID);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
@@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
unsigned int i;
u32 value;
+ group = tegra_smmu_find_swgroup(smmu, swgroup);
+ if (group) {
+ value = smmu_readl(smmu, group->reg);
+ value &= ~SMMU_ASID_MASK;
+ value |= SMMU_ASID_VALUE(asid);
+ value |= SMMU_ASID_ENABLE;
+ smmu_writel(smmu, value, group->reg);
+ } else {
+ pr_warn("%s group from swgroup %u not found\n", __func__,
+ swgroup);
+ /* No point moving ahead if group was not found */
+ return;
+ }
+
for (i = 0; i < smmu->soc->num_clients; i++) {
const struct tegra_mc_client *client = &smmu->soc->clients[i];
@@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
value |= BIT(client->smmu.bit);
smmu_writel(smmu, value, client->smmu.reg);
}
-
- group = tegra_smmu_find_swgroup(smmu, swgroup);
- if (group) {
- value = smmu_readl(smmu, group->reg);
- value &= ~SMMU_ASID_MASK;
- value |= SMMU_ASID_VALUE(asid);
- value |= SMMU_ASID_ENABLE;
- smmu_writel(smmu, value, group->reg);
- }
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
@@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
@@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3ea9d7682999..315c7cc4f99d 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
*/
static int __viommu_sync_req(struct viommu_dev *viommu)
{
- int ret = 0;
unsigned int len;
size_t write_len;
struct viommu_request *req;
@@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
kfree(req);
}
- return ret;
+ return 0;
}
static int viommu_sync_req(struct viommu_dev *viommu)
@@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
u32 flags;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index ccbb8973a324..697e6a8ccaae 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -370,6 +370,10 @@ config MVEBU_PIC
config MVEBU_SEI
bool
+config LS_EXTIRQ
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ select MFD_SYSCON
+
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
@@ -483,8 +487,6 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
-endmenu
-
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
@@ -496,3 +498,5 @@ config SIFIVE_PLIC
interrupt sources are subordinate to the PLIC.
If you don't know what to do here, say Y.
+
+endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index cc7c43932f16..e806dda690ea 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
+obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index fc75c61233aa..cbf01afcd2a6 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/syscore_ops.h>
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -39,6 +40,11 @@ struct bcm7038_l1_chip {
unsigned int n_words;
struct irq_domain *domain;
struct bcm7038_l1_cpu *cpus[NR_CPUS];
+#ifdef CONFIG_PM_SLEEP
+ struct list_head list;
+ u32 wake_mask[MAX_WORDS];
+#endif
+ u32 irq_fwd_mask[MAX_WORDS];
u8 affinity[MAX_WORDS * IRQS_PER_WORD];
};
@@ -249,6 +255,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
resource_size_t sz;
struct bcm7038_l1_cpu *cpu;
unsigned int i, n_words, parent_irq;
+ int ret;
if (of_address_to_resource(dn, idx, &res))
return -EINVAL;
@@ -262,6 +269,14 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words)
return -EINVAL;
+ ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask",
+ intc->irq_fwd_mask, n_words);
+ if (ret != 0 && ret != -EINVAL) {
+ /* property exists but has the wrong number of words */
+ pr_err("invalid brcm,int-fwd-mask property\n");
+ return -EINVAL;
+ }
+
cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
GFP_KERNEL);
if (!cpu)
@@ -272,8 +287,11 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
return -ENOMEM;
for (i = 0; i < n_words; i++) {
- l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i));
- cpu->mask_cache[i] = 0xffffffff;
+ l1_writel(~intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_set(intc, i));
+ l1_writel(intc->irq_fwd_mask[i],
+ cpu->map_base + reg_mask_clr(intc, i));
+ cpu->mask_cache[i] = ~intc->irq_fwd_mask[i];
}
parent_irq = irq_of_parse_and_map(dn, idx);
@@ -281,12 +299,89 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
+
+ if (of_property_read_bool(dn, "brcm,irq-can-wake"))
+ enable_irq_wake(parent_irq);
+
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+/*
+ * We keep a list of bcm7038_l1_chip used for suspend/resume. This hack is
+ * used because the struct chip_type suspend/resume hooks are not called
+ * unless chip_type is hooked onto a generic_chip. Since this driver does
+ * not use generic_chip, we need to manually hook our resume/suspend to
+ * syscore_ops.
+ */
+static LIST_HEAD(bcm7038_l1_intcs_list);
+static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
+
+static int bcm7038_l1_suspend(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+ u32 val;
+
+ /* Wakeup interrupt should only come from the boot cpu */
+ boot_cpu = cpu_logical_map(0);
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ val = intc->wake_mask[word] | intc->irq_fwd_mask[word];
+ l1_writel(~val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(val,
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+
+ return 0;
+}
+
+static void bcm7038_l1_resume(void)
+{
+ struct bcm7038_l1_chip *intc;
+ int boot_cpu, word;
+
+ boot_cpu = cpu_logical_map(0);
+
+ list_for_each_entry(intc, &bcm7038_l1_intcs_list, list) {
+ for (word = 0; word < intc->n_words; word++) {
+ l1_writel(intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_set(intc, word));
+ l1_writel(~intc->cpus[boot_cpu]->mask_cache[word],
+ intc->cpus[boot_cpu]->map_base + reg_mask_clr(intc, word));
+ }
+ }
+}
+
+static struct syscore_ops bcm7038_l1_syscore_ops = {
+ .suspend = bcm7038_l1_suspend,
+ .resume = bcm7038_l1_resume,
+};
+
+static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (on)
+ intc->wake_mask[word] |= mask;
+ else
+ intc->wake_mask[word] &= ~mask;
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ return 0;
+}
+#endif
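
Annotation: at suspend time the words programmed above reduce to one expression: everything is masked except interrupts allowed to wake the system plus those hardware-forwarded past the L1 (irq_fwd_mask). A small sketch of the two register writes, with the bit positions as assumed examples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wake_mask    = 1u << 5; /* assumed: hwirq 5 may wake us  */
	uint32_t irq_fwd_mask = 1u << 0; /* assumed: hwirq 0 is forwarded */

	uint32_t val = wake_mask | irq_fwd_mask;

	printf("mask-set   <- %#010x\n", ~val); /* disable everything else  */
	printf("mask-clear <- %#010x\n", val);  /* keep wake + forwarded on */
	return 0;
}
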
+
static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask,
@@ -295,11 +390,21 @@ static struct irq_chip bcm7038_l1_irq_chip = {
#ifdef CONFIG_SMP
.irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
+#ifdef CONFIG_PM_SLEEP
+ .irq_set_wake = bcm7038_l1_set_wake,
+#endif
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq)
{
+ struct bcm7038_l1_chip *intc = d->host_data;
+ u32 mask = BIT(hw_irq % IRQS_PER_WORD);
+ u32 word = hw_irq / IRQS_PER_WORD;
+
+ if (intc->irq_fwd_mask[word] & mask)
+ return -EPERM;
+
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
@@ -340,6 +445,16 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
goto out_unmap;
}
+#ifdef CONFIG_PM_SLEEP
+ /* Add bcm7038_l1_chip into a list */
+ raw_spin_lock(&bcm7038_l1_intcs_lock);
+ list_add_tail(&intc->list, &bcm7038_l1_intcs_list);
+ raw_spin_unlock(&bcm7038_l1_intcs_lock);
+
+ if (list_is_singular(&bcm7038_l1_intcs_list))
+ register_syscore_ops(&bcm7038_l1_syscore_ops);
+#endif
+
pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
dn, IRQS_PER_WORD * intc->n_words);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index e88e75c22b6a..fbec07d634ad 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 229d586c3d7a..87711e0f8014 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -5,6 +5,7 @@
*/
#include <linux/acpi_iort.h>
+#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 787e8eec9a7f..e05673bcd52b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
@@ -102,20 +103,21 @@ struct its_node {
struct its_collection *collections;
struct fwnode_handle *fwnode_handle;
u64 (*get_msi_base)(struct its_device *its_dev);
+ u64 typer;
u64 cbaser_save;
u32 ctlr_save;
struct list_head its_device_list;
u64 flags;
unsigned long list_nr;
- u32 ite_size;
- u32 device_ids;
int numa_node;
unsigned int msi_domain_flags;
u32 pre_its_base; /* for Socionext Synquacer */
- bool is_v4;
int vlpi_redist_offset;
};
+#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
+#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
+
#define ITS_ITT_ALIGN SZ_256
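
Annotation: the is_v4()/device_ids() macros fold the old cached fields into single-field reads of the cached GITS_TYPER value; FIELD_GET shifts the masked field down, and the + 1 undoes the hardware's N-1 encoding. A self-contained sketch of the same extraction (DEVBITS at bits [17:13] follows the GIC spec; the sample typer value is made up):

#include <stdint.h>
#include <stdio.h>

/* minimal stand-in for the kernel's FIELD_GET() */
#define MY_FIELD_GET(mask, val) \
	(((val) & (mask)) >> __builtin_ctzll(mask))

#define TYPER_DEVBITS	(0x1fULL << 13)	/* GITS_TYPER.Devbits, bits [17:13] */
#define TYPER_VLPIS	(1ULL << 1)	/* GITS_TYPER.VLPIS */

int main(void)
{
	uint64_t typer = (19ULL << 13) | TYPER_VLPIS;	/* made-up sample */

	printf("is_v4      = %d\n", !!(typer & TYPER_VLPIS));
	printf("device_ids = %llu\n",
	       (unsigned long long)(MY_FIELD_GET(TYPER_DEVBITS, typer) + 1));
	return 0;
}
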
/* The maximum number of VPEID bits supported by VLPI commands */
@@ -130,7 +132,7 @@ struct event_lpi_map {
u16 *col_map;
irq_hw_number_t lpi_base;
int nr_lpis;
- struct mutex vlpi_lock;
+ raw_spinlock_t vlpi_lock;
struct its_vm *vm;
struct its_vlpi_map *vlpi_maps;
int nr_vlpis;
@@ -181,7 +183,7 @@ static u16 get_its_list(struct its_vm *vm)
unsigned long its_list = 0;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (vm->vlpi_count[its->list_nr])
@@ -191,6 +193,12 @@ static u16 get_its_list(struct its_vm *vm)
return (u16)its_list;
}
+static inline u32 its_get_event_id(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ return d->hwirq - its_dev->event_map.lpi_base;
+}
+
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event)
{
@@ -199,6 +207,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
return its->collections + its_dev->event_map.col_map[event];
}
+static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
+ u32 event)
+{
+ if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
+ return NULL;
+
+ return &its_dev->event_map.vlpi_maps[event];
+}
+
+static struct its_collection *irq_to_col(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ return dev_event_to_col(its_dev, its_get_event_id(d));
+}
+
static struct its_collection *valid_col(struct its_collection *col)
{
if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
@@ -305,7 +329,10 @@ struct its_cmd_desc {
* The ITS command block, which is what the ITS actually parses.
*/
struct its_cmd_block {
- u64 raw_cmd[4];
+ union {
+ u64 raw_cmd[4];
+ __le64 raw_cmd_le[4];
+ };
};
#define ITS_CMD_QUEUE_SZ SZ_64K
@@ -414,10 +441,10 @@ static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
- cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
- cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
- cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
- cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
+ cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
+ cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
+ cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
+ cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
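
Annotation: the union keeps raw_cmd in CPU byte order for the encode helpers while raw_cmd_le is what actually reaches the little-endian ITS; on big-endian kernels cpu_to_le64 byte-swaps, elsewhere it is a no-op. A portable userspace model of that fixup:

#include <stdint.h>
#include <stdio.h>

static uint64_t my_cpu_to_le64(uint64_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap64(v);	/* big-endian CPU: swap to LE */
#else
	return v;			/* little-endian CPU: identity */
#endif
}

struct cmd_block {
	union {
		uint64_t raw_cmd[4];	/* CPU byte order, used while encoding */
		uint64_t raw_cmd_le[4];	/* what the little-endian ITS reads */
	};
};

int main(void)
{
	struct cmd_block cmd = { .raw_cmd = { 0x0807060504030201ULL } };

	cmd.raw_cmd_le[0] = my_cpu_to_le64(cmd.raw_cmd[0]);
	printf("dword 0 on the wire: %#llx\n",
	       (unsigned long long)cmd.raw_cmd_le[0]);
	return 0;
}
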
static struct its_collection *its_build_mapd_cmd(struct its_node *its,
@@ -676,6 +703,60 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
+static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
+ desc->its_inv_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INV);
+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vint_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
+ desc->its_int_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INT);
+ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
+static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_vlpi_map *map;
+
+ map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
+ desc->its_clear_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_CLEAR);
+ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return valid_vpe(its, map->vpe);
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -953,7 +1034,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
static void its_send_vmapti(struct its_device *dev, u32 id)
{
- struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
struct its_cmd_desc desc;
desc.its_vmapti_cmd.vpe = map->vpe;
@@ -967,7 +1048,7 @@ static void its_send_vmapti(struct its_device *dev, u32 id)
static void its_send_vmovi(struct its_device *dev, u32 id)
{
- struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
struct its_cmd_desc desc;
desc.its_vmovi_cmd.vpe = map->vpe;
@@ -1021,7 +1102,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
/* Emit VMOVPs */
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (!vpe->its_vm->vlpi_count[its->list_nr])
@@ -1042,29 +1123,71 @@ static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
+static void its_send_vinv(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINV command. This is just a normal INV,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_inv_cmd.dev = dev;
+ desc.its_inv_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
+}
+
+static void its_send_vint(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VINT command. This is just a normal INT,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_int_cmd.dev = dev;
+ desc.its_int_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
+}
+
+static void its_send_vclear(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
+
+ /*
+ * There is no real VCLEAR command. This is just a normal CLEAR,
+ * with a VSYNC instead of a SYNC.
+ */
+ desc.its_clear_cmd.dev = dev;
+ desc.its_clear_cmd.event_id = event_id;
+
+ its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
+}
+
/*
* irqchip functions - assumes MSI, mostly.
*/
-
-static inline u32 its_get_event_id(struct irq_data *d)
+static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- return d->hwirq - its_dev->event_map.lpi_base;
+ u32 event = its_get_event_id(d);
+
+ if (!irqd_is_forwarded_to_vcpu(d))
+ return NULL;
+
+ return dev_event_to_vlpi_map(its_dev, event);
}
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
+ struct its_vlpi_map *map = get_vlpi_map(d);
irq_hw_number_t hwirq;
void *va;
u8 *cfg;
- if (irqd_is_forwarded_to_vcpu(d)) {
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
- struct its_vlpi_map *map;
-
- va = page_address(its_dev->event_map.vm->vprop_page);
- map = &its_dev->event_map.vlpi_maps[event];
+ if (map) {
+ va = page_address(map->vm->vprop_page);
hwirq = map->vintid;
/* Remember the updated property */
@@ -1090,23 +1213,50 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
dsb(ishst);
}
+static void wait_for_syncr(void __iomem *rdbase)
+{
+ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+}
+
+static void direct_lpi_inv(struct irq_data *d)
+{
+ struct its_collection *col;
+ void __iomem *rdbase;
+
+ /* Target the redistributor this LPI is currently routed to */
+ col = irq_to_col(d);
+ rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
+ gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
+
+ wait_for_syncr(rdbase);
+}
+
static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
lpi_write_config(d, clr, set);
- its_send_inv(its_dev, its_get_event_id(d));
+ if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
+ direct_lpi_inv(d);
+ else if (!irqd_is_forwarded_to_vcpu(d))
+ its_send_inv(its_dev, its_get_event_id(d));
+ else
+ its_send_vinv(its_dev, its_get_event_id(d));
}
static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
- if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+ map = dev_event_to_vlpi_map(its_dev, event);
+
+ if (map->db_enabled == enable)
return;
- its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+ map->db_enabled = enable;
/*
* More fun with the architecture:
@@ -1208,10 +1358,17 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
if (which != IRQCHIP_STATE_PENDING)
return -EINVAL;
- if (state)
- its_send_int(its_dev, event);
- else
- its_send_clear(its_dev, event);
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ if (state)
+ its_send_vint(its_dev, event);
+ else
+ its_send_vclear(its_dev, event);
+ } else {
+ if (state)
+ its_send_int(its_dev, event);
+ else
+ its_send_clear(its_dev, event);
+ }
return 0;
}
@@ -1279,13 +1436,13 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
if (!info->map)
return -EINVAL;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!maps) {
ret = -ENOMEM;
goto out;
@@ -1328,29 +1485,30 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
}
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
int ret = 0;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
+
+ map = get_vlpi_map(d);
- if (!its_dev->event_map.vm ||
- !its_dev->event_map.vlpi_maps[event].vm) {
+ if (!its_dev->event_map.vm || !map) {
ret = -EINVAL;
goto out;
}
/* Copy our mapping information to the incoming request */
- *info->map = its_dev->event_map.vlpi_maps[event];
+ *info->map = *map;
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
@@ -1360,7 +1518,7 @@ static int its_vlpi_unmap(struct irq_data *d)
u32 event = its_get_event_id(d);
int ret = 0;
- mutex_lock(&its_dev->event_map.vlpi_lock);
+ raw_spin_lock(&its_dev->event_map.vlpi_lock);
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
ret = -EINVAL;
@@ -1390,7 +1548,7 @@ static int its_vlpi_unmap(struct irq_data *d)
}
out:
- mutex_unlock(&its_dev->event_map.vlpi_lock);
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
return ret;
}
@@ -1416,7 +1574,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
struct its_cmd_info *info = vcpu_info;
/* Need a v4 ITS */
- if (!its_dev->its->is_v4)
+ if (!is_v4(its_dev->its))
return -EINVAL;
/* Unmap request? */
@@ -1922,9 +2080,9 @@ static bool its_parse_indirect_baser(struct its_node *its,
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
- pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
&its->phys_base, its_base_type_string[type],
- its->device_ids, ids);
+ device_ids(its), ids);
}
*order = new_order;
@@ -1970,7 +2128,7 @@ static int its_alloc_tables(struct its_node *its)
case GITS_BASER_TYPE_DEVICE:
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
- its->device_ids);
+ device_ids(its));
break;
case GITS_BASER_TYPE_VCPU:
@@ -2361,7 +2519,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Don't allow device id that exceeds ITS hardware limit */
if (!baser)
- return (ilog2(dev_id) < its->device_ids);
+ return (ilog2(dev_id) < device_ids(its));
return its_alloc_table_entry(its, baser, dev_id);
}
@@ -2380,7 +2538,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
list_for_each_entry(its, &its_nodes, entry) {
struct its_baser *baser;
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
@@ -2419,7 +2577,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* sized as a power of two (and you need at least one bit...).
*/
nr_ites = max(2, nvecs);
- sz = nr_ites * its->ite_size;
+ sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
@@ -2450,7 +2608,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis;
- mutex_init(&dev->event_map.vlpi_lock);
+ raw_spin_lock_init(&dev->event_map.vlpi_lock);
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -2471,6 +2629,7 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
+ kfree(its_dev->event_map.col_map);
kfree(its_dev->itt);
kfree(its_dev);
}
@@ -2679,7 +2838,6 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
its_dev->event_map.nr_lpis);
- kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
@@ -2772,8 +2930,7 @@ static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ wait_for_syncr(rdbase);
return;
}
@@ -2869,7 +3026,7 @@ static void its_vpe_invall(struct its_vpe *vpe)
struct its_node *its;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
@@ -2927,10 +3084,10 @@ static void its_vpe_send_inv(struct irq_data *d)
if (gic_rdists->has_direct_lpi) {
void __iomem *rdbase;
+ /* Target the redistributor this VPE is currently known on */
rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
- gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
+ wait_for_syncr(rdbase);
} else {
its_vpe_send_cmd(vpe, its_send_inv);
}
@@ -2972,8 +3129,7 @@ static int its_vpe_set_irqchip_state(struct irq_data *d,
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
} else {
gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
- while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
- cpu_relax();
+ wait_for_syncr(rdbase);
}
} else {
if (state)
@@ -3138,7 +3294,7 @@ static int its_vpe_irq_domain_activate(struct irq_domain *domain,
vpe->col_idx = cpumask_first(cpu_online_mask);
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
its_send_vmapp(its, vpe, true);
@@ -3164,7 +3320,7 @@ static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
return;
list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
+ if (!is_v4(its))
continue;
its_send_vmapp(its, vpe, false);
@@ -3215,8 +3371,9 @@ static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
struct its_node *its = data;
- /* erratum 22375: only alloc 8MB table size */
- its->device_ids = 0x14; /* 20 bits, 8MB */
+ /* erratum 22375: only alloc 8MB table size (20 bits) */
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
return true;
@@ -3236,7 +3393,8 @@ static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */
- its->ite_size = 16;
+ its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
+ its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
return true;
}
@@ -3270,8 +3428,10 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
its->get_msi_base = its_irq_get_msi_base_pre_its;
ids = ilog2(pre_its_window[1]) - 2;
- if (its->device_ids > ids)
- its->device_ids = ids;
+ if (device_ids(its) > ids) {
+ its->typer &= ~GITS_TYPER_DEVBITS;
+ its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
+ }
/* the pre-ITS breaks isolation, so disable MSI remapping */
its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
@@ -3504,7 +3664,7 @@ static int its_init_vpe_domain(void)
}
/* Use the last possible DevID */
- devid = GENMASK(its->device_ids - 1, 0);
+ devid = GENMASK(device_ids(its) - 1, 0);
vpe_proxy.dev = its_create_device(its, devid, entries, false);
if (!vpe_proxy.dev) {
kfree(vpe_proxy.vpes);
@@ -3602,12 +3762,10 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
+ its->typer = typer;
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
- its->device_ids = GITS_TYPER_DEVBITS(typer);
- its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
- if (its->is_v4) {
+ if (is_v4(its)) {
if (!(typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(res, its_base);
if (err < 0)
@@ -3674,7 +3832,7 @@ static int __init its_probe_one(struct resource *res,
gits_write_cwriter(0, its->base + GITS_CWRITER);
ctlr = readl_relaxed(its->base + GITS_CTLR);
ctlr |= GITS_CTLR_ENABLE;
- if (its->is_v4)
+ if (is_v4(its))
ctlr |= GITS_CTLR_ImDe;
writel_relaxed(ctlr, its->base + GITS_CTLR);
@@ -3999,7 +4157,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return err;
list_for_each_entry(its, &its_nodes, entry)
- has_v4 |= its->is_v4;
+ has_v4 |= is_v4(its);
if (has_v4 & rdists->has_vlpis) {
if (its_init_vpe_domain() ||
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 6bb1f682f78b..d6218012097b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -183,7 +183,7 @@ static void gic_do_wait_for_rwp(void __iomem *base)
}
cpu_relax();
udelay(1);
- };
+ }
}
/* Wait for completion of a distributor change */
@@ -240,7 +240,7 @@ static void gic_enable_redist(bool enable)
break;
cpu_relax();
udelay(1);
- };
+ }
if (!count)
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index f126255b3260..01d18b39069e 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 platform IRQ support
+ * Ingenic XBurst platform IRQ support
*/
#include <linux/errno.h>
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/ingenic.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/timex.h>
@@ -22,6 +21,7 @@
struct ingenic_intc_data {
void __iomem *base;
+ struct irq_domain *domain;
unsigned num_chips;
};
@@ -35,41 +35,30 @@ struct ingenic_intc_data {
static irqreturn_t intc_cascade(int irq, void *data)
{
struct ingenic_intc_data *intc = irq_get_handler_data(irq);
- uint32_t irq_reg;
+ struct irq_domain *domain = intc->domain;
+ struct irq_chip_generic *gc;
+ uint32_t pending;
unsigned i;
for (i = 0; i < intc->num_chips; i++) {
- irq_reg = readl(intc->base + (i * CHIP_SIZE) +
- JZ_REG_INTC_PENDING);
- if (!irq_reg)
+ gc = irq_get_domain_generic_chip(domain, i * 32);
+
+ pending = irq_reg_readl(gc, JZ_REG_INTC_PENDING);
+ if (!pending)
continue;
- generic_handle_irq(__fls(irq_reg) + (i * 32) + JZ4740_IRQ_BASE);
+ while (pending) {
+ int bit = __fls(pending);
+
+ irq = irq_find_mapping(domain, bit + (i * 32));
+ generic_handle_irq(irq);
+ pending &= ~BIT(bit);
+ }
}
return IRQ_HANDLED;
}
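
Annotation: the cascade handler now drains every pending bit per chip instead of dispatching only the highest one per invocation: __fls picks the most significant set bit, the mapped virq is handled, and the bit is cleared until the word is empty. The same loop in plain C (the pending word is a made-up sample):

#include <stdio.h>

static int my_fls(unsigned int v)	/* highest set bit, like __fls() */
{
	return 31 - __builtin_clz(v);
}

int main(void)
{
	unsigned int pending = (1u << 17) | (1u << 3);	/* sample word */

	while (pending) {
		int bit = my_fls(pending);

		printf("dispatch hwirq %d\n", bit);	/* 17 first, then 3 */
		pending &= ~(1u << bit);
	}
	return 0;
}
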
-static void intc_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
-{
- struct irq_chip_regs *regs = &gc->chip_types->regs;
-
- writel(mask, gc->reg_base + regs->enable);
- writel(~mask, gc->reg_base + regs->disable);
-}
-
-void ingenic_intc_irq_suspend(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- intc_irq_set_mask(gc, gc->wake_active);
-}
-
-void ingenic_intc_irq_resume(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- intc_irq_set_mask(gc, gc->mask_cache);
-}
-
static struct irqaction intc_cascade_action = {
.handler = intc_cascade,
.name = "SoC intc cascade interrupt",
@@ -108,17 +97,27 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
- for (i = 0; i < num_chips; i++) {
- /* Mask all irqs */
- writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
- JZ_REG_INTC_SET_MASK);
+ domain = irq_domain_add_legacy(node, num_chips * 32,
+ JZ4740_IRQ_BASE, 0,
+ &irq_generic_chip_ops, NULL);
+ if (!domain) {
+ err = -ENOMEM;
+ goto out_unmap_base;
+ }
- gc = irq_alloc_generic_chip("INTC", 1,
- JZ4740_IRQ_BASE + (i * 32),
- intc->base + (i * CHIP_SIZE),
- handle_level_irq);
+ intc->domain = domain;
+
+ err = irq_alloc_domain_generic_chips(domain, 32, 1, "INTC",
+ handle_level_irq, 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+ if (err)
+ goto out_domain_remove;
+
+ for (i = 0; i < num_chips; i++) {
+ gc = irq_get_domain_generic_chip(domain, i * 32);
gc->wake_enabled = IRQ_MSK(32);
+ gc->reg_base = intc->base + (i * CHIP_SIZE);
ct = gc->chip_types;
ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
@@ -127,21 +126,19 @@ static int __init ingenic_intc_of_init(struct device_node *node,
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_wake = irq_gc_set_wake;
- ct->chip.irq_suspend = ingenic_intc_irq_suspend;
- ct->chip.irq_resume = ingenic_intc_irq_resume;
+ ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0,
- IRQ_NOPROBE | IRQ_LEVEL);
+ /* Mask all irqs */
+ irq_reg_writel(gc, IRQ_MSK(32), JZ_REG_INTC_SET_MASK);
}
- domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
- &irq_domain_simple_ops, NULL);
- if (!domain)
- pr_warn("unable to register IRQ domain\n");
-
setup_irq(parent_irq, &intc_cascade_action);
return 0;
+out_domain_remove:
+ irq_domain_remove(domain);
+out_unmap_base:
+ iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:
diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c
new file mode 100644
index 000000000000..4d1179fed77c
--- /dev/null
+++ b/drivers/irqchip/irq-ls-extirq.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "irq-ls-extirq: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MAXIRQ 12
+#define LS1021A_SCFGREVCR 0x200
+
+struct ls_extirq_data {
+ struct regmap *syscon;
+ u32 intpcr;
+ bool bit_reverse;
+ u32 nirq;
+ struct irq_fwspec map[MAXIRQ];
+};
+
+static int
+ls_extirq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct ls_extirq_data *priv = data->chip_data;
+ irq_hw_number_t hwirq = data->hwirq;
+ u32 value, mask;
+
+ if (priv->bit_reverse)
+ mask = 1U << (31 - hwirq);
+ else
+ mask = 1U << hwirq;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ value = mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ value = mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_EDGE_RISING:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ regmap_update_bits(priv->syscon, priv->intpcr, mask, value);
+
+ return irq_chip_set_type_parent(data, type);
+}
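
Annotation: set_type() works by polarity inversion: the GIC parent only understands level-high and edge-rising, so low/falling requests flip the corresponding INTPCR bit and are passed upward as the inverted type; on LS1021A parts whose SCFGREVCR reads nonzero, the register is additionally bit-reversed. The mask selection in isolation:

#include <stdbool.h>
#include <stdio.h>

static unsigned int intpcr_mask(unsigned int hwirq, bool bit_reverse)
{
	/* bit-reversed parts scatter IRQ n at bit 31 - n; others use bit n */
	return bit_reverse ? 1u << (31 - hwirq) : 1u << hwirq;
}

int main(void)
{
	printf("hwirq 3, straight : %#010x\n", intpcr_mask(3, false));
	printf("hwirq 3, reversed : %#010x\n", intpcr_mask(3, true));
	return 0;
}
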
+
+static struct irq_chip ls_extirq_chip = {
+ .name = "ls-extirq",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = ls_extirq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int
+ls_extirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct ls_extirq_data *priv = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ hwirq = fwspec->param[0];
+ if (hwirq >= priv->nirq)
+ return -EINVAL;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &ls_extirq_chip,
+ priv);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &priv->map[hwirq]);
+}
+
+static const struct irq_domain_ops extirq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .alloc = ls_extirq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int
+ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node)
+{
+ const __be32 *map;
+ u32 mapsize;
+ int ret;
+
+ map = of_get_property(node, "interrupt-map", &mapsize);
+ if (!map)
+ return -ENOENT;
+ if (mapsize % sizeof(*map))
+ return -EINVAL;
+ mapsize /= sizeof(*map);
+
+ while (mapsize) {
+ struct device_node *ipar;
+ u32 hwirq, intsize, j;
+
+ if (mapsize < 3)
+ return -EINVAL;
+ hwirq = be32_to_cpup(map);
+ if (hwirq >= MAXIRQ)
+ return -EINVAL;
+ priv->nirq = max(priv->nirq, hwirq + 1);
+
+ ipar = of_find_node_by_phandle(be32_to_cpup(map + 2));
+ map += 3;
+ mapsize -= 3;
+ if (!ipar)
+ return -EINVAL;
+ priv->map[hwirq].fwnode = &ipar->fwnode;
+ ret = of_property_read_u32(ipar, "#interrupt-cells", &intsize);
+ if (ret)
+ return ret;
+
+ if (intsize > mapsize)
+ return -EINVAL;
+
+ priv->map[hwirq].param_count = intsize;
+ for (j = 0; j < intsize; ++j)
+ priv->map[hwirq].param[j] = be32_to_cpup(map++);
+ mapsize -= intsize;
+ }
+ return 0;
+}
+
+static int __init
+ls_extirq_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct ls_extirq_data *priv;
+ int ret;
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("Cannot find parent domain\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->syscon = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(priv->syscon)) {
+ ret = PTR_ERR(priv->syscon);
+ pr_err("Failed to lookup parent regmap\n");
+ goto out;
+ }
+ ret = of_property_read_u32(node, "reg", &priv->intpcr);
+ if (ret) {
+ pr_err("Missing INTPCR offset value\n");
+ goto out;
+ }
+
+ ret = ls_extirq_parse_map(priv, node);
+ if (ret)
+ goto out;
+
+ if (of_device_is_compatible(node, "fsl,ls1021a-extirq")) {
+ u32 revcr;
+
+ ret = regmap_read(priv->syscon, LS1021A_SCFGREVCR, &revcr);
+ if (ret)
+ goto out;
+ priv->bit_reverse = (revcr != 0);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, priv->nirq, node,
+ &extirq_domain_ops, priv);
+ if (!domain)
+ ret = -ENOMEM;
+
+out:
+ if (ret)
+ kfree(priv);
+ return ret;
+}
+
+IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init);
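On LS1021A the SCFG block can present its registers bit-reversed, which is why ls_extirq_set_type() above computes the INTPCR mask as 1U << (31 - hwirq) whenever SCFGREVCR reads nonzero. A minimal stand-alone sketch of that mask selection (plain C; the helper name is hypothetical):

#include <stdio.h>

/* Sketch of the INTPCR mask choice made in ls_extirq_set_type(). */
static unsigned int intpcr_mask(unsigned int hwirq, int bit_reverse)
{
	return bit_reverse ? 1U << (31 - hwirq) : 1U << hwirq;
}

int main(void)
{
	/* hwirq 3 -> bit 3 normally, bit 28 on a bit-reversed SCFG */
	printf("%08x %08x\n", intpcr_mask(3, 0), intpcr_mask(3, 1));
	return 0;
}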
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 7d0a12fe2714..8df547d2d935 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -181,7 +181,7 @@ static void plic_handle_irq(struct pt_regs *regs)
WARN_ON_ONCE(!handler->present);
- csr_clear(sie, SIE_SEIE);
+ csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(plic_irqdomain, hwirq);
@@ -191,7 +191,7 @@ static void plic_handle_irq(struct pt_regs *regs)
else
generic_handle_irq(irq);
}
- csr_set(sie, SIE_SEIE);
+ csr_set(CSR_IE, IE_EIE);
}
/*
@@ -252,8 +252,11 @@ static int __init plic_init(struct device_node *node,
continue;
}
- /* skip contexts other than supervisor external interrupt */
- if (parent.args[0] != IRQ_S_EXT)
+ /*
+ * Skip contexts other than external interrupts for our
+ * privilege level.
+ */
+ if (parent.args[0] != IRQ_EXT)
continue;
hartid = plic_find_hart_id(parent.np);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index ef4d625d2d80..8f6e6b08eadf 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -246,8 +246,8 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *d
/* No free bits available. Allocate a new vint */
vint_desc = ti_sci_inta_alloc_parent_irq(domain);
if (IS_ERR(vint_desc)) {
- mutex_unlock(&inta->vint_mutex);
- return ERR_PTR(PTR_ERR(vint_desc));
+ event_desc = ERR_CAST(vint_desc);
+ goto unlock;
}
free_bit = find_first_zero_bit(vint_desc->event_map,
@@ -259,6 +259,7 @@ alloc_event:
if (IS_ERR(event_desc))
clear_bit(free_bit, vint_desc->event_map);
+unlock:
mutex_unlock(&inta->vint_mutex);
return event_desc;
}
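The ti-sci-inta fix replaces ERR_PTR(PTR_ERR(...)) with ERR_CAST() and funnels the failure through a shared label so the mutex is released at exactly one site. A hedged sketch of the shape (all my_* names hypothetical):

/* Sketch only: ERR_CAST() converts an error pointer of one type into
 * another without losing the encoded errno; the common label keeps a
 * single unlock site. */
static struct event_desc *my_alloc_event(struct my_inta *inta)
{
	struct vint_desc *vint;
	struct event_desc *event;

	mutex_lock(&inta->vint_mutex);
	vint = my_alloc_parent_irq(inta);
	if (IS_ERR(vint)) {
		event = ERR_CAST(vint);
		goto unlock;
	}
	event = my_alloc_from_vint(vint);
unlock:
	mutex_unlock(&inta->vint_mutex);
	return event;
}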
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 5a7efeb3892d..84163f1ebfcf 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -51,7 +51,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
handle_domain_irq(zevio_irq_domain, irqnr, regs);
- };
+ }
}
static void __init zevio_init_irq_base(void __iomem *base)
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index faa7d61b9d6c..6ae9e1f0819d 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
@@ -13,12 +14,13 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
-#define PDC_MAX_IRQS 126
+#define PDC_MAX_IRQS 168
+#define PDC_MAX_GPIO_IRQS 256
#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
@@ -26,6 +28,8 @@
#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110
+#define PDC_NO_PARENT_IRQ ~0UL
+
struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
@@ -47,6 +51,26 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static int qcom_pdc_gic_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ return irq_chip_get_parent_state(d, which, state);
+}
+
+static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool value)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ return irq_chip_set_parent_state(d, which, value);
+}
+
static void pdc_enable_intr(struct irq_data *d, bool on)
{
int pin_out = d->hwirq;
@@ -63,15 +87,37 @@ static void pdc_enable_intr(struct irq_data *d, bool on)
raw_spin_unlock(&pdc_lock);
}
-static void qcom_pdc_gic_mask(struct irq_data *d)
+static void qcom_pdc_gic_disable(struct irq_data *d)
{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
pdc_enable_intr(d, false);
+ irq_chip_disable_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
+ pdc_enable_intr(d, true);
+ irq_chip_enable_parent(d);
+}
+
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
irq_chip_mask_parent(d);
}
static void qcom_pdc_gic_unmask(struct irq_data *d)
{
- pdc_enable_intr(d, true);
+ if (d->hwirq == GPIO_NO_WAKE_IRQ)
+ return;
+
irq_chip_unmask_parent(d);
}
@@ -114,6 +160,9 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
int pin_out = d->hwirq;
enum pdc_irq_config_bits pdc_type;
+ if (pin_out == GPIO_NO_WAKE_IRQ)
+ return 0;
+
switch (type) {
case IRQ_TYPE_EDGE_RISING:
pdc_type = PDC_EDGE_RISING;
@@ -148,6 +197,10 @@ static struct irq_chip qcom_pdc_gic_chip = {
.irq_eoi = irq_chip_eoi_parent,
.irq_mask = qcom_pdc_gic_mask,
.irq_unmask = qcom_pdc_gic_unmask,
+ .irq_disable = qcom_pdc_gic_disable,
+ .irq_enable = qcom_pdc_gic_enable,
+ .irq_get_irqchip_state = qcom_pdc_gic_get_irqchip_state,
+ .irq_set_irqchip_state = qcom_pdc_gic_set_irqchip_state,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = qcom_pdc_gic_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND |
@@ -169,8 +222,7 @@ static irq_hw_number_t get_parent_hwirq(int pin)
return (region->parent_base + pin - region->pin_base);
}
- WARN_ON(1);
- return ~0UL;
+ return PDC_NO_PARENT_IRQ;
}
static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
@@ -199,17 +251,17 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
if (ret)
- return -EINVAL;
-
- parent_hwirq = get_parent_hwirq(hwirq);
- if (parent_hwirq == ~0UL)
- return -EINVAL;
+ return ret;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
&qcom_pdc_gic_chip, NULL);
if (ret)
return ret;
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ return 0;
+
if (type & IRQ_TYPE_EDGE_BOTH)
type = IRQ_TYPE_EDGE_RISING;
@@ -232,6 +284,60 @@ static const struct irq_domain_ops qcom_pdc_ops = {
.free = irq_domain_free_irqs_common,
};
+static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq, parent_hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &qcom_pdc_gic_chip, NULL);
+ if (ret)
+ return ret;
+
+ if (hwirq == GPIO_NO_WAKE_IRQ)
+ return 0;
+
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == PDC_NO_PARENT_IRQ)
+ return 0;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = parent_hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ return bus_token == DOMAIN_BUS_WAKEUP;
+}
+
+static const struct irq_domain_ops qcom_pdc_gpio_ops = {
+ .select = qcom_pdc_gpio_domain_select,
+ .alloc = qcom_pdc_gpio_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n;
@@ -270,7 +376,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *parent_domain, *pdc_domain;
+ struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
int ret;
pdc_base = of_iomap(node, 0);
@@ -301,12 +407,27 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail;
}
+ pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
+ IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
+ PDC_MAX_GPIO_IRQS,
+ of_fwnode_handle(node),
+ &qcom_pdc_gpio_ops, NULL);
+ if (!pdc_gpio_domain) {
+ pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
+ ret = -ENOMEM;
+ goto remove;
+ }
+
+ irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+
return 0;
+remove:
+ irq_domain_remove(pdc_domain);
fail:
kfree(pdc_region);
iounmap(pdc_base);
return ret;
}
-IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
+IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
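With two domains now stacked on the same PDC node, the .select callback is what steers wakeup-capable GPIO allocations to the new domain: the core only matches it when a caller asks for DOMAIN_BUS_WAKEUP. A hedged consumer-side sketch (function name hypothetical):

/* Sketch: the core walks registered domains and calls each .select
 * with this token; qcom_pdc_gpio_domain_select() above answers true
 * only for DOMAIN_BUS_WAKEUP. */
static struct irq_domain *my_find_wakeup_domain(struct irq_fwspec *fwspec)
{
	return irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WAKEUP);
}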
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index ba8619524231..1675da34239b 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -950,6 +950,34 @@ capi_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
+#ifdef CONFIG_COMPAT
+static long
+capi_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ if (cmd == CAPI_MANUFACTURER_CMD) {
+ struct {
+ compat_ulong_t cmd;
+ compat_uptr_t data;
+ } mcmd32;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&mcmd32, compat_ptr(arg), sizeof(mcmd32)))
+ return -EFAULT;
+
+ mutex_lock(&capi_mutex);
+ ret = capi20_manufacturer(mcmd32.cmd, compat_ptr(mcmd32.data));
+ mutex_unlock(&capi_mutex);
+
+ return ret;
+ }
+
+ return capi_unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static int capi_open(struct inode *inode, struct file *file)
{
struct capidev *cdev;
@@ -996,6 +1024,9 @@ static const struct file_operations capi_fops =
.write = capi_write,
.poll = capi_poll,
.unlocked_ioctl = capi_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = capi_compat_ioctl,
+#endif
.open = capi_open,
.release = capi_release,
};
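The CAPI change follows the usual compat-ioctl thunk pattern: only CAPI_MANUFACTURER_CMD carries a struct whose layout differs between 32- and 64-bit userspace, so only that command is repacked; everything else is forwarded to the native handler through compat_ptr(). A hedged sketch of the same shape for a hypothetical driver (all MY_*/my_* names invented):

#ifdef CONFIG_COMPAT
/* Sketch: repack the 32-bit layout, then reuse the native path. */
struct my_cmd32 {
	compat_ulong_t cmd;
	compat_uptr_t data;		/* 32-bit user pointer */
};

static long my_compat_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct my_cmd32 c32;

	if (cmd == MY_MANUFACTURER_CMD) {
		if (copy_from_user(&c32, compat_ptr(arg), sizeof(c32)))
			return -EFAULT;
		return my_do_manufacturer(c32.cmd, compat_ptr(c32.data));
	}
	/* remaining commands are layout-compatible */
	return my_unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif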
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 1988de1d64c0..4b68520ac251 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -17,7 +17,7 @@ if NEW_LEDS
config LEDS_CLASS
tristate "LED Class Support"
help
- This option enables the led sysfs class in /sys/class/leds. You'll
+ This option enables the LED sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
config LEDS_CLASS_FLASH
@@ -35,7 +35,7 @@ config LEDS_BRIGHTNESS_HW_CHANGED
depends on LEDS_CLASS
help
This option enables support for the brightness_hw_changed attribute
- for led sysfs class devices under /sys/class/leds.
+ for LED sysfs class devices under /sys/class/leds.
See Documentation/ABI/testing/sysfs-class-led for details.
@@ -132,6 +132,19 @@ config LEDS_CR0014114
To compile this driver as a module, choose M here: the module
will be called leds-cr0014114.
+config LEDS_EL15203000
+ tristate "LED Support for Crane EL15203000"
+ depends on LEDS_CLASS
+ depends on SPI
+ depends on OF
+ help
+ This option enables support for EL15203000 LED Board
+ (aka RED LED board) which is widely used in coffee vending
+ machines produced by Crane Merchandising Systems.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-el15203000.
+
config LEDS_LM3530
tristate "LCD Backlight driver for LM3530"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 41fb073a39c1..2da39e896ce8 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
+obj-$(CONFIG_LEDS_EL15203000) += leds-el15203000.o
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 60c3de5c6b9f..6eeb9effcf65 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -327,6 +327,56 @@ void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_flash_unregister);
+static void devm_led_classdev_flash_release(struct device *dev, void *res)
+{
+ led_classdev_flash_unregister(*(struct led_classdev_flash **)res);
+}
+
+int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ struct led_classdev_flash **dr;
+ int ret;
+
+ dr = devres_alloc(devm_led_classdev_flash_release, sizeof(*dr),
+ GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = led_classdev_flash_register_ext(parent, fled_cdev, init_data);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = fled_cdev;
+ devres_add(parent, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_led_classdev_flash_register_ext);
+
+static int devm_led_classdev_flash_match(struct device *dev,
+ void *res, void *data)
+{
+ struct led_classdev_flash **p = res;
+
+ if (WARN_ON(!p || !*p))
+ return 0;
+
+ return *p == data;
+}
+
+void devm_led_classdev_flash_unregister(struct device *dev,
+ struct led_classdev_flash *fled_cdev)
+{
+ WARN_ON(devres_release(dev,
+ devm_led_classdev_flash_release,
+ devm_led_classdev_flash_match, fled_cdev));
+}
+EXPORT_SYMBOL_GPL(devm_led_classdev_flash_unregister);
+
static void led_clamp_align(struct led_flash_setting *s)
{
u32 v, offset;
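The new devres wrappers mean a flash LED driver no longer needs an explicit unregister in its .remove path, which is exactly how leds-lm3601x is converted further down. A hedged probe-side sketch (driver names hypothetical):

/* Sketch: the classdev is unregistered automatically when the device
 * unbinds, so .remove shrinks to unrelated cleanup. */
static int my_flash_probe(struct platform_device *pdev)
{
	struct led_init_data init_data = {};
	struct led_classdev_flash *fled;

	fled = devm_kzalloc(&pdev->dev, sizeof(*fled), GFP_KERNEL);
	if (!fled)
		return -ENOMEM;

	return devm_led_classdev_flash_register_ext(&pdev->dev, fled,
						    &init_data);
}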
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 647b1263c579..438774315e6c 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -74,13 +74,13 @@ static ssize_t max_brightness_show(struct device *dev,
static DEVICE_ATTR_RO(max_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
-static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
-static struct attribute *led_trigger_attrs[] = {
- &dev_attr_trigger.attr,
+static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
+static struct bin_attribute *led_trigger_bin_attrs[] = {
+ &bin_attr_trigger,
NULL,
};
static const struct attribute_group led_trigger_group = {
- .attrs = led_trigger_attrs,
+ .bin_attrs = led_trigger_bin_attrs,
};
#endif
@@ -403,7 +403,7 @@ EXPORT_SYMBOL_GPL(devm_led_classdev_register_ext);
static int devm_led_classdev_match(struct device *dev, void *res, void *data)
{
- struct led_cdev **p = res;
+ struct led_classdev **p = res;
if (WARN_ON(!p || !*p))
return 0;
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 23963e5cb5d6..79e30d2cb7a5 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -16,6 +16,7 @@
#include <linux/rwsem.h>
#include <linux/leds.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include "leds.h"
/*
@@ -26,9 +27,11 @@ LIST_HEAD(trigger_list);
/* Used by LED Class */
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_trigger *trig;
int ret = count;
@@ -64,39 +67,82 @@ unlock:
mutex_unlock(&led_cdev->led_access);
return ret;
}
-EXPORT_SYMBOL_GPL(led_trigger_store);
+EXPORT_SYMBOL_GPL(led_trigger_write);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+__printf(3, 4)
+static int led_trigger_snprintf(char *buf, ssize_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ if (size <= 0)
+ i = vsnprintf(NULL, 0, fmt, args);
+ else
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return i;
+}
+
+static int led_trigger_format(char *buf, size_t size,
+ struct led_classdev *led_cdev)
{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct led_trigger *trig;
- int len = 0;
+ int len = led_trigger_snprintf(buf, size, "%s",
+ led_cdev->trigger ? "none" : "[none]");
+
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ bool hit = led_cdev->trigger &&
+ !strcmp(led_cdev->trigger->name, trig->name);
+
+ len += led_trigger_snprintf(buf + len, size - len,
+ " %s%s%s", hit ? "[" : "",
+ trig->name, hit ? "]" : "");
+ }
+
+ len += led_trigger_snprintf(buf + len, size - len, "\n");
+
+ return len;
+}
+
+/*
+ * It was stupid to create 10000 cpu triggers, but we are stuck with it now.
+ * Don't make that mistake again. We work around it here by creating a binary
+ * attribute, which is not limited by length. This is _not_ good design, do not
+ * copy it.
+ */
+ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ void *data;
+ int len;
down_read(&triggers_list_lock);
down_read(&led_cdev->trigger_lock);
- if (!led_cdev->trigger)
- len += scnprintf(buf+len, PAGE_SIZE - len, "[none] ");
- else
- len += scnprintf(buf+len, PAGE_SIZE - len, "none ");
-
- list_for_each_entry(trig, &trigger_list, next_trig) {
- if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
- trig->name))
- len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ",
- trig->name);
- else
- len += scnprintf(buf+len, PAGE_SIZE - len, "%s ",
- trig->name);
+ len = led_trigger_format(NULL, 0, led_cdev);
+ data = kvmalloc(len + 1, GFP_KERNEL);
+ if (!data) {
+ up_read(&led_cdev->trigger_lock);
+ up_read(&triggers_list_lock);
+ return -ENOMEM;
}
+ len = led_trigger_format(data, len + 1, led_cdev);
+
up_read(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
- len += scnprintf(len+buf, PAGE_SIZE - len, "\n");
+ len = memory_read_from_buffer(buf, count, &pos, data, len);
+
+ kvfree(data);
+
return len;
}
-EXPORT_SYMBOL_GPL(led_trigger_show);
+EXPORT_SYMBOL_GPL(led_trigger_read);
/* Caller must ensure led_cdev->trigger_lock held */
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
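led_trigger_read() sizes its output with a first formatting pass (NULL buffer, size 0), allocates exactly that much with kvmalloc(), then formats for real; that two-pass scheme is what lets the trigger list outgrow PAGE_SIZE. A hedged sketch of the pattern (helper name hypothetical):

/* Sketch: pass 1 measures, pass 2 fills the exact-size buffer. */
static char *my_format_alloc(int value, size_t *out_len)
{
	int len = snprintf(NULL, 0, "value: %d\n", value);
	char *buf = kvmalloc(len + 1, GFP_KERNEL);

	if (!buf)
		return NULL;
	*out_len = snprintf(buf, len + 1, "value: %d\n", value);
	return buf;
}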
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 250dc9d6f635..82350a28a564 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -305,6 +305,13 @@ static int an30259a_probe(struct i2c_client *client)
chip->regmap = devm_regmap_init_i2c(client, &an30259a_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ err = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ err);
+ goto exit;
+ }
+
for (i = 0; i < chip->num_leds; i++) {
struct led_init_data init_data = {};
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index c50d34e2b098..42e1b7598c3a 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -346,16 +346,11 @@ static int bcm6328_leds_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- struct resource *mem_r;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val, *blink_leds, *blink_delay;
- mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_r)
- return -EINVAL;
-
- mem = devm_ioremap_resource(dev, mem_r);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index aec285fd21c0..94fefd456ba0 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -151,17 +151,12 @@ static int bcm6358_leds_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- struct resource *mem_r;
void __iomem *mem;
spinlock_t *lock; /* memory lock */
unsigned long val;
u32 clk_div;
- mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_r)
- return -EINVAL;
-
- mem = devm_ioremap_resource(dev, mem_r);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/leds/leds-el15203000.c b/drivers/leds/leds-el15203000.c
new file mode 100644
index 000000000000..298b13e4807a
--- /dev/null
+++ b/drivers/leds/leds-el15203000.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Crane Merchandising Systems. All rights reserved.
+// Copyright (C) 2019 Oleh Kravchenko <oleg@kaa.org.ua>
+
+#include <linux/delay.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+/*
+ * EL15203000 SPI protocol description:
+ * +-----+---------+
+ * | LED | COMMAND |
+ * +-----+---------+
+ * | 1 | 1 |
+ * +-----+---------+
+ * (*) The LED MCU board expects a 20 msec delay per byte.
+ *
+ * LEDs:
+ * +----------+--------------+-------------------------------------------+
+ * | ID | NAME | DESCRIPTION |
+ * +----------+--------------+-------------------------------------------+
+ * | 'P' 0x50 | Pipe | Consists of 5 LEDs, controlled by the board |
+ * +----------+--------------+-------------------------------------------+
+ * | 'S' 0x53 | Screen frame | Light tube around the screen |
+ * +----------+--------------+-------------------------------------------+
+ * | 'V' 0x56 | Vending area | Highlights a cup of coffee |
+ * +----------+--------------+-------------------------------------------+
+ *
+ * COMMAND:
+ * +----------+-----------------+--------------+--------------+
+ * | VALUES | PIPE | SCREEN FRAME | VENDING AREA |
+ * +----------+-----------------+--------------+--------------+
+ * | '0' 0x30 | Off |
+ * +----------+-----------------------------------------------+
+ * | '1' 0x31 | On |
+ * +----------+-----------------+--------------+--------------+
+ * | '2' 0x32 | Cascade | Breathing |
+ * +----------+-----------------+--------------+
+ * | '3' 0x33 | Inverse cascade |
+ * +----------+-----------------+
+ * | '4' 0x34 | Bounce |
+ * +----------+-----------------+
+ * | '5' 0x35 | Inverse bounce |
+ * +----------+-----------------+
+ */
+
+/* EL15203000 default settings */
+#define EL_FW_DELAY_USEC 20000ul
+#define EL_PATTERN_DELAY_MSEC 800u
+#define EL_PATTERN_LEN 10u
+#define EL_PATTERN_HALF_LEN (EL_PATTERN_LEN / 2)
+
+enum el15203000_command {
+ /* for all LEDs */
+ EL_OFF = '0',
+ EL_ON = '1',
+
+ /* for Screen LED */
+ EL_SCREEN_BREATHING = '2',
+
+ /* for Pipe LED */
+ EL_PIPE_CASCADE = '2',
+ EL_PIPE_INV_CASCADE = '3',
+ EL_PIPE_BOUNCE = '4',
+ EL_PIPE_INV_BOUNCE = '5',
+};
+
+struct el15203000_led {
+ struct el15203000 *priv;
+ struct led_classdev ldev;
+ u32 reg;
+};
+
+struct el15203000 {
+ struct device *dev;
+ struct mutex lock;
+ struct spi_device *spi;
+ unsigned long delay;
+ size_t count;
+ struct el15203000_led leds[];
+};
+
+static int el15203000_cmd(struct el15203000_led *led, u8 brightness)
+{
+ int ret;
+ u8 cmd[2];
+ size_t i;
+
+ mutex_lock(&led->priv->lock);
+
+ dev_dbg(led->priv->dev, "Set brightness of 0x%02x(%c) to 0x%02x(%c)",
+ led->reg, led->reg, brightness, brightness);
+
+ /* ensure at least EL_FW_DELAY_USEC has elapsed since the last byte */
+ if (time_after(led->priv->delay, jiffies)) {
+ dev_dbg(led->priv->dev, "Wait %luus to sync",
+ EL_FW_DELAY_USEC);
+
+ usleep_range(EL_FW_DELAY_USEC,
+ EL_FW_DELAY_USEC + 1);
+ }
+
+ cmd[0] = led->reg;
+ cmd[1] = brightness;
+
+ for (i = 0; i < ARRAY_SIZE(cmd); i++) {
+ if (i)
+ usleep_range(EL_FW_DELAY_USEC,
+ EL_FW_DELAY_USEC + 1);
+
+ ret = spi_write(led->priv->spi, &cmd[i], sizeof(cmd[i]));
+ if (ret) {
+ dev_err(led->priv->dev,
+ "spi_write() error %d", ret);
+ break;
+ }
+ }
+
+ led->priv->delay = jiffies + usecs_to_jiffies(EL_FW_DELAY_USEC);
+
+ mutex_unlock(&led->priv->lock);
+
+ return ret;
+}
+
+static int el15203000_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ return el15203000_cmd(led, brightness == LED_OFF ? EL_OFF : EL_ON);
+}
+
+static int el15203000_pattern_set_S(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ if (repeat > 0 || len != 2 ||
+ pattern[0].delta_t != 4000 || pattern[0].brightness != 0 ||
+ pattern[1].delta_t != 4000 || pattern[1].brightness != 1)
+ return -EINVAL;
+
+ dev_dbg(led->priv->dev, "Breathing mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ return el15203000_cmd(led, EL_SCREEN_BREATHING);
+}
+
+static bool is_cascade(const struct led_pattern *pattern, u32 len,
+ bool inv, bool right)
+{
+ int val, t;
+ u32 i;
+
+ if (len != EL_PATTERN_HALF_LEN)
+ return false;
+
+ val = right ? BIT(4) : BIT(0);
+
+ for (i = 0; i < len; i++) {
+ t = inv ? ~val & GENMASK(4, 0) : val;
+
+ if (pattern[i].delta_t != EL_PATTERN_DELAY_MSEC ||
+ pattern[i].brightness != t)
+ return false;
+
+ val = right ? val >> 1 : val << 1;
+ }
+
+ return true;
+}
+
+static bool is_bounce(const struct led_pattern *pattern, u32 len, bool inv)
+{
+ if (len != EL_PATTERN_LEN)
+ return false;
+
+ return is_cascade(pattern, EL_PATTERN_HALF_LEN, inv, false) &&
+ is_cascade(pattern + EL_PATTERN_HALF_LEN,
+ EL_PATTERN_HALF_LEN, inv, true);
+}
+
+static int el15203000_pattern_set_P(struct led_classdev *ldev,
+ struct led_pattern *pattern,
+ u32 len, int repeat)
+{
+ u8 cmd;
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ if (repeat > 0)
+ return -EINVAL;
+
+ if (is_cascade(pattern, len, false, false)) {
+ dev_dbg(led->priv->dev, "Cascade mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_CASCADE;
+ } else if (is_cascade(pattern, len, true, false)) {
+ dev_dbg(led->priv->dev, "Inverse cascade mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_INV_CASCADE;
+ } else if (is_bounce(pattern, len, false)) {
+ dev_dbg(led->priv->dev, "Bounce mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_BOUNCE;
+ } else if (is_bounce(pattern, len, true)) {
+ dev_dbg(led->priv->dev, "Inverse bounce mode for 0x%02x(%c)",
+ led->reg, led->reg);
+
+ cmd = EL_PIPE_INV_BOUNCE;
+ } else {
+ dev_err(led->priv->dev, "Invalid hw_pattern for 0x%02x(%c)!",
+ led->reg, led->reg);
+
+ return -EINVAL;
+ }
+
+ return el15203000_cmd(led, cmd);
+}
+
+static int el15203000_pattern_clear(struct led_classdev *ldev)
+{
+ struct el15203000_led *led = container_of(ldev,
+ struct el15203000_led,
+ ldev);
+
+ return el15203000_cmd(led, EL_OFF);
+}
+
+static int el15203000_probe_dt(struct el15203000 *priv)
+{
+ struct el15203000_led *led = priv->leds;
+ struct fwnode_handle *child;
+ int ret;
+
+ device_for_each_child_node(priv->dev, child) {
+ struct led_init_data init_data = {};
+
+ ret = fwnode_property_read_u32(child, "reg", &led->reg);
+ if (ret) {
+ dev_err(priv->dev, "LED without ID number");
+ fwnode_handle_put(child);
+
+ break;
+ }
+
+ if (led->reg > U8_MAX) {
+ dev_err(priv->dev, "LED value %d is invalid", led->reg);
+ fwnode_handle_put(child);
+
+ return -EINVAL;
+ }
+
+ fwnode_property_read_string(child, "linux,default-trigger",
+ &led->ldev.default_trigger);
+
+ led->priv = priv;
+ led->ldev.max_brightness = LED_ON;
+ led->ldev.brightness_set_blocking = el15203000_set_blocking;
+
+ if (led->reg == 'S') {
+ led->ldev.pattern_set = el15203000_pattern_set_S;
+ led->ldev.pattern_clear = el15203000_pattern_clear;
+ } else if (led->reg == 'P') {
+ led->ldev.pattern_set = el15203000_pattern_set_P;
+ led->ldev.pattern_clear = el15203000_pattern_clear;
+ }
+
+ init_data.fwnode = child;
+ ret = devm_led_classdev_register_ext(priv->dev, &led->ldev,
+ &init_data);
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to register LED device %s, err %d",
+ led->ldev.name, ret);
+ fwnode_handle_put(child);
+
+ break;
+ }
+
+ led++;
+ }
+
+ return ret;
+}
+
+static int el15203000_probe(struct spi_device *spi)
+{
+ struct el15203000 *priv;
+ size_t count;
+
+ count = device_get_child_node_count(&spi->dev);
+ if (!count) {
+ dev_err(&spi->dev, "LEDs are not defined in device tree!");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&spi->dev, struct_size(priv, leds, count),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ priv->count = count;
+ priv->dev = &spi->dev;
+ priv->spi = spi;
+ priv->delay = jiffies -
+ usecs_to_jiffies(EL_FW_DELAY_USEC);
+
+ spi_set_drvdata(spi, priv);
+
+ return el15203000_probe_dt(priv);
+}
+
+static int el15203000_remove(struct spi_device *spi)
+{
+ struct el15203000 *priv = spi_get_drvdata(spi);
+
+ mutex_destroy(&priv->lock);
+
+ return 0;
+}
+
+static const struct of_device_id el15203000_dt_ids[] = {
+ { .compatible = "crane,el15203000", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, el15203000_dt_ids);
+
+static struct spi_driver el15203000_driver = {
+ .probe = el15203000_probe,
+ .remove = el15203000_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = el15203000_dt_ids,
+ },
+};
+
+module_spi_driver(el15203000_driver);
+
+MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
+MODULE_DESCRIPTION("el15203000 LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:el15203000");
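Putting the protocol tables and el15203000_cmd() together: every request is one LED-ID byte plus one command byte with at least EL_FW_DELAY_USEC between them, and the hardware patterns are recognized from the requested led_pattern rather than streamed. A hedged illustration (byte values taken from the tables above):

/* Sketch: a "Pipe cascade" request on the wire is two bytes with a
 * >= 20 ms gap: 0x50 ('P'), pause, 0x32 ('2'). The pattern that
 * el15203000_pattern_set_P() accepts for it is the 5-step sequence
 * 0x01, 0x02, 0x04, 0x08, 0x10 at 800 ms per step; bounce appends
 * the mirrored sequence 0x10 ... 0x01. */
static const u8 pipe_cascade_cmd[] = { 'P', '2' };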
diff --git a/drivers/leds/leds-lm3601x.c b/drivers/leds/leds-lm3601x.c
index b02972f1a341..fce89f2a2d92 100644
--- a/drivers/leds/leds-lm3601x.c
+++ b/drivers/leds/leds-lm3601x.c
@@ -350,8 +350,7 @@ static int lm3601x_register_leds(struct lm3601x_led *led,
init_data.devicename = led->client->name;
init_data.default_label = (led->led_mode == LM3601X_LED_TORCH) ?
"torch" : "infrared";
-
- return led_classdev_flash_register_ext(&led->client->dev,
+ return devm_led_classdev_flash_register_ext(&led->client->dev,
&led->fled_cdev, &init_data);
}
@@ -445,7 +444,6 @@ static int lm3601x_remove(struct i2c_client *client)
{
struct lm3601x_led *led = i2c_get_clientdata(client);
- led_classdev_flash_unregister(&led->fled_cdev);
mutex_destroy(&led->lock);
return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 3d381f2f73d0..8b408102e138 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -174,19 +174,20 @@ static int lm3692x_brightness_set(struct led_classdev *led_cdev,
ret = lm3692x_fault_check(led);
if (ret) {
- dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
+ ret);
goto out;
}
ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
if (ret) {
- dev_err(&led->client->dev, "Cannot write MSB\n");
+ dev_err(&led->client->dev, "Cannot write MSB: %d\n", ret);
goto out;
}
ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
if (ret) {
- dev_err(&led->client->dev, "Cannot write LSB\n");
+ dev_err(&led->client->dev, "Cannot write LSB: %d\n", ret);
goto out;
}
out:
@@ -197,13 +198,13 @@ out:
static int lm3692x_init(struct lm3692x_led *led)
{
int enable_state;
- int ret;
+ int ret, reg_ret;
if (led->regulator) {
ret = regulator_enable(led->regulator);
if (ret) {
dev_err(&led->client->dev,
- "Failed to enable regulator\n");
+ "Failed to enable regulator: %d\n", ret);
return ret;
}
}
@@ -213,7 +214,8 @@ static int lm3692x_init(struct lm3692x_led *led)
ret = lm3692x_fault_check(led);
if (ret) {
- dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ dev_err(&led->client->dev, "Cannot read/clear faults: %d\n",
+ ret);
goto out;
}
@@ -248,9 +250,9 @@ static int lm3692x_init(struct lm3692x_led *led)
goto out;
ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
- LM3692X_BRHT_MODE_RAMP_MULTI |
- LM3692X_BL_ADJ_POL |
- LM3692X_RAMP_RATE_250us);
+ LM3692X_BOOST_SW_1MHZ |
+ LM3692X_BOOST_SW_NO_SHIFT |
+ LM3692X_OCP_PROT_1_5A);
if (ret)
goto out;
@@ -267,7 +269,7 @@ static int lm3692x_init(struct lm3692x_led *led)
goto out;
ret = regmap_write(led->regmap, LM3692X_BRT_CTRL,
- LM3692X_BL_ADJ_POL | LM3692X_PWM_HYSTER_4LSB);
+ LM3692X_BL_ADJ_POL | LM3692X_RAMP_EN);
if (ret)
goto out;
@@ -311,14 +313,15 @@ out:
gpiod_direction_output(led->enable_gpio, 0);
if (led->regulator) {
- ret = regulator_disable(led->regulator);
- if (ret)
+ reg_ret = regulator_disable(led->regulator);
+ if (reg_ret)
dev_err(&led->client->dev,
- "Failed to disable regulator\n");
+ "Failed to disable regulator: %d\n", reg_ret);
}
return ret;
}
+
static int lm3692x_probe_dt(struct lm3692x_led *led)
{
struct fwnode_handle *child = NULL;
@@ -334,9 +337,18 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
return ret;
}
- led->regulator = devm_regulator_get(&led->client->dev, "vled");
- if (IS_ERR(led->regulator))
+ led->regulator = devm_regulator_get_optional(&led->client->dev, "vled");
+ if (IS_ERR(led->regulator)) {
+ ret = PTR_ERR(led->regulator);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&led->client->dev,
+ "Failed to get vled regulator: %d\n",
+ ret);
+ return ret;
+ }
led->regulator = NULL;
+ }
child = device_get_next_child_node(&led->client->dev, child);
if (!child) {
@@ -409,7 +421,8 @@ static int lm3692x_remove(struct i2c_client *client)
ret = regmap_update_bits(led->regmap, LM3692X_EN, LM3692X_DEVICE_EN, 0);
if (ret) {
- dev_err(&led->client->dev, "Failed to disable regulator\n");
+ dev_err(&led->client->dev, "Failed to disable regulator: %d\n",
+ ret);
return ret;
}
@@ -420,7 +433,7 @@ static int lm3692x_remove(struct i2c_client *client)
ret = regulator_disable(led->regulator);
if (ret)
dev_err(&led->client->dev,
- "Failed to disable regulator\n");
+ "Failed to disable regulator: %d\n", ret);
}
mutex_destroy(&led->lock);
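devm_regulator_get_optional() differs from devm_regulator_get() in that it returns -ENODEV instead of a dummy regulator when the supply is not described, so the driver must treat that one error as "absent but fine". A hedged sketch of the pattern used above (helper name hypothetical):

/* Sketch: -ENODEV means the supply simply is not wired; anything
 * else (including -EPROBE_DEFER) is propagated. */
static int my_get_optional_supply(struct device *dev,
				  struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, "vled");

	if (IS_ERR(reg)) {
		int ret = PTR_ERR(reg);

		if (ret != -ENODEV)
			return ret;
		reg = NULL;		/* absent supply is acceptable */
	}
	*out = reg;
	return 0;
}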
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index cabe379071a7..82aea1cd0c12 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -228,8 +228,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
brightness = LED_OFF;
led_data->base_color = MLXREG_LED_GREEN_SOLID;
}
- sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg",
- data->label);
+ snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name),
+ "mlxreg:%s", data->label);
led_cdev->name = led_data->led_cdev_name;
led_cdev->brightness = brightness;
led_cdev->max_brightness = LED_ON;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index c7c7199e8ebd..7d515d5e57bd 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -467,16 +467,11 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
{
struct pca9532_platform_data *pdata;
struct device_node *child;
- const struct of_device_id *match;
int devid, maxleds;
int i = 0;
const char *state;
- match = of_match_device(of_pca9532_leds_match, dev);
- if (!match)
- return ERR_PTR(-ENODEV);
-
- devid = (int)(uintptr_t)match->data;
+ devid = (int)(uintptr_t)of_device_get_match_data(dev);
maxleds = pca9532_chip_info_tbl[devid].num_leds;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -509,7 +504,6 @@ static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int devid;
- const struct of_device_id *of_id;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
@@ -525,11 +519,7 @@ static int pca9532_probe(struct i2c_client *client,
dev_err(&client->dev, "no platform data\n");
return -EINVAL;
}
- of_id = of_match_device(of_pca9532_leds_match,
- &client->dev);
- if (unlikely(!of_id))
- return -EINVAL;
- devid = (int)(uintptr_t) of_id->data;
+ devid = (int)(uintptr_t)of_device_get_match_data(&client->dev);
} else {
devid = id->driver_data;
}
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index 59ff088c7d75..a8911ebd30e5 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#define TLC591XX_MAX_LEDS 16
+#define TLC591XX_MAX_BRIGHTNESS 256
#define TLC591XX_REG_MODE1 0x00
#define MODE1_RESPON_ADDR_MASK 0xF0
@@ -112,11 +113,11 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev,
struct tlc591xx_priv *priv = led->priv;
int err;
- switch (brightness) {
+ switch ((int)brightness) {
case 0:
err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF);
break;
- case LED_FULL:
+ case TLC591XX_MAX_BRIGHTNESS:
err = tlc591xx_set_ledout(priv, led, LEDOUT_ON);
break;
default:
@@ -128,51 +129,6 @@ tlc591xx_brightness_set(struct led_classdev *led_cdev,
return err;
}
-static void
-tlc591xx_destroy_devices(struct tlc591xx_priv *priv, unsigned int j)
-{
- int i = j;
-
- while (--i >= 0) {
- if (priv->leds[i].active)
- led_classdev_unregister(&priv->leds[i].ldev);
- }
-}
-
-static int
-tlc591xx_configure(struct device *dev,
- struct tlc591xx_priv *priv,
- const struct tlc591xx *tlc591xx)
-{
- unsigned int i;
- int err = 0;
-
- tlc591xx_set_mode(priv->regmap, MODE2_DIM);
- for (i = 0; i < TLC591XX_MAX_LEDS; i++) {
- struct tlc591xx_led *led = &priv->leds[i];
-
- if (!led->active)
- continue;
-
- led->priv = priv;
- led->led_no = i;
- led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
- led->ldev.max_brightness = LED_FULL;
- err = led_classdev_register(dev, &led->ldev);
- if (err < 0) {
- dev_err(dev, "couldn't register LED %s\n",
- led->ldev.name);
- goto exit;
- }
- }
-
- return 0;
-
-exit:
- tlc591xx_destroy_devices(priv, i);
- return err;
-}
-
static const struct regmap_config tlc591xx_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -225,7 +181,16 @@ tlc591xx_probe(struct i2c_client *client,
i2c_set_clientdata(client, priv);
+ err = tlc591xx_set_mode(priv->regmap, MODE2_DIM);
+ if (err < 0)
+ return err;
+
for_each_child_of_node(np, child) {
+ struct tlc591xx_led *led;
+ struct led_init_data init_data = {};
+
+ init_data.fwnode = of_fwnode_handle(child);
+
err = of_property_read_u32(child, "reg", &reg);
if (err) {
of_node_put(child);
@@ -236,22 +201,24 @@ tlc591xx_probe(struct i2c_client *client,
of_node_put(child);
return -EINVAL;
}
- priv->leds[reg].active = true;
- priv->leds[reg].ldev.name =
- of_get_property(child, "label", NULL) ? : child->name;
- priv->leds[reg].ldev.default_trigger =
- of_get_property(child, "linux,default-trigger", NULL);
- }
- return tlc591xx_configure(dev, priv, tlc591xx);
-}
+ led = &priv->leds[reg];
-static int
-tlc591xx_remove(struct i2c_client *client)
-{
- struct tlc591xx_priv *priv = i2c_get_clientdata(client);
-
- tlc591xx_destroy_devices(priv, TLC591XX_MAX_LEDS);
+ led->active = true;
+ led->ldev.default_trigger =
+ of_get_property(child, "linux,default-trigger", NULL);
+ led->priv = priv;
+ led->led_no = reg;
+ led->ldev.brightness_set_blocking = tlc591xx_brightness_set;
+ led->ldev.max_brightness = TLC591XX_MAX_BRIGHTNESS;
+ err = devm_led_classdev_register_ext(dev, &led->ldev,
+ &init_data);
+ if (err < 0) {
+ dev_err(dev, "couldn't register LED %s\n",
+ led->ldev.name);
+ return err;
+ }
+ }
return 0;
}
@@ -268,7 +235,6 @@ static struct i2c_driver tlc591xx_driver = {
.of_match_table = of_match_ptr(of_tlc591xx_leds_match),
},
.probe = tlc591xx_probe,
- .remove = tlc591xx_remove,
.id_table = tlc591xx_id,
};
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 0b577cece8f7..2d9eb48bbed9 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -23,6 +23,12 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev,
enum led_brightness value);
void led_set_brightness_nosleep(struct led_classdev *led_cdev,
enum led_brightness value);
+ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count);
+ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count);
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 136f86a1627d..d5e774d83021 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -302,10 +302,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
container_of(nb, struct led_netdev_data, notifier);
if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
- && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
return NOTIFY_DONE;
if (!(dev == trigger_data->net_dev ||
+ (evt == NETDEV_CHANGENAME && !strcmp(dev->name, trigger_data->device_name)) ||
(evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
return NOTIFY_DONE;
@@ -315,6 +317,7 @@ static int netdev_trig_notify(struct notifier_block *nb,
clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
switch (evt) {
+ case NETDEV_CHANGENAME:
case NETDEV_REGISTER:
if (trigger_data->net_dev)
dev_put(trigger_data->net_dev);
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 400960cf04d5..b1314d104b06 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -147,7 +147,8 @@ static struct miscdevice anslcd_dev = {
&anslcd_fops
};
-const char anslcd_logo[] = "********************" /* Line #1 */
+static const char anslcd_logo[] __initconst =
+ "********************" /* Line #1 */
"* LINUX! *" /* Line #3 */
"* Welcome to *" /* Line #2 */
"********************"; /* Line #4 */
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 8b9eb56e4311..cc236ac7a0b5 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -354,7 +354,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
static struct platform_driver hi6220_mbox_driver = {
.driver = {
.name = "hi6220-mbox",
- .owner = THIS_MODULE,
.of_match_table = hi6220_mbox_of_match,
},
.probe = hi6220_mbox_probe,
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 9f74dee1a58c..2cdcdc5f1119 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -12,19 +12,11 @@
#include <linux/of_device.h>
#include <linux/slab.h>
-/* Transmit Register */
-#define IMX_MU_xTRn(x) (0x00 + 4 * (x))
-/* Receive Register */
-#define IMX_MU_xRRn(x) (0x10 + 4 * (x))
-/* Status Register */
-#define IMX_MU_xSR 0x20
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
#define IMX_MU_xSR_BRDIP BIT(9)
-/* Control Register */
-#define IMX_MU_xCR 0x24
/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
/* Receive Interrupt Enable */
@@ -44,6 +36,13 @@ enum imx_mu_chan_type {
IMX_MU_TYPE_RXDB, /* Rx doorbell */
};
+struct imx_mu_dcfg {
+ u32 xTR[4]; /* Transmit Registers */
+ u32 xRR[4]; /* Receive Registers */
+ u32 xSR; /* Status Register */
+ u32 xCR; /* Control Register */
+};
+
struct imx_mu_con_priv {
unsigned int idx;
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
@@ -61,12 +60,27 @@ struct imx_mu_priv {
struct mbox_chan mbox_chans[IMX_MU_CHANS];
struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
+ const struct imx_mu_dcfg *dcfg;
struct clk *clk;
int irq;
bool side_b;
};
+static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
+ .xTR = {0x0, 0x4, 0x8, 0xc},
+ .xRR = {0x10, 0x14, 0x18, 0x1c},
+ .xSR = 0x20,
+ .xCR = 0x24,
+};
+
+static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
+ .xTR = {0x20, 0x24, 0x28, 0x2c},
+ .xRR = {0x40, 0x44, 0x48, 0x4c},
+ .xSR = 0x60,
+ .xCR = 0x64,
+};
+
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
return container_of(mbox, struct imx_mu_priv, mbox);
@@ -88,10 +102,10 @@ static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
u32 val;
spin_lock_irqsave(&priv->xcr_lock, flags);
- val = imx_mu_read(priv, IMX_MU_xCR);
+ val = imx_mu_read(priv, priv->dcfg->xCR);
val &= ~clr;
val |= set;
- imx_mu_write(priv, val, IMX_MU_xCR);
+ imx_mu_write(priv, val, priv->dcfg->xCR);
spin_unlock_irqrestore(&priv->xcr_lock, flags);
return val;
@@ -111,8 +125,8 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
struct imx_mu_con_priv *cp = chan->con_priv;
u32 val, ctrl, dat;
- ctrl = imx_mu_read(priv, IMX_MU_xCR);
- val = imx_mu_read(priv, IMX_MU_xSR);
+ ctrl = imx_mu_read(priv, priv->dcfg->xCR);
+ val = imx_mu_read(priv, priv->dcfg->xSR);
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -138,10 +152,10 @@ static irqreturn_t imx_mu_isr(int irq, void *p)
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
mbox_chan_txdone(chan, 0);
} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
- dat = imx_mu_read(priv, IMX_MU_xRRn(cp->idx));
+ dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
mbox_chan_received_data(chan, (void *)&dat);
} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
- imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), IMX_MU_xSR);
+ imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
mbox_chan_received_data(chan, NULL);
} else {
dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
@@ -159,7 +173,7 @@ static int imx_mu_send_data(struct mbox_chan *chan, void *data)
switch (cp->type) {
case IMX_MU_TYPE_TX:
- imx_mu_write(priv, *arg, IMX_MU_xTRn(cp->idx));
+ imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
break;
case IMX_MU_TYPE_TXDB:
@@ -214,11 +228,24 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
- if (cp->type == IMX_MU_TYPE_TXDB)
+ if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ return;
+ }
- imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx) |
- IMX_MU_xCR_RIEn(cp->idx) | IMX_MU_xCR_GIEn(cp->idx));
+ switch (cp->type) {
+ case IMX_MU_TYPE_TX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RX:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
+ break;
+ case IMX_MU_TYPE_RXDB:
+ imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
+ break;
+ default:
+ break;
+ }
free_irq(priv->irq, chan);
}
@@ -257,7 +284,7 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
return;
/* Set default MU configuration */
- imx_mu_write(priv, 0, IMX_MU_xCR);
+ imx_mu_write(priv, 0, priv->dcfg->xCR);
}
static int imx_mu_probe(struct platform_device *pdev)
@@ -265,6 +292,7 @@ static int imx_mu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct imx_mu_priv *priv;
+ const struct imx_mu_dcfg *dcfg;
unsigned int i;
int ret;
@@ -282,6 +310,11 @@ static int imx_mu_probe(struct platform_device *pdev)
if (priv->irq < 0)
return priv->irq;
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+ priv->dcfg = dcfg;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
if (PTR_ERR(priv->clk) != -ENOENT)
@@ -335,7 +368,8 @@ static int imx_mu_remove(struct platform_device *pdev)
}
static const struct of_device_id imx_mu_dt_ids[] = {
- { .compatible = "fsl,imx6sx-mu" },
+ { .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
+ { .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
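The i.MX mailbox rework replaces fixed register #defines with a per-compatible struct imx_mu_dcfg resolved once via of_device_get_match_data(), the standard way to support SoCs whose blocks differ only in register layout. A hedged generic sketch (all my_*/vendor names invented):

/* Sketch: one layout table per compatible, picked at probe time. */
struct my_dcfg {
	u32 status;
	u32 ctrl;
};

static const struct my_dcfg my_cfg_a = { .status = 0x20, .ctrl = 0x24 };
static const struct my_dcfg my_cfg_b = { .status = 0x60, .ctrl = 0x64 };

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,block-a", .data = &my_cfg_a },
	{ .compatible = "vendor,block-b", .data = &my_cfg_b },
	{ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_dcfg *dcfg = of_device_get_match_data(&pdev->dev);

	if (!dcfg)
		return -EINVAL;
	/* read/write via dcfg->status and dcfg->ctrl from here on */
	return 0;
}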
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index a3cd63583cf7..5978a35aac6d 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -868,7 +868,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
ret = pm_runtime_put_sync(mdev->dev);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOSYS)
goto unregister;
devm_kfree(&pdev->dev, finfoblk);
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 5c2d1e1f988b..ef966887aa15 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -52,7 +52,6 @@ struct stm32_ipcc {
struct clk *clk;
spinlock_t lock; /* protect access to IPCC registers */
int irqs[IPCC_IRQ_NUM];
- int wkp;
u32 proc_id;
u32 n_chans;
u32 xcr;
@@ -282,16 +281,9 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
/* wakeup */
if (of_property_read_bool(np, "wakeup-source")) {
- ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
- if (ipcc->wkp < 0) {
- if (ipcc->wkp != -EPROBE_DEFER)
- dev_err(dev, "could not get wakeup IRQ\n");
- ret = ipcc->wkp;
- goto err_clk;
- }
-
device_set_wakeup_capable(dev, true);
- ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
+
+ ret = dev_pm_set_wake_irq(dev, ipcc->irqs[IPCC_IRQ_RX]);
if (ret) {
dev_err(dev, "Failed to set wake up irq\n");
goto err_init_wkp;
@@ -334,10 +326,10 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
return 0;
err_irq_wkp:
- if (ipcc->wkp)
+ if (of_property_read_bool(np, "wakeup-source"))
dev_pm_clear_wake_irq(dev);
err_init_wkp:
- device_init_wakeup(dev, false);
+ device_set_wakeup_capable(dev, false);
err_clk:
clk_disable_unprepare(ipcc->clk);
return ret;
@@ -345,27 +337,17 @@ err_clk:
static int stm32_ipcc_remove(struct platform_device *pdev)
{
- struct stm32_ipcc *ipcc = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
- if (ipcc->wkp)
+ if (of_property_read_bool(dev->of_node, "wakeup-source"))
dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
+ device_set_wakeup_capable(dev, false);
return 0;
}
#ifdef CONFIG_PM_SLEEP
-static void stm32_ipcc_set_irq_wake(struct device *dev, bool enable)
-{
- struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
- unsigned int i;
-
- if (device_may_wakeup(dev))
- for (i = 0; i < IPCC_IRQ_NUM; i++)
- irq_set_irq_wake(ipcc->irqs[i], enable);
-}
-
static int stm32_ipcc_suspend(struct device *dev)
{
struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
@@ -373,8 +355,6 @@ static int stm32_ipcc_suspend(struct device *dev)
ipcc->xmr = readl_relaxed(ipcc->reg_proc + IPCC_XMR);
ipcc->xcr = readl_relaxed(ipcc->reg_proc + IPCC_XCR);
- stm32_ipcc_set_irq_wake(dev, true);
-
return 0;
}
@@ -382,8 +362,6 @@ static int stm32_ipcc_resume(struct device *dev)
{
struct stm32_ipcc *ipcc = dev_get_drvdata(dev);
- stm32_ipcc_set_irq_wake(dev, false);
-
writel_relaxed(ipcc->xmr, ipcc->reg_proc + IPCC_XMR);
writel_relaxed(ipcc->xcr, ipcc->reg_proc + IPCC_XCR);
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 4c5ba35d48d4..834b35dc3b13 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
- err = platform_get_irq_byname(pdev, "doorbell");
+ err = platform_get_irq_byname_optional(pdev, "doorbell");
if (err >= 0)
hsp->doorbell_irq = err;
@@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- err = platform_get_irq_byname(pdev, name);
+ err = platform_get_irq_byname_optional(pdev, name);
if (err >= 0) {
hsp->shared_irqs[i] = err;
count++;
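platform_get_irq_byname_optional() behaves like platform_get_irq_byname() but stays silent when the interrupt is absent, which matters here because Tegra HSP instances legitimately lack some doorbell and shared interrupts. A hedged sketch (my_* helpers hypothetical):

/* Sketch: a missing optional IRQ is a configuration, not an error,
 * so nothing is logged for it. */
static int my_get_doorbell(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname_optional(pdev, "doorbell");

	if (irq < 0 && irq != -ENXIO)
		return irq;			/* real failure, propagate */
	if (irq > 0)
		my_enable_doorbell(irq);	/* hypothetical helper */
	return 0;
}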
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b72e82efaee5..38fbb3b59873 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -191,7 +191,7 @@ int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
return driver_register(&drv->driver);
}
-EXPORT_SYMBOL_GPL(__mcb_register_driver);
+EXPORT_SYMBOL_NS_GPL(__mcb_register_driver, MCB);
/**
* mcb_unregister_driver() - Unregister a @mcb_driver from the system
@@ -203,7 +203,7 @@ void mcb_unregister_driver(struct mcb_driver *drv)
{
driver_unregister(&drv->driver);
}
-EXPORT_SYMBOL_GPL(mcb_unregister_driver);
+EXPORT_SYMBOL_NS_GPL(mcb_unregister_driver, MCB);
static void mcb_release_dev(struct device *dev)
{
@@ -249,7 +249,7 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(mcb_device_register);
+EXPORT_SYMBOL_NS_GPL(mcb_device_register, MCB);
static void mcb_free_bus(struct device *dev)
{
@@ -301,7 +301,7 @@ err_free:
kfree(bus);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(mcb_alloc_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
static int __mcb_devices_unregister(struct device *dev, void *data)
{
@@ -323,7 +323,7 @@ void mcb_release_bus(struct mcb_bus *bus)
{
mcb_devices_unregister(bus);
}
-EXPORT_SYMBOL_GPL(mcb_release_bus);
+EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
/**
* mcb_bus_get() - Increment refcnt
@@ -338,7 +338,7 @@ struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
return bus;
}
-EXPORT_SYMBOL_GPL(mcb_bus_get);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_get, MCB);
/**
* mcb_bus_put() - Decrement refcnt
@@ -351,7 +351,7 @@ void mcb_bus_put(struct mcb_bus *bus)
if (bus)
put_device(&bus->dev);
}
-EXPORT_SYMBOL_GPL(mcb_bus_put);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_put, MCB);
/**
* mcb_alloc_dev() - Allocate a device
@@ -371,7 +371,7 @@ struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
return dev;
}
-EXPORT_SYMBOL_GPL(mcb_alloc_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_alloc_dev, MCB);
/**
* mcb_free_dev() - Free @mcb_device
@@ -383,7 +383,7 @@ void mcb_free_dev(struct mcb_device *dev)
{
kfree(dev);
}
-EXPORT_SYMBOL_GPL(mcb_free_dev);
+EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
@@ -412,7 +412,7 @@ void mcb_bus_add_devices(const struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
}
-EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
+EXPORT_SYMBOL_NS_GPL(mcb_bus_add_devices, MCB);
/**
* mcb_get_resource() - get a resource for a mcb device
@@ -428,7 +428,7 @@ struct resource *mcb_get_resource(struct mcb_device *dev, unsigned int type)
else
return NULL;
}
-EXPORT_SYMBOL_GPL(mcb_get_resource);
+EXPORT_SYMBOL_NS_GPL(mcb_get_resource, MCB);
/**
* mcb_request_mem() - Request memory
@@ -454,7 +454,7 @@ struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
return mem;
}
-EXPORT_SYMBOL_GPL(mcb_request_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
/**
* mcb_release_mem() - Release memory requested by device
@@ -469,7 +469,7 @@ void mcb_release_mem(struct resource *mem)
size = resource_size(mem);
release_mem_region(mem->start, size);
}
-EXPORT_SYMBOL_GPL(mcb_release_mem);
+EXPORT_SYMBOL_NS_GPL(mcb_release_mem, MCB);
static int __mcb_get_irq(struct mcb_device *dev)
{
@@ -495,7 +495,7 @@ int mcb_get_irq(struct mcb_device *dev)
return __mcb_get_irq(dev);
}
-EXPORT_SYMBOL_GPL(mcb_get_irq);
+EXPORT_SYMBOL_NS_GPL(mcb_get_irq, MCB);
static int mcb_init(void)
{
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index 8f1bde437a7e..506676754538 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -168,3 +168,4 @@ module_exit(mcb_lpc_exit);
MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over LPC support");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 3b69e6aa3d88..0266bfddfbe2 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -253,4 +253,4 @@ free_header:
return ret;
}
-EXPORT_SYMBOL_GPL(chameleon_parse_cells);
+EXPORT_SYMBOL_NS_GPL(chameleon_parse_cells, MCB);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 14866aa22f75..dc88232d9af8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -131,3 +131,4 @@ module_pci_driver(mcb_pci_driver);
MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over PCI support");
+MODULE_IMPORT_NS(MCB);
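All MCB core exports now live in the MCB symbol namespace, so any module linking against them must state the dependency explicitly, which is exactly what the one-line additions to mcb-lpc.c and mcb-pci.c do. A hedged two-module sketch (my_helper invented):

/* Provider module: the symbol is only reachable through the MCB
 * namespace. */
int my_helper(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(my_helper, MCB);

/* Consumer module: without this line, modpost warns at build time and
 * the symbol fails to resolve at module load time. */
MODULE_IMPORT_NS(MCB);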
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 5ef7daeb8cbd..9340435a94a0 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -319,6 +319,8 @@ static void cec_post_state_event(struct cec_adapter *adap)
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event(adap, &ev);
}
@@ -1976,7 +1978,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Play function; this message can have variable length
* depending on the specific play function that is used.
*/
- case 0x60:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
if (msg->len == 2)
rc_keydown(adap->rc, RC_PROTO_CEC,
msg->msg[2], 0);
@@ -1993,8 +1995,12 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* For the time being these messages are not processed by the
* framework and are simply forwarded to user space.
*/
- case 0x56: case 0x57:
- case 0x67: case 0x68: case 0x69: case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
break;
default:
rc_keydown(adap->rc, RC_PROTO_CEC, msg->msg[2], 0);
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 12d676484472..17d1cb2e5f97 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -187,6 +187,21 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return 0;
}
+static long cec_adap_g_connector_info(struct cec_adapter *adap,
+				      struct cec_connector_info __user *parg)
+{
+ int ret = 0;
+
+ if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
+ return -ENOTTY;
+
+ mutex_lock(&adap->lock);
+ if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
+ ret = -EFAULT;
+ mutex_unlock(&adap->lock);
+ return ret;
+}
+
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_msg __user *parg)
{
@@ -506,6 +521,9 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case CEC_ADAP_S_LOG_ADDRS:
return cec_adap_s_log_addrs(adap, fh, block, parg);
+ case CEC_ADAP_G_CONNECTOR_INFO:
+ return cec_adap_g_connector_info(adap, parg);
+
case CEC_TRANSMIT:
return cec_transmit(adap, fh, block, parg);
@@ -578,6 +596,8 @@ static int cec_open(struct inode *inode, struct file *filp)
/* Queue up initial state events */
ev.state_change.phys_addr = adap->phys_addr;
ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ ev.state_change.have_conn_info =
+ adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
if (adap->pin && adap->pin->ops->read_hpd) {
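Taken together with the cec-core.c hunk below that stops masking
CEC_CAP_CONNECTOR_INFO, the new ioctl is usable from userspace. A hedged
sketch (the /dev/cec0 path is an assumption):

/* Query connector info if the adapter advertises the capability. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_caps caps;
	struct cec_connector_info conn;
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0 || ioctl(fd, CEC_ADAP_G_CAPS, &caps) < 0)
		return 1;
	if ((caps.capabilities & CEC_CAP_CONNECTOR_INFO) &&
	    ioctl(fd, CEC_ADAP_G_CONNECTOR_INFO, &conn) == 0)
		printf("connector type: %u\n", conn.type);
	return 0;
}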
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 9c610e1e99b8..db7adffcdc76 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -257,11 +257,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
struct cec_adapter *adap;
int res;
- /*
- * Disable this capability until the connector info public API
- * is ready.
- */
- caps &= ~CEC_CAP_CONNECTOR_INFO;
#ifndef CONFIG_MEDIA_CEC_RC
caps &= ~CEC_CAP_RC;
#endif
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 4d82a5522072..7cf42b133dbc 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -153,13 +153,14 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
}
EXPORT_SYMBOL_GPL(cec_notifier_cec_adap_register);
-void cec_notifier_cec_adap_unregister(struct cec_notifier *n)
+void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap)
{
if (!n)
return;
mutex_lock(&n->lock);
- n->cec_adap->notifier = NULL;
+ adap->notifier = NULL;
n->cec_adap = NULL;
n->callback = NULL;
mutex_unlock(&n->lock);
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 8f987bc0dd88..660fe111f540 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -1279,6 +1279,15 @@ static void cec_pin_adap_free(struct cec_adapter *adap)
kfree(pin);
}
+static int cec_pin_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct cec_pin *pin = adap->pin;
+
+ if (pin->ops->received)
+ return pin->ops->received(adap, msg);
+ return -ENOMSG;
+}
+
void cec_pin_changed(struct cec_adapter *adap, bool value)
{
struct cec_pin *pin = adap->pin;
@@ -1301,6 +1310,7 @@ static const struct cec_adap_ops cec_pin_adap_ops = {
.error_inj_parse_line = cec_pin_error_inj_parse_line,
.error_inj_show = cec_pin_error_inj_show,
#endif
+ .received = cec_pin_received,
};
struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
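The new .received hook gives pin drivers first refusal on incoming messages.
A sketch from the pin-driver side (names hypothetical): returning -ENOMSG
hands the message back to the framework, any other value claims it.

#include <media/cec-pin.h>

static int demo_pin_received(struct cec_adapter *adap, struct cec_msg *msg)
{
	/* e.g. consume vendor commands locally, pass everything else on */
	if (msg->len >= 2 && msg->msg[1] == CEC_MSG_VENDOR_COMMAND)
		return 0;
	return -ENOMSG;
}

/* ...and set .received = demo_pin_received in the driver's cec_pin_ops. */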
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index 0ba51dacc580..c1511094fdc7 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -230,8 +230,8 @@ static char *siano_msgs[] = {
[MSG_SMS_FLASH_DL_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_FLASH_DL_REQ",
[MSG_SMS_EXEC_TEST_1_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_REQ",
[MSG_SMS_EXEC_TEST_1_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_EXEC_TEST_1_RES",
- [MSG_SMS_ENBALE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_REQ",
- [MSG_SMS_ENBALE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENBALE_TS_INTERFACE_RES",
+ [MSG_SMS_ENABLE_TS_INTERFACE_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_REQ",
+ [MSG_SMS_ENABLE_TS_INTERFACE_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_ENABLE_TS_INTERFACE_RES",
[MSG_SMS_SPI_SET_BUS_WIDTH_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_REQ",
[MSG_SMS_SPI_SET_BUS_WIDTH_RES - MSG_TYPE_BASE_VAL] = "MSG_SMS_SPI_SET_BUS_WIDTH_RES",
[MSG_SMS_SEND_EMM_REQ - MSG_TYPE_BASE_VAL] = "MSG_SMS_SEND_EMM_REQ",
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index a2f95f4899c2..b3b793b5caf3 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -434,8 +434,8 @@ enum msg_types {
MSG_SMS_FLASH_DL_REQ = 732,
MSG_SMS_EXEC_TEST_1_REQ = 734,
MSG_SMS_EXEC_TEST_1_RES = 735,
- MSG_SMS_ENBALE_TS_INTERFACE_REQ = 736,
- MSG_SMS_ENBALE_TS_INTERFACE_RES = 737,
+ MSG_SMS_ENABLE_TS_INTERFACE_REQ = 736,
+ MSG_SMS_ENABLE_TS_INTERFACE_RES = 737,
MSG_SMS_SPI_SET_BUS_WIDTH_REQ = 738,
MSG_SMS_SPI_SET_BUS_WIDTH_RES = 739,
MSG_SMS_SEND_EMM_REQ = 740,
diff --git a/drivers/media/common/siano/smsir.h b/drivers/media/common/siano/smsir.h
index b2c54c256e86..ada41d5c4e83 100644
--- a/drivers/media/common/siano/smsir.h
+++ b/drivers/media/common/siano/smsir.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
*
* Siano Mobile Silicon, Inc.
* MDTV receiver kernel modules.
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 5a9ba3846f0a..e652f4318284 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -49,8 +49,11 @@ module_param(debug, int, 0644);
V4L2_BUF_FLAG_REQUEST_FD | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
-#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
- V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | \
+ V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | \
+ V4L2_BUF_FLAG_TIMECODE | \
+ V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)
/*
* __verify_planes_array() - verify that the planes array passed in struct
@@ -194,6 +197,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
vbuf->sequence = 0;
vbuf->request_fd = -1;
+ vbuf->is_held = false;
if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
switch (b->memory) {
@@ -321,6 +325,8 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
*/
vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
vbuf->field = b->field;
+ if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
} else {
/* Zero any output buffer flags as this is a capture buffer */
vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
@@ -654,6 +660,8 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
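The new queue capability is reported to userspace via VIDIOC_REQBUFS. A
hedged sketch of the corresponding check (device fd and buffer type are
assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int supports_hold_capture_buf(int fd)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return 0;
	return !!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
}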
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 6f7eedb4c00e..0ba382948c51 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -298,7 +298,7 @@ int cxd2820r_sleep_c(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t.c b/drivers/media/dvb-frontends/cxd2820r_t.c
index d56c6f788196..fbdfa6bf38dc 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t.c
@@ -392,7 +392,7 @@ int cxd2820r_sleep_t(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+	static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2820r_t2.c b/drivers/media/dvb-frontends/cxd2820r_t2.c
index f924a80b968a..34ef2bb2de34 100644
--- a/drivers/media/dvb-frontends/cxd2820r_t2.c
+++ b/drivers/media/dvb-frontends/cxd2820r_t2.c
@@ -386,7 +386,7 @@ int cxd2820r_sleep_t2(struct dvb_frontend *fe)
struct cxd2820r_priv *priv = fe->demodulator_priv;
struct i2c_client *client = priv->client[0];
int ret;
- struct reg_val_mask tab[] = {
+ static const struct reg_val_mask tab[] = {
{ 0x000ff, 0x1f, 0xff },
{ 0x00085, 0x00, 0xff },
{ 0x00088, 0x01, 0xff },
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 1b30cf570803..758c95bc3b11 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -60,6 +60,7 @@ struct cxd2841er_priv {
enum cxd2841er_xtal xtal;
enum fe_caps caps;
u32 flags;
+ unsigned long stats_time;
};
static const struct cxd2841er_cnr_data s_cn_data[] = {
@@ -3279,9 +3280,15 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
if (status & FE_HAS_LOCK) {
+ if (priv->stats_time &&
+ (!time_after(jiffies, priv->stats_time)))
+ return 0;
+
+ /* Prevent retrieving stats faster than once per second */
+ priv->stats_time = jiffies + msecs_to_jiffies(1000);
+
cxd2841er_read_snr(fe);
cxd2841er_read_ucblocks(fe);
-
cxd2841er_read_ber(fe);
} else {
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
@@ -3360,6 +3367,9 @@ done:
p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	/* Reset the stats throttle so the next poll happens immediately */
+ priv->stats_time = 0;
+
return ret;
}
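The two cxd2841er hunks implement a once-per-second gate on stats reads
using jiffies. The same pattern in isolation (names hypothetical; a zero
deadline means "poll immediately", exactly like the reset in the second hunk):

#include <linux/jiffies.h>

static unsigned long next_stats_poll;

static bool stats_poll_due(void)
{
	if (next_stats_poll && !time_after(jiffies, next_stats_poll))
		return false;	/* throttled */
	next_stats_poll = jiffies + msecs_to_jiffies(1000);
	return true;
}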
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 2f5af4813a74..ac7be872f460 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4201,7 +4201,7 @@ int drxj_dap_scu_atomic_read_reg16(struct i2c_device_addr *dev_addr,
u16 *data, u32 flags)
{
u8 buf[2] = { 0 };
- int rc = -EIO;
+ int rc;
u16 word = 0;
if (!data)
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 4e50441c247a..a7faf0cf8788 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -517,7 +517,7 @@ static void mb86a20s_reset_frontend_cache(struct dvb_frontend *fe)
* Estimates the bit rate using the per-segment bit rate given by
* ABNT/NBR 15601 spec (table 4).
*/
-static u32 isdbt_rate[3][5][4] = {
+static const u32 isdbt_rate[3][5][4] = {
{ /* DQPSK/QPSK */
{ 280850, 312060, 330420, 340430 }, /* 1/2 */
{ 374470, 416080, 440560, 453910 }, /* 2/3 */
@@ -539,13 +539,9 @@ static u32 isdbt_rate[3][5][4] = {
}
};
-static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
- u32 modulation, u32 forward_error_correction,
- u32 guard_interval,
- u32 segment)
+static u32 isdbt_layer_min_bitrate(struct dtv_frontend_properties *c,
+ u32 layer)
{
- struct mb86a20s_state *state = fe->demodulator_priv;
- u32 rate;
int mod, fec, guard;
/*
@@ -553,7 +549,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
* to consider the lowest bit rate, to avoid taking too long time
* to get BER.
*/
- switch (modulation) {
+ switch (c->layer[layer].modulation) {
case DQPSK:
case QPSK:
default:
@@ -567,7 +563,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (forward_error_correction) {
+ switch (c->layer[layer].fec) {
default:
case FEC_1_2:
case FEC_AUTO:
@@ -587,7 +583,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- switch (guard_interval) {
+ switch (c->guard_interval) {
default:
case GUARD_INTERVAL_1_4:
guard = 0;
@@ -603,29 +599,14 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer,
break;
}
- /* Samples BER at BER_SAMPLING_RATE seconds */
- rate = isdbt_rate[mod][fec][guard] * segment * BER_SAMPLING_RATE;
-
- /* Avoids sampling too quickly or to overflow the register */
- if (rate < 256)
- rate = 256;
- else if (rate > (1 << 24) - 1)
- rate = (1 << 24) - 1;
-
- dev_dbg(&state->i2c->dev,
- "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
- __func__, 'A' + layer,
- segment * isdbt_rate[mod][fec][guard]/1000,
- rate, rate);
-
- state->estimated_rate[layer] = rate;
+ return isdbt_rate[mod][fec][guard] * c->layer[layer].segment_count;
}
static int mb86a20s_get_frontend(struct dvb_frontend *fe)
{
struct mb86a20s_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int layer, rc;
+ int layer, rc, rate, counter;
dev_dbg(&state->i2c->dev, "%s called.\n", __func__);
@@ -676,10 +657,21 @@ static int mb86a20s_get_frontend(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev, "%s: interleaving %d.\n",
__func__, rc);
c->layer[layer].interleaving = rc;
- mb86a20s_layer_bitrate(fe, layer, c->layer[layer].modulation,
- c->layer[layer].fec,
- c->guard_interval,
- c->layer[layer].segment_count);
+
+ rate = isdbt_layer_min_bitrate(c, layer);
+ counter = rate * BER_SAMPLING_RATE;
+
+	/* Avoid sampling too quickly or overflowing the register */
+ if (counter < 256)
+ counter = 256;
+ else if (counter > (1 << 24) - 1)
+ counter = (1 << 24) - 1;
+
+ dev_dbg(&state->i2c->dev,
+ "%s: layer %c bitrate: %d kbps; counter = %d (0x%06x)\n",
+ __func__, 'A' + layer, rate / 1000, counter, counter);
+
+ state->estimated_rate[layer] = counter;
}
rc = mb86a20s_writereg(state, 0x6d, 0x84);
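For reference, a worked instance of the lookup the new helper performs,
using values from the table above (and assuming the driver's BER sampling
rate of one second):

/*
 * A 13-segment QPSK layer with FEC 2/3 and guard interval 1/4 uses
 * isdbt_rate[0][1][0] = 374470 bit/s per segment, i.e.
 *
 *	374470 * 13 = 4868110 bit/s
 *
 * so the resulting counter, 4868110, already sits inside the valid
 * [256, (1 << 24) - 1] register window and needs no clamping.
 */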
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index 7cae7d632030..d43a67045dbe 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -135,11 +135,6 @@ static inline int mt312_writereg(struct mt312_state *state,
return mt312_write(state, reg, &tmp, 1);
}
-static inline u32 mt312_div(u32 a, u32 b)
-{
- return (a + (b / 2)) / b;
-}
-
static int mt312_reset(struct mt312_state *state, const u8 full)
{
return mt312_writereg(state, RESET, full ? 0x80 : 0x40);
@@ -187,7 +182,7 @@ static int mt312_get_symbol_rate(struct mt312_state *state, u32 *sr)
monitor = (buf[0] << 8) | buf[1];
dprintk("sr(auto) = %u\n",
- mt312_div(monitor * 15625, 4));
+ DIV_ROUND_CLOSEST(monitor * 15625, 4));
} else {
ret = mt312_writereg(state, MON_CTRL, 0x05);
if (ret < 0)
@@ -291,10 +286,10 @@ static int mt312_initfe(struct dvb_frontend *fe)
}
/* SYS_CLK */
- buf[0] = mt312_div(state->xtal * state->freq_mult * 2, 1000000);
+ buf[0] = DIV_ROUND_CLOSEST(state->xtal * state->freq_mult * 2, 1000000);
/* DISEQC_RATIO */
- buf[1] = mt312_div(state->xtal, 22000 * 4);
+ buf[1] = DIV_ROUND_CLOSEST(state->xtal, 22000 * 4);
ret = mt312_write(state, SYS_CLK, buf, sizeof(buf));
if (ret < 0)
@@ -610,7 +605,7 @@ static int mt312_set_frontend(struct dvb_frontend *fe)
}
/* sr = (u16)(sr * 256.0 / 1000000.0) */
- sr = mt312_div(p->symbol_rate * 4, 15625);
+ sr = DIV_ROUND_CLOSEST(p->symbol_rate * 4, 15625);
/* SYM_RATE */
buf[0] = (sr >> 8) & 0x3f;
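The removed mt312_div() was the standard round-to-nearest idiom, which is
exactly what DIV_ROUND_CLOSEST() expands to for unsigned operands, so this
is a behaviour-preserving cleanup. A quick equivalence sketch:

#include <linux/kernel.h>

static inline u32 old_mt312_div(u32 a, u32 b)
{
	return (a + (b / 2)) / b;
}

/* e.g. for a 27.5 MS/s symbol rate:
 * old_mt312_div(27500000 * 4, 15625) ==
 * DIV_ROUND_CLOSEST(27500000 * 4, 15625) == 7040
 */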
diff --git a/drivers/media/dvb-frontends/si2168.h b/drivers/media/dvb-frontends/si2168.h
index 50dccb394efa..ecd21adf8950 100644
--- a/drivers/media/dvb-frontends/si2168.h
+++ b/drivers/media/dvb-frontends/si2168.h
@@ -9,38 +9,43 @@
#define SI2168_H
#include <linux/dvb/frontend.h>
-/*
- * I2C address
- * 0x64
+/**
+ * struct si2168_config - configuration parameters for si2168
+ *
+ * @fe:
+ * frontend returned by driver
+ * @i2c_adapter:
+ * tuner I2C adapter returned by driver
+ * @ts_mode:
+ * Transport Stream mode. Can be:
+ * - %SI2168_TS_PARALLEL
+ * - %SI2168_TS_SERIAL
+ * - %SI2168_TS_TRISTATE
+ * - %SI2168_TS_CLK_MANUAL
+ * @ts_clock_inv:
+ * TS clock inverted
+ * @ts_clock_gapped:
+ * TS clock gapped
+ * @spectral_inversion:
+ * Inverted spectrum
+ *
+ * Note:
+ * The I2C address of this demod is 0x64.
*/
struct si2168_config {
- /*
- * frontend
- * returned by driver
- */
struct dvb_frontend **fe;
-
- /*
- * tuner I2C adapter
- * returned by driver
- */
struct i2c_adapter **i2c_adapter;
- /* TS mode */
#define SI2168_TS_PARALLEL 0x06
#define SI2168_TS_SERIAL 0x03
#define SI2168_TS_TRISTATE 0x00
#define SI2168_TS_CLK_MANUAL 0x20
u8 ts_mode;
- /* TS clock inverted */
- bool ts_clock_inv;
-
- /* TS clock gapped */
- bool ts_clock_gapped;
-
- /* Inverted spectrum */
- bool spectral_inversion;
+ /* Flags */
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
#endif
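Only the documentation and the flag storage change here; bridge drivers fill
the config the same way as before. A hedged attach sketch (helper name and
bus wiring hypothetical, error handling elided):

#include <linux/i2c.h>
#include "si2168.h"

static int demo_attach_si2168(struct i2c_adapter *bus,
			      struct dvb_frontend **fe,
			      struct i2c_adapter **tuner_i2c)
{
	struct si2168_config cfg = {
		.fe = fe,
		.i2c_adapter = tuner_i2c,
		.ts_mode = SI2168_TS_PARALLEL,
		.ts_clock_inv = 1,	/* one-bit bitfield now, not bool */
	};
	struct i2c_board_info info = {
		I2C_BOARD_INFO("si2168", 0x64),	/* fixed demod address */
		.platform_data = &cfg,
	};

	return i2c_new_device(bus, &info) ? 0 : -ENODEV;
}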
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 804d5b30c697..18bea5222082 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -34,12 +34,12 @@ struct si2168_dev {
unsigned int chip_id;
unsigned int version;
const char *firmware_name;
- bool active;
- bool warm;
u8 ts_mode;
- bool ts_clock_inv;
- bool ts_clock_gapped;
- bool spectral_inversion;
+ unsigned int active:1;
+ unsigned int warm:1;
+ unsigned int ts_clock_inv:1;
+ unsigned int ts_clock_gapped:1;
+ unsigned int spectral_inversion:1;
};
/* firmware command struct */
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 849d63dbc279..e83836b29715 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -685,10 +685,33 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
p += new_msgs[j].len;
}
- if (i < num)
+ if (i < num) {
ret = -ENOMEM;
- else
+ } else if (!state->cfg.split_tuner_read_i2c || rd_num == 0) {
ret = i2c_transfer(state->i2c_client->adapter, new_msgs, j);
+ } else {
+ /*
+	 * Some parent devices require this,
+	 * e.g. Friio (see dvb-usb-gl861).
+ * such as Friio (see. dvb-usb-gl861).
+ */
+ int from, to;
+
+ ret = 0;
+ from = 0;
+ do {
+ int r;
+
+ to = from + 1;
+ while (to < j && !(new_msgs[to].flags & I2C_M_RD))
+ to++;
+ r = i2c_transfer(state->i2c_client->adapter,
+ &new_msgs[from], to - from);
+ ret = (r <= 0) ? r : ret + r;
+ from = to;
+ } while (from < j && ret > 0);
+ }
+
if (ret >= 0 && ret < j)
ret = -EIO;
kfree(new_msgs);
diff --git a/drivers/media/dvb-frontends/tc90522.h b/drivers/media/dvb-frontends/tc90522.h
index ac0e2ab51924..07e3813bf590 100644
--- a/drivers/media/dvb-frontends/tc90522.h
+++ b/drivers/media/dvb-frontends/tc90522.h
@@ -28,6 +28,9 @@ struct tc90522_config {
/* [OUT] tuner I2C adapter returned by driver */
struct i2c_adapter *tuner_i2c;
+
+ /* [IN] use two separate I2C transactions for one tuner read */
+ bool split_tuner_read_i2c;
};
#endif /* TC90522_H */
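Carriers whose bus master cannot chain the tuner write and read into a
single transfer just set the new flag; existing users are unaffected by the
default. A hypothetical carrier-side sketch:

#include "tc90522.h"

static void demo_fill_tc90522_cfg(struct tc90522_config *cfg)
{
	/* parent (e.g. dvb-usb-gl861) cannot do combined write+read */
	cfg->split_tuner_read_i2c = true;
}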
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 7eee1812bba3..c68e002d26ea 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -566,10 +566,23 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+if MEDIA_CAMERA_SUPPORT
+
+config VIDEO_HI556
+ tristate "Hynix Hi-556 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-556 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi556.
+
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
@@ -581,7 +594,6 @@ config VIDEO_IMX214
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -592,16 +604,25 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
+config VIDEO_IMX290
+ tristate "Sony IMX290 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX290 camera sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx290.
+
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX319 camera.
@@ -612,7 +633,6 @@ config VIDEO_IMX319
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Sony
IMX355 camera.
@@ -623,7 +643,6 @@ config VIDEO_IMX355
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
@@ -633,8 +652,7 @@ config VIDEO_OV2640
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
- depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
+ depends on VIDEO_V4L2 && I2C && GPIOLIB
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -646,7 +664,6 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -658,7 +675,6 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -671,7 +687,6 @@ config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -681,7 +696,6 @@ config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -693,7 +707,6 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -705,7 +718,6 @@ config VIDEO_OV5647
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
@@ -716,7 +728,6 @@ config VIDEO_OV6650
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -729,7 +740,6 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
depends on MEDIA_CONTROLLER
select V4L2_FWNODE
help
@@ -742,7 +752,7 @@ config VIDEO_OV5675
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
@@ -753,7 +763,6 @@ config VIDEO_OV5695
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -765,7 +774,6 @@ config VIDEO_OV7251
config VIDEO_OV772X
tristate "OmniVision OV772x sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -777,7 +785,6 @@ config VIDEO_OV772X
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -788,7 +795,6 @@ config VIDEO_OV7640
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -798,7 +804,6 @@ config VIDEO_OV7670
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7740 VGA camera sensor.
@@ -806,7 +811,6 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -833,7 +837,6 @@ config VIDEO_OV9650
config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -842,7 +845,6 @@ config VIDEO_OV13858
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the ST VS6624
camera.
@@ -853,7 +855,6 @@ config VIDEO_VS6624
config VIDEO_MT9M001
tristate "mt9m001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports MT9M001 cameras from Micron, monochrome
and colour models.
@@ -861,7 +862,6 @@ config VIDEO_MT9M001
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This driver supports MT9M032 camera sensors from Aptina, monochrome
@@ -878,7 +878,6 @@ config VIDEO_MT9M111
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
help
This is a Video4Linux2 sensor driver for the Aptina
@@ -887,7 +886,6 @@ config VIDEO_MT9P031
config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9t001 3 Mpixel camera.
@@ -895,7 +893,6 @@ config VIDEO_MT9T001
config VIDEO_MT9T112
tristate "Aptina MT9T111/MT9T112 support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
@@ -906,7 +903,6 @@ config VIDEO_MT9T112
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Micron
mt9v011 1.3 Mpixel camera. It currently only works with the
@@ -915,7 +911,6 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
select V4L2_FWNODE
help
@@ -925,7 +920,6 @@ config VIDEO_MT9V032
config VIDEO_MT9V111
tristate "Aptina MT9V111 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a Video4Linux2 sensor driver for the Aptina/Micron
MT9V111 sensor.
@@ -936,14 +930,12 @@ config VIDEO_MT9V111
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports SR030PC30 VGA camera from Siliconfile
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- depends on MEDIA_CAMERA_SUPPORT
help
This driver supports NOON010PC30 CIF camera from Siliconfile
@@ -952,7 +944,6 @@ source "drivers/media/i2c/m5mols/Kconfig"
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
depends on I2C && VIDEO_V4L2
- depends on MEDIA_CAMERA_SUPPORT
help
This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
sensor.
@@ -962,7 +953,6 @@ config VIDEO_RJ54N1
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6AA(FX) 1.3M
@@ -970,7 +960,6 @@ config VIDEO_S5K6AA
config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
- depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
@@ -1002,12 +991,15 @@ config VIDEO_S5C73M3
help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
+endif
comment "Lens drivers"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_AD5820
tristate "AD5820 lens voice coil support"
- depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on GPIOLIB && I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
help
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
@@ -1042,12 +1034,15 @@ config VIDEO_DW9807_VCM
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+endif
+
comment "Flash devices"
+if MEDIA_CAMERA_SUPPORT
+
config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
help
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -1055,7 +1050,6 @@ config VIDEO_ADP1653
config VIDEO_LM3560
tristate "LM3560 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3560 dual flash controllers. It controls
@@ -1064,12 +1058,13 @@ config VIDEO_LM3560
config VIDEO_LM3646
tristate "LM3646 dual flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
- depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
help
This is a driver for the lm3646 dual flash controllers. It controls
flash, torch LEDs.
+endif
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -1113,6 +1108,7 @@ comment "SDR tuner chips"
config SDR_MAX2175
tristate "Maxim 2175 RF to Bits tuner"
depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
+ select REGMAP_I2C
help
Support for Maxim 2175 tuner. It is an advanced analog/digital
radio receiver with RF-to-Bits front-end designed for SDR solutions.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index beb170b002dc..c147bb9d28db 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -109,9 +109,11 @@ obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
+obj-$(CONFIG_VIDEO_IMX290) += imx290.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
obj-$(CONFIG_VIDEO_IMX355) += imx355.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 925c171e7797..19c74db0649f 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -19,13 +19,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
-#define AD5820_NAME "ad5820"
-
/* Register definitions */
#define AD5820_POWER_DOWN (1 << 15)
#define AD5820_DAC_SHIFT 4
@@ -47,6 +46,8 @@ struct ad5820_device {
u32 focus_ramp_time;
u32 focus_ramp_mode;
+ struct gpio_desc *enable_gpio;
+
struct mutex power_lock;
int power_count;
@@ -114,6 +115,8 @@ static int ad5820_power_off(struct ad5820_device *coil, bool standby)
ret = ad5820_update_hw(coil);
}
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
+
ret2 = regulator_disable(coil->vana);
if (ret)
return ret;
@@ -128,6 +131,8 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
if (ret < 0)
return ret;
+ gpiod_set_value_cansleep(coil->enable_gpio, 1);
+
if (restore) {
/* Restore the hardware settings. */
coil->standby = false;
@@ -138,6 +143,7 @@ static int ad5820_power_on(struct ad5820_device *coil, bool restore)
return 0;
fail:
+ gpiod_set_value_cansleep(coil->enable_gpio, 0);
coil->standby = true;
regulator_disable(coil->vana);
@@ -304,11 +310,21 @@ static int ad5820_probe(struct i2c_client *client,
return ret;
}
+ coil->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(coil->enable_gpio)) {
+ ret = PTR_ERR(coil->enable_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "could not get enable gpio\n");
+ return ret;
+ }
+
mutex_init(&coil->power_lock);
v4l2_i2c_subdev_init(&coil->subdev, client, &ad5820_ops);
coil->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
coil->subdev.internal_ops = &ad5820_internal_ops;
+ coil->subdev.entity.function = MEDIA_ENT_F_LENS;
strscpy(coil->subdev.name, "ad5820 focus", sizeof(coil->subdev.name));
ret = media_entity_pads_init(&coil->subdev.entity, 0, NULL);
@@ -341,17 +357,28 @@ static int ad5820_remove(struct i2c_client *client)
}
static const struct i2c_device_id ad5820_id_table[] = {
- { AD5820_NAME, 0 },
+ { "ad5820", 0 },
+ { "ad5821", 0 },
+ { "ad5823", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
+static const struct of_device_id ad5820_of_table[] = {
+ { .compatible = "adi,ad5820" },
+ { .compatible = "adi,ad5821" },
+ { .compatible = "adi,ad5823" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5820_of_table);
+
static SIMPLE_DEV_PM_OPS(ad5820_pm, ad5820_suspend, ad5820_resume);
static struct i2c_driver ad5820_i2c_driver = {
.driver = {
- .name = AD5820_NAME,
+ .name = "ad5820",
.pm = &ad5820_pm,
+ .of_match_table = ad5820_of_table,
},
.probe = ad5820_probe,
.remove = ad5820_remove,
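The enable-GPIO handling above follows the usual optional-GPIO probe
pattern. Isolated for clarity (function name hypothetical): a NULL
descriptor is returned when the GPIO is simply absent, which makes the
later gpiod_set_value_cansleep() calls no-ops.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_enable_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpio)) {
		/* stay quiet on probe deferral; it is not an error yet */
		if (PTR_ERR(gpio) != -EPROBE_DEFER)
			dev_err(dev, "could not get enable gpio\n");
		return PTR_ERR(gpio);
	}
	*out = gpio;	/* may be NULL if the GPIO is simply absent */
	return 0;
}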
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index e780969cc2f2..6528e2343fc8 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1309,9 +1309,6 @@ static int adv7180_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- v4l_info(client, "chip found @ 0x%02x (%s)\n",
- client->addr, client->adapter->name);
-
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -1382,6 +1379,9 @@ static int adv7180_probe(struct i2c_client *client,
if (ret)
goto err_free_irq;
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr, client->adapter->name);
+
return 0;
err_free_irq:
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 885619841719..0855f648416d 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2547,7 +2547,7 @@ struct adv7842_cfg_read_infoframe {
u8 payload_addr;
};
-static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infoframe *cri)
+static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_infoframe *cri)
{
int i;
u8 buffer[32];
@@ -2585,7 +2585,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
static void adv7842_log_infoframes(struct v4l2_subdev *sd)
{
int i;
- struct adv7842_cfg_read_infoframe cri[] = {
+ static const struct adv7842_cfg_read_infoframe cri[] = {
{ "AVI", 0x01, 0xe0, 0x00 },
{ "Audio", 0x02, 0xe3, 0x1c },
{ "SDP", 0x04, 0xe6, 0x2a },
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 43336175c7d9..73bc50c919d7 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -157,7 +157,7 @@ static int bt819_init(struct v4l2_subdev *sd)
0x12, 0x04, /* 0x12 Output Format */
0x13, 0x20, /* 0x13 Vertical Scaling msb 0x00
chroma comb OFF, line drop scaling, interlace scaling
- BUG? Why does turning the chroma comb on fuck up color?
+ BUG? Why does turning the chroma comb on screw up color?
Bug in the bt819 stepping on my board?
*/
0x14, 0x00, /* 0x14 Vertical Scaling lsb */
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
new file mode 100644
index 000000000000..c66cd1446c0f
--- /dev/null
+++ b/drivers/media/i2c/hi556.c
@@ -0,0 +1,1200 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI556_REG_VALUE_08BIT 1
+#define HI556_REG_VALUE_16BIT 2
+#define HI556_REG_VALUE_24BIT 3
+
+#define HI556_LINK_FREQ_437MHZ 437000000ULL
+#define HI556_MCLK 19200000
+#define HI556_DATA_LANES 2
+#define HI556_RGB_DEPTH 10
+
+#define HI556_REG_CHIP_ID 0x0f16
+#define HI556_CHIP_ID 0x0556
+
+#define HI556_REG_MODE_SELECT 0x0a00
+#define HI556_MODE_STANDBY 0x0000
+#define HI556_MODE_STREAMING 0x0100
+
+/* vertical-timings from sensor */
+#define HI556_REG_FLL 0x0006
+#define HI556_FLL_30FPS 0x0814
+#define HI556_FLL_30FPS_MIN 0x0814
+#define HI556_FLL_MAX 0x7fff
+
+/* horizontal-timings from sensor */
+#define HI556_REG_LLP 0x0008
+
+/* Exposure controls from sensor */
+#define HI556_REG_EXPOSURE 0x0074
+#define HI556_EXPOSURE_MIN 6
+#define HI556_EXPOSURE_MAX_MARGIN 2
+#define HI556_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI556_REG_ANALOG_GAIN 0x0077
+#define HI556_ANAL_GAIN_MIN 0
+#define HI556_ANAL_GAIN_MAX 240
+#define HI556_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define HI556_REG_MWB_GR_GAIN 0x0078
+#define HI556_REG_MWB_GB_GAIN 0x007a
+#define HI556_REG_MWB_R_GAIN 0x007c
+#define HI556_REG_MWB_B_GAIN 0x007e
+#define HI556_DGTL_GAIN_MIN 0
+#define HI556_DGTL_GAIN_MAX 2048
+#define HI556_DGTL_GAIN_STEP 1
+#define HI556_DGTL_GAIN_DEFAULT 256
+
+/* Test Pattern Control */
+#define HI556_REG_ISP			0x0a05
+#define HI556_REG_ISP_TPG_EN 0x01
+#define HI556_REG_TEST_PATTERN 0x0201
+
+enum {
+ HI556_LINK_FREQ_437MHZ_INDEX,
+};
+
+struct hi556_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi556_reg_list {
+ u32 num_of_regs;
+ const struct hi556_reg *regs;
+};
+
+struct hi556_link_freq_config {
+ const struct hi556_reg_list reg_list;
+};
+
+struct hi556_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+	/* Horizontal timing size */
+	u32 llp;
+
+	/* Default vertical timing size */
+	u32 fll_def;
+
+	/* Min vertical timing size */
+ u32 fll_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct hi556_reg_list reg_list;
+};
+
+#define to_hi556(_sd) container_of(_sd, struct hi556, sd)
+
+/* Sensor initialization */
+static const struct hi556_reg mipi_data_rate_874mbps[] = {
+ {0x0e00, 0x0102},
+ {0x0e02, 0x0102},
+ {0x0e0c, 0x0100},
+ {0x2000, 0x7400},
+ {0x2002, 0x001c},
+ {0x2004, 0x0242},
+ {0x2006, 0x0942},
+ {0x2008, 0x7007},
+ {0x200a, 0x0fd9},
+ {0x200c, 0x0259},
+ {0x200e, 0x7008},
+ {0x2010, 0x160e},
+ {0x2012, 0x0047},
+ {0x2014, 0x2118},
+ {0x2016, 0x0041},
+ {0x2018, 0x00d8},
+ {0x201a, 0x0145},
+ {0x201c, 0x0006},
+ {0x201e, 0x0181},
+ {0x2020, 0x13cc},
+ {0x2022, 0x2057},
+ {0x2024, 0x7001},
+ {0x2026, 0x0fca},
+ {0x2028, 0x00cb},
+ {0x202a, 0x009f},
+ {0x202c, 0x7002},
+ {0x202e, 0x13cc},
+ {0x2030, 0x019b},
+ {0x2032, 0x014d},
+ {0x2034, 0x2987},
+ {0x2036, 0x2766},
+ {0x2038, 0x0020},
+ {0x203a, 0x2060},
+ {0x203c, 0x0e5d},
+ {0x203e, 0x181d},
+ {0x2040, 0x2066},
+ {0x2042, 0x20c4},
+ {0x2044, 0x5000},
+ {0x2046, 0x0005},
+ {0x2048, 0x0000},
+ {0x204a, 0x01db},
+ {0x204c, 0x025a},
+ {0x204e, 0x00c0},
+ {0x2050, 0x0005},
+ {0x2052, 0x0006},
+ {0x2054, 0x0ad9},
+ {0x2056, 0x0259},
+ {0x2058, 0x0618},
+ {0x205a, 0x0258},
+ {0x205c, 0x2266},
+ {0x205e, 0x20c8},
+ {0x2060, 0x2060},
+ {0x2062, 0x707b},
+ {0x2064, 0x0fdd},
+ {0x2066, 0x81b8},
+ {0x2068, 0x5040},
+ {0x206a, 0x0020},
+ {0x206c, 0x5060},
+ {0x206e, 0x3143},
+ {0x2070, 0x5081},
+ {0x2072, 0x025c},
+ {0x2074, 0x7800},
+ {0x2076, 0x7400},
+ {0x2078, 0x001c},
+ {0x207a, 0x0242},
+ {0x207c, 0x0942},
+ {0x207e, 0x0bd9},
+ {0x2080, 0x0259},
+ {0x2082, 0x7008},
+ {0x2084, 0x160e},
+ {0x2086, 0x0047},
+ {0x2088, 0x2118},
+ {0x208a, 0x0041},
+ {0x208c, 0x00d8},
+ {0x208e, 0x0145},
+ {0x2090, 0x0006},
+ {0x2092, 0x0181},
+ {0x2094, 0x13cc},
+ {0x2096, 0x2057},
+ {0x2098, 0x7001},
+ {0x209a, 0x0fca},
+ {0x209c, 0x00cb},
+ {0x209e, 0x009f},
+ {0x20a0, 0x7002},
+ {0x20a2, 0x13cc},
+ {0x20a4, 0x019b},
+ {0x20a6, 0x014d},
+ {0x20a8, 0x2987},
+ {0x20aa, 0x2766},
+ {0x20ac, 0x0020},
+ {0x20ae, 0x2060},
+ {0x20b0, 0x0e5d},
+ {0x20b2, 0x181d},
+ {0x20b4, 0x2066},
+ {0x20b6, 0x20c4},
+ {0x20b8, 0x50a0},
+ {0x20ba, 0x0005},
+ {0x20bc, 0x0000},
+ {0x20be, 0x01db},
+ {0x20c0, 0x025a},
+ {0x20c2, 0x00c0},
+ {0x20c4, 0x0005},
+ {0x20c6, 0x0006},
+ {0x20c8, 0x0ad9},
+ {0x20ca, 0x0259},
+ {0x20cc, 0x0618},
+ {0x20ce, 0x0258},
+ {0x20d0, 0x2266},
+ {0x20d2, 0x20c8},
+ {0x20d4, 0x2060},
+ {0x20d6, 0x707b},
+ {0x20d8, 0x0fdd},
+ {0x20da, 0x86b8},
+ {0x20dc, 0x50e0},
+ {0x20de, 0x0020},
+ {0x20e0, 0x5100},
+ {0x20e2, 0x3143},
+ {0x20e4, 0x5121},
+ {0x20e6, 0x7800},
+ {0x20e8, 0x3140},
+ {0x20ea, 0x01c4},
+ {0x20ec, 0x01c1},
+ {0x20ee, 0x01c0},
+ {0x20f0, 0x01c4},
+ {0x20f2, 0x2700},
+ {0x20f4, 0x3d40},
+ {0x20f6, 0x7800},
+ {0x20f8, 0xffff},
+ {0x27fe, 0xe000},
+ {0x3000, 0x60f8},
+ {0x3002, 0x187f},
+ {0x3004, 0x7060},
+ {0x3006, 0x0114},
+ {0x3008, 0x60b0},
+ {0x300a, 0x1473},
+ {0x300c, 0x0013},
+ {0x300e, 0x140f},
+ {0x3010, 0x0040},
+ {0x3012, 0x100f},
+ {0x3014, 0x60f8},
+ {0x3016, 0x187f},
+ {0x3018, 0x7060},
+ {0x301a, 0x0114},
+ {0x301c, 0x60b0},
+ {0x301e, 0x1473},
+ {0x3020, 0x0013},
+ {0x3022, 0x140f},
+ {0x3024, 0x0040},
+ {0x3026, 0x000f},
+
+ {0x0b00, 0x0000},
+ {0x0b02, 0x0045},
+ {0x0b04, 0xb405},
+ {0x0b06, 0xc403},
+ {0x0b08, 0x0081},
+ {0x0b0a, 0x8252},
+ {0x0b0c, 0xf814},
+ {0x0b0e, 0xc618},
+ {0x0b10, 0xa828},
+ {0x0b12, 0x004c},
+ {0x0b14, 0x4068},
+ {0x0b16, 0x0000},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x0954, 0x0009},
+ {0x0956, 0x0000},
+ {0x0958, 0xbb80},
+ {0x095a, 0x5140},
+ {0x0c00, 0x1110},
+ {0x0c02, 0x0011},
+ {0x0c04, 0x0000},
+ {0x0c06, 0x0200},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a10, 0x4000},
+ {0x3068, 0xf800},
+ {0x306a, 0xf876},
+ {0x006c, 0x0000},
+ {0x005e, 0x0200},
+ {0x000e, 0x0100},
+ {0x0e0a, 0x0001},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x07bc},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x080e},
+ {0x0070, 0x0407},
+ {0x0002, 0x0000},
+ {0x0a02, 0x0100},
+ {0x0a24, 0x0100},
+ {0x0046, 0x0000},
+ {0x0076, 0x0000},
+ {0x0060, 0x0000},
+ {0x0062, 0x0530},
+ {0x0064, 0x0500},
+ {0x0066, 0x0530},
+ {0x0068, 0x0500},
+ {0x0122, 0x0300},
+ {0x015a, 0xff08},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x005c, 0x0102},
+ {0x0a1a, 0x0800},
+};
+
+static const struct hi556_reg mode_2592x1944_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8252},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7067},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0202},
+ {0x0012, 0x000e},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0028},
+ {0x002a, 0x002d},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x1111},
+ {0x0030, 0x1111},
+ {0x0032, 0x1111},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0a20},
+ {0x0a14, 0x0798},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0300},
+ {0x0806, 0x0100},
+ {0x0a04, 0x014a},
+ {0x090c, 0x0fdc},
+ {0x090e, 0x002d},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc10a},
+ {0x0916, 0x071f},
+ {0x0918, 0x0408},
+ {0x091a, 0x0c0d},
+ {0x091c, 0x0f09},
+ {0x091e, 0x0a00},
+ {0x0958, 0xbb80},
+};
+
+static const struct hi556_reg mode_1296x972_regs[] = {
+ {0x0a00, 0x0000},
+ {0x0b0a, 0x8259},
+ {0x0f30, 0x5b15},
+ {0x0f32, 0x7167},
+ {0x004a, 0x0100},
+ {0x004c, 0x0000},
+ {0x004e, 0x0100},
+ {0x000c, 0x0122},
+ {0x0008, 0x0b00},
+ {0x005a, 0x0404},
+ {0x0012, 0x000c},
+ {0x0018, 0x0a33},
+ {0x0022, 0x0008},
+ {0x0028, 0x0017},
+ {0x0024, 0x0022},
+ {0x002a, 0x002b},
+ {0x0026, 0x0030},
+ {0x002c, 0x07c9},
+ {0x002e, 0x3311},
+ {0x0030, 0x3311},
+ {0x0032, 0x3311},
+ {0x0006, 0x0814},
+ {0x0a22, 0x0000},
+ {0x0a12, 0x0510},
+ {0x0a14, 0x03cc},
+ {0x003e, 0x0000},
+ {0x0074, 0x0812},
+ {0x0070, 0x0409},
+ {0x0804, 0x0308},
+ {0x0806, 0x0100},
+ {0x0a04, 0x016a},
+ {0x090e, 0x0010},
+ {0x090c, 0x09c0},
+
+ {0x0902, 0x4319},
+ {0x0914, 0xc106},
+ {0x0916, 0x040e},
+ {0x0918, 0x0304},
+ {0x091a, 0x0708},
+ {0x091c, 0x0e06},
+ {0x091e, 0x0300},
+ {0x0958, 0xbb80},
+};
+
+static const char * const hi556_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+};
+
+static const s64 link_freq_menu_items[] = {
+ HI556_LINK_FREQ_437MHZ,
+};
+
+static const struct hi556_link_freq_config link_freq_configs[] = {
+ [HI556_LINK_FREQ_437MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_874mbps),
+ .regs = mipi_data_rate_874mbps,
+ }
+ }
+};
+
+static const struct hi556_mode supported_modes[] = {
+ {
+ .width = 2592,
+ .height = 1944,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ },
+ {
+ .width = 1296,
+ .height = 972,
+ .fll_def = HI556_FLL_30FPS,
+ .fll_min = HI556_FLL_30FPS_MIN,
+ .llp = 0x0b00,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x972_regs),
+ .regs = mode_1296x972_regs,
+ },
+ .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+ }
+};
+
+struct hi556 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct hi556_mode *cur_mode;
+
+	/* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HI556_DATA_LANES;
+
+ do_div(pixel_rate, HI556_RGB_DEPTH);
+
+ return pixel_rate;
+}
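+
+/*
+ * Worked example: with the single 437 MHz link frequency, two data
+ * lanes and a 10-bit RAW depth this evaluates to
+ * 437000000 * 2 * 2 / 10 = 174800000 pixels per second.
+ */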
+
+static int hi556_read_reg(struct hi556 *hi556, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
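+	/*
+	 * Read into the tail of data_buf so that get_unaligned_be32()
+	 * below right-aligns an up-to-4-byte register value in *val.
+	 */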
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+static int hi556_write_reg_list(struct hi556 *hi556,
+ const struct hi556_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = hi556_write_reg(hi556, r_list->regs[i].address,
+ HI556_REG_VALUE_16BIT,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x. error = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hi556_update_digital_gain(struct hi556 *hi556, u32 d_gain)
+{
+ int ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GR_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_GB_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MWB_R_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return hi556_write_reg(hi556, HI556_REG_MWB_B_GAIN,
+ HI556_REG_VALUE_16BIT, d_gain);
+}
+
+static int hi556_test_pattern(struct hi556 *hi556, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ if (pattern) {
+ ret = hi556_read_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_ISP,
+ HI556_REG_VALUE_08BIT,
+ val | HI556_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi556_write_reg(hi556, HI556_REG_TEST_PATTERN,
+ HI556_REG_VALUE_08BIT, pattern);
+}
+
+static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi556 *hi556 = container_of(ctrl->handler,
+ struct hi556, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi556->cur_mode->height + ctrl->val -
+ HI556_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi556->exposure,
+ hi556->exposure->minimum,
+ exposure_max, hi556->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi556_write_reg(hi556, HI556_REG_ANALOG_GAIN,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi556_update_digital_gain(hi556, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = hi556_write_reg(hi556, HI556_REG_EXPOSURE,
+ HI556_REG_VALUE_16BIT, ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ ret = hi556_write_reg(hi556, HI556_REG_FLL,
+ HI556_REG_VALUE_16BIT,
+ hi556->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi556_test_pattern(hi556, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi556_ctrl_ops = {
+ .s_ctrl = hi556_set_ctrl,
+};
+
+static int hi556_init_controls(struct hi556 *hi556)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+
+ ctrl_hdlr = &hi556->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi556->mutex;
+ hi556->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+ if (hi556->link_freq)
+ hi556->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi556->pixel_rate = v4l2_ctrl_new_std
+ (ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX),
+ 1,
+ to_pixel_rate(HI556_LINK_FREQ_437MHZ_INDEX));
+ hi556->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi556->cur_mode->fll_min -
+ hi556->cur_mode->height,
+ HI556_FLL_MAX -
+ hi556->cur_mode->height, 1,
+ hi556->cur_mode->fll_def -
+ hi556->cur_mode->height);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ hi556->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi556->hblank)
+ hi556->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI556_ANAL_GAIN_MIN, HI556_ANAL_GAIN_MAX,
+ HI556_ANAL_GAIN_STEP, HI556_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI556_DGTL_GAIN_MIN, HI556_DGTL_GAIN_MAX,
+ HI556_DGTL_GAIN_STEP, HI556_DGTL_GAIN_DEFAULT);
+ exposure_max = hi556->cur_mode->fll_def - HI556_EXPOSURE_MAX_MARGIN;
+ hi556->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI556_EXPOSURE_MIN, exposure_max,
+ HI556_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi556_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi556_test_pattern_menu) - 1,
+ 0, 0, hi556_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ hi556->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void hi556_assign_pad_format(const struct hi556_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int hi556_start_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ const struct hi556_reg_list *reg_list;
+ int link_freq_index, ret;
+
+ link_freq_index = hi556->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &hi556->cur_mode->reg_list;
+ ret = hi556_write_reg_list(hi556, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(hi556->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
+
+ if (ret) {
+ dev_err(&client->dev, "failed to set stream");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hi556_stop_streaming(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+
+ if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
+ HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
+ dev_err(&client->dev, "failed to set stream");
+}
+
+static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
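+ /* Nothing to do if the sensor is already in the requested state */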
+ if (hi556->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi556->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&hi556->mutex);
+ return ret;
+ }
+
+ ret = hi556_start_streaming(hi556);
+ if (ret) {
+ enable = 0;
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ hi556_stop_streaming(hi556);
+ pm_runtime_put(&client->dev);
+ }
+
+ hi556->streaming = enable;
+ mutex_unlock(&hi556->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused hi556_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming)
+ hi556_stop_streaming(hi556);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused hi556_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+ int ret;
+
+ mutex_lock(&hi556->mutex);
+ if (hi556->streaming) {
+ ret = hi556_start_streaming(hi556);
+ if (ret)
+ goto error;
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+
+error:
+ hi556_stop_streaming(hi556);
+ hi556->streaming = 0;
+ mutex_unlock(&hi556->mutex);
+ return ret;
+}
+
+static int hi556_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+ const struct hi556_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ hi556->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(hi556->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(hi556->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->fll_def - mode->height;
+ __v4l2_ctrl_modify_range(hi556->vblank,
+ mode->fll_min - mode->height,
+ HI556_FLL_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi556->vblank, vblank_def);
+
+ h_blank = hi556->cur_mode->llp - hi556->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi556->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&hi556->sd, cfg,
+ fmt->pad);
+ else
+ hi556_assign_pad_format(hi556->cur_mode, &fmt->format);
+
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int hi556_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int hi556_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct hi556 *hi556 = to_hi556(sd);
+
+ mutex_lock(&hi556->mutex);
+ hi556_assign_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->pad, 0));
+ mutex_unlock(&hi556->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi556_video_ops = {
+ .s_stream = hi556_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi556_pad_ops = {
+ .set_fmt = hi556_set_format,
+ .get_fmt = hi556_get_format,
+ .enum_mbus_code = hi556_enum_mbus_code,
+ .enum_frame_size = hi556_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops hi556_subdev_ops = {
+ .video = &hi556_video_ops,
+ .pad = &hi556_pad_ops,
+};
+
+static const struct media_entity_operations hi556_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops hi556_internal_ops = {
+ .open = hi556_open,
+};
+
+static int hi556_identify_module(struct hi556 *hi556)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
+ int ret;
+ u32 val;
+
+ ret = hi556_read_reg(hi556, HI556_REG_CHIP_ID,
+ HI556_REG_VALUE_16BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != HI556_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HI556_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int hi556_check_hwcfg(struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ u32 mclk;
+ int ret = 0;
+ unsigned int i, j;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (mclk != HI556_MCLK) {
+ dev_err(dev, "external clock %d is not supported", mclk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+
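+ /* All link frequencies used by the driver must be listed by the firmware */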
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
+ }
+ }
+
+check_hwcfg_error:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int hi556_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi556 *hi556 = to_hi556(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&hi556->mutex);
+
+ return 0;
+}
+
+static int hi556_probe(struct i2c_client *client)
+{
+ struct hi556 *hi556;
+ int ret;
+
+ ret = hi556_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL);
+ if (!hi556)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
+ ret = hi556_identify_module(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&hi556->mutex);
+ hi556->cur_mode = &supported_modes[0];
+ ret = hi556_init_controls(hi556);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ hi556->sd.internal_ops = &hi556_internal_ops;
+ hi556->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi556->sd.entity.ops = &hi556_subdev_entity_ops;
+ hi556->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&hi556->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&hi556->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi556->sd.ctrl_handler);
+ mutex_destroy(&hi556->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops hi556_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hi556_acpi_ids[] = {
+ {"INT3537"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids);
+#endif
+
+static struct i2c_driver hi556_i2c_driver = {
+ .driver = {
+ .name = "hi556",
+ .pm = &hi556_pm_ops,
+ .acpi_match_table = ACPI_PTR(hi556_acpi_ids),
+ },
+ .probe_new = hi556_probe,
+ .remove = hi556_remove,
+};
+
+module_i2c_driver(hi556_i2c_driver);
+
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_DESCRIPTION("Hynix HI556 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 159a3a604f0e..adcaaa8c86d1 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -47,6 +47,7 @@ struct imx214 {
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *unit_size;
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
@@ -948,6 +949,10 @@ static int imx214_probe(struct i2c_client *client)
static const s64 link_freq[] = {
IMX214_DEFAULT_LINK_FREQ,
};
+ static const struct v4l2_area unit_size = {
+ .width = 1120,
+ .height = 1120,
+ };
int ret;
ret = imx214_parse_fwnode(dev);
@@ -1029,6 +1034,10 @@ static int imx214_probe(struct i2c_client *client)
V4L2_CID_EXPOSURE,
0, 3184, 1, 0x0c70);
+ imx214->unit_size = v4l2_ctrl_new_std_compound(&imx214->ctrls,
+ NULL,
+ V4L2_CID_UNIT_CELL_SIZE,
+ v4l2_ctrl_ptr_create((void *)&unit_size));
ret = imx214->ctrls.error;
if (ret) {
dev_err(&client->dev, "%s control init failed (%d)\n",
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
new file mode 100644
index 000000000000..f7678e5a5d87
--- /dev/null
+++ b/drivers/media/i2c/imx290.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sony IMX290 CMOS Image Sensor Driver
+ *
+ * Copyright (C) 2019 FRAMOS GmbH.
+ *
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define IMX290_STANDBY 0x3000
+#define IMX290_REGHOLD 0x3001
+#define IMX290_XMSTA 0x3002
+#define IMX290_GAIN 0x3014
+
+#define IMX290_DEFAULT_LINK_FREQ 445500000
+
+static const char * const imx290_supply_name[] = {
+ "vdda",
+ "vddd",
+ "vdddo",
+};
+
+#define IMX290_NUM_SUPPLIES ARRAY_SIZE(imx290_supply_name)
+
+struct imx290_regval {
+ u16 reg;
+ u8 val;
+};
+
+struct imx290_mode {
+ u32 width;
+ u32 height;
+ u32 pixel_rate;
+ u32 link_freq_index;
+
+ const struct imx290_regval *data;
+ u32 data_size;
+};
+
+struct imx290 {
+ struct device *dev;
+ struct clk *xclk;
+ struct regmap *regmap;
+
+ struct v4l2_subdev sd;
+ struct v4l2_fwnode_endpoint ep;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt current_format;
+ const struct imx290_mode *current_mode;
+
+ struct regulator_bulk_data supplies[IMX290_NUM_SUPPLIES];
+ struct gpio_desc *rst_gpio;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+
+ struct mutex lock;
+};
+
+struct imx290_pixfmt {
+ u32 code;
+};
+
+static const struct imx290_pixfmt imx290_formats[] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10 },
+};
+
+static const struct regmap_config imx290_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct imx290_regval imx290_global_init_settings[] = {
+ { 0x3007, 0x00 },
+ { 0x3009, 0x00 },
+ { 0x3018, 0x65 },
+ { 0x3019, 0x04 },
+ { 0x301a, 0x00 },
+ { 0x3443, 0x03 },
+ { 0x3444, 0x20 },
+ { 0x3445, 0x25 },
+ { 0x3407, 0x03 },
+ { 0x303a, 0x0c },
+ { 0x3040, 0x00 },
+ { 0x3041, 0x00 },
+ { 0x303c, 0x00 },
+ { 0x303d, 0x00 },
+ { 0x3042, 0x9c },
+ { 0x3043, 0x07 },
+ { 0x303e, 0x49 },
+ { 0x303f, 0x04 },
+ { 0x304b, 0x0a },
+ { 0x300f, 0x00 },
+ { 0x3010, 0x21 },
+ { 0x3012, 0x64 },
+ { 0x3016, 0x09 },
+ { 0x3070, 0x02 },
+ { 0x3071, 0x11 },
+ { 0x309b, 0x10 },
+ { 0x309c, 0x22 },
+ { 0x30a2, 0x02 },
+ { 0x30a6, 0x20 },
+ { 0x30a8, 0x20 },
+ { 0x30aa, 0x20 },
+ { 0x30ac, 0x20 },
+ { 0x30b0, 0x43 },
+ { 0x3119, 0x9e },
+ { 0x311c, 0x1e },
+ { 0x311e, 0x08 },
+ { 0x3128, 0x05 },
+ { 0x313d, 0x83 },
+ { 0x3150, 0x03 },
+ { 0x317e, 0x00 },
+ { 0x32b8, 0x50 },
+ { 0x32b9, 0x10 },
+ { 0x32ba, 0x00 },
+ { 0x32bb, 0x04 },
+ { 0x32c8, 0x50 },
+ { 0x32c9, 0x10 },
+ { 0x32ca, 0x00 },
+ { 0x32cb, 0x04 },
+ { 0x332c, 0xd3 },
+ { 0x332d, 0x10 },
+ { 0x332e, 0x0d },
+ { 0x3358, 0x06 },
+ { 0x3359, 0xe1 },
+ { 0x335a, 0x11 },
+ { 0x3360, 0x1e },
+ { 0x3361, 0x61 },
+ { 0x3362, 0x10 },
+ { 0x33b0, 0x50 },
+ { 0x33b2, 0x1a },
+ { 0x33b3, 0x04 },
+};
+
+static const struct imx290_regval imx290_1080p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x00 },
+ { 0x303a, 0x0c },
+ { 0x3414, 0x0a },
+ { 0x3472, 0x80 },
+ { 0x3473, 0x07 },
+ { 0x3418, 0x38 },
+ { 0x3419, 0x04 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x18 },
+ { 0x305d, 0x03 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x57 },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x37 },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x1f },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x1f },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x1f },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x77 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x1f },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0x98 },
+ { 0x301d, 0x08 },
+};
+
+static const struct imx290_regval imx290_720p_settings[] = {
+ /* mode settings */
+ { 0x3007, 0x10 },
+ { 0x303a, 0x06 },
+ { 0x3414, 0x04 },
+ { 0x3472, 0x00 },
+ { 0x3473, 0x05 },
+ { 0x3418, 0xd0 },
+ { 0x3419, 0x02 },
+ { 0x3012, 0x64 },
+ { 0x3013, 0x00 },
+ { 0x305c, 0x20 },
+ { 0x305d, 0x00 },
+ { 0x305e, 0x20 },
+ { 0x305f, 0x01 },
+ { 0x315e, 0x1a },
+ { 0x3164, 0x1a },
+ { 0x3480, 0x49 },
+ /* data rate settings */
+ { 0x3009, 0x01 },
+ { 0x3405, 0x10 },
+ { 0x3446, 0x4f },
+ { 0x3447, 0x00 },
+ { 0x3448, 0x2f },
+ { 0x3449, 0x00 },
+ { 0x344a, 0x17 },
+ { 0x344b, 0x00 },
+ { 0x344c, 0x17 },
+ { 0x344d, 0x00 },
+ { 0x344e, 0x17 },
+ { 0x344f, 0x00 },
+ { 0x3450, 0x57 },
+ { 0x3451, 0x00 },
+ { 0x3452, 0x17 },
+ { 0x3453, 0x00 },
+ { 0x3454, 0x17 },
+ { 0x3455, 0x00 },
+ { 0x301c, 0xe4 },
+ { 0x301d, 0x0c },
+};
+
+static const struct imx290_regval imx290_10bit_settings[] = {
+ { 0x3005, 0x00},
+ { 0x3046, 0x00},
+ { 0x3129, 0x1d},
+ { 0x317c, 0x12},
+ { 0x31ec, 0x37},
+ { 0x3441, 0x0a},
+ { 0x3442, 0x0a},
+ { 0x300a, 0x3c},
+ { 0x300b, 0x00},
+};
+
+/* supported link frequencies */
+static const s64 imx290_link_freq[] = {
+ IMX290_DEFAULT_LINK_FREQ,
+};
+
+/* Mode configs */
+static const struct imx290_mode imx290_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .data = imx290_1080p_settings,
+ .data_size = ARRAY_SIZE(imx290_1080p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .data = imx290_720p_settings,
+ .data_size = ARRAY_SIZE(imx290_720p_settings),
+ .pixel_rate = 178200000,
+ .link_freq_index = 0,
+ },
+};
+
+static inline struct imx290 *to_imx290(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx290, sd);
+}
+
+static inline int imx290_read_reg(struct imx290 *imx290, u16 addr, u8 *value)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(imx290->regmap, addr, &regval);
+ if (ret) {
+ dev_err(imx290->dev, "I2C read failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ *value = regval & 0xff;
+
+ return 0;
+}
+
+static int imx290_write_reg(struct imx290 *imx290, u16 addr, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(imx290->regmap, addr, value);
+ if (ret) {
+ dev_err(imx290->dev, "I2C write failed for addr: %x\n", addr);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx290_set_register_array(struct imx290 *imx290,
+ const struct imx290_regval *settings,
+ unsigned int num_settings)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_settings; ++i, ++settings) {
+ ret = imx290_write_reg(imx290, settings->reg, settings->val);
+ if (ret < 0)
+ return ret;
+
+ /* Settle time is 10ms for all registers */
+ msleep(10);
+ }
+
+ return 0;
+}
+
+static int imx290_write_buffered_reg(struct imx290 *imx290, u16 address_low,
+ u8 nr_regs, u32 value)
+{
+ unsigned int i;
+ int ret;
+
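+ /* Hold register updates so the multi-byte value is latched atomically */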
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x01);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ for (i = 0; i < nr_regs; i++) {
+ ret = imx290_write_reg(imx290, address_low + i,
+ (u8)(value >> (i * 8)));
+ if (ret) {
+ dev_err(imx290->dev, "Error writing buffered registers\n");
+ return ret;
+ }
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_REGHOLD, 0x00);
+ if (ret) {
+ dev_err(imx290->dev, "Error setting hold register\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int imx290_set_gain(struct imx290 *imx290, u32 value)
+{
+ int ret;
+
+ ret = imx290_write_buffered_reg(imx290, IMX290_GAIN, 1, value);
+ if (ret)
+ dev_err(imx290->dev, "Unable to write gain\n");
+
+ return ret;
+}
+
+/* Stop streaming */
+static int imx290_stop_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x01);
+ if (ret < 0)
+ return ret;
+
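+ /* Allow the sensor to settle in standby before the XMSTA write */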
+ msleep(30);
+
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x01);
+}
+
+static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx290 *imx290 = container_of(ctrl->handler,
+ struct imx290, ctrls);
+ int ret = 0;
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(imx290->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_GAIN:
+ ret = imx290_set_gain(imx290, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(imx290->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx290_ctrl_ops = {
+ .s_ctrl = imx290_set_ctrl,
+};
+
+static int imx290_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(imx290_formats))
+ return -EINVAL;
+
+ code->code = imx290_formats[code->index].code;
+
+ return 0;
+}
+
+static int imx290_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&imx290->lock);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ framefmt = v4l2_subdev_get_try_format(&imx290->sd, cfg,
+ fmt->pad);
+ else
+ framefmt = &imx290->current_format;
+
+ fmt->format = *framefmt;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ const struct imx290_mode *mode;
+ struct v4l2_mbus_framefmt *format;
+ unsigned int i;
+
+ mutex_lock(&imx290->lock);
+
+ mode = v4l2_find_nearest_size(imx290_modes,
+ ARRAY_SIZE(imx290_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+
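+ /* Fall back to the first supported media bus code if the requested one is unknown */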
+ for (i = 0; i < ARRAY_SIZE(imx290_formats); i++)
+ if (imx290_formats[i].code == fmt->format.code)
+ break;
+
+ if (i >= ARRAY_SIZE(imx290_formats))
+ i = 0;
+
+ fmt->format.code = imx290_formats[i].code;
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ } else {
+ format = &imx290->current_format;
+ __v4l2_ctrl_s_ctrl(imx290->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(imx290->pixel_rate, mode->pixel_rate);
+
+ imx290->current_mode = mode;
+ }
+
+ *format = fmt->format;
+
+ mutex_unlock(&imx290->lock);
+
+ return 0;
+}
+
+static int imx290_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = { 0 };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.width = 1920;
+ fmt.format.height = 1080;
+
+ imx290_set_fmt(subdev, cfg, &fmt);
+
+ return 0;
+}
+
+static int imx290_write_current_format(struct imx290 *imx290,
+ struct v4l2_mbus_framefmt *format)
+{
+ int ret;
+
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ ret = imx290_set_register_array(imx290, imx290_10bit_settings,
+ ARRAY_SIZE(imx290_10bit_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set format registers\n");
+ return ret;
+ }
+ break;
+ default:
+ dev_err(imx290->dev, "Unknown pixel format\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx290_start_streaming(struct imx290 *imx290)
+{
+ int ret;
+
+ /* Set init register settings */
+ ret = imx290_set_register_array(imx290, imx290_global_init_settings,
+ ARRAY_SIZE(imx290_global_init_settings));
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set init registers\n");
+ return ret;
+ }
+
+ /* Set current frame format */
+ ret = imx290_write_current_format(imx290, &imx290->current_format);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set frame format\n");
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ ret = imx290_set_register_array(imx290, imx290->current_mode->data,
+ imx290->current_mode->data_size);
+ if (ret < 0) {
+ dev_err(imx290->dev, "Could not set current mode\n");
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = v4l2_ctrl_handler_setup(imx290->sd.ctrl_handler);
+ if (ret) {
+ dev_err(imx290->dev, "Could not sync v4l2 controls\n");
+ return ret;
+ }
+
+ ret = imx290_write_reg(imx290, IMX290_STANDBY, 0x00);
+ if (ret < 0)
+ return ret;
+
+ msleep(30);
+
+ /* Start streaming */
+ return imx290_write_reg(imx290, IMX290_XMSTA, 0x00);
+}
+
+static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret = 0;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(imx290->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(imx290->dev);
+ goto unlock_and_return;
+ }
+
+ ret = imx290_start_streaming(imx290);
+ if (ret) {
+ dev_err(imx290->dev, "Start stream failed\n");
+ pm_runtime_put(imx290->dev);
+ goto unlock_and_return;
+ }
+ } else {
+ imx290_stop_streaming(imx290);
+ pm_runtime_put(imx290->dev);
+ }
+
+unlock_and_return:
+ return ret;
+}
+
+static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX290_NUM_SUPPLIES; i++)
+ imx290->supplies[i].supply = imx290_supply_name[i];
+
+ return devm_regulator_bulk_get(dev, IMX290_NUM_SUPPLIES,
+ imx290->supplies);
+}
+
+static int imx290_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+ int ret;
+
+ ret = clk_prepare_enable(imx290->xclk);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(IMX290_NUM_SUPPLIES, imx290->supplies);
+ if (ret) {
+ dev_err(imx290->dev, "Failed to enable regulators\n");
+ clk_disable_unprepare(imx290->xclk);
+ return ret;
+ }
+
+ usleep_range(1, 2);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 1);
+ usleep_range(30000, 31000);
+
+ return 0;
+}
+
+static int imx290_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ clk_disable_unprepare(imx290->xclk);
+ gpiod_set_value_cansleep(imx290->rst_gpio, 0);
+ regulator_bulk_disable(IMX290_NUM_SUPPLIES, imx290->supplies);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx290_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx290_power_on, imx290_power_off, NULL)
+};
+
+static const struct v4l2_subdev_video_ops imx290_video_ops = {
+ .s_stream = imx290_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx290_pad_ops = {
+ .init_cfg = imx290_entity_init_cfg,
+ .enum_mbus_code = imx290_enum_mbus_code,
+ .get_fmt = imx290_get_fmt,
+ .set_fmt = imx290_set_fmt,
+};
+
+static const struct v4l2_subdev_ops imx290_subdev_ops = {
+ .video = &imx290_video_ops,
+ .pad = &imx290_pad_ops,
+};
+
+static const struct media_entity_operations imx290_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int imx290_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *endpoint;
+ struct imx290 *imx290;
+ u32 xclk_freq;
+ int ret;
+
+ imx290 = devm_kzalloc(dev, sizeof(*imx290), GFP_KERNEL);
+ if (!imx290)
+ return -ENOMEM;
+
+ imx290->dev = dev;
+ imx290->regmap = devm_regmap_init_i2c(client, &imx290_regmap_config);
+ if (IS_ERR(imx290->regmap)) {
+ dev_err(dev, "Unable to initialize I2C\n");
+ return -ENODEV;
+ }
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "Endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &imx290->ep);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "Parsing endpoint node failed\n");
+ goto free_err;
+ }
+
+ if (!imx290->ep.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ if (imx290->ep.link_frequencies[0] != IMX290_DEFAULT_LINK_FREQ) {
+ dev_err(dev, "Unsupported link frequency\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Only CSI2 is supported for now */
+ if (imx290->ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
+ dev_err(dev, "Unsupported bus type, should be CSI2\n");
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ /* Set default mode to max resolution */
+ imx290->current_mode = &imx290_modes[0];
+
+ /* get system clock (xclk) */
+ imx290->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(imx290->xclk)) {
+ dev_err(dev, "Could not get xclk");
+ ret = PTR_ERR(imx290->xclk);
+ goto free_err;
+ }
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not get xclk frequency\n");
+ goto free_err;
+ }
+
+ /* external clock must be 37.125 MHz */
+ if (xclk_freq != 37125000) {
+ dev_err(dev, "External clock frequency %u is not supported\n",
+ xclk_freq);
+ ret = -EINVAL;
+ goto free_err;
+ }
+
+ ret = clk_set_rate(imx290->xclk, xclk_freq);
+ if (ret) {
+ dev_err(dev, "Could not set xclk frequency\n");
+ goto free_err;
+ }
+
+ ret = imx290_get_regulators(dev, imx290);
+ if (ret < 0) {
+ dev_err(dev, "Cannot get regulators\n");
+ goto free_err;
+ }
+
+ imx290->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(imx290->rst_gpio)) {
+ dev_err(dev, "Cannot get reset gpio\n");
+ ret = PTR_ERR(imx290->rst_gpio);
+ goto free_err;
+ }
+
+ mutex_init(&imx290->lock);
+
+ v4l2_ctrl_handler_init(&imx290->ctrls, 3);
+
+ v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_GAIN, 0, 72, 1, 0);
+ imx290->link_freq =
+ v4l2_ctrl_new_int_menu(&imx290->ctrls,
+ &imx290_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(imx290_link_freq) - 1,
+ 0, imx290_link_freq);
+ if (imx290->link_freq)
+ imx290->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ imx290->pixel_rate = v4l2_ctrl_new_std(&imx290->ctrls, &imx290_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 1,
+ INT_MAX, 1,
+ imx290_modes[0].pixel_rate);
+
+ imx290->sd.ctrl_handler = &imx290->ctrls;
+
+ if (imx290->ctrls.error) {
+ dev_err(dev, "Control initialization error %d\n",
+ imx290->ctrls.error);
+ ret = imx290->ctrls.error;
+ goto free_ctrl;
+ }
+
+ v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
+ imx290->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx290->sd.dev = &client->dev;
+ imx290->sd.entity.ops = &imx290_subdev_entity_ops;
+ imx290->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ imx290->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&imx290->sd.entity, 1, &imx290->pad);
+ if (ret < 0) {
+ dev_err(dev, "Could not register media entity\n");
+ goto free_ctrl;
+ }
+
+ ret = v4l2_async_register_subdev(&imx290->sd);
+ if (ret < 0) {
+ dev_err(dev, "Could not register v4l2 device\n");
+ goto free_entity;
+ }
+
+ /* Power on the device to match runtime PM state below */
+ ret = imx290_power_on(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not power on the device\n");
+ goto free_entity;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return 0;
+
+free_entity:
+ media_entity_cleanup(&imx290->sd.entity);
+free_ctrl:
+ v4l2_ctrl_handler_free(&imx290->ctrls);
+ mutex_destroy(&imx290->lock);
+free_err:
+ v4l2_fwnode_endpoint_free(&imx290->ep);
+
+ return ret;
+}
+
+static int imx290_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx290 *imx290 = to_imx290(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ mutex_destroy(&imx290->lock);
+
+ pm_runtime_disable(imx290->dev);
+ if (!pm_runtime_status_suspended(imx290->dev))
+ imx290_power_off(imx290->dev);
+ pm_runtime_set_suspended(imx290->dev);
+
+ return 0;
+}
+
+static const struct of_device_id imx290_of_match[] = {
+ { .compatible = "sony,imx290" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx290_of_match);
+
+static struct i2c_driver imx290_i2c_driver = {
+ .probe_new = imx290_probe,
+ .remove = imx290_remove,
+ .driver = {
+ .name = "imx290",
+ .pm = &imx290_pm_ops,
+ .of_match_table = of_match_ptr(imx290_of_match),
+ },
+};
+
+module_i2c_driver(imx290_i2c_driver);
+
+MODULE_DESCRIPTION("Sony IMX290 CMOS Image Sensor Driver");
+MODULE_AUTHOR("FRAMOS GmbH");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index d8a8853f9a2b..c76ccf67a909 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -134,7 +134,7 @@ static int lm3646_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct lm3646_flash *flash = to_lm3646_flash(ctrl);
unsigned int reg_val;
- int rval = -EINVAL;
+ int rval;
switch (ctrl->id) {
case V4L2_CID_FLASH_LED_MODE:
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 19a3ceea3bc2..506a30e69ced 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -591,8 +591,8 @@ static int max2175_set_lo_freq(struct max2175 *ctx, u32 lo_freq)
lo_freq *= lo_mult;
int_desired = lo_freq / ctx->xtal_freq;
- frac_desired = div_u64((u64)(lo_freq % ctx->xtal_freq) << 20,
- ctx->xtal_freq);
+ frac_desired = div64_ul((u64)(lo_freq % ctx->xtal_freq) << 20,
+ ctx->xtal_freq);
/* Check CSM is not busy */
ret = max2175_poll_csm_ready(ctx);
diff --git a/drivers/media/i2c/max2175.h b/drivers/media/i2c/max2175.h
index 1ece587c153d..4c722ea3e5f1 100644
--- a/drivers/media/i2c/max2175.h
+++ b/drivers/media/i2c/max2175.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Maxim Integrated MAX2175 RF to Bits tuner driver
*
* This driver & most of the hard coded values are based on the reference
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 5613072908ac..210ea76adb53 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -167,7 +167,7 @@ static int multi_reg_write(struct i2c_client *client,
static int mt9m001_init(struct i2c_client *client)
{
- const struct mt9m001_reg init_regs[] = {
+ static const struct mt9m001_reg init_regs[] = {
/*
* Issue a soft reset. This returns all registers to their
* default values.
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index f4ded0669ff9..42f64175a6df 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Omnivision OV2659 CMOS Image Sensor driver
*
@@ -5,46 +6,21 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/media.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
+#include <linux/pm_runtime.h>
-#include <media/media-entity.h>
#include <media/i2c/ov2659.h>
-#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
-#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#define DRIVER_NAME "ov2659"
@@ -232,6 +208,10 @@ struct ov2659 {
struct sensor_register *format_ctrl_regs;
struct ov2659_pll_ctrl pll;
int streaming;
+ /* used to control the sensor PWDN pin */
+ struct gpio_desc *pwdn_gpio;
+ /* used to control the sensor RESETB pin */
+ struct gpio_desc *resetb_gpio;
};
static const struct sensor_register ov2659_init_regs[] = {
@@ -419,10 +399,14 @@ static struct sensor_register ov2659_720p[] = {
{ REG_TIMING_YINC, 0x11 },
{ REG_TIMING_VERT_FORMAT, 0x80 },
{ REG_TIMING_HORIZ_FORMAT, 0x00 },
+ { 0x370a, 0x12 },
{ 0x3a03, 0xe8 },
{ 0x3a09, 0x6f },
{ 0x3a0b, 0x5d },
{ 0x3a15, 0x9a },
+ { REG_VFIFO_READ_START_H, 0x00 },
+ { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_ISP_CTRL02, 0x00 },
{ REG_NULL, 0x00 },
};
@@ -661,7 +645,7 @@ static struct sensor_register ov2659_vga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -709,7 +693,7 @@ static struct sensor_register ov2659_qvga[] = {
{ REG_TIMING_HORIZ_FORMAT, 0x01 },
{ 0x370a, 0x52 },
{ REG_VFIFO_READ_START_H, 0x00 },
- { REG_VFIFO_READ_START_L, 0x80 },
+ { REG_VFIFO_READ_START_L, 0xa0 },
{ REG_ISP_CTRL02, 0x10 },
{ REG_NULL, 0x00 },
};
@@ -1198,14 +1182,27 @@ static int ov2659_s_stream(struct v4l2_subdev *sd, int on)
/* Stop Streaming Sequence */
ov2659_set_streaming(ov2659, 0);
ov2659->streaming = on;
+ pm_runtime_put(&client->dev);
goto unlock;
}
- ov2659_set_pixel_clock(ov2659);
- ov2659_set_frame_size(ov2659);
- ov2659_set_format(ov2659);
- ov2659_set_streaming(ov2659, 1);
- ov2659->streaming = on;
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto unlock;
+ }
+
+ ret = ov2659_init(sd, 0);
+ if (!ret)
+ ret = ov2659_set_pixel_clock(ov2659);
+ if (!ret)
+ ret = ov2659_set_frame_size(ov2659);
+ if (!ret)
+ ret = ov2659_set_format(ov2659);
+ if (!ret) {
+ ov2659_set_streaming(ov2659, 1);
+ ov2659->streaming = on;
+ }
unlock:
mutex_unlock(&ov2659->lock);
@@ -1239,12 +1236,18 @@ static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2659 *ov2659 =
container_of(ctrl->handler, struct ov2659, ctrls);
+ struct i2c_client *client = ov2659->client;
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
return ov2659_set_test_pattern(ov2659, ctrl->val);
}
+ pm_runtime_put(&client->dev);
return 0;
}
@@ -1257,6 +1260,39 @@ static const char * const ov2659_test_pattern_menu[] = {
"Vertical Color Bars",
};
+static int ov2659_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 1);
+
+ return 0;
+}
+
+static int ov2659_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2659 *ov2659 = to_ov2659(sd);
+
+ dev_dbg(&client->dev, "%s:\n", __func__);
+
+ gpiod_set_value(ov2659->pwdn_gpio, 0);
+
+ if (ov2659->resetb_gpio) {
+ gpiod_set_value(ov2659->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(ov2659->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* V4L2 subdev internal operations
*/
@@ -1330,13 +1366,13 @@ static int ov2659_detect(struct v4l2_subdev *sd)
unsigned short id;
id = OV265X_ID(pid, ver);
- if (id != OV2659_ID)
+ if (id != OV2659_ID) {
dev_err(&client->dev,
"Sensor detection failed (%04X, %d)\n",
id, ret);
- else {
+ ret = -ENODEV;
+ } else {
dev_info(&client->dev, "Found OV%04X sensor\n", id);
- ret = ov2659_init(sd, 0);
}
}
@@ -1413,6 +1449,18 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->xvclk_frequency > 27000000)
return -EINVAL;
+ /* Optional GPIO, don't fail if not present */
+ ov2659->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ov2659->pwdn_gpio))
+ return PTR_ERR(ov2659->pwdn_gpio);
+
+ /* Optional GPIO, don't fail if not present */
+ ov2659->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov2659->resetb_gpio))
+ return PTR_ERR(ov2659->resetb_gpio);
+
v4l2_ctrl_handler_init(&ov2659->ctrls, 2);
ov2659->link_frequency =
v4l2_ctrl_new_std(&ov2659->ctrls, &ov2659_ctrl_ops,
@@ -1458,6 +1506,8 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->frame_size = &ov2659_framesizes[2];
ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs;
+ ov2659_power_on(&client->dev);
+
ret = ov2659_detect(sd);
if (ret < 0)
goto error;
@@ -1471,10 +1521,15 @@ static int ov2659_probe(struct i2c_client *client)
dev_info(&client->dev, "%s sensor driver registered !!\n", sd->name);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
return 0;
error:
v4l2_ctrl_handler_free(&ov2659->ctrls);
+ ov2659_power_off(&client->dev);
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
return ret;
@@ -1490,9 +1545,18 @@ static int ov2659_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov2659->lock);
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov2659_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
return 0;
}
+static const struct dev_pm_ops ov2659_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov2659_power_off, ov2659_power_on, NULL)
+};
+
static const struct i2c_device_id ov2659_id[] = {
{ "ov2659", 0 },
{ /* sentinel */ },
@@ -1510,6 +1574,7 @@ MODULE_DEVICE_TABLE(of, ov2659_of_match);
static struct i2c_driver ov2659_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
+ .pm = &ov2659_pm_ops,
.of_match_table = of_match_ptr(ov2659_of_match),
},
.probe_new = ov2659_probe,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 500d9bbff10b..5e495c833d32 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -193,6 +193,7 @@ struct ov5640_mode_info {
struct ov5640_ctrls {
struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *pixel_rate;
struct {
struct v4l2_ctrl *auto_exp;
struct v4l2_ctrl *exposure;
@@ -489,7 +490,6 @@ static const struct reg_value ov5640_setting_720P_1280_720[] = {
};
static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
- {0x3008, 0x42, 0, 0},
{0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
@@ -517,7 +517,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
{0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0},
{0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
- {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
+ {0x4005, 0x1a, 0, 0},
};
static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
@@ -1611,9 +1611,24 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
!(mode->hact == 640 && mode->vact == 480))
return NULL;
+ /* 2592x1944 only works at 15fps max */
+ if ((mode->hact == 2592 && mode->vact == 1944) &&
+ fr > OV5640_15_FPS)
+ return NULL;
+
return mode;
}
+static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
+{
+ u64 rate;
+
+ rate = sensor->current_mode->vtot * sensor->current_mode->htot;
+ rate *= ov5640_framerates[sensor->current_fr];
+
+ return rate;
+}
+
/*
* sensor changes between scaling and subsampling, go through
* exposure calculation
@@ -1818,8 +1833,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
* All the formats we support have 16 bits per pixel, seems to require
* the same rate than YUV, so we can just use 16 bpp all the time.
*/
- rate = mode->vtot * mode->htot * 16;
- rate *= ov5640_framerates[sensor->current_fr];
+ rate = ov5640_calc_pixel_rate(sensor) * 16;
if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
ret = ov5640_set_mipi_pclk(sensor, rate);
@@ -2233,6 +2247,8 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2657,6 +2673,11 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
/* we can use our own mutex for the ctrl lock */
hdl->lock = &sensor->lock;
+ /* Clock related controls */
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
+ 0, INT_MAX, 1,
+ ov5640_calc_pixel_rate(sensor));
+
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
V4L2_CID_AUTO_WHITE_BALANCE,
@@ -2704,6 +2725,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
goto free_ctrls;
}
+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
@@ -2816,6 +2838,9 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
sensor->frame_interval = fi->interval;
sensor->current_mode = mode;
sensor->pending_mode_change = true;
+
+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
+ ov5640_calc_pixel_rate(sensor));
}
out:
mutex_unlock(&sensor->lock);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 34b7046d9702..d6cd15bb699a 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1325,7 +1325,7 @@ static int ov5695_probe(struct i2c_client *client,
goto err_power_off;
#endif
- ret = v4l2_async_register_subdev(sd);
+ ret = v4l2_async_register_subdev_sensor_common(sd);
if (ret) {
dev_err(dev, "v4l2 async register subdev failed\n");
goto err_clean_entity;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 5b9af5e5b7f1..91906b94f978 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -124,12 +124,13 @@
#define DEF_AECH 0x4D
-#define CLKRC_6MHz 0x00
+#define CLKRC_8MHz 0x00
#define CLKRC_12MHz 0x40
#define CLKRC_16MHz 0x80
#define CLKRC_24MHz 0xc0
#define CLKRC_DIV_MASK 0x3f
#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
+#define DEF_CLKRC 0x00
#define COMA_RESET BIT(7)
#define COMA_QCIF BIT(5)
@@ -196,13 +197,33 @@ struct ov6650 {
struct v4l2_clk *clk;
bool half_scale; /* scale down output by 2 */
struct v4l2_rect rect; /* sensor cropping window */
- unsigned long pclk_limit; /* from host */
- unsigned long pclk_max; /* from resolution and format */
struct v4l2_fract tpf; /* as requested with s_frame_interval */
u32 code;
- enum v4l2_colorspace colorspace;
};
+struct ov6650_xclk {
+ unsigned long rate;
+ u8 clkrc;
+};
+
+static const struct ov6650_xclk ov6650_xclk[] = {
+ { .rate = 8000000, .clkrc = CLKRC_8MHz },
+ { .rate = 12000000, .clkrc = CLKRC_12MHz },
+ { .rate = 16000000, .clkrc = CLKRC_16MHz },
+ { .rate = 24000000, .clkrc = CLKRC_24MHz },
+};
static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
@@ -213,6 +234,17 @@ static u32 ov6650_codes[] = {
MEDIA_BUS_FMT_Y8_1X8,
};
+static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
+ .width = W_CIF,
+ .height = H_CIF,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
+ .quantization = V4L2_QUANTIZATION_DEFAULT,
+ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
+};
+
/* read a register */
static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
{
@@ -465,38 +497,39 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect rect = sel->r;
int ret;
if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE ||
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
- &rect.height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
- &rect.top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
- 0);
+ v4l_bound_align_image(&sel->r.width, 2, W_CIF, 1,
+ &sel->r.height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&sel->r.left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)sel->r.width, 1,
+ &sel->r.top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)sel->r.height,
+ 1, 0);
- ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
+ ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
if (!ret) {
- priv->rect.left = rect.left;
+ priv->rect.width += priv->rect.left - sel->r.left;
+ priv->rect.left = sel->r.left;
ret = ov6650_reg_write(client, REG_HSTOP,
- (rect.left + rect.width) >> 1);
+ (sel->r.left + sel->r.width) >> 1);
}
if (!ret) {
- priv->rect.width = rect.width;
- ret = ov6650_reg_write(client, REG_VSTRT, rect.top >> 1);
+ priv->rect.width = sel->r.width;
+ ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
}
if (!ret) {
- priv->rect.top = rect.top;
+ priv->rect.height += priv->rect.top - sel->r.top;
+ priv->rect.top = sel->r.top;
ret = ov6650_reg_write(client, REG_VSTOP,
- (rect.top + rect.height) >> 1);
+ (sel->r.top + sel->r.height) >> 1);
}
if (!ret)
- priv->rect.height = rect.height;
+ priv->rect.height = sel->r.height;
return ret;
}
@@ -512,12 +545,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
if (format->pad)
return -EINVAL;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- mf->colorspace = priv->colorspace;
- mf->field = V4L2_FIELD_NONE;
+ /* initialize response with default media bus frame format */
+ *mf = ov6650_def_fmt;
+
+ /* update media bus format code and frame size */
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+ } else {
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -526,22 +567,7 @@ static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
return width > rect->width >> 1 || height > rect->height >> 1;
}
-static u8 to_clkrc(struct v4l2_fract *timeperframe,
- unsigned long pclk_limit, unsigned long pclk_max)
-{
- unsigned long pclk;
-
- if (timeperframe->numerator && timeperframe->denominator)
- pclk = pclk_max * timeperframe->denominator /
- (FRAME_RATE_MAX * timeperframe->numerator);
- else
- pclk = pclk_max;
-
- if (pclk_limit && pclk_limit < pclk)
- pclk = pclk_limit;
-
- return (pclk_max - 1) / pclk;
-}
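+/* The CLKRC divider field holds the pixel clock divider minus one */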
+#define to_clkrc(div) ((div) - 1)
/* set the format we will capture in */
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
@@ -560,8 +586,7 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
.r.height = mf->height << half_scale,
};
u32 code = mf->code;
- unsigned long mclk, pclk;
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask, clkrc;
+ u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
int ret;
/* select color matrix configuration for given color encoding */
@@ -610,58 +635,35 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
return -EINVAL;
}
- priv->code = code;
if (code == MEDIA_BUS_FMT_Y8_1X8 ||
code == MEDIA_BUS_FMT_SBGGR8_1X8) {
coml_mask = COML_ONE_CHANNEL;
coml_set = 0;
- priv->pclk_max = 4000000;
} else {
coml_mask = 0;
coml_set = COML_ONE_CHANNEL;
- priv->pclk_max = 8000000;
}
- if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
- priv->colorspace = V4L2_COLORSPACE_SRGB;
- else if (code != 0)
- priv->colorspace = V4L2_COLORSPACE_JPEG;
-
if (half_scale) {
dev_dbg(&client->dev, "max resolution: QCIF\n");
coma_set |= COMA_QCIF;
- priv->pclk_max /= 2;
} else {
dev_dbg(&client->dev, "max resolution: CIF\n");
coma_mask |= COMA_QCIF;
}
- priv->half_scale = half_scale;
-
- clkrc = CLKRC_12MHz;
- mclk = 12000000;
- priv->pclk_limit = 1334000;
- dev_dbg(&client->dev, "using 12MHz input clock\n");
-
- clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- pclk = priv->pclk_max / GET_CLKRC_DIV(clkrc);
- dev_dbg(&client->dev, "pixel clock divider: %ld.%ld\n",
- mclk / pclk, 10 * mclk % pclk / pclk);
ret = ov6650_set_selection(sd, NULL, &sel);
if (!ret)
ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
-
if (!ret) {
- mf->colorspace = priv->colorspace;
- mf->width = priv->rect.width >> half_scale;
- mf->height = priv->rect.height >> half_scale;
+ priv->half_scale = half_scale;
+
+ ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
}
+ if (!ret)
+ priv->code = code;
+
return ret;
}
@@ -680,8 +682,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
&mf->height, 2, H_CIF, 1, 0);
- mf->field = V4L2_FIELD_NONE;
-
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
@@ -691,20 +691,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
case MEDIA_BUS_FMT_YUYV8_2X8:
case MEDIA_BUS_FMT_VYUY8_2X8:
case MEDIA_BUS_FMT_UYVY8_2X8:
- mf->colorspace = V4L2_COLORSPACE_JPEG;
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
/* fall through */
case MEDIA_BUS_FMT_SBGGR8_1X8:
- mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
}
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return ov6650_s_fmt(sd, mf);
- cfg->try_fmt = *mf;
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ /* store media bus format code and frame size in pad config */
+ cfg->try_fmt.width = mf->width;
+ cfg->try_fmt.height = mf->height;
+ cfg->try_fmt.code = mf->code;
+ /* return default mbus frame format updated with pad config */
+ *mf = ov6650_def_fmt;
+ mf->width = cfg->try_fmt.width;
+ mf->height = cfg->try_fmt.height;
+ mf->code = cfg->try_fmt.code;
+
+ } else {
+ /* apply new media bus format code and frame size */
+ int ret = ov6650_s_fmt(sd, mf);
+
+ if (ret)
+ return ret;
+
+ /* return default format updated with active size and code */
+ *mf = ov6650_def_fmt;
+ mf->width = priv->rect.width >> priv->half_scale;
+ mf->height = priv->rect.height >> priv->half_scale;
+ mf->code = priv->code;
+ }
return 0;
}
@@ -725,9 +744,7 @@ static int ov6650_g_frame_interval(struct v4l2_subdev *sd,
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- ival->interval.numerator = GET_CLKRC_DIV(to_clkrc(&priv->tpf,
- priv->pclk_limit, priv->pclk_max));
- ival->interval.denominator = FRAME_RATE_MAX;
+ ival->interval = priv->tpf;
dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
ival->interval.numerator, ival->interval.denominator);
@@ -742,7 +759,6 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
struct ov6650 *priv = to_ov6650(client);
struct v4l2_fract *tpf = &ival->interval;
int div, ret;
- u8 clkrc;
if (tpf->numerator == 0 || tpf->denominator == 0)
div = 1; /* Reset to full rate */
@@ -754,19 +770,12 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd,
else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
- /*
- * Keep result to be used as tpf limit
- * for subsequent clock divider calculations
- */
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- clkrc = to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, clkrc, CLKRC_DIV_MASK);
+ ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
if (!ret) {
- tpf->numerator = GET_CLKRC_DIV(clkrc);
- tpf->denominator = FRAME_RATE_MAX;
+ priv->tpf.numerator = div;
+ priv->tpf.denominator = FRAME_RATE_MAX;
+
+ *tpf = priv->tpf;
}
return ret;
@@ -788,7 +797,7 @@ static int ov6650_reset(struct i2c_client *client)
}
/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client)
+static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
{
int ret;
@@ -796,6 +805,8 @@ static int ov6650_prog_dflt(struct i2c_client *client)
ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
if (!ret)
+ ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
+ if (!ret)
ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
return ret;
@@ -805,8 +816,10 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov6650 *priv = to_ov6650(client);
- u8 pidh, pidl, midh, midl;
- int ret;
+ const struct ov6650_xclk *xclk = NULL;
+ unsigned long rate;
+ u8 pidh, pidl, midh, midl;
+ int i, ret = 0;
priv->clk = v4l2_clk_get(&client->dev, NULL);
if (IS_ERR(priv->clk)) {
@@ -815,6 +828,33 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
return ret;
}
+ rate = v4l2_clk_get_rate(priv->clk);
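+ /* First try to match the clock rate preset by the host against the supported set */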
+ for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ if (rate != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using host default clock rate %lukHz\n",
+ rate / 1000);
+ break;
+ }
+ for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
+ ret = v4l2_clk_set_rate(priv->clk, ov6650_xclk[i].rate);
+ if (ret || v4l2_clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
+ continue;
+
+ xclk = &ov6650_xclk[i];
+ dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
+ xclk->rate / 1000);
+ break;
+ }
+ if (!xclk) {
+ dev_err(&client->dev, "unable to get supported clock rate\n");
+ if (!ret)
+ ret = -EINVAL;
+ goto eclkput;
+ }
+
ret = ov6650_s_power(sd, 1);
if (ret < 0)
goto eclkput;
@@ -848,7 +888,12 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
ret = ov6650_reset(client);
if (!ret)
- ret = ov6650_prog_dflt(client);
+ ret = ov6650_prog_dflt(client, xclk->clkrc);
+ if (!ret) {
+ struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
+
+ ret = ov6650_s_fmt(sd, &mf);
+ }
if (!ret)
ret = v4l2_ctrl_handler_setup(&priv->hdl);
@@ -989,8 +1034,10 @@ static int ov6650_probe(struct i2c_client *client,
V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error)
- return priv->hdl.error;
+ if (priv->hdl.error) {
+ ret = priv->hdl.error;
+ goto ectlhdlfree;
+ }
v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
@@ -1001,15 +1048,18 @@ static int ov6650_probe(struct i2c_client *client,
priv->rect.top = DEF_VSTRT << 1;
priv->rect.width = W_CIF;
priv->rect.height = H_CIF;
- priv->half_scale = false;
- priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
- priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ /* Hardware default frame interval */
+ priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
+ priv->tpf.denominator = FRAME_RATE_MAX;
priv->subdev.internal_ops = &ov6650_internal_ops;
ret = v4l2_async_register_subdev(&priv->subdev);
- if (ret)
- v4l2_ctrl_handler_free(&priv->hdl);
+ if (!ret)
+ return 0;
+ectlhdlfree:
+ v4l2_ctrl_handler_free(&priv->hdl);
return ret;
}
@@ -1041,6 +1091,6 @@ static struct i2c_driver ov6650_i2c_driver = {
module_i2c_driver(ov6650_i2c_driver);
-MODULE_DESCRIPTION("SoC Camera driver for OmniVision OV6650");
-MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
+MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
+MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com>");
MODULE_LICENSE("GPL v2");
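With the frame interval now cached in priv->tpf as div/FRAME_RATE_MAX, g_frame_interval simply returns the stored fraction, and s_frame_interval only has to clamp the divisor and program CLKRC. Assuming FRAME_RATE_MAX is 30, as in this driver family, the divisor-to-interval relation is just:

	/* Sketch, assuming FRAME_RATE_MAX == 30: a CLKRC divisor of 1 gives
	 * 1/30 s per frame (30 fps), 2 gives 2/30 s (15 fps), and so on. */
	struct fract { unsigned int numerator, denominator; };

	static struct fract div_to_interval(unsigned int div)
	{
		struct fract tpf = { .numerator = div, .denominator = 30 };

		return tpf;
	}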
diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h
index 44fabe08234d..4b5f6985710b 100644
--- a/drivers/media/i2c/saa711x_regs.h
+++ b/drivers/media/i2c/saa711x_regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * SPDX-License-Identifier: GPL-2.0+
* saa711x - Philips SAA711x video decoder register specifications
*
* Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 9adf8e034e7d..84f9771b5fed 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -682,66 +682,6 @@ static int smiapp_get_all_limits(struct smiapp_sensor *sensor)
return 0;
}
-static int smiapp_get_limits_binning(struct smiapp_sensor *sensor)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- static u32 const limits[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN_BIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN_BIN,
- };
- static u32 const limits_replace[] = {
- SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES,
- SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK,
- SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MIN,
- SMIAPP_LIMIT_FINE_INTEGRATION_TIME_MAX_MARGIN,
- };
- unsigned int i;
- int rval;
-
- if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY] ==
- SMIAPP_BINNING_CAPABILITY_NO) {
- for (i = 0; i < ARRAY_SIZE(limits); i++)
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
-
- return 0;
- }
-
- rval = smiapp_get_limits(sensor, limits, ARRAY_SIZE(limits));
- if (rval < 0)
- return rval;
-
- /*
- * Sanity check whether the binning limits are valid. If not,
- * use the non-binning ones.
- */
- if (sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN]
- && sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN])
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(limits); i++) {
- dev_dbg(&client->dev,
- "replace limit 0x%8.8x \"%s\" = %d, 0x%x\n",
- smiapp_reg_limits[limits[i]].addr,
- smiapp_reg_limits[limits[i]].what,
- sensor->limits[limits_replace[i]],
- sensor->limits[limits_replace[i]]);
- sensor->limits[limits[i]] =
- sensor->limits[limits_replace[i]];
- }
-
- return 0;
-}
-
static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -891,60 +831,47 @@ static void smiapp_update_blanking(struct smiapp_sensor *sensor)
{
struct v4l2_ctrl *vblank = sensor->vblank;
struct v4l2_ctrl *hblank = sensor->hblank;
+ uint16_t min_fll, max_fll, min_llp, max_llp, min_lbp;
int min, max;
+ if (sensor->binning_vertical > 1 || sensor->binning_horizontal > 1) {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN];
+ } else {
+ min_fll = sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES];
+ max_fll = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES];
+ min_llp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK];
+ max_llp = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK];
+ min_lbp = sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK];
+ }
+
min = max_t(int,
sensor->limits[SMIAPP_LIMIT_MIN_FRAME_BLANKING_LINES],
- sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES_BIN] -
+ min_fll -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height);
- max = sensor->limits[SMIAPP_LIMIT_MAX_FRAME_LENGTH_LINES_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
+ max = max_fll - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].height;
__v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
min = max_t(int,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_LENGTH_PCK_BIN] -
+ min_llp -
sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width,
- sensor->limits[SMIAPP_LIMIT_MIN_LINE_BLANKING_PCK_BIN]);
- max = sensor->limits[SMIAPP_LIMIT_MAX_LINE_LENGTH_PCK_BIN] -
- sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
+ min_lbp);
+ max = max_llp - sensor->pixel_array->crop[SMIAPP_PA_PAD_SRC].width;
__v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
__smiapp_update_exposure_limits(sensor);
}
-static int smiapp_update_mode(struct smiapp_sensor *sensor)
+static int smiapp_pll_blanking_update(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
- unsigned int binning_mode;
int rval;
- /* Binning has to be set up here; it affects limits */
- if (sensor->binning_horizontal == 1 &&
- sensor->binning_vertical == 1) {
- binning_mode = 0;
- } else {
- u8 binning_type =
- (sensor->binning_horizontal << 4)
- | sensor->binning_vertical;
-
- rval = smiapp_write(
- sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
- if (rval < 0)
- return rval;
-
- binning_mode = 1;
- }
- rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
- if (rval < 0)
- return rval;
-
- /* Get updated limits due to binning */
- rval = smiapp_get_limits_binning(sensor);
- if (rval < 0)
- return rval;
-
rval = smiapp_pll_update(sensor);
if (rval < 0)
return rval;
@@ -970,62 +897,91 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
* SMIA++ NVM handling
*
*/
-static int smiapp_read_nvm(struct smiapp_sensor *sensor,
- unsigned char *nvm)
+
+static int smiapp_read_nvm_page(struct smiapp_sensor *sensor, u32 p, u8 *nvm,
+ u8 *status)
{
- u32 i, s, p, np, v;
- int rval = 0, rval2;
+ unsigned int i;
+ int rval;
+ u32 s;
- np = sensor->nvm_size / SMIAPP_NVM_PAGE_SIZE;
- for (p = 0; p < np; p++) {
- rval = smiapp_write(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
- if (rval)
- goto out;
+ *status = 0;
- rval = smiapp_write(sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN |
- SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN);
- if (rval)
- goto out;
+ rval = smiapp_write(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_PAGE_SELECT, p);
+ if (rval)
+ return rval;
- for (i = 1000; i > 0; i--) {
- rval = smiapp_read(
- sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL,
+ SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN);
+ if (rval)
+ return rval;
- if (rval)
- goto out;
+ rval = smiapp_read(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
+ if (rval)
+ return rval;
+
+ if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE) {
+ *status = s;
+ return -ENODATA;
+ }
+ if (sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL) {
+ for (i = 1000; i > 0; i--) {
if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
break;
- }
- if (!i) {
- rval = -ETIMEDOUT;
- goto out;
- }
-
- for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
rval = smiapp_read(
sensor,
- SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
- &v);
- if (rval)
- goto out;
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS,
+ &s);
- *nvm++ = v;
+ if (rval)
+ return rval;
}
+
+ if (!i)
+ return -ETIMEDOUT;
}
-out:
+ for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
+ u32 v;
+
+ rval = smiapp_read(sensor,
+ SMIAPP_REG_U8_DATA_TRANSFER_IF_1_DATA_0 + i,
+ &v);
+ if (rval)
+ return rval;
+
+ *nvm++ = v;
+ }
+
+ return 0;
+}
+
+static int smiapp_read_nvm(struct smiapp_sensor *sensor, unsigned char *nvm,
+ size_t nvm_size)
+{
+ u8 status = 0;
+ u32 p;
+ int rval = 0, rval2;
+
+ for (p = 0; p < nvm_size / SMIAPP_NVM_PAGE_SIZE && !rval; p++) {
+ rval = smiapp_read_nvm_page(sensor, p, nvm, &status);
+ nvm += SMIAPP_NVM_PAGE_SIZE;
+ }
+
+ if (rval == -ENODATA &&
+ status & SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE)
+ rval = 0;
+
rval2 = smiapp_write(sensor, SMIAPP_REG_U8_DATA_TRANSFER_IF_1_CTRL, 0);
if (rval < 0)
return rval;
else
- return rval2;
+ return rval2 ?: p * SMIAPP_NVM_PAGE_SIZE;
}
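smiapp_read_nvm() now reports how many bytes it read: the final write that disables the transfer interface is still checked, and the tail combines the two results using the GNU `x ?: y` shorthand for `x ? x : y`. Condensed, the return logic is:

	/* Sketch of the tail of smiapp_read_nvm(): rval is the page-read
	 * status, rval2 the status of the final disable write, p the
	 * number of pages transferred. */
	static int nvm_result(int rval, int rval2, unsigned int p,
			      unsigned int page_size)
	{
		if (rval < 0)
			return rval;			/* read failed */
		return rval2 ?: (int)(p * page_size);	/* error or byte count */
	}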
/*
@@ -1324,10 +1280,6 @@ static int smiapp_power_on(struct device *dev)
rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
if (rval)
goto out_cci_addr_fail;
-
- rval = smiapp_update_mode(sensor);
- if (rval < 0)
- goto out_cci_addr_fail;
}
mutex_unlock(&sensor->mutex);
@@ -1387,6 +1339,7 @@ static int smiapp_power_off(struct device *dev)
static int smiapp_start_streaming(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+ unsigned int binning_mode;
int rval;
mutex_lock(&sensor->mutex);
@@ -1397,6 +1350,27 @@ static int smiapp_start_streaming(struct smiapp_sensor *sensor)
if (rval)
goto out;
+ /* Binning configuration */
+ if (sensor->binning_horizontal == 1 &&
+ sensor->binning_vertical == 1) {
+ binning_mode = 0;
+ } else {
+ u8 binning_type =
+ (sensor->binning_horizontal << 4)
+ | sensor->binning_vertical;
+
+ rval = smiapp_write(
+ sensor, SMIAPP_REG_U8_BINNING_TYPE, binning_type);
+ if (rval < 0)
+ goto out;
+
+ binning_mode = 1;
+ }
+ rval = smiapp_write(sensor, SMIAPP_REG_U8_BINNING_MODE, binning_mode);
+ if (rval < 0)
+ goto out;
+
+ /* Set up PLL */
rval = smiapp_pll_configure(sensor);
if (rval)
goto out;
@@ -2073,7 +2047,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- return smiapp_update_mode(sensor);
+ return smiapp_pll_blanking_update(sensor);
return 0;
}
@@ -2312,41 +2286,34 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
struct v4l2_subdev *subdev = i2c_get_clientdata(to_i2c_client(dev));
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
- unsigned int nbytes;
+ int rval;
if (!sensor->dev_init_done)
return -EBUSY;
- if (!sensor->nvm_size) {
- int rval;
-
- /* NVM not read yet - read it now */
- sensor->nvm_size = sensor->hwcfg->nvm_size;
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ return -ENODEV;
+ }
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(&client->dev);
- pm_runtime_put(&client->dev);
- return -ENODEV;
- }
+ rval = smiapp_read_nvm(sensor, buf, PAGE_SIZE);
+ if (rval < 0) {
+ pm_runtime_put(&client->dev);
+ dev_err(&client->dev, "nvm read failed\n");
+ return -ENODEV;
+ }
- if (smiapp_read_nvm(sensor, sensor->nvm)) {
- dev_err(&client->dev, "nvm read failed\n");
- return -ENODEV;
- }
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
- pm_runtime_mark_last_busy(&client->dev);
- pm_runtime_put_autosuspend(&client->dev);
- }
/*
* NVM is still way below a PAGE_SIZE, so we can safely
* assume this for now.
*/
- nbytes = min_t(unsigned int, sensor->nvm_size, PAGE_SIZE);
- memcpy(buf, sensor->nvm, nbytes);
-
- return nbytes;
+ return rval;
}
static DEVICE_ATTR(nvm, S_IRUGO, smiapp_sysfs_nvm_read, NULL);
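Dropping the cached NVM buffer means every sysfs read now powers the sensor up, reads the pages directly into the sysfs buffer, and releases the device through autosuspend so consecutive reads do not cycle power. The runtime-PM bracketing, reduced to its shape:

	#include <linux/pm_runtime.h>

	/* Sketch: bracket a hardware access with runtime PM references. */
	static ssize_t read_with_pm(struct device *dev, char *buf, size_t len,
				    ssize_t (*access)(char *, size_t))
	{
		ssize_t ret = pm_runtime_get_sync(dev);	/* resume + refcount */

		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* drop refcount only */
			return ret;
		}

		ret = access(buf, len);			/* talk to hardware */

		pm_runtime_mark_last_busy(dev);		/* rearm autosuspend */
		pm_runtime_put_autosuspend(dev);	/* deferred suspend */
		return ret;
	}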
@@ -2810,16 +2777,13 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
}
}
- /* NVM size is not mandatory */
- fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
-
rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
if (rval)
dev_info(dev, "can't get clock-frequency\n");
- dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
- hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
+ dev_dbg(dev, "clk %d, mode %d\n", hwcfg->ext_clk,
+ hwcfg->csi_signalling_mode);
if (!bus_cfg.nr_of_link_frequencies) {
dev_warn(dev, "no link frequencies defined\n");
@@ -2862,7 +2826,6 @@ static int smiapp_probe(struct i2c_client *client)
return -ENOMEM;
sensor->hwcfg = hwcfg;
- mutex_init(&sensor->mutex);
sensor->src = &sensor->ssds[sensor->ssds_used];
v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
@@ -2926,6 +2889,8 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
return rval;
+ mutex_init(&sensor->mutex);
+
rval = smiapp_identify_module(sensor);
if (rval) {
rval = -ENODEV;
@@ -3003,17 +2968,10 @@ static int smiapp_probe(struct i2c_client *client)
rval = -ENOENT;
goto out_power_off;
}
- /* SMIA++ NVM initialization - it will be read from the sensor
- * when it is first requested by userspace.
- */
- if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
- sensor->nvm = devm_kzalloc(&client->dev,
- sensor->hwcfg->nvm_size, GFP_KERNEL);
- if (sensor->nvm == NULL) {
- rval = -ENOMEM;
- goto out_cleanup;
- }
+ if (sensor->minfo.smiapp_version &&
+ sensor->limits[SMIAPP_LIMIT_DATA_TRANSFER_IF_CAPABILITY] &
+ SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED) {
if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
dev_err(&client->dev, "sysfs nvm entry failed\n");
rval = -EBUSY;
@@ -3086,7 +3044,7 @@ static int smiapp_probe(struct i2c_client *client)
}
mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
+ rval = smiapp_pll_blanking_update(sensor);
mutex_unlock(&sensor->mutex);
if (rval) {
dev_err(&client->dev, "update mode failed\n");
@@ -3101,19 +3059,23 @@ static int smiapp_probe(struct i2c_client *client)
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
- if (rval < 0)
- goto out_media_entity_cleanup;
-
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
+
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
+ if (rval < 0)
+ goto out_disable_runtime_pm;
+
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
+out_disable_runtime_pm:
+ pm_runtime_disable(&client->dev);
+
out_media_entity_cleanup:
media_entity_cleanup(&sensor->src->sd.entity);
@@ -3122,6 +3084,7 @@ out_cleanup:
out_power_off:
smiapp_power_off(&client->dev);
+ mutex_destroy(&sensor->mutex);
return rval;
}
@@ -3144,6 +3107,7 @@ static int smiapp_remove(struct i2c_client *client)
media_entity_cleanup(&sensor->ssds[i].sd.entity);
}
smiapp_cleanup(sensor);
+ mutex_destroy(&sensor->mutex);
return 0;
}
diff --git a/drivers/media/i2c/smiapp/smiapp-reg.h b/drivers/media/i2c/smiapp/smiapp-reg.h
index 2804a4d9a4e1..43505cd0616e 100644
--- a/drivers/media/i2c/smiapp/smiapp-reg.h
+++ b/drivers/media/i2c/smiapp/smiapp-reg.h
@@ -11,25 +11,29 @@
#ifndef __SMIAPP_REG_H_
#define __SMIAPP_REG_H_
+#include <linux/bits.h>
+
#include "smiapp-reg-defs.h"
/* Bits for above register */
-#define SMIAPP_IMAGE_ORIENTATION_HFLIP (1 << 0)
-#define SMIAPP_IMAGE_ORIENTATION_VFLIP (1 << 1)
-
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN (0 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY (1 << 0)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY (1 << 1)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA (1 << 2)
-#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE (1 << 3)
-
-#define SMIAPP_SOFTWARE_RESET (1 << 0)
-
-#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE (1 << 0)
-#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE (1 << 1)
+#define SMIAPP_IMAGE_ORIENTATION_HFLIP BIT(0)
+#define SMIAPP_IMAGE_ORIENTATION_VFLIP BIT(1)
+
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_EN BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_WR_EN BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_CTRL_ERR_CLEAR BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_WR_READY BIT(1)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EDATA BIT(2)
+#define SMIAPP_DATA_TRANSFER_IF_1_STATUS_EUSAGE BIT(3)
+
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_SUPPORTED BIT(0)
+#define SMIAPP_DATA_TRANSFER_IF_CAPABILITY_POLL BIT(2)
+
+#define SMIAPP_SOFTWARE_RESET BIT(0)
+
+#define SMIAPP_FLASH_MODE_CAPABILITY_SINGLE_STROBE BIT(0)
+#define SMIAPP_FLASH_MODE_CAPABILITY_MULTIPLE_STROBE BIT(1)
#define SMIAPP_DPHY_CTRL_AUTOMATIC 0
/* DPHY control based on REQUESTED_LINK_BIT_RATE_MBPS */
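The BIT() conversion also quietly drops SMIAPP_DATA_TRANSFER_IF_1_CTRL_RD_EN, which was defined as (0 << 1) and therefore always evaluated to zero; OR-ing it into a control write never set any bit, which is why the NVM read path above can enable the interface with CTRL_EN alone. For reference, BIT() from <linux/bits.h> is essentially:

	/* Simplified form of the kernel's BIT() from <linux/bits.h>. */
	#define BIT(nr)		(1UL << (nr))

	/* So BIT(0) == 0x1 and BIT(3) == 0x8, while the old RD_EN
	 * definition (0 << 1) was always 0 -- a no-op mask. */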
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index ecf8a17dbe37..3ab874a5deba 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -208,9 +208,6 @@ struct smiapp_sensor {
bool dev_init_done;
u8 compressed_min_bpp;
- u8 *nvm; /* nvm memory buffer */
- unsigned int nvm_size; /* bytes */
-
struct smiapp_module_info minfo;
struct smiapp_pll pll;
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 81285b8d5cfb..003ba22334cd 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -971,6 +971,11 @@ static int mipid02_probe(struct i2c_client *client)
bridge->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
+ if (IS_ERR(bridge->reset_gpio)) {
+ dev_err(dev, "failed to get reset GPIO\n");
+ return PTR_ERR(bridge->reset_gpio);
+ }
+
ret = mipid02_get_regulators(bridge);
if (ret) {
dev_err(dev, "failed to get regulators %d", ret);
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index ecf87534613b..d9b3daada07d 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 Gateworks Corporation
*/
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 9088186c24d1..f716129adf09 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
*
* tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
*
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 39f66e7a0e42..8be03fe5928c 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -375,7 +375,7 @@ static int vpx3220_s_routing(struct v4l2_subdev *sd,
input = 1: COMPOSITE input
input = 2: SVHS input */
- const int input_vals[3][2] = {
+ static const int input_vals[3][2] = {
{0x0c, 0},
{0x0d, 0},
{0x0e, 1}
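Adding `static` to the const lookup table matters because a plain const array local to a function still has automatic storage: the compiler rebuilds it on the stack at every call. `static const` places a single copy in read-only data:

	int lookup_stack(int i)
	{
		const int tbl[3] = { 0x0c, 0x0d, 0x0e }; /* rebuilt per call */
		return tbl[i];
	}

	int lookup_rodata(int i)
	{
		static const int tbl[3] = { 0x0c, 0x0d, 0x0e }; /* one copy, .rodata */
		return tbl[i];
	}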
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
index e19df5165e78..da8088351135 100644
--- a/drivers/media/mc/mc-device.c
+++ b/drivers/media/mc/mc-device.c
@@ -575,6 +575,38 @@ static void media_device_release(struct media_devnode *devnode)
dev_dbg(devnode->parent, "Media device released\n");
}
+static void __media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_link *link, *tmp;
+ struct media_interface *intf;
+ unsigned int i;
+
+ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+
+ /* Remove all interface links pointing to this entity */
+ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+ list_for_each_entry_safe(link, tmp, &intf->links, list) {
+ if (link->entity == entity)
+ __media_remove_intf_link(link);
+ }
+ }
+
+ /* Remove all data links that belong to this entity */
+ __media_entity_remove_links(entity);
+
+ /* Remove all pads that belong to this entity */
+ for (i = 0; i < entity->num_pads; i++)
+ media_gobj_destroy(&entity->pads[i].graph_obj);
+
+ /* Remove the entity */
+ media_gobj_destroy(&entity->graph_obj);
+
+ /* invoke entity_notify callbacks to handle entity removal?? */
+
+ entity->graph_obj.mdev = NULL;
+}
+
/**
* media_device_register_entity - Register an entity with a media device
* @mdev: The media device
@@ -632,6 +664,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
*/
ret = media_graph_walk_init(&new, mdev);
if (ret) {
+ __media_device_unregister_entity(entity);
mutex_unlock(&mdev->graph_mutex);
return ret;
}
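Hoisting __media_device_unregister_entity() above the register function lets the new error path undo a half-finished registration when media_graph_walk_init() fails, instead of leaking the internal index, pads and graph object. The register-then-roll-back shape, with hypothetical helpers:

	struct obj;
	extern int add_to_tables(struct obj *o);
	extern void remove_from_tables(struct obj *o);
	extern int init_graph_walk(struct obj *o);

	/* Sketch: if a later init step fails, roll back the earlier one. */
	static int register_obj(struct obj *o)
	{
		int ret = add_to_tables(o);	/* IDs, pads, links... */

		if (ret)
			return ret;

		ret = init_graph_walk(o);	/* can fail after o is visible */
		if (ret) {
			remove_from_tables(o);	/* undo the first step */
			return ret;
		}
		return 0;
	}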
@@ -644,38 +677,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
-static void __media_device_unregister_entity(struct media_entity *entity)
-{
- struct media_device *mdev = entity->graph_obj.mdev;
- struct media_link *link, *tmp;
- struct media_interface *intf;
- unsigned int i;
-
- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
-
- /* Remove all interface links pointing to this entity */
- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
- list_for_each_entry_safe(link, tmp, &intf->links, list) {
- if (link->entity == entity)
- __media_remove_intf_link(link);
- }
- }
-
- /* Remove all data links that belong to this entity */
- __media_entity_remove_links(entity);
-
- /* Remove all pads that belong to this entity */
- for (i = 0; i < entity->num_pads; i++)
- media_gobj_destroy(&entity->pads[i].graph_obj);
-
- /* Remove the entity */
- media_gobj_destroy(&entity->graph_obj);
-
- /* invoke entity_notify callbacks to handle entity removal?? */
-
- entity->graph_obj.mdev = NULL;
-}
-
void media_device_unregister_entity(struct media_entity *entity)
{
struct media_device *mdev = entity->graph_obj.mdev;
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 85f3e7307538..fa57e12f2ac8 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -664,7 +664,7 @@ static int _cx18_process_idx_data(struct cx18_buffer *buf,
struct cx18_enc_idx_entry *e_buf;
/* Frame type lookup: 1=I, 2=P, 4=B */
- const int mapping[8] = {
+ static const int mapping[8] = {
-1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P,
-1, V4L2_ENC_IDX_FRAME_B, -1, -1, -1
};
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index e880afe37f15..d59ca3601785 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
return -ENOMEM;
spin_lock_init(&state->rx_kfifo_lock);
- if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
+ if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
+ GFP_KERNEL)) {
+ kfree(state);
return -ENOMEM;
+ }
state->dev = dev;
sd = &state->sd;
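Before this fix, a kfifo_alloc() failure returned -ENOMEM while the state structure allocated just above it leaked. The corrected allocate-A, allocate-B, free-A-on-B-failure shape:

	#include <linux/kfifo.h>
	#include <linux/slab.h>

	struct st { struct kfifo fifo; };

	static struct st *st_create(unsigned int fifo_size)
	{
		struct st *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (!s)
			return NULL;
		if (kfifo_alloc(&s->fifo, fifo_size, GFP_KERNEL)) {
			kfree(s);		/* undo the first allocation */
			return NULL;
		}
		return s;
	}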
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 3cd87626cd79..9fa388626bae 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1781,6 +1781,41 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_NOTONLYTV_LV3H] = {
+ .name = "NotOnlyTV LV3H",
+ .tuner_type = TUNER_XC2028,
+ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+ .radio_addr = ADDR_UNSET,
+ /* if gpio1:bit9 is enabled, DVB-T won't work */
+
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa161,
+ .gpio2 = 0x0000,
+ } },
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0000,
+ .gpio1 = 0xa141,
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
[CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO] = {
.name = "DViCO FusionHDTV DVB-T PRO",
.tuner_type = TUNER_XC2028,
@@ -2654,6 +2689,7 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6f18,
.card = CX88_BOARD_WINFAST_TV2000_XP_GLOBAL,
}, {
+ /* Also NotOnlyTV LV3H (version 1.11 is silkscreened on the board) */
.subvendor = 0x14f1,
.subdevice = 0x8852,
.card = CX88_BOARD_GENIATECH_X8000_MT,
@@ -3121,6 +3157,7 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PRO:
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
return cx88_dvico_xc2028_callback(core, command, arg);
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
return cx88_xc3028_winfast1800h_callback(core, command, arg);
@@ -3322,6 +3359,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
udelay(1000);
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
@@ -3378,6 +3416,11 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
*/
ctl->disable_power_mgmt = 1;
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
+ ctl->demod = XC3028_FE_ZARLINK456;
+ ctl->fname = XC3028L_DEFAULT_FIRMWARE;
+ ctl->read_not_reliable = 1;
+ break;
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_PROLINK_PV_GLOBAL_XTREME:
case CX88_BOARD_PROLINK_PV_8000GT:
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index 0292d0947cc7..202ff9e8c257 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1378,6 +1378,7 @@ static int dvb_register(struct cx8802_dev *dev)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
+ case CX88_BOARD_NOTONLYTV_LV3H:
case CX88_BOARD_PINNACLE_HYBRID_PCTV:
case CX88_BOARD_WINFAST_DTV1800H:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index dcc0f02aeb70..b8abcd550604 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1277,7 +1277,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
core = cx88_core_get(dev->pci);
if (!core) {
err = -EINVAL;
- goto fail_free;
+ goto fail_disable;
}
dev->core = core;
@@ -1323,7 +1323,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->audio_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
}
@@ -1337,7 +1337,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
cc->step, cc->default_value);
if (!vc) {
err = core->video_hdl.error;
- goto fail_core;
+ goto fail_irq;
}
vc->priv = (void *)cc;
if (vc->id == V4L2_CID_CHROMA_AGC)
@@ -1509,11 +1509,14 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
fail_unreg:
cx8800_unregister_video(dev);
- free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
+fail_irq:
+ free_irq(pci_dev->irq, dev);
fail_core:
core->v4ldev = NULL;
cx88_core_put(core, dev->pci);
+fail_disable:
+ pci_disable_device(pci_dev);
fail_free:
kfree(dev);
return err;
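The relabelled error path restores the invariant that each goto target undoes exactly the steps completed so far, in reverse order: failures after request_irq() free the IRQ first, failures after cx88_core_get() release the core, and failures right after pci_enable_device() now disable the device instead of skipping that step. The idiom in skeleton form, with hypothetical step/undo helpers:

	extern int enable_device(void), get_core(void), request_irq_step(void);
	extern void put_core(void), disable_device(void);

	/* Sketch: one label per completed step, falling through in reverse. */
	static int initdev(void)
	{
		int err;

		err = enable_device();		/* e.g. pci_enable_device() */
		if (err)
			goto fail;
		err = get_core();		/* e.g. cx88_core_get() */
		if (err)
			goto fail_disable;
		err = request_irq_step();	/* e.g. request_irq() */
		if (err)
			goto fail_core;
		return 0;

	fail_core:
		put_core();
	fail_disable:
		disable_device();
	fail:
		return err;
	}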
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 744a22328ebc..ce4acf6de6aa 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -228,6 +228,7 @@ extern const struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F36 89
#define CX88_BOARD_WINFAST_TV2000_XP_GLOBAL_6F43 90
+#define CX88_BOARD_NOTONLYTV_LV3H 91
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index bb3a8cc9de0c..9dce31d2b525 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 6d22c0107d33..80478b026d75 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -325,7 +325,7 @@ static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
- struct v4l2_decode_vbi_line vbi;
+ struct v4l2_decode_vbi_line vbi = {};
int i;
unsigned lines = 0;
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 67aebe759232..c0bd5d7e148b 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -60,10 +60,8 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(!mantis)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index deadd0b92233..906e4500d87d 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -69,10 +69,8 @@ static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
- dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ if (unlikely(!mantis))
return IRQ_NONE;
- }
ca = mantis->mantis_ca;
stat = mmread(MANTIS_INT_STAT);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 9ae04e18e6c6..126d085be9a7 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -13,12 +13,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <asm/div64.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
#include "saa7164.h"
MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards");
@@ -1045,92 +1043,138 @@ static void saa7164_dev_unregister(struct saa7164_dev *dev)
return;
}
-#ifdef CONFIG_PROC_FS
-static int saa7164_proc_show(struct seq_file *m, void *v)
+#ifdef CONFIG_DEBUG_FS
+static void *saa7164_seq_start(struct seq_file *s, loff_t *pos)
{
struct saa7164_dev *dev;
- struct tmComResBusInfo *b;
- struct list_head *list;
- int i, c;
+ loff_t index = *pos;
- if (saa7164_devcount == 0)
- return 0;
+ mutex_lock(&devlist);
+ list_for_each_entry(dev, &saa7164_devlist, devlist) {
+ if (index-- == 0) {
+ mutex_unlock(&devlist);
+ return dev;
+ }
+ }
+ mutex_unlock(&devlist);
- list_for_each(list, &saa7164_devlist) {
- dev = list_entry(list, struct saa7164_dev, devlist);
- seq_printf(m, "%s = %p\n", dev->name, dev);
+ return NULL;
+}
- /* Lock the bus from any other access */
- b = &dev->bus;
- mutex_lock(&b->lock);
+static void *saa7164_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct saa7164_dev *dev = v;
+ void *ret;
- seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
- b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
+ mutex_lock(&devlist);
+ if (list_is_last(&dev->devlist, &saa7164_devlist))
+ ret = NULL;
+ else
+ ret = list_next_entry(dev, devlist);
+ mutex_unlock(&devlist);
- seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
- b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+ ++*pos;
- seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
- b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+ return ret;
+}
- seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
- b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
- c = 0;
- seq_printf(m, "\n Set Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeSetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+static void saa7164_seq_stop(struct seq_file *s, void *v)
+{
+}
- seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+static int saa7164_seq_show(struct seq_file *m, void *v)
+{
+ struct saa7164_dev *dev = v;
+ struct tmComResBusInfo *b;
+ int i, c;
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
- }
+ seq_printf(m, "%s = %p\n", dev->name, dev);
- c = 0;
- seq_printf(m, "\n Get Ring:\n");
- seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
- for (i = 0; i < b->m_dwSizeGetRing; i++) {
- if (c == 0)
- seq_printf(m, " %04x:", i);
+ /* Lock the bus from any other access */
+ b = &dev->bus;
+ mutex_lock(&b->lock);
- seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+ seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos));
- if (++c == 16) {
- seq_printf(m, "\n");
- c = 0;
- }
+ seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos));
+
+ seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n",
+ b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos));
+
+ seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n",
+ b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos));
+ c = 0;
+ seq_puts(m, "\n Set Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeSetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+
+ seq_printf(m, " %02x", readb(b->m_pdwSetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
}
+ }
- mutex_unlock(&b->lock);
+ c = 0;
+ seq_puts(m, "\n Get Ring:\n");
+ seq_puts(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+ for (i = 0; i < b->m_dwSizeGetRing; i++) {
+ if (c == 0)
+ seq_printf(m, " %04x:", i);
+ seq_printf(m, " %02x", readb(b->m_pdwGetRing + i));
+
+ if (++c == 16) {
+ seq_puts(m, "\n");
+ c = 0;
+ }
}
+ mutex_unlock(&b->lock);
+
return 0;
}
-static struct proc_dir_entry *saa7164_pe;
+static const struct seq_operations saa7164_seq_ops = {
+ .start = saa7164_seq_start,
+ .next = saa7164_seq_next,
+ .stop = saa7164_seq_stop,
+ .show = saa7164_seq_show,
+};
-static int saa7164_proc_create(void)
+static int saa7164_open(struct inode *inode, struct file *file)
{
- saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
- if (!saa7164_pe)
- return -ENOMEM;
+ return seq_open(file, &saa7164_seq_ops);
+}
- return 0;
+static const struct file_operations saa7164_operations = {
+ .owner = THIS_MODULE,
+ .open = saa7164_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static struct dentry *saa7164_dentry;
+
+static void __init saa7164_debugfs_create(void)
+{
+ saa7164_dentry = debugfs_create_file("saa7164", 0444, NULL, NULL,
+ &saa7164_operations);
}
-static void saa7164_proc_destroy(void)
+static void __exit saa7164_debugfs_remove(void)
{
- if (saa7164_pe)
- remove_proc_entry("saa7164", NULL);
+ debugfs_remove(saa7164_dentry);
}
#else
-static int saa7164_proc_create(void) { return 0; }
-static void saa7164_proc_destroy(void) {}
+static void saa7164_debugfs_create(void) { }
+static void saa7164_debugfs_remove(void) { }
#endif
static int saa7164_thread_function(void *data)
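Where the old /proc single-shot show walked every device in one pass, the debugfs version uses the full seq_file iterator: start() locates the *pos-th record, next() advances the cursor and bumps *pos, stop() releases any iteration state (a no-op here), and show() prints one record per call. A minimal sketch over a plain array instead of the locked device list:

	#include <linux/kernel.h>
	#include <linux/seq_file.h>

	static int records[] = { 10, 20, 30 };

	static void *demo_start(struct seq_file *s, loff_t *pos)
	{
		return *pos < ARRAY_SIZE(records) ? &records[*pos] : NULL;
	}

	static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
	{
		++*pos;				/* advance the cursor... */
		return demo_start(s, pos);	/* ...and revalidate it */
	}

	static void demo_stop(struct seq_file *s, void *v) { }

	static int demo_show(struct seq_file *s, void *v)
	{
		seq_printf(s, "%d\n", *(int *)v); /* one record per call */
		return 0;
	}

	static const struct seq_operations demo_seq_ops = {
		.start = demo_start, .next = demo_next,
		.stop = demo_stop, .show = demo_show,
	};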
@@ -1507,7 +1551,7 @@ static int __init saa7164_init(void)
if (ret)
return ret;
- saa7164_proc_create();
+ saa7164_debugfs_create();
pr_info("saa7164 driver loaded\n");
@@ -1516,7 +1560,7 @@ static int __init saa7164_init(void)
static void __exit saa7164_fini(void)
{
- saa7164_proc_destroy();
+ saa7164_debugfs_remove();
pci_unregister_driver(&saa7164_pci_driver);
}
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 65bc7e29450b..2b5e0154814c 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 30c8f2ec9c3c..eaa57d835ea8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -353,7 +353,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
G723_PERIOD_BYTES * PERIODS,
G723_PERIOD_BYTES * PERIODS);
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 40373bd23381..7786e51d19ae 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -300,7 +300,7 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(dev->pci_dev),
+ &dev->pci_dev->dev,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX);
return 0;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f1f61419fd29..e84f35d3a68e 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -483,6 +483,7 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
@@ -493,6 +494,19 @@ config VIDEO_QCOM_VENUS
on various Qualcomm SoCs.
To compile this driver as a module choose m here.
+config VIDEO_SUN8I_DEINTERLACE
+ tristate "Allwinner Deinterlace driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on COMMON_CLK && OF
+ depends on PM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the Allwinner deinterlace unit with scaling
+ capability found on some SoCs, like H3.
+ To compile this driver as a module choose m here.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
@@ -585,9 +599,10 @@ config VIDEO_MESON_G12A_AO_CEC
config CEC_GPIO
tristate "Generic GPIO-based CEC driver"
- depends on PREEMPT || COMPILE_TEST
+ depends on PREEMPTION || COMPILE_TEST
select CEC_CORE
select CEC_PIN
+ select CEC_NOTIFIER
select GPIOLIB
help
This is a generic GPIO-based CEC driver.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 6ee7eb0d36f4..d13db96e3015 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_VIDEO_VIVID) += vivid/
obj-$(CONFIG_VIDEO_VIM2M) += vim2m.o
obj-$(CONFIG_VIDEO_VICODEC) += vicodec/
-obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe/
-
-obj-$(CONFIG_VIDEO_TI_CAL) += ti-vpe/
+obj-y += ti-vpe/
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
obj-$(CONFIG_VIDEO_CODA) += coda/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 2b42ba1f5949..09104304bd06 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI VPFE capture Driver
*
@@ -5,19 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/delay.h>
@@ -69,125 +57,64 @@ static const struct vpfe_standard vpfe_standards[] = {
{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};
-struct bus_format {
- unsigned int width;
- unsigned int bpp;
-};
-
-/*
- * struct vpfe_fmt - VPFE media bus format information
- * @code: V4L2 media bus format code
- * @shifted: V4L2 media bus format code for the same pixel layout but
- * shifted to be 8 bits per pixel. =0 if format is not shiftable.
- * @pixelformat: V4L2 pixel format FCC identifier
- * @width: Bits per pixel (when transferred over a bus)
- * @bpp: Bytes per pixel (when stored in memory)
- * @supported: Indicates format supported by subdev
- */
-struct vpfe_fmt {
- u32 fourcc;
- u32 code;
- struct bus_format l;
- struct bus_format s;
- bool supported;
- u32 index;
-};
-
-static struct vpfe_fmt formats[] = {
+static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.code = MEDIA_BUS_FMT_YUYV8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
.code = MEDIA_BUS_FMT_YVYU8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
.code = MEDIA_BUS_FMT_VYUY8_2X8,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .l.width = 10,
- .l.bpp = 2,
- .s.width = 8,
- .s.bpp = 1,
- .supported = false,
+ .bitsperpixel = 8,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X,
.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .l.width = 10,
- .l.bpp = 4,
- .s.width = 8,
- .s.bpp = 2,
- .supported = false,
+ .bitsperpixel = 16,
},
};
-static int
-__vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp);
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt);
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f);
-static struct vpfe_fmt *find_format_by_code(unsigned int code)
+static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe,
+ unsigned int code)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->code == code)
return fmt;
}
@@ -195,13 +122,14 @@ static struct vpfe_fmt *find_format_by_code(unsigned int code)
return NULL;
}
-static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe,
+ unsigned int pixelformat)
{
struct vpfe_fmt *fmt;
unsigned int k;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- fmt = &formats[k];
+ for (k = 0; k < vpfe->num_active_fmt; k++) {
+ fmt = vpfe->active_fmt[k];
if (fmt->fourcc == pixelformat)
return fmt;
}
@@ -209,48 +137,18 @@ static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
return NULL;
}
-static void
-mbus_to_pix(struct vpfe_device *vpfe,
- const struct v4l2_mbus_framefmt *mbus,
- struct v4l2_pix_format *pix, unsigned int *bpp)
+static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt)
{
struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
unsigned int bus_width = sdinfo->vpfe_param.bus_width;
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_code(mbus->code);
- if (WARN_ON(fmt == NULL)) {
- pr_err("Invalid mbus code set\n");
- *bpp = 1;
- return;
- }
-
- memset(pix, 0, sizeof(*pix));
- v4l2_fill_pix_format(pix, mbus);
- pix->pixelformat = fmt->fourcc;
- *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+ u32 bpp, bus_width_bytes, clocksperpixel;
- /* pitch should be 32 bytes aligned */
- pix->bytesperline = ALIGN(pix->width * *bpp, 32);
- pix->sizeimage = pix->bytesperline * pix->height;
-}
-
-static void pix_to_mbus(struct vpfe_device *vpfe,
- struct v4l2_pix_format *pix_fmt,
- struct v4l2_mbus_framefmt *mbus_fmt)
-{
- struct vpfe_fmt *fmt;
-
- fmt = find_format_by_pix(pix_fmt->pixelformat);
- if (!fmt) {
- /* default to first entry */
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- pix_fmt->pixelformat);
- fmt = &formats[0];
- }
+ bus_width_bytes = ALIGN(bus_width, 8) >> 3;
+ clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width);
+ bpp = clocksperpixel * bus_width_bytes;
- memset(mbus_fmt, 0, sizeof(*mbus_fmt));
- v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+ return bpp;
}
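The new __get_bytesperpixel() derives storage bytes per pixel from the bus width instead of per-format tables, and it reproduces the old l.bpp/s.bpp values: a 16-bit YUYV pixel needs DIV_ROUND_UP(16, 8) = 2 clocks on an 8-bit bus at ALIGN(8, 8)/8 = 1 byte each (2 bytes total), and DIV_ROUND_UP(16, 10) = 2 clocks on a 10-bit bus at 2 bytes each (4 bytes total). The arithmetic in isolation:

	#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Storage bytes per pixel for 'bits'-wide pixels on a 'bus'-bit bus. */
	static unsigned int bytes_per_pixel(unsigned int bits, unsigned int bus)
	{
		unsigned int bus_bytes = ALIGN_UP(bus, 8) / 8;

		return DIV_ROUND_UP(bits, bus) * bus_bytes;
	}

	/* bytes_per_pixel(16, 8)  == 2   YUYV, 8-bit bus
	 * bytes_per_pixel(16, 10) == 4   YUYV, 10-bit bus
	 * bytes_per_pixel(8, 8)   == 1   raw Bayer, 8-bit bus */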
/* Print Four-character-code (FOURCC) */
@@ -267,20 +165,6 @@ static char *print_fourcc(u32 fmt)
return code;
}
-static int
-cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
-{
- return lhs->type == rhs->type &&
- lhs->fmt.pix.width == rhs->fmt.pix.width &&
- lhs->fmt.pix.height == rhs->fmt.pix.height &&
- lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
- lhs->fmt.pix.field == rhs->fmt.pix.field &&
- lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
- lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
- lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
- lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
-}
-
static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
{
return ioread32(ccdc->ccdc_cfg.base_addr + offset);
@@ -345,13 +229,9 @@ static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
vert_nr_lines = (image_win->height >> 1) - 1;
vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
/* configure VDINT0 */
val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
} else {
- /* Since first line doesn't have any data */
- vert_start += 1;
vert_nr_lines = image_win->height - 1;
/*
* configure VDINT0 and VDINT1. VDINT1 will be at half
@@ -405,7 +285,6 @@ vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
- ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
max_gamma > max_data) {
vpfe_dbg(1, vpfe, "Invalid data line select\n");
return -EINVAL;
@@ -445,40 +324,25 @@ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
- int dma_cntl, i, pcr;
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ u32 dma_cntl, pcr;
- /* If the CCDC module is still busy wait for it to be done */
- for (i = 0; i < 10; i++) {
- usleep_range(5000, 6000);
- pcr = vpfe_reg_read(ccdc, VPFE_PCR);
- if (!pcr)
- break;
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (pcr)
+ vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)\n", pcr);
- /* make sure it it is disabled */
- vpfe_pcr_enable(ccdc, 0);
- }
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if (dma_cntl & VPFE_DMA_CNTL_OVERFLOW)
+ vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)\n",
+ dma_cntl);
/* Disable CCDC by resetting all register to default POR values */
vpfe_ccdc_restore_defaults(ccdc);
- /* if DMA_CNTL overflow bit is set. Clear it
- * It appears to take a while for this to become quiescent ~20ms
- */
- for (i = 0; i < 10; i++) {
- dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
- if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
- break;
-
- /* Clear the overflow bit */
- vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
- usleep_range(5000, 6000);
- }
-
/* Disabled the module at the CONFIG level */
vpfe_config_enable(ccdc, 0);
pm_runtime_put_sync(dev);
-
return 0;
}
@@ -494,8 +358,8 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
x = copy_from_user(&raw_params, params, sizeof(raw_params));
if (x) {
vpfe_dbg(1, vpfe,
- "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
- x);
+ "%s: error in copying ccdc params, %d\n",
+ __func__, x);
return -EFAULT;
}
@@ -513,11 +377,9 @@ static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
*/
static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
{
- struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
u32 syn_mode;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
/*
* first restore the CCDC registers to default values
* This is important since we assume default values to be set in
@@ -649,8 +511,6 @@ static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
unsigned int syn_mode;
unsigned int val;
- vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
-
/* Reset CCDC */
vpfe_ccdc_restore_defaults(ccdc);
@@ -751,8 +611,8 @@ static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
- vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
- ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+ vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n",
+ __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
@@ -1036,10 +896,9 @@ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ u32 bpp;
int ret = 0;
- vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
-
vpfe_dbg(1, vpfe, "pixelformat: %s\n",
print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
@@ -1050,7 +909,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
}
/* configure the image window */
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp);
switch (vpfe->fmt.fmt.pix.field) {
case V4L2_FIELD_INTERLACED:
@@ -1094,7 +954,8 @@ static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
static int vpfe_config_image_format(struct vpfe_device *vpfe,
v4l2_std_id std_id)
{
- struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ struct vpfe_fmt *fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
int i, ret;
for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
@@ -1116,26 +977,29 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe,
return -EINVAL;
}
- vpfe->crop.top = vpfe->crop.left = 0;
- vpfe->crop.width = vpfe->std_info.active_pixels;
- vpfe->crop.height = vpfe->std_info.active_lines;
- pix->width = vpfe->crop.width;
- pix->height = vpfe->crop.height;
- pix->pixelformat = V4L2_PIX_FMT_YUYV;
-
- /* first field and frame format based on standard frame format */
- if (vpfe->std_info.frame_format)
- pix->field = V4L2_FIELD_INTERLACED;
- else
- pix->field = V4L2_FIELD_NONE;
-
- ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ ret = __subdev_get_format(vpfe, &mbus_fmt);
if (ret)
return ret;
+ fmt = find_format_by_code(vpfe, mbus_fmt.code);
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n",
+ mbus_fmt.code);
+ return -EINVAL;
+ }
+
+ /* Save current subdev format */
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc;
+ vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt);
+ vpfe->current_vpfe_fmt = fmt;
+
/* Update the crop window based on found values */
- vpfe->crop.width = pix->width;
- vpfe->crop.height = pix->height;
+ vpfe->crop.top = 0;
+ vpfe->crop.left = 0;
+ vpfe->crop.width = mbus_fmt.width;
+ vpfe->crop.height = mbus_fmt.height;
return vpfe_config_ccdc_image_format(vpfe);
}
@@ -1237,22 +1101,29 @@ unlock:
* This function will get next buffer from the dma queue and
* set the buffer address in the vpfe register for capture.
* the buffer is marked active
- *
- * Assumes caller is holding vpfe->dma_queue_lock already
*/
-static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
+ dma_addr_t addr;
+
+ spin_lock(&vpfe->dma_queue_lock);
+ if (list_empty(&vpfe->dma_queue)) {
+ spin_unlock(&vpfe->dma_queue_lock);
+ return;
+ }
+
vpfe->next_frm = list_entry(vpfe->dma_queue.next,
struct vpfe_cap_buffer, list);
list_del(&vpfe->next_frm->list);
+ spin_unlock(&vpfe->dma_queue_lock);
- vpfe_set_sdr_addr(&vpfe->ccdc,
- vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0);
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}
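vpfe_schedule_next_buffer() now takes dma_queue_lock itself and returns quietly on an empty queue, so the interrupt handler no longer has to wrap every call site in the lock. The dequeue-under-lock, program-DMA-outside-lock shape:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct cap_buf { struct list_head list; dma_addr_t addr; };

	/* Sketch: claim the next buffer under the lock, use it outside. */
	static struct cap_buf *pop_next(spinlock_t *lock, struct list_head *q)
	{
		struct cap_buf *b = NULL;

		spin_lock(lock);
		if (!list_empty(q)) {
			b = list_first_entry(q, struct cap_buf, list);
			list_del(&b->list);	/* ours now; safe to unlock */
		}
		spin_unlock(lock);

		return b;			/* NULL: nothing queued */
	}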
static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
- unsigned long addr;
+ dma_addr_t addr;
addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
vpfe->field_off;
@@ -1277,6 +1148,58 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
vpfe->cur_frm = vpfe->next_frm;
}
+static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe,
+ enum v4l2_field field)
+{
+ int fid;
+
+ /* interlaced or TB capture: check which field
+ * we are in at the hardware level
+ */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+
+ if (vpfe->stopping)
+ return;
+
+ /*
+ * based on whether the two fields are stored
+ * interleave or separately in memory,
+ * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+ } else {
+ /*
+ * if one field is just being captured configure
+ * the next frame get the next frame from the empty
+ * queue if no frame is available hold on to the
+ * current buffer
+ */
+ if (vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ }
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May loose one frame
+ */
+ vpfe->field = fid;
+ }
+}
+
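The factored-out handler keeps a software field counter that toggles on every VDINT0 and compares it with the hardware field ID: equal means capture is in lock-step; on a mismatch it resynchronises only when the hardware reports field 0 (a frame boundary), at the cost of at most one dropped frame. The sync rule on its own:

	/* Sketch of the field-sync rule used above. */
	static int field_sync(int *soft_field, int hw_fid)
	{
		*soft_field ^= 1;		/* field we expect this irq */

		if (hw_fid == *soft_field)
			return 0;		/* in sync, keep going */

		if (hw_fid == 0)
			*soft_field = 0;	/* resync at frame start */

		return -1;			/* out of sync this time */
	}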
/*
* vpfe_isr : ISR handler for vpfe capture (VINT0)
* @irq: irq number
@@ -1288,76 +1211,28 @@ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
static irqreturn_t vpfe_isr(int irq, void *dev)
{
struct vpfe_device *vpfe = (struct vpfe_device *)dev;
- enum v4l2_field field;
- int intr_status;
- int fid;
+ enum v4l2_field field = vpfe->fmt.fmt.pix.field;
+ int intr_status, stopping = vpfe->stopping;
intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
if (intr_status & VPFE_VDINT0) {
- field = vpfe->fmt.fmt.pix.field;
-
if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
if (vpfe->cur_frm != vpfe->next_frm)
vpfe_process_buffer_complete(vpfe);
- goto next_intr;
+ } else {
+ vpfe_handle_interlaced_irq(vpfe, field);
}
-
- /* interlaced or TB capture check which field
- we are in hardware */
- fid = vpfe_ccdc_getfid(&vpfe->ccdc);
-
- /* switch the software maintained field id */
- vpfe->field ^= 1;
- if (fid == vpfe->field) {
- /* we are in-sync here,continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the
- * next frame is available, release the
- * current frame and move on
- */
- if (vpfe->cur_frm != vpfe->next_frm)
- vpfe_process_buffer_complete(vpfe);
- /*
- * based on whether the two fields are stored
- * interleave or separately in memory,
- * reconfigure the CCDC memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_schedule_bottom_field(vpfe);
-
- goto next_intr;
- }
- /*
- * if one field is just being captured configure
- * the next frame get the next frame from the empty
- * queue if no frame is available hold on to the
- * current buffer
- */
- spin_lock(&vpfe->dma_queue_lock);
- if (!list_empty(&vpfe->dma_queue) &&
- vpfe->cur_frm == vpfe->next_frm)
- vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May loose one frame
- */
- vpfe->field = fid;
+ if (stopping) {
+ vpfe->stopping = false;
+ complete(&vpfe->capture_stop);
}
}
-next_intr:
- if (intr_status & VPFE_VDINT1) {
- spin_lock(&vpfe->dma_queue_lock);
- if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
- !list_empty(&vpfe->dma_queue) &&
+ if (intr_status & VPFE_VDINT1 && !stopping) {
+ if (field == V4L2_FIELD_NONE &&
vpfe->cur_frm == vpfe->next_frm)
vpfe_schedule_next_buffer(vpfe);
- spin_unlock(&vpfe->dma_queue_lock);
}
vpfe_clear_intr(&vpfe->ccdc, intr_status);
@@ -1394,8 +1269,6 @@ static int vpfe_querycap(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_querycap\n");
-
strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
@@ -1404,83 +1277,74 @@ static int vpfe_querycap(struct file *file, void *priv,
}
/* get the format set at output pad of the adjacent subdev */
-static int __vpfe_get_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_get_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct v4l2_mbus_framefmt mbus_fmt;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
- ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
return ret;
- if (!ret) {
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
- } else {
- ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
- sdinfo->grp_id,
- pad, get_fmt,
- NULL, &fmt);
- if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
- return ret;
- v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
- mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
- }
-
- format->type = vpfe->fmt.type;
+ *fmt = *mbus_fmt;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
return 0;
}
/* set the format at output pad of the adjacent subdev */
-static int __vpfe_set_format(struct vpfe_device *vpfe,
- struct v4l2_format *format, unsigned int *bpp)
+static int __subdev_set_format(struct vpfe_device *vpfe,
+ struct v4l2_mbus_framefmt *fmt)
{
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ struct v4l2_subdev_format sd_fmt;
+ struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = 0;
+ *mbus_fmt = *fmt;
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
+ ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
- fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt.pad = 0;
+ vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__,
+ fmt->width, fmt->height, fmt->code);
- pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+ return 0;
+}
- ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
- if (ret)
- return ret;
+static int vpfe_calc_format_size(struct vpfe_device *vpfe,
+ const struct vpfe_fmt *fmt,
+ struct v4l2_format *f)
+{
+ u32 bpp;
+
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n");
+ return -EINVAL;
+ }
- v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
- mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ bpp = __get_bytesperpixel(vpfe, fmt);
- format->type = vpfe->fmt.type;
+ /* pitch should be 32-byte aligned */
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
- vpfe_dbg(1, vpfe,
- "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
- __func__, format->fmt.pix.width, format->fmt.pix.height,
- print_fourcc(format->fmt.pix.pixelformat),
- format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+ vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+ __func__, print_fourcc(f->fmt.pix.pixelformat),
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
return 0;
}
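
The size computation is plain arithmetic: the line pitch is the width in bytes rounded up to the next 32-byte boundary, and the image size is pitch times height. A runnable illustration with example values; ALIGN_UP mirrors what the kernel's ALIGN() does for power-of-two alignments:

	#include <stdio.h>
	#include <stdint.h>

	/* same rounding the kernel's ALIGN() performs for power-of-two a */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

	int main(void)
	{
		uint32_t width = 642, height = 480, bpp = 2;	/* example values */
		uint32_t bytesperline = ALIGN_UP(width * bpp, 32);
		uint32_t sizeimage = bytesperline * height;

		/* 642 * 2 = 1284 -> rounded up to 1312; 1312 * 480 = 629760 */
		printf("bpl=%u size=%u\n", bytesperline, sizeimage);
		return 0;
	}
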
@@ -1490,8 +1354,6 @@ static int vpfe_g_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
-
*fmt = vpfe->fmt;
return 0;
@@ -1502,82 +1364,124 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- struct vpfe_fmt *fmt = NULL;
- unsigned int k;
-
- vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
- f->index);
+ struct vpfe_fmt *fmt;
sdinfo = vpfe->current_subdev;
if (!sdinfo->sd)
return -EINVAL;
- if (f->index > ARRAY_SIZE(formats))
+ if (f->index >= vpfe->num_active_fmt)
return -EINVAL;
- for (k = 0; k < ARRAY_SIZE(formats); k++) {
- if (formats[k].index == f->index) {
- fmt = &formats[k];
- break;
- }
- }
- if (!fmt)
- return -EINVAL;
+ fmt = vpfe->active_fmt[f->index];
f->pixelformat = fmt->fourcc;
- vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s\n",
- f->index, fmt->code, print_fourcc(fmt->fourcc));
+ vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n",
+ __func__, f->index, fmt->code, print_fourcc(fmt->fourcc));
return 0;
}
static int vpfe_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
+ struct v4l2_format *f)
{
struct vpfe_device *vpfe = video_drvdata(file);
- unsigned int bpp;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
+ const struct vpfe_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse;
+ int ret, found;
+
+ fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ f->fmt.pix.pixelformat);
+ fmt = vpfe->active_fmt[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = vpfe->fmt.fmt.pix.field;
+
+ /* check for/find a valid width/height */
+ ret = 0;
+ found = false;
+ fse.pad = 0;
+ fse.code = fmt->code;
+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ for (fse.index = 0; ; fse.index++) {
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ break;
+
+ if (f->fmt.pix.width == fse.max_width &&
+ f->fmt.pix.height == fse.max_height) {
+ found = true;
+ break;
+ } else if (f->fmt.pix.width >= fse.min_width &&
+ f->fmt.pix.width <= fse.max_width &&
+ f->fmt.pix.height >= fse.min_height &&
+ f->fmt.pix.height <= fse.max_height) {
+ found = true;
+ break;
+ }
+ }
- vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+ if (!found) {
+ /* use existing values as default */
+ f->fmt.pix.width = vpfe->fmt.fmt.pix.width;
+ f->fmt.pix.height = vpfe->fmt.fmt.pix.height;
+ }
- return __vpfe_get_format(vpfe, fmt, &bpp);
+ /*
+ * Use the current colorspace for now; it will be
+ * updated properly during s_fmt
+ */
+ f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace;
+ return vpfe_calc_format_size(vpfe, fmt, f);
}
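
The negotiation loop above walks the subdev's discrete frame sizes and accepts either an exact match on a range's maximum or any size inside a continuous range, falling back to the currently configured size otherwise. The same logic, runnable against a made-up size table:

	#include <stdio.h>
	#include <stdbool.h>

	struct size_range { unsigned min_w, max_w, min_h, max_h; };

	/* stand-in for the subdev's enum_frame_size results (hypothetical) */
	static const struct size_range ranges[] = {
		{ 640,  640,  480, 480 },
		{ 320, 1280,  240, 720 },
	};

	int main(void)
	{
		unsigned w = 800, h = 600;	/* requested size */
		bool found = false;

		for (unsigned i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
			const struct size_range *r = &ranges[i];

			if ((w == r->max_w && h == r->max_h) ||
			    (w >= r->min_w && w <= r->max_w &&
			     h >= r->min_h && h <= r->max_h)) {
				found = true;
				break;
			}
		}
		if (!found) {
			w = 640;	/* fall back to the currently set size */
			h = 480;
		}

		printf("negotiated %ux%u\n", w, h);
		return 0;
	}
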
static int vpfe_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vpfe_device *vpfe = video_drvdata(file);
- struct v4l2_format format;
- unsigned int bpp;
+ struct vpfe_fmt *f;
+ struct v4l2_mbus_framefmt mbus_fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
}
- ret = __vpfe_get_format(vpfe, &format, &bpp);
- if (ret)
+ ret = vpfe_try_fmt(file, priv, fmt);
+ if (ret < 0)
return ret;
+ f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat);
- if (!cmp_v4l2_format(fmt, &format)) {
- /* Sensor format is different from the requested format
- * so we need to change it
- */
- ret = __vpfe_set_format(vpfe, fmt, &bpp);
- if (ret)
- return ret;
- } else /* Just make sure all of the fields are consistent */
- *fmt = format;
+ v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code);
- /* First detach any IRQ if currently attached */
- vpfe_detach_irq(vpfe);
- vpfe->fmt = *fmt;
- vpfe->bpp = bpp;
+ ret = __subdev_set_format(vpfe, &mbus_fmt);
+ if (ret)
+ return ret;
+
+ /* Just double-check that nothing has gone wrong */
+ if (mbus_fmt.code != f->code) {
+ vpfe_dbg(3, vpfe,
+ "%s subdev changed format on us, this should not happen\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt);
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe->fmt.fmt.pix.pixelformat = f->fourcc;
+ vpfe_calc_format_size(vpfe, f, &vpfe->fmt);
+ *fmt = vpfe->fmt;
+ vpfe->current_vpfe_fmt = f;
/* Update the crop window based on found values */
vpfe->crop.width = fmt->fmt.pix.width;
@@ -1592,57 +1496,40 @@ static int vpfe_enum_size(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_subdev_frame_size_enum fse;
- struct vpfe_subdev_info *sdinfo;
- struct v4l2_mbus_framefmt mbus;
- struct v4l2_pix_format pix;
+ struct v4l2_subdev *sd = vpfe->current_subdev->sd;
struct vpfe_fmt *fmt;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
-
/* check for valid format */
- fmt = find_format_by_pix(fsize->pixel_format);
+ fmt = find_format_by_pix(vpfe, fsize->pixel_format);
if (!fmt) {
- vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
- fsize->pixel_format);
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n",
+ fsize->pixel_format);
return -EINVAL;
}
memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
- sdinfo = vpfe->current_subdev;
- if (!sdinfo->sd)
- return -EINVAL;
-
- memset(&pix, 0x0, sizeof(pix));
- /* Construct pix from parameter and use default for the rest */
- pix.pixelformat = fsize->pixel_format;
- pix.width = 640;
- pix.height = 480;
- pix.colorspace = V4L2_COLORSPACE_SRGB;
- pix.field = V4L2_FIELD_NONE;
- pix_to_mbus(vpfe, &pix, &mbus);
-
memset(&fse, 0x0, sizeof(fse));
fse.index = fsize->index;
fse.pad = 0;
- fse.code = mbus.code;
+ fse.code = fmt->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
if (ret)
- return -EINVAL;
+ return ret;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
- fse.index, fse.code, fse.min_width, fse.max_width,
- fse.min_height, fse.max_height);
+ vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width;
fsize->discrete.height = fse.max_height;
- vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
- fsize->index, print_fourcc(fsize->pixel_format),
- fsize->discrete.width, fsize->discrete.height);
+ vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n",
+ __func__, fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
return 0;
}
@@ -1707,8 +1594,6 @@ static int vpfe_enum_input(struct file *file, void *priv,
struct vpfe_subdev_info *sdinfo;
int subdev, index;
- vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
-
if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
inp->index) < 0) {
vpfe_dbg(1, vpfe,
@@ -1725,8 +1610,6 @@ static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_input\n");
-
return vpfe_get_app_input_index(vpfe, index);
}
@@ -1739,8 +1622,6 @@ static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
u32 input, output;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
-
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1796,9 +1677,6 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe,
- "vpfe_s_input: index: %d\n", index);
-
return vpfe_set_input(vpfe, index);
}
@@ -1807,8 +1685,6 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_querystd\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
@@ -1824,12 +1700,14 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
struct vpfe_subdev_info *sdinfo;
int ret;
- vpfe_dbg(2, vpfe, "vpfe_s_std\n");
-
sdinfo = vpfe->current_subdev;
if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
return -ENODATA;
+ /* if trying to set the same std, there is nothing to do */
+ if (vpfe_standards[vpfe->std_index].std_id == std_id)
+ return 0;
+
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
vpfe_err(vpfe, "%s device busy\n", __func__);
@@ -1853,8 +1731,6 @@ static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
struct vpfe_device *vpfe = video_drvdata(file);
struct vpfe_subdev_info *sdinfo;
- vpfe_dbg(2, vpfe, "vpfe_g_std\n");
-
sdinfo = vpfe->current_subdev;
if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
return -ENODATA;
@@ -1872,8 +1748,6 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
{
struct v4l2_rect image_win;
- vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
-
vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
vpfe->field_off = image_win.height * image_win.width;
}
@@ -1957,6 +1831,29 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
+static void vpfe_return_all_buffers(struct vpfe_device *vpfe,
+ enum vb2_buffer_state state)
+{
+ struct vpfe_cap_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+
+ if (vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state);
+
+ if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm)
+ vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state);
+
+ vpfe->cur_frm = NULL;
+ vpfe->next_frm = NULL;
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
/*
* vpfe_start_streaming : Starts the DMA engine for streaming
* @vb: ptr to vb2_buffer
@@ -1965,7 +1862,6 @@ static void vpfe_buffer_queue(struct vb2_buffer *vb)
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
- struct vpfe_cap_buffer *buf, *tmp;
struct vpfe_subdev_info *sdinfo;
unsigned long flags;
unsigned long addr;
@@ -1980,6 +1876,9 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
vpfe_attach_irq(vpfe);
+ vpfe->stopping = false;
+ init_completion(&vpfe->capture_stop);
+
if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
vpfe_ccdc_config_raw(&vpfe->ccdc);
else
@@ -2008,11 +1907,8 @@ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err:
- list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
- }
-
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED);
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
return ret;
}
@@ -2027,11 +1923,15 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
struct vpfe_subdev_info *sdinfo;
- unsigned long flags;
int ret;
vpfe_pcr_enable(&vpfe->ccdc, 0);
+ /* Wait for the last frame to be captured */
+ vpfe->stopping = true;
+ wait_for_completion_timeout(&vpfe->capture_stop,
+ msecs_to_jiffies(250));
+
vpfe_detach_irq(vpfe);
sdinfo = vpfe->current_subdev;
@@ -2040,27 +1940,7 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
/* release all active buffers */
- spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
- if (vpfe->cur_frm == vpfe->next_frm) {
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- } else {
- if (vpfe->cur_frm != NULL)
- vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- if (vpfe->next_frm != NULL)
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&vpfe->dma_queue)) {
- vpfe->next_frm = list_entry(vpfe->dma_queue.next,
- struct vpfe_cap_buffer, list);
- list_del(&vpfe->next_frm->list);
- vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR);
}
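
Stop-path synchronization now follows a flag-plus-completion handshake: stop_streaming raises vpfe->stopping and waits with a timeout (so a wedged sensor cannot hang streamoff), while the ISR finishes the frame in flight, clears the flag, and signals the completion. The pattern in isolation, as a kernel-style sketch with hypothetical names (the completion is assumed to have been init_completion()'d when streaming started):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/types.h>

	struct demo_ctx {
		bool stopping;
		struct completion capture_stop;
	};

	/* ISR side: runs at the frame boundary */
	static void demo_isr_side(struct demo_ctx *ctx)
	{
		if (ctx->stopping) {
			ctx->stopping = false;
			complete(&ctx->capture_stop);
		}
	}

	/* stop_streaming side: bounded wait for the last frame */
	static void demo_stop_side(struct demo_ctx *ctx)
	{
		ctx->stopping = true;
		wait_for_completion_timeout(&ctx->capture_stop,
					    msecs_to_jiffies(250));
	}
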
static int vpfe_g_pixelaspect(struct file *file, void *priv,
@@ -2068,8 +1948,6 @@ static int vpfe_g_pixelaspect(struct file *file, void *priv,
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_g_pixelaspect\n");
-
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
return -EINVAL;
@@ -2128,6 +2006,7 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
struct vpfe_device *vpfe = video_drvdata(file);
struct v4l2_rect cr = vpfe->crop;
struct v4l2_rect r = s->r;
+ u32 bpp;
/* If streaming is started, return error */
if (vb2_is_busy(&vpfe->buffer_queue)) {
@@ -2153,10 +2032,12 @@ vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
s->r = vpfe->crop = r;
- vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt);
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp);
vpfe->fmt.fmt.pix.width = r.width;
vpfe->fmt.fmt.pix.height = r.height;
- vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.bytesperline =
+ vpfe_ccdc_get_line_length(&vpfe->ccdc);
vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
vpfe->fmt.fmt.pix.height;
@@ -2172,8 +2053,6 @@ static long vpfe_ioctl_default(struct file *file, void *priv,
struct vpfe_device *vpfe = video_drvdata(file);
int ret;
- vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
-
if (!valid_prio) {
vpfe_err(vpfe, "%s device busy\n", __func__);
return -EBUSY;
@@ -2279,10 +2158,10 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
struct vpfe_device, v4l2_dev);
struct v4l2_subdev_mbus_code_enum mbus_code;
struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt;
+ int ret = 0;
bool found = false;
- int i, j;
-
- vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+ int i, j, k;
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
if (vpfe->cfg->asd[i]->match.fwnode ==
@@ -2302,27 +2181,37 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;
- /* setup the supported formats & indexes */
- for (j = 0, i = 0; ; ++j) {
- struct vpfe_fmt *fmt;
- int ret;
-
+ vpfe->num_active_fmt = 0;
+ for (j = 0, i = 0; (ret != -EINVAL); ++j) {
memset(&mbus_code, 0, sizeof(mbus_code));
mbus_code.index = j;
mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
- NULL, &mbus_code);
+ NULL, &mbus_code);
if (ret)
- break;
-
- fmt = find_format_by_code(mbus_code.code);
- if (!fmt)
continue;
- fmt->supported = true;
- fmt->index = i++;
+ vpfe_dbg(3, vpfe,
+ "subdev %s: code: %04x idx: %d\n",
+ subdev->name, mbus_code.code, j);
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (mbus_code.code != fmt->code)
+ continue;
+ vpfe->active_fmt[i] = fmt;
+ vpfe_dbg(3, vpfe,
+ "matched fourcc: %s code: %04x idx: %d\n",
+ print_fourcc(fmt->fourcc), mbus_code.code, i);
+ vpfe->num_active_fmt = ++i;
+ }
}
+ if (!i) {
+ vpfe_err(vpfe, "No suitable format reported by subdev %s\n",
+ subdev->name);
+ return -EINVAL;
+ }
return 0;
}
@@ -2605,8 +2494,6 @@ static int vpfe_remove(struct platform_device *pdev)
{
struct vpfe_device *vpfe = platform_get_drvdata(pdev);
- vpfe_dbg(2, vpfe, "vpfe_remove\n");
-
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vpfe->notifier);
@@ -2653,22 +2540,21 @@ static int vpfe_suspend(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full suspend if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Save VPFE context */
- vpfe_save_context(ccdc);
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
- /* Disable CCDC */
- vpfe_pcr_enable(ccdc, 0);
- vpfe_config_enable(ccdc, 0);
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
- /* Disable both master and slave clock */
- pm_runtime_put_sync(dev);
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+ }
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -2710,19 +2596,18 @@ static int vpfe_resume(struct device *dev)
struct vpfe_device *vpfe = dev_get_drvdata(dev);
struct vpfe_ccdc *ccdc = &vpfe->ccdc;
- /* if streaming has not started we don't care */
- if (!vb2_start_streaming_called(&vpfe->buffer_queue))
- return 0;
-
- /* Enable both master and slave clock */
- pm_runtime_get_sync(dev);
- vpfe_config_enable(ccdc, 1);
+ /* only do full resume if streaming has started */
+ if (vb2_start_streaming_called(&vpfe->buffer_queue)) {
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
- /* Restore VPFE context */
- vpfe_restore_context(ccdc);
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
- vpfe_config_enable(ccdc, 0);
- pm_runtime_put_sync(dev);
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+ }
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 4678285f34c6..05ee37db0273 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 - 2014 Texas Instruments, Inc.
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef AM437X_VPFE_H
@@ -23,6 +11,7 @@
#include <linux/am437x-vpfe.h>
#include <linux/clk.h>
+#include <linux/completion.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/i2c.h>
@@ -214,6 +203,25 @@ struct vpfe_ccdc {
u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
};
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * fourcc: V4L2 pixel format code
+ * code: V4L2 media bus format code
+ * bitsperpixel: Bits per pixel over the bus
+ */
+struct vpfe_fmt {
+ u32 fourcc;
+ u32 code;
+ u32 bitsperpixel;
+};
+
+/*
+ * When formats[] is modified, make sure to adjust this value too.
+ * Expect compile-time warnings if VPFE_NUM_FORMATS is smaller than
+ * the number of elements in formats[].
+ */
+#define VPFE_NUM_FORMATS 10
+
struct vpfe_device {
/* V4l2 specific parameters */
/* Identifies video device for this channel */
@@ -249,8 +257,11 @@ struct vpfe_device {
struct vpfe_cap_buffer *next_frm;
/* Used to store pixel format */
struct v4l2_format fmt;
- /* Used to store current bytes per pixel based on current format */
- unsigned int bpp;
+ /* Used to keep a reference to the current vpfe_fmt */
+ struct vpfe_fmt *current_vpfe_fmt;
+ struct vpfe_fmt *active_fmt[VPFE_NUM_FORMATS];
+ unsigned int num_active_fmt;
+
/*
* used when IMP is chained to store the crop window which
* is different from the image window
@@ -270,6 +281,8 @@ struct vpfe_device {
*/
u32 field_off;
struct vpfe_ccdc ccdc;
+ int stopping;
+ struct completion capture_stop;
};
#endif /* AM437X_VPFE_H */
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
index 0746c48ec23f..63ecdca3b908 100644
--- a/drivers/media/platform/am437x/am437x-vpfe_regs.h
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI AM437x Image Sensor Interface Registers
*
@@ -5,15 +6,6 @@
*
* Benoit Parrot <bparrot@ti.com>
* Lad, Prabhakar <prabhakar.csengg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef AM437X_VPFE_REGS_H
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index eb12f3793062..d8593cb2ae84 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -606,6 +606,16 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
+ /*
+ * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
+ * are disabled in the VE_INTERRUPT_CTRL register, so clear them to
+ * prevent unnecessary interrupt calls.
+ */
+ if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
+ sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
+ if (sts & VE_INTERRUPT_FRAME_COMPLETE)
+ sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
+
return sts ? IRQ_NONE : IRQ_HANDLED;
}
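
The handler's exit convention is worth spelling out: every status bit the code either serviced or knowingly ignores is cleared from the local sts copy, and IRQ_HANDLED is returned only when nothing unexplained remains. A tiny runnable model of that masking (bit names are made up):

	#include <stdio.h>
	#include <stdint.h>

	#define INT_CAPTURE_COMPLETE	(1u << 0)
	#define INT_FRAME_COMPLETE	(1u << 1)

	int main(void)
	{
		uint32_t sts = INT_CAPTURE_COMPLETE | INT_FRAME_COMPLETE;

		/* knock out the bits that fire even when masked in ctrl */
		sts &= ~(INT_CAPTURE_COMPLETE | INT_FRAME_COMPLETE);

		/* anything still set is genuinely unexplained -> IRQ_NONE */
		printf(sts ? "IRQ_NONE\n" : "IRQ_HANDLED\n");
		return 0;
	}
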
@@ -614,7 +624,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
int i;
int hsync_counter = 0;
int vsync_counter = 0;
- u32 sts;
+ u32 sts, ctrl;
for (i = 0; i < NUM_POLARITY_CHECKS; ++i) {
sts = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
@@ -629,30 +639,29 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
hsync_counter++;
}
- if (hsync_counter < 0 || vsync_counter < 0) {
- u32 ctrl = 0;
+ ctrl = aspeed_video_read(video, VE_CTRL);
- if (hsync_counter < 0) {
- ctrl = VE_CTRL_HSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_HSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_HSYNC_POS_POL;
- }
-
- if (vsync_counter < 0) {
- ctrl = VE_CTRL_VSYNC_POL;
- video->detected_timings.polarities &=
- ~V4L2_DV_VSYNC_POS_POL;
- } else {
- video->detected_timings.polarities |=
- V4L2_DV_VSYNC_POS_POL;
- }
+ if (hsync_counter < 0) {
+ ctrl |= VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_HSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_HSYNC_POS_POL;
+ }
- if (ctrl)
- aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ if (vsync_counter < 0) {
+ ctrl |= VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_VSYNC_POS_POL;
+ } else {
+ ctrl &= ~VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities |=
+ V4L2_DV_VSYNC_POS_POL;
}
+
+ aspeed_video_write(video, VE_CTRL, ctrl);
}
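
The rework here replaces a set-only update — the old code could set a polarity bit but never clear one, and the single ctrl variable could only carry one of the two bits — with a full read-modify-write of VE_CTRL, so both polarity bits end up in a defined state either way. The read-modify-write pattern on a fake register:

	#include <stdio.h>
	#include <stdint.h>

	#define CTRL_HSYNC_POL	(1u << 0)
	#define CTRL_VSYNC_POL	(1u << 1)

	static uint32_t fake_reg = CTRL_VSYNC_POL;	/* stands in for VE_CTRL */

	int main(void)
	{
		int hsync_counter = -3, vsync_counter = 5;	/* sample poll results */
		uint32_t ctrl = fake_reg;			/* read */

		if (hsync_counter < 0)				/* modify, both directions */
			ctrl |= CTRL_HSYNC_POL;
		else
			ctrl &= ~CTRL_HSYNC_POL;
		if (vsync_counter < 0)
			ctrl |= CTRL_VSYNC_POL;
		else
			ctrl &= ~CTRL_VSYNC_POL;

		fake_reg = ctrl;				/* write back */
		printf("ctrl=0x%x\n", fake_reg);		/* -> 0x1 */
		return 0;
	}
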
static bool aspeed_video_alloc_buf(struct aspeed_video *video,
@@ -741,6 +750,8 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
}
set_bit(VIDEO_RES_DETECT, &video->flags);
+ aspeed_video_update(video, VE_CTRL,
+ VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0);
aspeed_video_enable_mode_detect(video);
rc = wait_event_interruptible_timeout(video->wait,
@@ -1646,7 +1657,8 @@ static int aspeed_video_probe(struct platform_device *pdev)
{
int rc;
struct resource *res;
- struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL);
+ struct aspeed_video *video =
+ devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
if (!video)
return -ENOMEM;
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 31ace114eda1..be9ec59774d6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
*/
for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
unsigned int idx = find_first_zero_bit(&lanes_used,
- sizeof(lanes_used));
+ csi2rx->max_lanes);
set_bit(idx, &lanes_used);
reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
}
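
The one-line fix is about units: find_first_zero_bit() takes its bound in bits, while sizeof(lanes_used) is a byte count (8 for an unsigned long on 64-bit), so the old call could hand back an index beyond the controller's real lane count. A runnable model of the difference, using a naive bit scan:

	#include <stdio.h>

	/* naive model of find_first_zero_bit(addr, size_in_bits) */
	static unsigned find_first_zero(unsigned long word, unsigned size_in_bits)
	{
		for (unsigned i = 0; i < size_in_bits; i++)
			if (!(word & (1ul << i)))
				return i;
		return size_in_bits;	/* sentinel: nothing free in range */
	}

	int main(void)
	{
		unsigned long lanes_used = 0x3ul;	/* both real lanes taken */
		unsigned max_lanes = 2;

		/* bits-vs-bytes: sizeof() is 8 bytes here, not a lane count */
		unsigned wrong = find_first_zero(lanes_used, sizeof(lanes_used));
		unsigned right = find_first_zero(lanes_used, max_lanes);

		printf("wrong bound: bit %u looks free, but that lane does not exist\n",
		       wrong);
		printf("right bound: %u == max_lanes, i.e. no free lane\n", right);
		return 0;
	}
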
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
index 5b17d3a31896..42d2c2cd9a78 100644
--- a/drivers/media/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -8,10 +8,12 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <media/cec-notifier.h>
#include <media/cec-pin.h>
struct cec_gpio {
struct cec_adapter *adap;
+ struct cec_notifier *notifier;
struct device *dev;
struct gpio_desc *cec_gpio;
@@ -173,9 +175,17 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
static int cec_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device *hdmi_dev;
struct cec_gpio *cec;
+ u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
int ret;
+ hdmi_dev = cec_notifier_parse_hdmi_phandle(dev);
+ if (PTR_ERR(hdmi_dev) == -EPROBE_DEFER)
+ return PTR_ERR(hdmi_dev);
+ if (IS_ERR(hdmi_dev))
+ caps |= CEC_CAP_PHYS_ADDR;
+
cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
if (!cec)
return -ENOMEM;
@@ -196,8 +206,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
return PTR_ERR(cec->v5_gpio);
cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
- cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
- CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ cec, pdev->name, caps);
if (IS_ERR(cec->adap))
return PTR_ERR(cec->adap);
@@ -205,7 +214,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
cec->adap->name, cec);
if (ret)
- return ret;
+ goto del_adap;
cec_gpio_disable_irq(cec->adap);
@@ -218,7 +227,7 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"hpd-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
if (cec->v5_gpio) {
@@ -230,23 +239,37 @@ static int cec_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"v5-gpio", cec);
if (ret)
- return ret;
+ goto del_adap;
}
- ret = cec_register_adapter(cec->adap, &pdev->dev);
- if (ret) {
- cec_delete_adapter(cec->adap);
- return ret;
+ if (!IS_ERR(hdmi_dev)) {
+ cec->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ cec->adap);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto del_adap;
+ }
}
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret)
+ goto unreg_notifier;
+
platform_set_drvdata(pdev, cec);
return 0;
+
+unreg_notifier:
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
+del_adap:
+ cec_delete_adapter(cec->adap);
+ return ret;
}
static int cec_gpio_remove(struct platform_device *pdev)
{
struct cec_gpio *cec = platform_get_drvdata(pdev);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 73222c0615c0..94fb4d2ecc43 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -933,7 +933,8 @@ static int coda_g_selection(struct file *file, void *fh,
rsel = &r;
/* fallthrough */
case V4L2_SEL_TGT_CROP:
- if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ ctx->inst_type == CODA_INST_DECODER)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -942,7 +943,8 @@ static int coda_g_selection(struct file *file, void *fh,
/* fallthrough */
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ ctx->inst_type == CODA_INST_ENCODER)
return -EINVAL;
break;
default:
@@ -1084,16 +1086,16 @@ static int coda_decoder_cmd(struct file *file, void *fh,
switch (dc->cmd) {
case V4L2_DEC_CMD_START:
- mutex_lock(&ctx->bitstream_mutex);
mutex_lock(&dev->coda_mutex);
+ mutex_lock(&ctx->bitstream_mutex);
coda_bitstream_flush(ctx);
- mutex_unlock(&dev->coda_mutex);
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_clear_last_buffer_dequeued(dst_vq);
ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
+ mutex_unlock(&dev->coda_mutex);
break;
case V4L2_DEC_CMD_STOP:
stream_end = false;
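
Taking the device-wide coda_mutex before the per-context bitstream_mutex and holding it across the whole flush keeps one consistent lock order — the classic guard against ABBA deadlocks. A toy pthread illustration of the ordering rule (names are made up):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Every path takes the device-wide lock first, then the per-context
	 * one; mixing the order in even one path can deadlock the rest. */
	static void do_flush(void)
	{
		pthread_mutex_lock(&dev_mutex);
		pthread_mutex_lock(&ctx_mutex);
		puts("flush under both locks");
		pthread_mutex_unlock(&ctx_mutex);
		pthread_mutex_unlock(&dev_mutex);
	}

	int main(void)
	{
		do_flush();
		return 0;
	}
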
@@ -2387,6 +2389,7 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
+ dst_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->mem_ops = &vb2_dma_contig_memops;
return coda_queue_init(priv, dst_vq);
@@ -2959,8 +2962,6 @@ static int coda_probe(struct platform_device *pdev)
else
return -EINVAL;
- spin_lock_init(&dev->irqlock);
-
dev->dev = &pdev->dev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(dev->clk_per)) {
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 848bf1da401e..9f226140b486 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -86,7 +86,6 @@ struct coda_dev {
struct gen_pool *iram_pool;
struct coda_aux_buf iram;
- spinlock_t irqlock;
struct mutex dev_mutex;
struct mutex coda_mutex;
struct workqueue_struct *workqueue;
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
index 4a3b3810fd89..f048e8994785 100644
--- a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -314,7 +314,8 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
return 0;
out_probe_notify:
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
+ cros_ec_cec->adap);
out_probe_adapter:
cec_delete_adapter(cros_ec_cec->adap);
return ret;
@@ -335,7 +336,8 @@ static int cros_ec_cec_remove(struct platform_device *pdev)
return ret;
}
- cec_notifier_cec_adap_unregister(cros_ec_cec->notify);
+ cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
+ cros_ec_cec->adap);
cec_unregister_adapter(cros_ec_cec->adap);
return 0;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 378cc302e1f8..d2cbcdca0463 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -313,7 +313,7 @@ static int isp_video_release(struct file *file)
ivc->streaming = 0;
}
- vb2_fop_release(file);
+ _vb2_fop_release(file, NULL);
if (v4l2_fh_is_singular_file(file)) {
fimc_pipeline_call(&ivc->ve, close);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index a838189d4490..9aaf3b8060d5 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1457,12 +1457,12 @@ static int fimc_md_probe(struct platform_device *pdev)
ret = v4l2_device_register(dev, &fmd->v4l2_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
- return ret;
+ goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
- goto err_md;
+ goto err_v4l2dev;
ret = fimc_md_get_pinctrl(fmd);
if (ret < 0) {
@@ -1519,9 +1519,10 @@ err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
fimc_md_put_clocks(fmd);
+err_v4l2dev:
+ v4l2_device_unregister(&fmd->v4l2_dev);
err_md:
media_device_cleanup(&fmd->media_dev);
- v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
}
diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
index 3b39e875292e..891533060d49 100644
--- a/drivers/media/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/platform/meson/ao-cec-g12a.c
@@ -662,34 +662,27 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&meson_ao_cec_g12a_regmap_conf);
if (IS_ERR(ao_cec->regmap)) {
ret = PTR_ERR(ao_cec->regmap);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->regmap_cec = devm_regmap_init(&pdev->dev, NULL, ao_cec,
&meson_ao_cec_g12a_cec_regmap_conf);
if (IS_ERR(ao_cec->regmap_cec)) {
ret = PTR_ERR(ao_cec->regmap_cec);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -699,45 +692,52 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->oscin = devm_clk_get(&pdev->dev, "oscin");
if (IS_ERR(ao_cec->oscin)) {
dev_err(&pdev->dev, "oscin clock request failed\n");
ret = PTR_ERR(ao_cec->oscin);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = meson_ao_cec_g12a_setup_clk(ao_cec);
if (ret)
- goto out_probe_notify;
+ goto out_probe_adapter;
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
device_reset_optional(&pdev->dev);
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_core_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_core_clk;
+ goto out_probe_notify;
/* Setup Hardware */
regmap_write(ao_cec->regmap, CECB_GEN_CNTL_REG, CECB_GEN_CNTL_RESET);
return 0;
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
+
out_probe_core_clk:
clk_disable_unprepare(ao_cec->core);
-out_probe_notify:
- cec_notifier_cec_adap_unregister(ao_cec->notify);
-
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
@@ -752,7 +752,7 @@ static int meson_ao_cec_g12a_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_notifier_cec_adap_unregister(ao_cec->notify);
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
index 64ed549bf012..09aff82c3773 100644
--- a/drivers/media/platform/meson/ao-cec.c
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -624,20 +624,13 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
if (IS_ERR(ao_cec->adap))
return PTR_ERR(ao_cec->adap);
- ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
- ao_cec->adap);
- if (!ao_cec->notify) {
- ret = -ENOMEM;
- goto out_probe_adapter;
- }
-
ao_cec->adap->owner = THIS_MODULE;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
irq = platform_get_irq(pdev, 0);
@@ -647,20 +640,20 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
0, NULL, ao_cec);
if (ret) {
dev_err(&pdev->dev, "irq request failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ao_cec->core = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ao_cec->core)) {
dev_err(&pdev->dev, "core clock request failed\n");
ret = PTR_ERR(ao_cec->core);
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_prepare_enable(ao_cec->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
- goto out_probe_notify;
+ goto out_probe_adapter;
}
ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
@@ -674,9 +667,16 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->pdev = pdev;
platform_set_drvdata(pdev, ao_cec);
+ ao_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, NULL,
+ ao_cec->adap);
+ if (!ao_cec->notify) {
+ ret = -ENOMEM;
+ goto out_probe_clk;
+ }
+
ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
if (ret < 0)
- goto out_probe_clk;
+ goto out_probe_notify;
/* Setup Hardware */
writel_relaxed(CEC_GEN_CNTL_RESET,
@@ -684,12 +684,12 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
return 0;
+out_probe_notify:
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
+
out_probe_clk:
clk_disable_unprepare(ao_cec->core);
-out_probe_notify:
- cec_notifier_cec_adap_unregister(ao_cec->notify);
-
out_probe_adapter:
cec_delete_adapter(ao_cec->adap);
@@ -704,7 +704,7 @@ static int meson_ao_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(ao_cec->core);
- cec_notifier_cec_adap_unregister(ao_cec->notify);
+ cec_notifier_cec_adap_unregister(ao_cec->notify, ao_cec->adap);
cec_unregister_adapter(ao_cec->adap);
return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 26a55c3e807e..858727824889 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -284,7 +284,7 @@ static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
fmt = &mtk_video_formats[k];
if (fmt->fourcc == pixelformat) {
mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt.fourcc, pixelformat);
+ dst_q_data->fmt->fourcc, pixelformat);
dst_q_data->fmt = fmt;
return;
}
@@ -841,12 +841,20 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
return -EINVAL;
pix_mp = &f->fmt.pix_mp;
+ /*
+ * Setting OUTPUT format after OUTPUT buffers are allocated is invalid
+ * if using the stateful API.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
}
+ /*
+ * Setting CAPTURE format after CAPTURE buffers are allocated is
+ * invalid.
+ */
if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
mtk_v4l2_err("cap_q_ctx buffers already requested");
@@ -865,6 +873,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
fmt = mtk_vdec_find_format(f);
}
}
+ if (fmt == NULL)
+ return -EINVAL;
q_data->fmt = fmt;
vidioc_try_fmt(f, q_data->fmt);
@@ -873,10 +883,10 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
q_data->coded_width = pix_mp->width;
q_data->coded_height = pix_mp->height;
- ctx->colorspace = f->fmt.pix_mp.colorspace;
- ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
- ctx->quantization = f->fmt.pix_mp.quantization;
- ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+ ctx->colorspace = pix_mp->colorspace;
+ ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->quantization = pix_mp->quantization;
+ ctx->xfer_func = pix_mp->xfer_func;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index 00d090df11bb..944771ee5f5c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -253,13 +253,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
}
for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res == NULL) {
- dev_err(&pdev->dev, "get memory resource failed.");
- ret = -ENXIO;
- goto err_res;
- }
- dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR((__force void *)dev->reg_base[i])) {
ret = PTR_ERR((__force void *)dev->reg_base[i]);
goto err_res;
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
index 49aa85a9bb5a..50048c170b99 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -283,7 +283,6 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_H264;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
index 63a8708ce682..6011fdd60a22 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -402,7 +402,6 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP8;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
err = vpu_dec_init(&inst->vpu);
if (err) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 5066c283d86d..24c1f0bf2147 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -793,7 +793,6 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.id = IPI_VDEC_VP9;
inst->vpu.dev = ctx->dev->vpu_plat_dev;
inst->vpu.ctx = ctx;
- inst->vpu.handler = vpu_dec_ipi_handler;
if (vpu_dec_init(&inst->vpu)) {
mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 3f38cc4509ef..70abfd4cd4b9 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -25,10 +25,16 @@ static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
}
/*
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len:  length of ipi message
+ * @priv: callback private data, passed by the decoder at registration time.
+ *
* This function runs in interrupt context and it means there's an IPI MSG
* from VPU.
*/
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
{
struct vdec_vpu_ipi_ack *msg = data;
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
@@ -102,6 +108,7 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
mtk_vcodec_debug_enter(vpu);
init_waitqueue_head(&vpu->wq);
+ vpu->handler = vpu_dec_ipi_handler;
err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
if (err != 0) {
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index b76f717e4fd7..f779b0676fbd 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -76,13 +76,4 @@ int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
*/
int vpu_dec_reset(struct vdec_vpu_inst *vpu);
-/**
- * vpu_dec_ipi_handler - Handler for VPU ipi message.
- *
- * @data: ipi message
- * @len : length of ipi message
- * @priv: callback private data which is passed by decoder when register.
- */
-void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
-
#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index cc2ff40d060d..a768707abb94 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -273,7 +273,7 @@ int vpu_ipi_register(struct platform_device *pdev,
return -EPROBE_DEFER;
}
- if (id >= 0 && id < IPI_MAX && handler) {
+ if (id < IPI_MAX && handler) {
ipi_desc = vpu->ipi_desc;
ipi_desc[id].name = name;
ipi_desc[id].handler = handler;
@@ -398,7 +398,7 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
handler = vpu->wdt.handler;
- if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
+ if (id < VPU_RST_MAX && wdt_reset) {
dev_dbg(vpu->dev, "wdt register id %d\n", id);
mutex_lock(&vpu->vpu_mutex);
handler[id].reset_func = wdt_reset;
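
Both removed checks were of the form id >= 0 on an unsigned parameter; such a comparison is always true, and gcc/clang flag it under -Wtype-limits, so only the upper bound is meaningful. Trivially demonstrated:

	#include <stdio.h>

	int main(void)
	{
		unsigned int id = 0;

		/*
		 * With an unsigned type this comparison is always true;
		 * compilers warn: "comparison is always true".
		 */
		if (id >= 0)
			printf("always taken\n");

		/* the upper-bound check is the only one that can fail */
		if (id < 16)
			printf("in range\n");
		return 0;
	}
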
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index e6eff512a8a1..07312a2fab24 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/init.h>
+#include <linux/interconnect.h>
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -239,6 +240,14 @@ static int venus_probe(struct platform_device *pdev)
if (IS_ERR(core->base))
return PTR_ERR(core->base);
+ core->video_path = of_icc_get(dev, "video-mem");
+ if (IS_ERR(core->video_path))
+ return PTR_ERR(core->video_path);
+
+ core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
+ if (IS_ERR(core->cpucfg_path))
+ return PTR_ERR(core->cpucfg_path);
+
core->irq = platform_get_irq(pdev, 0);
if (core->irq < 0)
return core->irq;
@@ -273,6 +282,10 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = icc_set_bw(core->cpucfg_path, 0, kbps_to_icc(1000));
+ if (ret)
+ return ret;
+
ret = hfi_create(core, &venus_core_ops);
if (ret)
return ret;
@@ -355,6 +368,9 @@ static int venus_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ icc_put(core->video_path);
+ icc_put(core->cpucfg_path);
+
v4l2_device_unregister(&core->v4l2_dev);
return ret;
@@ -427,10 +443,11 @@ static const struct venus_resources msm8916_res = {
};
static const struct freq_tbl msm8996_freq_table[] = {
- { 1944000, 490000000 }, /* 4k UHD @ 60 */
- { 972000, 320000000 }, /* 4k UHD @ 30 */
- { 489600, 150000000 }, /* 1080p @ 60 */
- { 244800, 75000000 }, /* 1080p @ 30 */
+ { 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
+ { 972000, 520000000 }, /* 4k UHD @ 30 */
+ { 489600, 346666667 }, /* 1080p @ 60 */
+ { 244800, 150000000 }, /* 1080p @ 30 */
+ { 108000, 75000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8996_reg_preset[] = {
@@ -464,9 +481,40 @@ static const struct freq_tbl sdm845_freq_table[] = {
{ 244800, 100000000 }, /* 1920x1080@30 */
};
+static struct codec_freq_data sdm845_codec_freq_data[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10 },
+ { V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10 },
+};
+
+static const struct bw_tbl sdm845_bw_table_enc[] = {
+ { 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */
+ { 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */
+ { 489600, 723000, 0, 973000, 0 }, /* 1920x1080@60 */
+ { 244800, 370000, 0, 495000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sdm845_bw_table_dec[] = {
+ { 2073600, 3929000, 0, 5551000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1987000, 0, 2797000, 0 }, /* 4096x2160@30 */
+ { 489600, 1040000, 0, 1298000, 0 }, /* 1920x1080@60 */
+ { 244800, 530000, 0, 659000, 0 }, /* 1920x1080@30 */
+};
+
static const struct venus_resources sdm845_res = {
.freq_tbl = sdm845_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
+ .bw_tbl_enc = sdm845_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
+ .bw_tbl_dec = sdm845_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
+ .codec_freq_data = sdm845_codec_freq_data,
+ .codec_freq_data_size = ARRAY_SIZE(sdm845_codec_freq_data),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.max_load = 3110400, /* 4096x2160@90 */
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 922cb7e64bfa..11585fb3cae3 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -26,12 +26,33 @@ struct reg_val {
u32 value;
};
+struct codec_freq_data {
+ u32 pixfmt;
+ u32 session_type;
+ unsigned long vpp_freq;
+ unsigned long vsp_freq;
+};
+
+struct bw_tbl {
+ u32 mbs_per_sec;
+ u32 avg;
+ u32 peak;
+ u32 avg_10bit;
+ u32 peak_10bit;
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
unsigned int freq_tbl_size;
+ const struct bw_tbl *bw_tbl_enc;
+ unsigned int bw_tbl_enc_size;
+ const struct bw_tbl *bw_tbl_dec;
+ unsigned int bw_tbl_dec_size;
const struct reg_val *reg_tbl;
unsigned int reg_tbl_size;
+ const struct codec_freq_data *codec_freq_data;
+ unsigned int codec_freq_data_size;
const char * const clks[VIDC_CLKS_NUM_MAX];
unsigned int clks_num;
enum hfi_version hfi_version;
@@ -115,6 +136,8 @@ struct venus_core {
struct clk *core1_clk;
struct clk *core0_bus_clk;
struct clk *core1_bus_clk;
+ struct icc_path *video_path;
+ struct icc_path *cpucfg_path;
struct video_device *vdev_dec;
struct video_device *vdev_enc;
struct v4l2_device v4l2_dev;
@@ -208,6 +231,12 @@ struct venus_buffer {
struct list_head ref_list;
};
+struct clock_data {
+ u32 core_id;
+ unsigned long freq;
+ const struct codec_freq_data *codec_freq_data;
+};
+
#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
enum venus_dec_state {
@@ -288,6 +317,7 @@ struct venus_inst {
struct list_head list;
struct mutex lock;
struct venus_core *core;
+ struct clock_data clk_data;
struct list_head dpbbufs;
struct list_head internalbufs;
struct list_head registeredbufs;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 1ad96c25ab09..a172f1ac0b35 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk.h>
#include <linux/iopoll.h>
+#include <linux/interconnect.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
@@ -388,12 +389,91 @@ static u32 load_per_type(struct venus_core *core, u32 session_type)
return mbs_per_sec;
}
-int venus_helper_load_scale_clocks(struct venus_core *core)
+static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
{
+ const struct venus_resources *res = inst->core->res;
+ const struct bw_tbl *bw_tbl;
+ unsigned int num_rows, i;
+
+ *avg = 0;
+ *peak = 0;
+
+ if (mbs == 0)
+ return;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
+ num_rows = res->bw_tbl_enc_size;
+ bw_tbl = res->bw_tbl_enc;
+ } else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ num_rows = res->bw_tbl_dec_size;
+ bw_tbl = res->bw_tbl_dec;
+ } else {
+ return;
+ }
+
+ if (!bw_tbl || num_rows == 0)
+ return;
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs > bw_tbl[i].mbs_per_sec)
+ break;
+
+ if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
+ *avg = bw_tbl[i].avg_10bit;
+ *peak = bw_tbl[i].peak_10bit;
+ } else {
+ *avg = bw_tbl[i].avg;
+ *peak = bw_tbl[i].peak;
+ }
+ }
+}
+
+static int load_scale_bw(struct venus_core *core)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ mbs_per_sec = load_per_instance(inst);
+ mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
+ total_avg += avg;
+ total_peak += peak;
+ }
+ mutex_unlock(&core->lock);
+
+ dev_dbg(core->dev, "total: avg_bw: %u, peak_bw: %u\n",
+ total_avg, total_peak);
+
+ return icc_set_bw(core->video_path, total_avg, total_peak);
+}
+
+static int set_clk_freq(struct venus_core *core, unsigned long freq)
+{
+ struct clk *clk = core->clks[0];
+ int ret;
+
+ ret = clk_set_rate(clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core0_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(core->core1_clk, freq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int scale_clocks(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
unsigned long freq = table[0].freq;
- struct clk *clk = core->clks[0];
struct device *dev = core->dev;
u32 mbs_per_sec;
unsigned int i;
@@ -419,23 +499,124 @@ int venus_helper_load_scale_clocks(struct venus_core *core)
set_freq:
- ret = clk_set_rate(clk, freq);
- if (ret)
- goto err;
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core0_clk, freq);
- if (ret)
- goto err;
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
- ret = clk_set_rate(core->core1_clk, freq);
- if (ret)
- goto err;
+ return 0;
+}
+
+static unsigned long calculate_inst_freq(struct venus_inst *inst,
+ unsigned long filled_len)
+{
+ unsigned long vpp_freq = 0, vsp_freq = 0;
+ u32 fps = (u32)inst->fps;
+ u32 mbs_per_sec;
+
+ mbs_per_sec = load_per_instance(inst) / fps;
+
+ vpp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vpp_freq;
+	/* 21 / 20 is the overhead factor: x + x / 20 == x * 21 / 20 */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_sec * inst->clk_data.codec_freq_data->vsp_freq;
+
+	/* 10 / 7 is the overhead factor applied to the bitstream term */
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
+ else
+ vsp_freq += ((fps * filled_len * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
+
+static int scale_clocks_v4(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct device *dev = core->dev;
+ unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
+ unsigned long filled_len = 0;
+ struct venus_buffer *buf, *n;
+ struct vb2_buffer *vb;
+ int i, ret;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ vb = &buf->vb.vb2_buf;
+ filled_len = max(filled_len, vb2_get_plane_payload(vb, 0));
+ }
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
+ return 0;
+
+ freq = calculate_inst_freq(inst, filled_len);
+ inst->clk_data.freq = freq;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
+ freq_core1 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
+ freq_core2 += inst->clk_data.freq;
+ } else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ freq_core1 += inst->clk_data.freq;
+ freq_core2 += inst->clk_data.freq;
+ }
+ }
+ mutex_unlock(&core->lock);
+
+ freq = max(freq_core1, freq_core2);
+
+ if (freq >= table[0].freq) {
+		dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
+			 freq, table[0].freq);
+		freq = table[0].freq;
+ goto set_freq;
+ }
+
+	for (i = num_rows - 1; i >= 0; i--) {
+ if (freq <= table[i].freq) {
+ freq = table[i].freq;
+ break;
+ }
+ }
+
+set_freq:
+
+ ret = set_clk_freq(core, freq);
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n",
+ freq, ret);
+ return ret;
+ }
+
+ ret = load_scale_bw(core);
+ if (ret) {
+ dev_err(dev, "failed to set bandwidth (%d)\n",
+ ret);
+ return ret;
+ }
return 0;
+}
-err:
- dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
- return ret;
+int venus_helper_load_scale_clocks(struct venus_inst *inst)
+{
+ if (IS_V4(inst->core))
+ return scale_clocks_v4(inst);
+
+ return scale_clocks(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_load_scale_clocks);
@@ -541,6 +722,8 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
put_ts_metadata(inst, vbuf);
+
+ venus_helper_load_scale_clocks(inst);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
@@ -809,6 +992,7 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct hfi_videocores_usage_type cu;
+ inst->clk_data.core_id = usage;
if (!IS_V4(inst->core))
return 0;
@@ -818,6 +1002,36 @@ int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage)
}
EXPORT_SYMBOL_GPL(venus_helper_set_core_usage);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst)
+{
+ const struct codec_freq_data *data;
+ unsigned int i, data_size;
+ u32 pixfmt;
+ int ret = 0;
+
+ if (!IS_V4(inst->core))
+ return 0;
+
+ data = inst->core->res->codec_freq_data;
+ data_size = inst->core->res->codec_freq_data_size;
+ pixfmt = inst->session_type == VIDC_SESSION_TYPE_DEC ?
+ inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
+
+ for (i = 0; i < data_size; i++) {
+ if (data[i].pixfmt == pixfmt &&
+ data[i].session_type == inst->session_type) {
+ inst->clk_data.codec_freq_data = &data[i];
+ break;
+ }
+ }
+
+ if (!inst->clk_data.codec_freq_data)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_init_codec_freq_data);
+
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs)
@@ -1140,7 +1354,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
}
@@ -1193,7 +1407,6 @@ EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
- struct venus_core *core = inst->core;
int ret;
ret = venus_helper_intbufs_alloc(inst);
@@ -1204,7 +1417,7 @@ int venus_helper_vb2_start_streaming(struct venus_inst *inst)
if (ret)
goto err_bufs_free;
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_load_res(inst);
if (ret)
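
load_scale_bw() above votes on core->video_path with icc_set_bw(); the icc_path handles themselves must be acquired at probe time. A minimal sketch, assuming DT interconnect names "video-mem" and "cpu-cfg" (the real names come from the platform's interconnect bindings, outside this hunk):

	static int venus_get_interconnects(struct venus_core *core)
	{
		/* of_icc_get()/icc_put() come from <linux/interconnect.h> */
		core->video_path = of_icc_get(core->dev, "video-mem");
		if (IS_ERR(core->video_path))
			return PTR_ERR(core->video_path);

		core->cpucfg_path = of_icc_get(core->dev, "cpu-cfg");
		if (IS_ERR(core->cpucfg_path)) {
			icc_put(core->video_path);
			return PTR_ERR(core->cpucfg_path);
		}

		return 0;
	}
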
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index 01f411b12f81..34dcd0c13f06 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -33,6 +33,7 @@ int venus_helper_set_output_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height,
u32 buftype);
int venus_helper_set_work_mode(struct venus_inst *inst, u32 mode);
+int venus_helper_init_codec_freq_data(struct venus_inst *inst);
int venus_helper_set_core_usage(struct venus_inst *inst, u32 usage);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
@@ -59,7 +60,7 @@ int venus_helper_intbufs_free(struct venus_inst *inst);
int venus_helper_intbufs_realloc(struct venus_inst *inst);
int venus_helper_queue_dpb_bufs(struct venus_inst *inst);
int venus_helper_unregister_bufs(struct venus_inst *inst);
-int venus_helper_load_scale_clocks(struct venus_core *core);
+int venus_helper_load_scale_clocks(struct venus_inst *inst);
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst);
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 7129a2aea09a..0d8855014ab3 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -1472,6 +1472,7 @@ static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
+ u32 ctrl_status;
bool val;
int ret;
@@ -1487,6 +1488,10 @@ static int venus_suspend_3xx(struct venus_core *core)
return -EINVAL;
}
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
+ goto power_off;
+
/*
* Power collapse sequence for Venus 3xx and 4xx versions:
* 1. Check for ARM9 and video core to be idle by checking WFI bit
@@ -1511,6 +1516,7 @@ static int venus_suspend_3xx(struct venus_core *core)
if (ret)
return ret;
+power_off:
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
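
The new ctrl_status check above short-circuits the suspend sequence when the firmware has already signalled power-collapse readiness. Step 1 of the comment (the WFI idle check) is typically a register poll; a sketch of that pattern with readl_poll_timeout() from <linux/iopoll.h>, where WRAPPER_CPU_STATUS and its WFI bit are assumed names used only for illustration:

	static int venus_wait_wfi(struct venus_hfi_device *hdev)
	{
		u32 val;

		/* Poll until the ARM9 reports wait-for-interrupt. */
		return readl_poll_timeout(hdev->core->base + WRAPPER_CPU_STATUS,
					  val, val & WRAPPER_CPU_STATUS_WFI,
					  1500, 100 * 1500);
	}
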
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 7f4660555ddb..8feaf5daece9 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -685,6 +685,10 @@ static int vdec_session_init(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
return 0;
deinit:
hfi_session_deinit(inst);
@@ -864,7 +868,7 @@ reconfigure:
if (ret)
goto free_dpb_bufs;
- venus_helper_load_scale_clocks(inst->core);
+ venus_helper_load_scale_clocks(inst);
ret = hfi_session_continue(inst);
if (ret)
@@ -1072,7 +1076,7 @@ static void vdec_session_release(struct venus_inst *inst)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
- venus_helper_load_scale_clocks(core);
+ venus_helper_load_scale_clocks(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
mutex_unlock(&inst->lock);
@@ -1412,9 +1416,6 @@ static const struct v4l2_file_operations vdec_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int vdec_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 1b7fb2d5887c..453edf966d4f 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -842,6 +842,10 @@ static int venc_init_session(struct venus_inst *inst)
if (ret)
goto deinit;
+ ret = venus_helper_init_codec_freq_data(inst);
+ if (ret)
+ goto deinit;
+
ret = venc_set_properties(inst);
if (ret)
goto deinit;
@@ -1235,9 +1239,6 @@ static const struct v4l2_file_operations venc_fops = {
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
-#ifdef CONFIG_COMPAT
- .compat_ioctl32 = v4l2_compat_ioctl32,
-#endif
};
static int venc_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 6993484ff0f3..7440c8965d27 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -983,6 +983,7 @@ static const struct rvin_group_route rcar_info_r8a7795_routes[] = {
static const struct rvin_info rcar_info_r8a7795 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7795_routes,
@@ -1077,6 +1078,7 @@ static const struct rvin_group_route rcar_info_r8a7796_routes[] = {
static const struct rvin_info rcar_info_r8a7796 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a7796_routes,
@@ -1121,6 +1123,7 @@ static const struct rvin_group_route rcar_info_r8a77965_routes[] = {
static const struct rvin_info rcar_info_r8a77965 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77965_routes,
@@ -1168,6 +1171,7 @@ static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
static const struct rvin_info rcar_info_r8a77980 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77980_routes,
@@ -1184,6 +1188,7 @@ static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
static const struct rvin_info rcar_info_r8a77990 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77990_routes,
@@ -1196,6 +1201,7 @@ static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
static const struct rvin_info rcar_info_r8a77995 = {
.model = RCAR_GEN3,
.use_mc = true,
+ .nv12 = true,
.max_width = 4096,
.max_height = 4096,
.routes = rcar_info_r8a77995_routes,
@@ -1207,6 +1213,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a7796,
},
{
+ .compatible = "renesas,vin-r8a774b1",
+ .data = &rcar_info_r8a77965,
+ },
+ {
.compatible = "renesas,vin-r8a774c0",
.data = &rcar_info_r8a77990,
},
@@ -1282,7 +1292,6 @@ static int rcar_vin_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rvin_dev *vin;
- struct resource *mem;
int irq, ret;
vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
@@ -1301,11 +1310,7 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (attr)
vin->info = attr->data;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL)
- return -EINVAL;
-
- vin->base = devm_ioremap_resource(vin->dev, mem);
+ vin->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vin->base))
return PTR_ERR(vin->base);
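
The probe simplification above relies on devm_platform_ioremap_resource(), which is roughly a wrapper for the two calls it replaces; devm_ioremap_resource() already turns a missing resource into ERR_PTR(-EINVAL), so the explicit NULL check could be dropped:

	/* Roughly what devm_platform_ioremap_resource(pdev, 0) does. */
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);
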
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index c14af1b929df..faa9fb23a2e9 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -1082,6 +1082,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.data = &rcar_csi2_info_r8a7796,
},
{
+ .compatible = "renesas,r8a774b1-csi2",
+ .data = &rcar_csi2_info_r8a77965,
+ },
+ {
.compatible = "renesas,r8a774c0-csi2",
.data = &rcar_csi2_info_r8a77990,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 3cb29b2e0b2b..cf9029efeb04 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -118,6 +118,7 @@
#define VNDMR_ABIT (1 << 2)
#define VNDMR_DTMD_YCSEP (1 << 1)
#define VNDMR_DTMD_ARGB (1 << 0)
+#define VNDMR_DTMD_YCSEP_420 (3 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -529,12 +530,17 @@ static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
{
+ unsigned int crop_height;
u32 xs, ys;
/* Set scaling coefficient */
+ crop_height = vin->crop.height;
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
+ crop_height *= 2;
+
ys = 0;
- if (vin->crop.height != vin->compose.height)
- ys = (4096 * vin->crop.height) / vin->compose.height;
+ if (crop_height != vin->compose.height)
+ ys = (4096 * crop_height) / vin->compose.height;
rvin_write(vin, ys, VNYS_REG);
xs = 0;
@@ -557,16 +563,11 @@ static void rvin_crop_scale_comp_gen2(struct rvin_dev *vin)
rvin_write(vin, 0, VNSPPOC_REG);
rvin_write(vin, 0, VNSLPOC_REG);
rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
+
+ if (V4L2_FIELD_IS_INTERLACED(vin->format.field))
rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
- break;
- default:
+ else
rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
- break;
- }
vin_dbg(vin,
"Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
@@ -583,21 +584,9 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
/* Set Start/End Pixel/Line Pre-Clip */
rvin_write(vin, vin->crop.left, VNSPPRC_REG);
rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1, VNELPRC_REG);
- switch (vin->format.field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- rvin_write(vin, vin->crop.top / 2, VNSLPRC_REG);
- rvin_write(vin, (vin->crop.top + vin->crop.height) / 2 - 1,
- VNELPRC_REG);
- break;
- default:
- rvin_write(vin, vin->crop.top, VNSLPRC_REG);
- rvin_write(vin, vin->crop.top + vin->crop.height - 1,
- VNELPRC_REG);
- break;
- }
/* TODO: Add support for the UDS scaler. */
if (vin->info->model != RCAR_GEN3)
@@ -641,6 +630,9 @@ static int rvin_setup(struct rvin_dev *vin)
vnmc = VNMC_IM_ODD_EVEN;
progressive = true;
break;
+ case V4L2_FIELD_ALTERNATE:
+ vnmc = VNMC_IM_ODD_EVEN;
+ break;
default:
vnmc = VNMC_IM_ODD;
break;
@@ -710,11 +702,13 @@ static int rvin_setup(struct rvin_dev *vin)
* Output format
*/
switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
rvin_write(vin,
- ALIGN(vin->format.width * vin->format.height, 0x80),
- VNUVAOF_REG);
- dmr = VNDMR_DTMD_YCSEP;
+ ALIGN(vin->format.bytesperline * vin->format.height,
+ 0x80), VNUVAOF_REG);
+ dmr = vin->format.pixelformat == V4L2_PIX_FMT_NV12 ?
+ VNDMR_DTMD_YCSEP_420 : VNDMR_DTMD_YCSEP;
output_is_yuv = true;
break;
case V4L2_PIX_FMT_YUYV:
@@ -799,6 +793,18 @@ static bool rvin_capture_active(struct rvin_dev *vin)
return rvin_read(vin, VNMS_REG) & VNMS_CA;
}
+static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
+{
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ /* If FS is set it is an Even field. */
+ if (vnms & VNMS_FS)
+ return V4L2_FIELD_BOTTOM;
+ return V4L2_FIELD_TOP;
+ }
+
+ return vin->format.field;
+}
+
static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
{
const struct rvin_video_format *fmt;
@@ -948,7 +954,7 @@ static irqreturn_t rvin_irq(int irq, void *data)
/* Capture frame */
if (vin->queue_buf[slot]) {
- vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->field = rvin_get_active_field(vin, vnms);
vin->queue_buf[slot]->sequence = vin->sequence;
vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf,
@@ -1075,6 +1081,7 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
case V4L2_FIELD_NONE:
+ case V4L2_FIELD_ALTERNATE:
break;
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
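
A worked example of the new VNUVAOF programming: for 1280x720 NV12 the width is already a multiple of 32, so bytesperline is 1280 and the luma plane occupies 1280 * 720 = 921600 bytes; ALIGN(921600, 0x80) leaves that unchanged (921600 = 7200 * 128), so the chroma plane starts immediately after the luma plane. Basing the offset on bytesperline * height rather than width * height matters whenever the line stride exceeds the visible width.
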
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index cbc1c07f0a96..9e2e63ffcc47 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -31,6 +31,10 @@
static const struct rvin_video_format rvin_formats[] = {
{
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .bpp = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_NV16,
.bpp = 1,
},
@@ -72,6 +76,9 @@ const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
if (vin->info->model == RCAR_M1 && pixelformat == V4L2_PIX_FMT_XBGR32)
return NULL;
+ if (pixelformat == V4L2_PIX_FMT_NV12 && !vin->info->nv12)
+ return NULL;
+
for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
if (rvin_formats[i].fourcc == pixelformat)
return rvin_formats + i;
@@ -90,17 +97,29 @@ static u32 rvin_format_bytesperline(struct rvin_dev *vin,
if (WARN_ON(!fmt))
return -EINVAL;
- align = pix->pixelformat == V4L2_PIX_FMT_NV16 ? 0x20 : 0x10;
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ align = 0x20;
+ break;
+ default:
+ align = 0x10;
+ break;
+ }
return ALIGN(pix->width, align) * fmt->bpp;
}
static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
{
- if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ switch (pix->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ return pix->bytesperline * pix->height * 3 / 2;
+ case V4L2_PIX_FMT_NV16:
return pix->bytesperline * pix->height * 2;
-
- return pix->bytesperline * pix->height;
+ default:
+ return pix->bytesperline * pix->height;
+ }
}
static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
@@ -117,23 +136,23 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
case V4L2_FIELD_INTERLACED_TB:
case V4L2_FIELD_INTERLACED_BT:
case V4L2_FIELD_INTERLACED:
- break;
case V4L2_FIELD_ALTERNATE:
- /*
- * Driver does not (yet) support outputting ALTERNATE to a
- * userspace. It does support outputting INTERLACED so use
- * the VIN hardware to combine the two fields.
- */
- pix->field = V4L2_FIELD_INTERLACED;
- pix->height *= 2;
break;
default:
pix->field = RVIN_DEFAULT_FIELD;
break;
}
- /* HW limit width to a multiple of 32 (2^5) for NV16 else 2 (2^1) */
- walign = vin->format.pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+	/* HW limits width to a multiple of 32 (2^5) for NV12/16, else 2 (2^1) */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ walign = 5;
+ break;
+ default:
+ walign = 1;
+ break;
+ }
/* Limit to VIN capabilities */
v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign,
@@ -164,22 +183,32 @@ static int rvin_reset_format(struct rvin_dev *vin)
v4l2_fill_pix_format(&vin->format, &fmt.format);
+ vin->src_rect.top = 0;
+ vin->src_rect.left = 0;
+ vin->src_rect.width = vin->format.width;
+ vin->src_rect.height = vin->format.height;
+
+ /* Make use of the hardware interlacer by default. */
+ if (vin->format.field == V4L2_FIELD_ALTERNATE) {
+ vin->format.field = V4L2_FIELD_INTERLACED;
+ vin->format.height *= 2;
+ }
+
rvin_format_align(vin, &vin->format);
- vin->source.top = 0;
- vin->source.left = 0;
- vin->source.width = vin->format.width;
- vin->source.height = vin->format.height;
+ vin->crop = vin->src_rect;
- vin->crop = vin->source;
- vin->compose = vin->source;
+ vin->compose.top = 0;
+ vin->compose.left = 0;
+ vin->compose.width = vin->format.width;
+ vin->compose.height = vin->format.height;
return 0;
}
static int rvin_try_format(struct rvin_dev *vin, u32 which,
struct v4l2_pix_format *pix,
- struct v4l2_rect *crop, struct v4l2_rect *compose)
+ struct v4l2_rect *src_rect)
{
struct v4l2_subdev *sd = vin_to_source(vin);
struct v4l2_subdev_pad_config *pad_cfg;
@@ -208,21 +237,15 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto done;
+ ret = 0;
v4l2_fill_pix_format(pix, &format.format);
- if (crop) {
- crop->top = 0;
- crop->left = 0;
- crop->width = pix->width;
- crop->height = pix->height;
-
- /*
- * If source is ALTERNATE the driver will use the VIN hardware
- * to INTERLACE it. The crop height then needs to be doubled.
- */
- if (pix->field == V4L2_FIELD_ALTERNATE)
- crop->height *= 2;
+ if (src_rect) {
+ src_rect->top = 0;
+ src_rect->left = 0;
+ src_rect->width = pix->width;
+ src_rect->height = pix->height;
}
if (field != V4L2_FIELD_ANY)
@@ -232,17 +255,10 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
pix->height = height;
rvin_format_align(vin, pix);
-
- if (compose) {
- compose->top = 0;
- compose->left = 0;
- compose->width = pix->width;
- compose->height = pix->height;
- }
done:
v4l2_subdev_free_pad_config(pad_cfg);
- return 0;
+ return ret;
}
static int rvin_querycap(struct file *file, void *priv,
@@ -262,29 +278,34 @@ static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
{
struct rvin_dev *vin = video_drvdata(file);
- return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL,
- NULL);
+ return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
}
static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rvin_dev *vin = video_drvdata(file);
- struct v4l2_rect crop, compose;
+ struct v4l2_rect fmt_rect, src_rect;
int ret;
if (vb2_is_busy(&vin->queue))
return -EBUSY;
ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
- &crop, &compose);
+ &src_rect);
if (ret)
return ret;
vin->format = f->fmt.pix;
- vin->crop = crop;
- vin->compose = compose;
- vin->source = crop;
+
+ fmt_rect.top = 0;
+ fmt_rect.left = 0;
+ fmt_rect.width = vin->format.width;
+ fmt_rect.height = vin->format.height;
+
+ v4l2_rect_map_inside(&vin->crop, &src_rect);
+ v4l2_rect_map_inside(&vin->compose, &fmt_rect);
+ vin->src_rect = src_rect;
return 0;
}
@@ -302,12 +323,22 @@ static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- if (f->index >= ARRAY_SIZE(rvin_formats))
- return -EINVAL;
-
- f->pixelformat = rvin_formats[f->index].fourcc;
+ struct rvin_dev *vin = video_drvdata(file);
+ unsigned int i;
+ int matched;
+
+ matched = -1;
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
+ if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
+ matched++;
+
+ if (matched == f->index) {
+ f->pixelformat = rvin_formats[i].fourcc;
+ return 0;
+ }
+ }
- return 0;
+ return -EINVAL;
}
static int rvin_g_selection(struct file *file, void *fh,
@@ -322,8 +353,8 @@ static int rvin_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = s->r.top = 0;
- s->r.width = vin->source.width;
- s->r.height = vin->source.height;
+ s->r.width = vin->src_rect.width;
+ s->r.height = vin->src_rect.height;
break;
case V4L2_SEL_TGT_CROP:
s->r = vin->crop;
@@ -365,21 +396,22 @@ static int rvin_s_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_CROP:
/* Can't crop outside of source input */
max_rect.top = max_rect.left = 0;
- max_rect.width = vin->source.width;
- max_rect.height = vin->source.height;
+ max_rect.width = vin->src_rect.width;
+ max_rect.height = vin->src_rect.height;
v4l2_rect_map_inside(&r, &max_rect);
- v4l_bound_align_image(&r.width, 6, vin->source.width, 0,
- &r.height, 2, vin->source.height, 0, 0);
+ v4l_bound_align_image(&r.width, 6, vin->src_rect.width, 0,
+ &r.height, 2, vin->src_rect.height, 0, 0);
- r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height);
- r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);
+ r.top = clamp_t(s32, r.top, 0,
+ vin->src_rect.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, vin->src_rect.width - r.width);
vin->crop = s->r = r;
vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
r.width, r.height, r.left, r.top,
- vin->source.width, vin->source.height);
+ vin->src_rect.width, vin->src_rect.height);
break;
case V4L2_SEL_TGT_COMPOSE:
/* Make sure compose rect fits inside output format */
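
As a worked example of the sizeimage arithmetic above, take the 640x480 default: NV12 yields bytesperline = ALIGN(640, 0x20) = 640 and sizeimage = 640 * 480 * 3 / 2 = 460800 bytes (full-resolution luma plus a quarter-resolution interleaved CbCr plane), while NV16 yields 640 * 480 * 2 = 614400 bytes since its chroma is only subsampled horizontally.
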
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index e562c2ff21ec..a36b0824f81d 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,6 +126,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @nv12:		support outputting the NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
 * @routes:		list of possible routes from the CSI-2 receivers to
@@ -134,6 +135,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool nv12;
unsigned int max_width;
unsigned int max_height;
@@ -176,7 +178,7 @@ struct rvin_info {
*
* @crop: active cropping
* @compose: active composing
- * @source: active size of the video source
+ * @src_rect: active size of the video source
* @std: active video standard of the video source
*
* @alpha: Alpha component to fill in for supported pixel formats
@@ -215,7 +217,7 @@ struct rvin_dev {
struct v4l2_rect crop;
struct v4l2_rect compose;
- struct v4l2_rect source;
+ struct v4l2_rect src_rect;
v4l2_std_id std;
unsigned int alpha;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 608e5217ccd5..0f267a237b42 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -912,6 +912,7 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index cb93a13e1777..97bed45360f0 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2369,7 +2369,7 @@ static int fdp1_probe(struct platform_device *pdev)
dprintk(fdp1, "FDP1 Version R-Car H3\n");
break;
case FD1_IP_M3N:
- dprintk(fdp1, "FDP1 Version R-Car M3N\n");
+ dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
break;
case FD1_IP_E3:
dprintk(fdp1, "FDP1 Version R-Car E3\n");
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index 1a65532dc36d..e80204f5720c 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -553,7 +553,7 @@ void camif_hw_disable_capture(struct camif_vp *vp)
void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
{
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 6ddcc35b0bbd..2a3e7ffefe0a 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -239,7 +239,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
@@ -250,7 +250,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
{
struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 8dbbd5f2a40a..ac2162235cef 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1236,7 +1236,6 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
}
result->sof = sof;
result->sof_len = sof_len;
- result->components = components;
return true;
}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 3bc52f83f5bc..4407fe775afa 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -190,7 +190,6 @@ struct s5p_jpeg_marker {
* @dqt: DQT markers' positions relative to the buffer beginning
* @sof: SOF0 marker's position relative to the buffer beginning
* @sof_len: SOF0 marker's payload length (without length field itself)
- * @components: number of image components
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
@@ -202,7 +201,6 @@ struct s5p_jpeg_q_data {
struct s5p_jpeg_marker dqt;
u32 sof;
u32 sof_len;
- u32 components;
u32 size;
};
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
index 9cd60fe1867c..2ff62a488b27 100644
--- a/drivers/media/platform/seco-cec/seco-cec.c
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -671,10 +671,11 @@ static int secocec_probe(struct platform_device *pdev)
return ret;
err_notifier:
- cec_notifier_cec_adap_unregister(secocec->notifier);
+ cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
err_delete_adapter:
cec_delete_adapter(secocec->cec_adap);
err:
+ release_region(BRA_SMB_BASE_ADDR, 7);
dev_err(dev, "%s device probe failed\n", dev_name(dev));
return ret;
@@ -692,7 +693,7 @@ static int secocec_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "IR disabled");
}
- cec_notifier_cec_adap_unregister(secocec->notifier);
+ cec_notifier_cec_adap_unregister(secocec->notifier, secocec->cec_adap);
cec_unregister_adapter(secocec->cec_adap);
release_region(BRA_SMB_BASE_ADDR, 7);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index e90f1ba30574..675b5f2b4c2e 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -651,8 +651,7 @@ static int bdisp_release(struct file *file)
dev_dbg(bdisp->dev, "%s\n", __func__);
- if (mutex_lock_interruptible(&bdisp->lock))
- return -ERESTARTSYS;
+ mutex_lock(&bdisp->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
index 8f0ddcbeed9d..301fa10f419b 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-debugfs.c
@@ -225,36 +225,16 @@ static const struct debugfs_reg32 fei_sys_regs[] = {
void c8sectpfe_debugfs_init(struct c8sectpfei *fei)
{
- struct dentry *root;
- struct dentry *file;
-
- root = debugfs_create_dir("c8sectpfe", NULL);
- if (!root)
- goto err;
-
- fei->root = root;
-
fei->regset = devm_kzalloc(fei->dev, sizeof(*fei->regset), GFP_KERNEL);
if (!fei->regset)
- goto err;
+ return;
fei->regset->regs = fei_sys_regs;
fei->regset->nregs = ARRAY_SIZE(fei_sys_regs);
fei->regset->base = fei->io;
- file = debugfs_create_regset32("registers", S_IRUGO, root,
- fei->regset);
- if (!file) {
- dev_err(fei->dev,
- "%s not able to create 'registers' debugfs\n"
- , __func__);
- goto err;
- }
-
- return;
-
-err:
- debugfs_remove_recursive(root);
+ fei->root = debugfs_create_dir("c8sectpfe", NULL);
+ debugfs_create_regset32("registers", S_IRUGO, fei->root, fei->regset);
}
void c8sectpfe_debugfs_exit(struct c8sectpfei *fei)
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
index 8118c7365d3f..f0c73e64b586 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -359,7 +359,7 @@ static int stih_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_delete_adapter:
cec_delete_adapter(cec->adap);
@@ -370,7 +370,7 @@ static int stih_cec_remove(struct platform_device *pdev)
{
struct stih_cec *cec = platform_get_drvdata(pdev);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/media/platform/sunxi/Makefile b/drivers/media/platform/sunxi/Makefile
index a05127529006..3878cb4efdc2 100644
--- a/drivers/media/platform/sunxi/Makefile
+++ b/drivers/media/platform/sunxi/Makefile
@@ -1,2 +1,3 @@
obj-y += sun4i-csi/
obj-y += sun6i-csi/
+obj-y += sun8i-di/
diff --git a/drivers/media/platform/sunxi/sun8i-di/Makefile b/drivers/media/platform/sunxi/sun8i-di/Makefile
new file mode 100644
index 000000000000..109f7e5442b7
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUN8I_DEINTERLACE) += sun8i-di.o
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
new file mode 100644
index 000000000000..aaa1dc159ac2
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner sun8i deinterlacer with scaler driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ *
+ * Based on vim2m driver.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "sun8i-di.h"
+
+#define FLAG_SIZE (DEINTERLACE_MAX_WIDTH * DEINTERLACE_MAX_HEIGHT / 4)
+
+static u32 deinterlace_formats[] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+};
+
+static inline u32 deinterlace_read(struct deinterlace_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline void deinterlace_write(struct deinterlace_dev *dev,
+ u32 reg, u32 value)
+{
+ writel(value, dev->base + reg);
+}
+
+static inline void deinterlace_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 bits)
+{
+ writel(readl(dev->base + reg) | bits, dev->base + reg);
+}
+
+static inline void deinterlace_clr_set_bits(struct deinterlace_dev *dev,
+ u32 reg, u32 clr, u32 set)
+{
+ u32 val = readl(dev->base + reg);
+
+ val &= ~clr;
+ val |= set;
+
+ writel(val, dev->base + reg);
+}
+
+static void deinterlace_device_run(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_dev *dev = ctx->dev;
+ u32 size, stride, width, height, val;
+ struct vb2_v4l2_buffer *src, *dst;
+ unsigned int hstep, vstep;
+ dma_addr_t addr;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE,
+ DEINTERLACE_MOD_ENABLE_EN);
+
+	/* The two motion-flag buffers swap roles on every field. */
+	if (ctx->field) {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag1_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag2_buf_dma);
+ } else {
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG0,
+ ctx->flag2_buf_dma);
+ deinterlace_write(dev, DEINTERLACE_TILE_FLAG1,
+ ctx->flag1_buf_dma);
+ }
+ deinterlace_write(dev, DEINTERLACE_FLAG_LINE_STRIDE, 0x200);
+
+ width = ctx->src_fmt.width;
+ height = ctx->src_fmt.height;
+ stride = ctx->src_fmt.bytesperline;
+ size = stride * height;
+
+ addr = vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_BUF_ADDR2, 0);
+
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_LINE_STRIDE1, stride);
+
+ deinterlace_write(dev, DEINTERLACE_CH0_IN_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_IN_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ val = DEINTERLACE_IN_FMT_FMT(DEINTERLACE_IN_FMT_YUV420) |
+ DEINTERLACE_IN_FMT_MOD(DEINTERLACE_MODE_UV_COMBINED);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_IN_FMT, val);
+
+ if (ctx->prev)
+ addr = vb2_dma_contig_plane_dma_addr(&ctx->prev->vb2_buf, 0);
+
+ deinterlace_write(dev, DEINTERLACE_PRELUMA, addr);
+ deinterlace_write(dev, DEINTERLACE_PRECHROMA, addr + size);
+
+ val = DEINTERLACE_OUT_FMT_FMT(DEINTERLACE_OUT_FMT_YUV420SP);
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_UVUV);
+ break;
+ case V4L2_PIX_FMT_NV21:
+ val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_VUVU);
+ break;
+ }
+ deinterlace_write(dev, DEINTERLACE_OUT_FMT, val);
+
+ width = ctx->dst_fmt.width;
+ height = ctx->dst_fmt.height;
+ stride = ctx->dst_fmt.bytesperline;
+ size = stride * height;
+
+ deinterlace_write(dev, DEINTERLACE_CH0_OUT_SIZE,
+ DEINTERLACE_SIZE(width, height));
+ deinterlace_write(dev, DEINTERLACE_CH1_OUT_SIZE,
+ DEINTERLACE_SIZE(width / 2, height / 2));
+
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE0, stride);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE1, stride);
+
+ addr = vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR0, addr);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR1, addr + size);
+ deinterlace_write(dev, DEINTERLACE_WB_ADDR2, 0);
+
+ hstep = (ctx->src_fmt.width << 16) / ctx->dst_fmt.width;
+ vstep = (ctx->src_fmt.height << 16) / ctx->dst_fmt.height;
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_FACT, vstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
+ DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_REG_READY);
+
+ deinterlace_set_bits(dev, DEINTERLACE_INT_ENABLE,
+ DEINTERLACE_INT_ENABLE_WB_EN);
+
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_WB_EN);
+}
+
+/* Each interlaced source buffer yields two progressive frames (one per
+ * field), so a job needs one source and two destination buffers ready.
+ */
+static int deinterlace_job_ready(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 1 &&
+ v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 2;
+}
+
+static void deinterlace_job_abort(void *priv)
+{
+ struct deinterlace_ctx *ctx = priv;
+
+ /* Will cancel the transaction in the next interrupt handler */
+ ctx->aborting = 1;
+}
+
+static irqreturn_t deinterlace_irq(int irq, void *data)
+{
+ struct deinterlace_dev *dev = data;
+ struct vb2_v4l2_buffer *src, *dst;
+ enum vb2_buffer_state state;
+ struct deinterlace_ctx *ctx;
+ unsigned int val;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_NONE;
+ }
+
+ val = deinterlace_read(dev, DEINTERLACE_INT_STATUS);
+ if (!(val & DEINTERLACE_INT_STATUS_WRITEBACK))
+ return IRQ_NONE;
+
+ deinterlace_write(dev, DEINTERLACE_INT_ENABLE, 0);
+ deinterlace_set_bits(dev, DEINTERLACE_INT_STATUS,
+ DEINTERLACE_INT_STATUS_WRITEBACK);
+ deinterlace_write(dev, DEINTERLACE_MOD_ENABLE, 0);
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_START, 0);
+
+ val = deinterlace_read(dev, DEINTERLACE_STATUS);
+ if (val & DEINTERLACE_STATUS_WB_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ v4l2_m2m_buf_done(dst, state);
+
+ if (ctx->field != ctx->first_field || ctx->aborting) {
+ ctx->field = ctx->first_field;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ if (ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+ ctx->prev = src;
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+ } else {
+ ctx->field = !ctx->first_field;
+ deinterlace_device_run(ctx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void deinterlace_init(struct deinterlace_dev *dev)
+{
+ u32 val;
+ int i;
+
+ deinterlace_write(dev, DEINTERLACE_BYPASS,
+ DEINTERLACE_BYPASS_CSC);
+ deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE_CTRL,
+ DEINTERLACE_WB_LINE_STRIDE_CTRL_EN);
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_OUT_CTRL);
+ deinterlace_write(dev, DEINTERLACE_AGTH_SEL,
+ DEINTERLACE_AGTH_SEL_LINEBUF);
+
+ val = DEINTERLACE_CTRL_EN |
+ DEINTERLACE_CTRL_MODE_MIXED |
+ DEINTERLACE_CTRL_DIAG_INTP_EN |
+ DEINTERLACE_CTRL_TEMP_DIFF_EN;
+ deinterlace_write(dev, DEINTERLACE_CTRL, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_LUMA_TH,
+ DEINTERLACE_LUMA_TH_MIN_LUMA_MSK,
+ DEINTERLACE_LUMA_TH_MIN_LUMA(4));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_SPAT_COMP,
+ DEINTERLACE_SPAT_COMP_TH2_MSK,
+ DEINTERLACE_SPAT_COMP_TH2(5));
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_TEMP_DIFF,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK,
+ DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(5));
+
+ val = DEINTERLACE_DIAG_INTP_TH0(60) |
+ DEINTERLACE_DIAG_INTP_TH1(0) |
+ DEINTERLACE_DIAG_INTP_TH3(30);
+ deinterlace_write(dev, DEINTERLACE_DIAG_INTP, val);
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
+ DEINTERLACE_CHROMA_DIFF_TH_MSK,
+ DEINTERLACE_CHROMA_DIFF_TH(5));
+
+ /* neutral filter coefficients */
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+
+ for (i = 0; i < 32; i++) {
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ }
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+}
+
+static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct deinterlace_ctx, fh);
+}
+
+static bool deinterlace_check_format(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(deinterlace_formats); i++)
+ if (deinterlace_formats[i] == pixelformat)
+ return true;
+
+ return false;
+}
+
+static void deinterlace_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int height = pix_fmt->height;
+ unsigned int width = pix_fmt->width;
+ unsigned int bytesperline;
+ unsigned int sizeimage;
+
+ width = clamp(width, DEINTERLACE_MIN_WIDTH,
+ DEINTERLACE_MAX_WIDTH);
+ height = clamp(height, DEINTERLACE_MIN_HEIGHT,
+ DEINTERLACE_MAX_HEIGHT);
+
+ bytesperline = ALIGN(width, 2);
+ /* luma */
+ sizeimage = bytesperline * height;
+ /* chroma */
+ sizeimage += bytesperline * height / 2;
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int deinterlace_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DEINTERLACE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, DEINTERLACE_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", DEINTERLACE_NAME);
+
+ return 0;
+}
+
+static int deinterlace_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index < ARRAY_SIZE(deinterlace_formats)) {
+ f->pixelformat = deinterlace_formats[f->index];
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int deinterlace_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ if (!deinterlace_check_format(fsize->pixel_format))
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = DEINTERLACE_MIN_WIDTH;
+ fsize->stepwise.min_height = DEINTERLACE_MIN_HEIGHT;
+ fsize->stepwise.max_width = DEINTERLACE_MAX_WIDTH;
+ fsize->stepwise.max_height = DEINTERLACE_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int deinterlace_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_NONE)
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (!deinterlace_check_format(f->fmt.pix.pixelformat))
+ f->fmt.pix.pixelformat = deinterlace_formats[0];
+
+ if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT &&
+ f->fmt.pix.field != V4L2_FIELD_INTERLACED)
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ deinterlace_prepare_format(&f->fmt.pix);
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ return 0;
+}
+
+static int deinterlace_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
+ struct vb2_queue *vq;
+ int ret;
+
+ ret = deinterlace_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = {
+ .vidioc_querycap = deinterlace_querycap,
+
+ .vidioc_enum_framesizes = deinterlace_enum_framesizes,
+
+ .vidioc_enum_fmt_vid_cap = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_cap = deinterlace_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = deinterlace_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = deinterlace_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = deinterlace_enum_fmt,
+ .vidioc_g_fmt_vid_out = deinterlace_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = deinterlace_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = deinterlace_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int deinterlace_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static int deinterlace_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static void deinterlace_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void deinterlace_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+
+ do {
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (vbuf)
+ v4l2_m2m_buf_done(vbuf, state);
+ } while (vbuf);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->prev)
+ v4l2_m2m_buf_done(ctx->prev, state);
+}
+
+static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+ struct device *dev = ctx->dev->dev;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "Failed to enable module\n");
+			/* get_sync takes a reference even on failure */
+			pm_runtime_put_noidle(dev);
+
+			goto err_runtime_get;
+		}
+
+ ctx->first_field =
+ ctx->src_fmt.field == V4L2_FIELD_INTERLACED_BT;
+ ctx->field = ctx->first_field;
+
+ ctx->prev = NULL;
+ ctx->aborting = 0;
+
+ ctx->flag1_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag1_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag1_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem1;
+ }
+
+ ctx->flag2_buf = dma_alloc_coherent(dev, FLAG_SIZE,
+ &ctx->flag2_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->flag2_buf) {
+ ret = -ENOMEM;
+
+ goto err_no_mem2;
+ }
+ }
+
+ return 0;
+
+err_no_mem2:
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+err_no_mem1:
+ pm_runtime_put(dev);
+err_runtime_get:
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void deinterlace_stop_streaming(struct vb2_queue *vq)
+{
+ struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ struct device *dev = ctx->dev->dev;
+
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf,
+ ctx->flag1_buf_dma);
+ dma_free_coherent(dev, FLAG_SIZE, ctx->flag2_buf,
+ ctx->flag2_buf_dma);
+
+ pm_runtime_put(dev);
+ }
+
+ deinterlace_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops deinterlace_qops = {
+ .queue_setup = deinterlace_queue_setup,
+ .buf_prepare = deinterlace_buf_prepare,
+ .buf_queue = deinterlace_buf_queue,
+ .start_streaming = deinterlace_start_streaming,
+ .stop_streaming = deinterlace_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct deinterlace_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &deinterlace_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->min_buffers_needed = 2;
+ dst_vq->ops = &deinterlace_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int deinterlace_open(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ /* default output format */
+ ctx->src_fmt.pixelformat = deinterlace_formats[0];
+ ctx->src_fmt.field = V4L2_FIELD_INTERLACED;
+ ctx->src_fmt.width = 640;
+ ctx->src_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->src_fmt);
+
+ /* default capture format */
+ ctx->dst_fmt.pixelformat = deinterlace_formats[0];
+ ctx->dst_fmt.field = V4L2_FIELD_NONE;
+ ctx->dst_fmt.width = 640;
+ ctx->dst_fmt.height = 480;
+ deinterlace_prepare_format(&ctx->dst_fmt);
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &deinterlace_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_free;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int deinterlace_release(struct file *file)
+{
+ struct deinterlace_dev *dev = video_drvdata(file);
+ struct deinterlace_ctx *ctx = container_of(file->private_data,
+ struct deinterlace_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations deinterlace_fops = {
+ .owner = THIS_MODULE,
+ .open = deinterlace_open,
+ .release = deinterlace_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device deinterlace_video_device = {
+ .name = DEINTERLACE_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &deinterlace_fops,
+ .ioctl_ops = &deinterlace_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops deinterlace_m2m_ops = {
+ .device_run = deinterlace_device_run,
+ .job_ready = deinterlace_job_ready,
+ .job_abort = deinterlace_job_abort,
+};
+
+static int deinterlace_probe(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int irq, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = deinterlace_video_device;
+ dev->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev->dev, "Failed to get IRQ\n");
+
+ return irq ? irq : -ENXIO;
+ }
+
+ ret = devm_request_irq(dev->dev, irq, deinterlace_irq,
+ 0, dev_name(dev->dev), dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->base)) {
+ dev_err(dev->dev, "Failed to map registers\n");
+
+ return PTR_ERR(dev->base);
+ }
+
+ dev->bus_clk = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->bus_clk)) {
+ dev_err(dev->dev, "Failed to get bus clock\n");
+
+ return PTR_ERR(dev->bus_clk);
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ dev_err(dev->dev, "Failed to get mod clock\n");
+
+ return PTR_ERR(dev->mod_clk);
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ dev_err(dev->dev, "Failed to get ram clock\n");
+
+ return PTR_ERR(dev->ram_clk);
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ dev_err(dev->dev, "Failed to get reset control\n");
+
+ return PTR_ERR(dev->rstc);
+ }
+
+ mutex_init(&dev->dev_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register V4L2 device\n");
+
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ deinterlace_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ dev->m2m_dev = v4l2_m2m_init(&deinterlace_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ pm_runtime_enable(dev->dev);
+
+ return 0;
+
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int deinterlace_remove(struct platform_device *pdev)
+{
+ struct deinterlace_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int deinterlace_runtime_resume(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+ int ret;
+
+ ret = clk_set_rate_exclusive(dev->mod_clk, 300000000);
+ if (ret) {
+ dev_err(dev->dev, "Failed to set exclusive mod clock rate\n");
+
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dev->bus_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable bus clock\n");
+
+ goto err_exclusive_rate;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable mod clock\n");
+
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ dev_err(dev->dev, "Failed to enable ram clock\n");
+
+ goto err_mod_clk;
+ }
+
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+ dev_err(dev->dev, "Failed to deassert reset\n");
+
+ goto err_ram_clk;
+ }
+
+ deinterlace_init(dev);
+
+ return 0;
+
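+/* unwind in reverse order of the clock enables above */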
+err_ram_clk:
+ clk_disable_unprepare(dev->ram_clk);
+err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+err_bus_clk:
+ clk_disable_unprepare(dev->bus_clk);
+err_exclusive_rate:
+ clk_rate_exclusive_put(dev->mod_clk);
+
+ return ret;
+}
+
+static int deinterlace_runtime_suspend(struct device *device)
+{
+ struct deinterlace_dev *dev = dev_get_drvdata(device);
+
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->bus_clk);
+ clk_rate_exclusive_put(dev->mod_clk);
+
+ return 0;
+}
+
+static const struct of_device_id deinterlace_dt_match[] = {
+ { .compatible = "allwinner,sun8i-h3-deinterlace" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, deinterlace_dt_match);
+
+static const struct dev_pm_ops deinterlace_pm_ops = {
+ .runtime_resume = deinterlace_runtime_resume,
+ .runtime_suspend = deinterlace_runtime_suspend,
+};
+
+static struct platform_driver deinterlace_driver = {
+ .probe = deinterlace_probe,
+ .remove = deinterlace_remove,
+ .driver = {
+ .name = DEINTERLACE_NAME,
+ .of_match_table = deinterlace_dt_match,
+ .pm = &deinterlace_pm_ops,
+ },
+};
+module_platform_driver(deinterlace_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner Deinterlace driver");
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
new file mode 100644
index 000000000000..0254251d8687
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Allwinner Deinterlace driver
+ *
+ * Copyright (C) 2019 Jernej Skrabec <jernej.skrabec@siol.net>
+ */
+
+#ifndef _SUN8I_DEINTERLACE_H_
+#define _SUN8I_DEINTERLACE_H_
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define DEINTERLACE_NAME "sun8i-di"
+
+#define DEINTERLACE_MOD_ENABLE 0x00
+#define DEINTERLACE_MOD_ENABLE_EN BIT(0)
+
+#define DEINTERLACE_FRM_CTRL 0x04
+#define DEINTERLACE_FRM_CTRL_REG_READY BIT(0)
+#define DEINTERLACE_FRM_CTRL_WB_EN BIT(2)
+#define DEINTERLACE_FRM_CTRL_OUT_CTRL BIT(11)
+#define DEINTERLACE_FRM_CTRL_START BIT(16)
+#define DEINTERLACE_FRM_CTRL_COEF_ACCESS BIT(23)
+
+#define DEINTERLACE_BYPASS 0x08
+#define DEINTERLACE_BYPASS_CSC BIT(1)
+
+#define DEINTERLACE_AGTH_SEL 0x0c
+#define DEINTERLACE_AGTH_SEL_LINEBUF BIT(8)
+
+#define DEINTERLACE_LINT_CTRL 0x10
+#define DEINTERLACE_TRD_PRELUMA 0x1c
+#define DEINTERLACE_BUF_ADDR0 0x20
+#define DEINTERLACE_BUF_ADDR1 0x24
+#define DEINTERLACE_BUF_ADDR2 0x28
+
+#define DEINTERLACE_FIELD_CTRL 0x2c
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT(v) ((v) & 0xff)
+#define DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK (0xff)
+
+#define DEINTERLACE_TB_OFFSET0 0x30
+#define DEINTERLACE_TB_OFFSET1 0x34
+#define DEINTERLACE_TB_OFFSET2 0x38
+#define DEINTERLACE_TRD_PRECHROMA 0x3c
+#define DEINTERLACE_LINE_STRIDE0 0x40
+#define DEINTERLACE_LINE_STRIDE1 0x44
+#define DEINTERLACE_LINE_STRIDE2 0x48
+
+#define DEINTERLACE_IN_FMT 0x4c
+#define DEINTERLACE_IN_FMT_PS(v) ((v) & 3)
+#define DEINTERLACE_IN_FMT_FMT(v) (((v) & 7) << 4)
+#define DEINTERLACE_IN_FMT_MOD(v) (((v) & 7) << 8)
+
+#define DEINTERLACE_WB_ADDR0 0x50
+#define DEINTERLACE_WB_ADDR1 0x54
+#define DEINTERLACE_WB_ADDR2 0x58
+
+#define DEINTERLACE_OUT_FMT 0x5c
+#define DEINTERLACE_OUT_FMT_FMT(v) ((v) & 0xf)
+#define DEINTERLACE_OUT_FMT_PS(v) (((v) & 3) << 5)
+
+#define DEINTERLACE_INT_ENABLE 0x60
+#define DEINTERLACE_INT_ENABLE_WB_EN BIT(7)
+
+#define DEINTERLACE_INT_STATUS 0x64
+#define DEINTERLACE_INT_STATUS_WRITEBACK BIT(7)
+
+#define DEINTERLACE_STATUS 0x68
+#define DEINTERLACE_STATUS_COEF_STATUS BIT(11)
+#define DEINTERLACE_STATUS_WB_ERROR BIT(12)
+
+#define DEINTERLACE_CSC_COEF 0x70 /* 12 registers */
+
+#define DEINTERLACE_CTRL 0xa0
+#define DEINTERLACE_CTRL_EN BIT(0)
+#define DEINTERLACE_CTRL_FLAG_OUT_EN BIT(8)
+#define DEINTERLACE_CTRL_MODE_PASSTROUGH (0 << 16)
+#define DEINTERLACE_CTRL_MODE_WEAVE (1 << 16)
+#define DEINTERLACE_CTRL_MODE_BOB (2 << 16)
+#define DEINTERLACE_CTRL_MODE_MIXED (3 << 16)
+#define DEINTERLACE_CTRL_DIAG_INTP_EN BIT(24)
+#define DEINTERLACE_CTRL_TEMP_DIFF_EN BIT(25)
+
+#define DEINTERLACE_DIAG_INTP 0xa4
+#define DEINTERLACE_DIAG_INTP_TH0(v) ((v) & 0x7f)
+#define DEINTERLACE_DIAG_INTP_TH0_MSK (0x7f)
+#define DEINTERLACE_DIAG_INTP_TH1(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_DIAG_INTP_TH1_MSK (0x7f << 8)
+#define DEINTERLACE_DIAG_INTP_TH3(v) (((v) & 0xff) << 24)
+#define DEINTERLACE_DIAG_INTP_TH3_MSK (0xff << 24)
+
+#define DEINTERLACE_TEMP_DIFF 0xa8
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH(v) ((v) & 0x7f)
+#define DEINTERLACE_TEMP_DIFF_SAD_CENTRAL_TH_MSK (0x7f)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(v) (((v) & 0x7f) << 8)
+#define DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK (0x7f << 8)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH(v) (((v) & 0x7ff) << 16)
+#define DEINTERLACE_TEMP_DIFF_DIRECT_DITHER_TH_MSK (0x7ff << 16)
+
+#define DEINTERLACE_LUMA_TH 0xac
+#define DEINTERLACE_LUMA_TH_MIN_LUMA(v) ((v) & 0xff)
+#define DEINTERLACE_LUMA_TH_MIN_LUMA_MSK (0xff)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA(v) (((v) & 0xff) << 8)
+#define DEINTERLACE_LUMA_TH_MAX_LUMA_MSK (0xff << 8)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_LUMA_TH_AVG_LUMA_SHIFT_MSK (0xff << 16)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC(v) (((v) & 3) << 24)
+#define DEINTERLACE_LUMA_TH_PIXEL_STATIC_MSK (3 << 24)
+
+#define DEINTERLACE_SPAT_COMP 0xb0
+#define DEINTERLACE_SPAT_COMP_TH2(v) ((v) & 0xff)
+#define DEINTERLACE_SPAT_COMP_TH2_MSK (0xff)
+#define DEINTERLACE_SPAT_COMP_TH3(v) (((v) & 0xff) << 16)
+#define DEINTERLACE_SPAT_COMP_TH3_MSK (0xff << 16)
+
+#define DEINTERLACE_CHROMA_DIFF 0xb4
+#define DEINTERLACE_CHROMA_DIFF_TH(v) ((v) & 0xff)
+#define DEINTERLACE_CHROMA_DIFF_TH_MSK (0xff)
+#define DEINTERLACE_CHROMA_DIFF_LUMA(v) (((v) & 0x3f) << 16)
+#define DEINTERLACE_CHROMA_DIFF_LUMA_MSK (0x3f << 16)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA(v) (((v) & 0x3f) << 24)
+#define DEINTERLACE_CHROMA_DIFF_CHROMA_MSK (0x3f << 24)
+
+#define DEINTERLACE_PRELUMA 0xb8
+#define DEINTERLACE_PRECHROMA 0xbc
+#define DEINTERLACE_TILE_FLAG0 0xc0
+#define DEINTERLACE_TILE_FLAG1 0xc4
+#define DEINTERLACE_FLAG_LINE_STRIDE 0xc8
+#define DEINTERLACE_FLAG_SEQ 0xcc
+
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL 0xd0
+#define DEINTERLACE_WB_LINE_STRIDE_CTRL_EN BIT(0)
+
+#define DEINTERLACE_WB_LINE_STRIDE0 0xd4
+#define DEINTERLACE_WB_LINE_STRIDE1 0xd8
+#define DEINTERLACE_WB_LINE_STRIDE2 0xdc
+#define DEINTERLACE_TRD_CTRL 0xe0
+#define DEINTERLACE_TRD_BUF_ADDR0 0xe4
+#define DEINTERLACE_TRD_BUF_ADDR1 0xe8
+#define DEINTERLACE_TRD_BUF_ADDR2 0xec
+#define DEINTERLACE_TRD_TB_OFF0 0xf0
+#define DEINTERLACE_TRD_TB_OFF1 0xf4
+#define DEINTERLACE_TRD_TB_OFF2 0xf8
+#define DEINTERLACE_TRD_WB_STRIDE 0xfc
+#define DEINTERLACE_CH0_IN_SIZE 0x100
+#define DEINTERLACE_CH0_OUT_SIZE 0x104
+#define DEINTERLACE_CH0_HORZ_FACT 0x108
+#define DEINTERLACE_CH0_VERT_FACT 0x10c
+#define DEINTERLACE_CH0_HORZ_PHASE 0x110
+#define DEINTERLACE_CH0_VERT_PHASE0 0x114
+#define DEINTERLACE_CH0_VERT_PHASE1 0x118
+#define DEINTERLACE_CH0_HORZ_TAP0 0x120
+#define DEINTERLACE_CH0_HORZ_TAP1 0x124
+#define DEINTERLACE_CH0_VERT_TAP 0x128
+#define DEINTERLACE_CH1_IN_SIZE 0x200
+#define DEINTERLACE_CH1_OUT_SIZE 0x204
+#define DEINTERLACE_CH1_HORZ_FACT 0x208
+#define DEINTERLACE_CH1_VERT_FACT 0x20c
+#define DEINTERLACE_CH1_HORZ_PHASE 0x210
+#define DEINTERLACE_CH1_VERT_PHASE0 0x214
+#define DEINTERLACE_CH1_VERT_PHASE1 0x218
+#define DEINTERLACE_CH1_HORZ_TAP0 0x220
+#define DEINTERLACE_CH1_HORZ_TAP1 0x224
+#define DEINTERLACE_CH1_VERT_TAP 0x228
+#define DEINTERLACE_CH0_HORZ_COEF0 0x400 /* 32 registers */
+#define DEINTERLACE_CH0_HORZ_COEF1 0x480 /* 32 registers */
+#define DEINTERLACE_CH0_VERT_COEF 0x500 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF0 0x600 /* 32 registers */
+#define DEINTERLACE_CH1_HORZ_COEF1 0x680 /* 32 registers */
+#define DEINTERLACE_CH1_VERT_COEF 0x700 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF0 0x800 /* 32 registers */
+#define DEINTERLACE_CH3_HORZ_COEF1 0x880 /* 32 registers */
+#define DEINTERLACE_CH3_VERT_COEF 0x900 /* 32 registers */
+
+#define DEINTERLACE_MIN_WIDTH 2U
+#define DEINTERLACE_MIN_HEIGHT 2U
+#define DEINTERLACE_MAX_WIDTH 2048U
+#define DEINTERLACE_MAX_HEIGHT 1100U
+
+#define DEINTERLACE_MODE_UV_COMBINED 2
+
+#define DEINTERLACE_IN_FMT_YUV420 2
+
+#define DEINTERLACE_OUT_FMT_YUV420SP 13
+
+#define DEINTERLACE_PS_UVUV 0
+#define DEINTERLACE_PS_VUVU 1
+
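+/* identity (1.0) coefficient in the scaler's fixed-point format */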
+#define DEINTERLACE_IDENTITY_COEF 0x4000
+
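+/* pack a (width, height) pair into the hardware's size registers */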
+#define DEINTERLACE_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
+
+struct deinterlace_ctx {
+ struct v4l2_fh fh;
+ struct deinterlace_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+
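+ /* motion flag buffers, held while the OUTPUT queue is streaming */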
+ void *flag1_buf;
+ dma_addr_t flag1_buf_dma;
+
+ void *flag2_buf;
+ dma_addr_t flag2_buf_dma;
+
+ struct vb2_v4l2_buffer *prev;
+
+ unsigned int first_field;
+ unsigned int field;
+
+ int aborting;
+};
+
+struct deinterlace_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+
+ void __iomem *base;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+};
+
+#endif
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index a632602131f2..a99caac59f44 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -409,7 +409,7 @@ static int tegra_cec_probe(struct platform_device *pdev)
return 0;
err_notifier:
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
err_adapter:
cec_delete_adapter(cec->adap);
err_clk:
@@ -423,7 +423,7 @@ static int tegra_cec_remove(struct platform_device *pdev)
clk_disable_unprepare(cec->clk);
- cec_notifier_cec_adap_unregister(cec->notifier);
+ cec_notifier_cec_adap_unregister(cec->notifier, cec->adap);
cec_unregister_adapter(cec->adap);
return 0;
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index eda2a5985da7..834114a4eebe 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -15,76 +15,96 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
#include "csc.h"
/*
- * 16 coefficients in the order:
+ * 12 coefficients in the order:
* a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
- * (we may need to pass non-default values from user space later on, we might
- * need to make the coefficient struct more easy to populate)
*/
-struct colorspace_coeffs {
- u16 sd[12];
- u16 hd[12];
+struct quantization {
+ u16 coeff[12];
};
-/* VIDEO_RANGE: limited range, GRAPHICS_RANGE: full range */
-#define CSC_COEFFS_VIDEO_RANGE_Y2R 0
-#define CSC_COEFFS_GRAPHICS_RANGE_Y2R 1
-#define CSC_COEFFS_VIDEO_RANGE_R2Y 2
-#define CSC_COEFFS_GRAPHICS_RANGE_R2Y 3
+struct colorspace {
+ struct quantization limited;
+ struct quantization full;
+};
+
+struct encoding_direction {
+ struct colorspace r601;
+ struct colorspace r709;
+};
+
+struct csc_coeffs {
+ struct encoding_direction y2r;
+ struct encoding_direction r2y;
+};
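+
+/*
+ * Coefficients are selected in three steps: conversion direction
+ * (y2r/r2y), then Y'CbCr encoding (BT.601/BT.709), then quantization
+ * (limited/full range).
+ */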
/* default colorspace coefficients */
-static struct colorspace_coeffs colorspace_coeffs[4] = {
- [CSC_COEFFS_VIDEO_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
- 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+static struct csc_coeffs csc_coeffs = {
+ .y2r = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
+ 0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
+ 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ }
+ },
},
- {
- /* HDTV */
- 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
- 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
+ 0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
+ }
+ },
+ .full = {
+ { /* HDTV */
+ 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
+ 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ }
+ },
},
},
- [CSC_COEFFS_GRAPHICS_RANGE_Y2R] = {
- {
- /* SDTV */
- 0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
- 0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
+ .r2y = {
+ .r601 = {
+ .limited = {
+ { /* SDTV */
+ 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
+ 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* SDTV */
+ 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
+ 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
- },
- },
- [CSC_COEFFS_VIDEO_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
- 0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
- 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
- },
- },
- [CSC_COEFFS_GRAPHICS_RANGE_R2Y] = {
- {
- /* SDTV */
- 0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
- 0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
- },
- {
- /* HDTV */
- 0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
- 0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
+ .r709 = {
+ .limited = {
+ { /* HDTV */
+ 0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
+ 0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
+ }
+ },
+ .full = {
+ { /* HDTV */
+ 0x00bb, 0x0275, 0x003f, 0x1f99, 0x1ea5, 0x01c2,
+ 0x01c2, 0x1e67, 0x1fd7, 0x0040, 0x0200, 0x0200,
+ }
+ },
},
},
+
};
void csc_dump_regs(struct csc_data *csc)
@@ -117,46 +137,114 @@ EXPORT_SYMBOL(csc_set_coeff_bypass);
* set the color space converter coefficient shadow register values
*/
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace)
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt)
{
u32 *csc_reg5 = csc_reg0 + 5;
u32 *shadow_csc = csc_reg0;
- struct colorspace_coeffs *sd_hd_coeffs;
u16 *coeff, *end_coeff;
- enum v4l2_colorspace yuv_colorspace;
- int sel = 0;
-
- /*
- * support only graphics data range(full range) for now, a control ioctl
- * would be nice here
- */
- /* Y2R */
- if (dst_colorspace == V4L2_COLORSPACE_SRGB &&
- (src_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- src_colorspace == V4L2_COLORSPACE_REC709)) {
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_format_info *src_finfo, *dst_finfo;
+ enum v4l2_ycbcr_encoding src_ycbcr_enc, dst_ycbcr_enc;
+ enum v4l2_quantization src_quantization, dst_quantization;
+ u32 src_pixelformat, dst_pixelformat;
+
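+ /* both pix and pix_mp carry colorimetry; read from the right union member */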
+ switch (src_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &src_fmt->fmt.pix;
+ src_pixelformat = pix->pixelformat;
+ src_ycbcr_enc = pix->ycbcr_enc;
+ src_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &src_fmt->fmt.pix_mp;
+ src_pixelformat = mp->pixelformat;
+ src_ycbcr_enc = mp->ycbcr_enc;
+ src_quantization = mp->quantization;
+ break;
+ }
+
+ switch (dst_fmt->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &dst_fmt->fmt.pix;
+ dst_pixelformat = pix->pixelformat;
+ dst_ycbcr_enc = pix->ycbcr_enc;
+ dst_quantization = pix->quantization;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ default:
+ mp = &dst_fmt->fmt.pix_mp;
+ dst_pixelformat = mp->pixelformat;
+ dst_ycbcr_enc = mp->ycbcr_enc;
+ dst_quantization = mp->quantization;
+ break;
+ }
+
+ src_finfo = v4l2_format_info(src_pixelformat);
+ dst_finfo = v4l2_format_info(dst_pixelformat);
+
+ if (v4l2_is_format_yuv(src_finfo) &&
+ v4l2_is_format_rgb(dst_finfo)) {
/* Y2R */
- sel = 1;
- yuv_colorspace = src_colorspace;
- } else if ((dst_colorspace == V4L2_COLORSPACE_SMPTE170M ||
- dst_colorspace == V4L2_COLORSPACE_REC709) &&
- src_colorspace == V4L2_COLORSPACE_SRGB) {
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ src_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (src_quantization == V4L2_QUANTIZATION_DEFAULT)
+ src_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (src_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r601.limited.coeff;
+ } else if (src_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.y2r.r709.full.coeff;
+ else
+ coeff = csc_coeffs.y2r.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.y2r.r601.full.coeff;
+ }
+ } else if (v4l2_is_format_rgb(src_finfo) &&
+ v4l2_is_format_yuv(dst_finfo)) {
/* R2Y */
- sel = 3;
- yuv_colorspace = dst_colorspace;
+
+ /*
+ * These are not the standard default values but are
+ * set this way for historical compatibility
+ */
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ dst_ycbcr_enc = V4L2_YCBCR_ENC_601;
+
+ if (dst_quantization == V4L2_QUANTIZATION_DEFAULT)
+ dst_quantization = V4L2_QUANTIZATION_FULL_RANGE;
+
+ if (dst_ycbcr_enc == V4L2_YCBCR_ENC_601) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r601.limited.coeff;
+ } else if (dst_ycbcr_enc == V4L2_YCBCR_ENC_709) {
+ if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ coeff = csc_coeffs.r2y.r709.full.coeff;
+ else
+ coeff = csc_coeffs.r2y.r709.limited.coeff;
+ } else {
+ /* Should never reach this, but it keeps gcc happy */
+ coeff = csc_coeffs.r2y.r601.full.coeff;
+ }
} else {
*csc_reg5 |= CSC_BYPASS;
return;
}
- sd_hd_coeffs = &colorspace_coeffs[sel];
-
- /* select between SD or HD coefficients */
- if (yuv_colorspace == V4L2_COLORSPACE_SMPTE170M)
- coeff = sd_hd_coeffs->sd;
- else
- coeff = sd_hd_coeffs->hd;
-
end_coeff = coeff + 12;
for (; coeff < end_coeff; coeff += 2)
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index de9a58af2ca8..af2e86bccf57 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -58,8 +58,8 @@ struct csc_data {
void csc_dump_regs(struct csc_data *csc);
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
- enum v4l2_colorspace src_colorspace,
- enum v4l2_colorspace dst_colorspace);
+ struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt);
+
struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
#endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 53d27cd6e10a..2e5148ae7a0f 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -56,6 +56,11 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
.data_type = DATA_TYPE_C420,
.depth = 4,
},
+ [VPDMA_DATA_FMT_CB420] = {
+ .type = VPDMA_DATA_FMT_TYPE_YUV,
+ .data_type = DATA_TYPE_CB420,
+ .depth = 4,
+ },
[VPDMA_DATA_FMT_YCR422] = {
.type = VPDMA_DATA_FMT_TYPE_YUV,
.data_type = DATA_TYPE_YCR422,
@@ -759,7 +764,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
pr_debug("word1: line_length = %d, xfer_height = %d\n",
dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));
- pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
+ pr_debug("word2: start_addr = %x\n", dtd->start_addr);
pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
dtd_get_pkt_type(dtd),
@@ -825,7 +830,8 @@ void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = raw_vpdma_chan;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
@@ -893,7 +899,8 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
channel = next_chan = chan_info[chan].num;
if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
- fmt->data_type == DATA_TYPE_C420) {
+ (fmt->data_type == DATA_TYPE_C420 ||
+ fmt->data_type == DATA_TYPE_CB420)) {
rect.height >>= 1;
rect.top >>= 1;
depth = 8;
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 28bc94129348..393fcbb3cb40 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -57,6 +57,7 @@ struct vpdma_data_format {
* line stride of source and dest
* buffers should be 16 byte aligned
*/
+#define VPDMA_MAX_STRIDE 65520 /* Max line stride 16 byte aligned */
#define VPDMA_DTD_DESC_SIZE 32 /* 8 words */
#define VPDMA_CFD_CTD_DESC_SIZE 16 /* 4 words */
@@ -71,6 +72,7 @@ enum vpdma_yuv_formats {
VPDMA_DATA_FMT_C444,
VPDMA_DATA_FMT_C422,
VPDMA_DATA_FMT_C420,
+ VPDMA_DATA_FMT_CB420,
VPDMA_DATA_FMT_YCR422,
VPDMA_DATA_FMT_YC444,
VPDMA_DATA_FMT_CRY422,
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c488609bc162..0bbee45338bd 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -92,6 +92,7 @@
#define DATA_TYPE_C444 0x4
#define DATA_TYPE_C422 0x5
#define DATA_TYPE_C420 0x6
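+/* chroma-only 4:2:0 data type, used for the NV21 pixel format */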
+#define DATA_TYPE_CB420 0x16
#define DATA_TYPE_YC444 0x8
#define DATA_TYPE_YCB422 0x7
#define DATA_TYPE_YCR422 0x17
@@ -165,11 +166,11 @@ struct vpdma_dtd {
u32 xfer_length_height;
u32 w1;
};
- dma_addr_t start_addr;
+ u32 start_addr;
u32 pkt_ctl;
union {
u32 frame_width_height; /* inbound */
- dma_addr_t desc_write_addr; /* outbound */
+ u32 desc_write_addr; /* outbound */
};
union {
u32 start_h_v; /* inbound */
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 60b575bb44c4..65c2c048b018 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -52,7 +52,7 @@
#define MIN_W 32
#define MIN_H 32
#define MAX_W 2048
-#define MAX_H 1184
+#define MAX_H 2048
/* required alignments */
#define S_ALIGN 0 /* multiple of 1 */
@@ -249,6 +249,14 @@ static struct vpe_fmt vpe_formats[] = {
},
},
{
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
+ .coplanar = 1,
+ .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
+ &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420],
+ },
+ },
+ {
.fourcc = V4L2_PIX_FMT_YUYV,
.types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
.coplanar = 0,
@@ -311,14 +319,9 @@ static struct vpe_fmt vpe_formats[] = {
* there is one source queue and one destination queue for each m2m context.
*/
struct vpe_q_data {
- unsigned int width; /* frame width */
- unsigned int height; /* frame height */
- unsigned int nplanes; /* Current number of planes */
- unsigned int bytesperline[VPE_MAX_PLANES]; /* bytes per line in memory */
- enum v4l2_colorspace colorspace;
- enum v4l2_field field; /* supported field value */
+ /* current v4l2 format info */
+ struct v4l2_format format;
unsigned int flags;
- unsigned int sizeimage[VPE_MAX_PLANES]; /* image size in memory */
struct v4l2_rect c_rect; /* crop/compose rectangle */
struct vpe_fmt *fmt; /* format info */
};
@@ -328,9 +331,14 @@ struct vpe_q_data {
#define Q_DATA_MODE_TILED BIT(1)
#define Q_DATA_INTERLACED_ALTERNATE BIT(2)
#define Q_DATA_INTERLACED_SEQ_TB BIT(3)
+#define Q_DATA_INTERLACED_SEQ_BT BIT(4)
+
+#define Q_IS_SEQ_XX (Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
#define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \
- Q_DATA_INTERLACED_SEQ_TB)
+ Q_DATA_INTERLACED_SEQ_TB | \
+ Q_DATA_INTERLACED_SEQ_BT)
enum {
Q_DATA_SRC = 0,
@@ -338,20 +346,25 @@ enum {
};
/* find our format description corresponding to the passed v4l2_format */
-static struct vpe_fmt *find_format(struct v4l2_format *f)
+static struct vpe_fmt *__find_format(u32 fourcc)
{
struct vpe_fmt *fmt;
unsigned int k;
for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
fmt = &vpe_formats[k];
- if (fmt->fourcc == f->fmt.pix.pixelformat)
+ if (fmt->fourcc == fourcc)
return fmt;
}
return NULL;
}
+static struct vpe_fmt *find_format(struct v4l2_format *f)
+{
+ return __find_format(f->fmt.pix.pixelformat);
+}
+
/*
* there is one vpe_dev structure in the driver, it is shared by
* all instances.
@@ -681,7 +694,8 @@ static void set_cfg_modes(struct vpe_ctx *ctx)
* Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
*/
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
cfg_mode = 0;
write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
@@ -696,7 +710,8 @@ static void set_line_modes(struct vpe_ctx *ctx)
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
int line_mode = 1;
- if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
line_mode = 0; /* double lines to line buffer */
/* regs for now */
@@ -741,11 +756,12 @@ static void set_src_registers(struct vpe_ctx *ctx)
static void set_dst_registers(struct vpe_ctx *ctx)
{
struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
- enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
+ const struct v4l2_format_info *finfo;
u32 val = 0;
- if (clrspc == V4L2_COLORSPACE_SRGB) {
+ finfo = v4l2_format_info(fmt->fourcc);
+ if (v4l2_is_format_rgb(finfo)) {
val |= VPE_RGB_OUT_SELECT;
vpdma_set_bg_color(ctx->dev->vpdma,
(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
@@ -758,7 +774,8 @@ static void set_dst_registers(struct vpe_ctx *ctx)
*/
val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;
- if (fmt->fourcc != V4L2_PIX_FMT_NV12)
+ if (fmt->fourcc != V4L2_PIX_FMT_NV12 &&
+ fmt->fourcc != V4L2_PIX_FMT_NV21)
val |= VPE_DS_BYPASS;
mmr_adb->out_fmt_reg[0] = val;
@@ -847,11 +864,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
unsigned int src_h = s_q_data->c_rect.height;
unsigned int dst_w = d_q_data->c_rect.width;
unsigned int dst_h = d_q_data->c_rect.height;
+ struct v4l2_pix_format_mplane *spix;
size_t mv_buf_size;
int ret;
ctx->sequence = 0;
ctx->field = V4L2_FIELD_TOP;
+ spix = &s_q_data->format.fmt.pix_mp;
if ((s_q_data->flags & Q_IS_INTERLACED) &&
!(d_q_data->flags & Q_IS_INTERLACED)) {
@@ -866,9 +885,9 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
* extra space will not be used by the de-interlacer, but will
* ensure that vpdma operates correctly
*/
- bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
- VPDMA_STRIDE_ALIGN);
- mv_buf_size = bytes_per_line * s_q_data->height;
+ bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
+ mv_buf_size = bytes_per_line * spix->height;
ctx->deinterlacing = true;
src_h <<= 1;
@@ -888,7 +907,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
set_dei_regs(ctx);
csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
- s_q_data->colorspace, d_q_data->colorspace);
+ &s_q_data->format, &d_q_data->format);
sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);
@@ -901,14 +920,6 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
}
/*
- * Return the vpe_ctx structure for a given struct file
- */
-static struct vpe_ctx *file2ctx(struct file *file)
-{
- return container_of(file->private_data, struct vpe_ctx, fh);
-}
-
-/*
* mem2mem callbacks
*/
@@ -1010,27 +1021,33 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
struct vpe_fmt *fmt = q_data->fmt;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = !ctx->src_mv_buf_selector;
+ struct v4l2_pix_format_mplane *pix;
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
if (port == VPE_PORT_MV_OUT) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
q_data = &ctx->q_data[Q_DATA_SRC];
+ pix = &q_data->format.fmt.pix_mp;
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
+ pix = &q_data->format.fmt.pix_mp;
vpdma_fmt = fmt->vpdma_fmt[plane];
/*
* If we are using a single plane buffer,
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1044,6 +1061,7 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
}
if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1054,8 +1072,8 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
MAX_W, MAX_H);
- vpdma_add_out_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
+ vpdma_add_out_dtd(&ctx->desc_list, pix->width,
+ stride, &q_data->c_rect,
vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
}
@@ -1067,6 +1085,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_fmt *fmt = q_data->fmt;
+ struct v4l2_pix_format_mplane *pix;
const struct vpdma_data_format *vpdma_fmt;
int mv_buf_selector = ctx->src_mv_buf_selector;
int field = vbuf->field == V4L2_FIELD_BOTTOM;
@@ -1074,10 +1093,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
dma_addr_t dma_addr;
u32 flags = 0;
u32 offset = 0;
+ u32 stride;
+ pix = &q_data->format.fmt.pix_mp;
if (port == VPE_PORT_MV_IN) {
vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+ stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3,
+ VPDMA_STRIDE_ALIGN);
} else {
/* to incorporate interleaved formats */
int plane = fmt->coplanar ? p_data->vb_part : 0;
@@ -1087,10 +1110,10 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
* If we are using a single plane buffer,
* we need to set a separate vpdma chroma channel.
*/
- if (q_data->nplanes == 1 && plane) {
+ if (pix->num_planes == 1 && plane) {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
/* Compute required offset */
- offset = q_data->bytesperline[0] * q_data->height;
+ offset = pix->plane_fmt[0].bytesperline * pix->height;
} else {
dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
/* Use address as is, no offset */
@@ -1104,26 +1127,39 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
}
/* Apply the offset */
dma_addr += offset;
+ stride = pix->plane_fmt[VPE_LUMA].bytesperline;
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
- /*
- * Use top or bottom field from same vb alternately
- * f,f-1,f-2 = TBT when seq is even
- * f,f-1,f-2 = BTB when seq is odd
- */
- field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+ /*
+ * The field used in the VPDMA descriptor is 0 (top) / 1 (bottom).
+ * Use the top or bottom field from the same vb alternately, so that
+ * for each de-interlacing operation f, f-1, f-2 form either TBT or BTB.
+ */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB ||
+ q_data->flags & Q_DATA_INTERLACED_SEQ_BT) {
+ /* Select initial value based on format */
+ if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT)
+ field = 1;
+ else
+ field = 0;
+
+ /* Toggle for each vb_index and each operation */
+ field = (field + p_data->vb_index + ctx->sequence) % 2;
if (field) {
- /*
- * bottom field of a SEQ_TB buffer
- * Skip the top field data by
- */
- int height = q_data->height / 2;
- int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
- 1 : (vpdma_fmt->depth >> 3);
+ int height = pix->height / 2;
+ int bpp;
+
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21)
+ bpp = 1;
+ else
+ bpp = vpdma_fmt->depth >> 3;
+
if (plane)
height /= 2;
- dma_addr += q_data->width * height * bpp;
+
+ dma_addr += pix->width * height * bpp;
}
}
}
@@ -1136,13 +1172,14 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
frame_width = q_data->c_rect.width;
frame_height = q_data->c_rect.height;
- if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
+ fmt->fourcc == V4L2_PIX_FMT_NV21))
frame_height /= 2;
- vpdma_add_in_dtd(&ctx->desc_list, q_data->width,
- q_data->bytesperline[VPE_LUMA], &q_data->c_rect,
- vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
- frame_height, 0, 0);
+ vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
+ &q_data->c_rect, vpdma_fmt, dma_addr,
+ p_data->channel, field, flags, frame_width,
+ frame_height, 0, 0);
}
/*
@@ -1176,13 +1213,18 @@ static void device_run(void *priv)
struct sc_data *sc = ctx->dev->sc;
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
-
- if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
- ctx->sequence % 2 == 0) {
- /* When using SEQ_TB buffers, When using it first time,
- * No need to remove the buffer as the next field is present
- * in the same buffer. (so that job_ready won't fail)
- * It will be removed when using bottom field
+ const struct v4l2_format_info *d_finfo;
+
+ d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);
+
+ if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
+ ctx->sequence % 2 == 0) {
+ /* When using SEQ_XX type buffers, each buffer has two fields
+ * (top & bottom), so consuming one buffer actually yields two
+ * fields. Alternate between two operations:
+ * Even : consume one field but DO NOT REMOVE from queue
+ * Odd : consume other field and REMOVE from queue
*/
ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[0] == NULL);
@@ -1246,7 +1288,7 @@ static void device_run(void *priv)
if (ctx->deinterlacing)
add_out_dtd(ctx, VPE_PORT_MV_OUT);
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
add_out_dtd(ctx, VPE_PORT_RGB_OUT);
} else {
add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
@@ -1288,7 +1330,7 @@ static void device_run(void *priv)
}
/* sync on channel control descriptors for output ports */
- if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
+ if (v4l2_is_format_rgb(d_finfo)) {
vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
VPE_CHAN_RGB_OUT);
} else {
@@ -1391,9 +1433,6 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
/* the previous dst mv buffer becomes the next src mv buffer */
ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;
- if (ctx->aborting)
- goto finished;
-
s_vb = ctx->src_vbs[0];
d_vb = ctx->dst_vb;
@@ -1404,6 +1443,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
d_vb->timecode = s_vb->timecode;
d_vb->sequence = ctx->sequence;
+ s_vb->sequence = ctx->sequence;
d_q_data = &ctx->q_data[Q_DATA_DST];
if (d_q_data->flags & Q_IS_INTERLACED) {
@@ -1457,6 +1497,9 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
ctx->src_vbs[0] = NULL;
ctx->dst_vb = NULL;
+ if (ctx->aborting)
+ goto finished;
+
ctx->bufs_completed++;
if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
device_run(ctx);
@@ -1519,38 +1562,32 @@ static int vpe_enum_fmt(struct file *file, void *priv,
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vb2_queue *vq;
struct vpe_q_data *q_data;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
- pix->width = q_data->width;
- pix->height = q_data->height;
- pix->pixelformat = q_data->fmt->fourcc;
- pix->field = q_data->field;
+ *f = q_data->format;
- if (V4L2_TYPE_IS_OUTPUT(f->type)) {
- pix->colorspace = q_data->colorspace;
- } else {
+ if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
struct vpe_q_data *s_q_data;
+ struct v4l2_pix_format_mplane *spix;
- /* get colorspace from the source queue */
+ /* get colorimetry from the source queue */
s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ spix = &s_q_data->format.fmt.pix_mp;
- pix->colorspace = s_q_data->colorspace;
- }
-
- pix->num_planes = q_data->nplanes;
-
- for (i = 0; i < pix->num_planes; i++) {
- pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
- pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ pix->colorspace = spix->colorspace;
+ pix->xfer_func = spix->xfer_func;
+ pix->ycbcr_enc = spix->ycbcr_enc;
+ pix->quantization = spix->quantization;
}
return 0;
@@ -1564,15 +1601,18 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
unsigned int w_align;
int i, depth, depth_bytes, height;
unsigned int stride = 0;
+ const struct v4l2_format_info *finfo;
if (!fmt || !(fmt->types & type)) {
- vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
+ vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
pix->pixelformat);
- return -EINVAL;
+ fmt = __find_format(V4L2_PIX_FMT_YUYV);
}
- if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
- && pix->field != V4L2_FIELD_SEQ_TB)
+ if (pix->field != V4L2_FIELD_NONE &&
+ pix->field != V4L2_FIELD_ALTERNATE &&
+ pix->field != V4L2_FIELD_SEQ_TB &&
+ pix->field != V4L2_FIELD_SEQ_BT)
pix->field = V4L2_FIELD_NONE;
depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1615,27 +1655,25 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
&pix->height, MIN_H, MAX_H, H_ALIGN,
S_ALIGN);
- if (!pix->num_planes)
+ if (!pix->num_planes || pix->num_planes > 2)
pix->num_planes = fmt->coplanar ? 2 : 1;
else if (pix->num_planes > 1 && !fmt->coplanar)
pix->num_planes = 1;
pix->pixelformat = fmt->fourcc;
+ finfo = v4l2_format_info(fmt->fourcc);
/*
* For the actual image parameters, we need to consider the field
- * height of the image for SEQ_TB buffers.
+ * height of the image for SEQ_XX buffers.
*/
- if (pix->field == V4L2_FIELD_SEQ_TB)
+ if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT)
height = pix->height / 2;
else
height = pix->height;
if (!pix->colorspace) {
- if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
- fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
- fmt->fourcc == V4L2_PIX_FMT_BGR32) {
+ if (v4l2_is_format_rgb(finfo)) {
pix->colorspace = V4L2_COLORSPACE_SRGB;
} else {
if (height > 1280) /* HD */
@@ -1654,6 +1692,10 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
if (stride > plane_fmt->bytesperline)
plane_fmt->bytesperline = stride;
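+ /* clamp the stride between the minimum required and the VPDMA maximum */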
+ plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline,
+ stride,
+ VPDMA_MAX_STRIDE);
+
plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline,
VPDMA_STRIDE_ALIGN);
@@ -1679,7 +1721,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_fmt *fmt = find_format(f);
if (V4L2_TYPE_IS_OUTPUT(f->type))
@@ -1691,10 +1733,9 @@ static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct v4l2_plane_pix_format *plane_fmt;
+ struct v4l2_pix_format_mplane *qpix;
struct vpe_q_data *q_data;
struct vb2_queue *vq;
- int i;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
@@ -1709,42 +1750,34 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
if (!q_data)
return -EINVAL;
+ qpix = &q_data->format.fmt.pix_mp;
q_data->fmt = find_format(f);
- q_data->width = pix->width;
- q_data->height = pix->height;
- q_data->colorspace = pix->colorspace;
- q_data->field = pix->field;
- q_data->nplanes = pix->num_planes;
-
- for (i = 0; i < pix->num_planes; i++) {
- plane_fmt = &pix->plane_fmt[i];
-
- q_data->bytesperline[i] = plane_fmt->bytesperline;
- q_data->sizeimage[i] = plane_fmt->sizeimage;
- }
+ q_data->format = *f;
q_data->c_rect.left = 0;
q_data->c_rect.top = 0;
- q_data->c_rect.width = q_data->width;
- q_data->c_rect.height = q_data->height;
+ q_data->c_rect.width = pix->width;
+ q_data->c_rect.height = pix->height;
- if (q_data->field == V4L2_FIELD_ALTERNATE)
+ if (qpix->field == V4L2_FIELD_ALTERNATE)
q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
- else if (q_data->field == V4L2_FIELD_SEQ_TB)
+ else if (qpix->field == V4L2_FIELD_SEQ_TB)
q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
+ else if (qpix->field == V4L2_FIELD_SEQ_BT)
+ q_data->flags |= Q_DATA_INTERLACED_SEQ_BT;
else
q_data->flags &= ~Q_IS_INTERLACED;
- /* the crop height is halved for the case of SEQ_TB buffers */
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+ /* the crop height is halved for the case of SEQ_XX buffers */
+ if (q_data->flags & Q_IS_SEQ_XX)
q_data->c_rect.height /= 2;
vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
- f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
- q_data->bytesperline[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ f->type, pix->width, pix->height, pix->pixelformat,
+ pix->plane_fmt[0].bytesperline);
+ if (pix->num_planes == 2)
vpe_dbg(ctx->dev, " bpl_uv %d\n",
- q_data->bytesperline[VPE_CHROMA]);
+ pix->plane_fmt[1].bytesperline);
return 0;
}
@@ -1752,7 +1785,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
int ret;
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
ret = vpe_try_fmt(file, priv, f);
if (ret)
@@ -1773,6 +1806,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
{
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
int height;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1783,6 +1817,8 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE:
/*
@@ -1809,27 +1845,27 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
}
/*
- * For SEQ_TB buffers, crop height should be less than the height of
+ * For SEQ_XX buffers, crop height should be less than the height of
* the field height, not the buffer height
*/
- if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
- height = q_data->height / 2;
+ if (q_data->flags & Q_IS_SEQ_XX)
+ height = pix->height / 2;
else
- height = q_data->height;
+ height = pix->height;
if (s->r.top < 0 || s->r.left < 0) {
vpe_err(ctx->dev, "negative values for top and left\n");
s->r.top = s->r.left = 0;
}
- v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
+ v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1,
&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
/* adjust left/top if cropping rectangle is out of bounds */
- if (s->r.left + s->r.width > q_data->width)
- s->r.left = q_data->width - s->r.width;
- if (s->r.top + s->r.height > q_data->height)
- s->r.top = q_data->height - s->r.height;
+ if (s->r.left + s->r.width > pix->width)
+ s->r.left = pix->width - s->r.width;
+ if (s->r.top + s->r.height > pix->height)
+ s->r.top = pix->height - s->r.height;
return 0;
}
@@ -1837,8 +1873,9 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
static int vpe_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
bool use_c_rect = false;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
@@ -1849,6 +1886,8 @@ static int vpe_g_selection(struct file *file, void *fh,
if (!q_data)
return -EINVAL;
+ pix = &q_data->format.fmt.pix_mp;
+
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -1887,8 +1926,8 @@ static int vpe_g_selection(struct file *file, void *fh,
*/
s->r.left = 0;
s->r.top = 0;
- s->r.width = q_data->width;
- s->r.height = q_data->height;
+ s->r.width = pix->width;
+ s->r.height = pix->height;
}
return 0;
@@ -1898,7 +1937,7 @@ static int vpe_g_selection(struct file *file, void *fh,
static int vpe_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
struct vpe_q_data *q_data;
struct v4l2_selection sel = *s;
int ret;
@@ -1991,17 +2030,21 @@ static int vpe_queue_setup(struct vb2_queue *vq,
int i;
struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
struct vpe_q_data *q_data;
+ struct v4l2_pix_format_mplane *pix;
q_data = get_q_data(ctx, vq->type);
+ if (!q_data)
+ return -EINVAL;
- *nplanes = q_data->nplanes;
+ pix = &q_data->format.fmt.pix_mp;
+ *nplanes = pix->num_planes;
for (i = 0; i < *nplanes; i++)
- sizes[i] = q_data->sizeimage[i];
+ sizes[i] = pix->plane_fmt[i].sizeimage;
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
- if (q_data->nplanes == 2)
+ if (*nplanes == 2)
vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
return 0;
@@ -2012,12 +2055,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vpe_q_data *q_data;
- int i, num_planes;
+ struct v4l2_pix_format_mplane *pix;
+ int i;
vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(ctx, vb->vb2_queue->type);
- num_planes = q_data->nplanes;
+ if (!q_data)
+ return -EINVAL;
+
+ pix = &q_data->format.fmt.pix_mp;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (!(q_data->flags & Q_IS_INTERLACED)) {
@@ -2025,23 +2072,24 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
} else {
if (vbuf->field != V4L2_FIELD_TOP &&
vbuf->field != V4L2_FIELD_BOTTOM &&
- vbuf->field != V4L2_FIELD_SEQ_TB)
+ vbuf->field != V4L2_FIELD_SEQ_TB &&
+ vbuf->field != V4L2_FIELD_SEQ_BT)
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++) {
- if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ for (i = 0; i < pix->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
vpe_err(ctx->dev,
"data will not fit into plane (%lu < %lu)\n",
vb2_plane_size(vb, i),
- (long) q_data->sizeimage[i]);
+ (long)pix->plane_fmt[i].sizeimage);
return -EINVAL;
}
}
- for (i = 0; i < num_planes; i++)
- vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
+ for (i = 0; i < pix->num_planes; i++)
+ vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);
return 0;
}
@@ -2226,6 +2274,7 @@ static int vpe_open(struct file *file)
struct vpe_q_data *s_q_data;
struct v4l2_ctrl_handler *hdl;
struct vpe_ctx *ctx;
+ struct v4l2_pix_format_mplane *pix;
int ret;
vpe_dbg(dev, "vpe_open\n");
@@ -2261,7 +2310,7 @@ static int vpe_open(struct file *file)
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
+ file->private_data = ctx;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 1);
@@ -2274,23 +2323,32 @@ static int vpe_open(struct file *file)
v4l2_ctrl_handler_setup(hdl);
s_q_data = &ctx->q_data[Q_DATA_SRC];
- s_q_data->fmt = &vpe_formats[2];
- s_q_data->width = 1920;
- s_q_data->height = 1080;
- s_q_data->nplanes = 1;
- s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
+ pix = &s_q_data->format.fmt.pix_mp;
+ s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV);
+ pix->pixelformat = s_q_data->fmt->fourcc;
+ s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ pix->width = 1920;
+ pix->height = 1080;
+ pix->num_planes = 1;
+ pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width *
s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
- s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
- s_q_data->height);
- s_q_data->colorspace = V4L2_COLORSPACE_REC709;
- s_q_data->field = V4L2_FIELD_NONE;
+ pix->plane_fmt[VPE_LUMA].sizeimage =
+ pix->plane_fmt[VPE_LUMA].bytesperline *
+ pix->height;
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ pix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pix->field = V4L2_FIELD_NONE;
s_q_data->c_rect.left = 0;
s_q_data->c_rect.top = 0;
- s_q_data->c_rect.width = s_q_data->width;
- s_q_data->c_rect.height = s_q_data->height;
+ s_q_data->c_rect.width = pix->width;
+ s_q_data->c_rect.height = pix->height;
s_q_data->flags = 0;
ctx->q_data[Q_DATA_DST] = *s_q_data;
+ ctx->q_data[Q_DATA_DST].format.type =
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
set_dei_shadow_registers(ctx);
set_src_registers(ctx);
@@ -2346,12 +2404,18 @@ free_ctx:
static int vpe_release(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = file2ctx(file);
+ struct vpe_ctx *ctx = file->private_data;
vpe_dbg(dev, "releasing instance %p\n", ctx);
mutex_lock(&dev->dev_mutex);
free_mv_buffers(ctx);
+
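+ /* unmap the descriptor buffers before freeing them below */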
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
+ vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);
+
vpdma_free_desc_list(&ctx->desc_list);
vpdma_free_desc_buf(&ctx->mmr_adb);
@@ -2459,6 +2523,13 @@ static int vpe_probe(struct platform_device *pdev)
struct vpe_dev *dev;
int ret, irq, func;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return ret;
+ }
+
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -2473,7 +2544,12 @@ static int vpe_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "vpe_top");
+ "vpe_top");
+ if (!dev->res) {
+ dev_err(&pdev->dev, "missing 'vpe_top' resource data\n");
+ return -ENODEV;
+ }
+
/*
* HACK: we get resource info from device tree in the form of a list of
* VPE sub blocks, the driver currently uses only the base of vpe_top
@@ -2568,7 +2644,7 @@ static int vpe_remove(struct platform_device *pdev)
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
{
- .compatible = "ti,vpe",
+ .compatible = "ti,dra7-vpe",
},
{},
};
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index 0ee143ae0f6b..82350097503e 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -2139,6 +2139,9 @@ static void vicodec_v4l2_dev_release(struct v4l2_device *v4l2_dev)
v4l2_m2m_release(dev->stateful_enc.m2m_dev);
v4l2_m2m_release(dev->stateful_dec.m2m_dev);
v4l2_m2m_release(dev->stateless_dec.m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -2250,7 +2253,6 @@ static int vicodec_remove(struct platform_device *pdev)
v4l2_m2m_unregister_media_controller(dev->stateful_enc.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateful_dec.m2m_dev);
v4l2_m2m_unregister_media_controller(dev->stateless_dec.m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->stateful_enc.vfd);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index acd3bd48c7e2..8d6b09623d88 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -1073,6 +1073,9 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
if (!q_data)
return -EINVAL;
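+ /* starting a new run, clear any abort left over from the previous one */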
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->aborting = 0;
+
q_data->sequence = 0;
return 0;
}
@@ -1272,6 +1275,9 @@ static void vim2m_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_m2m_release(dev->m2m_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
kfree(dev);
}
@@ -1343,6 +1349,7 @@ static int vim2m_probe(struct platform_device *pdev)
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
+ dev->m2m_dev = NULL;
goto error_dev;
}
@@ -1395,7 +1402,6 @@ static int vim2m_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
- media_device_cleanup(&dev->mdev);
#endif
video_unregister_device(&dev->vfd);
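
Both the vicodec and vim2m hunks above move media_device_cleanup() from remove() into the release callback. A minimal sketch (not part of the patch; the demo_ names are hypothetical) of the ordering this enforces: cleanup runs only once the last open file handle is gone.

#include <linux/slab.h>
#include <media/media-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct demo_dev {
	struct v4l2_device	v4l2_dev;
	struct video_device	vfd;
	struct media_device	mdev;
};

static void demo_device_release(struct video_device *vdev)
{
	struct demo_dev *dev = container_of(vdev, struct demo_dev, vfd);

	v4l2_device_unregister(&dev->v4l2_dev);
#ifdef CONFIG_MEDIA_CONTROLLER
	/* Safe here: no open handles can still reach the media device. */
	media_device_cleanup(&dev->mdev);
#endif
	kfree(dev);
}
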
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 96d06f030c31..a53b2b532e9f 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-vimc-y := vimc-core.o vimc-common.o vimc-streamer.o
+vimc-y := vimc-core.o vimc-common.o vimc-streamer.o vimc-capture.o \
+ vimc-debayer.o vimc-scaler.o vimc-sensor.o
+
-obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc-capture.o vimc-debayer.o \
- vimc-scaler.o vimc-sensor.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 1d56b91830ba..76c015898cfd 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
@@ -16,12 +12,9 @@
#include "vimc-common.h"
#include "vimc-streamer.h"
-#define VIMC_CAP_DRV_NAME "vimc-capture"
-
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
- struct device *dev;
struct v4l2_pix_format format;
struct vb2_queue queue;
struct list_head buf_list;
@@ -36,6 +29,7 @@ struct vimc_cap_device {
struct mutex lock;
u32 sequence;
struct vimc_stream stream;
+ struct media_pad pad;
};
static const struct v4l2_pix_format fmt_default = {
@@ -130,7 +124,7 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- dev_dbg(vcap->dev, "%s: format update: "
+ dev_dbg(vcap->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
/* old */
@@ -306,7 +300,7 @@ static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
unsigned long size = vcap->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(vcap->dev, "%s: buffer too small (%lu < %lu)\n",
+ dev_err(vcap->ved.dev, "%s: buffer too small (%lu < %lu)\n",
vcap->vdev.name, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
@@ -328,7 +322,7 @@ static const struct vb2_ops vimc_cap_qops = {
};
static const struct media_entity_operations vimc_cap_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = vimc_vdev_link_validate,
};
static void vimc_cap_release(struct video_device *vdev)
@@ -336,19 +330,16 @@ static void vimc_cap_release(struct video_device *vdev)
struct vimc_cap_device *vcap =
container_of(vdev, struct vimc_cap_device, vdev);
- vimc_pads_cleanup(vcap->ved.pads);
+ media_entity_cleanup(vcap->ved.ent);
kfree(vcap);
}
-static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
- ved);
+ struct vimc_cap_device *vcap;
+ vcap = container_of(ved, struct vimc_cap_device, ved);
vb2_queue_release(&vcap->queue);
- media_entity_cleanup(ved->ent);
video_unregister_device(&vcap->vdev);
}
@@ -391,11 +382,10 @@ static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
return NULL;
}
-static int vimc_cap_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
@@ -405,23 +395,16 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
/* Allocate the vimc_cap_device struct */
vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
if (!vcap)
- return -ENOMEM;
-
- /* Allocate the pads */
- vcap->ved.pads =
- vimc_pads_init(1, (const unsigned long[1]) {MEDIA_PAD_FL_SINK});
- if (IS_ERR(vcap->ved.pads)) {
- ret = PTR_ERR(vcap->ved.pads);
- goto err_free_vcap;
- }
+ return NULL;
/* Initialize the media entity */
- vcap->vdev.entity.name = pdata->entity_name;
+ vcap->vdev.entity.name = vcfg_name;
vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
+ vcap->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vcap->vdev.entity,
- 1, vcap->ved.pads);
+ 1, &vcap->pad);
if (ret)
- goto err_clean_pads;
+ goto err_free_vcap;
/* Initialize the lock */
mutex_init(&vcap->lock);
@@ -440,8 +423,8 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
ret = vb2_queue_init(q);
if (ret) {
- dev_err(comp, "%s: vb2 queue init failed (err=%d)\n",
- pdata->entity_name, ret);
+ dev_err(&vimc->pdev.dev, "%s: vb2 queue init failed (err=%d)\n",
+ vcfg_name, ret);
goto err_clean_m_ent;
}
@@ -460,8 +443,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vcap->ved.ent = &vcap->vdev.entity;
vcap->ved.process_frame = vimc_cap_process_frame;
vcap->ved.vdev_get_format = vimc_cap_get_format;
- dev_set_drvdata(comp, &vcap->ved);
- vcap->dev = comp;
+ vcap->ved.dev = &vimc->pdev.dev;
/* Initialize the video_device struct */
vdev = &vcap->vdev;
@@ -474,68 +456,25 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master,
vdev->queue = q;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
- strscpy(vdev->name, pdata->entity_name, sizeof(vdev->name));
+ strscpy(vdev->name, vcfg_name, sizeof(vdev->name));
video_set_drvdata(vdev, &vcap->ved);
/* Register the video_device with the v4l2 and the media framework */
ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
- dev_err(comp, "%s: video register failed (err=%d)\n",
+ dev_err(&vimc->pdev.dev, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
goto err_release_queue;
}
- return 0;
+ return &vcap->ved;
err_release_queue:
vb2_queue_release(q);
err_clean_m_ent:
media_entity_cleanup(&vcap->vdev.entity);
-err_clean_pads:
- vimc_pads_cleanup(vcap->ved.pads);
err_free_vcap:
kfree(vcap);
- return ret;
-}
-
-static const struct component_ops vimc_cap_comp_ops = {
- .bind = vimc_cap_comp_bind,
- .unbind = vimc_cap_comp_unbind,
-};
-
-static int vimc_cap_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_cap_comp_ops);
-}
-
-static int vimc_cap_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_cap_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_cap_driver_ids[] = {
- {
- .name = VIMC_CAP_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_cap_pdrv = {
- .probe = vimc_cap_probe,
- .remove = vimc_cap_remove,
- .id_table = vimc_cap_driver_ids,
- .driver = {
- .name = VIMC_CAP_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_cap_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Capture");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index 7e1ae0b12f1e..16ce9f3b7c75 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -164,6 +164,16 @@ static const struct vimc_pix_map vimc_pix_map_list[] = {
},
};
+bool vimc_is_source(struct media_entity *ent)
+{
+ unsigned int i;
+
+ for (i = 0; i < ent->num_pads; i++)
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SINK)
+ return false;
+ return true;
+}
+
const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
{
if (i >= ARRAY_SIZE(vimc_pix_map_list))
@@ -171,7 +181,6 @@ const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
return &vimc_pix_map_list[i];
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
{
@@ -183,7 +192,6 @@ const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
{
@@ -195,87 +203,37 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
}
return NULL;
}
-EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
-
-/* Helper function to allocate and initialize pads */
-struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
-{
- struct media_pad *pads;
- unsigned int i;
-
- /* Allocate memory for the pads */
- pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
- if (!pads)
- return ERR_PTR(-ENOMEM);
-
- /* Initialize the pads */
- for (i = 0; i < num_pads; i++) {
- pads[i].index = i;
- pads[i].flags = pads_flag[i];
- }
-
- return pads;
-}
-EXPORT_SYMBOL_GPL(vimc_pads_init);
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
-{
- struct v4l2_subdev *sd;
- struct media_pad *pad;
- unsigned int i;
- int ret;
-
- for (i = 0; i < ent->num_pads; i++) {
- if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
- continue;
-
- /* Start the stream in the subdevice direct connected */
- pad = media_entity_remote_pad(&ent->pads[i]);
- if (!pad)
- continue;
-
- if (!is_media_entity_v4l2_subdev(pad->entity))
- return -EINVAL;
-
- sd = media_entity_to_v4l2_subdev(pad->entity);
- ret = v4l2_subdev_call(sd, video, s_stream, enable);
- if (ret && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vimc_pipeline_s_stream);
-
-static int vimc_get_mbus_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
+static int vimc_get_pix_format(struct media_pad *pad,
+ struct v4l2_pix_format *fmt)
{
if (is_media_entity_v4l2_subdev(pad->entity)) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(pad->entity);
+ struct v4l2_subdev_format sd_fmt;
+ const struct vimc_pix_map *pix_map;
int ret;
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
+ sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ sd_fmt.pad = pad->index;
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
if (ret)
return ret;
+ v4l2_fill_pix_format(fmt, &sd_fmt.format);
+ pix_map = vimc_pix_map_by_code(sd_fmt.format.code);
+ fmt->pixelformat = pix_map->pixelformat;
} else if (is_media_entity_v4l2_video_device(pad->entity)) {
struct video_device *vdev = container_of(pad->entity,
struct video_device,
entity);
struct vimc_ent_device *ved = video_get_drvdata(vdev);
- const struct vimc_pix_map *vpix;
- struct v4l2_pix_format vdev_fmt;
if (!ved->vdev_get_format)
return -ENOIOCTLCMD;
- ved->vdev_get_format(ved, &vdev_fmt);
- vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
- v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ ved->vdev_get_format(ved, fmt);
} else {
return -EINVAL;
}
@@ -283,16 +241,16 @@ static int vimc_get_mbus_format(struct media_pad *pad,
return 0;
}
-int vimc_link_validate(struct media_link *link)
+int vimc_vdev_link_validate(struct media_link *link)
{
- struct v4l2_subdev_format source_fmt, sink_fmt;
+ struct v4l2_pix_format source_fmt, sink_fmt;
int ret;
- ret = vimc_get_mbus_format(link->source, &source_fmt);
+ ret = vimc_get_pix_format(link->source, &source_fmt);
if (ret)
return ret;
- ret = vimc_get_mbus_format(link->sink, &sink_fmt);
+ ret = vimc_get_pix_format(link->sink, &sink_fmt);
if (ret)
return ret;
@@ -301,21 +259,21 @@ int vimc_link_validate(struct media_link *link)
"%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n",
/* src */
link->source->entity->name,
- source_fmt.format.width, source_fmt.format.height,
- source_fmt.format.code, source_fmt.format.colorspace,
- source_fmt.format.quantization, source_fmt.format.xfer_func,
- source_fmt.format.ycbcr_enc,
+ source_fmt.width, source_fmt.height,
+ source_fmt.pixelformat, source_fmt.colorspace,
+ source_fmt.quantization, source_fmt.xfer_func,
+ source_fmt.ycbcr_enc,
/* sink */
link->sink->entity->name,
- sink_fmt.format.width, sink_fmt.format.height,
- sink_fmt.format.code, sink_fmt.format.colorspace,
- sink_fmt.format.quantization, sink_fmt.format.xfer_func,
- sink_fmt.format.ycbcr_enc);
-
- /* The width, height and code must match. */
- if (source_fmt.format.width != sink_fmt.format.width
- || source_fmt.format.height != sink_fmt.format.height
- || source_fmt.format.code != sink_fmt.format.code)
+ sink_fmt.width, sink_fmt.height,
+ sink_fmt.pixelformat, sink_fmt.colorspace,
+ sink_fmt.quantization, sink_fmt.xfer_func,
+ sink_fmt.ycbcr_enc);
+
+ /* The width, height and pixelformat must match. */
+ if (source_fmt.width != sink_fmt.width ||
+ source_fmt.height != sink_fmt.height ||
+ source_fmt.pixelformat != sink_fmt.pixelformat)
return -EPIPE;
/*
@@ -323,44 +281,43 @@ int vimc_link_validate(struct media_link *link)
* to support interlaced hardware connected to bridges that support
* progressive formats only.
*/
- if (source_fmt.format.field != sink_fmt.format.field &&
- sink_fmt.format.field != V4L2_FIELD_NONE)
+ if (source_fmt.field != sink_fmt.field &&
+ sink_fmt.field != V4L2_FIELD_NONE)
return -EPIPE;
/*
* If colorspace is DEFAULT, then assume all the colorimetry is also
* DEFAULT, return 0 to skip comparing the other colorimetry parameters
*/
- if (source_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT
- || sink_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ if (source_fmt.colorspace == V4L2_COLORSPACE_DEFAULT ||
+ sink_fmt.colorspace == V4L2_COLORSPACE_DEFAULT)
return 0;
/* Colorspace must match. */
- if (source_fmt.format.colorspace != sink_fmt.format.colorspace)
+ if (source_fmt.colorspace != sink_fmt.colorspace)
return -EPIPE;
/* Colorimetry must match if they are not set to DEFAULT */
- if (source_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && sink_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
- && source_fmt.format.ycbcr_enc != sink_fmt.format.ycbcr_enc)
+ if (source_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ sink_fmt.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT &&
+ source_fmt.ycbcr_enc != sink_fmt.ycbcr_enc)
return -EPIPE;
- if (source_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && sink_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
- && source_fmt.format.quantization != sink_fmt.format.quantization)
+ if (source_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ sink_fmt.quantization != V4L2_QUANTIZATION_DEFAULT &&
+ source_fmt.quantization != sink_fmt.quantization)
return -EPIPE;
- if (source_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && sink_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
- && source_fmt.format.xfer_func != sink_fmt.format.xfer_func)
+ if (source_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ sink_fmt.xfer_func != V4L2_XFER_FUNC_DEFAULT &&
+ source_fmt.xfer_func != sink_fmt.xfer_func)
return -EPIPE;
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_link_validate);
static const struct media_entity_operations vimc_ent_sd_mops = {
- .link_validate = vimc_link_validate,
+ .link_validate = v4l2_subdev_link_validate,
};
int vimc_ent_sd_register(struct vimc_ent_device *ved,
@@ -369,17 +326,12 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops)
{
int ret;
- /* Allocate the pads */
- ved->pads = vimc_pads_init(num_pads, pads_flag);
- if (IS_ERR(ved->pads))
- return PTR_ERR(ved->pads);
-
/* Fill the vimc_ent_device struct */
ved->ent = &sd->entity;
@@ -398,9 +350,9 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
/* Initialize the media entity */
- ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ ret = media_entity_pads_init(&sd->entity, num_pads, pads);
if (ret)
- goto err_clean_pads;
+ return ret;
/* Register the subdev with the v4l2 and the media framework */
ret = v4l2_device_register_subdev(v4l2_dev, sd);
@@ -415,16 +367,5 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
err_clean_m_ent:
media_entity_cleanup(&sd->entity);
-err_clean_pads:
- vimc_pads_cleanup(ved->pads);
return ret;
}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
-
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
-{
- media_entity_cleanup(ved->ent);
- vimc_pads_cleanup(ved->pads);
- v4l2_device_unregister_subdev(sd);
-}
-EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
index 9c2e0e216c6b..87eb8259c2a8 100644
--- a/drivers/media/platform/vimc/vimc-common.h
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -8,6 +8,7 @@
#ifndef _VIMC_COMMON_H_
#define _VIMC_COMMON_H_
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -18,6 +19,7 @@
#define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000)
#define VIMC_CID_VIMC_CLASS (0x00f00000 | 1)
#define VIMC_CID_TEST_PATTERN (VIMC_CID_VIMC_BASE + 0)
+#define VIMC_CID_MEAN_WIN_SIZE (VIMC_CID_VIMC_BASE + 1)
#define VIMC_FRAME_MAX_WIDTH 4096
#define VIMC_FRAME_MAX_HEIGHT 2160
@@ -26,6 +28,10 @@
#define VIMC_FRAME_INDEX(lin, col, width, bpp) ((lin * width + col) * bpp)
+/* Source and sink pad checks */
+#define VIMC_IS_SRC(pad) (pad)
+#define VIMC_IS_SINK(pad) (!(pad))
+
/**
* struct vimc_colorimetry_clamp - Adjust colorimetry parameters
*
@@ -53,21 +59,6 @@ do { \
} while (0)
/**
- * struct vimc_platform_data - platform data to components
- *
- * @entity_name: The name of the entity to be created
- *
- * Board setup code will often provide additional information using the device's
- * platform_data field to hold additional information.
- * When injecting a new platform_device in the component system the core needs
- * to provide to the corresponding submodules the name of the entity that should
- * be used when registering the subdevice in the Media Controller system.
- */
-struct vimc_platform_data {
- char entity_name[32];
-};
-
-/**
* struct vimc_pix_map - maps media bus code with v4l2 pixel format
*
* @code: media bus format code defined by MEDIA_BUS_FMT_* macros
@@ -85,10 +76,11 @@ struct vimc_pix_map {
};
/**
- * struct vimc_ent_device - core struct that represents a node in the topology
+ * struct vimc_ent_device - core struct that represents an entity in the
+ * topology
*
+ * @dev: a pointer to the device struct of the driver
* @ent: the pointer to struct media_entity for the node
- * @pads: the list of pads of the node
* @process_frame: callback that sends a frame to that node
* @vdev_get_format: callback that returns the current format of a pad, used
* only when is_media_entity_v4l2_video_device(ent) returns
@@ -103,8 +95,8 @@ struct vimc_pix_map {
* media_entity
*/
struct vimc_ent_device {
+ struct device *dev;
struct media_entity *ent;
- struct media_pad *pads;
void * (*process_frame)(struct vimc_ent_device *ved,
const void *frame);
void (*vdev_get_format)(struct vimc_ent_device *ved,
@@ -112,38 +104,65 @@ struct vimc_ent_device {
};
/**
- * vimc_pads_init - initialize pads
- *
- * @num_pads: number of pads to initialize
- * @pads_flags: flags to use in each pad
+ * struct vimc_device - main device for vimc driver
*
- * Helper functions to allocate/initialize pads
+ * @pdev: pointer to the platform device
+ * @pipe_cfg: pointer to the vimc pipeline configuration structure
+ * @ent_devs: array of vimc_ent_device pointers
+ * @mdev: the associated media_device parent
+ * @v4l2_dev: internal v4l2 parent device
*/
-struct media_pad *vimc_pads_init(u16 num_pads,
- const unsigned long *pads_flag);
+struct vimc_device {
+ struct platform_device pdev;
+ const struct vimc_pipeline_config *pipe_cfg;
+ struct vimc_ent_device **ent_devs;
+ struct media_device mdev;
+ struct v4l2_device v4l2_dev;
+};
/**
- * vimc_pads_cleanup - free pads
- *
- * @pads: pointer to the pads
- *
- * Helper function to free the pads initialized with vimc_pads_init
+ * struct vimc_ent_config - describes the individual configuration of
+ * each entity
+ *
+ * @name: entity name
+ * @add: subdev add hook - initializes and registers
+ * the subdev, called from vimc-core
+ * @rm: subdev rm hook - unregisters and frees
+ * the subdev, called from vimc-core
*/
-static inline void vimc_pads_cleanup(struct media_pad *pads)
-{
- kfree(pads);
-}
+struct vimc_ent_config {
+ const char *name;
+ struct vimc_ent_device *(*add)(struct vimc_device *vimc,
+ const char *vcfg_name);
+ void (*rm)(struct vimc_device *vimc, struct vimc_ent_device *ved);
+};
/**
- * vimc_pipeline_s_stream - start stream through the pipeline
+ * vimc_is_source - returns true if the entity has only source pads
*
- * @ent: the pointer to struct media_entity for the node
- * @enable: 1 to start the stream and 0 to stop
+ * @ent: pointer to &struct media_entity
*
- * Helper function to call the s_stream of the subdevices connected
- * in all the sink pads of the entity
*/
-int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
+bool vimc_is_source(struct media_entity *ent);
+
+/* prototypes for vimc_ent_config add and rm hooks */
+struct vimc_ent_device *vimc_cap_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_cap_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
+
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name);
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved);
/**
* vimc_pix_map_by_index - get vimc_pix_map struct by its index
@@ -176,7 +195,8 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
* unique.
* @function: media entity function defined by MEDIA_ENT_F_* macros
* @num_pads: number of pads to initialize
- * @pads_flag: flags to use in each pad
+ * @pads: the array of pads of the entity; the caller should set the
+ * flags of the pads
* @sd_int_ops: pointer to &struct v4l2_subdev_internal_ops
* @sd_ops: pointer to &struct v4l2_subdev_ops.
*
@@ -189,29 +209,17 @@ int vimc_ent_sd_register(struct vimc_ent_device *ved,
const char *const name,
u32 function,
u16 num_pads,
- const unsigned long *pads_flag,
+ struct media_pad *pads,
const struct v4l2_subdev_internal_ops *sd_int_ops,
const struct v4l2_subdev_ops *sd_ops);
/**
- * vimc_ent_sd_unregister - cleanup and unregister a subdev node
- *
- * @ved: the vimc_ent_device struct to be cleaned up
- * @sd: the v4l2_subdev struct to be unregistered
- *
- * Helper function cleanup and unregister the struct vimc_ent_device and struct
- * v4l2_subdev which represents a subdev node in the topology
- */
-void vimc_ent_sd_unregister(struct vimc_ent_device *ved,
- struct v4l2_subdev *sd);
-
-/**
- * vimc_link_validate - validates a media link
+ * vimc_vdev_link_validate - validates a media link
*
* @link: pointer to &struct media_link
*
* This function validates whether a media link is valid for streaming.
*/
-int vimc_link_validate(struct media_link *link);
+int vimc_vdev_link_validate(struct media_link *link);
#endif
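
To make the new header interface concrete, a minimal sketch (not part of the patch; demo_cfg and demo_register() are hypothetical names) of how a vimc_ent_config entry ties the add/rm hooks declared above into the monolithic driver.

static struct vimc_ent_config demo_cfg = {
	.name = "Sensor A",
	.add = vimc_sen_add,	/* allocates and registers the subdev */
	.rm = vimc_sen_rm,	/* unregisters and frees it again */
};

static int demo_register(struct vimc_device *vimc)
{
	struct vimc_ent_device *ved;

	ved = demo_cfg.add(vimc, demo_cfg.name);
	if (!ved)
		return -EINVAL;
	/* ... create media links, register the media device ... */
	demo_cfg.rm(vimc, ved);
	return 0;
}
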
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 571c55aa0e16..97a272f3350a 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -5,7 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -24,29 +23,6 @@
.flags = link_flags, \
}
-struct vimc_device {
- /* The platform device */
- struct platform_device pdev;
-
- /* The pipeline configuration */
- const struct vimc_pipeline_config *pipe_cfg;
-
- /* The Associated media_device parent */
- struct media_device mdev;
-
- /* Internal v4l2 parent device*/
- struct v4l2_device v4l2_dev;
-
- /* Subdevices */
- struct platform_device **subdevs;
-};
-
-/* Structure which describes individual configuration for each entity */
-struct vimc_ent_config {
- const char *name;
- const char *drv;
-};
-
/* Structure which describes links between entities */
struct vimc_ent_link {
unsigned int src_ent;
@@ -68,43 +44,52 @@ struct vimc_pipeline_config {
* Topology Configuration
*/
-static const struct vimc_ent_config ent_config[] = {
+static struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Sensor B",
- .drv = "vimc-sensor",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Debayer A",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Debayer B",
- .drv = "vimc-debayer",
+ .add = vimc_deb_add,
+ .rm = vimc_deb_rm,
},
{
.name = "Raw Capture 0",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
.name = "Raw Capture 1",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
{
- .name = "RGB/YUV Input",
/* TODO: change this to vimc-input when it is implemented */
- .drv = "vimc-sensor",
+ .name = "RGB/YUV Input",
+ .add = vimc_sen_add,
+ .rm = vimc_sen_rm,
},
{
.name = "Scaler",
- .drv = "vimc-scaler",
+ .add = vimc_sca_add,
+ .rm = vimc_sca_rm,
},
{
.name = "RGB/YUV Capture",
- .drv = "vimc-capture",
+ .add = vimc_cap_add,
+ .rm = vimc_cap_rm,
},
};
@@ -127,7 +112,7 @@ static const struct vimc_ent_link ent_links[] = {
VIMC_ENT_LINK(7, 1, 8, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE),
};
-static const struct vimc_pipeline_config pipe_cfg = {
+static struct vimc_pipeline_config pipe_cfg = {
.ents = ent_config,
.num_ents = ARRAY_SIZE(ent_config),
.links = ent_links,
@@ -136,6 +121,14 @@ static const struct vimc_pipeline_config pipe_cfg = {
/* -------------------------------------------------------------------------- */
+static void vimc_rm_links(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ media_entity_remove_links(vimc->ent_devs[i]->ent);
+}
+
static int vimc_create_links(struct vimc_device *vimc)
{
unsigned int i;
@@ -144,32 +137,56 @@ static int vimc_create_links(struct vimc_device *vimc)
/* Initialize the links between entities */
for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
- /*
- * TODO: Check another way of retrieving ved struct without
- * relying on platform_get_drvdata
- */
+
struct vimc_ent_device *ved_src =
- platform_get_drvdata(vimc->subdevs[link->src_ent]);
+ vimc->ent_devs[link->src_ent];
struct vimc_ent_device *ved_sink =
- platform_get_drvdata(vimc->subdevs[link->sink_ent]);
+ vimc->ent_devs[link->sink_ent];
ret = media_create_pad_link(ved_src->ent, link->src_pad,
ved_sink->ent, link->sink_pad,
link->flags);
if (ret)
- return ret;
+ goto err_rm_links;
}
return 0;
+
+err_rm_links:
+ vimc_rm_links(vimc);
+ return ret;
}
-static int vimc_comp_bind(struct device *master)
+static int vimc_add_subdevs(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
- int ret;
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(&vimc->pdev.dev, "new entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ vimc->ent_devs[i] = vimc->pipe_cfg->ents[i].add(vimc,
+ vimc->pipe_cfg->ents[i].name);
+ if (!vimc->ent_devs[i]) {
+ dev_err(&vimc->pdev.dev, "failed to add entity for %s\n",
+ vimc->pipe_cfg->ents[i].name);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void vimc_rm_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ if (vimc->ent_devs[i])
+ vimc->pipe_cfg->ents[i].rm(vimc, vimc->ent_devs[i]);
+}
- dev_dbg(master, "bind");
+static int vimc_register_devices(struct vimc_device *vimc)
+{
+ int ret;
/* Register the v4l2 struct */
ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
@@ -179,22 +196,31 @@ static int vimc_comp_bind(struct device *master)
return ret;
}
- /* Bind subdevices */
- ret = component_bind_all(master, &vimc->v4l2_dev);
- if (ret)
+ /* allocate ent_devs */
+ vimc->ent_devs = kcalloc(vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->ent_devs), GFP_KERNEL);
+ if (!vimc->ent_devs) {
+ ret = -ENOMEM;
goto err_v4l2_unregister;
+ }
+
+ /* Invoke entity config hooks to initialize and register subdevs */
+ ret = vimc_add_subdevs(vimc);
+ if (ret)
+ /* remove subdevs that got added */
+ goto err_rm_subdevs;
/* Initialize links */
ret = vimc_create_links(vimc);
if (ret)
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
/* Register the media device */
ret = media_device_register(&vimc->mdev);
if (ret) {
dev_err(vimc->mdev.dev,
"media device register failed (err=%d)\n", ret);
- goto err_comp_unbind_all;
+ goto err_rm_subdevs;
}
/* Expose all subdevs' nodes */
@@ -211,98 +237,32 @@ static int vimc_comp_bind(struct device *master)
err_mdev_unregister:
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
-err_comp_unbind_all:
- component_unbind_all(master, NULL);
+err_rm_subdevs:
+ vimc_rm_subdevs(vimc);
+ kfree(vimc->ent_devs);
err_v4l2_unregister:
v4l2_device_unregister(&vimc->v4l2_dev);
return ret;
}
-static void vimc_comp_unbind(struct device *master)
+static void vimc_unregister(struct vimc_device *vimc)
{
- struct vimc_device *vimc = container_of(to_platform_device(master),
- struct vimc_device, pdev);
-
- dev_dbg(master, "unbind");
-
media_device_unregister(&vimc->mdev);
media_device_cleanup(&vimc->mdev);
- component_unbind_all(master, NULL);
v4l2_device_unregister(&vimc->v4l2_dev);
+ kfree(vimc->ent_devs);
}
-static int vimc_comp_compare(struct device *comp, void *data)
-{
- return comp == data;
-}
-
-static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
-{
- struct component_match *match = NULL;
- struct vimc_platform_data pdata;
- int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- dev_dbg(&vimc->pdev.dev, "new pdev for %s\n",
- vimc->pipe_cfg->ents[i].drv);
-
- strscpy(pdata.entity_name, vimc->pipe_cfg->ents[i].name,
- sizeof(pdata.entity_name));
-
- vimc->subdevs[i] = platform_device_register_data(&vimc->pdev.dev,
- vimc->pipe_cfg->ents[i].drv,
- PLATFORM_DEVID_AUTO,
- &pdata,
- sizeof(pdata));
- if (IS_ERR(vimc->subdevs[i])) {
- match = ERR_CAST(vimc->subdevs[i]);
- while (--i >= 0)
- platform_device_unregister(vimc->subdevs[i]);
-
- return match;
- }
-
- component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
- &vimc->subdevs[i]->dev);
- }
-
- return match;
-}
-
-static void vimc_rm_subdevs(struct vimc_device *vimc)
-{
- unsigned int i;
-
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
- platform_device_unregister(vimc->subdevs[i]);
-}
-
-static const struct component_master_ops vimc_comp_ops = {
- .bind = vimc_comp_bind,
- .unbind = vimc_comp_unbind,
-};
-
static int vimc_probe(struct platform_device *pdev)
{
struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
- struct component_match *match = NULL;
int ret;
dev_dbg(&pdev->dev, "probe");
memset(&vimc->mdev, 0, sizeof(vimc->mdev));
- /* Create platform_device for each entity in the topology*/
- vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
- sizeof(*vimc->subdevs), GFP_KERNEL);
- if (!vimc->subdevs)
- return -ENOMEM;
-
- match = vimc_add_subdevs(vimc);
- if (IS_ERR(match))
- return PTR_ERR(match);
-
/* Link the media device within the v4l2_device */
vimc->v4l2_dev.mdev = &vimc->mdev;
@@ -314,12 +274,9 @@ static int vimc_probe(struct platform_device *pdev)
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
- /* Add self to the component system */
- ret = component_master_add_with_match(&pdev->dev, &vimc_comp_ops,
- match);
+ ret = vimc_register_devices(vimc);
if (ret) {
media_device_cleanup(&vimc->mdev);
- vimc_rm_subdevs(vimc);
return ret;
}
@@ -332,8 +289,8 @@ static int vimc_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove");
- component_master_del(&pdev->dev, &vimc_comp_ops);
vimc_rm_subdevs(vimc);
+ vimc_unregister(vimc);
return 0;
}
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index b72b8385067b..5d1b67d684bb 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -5,28 +5,16 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_DEB_DRV_NAME "vimc-debayer"
-
-static unsigned int deb_mean_win_size = 3;
-module_param(deb_mean_win_size, uint, 0000);
-MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
- "NOTE: the window size needs to be an odd number, as the main pixel "
- "stays in the center of the window, otherwise the next odd number "
- "is considered");
-
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
-
enum vimc_deb_rgb_colors {
VIMC_DEB_RED = 0,
VIMC_DEB_GREEN = 1,
@@ -41,7 +29,6 @@ struct vimc_deb_pix_map {
struct vimc_deb_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* The active format */
struct v4l2_mbus_framefmt sink_fmt;
u32 src_code;
@@ -51,6 +38,9 @@ struct vimc_deb_device {
u8 *src_frame;
const struct vimc_deb_pix_map *sink_pix_map;
unsigned int sink_bpp;
+ unsigned int mean_win_size;
+ struct v4l2_ctrl_handler hdl;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -159,7 +149,7 @@ static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
/* We only support one format for source pads */
- if (IS_SRC(code->pad)) {
+ if (VIMC_IS_SRC(code->pad)) {
struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
if (code->index)
@@ -185,7 +175,7 @@ static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index)
return -EINVAL;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
const struct vimc_deb_pix_map *vpix =
vimc_deb_pix_map_by_code(fse->code);
@@ -215,7 +205,7 @@ static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
vdeb->sink_fmt;
/* Set the right code for the source pad */
- if (IS_SRC(fmt->pad))
+ if (VIMC_IS_SRC(fmt->pad))
fmt->format.code = vdeb->src_code;
return 0;
@@ -262,7 +252,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
/* TODO: Add support for other formats */
fmt->format.code = vdeb->src_code;
@@ -270,7 +260,7 @@ static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_deb_adjust_sink_fmt(&fmt->format);
- dev_dbg(vdeb->dev, "%s: sink format update: "
+ dev_dbg(vdeb->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
/* old */
@@ -351,11 +341,18 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+static const struct v4l2_subdev_core_ops vimc_deb_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops vimc_deb_video_ops = {
.s_stream = vimc_deb_s_stream,
};
static const struct v4l2_subdev_ops vimc_deb_ops = {
+ .core = &vimc_deb_core_ops,
.pad = &vimc_deb_pad_ops,
.video = &vimc_deb_video_ops,
};
@@ -389,11 +386,11 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
* the top left corner of the mean window (considering the current
* pixel as the center)
*/
- seek = deb_mean_win_size / 2;
+ seek = vdeb->mean_win_size / 2;
/* Sum the values of the colors in the mean window */
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
vdeb->sd.name, lin, col, vdeb->sink_fmt.height, seek);
@@ -426,7 +423,7 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
vdeb->sink_fmt.width,
vdeb->sink_bpp);
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
vdeb->sd.name, index, wlin, wcol, color);
@@ -437,21 +434,21 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
/* Save how many values we already added */
n_rgb[color]++;
- dev_dbg(vdeb->dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ dev_dbg(vdeb->ved.dev, "deb: %s: RGB CALC: val %d, n %d\n",
vdeb->sd.name, rgb[color], n_rgb[color]);
}
}
/* Calculate the mean */
for (i = 0; i < 3; i++) {
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
if (n_rgb[i])
rgb[i] = rgb[i] / n_rgb[i];
- dev_dbg(vdeb->dev,
+ dev_dbg(vdeb->ved.dev,
"deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
vdeb->sd.name, lin, col, i, rgb[i]);
}
@@ -476,14 +473,34 @@ static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
}
return vdeb->src_frame;
+}
+static int vimc_deb_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vimc_deb_device *vdeb =
+ container_of(ctrl->handler, struct vimc_deb_device, hdl);
+
+ switch (ctrl->id) {
+ case VIMC_CID_MEAN_WIN_SIZE:
+ vdeb->mean_win_size = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
+static const struct v4l2_ctrl_ops vimc_deb_ctrl_ops = {
+ .s_ctrl = vimc_deb_s_ctrl,
+};
+
static void vimc_deb_release(struct v4l2_subdev *sd)
{
struct vimc_deb_device *vdeb =
container_of(sd, struct vimc_deb_device, sd);
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+ media_entity_cleanup(vdeb->ved.ent);
kfree(vdeb);
}
@@ -491,44 +508,69 @@ static const struct v4l2_subdev_internal_ops vimc_deb_int_ops = {
.release = vimc_deb_release,
};
-static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_deb_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
- ved);
+ struct vimc_deb_device *vdeb;
- vimc_ent_sd_unregister(ved, &vdeb->sd);
+ vdeb = container_of(ved, struct vimc_deb_device, ved);
+ v4l2_device_unregister_subdev(&vdeb->sd);
}
-static int vimc_deb_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+static const struct v4l2_ctrl_config vimc_deb_ctrl_class = {
+ .flags = V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY,
+ .id = VIMC_CID_VIMC_CLASS,
+ .name = "VIMC Controls",
+ .type = V4L2_CTRL_TYPE_CTRL_CLASS,
+};
+
+static const struct v4l2_ctrl_config vimc_deb_ctrl_mean_win_size = {
+ .ops = &vimc_deb_ctrl_ops,
+ .id = VIMC_CID_MEAN_WIN_SIZE,
+ .name = "Debayer Mean Window Size",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 1,
+ .max = 25,
+ .step = 2,
+ .def = 3,
+};
+
+struct vimc_ent_device *vimc_deb_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_deb_device *vdeb;
int ret;
/* Allocate the vdeb struct */
vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
if (!vdeb)
- return -ENOMEM;
+ return NULL;
+
+ /* Create controls: */
+ v4l2_ctrl_handler_init(&vdeb->hdl, 2);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_class, NULL);
+ v4l2_ctrl_new_custom(&vdeb->hdl, &vimc_deb_ctrl_mean_win_size, NULL);
+ vdeb->sd.ctrl_handler = &vdeb->hdl;
+ if (vdeb->hdl.error) {
+ ret = vdeb->hdl.error;
+ goto err_free_vdeb;
+ }
/* Initialize ved and sd */
+ vdeb->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vdeb->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vdeb->pads,
&vimc_deb_int_ops, &vimc_deb_ops);
- if (ret) {
- kfree(vdeb);
- return ret;
- }
+ if (ret)
+ goto err_free_hdl;
vdeb->ved.process_frame = vimc_deb_process_frame;
- dev_set_drvdata(comp, &vdeb->ved);
- vdeb->dev = comp;
+ vdeb->ved.dev = &vimc->pdev.dev;
+ vdeb->mean_win_size = vimc_deb_ctrl_mean_win_size.def;
/* Initialize the frame format */
vdeb->sink_fmt = sink_fmt_default;
@@ -541,46 +583,12 @@ static int vimc_deb_comp_bind(struct device *comp, struct device *master,
vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
- return 0;
-}
-
-static const struct component_ops vimc_deb_comp_ops = {
- .bind = vimc_deb_comp_bind,
- .unbind = vimc_deb_comp_unbind,
-};
+ return &vdeb->ved;
-static int vimc_deb_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_deb_comp_ops);
-}
-
-static int vimc_deb_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_deb_comp_ops);
+err_free_hdl:
+ v4l2_ctrl_handler_free(&vdeb->hdl);
+err_free_vdeb:
+ kfree(vdeb);
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_deb_driver_ids[] = {
- {
- .name = VIMC_DEB_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_deb_pdrv = {
- .probe = vimc_deb_probe,
- .remove = vimc_deb_remove,
- .id_table = vimc_deb_driver_ids,
- .driver = {
- .name = VIMC_DEB_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_deb_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Debayer");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
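
The debayer's mean window size is no longer a module parameter but a V4L2 control (integer, 1..25, step 2, default 3). A hypothetical userspace sketch (not from the patch): the numeric id assumes VIMC_CID_MEAN_WIN_SIZE == (0x00f00000 | 0xf000) + 1 per vimc-common.h, and /dev/v4l-subdevN is assumed to be the debayer subdevice node.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_mean_win_size(const char *subdev, int size)
{
	struct v4l2_control ctrl = {
		.id = 0x00f0f001,	/* VIMC_CID_MEAN_WIN_SIZE */
		.value = size,		/* odd value within 1..25 */
	};
	int fd = open(subdev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}
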
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index 49ab8d9dd9c9..2f88a7d9d67b 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -5,30 +5,22 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
-#define VIMC_SCA_DRV_NAME "vimc-scaler"
-
static unsigned int sca_mult = 3;
module_param(sca_mult, uint, 0000);
MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-#define IS_SINK(pad) (!pad)
-#define IS_SRC(pad) (pad)
#define MAX_ZOOM 8
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
/* NOTE: the source fmt is the same as the sink
* with the width and height multiplied by mult
*/
@@ -37,6 +29,7 @@ struct vimc_sca_device {
u8 *src_frame;
unsigned int src_line_size;
unsigned int bpp;
+ struct media_pad pads[2];
};
static const struct v4l2_mbus_framefmt sink_fmt_default = {
@@ -98,7 +91,7 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (IS_SINK(fse->pad)) {
+ if (VIMC_IS_SINK(fse->pad)) {
fse->max_width = VIMC_FRAME_MAX_WIDTH;
fse->max_height = VIMC_FRAME_MAX_HEIGHT;
} else {
@@ -121,7 +114,7 @@ static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
vsca->sink_fmt;
/* Scale the frame size for the source pad */
- if (IS_SRC(format->pad)) {
+ if (VIMC_IS_SRC(format->pad)) {
format->format.width = vsca->sink_fmt.width * sca_mult;
format->format.height = vsca->sink_fmt.height * sca_mult;
}
@@ -170,7 +163,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
* Do not change the format of the source pad,
* it is propagated from the sink
*/
- if (IS_SRC(fmt->pad)) {
+ if (VIMC_IS_SRC(fmt->pad)) {
fmt->format = *sink_fmt;
fmt->format.width = sink_fmt->width * sca_mult;
fmt->format.height = sink_fmt->height * sca_mult;
@@ -178,7 +171,7 @@ static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
/* Set the new format in the sink pad */
vimc_sca_adjust_sink_fmt(&fmt->format);
- dev_dbg(vsca->dev, "%s: sink format update: "
+ dev_dbg(vsca->ved.dev, "%s: sink format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
/* old */
@@ -278,7 +271,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
vsca->bpp);
pixel = &sink_frame[index];
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
vsca->sd.name, lin, col, index);
@@ -288,7 +281,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
vsca->sink_fmt.width * sca_mult, vsca->bpp);
- dev_dbg(vsca->dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
+ dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
vsca->sd.name, lin * sca_mult, col * sca_mult, index);
/* Repeat this pixel mult times */
@@ -297,7 +290,7 @@ static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
* pixel repetition in a line
*/
for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->dev,
+ dev_dbg(vsca->ved.dev,
"sca: %s: sca: scale_pix src pos %d\n",
vsca->sd.name, index + j);
@@ -343,6 +336,7 @@ static void vimc_sca_release(struct v4l2_subdev *sd)
struct vimc_sca_device *vsca =
container_of(sd, struct vimc_sca_device, sd);
+ media_entity_cleanup(vsca->ved.ent);
kfree(vsca);
}
@@ -350,89 +344,45 @@ static const struct v4l2_subdev_internal_ops vimc_sca_int_ops = {
.release = vimc_sca_release,
};
-static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sca_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
- ved);
+ struct vimc_sca_device *vsca;
- vimc_ent_sd_unregister(ved, &vsca->sd);
+ vsca = container_of(ved, struct vimc_sca_device, ved);
+ v4l2_device_unregister_subdev(&vsca->sd);
}
-
-static int vimc_sca_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sca_device *vsca;
int ret;
/* Allocate the vsca struct */
vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
if (!vsca)
- return -ENOMEM;
+ return NULL;
/* Initialize ved and sd */
+ vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
- pdata->entity_name,
+ vcfg_name,
MEDIA_ENT_F_PROC_VIDEO_SCALER, 2,
- (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
+ vsca->pads,
&vimc_sca_int_ops, &vimc_sca_ops);
if (ret) {
kfree(vsca);
- return ret;
+ return NULL;
}
vsca->ved.process_frame = vimc_sca_process_frame;
- dev_set_drvdata(comp, &vsca->ved);
- vsca->dev = comp;
+ vsca->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsca->sink_fmt = sink_fmt_default;
- return 0;
-}
-
-static const struct component_ops vimc_sca_comp_ops = {
- .bind = vimc_sca_comp_bind,
- .unbind = vimc_sca_comp_unbind,
-};
-
-static int vimc_sca_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sca_comp_ops);
-}
-
-static int vimc_sca_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sca_comp_ops);
-
- return 0;
+ return &vsca->ved;
}
-
-static const struct platform_device_id vimc_sca_driver_ids[] = {
- {
- .name = VIMC_SCA_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sca_pdrv = {
- .probe = vimc_sca_probe,
- .remove = vimc_sca_remove,
- .id_table = vimc_sca_driver_ids,
- .driver = {
- .name = VIMC_SCA_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sca_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Scaler");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 6c53b9fc1617..32380f504591 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -5,10 +5,6 @@
* Copyright (C) 2015-2017 Helen Koike <helen.fornazier@gmail.com>
*/
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
#include <media/v4l2-ctrls.h>
@@ -18,18 +14,15 @@
#include "vimc-common.h"
-#define VIMC_SEN_DRV_NAME "vimc-sensor"
-
struct vimc_sen_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- struct device *dev;
struct tpg_data tpg;
- struct task_struct *kthread_sen;
u8 *frame;
/* The active format */
struct v4l2_mbus_framefmt mbus_format;
struct v4l2_ctrl_handler hdl;
+ struct media_pad pad;
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -164,7 +157,7 @@ static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
/* Set the new format */
vimc_sen_adjust_fmt(&fmt->format);
- dev_dbg(vsen->dev, "%s: format update: "
+ dev_dbg(vsen->ved.dev, "%s: format update: "
"old:%dx%d (0x%x, %d, %d, %d, %d) "
"new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
/* old */
@@ -208,10 +201,6 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
const struct vimc_pix_map *vpix;
unsigned int frame_size;
- if (vsen->kthread_sen)
- /* tpg is already executing */
- return 0;
-
/* Calculate the frame size */
vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
frame_size = vsen->mbus_format.width * vpix->bpp *
@@ -297,6 +286,7 @@ static void vimc_sen_release(struct v4l2_subdev *sd)
v4l2_ctrl_handler_free(&vsen->hdl);
tpg_free(&vsen->tpg);
+ media_entity_cleanup(vsen->ved.ent);
kfree(vsen);
}
@@ -304,14 +294,12 @@ static const struct v4l2_subdev_internal_ops vimc_sen_int_ops = {
.release = vimc_sen_release,
};
-static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
- void *master_data)
+void vimc_sen_rm(struct vimc_device *vimc, struct vimc_ent_device *ved)
{
- struct vimc_ent_device *ved = dev_get_drvdata(comp);
- struct vimc_sen_device *vsen =
- container_of(ved, struct vimc_sen_device, ved);
+ struct vimc_sen_device *vsen;
- vimc_ent_sd_unregister(ved, &vsen->sd);
+ vsen = container_of(ved, struct vimc_sen_device, ved);
+ v4l2_device_unregister_subdev(&vsen->sd);
}
/* Image Processing Controls */
@@ -331,18 +319,17 @@ static const struct v4l2_ctrl_config vimc_sen_ctrl_test_pattern = {
.qmenu = tpg_pattern_strings,
};
-static int vimc_sen_comp_bind(struct device *comp, struct device *master,
- void *master_data)
+struct vimc_ent_device *vimc_sen_add(struct vimc_device *vimc,
+ const char *vcfg_name)
{
- struct v4l2_device *v4l2_dev = master_data;
- struct vimc_platform_data *pdata = comp->platform_data;
+ struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
struct vimc_sen_device *vsen;
int ret;
/* Allocate the vsen struct */
vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
if (!vsen)
- return -ENOMEM;
+ return NULL;
v4l2_ctrl_handler_init(&vsen->hdl, 4);
@@ -366,78 +353,36 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
goto err_free_vsen;
}
+ /* Initialize the test pattern generator */
+ tpg_init(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height);
+ ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
+ if (ret)
+ goto err_free_hdl;
+
/* Initialize ved and sd */
+ vsen->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
- pdata->entity_name,
- MEDIA_ENT_F_CAM_SENSOR, 1,
- (const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
+ vcfg_name,
+ MEDIA_ENT_F_CAM_SENSOR, 1, &vsen->pad,
&vimc_sen_int_ops, &vimc_sen_ops);
if (ret)
- goto err_free_hdl;
+ goto err_free_tpg;
vsen->ved.process_frame = vimc_sen_process_frame;
- dev_set_drvdata(comp, &vsen->ved);
- vsen->dev = comp;
+ vsen->ved.dev = &vimc->pdev.dev;
/* Initialize the frame format */
vsen->mbus_format = fmt_default;
- /* Initialize the test pattern generator */
- tpg_init(&vsen->tpg, vsen->mbus_format.width,
- vsen->mbus_format.height);
- ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
- if (ret)
- goto err_unregister_ent_sd;
-
- return 0;
+ return &vsen->ved;
-err_unregister_ent_sd:
- vimc_ent_sd_unregister(&vsen->ved, &vsen->sd);
+err_free_tpg:
+ tpg_free(&vsen->tpg);
err_free_hdl:
v4l2_ctrl_handler_free(&vsen->hdl);
err_free_vsen:
kfree(vsen);
- return ret;
-}
-
-static const struct component_ops vimc_sen_comp_ops = {
- .bind = vimc_sen_comp_bind,
- .unbind = vimc_sen_comp_unbind,
-};
-
-static int vimc_sen_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &vimc_sen_comp_ops);
-}
-
-static int vimc_sen_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &vimc_sen_comp_ops);
-
- return 0;
+ return NULL;
}
-
-static const struct platform_device_id vimc_sen_driver_ids[] = {
- {
- .name = VIMC_SEN_DRV_NAME,
- },
- { }
-};
-
-static struct platform_driver vimc_sen_pdrv = {
- .probe = vimc_sen_probe,
- .remove = vimc_sen_remove,
- .id_table = vimc_sen_driver_ids,
- .driver = {
- .name = VIMC_SEN_DRV_NAME,
- },
-};
-
-module_platform_driver(vimc_sen_pdrv);
-
-MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
-
-MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Sensor");
-MODULE_AUTHOR("Helen Mae Koike Fornazier <helen.fornazier@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
index 048d770e498b..cd6b55433c9e 100644
--- a/drivers/media/platform/vimc/vimc-streamer.c
+++ b/drivers/media/platform/vimc/vimc-streamer.c
@@ -7,7 +7,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -97,17 +96,26 @@ static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
sd = media_entity_to_v4l2_subdev(ved->ent);
ret = v4l2_subdev_call(sd, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
- pr_err("subdev_call error %s\n",
- ved->ent->name);
+ dev_err(ved->dev, "subdev_call error %s\n",
+ ved->ent->name);
vimc_streamer_pipeline_terminate(stream);
return ret;
}
}
entity = vimc_get_source_entity(ved->ent);
- /* Check if the end of the pipeline was reached*/
- if (!entity)
+ /* Check if the end of the pipeline was reached */
+ if (!entity) {
+ /* the first entity of the pipe should be source only */
+ if (!vimc_is_source(ved->ent)) {
+ dev_err(ved->dev,
+ "first entity in the pipe '%s' is not a source\n",
+ ved->ent->name);
+ vimc_streamer_pipeline_terminate(stream);
+ return -EPIPE;
+ }
return 0;
+ }
/* Get the next device in the pipeline */
if (is_media_entity_v4l2_subdev(entity)) {
@@ -217,4 +225,3 @@ int vimc_streamer_s_stream(struct vimc_stream *stream,
return 0;
}
-EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 2f5762e3309a..e8a50c506dc9 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,7 +3,7 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o
+ vivid-osd.o vivid-meta-cap.o vivid-meta-out.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index 4d822dbed972..4d2413e87730 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -276,12 +276,11 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source)
{
- char name[sizeof(dev->vid_out_dev.name) + 2];
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN;
+ char name[32];
- snprintf(name, sizeof(name), "%s%d",
- is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
- idx);
+ snprintf(name, sizeof(name), "vivid-%03d-vid-%s%d",
+ dev->inst, is_source ? "out" : "cap", idx);
return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
name, caps, 1);
}
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 53315c8dd2bb..c184f9b0be69 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -37,6 +37,8 @@
#include "vivid-osd.h"
#include "vivid-cec.h"
#include "vivid-ctrls.h"
+#include "vivid-meta-cap.h"
+#include "vivid-meta-out.h"
#define VIVID_MODULE_NAME "vivid"
@@ -79,6 +81,14 @@ static int radio_tx_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(radio_tx_nr, int, NULL, 0444);
MODULE_PARM_DESC(radio_tx_nr, " radioX start number, -1 is autodetect");
+static int meta_cap_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_cap_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_cap_nr, " videoX start number, -1 is autodetect");
+
+static int meta_out_nr[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
+module_param_array(meta_out_nr, int, NULL, 0444);
+MODULE_PARM_DESC(meta_out_nr, " videoX start number, -1 is autodetect");
+
static int ccs_cap_mode[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = -1 };
module_param_array(ccs_cap_mode, int, NULL, 0444);
MODULE_PARM_DESC(ccs_cap_mode, " capture crop/compose/scale mode:\n"
@@ -95,10 +105,15 @@ static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1
module_param_array(multiplanar, uint, NULL, 0444);
MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
-/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
-static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
+/*
+ * Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr +
+ * vbi-out + vid-out + meta-cap
+ */
+static unsigned int node_types[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 0x61d3d
+};
module_param_array(node_types, uint, NULL, 0444);
-MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the following meaning:\n"
+MODULE_PARM_DESC(node_types, " node types, default is 0x61d3d. Bitmask with the following meaning:\n"
"\t\t bit 0: Video Capture node\n"
"\t\t bit 2-3: VBI Capture node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 4: Radio Receiver node\n"
@@ -106,7 +121,9 @@ MODULE_PARM_DESC(node_types, " node types, default is 0x1d3d. Bitmask with the f
"\t\t bit 8: Video Output node\n"
"\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 12: Radio Transmitter node\n"
- "\t\t bit 16: Framebuffer for testing overlays");
+ "\t\t bit 16: Framebuffer for testing overlays\n"
+ "\t\t bit 17: Metadata Capture node\n"
+ "\t\t bit 18: Metadata Output node\n");
/* Default: 4 inputs */
static unsigned num_inputs[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 4 };
@@ -205,7 +222,8 @@ static int vidioc_querycap(struct file *file, void *priv,
cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
dev->vbi_cap_caps | dev->vbi_out_caps |
dev->radio_rx_caps | dev->radio_tx_caps |
- dev->sdr_cap_caps | V4L2_CAP_DEVICE_CAPS;
+ dev->sdr_cap_caps | dev->meta_cap_caps |
+ dev->meta_out_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -433,7 +451,9 @@ static bool vivid_is_last_user(struct vivid_dev *dev)
vivid_is_in_use(&dev->vbi_out_dev) +
vivid_is_in_use(&dev->sdr_cap_dev) +
vivid_is_in_use(&dev->radio_rx_dev) +
- vivid_is_in_use(&dev->radio_tx_dev);
+ vivid_is_in_use(&dev->radio_tx_dev) +
+ vivid_is_in_use(&dev->meta_cap_dev) +
+ vivid_is_in_use(&dev->meta_out_dev);
return uses == 1;
}
@@ -459,6 +479,8 @@ static int vivid_fop_release(struct file *file)
set_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
set_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
+ set_bit(V4L2_FL_REGISTERED, &dev->meta_out_dev.flags);
}
mutex_unlock(&dev->mutex);
if (file->private_data == dev->overlay_cap_owner)
@@ -604,6 +626,16 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_log_status = vidioc_log_status,
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_enum_fmt_meta_cap = vidioc_enum_fmt_meta_cap,
+ .vidioc_g_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_s_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+ .vidioc_try_fmt_meta_cap = vidioc_g_fmt_meta_cap,
+
+ .vidioc_enum_fmt_meta_out = vidioc_enum_fmt_meta_out,
+ .vidioc_g_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = vidioc_g_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = vidioc_g_fmt_meta_out,
};
/* -----------------------------------------------------------------
@@ -616,6 +648,9 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
vivid_free_controls(dev);
v4l2_device_unregister(&dev->v4l2_dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_cleanup(&dev->mdev);
+#endif
vfree(dev->scaled_line);
vfree(dev->blended_line);
vfree(dev->edid);
@@ -645,14 +680,44 @@ static const struct media_device_ops vivid_media_ops = {
};
#endif
+static int vivid_create_queue(struct vivid_dev *dev,
+ struct vb2_queue *q,
+ u32 buf_type,
+ unsigned int min_buffers_needed,
+ const struct vb2_ops *ops)
+{
+ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VIDEO_OUTPUT && dev->multiplanar)
+ buf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE && !dev->has_raw_vbi_cap)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+ else if (buf_type == V4L2_BUF_TYPE_VBI_OUTPUT && !dev->has_raw_vbi_out)
+ buf_type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
+
+ q->type = buf_type;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->io_modes |= V4L2_TYPE_IS_OUTPUT(buf_type) ? VB2_WRITE : VB2_READ;
+ if (allocators[dev->inst] != 1)
+ q->io_modes |= VB2_USERPTR;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct vivid_buffer);
+ q->ops = ops;
+ q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
+ &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = min_buffers_needed;
+ q->lock = &dev->mutex;
+ q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
+
+ return vb2_queue_init(q);
+}
+
static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
V4L2_DV_BT_CEA_1280X720P60;
- static const struct vb2_mem_ops * const vivid_mem_ops[2] = {
- &vb2_vmalloc_memops,
- &vb2_dma_contig_memops,
- };
unsigned in_type_counter[4] = { 0, 0, 0, 0 };
unsigned out_type_counter[4] = { 0, 0, 0, 0 };
int ccs_cap = ccs_cap_mode[inst];
@@ -661,9 +726,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
bool has_modulator;
struct vivid_dev *dev;
struct video_device *vfd;
- struct vb2_queue *q;
unsigned node_type = node_types[inst];
- unsigned int allocator = allocators[inst];
v4l2_std_id tvnorms_cap = 0, tvnorms_out = 0;
int ret;
int i;
@@ -758,6 +821,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_cap = dev->has_raw_vbi_cap | dev->has_sliced_vbi_cap;
}
+ /* do we create a meta capture device */
+ dev->has_meta_cap = node_type & 0x20000;
+
+ /* sanity checks */
+ if ((in_type_counter[WEBCAM] || in_type_counter[HDMI]) &&
+ !dev->has_vid_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "Webcam or HDMI input without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if ((in_type_counter[TV] || in_type_counter[SVID]) &&
+ !dev->has_vid_cap && !dev->has_vbi_cap && !dev->has_meta_cap) {
+ v4l2_warn(&dev->v4l2_dev,
+ "TV or S-Video input without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a video output device? */
dev->has_vid_out = node_type & 0x0100;
@@ -768,6 +850,24 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->has_vbi_out = dev->has_raw_vbi_out | dev->has_sliced_vbi_out;
}
+ /* do we create a metadata output device */
+ dev->has_meta_out = node_type & 0x40000;
+
+ /* sanity checks */
+ if (out_type_counter[SVID] &&
+ !dev->has_vid_out && !dev->has_vbi_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "S-Video output without video, VBI or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+ if (out_type_counter[HDMI] && !dev->has_vid_out && !dev->has_meta_out) {
+ v4l2_warn(&dev->v4l2_dev,
+ "HDMI output without video or metadata nodes\n");
+ kfree(dev);
+ return -EINVAL;
+ }
+
/* do we create a radio receiver device? */
dev->has_radio_rx = node_type & 0x0010;
@@ -777,6 +877,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* do we create a software defined radio capture device? */
dev->has_sdr_cap = node_type & 0x0020;
+ /* do we have a TV tuner? */
+ dev->has_tv_tuner = in_type_counter[TV];
+
/* do we have a tuner? */
has_tuner = ((dev->has_vid_cap || dev->has_vbi_cap) && in_type_counter[TV]) ||
dev->has_radio_rx || dev->has_sdr_cap;
@@ -828,7 +931,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vid_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vid_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vid_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vid_out) {
@@ -849,7 +952,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->vbi_cap_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
if (dev->has_audio_inputs)
dev->vbi_cap_caps |= V4L2_CAP_AUDIO;
- if (in_type_counter[TV])
+ if (dev->has_tv_tuner)
dev->vbi_cap_caps |= V4L2_CAP_TUNER;
}
if (dev->has_vbi_out) {
@@ -875,6 +978,23 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->radio_tx_caps = V4L2_CAP_RDS_OUTPUT | V4L2_CAP_MODULATOR |
V4L2_CAP_READWRITE;
+ /* set up the capabilities of meta capture device */
+ if (dev->has_meta_cap) {
+ dev->meta_cap_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_inputs)
+ dev->meta_cap_caps |= V4L2_CAP_AUDIO;
+ if (dev->has_tv_tuner)
+ dev->meta_cap_caps |= V4L2_CAP_TUNER;
+ }
+ /* set up the capabilities of meta output device */
+ if (dev->has_meta_out) {
+ dev->meta_out_caps = V4L2_CAP_META_OUTPUT |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ if (dev->has_audio_outputs)
+ dev->meta_out_caps |= V4L2_CAP_AUDIO;
+ }
+
ret = -ENOMEM;
/* initialize the test pattern generator */
tpg_init(&dev->tpg, 640, 360);
@@ -934,6 +1054,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_AUDIO);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_ENUMAUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_AUDIO);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_ENUMAUDIO);
}
if (!dev->has_audio_outputs) {
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_AUDOUT);
@@ -942,6 +1065,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_AUDOUT);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_ENUMAUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_AUDOUT);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_ENUMAUDOUT);
}
if (!in_type_counter[TV] && !in_type_counter[SVID]) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_STD);
@@ -959,12 +1085,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_FREQUENCY);
}
if (!has_tuner) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_G_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_TUNER);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_G_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_TUNER);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_G_TUNER);
}
if (in_type_counter[HDMI] == 0) {
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_EDID);
@@ -990,12 +1120,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_disable_ioctl(&dev->vid_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vbi_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->sdr_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
+ v4l2_disable_ioctl(&dev->meta_cap_dev, VIDIOC_S_HW_FREQ_SEEK);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_G_FREQUENCY);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMESIZES);
v4l2_disable_ioctl(&dev->vid_out_dev, VIDIOC_ENUM_FRAMEINTERVALS);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_S_FREQUENCY);
v4l2_disable_ioctl(&dev->vbi_out_dev, VIDIOC_G_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_S_FREQUENCY);
+ v4l2_disable_ioctl(&dev->meta_out_dev, VIDIOC_G_FREQUENCY);
/* configure internal data */
dev->fmt_cap = &vivid_formats[0];
@@ -1078,6 +1211,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->vbi_cap_active);
INIT_LIST_HEAD(&dev->vbi_out_active);
INIT_LIST_HEAD(&dev->sdr_cap_active);
+ INIT_LIST_HEAD(&dev->meta_cap_active);
+ INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->cec_work_list);
spin_lock_init(&dev->cec_slock);
@@ -1092,126 +1227,69 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
goto unreg_dev;
}
- if (allocator == 1)
+ if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- else if (allocator >= ARRAY_SIZE(vivid_mem_ops))
- allocator = 0;
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
- snprintf(dev->vid_cap_dev.name, sizeof(dev->vid_cap_dev.name),
- "vivid-%03d-vid-cap", inst);
/* initialize vid_cap queue */
- q = &dev->vb_vid_cap_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
- V4L2_BUF_TYPE_VIDEO_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_cap_q,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE, 2,
+ &vivid_vid_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vid_out) {
- snprintf(dev->vid_out_dev.name, sizeof(dev->vid_out_dev.name),
- "vivid-%03d-vid-out", inst);
/* initialize vid_out queue */
- q = &dev->vb_vid_out_q;
- q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
- V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vid_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vid_out_q,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, 2,
+ &vivid_vid_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_cap) {
/* initialize vbi_cap queue */
- q = &dev->vb_vbi_cap_q;
- q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
- V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_cap_q,
+ V4L2_BUF_TYPE_VBI_CAPTURE, 2,
+ &vivid_vbi_cap_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_vbi_out) {
/* initialize vbi_out queue */
- q = &dev->vb_vbi_out_q;
- q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
- V4L2_BUF_TYPE_SLICED_VBI_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_vbi_out_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_vbi_out_q,
+ V4L2_BUF_TYPE_VBI_OUTPUT, 2,
+ &vivid_vbi_out_qops);
if (ret)
goto unreg_dev;
}
if (dev->has_sdr_cap) {
/* initialize sdr_cap queue */
- q = &dev->vb_sdr_cap_q;
- q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
- q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
- if (!allocator)
- q->io_modes |= VB2_USERPTR;
- q->drv_priv = dev;
- q->buf_struct_size = sizeof(struct vivid_buffer);
- q->ops = &vivid_sdr_cap_qops;
- q->mem_ops = vivid_mem_ops[allocator];
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 8;
- q->lock = &dev->mutex;
- q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
-
- ret = vb2_queue_init(q);
+ ret = vivid_create_queue(dev, &dev->vb_sdr_cap_q,
+ V4L2_BUF_TYPE_SDR_CAPTURE, 8,
+ &vivid_sdr_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_cap) {
+ /* initialize meta_cap queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_cap_q,
+ V4L2_BUF_TYPE_META_CAPTURE, 2,
+ &vivid_meta_cap_qops);
+ if (ret)
+ goto unreg_dev;
+ }
+
+ if (dev->has_meta_out) {
+ /* initialize meta_out queue */
+ ret = vivid_create_queue(dev, &dev->vb_meta_out_q,
+ V4L2_BUF_TYPE_META_OUTPUT, 1,
+ &vivid_meta_out_qops);
if (ret)
goto unreg_dev;
}
@@ -1222,7 +1300,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (ret)
goto unreg_dev;
v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
- dev->fb_info.node);
+ dev->fb_info.node);
}
#ifdef CONFIG_VIDEO_VIVID_CEC
@@ -1265,10 +1343,14 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_rx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_radio_tx);
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_setup(&dev->ctrl_hdl_meta_out);
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vid_cap_caps;
@@ -1314,6 +1396,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vid_out) {
vfd = &dev->vid_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1492,6 +1576,65 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+ if (dev->has_meta_cap) {
+ vfd = &dev->meta_cap_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-cap", inst);
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_cap_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_cap_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_cap;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_cap_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata capture device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
+ if (dev->has_meta_out) {
+ vfd = &dev->meta_out_dev;
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-meta-out", inst);
+ vfd->vfl_dir = VFL_DIR_TX;
+ vfd->fops = &vivid_fops;
+ vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->meta_out_caps;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = &dev->vb_meta_out_q;
+ vfd->lock = &dev->mutex;
+ vfd->tvnorms = tvnorms_out;
+ video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->meta_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1,
+ &dev->meta_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER,
+ meta_out_nr[inst]);
+ if (ret < 0)
+ goto unreg_dev;
+ v4l2_info(&dev->v4l2_dev,
+ "V4L2 metadata output device registered as %s\n",
+ video_device_node_name(vfd));
+ }
+
#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device */
ret = media_device_register(&dev->mdev);
@@ -1508,6 +1651,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
return 0;
unreg_dev:
+ video_unregister_device(&dev->meta_out_dev);
+ video_unregister_device(&dev->meta_cap_dev);
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1580,7 +1725,6 @@ static int vivid_remove(struct platform_device *pdev)
#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
- media_device_cleanup(&dev->mdev);
#endif
if (dev->has_vid_cap) {
@@ -1624,6 +1768,16 @@ static int vivid_remove(struct platform_device *pdev)
unregister_framebuffer(&dev->fb_info);
vivid_fb_release_buffers(dev);
}
+ if (dev->has_meta_cap) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_cap_dev));
+ video_unregister_device(&dev->meta_cap_dev);
+ }
+ if (dev->has_meta_out) {
+ v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
+ video_device_node_name(&dev->meta_out_dev));
+ video_unregister_device(&dev->meta_out_dev);
+ }
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
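Review note: with the default node_types raised to 0x61d3d, every vivid instance now also registers metadata capture and output nodes, and vivid_create_queue() folds the previously copy-pasted vb2 queue setup into a single helper. A userspace sketch for checking which of the registered nodes carry the new caps; the /dev/videoN path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/video0"; /* assumed */
	struct v4l2_capability cap;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
		perror("VIDIOC_QUERYCAP");
		close(fd);
		return 1;
	}
	printf("%s: driver %s, card %s\n", path,
	       (const char *)cap.driver, (const char *)cap.card);
	if (cap.device_caps & V4L2_CAP_META_CAPTURE)
		printf("  metadata capture node\n");
	if (cap.device_caps & V4L2_CAP_META_OUTPUT)
		printf("  metadata output node\n");
	close(fd);
	return 0;
}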
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 7ebb14673c75..59192b67231c 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -131,6 +131,8 @@ struct vivid_dev {
struct media_pad vbi_cap_pad;
struct media_pad vbi_out_pad;
struct media_pad sdr_cap_pad;
+ struct media_pad meta_cap_pad;
+ struct media_pad meta_out_pad;
#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
@@ -153,6 +155,11 @@ struct vivid_dev {
struct v4l2_ctrl_handler ctrl_hdl_radio_tx;
struct video_device sdr_cap_dev;
struct v4l2_ctrl_handler ctrl_hdl_sdr_cap;
+ struct video_device meta_cap_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_cap;
+ struct video_device meta_out_dev;
+ struct v4l2_ctrl_handler ctrl_hdl_meta_out;
+
spinlock_t slock;
struct mutex mutex;
@@ -164,6 +171,8 @@ struct vivid_dev {
u32 sdr_cap_caps;
u32 radio_rx_caps;
u32 radio_tx_caps;
+ u32 meta_cap_caps;
+ u32 meta_out_caps;
/* supported features */
bool multiplanar;
@@ -189,6 +198,9 @@ struct vivid_dev {
bool has_radio_tx;
bool has_sdr_cap;
bool has_fb;
+ bool has_meta_cap;
+ bool has_meta_out;
+ bool has_tv_tuner;
bool can_loop_video;
@@ -390,6 +402,8 @@ struct vivid_dev {
struct list_head vid_cap_active;
struct vb2_queue vb_vbi_cap_q;
struct list_head vbi_cap_active;
+ struct vb2_queue vb_meta_cap_q;
+ struct list_head meta_cap_active;
/* thread for generating video capture stream */
struct task_struct *kthread_vid_cap;
@@ -407,6 +421,9 @@ struct vivid_dev {
u32 vbi_cap_seq_count;
bool vbi_cap_streaming;
bool stream_sliced_vbi_cap;
+ u32 meta_cap_seq_start;
+ u32 meta_cap_seq_count;
+ bool meta_cap_streaming;
/* video output */
const struct vivid_fmt *fmt_out;
@@ -421,6 +438,8 @@ struct vivid_dev {
struct list_head vid_out_active;
struct vb2_queue vb_vbi_out_q;
struct list_head vbi_out_active;
+ struct vb2_queue vb_meta_out_q;
+ struct list_head meta_out_active;
/* video loop precalculated rectangles */
@@ -461,6 +480,9 @@ struct vivid_dev {
u32 vbi_out_seq_count;
bool vbi_out_streaming;
bool stream_sliced_vbi_out;
+ u32 meta_out_seq_start;
+ u32 meta_out_seq_count;
+ bool meta_out_streaming;
/* SDR capture */
struct vb2_queue vb_sdr_cap_q;
@@ -527,6 +549,9 @@ struct vivid_dev {
/* CEC OSD String */
char osd[14];
unsigned long osd_jiffies;
+
+ bool meta_pts;
+ bool meta_scr;
};
static inline bool vivid_is_webcam(const struct vivid_dev *dev)
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index cb19a9a73092..68e8124c7973 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -32,6 +32,7 @@
#define VIVID_CID_U32_ARRAY (VIVID_CID_CUSTOM_BASE + 8)
#define VIVID_CID_U16_MATRIX (VIVID_CID_CUSTOM_BASE + 9)
#define VIVID_CID_U8_4D_ARRAY (VIVID_CID_CUSTOM_BASE + 10)
+#define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -94,6 +95,9 @@
#define VIVID_CID_SDR_CAP_FM_DEVIATION (VIVID_CID_VIVID_BASE + 110)
+#define VIVID_CID_META_CAP_GENERATE_PTS (VIVID_CID_VIVID_BASE + 111)
+#define VIVID_CID_META_CAP_GENERATE_SCR (VIVID_CID_VIVID_BASE + 112)
+
/* General User Controls */
static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -110,6 +114,7 @@ static int vivid_user_gen_s_ctrl(struct v4l2_ctrl *ctrl)
clear_bit(V4L2_FL_REGISTERED, &dev->sdr_cap_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_rx_dev.flags);
clear_bit(V4L2_FL_REGISTERED, &dev->radio_tx_dev.flags);
+ clear_bit(V4L2_FL_REGISTERED, &dev->meta_cap_dev.flags);
break;
case VIVID_CID_BUTTON:
dev->button_pressed = 30;
@@ -262,6 +267,18 @@ static const struct v4l2_ctrl_config vivid_ctrl_disconnect = {
.type = V4L2_CTRL_TYPE_BUTTON,
};
+static const struct v4l2_area area = {
+ .width = 1000,
+ .height = 2000,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_area = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_AREA,
+ .name = "Area",
+ .type = V4L2_CTRL_TYPE_AREA,
+ .p_def.p_const = &area,
+};
/* Framebuffer Controls */
@@ -1421,6 +1438,47 @@ static const struct v4l2_ctrl_config vivid_ctrl_sdr_cap_fm_deviation = {
.step = 1,
};
+/* Metadata Capture Control */
+
+static int vivid_meta_cap_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev,
+ ctrl_hdl_meta_cap);
+
+ switch (ctrl->id) {
+ case VIVID_CID_META_CAP_GENERATE_PTS:
+ dev->meta_pts = ctrl->val;
+ break;
+ case VIVID_CID_META_CAP_GENERATE_SCR:
+ dev->meta_scr = ctrl->val;
+ break;
+ }
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vivid_meta_cap_ctrl_ops = {
+ .s_ctrl = vivid_meta_cap_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_pts = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_PTS,
+ .name = "Generate PTS",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_meta_has_src_clk = {
+ .ops = &vivid_meta_cap_ctrl_ops,
+ .id = VIVID_CID_META_CAP_GENERATE_SCR,
+ .name = "Generate SCR",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .def = 1,
+ .step = 1,
+};
static const struct v4l2_ctrl_config vivid_ctrl_class = {
.ops = &vivid_user_gen_ctrl_ops,
@@ -1448,6 +1506,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
struct v4l2_ctrl_handler *hdl_radio_rx = &dev->ctrl_hdl_radio_rx;
struct v4l2_ctrl_handler *hdl_radio_tx = &dev->ctrl_hdl_radio_tx;
struct v4l2_ctrl_handler *hdl_sdr_cap = &dev->ctrl_hdl_sdr_cap;
+ struct v4l2_ctrl_handler *hdl_meta_cap = &dev->ctrl_hdl_meta_cap;
+ struct v4l2_ctrl_handler *hdl_meta_out = &dev->ctrl_hdl_meta_out;
+
struct v4l2_ctrl_config vivid_ctrl_dv_timings = {
.ops = &vivid_vid_cap_ctrl_ops,
.id = VIVID_CID_DV_TIMINGS,
@@ -1486,6 +1547,10 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_radio_tx, &vivid_ctrl_class, NULL);
v4l2_ctrl_handler_init(hdl_sdr_cap, 19);
v4l2_ctrl_new_custom(hdl_sdr_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_cap, 2);
+ v4l2_ctrl_new_custom(hdl_meta_cap, &vivid_ctrl_class, NULL);
+ v4l2_ctrl_handler_init(hdl_meta_out, 2);
+ v4l2_ctrl_new_custom(hdl_meta_out, &vivid_ctrl_class, NULL);
/* User Controls */
dev->volume = v4l2_ctrl_new_std(hdl_user_aud, NULL,
@@ -1522,6 +1587,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->string = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_string, NULL);
dev->bitmask = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_bitmask, NULL);
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
@@ -1743,6 +1809,13 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_sdr_cap,
&vivid_ctrl_sdr_cap_fm_deviation, NULL);
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_pts, NULL);
+ v4l2_ctrl_new_custom(hdl_meta_cap,
+ &vivid_ctrl_meta_has_src_clk, NULL);
+ }
+
if (hdl_user_gen->error)
return hdl_user_gen->error;
if (hdl_user_vid->error)
@@ -1817,6 +1890,20 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
return hdl_sdr_cap->error;
dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
}
+ if (dev->has_meta_cap) {
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_cap, hdl_streaming, NULL, false);
+ if (hdl_meta_cap->error)
+ return hdl_meta_cap->error;
+ dev->meta_cap_dev.ctrl_handler = hdl_meta_cap;
+ }
+ if (dev->has_meta_out) {
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_meta_out, hdl_streaming, NULL, false);
+ if (hdl_meta_out->error)
+ return hdl_meta_out->error;
+ dev->meta_out_dev.ctrl_handler = hdl_meta_out;
+ }
return 0;
}
@@ -1836,4 +1923,6 @@ void vivid_free_controls(struct vivid_dev *dev)
v4l2_ctrl_handler_free(&dev->ctrl_hdl_sdtv_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_loop_cap);
v4l2_ctrl_handler_free(&dev->ctrl_hdl_fb);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_cap);
+ v4l2_ctrl_handler_free(&dev->ctrl_hdl_meta_out);
}
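Review note: besides wiring up the two metadata control handlers, this hunk also exercises the new compound V4L2_CTRL_TYPE_AREA control type via the custom "Area" control. A userspace sketch for reading it with VIDIOC_G_EXT_CTRLS; the control id below assumes VIVID_CID_CUSTOM_BASE expands to (V4L2_CID_USER_BASE | 0xf000), and the node path is likewise an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

/* assumed expansion of VIVID_CID_CUSTOM_BASE + 11 */
#define VIVID_CID_AREA ((V4L2_CID_USER_BASE | 0xf000) + 11)

int main(void)
{
	struct v4l2_area area;
	struct v4l2_ext_control c;
	struct v4l2_ext_controls cs;
	int fd = open("/dev/video0", O_RDWR);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&area, 0, sizeof(area));
	memset(&c, 0, sizeof(c));
	memset(&cs, 0, sizeof(cs));
	c.id = VIVID_CID_AREA;
	c.size = sizeof(area);	/* compound control: payload passed by pointer */
	c.p_area = &area;
	cs.which = V4L2_CTRL_WHICH_CUR_VAL;
	cs.count = 1;
	cs.controls = &c;
	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &cs) < 0)
		perror("VIDIOC_G_EXT_CTRLS");
	else
		printf("area: %ux%u\n", area.width, area.height);
	close(fd);
	return 0;
}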
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 003319d7816d..01a9d671b947 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -39,6 +39,7 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev)
{
@@ -677,6 +678,7 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
{
struct vivid_buffer *vid_cap_buf = NULL;
struct vivid_buffer *vbi_cap_buf = NULL;
+ struct vivid_buffer *meta_cap_buf = NULL;
u64 f_time = 0;
dprintk(dev, 1, "Video Capture Thread Tick\n");
@@ -704,15 +706,19 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
list_del(&vbi_cap_buf->list);
}
}
+ if (!list_empty(&dev->meta_cap_active)) {
+ meta_cap_buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_cap_buf->list);
+ }
+
spin_unlock(&dev->slock);
- if (!vid_cap_buf && !vbi_cap_buf)
+ if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf)
goto update_mv;
f_time = dev->cap_frame_period * dev->vid_cap_seq_count +
dev->cap_stream_start + dev->time_wrap_offset;
- if (!dev->tstamp_src_is_soe)
- f_time += dev->cap_frame_eof_offset;
if (vid_cap_buf) {
v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
@@ -735,6 +741,8 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
vid_cap_buf->vb.vb2_buf.index);
vid_cap_buf->vb.vb2_buf.timestamp = f_time;
+ if (!dev->tstamp_src_is_soe)
+ vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset;
}
if (vbi_cap_buf) {
@@ -756,8 +764,22 @@ static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
/* If capturing a VBI, offset by 0.05 */
vbi_period = dev->cap_frame_period * 5;
do_div(vbi_period, 100);
- vbi_cap_buf->vb.vb2_buf.timestamp = f_time + vbi_period;
+ vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period;
+ }
+
+ if (meta_cap_buf) {
+ v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time);
+ v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_cap %d done\n",
+ meta_cap_buf->vb.vb2_buf.index);
+ meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset;
}
+
dev->dqbuf_error = false;
update_mv:
@@ -796,7 +818,11 @@ static int vivid_thread_vid_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->cap_seq_resync) {
dev->jiffies_vid_cap = cur_jiffies;
@@ -835,6 +861,7 @@ static int vivid_thread_vid_cap(void *data)
dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset;
dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start;
dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start;
+ dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start;
vivid_thread_vid_cap_tick(dev, dropped_bufs);
@@ -883,8 +910,10 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_cap_streaming)
dev->vid_cap_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_cap_streaming)
dev->vbi_cap_seq_start = seq_count;
+ else
+ dev->meta_cap_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -894,6 +923,7 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
dev->vid_cap_seq_start = dev->seq_wrap * 128;
dev->vbi_cap_seq_start = dev->seq_wrap * 128;
+ dev->meta_cap_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev,
"%s-vid-cap", dev->v4l2_dev.name);
@@ -951,13 +981,27 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_cap_streaming || dev->vbi_cap_streaming)
+ if (pstreaming == &dev->meta_cap_streaming) {
+ while (!list_empty(&dev->meta_cap_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_cap_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_cap);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_cap buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_cap_streaming || dev->vbi_cap_streaming ||
+ dev->meta_cap_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_cap);
dev->kthread_vid_cap = NULL;
- mutex_lock(&dev->mutex);
}
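Review note: the switch to mutex_trylock() pairs with dropping the unlock/relock dance around kthread_stop() at the end of vivid_stop_generating_vid_cap(): the stopper may now keep dev->mutex held while stopping the thread, because the producer yields instead of blocking on the lock. A userspace analogy of the pattern, a sketch rather than the kernel code:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool should_stop;

static void *producer(void *arg)
{
	while (!atomic_load(&should_stop)) {
		if (pthread_mutex_trylock(&lock) != 0) {
			sched_yield();	/* like schedule_timeout_uninterruptible(1) */
			continue;
		}
		/* ... emit one buffer tick under the lock ... */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	pthread_mutex_lock(&lock);	/* stopper holds the lock ... */
	atomic_store(&should_stop, 1);	/* ... while stopping the producer */
	pthread_join(t, NULL);	/* safe: producer never blocks on the lock */
	pthread_mutex_unlock(&lock);
	return 0;
}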
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index ce5bcda2348c..6780687978f9 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -38,11 +38,13 @@
#include "vivid-osd.h"
#include "vivid-ctrls.h"
#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
{
struct vivid_buffer *vid_out_buf = NULL;
struct vivid_buffer *vbi_out_buf = NULL;
+ struct vivid_buffer *meta_out_buf = NULL;
dprintk(dev, 1, "Video Output Thread Tick\n");
@@ -69,9 +71,14 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
struct vivid_buffer, list);
list_del(&vbi_out_buf->list);
}
+ if (!list_empty(&dev->meta_out_active)) {
+ meta_out_buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&meta_out_buf->list);
+ }
spin_unlock(&dev->slock);
- if (!vid_out_buf && !vbi_out_buf)
+ if (!vid_out_buf && !vbi_out_buf && !meta_out_buf)
return;
if (vid_out_buf) {
@@ -111,6 +118,21 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
dprintk(dev, 2, "vbi_out buffer %d done\n",
vbi_out_buf->vb.vb2_buf.index);
}
+ if (meta_out_buf) {
+ v4l2_ctrl_request_setup(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ v4l2_ctrl_request_complete(meta_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vivid_meta_out_process(dev, meta_out_buf);
+ meta_out_buf->vb.sequence = dev->meta_out_seq_count;
+ meta_out_buf->vb.vb2_buf.timestamp =
+ ktime_get_ns() + dev->time_wrap_offset;
+ vb2_buffer_done(&meta_out_buf->vb.vb2_buf, dev->dqbuf_error ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ meta_out_buf->vb.vb2_buf.index);
+ }
+
dev->dqbuf_error = false;
}
@@ -136,6 +158,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = 0xffffff80U;
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->vbi_out_seq_start = 0;
+ dev->meta_out_seq_start = 0;
dev->out_seq_resync = false;
for (;;) {
@@ -143,7 +166,11 @@ static int vivid_thread_vid_out(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->out_seq_resync) {
dev->jiffies_vid_out = cur_jiffies;
@@ -178,6 +205,7 @@ static int vivid_thread_vid_out(void *data)
dev->out_seq_count = buffers_since_start + dev->out_seq_offset;
dev->vid_out_seq_count = dev->out_seq_count - dev->vid_out_seq_start;
dev->vbi_out_seq_count = dev->out_seq_count - dev->vbi_out_seq_start;
+ dev->meta_out_seq_count = dev->out_seq_count - dev->meta_out_seq_start;
vivid_thread_vid_out_tick(dev);
mutex_unlock(&dev->mutex);
@@ -229,8 +257,10 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
if (pstreaming == &dev->vid_out_streaming)
dev->vid_out_seq_start = seq_count;
- else
+ else if (pstreaming == &dev->vbi_out_streaming)
dev->vbi_out_seq_start = seq_count;
+ else
+ dev->meta_out_seq_start = seq_count;
*pstreaming = true;
return 0;
}
@@ -239,6 +269,7 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
dev->jiffies_vid_out = jiffies;
dev->vid_out_seq_start = dev->seq_wrap * 128;
dev->vbi_out_seq_start = dev->seq_wrap * 128;
+ dev->meta_out_seq_start = dev->seq_wrap * 128;
dev->kthread_vid_out = kthread_run(vivid_thread_vid_out, dev,
"%s-vid-out", dev->v4l2_dev.name);
@@ -296,13 +327,27 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
}
}
- if (dev->vid_out_streaming || dev->vbi_out_streaming)
+ if (pstreaming == &dev->meta_out_streaming) {
+ while (!list_empty(&dev->meta_out_active)) {
+ struct vivid_buffer *buf;
+
+ buf = list_entry(dev->meta_out_active.next,
+ struct vivid_buffer, list);
+ list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_meta_out);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dprintk(dev, 2, "meta_out buffer %d done\n",
+ buf->vb.vb2_buf.index);
+ }
+ }
+
+ if (dev->vid_out_streaming || dev->vbi_out_streaming ||
+ dev->meta_out_streaming)
return;
/* shutdown control thread */
vivid_grab_controls(dev, false);
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_vid_out);
dev->kthread_vid_out = NULL;
- mutex_lock(&dev->mutex);
}
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.c b/drivers/media/platform/vivid/vivid-meta-cap.c
new file mode 100644
index 000000000000..780f96860a6d
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-cap.c - meta capture support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-cap.h"
+#include "vivid-meta-cap.h"
+
+static int meta_cap_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_cap_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_uvc_meta_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_cap_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_cap_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_cap_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_cap(dev,
+ &dev->meta_cap_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_cap_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_cap_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_cap(dev, &dev->meta_cap_streaming);
+}
+
+static void meta_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_cap);
+}
+
+const struct vb2_ops vivid_meta_cap_qops = {
+ .queue_setup = meta_cap_queue_setup,
+ .buf_prepare = meta_cap_buf_prepare,
+ .buf_queue = meta_cap_buf_queue,
+ .start_streaming = meta_cap_start_streaming,
+ .stop_streaming = meta_cap_stop_streaming,
+ .buf_request_complete = meta_cap_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->pixelformat = V4L2_META_FMT_UVC;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_cap)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_UVC;
+ meta->buffersize = sizeof(struct vivid_uvc_meta_buf);
+ return 0;
+}
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe)
+{
+ struct vivid_uvc_meta_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+ int buf_off = 0;
+
+ buf->vb.sequence = dev->meta_cap_seq_count;
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ buf->vb.sequence /= 2;
+ memset(meta, 1, vb2_plane_size(&buf->vb.vb2_buf, 0));
+
+ meta->ns = ktime_get_ns();
+ meta->sof = buf->vb.sequence * 30;
+ meta->length = sizeof(*meta) - offsetof(struct vivid_uvc_meta_buf, length);
+ meta->flags = UVC_STREAM_EOH | UVC_STREAM_EOF;
+
+ if ((buf->vb.sequence % 2) == 0)
+ meta->flags |= UVC_STREAM_FID;
+
+ dprintk(dev, 2, "%s ns:%llu sof:%4d len:%u flags: 0x%02x",
+ __func__, meta->ns, meta->sof, meta->length, meta->flags);
+ if (dev->meta_pts) {
+ meta->flags |= UVC_STREAM_PTS;
+ meta->buf[0] = div_u64(soe, VIVID_META_CLOCK_UNIT);
+ buf_off = 4;
+ dprintk(dev, 2, " pts: %u\n", *(__u32 *)(meta->buf));
+ }
+
+ if (dev->meta_scr) {
+ meta->flags |= UVC_STREAM_SCR;
+ meta->buf[buf_off] = div_u64((soe + dev->cap_frame_eof_offset),
+ VIVID_META_CLOCK_UNIT);
+
+ meta->buf[buf_off + 4] = (buf->vb.sequence * 30) % 1000;
+ dprintk(dev, 2, " stc: %u, sof counter: %u\n",
+ *(__u32 *)(meta->buf + buf_off),
+ *(__u16 *)(meta->buf + buf_off + 4));
+ }
+ dprintk(dev, 2, "\n");
+}
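Review note: the generated payload follows the UVC metadata layout exported in linux/uvcvideo.h, and because the capture queue enables VB2_READ a plain read() is enough to sample it. A userspace sketch; the node path is an assumption, pick whichever node reports V4L2_CAP_META_CAPTURE:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/uvcvideo.h>

int main(void)
{
	unsigned char raw[64];
	struct uvc_meta_buf m;
	int fd = open("/dev/video4", O_RDONLY);	/* assumed node */
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, raw, sizeof(raw));
	if (n < (ssize_t)sizeof(m)) {
		perror("read");
		close(fd);
		return 1;
	}
	memcpy(&m, raw, sizeof(m));	/* header only; avoids unaligned access */
	printf("ns=%llu sof=%u len=%u flags=0x%02x\n",
	       (unsigned long long)m.ns, m.sof, m.length, m.flags);
	close(fd);
	return 0;
}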
diff --git a/drivers/media/platform/vivid/vivid-meta-cap.h b/drivers/media/platform/vivid/vivid-meta-cap.h
new file mode 100644
index 000000000000..4670d00d1576
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-cap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-cap.h - meta capture support functions.
+ */
+#ifndef _VIVID_META_CAP_H_
+#define _VIVID_META_CAP_H_
+
+#define VIVID_META_CLOCK_UNIT 10 /* 100 MHz */
+
+struct vivid_uvc_meta_buf {
+ __u64 ns;
+ __u16 sof;
+ __u8 length;
+ __u8 flags;
+ __u8 buf[10]; /* PTS(4)+STC(4)+SOF(2) */
+} __packed;
+
+void vivid_meta_cap_fillbuff(struct vivid_dev *dev,
+ struct vivid_buffer *buf, u64 soe);
+
+int vidioc_enum_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+
+int vidioc_g_fmt_meta_cap(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_cap_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-meta-out.c b/drivers/media/platform/vivid/vivid-meta-out.c
new file mode 100644
index 000000000000..ff8a039aba72
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vivid-meta-out.c - meta output support functions.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/usb/video.h>
+
+#include "vivid-core.h"
+#include "vivid-kthread-out.h"
+#include "vivid-meta-out.h"
+
+static int meta_out_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (*nplanes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ } else {
+ sizes[0] = size;
+ }
+
+ if (vq->num_buffers + *nbuffers < 2)
+ *nbuffers = 2 - vq->num_buffers;
+
+ *nplanes = 1;
+ return 0;
+}
+
+static int meta_out_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size = sizeof(struct vivid_meta_out_buf);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ if (dev->buf_prepare_error) {
+ /*
+ * Error injection: test what happens if buf_prepare() returns
+ * an error.
+ */
+ dev->buf_prepare_error = false;
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < size) {
+ dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void meta_out_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
+
+ dprintk(dev, 1, "%s\n", __func__);
+
+ spin_lock(&dev->slock);
+ list_add_tail(&buf->list, &dev->meta_out_active);
+ spin_unlock(&dev->slock);
+}
+
+static int meta_out_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+ int err;
+
+ dprintk(dev, 1, "%s\n", __func__);
+ dev->meta_out_seq_count = 0;
+ if (dev->start_streaming_error) {
+ dev->start_streaming_error = false;
+ err = -EINVAL;
+ } else {
+ err = vivid_start_generating_vid_out(dev,
+ &dev->meta_out_streaming);
+ }
+ if (err) {
+ struct vivid_buffer *buf, *tmp;
+
+ list_for_each_entry_safe(buf, tmp,
+ &dev->meta_out_active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+ return err;
+}
+
+/* abort streaming and wait for last buffer */
+static void meta_out_stop_streaming(struct vb2_queue *vq)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vq);
+
+ dprintk(dev, 1, "%s\n", __func__);
+ vivid_stop_generating_vid_out(dev, &dev->meta_out_streaming);
+}
+
+static void meta_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_meta_out);
+}
+
+const struct vb2_ops vivid_meta_out_qops = {
+ .queue_setup = meta_out_queue_setup,
+ .buf_prepare = meta_out_buf_prepare,
+ .buf_queue = meta_out_buf_queue,
+ .start_streaming = meta_out_start_streaming,
+ .stop_streaming = meta_out_stop_streaming,
+ .buf_request_complete = meta_out_buf_request_complete,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+
+ if (!vivid_is_webcam(dev))
+ return -EINVAL;
+
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->type = V4L2_BUF_TYPE_META_OUTPUT;
+ f->pixelformat = V4L2_META_FMT_VIVID;
+ return 0;
+}
+
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vivid_dev *dev = video_drvdata(file);
+ struct v4l2_meta_format *meta = &f->fmt.meta;
+
+ if (!vivid_is_webcam(dev) || !dev->has_meta_out)
+ return -EINVAL;
+
+ meta->dataformat = V4L2_META_FMT_VIVID;
+ meta->buffersize = sizeof(struct vivid_meta_out_buf);
+ return 0;
+}
+
+void vivid_meta_out_process(struct vivid_dev *dev,
+ struct vivid_buffer *buf)
+{
+ struct vivid_meta_out_buf *meta = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+
+ tpg_s_brightness(&dev->tpg, meta->brightness);
+ tpg_s_contrast(&dev->tpg, meta->contrast);
+ tpg_s_saturation(&dev->tpg, meta->saturation);
+ tpg_s_hue(&dev->tpg, meta->hue);
+ dprintk(dev, 2, " %s brightness %u contrast %u saturation %u hue %d\n",
+ __func__, meta->brightness, meta->contrast,
+ meta->saturation, meta->hue);
+}
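Review note: the output side loops userspace-supplied picture settings back into the test pattern generator, so a buffer pushed to the meta-out node immediately retunes the captured image. Since output queues enable VB2_WRITE, a plain write() works; the struct below mirrors vivid_meta_out_buf from the header diff that follows, and the node path is an assumption:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* mirrors struct vivid_meta_out_buf (three u16 fields plus an s16) */
struct vivid_meta_out_buf {
	uint16_t brightness;
	uint16_t contrast;
	uint16_t saturation;
	int16_t hue;
};

int main(void)
{
	struct vivid_meta_out_buf m = {
		.brightness = 128,
		.contrast = 128,
		.saturation = 128,
		.hue = 0,
	};
	int fd = open("/dev/video5", O_WRONLY);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, &m, sizeof(m)) != sizeof(m))
		perror("write");
	close(fd);
	return 0;
}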
diff --git a/drivers/media/platform/vivid/vivid-meta-out.h b/drivers/media/platform/vivid/vivid-meta-out.h
new file mode 100644
index 000000000000..0c639b7c2842
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-meta-out.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * vivid-meta-out.h - meta output support functions.
+ */
+#ifndef _VIVID_META_OUT_H_
+#define _VIVID_META_OUT_H_
+
+struct vivid_meta_out_buf {
+ u16 brightness;
+ u16 contrast;
+ u16 saturation;
+ s16 hue;
+};
+
+void vivid_meta_out_process(struct vivid_dev *dev, struct vivid_buffer *buf);
+int vidioc_enum_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+int vidioc_s_fmt_meta_out(struct file *file, void *priv,
+ struct v4l2_format *f);
+
+extern const struct vb2_ops vivid_meta_out_qops;
+
+#endif
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 9acc709b0740..2b7522e16efc 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -141,7 +141,11 @@ static int vivid_thread_sdr_cap(void *data)
if (kthread_should_stop())
break;
- mutex_lock(&dev->mutex);
+ if (!mutex_trylock(&dev->mutex)) {
+ schedule_timeout_uninterruptible(1);
+ continue;
+ }
+
cur_jiffies = jiffies;
if (dev->sdr_cap_seq_resync) {
dev->jiffies_sdr_cap = cur_jiffies;
@@ -303,10 +307,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
}
/* shutdown control thread */
- mutex_unlock(&dev->mutex);
kthread_stop(dev->kthread_sdr_cap);
dev->kthread_sdr_cap = NULL;
- mutex_lock(&dev->mutex);
}
static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 8cbaa0c998ed..e94beef008c8 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -223,9 +223,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_out_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_cap)
- return 0;
-
dev->vid_cap_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
for (i = 0; i < VIDEO_MAX_FRAME; i++)
@@ -1359,7 +1356,9 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
if (i == dev->input)
return 0;
- if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
+ if (vb2_is_busy(&dev->vb_vid_cap_q) ||
+ vb2_is_busy(&dev->vb_vbi_cap_q) ||
+ vb2_is_busy(&dev->vb_meta_cap_q))
return -EBUSY;
dev->input = i;
@@ -1369,6 +1368,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
}
dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
+ dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
vivid_update_format_cap(dev, false);
if (dev->colorspace) {
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 148b663a6075..ee3446e3217c 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -161,9 +161,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
if (vb2_is_streaming(&dev->vb_vid_cap_q))
dev->can_loop_video = vivid_vid_can_loop(dev);
- if (dev->kthread_vid_out)
- return 0;
-
dev->vid_out_seq_count = 0;
dprintk(dev, 1, "%s\n", __func__);
if (dev->start_streaming_error) {
@@ -1082,7 +1079,9 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
if (o == dev->output)
return 0;
- if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q))
+ if (vb2_is_busy(&dev->vb_vid_out_q) ||
+ vb2_is_busy(&dev->vb_vbi_out_q) ||
+ vb2_is_busy(&dev->vb_meta_out_q))
return -EBUSY;
dev->output = o;
@@ -1093,6 +1092,7 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
dev->vid_out_dev.tvnorms = 0;
dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
+ dev->meta_out_dev.tvnorms = dev->vid_out_dev.tvnorms;
vivid_update_format_out(dev);
v4l2_ctrl_activate(dev->ctrl_display_present, vivid_is_hdmi_out(dev));
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 5aec4d17eb21..2378bdae57ae 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video DMA
*
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index f71e2b650453..a528a32ea1dc 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Core
*
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index e65fce9538f9..cc52c1854dbd 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Composite Device
*
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index 90cf44245283..855845911ffc 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video Timing Controller
*
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 104ac41c6f96..112376873167 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1148,8 +1148,7 @@ static int wl1273_fm_fops_release(struct file *file)
if (radio->rds_users > 0) {
radio->rds_users--;
if (radio->rds_users == 0) {
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
+ mutex_lock(&core->lock);
radio->irq_flags &= ~WL1273_RDS_EVENT;
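The switch from mutex_lock_interruptible() to mutex_lock() matters because this is a release() handler: returning -EINTR here would leave rds_users already decremented but the RDS interrupt still enabled. A hedged sketch of the rule, with placeholder foo names:

    #include <linux/bits.h>
    #include <linux/fs.h>
    #include <linux/mutex.h>

    #define FOO_RDS_EVENT BIT(0)

    struct foo_core {
        struct mutex lock;
        unsigned int irq_flags;
    };

    static int foo_release(struct file *file)
    {
        struct foo_core *core = file->private_data;

        /*
         * Teardown must complete even with a signal pending, so take
         * the lock uninterruptibly; an interruptible lock could bail
         * out half-way through the state update.
         */
        mutex_lock(&core->lock);
        core->irq_flags &= ~FOO_RDS_EVENT;
        mutex_unlock(&core->lock);
        return 0;
    }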
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 7541698a0be1..f491420d7b53 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -482,6 +482,8 @@ static int si470x_i2c_remove(struct i2c_client *client)
if (radio->gpio_reset)
gpiod_set_value(radio->gpio_reset, 0);
+ v4l2_ctrl_handler_free(&radio->hdl);
+ v4l2_device_unregister(&radio->v4l2_dev);
return 0;
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 37a850421fbb..ed95244da894 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -83,6 +83,7 @@ struct imon_usb_dev_descr {
__u16 flags;
#define IMON_NO_FLAGS 0
#define IMON_NEED_20MS_PKT_DELAY 1
+#define IMON_SUPPRESS_REPEATED_KEYS 2
struct imon_panel_key_table key_table[];
};
@@ -149,8 +150,9 @@ struct imon_context {
struct timer_list ttimer; /* touch screen timer */
int touch_x; /* x coordinate on touchscreen */
int touch_y; /* y coordinate on touchscreen */
- struct imon_usb_dev_descr *dev_descr; /* device description with key
- table for front panels */
+ const struct imon_usb_dev_descr *dev_descr;
+ /* device description with key table for front panels */
};
#define TOUCH_TIMEOUT (HZ/30)
@@ -315,6 +317,32 @@ static const struct imon_usb_dev_descr imon_DH102 = {
}
};
+/* imon ultrabay front panel key table */
+static const struct imon_usb_dev_descr ultrabay_table = {
+ .flags = IMON_SUPPRESS_REPEATED_KEYS,
+ .key_table = {
+ { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */
+ { 0x000000000100ffeell, KEY_UP },
+ { 0x000000000001ffeell, KEY_DOWN },
+ { 0x000000160000ffeell, KEY_ENTER },
+ { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */
+ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */
+ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */
+ { 0x000000270000ffeell, KEY_DVD }, /* DVD */
+ { 0x000000230000ffeell, KEY_TV }, /* TV */
+ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */
+ { 0x000000070000ffeell, KEY_REWIND },
+ { 0x000000040000ffeell, KEY_STOP },
+ { 0x000000020000ffeell, KEY_PLAYPAUSE },
+ { 0x000000080000ffeell, KEY_FASTFORWARD },
+ { 0x000000060000ffeell, KEY_NEXT }, /* Next */
+ { 0x000100000000ffeell, KEY_VOLUMEUP },
+ { 0x010000000000ffeell, KEY_VOLUMEDOWN },
+ { 0x000000010000ffeell, KEY_MUTE },
+ { 0, KEY_RESERVED },
+ }
+};
+
/*
* USB Device ID for iMON USB Control Boards
*
@@ -1264,9 +1292,11 @@ static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode)
static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code)
{
- int i;
+ const struct imon_panel_key_table *key_table;
u32 keycode = KEY_RESERVED;
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ int i;
+
+ key_table = ictx->dev_descr->key_table;
for (i = 0; key_table[i].hw_code != 0; i++) {
if (key_table[i].hw_code == (code | 0xffee)) {
@@ -1550,7 +1580,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
u32 kc;
u64 scancode;
int press_type = 0;
- long msec;
ktime_t t;
static ktime_t prev_time;
u8 ktype;
@@ -1598,8 +1627,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_unlock_irqrestore(&ictx->kc_lock, flags);
/* send touchscreen events through input subsystem if touchpad data */
- if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
- buf[7] == 0x86) {
+ if (ictx->touch && len == 8 && buf[7] == 0x86) {
imon_touch_event(ictx, buf);
return;
@@ -1653,14 +1681,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
spin_lock_irqsave(&ictx->kc_lock, flags);
t = ktime_get();
- /* KEY_MUTE repeats from knob need to be suppressed */
- if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- msec = ktime_ms_delta(t, prev_time);
- if (msec < ictx->idev->rep[REP_DELAY]) {
+ /* KEY repeats from knob and panel that need to be suppressed */
+ if (ictx->kc == KEY_MUTE ||
+ ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) {
+ if (ictx->kc == ictx->last_keycode &&
+ ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+
prev_time = t;
kc = ictx->kc;
@@ -1848,6 +1878,14 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR");
ictx->display_supported = false;
break;
+ /* Soundgraph iMON UltraBay */
+ case 0x98:
+ dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR");
+ detected_display_type = IMON_DISPLAY_TYPE_LCD;
+ allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
+ ictx->dev_descr = &ultrabay_table;
+ break;
+
default:
dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1979,10 +2017,12 @@ out:
static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
- struct imon_panel_key_table *key_table = ictx->dev_descr->key_table;
+ const struct imon_panel_key_table *key_table;
struct input_dev *idev;
int ret, i;
+ key_table = ictx->dev_descr->key_table;
+
idev = input_allocate_device();
if (!idev)
goto out;
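The repeat filter above generalizes the old KEY_MUTE special case: any device flagged IMON_SUPPRESS_REPEATED_KEYS drops a key event when the same keycode arrives again within the input layer's repeat delay. The test reduces to something like this (sketch; suppress_repeat is a hypothetical helper, not an imon symbol):

    #include <linux/ktime.h>
    #include <linux/types.h>

    /* True when this event is a too-fast repeat of the previous one. */
    static bool suppress_repeat(u32 kc, u32 last_kc,
                                ktime_t now, ktime_t prev, int rep_delay_ms)
    {
        return kc == last_kc && ktime_ms_delta(now, prev) < rep_delay_ms;
    }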
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index d4aedcf76418..aae0a3cc9479 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -57,32 +57,18 @@ static void imon_ir_data(struct imon *imon)
* fls will tell us the highest bit set plus 1 (or 0 if no
* bits are set).
*/
+ rawir.pulse = !rawir.pulse;
bit = fls64(data & (BIT_ULL(offset) - 1));
if (bit < offset) {
- dev_dbg(imon->dev, "pulse: %d bits", offset - bit);
- rawir.pulse = true;
+ dev_dbg(imon->dev, "%s: %d bits",
+ rawir.pulse ? "pulse" : "space", offset - bit);
rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
- if (bit == 0)
- break;
-
offset = bit;
}
- /*
- * Find highest clear bit which is less than offset.
- *
- * Just invert the data and use same trick as above.
- */
- bit = fls64(~data & (BIT_ULL(offset) - 1));
- dev_dbg(imon->dev, "space: %d bits", offset - bit);
-
- rawir.pulse = false;
- rawir.duration = (offset - bit) * BIT_DURATION;
- ir_raw_event_store_with_filter(imon->rcdev, &rawir);
-
- offset = bit;
+ data = ~data;
} while (offset > 0);
if (packet_no == 0x0a && !imon->rcdev->idle) {
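The rewritten loop halves the body by exploiting symmetry: in this bitstream a run of 0-bits is a pulse and a run of 1-bits is a space, so after reporting one run it inverts data and reuses the same fls64() search for the opposite level. A standalone, runnable rendering of the loop (userspace C; the bit pattern is invented for illustration, and offset stays below 64 so the shift is defined):

    #include <stdint.h>
    #include <stdio.h>

    /* fls64(): index of the highest set bit plus one, 0 if none. */
    static int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

    int main(void)
    {
        uint64_t data = 0xff00f0ull;    /* 24 valid bits, made-up sample */
        int offset = 24, bit, pulse = 0;

        do {
            pulse = !pulse;
            bit = fls64_(data & ((1ull << offset) - 1));
            if (bit < offset) {
                printf("%s: %d bits\n",
                       pulse ? "pulse" : "space", offset - bit);
                offset = bit;
            }
            data = ~data;    /* 0-runs and 1-runs swap roles */
        } while (offset > 0);
        return 0;
    }

A zero-length run stores nothing and falls through to the inversion, which is how the loop starts correctly whether the stream begins with a pulse or a space.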
diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c
index 64fb65a9a19f..028df5cb1828 100644
--- a/drivers/media/rc/ir-rcmm-decoder.c
+++ b/drivers/media/rc/ir-rcmm-decoder.c
@@ -79,7 +79,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT))
break;
data->state = STATE_LOW;
@@ -91,7 +91,7 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (ev.pulse)
break;
- if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2))
+ if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT))
break;
data->state = STATE_BUMP;
@@ -164,6 +164,8 @@ static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev)
break;
}
+ dev_dbg(&dev->dev, "RC-MM decode failed at count %d state %d (%uus %s)\n",
+ data->count, data->state, TO_US(ev.duration), TO_STR(ev.pulse));
data->state = STATE_INACTIVE;
return -EINVAL;
}
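Both hunks double the accepted deviation from RCMM_UNIT/2 to RCMM_UNIT. For reference, eq_margin() is an open interval test around the nominal duration; the sketch below matches my reading of the rc-core-priv.h helper:

    /* Accept d1 if it lies within +/- margin of the nominal duration d2. */
    static inline bool eq_margin(unsigned int d1, unsigned int d2,
                                 unsigned int margin)
    {
        return (d1 > (d2 - margin)) && (d1 < (d2 + margin));
    }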
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 3ab6cec0dc3b..07667c04c1d2 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -382,7 +382,7 @@ static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
ite_dbg("%s called", __func__);
/* clear the array just in case */
- memset(last_sent, 0, ARRAY_SIZE(last_sent));
+ memset(last_sent, 0, sizeof(last_sent));
spin_lock_irqsave(&dev->lock, flags);
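The memset fix is the classic element-count versus byte-count confusion: ARRAY_SIZE() yields elements, memset() wants bytes, so the old call zeroed only a quarter of an unsigned[] array on a typical 32-bit-int target. A runnable illustration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        unsigned int last_sent[16];

        printf("elements: %zu\n", ARRAY_SIZE(last_sent)); /* 16 */
        printf("bytes:    %zu\n", sizeof(last_sent));     /* 64 with 4-byte int */
        /*
         * memset(last_sent, 0, ARRAY_SIZE(last_sent)) would clear 16 of
         * those 64 bytes; memset(last_sent, 0, sizeof(last_sent)) clears
         * the whole array, which is what the fix above does.
         */
        return 0;
    }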
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index a56fc634d2d6..63261ef6380a 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-avermedia-rm-ks.o \
rc-avertv-303.o \
rc-azurewave-ad-tu700.o \
+ rc-beelink-gs1.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
@@ -114,6 +115,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-tt-1500.o \
rc-twinhan-dtv-cab-ci.o \
rc-twinhan1027.o \
+ rc-vega-s9x.o \
rc-videomate-m1f.o \
rc-videomate-s350.o \
rc-videomate-tv-pvr.o \
diff --git a/drivers/media/rc/keymaps/rc-beelink-gs1.c b/drivers/media/rc/keymaps/rc-beelink-gs1.c
new file mode 100644
index 000000000000..cedbd5d20bc7
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-beelink-gs1.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2019 Clément Péron
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * Keymap for the Beelink GS1 remote control
+ */
+
+static struct rc_map_table beelink_gs1_table[] = {
+ /*
+ * TV Keys (Power, Learn and Volume)
+ * { 0x40400d, KEY_TV },
+ * { 0x80f1, KEY_TV },
+ * { 0x80f3, KEY_TV },
+ * { 0x80f4, KEY_TV },
+ */
+
+ { 0x8051, KEY_POWER },
+ { 0x804d, KEY_MUTE },
+ { 0x8040, KEY_CONFIG },
+
+ { 0x8026, KEY_UP },
+ { 0x8028, KEY_DOWN },
+ { 0x8025, KEY_LEFT },
+ { 0x8027, KEY_RIGHT },
+ { 0x800d, KEY_OK },
+
+ { 0x8053, KEY_HOME },
+ { 0x80bc, KEY_MEDIA },
+ { 0x801b, KEY_BACK },
+ { 0x8049, KEY_MENU },
+
+ { 0x804e, KEY_VOLUMEUP },
+ { 0x8056, KEY_VOLUMEDOWN },
+
+ { 0x8054, KEY_SUBTITLE }, /* Web */
+ { 0x8052, KEY_EPG }, /* Media */
+
+ { 0x8041, KEY_CHANNELUP },
+ { 0x8042, KEY_CHANNELDOWN },
+
+ { 0x8031, KEY_1 },
+ { 0x8032, KEY_2 },
+ { 0x8033, KEY_3 },
+
+ { 0x8034, KEY_4 },
+ { 0x8035, KEY_5 },
+ { 0x8036, KEY_6 },
+
+ { 0x8037, KEY_7 },
+ { 0x8038, KEY_8 },
+ { 0x8039, KEY_9 },
+
+ { 0x8044, KEY_DELETE },
+ { 0x8030, KEY_0 },
+ { 0x8058, KEY_MODE }, /* # Input Method */
+};
+
+static struct rc_map_list beelink_gs1_map = {
+ .map = {
+ .scan = beelink_gs1_table,
+ .size = ARRAY_SIZE(beelink_gs1_table),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_BEELINK_GS1,
+ }
+};
+
+static int __init init_rc_map_beelink_gs1(void)
+{
+ return rc_map_register(&beelink_gs1_map);
+}
+
+static void __exit exit_rc_map_beelink_gs1(void)
+{
+ rc_map_unregister(&beelink_gs1_map);
+}
+
+module_init(init_rc_map_beelink_gs1)
+module_exit(exit_rc_map_beelink_gs1)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Clément Péron <peron.clem@gmail.com>");
diff --git a/drivers/media/rc/keymaps/rc-vega-s9x.c b/drivers/media/rc/keymaps/rc-vega-s9x.c
new file mode 100644
index 000000000000..bf210c4dc535
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-vega-s9x.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (C) 2019 Christian Hewitt <christianshewitt@gmail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+//
+// Keytable for the Tronsmart Vega S9x remote control
+//
+
+static struct rc_map_table vega_s9x[] = {
+ { 0x18, KEY_POWER },
+ { 0x17, KEY_MUTE }, // mouse
+
+ { 0x46, KEY_UP },
+ { 0x47, KEY_LEFT },
+ { 0x55, KEY_OK },
+ { 0x15, KEY_RIGHT },
+ { 0x16, KEY_DOWN },
+
+ { 0x06, KEY_HOME },
+ { 0x42, KEY_PLAYPAUSE},
+ { 0x40, KEY_BACK },
+
+ { 0x14, KEY_VOLUMEDOWN },
+ { 0x04, KEY_MENU },
+ { 0x10, KEY_VOLUMEUP },
+};
+
+static struct rc_map_list vega_s9x_map = {
+ .map = {
+ .scan = vega_s9x,
+ .size = ARRAY_SIZE(vega_s9x),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_VEGA_S9X,
+ }
+};
+
+static int __init init_rc_map_vega_s9x(void)
+{
+ return rc_map_register(&vega_s9x_map);
+}
+
+static void __exit exit_rc_map_vega_s9x(void)
+{
+ rc_map_unregister(&vega_s9x_map);
+}
+
+module_init(init_rc_map_vega_s9x)
+module_exit(exit_rc_map_vega_s9x)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index f078f8a3aec8..9a8c1cf54ac4 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -720,9 +720,7 @@ static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
.unlocked_ioctl = ir_lirc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ir_lirc_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.read = ir_lirc_read,
.poll = ir_lirc_poll,
.open = ir_lirc_open,
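compat_ptr_ioctl() is the generic helper for drivers whose ioctl arguments are all pointers: it translates the 32-bit user pointer and forwards to unlocked_ioctl, which lets the CONFIG_COMPAT ifdef disappear. Simplified from memory of fs/ioctl.c, it amounts to:

    long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
        if (!file->f_op->unlocked_ioctl)
            return -ENOIOCTLCMD;

        return file->f_op->unlocked_ioctl(file, cmd,
                                          (unsigned long)compat_ptr(arg));
    }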
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 3fc9829a9233..f9616158bcf4 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -564,7 +564,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
datasize = 4;
break;
case MCE_CMD_G_REVISION:
- datasize = 2;
+ datasize = 4;
break;
case MCE_RSP_EQWAKESUPPORT:
case MCE_RSP_GETWAKESOURCE:
@@ -600,14 +600,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
char *inout;
u8 cmd, subcmd, *data;
struct device *dev = ir->dev;
- int start, skip = 0;
u32 carrier, period;
- /* skip meaningless 0xb1 0x60 header bytes on orig receiver */
- if (ir->flags.microsoft_gen1 && !out && !offset)
- skip = 2;
-
- if (len <= skip)
+ if (offset < 0 || offset >= buf_len)
return;
dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
@@ -616,11 +611,32 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
inout = out ? "Request" : "Got";
- start = offset + skip;
- cmd = buf[start] & 0xff;
- subcmd = buf[start + 1] & 0xff;
- data = buf + start + 2;
+ cmd = buf[offset];
+ subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0;
+ data = &buf[offset] + 2;
+
+ /* Trace meaningless 0xb1 0x60 header bytes on original receiver */
+ if (ir->flags.microsoft_gen1 && !out && !offset) {
+ dev_dbg(dev, "MCE gen 1 header");
+ return;
+ }
+
+ /* Trace IR data header or trailer */
+ if (cmd != MCE_CMD_PORT_IR &&
+ (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) {
+ if (cmd == MCE_IRDATA_TRAILER)
+ dev_dbg(dev, "End of raw IR data");
+ else
+ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
+ cmd & MCE_PACKET_LENGTH_MASK);
+ return;
+ }
+
+ /* Unexpected end of buffer? */
+ if (offset + len > buf_len)
+ return;
+ /* Decode MCE command/response */
switch (cmd) {
case MCE_CMD_NULL:
if (subcmd == MCE_CMD_NULL)
@@ -644,7 +660,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
dev_dbg(dev, "Get hw/sw rev?");
else
dev_dbg(dev, "hw/sw rev %*ph",
- 4, &buf[start + 2]);
+ 4, &buf[offset + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
@@ -746,13 +762,6 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
default:
break;
}
-
- if (cmd == MCE_IRDATA_TRAILER)
- dev_dbg(dev, "End of raw IR data");
- else if ((cmd != MCE_CMD_PORT_IR) &&
- ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
- dev_dbg(dev, "Raw IR data, %d pulse/space samples",
- cmd & MCE_PACKET_LENGTH_MASK);
#endif
}
@@ -1136,32 +1145,62 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
}
/*
+ * Handle PORT_SYS/IR command response received from the MCE device.
+ *
+ * Assumes single response with all its data (not truncated)
+ * in buf_in[]. The response itself determines its total length
+ * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[].
+ *
* We don't do anything but print debug spew for many of the command bits
* we receive from the hardware, but some of them carry useful information
* that we store for later use.
*/
-static void mceusb_handle_command(struct mceusb_dev *ir, int index)
+static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
{
+ u8 cmd = buf_in[0];
+ u8 subcmd = buf_in[1];
+ u8 *hi = &buf_in[2]; /* read only when required */
+ u8 *lo = &buf_in[3]; /* read only when required */
struct ir_raw_event rawir = {};
- u8 hi = ir->buf_in[index + 1] & 0xff;
- u8 lo = ir->buf_in[index + 2] & 0xff;
u32 carrier_cycles;
u32 cycles_fix;
- switch (ir->buf_in[index]) {
- /* the one and only 5-byte return value command */
- case MCE_RSP_GETPORTSTATUS:
- if ((ir->buf_in[index + 4] & 0xff) == 0x00)
- ir->txports_cabled |= 1 << hi;
- break;
+ if (cmd == MCE_CMD_PORT_SYS) {
+ switch (subcmd) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+ if (buf_in[5] == 0)
+ ir->txports_cabled |= 1 << *hi;
+ break;
+
+ /* 1-byte return value commands */
+ case MCE_RSP_EQEMVER:
+ ir->emver = *hi;
+ break;
+
+ /* No return value commands */
+ case MCE_RSP_CMD_ILLEGAL:
+ ir->need_reset = true;
+ break;
+
+ default:
+ break;
+ }
+
+ return;
+ }
+ if (cmd != MCE_CMD_PORT_IR)
+ return;
+
+ switch (subcmd) {
/* 2-byte return value commands */
case MCE_RSP_EQIRTIMEOUT:
- ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
+ ir->rc->timeout = US_TO_NS((*hi << 8 | *lo) * MCE_TIME_UNIT);
break;
case MCE_RSP_EQIRNUMPORTS:
- ir->num_txports = hi;
- ir->num_rxports = lo;
+ ir->num_txports = *hi;
+ ir->num_rxports = *lo;
break;
case MCE_RSP_EQIRRXCFCNT:
/*
@@ -1174,7 +1213,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
*/
if (ir->carrier_report_enabled && ir->learning_active &&
ir->pulse_tunit > 0) {
- carrier_cycles = (hi << 8 | lo);
+ carrier_cycles = (*hi << 8 | *lo);
/*
* Adjust carrier cycle count by adding
* 1 missed count per pulse "on"
@@ -1192,24 +1231,24 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
break;
/* 1-byte return value commands */
- case MCE_RSP_EQEMVER:
- ir->emver = hi;
- break;
case MCE_RSP_EQIRTXPORTS:
- ir->tx_mask = hi;
+ ir->tx_mask = *hi;
break;
case MCE_RSP_EQIRRXPORTEN:
- ir->learning_active = ((hi & 0x02) == 0x02);
- if (ir->rxports_active != hi) {
+ ir->learning_active = ((*hi & 0x02) == 0x02);
+ if (ir->rxports_active != *hi) {
dev_info(ir->dev, "%s-range (0x%x) receiver active",
- ir->learning_active ? "short" : "long", hi);
- ir->rxports_active = hi;
+ ir->learning_active ? "short" : "long", *hi);
+ ir->rxports_active = *hi;
}
break;
+
+ /* No return value commands */
case MCE_RSP_CMD_ILLEGAL:
case MCE_RSP_TX_TIMEOUT:
ir->need_reset = true;
break;
+
default:
break;
}
@@ -1235,7 +1274,8 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false);
- mceusb_handle_command(ir, i);
+ if (i + ir->rem < buf_len)
+ mceusb_handle_command(ir, &ir->buf_in[i - 1]);
ir->parser_state = CMD_DATA;
break;
case PARSE_IRDATA:
@@ -1264,15 +1304,22 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir->rem--;
break;
case CMD_HEADER:
- /* decode mce packets of the form (84),AA,BB,CC,DD */
- /* IR data packets can span USB messages - rem */
ir->cmd = ir->buf_in[i];
if ((ir->cmd == MCE_CMD_PORT_IR) ||
((ir->cmd & MCE_PORT_MASK) !=
MCE_COMMAND_IRDATA)) {
+ /*
+ * got PORT_SYS, PORT_IR, or unknown
+ * command response prefix
+ */
ir->parser_state = SUBCMD;
continue;
}
+ /*
+ * got IR data prefix (0x80 + num_bytes)
+ * decode MCE packets of the form {0x83, AA, BB, CC}
+ * IR data packets can span USB messages
+ */
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false);
@@ -1296,6 +1343,14 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
+
+ /*
+ * Accept IR data spanning multiple rx buffers.
+ * Reject MCE command response spanning multiple rx buffers.
+ */
+ if (ir->parser_state != PARSE_IRDATA || !ir->rem)
+ ir->parser_state = CMD_HEADER;
+
if (event) {
dev_dbg(ir->dev, "processed IR data");
ir_raw_event_handle(ir->rc);
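The parser rework enforces a one-buffer rule for command responses: buf_in[i-1] is the command byte, buf_in[i] the subcommand, and mceusb_cmd_datasize() data bytes follow, so a response is handed to the handler only when all of it landed in this rx transfer. Raw IR data may still span buffers. A minimal sketch of the guard, with hypothetical names:

    struct mce_ctx;
    int cmd_datasize(unsigned char cmd, unsigned char subcmd);
    void handle_command(struct mce_ctx *ctx, unsigned char *resp);

    static void parse_one(struct mce_ctx *ctx, unsigned char *buf,
                          int i, int buf_len)
    {
        int rem = cmd_datasize(buf[i - 1], buf[i]);

        /*
         * Only a complete response (command, subcommand and rem data
         * bytes, all inside this buffer) reaches the handler; a
         * response truncated at the end of a USB transfer is dropped.
         */
        if (i + rem < buf_len)
            handle_command(ctx, &buf[i - 1]);
    }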
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 9f21b3e8b377..5f36244cc34f 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Remote Controller core raw events header
*
* Copyright (C) 2010 by Mauro Carvalho Chehab
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 13da4c5c7d17..7741151606ef 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
set_bit(MSC_SCAN, dev->input_dev->mscbit);
/* Pointer/mouse events */
+ set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
set_bit(EV_REL, dev->input_dev->evbit);
set_bit(REL_X, dev->input_dev->relbit);
set_bit(REL_Y, dev->input_dev->relbit);
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
index 451ec4e9dcfa..b8eb5bc4d9be 100644
--- a/drivers/media/rc/tango-ir.c
+++ b/drivers/media/rc/tango-ir.c
@@ -157,20 +157,10 @@ static int tango_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rc_dev *rc;
struct tango_ir *ir;
- struct resource *rc5_res;
- struct resource *rc6_res;
u64 clkrate, clkdiv;
int irq, err;
u32 val;
- rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!rc5_res)
- return -EINVAL;
-
- rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!rc6_res)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -EINVAL;
@@ -179,11 +169,11 @@ static int tango_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ ir->rc5_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->rc5_base))
return PTR_ERR(ir->rc5_base);
- ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ ir->rc6_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ir->rc6_base))
return PTR_ERR(ir->rc6_base);
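devm_platform_ioremap_resource() collapses the get-resource/ioremap pair, and devm_ioremap_resource() already returns ERR_PTR(-EINVAL) for a NULL resource, which is why the explicit !rc5_res/!rc6_res checks could be dropped. To my knowledge the helper is essentially:

    void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                                 unsigned int index)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
    }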
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 83ca5dc047ea..0e26d22f0b26 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -206,7 +206,7 @@ static int qm1d1c0042_set_params(struct dvb_frontend *fe)
if (ret < 0)
return ret;
- a = (freq + state->cfg.xtal_freq / 2) / state->cfg.xtal_freq;
+ a = DIV_ROUND_CLOSEST(freq, state->cfg.xtal_freq);
state->regs[0x06] &= 0x40;
state->regs[0x06] |= (a - 12) / 4;
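For unsigned operands, DIV_ROUND_CLOSEST(x, d) expands to the same add-half-then-divide the driver open-coded (the kernel macro additionally copes with signed values); the change is purely a readability cleanup. A quick userspace check:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        unsigned int freq = 1318000, xtal = 16000; /* invented sample values */

        printf("%u vs %u\n",
               (freq + xtal / 2) / xtal,        /* old open-coded form */
               DIV_ROUND_CLOSEST(freq, xtal));  /* macro, same result */
        return 0;
    }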
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index e87040d6eca7..898e0f9f8b70 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -118,6 +118,11 @@ static int si2157_init(struct dvb_frontend *fe)
goto err;
}
+ if (dev->dont_load_firmware) {
+ dev_info(&client->dev, "device is buggy, skipping firmware download\n");
+ goto skip_fw_download;
+ }
+
/* query chip revision */
memcpy(cmd.args, "\x02", 1);
cmd.wlen = 1;
@@ -440,6 +445,7 @@ static int si2157_probe(struct i2c_client *client,
i2c_set_clientdata(client, dev);
dev->fe = cfg->fe;
dev->inversion = cfg->inversion;
+ dev->dont_load_firmware = cfg->dont_load_firmware;
dev->if_port = cfg->if_port;
dev->chiptype = (u8)id->driver_data;
dev->if_frequency = 5000000; /* default value of property 0x0706 */
diff --git a/drivers/media/tuners/si2157.h b/drivers/media/tuners/si2157.h
index c22ca784f43f..ffdece3c2eaa 100644
--- a/drivers/media/tuners/si2157.h
+++ b/drivers/media/tuners/si2157.h
@@ -11,29 +11,34 @@
#include <media/media-device.h>
#include <media/dvb_frontend.h>
-/*
- * I2C address
- * 0x60
+/**
+ * struct si2157_config - configuration parameters for si2157
+ *
+ * @fe:
+ * frontend returned by driver
+ * @mdev:
+ * media device returned by driver
+ * @inversion:
+ * spectral inversion
+ * @dont_load_firmware:
+ * Instead of uploading a new firmware, use the existing one
+ * @if_port:
+ * Port selection
+ * Select the RF interface to use (pins 9+11 or 12+13)
+ *
+ * Note:
+ * The I2C address of this tuner is 0x60.
*/
struct si2157_config {
- /*
- * frontend
- */
struct dvb_frontend *fe;
#if defined(CONFIG_MEDIA_CONTROLLER)
struct media_device *mdev;
#endif
- /*
- * Spectral Inversion
- */
- bool inversion;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
- /*
- * Port selection
- * Select the RF interface to use (pins 9+11 or 12+13)
- */
u8 if_port;
};
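A hedged usage sketch of the new flag; the surrounding function and the if_port value are hypothetical, only the struct fields come from si2157.h:

    #include <linux/string.h>

    static void fill_tuner_cfg(struct si2157_config *cfg,
                               struct dvb_frontend *fe)
    {
        memset(cfg, 0, sizeof(*cfg));
        cfg->fe = fe;
        cfg->if_port = 1;              /* select one of the two RF inputs */
        cfg->dont_load_firmware = 1;   /* keep the firmware already on the chip */
    }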
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 2bda903358da..778f81b39996 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -23,8 +23,9 @@ enum si2157_pads {
struct si2157_dev {
struct mutex i2c_mutex;
struct dvb_frontend *fe;
- bool active;
- bool inversion;
+ unsigned int active:1;
+ unsigned int inversion:1;
+ unsigned int dont_load_firmware:1;
u8 chiptype;
u8 if_port;
u32 if_frequency;
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 50d017a4822a..fcca39d3e006 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028_types
*
* This file includes internal types to be used inside tuner-xc2028.
diff --git a/drivers/media/tuners/tuner-xc2028.h b/drivers/media/tuners/tuner-xc2028.h
index 7b58bc06e35c..2dd45d0765d7 100644
--- a/drivers/media/tuners/tuner-xc2028.h
+++ b/drivers/media/tuners/tuner-xc2028.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tuner-xc2028
*
* Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 1826ff825c2e..039963a7765b 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -295,7 +295,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c,
mutex_unlock(&fc_usb->data_mutex);
- return 0;
+ return ret;
}
/* actual bus specific access functions,
@@ -504,7 +504,13 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
/* use the alternate setting with the largest buffer */
- usb_set_interface(fc_usb->udev,0,1);
+ int ret = usb_set_interface(fc_usb->udev, 0, 1);
+
+ if (ret) {
+ err("set interface failed.");
+ return ret;
+ }
+
switch (fc_usb->udev->speed) {
case USB_SPEED_LOW:
err("cannot handle USB speed because it is too slow.");
@@ -538,6 +544,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
struct flexcop_device *fc = NULL;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
err("out of memory\n");
return -ENOMEM;
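The bNumEndpoints test is the standard probe-time defense against malicious or broken devices whose descriptors advertise fewer endpoints than the driver later dereferences. The pattern, reduced to a sketch with placeholder names:

    static int foo_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
    {
        /*
         * Validate the descriptor before anything touches
         * cur_altsetting->endpoint[0] further down the probe path.
         */
        if (intf->cur_altsetting->desc.bNumEndpoints < 1)
            return -ENODEV;

        /* ... rest of probe ... */
        return 0;
    }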
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 74f3b29d9c60..2fe2b2d335ba 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -4,7 +4,7 @@ config VIDEO_CX231XX
depends on VIDEO_DEV && I2C && I2C_MUX
select VIDEO_TUNER
select VIDEO_TVEEPROM
- select VIDEOBUF_VMALLOC
+ select VIDEOBUF2_VMALLOC
select VIDEO_CX25840
select VIDEO_CX2341X
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 6d218a036966..1aec4459f50a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -60,10 +60,6 @@
#define MCI_MODE_MEMORY_READ 0x000
#define MCI_MODE_MEMORY_WRITE 0x4000
-static unsigned int mpegbufs = 8;
-module_param(mpegbufs, int, 0644);
-MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
-
static unsigned int mpeglines = 128;
module_param(mpeglines, int, 0644);
MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
@@ -1080,16 +1076,6 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
return 0;
}
-static void cx231xx_417_check_encoder(struct cx231xx *dev)
-{
- u32 status, seq;
-
- status = 0;
- seq = 0;
- cx231xx_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
- dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
-}
-
static void cx231xx_codec_settings(struct cx231xx *dev)
{
dprintk(1, "%s()\n", __func__);
@@ -1227,40 +1213,25 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
/* ------------------------------------------------------------------ */
-static int bb_buf_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned int size = mpeglinesize * mpeglines;
- fh->dev->ts1.ts_packet_size = mpeglinesize;
- fh->dev->ts1.ts_packet_count = mpeglines;
+ dev->ts1.ts_packet_size = mpeglinesize;
+ dev->ts1.ts_packet_count = mpeglines;
- *size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
- *count = mpegbufs;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
- return 0;
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = mpeglinesize * mpeglines;
- BUG_ON(in_interrupt());
-
- spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
- spin_unlock_irqrestore(&dev->video_mode.slock, flags);
- videobuf_waiton(vq, &buf->vb, 0, 0);
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ return 0;
}
static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *urb,
@@ -1276,13 +1247,13 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
return;
buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ struct cx231xx_buffer, list);
dev->video_mode.isoc_ctl.buf = buf;
dma_q->mpeg_buffer_done = 1;
}
/* Fill buffer */
buf = dev->video_mode.isoc_ctl.buf;
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if ((dma_q->mpeg_buffer_completed+len) <
mpeglines*mpeglinesize) {
@@ -1306,11 +1277,10 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur
memcpy(vbuf+dma_q->mpeg_buffer_completed,
data, tail_data);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
dma_q->mpeg_buffer_completed = 0;
if (len - tail_data > 0) {
@@ -1331,17 +1301,15 @@ static void buffer_filled(char *data, int len, struct urb *urb,
if (list_empty(&dma_q->active))
return;
- buf = list_entry(dma_q->active.next,
- struct cx231xx_buffer, vb.queue);
+ buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Fill buffer */
- vbuf = videobuf_to_vmalloc(&buf->vb);
+ vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
memcpy(vbuf, data, len);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static int cx231xx_isoc_copy(struct cx231xx *dev, struct urb *urb)
@@ -1394,100 +1362,104 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
return 0;
}
-static int bb_buf_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb, enum v4l2_field field)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = q->priv_data;
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
- int size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- if (0 != buf->vb.baddr && buf->vb.bsize < size)
- return -EINVAL;
- buf->vb.width = fh->dev->ts1.ts_packet_size;
- buf->vb.height = fh->dev->ts1.ts_packet_count;
- buf->vb.size = size;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(q, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
}
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
+
+ vidq->sequence = 0;
dev->mode_tv = 1;
- if (urb_init) {
- rc = cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- rc = cx231xx_unmute_audio(dev);
- if (dev->USE_ISO) {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 4);
- rc = cx231xx_init_isoc(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_isoc_copy);
- } else {
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- rc = cx231xx_init_bulk(dev, mpeglines,
- mpegbufs,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- if (rc < 0)
- goto fail;
- }
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
+ cx231xx_set_gpio_value(dev, 2, 0);
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
+ cx231xx_initialize_codec(dev);
+
+ cx231xx_start_TS1(dev);
+
+ cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
+ cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, 320, 5,
+ dev->ts1_mode.max_pkt_size,
+ cx231xx_bulk_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
-fail:
- free_buffer(q, buf);
- return rc;
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void bb_buf_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_fh *fh = q->priv_data;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ unsigned long flags;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ call_all(dev, video, s_stream, 0);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ cx231xx_stop_TS1(dev);
-}
+ /* do this before setting alternate! */
+ if (dev->USE_ISO)
+ cx231xx_uninit_isoc(dev);
+ else
+ cx231xx_uninit_bulk(dev);
+ cx231xx_set_mode(dev, CX231XX_SUSPEND);
-static void bb_buf_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
-{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- /*struct cx231xx_fh *fh = q->priv_data;*/
- /*struct cx231xx *dev = (struct cx231xx *)fh->dev;*/
+ cx231xx_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
+ CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
+ CX231xx_RAW_BITS_NONE);
- free_buffer(q, buf);
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_qops = {
- .buf_setup = bb_buf_setup,
- .buf_prepare = bb_buf_prepare,
- .buf_queue = bb_buf_queue,
- .buf_release = bb_buf_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------ */
@@ -1495,8 +1467,7 @@ static const struct videobuf_queue_ops cx231xx_qops = {
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->encodernorm.id & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1511,8 +1482,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1533,8 +1503,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*norm = dev->encodernorm.id;
return 0;
@@ -1542,8 +1511,7 @@ static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cx231xx_tvnorms); i++)
@@ -1575,8 +1543,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctl)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev *sd;
dprintk(3, "enter vidioc_s_ctrl()\n");
@@ -1601,8 +1568,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_g_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1621,8 +1587,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
dprintk(3, "enter vidioc_try_fmt_vid_cap()\n");
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -1636,230 +1601,21 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_reqbufs(&fh->vidq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_querybuf(&fh->vidq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_qbuf(&fh->vidq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
-
- return videobuf_dqbuf(&fh->vidq, b, file->f_flags & O_NONBLOCK);
-}
-
-
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "enter vidioc_streamon()\n");
- cx231xx_set_alt_setting(dev, INDEX_TS1, 0);
- cx231xx_set_mode(dev, CX231XX_DIGITAL_MODE);
- if (dev->USE_ISO)
- cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else {
- cx231xx_init_bulk(dev, 320,
- 5,
- dev->ts1_mode.max_pkt_size,
- cx231xx_bulk_copy);
- }
- dprintk(3, "exit vidioc_streamon()\n");
- return videobuf_streamon(&fh->vidq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- return videobuf_streamoff(&fh->vidq);
-}
-
static int vidioc_log_status(struct file *file, void *priv)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
call_all(dev, core, log_status);
return v4l2_ctrl_log_status(file, priv);
}
-static int mpeg_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct cx231xx *dev = video_drvdata(file);
- struct cx231xx_fh *fh;
-
- dprintk(2, "%s()\n", __func__);
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
-
- /* allocate + initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (NULL == fh) {
- mutex_unlock(&dev->lock);
- return -ENOMEM;
- }
-
- file->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- fh->dev = dev;
-
-
- videobuf_queue_vmalloc_init(&fh->vidq, &cx231xx_qops,
- NULL, &dev->video_mode.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer), fh, &dev->lock);
-/*
- videobuf_queue_sg_init(&fh->vidq, &cx231xx_qops,
- dev->dev, &dev->ts1.slock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
-*/
-
- cx231xx_set_alt_setting(dev, INDEX_VANC, 1);
- cx231xx_set_gpio_value(dev, 2, 0);
-
- cx231xx_initialize_codec(dev);
-
- mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
- cx231xx_start_TS1(dev);
-
- return 0;
-}
-
-static int mpeg_release(struct file *file)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- dprintk(3, "mpeg_release()! dev=0x%p\n", dev);
-
- mutex_lock(&dev->lock);
-
- cx231xx_stop_TS1(dev);
-
- /* do this before setting alternate! */
- if (dev->USE_ISO)
- cx231xx_uninit_isoc(dev);
- else
- cx231xx_uninit_bulk(dev);
- cx231xx_set_mode(dev, CX231XX_SUSPEND);
-
- cx231xx_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
- CX231xx_END_NOW, CX231xx_MPEG_CAPTURE,
- CX231xx_RAW_BITS_NONE);
-
- /* FIXME: Review this crap */
- /* Shut device down on last close */
- if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
- if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
- /* stop mpeg capture */
-
- msleep(500);
- cx231xx_417_check_encoder(dev);
-
- }
- }
-
- if (fh->vidq.streaming)
- videobuf_streamoff(&fh->vidq);
- if (fh->vidq.reading)
- videobuf_read_stop(&fh->vidq);
-
- videobuf_mmap_free(&fh->vidq);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static ssize_t mpeg_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
-
- /* Deal w/ A/V decoder * and mpeg encoder sync issues. */
- /* Start mpeg encoder on first read. */
- if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) {
- if (atomic_inc_return(&dev->v4l_reader_count) == 1) {
- if (cx231xx_initialize_codec(dev) < 0)
- return -EINVAL;
- }
- }
-
- return videobuf_read_stream(&fh->vidq, data, count, ppos, 0,
- file->f_flags & O_NONBLOCK);
-}
-
-static __poll_t mpeg_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = file->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(file, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(file, &fh->vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = file->private_data;
-
- dprintk(2, "%s()\n", __func__);
-
- return videobuf_mmap_mapper(&fh->vidq, vma);
-}
-
static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
- .open = mpeg_open,
- .release = mpeg_release,
- .read = mpeg_read,
- .poll = mpeg_poll,
- .mmap = mpeg_mmap,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -1881,12 +1637,12 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_try_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = vidioc_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = cx231xx_g_register,
@@ -1980,6 +1736,7 @@ int cx231xx_417_register(struct cx231xx *dev)
/* FIXME: Port1 hardcoded here */
int err = -ENODEV;
struct cx231xx_tsport *tsport = &dev->ts1;
+ struct vb2_queue *q;
dprintk(1, "%s()\n", __func__);
@@ -2017,6 +1774,21 @@ int cx231xx_417_register(struct cx231xx *dev)
/* Allocate and initialize V4L video device */
cx231xx_video_dev_init(dev, dev->udev,
&dev->v4l_device, &cx231xx_mpeg_template, "mpeg");
+ q = &dev->mpegq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ err = vb2_queue_init(q);
+ if (err)
+ return err;
+ dev->v4l_device.queue = q;
+
err = video_register_device(&dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
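Both converted files end up with the same vb2 idiom: queued buffers live on a driver list under a spinlock, and every stop or failed-start path must hand each of them back to vb2 with an explicit state (VB2_BUF_STATE_QUEUED when start_streaming fails, VB2_BUF_STATE_ERROR on stop), exactly as the return_all_buffers() helpers above do. A distilled sketch with placeholder foo names:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <media/videobuf2-v4l2.h>

    struct foo_buffer {
        struct vb2_v4l2_buffer vb;    /* must be first */
        struct list_head list;
    };

    struct foo_dev {
        spinlock_t slock;
        struct list_head active;
    };

    static void foo_return_all_buffers(struct foo_dev *dev,
                                       enum vb2_buffer_state state)
    {
        struct foo_buffer *buf, *node;
        unsigned long flags;

        spin_lock_irqsave(&dev->slock, flags);
        list_for_each_entry_safe(buf, node, &dev->active, list) {
            list_del(&buf->list);
            vb2_buffer_done(&buf->vb.vb2_buf, state);
        }
        spin_unlock_irqrestore(&dev->slock, flags);
    }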
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 9ef362e221df..fd6e2df3d1b7 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -14,7 +14,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
index d417b5fe4093..0974965e848f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
@@ -1240,7 +1240,7 @@ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev)
int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
u8 analog_or_digital)
{
- int status = 0;
+ int status;
/* first set the direction to output */
status = cx231xx_set_gpio_direction(dev,
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e123e74c549e..92efe6c1f47b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1479,13 +1479,11 @@ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev,
goto err_dev_init;
}
- /* init video dma queues */
+ /* init video dma queue */
INIT_LIST_HEAD(&dev->video_mode.vidq.active);
- INIT_LIST_HEAD(&dev->video_mode.vidq.queued);
- /* init vbi dma queues */
+ /* init vbi dma queue */
INIT_LIST_HEAD(&dev->vbi_mode.vidq.active);
- INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued);
/* Reset other chips required if they are tied up with GPIO pins */
cx231xx_add_into_devlist(dev);
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index fba7ccdf5a25..d2f143a096d1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -153,131 +153,98 @@ static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
Vbi buf operations
------------------------------------------------------------------*/
-static int
-vbi_buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
+static int vbi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
u32 height = 0;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- *size = (dev->width * height * 2 * 2);
- if (0 == *count)
- *count = CX231XX_DEF_VBI_BUF;
-
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
-
+ *nplanes = 1;
+ sizes[0] = (dev->width * height * 2 * 2);
return 0;
}
/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
-{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
-
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
- spin_lock_irqsave(&dev->vbi_mode.slock, flags);
- if (dev->vbi_mode.bulk_ctl.buf == buf)
- dev->vbi_mode.bulk_ctl.buf = NULL;
- spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-static int
-vbi_buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vbi_buf_prepare(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
u32 height = 0;
+ u32 size;
height = ((dev->norm & V4L2_STD_625_50) ?
PAL_VBI_LINES : NTSC_VBI_LINES);
- buf->vb.size = ((dev->width << 1) * height * 2);
+ size = ((dev->width << 1) * height * 2);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ if (vb2_plane_size(vb, 0) < size)
return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = height;
- buf->vb.field = field;
- buf->vb.field = V4L2_FIELD_SEQ_TB;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
-
- if (!dev->vbi_mode.bulk_ctl.num_bufs)
- urb_init = 1;
-
- if (urb_init) {
- rc = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
- CX231XX_NUM_VBI_BUFS,
- dev->vbi_mode.alt_max_pkt_size[0],
- cx231xx_isoc_vbi_copy);
- if (rc < 0)
- goto fail;
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
+ vb2_set_plane_payload(vb, 0, size);
return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
}
-static void
-vbi_buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static void vbi_buf_queue(struct vb2_buffer *vb)
{
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ unsigned long flags;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ list_add_tail(&buf->list, &vidq->active);
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
+}
+
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
+{
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->vbi_mode.slock, flags);
+ dev->vbi_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}
-static void vbi_buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
+ struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
+ int ret;
+
+ vidq->sequence = 0;
+ ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
+ CX231XX_NUM_VBI_BUFS,
+ dev->vbi_mode.alt_max_pkt_size[0],
+ cx231xx_isoc_vbi_copy);
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+static void vbi_stop_streaming(struct vb2_queue *vq)
+{
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-const struct videobuf_queue_ops cx231xx_vbi_qops = {
- .buf_setup = vbi_buffer_setup,
- .buf_prepare = vbi_buffer_prepare,
- .buf_queue = vbi_buffer_queue,
- .buf_release = vbi_buffer_release,
+struct vb2_ops cx231xx_vbi_qops = {
+ .queue_setup = vbi_queue_setup,
+ .buf_prepare = vbi_buf_prepare,
+ .buf_queue = vbi_buf_queue,
+ .start_streaming = vbi_start_streaming,
+ .stop_streaming = vbi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -512,16 +479,15 @@ static inline void vbi_buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
/* Advise that buffer was filled */
- /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.i); */
+ /* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
dev->vbi_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
@@ -611,11 +577,11 @@ static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
/* Cleans up buffer - Useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));
dev->vbi_mode.bulk_ctl.buf = *buf;
@@ -656,7 +622,7 @@ int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -EINVAL;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
if (dma_q->bytes_left_in_line != _line_size) {
current_line_bytes_copied =
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 7cddd629fbfc..0b21bee5fa30 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -10,7 +10,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern const struct videobuf_queue_ops cx231xx_vbi_qops;
+extern struct vb2_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 9b51f07a729e..69abafaebbf3 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -58,10 +58,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(CX231XX_VERSION);
-static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
-static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
+static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int vbi_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
+static unsigned int radio_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = -1U };
module_param_array(card, int, NULL, 0444);
module_param_array(video_nr, int, NULL, 0444);
@@ -166,18 +166,19 @@ static inline void buffer_filled(struct cx231xx *dev,
struct cx231xx_buffer *buf)
{
	/* Advise that the buffer was filled */
- cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i);
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- buf->vb.ts = ktime_get_ns();
+ cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.vb2_buf.index);
+ buf->vb.sequence = dma_q->sequence++;
+ buf->vb.field = V4L2_FIELD_INTERLACED;
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = NULL;
else
dev->video_mode.bulk_ctl.buf = NULL;
- list_del(&buf->vb.queue);
- wake_up(&buf->vb.done);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
@@ -241,11 +242,11 @@ static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
}
/* Get the next buffer */
- *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, vb.queue);
+ *buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);
	/* Clean up the buffer - useful for testing for frame/URB loss */
- outp = videobuf_to_vmalloc(&(*buf)->vb);
- memset(outp, 0, (*buf)->vb.size);
+ outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
+ memset(outp, 0, dev->size);
if (dev->USE_ISO)
dev->video_mode.isoc_ctl.buf = *buf;
@@ -653,7 +654,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
if (buf == NULL)
return -1;
- p_out_buffer = videobuf_to_vmalloc(&buf->vb);
+ p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
current_line_bytes_copied = _line_size - dma_q->bytes_left_in_line;
@@ -672,7 +673,7 @@ int cx231xx_do_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
bytes_to_copy : dma_q->bytes_left_in_line;
- if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + buf->vb.size))
+ if ((u8 *)(startwrite + lencopy) > (u8 *)(p_out_buffer + dev->size))
return 0;
/* The below copies the UYVY data straight into video buffer */
@@ -708,149 +709,98 @@ u8 cx231xx_is_buffer_done(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q)
Videobuf operations
------------------------------------------------------------------*/
-static int
-buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+static int queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- *size = (fh->dev->width * fh->dev->height * dev->format->depth + 7)>>3;
- if (0 == *count)
- *count = CX231XX_DEF_BUF;
+ dev->size = (dev->width * dev->height * dev->format->depth + 7) >> 3;
- if (*count < CX231XX_MIN_BUF)
- *count = CX231XX_MIN_BUF;
+ if (vq->num_buffers + *nbuffers < CX231XX_MIN_BUF)
+ *nbuffers = CX231XX_MIN_BUF - vq->num_buffers;
-
- cx231xx_enable_analog_tuner(dev);
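+	/*
+	 * A non-zero *nplanes means the call came via VIDIOC_CREATE_BUFS;
+	 * the requested plane size must then hold at least one full frame.
+	 */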
+ if (*nplanes)
+ return sizes[0] < dev->size ? -EINVAL : 0;
+ *nplanes = 1;
+ sizes[0] = dev->size;
return 0;
}
-/* This is called *without* dev->slock held; please keep it that way */
-static void free_buffer(struct videobuf_queue *vq, struct cx231xx_buffer *buf)
+static void buffer_queue(struct vb2_buffer *vb)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
- unsigned long flags = 0;
-
- BUG_ON(in_interrupt());
-
- /* We used to wait for the buffer to finish here, but this didn't work
- because, as we were keeping the state as VIDEOBUF_QUEUED,
- videobuf_queue_cancel marked it as finished for us.
- (Also, it could wedge forever if the hardware was misconfigured.)
+ struct cx231xx_buffer *buf =
+ container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
+ struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ unsigned long flags;
- This should be safe; by the time we get here, the buffer isn't
- queued anymore. If we ever start marking the buffers as
- VIDEOBUF_ACTIVE, it won't be, though.
- */
spin_lock_irqsave(&dev->video_mode.slock, flags);
- if (dev->USE_ISO) {
- if (dev->video_mode.isoc_ctl.buf == buf)
- dev->video_mode.isoc_ctl.buf = NULL;
- } else {
- if (dev->video_mode.bulk_ctl.buf == buf)
- dev->video_mode.bulk_ctl.buf = NULL;
- }
+ list_add_tail(&buf->list, &vidq->active);
spin_unlock_irqrestore(&dev->video_mode.slock, flags);
-
- videobuf_vmalloc_free(&buf->vb);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
-static int
-buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void return_all_buffers(struct cx231xx *dev,
+ enum vb2_buffer_state state)
{
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx *dev = fh->dev;
- int rc = 0, urb_init = 0;
-
- /* The only currently supported format is 16 bits/pixel */
- buf->vb.size = (fh->dev->width * fh->dev->height * dev->format->depth
- + 7) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- buf->vb.width = dev->width;
- buf->vb.height = dev->height;
- buf->vb.field = field;
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc < 0)
- goto fail;
- }
+ struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ struct cx231xx_buffer *buf, *node;
+ unsigned long flags;
- if (dev->USE_ISO) {
- if (!dev->video_mode.isoc_ctl.num_bufs)
- urb_init = 1;
- } else {
- if (!dev->video_mode.bulk_ctl.num_bufs)
- urb_init = 1;
- }
- dev_dbg(dev->dev,
- "urb_init=%d dev->video_mode.max_pkt_size=%d\n",
- urb_init, dev->video_mode.max_pkt_size);
- if (urb_init) {
- dev->mode_tv = 0;
- if (dev->USE_ISO)
- rc = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_isoc_copy);
- else
- rc = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
- CX231XX_NUM_BUFS,
- dev->video_mode.max_pkt_size,
- cx231xx_bulk_copy);
- if (rc < 0)
- goto fail;
+ spin_lock_irqsave(&dev->video_mode.slock, flags);
+ if (dev->USE_ISO)
+ dev->video_mode.isoc_ctl.buf = NULL;
+ else
+ dev->video_mode.bulk_ctl.buf = NULL;
+ list_for_each_entry_safe(buf, node, &vidq->active, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
}
-
- buf->vb.state = VIDEOBUF_PREPARED;
-
- return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
+ spin_unlock_irqrestore(&dev->video_mode.slock, flags);
}
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
struct cx231xx_dmaqueue *vidq = &dev->video_mode.vidq;
+ int ret = 0;
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue, &vidq->active);
+ vidq->sequence = 0;
+ dev->mode_tv = 0;
+ cx231xx_enable_analog_tuner(dev);
+ if (dev->USE_ISO)
+ ret = cx231xx_init_isoc(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_isoc_copy);
+ else
+ ret = cx231xx_init_bulk(dev, CX231XX_NUM_PACKETS,
+ CX231XX_NUM_BUFS,
+ dev->video_mode.max_pkt_size,
+ cx231xx_bulk_copy);
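+	/* hand the buffers back as QUEUED so userspace can retry streamon */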
+ if (ret)
+ return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
+ call_all(dev, video, s_stream, 1);
+ return ret;
}
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *vq)
{
- struct cx231xx_buffer *buf =
- container_of(vb, struct cx231xx_buffer, vb);
- struct cx231xx_fh *fh = vq->priv_data;
- struct cx231xx *dev = (struct cx231xx *)fh->dev;
-
- cx231xx_isocdbg("cx231xx: called buffer_release\n");
+ struct cx231xx *dev = vb2_get_drv_priv(vq);
- free_buffer(vq, buf);
+ call_all(dev, video, s_stream, 0);
+ return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}
-static const struct videobuf_queue_ops cx231xx_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
+static struct vb2_ops cx231xx_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/********************* v4l2 interface **************************************/
@@ -872,58 +822,6 @@ void video_mux(struct cx231xx *dev, int index)
cx231xx_do_mode_ctrl_overrides(dev);
}
-/* Usage lock check functions */
-static int res_get(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
- int rc = 0;
-
- /* This instance already has stream_on */
- if (fh->stream_on)
- return rc;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (dev->stream_on)
- return -EBUSY;
- dev->stream_on = 1;
- } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- if (dev->vbi_stream_on)
- return -EBUSY;
- dev->vbi_stream_on = 1;
- } else
- return -EINVAL;
-
- fh->stream_on = 1;
-
- return rc;
-}
-
-static int res_check(struct cx231xx_fh *fh)
-{
- return fh->stream_on;
-}
-
-static void res_free(struct cx231xx_fh *fh)
-{
- struct cx231xx *dev = fh->dev;
-
- fh->stream_on = 0;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- dev->stream_on = 0;
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
- dev->vbi_stream_on = 0;
-}
-
-static int check_dev(struct cx231xx *dev)
-{
- if (dev->state & DEV_DISCONNECTED) {
- dev_err(dev->dev, "v4l2 ioctl: device not present\n");
- return -ENODEV;
- }
- return 0;
-}
-
/* ------------------------------------------------------------------
IOCTL vidioc handling
------------------------------------------------------------------*/
@@ -931,8 +829,7 @@ static int check_dev(struct cx231xx *dev)
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
@@ -960,8 +857,7 @@ static struct cx231xx_fmt *format_by_fourcc(unsigned int fourcc)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
unsigned int width = f->fmt.pix.width;
unsigned int height = f->fmt.pix.height;
unsigned int maxw = norm_maxw(dev);
@@ -993,39 +889,25 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
- struct cx231xx_fmt *fmt;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ int rc;
- rc = check_dev(dev);
- if (rc < 0)
+ rc = vidioc_try_fmt_vid_cap(file, priv, f);
+ if (rc)
return rc;
- vidioc_try_fmt_vid_cap(file, priv, f);
-
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt)
- return -EINVAL;
-
- if (videobuf_queue_is_busy(&fh->vb_vidq)) {
+ if (vb2_is_busy(&dev->vidq)) {
dev_err(dev->dev, "%s: queue busy\n", __func__);
return -EBUSY;
}
- if (dev->stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s: device in use by another fh\n", __func__);
- return -EBUSY;
- }
-
/* set new image size */
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- dev->format = fmt;
+ dev->format = format_by_fourcc(f->fmt.pix.pixelformat);
v4l2_fill_mbus_format(&format.format, &f->fmt.pix, MEDIA_BUS_FMT_FIXED);
call_all(dev, pad, set_fmt, NULL, &format);
@@ -1036,8 +918,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*id = dev->norm;
return 0;
@@ -1045,21 +926,15 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (dev->norm == norm)
return 0;
- if (videobuf_queue_is_busy(&fh->vb_vidq))
+ if (vb2_is_busy(&dev->vidq))
return -EBUSY;
dev->norm = norm;
@@ -1141,8 +1016,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev)
int cx231xx_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
u32 gen_stat;
unsigned int n;
int ret;
@@ -1181,8 +1055,7 @@ int cx231xx_enum_input(struct file *file, void *priv,
int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
*i = dev->video_input;
@@ -1191,14 +1064,9 @@ int cx231xx_g_input(struct file *file, void *priv, unsigned int *i)
int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
+ struct cx231xx *dev = video_drvdata(file);
dev->mode_tv = 0;
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
if (i >= MAX_CX231XX_INPUT)
return -EINVAL;
@@ -1220,13 +1088,7 @@ int cx231xx_s_input(struct file *file, void *priv, unsigned int i)
int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
+ struct cx231xx *dev = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -1244,27 +1106,15 @@ int cx231xx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
int cx231xx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != t->index)
return -EINVAL;
-#if 0
- call_all(dev, tuner, s_tuner, t);
-#endif
return 0;
}
int cx231xx_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (f->tuner)
return -EINVAL;
@@ -1277,8 +1127,7 @@ int cx231xx_g_frequency(struct file *file, void *priv,
int cx231xx_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
struct v4l2_frequency new_freq = *f;
int rc;
u32 if_frequency = 5400000;
@@ -1287,10 +1136,6 @@ int cx231xx_s_frequency(struct file *file, void *priv,
"Enter vidioc_s_frequency()f->frequency=%d;f->type=%d\n",
f->frequency, f->type);
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
if (0 != f->tuner)
return -EINVAL;
@@ -1365,8 +1210,7 @@ int cx231xx_g_chip_info(struct file *file, void *fh,
int cx231xx_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 value[4] = { 0, 0, 0, 0 };
u32 data = 0;
@@ -1424,8 +1268,7 @@ int cx231xx_g_register(struct file *file, void *priv,
int cx231xx_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
int ret;
u8 data[4] = { 0, 0, 0, 0 };
@@ -1472,8 +1315,7 @@ int cx231xx_s_register(struct file *file, void *priv,
static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
bool is_50hz = dev->norm & V4L2_STD_625_50;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1488,8 +1330,7 @@ static int vidioc_g_pixelaspect(struct file *file, void *priv,
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1508,54 +1349,10 @@ static int vidioc_g_selection(struct file *file, void *priv,
return 0;
}
-static int vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (likely(rc >= 0))
- rc = videobuf_streamon(&fh->vb_vidq);
-
- call_all(dev, video, s_stream, 1);
-
- return rc;
-}
-
-static int vidioc_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if (type != fh->type)
- return -EINVAL;
-
- cx25840_call(dev, video, s_stream, 0);
-
- videobuf_streamoff(&fh->vb_vidq);
- res_free(fh);
-
- return 0;
-}
-
int cx231xx_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
strscpy(cap->driver, "cx231xx", sizeof(cap->driver));
strscpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
@@ -1587,8 +1384,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1610,8 +1406,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(file);
f->fmt.vbi.sampling_rate = 6750000 * 4;
f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
@@ -1634,77 +1429,16 @@ static int vidioc_try_fmt_vbi_cap(struct file *file, void *priv,
static int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
-
- if (dev->vbi_stream_on && !fh->stream_on) {
- dev_err(dev->dev,
- "%s device in use by another fh\n", __func__);
- return -EBUSY;
- }
return vidioc_try_fmt_vbi_cap(file, priv, f);
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_reqbufs(&fh->vb_vidq, rb);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_querybuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_qbuf(&fh->vb_vidq, b);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct cx231xx_fh *fh = priv;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
-}
-
/* ----------------------------------------------------------- */
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1717,7 +1451,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
}
static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t)
{
- struct cx231xx *dev = ((struct cx231xx_fh *)priv)->dev;
+ struct cx231xx *dev = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1733,52 +1467,20 @@ static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner
*/
static int cx231xx_v4l2_open(struct file *filp)
{
- int radio = 0;
struct video_device *vdev = video_devdata(filp);
struct cx231xx *dev = video_drvdata(filp);
- struct cx231xx_fh *fh;
- enum v4l2_buf_type fh_type = 0;
-
- switch (vdev->vfl_type) {
- case VFL_TYPE_GRABBER:
- fh_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- break;
- case VFL_TYPE_VBI:
- fh_type = V4L2_BUF_TYPE_VBI_CAPTURE;
- break;
- case VFL_TYPE_RADIO:
- radio = 1;
- break;
- default:
- return -EINVAL;
- }
-
- cx231xx_videodbg("open dev=%s type=%s users=%d\n",
- video_device_node_name(vdev), v4l2_type_names[fh_type],
- dev->users);
-
-#if 0
- errCode = cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
- if (errCode < 0) {
- dev_err(dev->dev,
- "Device locked on digital mode. Can't open analog\n");
- return -EBUSY;
- }
-#endif
+ int ret;
- fh = kzalloc(sizeof(struct cx231xx_fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
- if (mutex_lock_interruptible(&dev->lock)) {
- kfree(fh);
+ if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(filp);
+ if (ret) {
+ mutex_unlock(&dev->lock);
+ return ret;
}
- fh->dev = dev;
- fh->type = fh_type;
- filp->private_data = fh;
- v4l2_fh_init(&fh->fh, vdev);
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
+ if (dev->users++ == 0) {
/* Power up in Analog TV mode */
if (dev->board.external_av)
cx231xx_set_power_mode(dev,
@@ -1786,10 +1488,6 @@ static int cx231xx_v4l2_open(struct file *filp)
else
cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV);
-#if 0
- cx231xx_set_mode(dev, CX231XX_ANALOG_MODE);
-#endif
-
/* set video alternate setting */
cx231xx_set_video_alternate(dev);
@@ -1799,38 +1497,21 @@ static int cx231xx_v4l2_open(struct file *filp)
/* device needs to be initialized before isoc transfer */
dev->video_input = dev->video_input > 2 ? 2 : dev->video_input;
-
}
- if (radio) {
+
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
cx231xx_videodbg("video_open: setting radio device\n");
/* cx231xx_start_radio(dev); */
call_all(dev, tuner, s_radio);
}
-
- dev->users++;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_video_qops,
- NULL, &dev->video_mode.slock,
- fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
		/* Set the required alternate setting; the VBI interface works
		   in bulk mode only */
cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
-
- videobuf_queue_vmalloc_init(&fh->vb_vidq, &cx231xx_vbi_qops,
- NULL, &dev->vbi_mode.slock,
- fh->type, V4L2_FIELD_SEQ_TB,
- sizeof(struct cx231xx_buffer),
- fh, &dev->lock);
}
mutex_unlock(&dev->lock);
- v4l2_fh_add(&fh->fh);
-
return 0;
}
@@ -1871,68 +1552,12 @@ void cx231xx_release_analog_resources(struct cx231xx *dev)
*/
static int cx231xx_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
-
- cx231xx_videodbg("users=%d\n", dev->users);
-
- cx231xx_videodbg("users=%d\n", dev->users);
- if (res_check(fh))
- res_free(fh);
-
- /*
- * To workaround error number=-71 on EP0 for VideoGrabber,
- * need exclude following.
- * FIXME: It is probably safe to remove most of these, as we're
- * now avoiding the alternate setting for INDEX_VANC
- */
- if (!dev->board.no_alt_vanc)
- if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- if (atomic_read(&dev->devlist_count) > 0) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
- return 0;
- }
-
- /* do this before setting alternate! */
- cx231xx_uninit_vbi_isoc(dev);
-
- /* set alternate 0 */
- if (!dev->vbi_or_sliced_cc_mode)
- cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
- else
- cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- dev->users--;
- wake_up_interruptible(&dev->open);
- return 0;
- }
+ struct cx231xx *dev = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
- v4l2_fh_del(&fh->fh);
- dev->users--;
- if (!dev->users) {
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
-
- /* the device is already disconnect,
- free the remaining resources */
- if (dev->state & DEV_DISCONNECTED) {
- cx231xx_release_resources(dev);
- fh->dev = NULL;
- return 0;
- }
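+	/*
+	 * The caller (cx231xx_v4l2_close) already holds dev->lock, hence
+	 * the lockless _vb2_fop_release() variant is used here.
+	 */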
+ _vb2_fop_release(filp, NULL);
+ if (--dev->users == 0) {
/* Save some power by putting tuner to sleep */
call_all(dev, tuner, standby);
@@ -1942,20 +1567,40 @@ static int cx231xx_close(struct file *filp)
else
cx231xx_uninit_bulk(dev);
cx231xx_set_mode(dev, CX231XX_SUSPEND);
+ }
+
+ /*
+	 * To work around error number=-71 on EP0 for VideoGrabber,
+	 * we need to exclude the following.
+ * FIXME: It is probably safe to remove most of these, as we're
+ * now avoiding the alternate setting for INDEX_VANC
+ */
+ if (!dev->board.no_alt_vanc && vdev->vfl_type == VFL_TYPE_VBI) {
+ /* do this before setting alternate! */
+ cx231xx_uninit_vbi_isoc(dev);
/* set alternate 0 */
+ if (!dev->vbi_or_sliced_cc_mode)
+ cx231xx_set_alt_setting(dev, INDEX_VANC, 0);
+ else
+ cx231xx_set_alt_setting(dev, INDEX_HANC, 0);
+
+ wake_up_interruptible_nr(&dev->open, 1);
+ return 0;
+ }
+
+ if (dev->users == 0) {
+ /* set alternate 0 */
cx231xx_set_alt_setting(dev, INDEX_VIDEO, 0);
}
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
+
wake_up_interruptible(&dev->open);
return 0;
}
static int cx231xx_v4l2_close(struct file *filp)
{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
+ struct cx231xx *dev = video_drvdata(filp);
int rc;
mutex_lock(&dev->lock);
@@ -1964,116 +1609,13 @@ static int cx231xx_v4l2_close(struct file *filp)
return rc;
}
-/*
- * cx231xx_v4l2_read()
- * will allocate buffers when called for the first time
- */
-static ssize_t
-cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
- loff_t *pos)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- if ((fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
- (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)) {
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_read_stream(&fh->vb_vidq, buf, count, pos, 0,
- filp->f_flags & O_NONBLOCK);
- mutex_unlock(&dev->lock);
- return rc;
- }
- return 0;
-}
-
-/*
- * cx231xx_v4l2_poll()
- * will allocate buffers when called for the first time
- */
-static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
-{
- __poll_t req_events = poll_requested_events(wait);
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- __poll_t res = 0;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return EPOLLERR;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return EPOLLERR;
-
- if (v4l2_event_pending(&fh->fh))
- res |= EPOLLPRI;
- else
- poll_wait(filp, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) ||
- (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type)) {
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(filp, &fh->vb_vidq, wait);
- mutex_unlock(&dev->lock);
- return res;
- }
- return res | EPOLLERR;
-}
-
-/*
- * cx231xx_v4l2_mmap()
- */
-static int cx231xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct cx231xx_fh *fh = filp->private_data;
- struct cx231xx *dev = fh->dev;
- int rc;
-
- rc = check_dev(dev);
- if (rc < 0)
- return rc;
-
- rc = res_get(fh);
-
- if (unlikely(rc < 0))
- return rc;
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- rc = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- mutex_unlock(&dev->lock);
-
- cx231xx_videodbg("vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end -
- (unsigned long)vma->vm_start, rc);
-
- return rc;
-}
-
static const struct v4l2_file_operations cx231xx_v4l_fops = {
.owner = THIS_MODULE,
.open = cx231xx_v4l2_open,
.release = cx231xx_v4l2_close,
- .read = cx231xx_v4l2_read,
- .poll = cx231xx_v4l2_poll,
- .mmap = cx231xx_v4l2_mmap,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
@@ -2088,17 +1630,17 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
.vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_selection = vidioc_g_selection,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = cx231xx_enum_input,
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = cx231xx_g_tuner,
.vidioc_s_tuner = cx231xx_s_tuner,
.vidioc_g_frequency = cx231xx_g_frequency,
@@ -2175,6 +1717,7 @@ static void cx231xx_vdev_init(struct cx231xx *dev,
int cx231xx_register_analog_devices(struct cx231xx *dev)
{
+ struct vb2_queue *q;
int ret;
dev_info(dev->dev, "v4l2 driver version %s\n", CX231XX_VERSION);
@@ -2221,6 +1764,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize video media entity!\n");
#endif
dev->vdev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vidq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_video_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
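+	/* vb2 calls start_streaming once at least one buffer is queued */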
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vdev.queue = q;
dev->vdev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
@@ -2254,6 +1812,21 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
dev_err(dev->dev, "failed to initialize vbi media entity!\n");
#endif
dev->vbi_dev.ctrl_handler = &dev->ctrl_handler;
+
+ q = &dev->vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = dev;
+ q->buf_struct_size = sizeof(struct cx231xx_buffer);
+ q->ops = &cx231xx_vbi_qops;
+ q->mem_ops = &vb2_vmalloc_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 1;
+ q->lock = &dev->lock;
+ ret = vb2_queue_init(q);
+ if (ret)
+ return ret;
+ dev->vbi_dev.queue = q;
dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
V4L2_CAP_VBI_CAPTURE;
if (dev->tuner_type != TUNER_ABSENT)
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index b007611abc37..b32eab641793 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -20,7 +20,7 @@
#include <media/drv-intf/cx2341x.h>
-#include <media/videobuf-vmalloc.h>
+#include <media/videobuf2-vmalloc.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
@@ -223,8 +223,8 @@ struct cx231xx_fmt {
/* buffer for one video frame */
struct cx231xx_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
struct list_head frame;
int top_field;
int receiving;
@@ -237,7 +237,6 @@ enum ps_package_head {
struct cx231xx_dmaqueue {
struct list_head active;
- struct list_head queued;
wait_queue_head_t wq;
@@ -251,6 +250,7 @@ struct cx231xx_dmaqueue {
u32 lines_completed;
u8 field1_done;
u32 lines_per_field;
+ u32 sequence;
/*Mpeg2 control buffer*/
u8 *p_left_data;
@@ -427,23 +427,6 @@ struct cx231xx_audio {
struct cx231xx;
-struct cx231xx_fh {
- struct v4l2_fh fh;
- struct cx231xx *dev;
- unsigned int stream_on:1; /* Locks streams */
- enum v4l2_buf_type type;
-
- struct videobuf_queue vb_vidq;
-
- /* vbi capture */
- struct videobuf_queue vidq;
- struct videobuf_queue vbiq;
-
- /* MPEG Encoder specifics ONLY */
-
- atomic_t v4l_reading;
-};
-
/*****************************************************************/
/* set/get i2c */
/* 00--1Mb/s, 01-400kb/s, 10--100kb/s, 11--5Mb/s */
@@ -634,6 +617,7 @@ struct cx231xx {
int width; /* current frame width */
int height; /* current frame height */
int interlaced; /* 1=interlace fields, 0=just top fields */
+ unsigned int size;
struct cx231xx_audio adev;
@@ -657,6 +641,9 @@ struct cx231xx {
struct media_pad input_pad[MAX_CX231XX_INPUT];
#endif
+ struct vb2_queue vidq;
+ struct vb2_queue vbiq;
+
unsigned char eedata[256];
struct cx231xx_video_mode video_mode;
@@ -717,6 +704,7 @@ struct cx231xx {
u8 USE_ISO;
struct cx231xx_tvnorm encodernorm;
struct cx231xx_tsport ts1, ts2;
+ struct vb2_queue mpegq;
struct video_device v4l_device;
atomic_t v4l_reader_count;
u32 freq;
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 3afd18733614..792667ee5ebc 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1197,6 +1197,15 @@ err:
return ret;
}
+/*
+ * The I2C speed register is calculated with:
+ * I2C speed register = (1000000000 / (24.4 * 16 * I2C_speed))
+ *
+ * The default speed register for it930x is 7, which means a
+ * speed of ~366 kbps.
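+ * (solving for the speed: 1000000000 / (24.4 * 16 * 7) ~= 366000 bps)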
+ */
+#define I2C_SPEED_366K 7
+
static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
@@ -1208,13 +1217,13 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
- /* I2C master bus 2 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
+ /* I2C master bus 2 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f6a7, I2C_SPEED_366K);
if (ret < 0)
goto err;
- /* I2C master bus 1,3 clock speed 300k */
- ret = af9035_wr_reg(d, 0x00f103, 0x07);
+ /* I2C master bus 1,3 clock speed 366k */
+ ret = af9035_wr_reg(d, 0x00f103, I2C_SPEED_366K);
if (ret < 0)
goto err;
@@ -1610,6 +1619,24 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
memset(&si2157_config, 0, sizeof(si2157_config));
si2157_config.fe = adap->fe[0];
+
+ /*
+ * HACK: The Logilink VG0022A has a bug: when the si2157
+ * firmware that came with the device is replaced by a new
+ * one, the I2C transfers to the tuner will return just 0xff.
+ *
+ * Probably, the vendor firmware has some patch specifically
+	 * designed for this device. So, we can't replace it with the
+	 * generic firmware. The right solution would be to extract
+ * the si2157 firmware from the original driver and ask the
+ * driver to load the specifically designed firmware, but,
+ * while we don't have that, the next best solution is to just
+	 * keep the original firmware on the device.
+ */
+ if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK &&
+ le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100)
+ si2157_config.dont_load_firmware = true;
+
si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port;
ret = af9035_add_i2c_dev(d, "si2157",
it930x_addresses_table[state->it930x_addresses].tuner_i2c_addr,
@@ -2121,6 +2148,8 @@ static const struct usb_device_id af9035_id_table[] = {
&it930x_props, "ITE 9303 Generic", NULL) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310,
&it930x_props, "AVerMedia TD310 DVB-T2", NULL) },
+ { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100,
+ &it930x_props, "Logilink VG0022A", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index b874a49ececf..52bcc2d2efe5 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -121,6 +121,7 @@ struct dvb_usb_driver_info {
* @interval: time in ms between two queries
* @driver_type: used to point if a device supports raw mode
* @bulk_mode: device supports bulk mode for rc (disable polling mode)
+ * @timeout: set to length of last space before raw IR goes idle
*/
struct dvb_usb_rc {
const char *map_name;
@@ -130,6 +131,7 @@ struct dvb_usb_rc {
unsigned int interval;
enum rc_driver_type driver_type;
bool bulk_mode;
+ int timeout;
};
/**
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index e5e056bf9dfa..f1c79f351ec8 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -150,6 +150,7 @@ static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
dev->map_name = d->rc.map_name;
dev->allowed_protocols = d->rc.allowed_protos;
dev->change_protocol = d->rc.change_protocol;
+ dev->timeout = d->rc.timeout;
dev->priv = d;
ret = rc_register_device(dev);
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 617a306f6815..356fd8e66834 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -22,7 +22,6 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct dvbsky_state {
- struct mutex stream_mutex;
u8 ibuf[DVBSKY_BUF_LEN];
u8 obuf[DVBSKY_BUF_LEN];
u8 last_lock;
@@ -60,17 +59,19 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
{
struct dvbsky_state *state = d_to_priv(d);
+ static const u8 obuf_pre[3] = { 0x37, 0, 0 };
+ static const u8 obuf_post[3] = { 0x36, 3, 0 };
int ret;
- u8 obuf_pre[3] = { 0x37, 0, 0 };
- u8 obuf_post[3] = { 0x36, 3, 0 };
- mutex_lock(&state->stream_mutex);
- ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
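+	/*
+	 * The commands are staged in state->obuf because USB transfer
+	 * buffers must be DMA-safe; the static const arrays cannot be
+	 * handed to the USB core directly. d->usb_mutex keeps the two
+	 * writes atomic with respect to other generic I/O.
+	 */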
+ mutex_lock(&d->usb_mutex);
+ memcpy(state->obuf, obuf_pre, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
if (!ret && onoff) {
msleep(20);
- ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
+ memcpy(state->obuf, obuf_post, 3);
+ ret = dvb_usbv2_generic_write_locked(d, state->obuf, 3);
}
- mutex_unlock(&state->stream_mutex);
+ mutex_unlock(&d->usb_mutex);
return ret;
}
@@ -591,17 +592,7 @@ static int dvbsky_identify_state(struct dvb_usb_device *d, const char **name)
static int dvbsky_init(struct dvb_usb_device *d)
{
struct dvbsky_state *state = d_to_priv(d);
-
- /* use default interface */
- /*
- ret = usb_set_interface(d->udev, 0, 0);
- if (ret)
- return ret;
- */
- mutex_init(&state->stream_mutex);
-
state->last_lock = 0;
-
return 0;
}
@@ -792,6 +783,9 @@ static const struct usb_device_id dvbsky_id_table[] = {
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
+ { DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C_LITE,
+ &mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C Lite",
+ NULL) },
{ DVB_USB_DEVICE(USB_VID_CONEXANT, USB_PID_MYGICA_T230C2,
&mygica_t230c_props, "MyGica Mini DVB-T2 USB Stick T230C v2",
RC_MAP_TOTAL_MEDIA_IN_HAND_02) },
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index c7197e534c02..19217dcf20f1 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -5,7 +5,7 @@
*/
#include <linux/string.h>
-#include "gl861.h"
+#include "dvb_usb.h"
#include "zl10353.h"
#include "qt1010.h"
@@ -14,93 +14,157 @@
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
- u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
-{
- u16 index;
- u16 value = addr << (8 + 1);
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 req, type;
- u8 *buf;
- int ret;
+struct gl861 {
+ /* USB control message buffer */
+ u8 buf[16];
- if (wo) {
- req = GL861_REQ_I2C_WRITE;
- type = GL861_WRITE;
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- } else { /* rw */
- req = GL861_REQ_I2C_READ;
- type = GL861_READ;
- buf = kmalloc(rlen, GFP_KERNEL);
- }
- if (!buf)
- return -ENOMEM;
+ struct i2c_adapter *demod_sub_i2c;
+ struct i2c_client *i2c_client_demod;
+ struct i2c_client *i2c_client_tuner;
+};
- switch (wlen) {
- case 1:
- index = wbuf[0];
+#define CMD_WRITE_SHORT 0x01
+#define CMD_READ 0x02
+#define CMD_WRITE 0x03
+
+static int gl861_ctrl_msg(struct dvb_usb_device *d, u8 request, u16 value,
+ u16 index, void *data, u16 size)
+{
+ struct gl861 *ctx = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
+ int ret;
+ unsigned int pipe;
+ u8 requesttype;
+
+ mutex_lock(&d->usb_mutex);
+
+ switch (request) {
+ case CMD_WRITE:
+ memcpy(ctx->buf, data, size);
+ /* Fall through */
+ case CMD_WRITE_SHORT:
+ pipe = usb_sndctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_OUT;
break;
- case 2:
- index = wbuf[0];
- value = value + wbuf[1];
+ case CMD_READ:
+ pipe = usb_rcvctrlpipe(d->udev, 0);
+ requesttype = USB_TYPE_VENDOR | USB_DIR_IN;
break;
default:
- dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n",
- KBUILD_MODNAME, wlen);
- kfree(buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mutex_unlock;
}
- usleep_range(1000, 2000); /* avoid I2C errors */
+ ret = usb_control_msg(d->udev, pipe, request, requesttype, value,
+ index, ctx->buf, size, 200);
+ dev_dbg(&intf->dev, "%d | %02x %02x %*ph %*ph %*ph %s %*ph\n",
+ ret, requesttype, request, 2, &value, 2, &index, 2, &size,
+ (requesttype & USB_DIR_IN) ? "<<<" : ">>>", size, ctx->buf);
+ if (ret < 0)
+ goto err_mutex_unlock;
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
- value, index, buf, rlen, 2000);
+ if (request == CMD_READ)
+ memcpy(data, ctx->buf, size);
- if (!wo && ret > 0)
- memcpy(rbuf, buf, rlen);
+ usleep_range(1000, 2000); /* Avoid I2C errors */
- kfree(buf);
+ mutex_unlock(&d->usb_mutex);
+
+ return 0;
+
+err_mutex_unlock:
+ mutex_unlock(&d->usb_mutex);
+ dev_dbg(&intf->dev, "failed %d\n", ret);
return ret;
}
-/* I2C */
-static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
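+/*
+ * A short write needs no data stage: the address byte (addr << 1) goes
+ * in the high byte of wValue, the register value in its low byte, and
+ * the register offset in wIndex.
+ */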
+static int gl861_short_write(struct dvb_usb_device *d, u8 addr, u8 reg, u8 val)
+{
+ return gl861_ctrl_msg(d, CMD_WRITE_SHORT,
+ (addr << 9) | val, reg, NULL, 0);
+}
+
+static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
+ int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
+ struct usb_interface *intf = d->intf;
+ struct gl861 *ctx = d_to_priv(d);
+ int ret;
+ u8 request, *data;
+ u16 value, index, size;
+
+ /* XXX: I2C adapter maximum data lengths are not tested */
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ /* I2C write */
+ if (msg[0].len < 2 || msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+
+ if (msg[0].len == 2) {
+ request = CMD_WRITE_SHORT;
+ value |= msg[0].buf[1];
+ size = 0;
+ data = NULL;
+ } else {
+ request = CMD_WRITE;
+ size = msg[0].len - 1;
+ data = &msg[0].buf[1];
+ }
+
+ ret = gl861_ctrl_msg(d, request, value, index, data, size);
+ } else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ /* I2C write + read */
+ if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- /* write/read request */
- if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, msg[i+1].buf, msg[i+1].len) < 0)
- break;
- i++;
- } else
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, NULL, 0) < 0)
- break;
+ value = (msg[0].addr << 1) << 8;
+ index = msg[0].buf[0];
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[1].buf, msg[1].len);
+ } else if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ /* I2C read */
+ if (msg[0].len > sizeof(ctx->buf)) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ value = (msg[0].addr << 1) << 8;
+ index = 0x0100;
+ request = CMD_READ;
+
+ ret = gl861_ctrl_msg(d, request, value, index,
+ msg[0].buf, msg[0].len);
+ } else {
+ /* Unsupported I2C message */
+ dev_dbg(&intf->dev, "unknown i2c msg, num %u\n", num);
+ ret = -EOPNOTSUPP;
}
+ if (ret)
+ goto err;
- mutex_unlock(&d->i2c_mutex);
- return i;
+ return num;
+err:
+ dev_dbg(&intf->dev, "failed %d\n", ret);
+ return ret;
}
-static u32 gl861_i2c_func(struct i2c_adapter *adapter)
+static u32 gl861_i2c_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm gl861_i2c_algo = {
- .master_xfer = gl861_i2c_xfer,
- .functionality = gl861_i2c_func,
+ .master_xfer = gl861_i2c_master_xfer,
+ .functionality = gl861_i2c_functionality,
};
/* Callbacks for DVB USB */
@@ -149,6 +213,8 @@ static struct dvb_usb_device_properties gl861_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
+ .size_of_priv = sizeof(struct gl861),
+
.i2c_algo = &gl861_i2c_algo,
.frontend_attach = gl861_frontend_attach,
.tuner_attach = gl861_tuner_attach,
@@ -166,14 +232,6 @@ static struct dvb_usb_device_properties gl861_props = {
/*
* For Friio
*/
-
-struct friio_priv {
- struct i2c_adapter *demod_sub_i2c;
- struct i2c_client *i2c_client_demod;
- struct i2c_client *i2c_client_tuner;
- struct i2c_adapter tuner_adap;
-};
-
struct friio_config {
struct i2c_board_info demod_info;
struct tc90522_config demod_cfg;
@@ -184,132 +242,10 @@ struct friio_config {
static const struct friio_config friio_config = {
.demod_info = { I2C_BOARD_INFO(TC90522_I2C_DEV_TER, 0x18), },
+ .demod_cfg = { .split_tuner_read_i2c = true, },
.tuner_info = { I2C_BOARD_INFO("tua6034_friio", 0x60), },
};
-/* For another type of I2C:
- * message sent by a USB control-read/write transaction with data stage.
- * Used in init/config of Friio.
- */
-static int
-gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmemdup(wbuf, wlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- addr << (8 + 1), 0x0100, buf, wlen, 2000);
- kfree(buf);
- return ret;
-}
-
-static int
-gl861_i2c_read_ex(struct dvb_usb_device *d, u8 addr, u8 *rbuf, u16 rlen)
-{
- u8 *buf;
- int ret;
-
- buf = kmalloc(rlen, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
- GL861_REQ_I2C_READ, GL861_READ,
- addr << (8 + 1), 0x0100, buf, rlen, 2000);
- if (ret > 0 && rlen > 0)
- memcpy(buf, rbuf, rlen);
- kfree(buf);
- return ret;
-}
-
-/* For I2C transactions to the tuner of Friio (dvb_pll).
- *
- * Friio uses irregular USB encapsulation for tuner i2c transactions:
- * write transacions are encapsulated with a different USB 'request' value.
- *
- * Although all transactions are sent via the demod(tc90522)
- * and the demod provides an i2c adapter for them, it cannot be used in Friio
- * since it assumes using the same parent adapter with the demod,
- * which does not use the request value and uses same one for both read/write.
- * So we define a dedicated i2c adapter here.
- */
-
-static int
-friio_i2c_tuner_read(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- struct friio_priv *priv;
- u8 addr;
-
- priv = d_to_priv(d);
- addr = priv->i2c_client_demod->addr;
- return gl861_i2c_read_ex(d, addr, msg->buf, msg->len);
-}
-
-static int
-friio_i2c_tuner_write(struct dvb_usb_device *d, struct i2c_msg *msg)
-{
- u8 *buf;
- int ret;
- struct friio_priv *priv;
-
- priv = d_to_priv(d);
-
- if (msg->len < 1)
- return -EINVAL;
-
- buf = kmalloc(msg->len + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- buf[0] = msg->addr << 1;
- memcpy(buf + 1, msg->buf, msg->len);
-
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_RAW, GL861_WRITE,
- priv->i2c_client_demod->addr << (8 + 1),
- 0xFE, buf, msg->len + 1, 2000);
- kfree(buf);
- return ret;
-}
-
-static int friio_tuner_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
-{
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
-
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- int ret;
-
- if (msg[i].flags & I2C_M_RD)
- ret = friio_i2c_tuner_read(d, &msg[i]);
- else
- ret = friio_i2c_tuner_write(d, &msg[i]);
-
- if (ret < 0)
- break;
-
- usleep_range(1000, 2000); /* avoid I2C errors */
- }
-
- mutex_unlock(&d->i2c_mutex);
- return i;
-}
-
-static struct i2c_algorithm friio_tuner_i2c_algo = {
- .master_xfer = friio_tuner_i2c_xfer,
- .functionality = gl861_i2c_func,
-};
/* GPIO control in Friio */
@@ -377,9 +313,11 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
/* init/config of gl861 for Friio */
/* NOTE:
* This function cannot be moved to friio_init()/dvb_usbv2_init(),
- * because the init defined here must be done before any activities like I2C,
+ * because the init defined here includes a whole device reset and
+ * must be run early, before any activities like I2C,
* but friio_init() is called by dvb-usbv2 after {_frontend, _tuner}_attach(),
* where I2C communication is used.
+ * This reset is also required in reset_resume().
* Thus this function is set to be called from _power_ctl().
*
* Since it will be called on the early init stage
@@ -389,7 +327,7 @@ static int friio_ext_ctl(struct dvb_usb_device *d,
static int friio_reset(struct dvb_usb_device *d)
{
int i, ret;
- u8 wbuf[2], rbuf[2];
+ u8 wbuf[1], rbuf[2];
static const u8 friio_init_cmds[][2] = {
{0x33, 0x08}, {0x37, 0x40}, {0x3a, 0x1f}, {0x3b, 0xff},
@@ -401,16 +339,12 @@ static int friio_reset(struct dvb_usb_device *d)
if (ret < 0)
return ret;
- wbuf[0] = 0x11;
- wbuf[1] = 0x02;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x02);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- wbuf[0] = 0x11;
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x11, 0x00);
if (ret < 0)
return ret;
@@ -420,14 +354,13 @@ static int friio_reset(struct dvb_usb_device *d)
*/
usleep_range(1000, 2000);
- wbuf[0] = 0x03;
- wbuf[1] = 0x80;
- ret = gl861_i2c_write_ex(d, 0x09, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x09 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x09, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x09 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
@@ -435,38 +368,33 @@ static int friio_reset(struct dvb_usb_device *d)
usleep_range(1000, 2000);
- ret = gl861_i2c_write_ex(d, 0x48, wbuf, 2);
+ wbuf[0] = 0x80;
+ ret = gl861_ctrl_msg(d, CMD_WRITE, 0x48 << 9, 0x03, wbuf, 1);
if (ret < 0)
return ret;
usleep_range(2000, 3000);
- ret = gl861_i2c_read_ex(d, 0x48, rbuf, 2);
+ ret = gl861_ctrl_msg(d, CMD_READ, 0x48 << 9, 0x0100, rbuf, 2);
if (ret < 0)
return ret;
if (rbuf[0] != 0xff || rbuf[1] != 0xff)
return -ENODEV;
- wbuf[0] = 0x30;
- wbuf[1] = 0x04;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x30, 0x04);
if (ret < 0)
return ret;
- wbuf[0] = 0x00;
- wbuf[1] = 0x01;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x00, 0x01);
if (ret < 0)
return ret;
- wbuf[0] = 0x06;
- wbuf[1] = 0x0f;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
+ ret = gl861_short_write(d, 0x00, 0x06, 0x0f);
if (ret < 0)
return ret;
for (i = 0; i < ARRAY_SIZE(friio_init_cmds); i++) {
- ret = gl861_i2c_msg(d, 0x00, (u8 *)friio_init_cmds[i], 2,
- NULL, 0);
+ ret = gl861_short_write(d, 0x00, friio_init_cmds[i][0],
+ friio_init_cmds[i][1]);
if (ret < 0)
return ret;
}
@@ -488,9 +416,10 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d;
struct tc90522_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
info = &friio_config.demod_info;
+ cfg = friio_config.demod_cfg;
d = adap_to_d(adap);
cl = dvb_module_probe("tc90522", info->type,
&d->i2c_adap, info->addr, &cfg);
@@ -498,25 +427,17 @@ static int friio_frontend_attach(struct dvb_usb_adapter *adap)
return -ENODEV;
adap->fe[0] = cfg.fe;
- /* ignore cfg.tuner_i2c and create new one */
priv = adap_to_priv(adap);
priv->i2c_client_demod = cl;
- priv->tuner_adap.algo = &friio_tuner_i2c_algo;
- priv->tuner_adap.dev.parent = &d->udev->dev;
- strscpy(priv->tuner_adap.name, d->name, sizeof(priv->tuner_adap.name));
- strlcat(priv->tuner_adap.name, "-tuner", sizeof(priv->tuner_adap.name));
- priv->demod_sub_i2c = &priv->tuner_adap;
- i2c_set_adapdata(&priv->tuner_adap, d);
-
- return i2c_add_adapter(&priv->tuner_adap);
+ priv->demod_sub_i2c = cfg.tuner_i2c;
+ return 0;
}
static int friio_frontend_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
- i2c_del_adapter(&priv->tuner_adap);
dvb_module_release(priv->i2c_client_demod);
return 0;
}
@@ -526,7 +447,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
const struct i2c_board_info *info;
struct dvb_pll_config cfg;
struct i2c_client *cl;
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
info = &friio_config.tuner_info;
@@ -543,7 +464,7 @@ static int friio_tuner_attach(struct dvb_usb_adapter *adap)
static int friio_tuner_detach(struct dvb_usb_adapter *adap)
{
- struct friio_priv *priv;
+ struct gl861 *priv;
priv = adap_to_priv(adap);
dvb_module_release(priv->i2c_client_tuner);
@@ -554,7 +475,7 @@ static int friio_init(struct dvb_usb_device *d)
{
int i;
int ret;
- struct friio_priv *priv;
+ struct gl861 *priv;
static const u8 demod_init[][2] = {
{0x01, 0x40}, {0x04, 0x38}, {0x05, 0x40}, {0x07, 0x40},
@@ -606,7 +527,7 @@ static struct dvb_usb_device_properties friio_props = {
.owner = THIS_MODULE,
.adapter_nr = adapter_nr,
- .size_of_priv = sizeof(struct friio_priv),
+ .size_of_priv = sizeof(struct gl861),
.i2c_algo = &gl861_i2c_algo,
.power_ctrl = friio_power_ctrl,
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
deleted file mode 100644
index 02c00e10748a..000000000000
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DVB_USB_GL861_H_
-#define _DVB_USB_GL861_H_
-
-#include "dvb_usb.h"
-
-#define GL861_WRITE 0x40
-#define GL861_READ 0xc0
-
-#define GL861_REQ_I2C_WRITE 0x01
-#define GL861_REQ_I2C_READ 0x02
-#define GL861_REQ_I2C_RAW 0x03
-
-#endif
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 1a36bda28542..5016ede7b35f 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1781,7 +1781,6 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
}
/* 'flush' ir_raw_event_store_with_filter() */
- ir_raw_event_set_idle(d->rc_dev, true);
ir_raw_event_handle(d->rc_dev);
exit:
return ret;
@@ -1804,6 +1803,8 @@ static int rtl2832u_get_rc_config(struct dvb_usb_device *d,
rc->driver_type = RC_DRIVER_IR_RAW;
rc->query = rtl2832u_rc_query;
rc->interval = 200;
+ /* we program idle len to 0xc0, set timeout to one less */
+ rc->timeout = 0xbf * 50800;
return 0;
}
@@ -1957,7 +1958,8 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
/* RTL2832P devices: */
{ DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
- &rtl28xxu_props, "Astrometa DVB-T2", NULL) },
+ &rtl28xxu_props, "Astrometa DVB-T2",
+ RC_MAP_ASTROMETA_T2HYBRID) },
{ DVB_USB_DEVICE(0x5654, 0xca42,
&rtl28xxu_props, "GoTView MasterHD 3", NULL) },
{ }
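A note on the timeout arithmetic above: rc->timeout is in nanoseconds here, and the RTL2832 IR sampler ticks once per 50800 ns (50.8 us). With the idle length programmed to 0xc0 sample periods, setting the timeout one period shorter gives:

	0xbf * 50800 ns = 191 * 50.8 us ~= 9.7 ms

so the software timeout expires just inside the hardware's own idle window.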
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 02697d86e8c1..ac93e88d7038 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -976,8 +976,9 @@ static int af9005_identify_state(struct usb_device *udev,
else if (reply == 0x02)
*cold = 0;
else
- return -EIO;
- deb_info("Identify state cold = %d\n", *cold);
+ ret = -EIO;
+ if (!ret)
+ deb_info("Identify state cold = %d\n", *cold);
err:
kfree(buf);
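The af9005 change above is an error-path fix in two parts: returning -EIO directly would leak buf, which is only freed at the err: label, and the debug print would read *cold before anything had assigned it on the failure path. Funneling the error through ret and gating the print on !ret restores the usual single-exit cleanup shape (minimal sketch, hypothetical probe helper):

	int ret = 0;
	u8 *buf = kmalloc(16, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	if (probe_reply(buf) < 0)	/* hypothetical probe step */
		ret = -EIO;		/* no direct return: buf must be freed */
	kfree(buf);
	return ret;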
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index f02fa0a67aa4..fac19ec46089 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -521,7 +521,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d)
{
u8 ircode[4];
- cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+ if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
+ return 0;
if (ircode[2] || ircode[3])
rc_keydown(d->rc_dev, RC_PROTO_NEC,
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 49c9b70b632b..79dfbb25714b 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -31,7 +31,6 @@
#include <linux/soundcard.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 5983e72a0622..def9cdd931a9 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2487,6 +2487,24 @@ const struct em28xx_board em28xx_boards[] = {
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
},
+ /*
+ * 1b80:e349 Magix USB Videowandler-2
+ * (same chips as Honestech VIDBOX NW03)
+ * Empia EM2860, Philips SAA7113, Empia EMP202, No Tuner
+ */
+ [EM2861_BOARD_MAGIX_VIDEOWANDLER2] = {
+ .name = "Magix USB Videowandler-2",
+ .tuner_type = TUNER_ABSENT,
+ .decoder = EM28XX_SAA711X,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = SAA7115_COMPOSITE0,
+ .amux = EM28XX_AMUX_LINE_IN,
+ }, {
+ .type = EM28XX_VMUX_SVIDEO,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2696,6 +2714,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM28178_BOARD_PLEX_PX_BCUD },
{ USB_DEVICE(0xeb1a, 0x5051), /* Ion Video 2 PC MKII / Startech svid2usb23 / Raygo R12-41373 */
.driver_info = EM2860_BOARD_TVP5150_REFERENCE_DESIGN },
+ { USB_DEVICE(0x1b80, 0xe349), /* Magix USB Videowandler-2 */
+ .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index a73faf12f7e4..0ab6c493bc74 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -471,13 +471,13 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_init[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xfb, 0xff, 0x32},
{EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0xb8},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
+ static const struct em28xx_reg_seq hauppauge_hvr930c_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x01},
{EM2874_R80_GPIO_P0_CTRL, 0xaf, 0xff, 0x65},
{EM2874_R80_GPIO_P0_CTRL, 0xef, 0xff, 0x76},
@@ -493,7 +493,7 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -537,20 +537,20 @@ static void hauppauge_hvr930c_init(struct em28xx *dev)
static void terratec_h5_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_h5_init[] = {
+ static const struct em28xx_reg_seq terratec_h5_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_h5_end[] = {
+ static const struct em28xx_reg_seq terratec_h5_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -594,14 +594,14 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* 0xe6: unknown (does not affect DVB-T).
* 0xb6: unknown (does not affect DVB-T).
*/
- struct em28xx_reg_seq terratec_htc_stick_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_stick_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_stick_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xf6, 0xff, 50},
{ -1, -1, -1, -1},
@@ -611,7 +611,7 @@ static void terratec_htc_stick_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -642,14 +642,14 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
{
int i;
- struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_init[] = {
{EM2820_R08_GPIO_CTRL, 0xff, 0xff, 10},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xb2, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xb6, 0xff, 100},
{ -1, -1, -1, -1},
};
- struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
+ static const struct em28xx_reg_seq terratec_htc_usb_xs_end[] = {
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 100},
{EM2874_R80_GPIO_P0_CTRL, 0xa6, 0xff, 50},
{EM2874_R80_GPIO_P0_CTRL, 0xe6, 0xff, 100},
@@ -660,7 +660,7 @@ static void terratec_htc_usb_xs_init(struct em28xx *dev)
* Init the analog decoder (not yet supported), but
* it's probably still a good idea.
*/
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -704,7 +704,7 @@ static void pctv_520e_init(struct em28xx *dev)
* digital demodulator and tuner are routed via AVF4910B.
*/
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs[] = {
@@ -800,7 +800,7 @@ static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
static void px_bcud_init(struct em28xx *dev)
{
int i;
- struct {
+ static const struct {
unsigned char r[4];
int len;
} regs1[] = {
@@ -818,7 +818,7 @@ static void px_bcud_init(struct em28xx *dev)
{{ 0x85, 0x7a }, 2},
{{ 0x87, 0x04 }, 2},
};
- static struct em28xx_reg_seq gpio[] = {
+ static const struct em28xx_reg_seq gpio[] = {
{EM28XX_R06_I2C_CLK, 0x40, 0xff, 300},
{EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 60},
{EM28XX_R15_RGAIN, 0x20, 0xff, 0},
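All of the register tables converted above share one motive: a plain local initialized aggregate is rebuilt on the stack each time the function runs, while a static const one is emitted once into .rodata and can never be scribbled on. A minimal illustration with a hypothetical function name:

	static void init_regs(struct em28xx *dev)	/* hypothetical */
	{
		static const u8 table[][2] = {	/* one read-only copy, no per-call setup */
			{0x06, 0x0f},
			{0x00, 0x01},
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(table); i++)
			em28xx_write_reg(dev, table[i][0], table[i][1]);
	}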
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index a3155ec196cc..592b98b3643a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -949,7 +949,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
unsigned char buf;
int i, rc;
- memset(i2c_devicelist, 0, ARRAY_SIZE(i2c_devicelist));
+ memset(i2c_devicelist, 0, sizeof(i2c_devicelist));
for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
dev->i2c_client[bus].addr = i;
@@ -964,7 +964,7 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned int bus)
if (bus == dev->def_i2c_bus)
dev->i2c_hash = em28xx_hash_mem(i2c_devicelist,
- ARRAY_SIZE(i2c_devicelist), 32);
+ sizeof(i2c_devicelist), 32);
}
/*
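The em28xx-i2c fix is about units: memset() and em28xx_hash_mem() take a byte count, while ARRAY_SIZE() yields an element count. The two coincide only when the element is one byte wide, as i2c_devicelist appears to be here, so the old code worked by coincidence; sizeof states the intent and survives a future type change:

	u16 buf[64];

	memset(buf, 0, ARRAY_SIZE(buf));	/* wrong: 64 bytes, clears half the array */
	memset(buf, 0, sizeof(buf));		/* right: 128 bytes, clears all of it */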
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index c8bc59059a19..4ecadd57dac7 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -149,6 +149,7 @@
#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
#define EM2884_BOARD_TERRATEC_H6 101
#define EM2882_BOARD_ZOLID_HYBRID_TV_STICK 102
+#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
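Together with the em28xx-cards.c hunks earlier, this completes the standard three-part recipe for a new em28xx board, all visible in this patch: a board ID in em28xx.h, a board description in em28xx_boards[], and a USB ID table entry tying the VID:PID to that ID:

	#define EM2861_BOARD_MAGIX_VIDEOWANDLER2 103		/* em28xx.h */

	[EM2861_BOARD_MAGIX_VIDEOWANDLER2] = { ... },		/* em28xx_boards[] */

	{ USB_DEVICE(0x1b80, 0xe349),
	  .driver_info = EM2861_BOARD_MAGIX_VIDEOWANDLER2 },	/* em28xx_id_table[] */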
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index 863c485f4275..97799cfb832e 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -378,6 +378,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index 3d7f6dcdd7a8..6ca947aef298 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -276,6 +276,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
/* Start the workqueue function to do the streaming */
dev->work_thread = create_singlethread_workqueue(MODULE_NAME);
+ if (!dev->work_thread)
+ return -ENOMEM;
+
queue_work(dev->work_thread, &dev->work_struct);
return 0;
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index f869eb6065ce..b23988d8c7bc 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -35,7 +35,7 @@ struct sd {
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
int size)
{
- int ret = -1;
+ int ret;
u8 req_type = 0;
unsigned int pipe = 0;
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
index 7104a88b1e43..aac19d449be2 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx_st6422.c
@@ -117,7 +117,7 @@ static int st6422_init(struct sd *sd)
{
int err = 0, i;
- const u16 st6422_bridge_init[][2] = {
+ static const u16 st6422_bridge_init[][2] = {
{ STV_ISO_ENABLE, 0x00 }, /* disable capture */
{ 0x1436, 0x00 },
{ 0x1432, 0x03 }, /* 0x00-0x1F brightness */
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index a34717eba409..eaa08c7999d4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -898,8 +898,12 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
pvr2_v4l2_dev_disassociate_parent(vp->dev_video);
pvr2_v4l2_dev_disassociate_parent(vp->dev_radio);
if (!list_empty(&vp->dev_video->devbase.fh_list) ||
- !list_empty(&vp->dev_radio->devbase.fh_list))
+ (vp->dev_radio &&
+ !list_empty(&vp->dev_radio->devbase.fh_list))) {
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "pvr2_v4l2 internal_check exit-empty id=%p", vp);
return;
+ }
pvr2_v4l2_destroy_no_lock(vp);
}
@@ -935,7 +939,8 @@ static int pvr2_v4l2_release(struct file *file)
kfree(fhp);
if (vp->channel.mc_head->disconnect_flag &&
list_empty(&vp->dev_video->devbase.fh_list) &&
- list_empty(&vp->dev_radio->devbase.fh_list)) {
+ (!vp->dev_radio ||
+ list_empty(&vp->dev_radio->devbase.fh_list))) {
pvr2_v4l2_destroy_no_lock(vp);
}
return 0;
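Both pvrusb2 hunks guard the same spot: vp->dev_radio may legitimately be NULL when no radio node was created, and the fh_list tests used to dereference it unconditionally. A missing node now simply counts as having no open file handles:

	bool radio_idle = !vp->dev_radio ||
			  list_empty(&vp->dev_radio->devbase.fh_list);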
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/media/usb/tm6000/tm6000-regs.h
index d10424673db9..6a181f2e7ef2 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/media/usb/tm6000/tm6000-regs.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
index b275dbce3a1b..e3c6933f854d 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/media/usb/tm6000/tm6000-usb-isoc.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/media/usb/tm6000/tm6000.h
index bf396544da9a..c08c95312739 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/media/usb/tm6000/tm6000.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
*
* Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 6f108996142d..e746c8ddfc49 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -378,8 +378,7 @@ int usbtv_audio_init(struct usbtv *usbtv)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_usbtv_pcm_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), USBTV_AUDIO_BUFFER,
- USBTV_AUDIO_BUFFER);
+ NULL, USBTV_AUDIO_BUFFER, USBTV_AUDIO_BUFFER);
rv = snd_card_register(card);
if (rv)
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index cdc66adda755..93d36aab824f 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -314,6 +314,10 @@ static int usbvision_v4l2_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto unlock;
+ }
if (usbvision->user) {
err_code = -EBUSY;
} else {
@@ -377,6 +381,7 @@ unlock:
static int usbvision_v4l2_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "close");
@@ -391,9 +396,10 @@ static int usbvision_v4l2_close(struct file *file)
usbvision_scratch_free(usbvision);
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
usbvision_release(usbvision);
return 0;
@@ -453,6 +459,9 @@ static int vidioc_querycap(struct file *file, void *priv,
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ if (!usbvision->dev)
+ return -ENODEV;
+
strscpy(vc->driver, "USBVision", sizeof(vc->driver));
strscpy(vc->card,
usbvision_device_data[usbvision->dev_model].model_string,
@@ -1061,6 +1070,11 @@ static int usbvision_radio_open(struct file *file)
if (mutex_lock_interruptible(&usbvision->v4l2_lock))
return -ERESTARTSYS;
+
+ if (usbvision->remove_pending) {
+ err_code = -ENODEV;
+ goto out;
+ }
err_code = v4l2_fh_open(file);
if (err_code)
goto out;
@@ -1093,21 +1107,24 @@ out:
static int usbvision_radio_close(struct file *file)
{
struct usb_usbvision *usbvision = video_drvdata(file);
+ int r;
PDEBUG(DBG_IO, "");
mutex_lock(&usbvision->v4l2_lock);
/* Set packet size to 0 */
usbvision->iface_alt = 0;
- usb_set_interface(usbvision->dev, usbvision->iface,
- usbvision->iface_alt);
+ if (usbvision->dev)
+ usb_set_interface(usbvision->dev, usbvision->iface,
+ usbvision->iface_alt);
usbvision_audio_off(usbvision);
usbvision->radio = 0;
usbvision->user--;
+ r = usbvision->remove_pending;
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->remove_pending) {
+ if (r) {
printk(KERN_INFO "%s: Final disconnect\n", __func__);
v4l2_fh_release(file);
usbvision_release(usbvision);
@@ -1539,6 +1556,7 @@ err_usb:
static void usbvision_disconnect(struct usb_interface *intf)
{
struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
+ int u;
PDEBUG(DBG_PROBE, "");
@@ -1555,13 +1573,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
v4l2_device_disconnect(&usbvision->v4l2_dev);
usbvision_i2c_unregister(usbvision);
usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
+ u = usbvision->user;
usb_put_dev(usbvision->dev);
usbvision->dev = NULL; /* USB device is no more */
mutex_unlock(&usbvision->v4l2_lock);
- if (usbvision->user) {
+ if (u) {
printk(KERN_INFO "%s: In use, disconnect pending\n",
__func__);
wake_up_interruptible(&usbvision->wait_frame);
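Every usbvision hunk above closes the same time-of-check race: remove_pending and user were read after v4l2_lock had been dropped, by which point a concurrent disconnect could already be tearing the device down. The fix samples the shared state while still holding the lock and acts on the local copy afterwards:

	mutex_lock(&usbvision->v4l2_lock);
	/* ... normal close work under the lock ... */
	r = usbvision->remove_pending;	/* snapshot while still protected */
	mutex_unlock(&usbvision->v4l2_lock);

	if (r)				/* decision based on the snapshot */
		usbvision_release(usbvision);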
diff --git a/drivers/media/usb/uvc/uvc_debugfs.c b/drivers/media/usb/uvc/uvc_debugfs.c
index d2b109959d82..2b8af4b54117 100644
--- a/drivers/media/usb/uvc/uvc_debugfs.c
+++ b/drivers/media/usb/uvc/uvc_debugfs.c
@@ -108,15 +108,7 @@ void uvc_debugfs_cleanup_stream(struct uvc_streaming *stream)
void uvc_debugfs_init(void)
{
- struct dentry *dir;
-
- dir = debugfs_create_dir("uvcvideo", usb_debug_root);
- if (IS_ERR_OR_NULL(dir)) {
- uvc_printk(KERN_INFO, "Unable to create debugfs directory\n");
- return;
- }
-
- uvc_debugfs_root_dir = dir;
+ uvc_debugfs_root_dir = debugfs_create_dir("uvcvideo", usb_debug_root);
}
void uvc_debugfs_cleanup(void)
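The uvc_debugfs simplification leans on a debugfs design rule: callers are not supposed to check debugfs return values. debugfs_create_dir() returns an ERR_PTR on failure, and subsequent debugfs_create_*() calls recognize an ERR_PTR parent and quietly do nothing, so the dropped error branch was dead weight. Illustrative follow-up call (the real per-stream directory is created elsewhere in the driver):

	/* safe even if uvc_debugfs_root_dir holds an ERR_PTR */
	debugfs_create_dir("stream0", uvc_debugfs_root_dir);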
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 66ee168ddc7e..428235ca2635 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2151,6 +2151,20 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->name) - len);
}
+ /* Initialize the media device. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->mdev.dev = &intf->dev;
+ strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+ if (udev->serial)
+ strscpy(dev->mdev.serial, udev->serial,
+ sizeof(dev->mdev.serial));
+ usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
+ dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ media_device_init(&dev->mdev);
+
+ dev->vdev.mdev = &dev->mdev;
+#endif
+
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
@@ -2171,19 +2185,7 @@ static int uvc_probe(struct usb_interface *intf,
"linux-uvc-devel mailing list.\n");
}
- /* Initialize the media device and register the V4L2 device. */
-#ifdef CONFIG_MEDIA_CONTROLLER
- dev->mdev.dev = &intf->dev;
- strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
- if (udev->serial)
- strscpy(dev->mdev.serial, udev->serial,
- sizeof(dev->mdev.serial));
- usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
- dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- media_device_init(&dev->mdev);
-
- dev->vdev.mdev = &dev->mdev;
-#endif
+ /* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 99bb71b47117..b6279ad7ac84 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -51,7 +51,7 @@ static int uvc_meta_v4l2_get_format(struct file *file, void *fh,
memset(fmt, 0, sizeof(*fmt));
fmt->dataformat = stream->meta.format;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
@@ -72,7 +72,7 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
fmt->dataformat = fmeta == dev->info->meta_format
? fmeta : V4L2_META_FMT_UVC;
- fmt->buffersize = UVC_METATADA_BUF_SIZE;
+ fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index da72577c2998..cd60c6c1749e 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -79,7 +79,7 @@ static int uvc_queue_setup(struct vb2_queue *vq,
switch (vq->type) {
case V4L2_BUF_TYPE_META_CAPTURE:
- size = UVC_METATADA_BUF_SIZE;
+ size = UVC_METADATA_BUF_SIZE;
break;
default:
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c7c1baa90dea..f773dc5d802c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -491,7 +491,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METATADA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 1024
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 637962825d7a..57dbcc8083bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -20,7 +20,6 @@
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/highmem.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -556,14 +555,12 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
{
unsigned char *pdest;
unsigned char *psrc;
- s32 idx = -1;
- struct zr364xx_framei *frm;
+ s32 idx = cam->cur_frame;
+ struct zr364xx_framei *frm = &cam->buffer.frame[idx];
int i = 0;
unsigned char *ptr = NULL;
_DBG("buffer to user\n");
- idx = cam->cur_frame;
- frm = &cam->buffer.frame[idx];
/* swap bytes if camera needs it */
if (cam->method == METHOD0) {
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 62f7aa92ac29..d0e5ebc736f9 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -236,77 +236,79 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{
static const struct v4l2_format_info formats[] = {
/* RGB formats */
- { .format = V4L2_PIX_FMT_BGR24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_RGBA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_BGRA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
/* YUV packed formats */
- { .format = V4L2_PIX_FMT_YUYV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVYU, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_UYVY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_VYUY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats */
- { .format = V4L2_PIX_FMT_NV12, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV24, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV42, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_YUV410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YVU410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
- { .format = V4L2_PIX_FMT_YUV411P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
+ { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
/* YUV planar formats, non contiguous variant */
- { .format = V4L2_PIX_FMT_YUV420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YVU420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_YUV422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YUV444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_YVU444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
-
- { .format = V4L2_PIX_FMT_NV12M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV21M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
- { .format = V4L2_PIX_FMT_NV16M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_NV61M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
+
+ { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+ { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
/* Bayer RGB formats */
- { .format = V4L2_PIX_FMT_SBGGR8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SBGGR12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGBRG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SGRBG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
- { .format = V4L2_PIX_FMT_SRGGB12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
};
unsigned int i;
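With .pixel_enc filled in above, v4l2_format_info() now tells callers which encoding family a fourcc belongs to, replacing per-format switch statements in drivers. An illustrative helper (not part of the patch):

	static bool fmt_is_yuv(u32 fourcc)
	{
		const struct v4l2_format_info *info = v4l2_format_info(fourcc);

		return info && info->pixel_enc == V4L2_PIXEL_ENC_YUV;
	}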
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 1d8f38824631..2928c5e0a73d 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -29,6 +29,8 @@
#define call_op(master, op) \
(has_op(master, op) ? master->ops->op(master) : 0)
+static const union v4l2_ctrl_ptr ptr_null;
+
/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
@@ -566,6 +568,16 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Disabled at slice boundary",
"NULL",
};
+ static const char * const hevc_decode_mode[] = {
+ "Slice-Based",
+ "Frame-Based",
+ NULL,
+ };
+ static const char * const hevc_start_code[] = {
+ "No Start Code",
+ "Annex B Start Code",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -687,7 +699,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return hevc_tier;
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
return hevc_loop_filter_mode;
-
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ return hevc_decode_mode;
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
+ return hevc_start_code;
default:
return NULL;
}
@@ -957,6 +972,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD: return "HEVC Size of Length Field";
case V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES: return "Reference Frames for a P-Frame";
case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR: return "Prepend SPS and PPS to IDR";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -994,6 +1014,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_AUTO_FOCUS_RANGE: return "Auto Focus, Range";
case V4L2_CID_PAN_SPEED: return "Pan, Speed";
case V4L2_CID_TILT_SPEED: return "Tilt, Speed";
+ case V4L2_CID_UNIT_CELL_SIZE: return "Unit Cell Size";
/* FM Radio Modulator controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1265,6 +1286,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE:
+ case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -1375,6 +1398,19 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER:
*type = V4L2_CTRL_TYPE_VP8_FRAME_HEADER;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SPS:
+ *type = V4L2_CTRL_TYPE_HEVC_SPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_PPS:
+ *type = V4L2_CTRL_TYPE_HEVC_PPS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
+ break;
+ case V4L2_CID_UNIT_CELL_SIZE:
+ *type = V4L2_CTRL_TYPE_AREA;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1520,7 +1556,8 @@ static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
if (ctrl->is_int)
return ptr1.p_s32[idx] == ptr2.p_s32[idx];
idx *= ctrl->elem_size;
- return !memcmp(ptr1.p + idx, ptr2.p + idx, ctrl->elem_size);
+ return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
+ ctrl->elem_size);
}
}
@@ -1530,7 +1567,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
void *p = ptr.p + idx * ctrl->elem_size;
- memset(p, 0, ctrl->elem_size);
+ if (ctrl->p_def.p_const)
+ memcpy(p, ctrl->p_def.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
/*
* The cast is needed to get rid of a gcc warning complaining that
@@ -1672,7 +1712,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
{
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *area;
void *p = ptr.p + idx * ctrl->elem_size;
+ unsigned int i;
switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
@@ -1748,6 +1793,76 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
zero_padding(p_vp8_frame_header->entropy_header);
zero_padding(p_vp8_frame_header->coder_state);
break;
+
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ p_hevc_sps = p;
+
+ if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) {
+ p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0;
+ p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0;
+ p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+ p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0;
+ }
+
+ if (!(p_hevc_sps->flags &
+ V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT))
+ p_hevc_sps->num_long_term_ref_pics_sps = 0;
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ p_hevc_pps = p;
+
+ if (!(p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED))
+ p_hevc_pps->diff_cu_qp_delta_depth = 0;
+
+ if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) {
+ p_hevc_pps->num_tile_columns_minus1 = 0;
+ p_hevc_pps->num_tile_rows_minus1 = 0;
+ memset(&p_hevc_pps->column_width_minus1, 0,
+ sizeof(p_hevc_pps->column_width_minus1));
+ memset(&p_hevc_pps->row_height_minus1, 0,
+ sizeof(p_hevc_pps->row_height_minus1));
+
+ p_hevc_pps->flags &=
+ ~V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED;
+ }
+
+ if (p_hevc_pps->flags &
+ V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) {
+ p_hevc_pps->pps_beta_offset_div2 = 0;
+ p_hevc_pps->pps_tc_offset_div2 = 0;
+ }
+
+ zero_padding(*p_hevc_pps);
+ break;
+
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ p_hevc_slice_params = p;
+
+ if (p_hevc_slice_params->num_active_dpb_entries >
+ V4L2_HEVC_DPB_ENTRIES_NUM_MAX)
+ return -EINVAL;
+
+ zero_padding(p_hevc_slice_params->pred_weight_table);
+
+ for (i = 0; i < p_hevc_slice_params->num_active_dpb_entries;
+ i++) {
+ struct v4l2_hevc_dpb_entry *dpb_entry =
+ &p_hevc_slice_params->dpb[i];
+
+ zero_padding(*dpb_entry);
+ }
+
+ zero_padding(*p_hevc_slice_params);
+ break;
+
+ case V4L2_CTRL_TYPE_AREA:
+ area = p;
+ if (!area->width || !area->height)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
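The compound-control validation added above canonicalizes the HEVC payloads rather than rejecting them: syntax elements the spec makes meaningless under the current flags (PCM fields without V4L2_HEVC_SPS_FLAG_PCM_ENABLED, tile geometry without V4L2_HEVC_PPS_FLAG_TILES_ENABLED, and so on) are forced to zero, padding is cleared, and only genuinely invalid input such as an oversized num_active_dpb_entries or a zero-sized area fails with -EINVAL. Drivers can therefore rely on one canonical form instead of re-checking flag/field consistency themselves.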
@@ -1840,7 +1955,7 @@ static int ptr_to_user(struct v4l2_ext_control *c,
u32 len;
if (ctrl->is_ptr && !ctrl->is_string)
- return copy_to_user(c->ptr, ptr.p, c->size) ?
+ return copy_to_user(c->ptr, ptr.p_const, c->size) ?
-EFAULT : 0;
switch (ctrl->type) {
@@ -1955,7 +2070,7 @@ static void ptr_to_ptr(struct v4l2_ctrl *ctrl,
{
if (ctrl == NULL)
return;
- memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size);
+ memcpy(to.p, from.p_const, ctrl->elems * ctrl->elem_size);
}
/* Copy the new value to the current value. */
@@ -2354,7 +2469,8 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, void *priv)
+ const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ void *priv)
{
struct v4l2_ctrl *ctrl;
unsigned sz_extra;
@@ -2421,6 +2537,18 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_VP8_FRAME_HEADER:
elem_size = sizeof(struct v4l2_ctrl_vp8_frame_header);
break;
+ case V4L2_CTRL_TYPE_HEVC_SPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_sps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_PPS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_pps);
+ break;
+ case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_AREA:
+ elem_size = sizeof(struct v4l2_area);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2460,6 +2588,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array)
sz_extra += 2 * tot_ctrl_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
+ sz_extra += elem_size;
+
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
handler_set_err(hdl, -ENOMEM);
@@ -2503,6 +2634,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->p_new.p = &ctrl->val;
ctrl->p_cur.p = &ctrl->cur.val;
}
+
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
+ ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
+ memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
+ }
+
for (idx = 0; idx < elems; idx++) {
ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
@@ -2554,7 +2691,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, priv);
+ flags, qmenu, qmenu_int, cfg->p_def, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -2579,7 +2716,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, NULL);
+ flags, NULL, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -2612,7 +2749,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, NULL);
+ flags, qmenu, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -2644,11 +2781,32 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, NULL);
+ flags, qmenu, NULL, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
+/* Helper function for standard compound controls */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id,
+ const union v4l2_ctrl_ptr p_def)
+{
+ const char *name;
+ enum v4l2_ctrl_type type;
+ u32 flags;
+ s64 min, max, step, def;
+
+ v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
+ if (type < V4L2_CTRL_COMPOUND_TYPES) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
+ return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
+ min, max, step, def, NULL, 0,
+ flags, NULL, NULL, p_def, NULL);
+}
+EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
+
/* Helper function for standard integer menu controls */
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
@@ -2669,7 +2827,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, NULL);
+ flags, NULL, qmenu_int, ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
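The new v4l2_ctrl_new_std_compound() above pairs with the p_def plumbing added to v4l2_ctrl_new(): a driver hands in a compound default and the core copies it into every element at init time. A hedged usage sketch for the read-only area control from this series (driver context is hypothetical; if this tree also grew a constructor helper for the union, it can replace the designated initializer):

	static int example_add_unit_cell(struct v4l2_ctrl_handler *hdl)
	{
		static const struct v4l2_area unit_size = {
			.width	= 1120,	/* example cell pitch */
			.height	= 1120,
		};
		union v4l2_ctrl_ptr p_def = { .p_const = &unit_size };

		/* V4L2_CID_UNIT_CELL_SIZE is typed as a read-only area above,
		 * so the supplied default is what userspace reads back */
		v4l2_ctrl_new_std_compound(hdl, NULL, V4L2_CID_UNIT_CELL_SIZE, p_def);
		return hdl->error;
	}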
@@ -3144,6 +3302,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
struct v4l2_ctrl_handler *prev_hdl = NULL;
struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+ mutex_lock(main_hdl->lock);
if (list_empty(&main_hdl->requests_queued))
goto queue;
@@ -3175,18 +3334,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
queue:
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
hdl->request_is_queued = true;
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
{
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
list_del_init(&hdl->requests);
+ mutex_lock(main_hdl->lock);
if (hdl->request_is_queued) {
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
}
+ mutex_unlock(main_hdl->lock);
}
static void v4l2_ctrl_request_release(struct media_request_object *obj)
@@ -4080,6 +4243,18 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ lockdep_assert_held(ctrl->handler->lock);
+
+ /* It's a driver bug if this happens. */
+ WARN_ON(ctrl->type != V4L2_CTRL_TYPE_AREA);
+ *ctrl->p_new.p_area = *area;
+ return set_ctrl(NULL, ctrl, 0);
+}
+EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_area);
+
void v4l2_ctrl_request_complete(struct media_request *req,
struct v4l2_ctrl_handler *main_hdl)
{
@@ -4128,9 +4303,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
v4l2_ctrl_unlock(ctrl);
}
+ mutex_lock(main_hdl->lock);
WARN_ON(!hdl->request_is_queued);
list_del_init(&hdl->requests_queued);
hdl->request_is_queued = false;
+ mutex_unlock(main_hdl->lock);
media_request_object_complete(obj);
media_request_object_put(obj);
}
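The three request hunks above close one race with one rule: requests_queued lives on the main handler, so queueing, unbinding and completing all manipulate it under main_hdl->lock now, and request_is_queued flips inside the same critical section, so the flag and the list can never disagree with each other.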
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 4037689a945a..da42d172714a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -533,13 +533,23 @@ static int get_index(struct video_device *vdev)
*/
static void determine_valid_ioctls(struct video_device *vdev)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
DECLARE_BITMAP(valid_ioctls, BASE_VIDIOC_PRIVATE);
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & vid_caps);
bool is_vbi = vdev->vfl_type == VFL_TYPE_VBI;
bool is_radio = vdev->vfl_type == VFL_TYPE_RADIO;
bool is_sdr = vdev->vfl_type == VFL_TYPE_SDR;
bool is_tch = vdev->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (vdev->device_caps & meta_caps);
bool is_rx = vdev->vfl_dir != VFL_DIR_TX;
bool is_tx = vdev->vfl_dir != VFL_DIR_RX;
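After the reclassification above, a VFL_TYPE_GRABBER node is no longer assumed to be a video device; which ioctls get enabled follows device_caps instead. A metadata-only capture node would advertise itself roughly like this (hypothetical driver code, real capability flags):

	/* registered as VFL_TYPE_GRABBER, but caps mark it metadata-only */
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;

and determine_valid_ioctls() will then wire up the *_meta_cap ioctl handlers rather than the video ones.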
@@ -571,8 +581,10 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_TRY_EXT_CTRLS), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_querymenu)
set_bit(_IOC_NR(VIDIOC_QUERYMENU), valid_ioctls);
- SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
- SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ if (!is_tch) {
+ SET_VALID_IOCTL(ops, VIDIOC_G_FREQUENCY, vidioc_g_frequency);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FREQUENCY, vidioc_s_frequency);
+ }
SET_VALID_IOCTL(ops, VIDIOC_LOG_STATUS, vidioc_log_status);
#ifdef CONFIG_VIDEO_ADV_DEBUG
set_bit(_IOC_NR(VIDIOC_DBG_G_CHIP_INFO), valid_ioctls);
@@ -586,40 +598,32 @@ static void determine_valid_ioctls(struct video_device *vdev)
if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
- if (is_vid || is_tch) {
- /* video and metadata specific ioctls */
+ if (is_vid) {
+ /* video specific ioctls */
if ((is_rx && (ops->vidioc_enum_fmt_vid_cap ||
- ops->vidioc_enum_fmt_vid_overlay ||
- ops->vidioc_enum_fmt_meta_cap)) ||
- (is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_meta_out)))
+ ops->vidioc_enum_fmt_vid_overlay)) ||
+ (is_tx && ops->vidioc_enum_fmt_vid_out))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
- ops->vidioc_g_fmt_vid_overlay ||
- ops->vidioc_g_fmt_meta_cap)) ||
+ ops->vidioc_g_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
- ops->vidioc_g_fmt_vid_out_overlay ||
- ops->vidioc_g_fmt_meta_out)))
+ ops->vidioc_g_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
- ops->vidioc_s_fmt_vid_overlay ||
- ops->vidioc_s_fmt_meta_cap)) ||
+ ops->vidioc_s_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
- ops->vidioc_s_fmt_vid_out_overlay ||
- ops->vidioc_s_fmt_meta_out)))
+ ops->vidioc_s_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
- ops->vidioc_try_fmt_vid_overlay ||
- ops->vidioc_try_fmt_meta_cap)) ||
+ ops->vidioc_try_fmt_vid_overlay)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
- ops->vidioc_try_fmt_vid_out_overlay ||
- ops->vidioc_try_fmt_meta_out)))
+ ops->vidioc_try_fmt_vid_out_overlay)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
@@ -641,7 +645,21 @@ static void determine_valid_ioctls(struct video_device *vdev)
set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- } else if (is_vbi) {
+ }
+ if (is_meta && is_rx) {
+ /* metadata capture specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_cap);
+ } else if (is_meta && is_tx) {
+ /* metadata output specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_meta_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_meta_out);
+ }
+ if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
ops->vidioc_g_fmt_sliced_vbi_cap)) ||
@@ -659,30 +677,35 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_sliced_vbi_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SLICED_VBI_CAP, vidioc_g_sliced_vbi_cap);
+ } else if (is_tch) {
+ /* touch specific ioctls */
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_vid_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUMINPUT, vidioc_enum_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_INPUT, vidioc_g_input);
+ SET_VALID_IOCTL(ops, VIDIOC_S_INPUT, vidioc_s_input);
+ SET_VALID_IOCTL(ops, VIDIOC_G_PARM, vidioc_g_parm);
+ SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
} else if (is_sdr && is_rx) {
/* SDR receiver specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_cap)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_cap);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_cap);
} else if (is_sdr && is_tx) {
/* SDR transmitter specific ioctls */
- if (ops->vidioc_enum_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
- if (ops->vidioc_g_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
- if (ops->vidioc_s_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
- if (ops->vidioc_try_fmt_sdr_out)
- set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_FMT, vidioc_enum_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_G_FMT, vidioc_g_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_S_FMT, vidioc_s_fmt_sdr_out);
+ SET_VALID_IOCTL(ops, VIDIOC_TRY_FMT, vidioc_try_fmt_sdr_out);
}
- if (is_vid || is_vbi || is_sdr || is_tch) {
- /* ioctls valid for video, metadata, vbi or sdr */
+ if (is_vid || is_vbi || is_sdr || is_tch || is_meta) {
+ /* ioctls valid for video, vbi, sdr, touch and metadata */
SET_VALID_IOCTL(ops, VIDIOC_REQBUFS, vidioc_reqbufs);
SET_VALID_IOCTL(ops, VIDIOC_QUERYBUF, vidioc_querybuf);
SET_VALID_IOCTL(ops, VIDIOC_QBUF, vidioc_qbuf);
@@ -694,8 +717,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_STREAMOFF, vidioc_streamoff);
}
- if (is_vid || is_vbi || is_tch) {
- /* ioctls valid for video or vbi */
+ if (is_vid || is_vbi || is_meta) {
+ /* ioctls valid for video, vbi and metadata */
if (ops->vidioc_s_std)
set_bit(_IOC_NR(VIDIOC_ENUMSTD), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_STD, vidioc_s_std);
@@ -719,8 +742,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_AUDOUT, vidioc_g_audout);
SET_VALID_IOCTL(ops, VIDIOC_S_AUDOUT, vidioc_s_audout);
}
- if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
- ops->vidioc_g_std))
+ if (ops->vidioc_g_parm || ops->vidioc_g_std)
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
@@ -734,7 +756,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_MODULATOR, vidioc_g_modulator);
SET_VALID_IOCTL(ops, VIDIOC_S_MODULATOR, vidioc_s_modulator);
}
- if (is_rx) {
+ if (is_rx && !is_tch) {
/* receiver only ioctls */
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
SET_VALID_IOCTL(ops, VIDIOC_S_TUNER, vidioc_s_tuner);
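The is_vid/is_meta split above means a video node's valid ioctls now follow vdev->device_caps rather than the VFL type alone. A minimal registration sketch under that assumption (driver names hypothetical, queue setup elided):

	/* Hypothetical probe fragment for a metadata-capture /dev/videoX node. */
	vdev->vfl_type = VFL_TYPE_GRABBER;	/* same node type as video */
	vdev->vfl_dir = VFL_DIR_RX;		/* capture direction */
	vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
	/*
	 * With only META_CAPTURE set, determine_valid_ioctls() computes
	 * is_meta = true and is_vid = false, so VIDIOC_ENUM_FMT/G_FMT/S_FMT/
	 * TRY_FMT are validated against the vidioc_*_fmt_meta_cap handlers.
	 */
	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);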
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4f23e939ead0..230d65a64217 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -293,7 +293,7 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
if (prefix == NULL)
prefix = "";
- pr_info("%s: %s%ux%u%s%u.%u (%ux%u)\n", dev_prefix, prefix,
+ pr_info("%s: %s%ux%u%s%u.%02u (%ux%u)\n", dev_prefix, prefix,
bt->width, bt->height, bt->interlaced ? "i" : "p",
fps / 100, fps % 100, htot, vtot);
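The %02u fix matters because fps is kept in hundredths of a hertz, so fractional parts below ten previously lost their leading zero. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int fps = 6005;	/* 60.05 Hz, stored in hundredths */

	printf("%u.%u\n", fps / 100, fps % 100);	/* old: prints "60.5" -- wrong */
	printf("%u.%02u\n", fps / 100, fps % 100);	/* new: prints "60.05" */
	return 0;
}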
@@ -757,7 +757,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
hsync = (frame_width * 8 + 50) / 100;
- hsync = ((hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN) * GTF_CELL_GRAN;
+ hsync = DIV_ROUND_CLOSEST(hsync, GTF_CELL_GRAN) * GTF_CELL_GRAN;
h_fp = h_blank / 2 - hsync;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3bd1888787eb..192cac076761 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -512,6 +512,7 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
return;
kfree(vep->link_frequencies);
+ vep->link_frequencies = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
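Clearing link_frequencies after the kfree() makes v4l2_fwnode_endpoint_free() safe to call twice and turns any later use of the stale pointer into an obvious NULL dereference. A sketch of the double-unwind pattern this guards (caller shape hypothetical):

	v4l2_fwnode_endpoint_free(&vep);	/* frees and NULLs link_frequencies */
	/* ... an error path unwinds and frees again ... */
	v4l2_fwnode_endpoint_free(&vep);	/* harmless: kfree(NULL) is a no-op */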
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 51b912743f0f..4e700583659b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -932,12 +932,22 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
static int check_fmt(struct file *file, enum v4l2_buf_type type)
{
+ const u32 vid_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_VIDEO_M2M_MPLANE;
+ const u32 meta_caps = V4L2_CAP_META_CAPTURE |
+ V4L2_CAP_META_OUTPUT;
struct video_device *vfd = video_devdata(file);
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER;
+ bool is_vid = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & vid_caps);
bool is_vbi = vfd->vfl_type == VFL_TYPE_VBI;
bool is_sdr = vfd->vfl_type == VFL_TYPE_SDR;
bool is_tch = vfd->vfl_type == VFL_TYPE_TOUCH;
+ bool is_meta = vfd->vfl_type == VFL_TYPE_GRABBER &&
+ (vfd->device_caps & meta_caps);
bool is_rx = vfd->vfl_dir != VFL_DIR_TX;
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
@@ -996,11 +1006,11 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
return 0;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
- if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
+ if (is_meta && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
case V4L2_BUF_TYPE_META_OUTPUT:
- if (is_vid && is_tx && ops->vidioc_g_fmt_meta_out)
+ if (is_meta && is_tx && ops->vidioc_g_fmt_meta_out)
return 0;
break;
default:
@@ -1330,6 +1340,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_META_FMT_VSP1_HGT: descr = "R-Car VSP1 2-D Histogram"; break;
case V4L2_META_FMT_UVC: descr = "UVC Payload Header Metadata"; break;
case V4L2_META_FMT_D4XX: descr = "Intel D4xx UVC Metadata"; break;
+ case V4L2_META_FMT_VIVID: descr = "Vivid Metadata"; break;
default:
/* Compressed formats */
@@ -1356,6 +1367,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_VP8_FRAME: descr = "VP8 Frame"; break;
case V4L2_PIX_FMT_VP9: descr = "VP9"; break;
case V4L2_PIX_FMT_HEVC: descr = "HEVC"; break; /* aka H.265 */
+ case V4L2_PIX_FMT_HEVC_SLICE: descr = "HEVC Parsed Slice Data"; break;
case V4L2_PIX_FMT_FWHT: descr = "FWHT"; break; /* used in vicodec */
case V4L2_PIX_FMT_FWHT_STATELESS: descr = "FWHT Stateless"; break; /* used in vicodec */
case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break;
@@ -1466,10 +1478,26 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
return ret;
}
+static void v4l_pix_format_touch(struct v4l2_pix_format *p)
+{
+ /*
+ * The v4l2_pix_format structure contains fields that make no sense for
+ * touch. Set them to default values in this case.
+ */
+
+ p->field = V4L2_FIELD_NONE;
+ p->colorspace = V4L2_COLORSPACE_RAW;
+ p->flags = 0;
+ p->ycbcr_enc = 0;
+ p->quantization = 0;
+ p->xfer_func = 0;
+}
+
static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_format *p = arg;
+ struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
if (ret)
@@ -1507,6 +1535,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
+ if (vfd->vfl_type == VFL_TYPE_TOUCH)
+ v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
@@ -1544,21 +1574,6 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static void v4l_pix_format_touch(struct v4l2_pix_format *p)
-{
- /*
- * The v4l2_pix_format structure contains fields that make no sense for
- * touch. Set them to default values in this case.
- */
-
- p->field = V4L2_FIELD_NONE;
- p->colorspace = V4L2_COLORSPACE_RAW;
- p->flags = 0;
- p->ycbcr_enc = 0;
- p->quantization = 0;
- p->xfer_func = 0;
-}
-
static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -1602,12 +1617,12 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
@@ -1633,22 +1648,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
@@ -1704,12 +1719,12 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
@@ -1735,22 +1750,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.vbi);
+ CLEAR_AFTER_FIELD(p, fmt.vbi.flags);
return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sliced);
+ CLEAR_AFTER_FIELD(p, fmt.sliced.io_size);
return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
- CLEAR_AFTER_FIELD(p, fmt.sdr);
+ CLEAR_AFTER_FIELD(p, fmt.sdr.buffersize);
return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
@@ -2637,7 +2652,7 @@ struct v4l2_ioctl_info {
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
- sizeof(((struct v4l2_struct *)0)->field)) << 16)
+ FIELD_SIZEOF(struct v4l2_struct, field)) << 16)
#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
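The INFO_FL_CLEAR change is purely cosmetic: FIELD_SIZEOF() in include/linux/kernel.h at this point is defined as the very expression being replaced:

#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))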
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 19937dd3c6f6..1afd9c6ad908 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -284,7 +284,8 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags_job, flags_out, flags_cap;
+ unsigned long flags_job;
+ struct vb2_v4l2_buffer *dst, *src;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -307,20 +308,37 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
- && !m2m_ctx->out_q_ctx.buffered) {
+ src = v4l2_m2m_next_src_buf(m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+ if (!src && !m2m_ctx->out_q_ctx.buffered) {
dprintk("No input buffers available\n");
- goto out_unlock;
+ goto job_unlock;
}
- spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
- && !m2m_ctx->cap_q_ctx.buffered) {
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
dprintk("No output buffers available\n");
- goto cap_unlock;
+ goto job_unlock;
}
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+
+ m2m_ctx->new_frame = true;
+
+ if (src && dst && dst->is_held &&
+ dst->vb2_buf.copied_timestamp &&
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp) {
+ dst->is_held = false;
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ dst = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
+ dprintk("No output buffers available after returning held buffer\n");
+ goto job_unlock;
+ }
+ }
+
+ if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp ||
+ dst->vb2_buf.timestamp != src->vb2_buf.timestamp;
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -331,13 +349,6 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
- return;
-
-cap_unlock:
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
-out_unlock:
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
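The is_held/copied_timestamp handling above lets several output (e.g. slice) buffers decode into one capture buffer that stays held on the ready queue. From userspace the hold is requested per output buffer; a sketch, assuming an open stateless-decoder fd (error handling omitted):

	struct v4l2_buffer buf = {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.flags = V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF,	/* keep dst held */
		/* .index, .memory, .m.planes etc. as usual */
	};

	/*
	 * Queue each slice of a frame with the flag set; the held capture
	 * buffer is only completed once an output buffer arrives without the
	 * flag (or with a new timestamp), or on V4L2_DEC_CMD_FLUSH.
	 */
	ioctl(fd, VIDIOC_QBUF, &buf);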
@@ -412,37 +423,97 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
}
}
-void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
- struct v4l2_m2m_ctx *m2m_ctx)
+/*
+ * Schedule the next job, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
{
- unsigned long flags;
+ /*
+ * This instance might have more buffers ready, but since we do not
+ * allow more than one job on the job_queue per instance, each has
+ * to be scheduled separately after the previous one finishes.
+ */
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ /*
+ * We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
+}
+
+/*
+ * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
+ * v4l2_m2m_buf_done_and_job_finish().
+ */
+static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Called by an instance not currently running\n");
- return;
+ return false;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
+ return true;
+}
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
-
- /* This instance might have more buffers ready, but since we do not
- * allow more than one job on the job_queue per instance, each has
- * to be scheduled separately after the previous one finishes. */
- __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx)
+{
+ unsigned long flags;
+ bool schedule_next;
- /* We might be running in atomic context,
- * but the job must be run in non-atomic context.
+ /*
+ * This function should not be used for drivers that support
+ * holding capture buffers. Those should use
+ * v4l2_m2m_buf_done_and_job_finish() instead.
*/
- schedule_work(&m2m_dev->job_work);
+ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags &
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ bool schedule_next = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ src_buf = v4l2_m2m_src_buf_remove(m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx);
+
+ if (WARN_ON(!src_buf || !dst_buf))
+ goto unlock;
+ v4l2_m2m_buf_done(src_buf, state);
+ dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ if (!dst_buf->is_held) {
+ v4l2_m2m_dst_buf_remove(m2m_ctx);
+ v4l2_m2m_buf_done(dst_buf, state);
+ }
+ schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx);
+unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ if (schedule_next)
+ v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx);
+}
+EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish);
+
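For drivers whose output queue sets VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF, the new helper replaces the usual per-buffer buf_done calls plus v4l2_m2m_job_finish() in the completion path. A minimal sketch under that assumption (driver names hypothetical):

	/* Hypothetical decode-complete handler of a stateless codec driver. */
	static void foo_job_done(struct foo_ctx *ctx, enum vb2_buffer_state state)
	{
		/*
		 * Marks the source buffer done, completes or holds the
		 * destination buffer depending on the source buffer's
		 * V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF flag, then finishes the
		 * job and schedules the next one.
		 */
		v4l2_m2m_buf_done_and_job_finish(ctx->m2m_dev, ctx->fh.m2m_ctx, state);
	}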
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
@@ -1154,6 +1225,59 @@ int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ if (dc->cmd != V4L2_DEC_CMD_FLUSH)
+ return -EINVAL;
+
+ dc->flags = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
+
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct v4l2_fh *fh = file->private_data;
+ struct vb2_v4l2_buffer *out_vb, *cap_vb;
+ struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
+ unsigned long flags;
+ int ret;
+
+ ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
+ cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
+
+ /*
+ * If there is an out buffer pending, then clear any HOLD flag.
+ *
+ * By clearing this flag we ensure that when this output
+ * buffer is processed any held capture buffer will be released.
+ */
+ if (out_vb) {
+ out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else if (cap_vb && cap_vb->is_held) {
+ /*
+ * If there were no output buffers, but there is a
+ * capture buffer that is held, then release that
+ * buffer.
+ */
+ cap_vb->is_held = false;
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
+
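Stateless decoders can wire the two new helpers directly into their ioctl table; a sketch, assuming an otherwise complete v4l2_ioctl_ops:

	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
		/* format, buffer and streaming ioctls elided */
		.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_stateless_try_decoder_cmd,
		.vidioc_decoder_cmd	= v4l2_m2m_ioctl_stateless_decoder_cmd,
	};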
/*
* v4l2_file_operations helpers. It is assumed here same lock is used
* for the output and the capture buffer queue.
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index f725cd9b66b9..9e987c0f840e 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -112,7 +112,7 @@ static int subdev_close(struct file *file)
return 0;
}
-static inline int check_which(__u32 which)
+static inline int check_which(u32 which)
{
if (which != V4L2_SUBDEV_FORMAT_TRY &&
which != V4L2_SUBDEV_FORMAT_ACTIVE)
@@ -121,7 +121,7 @@ static inline int check_which(__u32 which)
return 0;
}
-static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
+static inline int check_pad(struct v4l2_subdev *sd, u32 pad)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
if (sd->entity.num_pads) {
@@ -136,7 +136,7 @@ static inline int check_pad(struct v4l2_subdev *sd, __u32 pad)
return 0;
}
-static int check_cfg(__u32 which, struct v4l2_subdev_pad_config *cfg)
+static int check_cfg(u32 which, struct v4l2_subdev_pad_config *cfg)
{
if (which == V4L2_SUBDEV_FORMAT_TRY && !cfg)
return -EINVAL;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 439d7d886873..a113e811faab 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
static const struct dev_pm_ops smi_larb_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_larb_driver = {
@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
static const struct dev_pm_ops smi_common_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_common_driver = {
diff --git a/drivers/memstick/core/Kconfig b/drivers/memstick/core/Kconfig
index 516f454fde14..08192fd70eb4 100644
--- a/drivers/memstick/core/Kconfig
+++ b/drivers/memstick/core/Kconfig
@@ -6,16 +6,16 @@
comment "MemoryStick drivers"
config MEMSTICK_UNSAFE_RESUME
- bool "Allow unsafe resume (DANGEROUS)"
- help
- If you say Y here, the MemoryStick layer will assume that all
- cards stayed in their respective slots during the suspend. The
- normal behaviour is to remove them at suspend and
- redetecting them at resume. Breaking this assumption will
- in most cases result in data corruption.
+ bool "Allow unsafe resume (DANGEROUS)"
+ help
+ If you say Y here, the MemoryStick layer will assume that all
+ cards stayed in their respective slots during the suspend. The
+	  normal behaviour is to remove them at suspend and
+	  redetect them at resume. Breaking this assumption will
+ in most cases result in data corruption.
- This option is usually just for embedded systems which use
- a MemoryStick card for rootfs. Most people should say N here.
+ This option is usually just for embedded systems which use
+ a MemoryStick card for rootfs. Most people should say N here.
config MSPRO_BLOCK
tristate "MemoryStick Pro block device driver"
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 446c93ecef8f..4113343da056 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -18,7 +18,7 @@ config MEMSTICK_TIFM_MS
'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
(TIFM_7XX1)'.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called tifm_ms.
config MEMSTICK_JMICRON_38X
@@ -29,7 +29,7 @@ config MEMSTICK_JMICRON_38X
Say Y here if you want to be able to access MemoryStick cards with
the JMicron(R) JMB38X MemoryStick card reader.
- To compile this driver as a module, choose M here: the
+ To compile this driver as a module, choose M here: the
module will be called jmb38x_ms.
config MEMSTICK_R592
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 64fff6abe60e..0a9c5ddf2f59 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -433,13 +433,13 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
writel(((1 << 16) & BLOCK_COUNT_MASK)
| (data_len & BLOCK_SIZE_MASK),
host->addr + BLOCK);
- t_val = readl(host->addr + INT_STATUS_ENABLE);
- t_val |= host->req->data_dir == READ
- ? INT_STATUS_FIFO_RRDY
- : INT_STATUS_FIFO_WRDY;
+ t_val = readl(host->addr + INT_STATUS_ENABLE);
+ t_val |= host->req->data_dir == READ
+ ? INT_STATUS_FIFO_RRDY
+ : INT_STATUS_FIFO_WRDY;
- writel(t_val, host->addr + INT_STATUS_ENABLE);
- writel(t_val, host->addr + INT_SIGNAL_ENABLE);
+ writel(t_val, host->addr + INT_STATUS_ENABLE);
+ writel(t_val, host->addr + INT_SIGNAL_ENABLE);
} else {
cmd &= ~(TPC_DATA_SEL | 0xf);
host->cmd_flags |= REG_DATA;
@@ -848,7 +848,7 @@ static int jmb38x_ms_count_slots(struct pci_dev *pdev)
{
int cnt, rc = 0;
- for (cnt = 0; cnt < PCI_ROM_RESOURCE; ++cnt) {
+ for (cnt = 0; cnt < PCI_STD_NUM_BARS; ++cnt) {
if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt)))
break;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ae24d3ea68ea..420900852166 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1210,13 +1210,6 @@ config AB8500_DEBUG
Select this option if you want debug information using the debug
filesystem, debugfs.
-config AB8500_GPADC
- bool "ST-Ericsson AB8500 GPADC driver"
- depends on AB8500_CORE && REGULATOR_AB8500
- default y
- help
- AB8500 GPADC driver used to convert Acc and battery/ac/usb voltage
-
config MFD_DB8500_PRCMU
bool "ST-Ericsson DB8500 Power Reset Control Management Unit"
depends on UX500_SOC_DB8500
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c1067ea46204..aed99f08739f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -177,7 +177,6 @@ obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
-obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
# ab8500-core need to come after db8500-prcmu (which provides the channel)
obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 3e9dc92cb467..bafc729fc434 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -610,107 +610,53 @@ int ab8500_suspend(struct ab8500 *ab8500)
}
static const struct mfd_cell ab8500_bm_devs[] = {
- {
- .name = "ab8500-charger",
- .of_compatible = "stericsson,ab8500-charger",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
- {
- .name = "ab8500-btemp",
- .of_compatible = "stericsson,ab8500-btemp",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
- {
- .name = "ab8500-fg",
- .of_compatible = "stericsson,ab8500-fg",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
- {
- .name = "ab8500-chargalg",
- .of_compatible = "stericsson,ab8500-chargalg",
- .platform_data = &ab8500_bm_data,
- .pdata_size = sizeof(ab8500_bm_data),
- },
+ OF_MFD_CELL("ab8500-charger", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-charger"),
+ OF_MFD_CELL("ab8500-btemp", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-btemp"),
+ OF_MFD_CELL("ab8500-fg", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-fg"),
+ OF_MFD_CELL("ab8500-chargalg", NULL, &ab8500_bm_data,
+ sizeof(ab8500_bm_data), 0, "stericsson,ab8500-chargalg"),
};
static const struct mfd_cell ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
- {
- .name = "ab8500-debug",
- .of_compatible = "stericsson,ab8500-debug",
- },
+ OF_MFD_CELL("ab8500-debug",
+ NULL, NULL, 0, 0, "stericsson,ab8500-debug"),
#endif
- {
- .name = "ab8500-sysctrl",
- .of_compatible = "stericsson,ab8500-sysctrl",
- },
- {
- .name = "ab8500-ext-regulator",
- .of_compatible = "stericsson,ab8500-ext-regulator",
- },
- {
- .name = "ab8500-regulator",
- .of_compatible = "stericsson,ab8500-regulator",
- },
- {
- .name = "ab8500-clk",
- .of_compatible = "stericsson,ab8500-clk",
- },
- {
- .name = "ab8500-gpadc",
- .of_compatible = "stericsson,ab8500-gpadc",
- },
- {
- .name = "ab8500-rtc",
- .of_compatible = "stericsson,ab8500-rtc",
- },
- {
- .name = "ab8500-acc-det",
- .of_compatible = "stericsson,ab8500-acc-det",
- },
- {
-
- .name = "ab8500-poweron-key",
- .of_compatible = "stericsson,ab8500-poweron-key",
- },
- {
- .name = "ab8500-pwm",
- .of_compatible = "stericsson,ab8500-pwm",
- .id = 1,
- },
- {
- .name = "ab8500-pwm",
- .of_compatible = "stericsson,ab8500-pwm",
- .id = 2,
- },
- {
- .name = "ab8500-pwm",
- .of_compatible = "stericsson,ab8500-pwm",
- .id = 3,
- },
- {
- .name = "ab8500-denc",
- .of_compatible = "stericsson,ab8500-denc",
- },
- {
- .name = "pinctrl-ab8500",
- .of_compatible = "stericsson,ab8500-gpio",
- },
- {
- .name = "abx500-temp",
- .of_compatible = "stericsson,abx500-temp",
- },
- {
- .name = "ab8500-usb",
- .of_compatible = "stericsson,ab8500-usb",
- },
- {
- .name = "ab8500-codec",
- .of_compatible = "stericsson,ab8500-codec",
- },
+ OF_MFD_CELL("ab8500-sysctrl",
+ NULL, NULL, 0, 0, "stericsson,ab8500-sysctrl"),
+ OF_MFD_CELL("ab8500-ext-regulator",
+ NULL, NULL, 0, 0, "stericsson,ab8500-ext-regulator"),
+ OF_MFD_CELL("ab8500-regulator",
+ NULL, NULL, 0, 0, "stericsson,ab8500-regulator"),
+ OF_MFD_CELL("abx500-clk",
+ NULL, NULL, 0, 0, "stericsson,abx500-clk"),
+ OF_MFD_CELL("ab8500-gpadc",
+ NULL, NULL, 0, 0, "stericsson,ab8500-gpadc"),
+ OF_MFD_CELL("ab8500-rtc",
+ NULL, NULL, 0, 0, "stericsson,ab8500-rtc"),
+ OF_MFD_CELL("ab8500-acc-det",
+ NULL, NULL, 0, 0, "stericsson,ab8500-acc-det"),
+ OF_MFD_CELL("ab8500-poweron-key",
+ NULL, NULL, 0, 0, "stericsson,ab8500-poweron-key"),
+ OF_MFD_CELL("ab8500-pwm",
+ NULL, NULL, 0, 1, "stericsson,ab8500-pwm"),
+ OF_MFD_CELL("ab8500-pwm",
+ NULL, NULL, 0, 2, "stericsson,ab8500-pwm"),
+ OF_MFD_CELL("ab8500-pwm",
+ NULL, NULL, 0, 3, "stericsson,ab8500-pwm"),
+ OF_MFD_CELL("ab8500-denc",
+ NULL, NULL, 0, 0, "stericsson,ab8500-denc"),
+ OF_MFD_CELL("pinctrl-ab8500",
+ NULL, NULL, 0, 0, "stericsson,ab8500-gpio"),
+ OF_MFD_CELL("abx500-temp",
+ NULL, NULL, 0, 0, "stericsson,abx500-temp"),
+ OF_MFD_CELL("ab8500-usb",
+ NULL, NULL, 0, 0, "stericsson,ab8500-usb"),
+ OF_MFD_CELL("ab8500-codec",
+ NULL, NULL, 0, 0, "stericsson,ab8500-codec"),
};
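OF_MFD_CELL(name, resources, pdata, pdata_size, id, compat) from include/linux/mfd/core.h fills the same mfd_cell fields the open-coded initializers did, so the conversion is mechanical; the first battery-management cell above, for instance, expands to the equivalent of:

	{
		.name		= "ab8500-charger",
		.platform_data	= &ab8500_bm_data,
		.pdata_size	= sizeof(ab8500_bm_data),
		.id		= 0,
		.of_compatible	= "stericsson,ab8500-charger",
	},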
static const struct mfd_cell ab9540_devs[] = {
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index f4e26b6e5362..1a9a3414d4fa 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -84,7 +84,6 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/string.h>
@@ -103,11 +102,6 @@ static int num_irqs;
static struct device_attribute **dev_attr;
static char **event_name;
-static u8 avg_sample = SAMPLE_16;
-static u8 trig_edge = RISING_EDGE;
-static u8 conv_type = ADC_SW;
-static u8 trig_timer;
-
/**
* struct ab8500_reg_range
* @first: the first address of the range
@@ -152,7 +146,6 @@ static struct hwreg_cfg hwreg_cfg = {
};
#define AB8500_NAME_STRING "ab8500"
-#define AB8500_ADC_NAME_STRING "gpadc"
#define AB8500_NUM_BANKS AB8500_DEBUG_FIELD_LAST
#define AB8500_REV_REG 0x80
@@ -1646,633 +1639,6 @@ report_write_failure:
DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
-{
- int bat_ctrl_raw;
- int bat_ctrl_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL,
- avg_sample, trig_edge, trig_timer, conv_type);
- bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- BAT_CTRL, bat_ctrl_raw);
-
- seq_printf(s, "%d,0x%X\n", bat_ctrl_convert, bat_ctrl_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-
-static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
-{
- int btemp_ball_raw;
- int btemp_ball_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL,
- avg_sample, trig_edge, trig_timer, conv_type);
- btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
- btemp_ball_raw);
-
- seq_printf(s, "%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-
-static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
-{
- int main_charger_v_raw;
- int main_charger_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- MAIN_CHARGER_V, main_charger_v_raw);
-
- seq_printf(s, "%d,0x%X\n", main_charger_v_convert, main_charger_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-
-static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
-{
- int acc_detect1_raw;
- int acc_detect1_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1,
- avg_sample, trig_edge, trig_timer, conv_type);
- acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
- acc_detect1_raw);
-
- seq_printf(s, "%d,0x%X\n", acc_detect1_convert, acc_detect1_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-
-static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
-{
- int acc_detect2_raw;
- int acc_detect2_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2,
- avg_sample, trig_edge, trig_timer, conv_type);
- acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- ACC_DETECT2, acc_detect2_raw);
-
- seq_printf(s, "%d,0x%X\n", acc_detect2_convert, acc_detect2_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-
-static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
-{
- int aux1_raw;
- int aux1_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1,
- avg_sample, trig_edge, trig_timer, conv_type);
- aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
- aux1_raw);
-
- seq_printf(s, "%d,0x%X\n", aux1_convert, aux1_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-
-static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
-{
- int aux2_raw;
- int aux2_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2,
- avg_sample, trig_edge, trig_timer, conv_type);
- aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
- aux2_raw);
-
- seq_printf(s, "%d,0x%X\n", aux2_convert, aux2_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-
-static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
-{
- int main_bat_v_raw;
- int main_bat_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
- main_bat_v_raw);
-
- seq_printf(s, "%d,0x%X\n", main_bat_v_convert, main_bat_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-
-static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
-{
- int vbus_v_raw;
- int vbus_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
- vbus_v_raw);
-
- seq_printf(s, "%d,0x%X\n", vbus_v_convert, vbus_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-
-static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
-{
- int main_charger_c_raw;
- int main_charger_c_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C,
- avg_sample, trig_edge, trig_timer, conv_type);
- main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- MAIN_CHARGER_C, main_charger_c_raw);
-
- seq_printf(s, "%d,0x%X\n", main_charger_c_convert, main_charger_c_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-
-static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
-{
- int usb_charger_c_raw;
- int usb_charger_c_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C,
- avg_sample, trig_edge, trig_timer, conv_type);
- usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- USB_CHARGER_C, usb_charger_c_raw);
-
- seq_printf(s, "%d,0x%X\n", usb_charger_c_convert, usb_charger_c_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-
-static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
-{
- int bk_bat_v_raw;
- int bk_bat_v_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V,
- avg_sample, trig_edge, trig_timer, conv_type);
- bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- BK_BAT_V, bk_bat_v_raw);
-
- seq_printf(s, "%d,0x%X\n", bk_bat_v_convert, bk_bat_v_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-
-static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
-{
- int die_temp_raw;
- int die_temp_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP,
- avg_sample, trig_edge, trig_timer, conv_type);
- die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
- die_temp_raw);
-
- seq_printf(s, "%d,0x%X\n", die_temp_convert, die_temp_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-
-static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
-{
- int usb_id_raw;
- int usb_id_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- usb_id_raw = ab8500_gpadc_read_raw(gpadc, USB_ID,
- avg_sample, trig_edge, trig_timer, conv_type);
- usb_id_convert = ab8500_gpadc_ad_to_voltage(gpadc, USB_ID,
- usb_id_raw);
-
- seq_printf(s, "%d,0x%X\n", usb_id_convert, usb_id_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-
-static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
-{
- int xtal_temp_raw;
- int xtal_temp_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- xtal_temp_raw = ab8500_gpadc_read_raw(gpadc, XTAL_TEMP,
- avg_sample, trig_edge, trig_timer, conv_type);
- xtal_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, XTAL_TEMP,
- xtal_temp_raw);
-
- seq_printf(s, "%d,0x%X\n", xtal_temp_convert, xtal_temp_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-
-static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
-{
- int vbat_true_meas_raw;
- int vbat_true_meas_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_true_meas_raw = ab8500_gpadc_read_raw(gpadc, VBAT_TRUE_MEAS,
- avg_sample, trig_edge, trig_timer, conv_type);
- vbat_true_meas_convert =
- ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
- vbat_true_meas_raw);
-
- seq_printf(s, "%d,0x%X\n", vbat_true_meas_convert, vbat_true_meas_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
-{
- int bat_ctrl_raw;
- int bat_ctrl_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_ctrl_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_CTRL_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
-
- bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc, BAT_CTRL,
- bat_ctrl_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- bat_ctrl_convert, bat_ctrl_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-
-static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
-{
- int vbat_meas_raw;
- int vbat_meas_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_meas_raw = ab8500_gpadc_double_read_raw(gpadc, VBAT_MEAS_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
- vbat_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
- vbat_meas_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- vbat_meas_convert, vbat_meas_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
-{
- int vbat_true_meas_raw;
- int vbat_true_meas_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- vbat_true_meas_raw = ab8500_gpadc_double_read_raw(gpadc,
- VBAT_TRUE_MEAS_AND_IBAT, avg_sample, trig_edge,
- trig_timer, conv_type, &ibat_raw);
- vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc,
- VBAT_TRUE_MEAS, vbat_true_meas_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- vbat_true_meas_convert, vbat_true_meas_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-
-static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
-{
- int bat_temp_raw;
- int bat_temp_convert;
- int ibat_raw;
- int ibat_convert;
- struct ab8500_gpadc *gpadc;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- bat_temp_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_TEMP_AND_IBAT,
- avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
- bat_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
- bat_temp_raw);
- ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
- ibat_raw);
-
- seq_printf(s,
- "%d,0x%X\n"
- "%d,0x%X\n",
- bat_temp_convert, bat_temp_raw,
- ibat_convert, ibat_raw);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-
-static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
-{
- struct ab8500_gpadc *gpadc;
- u16 vmain_l, vmain_h, btemp_l, btemp_h;
- u16 vbat_l, vbat_h, ibat_l, ibat_h;
-
- gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
- ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h,
- &vbat_l, &vbat_h, &ibat_l, &ibat_h);
- seq_printf(s,
- "VMAIN_L:0x%X\n"
- "VMAIN_H:0x%X\n"
- "BTEMP_L:0x%X\n"
- "BTEMP_H:0x%X\n"
- "VBAT_L:0x%X\n"
- "VBAT_H:0x%X\n"
- "IBAT_L:0x%X\n"
- "IBAT_H:0x%X\n",
- vmain_l, vmain_h, btemp_l, btemp_h,
- vbat_l, vbat_h, ibat_l, ibat_h);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
-
-static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", avg_sample);
-
- return 0;
-}
-
-static int ab8500_gpadc_avg_sample_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_avg_sample_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_avg_sample_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_avg_sample;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_avg_sample);
- if (err)
- return err;
-
- if ((user_avg_sample == SAMPLE_1) || (user_avg_sample == SAMPLE_4)
- || (user_avg_sample == SAMPLE_8)
- || (user_avg_sample == SAMPLE_16)) {
- avg_sample = (u8) user_avg_sample;
- } else {
- dev_err(dev,
- "debugfs err input: should be egal to 1, 4, 8 or 16\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_avg_sample_fops = {
- .open = ab8500_gpadc_avg_sample_open,
- .read = seq_read,
- .write = ab8500_gpadc_avg_sample_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_trig_edge_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", trig_edge);
-
- return 0;
-}
-
-static int ab8500_gpadc_trig_edge_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_trig_edge_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_trig_edge_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_trig_edge;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_trig_edge);
- if (err)
- return err;
-
- if ((user_trig_edge == RISING_EDGE)
- || (user_trig_edge == FALLING_EDGE)) {
- trig_edge = (u8) user_trig_edge;
- } else {
- dev_err(dev, "Wrong input:\n"
- "Enter 0. Rising edge\n"
- "Enter 1. Falling edge\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_trig_edge_fops = {
- .open = ab8500_gpadc_trig_edge_open,
- .read = seq_read,
- .write = ab8500_gpadc_trig_edge_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_trig_timer_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", trig_timer);
-
- return 0;
-}
-
-static int ab8500_gpadc_trig_timer_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_trig_timer_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_trig_timer;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_trig_timer);
- if (err)
- return err;
-
- if (user_trig_timer & ~0xFF) {
- dev_err(dev,
- "debugfs error input: should be between 0 to 255\n");
- return -EINVAL;
- }
-
- trig_timer = (u8) user_trig_timer;
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_trig_timer_fops = {
- .open = ab8500_gpadc_trig_timer_open,
- .read = seq_read,
- .write = ab8500_gpadc_trig_timer_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_conv_type_print(struct seq_file *s, void *p)
-{
- seq_printf(s, "%d\n", conv_type);
-
- return 0;
-}
-
-static int ab8500_gpadc_conv_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_conv_type_print,
- inode->i_private);
-}
-
-static ssize_t ab8500_gpadc_conv_type_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct device *dev = ((struct seq_file *)(file->private_data))->private;
- unsigned long user_conv_type;
- int err;
-
- err = kstrtoul_from_user(user_buf, count, 0, &user_conv_type);
- if (err)
- return err;
-
- if ((user_conv_type == ADC_SW)
- || (user_conv_type == ADC_HW)) {
- conv_type = (u8) user_conv_type;
- } else {
- dev_err(dev, "Wrong input:\n"
- "Enter 0. ADC SW conversion\n"
- "Enter 1. ADC HW conversion\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static const struct file_operations ab8500_gpadc_conv_type_fops = {
- .open = ab8500_gpadc_conv_type_open,
- .read = seq_read,
- .write = ab8500_gpadc_conv_type_write,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
/*
 * return length of an ASCII numerical value, 0 if the string is not a
 * numerical value.
@@ -2647,7 +2013,6 @@ static const struct file_operations ab8500_hwreg_fops = {
static int ab8500_debug_probe(struct platform_device *plf)
{
struct dentry *ab8500_dir;
- struct dentry *ab8500_gpadc_dir;
struct ab8500 *ab8500;
struct resource *res;
@@ -2689,9 +2054,6 @@ static int ab8500_debug_probe(struct platform_device *plf)
ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
- ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
- ab8500_dir);
-
debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
&plf->dev, &ab8500_bank_registers_fops);
debugfs_create_file("all-banks", S_IRUGO, ab8500_dir,
@@ -2727,83 +2089,6 @@ static int ab8500_debug_probe(struct platform_device *plf)
&plf->dev, &ab8500_hwreg_fops);
debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP),
ab8500_dir, &plf->dev, &ab8500_modem_fops);
- debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bat_ctrl_fops);
- debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_btemp_ball_fops);
- debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_v_fops);
- debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect1_fops);
- debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_acc_detect2_fops);
- debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux1_fops);
- debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_aux2_fops);
- debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_bat_v_fops);
- debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_vbus_v_fops);
- debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_main_charger_c_fops);
- debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_charger_c_fops);
- debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_bk_bat_v_fops);
- debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_die_temp_fops);
- debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_usb_id_fops);
- if (is_ab8540(ab8500)) {
- debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_xtal_temp_fops);
- debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_fops);
- debugfs_create_file("batctrl_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_bat_ctrl_and_ibat_fops);
- debugfs_create_file("vbatmeas_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_meas_and_ibat_fops);
- debugfs_create_file("vbattruemeas_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
- debugfs_create_file("battemp_and_ibat", (S_IRUGO | S_IWUGO),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_bat_temp_and_ibat_fops);
- debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_otp_calib_fops);
- }
- debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_avg_sample_fops);
- debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_edge_fops);
- debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_trig_timer_fops);
- debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8500_gpadc_conv_type_fops);
return 0;
}
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
deleted file mode 100644
index 005f9ee34cd1..000000000000
--- a/drivers/mfd/ab8500-gpadc.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: Johan Palsson <johan.palsson@stericsson.com>
- * Author: M'boumba Cedric Madianga
- */
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_device.h>
-#include <linux/completion.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
-
-/*
- * GPADC register offsets
- * Bank : 0x0A
- */
-#define AB8500_GPADC_CTRL1_REG 0x00
-#define AB8500_GPADC_CTRL2_REG 0x01
-#define AB8500_GPADC_CTRL3_REG 0x02
-#define AB8500_GPADC_AUTO_TIMER_REG 0x03
-#define AB8500_GPADC_STAT_REG 0x04
-#define AB8500_GPADC_MANDATAL_REG 0x05
-#define AB8500_GPADC_MANDATAH_REG 0x06
-#define AB8500_GPADC_AUTODATAL_REG 0x07
-#define AB8500_GPADC_AUTODATAH_REG 0x08
-#define AB8500_GPADC_MUX_CTRL_REG 0x09
-#define AB8540_GPADC_MANDATA2L_REG 0x09
-#define AB8540_GPADC_MANDATA2H_REG 0x0A
-#define AB8540_GPADC_APEAAX_REG 0x10
-#define AB8540_GPADC_APEAAT_REG 0x11
-#define AB8540_GPADC_APEAAM_REG 0x12
-#define AB8540_GPADC_APEAAH_REG 0x13
-#define AB8540_GPADC_APEAAL_REG 0x14
-
-/*
- * OTP register offsets
- * Bank : 0x15
- */
-#define AB8500_GPADC_CAL_1 0x0F
-#define AB8500_GPADC_CAL_2 0x10
-#define AB8500_GPADC_CAL_3 0x11
-#define AB8500_GPADC_CAL_4 0x12
-#define AB8500_GPADC_CAL_5 0x13
-#define AB8500_GPADC_CAL_6 0x14
-#define AB8500_GPADC_CAL_7 0x15
-/* New calibration for 8540 */
-#define AB8540_GPADC_OTP4_REG_7 0x38
-#define AB8540_GPADC_OTP4_REG_6 0x39
-#define AB8540_GPADC_OTP4_REG_5 0x3A
-
-/* gpadc constants */
-#define EN_VINTCORE12 0x04
-#define EN_VTVOUT 0x02
-#define EN_GPADC 0x01
-#define DIS_GPADC 0x00
-#define AVG_1 0x00
-#define AVG_4 0x20
-#define AVG_8 0x40
-#define AVG_16 0x60
-#define ADC_SW_CONV 0x04
-#define EN_ICHAR 0x80
-#define BTEMP_PULL_UP 0x08
-#define EN_BUF 0x40
-#define DIS_ZERO 0x00
-#define GPADC_BUSY 0x01
-#define EN_FALLING 0x10
-#define EN_TRIG_EDGE 0x02
-#define EN_VBIAS_XTAL_TEMP 0x02
-
-/* GPADC constants from AB8500 spec, UM0836 */
-#define ADC_RESOLUTION 1024
-#define ADC_CH_BTEMP_MIN 0
-#define ADC_CH_BTEMP_MAX 1350
-#define ADC_CH_DIETEMP_MIN 0
-#define ADC_CH_DIETEMP_MAX 1350
-#define ADC_CH_CHG_V_MIN 0
-#define ADC_CH_CHG_V_MAX 20030
-#define ADC_CH_ACCDET2_MIN 0
-#define ADC_CH_ACCDET2_MAX 2500
-#define ADC_CH_VBAT_MIN 2300
-#define ADC_CH_VBAT_MAX 4800
-#define ADC_CH_CHG_I_MIN 0
-#define ADC_CH_CHG_I_MAX 1500
-#define ADC_CH_BKBAT_MIN 0
-#define ADC_CH_BKBAT_MAX 3200
-
-/* GPADC constants from AB8540 spec */
-#define ADC_CH_IBAT_MIN (-6000) /* mA range measured by ADC for ibat */
-#define ADC_CH_IBAT_MAX 6000
-#define ADC_CH_IBAT_MIN_V (-60) /* mV range measured by ADC for ibat */
-#define ADC_CH_IBAT_MAX_V 60
-#define IBAT_VDROP_L (-56) /* mV */
-#define IBAT_VDROP_H 56
-
-/* Used to avoid losing precision when dividing to get gain and offset */
-#define CALIB_SCALE 1000
-/*
- * Number of bits to shift by, to avoid losing precision
- * when dividing to get the ibat gain.
- */
-#define CALIB_SHIFT_IBAT 20
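These two scale factors keep the gain/offset math in integer arithmetic: gains are multiplied up by CALIB_SCALE (or shifted left by CALIB_SHIFT_IBAT for ibat) before the division, and scaled back down after the multiply at conversion time. A minimal sketch of the idea using the same macro (illustration only, not part of the driver):

static inline s64 scaled_gain(int val_high, int val_low,
                              int code_high, int code_low)
{
        /* Without CALIB_SCALE this integer division would truncate
         * badly whenever the value span is close to the code span. */
        return (s64)CALIB_SCALE * (val_high - val_low) /
               (code_high - code_low);
}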
-
-/* Time in ms before disabling regulator */
-#define GPADC_AUDOSUSPEND_DELAY 1
-
-#define CONVERSION_TIME 500 /* ms */
-
-enum cal_channels {
- ADC_INPUT_VMAIN = 0,
- ADC_INPUT_BTEMP,
- ADC_INPUT_VBAT,
- ADC_INPUT_IBAT,
- NBR_CAL_INPUTS,
-};
-
-/**
- * struct adc_cal_data - Table for storing gain and offset for the calibrated
- * ADC channels
- * @gain: Gain of the ADC channel
- * @offset: Offset of the ADC channel
- * @otp_calib_hi: raw high-point OTP calibration code
- * @otp_calib_lo: raw low-point OTP calibration code
- */
-struct adc_cal_data {
- s64 gain;
- s64 offset;
- u16 otp_calib_hi;
- u16 otp_calib_lo;
-};
-
-/**
- * struct ab8500_gpadc - AB8500 GPADC device information
- * @dev: pointer to the struct device
- * @node: a list of AB8500 GPADCs, hence prepared for
- *				reentrance
- * @parent: pointer to the struct ab8500
- * @ab8500_gpadc_complete: pointer to the struct completion, to indicate
- * the completion of gpadc conversion
- * @ab8500_gpadc_lock: structure of type mutex
- * @regu: pointer to the struct regulator
- * @irq_sw: interrupt number that is used by gpadc for Sw
- * conversion
- * @irq_hw: interrupt number that is used by gpadc for Hw
- * conversion
- * @cal_data: array of ADC calibration data structs
- */
-struct ab8500_gpadc {
- struct device *dev;
- struct list_head node;
- struct ab8500 *parent;
- struct completion ab8500_gpadc_complete;
- struct mutex ab8500_gpadc_lock;
- struct regulator *regu;
- int irq_sw;
- int irq_hw;
- struct adc_cal_data cal_data[NBR_CAL_INPUTS];
-};
-
-static LIST_HEAD(ab8500_gpadc_list);
-
-/**
- * ab8500_gpadc_get() - look up an AB8500 GPADC by device name
- * @name: device name of the GPADC instance to find
- *
- * Returns the matching GPADC from the instance list, or
- * ERR_PTR(-ENOENT) if no GPADC is registered under that name.
- */
-struct ab8500_gpadc *ab8500_gpadc_get(char *name)
-{
- struct ab8500_gpadc *gpadc;
-
- list_for_each_entry(gpadc, &ab8500_gpadc_list, node) {
- if (!strcmp(name, dev_name(gpadc->dev)))
- return gpadc;
- }
-
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(ab8500_gpadc_get);
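A caller looks the converter up by device name and must check for ERR_PTR() before use; a hedged usage sketch (the instance name "ab8500-gpadc.0" is an assumed example):

        struct ab8500_gpadc *gpadc;

        gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
        if (IS_ERR(gpadc))
                return PTR_ERR(gpadc);  /* -ENOENT if not registered */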
-
-/**
- * ab8500_gpadc_ad_to_voltage() - Convert a raw ADC value to a voltage
- * @gpadc: pointer to the GPADC instance
- * @channel: analog channel the value was sampled from
- * @ad_value: raw ADC value to convert
- */
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc, u8 channel,
- int ad_value)
-{
- int res;
-
- switch (channel) {
- case MAIN_CHARGER_V:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_VMAIN].gain) {
- res = ADC_CH_CHG_V_MIN + (ADC_CH_CHG_V_MAX -
- ADC_CH_CHG_V_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VMAIN].gain +
- gpadc->cal_data[ADC_INPUT_VMAIN].offset) / CALIB_SCALE;
- break;
-
- case XTAL_TEMP:
- case BAT_CTRL:
- case BTEMP_BALL:
- case ACC_DETECT1:
- case ADC_AUX1:
- case ADC_AUX2:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_BTEMP].gain) {
- res = ADC_CH_BTEMP_MIN + (ADC_CH_BTEMP_MAX -
- ADC_CH_BTEMP_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_BTEMP].gain +
- gpadc->cal_data[ADC_INPUT_BTEMP].offset) / CALIB_SCALE;
- break;
-
- case MAIN_BAT_V:
- case VBAT_TRUE_MEAS:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_VBAT].gain) {
- res = ADC_CH_VBAT_MIN + (ADC_CH_VBAT_MAX -
- ADC_CH_VBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_VBAT].gain +
- gpadc->cal_data[ADC_INPUT_VBAT].offset) / CALIB_SCALE;
- break;
-
- case DIE_TEMP:
- res = ADC_CH_DIETEMP_MIN +
- (ADC_CH_DIETEMP_MAX - ADC_CH_DIETEMP_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case ACC_DETECT2:
- res = ADC_CH_ACCDET2_MIN +
- (ADC_CH_ACCDET2_MAX - ADC_CH_ACCDET2_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case VBUS_V:
- res = ADC_CH_CHG_V_MIN +
- (ADC_CH_CHG_V_MAX - ADC_CH_CHG_V_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case MAIN_CHARGER_C:
- case USB_CHARGER_C:
- res = ADC_CH_CHG_I_MIN +
- (ADC_CH_CHG_I_MAX - ADC_CH_CHG_I_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case BK_BAT_V:
- res = ADC_CH_BKBAT_MIN +
- (ADC_CH_BKBAT_MAX - ADC_CH_BKBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
-
- case IBAT_VIRTUAL_CHANNEL:
- /* For some reason we don't have calibrated data */
- if (!gpadc->cal_data[ADC_INPUT_IBAT].gain) {
- res = ADC_CH_IBAT_MIN + (ADC_CH_IBAT_MAX -
- ADC_CH_IBAT_MIN) * ad_value /
- ADC_RESOLUTION;
- break;
- }
- /* Here we can use the calibrated data */
- res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_IBAT].gain +
- gpadc->cal_data[ADC_INPUT_IBAT].offset)
- >> CALIB_SHIFT_IBAT;
- break;
-
- default:
- dev_err(gpadc->dev,
- "unknown channel, not possible to convert\n");
- res = -EINVAL;
- break;
-
- }
- return res;
-}
-EXPORT_SYMBOL(ab8500_gpadc_ad_to_voltage);
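For the uncalibrated fallback branches above, the mapping is a plain linear interpolation over the channel range. A worked example, assuming a mid-scale reading on MAIN_BAT_V with no calibration data:

        /* ad_value = 512, ADC_RESOLUTION = 1024:
         * res = 2300 + (4800 - 2300) * 512 / 1024
         *     = 2300 + 1250 = 3550 mV
         */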
-
-/**
- * ab8500_gpadc_sw_hw_convert() - gpadc conversion
- * @gpadc: pointer to the GPADC instance
- * @channel: analog channel to be converted to digital data
- * @avg_sample: number of ADC samples to average
- * @trig_edge: selected ADC trig edge
- * @trig_timer: selected ADC trigger delay timer
- * @conv_type: selected conversion type (HW or SW conversion)
- *
- * This function converts the selected analog input to digital
- * data.
- */
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
-{
- int ad_value;
- int voltage;
-
- ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type);
-
- /* On failure retry a second time */
- if (ad_value < 0)
- ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type);
- if (ad_value < 0) {
- dev_err(gpadc->dev, "GPADC raw value failed ch: %d\n",
- channel);
- return ad_value;
- }
-
- voltage = ab8500_gpadc_ad_to_voltage(gpadc, channel, ad_value);
- if (voltage < 0)
- dev_err(gpadc->dev,
- "GPADC to voltage conversion failed ch: %d AD: 0x%x\n",
- channel, ad_value);
-
- return voltage;
-}
-EXPORT_SYMBOL(ab8500_gpadc_sw_hw_convert);
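A hedged caller sketch, assuming the gpadc handle was obtained via ab8500_gpadc_get(); trig_edge and trig_timer are only meaningful for HW-triggered conversions, so they are passed as zero here:

        int mv;

        mv = ab8500_gpadc_sw_hw_convert(gpadc, MAIN_BAT_V,
                                        SAMPLE_4, 0, 0, ADC_SW);
        if (mv < 0)
                return mv;      /* conversion or scaling failed */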
-
-/**
- * ab8500_gpadc_read_raw() - gpadc read
- * @gpadc: pointer to the GPADC instance
- * @channel: analog channel to be read
- * @avg_sample: number of ADC samples to average
- * @trig_edge: selected trig edge
- * @trig_timer: selected ADC trigger delay timer
- * @conv_type: selected conversion type (HW or SW conversion)
- *
- * This function obtains the raw ADC value for a conversion; the result
- * then needs to be converted by calling ab8500_gpadc_ad_to_voltage().
- */
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
-{
- return ab8500_gpadc_double_read_raw(gpadc, channel, avg_sample,
- trig_edge, trig_timer, conv_type,
- NULL);
-}
-
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat)
-{
- int ret;
- int looplimit = 0;
- unsigned long completion_timeout;
- u8 val, low_data, high_data, low_data2, high_data2;
- u8 val_reg1 = 0;
- unsigned int delay_min = 0;
- unsigned int delay_max = 0;
- u8 data_low_addr, data_high_addr;
-
- if (!gpadc)
- return -ENODEV;
-
- /* check if conversion is supported */
- if ((gpadc->irq_sw < 0) && (conv_type == ADC_SW))
- return -ENOTSUPP;
- if ((gpadc->irq_hw < 0) && (conv_type == ADC_HW))
- return -ENOTSUPP;
-
- mutex_lock(&gpadc->ab8500_gpadc_lock);
- /* Enable VTVout LDO; this is required for GPADC */
- pm_runtime_get_sync(gpadc->dev);
-
- /* Wait until the ADC is not busy before proceeding */
- do {
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_STAT_REG, &val);
- if (ret < 0)
- goto out;
- if (!(val & GPADC_BUSY))
- break;
- msleep(20);
- } while (++looplimit < 10);
- if (looplimit >= 10 && (val & GPADC_BUSY)) {
- dev_err(gpadc->dev, "gpadc_conversion: GPADC busy");
- ret = -EINVAL;
- goto out;
- }
-
- /* Enable GPADC */
- val_reg1 |= EN_GPADC;
-
- /* Select the channel source and set average samples */
- switch (avg_sample) {
- case SAMPLE_1:
- val = channel | AVG_1;
- break;
- case SAMPLE_4:
- val = channel | AVG_4;
- break;
- case SAMPLE_8:
- val = channel | AVG_8;
- break;
- default:
- val = channel | AVG_16;
- break;
- }
-
- if (conv_type == ADC_HW) {
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL3_REG, val);
- val_reg1 |= EN_TRIG_EDGE;
- if (trig_edge)
- val_reg1 |= EN_FALLING;
- } else
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL2_REG, val);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: set avg samples failed\n");
- goto out;
- }
-
- /*
- * Enable ADC, buffering, select rising edge and enable the ADC
- * path charging current sense if needed; ABB 3.0 needs some
- * special treatment too.
- */
- switch (channel) {
- case MAIN_CHARGER_C:
- case USB_CHARGER_C:
- val_reg1 |= EN_BUF | EN_ICHAR;
- break;
- case BTEMP_BALL:
- if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
- val_reg1 |= EN_BUF | BTEMP_PULL_UP;
- /*
- * A delay might be needed for AB8500 cut 3.0; if not,
- * remove this once hardware is available.
- */
- delay_min = 1000; /* Delay in microseconds */
- delay_max = 10000; /* large range optimises sleep mode */
- break;
- }
- /* Intentional fallthrough */
- default:
- val_reg1 |= EN_BUF;
- break;
- }
-
- /* Write configuration to register */
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL1_REG, val_reg1);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: set Control register failed\n");
- goto out;
- }
-
- if (delay_min != 0)
- usleep_range(delay_min, delay_max);
-
- if (conv_type == ADC_HW) {
- /* Set trigger delay timer */
- ret = abx500_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG, trig_timer);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: trig timer failed\n");
- goto out;
- }
- completion_timeout = 2 * HZ;
- data_low_addr = AB8500_GPADC_AUTODATAL_REG;
- data_high_addr = AB8500_GPADC_AUTODATAH_REG;
- } else {
- /* Start SW conversion */
- ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
- ADC_SW_CONV, ADC_SW_CONV);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: start s/w conv failed\n");
- goto out;
- }
- completion_timeout = msecs_to_jiffies(CONVERSION_TIME);
- data_low_addr = AB8500_GPADC_MANDATAL_REG;
- data_high_addr = AB8500_GPADC_MANDATAH_REG;
- }
-
- /* wait for completion of conversion */
- if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
- completion_timeout)) {
- dev_err(gpadc->dev,
- "timeout: did not receive GPADC conversion interrupt\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Read the converted RAW data */
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, data_low_addr, &low_data);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: read low data failed\n");
- goto out;
- }
-
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, data_high_addr, &high_data);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: read high data failed\n");
- goto out;
- }
-
- /* Check if double conversion is required */
- if ((channel == BAT_CTRL_AND_IBAT) ||
- (channel == VBAT_MEAS_AND_IBAT) ||
- (channel == VBAT_TRUE_MEAS_AND_IBAT) ||
- (channel == BAT_TEMP_AND_IBAT)) {
-
- if (conv_type == ADC_HW) {
- /* not supported */
- ret = -ENOTSUPP;
- dev_err(gpadc->dev,
- "gpadc_conversion: only SW double conversion supported\n");
- goto out;
- } else {
- /* Read the converted RAW data 2 */
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
- &low_data2);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: read sw low data 2 failed\n");
- goto out;
- }
-
- ret = abx500_get_register_interruptible(gpadc->dev,
- AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
- &high_data2);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "gpadc_conversion: read sw high data 2 failed\n");
- goto out;
- }
- if (ibat != NULL) {
- *ibat = (high_data2 << 8) | low_data2;
- } else {
- dev_warn(gpadc->dev,
- "gpadc_conversion: ibat not stored\n");
- }
-
- }
- }
-
- /* Disable GPADC */
- ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- if (ret < 0) {
- dev_err(gpadc->dev, "gpadc_conversion: disable gpadc failed\n");
- goto out;
- }
-
- /* Disable VTVout LDO; it is only required for GPADC */
- pm_runtime_mark_last_busy(gpadc->dev);
- pm_runtime_put_autosuspend(gpadc->dev);
-
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
-
- return (high_data << 8) | low_data;
-
-out:
- /*
- * It has been shown necessary to turn off the GPADC if an error
- * occurs, otherwise we might have problems when waiting for the busy
- * bit in the GPADC status register to go low. In V1.1 the
- * wait_for_completion seems to time out when waiting for an
- * interrupt. Not seen in V2.0.
- */
- (void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
- AB8500_GPADC_CTRL1_REG, DIS_GPADC);
- pm_runtime_put(gpadc->dev);
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
- dev_err(gpadc->dev,
- "gpadc_conversion: Failed to AD convert channel %d\n", channel);
- return ret;
-}
-EXPORT_SYMBOL(ab8500_gpadc_read_raw);
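For the combined channels the second result is returned through the ibat pointer; passing NULL only triggers the warning above. A minimal sketch, assuming a valid gpadc handle:

        int raw, ibat_raw;

        raw = ab8500_gpadc_double_read_raw(gpadc, VBAT_MEAS_AND_IBAT,
                                           SAMPLE_16, 0, 0, ADC_SW,
                                           &ibat_raw);
        if (raw < 0)
                return raw;
        /* raw is the vbat sample; ibat_raw the simultaneous ibat */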
-
-/**
- * ab8500_bm_gpadcconvend_handler() - isr for gpadc conversion completion
- * @irq: irq number
- * @_gpadc: pointer to the data passed during request irq
- *
- * This is an interrupt service routine for gpadc conversion completion.
- * It signals that the gpadc conversion is complete and that the converted
- * raw value can be read from the registers.
- * Returns IRQ status (IRQ_HANDLED).
- */
-static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *_gpadc)
-{
- struct ab8500_gpadc *gpadc = _gpadc;
-
- complete(&gpadc->ab8500_gpadc_complete);
-
- return IRQ_HANDLED;
-}
-
-static int otp_cal_regs[] = {
- AB8500_GPADC_CAL_1,
- AB8500_GPADC_CAL_2,
- AB8500_GPADC_CAL_3,
- AB8500_GPADC_CAL_4,
- AB8500_GPADC_CAL_5,
- AB8500_GPADC_CAL_6,
- AB8500_GPADC_CAL_7,
-};
-
-static int otp4_cal_regs[] = {
- AB8540_GPADC_OTP4_REG_7,
- AB8540_GPADC_OTP4_REG_6,
- AB8540_GPADC_OTP4_REG_5,
-};
-
-static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
-{
- int i;
- int ret[ARRAY_SIZE(otp_cal_regs)];
- u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
- int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
- u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
- int vmain_high, vmain_low;
- int btemp_high, btemp_low;
- int vbat_high, vbat_low;
- int ibat_high, ibat_low;
- s64 V_gain, V_offset, V2A_gain, V2A_offset;
- struct ab8500 *ab8500;
-
- ab8500 = gpadc->parent;
-
- /* First we read all OTP registers and store the error code */
- for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
- ret[i] = abx500_get_register_interruptible(gpadc->dev,
- AB8500_OTP_EMUL, otp_cal_regs[i], &gpadc_cal[i]);
- if (ret[i] < 0)
- dev_err(gpadc->dev, "%s: read otp reg 0x%02x failed\n",
- __func__, otp_cal_regs[i]);
- }
-
- /*
- * The ADC calibration data is stored in OTP registers.
- * The layout of the calibration data is outlined below and a more
- * detailed description can be found in UM0836
- *
- * vm_h/l = vmain_high/low
- * bt_h/l = btemp_high/low
- * vb_h/l = vbat_high/low
- *
- * Data bits 8500/9540:
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | vm_h9 | vm_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- *
- * Data bits 8540:
- * OTP2
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
- * |.......|.......|.......|.......|.......|.......|.......|.......
- *
- * Data bits 8540:
- * OTP4
- * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | | ib_h9 | ib_h8 | ib_h7
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
- * |.......|.......|.......|.......|.......|.......|.......|.......
- * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
- *
- *
- * Ideal output ADC codes corresponding to injected input voltages
- * during manufacturing are:
- *
- * vmain_high: Vin = 19500mV / ADC ideal code = 997
- * vmain_low: Vin = 315mV / ADC ideal code = 16
- * btemp_high: Vin = 1300mV / ADC ideal code = 985
- * btemp_low: Vin = 21mV / ADC ideal code = 16
- * vbat_high: Vin = 4700mV / ADC ideal code = 982
- * vbat_low: Vin = 2380mV / ADC ideal code = 33
- */
-
- if (is_ab8540(ab8500)) {
- /* Calculate gain and offset for VMAIN if all reads succeeded*/
- if (!(ret[1] < 0 || ret[2] < 0)) {
- vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
- ((gpadc_cal[2] & 0xC0) >> 6));
- vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
- (u16)vmain_high;
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
- (u16)vmain_low;
-
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
- (19500 - 315) / (vmain_high - vmain_low);
- gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
- 19500 - (CALIB_SCALE * (19500 - 315) /
- (vmain_high - vmain_low)) * vmain_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
- }
-
- /* Read IBAT calibration Data */
- for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
- ret_otp4[i] = abx500_get_register_interruptible(
- gpadc->dev, AB8500_OTP_EMUL,
- otp4_cal_regs[i], &gpadc_otp4[i]);
- if (ret_otp4[i] < 0)
- dev_err(gpadc->dev,
- "%s: read otp4 reg 0x%02x failed\n",
- __func__, otp4_cal_regs[i]);
- }
-
- /* Calculate gain and offset for IBAT if all reads succeeded */
- if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
- ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
- ((gpadc_otp4[1] & 0xFE) >> 1));
- ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
- ((gpadc_otp4[2] & 0xF8) >> 3));
-
- gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi =
- (u16)ibat_high;
- gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo =
- (u16)ibat_low;
-
- V_gain = ((IBAT_VDROP_H - IBAT_VDROP_L)
- << CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
-
- V_offset = (IBAT_VDROP_H << CALIB_SHIFT_IBAT) -
- (((IBAT_VDROP_H - IBAT_VDROP_L) <<
- CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
- * ibat_high;
- /*
- * Result obtained is in mV (at a scale factor),
- * we need to calculate gain and offset to get mA
- */
- V2A_gain = (ADC_CH_IBAT_MAX - ADC_CH_IBAT_MIN)/
- (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
- V2A_offset = ((ADC_CH_IBAT_MAX_V * ADC_CH_IBAT_MIN -
- ADC_CH_IBAT_MAX * ADC_CH_IBAT_MIN_V)
- << CALIB_SHIFT_IBAT)
- / (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
-
- gpadc->cal_data[ADC_INPUT_IBAT].gain =
- V_gain * V2A_gain;
- gpadc->cal_data[ADC_INPUT_IBAT].offset =
- V_offset * V2A_gain + V2A_offset;
- } else {
- gpadc->cal_data[ADC_INPUT_IBAT].gain = 0;
- }
-
- dev_dbg(gpadc->dev, "IBAT gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_IBAT].gain,
- gpadc->cal_data[ADC_INPUT_IBAT].offset);
- } else {
- /* Calculate gain and offset for VMAIN if all reads succeeded */
- if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
- vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
- ((gpadc_cal[1] & 0x3F) << 2) |
- ((gpadc_cal[2] & 0xC0) >> 6));
- vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
- (u16)vmain_high;
- gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
- (u16)vmain_low;
-
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
- (19500 - 315) / (vmain_high - vmain_low);
-
- gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
- 19500 - (CALIB_SCALE * (19500 - 315) /
- (vmain_high - vmain_low)) * vmain_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
- }
- }
-
- /* Calculate gain and offset for BTEMP if all reads succeeded */
- if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
- btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
- (gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
- btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
-
- gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi = (u16)btemp_high;
- gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo = (u16)btemp_low;
-
- gpadc->cal_data[ADC_INPUT_BTEMP].gain =
- CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
- gpadc->cal_data[ADC_INPUT_BTEMP].offset = CALIB_SCALE * 1300 -
- (CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
- * btemp_high;
- } else {
- gpadc->cal_data[ADC_INPUT_BTEMP].gain = 0;
- }
-
- /* Calculate gain and offset for VBAT if all reads succeeded */
- if (!(ret[4] < 0 || ret[5] < 0 || ret[6] < 0)) {
- vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
- vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
-
- gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi = (u16)vbat_high;
- gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo = (u16)vbat_low;
-
- gpadc->cal_data[ADC_INPUT_VBAT].gain = CALIB_SCALE *
- (4700 - 2380) / (vbat_high - vbat_low);
- gpadc->cal_data[ADC_INPUT_VBAT].offset = CALIB_SCALE * 4700 -
- (CALIB_SCALE * (4700 - 2380) /
- (vbat_high - vbat_low)) * vbat_high;
- } else {
- gpadc->cal_data[ADC_INPUT_VBAT].gain = 0;
- }
-
- dev_dbg(gpadc->dev, "VMAIN gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_VMAIN].gain,
- gpadc->cal_data[ADC_INPUT_VMAIN].offset);
-
- dev_dbg(gpadc->dev, "BTEMP gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_BTEMP].gain,
- gpadc->cal_data[ADC_INPUT_BTEMP].offset);
-
- dev_dbg(gpadc->dev, "VBAT gain %llu offset %llu\n",
- gpadc->cal_data[ADC_INPUT_VBAT].gain,
- gpadc->cal_data[ADC_INPUT_VBAT].offset);
-}
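All of the gain/offset pairs computed above are instances of a two-point linear fit through the ideal injection points, kept in scaled integer form:

/*
 * Two-point calibration, scaled by CALIB_SCALE:
 *
 *   gain   = CALIB_SCALE * (val_high - val_low) / (code_high - code_low)
 *   offset = CALIB_SCALE * val_high - gain * code_high
 *
 * so that at conversion time, as in ab8500_gpadc_ad_to_voltage():
 *
 *   value = (ad_value * gain + offset) / CALIB_SCALE
 *
 * which by construction returns val_high at code_high and val_low
 * at code_low.
 */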
-
-#ifdef CONFIG_PM
-static int ab8500_gpadc_runtime_suspend(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
-
- regulator_disable(gpadc->regu);
- return 0;
-}
-
-static int ab8500_gpadc_runtime_resume(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_enable(gpadc->regu);
- if (ret)
- dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int ab8500_gpadc_suspend(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
-
- mutex_lock(&gpadc->ab8500_gpadc_lock);
-
- pm_runtime_get_sync(dev);
-
- regulator_disable(gpadc->regu);
- return 0;
-}
-
-static int ab8500_gpadc_resume(struct device *dev)
-{
- struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
- int ret;
-
- ret = regulator_enable(gpadc->regu);
- if (ret)
- dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
-
- pm_runtime_mark_last_busy(gpadc->dev);
- pm_runtime_put_autosuspend(gpadc->dev);
-
- mutex_unlock(&gpadc->ab8500_gpadc_lock);
- return ret;
-}
-#endif
-
-static int ab8500_gpadc_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct ab8500_gpadc *gpadc;
-
- gpadc = devm_kzalloc(&pdev->dev,
- sizeof(struct ab8500_gpadc), GFP_KERNEL);
- if (!gpadc)
- return -ENOMEM;
-
- gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
- if (gpadc->irq_sw < 0)
- dev_err(gpadc->dev, "failed to get platform sw_conv_end irq\n");
-
- gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
- if (gpadc->irq_hw < 0)
- dev_err(gpadc->dev, "failed to get platform hw_conv_end irq\n");
-
- gpadc->dev = &pdev->dev;
- gpadc->parent = dev_get_drvdata(pdev->dev.parent);
- mutex_init(&gpadc->ab8500_gpadc_lock);
-
- /* Initialize completion used to notify completion of conversion */
- init_completion(&gpadc->ab8500_gpadc_complete);
-
- /* Register interrupts */
- if (gpadc->irq_sw >= 0) {
- ret = request_threaded_irq(gpadc->irq_sw, NULL,
- ab8500_bm_gpadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
- "ab8500-gpadc-sw",
- gpadc);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "Failed to register interrupt irq: %d\n",
- gpadc->irq_sw);
- goto fail;
- }
- }
-
- if (gpadc->irq_hw >= 0) {
- ret = request_threaded_irq(gpadc->irq_hw, NULL,
- ab8500_bm_gpadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
- "ab8500-gpadc-hw",
- gpadc);
- if (ret < 0) {
- dev_err(gpadc->dev,
- "Failed to register interrupt irq: %d\n",
- gpadc->irq_hw);
- goto fail_irq;
- }
- }
-
- /* VTVout LDO used to power up ab8500-GPADC */
- gpadc->regu = devm_regulator_get(&pdev->dev, "vddadc");
- if (IS_ERR(gpadc->regu)) {
- ret = PTR_ERR(gpadc->regu);
- dev_err(gpadc->dev, "failed to get vtvout LDO\n");
- goto fail_irq;
- }
-
- platform_set_drvdata(pdev, gpadc);
-
- ret = regulator_enable(gpadc->regu);
- if (ret) {
- dev_err(gpadc->dev, "Failed to enable vtvout LDO: %d\n", ret);
- goto fail_enable;
- }
-
- pm_runtime_set_autosuspend_delay(gpadc->dev, GPADC_AUDOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(gpadc->dev);
- pm_runtime_set_active(gpadc->dev);
- pm_runtime_enable(gpadc->dev);
-
- ab8500_gpadc_read_calibration_data(gpadc);
- list_add_tail(&gpadc->node, &ab8500_gpadc_list);
- dev_dbg(gpadc->dev, "probe success\n");
-
- return 0;
-
-fail_enable:
-fail_irq:
- free_irq(gpadc->irq_sw, gpadc);
- free_irq(gpadc->irq_hw, gpadc);
-fail:
- return ret;
-}
-
-static int ab8500_gpadc_remove(struct platform_device *pdev)
-{
- struct ab8500_gpadc *gpadc = platform_get_drvdata(pdev);
-
- /* remove this gpadc entry from the list */
- list_del(&gpadc->node);
- /* remove interrupt - completion of Sw ADC conversion */
- if (gpadc->irq_sw >= 0)
- free_irq(gpadc->irq_sw, gpadc);
- if (gpadc->irq_hw >= 0)
- free_irq(gpadc->irq_hw, gpadc);
-
- pm_runtime_get_sync(gpadc->dev);
- pm_runtime_disable(gpadc->dev);
-
- regulator_disable(gpadc->regu);
-
- pm_runtime_set_suspended(gpadc->dev);
-
- pm_runtime_put_noidle(gpadc->dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ab8500_gpadc_pm_ops = {
- SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
- ab8500_gpadc_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(ab8500_gpadc_suspend,
- ab8500_gpadc_resume)
-
-};
-
-static struct platform_driver ab8500_gpadc_driver = {
- .probe = ab8500_gpadc_probe,
- .remove = ab8500_gpadc_remove,
- .driver = {
- .name = "ab8500-gpadc",
- .pm = &ab8500_gpadc_pm_ops,
- },
-};
-
-static int __init ab8500_gpadc_init(void)
-{
- return platform_driver_register(&ab8500_gpadc_driver);
-}
-subsys_initcall_sync(ab8500_gpadc_init);
-
-/**
- * ab8540_gpadc_get_otp() - returns OTP values
- *
- */
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h)
-{
- *vmain_l = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo;
- *vmain_h = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi;
- *btemp_l = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo;
- *btemp_h = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi;
- *vbat_l = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo;
- *vbat_h = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi;
- *ibat_l = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
- *ibat_h = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
-}
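A hedged caller sketch for the OTP accessor (all eight output pointers must be supplied):

        u16 vm_l, vm_h, bt_l, bt_h, vb_l, vb_h, ib_l, ib_h;

        ab8540_gpadc_get_otp(gpadc, &vm_l, &vm_h, &bt_l, &bt_h,
                             &vb_l, &vb_h, &ib_l, &ib_h);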
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 4a31907a4525..f73cf76d1373 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -814,11 +814,7 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
int ret, i;
/* Handle old non-standard DT binding */
- pdata->reset = devm_gpiod_get_from_of_node(arizona->dev,
- arizona->dev->of_node,
- "wlf,reset", 0,
- GPIOD_OUT_LOW,
- "arizona /RESET");
+ pdata->reset = devm_gpiod_get(arizona->dev, "wlf,reset", GPIOD_OUT_LOW);
if (IS_ERR(pdata->reset)) {
ret = PTR_ERR(pdata->reset);
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 6e6dfd6c1871..c4b977a5dd96 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -78,6 +78,10 @@ static const struct mfd_cell cros_ec_rtc_cells[] = {
{ .name = "cros-ec-rtc", },
};
+static const struct mfd_cell cros_ec_sensorhub_cells[] = {
+ { .name = "cros-ec-sensorhub", },
+};
+
static const struct mfd_cell cros_usbpd_charger_cells[] = {
{ .name = "cros-usbpd-charger", },
{ .name = "cros-usbpd-logger", },
@@ -112,229 +116,11 @@ static const struct mfd_cell cros_ec_vbc_cells[] = {
{ .name = "cros-ec-vbc", }
};
-static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
-{
- struct cros_ec_command *msg;
- int ret;
-
- if (ec->features[0] == -1U && ec->features[1] == -1U) {
- /* features bitmap not read yet */
- msg = kzalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
- msg->insize = sizeof(ec->features);
-
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
- ret, msg->result);
- memset(ec->features, 0, sizeof(ec->features));
- } else {
- memcpy(ec->features, msg->data, sizeof(ec->features));
- }
-
- dev_dbg(ec->dev, "EC features %08x %08x\n",
- ec->features[0], ec->features[1]);
-
- kfree(msg);
- }
-
- return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
-}
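The indexing on the final return line treats the two u32 words as one 64-bit feature bitmap: feature N lives in word N / 32, and EC_FEATURE_MASK_0() selects the bit within that word. An equivalent sketch (BIT() used in place of the mask macro, as an assumption about its expansion):

static inline bool example_has_feature(const u32 *features, int feature)
{
        /* e.g. a hypothetical feature 40 -> features[1], bit 8 */
        return features[feature / 32] & BIT(feature % 32);
}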
-
static void cros_ec_class_release(struct device *dev)
{
kfree(to_cros_ec_dev(dev));
}
-static void cros_ec_sensors_register(struct cros_ec_dev *ec)
-{
- /*
- * Issue a command to get the number of sensors reported.
- * Build an array of sensor drivers and register them all.
- */
- int ret, i, id, sensor_num;
- struct mfd_cell *sensor_cells;
- struct cros_ec_sensor_platform *sensor_platforms;
- int sensor_type[MOTIONSENSE_TYPE_MAX];
- struct ec_params_motion_sense *params;
- struct ec_response_motion_sense *resp;
- struct cros_ec_command *msg;
-
- msg = kzalloc(sizeof(struct cros_ec_command) +
- max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
- if (msg == NULL)
- return;
-
- msg->version = 2;
- msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
- msg->outsize = sizeof(*params);
- msg->insize = sizeof(*resp);
-
- params = (struct ec_params_motion_sense *)msg->data;
- params->cmd = MOTIONSENSE_CMD_DUMP;
-
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
- ret, msg->result);
- goto error;
- }
-
- resp = (struct ec_response_motion_sense *)msg->data;
- sensor_num = resp->dump.sensor_count;
- /*
- * Allocate 2 extra sensor cells in case the lid angle sensor
- * and/or FIFO are needed.
- */
- sensor_cells = kcalloc(sensor_num + 2, sizeof(struct mfd_cell),
- GFP_KERNEL);
- if (sensor_cells == NULL)
- goto error;
-
- sensor_platforms = kcalloc(sensor_num,
- sizeof(struct cros_ec_sensor_platform),
- GFP_KERNEL);
- if (sensor_platforms == NULL)
- goto error_platforms;
-
- memset(sensor_type, 0, sizeof(sensor_type));
- id = 0;
- for (i = 0; i < sensor_num; i++) {
- params->cmd = MOTIONSENSE_CMD_INFO;
- params->info.sensor_num = i;
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
- i, ret, msg->result);
- continue;
- }
- switch (resp->info.type) {
- case MOTIONSENSE_TYPE_ACCEL:
- sensor_cells[id].name = "cros-ec-accel";
- break;
- case MOTIONSENSE_TYPE_BARO:
- sensor_cells[id].name = "cros-ec-baro";
- break;
- case MOTIONSENSE_TYPE_GYRO:
- sensor_cells[id].name = "cros-ec-gyro";
- break;
- case MOTIONSENSE_TYPE_MAG:
- sensor_cells[id].name = "cros-ec-mag";
- break;
- case MOTIONSENSE_TYPE_PROX:
- sensor_cells[id].name = "cros-ec-prox";
- break;
- case MOTIONSENSE_TYPE_LIGHT:
- sensor_cells[id].name = "cros-ec-light";
- break;
- case MOTIONSENSE_TYPE_ACTIVITY:
- sensor_cells[id].name = "cros-ec-activity";
- break;
- default:
- dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
- continue;
- }
- sensor_platforms[id].sensor_num = i;
- sensor_cells[id].id = sensor_type[resp->info.type];
- sensor_cells[id].platform_data = &sensor_platforms[id];
- sensor_cells[id].pdata_size =
- sizeof(struct cros_ec_sensor_platform);
-
- sensor_type[resp->info.type]++;
- id++;
- }
-
- if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
- ec->has_kb_wake_angle = true;
-
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
- sensor_cells[id].name = "cros-ec-ring";
- id++;
- }
- if (cros_ec_check_features(ec,
- EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
- sensor_cells[id].name = "cros-ec-lid-angle";
- id++;
- }
-
- ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
- NULL, 0, NULL);
- if (ret)
- dev_err(ec->dev, "failed to add EC sensors\n");
-
- kfree(sensor_platforms);
-error_platforms:
- kfree(sensor_cells);
-error:
- kfree(msg);
-}
-
-static struct cros_ec_sensor_platform sensor_platforms[] = {
- { .sensor_num = 0 },
- { .sensor_num = 1 }
-};
-
-static const struct mfd_cell cros_ec_accel_legacy_cells[] = {
- {
- .name = "cros-ec-accel-legacy",
- .platform_data = &sensor_platforms[0],
- .pdata_size = sizeof(struct cros_ec_sensor_platform),
- },
- {
- .name = "cros-ec-accel-legacy",
- .platform_data = &sensor_platforms[1],
- .pdata_size = sizeof(struct cros_ec_sensor_platform),
- }
-};
-
-static void cros_ec_accel_legacy_register(struct cros_ec_dev *ec)
-{
- struct cros_ec_device *ec_dev = ec->ec_dev;
- u8 status;
- int ret;
-
- /*
- * Only the main EC, directly connected to the AP, needs legacy
- * support.
- */
- if (ec->cmd_offset != 0)
- return;
-
- /*
- * Check if EC supports direct memory reads and if EC has
- * accelerometers.
- */
- if (ec_dev->cmd_readmem) {
- ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS, 1,
- &status);
- if (ret < 0) {
- dev_warn(ec->dev, "EC direct read error.\n");
- return;
- }
-
- /* Check if EC has accelerometers. */
- if (!(status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
- dev_info(ec->dev, "EC does not have accelerometers.\n");
- return;
- }
- }
-
- /*
- * The device may still support accelerometers:
- * it would be an older ARM-based device that does not support the
- * EC_CMD_GET_FEATURES command.
- *
- * Register 2 accelerometers; the IIO driver will fail if there
- * are no sensors.
- */
- ret = mfd_add_hotplug_devices(ec->dev, cros_ec_accel_legacy_cells,
- ARRAY_SIZE(cros_ec_accel_legacy_cells));
- if (ret)
- dev_err(ec_dev->dev, "failed to add EC sensors\n");
-}
-
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -390,11 +176,14 @@ static int ec_device_probe(struct platform_device *pdev)
goto failed;
/* check whether this EC is a sensor hub. */
- if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
- cros_ec_sensors_register(ec);
- else
- /* Workaround for older EC firmware */
- cros_ec_accel_legacy_register(ec);
+ if (cros_ec_get_sensor_count(ec) > 0) {
+ retval = mfd_add_hotplug_devices(ec->dev,
+ cros_ec_sensorhub_cells,
+ ARRAY_SIZE(cros_ec_sensorhub_cells));
+ if (retval)
+ dev_err(ec->dev, "failed to add %s subdevice: %d\n",
+ cros_ec_sensorhub_cells->name, retval);
+ }
/*
* The following subdevices can be detected by sending the
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index f1825c0ccbd0..d0fb2e52ee76 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -27,121 +27,106 @@ enum cs5535_mfd_bars {
NR_BARS,
};
-static int cs5535_mfd_res_enable(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -EIO;
- }
-
- if (!request_region(res->start, resource_size(res), DRV_NAME)) {
- dev_err(&pdev->dev, "can't request region\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static int cs5535_mfd_res_disable(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't fetch device resource info\n");
- return -EIO;
- }
-
- release_region(res->start, resource_size(res));
- return 0;
-}
-
static struct resource cs5535_mfd_resources[NR_BARS];
static struct mfd_cell cs5535_mfd_cells[] = {
{
- .id = SMB_BAR,
.name = "cs5535-smb",
.num_resources = 1,
.resources = &cs5535_mfd_resources[SMB_BAR],
},
{
- .id = GPIO_BAR,
.name = "cs5535-gpio",
.num_resources = 1,
.resources = &cs5535_mfd_resources[GPIO_BAR],
},
{
- .id = MFGPT_BAR,
.name = "cs5535-mfgpt",
.num_resources = 1,
.resources = &cs5535_mfd_resources[MFGPT_BAR],
},
{
- .id = PMS_BAR,
.name = "cs5535-pms",
.num_resources = 1,
.resources = &cs5535_mfd_resources[PMS_BAR],
+ },
+};
- .enable = cs5535_mfd_res_enable,
- .disable = cs5535_mfd_res_disable,
+static struct mfd_cell cs5535_olpc_mfd_cells[] = {
+ {
+ .name = "olpc-xo1-pm-acpi",
+ .num_resources = 1,
+ .resources = &cs5535_mfd_resources[ACPI_BAR],
},
{
- .id = ACPI_BAR,
- .name = "cs5535-acpi",
+ .name = "olpc-xo1-sci-acpi",
.num_resources = 1,
.resources = &cs5535_mfd_resources[ACPI_BAR],
-
- .enable = cs5535_mfd_res_enable,
- .disable = cs5535_mfd_res_disable,
},
};
-static const char *olpc_acpi_clones[] = {
- "olpc-xo1-pm-acpi",
- "olpc-xo1-sci-acpi"
-};
-
static int cs5535_mfd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err, i;
+ int err, bar;
err = pci_enable_device(pdev);
if (err)
return err;
- /* fill in IO range for each cell; subdrivers handle the region */
- for (i = 0; i < ARRAY_SIZE(cs5535_mfd_cells); i++) {
- int bar = cs5535_mfd_cells[i].id;
+ for (bar = 0; bar < NR_BARS; bar++) {
struct resource *r = &cs5535_mfd_resources[bar];
r->flags = IORESOURCE_IO;
r->start = pci_resource_start(pdev, bar);
r->end = pci_resource_end(pdev, bar);
+ }
- /* id is used for temporarily storing BAR; unset it now */
- cs5535_mfd_cells[i].id = 0;
+ err = pci_request_region(pdev, PMS_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request PMS_BAR's IO region\n");
+ goto err_disable;
}
- err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells,
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, cs5535_mfd_cells,
ARRAY_SIZE(cs5535_mfd_cells), NULL, 0, NULL);
if (err) {
- dev_err(&pdev->dev, "MFD add devices failed: %d\n", err);
- goto err_disable;
+ dev_err(&pdev->dev,
+ "Failed to add CS5535 sub-devices: %d\n", err);
+ goto err_release_pms;
}
- if (machine_is_olpc())
- mfd_clone_cell("cs5535-acpi", olpc_acpi_clones, ARRAY_SIZE(olpc_acpi_clones));
+ if (machine_is_olpc()) {
+ err = pci_request_region(pdev, ACPI_BAR, DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to request ACPI_BAR's IO region\n");
+ goto err_remove_devices;
+ }
+
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ cs5535_olpc_mfd_cells,
+ ARRAY_SIZE(cs5535_olpc_mfd_cells),
+ NULL, 0, NULL);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to add CS5535 OLPC sub-devices: %d\n",
+ err);
+ goto err_release_acpi;
+ }
+ }
dev_info(&pdev->dev, "%zu devices registered.\n",
ARRAY_SIZE(cs5535_mfd_cells));
return 0;
+err_release_acpi:
+ pci_release_region(pdev, ACPI_BAR);
+err_remove_devices:
+ mfd_remove_devices(&pdev->dev);
+err_release_pms:
+ pci_release_region(pdev, PMS_BAR);
err_disable:
pci_disable_device(pdev);
return err;
@@ -150,6 +135,11 @@ err_disable:
static void cs5535_mfd_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
+
+ if (machine_is_olpc())
+ pci_release_region(pdev, ACPI_BAR);
+
+ pci_release_region(pdev, PMS_BAR);
pci_disable_device(pdev);
}
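The restructured probe/remove pair keeps region ownership symmetric: every pci_request_region() taken in probe has a matching pci_release_region() on the error path and in remove. A generic sketch of the pattern (not specific to this driver):

static int example_claim_bar(struct pci_dev *pdev, int bar)
{
        int err;

        /* Claim the BAR so no other driver can double-claim it */
        err = pci_request_region(pdev, bar, "example");
        if (err)
                return err;

        /* mirror of the request once the BAR is no longer used */
        pci_release_region(pdev, bar);
        return 0;
}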
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index dfac6afa82ca..57ac58b4b5f3 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
@@ -668,6 +669,14 @@ struct prcmu_fw_version *prcmu_get_fw_version(void)
return fw_info.valid ? &fw_info.version : NULL;
}
+static bool prcmu_is_ulppll_disabled(void)
+{
+ struct prcmu_fw_version *ver;
+
+ ver = prcmu_get_fw_version();
+ return ver && ver->project == PRCMU_FW_PROJECT_U8420_SYSCLK;
+}
+
bool prcmu_has_arm_maxopp(void)
{
return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
@@ -1308,10 +1317,23 @@ static int request_sysclk(bool enable)
static int request_timclk(bool enable)
{
- u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
+ u32 val;
+
+ /*
+ * On the U8420_SYSCLK firmware, the ULP (Ultra Low Power)
+ * PLL is disabled, so we cannot use doze mode; doing so
+ * would stop the clock on this firmware.
+ */
+ if (prcmu_is_ulppll_disabled())
+ val = 0;
+ else
+ val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
if (!enable)
- val |= PRCM_TCR_STOP_TIMERS;
+ val |= PRCM_TCR_STOP_TIMERS |
+ PRCM_TCR_DOZE_MODE |
+ PRCM_TCR_TENSEL_MASK;
+
writel(val, PRCM_TCR);
return 0;
@@ -1615,7 +1637,8 @@ unsigned long prcmu_clock_rate(u8 clock)
if (clock < PRCMU_NUM_REG_CLOCKS)
return clock_rate(clock);
else if (clock == PRCMU_TIMCLK)
- return ROOT_CLOCK_RATE / 16;
+ return prcmu_is_ulppll_disabled() ?
+ 32768 : ROOT_CLOCK_RATE / 16;
else if (clock == PRCMU_SYSCLK)
return ROOT_CLOCK_RATE;
else if (clock == PRCMU_PLLSOC0)
@@ -2646,6 +2669,8 @@ static char *fw_project_name(u32 project)
return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
+ case PRCMU_FW_PROJECT_U8420_SYSCLK:
+ return "U8420-sysclk";
case PRCMU_FW_PROJECT_U9540:
return "U9540";
case PRCMU_FW_PROJECT_A9420:
@@ -2693,27 +2718,18 @@ static int db8500_irq_init(struct device_node *np)
return 0;
}
-static void dbx500_fw_version_init(struct platform_device *pdev,
- u32 version_offset)
+static void dbx500_fw_version_init(struct device_node *np)
{
- struct resource *res;
void __iomem *tcpm_base;
u32 version;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "prcmu-tcpm");
- if (!res) {
- dev_err(&pdev->dev,
- "Error: no prcmu tcpm memory region provided\n");
- return;
- }
- tcpm_base = ioremap(res->start, resource_size(res));
+ tcpm_base = of_iomap(np, 1);
if (!tcpm_base) {
- dev_err(&pdev->dev, "no prcmu tcpm mem region provided\n");
+ pr_err("no prcmu tcpm mem region provided\n");
return;
}
- version = readl(tcpm_base + version_offset);
+ version = readl(tcpm_base + DB8500_PRCMU_FW_VERSION_OFFSET);
fw_info.version.project = (version & 0xFF);
fw_info.version.api_version = (version >> 8) & 0xFF;
fw_info.version.func_version = (version >> 16) & 0xFF;
@@ -2731,7 +2747,7 @@ static void dbx500_fw_version_init(struct platform_device *pdev,
iounmap(tcpm_base);
}
-void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
+void __init db8500_prcmu_early_init(void)
{
/*
* This is a temporary remap to bring up the clocks. It is
@@ -2740,9 +2756,17 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
* clock driver can probe independently. An early initcall will
* still be needed, but it can be diverted into drivers/clk/ux500.
*/
- prcmu_base = ioremap(phy_base, size);
- if (!prcmu_base)
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
+ prcmu_base = of_iomap(np, 0);
+ if (!prcmu_base) {
+ of_node_put(np);
pr_err("%s: ioremap() of prcmu registers failed!\n", __func__);
+ return;
+ }
+ dbx500_fw_version_init(np);
+ of_node_put(np);
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
@@ -3024,20 +3048,13 @@ static const struct mfd_cell common_prcmu_devs[] = {
};
static const struct mfd_cell db8500_prcmu_devs[] = {
- {
- .name = "db8500-prcmu-regulators",
- .of_compatible = "stericsson,db8500-prcmu-regulator",
- .platform_data = &db8500_regulators,
- .pdata_size = sizeof(db8500_regulators),
- },
- {
- .name = "cpuidle-dbx500",
- .of_compatible = "stericsson,cpuidle-dbx500",
- },
- {
- .name = "db8500-thermal",
- .of_compatible = "stericsson,db8500-thermal",
- },
+ OF_MFD_CELL("db8500-prcmu-regulators", NULL,
+ &db8500_regulators, sizeof(db8500_regulators), 0,
+ "stericsson,db8500-prcmu-regulator"),
+ OF_MFD_CELL("cpuidle-dbx500",
+ NULL, NULL, 0, 0, "stericsson,cpuidle-dbx500"),
+ OF_MFD_CELL("db8500-thermal",
+ NULL, NULL, 0, 0, "stericsson,db8500-thermal"),
};
static int db8500_prcmu_register_ab8500(struct device *parent)
@@ -3091,7 +3108,6 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
return -ENOMEM;
}
init_prcm_registers();
- dbx500_fw_version_init(pdev, DB8500_PRCMU_FW_VERSION_OFFSET);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
if (!res) {
dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 9355db29d2f9..b33030e3385c 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -122,13 +122,25 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
.properties = apl_i2c_properties,
};
+static struct property_entry glk_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 313),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 290),
+ { },
+};
+
+static const struct intel_lpss_platform_info glk_i2c_info = {
+ .clk_rate = 133000000,
+ .properties = glk_i2c_properties,
+};
+
static const struct intel_lpss_platform_info cnl_i2c_info = {
.clk_rate = 216000000,
.properties = spt_i2c_properties,
};
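The property entries registered above surface to the child I2C host controller as ordinary device properties. A hedged consumer sketch, where dev is assumed to be the LPSS child device:

        u32 hold_ns;

        /* hold_ns will be 313 with the GLK table above */
        if (!device_property_read_u32(dev, "i2c-sda-hold-time-ns",
                                      &hold_ns))
                dev_dbg(dev, "SDA hold time: %u ns\n", hold_ns);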
static const struct pci_device_id intel_lpss_pci_ids[] = {
- /* CML */
+ /* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info },
@@ -141,6 +153,17 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info },
+ /* CML-H */
+ { PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x06e8), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&spt_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
@@ -174,14 +197,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
/* GLK */
- { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&glk_i2c_info },
{ PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index bfe4ff337581..b0f0781a6b9c 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -384,7 +384,7 @@ int intel_lpss_probe(struct device *dev,
if (!lpss)
return -ENOMEM;
- lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+ lpss->priv = devm_ioremap_uc(dev, info->mem->start + LPSS_PRIV_OFFSET,
LPSS_PRIV_SIZE);
if (!lpss->priv)
return -ENOMEM;
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index b6ab72fa0569..429efa1f8e55 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -75,7 +75,7 @@ static struct mfd_cell crystal_cove_byt_dev[] = {
.resources = gpio_resources,
},
{
- .name = "crystal_cove_pmic",
+ .name = "byt_crystal_cove_pmic",
},
{
.name = "crystal_cove_pwm",
@@ -89,6 +89,9 @@ static struct mfd_cell crystal_cove_cht_dev[] = {
.resources = gpio_resources,
},
{
+ .name = "cht_crystal_cove_pmic",
+ },
+ {
.name = "crystal_cove_pwm",
},
};
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index a1d9be82734d..e92eeeb67a98 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -396,11 +396,7 @@ static int __init micro_probe(struct platform_device *pdev)
if (IS_ERR(micro->base))
return PTR_ERR(micro->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -EINVAL;
-
- micro->sdlc = devm_ioremap_resource(&pdev->dev, res);
+ micro->sdlc = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(micro->sdlc))
return PTR_ERR(micro->sdlc);
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 29540cbf7593..a8cfadc1fc01 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -450,6 +450,21 @@ int madera_dev_init(struct madera *madera)
sizeof(madera->pdata));
}
+ madera->mclk[MADERA_MCLK1].id = "mclk1";
+ madera->mclk[MADERA_MCLK2].id = "mclk2";
+ madera->mclk[MADERA_MCLK3].id = "mclk3";
+
+ ret = devm_clk_bulk_get_optional(madera->dev, ARRAY_SIZE(madera->mclk),
+ madera->mclk);
+ if (ret) {
+ dev_err(madera->dev, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
+ /* Not using devm_clk_get to prevent breakage of existing DTs */
+ if (!madera->mclk[MADERA_MCLK2].clk)
+ dev_warn(madera->dev, "Missing MCLK2, requires 32kHz clock\n");
+
ret = madera_get_reset_gpio(madera);
if (ret)
return ret;
@@ -660,13 +675,19 @@ int madera_dev_init(struct madera *madera)
}
/* Init 32k clock sourced from MCLK2 */
+ ret = clk_prepare_enable(madera->mclk[MADERA_MCLK2].clk);
+ if (ret) {
+ dev_err(madera->dev, "Failed to enable 32k clock: %d\n", ret);
+ goto err_reset;
+ }
+
ret = regmap_update_bits(madera->regmap,
MADERA_CLOCK_32K_1,
MADERA_CLK_32K_ENA_MASK | MADERA_CLK_32K_SRC_MASK,
MADERA_CLK_32K_ENA | MADERA_32KZ_MCLK2);
if (ret) {
dev_err(madera->dev, "Failed to init 32k clock: %d\n", ret);
- goto err_reset;
+ goto err_clock;
}
pm_runtime_set_active(madera->dev);
@@ -687,6 +708,8 @@ int madera_dev_init(struct madera *madera)
err_pm_runtime:
pm_runtime_disable(madera->dev);
+err_clock:
+ clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
err_reset:
madera_enable_hard_reset(madera);
regulator_disable(madera->dcvdd);
@@ -713,6 +736,8 @@ int madera_dev_exit(struct madera *madera)
*/
pm_runtime_disable(madera->dev);
+ clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk);
+
regulator_disable(madera->dcvdd);
regulator_put(madera->dcvdd);
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index a851ff473a44..c7ed5c353553 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -507,7 +507,6 @@ static int max77620_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
- chip->irq_base = -1;
chip->chip_irq = client->irq;
chip->chip_id = (enum max77620_chip_id)id->driver_data;
@@ -545,8 +544,8 @@ static int max77620_probe(struct i2c_client *client,
max77620_top_irq_chip.irq_drv_data = chip;
ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq,
- IRQF_ONESHOT | IRQF_SHARED,
- chip->irq_base, &max77620_top_irq_chip,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &max77620_top_irq_chip,
&chip->top_irq_data);
if (ret < 0) {
dev_err(chip->dev, "Failed to add regmap irq: %d\n", ret);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 23276a80e3b4..f5a73af60dd4 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -26,54 +26,28 @@ static struct device_type mfd_dev_type = {
int mfd_cell_enable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- int err = 0;
- /* only call enable hook if the cell wasn't previously enabled */
- if (atomic_inc_return(cell->usage_count) == 1)
- err = cell->enable(pdev);
-
- /* if the enable hook failed, decrement counter to allow retries */
- if (err)
- atomic_dec(cell->usage_count);
+ if (!cell->enable) {
+ dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
+ return 0;
+ }
- return err;
+ return cell->enable(pdev);
}
EXPORT_SYMBOL(mfd_cell_enable);
int mfd_cell_disable(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- int err = 0;
-
- /* only disable if no other clients are using it */
- if (atomic_dec_return(cell->usage_count) == 0)
- err = cell->disable(pdev);
-
- /* if the disable hook failed, increment to allow retries */
- if (err)
- atomic_inc(cell->usage_count);
-
- /* sanity check; did someone call disable too many times? */
- WARN_ON(atomic_read(cell->usage_count) < 0);
- return err;
-}
-EXPORT_SYMBOL(mfd_cell_disable);
-
-static int mfd_platform_add_cell(struct platform_device *pdev,
- const struct mfd_cell *cell,
- atomic_t *usage_count)
-{
- if (!cell)
+ if (!cell->disable) {
+ dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
return 0;
+ }
- pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
- if (!pdev->mfd_cell)
- return -ENOMEM;
-
- pdev->mfd_cell->usage_count = usage_count;
- return 0;
+ return cell->disable(pdev);
}
+EXPORT_SYMBOL(mfd_cell_disable);
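With the per-cell usage counting removed, each call maps 1:1 onto the cell's hook; the sub-driver that owns the device decides when to enable and disable it. A hypothetical sub-driver sketch:

static int example_subdev_probe(struct platform_device *pdev)
{
        int ret;

        /* Calls the cell's .enable() hook, or returns 0 if none */
        ret = mfd_cell_enable(pdev);
        if (ret)
                return ret;

        return 0;
}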
#if IS_ENABLED(CONFIG_ACPI)
static void mfd_acpi_add_device(const struct mfd_cell *cell,
@@ -134,7 +108,7 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell,
#endif
static int mfd_add_device(struct device *parent, int id,
- const struct mfd_cell *cell, atomic_t *usage_count,
+ const struct mfd_cell *cell,
struct resource *mem_base,
int irq_base, struct irq_domain *domain)
{
@@ -154,6 +128,10 @@ static int mfd_add_device(struct device *parent, int id,
if (!pdev)
goto fail_alloc;
+ pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+ if (!pdev->mfd_cell)
+ goto fail_device;
+
res = kcalloc(cell->num_resources, sizeof(*res), GFP_KERNEL);
if (!res)
goto fail_device;
@@ -174,6 +152,11 @@ static int mfd_add_device(struct device *parent, int id,
if (parent->of_node && cell->of_compatible) {
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
+ if (!of_device_is_available(np)) {
+ /* Skip disabled devices without reporting an error */
+ ret = 0;
+ goto fail_alias;
+ }
pdev->dev.of_node = np;
pdev->dev.fwnode = &np->fwnode;
break;
@@ -196,10 +179,6 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_alias;
}
- ret = mfd_platform_add_cell(pdev, cell, usage_count);
- if (ret)
- goto fail_alias;
-
for (r = 0; r < cell->num_resources; r++) {
res[r].name = cell->resources[r].name;
res[r].flags = cell->resources[r].flags;
@@ -286,16 +265,9 @@ int mfd_add_devices(struct device *parent, int id,
{
int i;
int ret;
- atomic_t *cnts;
-
- /* initialize reference counting for all cells */
- cnts = kcalloc(n_devs, sizeof(*cnts), GFP_KERNEL);
- if (!cnts)
- return -ENOMEM;
for (i = 0; i < n_devs; i++) {
- atomic_set(&cnts[i], 0);
- ret = mfd_add_device(parent, id, cells + i, cnts + i, mem_base,
+ ret = mfd_add_device(parent, id, cells + i, mem_base,
irq_base, domain);
if (ret)
goto fail;
@@ -306,17 +278,15 @@ int mfd_add_devices(struct device *parent, int id,
fail:
if (i)
mfd_remove_devices(parent);
- else
- kfree(cnts);
+
return ret;
}
EXPORT_SYMBOL(mfd_add_devices);
-static int mfd_remove_devices_fn(struct device *dev, void *c)
+static int mfd_remove_devices_fn(struct device *dev, void *data)
{
struct platform_device *pdev;
const struct mfd_cell *cell;
- atomic_t **usage_count = c;
if (dev->type != &mfd_dev_type)
return 0;
@@ -327,20 +297,13 @@ static int mfd_remove_devices_fn(struct device *dev, void *c)
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
- /* find the base address of usage_count pointers (for freeing) */
- if (!*usage_count || (cell->usage_count < *usage_count))
- *usage_count = cell->usage_count;
-
platform_device_unregister(pdev);
return 0;
}
void mfd_remove_devices(struct device *parent)
{
- atomic_t *cnts = NULL;
-
- device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
- kfree(cnts);
+ device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn);
}
EXPORT_SYMBOL(mfd_remove_devices);
@@ -382,38 +345,5 @@ int devm_mfd_add_devices(struct device *dev, int id,
}
EXPORT_SYMBOL(devm_mfd_add_devices);
-int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
-{
- struct mfd_cell cell_entry;
- struct device *dev;
- struct platform_device *pdev;
- int i;
-
- /* fetch the parent cell's device (should already be registered!) */
- dev = bus_find_device_by_name(&platform_bus_type, NULL, cell);
- if (!dev) {
- printk(KERN_ERR "failed to find device for cell %s\n", cell);
- return -ENODEV;
- }
- pdev = to_platform_device(dev);
- memcpy(&cell_entry, mfd_get_cell(pdev), sizeof(cell_entry));
-
- WARN_ON(!cell_entry.enable);
-
- for (i = 0; i < n_clones; i++) {
- cell_entry.name = clones[i];
- /* don't give up if a single call fails; just report error */
- if (mfd_add_device(pdev->dev.parent, -1, &cell_entry,
- cell_entry.usage_count, NULL, 0, NULL))
- dev_err(dev, "failed to create platform device '%s'\n",
- clones[i]);
- }
-
- put_device(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(mfd_clone_cell);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov");
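With the per-cell usage counters removed, mfd_cell_enable()/mfd_cell_disable() now call the hooks unconditionally, so any reference counting must live in the parent driver's callbacks themselves. A sketch (hypothetical cell driver) of the simplified contract:

static int foo_cell_probe(struct platform_device *pdev)
{
	int ret;

	/* returns 0 straight away when the cell registered no .enable() */
	ret = mfd_cell_enable(pdev);
	if (ret)
		return ret;

	/* ... set up the function ... */
	return 0;
}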
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index b2c325ead1c8..0437c858d115 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -189,16 +189,16 @@ static int mt6397_probe(struct platform_device *pdev)
switch (pmic->chip_id) {
case MT6323_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
- ARRAY_SIZE(mt6323_devs), NULL,
- 0, pmic->irq_domain);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ mt6323_devs, ARRAY_SIZE(mt6323_devs),
+ NULL, 0, pmic->irq_domain);
break;
case MT6391_CHIP_ID:
case MT6397_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
- ARRAY_SIZE(mt6397_devs), NULL,
- 0, pmic->irq_domain);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ mt6397_devs, ARRAY_SIZE(mt6397_devs),
+ NULL, 0, pmic->irq_domain);
break;
default:
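PLATFORM_DEVID_NONE is simply the named form of the -1 that was passed before, per <linux/platform_device.h>:

#define PLATFORM_DEVID_NONE	(-1)
#define PLATFORM_DEVID_AUTO	(-2)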
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index e8fe705073fa..1df1a2711328 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -31,6 +31,8 @@
#define PM8916_SUBTYPE 0x0b
#define PM8004_SUBTYPE 0x0c
#define PM8909_SUBTYPE 0x0d
+#define PM8950_SUBTYPE 0x10
+#define PMI8950_SUBTYPE 0x11
#define PM8998_SUBTYPE 0x14
#define PMI8998_SUBTYPE 0x15
#define PM8005_SUBTYPE 0x18
@@ -50,6 +52,8 @@ static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,pm8916", .data = (void *)PM8916_SUBTYPE },
{ .compatible = "qcom,pm8004", .data = (void *)PM8004_SUBTYPE },
{ .compatible = "qcom,pm8909", .data = (void *)PM8909_SUBTYPE },
+ { .compatible = "qcom,pm8950", .data = (void *)PM8950_SUBTYPE },
+ { .compatible = "qcom,pmi8950", .data = (void *)PMI8950_SUBTYPE },
{ .compatible = "qcom,pm8998", .data = (void *)PM8998_SUBTYPE },
{ .compatible = "qcom,pmi8998", .data = (void *)PMI8998_SUBTYPE },
{ .compatible = "qcom,pm8005", .data = (void *)PM8005_SUBTYPE },
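At probe time the subtype stored in .data is recovered from the matched table entry. A short sketch (hypothetical helper, assuming an already-matched device node):

static unsigned long foo_get_subtype(struct device_node *np)
{
	const struct of_device_id *match;

	match = of_match_node(pmic_spmi_id_table, np);
	return match ? (unsigned long)match->data : 0;
}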
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 050478cabc95..a69a6742ecdc 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -109,11 +109,7 @@ static const struct regmap_config rk817_regmap_config = {
};
static struct resource rtc_resources[] = {
- {
- .start = RK808_IRQ_RTC_ALARM,
- .end = RK808_IRQ_RTC_ALARM,
- .flags = IORESOURCE_IRQ,
- }
+ DEFINE_RES_IRQ(RK808_IRQ_RTC_ALARM),
};
static struct resource rk817_rtc_resources[] = {
@@ -121,16 +117,8 @@ static struct resource rk817_rtc_resources[] = {
};
static struct resource rk805_key_resources[] = {
- {
- .start = RK805_IRQ_PWRON_FALL,
- .end = RK805_IRQ_PWRON_FALL,
- .flags = IORESOURCE_IRQ,
- },
- {
- .start = RK805_IRQ_PWRON_RISE,
- .end = RK805_IRQ_PWRON_RISE,
- .flags = IORESOURCE_IRQ,
- }
+ DEFINE_RES_IRQ(RK805_IRQ_PWRON_RISE),
+ DEFINE_RES_IRQ(RK805_IRQ_PWRON_FALL),
};
static struct resource rk817_pwrkey_resources[] = {
@@ -167,7 +155,7 @@ static const struct mfd_cell rk817s[] = {
{ .name = "rk808-clkout",},
{ .name = "rk808-regulator",},
{
- .name = "rk8xx-pwrkey",
+ .name = "rk805-pwrkey",
.num_resources = ARRAY_SIZE(rk817_pwrkey_resources),
.resources = &rk817_pwrkey_resources[0],
},
@@ -215,7 +203,7 @@ static const struct rk808_reg_data rk808_pre_init_reg[] = {
static const struct rk808_reg_data rk817_pre_init_reg[] = {
{RK817_RTC_CTRL_REG, RTC_STOP, RTC_STOP},
- {RK817_GPIO_INT_CFG, RK817_INT_POL_MSK, RK817_INT_POL_H},
+ {RK817_GPIO_INT_CFG, RK817_INT_POL_MSK, RK817_INT_POL_L},
{RK817_SYS_CFG(1), RK817_HOTDIE_TEMP_MSK | RK817_TSD_TEMP_MSK,
RK817_HOTDIE_105 | RK817_TSD_140},
};
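DEFINE_RES_IRQ() from <linux/ioport.h> builds the same single-IRQ resource the open-coded initializers did, so the two forms below are equivalent:

static struct resource example_verbose = {
	.start = 5,		/* IRQ number */
	.end   = 5,
	.flags = IORESOURCE_IRQ,
};

static struct resource example_terse[] = {
	DEFINE_RES_IRQ(5),
};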
diff --git a/drivers/mfd/rohm-bd70528.c b/drivers/mfd/rohm-bd70528.c
index 55599d5c5c86..ef6786fd3b00 100644
--- a/drivers/mfd/rohm-bd70528.c
+++ b/drivers/mfd/rohm-bd70528.c
@@ -105,15 +105,14 @@ static struct regmap_config bd70528_regmap = {
* register.
*/
-/* bit [0] - Shutdown register */
-unsigned int bit0_offsets[] = {0}; /* Shutdown register */
-unsigned int bit1_offsets[] = {1}; /* Power failure register */
-unsigned int bit2_offsets[] = {2}; /* VR FAULT register */
-unsigned int bit3_offsets[] = {3}; /* PMU register interrupts */
-unsigned int bit4_offsets[] = {4, 5}; /* Charger 1 and Charger 2 registers */
-unsigned int bit5_offsets[] = {6}; /* RTC register */
-unsigned int bit6_offsets[] = {7}; /* GPIO register */
-unsigned int bit7_offsets[] = {8}; /* Invalid operation register */
+static unsigned int bit0_offsets[] = {0}; /* Shutdown */
+static unsigned int bit1_offsets[] = {1}; /* Power failure */
+static unsigned int bit2_offsets[] = {2}; /* VR FAULT */
+static unsigned int bit3_offsets[] = {3}; /* PMU interrupts */
+static unsigned int bit4_offsets[] = {4, 5}; /* Charger 1 and Charger 2 */
+static unsigned int bit5_offsets[] = {6}; /* RTC */
+static unsigned int bit6_offsets[] = {7}; /* GPIO */
+static unsigned int bit7_offsets[] = {8}; /* Invalid operation */
static struct regmap_irq_sub_irq_map bd70528_sub_irq_offsets[] = {
REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 660723276481..e22197c832e8 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -105,7 +105,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
- syscon_config.name = of_node_full_name(np);
regmap = regmap_init_mmio(NULL, base, &syscon_config);
if (IS_ERR(regmap)) {
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index fd111296b959..926c289cb040 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -182,11 +182,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
tscadc->irq = err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tscadc->tscadc_phys_base = res->start;
tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
+ tscadc->tscadc_phys_base = res->start;
tscadc->regmap = devm_regmap_init_mmio(&pdev->dev,
tscadc->tscadc_base, &tscadc_regmap_config);
if (IS_ERR(tscadc->regmap)) {
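The reordering matters because devm_ioremap_resource() is what validates the resource, including a NULL check, so fields of res should only be dereferenced after the IS_ERR() test. The general pattern, as a sketch:

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* res is known to be valid only from this point on */
	dev_dbg(&pdev->dev, "mapped %pa\n", &res->start);
	return 0;
}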
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
index ebf0eadd2075..9b34a6d76094 100644
--- a/drivers/mfd/wm8998-tables.c
+++ b/drivers/mfd/wm8998-tables.c
@@ -806,12 +806,6 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
{ 0x00000EF4, 0x0001 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
- { 0x00001700, 0x0000 }, /* R5888 - FRF_COEFF_1 */
- { 0x00001701, 0x0000 }, /* R5889 - FRF_COEFF_2 */
- { 0x00001702, 0x0000 }, /* R5890 - FRF_COEFF_3 */
- { 0x00001703, 0x0000 }, /* R5891 - FRF_COEFF_4 */
- { 0x00001704, 0x0000 }, /* R5892 - DAC_COMP_1 */
- { 0x00001705, 0x0000 }, /* R5893 - DAC_COMP_2 */
};
static bool wm8998_readable_register(struct device *dev, unsigned int reg)
@@ -1492,12 +1486,6 @@ static bool wm8998_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ISRC_2_CTRL_1:
case ARIZONA_ISRC_2_CTRL_2:
case ARIZONA_ISRC_2_CTRL_3:
- case ARIZONA_FRF_COEFF_1:
- case ARIZONA_FRF_COEFF_2:
- case ARIZONA_FRF_COEFF_3:
- case ARIZONA_FRF_COEFF_4:
- case ARIZONA_V2_DAC_COMP_1:
- case ARIZONA_V2_DAC_COMP_2:
return true;
default:
return false;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index c55b63750757..7f0d48f406e3 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -8,7 +8,6 @@ menu "Misc devices"
config SENSORS_LIS3LV02D
tristate
depends on INPUT
- select INPUT_POLLDEV
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
@@ -326,14 +325,14 @@ config SENSORS_TSL2550
will be called tsl2550.
config SENSORS_BH1770
- tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
- depends on I2C
- ---help---
- Say Y here if you want to build a driver for BH1770GLC (ROHM) or
+ tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
+ depends on I2C
+ ---help---
+ Say Y here if you want to build a driver for BH1770GLC (ROHM) or
SFH7770 (Osram) combined ambient light and proximity sensor chip.
- To compile this driver as a module, choose M here: the
- module will be called bh1770glc. If unsure, say N here.
+ To compile this driver as a module, choose M here: the
+ module will be called bh1770glc. If unsure, say N here.
config SENSORS_APDS990X
tristate "APDS990X combined als and proximity sensors"
@@ -438,8 +437,8 @@ config PCI_ENDPOINT_TEST
select CRC32
tristate "PCI Endpoint Test driver"
---help---
- Enable this configuration option to enable the host side test driver
- for PCI Endpoint.
+ Enable this configuration option to enable the host side test driver
+ for PCI Endpoint.
config XILINX_SDFEC
tristate "Xilinx SDFEC 16"
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 08b5b639d77f..7de7840f613c 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -109,7 +109,6 @@ static int __init tc_probe(struct platform_device *pdev)
struct atmel_tc *tc;
struct clk *clk;
int irq;
- struct resource *r;
unsigned int i;
if (of_get_child_count(pdev->dev.of_node))
@@ -133,8 +132,7 @@ static int __init tc_probe(struct platform_device *pdev)
if (IS_ERR(tc->slow_clk))
return PTR_ERR(tc->slow_clk);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tc->regs = devm_ioremap_resource(&pdev->dev, r);
+ tc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tc->regs))
return PTR_ERR(tc->regs);
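devm_platform_ioremap_resource(pdev, index) wraps the platform_get_resource() plus devm_ioremap_resource() pair, so the conversion is mechanical. Roughly what the helper does internally (simplified sketch):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}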
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
index d9bff5a2217e..1f56267ed2f4 100644
--- a/drivers/misc/cardreader/Makefile
+++ b/drivers/misc/cardreader/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o
obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o
obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 40a6d199f2ea..4214f02a17fd 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -191,7 +191,6 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
{
- int err = 0;
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
@@ -231,7 +230,7 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
- return err;
+ return 0;
}
static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
new file mode 100644
index 000000000000..32dcec2e9dfd
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.c
@@ -0,0 +1,792 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5261.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5261_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[4][3] = {
+ {0x13, 0x13, 0x13},
+ {0x96, 0x96, 0x96},
+ {0x7F, 0x7F, 0x7F},
+ {0x96, 0x96, 0x96},
+ };
+ u8 driving_1v8[4][3] = {
+ {0x99, 0x99, 0x99},
+ {0x3A, 0x3A, 0x3A},
+ {0xE6, 0xE6, 0xE6},
+ {0xB3, 0xB3, 0xB3},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+
+ rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+
+ rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx5261_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+ /* 0x814~0x817 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+
+ if (!rts5261_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetching vendor settings\n");
+ return;
+ }
+
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rts5261_reg_to_card_drive_sel(reg);
+
+ if (rts5261_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+
+ /* 0x724~0x727 */
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ pcr->aspm_en = rts5261_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(reg);
+}
+
+static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, SSC_POWER_DOWN);
+}
+
+static int rts5261_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rts5261_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5261_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x02);
+}
+
+static int rts5261_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, GPIO_CTL,
+ 0x02, 0x00);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5261_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ 0,
+};
+
+static int rts5261_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5261_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG1,
+ RTS5261_LDO1_TUNE_MASK, RTS5261_LDO1_33);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO1_POWERON, RTS5261_LDO1_POWERON);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO3318_POWERON, RTS5261_LDO3318_POWERON);
+
+ msleep(20);
+
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ rts5261_sd_set_sample_push_timing_sd30(pcr);
+
+ return 0;
+}
+
+static int rts5261_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ int err;
+ u16 val = 0;
+
+ rtsx_pci_write_register(pcr, RTS5261_CARD_PWR_CTL,
+ RTS5261_PUPDC, RTS5261_PUPDC);
+
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val |= PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
+ val &= ~PHY_TUNE_SDBUS_33;
+ err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
+ RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_18);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL,
+ SD_IO_USING_1V8, SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rts5261_fill_driving(pcr, voltage);
+
+ return 0;
+}
+
+static void rts5261_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5261_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ rts5261_stop_cmd(pcr);
+ rts5261_switch_output_voltage(pcr, OUTPUT_3V3);
+}
+
+static void rts5261_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+static void rts5261_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+}
+
+static int rts5261_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5261_card_before_power_off(pcr);
+ err = rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
+ RTS5261_LDO_POWERON_MASK, 0);
+
+ if (pcr->option.ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+
+ return err;
+}
+
+static void rts5261_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_LMT_THD_MASK,
+ RTS5261_LDO1_LMT_THD_2000);
+
+ mask = SD_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rts5261_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
+ RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
+ }
+}
+
+static void rts5261_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+static void rts5261_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+
+ if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
+ rts5261_card_power_off(pcr, RTSX_SD_CARD);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ rts5261_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ }
+}
+
+static int rts5261_init_from_hw(struct rtsx_pcr *pcr)
+{
+ int retval;
+ u32 lval, i;
+ u8 valid, efuse_valid, tmp;
+
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
+ REG_EFUSE_POR | REG_EFUSE_POWERON);
+ udelay(1);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_ADDR,
+ RTS5261_EFUSE_ADDR_MASK, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_EFUSE_CTL,
+ RTS5261_EFUSE_ENABLE | RTS5261_EFUSE_MODE_MASK,
+ RTS5261_EFUSE_ENABLE);
+
+ /* Wait for the eFuse transfer to finish */
+ for (i = 0; i < MAX_RW_REG_CNT; i++) {
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_CTL, &tmp);
+ if ((tmp & 0x80) == 0)
+ break;
+ }
+ rtsx_pci_read_register(pcr, RTS5261_EFUSE_READ_DATA, &tmp);
+ efuse_valid = ((tmp & 0x0C) >> 2);
+ pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
+
+ if (efuse_valid == 0) {
+ retval = rtsx_pci_read_config_dword(pcr,
+ PCR_SETTING_REG2, &lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "read 0x814 DW fail\n");
+ pcr_dbg(pcr, "DW from 0x814: 0x%x\n", lval);
+ /* 0x816 */
+ valid = (u8)((lval >> 16) & 0x03);
+ pcr_dbg(pcr, "0x816: %d\n", valid);
+ }
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POR, 0);
+ pcr_dbg(pcr, "Disable efuse por!\n");
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &lval);
+ lval = lval & 0x00FFFFFF;
+ retval = rtsx_pci_write_config_dword(pcr, PCR_SETTING_REG2, lval);
+ if (retval != 0)
+ pcr_dbg(pcr, "write config fail\n");
+
+ return retval;
+}
+
+static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ u32 lval;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_REG1, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
+ CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
+
+ rts5261_init_from_cfg(pcr);
+ rts5261_init_from_hw(pcr);
+
+ /* power off efuse */
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
+
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_AUX_CLK_16M_EN, 0);
+
+ /* Release PRSNT# */
+ rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
+ RTS5261_FORCE_PRSNT_LOW, 0);
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
+ FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL,
+ PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
+
+ /* LED shine disabled, set initial shine cycle period */
+ rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
+
+ /* Configure driving */
+ rts5261_fill_driving(pcr, OUTPUT_3V3);
+
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin is forced to
+ * drive low and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
+ rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
+ FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
+
+ /* Clear the Enter RTD3_cold information */
+ rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
+ RTS5261_INFORM_RTD3_COLD, 0);
+
+ return 0;
+}
+
+static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = FORCE_ASPM_CTL0;
+ val |= (pcr->aspm_en & 0x02);
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ }
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ val = 0;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
+ udelay(10);
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5261_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ if (enable)
+ rts5261_enable_aspm(pcr, true);
+ else
+ rts5261_disable_aspm(pcr, false);
+}
+
+static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5261_pcr_ops = {
+ .fetch_vendor_settings = rtsx5261_fetch_vendor_settings,
+ .turn_on_led = rts5261_turn_on_led,
+ .turn_off_led = rts5261_turn_off_led,
+ .extra_init_hw = rts5261_extra_init_hw,
+ .enable_auto_blink = rts5261_enable_auto_blink,
+ .disable_auto_blink = rts5261_disable_auto_blink,
+ .card_power_on = rts5261_card_power_on,
+ .card_power_off = rts5261_card_power_off,
+ .switch_output_voltage = rts5261_switch_output_voltage,
+ .force_power_down = rts5261_force_power_down,
+ .stop_cmd = rts5261_stop_cmd,
+ .set_aspm = rts5261_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5261_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5261_enable_ocp,
+ .disable_ocp = rts5261_disable_ocp,
+ .init_ocp = rts5261_init_ocp,
+ .process_ocp = rts5261_process_ocp,
+ .clear_ocpstat = rts5261_clear_ocpstat,
+};
+
+static inline u8 double_ssc_depth(u8 depth)
+{
+ return ((depth > 1) ? (depth - 1) : depth);
+}
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
+{
+ int err, clk;
+ u8 n, clk_divider, mcu_cnt, div;
+ static const u8 depth[] = {
+ [RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
+ [RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
+ [RTSX_SSC_DEPTH_1M] = RTS5261_SSC_DEPTH_1M,
+ [RTSX_SSC_DEPTH_500K] = RTS5261_SSC_DEPTH_512K,
+ };
+
+ if (initial_mode) {
+ /* Initial stage: use ~250 kHz (30 MHz / 128 = 234.375 kHz) */
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_0;
+ }
+ err = rtsx_pci_write_register(pcr, SD_CFG1,
+ SD_CLK_DIVIDE_MASK, clk_divider);
+ if (err < 0)
+ return err;
+
+ card_clock /= 1000000;
+ pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
+
+ clk = card_clock;
+ if (!initial_mode && double_clk)
+ clk = card_clock * 2;
+ pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
+ clk, pcr->cur_clock);
+
+ if (clk == pcr->cur_clock)
+ return 0;
+
+ if (pcr->ops->conv_clk_and_div_n)
+ n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+ else
+ n = (u8)(clk - 4);
+ if ((clk <= 4) || (n > 396))
+ return -EINVAL;
+
+ mcu_cnt = (u8)(125/clk + 3);
+ if (mcu_cnt > 15)
+ mcu_cnt = 15;
+
+ div = CLK_DIV_1;
+ while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
+ if (pcr->ops->conv_clk_and_div_n) {
+ int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
+ DIV_N_TO_CLK) * 2;
+ n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+ CLK_TO_DIV_N);
+ } else {
+ n = (n + 4) * 2 - 4;
+ }
+ div++;
+ }
+
+ n = (n / 2);
+ pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
+
+ ssc_depth = depth[ssc_depth];
+ if (double_clk)
+ ssc_depth = double_ssc_depth(ssc_depth);
+
+ if (ssc_depth) {
+ if (div == CLK_DIV_2) {
+ if (ssc_depth > 1)
+ ssc_depth -= 1;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_4) {
+ if (ssc_depth > 2)
+ ssc_depth -= 2;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ } else if (div == CLK_DIV_8) {
+ if (ssc_depth > 3)
+ ssc_depth -= 3;
+ else
+ ssc_depth = RTS5261_SSC_DEPTH_8M;
+ }
+ } else {
+ ssc_depth = 0;
+ }
+ pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
+ CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
+ 0xFF, (div << 4) | mcu_cnt);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
+ SSC_DEPTH_MASK, ssc_depth);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
+ if (vpclk) {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, 0);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
+ PHASE_NOT_RESET, PHASE_NOT_RESET);
+ }
+
+ err = rtsx_pci_send_cmd(pcr, 2000);
+ if (err < 0)
+ return err;
+
+ /* Wait for the SSC clock to stabilize */
+ udelay(SSC_CLOCK_STABLE_WAIT);
+ err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+ if (err < 0)
+ return err;
+
+ pcr->cur_clock = clk;
+ return 0;
+}
+
+void rts5261_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 1;
+ pcr->ops = &rts5261_pcr_ops;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5261_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5261_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5261_sd_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS5261_AUTOLOAD_CFG3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = 0x7F;
+ option->ltr_l1off_snooze_sspwrgate = 0x78;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+
+ option->ocp_en = 1;
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
+ option->sd_800mA_ocp_thd = RTS5261_LDO1_OCP_THD_1040;
+}
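To make the divider loop in rts5261_pci_switch_clock() concrete (assuming MIN_DIV_N_PCR is 80, as defined in rtsx_pcr.h): a requested 25 MHz card clock with double_clk gives clk = 50 and n = 46; since 46 < 76, one pass of the loop yields n = (46 + 4) * 2 - 4 = 96 with div = CLK_DIV_2; n is then halved to 48, and if SSC is enabled the depth is stepped down once to compensate for the doubled divider.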
diff --git a/drivers/misc/cardreader/rts5261.h b/drivers/misc/cardreader/rts5261.h
new file mode 100644
index 000000000000..ebfdd236a553
--- /dev/null
+++ b/drivers/misc/cardreader/rts5261.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Author:
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+#ifndef RTS5261_H
+#define RTS5261_H
+
+/* New definitions for the RTS5261 */
+#define rts5261_vendor_setting_valid(reg) ((reg) & 0x010000)
+#define rts5261_reg_to_aspm(reg) (((reg) >> 28) ^ 0x03)
+#define rts5261_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5261_reg_to_card_drive_sel(reg) ((((reg) >> 6) & 0x01) << 6)
+#define rts5261_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) ^ 0x03)
+#define rts5261_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) ^ 0x03)
+
+#define RTS5261_AUTOLOAD_CFG0 0xFF7B
+#define RTS5261_AUTOLOAD_CFG1 0xFF7C
+#define RTS5261_AUTOLOAD_CFG2 0xFF7D
+#define RTS5261_AUTOLOAD_CFG3 0xFF7E
+#define RTS5261_AUTOLOAD_CFG4 0xFF7F
+#define RTS5261_FORCE_PRSNT_LOW (1 << 6)
+#define RTS5261_AUX_CLK_16M_EN (1 << 5)
+
+#define RTS5261_REG_VREF 0xFE97
+#define RTS5261_PWD_SUSPND_EN (1 << 4)
+
+#define RTS5261_PAD_H3L1 0xFF79
+#define PAD_GPIO_H3L1 (1 << 3)
+
+/* SSC_CTL2 0xFC12 */
+#define RTS5261_SSC_DEPTH_MASK 0x07
+#define RTS5261_SSC_DEPTH_DISABLE 0x00
+#define RTS5261_SSC_DEPTH_8M 0x01
+#define RTS5261_SSC_DEPTH_4M 0x02
+#define RTS5261_SSC_DEPTH_2M 0x03
+#define RTS5261_SSC_DEPTH_1M 0x04
+#define RTS5261_SSC_DEPTH_512K 0x05
+#define RTS5261_SSC_DEPTH_256K 0x06
+#define RTS5261_SSC_DEPTH_128K 0x07
+
+/* eFuse control registers */
+#define RTS5261_EFUSE_CTL 0xFC30
+#define RTS5261_EFUSE_ENABLE 0x80
+/* EFUSE_MODE: 0 = read, 1 = program */
+#define RTS5261_EFUSE_MODE_MASK 0x40
+#define RTS5261_EFUSE_PROGRAM 0x40
+
+#define RTS5261_EFUSE_ADDR 0xFC31
+#define RTS5261_EFUSE_ADDR_MASK 0x3F
+
+#define RTS5261_EFUSE_WRITE_DATA 0xFC32
+#define RTS5261_EFUSE_READ_DATA 0xFC34
+
+/* DMACTL 0xFE2C */
+#define RTS5261_DMA_PACK_SIZE_MASK 0xF0
+
+/* FW config info register */
+#define RTS5261_FW_CFG_INFO0 0xFF50
+#define RTS5261_FW_EXPRESS_TEST_MASK (0x01<<0)
+#define RTS5261_FW_EA_MODE_MASK (0x01<<5)
+
+/* FW config register */
+#define RTS5261_FW_CFG0 0xFF54
+#define RTS5261_FW_ENTER_EXPRESS (0x01<<0)
+
+#define RTS5261_FW_CFG1 0xFF55
+#define RTS5261_SYS_CLK_SEL_MCU_CLK (0x01<<7)
+#define RTS5261_CRC_CLK_SEL_MCU_CLK (0x01<<6)
+#define RTS5261_FAKE_MCU_CLOCK_GATING (0x01<<5)
+/* MCU_bus_mode_sel: 0 = real 8051, 1 = fake MCU */
+#define RTS5261_MCU_BUS_SEL_MASK (0x01<<4)
+/* MCU_clock_sel, ver A: 00 = aux16M, 01 = aux400K, 1x = REFCLK100M */
+/* MCU_clock_sel, ver B: 00 = aux400K, 01 = aux16M, 10 = REFCLK100M */
+#define RTS5261_MCU_CLOCK_SEL_MASK (0x03<<2)
+#define RTS5261_MCU_CLOCK_SEL_16M (0x01<<2)
+#define RTS5261_MCU_CLOCK_GATING (0x01<<1)
+#define RTS5261_DRIVER_ENABLE_FW (0x01<<0)
+
+/* FW status register */
+#define RTS5261_FW_STATUS 0xFF56
+#define RTS5261_EXPRESS_LINK_FAIL_MASK (0x01<<7)
+
+/* FW control register */
+#define RTS5261_FW_CTL 0xFF5F
+#define RTS5261_INFORM_RTD3_COLD (0x01<<5)
+
+#define RTS5261_REG_FPDCTL 0xFF60
+
+#define RTS5261_REG_LDO12_CFG 0xFF6E
+#define RTS5261_LDO12_VO_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO12_115 (0x03<<1)
+#define RTS5261_LDO12_120 (0x04<<1)
+#define RTS5261_LDO12_125 (0x05<<1)
+#define RTS5261_LDO12_130 (0x06<<1)
+#define RTS5261_LDO12_135 (0x07<<1)
+
+/* LDO control register */
+#define RTS5261_CARD_PWR_CTL 0xFD50
+#define RTS5261_SD_CLK_ISO (0x01<<7)
+#define RTS5261_PAD_SD_DAT_FW_CTRL (0x01<<6)
+#define RTS5261_PUPDC (0x01<<5)
+#define RTS5261_SD_CMD_ISO (0x01<<4)
+#define RTS5261_SD_DAT_ISO_MASK (0x0F<<0)
+
+#define RTS5261_LDO1233318_POW_CTL 0xFF70
+#define RTS5261_LDO3318_POWERON (0x01<<3)
+#define RTS5261_LDO3_POWERON (0x01<<2)
+#define RTS5261_LDO2_POWERON (0x01<<1)
+#define RTS5261_LDO1_POWERON (0x01<<0)
+#define RTS5261_LDO_POWERON_MASK (0x0F<<0)
+
+#define RTS5261_DV3318_CFG 0xFF71
+#define RTS5261_DV3318_TUNE_MASK (0x07<<4)
+#define RTS5261_DV3318_18 (0x02<<4)
+#define RTS5261_DV3318_19 (0x04<<4)
+#define RTS5261_DV3318_33 (0x07<<4)
+
+#define RTS5261_LDO1_CFG0 0xFF72
+#define RTS5261_LDO1_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO1_OCP_EN (0x01<<4)
+#define RTS5261_LDO1_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO1_OCP_LMT_EN (0x01<<1)
+
+/* CRD6603-433 190319 request changed */
+#define RTS5261_LDO1_OCP_THD_740 (0x00<<5)
+#define RTS5261_LDO1_OCP_THD_800 (0x01<<5)
+#define RTS5261_LDO1_OCP_THD_860 (0x02<<5)
+#define RTS5261_LDO1_OCP_THD_920 (0x03<<5)
+#define RTS5261_LDO1_OCP_THD_980 (0x04<<5)
+#define RTS5261_LDO1_OCP_THD_1040 (0x05<<5)
+#define RTS5261_LDO1_OCP_THD_1100 (0x06<<5)
+#define RTS5261_LDO1_OCP_THD_1160 (0x07<<5)
+
+#define RTS5261_LDO1_LMT_THD_450 (0x00<<2)
+#define RTS5261_LDO1_LMT_THD_1000 (0x01<<2)
+#define RTS5261_LDO1_LMT_THD_1500 (0x02<<2)
+#define RTS5261_LDO1_LMT_THD_2000 (0x03<<2)
+
+#define RTS5261_LDO1_CFG1 0xFF73
+#define RTS5261_LDO1_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO1_18 (0x05<<1)
+#define RTS5261_LDO1_33 (0x07<<1)
+#define RTS5261_LDO1_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO2_CFG0 0xFF74
+#define RTS5261_LDO2_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO2_OCP_EN (0x01<<4)
+#define RTS5261_LDO2_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO2_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO2_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO2_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO2_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO2_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO2_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO2_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO2_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO2_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO2_CFG1 0xFF75
+#define RTS5261_LDO2_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO2_18 (0x05<<1)
+#define RTS5261_LDO2_33 (0x07<<1)
+#define RTS5261_LDO2_PWD_MASK (0x01<<0)
+
+#define RTS5261_LDO3_CFG0 0xFF76
+#define RTS5261_LDO3_OCP_THD_MASK (0x07<<5)
+#define RTS5261_LDO3_OCP_EN (0x01<<4)
+#define RTS5261_LDO3_OCP_LMT_THD_MASK (0x03<<2)
+#define RTS5261_LDO3_OCP_LMT_EN (0x01<<1)
+
+#define RTS5261_LDO3_OCP_THD_620 (0x00<<5)
+#define RTS5261_LDO3_OCP_THD_650 (0x01<<5)
+#define RTS5261_LDO3_OCP_THD_680 (0x02<<5)
+#define RTS5261_LDO3_OCP_THD_720 (0x03<<5)
+#define RTS5261_LDO3_OCP_THD_750 (0x04<<5)
+#define RTS5261_LDO3_OCP_THD_780 (0x05<<5)
+#define RTS5261_LDO3_OCP_THD_810 (0x06<<5)
+#define RTS5261_LDO3_OCP_THD_840 (0x07<<5)
+
+#define RTS5261_LDO3_CFG1 0xFF77
+#define RTS5261_LDO3_TUNE_MASK (0x07<<1)
+#define RTS5261_LDO3_18 (0x05<<1)
+#define RTS5261_LDO3_33 (0x07<<1)
+#define RTS5261_LDO3_PWD_MASK (0x01<<0)
+
+#define RTS5261_REG_PME_FORCE_CTL 0xFF78
+#define FORCE_PM_CONTROL 0x20
+#define FORCE_PM_VALUE 0x10
+#define REG_EFUSE_BYPASS 0x08
+#define REG_EFUSE_POR 0x04
+#define REG_EFUSE_POWER_MASK 0x03
+#define REG_EFUSE_POWERON 0x03
+#define REG_EFUSE_POWEROFF 0x00
+
+/* Single LUN, supports SD / SD Express */
+#define DEFAULT_SINGLE 0
+#define SD_LUN 1
+#define SD_EXPRESS_LUN 2
+
+/* For Change_FPGA_SSCClock Function */
+#define MULTIPLY_BY_1 0x00
+#define MULTIPLY_BY_2 0x01
+#define MULTIPLY_BY_3 0x02
+#define MULTIPLY_BY_4 0x03
+#define MULTIPLY_BY_5 0x04
+#define MULTIPLY_BY_6 0x05
+#define MULTIPLY_BY_7 0x06
+#define MULTIPLY_BY_8 0x07
+#define MULTIPLY_BY_9 0x08
+#define MULTIPLY_BY_10 0x09
+
+#define DIVIDE_BY_2 0x01
+#define DIVIDE_BY_3 0x02
+#define DIVIDE_BY_4 0x03
+#define DIVIDE_BY_5 0x04
+#define DIVIDE_BY_6 0x05
+#define DIVIDE_BY_7 0x06
+#define DIVIDE_BY_8 0x07
+#define DIVIDE_BY_9 0x08
+#define DIVIDE_BY_10 0x09
+
+int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+
+#endif /* RTS5261_H */
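The *_MASK/value pairs above are meant for the (reg, mask, val) register helpers, so a field update never disturbs neighbouring bits. For example, selecting an 800 mA OCP threshold in the style used by rts5261.c:

	rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
				RTS5261_LDO1_OCP_THD_MASK,
				RTS5261_LDO1_OCP_THD_800);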
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index b4a66b64f742..fd7b2167103d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -22,6 +22,7 @@
#include <asm/unaligned.h>
#include "rtsx_pcr.h"
+#include "rts5261.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
@@ -34,9 +35,6 @@ static struct mfd_cell rtsx_pcr_cells[] = {
[RTSX_SD_CARD] = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
},
- [RTSX_MS_CARD] = {
- .name = DRV_NAME_RTSX_PCI_MS,
- },
};
static const struct pci_device_id rtsx_pci_ids[] = {
@@ -51,6 +49,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -438,8 +437,16 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
if (end)
option |= RTSX_SG_END;
- val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ if (PCI_PID(pcr) == PID_5261) {
+ if (len > 0xFFFF)
+ val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
+ | (((u64)len >> 16) << 6) | option;
+ else
+ val = ((u64)addr << 32) | ((u64)len << 16) | option;
+ } else {
+ val = ((u64)addr << 32) | ((u64)len << 12) | option;
+ }
put_unaligned_le64(val, ptr);
pcr->sgi++;
}
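Read back, the RTS5261 branch implies a descriptor layout in which the low 16 bits of the transfer length sit at bit 16 and the overflow bits are folded in at bit 6; that is an inference from the code, not from a datasheet. Isolated as a sketch:

static u64 rts5261_sg_desc(u32 addr, u32 len, u8 option)
{
	if (len > 0xFFFF)	/* length no longer fits the 16-bit field */
		return ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16) |
		       (((u64)len >> 16) << 6) | option;

	return ((u64)addr << 32) | ((u64)len << 16) | option;
}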
@@ -684,7 +691,6 @@ int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
else
return -EINVAL;
-
return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
@@ -735,6 +741,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
};
+ if (PCI_PID(pcr) == PID_5261)
+ return rts5261_pci_switch_clock(pcr, card_clock,
+ ssc_depth, initial_mode, double_clk, vpclk);
+
if (initial_mode) {
/* Use around 250 kHz in the initial stage */
clk_divider = SD_CLK_DIVIDE_128;
@@ -1253,7 +1263,15 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
- err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ if (PCI_PID(pcr) == PID_5261) {
+ /* Gating real mcu clock */
+ err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
+ RTS5261_MCU_CLOCK_GATING, 0);
+ err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
+ SSC_POWER_DOWN, 0);
+ } else {
+ err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
+ }
if (err < 0)
return err;
@@ -1283,7 +1301,12 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
/* Enable SSC Clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
0xFF, SSC_8X_EN | SSC_SEL_4M);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+ if (PCI_PID(pcr) == PID_5261)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
+ RTS5261_SSC_DEPTH_2M);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
+
/* Disable cd_pwr_save */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
/* Clear Link Ready Interrupt */
@@ -1314,6 +1337,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_524A:
case PID_525A:
case PID_5260:
+ case PID_5261:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1393,9 +1417,14 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+
case 0x5260:
rts5260_init_params(pcr);
break;
+
+ case 0x5261:
+ rts5261_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 98f729263dc1..ed391df52f4f 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -53,6 +53,7 @@ void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
void rts5260_init_params(struct rtsx_pcr *pcr);
+void rts5261_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 4d6836f19489..cb9cca35a226 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -473,12 +473,6 @@ static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
-static long device_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return device_ioctl(file, cmd, arg);
-}
-
static int device_close(struct inode *inode, struct file *file)
{
struct cxl *adapter = file->private_data;
@@ -514,7 +508,7 @@ static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = device_open,
.unlocked_ioctl = device_ioctl,
- .compat_ioctl = device_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.release = device_close,
};
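compat_ptr_ioctl() is the generic helper for drivers whose 32-bit and 64-bit ioctl ABIs differ only in pointer representation: it applies compat_ptr() to the argument and then calls ->unlocked_ioctl(), which is all the hand-rolled wrapper did. The resulting pattern:

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* compat_ptr() + foo_ioctl() */
};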
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2cccd82a3106..0681d5fdd538 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -716,9 +716,12 @@ static int at24_probe(struct i2c_client *client)
return -ENODEV;
}
- dev_info(dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
- byte_len, client->name,
- writable ? "writable" : "read-only", at24->write_max);
+ if (writable)
+ dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n",
+ byte_len, client->name, at24->write_max);
+ else
+ dev_info(dev, "%u byte %s EEPROM, read-only\n",
+ byte_len, client->name);
return 0;
}
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2cfe3d4ae144..226b5efa6a77 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -175,6 +175,10 @@ static int eeprom_probe(struct i2c_client *client,
}
}
+ /* Let the users know they are using deprecated driver */
+ dev_notice(&client->dev,
+ "eeprom driver is deprecated, please use at24 instead\n");
+
/* create the sysfs eeprom file */
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 1b1a794d639d..ae4ee27a63c4 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -32,8 +32,9 @@
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_CTXID_MASK (0xFF0)
-#define INIT_FILELEN_MAX (64 * 1024 * 1024)
+#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME "fastrpc"
+#define ADSP_MMAP_ADD_PAGES 0x1000
/* Retrives number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
@@ -66,6 +67,8 @@
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
+#define FASTRPC_RMID_INIT_MMAP 4
+#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
@@ -89,6 +92,23 @@ struct fastrpc_remote_arg {
u64 len;
};
+struct fastrpc_mmap_rsp_msg {
+ u64 vaddr;
+};
+
+struct fastrpc_mmap_req_msg {
+ s32 pgid;
+ u32 flags;
+ u64 vaddr;
+ s32 num;
+};
+
+struct fastrpc_munmap_req_msg {
+ s32 pgid;
+ u64 vaddr;
+ u64 size;
+};
+
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
@@ -123,6 +143,9 @@ struct fastrpc_buf {
/* Lock for dma buf attachments */
struct mutex lock;
struct list_head attachments;
+ /* mmap support */
+ struct list_head node; /* node in the list of user-requested mmaps */
+ uintptr_t raddr;
};
struct fastrpc_dma_buf_attachment {
@@ -192,6 +215,7 @@ struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
+ struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
@@ -269,6 +293,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
return -ENOMEM;
INIT_LIST_HEAD(&buf->attachments);
+ INIT_LIST_HEAD(&buf->node);
mutex_init(&buf->lock);
buf->fl = fl;
@@ -276,6 +301,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
buf->phys = 0;
buf->size = size;
buf->dev = dev;
+ buf->raddr = 0;
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
@@ -934,8 +960,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
- /* Wait for remote dsp to respond or time out */
- err = wait_for_completion_interruptible(&ctx->work);
+ if (kernel) {
+ if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
+ err = -ETIMEDOUT;
+ } else {
+ err = wait_for_completion_interruptible(&ctx->work);
+ }
+
if (err)
goto bail;
@@ -954,12 +985,13 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
}
bail:
- /* We are done with this compute context, remove it from pending list */
- spin_lock(&fl->lock);
- list_del(&ctx->node);
- spin_unlock(&fl->lock);
- fastrpc_context_put(ctx);
-
+ if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
+ /* We are done with this compute context */
+ spin_lock(&fl->lock);
+ list_del(&ctx->node);
+ spin_unlock(&fl->lock);
+ fastrpc_context_put(ctx);
+ }
if (err)
dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
@@ -1131,6 +1163,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
+ struct fastrpc_buf *buf, *b;
unsigned long flags;
fastrpc_release_current_dsp_process(fl);
@@ -1152,6 +1185,11 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_map_put(map);
}
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ list_del(&buf->node);
+ fastrpc_buf_free(buf);
+ }
+
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
@@ -1180,6 +1218,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->maps);
+ INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
@@ -1285,6 +1324,148 @@ static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
return err;
}
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
+ struct fastrpc_req_munmap *req)
+{
+ struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
+ struct fastrpc_buf *buf, *b;
+ struct fastrpc_munmap_req_msg req_msg;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
+ if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
+ break;
+ buf = NULL;
+ }
+ spin_unlock(&fl->lock);
+
+ if (!buf) {
+ dev_err(dev, "mmap not in list\n");
+ return -EINVAL;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.size = buf->size;
+ req_msg.vaddr = buf->raddr;
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (!err) {
+ dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
+ spin_lock(&fl->lock);
+ list_del(&buf->node);
+ spin_unlock(&fl->lock);
+ fastrpc_buf_free(buf);
+ } else {
+ dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
+ }
+
+ return err;
+}
+
+static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_req_munmap req;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ return fastrpc_req_munmap_impl(fl, &req);
+}
+
+static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
+{
+ struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
+ struct fastrpc_buf *buf = NULL;
+ struct fastrpc_mmap_req_msg req_msg;
+ struct fastrpc_mmap_rsp_msg rsp_msg;
+ struct fastrpc_req_munmap req_unmap;
+ struct fastrpc_phy_page pages;
+ struct fastrpc_req_mmap req;
+ struct device *dev = fl->sctx->dev;
+ int err;
+ u32 sc;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ if (req.flags != ADSP_MMAP_ADD_PAGES) {
+ dev_err(dev, "flag not supported 0x%x\n", req.flags);
+ return -EINVAL;
+ }
+
+ if (req.vaddrin) {
+ dev_err(dev, "adding user allocated pages is not supported\n");
+ return -EINVAL;
+ }
+
+ err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+ if (err) {
+ dev_err(dev, "failed to allocate buffer\n");
+ return err;
+ }
+
+ req_msg.pgid = fl->tgid;
+ req_msg.flags = req.flags;
+ req_msg.vaddr = req.vaddrin;
+ req_msg.num = sizeof(pages);
+
+ args[0].ptr = (u64) (uintptr_t) &req_msg;
+ args[0].length = sizeof(req_msg);
+
+ pages.addr = buf->phys;
+ pages.size = buf->size;
+
+ args[1].ptr = (u64) (uintptr_t) &pages;
+ args[1].length = sizeof(pages);
+
+ args[2].ptr = (u64) (uintptr_t) &rsp_msg;
+ args[2].length = sizeof(rsp_msg);
+
+ sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
+ err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
+ &args[0]);
+ if (err) {
+ dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
+ goto err_invoke;
+ }
+
+ /* record the remote address so the DSP memory can be unmapped later */
+ buf->raddr = (uintptr_t) rsp_msg.vaddr;
+
+ /* let the client know the address to use */
+ req.vaddrout = rsp_msg.vaddr;
+
+ spin_lock(&fl->lock);
+ list_add_tail(&buf->node, &fl->mmaps);
+ spin_unlock(&fl->lock);
+
+ if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
+ /* unmap the memory and release the buffer */
+ req_unmap.vaddrout = buf->raddr;
+ req_unmap.size = buf->size;
+ fastrpc_req_munmap_impl(fl, &req_unmap);
+ return -EFAULT;
+ }
+
+ dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
+ buf->raddr, buf->size);
+
+ return 0;
+
+err_invoke:
+ fastrpc_buf_free(buf);
+
+ return err;
+}
+
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -1305,6 +1486,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
+ case FASTRPC_IOCTL_MMAP:
+ err = fastrpc_req_mmap(fl, argp);
+ break;
+ case FASTRPC_IOCTL_MUNMAP:
+ err = fastrpc_req_munmap(fl, argp);
+ break;
default:
err = -ENOTTY;
break;
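
For context, a user-space caller would drive the two new ioctls back to back: map to grow the DSP-visible memory, then unmap with the remote address the DSP returned. A rough sketch follows; the uapi header path and the exact struct layouts are assumptions inferred from the handler code above, not taken verbatim from this patch.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <misc/fastrpc.h>              /* assumed uapi header */

    /* Grow the DSP-visible heap by `size` bytes, then release it again. */
    static int map_unmap_roundtrip(int fd, uint64_t size)
    {
            struct fastrpc_req_mmap mreq = {
                    .vaddrin = 0,                  /* driver allocates the pages */
                    .flags   = ADSP_MMAP_ADD_PAGES,
                    .size    = size,
            };
            struct fastrpc_req_munmap ureq;

            if (ioctl(fd, FASTRPC_IOCTL_MMAP, &mreq))
                    return -1;

            /* mreq.vaddrout now holds the remote address chosen by the DSP */
            ureq.vaddrout = mreq.vaddrout;
            ureq.size     = mreq.size;
            return ioctl(fd, FASTRPC_IOCTL_MUNMAP, &ureq);
    }
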
@@ -1430,8 +1617,8 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return -ENOMEM;
data->miscdev.minor = MISC_DYNAMIC_MINOR;
- data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s",
- domains[domain_id]);
+ data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
+ domains[domain_id]);
data->miscdev.fops = &fastrpc_fops;
err = misc_register(&data->miscdev);
if (err)
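
The devm_kasprintf() conversion above is a small leak fix in disguise: the name string's lifetime is now tied to the underlying struct device, so no kfree() is needed on the probe error path or at remove time. The managed pattern, as a minimal sketch (driver names hypothetical):

    static int demo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            const char *name;

            /* Freed automatically when the device is unbound - no kfree(). */
            name = devm_kasprintf(dev, GFP_KERNEL, "demo-%d", pdev->id);
            if (!name)
                    return -ENOMEM;

            dev_info(dev, "registered %s\n", name);
            return 0;
    }
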
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 0e34c0568fed..040a0bda3125 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -1215,34 +1215,13 @@ static long genwqe_ioctl(struct file *filp, unsigned int cmd,
return rc;
}
-#if defined(CONFIG_COMPAT)
-/**
- * genwqe_compat_ioctl() - Compatibility ioctl
- *
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/genwqe<n>_card.
- *
- * @filp: file pointer.
- * @cmd: command.
- * @arg: user argument.
- * Return: zero on success or negative number on failure.
- */
-static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- return genwqe_ioctl(filp, cmd, arg);
-}
-#endif /* defined(CONFIG_COMPAT) */
-
static const struct file_operations genwqe_fops = {
.owner = THIS_MODULE,
.open = genwqe_open,
.fasync = genwqe_fasync,
.mmap = genwqe_mmap,
.unlocked_ioctl = genwqe_ioctl,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = genwqe_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.release = genwqe_release,
};
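
compat_ptr_ioctl() is the generic helper for exactly this case: a driver whose ioctl argument structures have the same layout for 32-bit and 64-bit user space, so the only work left is the pointer conversion. It is roughly equivalent to the wrapper deleted above (a sketch, not the verbatim kernel implementation):

    static long demo_compat_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
    {
            if (!file->f_op->unlocked_ioctl)
                    return -ENOIOCTLCMD;

            /* Convert the 32-bit user pointer, then reuse the native path. */
            return file->f_op->unlocked_ioctl(file, cmd,
                                              (unsigned long)compat_ptr(arg));
    }
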
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index a9ac045dcfde..8850f475a413 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -65,6 +65,18 @@ static void cs_put(struct hl_cs *cs)
kref_put(&cs->refcount, cs_do_release);
}
+static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
+{
+ /*
+ * A patched CB is created for external queue jobs, and for H/W queue
+ * jobs if the user CB was allocated by the driver and the MMU is disabled.
+ */
+ return (job->queue_type == QUEUE_TYPE_EXT ||
+ (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb &&
+ !hdev->mmu_enable));
+}
+
/*
* cs_parser - parse the user command submission
*
@@ -91,11 +103,13 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
parser.patched_cb = NULL;
parser.user_cb = job->user_cb;
parser.user_cb_size = job->user_cb_size;
- parser.ext_queue = job->ext_queue;
+ parser.queue_type = job->queue_type;
+ parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
job->patched_cb = NULL;
rc = hdev->asic_funcs->cs_parser(hdev, &parser);
- if (job->ext_queue) {
+
+ if (is_cb_patched(hdev, job)) {
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
@@ -124,7 +138,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job)) {
hl_userptr_delete_list(hdev, &job->userptr_list);
/*
@@ -140,6 +154,19 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
}
}
+ /* For H/W queue jobs, if the user CB was allocated by the driver and
+ * the MMU is enabled, the user CB isn't released in cs_parser() and
+ * thus should be released here.
+ */
+ if (job->queue_type == QUEUE_TYPE_HW &&
+ job->is_kernel_allocated_cb && hdev->mmu_enable) {
+ spin_lock(&job->user_cb->lock);
+ job->user_cb->cs_cnt--;
+ spin_unlock(&job->user_cb->lock);
+
+ hl_cb_put(job->user_cb);
+ }
+
/*
* This is the only place where there can be multiple threads
* modifying the list at the same time
@@ -150,7 +177,8 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
hl_debugfs_remove_job(hdev, job);
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_put(cs);
kfree(job);
@@ -387,18 +415,13 @@ static void job_wq_completion(struct work_struct *work)
free_job(hdev, job);
}
-static struct hl_cb *validate_queue_index(struct hl_device *hdev,
- struct hl_cb_mgr *cb_mgr,
- struct hl_cs_chunk *chunk,
- bool *ext_queue)
+static int validate_queue_index(struct hl_device *hdev,
+ struct hl_cs_chunk *chunk,
+ enum hl_queue_type *queue_type,
+ bool *is_kernel_allocated_cb)
{
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
- u32 cb_handle;
- struct hl_cb *cb;
-
- /* Assume external queue */
- *ext_queue = true;
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
@@ -406,20 +429,29 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
- return NULL;
+ return -EINVAL;
}
if (hw_queue_prop->driver_only) {
dev_err(hdev->dev,
"Queue index %d is restricted for the kernel driver\n",
chunk->queue_index);
- return NULL;
- } else if (hw_queue_prop->type == QUEUE_TYPE_INT) {
- *ext_queue = false;
- return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
+ return -EINVAL;
}
- /* Retrieve CB object */
+ *queue_type = hw_queue_prop->type;
+ *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+
+ return 0;
+}
+
+static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
+ struct hl_cb_mgr *cb_mgr,
+ struct hl_cs_chunk *chunk)
+{
+ struct hl_cb *cb;
+ u32 cb_handle;
+
cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
cb = hl_cb_get(hdev, cb_mgr, cb_handle);
@@ -444,7 +476,8 @@ release_cb:
return NULL;
}
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
{
struct hl_cs_job *job;
@@ -452,12 +485,14 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
if (!job)
return NULL;
- job->ext_queue = ext_queue;
+ job->queue_type = queue_type;
+ job->is_kernel_allocated_cb = is_kernel_allocated_cb;
- if (job->ext_queue) {
+ if (is_cb_patched(hdev, job))
INIT_LIST_HEAD(&job->userptr_list);
+
+ if (job->queue_type == QUEUE_TYPE_EXT)
INIT_WORK(&job->finish_work, job_wq_completion);
- }
return job;
}
@@ -470,7 +505,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
- bool ext_queue_present = false;
+ bool int_queues_only = true;
u32 size_to_copy;
int rc, i, parse_cnt;
@@ -514,23 +549,33 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
- bool ext_queue;
+ enum hl_queue_type queue_type;
+ bool is_kernel_allocated_cb;
+
+ rc = validate_queue_index(hdev, chunk, &queue_type,
+ &is_kernel_allocated_cb);
+ if (rc)
+ goto free_cs_object;
- cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
- &ext_queue);
- if (ext_queue) {
- ext_queue_present = true;
+ if (is_kernel_allocated_cb) {
+ cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
rc = -EINVAL;
goto free_cs_object;
}
+ } else {
+ cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
}
- job = hl_cs_allocate_job(hdev, ext_queue);
+ if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+ int_queues_only = false;
+
+ job = hl_cs_allocate_job(hdev, queue_type,
+ is_kernel_allocated_cb);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
- if (ext_queue)
+ if (is_kernel_allocated_cb)
goto release_cb;
else
goto free_cs_object;
@@ -540,7 +585,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
job->cs = cs;
job->user_cb = cb;
job->user_cb_size = chunk->cb_size;
- if (job->ext_queue)
+ if (is_kernel_allocated_cb)
job->job_cb_size = cb->size;
else
job->job_cb_size = chunk->cb_size;
@@ -553,10 +598,11 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/*
* Increment CS reference. When CS reference is 0, CS is
* done and can be signaled to user and free all its resources
- * Only increment for JOB on external queues, because only
- * for those JOBs we get completion
+ * Only increment for JOB on external or H/W queues, because
+ * only for those JOBs we get completion
*/
- if (job->ext_queue)
+ if (job->queue_type == QUEUE_TYPE_EXT ||
+ job->queue_type == QUEUE_TYPE_HW)
cs_get(cs);
hl_debugfs_add_job(hdev, job);
@@ -570,9 +616,9 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
}
}
- if (!ext_queue_present) {
+ if (int_queues_only) {
dev_err(hdev->dev,
- "Reject CS %d.%llu because no external queues jobs\n",
+ "Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL;
goto free_cs_object;
@@ -580,9 +626,10 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
- dev_err(hdev->dev,
- "Failed to submit CS %d.%llu to H/W queues, error %d\n",
- cs->ctx->asid, cs->sequence, rc);
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ cs->ctx->asid, cs->sequence, rc);
goto free_cs_object;
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 87f37ac31ccd..20413e350343 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -307,45 +307,57 @@ static inline u64 get_hop0_addr(struct hl_ctx *ctx)
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
+ u64 virt_addr, u64 mask, u64 shift)
{
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP0_MASK) >> HOP0_SHIFT);
+ ((virt_addr & mask) >> shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP1_MASK) >> HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask,
+ mmu_specs->hop0_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP2_MASK) >> HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask,
+ mmu_specs->hop1_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP3_MASK) >> HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask,
+ mmu_specs->hop2_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
- u64 virt_addr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
{
- return hop_addr + ctx->hdev->asic_prop.mmu_pte_size *
- ((virt_addr & HOP4_MASK) >> HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask,
+ mmu_specs->hop3_shift);
+}
+
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_specs,
+ u64 hop_addr, u64 vaddr)
+{
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask,
+ mmu_specs->hop4_shift);
}
static inline u64 get_next_hop_addr(u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
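
The refactored helpers all reduce to pte_addr = hop_addr + pte_size * ((vaddr & mask) >> shift); a worked example with hypothetical numbers (a 9-bit hop index in virtual-address bits 20:12, 8-byte PTEs):

    u64 hop_addr = 0x8000000ULL;    /* base of this hop's translation table */
    u64 vaddr    = 0x1234567ULL;
    u64 mask     = 0x1ff000ULL;     /* bits 20:12 */
    u64 shift    = 12;
    u64 pte_size = 8;

    /* (0x1234567 & 0x1ff000) >> 12 == 0x34, so the PTE sits at
     * 0x8000000 + 8 * 0x34 == 0x80001a0. */
    u64 pte_addr = hop_addr + pte_size * ((vaddr & mask) >> shift);
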
@@ -355,7 +367,10 @@ static int mmu_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
struct hl_ctx *ctx;
+ bool is_dram_addr;
u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0,
hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0,
@@ -377,33 +392,39 @@ static int mmu_show(struct seq_file *s, void *data)
return 0;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* the following lookup is copied from unmap() in mmu.c */
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
hop1_addr = get_next_hop_addr(hop0_pte);
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
hop2_addr = get_next_hop_addr(hop1_pte);
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
hop3_addr = get_next_hop_addr(hop2_pte);
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
if (!(hop3_pte & LAST_MASK)) {
@@ -412,7 +433,8 @@ static int mmu_show(struct seq_file *s, void *data)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
if (!(hop4_pte & PAGE_PRESENT_MASK))
goto not_mapped;
@@ -506,6 +528,12 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't check device idle during reset\n");
+ return 0;
+ }
+
hdev->asic_funcs->is_device_idle(hdev, NULL, s);
return 0;
@@ -534,41 +562,50 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
u64 *phys_addr)
{
struct hl_ctx *ctx = hdev->compute_ctx;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop_addr, hop_pte_addr, hop_pte;
- u64 offset_mask = HOP4_MASK | OFFSET_MASK;
+ u64 offset_mask = HOP4_MASK | FLAGS_MASK;
int rc = 0;
+ bool is_dram_addr;
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
}
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
mutex_lock(&ctx->mmu_lock);
/* hop 0 */
hop_addr = get_hop0_addr(ctx);
- hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 1 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 2 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
/* hop 3 */
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
if (!(hop_pte & LAST_MASK)) {
@@ -576,10 +613,11 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
hop_addr = get_next_hop_addr(hop_pte);
if (hop_addr == ULLONG_MAX)
goto not_mapped;
- hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
+ hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr,
+ virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
- offset_mask = OFFSET_MASK;
+ offset_mask = FLAGS_MASK;
}
if (!(hop_pte & PAGE_PRESENT_MASK))
@@ -608,6 +646,11 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
u32 val;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
+ return 0;
+ }
+
if (*ppos)
return 0;
@@ -637,6 +680,11 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
+ return 0;
+ }
+
rc = kstrtouint_from_user(buf, count, 16, &value);
if (rc)
return rc;
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 459fee70a597..b155e9549076 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -42,12 +42,10 @@ static void hpriv_release(struct kref *ref)
{
struct hl_fpriv *hpriv;
struct hl_device *hdev;
- struct hl_ctx *ctx;
hpriv = container_of(ref, struct hl_fpriv, refcount);
hdev = hpriv->hdev;
- ctx = hpriv->ctx;
put_pid(hpriv->taskpid);
@@ -889,13 +887,19 @@ again:
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
- /* Kill processes here after CS rollback. This is because the process
- * can't really exit until all its CSs are done, which is what we
- * do in cs rollback
- */
- if (from_hard_reset_thread)
+ if (hard_reset) {
+ /* Kill processes here after CS rollback. This is because the
+ * process can't really exit until all its CSs are done, which
+ * is what we do in cs rollback
+ */
device_kill_open_processes(hdev);
+ /* Flush the Event queue workers to make sure no other thread is
+ * reading or writing to registers during the reset
+ */
+ flush_workqueue(hdev->eq_wq);
+ }
+
/* Release kernel context */
if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
hdev->kernel_ctx = NULL;
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index ea2ca67fbfbf..f5bd03171dac 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -143,10 +143,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
if (!rc) {
- if (result == ARMCP_PACKET_FENCE_VAL)
- dev_info(hdev->dev,
- "queue test on CPU queue succeeded\n");
- else
+ if (result != ARMCP_PACKET_FENCE_VAL)
dev_err(hdev->dev,
"CPU queue test failed (0x%08lX)\n", result);
} else {
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 6fba14b81f90..c8d16aa4382c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -72,6 +72,9 @@
*
*/
+#define GOYA_UBOOT_FW_FILE "habanalabs/goya/goya-u-boot.bin"
+#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
+
#define GOYA_MMU_REGS_NUM 63
#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
@@ -337,17 +340,20 @@ void goya_get_fixed_properties(struct hl_device *hdev)
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 1;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
prop->hw_queues_props[i].driver_only = 1;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
NUMBER_OF_INT_HW_QUEUES; i++) {
prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
}
for (; i < HL_MAX_QUEUES; i++)
@@ -377,6 +383,23 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
+ prop->dmmu.hop0_shift = HOP0_SHIFT;
+ prop->dmmu.hop1_shift = HOP1_SHIFT;
+ prop->dmmu.hop2_shift = HOP2_SHIFT;
+ prop->dmmu.hop3_shift = HOP3_SHIFT;
+ prop->dmmu.hop4_shift = HOP4_SHIFT;
+ prop->dmmu.hop0_mask = HOP0_MASK;
+ prop->dmmu.hop1_mask = HOP1_MASK;
+ prop->dmmu.hop2_mask = HOP2_MASK;
+ prop->dmmu.hop3_mask = HOP3_MASK;
+ prop->dmmu.hop4_mask = HOP4_MASK;
+ prop->dmmu.huge_page_size = PAGE_SIZE_2MB;
+
+ /* No difference between PMMU and DMMU except for the page size */
+ memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
prop->va_space_host_start_address = VA_HOST_SPACE_START;
prop->va_space_host_end_address = VA_HOST_SPACE_END;
prop->va_space_dram_start_address = VA_DDR_SPACE_START;
@@ -393,6 +416,9 @@ void goya_get_fixed_properties(struct hl_device *hdev)
prop->tpc_enabled_mask = TPC_ENABLED_MASK;
prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->armcp_info.card_name, GOYA_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
}
/*
@@ -1454,6 +1480,9 @@ static void goya_init_golden_registers(struct hl_device *hdev)
1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
+
+ WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
+ ICACHE_FETCH_LINE_NUM, 2);
}
WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
@@ -1533,7 +1562,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
u32 mtr_base_lo, mtr_base_hi;
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
- u64 qman_base_addr;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
@@ -1545,9 +1573,6 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
gic_base_hi =
upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
- qman_base_addr = hdev->asic_prop.sram_base_address +
- MME_QMAN_BASE_OFFSET;
-
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
@@ -2141,13 +2166,11 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
*/
static int goya_push_uboot_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_UBOOT_FW_FILE, dst);
}
/*
@@ -2160,13 +2183,11 @@ static int goya_push_uboot_to_device(struct hl_device *hdev)
*/
static int goya_push_linux_to_device(struct hl_device *hdev)
{
- char fw_name[200];
void __iomem *dst;
- snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, fw_name, dst);
+ return hl_fw_push_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
}
static int goya_pldm_init_cpu(struct hl_device *hdev)
@@ -2291,6 +2312,10 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
10000,
cpu_timeout);
+ /* Read U-Boot version now in case we will later fail */
+ goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
+ goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
if (rc) {
dev_err(hdev->dev, "Error in ARM u-boot!");
switch (status) {
@@ -2328,6 +2353,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
"ARM status %d - u-boot stopped by user\n",
status);
break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "ARM status %d - Thermal Sensor initialization failed\n",
+ status);
+ break;
default:
dev_err(hdev->dev,
"ARM status %d - Invalid status code\n",
@@ -2337,10 +2367,6 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
- /* Read U-Boot version now in case we will later fail */
- goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
- goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
-
if (!hdev->fw_loading) {
dev_info(hdev->dev, "Skip loading FW\n");
goto out;
@@ -2453,7 +2479,8 @@ int goya_mmu_init(struct hl_device *hdev)
WREG32_AND(mmSTLB_STLB_FEATURE_EN,
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
@@ -2978,9 +3005,6 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EIO;
- } else {
- dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
- hw_queue_id);
}
free_pkt:
@@ -3925,7 +3949,7 @@ static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
return 0;
dev_err(hdev->dev,
- "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
+ "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
parser->user_cb, parser->user_cb_size);
return -EFAULT;
@@ -3935,7 +3959,7 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
struct goya_device *goya = hdev->asic_specific;
- if (!parser->ext_queue)
+ if (parser->queue_type == QUEUE_TYPE_INT)
return goya_parse_cb_no_ext_queue(hdev, parser);
if (goya->hw_cap_initialized & HW_CAP_MMU)
@@ -4606,7 +4630,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
lin_dma_pkt++;
} while (--lin_dma_pkts_cnt);
- job = hl_cs_allocate_job(hdev, true);
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
if (!job) {
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -4835,13 +4859,15 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
+static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+ u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -4880,7 +4906,8 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
u32 status, timeout_usec, inv_data, pi;
int rc;
- if (!(goya->hw_cap_initialized & HW_CAP_MMU))
+ if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
return;
/* no need in L1 only invalidation in Goya */
@@ -5137,7 +5164,8 @@ static const struct hl_asic_funcs goya_funcs = {
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
.wreg = hl_wreg,
- .halt_coresight = goya_halt_coresight
+ .halt_coresight = goya_halt_coresight,
+ .get_clk_rate = goya_get_clk_rate
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 89b6574f8e4f..c3230cb6e25c 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -233,4 +233,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index b4d406af1bed..c1ee6e2b5dff 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -8,6 +8,7 @@
#include "goyaP.h"
#include "include/goya/goya_coresight.h"
#include "include/goya/asic_reg/goya_regs.h"
+#include "include/goya/asic_reg/goya_masks.h"
#include <uapi/misc/habanalabs.h>
@@ -377,33 +378,32 @@ static int goya_config_etr(struct hl_device *hdev,
struct hl_debug_params *params)
{
struct hl_debug_params_etr *input;
- u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
u32 val;
int rc;
- WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+ WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
- val = RREG32(base_reg + 0x304);
+ val = RREG32(mmPSOC_ETR_FFCR);
val |= 0x1000;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
val |= 0x40;
- WREG32(base_reg + 0x304, val);
+ WREG32(mmPSOC_ETR_FFCR, val);
- rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ rc = goya_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
if (rc) {
dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
params->enable ? "enable" : "disable", rc);
return rc;
}
- WREG32(base_reg + 0x20, 0);
+ WREG32(mmPSOC_ETR_CTL, 0);
if (params->enable) {
input = params->input;
@@ -423,25 +423,26 @@ static int goya_config_etr(struct hl_device *hdev,
return -EINVAL;
}
- WREG32(base_reg + 0x34, 0x3FFC);
- WREG32(base_reg + 0x4, input->buffer_size);
- WREG32(base_reg + 0x28, input->sink_mode);
- WREG32(base_reg + 0x110, 0x700);
- WREG32(base_reg + 0x118,
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ WREG32(mmPSOC_ETR_AXICTL,
+ 0x700 | PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ WREG32(mmPSOC_ETR_DBALO,
lower_32_bits(input->buffer_address));
- WREG32(base_reg + 0x11C,
+ WREG32(mmPSOC_ETR_DBAHI,
upper_32_bits(input->buffer_address));
- WREG32(base_reg + 0x304, 3);
- WREG32(base_reg + 0x308, 0xA);
- WREG32(base_reg + 0x20, 1);
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0xA);
+ WREG32(mmPSOC_ETR_CTL, 1);
} else {
- WREG32(base_reg + 0x34, 0);
- WREG32(base_reg + 0x4, 0x400);
- WREG32(base_reg + 0x118, 0);
- WREG32(base_reg + 0x11C, 0);
- WREG32(base_reg + 0x308, 0);
- WREG32(base_reg + 0x28, 0);
- WREG32(base_reg + 0x304, 0);
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
if (params->output_size >= sizeof(u64)) {
u32 rwp, rwphi;
@@ -451,8 +452,8 @@ static int goya_config_etr(struct hl_device *hdev,
* the buffer is set in the RWP register (lower 32
* bits), and in the RWPHI register (upper 8 bits).
*/
- rwp = RREG32(base_reg + 0x18);
- rwphi = RREG32(base_reg + 0x3c) & 0xff;
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
*(u64 *) params->output = ((u64) rwphi << 32) | rwp;
}
}
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index a2a700c3d597..b2ebc01e27f4 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -32,6 +32,37 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
}
}
+int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+{
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0) {
+ dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
+ value);
+ return value;
+ }
+
+ *max_clk = (value / 1000 / 1000);
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0) {
+ dev_err(hdev->dev,
+ "Failed to retrieve device current clock %ld\n",
+ value);
+ return value;
+ }
+
+ *cur_clk = (value / 1000 / 1000);
+
+ return 0;
+}
+
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 75862be53c60..00c949f4ccd1 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -40,8 +40,6 @@
#define HL_MAX_QUEUES 128
-#define HL_MAX_JOBS_PER_CS 64
-
/* MUST BE POWER OF 2 and larger than 1 */
#define HL_MAX_PENDING_CS 64
@@ -85,12 +83,15 @@ struct hl_fpriv;
* @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
* memories and/or operates the compute engines.
* @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
+ * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
+ * notifications are sent by H/W.
*/
enum hl_queue_type {
QUEUE_TYPE_NA,
QUEUE_TYPE_EXT,
QUEUE_TYPE_INT,
- QUEUE_TYPE_CPU
+ QUEUE_TYPE_CPU,
+ QUEUE_TYPE_HW
};
/**
@@ -98,10 +99,13 @@ enum hl_queue_type {
* @type: queue type.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
+ * @requires_kernel_cb: true if a CB handle must be provided for jobs on this
+ * queue, false otherwise (a CB address must be provided).
*/
struct hw_queue_properties {
enum hl_queue_type type;
u8 driver_only;
+ u8 requires_kernel_cb;
};
/**
@@ -110,8 +114,8 @@ struct hw_queue_properties {
* @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
*/
enum vm_type_t {
- VM_TYPE_USERPTR,
- VM_TYPE_PHYS_PACK
+ VM_TYPE_USERPTR = 0x1,
+ VM_TYPE_PHYS_PACK = 0x2
};
/**
@@ -127,12 +131,44 @@ enum hl_device_hw_state {
};
/**
+ * struct hl_mmu_properties - ASIC specific MMU address translation properties.
+ * @hop0_shift: shift of hop 0 mask.
+ * @hop1_shift: shift of hop 1 mask.
+ * @hop2_shift: shift of hop 2 mask.
+ * @hop3_shift: shift of hop 3 mask.
+ * @hop4_shift: shift of hop 4 mask.
+ * @hop0_mask: mask to get the PTE address in hop 0.
+ * @hop1_mask: mask to get the PTE address in hop 1.
+ * @hop2_mask: mask to get the PTE address in hop 2.
+ * @hop3_mask: mask to get the PTE address in hop 3.
+ * @hop4_mask: mask to get the PTE address in hop 4.
+ * @page_size: default page size used to allocate memory.
+ * @huge_page_size: page size used to allocate memory with huge pages.
+ */
+struct hl_mmu_properties {
+ u64 hop0_shift;
+ u64 hop1_shift;
+ u64 hop2_shift;
+ u64 hop3_shift;
+ u64 hop4_shift;
+ u64 hop0_mask;
+ u64 hop1_mask;
+ u64 hop2_mask;
+ u64 hop3_mask;
+ u64 hop4_mask;
+ u32 page_size;
+ u32 huge_page_size;
+};
+
+/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
* @armcp_info: received various information from ArmCP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
* @preboot_ver: F/W Preboot version.
+ * @dmmu: DRAM MMU address translation properties.
+ * @pmmu: PCI (host) MMU address translation properties.
* @sram_base_address: SRAM physical start address.
* @sram_end_address: SRAM physical end address.
* @sram_user_base_address - SRAM physical start address for user access.
@@ -169,53 +205,55 @@ enum hl_device_hw_state {
* @psoc_pci_pll_nf: PCI PLL NF value.
* @psoc_pci_pll_od: PCI PLL OD value.
* @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
- * @completion_queues_count: number of completion queues.
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
* @tpc_enabled_mask: which TPCs are enabled.
+ * @completion_queues_count: number of completion queues.
*/
struct asic_fixed_properties {
struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
- struct armcp_info armcp_info;
- char uboot_ver[VERSION_MAX_LEN];
- char preboot_ver[VERSION_MAX_LEN];
- u64 sram_base_address;
- u64 sram_end_address;
- u64 sram_user_base_address;
- u64 dram_base_address;
- u64 dram_end_address;
- u64 dram_user_base_address;
- u64 dram_size;
- u64 dram_pci_bar_size;
- u64 max_power_default;
- u64 va_space_host_start_address;
- u64 va_space_host_end_address;
- u64 va_space_dram_start_address;
- u64 va_space_dram_end_address;
- u64 dram_size_for_default_page_mapping;
- u64 pcie_dbi_base_address;
- u64 pcie_aux_dbi_reg_addr;
- u64 mmu_pgt_addr;
- u64 mmu_dram_default_page_addr;
- u32 mmu_pgt_size;
- u32 mmu_pte_size;
- u32 mmu_hop_table_size;
- u32 mmu_hop0_tables_total_size;
- u32 dram_page_size;
- u32 cfg_size;
- u32 sram_size;
- u32 max_asid;
- u32 num_of_events;
- u32 psoc_pci_pll_nr;
- u32 psoc_pci_pll_nf;
- u32 psoc_pci_pll_od;
- u32 psoc_pci_pll_div_factor;
- u32 high_pll;
- u32 cb_pool_cb_cnt;
- u32 cb_pool_cb_size;
- u8 completion_queues_count;
- u8 tpc_enabled_mask;
+ struct armcp_info armcp_info;
+ char uboot_ver[VERSION_MAX_LEN];
+ char preboot_ver[VERSION_MAX_LEN];
+ struct hl_mmu_properties dmmu;
+ struct hl_mmu_properties pmmu;
+ u64 sram_base_address;
+ u64 sram_end_address;
+ u64 sram_user_base_address;
+ u64 dram_base_address;
+ u64 dram_end_address;
+ u64 dram_user_base_address;
+ u64 dram_size;
+ u64 dram_pci_bar_size;
+ u64 max_power_default;
+ u64 va_space_host_start_address;
+ u64 va_space_host_end_address;
+ u64 va_space_dram_start_address;
+ u64 va_space_dram_end_address;
+ u64 dram_size_for_default_page_mapping;
+ u64 pcie_dbi_base_address;
+ u64 pcie_aux_dbi_reg_addr;
+ u64 mmu_pgt_addr;
+ u64 mmu_dram_default_page_addr;
+ u32 mmu_pgt_size;
+ u32 mmu_pte_size;
+ u32 mmu_hop_table_size;
+ u32 mmu_hop0_tables_total_size;
+ u32 dram_page_size;
+ u32 cfg_size;
+ u32 sram_size;
+ u32 max_asid;
+ u32 num_of_events;
+ u32 psoc_pci_pll_nr;
+ u32 psoc_pci_pll_nf;
+ u32 psoc_pci_pll_od;
+ u32 psoc_pci_pll_div_factor;
+ u32 high_pll;
+ u32 cb_pool_cb_cnt;
+ u32 cb_pool_cb_size;
+ u8 tpc_enabled_mask;
+ u8 completion_queues_count;
};
/**
@@ -236,8 +274,6 @@ struct hl_dma_fence {
* Command Buffers
*/
-#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
-
/**
* struct hl_cb_mgr - describes a Command Buffer Manager.
* @cb_lock: protects cb_handles.
@@ -481,8 +517,8 @@ enum hl_pll_frequency {
* @get_events_stat: retrieve event queue entries histogram.
* @read_pte: read MMU page table entry from DRAM.
* @write_pte: write MMU page table entry to DRAM.
- * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
- * hard (L0 & L1) flush.
+ * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
+ * (L1 only) or hard (L0 & L1) flush.
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
@@ -502,6 +538,7 @@ enum hl_pll_frequency {
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
+ * @get_clk_rate: Retrieve the ASIC's current and maximum clock rate, in MHz.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -562,7 +599,8 @@ struct hl_asic_funcs {
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
+ void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ u32 flags);
void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
@@ -584,6 +622,7 @@ struct hl_asic_funcs {
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
+ int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
};
@@ -688,7 +727,7 @@ struct hl_ctx_mgr {
* @sgt: pointer to the scatter-gather table that holds the pages.
* @dir: for DMA unmapping, the direction must be supplied, so save it.
* @debugfs_list: node in debugfs list of command submissions.
- * @addr: user-space virtual pointer to the start of the memory area.
+ * @addr: user-space virtual address of the start of the memory area.
* @size: size of the memory area to pin & map.
* @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
*/
@@ -752,11 +791,14 @@ struct hl_cs {
* @userptr_list: linked-list of userptr mappings that belong to this job and
* wait for completion.
* @debugfs_list: node in debugfs list of command submission jobs.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @id: the id of this job inside a CS.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @job_cb_size: the actual size of the CB that we put on the queue.
- * @ext_queue: whether the job is for external queue or internal queue.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ * handle to a kernel-allocated CB object, false
+ * otherwise (SRAM/DRAM/host address).
*/
struct hl_cs_job {
struct list_head cs_node;
@@ -766,39 +808,44 @@ struct hl_cs_job {
struct work_struct finish_work;
struct list_head userptr_list;
struct list_head debugfs_list;
+ enum hl_queue_type queue_type;
u32 id;
u32 hw_queue_id;
u32 user_cb_size;
u32 job_cb_size;
- u8 ext_queue;
+ u8 is_kernel_allocated_cb;
};
/**
- * struct hl_cs_parser - command submission paerser properties.
+ * struct hl_cs_parser - command submission parser properties.
* @user_cb: the CB we got from the user.
* @patched_cb: in case of patching, this is internal CB which is submitted on
* the queue instead of the CB we got from the IOCTL.
* @job_userptr_list: linked-list of userptr mappings that belong to the related
* job and wait for completion.
* @cs_sequence: the sequence number of the related CS.
+ * @queue_type: the type of the H/W queue this job is submitted to.
* @ctx_id: the ID of the context the related CS belongs to.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
* @patched_cb_size: the size of the CB after parsing.
- * @ext_queue: whether the job is for external queue or internal queue.
* @job_id: the id of the related job inside the related CS.
+ * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
+ * handle to a kernel-allocated CB object, false
+ * otherwise (SRAM/DRAM/host address).
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
struct hl_cb *patched_cb;
struct list_head *job_userptr_list;
u64 cs_sequence;
+ enum hl_queue_type queue_type;
u32 ctx_id;
u32 hw_queue_id;
u32 user_cb_size;
u32 patched_cb_size;
- u8 ext_queue;
u8 job_id;
+ u8 is_kernel_allocated_cb;
};
@@ -1048,9 +1095,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
-#define WREG32_FIELD(reg, field, val) \
- WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
- (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
+ ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
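
The extra offset parameter is what lets one field macro serve every instance of a replicated engine block, as in the TPC loop in goya_init_golden_registers() above. A usage sketch (the stride macro is hypothetical):

    /* Read-modify-write one field in the i-th TPC's register block. */
    u32 offset = i * DEMO_TPC_REG_STRIDE;   /* hypothetical per-instance stride */

    WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset, ICACHE_FETCH_LINE_NUM, 2);
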
/* Timeout should be longer when working with simulator but cap the
* increased timeout to some maximum
@@ -1501,7 +1549,8 @@ int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
void hl_cs_rollback_all(struct hl_device *hdev);
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+ enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
void goya_set_asic_funcs(struct hl_device *hdev);
@@ -1513,7 +1562,7 @@ void hl_vm_fini(struct hl_device *hdev);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
struct hl_userptr *userptr);
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list);
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 66d9c710073c..6474b868ef27 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -60,11 +60,16 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
hw_ip.dram_size = prop->dram_size - dram_kmd_size;
- if (hw_ip.dram_size > 0)
+ if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
hw_ip.num_of_events = prop->num_of_events;
- memcpy(hw_ip.armcp_version,
- prop->armcp_info.armcp_version, VERSION_MAX_LEN);
+
+ memcpy(hw_ip.armcp_version, prop->armcp_info.armcp_version,
+ min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));
+
+ memcpy(hw_ip.card_name, prop->armcp_info.card_name,
+ min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
+
hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
@@ -179,17 +184,14 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
goto out;
}
- if (output) {
- if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
- output,
- args->output_size)) {
- dev_err(hdev->dev,
- "copy to user failed in debug ioctl\n");
- rc = -EFAULT;
- goto out;
- }
+ if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
+ output, args->output_size)) {
+ dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
+ rc = -EFAULT;
+ goto out;
}
+
out:
kfree(params);
kfree(output);
@@ -221,6 +223,41 @@ static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}
+static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_clk_rate clk_rate = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
+ &clk_rate.max_clk_rate_mhz);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &clk_rate,
+ min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
+}
+
+static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_reset_count reset_count = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+
+ return copy_to_user(out, &reset_count,
+ min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -239,6 +276,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DEVICE_STATUS:
return device_status_info(hdev, args);
+ case HL_INFO_RESET_COUNT:
+ return get_reset_count(hdev, args);
+
default:
break;
}
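
Both new handlers follow the driver's info-ioctl convention: copy back at most min(user buffer size, struct size), and turn any partial copy into -EFAULT. The shared shape, sketched with a hypothetical payload struct:

    struct demo_info info = {0};

    /* ... fill info from driver state ... */

    /* Never write more than the caller asked for, nor more than exists;
     * a non-zero copy_to_user() return becomes -EFAULT. */
    return copy_to_user(out, &info,
                        min((size_t) max_size, sizeof(info))) ? -EFAULT : 0;
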
@@ -271,6 +311,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = hw_events_info(hdev, true, args);
break;
+ case HL_INFO_CLK_RATE:
+ rc = get_clk_rate(hdev, args);
+ break;
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
@@ -406,9 +450,8 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = func(hpriv, kdata);
- if (cmd & IOC_OUT)
- if (copy_to_user((void __user *)arg, kdata, usize))
- retcode = -EFAULT;
+ if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
+ retcode = -EFAULT;
out_err:
if (retcode)
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 55b383b2a116..91579dde9262 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -58,8 +58,8 @@ out:
}
/*
- * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
- *
+ * ext_and_hw_queue_submit_bd() - Submit a buffer descriptor to an external or a
+ * H/W queue.
* @hdev: pointer to habanalabs device structure
* @q: pointer to habanalabs queue structure
* @ctl: BD's control word
@@ -73,8 +73,8 @@ out:
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
- u32 ctl, u32 len, u64 ptr)
+static void ext_and_hw_queue_submit_bd(struct hl_device *hdev,
+ struct hl_hw_queue *q, u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
@@ -174,6 +174,45 @@ static int int_queue_sanity_checks(struct hl_device *hdev,
}
/*
+ * hw_queue_sanity_checks() - Perform some sanity checks on a H/W queue.
+ * @hdev: Pointer to hl_device structure.
+ * @q: Pointer to hl_hw_queue structure.
+ * @num_of_entries: How many entries to check for space.
+ *
+ * Perform the following:
+ * - Make sure we have enough space in the completion queue.
+ * This check also ensures that there is enough space in the h/w queue, as
+ * both queues are of the same size.
+ * - Reserve space in the completion queue (needs to be reversed if there
+ * is a failure down the road before the actual submission of work).
+ *
+ * Both operations are done using the "free_slots_cnt" field of the completion
+ * queue. The CI counters of the queue and the completion queue are not
+ * needed/used for the H/W queue type.
+ */
+static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
+ int num_of_entries)
+{
+ atomic_t *free_slots =
+ &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+
+ /*
+ * Check we have enough space in the completion queue.
+ * Decrement the counter by num_of_entries; if the result goes
+ * negative, the CQ is full and we can't submit a new CB, so the
+ * decrement is undone. atomic_add_negative() returns true in that case.
+ */
+ if (atomic_add_negative(num_of_entries * -1, free_slots)) {
+ dev_dbg(hdev->dev, "No space for %d entries on CQ %d\n",
+ num_of_entries, q->hw_queue_id);
+ atomic_add(num_of_entries, free_slots);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
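
The reserve-then-roll-back idiom above is self-contained enough to reuse; a minimal sketch, assuming only an atomic_t slot counter:

    static int reserve_slots(atomic_t *free_slots, int n)
    {
            /* Decrement by n; a negative result means there was not enough
             * room, so undo the decrement and report busy. */
            if (atomic_add_negative(-n, free_slots)) {
                    atomic_add(n, free_slots);
                    return -EAGAIN;
            }
            return 0;
    }
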
+/*
* hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
*
* @hdev: pointer to hl_device structure
@@ -188,7 +227,7 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
u32 cb_size, u64 cb_ptr)
{
struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
- int rc;
+ int rc = 0;
/*
* The CPU queue is a synchronous queue with an effective depth of
@@ -206,11 +245,18 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
goto out;
}
- rc = ext_queue_sanity_checks(hdev, q, 1, false);
- if (rc)
- goto out;
+ /*
+ * hl_hw_queue_send_cb_no_cmpl() is called for H/W-type queues only
+ * during the init phase, when the queues are empty and being tested,
+ * so there is no need for sanity checks.
+ */
+ if (q->queue_type != QUEUE_TYPE_HW) {
+ rc = ext_queue_sanity_checks(hdev, q, 1, false);
+ if (rc)
+ goto out;
+ }
- ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
out:
if (q->queue_type != QUEUE_TYPE_CPU)
@@ -220,14 +266,14 @@ out:
}
/*
- * ext_hw_queue_schedule_job - submit an JOB to an external queue
+ * ext_queue_schedule_job - submit a JOB to an external queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
+static void ext_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -260,7 +306,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
* H/W queues is done under the scheduler mutex
*
* No need to check if CQ is full because it was already
- * checked in hl_queue_sanity_checks
+ * checked in ext_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->hw_queue_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
@@ -274,18 +320,18 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
cq->pi = hl_cq_inc_ptr(cq->pi);
- ext_queue_submit_bd(hdev, q, ctl, len, ptr);
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
- * int_hw_queue_schedule_job - submit an JOB to an internal queue
+ * int_queue_schedule_job - submit a JOB to an internal queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
-static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+static void int_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
@@ -308,6 +354,60 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
}
/*
+ * hw_queue_schedule_job - submit a JOB to a H/W queue
+ *
+ * @job: pointer to the job that needs to be submitted to the queue
+ *
+ * This function must be called when the scheduler mutex is taken
+ *
+ */
+static void hw_queue_schedule_job(struct hl_cs_job *job)
+{
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_cq *cq;
+ u64 ptr;
+ u32 offset, ctl, len;
+
+ /*
+ * Upon PQE completion, COMP_DATA is used as the write data to the
+ * completion queue (QMAN HBW message), and COMP_OFFSET is used as the
+ * write address offset in the SM block (QMAN LBW message).
+ * The write address offset is calculated as "COMP_OFFSET << 2".
+ */
+ offset = job->cs->sequence & (HL_MAX_PENDING_CS - 1);
+ ctl = ((offset << BD_CTL_COMP_OFFSET_SHIFT) & BD_CTL_COMP_OFFSET_MASK) |
+ ((q->pi << BD_CTL_COMP_DATA_SHIFT) & BD_CTL_COMP_DATA_MASK);
+
+ len = job->job_cb_size;
+
+ /*
+ * A patched CB is created only if the user CB was allocated by the
+ * driver and the MMU is disabled. If the MMU is enabled, the user CB
+ * should be used instead. If the user CB wasn't allocated by the
+ * driver, assume that it holds an address.
+ */
+ if (job->patched_cb)
+ ptr = job->patched_cb->bus_address;
+ else if (job->is_kernel_allocated_cb)
+ ptr = job->user_cb->bus_address;
+ else
+ ptr = (u64) (uintptr_t) job->user_cb;
+
+ /*
+ * No need to protect pi_offset because scheduling to the
+ * H/W queues is done under the scheduler mutex
+ *
+ * No need to check if CQ is full because it was already
+ * checked in hw_queue_sanity_checks
+ */
+ cq = &hdev->completion_queue[q->hw_queue_id];
+ cq->pi = hl_cq_inc_ptr(cq->pi);
+
+ ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
+}
+
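
Because HL_MAX_PENDING_CS is a power of two (64), the AND above is a cheap modulo on the CS sequence number; a quick worked example:

    u64 sequence = 130;                     /* CS sequence number */
    u32 offset   = sequence & (64 - 1);     /* 130 % 64 == 2 */

    /* Per the comment above, the H/W then writes the completion at SM
     * byte offset (offset << 2) == 8. */
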
+/*
* hl_hw_queue_schedule_cs - schedule a command submission
*
* @job : pointer to the CS
@@ -330,23 +430,34 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
q = &hdev->kernel_queues[0];
- /* This loop assumes all external queues are consecutive */
for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
- if (q->queue_type == QUEUE_TYPE_EXT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ if (cs->jobs_in_queue_cnt[i]) {
+ switch (q->queue_type) {
+ case QUEUE_TYPE_EXT:
rc = ext_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i], true);
- if (rc)
- goto unroll_cq_resv;
- cq_cnt++;
- }
- } else if (q->queue_type == QUEUE_TYPE_INT) {
- if (cs->jobs_in_queue_cnt[i]) {
+ cs->jobs_in_queue_cnt[i], true);
+ break;
+ case QUEUE_TYPE_INT:
rc = int_queue_sanity_checks(hdev, q,
- cs->jobs_in_queue_cnt[i]);
- if (rc)
- goto unroll_cq_resv;
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_sanity_checks(hdev, q,
+ cs->jobs_in_queue_cnt[i]);
+ break;
+ default:
+ dev_err(hdev->dev, "Queue type %d is invalid\n",
+ q->queue_type);
+ rc = -EINVAL;
+ break;
}
+
+ if (rc)
+ goto unroll_cq_resv;
+
+ if (q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW)
+ cq_cnt++;
}
}
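Note that only EXT and HW queues bump cq_cnt here: those two types reserve a completion-queue slot per job, and the unroll_cq_resv path below returns exactly those reservations through free_slots_cnt when a sanity check fails.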
@@ -373,21 +484,30 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- if (job->ext_queue)
- ext_hw_queue_schedule_job(job);
- else
- int_hw_queue_schedule_job(job);
+ switch (job->queue_type) {
+ case QUEUE_TYPE_EXT:
+ ext_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_INT:
+ int_queue_schedule_job(job);
+ break;
+ case QUEUE_TYPE_HW:
+ hw_queue_schedule_job(job);
+ break;
+ default:
+ break;
+ }
cs->submitted = true;
goto out;
unroll_cq_resv:
- /* This loop assumes all external queues are consecutive */
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
- if ((q->queue_type == QUEUE_TYPE_EXT) &&
- (cs->jobs_in_queue_cnt[i])) {
+ if ((q->queue_type == QUEUE_TYPE_EXT ||
+ q->queue_type == QUEUE_TYPE_HW) &&
+ cs->jobs_in_queue_cnt[i]) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
@@ -414,8 +534,8 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
q->ci = hl_queue_inc_ptr(q->ci);
}
-static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
- struct hl_hw_queue *q, bool is_cpu_queue)
+static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+ bool is_cpu_queue)
{
void *p;
int rc;
@@ -465,7 +585,7 @@ free_queue:
return rc;
}
-static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int int_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
@@ -485,18 +605,38 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
return 0;
}
-static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+{
+ return ext_and_cpu_queue_init(hdev, q, true);
+}
+
+static int ext_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, true);
+ return ext_and_cpu_queue_init(hdev, q, false);
}
-static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
+static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
- return ext_and_cpu_hw_queue_init(hdev, q, false);
+ void *p;
+
+ p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_QUEUE_SIZE_IN_BYTES,
+ &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p)
+ return -ENOMEM;
+
+ q->kernel_address = (u64) (uintptr_t) p;
+
+ /* Make sure read/write pointers are initialized to start of queue */
+ q->ci = 0;
+ q->pi = 0;
+
+ return 0;
}
/*
- * hw_queue_init - main initialization function for H/W queue object
+ * queue_init - main initialization function for H/W queue object
*
* @hdev: pointer to hl_device device structure
* @q: pointer to hl_hw_queue queue structure
@@ -505,7 +645,7 @@ static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
* Allocate dma-able memory for the queue and initialize fields
* Returns 0 on success
*/
-static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
+static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
u32 hw_queue_id)
{
int rc;
@@ -516,21 +656,20 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
switch (q->queue_type) {
case QUEUE_TYPE_EXT:
- rc = ext_hw_queue_init(hdev, q);
+ rc = ext_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_INT:
- rc = int_hw_queue_init(hdev, q);
+ rc = int_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_CPU:
- rc = cpu_hw_queue_init(hdev, q);
+ rc = cpu_queue_init(hdev, q);
+ break;
+ case QUEUE_TYPE_HW:
+ rc = hw_queue_init(hdev, q);
break;
-
case QUEUE_TYPE_NA:
q->valid = 0;
return 0;
-
default:
dev_crit(hdev->dev, "wrong queue type %d during init\n",
q->queue_type);
@@ -554,7 +693,7 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
*
* Free the queue memory
*/
-static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
+static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
if (!q->valid)
return;
@@ -612,7 +751,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
q->queue_type = asic->hw_queues_props[i].type;
- rc = hw_queue_init(hdev, q, i);
+ rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
@@ -624,7 +763,7 @@ int hl_hw_queues_create(struct hl_device *hdev)
release_queues:
for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
@@ -637,7 +776,7 @@ void hl_hw_queues_destroy(struct hl_device *hdev)
int i;
for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
- hw_queue_fini(hdev, q);
+ queue_fini(hdev, q);
kfree(hdev->kernel_queues);
}
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 8618891d5afa..3c44ef3a23ed 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -260,4 +260,6 @@
#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+
#endif /* ASIC_REG_GOYA_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 19b0f0ef1d0b..fce490e6a231 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -84,6 +84,7 @@
#include "tpc6_rtr_regs.h"
#include "tpc7_nrtr_regs.h"
#include "tpc0_eml_cfg_regs.h"
+#include "psoc_etr_regs.h"
#include "psoc_global_conf_masks.h"
#include "dma_macro_masks.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..b7c33e025db5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 4cd04c090285..2853a2de8cf6 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -20,6 +20,8 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_DRAM_INIT_FAIL,
CPU_BOOT_STATUS_FIT_CORRUPTED,
CPU_BOOT_STATUS_UBOOT_NOT_READY,
+ CPU_BOOT_STATUS_RESERVED,
+ CPU_BOOT_STATUS_TS_INIT_FAIL,
};
enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index 71ea3c3e8ba3..a6851a9d3f03 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -12,18 +12,16 @@
#define PAGE_SHIFT_2MB 21
#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB)
#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB)
-#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1))
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
#define LAST_MASK 0x0000000000800ull
-#define PHYS_ADDR_MASK 0xFFFFFFFFFFFFF000ull
#define HOP0_MASK 0x3000000000000ull
#define HOP1_MASK 0x0FF8000000000ull
#define HOP2_MASK 0x0007FC0000000ull
#define HOP3_MASK 0x000003FE00000ull
#define HOP4_MASK 0x00000001FF000ull
-#define OFFSET_MASK 0x0000000000FFFull
+#define FLAGS_MASK 0x0000000000FFFull
#define HOP0_SHIFT 48
#define HOP1_SHIFT 39
@@ -31,8 +29,7 @@
#define HOP3_SHIFT 21
#define HOP4_SHIFT 12
-#define PTE_PHYS_ADDR_SHIFT 12
-#define PTE_PHYS_ADDR_MASK ~OFFSET_MASK
+#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
#define HOP_TABLE_SIZE PAGE_SIZE_4KB
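The rename from OFFSET_MASK to FLAGS_MASK reflects what the low 12 bits of a hop PTE actually hold: flag bits (PAGE_PRESENT_MASK, SWAP_OUT_MASK and LAST_MASK all fall inside 0xFFF), not a page offset. A standalone sketch of the address decode these masks implement (HOP2_SHIFT is outside this hunk and is inferred as 30 from HOP2_MASK):

#include <stdint.h>
#include <stdio.h>

#define HOP0_MASK	0x3000000000000ull
#define HOP1_MASK	0x0FF8000000000ull
#define HOP2_MASK	0x0007FC0000000ull
#define HOP3_MASK	0x000003FE00000ull
#define HOP4_MASK	0x00000001FF000ull
#define FLAGS_MASK	0x0000000000FFFull
#define HOP_PHYS_ADDR_MASK	(~FLAGS_MASK)

#define HOP0_SHIFT	48
#define HOP1_SHIFT	39
#define HOP2_SHIFT	30	/* inferred from HOP2_MASK */
#define HOP3_SHIFT	21
#define HOP4_SHIFT	12

int main(void)
{
	uint64_t va = 0x123456789ABCull;	/* arbitrary example VA */
	uint64_t pte = 0xABCDE5000801ull;	/* example PTE: present + last */

	/* a VA splits into five page-table indices plus a 12-bit offset */
	printf("hop0 %llu hop1 %llu hop2 %llu hop3 %llu hop4 %llu\n",
		(unsigned long long)((va & HOP0_MASK) >> HOP0_SHIFT),
		(unsigned long long)((va & HOP1_MASK) >> HOP1_SHIFT),
		(unsigned long long)((va & HOP2_MASK) >> HOP2_SHIFT),
		(unsigned long long)((va & HOP3_MASK) >> HOP3_SHIFT),
		(unsigned long long)((va & HOP4_MASK) >> HOP4_SHIFT));

	/* a PTE splits into the next-hop physical address and flag bits */
	printf("next hop 0x%llx flags 0x%llx\n",
		(unsigned long long)(pte & HOP_PHYS_ADDR_MASK),
		(unsigned long long)(pte & FLAGS_MASK));
	return 0;
}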
diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h
index bf59bbe27fdc..0fdb49188ed7 100644
--- a/drivers/misc/habanalabs/include/qman_if.h
+++ b/drivers/misc/habanalabs/include/qman_if.h
@@ -23,6 +23,8 @@ struct hl_bd {
#define HL_BD_SIZE sizeof(struct hl_bd)
/*
+ * S/W CTL FIELDS
+ *
* BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is
* valid. 1 means the repeat field is valid, 0 means not-valid,
* i.e. repeat == 1
@@ -34,6 +36,16 @@ struct hl_bd {
#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF
/*
+ * H/W CTL FIELDS
+ */
+
+#define BD_CTL_COMP_OFFSET_SHIFT 16
+#define BD_CTL_COMP_OFFSET_MASK 0x00FF0000
+
+#define BD_CTL_COMP_DATA_SHIFT 0
+#define BD_CTL_COMP_DATA_MASK 0x0000FFFF
+
+/*
* COMPLETION QUEUE
*/
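The split this comment formalizes is worth spelling out: the same 32-bit BD ctl word is interpreted two ways. For driver-managed (S/W) queues the low 12 bits carry the shadow index (BD_CTL_SHADOW_INDEX_MASK), while for H/W-managed queues bits 23:16 carry the completion write offset and bits 15:0 the completion data, matching the packing done in hw_queue_schedule_job() above.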
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 365fb0cb8dff..6c72cb4eff54 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/genalloc.h>
-#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT)
#define HL_MMU_DEBUG 0
/*
@@ -159,20 +158,19 @@ pages_pack_err:
}
/*
- * get_userptr_from_host_va - initialize userptr structure from given host
- * virtual address
- *
- * @hdev : habanalabs device structure
- * @args : parameters containing the virtual address and size
- * @p_userptr : pointer to result userptr structure
+ * dma_map_host_va - DMA mapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @p_userptr: pointer to result userptr structure
*
* This function does the following:
* - Allocate userptr structure
* - Pin the given host memory using the userptr structure
* - Perform DMA mapping to have the DMA addresses of the pages
*/
-static int get_userptr_from_host_va(struct hl_device *hdev,
- struct hl_mem_in *args, struct hl_userptr **p_userptr)
+static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
+ struct hl_userptr **p_userptr)
{
struct hl_userptr *userptr;
int rc;
@@ -183,8 +181,7 @@ static int get_userptr_from_host_va(struct hl_device *hdev,
goto userptr_err;
}
- rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr,
- args->map_host.mem_size, userptr);
+ rc = hl_pin_host_memory(hdev, addr, size, userptr);
if (rc) {
dev_err(hdev->dev, "Failed to pin host memory\n");
goto pin_err;
@@ -215,16 +212,16 @@ userptr_err:
}
/*
- * free_userptr - free userptr structure
- *
- * @hdev : habanalabs device structure
- * @userptr : userptr to free
+ * dma_unmap_host_va - DMA unmapping of the given host virtual address.
+ * @hdev: habanalabs device structure
+ * @userptr: userptr to free
*
* This function does the following:
* - Unpins the physical pages
* - Frees the userptr structure
*/
-static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
+static void dma_unmap_host_va(struct hl_device *hdev,
+ struct hl_userptr *userptr)
{
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
@@ -253,10 +250,9 @@ static void dram_pg_pool_do_release(struct kref *ref)
}
/*
- * free_phys_pg_pack - free physical page pack
- *
- * @hdev : habanalabs device structure
- * @phys_pg_pack : physical page pack to free
+ * free_phys_pg_pack - free physical page pack
+ * @hdev: habanalabs device structure
+ * @phys_pg_pack: physical page pack to free
*
* This function does the following:
* - For DRAM memory only, iterate over the pack and free each physical block
@@ -264,7 +260,7 @@ static void dram_pg_pool_do_release(struct kref *ref)
* - Free the hl_vm_phys_pg_pack structure
*/
static void free_phys_pg_pack(struct hl_device *hdev,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
@@ -519,8 +515,8 @@ static inline int add_va_block(struct hl_device *hdev,
* - Return the start address of the virtual block
*/
static u64 get_va_block(struct hl_device *hdev,
- struct hl_va_range *va_range, u64 size, u64 hint_addr,
- bool is_userptr)
+ struct hl_va_range *va_range, u64 size, u64 hint_addr,
+ bool is_userptr)
{
struct hl_vm_va_block *va_block, *new_va_block = NULL;
u64 valid_start, valid_size, prev_start, prev_end, page_mask,
@@ -528,18 +524,17 @@ static u64 get_va_block(struct hl_device *hdev,
u32 page_size;
bool add_prev = false;
- if (is_userptr) {
+ if (is_userptr)
/*
* We cannot know if the user allocated memory with huge pages
* or not, hence we continue with the biggest possible
* granularity.
*/
- page_size = PAGE_SIZE_2MB;
- page_mask = PAGE_MASK_2MB;
- } else {
- page_size = hdev->asic_prop.dram_page_size;
- page_mask = ~((u64)page_size - 1);
- }
+ page_size = hdev->asic_prop.pmmu.huge_page_size;
+ else
+ page_size = hdev->asic_prop.dmmu.page_size;
+
+ page_mask = ~((u64)page_size - 1);
mutex_lock(&va_range->lock);
@@ -549,7 +544,6 @@ static u64 get_va_block(struct hl_device *hdev,
/* calc the first possible aligned addr */
valid_start = va_block->start;
-
if (valid_start & (page_size - 1)) {
valid_start &= page_mask;
valid_start += page_size;
@@ -561,7 +555,6 @@ static u64 get_va_block(struct hl_device *hdev,
if (valid_size >= size &&
(!new_va_block || valid_size < res_valid_size)) {
-
new_va_block = va_block;
res_valid_start = valid_start;
res_valid_size = valid_size;
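The valid_start rounding in get_va_block() is the standard power-of-two align-up idiom; a minimal standalone sketch, assuming page_size is a power of two (as expected for both pmmu.huge_page_size and dmmu.page_size):

#include <stdint.h>
#include <stdio.h>

/* round addr up to the next page_size boundary (page_size: power of two) */
static uint64_t align_up(uint64_t addr, uint64_t page_size)
{
	uint64_t page_mask = ~(page_size - 1);

	if (addr & (page_size - 1)) {
		addr &= page_mask;
		addr += page_size;
	}
	return addr;
}

int main(void)
{
	/* 0x1234567 rounded to a 2MB boundary -> 0x1400000 */
	printf("0x%llx\n", (unsigned long long)align_up(0x1234567, 1 << 21));
	return 0;
}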
@@ -631,11 +624,10 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
/*
* init_phys_pg_pack_from_userptr - initialize physical page pack from host
- * memory
- *
- * @ctx : current context
- * @userptr : userptr to initialize from
- * @pphys_pg_pack : res pointer
+ * memory
+ * @ctx: current context
+ * @userptr: userptr to initialize from
+ * @pphys_pg_pack: result pointer
*
* This function does the following:
* - Pin the physical pages related to the given virtual block
@@ -643,16 +635,19 @@ static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
* virtual block
*/
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
- struct hl_userptr *userptr,
- struct hl_vm_phys_pg_pack **pphys_pg_pack)
+ struct hl_userptr *userptr,
+ struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
+ struct hl_mmu_properties *mmu_prop = &ctx->hdev->asic_prop.pmmu;
struct hl_vm_phys_pg_pack *phys_pg_pack;
struct scatterlist *sg;
dma_addr_t dma_addr;
u64 page_mask, total_npages;
- u32 npages, page_size = PAGE_SIZE;
+ u32 npages, page_size = PAGE_SIZE,
+ huge_page_size = mmu_prop->huge_page_size;
bool first = true, is_huge_page_opt = true;
int rc, i, j;
+ u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
if (!phys_pg_pack)
@@ -675,14 +670,14 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
total_npages += npages;
- if ((npages % PGS_IN_2MB_PAGE) ||
- (dma_addr & (PAGE_SIZE_2MB - 1)))
+ if ((npages % pgs_in_huge_page) ||
+ (dma_addr & (huge_page_size - 1)))
is_huge_page_opt = false;
}
if (is_huge_page_opt) {
- page_size = PAGE_SIZE_2MB;
- total_npages /= PGS_IN_2MB_PAGE;
+ page_size = huge_page_size;
+ do_div(total_npages, pgs_in_huge_page);
}
page_mask = ~(((u64) page_size) - 1);
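The huge-page optimization above collapses runs of small pages into one mapping when every scatter-gather chunk is a whole number of huge pages and huge-page aligned. A standalone sketch of the test, using __builtin_ctz as a stand-in for the kernel's __ffs():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_size = 4096;
	uint32_t huge_page_size = 2 * 1024 * 1024;
	/* 2MB >> 12 = 512 small pages per huge page */
	uint32_t pgs_in_huge_page = huge_page_size >> __builtin_ctz(page_size);

	/* one example SG chunk: npages small pages starting at dma_addr */
	uint64_t dma_addr = 0x80000000ull;
	uint32_t npages = 1024;

	bool is_huge_page_opt = !(npages % pgs_in_huge_page) &&
				!(dma_addr & (huge_page_size - 1));

	printf("pgs_in_huge_page=%u huge_opt=%d\n", pgs_in_huge_page,
		is_huge_page_opt);
	return 0;
}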
@@ -714,7 +709,7 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
dma_addr += page_size;
if (is_huge_page_opt)
- npages -= PGS_IN_2MB_PAGE;
+ npages -= pgs_in_huge_page;
else
npages--;
}
@@ -731,19 +726,18 @@ page_pack_arr_mem_err:
}
/*
- * map_phys_page_pack - maps the physical page pack
- *
- * @ctx : current context
- * @vaddr : start address of the virtual area to map from
- * @phys_pg_pack : the pack of physical pages to map to
+ * map_phys_pg_pack - maps the physical page pack.
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to map from
+ * @phys_pg_pack: the pack of physical pages to map to
*
* This function does the following:
* - Maps each chunk of virtual memory to matching physical chunk
* - Stores number of successful mappings in the given argument
- * - Returns 0 on success, error code otherwise.
+ * - Returns 0 on success, error code otherwise
*/
-static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
- struct hl_vm_phys_pg_pack *phys_pg_pack)
+static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_device *hdev = ctx->hdev;
u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
@@ -783,6 +777,36 @@ err:
return rc;
}
+/*
+ * unmap_phys_pg_pack - unmaps the physical page pack
+ * @ctx: current context
+ * @vaddr: start address of the virtual area to unmap
+ * @phys_pg_pack: the pack of physical pages to unmap
+ */
+static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct hl_device *hdev = ctx->hdev;
+ u64 next_vaddr, i;
+ u32 page_size;
+
+ page_size = phys_pg_pack->page_size;
+ next_vaddr = vaddr;
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
+ if (hl_mmu_unmap(ctx, next_vaddr, page_size))
+ dev_warn_ratelimited(hdev->dev,
+ "unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+ /*
+ * unmapping on Palladium can be really long, so avoid a CPU
+ * soft lockup bug by sleeping a little between unmapping pages
+ */
+ if (hdev->pldm)
+ usleep_range(500, 1000);
+ }
+}
+
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
u64 *paddr)
{
@@ -839,7 +863,10 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
*device_addr = 0;
if (is_userptr) {
- rc = get_userptr_from_host_va(hdev, args, &userptr);
+ u64 addr = args->map_host.host_virt_addr,
+ size = args->map_host.mem_size;
+
+ rc = dma_map_host_va(hdev, addr, size, &userptr);
if (rc) {
dev_err(hdev->dev, "failed to get userptr from va\n");
return rc;
@@ -850,7 +877,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
- args->map_host.host_virt_addr);
+ addr);
goto init_page_pack_err;
}
@@ -909,7 +936,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
mutex_lock(&ctx->mmu_lock);
- rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack);
+ rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
mutex_unlock(&ctx->mmu_lock);
dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
@@ -917,7 +944,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
@@ -955,7 +982,7 @@ shared_err:
free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
if (is_userptr)
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
return rc;
}
@@ -965,20 +992,20 @@ init_page_pack_err:
*
* @ctx : current context
* @vaddr : device virtual address to unmap
+ * @ctx_free : true if in context free flow, false otherwise.
*
* This function does the following:
* - Unmap the physical pages related to the given virtual address
* - return the device virtual block to the virtual block list
*/
-static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
+static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
struct hl_device *hdev = ctx->hdev;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode = NULL;
struct hl_userptr *userptr = NULL;
+ struct hl_va_range *va_range;
enum vm_type_t *vm_type;
- u64 next_vaddr, i;
- u32 page_size;
bool is_userptr;
int rc;
@@ -1003,9 +1030,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
if (*vm_type == VM_TYPE_USERPTR) {
is_userptr = true;
+ va_range = &ctx->host_va_range;
userptr = hnode->ptr;
rc = init_phys_pg_pack_from_userptr(ctx, userptr,
- &phys_pg_pack);
+ &phys_pg_pack);
if (rc) {
dev_err(hdev->dev,
"unable to init page pack for vaddr 0x%llx\n",
@@ -1014,6 +1042,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
}
} else if (*vm_type == VM_TYPE_PHYS_PACK) {
is_userptr = false;
+ va_range = &ctx->dram_va_range;
phys_pg_pack = hnode->ptr;
} else {
dev_warn(hdev->dev,
@@ -1029,42 +1058,41 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
goto mapping_cnt_err;
}
- page_size = phys_pg_pack->page_size;
- vaddr &= ~(((u64) page_size) - 1);
-
- next_vaddr = vaddr;
+ vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
mutex_lock(&ctx->mmu_lock);
- for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
- if (hl_mmu_unmap(ctx, next_vaddr, page_size))
- dev_warn_ratelimited(hdev->dev,
- "unmap failed for vaddr: 0x%llx\n", next_vaddr);
-
- /* unmapping on Palladium can be really long, so avoid a CPU
- * soft lockup bug by sleeping a little between unmapping pages
- */
- if (hdev->pldm)
- usleep_range(500, 1000);
- }
+ unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
+ /*
+ * During context free this function is called in a loop to clean all
+ * the context mappings. Hence the cache invalidation can be called once
+ * at the loop end rather than for each iteration
+ */
+ if (!ctx_free)
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
mutex_unlock(&ctx->mmu_lock);
- if (add_va_block(hdev,
- is_userptr ? &ctx->host_va_range : &ctx->dram_va_range,
- vaddr,
- vaddr + phys_pg_pack->total_size - 1))
- dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n",
- vaddr);
+ /*
+ * No point in maintaining the free VA block list if the context is
+ * closing as the list will be freed anyway
+ */
+ if (!ctx_free) {
+ rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (rc)
+ dev_warn(hdev->dev,
+ "add va block failed for vaddr: 0x%llx\n",
+ vaddr);
+ }
atomic_dec(&phys_pg_pack->mapping_cnt);
kfree(hnode);
if (is_userptr) {
free_phys_pg_pack(hdev, phys_pg_pack);
- free_userptr(hdev, userptr);
+ dma_unmap_host_va(hdev, userptr);
}
return 0;
@@ -1189,8 +1217,8 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_UNMAP:
- rc = unmap_device_va(ctx,
- args->in.unmap.device_virt_addr);
+ rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
+ false);
break;
default:
@@ -1203,20 +1231,72 @@ out:
return rc;
}
+static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
+ u32 npages, u64 start, u32 offset,
+ struct hl_userptr *userptr)
+{
+ int rc;
+
+ if (!access_ok((void __user *) (uintptr_t) addr, size)) {
+ dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
+ return -EFAULT;
+ }
+
+ userptr->vec = frame_vector_create(npages);
+ if (!userptr->vec) {
+ dev_err(hdev->dev, "Failed to create frame vector\n");
+ return -ENOMEM;
+ }
+
+ rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ userptr->vec);
+
+ if (rc != npages) {
+ dev_err(hdev->dev,
+ "Failed to map host memory, user ptr probably wrong\n");
+ if (rc < 0)
+ goto destroy_framevec;
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ if (frame_vector_to_pages(userptr->vec) < 0) {
+ dev_err(hdev->dev,
+ "Failed to translate frame vector to pages\n");
+ rc = -EFAULT;
+ goto put_framevec;
+ }
+
+ rc = sg_alloc_table_from_pages(userptr->sgt,
+ frame_vector_pages(userptr->vec),
+ npages, offset, size, GFP_ATOMIC);
+ if (rc < 0) {
+ dev_err(hdev->dev, "failed to create SG table from pages\n");
+ goto put_framevec;
+ }
+
+ return 0;
+
+put_framevec:
+ put_vaddr_frames(userptr->vec);
+destroy_framevec:
+ frame_vector_destroy(userptr->vec);
+ return rc;
+}
+
/*
- * hl_pin_host_memory - pins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @addr : the user-space virtual address of the memory area
- * @size : the size of the memory area
- * @userptr : pointer to hl_userptr structure
+ * hl_pin_host_memory - pins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @addr: the host virtual address of the memory area
+ * @size: the size of the memory area
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Pins the physical pages
- * - Create a SG list from those pages
+ * - Create an SG list from those pages
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
- struct hl_userptr *userptr)
+ struct hl_userptr *userptr)
{
u64 start, end;
u32 npages, offset;
@@ -1227,11 +1307,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
- if (!access_ok((void __user *) (uintptr_t) addr, size)) {
- dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
- return -EFAULT;
- }
-
/*
* If the combination of the address and size requested for this memory
* region causes an integer overflow, return error.
@@ -1244,6 +1319,14 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EINVAL;
}
+ /*
+	 * This function can also be called from the data path, hence always use
+	 * GFP_ATOMIC; it is not a big allocation.
+ */
+ userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
+ if (!userptr->sgt)
+ return -ENOMEM;
+
start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size);
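The pinning math above is page bookkeeping. A standalone sketch, with PAGE_ALIGN expanded and the page count derived as (end - start) >> PAGE_SHIFT — an assumption, as the npages line itself falls outside this hunk:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1ull << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x7f12345678abull, size = 10000;
	uint64_t start, end, offset, npages;

	/* overflow check mirrored from hl_pin_host_memory() */
	if (addr + size < addr)
		return 1;

	start  = addr & DEMO_PAGE_MASK;		/* 0x7f1234567000 */
	offset = addr & ~DEMO_PAGE_MASK;	/* 0x8ab */
	end    = (addr + size + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK;
	npages = (end - start) >> DEMO_PAGE_SHIFT;	/* 3 pages */

	printf("start=0x%llx offset=0x%llx npages=%llu\n",
		(unsigned long long)start, (unsigned long long)offset,
		(unsigned long long)npages);
	return 0;
}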
@@ -1254,42 +1337,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node);
- userptr->vec = frame_vector_create(npages);
- if (!userptr->vec) {
- dev_err(hdev->dev, "Failed to create frame vector\n");
- return -ENOMEM;
- }
-
- rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
- userptr->vec);
-
- if (rc != npages) {
- dev_err(hdev->dev,
- "Failed to map host memory, user ptr probably wrong\n");
- if (rc < 0)
- goto destroy_framevec;
- rc = -EFAULT;
- goto put_framevec;
- }
-
- if (frame_vector_to_pages(userptr->vec) < 0) {
+ rc = get_user_memory(hdev, addr, size, npages, start, offset,
+ userptr);
+ if (rc) {
dev_err(hdev->dev,
- "Failed to translate frame vector to pages\n");
- rc = -EFAULT;
- goto put_framevec;
- }
-
- userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
- if (!userptr->sgt) {
- rc = -ENOMEM;
- goto put_framevec;
- }
-
- rc = sg_alloc_table_from_pages(userptr->sgt,
- frame_vector_pages(userptr->vec),
- npages, offset, size, GFP_ATOMIC);
- if (rc < 0) {
- dev_err(hdev->dev, "failed to create SG table from pages\n");
+ "failed to get user memory for address 0x%llx\n",
+ addr);
goto free_sgt;
}
@@ -1299,34 +1352,28 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
free_sgt:
kfree(userptr->sgt);
-put_framevec:
- put_vaddr_frames(userptr->vec);
-destroy_framevec:
- frame_vector_destroy(userptr->vec);
return rc;
}
/*
- * hl_unpin_host_memory - unpins a chunk of host memory
- *
- * @hdev : pointer to the habanalabs device structure
- * @userptr : pointer to hl_userptr structure
+ * hl_unpin_host_memory - unpins a chunk of host memory.
+ * @hdev: pointer to the habanalabs device structure
+ * @userptr: pointer to hl_userptr structure
*
* This function does the following:
* - Unpins the physical pages related to the host memory
* - Free the SG list
*/
-int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
+void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
struct page **pages;
hl_debugfs_remove_userptr(hdev, userptr);
if (userptr->dma_mapped)
- hdev->asic_funcs->hl_dma_unmap_sg(hdev,
- userptr->sgt->sgl,
- userptr->sgt->nents,
- userptr->dir);
+ hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents,
+ userptr->dir);
pages = frame_vector_pages(userptr->vec);
if (!IS_ERR(pages)) {
@@ -1342,8 +1389,6 @@ int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
sg_free_table(userptr->sgt);
kfree(userptr->sgt);
-
- return 0;
}
/*
@@ -1542,43 +1587,16 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
* @hdev : pointer to the habanalabs structure
 * @va_range : pointer to virtual addresses range
*
- * This function initializes the following:
- * - Checks that the given range contains the whole initial range
+ * This function does the following:
* - Frees the virtual addresses block list and its lock
*/
static void hl_va_range_fini(struct hl_device *hdev,
struct hl_va_range *va_range)
{
- struct hl_vm_va_block *va_block;
-
- if (list_empty(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not be empty on cleanup!\n");
- goto out;
- }
-
- if (!list_is_singular(&va_range->list)) {
- dev_warn(hdev->dev,
- "va list should not contain multiple blocks on cleanup!\n");
- goto free_va_list;
- }
-
- va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
-
- if (va_block->start != va_range->start_addr ||
- va_block->end != va_range->end_addr) {
- dev_warn(hdev->dev,
- "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
- va_block->start, va_block->end);
- goto free_va_list;
- }
-
-free_va_list:
mutex_lock(&va_range->lock);
clear_va_list_locked(hdev, &va_range->list);
mutex_unlock(&va_range->lock);
-out:
mutex_destroy(&va_range->lock);
}
@@ -1613,21 +1631,31 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
- if (!hash_empty(ctx->mem_hash))
- dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
+ /*
+	 * On a hard reset something clearly went wrong, so there is no point
+	 * in printing yet another side-effect error
+ */
+ if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+ dev_notice(hdev->dev,
+ "ctx %d is freed while it has va in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
dev_dbg(hdev->dev,
"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
hnode->vaddr, ctx->asid);
- unmap_device_va(ctx, hnode->vaddr);
+ unmap_device_va(ctx, hnode->vaddr, true);
}
+ /* invalidate the cache once after the unmapping loop */
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+
spin_lock(&vm->idr_lock);
idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
if (phys_pg_list->asid == ctx->asid) {
dev_dbg(hdev->dev,
- "page list 0x%p of asid %d is still alive\n",
+ "page list 0x%px of asid %d is still alive\n",
phys_pg_list, ctx->asid);
atomic64_sub(phys_pg_list->total_size,
&hdev->dram_used_mem);
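The %p to %px change in this debug print is deliberate: since the pointer-obfuscation hardening, plain %p prints a hashed value, whereas %px emits the raw address, which is what a stale-page-list diagnostic needs.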
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 176c315836f1..6262b26e2086 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -25,10 +25,9 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
return pgt_info;
}
-static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
struct hl_device *hdev = ctx->hdev;
- struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
hdev->asic_prop.mmu_hop_table_size);
@@ -37,6 +36,13 @@ static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
kfree(pgt_info);
}
+static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
+{
+ struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
+
+ _free_hop(ctx, pgt_info);
+}
+
static u64 alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
@@ -105,8 +111,8 @@ static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
* clear the 12 LSBs and translate the shadow hop to its associated
* physical hop, and add back the original 12 LSBs.
*/
- u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
- (val & OFFSET_MASK);
+ u64 phys_val = get_phys_addr(ctx, val & HOP_PHYS_ADDR_MASK) |
+ (val & FLAGS_MASK);
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
get_phys_addr(ctx, shadow_pte_addr),
@@ -159,7 +165,7 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
- free_hop(ctx, hop_addr);
+ _free_hop(ctx, pgt_info);
return num_of_ptes_left;
}
@@ -171,35 +177,50 @@ static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
((virt_addr & mask) >> shift);
}
-static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop0_mask,
+ mmu_prop->hop0_shift);
}
-static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop1_mask,
+ mmu_prop->hop1_shift);
}
-static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop2_mask,
+ mmu_prop->hop2_shift);
}
-static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop3_mask,
+ mmu_prop->hop3_shift);
}
-static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
+static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx,
+ struct hl_mmu_properties *mmu_prop,
+ u64 hop_addr, u64 vaddr)
{
- return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
+ return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_prop->hop4_mask,
+ mmu_prop->hop4_shift);
}
static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
if (curr_pte & PAGE_PRESENT_MASK)
- return curr_pte & PHYS_ADDR_MASK;
+ return curr_pte & HOP_PHYS_ADDR_MASK;
else
return ULLONG_MAX;
}
@@ -288,23 +309,23 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
}
/* need only pte 0 in hops 0 and 1 */
- pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_addr, pte_val);
- pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ pte_val = (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_addr, pte_val);
get_pte(ctx, hop1_addr);
hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
- pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
+ pte_val = (ctx->dram_default_hops[i] & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, pte_val);
get_pte(ctx, hop2_addr);
hop2_pte_addr += HL_PTE_SIZE;
}
- pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) |
+ pte_val = (prop->mmu_dram_default_page_addr & HOP_PHYS_ADDR_MASK) |
LAST_MASK | PAGE_PRESENT_MASK;
for (i = 0 ; i < num_of_hop3 ; i++) {
@@ -400,8 +421,6 @@ int hl_mmu_init(struct hl_device *hdev)
if (!hdev->mmu_enable)
return 0;
- /* MMU H/W init was already done in device hw_init() */
-
hdev->mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
@@ -427,6 +446,8 @@ int hl_mmu_init(struct hl_device *hdev)
goto err_pool_add;
}
+ /* MMU H/W init will be done in device hw_init() */
+
return 0;
err_pool_add:
@@ -450,10 +471,10 @@ void hl_mmu_fini(struct hl_device *hdev)
if (!hdev->mmu_enable)
return;
+ /* MMU H/W fini was already done in device hw_fini() */
+
kvfree(hdev->mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_pgt_pool);
-
- /* MMU H/W fini will be done in device hw_fini() */
}
/**
@@ -501,36 +522,36 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
dram_default_mapping_fini(ctx);
if (!hash_empty(ctx->mmu_shadow_hash))
- dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
- dev_err(hdev->dev,
+ dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
- free_hop(ctx, pgt_info->shadow_addr);
+ _free_hop(ctx, pgt_info);
}
mutex_destroy(&ctx->mmu_lock);
}
-static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
+static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
hop3_addr = 0, hop3_pte_addr = 0,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte;
- bool is_dram_addr, is_huge, clear_hop3 = true;
+ bool is_huge, clear_hop3 = true;
- is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
@@ -539,7 +560,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop1_addr == ULLONG_MAX)
goto not_mapped;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
@@ -548,7 +569,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop2_addr == ULLONG_MAX)
goto not_mapped;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
@@ -557,7 +578,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop3_addr == ULLONG_MAX)
goto not_mapped;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
@@ -575,7 +596,8 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hop4_addr == ULLONG_MAX)
goto not_mapped;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
@@ -584,7 +606,7 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
@@ -667,25 +689,36 @@ not_mapped:
int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr;
u32 real_page_size, npages;
int i, rc;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and unmap them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it to sub-pages and unmap them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
- page_size);
+ "page size of %u is not %uKB nor %uMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
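Both hl_mmu_unmap() and hl_mmu_map() pick the largest H/W page size that divides the caller's page size and then walk the range in steps of it. A minimal sketch; the npages computation falls outside the hunk and is assumed to be page_size / real_page_size:

#include <stdint.h>
#include <stdio.h>

/* pick the real H/W page size for a caller-supplied page_size */
static int pick_real_page_size(uint32_t page_size, uint32_t page,
			       uint32_t huge, uint32_t *real)
{
	if (!(page_size % huge))
		*real = huge;
	else if (!(page_size % page))
		*real = page;
	else
		return -1;	/* not a multiple of either size */
	return 0;
}

int main(void)
{
	uint32_t real, page_size = 8 * 1024 * 1024;	/* 8MB request */

	if (pick_real_page_size(page_size, 4096, 2 * 1024 * 1024, &real))
		return 1;

	/* assumed: the range is then walked as npages sub-pages */
	printf("real=%u npages=%u\n", real, page_size / real);
	return 0;
}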
@@ -694,7 +727,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
real_virt_addr = virt_addr;
for (i = 0 ; i < npages ; i++) {
- rc = _hl_mmu_unmap(ctx, real_virt_addr);
+ rc = _hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr);
if (rc)
return rc;
@@ -705,10 +738,11 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size)
}
static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
- u32 page_size)
+ u32 page_size, bool is_dram_addr)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 hop0_addr = 0, hop0_pte_addr = 0,
hop1_addr = 0, hop1_pte_addr = 0,
hop2_addr = 0, hop2_pte_addr = 0,
@@ -716,21 +750,19 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
hop4_addr = 0, hop4_pte_addr = 0,
curr_pte = 0;
bool hop1_new = false, hop2_new = false, hop3_new = false,
- hop4_new = false, is_huge, is_dram_addr;
+ hop4_new = false, is_huge;
int rc = -ENOMEM;
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * This mapping function can map a 4KB/2MB page. For 2MB page there are
- * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB
- * pages only but user memory could have been allocated with one of the
- * two page sizes. Since this is a common code for all the three cases,
- * we need this hugs page check.
+	 * This mapping function can map a page or a huge page. For a huge page
+	 * there are only 3 hops rather than 4. Currently the DRAM allocation
+	 * uses huge pages only, but user memory could have been allocated with
+	 * one of the two page sizes. Since this is common code for all three
+	 * cases, we need this huge page check.
*/
- is_huge = page_size == PAGE_SIZE_2MB;
-
- is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size,
- prop->va_space_dram_start_address,
- prop->va_space_dram_end_address);
+ is_huge = page_size == mmu_prop->huge_page_size;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM mapping should use huge pages only\n");
@@ -738,28 +770,28 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
}
hop0_addr = get_hop0_addr(ctx);
- hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+ hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
if (hop1_addr == ULLONG_MAX)
goto err;
- hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+ hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
if (hop2_addr == ULLONG_MAX)
goto err;
- hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+ hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
if (hop3_addr == ULLONG_MAX)
goto err;
- hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+ hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
if (!is_huge) {
@@ -767,13 +799,14 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop4_addr == ULLONG_MAX)
goto err;
- hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+ hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop4_addr,
+ virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
}
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- PTE_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | LAST_MASK |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
@@ -813,7 +846,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
| PAGE_PRESENT_MASK;
if (is_huge)
@@ -823,25 +856,25 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hop1_new) {
curr_pte =
- (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop1_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop0_pte_addr, curr_pte);
}
if (hop2_new) {
curr_pte =
- (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop2_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop1_pte_addr, curr_pte);
get_pte(ctx, hop1_addr);
}
if (hop3_new) {
curr_pte =
- (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
+ (hop3_addr & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
write_pte(ctx, hop2_pte_addr, curr_pte);
get_pte(ctx, hop2_addr);
}
if (!is_huge) {
if (hop4_new) {
- curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
+ curr_pte = (hop4_addr & HOP_PHYS_ADDR_MASK) |
PAGE_PRESENT_MASK;
write_pte(ctx, hop3_pte_addr, curr_pte);
get_pte(ctx, hop3_addr);
@@ -890,25 +923,36 @@ err:
int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
{
struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
u64 real_virt_addr, real_phys_addr;
u32 real_page_size, npages;
int i, rc, mapped_cnt = 0;
+ bool is_dram_addr;
if (!hdev->mmu_enable)
return 0;
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->va_space_dram_start_address,
+ prop->va_space_dram_end_address);
+
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+
/*
- * The H/W handles mapping of 4KB/2MB page. Hence if the host page size
- * is bigger, we break it to sub-pages and map them separately.
+ * The H/W handles mapping of specific page sizes. Hence if the page
+ * size is bigger, we break it to sub-pages and map them separately.
*/
- if ((page_size % PAGE_SIZE_2MB) == 0) {
- real_page_size = PAGE_SIZE_2MB;
- } else if ((page_size % PAGE_SIZE_4KB) == 0) {
- real_page_size = PAGE_SIZE_4KB;
+ if ((page_size % mmu_prop->huge_page_size) == 0) {
+ real_page_size = mmu_prop->huge_page_size;
+ } else if ((page_size % mmu_prop->page_size) == 0) {
+ real_page_size = mmu_prop->page_size;
} else {
dev_err(hdev->dev,
- "page size of %u is not 4KB nor 2MB aligned, can't map\n",
- page_size);
+ "page size of %u is not %dKB nor %dMB aligned, can't unmap\n",
+ page_size,
+ mmu_prop->page_size >> 10,
+ mmu_prop->huge_page_size >> 20);
return -EFAULT;
}
@@ -923,7 +967,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
for (i = 0 ; i < npages ; i++) {
rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
- real_page_size);
+ real_page_size, is_dram_addr);
if (rc)
goto err;
@@ -937,7 +981,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
err:
real_virt_addr = virt_addr;
for (i = 0 ; i < mapped_cnt ; i++) {
- if (_hl_mmu_unmap(ctx, real_virt_addr))
+ if (_hl_mmu_unmap(ctx, real_virt_addr, is_dram_addr))
dev_warn_ratelimited(hdev->dev,
"failed to unmap va: 0x%llx\n", real_virt_addr);
diff --git a/drivers/misc/hpilo.h b/drivers/misc/hpilo.h
index 94dfb9e40e29..1aa433a7f66c 100644
--- a/drivers/misc/hpilo.h
+++ b/drivers/misc/hpilo.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/char/hpilo.h
*
diff --git a/drivers/misc/ibmvmc.h b/drivers/misc/ibmvmc.h
index e140ada8fe2c..0e1756fffeae 100644
--- a/drivers/misc/ibmvmc.h
+++ b/drivers/misc/ibmvmc.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* linux/drivers/misc/ibmvmc.h
*
* IBM Power Systems Virtual Management Channel Support.
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 057d7bbde402..dd65cedf3b12 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -16,7 +16,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/poll.h>
@@ -434,23 +434,23 @@ int lis3lv02d_poweron(struct lis3lv02d *lis3)
EXPORT_SYMBOL_GPL(lis3lv02d_poweron);
-static void lis3lv02d_joystick_poll(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_poll(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
int x, y, z;
mutex_lock(&lis3->mutex);
lis3lv02d_get_xyz(lis3, &x, &y, &z);
- input_report_abs(pidev->input, ABS_X, x);
- input_report_abs(pidev->input, ABS_Y, y);
- input_report_abs(pidev->input, ABS_Z, z);
- input_sync(pidev->input);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_Z, z);
+ input_sync(input);
mutex_unlock(&lis3->mutex);
}
-static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
+static int lis3lv02d_joystick_open(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
if (lis3->pm_dev)
pm_runtime_get_sync(lis3->pm_dev);
@@ -461,12 +461,14 @@ static void lis3lv02d_joystick_open(struct input_polled_dev *pidev)
* Update coordinates for the case where poll interval is 0 and
 * the chip is running purely under interrupt control
*/
- lis3lv02d_joystick_poll(pidev);
+ lis3lv02d_joystick_poll(input);
+
+ return 0;
}
-static void lis3lv02d_joystick_close(struct input_polled_dev *pidev)
+static void lis3lv02d_joystick_close(struct input_dev *input)
{
- struct lis3lv02d *lis3 = pidev->private;
+ struct lis3lv02d *lis3 = input_get_drvdata(input);
atomic_set(&lis3->wake_thread, 0);
if (lis3->pm_dev)
@@ -497,7 +499,7 @@ out:
static void lis302dl_interrupt_handle_click(struct lis3lv02d *lis3)
{
- struct input_dev *dev = lis3->idev->input;
+ struct input_dev *dev = lis3->idev;
u8 click_src;
mutex_lock(&lis3->mutex);
@@ -677,26 +679,19 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
if (lis3->idev)
return -EINVAL;
- lis3->idev = input_allocate_polled_device();
- if (!lis3->idev)
+ input_dev = input_allocate_device();
+ if (!input_dev)
return -ENOMEM;
- lis3->idev->poll = lis3lv02d_joystick_poll;
- lis3->idev->open = lis3lv02d_joystick_open;
- lis3->idev->close = lis3lv02d_joystick_close;
- lis3->idev->poll_interval = MDPS_POLL_INTERVAL;
- lis3->idev->poll_interval_min = MDPS_POLL_MIN;
- lis3->idev->poll_interval_max = MDPS_POLL_MAX;
- lis3->idev->private = lis3;
- input_dev = lis3->idev->input;
-
input_dev->name = "ST LIS3LV02DL Accelerometer";
input_dev->phys = DRIVER_NAME "/input0";
input_dev->id.bustype = BUS_HOST;
input_dev->id.vendor = 0;
input_dev->dev.parent = &lis3->pdev->dev;
- set_bit(EV_ABS, input_dev->evbit);
+ input_dev->open = lis3lv02d_joystick_open;
+ input_dev->close = lis3lv02d_joystick_close;
+
max_val = (lis3->mdps_max_val * lis3->scale) / LIS3_ACCURACY;
if (lis3->whoami == WAI_12B) {
fuzz = LIS3_DEFAULT_FUZZ_12B;
@@ -712,17 +707,32 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3)
input_set_abs_params(input_dev, ABS_Y, -max_val, max_val, fuzz, flat);
input_set_abs_params(input_dev, ABS_Z, -max_val, max_val, fuzz, flat);
+ input_set_drvdata(input_dev, lis3);
+ lis3->idev = input_dev;
+
+ err = input_setup_polling(input_dev, lis3lv02d_joystick_poll);
+ if (err)
+ goto err_free_input;
+
+ input_set_poll_interval(input_dev, MDPS_POLL_INTERVAL);
+ input_set_min_poll_interval(input_dev, MDPS_POLL_MIN);
+ input_set_max_poll_interval(input_dev, MDPS_POLL_MAX);
+
lis3->mapped_btns[0] = lis3lv02d_get_axis(abs(lis3->ac.x), btns);
lis3->mapped_btns[1] = lis3lv02d_get_axis(abs(lis3->ac.y), btns);
lis3->mapped_btns[2] = lis3lv02d_get_axis(abs(lis3->ac.z), btns);
- err = input_register_polled_device(lis3->idev);
- if (err) {
- input_free_polled_device(lis3->idev);
- lis3->idev = NULL;
- }
+ err = input_register_device(lis3->idev);
+ if (err)
+ goto err_free_input;
+ return 0;
+
+err_free_input:
+ input_free_device(input_dev);
+ lis3->idev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_enable);
@@ -738,8 +748,7 @@ void lis3lv02d_joystick_disable(struct lis3lv02d *lis3)
if (lis3->irq)
misc_deregister(&lis3->miscdev);
- input_unregister_polled_device(lis3->idev);
- input_free_polled_device(lis3->idev);
+ input_unregister_device(lis3->idev);
lis3->idev = NULL;
}
EXPORT_SYMBOL_GPL(lis3lv02d_joystick_disable);
@@ -895,10 +904,9 @@ static void lis3lv02d_8b_configure(struct lis3lv02d *lis3,
(p->click_thresh_y << 4));
if (lis3->idev) {
- struct input_dev *input_dev = lis3->idev->input;
- input_set_capability(input_dev, EV_KEY, BTN_X);
- input_set_capability(input_dev, EV_KEY, BTN_Y);
- input_set_capability(input_dev, EV_KEY, BTN_Z);
+ input_set_capability(lis3->idev, EV_KEY, BTN_X);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Y);
+ input_set_capability(lis3->idev, EV_KEY, BTN_Z);
}
}
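The conversion drops the input_polled_dev wrapper entirely: the driver now allocates a plain input_dev, attaches its poll routine with input_setup_polling(), and sets the interval bounds through input_set_poll_interval() and its min/max variants. The same device then serves both the interrupt-driven and the polled paths, and the click handler no longer has to reach through lis3->idev->input.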
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index 1b0c99883c57..c394c0b08519 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -6,7 +6,7 @@
* Copyright (C) 2008-2009 Eric Piel
*/
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/regulator/consumer.h>
#include <linux/miscdevice.h>
@@ -281,7 +281,7 @@ struct lis3lv02d {
* (1/1000th of earth gravity)
*/
- struct input_polled_dev *idev; /* input device */
+ struct input_dev *idev; /* input device */
struct platform_device *pdev; /* platform device */
struct regulator_bulk_data regulators[2];
atomic_t count; /* interrupt count after last read */
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 7284a22b1a09..a4fdad04809a 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -12,6 +12,10 @@
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_X86_32
+#include <asm/desc.h>
+#endif
+
struct lkdtm_list {
struct list_head node;
};
@@ -337,3 +341,38 @@ void lkdtm_UNSET_SMEP(void)
pr_err("FAIL: this test is x86_64-only\n");
#endif
}
+
+#ifdef CONFIG_X86_32
+void lkdtm_DOUBLE_FAULT(void)
+{
+ /*
+ * Trigger #DF by setting the stack limit to zero. This clobbers
+ * a GDT TLS slot, which is okay because the current task will die
+ * anyway due to the double fault.
+ */
+ struct desc_struct d = {
+ .type = 3, /* expand-up, writable, accessed data */
+ .p = 1, /* present */
+ .d = 1, /* 32-bit */
+ .g = 0, /* limit in bytes */
+ .s = 1, /* not system */
+ };
+
+ local_irq_disable();
+ write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
+ GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);
+
+ /*
+ * Put our zero-limit segment in SS and then trigger a fault. The
+ * 4-byte access to (%esp) will fault with #SS, and the attempt to
+ * deliver the fault will recursively cause #SS and result in #DF.
+ * This whole process happens while NMIs and MCEs are blocked by the
+ * MOV SS window. This is nice because an NMI with an invalid SS
+ * would also double-fault, resulting in the NMI or MCE being lost.
+ */
+ asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
+ "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
+
+ panic("tried to double fault but didn't die\n");
+}
+#endif
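Assuming CONFIG_LKDTM=y and a mounted debugfs, the new crash type should be reachable like the other lkdtm tests, e.g. with `echo DOUBLE_FAULT > /sys/kernel/debug/provoke-crash/DIRECT` on a 32-bit x86 kernel; by design the machine does not survive the test.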
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index cbc4c9045a99..ee0d6e721441 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -171,6 +171,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(USERCOPY_KERNEL_DS),
CRASHTYPE(STACKLEAK_ERASING),
CRASHTYPE(CFI_FORWARD_PROTO),
+#ifdef CONFIG_X86_32
+ CRASHTYPE(DOUBLE_FAULT),
+#endif
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index ab446e0bde97..c56d23e37643 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -28,6 +28,9 @@ void lkdtm_CORRUPT_USER_DS(void);
void lkdtm_STACK_GUARD_PAGE_LEADING(void);
void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
void lkdtm_UNSET_SMEP(void);
+#ifdef CONFIG_X86_32
+void lkdtm_DOUBLE_FAULT(void);
+#endif
/* lkdtm_heap.c */
void __init lkdtm_heap_init(void);
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 0a2b99e1af45..9ad9c01ddf41 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -46,8 +46,6 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
}
@@ -59,8 +57,6 @@ static void number_of_connections(struct mei_cl_device *cldev)
*/
static void blacklist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 0;
}
@@ -71,8 +67,6 @@ static void blacklist(struct mei_cl_device *cldev)
*/
static void whitelist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
cldev->do_match = 1;
}
@@ -256,7 +250,6 @@ static void mei_wd(struct mei_cl_device *cldev)
{
struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
if (pdev->device == MEI_DEV_ID_WPT_LP ||
pdev->device == MEI_DEV_ID_SPT ||
pdev->device == MEI_DEV_ID_SPT_H)
@@ -410,8 +403,6 @@ static void mei_nfc(struct mei_cl_device *cldev)
bus = cldev->bus;
- dev_dbg(&cldev->dev, "running hook %s\n", __func__);
-
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
cl = mei_cl_alloc_linked(bus);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 985bd4fd3328..a0a495c95e3c 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -791,11 +791,44 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", maxconn);
+}
+static DEVICE_ATTR_RO(max_conn);
+
+static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u8 fixed = mei_me_cl_fixed(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%d", fixed);
+}
+static DEVICE_ATTR_RO(fixed);
+
+static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u", maxlen);
+}
+static DEVICE_ATTR_RO(max_len);
+
static struct attribute *mei_cldev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_uuid.attr,
&dev_attr_version.attr,
&dev_attr_modalias.attr,
+ &dev_attr_max_conn.attr,
+ &dev_attr_fixed.attr,
+ &dev_attr_max_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
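With these three additions, each ME client device now exports max_conn, fixed and max_len alongside the existing name/uuid/version/modalias attributes; they should appear as read-only files in the client device's sysfs directory on the mei bus (e.g. somewhere under /sys/bus/mei/devices/; the exact path is an assumption, the patch itself does not spell it out).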
@@ -873,15 +906,16 @@ static const struct device_type mei_cl_device_type = {
/**
* mei_cl_bus_set_name - set device name for me client device
+ * <controller>-<client device>
+ * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
*
* @cldev: me client device
*/
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
- dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
- cldev->name,
- mei_me_cl_uuid(cldev->me_cl),
- mei_me_cl_ver(cldev->me_cl));
+ dev_set_name(&cldev->dev, "%s-%pUl",
+ dev_name(cldev->bus->dev),
+ mei_me_cl_uuid(cldev->me_cl));
}
/**
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index c1f9e810cf81..2f8954def591 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -69,6 +69,42 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
return me_cl->props.protocol_version;
}
+/**
+ * mei_me_cl_max_conn - return me client max number of connections
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max number of connections
+ */
+static inline u8 mei_me_cl_max_conn(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_number_of_connections;
+}
+
+/**
+ * mei_me_cl_fixed - return me client fixed address, if any
+ *
+ * @me_cl: me client
+ *
+ * Return: me client fixed address
+ */
+static inline u8 mei_me_cl_fixed(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.fixed_address;
+}
+
+/**
+ * mei_me_cl_max_len - return me client max msg length
+ *
+ * @me_cl: me client
+ *
+ * Return: me client max msg length
+ */
+static inline u32 mei_me_cl_max_len(const struct mei_me_client *me_cl)
+{
+ return me_cl->props.max_msg_length;
+}
+
/*
* MEI IO Functions
*/
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index c681f6fab342..93027fd96c71 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -27,18 +27,6 @@
#include "mei_hdcp.h"
-static inline u8 mei_get_ddi_index(enum port port)
-{
- switch (port) {
- case PORT_A:
- return MEI_DDI_A;
- case PORT_B ... PORT_F:
- return (u8)port;
- default:
- return MEI_DDI_INVALID_PORT;
- }
-}
-
/**
* mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW
* @dev: device corresponding to the mei_cl_device
@@ -69,7 +57,8 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
session_init_in.port.integrated_port_type = data->port_type;
- session_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_init_in.port.physical_port = (u8)data->fw_ddi;
+ session_init_in.port.attached_transcoder = (u8)data->fw_tc;
session_init_in.protocol = data->protocol;
byte = mei_cldev_send(cldev, (u8 *)&session_init_in,
@@ -138,7 +127,8 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev,
WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN;
verify_rxcert_in.port.integrated_port_type = data->port_type;
- verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_rxcert_in.port.physical_port = (u8)data->fw_ddi;
+ verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc;
verify_rxcert_in.cert_rx = rx_cert->cert_rx;
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
@@ -208,7 +198,8 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data,
send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN;
send_hprime_in.port.integrated_port_type = data->port_type;
- send_hprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ send_hprime_in.port.physical_port = (u8)data->fw_ddi;
+ send_hprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
@@ -265,7 +256,8 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN;
pairing_info_in.port.integrated_port_type = data->port_type;
- pairing_info_in.port.physical_port = mei_get_ddi_index(data->port);
+ pairing_info_in.port.physical_port = (u8)data->fw_ddi;
+ pairing_info_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
@@ -323,7 +315,8 @@ mei_hdcp_initiate_locality_check(struct device *dev,
lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN;
lc_init_in.port.integrated_port_type = data->port_type;
- lc_init_in.port.physical_port = mei_get_ddi_index(data->port);
+ lc_init_in.port.physical_port = (u8)data->fw_ddi;
+ lc_init_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in));
if (byte < 0) {
@@ -378,7 +371,8 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data,
WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN;
verify_lprime_in.port.integrated_port_type = data->port_type;
- verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_lprime_in.port.physical_port = (u8)data->fw_ddi;
+ verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
@@ -435,7 +429,8 @@ static int mei_hdcp_get_session_key(struct device *dev,
get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN;
get_skey_in.port.integrated_port_type = data->port_type;
- get_skey_in.port.physical_port = mei_get_ddi_index(data->port);
+ get_skey_in.port.physical_port = (u8)data->fw_ddi;
+ get_skey_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in));
if (byte < 0) {
@@ -499,7 +494,8 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev,
WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN;
verify_repeater_in.port.integrated_port_type = data->port_type;
- verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_repeater_in.port.physical_port = (u8)data->fw_ddi;
+ verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_repeater_in.rx_info, rep_topology->rx_info,
HDCP_2_2_RXINFO_LEN);
@@ -569,7 +565,8 @@ static int mei_hdcp_verify_mprime(struct device *dev,
WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN;
verify_mprime_in.port.integrated_port_type = data->port_type;
- verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port);
+ verify_mprime_in.port.physical_port = (u8)data->fw_ddi;
+ verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc;
memcpy(verify_mprime_in.m_prime, stream_ready->m_prime,
HDCP_2_2_MPRIME_LEN);
@@ -630,7 +627,8 @@ static int mei_hdcp_enable_authentication(struct device *dev,
enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN;
enable_auth_in.port.integrated_port_type = data->port_type;
- enable_auth_in.port.physical_port = mei_get_ddi_index(data->port);
+ enable_auth_in.port.physical_port = (u8)data->fw_ddi;
+ enable_auth_in.port.attached_transcoder = (u8)data->fw_tc;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in,
@@ -684,7 +682,8 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data)
WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN;
session_close_in.port.integrated_port_type = data->port_type;
- session_close_in.port.physical_port = mei_get_ddi_index(data->port);
+ session_close_in.port.physical_port = (u8)data->fw_ddi;
+ session_close_in.port.attached_transcoder = (u8)data->fw_tc;
byte = mei_cldev_send(cldev, (u8 *)&session_close_in,
sizeof(session_close_in));
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index e4b1cd54c853..18ffc773fa18 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -184,8 +184,11 @@ struct hdcp_cmd_no_data {
/* Uniquely identifies the hdcp port being addressed for a given command. */
struct hdcp_port_id {
u8 integrated_port_type;
+ /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
u8 physical_port;
- u16 reserved;
+ /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
+ u8 attached_transcoder;
+ u8 reserved;
} __packed;
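Worth noting about the layout: splitting the old u16 reserved into u8 attached_transcoder plus u8 reserved keeps the packed struct at the same size, so the message format toward the ME firmware is unchanged; which of the two fields the firmware consumes simply depends on the hardware generation, per the comments above.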
/*
@@ -362,16 +365,4 @@ struct wired_cmd_repeater_auth_stream_req_out {
struct hdcp_cmd_header header;
struct hdcp_port_id port;
} __packed;
-
-enum mei_fw_ddi {
- MEI_DDI_INVALID_PORT = 0x0,
-
- MEI_DDI_B = 1,
- MEI_DDI_C,
- MEI_DDI_D,
- MEI_DDI_E,
- MEI_DDI_F,
- MEI_DDI_A = 7,
- MEI_DDI_RANGE_END = MEI_DDI_A,
-};
#endif /* __MEI_HDCP_H__ */
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c09f8bb49495..7cd67fb2365d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,7 @@
#define MEI_DEV_ID_CMP_LP 0x02e0 /* Comet Point LP */
#define MEI_DEV_ID_CMP_LP_3 0x02e4 /* Comet Point LP 3 (iTouch) */
+#define MEI_DEV_ID_CMP_V 0xA3BA /* Comet Point Lake V */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
@@ -162,7 +163,8 @@ access to ME_CBD */
#define ME_IS_HRA 0x00000002
/* ME Interrupt Enable HRA - host read only access to ME_IE */
#define ME_IE_HRA 0x00000001
-
+/* TRC control shadow register */
+#define ME_TRC 0x00000030
/* H_HPG_CSR register bits */
#define H_HPG_CSR_PGIHEXR 0x00000001
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index c4f6991d3028..668418d7ea77 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -173,6 +173,27 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
}
/**
+ * mei_me_trc_status - read trc status register
+ *
+ * @dev: mei device
+ * @trc: trc status register value
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ if (!hw->cfg->hw_trc_supported)
+ return -EOPNOTSUPP;
+
+ *trc = mei_me_reg_read(hw, ME_TRC);
+ trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
+
+ return 0;
+}
+
+/**
* mei_me_fw_status - read fw status register from pci config space
*
* @dev: mei device
@@ -183,20 +204,19 @@ static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
static int mei_me_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
int ret;
int i;
- if (!fw_status)
+ if (!fw_status || !hw->read_fws)
return -EINVAL;
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev, fw_src->status[i],
- &fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ ret = hw->read_fws(dev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -210,19 +230,26 @@ static int mei_me_fw_status(struct mei_device *dev,
* mei_me_hw_config - configure hw dependent settings
*
* @dev: mei device
+ *
+ * Return:
+ * * -EINVAL when read_fws is not set
+ * * 0 on success
+ *
*/
-static void mei_me_hw_config(struct mei_device *dev)
+static int mei_me_hw_config(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr, reg;
+ if (WARN_ON(!hw->read_fws))
+ return -EINVAL;
+
/* Doesn't change in runtime */
hcsr = mei_hcsr_read(dev);
hw->hbuf_depth = (hcsr & H_CBD) >> 24;
reg = 0;
- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -233,6 +260,8 @@ static void mei_me_hw_config(struct mei_device *dev)
if (reg & H_D0I3C_I3)
hw->pg_state = MEI_PG_ON;
}
+
+ return 0;
}
/**
@@ -269,7 +298,7 @@ static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
}
/**
- * mei_me_intr_clear - clear and stop interrupts
+ * me_intr_clear - clear and stop interrupts
*
* @dev: the device structure
* @hcsr: supplied hcsr register value
@@ -323,9 +352,9 @@ static void mei_me_intr_disable(struct mei_device *dev)
*/
static void mei_me_synchronize_irq(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct mei_me_hw *hw = to_me_hw(dev);
- synchronize_irq(pdev->irq);
+ synchronize_irq(hw->irq);
}
/**
@@ -1294,6 +1323,7 @@ end:
static const struct mei_hw_ops mei_me_hw_ops = {
+ .trc_status = mei_me_trc_status,
.fw_status = mei_me_fw_status,
.pg_state = mei_me_pg_state,
@@ -1384,6 +1414,9 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
.dma_size[DMA_DSCR_DEVICE] = SZ_128K, \
.dma_size[DMA_DSCR_CTRL] = PAGE_SIZE
+#define MEI_CFG_TRC \
+ .hw_trc_supported = 1
+
/* ICH Legacy devices */
static const struct mei_cfg mei_me_ich_cfg = {
MEI_CFG_ICH_HFS,
@@ -1432,6 +1465,14 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
+/* Tiger Lake and newer devices */
+static const struct mei_cfg mei_me_pch15_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+};
+
/*
 * mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1446,6 +1487,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
[MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
@@ -1461,19 +1503,19 @@ const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
/**
* mei_me_dev_init - allocates and initializes the mei device structure
*
- * @pdev: The pci device structure
+ * @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
*
* Return: The mei_device pointer on success, NULL on failure.
*/
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg)
{
struct mei_device *dev;
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
+ dev = devm_kzalloc(parent, sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -1483,7 +1525,7 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
+ mei_device_init(dev, parent, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 1d8794828cbc..4a8d4dcd5a91 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -21,12 +21,14 @@
* @quirk_probe: device exclusion quirk
* @dma_size: device DMA buffers size
* @fw_ver_supported: is fw version retrievable from FW
+ * @hw_trc_supported: does the hw support trc register
*/
struct mei_cfg {
const struct mei_fw_status fw_status;
bool (*quirk_probe)(struct pci_dev *pdev);
size_t dma_size[DMA_DSCR_NUM];
u32 fw_ver_supported:1;
+ u32 hw_trc_supported:1;
};
@@ -42,16 +44,20 @@ struct mei_cfg {
*
* @cfg: per device generation config and ops
* @mem_addr: io memory address
+ * @irq: irq number
* @pg_state: power gating state
* @d0i3_supported: di03 support
* @hbuf_depth: depth of hardware host/write buffer in slots
+ * @read_fws: read FW status register handler
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
void __iomem *mem_addr;
+ int irq;
enum mei_pg_state pg_state;
bool d0i3_supported;
u8 hbuf_depth;
+ int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
@@ -74,6 +80,7 @@ struct mei_me_hw {
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -86,12 +93,13 @@ enum mei_cfg_idx {
MEI_ME_PCH8_CFG,
MEI_ME_PCH8_SPS_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH15_CFG,
MEI_ME_NUM_CFG,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
-struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
+struct mei_device *mei_me_dev_init(struct device *parent,
const struct mei_cfg *cfg);
int mei_me_pg_enter_sync(struct mei_device *dev);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 5e58656b8e19..785b260b3ae9 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -660,14 +660,16 @@ static int mei_txe_fw_status(struct mei_device *dev,
}
/**
- * mei_txe_hw_config - configure hardware at the start of the devices
+ * mei_txe_hw_config - configure hardware at the start of the device
*
* @dev: the device structure
*
 * Configuring the hardware at device start should be done only
 * once, at device probe time
+ *
+ * Return: always 0
*/
-static void mei_txe_hw_config(struct mei_device *dev)
+static int mei_txe_hw_config(struct mei_device *dev)
{
struct mei_txe_hw *hw = to_txe_hw(dev);
@@ -677,6 +679,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
+
+ return 0;
}
/**
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index b9fef773e71b..bcee77768b91 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -190,7 +190,9 @@ int mei_start(struct mei_device *dev)
/* acknowledge interrupt and stop interrupts */
mei_clear_interrupts(dev);
- mei_hw_config(dev);
+ ret = mei_hw_config(dev);
+ if (ret)
+ goto err;
dev_dbg(dev->dev, "reset in start the mei device.\n");
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7310b476323c..f17297f2943d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -533,24 +533,6 @@ out:
}
/**
- * mei_compat_ioctl - the compat IOCTL function
- *
- * @file: pointer to file structure
- * @cmd: ioctl command
- * @data: pointer to mei message structure
- *
- * Return: 0 on success , <0 on error
- */
-#ifdef CONFIG_COMPAT
-static long mei_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long data)
-{
- return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
-}
-#endif
-
-
-/**
* mei_poll - the poll function
*
* @file: pointer to file structure
@@ -701,6 +683,29 @@ static int mei_fasync(int fd, struct file *file, int band)
}
/**
+ * trc_show - mei device trc attribute show method
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t trc_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ u32 trc;
+ int ret;
+
+ ret = mei_trc_status(dev, &trc);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%08X\n", trc);
+}
+static DEVICE_ATTR_RO(trc);
+
+/**
* fw_status_show - mei device fw_status attribute show method
*
* @device: device pointer
@@ -887,6 +892,7 @@ static struct attribute *mei_attrs[] = {
&dev_attr_tx_queue_limit.attr,
&dev_attr_fw_ver.attr,
&dev_attr_dev_state.attr,
+ &dev_attr_trc.attr,
NULL
};
ATTRIBUTE_GROUPS(mei);
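Once the device registers with this attribute group, the value should be readable from userspace, presumably as /sys/class/mei/mei0/trc or similar (path assumed from the mei class device naming). On hardware whose config lacks hw_trc_supported, mei_trc_status() returns -EOPNOTSUPP and the read fails with "Operation not supported".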
@@ -898,9 +904,7 @@ static const struct file_operations mei_fops = {
.owner = THIS_MODULE,
.read = mei_read,
.unlocked_ioctl = mei_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mei_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = mei_open,
.release = mei_release,
.write = mei_write,
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 0f2141178299..76f8ff5ff974 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -260,6 +260,7 @@ struct mei_cl {
* @hw_config : configure hw
*
* @fw_status : get fw status registers
+ * @trc_status : get trc status register
* @pg_state : power gating state of the device
* @pg_in_transition : is device now in pg transition
* @pg_is_enabled : is power gating enabled
@@ -287,9 +288,11 @@ struct mei_hw_ops {
bool (*hw_is_ready)(struct mei_device *dev);
int (*hw_reset)(struct mei_device *dev, bool enable);
int (*hw_start)(struct mei_device *dev);
- void (*hw_config)(struct mei_device *dev);
+ int (*hw_config)(struct mei_device *dev);
int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
+ int (*trc_status)(struct mei_device *dev, u32 *trc);
+
enum mei_pg_state (*pg_state)(struct mei_device *dev);
bool (*pg_in_transition)(struct mei_device *dev);
bool (*pg_is_enabled)(struct mei_device *dev);
@@ -614,9 +617,9 @@ void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);
*/
-static inline void mei_hw_config(struct mei_device *dev)
+static inline int mei_hw_config(struct mei_device *dev)
{
- dev->ops->hw_config(dev);
+ return dev->ops->hw_config(dev);
}
static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
@@ -711,6 +714,13 @@ static inline int mei_count_full_read_slots(struct mei_device *dev)
return dev->ops->rdbuf_full_slots(dev);
}
+static inline int mei_trc_status(struct mei_device *dev, u32 *trc)
+{
+ if (dev->ops->trc_status)
+ return dev->ops->trc_status(dev, trc);
+ return -EOPNOTSUPP;
+}
+
static inline int mei_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3dca63eddaa0..c845b7e40f26 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,12 +98,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_V, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
/* required last entry */
@@ -120,6 +121,13 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */
+static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ return pci_read_config_dword(pdev, where, val);
+}
+
/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
*
@@ -191,13 +199,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(pdev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg);
if (!dev) {
err = -ENOMEM;
goto end;
}
hw = to_me_hw(dev);
hw->mem_addr = pcim_iomap_table(pdev)[0];
+ hw->irq = pdev->irq;
+ hw->read_fws = mei_me_read_fws;
pci_enable_msi(pdev);
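The read_fws hook is what decouples mei_me_fw_status() and mei_me_hw_config() from PCI config space: the PCI glue installs mei_me_read_fws as shown, and a future non-PCI binding can supply its own reader. A minimal sketch under invented names (my_fws_base and its setup are assumptions for illustration):

	/* Hypothetical MMIO-backed status reader for a non-PCI MEI
	 * instance; my_fws_base is assumed to be ioremapped by the
	 * platform glue before mei_start() runs. */
	static void __iomem *my_fws_base;

	static int my_read_fws(const struct mei_device *dev, int where, u32 *val)
	{
		*val = readl(my_fws_base + where);
		return 0;
	}

	/* probe-time wiring, mirroring the PCI path above:
	 *	hw = to_me_hw(dev);
	 *	hw->read_fws = my_read_fws;
	 */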
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 948f45bbf135..b6841ba6d922 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "Intel MIC & related support"
-comment "Intel MIC Bus Driver"
-
config INTEL_MIC_BUS
tristate "Intel MIC Bus Driver"
depends on 64BIT && PCI && X86
@@ -18,8 +16,6 @@ config INTEL_MIC_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Bus Driver"
-
config SCIF_BUS
tristate "SCIF Bus Driver"
depends on 64BIT && PCI && X86
@@ -35,8 +31,6 @@ config SCIF_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Bus Driver"
-
config VOP_BUS
tristate "VOP Bus Driver"
help
@@ -51,8 +45,6 @@ config VOP_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Host Driver"
-
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
depends on 64BIT && PCI && X86
@@ -71,8 +63,6 @@ config INTEL_MIC_HOST
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Card Driver"
-
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
depends on 64BIT && X86
@@ -90,8 +80,6 @@ config INTEL_MIC_CARD
For more information see
<http://software.intel.com/en-us/mic-developer>.
-comment "SCIF Driver"
-
config SCIF
tristate "SCIF Driver"
depends on 64BIT && PCI && X86 && SCIF_BUS && IOMMU_SUPPORT
@@ -110,8 +98,6 @@ config SCIF
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "Intel MIC Coprocessor State Management (COSM) Drivers"
-
config MIC_COSM
tristate "Intel MIC Coprocessor State Management (COSM) Drivers"
depends on 64BIT && PCI && X86 && SCIF
@@ -128,8 +114,6 @@ config MIC_COSM
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-comment "VOP Driver"
-
config VOP
tristate "VOP Driver"
depends on VOP_BUS
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 97415afd79f3..345bf843a38e 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#ifndef _OCXL_INTERNAL_H_
#define _OCXL_INTERNAL_H_
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index 024f417e7e01..17e21cb2addd 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright 2017 IBM Corp.
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ocxl
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 6e208a060a58..a5e317073d95 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -94,7 +94,7 @@ enum pci_barno {
struct pci_endpoint_test {
struct pci_dev *pdev;
void __iomem *base;
- void __iomem *bar[6];
+ void __iomem *bar[PCI_STD_NUM_BARS];
struct completion irq_raised;
int last_irq;
int num_irqs;
@@ -687,7 +687,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!pci_endpoint_test_request_irq(test))
goto err_disable_irq;
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
if (!base) {
@@ -740,7 +740,7 @@ err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
err_iounmap:
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
@@ -771,7 +771,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
misc_deregister(&test->miscdev);
kfree(misc_device->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 3a8d76d1ccae..2817f4751306 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -119,7 +119,7 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
"tfh_write_restart", "tgh_invalidate"};
- seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
+ seq_puts(s, "#id count aver-clks max-clks\n");
for (op = 0; op < mcsop_last; op++) {
count = atomic_long_read(&mcs_op_statistics[op].count);
total = atomic_long_read(&mcs_op_statistics[op].total);
@@ -165,8 +165,7 @@ static int cch_seq_show(struct seq_file *file, void *data)
const char *mode[] = { "??", "UPM", "INTR", "OS_POLL" };
if (gid == 0)
- seq_printf(file, "#%5s%5s%6s%7s%9s%6s%8s%8s\n", "gid", "bid",
- "ctx#", "asid", "pid", "cbrs", "dsbytes", "mode");
+ seq_puts(file, "# gid bid ctx# asid pid cbrs dsbytes mode\n");
if (gru)
for (i = 0; i < GRU_NUM_CCH; i++) {
ts = gru->gs_gts[i];
@@ -191,10 +190,8 @@ static int gru_seq_show(struct seq_file *file, void *data)
struct gru_state *gru = GID_TO_GRU(gid);
if (gid == 0) {
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "gid", "nid",
- "ctx", "cbr", "dsr", "ctx", "cbr", "dsr");
- seq_printf(file, "#%5s%5s%7s%6s%6s%8s%6s%6s\n", "", "", "busy",
- "busy", "busy", "free", "free", "free");
+ seq_puts(file, "# gid nid ctx cbr dsr ctx cbr dsr\n");
+ seq_puts(file, "# busy busy busy free free free\n");
}
if (gru) {
ctxfree = GRU_NUM_CCH - gru->gs_active_contexts;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f30448bf3a63..6c1a23cb3e8c 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -340,8 +340,6 @@ static const struct of_device_id sram_dt_ids[] = {
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
- struct resource *res;
- size_t size;
int ret;
int (*init_func)(void);
@@ -351,25 +349,14 @@ static int sram_probe(struct platform_device *pdev)
sram->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(sram->dev, "found no memory resource\n");
- return -EINVAL;
- }
-
- size = resource_size(res);
-
- if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
- dev_err(sram->dev, "could not request region for resource\n");
- return -EBUSY;
- }
-
if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
- sram->virt_base = devm_ioremap(sram->dev, res->start, size);
+ sram->virt_base = devm_platform_ioremap_resource(pdev, 0);
else
- sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
- if (!sram->virt_base)
- return -ENOMEM;
+ sram->virt_base = devm_platform_ioremap_resource_wc(pdev, 0);
+ if (IS_ERR(sram->virt_base)) {
+ dev_err(&pdev->dev, "could not map SRAM registers\n");
+ return PTR_ERR(sram->virt_base);
+ }
sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
NUMA_NO_NODE, NULL);
@@ -382,7 +369,8 @@ static int sram_probe(struct platform_device *pdev)
else
clk_prepare_enable(sram->clk);
- ret = sram_reserve_regions(sram, res);
+ ret = sram_reserve_regions(sram,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (ret)
goto err_disable_clk;
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 7d9e23aa0b92..2ae9948a91e1 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -708,7 +708,6 @@ EXPORT_SYMBOL_GPL(st_unregister);
*/
static int st_tty_open(struct tty_struct *tty)
{
- int err = 0;
struct st_data_s *st_gdata;
pr_info("%s ", __func__);
@@ -731,7 +730,8 @@ static int st_tty_open(struct tty_struct *tty)
*/
st_kim_complete(st_gdata->kim_data);
pr_debug("done %s", __func__);
- return err;
+
+ return 0;
}
static void st_tty_close(struct tty_struct *tty)
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index ff3c396146ff..ce16d6b99295 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -968,7 +968,7 @@ static const struct file_operations vmuser_fops = {
.release = vmci_host_close,
.poll = vmci_host_poll,
.unlocked_ioctl = vmci_host_unlocked_ioctl,
- .compat_ioctl = vmci_host_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
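Both this hunk and the mei one above replace hand-rolled compat shims with the generic compat_ptr_ioctl() helper, which is the right choice whenever every ioctl argument is either a pointer or ignored: it applies compat_ptr() to the argument and forwards to .unlocked_ioctl. The resulting pattern, with a hypothetical handler name:

	static const struct file_operations my_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= my_ioctl,	/* hypothetical handler */
		.compat_ioctl	= compat_ptr_ioctl,
	};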
static struct miscdevice vmci_host_miscdev = {
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2c71a434c915..95b41c0891d0 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -408,38 +408,6 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
return 0;
}
-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
- u32 retries_max)
-{
- int err;
- u32 retry_count = 0;
-
- if (!status || !retries_max)
- return -EINVAL;
-
- do {
- err = __mmc_send_status(card, status, 5);
- if (err)
- break;
-
- if (!R1_STATUS(*status) &&
- (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
- break; /* RPMB programming operation complete */
-
- /*
- * Reschedule to give the MMC device a chance to continue
- * processing the previous command without being polled too
- * frequently.
- */
- usleep_range(1000, 5000);
- } while (++retry_count < retries_max);
-
- if (retry_count == retries_max)
- err = -EPERM;
-
- return err;
-}
-
static int ioctl_do_sanitize(struct mmc_card *card)
{
int err;
@@ -468,6 +436,58 @@ out:
return err;
}
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ u32 *resp_errs)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ bool done = time_after(jiffies, timeout);
+
+ err = __mmc_send_status(card, &status, 5);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "error %d requesting status\n", err);
+ return err;
+ }
+
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (done) {
+ dev_err(mmc_dev(card->host),
+ "Card stuck in wrong state! %s status: %#x\n",
+ __func__, status);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!mmc_blk_in_tran_state(status));
+
+ return err;
+}
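One detail worth keeping when reusing this loop: the deadline is sampled into done before CMD13 is issued, so the hardware is polled at least once even if the thread is scheduled out past a short timeout before the first iteration. The generic shape of the idiom, with hypothetical helpers:

	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
	u32 status;

	do {
		bool expired = time_after(jiffies, deadline);

		status = query_status();	/* always polled at least once */
		if (expired)
			return -ETIMEDOUT;
	} while (!state_ok(status));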
+
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
@@ -477,7 +497,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct scatterlist sg;
int err;
unsigned int target_part;
- u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
@@ -611,16 +630,12 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (idata->rpmb) {
+ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
/*
- * Ensure RPMB command has completed by polling CMD13
+ * Ensure RPMB/R1B command has completed by polling CMD13
* "Send Status".
*/
- err = ioctl_rpmb_card_status_poll(card, &status, 5);
- if (err)
- dev_err(mmc_dev(card->host),
- "%s: Card Status=0x%08X, error %d\n",
- __func__, status, err);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, NULL);
}
return err;
@@ -970,58 +985,6 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
return ms;
}
-static inline bool mmc_blk_in_tran_state(u32 status)
-{
- /*
- * Some cards mishandle the status bits, so make sure to check both the
- * busy indication and the card state.
- */
- return status & R1_READY_FOR_DATA &&
- (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
-}
-
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, u32 *resp_errs)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
- int err = 0;
- u32 status;
-
- do {
- bool done = time_after(jiffies, timeout);
-
- err = __mmc_send_status(card, &status, 5);
- if (err) {
- pr_err("%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- return err;
- }
-
- /* Accumulate any response error bits seen */
- if (resp_errs)
- *resp_errs |= status;
-
- /*
- * Timeout if the device never becomes ready for data and never
- * leaves the program state.
- */
- if (done) {
- pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
- mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__, status);
- return -ETIMEDOUT;
- }
-
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!mmc_blk_in_tran_state(status));
-
- return err;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1671,7 +1634,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
mmc_blk_send_stop(card, timeout);
- err = card_busy_detect(card, timeout, req, NULL);
+ err = card_busy_detect(card, timeout, NULL);
mmc_retune_release(card->host);
@@ -1895,7 +1858,7 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
return 0;
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, &status);
/*
* Do not assume data transferred correctly if there are any error bits
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 221127324709..abf8f5eb0a1c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1469,8 +1469,7 @@ void mmc_detach_bus(struct mmc_host *host)
mmc_bus_put(host);
}
-static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
- bool cd_irq)
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay, bool cd_irq)
{
/*
* If the device is configured as wakeup, we prevent a new sleep for
@@ -2129,7 +2128,7 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->hw_reset(host);
mmc_bus_put(host);
- if (ret)
+ if (ret < 0)
pr_warn("%s: tried to HW reset card, got error %d\n",
mmc_hostname(host), ret);
@@ -2297,11 +2296,8 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_get(host);
- /*
- * if there is a _removable_ card registered, check whether it is
- * still present
- */
- if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
+ /* Verify that a registered card is still functional, else remove it. */
+ if (host->bus_ops && !host->bus_dead)
host->bus_ops->detect(host);
host->detect_change = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 328c78dbee66..575ac0257af2 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -70,6 +70,8 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
+ bool cd_irq);
int _mmc_detect_card_removed(struct mmc_host *host);
int mmc_detect_card_removed(struct mmc_host *host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c8804895595f..f6912ded652d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -297,7 +297,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
}
}
-static void mmc_part_add(struct mmc_card *card, unsigned int size,
+static void mmc_part_add(struct mmc_card *card, u64 size,
unsigned int part_cfg, char *name, int idx, bool ro,
int area_type)
{
@@ -313,7 +313,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
int idx;
u8 hc_erase_grp_sz, hc_wp_grp_sz;
- unsigned int part_size;
+ u64 part_size;
/*
* General purpose partition feature support --
@@ -343,8 +343,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
<< 8) +
ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
- part_size *= (size_t)(hc_erase_grp_sz *
- hc_wp_grp_sz);
+ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
"gp%d", idx, false,
@@ -362,7 +361,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
int err = 0, idx;
- unsigned int part_size;
+ u64 part_size;
struct device_node *np;
bool broken_hpi = false;
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 2d2d9ea8be4f..3dba15bccce2 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
END_FIXUP
};
+
static const struct mmc_fixup sdio_fixup_methods[] = {
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
+ add_quirk, MMC_QUIRK_DISABLE_CD),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 26cabd53ddc5..ebb387aa5158 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1048,9 +1048,35 @@ static int mmc_sdio_runtime_resume(struct mmc_host *host)
return ret;
}
+/*
+ * SDIO HW reset
+ *
+ * Returns 0 if the HW reset was executed synchronously, returns 1 if the HW
+ * reset was asynchronously scheduled, else a negative error code.
+ */
static int mmc_sdio_hw_reset(struct mmc_host *host)
{
- mmc_power_cycle(host, host->card->ocr);
+ struct mmc_card *card = host->card;
+
+ /*
+ * In case the card is shared among multiple func drivers, reset the
+ * card through a rescan work. In this way it will be removed and
+ * re-detected, thus all func drivers become informed about it.
+ */
+ if (atomic_read(&card->sdio_funcs_probed) > 1) {
+ if (mmc_card_removed(card))
+ return 1;
+ host->rescan_entered = 0;
+ mmc_card_set_removed(card);
+ _mmc_detect_change(host, 0, false);
+ return 1;
+ }
+
+ /*
+ * Only a single func driver has been probed, so skip the heavy
+ * hotplug dance above and execute the reset immediately.
+ */
+ mmc_power_cycle(host, card->ocr);
return mmc_sdio_reinit_card(host);
}
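This also explains the mmc_hw_reset() tweak in core.c earlier in this diff: warning only on ret < 0 keeps the new "return 1, reset scheduled asynchronously" outcome from logging a spurious error.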
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 2963e6542958..3cc928282af7 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -138,6 +138,8 @@ static int sdio_bus_probe(struct device *dev)
if (ret)
return ret;
+ atomic_inc(&func->card->sdio_funcs_probed);
+
/* Unbound SDIO functions are always suspended.
* During probe, the function is set active and the usage count
* is incremented. If the driver supports runtime PM,
@@ -153,7 +155,10 @@ static int sdio_bus_probe(struct device *dev)
/* Set the default block size so the driver is sure it's something
* sensible. */
sdio_claim_host(func);
- ret = sdio_set_block_size(func, 0);
+ if (mmc_card_removed(func->card))
+ ret = -ENOMEDIUM;
+ else
+ ret = sdio_set_block_size(func, 0);
sdio_release_host(func);
if (ret)
goto disable_runtimepm;
@@ -165,6 +170,7 @@ static int sdio_bus_probe(struct device *dev)
return 0;
disable_runtimepm:
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
dev_pm_domain_detach(dev, false);
@@ -181,6 +187,7 @@ static int sdio_bus_remove(struct device *dev)
pm_runtime_get_sync(dev);
drv->remove(func);
+ atomic_dec(&func->card->sdio_funcs_probed);
if (func->irq_handler) {
pr_warn("WARNING: driver %s did not remove its interrupt handler!\n",
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 49ea02c467bf..d06b2dfe3c95 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -159,6 +159,7 @@ config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the ASPEED Secure Digital Host Controller Interface.
@@ -368,6 +369,17 @@ config MMC_SDHCI_F_SDH30
If unsure, say N.
+config MMC_SDHCI_MILBEAUT
+ tristate "SDHCI support for Socionext Milbeaut Serieas using F_SDH30"
+ depends on MMC_SDHCI_PLTFM
+ depends on OF
+ help
+	  This selects the Secure Digital Host Controller Interface (SDHCI)
+	  needed by the Milbeaut SoC for MMC / SD / SDIO support.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SDHCI_IPROC
tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
@@ -1011,6 +1023,7 @@ config MMC_SDHCI_AM654
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO
select MMC_SDHCI_IO_ACCESSORS
+ select MMC_CQHCI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in TI's AM654 SOCs. The controller supports
@@ -1019,3 +1032,11 @@ config MMC_SDHCI_AM654
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_OWL
+ tristate "Actions Semi Owl SD/MMC Host Controller support"
+ depends on HAS_DMA
+ depends on ARCH_ACTIONS || COMPILE_TEST
+ help
+ This selects support for the SD/MMC Host Controller on
+ Actions Semi Owl SoCs.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 11c4598e91d9..21d9089e5eda 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o
obj-$(CONFIG_MMC_SDHCI_F_SDH30) += sdhci_f_sdh30.o
+obj-$(CONFIG_MMC_SDHCI_MILBEAUT) += sdhci-milbeaut.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_SDHCI_AM654) += sdhci_am654.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -73,6 +74,7 @@ obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
obj-$(CONFIG_MMC_TOSHIBA_PCI) += toshsd.o
obj-$(CONFIG_MMC_BCM2835) += bcm2835.o
+obj-$(CONFIG_MMC_OWL) += owl-mmc.o
obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o
obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index c26fbe5f2222..6f065bb5c55a 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -583,11 +583,11 @@ static void atmci_init_debugfs(struct atmel_mci_slot *slot)
debugfs_create_file("regs", S_IRUSR, root, host, &atmci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
- debugfs_create_x32("pending_events", S_IRUSR, root,
- (u32 *)&host->pending_events);
- debugfs_create_x32("completed_events", S_IRUSR, root,
- (u32 *)&host->completed_events);
+ debugfs_create_u32("state", S_IRUSR, root, &host->state);
+ debugfs_create_xul("pending_events", S_IRUSR, root,
+ &host->pending_events);
+ debugfs_create_xul("completed_events", S_IRUSR, root,
+ &host->completed_events);
}
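pending_events and completed_events are unsigned long bitmaps, so the old (u32 *) casts exposed only half of the value on 64-bit kernels (and the wrong half on big-endian ones); debugfs_create_xul() takes the native unsigned long and picks the right width itself. The general usage, sketched:

	/* Export an unsigned long in hex without casting through u32;
	 * my_events and parent are placeholders. */
	static unsigned long my_events;

	debugfs_create_xul("events", 0400, parent, &my_events);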
#if defined(CONFIG_OF)
@@ -2347,8 +2347,7 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
static int atmci_configure_dma(struct atmel_mci *host)
{
- host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
- "rxtx");
+ host->dma.chan = dma_request_chan(&host->pdev->dev, "rxtx");
if (PTR_ERR(host->dma.chan) == -ENODEV) {
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 148414d7f0c9..99f61fd2a658 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1357,7 +1357,6 @@ static int bcm2835_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct clk *clk;
- struct resource *iomem;
struct bcm2835_host *host;
struct mmc_host *mmc;
const __be32 *regaddr_p;
@@ -1373,8 +1372,7 @@ static int bcm2835_probe(struct platform_device *pdev)
host->pdev = pdev;
spin_lock_init(&host->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->ioaddr = devm_ioremap_resource(dev, iomem);
+ host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->ioaddr)) {
ret = PTR_ERR(host->ioaddr);
goto err;
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 22aded1065ae..916746c6c2c7 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -148,7 +148,6 @@ static int octeon_mmc_probe(struct platform_device *pdev)
{
struct device_node *cn, *node = pdev->dev.of_node;
struct cvm_mmc_host *host;
- struct resource *res;
void __iomem *base;
int mmc_irq[9];
int i, ret = 0;
@@ -205,23 +204,13 @@ static int octeon_mmc_probe(struct platform_device *pdev)
host->last_slot = -1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[0] is missing\n");
- return -ENXIO;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
host->base = (void __iomem *)base;
host->reg_off = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Platform resource[1] is missing\n");
- return -EINVAL;
- }
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
host->dma_base = (void __iomem *)base;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 79c55c7b4afd..fc9d4d000f97 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -176,11 +176,11 @@ static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
- debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
- debugfs_create_x32("pending_events", S_IRUSR, root,
- (u32 *)&host->pending_events);
- debugfs_create_x32("completed_events", S_IRUSR, root,
- (u32 *)&host->completed_events);
+ debugfs_create_u32("state", S_IRUSR, root, &host->state);
+ debugfs_create_xul("pending_events", S_IRUSR, root,
+ &host->pending_events);
+ debugfs_create_xul("completed_events", S_IRUSR, root,
+ &host->completed_events);
}
#endif /* defined(CONFIG_DEBUG_FS) */
@@ -3441,8 +3441,8 @@ int dw_mci_runtime_resume(struct device *dev)
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index f816c06ef916..78383f60a3dc 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -41,6 +41,7 @@
#define JZ_REG_MMC_RESP_FIFO 0x34
#define JZ_REG_MMC_RXFIFO 0x38
#define JZ_REG_MMC_TXFIFO 0x3C
+#define JZ_REG_MMC_LPM 0x40
#define JZ_REG_MMC_DMAC 0x44
#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
@@ -77,6 +78,8 @@
#define JZ_MMC_CMDAT_IO_ABORT BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
+#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
+#define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN BIT(8)
#define JZ_MMC_CMDAT_INIT BIT(7)
#define JZ_MMC_CMDAT_BUSY BIT(6)
@@ -98,12 +101,20 @@
#define JZ_MMC_DMAC_DMA_SEL BIT(1)
#define JZ_MMC_DMAC_DMA_EN BIT(0)
+#define JZ_MMC_LPM_DRV_RISING BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
+#define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
+#define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
+#define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
+
#define JZ_MMC_CLK_RATE 24000000
enum jz4740_mmc_version {
JZ_MMC_JZ4740,
JZ_MMC_JZ4725B,
+ JZ_MMC_JZ4760,
JZ_MMC_JZ4780,
+ JZ_MMC_X1000,
};
enum jz4740_mmc_state {
@@ -852,6 +863,22 @@ static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
}
writew(div, host->base + JZ_REG_MMC_CLKRT);
+
+ if (real_rate > 25000000) {
+ if (host->version >= JZ_MMC_X1000) {
+ writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
+ JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4760) {
+ writel(JZ_MMC_LPM_DRV_RISING |
+ JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ } else if (host->version >= JZ_MMC_JZ4725B)
+ writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
+ host->base + JZ_REG_MMC_LPM);
+ }
+
return real_rate;
}
@@ -895,11 +922,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
- host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
break;
case MMC_BUS_WIDTH_4:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
break;
+ case MMC_BUS_WIDTH_8:
+ host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
+ host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
+ break;
default:
break;
}
@@ -924,7 +956,9 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
static const struct of_device_id jz4740_mmc_of_match[] = {
{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
+ { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
+ { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
@@ -1025,11 +1059,12 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
goto err_release_dma;
}
- dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");
+ dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
host->use_dma ? "DMA" : "PIO",
- (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);
+ (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
+ ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
return 0;
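The new jz4760/x1000 entries in jz4740_mmc_of_match carry the controller version in .data, and the probe path gates features such as the LPM delay chain on that version. A hedged sketch of the usual retrieval pattern (device_get_match_data() is the generic helper; the driver itself may use an OF-specific variant):

	/* map the matched compatible back to the version enum */
	host->version = (enum jz4740_mmc_version)(uintptr_t)
			device_get_match_data(&pdev->dev);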
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 66e354d51ee9..74c6cfbf9172 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1421,7 +1421,7 @@ static int mmc_spi_probe(struct spi_device *spi)
* Index 0 is card detect
* Old boardfiles were specifying 1 ms as debounce
*/
- status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
+ status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);
if (status == -EPROBE_DEFER)
goto fail_add_host;
if (!status) {
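The mmc_spi change is a unit fix: the debounce argument of mmc_gpiod_request_cd() is expressed in microseconds, so the 1 ms that old boardfiles specified has to be passed as 1000:

	/* debounce is in microseconds; 1 ms == 1000 us */
	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL);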
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index c37e70dbe250..40e72c30ea84 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -44,6 +44,7 @@
#define DRIVER_NAME "mmci-pl18x"
static void mmci_variant_init(struct mmci_host *host);
+static void ux500_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);
static unsigned int fmax = 515633;
@@ -184,7 +185,7 @@ static struct variant_data variant_ux500 = {
.irq_pio_mask = MCI_IRQ_PIO_MASK,
.start_err = MCI_STARTBITERR,
.opendrain = MCI_OD,
- .init = mmci_variant_init,
+ .init = ux500_variant_init,
};
static struct variant_data variant_ux500v2 = {
@@ -261,6 +262,10 @@ static struct variant_data variant_stm32_sdmmc = {
.datalength_bits = 25,
.datactrl_blocksz = 14,
.stm32_idmabsize_mask = GENMASK(12, 5),
+ .busy_timeout = true,
+ .busy_detect = true,
+ .busy_detect_flag = MCI_STM32_BUSYD0,
+ .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
.init = sdmmc_variant_init,
};
@@ -419,7 +424,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
mmci_write_clkreg(host, clk);
}
-void mmci_dma_release(struct mmci_host *host)
+static void mmci_dma_release(struct mmci_host *host)
{
if (host->ops && host->ops->dma_release)
host->ops->dma_release(host);
@@ -427,7 +432,7 @@ void mmci_dma_release(struct mmci_host *host)
host->use_dma = false;
}
-void mmci_dma_setup(struct mmci_host *host)
+static void mmci_dma_setup(struct mmci_host *host)
{
if (!host->ops || !host->ops->dma_setup)
return;
@@ -462,7 +467,7 @@ static int mmci_validate_data(struct mmci_host *host,
return 0;
}
-int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
+static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
int err;
@@ -478,7 +483,7 @@ int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
return err;
}
-void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
+static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
int err)
{
if (host->ops && host->ops->unprep_data)
@@ -487,7 +492,7 @@ void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
data->host_cookie = 0;
}
-void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
@@ -495,7 +500,7 @@ void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
host->ops->get_next_data(host, data);
}
-int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
struct mmc_data *data = host->data;
int ret;
@@ -530,7 +535,7 @@ int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
return 0;
}
-void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!host->use_dma)
return;
@@ -539,7 +544,7 @@ void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
host->ops->dma_finalize(host, data);
}
-void mmci_dma_error(struct mmci_host *host)
+static void mmci_dma_error(struct mmci_host *host)
{
if (!host->use_dma)
return;
@@ -610,6 +615,67 @@ static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}
+static bool ux500_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+
+ /*
+ * Before unmasking for the busy end IRQ, confirm that the
+ * command was sent successfully. To keep track of having a
+ * command in-progress, waiting for busy signaling to end,
+ * store the status in host->busy_status.
+ *
+ * Note that the card may need a couple of clock cycles before
+ * it starts signaling busy on DAT0, hence re-read the
+ * MMCISTATUS register here, to allow the busy bit to be set.
+ * Potentially we may even need to poll the register for a
+ * while, to allow it to be set, but tests indicate that it
+ * isn't needed.
+ */
+ if (!host->busy_status && !(status & err_msk) &&
+ (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
+ writel(readl(base + MMCIMASK0) |
+ host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+
+ host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent, then bail out if busy status is set and wait for the
+ * busy end IRQ.
+ *
+ * Note that the HW triggers an IRQ on both edges while
+ * monitoring DAT0 for busy completion, but there is only one
+ * status bit in MMCISTATUS for the busy state. Therefore
+ * both the start and the end interrupts need to be cleared,
+ * one after the other. So, clear the busy start IRQ here.
+ */
+ if (host->busy_status &&
+ (status & host->variant->busy_detect_flag)) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+ return false;
+ }
+
+ /*
+ * If there is a command in-progress that has been successfully
+ * sent and the busy bit isn't set, it means we have received
+ * the busy end IRQ. Clear and mask the IRQ, then continue to
+ * process the command.
+ */
+ if (host->busy_status) {
+ writel(host->variant->busy_detect_mask, base + MMCICLEAR);
+
+ writel(readl(base + MMCIMASK0) &
+ ~host->variant->busy_detect_mask, base + MMCIMASK0);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -948,14 +1014,21 @@ static struct mmci_host_ops mmci_variant_ops = {
};
#endif
-void mmci_variant_init(struct mmci_host *host)
+static void mmci_variant_init(struct mmci_host *host)
+{
+ host->ops = &mmci_variant_ops;
+}
+
+static void ux500_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
}
-void ux500v2_variant_init(struct mmci_host *host)
+static void ux500v2_variant_init(struct mmci_host *host)
{
host->ops = &mmci_variant_ops;
+ host->ops->busy_complete = ux500_busy_complete;
host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}
@@ -1075,6 +1148,7 @@ static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
void __iomem *base = host->base;
+ unsigned long long clks;
dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
cmd->opcode, cmd->arg, cmd->flags);
@@ -1097,6 +1171,16 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
else
c |= host->variant->cmdreg_srsp;
}
+
+ if (host->variant->busy_timeout && cmd->flags & MMC_RSP_BUSY) {
+ if (!cmd->busy_timeout)
+ cmd->busy_timeout = 10 * MSEC_PER_SEC;
+
+ clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+ do_div(clks, MSEC_PER_SEC);
+ writel_relaxed(clks, host->base + MMCIDATATIMER);
+ }
+
if (/*interrupt*/0)
c |= MCI_CPSM_INTERRUPT;
@@ -1201,6 +1285,7 @@ static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
+ u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
void __iomem *base = host->base;
bool sbc, busy_resp;
@@ -1215,74 +1300,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
* handling. Note that we tag on any latent IRQs postponed
* due to waiting for busy status.
*/
- if (!((status|host->busy_status) &
- (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
+ if (host->variant->busy_timeout && busy_resp)
+ err_msk |= MCI_DATATIMEOUT;
+
+ if (!((status | host->busy_status) &
+ (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
return;
/* Handle busy detection on DAT0 if the variant supports it. */
- if (busy_resp && host->variant->busy_detect) {
-
- /*
- * Before unmasking for the busy end IRQ, confirm that the
- * command was sent successfully. To keep track of having a
- * command in-progress, waiting for busy signaling to end,
- * store the status in host->busy_status.
- *
- * Note that, the card may need a couple of clock cycles before
- * it starts signaling busy on DAT0, hence re-read the
- * MMCISTATUS register here, to allow the busy bit to be set.
- * Potentially we may even need to poll the register for a
- * while, to allow it to be set, but tests indicates that it
- * isn't needed.
- */
- if (!host->busy_status &&
- !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
- (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-
- writel(readl(base + MMCIMASK0) |
- host->variant->busy_detect_mask,
- base + MMCIMASK0);
-
- host->busy_status =
- status & (MCI_CMDSENT|MCI_CMDRESPEND);
- return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent, then bail out if busy status is set and wait for the
- * busy end IRQ.
- *
- * Note that, the HW triggers an IRQ on both edges while
- * monitoring DAT0 for busy completion, but there is only one
- * status bit in MMCISTATUS for the busy state. Therefore
- * both the start and the end interrupts needs to be cleared,
- * one after the other. So, clear the busy start IRQ here.
- */
- if (host->busy_status &&
- (status & host->variant->busy_detect_flag)) {
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
+ if (busy_resp && host->variant->busy_detect)
+ if (!host->ops->busy_complete(host, status, err_msk))
return;
- }
-
- /*
- * If there is a command in-progress that has been successfully
- * sent and the busy bit isn't set, it means we have received
- * the busy end IRQ. Clear and mask the IRQ, then continue to
- * process the command.
- */
- if (host->busy_status) {
-
- writel(host->variant->busy_detect_mask,
- host->base + MMCICLEAR);
-
- writel(readl(base + MMCIMASK0) &
- ~host->variant->busy_detect_mask,
- base + MMCIMASK0);
- host->busy_status = 0;
- }
- }
host->cmd = NULL;
@@ -1290,6 +1318,9 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->error = -ETIMEDOUT;
} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
cmd->error = -EILSEQ;
+ } else if (host->variant->busy_timeout && busy_resp &&
+ status & MCI_DATATIMEOUT) {
+ cmd->error = -ETIMEDOUT;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
cmd->resp[1] = readl(base + MMCIRESPONSE1);
@@ -1583,6 +1614,20 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ u32 max_busy_timeout = 0;
+
+ if (!host->variant->busy_detect)
+ return;
+
+ if (host->variant->busy_timeout && mmc->actual_clock)
+ max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);
+
+ mmc->max_busy_timeout = max_busy_timeout;
+}
+
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
@@ -1687,6 +1732,8 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
mmci_set_clkreg(host, ios->clock);
+ mmci_set_max_busy_timeout(mmc);
+
if (host->ops && host->ops->set_pwrreg)
host->ops->set_pwrreg(host, pwr);
else
@@ -1957,7 +2004,6 @@ static int mmci_probe(struct amba_device *dev,
mmci_write_datactrlreg(host,
host->variant->busy_dpsm_flag);
mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- mmc->max_busy_timeout = 0;
}
/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
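mmci_set_max_busy_timeout() above advertises the largest busy timeout the 32-bit data timer can represent: the timer counts card-clock cycles, so ~0UL cycles at actual_clock Hz last ~0UL / (actual_clock / 1000) milliseconds. The same conversion runs in the other direction in mmci_start_command(); as a standalone sanity check (a sketch, reusing the names from the diff):

	/* ms -> card-clock cycles for MMCIDATATIMER */
	clks = (unsigned long long)cmd->busy_timeout * host->cclk;
	do_div(clks, MSEC_PER_SEC);

	/* and the inverse: the largest ms value that still fits 32 bits */
	mmc->max_busy_timeout = ~0UL / (mmc->actual_clock / MSEC_PER_SEC);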
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 833236ecb31e..158e1231aa23 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -164,6 +164,7 @@
#define MCI_ST_CARDBUSY (1 << 24)
/* Extended status bits for the STM32 variants */
#define MCI_STM32_BUSYD0 BIT(20)
+#define MCI_STM32_BUSYD0END BIT(21)
#define MMCICLEAR 0x038
#define MCI_CMDCRCFAILCLR (1 << 0)
@@ -287,6 +288,8 @@ struct mmci_host;
* @signal_direction: input/out direction of bus signals can be indicated
* @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
* @busy_detect: true if the variant supports busy detection on DAT0.
+ * @busy_timeout: true if the variant starts the data timer when the DPSM
+ * enters the Wait_R or Busy state.
* @busy_dpsm_flag: bitmask enabling busy detection in the DPSM
* @busy_detect_flag: bitmask identifying the bit in the MMCISTATUS register
* indicating that the card is busy
@@ -333,6 +336,7 @@ struct variant_data {
u8 signal_direction:1;
u8 pwrreg_clkgate:1;
u8 busy_detect:1;
+ u8 busy_timeout:1;
u32 busy_dpsm_flag;
u32 busy_detect_flag;
u32 busy_detect_mask;
@@ -366,6 +370,7 @@ struct mmci_host_ops {
void (*dma_error)(struct mmci_host *host);
void (*set_clkreg)(struct mmci_host *host, unsigned int desired);
void (*set_pwrreg)(struct mmci_host *host, unsigned int pwr);
+ bool (*busy_complete)(struct mmci_host *host, u32 status, u32 err_msk);
};
struct mmci_host {
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 8e83ae6920ae..a4f7e8e689d3 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -25,8 +25,8 @@ struct sdmmc_priv {
void *sg_cpu;
};
-int sdmmc_idma_validate_data(struct mmci_host *host,
- struct mmc_data *data)
+static int sdmmc_idma_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
{
struct scatterlist *sg;
int i;
@@ -282,6 +282,47 @@ static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
return datactrl;
}
+static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
+{
+ void __iomem *base = host->base;
+ u32 busy_d0, busy_d0end, mask, sdmmc_status;
+
+ mask = readl_relaxed(base + MMCIMASK0);
+ sdmmc_status = readl_relaxed(base + MMCISTATUS);
+ busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
+ busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;
+
+ /* complete if there is an error or busy_d0end */
+ if ((status & err_msk) || busy_d0end)
+ goto complete;
+
+ /*
+ * On a response, busy signaling is reflected in the BUSYD0 flag.
+ * If busy_d0 is in progress, we must enable the busyd0end interrupt
+ * and wait for its completion. Otherwise this request has no busy step.
+ */
+ if (busy_d0) {
+ if (!host->busy_status) {
+ writel_relaxed(mask | host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ host->busy_status = status &
+ (MCI_CMDSENT | MCI_CMDRESPEND);
+ }
+ return false;
+ }
+
+complete:
+ if (host->busy_status) {
+ writel_relaxed(mask & ~host->variant->busy_detect_mask,
+ base + MMCIMASK0);
+ writel_relaxed(host->variant->busy_detect_mask,
+ base + MMCICLEAR);
+ host->busy_status = 0;
+ }
+
+ return true;
+}
+
static struct mmci_host_ops sdmmc_variant_ops = {
.validate_data = sdmmc_idma_validate_data,
.prep_data = sdmmc_idma_prep_data,
@@ -292,6 +333,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_finalize = sdmmc_idma_finalize,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
+ .busy_complete = sdmmc_busy_complete,
};
void sdmmc_variant_init(struct mmci_host *host)
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index a0670e9cd012..fc6b9cf27d0b 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -608,8 +608,8 @@ static int moxart_probe(struct platform_device *pdev)
host->timeout = msecs_to_jiffies(1000);
host->sysclk = clk_get_rate(clk);
host->fifo_width = readl(host->base + REG_FEATURE) << 2;
- host->dma_chan_tx = dma_request_slave_channel_reason(dev, "tx");
- host->dma_chan_rx = dma_request_slave_channel_reason(dev, "rx");
+ host->dma_chan_tx = dma_request_chan(dev, "tx");
+ host->dma_chan_rx = dma_request_chan(dev, "rx");
spin_lock_init(&host->lock);
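The moxart hunk is a plain rename: dma_request_slave_channel_reason() was a macro alias for dma_request_chan(), so the behavior is unchanged. Both return ERR_PTR() rather than NULL on failure, which callers must check, e.g.:

	host->dma_chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_chan_tx))
		ret = PTR_ERR(host->dma_chan_tx);	/* e.g. -EPROBE_DEFER */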
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 952fa4063ff8..767e964ca5a2 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1510,8 +1510,35 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- if (mmc_pdata(host)->init_card)
- mmc_pdata(host)->init_card(card);
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+ struct device_node *np = mmc_dev(mmc)->of_node;
+
+ /*
+ * REVISIT: should be moved to sdio core and made more
+ * general e.g. by expanding the DT bindings of child nodes
+ * to provide a mechanism for passing this information:
+ * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ */
+
+ np = of_get_compatible_child(np, "ti,wl1251");
+ if (np) {
+ /*
+ * We have TI wl1251 attached to MMC3. Pass this
+ * information to the SDIO core because it can't be
+ * probed by normal methods.
+ */
+
+ dev_info(host->dev, "found wl1251\n");
+ card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+ card->cccr.wide_bus = 1;
+ card->cis.vendor = 0x104c;
+ card->cis.device = 0x9066;
+ card->cis.blksize = 512;
+ card->cis.max_dtr = 24000000;
+ card->ocr = 0x80;
+ of_node_put(np);
+ }
+ }
}
static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
new file mode 100644
index 000000000000..771e3d00f1bb
--- /dev/null
+++ b/drivers/mmc/host/owl-mmc.c
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Actions Semi Owl SoCs SD/MMC driver
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ *
+ * TODO: SDIO support
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+/*
+ * SDC registers
+ */
+#define OWL_REG_SD_EN 0x0000
+#define OWL_REG_SD_CTL 0x0004
+#define OWL_REG_SD_STATE 0x0008
+#define OWL_REG_SD_CMD 0x000c
+#define OWL_REG_SD_ARG 0x0010
+#define OWL_REG_SD_RSPBUF0 0x0014
+#define OWL_REG_SD_RSPBUF1 0x0018
+#define OWL_REG_SD_RSPBUF2 0x001c
+#define OWL_REG_SD_RSPBUF3 0x0020
+#define OWL_REG_SD_RSPBUF4 0x0024
+#define OWL_REG_SD_DAT 0x0028
+#define OWL_REG_SD_BLK_SIZE 0x002c
+#define OWL_REG_SD_BLK_NUM 0x0030
+#define OWL_REG_SD_BUF_SIZE 0x0034
+
+/* SD_EN Bits */
+#define OWL_SD_EN_RANE BIT(31)
+#define OWL_SD_EN_RAN_SEED(x) (((x) & 0x3f) << 24)
+#define OWL_SD_EN_S18EN BIT(12)
+#define OWL_SD_EN_RESE BIT(10)
+#define OWL_SD_EN_DAT1_S BIT(9)
+#define OWL_SD_EN_CLK_S BIT(8)
+#define OWL_SD_ENABLE BIT(7)
+#define OWL_SD_EN_BSEL BIT(6)
+#define OWL_SD_EN_SDIOEN BIT(3)
+#define OWL_SD_EN_DDREN BIT(2)
+#define OWL_SD_EN_DATAWID(x) (((x) & 0x3) << 0)
+
+/* SD_CTL Bits */
+#define OWL_SD_CTL_TOUTEN BIT(31)
+#define OWL_SD_CTL_TOUTCNT(x) (((x) & 0x7f) << 24)
+#define OWL_SD_CTL_DELAY_MSK GENMASK(23, 16)
+#define OWL_SD_CTL_RDELAY(x) (((x) & 0xf) << 20)
+#define OWL_SD_CTL_WDELAY(x) (((x) & 0xf) << 16)
+#define OWL_SD_CTL_CMDLEN BIT(13)
+#define OWL_SD_CTL_SCC BIT(12)
+#define OWL_SD_CTL_TCN(x) (((x) & 0xf) << 8)
+#define OWL_SD_CTL_TS BIT(7)
+#define OWL_SD_CTL_LBE BIT(6)
+#define OWL_SD_CTL_C7EN BIT(5)
+#define OWL_SD_CTL_TM(x) (((x) & 0xf) << 0)
+
+#define OWL_SD_DELAY_LOW_CLK 0x0f
+#define OWL_SD_DELAY_MID_CLK 0x0a
+#define OWL_SD_DELAY_HIGH_CLK 0x09
+#define OWL_SD_RDELAY_DDR50 0x0a
+#define OWL_SD_WDELAY_DDR50 0x08
+
+/* SD_STATE Bits */
+#define OWL_SD_STATE_DAT1BS BIT(18)
+#define OWL_SD_STATE_SDIOB_P BIT(17)
+#define OWL_SD_STATE_SDIOB_EN BIT(16)
+#define OWL_SD_STATE_TOUTE BIT(15)
+#define OWL_SD_STATE_BAEP BIT(14)
+#define OWL_SD_STATE_MEMRDY BIT(12)
+#define OWL_SD_STATE_CMDS BIT(11)
+#define OWL_SD_STATE_DAT1AS BIT(10)
+#define OWL_SD_STATE_SDIOA_P BIT(9)
+#define OWL_SD_STATE_SDIOA_EN BIT(8)
+#define OWL_SD_STATE_DAT0S BIT(7)
+#define OWL_SD_STATE_TEIE BIT(6)
+#define OWL_SD_STATE_TEI BIT(5)
+#define OWL_SD_STATE_CLNR BIT(4)
+#define OWL_SD_STATE_CLC BIT(3)
+#define OWL_SD_STATE_WC16ER BIT(2)
+#define OWL_SD_STATE_RC16ER BIT(1)
+#define OWL_SD_STATE_CRC7ER BIT(0)
+
+struct owl_mmc_host {
+ struct device *dev;
+ struct reset_control *reset;
+ void __iomem *base;
+ struct clk *clk;
+ struct completion sdc_complete;
+ spinlock_t lock;
+ int irq;
+ u32 clock;
+ bool ddr_50;
+
+ enum dma_data_direction dma_dir;
+ struct dma_chan *dma;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config dma_cfg;
+ struct completion dma_complete;
+
+ struct mmc_host *mmc;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+};
+
+static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
+{
+ unsigned int regval;
+
+ regval = readl(reg);
+
+ if (state)
+ regval |= val;
+ else
+ regval &= ~val;
+
+ writel(regval, reg);
+}
+
+static irqreturn_t owl_irq_handler(int irq, void *devid)
+{
+ struct owl_mmc_host *owl_host = devid;
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&owl_host->lock, flags);
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (state & OWL_SD_STATE_TEI) {
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ state |= OWL_SD_STATE_TEI;
+ writel(state, owl_host->base + OWL_REG_SD_STATE);
+ complete(&owl_host->sdc_complete);
+ }
+
+ spin_unlock_irqrestore(&owl_host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
+{
+ struct mmc_request *mrq = owl_host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ /* Should never be NULL */
+ WARN_ON(!mrq);
+
+ owl_host->mrq = NULL;
+
+ if (data)
+ dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
+ owl_host->dma_dir);
+
+ /* Finally finish request */
+ mmc_request_done(owl_host->mmc, mrq);
+}
+
+static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
+ struct mmc_command *cmd,
+ struct mmc_data *data)
+{
+ u32 mode, state, resp[2];
+ u32 cmd_rsp_mask = 0;
+
+ init_completion(&owl_host->sdc_complete);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_NONE:
+ mode = OWL_SD_CTL_TM(0);
+ break;
+
+ case MMC_RSP_R1:
+ if (data) {
+ if (data->flags & MMC_DATA_READ)
+ mode = OWL_SD_CTL_TM(4);
+ else
+ mode = OWL_SD_CTL_TM(5);
+ } else {
+ mode = OWL_SD_CTL_TM(1);
+ }
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+
+ break;
+
+ case MMC_RSP_R1B:
+ mode = OWL_SD_CTL_TM(3);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R2:
+ mode = OWL_SD_CTL_TM(2);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
+ break;
+
+ case MMC_RSP_R3:
+ mode = OWL_SD_CTL_TM(1);
+ cmd_rsp_mask = OWL_SD_STATE_CLNR;
+ break;
+
+ default:
+ dev_warn(owl_host->dev, "Unknown MMC command\n");
+ cmd->error = -EINVAL;
+ return;
+ }
+
+ /* Keep current WDELAY and RDELAY */
+ mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+
+ /* Start to send corresponding command type */
+ writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
+ writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);
+
+ /* Set LBE to send clk at the end of last read block */
+ if (data) {
+ mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
+ } else {
+ mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
+ mode |= OWL_SD_CTL_TS;
+ }
+
+ owl_host->cmd = cmd;
+
+ /* Start transfer */
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (data)
+ return;
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, 30 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ cmd->error = -ETIMEDOUT;
+ return;
+ }
+
+ state = readl(owl_host->base + OWL_REG_SD_STATE);
+ if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
+ if (cmd_rsp_mask & state) {
+ if (state & OWL_SD_STATE_CLNR) {
+ dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+
+ if (state & OWL_SD_STATE_CRC7ER) {
+ dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
+ cmd->error = -EILSEQ;
+ return;
+ }
+ }
+
+ if (mmc_resp_type(cmd) & MMC_RSP_136) {
+ cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
+ cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
+ } else {
+ resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
+ resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
+ cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
+ cmd->resp[1] = resp[1] >> 8;
+ }
+ }
+}
+
+static void owl_mmc_dma_complete(void *param)
+{
+ struct owl_mmc_host *owl_host = param;
+ struct mmc_data *data = owl_host->data;
+
+ if (data)
+ complete(&owl_host->dma_complete);
+}
+
+static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
+ struct mmc_data *data)
+{
+ u32 total;
+
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
+ true);
+ writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
+ writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
+ total = data->blksz * data->blocks;
+
+ if (total < 512)
+ writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
+ else
+ writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
+
+ if (data->flags & MMC_DATA_WRITE) {
+ owl_host->dma_dir = DMA_TO_DEVICE;
+ owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
+ } else {
+ owl_host->dma_dir = DMA_FROM_DEVICE;
+ owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
+ }
+
+ dma_map_sg(owl_host->dma->device->dev, data->sg,
+ data->sg_len, owl_host->dma_dir);
+
+ dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
+ owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
+ data->sg_len,
+ owl_host->dma_cfg.direction,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!owl_host->desc) {
+ dev_err(owl_host->dev, "Can't prepare slave sg\n");
+ return -EBUSY;
+ }
+
+ owl_host->data = data;
+
+ owl_host->desc->callback = owl_mmc_dma_complete;
+ owl_host->desc->callback_param = (void *)owl_host;
+ data->error = 0;
+
+ return 0;
+}
+
+static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ int ret;
+
+ owl_host->mrq = mrq;
+ if (mrq->data) {
+ ret = owl_mmc_prepare_data(owl_host, data);
+ if (ret < 0) {
+ data->error = ret;
+ goto err_out;
+ }
+
+ init_completion(&owl_host->dma_complete);
+ dmaengine_submit(owl_host->desc);
+ dma_async_issue_pending(owl_host->dma);
+ }
+
+ owl_mmc_send_cmd(owl_host, mrq->cmd, data);
+
+ if (data) {
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete,
+ 10 * HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (!wait_for_completion_timeout(&owl_host->dma_complete,
+ 5 * HZ)) {
+ dev_err(owl_host->dev, "DMA interrupt timeout\n");
+ mrq->cmd->error = -ETIMEDOUT;
+ dmaengine_terminate_all(owl_host->dma);
+ goto err_out;
+ }
+
+ if (data->stop)
+ owl_mmc_send_cmd(owl_host, data->stop, NULL);
+
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+err_out:
+ owl_mmc_finish_request(owl_host);
+}
+
+static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
+ unsigned int rate)
+{
+ unsigned long clk_rate;
+ int ret;
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_CTL);
+ reg &= ~OWL_SD_CTL_DELAY_MSK;
+
+ /* Set RDELAY and WDELAY based on the clock */
+ if (rate <= 1000000) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 1000000) && (rate <= 26000000)) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
+ OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
+ owl_host->base + OWL_REG_SD_CTL);
+ /* DDR50 mode has special delay chain */
+ } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
+ writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
+ OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
+ owl_host->base + OWL_REG_SD_CTL);
+ } else {
+ dev_err(owl_host->dev, "SD clock rate not supported\n");
+ return -EINVAL;
+ }
+
+ clk_rate = clk_round_rate(owl_host->clk, rate << 1);
+ ret = clk_set_rate(owl_host->clk, clk_rate);
+
+ return ret;
+}
+
+static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
+{
+ if (!ios->clock)
+ return;
+
+ owl_host->clock = ios->clock;
+ owl_mmc_set_clk_rate(owl_host, ios->clock);
+}
+
+static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+
+ reg = readl(owl_host->base + OWL_REG_SD_EN);
+ reg &= ~0x03;
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ break;
+ case MMC_BUS_WIDTH_4:
+ reg |= OWL_SD_EN_DATAWID(1);
+ break;
+ case MMC_BUS_WIDTH_8:
+ reg |= OWL_SD_EN_DATAWID(2);
+ break;
+ }
+
+ writel(reg, owl_host->base + OWL_REG_SD_EN);
+}
+
+static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
+{
+ reset_control_assert(owl_host->reset);
+ udelay(20);
+ reset_control_deassert(owl_host->reset);
+}
+
+static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
+{
+ u32 mode;
+
+ init_completion(&owl_host->sdc_complete);
+
+ /* Enable transfer end IRQ */
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
+ OWL_SD_STATE_TEIE, true);
+
+ /* Send init clk */
+ mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
+ mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
+ writel(mode, owl_host->base + OWL_REG_SD_CTL);
+
+ if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
+ dev_err(owl_host->dev, "CMD interrupt timeout\n");
+ return;
+ }
+}
+
+static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ dev_dbg(owl_host->dev, "Powering card up\n");
+
+ /* Reset the SDC controller to clear all previous states */
+ owl_mmc_ctr_reset(owl_host);
+ clk_prepare_enable(owl_host->clk);
+ writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
+ owl_host->base + OWL_REG_SD_EN);
+
+ break;
+
+ case MMC_POWER_ON:
+ dev_dbg(owl_host->dev, "Powering card on\n");
+ owl_mmc_power_on(owl_host);
+
+ break;
+
+ case MMC_POWER_OFF:
+ dev_dbg(owl_host->dev, "Powering card off\n");
+ clk_disable_unprepare(owl_host->clk);
+
+ return;
+
+ default:
+ dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
+ break;
+ }
+
+ if (ios->clock != owl_host->clock)
+ owl_mmc_set_clk(owl_host, ios);
+
+ owl_mmc_set_bus_width(owl_host, ios);
+
+ /* Enable DDR mode if requested */
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
+ owl_host->ddr_50 = 1;
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_DDREN, true);
+ } else {
+ owl_host->ddr_50 = 0;
+ }
+}
+
+static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ /* It is enough to change the pad ctrl bit for voltage switch */
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, false);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
+ OWL_SD_EN_S18EN, true);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct mmc_host_ops owl_mmc_ops = {
+ .request = owl_mmc_request,
+ .set_ios = owl_mmc_set_ios,
+ .get_ro = mmc_gpio_get_ro,
+ .get_cd = mmc_gpio_get_cd,
+ .start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
+};
+
+static int owl_mmc_probe(struct platform_device *pdev)
+{
+ struct owl_mmc_host *owl_host;
+ struct mmc_host *mmc;
+ struct resource *res;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "mmc alloc host failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, mmc);
+
+ owl_host = mmc_priv(mmc);
+ owl_host->dev = &pdev->dev;
+ owl_host->mmc = mmc;
+ spin_lock_init(&owl_host->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ owl_host->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(owl_host->base)) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = PTR_ERR(owl_host->base);
+ goto err_free_host;
+ }
+
+ owl_host->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->clk)) {
+ dev_err(&pdev->dev, "No clock defined\n");
+ ret = PTR_ERR(owl_host->clk);
+ goto err_free_host;
+ }
+
+ owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(owl_host->reset)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+ ret = PTR_ERR(owl_host->reset);
+ goto err_free_host;
+ }
+
+ mmc->ops = &owl_mmc_ops;
+ mmc->max_blk_count = 512;
+ mmc->max_blk_size = 512;
+ mmc->max_segs = 256;
+ mmc->max_seg_size = 262144;
+ mmc->max_req_size = 262144;
+ /* 100kHz ~ 52MHz */
+ mmc->f_min = 100000;
+ mmc->f_max = 52000000;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_4_BIT_DATA;
+ mmc->caps2 = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
+ MMC_VDD_165_195;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_free_host;
+
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc");
+ if (!owl_host->dma) {
+ dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
+ ret = -ENXIO;
+ goto err_free_host;
+ }
+
+ dev_info(&pdev->dev, "Using %s for DMA transfers\n",
+ dma_chan_name(owl_host->dma));
+
+ owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
+ owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ owl_host->dma_cfg.device_fc = false;
+
+ owl_host->irq = platform_get_irq(pdev, 0);
+ if (owl_host->irq < 0) {
+ ret = -EINVAL;
+ goto err_free_host;
+ }
+
+ ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
+ 0, dev_name(&pdev->dev), owl_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n",
+ owl_host->irq);
+ goto err_free_host;
+ }
+
+ ret = mmc_add_host(mmc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add host\n");
+ goto err_free_host;
+ }
+
+ dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
+
+ return 0;
+
+err_free_host:
+ mmc_free_host(mmc);
+
+ return ret;
+}
+
+static int owl_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct owl_mmc_host *owl_host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ disable_irq(owl_host->irq);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static const struct of_device_id owl_mmc_of_match[] = {
+ {.compatible = "actions,owl-mmc",},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
+
+static struct platform_driver owl_mmc_driver = {
+ .driver = {
+ .name = "owl_mmc",
+ .of_match_table = of_match_ptr(owl_mmc_of_match),
+ },
+ .probe = owl_mmc_probe,
+ .remove = owl_mmc_remove,
+};
+module_platform_driver(owl_mmc_driver);
+
+MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
+MODULE_AUTHOR("Actions Semi");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index a66f8d6d61d1..18839a10594c 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -308,6 +308,7 @@ static const struct soc_device_attribute soc_whitelist[] = {
.data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) },
/* generic ones */
{ .soc_id = "r8a774a1" },
+ { .soc_id = "r8a774b1" },
{ .soc_id = "r8a774c0" },
{ .soc_id = "r8a77470" },
{ .soc_id = "r8a7795" },
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 1604f512c7bd..105e73d4a3b9 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -61,7 +61,7 @@ struct sdhci_acpi_slot {
mmc_pm_flag_t pm_caps;
unsigned int flags;
size_t priv_size;
- int (*probe_slot)(struct platform_device *, const char *, const char *);
+ int (*probe_slot)(struct platform_device *, struct acpi_device *);
int (*remove_slot)(struct platform_device *);
int (*free_slot)(struct platform_device *pdev);
int (*setup_host)(struct platform_device *pdev);
@@ -325,12 +325,10 @@ static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device,
* wifi card in the expected slot with an ACPI companion node, is used to
* indicate that acpi_device_fix_up_power() should be avoided.
*/
-static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
- const char *uid)
+static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
return sdhci_acpi_cht() &&
- !strcmp(hid, "80860F14") &&
- !strcmp(uid, "2") &&
+ acpi_dev_hid_uid_match(adev, "80860F14", "2") &&
sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28);
}
@@ -345,8 +343,7 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev)
return false;
}
-static inline bool sdhci_acpi_no_fixup_child_power(const char *hid,
- const char *uid)
+static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
{
return false;
}
@@ -375,19 +372,18 @@ out:
return ret;
}
-static int intel_probe_slot(struct platform_device *pdev, const char *hid,
- const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct intel_host *intel_host = sdhci_acpi_priv(c);
struct sdhci_host *host = c->host;
- if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
+ if (acpi_dev_hid_uid_match(adev, "80860F14", "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- if (hid && !strcmp(hid, "80865ACA"))
+ if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL))
host->mmc_host_ops.get_cd = bxt_get_cd;
intel_dsm_init(intel_host, &pdev->dev, host->mmc);
@@ -473,8 +469,7 @@ static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr)
return IRQ_HANDLED;
}
-static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
- const char *uid)
+static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
@@ -482,7 +477,7 @@ static int qcom_probe_slot(struct platform_device *pdev, const char *hid,
*irq = -EINVAL;
- if (strcmp(hid, "QCOM8051"))
+ if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
*irq = platform_get_irq(pdev, 1);
@@ -501,14 +496,12 @@ static int qcom_free_slot(struct platform_device *pdev)
struct sdhci_host *host = c->host;
struct acpi_device *adev;
int *irq = sdhci_acpi_priv(c);
- const char *hid;
adev = ACPI_COMPANION(dev);
if (!adev)
return -ENODEV;
- hid = acpi_device_hid(adev);
- if (strcmp(hid, "QCOM8051"))
+ if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL))
return 0;
if (*irq < 0)
@@ -583,7 +576,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
};
static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+ struct acpi_device *adev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct sdhci_host *host = c->host;
@@ -654,17 +647,12 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
-static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
- const char *uid)
+static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
{
const struct sdhci_acpi_uid_slot *u;
for (u = sdhci_acpi_uids; u->hid; u++) {
- if (strcmp(u->hid, hid))
- continue;
- if (!u->uid)
- return u->slot;
- if (uid && !strcmp(u->uid, uid))
+ if (acpi_dev_hid_uid_match(adev, u->hid, u->uid))
return u->slot;
}
return NULL;
@@ -680,22 +668,17 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
struct resource *iomem;
resource_size_t len;
size_t priv_size;
- const char *hid;
- const char *uid;
int err;
device = ACPI_COMPANION(dev);
if (!device)
return -ENODEV;
- hid = acpi_device_hid(device);
- uid = acpi_device_uid(device);
-
- slot = sdhci_acpi_get_slot(hid, uid);
+ slot = sdhci_acpi_get_slot(device);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
- if (!sdhci_acpi_no_fixup_child_power(hid, uid)) {
+ if (!sdhci_acpi_no_fixup_child_power(device)) {
list_for_each_entry(child, &device->children, node)
if (child->status.present && child->status.enabled)
acpi_device_fix_up_power(child);
@@ -745,7 +728,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (c->slot) {
if (c->slot->probe_slot) {
- err = c->slot->probe_slot(pdev, hid, uid);
+ err = c->slot->probe_slot(pdev, device);
if (err)
goto err_free;
}
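The sdhci-acpi conversion replaces the open-coded hid/uid strcmp() pairs with acpi_dev_hid_uid_match(), where a NULL uid means "any _UID". A sketch of the equivalent open-coded check, assuming the helper's documented semantics:

static bool hid_uid_match(struct acpi_device *adev,
			  const char *hid2, const char *uid2)
{
	const char *uid = acpi_device_uid(adev);

	if (strcmp(acpi_device_hid(adev), hid2))
		return false;
	if (!uid2)
		return true;		/* caller doesn't care about _UID */
	return uid && !strcmp(uid, uid2);
}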
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 57b582bf73d9..9289bb4d633e 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -51,6 +51,11 @@
#define ESDHC_CLOCK_HCKEN 0x00000002
#define ESDHC_CLOCK_IPGEN 0x00000001
+/* System Control 2 Register */
+#define ESDHC_SYSTEM_CONTROL_2 0x3c
+#define ESDHC_SMPCLKSEL 0x00800000
+#define ESDHC_EXTN 0x00400000
+
/* Host Controller Capabilities Register 2 */
#define ESDHC_CAPABILITIES_1 0x114
@@ -59,7 +64,16 @@
#define ESDHC_HS400_WNDW_ADJUST 0x00000040
#define ESDHC_HS400_MODE 0x00000010
#define ESDHC_TB_EN 0x00000004
+#define ESDHC_TB_MODE_MASK 0x00000003
+#define ESDHC_TB_MODE_SW 0x00000003
+#define ESDHC_TB_MODE_3 0x00000002
+
+#define ESDHC_TBSTAT 0x124
+
#define ESDHC_TBPTR 0x128
+#define ESDHC_WNDW_STRT_PTR_SHIFT 8
+#define ESDHC_WNDW_STRT_PTR_MASK (0x7f << 8)
+#define ESDHC_WNDW_END_PTR_MASK 0x7f
/* SD Clock Control Register */
#define ESDHC_SDCLKCTL 0x144
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
new file mode 100644
index 000000000000..a1aa21b9ae1c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ * Takao Orito <orito.takao@socionext.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+#include "sdhci-pltfm.h"
+#include "sdhci_f_sdh30.h"
+
+/* milbeaut bridge controller register */
+#define MLB_SOFT_RESET 0x0200
+#define MLB_SOFT_RESET_RSTX BIT(0)
+
+#define MLB_WP_CD_LED_SET 0x0210
+#define MLB_WP_CD_LED_SET_LED_INV BIT(2)
+
+#define MLB_CR_SET 0x0220
+#define MLB_CR_SET_CR_TOCLKUNIT BIT(24)
+#define MLB_CR_SET_CR_TOCLKFREQ_SFT (16)
+#define MLB_CR_SET_CR_TOCLKFREQ_MASK (0x3F << MLB_CR_SET_CR_TOCLKFREQ_SFT)
+#define MLB_CR_SET_CR_BCLKFREQ_SFT (8)
+#define MLB_CR_SET_CR_BCLKFREQ_MASK (0xFF << MLB_CR_SET_CR_BCLKFREQ_SFT)
+#define MLB_CR_SET_CR_RTUNTIMER_SFT (4)
+#define MLB_CR_SET_CR_RTUNTIMER_MASK (0xF << MLB_CR_SET_CR_RTUNTIMER_SFT)
+
+#define MLB_SD_TOCLK_I_DIV 16
+#define MLB_TOCLKFREQ_UNIT_THRES 16000000
+#define MLB_CAL_TOCLKFREQ_MHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000000)
+#define MLB_CAL_TOCLKFREQ_KHZ(rate) (rate / MLB_SD_TOCLK_I_DIV / 1000)
+#define MLB_TOCLKFREQ_MAX 63
+#define MLB_TOCLKFREQ_MIN 1
+
+#define MLB_SD_BCLK_I_DIV 4
+#define MLB_CAL_BCLKFREQ(rate) (rate / MLB_SD_BCLK_I_DIV / 1000000)
+#define MLB_BCLKFREQ_MAX 255
+#define MLB_BCLKFREQ_MIN 1
+
+#define MLB_CDR_SET 0x0230
+#define MLB_CDR_SET_CLK2POW16 3
+
+struct f_sdhost_priv {
+ struct clk *clk_iface;
+ struct clk *clk;
+ struct device *dev;
+ bool enable_cmd_dat_delay;
+};
+
+static void sdhci_milbeaut_soft_voltage_switch(struct sdhci_host *host)
+{
+ u32 ctrl = 0;
+
+ usleep_range(2500, 3000);
+ ctrl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ ctrl |= F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+
+ ctrl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctrl, F_SDH30_IO_CONTROL2);
+ usleep_range(2500, 3000);
+
+ ctrl = sdhci_readl(host, F_SDH30_TUNING_SETTING);
+ ctrl |= F_SDH30_CMD_CHK_DIS;
+ sdhci_writel(host, ctrl, F_SDH30_TUNING_SETTING);
+}
+
+static unsigned int sdhci_milbeaut_get_min_clock(struct sdhci_host *host)
+{
+ return F_SDH30_MIN_CLOCK;
+}
+
+static void sdhci_milbeaut_reset(struct sdhci_host *host, u8 mask)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u16 clk;
+ u32 ctl;
+ ktime_t timeout;
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk = (clk & ~SDHCI_CLOCK_CARD_EN) | SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ sdhci_reset(host, mask);
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ timeout = ktime_add_ms(ktime_get(), 10);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+ return;
+ }
+ udelay(10);
+ }
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static void sdhci_milbeaut_set_power(struct sdhci_host *host,
+ unsigned char mode, unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
+static const struct sdhci_ops sdhci_milbeaut_ops = {
+ .voltage_switch = sdhci_milbeaut_soft_voltage_switch,
+ .get_min_clock = sdhci_milbeaut_get_min_clock,
+ .reset = sdhci_milbeaut_reset,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_milbeaut_set_power,
+};
+
+static void sdhci_milbeaut_bridge_reset(struct sdhci_host *host,
+ int reset_flag)
+{
+ if (reset_flag)
+ sdhci_writel(host, 0, MLB_SOFT_RESET);
+ else
+ sdhci_writel(host, MLB_SOFT_RESET_RSTX, MLB_SOFT_RESET);
+}
+
+static void sdhci_milbeaut_bridge_init(struct sdhci_host *host,
+ int rate)
+{
+ u32 val, clk;
+
+ /* IO_SDIO_CR_SET should be set while in reset */
+ val = sdhci_readl(host, MLB_CR_SET);
+ val &= ~(MLB_CR_SET_CR_TOCLKFREQ_MASK | MLB_CR_SET_CR_TOCLKUNIT |
+ MLB_CR_SET_CR_BCLKFREQ_MASK);
+ if (rate >= MLB_TOCLKFREQ_UNIT_THRES) {
+ clk = MLB_CAL_TOCLKFREQ_MHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ val |= MLB_CR_SET_CR_TOCLKUNIT |
+ (clk << MLB_CR_SET_CR_TOCLKFREQ_SFT);
+ } else {
+ clk = MLB_CAL_TOCLKFREQ_KHZ(rate);
+ clk = min_t(u32, MLB_TOCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_TOCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_TOCLKFREQ_SFT;
+ }
+
+ clk = MLB_CAL_BCLKFREQ(rate);
+ clk = min_t(u32, MLB_BCLKFREQ_MAX, clk);
+ clk = max_t(u32, MLB_BCLKFREQ_MIN, clk);
+ val |= clk << MLB_CR_SET_CR_BCLKFREQ_SFT;
+ val &= ~MLB_CR_SET_CR_RTUNTIMER_MASK;
+ sdhci_writel(host, val, MLB_CR_SET);
+
+ sdhci_writel(host, MLB_CDR_SET_CLK2POW16, MLB_CDR_SET);
+
+ sdhci_writel(host, MLB_WP_CD_LED_SET_LED_INV, MLB_WP_CD_LED_SET);
+}
+
+static void sdhci_milbeaut_vendor_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
+ ctl = sdhci_readl(host, F_SDH30_IO_CONTROL2);
+ ctl |= F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_MSEL_O_1_8;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+ ctl &= ~F_SDH30_CRES_O_DN;
+ sdhci_writel(host, ctl, F_SDH30_IO_CONTROL2);
+
+ ctl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
+ ctl |= F_SDH30_SIN | F_SDH30_AHB_INCR_16 | F_SDH30_AHB_INCR_8 |
+ F_SDH30_AHB_INCR_4;
+ ctl &= ~(F_SDH30_AHB_BIGED | F_SDH30_BUSLOCK_EN);
+ sdhci_writew(host, ctl, F_SDH30_AHB_CONFIG);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
+}
+
+static const struct of_device_id mlb_dt_ids[] = {
+ {
+ .compatible = "socionext,milbeaut-m10v-sdhci-3.0",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mlb_dt_ids);
+
+static void sdhci_milbeaut_init(struct sdhci_host *host)
+{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ int rate = clk_get_rate(priv->clk);
+ u16 ctl;
+
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ ctl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ ctl &= ~(SDHCI_CLOCK_CARD_EN | SDHCI_CLOCK_INT_EN);
+ sdhci_writew(host, ctl, SDHCI_CLOCK_CONTROL);
+
+ sdhci_milbeaut_bridge_reset(host, 1);
+
+ sdhci_milbeaut_bridge_init(host, rate);
+ sdhci_milbeaut_bridge_reset(host, 0);
+
+ sdhci_milbeaut_vendor_init(host);
+}
+
+static int sdhci_milbeaut_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ret = 0;
+ struct f_sdhost_priv *priv;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "%s: no irq specified\n", __func__);
+ return irq;
+ }
+
+ host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ priv = sdhci_priv(host);
+ priv->dev = dev;
+
+ host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+ SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET |
+ SDHCI_QUIRK_DELAY_AFTER_POWER;
+ host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+ SDHCI_QUIRK2_TUNING_WORK_AROUND |
+ SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ host->hw_name = "f_sdh30";
+ host->ops = &sdhci_milbeaut_ops;
+ host->irq = irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(host->ioaddr)) {
+ ret = PTR_ERR(host->ioaddr);
+ goto err;
+ }
+
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
+
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
+
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
+
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
+
+ sdhci_milbeaut_init(host);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_add_host;
+
+ return 0;
+
+err_add_host:
+ clk_disable_unprepare(priv->clk);
+err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+err:
+ sdhci_free_host(host);
+ return ret;
+}
+
+static int sdhci_milbeaut_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+
+ sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+ 0xffffffff);
+
+ clk_disable_unprepare(priv->clk_iface);
+ clk_disable_unprepare(priv->clk);
+
+ sdhci_free_host(host);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_milbeaut_driver = {
+ .driver = {
+ .name = "sdhci-milbeaut",
+ .of_match_table = of_match_ptr(mlb_dt_ids),
+ },
+ .probe = sdhci_milbeaut_probe,
+ .remove = sdhci_milbeaut_remove,
+};
+
+module_platform_driver(sdhci_milbeaut_driver);
+
+MODULE_DESCRIPTION("MILBEAUT SD Card Controller driver");
+MODULE_AUTHOR("Takao Orito <orito.takao@socionext.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci-milbeaut");
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7023cbec4017..e49b44b4d82e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -22,6 +22,7 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
#include "sdhci-pltfm.h"
@@ -32,6 +33,10 @@
#define PHY_CLK_TOO_SLOW_HZ 400000
+/* Default settings for ZynqMP Clock Phases */
+#define ZYNQMP_ICLK_PHASE {0, 63, 63, 0, 63, 0, 0, 183, 54, 0, 0}
+#define ZYNQMP_OCLK_PHASE {0, 72, 60, 0, 60, 72, 135, 48, 72, 135, 0}
+
/*
* On some SoCs the syscon area has a feature where the upper 16-bits of
* each 32-bit register act as a write mask for the lower 16-bits. This allows
@@ -72,13 +77,38 @@ struct sdhci_arasan_soc_ctl_map {
};
/**
+ * struct sdhci_arasan_clk_data
+ * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
+ * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @sampleclk_hw: Struct for the clock we might provide to a PHY.
+ * @sampleclk: Pointer to normal 'struct clock' for sampleclk_hw.
+ * @clk_phase_in: Array of Input Clock Phase Delays for all speed modes
+ * @clk_phase_out: Array of Output Clock Phase Delays for all speed modes
+ * @set_clk_delays: Function pointer for setting Clock Delays
+ * @clk_of_data: Platform specific runtime clock data storage pointer
+ */
+struct sdhci_arasan_clk_data {
+ struct clk_hw sdcardclk_hw;
+ struct clk *sdcardclk;
+ struct clk_hw sampleclk_hw;
+ struct clk *sampleclk;
+ int clk_phase_in[MMC_TIMING_MMC_HS400 + 1];
+ int clk_phase_out[MMC_TIMING_MMC_HS400 + 1];
+ void (*set_clk_delays)(struct sdhci_host *host);
+ void *clk_of_data;
+};
+
+struct sdhci_arasan_zynqmp_clk_data {
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+/**
* struct sdhci_arasan_data
* @host: Pointer to the main SDHCI host structure.
* @clk_ahb: Pointer to the AHB clock
* @phy: Pointer to the generic phy
* @is_phy_on: True if the PHY is on; false if not.
- * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
- * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @clk_data: Struct for the Arasan Controller Clock Data.
* @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
* @soc_ctl_map: Map to get offsets into soc_ctl registers.
*/
@@ -89,8 +119,7 @@ struct sdhci_arasan_data {
bool is_phy_on;
bool has_cqe;
- struct clk_hw sdcardclk_hw;
- struct clk *sdcardclk;
+ struct sdhci_arasan_clk_data clk_data;
struct regmap *soc_ctl_base;
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
@@ -120,6 +149,12 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_emmc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x80, .width = 8, .shift = 2 },
+ .clockmultiplier = { .reg = 0, .width = -1, .shift = -1 },
+ .hiword_update = false,
+};
+
/**
* sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
*
@@ -174,6 +209,7 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
bool ctrl_phy = false;
if (!IS_ERR(sdhci_arasan->phy)) {
@@ -215,6 +251,10 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_arasan->is_phy_on = false;
}
+ /* Set the Input and Output Clock Phase Delays */
+ if (clk_data->set_clk_delays)
+ clk_data->set_clk_delays(host);
+
sdhci_set_clock(host, clock);
if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_CLOCK_UNSTABLE)
@@ -384,6 +424,11 @@ static struct sdhci_arasan_of_data intel_lgm_emmc_data = {
.pdata = &sdhci_arasan_cqe_pdata,
};
+static struct sdhci_arasan_of_data intel_lgm_sdxc_data = {
+ .soc_ctl_map = &intel_lgm_sdxc_soc_ctl_map,
+ .pdata = &sdhci_arasan_cqe_pdata,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -489,6 +534,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,lgm-sdhci-5.1-emmc",
.data = &intel_lgm_emmc_data,
},
+ {
+ .compatible = "intel,lgm-sdhci-5.1-sdxc",
+ .data = &intel_lgm_sdxc_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -502,6 +551,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "arasan,sdhci-4.9a",
.data = &sdhci_arasan_data,
},
+ {
+ .compatible = "xlnx,zynqmp-8.9a",
+ .data = &sdhci_arasan_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
@@ -520,8 +573,10 @@ static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
struct sdhci_arasan_data *sdhci_arasan =
- container_of(hw, struct sdhci_arasan_data, sdcardclk_hw);
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
return host->mmc->actual_clock;
@@ -532,6 +587,177 @@ static const struct clk_ops arasan_sdcardclk_ops = {
};
/**
+ * sdhci_arasan_sampleclk_recalc_rate - Return the sampling clock rate
+ *
+ * Return the current actual rate of the sampling clock. This can be used
+ * to communicate with our PHY.
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be the rate of clk_xin).
+ * Returns the sample clock rate.
+ */
+static unsigned long sdhci_arasan_sampleclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+
+ return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+};
+
+/**
+ * sdhci_zynqmp_sdcardclk_set_phase - Set the SD Output Clock Tap Delays
+ *
+ * Set the SD Output Clock Tap Delays for Output path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift, between 0 and 359 degrees.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
+
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sdcardclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+ * This is applicable for SDHCI_SPEC_300 and above
+ * ZynqMP does not set phase for <=25MHz clock.
+ * If degrees is zero, no need to do anything.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 15 Taps are available */
+ tap_max = 15;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 8 Taps are available */
+ tap_max = 8;
+ break;
+ default:
+ break;
+ }
+
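+ /* Map the requested phase onto the tap range: each tap covers 360/tap_max degrees. */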
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Output Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sdcardclk_set_phase,
+};
+
+/**
+ * sdhci_zynqmp_sampleclk_set_phase - Set the SD Input Clock Tap Delays
+ *
+ * Set the SD Input Clock Tap Delays for Input path
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @degrees: The clock phase shift, between 0 and 359 degrees.
+ * Return: 0 on success and error value on error
+ */
+static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
+
+{
+ struct sdhci_arasan_clk_data *clk_data =
+ container_of(hw, struct sdhci_arasan_clk_data, sampleclk_hw);
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(clk_data, struct sdhci_arasan_data, clk_data);
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
+ clk_data->clk_of_data;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
+ const char *clk_name = clk_hw_get_name(hw);
+ u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
+ u8 tap_delay, tap_max = 0;
+ int ret;
+
+ /*
+ * This is applicable for SDHCI_SPEC_300 and above
+ * ZynqMP does not set phase for <=25MHz clock.
+ * If degrees is zero, no need to do anything.
+ */
+ if (host->version < SDHCI_SPEC_300 ||
+ host->timing == MMC_TIMING_LEGACY ||
+ host->timing == MMC_TIMING_UHS_SDR12 || !degrees)
+ return 0;
+
+ switch (host->timing) {
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ case MMC_TIMING_UHS_SDR25:
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ /* For 50MHz clock, 120 Taps are available */
+ tap_max = 120;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ /* For 100MHz clock, 60 Taps are available */
+ tap_max = 60;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /* For 200MHz clock, 30 Taps are available */
+ tap_max = 30;
+ break;
+ default:
+ break;
+ }
+
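+ /* Map the requested phase onto the tap range: each tap covers 360/tap_max degrees. */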
+ tap_delay = (degrees * tap_max) / 360;
+
+ /* Set the Clock Phase */
+ ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
+ PM_TAPDELAY_INPUT, tap_delay, NULL);
+ if (ret)
+ pr_err("Error setting Input Tap Delay\n");
+
+ return ret;
+}
+
+static const struct clk_ops zynqmp_sampleclk_ops = {
+ .recalc_rate = sdhci_arasan_sampleclk_recalc_rate,
+ .set_phase = sdhci_zynqmp_sampleclk_set_phase,
+};
+
+/**
* sdhci_arasan_update_clockmultiplier - Set corecfg_clockmultiplier
*
* The corecfg_clockmultiplier is supposed to contain clock multiplier
@@ -609,39 +835,128 @@ static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz);
}
+static void sdhci_arasan_set_clk_delays(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+
+ clk_set_phase(clk_data->sampleclk,
+ clk_data->clk_phase_in[host->timing]);
+ clk_set_phase(clk_data->sdcardclk,
+ clk_data->clk_phase_out[host->timing]);
+}
+
+static void arasan_dt_read_clk_phase(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data,
+ unsigned int timing, const char *prop)
+{
+ struct device_node *np = dev->of_node;
+
+ int clk_phase[2] = {0};
+
+ /*
+ * Read the Tap Delay values from DT; if the DT does not contain
+ * them, use the pre-defined values.
+ */
+ if (of_property_read_variable_u32_array(np, prop, &clk_phase[0],
+ 2, 0)) {
+ dev_dbg(dev, "Using predefined clock phase for %s = %d %d\n",
+ prop, clk_data->clk_phase_in[timing],
+ clk_data->clk_phase_out[timing]);
+ return;
+ }
+
+ /* The values read are Input and Output Clock Delays in order */
+ clk_data->clk_phase_in[timing] = clk_phase[0];
+ clk_data->clk_phase_out[timing] = clk_phase[1];
+}
+
/**
- * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
+ * arasan_dt_parse_clk_phases - Read Clock Delay values from DT
+ *
+ * Called at initialization to parse the values of Clock Delays.
+ *
+ * @dev: Pointer to our struct device.
+ * @clk_data: Pointer to the Clock Data structure
+ */
+static void arasan_dt_parse_clk_phases(struct device *dev,
+ struct sdhci_arasan_clk_data *clk_data)
+{
+ int *iclk_phase, *oclk_phase;
+ u32 mio_bank = 0;
+ int i;
+
+ /*
+ * This has been kept as a pointer and is assigned a function here
+ * so that different controller variants can assign their own
+ * handling functions.
+ */
+ clk_data->set_clk_delays = sdhci_arasan_set_clk_delays;
+
+ if (of_device_is_compatible(dev->of_node, "xlnx,zynqmp-8.9a")) {
+ iclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_ICLK_PHASE;
+ oclk_phase = (int [MMC_TIMING_MMC_HS400 + 1]) ZYNQMP_OCLK_PHASE;
+
+ of_property_read_u32(dev->of_node, "xlnx,mio-bank", &mio_bank);
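+ /* The bank-2 MIO pads apparently need a different output phase in the 200 MHz modes. */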
+ if (mio_bank == 2) {
+ oclk_phase[MMC_TIMING_UHS_SDR104] = 90;
+ oclk_phase[MMC_TIMING_MMC_HS200] = 90;
+ }
+
+ for (i = 0; i <= MMC_TIMING_MMC_HS400; i++) {
+ clk_data->clk_phase_in[i] = iclk_phase[i];
+ clk_data->clk_phase_out[i] = oclk_phase[i];
+ }
+ }
+
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_LEGACY,
+ "clk-phase-legacy");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS,
+ "clk-phase-mmc-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_SD_HS,
+ "clk-phase-sd-hs");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR12,
+ "clk-phase-uhs-sdr12");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR25,
+ "clk-phase-uhs-sdr25");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR50,
+ "clk-phase-uhs-sdr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_SDR104,
+ "clk-phase-uhs-sdr104");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_UHS_DDR50,
+ "clk-phase-uhs-ddr50");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_DDR52,
+ "clk-phase-mmc-ddr52");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS200,
+ "clk-phase-mmc-hs200");
+ arasan_dt_read_clk_phase(dev, clk_data, MMC_TIMING_MMC_HS400,
+ "clk-phase-mmc-hs400");
+}
+
+/**
+ * sdhci_arasan_register_sdcardclk - Register the sdcardclk for a PHY to use
*
* Some PHY devices need to know what the actual card clock is. In order for
* them to find out, we'll provide a clock through the common clock framework
* for them to query.
*
- * Note: without seriously re-architecting SDHCI's clock code and testing on
- * all platforms, there's no way to create a totally beautiful clock here
- * with all clock ops implemented. Instead, we'll just create a clock that can
- * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
- * framework that we're doing things behind its back. This should be sufficient
- * to create nice clean device tree bindings and later (if needed) we can try
- * re-architecting SDHCI if we see some benefit to it.
- *
* @sdhci_arasan: Our private data structure.
* @clk_xin: Pointer to the functional clock
* @dev: Pointer to our struct device.
* Returns 0 on success and error value on error
*/
-static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
- struct clk *clk_xin,
- struct device *dev)
+static int
+sdhci_arasan_register_sdcardclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
struct device_node *np = dev->of_node;
struct clk_init_data sdcardclk_init;
const char *parent_clk_name;
int ret;
- /* Providing a clock to the PHY is optional; no error if missing */
- if (!of_find_property(np, "#clock-cells", NULL))
- return 0;
-
ret = of_property_read_string_index(np, "clock-output-names", 0,
&sdcardclk_init.name);
if (ret) {
@@ -653,17 +968,72 @@ static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
sdcardclk_init.parent_names = &parent_clk_name;
sdcardclk_init.num_parents = 1;
sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
- sdcardclk_init.ops = &arasan_sdcardclk_ops;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sdcardclk_init.ops = &zynqmp_sdcardclk_ops;
+ else
+ sdcardclk_init.ops = &arasan_sdcardclk_ops;
+
+ clk_data->sdcardclk_hw.init = &sdcardclk_init;
+ clk_data->sdcardclk =
+ devm_clk_register(dev, &clk_data->sdcardclk_hw);
+ clk_data->sdcardclk_hw.init = NULL;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get,
+ clk_data->sdcardclk);
+ if (ret)
+ dev_err(dev, "Failed to add sdcard clock provider\n");
+
+ return ret;
+}
+
+/**
+ * sdhci_arasan_register_sampleclk - Register the sampleclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int
+sdhci_arasan_register_sampleclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct sdhci_arasan_clk_data *clk_data = &sdhci_arasan->clk_data;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data sampleclk_init;
+ const char *parent_clk_name;
+ int ret;
- sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init;
- sdhci_arasan->sdcardclk =
- devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw);
- sdhci_arasan->sdcardclk_hw.init = NULL;
+ ret = of_property_read_string_index(np, "clock-output-names", 1,
+ &sampleclk_init.name);
+ if (ret) {
+ dev_err(dev, "DT has #clock-cells but no clock-output-names\n");
+ return ret;
+ }
+
+ parent_clk_name = __clk_get_name(clk_xin);
+ sampleclk_init.parent_names = &parent_clk_name;
+ sampleclk_init.num_parents = 1;
+ sampleclk_init.flags = CLK_GET_RATE_NOCACHE;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a"))
+ sampleclk_init.ops = &zynqmp_sampleclk_ops;
+ else
+ sampleclk_init.ops = &arasan_sampleclk_ops;
+
+ clk_data->sampleclk_hw.init = &sampleclk_init;
+ clk_data->sampleclk =
+ devm_clk_register(dev, &clk_data->sampleclk_hw);
+ clk_data->sampleclk_hw.init = NULL;
ret = of_clk_add_provider(np, of_clk_src_simple_get,
- sdhci_arasan->sdcardclk);
+ clk_data->sampleclk);
if (ret)
- dev_err(dev, "Failed to add clock provider\n");
+ dev_err(dev, "Failed to add sample clock provider\n");
return ret;
}
@@ -686,6 +1056,54 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+/**
+ * sdhci_arasan_register_sdclk - Register the sdcardclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * Note: without seriously re-architecting SDHCI's clock code and testing on
+ * all platforms, there's no way to create a totally beautiful clock here
+ * with all clock ops implemented. Instead, we'll just create a clock that can
+ * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
+ * framework that we're doing things behind its back. This should be sufficient
+ * to create nice clean device tree bindings and later (if needed) we can try
+ * re-architecting SDHCI if we see some benefit to it.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ u32 num_clks = 0;
+ int ret;
+
+ /* Providing a clock to the PHY is optional; no error if missing */
+ if (of_property_read_u32(np, "#clock-cells", &num_clks) < 0)
+ return 0;
+
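+ /* A #clock-cells of 0 exposes only sdcardclk; a non-zero value means the DT also expects a sample clock. */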
+ ret = sdhci_arasan_register_sdcardclk(sdhci_arasan, clk_xin, dev);
+ if (ret)
+ return ret;
+
+ if (num_clks) {
+ ret = sdhci_arasan_register_sampleclk(sdhci_arasan, clk_xin,
+ dev);
+ if (ret) {
+ sdhci_arasan_unregister_sdclk(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
{
struct sdhci_host *host = sdhci_arasan->host;
@@ -814,6 +1232,25 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (ret)
goto clk_disable_all;
+ if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
+ struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
+ const struct zynqmp_eemi_ops *eemi_ops;
+
+ zynqmp_clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*zynqmp_clk_data),
+ GFP_KERNEL);
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops)) {
+ ret = PTR_ERR(eemi_ops);
+ goto unreg_clk;
+ }
+
+ zynqmp_clk_data->eemi_ops = eemi_ops;
+ sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
+ }
+
+ arasan_dt_parse_clk_phases(&pdev->dev, &sdhci_arasan->clk_data);
+
ret = mmc_of_parse(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 8962f6664381..56912e30c47e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -111,7 +111,19 @@ static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg)
+{
+ u32 val = readl(host->ioaddr + reg);
+
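+ /* Flip the card-present bit when the card-detect line is wired active-high. */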
+ if (unlikely(reg == SDHCI_PRESENT_STATE) &&
+ (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH))
+ val ^= SDHCI_CARD_PRESENT;
+
+ return val;
+}
+
static const struct sdhci_ops aspeed_sdhci_ops = {
+ .read_l = aspeed_sdhci_readl,
.set_clock = aspeed_sdhci_set_clock,
.get_max_clock = aspeed_sdhci_get_max_clock,
.set_bus_width = aspeed_sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 0ae986c42bc8..5959e394b416 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -27,6 +27,9 @@
#define SDMMC_CACR 0x230
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
+#define SDMMC_CALCR 0x240
+#define SDMMC_CALCR_EN BIT(0)
+#define SDMMC_CALCR_ALWYSON BIT(4)
#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
@@ -35,6 +38,7 @@ struct sdhci_at91_priv {
struct clk *gck;
struct clk *mainck;
bool restore_needed;
+ bool cal_always_on;
};
static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
@@ -116,10 +120,17 @@ static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
sdhci_reset(host, mask);
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
sdhci_at91_set_force_card_detect(host);
+
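+ /* A full reset presumably clears SDMMC_CALCR, so re-enable the always-on calibration cell here. */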
+ if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
+ sdhci_writel(host, SDMMC_CALCR_ALWYSON | SDMMC_CALCR_EN,
+ SDMMC_CALCR);
}
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
@@ -345,6 +356,14 @@ static int sdhci_at91_probe(struct platform_device *pdev)
priv->restore_needed = false;
+ /*
+ * If the SDCAL pin is wrongly connected, we must enable
+ * the analog calibration cell permanently.
+ */
+ priv->cal_always_on =
+ device_property_read_bool(&pdev->dev,
+ "microchip,sdcal-inverted");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1d1953dfc54b..5cca3fa4610b 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -77,8 +77,10 @@ struct sdhci_esdhc {
bool quirk_incorrect_hostver;
bool quirk_limited_clk_division;
bool quirk_unreliable_pulse_detection;
- bool quirk_fixup_tuning;
+ bool quirk_tuning_erratum_type1;
+ bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
+ bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
u32 div_ratio;
@@ -408,6 +410,8 @@ static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -416,10 +420,24 @@ static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32be(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+ * 1 us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32be(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int base = reg & ~0x3;
u32 value;
u32 ret;
@@ -428,6 +446,18 @@ static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
ret = esdhc_writew_fixup(host, reg, val, value);
if (reg != SDHCI_TRANSFER_MODE)
iowrite32(ret, host->ioaddr + base);
+
+ /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
+ * 1 us after ESDHC_EXTN is set.
+ */
+ if (base == ESDHC_SYSTEM_CONTROL_2) {
+ if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
+ esdhc->in_sw_tuning) {
+ udelay(1);
+ ret |= ESDHC_SMPCLKSEL;
+ iowrite32(ret, host->ioaddr + base);
+ }
+ }
}
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
@@ -560,6 +590,32 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
}
}
+static void esdhc_flush_async_fifo(struct sdhci_host *host)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
+ val |= ESDHC_FLUSH_ASYNC_FIFO;
+ sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
+ ESDHC_FLUSH_ASYNC_FIFO))
+ break;
+ if (timedout) {
+ pr_err("%s: flushing asynchronous FIFO timeout.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ usleep_range(10, 20);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -652,9 +708,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);
esdhc_clock_enable(host, false);
- temp = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- temp |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, temp, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
}
/* Wait max 20 ms */
@@ -796,16 +850,21 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
}
}
-static struct soc_device_attribute soc_fixup_tuning[] = {
+static struct soc_device_attribute soc_tuning_erratum_type1[] = {
+ { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ T1040", .revision = "1.0", },
{ .family = "QorIQ T2080", .revision = "1.0", },
- { .family = "QorIQ T1023", .revision = "1.0", },
{ .family = "QorIQ LS1021A", .revision = "1.0", },
- { .family = "QorIQ LS1080A", .revision = "1.0", },
- { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { },
+};
+
+static struct soc_device_attribute soc_tuning_erratum_type2[] = {
{ .family = "QorIQ LS1012A", .revision = "1.0", },
{ .family = "QorIQ LS1043A", .revision = "1.*", },
{ .family = "QorIQ LS1046A", .revision = "1.0", },
+ { .family = "QorIQ LS1080A", .revision = "1.0", },
+ { .family = "QorIQ LS2080A", .revision = "1.0", },
+ { .family = "QorIQ LA1575A", .revision = "1.0", },
{ },
};
@@ -814,10 +873,7 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
u32 val;
esdhc_clock_enable(host, false);
-
- val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
- val |= ESDHC_FLUSH_ASYNC_FIFO;
- sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
+ esdhc_flush_async_fifo(host);
val = sdhci_readl(host, ESDHC_TBCTL);
if (enable)
@@ -829,15 +885,97 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
esdhc_clock_enable(host, true);
}
+static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
+ u8 *window_end)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 tbstat_15_8, tbstat_7_0;
+ u32 val;
+
+ if (esdhc->quirk_tuning_erratum_type1) {
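+ /* Type-1 erratum parts use a fixed tuning window. */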
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ return;
+ }
+
+ /* Write TBCTL[11:8]=4'h8 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~(0xf << 8);
+ val |= 8 << 8;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read TBCTL[31:0] register and rewrite again */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
+ mdelay(1);
+
+ /* Read the TBSTAT[31:0] register twice */
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+ val = sdhci_readl(host, ESDHC_TBSTAT);
+
+ /* Reset data lines by setting ESDHCCTL[RSTD] */
+ sdhci_reset(host, SDHCI_RESET_DATA);
+ /* Write 32'hFFFF_FFFF to IRQSTAT register */
+ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);
+
+ /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio
+ * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio,
+ * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
+ * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
+ */
+ tbstat_7_0 = val & 0xff;
+ tbstat_15_8 = (val >> 8) & 0xff;
+
+ if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) {
+ *window_start = 8 * esdhc->div_ratio;
+ *window_end = 4 * esdhc->div_ratio;
+ } else {
+ *window_start = 5 * esdhc->div_ratio;
+ *window_end = 3 * esdhc->div_ratio;
+ }
+}
+
+static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
+ u8 window_start, u8 window_end)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u32 val;
+ int ret;
+
+ /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
+ val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
+ ESDHC_WNDW_STRT_PTR_MASK;
+ val |= window_end & ESDHC_WNDW_END_PTR_MASK;
+ sdhci_writel(host, val, ESDHC_TBPTR);
+
+ /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_SW;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+
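+ /* in_sw_tuning makes the writew fixups set ESDHC_SMPCLKSEL 1 us after ESDHC_EXTN. */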
+ esdhc->in_sw_tuning = true;
+ ret = sdhci_execute_tuning(mmc, opcode);
+ esdhc->in_sw_tuning = false;
+ return ret;
+}
+
static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
+ u8 window_start, window_end;
+ int ret, retries = 1;
bool hs400_tuning;
unsigned int clk;
u32 val;
- int ret;
/* For tuning mode, the sd clock divisor value
* must be larger than 3 according to reference manual.
@@ -846,39 +984,73 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->clock > clk)
esdhc_of_set_clock(host, clk);
- if (esdhc->quirk_limited_clk_division &&
- host->flags & SDHCI_HS400_TUNING)
- esdhc_of_set_clock(host, host->clock);
-
esdhc_tuning_block_enable(host, true);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
- ret = sdhci_execute_tuning(mmc, opcode);
- if (hs400_tuning) {
- val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
- val |= ESDHC_FLW_CTL_BG;
- sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
- }
+ do {
+ if (esdhc->quirk_limited_clk_division &&
+ hs400_tuning)
+ esdhc_of_set_clock(host, host->clock);
- if (host->tuning_err == -EAGAIN && esdhc->quirk_fixup_tuning) {
+ /* Do HW tuning */
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_MODE_MASK;
+ val |= ESDHC_TB_MODE_3;
+ sdhci_writel(host, val, ESDHC_TBCTL);
- /* program TBPTR[TB_WNDW_END_PTR] = 3*DIV_RATIO and
- * program TBPTR[TB_WNDW_START_PTR] = 5*DIV_RATIO
- */
- val = sdhci_readl(host, ESDHC_TBPTR);
- val = (val & ~((0x7f << 8) | 0x7f)) |
- (3 * esdhc->div_ratio) | ((5 * esdhc->div_ratio) << 8);
- sdhci_writel(host, val, ESDHC_TBPTR);
+ ret = sdhci_execute_tuning(mmc, opcode);
+ if (ret)
+ break;
- /* program the software tuning mode by setting
- * TBCTL[TB_MODE]=2'h3
+ /* If HW tuning fails and triggers the erratum,
+ * try the workaround.
*/
- val = sdhci_readl(host, ESDHC_TBCTL);
- val |= 0x3;
- sdhci_writel(host, val, ESDHC_TBCTL);
- sdhci_execute_tuning(mmc, opcode);
+ ret = host->tuning_err;
+ if (ret == -EAGAIN &&
+ (esdhc->quirk_tuning_erratum_type1 ||
+ esdhc->quirk_tuning_erratum_type2)) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+ pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
+ mmc_hostname(mmc));
+ /* Do SW tuning */
+ esdhc_prepare_sw_tuning(host, &window_start,
+ &window_end);
+ ret = esdhc_execute_sw_tuning(mmc, opcode,
+ window_start,
+ window_end);
+ if (ret)
+ break;
+
+ /* Retry both HW/SW tuning with reduced clock. */
+ ret = host->tuning_err;
+ if (ret == -EAGAIN && retries) {
+ /* Recover HS400 tuning flag */
+ if (hs400_tuning)
+ host->flags |= SDHCI_HS400_TUNING;
+
+ clk = host->max_clk / (esdhc->div_ratio + 1);
+ esdhc_of_set_clock(host, clk);
+ pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
+ mmc_hostname(mmc));
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ } while (retries--);
+
+ if (ret) {
+ esdhc_tuning_block_enable(host, false);
+ } else if (hs400_tuning) {
+ val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
+ val |= ESDHC_FLW_CTL_BG;
+ sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
}
+
return ret;
}
@@ -1114,10 +1286,15 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
esdhc = sdhci_pltfm_priv(pltfm_host);
- if (soc_device_match(soc_fixup_tuning))
- esdhc->quirk_fixup_tuning = true;
+ if (soc_device_match(soc_tuning_erratum_type1))
+ esdhc->quirk_tuning_erratum_type1 = true;
+ else
+ esdhc->quirk_tuning_erratum_type1 = false;
+
+ if (soc_device_match(soc_tuning_erratum_type2))
+ esdhc->quirk_tuning_erratum_type2 = true;
else
- esdhc->quirk_fixup_tuning = false;
+ esdhc->quirk_tuning_erratum_type2 = false;
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index eaffa85bc728..acefb76b4e15 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -21,6 +21,7 @@
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/mmc/slot-gpio.h>
@@ -1590,11 +1591,59 @@ static int amd_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static u32 sdhci_read_present_state(struct sdhci_host *host)
+{
+ return sdhci_readl(host, SDHCI_PRESENT_STATE);
+}
+
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 present_state;
+
+ /*
+ * SDHC 0x7906 requires a hard reset to clear all internal state.
+ * Otherwise it can get into a bad state where the DATA lines are always
+ * read as zeros.
+ */
+ if (pdev->device == 0x7906 && (mask & SDHCI_RESET_ALL)) {
+ pci_clear_master(pdev);
+
+ pci_save_state(pdev);
+
+ pci_set_power_state(pdev, PCI_D3cold);
+ pr_debug("%s: power_state=%u\n", mmc_hostname(host->mmc),
+ pdev->current_state);
+ pci_set_power_state(pdev, PCI_D0);
+
+ pci_restore_state(pdev);
+
+ /*
+ * SDHCI_RESET_ALL says the card detect logic should not be
+ * reset, but since we need to reset the entire controller
+ * we should wait until the card detect logic has stabilized.
+ *
+ * This normally takes about 40ms.
+ */
+ readx_poll_timeout(
+ sdhci_read_present_state,
+ host,
+ present_state,
+ present_state & SDHCI_CD_STABLE,
+ 10000,
+ 100000
+ );
+ }
+
+ return sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops amd_sdhci_pci_ops = {
.set_clock = sdhci_set_clock,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -1673,6 +1722,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
+ SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
+ SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 558202fe64c6..981bbbe63aff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,8 @@
#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
+#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
+#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b056400e34b1..3140fe2e5dba 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,8 +337,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
+ u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
sdhci_init(host, 0);
sdhci_enable_card_detection(host);
+
+ /*
+ * A change to the card detect bits indicates a change in present state,
+ * refer to sdhci_set_card_detection(). A card detect interrupt might have
+ * been missed while the host controller was being reset, so trigger a
+ * rescan to check.
+ */
+ if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
static void __sdhci_led_activate(struct sdhci_host *host)
@@ -2202,7 +2213,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (!(ctrl & SDHCI_CTRL_VDD_180))
return 0;
- pr_warn("%s: 3.3V regulator output did not became stable\n",
+ pr_warn("%s: 3.3V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
@@ -2234,7 +2245,7 @@ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
if (ctrl & SDHCI_CTRL_VDD_180)
return 0;
- pr_warn("%s: 1.8V regulator output did not became stable\n",
+ pr_warn("%s: 1.8V regulator output did not become stable\n",
mmc_hostname(mmc));
return -EAGAIN;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index bb90757ecace..b8e897e31e2e 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -12,6 +12,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
+#include "cqhci.h"
#include "sdhci-pltfm.h"
/* CTL_CFG Registers */
@@ -68,6 +69,9 @@
#define CLOCK_TOO_SLOW_HZ 400000
+/* Command Queue Host Controller Interface Base address */
+#define SDHCI_AM654_CQE_BASE_ADDR 0x200
+
static struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -259,6 +263,19 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
.flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
};
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_j721e_8bit_ops = {
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -267,6 +284,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_am654_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -290,6 +308,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
.set_power = sdhci_am654_set_power,
.set_clock = sdhci_j721e_4bit_set_clock,
.write_b = sdhci_am654_write_b,
+ .irq = sdhci_am654_cqhci_irq,
.reset = sdhci_reset,
};
@@ -304,6 +323,40 @@ static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
.pdata = &sdhci_j721e_4bit_pdata,
.flags = IOMUX_PRESENT,
};
+
+static void sdhci_am654_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
+ .enable = sdhci_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_am654_dumpregs,
+};
+
+static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
+{
+ struct cqhci_host *cq_host;
+ int ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent, sizeof(struct cqhci_host),
+ GFP_KERNEL);
+ if (!cq_host)
+ return -ENOMEM;
+
+ cq_host->mmio = host->ioaddr + SDHCI_AM654_CQE_BASE_ADDR;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+ cq_host->ops = &sdhci_am654_cqhci_ops;
+
+ host->mmc->caps2 |= MMC_CAP2_CQE;
+
+ ret = cqhci_init(cq_host, host->mmc, 1);
+
+ return ret;
+}
+
static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -344,7 +397,23 @@ static int sdhci_am654_init(struct sdhci_host *host)
regmap_update_bits(sdhci_am654->base, CTL_CFG_2, SLOTTYPE_MASK,
ctl_cfg_2);
- return sdhci_add_host(host);
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ ret = sdhci_am654_cqe_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto err_cleanup_host;
+
+ return 0;
+
+err_cleanup_host:
+ sdhci_cleanup_host(host);
+ return ret;
}
static int sdhci_am654_get_of_property(struct platform_device *pdev,
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index f8b939e63e02..fa0dfc657c22 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -16,31 +16,7 @@
#include <linux/clk.h>
#include "sdhci-pltfm.h"
-
-/* F_SDH30 extended Controller registers */
-#define F_SDH30_AHB_CONFIG 0x100
-#define F_SDH30_AHB_BIGED 0x00000040
-#define F_SDH30_BUSLOCK_DMA 0x00000020
-#define F_SDH30_BUSLOCK_EN 0x00000010
-#define F_SDH30_SIN 0x00000008
-#define F_SDH30_AHB_INCR_16 0x00000004
-#define F_SDH30_AHB_INCR_8 0x00000002
-#define F_SDH30_AHB_INCR_4 0x00000001
-
-#define F_SDH30_TUNING_SETTING 0x108
-#define F_SDH30_CMD_CHK_DIS 0x00010000
-
-#define F_SDH30_IO_CONTROL2 0x114
-#define F_SDH30_CRES_O_DN 0x00080000
-#define F_SDH30_MSEL_O_1_8 0x00040000
-
-#define F_SDH30_ESD_CONTROL 0x124
-#define F_SDH30_EMMC_RST 0x00000002
-#define F_SDH30_EMMC_HS200 0x01000000
-
-#define F_SDH30_CMD_DAT_DELAY 0x200
-
-#define F_SDH30_MIN_CLOCK 400000
+#include "sdhci_f_sdh30.h"
struct f_sdhost_priv {
struct clk *clk_iface;
diff --git a/drivers/mmc/host/sdhci_f_sdh30.h b/drivers/mmc/host/sdhci_f_sdh30.h
new file mode 100644
index 000000000000..fc1ad28f7ca9
--- /dev/null
+++ b/drivers/mmc/host/sdhci_f_sdh30.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 - 2015 Fujitsu Semiconductor, Ltd
+ * Vincent Yang <vincent.yang@tw.fujitsu.com>
+ * Copyright (C) 2015 Linaro Ltd Andy Green <andy.green@linaro.org>
+ * Copyright (C) 2019 Socionext Inc.
+ *
+ */
+
+/* F_SDH30 extended Controller registers */
+#define F_SDH30_AHB_CONFIG 0x100
+#define F_SDH30_AHB_BIGED BIT(6)
+#define F_SDH30_BUSLOCK_DMA BIT(5)
+#define F_SDH30_BUSLOCK_EN BIT(4)
+#define F_SDH30_SIN BIT(3)
+#define F_SDH30_AHB_INCR_16 BIT(2)
+#define F_SDH30_AHB_INCR_8 BIT(1)
+#define F_SDH30_AHB_INCR_4 BIT(0)
+
+#define F_SDH30_TUNING_SETTING 0x108
+#define F_SDH30_CMD_CHK_DIS BIT(16)
+
+#define F_SDH30_IO_CONTROL2 0x114
+#define F_SDH30_CRES_O_DN BIT(19)
+#define F_SDH30_MSEL_O_1_8 BIT(18)
+
+#define F_SDH30_ESD_CONTROL 0x124
+#define F_SDH30_EMMC_RST BIT(1)
+#define F_SDH30_CMD_DAT_DELAY BIT(9)
+#define F_SDH30_EMMC_HS200 BIT(24)
+
+#define F_SDH30_MIN_CLOCK 400000
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 2f0b092d6dcc..c5ba13fae399 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -163,7 +163,6 @@ struct tmio_mmc_host {
unsigned long last_req_ts;
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
- bool runtime_synced;
bool sdio_irq_enabled;
/* Mandatory callback */
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9b6e1001e77c..c4a1d49fbea4 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -1184,7 +1185,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
if (ret == -EPROBE_DEFER)
return ret;
- mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = pdata->max_segs ? : 32;
mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
@@ -1248,10 +1249,12 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
+ dev_pm_domain_start(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
ret = mmc_add_host(mmc);
if (ret)
@@ -1333,11 +1336,6 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
{
struct tmio_mmc_host *host = dev_get_drvdata(dev);
- if (!host->runtime_synced) {
- host->runtime_synced = true;
- return 0;
- }
-
tmio_mmc_clk_enable(host);
tmio_mmc_hw_reset(host->mmc);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index a3680c900689..6ced1b7f642f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -2070,18 +2070,11 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card)
-{ /* NOT irq */
- struct vub300_mmc_host *vub300 = mmc_priv(mmc);
- dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n");
-}
-
static const struct mmc_host_ops vub300_mmc_ops = {
.request = vub300_mmc_request,
.set_ios = vub300_mmc_set_ios,
.get_ro = vub300_mmc_get_ro,
.enable_sdio_irq = vub300_enable_sdio_irq,
- .init_card = vub300_init_card,
};
static int vub300_probe(struct usb_interface *interface,
diff --git a/drivers/mtd/nand/onenand/Makefile b/drivers/mtd/nand/onenand/Makefile
index f8b624aca9cc..a27b635eb23a 100644
--- a/drivers/mtd/nand/onenand/Makefile
+++ b/drivers/mtd/nand/onenand/Makefile
@@ -9,6 +9,6 @@ obj-$(CONFIG_MTD_ONENAND) += onenand.o
# Board specific.
obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o
obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o
-obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o
+obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung_mtd.o
onenand-objs = onenand_base.o onenand_bbt.o
diff --git a/drivers/mtd/nand/onenand/samsung.c b/drivers/mtd/nand/onenand/samsung_mtd.c
index 55e5536a5850..55e5536a5850 100644
--- a/drivers/mtd/nand/onenand/samsung.c
+++ b/drivers/mtd/nand/onenand/samsung_mtd.c
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 1b77fff9f892..cc9a28cf9d82 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1078,36 +1078,6 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
return err;
}
-#ifdef CONFIG_COMPAT
-static long vol_cdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
- return vol_cdev_ioctl(file, cmd, translated_arg);
-}
-
-static long ubi_cdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
- return ubi_cdev_ioctl(file, cmd, translated_arg);
-}
-
-static long ctrl_cdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- unsigned long translated_arg = (unsigned long)compat_ptr(arg);
-
- return ctrl_cdev_ioctl(file, cmd, translated_arg);
-}
-#else
-#define vol_cdev_compat_ioctl NULL
-#define ubi_cdev_compat_ioctl NULL
-#define ctrl_cdev_compat_ioctl NULL
-#endif
-
/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
.owner = THIS_MODULE,
@@ -1118,7 +1088,7 @@ const struct file_operations ubi_vol_cdev_operations = {
.write = vol_cdev_write,
.fsync = vol_cdev_fsync,
.unlocked_ioctl = vol_cdev_ioctl,
- .compat_ioctl = vol_cdev_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/* UBI character device operations */
@@ -1126,13 +1096,13 @@ const struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = ubi_cdev_ioctl,
- .compat_ioctl = ubi_cdev_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = ctrl_cdev_ioctl,
- .compat_ioctl = ctrl_cdev_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 0f847d510950..54646c2c2744 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -107,6 +107,7 @@ void ubi_dump_vol_info(const struct ubi_volume *vol)
pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
pr_err("\tcorrupted %d\n", vol->corrupted);
pr_err("\tupd_marker %d\n", vol->upd_marker);
+ pr_err("\tskip_check %d\n", vol->skip_check);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index c44c8470247e..426820ab9afe 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -57,18 +57,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_available(struct rb_root *root)
-{
- struct rb_node *p;
- struct ubi_wl_entry *e;
-
- ubi_rb_for_each_entry(p, e, root, u.rb)
- if (e->pnum < UBI_FM_MAX_START)
- return 1;
-
- return 0;
-}
-
/**
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
* @ubi: UBI device description object
@@ -277,8 +265,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
+ struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
+
+ /* Do we already have an anchor? */
+ if (ubi->fm_anchor) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* See if we can find an anchor PEB on the list of free PEBs */
+ anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (anchor) {
+ ubi->fm_anchor = anchor;
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ /* No luck, trigger wear leveling to produce a new anchor PEB */
+ ubi->fm_do_produce_anchor = 1;
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -294,7 +300,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
return -ENOMEM;
}
- wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
__schedule_ubi_work(ubi, wrk);
return 0;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 30621c67721a..1c7be4eb3ba6 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1540,14 +1540,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
- ret = ubi_ensure_anchor_pebs(ubi);
- if (ret) {
- up_write(&ubi->fm_eba_sem);
- up_write(&ubi->work_sem);
- up_write(&ubi->fm_protect);
- return ret;
- }
-
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
@@ -1618,7 +1610,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
}
spin_lock(&ubi->wl_lock);
- tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ tmp_e = ubi->fm_anchor;
+ ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
@@ -1670,6 +1663,9 @@ out_unlock:
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
+
+ ubi_ensure_anchor_pebs(ubi);
+
return ret;
err:
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 721b6aa7936c..9688b411c930 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -491,6 +491,8 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
+ * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_do_produce_anchor: If true, produce an anchor PEB in WL
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -599,6 +601,8 @@ struct ubi_device {
struct work_struct fm_work;
int fm_work_scheduled;
int fast_attach;
+ struct ubi_wl_entry *fm_anchor;
+ int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -789,7 +793,6 @@ struct ubi_attach_info {
* @vol_id: the volume ID on which this erasure is being performed
* @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
- * @anchor: produce a anchor PEB to by used by fastmap
*
* The @func pointer points to the worker function. If the @shutdown argument is
* not zero, the worker has to free the resources and exit immediately as the
@@ -805,7 +808,6 @@ struct ubi_work {
int vol_id;
int lnum;
int torture;
- int anchor;
};
#include "debug.h"
@@ -968,7 +970,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
-int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
+static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
#endif
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 3fcdefe2714d..5d77a38dba54 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
}
}
- /* If no fastmap has been written and this WL entry can be used
- * as anchor PEB, hold it back and return the second best WL entry
- * such that fastmap can use the anchor PEB later. */
- if (prev_e && !ubi->fm_disabled &&
- !ubi->fm && e->pnum < UBI_FM_MAX_START)
- return prev_e;
-
return e;
}
@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
-#ifdef CONFIG_MTD_UBI_FASTMAP
- int anchor = wrk->anchor;
-#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
@@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
#ifdef CONFIG_MTD_UBI_FASTMAP
- /* Check whether we need to produce an anchor PEB */
- if (!anchor)
- anchor = !anchor_pebs_available(&ubi->free);
-
- if (anchor) {
+ if (ubi->fm_do_produce_anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
@@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ ubi->fm_do_produce_anchor = 0;
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
@@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
goto out_cancel;
}
- wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
@@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+
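+ /* Claim the freshly erased PEB as the fastmap anchor if one is still needed. */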
+ if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
+ ubi->fm_anchor = e;
+ ubi->fm_do_produce_anchor = 0;
+ } else {
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+
spin_unlock(&ubi->wl_lock);
/*
@@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (err)
goto out_free;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi_ensure_anchor_pebs(ubi);
+#endif
return 0;
out_free:
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index a9e2d669acd8..c93a53293786 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,6 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 40b079162804..bd40b114d6cd 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -102,8 +102,8 @@ static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
&ser->rx_blob);
- debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir,
- (u32 *)&ser->state);
+ debugfs_create_xul("ser_state", 0400, ser->debugfs_tty_dir,
+ &ser->state);
debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
&ser->tty_status);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5ae2e1e0b0e..9c767ee252ac 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4422,6 +4422,7 @@ static int macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
+ tasklet_kill(&bp->hresp_err_tasklet);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 17d300ea9955..f51dca1526c6 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -49,4 +49,4 @@ config BE2NET_SKYHAWK
comment "WARNING: be2net is useless without any enabled chip"
depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
- BE2NET_SKYHAWK=n && BE2NET
+ BE2NET_SKYHAWK=n && BE2NET
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index a6f2063f1475..8ed85037f021 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1858,7 +1858,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
}
/* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.txt)
+ * Documentation/networking/phy.rst)
*/
phy_support_asym_pause(phy);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index aca95f64bde8..9b7a8db9860f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -544,7 +544,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
}
qpl->id = id;
- qpl->num_entries = pages;
+ qpl->num_entries = 0;
qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
@@ -562,6 +562,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
/* caller handles clean up */
if (err)
return -ENOMEM;
+ qpl->num_entries++;
}
priv->num_registered_pages += pages;
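
The gve fix starts num_entries at zero and bumps it once per successfully mapped page, so an allocation failure mid-loop leaves an accurate count for the caller's unwind path. The generic shape of the pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/errno.h>

/* Count-as-you-go: on failure, the caller frees exactly *count items
 * rather than the full requested amount.
 */
static int alloc_table(void **slots, int requested, int *count)
{
	int i;

	*count = 0;
	for (i = 0; i < requested; i++) {
		slots[i] = kzalloc(64, GFP_KERNEL);
		if (!slots[i])
			return -ENOMEM;	/* *count reflects what succeeded */
		(*count)++;
	}
	return 0;
}
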
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0686ded7ad3a..c90080781924 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -159,6 +159,40 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
+/**
+ * ibmvnic_wait_for_completion - Check device state and wait for completion
+ * @adapter: private device data
+ * @comp_done: completion structure to wait for
+ * @timeout: time to wait in milliseconds
+ *
+ * Wait for a completion signal or until the timeout limit is reached
+ * while checking that the device is still active.
+ */
+static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
+ struct completion *comp_done,
+ unsigned long timeout)
+{
+ struct net_device *netdev;
+ unsigned long div_timeout;
+ u8 retry;
+
+ netdev = adapter->netdev;
+ retry = 5;
+ div_timeout = msecs_to_jiffies(timeout / retry);
+ while (true) {
+ if (!adapter->crq.active) {
+ netdev_err(netdev, "Device down!\n");
+ return -ENODEV;
+ }
+ if (!retry--)
+ break;
+ if (wait_for_completion_timeout(comp_done, div_timeout))
+ return 0;
+ }
+ netdev_err(netdev, "Operation timed out.\n");
+ return -ETIMEDOUT;
+}
+
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
@@ -176,21 +210,35 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = adapter->map_id;
adapter->map_id++;
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
rc = send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
if (rc) {
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev,
+ "Long term map request aborted or timed out,rc = %d\n",
+ rc);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
return rc;
}
- wait_for_completion(&adapter->fw_done);
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ mutex_unlock(&adapter->fw_lock);
return -1;
}
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -211,22 +259,37 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb)
{
+ struct device *dev = &adapter->vdev->dev;
int rc;
memset(ltb->buff, 0, ltb->size);
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+
+ reinit_completion(&adapter->fw_done);
rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_info(dev,
+ "Reset failed, long term map request timed out or aborted\n");
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
if (adapter->fw_done_rc) {
- dev_info(&adapter->vdev->dev,
+ dev_info(dev,
"Reset failed, attempting to free and reallocate buffer\n");
free_long_term_buff(adapter, ltb);
+ mutex_unlock(&adapter->fw_lock);
return alloc_long_term_buff(adapter, ltb, ltb->size);
}
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -943,13 +1006,25 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (adapter->vpd->buff)
len = adapter->vpd->len;
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
crq.get_vpd_size.cmd = GET_VPD_SIZE;
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+ mutex_unlock(&adapter->fw_lock);
if (!adapter->vpd->len)
return -ENODATA;
@@ -976,7 +1051,10 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
return -ENOMEM;
}
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
reinit_completion(&adapter->fw_done);
+
crq.get_vpd.first = IBMVNIC_CRQ_CMD;
crq.get_vpd.cmd = GET_VPD;
crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
@@ -985,10 +1063,20 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (rc) {
kfree(adapter->vpd->buff);
adapter->vpd->buff = NULL;
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
+ kfree(adapter->vpd->buff);
+ adapter->vpd->buff = NULL;
+ mutex_unlock(&adapter->fw_lock);
return rc;
}
- wait_for_completion(&adapter->fw_done);
+ mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -1689,20 +1777,25 @@ static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
- init_completion(&adapter->fw_done);
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
rc = ibmvnic_send_crq(adapter, &crq);
if (rc) {
rc = -EIO;
+ mutex_unlock(&adapter->fw_lock);
goto err;
}
- wait_for_completion(&adapter->fw_done);
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- if (adapter->fw_done_rc) {
+ if (rc || adapter->fw_done_rc) {
rc = -EIO;
+ mutex_unlock(&adapter->fw_lock);
goto err;
}
-
+ mutex_unlock(&adapter->fw_lock);
return 0;
err:
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
@@ -2316,12 +2409,19 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
- init_completion(&adapter->reset_done);
+ reinit_completion(&adapter->reset_done);
adapter->wait_for_reset = true;
rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
- if (rc)
- return rc;
- wait_for_completion(&adapter->reset_done);
+
+ if (rc) {
+ ret = rc;
+ goto out;
+ }
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
+ if (rc) {
+ ret = -ENODEV;
+ goto out;
+ }
ret = 0;
if (adapter->reset_done_rc) {
@@ -2332,13 +2432,21 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
adapter->desired.rx_entries = adapter->fallback.rx_entries;
adapter->desired.tx_entries = adapter->fallback.tx_entries;
- init_completion(&adapter->reset_done);
+ reinit_completion(&adapter->reset_done);
adapter->wait_for_reset = true;
rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
- if (rc)
- return ret;
- wait_for_completion(&adapter->reset_done);
+ if (rc) {
+ ret = rc;
+ goto out;
+ }
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
+ 60000);
+ if (rc) {
+ ret = -ENODEV;
+ goto out;
+ }
}
+out:
adapter->wait_for_reset = false;
return ret;
@@ -2603,11 +2711,13 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
cpu_to_be32(sizeof(struct ibmvnic_statistics));
/* Wait for data to be written */
- init_completion(&adapter->stats_done);
+ reinit_completion(&adapter->stats_done);
rc = ibmvnic_send_crq(adapter, &crq);
if (rc)
return;
- wait_for_completion(&adapter->stats_done);
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
+ if (rc)
+ return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
@@ -4408,11 +4518,24 @@ static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
memset(&crq, 0, sizeof(crq));
crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
- init_completion(&adapter->fw_done);
+
+ mutex_lock(&adapter->fw_lock);
+ adapter->fw_done_rc = 0;
+ reinit_completion(&adapter->fw_done);
+
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
return rc;
- wait_for_completion(&adapter->fw_done);
+ }
+
+ rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+ if (rc) {
+ mutex_unlock(&adapter->fw_lock);
+ return rc;
+ }
+
+ mutex_unlock(&adapter->fw_lock);
return adapter->fw_done_rc ? -EIO : 0;
}
@@ -4505,6 +4628,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case IBMVNIC_CRQ_XPORT_EVENT:
netif_carrier_off(netdev);
adapter->crq.active = false;
+ /* terminate any thread waiting for a response
+ * from the device
+ */
+ if (!completion_done(&adapter->fw_done)) {
+ adapter->fw_done_rc = -EIO;
+ complete(&adapter->fw_done);
+ }
+ if (!completion_done(&adapter->stats_done))
+ complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
adapter->force_reset_recovery = true;
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
@@ -4959,7 +5091,11 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
__ibmvnic_delayed_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ mutex_init(&adapter->fw_lock);
init_completion(&adapter->init_done);
+ init_completion(&adapter->fw_done);
+ init_completion(&adapter->reset_done);
+ init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
do {
@@ -5017,6 +5153,7 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+ mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
return rc;
@@ -5041,6 +5178,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
adapter->state = VNIC_REMOVED;
rtnl_unlock();
+ mutex_destroy(&adapter->fw_lock);
device_remove_file(&dev->dev, &dev_attr_failover);
free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
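
Taken together, the ibmvnic hunks replace per-call init_completion() with one-time initialization in probe, reinit_completion() under the new fw_lock mutex, and a bounded wait that also notices a dead CRQ. A condensed sketch of that command flow, not the driver's code, assuming a simplified context struct:

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct cmd_ctx {			/* simplified stand-in for the adapter */
	struct mutex lock;		/* serializes firmware commands */
	struct completion done;		/* init_completion() once, at probe */
	int done_rc;			/* filled in by the response handler */
	bool active;			/* is the command queue still alive? */
};

/* Bounded wait in timeout/5 slices, so a dead device is noticed
 * between slices instead of after the full interval.
 */
static int cmd_wait(struct cmd_ctx *c, unsigned long timeout_ms)
{
	unsigned long slice = msecs_to_jiffies(timeout_ms / 5);
	u8 retry = 5;

	while (true) {
		if (!c->active)
			return -ENODEV;
		if (!retry--)
			break;
		if (wait_for_completion_timeout(&c->done, slice))
			return 0;
	}

	return -ETIMEDOUT;
}

static int cmd_issue(struct cmd_ctx *c, int (*send)(struct cmd_ctx *))
{
	int rc;

	mutex_lock(&c->lock);
	c->done_rc = 0;
	reinit_completion(&c->done);	/* safe: no waiter can race, lock held */
	rc = send(c);
	if (!rc)
		rc = cmd_wait(c, 10000);
	if (!rc && c->done_rc)
		rc = -EIO;		/* device replied with an error */
	mutex_unlock(&c->lock);

	return rc;
}
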
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index ebc39248b334..60eccaf91b12 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1026,6 +1026,8 @@ struct ibmvnic_adapter {
int init_done_rc;
struct completion fw_done;
+ /* Used for serialization of device commands */
+ struct mutex fw_lock;
int fw_done_rc;
struct completion reset_done;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index c40729b2c184..7fad2f24dcad 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -45,7 +45,6 @@
#define BAR_0 0
#define BAR_1 1
-#define BAR_5 5
#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 416da9619928..aca97b084003 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -977,7 +977,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
if (adapter->need_ioport) {
- for (i = BAR_1; i <= BAR_5; i++) {
+ for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
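
The e1000 hunk (and the ixgb, stmmac and dwc-xlgmac hunks below) replaces hard-coded BAR_5 / PCI_STD_RESOURCE_END bounds with the new PCI_STD_NUM_BARS constant. The iteration idiom, sketched:

#include <linux/pci.h>

/* Find the first I/O-port BAR among the six standard BARs. */
static int find_io_bar(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;	/* BAR not implemented */
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO)
			return i;
	}
	return -ENODEV;
}
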
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index e85271b68410..681d44cc9784 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -42,7 +42,6 @@
#define BAR_0 0
#define BAR_1 1
-#define BAR_5 5
struct ixgb_adapter;
#include "ixgb_hw.h"
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 0940a0da16f2..3d8c051dd327 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -412,7 +412,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
}
- for (i = BAR_1; i <= BAR_5; i++) {
+ for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 784b1e26f414..6ed87534d314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -130,42 +130,6 @@ static const char *mlx5e_netdev_kind(struct net_device *dev)
return "unknown";
}
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
-{
- struct dst_entry *dst;
- struct neighbour *n;
-
- int ret;
-
- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
- fl6);
- if (ret < 0)
- return ret;
-
- if (!(*out_ttl))
- *out_ttl = ip6_dst_hoplimit(dst);
-
- ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
- if (ret < 0) {
- dst_release(dst);
- return ret;
- }
-
- n = dst_neigh_lookup(dst, &fl6->daddr);
- dst_release(dst);
- if (!n)
- return -ENOMEM;
-
- *out_n = n;
- return 0;
-}
-
static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
struct mlx5e_encap_entry *e)
{
@@ -319,6 +283,43 @@ release_neigh:
return err;
}
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
+{
+ struct dst_entry *dst;
+ struct neighbour *n;
+
+ int ret;
+
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
+ return ret;
+
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
+ if (ret < 0) {
+ dst_release(dst);
+ return ret;
+ }
+
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e)
@@ -436,6 +437,7 @@ release_neigh:
neigh_release(n);
return err;
}
+#endif
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
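
The tc_tun hunk moves the IPv6 route lookup inside an #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) block so the file still links when IPv6 is disabled or modular. A sketch of the guard pattern; the stub variant shown here is one common way to keep callers building and is not taken from this patch:

#include <linux/kconfig.h>
#include <linux/errno.h>

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int foo_route_lookup_ipv6(void);	/* real lookup lives here */
#else
static inline int foo_route_lookup_ipv6(void)
{
	return -EOPNOTSUPP;		/* IPv6 not built in */
}
#endif
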
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 584074bbf669..173e2c12e1c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -837,8 +837,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_init_qp_table(dev);
- mlx5_init_mkey_table(dev);
-
mlx5_init_reserved_gids(dev);
mlx5_init_clock(dev);
@@ -896,7 +894,6 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
@@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
- mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index c501bf2a0252..42cc3c7ac5b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,16 +36,6 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
-{
- xa_init_flags(&dev->priv.mkey_table, XA_FLAGS_LOCK_IRQ);
-}
-
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
-{
- WARN_ON(!xa_empty(&dev->priv.mkey_table));
-}
-
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -54,7 +44,6 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_async_work *context)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
u32 mkey_index;
void *mkc;
int err;
@@ -84,16 +73,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
mkey_index, key, mkey->key);
-
- err = xa_err(xa_store_irq(mkeys, mlx5_base_mkey(mkey->key), mkey,
- GFP_KERNEL));
- if (err) {
- mlx5_core_warn(dev, "failed xarray insert of mkey 0x%x, %d\n",
- mlx5_base_mkey(mkey->key), err);
- mlx5_core_destroy_mkey(dev, mkey);
- }
-
- return err;
+ return 0;
}
EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
@@ -111,12 +91,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
{
u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0};
- struct xarray *mkeys = &dev->priv.mkey_table;
- unsigned long flags;
-
- xa_lock_irqsave(mkeys, flags);
- __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
- xa_unlock_irqrestore(mkeys, flags);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 0e96ffab3b05..2cccadc204fd 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -583,18 +583,10 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- struct ocelot_skb *oskb =
- kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
-
- if (unlikely(!oskb))
- return -ENOMEM;
-
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
-
- oskb->skb = skb;
- oskb->id = ocelot_port->ts_id % 4;
-
- list_add_tail(&oskb->head, &ocelot_port->skbs);
+ /* Store timestamp ID in cb[0] of sk_buff */
+ skb->cb[0] = ocelot_port->ts_id % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, skb);
return 0;
}
return -ENODATA;
@@ -704,12 +696,11 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
- struct list_head *pos, *tmp;
- struct sk_buff *skb = NULL;
- struct ocelot_skb *entry;
struct ocelot_port *port;
struct timespec64 ts;
+ unsigned long flags;
u32 val, id, txport;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
@@ -727,21 +718,22 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Retrieve its associated skb */
port = ocelot->ports[txport];
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
- if (entry->id != id)
- continue;
-
- skb = entry->skb;
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
- list_del(pos);
- kfree(entry);
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (skb->cb[0] != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
}
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
/* Next ts */
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
- if (unlikely(!skb))
+ if (unlikely(!skb_match))
continue;
/* Get the h/w timestamp */
@@ -750,9 +742,9 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(skb_match, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb_match);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
@@ -2205,7 +2197,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- INIT_LIST_HEAD(&ocelot_port->skbs);
+ skb_queue_head_init(&ocelot_port->tx_skbs);
/* Basic L2 initialization */
@@ -2490,9 +2482,7 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- struct list_head *pos, *tmp;
struct ocelot_port *port;
- struct ocelot_skb *entry;
int i;
cancel_delayed_work(&ocelot->stats_work);
@@ -2502,14 +2492,7 @@ void ocelot_deinit(struct ocelot *ocelot)
for (i = 0; i < ocelot->num_phys_ports; i++) {
port = ocelot->ports[i];
-
- list_for_each_safe(pos, tmp, &port->skbs) {
- entry = list_entry(pos, struct ocelot_skb, head);
-
- list_del(pos);
- dev_kfree_skb_any(entry->skb);
- kfree(entry);
- }
+ skb_queue_purge(&port->tx_skbs);
}
}
EXPORT_SYMBOL(ocelot_deinit);
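
The ocelot rework drops the kzalloc'd ocelot_skb wrapper list in favor of a struct sk_buff_head plus the timestamp ID stashed in skb->cb[0], removing a GFP_ATOMIC allocation from the transmit path. A sketch of the matching lookup, assuming a queue named tx_skbs:

#include <linux/skbuff.h>

/* Unlink the skb whose cb[0] carries the given timestamp ID.
 * skb_queue_walk_safe() does not take the queue lock itself, so the
 * queue's built-in spinlock is held around the walk, as in the patch.
 */
static struct sk_buff *pull_ts_skb(struct sk_buff_head *tx_skbs, u8 id)
{
	struct sk_buff *skb, *tmp, *match = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tx_skbs->lock, flags);
	skb_queue_walk_safe(tx_skbs, skb, tmp) {
		if (skb->cb[0] != id)
			continue;
		__skb_unlink(skb, tx_skbs);
		match = skb;
		break;
	}
	spin_unlock_irqrestore(&tx_skbs->lock, flags);

	return match;
}
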
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index dbdb7c5ae8f1..39317cdfa6cf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -596,8 +596,8 @@ enum ionic_txq_desc_opcode {
* the @encap is set, the device will
* offload the outer header checksums using
* LCO (local checksum offload) (see
- * Documentation/networking/checksum-
- * offloads.txt for more info).
+ * Documentation/networking/checksum-offloads.rst
+ * for more info).
*
* IONIC_TXQ_DESC_OPCODE_CSUM_HW:
*
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index d47a038cb8d0..38d212686123 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1542,6 +1542,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ tp->dev->wol_enabled = wolopts ? 1 : 0;
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3872,7 +3873,7 @@ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
r8168dp_hw_jumbo_enable(tp);
break;
- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
r8168e_hw_jumbo_enable(tp);
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 292045f4581f..8237dbc3e991 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -489,7 +489,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
}
/* Get the base address of device */
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
@@ -532,7 +532,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
if (priv->plat->stmmac_clk)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
pcim_iounmap_regions(pdev, BIT(i));
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
index 386bafe74c3f..fa8604d7b797 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -34,7 +34,7 @@ static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
return ret;
}
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pcidev, i) == 0)
continue;
ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 929f3d3354e3..ecdbde539eb7 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -384,7 +384,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
- int mcast_members;
+ int mcast_members = 0;
int idx;
idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
@@ -397,11 +397,13 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
mcast_members = cpsw_ale_get_port_mask(ale_entry,
ale->port_mask_bits);
mcast_members &= ~port_mask;
+ }
+
+ if (mcast_members)
cpsw_ale_set_port_mask(ale_entry, mcast_members,
ale->port_mask_bits);
- } else {
+ else
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
- }
cpsw_ale_write(ale, idx, ale_entry);
return 0;
@@ -478,6 +480,10 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
members = cpsw_ale_get_vlan_member_list(ale_entry,
ale->vlan_field_bits);
members &= ~port_mask;
+ if (!members) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ return;
+ }
untag = cpsw_ale_get_vlan_untag_force(ale_entry,
ale->vlan_field_bits);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 250bd90627a5..9caa876ce6e8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -955,6 +955,9 @@ struct net_device_context {
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+
+ /* Used to temporarily save the config info across hibernation */
+ struct netvsc_device_info *saved_netvsc_dev_info;
};
/* Per channel data */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 868e22e286ca..eff8fef4f775 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2424,6 +2424,61 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+static int netvsc_suspend(struct hv_device *dev)
+{
+ struct net_device_context *ndev_ctx;
+ struct net_device *vf_netdev, *net;
+ struct netvsc_device *nvdev;
+ int ret;
+
+ net = hv_get_drvdata(dev);
+
+ ndev_ctx = netdev_priv(net);
+ cancel_delayed_work_sync(&ndev_ctx->dwork);
+
+ rtnl_lock();
+
+ nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ if (nvdev == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+ if (vf_netdev)
+ netvsc_unregister_vf(vf_netdev);
+
+ /* Save the current config info */
+ ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
+
+ ret = netvsc_detach(net, nvdev);
+out:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static int netvsc_resume(struct hv_device *dev)
+{
+ struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device_info *device_info;
+ int ret;
+
+ rtnl_lock();
+
+ net_device_ctx = netdev_priv(net);
+ device_info = net_device_ctx->saved_netvsc_dev_info;
+
+ ret = netvsc_attach(net, device_info);
+
+ rtnl_unlock();
+
+ kfree(device_info);
+ net_device_ctx->saved_netvsc_dev_info = NULL;
+
+ return ret;
+}
static const struct hv_vmbus_device_id id_table[] = {
/* Network guid */
{ HV_NIC_GUID, },
@@ -2438,6 +2493,8 @@ static struct hv_driver netvsc_drv = {
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
+ .suspend = netvsc_suspend,
+ .resume = netvsc_resume,
.driver = {
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
diff --git a/drivers/net/phy/aquantia.h b/drivers/net/phy/aquantia.h
index 5a16caab7b2f..c684b65c642c 100644
--- a/drivers/net/phy/aquantia.h
+++ b/drivers/net/phy/aquantia.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * HWMON driver for Aquantia PHY
+/* SPDX-License-Identifier: GPL-2.0 */
+/* HWMON driver for Aquantia PHY
*
* Author: Nikita Yushchenko <nikita.yoush@cogentembedded.com>
* Author: Andrew Lunn <andrew@lunn.ch>
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 5ecacb4e64f0..c86fb9d1240c 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
*/
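
The aquantia.h, bcm-phy-lib.h, mdio-*.h and aqc111.h hunks all make the same correction: SPDX lines in headers must be C comments, since headers can be pulled into assembly or linker scripts where // is not guaranteed to parse. The convention from Documentation/process/license-rules.rst, in brief:

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * foo.h: header files use the C-comment form on line 1.
 */

// SPDX-License-Identifier: GPL-2.0
/*
 * foo.c: C source files use the C++-style form on line 1.
 */
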
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 1c7a7c57dec3..93021904c5e4 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -151,13 +151,13 @@ static int dp83869_config_port_mirroring(struct phy_device *phydev)
struct dp83869_private *dp83869 = phydev->priv;
if (dp83869->port_mirroring == DP83869_PORT_MIRRORING_EN)
- phy_set_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
- DP83869_CFG3_PORT_MIRROR_EN);
+ return phy_set_bits_mmd(phydev, DP83869_DEVADDR,
+ DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
else
- phy_clear_bits_mmd(phydev, DP83869_DEVADDR, DP83869_GEN_CFG3,
- DP83869_CFG3_PORT_MIRROR_EN);
-
- return 0;
+ return phy_clear_bits_mmd(phydev, DP83869_DEVADDR,
+ DP83869_GEN_CFG3,
+ DP83869_CFG3_PORT_MIRROR_EN);
}
#ifdef CONFIG_OF_MDIO
@@ -204,7 +204,7 @@ static int dp83869_of_init(struct phy_device *phydev)
&dp83869->tx_fifo_depth))
dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
- return 0;
+ return ret;
}
#else
static int dp83869_of_init(struct phy_device *phydev)
@@ -216,7 +216,7 @@ static int dp83869_of_init(struct phy_device *phydev)
static int dp83869_configure_rgmii(struct phy_device *phydev,
struct dp83869_private *dp83869)
{
- int ret, val;
+ int ret = 0, val;
if (phy_interface_is_rgmii(phydev)) {
val = phy_read(phydev, MII_DP83869_PHYCTRL);
@@ -233,13 +233,13 @@ static int dp83869_configure_rgmii(struct phy_device *phydev,
}
if (dp83869->io_impedance >= 0)
- phy_modify_mmd(phydev, DP83869_DEVADDR,
- DP83869_IO_MUX_CFG,
- DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
- dp83869->io_impedance &
- DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
+ ret = phy_modify_mmd(phydev, DP83869_DEVADDR,
+ DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL,
+ dp83869->io_impedance &
+ DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL);
- return 0;
+ return ret;
}
static int dp83869_configure_mode(struct phy_device *phydev,
@@ -284,9 +284,11 @@ static int dp83869_configure_mode(struct phy_device *phydev,
return ret;
break;
case DP83869_RGMII_SGMII_BRIDGE:
- phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
- DP83869_SGMII_RGMII_BRIDGE,
- DP83869_SGMII_RGMII_BRIDGE);
+ ret = phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_OP_MODE,
+ DP83869_SGMII_RGMII_BRIDGE,
+ DP83869_SGMII_RGMII_BRIDGE);
+ if (ret)
+ return ret;
ret = phy_write_mmd(phydev, DP83869_DEVADDR,
DP83869_FX_CTRL, DP83869_FX_CTRL_DEFAULT);
@@ -334,7 +336,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
return -EINVAL;
};
- return 0;
+ return ret;
}
static int dp83869_config_init(struct phy_device *phydev)
@@ -358,12 +360,13 @@ static int dp83869_config_init(struct phy_device *phydev)
/* Clock output selection if muxing property is set */
if (dp83869->clk_output_sel != DP83869_CLK_O_SEL_REF_CLK)
- phy_modify_mmd(phydev, DP83869_DEVADDR, DP83869_IO_MUX_CFG,
- DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
- dp83869->clk_output_sel <<
- DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+ ret = phy_modify_mmd(phydev,
+ DP83869_DEVADDR, DP83869_IO_MUX_CFG,
+ DP83869_IO_MUX_CFG_CLK_O_SEL_MASK,
+ dp83869->clk_output_sel <<
+ DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
- return 0;
+ return ret;
}
static int dp83869_probe(struct phy_device *phydev)
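
The dp83869 hunks stop discarding the return values of phy_set_bits_mmd(), phy_clear_bits_mmd() and phy_modify_mmd(), so a failed MDIO transaction now surfaces from config_init() instead of being silently ignored. The shape of the fix, with hypothetical register names:

#include <linux/phy.h>
#include <linux/bits.h>

#define FOO_DEVADDR	0x1f		/* hypothetical MMD device address */
#define FOO_CFG		0x10		/* hypothetical register */
#define FOO_CFG_EN	BIT(0)

/* Propagate the MDIO result instead of returning 0 unconditionally. */
static int foo_set_feature(struct phy_device *phydev, bool enable)
{
	if (enable)
		return phy_set_bits_mmd(phydev, FOO_DEVADDR, FOO_CFG,
					FOO_CFG_EN);

	return phy_clear_bits_mmd(phydev, FOO_DEVADDR, FOO_CFG,
				  FOO_CFG_EN);
}
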
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index b7f89ad27465..e33d3ea9a907 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2009-2016 Cavium, Inc.
*/
diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h
index 751dab281f57..b1d27f7cd23f 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/drivers/net/phy/mdio-i2c.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MDIO I2C bridge
*
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
index b1f5ccb4ad9c..8af93ada8b64 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/drivers/net/phy/mdio-xgene.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Applied Micro X-Gene SoC MDIO Driver
*
* Copyright (c) 2016, Applied Micro Circuits Corporation
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 677c45985338..476db5345e1a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -440,6 +440,15 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_MODEL(0x001cc880),
+ .name = "RTL8208 Fast Ethernet",
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
.config_aneg = rtl8211_config_aneg,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 61824bbb5588..0cb1c2d0a8bc 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -270,7 +270,7 @@ static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
-static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
+static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
@@ -554,36 +554,66 @@ static __poll_t ppp_poll(struct file *file, poll_table *wait)
}
#ifdef CONFIG_PPP_FILTER
-static int get_filter(void __user *arg, struct sock_filter **p)
+static struct bpf_prog *get_filter(struct sock_fprog *uprog)
+{
+ struct sock_fprog_kern fprog;
+ struct bpf_prog *res = NULL;
+ int err;
+
+ if (!uprog->len)
+ return NULL;
+
+ /* uprog->len is unsigned short, so no overflow here */
+ fprog.len = uprog->len * sizeof(struct sock_filter);
+ fprog.filter = memdup_user(uprog->filter, fprog.len);
+ if (IS_ERR(fprog.filter))
+ return ERR_CAST(fprog.filter);
+
+ err = bpf_prog_create(&res, &fprog);
+ kfree(fprog.filter);
+
+ return err ? ERR_PTR(err) : res;
+}
+
+static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
{
struct sock_fprog uprog;
- struct sock_filter *code = NULL;
- int len;
- if (copy_from_user(&uprog, arg, sizeof(uprog)))
- return -EFAULT;
+ if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
+ return ERR_PTR(-EFAULT);
+ return get_filter(&uprog);
+}
- if (!uprog.len) {
- *p = NULL;
- return 0;
- }
+#ifdef CONFIG_COMPAT
+struct sock_fprog32 {
+ unsigned short len;
+ compat_caddr_t filter;
+};
- len = uprog.len * sizeof(struct sock_filter);
- code = memdup_user(uprog.filter, len);
- if (IS_ERR(code))
- return PTR_ERR(code);
+#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
+#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
+
+static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
+{
+ struct sock_fprog32 uprog32;
+ struct sock_fprog uprog;
- *p = code;
- return uprog.len;
+ if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
+ return ERR_PTR(-EFAULT);
+ uprog.len = uprog32.len;
+ uprog.filter = compat_ptr(uprog32.filter);
+ return get_filter(&uprog);
}
-#endif /* CONFIG_PPP_FILTER */
+#endif
+#endif
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
- struct ppp_idle idle;
+ struct ppp_idle32 idle32;
+ struct ppp_idle64 idle64;
struct npioctl npi;
int unit, cflags;
struct slcompress *vj;
@@ -679,9 +709,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case PPPIOCSCOMPRESS:
- err = ppp_set_compress(ppp, arg);
+ {
+ struct ppp_option_data data;
+ if (copy_from_user(&data, argp, sizeof(data)))
+ err = -EFAULT;
+ else
+ err = ppp_set_compress(ppp, &data);
break;
-
+ }
case PPPIOCGUNIT:
if (put_user(ppp->file.index, p))
break;
@@ -701,10 +736,18 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = 0;
break;
- case PPPIOCGIDLE:
- idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
- idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
- if (copy_to_user(argp, &idle, sizeof(idle)))
+ case PPPIOCGIDLE32:
+ idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ if (copy_to_user(argp, &idle32, sizeof(idle32)))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGIDLE64:
+ idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ if (copy_to_user(argp, &idle64, sizeof(idle64)))
break;
err = 0;
break;
@@ -753,55 +796,25 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_PPP_FILTER
case PPPIOCSPASS:
- {
- struct sock_filter *code;
-
- err = get_filter(argp, &code);
- if (err >= 0) {
- struct bpf_prog *pass_filter = NULL;
- struct sock_fprog_kern fprog = {
- .len = err,
- .filter = code,
- };
-
- err = 0;
- if (fprog.filter)
- err = bpf_prog_create(&pass_filter, &fprog);
- if (!err) {
- ppp_lock(ppp);
- if (ppp->pass_filter)
- bpf_prog_destroy(ppp->pass_filter);
- ppp->pass_filter = pass_filter;
- ppp_unlock(ppp);
- }
- kfree(code);
- }
- break;
- }
case PPPIOCSACTIVE:
{
- struct sock_filter *code;
-
- err = get_filter(argp, &code);
- if (err >= 0) {
- struct bpf_prog *active_filter = NULL;
- struct sock_fprog_kern fprog = {
- .len = err,
- .filter = code,
- };
+ struct bpf_prog *filter = ppp_get_filter(argp);
+ struct bpf_prog **which;
- err = 0;
- if (fprog.filter)
- err = bpf_prog_create(&active_filter, &fprog);
- if (!err) {
- ppp_lock(ppp);
- if (ppp->active_filter)
- bpf_prog_destroy(ppp->active_filter);
- ppp->active_filter = active_filter;
- ppp_unlock(ppp);
- }
- kfree(code);
+ if (IS_ERR(filter)) {
+ err = PTR_ERR(filter);
+ break;
}
+ if (cmd == PPPIOCSPASS)
+ which = &ppp->pass_filter;
+ else
+ which = &ppp->active_filter;
+ ppp_lock(ppp);
+ if (*which)
+ bpf_prog_destroy(*which);
+ *which = filter;
+ ppp_unlock(ppp);
+ err = 0;
break;
}
#endif /* CONFIG_PPP_FILTER */
@@ -827,6 +840,77 @@ out:
return err;
}
+#ifdef CONFIG_COMPAT
+struct ppp_option_data32 {
+ compat_uptr_t ptr;
+ u32 length;
+ compat_int_t transmit;
+};
+#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
+
+static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ppp_file *pf;
+ int err = -ENOIOCTLCMD;
+ void __user *argp = (void __user *)arg;
+
+ mutex_lock(&ppp_mutex);
+
+ pf = file->private_data;
+ if (pf && pf->kind == INTERFACE) {
+ struct ppp *ppp = PF_TO_PPP(pf);
+ switch (cmd) {
+#ifdef CONFIG_PPP_FILTER
+ case PPPIOCSPASS32:
+ case PPPIOCSACTIVE32:
+ {
+ struct bpf_prog *filter = compat_ppp_get_filter(argp);
+ struct bpf_prog **which;
+
+ if (IS_ERR(filter)) {
+ err = PTR_ERR(filter);
+ break;
+ }
+ if (cmd == PPPIOCSPASS32)
+ which = &ppp->pass_filter;
+ else
+ which = &ppp->active_filter;
+ ppp_lock(ppp);
+ if (*which)
+ bpf_prog_destroy(*which);
+ *which = filter;
+ ppp_unlock(ppp);
+ err = 0;
+ break;
+ }
+#endif /* CONFIG_PPP_FILTER */
+ case PPPIOCSCOMPRESS32:
+ {
+ struct ppp_option_data32 data32;
+ if (copy_from_user(&data32, argp, sizeof(data32))) {
+ err = -EFAULT;
+ } else {
+ struct ppp_option_data data = {
+ .ptr = compat_ptr(data32.ptr),
+ .length = data32.length,
+ .transmit = data32.transmit
+ };
+ err = ppp_set_compress(ppp, &data);
+ }
+ break;
+ }
+ }
+ }
+ mutex_unlock(&ppp_mutex);
+
+ /* all other commands have compatible arguments */
+ if (err == -ENOIOCTLCMD)
+ err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+
+ return err;
+}
+#endif
+
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -895,6 +979,9 @@ static const struct file_operations ppp_device_fops = {
.write = ppp_write,
.poll = ppp_poll,
.unlocked_ioctl = ppp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ppp_compat_ioctl,
+#endif
.open = ppp_open,
.release = ppp_release,
.llseek = noop_llseek,
@@ -2734,24 +2821,20 @@ ppp_output_wakeup(struct ppp_channel *chan)
/* Process the PPPIOCSCOMPRESS ioctl. */
static int
-ppp_set_compress(struct ppp *ppp, unsigned long arg)
+ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
{
- int err;
+ int err = -EFAULT;
struct compressor *cp, *ocomp;
- struct ppp_option_data data;
void *state, *ostate;
unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
- err = -EFAULT;
- if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
- goto out;
- if (data.length > CCP_MAX_OPTION_LENGTH)
+ if (data->length > CCP_MAX_OPTION_LENGTH)
goto out;
- if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length))
+ if (copy_from_user(ccp_option, data->ptr, data->length))
goto out;
err = -EINVAL;
- if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length)
+ if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
goto out;
cp = try_then_request_module(
@@ -2761,8 +2844,8 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
goto out;
err = -ENOBUFS;
- if (data.transmit) {
- state = cp->comp_alloc(ccp_option, data.length);
+ if (data->transmit) {
+ state = cp->comp_alloc(ccp_option, data->length);
if (state) {
ppp_xmit_lock(ppp);
ppp->xstate &= ~SC_COMP_RUN;
@@ -2780,7 +2863,7 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
module_put(cp->owner);
} else {
- state = cp->decomp_alloc(ccp_option, data.length);
+ state = cp->decomp_alloc(ccp_option, data->length);
if (state) {
ppp_recv_lock(ppp);
ppp->rstate &= ~SC_DECOMP_RUN;
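
The ppp rework funnels PPPIOCSPASS/PPPIOCSACTIVE (and their new 32-bit variants) through one helper that copies the sock_fprog from user space, builds a bpf_prog once, and swaps it in under ppp_lock(). A sketch of the user-to-kernel conversion; note that sock_fprog_kern.len counts instructions, with bpf_prog_create() deriving the byte length itself:

#include <linux/filter.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>

/* NULL means "no filter"; an ERR_PTR carries the failure. */
static struct bpf_prog *fprog_to_prog(const struct sock_fprog *uprog)
{
	struct sock_fprog_kern fprog;
	struct bpf_prog *res = NULL;
	int err;

	if (!uprog->len)
		return NULL;

	fprog.len = uprog->len;		/* number of instructions */
	fprog.filter = memdup_user(uprog->filter,
				   uprog->len * sizeof(struct sock_filter));
	if (IS_ERR(fprog.filter))
		return ERR_CAST(fprog.filter);

	err = bpf_prog_create(&res, &fprog);
	kfree(fprog.filter);		/* bpf_prog_create() made its own copy */

	return err ? ERR_PTR(err) : res;
}
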
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 3ae70c7e6860..a6d63665ad03 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1123,14 +1123,6 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
}
}
-#ifdef CONFIG_COMPAT
-static long tap_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations tap_fops = {
.owner = THIS_MODULE,
.open = tap_open,
@@ -1140,9 +1132,7 @@ static const struct file_operations tap_fops = {
.poll = tap_poll,
.llseek = no_llseek,
.unlocked_ioctl = tap_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = tap_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
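
With tap's hand-rolled wrapper gone, the generic compat_ptr_ioctl() helper performs the compat_ptr() conversion before calling the native handler. It is only correct for drivers whose ioctl arguments are all pointers. A sketch of the wiring, with hypothetical names:

#include <linux/fs.h>
#include <linux/module.h>

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,
	/* Defined as NULL when !CONFIG_COMPAT, so no #ifdef is needed. */
	.compat_ioctl	= compat_ptr_ioctl,
};
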
diff --git a/drivers/net/usb/aqc111.h b/drivers/net/usb/aqc111.h
index 4d68b3a6067c..b562db4da337 100644
--- a/drivers/net/usb/aqc111.h
+++ b/drivers/net/usb/aqc111.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later
- * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (C) 2002-2003 TiVo Inc.
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 74849da031fa..ca827802f291 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1214,8 +1214,9 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
* This needs to be a tasklet otherwise we will
* end up recursively calling this function.
*/
-static void hso_unthrottle_tasklet(struct hso_serial *serial)
+static void hso_unthrottle_tasklet(unsigned long data)
{
+ struct hso_serial *serial = (struct hso_serial *)data;
unsigned long flags;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1265,7 +1266,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
/* Force default termio settings */
_hso_serial_set_termios(tty, NULL);
tasklet_init(&serial->unthrottle_tasklet,
- (void (*)(unsigned long))hso_unthrottle_tasklet,
+ hso_unthrottle_tasklet,
(unsigned long)serial);
result = hso_start_serial_device(serial->parent, GFP_KERNEL);
if (result) {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dde05e2fdc3e..30e511c2c8d0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,6 +1573,13 @@ static void usbnet_bh (struct timer_list *t)
}
}
+static void usbnet_bh_tasklet(unsigned long data)
+{
+ struct timer_list *t = (struct timer_list *)data;
+
+ usbnet_bh(t);
+}
+
/*-------------------------------------------------------------------------
*
@@ -1700,7 +1707,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = (void (*)(unsigned long))usbnet_bh;
+ dev->bh.func = usbnet_bh_tasklet;
dev->bh.data = (unsigned long)&dev->delay;
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
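
The hso and usbnet hunks both replace function-pointer casts with callbacks that genuinely match the tasklet prototype void (*)(unsigned long); calling through a cast to an incompatible function type is undefined behavior and breaks CFI. The idiom, with invented names:

#include <linux/interrupt.h>

struct foo {				/* hypothetical per-device state */
	struct tasklet_struct bh;
	int pending;
};

/* Correct prototype: take the cookie and cast it back inside,
 * rather than casting the function pointer at tasklet_init() time.
 */
static void foo_bh(unsigned long data)
{
	struct foo *f = (struct foo *)data;

	f->pending = 0;
}

static void foo_init(struct foo *f)
{
	tasklet_init(&f->bh, foo_bh, (unsigned long)f);
}
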
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 32ae710d4f40..1081d171e477 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -421,8 +421,6 @@ extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
* Asynchronous Interfacing
*/
-#define SERIAL_MAGIC 0x5301
-
/*
* The size of the serial xmit buffer is 1 page, or 4096 bytes
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a4d325fcf94a..452da44a21e0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1421,6 +1421,7 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct napi_struct *napi;
struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
@@ -1526,8 +1527,16 @@ out:
if (unlikely(emergency && count))
iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
- if (rxq->napi.poll)
- napi_gro_flush(&rxq->napi, false);
+ napi = &rxq->napi;
+ if (napi->poll) {
+ if (napi->rx_count) {
+ netif_receive_skb_list(&napi->rx_list);
+ INIT_LIST_HEAD(&napi->rx_list);
+ napi->rx_count = 0;
+ }
+
+ napi_gro_flush(napi, false);
+ }
iwl_pcie_rxq_restock(trans, rxq);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index a9657ae6d782..d14e55e3c9da 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
+ adapter->is_up = true;
goto done;
err_add_intf:
@@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
mwifiex_deauthenticate(priv, NULL);
mwifiex_uninit_sw(adapter);
+ adapter->is_up = false;
if (adapter->if_ops.down_dev)
adapter->if_ops.down_dev(adapter);
@@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
if (!adapter)
return 0;
- mwifiex_uninit_sw(adapter);
+ if (adapter->is_up)
+ mwifiex_uninit_sw(adapter);
if (adapter->irq_wakeup >= 0)
device_init_wakeup(adapter->dev, false);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 095837fba300..547ff3c578ee 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1017,6 +1017,7 @@ struct mwifiex_adapter {
/* For synchronizing FW initialization with device lifecycle. */
struct completion *fw_done;
+ bool is_up;
bool ext_scan;
u8 fw_api_ver;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 24c041dad9f6..fec38b6e86ff 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
return 0;
}
+ if (!adapter->is_up)
+ return -EBUSY;
+
mwifiex_enable_wake(adapter);
/* Enable the Host Sleep */
@@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
struct sdio_func *func = card->func;
int ret;
+ /* Prepare the adapter for the reset. */
mwifiex_shutdown_sw(adapter);
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- /* power cycle the adapter */
+ /* Run a HW reset of the SDIO interface. */
sdio_claim_host(func);
- mmc_hw_reset(func->card->host);
+ ret = mmc_hw_reset(func->card->host);
sdio_release_host(func);
- /* Previous save_adapter won't be valid after this. We will cancel
- * pending work requests.
- */
- clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
-
- ret = mwifiex_reinit_sw(adapter);
- if (ret)
- dev_err(&func->dev, "reinit failed: %d\n", ret);
+ switch (ret) {
+ case 1:
+ dev_dbg(&func->dev, "SDIO HW reset asynchronous\n");
+ complete_all(adapter->fw_done);
+ break;
+ case 0:
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret)
+ dev_err(&func->dev, "reinit failed: %d\n", ret);
+ break;
+ default:
+ dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);
+ break;
+ }
}
/* This function read/write firmware */
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 677f1146ccf0..94569cd695c8 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -16,17 +16,12 @@
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
#include "wl1251.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x104c
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1251
-#define SDIO_DEVICE_ID_TI_WL1251 0x9066
-#endif
-
struct wl1251_sdio {
struct sdio_func *func;
u32 elp_val;
@@ -49,7 +44,7 @@ static void wl1251_sdio_interrupt(struct sdio_func *func)
}
static const struct sdio_device_id wl1251_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1251) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
@@ -217,6 +212,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
const struct wl1251_platform_data *wl1251_board_data;
+ struct device_node *np = func->dev.of_node;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
@@ -248,6 +244,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
wl->power_gpio = wl1251_board_data->power_gpio;
wl->irq = wl1251_board_data->irq;
wl->use_eeprom = wl1251_board_data->use_eeprom;
+ } else if (np) {
+ wl->use_eeprom = of_property_read_bool(np,
+ "ti,wl1251-has-eeprom");
+ wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+ wl->irq = of_irq_get(np, 0);
+
+ if (wl->power_gpio == -EPROBE_DEFER ||
+ wl->irq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto disable;
+ }
}
if (gpio_is_valid(wl->power_gpio)) {
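
The wl1251 probe now falls back to device-tree wiring when no platform data is supplied. of_get_named_gpio() and of_irq_get() can both return -EPROBE_DEFER, which must be passed up so the driver core retries once the GPIO and interrupt providers exist. A sketch with illustrative property names:

#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>

/* Parse optional DT wiring; the property names here are made up. */
static int foo_parse_dt(struct device_node *np, int *gpio, int *irq)
{
	*gpio = of_get_named_gpio(np, "vendor,power-gpio", 0);
	*irq = of_irq_get(np, 0);

	if (*gpio == -EPROBE_DEFER || *irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* providers not ready yet */

	return 0;
}
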
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 7afaf35f2453..9fd8cf2d270c 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -26,14 +26,6 @@
#include "wl12xx_80211.h"
#include "io.h"
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
static bool dump = false;
struct wl12xx_sdio_glue {
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 65865e460ab8..04dd46647db3 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -354,13 +354,10 @@ static void pp_clear_ctx(struct pp_ctx *pp)
static void pp_setup_dbgfs(struct pp_ctx *pp)
{
struct pci_dev *pdev = pp->ntb->pdev;
- void *ret;
pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);
- ret = debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
- if (!ret)
- dev_warn(&pp->ntb->dev, "DebugFS unsupported\n");
+ debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
}
static void pp_clear_dbgfs(struct pp_ctx *pp)
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 36af7af6b7cf..b7d1eb38b27d 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -4,6 +4,7 @@ menuconfig LIBNVDIMM
depends on PHYS_ADDR_T_64BIT
depends on HAS_IOMEM
depends on BLK_DEV
+ select MEMREGION
help
Generic support for non-volatile memory devices including
ACPI-6-NFIT defined resources. On platforms that define an
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 3e9f45aec8d1..0d04ea3d9fd7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1261,11 +1261,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
- int rc;
-
/* Media error - set the e_flag */
- rc = btt_map_write(arena, premap, postmap, 0, 1,
- NVDIMM_IO_ATOMIC);
+ if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
+ dev_warn_ratelimited(to_dev(arena),
+ "Error persistently tracking bad blocks at %#x\n",
+ premap);
goto out_rtt;
}
@@ -1674,7 +1674,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
struct nd_region *nd_region;
struct btt_sb *btt_sb;
struct btt *btt;
- size_t rawsize;
+ size_t size, rawsize;
+ int rc;
if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
@@ -1685,6 +1686,11 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
if (!btt_sb)
return -ENOMEM;
+ size = nvdimm_namespace_capacity(ndns);
+ rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
+ if (rc)
+ return rc;
+
/*
* If this returns < 0, that is ok as it just means there wasn't
* an existing BTT, and we're creating a new one. We still need to
@@ -1693,7 +1699,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
*/
nd_btt_version(nd_btt, ndns, btt_sb);
- rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
+ rawsize = size - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
dev_name(&ndns->dev),
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 3508a79110c7..05feb97e11ce 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -25,17 +25,6 @@ static void nd_btt_release(struct device *dev)
kfree(nd_btt);
}
-static struct device_type nd_btt_device_type = {
- .name = "nd_btt",
- .release = nd_btt_release,
-};
-
-bool is_nd_btt(struct device *dev)
-{
- return dev->type == &nd_btt_device_type;
-}
-EXPORT_SYMBOL(is_nd_btt);
-
struct nd_btt *to_nd_btt(struct device *dev)
{
struct nd_btt *nd_btt = container_of(dev, struct nd_btt, dev);
@@ -178,6 +167,18 @@ static const struct attribute_group *nd_btt_attribute_groups[] = {
NULL,
};
+static const struct device_type nd_btt_device_type = {
+ .name = "nd_btt",
+ .release = nd_btt_release,
+ .groups = nd_btt_attribute_groups,
+};
+
+bool is_nd_btt(struct device *dev)
+{
+ return dev->type == &nd_btt_device_type;
+}
+EXPORT_SYMBOL(is_nd_btt);
+
static struct device *__nd_btt_create(struct nd_region *nd_region,
unsigned long lbasize, u8 *uuid,
struct nd_namespace_common *ndns)
@@ -204,7 +205,6 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
dev_set_name(dev, "btt%d.%d", nd_region->id, nd_btt->id);
dev->parent = &nd_region->dev;
dev->type = &nd_btt_device_type;
- dev->groups = nd_btt_attribute_groups;
device_initialize(&nd_btt->dev);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
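
The btt_devs.c hunk moves the attribute groups onto a const struct device_type, which also lets is_nd_btt() test identity by type instead of by release function (the nvdimm bus device below gets the same treatment). A generic sketch, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_dev {			/* hypothetical wrapper device */
	struct device dev;
};

static struct attribute *foo_attrs[] = { NULL };	/* empty for brevity */
ATTRIBUTE_GROUPS(foo);

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_dev, dev));
}

static const struct device_type foo_device_type = {
	.name	 = "foo",
	.release = foo_release,
	.groups	 = foo_groups,		/* attached once, at the type */
};

/* Type-based identity: unambiguous even when several device types
 * share a release implementation.
 */
static bool is_foo(struct device *dev)
{
	return dev->type == &foo_device_type;
}
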
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index d47412dcdf38..a8b515968569 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -300,9 +300,14 @@ static void nvdimm_bus_release(struct device *dev)
kfree(nvdimm_bus);
}
+static const struct device_type nvdimm_bus_dev_type = {
+ .release = nvdimm_bus_release,
+ .groups = nvdimm_bus_attribute_groups,
+};
+
bool is_nvdimm_bus(struct device *dev)
{
- return dev->release == nvdimm_bus_release;
+ return dev->type == &nvdimm_bus_dev_type;
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
@@ -355,7 +360,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
badrange_init(&nvdimm_bus->badrange);
nvdimm_bus->nd_desc = nd_desc;
nvdimm_bus->dev.parent = parent;
- nvdimm_bus->dev.release = nvdimm_bus_release;
+ nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
nvdimm_bus->dev.groups = nd_desc->attr_groups;
nvdimm_bus->dev.bus = &nvdimm_bus_type;
nvdimm_bus->dev.of_node = nd_desc->of_node;
@@ -669,10 +674,9 @@ static struct attribute *nd_device_attributes[] = {
/*
* nd_device_attribute_group - generic attributes for all devices on an nd bus
*/
-struct attribute_group nd_device_attribute_group = {
+const struct attribute_group nd_device_attribute_group = {
.attrs = nd_device_attributes,
};
-EXPORT_SYMBOL_GPL(nd_device_attribute_group);
static ssize_t numa_node_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -681,28 +685,56 @@ static ssize_t numa_node_show(struct device *dev,
}
static DEVICE_ATTR_RO(numa_node);
+static int nvdimm_dev_to_target_node(struct device *dev)
+{
+ struct device *parent = dev->parent;
+ struct nd_region *nd_region = NULL;
+
+ if (is_nd_region(dev))
+ nd_region = to_nd_region(dev);
+ else if (parent && is_nd_region(parent))
+ nd_region = to_nd_region(parent);
+
+ if (!nd_region)
+ return NUMA_NO_NODE;
+ return nd_region->target_node;
+}
+
+static ssize_t target_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", nvdimm_dev_to_target_node(dev));
+}
+static DEVICE_ATTR_RO(target_node);
+
static struct attribute *nd_numa_attributes[] = {
&dev_attr_numa_node.attr,
+ &dev_attr_target_node.attr,
NULL,
};
static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
int n)
{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+
if (!IS_ENABLED(CONFIG_NUMA))
return 0;
+ if (a == &dev_attr_target_node.attr &&
+ nvdimm_dev_to_target_node(dev) == NUMA_NO_NODE)
+ return 0;
+
return a->mode;
}
/*
* nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
*/
-struct attribute_group nd_numa_attribute_group = {
+const struct attribute_group nd_numa_attribute_group = {
.attrs = nd_numa_attributes,
.is_visible = nd_numa_attr_visible,
};
-EXPORT_SYMBOL_GPL(nd_numa_attribute_group);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
{
@@ -1227,7 +1259,7 @@ static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
.unlocked_ioctl = bus_ioctl,
- .compat_ioctl = bus_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -1235,7 +1267,7 @@ static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
.unlocked_ioctl = dimm_ioctl,
- .compat_ioctl = dimm_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
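Both fops hunks above switch .compat_ioctl from reusing the native handler to compat_ptr_ioctl(). Roughly, per its upstream definition around this kernel version (treat as illustrative, not part of this patch):

```c
/* fs/ioctl.c (approximate): forward a compat ioctl whose argument is a pointer */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* compat_ptr() fixes up the 32-bit user pointer (notably on s390) */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}
```

This helper is only correct when every ioctl argument is a pointer, which holds for the nvdimm bus and dimm ioctls.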
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2985ca949912..45964acba944 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -300,13 +300,14 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
return rc;
}
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
+ resource_size_t size)
{
struct resource *res = &nsio->res;
struct nd_namespace_common *ndns = &nsio->common;
- nsio->size = resource_size(res);
- if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ nsio->size = size;
+ if (!devm_request_mem_region(dev, res->start, size,
dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
@@ -318,12 +319,10 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
&nsio->res);
- nsio->addr = devm_memremap(dev, res->start, resource_size(res),
- ARCH_MEMREMAP_PMEM);
+ nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
return PTR_ERR_OR_ZERO(nsio->addr);
}
-EXPORT_SYMBOL_GPL(devm_nsio_enable);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
@@ -331,6 +330,5 @@ void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
devm_memunmap(dev, nsio->addr);
devm_exit_badblocks(dev, &nsio->bb);
- devm_release_mem_region(dev, res->start, resource_size(res));
+ devm_release_mem_region(dev, res->start, nsio->size);
}
-EXPORT_SYMBOL_GPL(devm_nsio_disable);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 9204f1e9fd14..fe9bd6febdd2 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -385,10 +385,14 @@ static struct attribute *nvdimm_bus_attributes[] = {
NULL,
};
-struct attribute_group nvdimm_bus_attribute_group = {
+static const struct attribute_group nvdimm_bus_attribute_group = {
.attrs = nvdimm_bus_attributes,
};
-EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);
+
+const struct attribute_group *nvdimm_bus_attribute_groups[] = {
+ &nvdimm_bus_attribute_group,
+ NULL,
+};
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
@@ -455,7 +459,6 @@ static __exit void libnvdimm_exit(void)
nd_region_exit();
nvdimm_exit();
nvdimm_bus_exit();
- nd_region_devs_exit();
nvdimm_devs_exit();
}
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 6d22b0f83b3b..99965077bac4 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -23,17 +23,6 @@ static void nd_dax_release(struct device *dev)
kfree(nd_dax);
}
-static struct device_type nd_dax_device_type = {
- .name = "nd_dax",
- .release = nd_dax_release,
-};
-
-bool is_nd_dax(struct device *dev)
-{
- return dev ? dev->type == &nd_dax_device_type : false;
-}
-EXPORT_SYMBOL(is_nd_dax);
-
struct nd_dax *to_nd_dax(struct device *dev)
{
struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
@@ -43,13 +32,18 @@ struct nd_dax *to_nd_dax(struct device *dev)
}
EXPORT_SYMBOL(to_nd_dax);
-static const struct attribute_group *nd_dax_attribute_groups[] = {
- &nd_pfn_attribute_group,
- &nd_device_attribute_group,
- &nd_numa_attribute_group,
- NULL,
+static const struct device_type nd_dax_device_type = {
+ .name = "nd_dax",
+ .release = nd_dax_release,
+ .groups = nd_pfn_attribute_groups,
};
+bool is_nd_dax(struct device *dev)
+{
+ return dev ? dev->type == &nd_dax_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_dax);
+
static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
{
struct nd_pfn *nd_pfn;
@@ -69,7 +63,6 @@ static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
dev = &nd_pfn->dev;
dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
- dev->groups = nd_dax_attribute_groups;
dev->type = &nd_dax_device_type;
dev->parent = &nd_region->dev;
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 196aa44c4936..94ea6dba6b4f 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -202,16 +202,6 @@ static void nvdimm_release(struct device *dev)
kfree(nvdimm);
}
-static struct device_type nvdimm_device_type = {
- .name = "nvdimm",
- .release = nvdimm_release,
-};
-
-bool is_nvdimm(struct device *dev)
-{
- return dev->type == &nvdimm_device_type;
-}
-
struct nvdimm *to_nvdimm(struct device *dev)
{
struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);
@@ -450,11 +440,27 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
}
-struct attribute_group nvdimm_attribute_group = {
+static const struct attribute_group nvdimm_attribute_group = {
.attrs = nvdimm_attributes,
.is_visible = nvdimm_visible,
};
-EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
+
+static const struct attribute_group *nvdimm_attribute_groups[] = {
+ &nd_device_attribute_group,
+ &nvdimm_attribute_group,
+ NULL,
+};
+
+static const struct device_type nvdimm_device_type = {
+ .name = "nvdimm",
+ .release = nvdimm_release,
+ .groups = nvdimm_attribute_groups,
+};
+
+bool is_nvdimm(struct device *dev)
+{
+ return dev->type == &nvdimm_device_type;
+}
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
void *provider_data, const struct attribute_group **groups,
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 87f72f725e4f..e02f60ad6c99 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -8,17 +8,6 @@
#include <linux/libnvdimm.h>
#include <linux/module.h>
-static const struct attribute_group *e820_pmem_attribute_groups[] = {
- &nvdimm_bus_attribute_group,
- NULL,
-};
-
-static const struct attribute_group *e820_pmem_region_attribute_groups[] = {
- &nd_region_attribute_group,
- &nd_device_attribute_group,
- NULL,
-};
-
static int e820_pmem_remove(struct platform_device *pdev)
{
struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev);
@@ -46,7 +35,6 @@ static int e820_register_one(struct resource *res, void *data)
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = res;
- ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
ndr_desc.numa_node = e820_range_to_nid(res->start);
ndr_desc.target_node = ndr_desc.numa_node;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
@@ -62,7 +50,6 @@ static int e820_pmem_probe(struct platform_device *pdev)
struct nvdimm_bus *nvdimm_bus;
int rc = -ENXIO;
- nd_desc.attr_groups = e820_pmem_attribute_groups;
nd_desc.provider_name = "e820";
nd_desc.module = THIS_MODULE;
nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index cca0a3ba1d2c..032dc61725ff 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -44,35 +44,9 @@ static void namespace_blk_release(struct device *dev)
kfree(nsblk);
}
-static const struct device_type namespace_io_device_type = {
- .name = "nd_namespace_io",
- .release = namespace_io_release,
-};
-
-static const struct device_type namespace_pmem_device_type = {
- .name = "nd_namespace_pmem",
- .release = namespace_pmem_release,
-};
-
-static const struct device_type namespace_blk_device_type = {
- .name = "nd_namespace_blk",
- .release = namespace_blk_release,
-};
-
-static bool is_namespace_pmem(const struct device *dev)
-{
- return dev ? dev->type == &namespace_pmem_device_type : false;
-}
-
-static bool is_namespace_blk(const struct device *dev)
-{
- return dev ? dev->type == &namespace_blk_device_type : false;
-}
-
-static bool is_namespace_io(const struct device *dev)
-{
- return dev ? dev->type == &namespace_io_device_type : false;
-}
+static bool is_namespace_pmem(const struct device *dev);
+static bool is_namespace_blk(const struct device *dev);
+static bool is_namespace_io(const struct device *dev);
static int is_uuid_busy(struct device *dev, void *data)
{
@@ -1329,7 +1303,7 @@ static ssize_t resource_show(struct device *dev,
return -ENXIO;
return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
@@ -1510,16 +1484,20 @@ static ssize_t holder_show(struct device *dev,
}
static DEVICE_ATTR_RO(holder);
-static ssize_t __holder_class_store(struct device *dev, const char *buf)
+static int __holder_class_store(struct device *dev, const char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
if (dev->driver || ndns->claim)
return -EBUSY;
- if (sysfs_streq(buf, "btt"))
- ndns->claim_class = btt_claim_class(dev);
- else if (sysfs_streq(buf, "pfn"))
+ if (sysfs_streq(buf, "btt")) {
+ int rc = btt_claim_class(dev);
+
+ if (rc < NVDIMM_CCLASS_NONE)
+ return rc;
+ ndns->claim_class = rc;
+ } else if (sysfs_streq(buf, "pfn"))
ndns->claim_class = NVDIMM_CCLASS_PFN;
else if (sysfs_streq(buf, "dax"))
ndns->claim_class = NVDIMM_CCLASS_DAX;
@@ -1528,10 +1506,6 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
else
return -EINVAL;
- /* btt_claim_class() could've returned an error */
- if (ndns->claim_class < 0)
- return ndns->claim_class;
-
return 0;
}
@@ -1539,7 +1513,7 @@ static ssize_t holder_class_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
- ssize_t rc;
+ int rc;
nd_device_lock(dev);
nvdimm_bus_lock(dev);
@@ -1547,7 +1521,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
- dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
+ dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1645,11 +1619,8 @@ static umode_t namespace_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
- if (a == &dev_attr_resource.attr) {
- if (is_namespace_blk(dev))
- return 0;
- return 0400;
- }
+ if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
+ return 0;
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
if (a == &dev_attr_size.attr)
@@ -1680,6 +1651,39 @@ static const struct attribute_group *nd_namespace_attribute_groups[] = {
NULL,
};
+static const struct device_type namespace_io_device_type = {
+ .name = "nd_namespace_io",
+ .release = namespace_io_release,
+ .groups = nd_namespace_attribute_groups,
+};
+
+static const struct device_type namespace_pmem_device_type = {
+ .name = "nd_namespace_pmem",
+ .release = namespace_pmem_release,
+ .groups = nd_namespace_attribute_groups,
+};
+
+static const struct device_type namespace_blk_device_type = {
+ .name = "nd_namespace_blk",
+ .release = namespace_blk_release,
+ .groups = nd_namespace_attribute_groups,
+};
+
+static bool is_namespace_pmem(const struct device *dev)
+{
+ return dev ? dev->type == &namespace_pmem_device_type : false;
+}
+
+static bool is_namespace_blk(const struct device *dev)
+{
+ return dev ? dev->type == &namespace_blk_device_type : false;
+}
+
+static bool is_namespace_io(const struct device *dev)
+{
+ return dev ? dev->type == &namespace_io_device_type : false;
+}
+
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
@@ -1759,6 +1763,23 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
+int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
+ resource_size_t size)
+{
+ if (is_namespace_blk(&ndns->dev))
+ return 0;
+ return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
+}
+EXPORT_SYMBOL_GPL(devm_namespace_enable);
+
+void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
+{
+ if (is_namespace_blk(&ndns->dev))
+ return;
+ devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
+}
+EXPORT_SYMBOL_GPL(devm_namespace_disable);
+
static struct device **create_namespace_io(struct nd_region *nd_region)
{
struct nd_namespace_io *nsio;
@@ -2078,7 +2099,6 @@ static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
dev->parent = &nd_region->dev;
- dev->groups = nd_namespace_attribute_groups;
return &nsblk->common.dev;
}
@@ -2109,7 +2129,6 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
- dev->groups = nd_namespace_attribute_groups;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
return dev;
@@ -2608,7 +2627,6 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
if (id < 0)
break;
dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
- dev->groups = nd_namespace_attribute_groups;
nd_device_register(dev);
}
if (i)
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 25fa121104d0..ddb9d97d9129 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -114,7 +114,6 @@ struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
-void nd_region_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
@@ -124,11 +123,7 @@ void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
-int nvdimm_bus_register_dimms(struct nvdimm_bus *nvdimm_bus);
-int nvdimm_bus_register_regions(struct nvdimm_bus *nvdimm_bus);
-int nvdimm_bus_init_interleave_sets(struct nvdimm_bus *nvdimm_bus);
void __nd_device_register(struct device *dev);
-int nd_match_dimm(struct device *dev, void *data);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
@@ -171,6 +166,23 @@ ssize_t nd_namespace_store(struct device *dev,
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
+ resource_size_t size);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline int devm_nsio_enable(struct device *dev,
+ struct nd_namespace_io *nsio, resource_size_t size)
+{
+ return -ENXIO;
+}
+
+static inline void devm_nsio_disable(struct device *dev,
+ struct nd_namespace_io *nsio)
+{
+}
+#endif
+
#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ee5c04070ef9..c9f6a5b5253a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -212,6 +212,11 @@ struct nd_dax {
struct nd_pfn nd_pfn;
};
+static inline u32 nd_info_block_reserve(void)
+{
+ return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
enum nd_async_mode {
ND_SYNC,
ND_ASYNC,
@@ -234,6 +239,9 @@ int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
+extern const struct attribute_group nd_device_attribute_group;
+extern const struct attribute_group nd_numa_attribute_group;
+extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
@@ -297,7 +305,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
-extern struct attribute_group nd_pfn_attribute_group;
+extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
struct nd_namespace_common *ndns)
@@ -370,29 +378,20 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
+int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
+ resource_size_t size);
+void devm_namespace_disable(struct device *dev,
+ struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
-
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
-
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
-void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
struct dev_pagemap *pgmap)
{
return -ENXIO;
}
-static inline int devm_nsio_enable(struct device *dev,
- struct nd_namespace_io *nsio)
-{
- return -ENXIO;
-}
-static inline void devm_nsio_disable(struct device *dev,
- struct nd_namespace_io *nsio)
-{
-}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 97187d6c0bdb..8224d1431ea9 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -9,17 +9,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
-static const struct attribute_group *region_attr_groups[] = {
- &nd_region_attribute_group,
- &nd_device_attribute_group,
- NULL,
-};
-
-static const struct attribute_group *bus_attr_groups[] = {
- &nvdimm_bus_attribute_group,
- NULL,
-};
-
struct of_pmem_private {
struct nvdimm_bus_descriptor bus_desc;
struct nvdimm_bus *bus;
@@ -41,7 +30,6 @@ static int of_pmem_region_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->bus_desc.attr_groups = bus_attr_groups;
priv->bus_desc.provider_name = kstrdup(pdev->name, GFP_KERNEL);
priv->bus_desc.module = THIS_MODULE;
priv->bus_desc.of_node = np;
@@ -66,7 +54,6 @@ static int of_pmem_region_probe(struct platform_device *pdev)
* structures so passing a stack pointer is fine.
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
- ndr_desc.attr_groups = region_attr_groups;
ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.target_node = ndr_desc.numa_node;
ndr_desc.res = &pdev->resource[i];
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 60d81fae06ee..b94f7a7e94b8 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -26,17 +26,6 @@ static void nd_pfn_release(struct device *dev)
kfree(nd_pfn);
}
-static struct device_type nd_pfn_device_type = {
- .name = "nd_pfn",
- .release = nd_pfn_release,
-};
-
-bool is_nd_pfn(struct device *dev)
-{
- return dev ? dev->type == &nd_pfn_device_type : false;
-}
-EXPORT_SYMBOL(is_nd_pfn);
-
struct nd_pfn *to_nd_pfn(struct device *dev)
{
struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);
@@ -229,7 +218,7 @@ static ssize_t resource_show(struct device *dev,
return rc;
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -280,25 +269,29 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
-static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
-{
- if (a == &dev_attr_resource.attr)
- return 0400;
- return a->mode;
-}
-
-struct attribute_group nd_pfn_attribute_group = {
+static struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
- .is_visible = pfn_visible,
};
-static const struct attribute_group *nd_pfn_attribute_groups[] = {
+const struct attribute_group *nd_pfn_attribute_groups[] = {
&nd_pfn_attribute_group,
&nd_device_attribute_group,
&nd_numa_attribute_group,
NULL,
};
+static const struct device_type nd_pfn_device_type = {
+ .name = "nd_pfn",
+ .release = nd_pfn_release,
+ .groups = nd_pfn_attribute_groups,
+};
+
+bool is_nd_pfn(struct device *dev)
+{
+ return dev ? dev->type == &nd_pfn_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_pfn);
+
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
@@ -337,7 +330,6 @@ static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
dev = &nd_pfn->dev;
dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
- dev->groups = nd_pfn_attribute_groups;
dev->type = &nd_pfn_device_type;
dev->parent = &nd_region->dev;
@@ -382,6 +374,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
+ /*
+	 * Re-enable the namespace with the correct size so that we can
+	 * access the device memmap area.
+ */
+ devm_namespace_disable(&nd_pfn->dev, ndns);
+ rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
+ if (rc)
+ return rc;
+
do {
unsigned long zero_len;
u64 nsoff;
@@ -591,7 +592,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
- return nd_pfn_clear_memmap_errors(nd_pfn);
+ return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);
@@ -635,11 +636,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nd_pfn_probe);
-static u32 info_block_reserve(void)
-{
- return ALIGN(SZ_8K, PAGE_SIZE);
-}
-
/*
* We hotplug memory at sub-section granularity, pad the reserved area
* from the previous section base to the namespace base address.
@@ -653,7 +649,7 @@ static unsigned long init_altmap_base(resource_size_t base)
static unsigned long init_altmap_reserve(resource_size_t base)
{
- unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
+ unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
unsigned long base_pfn = PHYS_PFN(base);
reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
@@ -668,7 +664,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- u32 reserve = info_block_reserve();
+ u32 reserve = nd_info_block_reserve();
struct nd_namespace_common *ndns = nd_pfn->ndns;
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t base = nsio->res.start + start_pad;
@@ -729,6 +725,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
sig = PFN_SIG;
rc = nd_pfn_validate(nd_pfn, sig);
+ if (rc == 0)
+ return nd_pfn_clear_memmap_errors(nd_pfn);
if (rc != -ENODEV)
return rc;
@@ -796,6 +794,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
+ rc = nd_pfn_clear_memmap_errors(nd_pfn);
+ if (rc)
+ return rc;
+
return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f9f76f6ba07b..ad8e4df1282b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -28,7 +28,6 @@
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
-#include "nd-core.h"
static struct device *to_dev(struct pmem_device *pmem)
{
@@ -372,6 +371,10 @@ static int pmem_attach_disk(struct device *dev,
if (!pmem)
return -ENOMEM;
+ rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
+ if (rc)
+ return rc;
+
/* while nsio_rw_bytes is active, parse a pfn info block if present */
if (is_nd_pfn(dev)) {
nd_pfn = to_nd_pfn(dev);
@@ -381,7 +384,7 @@ static int pmem_attach_disk(struct device *dev,
}
/* we're attaching a block device, disable raw namespace access */
- devm_nsio_disable(dev, nsio);
+ devm_namespace_disable(dev, ndns);
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
@@ -497,15 +500,16 @@ static int nd_pmem_probe(struct device *dev)
if (IS_ERR(ndns))
return PTR_ERR(ndns);
- if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
- return -ENXIO;
-
if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
if (is_nd_pfn(dev))
return pmem_attach_disk(dev, ndns);
+ ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
+ if (ret)
+ return ret;
+
ret = nd_btt_probe(dev, ndns);
if (ret == 0)
return -ENXIO;
@@ -532,6 +536,10 @@ static int nd_pmem_probe(struct device *dev)
return -ENXIO;
else if (ret == -EOPNOTSUPP)
return ret;
+
+ /* probe complete, attach handles namespace enabling */
+ devm_namespace_disable(dev, ndns);
+
return pmem_attach_disk(dev, ndns);
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef423ba1a711..a19e535830d9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -3,6 +3,7 @@
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/scatterlist.h>
+#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -19,7 +20,6 @@
*/
#include <linux/io-64-nonatomic-hi-lo.h>
-static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
@@ -133,43 +133,13 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- ida_simple_remove(&region_ida, nd_region->id);
+ memregion_free(nd_region->id);
if (is_nd_blk(dev))
kfree(to_nd_blk_region(dev));
else
kfree(nd_region);
}
-static struct device_type nd_blk_device_type = {
- .name = "nd_blk",
- .release = nd_region_release,
-};
-
-static struct device_type nd_pmem_device_type = {
- .name = "nd_pmem",
- .release = nd_region_release,
-};
-
-static struct device_type nd_volatile_device_type = {
- .name = "nd_volatile",
- .release = nd_region_release,
-};
-
-bool is_nd_pmem(struct device *dev)
-{
- return dev ? dev->type == &nd_pmem_device_type : false;
-}
-
-bool is_nd_blk(struct device *dev)
-{
- return dev ? dev->type == &nd_blk_device_type : false;
-}
-
-bool is_nd_volatile(struct device *dev)
-{
- return dev ? dev->type == &nd_volatile_device_type : false;
-}
-
struct nd_region *to_nd_region(struct device *dev)
{
struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -583,7 +553,7 @@ static ssize_t resource_show(struct device *dev,
return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
-static DEVICE_ATTR_RO(resource);
+static DEVICE_ATTR(resource, 0400, resource_show, NULL);
static ssize_t persistence_domain_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -635,12 +605,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
return 0;
- if (a == &dev_attr_resource.attr) {
- if (is_memory(dev))
- return 0400;
- else
- return 0;
- }
+ if (a == &dev_attr_resource.attr && !is_memory(dev))
+ return 0;
if (a == &dev_attr_deep_flush.attr) {
int has_flush = nvdimm_has_flush(nd_region);
@@ -674,80 +640,6 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
}
-struct attribute_group nd_region_attribute_group = {
- .attrs = nd_region_attributes,
- .is_visible = region_visible,
-};
-EXPORT_SYMBOL_GPL(nd_region_attribute_group);
-
-u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex)
-{
- struct nd_interleave_set *nd_set = nd_region->nd_set;
-
- if (!nd_set)
- return 0;
-
- if (nsindex && __le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
- return nd_set->cookie1;
- return nd_set->cookie2;
-}
-
-u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
-{
- struct nd_interleave_set *nd_set = nd_region->nd_set;
-
- if (nd_set)
- return nd_set->altcookie;
- return 0;
-}
-
-void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
-{
- struct nd_label_ent *label_ent, *e;
-
- lockdep_assert_held(&nd_mapping->lock);
- list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
- list_del(&label_ent->list);
- kfree(label_ent);
- }
-}
-
-/*
- * When a namespace is activated create new seeds for the next
- * namespace, or namespace-personality to be configured.
- */
-void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
-{
- nvdimm_bus_lock(dev);
- if (nd_region->ns_seed == dev) {
- nd_region_create_ns_seed(nd_region);
- } else if (is_nd_btt(dev)) {
- struct nd_btt *nd_btt = to_nd_btt(dev);
-
- if (nd_region->btt_seed == dev)
- nd_region_create_btt_seed(nd_region);
- if (nd_region->ns_seed == &nd_btt->ndns->dev)
- nd_region_create_ns_seed(nd_region);
- } else if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-
- if (nd_region->pfn_seed == dev)
- nd_region_create_pfn_seed(nd_region);
- if (nd_region->ns_seed == &nd_pfn->ndns->dev)
- nd_region_create_ns_seed(nd_region);
- } else if (is_nd_dax(dev)) {
- struct nd_dax *nd_dax = to_nd_dax(dev);
-
- if (nd_region->dax_seed == dev)
- nd_region_create_dax_seed(nd_region);
- if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
- nd_region_create_ns_seed(nd_region);
- }
- nvdimm_bus_unlock(dev);
-}
-
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -855,11 +747,124 @@ static struct attribute *mapping_attributes[] = {
NULL,
};
-struct attribute_group nd_mapping_attribute_group = {
+static const struct attribute_group nd_mapping_attribute_group = {
.is_visible = mapping_visible,
.attrs = mapping_attributes,
};
-EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
+
+static const struct attribute_group nd_region_attribute_group = {
+ .attrs = nd_region_attributes,
+ .is_visible = region_visible,
+};
+
+static const struct attribute_group *nd_region_attribute_groups[] = {
+ &nd_device_attribute_group,
+ &nd_region_attribute_group,
+ &nd_numa_attribute_group,
+ &nd_mapping_attribute_group,
+ NULL,
+};
+
+static const struct device_type nd_blk_device_type = {
+ .name = "nd_blk",
+ .release = nd_region_release,
+ .groups = nd_region_attribute_groups,
+};
+
+static const struct device_type nd_pmem_device_type = {
+ .name = "nd_pmem",
+ .release = nd_region_release,
+ .groups = nd_region_attribute_groups,
+};
+
+static const struct device_type nd_volatile_device_type = {
+ .name = "nd_volatile",
+ .release = nd_region_release,
+ .groups = nd_region_attribute_groups,
+};
+
+bool is_nd_pmem(struct device *dev)
+{
+ return dev ? dev->type == &nd_pmem_device_type : false;
+}
+
+bool is_nd_blk(struct device *dev)
+{
+ return dev ? dev->type == &nd_blk_device_type : false;
+}
+
+bool is_nd_volatile(struct device *dev)
+{
+ return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
+ struct nd_namespace_index *nsindex)
+{
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+ if (!nd_set)
+ return 0;
+
+ if (nsindex && __le16_to_cpu(nsindex->major) == 1
+ && __le16_to_cpu(nsindex->minor) == 1)
+ return nd_set->cookie1;
+ return nd_set->cookie2;
+}
+
+u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
+{
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+ if (nd_set)
+ return nd_set->altcookie;
+ return 0;
+}
+
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+{
+ struct nd_label_ent *label_ent, *e;
+
+ lockdep_assert_held(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+ list_del(&label_ent->list);
+ kfree(label_ent);
+ }
+}
+
+/*
+ * When a namespace is activated create new seeds for the next
+ * namespace, or namespace-personality to be configured.
+ */
+void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
+{
+ nvdimm_bus_lock(dev);
+ if (nd_region->ns_seed == dev) {
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ if (nd_region->btt_seed == dev)
+ nd_region_create_btt_seed(nd_region);
+ if (nd_region->ns_seed == &nd_btt->ndns->dev)
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ if (nd_region->pfn_seed == dev)
+ nd_region_create_pfn_seed(nd_region);
+ if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+ nd_region_create_ns_seed(nd_region);
+ } else if (is_nd_dax(dev)) {
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+
+ if (nd_region->dax_seed == dev)
+ nd_region_create_dax_seed(nd_region);
+ if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+ nd_region_create_ns_seed(nd_region);
+ }
+ nvdimm_bus_unlock(dev);
+}
int nd_blk_region_init(struct nd_region *nd_region)
{
@@ -931,8 +936,8 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
EXPORT_SYMBOL(nd_region_release_lane);
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
- struct nd_region_desc *ndr_desc, struct device_type *dev_type,
- const char *caller)
+ struct nd_region_desc *ndr_desc,
+ const struct device_type *dev_type, const char *caller)
{
struct nd_region *nd_region;
struct device *dev;
@@ -985,7 +990,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!region_buf)
return NULL;
- nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
+ nd_region->id = memregion_alloc(GFP_KERNEL);
if (nd_region->id < 0)
goto err_id;
@@ -1044,7 +1049,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
err_percpu:
- ida_simple_remove(&region_ida, nd_region->id);
+ memregion_free(nd_region->id);
err_id:
kfree(region_buf);
return NULL;
@@ -1216,8 +1221,3 @@ int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
-
-void __exit nd_region_devs_exit(void)
-{
- ida_destroy(&region_ida);
-}
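The region_devs.c changes retire the file-local region_ida in favor of the memregion allocator from <linux/memregion.h>, which this series adds so that other subsystems can draw region ids from the same space. A hedged usage sketch (example_region is hypothetical):

```c
#include <linux/memregion.h>

struct example_region {
	int id;
};

static int example_region_init(struct example_region *region)
{
	int id = memregion_alloc(GFP_KERNEL);	/* globally unique region id */

	if (id < 0)
		return id;
	region->id = id;
	return 0;
}

static void example_region_exit(struct example_region *region)
{
	memregion_free(region->id);
}
```

Because the ida now lives in the core, the driver-local ida_destroy() in nd_region_devs_exit() has nothing left to tear down, hence its removal.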
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8e8527408db3..dfe37a525f3a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2412,16 +2412,6 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
- },
- {
- /*
- * This Kingston E8FK11.T firmware version has no interrupt
- * after resume with actions related to suspend to idle
- * https://bugzilla.kernel.org/show_bug.cgi?id=204887
- */
- .vid = 0x2646,
- .fr = "E8FK11.T",
- .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@@ -2998,7 +2988,7 @@ static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
.unlocked_ioctl = nvme_dev_ioctl,
- .compat_ioctl = nvme_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static ssize_t nvme_sysfs_reset(struct device *dev,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index c2ec750cae6e..73567e922491 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -50,6 +50,7 @@ config NVMEM_IMX_OCOTP
config NVMEM_IMX_OCOTP_SCU
tristate "i.MX8 SCU On-Chip OTP Controller support"
depends on IMX_SCU
+ depends on HAVE_ARM_SMCCC
help
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
@@ -119,6 +120,17 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
+config ROCKCHIP_OTP
+ tristate "Rockchip OTP controller support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of Rockchip SoCs
+	  from OTP, such as cpu-leakage.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_rockchip_otp.
+
config NVMEM_BCM_OCOTP
tristate "Broadcom On-Chip OTP Controller support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -230,4 +242,15 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
+config SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SoCs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index e5c153d99a67..9e667823edb3 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -30,6 +30,8 @@ obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_stm32_romem-y := stm32-romem.o
obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
@@ -50,3 +52,5 @@ obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 61a17f943f47..03f1ab23ad51 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -7,6 +7,7 @@
* Peng Fan <peng.fan@nxp.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
@@ -14,14 +15,28 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define IMX_SIP_OTP 0xC200000A
+#define IMX_SIP_OTP_WRITE 0x2
+
enum ocotp_devtype {
IMX8QXP,
IMX8QM,
};
+#define ECC_REGION BIT(0)
+#define HOLE_REGION BIT(1)
+
+struct ocotp_region {
+ u32 start;
+ u32 end;
+ u32 flag;
+};
+
struct ocotp_devtype_data {
int devtype;
int nregs;
+ u32 num_region;
+ struct ocotp_region region[];
};
struct ocotp_priv {
@@ -35,16 +50,63 @@ struct imx_sc_msg_misc_fuse_read {
u32 word;
} __packed;
+static DEFINE_MUTEX(scu_ocotp_mutex);
+
static struct ocotp_devtype_data imx8qxp_data = {
.devtype = IMX8QXP,
.nregs = 800,
+ .num_region = 3,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x110, 0x21F, HOLE_REGION},
+ {0x220, 0x31F, ECC_REGION},
+ },
};
static struct ocotp_devtype_data imx8qm_data = {
.devtype = IMX8QM,
.nregs = 800,
+ .num_region = 2,
+ .region = {
+ {0x10, 0x10f, ECC_REGION},
+ {0x1a0, 0x1ff, ECC_REGION},
+ },
};
+static bool in_hole(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & HOLE_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool in_ecc(void *context, u32 index)
+{
+ struct ocotp_priv *priv = context;
+ const struct ocotp_devtype_data *data = priv->data;
+ int i;
+
+ for (i = 0; i < data->num_region; i++) {
+ if (data->region[i].flag & ECC_REGION) {
+ if ((index >= data->region[i].start) &&
+ (index <= data->region[i].end))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int imx_sc_misc_otp_fuse_read(struct imx_sc_ipc *ipc, u32 word,
u32 *val)
{
@@ -88,18 +150,19 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
if (!p)
return -ENOMEM;
+ mutex_lock(&scu_ocotp_mutex);
+
buf = p;
for (i = index; i < (index + count); i++) {
- if (priv->data->devtype == IMX8QXP) {
- if ((i > 271) && (i < 544)) {
- *buf++ = 0;
- continue;
- }
+ if (in_hole(context, i)) {
+ *buf++ = 0;
+ continue;
}
ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, i, buf);
if (ret) {
+ mutex_unlock(&scu_ocotp_mutex);
kfree(p);
return ret;
}
@@ -108,18 +171,63 @@ static int imx_scu_ocotp_read(void *context, unsigned int offset,
memcpy(val, (u8 *)p + offset % 4, bytes);
+ mutex_unlock(&scu_ocotp_mutex);
+
kfree(p);
return 0;
}
+static int imx_scu_ocotp_write(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct ocotp_priv *priv = context;
+ struct arm_smccc_res res;
+ u32 *buf = val;
+ u32 tmp;
+ u32 index;
+ int ret;
+
+ /* allow only writing one complete OTP word at a time */
+ if ((bytes != 4) || (offset % 4))
+ return -EINVAL;
+
+ index = offset >> 2;
+
+ if (in_hole(context, index))
+ return -EINVAL;
+
+ if (in_ecc(context, index)) {
+ pr_warn("ECC region, only program once\n");
+ mutex_lock(&scu_ocotp_mutex);
+ ret = imx_sc_misc_otp_fuse_read(priv->nvmem_ipc, index, &tmp);
+ mutex_unlock(&scu_ocotp_mutex);
+ if (ret)
+ return ret;
+ if (tmp) {
+ pr_warn("ECC region, already has value: %x\n", tmp);
+ return -EIO;
+ }
+ }
+
+ mutex_lock(&scu_ocotp_mutex);
+
+ arm_smccc_smc(IMX_SIP_OTP, IMX_SIP_OTP_WRITE, index, *buf,
+ 0, 0, 0, 0, &res);
+
+ mutex_unlock(&scu_ocotp_mutex);
+
+ return res.a0;
+}
+
static struct nvmem_config imx_scu_ocotp_nvmem_config = {
.name = "imx-scu-ocotp",
- .read_only = true,
+ .read_only = false,
.word_size = 4,
.stride = 1,
.owner = THIS_MODULE,
.reg_read = imx_scu_ocotp_read,
+ .reg_write = imx_scu_ocotp_write,
};
static const struct of_device_id imx_scu_ocotp_dt_ids[] = {
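Since imx_scu_ocotp_write() rejects anything but a single aligned 32-bit word, a consumer would program fuses one word at a time through the nvmem device API. A hedged sketch (the caller and its DT wiring are assumptions; the nvmem-consumer calls are real):

```c
#include <linux/nvmem-consumer.h>

/* Sketch: program one OTP word; offset must be 4-byte aligned. */
static int example_write_fuse_word(struct device *dev, unsigned int offset,
				   u32 val)
{
	/* assumes the caller's DT node carries an nvmem phandle */
	struct nvmem_device *nvmem = devm_nvmem_device_get(dev, NULL);
	int ret;

	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	ret = nvmem_device_write(nvmem, offset, sizeof(val), &val);
	return ret < 0 ? ret : 0;	/* nvmem_device_write() returns bytes written */
}
```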
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index dff2f3c357f5..fc40555ca4cd 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -521,6 +521,10 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ clk_prepare_enable(priv->clk);
+ imx_ocotp_clr_err_if_set(priv->base);
+ clk_disable_unprepare(priv->clk);
+
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
diff --git a/drivers/nvmem/rockchip-otp.c b/drivers/nvmem/rockchip-otp.c
new file mode 100644
index 000000000000..9f53bcce2f87
--- /dev/null
+++ b/drivers/nvmem/rockchip-otp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip OTP Driver
+ *
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* OTP Register Offsets */
+#define OTPC_SBPI_CTRL 0x0020
+#define OTPC_SBPI_CMD_VALID_PRE 0x0024
+#define OTPC_SBPI_CS_VALID_PRE 0x0028
+#define OTPC_SBPI_STATUS 0x002C
+#define OTPC_USER_CTRL 0x0100
+#define OTPC_USER_ADDR 0x0104
+#define OTPC_USER_ENABLE 0x0108
+#define OTPC_USER_Q 0x0124
+#define OTPC_INT_STATUS 0x0304
+#define OTPC_SBPI_CMD0_OFFSET 0x1000
+#define OTPC_SBPI_CMD1_OFFSET 0x1004
+
+/* OTP Register bits and masks */
+#define OTPC_USER_ADDR_MASK GENMASK(31, 16)
+#define OTPC_USE_USER BIT(0)
+#define OTPC_USE_USER_MASK GENMASK(16, 16)
+#define OTPC_USER_FSM_ENABLE BIT(0)
+#define OTPC_USER_FSM_ENABLE_MASK GENMASK(16, 16)
+#define OTPC_SBPI_DONE BIT(1)
+#define OTPC_USER_DONE BIT(2)
+
+#define SBPI_DAP_ADDR 0x02
+#define SBPI_DAP_ADDR_SHIFT 8
+#define SBPI_DAP_ADDR_MASK GENMASK(31, 24)
+#define SBPI_CMD_VALID_MASK GENMASK(31, 16)
+#define SBPI_DAP_CMD_WRF 0xC0
+#define SBPI_DAP_REG_ECC 0x3A
+#define SBPI_ECC_ENABLE 0x00
+#define SBPI_ECC_DISABLE 0x09
+#define SBPI_ENABLE BIT(0)
+#define SBPI_ENABLE_MASK GENMASK(16, 16)
+
+#define OTPC_TIMEOUT 10000
+
+struct rockchip_otp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rst;
+};
+
+/* list of required clocks */
+static const char * const rockchip_otp_clocks[] = {
+ "otp", "apb_pclk", "phy",
+};
+
+struct rockchip_data {
+ int size;
+};
+
+static int rockchip_otp_reset(struct rockchip_otp *otp)
+{
+ int ret;
+
+ ret = reset_control_assert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to assert otp phy %d\n", ret);
+ return ret;
+ }
+
+ udelay(2);
+
+ ret = reset_control_deassert(otp->rst);
+ if (ret) {
+ dev_err(otp->dev, "failed to deassert otp phy %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_otp_wait_status(struct rockchip_otp *otp, u32 flag)
+{
+ u32 status = 0;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(otp->base + OTPC_INT_STATUS, status,
+ (status & flag), 1, OTPC_TIMEOUT);
+ if (ret)
+ return ret;
+
+	/* clear the interrupt status */
+ writel(flag, otp->base + OTPC_INT_STATUS);
+
+ return 0;
+}
+
+static int rockchip_otp_ecc_enable(struct rockchip_otp *otp, bool enable)
+{
+ int ret = 0;
+
+ writel(SBPI_DAP_ADDR_MASK | (SBPI_DAP_ADDR << SBPI_DAP_ADDR_SHIFT),
+ otp->base + OTPC_SBPI_CTRL);
+
+ writel(SBPI_CMD_VALID_MASK | 0x1, otp->base + OTPC_SBPI_CMD_VALID_PRE);
+ writel(SBPI_DAP_CMD_WRF | SBPI_DAP_REG_ECC,
+ otp->base + OTPC_SBPI_CMD0_OFFSET);
+ if (enable)
+ writel(SBPI_ECC_ENABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+ else
+ writel(SBPI_ECC_DISABLE, otp->base + OTPC_SBPI_CMD1_OFFSET);
+
+ writel(SBPI_ENABLE_MASK | SBPI_ENABLE, otp->base + OTPC_SBPI_CTRL);
+
+ ret = rockchip_otp_wait_status(otp, OTPC_SBPI_DONE);
+ if (ret < 0)
+ dev_err(otp->dev, "timeout during ecc_enable\n");
+
+ return ret;
+}
+
+static int rockchip_otp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct rockchip_otp *otp = context;
+ u8 *buf = val;
+ int ret = 0;
+
+ ret = clk_bulk_prepare_enable(otp->num_clks, otp->clks);
+ if (ret < 0) {
+ dev_err(otp->dev, "failed to prepare/enable clks\n");
+ return ret;
+ }
+
+ ret = rockchip_otp_reset(otp);
+ if (ret) {
+ dev_err(otp->dev, "failed to reset otp phy\n");
+ goto disable_clks;
+ }
+
+ ret = rockchip_otp_ecc_enable(otp, false);
+ if (ret < 0) {
+ dev_err(otp->dev, "rockchip_otp_ecc_enable err\n");
+ goto disable_clks;
+ }
+
+ writel(OTPC_USE_USER | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+ udelay(5);
+ while (bytes--) {
+ writel(offset++ | OTPC_USER_ADDR_MASK,
+ otp->base + OTPC_USER_ADDR);
+ writel(OTPC_USER_FSM_ENABLE | OTPC_USER_FSM_ENABLE_MASK,
+ otp->base + OTPC_USER_ENABLE);
+ ret = rockchip_otp_wait_status(otp, OTPC_USER_DONE);
+ if (ret < 0) {
+ dev_err(otp->dev, "timeout during read setup\n");
+ goto read_end;
+ }
+ *buf++ = readb(otp->base + OTPC_USER_Q);
+ }
+
+read_end:
+ writel(0x0 | OTPC_USE_USER_MASK, otp->base + OTPC_USER_CTRL);
+disable_clks:
+ clk_bulk_disable_unprepare(otp->num_clks, otp->clks);
+
+ return ret;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "rockchip-otp",
+ .owner = THIS_MODULE,
+ .read_only = true,
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = rockchip_otp_read,
+};
+
+static const struct rockchip_data px30_data = {
+ .size = 0x40,
+};
+
+static const struct of_device_id rockchip_otp_match[] = {
+ {
+ .compatible = "rockchip,px30-otp",
+ .data = (void *)&px30_data,
+ },
+ {
+ .compatible = "rockchip,rk3308-otp",
+ .data = (void *)&px30_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_otp_match);
+
+static int rockchip_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_otp *otp;
+ const struct rockchip_data *data;
+ struct nvmem_device *nvmem;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ dev_err(dev, "failed to get match data\n");
+ return -EINVAL;
+ }
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_otp),
+ GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp->num_clks = ARRAY_SIZE(rockchip_otp_clocks);
+ otp->clks = devm_kcalloc(dev, otp->num_clks,
+ sizeof(*otp->clks), GFP_KERNEL);
+ if (!otp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < otp->num_clks; ++i)
+ otp->clks[i].id = rockchip_otp_clocks[i];
+
+ ret = devm_clk_bulk_get(dev, otp->num_clks, otp->clks);
+ if (ret)
+ return ret;
+
+ otp->rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(otp->rst))
+ return PTR_ERR(otp->rst);
+
+ otp_config.size = data->size;
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver rockchip_otp_driver = {
+ .probe = rockchip_otp_probe,
+ .driver = {
+ .name = "rockchip-otp",
+ .of_match_table = rockchip_otp_match,
+ },
+};
+
+module_platform_driver(rockchip_otp_driver);
+MODULE_DESCRIPTION("Rockchip OTP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index c6ee21018d80..ab5e7e0bc3d8 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -211,7 +211,7 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
return ret;
}
- efuse->hwlock = hwspin_lock_request_specific(ret);
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!efuse->hwlock) {
dev_err(&pdev->dev, "failed to request hwspinlock\n");
return -ENXIO;
@@ -219,7 +219,6 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
mutex_init(&efuse->mutex);
efuse->dev = &pdev->dev;
- platform_set_drvdata(pdev, efuse);
econfig.stride = 1;
econfig.word_size = 1;
@@ -232,21 +231,12 @@ static int sc27xx_efuse_probe(struct platform_device *pdev)
nvmem = devm_nvmem_register(&pdev->dev, &econfig);
if (IS_ERR(nvmem)) {
dev_err(&pdev->dev, "failed to register nvmem config\n");
- hwspin_lock_free(efuse->hwlock);
return PTR_ERR(nvmem);
}
return 0;
}
-static int sc27xx_efuse_remove(struct platform_device *pdev)
-{
- struct sc27xx_efuse *efuse = platform_get_drvdata(pdev);
-
- hwspin_lock_free(efuse->hwlock);
- return 0;
-}
-
static const struct of_device_id sc27xx_efuse_of_match[] = {
{ .compatible = "sprd,sc2731-efuse" },
{ }
@@ -254,7 +244,6 @@ static const struct of_device_id sc27xx_efuse_of_match[] = {
static struct platform_driver sc27xx_efuse_driver = {
.probe = sc27xx_efuse_probe,
- .remove = sc27xx_efuse_remove,
.driver = {
.name = "sc27xx-efuse",
.of_match_table = sc27xx_efuse_of_match,
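The sc27xx conversion leans on the managed hwspinlock request, which ties the lock's lifetime to the device and is what lets both the error-path hwspin_lock_free() and the remove() callback disappear. Roughly how the probe side looks with the devm variant (a sketch with an assumed device-tree lookup):

```c
#include <linux/hwspinlock.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_get_hwlock(struct platform_device *pdev,
			      struct hwspinlock **hwlock)
{
	/* assumed: the hwspinlock id comes from the device tree */
	int id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);

	if (id < 0)
		return id;

	*hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
	if (!*hwlock)
		return -ENXIO;

	/* devres releases the lock on unbind; no hwspin_lock_free() needed */
	return 0;
}
```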
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
new file mode 100644
index 000000000000..2f1e0fbd1901
--- /dev/null
+++ b/drivers/nvmem/sprd-efuse.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define SPRD_EFUSE_ENABLE 0x20
+#define SPRD_EFUSE_ERR_FLAG 0x24
+#define SPRD_EFUSE_ERR_CLR 0x28
+#define SPRD_EFUSE_MAGIC_NUM 0x2c
+#define SPRD_EFUSE_FW_CFG 0x50
+#define SPRD_EFUSE_PW_SWT 0x54
+#define SPRD_EFUSE_MEM(val) (0x1000 + ((val) << 2))
+
+#define SPRD_EFUSE_VDD_EN BIT(0)
+#define SPRD_EFUSE_AUTO_CHECK_EN BIT(1)
+#define SPRD_EFUSE_DOUBLE_EN BIT(2)
+#define SPRD_EFUSE_MARGIN_RD_EN BIT(3)
+#define SPRD_EFUSE_LOCK_WR_EN BIT(4)
+
+#define SPRD_EFUSE_ERR_CLR_MASK GENMASK(13, 0)
+
+#define SPRD_EFUSE_ENK1_ON BIT(0)
+#define SPRD_EFUSE_ENK2_ON BIT(1)
+#define SPRD_EFUSE_PROG_EN BIT(2)
+
+#define SPRD_EFUSE_MAGIC_NUMBER 0x8810
+
+/* Block width (bytes) definitions */
+#define SPRD_EFUSE_BLOCK_WIDTH 4
+
+/*
+ * The Spreadtrum AP efuse contains 2 parts: normal efuse and secure efuse,
+ * and we can only access the normal efuse in the kernel. So define the
+ * normal block offset index and normal block numbers.
+ */
+#define SPRD_EFUSE_NORMAL_BLOCK_NUMS 24
+#define SPRD_EFUSE_NORMAL_BLOCK_OFFSET 72
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define SPRD_EFUSE_HWLOCK_TIMEOUT 5000
+
+/*
+ * Different Spreadtrum SoCs can have different normal block numbers and
+ * offsets, and some SoCs support a block-double feature: when reading or
+ * writing data to the efuse memory, the controller saves the data twice,
+ * in case one copy becomes corrupted over time.
+ *
+ * Thus we keep these parameters in per-variant device data.
+ */
+struct sprd_efuse_variant_data {
+ u32 blk_nums;
+ u32 blk_offset;
+ bool blk_double;
+};
+
+struct sprd_efuse {
+ struct device *dev;
+ struct clk *clk;
+ struct hwspinlock *hwlock;
+ struct mutex mutex;
+ void __iomem *base;
+ const struct sprd_efuse_variant_data *data;
+};
+
+static const struct sprd_efuse_variant_data ums312_data = {
+ .blk_nums = SPRD_EFUSE_NORMAL_BLOCK_NUMS,
+ .blk_offset = SPRD_EFUSE_NORMAL_BLOCK_OFFSET,
+ .blk_double = false,
+};
+
+/*
+ * On Spreadtrum platforms, multiple subsystems access the one efuse
+ * controller, so we need a hardware spinlock to synchronize between
+ * them.
+ */
+static int sprd_efuse_lock(struct sprd_efuse *efuse)
+{
+ int ret;
+
+ mutex_lock(&efuse->mutex);
+
+ ret = hwspin_lock_timeout_raw(efuse->hwlock,
+ SPRD_EFUSE_HWLOCK_TIMEOUT);
+ if (ret) {
+ dev_err(efuse->dev, "timeout get the hwspinlock\n");
+ mutex_unlock(&efuse->mutex);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sprd_efuse_unlock(struct sprd_efuse *efuse)
+{
+ hwspin_unlock_raw(efuse->hwlock);
+ mutex_unlock(&efuse->mutex);
+}
+
+static void sprd_efuse_set_prog_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val &= ~SPRD_EFUSE_ENK2_ON;
+ else
+ val &= ~SPRD_EFUSE_ENK1_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after opening or closing the efuse power so it stabilizes. */
+ usleep_range(1000, 1200);
+
+ if (en)
+ val |= SPRD_EFUSE_ENK1_ON;
+ else
+ val |= SPRD_EFUSE_ENK2_ON;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+
+	/* Wait 1000us after opening or closing the efuse power so it stabilizes. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_read_power(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_VDD_EN;
+ else
+ val &= ~SPRD_EFUSE_VDD_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+
+	/* Wait 1000us after opening or closing the efuse power so it stabilizes. */
+ usleep_range(1000, 1200);
+}
+
+static void sprd_efuse_set_prog_lock(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_LOCK_WR_EN;
+ else
+ val &= ~SPRD_EFUSE_LOCK_WR_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_auto_check(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_AUTO_CHECK_EN;
+ else
+ val &= ~SPRD_EFUSE_AUTO_CHECK_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_data_double(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_ENABLE);
+
+ if (en)
+ val |= SPRD_EFUSE_DOUBLE_EN;
+ else
+ val &= ~SPRD_EFUSE_DOUBLE_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_ENABLE);
+}
+
+static void sprd_efuse_set_prog_en(struct sprd_efuse *efuse, bool en)
+{
+ u32 val = readl(efuse->base + SPRD_EFUSE_PW_SWT);
+
+ if (en)
+ val |= SPRD_EFUSE_PROG_EN;
+ else
+ val &= ~SPRD_EFUSE_PROG_EN;
+
+ writel(val, efuse->base + SPRD_EFUSE_PW_SWT);
+}
+
+static int sprd_efuse_raw_prog(struct sprd_efuse *efuse, u32 blk, bool doub,
+ bool lock, u32 *data)
+{
+ u32 status;
+ int ret = 0;
+
+ /*
+ * We need to set the correct magic number before writing the efuse to
+ * allow programming, and it blocks other programming until we clear
+ * the magic number.
+ */
+ writel(SPRD_EFUSE_MAGIC_NUMBER,
+ efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ /*
+ * Power on the efuse, enable programming and enable double data
+ * if asked.
+ */
+ sprd_efuse_set_prog_power(efuse, true);
+ sprd_efuse_set_prog_en(efuse, true);
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /*
+ * Enable the auto-check function to validate if the programming is
+ * successful.
+ */
+ sprd_efuse_set_auto_check(efuse, true);
+
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable auto-check and data double after programming */
+ sprd_efuse_set_auto_check(efuse, false);
+ sprd_efuse_set_data_double(efuse, false);
+
+ /*
+ * Check the efuse error status; if the programming was successful,
+ * lock this efuse block to prevent it from being programmed again.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "write error status %d of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ ret = -EBUSY;
+ } else {
+ sprd_efuse_set_prog_lock(efuse, lock);
+ writel(*data, efuse->base + SPRD_EFUSE_MEM(blk));
+ sprd_efuse_set_prog_lock(efuse, false);
+ }
+
+ sprd_efuse_set_prog_power(efuse, false);
+ writel(0, efuse->base + SPRD_EFUSE_MAGIC_NUM);
+
+ return ret;
+}
+
+static int sprd_efuse_raw_read(struct sprd_efuse *efuse, int blk, u32 *val,
+ bool doub)
+{
+ u32 status;
+
+ /*
+ * The efuse must be powered on before reading data from it, and is
+ * powered off again once the read has completed.
+ */
+ sprd_efuse_set_read_power(efuse, true);
+
+ /* Enable double data if asked */
+ sprd_efuse_set_data_double(efuse, doub);
+
+ /* Start to read data from efuse block */
+ *val = readl(efuse->base + SPRD_EFUSE_MEM(blk));
+
+ /* Disable double data */
+ sprd_efuse_set_data_double(efuse, false);
+
+ /* Power off the efuse */
+ sprd_efuse_set_read_power(efuse, false);
+
+ /*
+ * Check the efuse error status and clear any errors that occurred.
+ */
+ status = readl(efuse->base + SPRD_EFUSE_ERR_FLAG);
+ if (status) {
+ dev_err(efuse->dev,
+ "read error status %d of block %d\n", status, blk);
+
+ writel(SPRD_EFUSE_ERR_CLR_MASK,
+ efuse->base + SPRD_EFUSE_ERR_CLR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_efuse_read(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ bool blk_double = efuse->data->blk_double;
+ u32 index = offset / SPRD_EFUSE_BLOCK_WIDTH + efuse->data->blk_offset;
+ u32 blk_offset = (offset % SPRD_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE;
+ u32 data;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_read(efuse, index, &data, blk_double);
+ if (!ret) {
+ data >>= blk_offset;
+ memcpy(val, &data, bytes);
+ }
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_write(void *context, u32 offset, void *val, size_t bytes)
+{
+ struct sprd_efuse *efuse = context;
+ int ret;
+
+ ret = sprd_efuse_lock(efuse);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(efuse->clk);
+ if (ret)
+ goto unlock;
+
+ ret = sprd_efuse_raw_prog(efuse, offset, false, false, val);
+
+ clk_disable_unprepare(efuse->clk);
+
+unlock:
+ sprd_efuse_unlock(efuse);
+ return ret;
+}
+
+static int sprd_efuse_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = { };
+ struct sprd_efuse *efuse;
+ const struct sprd_efuse_variant_data *pdata;
+ int ret;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get hwlock id\n");
+ return ret;
+ }
+
+ efuse->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
+ if (!efuse->hwlock) {
+ dev_err(&pdev->dev, "failed to request hwlock\n");
+ return -ENXIO;
+ }
+
+ efuse->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(efuse->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(efuse->clk);
+ }
+
+ mutex_init(&efuse->mutex);
+ efuse->dev = &pdev->dev;
+ efuse->data = pdata;
+
+ econfig.stride = 1;
+ econfig.word_size = 1;
+ econfig.read_only = false;
+ econfig.name = "sprd-efuse";
+ econfig.size = efuse->data->blk_nums * SPRD_EFUSE_BLOCK_WIDTH;
+ econfig.reg_read = sprd_efuse_read;
+ econfig.reg_write = sprd_efuse_write;
+ econfig.priv = efuse;
+ econfig.dev = &pdev->dev;
+ nvmem = devm_nvmem_register(&pdev->dev, &econfig);
+ if (IS_ERR(nvmem)) {
+ dev_err(&pdev->dev, "failed to register nvmem\n");
+ return PTR_ERR(nvmem);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_efuse_of_match[] = {
+ { .compatible = "sprd,ums312-efuse", .data = &ums312_data },
+ { }
+};
+
+static struct platform_driver sprd_efuse_driver = {
+ .probe = sprd_efuse_probe,
+ .driver = {
+ .name = "sprd-efuse",
+ .of_match_table = sprd_efuse_of_match,
+ },
+};
+
+module_platform_driver(sprd_efuse_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum AP efuse driver");
+MODULE_LICENSE("GPL v2");
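Once probe succeeds, the efuse is visible through the nvmem consumer API. A minimal, hypothetical consumer sketch (the lookup name and offsets are assumptions for illustration; DT consumers would normally resolve the device via nvmem-names references):

    #include <linux/device.h>
    #include <linux/nvmem-consumer.h>

    /* Hypothetical consumer: read 4 bytes at offset 0 of the efuse. */
    static int example_read_efuse(struct device *dev)
    {
            struct nvmem_device *nvmem;
            u8 buf[4];
            int ret;

            /* lookup name is hypothetical; DT consumers use nvmem-names */
            nvmem = devm_nvmem_device_get(dev, "sprd-efuse");
            if (IS_ERR(nvmem))
                    return PTR_ERR(nvmem);

            ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
            if (ret < 0)
                    return ret;

            dev_info(dev, "efuse[0..3] = %4phN\n", buf);
            return 0;
    }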
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 978427a9d5e6..99c1b8058559 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include "of_private.h"
+
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
@@ -241,6 +243,7 @@ static int parser_init(struct of_pci_range_parser *parser,
parser->node = node;
parser->pna = of_n_addr_cells(node);
parser->np = parser->pna + na + ns;
+ parser->dma = !strcmp(name, "dma-ranges");
parser->range = of_get_property(node, name, &rlen);
if (parser->range == NULL)
@@ -279,7 +282,11 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
range->pci_space = be32_to_cpup(parser->range);
range->flags = of_bus_pci_get_flags(parser->range);
range->pci_addr = of_read_number(parser->range + 1, ns);
- range->cpu_addr = of_translate_address(parser->node,
+ if (parser->dma)
+ range->cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ range->cpu_addr = of_translate_address(parser->node,
parser->range + na);
range->size = of_read_number(parser->range + parser->pna + na, ns);
@@ -292,8 +299,12 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
flags = of_bus_pci_get_flags(parser->range);
pci_addr = of_read_number(parser->range + 1, ns);
- cpu_addr = of_translate_address(parser->node,
- parser->range + na);
+ if (parser->dma)
+ cpu_addr = of_translate_dma_address(parser->node,
+ parser->range + na);
+ else
+ cpu_addr = of_translate_address(parser->node,
+ parser->range + na);
size = of_read_number(parser->range + parser->pna + na, ns);
if (flags != range->flags)
@@ -517,9 +528,13 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
*
* As far as we know, this damage only exists on Apple machines, so
* This code is only enabled on powerpc. --gcl
+ *
+ * This quirk also applies for 'dma-ranges' which frequently exist in
+ * child nodes without 'dma-ranges' in the parent nodes. --RobH
*/
ranges = of_get_property(parent, rprop, &rlen);
- if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
+ if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
+ strcmp(rprop, "dma-ranges")) {
pr_debug("no ranges; cannot translate\n");
return 1;
}
@@ -695,6 +710,16 @@ static struct device_node *__of_get_dma_parent(const struct device_node *np)
return of_node_get(args.np);
}
+static struct device_node *of_get_next_dma_parent(struct device_node *np)
+{
+ struct device_node *parent;
+
+ parent = __of_get_dma_parent(np);
+ of_node_put(np);
+
+ return parent;
+}
+
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
struct device_node *host;
@@ -826,25 +851,6 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
-struct device_node *of_find_matching_node_by_address(struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address)
-{
- struct device_node *dn = of_find_matching_node(from, matches);
- struct resource res;
-
- while (dn) {
- if (!of_address_to_resource(dn, 0, &res) &&
- res.start == base_address)
- return dn;
-
- dn = of_find_matching_node(dn, matches);
- }
-
- return NULL;
-}
-
-
/**
* of_iomap - Maps the memory mapped IO for a given device_node
* @device: the device whose io range will be mapped
@@ -924,47 +930,39 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
const __be32 *ranges = NULL;
int len, naddr, nsize, pna;
int ret = 0;
+ bool found_dma_ranges = false;
u64 dmaaddr;
- if (!node)
- return -EINVAL;
-
- while (1) {
- struct device_node *parent;
-
- naddr = of_n_addr_cells(node);
- nsize = of_n_size_cells(node);
-
- parent = __of_get_dma_parent(node);
- of_node_put(node);
-
- node = parent;
- if (!node)
- break;
-
+ while (node) {
ranges = of_get_property(node, "dma-ranges", &len);
/* Ignore empty ranges, they imply no translation required */
if (ranges && len > 0)
break;
- /*
- * At least empty ranges has to be defined for parent node if
- * DMA is supported
- */
- if (!ranges)
- break;
+ /* Once we find 'dma-ranges', then a missing one is an error */
+ if (found_dma_ranges && !ranges) {
+ ret = -ENODEV;
+ goto out;
+ }
+ found_dma_ranges = true;
+
+ node = of_get_next_dma_parent(node);
}
- if (!ranges) {
+ if (!node || !ranges) {
pr_debug("no dma-ranges found for node(%pOF)\n", np);
ret = -ENODEV;
goto out;
}
- len /= sizeof(u32);
-
+ naddr = of_bus_n_addr_cells(node);
+ nsize = of_bus_n_size_cells(node);
pna = of_n_addr_cells(node);
+ if ((len / sizeof(__be32)) % (pna + naddr + nsize)) {
+ ret = -EINVAL;
+ goto out;
+ }
/* dma-ranges format:
* DMA addr : naddr cells
@@ -972,10 +970,10 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
* size : nsize cells
*/
dmaaddr = of_read_number(ranges, naddr);
- *paddr = of_translate_dma_address(np, ranges);
+ *paddr = of_translate_dma_address(node, ranges + naddr);
if (*paddr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%pad) to CPU address failed node(%pOF)\n",
- dma_addr, np);
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ dmaaddr, np);
ret = -EINVAL;
goto out;
}
@@ -991,7 +989,6 @@ out:
return ret;
}
-EXPORT_SYMBOL_GPL(of_dma_get_range);
/**
* of_dma_is_coherent - Check if device is coherent
@@ -1009,7 +1006,7 @@ bool of_dma_is_coherent(struct device_node *np)
of_node_put(node);
return true;
}
- node = of_get_next_parent(node);
+ node = of_get_next_dma_parent(node);
}
of_node_put(node);
return false;
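Taken together, the changes above make of_dma_get_range() walk dma-ranges across DMA parents. A short sketch of the helper's contract (it becomes internal to drivers/of, see the of_private.h hunk below), assuming a parent bus that carries dma-ranges = <0x0 0x20000000 0x40000000>:

    /* Sketch: query the DMA window visible to 'np'. */
    u64 dma_addr, paddr, size;
    int ret;

    ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
    if (!ret) {
            /* dma_addr == 0x0, paddr == 0x20000000, size == 0x40000000 */
            pr_debug("DMA %llx -> CPU %llx, size %llx\n",
                     dma_addr, paddr, size);
    }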
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 1d667eb730e1..db7fbc0c0893 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -86,34 +86,46 @@ static bool __of_node_is_type(const struct device_node *np, const char *type)
return np && match && type && !strcmp(match, type);
}
-int of_n_addr_cells(struct device_node *np)
+int of_bus_n_addr_cells(struct device_node *np)
{
u32 cells;
- do {
- if (np->parent)
- np = np->parent;
+ for (; np; np = np->parent)
if (!of_property_read_u32(np, "#address-cells", &cells))
return cells;
- } while (np->parent);
+
/* No #address-cells property for the root node */
return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}
+
+int of_n_addr_cells(struct device_node *np)
+{
+ if (np->parent)
+ np = np->parent;
+
+ return of_bus_n_addr_cells(np);
+}
EXPORT_SYMBOL(of_n_addr_cells);
-int of_n_size_cells(struct device_node *np)
+int of_bus_n_size_cells(struct device_node *np)
{
u32 cells;
- do {
- if (np->parent)
- np = np->parent;
+ for (; np; np = np->parent)
if (!of_property_read_u32(np, "#size-cells", &cells))
return cells;
- } while (np->parent);
+
/* No #size-cells property for the root node */
return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}
+
+int of_n_size_cells(struct device_node *np)
+{
+ if (np->parent)
+ np = np->parent;
+
+ return of_bus_n_size_cells(np);
+}
EXPORT_SYMBOL(of_n_size_cells);
#ifdef CONFIG_NUMA
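The split matters because the two helpers start the '#address-cells' lookup at different nodes. A hedged sketch (the node names are made up):

    /* Assume this tree:
     *   /soc { #address-cells = <2>; #size-cells = <1>; child { ... }; };
     */
    struct device_node *soc, *child;    /* obtained elsewhere */

    /* Answers "how many address cells does child's reg use?" by
     * starting the walk at the parent: returns 2.
     */
    int reg_cells = of_n_addr_cells(child);

    /* Starts at the given node itself, which is what the dma-ranges
     * code needs once it already holds the bus node: also 2 here.
     */
    int bus_cells = of_bus_n_addr_cells(soc);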
diff --git a/drivers/of/device.c b/drivers/of/device.c
index da8158392010..e9127db7b067 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -93,7 +93,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
bool coherent;
unsigned long offset;
const struct iommu_ops *iommu;
- u64 mask;
+ u64 mask, end;
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
@@ -148,12 +148,13 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
+ end = dma_addr + size - 1;
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
- /* ...but only set bus mask if we found valid dma-ranges earlier */
+ /* ...but only set bus limit if we found valid dma-ranges earlier */
if (!ret)
- dev->bus_dma_mask = mask;
+ dev->bus_dma_limit = end;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
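The limit computation above is easiest to see with numbers. A worked sketch, assuming dma-ranges that yield dma_addr = 0x80000000 and size = 0x10000000:

    u64 end  = 0x80000000ULL + 0x10000000ULL - 1;  /* 0x8fffffff */
    u64 mask = DMA_BIT_MASK(ilog2(end) + 1);       /* 32-bit mask, 0xffffffff */

    /*
     * The old bus_dma_mask could only store the power-of-two mask and
     * so over-approximated the window; the new bus_dma_limit records
     * the exact end address 0x8fffffff.
     */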
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f1c23aad951e..2cdf64d2456f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -947,8 +947,8 @@ int __init early_init_dt_scan_chosen_stdout(void)
if (fdt_node_check_compatible(fdt, offset, match->compatible))
continue;
- of_setup_earlycon(match, offset, options);
- return 0;
+ if (of_setup_earlycon(match, offset, options) == 0)
+ return 0;
}
return -ENODEV;
}
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 24786818e32e..66294d29942a 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -158,4 +158,18 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np,
#define for_each_transaction_entry_reverse(_oft, _te) \
list_for_each_entry_reverse(_te, &(_oft)->te_list, node)
+extern int of_bus_n_addr_cells(struct device_node *np);
+extern int of_bus_n_size_cells(struct device_node *np);
+
+#ifdef CONFIG_OF_ADDRESS
+extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size);
+#else
+static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size)
+{
+ return -ENODEV;
+}
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index c423e94baf0f..9617b7df7c4d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -305,7 +305,6 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
{
struct property *new_prop = NULL, *prop;
int ret = 0;
- bool check_for_non_overlay_node = false;
if (target->in_livetree)
if (!of_prop_cmp(overlay_prop->name, "name") ||
@@ -318,6 +317,25 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
else
prop = NULL;
+ if (prop) {
+ if (!of_prop_cmp(prop->name, "#address-cells")) {
+ if (!of_prop_val_eq(prop, overlay_prop)) {
+ pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
+ target->np);
+ ret = -EINVAL;
+ }
+ return ret;
+
+ } else if (!of_prop_cmp(prop->name, "#size-cells")) {
+ if (!of_prop_val_eq(prop, overlay_prop)) {
+ pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
+ target->np);
+ ret = -EINVAL;
+ }
+ return ret;
+ }
+ }
+
if (is_symbols_prop) {
if (prop)
return -EINVAL;
@@ -330,33 +348,18 @@ static int add_changeset_property(struct overlay_changeset *ovcs,
return -ENOMEM;
if (!prop) {
- check_for_non_overlay_node = true;
if (!target->in_livetree) {
new_prop->next = target->np->deadprops;
target->np->deadprops = new_prop;
}
ret = of_changeset_add_property(&ovcs->cset, target->np,
new_prop);
- } else if (!of_prop_cmp(prop->name, "#address-cells")) {
- if (!of_prop_val_eq(prop, new_prop)) {
- pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n",
- target->np);
- ret = -EINVAL;
- }
- } else if (!of_prop_cmp(prop->name, "#size-cells")) {
- if (!of_prop_val_eq(prop, new_prop)) {
- pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n",
- target->np);
- ret = -EINVAL;
- }
} else {
- check_for_non_overlay_node = true;
ret = of_changeset_update_property(&ovcs->cset, target->np,
new_prop);
}
- if (check_for_non_overlay_node &&
- !of_node_check_flag(target->np, OF_OVERLAY))
+ if (!of_node_check_flag(target->np, OF_OVERLAY))
pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
target->np, new_prop->name);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b47a2292fe8e..d93891a05f60 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -480,6 +480,7 @@ int of_platform_populate(struct device_node *root,
pr_debug("%s()\n", __func__);
pr_debug(" starting at: %pOF\n", root);
+ device_links_supplier_sync_state_pause();
for_each_child_of_node(root, child) {
rc = of_platform_bus_create(child, matches, lookup, parent, true);
if (rc) {
@@ -487,6 +488,8 @@ int of_platform_populate(struct device_node *root,
break;
}
}
+ device_links_supplier_sync_state_resume();
+
of_node_set_flag(root, OF_POPULATED_BUS);
of_node_put(root);
@@ -518,6 +521,7 @@ static int __init of_platform_default_populate_init(void)
if (!of_have_populated_dt())
return -ENODEV;
+ device_links_supplier_sync_state_pause();
/*
* Handle certain compatibles explicitly, since we don't want to create
* platform_devices for every node in /reserved-memory with a
@@ -538,6 +542,14 @@ static int __init of_platform_default_populate_init(void)
return 0;
}
arch_initcall_sync(of_platform_default_populate_init);
+
+static int __init of_platform_sync_state_init(void)
+{
+ if (of_have_populated_dt())
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall_sync(of_platform_sync_state_init);
#endif
int of_platform_device_destroy(struct device *dev, void *data)
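The pause/resume pairing above is the general pattern for batching device creation so that supplier sync_state() callbacks only run once all consumers exist. A minimal sketch, where create_one_device() is a hypothetical helper:

    static void example_populate(int ndevs)
    {
            int i;

            device_links_supplier_sync_state_pause();

            for (i = 0; i < ndevs; i++)
                    create_one_device(i);   /* hypothetical helper */

            device_links_supplier_sync_state_resume();
    }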
diff --git a/drivers/of/property.c b/drivers/of/property.c
index d7fa75e31f22..e851c57a15b0 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/string.h>
+#include <linux/moduleparam.h>
#include "of_private.h"
@@ -164,7 +165,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u64_index);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -212,7 +213,7 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -260,7 +261,7 @@ EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -334,7 +335,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u64);
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
+ * @out_values: pointer to found values.
* @sz_min: minimum number of array elements to read
* @sz_max: maximum number of array elements to read, if zero there is no
* upper limit on the number of elements in the dts entry but only
@@ -872,6 +873,20 @@ of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
of_property_count_strings(node, propname);
}
+static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return kbasename(to_of_node(fwnode)->full_name);
+}
+
+static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ /* Root needs no prefix here (its name is "/"). */
+ if (!to_of_node(fwnode)->parent)
+ return "";
+
+ return "/";
+}
+
static struct fwnode_handle *
of_fwnode_get_parent(const struct fwnode_handle *fwnode)
{
@@ -985,6 +1000,320 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
+static bool of_is_ancestor_of(struct device_node *test_ancestor,
+ struct device_node *child)
+{
+ of_node_get(child);
+ while (child) {
+ if (child == test_ancestor) {
+ of_node_put(child);
+ return true;
+ }
+ child = of_get_next_parent(child);
+ }
+ return false;
+}
+
+/**
+ * of_link_to_phandle - Add device link to supplier from supplier phandle
+ * @dev: consumer device
+ * @sup_np: phandle to supplier device tree node
+ *
+ * Given a phandle to a supplier device tree node (@sup_np), this function
+ * finds the device that owns the supplier device tree node and creates a
+ * device link from @dev consumer device to the supplier device. This function
+ * doesn't create device links for invalid scenarios such as trying to create a
+ * link with a parent device as the consumer of its child device. In such
+ * cases, it returns an error.
+ *
+ * Returns:
+ * - 0 if link successfully created to supplier
+ * - -EAGAIN if linking to the supplier should be reattempted
+ * - -EINVAL if the supplier link is invalid and should not be created
+ * - -ENODEV if there is no device that corresponds to the supplier phandle
+ */
+static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
+ u32 dl_flags)
+{
+ struct device *sup_dev;
+ int ret = 0;
+ struct device_node *tmp_np = sup_np;
+ int is_populated;
+
+ of_node_get(sup_np);
+ /*
+ * Find the device node that contains the supplier phandle. It may be
+ * @sup_np or it may be an ancestor of @sup_np.
+ */
+ while (sup_np && !of_find_property(sup_np, "compatible", NULL))
+ sup_np = of_get_next_parent(sup_np);
+ if (!sup_np) {
+ dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np);
+ return -ENODEV;
+ }
+
+ /*
+ * Don't allow linking a device node as a consumer of one of its
+ * descendant nodes. By definition, a child node can't be a functional
+ * dependency for the parent node.
+ */
+ if (of_is_ancestor_of(dev->of_node, sup_np)) {
+ dev_dbg(dev, "Not linking to %pOFP - is descendant\n", sup_np);
+ of_node_put(sup_np);
+ return -EINVAL;
+ }
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ is_populated = of_node_check_flag(sup_np, OF_POPULATED);
+ of_node_put(sup_np);
+ if (!sup_dev && is_populated) {
+ /* Early device without struct device. */
+ dev_dbg(dev, "Not linking to %pOFP - No struct device\n",
+ sup_np);
+ return -ENODEV;
+ } else if (!sup_dev) {
+ return -EAGAIN;
+ }
+ if (!device_link_add(dev, sup_dev, dl_flags))
+ ret = -EAGAIN;
+ put_device(sup_dev);
+ return ret;
+}
+
+/**
+ * parse_prop_cells - Property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @list_name: Property name that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed name
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *list_name,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp(prop_name, list_name))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, list_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SIMPLE_PROP(fname, name, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_prop_cells(np, prop_name, index, name, cells); \
+}
+
+static int strcmp_suffix(const char *str, const char *suffix)
+{
+ unsigned int len, suffix_len;
+
+ len = strlen(str);
+ suffix_len = strlen(suffix);
+ if (len <= suffix_len)
+ return -1;
+ return strcmp(str + len - suffix_len, suffix);
+}
+
+/**
+ * parse_suffix_prop_cells - Suffix property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @suffix: Property suffix that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed suffix
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_suffix_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *suffix,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp_suffix(prop_name, suffix))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, prop_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SUFFIX_PROP(fname, suffix, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_suffix_prop_cells(np, prop_name, index, suffix, cells); \
+}
+
+/**
+ * struct supplier_bindings - Property parsing functions for suppliers
+ *
+ * @parse_prop: function name
+ * parse_prop() finds the node corresponding to a supplier phandle
+ * @parse_prop.np: Pointer to device node holding supplier phandle property
+ * @parse_prop.prop_name: Name of property holding a phandle value
+ * @parse_prop.index: For properties holding a list of phandles, this is the
+ * index into the list
+ *
+ * Returns:
+ * parse_prop() return values are
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+struct supplier_bindings {
+ struct device_node *(*parse_prop)(struct device_node *np,
+ const char *prop_name, int index);
+};
+
+DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
+DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_iommu_maps(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (strcmp(prop_name, "iommu-map"))
+ return NULL;
+
+ return of_parse_phandle(np, prop_name, (index * 4) + 1);
+}
+
+static const struct supplier_bindings of_supplier_bindings[] = {
+ { .parse_prop = parse_clocks, },
+ { .parse_prop = parse_interconnects, },
+ { .parse_prop = parse_iommus, },
+ { .parse_prop = parse_iommu_maps, },
+ { .parse_prop = parse_mboxes, },
+ { .parse_prop = parse_io_channels, },
+ { .parse_prop = parse_interrupt_parent, },
+ { .parse_prop = parse_dmas, },
+ { .parse_prop = parse_regulators, },
+ { .parse_prop = parse_gpio, },
+ { .parse_prop = parse_gpios, },
+ {}
+};
+
+/**
+ * of_link_property - Create device links to suppliers listed in a property
+ * @dev: Consumer device
+ * @con_np: The consumer device tree node which contains the property
+ * @prop_name: Name of property to be parsed
+ *
+ * This function checks if the property @prop_name that is present in the
+ * @con_np device tree node is one of the known common device tree bindings
+ * that list phandles to suppliers. If @prop_name isn't one, this function
+ * doesn't do anything.
+ *
+ * If @prop_name is one, this function attempts to create device links from the
+ * consumer device @dev to all the devices of the suppliers listed in
+ * @prop_name.
+ *
+ * Any failed attempt to create a device link will NOT result in an immediate
+ * return. of_link_property() must create links to all the available supplier
+ * devices even when attempts to create a link to one or more suppliers fail.
+ */
+static int of_link_property(struct device *dev, struct device_node *con_np,
+ const char *prop_name)
+{
+ struct device_node *phandle;
+ const struct supplier_bindings *s = of_supplier_bindings;
+ unsigned int i = 0;
+ bool matched = false;
+ int ret = 0;
+ u32 dl_flags;
+
+ if (dev->of_node == con_np)
+ dl_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ else
+ dl_flags = DL_FLAG_SYNC_STATE_ONLY;
+
+ /* Do not stop at first failed link, link all available suppliers. */
+ while (!matched && s->parse_prop) {
+ while ((phandle = s->parse_prop(con_np, prop_name, i))) {
+ matched = true;
+ i++;
+ if (of_link_to_phandle(dev, phandle, dl_flags)
+ == -EAGAIN)
+ ret = -EAGAIN;
+ of_node_put(phandle);
+ }
+ s++;
+ }
+ return ret;
+}
+
+static int of_link_to_suppliers(struct device *dev,
+ struct device_node *con_np)
+{
+ struct device_node *child;
+ struct property *p;
+ int ret = 0;
+
+ for_each_property_of_node(con_np, p)
+ if (of_link_property(dev, con_np, p->name))
+ ret = -ENODEV;
+
+ for_each_child_of_node(con_np, child)
+ if (of_link_to_suppliers(dev, child) && !ret)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static bool of_devlink;
+core_param(of_devlink, of_devlink, bool, 0);
+
+static int of_fwnode_add_links(const struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ if (!of_devlink)
+ return 0;
+
+ if (unlikely(!is_of_node(fwnode)))
+ return 0;
+
+ return of_link_to_suppliers(dev, to_of_node(fwnode));
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
@@ -993,6 +1322,8 @@ const struct fwnode_operations of_fwnode_ops = {
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
+ .get_name = of_fwnode_get_name,
+ .get_name_prefix = of_fwnode_get_name_prefix,
.get_parent = of_fwnode_get_parent,
.get_next_child_node = of_fwnode_get_next_child_node,
.get_named_child_node = of_fwnode_get_named_child_node,
@@ -1001,5 +1332,6 @@ const struct fwnode_operations of_fwnode_ops = {
.graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint,
.graph_get_port_parent = of_fwnode_graph_get_port_parent,
.graph_parse_endpoint = of_fwnode_graph_parse_endpoint,
+ .add_links = of_fwnode_add_links,
};
EXPORT_SYMBOL_GPL(of_fwnode_ops);
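Extending the supplier table to another binding is intentionally mechanical. A hypothetical sketch (the pwms entry is not part of this patch) of what a new entry would look like:

    /* Hypothetical: teach of_link_property() about PWM consumers. */
    DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")

    static const struct supplier_bindings of_supplier_bindings[] = {
            { .parse_prop = parse_clocks, },
            /* ... existing entries ... */
            { .parse_prop = parse_pwms, },
            {}
    };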
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
index 55fe0ee20109..a85b5e1c381a 100644
--- a/drivers/of/unittest-data/testcases.dts
+++ b/drivers/of/unittest-data/testcases.dts
@@ -15,5 +15,6 @@
#include "tests-phandle.dtsi"
#include "tests-interrupts.dtsi"
#include "tests-match.dtsi"
+#include "tests-address.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
diff --git a/drivers/of/unittest-data/tests-address.dtsi b/drivers/of/unittest-data/tests-address.dtsi
new file mode 100644
index 000000000000..3fe5d3987beb
--- /dev/null
+++ b/drivers/of/unittest-data/tests-address.dtsi
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ testcase-data {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ address-tests {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ /* The ranges property here ensures it is not used
+ * for dma-ranges translation */
+ ranges = <0x70000000 0x70000000 0x40000000>,
+ <0x00000000 0xd0000000 0x20000000>;
+ dma-ranges = <0x0 0x20000000 0x40000000>;
+
+ device@70000000 {
+ reg = <0x70000000 0x1000>;
+ };
+
+ bus@80000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x80000000 0x100000>;
+ dma-ranges = <0x10000000 0x0 0x40000000>;
+
+ device@1000 {
+ reg = <0x1000 0x1000>;
+ };
+ };
+
+ pci@90000000 {
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0x90000000 0x1000>;
+ ranges = <0x42000000 0x0 0x40000000 0x40000000 0x0 0x10000000>;
+ dma-ranges = <0x42000000 0x0 0x80000000 0x00000000 0x0 0x10000000>,
+ <0x42000000 0x0 0xc0000000 0x20000000 0x0 0x10000000>;
+ };
+
+ };
+ };
+};
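To see how the nested dma-ranges above compose, here is the translation the unittest below expects for bus@80000000/device@1000, written out as a sketch:

    /*
     * bus@80000000:   dma-ranges = <0x10000000 0x0        0x40000000>
     * address-tests:  dma-ranges = <0x0        0x20000000 0x40000000>
     *
     * DMA address 0x10000000 on the child bus maps to 0x0 on the
     * parent bus, which in turn maps to CPU physical 0x20000000, so:
     */
    u64 expect_dma_addr = 0x10000000;
    u64 expect_paddr    = 0x20000000;
    u64 expect_size     = 0x40000000;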
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 92e895d86458..68b87587b2ef 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -12,6 +12,7 @@
#include <linux/hashtable.h>
#include <linux/libfdt.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -779,6 +780,95 @@ static void __init of_unittest_changeset(void)
#endif
}
+static void __init of_unittest_dma_ranges_one(const char *path,
+ u64 expect_dma_addr, u64 expect_paddr, u64 expect_size)
+{
+ struct device_node *np;
+ u64 dma_addr, paddr, size;
+ int rc;
+
+ np = of_find_node_by_path(path);
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ rc = of_dma_get_range(np, &dma_addr, &paddr, &size);
+
+ unittest(!rc, "of_dma_get_range failed on node %pOF rc=%i\n", np, rc);
+ if (!rc) {
+ unittest(size == expect_size,
+ "of_dma_get_range wrong size on node %pOF size=%llx\n", np, size);
+ unittest(paddr == expect_paddr,
+ "of_dma_get_range wrong phys addr (%llx) on node %pOF", paddr, np);
+ unittest(dma_addr == expect_dma_addr,
+ "of_dma_get_range wrong DMA addr (%llx) on node %pOF", dma_addr, np);
+ }
+ of_node_put(np);
+}
+
+static void __init of_unittest_parse_dma_ranges(void)
+{
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
+ 0x0, 0x20000000, 0x40000000);
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
+ 0x10000000, 0x20000000, 0x40000000);
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
+ 0x80000000, 0x20000000, 0x10000000);
+}
+
+static void __init of_unittest_pci_dma_ranges(void)
+{
+ struct device_node *np;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int i = 0;
+
+ if (!IS_ENABLED(CONFIG_PCI))
+ return;
+
+ np = of_find_node_by_path("/testcase-data/address-tests/pci@90000000");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ pr_err("missing dma-ranges property\n");
+ return;
+ }
+
+ /* Get the dma-ranges from the device tree */
+ for_each_of_pci_range(&parser, &range) {
+ if (!i) {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x20000000,
+ "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0x80000000,
+ "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
+ range.pci_addr, np);
+ } else {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x40000000,
+ "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0xc0000000,
+ "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
+ range.pci_addr, np);
+ }
+ i++;
+ }
+
+ of_node_put(np);
+}
+
static void __init of_unittest_parse_interrupts(void)
{
struct device_node *np;
@@ -1146,8 +1236,10 @@ static void attach_node_and_children(struct device_node *np)
full_name = kasprintf(GFP_KERNEL, "%pOF", np);
if (!strcmp(full_name, "/__local_fixups__") ||
- !strcmp(full_name, "/__fixups__"))
+ !strcmp(full_name, "/__fixups__")) {
+ kfree(full_name);
return;
+ }
dup = of_find_node_by_path(full_name);
kfree(full_name);
@@ -2555,6 +2647,8 @@ static int __init of_unittest(void)
of_unittest_changeset();
of_unittest_parse_interrupts();
of_unittest_parse_interrupts_extended();
+ of_unittest_parse_dma_ranges();
+ of_unittest_pci_dma_ranges();
of_unittest_match_node();
of_unittest_platform_populate();
of_unittest_overlay();
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9ff0538ee83a..be7a7d332332 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2103,6 +2103,75 @@ put_table:
}
/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to adjust voltage of
+ * @u_volt: new OPP target voltage
+ * @u_volt_min: new OPP min voltage
+ * @u_volt_max: new OPP max voltage
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, 0 if no modification was needed or the modification was
+ * successful.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ mutex_lock(&opp_table->lock);
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto adjust_unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->supplies->u_volt == u_volt)
+ goto adjust_unlock;
+
+ opp->supplies->u_volt = u_volt;
+ opp->supplies->u_volt_min = u_volt_min;
+ opp->supplies->u_volt_max = u_volt_max;
+
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
+ /* Notify the voltage change of the OPP */
+ blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
+ opp);
+
+ dev_pm_opp_put(opp);
+ goto adjust_put_table;
+
+adjust_unlock:
+ mutex_unlock(&opp_table->lock);
+adjust_put_table:
+ dev_pm_opp_put_opp_table(opp_table);
+ return r;
+}
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
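A hedged sketch of a likely caller, e.g. an adaptive voltage scaling driver trimming an existing OPP; the frequency and microvolt values are made up for illustration:

    /* Hypothetical: retarget the 1.2 GHz OPP to a characterized voltage. */
    static int example_trim_opp(struct device *dev)
    {
            return dev_pm_opp_adjust_voltage(dev, 1200000000,
                                             950000,    /* new target uV */
                                             900000,    /* new min uV */
                                             1000000);  /* new max uV */
    }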
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 5484a46dafda..3b00e2c8e2e9 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -45,6 +45,7 @@ static struct daisydev {
static DEFINE_SPINLOCK(topology_lock);
static int numdevs;
+static bool daisy_init_done;
/* Forward-declaration of lower-level functions. */
static int mux_present(struct parport *port);
@@ -87,6 +88,24 @@ static struct parport *clone_parport(struct parport *real, int muxport)
return extra;
}
+static int daisy_drv_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+
+ if (strcmp(drv->name, "daisy_drv"))
+ return -ENODEV;
+ if (strcmp(par_dev->name, daisy_dev_name))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver daisy_driver = {
+ .name = "daisy_drv",
+ .probe = daisy_drv_probe,
+ .devmodel = true,
+};
+
/* Discover the IEEE1284.3 topology on a port -- muxes and daisy chains.
* Return value is number of devices actually detected. */
int parport_daisy_init(struct parport *port)
@@ -98,6 +117,23 @@ int parport_daisy_init(struct parport *port)
int i;
int last_try = 0;
+ if (!daisy_init_done) {
+ /*
+ * The flag must be set true first, because
+ * parport_register_driver() might try to load the low-level
+ * driver, which announces new ports and thereby re-enters
+ * parport_daisy_init().
+ */
+ daisy_init_done = true;
+ i = parport_register_driver(&daisy_driver);
+ if (i) {
+ pr_err("daisy registration failed\n");
+ daisy_init_done = false;
+ return i;
+ }
+ }
+
again:
/* Because this is called before any other devices exist,
* we don't have to claim exclusive access. */
@@ -213,10 +249,12 @@ void parport_daisy_fini(struct parport *port)
struct pardevice *parport_open(int devnum, const char *name)
{
struct daisydev *p = topology;
+ struct pardev_cb par_cb;
struct parport *port;
struct pardevice *dev;
int daisy;
+ memset(&par_cb, 0, sizeof(par_cb));
spin_lock(&topology_lock);
while (p && p->devnum != devnum)
p = p->next;
@@ -230,7 +268,7 @@ struct pardevice *parport_open(int devnum, const char *name)
port = parport_get_port(p->port);
spin_unlock(&topology_lock);
- dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
+ dev = parport_register_dev_model(port, name, &par_cb, devnum);
parport_put_port(port);
if (!dev)
return NULL;
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e035174ba205..e5e6a463a941 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
- struct pardevice *dev = parport_open (devnum, "Device ID probe");
+ struct pardevice *dev = parport_open(devnum, daisy_dev_name);
if (!dev)
return -ENXIO;
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 7b4ee33c1935..d6920ebeabcd 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -230,6 +230,18 @@ static int port_check(struct device *dev, void *dev_drv)
return 0;
}
+/*
+ * Callback for bus_for_each_dev(); returns 1 if the given device is a
+ * parallel port, so the iteration stops at the first port found.
+ */
+
+static int port_detect(struct device *dev, void *dev_drv)
+{
+ if (is_parport(dev))
+ return 1;
+ return 0;
+}
+
/**
* parport_register_driver - register a parallel port device driver
* @drv: structure describing the driver
@@ -266,9 +278,6 @@ static int port_check(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (list_empty(&portlist))
- get_lowlevel_driver();
-
if (drv->devmodel) {
/* using device model */
int ret;
@@ -282,6 +291,15 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
if (ret)
return ret;
+ /*
+ * Check if the bus has any parallel port registered; if
+ * none is found, load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
mutex_lock(&registration_lock);
if (drv->match_port)
bus_for_each_dev(&parport_bus_type, NULL, drv,
@@ -292,6 +310,8 @@ int __parport_register_driver(struct parport_driver *drv, struct module *owner,
drv->devmodel = false;
+ if (list_empty(&portlist))
+ get_lowlevel_driver();
mutex_lock(&registration_lock);
list_for_each_entry(port, &portlist, list)
drv->attach(port);
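For reference, a device-model client that benefits from the on-demand lowlevel-driver load would register roughly like this; the names and callback body are hypothetical:

    static void example_match_port(struct parport *port)
    {
            /* called once per registered parallel port */
    }

    static struct parport_driver example_driver = {
            .name = "example_drv",
            .match_port = example_match_port,
            .devmodel = true,
    };

    /*
     * parport_register_driver(&example_driver) now loads the lowlevel
     * driver only when no port is registered on the bus yet.
     */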
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index a304f5ea11b9..4bef5c2bae9f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,7 +52,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- def_bool ARC || ARM || ARM64 || X86 || RISCV
+ def_bool y
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
@@ -106,14 +106,14 @@ config PCI_PF_STUB
When in doubt, say N.
config XEN_PCIDEV_FRONTEND
- tristate "Xen PCI Frontend"
- depends on X86 && XEN
- select PCI_XEN
+ tristate "Xen PCI Frontend"
+ depends on X86 && XEN
+ select PCI_XEN
select XEN_XENBUS_FRONTEND
- default y
- help
- The PCI device frontend driver allows the kernel to import arbitrary
- PCI devices from a PCI backend to support PCI driver domains.
+ default y
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
config PCI_ATS
bool
@@ -180,12 +180,12 @@ config PCI_LABEL
select NLS
config PCI_HYPERV
- tristate "Hyper-V PCI Frontend"
- depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ tristate "Hyper-V PCI Frontend"
+ depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
select PCI_HYPERV_INTERFACE
- help
- The PCI device frontend driver allows the kernel to import arbitrary
- PCI devices from a PCI backend to support PCI driver domains.
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 28cdd8c0213a..522d2b974e91 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
setup-bus.o vc.o mmap.o setup-irq.o
+obj-$(CONFIG_PCI) += pcie/
+
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -15,7 +17,6 @@ endif
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
-obj-$(CONFIG_PCIEPORTBUS) += pcie/
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2fccb5762c76..79c4a2ef269a 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -355,7 +355,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
-static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index e18499243f84..982b46f0a54d 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -60,8 +60,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
pdev = pci_physfn(dev);
if (pdev->ats_stu != ps)
return -EINVAL;
-
- atomic_inc(&pdev->ats_ref_cnt); /* count enabled VFs */
} else {
dev->ats_stu = ps;
ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
@@ -71,7 +69,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
dev->ats_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_ats);
/**
* pci_disable_ats - disable the ATS capability
@@ -79,27 +76,17 @@ EXPORT_SYMBOL_GPL(pci_enable_ats);
*/
void pci_disable_ats(struct pci_dev *dev)
{
- struct pci_dev *pdev;
u16 ctrl;
if (WARN_ON(!dev->ats_enabled))
return;
- if (atomic_read(&dev->ats_ref_cnt))
- return; /* VFs still enabled */
-
- if (dev->is_virtfn) {
- pdev = pci_physfn(dev);
- atomic_dec(&pdev->ats_ref_cnt);
- }
-
pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
ctrl &= ~PCI_ATS_CTRL_ENABLE;
pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
dev->ats_enabled = 0;
}
-EXPORT_SYMBOL_GPL(pci_disable_ats);
void pci_restore_ats_state(struct pci_dev *dev)
{
@@ -113,7 +100,6 @@ void pci_restore_ats_state(struct pci_dev *dev)
ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
}
-EXPORT_SYMBOL_GPL(pci_restore_ats_state);
/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
@@ -140,7 +126,6 @@ int pci_ats_queue_depth(struct pci_dev *dev)
pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
}
-EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
/**
* pci_ats_page_aligned - Return Page Aligned Request bit status.
@@ -167,9 +152,22 @@ int pci_ats_page_aligned(struct pci_dev *pdev)
return 0;
}
-EXPORT_SYMBOL_GPL(pci_ats_page_aligned);
#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *pdev)
+{
+ u16 status;
+
+ pdev->pri_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+
+ if (!pdev->pri_cap)
+ return;
+
+ pci_read_config_word(pdev, pdev->pri_cap + PCI_PRI_STATUS, &status);
+ if (status & PCI_PRI_STATUS_PASID)
+ pdev->pasid_required = 1;
+}
+
/**
* pci_enable_pri - Enable PRI capability
* @ pdev: PCI device structure
@@ -180,32 +178,41 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
{
u16 control, status;
u32 max_requests;
- int pos;
+ int pri = pdev->pri_cap;
+
+ /*
+ * VFs must not implement the PRI Capability. If their PF
+ * implements PRI, it is shared by the VFs, so if the PF PRI is
+ * enabled, it is also enabled for the VF.
+ */
+ if (pdev->is_virtfn) {
+ if (pci_physfn(pdev)->pri_enabled)
+ return 0;
+ return -EINVAL;
+ }
if (WARN_ON(pdev->pri_enabled))
return -EBUSY;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ pci_read_config_word(pdev, pri + PCI_PRI_STATUS, &status);
if (!(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
- pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
+ pci_read_config_dword(pdev, pri + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
pdev->pri_reqs_alloc = reqs;
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
control = PCI_PRI_CTRL_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
pdev->pri_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_pri);
/**
* pci_disable_pri - Disable PRI capability
@@ -216,18 +223,21 @@ EXPORT_SYMBOL_GPL(pci_enable_pri);
void pci_disable_pri(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pri = pdev->pri_cap;
+
+ /* VFs share the PF PRI */
+ if (pdev->is_virtfn)
+ return;
if (WARN_ON(!pdev->pri_enabled))
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pri + PCI_PRI_CTRL, &control);
control &= ~PCI_PRI_CTRL_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
pdev->pri_enabled = 0;
}
@@ -241,19 +251,20 @@ void pci_restore_pri_state(struct pci_dev *pdev)
{
u16 control = PCI_PRI_CTRL_ENABLE;
u32 reqs = pdev->pri_reqs_alloc;
- int pos;
+ int pri = pdev->pri_cap;
+
+ if (pdev->is_virtfn)
+ return;
if (!pdev->pri_enabled)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return;
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
}
-EXPORT_SYMBOL_GPL(pci_restore_pri_state);
/**
* pci_reset_pri - Resets device's PRI state
@@ -265,24 +276,45 @@ EXPORT_SYMBOL_GPL(pci_restore_pri_state);
int pci_reset_pri(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pri = pdev->pri_cap;
+
+ if (pdev->is_virtfn)
+ return 0;
if (WARN_ON(pdev->pri_enabled))
return -EBUSY;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
+ if (!pri)
return -EINVAL;
control = PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
return 0;
}
-EXPORT_SYMBOL_GPL(pci_reset_pri);
+
+/**
+ * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
+ * status.
+ * @pdev: PCI device structure
+ *
+ * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
+ */
+int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ return pdev->pasid_required;
+}
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *pdev)
+{
+ pdev->pasid_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+}
+
/**
* pci_enable_pasid - Enable the PASID capability
* @pdev: PCI device structure
@@ -295,7 +327,17 @@ EXPORT_SYMBOL_GPL(pci_reset_pri);
int pci_enable_pasid(struct pci_dev *pdev, int features)
{
u16 control, supported;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ /*
+ * VFs must not implement the PASID Capability, but if a PF
+ * supports PASID, its VFs share the PF PASID configuration.
+ */
+ if (pdev->is_virtfn) {
+ if (pci_physfn(pdev)->pasid_enabled)
+ return 0;
+ return -EINVAL;
+ }
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
@@ -303,11 +345,10 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (!pdev->eetlp_prefix_path)
return -EINVAL;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
@@ -317,13 +358,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
control = PCI_PASID_CTRL_ENABLE | features;
pdev->pasid_features = features;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
pdev->pasid_enabled = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(pci_enable_pasid);
/**
* pci_disable_pasid - Disable the PASID capability
@@ -332,20 +372,22 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
void pci_disable_pasid(struct pci_dev *pdev)
{
u16 control = 0;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ /* VFs share the PF PASID configuration */
+ if (pdev->is_virtfn)
+ return;
if (WARN_ON(!pdev->pasid_enabled))
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
pdev->pasid_enabled = 0;
}
-EXPORT_SYMBOL_GPL(pci_disable_pasid);
/**
* pci_restore_pasid_state - Restore PASID capabilities
@@ -354,19 +396,20 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
void pci_restore_pasid_state(struct pci_dev *pdev)
{
u16 control;
- int pos;
+ int pasid = pdev->pasid_cap;
+
+ if (pdev->is_virtfn)
+ return;
if (!pdev->pasid_enabled)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (!pasid)
return;
control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
- pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
}
-EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
/**
* pci_pasid_features - Check which PASID features are supported
@@ -381,49 +424,20 @@ EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
- int pos;
+ int pasid = pdev->pasid_cap;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
return supported;
}
-EXPORT_SYMBOL_GPL(pci_pasid_features);
-
-/**
- * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
- * status.
- * @pdev: PCI device structure
- *
- * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
- *
- * Even though the PRG response PASID status is read from PRI Status
- * Register, since this API will mainly be used by PASID users, this
- * function is defined within #ifdef CONFIG_PCI_PASID instead of
- * CONFIG_PCI_PRI.
- */
-int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
- u16 status;
- int pos;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
- if (!pos)
- return 0;
-
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
- if (status & PCI_PRI_STATUS_PASID)
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
@@ -437,17 +451,18 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
int pci_max_pasids(struct pci_dev *pdev)
{
u16 supported;
- int pos;
+ int pasid;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
- if (!pos)
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ pasid = pdev->pasid_cap;
+ if (!pasid)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+ pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
return (1 << supported);
}
-EXPORT_SYMBOL_GPL(pci_max_pasids);
#endif /* CONFIG_PCI_PASID */
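The ATS rework above applies one pattern throughout: the PASID capability offset is discovered once at enumeration time (pci_pasid_init()) and cached in struct pci_dev, and SR-IOV virtual functions defer to their physical function, since VFs must not implement the PASID Capability themselves. A minimal sketch of a query helper built on both patterns (example_pasid_features() is illustrative, not a kernel API):

    /* Sketch only: redirect VFs to the PF, then use the cached offset. */
    static int example_pasid_features(struct pci_dev *pdev)
    {
            u16 supported;
            int pasid;

            if (pdev->is_virtfn)
                    pdev = pci_physfn(pdev);   /* VFs share the PF config */

            pasid = pdev->pasid_cap;           /* cached by pci_pasid_init() */
            if (!pasid)
                    return -EINVAL;

            pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
            return supported & (PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV);
    }

Note the ordering: the cached offset must be read from the device that actually owns the capability, i.e. after the VF-to-PF redirection.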
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 70e078238899..c77069c8ee5d 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -22,34 +22,6 @@ config PCI_AARDVARK
controller is part of the South Bridge of the Marvell Armada
3700 SoC.
-menu "Cadence PCIe controllers support"
-
-config PCIE_CADENCE
- bool
-
-config PCIE_CADENCE_HOST
- bool "Cadence PCIe host controller"
- depends on OF
- depends on PCI
- select IRQ_DOMAIN
- select PCIE_CADENCE
- help
- Say Y here if you want to support the Cadence PCIe controller in host
- mode. This PCIe controller may be embedded into many different vendors
- SoCs.
-
-config PCIE_CADENCE_EP
- bool "Cadence PCIe endpoint controller"
- depends on OF
- depends on PCI_ENDPOINT
- select PCIE_CADENCE
- help
- Say Y here if you want to support the Cadence PCIe controller in
- endpoint mode. This PCIe controller may be embedded into many
- different vendors SoCs.
-
-endmenu
-
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
depends on ARCH_ZYNQMP || COMPILE_TEST
@@ -135,7 +107,7 @@ config PCI_V3_SEMI
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
- depends on ARCH_VERSATILE
+ depends on ARCH_VERSATILE || COMPILE_TEST
config PCIE_IPROC
tristate
@@ -289,4 +261,5 @@ config PCI_HYPERV_INTERFACE
have a common interface with the Hyper-V PCI frontend driver.
source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/cadence/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index a2a22c9d91af..3d4f597f15ce 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
new file mode 100644
index 000000000000..b76b3cf55ce5
--- /dev/null
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Cadence PCIe controllers support"
+ depends on PCI
+
+config PCIE_CADENCE
+ bool
+
+config PCIE_CADENCE_HOST
+ bool
+ depends on OF
+ select IRQ_DOMAIN
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_EP
+ bool
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+ bool
+
+config PCIE_CADENCE_PLAT_HOST
+ bool "Cadence PCIe platform host controller"
+ depends on OF
+ select PCIE_CADENCE_HOST
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ host mode. This PCIe controller may be embedded into many different
+ vendors' SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+ bool "Cadence PCIe platform endpoint controller"
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_CADENCE_EP
+ select PCIE_CADENCE_PLAT
+ help
+ Say Y here if you want to support the Cadence PCIe platform controller in
+ endpoint mode. This PCIe controller may be embedded into many
+ different vendors' SoCs.
+
+endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
new file mode 100644
index 000000000000..232a3f20876a
--- /dev/null
+++ b/drivers/pci/controller/cadence/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index def7820cb824..1c173dad67d1 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -17,35 +17,6 @@
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-/**
- * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
- * @pcie: Cadence PCIe controller
- * @max_regions: maximum number of regions supported by hardware
- * @ob_region_map: bitmask of mapped outbound regions
- * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
- * dedicated outbound regions is mapped.
- * @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
- * IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
- * dedicated outbound region.
- * @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
- */
-struct cdns_pcie_ep {
- struct cdns_pcie pcie;
- u32 max_regions;
- unsigned long ob_region_map;
- phys_addr_t *ob_addr;
- phys_addr_t irq_phys_addr;
- void __iomem *irq_cpu_addr;
- u64 irq_pci_addr;
- u8 irq_pci_fn;
- u8 irq_pending;
-};
-
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
struct pci_epf_header *hdr)
{
@@ -424,28 +395,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
-static const struct of_device_id cdns_pcie_ep_of_match[] = {
- { .compatible = "cdns,cdns-pcie-ep" },
-
- { },
-};
-static int cdns_pcie_ep_probe(struct platform_device *pdev)
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = ep->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- struct cdns_pcie_ep *ep;
- struct cdns_pcie *pcie;
- struct pci_epc *epc;
+ struct cdns_pcie *pcie = &ep->pcie;
struct resource *res;
+ struct pci_epc *epc;
int ret;
- int phy_count;
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
- pcie = &ep->pcie;
pcie->is_rc = false;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
@@ -474,19 +434,6 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
if (!ep->ob_addr)
return -ENOMEM;
- ret = cdns_pcie_init_phy(dev, pcie);
- if (ret) {
- dev_err(dev, "failed to init phy\n");
- return ret;
- }
- platform_set_drvdata(pdev, pcie);
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- goto err_get_sync;
- }
-
/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
@@ -528,38 +475,5 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
err_init:
pm_runtime_put_sync(dev);
- err_get_sync:
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
- phy_count = pcie->phy_count;
- while (phy_count--)
- device_link_del(pcie->link[phy_count]);
-
return ret;
}
-
-static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct cdns_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_put_sync(dev);
- if (ret < 0)
- dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
- pm_runtime_disable(dev);
-
- cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_ep_driver = {
- .driver = {
- .name = "cdns-pcie-ep",
- .of_match_table = cdns_pcie_ep_of_match,
- .pm = &cdns_pcie_pm_ops,
- },
- .probe = cdns_pcie_ep_probe,
- .shutdown = cdns_pcie_ep_shutdown,
-};
-builtin_platform_driver(cdns_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 97e251090b4f..9b1c3966414b 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -11,33 +11,6 @@
#include "pcie-cadence.h"
-/**
- * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
- * @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
- * @cfg_res: start/end offsets in the physical system memory to map PCI
- * configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
- * @cfg_base: IO mapped window to access the PCI configuration space of a
- * single function at a time
- * @max_regions: maximum number of regions supported by the hardware
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- * translation (nbits sets into the "no BAR match" register)
- * @vendor_id: PCI vendor ID
- * @device_id: PCI device ID
- */
-struct cdns_pcie_rc {
- struct cdns_pcie pcie;
- struct device *dev;
- struct resource *cfg_res;
- struct resource *bus_range;
- void __iomem *cfg_base;
- u32 max_regions;
- u32 no_bar_nbits;
- u16 vendor_id;
- u16 device_id;
-};
-
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -92,11 +65,6 @@ static struct pci_ops cdns_pcie_host_ops = {
.write = pci_generic_config_write,
};
-static const struct of_device_id cdns_pcie_host_of_match[] = {
- { .compatible = "cdns,cdns-pcie-host" },
-
- { },
-};
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
@@ -136,10 +104,10 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
- struct resource *cfg_res = rc->cfg_res;
struct resource *mem_res = pcie->mem_res;
struct resource *bus_range = rc->bus_range;
- struct device *dev = rc->dev;
+ struct resource *cfg_res = rc->cfg_res;
+ struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
struct of_pci_range_parser parser;
struct of_pci_range range;
@@ -211,7 +179,7 @@ static int cdns_pcie_host_init(struct device *dev,
int err;
/* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return err;
@@ -233,25 +201,21 @@ static int cdns_pcie_host_init(struct device *dev,
return err;
}
-static int cdns_pcie_host_probe(struct platform_device *pdev)
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = rc->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct list_head resources;
- struct cdns_pcie_rc *rc;
struct cdns_pcie *pcie;
struct resource *res;
int ret;
- int phy_count;
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ bridge = pci_host_bridge_from_priv(rc);
if (!bridge)
return -ENOMEM;
- rc = pci_host_bridge_priv(bridge);
- rc->dev = dev;
-
pcie = &rc->pcie;
pcie->is_rc = true;
@@ -287,21 +251,8 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
dev_err(dev, "missing \"mem\"\n");
return -EINVAL;
}
- pcie->mem_res = res;
- ret = cdns_pcie_init_phy(dev, pcie);
- if (ret) {
- dev_err(dev, "failed to init phy\n");
- return ret;
- }
- platform_set_drvdata(pdev, pcie);
-
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync() failed\n");
- goto err_get_sync;
- }
+ pcie->mem_res = res;
ret = cdns_pcie_host_init(dev, &resources, rc);
if (ret)
@@ -326,37 +277,5 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
err_init:
pm_runtime_put_sync(dev);
- err_get_sync:
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
- phy_count = pcie->phy_count;
- while (phy_count--)
- device_link_del(pcie->link[phy_count]);
-
return ret;
}
-
-static void cdns_pcie_shutdown(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct cdns_pcie *pcie = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_put_sync(dev);
- if (ret < 0)
- dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
- pm_runtime_disable(dev);
- cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_host_driver = {
- .driver = {
- .name = "cdns-pcie-host",
- .of_match_table = cdns_pcie_host_of_match,
- .pm = &cdns_pcie_pm_ops,
- },
- .probe = cdns_pcie_host_probe,
- .shutdown = cdns_pcie_shutdown,
-};
-builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
new file mode 100644
index 000000000000..f5c6bf6dfcb8
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe platform driver.
+ *
+ * Copyright (c) 2019, Cadence Design Systems
+ * Author: Tom Joseph <tjoseph@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include "pcie-cadence.h"
+
+/**
+ * struct cdns_plat_pcie - private data for this PCIe platform driver
+ * @pcie: Cadence PCIe controller
+ * @is_rc: true if the PCIe controller operates in Root Complex mode,
+ * false if it operates in Endpoint mode.
+ */
+struct cdns_plat_pcie {
+ struct cdns_pcie *pcie;
+ bool is_rc;
+};
+
+struct cdns_plat_pcie_of_data {
+ bool is_rc;
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[];
+
+static int cdns_plat_pcie_probe(struct platform_device *pdev)
+{
+ const struct cdns_plat_pcie_of_data *data;
+ struct cdns_plat_pcie *cdns_plat_pcie;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+ int phy_count;
+ bool is_rc;
+ int ret;
+
+ match = of_match_device(cdns_plat_pcie_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_plat_pcie_of_data *)match->data;
+ is_rc = data->is_rc;
+
+ pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc);
+ cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
+ if (!cdns_plat_pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cdns_plat_pcie);
+ if (is_rc) {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ rc = pci_host_bridge_priv(bridge);
+ rc->pcie.dev = dev;
+ cdns_plat_pcie->pcie = &rc->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret)
+ goto err_init;
+ } else {
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pcie.dev = dev;
+ cdns_plat_pcie->pcie = &ep->pcie;
+ cdns_plat_pcie->is_rc = is_rc;
+
+ ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+ if (ret) {
+ dev_err(dev, "failed to init phy\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync() failed\n");
+ goto err_get_sync;
+ }
+
+ ret = cdns_pcie_ep_setup(ep);
+ if (ret)
+ goto err_init;
+ }
+
+ return 0;
+
+ err_init:
+ pm_runtime_put_sync(dev);
+
+ err_get_sync:
+ pm_runtime_disable(dev);
+ cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
+ phy_count = cdns_plat_pcie->pcie->phy_count;
+ while (phy_count--)
+ device_link_del(cdns_plat_pcie->pcie->link[phy_count]);
+
+ return ret;
+}
+
+static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+
+ cdns_pcie_disable_phy(pcie);
+}
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
+ .is_rc = true,
+};
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = {
+ .is_rc = false,
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[] = {
+ {
+ .compatible = "cdns,cdns-pcie-host",
+ .data = &cdns_plat_pcie_host_of_data,
+ },
+ {
+ .compatible = "cdns,cdns-pcie-ep",
+ .data = &cdns_plat_pcie_ep_of_data,
+ },
+ {},
+};
+
+static struct platform_driver cdns_plat_pcie_driver = {
+ .driver = {
+ .name = "cdns-pcie",
+ .of_match_table = cdns_plat_pcie_of_match,
+ .pm = &cdns_pcie_pm_ops,
+ },
+ .probe = cdns_plat_pcie_probe,
+ .shutdown = cdns_plat_pcie_shutdown,
+};
+builtin_platform_driver(cdns_plat_pcie_driver);
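The glue driver above selects Root Complex or Endpoint mode purely from the matched compatible string, carried in the .data pointer of the of_device_id table. The of_match_device() call plus a forward-declared table works; the same lookup can be written more compactly with of_device_get_match_data(), as the pci-meson.c changes later in this diff do. A hedged sketch, where setup_host()/setup_endpoint() stand in for the cdns_pcie_host_setup()/cdns_pcie_ep_setup() paths:

    static int example_probe(struct platform_device *pdev)
    {
            const struct cdns_plat_pcie_of_data *data;

            /* returns the .data of the matching of_device_id entry */
            data = of_device_get_match_data(&pdev->dev);
            if (!data)
                    return -EINVAL;

            return data->is_rc ? setup_host(pdev) : setup_endpoint(pdev);
    }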
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index cd795f6fc1e2..cd795f6fc1e2 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index ae6bf2a2b3d3..a2b28b912ca4 100644
--- a/drivers/pci/controller/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (c) 2017 Cadence
// Cadence PCIe controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
@@ -190,6 +190,8 @@ enum cdns_pcie_rp_bar {
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+struct cdns_pcie;
+
enum cdns_pcie_msg_code {
MSG_CODE_ASSERT_INTA = 0x20,
MSG_CODE_ASSERT_INTB = 0x21,
@@ -231,13 +233,71 @@ enum cdns_pcie_msg_routing {
struct cdns_pcie {
void __iomem *reg_base;
struct resource *mem_res;
+ struct device *dev;
bool is_rc;
u8 bus;
int phy_count;
struct phy **phy;
struct device_link **link;
+ const struct cdns_pcie_common_ops *ops;
+};
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @dev: pointer to PCIe device
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ * configuration space accesses
+ * @bus_range: first/last buses behind the PCIe host controller
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ * single function at a time
+ * @max_regions: maximum number of regions supported by the hardware
+ * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
+ * translation (nbits sets into the "no BAR match" register)
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ */
+struct cdns_pcie_rc {
+ struct cdns_pcie pcie;
+ struct resource *cfg_res;
+ struct resource *bus_range;
+ void __iomem *cfg_base;
+ u32 max_regions;
+ u32 no_bar_nbits;
+ u16 vendor_id;
+ u16 device_id;
};
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * dedicated outbound regions is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ * the sending of a memory write (MSI) / normal message (legacy
+ * IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ * the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct cdns_pcie_ep {
+ struct cdns_pcie pcie;
+ u32 max_regions;
+ unsigned long ob_region_map;
+ phys_addr_t *ob_addr;
+ phys_addr_t irq_phys_addr;
+ void __iomem *irq_cpu_addr;
+ u64 irq_pci_addr;
+ u8 irq_pci_fn;
+ u8 irq_pending;
+};
+
+
/* Register access */
static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
{
@@ -306,6 +366,23 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
+#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+#else
+static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_CADENCE_EP
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+#else
+static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+#endif
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
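The #ifdef/static-inline pairs above are the usual kernel idiom for an optional library: when the config symbol is off, callers still compile against a no-op stub, and the IS_ENABLED() checks in cdns_plat_pcie_probe() let the compiler discard the dead branch entirely. The idiom in generic form, with placeholder names:

    #ifdef CONFIG_FOO_HOST
    int foo_host_setup(struct foo *foo);
    #else
    static inline int foo_host_setup(struct foo *foo)
    {
            return 0;    /* compiled out; callers guard with IS_ENABLED() */
    }
    #endif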
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 0ba988b5b5bc..625a031b2193 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -7,9 +7,9 @@ config PCIE_DW
bool
config PCIE_DW_HOST
- bool
+ bool
depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW
+ select PCIE_DW
config PCIE_DW_EP
bool
@@ -224,7 +224,7 @@ config PCIE_HISI_STB
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
config PCI_MESON
bool "MESON PCIe controller"
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 4234ddb4722f..b20651cea09f 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -353,7 +353,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index ca9aa4501e7e..0d151cead1b7 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -58,7 +58,7 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3a5fa26d5e56..f24f79a70d9a 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -263,6 +263,7 @@ static const struct ls_pcie_drvdata ls2088_drvdata = {
static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index e35e9eaa50ee..3772b02a5c55 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -16,6 +16,7 @@
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
+#include <linux/phy/phy.h>
#include "pcie-designware.h"
@@ -96,12 +97,18 @@ struct meson_pcie_rc_reset {
struct reset_control *apb;
};
+struct meson_pcie_param {
+ bool has_shared_phy;
+};
+
struct meson_pcie {
struct dw_pcie pci;
struct meson_pcie_mem_res mem_res;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
+ struct phy *phy;
+ const struct meson_pcie_param *param;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -123,10 +130,12 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
+ if (!mp->param->has_shared_phy) {
+ mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+ if (IS_ERR(mrst->phy))
+ return PTR_ERR(mrst->phy);
+ reset_control_deassert(mrst->phy);
+ }
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
@@ -180,27 +189,52 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
if (IS_ERR(mp->mem_res.cfg_base))
return PTR_ERR(mp->mem_res.cfg_base);
- /* Meson SoC has two PCI controllers use same phy register*/
- mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
+ /* Meson AXG SoC has two PCIe controllers sharing the same PHY register */
+ if (!mp->param->has_shared_phy) {
+ mp->mem_res.phy_base =
+ meson_pcie_get_mem_shared(pdev, mp, "phy");
+ if (IS_ERR(mp->mem_res.phy_base))
+ return PTR_ERR(mp->mem_res.phy_base);
+ }
return 0;
}
-static void meson_pcie_power_on(struct meson_pcie *mp)
+static int meson_pcie_power_on(struct meson_pcie *mp)
{
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ int ret = 0;
+
+ if (mp->param->has_shared_phy) {
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
+ } else
+ writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+
+ return 0;
}
-static void meson_pcie_reset(struct meson_pcie *mp)
+static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
-
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
+ int ret = 0;
+
+ if (mp->param->has_shared_phy) {
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
+ } else {
+ reset_control_assert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ reset_control_deassert(mrst->phy);
+ udelay(PCIE_RESET_DELAY);
+ }
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -208,6 +242,8 @@ static void meson_pcie_reset(struct meson_pcie *mp)
reset_control_deassert(mrst->port);
reset_control_deassert(mrst->apb);
udelay(PCIE_RESET_DELAY);
+
+ return 0;
}
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
@@ -250,15 +286,17 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
+ if (!mp->param->has_shared_phy) {
+ res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+ if (IS_ERR(res->mipi_gate))
+ return PTR_ERR(res->mipi_gate);
+ }
- res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
- res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
if (IS_ERR(res->clk))
return PTR_ERR(res->clk);
@@ -287,9 +325,9 @@ static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
static void meson_pcie_assert_reset(struct meson_pcie *mp)
{
- gpiod_set_value_cansleep(mp->reset_gpio, 0);
- udelay(500);
gpiod_set_value_cansleep(mp->reset_gpio, 1);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
static void meson_pcie_init_dw(struct meson_pcie *mp)
@@ -524,6 +562,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int meson_pcie_probe(struct platform_device *pdev)
{
+ const struct meson_pcie_param *match_data;
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct meson_pcie *mp;
@@ -537,6 +576,19 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ match_data = of_device_get_match_data(dev);
+ if (!match_data) {
+ dev_err(dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+ mp->param = match_data;
+
+ if (mp->param->has_shared_phy) {
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy))
+ return PTR_ERR(mp->phy);
+ }
+
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(mp->reset_gpio)) {
dev_err(dev, "get reset gpio failed\n");
@@ -555,13 +607,22 @@ static int meson_pcie_probe(struct platform_device *pdev)
return ret;
}
- meson_pcie_power_on(mp);
- meson_pcie_reset(mp);
+ ret = meson_pcie_power_on(mp);
+ if (ret) {
+ dev_err(dev, "phy power on failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_reset(mp);
+ if (ret) {
+ dev_err(dev, "reset failed, %d\n", ret);
+ goto err_phy;
+ }
ret = meson_pcie_probe_clocks(mp);
if (ret) {
dev_err(dev, "init clock resources failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
platform_set_drvdata(pdev, mp);
@@ -569,15 +630,36 @@ static int meson_pcie_probe(struct platform_device *pdev)
ret = meson_add_pcie_port(mp, pdev);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
return 0;
+
+err_phy:
+ if (mp->param->has_shared_phy) {
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
+ }
+
+ return ret;
}
+static struct meson_pcie_param meson_pcie_axg_param = {
+ .has_shared_phy = false,
+};
+
+static struct meson_pcie_param meson_pcie_g12a_param = {
+ .has_shared_phy = true,
+};
+
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
+ .data = &meson_pcie_axg_param,
+ },
+ {
+ .compatible = "amlogic,g12a-pcie",
+ .data = &meson_pcie_g12a_param,
},
{},
};
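A note on the reset-GPIO hunk earlier in this file: gpiod_set_value_cansleep() takes logical levels, and any active-low polarity is applied from the GPIO's device-tree flags. Assuming PERST# is described as active-low there, the corrected sequence asserts reset with 1, waits, then releases with 0:

    /* sketch: logical values, not wire levels */
    gpiod_set_value_cansleep(mp->reset_gpio, 1);    /* assert PERST# */
    udelay(500);
    gpiod_set_value_cansleep(mp->reset_gpio, 0);    /* release PERST# */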
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index d00252bd8fae..9e2482bd7b6d 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -422,7 +422,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_wait_for_phy(artpec6_pcie);
artpec6_pcie_set_nfts(artpec6_pcie);
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 0f36a926059a..395feb8ca051 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -10,6 +10,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
@@ -78,7 +79,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
int i, pos, irq;
- u32 val, num_ctrls;
+ unsigned long val;
+ u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
@@ -86,14 +88,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &val);
- if (!val)
+ 4, &status);
+ if (!status)
continue;
ret = IRQ_HANDLED;
+ val = status;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val,
- MAX_MSI_IRQS_PER_CTRL,
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
irq = irq_find_mapping(pp->irq_domain,
(i * MAX_MSI_IRQS_PER_CTRL) +
@@ -319,7 +321,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource_entry *win, *tmp;
+ struct resource_entry *win;
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
@@ -342,31 +344,20 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (!bridge)
return -ENOMEM;
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &bridge->windows, &pp->io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &bridge->windows);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret)
return ret;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
+ resource_list_for_each_entry(win, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- ret = devm_pci_remap_iospace(dev, win->res,
- pp->io_base);
- if (ret) {
- dev_warn(dev, "Error %d: failed to map resource %pR\n",
- ret, win->res);
- resource_list_destroy_entry(win);
- } else {
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- }
+ pp->io = win->res;
+ pp->io->name = "I/O";
+ pp->io_size = resource_size(pp->io);
+ pp->io_bus_addr = pp->io->start - win->offset;
+ pp->io_base = pci_pio_to_address(pp->io->start);
break;
case IORESOURCE_MEM:
pp->mem = win->res;
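One hunk in this file deserves unpacking: dw_handle_msi_irq() used to hand a u32's address to find_next_bit() through an (unsigned long *) cast. find_next_bit() scans whole unsigned long words, which are 8 bytes on 64-bit targets, so the old code also scanned 4 bytes of adjacent stack. The fix widens the value by assignment instead. A standalone userspace demonstration (the cast is deliberately the buggy pattern, and is itself an alignment/aliasing violation, which is part of why the patch removes it):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            struct {
                    uint32_t status;     /* the "register": no bits set */
                    uint32_t neighbor;   /* adjacent memory: all bits set */
            } s = { 0x0, 0xffffffff };

            /* buggy view: on 64-bit this spans both fields, so a bit
             * search would find bits that are not in the register */
            unsigned long *bad = (unsigned long *)&s.status;
            printf("bad view:  %#lx\n", *bad);

            /* fixed pattern: widen by assignment, scan only real bits */
            unsigned long val = s.status;
            printf("good view: %#lx\n", val);
            return 0;
    }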
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index b58fdcbc664b..73646b677aff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -70,7 +70,7 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 5a18e94e52c8..5accdd6bc388 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -214,7 +214,7 @@ struct dw_pcie_ep {
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
- u8 bar_to_atu[6];
+ u8 bar_to_atu[PCI_STD_NUM_BARS];
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index f89f5acee72d..cbe95f0ea0ca 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -40,8 +40,6 @@
#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10)
#define APPL_CTRL 0x4
#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
@@ -1193,8 +1191,8 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
if (!pcie->supports_clkreq) {
val = appl_readl(pcie, APPL_PINMUX);
- val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
- val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
+ val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
appl_writel(pcie, val, APPL_PINMUX);
}
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 3f30ee4a00b3..8fd7badd59c2 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -33,6 +33,10 @@
#define PCL_PIPEMON 0x0044
#define PCL_PCLK_ALIVE BIT(15)
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
#define PCL_APP_READY_CTRL 0x8008
#define PCL_APP_LTSSM_ENABLE BIT(0)
@@ -85,6 +89,12 @@ static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
{
u32 val;
+ /* set RC MODE */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN;
+ val &= ~PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
/* use auxiliary power detection */
val = readl(priv->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index fc0fe4d4de49..2a20b649f40c 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -16,6 +16,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -175,18 +176,20 @@
(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
-#define PIO_TIMEOUT_MS 1
+#define PIO_RETRY_CNT 500
+#define PIO_RETRY_DELAY 2 /* 2 us */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000
+#define RETRAIN_WAIT_MAX_RETRIES 10
+#define RETRAIN_WAIT_USLEEP_US 2000
#define MSI_IRQ_NUM 32
struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
- struct list_head resources;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
struct irq_domain *msi_domain;
@@ -239,6 +242,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+ size_t retries;
+
+ for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+ if (!advk_pcie_link_up(pcie))
+ break;
+ udelay(RETRAIN_WAIT_USLEEP_US);
+ }
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
@@ -324,6 +338,14 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
+ /*
+ * PERST# signal could have been asserted by pinctrl subsystem before
+ * probe() callback has been called, putting the endpoint into
+ * fundamental reset. The PCI Express spec requires a delay of at
+ * least 100ms after such a reset before link training can start.
+ */
+ msleep(PCI_PM_D3COLD_WAIT);
+
/* Start link training */
reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
reg |= PCIE_CORE_LINK_TRAINING;
@@ -383,17 +405,16 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+ int i;
- while (time_before(jiffies, timeout)) {
+ for (i = 0; i < PIO_RETRY_CNT; i++) {
u32 start, isr;
start = advk_readl(pcie, PIO_START);
isr = advk_readl(pcie, PIO_ISR);
if (!start && isr)
return 0;
+ udelay(PIO_RETRY_DELAY);
}
dev_err(dev, "config read/write timed out\n");
@@ -415,7 +436,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
- *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -426,11 +447,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
+ case PCI_EXP_LNKCTL: {
+ /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+ ~(PCI_EXP_LNKSTA_LT << 16);
+ if (!advk_pcie_link_up(pcie))
+ val |= (PCI_EXP_LNKSTA_LT << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_LNKCAP:
- case PCI_EXP_LNKCTL:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
default:
@@ -447,14 +477,24 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_EXP_DEVCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
case PCI_EXP_LNKCTL:
advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ if (new & PCI_EXP_LNKCTL_RL)
+ advk_pcie_wait_for_retrain(pcie);
break;
- case PCI_EXP_RTCTL:
- new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
- advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ case PCI_EXP_RTCTL: {
+ /* Only mask/unmask PME interrupt */
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+ ~PCIE_MSG_PM_PME_MASK;
+ if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+ val |= PCIE_MSG_PM_PME_MASK;
+ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
break;
+ }
case PCI_EXP_RTSTA:
new = (new & PCI_EXP_RTSTA_PME) >> 9;
@@ -479,18 +519,20 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
- bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
- bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+ bridge->conf.vendor =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+ bridge->conf.device =
+ cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
bridge->conf.class_revision =
- advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+ cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
/* Support 32 bits I/O addressing */
bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
/* Support 64 bits memory pref */
- bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
- bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+ bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+ bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
@@ -910,63 +952,11 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
-{
- int err, res_valid = 0;
- struct device *dev = &pcie->pdev->dev;
- struct resource_entry *win, *tmp;
- resource_size_t iobase;
-
- INIT_LIST_HEAD(&pcie->resources);
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
- struct resource *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- break;
- case IORESOURCE_BUS:
- pcie->root_bus_nr = res->start;
- break;
- }
- }
-
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
-
- return 0;
-
-out_release_res:
- pci_free_resource_list(&pcie->resources);
- return err;
-}
-
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
- struct resource *res;
+ struct resource *res, *bus;
struct pci_host_bridge *bridge;
int ret, irq;
@@ -991,11 +981,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, &bus);
if (ret) {
dev_err(dev, "Failed to parse resources\n");
return ret;
}
+ pcie->root_bus_nr = bus->start;
advk_pcie_setup_hw(pcie);
@@ -1014,7 +1006,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = 0;
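Among the aardvark changes, the PIO wait loop is converted from a jiffies deadline to a fixed retry count with udelay(). Config accesses typically run under the config-space spinlock with interrupts disabled, where jiffies cannot advance, so a time_before(jiffies, timeout) loop may never expire there; counting iterations bounds the worst case at PIO_RETRY_CNT * PIO_RETRY_DELAY = 500 * 2 us = 1 ms. The bounded-poll shape, with example_operation_done() as an illustrative stand-in for the PIO_START/PIO_ISR check:

    static int example_wait_done(struct advk_pcie *pcie)
    {
            int i;

            for (i = 0; i < PIO_RETRY_CNT; i++) {
                    if (example_operation_done(pcie))
                            return 0;
                    udelay(PIO_RETRY_DELAY);    /* 2 us per attempt */
            }
            return -ETIMEDOUT;                  /* ~1 ms worst case */
    }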
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index bf5ece5d9291..1b67564de7af 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -375,12 +375,11 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
-static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
- struct device_node *np)
+static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
struct device *dev = p->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(p);
+ struct resource_entry *entry;
u32 confreg[3] = {
FARADAY_PCI_MEM1_BASE_SIZE,
FARADAY_PCI_MEM2_BASE_SIZE,
@@ -389,19 +388,13 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /*
- * Get the dma-ranges from the device tree
- */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.pci_addr + range.size - 1;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pci_addr = entry->res->start - entry->offset;
+ u64 end = entry->res->end - entry->offset;
int ret;
- ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val);
+ ret = faraday_res_to_memcfg(pci_addr,
+ resource_size(entry->res), &val);
if (ret) {
dev_err(dev,
"DMA range %d: illegal MEM resource size\n", i);
@@ -409,7 +402,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
}
dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
- i + 1, range.pci_addr, end, val);
+ i + 1, pci_addr, end, val);
if (i <= 2) {
faraday_raw_pci_write_config(p, 0, 0, confreg[i],
4, val);
@@ -430,10 +423,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
const struct faraday_pci_variant *variant =
of_device_get_match_data(dev);
struct resource *regs;
- resource_size_t io_base;
struct resource_entry *win;
struct faraday_pci *p;
- struct resource *mem;
struct resource *io;
struct pci_host_bridge *host;
struct clk *clk;
@@ -441,7 +432,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
unsigned char cur_bus_speed = PCI_SPEED_33MHz;
int ret;
u32 val;
- LIST_HEAD(res);
host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
if (!host)
@@ -480,44 +470,21 @@ static int faraday_pci_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &res, &io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
+ &host->dma_ranges, NULL);
if (ret)
return ret;
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "Gemini PCI I/O";
- if (!faraday_res_to_memcfg(io->start - win->offset,
- resource_size(io), &val)) {
- /* setup I/O space size */
- writel(val, p->base + PCI_IOSIZE);
- } else {
- dev_err(dev, "illegal IO mem size\n");
- return -EINVAL;
- }
- ret = devm_pci_remap_iospace(dev, io, io_base);
- if (ret) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- ret, io);
- continue;
- }
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "Gemini PCI MEM";
- break;
- case IORESOURCE_BUS:
- break;
- default:
- break;
+ win = resource_list_first_type(&host->windows, IORESOURCE_IO);
+ if (win) {
+ io = win->res;
+ if (!faraday_res_to_memcfg(io->start - win->offset,
+ resource_size(io), &val)) {
+ /* setup I/O space size */
+ writel(val, p->base + PCI_IOSIZE);
+ } else {
+ dev_err(dev, "illegal IO mem size\n");
+ return -EINVAL;
}
}
@@ -565,11 +532,10 @@ static int faraday_pci_probe(struct platform_device *pdev)
cur_bus_speed = PCI_SPEED_66MHz;
}
- ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
+ ret = faraday_pci_parse_map_dma_ranges(p);
if (ret)
return ret;
- list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to scan host: %d\n", ret);
@@ -581,7 +547,6 @@ static int faraday_pci_probe(struct platform_device *pdev)
pci_bus_assign_resources(p->bus);
pci_bus_add_devices(p->bus);
- pci_free_resource_list(&res);
return 0;
}
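The faraday conversion is representative of how this series handles dma-ranges: rather than each driver running its own of_pci_range parser, pci_parse_request_of_pci_ranges() fills bridge->dma_ranges with resource entries, and the PCI-side address of a range is recovered from the CPU-side resource via the entry's offset (defined as cpu_addr - pci_addr). A sketch of consuming the list:

    struct resource_entry *entry;

    resource_list_for_each_entry(entry, &bridge->dma_ranges) {
            u64 cpu_addr = entry->res->start;
            u64 pci_addr = cpu_addr - entry->offset;  /* offset = cpu - pci */
            u64 size = resource_size(entry->res);

            /* program one inbound window: pci_addr -> cpu_addr, size */
    }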
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index c8cb9c5188a4..250a3fc80ec6 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -27,7 +27,7 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
struct pci_config_window *cfg;
/* Parse our PCI ranges and request their resources */
- err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range);
if (err)
return ERR_PTR(err);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index f1f300218fab..9977abff92fc 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -76,11 +76,6 @@ static enum pci_protocol_version_t pci_protocol_versions[] = {
PCI_PROTOCOL_VERSION_1_1,
};
-/*
- * Protocol version negotiated by hv_pci_protocol_negotiation().
- */
-static enum pci_protocol_version_t pci_protocol_version;
-
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -307,7 +302,7 @@ struct pci_bus_relations {
struct pci_q_res_req_response {
struct vmpacket_descriptor hdr;
s32 status; /* negative values are failures */
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;
struct pci_set_power {
@@ -455,12 +450,15 @@ enum hv_pcibus_state {
hv_pcibus_init = 0,
hv_pcibus_probed,
hv_pcibus_installed,
+ hv_pcibus_removing,
hv_pcibus_removed,
hv_pcibus_maximum
};
struct hv_pcibus_device {
struct pci_sysdata sysdata;
+ /* Protocol version negotiated with the host */
+ enum pci_protocol_version_t protocol_version;
enum hv_pcibus_state state;
refcount_t remove_lock;
struct hv_device *hdev;
@@ -539,7 +537,7 @@ struct hv_pci_dev {
* What would be observed if one wrote 0xFFFFFFFF to a BAR and then
* read it back, for each of the BAR offsets within config space.
*/
- u32 probed_bar[6];
+ u32 probed_bar[PCI_STD_NUM_BARS];
};
struct hv_pci_compl {
@@ -1224,7 +1222,7 @@ static void hv_irq_unmask(struct irq_data *data)
* negative effect (yet?).
*/
- if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
* HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
@@ -1394,7 +1392,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
ctxt.pci_pkt.compl_ctxt = &comp;
- switch (pci_protocol_version) {
+ switch (hbus->protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
@@ -1610,7 +1608,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
* so it's sufficient to just add them up without tracking alignment.
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
dev_err(&hbus->hdev->device,
"There's an I/O BAR in this list!\n");
@@ -1681,10 +1679,27 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_lock_irqsave(&hbus->device_list_lock, flags);
+ /*
+ * Clear the memory enable bit, in case it's already set. This occurs
+ * in the suspend path of hibernation, where the device is suspended,
+ * resumed and suspended again: see hibernation_snapshot() and
+ * hibernation_platform_enter().
+ *
+ * If the memory enable bit is already set, Hyper-V silently ignores
+ * the BAR updates below, and the related PCI device driver cannot
+ * work, because reading from the device register(s) always returns
+ * 0xFFFFFFFF.
+ */
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
+ }
+
/* Pick addresses for the BARs. */
do {
list_for_each_entry(hpdev, &hbus->children, list_entry) {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_val = hpdev->probed_bar[i];
if (bar_val == 0)
continue;
@@ -1841,7 +1856,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
"query resource requirements failed: %x\n",
resp->status);
} else {
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
completion->hpdev->probed_bar[i] =
q_res_req->probed_bar[i];
}
@@ -2107,6 +2122,12 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
unsigned long flags;
bool pending_dr;
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hbus->hdev->device,
+ "PCI VMBus BUS_RELATIONS: ignored\n");
+ return;
+ }
+
dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
if (!dr_wrk)
return;
@@ -2223,11 +2244,19 @@ static void hv_eject_device_work(struct work_struct *work)
*/
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
+ struct hv_pcibus_device *hbus = hpdev->hbus;
+ struct hv_device *hdev = hbus->hdev;
+
+ if (hbus->state == hv_pcibus_removing) {
+ dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
+ return;
+ }
+
hpdev->state = hv_pcichild_ejecting;
get_pcichild(hpdev);
INIT_WORK(&hpdev->wrk, hv_eject_device_work);
- get_hvpcibus(hpdev->hbus);
- queue_work(hpdev->hbus->wq, &hpdev->wrk);
+ get_hvpcibus(hbus);
+ queue_work(hbus->wq, &hpdev->wrk);
}
/**
@@ -2379,8 +2408,11 @@ static void hv_pci_onchannelcallback(void *context)
* failing if the host doesn't support the necessary protocol
* level.
*/
-static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+static int hv_pci_protocol_negotiation(struct hv_device *hdev,
+ enum pci_protocol_version_t version[],
+ int num_version)
{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_version_request *version_req;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
@@ -2403,8 +2435,8 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
version_req = (struct pci_version_request *)&pkt->message;
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
- for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
- version_req->protocol_version = pci_protocol_versions[i];
+ for (i = 0; i < num_version; i++) {
+ version_req->protocol_version = version[i];
ret = vmbus_sendpacket(hdev->channel, version_req,
sizeof(struct pci_version_request),
(unsigned long)pkt, VM_PKT_DATA_INBAND,
@@ -2420,10 +2452,10 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
}
if (comp_pkt.completion_status >= 0) {
- pci_protocol_version = pci_protocol_versions[i];
+ hbus->protocol_version = version[i];
dev_info(&hdev->device,
"PCI VMBus probing: Using version %#x\n",
- pci_protocol_version);
+ hbus->protocol_version);
goto exit;
}
@@ -2707,7 +2739,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
u32 wslot;
int ret;
- size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+ size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
? sizeof(*res_assigned) : sizeof(*res_assigned2);
pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
@@ -2726,7 +2758,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+ if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
(struct pci_resources_assigned *)&pkt->message;
res_assigned->message_type.type =
@@ -2870,9 +2902,27 @@ static int hv_pci_probe(struct hv_device *hdev,
* hv_pcibus_device contains the hypercall arguments for retargeting in
* hv_irq_unmask(). Those must not cross a page boundary.
*/
- BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+ BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
- hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
+ /*
+ * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
+ * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
+ * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
+ * alignment of hbus is important because hbus's field
+ * retarget_msi_interrupt_params must not cross a 4KB page boundary.
+ *
+ * Here we prefer kzalloc() to get_zeroed_page(), because a buffer
+ * allocated by the latter is not tracked and scanned by kmemleak, and
+ * hence kmemleak reports the pointer contained in the hbus buffer
+ * (i.e. the hpdev struct, which is created in new_pcichild_device() and
+ * is tracked by hbus->children) as a memory leak (false positive).
+ *
+ * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
+ * used to allocate the hbus buffer, and the kmemleak false positive
+ * can be avoided by using kmemleak_alloc() and kmemleak_free() to ask
+ * kmemleak to track and scan the hbus buffer.
+ */
+ hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
@@ -2930,7 +2980,8 @@ static int hv_pci_probe(struct hv_device *hdev,
hv_set_drvdata(hdev, hbus);
- ret = hv_pci_protocol_negotiation(hdev);
+ ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
+ ARRAY_SIZE(pci_protocol_versions));
if (ret)
goto close;
@@ -3011,7 +3062,7 @@ free_bus:
return ret;
}
-static void hv_pci_bus_exit(struct hv_device *hdev)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct {
@@ -3027,16 +3078,20 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
* access the per-channel ringbuffer any longer.
*/
if (hdev->channel->rescind)
- return;
+ return 0;
- /* Delete any children which might still exist. */
- memset(&relations, 0, sizeof(relations));
- hv_pci_devices_present(hbus, &relations);
+ if (!hibernating) {
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+ }
ret = hv_send_resources_released(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->device,
"Couldn't send resources released packet(s)\n");
+ return ret;
+ }
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
@@ -3049,8 +3104,13 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
(unsigned long)&pkt.teardown_packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (!ret)
- wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+ if (ret)
+ return ret;
+
+ if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
+ return -ETIMEDOUT;
+
+ return 0;
}
/**
@@ -3062,6 +3122,7 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
static int hv_pci_remove(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus;
+ int ret;
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
@@ -3074,7 +3135,7 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus->state = hv_pcibus_removed;
}
- hv_pci_bus_exit(hdev);
+ ret = hv_pci_bus_exit(hdev, false);
vmbus_close(hdev->channel);
@@ -3090,10 +3151,97 @@ static int hv_pci_remove(struct hv_device *hdev)
hv_put_dom_num(hbus->sysdata.domain);
- free_page((unsigned long)hbus);
+ kfree(hbus);
+ return ret;
+}
+
+static int hv_pci_suspend(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum hv_pcibus_state old_state;
+ int ret;
+
+ /*
+ * hv_pci_suspend() must make sure there are no pending work items
+ * before calling vmbus_close(), since it runs in a process context
+ * as a callback in dpm_suspend(). When it starts to run, the channel
+ * callback hv_pci_onchannelcallback(), which runs in a tasklet
+ * context, can still be running concurrently and scheduling new work
+ * items onto hbus->wq in hv_pci_devices_present() and
+ * hv_pci_eject_device(), and the work item handlers can access the
+ * vmbus channel, which hv_pci_suspend() may be closing in parallel, e.g.
+ * the work item handler pci_devices_present_work() ->
+ * new_pcichild_device() writes to the vmbus channel.
+ *
+ * To eliminate the race, hv_pci_suspend() disables the channel
+ * callback tasklet, sets hbus->state to hv_pcibus_removing, and
+ * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
+ * it knows that no new work item can be scheduled, and then it flushes
+ * hbus->wq and safely closes the vmbus channel.
+ */
+ tasklet_disable(&hdev->channel->callback_event);
+
+ /* Change the hbus state to prevent new work items. */
+ old_state = hbus->state;
+ if (hbus->state == hv_pcibus_installed)
+ hbus->state = hv_pcibus_removing;
+
+ tasklet_enable(&hdev->channel->callback_event);
+
+ if (old_state != hv_pcibus_installed)
+ return -EINVAL;
+
+ flush_workqueue(hbus->wq);
+
+ ret = hv_pci_bus_exit(hdev, true);
+ if (ret)
+ return ret;
+
+ vmbus_close(hdev->channel);
+
return 0;
}
+static int hv_pci_resume(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ enum pci_protocol_version_t version[1];
+ int ret;
+
+ hbus->state = hv_pcibus_init;
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ return ret;
+
+ /* Only use the version that was in use before hibernation. */
+ version[0] = hbus->protocol_version;
+ ret = hv_pci_protocol_negotiation(hdev, version, 1);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto out;
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto out;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto out;
+
+ prepopulate_bars(hbus);
+
+ hbus->state = hv_pcibus_installed;
+ return 0;
+out:
+ vmbus_close(hdev->channel);
+ return ret;
+}
+
static const struct hv_vmbus_device_id hv_pci_id_table[] = {
/* PCI Pass-through Class ID */
/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
@@ -3108,6 +3256,8 @@ static struct hv_driver hv_pci_drv = {
.id_table = hv_pci_id_table,
.probe = hv_pci_probe,
.remove = hv_pci_remove,
+ .suspend = hv_pci_suspend,
+ .resume = hv_pci_resume,
};
static void __exit exit_hv_pci_drv(void)
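The long comment above hv_pci_probe() only describes the fallback for kernels that predate 59bb47985c1d; it is not part of the patch. A minimal sketch of that fallback, under the stated assumption that kzalloc() cannot guarantee page alignment there, would keep the page allocator and add explicit kmemleak tracking:

    /* Sketch, not the patch: pre-59bb47985c1d allocation path. */
    #include <linux/gfp.h>
    #include <linux/kmemleak.h>

    hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
    if (!hbus)
            return -ENOMEM;
    /* get_zeroed_page() memory is invisible to kmemleak; register it
     * so pointers stored in hbus (e.g. hbus->children) are scanned. */
    kmemleak_alloc(hbus, HV_HYP_PAGE_SIZE, 1, GFP_KERNEL);

    /* ...and mirrored on the remove path: */
    kmemleak_free(hbus);
    free_page((unsigned long)hbus);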
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index d3a0419e42f2..153a64676bc9 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -554,7 +554,7 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
}
-struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
.write_base = mvebu_pci_bridge_emul_base_conf_write,
.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
@@ -713,7 +713,7 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
ret = of_address_to_resource(np, 0, &regs);
if (ret)
- return ERR_PTR(ret);
+ return (void __iomem *)ERR_PTR(ret);
return devm_ioremap_resource(&pdev->dev, &regs);
}
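The added cast only silences sparse: ERR_PTR() returns a plain void *, while the function's return type carries the __iomem address-space annotation. The error-pointer idiom at the call site is unchanged; an illustrative (not verbatim) caller:

    void __iomem *base;

    base = mvebu_pcie_map_registers(pdev, np, port);
    if (IS_ERR(base))
            return PTR_ERR(base);  /* IS_ERR()/PTR_ERR() see through the cast */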
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index f127ce8bd4ef..9491e266b1ea 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci-acpi.h>
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index d219404bad92..bd05221f5a22 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -241,10 +241,8 @@ struct v3_pci {
void __iomem *config_base;
struct pci_bus *bus;
u32 config_mem;
- u32 io_mem;
u32 non_pre_mem;
u32 pre_mem;
- phys_addr_t io_bus_addr;
phys_addr_t non_pre_bus_addr;
phys_addr_t pre_bus_addr;
struct regmap *map;
@@ -520,35 +518,22 @@ static int v3_integrator_init(struct v3_pci *v3)
}
static int v3_pci_setup_resource(struct v3_pci *v3,
- resource_size_t io_base,
struct pci_host_bridge *host,
struct resource_entry *win)
{
struct device *dev = v3->dev;
struct resource *mem;
struct resource *io;
- int ret;
switch (resource_type(win->res)) {
case IORESOURCE_IO:
io = win->res;
- io->name = "V3 PCI I/O";
- v3->io_mem = io_base;
- v3->io_bus_addr = io->start - win->offset;
- dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
- io, &v3->io_bus_addr);
- ret = devm_pci_remap_iospace(dev, io, io_base);
- if (ret) {
- dev_warn(dev,
- "error %d: failed to map resource %pR\n",
- ret, io);
- return ret;
- }
+
/* Setup window 2 - PCI I/O */
- writel(v3_addr_to_lb_base2(v3->io_mem) |
+ writel(v3_addr_to_lb_base2(pci_pio_to_address(io->start)) |
V3_LB_BASE2_ENABLE,
v3->base + V3_LB_BASE2);
- writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+ writew(v3_addr_to_lb_map2(io->start - win->offset),
v3->base + V3_LB_MAP2);
break;
case IORESOURCE_MEM:
@@ -613,28 +598,30 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
}
static int v3_get_dma_range_config(struct v3_pci *v3,
- struct of_pci_range *range,
+ struct resource_entry *entry,
u32 *pci_base, u32 *pci_map)
{
struct device *dev = v3->dev;
- u64 cpu_end = range->cpu_addr + range->size - 1;
- u64 pci_end = range->pci_addr + range->size - 1;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_end = cpu_end - entry->offset;
+ u64 pci_addr = entry->res->start - entry->offset;
u32 val;
- if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ if (pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
return -EINVAL;
}
- val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ val = ((u32)pci_addr) & V3_PCI_BASE_M_ADR_BASE;
*pci_base = val;
- if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ if (cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
return -EINVAL;
}
- val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+ val = ((u32)cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
- switch (range->size) {
+ switch (resource_size(entry->res)) {
case SZ_1M:
val |= V3_LB_BASE_ADR_SIZE_1MB;
break;
@@ -682,8 +669,8 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
dev_dbg(dev,
"DMA MEM CPU: 0x%016llx -> 0x%016llx => "
"PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
- range->cpu_addr, cpu_end,
- range->pci_addr, pci_end,
+ cpu_addr, cpu_end,
+ pci_addr, pci_end,
*pci_base, *pci_map);
return 0;
@@ -692,24 +679,16 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
struct device_node *np)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(v3);
struct device *dev = v3->dev;
+ struct resource_entry *entry;
int i = 0;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /*
- * Get the dma-ranges from the device tree
- */
- for_each_of_pci_range(&parser, &range) {
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
int ret;
u32 pci_base, pci_map;
- ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+ ret = v3_get_dma_range_config(v3, entry, &pci_base, &pci_map);
if (ret)
return ret;
@@ -732,7 +711,6 @@ static int v3_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- resource_size_t io_base;
struct resource *regs;
struct resource_entry *win;
struct v3_pci *v3;
@@ -741,7 +719,6 @@ static int v3_pci_probe(struct platform_device *pdev)
u16 val;
int irq;
int ret;
- LIST_HEAD(res);
host = pci_alloc_host_bridge(sizeof(*v3));
if (!host)
@@ -793,12 +770,8 @@ static int v3_pci_probe(struct platform_device *pdev)
if (IS_ERR(v3->config_base))
return PTR_ERR(v3->config_base);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &io_base);
- if (ret)
- return ret;
-
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = pci_parse_request_of_pci_ranges(dev, &host->windows,
+ &host->dma_ranges, NULL);
if (ret)
return ret;
@@ -852,8 +825,8 @@ static int v3_pci_probe(struct platform_device *pdev)
writew(val, v3->base + V3_PCI_CMD);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- ret = v3_pci_setup_resource(v3, io_base, host, win);
+ resource_list_for_each_entry(win, &host->windows) {
+ ret = v3_pci_setup_resource(v3, host, win);
if (ret) {
dev_err(dev, "error setting up resources\n");
return ret;
@@ -931,7 +904,6 @@ static int v3_pci_probe(struct platform_device *pdev)
val |= V3_SYSTEM_M_LOCK;
writew(val, v3->base + V3_SYSTEM);
- list_splice_init(&res, &host->windows);
ret = pci_scan_root_bus_bridge(host);
if (ret) {
dev_err(dev, "failed to register host: %d\n", ret);
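The of_pci_range -> resource_entry conversion above recurs in most drivers in this series, and the only subtle part is the address arithmetic: a resource_entry holds the CPU-side resource plus an offset, so the PCI bus address is recovered by subtraction. A self-contained sketch (helper name hypothetical):

    #include <linux/ioport.h>
    #include <linux/printk.h>
    #include <linux/resource_ext.h>

    /* For a bridge window or dma-range described by *entry: */
    static void example_show_entry(struct resource_entry *entry)
    {
            u64 cpu_addr = entry->res->start;                  /* CPU view */
            u64 pci_addr = entry->res->start - entry->offset;  /* bus view */
            u64 size     = resource_size(entry->res);

            pr_debug("CPU %#llx -> PCI %#llx, size %#llx\n",
                     cpu_addr, pci_addr, size);
    }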
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index f59ad2728c0b..b911359b6d81 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -62,65 +62,16 @@ static struct pci_ops pci_versatile_ops = {
.write = pci_generic_config_write,
};
-static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *res)
-{
- int err, mem = 1, res_valid = 0;
- resource_size_t iobase;
- struct resource_entry *win, *tmp;
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, res);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry_safe(win, tmp, res) {
- struct resource *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
-
- writel(res->start >> 28, PCI_IMAP(mem));
- writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
- mem++;
-
- break;
- }
- }
-
- if (res_valid)
- return 0;
-
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
-
-out_release_res:
- pci_free_resource_list(res);
- return err;
-}
-
static int versatile_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
- int ret, i, myslot = -1;
+ struct resource_entry *entry;
+ int ret, i, myslot = -1, mem = 1;
u32 val;
void __iomem *local_pci_cfg_base;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
- LIST_HEAD(pci_res);
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
@@ -141,10 +92,19 @@ static int versatile_pci_probe(struct platform_device *pdev)
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
- ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ NULL, NULL);
if (ret)
return ret;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ writel(entry->res->start >> 28, PCI_IMAP(mem));
+ writel(__pa(PAGE_OFFSET) >> 28, PCI_SMAP(mem));
+ mem++;
+ }
+ }
+
/*
* We need to discover the PCI core first to configure itself
* before the main PCI probing is performed
@@ -177,9 +137,9 @@ static int versatile_pci_probe(struct platform_device *pdev)
/*
* Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
*/
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
- writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+ writel(__pa(PAGE_OFFSET), local_pci_cfg_base + PCI_BASE_ADDRESS_2);
/*
* For many years the kernel and QEMU were symbiotically buggy
@@ -197,7 +157,6 @@ static int versatile_pci_probe(struct platform_device *pdev)
pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
- list_splice_init(&pci_res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = NULL;
bridge->busnr = 0;
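The same mechanical replacement repeats throughout this series: the open-coded devm_of_pci_get_host_bridge_resources() + devm_request_pci_bus_resources() (+ devm_pci_remap_iospace()) sequence collapses into a single pci_parse_request_of_pci_ranges() call that fills in the host bridge directly. The resulting probe shape, sketched with a hypothetical driver name:

    static int example_pcie_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct pci_host_bridge *bridge;
            struct resource *bus_res;  /* optional out: bus-number range */
            int ret;

            bridge = devm_pci_alloc_host_bridge(dev, 0);
            if (!bridge)
                    return -ENOMEM;

            /* Parses ranges/dma-ranges, requests the resources and maps
             * any I/O window; bridge->windows and bridge->dma_ranges are
             * populated in place, so no list_splice_init() afterwards. */
            ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
                                                  &bridge->dma_ranges,
                                                  &bus_res);
            if (ret)
                    return ret;

            return pci_scan_root_bus_bridge(bridge);
    }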
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index ffda3e8b4742..de195fd430dc 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -405,15 +405,13 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
-static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
- struct list_head *res,
- resource_size_t io_base)
+static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *window;
struct device *dev = port->dev;
- int ret;
- resource_list_for_each_entry(window, res) {
+ resource_list_for_each_entry(window, &bridge->windows) {
struct resource *res = window->res;
u64 restype = resource_type(res);
@@ -421,11 +419,9 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
switch (restype) {
case IORESOURCE_IO:
- xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
+ xgene_pcie_setup_ob_reg(port, res, OMR3BARL,
+ pci_pio_to_address(res->start),
res->start - window->offset);
- ret = devm_pci_remap_iospace(dev, res, io_base);
- if (ret < 0)
- return ret;
break;
case IORESOURCE_MEM:
if (res->flags & IORESOURCE_PREFETCH)
@@ -485,27 +481,28 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
}
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
- struct of_pci_range *range, u8 *ib_reg_mask)
+ struct resource_entry *entry,
+ u8 *ib_reg_mask)
{
void __iomem *cfg_base = port->cfg_base;
struct device *dev = port->dev;
void *bar_addr;
u32 pim_reg;
- u64 cpu_addr = range->cpu_addr;
- u64 pci_addr = range->pci_addr;
- u64 size = range->size;
+ u64 cpu_addr = entry->res->start;
+ u64 pci_addr = cpu_addr - entry->offset;
+ u64 size = resource_size(entry->res);
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
- region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, size);
if (region < 0) {
dev_warn(dev, "invalid pcie dma-range config\n");
return;
}
- if (range->flags & IORESOURCE_PREFETCH)
+ if (entry->res->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
@@ -536,25 +533,13 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
- struct device_node *np = port->node;
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- struct device *dev = port->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
+ struct resource_entry *entry;
u8 ib_reg_mask = 0;
- if (of_pci_dma_range_parser_init(&parser, np)) {
- dev_err(dev, "missing dma-ranges property\n");
- return -EINVAL;
- }
-
- /* Get the dma-ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.cpu_addr + range.size - 1;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges)
+ xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask);
- dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
- range.flags, range.cpu_addr, end, range.pci_addr);
- xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
- }
return 0;
}
@@ -567,8 +552,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
-static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
- resource_size_t io_base)
+static int xgene_pcie_setup(struct xgene_pcie_port *port)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
@@ -580,7 +564,7 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res,
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
xgene_pcie_writel(port, BRIDGE_CFG_0, val);
- ret = xgene_pcie_map_ranges(port, res, io_base);
+ ret = xgene_pcie_map_ranges(port);
if (ret)
return ret;
@@ -607,11 +591,9 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
struct xgene_pcie_port *port;
- resource_size_t iobase = 0;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret;
- LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
@@ -634,20 +616,15 @@ static int xgene_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(dev, &res);
- if (ret)
- goto error;
-
- ret = xgene_pcie_setup(port, &res, iobase);
+ ret = xgene_pcie_setup(port);
if (ret)
- goto error;
+ return ret;
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@@ -657,7 +634,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
ret = pci_scan_root_bus_bridge(bridge);
if (ret < 0)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -666,10 +643,6 @@ static int xgene_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return ret;
}
static const struct of_device_id xgene_pcie_match_table[] = {
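Note what the xgene change implies once the core maps I/O windows: res->start no longer holds a CPU physical address but a logical PIO token, so the driver converts it back with pci_pio_to_address() before programming the outbound window. In short:

    /* res: an IORESOURCE_IO entry taken from bridge->windows */
    phys_addr_t cpu_addr = pci_pio_to_address(res->start);  /* CPU physical */
    u64 pci_addr = res->start - window->offset;             /* PCI bus addr */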
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index d2497ca43828..b447c3e4abad 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -92,7 +92,6 @@ struct altera_pcie {
u8 root_bus_nr;
struct irq_domain *irq_domain;
struct resource bus_range;
- struct list_head resources;
const struct altera_pcie_data *pcie_data;
};
@@ -670,39 +669,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
-{
- int err, res_valid = 0;
- struct device *dev = &pcie->pdev->dev;
- struct resource_entry *win;
-
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, NULL);
- if (err)
- return err;
-
- err = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (err)
- goto out_release_res;
-
- resource_list_for_each_entry(win, &pcie->resources) {
- struct resource *res = win->res;
-
- if (resource_type(res) == IORESOURCE_MEM)
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- }
-
- if (res_valid)
- return 0;
-
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
-
-out_release_res:
- pci_free_resource_list(&pcie->resources);
- return err;
-}
-
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
@@ -833,9 +799,8 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
- ret = altera_pcie_parse_request_of_pci_ranges(pcie);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Failed add resources\n");
return ret;
@@ -853,7 +818,6 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -884,7 +848,6 @@ static int altera_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
- pci_free_resource_list(&pcie->resources);
altera_pcie_irq_teardown(pcie);
return 0;
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 0a3f61be5625..3176ad3ab0e5 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -293,11 +293,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
- u32 *msg, hwirq;
+ u32 __iomem *msg;
+ u32 hwirq;
unsigned int offs;
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
- msg = (u32 *)(msi->eq_cpu + offs);
+ msg = (u32 __iomem *)(msi->eq_cpu + offs);
hwirq = readl(msg);
hwirq = (hwirq >> 5) + (hwirq & 0x1f);
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 9ee6200a66f4..ff0a81a632a1 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -43,8 +43,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
struct iproc_pcie *pcie;
struct device_node *np = dev->of_node;
struct resource reg;
- resource_size_t iobase = 0;
- LIST_HEAD(resources);
struct pci_host_bridge *bridge;
int ret;
@@ -97,8 +95,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
if (IS_ERR(pcie->phy))
return PTR_ERR(pcie->phy);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources,
- &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "unable to get PCI host bridge resources\n");
return ret;
@@ -113,10 +111,9 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->map_irq = of_irq_parse_and_map_pci;
}
- ret = iproc_pcie_setup(pcie, &resources);
+ ret = iproc_pcie_setup(pcie, &bridge->windows);
if (ret) {
dev_err(dev, "PCIe controller setup failed\n");
- pci_free_resource_list(&resources);
return ret;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 2d457bfdaf66..0a468c73bae3 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -1122,15 +1122,16 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
}
static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
- struct of_pci_range *range,
+ struct resource_entry *entry,
enum iproc_pcie_ib_map_type type)
{
struct device *dev = pcie->dev;
struct iproc_pcie_ib *ib = &pcie->ib;
int ret;
unsigned int region_idx, size_idx;
- u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
- resource_size_t size = range->size;
+ u64 axi_addr = entry->res->start;
+ u64 pci_addr = entry->res->start - entry->offset;
+ resource_size_t size = resource_size(entry->res);
/* iterate through all IARR mapping regions */
for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
@@ -1182,67 +1183,46 @@ err_ib:
return ret;
}
-static int iproc_pcie_add_dma_range(struct device *dev,
- struct list_head *resources,
- struct of_pci_range *range)
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
- struct resource *res;
- struct resource_entry *entry, *tmp;
- struct list_head *head = resources;
-
- res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int ret = 0;
- resource_list_for_each_entry(tmp, resources) {
- if (tmp->res->start < range->cpu_addr)
- head = &tmp->node;
+ resource_list_for_each_entry(entry, &host->dma_ranges) {
+ /* Each range entry corresponds to an inbound mapping region */
+ ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
+ if (ret)
+ break;
}
- res->start = range->cpu_addr;
- res->end = res->start + range->size - 1;
-
- entry = resource_list_create_entry(res, 0);
- if (!entry)
- return -ENOMEM;
-
- entry->offset = res->start - range->cpu_addr;
- resource_list_add(entry, head);
-
- return 0;
+ return ret;
}
-static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
{
- struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int ret;
- LIST_HEAD(resources);
+ struct iproc_pcie_ib *ib = &pcie->ib;
+ struct iproc_pcie_ob *ob = &pcie->ob;
+ int idx;
- /* Get the dma-ranges from DT */
- ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
- if (ret)
- return ret;
+ if (pcie->ep_is_internal)
+ return;
- for_each_of_pci_range(&parser, &range) {
- ret = iproc_pcie_add_dma_range(pcie->dev,
- &resources,
- &range);
- if (ret)
- goto out;
- /* Each range entry corresponds to an inbound mapping region */
- ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
- if (ret)
- goto out;
+ if (pcie->need_ob_cfg) {
+ /* iterate through all OARR mapping regions */
+ for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_OARR0, idx), 0);
+ }
}
- list_splice_init(&resources, &host->dma_ranges);
-
- return 0;
-out:
- pci_free_resource_list(&resources);
- return ret;
+ if (pcie->need_ib_cfg) {
+ /* iterate through all IARR mapping regions */
+ for (idx = 0; idx < ib->nr_regions; idx++) {
+ iproc_pcie_write_reg(pcie,
+ MAP_REG(IPROC_PCIE_IARR0, idx), 0);
+ }
+ }
}
static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
@@ -1276,13 +1256,16 @@ static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
{
int ret;
- struct of_pci_range range;
+ struct resource_entry entry;
- memset(&range, 0, sizeof(range));
- range.size = SZ_32K;
- range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+ memset(&entry, 0, sizeof(entry));
+ entry.res = &entry.__res;
- ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+ msi_addr &= ~(SZ_32K - 1);
+ entry.res->start = msi_addr;
+ entry.res->end = msi_addr + SZ_32K - 1;
+
+ ret = iproc_pcie_setup_ib(pcie, &entry, IPROC_PCIE_IB_MAP_IO);
return ret;
}
@@ -1498,10 +1481,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
return ret;
}
- ret = devm_request_pci_bus_resources(dev, res);
- if (ret)
- return ret;
-
ret = phy_init(pcie->phy);
if (ret) {
dev_err(dev, "unable to initialize PCIe PHY\n");
@@ -1517,6 +1496,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
iproc_pcie_perst_ctrl(pcie, true);
iproc_pcie_perst_ctrl(pcie, false);
+ iproc_pcie_invalidate_mapping(pcie);
+
if (pcie->need_ob_cfg) {
ret = iproc_pcie_map_ranges(pcie, res);
if (ret) {
@@ -1543,7 +1524,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (iproc_pcie_msi_enable(pcie))
dev_info(dev, "not using iProc MSI\n");
- list_splice_init(res, &host->windows);
host->busnr = 0;
host->dev.parent = dev;
host->ops = &iproc_pcie_ops;
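iproc_pcie_paxb_v2_msi_steer() now shows the less common, list-free use of resource_entry: the structure embeds a spare struct resource (__res) exactly so a caller can build a free-standing entry on the stack, which keeps iproc_pcie_setup_ib()'s interface uniform. Reduced to its essentials:

    #include <linux/resource_ext.h>
    #include <linux/sizes.h>
    #include <linux/string.h>

    struct resource_entry entry;

    memset(&entry, 0, sizeof(entry));
    entry.res = &entry.__res;            /* use the embedded resource   */
    entry.res->start = msi_addr & ~(SZ_32K - 1);
    entry.res->end = entry.res->start + SZ_32K - 1;
    /* entry.offset == 0, i.e. CPU address == PCI address here */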
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 626a7c352dfd..cb982891b22b 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -216,7 +216,6 @@ struct mtk_pcie {
void __iomem *base;
struct clk *free_ck;
- struct resource mem;
struct list_head ports;
const struct mtk_pcie_soc *soc;
unsigned int busnr;
@@ -661,11 +660,19 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
- struct resource *mem = &pcie->mem;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct resource *mem = NULL;
+ struct resource_entry *entry;
const struct mtk_pcie_soc *soc = port->pcie->soc;
u32 val;
int err;
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (entry)
+ mem = entry->res;
+ if (!mem)
+ return -EINVAL;
+
/* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
if (pcie->base) {
val = readl(pcie->base + PCIE_SYS_CFG_V2);
@@ -1023,39 +1030,15 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
struct mtk_pcie_port *port, *tmp;
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct list_head *windows = &host->windows;
- struct resource_entry *win, *tmp_win;
- resource_size_t io_base;
+ struct resource *bus;
int err;
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- windows, &io_base);
+ err = pci_parse_request_of_pci_ranges(dev, windows,
+ &host->dma_ranges, &bus);
if (err)
return err;
- err = devm_request_pci_bus_resources(dev, windows);
- if (err < 0)
- return err;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp_win, windows) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- err = devm_pci_remap_iospace(dev, win->res, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, win->res);
- resource_list_destroy_entry(win);
- }
- break;
- case IORESOURCE_MEM:
- memcpy(&pcie->mem, win->res, sizeof(*win->res));
- pcie->mem.name = "non-prefetchable";
- break;
- case IORESOURCE_BUS:
- pcie->busnr = win->res->start;
- break;
- }
- }
+ pcie->busnr = bus->start;
for_each_available_child_of_node(node, child) {
int slot;
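resource_list_first_type(), used in mtk_pcie_startup_port_v2() above, is a small helper added alongside this series; its behavior is essentially (sketch of the include/linux/resource_ext.h definition):

    static inline struct resource_entry *
    resource_list_first_type(struct list_head *list, unsigned long type)
    {
            struct resource_entry *entry;

            resource_list_for_each_entry(entry, list) {
                    if (resource_type(entry->res) == type)
                            return entry;
            }
            return NULL;
    }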
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a45a6447b01d..3a696ca45bfa 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -140,7 +140,6 @@ struct mobiveil_msi { /* MSI information */
struct mobiveil_pcie {
struct platform_device *pdev;
- struct list_head resources;
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
@@ -235,7 +234,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
{
void *addr;
u32 val;
@@ -250,7 +249,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
return val;
}
-static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+ size_t size)
{
void *addr;
int ret;
@@ -262,19 +262,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
dev_err(&pcie->pdev->dev, "write CSR address failed\n");
}
-static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
{
- return csr_read(pcie, off, 0x4);
+ return mobiveil_csr_read(pcie, off, 0x4);
}
-static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
{
- csr_write(pcie, val, off, 0x4);
+ mobiveil_csr_write(pcie, val, off, 0x4);
}
static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
{
- return (csr_readl(pcie, LTSSM_STATUS) &
+ return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
}
@@ -323,7 +323,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
- csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
return pcie->config_axi_slave_base + where;
}
@@ -353,13 +353,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
chained_irq_enter(chip, desc);
/* read INTx status */
- val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
- mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
intr_status = val & mask;
/* Handle INTx */
if (intr_status & PAB_INTP_INTX_MASK) {
- shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
do {
@@ -373,12 +374,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
bit);
/* clear interrupt handled */
- csr_writel(pcie, 1 << (PAB_INTX_START + bit),
- PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie,
+ 1 << (PAB_INTX_START + bit),
+ PAB_INTP_AMBA_MISC_STAT);
}
- shifted_status = csr_readl(pcie,
- PAB_INTP_AMBA_MISC_STAT);
+ shifted_status = mobiveil_csr_readl(pcie,
+ PAB_INTP_AMBA_MISC_STAT);
shifted_status &= PAB_INTP_INTX_MASK;
shifted_status >>= PAB_INTX_START;
} while (shifted_status != 0);
@@ -413,7 +415,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
}
/* Clear the interrupt status */
- csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+ mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
chained_irq_exit(chip, desc);
}
@@ -474,24 +476,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
return;
}
- value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64),
- PAB_EXT_PEX_AMAP_SIZEN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_PEX_AMAP_SIZEN(win_num));
- csr_writel(pcie, lower_32_bits(cpu_addr),
- PAB_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+ PAB_PEX_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_PEX_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_PEX_AMAP_PEX_WIN_H(win_num));
pcie->ib_wins_configured++;
}
@@ -515,27 +517,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
* program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
* to 4 KB in PAB_AXI_AMAP_CTRL register
*/
- value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+ value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
(lower_32_bits(size64) & WIN_SIZE_MASK);
- csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+ mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
- csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(size64),
+ PAB_EXT_AXI_AMAP_SIZE(win_num));
/*
* program AXI window base with appropriate value in
* PAB_AXI_AMAP_AXI_WIN0 register
*/
- csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
- PAB_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, upper_32_bits(cpu_addr),
- PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie,
+ lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+ PAB_AXI_AMAP_AXI_WIN(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+ PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
- csr_writel(pcie, lower_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_L(win_num));
- csr_writel(pcie, upper_32_bits(pci_addr),
- PAB_AXI_AMAP_PEX_WIN_H(win_num));
+ mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_L(win_num));
+ mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+ PAB_AXI_AMAP_PEX_WIN_H(win_num));
pcie->ob_wins_configured++;
}
@@ -575,46 +579,47 @@ static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
static int mobiveil_host_init(struct mobiveil_pcie *pcie)
{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u32 value, pab_ctrl, type;
struct resource_entry *win;
/* setup bus numbers */
- value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
value &= 0xff000000;
value |= 0x00ff0100;
- csr_writel(pcie, value, PCI_PRIMARY_BUS);
+ mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
*/
- value = csr_readl(pcie, PCI_COMMAND);
+ value = mobiveil_csr_readl(pcie, PCI_COMMAND);
value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- csr_writel(pcie, value, PCI_COMMAND);
+ mobiveil_csr_writel(pcie, value, PCI_COMMAND);
/*
* program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
* register
*/
- pab_ctrl = csr_readl(pcie, PAB_CTRL);
+ pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
- csr_writel(pcie, pab_ctrl, PAB_CTRL);
+ mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
- csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
- PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+ PAB_INTP_AMBA_MISC_ENB);
/*
* program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
* PAB_AXI_PIO_CTRL Register
*/
- value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
value |= APIO_EN_MASK;
- csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
/* Enable PCIe PIO master */
- value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+ value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
value |= 1 << PIO_ENABLE_SHIFT;
- csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+ mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
/*
* we'll program one outbound window for config reads and
@@ -631,7 +636,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &pcie->resources) {
+ resource_list_for_each_entry(win, &bridge->windows) {
if (resource_type(win->res) == IORESOURCE_MEM)
type = MEM_WINDOW_TYPE;
else if (resource_type(win->res) == IORESOURCE_IO)
@@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
}
/* fixup for PCIe class register */
- value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
value &= 0xff;
value |= (PCI_CLASS_BRIDGE_PCI << 16);
- csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+ mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
@@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val &= ~mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
pcie = irq_desc_get_chip_data(desc);
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
- shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+ shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
shifted_val |= mask;
- csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+ mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
}
@@ -857,7 +862,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
struct device *dev = &pdev->dev;
- resource_size_t iobase;
int ret;
/* allocate the PCIe port */
@@ -875,11 +879,9 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
return ret;
}
- INIT_LIST_HEAD(&pcie->resources);
-
/* parse the host bridge base addresses from the device tree file */
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &pcie->resources, &iobase);
+ ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (ret) {
dev_err(dev, "Getting bridge resources failed\n");
return ret;
@@ -892,24 +894,19 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_host_init(pcie);
if (ret) {
dev_err(dev, "Failed to initialize host\n");
- goto error;
+ return ret;
}
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ return ret;
}
irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie);
- ret = devm_request_pci_bus_resources(dev, &pcie->resources);
- if (ret)
- goto error;
-
/* Initialize bridge */
- list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -920,13 +917,13 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
ret = mobiveil_bringup_link(pcie);
if (ret) {
dev_info(dev, "link bring-up failed\n");
- goto error;
+ return ret;
}
/* setup the kernel resources for the newly added PCIe root bus */
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
- goto error;
+ return ret;
bus = bridge->bus;
@@ -936,9 +933,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-error:
- pci_free_resource_list(&pcie->resources);
- return ret;
}
static const struct of_device_id mobiveil_pcie_of_match[] = {
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index f6a669a9af41..759c6542c5c8 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -30,8 +30,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include "../pci.h"
-
#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE BIT(31)
@@ -93,8 +91,11 @@
#define LINK_SPEED_2_5GTS (1 << 16)
#define LINK_SPEED_5_0GTS (2 << 16)
#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
#define SPEED_CHANGE BIT(24)
#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
#define PMSR 0x01105c
#define MACS2R 0x011078
#define MACCGSPSETR 0x011084
@@ -615,6 +616,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
if (IS_ENABLED(CONFIG_PCI_MSI))
rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
/* Finish initialization - establish a PCI Express link */
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
@@ -1014,40 +1017,43 @@ err_irq1:
}
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
- struct of_pci_range *range,
+ struct resource_entry *entry,
int *index)
{
- u64 restype = range->flags;
- u64 cpu_addr = range->cpu_addr;
- u64 cpu_end = range->cpu_addr + range->size;
- u64 pci_addr = range->pci_addr;
+ u64 restype = entry->res->flags;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_addr = entry->res->start - entry->offset;
u32 flags = LAM_64BIT | LAR_ENABLE;
u64 mask;
- u64 size;
+ u64 size = resource_size(entry->res);
int idx = *index;
if (restype & IORESOURCE_PREFETCH)
flags |= LAM_PREFETCH;
- /*
- * If the size of the range is larger than the alignment of the start
- * address, we have to use multiple entries to perform the mapping.
- */
- if (cpu_addr > 0) {
- unsigned long nr_zeros = __ffs64(cpu_addr);
- u64 alignment = 1ULL << nr_zeros;
+ while (cpu_addr < cpu_end) {
+ if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ /*
+ * If the size of the range is larger than the alignment of
+ * the start address, we have to use multiple entries to
+ * perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
- size = min(range->size, alignment);
- } else {
- size = range->size;
- }
- /* Hardware supports max 4GiB inbound region */
- size = min(size, 1ULL << 32);
+ size = min(size, alignment);
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
- mask = roundup_pow_of_two(size) - 1;
- mask &= ~0xf;
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
- while (cpu_addr < cpu_end) {
/*
* Set up 64-bit inbound regions as the range parser doesn't
* distinguish between 32 and 64-bit types.
@@ -1067,41 +1073,25 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
pci_addr += size;
cpu_addr += size;
idx += 2;
-
- if (idx > MAX_NR_INBOUND_MAPS) {
- dev_err(pcie->dev, "Failed to map inbound regions!\n");
- return -EINVAL;
- }
}
*index = idx;
return 0;
}
-static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
- struct device_node *np)
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie)
{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int index = 0;
- int err;
-
- if (of_pci_dma_range_parser_init(&parser, np))
- return -EINVAL;
-
- /* Get the dma-ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- u64 end = range.cpu_addr + range.size - 1;
-
- dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
- range.flags, range.cpu_addr, end, range.pci_addr);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct resource_entry *entry;
+ int index = 0, err = 0;
- err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = rcar_pcie_inbound_ranges(pcie, entry, &index);
if (err)
- return err;
+ break;
}
- return 0;
+ return err;
}
static const struct of_device_id rcar_pcie_of_match[] = {
@@ -1138,7 +1128,8 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, pcie);
- err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL);
+ err = pci_parse_request_of_pci_ranges(dev, &pcie->resources,
+ &bridge->dma_ranges, NULL);
if (err)
goto err_free_bridge;
@@ -1161,7 +1152,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
goto err_unmap_msi_irqs;
}
- err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
+ err = rcar_pcie_parse_map_dma_ranges(pcie);
if (err)
goto err_clk_disable;
@@ -1237,6 +1228,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
return 0;
/* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
return rcar_pcie_wait_for_dl(pcie);
}
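The new MACCTLR value is easier to audit as plain bit arithmetic; assuming the definitions added above, the constants work out as checked below:

    #include <linux/bits.h>
    #include <linux/build_bug.h>

    /* GENMASK(h, l) sets bits h..l inclusive. */
    static_assert(GENMASK(23, 16) == 0x00ff0000);  /* MACCTLR_NFTS_MASK */
    static_assert((BIT(31) | GENMASK(23, 16)) == 0x80ff0000); /* MACCTLR_INIT_VAL */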
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index ef8e677ce9d1..d9b63bfa5dd7 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -620,19 +620,13 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
dev_info(dev, "no vpcie3v3 regulator found\n");
}
- rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
- if (IS_ERR(rockchip->vpcie1v8)) {
- if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
- return PTR_ERR(rockchip->vpcie1v8);
- dev_info(dev, "no vpcie1v8 regulator found\n");
- }
+ rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8))
+ return PTR_ERR(rockchip->vpcie1v8);
- rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
- if (IS_ERR(rockchip->vpcie0v9)) {
- if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
- return PTR_ERR(rockchip->vpcie0v9);
- dev_info(dev, "no vpcie0v9 regulator found\n");
- }
+ rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9))
+ return PTR_ERR(rockchip->vpcie0v9);
return 0;
}
@@ -658,27 +652,22 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
}
}
- if (!IS_ERR(rockchip->vpcie1v8)) {
- err = regulator_enable(rockchip->vpcie1v8);
- if (err) {
- dev_err(dev, "fail to enable vpcie1v8 regulator\n");
- goto err_disable_3v3;
- }
+ err = regulator_enable(rockchip->vpcie1v8);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+ goto err_disable_3v3;
}
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- goto err_disable_1v8;
- }
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ goto err_disable_1v8;
}
return 0;
err_disable_1v8:
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie1v8);
err_disable_3v3:
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
@@ -806,19 +795,28 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
+ struct resource_entry *entry;
+ u64 pci_addr, size;
int offset;
int err;
int reg_no;
rockchip_pcie_cfg_configuration_accesses(rockchip,
AXI_WRAPPER_TYPE0_CFG);
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+ rockchip->msg_bus_addr = pci_addr;
- for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
AXI_WRAPPER_MEM_WRITE,
20 - 1,
- rockchip->mem_bus_addr +
- (reg_no << 20),
+ pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC mem outbound ATU failed\n");
@@ -832,14 +830,20 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
return err;
}
- offset = rockchip->mem_size >> 20;
- for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ if (!entry)
+ return -ENODEV;
+
+ size = resource_size(entry->res);
+ pci_addr = entry->res->start - entry->offset;
+
+ offset = size >> 20;
+ for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip,
reg_no + 1 + offset,
AXI_WRAPPER_IO_WRITE,
20 - 1,
- rockchip->io_bus_addr +
- (reg_no << 20),
+ pci_addr + (reg_no << 20),
0);
if (err) {
dev_err(dev, "program RC io outbound ATU failed\n");
@@ -852,8 +856,7 @@ static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
AXI_WRAPPER_NOR_MSG,
20 - 1, 0, 0);
- rockchip->msg_bus_addr = rockchip->mem_bus_addr +
- ((reg_no + offset) << 20);
+ rockchip->msg_bus_addr += ((reg_no + offset) << 20);
return err;
}
@@ -897,8 +900,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
rockchip_pcie_disable_clocks(rockchip);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie0v9);
return ret;
}
@@ -908,12 +910,10 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
- if (!IS_ERR(rockchip->vpcie0v9)) {
- err = regulator_enable(rockchip->vpcie0v9);
- if (err) {
- dev_err(dev, "fail to enable vpcie0v9 regulator\n");
- return err;
- }
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+ return err;
}
err = rockchip_pcie_enable_clocks(rockchip);
@@ -939,8 +939,7 @@ err_err_deinit_port:
err_pcie_resume:
rockchip_pcie_disable_clocks(rockchip);
err_disable_0v9:
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie0v9);
return err;
}
@@ -950,14 +949,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
- struct resource_entry *win;
- resource_size_t io_base;
- struct resource *mem;
- struct resource *io;
+ struct resource *bus_res;
int err;
- LIST_HEAD(res);
-
if (!dev->of_node)
return -ENODEV;
@@ -995,56 +989,23 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (err < 0)
goto err_deinit_port;
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &res, &io_base);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, &bus_res);
if (err)
goto err_remove_irq_domain;
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto err_free_res;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry(win, &res) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- io = win->res;
- io->name = "I/O";
- rockchip->io_size = resource_size(io);
- rockchip->io_bus_addr = io->start - win->offset;
- err = pci_remap_iospace(io, io_base);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, io);
- continue;
- }
- rockchip->io = io;
- break;
- case IORESOURCE_MEM:
- mem = win->res;
- mem->name = "MEM";
- rockchip->mem_size = resource_size(mem);
- rockchip->mem_bus_addr = mem->start - win->offset;
- break;
- case IORESOURCE_BUS:
- rockchip->root_bus_nr = win->res->start;
- break;
- default:
- continue;
- }
- }
+ rockchip->root_bus_nr = bus_res->start;
err = rockchip_pcie_cfg_atu(rockchip);
if (err)
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
if (!rockchip->msg_region) {
err = -ENOMEM;
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
}
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = rockchip;
bridge->busnr = 0;
@@ -1054,7 +1015,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto err_unmap_iospace;
+ goto err_remove_irq_domain;
bus = bridge->bus;
@@ -1068,10 +1029,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
return 0;
-err_unmap_iospace:
- pci_unmap_iospace(rockchip->io);
-err_free_res:
- pci_free_resource_list(&res);
err_remove_irq_domain:
irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
@@ -1081,10 +1038,8 @@ err_vpcie:
regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
rockchip_pcie_disable_clocks(rockchip);
return err;
@@ -1097,7 +1052,6 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(rockchip->root_bus);
pci_remove_root_bus(rockchip->root_bus);
- pci_unmap_iospace(rockchip->io);
irq_domain_remove(rockchip->irq_domain);
rockchip_pcie_deinit_phys(rockchip);
@@ -1108,10 +1062,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
regulator_disable(rockchip->vpcie12v);
if (!IS_ERR(rockchip->vpcie3v3))
regulator_disable(rockchip->vpcie3v3);
- if (!IS_ERR(rockchip->vpcie1v8))
- regulator_disable(rockchip->vpcie1v8);
- if (!IS_ERR(rockchip->vpcie0v9))
- regulator_disable(rockchip->vpcie0v9);
+ regulator_disable(rockchip->vpcie1v8);
+ regulator_disable(rockchip->vpcie0v9);
return 0;
}
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 8e87a059ce73..d90dfb354573 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Rockchip AXI PCIe controller driver
*
@@ -304,13 +304,8 @@ struct rockchip_pcie {
struct irq_domain *irq_domain;
int offset;
struct pci_bus *root_bus;
- struct resource *io;
- phys_addr_t io_bus_addr;
- u32 io_size;
void __iomem *msg_region;
- u32 mem_size;
phys_addr_t msg_bus_addr;
- phys_addr_t mem_bus_addr;
bool is_rc;
struct resource *mem_res;
};
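
With bridge->windows now owning the ranges, the io_*/mem_* bookkeeping removed above becomes redundant; the driver fetches a window on demand with resource_list_first_type(). A runnable user-space analog of that first-entry-of-type lookup:

    #include <stdio.h>
    #include <stddef.h>

    enum res_type { RES_IO, RES_MEM, RES_BUS };

    struct entry {
            enum res_type type;
            unsigned long start, size;
            struct entry *next;
    };

    /* Analogous to resource_list_first_type(): first window of a given kind. */
    static struct entry *first_of_type(struct entry *head, enum res_type type)
    {
            for (struct entry *e = head; e; e = e->next)
                    if (e->type == type)
                            return e;
            return NULL;
    }

    int main(void)
    {
            struct entry bus = { RES_BUS, 0, 0x100, NULL };
            struct entry mem = { RES_MEM, 0xfa000000, 0x600000, &bus };
            struct entry io  = { RES_IO,  0xfbe00000, 0x100000, &mem };
            struct entry *win = first_of_type(&io, RES_MEM);

            if (win)
                    printf("MEM window: %#lx, size %#lx\n", win->start, win->size);
            return 0;
    }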
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 45c0f344ccd1..9bd1427f2fd6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -821,8 +821,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
struct pci_host_bridge *bridge;
int err;
- resource_size_t iobase = 0;
- LIST_HEAD(res);
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@@ -845,24 +843,19 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto error;
-
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- goto error;
+ return err;
}
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = pcie->root_busno;
@@ -874,13 +867,13 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- goto error;
+ return err;
}
}
err = pci_scan_root_bus_bridge(bridge);
if (err)
- goto error;
+ return err;
bus = bridge->bus;
@@ -889,10 +882,6 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return err;
}
static struct platform_driver nwl_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 5bf3af3b28e6..98e55297815b 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -619,8 +619,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int err;
- resource_size_t iobase = 0;
- LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
@@ -647,19 +645,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res,
- &iobase);
+ err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
+ &bridge->dma_ranges, NULL);
if (err) {
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
- err = devm_request_pci_bus_resources(dev, &res);
- if (err)
- goto error;
-
-
- list_splice_init(&res, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = port;
bridge->busnr = 0;
@@ -673,7 +665,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
#endif
err = pci_scan_root_bus_bridge(bridge);
if (err < 0)
- goto error;
+ return err;
bus = bridge->bus;
@@ -682,10 +674,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
-
-error:
- pci_free_resource_list(&res);
- return err;
}
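
All three drivers above get the same conversion: the open-coded devm_of_pci_get_host_bridge_resources() + devm_request_pci_bus_resources() + list_splice_init() sequence collapses into a single pci_parse_request_of_pci_ranges() call that fills bridge->windows and bridge->dma_ranges directly. A hedged sketch of the resulting probe shape -- example_pcie_probe and struct example_pcie are illustrative names, and hardware setup is elided:

    /* Sketch only: a host driver probe after the conversion. */
    static int example_pcie_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct pci_host_bridge *bridge;
            struct resource *bus_res = NULL;
            int err;

            bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct example_pcie));
            if (!bridge)
                    return -ENOMEM;

            /* One call parses DT ranges/dma-ranges and requests the windows. */
            err = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
                                                  &bridge->dma_ranges, &bus_res);
            if (err)
                    return err;

            bridge->dev.parent = dev;
            bridge->busnr = bus_res ? bus_res->start : 0;
            /* ...hardware init, bridge->ops/sysdata, then scan as before... */
            return pci_scan_root_bus_bridge(bridge);
    }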
static const struct of_device_id xilinx_pcie_of_match[] = {
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a35d3f3996d7..212842263f55 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -602,16 +602,30 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
/*
* Certain VMD devices may have a root port configuration option which
- * limits the bus range to between 0-127 or 128-255
+ * limits the bus range to between 0-127, 128-255, or 224-255
*/
if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
- u32 vmcap, vmconfig;
-
- pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
- pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
- if (BUS_RESTRICT_CAP(vmcap) &&
- (BUS_RESTRICT_CFG(vmconfig) == 0x1))
- vmd->busn_start = 128;
+ u16 reg16;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCAP, &reg16);
+ if (BUS_RESTRICT_CAP(reg16)) {
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG,
+ &reg16);
+
+ switch (BUS_RESTRICT_CFG(reg16)) {
+ case 1:
+ vmd->busn_start = 128;
+ break;
+ case 2:
+ vmd->busn_start = 224;
+ break;
+ case 3:
+ pci_err(vmd->dev, "Unknown Bus Offset Setting\n");
+ return -ENODEV;
+ default:
+ break;
+ }
+ }
}
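
The restriction field is now decoded as a 2-bit value rather than a boolean: 0 keeps the default start at bus 0, 1 selects 128, 2 selects 224, and 3 is reserved, so the driver refuses to bind. A standalone model of the decode (the & 0x3 extraction is an assumption standing in for the BUS_RESTRICT_CFG() macro):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative: assume the restriction setting is 2 bits of VMCONFIG. */
    static int busn_start_from_cfg(uint16_t cfg)
    {
            switch (cfg & 0x3) {
            case 0: return 0;
            case 1: return 128;
            case 2: return 224;
            default: return -1;   /* reserved -> reject, like -ENODEV */
            }
    }

    int main(void)
    {
            for (uint16_t cfg = 0; cfg < 4; cfg++)
                    printf("cfg %u -> busn_start %d\n", cfg, busn_start_from_cfg(cfg));
            return 0;
    }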
res = &vmd->dev->resource[VMD_CFGBAR];
@@ -823,7 +837,7 @@ static int vmd_suspend(struct device *dev)
int i;
for (i = 0; i < vmd->msix_count; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+ devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
pci_save_state(pdev);
return 0;
@@ -854,6 +868,8 @@ static const struct pci_device_id vmd_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 1cfe3687a211..5d74f81ddfe4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,7 +44,7 @@
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
- void *reg[6];
+ void *reg[PCI_STD_NUM_BARS];
struct pci_epf *epf;
enum pci_barno test_reg_bar;
struct delayed_work cmd_handler;
@@ -377,7 +377,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
cancel_delayed_work(&epf_test->cmd_handler);
pci_epc_stop(epc);
- for (bar = BAR_0; bar <= BAR_5; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
epf_bar = &epf->bar[bar];
if (epf_test->reg[bar]) {
@@ -400,7 +400,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
epc_features = epf_test->epc_features;
- for (bar = BAR_0; bar <= BAR_5; bar += add) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
/*
* pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
@@ -450,7 +450,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = BAR_0; bar <= BAR_5; bar += add) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
epf_bar = &epf->bar[bar];
add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
@@ -478,7 +478,7 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
bool bar_fixed_64bit;
int i;
- for (i = BAR_0; i <= BAR_5; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
epf_bar = &epf->bar[i];
bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
if (bar_fixed_64bit)
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 2bf8bd1f0563..d2b174ce15de 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -134,7 +134,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
if (pageno < 0)
return NULL;
- *phys_addr = mem->phys_base + (pageno << page_shift);
+ *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
virt_addr = ioremap(*phys_addr, size);
if (!virt_addr)
bitmap_release_region(mem->bitmap, pageno, order);
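
This one-liner fixes a width bug: pageno is an int, so pageno << page_shift is evaluated in 32-bit arithmetic and silently loses high bits when the window sits above 4 GiB; casting to phys_addr_t first keeps the shift in 64 bits. A runnable demonstration (the truncation is emulated with unsigned arithmetic so the demo itself stays well-defined):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int pageno = 1 << 20;              /* large page index */
            int page_shift = 12;               /* 4 KiB pages */
            uint64_t phys_base = 0x100000000;  /* window above 4 GiB */

            /* Buggy form: the shift happens in 32 bits before widening. */
            uint64_t bad  = phys_base + (uint32_t)((uint32_t)pageno << page_shift);
            /* Fixed form: widen before shifting, as the patch does. */
            uint64_t good = phys_base + ((uint64_t)pageno << page_shift);

            printf("truncated: %#llx\nwidened:   %#llx\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }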
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index e7b493c22bf3..32455a79372d 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -83,7 +83,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
depends on HOTPLUG_PCI_CPCI && X86
help
Say Y here if you have a Performance Technologies (formerly Intel,
- formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
+ formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
module will be called cpcihp_zt5550.
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e4c46637f32f..b3869951c0eb 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -449,8 +449,15 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
/* Scan non-hotplug bridges that need to be reconfigured */
for_each_pci_bridge(dev, bus) {
- if (!hotplug_is_native(dev))
- max = pci_scan_bridge(bus, dev, max, 1);
+ if (hotplug_is_native(dev))
+ continue;
+
+ max = pci_scan_bridge(bus, dev, max, 1);
+ if (dev->subordinate) {
+ pcibios_resource_survey_bus(dev->subordinate);
+ pci_bus_size_bridges(dev->subordinate);
+ pci_bus_assign_resources(dev->subordinate);
+ }
}
}
@@ -480,7 +487,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (PCI_SLOT(dev->devfn) == slot->device)
acpiphp_native_scan_bridge(dev);
}
- pci_assign_unassigned_bridge_resources(bus->self);
} else {
LIST_HEAD(add_list);
int max, pass;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 654c972b8ea0..aa61d4c219d7 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,6 +72,7 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
@@ -101,6 +102,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
};
@@ -172,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn);
void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
int pciehp_query_power_fault(struct controller *ctrl);
-bool pciehp_card_present(struct controller *ctrl);
-bool pciehp_card_present_or_link_active(struct controller *ctrl);
+int pciehp_card_present(struct controller *ctrl);
+int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
-bool pciehp_check_link_active(struct controller *ctrl);
+int pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index b3122c151b80..312cc45c44c7 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl->pcie->port;
+ int ret;
pci_config_pm_runtime_get(pdev);
- *value = pciehp_card_present_or_link_active(ctrl);
+ ret = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
+ if (ret < 0)
+ return ret;
+
+ *value = ret;
return 0;
}
@@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- bool occupied;
+ int occupied;
down_read(&ctrl->reset_lock);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
- if ((occupied && (ctrl->state == OFF_STATE ||
+ if ((occupied > 0 && (ctrl->state == OFF_STATE ||
ctrl->state == BLINKINGON_STATE)) ||
(!occupied && (ctrl->state == ON_STATE ||
ctrl->state == BLINKINGOFF_STATE)))
@@ -253,7 +258,7 @@ static bool pme_is_native(struct pcie_device *dev)
return pcie_ports_native || host->native_pme;
}
-static int pciehp_suspend(struct pcie_device *dev)
+static void pciehp_disable_interrupt(struct pcie_device *dev)
{
/*
* Disable hotplug interrupt so that it does not trigger
@@ -261,7 +266,19 @@ static int pciehp_suspend(struct pcie_device *dev)
*/
if (pme_is_native(dev))
pcie_disable_interrupt(get_service_data(dev));
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pciehp_suspend(struct pcie_device *dev)
+{
+ /*
+ * If the port is already runtime suspended we can keep it that
+ * way.
+ */
+ if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
+ return 0;
+ pciehp_disable_interrupt(dev);
return 0;
}
@@ -279,6 +296,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
return 0;
}
+#endif
static int pciehp_resume(struct pcie_device *dev)
{
@@ -292,6 +310,12 @@ static int pciehp_resume(struct pcie_device *dev)
return 0;
}
+static int pciehp_runtime_suspend(struct pcie_device *dev)
+{
+ pciehp_disable_interrupt(dev);
+ return 0;
+}
+
static int pciehp_runtime_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
@@ -318,10 +342,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.remove = pciehp_remove,
#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
- .runtime_suspend = pciehp_suspend,
+#endif
+ .runtime_suspend = pciehp_runtime_suspend,
.runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 21af7b16d7a4..6503d15effbb 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl)
void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- bool present, link_active;
+ int present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
mutex_lock(&ctrl->state_lock);
present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
- if (!present && !link_active) {
+ if (present <= 0 && link_active <= 0) {
mutex_unlock(&ctrl->state_lock);
return;
}
@@ -375,7 +375,8 @@ int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
ctrl->request_result = -ENODEV;
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
@@ -408,7 +409,8 @@ int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
- !atomic_read(&ctrl->pending_events));
+ !atomic_read(&ctrl->pending_events) &&
+ !ctrl->ist_running);
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 1a522c1c4177..8a2cb1764386 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -68,7 +68,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
- while (true) {
+ do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n",
@@ -81,11 +81,9 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
PCI_EXP_SLTSTA_CC);
return 1;
}
- if (timeout < 0)
- break;
msleep(10);
timeout -= 10;
- }
+ } while (timeout >= 0);
return 0; /* timeout */
}
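
The rewritten loop is behaviorally equivalent -- poll, and if the command has not completed, sleep 10 ms and retry until the budget runs out -- but the do/while form drops the mid-loop break. A standalone model of the polling skeleton, with the hardware read and msleep() stubbed out:

    #include <stdio.h>
    #include <stdbool.h>

    static bool command_complete(int attempt)
    {
            return attempt >= 3;      /* stub: completes on the fourth poll */
    }

    static int poll_cmd(int timeout_ms)
    {
            int attempt = 0;

            do {
                    if (command_complete(attempt++))
                            return 1; /* success */
                    /* msleep(10) would go here */
                    timeout_ms -= 10;
            } while (timeout_ms >= 0);
            return 0;                 /* timeout */
    }

    int main(void)
    {
            printf("result: %d\n", poll_cmd(100));
            return 0;
    }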
@@ -201,17 +199,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
pcie_do_write_cmd(ctrl, cmd, mask, false);
}
-bool pciehp_check_link_active(struct controller *ctrl)
+/**
+ * pciehp_check_link_active() - Is the link active
+ * @ctrl: PCIe hotplug controller
+ *
+ * Check whether the downstream link is currently active. Note it is
+ * possible that the card is removed immediately after this so the
+ * caller may need to take it into account.
+ *
+ * If the hotplug controller itself is no longer available, returns
+ * %-ENODEV.
+ */
+int pciehp_check_link_active(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 lnk_status;
- bool ret;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+ return -ENODEV;
- if (ret)
- ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
return ret;
}
@@ -373,13 +383,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-bool pciehp_card_present(struct controller *ctrl)
+/**
+ * pciehp_card_present() - Is the card present
+ * @ctrl: PCIe hotplug controller
+ *
+ * Function checks whether the card is currently present in the slot and
+ * in that case returns %1. Note it is possible that the card is
+ * removed immediately after the check so the caller may need to take
+ * this into account.
+ *
+ * If the hotplug controller itself is no longer available, returns
+ * %-ENODEV.
+ */
+int pciehp_card_present(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
+ int ret;
- pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- return slot_status & PCI_EXP_SLTSTA_PDS;
+ ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+ return -ENODEV;
+
+ return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
/**
@@ -390,10 +416,19 @@ bool pciehp_card_present(struct controller *ctrl)
* Presence Detect State bit, this helper also returns true if the Link Active
* bit is set. This is a concession to broken hotplug ports which hardwire
* Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ *
+ * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
+ * port is not present anymore returns %-ENODEV.
*/
-bool pciehp_card_present_or_link_active(struct controller *ctrl)
+int pciehp_card_present_or_link_active(struct controller *ctrl)
{
- return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+ int ret;
+
+ ret = pciehp_card_present(ctrl);
+ if (ret)
+ return ret;
+
+ return pciehp_check_link_active(ctrl);
}
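
Note the short-circuit: any nonzero result from pciehp_card_present(), whether occupied (%1) or controller gone (%-ENODEV), is returned as-is, and only a definite empty slot (%0) falls through to the link check. Callers such as pciehp_check_presence() then treat values <= 0 as "not occupied". A compact runnable model:

    #include <stdio.h>
    #include <errno.h>

    static int card_present(void) { return 0; }        /* stub: slot empty */
    static int link_active(void)  { return -ENODEV; }  /* stub: ctrl gone */

    static int present_or_link_active(void)
    {
            int ret = card_present();

            if (ret)                  /* 1 and -ENODEV both stop here */
                    return ret;
            return link_active();
    }

    int main(void)
    {
            int ret = present_or_link_active();

            if (ret < 0)
                    printf("controller unavailable: %d\n", ret);
            else
                    printf("occupied: %d\n", ret);
            return 0;
    }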
int pciehp_query_power_fault(struct controller *ctrl)
@@ -583,6 +618,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
irqreturn_t ret;
u32 events;
+ ctrl->ist_running = true;
pci_config_pm_runtime_get(pdev);
/* rerun pciehp_isr() if the port was inaccessible on interrupt */
@@ -629,6 +665,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
up_read(&ctrl->reset_lock);
pci_config_pm_runtime_put(pdev);
+ ctrl->ist_running = false;
wake_up(&ctrl->requester);
return IRQ_HANDLED;
}
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 18627bb21e9e..e408e4021cee 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -154,11 +154,11 @@ static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
return speed;
}
-static int get_children_props(struct device_node *dn, const int **drc_indexes,
- const int **drc_names, const int **drc_types,
- const int **drc_power_domains)
+static int get_children_props(struct device_node *dn, const __be32 **drc_indexes,
+ const __be32 **drc_names, const __be32 **drc_types,
+ const __be32 **drc_power_domains)
{
- const int *indexes, *names, *types, *domains;
+ const __be32 *indexes, *names, *types, *domains;
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
names = of_get_property(dn, "ibm,drc-names", NULL);
@@ -185,8 +185,8 @@ static int get_children_props(struct device_node *dn, const int **drc_indexes,
/* Verify the existence of 'drc_name' and/or 'drc_type' within the
- * current node. First obtain it's my-drc-index property. Next,
- * obtain the DRC info from it's parent. Use the my-drc-index for
+ * current node. First obtain its my-drc-index property. Next,
+ * obtain the DRC info from its parent. Use the my-drc-index for
* correlation, and obtain/validate the requested properties.
*/
@@ -194,8 +194,8 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
char *drc_type, unsigned int my_index)
{
char *name_tmp, *type_tmp;
- const int *indexes, *names;
- const int *types, *domains;
+ const __be32 *indexes, *names;
+ const __be32 *types, *domains;
int i, rc;
rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
@@ -208,7 +208,7 @@ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
/* Iterate through parent properties, looking for my-drc-index */
for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
- if ((unsigned int) indexes[i + 1] == my_index)
+ if (be32_to_cpu(indexes[i + 1]) == my_index)
break;
name_tmp += (strlen(name_tmp) + 1);
@@ -239,6 +239,8 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
value = of_prop_next_u32(info, NULL, &entries);
if (!value)
return -EINVAL;
+ else
+ value++;
for (j = 0; j < entries; j++) {
of_read_drc_info_cell(&info, &value, &drc);
@@ -246,9 +248,10 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
/* Should now know end of current entry */
/* Found it */
- if (my_index <= drc.last_drc_index) {
+ if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) {
+ int index = my_index - drc.drc_index_start;
sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
- my_index);
+ drc.drc_name_suffix_start + index);
break;
}
}
@@ -265,7 +268,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
char *drc_type)
{
- const unsigned int *my_index;
+ const __be32 *my_index;
my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
if (!my_index) {
@@ -273,12 +276,12 @@ int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
return -EINVAL;
}
- if (firmware_has_feature(FW_FEATURE_DRC_INFO))
+ if (of_find_property(dn->parent, "ibm,drc-info", NULL))
return rpaphp_check_drc_props_v2(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
else
return rpaphp_check_drc_props_v1(dn, drc_name, drc_type,
- *my_index);
+ be32_to_cpu(*my_index));
}
EXPORT_SYMBOL_GPL(rpaphp_check_drc_props);
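
The rpaphp conversions above are endianness fixes: OF properties are big-endian cells, so they must be typed __be32 and converted with be32_to_cpu() before comparison; the old raw compares only happened to work on big-endian hosts. A user-space analog using the glibc byte-order helpers:

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    int main(void)
    {
            /* An OF-style property: big-endian cells in memory. */
            uint32_t prop[2] = { htobe32(3), htobe32(0x2000) };
            uint32_t my_index = 0x2000;

            /* Wrong on little-endian hosts: raw cell vs native integer. */
            printf("raw compare:     %s\n",
                   prop[1] == my_index ? "match" : "no match");
            /* Right everywhere: convert the cell first. */
            printf("be32toh compare: %s\n",
                   be32toh(prop[1]) == my_index ? "match" : "no match");
            return 0;
    }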
@@ -309,10 +312,11 @@ static int is_php_type(char *drc_type)
* for built-in pci slots (even when the built-in slots are
* dlparable.)
*/
-static int is_php_dn(struct device_node *dn, const int **indexes,
- const int **names, const int **types, const int **power_domains)
+static int is_php_dn(struct device_node *dn, const __be32 **indexes,
+ const __be32 **names, const __be32 **types,
+ const __be32 **power_domains)
{
- const int *drc_types;
+ const __be32 *drc_types;
int rc;
rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
@@ -326,33 +330,55 @@ static int is_php_dn(struct device_node *dn, const int **indexes,
return 1;
}
-/**
- * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
- * @dn: device node of slot
- *
- * This subroutine will register a hotpluggable slot with the
- * PCI hotplug infrastructure. This routine is typically called
- * during boot time, if the hotplug slots are present at boot time,
- * or is called later, by the dlpar add code, if the slot is
- * being dynamically added during runtime.
- *
- * If the device node points at an embedded (built-in) slot, this
- * routine will just return without doing anything, since embedded
- * slots cannot be hotplugged.
- *
- * To remove a slot, it suffices to call rpaphp_deregister_slot().
- */
-int rpaphp_add_slot(struct device_node *dn)
+static int rpaphp_drc_info_add_slot(struct device_node *dn)
{
struct slot *slot;
+ struct property *info;
+ struct of_drc_info drc;
+ char drc_name[MAX_DRC_NAME_LEN];
+ const __be32 *cur;
+ u32 count;
int retval = 0;
- int i;
- const int *indexes, *names, *types, *power_domains;
- char *name, *type;
- if (!dn->name || strcmp(dn->name, "pci"))
+ info = of_find_property(dn, "ibm,drc-info", NULL);
+ if (!info)
+ return 0;
+
+ cur = of_prop_next_u32(info, NULL, &count);
+ if (cur)
+ cur++;
+ else
return 0;
+ of_read_drc_info_cell(&info, &cur, &drc);
+ if (!is_php_type(drc.drc_type))
+ return 0;
+
+ sprintf(drc_name, "%s%d", drc.drc_name_prefix, drc.drc_name_suffix_start);
+
+ slot = alloc_slot_struct(dn, drc.drc_index_start, drc_name, drc.drc_power_domain);
+ if (!slot)
+ return -ENOMEM;
+
+ slot->type = simple_strtoul(drc.drc_type, NULL, 10);
+ retval = rpaphp_enable_slot(slot);
+ if (!retval)
+ retval = rpaphp_register_slot(slot);
+
+ if (retval)
+ dealloc_slot_struct(slot);
+
+ return retval;
+}
+
+static int rpaphp_drc_add_slot(struct device_node *dn)
+{
+ struct slot *slot;
+ int retval = 0;
+ int i;
+ const __be32 *indexes, *names, *types, *power_domains;
+ char *name, *type;
+
/* If this is not a hotplug slot, return without doing anything. */
if (!is_php_dn(dn, &indexes, &names, &types, &power_domains))
return 0;
@@ -391,6 +417,33 @@ int rpaphp_add_slot(struct device_node *dn)
/* XXX FIXME: reports a failure only if last entry in loop failed */
return retval;
}
+
+/**
+ * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
+ * @dn: device node of slot
+ *
+ * This subroutine will register a hotpluggable slot with the
+ * PCI hotplug infrastructure. This routine is typically called
+ * during boot time, if the hotplug slots are present at boot time,
+ * or is called later, by the dlpar add code, if the slot is
+ * being dynamically added during runtime.
+ *
+ * If the device node points at an embedded (built-in) slot, this
+ * routine will just return without doing anything, since embedded
+ * slots cannot be hotplugged.
+ *
+ * To remove a slot, it suffices to call rpaphp_deregister_slot().
+ */
+int rpaphp_add_slot(struct device_node *dn)
+{
+ if (!dn->name || strcmp(dn->name, "pci"))
+ return 0;
+
+ if (of_find_property(dn, "ibm,drc-info", NULL))
+ return rpaphp_drc_info_add_slot(dn);
+ else
+ return rpaphp_drc_add_slot(dn);
+}
EXPORT_SYMBOL_GPL(rpaphp_add_slot);
static void __exit cleanup_slots(void)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b3f972e8cfed..1e88fd427757 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
@@ -254,8 +253,14 @@ static ssize_t sriov_numvfs_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ u16 num_vfs;
+
+ /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
+ device_lock(&pdev->dev);
+ num_vfs = pdev->sriov->num_VFs;
+ device_unlock(&pdev->dev);
- return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+ return sprintf(buf, "%u\n", num_vfs);
}
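
The show path now takes the same device lock that sriov_numvfs_store() holds while creating or tearing down VFs, so a concurrent reader can never see a transient count. A runnable pthread analog of the snapshot-under-lock pattern:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int num_vfs;      /* written under dev_lock elsewhere */

    static unsigned int read_num_vfs(void)
    {
            unsigned int snapshot;

            pthread_mutex_lock(&dev_lock);
            snapshot = num_vfs;       /* consistent snapshot */
            pthread_mutex_unlock(&dev_lock);
            return snapshot;
    }

    int main(void)
    {
            printf("num_VFs: %u\n", read_num_vfs());
            return 0;
    }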
/*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0884bedcfc7a..c7709e49f0e4 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -213,12 +213,13 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
if (pci_msi_ignore_mask)
return 0;
+
desc_addr = pci_msix_desc_addr(desc);
if (!desc_addr)
return 0;
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- if (flag)
+ if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
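
The helper now tests the specific mask bit in its flag argument instead of treating any nonzero value as "mask", so stray bits from a caller cannot mask a vector by accident. A standalone rendition of the clear-then-conditionally-set sequence (0x1 mirrors PCI_MSIX_ENTRY_CTRL_MASKBIT, but treat the value as illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define ENTRY_CTRL_MASKBIT 0x1    /* illustrative stand-in */

    static uint32_t update_mask(uint32_t ctrl, uint32_t flag)
    {
            ctrl &= ~ENTRY_CTRL_MASKBIT;          /* clear the mask bit */
            if (flag & ENTRY_CTRL_MASKBIT)        /* set it only if requested */
                    ctrl |= ENTRY_CTRL_MASKBIT;
            return ctrl;
    }

    int main(void)
    {
            printf("%#x\n", update_mask(0x3, 0));                  /* unmask */
            printf("%#x\n", update_mask(0x2, ENTRY_CTRL_MASKBIT)); /* mask   */
            return 0;
    }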
@@ -861,7 +862,7 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec)
if (!pci_msi_enable)
return 0;
- if (!dev || dev->no_msi || dev->current_state != PCI_D0)
+ if (!dev || dev->no_msi)
return 0;
/*
@@ -972,7 +973,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
int nr_entries;
int i, j;
- if (!pci_msi_supported(dev, nvec))
+ if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
return -EINVAL;
nr_entries = pci_msix_vec_count(dev);
@@ -1058,7 +1059,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
int nvec;
int rc;
- if (!pci_msi_supported(dev, minvec))
+ if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
return -EINVAL;
/* Check whether driver already requested MSI-X IRQs */
@@ -1315,22 +1316,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
}
EXPORT_SYMBOL(pci_irq_get_affinity);
-/**
- * pci_irq_get_node - return the NUMA node of a particular MSI vector
- * @pdev: PCI device to operate on
- * @vec: device-relative interrupt vector index (0-based).
- */
-int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
- const struct cpumask *mask;
-
- mask = pci_irq_get_affinity(pdev, vec);
- if (mask)
- return local_memory_node(cpu_to_node(cpumask_first(mask)));
- return dev_to_node(&pdev->dev);
-}
-EXPORT_SYMBOL(pci_irq_get_node);
-
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
return to_pci_dev(desc->dev);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 36891e7deee3..81ceeaa6f1d5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -236,7 +236,6 @@ void of_pci_check_probe_only(void)
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
-#if defined(CONFIG_OF_ADDRESS)
/**
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
@@ -255,16 +254,18 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* It returns zero if the range parsing has been successful or a standard error
* value if it failed.
*/
-int devm_of_pci_get_host_bridge_resources(struct device *dev,
+static int devm_of_pci_get_host_bridge_resources(struct device *dev,
unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
+ struct list_head *resources,
+ struct list_head *ib_resources,
+ resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
- char range_type[4];
+ const char *range_type;
int err;
if (io_base)
@@ -298,12 +299,12 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
for_each_of_pci_range(&parser, &range) {
/* Read next ranges element */
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
- snprintf(range_type, 4, " IO");
+ range_type = "IO";
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
- snprintf(range_type, 4, "MEM");
+ range_type = "MEM";
else
- snprintf(range_type, 4, "err");
- dev_info(dev, " %s %#010llx..%#010llx -> %#010llx\n",
+ range_type = "err";
+ dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
range_type, range.cpu_addr,
range.cpu_addr + range.size - 1, range.pci_addr);
@@ -340,14 +341,54 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
pci_add_resource_offset(resources, res, res->start - range.pci_addr);
}
+ /* Check for dma-ranges property */
+ if (!ib_resources)
+ return 0;
+ err = of_pci_dma_range_parser_init(&parser, dev_node);
+ if (err)
+ return 0;
+
+ dev_dbg(dev, "Parsing dma-ranges property...\n");
+ for_each_of_pci_range(&parser, &range) {
+ struct resource_entry *entry;
+ /*
+ * If we failed translation or got a zero-sized region
+ * then skip this range
+ */
+ if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
+ range.cpu_addr == OF_BAD_ADDR || range.size == 0)
+ continue;
+
+ dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
+ "IB MEM", range.cpu_addr,
+ range.cpu_addr + range.size - 1, range.pci_addr);
+
+
+ err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
+ if (err)
+ continue;
+
+ res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
+ if (!res) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Keep the resource list sorted */
+ resource_list_for_each_entry(entry, ib_resources)
+ if (entry->res->start > res->start)
+ break;
+
+ pci_add_resource_offset(&entry->node, res,
+ res->start - range.pci_addr);
+ }
+
return 0;
failed:
pci_free_resource_list(resources);
return err;
}
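
The new dma-ranges pass keeps ib_resources sorted: it walks the list for the first entry whose start exceeds the new range and inserts in front of it via pci_add_resource_offset(&entry->node, ...). A runnable model of insert-before-first-greater on a singly linked list:

    #include <stdio.h>
    #include <stddef.h>

    struct entry {
            unsigned long start;
            struct entry *next;
    };

    /* Insert before the first entry whose start exceeds the new one. */
    static void insert_sorted(struct entry **head, struct entry *e)
    {
            struct entry **pos = head;

            while (*pos && (*pos)->start <= e->start)
                    pos = &(*pos)->next;
            e->next = *pos;
            *pos = e;
    }

    int main(void)
    {
            struct entry a = { 0x2000, NULL }, b = { 0x1000, NULL }, c = { 0x3000, NULL };
            struct entry *head = NULL;

            insert_sorted(&head, &a);
            insert_sorted(&head, &b);
            insert_sorted(&head, &c);
            for (struct entry *e = head; e; e = e->next)
                    printf("%#lx\n", e->start);
            return 0;
    }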
-EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
-#endif /* CONFIG_OF_ADDRESS */
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
@@ -482,6 +523,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
+ struct list_head *ib_resources,
struct resource **bus_range)
{
int err, res_valid = 0;
@@ -489,8 +531,10 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
struct resource_entry *win, *tmp;
INIT_LIST_HEAD(resources);
+ if (ib_resources)
+ INIT_LIST_HEAD(ib_resources);
err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
- &iobase);
+ ib_resources, &iobase);
if (err)
return err;
@@ -530,6 +574,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
pci_free_resource_list(resources);
return err;
}
+EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges);
#endif /* CONFIG_PCI */
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 5fd90105510d..fffa77093c08 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -270,10 +270,10 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
unsigned int flags)
{
- bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
- bridge->conf.status = PCI_STATUS_CAP_LIST;
+ bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
sizeof(pci_regs_behavior),
GFP_KERNEL);
@@ -284,8 +284,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
/* Set PCIe v2, root port, slot support */
- bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT;
+ bridge->pcie_conf.cap =
+ cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
sizeof(pcie_cap_regs_behavior),
@@ -327,7 +328,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
int reg = where & ~3;
pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
int reg, u32 *value);
- u32 *cfgspace;
+ __le32 *cfgspace;
const struct pci_bridge_reg_behavior *behavior;
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
@@ -343,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
reg -= PCI_CAP_PCIE_START;
read_op = bridge->ops->read_pcie;
- cfgspace = (u32 *) &bridge->pcie_conf;
+ cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
} else {
read_op = bridge->ops->read_base;
- cfgspace = (u32 *) &bridge->conf;
+ cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
}
@@ -357,7 +358,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
- *value = cfgspace[reg / 4];
+ *value = le32_to_cpu(cfgspace[reg / 4]);
/*
* Make sure we never return any reserved bit with a value
@@ -387,7 +388,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
int mask, ret, old, new, shift;
void (*write_op)(struct pci_bridge_emul *bridge, int reg,
u32 old, u32 new, u32 mask);
- u32 *cfgspace;
+ __le32 *cfgspace;
const struct pci_bridge_reg_behavior *behavior;
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
@@ -414,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
reg -= PCI_CAP_PCIE_START;
write_op = bridge->ops->write_pcie;
- cfgspace = (u32 *) &bridge->pcie_conf;
+ cfgspace = (__le32 *) &bridge->pcie_conf;
behavior = bridge->pcie_cap_regs_behavior;
} else {
write_op = bridge->ops->write_base;
- cfgspace = (u32 *) &bridge->conf;
+ cfgspace = (__le32 *) &bridge->conf;
behavior = bridge->pci_regs_behavior;
}
@@ -431,7 +432,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
/* Clear the W1C bits */
new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
- cfgspace[reg / 4] = new;
+ cfgspace[reg / 4] = cpu_to_le32(new);
if (write_op)
write_op(bridge, reg, old, new, mask);
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
index e65b1b79899d..b31883022a8e 100644
--- a/drivers/pci/pci-bridge-emul.h
+++ b/drivers/pci/pci-bridge-emul.h
@@ -6,65 +6,65 @@
/* PCI configuration space of a PCI-to-PCI bridge. */
struct pci_bridge_emul_conf {
- u16 vendor;
- u16 device;
- u16 command;
- u16 status;
- u32 class_revision;
+ __le16 vendor;
+ __le16 device;
+ __le16 command;
+ __le16 status;
+ __le32 class_revision;
u8 cache_line_size;
u8 latency_timer;
u8 header_type;
u8 bist;
- u32 bar[2];
+ __le32 bar[2];
u8 primary_bus;
u8 secondary_bus;
u8 subordinate_bus;
u8 secondary_latency_timer;
u8 iobase;
u8 iolimit;
- u16 secondary_status;
- u16 membase;
- u16 memlimit;
- u16 pref_mem_base;
- u16 pref_mem_limit;
- u32 prefbaseupper;
- u32 preflimitupper;
- u16 iobaseupper;
- u16 iolimitupper;
+ __le16 secondary_status;
+ __le16 membase;
+ __le16 memlimit;
+ __le16 pref_mem_base;
+ __le16 pref_mem_limit;
+ __le32 prefbaseupper;
+ __le32 preflimitupper;
+ __le16 iobaseupper;
+ __le16 iolimitupper;
u8 capabilities_pointer;
u8 reserve[3];
- u32 romaddr;
+ __le32 romaddr;
u8 intline;
u8 intpin;
- u16 bridgectrl;
+ __le16 bridgectrl;
};
/* PCI configuration space of the PCIe capabilities */
struct pci_bridge_emul_pcie_conf {
u8 cap_id;
u8 next;
- u16 cap;
- u32 devcap;
- u16 devctl;
- u16 devsta;
- u32 lnkcap;
- u16 lnkctl;
- u16 lnksta;
- u32 slotcap;
- u16 slotctl;
- u16 slotsta;
- u16 rootctl;
- u16 rsvd;
- u32 rootsta;
- u32 devcap2;
- u16 devctl2;
- u16 devsta2;
- u32 lnkcap2;
- u16 lnkctl2;
- u16 lnksta2;
- u32 slotcap2;
- u16 slotctl2;
- u16 slotsta2;
+ __le16 cap;
+ __le32 devcap;
+ __le16 devctl;
+ __le16 devsta;
+ __le32 lnkcap;
+ __le16 lnkctl;
+ __le16 lnksta;
+ __le32 slotcap;
+ __le16 slotctl;
+ __le16 slotsta;
+ __le16 rootctl;
+ __le16 rsvd;
+ __le32 rootsta;
+ __le32 devcap2;
+ __le16 devctl2;
+ __le16 devsta2;
+ __le32 lnkcap2;
+ __le16 lnkctl2;
+ __le16 lnksta2;
+ __le32 slotcap2;
+ __le16 slotctl2;
+ __le16 slotsta2;
};
struct pci_bridge_emul;
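
Typing the emulated config space as __le16/__le32 records that it is stored little-endian regardless of host byte order; every access then goes through cpu_to_leXX()/leXX_to_cpu(), a no-op on little-endian hosts and a byte swap on big-endian ones, and the annotations let sparse catch missed conversions. A user-space analog:

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    int main(void)
    {
            /* A config-space word stored little-endian, as on the wire. */
            uint16_t status_le = htole16(0x0010);  /* PCI_STATUS_CAP_LIST */

            /* Reads convert back to native order before testing bits. */
            uint16_t status = le16toh(status_le);

            printf("capability list %s\n",
                   (status & 0x0010) ? "present" : "absent");
            return 0;
    }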
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a8124e47bf6e..0454ca0e4e3f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -315,7 +315,8 @@ static long local_pci_probe(void *_ddi)
* Probe function should return < 0 for failure, 0 for success
* Treat values > 0 as success, but warn.
*/
- dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+ pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
+ rc);
return 0;
}
@@ -517,6 +518,12 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return 0;
}
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+ pci_enable_wake(pci_dev, PCI_D0, false);
+}
+
#endif
#ifdef CONFIG_PM_SLEEP
@@ -524,6 +531,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
pci_pme_restore(pci_dev);
}
@@ -578,9 +586,9 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pS\n",
- drv->suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pS\n",
+ drv->suspend);
}
}
@@ -592,46 +600,17 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
-
- if (drv && drv->suspend_late) {
- pci_power_t prev = pci_dev->current_state;
- int error;
-
- error = drv->suspend_late(pci_dev, state);
- suspend_report_result(drv->suspend_late, error);
- if (error)
- return error;
-
- if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
- && pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: Device state not saved by %pS\n",
- drv->suspend_late);
- goto Fixup;
- }
- }
if (!pci_dev->state_saved)
pci_save_state(pci_dev);
pci_pm_set_unknown_state(pci_dev);
-Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
return 0;
}
-static int pci_legacy_resume_early(struct device *dev)
-{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
-
- return drv && drv->resume_early ?
- drv->resume_early(pci_dev) : 0;
-}
-
static int pci_legacy_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -645,12 +624,6 @@ static int pci_legacy_resume(struct device *dev)
/* Auxiliary functions used by the new power management framework */
-static void pci_pm_default_resume(struct pci_dev *pci_dev)
-{
- pci_fixup_device(pci_fixup_resume, pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, false);
-}
-
static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
/* Disable non-bridge devices without PM support */
@@ -661,16 +634,15 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
struct pci_driver *drv = pci_dev->driver;
- bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
- || drv->resume_early);
+ bool ret = drv && (drv->suspend || drv->resume);
/*
* Legacy PM support is used by default, so warn if the new framework is
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
- drv->name, pci_dev->vendor, pci_dev->device);
+ pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
+ pci_dev->vendor, pci_dev->device);
return ret;
}
@@ -679,11 +651,11 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
- struct device_driver *drv = dev->driver;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (drv && drv->pm && drv->pm->prepare) {
- int error = drv->pm->prepare(dev);
+ if (pm && pm->prepare) {
+ int error = pm->prepare(dev);
if (error < 0)
return error;
@@ -793,9 +765,9 @@ static int pci_pm_suspend(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->suspend);
}
}
@@ -841,9 +813,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->suspend_noirq);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->suspend_noirq);
goto Fixup;
}
}
@@ -865,7 +837,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_prepare_to_sleep(pci_dev);
}
- dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+ pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
pci_power_name(pci_dev->current_state));
if (pci_dev->current_state == PCI_D0) {
@@ -880,7 +852,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
}
if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
- dev_dbg(dev, "PCI PM: Skipped\n");
+ pci_dbg(pci_dev, "PCI PM: Skipped\n");
goto Fixup;
}
@@ -917,8 +889,9 @@ Fixup:
static int pci_pm_resume_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
+ bool skip_bus_pm = pci_dev->skip_bus_pm;
if (dev_pm_may_skip_resume(dev))
return 0;
@@ -937,27 +910,28 @@ static int pci_pm_resume_noirq(struct device *dev)
* configuration here and attempting to put them into D0 again is
* pointless, so avoid doing that.
*/
- if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+ if (!(skip_bus_pm && pm_suspend_no_platform()))
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ pcie_pme_root_status_cleanup(pci_dev);
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
+ if (!skip_bus_pm && prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
- pcie_pme_root_status_cleanup(pci_dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return 0;
- if (drv && drv->pm && drv->pm->resume_noirq)
- error = drv->pm->resume_noirq(dev);
+ if (pm && pm->resume_noirq)
+ return pm->resume_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
/*
* This is necessary for the suspend error path in which resume is
@@ -973,12 +947,12 @@ static int pci_pm_resume(struct device *dev)
if (pm) {
if (pm->resume)
- error = pm->resume(dev);
+ return pm->resume(dev);
} else {
pci_pm_reenable_device(pci_dev);
}
- return error;
+ return 0;
}
#else /* !CONFIG_SUSPEND */
@@ -993,7 +967,6 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-
/*
* pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
* a hibernate transition
@@ -1039,16 +1012,16 @@ static int pci_pm_freeze(struct device *dev)
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
- if (drv && drv->pm && drv->pm->freeze_noirq) {
+ if (pm && pm->freeze_noirq) {
int error;
- error = drv->pm->freeze_noirq(dev);
- suspend_report_result(drv->pm->freeze_noirq, error);
+ error = pm->freeze_noirq(dev);
+ suspend_report_result(pm->freeze_noirq, error);
if (error)
return error;
}
@@ -1067,8 +1040,8 @@ static int pci_pm_freeze_noirq(struct device *dev)
static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error;
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
@@ -1076,21 +1049,25 @@ static int pci_pm_thaw_noirq(struct device *dev)
return error;
}
- if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
-
/*
- * pci_restore_state() requires the device to be in D0 (because of MSI
- * restoration among other things), so force it into D0 in case the
- * driver's "freeze" callbacks put it into a low-power state directly.
+ * The pm->thaw_noirq() callback assumes the device has been
+ * returned to D0 and its config state has been restored.
+ *
+ * In addition, pci_restore_state() restores MSI-X state in MMIO
+ * space, which requires the device to be in D0, so return it to D0
+ * in case the driver's "freeze" callbacks put it into a low-power
+ * state.
*/
pci_set_power_state(pci_dev, PCI_D0);
pci_restore_state(pci_dev);
- if (drv && drv->pm && drv->pm->thaw_noirq)
- error = drv->pm->thaw_noirq(dev);
+ if (pci_has_legacy_pm_support(pci_dev))
+ return 0;
+
+ if (pm && pm->thaw_noirq)
+ return pm->thaw_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_thaw(struct device *dev)
@@ -1161,24 +1138,24 @@ static int pci_pm_poweroff_late(struct device *dev)
static int pci_pm_poweroff_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+ if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
- if (!drv || !drv->pm) {
+ if (!pm) {
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
return 0;
}
- if (drv->pm->poweroff_noirq) {
+ if (pm->poweroff_noirq) {
int error;
- error = drv->pm->poweroff_noirq(dev);
- suspend_report_result(drv->pm->poweroff_noirq, error);
+ error = pm->poweroff_noirq(dev);
+ suspend_report_result(pm->poweroff_noirq, error);
if (error)
return error;
}
@@ -1204,8 +1181,8 @@ static int pci_pm_poweroff_noirq(struct device *dev)
static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct device_driver *drv = dev->driver;
- int error = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error;
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
@@ -1217,19 +1194,18 @@ static int pci_pm_restore_noirq(struct device *dev)
pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
- return pci_legacy_resume_early(dev);
+ return 0;
- if (drv && drv->pm && drv->pm->restore_noirq)
- error = drv->pm->restore_noirq(dev);
+ if (pm && pm->restore_noirq)
+ return pm->restore_noirq(dev);
- return error;
+ return 0;
}
static int pci_pm_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error = 0;
/*
* This is necessary for the hibernation error path in which restore is
@@ -1245,12 +1221,12 @@ static int pci_pm_restore(struct device *dev)
if (pm) {
if (pm->restore)
- error = pm->restore(dev);
+ return pm->restore(dev);
} else {
pci_pm_reenable_device(pci_dev);
}
- return error;
+ return 0;
}
#else /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -1295,11 +1271,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
* log level.
*/
if (error == -EBUSY || error == -EAGAIN) {
- dev_dbg(dev, "can't suspend now (%ps returned %d)\n",
+ pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
} else if (error) {
- dev_err(dev, "can't suspend (%ps returned %d)\n",
+ pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
pm->runtime_suspend, error);
return error;
}
@@ -1310,9 +1286,9 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (pm && pm->runtime_suspend
&& !pci_dev->state_saved && pci_dev->current_state != PCI_D0
&& pci_dev->current_state != PCI_UNKNOWN) {
- WARN_ONCE(pci_dev->current_state != prev,
- "PCI PM: State of device not saved by %pS\n",
- pm->runtime_suspend);
+ pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pS\n",
+ pm->runtime_suspend);
return 0;
}
@@ -1326,9 +1302,10 @@ static int pci_pm_runtime_suspend(struct device *dev)
static int pci_pm_runtime_resume(struct device *dev)
{
- int rc = 0;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev_state = pci_dev->current_state;
+ int error = 0;
/*
* Restoring config space is necessary even if the device is not bound
@@ -1341,22 +1318,23 @@ static int pci_pm_runtime_resume(struct device *dev)
return 0;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
- pci_enable_wake(pci_dev, PCI_D0, false);
- pci_fixup_device(pci_fixup_resume, pci_dev);
+ pci_pm_default_resume(pci_dev);
+
+ if (prev_state == PCI_D3cold)
+ pci_bridge_wait_for_secondary_bus(pci_dev);
if (pm && pm->runtime_resume)
- rc = pm->runtime_resume(dev);
+ error = pm->runtime_resume(dev);
pci_dev->runtime_d3cold = false;
- return rc;
+ return error;
}
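
pci_pm_runtime_resume() snapshots the power state before restoring the device so it can tell whether the port is coming back from D3cold, in which case it waits for the secondary bus to become ready before invoking the driver callback. A minimal illustration of the snapshot-then-act pattern (pure sketch, not the kernel helper):

    #include <stdio.h>

    enum pstate { D0, D1, D2, D3HOT, D3COLD };

    static void wait_for_secondary_bus(void)
    {
            puts("waiting for downstream devices to settle");
    }

    static void runtime_resume(enum pstate *current_state)
    {
            enum pstate prev = *current_state;   /* snapshot before resume */

            *current_state = D0;                 /* restore power and config */
            if (prev == D3COLD)                  /* full power-off needs the wait */
                    wait_for_secondary_bus();
    }

    int main(void)
    {
            enum pstate st = D3COLD;

            runtime_resume(&st);
            return 0;
    }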
static int pci_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret = 0;
/*
* If pci_dev->driver is not set (unbound), the device should
@@ -1369,9 +1347,9 @@ static int pci_pm_runtime_idle(struct device *dev)
return -ENOSYS;
if (pm->runtime_idle)
- ret = pm->runtime_idle(dev);
+ return pm->runtime_idle(dev);
- return ret;
+ return 0;
}
static const struct dev_pm_ops pci_dev_pm_ops = {
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 793412954529..13f766db0684 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1122,7 +1122,7 @@ static void pci_remove_resource_files(struct pci_dev *pdev)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct bin_attribute *res_attr;
res_attr = pdev->res_attr[i];
@@ -1193,7 +1193,7 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int retval;
/* Expose the PCI resources from this device as files */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
/* skip empty resources */
if (!pci_resource_len(pdev, i))
@@ -1330,7 +1330,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
int retval;
pcie_vpd_create_sysfs_dev_files(dev);
- pcie_aspm_create_sysfs_dev_files(dev);
if (dev->reset_fn) {
retval = device_create_file(&dev->dev, &dev_attr_reset);
@@ -1340,7 +1339,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
return 0;
error:
- pcie_aspm_remove_sysfs_dev_files(dev);
pcie_vpd_remove_sysfs_dev_files(dev);
return retval;
}
@@ -1416,7 +1414,6 @@ err:
static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
{
pcie_vpd_remove_sysfs_dev_files(dev);
- pcie_aspm_remove_sysfs_dev_files(dev);
if (dev->reset_fn) {
device_remove_file(&dev->dev, &dev_attr_reset);
dev->reset_fn = 0;
@@ -1539,24 +1536,6 @@ const struct attribute_group *pci_dev_groups[] = {
NULL,
};
-static const struct attribute_group pci_bridge_group = {
- .attrs = pci_bridge_attrs,
-};
-
-const struct attribute_group *pci_bridge_groups[] = {
- &pci_bridge_group,
- NULL,
-};
-
-static const struct attribute_group pcie_dev_group = {
- .attrs = pcie_dev_attrs,
-};
-
-const struct attribute_group *pcie_dev_groups[] = {
- &pcie_dev_group,
- NULL,
-};
-
static const struct attribute_group pci_dev_hp_attr_group = {
.attrs = pci_dev_hp_attrs,
.is_visible = pci_dev_hp_attrs_are_visible,
@@ -1588,6 +1567,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
#endif
+#ifdef CONFIG_PCIEASPM
+ &aspm_ctrl_attr_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a97e2571a527..e87196cc1a7f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -85,10 +86,17 @@ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
#define DEFAULT_HOTPLUG_IO_SIZE (256)
-#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
-/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+#define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024)
+#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024)
+/* pci=hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
-unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
+ * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
+ * pci=hpmemsize=nnM overrides both
+ */
+unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
+unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
#define DEFAULT_HOTPLUG_BUS_SIZE 1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
@@ -674,7 +682,7 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
int i;
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
if (r->start && resource_contains(r, res))
@@ -834,14 +842,16 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EINVAL;
/*
- * Validate current state:
- * Can enter D0 from any state, but if we can only go deeper
- * to sleep if we're already in a low power state
+ * Validate transition: We can enter D0 from any state, but if
+ * we're already in a low-power state, we can only go deeper. E.g.,
+ * we can go from D1 to D3, but we can't go directly from D3 to D1;
+ * we'd have to go from D3 to D0, then to D1.
*/
if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
- pci_err(dev, "invalid power transition (from state %d to %d)\n",
- dev->current_state, state);
+ pci_err(dev, "invalid power transition (from %s to %s)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
return -EINVAL;
}
@@ -851,6 +861,12 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (pmcsr == (u16) ~0) {
+ pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
+ return -EIO;
+ }
/*
* If we're (effectively) in D3, force entire word to 0.
@@ -886,13 +902,14 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
- udelay(PCI_PM_D2_DELAY);
+ msleep(PCI_PM_D2_DELAY);
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state)
- pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
- dev->current_state);
+ pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
+ pci_power_name(dev->current_state),
+ pci_power_name(state));
/*
* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -963,7 +980,7 @@ void pci_refresh_power_state(struct pci_dev *dev)
* @dev: PCI device to handle.
* @state: State to put the device into.
*/
-static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
int error;
@@ -979,6 +996,7 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
return error;
}
+EXPORT_SYMBOL_GPL(pci_platform_power_transition);
/**
* pci_wakeup - Wake up a PCI device
@@ -1002,34 +1020,70 @@ void pci_wakeup_bus(struct pci_bus *bus)
pci_walk_bus(bus, pci_wakeup, NULL);
}
+static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+{
+ int delay = 1;
+ u32 id;
+
+ /*
+ * After reset, the device should not silently discard config
+ * requests, but it may still indicate that it needs more time by
+ * responding to them with CRS completions. The Root Port will
+ * generally synthesize ~0 data to complete the read (except when
+ * CRS SV is enabled and the read was for the Vendor ID; in that
+ * case it synthesizes 0x0001 data).
+ *
+ * Wait for the device to return a non-CRS completion. Read the
+ * Command register instead of Vendor ID so we don't have to
+ * contend with the CRS SV value.
+ */
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ while (id == ~0) {
+ if (delay > timeout) {
+ pci_warn(dev, "not ready %dms after %s; giving up\n",
+ delay - 1, reset_type);
+ return -ENOTTY;
+ }
+
+ if (delay > 1000)
+ pci_info(dev, "not ready %dms after %s; waiting\n",
+ delay - 1, reset_type);
+
+ msleep(delay);
+ delay *= 2;
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ }
+
+ if (delay > 1000)
+ pci_info(dev, "ready %dms after %s\n", delay - 1,
+ reset_type);
+
+ return 0;
+}
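A minimal caller sketch (illustrative only, not part of the patch): the loop
above sleeps 1, 2, 4, ... ms, so "delay - 1" in the messages is the total time
slept so far. A hypothetical user inside this file could poll after triggering
a reset like so, reusing the PCIE_RESET_READY_POLL_MS ceiling that
pci_pm_reset() below passes in ("example_reset_and_wait" is a made-up name):

	static int example_reset_and_wait(struct pci_dev *dev)
	{
		/* hypothetical helper; pci_dev_wait() is static to pci.c */
		pci_reset_secondary_bus(pci_upstream_bridge(dev));
		return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
	}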
+
/**
- * __pci_start_power_transition - Start power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
+ * pci_power_up - Put the given device into D0
+ * @dev: PCI device to power up
*/
-static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_power_up(struct pci_dev *dev)
{
- if (state == PCI_D0) {
- pci_platform_power_transition(dev, PCI_D0);
+ pci_platform_power_transition(dev, PCI_D0);
+
+ /*
+ * Mandatory power management transition delays are handled in
+ * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
+ * corresponding bridge.
+ */
+ if (dev->runtime_d3cold) {
/*
- * Mandatory power management transition delays, see
- * PCI Express Base Specification Revision 2.0 Section
- * 6.6.1: Conventional Reset. Do not delay for
- * devices powered on/off by corresponding bridge,
- * because have already delayed for the bridge.
+ * When powering on a bridge from D3cold, the whole hierarchy may be
+ * powered on into the D0uninitialized state; resume the devices to
+ * give them a chance to suspend again.
*/
- if (dev->runtime_d3cold) {
- if (dev->d3cold_delay && !dev->imm_ready)
- msleep(dev->d3cold_delay);
- /*
- * When powering on a bridge from D3cold, the
- * whole hierarchy may be powered on into
- * D0uninitialized state, resume them to give
- * them a chance to suspend again
- */
- pci_wakeup_bus(dev->subordinate);
- }
+ pci_wakeup_bus(dev->subordinate);
}
+
+ return pci_raw_set_power_state(dev, PCI_D0);
}
/**
@@ -1057,27 +1111,6 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
}
/**
- * __pci_complete_power_transition - Complete power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
- *
- * This function should not be called directly by device drivers.
- */
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
-{
- int ret;
-
- if (state <= PCI_D0)
- return -EINVAL;
- ret = pci_platform_power_transition(dev, state);
- /* Power off the bridge may power off the whole hierarchy */
- if (!ret && state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
-
-/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
@@ -1117,7 +1150,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
if (dev->current_state == state)
return 0;
- __pci_start_power_transition(dev, state);
+ if (state == PCI_D0)
+ return pci_power_up(dev);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1133,23 +1167,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
PCI_D3hot : state);
- if (!__pci_complete_power_transition(dev, state))
- error = 0;
+ if (pci_platform_power_transition(dev, state))
+ return error;
- return error;
-}
-EXPORT_SYMBOL(pci_set_power_state);
+ /* Powering off a bridge may power off the whole hierarchy */
+ if (state == PCI_D3cold)
+ pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
-/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
- __pci_start_power_transition(dev, PCI_D0);
- pci_raw_set_power_state(dev, PCI_D0);
- pci_update_current_state(dev, PCI_D0);
+ return 0;
}
+EXPORT_SYMBOL(pci_set_power_state);
/**
* pci_choose_state - Choose the power state of a PCI device
@@ -1359,6 +1386,7 @@ int pci_save_state(struct pci_dev *dev)
pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
+ pci_save_aer_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1472,6 +1500,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_dpc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
+ pci_restore_aer_state(dev);
pci_restore_config_space(dev);
@@ -3766,7 +3795,7 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
int i;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
pci_release_region(pdev, i);
}
@@ -3777,7 +3806,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
{
int i;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
if (__pci_request_region(pdev, i, res_name, excl))
goto err_out;
@@ -3825,7 +3854,7 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
void pci_release_regions(struct pci_dev *pdev)
{
- pci_release_selected_regions(pdev, (1 << 6) - 1);
+ pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
}
EXPORT_SYMBOL(pci_release_regions);
@@ -3844,7 +3873,8 @@ EXPORT_SYMBOL(pci_release_regions);
*/
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
- return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
+ return pci_request_selected_regions(pdev,
+ ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions);
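With PCI_STD_NUM_BARS == 6, the new mask is identical to the old literal:
(1 << 6) - 1 == 0x3f, i.e. BARs 0-5. A hedged driver-side sketch using the
same helper to claim only the first two BARs ("mydrv" is a made-up name):

	/* hypothetical: request BAR 0 and BAR 1 only */
	err = pci_request_selected_regions(pdev, (1 << 0) | (1 << 1), "mydrv");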
@@ -3866,7 +3896,7 @@ EXPORT_SYMBOL(pci_request_regions);
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << 6) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), res_name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -4428,47 +4458,6 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
-static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
-{
- int delay = 1;
- u32 id;
-
- /*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 data to complete the read (except when
- * CRS SV is enabled and the read was for the Vendor ID; in that
- * case it synthesizes 0x0001 data).
- *
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
- */
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- while (id == ~0) {
- if (delay > timeout) {
- pci_warn(dev, "not ready %dms after %s; giving up\n",
- delay - 1, reset_type);
- return -ENOTTY;
- }
-
- if (delay > 1000)
- pci_info(dev, "not ready %dms after %s; waiting\n",
- delay - 1, reset_type);
-
- msleep(delay);
- delay *= 2;
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- }
-
- if (delay > 1000)
- pci_info(dev, "ready %dms after %s\n", delay - 1,
- reset_type);
-
- return 0;
-}
-
/**
* pcie_has_flr - check if a device supports function level resets
* @dev: device to check
@@ -4603,16 +4592,19 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
pci_dev_d3_sleep(dev);
- return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+ return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
}
+
/**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
*
 * Use this to wait until the link becomes active or inactive.
*/
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+ int delay)
{
int timeout = 1000;
bool ret;
@@ -4649,13 +4641,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
- msleep(100);
+ msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait until the link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+ return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
+/*
+ * Find maximum D3cold delay required by all the devices on the bus. The
+ * spec says 100 ms, but firmware can lower it and we allow drivers to
+ * increase it as well.
+ *
+ * Called with @pci_bus_sem locked for reading.
+ */
+static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+{
+ const struct pci_dev *pdev;
+ int min_delay = 100;
+ int max_delay = 0;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->d3cold_delay < min_delay)
+ min_delay = pdev->d3cold_delay;
+ if (pdev->d3cold_delay > max_delay)
+ max_delay = pdev->d3cold_delay;
+ }
+
+ return max(min_delay, max_delay);
+}
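A worked example of the min/max interplay above (illustrative): if firmware
lowered one device's d3cold_delay to 0 while a driver raised a sibling's to
300, min_delay = 0 and max_delay = 300, so this returns 300; if every device
on the bus was lowered to 0, it returns 0 and the caller skips the delay
entirely.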
+
+/**
+ * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+ * @dev: PCI bridge
+ *
+ * Handle the necessary delays before access to the devices on the
+ * secondary side of the bridge is permitted after a D3cold to D0
+ * transition.
+ *
+ * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+ * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+ * 4.3.2.
+ */
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_dev *child;
+ int delay;
+
+ if (pci_dev_is_disconnected(dev))
+ return;
+
+ if (!pci_is_bridge(dev) || !dev->bridge_d3)
+ return;
+
+ down_read(&pci_bus_sem);
+
+ /*
+ * We only deal with devices that are currently present on the bus.
+ * For any hot-added devices the access delay is handled in pciehp
+ * board_added(). In the case of ACPI hotplug the firmware is
+ * expected to configure the devices before the OS is notified.
+ */
+ if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ /* Take d3cold_delay requirements into account */
+ delay = pci_bus_max_d3cold_delay(dev->subordinate);
+ if (!delay) {
+ up_read(&pci_bus_sem);
+ return;
+ }
+
+ child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+ bus_list);
+ up_read(&pci_bus_sem);
+
+ /*
+ * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa
+ * before accessing the device after reset (that is, 1000 ms +
+ * 100 ms). In practice this should not be needed because we don't
+ * do power management for them (see pci_bridge_d3_possible()).
+ */
+ if (!pci_is_pcie(dev)) {
+ pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+ msleep(1000 + delay);
+ return;
+ }
+
+ /*
+ * PCIe downstream and root ports that do not support speeds greater
+ * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
+ * (gen3) we first need to wait for the data link layer to become
+ * active.
+ *
+ * However, 100 ms is the minimum and the PCIe spec says the
+ * software must allow at least 1s before it can determine that the
+ * device that did not respond is a broken device. There is
+ * evidence that 100 ms is not always enough; for example, certain
+ * Titan Ridge xHCI controllers do not always respond to
+ * configuration requests if we only wait for 100 ms (see
+ * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+ *
+ * Therefore we wait for 100 ms and check for the device presence;
+ * if it is still not present, give it an additional 100 ms.
+ */
+ if (!pcie_downstream_port(dev))
+ return;
+
+ if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+ msleep(delay);
+ } else {
+ pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+ delay);
+ if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ /* Did not train, no need to wait any further */
+ return;
+ }
+ }
+
+ if (!pci_device_is_present(child)) {
+ pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+ msleep(delay);
+ }
+}
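For context, one call site visible earlier in this diff is
pci_pm_runtime_resume(), which invokes this after pci_pm_default_resume()
when the bridge is coming out of D3cold:

	if (prev_state == PCI_D3cold)
		pci_bridge_wait_for_secondary_bus(pci_dev);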
+
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
@@ -5854,6 +5977,24 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+#ifdef CONFIG_ACPI
+bool pci_pr3_present(struct pci_dev *pdev)
+{
+ struct acpi_device *adev;
+
+ if (acpi_disabled)
+ return false;
+
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
+ return false;
+
+ return adev->power.flags.power_resources &&
+ acpi_has_method(adev->handle, "_PR3");
+}
+EXPORT_SYMBOL_GPL(pci_pr3_present);
+#endif
+
/**
* pci_add_dma_alias - Add a DMA devfn alias for a device
* @dev: the PCI device for which alias is added
@@ -6286,8 +6427,13 @@ static int __init pci_setup(char *str)
pcie_ecrc_get_policy(str + 5);
} else if (!strncmp(str, "hpiosize=", 9)) {
pci_hotplug_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "hpmmiosize=", 11)) {
+ pci_hotplug_mmio_size = memparse(str + 11, &str);
+ } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
+ pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
- pci_hotplug_mem_size = memparse(str + 10, &str);
+ pci_hotplug_mmio_size = memparse(str + 10, &str);
+ pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
} else if (!strncmp(str, "hpbussize=", 10)) {
pci_hotplug_bus_size =
simple_strtoul(str + 10, &str, 0);
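A hedged usage example for the parsing above (the sizes are arbitrary; pci=
options are comma-separated on the kernel command line):

	pci=hpmmiosize=8M,hpmmioprefsize=256M   # size the two MMIO windows separately
	pci=hpmemsize=128M                      # legacy spelling: sets both windows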
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 3f6947ee3324..a0a53bd05a0b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -12,6 +12,7 @@ extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
+bool pcie_cap_has_rtctl(const struct pci_dev *dev);
/* Functions internal to the PCI core code */
@@ -85,7 +86,7 @@ struct pci_platform_pm_ops {
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
-void pci_power_up(struct pci_dev *dev);
+int pci_power_up(struct pci_dev *dev);
void pci_disable_enabled_device(struct pci_dev *dev);
int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
@@ -104,6 +105,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -218,7 +220,8 @@ extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_mmio_size;
+extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
/**
@@ -456,6 +459,22 @@ static inline void pci_ats_init(struct pci_dev *d) { }
static inline void pci_restore_ats_state(struct pci_dev *dev) { }
#endif /* CONFIG_PCI_ATS */
+#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *dev);
+void pci_restore_pri_state(struct pci_dev *pdev);
+#else
+static inline void pci_pri_init(struct pci_dev *dev) { }
+static inline void pci_restore_pri_state(struct pci_dev *pdev) { }
+#endif
+
+#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *dev);
+void pci_restore_pasid_state(struct pci_dev *pdev);
+#else
+static inline void pci_pasid_init(struct pci_dev *dev) { }
+static inline void pci_restore_pasid_state(struct pci_dev *pdev) { }
+#endif
+
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
@@ -541,14 +560,6 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
-#ifdef CONFIG_PCIEASPM_DEBUG
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { }
-static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { }
-#endif
-
#ifdef CONFIG_PCIE_ECRC
void pcie_set_ecrc_checking(struct pci_dev *dev);
void pcie_ecrc_get_policy(char *str);
@@ -630,19 +641,6 @@ static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
#endif /* CONFIG_OF */
-#if defined(CONFIG_OF_ADDRESS)
-int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base);
-#else
-static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
- struct list_head *resources, resource_size_t *io_base)
-{
- return -EINVAL;
-}
-#endif
-
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
@@ -667,4 +665,8 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
}
#endif
+#ifdef CONFIG_PCIEASPM
+extern const struct attribute_group aspm_ctrl_attr_group;
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 362eb8cfa53b..6e3c04b46fb1 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -4,7 +4,6 @@
#
config PCIEPORTBUS
bool "PCI Express Port Bus support"
- depends on PCI
help
This enables PCI Express Port Bus support. Users can then enable
support for Native Hot-Plug, Advanced Error Reporting, Power
@@ -63,7 +62,6 @@ config PCIE_ECRC
#
config PCIEASPM
bool "PCI Express ASPM control" if EXPERT
- depends on PCI && PCIEPORTBUS
default y
help
This enables OS control over PCI Express ASPM (Active State
@@ -79,13 +77,6 @@ config PCIEASPM
When in doubt, say Y.
-config PCIEASPM_DEBUG
- bool "Debug PCI Express ASPM"
- depends on PCIEASPM
- help
- This enables PCI Express ASPM debug support. It will add per-device
- interface to control ASPM.
-
choice
prompt "Default ASPM policy"
default PCIEASPM_DEFAULT
@@ -135,7 +126,6 @@ config PCIE_DPC
config PCIE_PTM
bool "PCI Express Precision Time Measurement support"
- depends on PCIEPORTBUS
help
This enables PCI Express Precision Time Measurement (PTM)
support.
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index b45bc47d04fe..1ca86f2e0166 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "AER: " fmt
#define dev_fmt pr_fmt
+#include <linux/bitops.h>
#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
@@ -36,7 +37,7 @@
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
unsigned int status;
@@ -201,6 +202,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
/**
* pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ * @str: ECRC policy from kernel command line to use
*/
void pcie_ecrc_get_policy(char *str)
{
@@ -448,12 +450,70 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
+void pci_save_aer_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+ int pos;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++);
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++);
+ if (pcie_cap_has_rtctl(dev))
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++);
+}
+
+void pci_restore_aer_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+ int pos;
+
+ pos = dev->aer_cap;
+ if (!pos)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++);
+ if (pcie_cap_has_rtctl(dev))
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++);
+}
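A sketch of the save-buffer layout implied by the two functions above:
cap.data[0..3] hold PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
PCI_ERR_COR_MASK and PCI_ERR_CAP in that order, and cap.data[4] holds
PCI_ERR_ROOT_COMMAND only on ports where pcie_cap_has_rtctl() is true,
matching the "n = pcie_cap_has_rtctl(dev) ? 5 : 4" sizing in
pci_aer_init() below.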
+
void pci_aer_init(struct pci_dev *dev)
{
+ int n;
+
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!dev->aer_cap)
+ return;
- if (dev->aer_cap)
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+ /*
+ * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
+ * PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event
+ * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
+ * 7.8.4).
+ */
+ n = pcie_cap_has_rtctl(dev) ? 5 : 4;
+ pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
pci_cleanup_aer_error_status_regs(dev);
}
@@ -560,6 +620,7 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"BlockedTLP", /* Bit Position 23 */
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
+ "PoisonTLPBlocked", /* Bit Position 26 */
};
static const char *aer_agent_string[] = {
@@ -657,7 +718,8 @@ const struct attribute_group aer_stats_attr_group = {
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
- int status, i, max = -1;
+ unsigned long status = info->status & ~info->mask;
+ int i, max = -1;
u64 *counter = NULL;
struct aer_stats *aer_stats = pdev->aer_stats;
@@ -682,10 +744,8 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
break;
}
- status = (info->status & ~info->mask);
- for (i = 0; i < max; i++)
- if (status & (1 << i))
- counter[i]++;
+ for_each_set_bit(i, &status, max)
+ counter[i]++;
}
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
@@ -717,14 +777,11 @@ static void __print_tlp_header(struct pci_dev *dev,
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
- int i, status;
+ unsigned long status = info->status & ~info->mask;
const char *errmsg = NULL;
- status = (info->status & ~info->mask);
-
- for (i = 0; i < 32; i++) {
- if (!(status & (1 << i)))
- continue;
+ int i;
+ for_each_set_bit(i, &status, 32) {
if (info->severity == AER_CORRECTABLE)
errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
aer_correctable_error_string[i] : NULL;
@@ -1204,7 +1261,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
/**
* aer_isr - consume errors detected by root port
- * @work: definition of this work item
+ * @irq: IRQ assigned to Root Port
+ * @context: pointer to Root Port data structure
*
 * Invoked, as DPC, when the Root Port records a newly detected error
*/
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 652ef23bba35..0dcd44308228 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -64,6 +64,7 @@ struct pcie_link_state {
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- /* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable)
+ /*
+ * Don't enable Clock PM if the link is not Clock PM capable
+ * or Clock PM has been disabled.
+ */
+ if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- link->clkpm_capable = (blacklist) ? 0 : capable;
+ link->clkpm_capable = capable;
+ link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -894,6 +899,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
return link;
}
+static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
+{
+ struct pci_dev *child;
+
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
+ sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
+}
+
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
* It is called after the pcie and its children devices are scanned.
@@ -955,6 +968,8 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
+ pcie_aspm_update_sysfs_visibility(pdev);
+
unlock:
mutex_unlock(&aspm_lock);
out:
@@ -1061,19 +1076,26 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
{
- struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link;
+ struct pci_dev *bridge;
if (!pci_is_pcie(pdev))
- return 0;
+ return NULL;
- if (pcie_downstream_port(pdev))
- parent = pdev;
- if (!parent || !parent->link_state)
- return -EINVAL;
+ bridge = pci_upstream_bridge(pdev);
+ if (!bridge || !pci_is_pcie(bridge))
+ return NULL;
+ return bridge->link_state;
+}
+
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+{
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+
+ if (!link)
+ return -EINVAL;
/*
* A driver requested that ASPM be disabled on this device, but
* if we don't have permission to manage ASPM (e.g., on ACPI
@@ -1090,17 +1112,24 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
if (sem)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link = parent->link_state;
if (state & PCIE_LINK_STATE_L0S)
link->aspm_disable |= ASPM_STATE_L0S;
if (state & PCIE_LINK_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1;
+ /* L1 PM substates require L1 */
+ link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1_1)
+ link->aspm_disable |= ASPM_STATE_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+ link->aspm_disable |= ASPM_STATE_L1_2;
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+ link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+ link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
- if (state & PCIE_LINK_STATE_CLKPM) {
- link->clkpm_capable = 0;
- pcie_set_clkpm(link, 0);
- }
+ if (state & PCIE_LINK_STATE_CLKPM)
+ link->clkpm_disable = 1;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);
@@ -1172,127 +1201,161 @@ module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
/**
* pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
* @pdev: Target device.
+ *
+ * Relies on the upstream bridge's link_state being valid. The link_state
+ * is deallocated only when the last child of the bridge (i.e., @pdev or a
+ * sibling) is removed, and the caller should be holding a reference to
+ * @pdev, so this should be safe.
*/
bool pcie_aspm_enabled(struct pci_dev *pdev)
{
- struct pci_dev *bridge = pci_upstream_bridge(pdev);
- bool ret;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- if (!bridge)
+ if (!link)
return false;
- mutex_lock(&aspm_lock);
- ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
- mutex_unlock(&aspm_lock);
-
- return ret;
+ return link->aspm_enabled;
}
EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
-#ifdef CONFIG_PCIEASPM_DEBUG
-static ssize_t link_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t aspm_attr_show_common(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 state)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link_state->aspm_enabled);
+ return sprintf(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
}
-static ssize_t link_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
+static ssize_t aspm_attr_store_common(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 state)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pcie_link_state *link, *root = pdev->link_state->root;
- u32 state;
-
- if (aspm_disabled)
- return -EPERM;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ bool state_enable;
- if (kstrtouint(buf, 10, &state))
- return -EINVAL;
- if ((state & ~ASPM_STATE_ALL) != 0)
+ if (strtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- list_for_each_entry(link, &link_list, sibling) {
- if (link->root != root)
- continue;
- pcie_config_aspm_link(link, state);
+
+ if (state_enable) {
+ link->aspm_disable &= ~state;
+ /* need to enable L1 for substates */
+ if (state & ASPM_STATE_L1SS)
+ link->aspm_disable &= ~ASPM_STATE_L1;
+ } else {
+ link->aspm_disable |= state;
}
+
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
- return n;
+
+ return len;
}
-static ssize_t clk_ctl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+#define ASPM_ATTR(_f, _s) \
+static ssize_t _f##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+ \
+static ssize_t _f##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+
+ASPM_ATTR(l0s_aspm, L0S)
+ASPM_ATTR(l1_aspm, L1)
+ASPM_ATTR(l1_1_aspm, L1_1)
+ASPM_ATTR(l1_2_aspm, L1_2)
+ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
+ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
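For reference, a hand expansion of one invocation (not literally in the
patch, just what the preprocessor generates): ASPM_ATTR(l1_aspm, L1)
becomes:

	static ssize_t l1_aspm_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_L1); }

	static ssize_t l1_aspm_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
	{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_L1); }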
+
+static ssize_t clkpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
- struct pcie_link_state *link_state = pci_device->link_state;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
- return sprintf(buf, "%d\n", link_state->clkpm_enabled);
+ return sprintf(buf, "%d\n", link->clkpm_enabled);
}
-static ssize_t clk_ctl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t n)
+static ssize_t clkpm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct pci_dev *pdev = to_pci_dev(dev);
- bool state;
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ bool state_enable;
- if (strtobool(buf, &state))
+ if (strtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clkpm_nocheck(pdev->link_state, state);
+
+ link->clkpm_disable = !state_enable;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
- return n;
+ return len;
}
-static DEVICE_ATTR_RW(link_state);
-static DEVICE_ATTR_RW(clk_ctl);
+static DEVICE_ATTR_RW(clkpm);
+static DEVICE_ATTR_RW(l0s_aspm);
+static DEVICE_ATTR_RW(l1_aspm);
+static DEVICE_ATTR_RW(l1_1_aspm);
+static DEVICE_ATTR_RW(l1_2_aspm);
+static DEVICE_ATTR_RW(l1_1_pcipm);
+static DEVICE_ATTR_RW(l1_2_pcipm);
+
+static struct attribute *aspm_ctrl_attrs[] = {
+ &dev_attr_clkpm.attr,
+ &dev_attr_l0s_aspm.attr,
+ &dev_attr_l1_aspm.attr,
+ &dev_attr_l1_1_aspm.attr,
+ &dev_attr_l1_2_aspm.attr,
+ &dev_attr_l1_1_pcipm.attr,
+ &dev_attr_l1_2_pcipm.attr,
+ NULL
+};
-static char power_group[] = "power";
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
+static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
- if (!link_state)
- return;
-
- if (link_state->aspm_support)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clkpm_capable)
- sysfs_add_file_to_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
-}
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+ static const u8 aspm_state_map[] = {
+ ASPM_STATE_L0S,
+ ASPM_STATE_L1,
+ ASPM_STATE_L1_1,
+ ASPM_STATE_L1_2,
+ ASPM_STATE_L1_1_PCIPM,
+ ASPM_STATE_L1_2_PCIPM,
+ };
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
- struct pcie_link_state *link_state = pdev->link_state;
+ if (aspm_disabled || !link)
+ return 0;
- if (!link_state)
- return;
+ if (n == 0)
+ return link->clkpm_capable ? a->mode : 0;
- if (link_state->aspm_support)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_link_state.attr, power_group);
- if (link_state->clkpm_capable)
- sysfs_remove_file_from_group(&pdev->dev.kobj,
- &dev_attr_clk_ctl.attr, power_group);
+ return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
}
-#endif
+
+const struct attribute_group aspm_ctrl_attr_group = {
+ .name = "link",
+ .attrs = aspm_ctrl_attrs,
+ .is_visible = aspm_ctrl_attrs_are_visible,
+};
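Together with the is_visible() callback, these attributes surface as a
per-device "link" group in sysfs, and each file appears only when the
upstream link supports the corresponding state. A hypothetical device path
for illustration (writes go through strtobool, so 0/1 or y/n are accepted):

	/sys/bus/pci/devices/0000:01:00.0/link/clkpm
	/sys/bus/pci/devices/0000:01:00.0/link/l0s_aspm
	/sys/bus/pci/devices/0000:01:00.0/link/l1_aspm
	/sys/bus/pci/devices/0000:01:00.0/link/l1_1_aspm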
static int __init pcie_aspm_disable(char *str)
{
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a32ec3487a8d..e06f42f58d3d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -291,7 +291,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- if (pcie_aer_get_firmware_first(pdev))
+ if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 944827a8c7d3..1e673619b101 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -25,6 +25,8 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 5
+extern bool pcie_ports_dpc_native;
+
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
#else
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 1b330129089f..5075cb9e850c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -250,8 +250,13 @@ static int get_port_device_capability(struct pci_dev *dev)
pcie_pme_interrupt_enable(dev, false);
}
+ /*
+ * With dpc-native, allow Linux to use DPC even if it doesn't have
+ * permission to use AER.
+ */
if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
- pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
+ pci_aer_available() &&
+ (pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 0a87091a0800..160d67c59310 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -29,12 +29,20 @@ bool pcie_ports_disabled;
*/
bool pcie_ports_native;
+/*
+ * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe
+ * service even if the platform hasn't given us permission.
+ */
+bool pcie_ports_dpc_native;
+
static int __init pcie_port_setup(char *str)
{
if (!strncmp(str, "compat", 6))
pcie_ports_disabled = true;
else if (!strncmp(str, "native", 6))
pcie_ports_native = true;
+ else if (!strncmp(str, "dpc-native", 10))
+ pcie_ports_dpc_native = true;
return 1;
}
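Based on the parsing above, a boot-time example (illustrative) that lets
Linux drive DPC even where the platform retains AER control:

	pcie_ports=dpc-native

With this set, get_port_device_capability() in the earlier hunk enables
PCIE_PORT_SERVICE_DPC without requiring the AER service to be granted, and
dpc_probe() no longer bails out when AER is firmware-first.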
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 98cfa30f3fae..9361f3aa26ab 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev)
snprintf(clock_desc, sizeof(clock_desc), ">254ns");
break;
default:
- snprintf(clock_desc, sizeof(clock_desc), "%udns",
+ snprintf(clock_desc, sizeof(clock_desc), "%uns",
dev->ptm_granularity);
break;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 3d5271a7a849..512cb4312ddd 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/msi.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
@@ -572,6 +573,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
bridge->release_fn(bridge);
pci_free_resource_list(&bridge->windows);
+ pci_free_resource_list(&bridge->dma_ranges);
}
static void pci_release_host_bridge_dev(struct device *dev)
@@ -897,6 +899,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
else
pr_info("PCI host bridge to bus %s\n", name);
+ if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
+ dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+
/* Add initial resources to the bus */
resource_list_for_each_entry_safe(window, n, &resources) {
list_move_tail(&window->node, &bridge->windows);
@@ -1089,14 +1094,15 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* @sec: updated with secondary bus number from EA
* @sub: updated with subordinate bus number from EA
*
- * If @dev is a bridge with EA capability, update @sec and @sub with
- * fixed bus numbers from the capability and return true. Otherwise,
- * return false.
+ * If @dev is a bridge with EA capability that specifies valid secondary
+ * and subordinate bus numbers, return true with the bus numbers in @sec
+ * and @sub. Otherwise return false.
*/
static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{
int ea, offset;
u32 dw;
+ u8 ea_sec, ea_sub;
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
return false;
@@ -1108,8 +1114,13 @@ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
offset = ea + PCI_EA_FIRST_ENT;
pci_read_config_dword(dev, offset, &dw);
- *sec = dw & PCI_EA_SEC_BUS_MASK;
- *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ ea_sec = dw & PCI_EA_SEC_BUS_MASK;
+ ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ if (ea_sec == 0 || ea_sub < ea_sec)
+ return false;
+
+ *sec = ea_sec;
+ *sub = ea_sub;
return true;
}
@@ -2300,8 +2311,7 @@ void pcie_report_downtraining(struct pci_dev *dev)
static void pci_init_capabilities(struct pci_dev *dev)
{
- /* Enhanced Allocation */
- pci_ea_init(dev);
+ pci_ea_init(dev); /* Enhanced Allocation */
/* Setup MSI caps & disable MSI/MSI-X interrupts */
pci_msi_setup_pci_dev(dev);
@@ -2309,29 +2319,16 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
- /* Power Management */
- pci_pm_init(dev);
-
- /* Vital Product Data */
- pci_vpd_init(dev);
-
- /* Alternative Routing-ID Forwarding */
- pci_configure_ari(dev);
-
- /* Single Root I/O Virtualization */
- pci_iov_init(dev);
-
- /* Address Translation Services */
- pci_ats_init(dev);
-
- /* Enable ACS P2P upstream forwarding */
- pci_enable_acs(dev);
-
- /* Precision Time Measurement */
- pci_ptm_init(dev);
-
- /* Advanced Error Reporting */
- pci_aer_init(dev);
+ pci_pm_init(dev); /* Power Management */
+ pci_vpd_init(dev); /* Vital Product Data */
+ pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
+ pci_iov_init(dev); /* Single Root I/O Virtualization */
+ pci_ats_init(dev); /* Address Translation Services */
+ pci_pri_init(dev); /* Page Request Interface */
+ pci_pasid_init(dev); /* Process Address Space ID */
+ pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */
+ pci_ptm_init(dev); /* Precision Time Measurement */
+ pci_aer_init(dev); /* Advanced Error Reporting */
pcie_report_downtraining(dev);
@@ -2403,13 +2400,10 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
- /* Moved out from quirk header fixup code */
pci_reassigndev_resource_alignment(dev);
- /* Clear the state_saved flag */
dev->state_saved = false;
- /* Initialize various capabilities */
pci_init_capabilities(dev);
/*
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 5495537c60c2..6ef74bf5013f 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -258,13 +258,13 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Make sure the caller is mapping a real resource for this device */
- for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (dev->resource[i].flags & res_bit &&
pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
- if (i >= PCI_ROM_RESOURCE)
+ if (i >= PCI_STD_NUM_BARS)
return -ENODEV;
if (fpriv->mmap_state == pci_mmap_mem &&
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 320255e5e8f8..4937a088d7d8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -474,7 +474,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1809,7 +1809,7 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
* The next five BARs all seem to be rubbish, so just clean
* them out.
*/
- for (i = 1; i < 6; i++)
+ for (i = 1; i < PCI_STD_NUM_BARS; i++)
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
@@ -4033,7 +4033,6 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
if (id)
pci_add_dma_alias(dev, id->driver_data);
}
-
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
/*
@@ -4081,6 +4080,40 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
/*
+ * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices
+ * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx).
+ *
+ * As with the MIC x200, we need to add DMA aliases to allow buffer access
+ * when the IOMMU is enabled. These aliases allow computational unit access
+ * to host memory and mark the whole VCA device as one IOMMU group.
+ *
+ * All possible slot numbers (0x20) are used, since we are unable to tell
+ * what slot is used on the other side. This quirk is intended for both the
+ * host and the computational unit sides. The VCA devices have up to five
+ * functions (four for DMA channels and one additional).
+ */
+static void quirk_pex_vca_alias(struct pci_dev *pdev)
+{
+ const unsigned int num_pci_slots = 0x20;
+ unsigned int slot;
+
+ for (slot = 0; slot < num_pci_slots; slot++) {
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
+ pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
+
+/*
* The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
* associated not at the root bus, but at a bridge below. This quirk avoids
* generating invalid DMA aliases.
@@ -4263,6 +4296,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
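Two illustrative evaluations of the helper above:
pci_acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_CR, PCI_ACS_RR) returns 0
(Completion Redirection was requested but not provided), while
pci_acs_ctrl_enabled(PCI_ACS_RR, PCI_ACS_RR | PCI_ACS_CR) returns 1.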
+
+/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
* IOMMU in the system. Multifunction devices that do not support
@@ -4305,7 +4356,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4313,33 +4364,38 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ switch (dev->device) {
/*
- * Effectively selects all downstream ports for whole ThunderX 1
- * family by 0xf800 mask (which represents 8 SoCs), while the lower
- * bits of device ID are used to indicate which subdevice is used
- * within the SoC.
+ * Effectively selects all downstream ports for whole ThunderX1
+ * (which represents 8 SoCs).
*/
- return (pci_is_pcie(dev) &&
- (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
- ((dev->device & 0xf800) == 0xa000));
+ case 0xa000 ... 0xa7ff: /* ThunderX1 */
+ case 0xaf84: /* ThunderX2 */
+ case 0xb884: /* ThunderX3 */
+ return true;
+ default:
+ return false;
+ }
}
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4349,13 +4405,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4403,37 +4458,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
-
- pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4534,7 +4584,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4548,10 +4598,9 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4562,9 +4611,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
* Allow each Root Port to be in a separate IOMMU group by masking
* SV/RR/CR/UF bits.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4666,6 +4714,17 @@ static const struct pci_dev_acs_enabled {
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
@@ -4706,7 +4765,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
#define INTEL_BSPR_REG_BPPD (1 << 9)
/* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f
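For reference, the quirks above now funnel through a single helper added
earlier in this patch (its definition is outside this hunk). A minimal
sketch consistent with the call sites and the kerneldoc above, not
necessarily the exact body:

static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
{
	if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
		return 1;	/* every requested control is enabled */
	return 0;		/* at least one requested control is missing */
}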
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index e7dbe21705ba..f279826204eb 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
/*
- * Helper function for sizing routines: find first available bus resource
- * of a given type. Note: we intentionally skip the bus resources which
- * have already been assigned (that is, have non-NULL parent resource).
+ * Helper function for sizing routines. Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return the first unassigned resource of the correct type. If there is
+ * none, return the first assigned resource of the correct type. If none
+ * of the above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish between already assigned and no resource of the correct type.
*/
-static struct resource *find_free_bus_resource(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
{
+ struct resource *r, *r_assigned = NULL;
int i;
- struct resource *r;
pci_bus_for_each_resource(bus, r, i) {
if (r == &ioport_resource || r == &iomem_resource)
continue;
if (r && (r->flags & type_mask) == type && !r->parent)
return r;
+ if (r && (r->flags & type_mask) == type && !r_assigned)
+ r_assigned = r;
}
- return NULL;
+ return r_assigned;
}
static resource_size_t calculate_iosize(resource_size_t size,
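The tri-state contract above shows up in both sizing callers later in
this file; condensed, the pattern is (a sketch reusing the names from
the diff):

b_res = find_bus_resource_of_type(bus, IORESOURCE_IO, IORESOURCE_IO);
if (!b_res)
	return;		/* no window of this type on the bus */
if (b_res->parent)
	return;		/* window exists but is already assigned */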
@@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (!b_res)
return;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return;
+
min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
resource_size_t min_align, align, size, size0, size1;
resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
int order, max_order;
- struct resource *b_res = find_free_bus_resource(bus,
+ struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
@@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (!b_res)
return -ENOSPC;
+ /* If resource is already assigned, nothing more to do */
+ if (b_res->parent)
+ return 0;
+
memset(aligns, 0, sizeof(aligns));
max_order = 0;
size = 0;
@@ -1178,7 +1194,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
unsigned long mask, prefmask, type2 = 0, type3 = 0;
- resource_size_t additional_mem_size = 0, additional_io_size = 0;
+ resource_size_t additional_io_size = 0, additional_mmio_size = 0,
+ additional_mmio_pref_size = 0;
struct resource *b_res;
int ret;
@@ -1212,7 +1229,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pci_bridge_check_ranges(bus);
if (bus->self->is_hotplug_bridge) {
additional_io_size = pci_hotplug_io_size;
- additional_mem_size = pci_hotplug_mem_size;
+ additional_mmio_size = pci_hotplug_mmio_size;
+ additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
}
/* Fall through */
default:
@@ -1230,9 +1248,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (b_res[2].flags & IORESOURCE_MEM_64) {
prefmask |= IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
/*
* If successful, all non-prefetchable resources
@@ -1254,9 +1272,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (!type2) {
prefmask &= ~IORESOURCE_MEM_64;
ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
/*
* If successful, only non-prefetchable resources
@@ -1265,7 +1283,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
if (ret == 0)
mask = prefmask;
else
- additional_mem_size += additional_mem_size;
+ additional_mmio_size += additional_mmio_pref_size;
type2 = type3 = IORESOURCE_MEM;
}
@@ -1285,8 +1303,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
* prefetchable resource in a 64-bit prefetchable window.
*/
pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
- realloc_head ? 0 : additional_mem_size,
- additional_mem_size, realloc_head);
+ realloc_head ? 0 : additional_mmio_size,
+ additional_mmio_size, realloc_head);
break;
}
}
@@ -2066,6 +2084,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
unsigned int i;
int ret;
+ down_read(&pci_bus_sem);
+
/* Walk to the root hub, releasing bridge BARs when possible */
next = bridge;
do {
@@ -2100,8 +2120,10 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
next = bridge->bus ? bridge->bus->self : NULL;
} while (next);
- if (list_empty(&saved))
+ if (list_empty(&saved)) {
+ up_read(&pci_bus_sem);
return -ENOENT;
+ }
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
@@ -2122,6 +2144,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
}
free_list(&saved);
+ up_read(&pci_bus_sem);
return 0;
cleanup:
@@ -2150,6 +2173,7 @@ cleanup:
pci_setup_bridge(bridge->subordinate);
}
free_list(&saved);
+ up_read(&pci_bus_sem);
return ret;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 8c94cd3fd1f2..88091bbfe77f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -675,7 +675,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
return -ENOMEM;
s->global = ioread32(&stdev->mmio_sw_event->global_summary);
- s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+ s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
for (i = 0; i < stdev->partition_count; i++) {
@@ -1025,7 +1025,7 @@ static const struct file_operations switchtec_fops = {
.read = switchtec_dev_read,
.poll = switchtec_dev_poll,
.unlocked_ioctl = switchtec_dev_ioctl,
- .compat_ioctl = switchtec_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void link_event_work(struct work_struct *work)
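The .compat_ioctl change above adopts the generic compat_ptr_ioctl()
helper, intended for drivers whose native handler already deals in
pointer-sized arguments. Roughly, the helper does the following (a
sketch of the generic kernel helper; see fs/ioctl.c for the
authoritative version):

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* Convert the 32-bit user pointer, then call the native handler */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}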
diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig
index 215425296c77..3dab79e9d52b 100644
--- a/drivers/phy/allwinner/Kconfig
+++ b/drivers/phy/allwinner/Kconfig
@@ -45,3 +45,14 @@ config PHY_SUN9I_USB
sun9i SoCs.
This driver controls each individual USB 2 host PHY.
+
+config PHY_SUN50I_USB3
+ tristate "Allwinner H6 SoC USB3 PHY driver"
+ depends on ARCH_SUNXI && HAS_IOMEM && OF
+ depends on RESET_CONTROLLER
+ select GENERIC_PHY
+ help
+ Enable this to support the USB3.0-capable transceiver that is
+	  part of the Allwinner H6 SoC.
+
+ This driver controls each individual USB 2+3 host PHY combo.
diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile
index 799a65c0b58d..bd74901a1255 100644
--- a/drivers/phy/allwinner/Makefile
+++ b/drivers/phy/allwinner/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o
+obj-$(CONFIG_PHY_SUN50I_USB3) += phy-sun50i-usb3.o
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
new file mode 100644
index 000000000000..1169f3e83a6f
--- /dev/null
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Allwinner sun50i(H6) USB 3.0 phy driver
+ *
+ * Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ * Based on phy-sun9i-usb.c, which is:
+ *
+ * Copyright (C) 2014-2015 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on code from Allwinner BSP, which is:
+ *
+ * Copyright (c) 2010-2015 Allwinner Technology Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* Interface Status and Control Registers */
+#define SUNXI_ISCR 0x00
+#define SUNXI_PIPE_CLOCK_CONTROL 0x14
+#define SUNXI_PHY_TUNE_LOW 0x18
+#define SUNXI_PHY_TUNE_HIGH 0x1c
+#define SUNXI_PHY_EXTERNAL_CONTROL 0x20
+
+/* USB2.0 Interface Status and Control Register */
+#define SUNXI_ISCR_FORCE_VBUS (3 << 12)
+
+/* PIPE Clock Control Register */
+#define SUNXI_PCC_PIPE_CLK_OPEN (1 << 6)
+
+/* PHY External Control Register */
+#define SUNXI_PEC_EXTERN_VBUS (3 << 1)
+#define SUNXI_PEC_SSC_EN (1 << 24)
+#define SUNXI_PEC_REF_SSP_EN (1 << 26)
+
+/* PHY Tune High Register */
+#define SUNXI_TX_DEEMPH_3P5DB(n) ((n) << 19)
+#define SUNXI_TX_DEEMPH_3P5DB_MASK GENMASK(24, 19)
+#define SUNXI_TX_DEEMPH_6DB(n) ((n) << 13)
+#define SUNXI_TX_DEEMPH_6DB_MASK GENMASK(18, 13)
+#define SUNXI_TX_SWING_FULL(n) ((n) << 6)
+#define SUNXI_TX_SWING_FULL_MASK GENMASK(12, 6)
+#define SUNXI_LOS_BIAS(n) ((n) << 3)
+#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
+#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
+#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
+
+struct sun50i_usb3_phy {
+ struct phy *phy;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct clk *clk;
+};
+
+static void sun50i_usb3_phy_open(struct sun50i_usb3_phy *phy)
+{
+ u32 val;
+
+ val = readl(phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+ val |= SUNXI_PEC_EXTERN_VBUS;
+ val |= SUNXI_PEC_SSC_EN | SUNXI_PEC_REF_SSP_EN;
+ writel(val, phy->regs + SUNXI_PHY_EXTERNAL_CONTROL);
+
+ val = readl(phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+ val |= SUNXI_PCC_PIPE_CLK_OPEN;
+ writel(val, phy->regs + SUNXI_PIPE_CLOCK_CONTROL);
+
+ val = readl(phy->regs + SUNXI_ISCR);
+ val |= SUNXI_ISCR_FORCE_VBUS;
+ writel(val, phy->regs + SUNXI_ISCR);
+
+ /*
+ * All the magic numbers written to the PHY_TUNE_{LOW,HIGH}
+ * registers are taken directly from the BSP USB3 driver from
+ * Allwinner.
+ */
+ writel(0x0047fc87, phy->regs + SUNXI_PHY_TUNE_LOW);
+
+ val = readl(phy->regs + SUNXI_PHY_TUNE_HIGH);
+ val &= ~(SUNXI_TXVBOOSTLVL_MASK | SUNXI_LOS_BIAS_MASK |
+ SUNXI_TX_SWING_FULL_MASK | SUNXI_TX_DEEMPH_6DB_MASK |
+ SUNXI_TX_DEEMPH_3P5DB_MASK);
+ val |= SUNXI_TXVBOOSTLVL(0x7);
+ val |= SUNXI_LOS_BIAS(0x7);
+ val |= SUNXI_TX_SWING_FULL(0x55);
+ val |= SUNXI_TX_DEEMPH_6DB(0x20);
+ val |= SUNXI_TX_DEEMPH_3P5DB(0x15);
+ writel(val, phy->regs + SUNXI_PHY_TUNE_HIGH);
+}
+
+static int sun50i_usb3_phy_init(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ ret = clk_prepare_enable(phy->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(phy->reset);
+ if (ret) {
+ clk_disable_unprepare(phy->clk);
+ return ret;
+ }
+
+ sun50i_usb3_phy_open(phy);
+ return 0;
+}
+
+static int sun50i_usb3_phy_exit(struct phy *_phy)
+{
+ struct sun50i_usb3_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->reset);
+ clk_disable_unprepare(phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops sun50i_usb3_phy_ops = {
+ .init = sun50i_usb3_phy_init,
+ .exit = sun50i_usb3_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int sun50i_usb3_phy_probe(struct platform_device *pdev)
+{
+ struct sun50i_usb3_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(phy->clk)) {
+ if (PTR_ERR(phy->clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get phy clock\n");
+ return PTR_ERR(phy->clk);
+ }
+
+ phy->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(phy->reset)) {
+ dev_err(dev, "failed to get reset control\n");
+ return PTR_ERR(phy->reset);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->phy = devm_phy_create(dev, NULL, &sun50i_usb3_phy_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy->phy);
+ }
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id sun50i_usb3_phy_of_match[] = {
+ { .compatible = "allwinner,sun50i-h6-usb3-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sun50i_usb3_phy_of_match);
+
+static struct platform_driver sun50i_usb3_phy_driver = {
+ .probe = sun50i_usb3_phy_probe,
+ .driver = {
+ .of_match_table = sun50i_usb3_phy_of_match,
+ .name = "sun50i-usb3-phy",
+ }
+};
+module_platform_driver(sun50i_usb3_phy_driver);
+
+MODULE_DESCRIPTION("Allwinner H6 USB 3.0 phy driver");
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index ac322d643c7a..08e322789e59 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -50,6 +50,8 @@
#define PHY_R5_PHY_CR_ACK BIT(16)
#define PHY_R5_PHY_BS_OUT BIT(17)
+#define PCIE_RESET_DELAY 500 /* microseconds, used with udelay() */
+
struct phy_g12a_usb3_pcie_priv {
struct regmap *regmap;
struct regmap *regmap_cr;
@@ -196,6 +198,10 @@ static int phy_g12a_usb3_init(struct phy *phy)
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
int data, ret;
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
+
/* Switch PHY to USB3 */
/* TODO figure out how to handle when PCIe was set in the bootloader */
regmap_update_bits(priv->regmap, PHY_R0,
@@ -272,24 +278,64 @@ static int phy_g12a_usb3_init(struct phy *phy)
return 0;
}
-static int phy_g12a_usb3_pcie_init(struct phy *phy)
+static int phy_g12a_usb3_pcie_power_on(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_power_off(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ regmap_update_bits(priv->regmap, PHY_R0,
+ PHY_R0_PCIE_POWER_STATE,
+ FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1d));
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_reset(struct phy *phy)
{
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
int ret;
- ret = reset_control_reset(priv->reset);
+ if (priv->mode == PHY_TYPE_USB3)
+ return 0;
+
+ ret = reset_control_assert(priv->reset);
if (ret)
return ret;
+ udelay(PCIE_RESET_DELAY);
+
+ ret = reset_control_deassert(priv->reset);
+ if (ret)
+ return ret;
+
+ udelay(PCIE_RESET_DELAY);
+
+ return 0;
+}
+
+static int phy_g12a_usb3_pcie_init(struct phy *phy)
+{
+ struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
if (priv->mode == PHY_TYPE_USB3)
return phy_g12a_usb3_init(phy);
- /* Power UP PCIE */
- /* TODO figure out when the bootloader has set USB3 mode before */
- regmap_update_bits(priv->regmap, PHY_R0,
- PHY_R0_PCIE_POWER_STATE,
- FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
-
return 0;
}
@@ -297,7 +343,10 @@ static int phy_g12a_usb3_pcie_exit(struct phy *phy)
{
struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
- return reset_control_reset(priv->reset);
+ if (priv->mode == PHY_TYPE_USB3)
+ return reset_control_reset(priv->reset);
+
+ return 0;
}
static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
@@ -326,6 +375,9 @@ static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
static const struct phy_ops phy_g12a_usb3_pcie_ops = {
.init = phy_g12a_usb3_pcie_init,
.exit = phy_g12a_usb3_pcie_exit,
+ .power_on = phy_g12a_usb3_pcie_power_on,
+ .power_off = phy_g12a_usb3_pcie_power_off,
+ .reset = phy_g12a_usb3_pcie_reset,
.owner = THIS_MODULE,
};
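With reset handling moved out of .init, a consumer such as the PCIe
controller driver reaches the split callbacks through the generic PHY
API. A hypothetical usage sketch (variable names are illustrative, not
from this patch):

struct phy *phy = devm_phy_get(dev, "pcie");

ret = phy_init(phy);		/* -> phy_g12a_usb3_pcie_init()             */
ret = phy_power_on(phy);	/* -> ..._power_on(): power state 0x1c      */
ret = phy_reset(phy);		/* -> ..._reset(): assert/deassert + delays */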
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 3c53625f8bc2..91b5b09589d6 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -126,8 +126,8 @@ enum {
USB_CTRL_SELECTOR_COUNT,
};
-#define USB_CTRL_REG(base, reg) ((void *)base + USB_CTRL_##reg)
-#define USB_XHCI_EC_REG(base, reg) ((void *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_REG(base, reg) ((void __iomem *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void __iomem *)base + USB_XHCI_EC_##reg)
#define USB_CTRL_MASK(reg, field) \
USB_CTRL_##reg##_##field##_MASK
#define USB_CTRL_MASK_FAMILY(params, reg, field) \
@@ -416,7 +416,7 @@ void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -428,7 +428,7 @@ void usb_ctrl_set_family(struct brcm_usb_init_params *params,
u32 reg_offset, u32 field)
{
u32 mask;
- void *reg;
+ void __iomem *reg;
mask = params->usb_reg_bits_map[field];
reg = params->ctrl_regs + reg_offset;
@@ -707,7 +707,7 @@ static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
void __iomem *xhci_ec_base = params->xhci_ec_regs;
u32 val;
- if (params->family_id != 0x74371000 || xhci_ec_base == 0)
+ if (params->family_id != 0x74371000 || !xhci_ec_base)
return;
brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 9b16f13b5ab2..34a6a9a1ceb2 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -114,7 +114,6 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
struct hisi_inno_phy_priv *priv;
struct phy_provider *provider;
struct device_node *child;
- struct resource *res;
int i = 0;
int ret;
@@ -122,8 +121,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index 62d10ef20296..f1cb3e4d2add 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -195,7 +195,6 @@ static int histb_combphy_probe(struct platform_device *pdev)
struct histb_combphy_priv *priv;
struct device_node *np = dev->of_node;
struct histb_combphy_mode *mode;
- struct resource *res;
u32 vals[3];
int ret;
@@ -203,8 +202,7 @@ static int histb_combphy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->mmio = devm_ioremap_resource(dev, res);
+ priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
ret = PTR_ERR(priv->mmio);
return ret;
diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
index 544d64a84cc0..6e457967653e 100644
--- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
+++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c
@@ -323,7 +323,8 @@ static int ltq_vrx200_pcie_phy_power_on(struct phy *phy)
goto err_disable_pdi_clk;
/* Check if we are in "startup ready" status */
- if (ltq_vrx200_pcie_phy_wait_for_pll(phy) != 0)
+ ret = ltq_vrx200_pcie_phy_wait_for_pll(phy);
+ if (ret)
goto err_disable_phy_clk;
ltq_vrx200_pcie_phy_apply_workarounds(phy);
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index ded900b06f5a..23bc3bf5c4c0 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -216,20 +216,13 @@ static int mvebu_a3700_utmi_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mvebu_a3700_utmi *utmi;
struct phy_provider *provider;
- struct resource *res;
utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL);
if (!utmi)
return -ENOMEM;
/* Get UTMI memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "Missing UTMI PHY memory resource\n");
- return -ENODEV;
- }
-
- utmi->regs = devm_ioremap_resource(dev, res);
+ utmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(utmi->regs))
return PTR_ERR(utmi->regs);
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 3c9189473407..7a33ec12f71b 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -1342,7 +1342,7 @@ static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx,
static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane)
{
int i;
- struct {
+ static const struct {
u32 reg;
u32 val;
} serdes_reg[] = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 39e8deb8001e..091e20303a14 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -165,6 +165,11 @@ static const unsigned int sdm845_ufsphy_regs_layout[] = {
[QPHY_PCS_READY_STATUS] = 0x160,
};
+static const unsigned int sm8150_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x180,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -879,6 +884,93 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_POWER_DOWN_CONTROL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xac),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_BAND, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0xf1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xe0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -1276,6 +1368,31 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8150_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
+ .tx_tbl = sm8150_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
+ .rx_tbl = sm8150_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
+ .pcs_tbl = sm8150_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .is_dual_lane_phy = true,
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1998,6 +2115,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,msm8998-qmp-usb3-phy",
.data = &msm8998_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 335ea5d7ef40..ab6ff9b45a32 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -313,4 +313,100 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
+/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_PLL_IVCO 0x058
+#define QSERDES_V4_COM_CMN_IPTRIM 0x060
+#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
+#define QSERDES_V4_COM_CP_CTRL_MODE1 0x078
+#define QSERDES_V4_COM_PLL_RCTRL_MODE0 0x07c
+#define QSERDES_V4_COM_PLL_RCTRL_MODE1 0x080
+#define QSERDES_V4_COM_PLL_CCTRL_MODE0 0x084
+#define QSERDES_V4_COM_PLL_CCTRL_MODE1 0x088
+#define QSERDES_V4_COM_SYSCLK_EN_SEL 0x094
+#define QSERDES_V4_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V4_COM_LOCK_CMP1_MODE0 0x0ac
+#define QSERDES_V4_COM_LOCK_CMP2_MODE0 0x0b0
+#define QSERDES_V4_COM_LOCK_CMP1_MODE1 0x0b4
+#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
+#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
+#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V4_COM_HSCLK_SEL 0x158
+#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
+#define QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL 0x1bc
+#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
+
+/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
+#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdc
+#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
+#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
+#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+
+/* Only for QMP V4 PHY - RX registers */
+#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
+#define QSERDES_V4_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
+#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
+#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
+#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
+#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
+#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
+#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
+#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
+#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V4_RX_SIGDET_LVL 0x120
+#define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V4_RX_RX_BAND 0x128
+#define QSERDES_V4_RX_RX_MODE_00_LOW 0x170
+#define QSERDES_V4_RX_RX_MODE_00_HIGH 0x174
+#define QSERDES_V4_RX_RX_MODE_00_HIGH2 0x178
+#define QSERDES_V4_RX_RX_MODE_00_HIGH3 0x17c
+#define QSERDES_V4_RX_RX_MODE_00_HIGH4 0x180
+#define QSERDES_V4_RX_RX_MODE_01_LOW 0x184
+#define QSERDES_V4_RX_RX_MODE_01_HIGH 0x188
+#define QSERDES_V4_RX_RX_MODE_01_HIGH2 0x18c
+#define QSERDES_V4_RX_RX_MODE_01_HIGH3 0x190
+#define QSERDES_V4_RX_RX_MODE_01_HIGH4 0x194
+#define QSERDES_V4_RX_RX_MODE_10_LOW 0x198
+#define QSERDES_V4_RX_RX_MODE_10_HIGH 0x19c
+#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
+#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
+#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+
+/* Only for QMP V4 PHY - PCS registers */
+#define QPHY_V4_PHY_START 0x000
+#define QPHY_V4_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_SW_RESET 0x008
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PLL_CNTL 0x02c
+#define QPHY_V4_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_LINECFG_DISABLE 0x148
+#define QPHY_V4_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_READY_STATUS 0x180
+#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index b163b3a1558d..61054272a7c8 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -158,8 +158,8 @@ static int qcom_usb_hs_phy_power_on(struct phy *phy)
/* setup initial state */
qcom_usb_hs_phy_vbus_notifier(&uphy->vbus_notify, state,
uphy->vbus_edev);
- ret = devm_extcon_register_notifier(&ulpi->dev, uphy->vbus_edev,
- EXTCON_USB, &uphy->vbus_notify);
+ ret = extcon_register_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
if (ret)
goto err_ulpi;
}
@@ -180,6 +180,9 @@ static int qcom_usb_hs_phy_power_off(struct phy *phy)
{
struct qcom_usb_hs_phy *uphy = phy_get_drvdata(phy);
+ if (uphy->vbus_edev)
+ extcon_unregister_notifier(uphy->vbus_edev, EXTCON_USB,
+ &uphy->vbus_notify);
regulator_disable(uphy->v3p3);
regulator_disable(uphy->v1p8);
clk_disable_unprepare(uphy->sleep_clk);
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 2926e4937301..2e279ac0fa4d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -71,6 +71,7 @@ struct rcar_gen2_phy_driver {
struct rcar_gen2_phy_data {
const struct phy_ops *gen2_phy_ops;
const u32 (*select_value)[PHYS_PER_CHANNEL];
+ const u32 num_channels;
};
static int rcar_gen2_phy_init(struct phy *p)
@@ -271,11 +272,13 @@ static const u32 usb20_select_value[][PHYS_PER_CHANNEL] = {
static const struct rcar_gen2_phy_data rcar_gen2_usb_phy_data = {
.gen2_phy_ops = &rcar_gen2_phy_ops,
.select_value = pci_select_value,
+ .num_channels = ARRAY_SIZE(pci_select_value),
};
static const struct rcar_gen2_phy_data rz_g1c_usb_phy_data = {
.gen2_phy_ops = &rz_g1c_phy_ops,
.select_value = usb20_select_value,
+ .num_channels = ARRAY_SIZE(usb20_select_value),
};
static const struct of_device_id rcar_gen2_phy_match_table[] = {
@@ -389,7 +392,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
channel->selected_phy = -1;
error = of_property_read_u32(np, "reg", &channel_num);
- if (error || channel_num > 2) {
+ if (error || channel_num >= data->num_channels) {
dev_err(dev, "Invalid \"reg\" property\n");
of_node_put(np);
return error;
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index b7f6b1324395..bfb22f868857 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/string.h>
#include <linux/usb/of.h>
#include <linux/workqueue.h>
@@ -320,9 +321,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch))
return -EIO;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode = PHY_MODE_USB_HOST;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
@@ -614,7 +615,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->base);
/* call request_irq for OTG */
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
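Two behavioral notes on the rcar-gen3-usb2 hunks above. First,
sysfs_streq() both tolerates the trailing newline that echo(1) appends
and rejects the prefix matches that strncmp() would accept; an
illustrative comparison (inputs are assumed, not from the patch):

sysfs_streq("host\n", "host");		/* true: trailing newline ignored */
sysfs_streq("hostile", "host");		/* false: full-string comparison  */
strncmp("hostile", "host", 4);		/* 0, i.e. a spurious "match"     */

Second, platform_get_irq_optional() behaves like platform_get_irq() but
does not log an error when no IRQ is present, matching the driver's
treatment of the OTG interrupt as optional.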
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index c454c90cd99e..dbd2de4d28b1 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -35,6 +35,14 @@ config PHY_ROCKCHIP_INNO_USB2
help
Support for Rockchip USB2.0 PHY with Innosilicon IP block.
+config PHY_ROCKCHIP_INNO_DSIDPHY
+ tristate "Rockchip Innosilicon MIPI/LVDS/TTL PHY driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip MIPI/LVDS/TTL PHY with
+ Innosilicon IP block.
+
config PHY_ROCKCHIP_PCIE
tristate "Rockchip PCIe PHY Driver"
depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index fd21cbaf40dd..9f59a81e4e0d 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
new file mode 100644
index 000000000000..fc729ecd3fe9
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Wyon Bi <bivvy.bi@rock-chips.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
+/*
+ * The register offset address[7:0] is split into two parts: bits [7:5]
+ * form the first address and bits [4:0] form the second address. When
+ * configuring a register, both must be set. The clock lane and the data
+ * lanes use the same registers with the same second address, but differ
+ * in the first address.
+ */
+#define FIRST_ADDRESS(x) (((x) & 0x7) << 5)
+#define SECOND_ADDRESS(x) (((x) & 0x1f) << 0)
+#define PHY_REG(first, second) (FIRST_ADDRESS(first) | \
+ SECOND_ADDRESS(second))
+
+/* Analog Register Part: reg00 */
+#define BANDGAP_POWER_MASK BIT(7)
+#define BANDGAP_POWER_DOWN BIT(7)
+#define BANDGAP_POWER_ON 0
+#define LANE_EN_MASK GENMASK(6, 2)
+#define LANE_EN_CK BIT(6)
+#define LANE_EN_3 BIT(5)
+#define LANE_EN_2 BIT(4)
+#define LANE_EN_1 BIT(3)
+#define LANE_EN_0 BIT(2)
+#define POWER_WORK_MASK GENMASK(1, 0)
+#define POWER_WORK_ENABLE UPDATE(1, 1, 0)
+#define POWER_WORK_DISABLE UPDATE(2, 1, 0)
+/* Analog Register Part: reg01 */
+#define REG_SYNCRST_MASK BIT(2)
+#define REG_SYNCRST_RESET BIT(2)
+#define REG_SYNCRST_NORMAL 0
+#define REG_LDOPD_MASK BIT(1)
+#define REG_LDOPD_POWER_DOWN BIT(1)
+#define REG_LDOPD_POWER_ON 0
+#define REG_PLLPD_MASK BIT(0)
+#define REG_PLLPD_POWER_DOWN BIT(0)
+#define REG_PLLPD_POWER_ON 0
+/* Analog Register Part: reg03 */
+#define REG_FBDIV_HI_MASK BIT(5)
+#define REG_FBDIV_HI(x) UPDATE((x >> 8), 5, 5)
+#define REG_PREDIV_MASK GENMASK(4, 0)
+#define REG_PREDIV(x) UPDATE(x, 4, 0)
+/* Analog Register Part: reg04 */
+#define REG_FBDIV_LO_MASK GENMASK(7, 0)
+#define REG_FBDIV_LO(x) UPDATE(x, 7, 0)
+/* Analog Register Part: reg05 */
+#define SAMPLE_CLOCK_PHASE_MASK GENMASK(6, 4)
+#define SAMPLE_CLOCK_PHASE(x) UPDATE(x, 6, 4)
+#define CLOCK_LANE_SKEW_PHASE_MASK GENMASK(2, 0)
+#define CLOCK_LANE_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg06 */
+#define DATA_LANE_3_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_3_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_2_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_2_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg07 */
+#define DATA_LANE_1_SKEW_PHASE_MASK GENMASK(6, 4)
+#define DATA_LANE_1_SKEW_PHASE(x) UPDATE(x, 6, 4)
+#define DATA_LANE_0_SKEW_PHASE_MASK GENMASK(2, 0)
+#define DATA_LANE_0_SKEW_PHASE(x) UPDATE(x, 2, 0)
+/* Analog Register Part: reg08 */
+#define SAMPLE_CLOCK_DIRECTION_MASK BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_REVERSE BIT(4)
+#define SAMPLE_CLOCK_DIRECTION_FORWARD 0
+/* Digital Register Part: reg00 */
+#define REG_DIG_RSTN_MASK BIT(0)
+#define REG_DIG_RSTN_NORMAL BIT(0)
+#define REG_DIG_RSTN_RESET 0
+/* Digital Register Part: reg01 */
+#define INVERT_TXCLKESC_MASK BIT(1)
+#define INVERT_TXCLKESC_ENABLE BIT(1)
+#define INVERT_TXCLKESC_DISABLE 0
+#define INVERT_TXBYTECLKHS_MASK BIT(0)
+#define INVERT_TXBYTECLKHS_ENABLE BIT(0)
+#define INVERT_TXBYTECLKHS_DISABLE 0
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg05 */
+#define T_LPX_CNT_MASK GENMASK(5, 0)
+#define T_LPX_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
+#define T_HS_PREPARE_CNT_MASK GENMASK(6, 0)
+#define T_HS_PREPARE_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
+#define T_HS_ZERO_CNT_MASK GENMASK(5, 0)
+#define T_HS_ZERO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
+#define T_HS_TRAIL_CNT_MASK GENMASK(6, 0)
+#define T_HS_TRAIL_CNT(x) UPDATE(x, 6, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
+#define T_HS_EXIT_CNT_MASK GENMASK(4, 0)
+#define T_HS_EXIT_CNT(x) UPDATE(x, 4, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
+#define T_CLK_POST_CNT_MASK GENMASK(3, 0)
+#define T_CLK_POST_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
+#define LPDT_TX_PPI_SYNC_MASK BIT(2)
+#define LPDT_TX_PPI_SYNC_ENABLE BIT(2)
+#define LPDT_TX_PPI_SYNC_DISABLE 0
+#define T_WAKEUP_CNT_HI_MASK GENMASK(1, 0)
+#define T_WAKEUP_CNT_HI(x) UPDATE(x, 1, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0d */
+#define T_WAKEUP_CNT_LO_MASK GENMASK(7, 0)
+#define T_WAKEUP_CNT_LO(x) UPDATE(x, 7, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0e */
+#define T_CLK_PRE_CNT_MASK GENMASK(3, 0)
+#define T_CLK_PRE_CNT(x) UPDATE(x, 3, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
+#define T_TA_GO_CNT_MASK GENMASK(5, 0)
+#define T_TA_GO_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
+#define T_TA_SURE_CNT_MASK GENMASK(5, 0)
+#define T_TA_SURE_CNT(x) UPDATE(x, 5, 0)
+/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
+#define T_TA_WAIT_CNT_MASK GENMASK(5, 0)
+#define T_TA_WAIT_CNT(x) UPDATE(x, 5, 0)
+/* LVDS Register Part: reg00 */
+#define LVDS_DIGITAL_INTERNAL_RESET_MASK BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_DISABLE BIT(2)
+#define LVDS_DIGITAL_INTERNAL_RESET_ENABLE 0
+/* LVDS Register Part: reg01 */
+#define LVDS_DIGITAL_INTERNAL_ENABLE_MASK BIT(7)
+#define LVDS_DIGITAL_INTERNAL_ENABLE BIT(7)
+#define LVDS_DIGITAL_INTERNAL_DISABLE 0
+/* LVDS Register Part: reg03 */
+#define MODE_ENABLE_MASK GENMASK(2, 0)
+#define TTL_MODE_ENABLE BIT(2)
+#define LVDS_MODE_ENABLE BIT(1)
+#define MIPI_MODE_ENABLE BIT(0)
+/* LVDS Register Part: reg0b */
+#define LVDS_LANE_EN_MASK GENMASK(7, 3)
+#define LVDS_DATA_LANE0_EN BIT(7)
+#define LVDS_DATA_LANE1_EN BIT(6)
+#define LVDS_DATA_LANE2_EN BIT(5)
+#define LVDS_DATA_LANE3_EN BIT(4)
+#define LVDS_CLK_LANE_EN BIT(3)
+#define LVDS_PLL_POWER_MASK BIT(2)
+#define LVDS_PLL_POWER_OFF BIT(2)
+#define LVDS_PLL_POWER_ON 0
+#define LVDS_BANDGAP_POWER_MASK BIT(0)
+#define LVDS_BANDGAP_POWER_DOWN BIT(0)
+#define LVDS_BANDGAP_POWER_ON 0
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_ENABLECLK BIT(2)
+#define DSI_PHY_STATUS 0xb0
+#define PHY_LOCK BIT(0)
+
+struct mipi_dphy_timing {
+ unsigned int clkmiss;
+ unsigned int clkpost;
+ unsigned int clkpre;
+ unsigned int clkprepare;
+ unsigned int clksettle;
+ unsigned int clktermen;
+ unsigned int clktrail;
+ unsigned int clkzero;
+ unsigned int dtermen;
+ unsigned int eot;
+ unsigned int hsexit;
+ unsigned int hsprepare;
+ unsigned int hszero;
+ unsigned int hssettle;
+ unsigned int hsskip;
+ unsigned int hstrail;
+ unsigned int init;
+ unsigned int lpx;
+ unsigned int taget;
+ unsigned int tago;
+ unsigned int tasure;
+ unsigned int wakeup;
+};
+
+struct inno_dsidphy {
+ struct device *dev;
+ struct clk *ref_clk;
+ struct clk *pclk_phy;
+ struct clk *pclk_host;
+ void __iomem *phy_base;
+ void __iomem *host_base;
+ struct reset_control *rst;
+ enum phy_mode mode;
+
+ struct {
+ struct clk_hw hw;
+ u8 prediv;
+ u16 fbdiv;
+ unsigned long rate;
+ } pll;
+};
+
+enum {
+ REGISTER_PART_ANALOG,
+ REGISTER_PART_DIGITAL,
+ REGISTER_PART_CLOCK_LANE,
+ REGISTER_PART_DATA0_LANE,
+ REGISTER_PART_DATA1_LANE,
+ REGISTER_PART_DATA2_LANE,
+ REGISTER_PART_DATA3_LANE,
+ REGISTER_PART_LVDS,
+};
+
+static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_dsidphy, pll.hw);
+}
+
+static void phy_update_bits(struct inno_dsidphy *inno,
+ u8 first, u8 second, u8 mask, u8 val)
+{
+ u32 reg = PHY_REG(first, second) << 2;
+ unsigned int tmp, orig;
+
+ orig = readl(inno->phy_base + reg);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ writel(tmp, inno->phy_base + reg);
+}
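+
+/*
+ * Illustrative example (not part of the original source): the LVDS-part
+ * register 0x03 is addressed as PHY_REG(REGISTER_PART_LVDS, 0x03) =
+ * (7 << 5) | 0x03 = 0xe3, which phy_update_bits() turns into the byte
+ * offset 0xe3 << 2 = 0x38c.
+ */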
+
+static void mipi_dphy_timing_get_default(struct mipi_dphy_timing *timing,
+ unsigned long period)
+{
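+ /*
+ * All values are in picoseconds; @period is the bit period (UI).
+ * Worked example (assuming a 500 Mbps lane rate, not stated in the
+ * original source): period = 2000 ps, so clkpost below comes out to
+ * 70000 + 52 * 2000 = 174000 ps.
+ */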
+ /* Global Operation Timing Parameters */
+ timing->clkmiss = 0;
+ timing->clkpost = 70000 + 52 * period;
+ timing->clkpre = 8 * period;
+ timing->clkprepare = 65000;
+ timing->clksettle = 95000;
+ timing->clktermen = 0;
+ timing->clktrail = 80000;
+ timing->clkzero = 260000;
+ timing->dtermen = 0;
+ timing->eot = 0;
+ timing->hsexit = 120000;
+ timing->hsprepare = 65000 + 4 * period;
+ timing->hszero = 145000 + 6 * period;
+ timing->hssettle = 85000 + 6 * period;
+ timing->hsskip = 40000;
+ timing->hstrail = max(8 * period, 60000 + 4 * period);
+ timing->init = 100000000;
+ timing->lpx = 60000;
+ timing->taget = 5 * timing->lpx;
+ timing->tago = 4 * timing->lpx;
+ timing->tasure = 2 * timing->lpx;
+ timing->wakeup = 1000000000;
+}
+
+static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
+{
+ struct mipi_dphy_timing gotp;
+ const struct {
+ unsigned long rate;
+ u8 hs_prepare;
+ u8 clk_lane_hs_zero;
+ u8 data_lane_hs_zero;
+ u8 hs_trail;
+ } timings[] = {
+ { 110000000, 0x20, 0x16, 0x02, 0x22},
+ { 150000000, 0x06, 0x16, 0x03, 0x45},
+ { 200000000, 0x18, 0x17, 0x04, 0x0b},
+ { 250000000, 0x05, 0x17, 0x05, 0x16},
+ { 300000000, 0x51, 0x18, 0x06, 0x2c},
+ { 400000000, 0x64, 0x19, 0x07, 0x33},
+ { 500000000, 0x20, 0x1b, 0x07, 0x4e},
+ { 600000000, 0x6a, 0x1d, 0x08, 0x3a},
+ { 700000000, 0x3e, 0x1e, 0x08, 0x6a},
+ { 800000000, 0x21, 0x1f, 0x09, 0x29},
+ {1000000000, 0x09, 0x20, 0x09, 0x27},
+ };
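+ /*
+ * The table above is ordered by ascending rate; the loop below picks
+ * the first entry whose rate is >= the PLL rate and falls back to
+ * the last entry for anything faster.
+ */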
+ u32 t_txbyteclkhs, t_txclkesc, ui;
+ u32 txbyteclkhs, txclkesc, esc_clk_div;
+ u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
+ u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
+ unsigned int i;
+
+ /* Select MIPI mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, MIPI_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(inno->pll.prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
+ /* Enable PLL and LDO */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_ON | REG_PLLPD_POWER_ON);
+ /* Reset analog */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_SYNCRST_MASK, REG_SYNCRST_NORMAL);
+ /* Reset digital */
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_RESET);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_DIGITAL, 0x00,
+ REG_DIG_RSTN_MASK, REG_DIG_RSTN_NORMAL);
+
+ txbyteclkhs = inno->pll.rate / 8;
+ t_txbyteclkhs = div_u64(PSEC_PER_SEC, txbyteclkhs);
+
+ esc_clk_div = DIV_ROUND_UP(txbyteclkhs, 20000000);
+ txclkesc = txbyteclkhs / esc_clk_div;
+ t_txclkesc = div_u64(PSEC_PER_SEC, txclkesc);
+
+ ui = div_u64(PSEC_PER_SEC, inno->pll.rate);
+
+ memset(&gotp, 0, sizeof(gotp));
+ mipi_dphy_timing_get_default(&gotp, ui);
+
+ /*
+ * The value of counter for HS Ths-exit
+ * Ths-exit = Tpin_txbyteclkhs * value
+ */
+ hs_exit = DIV_ROUND_UP(gotp.hsexit, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-post
+ * Tclk-post = Tpin_txbyteclkhs * value
+ */
+ clk_post = DIV_ROUND_UP(gotp.clkpost, t_txbyteclkhs);
+ /*
+ * The value of counter for HS Tclk-pre
+ * Tclk-pre = Tpin_txbyteclkhs * value
+ */
+ clk_pre = DIV_ROUND_UP(gotp.clkpre, t_txbyteclkhs);
+
+ /*
+ * The value of counter for HS Tlpx Time
+ * Tlpx = Tpin_txbyteclkhs * (2 + value)
+ */
+ lpx = DIV_ROUND_UP(gotp.lpx, t_txbyteclkhs);
+ if (lpx >= 2)
+ lpx -= 2;
+
+ /*
+ * The value of counter for HS Tta-go
+ * Tta-go for turnaround
+ * Tta-go = Ttxclkesc * value
+ */
+ ta_go = DIV_ROUND_UP(gotp.tago, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-sure
+ * Tta-sure for turnaround
+ * Tta-sure = Ttxclkesc * value
+ */
+ ta_sure = DIV_ROUND_UP(gotp.tasure, t_txclkesc);
+ /*
+ * The value of counter for HS Tta-wait
+ * Tta-wait for turnaround
+ * Tta-wait = Ttxclkesc * value
+ */
+ ta_wait = DIV_ROUND_UP(gotp.taget, t_txclkesc);
+
+ for (i = 0; i < ARRAY_SIZE(timings); i++)
+ if (inno->pll.rate <= timings[i].rate)
+ break;
+
+ if (i == ARRAY_SIZE(timings))
+ --i;
+
+ hs_prepare = timings[i].hs_prepare;
+ hs_trail = timings[i].hs_trail;
+ clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
+ data_lane_hs_zero = timings[i].data_lane_hs_zero;
+ wakeup = 0x3ff;
+
+ for (i = REGISTER_PART_CLOCK_LANE; i <= REGISTER_PART_DATA3_LANE; i++) {
+ if (i == REGISTER_PART_CLOCK_LANE)
+ hs_zero = clk_lane_hs_zero;
+ else
+ hs_zero = data_lane_hs_zero;
+
+ phy_update_bits(inno, i, 0x05, T_LPX_CNT_MASK,
+ T_LPX_CNT(lpx));
+ phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
+ T_HS_PREPARE_CNT(hs_prepare));
+ phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
+ T_HS_ZERO_CNT(hs_zero));
+ phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
+ T_HS_TRAIL_CNT(hs_trail));
+ phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
+ T_HS_EXIT_CNT(hs_exit));
+ phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
+ T_CLK_POST_CNT(clk_post));
+ phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
+ T_CLK_PRE_CNT(clk_pre));
+ phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
+ T_WAKEUP_CNT_HI(wakeup >> 8));
+ phy_update_bits(inno, i, 0x0d, T_WAKEUP_CNT_LO_MASK,
+ T_WAKEUP_CNT_LO(wakeup));
+ phy_update_bits(inno, i, 0x10, T_TA_GO_CNT_MASK,
+ T_TA_GO_CNT(ta_go));
+ phy_update_bits(inno, i, 0x11, T_TA_SURE_CNT_MASK,
+ T_TA_SURE_CNT(ta_sure));
+ phy_update_bits(inno, i, 0x12, T_TA_WAIT_CNT_MASK,
+ T_TA_WAIT_CNT(ta_wait));
+ }
+
+ /* Enable all lanes on analog part */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ LANE_EN_MASK, LANE_EN_CK | LANE_EN_3 | LANE_EN_2 |
+ LANE_EN_1 | LANE_EN_0);
+}
+
+static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
+{
+ u8 prediv = 2;
+ u16 fbdiv = 28;
+
+ /* Sample clock reverse direction */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
+ SAMPLE_CLOCK_DIRECTION_MASK,
+ SAMPLE_CLOCK_DIRECTION_REVERSE);
+
+ /* Select LVDS mode */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
+ MODE_ENABLE_MASK, LVDS_MODE_ENABLE);
+ /* Configure PLL */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_PREDIV_MASK, REG_PREDIV(prediv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x03,
+ REG_FBDIV_HI_MASK, REG_FBDIV_HI(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
+ REG_FBDIV_LO_MASK, REG_FBDIV_LO(fbdiv));
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x08, 0xff, 0xfc);
+ /* Enable PLL and Bandgap */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_ON | LVDS_BANDGAP_POWER_ON);
+
+ msleep(20);
+
+ /* Reset LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_ENABLE);
+ udelay(1);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
+ LVDS_DIGITAL_INTERNAL_RESET_MASK,
+ LVDS_DIGITAL_INTERNAL_RESET_DISABLE);
+ /* Enable LVDS digital logic */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_ENABLE);
+ /* Enable LVDS analog driver */
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_LANE_EN_MASK, LVDS_CLK_LANE_EN |
+ LVDS_DATA_LANE0_EN | LVDS_DATA_LANE1_EN |
+ LVDS_DATA_LANE2_EN | LVDS_DATA_LANE3_EN);
+}
+
+static int inno_dsidphy_power_on(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ clk_prepare_enable(inno->pclk_phy);
+ pm_runtime_get_sync(inno->dev);
+
+ /* Bandgap power on */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_ON);
+ /* Enable power work */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_ENABLE);
+
+ switch (inno->mode) {
+ case PHY_MODE_MIPI_DPHY:
+ inno_dsidphy_mipi_mode_enable(inno);
+ break;
+ case PHY_MODE_LVDS:
+ inno_dsidphy_lvds_mode_enable(inno);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int inno_dsidphy_power_off(struct phy *phy)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00, LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
+ REG_LDOPD_MASK | REG_PLLPD_MASK,
+ REG_LDOPD_POWER_DOWN | REG_PLLPD_POWER_DOWN);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ POWER_WORK_MASK, POWER_WORK_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x00,
+ BANDGAP_POWER_MASK, BANDGAP_POWER_DOWN);
+
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b, LVDS_LANE_EN_MASK, 0);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x01,
+ LVDS_DIGITAL_INTERNAL_ENABLE_MASK,
+ LVDS_DIGITAL_INTERNAL_DISABLE);
+ phy_update_bits(inno, REGISTER_PART_LVDS, 0x0b,
+ LVDS_PLL_POWER_MASK | LVDS_BANDGAP_POWER_MASK,
+ LVDS_PLL_POWER_OFF | LVDS_BANDGAP_POWER_DOWN);
+
+ pm_runtime_put(inno->dev);
+ clk_disable_unprepare(inno->pclk_phy);
+
+ return 0;
+}
+
+static int inno_dsidphy_set_mode(struct phy *phy, enum phy_mode mode,
+ int submode)
+{
+ struct inno_dsidphy *inno = phy_get_drvdata(phy);
+
+ switch (mode) {
+ case PHY_MODE_MIPI_DPHY:
+ case PHY_MODE_LVDS:
+ inno->mode = mode;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct phy_ops inno_dsidphy_ops = {
+ .set_mode = inno_dsidphy_set_mode,
+ .power_on = inno_dsidphy_power_on,
+ .power_off = inno_dsidphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static unsigned long inno_dsidphy_pll_round_rate(struct inno_dsidphy *inno,
+ unsigned long prate,
+ unsigned long rate,
+ u8 *prediv, u16 *fbdiv)
+{
+ unsigned long best_freq = 0;
+ unsigned long fref, fout;
+ u8 min_prediv, max_prediv;
+ u8 _prediv, best_prediv = 1;
+ u16 _fbdiv, best_fbdiv = 1;
+ u32 min_delta = UINT_MAX;
+
+ /*
+ * The PLL output frequency can be calculated using a simple formula:
+ * PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2
+ * where PLL_Output_Frequency is twice the DDR clock frequency.
+ */
+ fref = prate / 2;
+ if (rate > 1000000000UL)
+ fout = 1000000000UL;
+ else
+ fout = rate;
+
+ /* 5MHz < Fref / prediv < 40MHz */
+ min_prediv = DIV_ROUND_UP(fref, 40000000);
+ max_prediv = fref / 5000000;
+
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fref);
+ _fbdiv = tmp;
+
+ /*
+ * The possible settings of the feedback divider are
+ * 12, 13, 14 and 16 to 511; 15 is not supported.
+ */
+ if (_fbdiv == 15)
+ continue;
+
+ if (_fbdiv < 12 || _fbdiv > 511)
+ continue;
+
+ tmp = (u64)_fbdiv * fref;
+ do_div(tmp, _prediv);
+
+ delta = abs(fout - tmp);
+ if (!delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ break;
+ } else if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ best_freq = tmp;
+ min_delta = delta;
+ }
+ }
+
+ if (best_freq) {
+ *prediv = best_prediv;
+ *fbdiv = best_fbdiv;
+ }
+
+ return best_freq;
+}
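
To make the divider search above concrete, here is a minimal stand-alone
sketch in plain user-space C. The 24 MHz parent rate and the 500 MHz
target are assumptions for illustration, not values from this patch; the
constraints mirror the loop above.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned long prate = 24000000UL;       /* assumed parent rate */
            unsigned long target = 500000000UL;     /* assumed requested rate */
            unsigned long fref = prate / 2;         /* as in the driver */
            unsigned long best = 0, min_delta = ~0UL;
            unsigned int best_prediv = 1, best_fbdiv = 1;
            unsigned int min_prediv = (fref + 40000000 - 1) / 40000000;
            unsigned int max_prediv = fref / 5000000;

            for (unsigned int prediv = min_prediv; prediv <= max_prediv; prediv++) {
                    uint64_t fbdiv = (uint64_t)target * prediv / fref;
                    uint64_t fout;
                    unsigned long delta;

                    /* feedback divider limited to 12, 13, 14 and 16..511 */
                    if (fbdiv == 15 || fbdiv < 12 || fbdiv > 511)
                            continue;

                    fout = fbdiv * fref / prediv;
                    delta = target > fout ? target - fout : fout - target;
                    if (delta < min_delta) {
                            min_delta = delta;
                            best = fout;
                            best_prediv = prediv;
                            best_fbdiv = fbdiv;
                    }
            }

            /* prints: fout=498000000 prediv=2 fbdiv=83 */
            printf("fout=%lu prediv=%u fbdiv=%u\n", best, best_prediv, best_fbdiv);
            return 0;
    }

With those inputs the closest reachable rate is 498 MHz (prediv = 2,
fbdiv = 83), 2 MHz short of the request, which is what the loop above
would also select for the same inputs.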
+
+static long inno_dsidphy_pll_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, *prate, rate,
+ &prediv, &fbdiv);
+
+ return fout;
+}
+
+static int inno_dsidphy_pll_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+ unsigned long fout;
+ u16 fbdiv = 1;
+ u8 prediv = 1;
+
+ fout = inno_dsidphy_pll_round_rate(inno, parent_rate, rate,
+ &prediv, &fbdiv);
+
+ dev_dbg(inno->dev, "fin=%lu, fout=%lu, prediv=%u, fbdiv=%u\n",
+ parent_rate, fout, prediv, fbdiv);
+
+ inno->pll.prediv = prediv;
+ inno->pll.fbdiv = fbdiv;
+ inno->pll.rate = fout;
+
+ return 0;
+}
+
+static unsigned long
+inno_dsidphy_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct inno_dsidphy *inno = hw_to_inno(hw);
+
+ /* PLL_Output_Frequency = (FREF / PREDIV * FBDIV) / 2 */
+ return (prate / inno->pll.prediv * inno->pll.fbdiv) / 2;
+}
+
+static const struct clk_ops inno_dsidphy_pll_clk_ops = {
+ .round_rate = inno_dsidphy_pll_clk_round_rate,
+ .set_rate = inno_dsidphy_pll_clk_set_rate,
+ .recalc_rate = inno_dsidphy_pll_clk_recalc_rate,
+};
+
+static int inno_dsidphy_pll_register(struct inno_dsidphy *inno)
+{
+ struct device *dev = inno->dev;
+ struct clk *clk;
+ const char *parent_name;
+ struct clk_init_data init;
+ int ret;
+
+ parent_name = __clk_get_name(inno->ref_clk);
+
+ init.name = "mipi_dphy_pll";
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &init.name);
+ if (ret < 0)
+ dev_dbg(dev, "phy should set clock-output-names property\n");
+
+ init.ops = &inno_dsidphy_pll_clk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+
+ inno->pll.hw.init = &init;
+ clk = devm_clk_register(dev, &inno->pll.hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &inno->pll.hw);
+}
+
+static int inno_dsidphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct inno_dsidphy *inno;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int ret;
+
+ inno = devm_kzalloc(dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = dev;
+ platform_set_drvdata(pdev, inno);
+
+ inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(inno->phy_base))
+ return PTR_ERR(inno->phy_base);
+
+ inno->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(inno->ref_clk)) {
+ ret = PTR_ERR(inno->ref_clk);
+ dev_err(dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->pclk_phy = devm_clk_get(dev, "pclk");
+ if (IS_ERR(inno->pclk_phy)) {
+ ret = PTR_ERR(inno->pclk_phy);
+ dev_err(dev, "failed to get phy pclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(inno->rst)) {
+ ret = PTR_ERR(inno->rst);
+ dev_err(dev, "failed to get system reset control: %d\n", ret);
+ return ret;
+ }
+
+ phy = devm_phy_create(dev, NULL, &inno_dsidphy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ dev_err(dev, "failed to create phy: %d\n", ret);
+ return ret;
+ }
+
+ phy_set_drvdata(phy, inno);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ dev_err(dev, "failed to register phy provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = inno_dsidphy_pll_register(inno);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int inno_dsidphy_remove(struct platform_device *pdev)
+{
+ struct inno_dsidphy *inno = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(inno->dev);
+
+ return 0;
+}
+
+static const struct of_device_id inno_dsidphy_of_match[] = {
+ { .compatible = "rockchip,px30-dsi-dphy", },
+ { .compatible = "rockchip,rk3128-dsi-dphy", },
+ { .compatible = "rockchip,rk3368-dsi-dphy", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
+
+static struct platform_driver inno_dsidphy_driver = {
+ .driver = {
+ .name = "inno-dsidphy",
+ .of_match_table = of_match_ptr(inno_dsidphy_of_match),
+ },
+ .probe = inno_dsidphy_probe,
+ .remove = inno_dsidphy_remove,
+};
+module_platform_driver(inno_dsidphy_driver);
+
+MODULE_AUTHOR("Wyon Bi <bivvy.bi@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilicon MIPI/LVDS/TTL Video Combo PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index eae865ff312c..680cc0c8825c 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1423,6 +1423,7 @@ static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
};
static const struct of_device_id rockchip_usb2phy_dt_match[] = {
+ { .compatible = "rockchip,px30-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3228-usb2phy", .data = &rk3228_phy_cfgs },
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 6f3afaf9398f..84c27394c181 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -857,9 +857,32 @@ static void tegra186_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
{
}
+static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, USB2_VBUS_ID);
+
+ if (status) {
+ value |= VBUS_OVERRIDE;
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ } else {
+ value &= ~VBUS_OVERRIDE;
+ }
+
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ return 0;
+}
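
A note on the ID_OVERRIDE(~0) expression above: passing all-ones through
a field macro is a common padctl idiom for obtaining the field mask, so
the sequence clears the whole ID-override field before setting it to
"floating". A hypothetical stand-alone illustration follows; the
4-bits-at-bit-18 layout is assumed here to match the Tegra210 definitions
later in this patch (the Tegra186 header itself is not shown):

    #include <stdio.h>

    /* assumed layout: 4-bit ID override field at bit 18 */
    #define ID_OVERRIDE(x)          (((x) & 0xf) << 18)
    #define ID_OVERRIDE_FLOATING    ID_OVERRIDE(8)

    int main(void)
    {
            unsigned int value = 0xffffffffu;       /* pretend register contents */

            value &= ~ID_OVERRIDE(~0);      /* ID_OVERRIDE(~0) == 0xf << 18, the mask */
            value |= ID_OVERRIDE_FLOATING;  /* field := 8 (floating) */

            printf("%#x\n", value);         /* prints: 0xffe3ffff */
            return 0;
    }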
+
static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.probe = tegra186_xusb_padctl_probe,
.remove = tegra186_xusb_padctl_remove,
+ .vbus_override = tegra186_xusb_padctl_vbus_override,
};
static const char * const tegra186_xusb_padctl_supply_names[] = {
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index 0c0df6897a3b..394913bb2f20 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -39,7 +39,10 @@
#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(x) (0x0 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(x) (0x2 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
#define XUSB_PADCTL_SS_PORT_MAP 0x014
@@ -47,6 +50,7 @@
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
+#define XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED 0x7
#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
@@ -61,9 +65,14 @@
#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(x) (0x080 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP (1 << 18)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN (1 << 22)
+
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL 0x1
#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
@@ -222,6 +231,12 @@
#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
+#define XUSB_PADCTL_USB2_VBUS_ID 0xc60
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON (1 << 14)
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT 18
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK 0xf
+#define XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING 8
+
struct tegra210_xusb_fuse_calibration {
u32 hs_curr_level[4];
u32 hs_term_range_adj;
@@ -940,6 +955,34 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
priv = to_tegra210_xusb_padctl(padctl);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(
+ port->usb3_port_fake);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(
+ port->usb3_port_fake, index);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+ }
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
@@ -957,7 +1000,14 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
- value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ if (port->mode == USB_DR_MODE_UNKNOWN)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DISABLED(index);
+ else if (port->mode == USB_DR_MODE_PERIPHERAL)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_DEVICE(index);
+ else if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ else if (port->mode == USB_DR_MODE_OTG)
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_OTG(index);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -989,7 +1039,12 @@ static int tegra210_usb2_phy_power_on(struct phy *phy)
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
- value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ if (port->mode == USB_DR_MODE_HOST)
+ value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ else
+ value |=
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_VAL <<
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT;
padctl_writel(padctl, value,
XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
@@ -1062,6 +1117,32 @@ static int tegra210_usb2_phy_power_off(struct phy *phy)
mutex_lock(&padctl->lock);
+ if (port->usb3_port_fake != -1) {
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(
+ port->usb3_port_fake);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->usb3_port_fake,
+ XUSB_PADCTL_SS_PORT_MAP_PORT_DISABLED);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+ }
+
if (WARN_ON(pad->enable == 0))
goto out;
@@ -1225,13 +1306,10 @@ static int tegra210_hsic_phy_power_on(struct phy *phy)
struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
- struct tegra210_xusb_padctl *priv;
unsigned int index = lane->index;
u32 value;
int err;
- priv = to_tegra210_xusb_padctl(padctl);
-
err = regulator_enable(pad->supply);
if (err)
return err;
@@ -1945,6 +2023,52 @@ static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
.map = tegra210_usb3_port_map,
};
+static int tegra210_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool status)
+{
+ u32 value;
+
+ dev_dbg(padctl->dev, "%s vbus override\n", status ? "set" : "clear");
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_VBUS_ID);
+
+ if (status) {
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ value &= ~(XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_MASK <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT);
+ value |= XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_FLOATING <<
+ XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_SHIFT;
+ } else {
+ value &= ~XUSB_PADCTL_USB2_VBUS_ID_OVERRIDE_VBUS_ON;
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_VBUS_ID);
+
+ return 0;
+}
+
+static int tegra210_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl;
+ struct tegra_xusb_lane *lane;
+ u32 value;
+
+ lane = phy_get_drvdata(phy);
+ padctl = lane->pad->padctl;
+
+ value = padctl_readl(padctl,
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL0(lane->index));
+
+ if ((value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIP) ||
+ (value & XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL0_ZIN)) {
+ tegra210_xusb_padctl_vbus_override(padctl, false);
+ tegra210_xusb_padctl_vbus_override(padctl, true);
+ return 1;
+ }
+
+ return 0;
+}
+
static int
tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
{
@@ -2007,6 +2131,8 @@ static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
.remove = tegra210_xusb_padctl_remove,
.usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
.hsic_set_idle = tegra210_hsic_set_idle,
+ .vbus_override = tegra210_xusb_padctl_vbus_override,
+ .utmi_port_reset = tegra210_utmi_port_reset,
};
static const char * const tegra210_xusb_padctl_supply_names[] = {
@@ -2036,6 +2162,7 @@ const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
.ops = &tegra210_xusb_padctl_ops,
.supply_names = tegra210_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra210_xusb_padctl_supply_names),
+ .need_fake_usb3_port = true,
};
EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 2ea8497af82a..f98ec3922c02 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -800,9 +800,62 @@ static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
}
}
+static int tegra_xusb_find_unused_usb3_port(struct tegra_xusb_padctl *padctl)
+{
+ struct device_node *np;
+ unsigned int i;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ np = tegra_xusb_find_port_node(padctl, "usb3", i);
+ if (!np || !of_device_is_available(np))
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static bool tegra_xusb_port_is_companion(struct tegra_xusb_usb2_port *usb2)
+{
+ unsigned int i;
+ struct tegra_xusb_usb3_port *usb3;
+ struct tegra_xusb_padctl *padctl = usb2->base.padctl;
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ usb3 = tegra_xusb_find_usb3_port(padctl, i);
+ if (usb3 && usb3->port == usb2->base.index)
+ return true;
+ }
+
+ return false;
+}
+
+static int tegra_xusb_update_usb3_fake_port(struct tegra_xusb_usb2_port *usb2)
+{
+ int fake;
+
+ /* Disable usb3_port_fake usage by default and assign if needed */
+ usb2->usb3_port_fake = -1;
+
+ if ((usb2->mode == USB_DR_MODE_OTG ||
+ usb2->mode == USB_DR_MODE_PERIPHERAL) &&
+ !tegra_xusb_port_is_companion(usb2)) {
+ fake = tegra_xusb_find_unused_usb3_port(usb2->base.padctl);
+ if (fake < 0) {
+ dev_err(&usb2->base.dev, "no unused USB3 ports available\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&usb2->base.dev, "Found unused usb3 port: %d\n", fake);
+ usb2->usb3_port_fake = fake;
+ }
+
+ return 0;
+}
+
static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
{
struct tegra_xusb_port *port;
+ struct tegra_xusb_usb2_port *usb2;
unsigned int i;
int err = 0;
@@ -832,6 +885,18 @@ static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
goto remove_ports;
}
+ if (padctl->soc->need_fake_usb3_port) {
+ for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
+ usb2 = tegra_xusb_find_usb2_port(padctl, i);
+ if (!usb2)
+ continue;
+
+ err = tegra_xusb_update_usb3_fake_port(usb2);
+ if (err < 0)
+ goto remove_ports;
+ }
+ }
+
list_for_each_entry(port, &padctl->ports, list) {
err = port->ops->enable(port);
if (err < 0)
@@ -862,7 +927,6 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
struct resource *res;
- unsigned int i;
int err;
/* for backwards compatibility with old device trees */
@@ -907,8 +971,9 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
goto remove;
}
- for (i = 0; i < padctl->soc->num_supplies; i++)
- padctl->supplies[i].supply = padctl->soc->supply_names[i];
+ regulator_bulk_set_supply_names(padctl->supplies,
+ padctl->soc->supply_names,
+ padctl->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies,
padctl->supplies);
@@ -1056,6 +1121,28 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val)
+{
+ if (padctl->soc->ops->vbus_override)
+ return padctl->soc->ops->vbus_override(padctl, val);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_set_vbus_override);
+
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_port_reset)
+ return padctl->soc->ops->utmi_port_reset(phy);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 093076ca27fd..da94fcce6307 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -291,6 +291,7 @@ struct tegra_xusb_usb2_port {
struct regulator *supply;
enum usb_dr_mode mode;
bool internal;
+ int usb3_port_fake;
};
static inline struct tegra_xusb_usb2_port *
@@ -372,6 +373,8 @@ struct tegra_xusb_padctl_ops {
unsigned int index, bool idle);
int (*usb3_set_lfps_detect)(struct tegra_xusb_padctl *padctl,
unsigned int index, bool enable);
+ int (*vbus_override)(struct tegra_xusb_padctl *padctl, bool set);
+ int (*utmi_port_reset)(struct phy *phy);
};
struct tegra_xusb_padctl_soc {
@@ -389,6 +392,7 @@ struct tegra_xusb_padctl_soc {
const char * const *supply_names;
unsigned int num_supplies;
+ bool need_fake_usb3_port;
};
struct tegra_xusb_padctl {
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index cbcce7cf0028..26f194779064 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -189,7 +189,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct usb_otg *otg;
const struct of_device_id *of_id;
- const struct usb_phy_data *phy_data;
int error;
of_id = of_match_device(of_match_ptr(dm816x_usb_phy_id_table),
@@ -220,8 +219,6 @@ static int dm816x_usb_phy_probe(struct platform_device *pdev)
if (phy->usbphy_ctrl == 0x2c)
phy->instance = 1;
- phy_data = of_id->data;
-
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index a52c5bb35033..a28bd15297f5 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -69,11 +69,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
gmii_sel_mode = AM33XX_GMII_SEL_MODE_RGMII;
rgmii_id = 1;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b372419d61f2..3bfbf2ff6e2b 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -32,15 +32,15 @@ config DEBUG_PINCTRL
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
config PINCTRL_ARTPEC6
- bool "Axis ARTPEC-6 pin controller driver"
- depends on MACH_ARTPEC6
- select PINMUX
- select GENERIC_PINCONF
- help
- This is the driver for the Axis ARTPEC-6 pin controller. This driver
- supports pin function multiplexing as well as pin bias and drive
- strength configuration. Device tree integration instructions can be
- found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+ bool "Axis ARTPEC-6 pin controller driver"
+ depends on MACH_ARTPEC6
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This is the driver for the Axis ARTPEC-6 pin controller. This driver
+ supports pin function multiplexing as well as pin bias and drive
+ strength configuration. Device tree integration instructions can be
+ found in Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
config PINCTRL_AS3722
tristate "Pinctrl and GPIO driver for ams AS3722 PMIC"
@@ -420,4 +420,22 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
+config PINCTRL_EQUILIBRIUM
+ tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ help
+ Equilibrium pinctrl driver is a pinctrl and GPIO driver for the Intel
+ Lightning Mountain network processor SoC. It supports both the Linux
+ GPIO and pin control frameworks, providing interfaces to set up pinmux,
+ assign the desired pin functions and configure GPIO attributes for LGM
+ SoC pins. Pinmux and pinconf settings are retrieved from the device
+ tree.
+
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index ac537fdbc998..879f312bfb75 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
+obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 5dfe7188a5f8..5a0c8e87aa7c 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -915,7 +915,6 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
int owl_pinctrl_probe(struct platform_device *pdev,
struct owl_pinctrl_soc_data *soc_data)
{
- struct resource *res;
struct owl_pinctrl *pctrl;
int ret, i;
@@ -923,8 +922,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index bc3b232a727a..f690fc5cd688 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1400,12 +1400,10 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = {
static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
{
struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
- struct resource *res;
struct pinctrl_dev *pctl;
/* So far We can assume there is only 1 bank of registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
return -ENODEV;
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index dcab2204c60c..4344c5732400 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -940,7 +940,6 @@ static int cygnus_mux_log_init(struct cygnus_pinctrl *pinctrl)
static int cygnus_pinmux_probe(struct platform_device *pdev)
{
struct cygnus_pinctrl *pinctrl;
- struct resource *res;
int i, ret;
struct pinctrl_pin_desc *pins;
unsigned num_pins = ARRAY_SIZE(cygnus_pins);
@@ -953,15 +952,13 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base0);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pinctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pinctrl->base1)) {
dev_err(&pdev->dev, "unable to map I/O space\n");
return PTR_ERR(pinctrl->base1);
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 42f7ab383ad9..831a9318c384 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -795,8 +795,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
@@ -850,7 +849,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
irqc = &chip->irqchip;
- irqc->name = "bcm-iproc-gpio";
+ irqc->name = dev_name(dev);
irqc->irq_ack = iproc_gpio_irq_ack;
irqc->irq_mask = iproc_gpio_irq_mask;
irqc->irq_unmask = iproc_gpio_irq_unmask;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 9fabc451550e..32f268f173d1 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1042,8 +1042,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -1057,8 +1056,7 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->pinconf_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->pinconf_base))
return PTR_ERR(pinctrl->pinconf_base);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index e67ae52023ad..bed0124388c0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -64,17 +64,16 @@
* @gc: GPIO chip
* @pctl: pointer to pinctrl_dev
* @pctldesc: pinctrl descriptor
- * @irq_domain: pointer to irq domain
* @lock: lock to protect access to I/O registers
*/
struct nsp_gpio {
struct device *dev;
void __iomem *base;
void __iomem *io_ctrl;
+ struct irq_chip irqchip;
struct gpio_chip gc;
struct pinctrl_dev *pctl;
struct pinctrl_desc pctldesc;
- struct irq_domain *irq_domain;
raw_spinlock_t lock;
};
@@ -136,8 +135,8 @@ static inline bool nsp_get_bit(struct nsp_gpio *chip, enum base_type address,
static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
{
- struct nsp_gpio *chip = (struct nsp_gpio *)data;
- struct gpio_chip gc = chip->gc;
+ struct gpio_chip *gc = (struct gpio_chip *)data;
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
int bit;
unsigned long int_bits = 0;
u32 int_status;
@@ -155,14 +154,14 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
level &= readl(chip->base + NSP_GPIO_INT_MASK);
int_bits = level | event;
- for_each_set_bit(bit, &int_bits, gc.ngpio) {
+ for_each_set_bit(bit, &int_bits, gc->ngpio) {
/*
* Clear the interrupt before invoking the
* handler, so we do not leave any window
*/
writel(BIT(bit), chip->base + NSP_GPIO_EVENT);
generic_handle_irq(
- irq_linear_revmap(chip->irq_domain, bit));
+ irq_linear_revmap(gc->irq.domain, bit));
}
}
@@ -171,7 +170,8 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data)
static void nsp_gpio_irq_ack(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 val = BIT(gpio);
u32 trigger_type;
@@ -189,7 +189,8 @@ static void nsp_gpio_irq_ack(struct irq_data *d)
*/
static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
u32 trigger_type;
@@ -202,7 +203,8 @@ static void nsp_gpio_irq_set_mask(struct irq_data *d, bool unmask)
static void nsp_gpio_irq_mask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -212,7 +214,8 @@ static void nsp_gpio_irq_mask(struct irq_data *d)
static void nsp_gpio_irq_unmask(struct irq_data *d)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
raw_spin_lock_irqsave(&chip->lock, flags);
@@ -222,7 +225,8 @@ static void nsp_gpio_irq_unmask(struct irq_data *d)
static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct nsp_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
bool level_low;
bool falling;
@@ -265,16 +269,6 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip nsp_gpio_irq_chip = {
- .name = "gpio-a",
- .irq_enable = nsp_gpio_irq_unmask,
- .irq_disable = nsp_gpio_irq_mask,
- .irq_ack = nsp_gpio_irq_ack,
- .irq_mask = nsp_gpio_irq_mask,
- .irq_unmask = nsp_gpio_irq_unmask,
- .irq_set_type = nsp_gpio_irq_set_type,
-};
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -303,30 +297,36 @@ static int nsp_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
return 0;
}
-static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int nsp_gpio_get_direction(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
+ int val;
raw_spin_lock_irqsave(&chip->lock, flags);
- nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ val = nsp_get_bit(chip, REG, NSP_GPIO_OUT_EN, gpio);
raw_spin_unlock_irqrestore(&chip->lock, flags);
- dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
+ return !val;
}
-static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
+static void nsp_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
+ unsigned long flags;
- return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
+ raw_spin_lock_irqsave(&chip->lock, flags);
+ nsp_set_bit(chip, REG, NSP_GPIO_DATA_OUT, gpio, !!(val));
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+ dev_dbg(chip->dev, "gpio:%u set, value:%d\n", gpio, val);
}
-static int nsp_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+static int nsp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
- return irq_linear_revmap(chip->irq_domain, offset);
+ return !!(readl(chip->base + NSP_GPIO_DATA_IN) & BIT(gpio));
}
static int nsp_get_groups_count(struct pinctrl_dev *pctldev)
@@ -613,10 +613,9 @@ static const struct of_device_id nsp_gpio_of_match[] = {
static int nsp_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct nsp_gpio *chip;
struct gpio_chip *gc;
- u32 val, count;
+ u32 val;
int irq, ret;
if (of_property_read_u32(pdev->dev.of_node, "ngpios", &val)) {
@@ -631,15 +630,13 @@ static int nsp_gpio_probe(struct platform_device *pdev)
chip->dev = dev;
platform_set_drvdata(pdev, chip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->base);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- chip->io_ctrl = devm_ioremap_resource(dev, res);
+ chip->io_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(chip->io_ctrl)) {
dev_err(dev, "unable to map I/O memory\n");
return PTR_ERR(chip->io_ctrl);
@@ -657,46 +654,47 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
+ gc->get_direction = nsp_gpio_get_direction;
gc->set = nsp_gpio_set;
gc->get = nsp_gpio_get;
- gc->to_irq = nsp_gpio_to_irq;
/* optional GPIO interrupt support */
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- /* Create irq domain so that each pin can be assigned an IRQ.*/
- chip->irq_domain = irq_domain_add_linear(gc->of_node, gc->ngpio,
- &irq_domain_simple_ops,
- chip);
- if (!chip->irq_domain) {
- dev_err(&pdev->dev, "Couldn't allocate IRQ domain\n");
- return -ENXIO;
- }
+ struct gpio_irq_chip *girq;
+ struct irq_chip *irqc;
- /* Map each gpio to an IRQ and set the handler for gpiolib. */
- for (count = 0; count < gc->ngpio; count++) {
- int irq = irq_create_mapping(chip->irq_domain, count);
+ irqc = &chip->irqchip;
+ irqc->name = "gpio-a";
+ irqc->irq_ack = nsp_gpio_irq_ack;
+ irqc->irq_mask = nsp_gpio_irq_mask;
+ irqc->irq_unmask = nsp_gpio_irq_unmask;
+ irqc->irq_set_type = nsp_gpio_irq_set_type;
- irq_set_chip_and_handler(irq, &nsp_gpio_irq_chip,
- handle_simple_irq);
- irq_set_chip_data(irq, chip);
- }
+ val = readl(chip->base + NSP_CHIP_A_INT_MASK);
+ val = val | NSP_CHIP_A_GPIO_INT_BIT;
+ writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
/* Install ISR for this GPIO controller. */
- ret = devm_request_irq(&pdev->dev, irq, nsp_gpio_irq_handler,
- IRQF_SHARED, "gpio-a", chip);
+ ret = devm_request_irq(dev, irq, nsp_gpio_irq_handler,
+ IRQF_SHARED, "gpio-a", &chip->gc);
if (ret) {
dev_err(&pdev->dev, "Unable to request IRQ%d: %d\n",
irq, ret);
- goto err_rm_gpiochip;
+ return ret;
}
- val = readl(chip->base + NSP_CHIP_A_INT_MASK);
- val = val | NSP_CHIP_A_GPIO_INT_BIT;
- writel(val, (chip->base + NSP_CHIP_A_INT_MASK));
+ girq = &chip->gc.irq;
+ girq->chip = irqc;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
}
- ret = gpiochip_add_data(gc, chip);
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret < 0) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -705,15 +703,10 @@ static int nsp_gpio_probe(struct platform_device *pdev)
ret = nsp_gpio_register_pinconf(chip);
if (ret) {
dev_err(dev, "unable to register pinconf\n");
- goto err_rm_gpiochip;
+ return ret;
}
return 0;
-
-err_rm_gpiochip:
- gpiochip_remove(gc);
-
- return ret;
}
static struct platform_driver nsp_gpio_driver = {
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 87618a4e90e4..3756fc9d5826 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -571,8 +571,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pinctrl);
spin_lock_init(&pinctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
@@ -586,8 +585,7 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- pinctrl->base2 = devm_ioremap_resource(&pdev->dev, res);
+ pinctrl->base2 = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(pinctrl->base2))
return PTR_ERR(pinctrl->base2);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 5d6d8b1e9062..674920daac26 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -29,6 +29,13 @@ struct pinctrl_dt_map {
static void dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ int i;
+
+ for (i = 0; i < num_maps; ++i) {
+ kfree_const(map[i].dev_name);
+ map[i].dev_name = NULL;
+ }
+
if (pctldev) {
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
if (ops->dt_free_map)
@@ -63,7 +70,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Initialize common mapping table entry fields */
for (i = 0; i < num_maps; i++) {
- map[i].dev_name = dev_name(p->dev);
+ const char *devname;
+
+ devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
+ if (!devname)
+ goto err_free_map;
+
+ map[i].dev_name = devname;
map[i].name = statename;
if (pctldev)
map[i].ctrl_dev_name = dev_name(pctldev->dev);
@@ -71,10 +84,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
/* Remember the converted mapping table entries */
dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
- if (!dt_map) {
- dt_free_map(pctldev, map, num_maps);
- return -ENOMEM;
- }
+ if (!dt_map)
+ goto err_free_map;
dt_map->pctldev = pctldev;
dt_map->map = map;
@@ -82,6 +93,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
list_add_tail(&dt_map->node, &p->dt_maps);
return pinctrl_register_map(map, num_maps, false);
+
+err_free_map:
+ dt_free_map(pctldev, map, num_maps);
+ return -ENOMEM;
}
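
A note on the const-aware string helpers introduced in this hunk:
kstrdup_const() skips the allocation when the source string lives in
read-only kernel memory and simply returns the same pointer, while
kfree_const() frees only when a real copy was made, so the pair can be
used without tracking which case occurred. A minimal sketch of the
pairing, with a hypothetical label string:

    const char *label;

    /* copies only if the source does not point into .rodata */
    label = kstrdup_const(dev_name(dev), GFP_KERNEL);
    if (!label)
            return -ENOMEM;

    /* ... use label ... */

    kfree_const(label);     /* no-op in the .rodata case */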
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
@@ -147,6 +162,16 @@ static int dt_to_map_one_config(struct pinctrl *p,
ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps);
if (ret < 0)
return ret;
+ else if (num_maps == 0) {
+ /*
+ * If we have no valid maps (perhaps because of an empty
+ * pinctrl node or a typo), there is no need to remember
+ * this, so just return.
+ */
+ dev_info(p->dev,
+ "no valid maps for state %s\n", statename);
+ return 0;
+ }
/* Stash the mapping table chunk away for later use */
return dt_remember_or_free_map(p, statename, pctldev, map, num_maps);
@@ -166,21 +191,6 @@ static int dt_remember_dummy_state(struct pinctrl *p, const char *statename)
return dt_remember_or_free_map(p, statename, NULL, map, 1);
}
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- struct device_node *np;
- struct property *prop;
- int size;
-
- np = pctldev->dev->of_node;
- if (!np)
- return false;
-
- prop = of_find_property(np, "pinctrl-0", &size);
-
- return prop ? true : false;
-}
-
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
{
struct device_node *np = p->dev->of_node;
diff --git a/drivers/pinctrl/devicetree.h b/drivers/pinctrl/devicetree.h
index 00e645d7fac7..efa80779de4f 100644
--- a/drivers/pinctrl/devicetree.h
+++ b/drivers/pinctrl/devicetree.h
@@ -9,8 +9,6 @@ struct of_phandle_args;
#ifdef CONFIG_OF
-bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev);
-
void pinctrl_dt_free_maps(struct pinctrl *p);
int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev);
@@ -23,11 +21,6 @@ int pinctrl_parse_index_with_args(const struct device_node *np,
#else
-static inline bool pinctrl_dt_has_hogs(struct pinctrl_dev *pctldev)
-{
- return false;
-}
-
static inline int pinctrl_dt_to_map(struct pinctrl *p,
struct pinctrl_dev *pctldev)
{
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 5f4058033ec6..3ea9ce3e0cd9 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -39,12 +39,12 @@ config PINCTRL_IMX27
config PINCTRL_IMX25
- bool "IMX25 pinctrl driver"
- depends on OF
- depends on SOC_IMX25
- select PINCTRL_IMX
- help
- Say Y here to enable the imx25 pinctrl driver
+ bool "IMX25 pinctrl driver"
+ depends on OF
+ depends on SOC_IMX25
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imx25 pinctrl driver
config PINCTRL_IMX35
bool "IMX35 pinctrl driver"
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 452a14f78707..6091947a8f51 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -115,4 +115,11 @@ config PINCTRL_SUNRISEPOINT
provides an interface that allows configuring of PCH pins and
using them as GPIOs.
+config PINCTRL_TIGERLAKE
+ tristate "Intel Tiger Lake pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Tiger Lake PCH pins and using them as GPIOs.
endif
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index cb491e655749..7e620b471ef6 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
+obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2c419fa5d1c1..582fa8a75559 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -165,7 +165,7 @@ struct chv_pinctrl {
struct gpio_chip chip;
struct irq_chip irqchip;
void __iomem *regs;
- unsigned intr_lines[16];
+ unsigned int intr_lines[16];
const struct chv_community *community;
u32 saved_intmask;
struct chv_pin_context *saved_pin_context;
@@ -379,7 +379,7 @@ static const struct chv_community southwest_community = {
.gpio_ranges = southwest_gpio_ranges,
.ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
/*
- * Southwest community can benerate GPIO interrupts only for the
+ * Southwest community can generate GPIO interrupts only for the
* first 8 interrupts. The upper half (8-15) can only be used to
* trigger GPEs.
*/
@@ -1480,7 +1480,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
pending = readl(pctrl->regs + CHV_INTSTAT);
for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
- unsigned irq, offset;
+ unsigned int irq, offset;
offset = pctrl->intr_lines[intr_line];
irq = irq_find_mapping(gc->irq.domain, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 83981ad66a71..4860bc9a4e48 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1131,7 +1131,7 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
pending &= enabled;
for_each_set_bit(gpp_offset, &pending, padgrp->size) {
- unsigned irq;
+ unsigned int irq;
irq = irq_find_mapping(gc->irq.domain,
padgrp->gpio_base + gpp_offset);
@@ -1181,7 +1181,7 @@ static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
return ret;
}
-static unsigned intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
+static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
{
const struct intel_community *community;
unsigned int ngpio = 0;
@@ -1595,16 +1595,65 @@ intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size)
return requested;
}
-static u32
-intel_gpio_update_pad_mode(void __iomem *hostown, u32 mask, u32 value)
+static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
- curr = readl(hostown);
+ curr = readl(reg);
+
updated = (curr & ~mask) | (value & mask);
- writel(updated, hostown);
+ if (curr == updated)
+ return false;
+
+ writel(updated, reg);
+ return true;
+}
+
+static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ const struct intel_community *community = &pctrl->communities[c];
+ const struct intel_padgroup *padgrp = &community->gpps[gpp];
+ struct device *dev = pctrl->dev;
+ u32 requested;
+
+ if (padgrp->gpio_base < 0)
+ return;
- return curr;
+ requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
+ if (!intel_gpio_update_reg(base + gpp * 4, requested, saved))
+ return;
+
+ dev_dbg(dev, "restored hostown %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_intmask(struct intel_pinctrl *pctrl, unsigned int c,
+ void __iomem *base, unsigned int gpp, u32 saved)
+{
+ struct device *dev = pctrl->dev;
+
+ if (!intel_gpio_update_reg(base + gpp * 4, ~0U, saved))
+ return;
+
+ dev_dbg(dev, "restored mask %u/%u %#08x\n", c, gpp, readl(base + gpp * 4));
+}
+
+static void intel_restore_padcfg(struct intel_pinctrl *pctrl, unsigned int pin,
+ unsigned int reg, u32 saved)
+{
+ u32 mask = (reg == PADCFG0) ? PADCFG0_GPIORXSTATE : 0;
+ unsigned int n = reg / sizeof(u32);
+ struct device *dev = pctrl->dev;
+ void __iomem *padcfg;
+
+ padcfg = intel_get_padcfg(pctrl, pin, reg);
+ if (!padcfg)
+ return;
+
+ if (!intel_gpio_update_reg(padcfg, ~mask, saved))
+ return;
+
+ dev_dbg(dev, "restored pin %u padcfg%u %#08x\n", pin, n, readl(padcfg));
}
int intel_pinctrl_resume_noirq(struct device *dev)
@@ -1620,37 +1669,13 @@ int intel_pinctrl_resume_noirq(struct device *dev)
pads = pctrl->context.pads;
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- void __iomem *padcfg;
- u32 val;
if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
- val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
- if (val != pads[i].padcfg0) {
- writel(pads[i].padcfg0, padcfg);
- dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
- val = readl(padcfg);
- if (val != pads[i].padcfg1) {
- writel(pads[i].padcfg1, padcfg);
- dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
- desc->number, readl(padcfg));
- }
-
- padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG2);
- if (padcfg) {
- val = readl(padcfg);
- if (val != pads[i].padcfg2) {
- writel(pads[i].padcfg2, padcfg);
- dev_dbg(dev, "restored pin %u padcfg2 %#08x\n",
- desc->number, readl(padcfg));
- }
- }
+ intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG1, pads[i].padcfg1);
+ intel_restore_padcfg(pctrl, desc->number, PADCFG2, pads[i].padcfg2);
}
communities = pctrl->context.communities;
@@ -1660,30 +1685,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
unsigned int gpp;
base = community->regs + community->ie_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- writel(communities[i].intmask[gpp], base + gpp * 4);
- dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
- readl(base + gpp * 4));
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_intmask(pctrl, i, base, gpp, communities[i].intmask[gpp]);
base = community->regs + community->hostown_offset;
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- const struct intel_padgroup *padgrp = &community->gpps[gpp];
- u32 requested = 0, value = 0;
- u32 saved = communities[i].hostown[gpp];
-
- if (padgrp->gpio_base < 0)
- continue;
-
- requested = intel_gpio_is_requested(&pctrl->chip,
- padgrp->gpio_base, padgrp->size);
- value = intel_gpio_update_pad_mode(base + gpp * 4,
- requested, saved);
- if ((value ^ saved) & requested) {
- dev_warn(dev, "restore hostown %d/%u %#8x->%#8x\n",
- i, gpp, value, saved);
- }
- }
+ for (gpp = 0; gpp < community->ngpps; gpp++)
+ intel_restore_hostown(pctrl, i, base, gpp, communities[i].hostown[gpp]);
}
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index 2e06fb1464ab..7fdf4257df1e 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -33,6 +33,7 @@
.npins = ((e) - (s) + 1), \
}
+/* Lewisburg */
static const struct pinctrl_pin_desc lbg_pins[] = {
/* GPP_A */
PINCTRL_PIN(0, "RCINB"),
@@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(33, "SRCCLKREQB_4"),
PINCTRL_PIN(34, "SRCCLKREQB_5"),
PINCTRL_PIN(35, "GPP_B_11"),
- PINCTRL_PIN(36, "GLB_RST_WARN_N"),
+ PINCTRL_PIN(36, "SLP_S0B"),
PINCTRL_PIN(37, "PLTRSTB"),
PINCTRL_PIN(38, "SPKR"),
PINCTRL_PIN(39, "GPP_B_15"),
@@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
PINCTRL_PIN(141, "GBE_PCI_DIS"),
PINCTRL_PIN(142, "GBE_LAN_DIS"),
PINCTRL_PIN(143, "GPP_I_10"),
- PINCTRL_PIN(144, "GPIO_RCOMP_3P3"),
/* GPP_J */
- PINCTRL_PIN(145, "GBE_LED_0_0"),
- PINCTRL_PIN(146, "GBE_LED_0_1"),
- PINCTRL_PIN(147, "GBE_LED_1_0"),
- PINCTRL_PIN(148, "GBE_LED_1_1"),
- PINCTRL_PIN(149, "GBE_LED_2_0"),
- PINCTRL_PIN(150, "GBE_LED_2_1"),
- PINCTRL_PIN(151, "GBE_LED_3_0"),
- PINCTRL_PIN(152, "GBE_LED_3_1"),
- PINCTRL_PIN(153, "GBE_SCL_0"),
- PINCTRL_PIN(154, "GBE_SDA_0"),
- PINCTRL_PIN(155, "GBE_SCL_1"),
- PINCTRL_PIN(156, "GBE_SDA_1"),
- PINCTRL_PIN(157, "GBE_SCL_2"),
- PINCTRL_PIN(158, "GBE_SDA_2"),
- PINCTRL_PIN(159, "GBE_SCL_3"),
- PINCTRL_PIN(160, "GBE_SDA_3"),
- PINCTRL_PIN(161, "GBE_SDP_0_0"),
- PINCTRL_PIN(162, "GBE_SDP_0_1"),
- PINCTRL_PIN(163, "GBE_SDP_1_0"),
- PINCTRL_PIN(164, "GBE_SDP_1_1"),
- PINCTRL_PIN(165, "GBE_SDP_2_0"),
- PINCTRL_PIN(166, "GBE_SDP_2_1"),
- PINCTRL_PIN(167, "GBE_SDP_3_0"),
- PINCTRL_PIN(168, "GBE_SDP_3_1"),
+ PINCTRL_PIN(144, "GBE_LED_0_0"),
+ PINCTRL_PIN(145, "GBE_LED_0_1"),
+ PINCTRL_PIN(146, "GBE_LED_1_0"),
+ PINCTRL_PIN(147, "GBE_LED_1_1"),
+ PINCTRL_PIN(148, "GBE_LED_2_0"),
+ PINCTRL_PIN(149, "GBE_LED_2_1"),
+ PINCTRL_PIN(150, "GBE_LED_3_0"),
+ PINCTRL_PIN(151, "GBE_LED_3_1"),
+ PINCTRL_PIN(152, "GBE_SCL_0"),
+ PINCTRL_PIN(153, "GBE_SDA_0"),
+ PINCTRL_PIN(154, "GBE_SCL_1"),
+ PINCTRL_PIN(155, "GBE_SDA_1"),
+ PINCTRL_PIN(156, "GBE_SCL_2"),
+ PINCTRL_PIN(157, "GBE_SDA_2"),
+ PINCTRL_PIN(158, "GBE_SCL_3"),
+ PINCTRL_PIN(159, "GBE_SDA_3"),
+ PINCTRL_PIN(160, "GBE_SDP_0_0"),
+ PINCTRL_PIN(161, "GBE_SDP_0_1"),
+ PINCTRL_PIN(162, "GBE_SDP_1_0"),
+ PINCTRL_PIN(163, "GBE_SDP_1_1"),
+ PINCTRL_PIN(164, "GBE_SDP_2_0"),
+ PINCTRL_PIN(165, "GBE_SDP_2_1"),
+ PINCTRL_PIN(166, "GBE_SDP_3_0"),
+ PINCTRL_PIN(167, "GBE_SDP_3_1"),
/* GPP_K */
- PINCTRL_PIN(169, "GBE_RMIICLK"),
- PINCTRL_PIN(170, "GBE_RMII_TXD_0"),
- PINCTRL_PIN(171, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(168, "GBE_RMIICLK"),
+ PINCTRL_PIN(169, "GBE_RMII_RXD_0"),
+ PINCTRL_PIN(170, "GBE_RMII_RXD_1"),
+ PINCTRL_PIN(171, "GBE_RMII_CRS_DV"),
PINCTRL_PIN(172, "GBE_RMII_TX_EN"),
- PINCTRL_PIN(173, "GBE_RMII_CRS_DV"),
- PINCTRL_PIN(174, "GBE_RMII_RXD_0"),
- PINCTRL_PIN(175, "GBE_RMII_RXD_1"),
- PINCTRL_PIN(176, "GBE_RMII_RX_ER"),
- PINCTRL_PIN(177, "GBE_RMII_ARBIN"),
- PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"),
- PINCTRL_PIN(179, "PE_RST_N"),
- PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"),
+ PINCTRL_PIN(173, "GBE_RMII_TXD_0"),
+ PINCTRL_PIN(174, "GBE_RMII_TXD_1"),
+ PINCTRL_PIN(175, "GBE_RMII_RX_ER"),
+ PINCTRL_PIN(176, "GBE_RMII_ARBIN"),
+ PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"),
+ PINCTRL_PIN(178, "PE_RST_N"),
/* GPP_G */
- PINCTRL_PIN(181, "FAN_TACH_0"),
- PINCTRL_PIN(182, "FAN_TACH_1"),
- PINCTRL_PIN(183, "FAN_TACH_2"),
- PINCTRL_PIN(184, "FAN_TACH_3"),
- PINCTRL_PIN(185, "FAN_TACH_4"),
- PINCTRL_PIN(186, "FAN_TACH_5"),
- PINCTRL_PIN(187, "FAN_TACH_6"),
- PINCTRL_PIN(188, "FAN_TACH_7"),
- PINCTRL_PIN(189, "FAN_PWM_0"),
- PINCTRL_PIN(190, "FAN_PWM_1"),
- PINCTRL_PIN(191, "FAN_PWM_2"),
- PINCTRL_PIN(192, "FAN_PWM_3"),
- PINCTRL_PIN(193, "GSXDOUT"),
- PINCTRL_PIN(194, "GSXSLOAD"),
- PINCTRL_PIN(195, "GSXDIN"),
- PINCTRL_PIN(196, "GSXSRESETB"),
- PINCTRL_PIN(197, "GSXCLK"),
- PINCTRL_PIN(198, "ADR_COMPLETE"),
- PINCTRL_PIN(199, "NMIB"),
- PINCTRL_PIN(200, "SMIB"),
- PINCTRL_PIN(201, "SSATA_DEVSLP_0"),
- PINCTRL_PIN(202, "SSATA_DEVSLP_1"),
- PINCTRL_PIN(203, "SSATA_DEVSLP_2"),
- PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"),
+ PINCTRL_PIN(179, "FAN_TACH_0"),
+ PINCTRL_PIN(180, "FAN_TACH_1"),
+ PINCTRL_PIN(181, "FAN_TACH_2"),
+ PINCTRL_PIN(182, "FAN_TACH_3"),
+ PINCTRL_PIN(183, "FAN_TACH_4"),
+ PINCTRL_PIN(184, "FAN_TACH_5"),
+ PINCTRL_PIN(185, "FAN_TACH_6"),
+ PINCTRL_PIN(186, "FAN_TACH_7"),
+ PINCTRL_PIN(187, "FAN_PWM_0"),
+ PINCTRL_PIN(188, "FAN_PWM_1"),
+ PINCTRL_PIN(189, "FAN_PWM_2"),
+ PINCTRL_PIN(190, "FAN_PWM_3"),
+ PINCTRL_PIN(191, "GSXDOUT"),
+ PINCTRL_PIN(192, "GSXSLOAD"),
+ PINCTRL_PIN(193, "GSXDIN"),
+ PINCTRL_PIN(194, "GSXSRESETB"),
+ PINCTRL_PIN(195, "GSXCLK"),
+ PINCTRL_PIN(196, "ADR_COMPLETE"),
+ PINCTRL_PIN(197, "NMIB"),
+ PINCTRL_PIN(198, "SMIB"),
+ PINCTRL_PIN(199, "SSATA_DEVSLP_0"),
+ PINCTRL_PIN(200, "SSATA_DEVSLP_1"),
+ PINCTRL_PIN(201, "SSATA_DEVSLP_2"),
+ PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"),
/* GPP_H */
- PINCTRL_PIN(205, "SRCCLKREQB_6"),
- PINCTRL_PIN(206, "SRCCLKREQB_7"),
- PINCTRL_PIN(207, "SRCCLKREQB_8"),
- PINCTRL_PIN(208, "SRCCLKREQB_9"),
- PINCTRL_PIN(209, "SRCCLKREQB_10"),
- PINCTRL_PIN(210, "SRCCLKREQB_11"),
- PINCTRL_PIN(211, "SRCCLKREQB_12"),
- PINCTRL_PIN(212, "SRCCLKREQB_13"),
- PINCTRL_PIN(213, "SRCCLKREQB_14"),
- PINCTRL_PIN(214, "SRCCLKREQB_15"),
- PINCTRL_PIN(215, "SML2CLK"),
- PINCTRL_PIN(216, "SML2DATA"),
- PINCTRL_PIN(217, "SML2ALERTB"),
- PINCTRL_PIN(218, "SML3CLK"),
- PINCTRL_PIN(219, "SML3DATA"),
- PINCTRL_PIN(220, "SML3ALERTB"),
- PINCTRL_PIN(221, "SML4CLK"),
- PINCTRL_PIN(222, "SML4DATA"),
- PINCTRL_PIN(223, "SML4ALERTB"),
- PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"),
- PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"),
- PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"),
- PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"),
- PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"),
+ PINCTRL_PIN(203, "SRCCLKREQB_6"),
+ PINCTRL_PIN(204, "SRCCLKREQB_7"),
+ PINCTRL_PIN(205, "SRCCLKREQB_8"),
+ PINCTRL_PIN(206, "SRCCLKREQB_9"),
+ PINCTRL_PIN(207, "SRCCLKREQB_10"),
+ PINCTRL_PIN(208, "SRCCLKREQB_11"),
+ PINCTRL_PIN(209, "SRCCLKREQB_12"),
+ PINCTRL_PIN(210, "SRCCLKREQB_13"),
+ PINCTRL_PIN(211, "SRCCLKREQB_14"),
+ PINCTRL_PIN(212, "SRCCLKREQB_15"),
+ PINCTRL_PIN(213, "SML2CLK"),
+ PINCTRL_PIN(214, "SML2DATA"),
+ PINCTRL_PIN(215, "SML2ALERTB"),
+ PINCTRL_PIN(216, "SML3CLK"),
+ PINCTRL_PIN(217, "SML3DATA"),
+ PINCTRL_PIN(218, "SML3ALERTB"),
+ PINCTRL_PIN(219, "SML4CLK"),
+ PINCTRL_PIN(220, "SML4DATA"),
+ PINCTRL_PIN(221, "SML4ALERTB"),
+ PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"),
+ PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"),
+ PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"),
+ PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"),
+ PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"),
/* GPP_L */
+ PINCTRL_PIN(227, "GPP_L_0"),
+ PINCTRL_PIN(228, "EC_CSME_INTR_OUT"),
PINCTRL_PIN(229, "VISA2CH0_D0"),
PINCTRL_PIN(230, "VISA2CH0_D1"),
PINCTRL_PIN(231, "VISA2CH0_D2"),
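The renumbering above closes the gap left in GPP_K: PINCTRL_PIN(a, b) is just a designated initializer ({ .number = (a), .name = (b) } in <linux/pinctrl/pinctrl.h>), and the Intel pinctrl core expects pin numbers to be contiguous so that the pad-group ranges computed from first/last pin stay valid. A minimal sketch of that invariant, assuming only the stock macro; the helper itself is hypothetical:

#include <linux/pinctrl/pinctrl.h>

/*
 * Hypothetical sanity check for a renumbered pin table: entry i must
 * carry pin number i, or padgroup base/size arithmetic built on top
 * of the table silently points at the wrong pads.
 */
static bool pin_table_is_contiguous(const struct pinctrl_pin_desc *pins,
				    unsigned int npins)
{
	unsigned int i;

	for (i = 0; i < npins; i++)
		if (pins[i].number != i)
			return false;	/* hole or duplicate left by an edit */

	return true;
}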
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
new file mode 100644
index 000000000000..58572b15b3ce
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Tiger Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define TGL_PAD_OWN 0x020
+#define TGL_PADCFGLOCK 0x080
+#define TGL_HOSTSW_OWN 0x0b0
+#define TGL_GPI_IS 0x100
+#define TGL_GPI_IE 0x120
+
+#define TGL_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define TGL_COMMUNITY(s, e, g) \
+ { \
+ .padown_offset = TGL_PAD_OWN, \
+ .padcfglock_offset = TGL_PADCFGLOCK, \
+ .hostown_offset = TGL_HOSTSW_OWN, \
+ .is_offset = TGL_GPI_IS, \
+ .ie_offset = TGL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Tiger Lake-LP */
+static const struct pinctrl_pin_desc tgllp_community0_pins[] = {
+ /* GPP_B */
+ PINCTRL_PIN(0, "CORE_VID_0"),
+ PINCTRL_PIN(1, "CORE_VID_1"),
+ PINCTRL_PIN(2, "VRALERTB"),
+ PINCTRL_PIN(3, "CPU_GP_2"),
+ PINCTRL_PIN(4, "CPU_GP_3"),
+ PINCTRL_PIN(5, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(6, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(7, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(8, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(9, "I2C5_SDA"),
+ PINCTRL_PIN(10, "I2C5_SCL"),
+ PINCTRL_PIN(11, "PMCALERTB"),
+ PINCTRL_PIN(12, "SLP_S0B"),
+ PINCTRL_PIN(13, "PLTRSTB"),
+ PINCTRL_PIN(14, "SPKR"),
+ PINCTRL_PIN(15, "GSPI0_CS0B"),
+ PINCTRL_PIN(16, "GSPI0_CLK"),
+ PINCTRL_PIN(17, "GSPI0_MISO"),
+ PINCTRL_PIN(18, "GSPI0_MOSI"),
+ PINCTRL_PIN(19, "GSPI1_CS0B"),
+ PINCTRL_PIN(20, "GSPI1_CLK"),
+ PINCTRL_PIN(21, "GSPI1_MISO"),
+ PINCTRL_PIN(22, "GSPI1_MOSI"),
+ PINCTRL_PIN(23, "SML1ALERTB"),
+ PINCTRL_PIN(24, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(25, "GSPI1_CLK_LOOPBK"),
+ /* GPP_T */
+ PINCTRL_PIN(26, "I2C6_SDA"),
+ PINCTRL_PIN(27, "I2C6_SCL"),
+ PINCTRL_PIN(28, "I2C7_SDA"),
+ PINCTRL_PIN(29, "I2C7_SCL"),
+ PINCTRL_PIN(30, "UART4_RXD"),
+ PINCTRL_PIN(31, "UART4_TXD"),
+ PINCTRL_PIN(32, "UART4_RTSB"),
+ PINCTRL_PIN(33, "UART4_CTSB"),
+ PINCTRL_PIN(34, "UART5_RXD"),
+ PINCTRL_PIN(35, "UART5_TXD"),
+ PINCTRL_PIN(36, "UART5_RTSB"),
+ PINCTRL_PIN(37, "UART5_CTSB"),
+ PINCTRL_PIN(38, "UART6_RXD"),
+ PINCTRL_PIN(39, "UART6_TXD"),
+ PINCTRL_PIN(40, "UART6_RTSB"),
+ PINCTRL_PIN(41, "UART6_CTSB"),
+ /* GPP_A */
+ PINCTRL_PIN(42, "ESPI_IO_0"),
+ PINCTRL_PIN(43, "ESPI_IO_1"),
+ PINCTRL_PIN(44, "ESPI_IO_2"),
+ PINCTRL_PIN(45, "ESPI_IO_3"),
+ PINCTRL_PIN(46, "ESPI_CSB"),
+ PINCTRL_PIN(47, "ESPI_CLK"),
+ PINCTRL_PIN(48, "ESPI_RESETB"),
+ PINCTRL_PIN(49, "I2S2_SCLK"),
+ PINCTRL_PIN(50, "I2S2_SFRM"),
+ PINCTRL_PIN(51, "I2S2_TXD"),
+ PINCTRL_PIN(52, "I2S2_RXD"),
+ PINCTRL_PIN(53, "PMC_I2C_SDA"),
+ PINCTRL_PIN(54, "SATAXPCIE_1"),
+ PINCTRL_PIN(55, "PMC_I2C_SCL"),
+ PINCTRL_PIN(56, "USB2_OCB_1"),
+ PINCTRL_PIN(57, "USB2_OCB_2"),
+ PINCTRL_PIN(58, "USB2_OCB_3"),
+ PINCTRL_PIN(59, "DDSP_HPD_C"),
+ PINCTRL_PIN(60, "DDSP_HPD_B"),
+ PINCTRL_PIN(61, "DDSP_HPD_1"),
+ PINCTRL_PIN(62, "DDSP_HPD_2"),
+ PINCTRL_PIN(63, "GPPC_A_21"),
+ PINCTRL_PIN(64, "GPPC_A_22"),
+ PINCTRL_PIN(65, "I2S1_SCLK"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community0_gpps[] = {
+ TGL_GPP(0, 0, 25), /* GPP_B */
+ TGL_GPP(1, 26, 41), /* GPP_T */
+ TGL_GPP(2, 42, 66), /* GPP_A */
+};
+
+static const struct intel_community tgllp_community0[] = {
+ TGL_COMMUNITY(0, 66, tgllp_community0_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community0_soc_data = {
+ .uid = "0",
+ .pins = tgllp_community0_pins,
+ .npins = ARRAY_SIZE(tgllp_community0_pins),
+ .communities = tgllp_community0,
+ .ncommunities = ARRAY_SIZE(tgllp_community0),
+};
+
+static const struct pinctrl_pin_desc tgllp_community1_pins[] = {
+ /* GPP_S */
+ PINCTRL_PIN(0, "SNDW0_CLK"),
+ PINCTRL_PIN(1, "SNDW0_DATA"),
+ PINCTRL_PIN(2, "SNDW1_CLK"),
+ PINCTRL_PIN(3, "SNDW1_DATA"),
+ PINCTRL_PIN(4, "SNDW2_CLK"),
+ PINCTRL_PIN(5, "SNDW2_DATA"),
+ PINCTRL_PIN(6, "SNDW3_CLK"),
+ PINCTRL_PIN(7, "SNDW3_DATA"),
+ /* GPP_H */
+ PINCTRL_PIN(8, "GPPC_H_0"),
+ PINCTRL_PIN(9, "GPPC_H_1"),
+ PINCTRL_PIN(10, "GPPC_H_2"),
+ PINCTRL_PIN(11, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(12, "I2C2_SDA"),
+ PINCTRL_PIN(13, "I2C2_SCL"),
+ PINCTRL_PIN(14, "I2C3_SDA"),
+ PINCTRL_PIN(15, "I2C3_SCL"),
+ PINCTRL_PIN(16, "I2C4_SDA"),
+ PINCTRL_PIN(17, "I2C4_SCL"),
+ PINCTRL_PIN(18, "SRCCLKREQB_4"),
+ PINCTRL_PIN(19, "SRCCLKREQB_5"),
+ PINCTRL_PIN(20, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(21, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(22, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(23, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(24, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(25, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(26, "CPU_C10_GATEB"),
+ PINCTRL_PIN(27, "TIME_SYNC_0"),
+ PINCTRL_PIN(28, "IMGCLKOUT_1"),
+ PINCTRL_PIN(29, "IMGCLKOUT_2"),
+ PINCTRL_PIN(30, "IMGCLKOUT_3"),
+ PINCTRL_PIN(31, "IMGCLKOUT_4"),
+ /* GPP_D */
+ PINCTRL_PIN(32, "ISH_GP_0"),
+ PINCTRL_PIN(33, "ISH_GP_1"),
+ PINCTRL_PIN(34, "ISH_GP_2"),
+ PINCTRL_PIN(35, "ISH_GP_3"),
+ PINCTRL_PIN(36, "IMGCLKOUT_0"),
+ PINCTRL_PIN(37, "SRCCLKREQB_0"),
+ PINCTRL_PIN(38, "SRCCLKREQB_1"),
+ PINCTRL_PIN(39, "SRCCLKREQB_2"),
+ PINCTRL_PIN(40, "SRCCLKREQB_3"),
+ PINCTRL_PIN(41, "ISH_SPI_CSB"),
+ PINCTRL_PIN(42, "ISH_SPI_CLK"),
+ PINCTRL_PIN(43, "ISH_SPI_MISO"),
+ PINCTRL_PIN(44, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(45, "ISH_UART0_RXD"),
+ PINCTRL_PIN(46, "ISH_UART0_TXD"),
+ PINCTRL_PIN(47, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(48, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(49, "ISH_GP_4"),
+ PINCTRL_PIN(50, "ISH_GP_5"),
+ PINCTRL_PIN(51, "I2S_MCLK1_OUT"),
+ PINCTRL_PIN(52, "GSPI2_CLK_LOOPBK"),
+ /* GPP_U */
+ PINCTRL_PIN(53, "UART3_RXD"),
+ PINCTRL_PIN(54, "UART3_TXD"),
+ PINCTRL_PIN(55, "UART3_RTSB"),
+ PINCTRL_PIN(56, "UART3_CTSB"),
+ PINCTRL_PIN(57, "GSPI3_CS0B"),
+ PINCTRL_PIN(58, "GSPI3_CLK"),
+ PINCTRL_PIN(59, "GSPI3_MISO"),
+ PINCTRL_PIN(60, "GSPI3_MOSI"),
+ PINCTRL_PIN(61, "GSPI4_CS0B"),
+ PINCTRL_PIN(62, "GSPI4_CLK"),
+ PINCTRL_PIN(63, "GSPI4_MISO"),
+ PINCTRL_PIN(64, "GSPI4_MOSI"),
+ PINCTRL_PIN(65, "GSPI5_CS0B"),
+ PINCTRL_PIN(66, "GSPI5_CLK"),
+ PINCTRL_PIN(67, "GSPI5_MISO"),
+ PINCTRL_PIN(68, "GSPI5_MOSI"),
+ PINCTRL_PIN(69, "GSPI6_CS0B"),
+ PINCTRL_PIN(70, "GSPI6_CLK"),
+ PINCTRL_PIN(71, "GSPI6_MISO"),
+ PINCTRL_PIN(72, "GSPI6_MOSI"),
+ PINCTRL_PIN(73, "GSPI3_CLK_LOOPBK"),
+ PINCTRL_PIN(74, "GSPI4_CLK_LOOPBK"),
+ PINCTRL_PIN(75, "GSPI5_CLK_LOOPBK"),
+ PINCTRL_PIN(76, "GSPI6_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(77, "CNV_BTEN"),
+ PINCTRL_PIN(78, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(79, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(80, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(81, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(82, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(83, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(84, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(85, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(86, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(87, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(88, "vUART0_TXD"),
+ PINCTRL_PIN(89, "vUART0_RXD"),
+ PINCTRL_PIN(90, "vUART0_CTS_B"),
+ PINCTRL_PIN(91, "vUART0_RTS_B"),
+ PINCTRL_PIN(92, "vISH_UART0_TXD"),
+ PINCTRL_PIN(93, "vISH_UART0_RXD"),
+ PINCTRL_PIN(94, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(95, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(96, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(97, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(98, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(99, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(100, "vI2S2_SCLK"),
+ PINCTRL_PIN(101, "vI2S2_SFRM"),
+ PINCTRL_PIN(102, "vI2S2_TXD"),
+ PINCTRL_PIN(103, "vI2S2_RXD"),
+};
+
+static const struct intel_padgroup tgllp_community1_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_S */
+ TGL_GPP(1, 8, 31), /* GPP_H */
+ TGL_GPP(2, 32, 52), /* GPP_D */
+ TGL_GPP(3, 53, 76), /* GPP_U */
+ TGL_GPP(4, 77, 103), /* vGPIO */
+};
+
+static const struct intel_community tgllp_community1[] = {
+ TGL_COMMUNITY(0, 103, tgllp_community1_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community1_soc_data = {
+ .uid = "1",
+ .pins = tgllp_community1_pins,
+ .npins = ARRAY_SIZE(tgllp_community1_pins),
+ .communities = tgllp_community1,
+ .ncommunities = ARRAY_SIZE(tgllp_community1),
+};
+
+static const struct pinctrl_pin_desc tgllp_community4_pins[] = {
+ /* GPP_C */
+ PINCTRL_PIN(0, "SMBCLK"),
+ PINCTRL_PIN(1, "SMBDATA"),
+ PINCTRL_PIN(2, "SMBALERTB"),
+ PINCTRL_PIN(3, "SML0CLK"),
+ PINCTRL_PIN(4, "SML0DATA"),
+ PINCTRL_PIN(5, "SML0ALERTB"),
+ PINCTRL_PIN(6, "SML1CLK"),
+ PINCTRL_PIN(7, "SML1DATA"),
+ PINCTRL_PIN(8, "UART0_RXD"),
+ PINCTRL_PIN(9, "UART0_TXD"),
+ PINCTRL_PIN(10, "UART0_RTSB"),
+ PINCTRL_PIN(11, "UART0_CTSB"),
+ PINCTRL_PIN(12, "UART1_RXD"),
+ PINCTRL_PIN(13, "UART1_TXD"),
+ PINCTRL_PIN(14, "UART1_RTSB"),
+ PINCTRL_PIN(15, "UART1_CTSB"),
+ PINCTRL_PIN(16, "I2C0_SDA"),
+ PINCTRL_PIN(17, "I2C0_SCL"),
+ PINCTRL_PIN(18, "I2C1_SDA"),
+ PINCTRL_PIN(19, "I2C1_SCL"),
+ PINCTRL_PIN(20, "UART2_RXD"),
+ PINCTRL_PIN(21, "UART2_TXD"),
+ PINCTRL_PIN(22, "UART2_RTSB"),
+ PINCTRL_PIN(23, "UART2_CTSB"),
+ /* GPP_F */
+ PINCTRL_PIN(24, "CNV_BRI_DT"),
+ PINCTRL_PIN(25, "CNV_BRI_RSP"),
+ PINCTRL_PIN(26, "CNV_RGI_DT"),
+ PINCTRL_PIN(27, "CNV_RGI_RSP"),
+ PINCTRL_PIN(28, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(29, "GPPC_F_5"),
+ PINCTRL_PIN(30, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(31, "GPPC_F_7"),
+ PINCTRL_PIN(32, "I2S_MCLK2_INOUT"),
+ PINCTRL_PIN(33, "BOOTMPC"),
+ PINCTRL_PIN(34, "GPPC_F_10"),
+ PINCTRL_PIN(35, "GPPC_F_11"),
+ PINCTRL_PIN(36, "GSXDOUT"),
+ PINCTRL_PIN(37, "GSXSLOAD"),
+ PINCTRL_PIN(38, "GSXDIN"),
+ PINCTRL_PIN(39, "GSXSRESETB"),
+ PINCTRL_PIN(40, "GSXCLK"),
+ PINCTRL_PIN(41, "GMII_MDC"),
+ PINCTRL_PIN(42, "GMII_MDIO"),
+ PINCTRL_PIN(43, "SRCCLKREQB_6"),
+ PINCTRL_PIN(44, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(45, "EXT_PWR_GATE2B"),
+ PINCTRL_PIN(46, "VNN_CTRL"),
+ PINCTRL_PIN(47, "V1P05_CTRL"),
+ PINCTRL_PIN(48, "GPPF_CLK_LOOPBACK"),
+ /* HVCMOS */
+ PINCTRL_PIN(49, "L_BKLTEN"),
+ PINCTRL_PIN(50, "L_BKLTCTL"),
+ PINCTRL_PIN(51, "L_VDDEN"),
+ PINCTRL_PIN(52, "SYS_PWROK"),
+ PINCTRL_PIN(53, "SYS_RESETB"),
+ PINCTRL_PIN(54, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(55, "SATAXPCIE_0"),
+ PINCTRL_PIN(56, "SPI1_IO_2"),
+ PINCTRL_PIN(57, "SPI1_IO_3"),
+ PINCTRL_PIN(58, "CPU_GP_0"),
+ PINCTRL_PIN(59, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(60, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(61, "GPPC_E_6"),
+ PINCTRL_PIN(62, "CPU_GP_1"),
+ PINCTRL_PIN(63, "SPI1_CS1B"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "SPI1_CSB"),
+ PINCTRL_PIN(66, "SPI1_CLK"),
+ PINCTRL_PIN(67, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(68, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(69, "DDSP_HPD_A"),
+ PINCTRL_PIN(70, "ISH_GP_6"),
+ PINCTRL_PIN(71, "ISH_GP_7"),
+ PINCTRL_PIN(72, "GPPC_E_17"),
+ PINCTRL_PIN(73, "DDP1_CTRLCLK"),
+ PINCTRL_PIN(74, "DDP1_CTRLDATA"),
+ PINCTRL_PIN(75, "DDP2_CTRLCLK"),
+ PINCTRL_PIN(76, "DDP2_CTRLDATA"),
+ PINCTRL_PIN(77, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(78, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(79, "SPI1_CLK_LOOPBK"),
+ /* JTAG */
+ PINCTRL_PIN(80, "JTAG_TDO"),
+ PINCTRL_PIN(81, "JTAGX"),
+ PINCTRL_PIN(82, "PRDYB"),
+ PINCTRL_PIN(83, "PREQB"),
+ PINCTRL_PIN(84, "CPU_TRSTB"),
+ PINCTRL_PIN(85, "JTAG_TDI"),
+ PINCTRL_PIN(86, "JTAG_TMS"),
+ PINCTRL_PIN(87, "JTAG_TCK"),
+ PINCTRL_PIN(88, "DBG_PMODE"),
+};
+
+static const struct intel_padgroup tgllp_community4_gpps[] = {
+ TGL_GPP(0, 0, 23), /* GPP_C */
+ TGL_GPP(1, 24, 48), /* GPP_F */
+ TGL_GPP(2, 49, 54), /* HVCMOS */
+ TGL_GPP(3, 55, 79), /* GPP_E */
+ TGL_GPP(4, 80, 88), /* JTAG */
+};
+
+static const struct intel_community tgllp_community4[] = {
+ TGL_COMMUNITY(0, 88, tgllp_community4_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community4_soc_data = {
+ .uid = "4",
+ .pins = tgllp_community4_pins,
+ .npins = ARRAY_SIZE(tgllp_community4_pins),
+ .communities = tgllp_community4,
+ .ncommunities = ARRAY_SIZE(tgllp_community4),
+};
+
+static const struct pinctrl_pin_desc tgllp_community5_pins[] = {
+ /* GPP_R */
+ PINCTRL_PIN(0, "HDA_BCLK"),
+ PINCTRL_PIN(1, "HDA_SYNC"),
+ PINCTRL_PIN(2, "HDA_SDO"),
+ PINCTRL_PIN(3, "HDA_SDI_0"),
+ PINCTRL_PIN(4, "HDA_RSTB"),
+ PINCTRL_PIN(5, "HDA_SDI_1"),
+ PINCTRL_PIN(6, "GPP_R_6"),
+ PINCTRL_PIN(7, "GPP_R_7"),
+ /* SPI */
+ PINCTRL_PIN(8, "SPI0_IO_2"),
+ PINCTRL_PIN(9, "SPI0_IO_3"),
+ PINCTRL_PIN(10, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(11, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(12, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(13, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(14, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(15, "SPI0_CLK"),
+ PINCTRL_PIN(16, "SPI0_CLK_LOOPBK"),
+};
+
+static const struct intel_padgroup tgllp_community5_gpps[] = {
+ TGL_GPP(0, 0, 7), /* GPP_R */
+ TGL_GPP(1, 8, 16), /* SPI */
+};
+
+static const struct intel_community tgllp_community5[] = {
+ TGL_COMMUNITY(0, 16, tgllp_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data tgllp_community5_soc_data = {
+ .uid = "5",
+ .pins = tgllp_community5_pins,
+ .npins = ARRAY_SIZE(tgllp_community5_pins),
+ .communities = tgllp_community5,
+ .ncommunities = ARRAY_SIZE(tgllp_community5),
+};
+
+static const struct intel_pinctrl_soc_data *tgllp_soc_data_array[] = {
+ &tgllp_community0_soc_data,
+ &tgllp_community1_soc_data,
+ &tgllp_community4_soc_data,
+ &tgllp_community5_soc_data,
+ NULL
+};
+
+static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
+ { "INT34C5", (kernel_ulong_t)tgllp_soc_data_array },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(tgl_pinctrl_pm_ops);
+
+static struct platform_driver tgl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_uid,
+ .driver = {
+ .name = "tigerlake-pinctrl",
+ .acpi_match_table = tgl_pinctrl_acpi_match,
+ .pm = &tgl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(tgl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Tiger Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
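The TGL_GPP() and TGL_COMMUNITY() helpers near the top of the new file describe pad groups and communities by their first and last pin; the ((e) - (s) + 1) term converts the inclusive range into a count. A worked example using the GPP_A entry from tgllp_community0_gpps above:

/*
 * TGL_GPP(2, 42, 66) expands to { .reg_num = 2, .base = 42, .size = 25 }:
 * register group 2 covers pins 42..66, and 66 - 42 + 1 == 25 pads.
 */
static const struct intel_padgroup example_gpp = TGL_GPP(2, 42, 66);

Likewise TGL_COMMUNITY(0, 66, tgllp_community0_gpps) yields npins = 67, matching the 67 descriptors in tgllp_community0_pins.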
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 53f52b9a0acd..67f8444f7a0c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -982,7 +982,6 @@ static const struct mtk_eint_xt mtk_eint_xt = {
static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
if (!of_property_read_bool(np, "interrupt-controller"))
return -ENODEV;
@@ -991,8 +990,7 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
if (!pctl->eint)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->eint->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->eint->base))
return PTR_ERR(pctl->eint->base);
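This is a mechanical conversion: devm_platform_ioremap_resource(), added in v5.1, wraps exactly the platform_get_resource()/devm_ioremap_resource() pair it replaces. A simplified sketch of the helper (the real implementation lives in drivers/base/platform.c):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}

The same transformation shows up again below in the mvebu and orion probes.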
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
index df55f617aa98..3cb119105ddb 100644
--- a/drivers/pinctrl/meson/Kconfig
+++ b/drivers/pinctrl/meson/Kconfig
@@ -54,4 +54,10 @@ config PINCTRL_MESON_G12A
select PINCTRL_MESON_AXG_PMX
default y
+config PINCTRL_MESON_A1
+ bool "Meson a1 Soc pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON_AXG_PMX
+ default y
+
endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index a69c565f2f13..1a5bffe953f9 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
obj-$(CONFIG_PINCTRL_MESON_AXG_PMX) += pinctrl-meson-axg-pmx.o
obj-$(CONFIG_PINCTRL_MESON_AXG) += pinctrl-meson-axg.o
obj-$(CONFIG_PINCTRL_MESON_G12A) += pinctrl-meson-g12a.o
+obj-$(CONFIG_PINCTRL_MESON_A1) += pinctrl-meson-a1.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
new file mode 100644
index 000000000000..0bcec03f344a
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Pin controller and GPIO driver for Amlogic Meson A1 SoC.
+ *
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#include <dt-bindings/gpio/meson-a1-gpio.h>
+#include "pinctrl-meson.h"
+#include "pinctrl-meson-axg-pmx.h"
+
+static const struct pinctrl_pin_desc meson_a1_periphs_pins[] = {
+ MESON_PIN(GPIOP_0),
+ MESON_PIN(GPIOP_1),
+ MESON_PIN(GPIOP_2),
+ MESON_PIN(GPIOP_3),
+ MESON_PIN(GPIOP_4),
+ MESON_PIN(GPIOP_5),
+ MESON_PIN(GPIOP_6),
+ MESON_PIN(GPIOP_7),
+ MESON_PIN(GPIOP_8),
+ MESON_PIN(GPIOP_9),
+ MESON_PIN(GPIOP_10),
+ MESON_PIN(GPIOP_11),
+ MESON_PIN(GPIOP_12),
+ MESON_PIN(GPIOB_0),
+ MESON_PIN(GPIOB_1),
+ MESON_PIN(GPIOB_2),
+ MESON_PIN(GPIOB_3),
+ MESON_PIN(GPIOB_4),
+ MESON_PIN(GPIOB_5),
+ MESON_PIN(GPIOB_6),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOF_0),
+ MESON_PIN(GPIOF_1),
+ MESON_PIN(GPIOF_2),
+ MESON_PIN(GPIOF_3),
+ MESON_PIN(GPIOF_4),
+ MESON_PIN(GPIOF_5),
+ MESON_PIN(GPIOF_6),
+ MESON_PIN(GPIOF_7),
+ MESON_PIN(GPIOF_8),
+ MESON_PIN(GPIOF_9),
+ MESON_PIN(GPIOF_10),
+ MESON_PIN(GPIOF_11),
+ MESON_PIN(GPIOF_12),
+ MESON_PIN(GPIOA_0),
+ MESON_PIN(GPIOA_1),
+ MESON_PIN(GPIOA_2),
+ MESON_PIN(GPIOA_3),
+ MESON_PIN(GPIOA_4),
+ MESON_PIN(GPIOA_5),
+ MESON_PIN(GPIOA_6),
+ MESON_PIN(GPIOA_7),
+ MESON_PIN(GPIOA_8),
+ MESON_PIN(GPIOA_9),
+ MESON_PIN(GPIOA_10),
+ MESON_PIN(GPIOA_11),
+};
+
+/* psram */
+static const unsigned int psram_clkn_pins[] = { GPIOP_0 };
+static const unsigned int psram_clkp_pins[] = { GPIOP_1 };
+static const unsigned int psram_ce_n_pins[] = { GPIOP_2 };
+static const unsigned int psram_rst_n_pins[] = { GPIOP_3 };
+static const unsigned int psram_adq0_pins[] = { GPIOP_4 };
+static const unsigned int psram_adq1_pins[] = { GPIOP_5 };
+static const unsigned int psram_adq2_pins[] = { GPIOP_6 };
+static const unsigned int psram_adq3_pins[] = { GPIOP_7 };
+static const unsigned int psram_adq4_pins[] = { GPIOP_8 };
+static const unsigned int psram_adq5_pins[] = { GPIOP_9 };
+static const unsigned int psram_adq6_pins[] = { GPIOP_10 };
+static const unsigned int psram_adq7_pins[] = { GPIOP_11 };
+static const unsigned int psram_dqs_dm_pins[] = { GPIOP_12 };
+
+/* sdcard */
+static const unsigned int sdcard_d0_b_pins[] = { GPIOB_0 };
+static const unsigned int sdcard_d1_b_pins[] = { GPIOB_1 };
+static const unsigned int sdcard_d2_b_pins[] = { GPIOB_2 };
+static const unsigned int sdcard_d3_b_pins[] = { GPIOB_3 };
+static const unsigned int sdcard_clk_b_pins[] = { GPIOB_4 };
+static const unsigned int sdcard_cmd_b_pins[] = { GPIOB_5 };
+
+static const unsigned int sdcard_d0_x_pins[] = { GPIOX_0 };
+static const unsigned int sdcard_d1_x_pins[] = { GPIOX_1 };
+static const unsigned int sdcard_d2_x_pins[] = { GPIOX_2 };
+static const unsigned int sdcard_d3_x_pins[] = { GPIOX_3 };
+static const unsigned int sdcard_clk_x_pins[] = { GPIOX_4 };
+static const unsigned int sdcard_cmd_x_pins[] = { GPIOX_5 };
+
+/* spif */
+static const unsigned int spif_mo_pins[] = { GPIOB_0 };
+static const unsigned int spif_mi_pins[] = { GPIOB_1 };
+static const unsigned int spif_wp_n_pins[] = { GPIOB_2 };
+static const unsigned int spif_hold_n_pins[] = { GPIOB_3 };
+static const unsigned int spif_clk_pins[] = { GPIOB_4 };
+static const unsigned int spif_cs_pins[] = { GPIOB_5 };
+
+/* i2c0 */
+static const unsigned int i2c0_sck_f9_pins[] = { GPIOF_9 };
+static const unsigned int i2c0_sda_f10_pins[] = { GPIOF_10 };
+static const unsigned int i2c0_sck_f11_pins[] = { GPIOF_11 };
+static const unsigned int i2c0_sda_f12_pins[] = { GPIOF_12 };
+
+/* i2c1 */
+static const unsigned int i2c1_sda_x_pins[] = { GPIOX_9 };
+static const unsigned int i2c1_sck_x_pins[] = { GPIOX_10 };
+static const unsigned int i2c1_sda_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c1_sck_a_pins[] = { GPIOA_11 };
+
+/* i2c2 */
+static const unsigned int i2c2_sck_x0_pins[] = { GPIOX_0 };
+static const unsigned int i2c2_sda_x1_pins[] = { GPIOX_1 };
+static const unsigned int i2c2_sck_x15_pins[] = { GPIOX_15 };
+static const unsigned int i2c2_sda_x16_pins[] = { GPIOX_16 };
+static const unsigned int i2c2_sck_a4_pins[] = { GPIOA_4 };
+static const unsigned int i2c2_sda_a5_pins[] = { GPIOA_5 };
+static const unsigned int i2c2_sck_a8_pins[] = { GPIOA_8 };
+static const unsigned int i2c2_sda_a9_pins[] = { GPIOA_9 };
+
+/* i2c3 */
+static const unsigned int i2c3_sck_f_pins[] = { GPIOF_4 };
+static const unsigned int i2c3_sda_f_pins[] = { GPIOF_5 };
+static const unsigned int i2c3_sck_x_pins[] = { GPIOX_11 };
+static const unsigned int i2c3_sda_x_pins[] = { GPIOX_12 };
+
+/* i2c slave */
+static const unsigned int i2c_slave_sck_a_pins[] = { GPIOA_10 };
+static const unsigned int i2c_slave_sda_a_pins[] = { GPIOA_11 };
+static const unsigned int i2c_slave_sck_f_pins[] = { GPIOF_11 };
+static const unsigned int i2c_slave_sda_f_pins[] = { GPIOF_12 };
+
+/* uart_a */
+static const unsigned int uart_a_tx_pins[] = { GPIOX_11 };
+static const unsigned int uart_a_rx_pins[] = { GPIOX_12 };
+static const unsigned int uart_a_cts_pins[] = { GPIOX_13 };
+static const unsigned int uart_a_rts_pins[] = { GPIOX_14 };
+
+/* uart_b */
+static const unsigned int uart_b_tx_x_pins[] = { GPIOX_7 };
+static const unsigned int uart_b_rx_x_pins[] = { GPIOX_8 };
+static const unsigned int uart_b_tx_f_pins[] = { GPIOF_0 };
+static const unsigned int uart_b_rx_f_pins[] = { GPIOF_1 };
+
+/* uart_c */
+static const unsigned int uart_c_tx_x0_pins[] = { GPIOX_0 };
+static const unsigned int uart_c_rx_x1_pins[] = { GPIOX_1 };
+static const unsigned int uart_c_cts_pins[] = { GPIOX_2 };
+static const unsigned int uart_c_rts_pins[] = { GPIOX_3 };
+static const unsigned int uart_c_tx_x15_pins[] = { GPIOX_15 };
+static const unsigned int uart_c_rx_x16_pins[] = { GPIOX_16 };
+
+/* pwm_a */
+static const unsigned int pwm_a_x6_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_x7_pins[] = { GPIOX_7 };
+static const unsigned int pwm_a_f6_pins[] = { GPIOF_6 };
+static const unsigned int pwm_a_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_a_pins[] = { GPIOA_5 };
+
+/* pwm_b */
+static const unsigned int pwm_b_x_pins[] = { GPIOX_8 };
+static const unsigned int pwm_b_f_pins[] = { GPIOF_7 };
+static const unsigned int pwm_b_a_pins[] = { GPIOA_11 };
+
+/* pwm_c */
+static const unsigned int pwm_c_x_pins[] = { GPIOX_9 };
+static const unsigned int pwm_c_f3_pins[] = { GPIOF_3 };
+static const unsigned int pwm_c_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_c_a_pins[] = { GPIOA_10 };
+
+/* pwm_d */
+static const unsigned int pwm_d_x10_pins[] = { GPIOX_10 };
+static const unsigned int pwm_d_x13_pins[] = { GPIOX_13 };
+static const unsigned int pwm_d_x15_pins[] = { GPIOX_15 };
+static const unsigned int pwm_d_f_pins[] = { GPIOF_11 };
+
+/* pwm_e */
+static const unsigned int pwm_e_p_pins[] = { GPIOP_3 };
+static const unsigned int pwm_e_x2_pins[] = { GPIOX_2 };
+static const unsigned int pwm_e_x14_pins[] = { GPIOX_14 };
+static const unsigned int pwm_e_x16_pins[] = { GPIOX_16 };
+static const unsigned int pwm_e_f_pins[] = { GPIOF_3 };
+static const unsigned int pwm_e_a_pins[] = { GPIOA_0 };
+
+/* pwm_f */
+static const unsigned int pwm_f_b_pins[] = { GPIOB_6 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_3 };
+static const unsigned int pwm_f_f4_pins[] = { GPIOF_4 };
+static const unsigned int pwm_f_f12_pins[] = { GPIOF_12 };
+
+/* pwm_a_hiz */
+static const unsigned int pwm_a_hiz_f8_pins[] = { GPIOF_8 };
+static const unsigned int pwm_a_hiz_f10_pins[] = { GPIOF_10 };
+static const unsigned int pwm_a_hiz_f6_pins[] = { GPIOF_6 };
+
+/* pwm_b_hiz */
+static const unsigned int pwm_b_hiz_pins[] = { GPIOF_7 };
+
+/* pwm_c_hiz */
+static const unsigned int pwm_c_hiz_pins[] = { GPIOF_8 };
+
+/* tdm_a */
+static const unsigned int tdm_a_dout1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_dout0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_sclk_pins[] = { GPIOX_10 };
+static const unsigned int tdm_a_din1_pins[] = { GPIOX_7 };
+static const unsigned int tdm_a_din0_pins[] = { GPIOX_8 };
+static const unsigned int tdm_a_slv_fs_pins[] = { GPIOX_9 };
+static const unsigned int tdm_a_slv_sclk_pins[] = { GPIOX_10 };
+
+/* spi_a */
+static const unsigned int spi_a_mosi_x2_pins[] = { GPIOX_2 };
+static const unsigned int spi_a_ss0_x3_pins[] = { GPIOX_3 };
+static const unsigned int spi_a_sclk_x4_pins[] = { GPIOX_4 };
+static const unsigned int spi_a_miso_x5_pins[] = { GPIOX_5 };
+static const unsigned int spi_a_mosi_x7_pins[] = { GPIOX_7 };
+static const unsigned int spi_a_miso_x8_pins[] = { GPIOX_8 };
+static const unsigned int spi_a_ss0_x9_pins[] = { GPIOX_9 };
+static const unsigned int spi_a_sclk_x10_pins[] = { GPIOX_10 };
+
+static const unsigned int spi_a_mosi_a_pins[] = { GPIOA_6 };
+static const unsigned int spi_a_miso_a_pins[] = { GPIOA_7 };
+static const unsigned int spi_a_ss0_a_pins[] = { GPIOA_8 };
+static const unsigned int spi_a_sclk_a_pins[] = { GPIOA_9 };
+
+/* pdm */
+static const unsigned int pdm_din0_x_pins[] = { GPIOX_7 };
+static const unsigned int pdm_din1_x_pins[] = { GPIOX_8 };
+static const unsigned int pdm_din2_x_pins[] = { GPIOX_9 };
+static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
+
+static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
+static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
+static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
+static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+
+/* gen_clk */
+static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
+static const unsigned int gen_clk_f8_pins[] = { GPIOF_8 };
+static const unsigned int gen_clk_f10_pins[] = { GPIOF_10 };
+static const unsigned int gen_clk_a_pins[] = { GPIOA_11 };
+
+/* jtag_a */
+static const unsigned int jtag_a_clk_pins[] = { GPIOF_4 };
+static const unsigned int jtag_a_tms_pins[] = { GPIOF_5 };
+static const unsigned int jtag_a_tdi_pins[] = { GPIOF_6 };
+static const unsigned int jtag_a_tdo_pins[] = { GPIOF_7 };
+
+/* clk_32k_in */
+static const unsigned int clk_32k_in_pins[] = { GPIOF_2 };
+
+/* ir in */
+static const unsigned int remote_input_f_pins[] = { GPIOF_3 };
+static const unsigned int remote_input_a_pins[] = { GPIOA_11 };
+
+/* ir out */
+static const unsigned int remote_out_pins[] = { GPIOF_5 };
+
+/* spdif */
+static const unsigned int spdif_in_f6_pins[] = { GPIOF_6 };
+static const unsigned int spdif_in_f7_pins[] = { GPIOF_7 };
+
+/* sw */
+static const unsigned int swclk_pins[] = { GPIOF_4 };
+static const unsigned int swdio_pins[] = { GPIOF_5 };
+
+/* clk25 */
+static const unsigned int clk25_pins[] = { GPIOF_10 };
+
+/* cec_a */
+static const unsigned int cec_a_pins[] = { GPIOF_2 };
+
+/* cec_b */
+static const unsigned int cec_b_pins[] = { GPIOF_2 };
+
+/* clk12_24 */
+static const unsigned int clk12_24_pins[] = { GPIOF_10 };
+
+/* mclk_0 */
+static const unsigned int mclk_0_pins[] = { GPIOA_0 };
+
+/* tdm_b */
+static const unsigned int tdm_b_sclk_pins[] = { GPIOA_1 };
+static const unsigned int tdm_b_fs_pins[] = { GPIOA_2 };
+static const unsigned int tdm_b_dout0_pins[] = { GPIOA_3 };
+static const unsigned int tdm_b_dout1_pins[] = { GPIOA_4 };
+static const unsigned int tdm_b_dout2_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_dout3_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_dout4_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_dout5_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_slv_sclk_pins[] = { GPIOA_5 };
+static const unsigned int tdm_b_slv_fs_pins[] = { GPIOA_6 };
+static const unsigned int tdm_b_din0_pins[] = { GPIOA_7 };
+static const unsigned int tdm_b_din1_pins[] = { GPIOA_8 };
+static const unsigned int tdm_b_din2_pins[] = { GPIOA_9 };
+
+/* mclk_vad */
+static const unsigned int mclk_vad_pins[] = { GPIOA_0 };
+
+/* tdm_vad */
+static const unsigned int tdm_vad_sclk_a1_pins[] = { GPIOA_1 };
+static const unsigned int tdm_vad_fs_a2_pins[] = { GPIOA_2 };
+static const unsigned int tdm_vad_sclk_a5_pins[] = { GPIOA_5 };
+static const unsigned int tdm_vad_fs_a6_pins[] = { GPIOA_6 };
+
+/* tst_out */
+static const unsigned int tst_out0_pins[] = { GPIOA_0 };
+static const unsigned int tst_out1_pins[] = { GPIOA_1 };
+static const unsigned int tst_out2_pins[] = { GPIOA_2 };
+static const unsigned int tst_out3_pins[] = { GPIOA_3 };
+static const unsigned int tst_out4_pins[] = { GPIOA_4 };
+static const unsigned int tst_out5_pins[] = { GPIOA_5 };
+static const unsigned int tst_out6_pins[] = { GPIOA_6 };
+static const unsigned int tst_out7_pins[] = { GPIOA_7 };
+static const unsigned int tst_out8_pins[] = { GPIOA_8 };
+static const unsigned int tst_out9_pins[] = { GPIOA_9 };
+static const unsigned int tst_out10_pins[] = { GPIOA_10 };
+static const unsigned int tst_out11_pins[] = { GPIOA_11 };
+
+/* mute */
+static const unsigned int mute_key_pins[] = { GPIOA_4 };
+static const unsigned int mute_en_pins[] = { GPIOA_5 };
+
+static struct meson_pmx_group meson_a1_periphs_groups[] = {
+ GPIO_GROUP(GPIOP_0),
+ GPIO_GROUP(GPIOP_1),
+ GPIO_GROUP(GPIOP_2),
+ GPIO_GROUP(GPIOP_3),
+ GPIO_GROUP(GPIOP_4),
+ GPIO_GROUP(GPIOP_5),
+ GPIO_GROUP(GPIOP_6),
+ GPIO_GROUP(GPIOP_7),
+ GPIO_GROUP(GPIOP_8),
+ GPIO_GROUP(GPIOP_9),
+ GPIO_GROUP(GPIOP_10),
+ GPIO_GROUP(GPIOP_11),
+ GPIO_GROUP(GPIOP_12),
+ GPIO_GROUP(GPIOB_0),
+ GPIO_GROUP(GPIOB_1),
+ GPIO_GROUP(GPIOB_2),
+ GPIO_GROUP(GPIOB_3),
+ GPIO_GROUP(GPIOB_4),
+ GPIO_GROUP(GPIOB_5),
+ GPIO_GROUP(GPIOB_6),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOF_0),
+ GPIO_GROUP(GPIOF_1),
+ GPIO_GROUP(GPIOF_2),
+ GPIO_GROUP(GPIOF_3),
+ GPIO_GROUP(GPIOF_4),
+ GPIO_GROUP(GPIOF_5),
+ GPIO_GROUP(GPIOF_6),
+ GPIO_GROUP(GPIOF_7),
+ GPIO_GROUP(GPIOF_8),
+ GPIO_GROUP(GPIOF_9),
+ GPIO_GROUP(GPIOF_10),
+ GPIO_GROUP(GPIOF_11),
+ GPIO_GROUP(GPIOF_12),
+ GPIO_GROUP(GPIOA_0),
+ GPIO_GROUP(GPIOA_1),
+ GPIO_GROUP(GPIOA_2),
+ GPIO_GROUP(GPIOA_3),
+ GPIO_GROUP(GPIOA_4),
+ GPIO_GROUP(GPIOA_5),
+ GPIO_GROUP(GPIOA_6),
+ GPIO_GROUP(GPIOA_7),
+ GPIO_GROUP(GPIOA_8),
+ GPIO_GROUP(GPIOA_9),
+ GPIO_GROUP(GPIOA_10),
+ GPIO_GROUP(GPIOA_11),
+
+ /* bank P func1 */
+ GROUP(psram_clkn, 1),
+ GROUP(psram_clkp, 1),
+ GROUP(psram_ce_n, 1),
+ GROUP(psram_rst_n, 1),
+ GROUP(psram_adq0, 1),
+ GROUP(psram_adq1, 1),
+ GROUP(psram_adq2, 1),
+ GROUP(psram_adq3, 1),
+ GROUP(psram_adq4, 1),
+ GROUP(psram_adq5, 1),
+ GROUP(psram_adq6, 1),
+ GROUP(psram_adq7, 1),
+ GROUP(psram_dqs_dm, 1),
+
+ /* bank P func2 */
+ GROUP(pwm_e_p, 2),
+
+ /* bank B func1 */
+ GROUP(spif_mo, 1),
+ GROUP(spif_mi, 1),
+ GROUP(spif_wp_n, 1),
+ GROUP(spif_hold_n, 1),
+ GROUP(spif_clk, 1),
+ GROUP(spif_cs, 1),
+ GROUP(pwm_f_b, 1),
+
+ /* bank B func2 */
+ GROUP(sdcard_d0_b, 2),
+ GROUP(sdcard_d1_b, 2),
+ GROUP(sdcard_d2_b, 2),
+ GROUP(sdcard_d3_b, 2),
+ GROUP(sdcard_clk_b, 2),
+ GROUP(sdcard_cmd_b, 2),
+
+ /* bank X func1 */
+ GROUP(sdcard_d0_x, 1),
+ GROUP(sdcard_d1_x, 1),
+ GROUP(sdcard_d2_x, 1),
+ GROUP(sdcard_d3_x, 1),
+ GROUP(sdcard_clk_x, 1),
+ GROUP(sdcard_cmd_x, 1),
+ GROUP(pwm_a_x6, 1),
+ GROUP(tdm_a_dout1, 1),
+ GROUP(tdm_a_dout0, 1),
+ GROUP(tdm_a_fs, 1),
+ GROUP(tdm_a_sclk, 1),
+ GROUP(uart_a_tx, 1),
+ GROUP(uart_a_rx, 1),
+ GROUP(uart_a_cts, 1),
+ GROUP(uart_a_rts, 1),
+ GROUP(pwm_d_x15, 1),
+ GROUP(pwm_e_x16, 1),
+
+ /* bank X func2 */
+ GROUP(i2c2_sck_x0, 2),
+ GROUP(i2c2_sda_x1, 2),
+ GROUP(spi_a_mosi_x2, 2),
+ GROUP(spi_a_ss0_x3, 2),
+ GROUP(spi_a_sclk_x4, 2),
+ GROUP(spi_a_miso_x5, 2),
+ GROUP(tdm_a_din1, 2),
+ GROUP(tdm_a_din0, 2),
+ GROUP(tdm_a_slv_fs, 2),
+ GROUP(tdm_a_slv_sclk, 2),
+ GROUP(i2c3_sck_x, 2),
+ GROUP(i2c3_sda_x, 2),
+ GROUP(pwm_d_x13, 2),
+ GROUP(pwm_e_x14, 2),
+ GROUP(i2c2_sck_x15, 2),
+ GROUP(i2c2_sda_x16, 2),
+
+ /* bank X func3 */
+ GROUP(uart_c_tx_x0, 3),
+ GROUP(uart_c_rx_x1, 3),
+ GROUP(uart_c_cts, 3),
+ GROUP(uart_c_rts, 3),
+ GROUP(pdm_din0_x, 3),
+ GROUP(pdm_din1_x, 3),
+ GROUP(pdm_din2_x, 3),
+ GROUP(pdm_dclk_x, 3),
+ GROUP(uart_c_tx_x15, 3),
+ GROUP(uart_c_rx_x16, 3),
+
+ /* bank X func4 */
+ GROUP(pwm_e_x2, 4),
+ GROUP(pwm_f_x, 4),
+ GROUP(spi_a_mosi_x7, 4),
+ GROUP(spi_a_miso_x8, 4),
+ GROUP(spi_a_ss0_x9, 4),
+ GROUP(spi_a_sclk_x10, 4),
+
+ /* bank X func5 */
+ GROUP(uart_b_tx_x, 5),
+ GROUP(uart_b_rx_x, 5),
+ GROUP(i2c1_sda_x, 5),
+ GROUP(i2c1_sck_x, 5),
+
+ /* bank X func6 */
+ GROUP(pwm_a_x7, 6),
+ GROUP(pwm_b_x, 6),
+ GROUP(pwm_c_x, 6),
+ GROUP(pwm_d_x10, 6),
+
+ /* bank X func7 */
+ GROUP(gen_clk_x, 7),
+
+ /* bank F func1 */
+ GROUP(uart_b_tx_f, 1),
+ GROUP(uart_b_rx_f, 1),
+ GROUP(remote_input_f, 1),
+ GROUP(jtag_a_clk, 1),
+ GROUP(jtag_a_tms, 1),
+ GROUP(jtag_a_tdi, 1),
+ GROUP(jtag_a_tdo, 1),
+ GROUP(gen_clk_f8, 1),
+ GROUP(pwm_a_f10, 1),
+ GROUP(i2c0_sck_f11, 1),
+ GROUP(i2c0_sda_f12, 1),
+
+ /* bank F func2 */
+ GROUP(clk_32k_in, 2),
+ GROUP(pwm_e_f, 2),
+ GROUP(pwm_f_f4, 2),
+ GROUP(remote_out, 2),
+ GROUP(spdif_in_f6, 2),
+ GROUP(spdif_in_f7, 2),
+ GROUP(pwm_a_hiz_f8, 2),
+ GROUP(pwm_a_hiz_f10, 2),
+ GROUP(pwm_d_f, 2),
+ GROUP(pwm_f_f12, 2),
+
+ /* bank F func3 */
+ GROUP(pwm_c_f3, 3),
+ GROUP(swclk, 3),
+ GROUP(swdio, 3),
+ GROUP(pwm_a_f6, 3),
+ GROUP(pwm_b_f, 3),
+ GROUP(pwm_c_f8, 3),
+ GROUP(clk25, 3),
+ GROUP(i2c_slave_sck_f, 3),
+ GROUP(i2c_slave_sda_f, 3),
+
+ /* bank F func4 */
+ GROUP(cec_a, 4),
+ GROUP(i2c3_sck_f, 4),
+ GROUP(i2c3_sda_f, 4),
+ GROUP(pwm_a_hiz_f6, 4),
+ GROUP(pwm_b_hiz, 4),
+ GROUP(pwm_c_hiz, 4),
+ GROUP(i2c0_sck_f9, 4),
+ GROUP(i2c0_sda_f10, 4),
+
+ /* bank F func5 */
+ GROUP(cec_b, 5),
+ GROUP(clk12_24, 5),
+
+ /* bank F func7 */
+ GROUP(gen_clk_f10, 7),
+
+ /* bank A func1 */
+ GROUP(mclk_0, 1),
+ GROUP(tdm_b_sclk, 1),
+ GROUP(tdm_b_fs, 1),
+ GROUP(tdm_b_dout0, 1),
+ GROUP(tdm_b_dout1, 1),
+ GROUP(tdm_b_dout2, 1),
+ GROUP(tdm_b_dout3, 1),
+ GROUP(tdm_b_dout4, 1),
+ GROUP(tdm_b_dout5, 1),
+ GROUP(remote_input_a, 1),
+
+ /* bank A func2 */
+ GROUP(pwm_e_a, 2),
+ GROUP(tdm_b_slv_sclk, 2),
+ GROUP(tdm_b_slv_fs, 2),
+ GROUP(tdm_b_din0, 2),
+ GROUP(tdm_b_din1, 2),
+ GROUP(tdm_b_din2, 2),
+ GROUP(i2c1_sda_a, 2),
+ GROUP(i2c1_sck_a, 2),
+
+ /* bank A func3 */
+ GROUP(i2c2_sck_a4, 3),
+ GROUP(i2c2_sda_a5, 3),
+ GROUP(pdm_din2_a, 3),
+ GROUP(pdm_din1_a, 3),
+ GROUP(pdm_din0_a, 3),
+ GROUP(pdm_dclk, 3),
+ GROUP(pwm_c_a, 3),
+ GROUP(pwm_b_a, 3),
+
+ /* bank A func4 */
+ GROUP(pwm_a_a, 4),
+ GROUP(spi_a_mosi_a, 4),
+ GROUP(spi_a_miso_a, 4),
+ GROUP(spi_a_ss0_a, 4),
+ GROUP(spi_a_sclk_a, 4),
+ GROUP(i2c_slave_sck_a, 4),
+ GROUP(i2c_slave_sda_a, 4),
+
+ /* bank A func5 */
+ GROUP(mclk_vad, 5),
+ GROUP(tdm_vad_sclk_a1, 5),
+ GROUP(tdm_vad_fs_a2, 5),
+ GROUP(tdm_vad_sclk_a5, 5),
+ GROUP(tdm_vad_fs_a6, 5),
+ GROUP(i2c2_sck_a8, 5),
+ GROUP(i2c2_sda_a9, 5),
+
+ /* bank A func6 */
+ GROUP(tst_out0, 6),
+ GROUP(tst_out1, 6),
+ GROUP(tst_out2, 6),
+ GROUP(tst_out3, 6),
+ GROUP(tst_out4, 6),
+ GROUP(tst_out5, 6),
+ GROUP(tst_out6, 6),
+ GROUP(tst_out7, 6),
+ GROUP(tst_out8, 6),
+ GROUP(tst_out9, 6),
+ GROUP(tst_out10, 6),
+ GROUP(tst_out11, 6),
+
+ /* bank A func7 */
+ GROUP(mute_key, 7),
+ GROUP(mute_en, 7),
+ GROUP(gen_clk_a, 7),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOP_0", "GPIOP_1", "GPIOP_2", "GPIOP_3", "GPIOP_4",
+ "GPIOP_5", "GPIOP_6", "GPIOP_7", "GPIOP_8", "GPIOP_9",
+ "GPIOP_10", "GPIOP_11", "GPIOP_12",
+
+ "GPIOB_0", "GPIOB_1", "GPIOB_2", "GPIOB_3", "GPIOB_4",
+ "GPIOB_5", "GPIOB_6",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16",
+
+ "GPIOF_0", "GPIOF_1", "GPIOF_2", "GPIOF_3", "GPIOF_4",
+ "GPIOF_5", "GPIOF_6", "GPIOF_7", "GPIOF_8", "GPIOF_9",
+ "GPIOF_10", "GPIOF_11", "GPIOF_12",
+
+ "GPIOA_0", "GPIOA_1", "GPIOA_2", "GPIOA_3", "GPIOA_4",
+ "GPIOA_5", "GPIOA_6", "GPIOA_7", "GPIOA_8", "GPIOA_9",
+ "GPIOA_10", "GPIOA_11",
+};
+
+static const char * const psram_groups[] = {
+ "psram_clkn", "psram_clkp", "psram_ce_n", "psram_rst_n", "psram_adq0",
+ "psram_adq1", "psram_adq2", "psram_adq3", "psram_adq4", "psram_adq5",
+ "psram_adq6", "psram_adq7", "psram_dqs_dm",
+};
+
+static const char * const pwm_a_groups[] = {
+ "pwm_a_x6", "pwm_a_x7", "pwm_a_f10", "pwm_a_f6", "pwm_a_a",
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b_x", "pwm_b_f", "pwm_b_a",
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c_x", "pwm_c_f3", "pwm_c_f8", "pwm_c_a",
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d_x15", "pwm_d_x13", "pwm_d_x10", "pwm_d_f",
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e_p", "pwm_e_x16", "pwm_e_x14", "pwm_e_x2", "pwm_e_f",
+ "pwm_e_a",
+};
+
+static const char * const pwm_f_groups[] = {
+ "pwm_f_b", "pwm_f_x", "pwm_f_f4", "pwm_f_f12",
+};
+
+static const char * const pwm_a_hiz_groups[] = {
+ "pwm_a_hiz_f8", "pwm_a_hiz_f10", "pwm_a_hiz_f6",
+};
+
+static const char * const pwm_b_hiz_groups[] = {
+ "pwm_b_hiz",
+};
+
+static const char * const pwm_c_hiz_groups[] = {
+ "pwm_c_hiz",
+};
+
+static const char * const spif_groups[] = {
+ "spif_mo", "spif_mi", "spif_wp_n", "spif_hold_n", "spif_clk",
+ "spif_cs",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0_b", "sdcard_d1_b", "sdcard_d2_b", "sdcard_d3_b",
+ "sdcard_clk_b", "sdcard_cmd_b",
+
+ "sdcard_d0_x", "sdcard_d1_x", "sdcard_d2_x", "sdcard_d3_x",
+ "sdcard_clk_x", "sdcard_cmd_x",
+};
+
+static const char * const tdm_a_groups[] = {
+ "tdm_a_din0", "tdm_a_din1", "tdm_a_fs", "tdm_a_sclk",
+ "tdm_a_slv_fs", "tdm_a_slv_sclk", "tdm_a_dout0", "tdm_a_dout1",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_a_tx", "uart_a_rx", "uart_a_cts", "uart_a_rts",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_b_tx_x", "uart_b_rx_x", "uart_b_tx_f", "uart_b_rx_f",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_c_tx_x0", "uart_c_rx_x1", "uart_c_cts", "uart_c_rts",
+ "uart_c_tx_x15", "uart_c_rx_x16",
+};
+
+static const char * const i2c0_groups[] = {
+ "i2c0_sck_f11", "i2c0_sda_f12", "i2c0_sck_f9", "i2c0_sda_f10",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_sda_x", "i2c1_sck_x", "i2c1_sda_a", "i2c1_sck_a",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_sck_x0", "i2c2_sda_x1", "i2c2_sck_x15", "i2c2_sda_x16",
+ "i2c2_sck_a4", "i2c2_sda_a5", "i2c2_sck_a8", "i2c2_sda_a9",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3_sck_x", "i2c3_sda_x", "i2c3_sck_f", "i2c3_sda_f",
+};
+
+static const char * const i2c_slave_groups[] = {
+ "i2c_slave_sda_a", "i2c_slave_sck_a",
+ "i2c_slave_sda_f", "i2c_slave_sck_f",
+};
+
+static const char * const spi_a_groups[] = {
+ "spi_a_mosi_x2", "spi_a_ss0_x3", "spi_a_sclk_x4", "spi_a_miso_x5",
+ "spi_a_mosi_x7", "spi_a_miso_x8", "spi_a_ss0_x9", "spi_a_sclk_x10",
+
+ "spi_a_mosi_a", "spi_a_miso_a", "spi_a_ss0_a", "spi_a_sclk_a",
+};
+
+static const char * const pdm_groups[] = {
+ "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+};
+
+static const char * const gen_clk_groups[] = {
+ "gen_clk_x", "gen_clk_f8", "gen_clk_f10", "gen_clk_a",
+};
+
+static const char * const remote_input_groups[] = {
+ "remote_input_f",
+ "remote_input_a",
+};
+
+static const char * const jtag_a_groups[] = {
+ "jtag_a_clk", "jtag_a_tms", "jtag_a_tdi", "jtag_a_tdo",
+};
+
+static const char * const clk_32k_in_groups[] = {
+ "clk_32k_in",
+};
+
+static const char * const remote_out_groups[] = {
+ "remote_out",
+};
+
+static const char * const spdif_in_groups[] = {
+ "spdif_in_f6", "spdif_in_f7",
+};
+
+static const char * const sw_groups[] = {
+ "swclk", "swdio",
+};
+
+static const char * const clk25_groups[] = {
+ "clk_25",
+};
+
+static const char * const cec_a_groups[] = {
+ "cec_a",
+};
+
+static const char * const cec_b_groups[] = {
+ "cec_b",
+};
+
+static const char * const clk12_24_groups[] = {
+ "clk12_24",
+};
+
+static const char * const mclk_0_groups[] = {
+ "mclk_0",
+};
+
+static const char * const tdm_b_groups[] = {
+ "tdm_b_din0", "tdm_b_din1", "tdm_b_din2",
+ "tdm_b_sclk", "tdm_b_fs", "tdm_b_dout0", "tdm_b_dout1",
+ "tdm_b_dout2", "tdm_b_dout3", "tdm_b_dout4", "tdm_b_dout5",
+ "tdm_b_slv_sclk", "tdm_b_slv_fs",
+};
+
+static const char * const mclk_vad_groups[] = {
+ "mclk_vad",
+};
+
+static const char * const tdm_vad_groups[] = {
+ "tdm_vad_sclk_a1", "tdm_vad_fs_a2", "tdm_vad_sclk_a5", "tdm_vad_fs_a6",
+};
+
+static const char * const tst_out_groups[] = {
+ "tst_out0", "tst_out1", "tst_out2", "tst_out3",
+ "tst_out4", "tst_out5", "tst_out6", "tst_out7",
+ "tst_out8", "tst_out9", "tst_out10", "tst_out11",
+};
+
+static const char * const mute_groups[] = {
+ "mute_key", "mute_en",
+};
+
+static struct meson_pmx_func meson_a1_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+ FUNCTION(psram),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(pwm_f),
+ FUNCTION(pwm_a_hiz),
+ FUNCTION(pwm_b_hiz),
+ FUNCTION(pwm_c_hiz),
+ FUNCTION(spif),
+ FUNCTION(sdcard),
+ FUNCTION(tdm_a),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(i2c0),
+ FUNCTION(i2c1),
+ FUNCTION(i2c2),
+ FUNCTION(i2c3),
+ FUNCTION(spi_a),
+ FUNCTION(pdm),
+ FUNCTION(gen_clk),
+ FUNCTION(remote_input),
+ FUNCTION(jtag_a),
+ FUNCTION(clk_32k_in),
+ FUNCTION(remote_out),
+ FUNCTION(spdif_in),
+ FUNCTION(sw),
+ FUNCTION(clk25),
+ FUNCTION(cec_a),
+ FUNCTION(cec_b),
+ FUNCTION(clk12_24),
+ FUNCTION(mclk_0),
+ FUNCTION(tdm_b),
+ FUNCTION(mclk_vad),
+ FUNCTION(tdm_vad),
+ FUNCTION(tst_out),
+ FUNCTION(mute),
+};
+
+static struct meson_bank meson_a1_periphs_banks[] = {
+ /* name first last irq pullen pull dir out in ds */
+ BANK_DS("P", GPIOP_0, GPIOP_12, 0, 12, 0x3, 0, 0x4, 0,
+ 0x2, 0, 0x1, 0, 0x0, 0, 0x5, 0),
+ BANK_DS("B", GPIOB_0, GPIOB_6, 13, 19, 0x13, 0, 0x14, 0,
+ 0x12, 0, 0x11, 0, 0x10, 0, 0x15, 0),
+ BANK_DS("X", GPIOX_0, GPIOX_16, 20, 36, 0x23, 0, 0x24, 0,
+ 0x22, 0, 0x21, 0, 0x20, 0, 0x25, 0),
+ BANK_DS("F", GPIOF_0, GPIOF_12, 37, 49, 0x33, 0, 0x34, 0,
+ 0x32, 0, 0x31, 0, 0x30, 0, 0x35, 0),
+ BANK_DS("A", GPIOA_0, GPIOA_11, 50, 61, 0x43, 0, 0x44, 0,
+ 0x42, 0, 0x41, 0, 0x40, 0, 0x45, 0),
+};
+
+static struct meson_pmx_bank meson_a1_periphs_pmx_banks[] = {
+ /* name first last reg offset */
+ BANK_PMX("P", GPIOP_0, GPIOP_12, 0x0, 0),
+ BANK_PMX("B", GPIOB_0, GPIOB_6, 0x2, 0),
+ BANK_PMX("X", GPIOX_0, GPIOX_16, 0x3, 0),
+ BANK_PMX("F", GPIOF_0, GPIOF_12, 0x6, 0),
+ BANK_PMX("A", GPIOA_0, GPIOA_11, 0x8, 0),
+};
+
+static struct meson_axg_pmx_data meson_a1_periphs_pmx_banks_data = {
+ .pmx_banks = meson_a1_periphs_pmx_banks,
+ .num_pmx_banks = ARRAY_SIZE(meson_a1_periphs_pmx_banks),
+};
+
+static struct meson_pinctrl_data meson_a1_periphs_pinctrl_data = {
+ .name = "periphs-banks",
+ .pins = meson_a1_periphs_pins,
+ .groups = meson_a1_periphs_groups,
+ .funcs = meson_a1_periphs_functions,
+ .banks = meson_a1_periphs_banks,
+ .num_pins = ARRAY_SIZE(meson_a1_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_a1_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_a1_periphs_functions),
+ .num_banks = ARRAY_SIZE(meson_a1_periphs_banks),
+ .pmx_ops = &meson_axg_pmx_ops,
+ .pmx_data = &meson_a1_periphs_pmx_banks_data,
+ .parse_dt = &meson_a1_parse_dt_extra,
+};
+
+static const struct of_device_id meson_a1_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-a1-periphs-pinctrl",
+ .data = &meson_a1_periphs_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_a1_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-a1-pinctrl",
+ .of_match_table = meson_a1_pinctrl_dt_match,
+ },
+};
+
+builtin_platform_driver(meson_a1_pinctrl_driver);
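The tables above lean on token pasting: each GROUP(name, func) entry stringizes the identifier for the group name and appends _pins to locate the pin array, so the identifier's spelling is also the exact string the *_groups function tables must carry (which is why pwm_a_hiz_f6 and clk25 have to match in both places). A sketch of the pattern; the real macro is GROUP() in pinctrl-meson-axg-pmx.h, and the field layout here is an assumption for illustration only:

/*
 * Illustrative reconstruction of the axg-style GROUP() macro: #grp
 * yields the name string, grp##_pins picks up the matching pin array.
 * Field names are assumed, not copied from the header.
 */
#define EXAMPLE_GROUP(grp, f)						\
	{								\
		.name = #grp,						\
		.pins = grp ## _pins,					\
		.num_pins = ARRAY_SIZE(grp ## _pins),			\
		.func = (f),						\
	}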
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index ad502eda4afa..072765db93d7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -1066,6 +1066,7 @@ static struct meson_pinctrl_data meson_axg_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_axg_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_axg_aobus_pmx_banks_data,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_axg_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 582665fd362a..41850e3c0091 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -1362,6 +1362,14 @@ static struct meson_axg_pmx_data meson_g12a_aobus_pmx_banks_data = {
.num_pmx_banks = ARRAY_SIZE(meson_g12a_aobus_pmx_banks),
};
+static int meson_g12a_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+
+ return 0;
+}
+
static struct meson_pinctrl_data meson_g12a_periphs_pinctrl_data = {
.name = "periphs-banks",
.pins = meson_g12a_periphs_pins,
@@ -1388,6 +1396,7 @@ static struct meson_pinctrl_data meson_g12a_aobus_pinctrl_data = {
.num_banks = ARRAY_SIZE(meson_g12a_aobus_banks),
.pmx_ops = &meson_axg_pmx_ops,
.pmx_data = &meson_g12a_aobus_pmx_banks_data,
+ .parse_dt = meson_g12a_aobus_parse_dt_extra,
};
static const struct of_device_id meson_g12a_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 5bfa56f3847e..926b9997159a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -851,6 +851,7 @@ static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 72c5373c8dc1..1b6e8646700f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -820,6 +820,7 @@ static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 8bba9d053d9f..3c80828a5e50 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -625,7 +625,7 @@ static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
i = of_property_match_string(node, "reg-names", name);
if (of_address_to_resource(node, i, &res))
- return ERR_PTR(-ENOENT);
+ return NULL;
base = devm_ioremap_resource(pc->dev, &res);
if (IS_ERR(base))
@@ -665,26 +665,24 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->of_node = gpio_np;
pc->reg_mux = meson_map_resource(pc, gpio_np, "mux");
- if (IS_ERR(pc->reg_mux)) {
+ if (IS_ERR_OR_NULL(pc->reg_mux)) {
dev_err(pc->dev, "mux registers not found\n");
- return PTR_ERR(pc->reg_mux);
+ return pc->reg_mux ? PTR_ERR(pc->reg_mux) : -ENOENT;
}
pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
- if (IS_ERR(pc->reg_gpio)) {
+ if (IS_ERR_OR_NULL(pc->reg_gpio)) {
dev_err(pc->dev, "gpio registers not found\n");
- return PTR_ERR(pc->reg_gpio);
+ return pc->reg_gpio ? PTR_ERR(pc->reg_gpio) : -ENOENT;
}
pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
- /* Use gpio region if pull one is not present */
if (IS_ERR(pc->reg_pull))
- pc->reg_pull = pc->reg_gpio;
+ pc->reg_pull = NULL;
pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
- /* Use pull region if pull-enable one is not present */
if (IS_ERR(pc->reg_pullen))
- pc->reg_pullen = pc->reg_pull;
+ pc->reg_pullen = NULL;
pc->reg_ds = meson_map_resource(pc, gpio_np, "ds");
if (IS_ERR(pc->reg_ds)) {
@@ -692,6 +690,28 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
pc->reg_ds = NULL;
}
+ if (pc->data->parse_dt)
+ return pc->data->parse_dt(pc);
+
+ return 0;
+}
+
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ if (!pc->reg_pull)
+ return -EINVAL;
+
+ pc->reg_pullen = pc->reg_pull;
+
+ return 0;
+}
+
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc)
+{
+ pc->reg_pull = pc->reg_gpio;
+ pc->reg_pullen = pc->reg_gpio;
+ pc->reg_ds = pc->reg_gpio;
+
return 0;
}
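The new parse_dt hook runs as the tail of meson_pinctrl_parse_dt(), after the common region lookups, so an SoC can alias or veto register maps without duplicating the shared code. A hypothetical new SoC whose pull and pull-enable bits live in the gpio region, A1-style, would wire it up like this (all names besides the callback type are made up):

static int meson_newsoc_parse_dt_extra(struct meson_pinctrl *pc)
{
	/* Pull and pull-enable share the gpio register region. */
	pc->reg_pull = pc->reg_gpio;
	pc->reg_pullen = pc->reg_gpio;

	return 0;
}

static struct meson_pinctrl_data meson_newsoc_pinctrl_data = {
	.name		= "periphs-banks",
	/* ... pins/groups/funcs/banks as usual ... */
	.parse_dt	= meson_newsoc_parse_dt_extra,
};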
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index c696f3241a36..f8b0ff9d419a 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -11,6 +11,8 @@
#include <linux/regmap.h>
#include <linux/types.h>
+struct meson_pinctrl;
+
/**
* struct meson_pmx_group - a pinmux group
*
@@ -114,6 +116,7 @@ struct meson_pinctrl_data {
unsigned int num_banks;
const struct pinmux_ops *pmx_ops;
void *pmx_data;
+ int (*parse_dt)(struct meson_pinctrl *pc);
};
struct meson_pinctrl {
@@ -171,3 +174,7 @@ int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
/* Common probe function */
int meson_pinctrl_probe(struct platform_device *pdev);
+/* Common AO-bank extra DT parse function for SoCs before g12a */
+int meson8_aobus_parse_dt_extra(struct meson_pinctrl *pc);
+/* Common extra DT parse function for SoCs like the A1 */
+int meson_a1_parse_dt_extra(struct meson_pinctrl *pc);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 0b97befa6335..dd17100efdcf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -1103,6 +1103,7 @@ static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8_pinctrl_dt_match[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index a7de388388e6..2d5339edd0b7 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -962,6 +962,7 @@ static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
.pmx_ops = &meson8_pmx_ops,
+ .parse_dt = &meson8_aobus_parse_dt_extra,
};
static const struct of_device_id meson8b_pinctrl_dt_match[] = {
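Note that meson8 and meson8b spell the initializer &meson8_aobus_parse_dt_extra while axg, gxbb and gxl drop the ampersand; in C the two forms are identical, since a function designator decays to a pointer either way:

/* Both initializers yield the same pointer value. */
int (*cb1)(struct meson_pinctrl *) = meson8_aobus_parse_dt_extra;
int (*cb2)(struct meson_pinctrl *) = &meson8_aobus_parse_dt_extra;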
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index d69c25798871..0d12894d3ee1 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -46,8 +46,8 @@ config PINCTRL_ORION
select PINCTRL_MVEBU
config PINCTRL_ARMADA_37XX
- bool
- select GENERIC_PINCONF
- select MFD_SYSCON
- select PINCONF
- select PINMUX
+ bool
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+ select PINCONF
+ select PINMUX
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f2f5fcd9a237..aa9dcde0f069 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -595,10 +595,10 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
regmap_read(info->regmap, in_reg, &in_val);
/* Set initial polarity based on current input level. */
- if (in_val & d->mask)
- val |= d->mask; /* falling */
+ if (in_val & BIT(d->hwirq % GPIO_PER_REG))
+ val |= BIT(d->hwirq % GPIO_PER_REG); /* falling */
else
- val &= ~d->mask; /* rising */
+ val &= ~(BIT(d->hwirq % GPIO_PER_REG)); /* rising */
break;
}
default:
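The polarity fix above matters because irq_data::mask is only populated by the generic irq chip code, which gpiolib-based irqchips never use, so the old test read a stale zero; the register bit has to be derived from the hwirq number instead. With the driver's 32-lines-per-register layout (GPIO_PER_REG is assumed to be 32 here) the computation reduces to:

/*
 * Hypothetical helper mirroring the fixed expression: GPIO line N
 * maps to bit N % 32 of its polarity register.
 */
static inline u32 armada_37xx_irq_reg_bit(irq_hw_number_t hwirq)
{
	return BIT(hwirq % GPIO_PER_REG);
}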
@@ -722,6 +722,8 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip;
+ struct gpio_irq_chip *girq = &gc->irq;
+ struct device *dev = &pdev->dev;
struct resource res;
int ret = -ENODEV, i, nr_irq_parent;
@@ -732,19 +734,21 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
break;
}
};
- if (ret)
+ if (ret) {
+ dev_err(dev, "no gpio-controller child node\n");
return ret;
+ }
nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
if (!nr_irq_parent) {
- dev_err(&pdev->dev, "Invalid or no IRQ\n");
+ dev_err(dev, "invalid or no IRQ\n");
return 0;
}
if (of_address_to_resource(info->dev->of_node, 1, &res)) {
- dev_err(info->dev, "cannot find IO resource\n");
+ dev_err(dev, "cannot find IO resource\n");
return -ENOENT;
}
@@ -759,27 +763,27 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
irqchip->irq_set_type = armada_37xx_irq_set_type;
irqchip->irq_startup = armada_37xx_irq_startup;
irqchip->name = info->data->name;
- ret = gpiochip_irqchip_add(gc, irqchip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_info(&pdev->dev, "could not add irqchip\n");
- return ret;
- }
-
+ girq->chip = irqchip;
+ girq->parent_handler = armada_37xx_irq_handler;
/*
* Many interrupts are connected to the parent interrupt
* controller. But we do not take advantage of this and use
* the chained irq with all of them.
*/
+ girq->num_parents = nr_irq_parent;
+ girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent,
+ sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
int irq = irq_of_parse_and_map(np, i);
if (irq < 0)
continue;
-
- gpiochip_set_chained_irqchip(gc, irqchip, irq,
- armada_37xx_irq_handler);
+ girq->parents[i] = irq;
}
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
return 0;
}
@@ -809,10 +813,10 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc->of_node = np;
gc->label = info->data->name;
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
+ ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
- ret = armada_37xx_irqchip_register(pdev, info);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
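This hunk is the standard conversion from gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() to the struct gpio_irq_chip template, and it also explains the probe reordering just above: gpiolib now consumes gc->irq during devm_gpiochip_add_data(), so the irq fields must be staged before the chip is registered. A generic sketch of the new-style setup (my_irq_chip and my_parent_handler are placeholders, not this driver's symbols):

static int my_gpio_register(struct platform_device *pdev,
			    struct gpio_chip *gc, int parent_irq)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = &my_irq_chip;		/* placeholder irq_chip */
	girq->parent_handler = my_parent_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = parent_irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_edge_irq;

	/* Registration now picks up everything staged in gc->irq. */
	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}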
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 00cfaf2c9d4a..a1f93859e7ca 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -759,12 +759,10 @@ int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
- struct resource *res;
void __iomem *base;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
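
Several probe() conversions in this series (mvebu above; orion, artpec6, at91 and others below) replace the platform_get_resource() plus devm_ioremap_resource() pair with a single devm_platform_ioremap_resource() call. The helper is a straight wrapper; roughly, its effective behavior is the following sketch (not the in-tree source):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                             unsigned int index)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
}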
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 29bb9d8cbbb5..cc97d270be61 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -220,17 +220,14 @@ static int orion_pinctrl_probe(struct platform_device *pdev)
{
const struct of_device_id *match =
of_match_device(orion_pinctrl_of_match, &pdev->dev);
- struct resource *res;
pdev->dev.platform_data = (void*)match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ mpp_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mpp_base))
return PTR_ERR(mpp_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- high_mpp_base = devm_ioremap_resource(&pdev->dev, res);
+ high_mpp_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(high_mpp_base))
return PTR_ERR(high_mpp_base);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index 726c0b5501fa..b9246e0b4fe2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -391,6 +391,15 @@ static const unsigned mc0_a_1_pins[] = { DB8500_PIN_AC2, /* MC0_CMDDIR */
DB8500_PIN_AA2, /* MC0_DAT2 */
DB8500_PIN_AA1 /* MC0_DAT3 */
};
+/* MMC/SD card 0 interface without CMD/DAT0/DAT2 direction control */
+static const unsigned mc0_a_2_pins[] = { DB8500_PIN_AA3, /* MC0_FBCLK */
+ DB8500_PIN_AA4, /* MC0_CLK */
+ DB8500_PIN_AB2, /* MC0_CMD */
+ DB8500_PIN_Y4, /* MC0_DAT0 */
+ DB8500_PIN_Y2, /* MC0_DAT1 */
+ DB8500_PIN_AA2, /* MC0_DAT2 */
+ DB8500_PIN_AA1 /* MC0_DAT3 */
+};
/* Often only 4 bits are used, then these are not needed (only used for MMC) */
static const unsigned mc0_dat47_a_1_pins[] = { DB8500_PIN_W2, /* MC0_DAT4 */
DB8500_PIN_W3, /* MC0_DAT5 */
@@ -670,6 +679,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc0_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
@@ -828,7 +838,7 @@ DB8500_FUNC_GROUPS(ipi2c, "ipi2c_a_1", "ipi2c_a_2");
*/
DB8500_FUNC_GROUPS(msp0, "msp0txrx_a_1", "msp0tfstck_a_1", "msp0rfstck_a_1",
"msp0txrx_b_1", "msp0sck_b_1");
-DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_dat47_a_1", "mc0dat31dir_a_1");
+DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_a_2", "mc0_dat47_a_1", "mc0dat31dir_a_1");
/* MSP1 can swap RX/TX like MSP0 but has no SCK pin available */
DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 2a8190b11d10..95f864dfdef4 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -248,9 +248,6 @@ struct nmk_gpio_chip {
void __iomem *addr;
struct clk *clk;
unsigned int bank;
- unsigned int parent_irq;
- int latent_parent_irq;
- u32 (*get_latent_status)(unsigned int bank);
void (*set_ioforce)(bool enable);
spinlock_t lock;
bool sleepmode;
@@ -802,13 +799,19 @@ static void nmk_gpio_irq_shutdown(struct irq_data *d)
clk_disable(nmk_chip->clk);
}
-static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
+static void nmk_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *host_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ u32 status;
chained_irq_enter(host_chip, desc);
+ clk_enable(nmk_chip->clk);
+ status = readl(nmk_chip->addr + NMK_GPIO_IS);
+ clk_disable(nmk_chip->clk);
+
while (status) {
int bit = __ffs(status);
@@ -819,28 +822,6 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
chained_irq_exit(host_chip, desc);
}
-static void nmk_gpio_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status;
-
- clk_enable(nmk_chip->clk);
- status = readl(nmk_chip->addr + NMK_GPIO_IS);
- clk_disable(nmk_chip->clk);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
-static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
-{
- struct gpio_chip *chip = irq_desc_get_handler_data(desc);
- struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
-
- __nmk_gpio_irq_handler(desc, status);
-}
-
/* I/O Functions */
static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
@@ -1103,8 +1084,8 @@ static int nmk_gpio_probe(struct platform_device *dev)
struct device_node *np = dev->dev.of_node;
struct nmk_gpio_chip *nmk_chip;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
struct irq_chip *irqchip;
- int latent_irq;
bool supports_sleepmode;
int irq;
int ret;
@@ -1125,15 +1106,10 @@ static int nmk_gpio_probe(struct platform_device *dev)
if (irq < 0)
return irq;
- /* It's OK for this IRQ not to be present */
- latent_irq = platform_get_irq(dev, 1);
-
/*
* The virt address in nmk_chip->addr is in the nomadik register space,
* so we can simply convert the resource address, without remapping
*/
- nmk_chip->parent_irq = irq;
- nmk_chip->latent_parent_irq = latent_irq;
nmk_chip->sleepmode = supports_sleepmode;
spin_lock_init(&nmk_chip->lock);
@@ -1163,6 +1139,19 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip->base,
chip->base + chip->ngpio - 1);
+ girq = &chip->irq;
+ girq->chip = irqchip;
+ girq->parent_handler = nmk_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&dev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
clk_enable(nmk_chip->clk);
nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI);
clk_disable(nmk_chip->clk);
@@ -1174,33 +1163,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
platform_set_drvdata(dev, nmk_chip);
- /*
- * Let the generic code handle this edge IRQ, the chained
- * handler will perform the actual work of handling the parent
- * interrupt.
- */
- ret = gpiochip_irqchip_add(chip,
- irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&dev->dev, "could not add irqchip\n");
- gpiochip_remove(&nmk_chip->chip);
- return -ENODEV;
- }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->parent_irq,
- nmk_gpio_irq_handler);
- if (nmk_chip->latent_parent_irq > 0)
- gpiochip_set_chained_irqchip(chip,
- irqchip,
- nmk_chip->latent_parent_irq,
- nmk_gpio_latent_irq_handler);
-
- dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
+ dev_info(&dev->dev, "chip registered\n");
return 0;
}
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index 17f909d8b63a..22077cbe6880 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -1954,6 +1954,22 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
int ret, id;
for (id = 0 ; id < pctrl->bank_num ; id++) {
+ struct gpio_irq_chip *girq;
+
+ girq = &pctrl->gpio_bank[id].gc.irq;
+ girq->chip = &pctrl->gpio_bank[id].irq_chip;
+ girq->parent_handler = npcmgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctrl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
+ goto err_register;
+ }
+ girq->parents[0] = pctrl->gpio_bank[id].irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
ret = devm_gpiochip_add_data(pctrl->dev,
&pctrl->gpio_bank[id].gc,
&pctrl->gpio_bank[id]);
@@ -1972,22 +1988,6 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
gpiochip_remove(&pctrl->gpio_bank[id].gc);
goto err_register;
}
-
- ret = gpiochip_irqchip_add(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(pctrl->dev,
- "Failed to add IRQ chip %u\n", id);
- gpiochip_remove(&pctrl->gpio_bank[id].gc);
- goto err_register;
- }
-
- gpiochip_set_chained_irqchip(&pctrl->gpio_bank[id].gc,
- &pctrl->gpio_bank[id].irq_chip,
- pctrl->gpio_bank[id].irq,
- npcmgpio_irq_handler);
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 2c61141519f8..eab078244a4c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -540,7 +540,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
- u32 *regs, regval;
+ u32 __iomem *regs;
+ u32 regval;
u64 status, mask;
/* Read the wake status */
diff --git a/drivers/pinctrl/pinctrl-artpec6.c b/drivers/pinctrl/pinctrl-artpec6.c
index e3239cf926f9..986e04ac6b5b 100644
--- a/drivers/pinctrl/pinctrl-artpec6.c
+++ b/drivers/pinctrl/pinctrl-artpec6.c
@@ -936,7 +936,6 @@ static void artpec6_pmx_reset(struct artpec6_pmx *pmx)
static int artpec6_pmx_probe(struct platform_device *pdev)
{
struct artpec6_pmx *pmx;
- struct resource *res;
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx)
@@ -944,8 +943,7 @@ static int artpec6_pmx_probe(struct platform_device *pdev)
pmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->base = devm_ioremap_resource(&pdev->dev, res);
+ pmx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->base))
return PTR_ERR(pmx->base);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index d6de4d360cd4..694912409fd9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -328,6 +328,33 @@ static int atmel_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & BIT(pin->line));
}
+static int atmel_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ bitmap_zero(bits, atmel_pioctrl->npins);
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int word = bank;
+ unsigned int offset = 0;
+ unsigned int reg;
+
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+ offset = bank * ATMEL_PIO_NPINS_PER_BANK % BITS_PER_LONG;
+#endif
+ if (!mask[word])
+ continue;
+
+ reg = atmel_gpio_read(atmel_pioctrl, bank, ATMEL_PIO_PDSR);
+ bits[word] |= mask[word] & (reg << offset);
+ }
+
+ return 0;
+}
+
static int atmel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
@@ -358,11 +385,46 @@ static void atmel_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(pin->line));
}
+static void atmel_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct atmel_pioctrl *atmel_pioctrl = gpiochip_get_data(chip);
+ unsigned int bank;
+
+ for (bank = 0; bank < atmel_pioctrl->nbanks; bank++) {
+ unsigned int bitmask;
+ unsigned int word = bank;
+
+/*
+ * On a 64-bit platform, BITS_PER_LONG is 64, so it is necessary to iterate
+ * over two 32-bit words to handle the whole bitmask
+ */
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ word = BIT_WORD(bank * ATMEL_PIO_NPINS_PER_BANK);
+#endif
+ if (!mask[word])
+ continue;
+
+ bitmask = mask[word] & bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_SODR, bitmask);
+
+ bitmask = mask[word] & ~bits[word];
+ atmel_gpio_write(atmel_pioctrl, bank, ATMEL_PIO_CODR, bitmask);
+
+#if ATMEL_PIO_NPINS_PER_BANK != BITS_PER_LONG
+ mask[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+ bits[word] >>= ATMEL_PIO_NPINS_PER_BANK;
+#endif
+ }
+}
+
static struct gpio_chip atmel_gpio_chip = {
.direction_input = atmel_gpio_direction_input,
.get = atmel_gpio_get,
+ .get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
.set = atmel_gpio_set,
+ .set_multiple = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -955,8 +1017,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->nbanks = atmel_pioctrl_data->nbanks;
atmel_pioctrl->npins = atmel_pioctrl->nbanks * ATMEL_PIO_NPINS_PER_BANK;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- atmel_pioctrl->reg_base = devm_ioremap_resource(dev, res);
+ atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
return -EINVAL;
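
The get_multiple()/set_multiple() callbacks added above must translate between the controller's 32-pin banks and the unsigned long bitmaps gpiolib hands in, so on a 64-bit kernel two banks share one bitmap word. A worked example of the index math, assuming ATMEL_PIO_NPINS_PER_BANK is 32; map_bank_into_bitmap() is a hypothetical helper for illustration:

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * With 32 pins per bank and BITS_PER_LONG == 64:
 *   bank 0 -> bits[BIT_WORD(0)]  == bits[0], shifted by 0
 *   bank 1 -> bits[BIT_WORD(32)] == bits[0], shifted by 32
 *   bank 2 -> bits[BIT_WORD(64)] == bits[1], shifted by 0
 */
static void map_bank_into_bitmap(unsigned long *bits, unsigned int bank,
                                 u32 bank_reg_val)
{
        unsigned int word = BIT_WORD(bank * 32);
        unsigned int shift = (bank * 32) % BITS_PER_LONG;

        bits[word] |= (unsigned long)bank_reg_val << shift;
}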
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index d6e7e9f0ddec..207f266e9cf2 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -85,8 +85,8 @@ enum drive_strength_bit {
DRIVE_STRENGTH_SHIFT)
enum slewrate_bit {
- SLEWRATE_BIT_DIS,
SLEWRATE_BIT_ENA,
+ SLEWRATE_BIT_DIS,
};
#define SLEWRATE_BIT_MSK(name) (SLEWRATE_BIT_##name << SLEWRATE_SHIFT)
@@ -669,7 +669,7 @@ static void at91_mux_sam9x60_set_slewrate(void __iomem *pio, unsigned pin,
{
unsigned int tmp;
- if (setting < SLEWRATE_BIT_DIS || setting > SLEWRATE_BIT_ENA)
+ if (setting < SLEWRATE_BIT_ENA || setting > SLEWRATE_BIT_DIS)
return;
tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR);
@@ -1723,9 +1723,11 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
struct irq_chip *gpio_irqchip;
- int ret, i;
+ struct gpio_irq_chip *girq;
+ int i;
- gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip), GFP_KERNEL);
+ gpio_irqchip = devm_kzalloc(&pdev->dev, sizeof(*gpio_irqchip),
+ GFP_KERNEL);
if (!gpio_irqchip)
return -ENOMEM;
@@ -1747,33 +1749,30 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
* handler will perform the actual work of handling the parent
* interrupt.
*/
- ret = gpiochip_irqchip_add(&at91_gpio->chip,
- gpio_irqchip,
- 0,
- handle_edge_irq,
- IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev, "at91_gpio.%d: Couldn't add irqchip to gpiochip.\n",
- at91_gpio->pioc_idx);
- return ret;
- }
+ girq = &at91_gpio->chip.irq;
+ girq->chip = gpio_irqchip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- /* The top level handler handles one bank of GPIOs, except
+ /*
+ * The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
if (!gpiochip_prev) {
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ girq->parent_handler = gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = at91_gpio->pioc_virq;
return 0;
}
prev = gpiochip_get_data(gpiochip_prev);
-
/* we can only have 2 banks before */
for (i = 0; i < 2; i++) {
if (prev->next) {
@@ -1812,7 +1811,6 @@ static const struct of_device_id at91_gpio_of_match[] = {
static int at91_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct at91_gpio_chip *at91_chip = NULL;
struct gpio_chip *chip;
struct pinctrl_gpio_range *range;
@@ -1840,8 +1838,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_chip->regbase = devm_ioremap_resource(&pdev->dev, res);
+ at91_chip->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(at91_chip->regbase)) {
ret = PTR_ERR(at91_chip->regbase);
goto err;
@@ -1903,6 +1900,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
range->npins = chip->ngpio;
range->gc = chip;
+ ret = at91_gpio_of_irq_setup(pdev, at91_chip);
+ if (ret)
+ goto gpiochip_add_err;
+
ret = gpiochip_add_data(chip, at91_chip);
if (ret)
goto gpiochip_add_err;
@@ -1910,16 +1911,10 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- ret = at91_gpio_of_irq_setup(pdev, at91_chip);
- if (ret)
- goto irq_setup_err;
-
dev_info(&pdev->dev, "at address %p\n", at91_chip->regbase);
return 0;
-irq_setup_err:
- gpiochip_remove(chip);
gpiochip_add_err:
clk_enable_err:
clk_disable_unprepare(at91_chip->clock);
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index 63b130cb1ffb..f7dff4f14101 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -1308,15 +1308,13 @@ static struct pinctrl_desc bm1880_desc = {
static int bm1880_pinctrl_probe(struct platform_device *pdev)
{
- struct resource *res;
struct bm1880_pinctrl *pctrl;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->base))
return PTR_ERR(pctrl->base);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 08b9e909e917..2905348ff430 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -615,7 +615,7 @@ static struct coh901_pinpair coh901_pintable[] = {
static int __init u300_gpio_probe(struct platform_device *pdev)
{
struct u300_gpio *gpio;
- struct resource *memres;
+ struct gpio_irq_chip *girq;
int err = 0;
int portno;
u32 val;
@@ -632,8 +632,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->chip.base = 0;
gpio->dev = &pdev->dev;
- memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, memres);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
@@ -672,26 +671,17 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
gpio->base + U300_GPIO_CR);
u300_gpio_init_coh901571(gpio);
-#ifdef CONFIG_OF_GPIO
- gpio->chip.of_node = pdev->dev.of_node;
-#endif
- err = gpiochip_add_data(&gpio->chip, gpio);
- if (err) {
- dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
- goto err_no_chip;
- }
-
- err = gpiochip_irqchip_add(&gpio->chip,
- &u300_gpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_EDGE_FALLING);
- if (err) {
- dev_err(gpio->dev, "no GPIO irqchip\n");
- goto err_no_irqchip;
+ girq = &gpio->chip.irq;
+ girq->chip = &u300_gpio_irqchip;
+ girq->parent_handler = u300_gpio_irq_handler;
+ girq->num_parents = U300_GPIO_NUM_PORTS;
+ girq->parents = devm_kcalloc(gpio->dev, U300_GPIO_NUM_PORTS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ err = -ENOMEM;
+ goto err_dis_clk;
}
-
- /* Add each port with its IRQ separately */
for (portno = 0 ; portno < U300_GPIO_NUM_PORTS; portno++) {
struct u300_gpio_port *port = &gpio->ports[portno];
@@ -700,16 +690,21 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
port->gpio = gpio;
port->irq = platform_get_irq(pdev, portno);
-
- gpiochip_set_chained_irqchip(&gpio->chip,
- &u300_gpio_irqchip,
- port->irq,
- u300_gpio_irq_handler);
+ girq->parents[portno] = port->irq;
/* Turns off irq force (test register) for this port */
writel(0x0, gpio->base + portno * gpio->stride + ifr);
}
- dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);
+ girq->default_type = IRQ_TYPE_EDGE_FALLING;
+ girq->handler = handle_simple_irq;
+#ifdef CONFIG_OF_GPIO
+ gpio->chip.of_node = pdev->dev.of_node;
+#endif
+ err = gpiochip_add_data(&gpio->chip, gpio);
+ if (err) {
+ dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
+ goto err_dis_clk;
+ }
/*
* Add pinctrl pin ranges, the pin controller must be registered
@@ -729,9 +724,8 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
return 0;
err_no_range:
-err_no_irqchip:
gpiochip_remove(&gpio->chip);
-err_no_chip:
+err_dis_clk:
clk_disable_unprepare(gpio->clk);
dev_err(&pdev->dev, "module ERROR:%d\n", err);
return err;
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index d06f13a79740..5a0a1f20c843 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -146,14 +146,12 @@ static int da850_pupd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da850_pupd_data *data;
- struct resource *res;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base)) {
dev_err(dev, "Could not map resource\n");
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 7e1ceee5895b..ff702cfbaa28 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -270,7 +270,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
static int dc_pinctrl_probe(struct platform_device *pdev)
{
struct dc_pinmap *pmap;
- struct resource *r;
struct pinctrl_pin_desc *pins;
struct pinctrl_desc *pctl_desc;
char *pin_names;
@@ -281,8 +280,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
if (!pmap)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+ pmap->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmap->regs))
return PTR_ERR(pmap->regs);
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
new file mode 100644
index 000000000000..067271b7d35a
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Intel Corporation */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinmux.h"
+#include "pinctrl-equilibrium.h"
+
+#define PIN_NAME_FMT "io-%d"
+#define PIN_NAME_LEN 10
+#define PAD_REG_OFF 0x100
+
+static void eqbr_gpio_disable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_enable_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ gc->direction_input(gc, offset);
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNRNSET);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_ack_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+}
+
+static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
+{
+ eqbr_gpio_disable_irq(d);
+ eqbr_gpio_ack_irq(d);
+}
+
+static inline void eqbr_cfg_bit(void __iomem *addr,
+ unsigned int offset, unsigned int set)
+{
+ if (set)
+ writel(readl(addr) | BIT(offset), addr);
+ else
+ writel(readl(addr) & ~BIT(offset), addr);
+}
+
+static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ struct eqbr_gpio_ctrl *gctrl,
+ unsigned int offset)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gctrl->lock, flags);
+ eqbr_cfg_bit(gctrl->membase + GPIO_IRNCFG, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR1, offset, type->trig_type);
+ eqbr_cfg_bit(gctrl->membase + GPIO_EXINTCR0, offset, type->logic_type);
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+
+ return 0;
+}
+
+static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ unsigned int offset = irqd_to_hwirq(d);
+ struct gpio_irq_type it;
+
+ memset(&it, 0, sizeof(it));
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE)
+ return 0;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ it.trig_type = GPIO_EDGE_TRIG;
+ it.edge_type = GPIO_BOTH_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_POSITIVE_TRIG;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ it.trig_type = GPIO_LEVEL_TRIG;
+ it.edge_type = GPIO_SINGLE_EDGE;
+ it.logic_type = GPIO_NEGATIVE_TRIG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ eqbr_irq_type_cfg(&it, gctrl, offset);
+ if (it.trig_type == GPIO_EDGE_TRIG)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ return 0;
+}
+
+static void eqbr_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+ struct irq_chip *ic = irq_desc_get_chip(desc);
+ unsigned long pins, offset;
+
+ chained_irq_enter(ic, desc);
+ pins = readl(gctrl->membase + GPIO_IRNCR);
+
+ for_each_set_bit(offset, &pins, gc->ngpio)
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, offset));
+
+ chained_irq_exit(ic, desc);
+}
+
+static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
+{
+ struct gpio_irq_chip *girq;
+ struct gpio_chip *gc;
+
+ gc = &gctrl->chip;
+ gc->label = gctrl->name;
+#if defined(CONFIG_OF_GPIO)
+ gc->of_node = gctrl->node;
+#endif
+
+ if (!of_property_read_bool(gctrl->node, "interrupt-controller")) {
+ dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n",
+ gctrl->name);
+ return 0;
+ }
+
+ gctrl->ic.name = "gpio_irq";
+ gctrl->ic.irq_mask = eqbr_gpio_disable_irq;
+ gctrl->ic.irq_unmask = eqbr_gpio_enable_irq;
+ gctrl->ic.irq_ack = eqbr_gpio_ack_irq;
+ gctrl->ic.irq_mask_ack = eqbr_gpio_mask_ack_irq;
+ gctrl->ic.irq_set_type = eqbr_gpio_set_irq_type;
+
+ girq = &gctrl->chip.irq;
+ girq->chip = &gctrl->ic;
+ girq->parent_handler = eqbr_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents), GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+ girq->parents[0] = gctrl->virq;
+
+ return 0;
+}
+
+static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_gpio_ctrl *gctrl;
+ struct device_node *np;
+ struct resource res;
+ int i, ret;
+
+ for (i = 0; i < drvdata->nr_gpio_ctrls; i++) {
+ gctrl = drvdata->gpio_ctrls + i;
+ np = gctrl->node;
+
+ gctrl->name = devm_kasprintf(dev, GFP_KERNEL, "gpiochip%d", i);
+ if (!gctrl->name)
+ return -ENOMEM;
+
+ if (of_address_to_resource(np, 0, &res)) {
+ dev_err(dev, "Failed to get GPIO register address\n");
+ return -ENXIO;
+ }
+
+ gctrl->membase = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(gctrl->membase))
+ return PTR_ERR(gctrl->membase);
+
+ gctrl->virq = irq_of_parse_and_map(np, 0);
+ if (!gctrl->virq) {
+ dev_err(dev, "%s: failed to parse and map irq\n",
+ gctrl->name);
+ return -ENXIO;
+ }
+ raw_spin_lock_init(&gctrl->lock);
+
+ ret = bgpio_init(&gctrl->chip, dev, gctrl->bank->nr_pins / 8,
+ gctrl->membase + GPIO_IN,
+ gctrl->membase + GPIO_OUTSET,
+ gctrl->membase + GPIO_OUTCLR,
+ gctrl->membase + GPIO_DIR,
+ NULL, 0);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+
+ ret = gpiochip_setup(dev, gctrl);
+ if (ret)
+ return ret;
+
+ ret = devm_gpiochip_add_data(dev, &gctrl->chip, gctrl);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct eqbr_pin_bank
+*find_pinbank_via_pin(struct eqbr_pinctrl_drv_data *pctl, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ int i;
+
+ for (i = 0; i < pctl->nr_banks; i++) {
+ bank = &pctl->pin_banks[i];
+ if (pin >= bank->pin_base &&
+ (pin - bank->pin_base) < bank->nr_pins)
+ return bank;
+ }
+
+ return NULL;
+}
+
+static const struct pinctrl_ops eqbr_pctl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int eqbr_set_pin_mux(struct eqbr_pinctrl_drv_data *pctl,
+ unsigned int pmx, unsigned int pin)
+{
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ writel(pmx, mem + (offset * 4));
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return 0;
+}
+
+static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ unsigned int *pinmux;
+ int i;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ pinmux = grp->data;
+ for (i = 0; i < grp->num_pins; i++)
+ eqbr_set_pin_mux(pctl, pinmux[i], grp->pins[i]);
+
+ return 0;
+}
+
+static int eqbr_pinmux_gpio_request(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return eqbr_set_pin_mux(pctl, EQBR_GPIO_MODE, pin);
+}
+
+static const struct pinmux_ops eqbr_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = eqbr_pinmux_set_mux,
+ .gpio_request_enable = eqbr_pinmux_gpio_request,
+ .strict = true,
+};
+
+static int get_drv_cur(void __iomem *mem, unsigned int offset)
+{
+ unsigned int idx = offset / DRV_CUR_PINS; /* 0-15, 16-31 per register */
+ unsigned int pin_offset = offset % DRV_CUR_PINS;
+
+ return PARSE_DRV_CURRENT(readl(mem + REG_DRCC(idx)), pin_offset);
+}
+
+static struct eqbr_gpio_ctrl
+*get_gpio_ctrls_via_bank(struct eqbr_pinctrl_drv_data *pctl,
+ struct eqbr_pin_bank *bank)
+{
+ int i;
+
+ for (i = 0; i < pctl->nr_gpio_ctrls; i++) {
+ if (pctl->gpio_ctrls[i].bank == bank)
+ return &pctl->gpio_ctrls[i];
+ }
+
+ return NULL;
+}
+
+static int eqbr_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ struct eqbr_gpio_ctrl *gctrl;
+ struct eqbr_pin_bank *bank;
+ unsigned long flags;
+ unsigned int offset;
+ void __iomem *mem;
+ u32 val;
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev, "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ if (!(bank->aval_pinmap & BIT(offset))) {
+ dev_err(pctl->dev,
+ "PIN: %u is not valid, pinbase: %u, bitmap: %u\n",
+ pin, bank->pin_base, bank->aval_pinmap);
+ return -ENODEV;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = !!(readl(mem + REG_PUEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = !!(readl(mem + REG_PDEN) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ val = !!(readl(mem + REG_OD) & BIT(offset));
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val = get_drv_cur(mem, offset);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ val = !!(readl(mem + REG_SRC) & BIT(offset));
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENODEV;
+ }
+ val = !!(readl(gctrl->membase + GPIO_DIR) & BIT(offset));
+ break;
+ default:
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ return -ENOTSUPP;
+ }
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ *config = pinconf_to_config_packed(param, val);
+ return 0;
+}
+
+static int eqbr_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct eqbr_gpio_ctrl *gctrl;
+ enum pin_config_param param;
+ struct eqbr_pin_bank *bank;
+ unsigned int val, offset;
+ struct gpio_chip *gc;
+ unsigned long flags;
+ void __iomem *mem;
+ u32 regval, mask;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ val = pinconf_to_config_argument(configs[i]);
+
+ bank = find_pinbank_via_pin(pctl, pin);
+ if (!bank) {
+ dev_err(pctl->dev,
+ "Couldn't find pin bank for pin %u\n", pin);
+ return -ENODEV;
+ }
+ mem = bank->membase;
+ offset = pin - bank->pin_base;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mem += REG_PUEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mem += REG_PDEN;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mem += REG_OD;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ mem += REG_DRCC(offset / DRV_CUR_PINS);
+ offset = (offset % DRV_CUR_PINS) * 2;
+ mask = GENMASK(1, 0) << offset;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ mem += REG_SRC;
+ mask = BIT(offset);
+ break;
+ case PIN_CONFIG_OUTPUT_ENABLE:
+ gctrl = get_gpio_ctrls_via_bank(pctl, bank);
+ if (!gctrl) {
+ dev_err(pctl->dev, "Failed to find gpio via bank pinbase: %u, pin: %u\n",
+ bank->pin_base, pin);
+ return -ENODEV;
+ }
+ gc = &gctrl->chip;
+ gc->direction_output(gc, offset, 0);
+ continue;
+ default:
+ return -ENOTSUPP;
+ }
+
+ raw_spin_lock_irqsave(&pctl->lock, flags);
+ regval = readl(mem);
+ regval = (regval & ~mask) | ((val << offset) & mask);
+ writel(regval, mem);
+ raw_spin_unlock_irqrestore(&pctl->lock, flags);
+ }
+
+ return 0;
+}
+
+static int eqbr_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ unsigned int i, npins, old = 0;
+ const unsigned int *pins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ if (eqbr_pinconf_get(pctldev, pins[i], config))
+ return -ENOTSUPP;
+
+ if (i && old != *config)
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+ return 0;
+}
+
+static int eqbr_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = eqbr_pinconf_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static const struct pinconf_ops eqbr_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = eqbr_pinconf_get,
+ .pin_config_set = eqbr_pinconf_set,
+ .pin_config_group_get = eqbr_pinconf_group_get,
+ .pin_config_group_set = eqbr_pinconf_group_set,
+ .pin_config_config_dbg_show = pinconf_generic_dump_config,
+};
+
+static bool is_func_exist(struct eqbr_pmx_func *funcs, const char *name,
+ unsigned int nr_funcs, unsigned int *idx)
+{
+ int i;
+
+ if (!funcs)
+ return false;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (funcs[i].name && !strcmp(funcs[i].name, name)) {
+ *idx = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int funcs_utils(struct device *dev, struct eqbr_pmx_func *funcs,
+ unsigned int *nr_funcs, funcs_util_ops op)
+{
+ struct device_node *node = dev->of_node;
+ struct device_node *np;
+ struct property *prop;
+ const char *fn_name;
+ unsigned int fid;
+ int i, j;
+
+ i = 0;
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ if (of_property_read_string(np, "function", &fn_name)) {
+ /* some groups may not have a function; that's OK */
+ dev_dbg(dev, "Group %s: no function bound\n",
+ (char *)prop->value);
+ continue;
+ }
+
+ switch (op) {
+ case OP_COUNT_NR_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ *nr_funcs = *nr_funcs + 1;
+ break;
+
+ case OP_ADD_FUNCS:
+ if (!is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[i].name = fn_name;
+ break;
+
+ case OP_COUNT_NR_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid))
+ funcs[fid].nr_groups++;
+ break;
+
+ case OP_ADD_FUNC_GRPS:
+ if (is_func_exist(funcs, fn_name, *nr_funcs, &fid)) {
+ for (j = 0; j < funcs[fid].nr_groups; j++)
+ if (!funcs[fid].groups[j])
+ break;
+ funcs[fid].groups[j] = prop->value;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static int eqbr_build_functions(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct eqbr_pmx_func *funcs = NULL;
+ unsigned int nr_funcs = 0;
+ int i, ret;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNCS);
+ if (ret)
+ return ret;
+
+ funcs = devm_kcalloc(dev, nr_funcs, sizeof(*funcs), GFP_KERNEL);
+ if (!funcs)
+ return -ENOMEM;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNCS);
+ if (ret)
+ return ret;
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_COUNT_NR_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ if (!funcs[i].nr_groups)
+ continue;
+ funcs[i].groups = devm_kcalloc(dev, funcs[i].nr_groups,
+ sizeof(*(funcs[i].groups)),
+ GFP_KERNEL);
+ if (!funcs[i].groups)
+ return -ENOMEM;
+ }
+
+ ret = funcs_utils(dev, funcs, &nr_funcs, OP_ADD_FUNC_GRPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_funcs; i++) {
+ ret = pinmux_generic_add_function(drvdata->pctl_dev,
+ funcs[i].name,
+ funcs[i].groups,
+ funcs[i].nr_groups,
+ drvdata);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register function %s\n",
+ funcs[i].name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int eqbr_build_groups(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *node = dev->of_node;
+ unsigned int *pinmux, pin_id, pinmux_id;
+ struct group_desc group;
+ struct device_node *np;
+ struct property *prop;
+ int j, err;
+
+ for_each_child_of_node(node, np) {
+ prop = of_find_property(np, "groups", NULL);
+ if (!prop)
+ continue;
+
+ group.num_pins = of_property_count_u32_elems(np, "pins");
+ if (group.num_pins < 0) {
+ dev_err(dev, "No pins in the group: %s\n", prop->name);
+ return -EINVAL;
+ }
+ group.name = prop->value;
+ group.pins = devm_kcalloc(dev, group.num_pins,
+ sizeof(*(group.pins)), GFP_KERNEL);
+ if (!group.pins)
+ return -ENOMEM;
+
+ pinmux = devm_kcalloc(dev, group.num_pins, sizeof(*pinmux),
+ GFP_KERNEL);
+ if (!pinmux)
+ return -ENOMEM;
+
+ for (j = 0; j < group.num_pins; j++) {
+ if (of_property_read_u32_index(np, "pins", j, &pin_id)) {
+ dev_err(dev, "Group %s: Read intel pins id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ if (pin_id >= drvdata->pctl_desc.npins) {
+ dev_err(dev, "Group %s: Invalid pin ID, idx: %d, pin %u\n",
+ group.name, j, pin_id);
+ return -EINVAL;
+ }
+ group.pins[j] = pin_id;
+ if (of_property_read_u32_index(np, "pinmux", j, &pinmux_id)) {
+ dev_err(dev, "Group %s: Read intel pinmux id failed\n",
+ group.name);
+ return -EINVAL;
+ }
+ pinmux[j] = pinmux_id;
+ }
+
+ err = pinctrl_generic_add_group(drvdata->pctl_dev, group.name,
+ group.pins, group.num_pins,
+ pinmux);
+ if (err < 0) {
+ dev_err(dev, "Failed to register group %s\n", group.name);
+ return err;
+ }
+ memset(&group, 0, sizeof(group));
+ pinmux = NULL;
+ }
+
+ return 0;
+}
+
+static int pinctrl_reg(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct pinctrl_desc *pctl_desc;
+ struct pinctrl_pin_desc *pdesc;
+ struct device *dev;
+ unsigned int nr_pins;
+ char *pin_names;
+ int i, ret;
+
+ dev = drvdata->dev;
+ pctl_desc = &drvdata->pctl_desc;
+ pctl_desc->name = "eqbr-pinctrl";
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &eqbr_pctl_ops;
+ pctl_desc->pmxops = &eqbr_pinmux_ops;
+ pctl_desc->confops = &eqbr_pinconf_ops;
+ raw_spin_lock_init(&drvdata->lock);
+
+ for (i = 0, nr_pins = 0; i < drvdata->nr_banks; i++)
+ nr_pins += drvdata->pin_banks[i].nr_pins;
+
+ pdesc = devm_kcalloc(dev, nr_pins, sizeof(*pdesc), GFP_KERNEL);
+ if (!pdesc)
+ return -ENOMEM;
+ pin_names = devm_kcalloc(dev, nr_pins, PIN_NAME_LEN, GFP_KERNEL);
+ if (!pin_names)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pins; i++) {
+ sprintf(pin_names, PIN_NAME_FMT, i);
+ pdesc[i].number = i;
+ pdesc[i].name = pin_names;
+ pin_names += PIN_NAME_LEN;
+ }
+ pctl_desc->pins = pdesc;
+ pctl_desc->npins = nr_pins;
+ dev_dbg(dev, "pinctrl total pin number: %u\n", nr_pins);
+
+ ret = devm_pinctrl_register_and_init(dev, pctl_desc, drvdata,
+ &drvdata->pctl_dev);
+ if (ret)
+ return ret;
+
+ ret = eqbr_build_groups(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ ret = eqbr_build_functions(drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to build groups\n");
+ return ret;
+ }
+
+ return pinctrl_enable(drvdata->pctl_dev);
+}
+
+static int pinbank_init(struct device_node *np,
+ struct eqbr_pinctrl_drv_data *drvdata,
+ struct eqbr_pin_bank *bank, unsigned int id)
+{
+ struct device *dev = drvdata->dev;
+ struct of_phandle_args spec;
+ int ret;
+
+ bank->membase = drvdata->membase + id * PAD_REG_OFF;
+
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &spec);
+ if (ret) {
+ dev_err(dev, "gpio-range not available!\n");
+ return ret;
+ }
+
+ bank->pin_base = spec.args[1];
+ bank->nr_pins = spec.args[2];
+
+ bank->aval_pinmap = readl(bank->membase + REG_AVAIL);
+ bank->id = id;
+
+ dev_dbg(dev, "pinbank id: %d, reg: %px, pinbase: %u, pin number: %u, pinmap: 0x%x\n",
+ id, bank->membase, bank->pin_base,
+ bank->nr_pins, bank->aval_pinmap);
+
+ return ret;
+}
+
+static int pinbank_probe(struct eqbr_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = drvdata->dev;
+ struct device_node *np_gpio;
+ struct eqbr_gpio_ctrl *gctrls;
+ struct eqbr_pin_bank *banks;
+ int i, nr_gpio;
+
+ /* Count gpio bank number */
+ nr_gpio = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (of_device_is_available(np_gpio))
+ nr_gpio++;
+ }
+
+ if (!nr_gpio) {
+ dev_err(dev, "NO pin bank available!\n");
+ return -ENODEV;
+ }
+
+ /* Count pin bank number and gpio controller number */
+ banks = devm_kcalloc(dev, nr_gpio, sizeof(*banks), GFP_KERNEL);
+ if (!banks)
+ return -ENOMEM;
+
+ gctrls = devm_kcalloc(dev, nr_gpio, sizeof(*gctrls), GFP_KERNEL);
+ if (!gctrls)
+ return -ENOMEM;
+
+ dev_dbg(dev, "found %d gpio controller!\n", nr_gpio);
+
+ /* Initialize Pin bank */
+ i = 0;
+ for_each_node_by_name(np_gpio, "gpio") {
+ if (!of_device_is_available(np_gpio))
+ continue;
+
+ pinbank_init(np_gpio, drvdata, banks + i, i);
+
+ gctrls[i].node = np_gpio;
+ gctrls[i].bank = banks + i;
+ i++;
+ }
+
+ drvdata->pin_banks = banks;
+ drvdata->nr_banks = nr_gpio;
+ drvdata->gpio_ctrls = gctrls;
+ drvdata->nr_gpio_ctrls = nr_gpio;
+
+ return 0;
+}
+
+static int eqbr_pinctrl_probe(struct platform_device *pdev)
+{
+ struct eqbr_pinctrl_drv_data *drvdata;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ drvdata->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->membase))
+ return PTR_ERR(drvdata->membase);
+
+ ret = pinbank_probe(drvdata);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_reg(drvdata);
+ if (ret)
+ return ret;
+
+ ret = gpiolib_reg(drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, drvdata);
+ return 0;
+}
+
+static const struct of_device_id eqbr_pinctrl_dt_match[] = {
+ { .compatible = "intel,lgm-io" },
+ {}
+};
+
+static struct platform_driver eqbr_pinctrl_driver = {
+ .probe = eqbr_pinctrl_probe,
+ .driver = {
+ .name = "eqbr-pinctrl",
+ .of_match_table = eqbr_pinctrl_dt_match,
+ },
+};
+
+module_platform_driver(eqbr_pinctrl_driver);
+
+MODULE_AUTHOR("Zhu Yixin <yixin.zhu@intel.com>, Rahul Tanwar <rahul.tanwar@intel.com>");
+MODULE_DESCRIPTION("Pinctrl Driver for LGM SoC (Equilibrium)");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
new file mode 100644
index 000000000000..83cb7dafc657
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+#ifndef __PINCTRL_EQUILIBRIUM_H
+#define __PINCTRL_EQUILIBRIUM_H
+
+/* PINPAD register offset */
+#define REG_PMX_BASE 0x0 /* Port Multiplexer Control Register */
+#define REG_PUEN 0x80 /* PULL UP Enable Register */
+#define REG_PDEN 0x84 /* PULL DOWN Enable Register */
+#define REG_SRC 0x88 /* Slew Rate Control Register */
+#define REG_DCC0 0x8C /* Drive Current Control Register 0 */
+#define REG_DCC1 0x90 /* Drive Current Control Register 1 */
+#define REG_OD 0x94 /* Open Drain Enable Register */
+#define REG_AVAIL 0x98 /* Pad Control Availability Register */
+#define DRV_CUR_PINS 16 /* Drive Current pin number per register */
+#define REG_DRCC(x) (REG_DCC0 + (x) * 4) /* Driver current macro */
+
+/* GPIO register offset */
+#define GPIO_OUT 0x0 /* Data Output Register */
+#define GPIO_IN 0x4 /* Data Input Register */
+#define GPIO_DIR 0x8 /* Direction Register */
+#define GPIO_EXINTCR0 0x18 /* External Interrupt Control Register 0 */
+#define GPIO_EXINTCR1 0x1C /* External Interrupt Control Register 1 */
+#define GPIO_IRNCR 0x20 /* IRN Capture Register */
+#define GPIO_IRNICR 0x24 /* IRN Interrupt Control Register */
+#define GPIO_IRNEN 0x28 /* IRN Interrupt Enable Register */
+#define GPIO_IRNCFG 0x2C /* IRN Interrupt Configuration Register */
+#define GPIO_IRNRNSET 0x30 /* IRN Interrupt Enable Set Register */
+#define GPIO_IRNENCLR 0x34 /* IRN Interrupt Enable Clear Register */
+#define GPIO_OUTSET 0x40 /* Output Set Register */
+#define GPIO_OUTCLR 0x44 /* Output Clear Register */
+#define GPIO_DIRSET 0x48 /* Direction Set Register */
+#define GPIO_DIRCLR 0x4C /* Direction Clear Register */
+
+/* parse given pin's driver current value */
+#define PARSE_DRV_CURRENT(val, pin) (((val) >> ((pin) * 2)) & 0x3)
+
+#define GPIO_EDGE_TRIG 0
+#define GPIO_LEVEL_TRIG 1
+#define GPIO_SINGLE_EDGE 0
+#define GPIO_BOTH_EDGE 1
+#define GPIO_POSITIVE_TRIG 0
+#define GPIO_NEGATIVE_TRIG 1
+
+#define EQBR_GPIO_MODE 0
+
+typedef enum {
+ OP_COUNT_NR_FUNCS,
+ OP_ADD_FUNCS,
+ OP_COUNT_NR_FUNC_GRPS,
+ OP_ADD_FUNC_GRPS,
+ OP_NONE,
+} funcs_util_ops;
+
+/**
+ * struct gpio_irq_type: gpio irq configuration
+ * @trig_type: level trigger or edge trigger
+ * @edge_type: single edge or both edges
+ * @logic_type: positive trigger or negative trigger
+ */
+struct gpio_irq_type {
+ unsigned int trig_type;
+ unsigned int edge_type;
+ unsigned int logic_type;
+};
+
+/**
+ * struct eqbr_pmx_func: represent a pin function.
+ * @name: name of the pin function, used to lookup the function.
+ * @groups: one or more names of pin groups that provide this function.
+ * @nr_groups: number of groups included in @groups.
+ */
+struct eqbr_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned int nr_groups;
+};
+
+/**
+ * struct eqbr_pin_bank: represent a pin bank.
+ * @membase: base address of the pin bank register.
+ * @id: bank id, to identify the unique bank.
+ * @pin_base: starting pin number of the pin bank.
+ * @nr_pins: number of the pins of the pin bank.
+ * @aval_pinmap: available pin bitmap of the pin bank.
+ */
+struct eqbr_pin_bank {
+ void __iomem *membase;
+ unsigned int id;
+ unsigned int pin_base;
+ unsigned int nr_pins;
+ u32 aval_pinmap;
+};
+
+/**
+ * struct eqbr_gpio_ctrl: represent a gpio controller.
+ * @node: device node of gpio controller.
+ * @bank: pointer to corresponding pin bank.
+ * @membase: base address of the gpio controller.
+ * @chip: gpio chip.
+ * @ic: irq chip.
+ * @name: gpio chip name.
+ * @virq: irq number of the gpio chip to parent's irq domain.
+ * @lock: spin lock to protect gpio register write.
+ */
+struct eqbr_gpio_ctrl {
+ struct device_node *node;
+ struct eqbr_pin_bank *bank;
+ void __iomem *membase;
+ struct gpio_chip chip;
+ struct irq_chip ic;
+ const char *name;
+ unsigned int virq;
+ raw_spinlock_t lock; /* protect gpio register */
+};
+
+/**
+ * struct eqbr_pinctrl_drv_data:
+ * @dev: device instance representing the controller.
+ * @pctl_desc: pin controller descriptor.
+ * @pctl_dev: pin control class device
+ * @membase: base address of pin controller
+ * @pin_banks: list of pin banks of the driver.
+ * @nr_banks: number of pin banks.
+ * @gpio_ctrls: list of gpio controllers.
+ * @nr_gpio_ctrls: number of gpio controllers.
+ * @lock: protect pinctrl register write
+ */
+struct eqbr_pinctrl_drv_data {
+ struct device *dev;
+ struct pinctrl_desc pctl_desc;
+ struct pinctrl_dev *pctl_dev;
+ void __iomem *membase;
+ struct eqbr_pin_bank *pin_banks;
+ unsigned int nr_banks;
+ struct eqbr_gpio_ctrl *gpio_ctrls;
+ unsigned int nr_gpio_ctrls;
+ raw_spinlock_t lock; /* protect pinpad register */
+};
+
+#endif /* __PINCTRL_EQUILIBRIUM_H */
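
The drive-current registers defined above pack sixteen 2-bit fields per 32-bit register: REG_DRCC() selects the register and PARSE_DRV_CURRENT() extracts one field. A worked example for pin 21, assuming the layout stays as defined in this header; drv_current_of_pin21() is a hypothetical illustration:

#include <linux/io.h>
#include "pinctrl-equilibrium.h"

/*
 * Pin 21: 21 / 16 == 1 -> REG_DRCC(1) == REG_DCC1 (offset 0x90)
 *         21 % 16 == 5 -> 2-bit field at bits 10..11 of that register
 */
static u32 drv_current_of_pin21(void __iomem *membase)
{
        u32 val = readl(membase + REG_DRCC(21 / DRV_CUR_PINS));

        return PARSE_DRV_CURRENT(val, 21 % DRV_CUR_PINS); /* 0..3 */
}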
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6e2683016c1f..24e0e2ef47a4 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -686,6 +686,7 @@ static int jz4770_mac_rmii_pins[] = {
0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
};
static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
+static int jz4770_otg_pins[] = { 0x8a, };
static int jz4770_uart0_data_funcs[] = { 0, 0, };
static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
@@ -744,6 +745,7 @@ static int jz4770_pwm_pwm6_funcs[] = { 0, };
static int jz4770_pwm_pwm7_funcs[] = { 0, };
static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
static int jz4770_mac_mii_funcs[] = { 0, 0, };
+static int jz4770_otg_funcs[] = { 0, };
static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
@@ -799,6 +801,7 @@ static const struct group_desc jz4770_groups[] = {
INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii),
INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii),
+ INGENIC_PIN_GROUP("otg-vbus", jz4770_otg),
};
static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
@@ -841,6 +844,7 @@ static const char *jz4770_pwm5_groups[] = { "pwm5", };
static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
+static const char *jz4770_otg_groups[] = { "otg-vbus", };
static const struct function_desc jz4770_functions[] = {
{ "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
@@ -871,6 +875,7 @@ static const struct function_desc jz4770_functions[] = {
{ "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
{ "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
{ "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), },
+ { "otg", jz4770_otg_groups, ARRAY_SIZE(jz4770_otg_groups), },
};
static const struct ingenic_chip_info jz4770_chip_info = {
@@ -1801,19 +1806,30 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
}
+static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool high)
+{
+ if (jzpc->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
+}
+
static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int num_configs)
{
struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
unsigned int idx = pin % PINS_PER_GPIO_CHIP;
unsigned int offt = pin / PINS_PER_GPIO_CHIP;
- unsigned int cfg;
+ unsigned int cfg, arg;
+ int ret;
for (cfg = 0; cfg < num_configs; cfg++) {
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_OUTPUT:
continue;
default:
return -ENOTSUPP;
@@ -1821,6 +1837,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
}
for (cfg = 0; cfg < num_configs; cfg++) {
+ arg = pinconf_to_config_argument(configs[cfg]);
+
switch (pinconf_to_config_param(configs[cfg])) {
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
@@ -1844,6 +1862,14 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_bias(jzpc, pin, true);
break;
+ case PIN_CONFIG_OUTPUT:
+ ret = pinctrl_gpio_direction_output(pin);
+ if (ret)
+ return ret;
+
+ ingenic_set_output_level(jzpc, pin, arg);
+ break;
+
default:
unreachable();
}
@@ -1940,6 +1966,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
{
struct ingenic_gpio_chip *jzgc;
struct device *dev = jzpc->dev;
+ struct gpio_irq_chip *girq;
unsigned int bank;
int err;
@@ -1982,10 +2009,6 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.free = gpiochip_generic_free;
}
- err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
- if (err)
- return err;
-
jzgc->irq = irq_of_parse_and_map(node, 0);
if (!jzgc->irq)
return -EINVAL;
@@ -2000,13 +2023,22 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
- err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
- handle_level_irq, IRQ_TYPE_NONE);
+ girq = &jzgc->gc.irq;
+ girq->chip = &jzgc->irq_chip;
+ girq->parent_handler = ingenic_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = jzgc->irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
+ err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
if (err)
return err;
- gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
- jzgc->irq, ingenic_gpio_irq_handler);
return 0;
}
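
The ingenic change above extends the driver's existing two-pass pinconf set: the first loop rejects any unsupported parameter before the second loop touches hardware, so a config set either applies fully or not at all. A stripped-down sketch of that pattern; supported() and apply_one() are hypothetical stand-ins for the driver's switch bodies:

#include <linux/errno.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/types.h>

/* Hypothetical helpers standing in for the driver's switch statements. */
static bool supported(enum pin_config_param param);
static void apply_one(unsigned long config);

static int two_pass_pinconf_set(unsigned long *configs, unsigned int n)
{
        unsigned int i;

        /* Pass 1: validate every entry before changing anything. */
        for (i = 0; i < n; i++)
                if (!supported(pinconf_to_config_param(configs[i])))
                        return -ENOTSUPP;

        /* Pass 2: apply, now guaranteed not to fail on a bad parameter. */
        for (i = 0; i < n; i++)
                apply_one(configs[i]);

        return 0;
}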
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 06be55dab341..e4677546aec4 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1324,15 +1324,13 @@ static int lpc18xx_create_group_func_map(struct device *dev,
static int lpc18xx_scu_probe(struct platform_device *pdev)
{
struct lpc18xx_scu_data *scu;
- struct resource *res;
int ret;
scu = devm_kzalloc(&pdev->dev, sizeof(*scu), GFP_KERNEL);
if (!scu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scu->base = devm_ioremap_resource(&pdev->dev, res);
+ scu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scu->base))
return PTR_ERR(scu->base);
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index fb76fb2e9ea5..eb3dd0d46d6c 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -736,6 +736,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
struct ocelot_pinctrl *info)
{
struct gpio_chip *gc;
+ struct gpio_irq_chip *girq;
int ret, irq;
info->gpio_chip = ocelot_gpiolib_chip;
@@ -747,22 +748,26 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc->of_node = info->dev->of_node;
gc->label = "ocelot-gpio";
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
-
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq <= 0)
return irq;
- ret = gpiochip_irqchip_add(gc, &ocelot_irqchip, 0, handle_edge_irq,
- IRQ_TYPE_NONE);
+ girq = &gc->irq;
+ girq->chip = &ocelot_irqchip;
+ girq->parent_handler = ocelot_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
- gpiochip_set_chained_irqchip(gc, &ocelot_irqchip, irq,
- ocelot_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 55488ca246f1..674b7b5919df 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1196,7 +1196,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
struct oxnas_gpio_bank *bank;
unsigned int id, ngpios;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_parse_phandle_with_fixed_args(np, "gpio-ranges",
3, 0, &pinspec)) {
@@ -1219,8 +1219,7 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank = &oxnas_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -1232,6 +1231,18 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
bank->gpio_chip.ngpio = ngpios;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = oxnas_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
@@ -1239,18 +1250,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, oxnas_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index e7f6dd5ab578..e5d6d3f9753e 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2202,7 +2202,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
struct pic32_gpio_bank *bank;
u32 id;
int irq, ret;
- struct resource *res;
+ struct gpio_irq_chip *girq;
if (of_property_read_u32(np, "microchip,gpio-bank", &id)) {
dev_err(&pdev->dev, "microchip,gpio-bank property not found\n");
@@ -2216,8 +2216,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank = &pic32_gpio_banks[id];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ bank->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bank->reg_base))
return PTR_ERR(bank->reg_base);
@@ -2240,25 +2239,23 @@ static int pic32_gpio_probe(struct platform_device *pdev)
bank->gpio_chip.parent = &pdev->dev;
bank->gpio_chip.of_node = np;
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pic32_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->parents[0] = irq;
ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
id, ret);
return ret;
}
-
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
- id, ret);
- gpiochip_remove(&bank->gpio_chip);
- return ret;
- }
-
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pic32_gpio_irq_handler);
-
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 379e9a6a6d89..fa370c171cad 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1352,6 +1352,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
for (i = 0; i < pctl->nbanks; i++) {
char child_name[sizeof("gpioXX")];
struct device_node *child;
+ struct gpio_irq_chip *girq;
snprintf(child_name, sizeof(child_name), "gpio%d", i);
child = of_get_child_by_name(node, child_name);
@@ -1383,23 +1384,28 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
bank->gpio_chip.parent = pctl->dev;
bank->gpio_chip.of_node = child;
- ret = gpiochip_add_data(&bank->gpio_chip, bank);
- if (ret < 0) {
- dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
- i, ret);
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &bank->irq_chip;
+ girq->parent_handler = pistachio_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(pctl->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents) {
+ ret = -ENOMEM;
goto err;
}
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&bank->gpio_chip, bank);
if (ret < 0) {
- dev_err(pctl->dev, "Failed to add IRQ chip %u: %d\n",
+ dev_err(pctl->dev, "Failed to add GPIO chip %u: %d\n",
i, ret);
- gpiochip_remove(&bank->gpio_chip);
goto err;
}
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
- irq, pistachio_gpio_irq_handler);
ret = gpiochip_add_pin_range(&bank->gpio_chip,
dev_name(pctl->dev), 0,
@@ -1429,7 +1435,6 @@ static const struct of_device_id pistachio_pinctrl_of_match[] = {
static int pistachio_pinctrl_probe(struct platform_device *pdev)
{
struct pistachio_pinctrl *pctl;
- struct resource *res;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1437,8 +1442,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
pctl->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, pctl);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->base = devm_ioremap_resource(&pdev->dev, res);
+ pctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->base))
return PTR_ERR(pctl->base);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index dc0bbf198cbc..fc9a2a9959d9 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -58,6 +58,7 @@ enum rockchip_pinctrl_type {
RK3128,
RK3188,
RK3288,
+ RK3308,
RK3368,
RK3399,
};
@@ -70,6 +71,7 @@ enum rockchip_pinctrl_type {
#define IOMUX_SOURCE_PMU BIT(2)
#define IOMUX_UNROUTED BIT(3)
#define IOMUX_WIDTH_3BIT BIT(4)
+#define IOMUX_WIDTH_2BIT BIT(5)
/**
* @type: iomux variant using IOMUX_* constants
@@ -656,6 +658,100 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
},
};
+static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
+ {
+ .num = 1,
+ .pin = 14,
+ .reg = 0x28,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 15,
+ .reg = 0x2c,
+ .bit = 0,
+ .mask = 0x3
+ }, {
+ .num = 1,
+ .pin = 18,
+ .reg = 0x30,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 19,
+ .reg = 0x30,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 20,
+ .reg = 0x30,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 21,
+ .reg = 0x34,
+ .bit = 0,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 22,
+ .reg = 0x34,
+ .bit = 4,
+ .mask = 0xf
+ }, {
+ .num = 1,
+ .pin = 23,
+ .reg = 0x34,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 12,
+ .reg = 0x68,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ .num = 3,
+ .pin = 13,
+ .reg = 0x68,
+ .bit = 12,
+ .mask = 0xf
+ }, {
+ .num = 2,
+ .pin = 2,
+ .reg = 0x608,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 3,
+ .reg = 0x608,
+ .bit = 4,
+ .mask = 0x7
+ }, {
+ .num = 2,
+ .pin = 16,
+ .reg = 0x610,
+ .bit = 8,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 10,
+ .reg = 0x610,
+ .bit = 0,
+ .mask = 0x7
+ }, {
+ .num = 3,
+ .pin = 11,
+ .reg = 0x610,
+ .bit = 4,
+ .mask = 0x7
+ },
+};
+
static struct rockchip_mux_recalced_data rk3328_mux_recalced_data[] = {
{
.num = 2,
@@ -982,6 +1078,192 @@ static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3308_mux_route_data[] = {
+ {
+ /* rtc_clk */
+ .bank_num = 0,
+ .pin = 19,
+ .func = 1,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 0) | BIT(0),
+ }, {
+ /* uart2_rxm0 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3),
+ }, {
+ /* uart2_rxm1 */
+ .bank_num = 4,
+ .pin = 26,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 2) | BIT(16 + 3) | BIT(2),
+ }, {
+ /* i2c3_sdam0 */
+ .bank_num = 0,
+ .pin = 15,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9),
+ }, {
+ /* i2c3_sdam1 */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 2,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(8),
+ }, {
+ /* i2c3_sdam2 */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 3,
+ .route_offset = 0x608,
+ .route_val = BIT(16 + 8) | BIT(16 + 9) | BIT(9),
+ }, {
+ /* i2s-8ch-1-sclktxm0 */
+ .bank_num = 1,
+ .pin = 3,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclkrxm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* i2s-8ch-1-sclktxm1 */
+ .bank_num = 1,
+ .pin = 13,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* i2s-8ch-1-sclkrxm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* pdm-clkm0 */
+ .bank_num = 1,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* pdm-clkm1 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 4,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* pdm-clkm2 */
+ .bank_num = 2,
+ .pin = 6,
+ .func = 2,
+ .route_offset = 0x308,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* pdm-clkm-m2 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 3,
+ .route_offset = 0x600,
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* spi1_miso */
+ .bank_num = 3,
+ .pin = 10,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9),
+ }, {
+ /* spi1_miso_m1 */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 9) | BIT(9),
+ }, {
+ /* owire_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11),
+ }, {
+ /* owire_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 7,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
+ }, {
+ /* owire_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
+ }, {
+ /* can_rxd_m0 */
+ .bank_num = 0,
+ .pin = 11,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13),
+ }, {
+ /* can_rxd_m1 */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 5,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(12),
+ }, {
+ /* can_rxd_m2 */
+ .bank_num = 2,
+ .pin = 2,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 12) | BIT(16 + 13) | BIT(13),
+ }, {
+ /* mac_rxd0_m0 */
+ .bank_num = 1,
+ .pin = 20,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14),
+ }, {
+ /* mac_rxd0_m1 */
+ .bank_num = 4,
+ .pin = 2,
+ .func = 2,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 14) | BIT(14),
+ }, {
+ /* uart3_rx */
+ .bank_num = 3,
+ .pin = 12,
+ .func = 4,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15),
+ }, {
+ /* uart3_rx_m1 */
+ .bank_num = 0,
+ .pin = 17,
+ .func = 3,
+ .route_offset = 0x314,
+ .route_val = BIT(16 + 15) | BIT(15),
+ },
+};
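
The route_val values above follow the usual Rockchip GRF hiword-mask convention: the upper 16 bits of the register act as a write-enable mask for the corresponding lower 16 bits, so a single write updates only the selected field. For the uart2_rxm1 entry, for example:

/*
 * Hiword-mask write: bits [31:16] gate which of bits [15:0] are updated,
 * so one regmap_write() replaces a read-modify-write:
 *
 *	regmap_write(regmap, route_offset,
 *		     BIT(16 + 2) | BIT(16 + 3) | BIT(2));
 *
 * enables writes to bits 2 and 3, sets bit 2 and clears bit 3.
 */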
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -1475,6 +1757,26 @@ static int rv1108_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
return 0;
}
+#define RK3308_SCHMITT_PINS_PER_REG 8
+#define RK3308_SCHMITT_BANK_STRIDE 16
+#define RK3308_SCHMITT_GRF_OFFSET 0x1a0
+
+static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_SCHMITT_GRF_OFFSET;
+
+ *reg += bank->bank_num * RK3308_SCHMITT_BANK_STRIDE;
+ *reg += ((pin_num / RK3308_SCHMITT_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3308_SCHMITT_PINS_PER_REG;
+
+ return 0;
+}
+
#define RK2928_PULL_OFFSET 0x118
#define RK2928_PULL_PINS_PER_REG 16
#define RK2928_PULL_BANK_STRIDE 8
@@ -1646,6 +1948,40 @@ static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
*bit *= RK3288_DRV_BITS_PER_PIN;
}
+#define RK3308_PULL_OFFSET 0xa0
+
+static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_PULL_OFFSET;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+}
+
+#define RK3308_DRV_GRF_OFFSET 0x100
+
+static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ *regmap = info->regmap_base;
+ *reg = RK3308_DRV_GRF_OFFSET;
+ *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3288_DRV_PINS_PER_REG);
+ *bit *= RK3288_DRV_BITS_PER_PIN;
+}
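
The three rk3308 helpers above all walk the same grid: a fixed stride per bank, one 32-bit register per PINS_PER_REG pins, and a bit field per pin. A worked example for the schmitt helper, using the constants defined earlier (the bank and pin numbers are illustrative):

/*
 * rk3308_calc_schmitt_reg_and_bit() for bank 2, pin 13:
 *
 *	reg = 0x1a0			RK3308_SCHMITT_GRF_OFFSET
 *	    + 2 * 16			bank stride	-> 0x1c0
 *	    + (13 / 8) * 4		register select	-> 0x1c4
 *	bit = 13 % 8					->   5
 */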
+
#define RK3368_PULL_GRF_OFFSET 0x100
#define RK3368_PULL_PMU_OFFSET 0x10
@@ -1986,6 +2322,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2030,6 +2367,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
pull_type = bank->pull_type[pin_num / 8];
@@ -2293,6 +2631,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RV1108:
case RK3188:
case RK3288:
+ case RK3308:
case RK3368:
case RK3399:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
@@ -3303,7 +3642,8 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
* 4bit iomux'es are spread over two registers.
*/
inc = (iom->type & (IOMUX_WIDTH_4BIT |
- IOMUX_WIDTH_3BIT)) ? 8 : 4;
+ IOMUX_WIDTH_3BIT |
+ IOMUX_WIDTH_2BIT)) ? 8 : 4;
if (iom->type & IOMUX_SOURCE_PMU)
pmu_offs += inc;
else
@@ -3709,6 +4049,44 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3308_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT,
+ IOMUX_WIDTH_2BIT),
+};
+
+static struct rockchip_pin_ctrl rk3308_pin_ctrl = {
+ .pin_banks = rk3308_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3308_pin_banks),
+ .label = "RK3308-GPIO",
+ .type = RK3308,
+ .grf_mux_offset = 0x0,
+ .iomux_recalced = rk3308_mux_recalced_data,
+ .niomux_recalced = ARRAY_SIZE(rk3308_mux_recalced_data),
+ .iomux_routes = rk3308_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3308_mux_route_data),
+ .pull_calc_reg = rk3308_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3308_calc_drv_reg_and_bit,
+ .schmitt_calc_reg = rk3308_calc_schmitt_reg_and_bit,
+};
+
static struct rockchip_pin_bank rk3328_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", 0, 0, 0, 0),
PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", 0, 0, 0, 0),
@@ -3849,6 +4227,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = &rk3228_pin_ctrl },
{ .compatible = "rockchip,rk3288-pinctrl",
.data = &rk3288_pin_ctrl },
+ { .compatible = "rockchip,rk3308-pinctrl",
+ .data = &rk3308_pin_ctrl },
{ .compatible = "rockchip,rk3328-pinctrl",
.data = &rk3328_pin_ctrl },
{ .compatible = "rockchip,rk3368-pinctrl",
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 017fc6b3e27e..215db220d795 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -617,12 +617,6 @@ static void rza1_pin_reset(struct rza1_port *port, unsigned int pin)
spin_unlock_irqrestore(&port->lock, irqflags);
}
-static inline int rza1_pin_get_direction(struct rza1_port *port,
- unsigned int pin)
-{
- return !!rza1_get_bit(port, RZA1_PM_REG, pin);
-}
-
/**
* rza1_pin_set_direction() - set I/O direction on a pin in port mode
*
@@ -783,7 +777,7 @@ static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct rza1_port *port = gpiochip_get_data(chip);
- return rza1_pin_get_direction(port, gpio);
+ return !!rza1_get_bit(port, RZA1_PM_REG, gpio);
}
static int rza1_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-rza2.c b/drivers/pinctrl/pinctrl-rza2.c
index 3be1d833bf25..a205964e839b 100644
--- a/drivers/pinctrl/pinctrl-rza2.c
+++ b/drivers/pinctrl/pinctrl-rza2.c
@@ -213,8 +213,8 @@ static const char * const rza2_gpio_names[] = {
"PC_0", "PC_1", "PC_2", "PC_3", "PC_4", "PC_5", "PC_6", "PC_7",
"PD_0", "PD_1", "PD_2", "PD_3", "PD_4", "PD_5", "PD_6", "PD_7",
"PE_0", "PE_1", "PE_2", "PE_3", "PE_4", "PE_5", "PE_6", "PE_7",
- "PF_0", "PF_1", "PF_2", "PF_3", "P0_4", "PF_5", "PF_6", "PF_7",
- "PG_0", "PG_1", "PG_2", "P0_3", "PG_4", "PG_5", "PG_6", "PG_7",
+ "PF_0", "PF_1", "PF_2", "PF_3", "PF_4", "PF_5", "PF_6", "PF_7",
+ "PG_0", "PG_1", "PG_2", "PG_3", "PG_4", "PG_5", "PG_6", "PG_7",
"PH_0", "PH_1", "PH_2", "PH_3", "PH_4", "PH_5", "PH_6", "PH_7",
/* port I does not exist */
"PJ_0", "PJ_1", "PJ_2", "PJ_3", "PJ_4", "PJ_5", "PJ_6", "PJ_7",
@@ -462,7 +462,6 @@ static const struct pinmux_ops rza2_pinmux_ops = {
static int rza2_pinctrl_probe(struct platform_device *pdev)
{
struct rza2_pinctrl_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -471,8 +470,7 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/pinctrl/pinctrl-rzn1.c b/drivers/pinctrl/pinctrl-rzn1.c
index 0f6f8a10a53a..39538d40dbf3 100644
--- a/drivers/pinctrl/pinctrl-rzn1.c
+++ b/drivers/pinctrl/pinctrl-rzn1.c
@@ -487,7 +487,7 @@ static int rzn1_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct rzn1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
- const u32 reg_drive[4] = { 4, 6, 8, 12 };
+ static const u32 reg_drive[4] = { 4, 6, 8, 12 };
u32 pull, drive, l1mux;
u32 l1, l2, arg = 0;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 00db8b9efb2c..4f39a7945d01 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1477,7 +1477,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
struct device *dev = info->dev;
int bank_num = of_alias_get_id(np, "gpio");
struct resource res, irq_res;
- int gpio_irq = 0, err;
+ int err;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
@@ -1500,12 +1500,6 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
range->pin_base = range->base = range->id * ST_GPIO_PINS_PER_BANK;
range->npins = bank->gpio_chip.ngpio;
range->gc = &bank->gpio_chip;
- err = gpiochip_add_data(&bank->gpio_chip, bank);
- if (err) {
- dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
- return err;
- }
- dev_info(dev, "%s bank added.\n", range->name);
/**
* GPIO bank can have one of the two possible types of
@@ -1527,23 +1521,40 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
*/
if (of_irq_to_resource(np, 0, &irq_res) > 0) {
- gpio_irq = irq_res.start;
- gpiochip_set_chained_irqchip(&bank->gpio_chip, &st_gpio_irqchip,
- gpio_irq, st_gpio_irq_handler);
- }
+ struct gpio_irq_chip *girq;
+ int gpio_irq = irq_res.start;
- if (info->irqmux_base || gpio_irq > 0) {
- err = gpiochip_irqchip_add(&bank->gpio_chip, &st_gpio_irqchip,
- 0, handle_simple_irq,
- IRQ_TYPE_NONE);
- if (err) {
- gpiochip_remove(&bank->gpio_chip);
- dev_info(dev, "could not add irqchip\n");
- return err;
+ /* This is not a valid IRQ */
+ if (gpio_irq <= 0) {
+ dev_err(dev, "invalid IRQ for %pOF bank\n", np);
+ goto skip_irq;
}
- } else {
- dev_info(dev, "No IRQ support for %pOF bank\n", np);
+ /* We need to have a mux as well */
+ if (!info->irqmux_base) {
+ dev_err(dev, "no irqmux for %pOF bank\n", np);
+ goto skip_irq;
+ }
+
+ girq = &bank->gpio_chip.irq;
+ girq->chip = &st_gpio_irqchip;
+ girq->parent_handler = st_gpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = gpio_irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ }
+
+skip_irq:
+ err = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (err) {
+ dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
+ return err;
}
+ dev_info(dev, "%s bank added.\n", range->name);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index ccdf0bb21414..16723797fa7c 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -505,6 +505,25 @@ static void stmfx_pinctrl_irq_bus_sync_unlock(struct irq_data *data)
mutex_unlock(&pctl->lock);
}
+static int stmfx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = stmfx_gpio_direction_input(gpio_chip, data->hwirq);
+ if (ret)
+ return ret;
+
+ return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+}
+
+static void stmfx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+
+ return gpiochip_relres_irq(gpio_chip, data->hwirq);
+}
+
static void stmfx_pinctrl_irq_toggle_trigger(struct stmfx_pinctrl *pctl,
unsigned int offset)
{
@@ -664,6 +683,8 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->irq_chip.irq_set_type = stmfx_pinctrl_irq_set_type;
pctl->irq_chip.irq_bus_lock = stmfx_pinctrl_irq_bus_lock;
pctl->irq_chip.irq_bus_sync_unlock = stmfx_pinctrl_irq_bus_sync_unlock;
+ pctl->irq_chip.irq_request_resources = stmfx_gpio_irq_request_resources;
+ pctl->irq_chip.irq_release_resources = stmfx_gpio_irq_release_resources;
ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip,
0, handle_bad_irq, IRQ_TYPE_NONE);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 1f64e2e7efd9..ab49bd708969 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -747,7 +747,6 @@ static struct pinctrl_desc tb10x_pindesc = {
static int tb10x_pinctrl_probe(struct platform_device *pdev)
{
int ret = -EINVAL;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct device_node *child;
@@ -768,8 +767,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, state);
mutex_init(&state->mutex);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->base = devm_ioremap_resource(dev, mem);
+ state->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->base)) {
ret = PTR_ERR(state->base);
goto fail;
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 348423bb39dd..cc306448259e 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1055,7 +1055,6 @@ static struct pinctrl_desc u300_pmx_desc = {
static int u300_pmx_probe(struct platform_device *pdev)
{
struct u300_pmx *upmx;
- struct resource *res;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1064,8 +1063,7 @@ static int u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- upmx->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ upmx->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(upmx->virtbase))
return PTR_ERR(upmx->virtbase);
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 913d38f29b73..5e3f31b55eb7 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1705,12 +1705,10 @@ static int pinmux_xway_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct pinctrl_xway_soc *xway_soc;
- struct resource *res;
int ret, i;
/* get and remap our register range */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
+ xway_info.membase[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xway_info.membase[0]))
return PTR_ERR(xway_info.membase[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa25x.c b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
index 8d1247078ae5..95640698422f 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa25x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
@@ -216,25 +216,20 @@ static int pxa25x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa27x.c b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
index 64943e819af6..48ccfb50b23e 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa27x.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa27x.c
@@ -508,25 +508,20 @@ static int pxa27x_pinctrl_probe(struct platform_device *pdev)
void __iomem *base_af[8];
void __iomem *base_dir[4];
void __iomem *base_sleep[4];
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_af[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_af[0]))
return PTR_ERR(base_af[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[0] = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base_dir[0]))
return PTR_ERR(base_dir[0]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ base_dir[3] = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(base_dir[3]))
return PTR_ERR(base_dir[3]);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ base_sleep[0] = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(base_sleep[0]))
return PTR_ERR(base_sleep[0]);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 32fc2458b8eb..811af2f81c39 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -90,6 +90,16 @@ config PINCTRL_MSM8916
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found on the Qualcomm 8916 platform.
+config PINCTRL_MSM8976
+ tristate "Qualcomm 8976 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found on the Qualcomm MSM8976 platform.
+ The Qualcomm MSM8956, APQ8056 and APQ8076 platforms are also
+ supported by this driver.
+
config PINCTRL_MSM8994
tristate "Qualcomm 8994 pin controller driver"
depends on GPIOLIB && OF
@@ -132,32 +142,33 @@ config PINCTRL_QDF2XXX
Qualcomm Technologies QDF2xxx SOCs.
config PINCTRL_QCOM_SPMI_PMIC
- tristate "Qualcomm SPMI PMIC pin controller driver"
- depends on GPIOLIB && OF && SPMI
- select REGMAP_SPMI
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SPMI for communication with SoC. Example PMIC's
- devices are pm8841, pm8941 and pma8084.
+ tristate "Qualcomm SPMI PMIC pin controller driver"
+ depends on GPIOLIB && OF && SPMI
+ select REGMAP_SPMI
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips, which
+ use SPMI to communicate with the SoC. Example PMIC devices are
+ pm8841, pm8941 and pma8084.
config PINCTRL_QCOM_SSBI_PMIC
- tristate "Qualcomm SSBI PMIC pin controller driver"
- depends on GPIOLIB && OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select IRQ_DOMAIN_HIERARCHY
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips,
- which are using SSBI for communication with SoC. Example PMIC's
- devices are pm8058 and pm8921.
+ tristate "Qualcomm SSBI PMIC pin controller driver"
+ depends on GPIOLIB && OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips, which
+ use SSBI to communicate with the SoC. Example PMIC devices are
+ pm8058 and pm8921.
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
@@ -169,30 +180,30 @@ config PINCTRL_SC7180
Technologies Inc SC7180 platform.
config PINCTRL_SDM660
- tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM660 platform.
+ tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM660 platform.
config PINCTRL_SDM845
- tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
- depends on GPIOLIB && (OF || ACPI)
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SDM845 platform.
+ tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
+ depends on GPIOLIB && (OF || ACPI)
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDM845 platform.
config PINCTRL_SM8150
- tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm Technologies Inc TLMM block found on the Qualcomm
- Technologies Inc SM8150 platform.
+ tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8150 platform.
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index f8bb0c265381..c2c2f9ad6827 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_MSM8976) += pinctrl-msm8976.o
obj-$(CONFIG_PINCTRL_MSM8994) += pinctrl-msm8994.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSM8998) += pinctrl-msm8998.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 763da0be10d6..5d6f9f61ce02 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -23,6 +23,8 @@
#include <linux/pm.h>
#include <linux/log2.h>
+#include <linux/soc/qcom/irq.h>
+
#include "../core.h"
#include "../pinconf.h"
#include "pinctrl-msm.h"
@@ -44,6 +46,7 @@
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
+ * @skip_wake_irqs: Skip IRQs that are handled by the wakeup interrupt controller
* @soc: Reference to soc_data of platform-specific data.
* @regs: Base addresses for the TLMM tiles.
*/
@@ -61,6 +64,7 @@ struct msm_pinctrl {
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
@@ -707,6 +711,12 @@ static void msm_gpio_irq_mask(struct irq_data *d)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -751,6 +761,12 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -778,10 +794,35 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
static void msm_gpio_irq_enable(struct irq_data *d)
{
+ /*
+ * Clear the interrupt that may be pending before we enable
+ * the line.
+ * This is especially a problem with the GPIOs routed to the
+ * PDC. These GPIOs are direct-connect interrupts to the GIC.
+ * Disabling the interrupt line at the PDC does not prevent
+ * the interrupt from being latched at the GIC. The state at
+ * GIC needs to be cleared before enabling.
+ */
+ if (d->parent_data) {
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_chip_enable_parent(d);
+ }
msm_gpio_irq_clear_unmask(d, true);
}
+static void msm_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data)
+ irq_chip_disable_parent(d);
+
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ msm_gpio_irq_mask(d);
+}
+
static void msm_gpio_irq_unmask(struct irq_data *d)
{
msm_gpio_irq_clear_unmask(d, false);
@@ -795,6 +836,9 @@ static void msm_gpio_irq_ack(struct irq_data *d)
unsigned long flags;
u32 val;
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -820,6 +864,12 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_set_type_parent(d, type);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return 0;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
@@ -912,6 +962,15 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
+ /*
+ * While they may not wake up when the TLMM is powered off,
+ * some GPIOs should be able to wake up the system from suspend
+ * while the TLMM is powered on. To allow that, mark the GPIO
+ * summary line as wakeup capable at the GIC.
+ */
+ if (d->parent_data)
+ irq_chip_set_wake_parent(d, on);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
@@ -990,6 +1049,30 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static int msm_gpio_wakeirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_gpio_wakeirq_map *map;
+ int i;
+
+ *parent = GPIO_NO_WAKE_IRQ;
+ *parent_type = IRQ_TYPE_EDGE_RISING;
+
+ for (i = 0; i < pctrl->soc->nwakeirq_map; i++) {
+ map = &pctrl->soc->wakeirq_map[i];
+ if (map->gpio == child) {
+ *parent = map->wakeirq;
+ break;
+ }
+ }
+
+ return 0;
+}
+
static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
{
if (pctrl->soc->reserved_gpios)
@@ -1002,8 +1085,10 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
{
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- int ret;
- unsigned ngpio = pctrl->soc->ngpios;
+ int i, ret;
+ unsigned gpio, ngpio = pctrl->soc->ngpios;
+ struct device_node *np;
+ bool skip;
if (WARN_ON(ngpio > MAX_NR_GPIO))
return -EINVAL;
@@ -1020,17 +1105,40 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
pctrl->irq_chip.name = "msmgpio";
pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
+ pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
+ pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
+ if (np) {
+ chip->irq.parent_domain = irq_find_matching_host(np,
+ DOMAIN_BUS_WAKEUP);
+ of_node_put(np);
+ if (!chip->irq.parent_domain)
+ return -EPROBE_DEFER;
+ chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
+
+ /*
+ * Skip handling a GPIO here if the parent irqchip already
+ * handles its direct-connect IRQ.
+ */
+ skip = irq_domain_qcom_handle_wakeup(chip->irq.parent_domain);
+ for (i = 0; skip && i < pctrl->soc->nwakeirq_map; i++) {
+ gpio = pctrl->soc->wakeirq_map[i].gpio;
+ set_bit(gpio, pctrl->skip_wake_irqs);
+ }
+ }
+
girq = &chip->irq;
girq->chip = &pctrl->irq_chip;
girq->parent_handler = msm_gpio_irq_handler;
+ girq->fwnode = pctrl->dev->fwnode;
girq->num_parents = 1;
girq->parents = devm_kcalloc(pctrl->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
@@ -1150,8 +1258,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->regs[i]);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 48569cda8471..9452da18a78b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -92,6 +92,16 @@ struct msm_pingroup {
};
/**
+ * struct msm_gpio_wakeirq_map - Map of GPIOs and their wakeup pins
+ * @gpio: The wakeup-capable GPIO
+ * @wakeirq: The interrupt at the always-on interrupt controller
+ */
+struct msm_gpio_wakeirq_map {
+ unsigned int gpio;
+ unsigned int wakeirq;
+};
+
+/**
* struct msm_pinctrl_soc_data - Qualcomm pin controller driver configuration
* @pins: An array describing all pins the pin controller affects.
* @npins: The number of entries in @pins.
@@ -101,6 +111,8 @@ struct msm_pingroup {
* @ngroups: The number of entries in @groups.
* @ngpios: The number of pingroups the driver should expose as GPIOs.
* @pull_no_keeper: The SoC does not support keeper bias.
+ * @wakeirq_map: The map of wakeup-capable GPIOs and their pins at the PDC/MPM
+ * @nwakeirq_map: The number of entries in @wakeirq_map
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -114,6 +126,8 @@ struct msm_pinctrl_soc_data {
const char *const *tiles;
unsigned int ntiles;
const int *reserved_gpios;
+ const struct msm_gpio_wakeirq_map *wakeirq_map;
+ unsigned int nwakeirq_map;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
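
A SoC driver opts into the new wakeup routing by filling in wakeirq_map and nwakeirq_map; msm_gpio_wakeirq() then translates each wake-capable GPIO to its PDC/MPM pin and leaves everything else at GPIO_NO_WAKE_IRQ. A hedged sketch for a hypothetical SoC (the GPIO and wakeirq numbers are invented for illustration):

/* Hypothetical table; real entries come from the SoC documentation. */
static const struct msm_gpio_wakeirq_map example_wakeirq_map[] = {
	{ .gpio = 3,  .wakeirq = 30 },	/* GPIO 3 -> PDC pin 30 */
	{ .gpio = 24, .wakeirq = 37 },
};

static const struct msm_pinctrl_soc_data example_soc_data = {
	/* ...pins, groups and functions as usual... */
	.wakeirq_map	= example_wakeirq_map,
	.nwakeirq_map	= ARRAY_SIZE(example_wakeirq_map),
};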
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
new file mode 100644
index 000000000000..e1259ce27396
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2016, AngeloGioacchino Del Regno <kholk11@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
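
With REG_SIZE = 0x1000, every GPIO owns its own 4 KiB register tile, so the PINGROUP() arithmetic resolves to small fixed offsets inside that tile. A worked example for PINGROUP(3, ...):

/*
 * PINGROUP(3, ...) register offsets (REG_BASE = 0x0, REG_SIZE = 0x1000):
 *
 *	ctl_reg		= 0x0 + 0x1000 * 3	= 0x3000
 *	io_reg		= 0x4 + 0x1000 * 3	= 0x3004
 *	intr_cfg_reg	= 0x8 + 0x1000 * 3	= 0x3008
 *	intr_status_reg	= 0xc + 0x1000 * 3	= 0x300c
 *
 * intr_target_reg shares the 0x8 config register (field at bit 5).
 */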
+static const struct pinctrl_pin_desc msm8976_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "SDC1_CLK"),
+ PINCTRL_PIN(146, "SDC1_CMD"),
+ PINCTRL_PIN(147, "SDC1_DATA"),
+ PINCTRL_PIN(148, "SDC1_RCLK"),
+ PINCTRL_PIN(149, "SDC2_CLK"),
+ PINCTRL_PIN(150, "SDC2_CMD"),
+ PINCTRL_PIN(151, "SDC2_DATA"),
+ PINCTRL_PIN(152, "QDSD_CLK"),
+ PINCTRL_PIN(153, "QDSD_CMD"),
+ PINCTRL_PIN(154, "QDSD_DATA0"),
+ PINCTRL_PIN(155, "QDSD_DATA1"),
+ PINCTRL_PIN(156, "QDSD_DATA2"),
+ PINCTRL_PIN(157, "QDSD_DATA3"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+
+static const unsigned int sdc1_clk_pins[] = { 145 };
+static const unsigned int sdc1_cmd_pins[] = { 146 };
+static const unsigned int sdc1_data_pins[] = { 147 };
+static const unsigned int sdc1_rclk_pins[] = { 148 };
+static const unsigned int sdc2_clk_pins[] = { 149 };
+static const unsigned int sdc2_cmd_pins[] = { 150 };
+static const unsigned int sdc2_data_pins[] = { 151 };
+static const unsigned int qdsd_clk_pins[] = { 152 };
+static const unsigned int qdsd_cmd_pins[] = { 153 };
+static const unsigned int qdsd_data0_pins[] = { 154 };
+static const unsigned int qdsd_data1_pins[] = { 155 };
+static const unsigned int qdsd_data2_pins[] = { 156 };
+static const unsigned int qdsd_data3_pins[] = { 157 };
+
+enum msm8976_functions {
+ msm_mux_gpio,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_spi1,
+ msm_mux_smb_int,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_i2c2,
+ msm_mux_gcc_gp1_clk_b,
+ msm_mux_blsp_spi3,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_blsp_i2c3,
+ msm_mux_gcc_gp2_clk_b,
+ msm_mux_gcc_gp3_clk_b,
+ msm_mux_blsp_spi4,
+ msm_mux_cap_int,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi5,
+ msm_mux_blsp_uart5,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_m_voc,
+ msm_mux_blsp_i2c5,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_blsp_spi6,
+ msm_mux_blsp_uart6,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_blsp_i2c6,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_mdp_vsync,
+ msm_mux_pri_mi2s_mclk_a,
+ msm_mux_sec_mi2s_mclk_a,
+ msm_mux_cam_mclk,
+ msm_mux_cci0_i2c,
+ msm_mux_cci1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp3_spi,
+ msm_mux_gcc_gp1_clk_a,
+ msm_mux_gcc_gp2_clk_a,
+ msm_mux_gcc_gp3_clk_a,
+ msm_mux_uim_batt,
+ msm_mux_sd_write,
+ msm_mux_uim1_data,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_reset,
+ msm_mux_uim1_present,
+ msm_mux_uim2_data,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_reset,
+ msm_mux_uim2_present,
+ msm_mux_ts_xvdd,
+ msm_mux_mipi_dsi0,
+ msm_mux_us_euro,
+ msm_mux_ts_resout,
+ msm_mux_ts_sample,
+ msm_mux_sec_mi2s_mclk_b,
+ msm_mux_pri_mi2s,
+ msm_mux_codec_reset,
+ msm_mux_cdc_pdm0,
+ msm_mux_us_emitter,
+ msm_mux_pri_mi2s_mclk_b,
+ msm_mux_pri_mi2s_mclk_c,
+ msm_mux_lpass_slimbus,
+ msm_mux_lpass_slimbus0,
+ msm_mux_lpass_slimbus1,
+ msm_mux_codec_int1,
+ msm_mux_codec_int2,
+ msm_mux_wcss_bt,
+ msm_mux_sdc3,
+ msm_mux_wcss_wlan2,
+ msm_mux_wcss_wlan1,
+ msm_mux_wcss_wlan0,
+ msm_mux_wcss_wlan,
+ msm_mux_wcss_fm,
+ msm_mux_key_volp,
+ msm_mux_key_snapshot,
+ msm_mux_key_focus,
+ msm_mux_key_home,
+ msm_mux_pwr_down,
+ msm_mux_dmic0_clk,
+ msm_mux_hdmi_int,
+ msm_mux_dmic0_data,
+ msm_mux_wsa_vi,
+ msm_mux_wsa_en,
+ msm_mux_blsp_spi8,
+ msm_mux_wsa_irq,
+ msm_mux_blsp_i2c8,
+ msm_mux_pa_indicator,
+ msm_mux_modem_tsync,
+ msm_mux_ssbi_wtr1,
+ msm_mux_gsm1_tx,
+ msm_mux_gsm0_tx,
+ msm_mux_sdcard_det,
+ msm_mux_sec_mi2s,
+ msm_mux_ss_switch,
+ msm_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const smb_int_groups[] = {
+ "gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const gcc_gp1_clk_b_groups[] = {
+ "gpio105",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30",
+ "gpio31", "gpio33", "gpio34", "gpio35", "gpio36", "gpio37", "gpio38",
+ "gpio116", "gpio126", "gpio128", "gpio129",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp2_clk_b_groups[] = {
+ "gpio12",
+};
+static const char * const gcc_gp3_clk_b_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const cap_int_groups[] = {
+ "gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio14", "gpio15",
+};
+static const char * const blsp_spi5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const blsp_uart5_groups[] = {
+ "gpio134", "gpio135", "gpio136", "gpio137",
+};
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio46",
+};
+static const char * const m_voc_groups[] = {
+ "gpio123", "gpio124",
+};
+static const char * const blsp_i2c5_groups[] = {
+ "gpio136", "gpio137",
+};
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio45",
+};
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio47", "gpio48", "gpio62", "gpio69", "gpio120",
+ "gpio121", "gpio130", "gpio131",
+};
+static const char * const blsp_spi6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const blsp_uart6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio5",
+};
+static const char * const blsp_i2c6_groups[] = {
+ "gpio22", "gpio23",
+};
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio5",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const pri_mi2s_mclk_a_groups[] = {
+ "gpio126",
+};
+static const char * const sec_mi2s_mclk_a_groups[] = {
+ "gpio62",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio26", "gpio27", "gpio28",
+};
+static const char * const cci0_i2c_groups[] = {
+ "gpio30", "gpio29",
+};
+static const char * const cci1_i2c_groups[] = {
+ "gpio104", "gpio103",
+};
+static const char * const blsp1_spi_groups[] = {
+ "gpio101",
+};
+static const char * const blsp3_spi_groups[] = {
+ "gpio106", "gpio107",
+};
+static const char * const gcc_gp1_clk_a_groups[] = {
+ "gpio49",
+};
+static const char * const gcc_gp2_clk_a_groups[] = {
+ "gpio50",
+};
+static const char * const gcc_gp3_clk_a_groups[] = {
+ "gpio51",
+};
+static const char * const uim_batt_groups[] = {
+ "gpio61",
+};
+static const char * const sd_write_groups[] = {
+ "gpio50",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio51",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio52",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio53",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio54",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio55",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio56",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio57",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio58",
+};
+static const char * const ts_xvdd_groups[] = {
+ "gpio60",
+};
+static const char * const mipi_dsi0_groups[] = {
+ "gpio61",
+};
+static const char * const us_euro_groups[] = {
+ "gpio63",
+};
+static const char * const ts_resout_groups[] = {
+ "gpio64",
+};
+static const char * const ts_sample_groups[] = {
+ "gpio65",
+};
+static const char * const sec_mi2s_mclk_b_groups[] = {
+ "gpio66",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio122", "gpio123", "gpio124", "gpio125", "gpio127",
+};
+static const char * const codec_reset_groups[] = {
+ "gpio67",
+};
+static const char * const cdc_pdm0_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121",
+};
+static const char * const us_emitter_groups[] = {
+ "gpio68",
+};
+static const char * const pri_mi2s_mclk_b_groups[] = {
+ "gpio62",
+};
+static const char * const pri_mi2s_mclk_c_groups[] = {
+ "gpio116",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio117",
+};
+static const char * const lpass_slimbus0_groups[] = {
+ "gpio118",
+};
+static const char * const lpass_slimbus1_groups[] = {
+ "gpio119",
+};
+static const char * const codec_int1_groups[] = {
+ "gpio73",
+};
+static const char * const codec_int2_groups[] = {
+ "gpio74",
+};
+static const char * const wcss_bt_groups[] = {
+ "gpio39", "gpio47", "gpio88",
+};
+static const char * const sdc3_groups[] = {
+ "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44",
+};
+static const char * const wcss_wlan2_groups[] = {
+ "gpio40",
+};
+static const char * const wcss_wlan1_groups[] = {
+ "gpio41",
+};
+static const char * const wcss_wlan0_groups[] = {
+ "gpio42",
+};
+static const char * const wcss_wlan_groups[] = {
+ "gpio43", "gpio44",
+};
+static const char * const wcss_fm_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const key_volp_groups[] = {
+ "gpio85",
+};
+static const char * const key_snapshot_groups[] = {
+ "gpio86",
+};
+static const char * const key_focus_groups[] = {
+ "gpio87",
+};
+static const char * const key_home_groups[] = {
+ "gpio88",
+};
+static const char * const pwr_down_groups[] = {
+ "gpio89",
+};
+static const char * const dmic0_clk_groups[] = {
+ "gpio66",
+};
+static const char * const hdmi_int_groups[] = {
+ "gpio90",
+};
+static const char * const dmic0_data_groups[] = {
+ "gpio67",
+};
+static const char * const wsa_vi_groups[] = {
+ "gpio108", "gpio109",
+};
+static const char * const wsa_en_groups[] = {
+ "gpio96",
+};
+static const char * const blsp_spi8_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const wsa_irq_groups[] = {
+ "gpio97",
+};
+static const char * const blsp_i2c8_groups[] = {
+ "gpio18", "gpio19",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio92",
+};
+static const char * const modem_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const nav_tsync_groups[] = {
+ "gpio93",
+};
+static const char * const ssbi_wtr1_groups[] = {
+ "gpio79", "gpio94",
+};
+static const char * const gsm1_tx_groups[] = {
+ "gpio95",
+};
+static const char * const gsm0_tx_groups[] = {
+ "gpio99",
+};
+static const char * const sdcard_det_groups[] = {
+ "gpio133",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio102", "gpio105", "gpio134", "gpio135",
+};
+
+static const char * const ss_switch_groups[] = {
+ "gpio139",
+};
+
+static const struct msm_function msm8976_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_spi1),
+ FUNCTION(smb_int),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(gcc_gp1_clk_b),
+ FUNCTION(blsp_spi3),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(gcc_gp2_clk_b),
+ FUNCTION(gcc_gp3_clk_b),
+ FUNCTION(blsp_spi4),
+ FUNCTION(cap_int),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi5),
+ FUNCTION(blsp_uart5),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(m_voc),
+ FUNCTION(blsp_i2c5),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(blsp_spi6),
+ FUNCTION(blsp_uart6),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(blsp_i2c6),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(mdp_vsync),
+ FUNCTION(pri_mi2s_mclk_a),
+ FUNCTION(sec_mi2s_mclk_a),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci0_i2c),
+ FUNCTION(cci1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp3_spi),
+ FUNCTION(gcc_gp1_clk_a),
+ FUNCTION(gcc_gp2_clk_a),
+ FUNCTION(gcc_gp3_clk_a),
+ FUNCTION(uim_batt),
+ FUNCTION(sd_write),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim1_present),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_reset),
+ FUNCTION(uim2_present),
+ FUNCTION(ts_xvdd),
+ FUNCTION(mipi_dsi0),
+ FUNCTION(us_euro),
+ FUNCTION(ts_resout),
+ FUNCTION(ts_sample),
+ FUNCTION(sec_mi2s_mclk_b),
+ FUNCTION(pri_mi2s),
+ FUNCTION(codec_reset),
+ FUNCTION(cdc_pdm0),
+ FUNCTION(us_emitter),
+ FUNCTION(pri_mi2s_mclk_b),
+ FUNCTION(pri_mi2s_mclk_c),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(lpass_slimbus0),
+ FUNCTION(lpass_slimbus1),
+ FUNCTION(codec_int1),
+ FUNCTION(codec_int2),
+ FUNCTION(wcss_bt),
+ FUNCTION(sdc3),
+ FUNCTION(wcss_wlan2),
+ FUNCTION(wcss_wlan1),
+ FUNCTION(wcss_wlan0),
+ FUNCTION(wcss_wlan),
+ FUNCTION(wcss_fm),
+ FUNCTION(key_volp),
+ FUNCTION(key_snapshot),
+ FUNCTION(key_focus),
+ FUNCTION(key_home),
+ FUNCTION(pwr_down),
+ FUNCTION(dmic0_clk),
+ FUNCTION(hdmi_int),
+ FUNCTION(dmic0_data),
+ FUNCTION(wsa_vi),
+ FUNCTION(wsa_en),
+ FUNCTION(blsp_spi8),
+ FUNCTION(wsa_irq),
+ FUNCTION(blsp_i2c8),
+ FUNCTION(pa_indicator),
+ FUNCTION(modem_tsync),
+ FUNCTION(ssbi_wtr1),
+ FUNCTION(gsm1_tx),
+ FUNCTION(gsm0_tx),
+ FUNCTION(sdcard_det),
+ FUNCTION(sec_mi2s),
+ FUNCTION(ss_switch),
+};
+
+static const struct msm_pingroup msm8976_groups[] = {
+ PINGROUP(0, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, blsp_spi1, blsp_uart1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_tracectl_b, NA, NA, NA),
+ PINGROUP(5, blsp_spi2, blsp_uart2, NA, NA, NA, qdss_traceclk_b, NA, NA, NA),
+ PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, blsp_spi3, NA, NA, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(9, blsp_spi3, NA, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(10, blsp_spi3, NA, blsp_i2c3, NA, NA, qdss_tracedata_a, NA, NA, NA),
+ PINGROUP(11, blsp_spi3, NA, blsp_i2c3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, blsp_spi4, NA, gcc_gp2_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi4, NA, gcc_gp3_clk_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi4, NA, blsp_i2c4, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, blsp_spi8, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, blsp_spi8, NA, blsp_i2c8, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, blsp_spi6, blsp_uart6, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, blsp_spi6, blsp_uart6, blsp_i2c6, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(27, cam_mclk, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(28, cam_mclk, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(29, cci0_i2c, NA, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(30, cci0_i2c, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(44, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, wcss_fm, NA, NA, qdss_traceclk_a, NA, NA, NA, NA, NA),
+ PINGROUP(47, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, wcss_bt, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, gcc_gp1_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, sd_write, gcc_gp2_clk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, uim2_data, gcc_gp3_clk_a, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, sec_mi2s_mclk_a, pri_mi2s_mclk_b, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, dmic0_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, dmic0_data, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, NA, modem_tsync, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, NA, ssbi_wtr1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, NA, gsm1_tx, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, gsm0_tx, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, blsp1_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, sec_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, sec_mi2s, gcc_gp1_clk_b, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, blsp3_spi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, wsa_vi, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, pri_mi2s_mclk_c, cdc_pdm0, NA, NA, NA, qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(117, lpass_slimbus, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, lpass_slimbus0, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, lpass_slimbus1, cdc_pdm0, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(121, cdc_pdm0, NA, NA, NA, NA, NA, NA, qdss_tracedata_a, NA),
+ PINGROUP(122, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, pri_mi2s, m_voc, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, pri_mi2s_mclk_a, sec_mi2s_mclk_b, NA, NA, NA, NA, NA, NA, qdss_tracedata_b),
+ PINGROUP(127, pri_mi2s, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA, NA),
+ PINGROUP(129, qdss_tracedata_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, qdss_tracedata_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, blsp_spi5, blsp_uart5, sec_mi2s, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, blsp_spi5, blsp_uart5, blsp_i2c5, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x10a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x10a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x10a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x10a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x109000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x109000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x109000, 9, 0),
+ SDC_QDSD_PINGROUP(qdsd_clk, 0x19c000, 3, 0),
+ SDC_QDSD_PINGROUP(qdsd_cmd, 0x19c000, 8, 5),
+ SDC_QDSD_PINGROUP(qdsd_data0, 0x19c000, 13, 10),
+ SDC_QDSD_PINGROUP(qdsd_data1, 0x19c000, 18, 15),
+ SDC_QDSD_PINGROUP(qdsd_data2, 0x19c000, 23, 20),
+ SDC_QDSD_PINGROUP(qdsd_data3, 0x19c000, 28, 25),
+};
+
+static const struct msm_pinctrl_soc_data msm8976_pinctrl = {
+ .pins = msm8976_pins,
+ .npins = ARRAY_SIZE(msm8976_pins),
+ .functions = msm8976_functions,
+ .nfunctions = ARRAY_SIZE(msm8976_functions),
+ .groups = msm8976_groups,
+ .ngroups = ARRAY_SIZE(msm8976_groups),
+ .ngpios = 145,
+};
+
+static int msm8976_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8976_pinctrl);
+}
+
+static const struct of_device_id msm8976_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8976-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8976_pinctrl_driver = {
+ .driver = {
+ .name = "msm8976-pinctrl",
+ .of_match_table = msm8976_pinctrl_of_match,
+ },
+ .probe = msm8976_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8976_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8976_pinctrl_driver);
+}
+arch_initcall(msm8976_pinctrl_init);
+
+static void __exit msm8976_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8976_pinctrl_driver);
+}
+module_exit(msm8976_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm msm8976 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8976_pinctrl_of_match);
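
The msm pinctrl core selects a function by its index into msm8976_functions[], then scans each group's PINGROUP() entry for the matching msm_mux_* enum value, so the enum and the FUNCTION() table above must stay in the same order. A minimal userspace sketch of that lookup, with simplified stand-ins for the kernel's struct msm_function and struct msm_pingroup (names and table contents here are illustrative only):

#include <stdio.h>

/* Simplified stand-ins for the kernel's function and group tables. */
enum { mux_gpio, mux_blsp_uart1, mux_blsp_spi1 };

static const char * const function_names[] = {
    [mux_gpio]       = "gpio",
    [mux_blsp_uart1] = "blsp_uart1",
    [mux_blsp_spi1]  = "blsp_spi1",
};

/* What PINGROUP() emits per pin: enum values, in hardware mux order. */
static const int gpio0_funcs[] = { mux_gpio, mux_blsp_spi1, mux_blsp_uart1 };

/* Mimics msm_pinmux_set_mux(): 'selector' is an index into the function
 * table, matched against the enum values stored in the group. If the
 * enum and the table disagree on order, the wrong mux value is written. */
static int set_mux(int selector)
{
    for (unsigned int i = 0; i < sizeof(gpio0_funcs) / sizeof(gpio0_funcs[0]); i++) {
        if (gpio0_funcs[i] == selector) {
            printf("gpio0: mux value %u selects %s\n", i,
                   function_names[selector]);
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    return set_mux(mux_blsp_uart1);
}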
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6399c8a2bc22..d6cfad7417b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -77,6 +77,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -102,6 +103,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = SOUTH, \
.mux_bit = -1, \
.pull_bit = 3, \
.drv_bit = 0, \
@@ -1087,14 +1089,14 @@ static const struct msm_pingroup sc7180_groups[] = {
[116] = PINGROUP(116, WEST, qup04, qup04, _, _, _, _, _, _, _),
[117] = PINGROUP(117, WEST, dp_hot, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, WEST, _, _, _, _, _, _, _, _, _),
- [119] = UFS_RESET(ufs_reset, 0x97f000),
- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x97a000, 15, 0),
- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x97a000, 13, 6),
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x97a000, 11, 3),
- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x97a000, 9, 0),
- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x97b000, 14, 6),
- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x97b000, 11, 3),
- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x97b000, 9, 0),
+ [119] = UFS_RESET(ufs_reset, 0x7f000),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x7a000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x7a000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x7a000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x7a000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x7b000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x7b000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x7b000, 9, 0),
};
static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
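
For context on the hunk above: sc7180 is a tile-based TLMM, so the driver maps the SOUTH/NORTH/WEST tiles separately and every register offset is relative to its tile's base, which is why adding .tile = SOUTH goes hand in hand with dropping the 0x9xxxxx prefix from the SDC/UFS offsets. A hedged sketch of the resulting address computation (field names modeled on, not copied from, the msm driver):

#include <stdint.h>
#include <stdio.h>

enum tile { SOUTH, NORTH, WEST, NTILES };

struct pingroup {
    enum tile tile;   /* which tile's mapping this group lives in */
    uint32_t ctl_reg; /* register offset relative to the tile base */
};

/* Per-tile virtual bases, as ioremap() would hand back (fake values). */
static const uintptr_t tile_base[NTILES] = { 0x1000000, 0x2000000, 0x3000000 };

static uintptr_t group_reg(const struct pingroup *g)
{
    return tile_base[g->tile] + g->ctl_reg;
}

int main(void)
{
    /* sdc1_rclk after the fix: SOUTH tile, offset 0x7a000 */
    const struct pingroup sdc1_rclk = { .tile = SOUTH, .ctl_reg = 0x7a000 };

    printf("sdc1_rclk ctl reg at %#lx\n", (unsigned long)group_reg(&sdc1_rclk));
    return 0;
}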
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index ce495970459d..2834d2c1338c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
@@ -1282,6 +1282,24 @@ static const int sdm845_acpi_reserved_gpios[] = {
0, 1, 2, 3, 81, 82, 83, 84, -1
};
+static const struct msm_gpio_wakeirq_map sdm845_pdc_map[] = {
+ { 1, 30 }, { 3, 31 }, { 5, 32 }, { 10, 33 }, { 11, 34 },
+ { 20, 35 }, { 22, 36 }, { 24, 37 }, { 26, 38 }, { 30, 39 },
+ { 31, 117 }, { 32, 41 }, { 34, 42 }, { 36, 43 }, { 37, 44 },
+ { 38, 45 }, { 39, 46 }, { 40, 47 }, { 41, 115 }, { 43, 49 },
+ { 44, 50 }, { 46, 51 }, { 48, 52 }, { 49, 118 }, { 52, 54 },
+ { 53, 55 }, { 54, 56 }, { 56, 57 }, { 57, 58 }, { 58, 59 },
+ { 59, 60 }, { 60, 61 }, { 61, 62 }, { 62, 63 }, { 63, 64 },
+ { 64, 65 }, { 66, 66 }, { 68, 67 }, { 71, 68 }, { 73, 69 },
+ { 77, 70 }, { 78, 71 }, { 79, 72 }, { 80, 73 }, { 84, 74 },
+ { 85, 75 }, { 86, 76 }, { 88, 77 }, { 89, 116 }, { 91, 79 },
+ { 92, 80 }, { 95, 81 }, { 96, 82 }, { 97, 83 }, { 101, 84 },
+ { 103, 85 }, { 104, 86 }, { 115, 90 }, { 116, 91 }, { 117, 92 },
+ { 118, 93 }, { 119, 94 }, { 120, 95 }, { 121, 96 }, { 122, 97 },
+ { 123, 98 }, { 124, 99 }, { 125, 100 }, { 127, 102 }, { 128, 103 },
+ { 129, 104 }, { 130, 105 }, { 132, 106 }, { 133, 107 }, { 145, 108 },
+};
+
static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.pins = sdm845_pins,
.npins = ARRAY_SIZE(sdm845_pins),
@@ -1290,6 +1308,8 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.groups = sdm845_groups,
.ngroups = ARRAY_SIZE(sdm845_groups),
.ngpios = 151,
+ .wakeirq_map = sdm845_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
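
Each sdm845_pdc_map entry above pairs a GPIO number with its PDC wakeup interrupt; the shared msm core scans this table when a GPIO is armed as a wake source. A standalone illustration of that lookup (not the kernel function itself, just the shape of it):

#include <stddef.h>
#include <stdio.h>

struct msm_gpio_wakeirq_map { unsigned int gpio, wakeirq; };

static const struct msm_gpio_wakeirq_map pdc_map[] = {
    { 1, 30 }, { 3, 31 }, { 5, 32 }, /* ... trimmed ... */
};

/* Returns the PDC wakeirq for @gpio, or -1 if the pin cannot wake. */
static int gpio_to_wakeirq(unsigned int gpio)
{
    for (size_t i = 0; i < sizeof(pdc_map) / sizeof(pdc_map[0]); i++)
        if (pdc_map[i].gpio == gpio)
            return pdc_map[i].wakeirq;
    return -1;
}

int main(void)
{
    printf("gpio3 -> wakeirq %d\n", gpio_to_wakeirq(3));
    return 0;
}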
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index f1fece5b9c06..653d1095bfea 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1108,6 +1108,9 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
+ /* pm8950 has 8 GPIOs with holes on 3 */
+ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pmi8950-gpio", .data = (void *) 2 },
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
@@ -1121,6 +1124,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8150b-gpio", .data = (void *) 12 },
/* pm8150l has 12 GPIOs with holes on 7 */
{ .compatible = "qcom,pm8150l-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 },
+ { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 },
{ },
};
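
The new of_device_id entries above keep each PMIC's pin count in .data as a casted integer, the same trick the existing rows use; at probe time the driver reads it back rather than carrying a per-chip descriptor (in the kernel this goes through of_device_get_match_data()). A standalone model of the pattern:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct of_device_id: .data smuggles the pin count. */
struct of_device_id_like {
    const char *compatible;
    const void *data;
};

static const struct of_device_id_like match_table[] = {
    { "qcom,pm8950-gpio",  (void *)(uintptr_t)8 },
    { "qcom,pmi8950-gpio", (void *)(uintptr_t)2 },
    { NULL, NULL },
};

int main(void)
{
    for (const struct of_device_id_like *m = match_table; m->compatible; m++)
        printf("%s -> %u pins\n", m->compatible,
               (unsigned int)(uintptr_t)m->data);
    return 0;
}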
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 91407b024cf3..48602dba4967 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -915,6 +915,8 @@ static const struct of_device_id pmic_mpp_of_match[] = {
{ .compatible = "qcom,pm8841-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8916-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8941-mpp" }, /* 8 MPP's */
+ { .compatible = "qcom,pm8950-mpp" }, /* 4 MPP's */
+ { .compatible = "qcom,pmi8950-mpp" }, /* 4 MPP's */
{ .compatible = "qcom,pm8994-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,pma8084-mpp" }, /* 8 MPP's */
{ .compatible = "qcom,spmi-mpp" }, /* Generic */
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c1f7d0799ebe..dca86886b1f9 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -56,7 +56,6 @@
/**
* struct pm8xxx_pin_data - dynamic configuration for a pin
* @reg: address of the control register
- * @irq: IRQ from the PMIC interrupt controller
* @power_source: logical selected voltage source, mapping in static data
* is used translate to register values
* @mode: operating mode for the pin (input/output)
@@ -72,7 +71,6 @@
*/
struct pm8xxx_pin_data {
unsigned reg;
- int irq;
u8 power_source;
u8 mode;
bool open_drain;
@@ -93,9 +91,6 @@ struct pm8xxx_gpio {
struct pinctrl_desc desc;
unsigned npins;
-
- struct fwnode_handle *fwnode;
- struct irq_domain *domain;
};
static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
@@ -491,13 +486,16 @@ static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+ int ret, irq;
bool state;
- int ret;
- if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
- ret = pin->output_value;
- } else if (pin->irq >= 0) {
- ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+ if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT)
+ return pin->output_value;
+
+ irq = chip->to_irq(chip, offset);
+ if (irq >= 0) {
+ ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_LINE_LEVEL,
+ &state);
if (!ret)
ret = !!state;
} else
@@ -535,37 +533,6 @@ static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
}
-static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
- struct irq_fwspec fwspec;
- int ret;
-
- fwspec.fwnode = pctrl->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
-
- ret = irq_create_fwspec_mapping(&fwspec);
-
- /*
- * Cache the IRQ since pm8xxx_gpio_get() needs this to get determine the
- * line level.
- */
- pin->irq = ret;
-
- return ret;
-}
-
-static void pm8xxx_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip);
- struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
-
- pin->irq = -1;
-}
-
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
@@ -624,13 +591,11 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
#endif
static const struct gpio_chip pm8xxx_gpio_template = {
- .free = pm8xxx_gpio_free,
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
.set = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
- .to_irq = pm8xxx_gpio_to_irq,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
};
@@ -712,43 +677,24 @@ static int pm8xxx_domain_translate(struct irq_domain *domain,
return 0;
}
-static int pm8xxx_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *data)
+static unsigned int pm8xxx_child_offset_to_irq(struct gpio_chip *chip,
+ unsigned int offset)
{
- struct pm8xxx_gpio *pctrl = container_of(domain->host_data,
- struct pm8xxx_gpio, chip);
- struct irq_fwspec *fwspec = data;
- struct irq_fwspec parent_fwspec;
- irq_hw_number_t hwirq;
- unsigned int type;
- int ret, i;
-
- ret = pm8xxx_domain_translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
- &pm8xxx_irq_chip, pctrl, handle_level_irq,
- NULL, NULL);
+ return offset + PM8XXX_GPIO_PHYSICAL_OFFSET;
+}
- parent_fwspec.fwnode = domain->parent->fwnode;
- parent_fwspec.param_count = 2;
- parent_fwspec.param[0] = hwirq + 0xc0;
- parent_fwspec.param[1] = fwspec->param[1];
+static int pm8xxx_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child_hwirq,
+ unsigned int child_type,
+ unsigned int *parent_hwirq,
+ unsigned int *parent_type)
+{
+ *parent_hwirq = child_hwirq + 0xc0;
+ *parent_type = child_type;
- return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
- &parent_fwspec);
+ return 0;
}
-static const struct irq_domain_ops pm8xxx_domain_ops = {
- .activate = gpiochip_irq_domain_activate,
- .alloc = pm8xxx_domain_alloc,
- .deactivate = gpiochip_irq_domain_deactivate,
- .free = irq_domain_free_irqs_common,
- .translate = pm8xxx_domain_translate,
-};
-
static const struct of_device_id pm8xxx_gpio_of_match[] = {
{ .compatible = "qcom,pm8018-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pm8038-gpio", .data = (void *) 12 },
@@ -765,6 +711,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent_domain;
struct device_node *parent_node;
struct pinctrl_pin_desc *pins;
+ struct gpio_irq_chip *girq;
struct pm8xxx_gpio *pctrl;
int ret, i;
@@ -800,7 +747,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
for (i = 0; i < pctrl->desc.npins; i++) {
pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
- pin_data[i].irq = -1;
ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
if (ret)
@@ -841,19 +787,21 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
- pctrl->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
- pctrl->domain = irq_domain_create_hierarchy(parent_domain, 0,
- pctrl->chip.ngpio,
- pctrl->fwnode,
- &pm8xxx_domain_ops,
- &pctrl->chip);
- if (!pctrl->domain)
- return -ENODEV;
+ girq = &pctrl->chip.irq;
+ girq->chip = &pm8xxx_irq_chip;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
+ girq->parent_domain = parent_domain;
+ girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
+ girq->populate_parent_fwspec = gpiochip_populate_parent_fwspec_twocell;
+ girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
+ girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(&pdev->dev, "failed register gpiochip\n");
- goto err_chip_add_data;
+ return ret;
}
/*
@@ -883,8 +831,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
unregister_gpiochip:
gpiochip_remove(&pctrl->chip);
-err_chip_add_data:
- irq_domain_remove(pctrl->domain);
return ret;
}
@@ -894,7 +840,6 @@ static int pm8xxx_gpio_remove(struct platform_device *pdev)
struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- irq_domain_remove(pctrl->domain);
return 0;
}
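
The conversion above hands the IRQ hierarchy to gpiolib: instead of a private irq_domain, the driver now supplies pm8xxx_child_to_parent_hwirq(), which offsets a GPIO-local hwirq by 0xc0 to reach the PMIC interrupt controller's numbering, exactly as the deleted pm8xxx_domain_alloc() did. A standalone sketch of that translation step, with plain ints in place of the gpio_chip plumbing:

#include <stdio.h>

#define PM8XXX_PARENT_HWIRQ_BASE 0xc0 /* as in pm8xxx_child_to_parent_hwirq() */

/* Child (GPIO) hwirq -> parent (PMIC irqchip) hwirq; the trigger type
 * passes through unchanged, mirroring *parent_type = child_type. */
static void child_to_parent(unsigned int child_hwirq, unsigned int child_type,
                            unsigned int *parent_hwirq, unsigned int *parent_type)
{
    *parent_hwirq = child_hwirq + PM8XXX_PARENT_HWIRQ_BASE;
    *parent_type = child_type;
}

int main(void)
{
    unsigned int phw, ptype;

    child_to_parent(5, 1 /* edge rising */, &phw, &ptype);
    printf("gpio hwirq 5 -> parent hwirq %#x, type %u\n", phw, ptype);
    return 0;
}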
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index ebc27b06718c..0599f5127b01 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -486,8 +486,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
if (match) {
irq_chip = kmemdup(match->data,
sizeof(*irq_chip), GFP_KERNEL);
- if (!irq_chip)
+ if (!irq_chip) {
+ of_node_put(np);
return -ENOMEM;
+ }
wkup_np = np;
break;
}
@@ -504,6 +506,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
bank->nr_pins, &exynos_eint_irqd_ops, bank);
if (!bank->irq_domain) {
dev_err(dev, "wkup irq domain add failed\n");
+ of_node_put(wkup_np);
return -ENXIO;
}
@@ -518,8 +521,10 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
weint_data = devm_kcalloc(dev,
bank->nr_pins, sizeof(*weint_data),
GFP_KERNEL);
- if (!weint_data)
+ if (!weint_data) {
+ of_node_put(wkup_np);
return -ENOMEM;
+ }
for (idx = 0; idx < bank->nr_pins; ++idx) {
irq = irq_of_parse_and_map(bank->of_node, idx);
@@ -536,10 +541,13 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
}
}
- if (!muxed_banks)
+ if (!muxed_banks) {
+ of_node_put(wkup_np);
return 0;
+ }
irq = irq_of_parse_and_map(wkup_np, 0);
+ of_node_put(wkup_np);
if (!irq) {
dev_err(dev, "irq number for muxed EINTs not found\n");
return 0;
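
This fix and the s3c24xx/s3c64xx/samsung ones that follow all apply the same rule: OF iterators and lookups return a device_node with an elevated refcount, so every early return must drop it with of_node_put(). A toy standalone model of the idiom (struct device_node replaced by a bare refcount):

#include <stdio.h>

/* Toy node with a refcount, standing in for struct device_node. */
struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n) { n->refcount--; }

/* Mirrors the fixed pattern: take a reference, and drop it on *every*
 * path out of the loop, error and success alike. */
static int scan(struct node *children, int nchildren)
{
    for (int i = 0; i < nchildren; i++) {
        struct node *np = node_get(&children[i]); /* as the iterator does */

        if (i == 1) {     /* simulated -ENOMEM path */
            node_put(np); /* the fix: put before returning early */
            return -1;
        }
        node_put(np);     /* advancing the loop drops it too */
    }
    return 0;
}

int main(void)
{
    struct node kids[3] = { {0}, {0}, {0} };

    scan(kids, 3);
    for (int i = 0; i < 3; i++) /* all zero means no leaked references */
        printf("node %d refcount %d\n", i, kids[i].refcount);
    return 0;
}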
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 7e824e4d20f4..9bd0a3de101d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -490,8 +490,10 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
- if (!eint_data)
+ if (!eint_data) {
+ of_node_put(eint_np);
return -ENOMEM;
+ }
eint_data->drvdata = d;
@@ -503,12 +505,14 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint_np);
return -ENXIO;
}
eint_data->parents[i] = irq;
irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
}
+ of_node_put(eint_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index c399f0932af5..f97f8179f2b1 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -704,8 +704,10 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ of_node_put(eint0_np);
return -ENOMEM;
+ }
data->drvdata = d;
for (i = 0; i < NUM_EINT0_IRQ; ++i) {
@@ -714,6 +716,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
irq = irq_of_parse_and_map(eint0_np, i);
if (!irq) {
dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
+ of_node_put(eint0_np);
return -ENXIO;
}
@@ -721,6 +724,7 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d)
s3c64xx_eint0_handlers[i],
data);
}
+ of_node_put(eint0_np);
bank = d->pin_banks;
for (i = 0; i < d->nr_banks; ++i, ++bank) {
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index de0477bb469d..f26574ef234a 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -272,6 +272,7 @@ static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
&reserved_maps, num_maps);
if (ret < 0) {
samsung_dt_free_map(pctldev, *map, *num_maps);
+ of_node_put(np);
return ret;
}
}
@@ -785,8 +786,10 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
@@ -797,8 +800,11 @@ static struct samsung_pmx_func *samsung_pinctrl_create_functions(
for_each_child_of_node(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(func_np);
+ of_node_put(cfg_np);
return ERR_PTR(ret);
+ }
if (ret > 0) {
++func;
++func_cnt;
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 2dd716b016a3..28d66e7cb098 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -17,6 +17,7 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
select PINCTRL_PFC_R8A77470 if ARCH_R8A77470
select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1
+ select PINCTRL_PFC_R8A774B1 if ARCH_R8A774B1
select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0
select PINCTRL_PFC_R8A7778 if ARCH_R8A7778
select PINCTRL_PFC_R8A7779 if ARCH_R8A7779
@@ -26,7 +27,8 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_R8A7793 if ARCH_R8A7793
select PINCTRL_PFC_R8A7794 if ARCH_R8A7794
select PINCTRL_PFC_R8A7795 if ARCH_R8A7795
- select PINCTRL_PFC_R8A7796 if ARCH_R8A7796
+ select PINCTRL_PFC_R8A77960 if ARCH_R8A77960 || ARCH_R8A7796
+ select PINCTRL_PFC_R8A77961 if ARCH_R8A77961
select PINCTRL_PFC_R8A77965 if ARCH_R8A77965
select PINCTRL_PFC_R8A77970 if ARCH_R8A77970
select PINCTRL_PFC_R8A77980 if ARCH_R8A77980
@@ -86,6 +88,9 @@ config PINCTRL_PFC_R8A77470
config PINCTRL_PFC_R8A774A1
bool "RZ/G2M pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A774B1
+ bool "RZ/G2N pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A774C0
bool "RZ/G2E pin control support" if COMPILE_TEST
@@ -113,9 +118,12 @@ config PINCTRL_PFC_R8A7794
config PINCTRL_PFC_R8A7795
bool "R-Car H3 pin control support" if COMPILE_TEST
-config PINCTRL_PFC_R8A7796
+config PINCTRL_PFC_R8A77960
bool "R-Car M3-W pin control support" if COMPILE_TEST
+config PINCTRL_PFC_R8A77961
+ bool "R-Car M3-W+ pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A77965
bool "R-Car M3-N pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 8c95abcfcc00..3bc05666e1a6 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o
obj-$(CONFIG_PINCTRL_PFC_R8A774A1) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A774B1) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A774C0) += pfc-r8a77990.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
@@ -19,7 +20,8 @@ obj-$(CONFIG_PINCTRL_PFC_R8A7793) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7794) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795.o
obj-$(CONFIG_PINCTRL_PFC_R8A7795) += pfc-r8a7795-es1.o
-obj-$(CONFIG_PINCTRL_PFC_R8A7796) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77960) += pfc-r8a7796.o
+obj-$(CONFIG_PINCTRL_PFC_R8A77961) += pfc-r8a7796.o
obj-$(CONFIG_PINCTRL_PFC_R8A77965) += pfc-r8a77965.o
obj-$(CONFIG_PINCTRL_PFC_R8A77970) += pfc-r8a77970.o
obj-$(CONFIG_PINCTRL_PFC_R8A77980) += pfc-r8a77980.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index b8640ad41bef..65e52688f091 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,12 +29,12 @@
static int sh_pfc_map_resources(struct sh_pfc *pfc,
struct platform_device *pdev)
{
- unsigned int num_windows, num_irqs;
struct sh_pfc_window *windows;
unsigned int *irqs = NULL;
+ unsigned int num_windows;
struct resource *res;
unsigned int i;
- int irq;
+ int num_irqs;
/* Count the MEM and IRQ resources. */
for (num_windows = 0;; num_windows++) {
@@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
if (!res)
break;
}
- for (num_irqs = 0;; num_irqs++) {
- irq = platform_get_irq(pdev, num_irqs);
- if (irq == -EPROBE_DEFER)
- return irq;
- if (irq < 0)
- break;
- }
-
if (num_windows == 0)
return -EINVAL;
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0)
+ return num_irqs;
+
/* Allocate memory windows and IRQs arrays. */
windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows),
GFP_KERNEL);
@@ -518,6 +514,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a774a1_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+ {
+ .compatible = "renesas,pfc-r8a774b1",
+ .data = &r8a774b1_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A774C0
{
.compatible = "renesas,pfc-r8a774c0",
@@ -579,10 +581,16 @@ static const struct of_device_id sh_pfc_of_table[] = {
},
#endif /* DEBUG */
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
{
.compatible = "renesas,pfc-r8a7796",
- .data = &r8a7796_pinmux_info,
+ .data = &r8a77960_pinmux_info,
+ },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+ {
+ .compatible = "renesas,pfc-r8a77961",
+ .data = &r8a77961_pinmux_info,
},
#endif
#ifdef CONFIG_PINCTRL_PFC_R8A77965
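
The core.c change above replaces an open-coded platform_get_irq() loop with platform_irq_count(), which either returns the number of IRQ resources or a negative error, including -EPROBE_DEFER, that the caller can forward unchanged. A standalone model of the control flow the new code relies on:

#include <stdio.h>

#define EPROBE_DEFER 517 /* kernel errno value, returned negated */

/* Model of platform_irq_count(): a count, or -EPROBE_DEFER while the
 * interrupt parent is not ready yet. */
static int fake_platform_irq_count(int parent_ready, int nirqs)
{
    return parent_ready ? nirqs : -EPROBE_DEFER;
}

static int map_resources(int parent_ready)
{
    int num_irqs = fake_platform_irq_count(parent_ready, 3);

    if (num_irqs < 0)
        return num_irqs; /* propagate errors, incl. probe deferral */
    printf("allocating %d irq slots\n", num_irqs);
    return 0;
}

int main(void)
{
    printf("deferred: %d\n", map_resources(0));
    printf("ready:    %d\n", map_resources(1));
    return 0;
}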
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95f9aae3bfba..ad05da8f6516 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -718,7 +718,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_MSEL(IP1_27_24, A20, I2C_SEL_3_0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 7df010f757b1..d3145aa135d0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -726,7 +726,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 61db7c7a35ec..a2496baca85d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A7796 processor support - PFC hardware block.
+ * R8A7796 (R-Car M3-W/W+) support - PFC hardware block.
*
* Copyright (C) 2016-2019 Renesas Electronics Corp.
*
@@ -729,7 +729,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -6210,8 +6210,8 @@ const struct sh_pfc_soc_info r8a774a1_pinmux_info = {
};
#endif
-#ifdef CONFIG_PINCTRL_PFC_R8A7796
-const struct sh_pfc_soc_info r8a7796_pinmux_info = {
+#ifdef CONFIG_PINCTRL_PFC_R8A77960
+const struct sh_pfc_soc_info r8a77960_pinmux_info = {
.name = "r8a77960_pfc",
.ops = &r8a7796_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
@@ -6236,3 +6236,30 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77961
+const struct sh_pfc_soc_info r8a77961_pinmux_info = {
+ .name = "r8a77961_pfc",
+ .ops = &r8a7796_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
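
The r8a77961 block above counts groups and functions as ARRAY_SIZE(common) + ARRAY_SIZE(automotive); this relies on the two fixed-size arrays being laid out back-to-back inside one struct (the r8a77965 file below is converted to the same scheme), so SoCs without the automotive IP can simply stop after the common entries. A standalone sketch of the layout trick, with the caveat that it assumes no padding between the members:

#include <stddef.h>
#include <stdio.h>

struct group { const char *name; };

/* Two fixed-size arrays in one struct sit contiguously, so indexing
 * past .common[] lands in .automotive[] (the sh-pfc counting trick). */
static const struct {
    struct group common[2];
    struct group automotive[1];
} groups = {
    .common     = { { "i2c0" }, { "scif0" } },
    .automotive = { { "drif0" } },
};

int main(void)
{
    const struct group *g = groups.common;
    size_t all = sizeof(groups.common) / sizeof(struct group)
               + sizeof(groups.automotive) / sizeof(struct group);

    for (size_t i = 0; i < all; i++) /* walks common, then automotive */
        printf("group %zu: %s\n", i, g[i].name);
    return 0;
}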
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
index 697c77a4ea95..8bdf33c807f6 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c
@@ -732,7 +732,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, HRX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, VI4_DATA7_B, I2C_SEL_3_0, SEL_VIN4_1),
PINMUX_IPSR_PHYS_MSEL(IP1_23_20, IERX_B, I2C_SEL_3_0, SEL_IEBUS_1),
- PINMUX_IPSR_PHYS(IP0_23_20, SCL3, I2C_SEL_3_1),
+ PINMUX_IPSR_PHYS(IP1_23_20, SCL3, I2C_SEL_3_1),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, PWM2_A, I2C_SEL_3_0, SEL_PWM2_0),
PINMUX_IPSR_PHYS_MSEL(IP1_27_24, HTX3_D, I2C_SEL_3_0, SEL_HSCIF3_3),
@@ -4378,355 +4378,362 @@ static const unsigned int vin5_clk_mux[] = {
VI5_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a_a),
- SH_PFC_PIN_GROUP(audio_clk_a_b),
- SH_PFC_PIN_GROUP(audio_clk_a_c),
- SH_PFC_PIN_GROUP(audio_clk_b_a),
- SH_PFC_PIN_GROUP(audio_clk_b_b),
- SH_PFC_PIN_GROUP(audio_clk_c_a),
- SH_PFC_PIN_GROUP(audio_clk_c_b),
- SH_PFC_PIN_GROUP(audio_clkout_a),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(audio_clkout1_a),
- SH_PFC_PIN_GROUP(audio_clkout1_b),
- SH_PFC_PIN_GROUP(audio_clkout2_a),
- SH_PFC_PIN_GROUP(audio_clkout2_b),
- SH_PFC_PIN_GROUP(audio_clkout3_a),
- SH_PFC_PIN_GROUP(audio_clkout3_b),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
- SH_PFC_PIN_GROUP(avb_avtp_match_b),
- SH_PFC_PIN_GROUP(avb_avtp_capture_b),
- SH_PFC_PIN_GROUP(can0_data_a),
- SH_PFC_PIN_GROUP(can0_data_b),
- SH_PFC_PIN_GROUP(can1_data),
- SH_PFC_PIN_GROUP(can_clk),
- SH_PFC_PIN_GROUP(canfd0_data_a),
- SH_PFC_PIN_GROUP(canfd0_data_b),
- SH_PFC_PIN_GROUP(canfd1_data),
- SH_PFC_PIN_GROUP(drif0_ctrl_a),
- SH_PFC_PIN_GROUP(drif0_data0_a),
- SH_PFC_PIN_GROUP(drif0_data1_a),
- SH_PFC_PIN_GROUP(drif0_ctrl_b),
- SH_PFC_PIN_GROUP(drif0_data0_b),
- SH_PFC_PIN_GROUP(drif0_data1_b),
- SH_PFC_PIN_GROUP(drif0_ctrl_c),
- SH_PFC_PIN_GROUP(drif0_data0_c),
- SH_PFC_PIN_GROUP(drif0_data1_c),
- SH_PFC_PIN_GROUP(drif1_ctrl_a),
- SH_PFC_PIN_GROUP(drif1_data0_a),
- SH_PFC_PIN_GROUP(drif1_data1_a),
- SH_PFC_PIN_GROUP(drif1_ctrl_b),
- SH_PFC_PIN_GROUP(drif1_data0_b),
- SH_PFC_PIN_GROUP(drif1_data1_b),
- SH_PFC_PIN_GROUP(drif1_ctrl_c),
- SH_PFC_PIN_GROUP(drif1_data0_c),
- SH_PFC_PIN_GROUP(drif1_data1_c),
- SH_PFC_PIN_GROUP(drif2_ctrl_a),
- SH_PFC_PIN_GROUP(drif2_data0_a),
- SH_PFC_PIN_GROUP(drif2_data1_a),
- SH_PFC_PIN_GROUP(drif2_ctrl_b),
- SH_PFC_PIN_GROUP(drif2_data0_b),
- SH_PFC_PIN_GROUP(drif2_data1_b),
- SH_PFC_PIN_GROUP(drif3_ctrl_a),
- SH_PFC_PIN_GROUP(drif3_data0_a),
- SH_PFC_PIN_GROUP(drif3_data1_a),
- SH_PFC_PIN_GROUP(drif3_ctrl_b),
- SH_PFC_PIN_GROUP(drif3_data0_b),
- SH_PFC_PIN_GROUP(drif3_data1_b),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync),
- SH_PFC_PIN_GROUP(du_oddf),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_a),
- SH_PFC_PIN_GROUP(hscif1_clk_a),
- SH_PFC_PIN_GROUP(hscif1_ctrl_a),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_a),
- SH_PFC_PIN_GROUP(hscif2_clk_a),
- SH_PFC_PIN_GROUP(hscif2_ctrl_a),
- SH_PFC_PIN_GROUP(hscif2_data_b),
- SH_PFC_PIN_GROUP(hscif2_clk_b),
- SH_PFC_PIN_GROUP(hscif2_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_c),
- SH_PFC_PIN_GROUP(hscif2_clk_c),
- SH_PFC_PIN_GROUP(hscif2_ctrl_c),
- SH_PFC_PIN_GROUP(hscif3_data_a),
- SH_PFC_PIN_GROUP(hscif3_clk),
- SH_PFC_PIN_GROUP(hscif3_ctrl),
- SH_PFC_PIN_GROUP(hscif3_data_b),
- SH_PFC_PIN_GROUP(hscif3_data_c),
- SH_PFC_PIN_GROUP(hscif3_data_d),
- SH_PFC_PIN_GROUP(hscif4_data_a),
- SH_PFC_PIN_GROUP(hscif4_clk),
- SH_PFC_PIN_GROUP(hscif4_ctrl),
- SH_PFC_PIN_GROUP(hscif4_data_b),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c1_a),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c2_a),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(i2c5),
- SH_PFC_PIN_GROUP(i2c6_a),
- SH_PFC_PIN_GROUP(i2c6_b),
- SH_PFC_PIN_GROUP(i2c6_c),
- SH_PFC_PIN_GROUP(intc_ex_irq0),
- SH_PFC_PIN_GROUP(intc_ex_irq1),
- SH_PFC_PIN_GROUP(intc_ex_irq2),
- SH_PFC_PIN_GROUP(intc_ex_irq3),
- SH_PFC_PIN_GROUP(intc_ex_irq4),
- SH_PFC_PIN_GROUP(intc_ex_irq5),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_txd),
- SH_PFC_PIN_GROUP(msiof0_rxd),
- SH_PFC_PIN_GROUP(msiof1_clk_a),
- SH_PFC_PIN_GROUP(msiof1_sync_a),
- SH_PFC_PIN_GROUP(msiof1_ss1_a),
- SH_PFC_PIN_GROUP(msiof1_ss2_a),
- SH_PFC_PIN_GROUP(msiof1_txd_a),
- SH_PFC_PIN_GROUP(msiof1_rxd_a),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_sync_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_txd_b),
- SH_PFC_PIN_GROUP(msiof1_rxd_b),
- SH_PFC_PIN_GROUP(msiof1_clk_c),
- SH_PFC_PIN_GROUP(msiof1_sync_c),
- SH_PFC_PIN_GROUP(msiof1_ss1_c),
- SH_PFC_PIN_GROUP(msiof1_ss2_c),
- SH_PFC_PIN_GROUP(msiof1_txd_c),
- SH_PFC_PIN_GROUP(msiof1_rxd_c),
- SH_PFC_PIN_GROUP(msiof1_clk_d),
- SH_PFC_PIN_GROUP(msiof1_sync_d),
- SH_PFC_PIN_GROUP(msiof1_ss1_d),
- SH_PFC_PIN_GROUP(msiof1_ss2_d),
- SH_PFC_PIN_GROUP(msiof1_txd_d),
- SH_PFC_PIN_GROUP(msiof1_rxd_d),
- SH_PFC_PIN_GROUP(msiof1_clk_e),
- SH_PFC_PIN_GROUP(msiof1_sync_e),
- SH_PFC_PIN_GROUP(msiof1_ss1_e),
- SH_PFC_PIN_GROUP(msiof1_ss2_e),
- SH_PFC_PIN_GROUP(msiof1_txd_e),
- SH_PFC_PIN_GROUP(msiof1_rxd_e),
- SH_PFC_PIN_GROUP(msiof1_clk_f),
- SH_PFC_PIN_GROUP(msiof1_sync_f),
- SH_PFC_PIN_GROUP(msiof1_ss1_f),
- SH_PFC_PIN_GROUP(msiof1_ss2_f),
- SH_PFC_PIN_GROUP(msiof1_txd_f),
- SH_PFC_PIN_GROUP(msiof1_rxd_f),
- SH_PFC_PIN_GROUP(msiof1_clk_g),
- SH_PFC_PIN_GROUP(msiof1_sync_g),
- SH_PFC_PIN_GROUP(msiof1_ss1_g),
- SH_PFC_PIN_GROUP(msiof1_ss2_g),
- SH_PFC_PIN_GROUP(msiof1_txd_g),
- SH_PFC_PIN_GROUP(msiof1_rxd_g),
- SH_PFC_PIN_GROUP(msiof2_clk_a),
- SH_PFC_PIN_GROUP(msiof2_sync_a),
- SH_PFC_PIN_GROUP(msiof2_ss1_a),
- SH_PFC_PIN_GROUP(msiof2_ss2_a),
- SH_PFC_PIN_GROUP(msiof2_txd_a),
- SH_PFC_PIN_GROUP(msiof2_rxd_a),
- SH_PFC_PIN_GROUP(msiof2_clk_b),
- SH_PFC_PIN_GROUP(msiof2_sync_b),
- SH_PFC_PIN_GROUP(msiof2_ss1_b),
- SH_PFC_PIN_GROUP(msiof2_ss2_b),
- SH_PFC_PIN_GROUP(msiof2_txd_b),
- SH_PFC_PIN_GROUP(msiof2_rxd_b),
- SH_PFC_PIN_GROUP(msiof2_clk_c),
- SH_PFC_PIN_GROUP(msiof2_sync_c),
- SH_PFC_PIN_GROUP(msiof2_ss1_c),
- SH_PFC_PIN_GROUP(msiof2_ss2_c),
- SH_PFC_PIN_GROUP(msiof2_txd_c),
- SH_PFC_PIN_GROUP(msiof2_rxd_c),
- SH_PFC_PIN_GROUP(msiof2_clk_d),
- SH_PFC_PIN_GROUP(msiof2_sync_d),
- SH_PFC_PIN_GROUP(msiof2_ss1_d),
- SH_PFC_PIN_GROUP(msiof2_ss2_d),
- SH_PFC_PIN_GROUP(msiof2_txd_d),
- SH_PFC_PIN_GROUP(msiof2_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_a),
- SH_PFC_PIN_GROUP(msiof3_sync_a),
- SH_PFC_PIN_GROUP(msiof3_ss1_a),
- SH_PFC_PIN_GROUP(msiof3_ss2_a),
- SH_PFC_PIN_GROUP(msiof3_txd_a),
- SH_PFC_PIN_GROUP(msiof3_rxd_a),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_ss1_b),
- SH_PFC_PIN_GROUP(msiof3_ss2_b),
- SH_PFC_PIN_GROUP(msiof3_txd_b),
- SH_PFC_PIN_GROUP(msiof3_rxd_b),
- SH_PFC_PIN_GROUP(msiof3_clk_c),
- SH_PFC_PIN_GROUP(msiof3_sync_c),
- SH_PFC_PIN_GROUP(msiof3_txd_c),
- SH_PFC_PIN_GROUP(msiof3_rxd_c),
- SH_PFC_PIN_GROUP(msiof3_clk_d),
- SH_PFC_PIN_GROUP(msiof3_sync_d),
- SH_PFC_PIN_GROUP(msiof3_ss1_d),
- SH_PFC_PIN_GROUP(msiof3_txd_d),
- SH_PFC_PIN_GROUP(msiof3_rxd_d),
- SH_PFC_PIN_GROUP(msiof3_clk_e),
- SH_PFC_PIN_GROUP(msiof3_sync_e),
- SH_PFC_PIN_GROUP(msiof3_ss1_e),
- SH_PFC_PIN_GROUP(msiof3_ss2_e),
- SH_PFC_PIN_GROUP(msiof3_txd_e),
- SH_PFC_PIN_GROUP(msiof3_rxd_e),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm1_a),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2_a),
- SH_PFC_PIN_GROUP(pwm2_b),
- SH_PFC_PIN_GROUP(pwm3_a),
- SH_PFC_PIN_GROUP(pwm3_b),
- SH_PFC_PIN_GROUP(pwm4_a),
- SH_PFC_PIN_GROUP(pwm4_b),
- SH_PFC_PIN_GROUP(pwm5_a),
- SH_PFC_PIN_GROUP(pwm5_b),
- SH_PFC_PIN_GROUP(pwm6_a),
- SH_PFC_PIN_GROUP(pwm6_b),
- SH_PFC_PIN_GROUP(sata0_devslp_a),
- SH_PFC_PIN_GROUP(sata0_devslp_b),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_a),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif2_data_a),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif3_data_a),
- SH_PFC_PIN_GROUP(scif3_clk),
- SH_PFC_PIN_GROUP(scif3_ctrl),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif4_data_a),
- SH_PFC_PIN_GROUP(scif4_clk_a),
- SH_PFC_PIN_GROUP(scif4_ctrl_a),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_clk_b),
- SH_PFC_PIN_GROUP(scif4_ctrl_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif4_clk_c),
- SH_PFC_PIN_GROUP(scif4_ctrl_c),
- SH_PFC_PIN_GROUP(scif5_data_a),
- SH_PFC_PIN_GROUP(scif5_clk_a),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scif5_clk_b),
- SH_PFC_PIN_GROUP(scif_clk_a),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_data8),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd_a),
- SH_PFC_PIN_GROUP(sdhi2_wp_a),
- SH_PFC_PIN_GROUP(sdhi2_cd_b),
- SH_PFC_PIN_GROUP(sdhi2_wp_b),
- SH_PFC_PIN_GROUP(sdhi2_ds),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_data8),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(sdhi3_ds),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi01239_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data_a),
- SH_PFC_PIN_GROUP(ssi1_data_b),
- SH_PFC_PIN_GROUP(ssi1_ctrl_a),
- SH_PFC_PIN_GROUP(ssi1_ctrl_b),
- SH_PFC_PIN_GROUP(ssi2_data_a),
- SH_PFC_PIN_GROUP(ssi2_data_b),
- SH_PFC_PIN_GROUP(ssi2_ctrl_a),
- SH_PFC_PIN_GROUP(ssi2_ctrl_b),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi349_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5_data),
- SH_PFC_PIN_GROUP(ssi5_ctrl),
- SH_PFC_PIN_GROUP(ssi6_data),
- SH_PFC_PIN_GROUP(ssi6_ctrl),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi9_data_a),
- SH_PFC_PIN_GROUP(ssi9_data_b),
- SH_PFC_PIN_GROUP(ssi9_ctrl_a),
- SH_PFC_PIN_GROUP(ssi9_ctrl_b),
- SH_PFC_PIN_GROUP(tmu_tclk1_a),
- SH_PFC_PIN_GROUP(tmu_tclk1_b),
- SH_PFC_PIN_GROUP(tmu_tclk2_a),
- SH_PFC_PIN_GROUP(tmu_tclk2_b),
- SH_PFC_PIN_GROUP(tpu_to0),
- SH_PFC_PIN_GROUP(tpu_to1),
- SH_PFC_PIN_GROUP(tpu_to2),
- SH_PFC_PIN_GROUP(tpu_to3),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb30),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
- SH_PFC_PIN_GROUP(vin4_data18_a),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
- VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
- SH_PFC_PIN_GROUP(vin4_data18_b),
- VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
- VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
- SH_PFC_PIN_GROUP(vin4_sync),
- SH_PFC_PIN_GROUP(vin4_field),
- SH_PFC_PIN_GROUP(vin4_clkenb),
- SH_PFC_PIN_GROUP(vin4_clk),
- VIN_DATA_PIN_GROUP(vin5_data, 8),
- VIN_DATA_PIN_GROUP(vin5_data, 10),
- VIN_DATA_PIN_GROUP(vin5_data, 12),
- VIN_DATA_PIN_GROUP(vin5_data, 16),
- SH_PFC_PIN_GROUP(vin5_sync),
- SH_PFC_PIN_GROUP(vin5_field),
- SH_PFC_PIN_GROUP(vin5_clkenb),
- SH_PFC_PIN_GROUP(vin5_clk),
+static const struct {
+ struct sh_pfc_pin_group common[318];
+ struct sh_pfc_pin_group automotive[30];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c5),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_txd),
+ SH_PFC_PIN_GROUP(msiof0_rxd),
+ SH_PFC_PIN_GROUP(msiof1_clk_a),
+ SH_PFC_PIN_GROUP(msiof1_sync_a),
+ SH_PFC_PIN_GROUP(msiof1_ss1_a),
+ SH_PFC_PIN_GROUP(msiof1_ss2_a),
+ SH_PFC_PIN_GROUP(msiof1_txd_a),
+ SH_PFC_PIN_GROUP(msiof1_rxd_a),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_txd_b),
+ SH_PFC_PIN_GROUP(msiof1_rxd_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_ss1_c),
+ SH_PFC_PIN_GROUP(msiof1_ss2_c),
+ SH_PFC_PIN_GROUP(msiof1_txd_c),
+ SH_PFC_PIN_GROUP(msiof1_rxd_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_ss2_d),
+ SH_PFC_PIN_GROUP(msiof1_txd_d),
+ SH_PFC_PIN_GROUP(msiof1_rxd_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_ss1_e),
+ SH_PFC_PIN_GROUP(msiof1_ss2_e),
+ SH_PFC_PIN_GROUP(msiof1_txd_e),
+ SH_PFC_PIN_GROUP(msiof1_rxd_e),
+ SH_PFC_PIN_GROUP(msiof1_clk_f),
+ SH_PFC_PIN_GROUP(msiof1_sync_f),
+ SH_PFC_PIN_GROUP(msiof1_ss1_f),
+ SH_PFC_PIN_GROUP(msiof1_ss2_f),
+ SH_PFC_PIN_GROUP(msiof1_txd_f),
+ SH_PFC_PIN_GROUP(msiof1_rxd_f),
+ SH_PFC_PIN_GROUP(msiof1_clk_g),
+ SH_PFC_PIN_GROUP(msiof1_sync_g),
+ SH_PFC_PIN_GROUP(msiof1_ss1_g),
+ SH_PFC_PIN_GROUP(msiof1_ss2_g),
+ SH_PFC_PIN_GROUP(msiof1_txd_g),
+ SH_PFC_PIN_GROUP(msiof1_rxd_g),
+ SH_PFC_PIN_GROUP(msiof2_clk_a),
+ SH_PFC_PIN_GROUP(msiof2_sync_a),
+ SH_PFC_PIN_GROUP(msiof2_ss1_a),
+ SH_PFC_PIN_GROUP(msiof2_ss2_a),
+ SH_PFC_PIN_GROUP(msiof2_txd_a),
+ SH_PFC_PIN_GROUP(msiof2_rxd_a),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_txd_b),
+ SH_PFC_PIN_GROUP(msiof2_rxd_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_ss1_c),
+ SH_PFC_PIN_GROUP(msiof2_ss2_c),
+ SH_PFC_PIN_GROUP(msiof2_txd_c),
+ SH_PFC_PIN_GROUP(msiof2_rxd_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_txd_d),
+ SH_PFC_PIN_GROUP(msiof2_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_a),
+ SH_PFC_PIN_GROUP(msiof3_sync_a),
+ SH_PFC_PIN_GROUP(msiof3_ss1_a),
+ SH_PFC_PIN_GROUP(msiof3_ss2_a),
+ SH_PFC_PIN_GROUP(msiof3_txd_a),
+ SH_PFC_PIN_GROUP(msiof3_rxd_a),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_ss1_b),
+ SH_PFC_PIN_GROUP(msiof3_ss2_b),
+ SH_PFC_PIN_GROUP(msiof3_txd_b),
+ SH_PFC_PIN_GROUP(msiof3_rxd_b),
+ SH_PFC_PIN_GROUP(msiof3_clk_c),
+ SH_PFC_PIN_GROUP(msiof3_sync_c),
+ SH_PFC_PIN_GROUP(msiof3_txd_c),
+ SH_PFC_PIN_GROUP(msiof3_rxd_c),
+ SH_PFC_PIN_GROUP(msiof3_clk_d),
+ SH_PFC_PIN_GROUP(msiof3_sync_d),
+ SH_PFC_PIN_GROUP(msiof3_ss1_d),
+ SH_PFC_PIN_GROUP(msiof3_txd_d),
+ SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(msiof3_clk_e),
+ SH_PFC_PIN_GROUP(msiof3_sync_e),
+ SH_PFC_PIN_GROUP(msiof3_ss1_e),
+ SH_PFC_PIN_GROUP(msiof3_ss2_e),
+ SH_PFC_PIN_GROUP(msiof3_txd_e),
+ SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
+ SH_PFC_PIN_GROUP(sata0_devslp_a),
+ SH_PFC_PIN_GROUP(sata0_devslp_b),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_a),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif2_data_a),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif3_data_a),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_ctrl),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_a),
+ SH_PFC_PIN_GROUP(scif4_clk_a),
+ SH_PFC_PIN_GROUP(scif4_ctrl_a),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_clk_b),
+ SH_PFC_PIN_GROUP(scif4_ctrl_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif4_clk_c),
+ SH_PFC_PIN_GROUP(scif4_ctrl_c),
+ SH_PFC_PIN_GROUP(scif5_data_a),
+ SH_PFC_PIN_GROUP(scif5_clk_a),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scif5_clk_b),
+ SH_PFC_PIN_GROUP(scif_clk_a),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(tmu_tclk1_a),
+ SH_PFC_PIN_GROUP(tmu_tclk1_b),
+ SH_PFC_PIN_GROUP(tmu_tclk2_a),
+ SH_PFC_PIN_GROUP(tmu_tclk2_b),
+ SH_PFC_PIN_GROUP(tpu_to0),
+ SH_PFC_PIN_GROUP(tpu_to1),
+ SH_PFC_PIN_GROUP(tpu_to2),
+ SH_PFC_PIN_GROUP(tpu_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb30),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _a),
+ SH_PFC_PIN_GROUP(vin4_data18_a),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _a),
+ VIN_DATA_PIN_GROUP(vin4_data, 8, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 10, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 12, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 16, _b),
+ SH_PFC_PIN_GROUP(vin4_data18_b),
+ VIN_DATA_PIN_GROUP(vin4_data, 20, _b),
+ VIN_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ VIN_DATA_PIN_GROUP(vin5_data, 8),
+ VIN_DATA_PIN_GROUP(vin5_data, 10),
+ VIN_DATA_PIN_GROUP(vin5_data, 12),
+ VIN_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -5241,62 +5248,69 @@ static const char * const vin5_groups[] = {
"vin5_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(can0),
- SH_PFC_FUNCTION(can1),
- SH_PFC_FUNCTION(can_clk),
- SH_PFC_FUNCTION(canfd0),
- SH_PFC_FUNCTION(canfd1),
- SH_PFC_FUNCTION(drif0),
- SH_PFC_FUNCTION(drif1),
- SH_PFC_FUNCTION(drif2),
- SH_PFC_FUNCTION(drif3),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(hscif2),
- SH_PFC_FUNCTION(hscif3),
- SH_PFC_FUNCTION(hscif4),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(i2c5),
- SH_PFC_FUNCTION(i2c6),
- SH_PFC_FUNCTION(intc_ex),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(sata0),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tmu),
- SH_PFC_FUNCTION(tpu),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb30),
- SH_PFC_FUNCTION(vin4),
- SH_PFC_FUNCTION(vin5),
+static const struct {
+ struct sh_pfc_function common[51];
+ struct sh_pfc_function automotive[4];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c5),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(sata0),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tmu),
+ SH_PFC_FUNCTION(tpu),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -6425,6 +6439,32 @@ static const struct sh_pfc_soc_operations r8a77965_pinmux_ops = {
.set_bias = r8a77965_pinmux_set_bias,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A774B1
+const struct sh_pfc_soc_info r8a774b1_pinmux_info = {
+ .name = "r8a774b1_pfc",
+ .ops = &r8a77965_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A77965
const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.name = "r8a77965_pfc",
.ops = &r8a77965_pinmux_ops,
@@ -6434,10 +6474,12 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
@@ -6447,3 +6489,4 @@ const struct sh_pfc_soc_info r8a77965_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
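
A side note on the construct above: r8a77965 can keep serving the automotive
entries through a pointer to .common only because .common and .automotive are
adjacent members of a single struct, so indexing simply continues past the
first array into the second, while r8a774b1 counts ARRAY_SIZE(.common) and
stops. A stand-alone sketch of the same trick (identifiers hypothetical;
crossing the member boundary is formally out of bounds in ISO C, but in
practice no padding is inserted between two arrays of the same element type,
and that layout is exactly what the driver relies on):

#include <stdio.h>

struct item { const char *name; };

static const struct {
	struct item common[2];
	struct item extra[1];	/* laid out immediately after common[] */
} groups = {
	.common = { { "a" }, { "b" } },
	.extra  = { { "c" } },
};

int main(void)
{
	const struct item *all = groups.common;
	size_t n = sizeof(groups.common) / sizeof(groups.common[0]) +
		   sizeof(groups.extra) / sizeof(groups.extra[0]);
	size_t i;

	for (i = 0; i < n; i++)
		printf("%s\n", all[i].name);	/* prints a, b, c */
	return 0;
}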
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
index 2dfb8d9cfda1..c926a59dd21c 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
@@ -232,8 +232,8 @@
#define IP2_11_8 FM(AVB_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_15_12 FM(BS_N) FM(PWM0_A) FM(AVB_MAGIC) FM(VI4_CLK) F_(0, 0) FM(TX3_C) F_(0, 0) FM(VI5_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_19_16 FM(RD_N) FM(PWM1_A) FM(AVB_LINK) FM(VI4_FIELD) F_(0, 0) FM(RX3_C) FM(FSCLKST2_N_A) FM(VI5_DATA0_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH_A) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE_A) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_23_20 FM(RD_WR_N) FM(SCL7_A) FM(AVB_AVTP_MATCH) FM(VI4_VSYNC_N) FM(TX5_B) FM(SCK3_C) FM(PWM5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP2_27_24 FM(EX_WAIT0) FM(SDA7_A) FM(AVB_AVTP_CAPTURE) FM(VI4_HSYNC_N) FM(RX5_B) FM(PWM6_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP2_31_28 FM(A0) FM(IRQ0) FM(PWM2_A) FM(MSIOF3_SS1_B) FM(VI5_CLK_A) FM(DU_CDE) FM(HRX3_D) FM(IERX) FM(QSTB_QHE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_3_0 FM(A1) FM(IRQ1) FM(PWM3_A) FM(DU_DOTCLKIN1) FM(VI5_DATA0_A) FM(DU_DISP_CDE) FM(SDA6_B) FM(IETX) FM(QCPV_QDE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP3_7_4 FM(A2) FM(IRQ2) FM(AVB_AVTP_PPS) FM(VI4_CLKENB) FM(VI5_DATA1_A) FM(DU_DISP) FM(SCL6_B) F_(0, 0) FM(QSTVB_QVE) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -448,6 +448,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+#define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
+#define MOD_SEL1_30 FM(SEL_SSI2_0) FM(SEL_SSI2_1)
#define MOD_SEL1_29 FM(SEL_TIMER_TMU_0) FM(SEL_TIMER_TMU_1)
#define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
#define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
@@ -468,7 +470,8 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
#define PINMUX_MOD_SELS \
\
-MOD_SEL0_30_29 \
+ MOD_SEL1_31 \
+MOD_SEL0_30_29 MOD_SEL1_30 \
MOD_SEL1_29 \
MOD_SEL0_28 MOD_SEL1_28 \
MOD_SEL0_27_26 \
@@ -634,7 +637,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_23_20, RD_WR_N),
PINMUX_IPSR_MSEL(IP2_23_20, SCL7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH_A),
+ PINMUX_IPSR_GPSR(IP2_23_20, AVB_AVTP_MATCH),
PINMUX_IPSR_GPSR(IP2_23_20, VI4_VSYNC_N),
PINMUX_IPSR_GPSR(IP2_23_20, TX5_B),
PINMUX_IPSR_MSEL(IP2_23_20, SCK3_C, SEL_SCIF3_2),
@@ -642,7 +645,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP2_27_24, EX_WAIT0),
PINMUX_IPSR_MSEL(IP2_27_24, SDA7_A, SEL_I2C7_0),
- PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE_A),
+ PINMUX_IPSR_GPSR(IP2_27_24, AVB_AVTP_CAPTURE),
PINMUX_IPSR_GPSR(IP2_27_24, VI4_HSYNC_N),
PINMUX_IPSR_MSEL(IP2_27_24, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP2_27_24, PWM6_A, SEL_PWM6_0),
@@ -1058,7 +1061,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_27_24, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_27_24, TCLK1_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_27_24, SSI_SCK2_B),
+ PINMUX_IPSR_MSEL(IP10_27_24, SSI_SCK2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_27_24, TS_SCK0),
PINMUX_IPSR_GPSR(IP10_31_28, SD0_WP),
@@ -1067,7 +1070,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
PINMUX_IPSR_MSEL(IP10_31_28, SDA2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_31_28, TCLK2_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_GPSR(IP10_31_28, SSI_WS2_B),
+ PINMUX_IPSR_MSEL(IP10_31_28, SSI_WS2_B, SEL_SSI2_1),
PINMUX_IPSR_GPSR(IP10_31_28, TS_SDAT0),
/* IPSR11 */
@@ -1085,13 +1088,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_11_8, RX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_11_8, HRX1_A, SEL_HSCIF1_0),
- PINMUX_IPSR_GPSR(IP11_11_8, SSI_SCK2_A),
+ PINMUX_IPSR_MSEL(IP11_11_8, SSI_SCK2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC),
PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1),
PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A),
- PINMUX_IPSR_GPSR(IP11_15_12, SSI_WS2_A),
+ PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0),
PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0),
PINMUX_IPSR_GPSR(IP11_15_12, TS_SDAT1),
@@ -1196,7 +1199,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_19_16, RIF0_D1_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP13_19_16, SDA1_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP13_19_16, TCLK2_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_GPSR(IP13_19_16, SIM0_D_A),
+ PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT),
PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1),
@@ -1264,7 +1267,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP15_15_12, TPU0TO2),
PINMUX_IPSR_MSEL(IP15_15_12, SDA1_D, SEL_I2C1_3),
PINMUX_IPSR_MSEL(IP15_15_12, FSO_CFE_1_N_B, SEL_FSO_1),
- PINMUX_IPSR_GPSR(IP15_15_12, SIM0_D_B),
+ PINMUX_IPSR_MSEL(IP15_15_12, SIM0_D_B, SEL_SIMCARD_1),
PINMUX_IPSR_GPSR(IP15_19_16, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP15_19_16, HRTS2_N_A, SEL_HSCIF2_0),
@@ -1524,22 +1527,22 @@ static const unsigned int avb_avtp_pps_mux[] = {
AVB_AVTP_PPS_MARK,
};
-static const unsigned int avb_avtp_match_a_pins[] = {
- /* AVB_AVTP_MATCH_A */
+static const unsigned int avb_avtp_match_pins[] = {
+ /* AVB_AVTP_MATCH */
RCAR_GP_PIN(2, 24),
};
-static const unsigned int avb_avtp_match_a_mux[] = {
- AVB_AVTP_MATCH_A_MARK,
+static const unsigned int avb_avtp_match_mux[] = {
+ AVB_AVTP_MATCH_MARK,
};
-static const unsigned int avb_avtp_capture_a_pins[] = {
- /* AVB_AVTP_CAPTURE_A */
+static const unsigned int avb_avtp_capture_pins[] = {
+ /* AVB_AVTP_CAPTURE */
RCAR_GP_PIN(2, 25),
};
-static const unsigned int avb_avtp_capture_a_mux[] = {
- AVB_AVTP_CAPTURE_A_MARK,
+static const unsigned int avb_avtp_capture_mux[] = {
+ AVB_AVTP_CAPTURE_MARK,
};
/* - CAN ------------------------------------------------------------------ */
@@ -3784,8 +3787,8 @@ static const struct {
SH_PFC_PIN_GROUP(avb_phy_int),
SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_avtp_pps),
- SH_PFC_PIN_GROUP(avb_avtp_match_a),
- SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match),
+ SH_PFC_PIN_GROUP(avb_avtp_capture),
SH_PFC_PIN_GROUP(can0_data),
SH_PFC_PIN_GROUP(can1_data),
SH_PFC_PIN_GROUP(can_clk),
@@ -4061,8 +4064,8 @@ static const char * const avb_groups[] = {
"avb_phy_int",
"avb_mii",
"avb_avtp_pps",
- "avb_avtp_match_a",
- "avb_avtp_capture_a",
+ "avb_avtp_match",
+ "avb_avtp_capture",
};
static const char * const can0_groups[] = {
@@ -4957,11 +4960,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
MOD_SEL0_1_0 ))
},
{ PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32,
- GROUP(2, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1,
- 2, 2, 2, 1, 1, 2, 1, 4),
+ GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1,
+ 1, 2, 2, 2, 1, 1, 2, 1, 4),
GROUP(
- /* RESERVED 31, 30 */
- 0, 0, 0, 0,
+ MOD_SEL1_31
+ MOD_SEL1_30
MOD_SEL1_29
MOD_SEL1_28
/* RESERVED 27 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 5dfd991ffdaa..dbc36079c381 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -1450,7 +1450,7 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(ET0_ETXD2_A),
GPIO_FN(EX_CS5), GPIO_FN(SD1_CMD_A), GPIO_FN(ATADIR), GPIO_FN(QSSL_B),
GPIO_FN(ET0_ETXD3_A),
- GPIO_FN(RD_WR), GPIO_FN(TCLK1_B),
+ GPIO_FN(RD_WR), GPIO_FN(TCLK0), GPIO_FN(CAN_CLK_B), GPIO_FN(ET0_ETXD4),
GPIO_FN(EX_WAIT0), GPIO_FN(TCLK1_B),
GPIO_FN(EX_WAIT1), GPIO_FN(SD1_DAT0_A), GPIO_FN(DREQ2),
GPIO_FN(CAN1_TX_C), GPIO_FN(ET0_LINK_C), GPIO_FN(ET0_ETXD5_A),
@@ -1949,7 +1949,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_20 [1] */
FN_EX_WAIT0, FN_TCLK1_B,
/* IP3_19_18 [2] */
- FN_RD_WR, FN_TCLK1_B, 0, 0,
+ FN_RD_WR, FN_TCLK0, FN_CAN_CLK_B, FN_ET0_ETXD4,
/* IP3_17_15 [3] */
FN_EX_CS5, FN_SD1_CMD_A, FN_ATADIR, FN_QSSL_B,
FN_ET0_ETXD3_A, 0, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 835148fc0f28..640d2a4cb838 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -309,6 +309,7 @@ extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
extern const struct sh_pfc_soc_info r8a77470_pinmux_info;
extern const struct sh_pfc_soc_info r8a774a1_pinmux_info;
+extern const struct sh_pfc_soc_info r8a774b1_pinmux_info;
extern const struct sh_pfc_soc_info r8a774c0_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
@@ -319,7 +320,8 @@ extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
extern const struct sh_pfc_soc_info r8a7795es1_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7796_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77960_pinmux_info;
+extern const struct sh_pfc_soc_info r8a77961_pinmux_info;
extern const struct sh_pfc_soc_info r8a77965_pinmux_info;
extern const struct sh_pfc_soc_info r8a77970_pinmux_info;
extern const struct sh_pfc_soc_info r8a77980_pinmux_info;
@@ -422,12 +424,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
/*
* Describe a pinmux configuration in which a pin is physically multiplexed
* with other pins.
- * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - ipsr: IPSR field
* - fn: Function name
* - psel: Physical multiplexing selector
*/
#define PINMUX_IPSR_PHYS(ipsr, fn, psel) \
- PINMUX_DATA(fn##_MARK, FN_##psel)
+ PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr)
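
For illustration, with FN_##ipsr now emitted, a physically-multiplexed entry
such as this hypothetical invocation (identifiers illustrative only):

	PINMUX_IPSR_PHYS(IP0_3_0, SCL0, SEL_I2C0_0)

previously expanded to

	PINMUX_DATA(SCL0_MARK, FN_SEL_I2C0_0)

and now expands to

	PINMUX_DATA(SCL0_MARK, FN_SEL_I2C0_0, FN_IP0_3_0)

so the IPSR field itself gets configured instead of being documentation-only.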
/*
* Describe a pinmux configuration for a single-function pin with GPIO
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 924080362bf7..b1a9611f46b3 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5996,6 +5996,7 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
struct gpio_chip *chip;
u32 nbank;
int ret, idx;
+ struct gpio_irq_chip *girq;
ret = of_property_read_u32(np, "gpio-banks", &nbank);
if (ret) {
@@ -6048,24 +6049,15 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
chip->of_gpio_n_cells = 2;
chip->parent = &pdev->dev;
- /* Add gpio chip to system */
- ret = gpiochip_add_data(chip, a7gc);
- if (ret) {
- dev_err(&pdev->dev,
- "%pOF: error in probe function with status %d\n",
- np, ret);
- goto failed;
- }
-
- /* Add gpio chip to irq subsystem */
- ret = gpiochip_irqchip_add(chip, &atlas7_gpio_irq_chip,
- 0, handle_level_irq, IRQ_TYPE_NONE);
- if (ret) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto failed;
- }
-
+ girq = &chip->irq;
+ girq->chip = &atlas7_gpio_irq_chip;
+ girq->parent_handler = atlas7_gpio_handle_irq;
+ girq->num_parents = nbank;
+ girq->parents = devm_kcalloc(&pdev->dev, nbank,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (idx = 0; idx < nbank; idx++) {
struct atlas7_gpio_bank *bank;
@@ -6084,9 +6076,18 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
goto failed;
}
bank->irq = ret;
+ girq->parents[idx] = ret;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(chip, &atlas7_gpio_irq_chip,
- bank->irq, atlas7_gpio_handle_irq);
+ /* Add gpio chip to system */
+ ret = gpiochip_add_data(chip, a7gc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%pOF: error in probe function with status %d\n",
+ np, ret);
+ goto failed;
}
platform_set_drvdata(pdev, a7gc);
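
The conversion above follows the template the modern GPIOLIB irqchip helpers
expect: populate the gpio_chip's embedded struct gpio_irq_chip first and let
gpiochip_add_data() register the irqchip in the same call, rather than the old
gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() two-step (the sirf and
plgpio hunks below do the same). A condensed sketch of the pattern for a
driver with a single parent interrupt (foo_* identifiers hypothetical):

#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static struct irq_chip foo_irq_chip;			/* assumed elsewhere */
static void foo_irq_handler(struct irq_desc *desc);	/* chained handler */

static int foo_setup_gpio_irq(struct platform_device *pdev,
			      struct gpio_chip *chip, void *data)
{
	struct gpio_irq_chip *girq = &chip->irq;
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	girq->chip = &foo_irq_chip;
	girq->parent_handler = foo_irq_handler;
	girq->num_parents = 1;
	girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
				     GFP_KERNEL);
	if (!girq->parents)
		return -ENOMEM;
	girq->parents[0] = irq;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_level_irq;

	/* registers the gpiochip and its irqchip in one go */
	return devm_gpiochip_add_data(&pdev->dev, chip, data);
}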
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 780c31bb4009..1ebcb957c654 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -785,6 +785,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
struct sirfsoc_gpio_bank *bank;
void __iomem *regs;
struct platform_device *pdev;
+ struct gpio_irq_chip *girq;
u32 pullups[SIRFSOC_GPIO_NO_OF_BANKS], pulldowns[SIRFSOC_GPIO_NO_OF_BANKS];
@@ -816,36 +817,33 @@ static int sirfsoc_gpio_probe(struct device_node *np)
sgpio->chip.gc.parent = &pdev->dev;
sgpio->chip.regs = regs;
- err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
- if (err) {
- dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
- np, err);
- goto out;
- }
-
- err = gpiochip_irqchip_add(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- 0, handle_level_irq,
- IRQ_TYPE_NONE);
- if (err) {
- dev_err(&pdev->dev,
- "could not connect irqchip to gpiochip\n");
- goto out_banks;
- }
-
+ girq = &sgpio->chip.gc.irq;
+ girq->chip = &sirfsoc_irq_chip;
+ girq->parent_handler = sirfsoc_gpio_handle_irq;
+ girq->num_parents = SIRFSOC_GPIO_NO_OF_BANKS;
+ girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio->sgpio_bank[i];
spin_lock_init(&bank->lock);
bank->parent_irq = platform_get_irq(pdev, i);
if (bank->parent_irq < 0) {
err = bank->parent_irq;
- goto out_banks;
+ goto out;
}
+ girq->parents[i] = bank->parent_irq;
+ }
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
- gpiochip_set_chained_irqchip(&sgpio->chip.gc,
- &sirfsoc_irq_chip,
- bank->parent_irq,
- sirfsoc_gpio_handle_irq);
+ err = gpiochip_add_data(&sgpio->chip.gc, sgpio);
+ if (err) {
+ dev_err(&pdev->dev, "%pOF: error in probe function with status %d\n",
+ np, err);
+ goto out;
}
err = gpiochip_add_pin_range(&sgpio->chip.gc, dev_name(&pdev->dev),
@@ -867,7 +865,6 @@ static int sirfsoc_gpio_probe(struct device_node *np)
return 0;
out_no_range:
-out_banks:
gpiochip_remove(&sgpio->chip.gc);
out:
iounmap(regs);
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 9d906474f3e4..1ebbc49b16f1 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -515,15 +515,13 @@ end:
static int plgpio_probe(struct platform_device *pdev)
{
struct plgpio *plgpio;
- struct resource *res;
int ret, irq;
plgpio = devm_kzalloc(&pdev->dev, sizeof(*plgpio), GFP_KERNEL);
if (!plgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- plgpio->base = devm_ioremap_resource(&pdev->dev, res);
+ plgpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(plgpio->base))
return PTR_ERR(plgpio->base);
@@ -569,40 +567,35 @@ static int plgpio_probe(struct platform_device *pdev)
}
}
- ret = gpiochip_add_data(&plgpio->chip, plgpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to add gpio chip\n");
- goto unprepare_clk;
- }
-
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_info(&pdev->dev, "PLGPIO registered without IRQs\n");
- return 0;
+ if (irq > 0) {
+ struct gpio_irq_chip *girq;
+
+ girq = &plgpio->chip.irq;
+ girq->chip = &plgpio_irqchip;
+ girq->parent_handler = plgpio_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+ dev_info(&pdev->dev, "PLGPIO registering with IRQs\n");
+ } else {
+ dev_info(&pdev->dev, "PLGPIO registering without IRQs\n");
}
- ret = gpiochip_irqchip_add(&plgpio->chip,
- &plgpio_irqchip,
- 0,
- handle_simple_irq,
- IRQ_TYPE_NONE);
+ ret = gpiochip_add_data(&plgpio->chip, plgpio);
if (ret) {
- dev_err(&pdev->dev, "failed to add irqchip to gpiochip\n");
- goto remove_gpiochip;
+ dev_err(&pdev->dev, "unable to add gpio chip\n");
+ goto unprepare_clk;
}
- gpiochip_set_chained_irqchip(&plgpio->chip,
- &plgpio_irqchip,
- irq,
- plgpio_irq_handler);
-
- dev_info(&pdev->dev, "PLGPIO registered with IRQs\n");
-
return 0;
-remove_gpiochip:
- dev_info(&pdev->dev, "Remove gpiochip\n");
- gpiochip_remove(&plgpio->chip);
unprepare_clk:
if (!IS_ERR(plgpio->clk))
clk_unprepare(plgpio->clk);
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 7ec19c73f870..948f56abb9ae 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -358,7 +358,6 @@ int spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct spear_pmx *pmx;
if (!machdata)
@@ -368,8 +367,7 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!pmx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmx->vbase = devm_ioremap_resource(&pdev->dev, res);
+ pmx->vbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmx->vbase))
return PTR_ERR(pmx->vbase);
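
This hunk, like the plgpio one above and the sunxi/tegra/vt8500/zte ones
below, is a mechanical conversion: devm_platform_ioremap_resource() is just
the fused form of the two-call sequence it replaces. Side by side:

	/* before: fetch the resource, then map it */
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* after: one call, no struct resource local needed */
	base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);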
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 7b95bf5a82a9..157712ab05a8 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -41,7 +41,8 @@
#define PUBCP_SLEEP_MODE BIT(14)
#define TGLDSP_SLEEP_MODE BIT(15)
#define AGDSP_SLEEP_MODE BIT(16)
-#define SLEEP_MODE_MASK GENMASK(3, 0)
+#define CM4_SLEEP_MODE BIT(17)
+#define SLEEP_MODE_MASK GENMASK(5, 0)
#define SLEEP_MODE_SHIFT 13
#define SLEEP_INPUT BIT(1)
@@ -81,6 +82,7 @@ enum pin_sleep_mode {
PUBCP_SLEEP = BIT(1),
TGLDSP_SLEEP = BIT(2),
AGDSP_SLEEP = BIT(3),
+ CM4_SLEEP = BIT(4),
};
enum pin_func_sel {
@@ -484,6 +486,13 @@ static int sprd_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin_id,
SLEEP_PULL_UP_MASK) << 16;
arg |= (reg >> PULL_UP_SHIFT) & PULL_UP_MASK;
break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ if ((reg & (SLEEP_PULL_DOWN | SLEEP_PULL_UP)) ||
+ (reg & (PULL_DOWN | PULL_UP_4_7K | PULL_UP_20K)))
+ return -EINVAL;
+
+ arg = 1;
+ break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
arg = 0;
break;
@@ -609,6 +618,8 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
val |= TGLDSP_SLEEP_MODE;
if (arg & AGDSP_SLEEP)
val |= AGDSP_SLEEP_MODE;
+ if (arg & CM4_SLEEP)
+ val |= CM4_SLEEP_MODE;
mask = SLEEP_MODE_MASK;
shift = SLEEP_MODE_SHIFT;
@@ -674,6 +685,16 @@ static int sprd_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin_id,
shift = PULL_UP_SHIFT;
}
break;
+	case PIN_CONFIG_BIAS_DISABLE:
+		val = shift = 0;
+		if (is_sleep_config)
+			mask = SLEEP_PULL_DOWN | SLEEP_PULL_UP;
+		else
+			mask = PULL_DOWN | PULL_UP_20K |
+				PULL_UP_4_7K;
+		break;
case PIN_CONFIG_SLEEP_HARDWARE_STATE:
continue;
default:
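
On the read side (the sprd_pinconf_get() hunk earlier), returning -EINVAL for
PIN_CONFIG_BIAS_DISABLE when a pull is programmed follows the pinconf-generic
contract: -EINVAL from .pin_config_get means "this parameter is not active on
the pin" (used by the core when dumping pin state), while a zero return with
arg == 1 means the bias really is disabled. A rough consumer-side view of that
convention (illustrative only):

	#include <linux/pinctrl/pinconf-generic.h>

	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_DISABLE, 0);
	int ret = ops->pin_config_get(pctldev, pin, &cfg);

	if (ret == -EINVAL)
		;	/* bias-disable not in effect: some pull is programmed */
	else if (!ret && pinconf_to_config_argument(cfg) == 1)
		;	/* all pulls off: the pin floats */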
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0cbca30b75dc..b35c3245ab3f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1385,7 +1385,6 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
struct pinmux_ops *pmxops;
- struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1396,8 +1395,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
raw_spin_lock_init(&pctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctl->membase = devm_ioremap_resource(&pdev->dev, res);
+ pctl->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctl->membase))
return PTR_ERR(pctl->membase);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 95002e3ecaff..6f7b3767f453 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -873,7 +873,6 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
- struct resource *res;
struct phy *phy;
int err;
@@ -885,11 +884,16 @@ int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
mutex_init(&padctl->lock);
padctl->dev = &pdev->dev;
+ /*
+ * Note that we can't replace this by of_device_get_match_data()
+ * because we need the separate matching table for this legacy code on
+ * Tegra124. of_device_get_match_data() would attempt to use the table
+ * from the updated driver and fail.
+ */
match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
padctl->soc = match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+ padctl->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(padctl->regs))
return PTR_ERR(padctl->regs);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index e9a7cbb9aa33..692d8b3e2a20 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -781,8 +781,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
for (i = 0; i < pmx->nbanks; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ pmx->regs[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(pmx->regs[i]))
return PTR_ERR(pmx->regs[i]);
}
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index e5e7f1f22813..b522ca010332 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
return -EINVAL;
rows = pinctrl_count_index_with_args(np, name);
- if (rows == -EINVAL)
+ if (rows < 0)
return rows;
*map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 4d5cd7d8c760..ea910a18b4d7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -553,10 +553,8 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
struct wmt_pinctrl_data *data)
{
int err;
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 9512045420ec..786bf89487d6 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -387,7 +387,6 @@ int zx_pinctrl_init(struct platform_device *pdev,
struct pinctrl_desc *pctldesc;
struct zx_pinctrl *zpctl;
struct device_node *np;
- struct resource *res;
int ret;
zpctl = devm_kzalloc(&pdev->dev, sizeof(*zpctl), GFP_KERNEL);
@@ -396,8 +395,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
spin_lock_init(&zpctl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- zpctl->base = devm_ioremap_resource(&pdev->dev, res);
+ zpctl->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(zpctl->base))
return PTR_ERR(zpctl->base);
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index ee5f08ea57b6..5f57282a28da 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -132,9 +132,9 @@ config CROS_EC_LPC
module will be called cros_ec_lpcs.
config CROS_EC_PROTO
- bool
- help
- ChromeOS EC communication protocol helpers.
+ bool
+ help
+ ChromeOS EC communication protocol helpers.
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
@@ -190,6 +190,19 @@ config CROS_EC_DEBUGFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_debugfs.
+config CROS_EC_SENSORHUB
+ tristate "ChromeOS EC MEMS Sensor Hub"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+	  Allow loading IIO sensors. This driver is loaded by MFD and will in
+	  turn query the EC and register the sensors.
+	  It also forwards the sensor data coming from the EC to the IIO
+	  sensor objects.

+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_sensorhub.
+
config CROS_EC_SYSFS
tristate "ChromeOS EC control and information through sysfs"
depends on MFD_CROS_EC_DEV && SYSFS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 477ec3d1d1c9..aacd5920d8a1 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fd77e6fa74c2..6d6ce86a1408 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -31,13 +31,32 @@ static struct cros_ec_platform pd_p = {
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
};
-static irqreturn_t ec_irq_thread(int irq, void *data)
+static irqreturn_t ec_irq_handler(int irq, void *data)
{
struct cros_ec_device *ec_dev = data;
- bool wake_event = true;
+
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * cros_ec_handle_event() - process and forward pending events from the EC
+ * @ec_dev: Device with events to process.
+ *
+ * Call this function in a loop when the kernel is notified that the EC has
+ * pending events.
+ *
+ * Return: true if more events are still pending and this function should be
+ * called again.
+ */
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev)
+{
+ bool wake_event;
+ bool ec_has_more_events;
int ret;
- ret = cros_ec_get_next_event(ec_dev, &wake_event);
+ ret = cros_ec_get_next_event(ec_dev, &wake_event, &ec_has_more_events);
/*
* Signal only if wake host events or any interrupt if
@@ -50,6 +69,20 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
if (ret > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
0, ec_dev);
+
+ return ec_has_more_events;
+}
+EXPORT_SYMBOL(cros_ec_handle_event);
+
+static irqreturn_t ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
+
return IRQ_HANDLED;
}
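
The split above is the standard two-stage threaded-IRQ pattern: the primary
handler runs in hard-IRQ context only long enough to timestamp the event as
close to the wire as possible, returns IRQ_WAKE_THREAD, and the (sleepable)
thread drains every queued event in one wakeup. The pattern in isolation
(foo_* identifiers hypothetical; ktime_get_boottime_ns() stands in for the
driver-specific cros_ec_get_time_ns() helper):

#include <linux/interrupt.h>

struct foo_dev { u64 last_event_time; };	/* hypothetical device */
bool foo_handle_one_event(struct foo_dev *foo);	/* hypothetical helper */

/* Primary handler: hard-IRQ context, kept minimal; it only records the
 * timestamp and defers the real work to the thread. */
static irqreturn_t foo_hard_irq(int irq, void *data)
{
	struct foo_dev *foo = data;

	foo->last_event_time = ktime_get_boottime_ns();
	return IRQ_WAKE_THREAD;
}

/* Threaded handler: may sleep, so it can talk to the bus; it drains every
 * queued event before the ONESHOT-masked line is re-enabled. */
static irqreturn_t foo_thread_irq(int irq, void *data)
{
	struct foo_dev *foo = data;

	while (foo_handle_one_event(foo))
		;
	return IRQ_HANDLED;
}

/* in probe: */
	err = devm_request_threaded_irq(dev, irq, foo_hard_irq, foo_thread_irq,
					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					"foo", foo);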
@@ -104,6 +137,15 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
return ret;
}
+/**
+ * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
+ * @ec_dev: Device to register.
+ *
+ * Before calling this, allocate a new cros_ec_device structure and fill
+ * in all the fields up to the --private-- marker.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_register(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -131,10 +173,12 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
return err;
}
- if (ec_dev->irq) {
- err = devm_request_threaded_irq(dev, ec_dev->irq, NULL,
- ec_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "chromeos-ec", ec_dev);
+ if (ec_dev->irq > 0) {
+ err = devm_request_threaded_irq(dev, ec_dev->irq,
+ ec_irq_handler,
+ ec_irq_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "chromeos-ec", ec_dev);
if (err) {
dev_err(dev, "Failed to request IRQ %d: %d",
ec_dev->irq, err);
@@ -198,6 +242,14 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
}
EXPORT_SYMBOL(cros_ec_register);
+/**
+ * cros_ec_unregister() - Remove a ChromeOS EC.
+ * @ec_dev: Device to unregister.
+ *
+ * Call this to deregister a ChromeOS EC and clean up its private data.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_unregister(struct cros_ec_device *ec_dev)
{
if (ec_dev->pd)
@@ -209,6 +261,14 @@ int cros_ec_unregister(struct cros_ec_device *ec_dev)
EXPORT_SYMBOL(cros_ec_unregister);
#ifdef CONFIG_PM_SLEEP
+/**
+ * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
+ * @ec_dev: Device to suspend.
+ *
+ * This can be called by drivers to handle a suspend event.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_suspend(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -238,11 +298,19 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
while (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL) > 0)
+ cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
}
+/**
+ * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
+ * @ec_dev: Device to resume.
+ *
+ * This can be called by drivers to handle a resume event.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
int ret;
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 25ca2c894b4d..e5996821d08b 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -136,11 +136,11 @@ static void ish_evt_handler(struct work_struct *work)
struct ishtp_cl_data *client_data =
container_of(work, struct ishtp_cl_data, work_ec_evt);
struct cros_ec_device *ec_dev = client_data->ec_dev;
+ bool ec_has_more_events;
- if (cros_ec_get_next_event(ec_dev, NULL) > 0) {
- blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
- }
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
}
/**
@@ -200,13 +200,14 @@ static int ish_send(struct ishtp_cl_data *client_data,
* process_recv() - Receive and parse an incoming packet
* @cros_ish_cl: Client instance to get stats
* @rb_in_proc: Host interface message buffer
+ * @timestamp: Timestamp of when parent callback started
*
* Parse the incoming packet. If it is a response packet then it will
* update per-instance flags and wake up the caller waiting for the
* response. If it is an event packet then it will schedule event work.
*/
static void process_recv(struct ishtp_cl *cros_ish_cl,
- struct ishtp_cl_rb *rb_in_proc)
+ struct ishtp_cl_rb *rb_in_proc, ktime_t timestamp)
{
size_t data_len = rb_in_proc->buf_idx;
struct ishtp_cl_data *client_data =
@@ -295,6 +296,11 @@ error_wake_up:
break;
case CROS_MKBP_EVENT:
+ /*
+ * Set timestamp from beginning of function since we actually
+ * got an incoming MKBP event
+ */
+ client_data->ec_dev->last_event_time = timestamp;
/* The event system doesn't send any data in buffer */
schedule_work(&client_data->work_ec_evt);
@@ -322,10 +328,17 @@ static void ish_event_cb(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl_rb *rb_in_proc;
struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
+ ktime_t timestamp;
+
+ /*
+	 * Take the timestamp as close to the hardware interrupt as possible,
+	 * for use in sensor timestamps.
+ */
+ timestamp = cros_ec_get_time_ns();
while ((rb_in_proc = ishtp_cl_rx_get_rb(cros_ish_cl)) != NULL) {
/* Decide what to do with received data */
- process_recv(cros_ish_cl, rb_in_proc);
+ process_recv(cros_ish_cl, rb_in_proc, timestamp);
}
}
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 7d10d909435f..dccf479c6625 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -312,11 +312,20 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
{
struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+ int ret;
- if (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL) > 0)
- blocking_notifier_call_chain(&ec_dev->event_notifier, 0,
- ec_dev);
+ ec_dev->last_event_time = cros_ec_get_time_ns();
+
+ if (ec_dev->mkbp_event_supported)
+ do {
+ ret = cros_ec_get_next_event(ec_dev, NULL,
+ &ec_has_more_events);
+ if (ret > 0)
+ blocking_notifier_call_chain(
+ &ec_dev->event_notifier, 0,
+ ec_dev);
+ } while (ec_has_more_events);
if (value == ACPI_NOTIFY_DEVICE_WAKE)
pm_system_wakeup();
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index f659f96bda12..da1b1c450433 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -117,6 +117,17 @@ static int send_command(struct cros_ec_device *ec_dev,
return ret;
}
+/**
+ * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
+ * @ec_dev: Device to prepare the outgoing message for.
+ * @msg: Message to write.
+ *
+ * This is intended to be used by all ChromeOS EC drivers, but at present
+ * only SPI uses it. Once LPC uses the same protocol it can start using it.
+ * I2C could use it now, with a refactor of the existing code.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -141,6 +152,16 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
+/**
+ * cros_ec_check_result() - Check ec_msg->result.
+ * @ec_dev: EC device.
+ * @msg: Message to check.
+ *
+ * This is used by ChromeOS EC drivers to check the ec_msg->result for
+ * errors and to warn about them.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -326,6 +347,13 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev,
return ret;
}
+/**
+ * cros_ec_query_all() - Query the protocol version supported by the
+ * ChromeOS EC.
+ * @ec_dev: Device to query.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_query_all(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
@@ -428,7 +456,10 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
if (ret < 0 || ver_mask == 0)
ec_dev->mkbp_event_supported = 0;
	else
-		ec_dev->mkbp_event_supported = 1;
+		ec_dev->mkbp_event_supported = fls(ver_mask);
+
+	if (ec_dev->mkbp_event_supported)
+		dev_dbg(ec_dev->dev, "MKBP support version %u\n",
+			ec_dev->mkbp_event_supported - 1);
/* Probe if host sleep v1 is supported for S0ix failure detection. */
ret = cros_ec_get_host_command_version_mask(ec_dev,
@@ -453,6 +484,16 @@ exit:
}
EXPORT_SYMBOL(cros_ec_query_all);
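
A note on the fls() change in the hunk above: storing fls(ver_mask) makes
mkbp_event_supported encode "highest supported command version + 1", so zero
still means "no MKBP support" and the version to use on the wire is always
mkbp_event_supported - 1. Concretely:

	/* fls(x) = 1-based index of the most-significant set bit; fls(0) = 0.
	 *
	 *   ver_mask 0x0 (no versions) -> fls = 0 -> MKBP disabled
	 *   ver_mask 0x1 (v0 only)     -> fls = 1 -> use command version 0
	 *   ver_mask 0x3 (v0 and v1)   -> fls = 2 -> use command version 1
	 */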
+/**
+ * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used
+ * instead of calling the EC's cmd_xfer() callback directly.
+ *
+ * Return: 0 on success or negative error code.
+ */
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -500,6 +541,18 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
}
EXPORT_SYMBOL(cros_ec_cmd_xfer);
+/**
+ * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
+ * @ec_dev: EC device.
+ * @msg: Message to write.
+ *
+ * This function is identical to cros_ec_cmd_xfer(), except that it returns
+ * success only if the command was both transmitted successfully and answered
+ * by the EC with a success status. It's not necessary to check msg->result
+ * when using this function.
+ *
+ * Return: The number of bytes transferred on success or negative error code.
+ */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
@@ -519,6 +572,7 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer_status);
static int get_next_event_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg,
+ struct ec_response_get_next_event_v1 *event,
int version, uint32_t size)
{
int ret;
@@ -531,7 +585,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data, ret);
+ ec_dev->event_data = *event;
}
return ret;
@@ -539,30 +593,26 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
static int get_next_event(struct cros_ec_device *ec_dev)
{
- u8 buffer[sizeof(struct cros_ec_command) + sizeof(ec_dev->event_data)];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
- static int cmd_version = 1;
- int ret;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_response_get_next_event_v1 event;
+ } __packed buf;
+ struct cros_ec_command *msg = &buf.msg;
+ struct ec_response_get_next_event_v1 *event = &buf.event;
+ const int cmd_version = ec_dev->mkbp_event_supported - 1;
+ memset(msg, 0, sizeof(*msg));
if (ec_dev->suspended) {
dev_dbg(ec_dev->dev, "Device suspended.\n");
return -EHOSTDOWN;
}
- if (cmd_version == 1) {
- ret = get_next_event_xfer(ec_dev, msg, cmd_version,
- sizeof(struct ec_response_get_next_event_v1));
- if (ret < 0 || msg->result != EC_RES_INVALID_VERSION)
- return ret;
-
- /* Fallback to version 0 for future send attempts */
- cmd_version = 0;
- }
-
- ret = get_next_event_xfer(ec_dev, msg, cmd_version,
+ if (cmd_version == 0)
+ return get_next_event_xfer(ec_dev, msg, event, 0,
sizeof(struct ec_response_get_next_event));
- return ret;
+ return get_next_event_xfer(ec_dev, msg, event, cmd_version,
+ sizeof(struct ec_response_get_next_event_v1));
}
static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
@@ -584,27 +634,60 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
return ec_dev->event_size;
}
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
+/**
+ * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ * @wake_event: Pointer to a bool set to true upon return if the event might be
+ * treated as a wake event. Ignored if null.
+ * @has_more_events: Pointer to bool set to true if more than one event is
+ * pending.
+ * Some ECs set this flag to indicate that cros_ec_get_next_event()
+ * can be called multiple times in a row.
+ * It is an optimization that avoids issuing an EC command for
+ * nothing, or waiting for another interrupt from the EC before
+ * processing the next message.
+ * Ignored if null.
+ *
+ * Return: negative error code on errors; 0 for no data; or else number of
+ * bytes received (i.e., an event was retrieved successfully). Event types are
+ * written out to @ec_dev->event_data.event_type on success.
+ */
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events)
{
u8 event_type;
u32 host_event;
int ret;
- if (!ec_dev->mkbp_event_supported) {
- ret = get_keyboard_state_event(ec_dev);
- if (ret <= 0)
- return ret;
+ /*
+ * Default value for wake_event.
+ * Wake up on a keyboard event, on a spurious interrupt, or on a link
+ * error to the EC.
+ */
+ if (wake_event)
+ *wake_event = true;
- if (wake_event)
- *wake_event = true;
+ /*
+ * Default value for has_more_events.
+ * The EC will raise another interrupt anyway if the AP does not
+ * process all events.
+ */
+ if (has_more_events)
+ *has_more_events = false;
- return ret;
- }
+ if (!ec_dev->mkbp_event_supported)
+ return get_keyboard_state_event(ec_dev);
ret = get_next_event(ec_dev);
if (ret <= 0)
return ret;
+ if (has_more_events)
+ *has_more_events = ec_dev->event_data.event_type &
+ EC_MKBP_HAS_MORE_EVENTS;
+ ec_dev->event_data.event_type &= EC_MKBP_EVENT_TYPE_MASK;
+
if (wake_event) {
event_type = ec_dev->event_data.event_type;
host_event = cros_ec_get_host_event(ec_dev);
@@ -619,15 +702,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
else if (host_event &&
!(host_event & ec_dev->host_event_wake_mask))
*wake_event = false;
- /* Consider all other events as wake events. */
- else
- *wake_event = true;
}
return ret;
}
EXPORT_SYMBOL(cros_ec_get_next_event);
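A hedged sketch of a consumer loop for the new has_more_events flag (hypothetical caller, not part of this patch; the rpmsg driver below reaches the same effect through cros_ec_handle_event()):

/* Sketch: drain all pending MKBP events after a single interrupt. */
static void example_drain_events(struct cros_ec_device *ec_dev)
{
	bool wake_event, has_more_events;
	int ret;

	do {
		ret = cros_ec_get_next_event(ec_dev, &wake_event,
					     &has_more_events);
		if (ret > 0 && wake_event && device_may_wakeup(ec_dev->dev))
			pm_wakeup_event(ec_dev->dev, 0);
	} while (ret > 0 && has_more_events);
}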
+/**
+ * cros_ec_get_host_event() - Return a mask of events set by the ChromeOS EC.
+ * @ec_dev: Device to fetch event from.
+ *
+ * When MKBP is supported and the EC raises an interrupt, we collect the
+ * events raised and call the functions in the EC notifier. This function
+ * is a helper to know which events are raised.
+ *
+ * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
+ */
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
{
u32 host_event;
@@ -647,3 +737,120 @@ u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev)
return host_event;
}
EXPORT_SYMBOL(cros_ec_get_host_event);
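For illustration, a hypothetical test of one bit in the returned mask, using the EC_HOST_EVENT_MASK() helper from cros_ec_commands.h (sketch only, not part of this patch):

/* Sketch: check whether the EC reported a low-battery host event. */
u32 host_event = cros_ec_get_host_event(ec_dev);

if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_BATTERY_LOW))
	dev_info(ec_dev->dev, "battery low event received\n");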
+
+/**
+ * cros_ec_check_features() - Test for the presence of EC features
+ *
+ * @ec: EC device, does not have to be connected directly to the AP,
+ * can be daisy chained through another device.
+ * @feature: One of the ec_feature_code bits.
+ *
+ * Call this function to test whether the ChromeOS EC supports a feature.
+ *
+ * Return: 1 if supported, 0 if not.
+ */
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ if (ec->features[0] == -1U && ec->features[1] == -1U) {
+ /* features bitmap not read yet */
+ msg = kzalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+ msg->insize = sizeof(ec->features);
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+ ret, msg->result);
+ memset(ec->features, 0, sizeof(ec->features));
+ } else {
+ memcpy(ec->features, msg->data, sizeof(ec->features));
+ }
+
+ dev_dbg(ec->dev, "EC features %08x %08x\n",
+ ec->features[0], ec->features[1]);
+
+ kfree(msg);
+ }
+
+ return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
+EXPORT_SYMBOL_GPL(cros_ec_check_features);
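A worked example of the bitmap lookup in the return statement above, assuming EC_FEATURE_MASK_0() tests bit feature % 32:

/*
 * Hypothetical feature code 37:
 *   ec->features[37 / 32]  selects features[1]
 *   EC_FEATURE_MASK_0(37)  is BIT(37 % 32) == BIT(5)
 * so the call tests bit 5 of the second feature word.
 */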
+
+/**
+ * cros_ec_get_sensor_count() - Return the number of MEMS sensors supported.
+ *
+ * @ec: EC device, does not have to be connected directly to the AP,
+ * can be daisy chained through another device.
+ * Return: the number of MEMS sensors reported, or < 0 in case of error.
+ */
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec)
+{
+ /*
+ * Issue a command to get the number of sensors reported.
+ * If not supported, check for legacy mode.
+ */
+ int ret, sensor_count;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ struct cros_ec_device *ec_dev = ec->ec_dev;
+ u8 status;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+
+ params = (struct ec_params_motion_sense *)msg->data;
+ params->cmd = MOTIONSENSE_CMD_DUMP;
+
+ ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ if (ret < 0) {
+ sensor_count = ret;
+ } else if (msg->result != EC_RES_SUCCESS) {
+ sensor_count = -EPROTO;
+ } else {
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_count = resp->dump.sensor_count;
+ }
+ kfree(msg);
+
+ /*
+ * Check legacy mode: find out whether the sensors are accessible
+ * via the LPC interface.
+ */
+ if (sensor_count == -EPROTO &&
+ ec->cmd_offset == 0 &&
+ ec_dev->cmd_readmem) {
+ ret = ec_dev->cmd_readmem(ec_dev, EC_MEMMAP_ACC_STATUS,
+ 1, &status);
+ if (ret >= 0 &&
+ (status & EC_MEMMAP_ACC_STATUS_PRESENCE_BIT)) {
+ /*
+ * We have 2 sensors, one in the lid, one in the base.
+ */
+ sensor_count = 2;
+ } else {
+ /*
+ * The EC uses the LPC interface and no sensors are present.
+ */
+ sensor_count = 0;
+ }
+ } else if (sensor_count == -EPROTO) {
+ /* The EC responded but does not understand the DUMP command. */
+ sensor_count = 0;
+ }
+ return sensor_count;
+}
+EXPORT_SYMBOL_GPL(cros_ec_get_sensor_count);
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index 0c3738c3244d..bd068afe43b5 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -143,22 +143,11 @@ cros_ec_rpmsg_host_event_function(struct work_struct *host_event_work)
struct cros_ec_rpmsg,
host_event_work);
struct cros_ec_device *ec_dev = dev_get_drvdata(&ec_rpmsg->rpdev->dev);
- bool wake_event = true;
- int ret;
-
- ret = cros_ec_get_next_event(ec_dev, &wake_event);
-
- /*
- * Signal only if wake host events or any interrupt if
- * cros_ec_get_next_event() returned an error (default value for
- * wake_event is true)
- */
- if (wake_event && device_may_wakeup(ec_dev->dev))
- pm_wakeup_event(ec_dev->dev, 0);
+ bool ec_has_more_events;
- if (ret > 0)
- blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
+ do {
+ ec_has_more_events = cros_ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
}
static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
new file mode 100644
index 000000000000..04d8879689e9
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sensor HUB driver that discovers sensors behind a ChromeOS Embedded
+ * Controller.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define DRV_NAME "cros-ec-sensorhub"
+
+static void cros_ec_sensorhub_free_sensor(void *arg)
+{
+ struct platform_device *pdev = arg;
+
+ platform_device_unregister(pdev);
+}
+
+static int cros_ec_sensorhub_allocate_sensor(struct device *parent,
+ char *sensor_name,
+ int sensor_num)
+{
+ struct cros_ec_sensor_platform sensor_platforms = {
+ .sensor_num = sensor_num,
+ };
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(parent, sensor_name,
+ PLATFORM_DEVID_AUTO,
+ &sensor_platforms,
+ sizeof(sensor_platforms));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return devm_add_action_or_reset(parent,
+ cros_ec_sensorhub_free_sensor,
+ pdev);
+}
+
+static int cros_ec_sensorhub_register(struct device *dev,
+ struct cros_ec_sensorhub *sensorhub)
+{
+ int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ struct cros_ec_dev *ec = sensorhub->ec;
+ struct ec_params_motion_sense *params;
+ struct ec_response_motion_sense *resp;
+ struct cros_ec_command *msg;
+ int ret, i, sensor_num;
+ char *name;
+
+ sensor_num = cros_ec_get_sensor_count(ec);
+ if (sensor_num < 0) {
+ dev_err(dev,
+ "Unable to retrieve sensor information (err:%d)\n",
+ sensor_num);
+ return sensor_num;
+ }
+
+ if (sensor_num == 0) {
+ dev_err(dev, "Zero sensors reported.\n");
+ return -EINVAL;
+ }
+
+ /* Prepare a message to send INFO command to each sensor. */
+ msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = 1;
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(*resp);
+ params = (struct ec_params_motion_sense *)msg->data;
+ resp = (struct ec_response_motion_sense *)msg->data;
+
+ for (i = 0; i < sensor_num; i++) {
+ params->cmd = MOTIONSENSE_CMD_INFO;
+ params->info.sensor_num = i;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret < 0) {
+ dev_warn(dev, "no info for EC sensor %d : %d/%d\n",
+ i, ret, msg->result);
+ continue;
+ }
+
+ switch (resp->info.type) {
+ case MOTIONSENSE_TYPE_ACCEL:
+ name = "cros-ec-accel";
+ break;
+ case MOTIONSENSE_TYPE_BARO:
+ name = "cros-ec-baro";
+ break;
+ case MOTIONSENSE_TYPE_GYRO:
+ name = "cros-ec-gyro";
+ break;
+ case MOTIONSENSE_TYPE_MAG:
+ name = "cros-ec-mag";
+ break;
+ case MOTIONSENSE_TYPE_PROX:
+ name = "cros-ec-prox";
+ break;
+ case MOTIONSENSE_TYPE_LIGHT:
+ name = "cros-ec-light";
+ break;
+ case MOTIONSENSE_TYPE_ACTIVITY:
+ name = "cros-ec-activity";
+ break;
+ default:
+ dev_warn(dev, "unknown type %d\n", resp->info.type);
+ continue;
+ }
+
+ ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
+ if (ret)
+ goto error;
+
+ sensor_type[resp->info.type]++;
+ }
+
+ if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
+ ec->has_kb_wake_angle = true;
+
+ if (cros_ec_check_features(ec,
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-lid-angle",
+ 0);
+ if (ret)
+ goto error;
+ }
+
+ kfree(msg);
+ return 0;
+
+error:
+ kfree(msg);
+ return ret;
+}
+
+static int cros_ec_sensorhub_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_sensorhub *data;
+ int ret;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ec = dev_get_drvdata(dev->parent);
+ dev_set_drvdata(dev, data);
+
+ /* Check whether this EC is a sensor hub. */
+ if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) {
+ ret = cros_ec_sensorhub_register(dev, data);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If the device has sensors but does not claim to
+ * be a sensor hub, we are in legacy mode.
+ */
+ for (i = 0; i < 2; i++) {
+ ret = cros_ec_sensorhub_allocate_sensor(dev,
+ "cros-ec-accel-legacy", i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_sensorhub_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_sensorhub_probe,
+};
+
+module_platform_driver(cros_ec_sensorhub_driver);
+
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC MEMS Sensor Hub Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_trace.c b/drivers/platform/chrome/cros_ec_trace.c
index 6f80ff4532ae..5af1d66d9eca 100644
--- a/drivers/platform/chrome/cros_ec_trace.c
+++ b/drivers/platform/chrome/cros_ec_trace.c
@@ -98,7 +98,10 @@
TRACE_SYMBOL(EC_CMD_SB_READ_BLOCK), \
TRACE_SYMBOL(EC_CMD_SB_WRITE_BLOCK), \
TRACE_SYMBOL(EC_CMD_BATTERY_VENDOR_PARAM), \
- TRACE_SYMBOL(EC_CMD_CODEC_I2S), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_DMIC), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_I2S_RX), \
+ TRACE_SYMBOL(EC_CMD_EC_CODEC_WOV), \
TRACE_SYMBOL(EC_CMD_REBOOT_EC), \
TRACE_SYMBOL(EC_CMD_GET_PANIC_INFO), \
TRACE_SYMBOL(EC_CMD_ACPI_READ), \
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 2430e8b82810..374cdd1e868a 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -224,6 +224,7 @@ static int cros_usbpd_logger_remove(struct platform_device *pd)
struct logger_data *logger = platform_get_drvdata(pd);
cancel_delayed_work_sync(&logger->log_work);
+ destroy_workqueue(logger->log_workqueue);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig
index 89007b0bc743..365f30e116ee 100644
--- a/drivers/platform/chrome/wilco_ec/Kconfig
+++ b/drivers/platform/chrome/wilco_ec/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config WILCO_EC
tristate "ChromeOS Wilco Embedded Controller"
- depends on ACPI && X86 && CROS_EC_LPC
+ depends on ACPI && X86 && CROS_EC_LPC && LEDS_CLASS
help
If you say Y here, you get support for talking to the ChromeOS
Wilco EC over an eSPI bus. This uses a simple byte-level protocol
diff --git a/drivers/platform/chrome/wilco_ec/Makefile b/drivers/platform/chrome/wilco_ec/Makefile
index bc817164596e..ecb3145cab18 100644
--- a/drivers/platform/chrome/wilco_ec/Makefile
+++ b/drivers/platform/chrome/wilco_ec/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-wilco_ec-objs := core.o mailbox.o properties.o sysfs.o
+wilco_ec-objs := core.o keyboard_leds.o mailbox.o \
+ properties.o sysfs.o
obj-$(CONFIG_WILCO_EC) += wilco_ec.o
wilco_ec_debugfs-objs := debugfs.o
obj-$(CONFIG_WILCO_EC_DEBUGFS) += wilco_ec_debugfs.o
diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c
index 3724bf4b77c6..5210c357feef 100644
--- a/drivers/platform/chrome/wilco_ec/core.c
+++ b/drivers/platform/chrome/wilco_ec/core.c
@@ -5,10 +5,6 @@
* Copyright 2018 Google LLC
*
* This is the entry point for the drivers that control the Wilco EC.
- * This driver is responsible for several tasks:
- * - Initialize the register interface that is used by wilco_ec_mailbox()
- * - Create a platform device which is picked up by the debugfs driver
- * - Create a platform device which is picked up by the RTC driver
*/
#include <linux/acpi.h>
@@ -87,12 +83,31 @@ static int wilco_ec_probe(struct platform_device *pdev)
goto unregister_debugfs;
}
+ /* Set up the keyboard backlight LEDs. */
+ ret = wilco_keyboard_leds_init(ec);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to initialize keyboard LEDs: %d\n",
+ ret);
+ goto unregister_rtc;
+ }
+
ret = wilco_ec_add_sysfs(ec);
if (ret < 0) {
dev_err(dev, "Failed to create sysfs entries: %d", ret);
goto unregister_rtc;
}
+ /* Register child device to be found by charger config driver. */
+ ec->charger_pdev = platform_device_register_data(dev, "wilco-charger",
+ PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ if (IS_ERR(ec->charger_pdev)) {
+ dev_err(dev, "Failed to create charger platform device\n");
+ ret = PTR_ERR(ec->charger_pdev);
+ goto remove_sysfs;
+ }
+
/* Register child device that will be found by the telemetry driver. */
ec->telem_pdev = platform_device_register_data(dev, "wilco_telem",
PLATFORM_DEVID_AUTO,
@@ -100,11 +115,13 @@ static int wilco_ec_probe(struct platform_device *pdev)
if (IS_ERR(ec->telem_pdev)) {
dev_err(dev, "Failed to create telemetry platform device\n");
ret = PTR_ERR(ec->telem_pdev);
- goto remove_sysfs;
+ goto unregister_charge_config;
}
return 0;
+unregister_charge_config:
+ platform_device_unregister(ec->charger_pdev);
remove_sysfs:
wilco_ec_remove_sysfs(ec);
unregister_rtc:
@@ -120,6 +137,7 @@ static int wilco_ec_remove(struct platform_device *pdev)
{
struct wilco_ec_device *ec = platform_get_drvdata(pdev);
+ platform_device_unregister(ec->charger_pdev);
wilco_ec_remove_sysfs(ec);
platform_device_unregister(ec->telem_pdev);
platform_device_unregister(ec->rtc_pdev);
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index 8d65a1e2f1a3..df5a5f6c3ec6 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -160,29 +160,29 @@ static const struct file_operations fops_raw = {
#define CMD_KB_CHROME 0x88
#define SUB_CMD_H1_GPIO 0x0A
+#define SUB_CMD_TEST_EVENT 0x0B
-struct h1_gpio_status_request {
+struct ec_request {
u8 cmd; /* Always CMD_KB_CHROME */
u8 reserved;
- u8 sub_cmd; /* Always SUB_CMD_H1_GPIO */
+ u8 sub_cmd;
} __packed;
-struct hi_gpio_status_response {
+struct ec_response {
u8 status; /* 0 if allowed */
- u8 val; /* BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL */
+ u8 val;
} __packed;
-static int h1_gpio_get(void *arg, u64 *val)
+static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
{
- struct wilco_ec_device *ec = arg;
- struct h1_gpio_status_request rq;
- struct hi_gpio_status_response rs;
+ struct ec_request rq;
+ struct ec_response rs;
struct wilco_ec_message msg;
int ret;
memset(&rq, 0, sizeof(rq));
rq.cmd = CMD_KB_CHROME;
- rq.sub_cmd = SUB_CMD_H1_GPIO;
+ rq.sub_cmd = sub_cmd;
memset(&msg, 0, sizeof(msg));
msg.type = WILCO_EC_MSG_LEGACY;
@@ -196,14 +196,39 @@ static int h1_gpio_get(void *arg, u64 *val)
if (rs.status)
return -EIO;
- *val = rs.val;
+ *out_val = rs.val;
return 0;
}
+/**
+ * h1_gpio_get() - Gets h1 gpio status.
+ * @arg: The wilco EC device.
+ * @val: BIT(0)=ENTRY_TO_FACT_MODE, BIT(1)=SPI_CHROME_SEL
+ */
+static int h1_gpio_get(void *arg, u64 *val)
+{
+ return send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+}
+
DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
/**
+ * test_event_set() - Sends command to EC to cause an EC test event.
+ * @arg: The wilco EC device.
+ * @val: unused.
+ */
+static int test_event_set(void *arg, u64 val)
+{
+ u8 ret;
+
+ return send_ec_cmd(arg, SUB_CMD_TEST_EVENT, &ret);
+}
+
+/* Format is unused since it is only required for the get method, which is NULL. */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_test_event, NULL, test_event_set, "%llu\n");
+
+/**
* wilco_ec_debugfs_probe() - Create the debugfs node
* @pdev: The platform device, probably created in core.c
*
@@ -226,6 +251,8 @@ static int wilco_ec_debugfs_probe(struct platform_device *pdev)
debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw);
debugfs_create_file("h1_gpio", 0444, debug_info->dir, ec,
&fops_h1_gpio);
+ debugfs_create_file("test_event", 0200, debug_info->dir, ec,
+ &fops_test_event);
return 0;
}
diff --git a/drivers/platform/chrome/wilco_ec/keyboard_leds.c b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
new file mode 100644
index 000000000000..bb0edf51dfda
--- /dev/null
+++ b/drivers/platform/chrome/wilco_ec/keyboard_leds.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Keyboard backlight LED driver for the Wilco Embedded Controller
+ *
+ * Copyright 2019 Google LLC
+ *
+ * Since the EC will never change the backlight level of its own accord,
+ * we don't need to implement a brightness_get() method.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/platform_data/wilco-ec.h>
+#include <linux/slab.h>
+
+#define WILCO_EC_COMMAND_KBBL 0x75
+#define WILCO_KBBL_MODE_FLAG_PWM BIT(1) /* Set brightness by percent. */
+#define WILCO_KBBL_DEFAULT_BRIGHTNESS 0
+
+struct wilco_keyboard_leds {
+ struct wilco_ec_device *ec;
+ struct led_classdev keyboard;
+};
+
+enum wilco_kbbl_subcommand {
+ WILCO_KBBL_SUBCMD_GET_FEATURES = 0x00,
+ WILCO_KBBL_SUBCMD_GET_STATE = 0x01,
+ WILCO_KBBL_SUBCMD_SET_STATE = 0x02,
+};
+
+/**
+ * struct wilco_keyboard_leds_msg - Message to/from EC for keyboard LED control.
+ * @command: Always WILCO_EC_COMMAND_KBBL.
+ * @status: Set by EC to 0 on success, 0xFF on failure.
+ * @subcmd: One of enum wilco_kbbl_subcommand.
+ * @reserved3: Should be 0.
+ * @mode: Bit flags selecting the mode; we use WILCO_KBBL_MODE_FLAG_PWM.
+ * @reserved5to8: Should be 0.
+ * @percent: Brightness in 0-100. Only meaningful in PWM mode.
+ * @reserved10to15: Should be 0.
+ */
+struct wilco_keyboard_leds_msg {
+ u8 command;
+ u8 status;
+ u8 subcmd;
+ u8 reserved3;
+ u8 mode;
+ u8 reserved5to8[4];
+ u8 percent;
+ u8 reserved10to15[6];
+} __packed;
+
+/* Send a request, get a response, and check that the response is good. */
+static int send_kbbl_msg(struct wilco_ec_device *ec,
+ struct wilco_keyboard_leds_msg *request,
+ struct wilco_keyboard_leds_msg *response)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = request;
+ msg.request_size = sizeof(*request);
+ msg.response_data = response;
+ msg.response_size = sizeof(*response);
+
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed sending keyboard LEDs command: %d", ret);
+ return ret;
+ }
+
+ if (response->status) {
+ dev_err(ec->dev,
+ "EC reported failure sending keyboard LEDs command: %d",
+ response->status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int set_kbbl(struct wilco_ec_device *ec, enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_SET_STATE;
+ request.mode = WILCO_KBBL_MODE_FLAG_PWM;
+ request.percent = brightness;
+
+ return send_kbbl_msg(ec, &request, &response);
+}
+
+static int kbbl_exist(struct wilco_ec_device *ec, bool *exists)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_FEATURES;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ *exists = response.status != 0xFF;
+
+ return 0;
+}
+
+/**
+ * kbbl_init() - Initialize the state of the keyboard backlight.
+ * @ec: EC device to talk to.
+ *
+ * Gets the current brightness, ensuring that the BIOS already initialized the
+ * backlight to PWM mode. If not in PWM mode, then the current brightness is
+ * meaningless, so set the brightness to WILCO_KBBL_DEFAULT_BRIGHTNESS.
+ *
+ * Return: Final brightness of the keyboard, or negative error code on failure.
+ */
+static int kbbl_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds_msg request;
+ struct wilco_keyboard_leds_msg response;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.command = WILCO_EC_COMMAND_KBBL;
+ request.subcmd = WILCO_KBBL_SUBCMD_GET_STATE;
+
+ ret = send_kbbl_msg(ec, &request, &response);
+ if (ret < 0)
+ return ret;
+
+ if (response.mode & WILCO_KBBL_MODE_FLAG_PWM)
+ return response.percent;
+
+ ret = set_kbbl(ec, WILCO_KBBL_DEFAULT_BRIGHTNESS);
+ if (ret < 0)
+ return ret;
+
+ return WILCO_KBBL_DEFAULT_BRIGHTNESS;
+}
+
+static int wilco_keyboard_leds_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct wilco_keyboard_leds *wkl =
+ container_of(cdev, struct wilco_keyboard_leds, keyboard);
+ return set_kbbl(wkl->ec, brightness);
+}
+
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec)
+{
+ struct wilco_keyboard_leds *wkl;
+ bool leds_exist;
+ int ret;
+
+ ret = kbbl_exist(ec, &leds_exist);
+ if (ret < 0) {
+ dev_err(ec->dev,
+ "Failed checking keyboard LEDs support: %d", ret);
+ return ret;
+ }
+ if (!leds_exist)
+ return 0;
+
+ wkl = devm_kzalloc(ec->dev, sizeof(*wkl), GFP_KERNEL);
+ if (!wkl)
+ return -ENOMEM;
+
+ wkl->ec = ec;
+ wkl->keyboard.name = "platform::kbd_backlight";
+ wkl->keyboard.max_brightness = 100;
+ wkl->keyboard.flags = LED_CORE_SUSPENDRESUME;
+ wkl->keyboard.brightness_set_blocking = wilco_keyboard_leds_set;
+ ret = kbbl_init(ec);
+ if (ret < 0)
+ return ret;
+ wkl->keyboard.brightness = ret;
+
+ return devm_led_classdev_register(ec->dev, &wkl->keyboard);
+}
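For reference, the request bytes that set_kbbl() above would produce for 50% brightness, following the struct layout documented earlier (a sketch derived from the definitions above):

/*
 * Hypothetical SET_STATE request for 50% brightness (16 bytes):
 *   byte 0: command = 0x75 (WILCO_EC_COMMAND_KBBL)
 *   byte 2: subcmd  = 0x02 (WILCO_KBBL_SUBCMD_SET_STATE)
 *   byte 4: mode    = 0x02 (WILCO_KBBL_MODE_FLAG_PWM, BIT(1))
 *   byte 9: percent = 0x32 (50)
 *   all remaining bytes zero
 */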
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 3b86a21005d3..f0d174b6bb21 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -23,6 +23,26 @@ struct boot_on_ac_request {
u8 reserved7;
} __packed;
+#define CMD_USB_CHARGE 0x39
+
+enum usb_charge_op {
+ USB_CHARGE_GET = 0,
+ USB_CHARGE_SET = 1,
+};
+
+struct usb_charge_request {
+ u8 cmd; /* Always CMD_USB_CHARGE */
+ u8 reserved;
+ u8 op; /* One of enum usb_charge_op */
+ u8 val; /* When setting, either 0 or 1 */
+} __packed;
+
+struct usb_charge_response {
+ u8 reserved;
+ u8 status; /* Set by EC to 0 on success, other value on failure */
+ u8 val; /* When getting, set by EC to either 0 or 1 */
+} __packed;
+
#define CMD_EC_INFO 0x38
enum get_ec_info_op {
CMD_GET_EC_LABEL = 0,
@@ -131,12 +151,83 @@ static ssize_t model_number_show(struct device *dev,
static DEVICE_ATTR_RO(model_number);
+static int send_usb_charge(struct wilco_ec_device *ec,
+ struct usb_charge_request *rq,
+ struct usb_charge_response *rs)
+{
+ struct wilco_ec_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = WILCO_EC_MSG_LEGACY;
+ msg.request_data = rq;
+ msg.request_size = sizeof(*rq);
+ msg.response_data = rs;
+ msg.response_size = sizeof(*rs);
+ ret = wilco_ec_mailbox(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (rs->status)
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t usb_charge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_GET;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", rs.val);
+}
+
+static ssize_t usb_charge_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wilco_ec_device *ec = dev_get_drvdata(dev);
+ struct usb_charge_request rq;
+ struct usb_charge_response rs;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ memset(&rq, 0, sizeof(rq));
+ rq.cmd = CMD_USB_CHARGE;
+ rq.op = USB_CHARGE_SET;
+ rq.val = val;
+
+ ret = send_usb_charge(ec, &rq, &rs);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(usb_charge);
static struct attribute *wilco_dev_attrs[] = {
&dev_attr_boot_on_ac.attr,
&dev_attr_build_date.attr,
&dev_attr_build_revision.attr,
&dev_attr_model_number.attr,
+ &dev_attr_usb_charge.attr,
&dev_attr_version.attr,
NULL,
};
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index b9d03c33d8dc..1176d543191a 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -406,8 +406,8 @@ static int telem_device_remove(struct platform_device *pdev)
struct telem_device_data *dev_data = platform_get_drvdata(pdev);
cdev_device_del(&dev_data->cdev, &dev_data->dev);
- put_device(&dev_data->dev);
ida_simple_remove(&telem_ida, MINOR(dev_data->dev.devt));
+ put_device(&dev_data->dev);
return 0;
}
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 77b35df3a801..f3d09b1631e3 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
- depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index 530fe7e31397..56e037623b29 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -41,7 +41,19 @@ config MLXBF_TMFIFO
depends on VIRTIO_CONSOLE && VIRTIO_NET
help
Say y here to enable TmFifo support. The TmFifo driver provides
- platform driver support for the TmFifo which supports console
- and networking based on the virtio framework.
+ platform driver support for the TmFifo which supports console
+ and networking based on the virtio framework.
+
+config MLXBF_BOOTCTL
+ tristate "Mellanox BlueField Firmware Boot Control driver"
+ depends on ARM64
+ depends on ACPI
+ help
+ The Mellanox BlueField firmware implements functionality to
+ request swapping the primary and alternate eMMC boot partition,
+ and to set up a watchdog that can undo that swap if the system
+ does not boot up correctly. This driver provides sysfs access
+ for userspace tools, to be used in conjunction with the eMMC
+ device driver to do the necessary initial swap of the boot partition.
endif # MELLANOX_PLATFORM
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index a229bda18fd9..499623ccf2fe 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -3,6 +3,7 @@
# Makefile for linux/drivers/platform/mellanox
# Mellanox Platform-Specific Drivers
#
+obj-$(CONFIG_MLXBF_BOOTCTL) += mlxbf-bootctl.o
obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
new file mode 100644
index 000000000000..61753b648506
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Mellanox boot control driver
+ *
+ * This driver provides a sysfs interface for systems management
+ * software to manage reset-time actions.
+ *
+ * Copyright (C) 2019 Mellanox Technologies
+ */
+
+#include <linux/acpi.h>
+#include <linux/arm-smccc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mlxbf-bootctl.h"
+
+#define MLXBF_BOOTCTL_SB_SECURE_MASK 0x03
+#define MLXBF_BOOTCTL_SB_TEST_MASK 0x0c
+
+#define MLXBF_SB_KEY_NUM 4
+
+/* UUID used to probe ATF service. */
+static const char *mlxbf_bootctl_svc_uuid_str =
+ "89c036b4-e7d7-11e6-8797-001aca00bfc4";
+
+struct mlxbf_bootctl_name {
+ u32 value;
+ const char *name;
+};
+
+static struct mlxbf_bootctl_name boot_names[] = {
+ { MLXBF_BOOTCTL_EXTERNAL, "external" },
+ { MLXBF_BOOTCTL_EMMC, "emmc" },
+ { MLNX_BOOTCTL_SWAP_EMMC, "swap_emmc" },
+ { MLXBF_BOOTCTL_EMMC_LEGACY, "emmc_legacy" },
+ { MLXBF_BOOTCTL_NONE, "none" },
+};
+
+static const char * const mlxbf_bootctl_lifecycle_states[] = {
+ [0] = "Production",
+ [1] = "GA Secured",
+ [2] = "GA Non-Secured",
+ [3] = "RMA",
+};
+
+/* ARM SMC calls are atomic, so no locking is needed. */
+static int mlxbf_bootctl_smc(unsigned int smc_op, int smc_arg)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(smc_op, smc_arg, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+/* Return the action as an integer, or an error code. */
+static int mlxbf_bootctl_reset_action_to_val(const char *action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(boot_names); i++)
+ if (sysfs_streq(boot_names[i].name, action))
+ return boot_names[i].value;
+
+ return -EINVAL;
+}
+
+/* Return the action as a string. */
+static const char *mlxbf_bootctl_action_to_string(int action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(boot_names); i++)
+ if (boot_names[i].value == action)
+ return boot_names[i].name;
+
+ return "invalid action";
+}
+
+static ssize_t post_reset_wdog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_POST_RESET_WDOG, 0);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t post_reset_wdog_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_POST_RESET_WDOG, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t mlxbf_bootctl_show(int smc_op, char *buf)
+{
+ int action;
+
+ action = mlxbf_bootctl_smc(smc_op, 0);
+ if (action < 0)
+ return action;
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_action_to_string(action));
+}
+
+static int mlxbf_bootctl_store(int smc_op, const char *buf, size_t count)
+{
+ int ret, action;
+
+ action = mlxbf_bootctl_reset_action_to_val(buf);
+ if (action < 0)
+ return action;
+
+ ret = mlxbf_bootctl_smc(smc_op, action);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t reset_action_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_RESET_ACTION, buf);
+}
+
+static ssize_t reset_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_RESET_ACTION, buf, count);
+}
+
+static ssize_t second_reset_action_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return mlxbf_bootctl_show(MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION, buf);
+}
+
+static ssize_t second_reset_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return mlxbf_bootctl_store(MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION, buf,
+ count);
+}
+
+static ssize_t lifecycle_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int lc_state;
+
+ lc_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE);
+ if (lc_state < 0)
+ return lc_state;
+
+ lc_state &=
+ MLXBF_BOOTCTL_SB_TEST_MASK | MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ /*
+ * If the test bits are set, we specify that the current state may be
+ * due to using the test bits.
+ */
+ if (lc_state & MLXBF_BOOTCTL_SB_TEST_MASK) {
+ lc_state &= MLXBF_BOOTCTL_SB_SECURE_MASK;
+
+ return sprintf(buf, "%s(test)\n",
+ mlxbf_bootctl_lifecycle_states[lc_state]);
+ }
+
+ return sprintf(buf, "%s\n", mlxbf_bootctl_lifecycle_states[lc_state]);
+}
+
+static ssize_t secure_boot_fuse_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int burnt, valid, key, key_state, buf_len = 0, upper_key_used = 0;
+ const char *status;
+
+ key_state = mlxbf_bootctl_smc(MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS,
+ MLXBF_BOOTCTL_FUSE_STATUS_KEYS);
+ if (key_state < 0)
+ return key_state;
+
+ /*
+ * key_state contains the bits for 4 Key versions, loaded from eFuses
+ * after a hard reset. Lower 4 bits are a thermometer code indicating
+ * key programming has started for key n (0000 = none, 0001 = version 0,
+ * 0011 = version 1, 0111 = version 2, 1111 = version 3). Upper 4 bits
+ * are a thermometer code indicating key programming has completed for
+ * key n (same encodings as the start bits). This allows for detection
+ * of an interruption in the programming process which has left the key
+ * partially programmed (and thus invalid). The process is to burn the
+ * eFuse for the new key start bit, burn the key eFuses, then burn the
+ * eFuse for the new key complete bit.
+ *
+ * For example 0000_0000: no key valid, 0001_0001: key version 0 valid,
+ * 0011_0011: key 1 version valid, 0011_0111: key version 2 started
+ * programming but did not complete, etc. The most recent key for which
+ * both start and complete bit is set is loaded. On soft reset, this
+ * register is not modified.
+ */
+ for (key = MLXBF_SB_KEY_NUM - 1; key >= 0; key--) {
+ burnt = key_state & BIT(key);
+ valid = key_state & BIT(key + MLXBF_SB_KEY_NUM);
+
+ if (burnt && valid)
+ upper_key_used = 1;
+
+ if (upper_key_used) {
+ if (burnt)
+ status = valid ? "Used" : "Wasted";
+ else
+ status = valid ? "Invalid" : "Skipped";
+ } else {
+ if (burnt)
+ status = valid ? "InUse" : "Incomplete";
+ else
+ status = valid ? "Invalid" : "Free";
+ }
+ buf_len += sprintf(buf + buf_len, "%d:%s ", key, status);
+ }
+ buf_len += sprintf(buf + buf_len, "\n");
+
+ return buf_len;
+}
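A worked example of the thermometer encoding described in the comment above (fuse value assumed for illustration):

/*
 * Hypothetical key_state = 0x37 (binary 0011_0111):
 *   start bits (low nibble)  0111 -> programming started for keys 0-2
 *   done bits (high nibble)  0011 -> programming completed for keys 0-1
 * Key 2 was interrupted mid-programming and is invalid; key 1 is the
 * most recent key with both bits set, so key 1 is the one loaded.
 */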
+
+static DEVICE_ATTR_RW(post_reset_wdog);
+static DEVICE_ATTR_RW(reset_action);
+static DEVICE_ATTR_RW(second_reset_action);
+static DEVICE_ATTR_RO(lifecycle_state);
+static DEVICE_ATTR_RO(secure_boot_fuse_state);
+
+static struct attribute *mlxbf_bootctl_attrs[] = {
+ &dev_attr_post_reset_wdog.attr,
+ &dev_attr_reset_action.attr,
+ &dev_attr_second_reset_action.attr,
+ &dev_attr_lifecycle_state.attr,
+ &dev_attr_secure_boot_fuse_state.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(mlxbf_bootctl);
+
+static const struct acpi_device_id mlxbf_bootctl_acpi_ids[] = {
+ {"MLNXBF04", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, mlxbf_bootctl_acpi_ids);
+
+static bool mlxbf_bootctl_guid_match(const guid_t *guid,
+ const struct arm_smccc_res *res)
+{
+ guid_t id = GUID_INIT(res->a0, res->a1, res->a1 >> 16,
+ res->a2, res->a2 >> 8, res->a2 >> 16,
+ res->a2 >> 24, res->a3, res->a3 >> 8,
+ res->a3 >> 16, res->a3 >> 24);
+
+ return guid_equal(guid, &id);
+}
+
+static int mlxbf_bootctl_probe(struct platform_device *pdev)
+{
+ struct arm_smccc_res res = { 0 };
+ guid_t guid;
+ int ret;
+
+ /* Ensure we have the UUID we expect for this service. */
+ arm_smccc_smc(MLXBF_BOOTCTL_SIP_SVC_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+ guid_parse(mlxbf_bootctl_svc_uuid_str, &guid);
+ if (!mlxbf_bootctl_guid_match(&guid, &res))
+ return -ENODEV;
+
+ /*
+ * When the watchdog is used, it sets the boot mode to
+ * MLXBF_BOOTCTL_SWAP_EMMC in case of boot failures. However, it doesn't
+ * clear the state if there is no failure. Restore the default boot mode
+ * here to avoid any unnecessary boot partition swapping.
+ */
+ ret = mlxbf_bootctl_smc(MLXBF_BOOTCTL_SET_RESET_ACTION,
+ MLXBF_BOOTCTL_EMMC);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "Unable to reset the EMMC boot mode\n");
+
+ return 0;
+}
+
+static struct platform_driver mlxbf_bootctl_driver = {
+ .probe = mlxbf_bootctl_probe,
+ .driver = {
+ .name = "mlxbf-bootctl",
+ .groups = mlxbf_bootctl_groups,
+ .acpi_match_table = mlxbf_bootctl_acpi_ids,
+ }
+};
+
+module_platform_driver(mlxbf_bootctl_driver);
+
+MODULE_DESCRIPTION("Mellanox boot control driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mellanox Technologies");
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.h b/drivers/platform/mellanox/mlxbf-bootctl.h
new file mode 100644
index 000000000000..148fdb43b435
--- /dev/null
+++ b/drivers/platform/mellanox/mlxbf-bootctl.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLXBF_BOOTCTL_H__
+#define __MLXBF_BOOTCTL_H__
+
+/*
+ * Request that the on-chip watchdog be enabled, or disabled, after
+ * the next chip soft reset. This call does not affect the current
+ * status of the on-chip watchdog. If non-zero, the argument
+ * specifies the watchdog interval in seconds. If zero, the watchdog
+ * will not be enabled after the next soft reset. Non-zero errors are
+ * returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_POST_RESET_WDOG 0x82000000
+
+/*
+ * Query the status which has been requested for the on-chip watchdog
+ * after the next chip soft reset. Returns the interval as set by
+ * MLXBF_BOOTCTL_SET_POST_RESET_WDOG.
+ */
+#define MLXBF_BOOTCTL_GET_POST_RESET_WDOG 0x82000001
+
+/*
+ * Request that a specific boot action be taken at the next soft
+ * reset. By default, the boot action is set by external chip pins,
+ * which are sampled on hard reset. Note that the boot action
+ * requested by this call will persist on subsequent resets unless
+ * this service, or the MLNX_SET_SECOND_RESET_ACTION service, is
+ * invoked. See below for the available MLNX_BOOT_xxx parameter
+ * values. Non-zero errors are returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_RESET_ACTION 0x82000002
+
+/*
+ * Return the specific boot action which will be taken at the next
+ * soft reset. Returns the reset action (see below for the parameter
+ * values for MLXBF_BOOTCTL_SET_RESET_ACTION).
+ */
+#define MLXBF_BOOTCTL_GET_RESET_ACTION 0x82000003
+
+/*
+ * Request that a specific boot action be taken at the soft reset
+ * after the next soft reset. For a specified valid boot mode, the
+ * effect of this call is identical to that of invoking
+ * MLXBF_BOOTCTL_SET_RESET_ACTION after the next chip soft reset; in
+ * particular, after that reset, the action for the now next reset can
+ * be queried with MLXBF_BOOTCTL_GET_RESET_ACTION and modified with
+ * MLXBF_BOOTCTL_SET_RESET_ACTION. You may also specify the parameter as
+ * MLNX_BOOT_NONE, which is equivalent to specifying that no call to
+ * MLXBF_BOOTCTL_SET_RESET_ACTION be taken after the next chip soft reset.
+ * This call does not affect the action to be taken at the next soft
+ * reset. Non-zero errors are returned as documented below.
+ */
+#define MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION 0x82000004
+
+/*
+ * Return the specific boot action which will be taken at the soft
+ * reset after the next soft reset; this will be one of the valid
+ * actions for MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION.
+ */
+#define MLXBF_BOOTCTL_GET_SECOND_RESET_ACTION 0x82000005
+
+/*
+ * Return the fuse status of the current chip. The caller should specify
+ * with the second argument if the state of the lifecycle fuses or the
+ * version of secure boot fuse keys left should be returned.
+ */
+#define MLXBF_BOOTCTL_GET_TBB_FUSE_STATUS 0x82000006
+
+/* Reset eMMC by programming the RST_N register. */
+#define MLXBF_BOOTCTL_SET_EMMC_RST_N 0x82000007
+
+#define MLXBF_BOOTCTL_GET_DIMM_INFO 0x82000008
+
+/* SMC function IDs for SiP Service queries */
+#define MLXBF_BOOTCTL_SIP_SVC_CALL_COUNT 0x8200ff00
+#define MLXBF_BOOTCTL_SIP_SVC_UID 0x8200ff01
+#define MLXBF_BOOTCTL_SIP_SVC_VERSION 0x8200ff03
+
+/* ARM Standard Service Calls version numbers */
+#define MLXBF_BOOTCTL_SVC_VERSION_MAJOR 0x0
+#define MLXBF_BOOTCTL_SVC_VERSION_MINOR 0x2
+
+/* Number of svc calls defined. */
+#define MLXBF_BOOTCTL_NUM_SVC_CALLS 12
+
+/* Valid reset actions for MLXBF_BOOTCTL_SET_RESET_ACTION. */
+#define MLXBF_BOOTCTL_EXTERNAL 0 /* Not boot from eMMC */
+#define MLXBF_BOOTCTL_EMMC 1 /* From primary eMMC boot partition */
+#define MLNX_BOOTCTL_SWAP_EMMC 2 /* Swap eMMC boot partitions and reboot */
+#define MLXBF_BOOTCTL_EMMC_LEGACY 3 /* From primary eMMC in legacy mode */
+
+/* Valid arguments for requesting the fuse status. */
+#define MLXBF_BOOTCTL_FUSE_STATUS_LIFECYCLE 0 /* Return lifecycle status. */
+#define MLXBF_BOOTCTL_FUSE_STATUS_KEYS 1 /* Return secure boot key status */
+
+/* Additional value to disable the MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION. */
+#define MLXBF_BOOTCTL_NONE 0x7fffffff /* Don't change next boot action */
+
+#endif /* __MLXBF_BOOTCTL_H__ */
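A minimal hypothetical invocation of one of these service calls, modeled on mlxbf_bootctl_smc() in the driver above (sketch, not part of this patch):

/* Sketch: request an eMMC boot-partition swap at the next reset. */
struct arm_smccc_res res;

arm_smccc_smc(MLXBF_BOOTCTL_SET_RESET_ACTION, MLNX_BOOTCTL_SWAP_EMMC,
	      0, 0, 0, 0, 0, 0, &res);
if ((int)res.a0 < 0)
	pr_warn("boot-partition swap request failed: %ld\n",
		(long)res.a0);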
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ae21d08c65e8..27d5b40fb717 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -94,7 +94,6 @@ config ASUS_LAPTOP
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_SPARSEKMAP
- select INPUT_POLLDEV
---help---
This is a driver for Asus laptops, Lenovo SL and the Pegatron
Lucid tablet. It may also support some MEDION, JVC or VICTOR
@@ -259,7 +258,7 @@ config DELL_RBU
DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
supporting application to communicate with the BIOS regarding the new
image for the image update to take effect.
- See <file:Documentation/driver-api/dell_rbu.rst> for more details on the driver.
+ See <file:Documentation/admin-guide/dell_rbu.rst> for more details on the driver.
config FUJITSU_LAPTOP
@@ -623,7 +622,6 @@ config THINKPAD_ACPI_HOTKEY_POLL
config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
depends on INPUT
- select INPUT_POLLDEV
help
This driver provides support for the IBM Hard Drive Active Protection
System (hdaps), which provides an accelerometer and other misc. data.
@@ -806,7 +804,6 @@ config PEAQ_WMI
tristate "PEAQ 2-in-1 WMI hotkey driver"
depends on ACPI_WMI
depends on INPUT
- select INPUT_POLLDEV
help
Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
@@ -834,7 +831,6 @@ config ACPI_TOSHIBA
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
depends on IIO
- select INPUT_POLLDEV
select INPUT_SPARSEKMAP
---help---
This driver adds support for access to certain system settings
@@ -931,14 +927,20 @@ config INTEL_CHT_INT33FE
This driver add support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
+ There are two possible kinds of INT33FE ACPI device: one for hardware
+ with a USB Type-C connector and one with a Micro-B connector. This
+ driver supports both.
+
The INT33FE ACPI device has a CRS table with I2cSerialBusV2
- resources for 3 devices: Maxim MAX17047 Fuel Gauge Controller,
+ resources for Fuel Gauge Controller and (in the Type-C variant)
FUSB302 USB Type-C Controller and PI3USB30532 USB switch.
This driver instantiates i2c-clients for these, so that standard
i2c drivers for these chips can bind to them.
If you enable this driver it is advised to also select
- CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m.
+ CONFIG_BATTERY_BQ27XXX=m or CONFIG_BATTERY_BQ27XXX_I2C=m for Micro-B
+ device and CONFIG_TYPEC_FUSB302=m and CONFIG_BATTERY_MAX17042=m
+ for Type-C device.
+
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
@@ -1305,7 +1307,8 @@ config INTEL_ATOMISP2_PM
will be called intel_atomisp2_pm.
config HUAWEI_WMI
- tristate "Huawei WMI hotkeys driver"
+ tristate "Huawei WMI laptop extras driver"
+ depends on ACPI_BATTERY
depends on ACPI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
@@ -1314,9 +1317,8 @@ config HUAWEI_WMI
select LEDS_TRIGGER_AUDIO
select NEW_LEDS
help
- This driver provides support for Huawei WMI hotkeys.
- It enables the missing keys and adds support to the micmute
- LED found on some of these laptops.
+ This driver provides support for Huawei WMI hotkeys, battery charge
+ control, fn-lock, mic-mute LED, and other extra features.
To compile this driver as a module, choose M here: the module
will be called huawei-wmi.
@@ -1337,6 +1339,19 @@ config PCENGINES_APU2
source "drivers/platform/x86/intel_speed_select_if/Kconfig"
+config SYSTEM76_ACPI
+ tristate "System76 ACPI Driver"
+ depends on ACPI
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ help
+ This is a driver for System76 laptops running open firmware. It adds
+ support for Fn-Fx key combinations, keyboard backlight, and airplane mode
+ LEDs.
+
+ If you have a System76 laptop running open firmware, say Y or M here.
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 415104033060..42d85a00be4e 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -61,6 +61,10 @@ obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
obj-$(CONFIG_INTEL_CHT_INT33FE) += intel_cht_int33fe.o
+intel_cht_int33fe-objs := intel_cht_int33fe_common.o \
+ intel_cht_int33fe_typec.o \
+ intel_cht_int33fe_microb.o
+
obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
@@ -100,3 +104,4 @@ obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/
+obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 5ea8da5f0f70..8cc86f4e3ac1 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -4,7 +4,7 @@
* of the aspire one netbook, turns on/off the fan
* as soon as the upper/lower threshold is reached.
*
- * (C) 2009 - Peter Feuerer peter (a) piie.net
+ * (C) 2009 - Peter Kaestle peter (a) piie.net
* http://piie.net
* 2009 Borislav Petkov bp (a) alien8.de
*
@@ -224,6 +224,8 @@ static const struct bios_settings bios_tbl[] __initconst = {
{"Acer", "Aspire 5739G", "V1.3311", 0x55, 0x58, {0x20, 0x00}, 0},
/* Acer TravelMate 7730 */
{"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00}, 0},
+ /* Acer Aspire 7551 */
+ {"Acer", "Aspire 7551", "V1.18", 0x93, 0xa8, {0x14, 0x04}, 1},
/* Acer TravelMate TM8573T */
{"Acer", "TM8573T", "V1.13", 0x93, 0xa8, {0x14, 0x04}, 1},
/* Gateway */
@@ -801,7 +803,7 @@ static void __exit acerhdf_exit(void)
}
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Peter Feuerer");
+MODULE_AUTHOR("Peter Kaestle");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
@@ -815,6 +817,7 @@ MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5739G:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*One*753:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*5315:");
MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*7551:");
MODULE_ALIAS("dmi:*:*Acer*:TM8573T:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index ca65e1039f92..a666fbc2e73b 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -34,7 +34,6 @@
#include <linux/uaccess.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
-#include <linux/input-polldev.h>
#include <linux/rfkill.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -244,7 +243,7 @@ struct asus_laptop {
struct input_dev *inputdev;
struct key_entry *keymap;
- struct input_polled_dev *pega_accel_poll;
+ struct input_dev *pega_accel_poll;
struct asus_led wled;
struct asus_led bled;
@@ -446,9 +445,9 @@ static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method)
return clamp_val((short)val, -PEGA_ACC_CLAMP, PEGA_ACC_CLAMP);
}
-static void pega_accel_poll(struct input_polled_dev *ipd)
+static void pega_accel_poll(struct input_dev *input)
{
- struct device *parent = ipd->input->dev.parent;
+ struct device *parent = input->dev.parent;
struct asus_laptop *asus = dev_get_drvdata(parent);
/* In some cases, the very first call to poll causes a
@@ -457,10 +456,10 @@ static void pega_accel_poll(struct input_polled_dev *ipd)
* device, and perhaps a firmware bug. Fake the first report. */
if (!asus->pega_acc_live) {
asus->pega_acc_live = true;
- input_report_abs(ipd->input, ABS_X, 0);
- input_report_abs(ipd->input, ABS_Y, 0);
- input_report_abs(ipd->input, ABS_Z, 0);
- input_sync(ipd->input);
+ input_report_abs(input, ABS_X, 0);
+ input_report_abs(input, ABS_Y, 0);
+ input_report_abs(input, ABS_Z, 0);
+ input_sync(input);
return;
}
@@ -471,25 +470,24 @@ static void pega_accel_poll(struct input_polled_dev *ipd)
/* Note transform, convert to "right/up/out" in the native
* landscape orientation (i.e. the vector is the direction of
* "real up" in the device's cartiesian coordinates). */
- input_report_abs(ipd->input, ABS_X, -asus->pega_acc_x);
- input_report_abs(ipd->input, ABS_Y, -asus->pega_acc_y);
- input_report_abs(ipd->input, ABS_Z, asus->pega_acc_z);
- input_sync(ipd->input);
+ input_report_abs(input, ABS_X, -asus->pega_acc_x);
+ input_report_abs(input, ABS_Y, -asus->pega_acc_y);
+ input_report_abs(input, ABS_Z, asus->pega_acc_z);
+ input_sync(input);
}
static void pega_accel_exit(struct asus_laptop *asus)
{
if (asus->pega_accel_poll) {
- input_unregister_polled_device(asus->pega_accel_poll);
- input_free_polled_device(asus->pega_accel_poll);
+ input_unregister_device(asus->pega_accel_poll);
+ asus->pega_accel_poll = NULL;
}
- asus->pega_accel_poll = NULL;
}
static int pega_accel_init(struct asus_laptop *asus)
{
int err;
- struct input_polled_dev *ipd;
+ struct input_dev *input;
if (!asus->is_pega_lucid)
return -ENODEV;
@@ -499,37 +497,39 @@ static int pega_accel_init(struct asus_laptop *asus)
acpi_check_handle(asus->handle, METHOD_XLRZ, NULL))
return -ENODEV;
- ipd = input_allocate_polled_device();
- if (!ipd)
+ input = input_allocate_device();
+ if (!input)
return -ENOMEM;
- ipd->poll = pega_accel_poll;
- ipd->poll_interval = 125;
- ipd->poll_interval_min = 50;
- ipd->poll_interval_max = 2000;
-
- ipd->input->name = PEGA_ACCEL_DESC;
- ipd->input->phys = PEGA_ACCEL_NAME "/input0";
- ipd->input->dev.parent = &asus->platform_device->dev;
- ipd->input->id.bustype = BUS_HOST;
+ input->name = PEGA_ACCEL_DESC;
+ input->phys = PEGA_ACCEL_NAME "/input0";
+ input->dev.parent = &asus->platform_device->dev;
+ input->id.bustype = BUS_HOST;
- set_bit(EV_ABS, ipd->input->evbit);
- input_set_abs_params(ipd->input, ABS_X,
+ input_set_abs_params(input, ABS_X,
-PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
- input_set_abs_params(ipd->input, ABS_Y,
+ input_set_abs_params(input, ABS_Y,
-PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
- input_set_abs_params(ipd->input, ABS_Z,
+ input_set_abs_params(input, ABS_Z,
-PEGA_ACC_CLAMP, PEGA_ACC_CLAMP, 0, 0);
- err = input_register_polled_device(ipd);
+ err = input_setup_polling(input, pega_accel_poll);
if (err)
goto exit;
- asus->pega_accel_poll = ipd;
+ input_set_poll_interval(input, 125);
+ input_set_min_poll_interval(input, 50);
+ input_set_max_poll_interval(input, 2000);
+
+ err = input_register_device(input);
+ if (err)
+ goto exit;
+
+ asus->pega_accel_poll = input;
return 0;
exit:
- input_free_polled_device(ipd);
+ input_free_device(input);
return err;
}
@@ -1550,8 +1550,7 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
/* Accelerometer "coarse orientation change" event */
if (asus->pega_accel_poll && event == 0xEA) {
- kobject_uevent(&asus->pega_accel_poll->input->dev.kobj,
- KOBJ_CHANGE);
+ kobject_uevent(&asus->pega_accel_poll->dev.kobj, KOBJ_CHANGE);
return ;
}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index d27be2836bc2..74e988f839e8 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -33,6 +33,7 @@
struct quirk_entry {
bool touchpad_led;
+ bool kbd_led_not_present;
bool kbd_led_levels_off_1;
bool kbd_missing_ac_tag;
@@ -73,6 +74,10 @@ static struct quirk_entry quirk_dell_latitude_e6410 = {
.kbd_led_levels_off_1 = true,
};
+static struct quirk_entry quirk_dell_inspiron_1012 = {
+ .kbd_led_not_present = true,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
@@ -310,6 +315,24 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_latitude_e6410,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 1012",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ },
+ .driver_data = &quirk_dell_inspiron_1012,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 1018",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1018"),
+ },
+ .driver_data = &quirk_dell_inspiron_1012,
+ },
{ }
};
@@ -1493,6 +1516,9 @@ static void kbd_init(void)
{
int ret;
+ if (quirks && quirks->kbd_led_not_present)
+ return;
+
ret = kbd_init_info();
kbd_init_tokens();
diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell_rbu.c
index 3691391fea6b..7d5453326b43 100644
--- a/drivers/platform/x86/dell_rbu.c
+++ b/drivers/platform/x86/dell_rbu.c
@@ -24,7 +24,7 @@
* on every time the packet data is written. This driver requires an
* application to break the BIOS image into fixed sized packet chunks.
*
- * See Documentation/driver-api/dell_rbu.rst for more info.
+ * See Documentation/admin-guide/dell_rbu.rst for more info.
*/
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 3adcb0de0193..04c4da6692d7 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -18,7 +18,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -59,7 +59,7 @@
#define HDAPS_BOTH_AXES (HDAPS_X_AXIS | HDAPS_Y_AXIS)
static struct platform_device *pdev;
-static struct input_polled_dev *hdaps_idev;
+static struct input_dev *hdaps_idev;
static unsigned int hdaps_invert;
static u8 km_activity;
static int rest_x;
@@ -318,9 +318,8 @@ static void hdaps_calibrate(void)
__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
}
-static void hdaps_mousedev_poll(struct input_polled_dev *dev)
+static void hdaps_mousedev_poll(struct input_dev *input_dev)
{
- struct input_dev *input_dev = dev->input;
int x, y;
mutex_lock(&hdaps_mtx);
@@ -531,7 +530,6 @@ static const struct dmi_system_id hdaps_whitelist[] __initconst = {
static int __init hdaps_init(void)
{
- struct input_dev *idev;
int ret;
if (!dmi_check_system(hdaps_whitelist)) {
@@ -559,31 +557,32 @@ static int __init hdaps_init(void)
if (ret)
goto out_device;
- hdaps_idev = input_allocate_polled_device();
+ hdaps_idev = input_allocate_device();
if (!hdaps_idev) {
ret = -ENOMEM;
goto out_group;
}
- hdaps_idev->poll = hdaps_mousedev_poll;
- hdaps_idev->poll_interval = HDAPS_POLL_INTERVAL;
-
/* initial calibrate for the input device */
hdaps_calibrate();
/* initialize the input class */
- idev = hdaps_idev->input;
- idev->name = "hdaps";
- idev->phys = "isa1600/input0";
- idev->id.bustype = BUS_ISA;
- idev->dev.parent = &pdev->dev;
- idev->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(idev, ABS_X,
+ hdaps_idev->name = "hdaps";
+ hdaps_idev->phys = "isa1600/input0";
+ hdaps_idev->id.bustype = BUS_ISA;
+ hdaps_idev->dev.parent = &pdev->dev;
+ input_set_abs_params(hdaps_idev, ABS_X,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
- input_set_abs_params(idev, ABS_Y,
+ input_set_abs_params(hdaps_idev, ABS_Y,
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
- ret = input_register_polled_device(hdaps_idev);
+ ret = input_setup_polling(hdaps_idev, hdaps_mousedev_poll);
+ if (ret)
+ goto out_idev;
+
+ input_set_poll_interval(hdaps_idev, HDAPS_POLL_INTERVAL);
+
+ ret = input_register_device(hdaps_idev);
if (ret)
goto out_idev;
@@ -591,7 +590,7 @@ static int __init hdaps_init(void)
return 0;
out_idev:
- input_free_polled_device(hdaps_idev);
+ input_free_device(hdaps_idev);
out_group:
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
out_device:
@@ -607,8 +606,7 @@ out:
static void __exit hdaps_exit(void)
{
- input_unregister_polled_device(hdaps_idev);
- input_free_polled_device(hdaps_idev);
+ input_unregister_device(hdaps_idev);
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
platform_device_unregister(pdev);
platform_driver_unregister(&hdaps_driver);
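
A detail worth noting in the hdaps exit path: once input_register_device() succeeds, the input core owns the device, so teardown is a single input_unregister_device() call, which also frees it. input_free_device() stays correct only on error paths taken before registration, roughly:

	err = input_register_device(idev);
	if (err) {
		input_free_device(idev);	/* still ours to free */
		return err;
	}

	/* ... later, at module exit ... */
	input_unregister_device(idev);		/* also drops the final reference */
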
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 6bcbbb375401..9579a706fc08 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -65,7 +65,7 @@ struct bios_args {
u32 command;
u32 commandtype;
u32 datasize;
- u32 data;
+ u8 data[128];
};
enum hp_wmi_commandtype {
@@ -216,7 +216,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
.command = command,
.commandtype = query,
.datasize = insize,
- .data = 0,
+ .data = { 0 },
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -228,7 +228,7 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
- memcpy(&args.data, buffer, insize);
+ memcpy(&args.data[0], buffer, insize);
wmi_evaluate_method(HPWMI_BIOS_GUID, 0, mid, &input, &output);
@@ -380,7 +380,7 @@ static int hp_wmi_rfkill2_refresh(void)
int err, i;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ sizeof(state), sizeof(state));
if (err)
return err;
@@ -778,7 +778,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device)
int err, i;
err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
- 0, sizeof(state));
+ sizeof(state), sizeof(state));
if (err)
return err < 0 ? err : -EINVAL;
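
Two coordinated fixes here: the bios_args argument buffer grows from a single u32 to 128 bytes, and the rfkill2 callers now pass the real input size instead of 0, which gives the driver's WARN_ON(insize > sizeof(args.data)) bound something meaningful to check. The resulting call shape, with the surrounding struct fields abbreviated:

	struct bios_args {
		/* ... command, commandtype, datasize ... */
		u8 data[128];		/* was: u32 data */
	};

	err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state,
				   sizeof(state), sizeof(state));
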
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index 195a7f3638cb..a2d846c4a7ee 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -1,32 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Huawei WMI hotkeys
+ * Huawei WMI laptop extras driver
*
* Copyright (C) 2018 Ayman Bagabas <ayman.bagabas@gmail.com>
*/
#include <linux/acpi.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
#include <linux/wmi.h>
+#include <acpi/battery.h>
/*
* Huawei WMI GUIDs
*/
-#define WMI0_EVENT_GUID "59142400-C6A3-40fa-BADB-8A2652834100"
-#define AMW0_EVENT_GUID "ABBC0F5C-8EA1-11D1-A000-C90629100000"
+#define HWMI_METHOD_GUID "ABBC0F5B-8EA1-11D1-A000-C90629100000"
+#define HWMI_EVENT_GUID "ABBC0F5C-8EA1-11D1-A000-C90629100000"
+/* Legacy GUIDs */
#define WMI0_EXPENSIVE_GUID "39142400-C6A3-40fa-BADB-8A2652834100"
+#define WMI0_EVENT_GUID "59142400-C6A3-40fa-BADB-8A2652834100"
+
+/* HWMI commands */
+
+enum {
+ BATTERY_THRESH_GET = 0x00001103, /* \GBTT */
+ BATTERY_THRESH_SET = 0x00001003, /* \SBTT */
+ FN_LOCK_GET = 0x00000604, /* \GFRS */
+ FN_LOCK_SET = 0x00000704, /* \SFRS */
+ MICMUTE_LED_SET = 0x00000b04, /* \SMLS */
+};
+
+union hwmi_arg {
+ u64 cmd;
+ u8 args[8];
+};
+
+struct quirk_entry {
+ bool battery_reset;
+ bool ec_micmute;
+ bool report_brightness;
+};
+
+static struct quirk_entry *quirks;
-struct huawei_wmi_priv {
- struct input_dev *idev;
+struct huawei_wmi_debug {
+ struct dentry *root;
+ u64 arg;
+};
+
+struct huawei_wmi {
+ bool battery_available;
+ bool fn_lock_available;
+
+ struct huawei_wmi_debug debug;
+ struct input_dev *idev[2];
struct led_classdev cdev;
- acpi_handle handle;
- char *acpi_method;
+ struct device *dev;
+
+ struct mutex wmi_lock;
};
+static struct huawei_wmi *huawei_wmi;
+
static const struct key_entry huawei_wmi_keymap[] = {
{ KE_KEY, 0x281, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, 0x282, { KEY_BRIGHTNESSUP } },
@@ -37,73 +82,614 @@ static const struct key_entry huawei_wmi_keymap[] = {
{ KE_KEY, 0x289, { KEY_WLAN } },
// Huawei |M| key
{ KE_KEY, 0x28a, { KEY_CONFIG } },
- // Keyboard backlight
+ // Keyboard backlit
{ KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
{ KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
{ KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
{ KE_END, 0 }
};
+static int battery_reset = -1;
+static int report_brightness = -1;
+
+module_param(battery_reset, bint, 0444);
+MODULE_PARM_DESC(battery_reset,
+ "Reset battery charge values to (0-0) before disabling it using (0-100)");
+module_param(report_brightness, bint, 0444);
+MODULE_PARM_DESC(report_brightness,
+ "Report brightness keys.");
+
+/* Quirks */
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static struct quirk_entry quirk_unknown = {
+};
+
+static struct quirk_entry quirk_battery_reset = {
+ .battery_reset = true,
+};
+
+static struct quirk_entry quirk_matebook_x = {
+ .ec_micmute = true,
+ .report_brightness = true,
+};
+
+static const struct dmi_system_id huawei_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Huawei MACH-WX9",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MACH-WX9"),
+ },
+ .driver_data = &quirk_battery_reset
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Huawei MateBook X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HUAWEI MateBook X")
+ },
+ .driver_data = &quirk_matebook_x
+ },
+ { }
+};
+
+/* Utils */
+
+static int huawei_wmi_call(struct huawei_wmi *huawei,
+ struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ acpi_status status;
+
+ mutex_lock(&huawei->wmi_lock);
+ status = wmi_evaluate_method(HWMI_METHOD_GUID, 0, 1, in, out);
+ mutex_unlock(&huawei->wmi_lock);
+ if (ACPI_FAILURE(status)) {
+ dev_err(huawei->dev, "Failed to evaluate wmi method\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* HWMI takes a 64 bit input and returns either a package with 2 buffers, one of
+ * 4 bytes and the other of 256 bytes, or one buffer of size 0x104 (260) bytes.
+ * The leading 4 bytes are ignored in both cases: we drop the 4-byte buffer
+ * when a package is returned, or skip the first 4 bytes of the 0x104 buffer.
+ * The first byte of the remaining 0x100 sized buffer has the return status
+ * of every call. If that status is non-zero, we return -ENODEV but still
+ * copy the returned buffer to the given buffer parameter (buf).
+ */
+static int huawei_wmi_cmd(u64 arg, u8 *buf, size_t buflen)
+{
+ struct huawei_wmi *huawei = huawei_wmi;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in;
+ union acpi_object *obj;
+ size_t len;
+ int err, i;
+
+ in.length = sizeof(arg);
+ in.pointer = &arg;
+
+ /* Some models require calling HWMI twice to execute a command. We evaluate
+ * HWMI and if we get a non-zero return status we evaluate it again.
+ */
+ for (i = 0; i < 2; i++) {
+ err = huawei_wmi_call(huawei, &in, &out);
+ if (err)
+ goto fail_cmd;
+
+ obj = out.pointer;
+ if (!obj) {
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ switch (obj->type) {
+ /* Models that implement both "legacy" and HWMI tend to return a 0x104
+ * sized buffer instead of a package of 0x4 and 0x100 buffers.
+ */
+ case ACPI_TYPE_BUFFER:
+ if (obj->buffer.length == 0x104) {
+ // Skip the first 4 bytes.
+ obj->buffer.pointer += 4;
+ len = 0x100;
+ } else {
+ dev_err(huawei->dev, "Bad buffer length, got %d\n", obj->buffer.length);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ break;
+ /* HWMI returns a package with 2 buffer elements, one of 4 bytes and the
+ * other is 256 bytes.
+ */
+ case ACPI_TYPE_PACKAGE:
+ if (obj->package.count != 2) {
+ dev_err(huawei->dev, "Bad package count, got %d\n", obj->package.count);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ obj = &obj->package.elements[1];
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(huawei->dev, "Bad package element type, got %d\n", obj->type);
+ err = -EIO;
+ goto fail_cmd;
+ }
+ len = obj->buffer.length;
+
+ break;
+ /* Shouldn't get here! */
+ default:
+ dev_err(huawei->dev, "Unexpected obj type, got: %d\n", obj->type);
+ err = -EIO;
+ goto fail_cmd;
+ }
+
+ if (!*obj->buffer.pointer)
+ break;
+ }
+
+ err = (*obj->buffer.pointer) ? -ENODEV : 0;
+
+ if (buf) {
+ len = min(buflen, len);
+ memcpy(buf, obj->buffer.pointer, len);
+ }
+
+fail_cmd:
+ kfree(out.pointer);
+ return err;
+}
+
+/* LEDs */
+
static int huawei_wmi_micmute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(led_cdev->dev->parent);
- acpi_status status;
- union acpi_object args[3];
- struct acpi_object_list arg_list = {
- .pointer = args,
- .count = ARRAY_SIZE(args),
- };
-
- args[0].type = args[1].type = args[2].type = ACPI_TYPE_INTEGER;
- args[1].integer.value = 0x04;
-
- if (strcmp(priv->acpi_method, "SPIN") == 0) {
- args[0].integer.value = 0;
- args[2].integer.value = brightness ? 1 : 0;
- } else if (strcmp(priv->acpi_method, "WPIN") == 0) {
- args[0].integer.value = 1;
- args[2].integer.value = brightness ? 0 : 1;
+ /* This is a workaround until the "legacy" interface is implemented. */
+ if (quirks && quirks->ec_micmute) {
+ char *acpi_method;
+ acpi_handle handle;
+ acpi_status status;
+ union acpi_object args[3];
+ struct acpi_object_list arg_list = {
+ .pointer = args,
+ .count = ARRAY_SIZE(args),
+ };
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ args[0].type = args[1].type = args[2].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = 0x04;
+
+ if (acpi_has_method(handle, "SPIN")) {
+ acpi_method = "SPIN";
+ args[0].integer.value = 0;
+ args[2].integer.value = brightness ? 1 : 0;
+ } else if (acpi_has_method(handle, "WPIN")) {
+ acpi_method = "WPIN";
+ args[0].integer.value = 1;
+ args[2].integer.value = brightness ? 0 : 1;
+ } else {
+ return -ENODEV;
+ }
+
+ status = acpi_evaluate_object(handle, acpi_method, &arg_list, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
} else {
+ union hwmi_arg arg;
+
+ arg.cmd = MICMUTE_LED_SET;
+ arg.args[2] = brightness;
+
+ return huawei_wmi_cmd(arg.cmd, NULL, 0);
+ }
+}
+
+static void huawei_wmi_leds_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->cdev.name = "platform::micmute";
+ huawei->cdev.max_brightness = 1;
+ huawei->cdev.brightness_set_blocking = &huawei_wmi_micmute_led_set;
+ huawei->cdev.default_trigger = "audio-micmute";
+ huawei->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ huawei->cdev.dev = dev;
+ huawei->cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ devm_led_classdev_register(dev, &huawei->cdev);
+}
+
+/* Battery protection */
+
+static int huawei_wmi_battery_get(int *start, int *end)
+{
+ u8 ret[0x100];
+ int err, i;
+
+ err = huawei_wmi_cmd(BATTERY_THRESH_GET, ret, 0x100);
+ if (err)
+ return err;
+
+ /* Find the last two non-zero values. Return status is ignored. */
+ i = 0xff;
+ do {
+ if (start)
+ *start = ret[i-1];
+ if (end)
+ *end = ret[i];
+ } while (i > 2 && !ret[i--]);
+
+ return 0;
+}
+
+static int huawei_wmi_battery_set(int start, int end)
+{
+ union hwmi_arg arg;
+ int err;
+
+ if (start < 0 || end < 0 || start > 100 || end > 100)
return -EINVAL;
+
+ arg.cmd = BATTERY_THRESH_SET;
+ arg.args[2] = start;
+ arg.args[3] = end;
+
+ /* This is an edge case where some models turn battery protection
+ * off without changing their threshold values. We clear the
+ * values before turning off protection. Sometimes we need a sleep delay to
+ * make sure these values make their way to EC memory.
+ */
+ if (quirks && quirks->battery_reset && start == 0 && end == 100) {
+ err = huawei_wmi_battery_set(0, 0);
+ if (err)
+ return err;
+
+ msleep(1000);
}
- status = acpi_evaluate_object(priv->handle, priv->acpi_method, &arg_list, NULL);
- if (ACPI_FAILURE(status))
- return -ENXIO;
+ err = huawei_wmi_cmd(arg.cmd, NULL, 0);
+
+ return err;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, start;
+
+ err = huawei_wmi_battery_get(&start, NULL);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", start);
+}
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, end;
+
+ err = huawei_wmi_battery_get(NULL, &end);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", end);
+}
+
+static ssize_t charge_control_thresholds_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(&start, &end);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d %d\n", start, end);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(NULL, &end);
+ if (err)
+ return err;
+
+ if (sscanf(buf, "%d", &start) != 1)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ err = huawei_wmi_battery_get(&start, NULL);
+ if (err)
+ return err;
+
+ if (sscanf(buf, "%d", &end) != 1)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static ssize_t charge_control_thresholds_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int err, start, end;
+
+ if (sscanf(buf, "%d %d", &start, &end) != 2)
+ return -EINVAL;
+
+ err = huawei_wmi_battery_set(start, end);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+static DEVICE_ATTR_RW(charge_control_thresholds);
+
+static int huawei_wmi_battery_add(struct power_supply *battery)
+{
+ device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold);
return 0;
}
-static int huawei_wmi_leds_setup(struct wmi_device *wdev)
+static int huawei_wmi_battery_remove(struct power_supply *battery)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold);
+ device_remove_file(&battery->dev, &dev_attr_charge_control_end_threshold);
- priv->handle = ec_get_handle();
- if (!priv->handle)
- return 0;
+ return 0;
+}
- if (acpi_has_method(priv->handle, "SPIN"))
- priv->acpi_method = "SPIN";
- else if (acpi_has_method(priv->handle, "WPIN"))
- priv->acpi_method = "WPIN";
- else
- return 0;
+static struct acpi_battery_hook huawei_wmi_battery_hook = {
+ .add_battery = huawei_wmi_battery_add,
+ .remove_battery = huawei_wmi_battery_remove,
+ .name = "Huawei Battery Extension"
+};
+
+static void huawei_wmi_battery_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
- priv->cdev.name = "platform::micmute";
- priv->cdev.max_brightness = 1;
- priv->cdev.brightness_set_blocking = huawei_wmi_micmute_led_set;
- priv->cdev.default_trigger = "audio-micmute";
- priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
- priv->cdev.dev = &wdev->dev;
- priv->cdev.flags = LED_CORE_SUSPENDRESUME;
+ huawei->battery_available = true;
+ if (huawei_wmi_battery_get(NULL, NULL)) {
+ huawei->battery_available = false;
+ return;
+ }
- return devm_led_classdev_register(&wdev->dev, &priv->cdev);
+ battery_hook_register(&huawei_wmi_battery_hook);
+ device_create_file(dev, &dev_attr_charge_control_thresholds);
}
-static void huawei_wmi_process_key(struct wmi_device *wdev, int code)
+static void huawei_wmi_battery_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ if (huawei->battery_available) {
+ battery_hook_unregister(&huawei_wmi_battery_hook);
+ device_remove_file(dev, &dev_attr_charge_control_thresholds);
+ }
+}
+
+/* Fn lock */
+
+static int huawei_wmi_fn_lock_get(int *on)
+{
+ u8 ret[0x100] = { 0 };
+ int err, i;
+
+ err = huawei_wmi_cmd(FN_LOCK_GET, ret, 0x100);
+ if (err)
+ return err;
+
+ /* Find the first non-zero value. Return status is ignored. */
+ i = 1;
+ do {
+ if (on)
+ *on = ret[i] - 1; // -1 undefined, 0 off, 1 on.
+ } while (i < 0xff && !ret[i++]);
+
+ return 0;
+}
+
+static int huawei_wmi_fn_lock_set(int on)
+{
+ union hwmi_arg arg;
+
+ arg.cmd = FN_LOCK_SET;
+ arg.args[2] = on + 1; // 0 undefined, 1 off, 2 on.
+
+ return huawei_wmi_cmd(arg.cmd, NULL, 0);
+}
+
+static ssize_t fn_lock_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, on;
+
+ err = huawei_wmi_fn_lock_get(&on);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", on);
+}
+
+static ssize_t fn_lock_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int on, err;
+
+ if (kstrtoint(buf, 10, &on) ||
+ on < 0 || on > 1)
+ return -EINVAL;
+
+ err = huawei_wmi_fn_lock_set(on);
+ if (err)
+ return err;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(fn_lock_state);
+
+static void huawei_wmi_fn_lock_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->fn_lock_available = true;
+ if (huawei_wmi_fn_lock_get(NULL)) {
+ huawei->fn_lock_available = false;
+ return;
+ }
+
+ device_create_file(dev, &dev_attr_fn_lock_state);
+}
+
+static void huawei_wmi_fn_lock_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ if (huawei->fn_lock_available)
+ device_remove_file(dev, &dev_attr_fn_lock_state);
+}
+
+/* debugfs */
+
+static void huawei_wmi_debugfs_call_dump(struct seq_file *m, void *data,
+ union acpi_object *obj)
+{
+ struct huawei_wmi *huawei = m->private;
+ int i;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ seq_printf(m, "0x%llx", obj->integer.value);
+ break;
+ case ACPI_TYPE_STRING:
+ seq_printf(m, "\"%.*s\"", obj->string.length, obj->string.pointer);
+ break;
+ case ACPI_TYPE_BUFFER:
+ seq_puts(m, "{");
+ for (i = 0; i < obj->buffer.length; i++) {
+ seq_printf(m, "0x%02x", obj->buffer.pointer[i]);
+ if (i < obj->buffer.length - 1)
+ seq_puts(m, ",");
+ }
+ seq_puts(m, "}");
+ break;
+ case ACPI_TYPE_PACKAGE:
+ seq_puts(m, "[");
+ for (i = 0; i < obj->package.count; i++) {
+ huawei_wmi_debugfs_call_dump(m, huawei, &obj->package.elements[i]);
+ if (i < obj->package.count - 1)
+ seq_puts(m, ",");
+ }
+ seq_puts(m, "]");
+ break;
+ default:
+ dev_err(huawei->dev, "Unexpected obj type, got %d\n", obj->type);
+ return;
+ }
+}
+
+static int huawei_wmi_debugfs_call_show(struct seq_file *m, void *data)
+{
+ struct huawei_wmi *huawei = m->private;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer in;
+ union acpi_object *obj;
+ int err;
+
+ in.length = sizeof(u64);
+ in.pointer = &huawei->debug.arg;
+
+ err = huawei_wmi_call(huawei, &in, &out);
+ if (err)
+ return err;
+
+ obj = out.pointer;
+ if (!obj) {
+ err = -EIO;
+ goto fail_debugfs_call;
+ }
+
+ huawei_wmi_debugfs_call_dump(m, huawei, obj);
+
+fail_debugfs_call:
+ kfree(out.pointer);
+ return err;
+}
+
+DEFINE_SHOW_ATTRIBUTE(huawei_wmi_debugfs_call);
+
+static void huawei_wmi_debugfs_setup(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ huawei->debug.root = debugfs_create_dir("huawei-wmi", NULL);
+
+ debugfs_create_x64("arg", 0644, huawei->debug.root,
+ &huawei->debug.arg);
+ debugfs_create_file("call", 0400,
+ huawei->debug.root, huawei, &huawei_wmi_debugfs_call_fops);
+}
+
+static void huawei_wmi_debugfs_exit(struct device *dev)
+{
+ struct huawei_wmi *huawei = dev_get_drvdata(dev);
+
+ debugfs_remove_recursive(huawei->debug.root);
+}
+
+/* Input */
+
+static void huawei_wmi_process_key(struct input_dev *idev, int code)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
const struct key_entry *key;
/*
@@ -127,81 +713,187 @@ static void huawei_wmi_process_key(struct wmi_device *wdev, int code)
kfree(response.pointer);
}
- key = sparse_keymap_entry_from_scancode(priv->idev, code);
+ key = sparse_keymap_entry_from_scancode(idev, code);
if (!key) {
- dev_info(&wdev->dev, "Unknown key pressed, code: 0x%04x\n", code);
+ dev_info(&idev->dev, "Unknown key pressed, code: 0x%04x\n", code);
return;
}
- sparse_keymap_report_entry(priv->idev, key, 1, true);
+ if (quirks && !quirks->report_brightness &&
+ (key->sw.code == KEY_BRIGHTNESSDOWN ||
+ key->sw.code == KEY_BRIGHTNESSUP))
+ return;
+
+ sparse_keymap_report_entry(idev, key, 1, true);
}
-static void huawei_wmi_notify(struct wmi_device *wdev,
- union acpi_object *obj)
+static void huawei_wmi_input_notify(u32 value, void *context)
{
- if (obj->type == ACPI_TYPE_INTEGER)
- huawei_wmi_process_key(wdev, obj->integer.value);
+ struct input_dev *idev = (struct input_dev *)context;
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_get_event_data(value, &response);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&idev->dev, "Unable to get event data\n");
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ huawei_wmi_process_key(idev, obj->integer.value);
else
- dev_info(&wdev->dev, "Bad response type %d\n", obj->type);
+ dev_err(&idev->dev, "Bad response type\n");
+
+ kfree(response.pointer);
}
-static int huawei_wmi_input_setup(struct wmi_device *wdev)
+static int huawei_wmi_input_setup(struct device *dev,
+ const char *guid,
+ struct input_dev **idev)
{
- struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- int err;
-
- priv->idev = devm_input_allocate_device(&wdev->dev);
- if (!priv->idev)
+ *idev = devm_input_allocate_device(dev);
+ if (!*idev)
return -ENOMEM;
- priv->idev->name = "Huawei WMI hotkeys";
- priv->idev->phys = "wmi/input0";
- priv->idev->id.bustype = BUS_HOST;
- priv->idev->dev.parent = &wdev->dev;
+ (*idev)->name = "Huawei WMI hotkeys";
+ (*idev)->phys = "wmi/input0";
+ (*idev)->id.bustype = BUS_HOST;
+ (*idev)->dev.parent = dev;
- err = sparse_keymap_setup(priv->idev, huawei_wmi_keymap, NULL);
- if (err)
- return err;
+ return sparse_keymap_setup(*idev, huawei_wmi_keymap, NULL) ||
+ input_register_device(*idev) ||
+ wmi_install_notify_handler(guid, huawei_wmi_input_notify,
+ *idev);
+}
- return input_register_device(priv->idev);
+static void huawei_wmi_input_exit(struct device *dev, const char *guid)
+{
+ wmi_remove_notify_handler(guid);
}
-static int huawei_wmi_probe(struct wmi_device *wdev, const void *context)
+/* Huawei driver */
+
+static const struct wmi_device_id huawei_wmi_events_id_table[] = {
+ { .guid_string = WMI0_EVENT_GUID },
+ { .guid_string = HWMI_EVENT_GUID },
+ { }
+};
+
+static int huawei_wmi_probe(struct platform_device *pdev)
{
- struct huawei_wmi_priv *priv;
+ const struct wmi_device_id *guid = huawei_wmi_events_id_table;
int err;
- priv = devm_kzalloc(&wdev->dev, sizeof(struct huawei_wmi_priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ platform_set_drvdata(pdev, huawei_wmi);
+ huawei_wmi->dev = &pdev->dev;
- dev_set_drvdata(&wdev->dev, priv);
+ while (*guid->guid_string) {
+ struct input_dev *idev = *huawei_wmi->idev;
- err = huawei_wmi_input_setup(wdev);
- if (err)
- return err;
+ if (wmi_has_guid(guid->guid_string)) {
+ err = huawei_wmi_input_setup(&pdev->dev, guid->guid_string, &idev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup input on %s\n", guid->guid_string);
+ return err;
+ }
+ }
+
+ idev++;
+ guid++;
+ }
+
+ if (wmi_has_guid(HWMI_METHOD_GUID)) {
+ mutex_init(&huawei_wmi->wmi_lock);
- return huawei_wmi_leds_setup(wdev);
+ huawei_wmi_leds_setup(&pdev->dev);
+ huawei_wmi_fn_lock_setup(&pdev->dev);
+ huawei_wmi_battery_setup(&pdev->dev);
+ huawei_wmi_debugfs_setup(&pdev->dev);
+ }
+
+ return 0;
}
-static const struct wmi_device_id huawei_wmi_id_table[] = {
- { .guid_string = WMI0_EVENT_GUID },
- { .guid_string = AMW0_EVENT_GUID },
- { }
-};
+static int huawei_wmi_remove(struct platform_device *pdev)
+{
+ const struct wmi_device_id *guid = huawei_wmi_events_id_table;
-static struct wmi_driver huawei_wmi_driver = {
+ while (*guid->guid_string) {
+ if (wmi_has_guid(guid->guid_string))
+ huawei_wmi_input_exit(&pdev->dev, guid->guid_string);
+
+ guid++;
+ }
+
+ if (wmi_has_guid(HWMI_METHOD_GUID)) {
+ huawei_wmi_debugfs_exit(&pdev->dev);
+ huawei_wmi_battery_exit(&pdev->dev);
+ huawei_wmi_fn_lock_exit(&pdev->dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver huawei_wmi_driver = {
.driver = {
.name = "huawei-wmi",
},
- .id_table = huawei_wmi_id_table,
.probe = huawei_wmi_probe,
- .notify = huawei_wmi_notify,
+ .remove = huawei_wmi_remove,
};
-module_wmi_driver(huawei_wmi_driver);
+static __init int huawei_wmi_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ huawei_wmi = kzalloc(sizeof(struct huawei_wmi), GFP_KERNEL);
+ if (!huawei_wmi)
+ return -ENOMEM;
+
+ quirks = &quirk_unknown;
+ dmi_check_system(huawei_quirks);
+ if (battery_reset != -1)
+ quirks->battery_reset = battery_reset;
+ if (report_brightness != -1)
+ quirks->report_brightness = report_brightness;
+
+ err = platform_driver_register(&huawei_wmi_driver);
+ if (err)
+ goto pdrv_err;
+
+ pdev = platform_device_register_simple("huawei-wmi", -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_err;
+ }
+
+ return 0;
+
+pdev_err:
+ platform_driver_unregister(&huawei_wmi_driver);
+pdrv_err:
+ kfree(huawei_wmi);
+ return err;
+}
+
+static __exit void huawei_wmi_exit(void)
+{
+ struct platform_device *pdev = to_platform_device(huawei_wmi->dev);
+
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&huawei_wmi_driver);
+
+ kfree(huawei_wmi);
+}
+
+module_init(huawei_wmi_init);
+module_exit(huawei_wmi_exit);
-MODULE_DEVICE_TABLE(wmi, huawei_wmi_id_table);
+MODULE_ALIAS("wmi:"HWMI_METHOD_GUID);
+MODULE_DEVICE_TABLE(wmi, huawei_wmi_events_id_table);
MODULE_AUTHOR("Ayman Bagabas <ayman.bagabas@gmail.com>");
-MODULE_DESCRIPTION("Huawei WMI hotkeys");
+MODULE_DESCRIPTION("Huawei WMI laptop extras driver");
MODULE_LICENSE("GPL v2");
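
The union hwmi_arg introduced above packs the command word and byte-wide arguments into the single 64-bit input HWMI expects. A short usage sketch mirroring the driver's battery path (the threshold values are illustrative):

	union hwmi_arg arg;

	arg.cmd = BATTERY_THRESH_SET;	/* fills the low bytes of the u64 */
	arg.args[2] = 40;		/* charge start threshold, percent */
	arg.args[3] = 80;		/* charge end threshold, percent */

	err = huawei_wmi_cmd(arg.cmd, NULL, 0);	/* no output buffer needed */
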
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.c b/drivers/platform/x86/intel_cht_int33fe_common.c
new file mode 100644
index 000000000000..42dd11623f56
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe_common.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common code for Intel Cherry Trail ACPI INT33FE pseudo device drivers
+ * (USB Micro-B and Type-C connector variants).
+ *
+ * Copyright (c) 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "intel_cht_int33fe_common.h"
+
+#define EXPECTED_PTYPE 4
+
+static int cht_int33fe_i2c_res_filter(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_i2c_serialbus *sb;
+ int *count = data;
+
+ if (i2c_acpi_get_i2c_resource(ares, &sb))
+ (*count)++;
+
+ return 1;
+}
+
+static int cht_int33fe_count_i2c_clients(struct device *dev)
+{
+ struct acpi_device *adev;
+ LIST_HEAD(resource_list);
+ int count = 0;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -EINVAL;
+
+ acpi_dev_get_resources(adev, &resource_list,
+ cht_int33fe_i2c_res_filter, &count);
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ return count;
+}
+
+static int cht_int33fe_check_hw_type(struct device *dev)
+{
+ unsigned long long ptyp;
+ acpi_status status;
+ int ret;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Error getting PTYPE\n");
+ return -ENODEV;
+ }
+
+ /*
+ * The same ACPI HID is used for different configurations; check PTYP
+ * to ensure that we are dealing with the expected config.
+ */
+ if (ptyp != EXPECTED_PTYPE)
+ return -ENODEV;
+
+ /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
+ if (!acpi_dev_present("INT34D3", "1", 3)) {
+ dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
+ EXPECTED_PTYPE);
+ return -ENODEV;
+ }
+
+ ret = cht_int33fe_count_i2c_clients(dev);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case 2:
+ return INT33FE_HW_MICROB;
+ case 4:
+ return INT33FE_HW_TYPEC;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int cht_int33fe_probe(struct platform_device *pdev)
+{
+ struct cht_int33fe_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = cht_int33fe_check_hw_type(dev);
+ if (ret < 0)
+ return ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+
+ switch (ret) {
+ case INT33FE_HW_MICROB:
+ data->probe = cht_int33fe_microb_probe;
+ data->remove = cht_int33fe_microb_remove;
+ break;
+
+ case INT33FE_HW_TYPEC:
+ data->probe = cht_int33fe_typec_probe;
+ data->remove = cht_int33fe_typec_remove;
+ break;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return data->probe(data);
+}
+
+static int cht_int33fe_remove(struct platform_device *pdev)
+{
+ struct cht_int33fe_data *data = platform_get_drvdata(pdev);
+
+ return data->remove(data);
+}
+
+static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
+ { "INT33FE", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
+
+static struct platform_driver cht_int33fe_driver = {
+ .driver = {
+ .name = "Intel Cherry Trail ACPI INT33FE driver",
+ .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
+ },
+ .probe = cht_int33fe_probe,
+ .remove = cht_int33fe_remove,
+};
+
+module_platform_driver(cht_int33fe_driver);
+
+MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
+MODULE_AUTHOR("Yauhen Kharuzhy <jekhor@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_cht_int33fe_common.h b/drivers/platform/x86/intel_cht_int33fe_common.h
new file mode 100644
index 000000000000..03cd45f4e8cb
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe_common.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common code for Intel Cherry Trail ACPI INT33FE pseudo device drivers
+ * (USB Micro-B and Type-C connector variants), header file
+ *
+ * Copyright (c) 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ */
+
+#ifndef _INTEL_CHT_INT33FE_COMMON_H
+#define _INTEL_CHT_INT33FE_COMMON_H
+
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/i2c.h>
+
+enum int33fe_hw_type {
+ INT33FE_HW_MICROB,
+ INT33FE_HW_TYPEC,
+};
+
+struct cht_int33fe_data {
+ struct device *dev;
+
+ int (*probe)(struct cht_int33fe_data *data);
+ int (*remove)(struct cht_int33fe_data *data);
+
+ struct i2c_client *battery_fg;
+
+ /* Type-C only */
+ struct i2c_client *fusb302;
+ struct i2c_client *pi3usb30532;
+
+ struct fwnode_handle *dp;
+};
+
+int cht_int33fe_microb_probe(struct cht_int33fe_data *data);
+int cht_int33fe_microb_remove(struct cht_int33fe_data *data);
+int cht_int33fe_typec_probe(struct cht_int33fe_data *data);
+int cht_int33fe_typec_remove(struct cht_int33fe_data *data);
+
+#endif /* _INTEL_CHT_INT33FE_COMMON_H */
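
With this split, supporting another connector variant is mechanical: add an enum value and a switch case in the common probe, declare the hooks in this header, and implement them in a new file. A hedged sketch for a purely hypothetical third variant:

	/* intel_cht_int33fe_common.h, hypothetical additions */
	int cht_int33fe_newvariant_probe(struct cht_int33fe_data *data);
	int cht_int33fe_newvariant_remove(struct cht_int33fe_data *data);

	/* cht_int33fe_probe(), hypothetical extra case */
	case INT33FE_HW_NEWVARIANT:
		data->probe = cht_int33fe_newvariant_probe;
		data->remove = cht_int33fe_newvariant_remove;
		break;
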
diff --git a/drivers/platform/x86/intel_cht_int33fe_microb.c b/drivers/platform/x86/intel_cht_int33fe_microb.c
new file mode 100644
index 000000000000..20b11e0d9a75
--- /dev/null
+++ b/drivers/platform/x86/intel_cht_int33fe_microb.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Cherry Trail ACPI INT33FE pseudo device driver for devices with
+ * USB Micro-B connector (e.g. without a FUSB302 USB Type-C controller)
+ *
+ * Copyright (C) 2019 Yauhen Kharuzhy <jekhor@gmail.com>
+ *
+ * At least one Intel Cherry Trail based device that ships with Windows 10
+ * (the Lenovo YogaBook YB1-X91L/F tablet) has this weird INT33FE ACPI device
+ * with a CRS table with 2 I2cSerialBusV2 resources, for 2 different chips
+ * attached to various i2c busses:
+ * 1. The Whiskey Cove PMIC, which is also described by the INT34D3 ACPI device
+ * 2. TI BQ27542 Fuel Gauge Controller
+ *
+ * So this driver is a stub / pseudo driver whose only purpose is to
+ * instantiate an i2c-client for the battery fuel gauge, so that the standard
+ * i2c driver for that chip can bind to it.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+
+#include "intel_cht_int33fe_common.h"
+
+static const char * const bq27xxx_suppliers[] = { "bq25890-charger" };
+
+static const struct property_entry bq27xxx_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq27xxx_suppliers),
+ { }
+};
+
+int cht_int33fe_microb_probe(struct cht_int33fe_data *data)
+{
+ struct device *dev = data->dev;
+ struct i2c_board_info board_info;
+
+ memset(&board_info, 0, sizeof(board_info));
+ strscpy(board_info.type, "bq27542", ARRAY_SIZE(board_info.type));
+ board_info.dev_name = "bq27542";
+ board_info.properties = bq27xxx_props;
+ data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
+
+ return PTR_ERR_OR_ZERO(data->battery_fg);
+}
+
+int cht_int33fe_microb_remove(struct cht_int33fe_data *data)
+{
+ i2c_unregister_device(data->battery_fg);
+
+ return 0;
+}
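
i2c_acpi_new_device() instantiates the client from the ACPI node's Nth I2cSerialBusV2 resource, with board-info properties layered on top; on this kernel it returns an ERR_PTR on failure. The general pattern, with illustrative names:

	static const char * const suppliers[] = { "example-charger" };

	static const struct property_entry fg_props[] = {
		PROPERTY_ENTRY_STRING_ARRAY("supplied-from", suppliers),
		{ }
	};

	struct i2c_board_info info = { };
	struct i2c_client *client;

	strscpy(info.type, "example-fg", ARRAY_SIZE(info.type));
	info.properties = fg_props;

	/* Index 1 selects the second I2cSerialBus resource in _CRS. */
	client = i2c_acpi_new_device(dev, 1, &info);
	if (IS_ERR(client))
		return PTR_ERR(client);
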
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe_typec.c
index 1d5d877b9582..2d097fc2dd46 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe_typec.c
@@ -17,17 +17,15 @@
* for these chips can bind to them.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/pd.h>
-#define EXPECTED_PTYPE 4
+#include "intel_cht_int33fe_common.h"
enum {
INT33FE_NODE_FUSB302,
@@ -38,14 +36,6 @@ enum {
INT33FE_NODE_MAX,
};
-struct cht_int33fe_data {
- struct i2c_client *max17047;
- struct i2c_client *fusb302;
- struct i2c_client *pi3usb30532;
-
- struct fwnode_handle *dp;
-};
-
static const struct software_node nodes[];
static const struct software_node_ref_args pi3usb30532_ref = {
@@ -251,43 +241,20 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
- data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+ data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
- return PTR_ERR_OR_ZERO(data->max17047);
+ return PTR_ERR_OR_ZERO(data->battery_fg);
}
-static int cht_int33fe_probe(struct platform_device *pdev)
+int cht_int33fe_typec_probe(struct cht_int33fe_data *data)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = data->dev;
struct i2c_board_info board_info;
- struct cht_int33fe_data *data;
struct fwnode_handle *fwnode;
struct regulator *regulator;
- unsigned long long ptyp;
- acpi_status status;
int fusb302_irq;
int ret;
- status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "Error getting PTYPE\n");
- return -ENODEV;
- }
-
- /*
- * The same ACPI HID is used for different configurations check PTYP
- * to ensure that we are dealing with the expected config.
- */
- if (ptyp != EXPECTED_PTYPE)
- return -ENODEV;
-
- /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
- if (!acpi_dev_present("INT34D3", "1", 3)) {
- dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
- EXPECTED_PTYPE);
- return -ENODEV;
- }
-
/*
* We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
* We check for the bq24292i vbus regulator here, this has 2 purposes:
@@ -317,10 +284,6 @@ static int cht_int33fe_probe(struct platform_device *pdev)
return fusb302_irq;
}
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
ret = cht_int33fe_add_nodes(data);
if (ret)
return ret;
@@ -365,15 +328,13 @@ static int cht_int33fe_probe(struct platform_device *pdev)
goto out_unregister_fusb302;
}
- platform_set_drvdata(pdev, data);
-
return 0;
out_unregister_fusb302:
i2c_unregister_device(data->fusb302);
out_unregister_max17047:
- i2c_unregister_device(data->max17047);
+ i2c_unregister_device(data->battery_fg);
out_remove_nodes:
cht_int33fe_remove_nodes(data);
@@ -381,36 +342,13 @@ out_remove_nodes:
return ret;
}
-static int cht_int33fe_remove(struct platform_device *pdev)
+int cht_int33fe_typec_remove(struct cht_int33fe_data *data)
{
- struct cht_int33fe_data *data = platform_get_drvdata(pdev);
-
i2c_unregister_device(data->pi3usb30532);
i2c_unregister_device(data->fusb302);
- i2c_unregister_device(data->max17047);
+ i2c_unregister_device(data->battery_fg);
cht_int33fe_remove_nodes(data);
return 0;
}
-
-static const struct acpi_device_id cht_int33fe_acpi_ids[] = {
- { "INT33FE", },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, cht_int33fe_acpi_ids);
-
-static struct platform_driver cht_int33fe_driver = {
- .driver = {
- .name = "Intel Cherry Trail ACPI INT33FE driver",
- .acpi_match_table = ACPI_PTR(cht_int33fe_acpi_ids),
- },
- .probe = cht_int33fe_probe,
- .remove = cht_int33fe_remove,
-};
-
-module_platform_driver(cht_int33fe_driver);
-
-MODULE_DESCRIPTION("Intel Cherry Trail ACPI INT33FE pseudo device driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index af233b7b77f2..f14e2c5f9da5 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -164,8 +164,8 @@ static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct x86_cpu_id *cpu_id;
- struct irq_chip *irq_chip;
struct gpio_chip *chip;
+ struct gpio_irq_chip *girq;
int irq, ret;
/* Menlow has a different INT0002 device? <sigh> */
@@ -192,15 +192,11 @@ static int int0002_probe(struct platform_device *pdev)
chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
chip->irq.init_valid_mask = int0002_init_irq_valid_mask;
- ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
- if (ret) {
- dev_err(dev, "Error adding gpio chip: %d\n", ret);
- return ret;
- }
-
/*
- * We manually request the irq here instead of passing a flow-handler
+ * We directly request the irq here instead of passing a flow-handler
* to gpiochip_set_chained_irqchip, because the irq is shared.
+ * FIXME: augment this if we manage to pull handling of shared
+ * IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
IRQF_SHARED, "INT0002", chip);
@@ -209,17 +205,21 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
- irq_chip = (struct irq_chip *)cpu_id->driver_data;
+ girq = &chip->irq;
+ girq->chip = (struct irq_chip *)cpu_id->driver_data;
+ /* This lets us handle the parent IRQ in the driver */
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
- ret = gpiochip_irqchip_add(chip, irq_chip, 0, handle_edge_irq,
- IRQ_TYPE_NONE);
+ ret = devm_gpiochip_add_data(dev, chip, NULL);
if (ret) {
- dev_err(dev, "Error adding irqchip: %d\n", ret);
+ dev_err(dev, "Error adding gpio chip: %d\n", ret);
return ret;
}
- gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);
-
device_init_wakeup(dev, true);
return 0;
}
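
This hunk moves int0002 to the newer gpiolib irqchip model: the gpio_irq_chip embedded in the gpio_chip is filled in before registration, and devm_gpiochip_add_data() then wires up the irqchip, replacing the gpiochip_irqchip_add()/gpiochip_set_chained_irqchip() pair. The shape in isolation (example_irqchip is a placeholder):

	struct gpio_irq_chip *girq = &chip->irq;

	girq->chip = &example_irqchip;
	girq->parent_handler = NULL;	/* parent IRQ stays with the driver */
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_edge_irq;

	ret = devm_gpiochip_add_data(dev, chip, NULL);	/* also registers the irqchip */
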
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 94a008efb09b..571b4754477c 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -158,8 +158,9 @@ static const struct pmc_reg_map spt_reg_map = {
.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
-/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
+/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
static const struct pmc_bit_map cnp_pfear_map[] = {
+ /* Reserved for Cannon Lake but valid for Comet Lake */
{"PMC", BIT(0)},
{"OPI-DMI", BIT(1)},
{"SPI/eSPI", BIT(2)},
@@ -185,7 +186,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"SDX", BIT(4)},
{"SPE", BIT(5)},
{"Fuse", BIT(6)},
- /* Reserved for Cannonlake but valid for Icelake */
+ /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
{"SBR8", BIT(7)},
{"CSME_FSC", BIT(0)},
@@ -229,12 +230,12 @@ static const struct pmc_bit_map cnp_pfear_map[] = {
{"HDA_PGD4", BIT(2)},
{"HDA_PGD5", BIT(3)},
{"HDA_PGD6", BIT(4)},
- /* Reserved for Cannonlake but valid for Icelake */
+ /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
{"PSF6", BIT(5)},
{"PSF7", BIT(6)},
{"PSF8", BIT(7)},
- /* Icelake generation onwards only */
+ /* Ice Lake generation onwards only */
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -324,7 +325,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = {
{"ISH", CNP_PMC_LTR_ISH},
{"UFSX2", CNP_PMC_LTR_UFSX2},
{"EMMC", CNP_PMC_LTR_EMMC},
- /* Reserved for Cannonlake but valid for Icelake */
+ /* Reserved for Cannon Lake but valid for Ice Lake */
{"WIGIG", ICL_PMC_LTR_WIGIG},
/* Below two cannot be used for LTR_IGNORE */
{"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
@@ -813,6 +814,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
+ INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
+ INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
{}
};
@@ -871,8 +874,8 @@ static int pmc_core_probe(struct platform_device *pdev)
pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
/*
- * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
- * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
+ * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
+ * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
* in this case.
*/
if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
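
Because Comet Lake reuses the Cannon Lake PCH register map, enabling it needs only the two INTEL_CPU_FAM6() rows above; the table is consulted at probe time with x86_match_cpu(), roughly:

	const struct x86_cpu_id *cpu_id;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;
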
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index fa97834fdb78..05cced59e251 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -224,7 +224,6 @@ static irqreturn_t intel_punit_ioc(int irq, void *dev_id)
static int intel_punit_get_bars(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *addr;
/*
@@ -232,14 +231,12 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - BIOS_IPC BASE_DATA
* - BIOS_IPC BASE_IFACE
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr))
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_DATA] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- addr = devm_ioremap_resource(&pdev->dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr))
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
@@ -251,33 +248,21 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_DATA
* - GTDRIVER_IPC BASE_IFACE
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 2);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 3);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 4);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res) {
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (!IS_ERR(addr))
- punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
- }
+ addr = devm_platform_ioremap_resource(pdev, 5);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
return 0;
}
@@ -309,14 +294,13 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
ret = intel_punit_get_bars(pdev);
if (ret)
- goto out;
+ return ret;
punit_ipcdev->dev = &pdev->dev;
mutex_init(&punit_ipcdev->lock);
init_completion(&punit_ipcdev->cmd_complete);
-out:
- return ret;
+ return 0;
}
static int intel_punit_ipc_remove(struct platform_device *pdev)
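
devm_platform_ioremap_resource(pdev, index) collapses the platform_get_resource() plus devm_ioremap_resource() pair into one call; optional BARs still work because callers can simply skip an ERR_PTR result instead of failing. Sketched with hypothetical base/optional_base fields:

	void __iomem *addr;

	/* Mandatory BAR: propagate the error. */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	priv->base = addr;

	/* Optional BAR: quietly skip if absent or unmappable. */
	addr = devm_platform_ioremap_resource(pdev, 2);
	if (!IS_ERR(addr))
		priv->optional_base = addr;
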
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
index fdeb3624c529..cf9c44c20a82 100644
--- a/drivers/platform/x86/peaq-wmi.c
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -6,7 +6,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,8 +18,7 @@
MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID);
-static unsigned int peaq_ignore_events_counter;
-static struct input_polled_dev *peaq_poll_dev;
+static struct input_dev *peaq_poll_dev;
/*
* The Dolby button (yes really a Dolby button) causes an ACPI variable to get
@@ -28,8 +27,10 @@ static struct input_polled_dev *peaq_poll_dev;
* (if polling after the release) or twice (polling between press and release).
* We ignore events for 0.5s after the first event to avoid reporting 2 presses.
*/
-static void peaq_wmi_poll(struct input_polled_dev *dev)
+static void peaq_wmi_poll(struct input_dev *input_dev)
{
+ static unsigned long last_event_time;
+ static bool had_events;
union acpi_object obj;
acpi_status status;
u32 dummy = 0;
@@ -44,22 +45,25 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
return;
if (obj.type != ACPI_TYPE_INTEGER) {
- dev_err(&peaq_poll_dev->input->dev,
+ dev_err(&input_dev->dev,
"Error WMBC did not return an integer\n");
return;
}
- if (peaq_ignore_events_counter && peaq_ignore_events_counter--)
+ if (!obj.integer.value)
return;
- if (obj.integer.value) {
- input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 1);
- input_sync(peaq_poll_dev->input);
- input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 0);
- input_sync(peaq_poll_dev->input);
- peaq_ignore_events_counter = max(1u,
- PEAQ_POLL_IGNORE_MS / peaq_poll_dev->poll_interval);
- }
+ if (had_events && time_before(jiffies, last_event_time +
+ msecs_to_jiffies(PEAQ_POLL_IGNORE_MS)))
+ return;
+
+ input_event(input_dev, EV_KEY, KEY_SOUND, 1);
+ input_sync(input_dev);
+ input_event(input_dev, EV_KEY, KEY_SOUND, 0);
+ input_sync(input_dev);
+
+ last_event_time = jiffies;
+ had_events = true;
}
/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
@@ -75,6 +79,8 @@ static const struct dmi_system_id peaq_dmi_table[] __initconst = {
static int __init peaq_wmi_init(void)
{
+ int err;
+
/* WMI GUID is not unique, also check for a DMI match */
if (!dmi_check_system(peaq_dmi_table))
return -ENODEV;
@@ -82,24 +88,36 @@ static int __init peaq_wmi_init(void)
if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
return -ENODEV;
- peaq_poll_dev = input_allocate_polled_device();
+ peaq_poll_dev = input_allocate_device();
if (!peaq_poll_dev)
return -ENOMEM;
- peaq_poll_dev->poll = peaq_wmi_poll;
- peaq_poll_dev->poll_interval = PEAQ_POLL_INTERVAL_MS;
- peaq_poll_dev->poll_interval_max = PEAQ_POLL_MAX_MS;
- peaq_poll_dev->input->name = "PEAQ WMI hotkeys";
- peaq_poll_dev->input->phys = "wmi/input0";
- peaq_poll_dev->input->id.bustype = BUS_HOST;
- input_set_capability(peaq_poll_dev->input, EV_KEY, KEY_SOUND);
+ peaq_poll_dev->name = "PEAQ WMI hotkeys";
+ peaq_poll_dev->phys = "wmi/input0";
+ peaq_poll_dev->id.bustype = BUS_HOST;
+ input_set_capability(peaq_poll_dev, EV_KEY, KEY_SOUND);
+
+ err = input_setup_polling(peaq_poll_dev, peaq_wmi_poll);
+ if (err)
+ goto err_out;
+
+ input_set_poll_interval(peaq_poll_dev, PEAQ_POLL_INTERVAL_MS);
+ input_set_max_poll_interval(peaq_poll_dev, PEAQ_POLL_MAX_MS);
+
+ err = input_register_device(peaq_poll_dev);
+ if (err)
+ goto err_out;
+
+ return 0;
- return input_register_polled_device(peaq_poll_dev);
+err_out:
+ input_free_device(peaq_poll_dev);
+ return err;
}
static void __exit peaq_wmi_exit(void)
{
- input_unregister_polled_device(peaq_poll_dev);
+ input_unregister_device(peaq_poll_dev);
}
module_init(peaq_wmi_init);
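
The counter-based event suppression is replaced by a jiffies timestamp, which stays correct even if the poll interval is changed at runtime. The core idiom, factored into a hypothetical helper:

	#include <linux/jiffies.h>

	/* True when at least @ms milliseconds have passed since the last
	 * accepted event; the time_before() comparison is wraparound-safe. */
	static bool example_debounce(unsigned long *last, bool *seen,
				     unsigned int ms)
	{
		if (*seen && time_before(jiffies, *last + msecs_to_jiffies(ms)))
			return false;

		*last = jiffies;
		*seen = true;
		return true;
	}
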
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
new file mode 100644
index 000000000000..4f6e4c342382
--- /dev/null
+++ b/drivers/platform/x86/system76_acpi.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * System76 ACPI Driver
+ *
+ * Copyright (C) 2019 System76
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/types.h>
+
+struct system76_data {
+ struct acpi_device *acpi_dev;
+ struct led_classdev ap_led;
+ struct led_classdev kb_led;
+ enum led_brightness kb_brightness;
+ enum led_brightness kb_toggle_brightness;
+ int kb_color;
+};
+
+static const struct acpi_device_id device_ids[] = {
+ {"17761776", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, device_ids);
+
+// Array of keyboard LED brightness levels
+static const enum led_brightness kb_levels[] = {
+ 48,
+ 72,
+ 96,
+ 144,
+ 192,
+ 255
+};
+
+// Array of keyboard LED colors in 24-bit RGB format
+static const int kb_colors[] = {
+ 0xFFFFFF,
+ 0x0000FF,
+ 0xFF0000,
+ 0xFF00FF,
+ 0x00FF00,
+ 0x00FFFF,
+ 0xFFFF00
+};
+
+// Get a System76 ACPI device value by name
+static int system76_get(struct system76_data *data, char *method)
+{
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = 0;
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_integer(handle, method, NULL, &ret);
+ if (ACPI_SUCCESS(status))
+ return (int)ret;
+ else
+ return -1;
+}
+
+// Set a System76 ACPI device value by name
+static int system76_set(struct system76_data *data, char *method, int value)
+{
+ union acpi_object obj;
+ struct acpi_object_list obj_list;
+ acpi_handle handle;
+ acpi_status status;
+
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = value;
+ obj_list.count = 1;
+ obj_list.pointer = &obj;
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_object(handle, method, &obj_list, NULL);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ else
+ return -1;
+}
+
+// Get the airplane mode LED brightness
+static enum led_brightness ap_led_get(struct led_classdev *led)
+{
+ struct system76_data *data;
+ int value;
+
+ data = container_of(led, struct system76_data, ap_led);
+ value = system76_get(data, "GAPL");
+ if (value > 0)
+ return (enum led_brightness)value;
+ else
+ return LED_OFF;
+}
+
+// Set the airplane mode LED brightness
+static void ap_led_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, ap_led);
+ system76_set(data, "SAPL", value == LED_OFF ? 0 : 1);
+}
+
+// Get the last set keyboard LED brightness
+static enum led_brightness kb_led_get(struct led_classdev *led)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, kb_led);
+ return data->kb_brightness;
+}
+
+// Set the keyboard LED brightness
+static void kb_led_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct system76_data *data;
+
+ data = container_of(led, struct system76_data, kb_led);
+ data->kb_brightness = value;
+ system76_set(data, "SKBL", (int)data->kb_brightness);
+}
+
+// Get the last set keyboard LED color
+static ssize_t kb_led_color_show(
+ struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct led_classdev *led;
+ struct system76_data *data;
+
+ led = (struct led_classdev *)dev->driver_data;
+ data = container_of(led, struct system76_data, kb_led);
+ return sprintf(buf, "%06X\n", data->kb_color);
+}
+
+// Set the keyboard LED color
+static ssize_t kb_led_color_store(
+ struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf,
+ size_t size)
+{
+ struct led_classdev *led;
+ struct system76_data *data;
+ unsigned int val;
+ int ret;
+
+ led = (struct led_classdev *)dev->driver_data;
+ data = container_of(led, struct system76_data, kb_led);
+ ret = kstrtouint(buf, 16, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFFFF)
+ return -EINVAL;
+ data->kb_color = (int)val;
+ system76_set(data, "SKBC", data->kb_color);
+
+ return size;
+}
+
+static const struct device_attribute kb_led_color_dev_attr = {
+ .attr = {
+ .name = "color",
+ .mode = 0644,
+ },
+ .show = kb_led_color_show,
+ .store = kb_led_color_store,
+};
+
+// Notify that the keyboard LED was changed by hardware
+static void kb_led_notify(struct system76_data *data)
+{
+ led_classdev_notify_brightness_hw_changed(
+ &data->kb_led,
+ data->kb_brightness
+ );
+}
+
+// Read keyboard LED brightness as set by hardware
+static void kb_led_hotkey_hardware(struct system76_data *data)
+{
+ int value;
+
+ value = system76_get(data, "GKBL");
+ if (value < 0)
+ return;
+ data->kb_brightness = value;
+ kb_led_notify(data);
+}
+
+// Toggle the keyboard LED
+static void kb_led_hotkey_toggle(struct system76_data *data)
+{
+ if (data->kb_brightness > 0) {
+ data->kb_toggle_brightness = data->kb_brightness;
+ kb_led_set(&data->kb_led, 0);
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Decrease the keyboard LED brightness
+static void kb_led_hotkey_down(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_brightness > 0) {
+ for (i = ARRAY_SIZE(kb_levels); i > 0; i--) {
+ if (kb_levels[i - 1] < data->kb_brightness) {
+ kb_led_set(&data->kb_led, kb_levels[i - 1]);
+ break;
+ }
+ }
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Increase the keyboard LED brightness
+static void kb_led_hotkey_up(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_brightness > 0) {
+ for (i = 0; i < ARRAY_SIZE(kb_levels); i++) {
+ if (kb_levels[i] > data->kb_brightness) {
+ kb_led_set(&data->kb_led, kb_levels[i]);
+ break;
+ }
+ }
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Cycle the keyboard LED color
+static void kb_led_hotkey_color(struct system76_data *data)
+{
+ int i;
+
+ if (data->kb_color < 0)
+ return;
+ if (data->kb_brightness > 0) {
+ for (i = 0; i < ARRAY_SIZE(kb_colors); i++) {
+ if (kb_colors[i] == data->kb_color)
+ break;
+ }
+ i += 1;
+ if (i >= ARRAY_SIZE(kb_colors))
+ i = 0;
+ data->kb_color = kb_colors[i];
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ kb_led_set(&data->kb_led, data->kb_toggle_brightness);
+ }
+ kb_led_notify(data);
+}
+
+// Handle ACPI notification
+static void system76_notify(struct acpi_device *acpi_dev, u32 event)
+{
+ struct system76_data *data;
+
+ data = acpi_driver_data(acpi_dev);
+ switch (event) {
+ case 0x80:
+ kb_led_hotkey_hardware(data);
+ break;
+ case 0x81:
+ kb_led_hotkey_toggle(data);
+ break;
+ case 0x82:
+ kb_led_hotkey_down(data);
+ break;
+ case 0x83:
+ kb_led_hotkey_up(data);
+ break;
+ case 0x84:
+ kb_led_hotkey_color(data);
+ break;
+ }
+}
+
+// Add a System76 ACPI device
+static int system76_add(struct acpi_device *acpi_dev)
+{
+ struct system76_data *data;
+ int err;
+
+ data = devm_kzalloc(&acpi_dev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ acpi_dev->driver_data = data;
+ data->acpi_dev = acpi_dev;
+
+ err = system76_get(data, "INIT");
+ if (err)
+ return err;
+ data->ap_led.name = "system76_acpi::airplane";
+ data->ap_led.flags = LED_CORE_SUSPENDRESUME;
+ data->ap_led.brightness_get = ap_led_get;
+ data->ap_led.brightness_set = ap_led_set;
+ data->ap_led.max_brightness = 1;
+ data->ap_led.default_trigger = "rfkill-none";
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->ap_led);
+ if (err)
+ return err;
+
+ data->kb_led.name = "system76_acpi::kbd_backlight";
+ data->kb_led.flags = LED_BRIGHT_HW_CHANGED | LED_CORE_SUSPENDRESUME;
+ data->kb_led.brightness_get = kb_led_get;
+ data->kb_led.brightness_set = kb_led_set;
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
+ data->kb_led.max_brightness = 255;
+ data->kb_toggle_brightness = 72;
+ data->kb_color = 0xffffff;
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ data->kb_led.max_brightness = 5;
+ data->kb_color = -1;
+ }
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->kb_led);
+ if (err)
+ return err;
+
+ if (data->kb_color >= 0) {
+ err = device_create_file(
+ data->kb_led.dev,
+ &kb_led_color_dev_attr
+ );
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+// Remove a System76 ACPI device
+static int system76_remove(struct acpi_device *acpi_dev)
+{
+ struct system76_data *data;
+
+ data = acpi_driver_data(acpi_dev);
+ if (data->kb_color >= 0)
+ device_remove_file(data->kb_led.dev, &kb_led_color_dev_attr);
+
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
+
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
+
+ system76_get(data, "FINI");
+
+ return 0;
+}
+
+static struct acpi_driver system76_driver = {
+ .name = "System76 ACPI Driver",
+ .class = "hotkey",
+ .ids = device_ids,
+ .ops = {
+ .add = system76_add,
+ .remove = system76_remove,
+ .notify = system76_notify,
+ },
+};
+module_acpi_driver(system76_driver);
+
+MODULE_DESCRIPTION("System76 ACPI Driver");
+MODULE_AUTHOR("Jeremy Soller <jeremy@system76.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 1c7d8324ff5c..72205771d03d 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -310,6 +310,22 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
.properties = jumper_ezpad_6_pro_b_props,
};
+static const struct property_entry jumper_ezpad_6_m4_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 35),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 15),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1950),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1525),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data jumper_ezpad_6_m4_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = jumper_ezpad_6_m4_props,
+};
+
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -498,6 +514,24 @@ static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = {
.properties = pov_mobii_wintab_p1006w_v10_props,
};
+static const struct property_entry schneider_sct101ctm_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-schneider-sct101ctm.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data schneider_sct101ctm_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = schneider_sct101ctm_props,
+};
+
static const struct property_entry teclast_x3_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -789,6 +823,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Jumper EZpad 6 m4 */
+ .driver_data = (void *)&jumper_ezpad_6_m4_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "jumper"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
+ /* Jumper8.S106x.A00C.1066 with the version dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "Jumper8.S106x"),
+ },
+ },
+ {
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
.matches = {
@@ -909,6 +953,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Schneider SCT101CTM */
+ .driver_data = (void *)&schneider_sct101ctm_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
+ },
+ },
+ {
/* Teclast X3 Plus */
.driver_data = (void *)&teclast_x3_plus_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 59e9aa0f9643..dc2e966a5c25 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -911,7 +911,7 @@ static const struct file_operations wmi_fops = {
.read = wmi_char_read,
.open = wmi_char_open,
.unlocked_ioctl = wmi_ioctl,
- .compat_ioctl = wmi_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static int wmi_dev_probe(struct device *dev)
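The wmi change above belongs to a tree-wide compat-ioctl cleanup: pointing .compat_ioctl at the native handler passes the raw 32-bit user pointer through untranslated, while the generic compat_ptr_ioctl() helper converts it with compat_ptr() first. Its shape is roughly the following (a simplified sketch of the fs/ioctl.c helper, not the verbatim implementation):

long compat_ptr_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* compat_ptr() widens the 32-bit user pointer correctly */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}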
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 4684e7df833a..5376f3d22f31 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -905,7 +905,7 @@ static int omap_sr_probe(struct platform_device *pdev)
sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
- (void *)sr_info, &pm_sr_fops);
+ sr_info, &pm_sr_fops);
debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_weight);
debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index a564237278ff..c721939767eb 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -140,6 +140,16 @@ config POWER_RESET_LTC2952
This driver supports an external powerdown trigger and board power
down via the LTC2952. Bindings are made in the device tree.
+config POWER_RESET_MT6323
+ bool "MediaTek MT6323 power-off driver"
+ depends on MFD_MT6397
+ help
+ The power-off driver is responsible for externally shutting down
+ the power of the remote MediaTek SoC that the MT6323 is connected
+ to, by controlling a tiny BBPU circuit inside the MT6323 RTC.
+
+ Say Y if you have a board where the MT6323 can be found.
+
config POWER_RESET_QNAP
bool "QNAP power-off driver"
depends on OF_GPIO && PLAT_ORION
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 85da3198e4e0..da37f8b851dc 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 44ca983a49a1..d94e3267c3b6 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -131,7 +131,7 @@ static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode,
static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST),
+ writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST,
at91_rstc_base);
return NOTIFY_DONE;
@@ -140,9 +140,7 @@ static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
static int samx7_restart(struct notifier_block *this, unsigned long mode,
void *cmd)
{
- writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PROCRST),
- at91_rstc_base);
-
+ writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base);
return NOTIFY_DONE;
}
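The cpu_to_le32() calls removed in the two at91 hunks above were redundant: writel() already stores in little-endian byte order, so wrapping the value double-converts on a big-endian build and is a no-op otherwise. A simplified view of the asm-generic accessor, with the barriers omitted (a sketch, not the verbatim definition):

static inline void writel(u32 value, volatile void __iomem *addr)
{
	/* the accessor itself performs the CPU-to-LE conversion */
	__raw_writel((u32 __force)cpu_to_le32(value), addr);
}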
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index e341cc5c0ea6..1c18f465a245 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -269,6 +269,12 @@ static const struct of_device_id at91_shdwc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at91_shdwc_of_match);
+static const struct of_device_id at91_pmc_ids[] = {
+ { .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "microchip,sam9x60-pmc" },
+ { /* Sentinel. */ }
+};
+
static int __init at91_shdwc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -313,7 +319,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ np = of_find_matching_node(NULL, at91_pmc_ids);
if (!np) {
ret = -ENODEV;
goto clk_disable;
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
new file mode 100644
index 000000000000..1caf43d9e46d
--- /dev/null
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power off through MediaTek PMIC
+ *
+ * Copyright (C) 2018 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/rtc.h>
+
+struct mt6323_pwrc {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 base;
+};
+
+static struct mt6323_pwrc *mt_pwrc;
+
+static void mt6323_do_pwroff(void)
+{
+ struct mt6323_pwrc *pwrc = mt_pwrc;
+ unsigned int val;
+ int ret;
+
+ regmap_write(pwrc->regmap, pwrc->base + RTC_BBPU, RTC_BBPU_KEY);
+ regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR, 1);
+
+ ret = regmap_read_poll_timeout(pwrc->regmap,
+ pwrc->base + RTC_BBPU, val,
+ !(val & RTC_BBPU_CBUSY),
+ MTK_RTC_POLL_DELAY_US,
+ MTK_RTC_POLL_TIMEOUT);
+ if (ret)
+ dev_err(pwrc->dev, "failed to write BBPU: %d\n", ret);
+
+ /* Give the system some time to go down; otherwise warn below */
+ mdelay(1000);
+
+ WARN_ONCE(1, "Unable to power off system\n");
+}
+
+static int mt6323_pwrc_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
+ struct mt6323_pwrc *pwrc;
+ struct resource *res;
+
+ pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
+ if (!pwrc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pwrc->base = res->start;
+ pwrc->regmap = mt6397_chip->regmap;
+ pwrc->dev = &pdev->dev;
+ mt_pwrc = pwrc;
+
+ pm_power_off = &mt6323_do_pwroff;
+
+ return 0;
+}
+
+static int mt6323_pwrc_remove(struct platform_device *pdev)
+{
+ if (pm_power_off == &mt6323_do_pwroff)
+ pm_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id mt6323_pwrc_dt_match[] = {
+ { .compatible = "mediatek,mt6323-pwrc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);
+
+static struct platform_driver mt6323_pwrc_driver = {
+ .probe = mt6323_pwrc_probe,
+ .remove = mt6323_pwrc_remove,
+ .driver = {
+ .name = "mt6323-pwrc",
+ .of_match_table = mt6323_pwrc_dt_match,
+ },
+};
+
+module_platform_driver(mt6323_pwrc_driver);
+
+MODULE_DESCRIPTION("Poweroff driver for MT6323 PMIC");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index c84a7b1caeb6..27164a1d3c7c 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -629,7 +629,7 @@ config BATTERY_GAUGE_LTC2941
config AB8500_BM
bool "AB8500 Battery Management Driver"
- depends on AB8500_CORE && AB8500_GPADC
+ depends on AB8500_CORE && AB8500_GPADC && (IIO = y)
help
Say Y to include support for AB8500 battery management.
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 8fe81259bfd9..909f0242bacb 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -26,7 +26,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/iio/consumer.h>
#define VTVOUT_V 1800
@@ -79,7 +79,8 @@ struct ab8500_btemp_ranges {
* @bat_temp: Dispatched battery temperature in degree Celsius
* @prev_bat_temp: Last measured battery temperature in degree Celsius
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @btemp_ball: ADC channel for the battery ball temperature
+ * @bat_ctrl: ADC channel for the battery control
* @fg: Pointer to the struct fg
* @bm: Platform specific battery management information
* @btemp_psy: Structure for BTEMP specific battery properties
@@ -96,7 +97,8 @@ struct ab8500_btemp {
int bat_temp;
int prev_bat_temp;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *btemp_ball;
+ struct iio_channel *bat_ctrl;
struct ab8500_fg *fg;
struct abx500_bm_data *bm;
struct power_supply *btemp_psy;
@@ -177,13 +179,13 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
*/
static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
{
- int vbtemp;
+ int vbtemp, ret;
static int prev;
- vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
- if (vbtemp < 0) {
+ ret = iio_read_channel_processed(di->bat_ctrl, &vbtemp);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed, using previous value",
+ "%s ADC conversion failed, using previous value",
__func__);
return prev;
}
@@ -455,7 +457,7 @@ static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
*/
static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
{
- int temp;
+ int temp, ret;
static int prev;
int rbat, rntc, vntc;
u8 id;
@@ -480,10 +482,10 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
di->bm->bat_type[id].r_to_t_tbl,
di->bm->bat_type[id].n_temp_tbl_elements, rbat);
} else {
- vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
- if (vntc < 0) {
+ ret = iio_read_channel_processed(di->btemp_ball, &vntc);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed,"
+ "%s ADC conversion failed,"
" using previous value\n", __func__);
return prev;
}
@@ -1024,7 +1026,22 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* Get ADC channels */
+ di->btemp_ball = devm_iio_channel_get(&pdev->dev, "btemp_ball");
+ if (IS_ERR(di->btemp_ball)) {
+ if (PTR_ERR(di->btemp_ball) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get BTEMP BALL ADC channel\n");
+ return PTR_ERR(di->btemp_ball);
+ }
+ di->bat_ctrl = devm_iio_channel_get(&pdev->dev, "bat_ctrl");
+ if (IS_ERR(di->bat_ctrl)) {
+ if (PTR_ERR(di->bat_ctrl) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get BAT CTRL ADC channel\n");
+ return PTR_ERR(di->bat_ctrl);
+ }
di->initialized = false;
@@ -1082,6 +1099,11 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_btemp_irq[i].name, di);
@@ -1104,13 +1126,13 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->btemp_psy);
-
/* We also have to free all successfully registered irqs */
for (i = i - 1; i >= 0; i--) {
irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
free_irq(irq, di);
}
+
+ power_supply_unregister(di->btemp_psy);
free_btemp_wq:
destroy_workqueue(di->btemp_wq);
return ret;
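All three ab8500 conversions in this series share one IIO consumer pattern: resolve the channel by the name wired up in the device tree, map -ENODEV to -EPROBE_DEFER because the GPADC provider may simply not have bound yet, then read calibrated values with iio_read_channel_processed(). Condensed to its shared shape (a sketch distilled from the hunks, not additional driver code):

struct iio_channel *ch;
int microvolts, ret;

ch = devm_iio_channel_get(&pdev->dev, "main_bat_v");
if (IS_ERR(ch)) {
	if (PTR_ERR(ch) == -ENODEV)	/* provider not bound yet */
		return -EPROBE_DEFER;
	return PTR_ERR(ch);
}

ret = iio_read_channel_processed(ch, &microvolts);
if (ret < 0)
	return ret;	/* the callers above fall back to a previous value */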
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index e51d0e72beea..8a0f9d769690 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -29,10 +29,10 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
#include <linux/mfd/abx500/ux500_chargalg.h>
#include <linux/usb/otg.h>
#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
/* Charger constants */
#define NO_PW_CONN 0
@@ -233,7 +233,10 @@ struct ab8500_charger_max_usb_in_curr {
* @current_stepping_sessions:
* Counter for current stepping sessions
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @adc_main_charger_v: ADC channel for main charger voltage
+ * @adc_main_charger_c: ADC channel for main charger current
+ * @adc_vbus_v: ADC channel for USB charger voltage
+ * @adc_usb_charger_c: ADC channel for USB charger current
* @bm: Platform specific battery management information
* @flags: Structure for information about events triggered
* @usb_state: Structure for usb stack information
@@ -283,7 +286,10 @@ struct ab8500_charger {
int is_aca_rid;
atomic_t current_stepping_sessions;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *adc_main_charger_v;
+ struct iio_channel *adc_main_charger_c;
+ struct iio_channel *adc_vbus_v;
+ struct iio_channel *adc_usb_charger_c;
struct abx500_bm_data *bm;
struct ab8500_charger_event_flags flags;
struct ab8500_charger_usb_state usb_state;
@@ -459,13 +465,13 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
*/
static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
{
- int vch;
+ int vch, ret;
/* Only measure voltage if the charger is connected */
if (di->ac.charger_connected) {
- vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
- if (vch < 0)
- dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
+ ret = iio_read_channel_processed(di->adc_main_charger_v, &vch);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
vch = 0;
}
@@ -510,13 +516,13 @@ static int ab8500_charger_ac_cv(struct ab8500_charger *di)
*/
static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
{
- int vch;
+ int vch, ret;
/* Only measure voltage if the charger is connected */
if (di->usb.charger_connected) {
- vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
- if (vch < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_vbus_v, &vch);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
vch = 0;
}
@@ -532,13 +538,13 @@ static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
*/
static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
{
- int ich;
+ int ich, ret;
/* Only measure current if the charger is online */
if (di->usb.charger_online) {
- ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
- if (ich < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_usb_charger_c, &ich);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
ich = 0;
}
@@ -554,13 +560,13 @@ static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
*/
static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
{
- int ich;
+ int ich, ret;
/* Only measure current if the charger is online */
if (di->ac.charger_online) {
- ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
- if (ich < 0)
- dev_err(di->dev, "%s gpadc conv failed\n", __func__);
+ ret = iio_read_channel_processed(di->adc_main_charger_c, &ich);
+ if (ret < 0)
+ dev_err(di->dev, "%s ADC conv failed,\n", __func__);
} else {
ich = 0;
}
@@ -3371,7 +3377,39 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ /* Get ADC channels */
+ di->adc_main_charger_v = devm_iio_channel_get(&pdev->dev,
+ "main_charger_v");
+ if (IS_ERR(di->adc_main_charger_v)) {
+ if (PTR_ERR(di->adc_main_charger_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC main charger voltage\n");
+ return PTR_ERR(di->adc_main_charger_v);
+ }
+ di->adc_main_charger_c = devm_iio_channel_get(&pdev->dev,
+ "main_charger_c");
+ if (IS_ERR(di->adc_main_charger_c)) {
+ if (PTR_ERR(di->adc_main_charger_c) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC main charger current\n");
+ return PTR_ERR(di->adc_main_charger_c);
+ }
+ di->adc_vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ if (IS_ERR(di->adc_vbus_v)) {
+ if (PTR_ERR(di->adc_vbus_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC USB charger voltage\n");
+ return PTR_ERR(di->adc_vbus_v);
+ }
+ di->adc_usb_charger_c = devm_iio_channel_get(&pdev->dev,
+ "usb_charger_c");
+ if (IS_ERR(di->adc_usb_charger_c)) {
+ if (PTR_ERR(di->adc_usb_charger_c) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get ADC USB charger current\n");
+ return PTR_ERR(di->adc_usb_charger_c);
+ }
/* initialize lock */
spin_lock_init(&di->usb_state.usb_lock);
@@ -3556,6 +3594,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Register interrupts */
for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_charger_irq[i].name, di);
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 6fc4bc30644c..c3912ee9eb99 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -32,7 +32,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/iio/consumer.h>
#include <linux/kernel.h>
#define MILLI_TO_MICRO 1000
@@ -182,7 +182,7 @@ struct inst_curr_result_list {
* @bat_cap: Structure for battery capacity specific parameters
* @avg_cap: Average capacity filter
* @parent: Pointer to the struct ab8500
- * @gpadc: Pointer to the struct gpadc
+ * @main_bat_v: ADC channel for the main battery voltage
* @bm: Platform specific battery management information
* @fg_psy: Structure that holds the FG specific battery properties
* @fg_wq: Work queue for running the FG algorithm
@@ -224,7 +224,7 @@ struct ab8500_fg {
struct ab8500_fg_battery_capacity bat_cap;
struct ab8500_fg_avg_cap avg_cap;
struct ab8500 *parent;
- struct ab8500_gpadc *gpadc;
+ struct iio_channel *main_bat_v;
struct abx500_bm_data *bm;
struct power_supply *fg_psy;
struct workqueue_struct *fg_wq;
@@ -829,13 +829,13 @@ exit:
*/
static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
{
- int vbat;
+ int vbat, ret;
static int prev;
- vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
- if (vbat < 0) {
+ ret = iio_read_channel_processed(di->main_bat_v, &vbat);
+ if (ret < 0) {
dev_err(di->dev,
- "%s gpadc conversion failed, using previous value\n",
+ "%s ADC conversion failed, using previous value\n",
__func__);
return prev;
}
@@ -3066,7 +3066,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* get parent data */
di->dev = &pdev->dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+ di->main_bat_v = devm_iio_channel_get(&pdev->dev, "main_bat_v");
+ if (IS_ERR(di->main_bat_v)) {
+ if (PTR_ERR(di->main_bat_v) == -ENODEV)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "failed to get main battery ADC channel\n");
+ return PTR_ERR(di->main_bat_v);
+ }
psy_cfg.supplied_to = supply_interface;
psy_cfg.num_supplicants = ARRAY_SIZE(supply_interface);
@@ -3151,6 +3158,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register primary interrupt handlers */
for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_irq(irq, ab8500_fg_irq_th[i].isr,
IRQF_SHARED | IRQF_NO_SUSPEND,
ab8500_fg_irq_th[i].name, di);
@@ -3158,7 +3170,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_th[i].name, irq, ret);
@@ -3166,6 +3178,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Register threaded interrupt handler */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ if (irq < 0) {
+ ret = irq;
+ goto free_irq_th;
+ }
+
ret = request_threaded_irq(irq, NULL, ab8500_fg_irq_bh[0].isr,
IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_fg_irq_bh[0].name, di);
@@ -3173,7 +3190,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
- goto free_irq;
+ goto free_irq_th;
}
dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
ab8500_fg_irq_bh[0].name, irq, ret);
@@ -3212,15 +3229,17 @@ static int ab8500_fg_probe(struct platform_device *pdev)
return ret;
free_irq:
- power_supply_unregister(di->fg_psy);
-
/* We also have to free all registered irqs */
- for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
+ free_irq(irq, di);
+free_irq_th:
+ while (--i >= 0) {
+ /* Last assignment of i from primary interrupt handlers */
irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
free_irq(irq, di);
}
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- free_irq(irq, di);
+
+ power_supply_unregister(di->fg_psy);
free_inst_curr_wq:
destroy_workqueue(di->fg_wq);
return ret;
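The relabelled error path above is the usual partial-registration unwind, now in strict reverse order: free the bottom-half IRQ first, walk --i back over the threshold IRQs that were actually requested, and only unregister the power supply once no ISR can still dereference it. The idiom in isolation (a generic sketch; some_isr and the arrays stand in for the driver's real tables):

static int register_all(struct power_supply *psy, const int *irqs,
			const char * const *names, int n, void *data)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = request_irq(irqs[i], some_isr, 0, names[i], data);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* only what was requested */
		free_irq(irqs[i], data);
	power_supply_unregister(psy);	/* after every ISR is gone */
	return ret;
}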
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 23757fb10479..e6e37d4f20e4 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -354,13 +354,13 @@ static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
if (di->chg_info.charger_type & USB_CHG) {
return di->usb_chg->ops.check_enable(di->usb_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
} else if ((di->chg_info.charger_type & AC_CHG) &&
!(di->ac_chg->external)) {
return di->ac_chg->ops.check_enable(di->ac_chg,
- di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
- di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+ di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+ di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
}
return 0;
}
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index dc4c316eff81..5f0a5722b19e 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -48,6 +48,8 @@
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+#define AXP813_BC_EN BIT(0)
+
/*
* Note: do not raise the debounce time; we must report Vusb high within
* 100ms otherwise we get Vbus errors in musb.
@@ -495,6 +497,12 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (power->axp20x_id == AXP813_ID) {
+ /* Enable USB Battery Charging specification detection */
+ regmap_update_bits(axp20x->regmap, AXP288_BC_GLOBAL,
+ AXP813_BC_EN, AXP813_BC_EN);
+ }
+
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
index 1bb32b7226d7..b8e1ec106627 100644
--- a/drivers/power/supply/bd70528-charger.c
+++ b/drivers/power/supply/bd70528-charger.c
@@ -741,3 +741,4 @@ module_platform_driver(bd70528_power);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 power-supply driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-power");
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 61d6447d1966..6e9392901b0a 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -33,8 +33,6 @@
#include <linux/iio/types.h>
#include <linux/mfd/motorola-cpcap.h>
-#include <asm/div64.h>
-
/*
* Register bit defines for CPCAP_REG_BPEOL. Some of these seem to
* map to MC13783UG.pdf "Table 5-19. Register 13, Power Control 0"
@@ -52,6 +50,26 @@
#define CPCAP_REG_BPEOL_BIT_BATTDETEN BIT(1) /* Enable battery detect */
#define CPCAP_REG_BPEOL_BIT_EOLSEL BIT(0) /* BPDET = 0, EOL = 1 */
+/*
+ * Register bit defines for CPCAP_REG_CCC1. These seem similar to the twl6030
+ * coulomb counter registers rather than the mc13892 registers. Both twl6030
+ * and mc13892 set bits 2 and 1 to reset and clear registers. But mc13892
+ * sets bit 0 to start the coulomb counter while twl6030 sets bit 0 to stop
+ * the coulomb counter like cpcap does. So for now, we use the twl6030 style
+ * naming for the registers.
+ */
+#define CPCAP_REG_CCC1_ACTIVE_MODE1 BIT(4) /* Update rate */
+#define CPCAP_REG_CCC1_ACTIVE_MODE0 BIT(3) /* Update rate */
+#define CPCAP_REG_CCC1_AUTOCLEAR BIT(2) /* Resets sample registers */
+#define CPCAP_REG_CCC1_CAL_EN BIT(1) /* Clears after write in 1s */
+#define CPCAP_REG_CCC1_PAUSE BIT(0) /* Stop counters, allow write */
+#define CPCAP_REG_CCC1_RESET_MASK (CPCAP_REG_CCC1_AUTOCLEAR | \
+ CPCAP_REG_CCC1_CAL_EN)
+
+#define CPCAP_REG_CCCC2_RATE1 BIT(5)
+#define CPCAP_REG_CCCC2_RATE0 BIT(4)
+#define CPCAP_REG_CCCC2_ENABLE BIT(3)
+
#define CPCAP_BATTERY_CC_SAMPLE_PERIOD_MS 250
enum {
@@ -64,6 +82,7 @@ enum {
enum cpcap_battery_irq_action {
CPCAP_BATTERY_IRQ_ACTION_NONE,
+ CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE,
CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW,
CPCAP_BATTERY_IRQ_ACTION_POWEROFF,
};
@@ -76,15 +95,16 @@ struct cpcap_interrupt_desc {
};
struct cpcap_battery_config {
- int ccm;
int cd_factor;
struct power_supply_info info;
+ struct power_supply_battery_info bat;
};
struct cpcap_coulomb_counter_data {
s32 sample; /* 24 or 32 bits */
s32 accumulator;
s16 offset; /* 9 bits */
+ s16 integrator; /* 13 or 16 bits */
};
enum cpcap_battery_state {
@@ -110,6 +130,7 @@ struct cpcap_battery_ddata {
struct power_supply *psy;
struct cpcap_battery_config config;
struct cpcap_battery_state_data state[CPCAP_BATTERY_STATE_NR];
+ u32 cc_lsb; /* μAms per LSB */
atomic_t active;
int status;
u16 vendor;
@@ -217,41 +238,17 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
s16 offset, u32 divider)
{
s64 acc;
- u64 tmp;
- int avg_current;
- u32 cc_lsb;
if (!divider)
return 0;
- switch (ddata->vendor) {
- case CPCAP_VENDOR_ST:
- cc_lsb = 95374; /* μAms per LSB */
- break;
- case CPCAP_VENDOR_TI:
- cc_lsb = 91501; /* μAms per LSB */
- break;
- default:
- return -EINVAL;
- }
-
acc = accumulator;
- acc = acc - ((s64)sample * offset);
- cc_lsb = (cc_lsb * ddata->config.cd_factor) / 1000;
+ acc -= (s64)sample * offset;
+ acc *= ddata->cc_lsb;
+ acc *= -1;
+ acc = div_s64(acc, divider);
- if (acc >= 0)
- tmp = acc;
- else
- tmp = acc * -1;
-
- tmp = tmp * cc_lsb;
- do_div(tmp, divider);
- avg_current = tmp;
-
- if (acc >= 0)
- return -avg_current;
- else
- return avg_current;
+ return acc;
}
/* 3600000μAms = 1μAh */
@@ -293,12 +290,13 @@ static int
cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
struct cpcap_coulomb_counter_data *ccd)
{
- u16 buf[7]; /* CPCAP_REG_CC1 to CCI */
+ u16 buf[7]; /* CPCAP_REG_CCS1 to CCI */
int error;
ccd->sample = 0;
ccd->accumulator = 0;
ccd->offset = 0;
+ ccd->integrator = 0;
/* Read coulomb counter register range */
error = regmap_bulk_read(ddata->reg, CPCAP_REG_CCS1,
@@ -323,6 +321,12 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
ccd->offset = buf[4];
ccd->offset = sign_extend32(ccd->offset, 9);
+ /* Integrator register CPCAP_REG_CCI */
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+ ccd->integrator = sign_extend32(buf[6], 13);
+ else
+ ccd->integrator = (s16)buf[6];
+
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
ccd->accumulator,
@@ -336,31 +340,28 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
static int cpcap_battery_cc_get_avg_current(struct cpcap_battery_ddata *ddata)
{
int value, acc, error;
- s32 sample = 1;
+ s32 sample;
s16 offset;
- if (ddata->vendor == CPCAP_VENDOR_ST)
- sample = 4;
-
/* Coulomb counter integrator */
error = regmap_read(ddata->reg, CPCAP_REG_CCI, &value);
if (error)
return error;
- if ((ddata->vendor == CPCAP_VENDOR_TI) && (value > 0x2000))
- value = value | 0xc000;
-
- acc = (s16)value;
+ if (ddata->vendor == CPCAP_VENDOR_TI) {
+ acc = sign_extend32(value, 13);
+ sample = 1;
+ } else {
+ acc = (s16)value;
+ sample = 4;
+ }
- /* Coulomb counter sample time */
+ /* Coulomb counter calibration offset */
error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
if (error)
return error;
- if (value < 0x200)
- offset = value;
- else
- offset = value | 0xfc00;
+ offset = sign_extend32(value, 9);
return cpcap_battery_cc_to_ua(ddata, sample, acc, offset);
}
@@ -369,8 +370,8 @@ static bool cpcap_battery_full(struct cpcap_battery_ddata *ddata)
{
struct cpcap_battery_state_data *state = cpcap_battery_latest(ddata);
- /* Basically anything that measures above 4347000 is full */
- if (state->voltage >= (ddata->config.info.voltage_max_design - 4000))
+ if (state->voltage >=
+ (ddata->config.bat.constant_charge_voltage_max_uv - 18000))
return true;
return false;
@@ -417,6 +418,7 @@ static enum power_supply_property cpcap_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
@@ -475,6 +477,9 @@ static int cpcap_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
val->intval = ddata->config.info.voltage_min_design;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->config.bat.constant_charge_voltage_max_uv;
+ break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
sample = latest->cc.sample - previous->cc.sample;
if (!sample) {
@@ -540,6 +545,69 @@ static int cpcap_battery_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_battery_update_charger(struct cpcap_battery_ddata *ddata,
+ int const_charge_voltage)
+{
+ union power_supply_propval prop;
+ union power_supply_propval val;
+ struct power_supply *charger;
+ int error;
+
+ charger = power_supply_get_by_name("usb");
+ if (!charger)
+ return -ENODEV;
+
+ error = power_supply_get_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (error)
+ return error;
+
+ /* Allow charger const voltage lower than battery const voltage */
+ if (const_charge_voltage > prop.intval)
+ return 0;
+
+ val.intval = const_charge_voltage;
+
+ return power_supply_set_property(charger,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &val);
+}
+
+static int cpcap_battery_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_battery_ddata *ddata = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ if (val->intval < ddata->config.info.voltage_min_design)
+ return -EINVAL;
+ if (val->intval > ddata->config.info.voltage_max_design)
+ return -EINVAL;
+
+ ddata->config.bat.constant_charge_voltage_max_uv = val->intval;
+
+ return cpcap_battery_update_charger(ddata, val->intval);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_battery_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
{
struct cpcap_battery_ddata *ddata = data;
@@ -560,14 +628,19 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
latest = cpcap_battery_latest(ddata);
switch (d->action) {
+ case CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE:
+ dev_info(ddata->dev, "Coulomb counter calibration done\n");
+ break;
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
if (latest->current_ua >= 0)
- dev_warn(ddata->dev, "Battery low at 3.3V!\n");
+ dev_warn(ddata->dev, "Battery low at %imV!\n",
+ latest->voltage / 1000);
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->current_ua >= 0) {
+ if (latest->current_ua >= 0 && latest->voltage <= 3200000) {
dev_emerg(ddata->dev,
- "Battery empty at 3.1V, powering off\n");
+ "Battery empty at %imV, powering off\n",
+ latest->voltage / 1000);
orderly_poweroff(true);
}
break;
@@ -609,7 +682,9 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
d->name = name;
d->irq = irq;
- if (!strncmp(name, "lowbph", 6))
+ if (!strncmp(name, "cccal", 5))
+ d->action = CPCAP_BATTERY_IRQ_ACTION_CC_CAL_DONE;
+ else if (!strncmp(name, "lowbph", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW;
else if (!strncmp(name, "lowbpl", 6))
d->action = CPCAP_BATTERY_IRQ_ACTION_POWEROFF;
@@ -635,6 +710,9 @@ static int cpcap_battery_init_interrupts(struct platform_device *pdev,
return error;
}
+ /* Enable calibration interrupt if already available in dts */
+ cpcap_battery_init_irq(pdev, ddata, "cccal");
+
/* Enable low battery interrupts for 3.3V high and 3.1V low */
error = regmap_update_bits(ddata->reg, CPCAP_REG_BPEOL,
0xffff,
@@ -676,6 +754,60 @@ out_err:
return error;
}
+/* Calibrate coulomb counter */
+static int cpcap_battery_calibrate(struct cpcap_battery_ddata *ddata)
+{
+ int error, ccc1, value;
+ unsigned long timeout;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &ccc1);
+ if (error)
+ return error;
+
+ timeout = jiffies + msecs_to_jiffies(6000);
+
+ /* Start calibration */
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff,
+ CPCAP_REG_CCC1_CAL_EN);
+ if (error)
+ goto restore;
+
+ while (time_before(jiffies, timeout)) {
+ error = regmap_read(ddata->reg, CPCAP_REG_CCC1, &value);
+ if (error)
+ goto restore;
+
+ if (!(value & CPCAP_REG_CCC1_CAL_EN))
+ break;
+
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ msleep(300);
+ }
+
+ /* Read calibration offset from CCM */
+ error = regmap_read(ddata->reg, CPCAP_REG_CCM, &value);
+ if (error)
+ goto restore;
+
+ dev_info(ddata->dev, "calibration done: 0x%04x\n", value);
+
+restore:
+ if (error)
+ dev_err(ddata->dev, "%s: error %i\n", __func__, error);
+
+ error = regmap_update_bits(ddata->reg, CPCAP_REG_CCC1,
+ 0xffff, ccc1);
+ if (error)
+ dev_err(ddata->dev, "%s: restore error %i\n",
+ __func__, error);
+
+ return error;
+}
+
/*
* Based on the values from the Motorola mapphone Linux kernel. In the
* Motorola mapphone Linux kernel tree the value for pm_cd_factor
@@ -687,12 +819,12 @@ out_err:
* at 3078000. The device will die around 2743000.
*/
static const struct cpcap_battery_config cpcap_battery_default_data = {
- .ccm = 0x3ff,
.cd_factor = 0x3cc,
.info.technology = POWER_SUPPLY_TECHNOLOGY_LION,
.info.voltage_max_design = 4351000,
.info.voltage_min_design = 3100000,
.info.charge_full_design = 1740000,
+ .bat.constant_charge_voltage_max_uv = 4200000,
};
#ifdef CONFIG_OF
@@ -741,12 +873,19 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (error)
return error;
- platform_set_drvdata(pdev, ddata);
+ switch (ddata->vendor) {
+ case CPCAP_VENDOR_ST:
+ ddata->cc_lsb = 95374; /* μAms per LSB */
+ break;
+ case CPCAP_VENDOR_TI:
+ ddata->cc_lsb = 91501; /* μAms per LSB */
+ break;
+ default:
+ return -EINVAL;
+ }
+ ddata->cc_lsb = (ddata->cc_lsb * ddata->config.cd_factor) / 1000;
- error = regmap_update_bits(ddata->reg, CPCAP_REG_CCM,
- 0xffff, ddata->config.ccm);
- if (error)
- return error;
+ platform_set_drvdata(pdev, ddata);
error = cpcap_battery_init_interrupts(pdev, ddata);
if (error)
@@ -760,11 +899,13 @@ static int cpcap_battery_probe(struct platform_device *pdev)
if (!psy_desc)
return -ENOMEM;
- psy_desc->name = "battery",
- psy_desc->type = POWER_SUPPLY_TYPE_BATTERY,
- psy_desc->properties = cpcap_battery_props,
- psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props),
- psy_desc->get_property = cpcap_battery_get_property,
+ psy_desc->name = "battery";
+ psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ psy_desc->properties = cpcap_battery_props;
+ psy_desc->num_properties = ARRAY_SIZE(cpcap_battery_props);
+ psy_desc->get_property = cpcap_battery_get_property;
+ psy_desc->set_property = cpcap_battery_set_property;
+ psy_desc->property_is_writeable = cpcap_battery_property_is_writeable;
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = ddata;
@@ -779,6 +920,10 @@ static int cpcap_battery_probe(struct platform_device *pdev)
atomic_set(&ddata->active, 1);
+ error = cpcap_battery_calibrate(ddata);
+ if (error)
+ return error;
+
return 0;
}
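The rewritten cpcap_battery_cc_raw_div() collapses the old sign juggling into plain signed 64-bit math: average current = -(accumulator - sample * offset) * cc_lsb / divider, with cc_lsb pre-scaled by cd_factor once at probe time. A stand-alone check of the arithmetic; every input value below is assumed for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t accumulator = -150000;	/* raw charge accumulator, assumed */
	int32_t sample = 400;		/* number of samples, assumed */
	int16_t offset = -10;		/* calibration offset, assumed */
	uint32_t divider = 400 * 250;	/* samples * 250 ms period */
	/* 95374 uAms/LSB (ST variant) scaled by cd_factor 0x3cc */
	uint32_t cc_lsb = (95374u * 0x3cc) / 1000;

	int64_t acc = accumulator - (int64_t)sample * offset;
	acc *= cc_lsb;
	acc *= -1;
	acc /= divider;			/* div_s64() in the kernel */

	printf("average current: %lld uA\n", (long long)acc); /* ~135 mA */
	return 0;
}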
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 74258c7fe17d..c0d452e3dc8b 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -120,6 +120,13 @@ enum {
CPCAP_CHARGER_IIO_NR,
};
+enum {
+ CPCAP_CHARGER_DISCONNECTED,
+ CPCAP_CHARGER_DETECTING,
+ CPCAP_CHARGER_CHARGING,
+ CPCAP_CHARGER_DONE,
+};
+
struct cpcap_charger_ddata {
struct device *dev;
struct regmap *reg;
@@ -138,6 +145,8 @@ struct cpcap_charger_ddata {
atomic_t active;
int status;
+ int state;
+ int voltage;
};
struct cpcap_interrupt_desc {
@@ -153,6 +162,7 @@ struct cpcap_charger_ints_state {
bool chrg_se1b;
bool rvrs_mode;
+ bool chrgcurr2;
bool chrgcurr1;
bool vbusvld;
@@ -162,24 +172,26 @@ struct cpcap_charger_ints_state {
static enum power_supply_property cpcap_charger_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
};
+/* No battery always shows temperature of -40000 */
static bool cpcap_charger_battery_found(struct cpcap_charger_ddata *ddata)
{
struct iio_channel *channel;
- int error, value;
+ int error, temperature;
channel = ddata->channels[CPCAP_CHARGER_IIO_BATTDET];
- error = iio_read_channel_raw(channel, &value);
+ error = iio_read_channel_processed(channel, &temperature);
if (error < 0) {
dev_warn(ddata->dev, "%s failed: %i\n", __func__, error);
return false;
}
- return value == 1;
+ return temperature > -20000 && temperature < 60000;
}
static int cpcap_charger_get_charge_voltage(struct cpcap_charger_ddata *ddata)
@@ -224,6 +236,9 @@ static int cpcap_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
val->intval = ddata->status;
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ val->intval = ddata->voltage;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (ddata->status == POWER_SUPPLY_STATUS_CHARGING)
val->intval = cpcap_charger_get_charge_voltage(ddata) *
@@ -248,6 +263,83 @@ static int cpcap_charger_get_property(struct power_supply *psy,
return 0;
}
+static int cpcap_charger_match_voltage(int voltage)
+{
+ switch (voltage) {
+ case 0 ... 4100000 - 1: return 3800000;
+ case 4100000 ... 4120000 - 1: return 4100000;
+ case 4120000 ... 4150000 - 1: return 4120000;
+ case 4150000 ... 4170000 - 1: return 4150000;
+ case 4170000 ... 4200000 - 1: return 4170000;
+ case 4200000 ... 4230000 - 1: return 4200000;
+ case 4230000 ... 4250000 - 1: return 4230000;
+ case 4250000 ... 4270000 - 1: return 4250000;
+ case 4270000 ... 4300000 - 1: return 4270000;
+ case 4300000 ... 4330000 - 1: return 4300000;
+ case 4330000 ... 4350000 - 1: return 4330000;
+ case 4350000 ... 4380000 - 1: return 4350000;
+ case 4380000 ... 4400000 - 1: return 4380000;
+ case 4400000 ... 4420000 - 1: return 4400000;
+ case 4420000 ... 4440000 - 1: return 4420000;
+ case 4440000: return 4440000;
+ default: return 0;
+ }
+}
+
+static int
+cpcap_charger_get_bat_const_charge_voltage(struct cpcap_charger_ddata *ddata)
+{
+ union power_supply_propval prop;
+ struct power_supply *battery;
+ int voltage = ddata->voltage;
+ int error;
+
+ battery = power_supply_get_by_name("battery");
+ if (battery) {
+ error = power_supply_get_property(battery,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ &prop);
+ if (!error)
+ voltage = prop.intval;
+ }
+
+ return voltage;
+}
+
+static int cpcap_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct cpcap_charger_ddata *ddata = dev_get_drvdata(psy->dev.parent);
+ int voltage, batvolt;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ voltage = cpcap_charger_match_voltage(val->intval);
+ batvolt = cpcap_charger_get_bat_const_charge_voltage(ddata);
+ if (voltage > batvolt)
+ voltage = batvolt;
+ ddata->voltage = voltage;
+ schedule_delayed_work(&ddata->detect_work, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpcap_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static void cpcap_charger_set_cable_path(struct cpcap_charger_ddata *ddata,
bool enabled)
{
@@ -422,6 +514,7 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
s->chrg_se1b = val & BIT(13);
s->rvrs_mode = val & BIT(6);
+ s->chrgcurr2 = val & BIT(5);
s->chrgcurr1 = val & BIT(4);
s->vbusvld = val & BIT(3);
@@ -434,6 +527,79 @@ static int cpcap_charger_get_ints_state(struct cpcap_charger_ddata *ddata,
return 0;
}
+static void cpcap_charger_update_state(struct cpcap_charger_ddata *ddata,
+ int state)
+{
+ const char *status;
+
+ if (state > CPCAP_CHARGER_DONE) {
+ dev_warn(ddata->dev, "unknown state: %i\n", state);
+
+ return;
+ }
+
+ ddata->state = state;
+
+ switch (state) {
+ case CPCAP_CHARGER_DISCONNECTED:
+ status = "DISCONNECTED";
+ break;
+ case CPCAP_CHARGER_DETECTING:
+ status = "DETECTING";
+ break;
+ case CPCAP_CHARGER_CHARGING:
+ status = "CHARGING";
+ break;
+ case CPCAP_CHARGER_DONE:
+ status = "DONE";
+ break;
+ default:
+ return;
+ }
+
+ dev_dbg(ddata->dev, "state: %s\n", status);
+}
+
+static int cpcap_charger_voltage_to_regval(int voltage)
+{
+ int offset;
+
+ switch (voltage) {
+ case 0 ... 4100000 - 1:
+ return 0;
+ case 4100000 ... 4200000 - 1:
+ offset = 1;
+ break;
+ case 4200000 ... 4300000 - 1:
+ offset = 0;
+ break;
+ case 4300000 ... 4380000 - 1:
+ offset = -1;
+ break;
+ case 4380000 ... 4440000:
+ offset = -2;
+ break;
+ default:
+ return 0;
+ }
+
+ return ((voltage - 4100000) / 20000) + offset;
+}
+
+static void cpcap_charger_disconnect(struct cpcap_charger_ddata *ddata,
+ int state, unsigned long delay)
+{
+ int error;
+
+ error = cpcap_charger_set_state(ddata, 0, 0, 0);
+ if (error)
+ return;
+
+ cpcap_charger_update_state(ddata, state);
+ power_supply_changed(ddata->usb);
+ schedule_delayed_work(&ddata->detect_work, delay);
+}
+
static void cpcap_usb_detect(struct work_struct *work)
{
struct cpcap_charger_ddata *ddata;
@@ -447,24 +613,67 @@ static void cpcap_usb_detect(struct work_struct *work)
if (error)
return;
+ /* Just init the state if a charger is connected with no chrg_det set */
+ if (!s.chrg_det && s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DETECTING);
+
+ return;
+ }
+
+ /*
+ * If battery voltage is higher than charge voltage, it may have been
+ * charged to 4.35V by Android. Try again in 10 minutes.
+ */
+ if (cpcap_charger_get_charge_voltage(ddata) > ddata->voltage) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 60 * 10);
+
+ return;
+ }
+
+ /* Throttle chrgcurr2 interrupt for charger done and retry */
+ switch (ddata->state) {
+ case CPCAP_CHARGER_CHARGING:
+ if (s.chrgcurr2)
+ break;
+ if (s.chrgcurr1 && s.vbusvld) {
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DONE,
+ HZ * 5);
+ return;
+ }
+ break;
+ case CPCAP_CHARGER_DONE:
+ if (!s.chrgcurr2)
+ break;
+ cpcap_charger_disconnect(ddata, CPCAP_CHARGER_DETECTING,
+ HZ * 5);
+ return;
+ default:
+ break;
+ }
+
if (!ddata->feeding_vbus && cpcap_charger_vbus_valid(ddata) &&
s.chrgcurr1) {
int max_current;
+ int vchrg;
if (cpcap_charger_battery_found(ddata))
max_current = CPCAP_REG_CRM_ICHRG_1A596;
else
max_current = CPCAP_REG_CRM_ICHRG_0A532;
+ vchrg = cpcap_charger_voltage_to_regval(ddata->voltage);
error = cpcap_charger_set_state(ddata,
- CPCAP_REG_CRM_VCHRG_4V35,
+ CPCAP_REG_CRM_VCHRG(vchrg),
max_current, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_CHARGING);
} else {
error = cpcap_charger_set_state(ddata, 0, 0, 0);
if (error)
goto out_err;
+ cpcap_charger_update_state(ddata, CPCAP_CHARGER_DISCONNECTED);
}
power_supply_changed(ddata->usb);
@@ -524,7 +733,7 @@ static const char * const cpcap_charger_irqs[] = {
"chrg_det", "rvrs_chrg",
/* REG_INT1 */
- "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr1", "vbusvld",
+ "chrg_se1b", "se0conn", "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
/* REG_INT_3 */
"battdetb",
@@ -596,6 +805,8 @@ static const struct power_supply_desc cpcap_charger_usb_desc = {
.properties = cpcap_charger_props,
.num_properties = ARRAY_SIZE(cpcap_charger_props),
.get_property = cpcap_charger_get_property,
+ .set_property = cpcap_charger_set_property,
+ .property_is_writeable = cpcap_charger_property_is_writeable,
};
#ifdef CONFIG_OF
@@ -625,6 +836,7 @@ static int cpcap_charger_probe(struct platform_device *pdev)
return -ENOMEM;
ddata->dev = &pdev->dev;
+ ddata->voltage = 4200000;
ddata->reg = dev_get_regmap(ddata->dev->parent, NULL);
if (!ddata->reg)
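cpcap_charger_voltage_to_regval() linearizes the non-uniform VCHRG table: the base formula is (voltage - 4100000) / 20000 and the per-range offset corrects for the 30 mV and 50 mV gaps in the hardware steps. The same ranges spot-checked in user space (rewritten without the GCC case-range extension):

#include <assert.h>
#include <stdio.h>

static int voltage_to_regval(int voltage)
{
	int offset;

	if (voltage < 4100000)
		return 0;
	else if (voltage < 4200000)
		offset = 1;
	else if (voltage < 4300000)
		offset = 0;
	else if (voltage < 4380000)
		offset = -1;
	else if (voltage <= 4440000)
		offset = -2;
	else
		return 0;

	return ((voltage - 4100000) / 20000) + offset;
}

int main(void)
{
	assert(voltage_to_regval(4100000) == 1);
	assert(voltage_to_regval(4200000) == 5);	/* probe default 4.2 V */
	assert(voltage_to_regval(4350000) == 11);
	assert(voltage_to_regval(4440000) == 15);	/* top of the table */
	printf("VCHRG spot checks passed\n");
	return 0;
}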
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index c3cad2b6daba..65c23ef6408d 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -33,6 +33,8 @@ static int battery_present = 1; /* true */
static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
static int battery_capacity = 50;
static int battery_voltage = 3300;
+static int battery_charge_counter = -1000;
+static int battery_current = 1600;
static bool module_initialized;
@@ -100,6 +102,9 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_NOW:
val->intval = battery_capacity;
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = battery_charge_counter;
+ break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CHARGE_FULL:
val->intval = 100;
@@ -114,6 +119,10 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = battery_voltage;
break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = battery_current;
+ break;
default:
pr_info("%s: some properties deliberately report errors.\n",
__func__);
@@ -135,6 +144,7 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -144,6 +154,8 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
};
static char *test_power_ac_supplied_to[] = {
@@ -447,6 +459,36 @@ static int param_set_battery_voltage(const char *key,
#define param_get_battery_voltage param_get_int
+static int param_set_battery_charge_counter(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_charge_counter = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_charge_counter param_get_int
+
+static int param_set_battery_current(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_current = tmp;
+ signal_power_supply_changed(test_power_supplies[TEST_BATTERY]);
+ return 0;
+}
+
+#define param_get_battery_current param_get_int
+
static const struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
@@ -487,6 +529,16 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
.get = param_get_battery_voltage,
};
+static const struct kernel_param_ops param_ops_battery_charge_counter = {
+ .set = param_set_battery_charge_counter,
+ .get = param_get_battery_charge_counter,
+};
+
+static const struct kernel_param_ops param_ops_battery_current = {
+ .set = param_set_battery_current,
+ .get = param_get_battery_current,
+};
+
#define param_check_ac_online(name, p) __param_check(name, p, void);
#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
@@ -495,6 +547,8 @@ static const struct kernel_param_ops param_ops_battery_voltage = {
#define param_check_battery_health(name, p) __param_check(name, p, void);
#define param_check_battery_capacity(name, p) __param_check(name, p, void);
#define param_check_battery_voltage(name, p) __param_check(name, p, void);
+#define param_check_battery_charge_counter(name, p) __param_check(name, p, void);
+#define param_check_battery_current(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
@@ -525,6 +579,13 @@ MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
module_param(battery_voltage, battery_voltage, 0644);
MODULE_PARM_DESC(battery_voltage, "battery voltage (millivolts)");
+module_param(battery_charge_counter, battery_charge_counter, 0644);
+MODULE_PARM_DESC(battery_charge_counter,
+ "battery charge counter (microampere-hours)");
+
+module_param(battery_current, battery_current, 0644);
+MODULE_PARM_DESC(battery_current, "battery current (milliamperes)");
+
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 94ddd7d659c8..a67701ed93e8 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -978,6 +978,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
+ INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
+ INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index e3a2518503ed..bd21655c37a6 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -508,15 +508,6 @@ config PWM_TIEHRPWM
To compile this driver as a module, choose M here: the module
will be called pwm-tiehrpwm.
-config PWM_TIPWMSS
- bool
- default y if (ARCH_OMAP2PLUS) && (PWM_TIECAP || PWM_TIEHRPWM)
- help
- PWM Subsystem driver support for AM33xx SOC.
-
- PWM submodules require PWM config space access from submodule
- drivers and require common parent driver support.
-
config PWM_TWL
tristate "TWL4030/6030 PWM support"
depends on TWL4030_CORE
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 26326adf71d7..9a475073dafc 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
-obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 125a173bed45..4dd31dd9feea 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -2755,7 +2755,7 @@ static int tsi721_probe(struct pci_dev *pdev,
{
int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
tsi_debug(INIT, &pdev->dev, "res%d %pR",
i, &pdev->resource[i]);
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index de919f2e8b94..471128a2e723 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -61,6 +61,7 @@
#define QDSP6SS_GFMUX_CTL_REG 0x020
#define QDSP6SS_PWR_CTL_REG 0x030
#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6V6SS_MEM_PWR_CTL 0x034
#define QDSP6SS_STRAP_ACC 0x110
/* AXI Halt Register Offsets */
@@ -196,6 +197,7 @@ enum {
MSS_MSM8916,
MSS_MSM8974,
MSS_MSM8996,
+ MSS_MSM8998,
MSS_SDM845,
};
@@ -498,7 +500,10 @@ static int q6v5proc_reset(struct q6v5 *qproc)
}
goto pbl_wait;
- } else if (qproc->version == MSS_MSM8996) {
+ } else if (qproc->version == MSS_MSM8996 ||
+ qproc->version == MSS_MSM8998) {
+ int mem_pwr_ctl;
+
/* Override the ACC value if required */
writel(QDSP6SS_ACC_OVERRIDE_VAL,
qproc->reg_base + QDSP6SS_STRAP_ACC);
@@ -543,17 +548,24 @@ static int q6v5proc_reset(struct q6v5 *qproc)
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Turn on L1, L2, ETB and JU memories 1 at a time */
- val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
- for (i = 19; i >= 0; i--) {
+ if (qproc->version == MSS_MSM8996) {
+ mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
+ i = 19;
+ } else {
+ /* MSS_MSM8998 */
+ mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
+ i = 28;
+ }
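+	/* set bits 0..i one at a time: 20 memory banks on 8996, 29 on 8998 */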
+ val = readl(qproc->reg_base + mem_pwr_ctl);
+ for (; i >= 0; i--) {
val |= BIT(i);
- writel(val, qproc->reg_base +
- QDSP6SS_MEM_PWR_CTL);
+ writel(val, qproc->reg_base + mem_pwr_ctl);
/*
 * Read back the value to ensure the write is done, then
 * wait 1us for both the memory peripheral and data
* array to turn on.
*/
- val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+ val |= readl(qproc->reg_base + mem_pwr_ctl);
udelay(1);
}
/* Remove word line clamp */
@@ -1571,6 +1583,33 @@ static const struct rproc_hexagon_res sdm845_mss = {
.version = MSS_SDM845,
};
+static const struct rproc_hexagon_res msm8998_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "qdss",
+ "mem",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ "gpll0_mss",
+ "mnoc_axi",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .version = MSS_MSM8998,
+};
+
static const struct rproc_hexagon_res msm8996_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
@@ -1677,6 +1716,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
+ { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 3c5fbbbfb0f1..307df98347ba 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -44,8 +44,6 @@
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
-typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
- struct resource_table *table, int len);
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
@@ -336,7 +334,8 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
return -ENOMEM;
} else {
/* Register carveout in list */
- mem = rproc_mem_entry_init(dev, 0, 0, size, rsc->vring[i].da,
+ mem = rproc_mem_entry_init(dev, NULL, 0,
+ size, rsc->vring[i].da,
rproc_alloc_carveout,
rproc_release_carveout,
"vdev%dvring%d",
@@ -400,7 +399,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
void rproc_free_vring(struct rproc_vring *rvring)
{
struct rproc *rproc = rvring->rvdev->rproc;
- int idx = rvring->rvdev->vring - rvring;
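+	/* index of this vring within the rvdev->vring[] array */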
+ int idx = rvring - rvring->rvdev->vring;
struct fw_rsc_vdev *rsc;
idr_remove(&rproc->notifyids, rvring->notifyid);
@@ -913,7 +912,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
}
/* Register carveout in list */
- carveout = rproc_mem_entry_init(dev, 0, 0, rsc->len, rsc->da,
+ carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da,
rproc_alloc_carveout,
rproc_release_carveout, rsc->name);
if (!carveout) {
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 8cd4a0a3892b..dd93cf04e17f 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -333,9 +333,6 @@ struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
void rproc_delete_debug_dir(struct rproc *rproc)
{
- if (!rproc->dbg_dir)
- return;
-
debugfs_remove_recursive(rproc->dbg_dir);
}
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 2cf4b2992bfc..a18f88044111 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -15,9 +15,11 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
+#include <linux/workqueue.h>
#include "remoteproc_internal.h"
@@ -31,7 +33,9 @@
#define STM32_SMC_REG_WRITE 0x1
#define STM32_MBX_VQ0 "vq0"
+#define STM32_MBX_VQ0_ID 0
#define STM32_MBX_VQ1 "vq1"
+#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
struct stm32_syscon {
@@ -58,6 +62,7 @@ struct stm32_mbox {
const unsigned char name[10];
struct mbox_chan *chan;
struct mbox_client client;
+ struct work_struct vq_work;
int vq_id;
};
@@ -65,9 +70,11 @@ struct stm32_rproc {
struct reset_control *rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
+ int wdg_irq;
u32 nb_rmems;
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
+ struct workqueue_struct *workqueue;
bool secured_soc;
};
@@ -261,13 +268,22 @@ static irqreturn_t stm32_rproc_wdg(int irq, void *data)
return IRQ_HANDLED;
}
+static void stm32_rproc_mb_vq_work(struct work_struct *work)
+{
+ struct stm32_mbox *mb = container_of(work, struct stm32_mbox, vq_work);
+ struct rproc *rproc = dev_get_drvdata(mb->client.dev);
+
+ if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
+ dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
+}
+
static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data)
{
struct rproc *rproc = dev_get_drvdata(cl->dev);
struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client);
+ struct stm32_rproc *ddata = rproc->priv;
- if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
- dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id);
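+	/* mailbox rx callbacks may run in atomic context; defer vq handling */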
+ queue_work(ddata->workqueue, &mb->vq_work);
}
static void stm32_rproc_free_mbox(struct rproc *rproc)
@@ -285,7 +301,7 @@ static void stm32_rproc_free_mbox(struct rproc *rproc)
static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
{
.name = STM32_MBX_VQ0,
- .vq_id = 0,
+ .vq_id = STM32_MBX_VQ0_ID,
.client = {
.rx_callback = stm32_rproc_mb_callback,
.tx_block = false,
@@ -293,7 +309,7 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
},
{
.name = STM32_MBX_VQ1,
- .vq_id = 1,
+ .vq_id = STM32_MBX_VQ1_ID,
.client = {
.rx_callback = stm32_rproc_mb_callback,
.tx_block = false,
@@ -310,11 +326,12 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
}
};
-static void stm32_rproc_request_mbox(struct rproc *rproc)
+static int stm32_rproc_request_mbox(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
struct device *dev = &rproc->dev;
unsigned int i;
+ int j;
const unsigned char *name;
struct mbox_client *cl;
@@ -329,10 +346,24 @@ static void stm32_rproc_request_mbox(struct rproc *rproc)
ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
if (IS_ERR(ddata->mb[i].chan)) {
+ if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER)
+ goto err_probe;
dev_warn(dev, "cannot get %s mbox\n", name);
ddata->mb[i].chan = NULL;
}
+ if (ddata->mb[i].vq_id >= 0) {
+ INIT_WORK(&ddata->mb[i].vq_work,
+ stm32_rproc_mb_vq_work);
+ }
}
+
+ return 0;
+
+err_probe:
+ for (j = i - 1; j >= 0; j--)
+ if (ddata->mb[j].chan)
+ mbox_free_channel(ddata->mb[j].chan);
+ return -EPROBE_DEFER;
}
static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold)
@@ -528,6 +559,13 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
return err;
}
+ ddata->wdg_irq = irq;
+
+ if (of_property_read_bool(np, "wakeup-source")) {
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+ }
+
dev_info(dev, "wdg irq registered\n");
}
@@ -589,14 +627,22 @@ static int stm32_rproc_probe(struct platform_device *pdev)
rproc->has_iommu = false;
ddata = rproc->priv;
+ ddata->workqueue = create_workqueue(dev_name(dev));
+ if (!ddata->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto free_rproc;
+ }
platform_set_drvdata(pdev, rproc);
ret = stm32_rproc_parse_dt(pdev);
if (ret)
- goto free_rproc;
+ goto free_wkq;
- stm32_rproc_request_mbox(rproc);
+ ret = stm32_rproc_request_mbox(rproc);
+ if (ret)
+ goto free_rproc;
ret = rproc_add(rproc);
if (ret)
@@ -606,7 +652,13 @@ static int stm32_rproc_probe(struct platform_device *pdev)
free_mb:
stm32_rproc_free_mbox(rproc);
+free_wkq:
+ destroy_workqueue(ddata->workqueue);
free_rproc:
+ if (device_may_wakeup(dev)) {
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ }
rproc_free(rproc);
return ret;
}
@@ -614,22 +666,56 @@ free_rproc:
static int stm32_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = &pdev->dev;
if (atomic_read(&rproc->power) > 0)
rproc_shutdown(rproc);
rproc_del(rproc);
stm32_rproc_free_mbox(rproc);
+ destroy_workqueue(ddata->workqueue);
+
+ if (device_may_wakeup(dev)) {
+ dev_pm_clear_wake_irq(dev);
+ device_init_wakeup(dev, false);
+ }
rproc_free(rproc);
return 0;
}
+static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ if (device_may_wakeup(dev))
+ return enable_irq_wake(ddata->wdg_irq);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_rproc_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct stm32_rproc *ddata = rproc->priv;
+
+ if (device_may_wakeup(dev))
+ return disable_irq_wake(ddata->wdg_irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+ stm32_rproc_suspend, stm32_rproc_resume);
+
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
.remove = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
+ .pm = &stm32_rproc_pm_ops,
.of_match_table = of_match_ptr(stm32_rproc_match),
},
};
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index d0322b41eca5..709276540ef1 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -21,7 +21,7 @@ config RPMSG_QCOM_GLINK_NATIVE
config RPMSG_QCOM_GLINK_RPM
tristate "Qualcomm RPM Glink driver"
- select RPMSG_QCOM_GLINK_NATIVE
+ select RPMSG_QCOM_GLINK_NATIVE
depends on HAS_IOMEM
depends on MAILBOX
help
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 621f1afd4d6b..1995f5b3ea67 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -241,10 +241,31 @@ static void qcom_glink_channel_release(struct kref *ref)
{
struct glink_channel *channel = container_of(ref, struct glink_channel,
refcount);
+ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *tmp;
unsigned long flags;
+ int iid;
+
+ /* cancel pending rx_done work */
+ cancel_work_sync(&channel->intent_work);
spin_lock_irqsave(&channel->intent_lock, flags);
+ /* Free all non-reuse intents pending rx_done work */
+ list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
+ if (!intent->reuse) {
+ kfree(intent->data);
+ kfree(intent);
+ }
+ }
+
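+	/* free any local intents still tracked in the liids idr */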
+ idr_for_each_entry(&channel->liids, tmp, iid) {
+ kfree(tmp->data);
+ kfree(tmp);
+ }
idr_destroy(&channel->liids);
+
+ idr_for_each_entry(&channel->riids, tmp, iid)
+ kfree(tmp);
idr_destroy(&channel->riids);
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -1094,13 +1115,12 @@ static int qcom_glink_create_remote(struct qcom_glink *glink,
close_link:
/*
* Send a close request to "undo" our open-ack. The close-ack will
- * release the last reference.
+	 * release the qcom_glink_send_open_req() reference; the last reference
+	 * is released after receiving remote_close or at transport unregister
+	 * via qcom_glink_native_remove().
*/
qcom_glink_send_close_req(glink, channel);
- /* Release qcom_glink_send_open_req() reference */
- kref_put(&channel->refcount, qcom_glink_channel_release);
-
return ret;
}
@@ -1415,15 +1435,13 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
ret = rpmsg_register_device(rpdev);
if (ret)
- goto free_rpdev;
+ goto rcid_remove;
channel->rpdev = rpdev;
}
return 0;
-free_rpdev:
- kfree(rpdev);
rcid_remove:
spin_lock_irqsave(&glink->idr_lock, flags);
idr_remove(&glink->rcids, channel->rcid);
@@ -1544,6 +1562,18 @@ static void qcom_glink_work(struct work_struct *work)
}
}
+static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
+{
+ struct glink_defer_cmd *dcmd;
+ struct glink_defer_cmd *tmp;
+
+ /* cancel any pending deferred rx_work */
+ cancel_work_sync(&glink->rx_work);
+
+ list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
+ kfree(dcmd);
+}
+
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
unsigned long features,
struct qcom_glink_pipe *rx,
@@ -1619,23 +1649,24 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
struct glink_channel *channel;
int cid;
int ret;
- unsigned long flags;
disable_irq(glink->irq);
- cancel_work_sync(&glink->rx_work);
+ qcom_glink_cancel_rx_work(glink);
ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
if (ret)
dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
- spin_lock_irqsave(&glink->idr_lock, flags);
/* Release any defunct local channels, waiting for close-ack */
idr_for_each_entry(&glink->lcids, channel, cid)
kref_put(&channel->refcount, qcom_glink_channel_release);
+ /* Release any defunct local channels, waiting for close-req */
+ idr_for_each_entry(&glink->rcids, channel, cid)
+ kref_put(&channel->refcount, qcom_glink_channel_release);
+
idr_destroy(&glink->lcids);
idr_destroy(&glink->rcids);
- spin_unlock_irqrestore(&glink->idr_lock, flags);
mbox_free_channel(glink->mbox_chan);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
index 4238383d8685..579bc4443f6d 100644
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -105,7 +105,7 @@ static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
tail = le32_to_cpu(*pipe->tail);
tail += count;
- if (tail > pipe->native.length)
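+	/* tail == length is already past the end of the FIFO, so wrap there too */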
+ if (tail >= pipe->native.length)
tail -= pipe->native.length;
*pipe->tail = cpu_to_le32(tail);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index eea5ebbb5119..4bbbacdbf3bb 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -146,7 +146,6 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
struct device *dev = &eptdev->dev;
- struct sk_buff *skb;
/* Close the endpoint, if it's not already destroyed by the parent */
mutex_lock(&eptdev->ept_lock);
@@ -157,10 +156,7 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
mutex_unlock(&eptdev->ept_lock);
/* Discard all SKBs */
- while (!skb_queue_empty(&eptdev->queue)) {
- skb = skb_dequeue(&eptdev->queue);
- kfree_skb(skb);
- }
+ skb_queue_purge(&eptdev->queue);
put_device(dev);
@@ -227,8 +223,10 @@ static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
if (!kbuf)
return -ENOMEM;
- if (!copy_from_iter_full(kbuf, len, from))
- return -EFAULT;
+ if (!copy_from_iter_full(kbuf, len, from)) {
+ ret = -EFAULT;
+ goto free_kbuf;
+ }
if (mutex_lock_interruptible(&eptdev->ept_lock)) {
ret = -ERESTARTSYS;
@@ -290,7 +288,7 @@ static const struct file_operations rpmsg_eptdev_fops = {
.write_iter = rpmsg_eptdev_write_iter,
.poll = rpmsg_eptdev_poll,
.unlocked_ioctl = rpmsg_eptdev_ioctl,
- .compat_ioctl = rpmsg_eptdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
@@ -451,7 +449,7 @@ static const struct file_operations rpmsg_ctrldev_fops = {
.open = rpmsg_ctrldev_open,
.release = rpmsg_ctrldev_release,
.unlocked_ioctl = rpmsg_ctrldev_ioctl,
- .compat_ioctl = rpmsg_ctrldev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void rpmsg_ctrldev_release_device(struct device *dev)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 1adf9f815652..d77515d8382c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -373,17 +373,6 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
-config RTC_DRV_MESON_VRTC
- tristate "Amlogic Meson Virtual RTC"
- depends on ARCH_MESON || COMPILE_TEST
- default m if ARCH_MESON
- help
- If you say yes here you will get support for the
- Virtual RTC of Amlogic SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-meson-vrtc.
-
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK808
@@ -1337,8 +1326,6 @@ config RTC_DRV_IMXDI
config RTC_DRV_FSL_FTM_ALARM
tristate "Freescale FlexTimer alarm timer"
depends on ARCH_LAYERSCAPE || SOC_LS1021A
- select FSL_RCPM
- default y
help
For the FlexTimer in LS1012A, LS1021A, LS1028A, LS1043A, LS1046A,
LS1088A, LS208xA, we can use FTM as the wakeup source.
@@ -1360,6 +1347,17 @@ config RTC_DRV_MESON
This driver can also be built as a module; if so, the module
will be called "rtc-meson".
+config RTC_DRV_MESON_VRTC
+ tristate "Amlogic Meson Virtual RTC"
+ depends on ARCH_MESON || COMPILE_TEST
+ default m if ARCH_MESON
+ help
+ If you say yes here you will get support for the
+ Virtual RTC of Amlogic SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-meson-vrtc.
+
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
@@ -1459,6 +1457,7 @@ config RTC_DRV_PL031
config RTC_DRV_AT91RM9200
tristate "AT91RM9200 or some AT91SAM9 RTC"
depends on ARCH_AT91 || COMPILE_TEST
+ depends on OF
help
Driver for the internal RTC (Realtime Clock) module found on
Atmel AT91RM9200's and some AT91SAM9 chips. On AT91SAM9 chips
@@ -1510,9 +1509,9 @@ config RTC_DRV_PXA
depends on ARCH_PXA
select RTC_DRV_SA1100
help
- If you say Y here you will get access to the real time clock
- built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
- consisting of an SA1100 compatible RTC and the extended PXA RTC.
+ If you say Y here you will get access to the real time clock
+ built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
+ consisting of an SA1100 compatible RTC and the extended PXA RTC.
This RTC driver uses PXA RTC registers available since pxa27x
series (RDxR, RYxR) instead of legacy RCNR, RTAR.
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index 84feb2565abd..5b8ebe86124a 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched/signal.h>
@@ -360,7 +361,6 @@ static long rtc_dev_ioctl(struct file *file,
case RTC_IRQP_SET:
err = rtc_irq_set_freq(rtc, arg);
break;
-
case RTC_IRQP_READ:
err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
break;
@@ -399,6 +399,34 @@ done:
return err;
}
+#ifdef CONFIG_COMPAT
+#define RTC_IRQP_SET32 _IOW('p', 0x0c, __u32)
+#define RTC_IRQP_READ32 _IOR('p', 0x0b, __u32)
+#define RTC_EPOCH_SET32 _IOW('p', 0x0e, __u32)
+
+static long rtc_dev_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct rtc_device *rtc = file->private_data;
+ void __user *uarg = compat_ptr(arg);
+
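+	/* these commands take an 'unsigned long' natively but a __u32 in the 32-bit ABI */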
+ switch (cmd) {
+ case RTC_IRQP_READ32:
+ return put_user(rtc->irq_freq, (__u32 __user *)uarg);
+
+ case RTC_IRQP_SET32:
+	/* arg is a plain integer, not a pointer */
+ return rtc_dev_ioctl(file, RTC_IRQP_SET, arg);
+
+ case RTC_EPOCH_SET32:
+	/* arg is a plain integer, not a pointer */
+ return rtc_dev_ioctl(file, RTC_EPOCH_SET, arg);
+ }
+
+ return rtc_dev_ioctl(file, cmd, (unsigned long)uarg);
+}
+#endif
+
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
struct rtc_device *rtc = file->private_data;
@@ -434,6 +462,9 @@ static const struct file_operations rtc_dev_fops = {
.read = rtc_dev_read,
.poll = rtc_dev_poll,
.unlocked_ioctl = rtc_dev_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = rtc_dev_compat_ioctl,
+#endif
.open = rtc_dev_open,
.release = rtc_dev_release,
.fasync = rtc_dev_fasync,
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index c93ef33b01d3..794a4f036b99 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -70,7 +70,7 @@ static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
time64_t time = rtc_tm_to_time64(tm);
time64_t range_min = rtc->set_start_time ? rtc->start_secs :
rtc->range_min;
- time64_t range_max = rtc->set_start_time ?
+ timeu64_t range_max = rtc->set_start_time ?
(rtc->start_secs + rtc->range_max - rtc->range_min) :
rtc->range_max;
@@ -125,7 +125,7 @@ EXPORT_SYMBOL_GPL(rtc_read_time);
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
- int err;
+ int err, uie;
err = rtc_valid_tm(tm);
if (err != 0)
@@ -137,6 +137,17 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
rtc_subtract_offset(rtc, tm);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active;
+#else
+ uie = rtc->uie_rtctimer.enabled;
+#endif
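+	/* pause update IRQs while the time is rewritten; re-enabled further down */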
+ if (uie) {
+ err = rtc_update_irq_enable(rtc, 0);
+ if (err)
+ return err;
+ }
+
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
return err;
@@ -153,6 +164,12 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
/* A timer might have just expired */
schedule_work(&rtc->irqwork);
+ if (uie) {
+ err = rtc_update_irq_enable(rtc, 1);
+ if (err)
+ return err;
+ }
+
trace_rtc_set_time(rtc_tm_to_time64(tm), err);
return err;
}
@@ -528,7 +545,7 @@ EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
- int err;
+ int rc = 0, err;
err = mutex_lock_interruptible(&rtc->ops_lock);
if (err)
@@ -553,7 +570,9 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
struct rtc_time tm;
ktime_t now, onesec;
- __rtc_read_time(rtc, &tm);
+ rc = __rtc_read_time(rtc, &tm);
+ if (rc)
+ goto out;
onesec = ktime_set(1, 0);
now = rtc_tm_to_ktime(tm);
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
@@ -565,6 +584,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
+
+ /*
+	 * __rtc_read_time() failed; this probably means that the RTC time has
+	 * never been set or, less probably, that there is a transient error on
+	 * the bus. In any case, avoid enabling emulation as this will fail when
+	 * reading the time too.
+ */
+ if (rc)
+ return rc;
+
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
* Enable emulation if the driver returned -EINVAL to signal that it has
@@ -581,6 +610,8 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
/**
* rtc_handle_legacy_irq - AIE, UIE and PIE event hook
* @rtc: pointer to the rtc device
+ * @num: number of occurrences of the event
+ * @mode: type of the event: RTC_AF, RTC_UF or RTC_PF
*
* This function is called when an AIE, UIE or PIE mode interrupt
* has occurred (or been emulated).
@@ -761,8 +792,8 @@ int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
/**
* rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
- * @rtc rtc device
- * @timer timer being added.
+ * @rtc: rtc device
+ * @timer: timer being added.
*
 * Enqueues a timer onto the rtc device's timerqueue and sets
* the next alarm event appropriately.
@@ -821,8 +852,8 @@ static void rtc_alarm_disable(struct rtc_device *rtc)
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
- * @rtc rtc device
- * @timer timer being removed.
+ * @rtc: rtc device
+ * @timer: timer being removed.
*
 * Removes a timer from the rtc device's timerqueue and sets
* the next alarm event appropriately.
@@ -859,8 +890,7 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
/**
* rtc_timer_do_work - Expires rtc timers
- * @rtc rtc device
- * @timer timer being removed.
+ * @work: work item
*
* Expires rtc timers. Reprograms next alarm event if needed.
* Called via worktask.
@@ -993,8 +1023,8 @@ void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
/**
* rtc_read_offset - Read the amount of rtc offset in parts per billion
- * @ rtc: rtc device to be used
- * @ offset: the offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
*
* see below for details.
*
@@ -1022,8 +1052,8 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
/**
* rtc_set_offset - Adjusts the duration of the average second
- * @ rtc: rtc device to be used
- * @ offset: the offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
*
* Some rtc's allow an adjustment to the average duration of a second
* to compensate for differences in the actual clock rate due to temperature,
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index cdad6f00debf..811fe2005488 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -900,16 +900,6 @@ err:
return ret;
}
-static int abb5zes3_remove(struct i2c_client *client)
-{
- struct abb5zes3_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
-
- if (rtc_data->irq > 0)
- device_init_wakeup(&client->dev, false);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int abb5zes3_rtc_suspend(struct device *dev)
{
@@ -956,7 +946,6 @@ static struct i2c_driver abb5zes3_driver = {
.of_match_table = of_match_ptr(abb5zes3_dt_match),
},
.probe = abb5zes3_probe,
- .remove = abb5zes3_remove,
.id_table = abb5zes3_id,
};
module_i2c_driver(abb5zes3_driver);
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 9351bd52477e..94d7c22fc4f3 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -74,7 +74,7 @@ struct armada38x_rtc {
int irq;
bool initialized;
struct value_to_freq *val_to_freq;
- struct armada38x_rtc_data *data;
+ const struct armada38x_rtc_data *data;
};
#define ALARM1 0
@@ -501,17 +501,14 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
{
struct resource *res;
struct armada38x_rtc *rtc;
- const struct of_device_id *match;
-
- match = of_match_device(armada38x_rtc_of_match_table, &pdev->dev);
- if (!match)
- return -ENODEV;
rtc = devm_kzalloc(&pdev->dev, sizeof(struct armada38x_rtc),
GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ rtc->data = of_device_get_match_data(&pdev->dev);
+
rtc->val_to_freq = devm_kcalloc(&pdev->dev, SAMPLE_NR,
sizeof(struct value_to_freq), GFP_KERNEL);
if (!rtc->val_to_freq)
@@ -553,7 +550,6 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
*/
rtc->rtc_dev->ops = &armada38x_rtc_ops_noirq;
}
- rtc->data = (struct armada38x_rtc_data *)match->data;
/* Update RTC-MBUS bridge timing parameters */
rtc->data->update_mbus_timing(rtc);
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 10413d803caa..10064bdabdff 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -245,7 +245,6 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
{
struct asm9260_rtc_priv *priv;
struct device *dev = &pdev->dev;
- struct resource *res;
int irq_alarm, ret;
u32 ccr;
@@ -260,8 +259,7 @@ static int asm9260_rtc_probe(struct platform_device *pdev)
if (irq_alarm < 0)
return irq_alarm;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->iobase = devm_ioremap_resource(dev, res);
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index e351d35b29a3..eacdd0637cce 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -85,14 +85,12 @@ static const struct rtc_class_ops aspeed_rtc_ops = {
static int aspeed_rtc_probe(struct platform_device *pdev)
{
struct aspeed_rtc *rtc;
- struct resource *res;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index d119c6e6353e..3b833e02a657 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -319,7 +319,6 @@ static const struct at91_rtc_config at91sam9x5_config = {
.use_shadow_imr = true,
};
-#ifdef CONFIG_OF
static const struct of_device_id at91_rtc_dt_ids[] = {
{
.compatible = "atmel,at91rm9200-rtc",
@@ -332,22 +331,6 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
}
};
MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-#endif
-
-static const struct at91_rtc_config *
-at91_rtc_get_config(struct platform_device *pdev)
-{
- const struct of_device_id *match;
-
- if (pdev->dev.of_node) {
- match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
- if (!match)
- return NULL;
- return (const struct at91_rtc_config *)match->data;
- }
-
- return &at91rm9200_config;
-}
static const struct rtc_class_ops at91_rtc_ops = {
.read_time = at91_rtc_readtime,
@@ -367,7 +350,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
struct resource *regs;
int ret = 0;
- at91_rtc_config = at91_rtc_get_config(pdev);
+ at91_rtc_config = of_device_get_match_data(&pdev->dev);
if (!at91_rtc_config)
return -ENODEV;
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index bb3ba7bfe6a5..e39e89867d29 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -334,7 +334,6 @@ static const struct rtc_class_ops at91_rtc_ops = {
*/
static int at91_rtc_probe(struct platform_device *pdev)
{
- struct resource *r;
struct sam9_rtc *rtc;
int ret, irq;
u32 mr;
@@ -358,8 +357,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtt = devm_ioremap_resource(&pdev->dev, r);
+ rtc->rtt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtt))
return PTR_ERR(rtc->rtt);
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 7744333b0f40..627037aa66a8 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -491,3 +491,4 @@ module_platform_driver(bd70528_rtc);
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_DESCRIPTION("BD70528 RTC driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bd70528-rtc");
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 3e9800f9878a..4fee57c51280 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -200,7 +200,6 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct brcmstb_waketmr *timer;
- struct resource *res;
int ret;
timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
@@ -210,8 +209,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, timer);
timer->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- timer->base = devm_ioremap_resource(dev, res);
+ timer->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->base))
return PTR_ERR(timer->base);
@@ -277,6 +275,7 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev)
struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev);
unregister_reboot_notifier(&timer->reboot_notifier);
+ clk_disable_unprepare(timer->clk);
return 0;
}
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index 592aae23cbaf..595d5d252850 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -255,7 +255,6 @@ static const struct rtc_class_ops cdns_rtc_ops = {
static int cdns_rtc_probe(struct platform_device *pdev)
{
struct cdns_rtc *crtc;
- struct resource *res;
int ret;
unsigned long ref_clk_freq;
@@ -263,8 +262,7 @@ static int cdns_rtc_probe(struct platform_device *pdev)
if (!crtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- crtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ crtc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(crtc->regs))
return PTR_ERR(crtc->regs);
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 4ac850837153..da59917c9ee8 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -164,15 +164,13 @@ static int __init coh901331_probe(struct platform_device *pdev)
{
int ret;
struct coh901331_port *rtap;
- struct resource *res;
rtap = devm_kzalloc(&pdev->dev,
sizeof(struct coh901331_port), GFP_KERNEL);
if (!rtap)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtap->virtbase = devm_ioremap_resource(&pdev->dev, res);
+ rtap->virtbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtap->virtbase))
return PTR_ERR(rtap->virtbase);
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 6909e01936d9..d043d30f05bc 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -107,11 +107,7 @@ static int cros_ec_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(dev);
struct cros_ec_device *cros_ec = cros_ec_rtc->cros_ec;
int ret;
- time64_t time;
-
- time = rtc_tm_to_time64(tm);
- if (time < 0 || time > U32_MAX)
- return -EINVAL;
+ time64_t time = rtc_tm_to_time64(tm);
ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_VALUE, (u32)time);
if (ret < 0) {
@@ -348,14 +344,16 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
return ret;
}
- cros_ec_rtc->rtc = devm_rtc_device_register(&pdev->dev, DRV_NAME,
- &cros_ec_rtc_ops,
- THIS_MODULE);
- if (IS_ERR(cros_ec_rtc->rtc)) {
- ret = PTR_ERR(cros_ec_rtc->rtc);
- dev_err(&pdev->dev, "failed to register rtc device\n");
+ cros_ec_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(cros_ec_rtc->rtc))
+ return PTR_ERR(cros_ec_rtc->rtc);
+
+ cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
+ cros_ec_rtc->rtc->range_max = U32_MAX;
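+	/* the EC protocol carries the time as a 32-bit seconds value */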
+
+ ret = rtc_register_device(cros_ec_rtc->rtc);
+ if (ret)
return ret;
- }
/* Get RTC events from the EC. */
cros_ec_rtc->notifier.notifier_call = cros_ec_rtc_event;
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 15908d51b1cb..046b1d4c3dae 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,6 +483,9 @@ static int da9063_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->uie_unsupported = 1;
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
+ if (irq_alarm < 0)
+ return irq_alarm;
+
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index d8e0db2e7fc6..390b7351e0fe 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct davinci_rtc *davinci_rtc;
- struct resource *res;
int ret = 0;
davinci_rtc = devm_kzalloc(&pdev->dev, sizeof(struct davinci_rtc), GFP_KERNEL);
@@ -480,8 +479,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
if (davinci_rtc->irq < 0)
return davinci_rtc->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- davinci_rtc->base = devm_ioremap_resource(dev, res);
+ davinci_rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(davinci_rtc->base))
return PTR_ERR(davinci_rtc->base);
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 0aecc3f8e721..200d85b01e8b 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -175,7 +175,6 @@ static irqreturn_t dc_rtc_irq(int irq, void *dev_id)
static int __init dc_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct dc_rtc *rtc;
int irq, ret;
@@ -183,8 +182,7 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->regs = devm_ioremap_resource(&pdev->dev, res);
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->regs))
return PTR_ERR(rtc->regs);
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index b225bcfef50b..7eeb3f359de8 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -137,7 +137,6 @@ static const struct rtc_class_ops ds1216_rtc_ops = {
static int __init ds1216_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct ds1216_priv *priv;
u8 dummy[8];
@@ -147,8 +146,7 @@ static int __init ds1216_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->ioaddr))
return PTR_ERR(priv->ioaddr);
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index a06508b6c404..7acf849d4902 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -323,15 +323,13 @@ static const struct rtc_class_ops ds1286_ops = {
static int ds1286_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- struct resource *res;
struct ds1286_priv *priv;
priv = devm_kzalloc(&pdev->dev, sizeof(struct ds1286_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->rtcregs = devm_ioremap_resource(&pdev->dev, res);
+ priv->rtcregs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->rtcregs))
return PTR_ERR(priv->rtcregs);
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 4faa24c88af5..b3de6d2e680a 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -15,8 +15,6 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
-#define DRV_NAME "rtc-ds1302"
-
#define RTC_CMD_READ 0x81 /* Read command */
#define RTC_CMD_WRITE 0x80 /* Write command */
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index fa6de31d5793..d21004a68ee0 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -78,42 +78,19 @@ struct ds1343_priv {
struct spi_device *spi;
struct rtc_device *rtc;
struct regmap *map;
- struct mutex mutex;
- unsigned int irqen;
int irq;
- int alarm_sec;
- int alarm_min;
- int alarm_hour;
- int alarm_mday;
};
-static int ds1343_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
-#ifdef RTC_SET_CHARGE
- case RTC_SET_CHARGE:
- {
- int val;
-
- if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
- return -EFAULT;
-
- return regmap_write(priv->map, DS1343_TRICKLE_REG, val);
- }
- break;
-#endif
- }
-
- return -ENOIOCTLCMD;
-}
-
static ssize_t ds1343_show_glitchfilter(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
int glitch_filt_status, data;
+ int res;
- regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ res = regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ if (res)
+ return res;
glitch_filt_status = !!(data & DS1343_EGFIL);
@@ -127,21 +104,19 @@ static ssize_t ds1343_store_glitchfilter(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int data;
-
- regmap_read(priv->map, DS1343_CONTROL_REG, &data);
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
+ int data = 0;
+ int res;
if (strncmp(buf, "enabled", 7) == 0)
- data |= DS1343_EGFIL;
-
- else if (strncmp(buf, "disabled", 8) == 0)
- data &= ~(DS1343_EGFIL);
-
- else
+ data = DS1343_EGFIL;
+ else if (strncmp(buf, "disabled", 8))
return -EINVAL;
- regmap_write(priv->map, DS1343_CONTROL_REG, data);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_EGFIL, data);
+ if (res)
+ return res;
return count;
}
@@ -168,11 +143,13 @@ static int ds1343_nvram_read(void *priv, unsigned int off, void *val,
static ssize_t ds1343_show_tricklecharger(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int data;
+ struct ds1343_priv *priv = dev_get_drvdata(dev->parent);
+ int res, data;
char *diodes = "disabled", *resistors = " ";
- regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
+ res = regmap_read(priv->map, DS1343_TRICKLE_REG, &data);
+ if (res)
+ return res;
if ((data & 0xf0) == DS1343_TRICKLE_MAGIC) {
switch (data & 0x0c) {
@@ -209,28 +186,15 @@ static ssize_t ds1343_show_tricklecharger(struct device *dev,
static DEVICE_ATTR(trickle_charger, S_IRUGO, ds1343_show_tricklecharger, NULL);
-static int ds1343_sysfs_register(struct device *dev)
-{
- int err;
-
- err = device_create_file(dev, &dev_attr_glitch_filter);
- if (err)
- return err;
-
- err = device_create_file(dev, &dev_attr_trickle_charger);
- if (!err)
- return 0;
-
- device_remove_file(dev, &dev_attr_glitch_filter);
-
- return err;
-}
+static struct attribute *ds1343_attrs[] = {
+ &dev_attr_glitch_filter.attr,
+ &dev_attr_trickle_charger.attr,
+ NULL
+};
-static void ds1343_sysfs_unregister(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_glitch_filter);
- device_remove_file(dev, &dev_attr_trickle_charger);
-}
+static const struct attribute_group ds1343_attr_group = {
+ .attrs = ds1343_attrs,
+};
static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
{
@@ -256,144 +220,78 @@ static int ds1343_read_time(struct device *dev, struct rtc_time *dt)
static int ds1343_set_time(struct device *dev, struct rtc_time *dt)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res;
-
- res = regmap_write(priv->map, DS1343_SECONDS_REG,
- bin2bcd(dt->tm_sec));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_MINUTES_REG,
- bin2bcd(dt->tm_min));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_HOURS_REG,
- bin2bcd(dt->tm_hour) & 0x3F);
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_DAY_REG,
- bin2bcd(dt->tm_wday + 1));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_DATE_REG,
- bin2bcd(dt->tm_mday));
- if (res)
- return res;
-
- res = regmap_write(priv->map, DS1343_MONTH_REG,
- bin2bcd(dt->tm_mon + 1));
- if (res)
- return res;
-
- dt->tm_year %= 100;
-
- res = regmap_write(priv->map, DS1343_YEAR_REG,
- bin2bcd(dt->tm_year));
- if (res)
- return res;
-
- return 0;
+ u8 buf[7];
+
+ buf[0] = bin2bcd(dt->tm_sec);
+ buf[1] = bin2bcd(dt->tm_min);
+ buf[2] = bin2bcd(dt->tm_hour) & 0x3F;
+ buf[3] = bin2bcd(dt->tm_wday + 1);
+ buf[4] = bin2bcd(dt->tm_mday);
+ buf[5] = bin2bcd(dt->tm_mon + 1);
+ buf[6] = bin2bcd(dt->tm_year - 100);
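+	/* range_min/range_max limit times to 2000..2099, so tm_year - 100 is 0..99 */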
+
+ return regmap_bulk_write(priv->map, DS1343_SECONDS_REG,
+ buf, sizeof(buf));
}
-static int ds1343_update_alarm(struct device *dev)
+static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- unsigned int control, stat;
unsigned char buf[4];
- int res = 0;
+ unsigned int val;
+ int res;
- res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
- if (res)
- return res;
+ if (priv->irq <= 0)
+ return -EINVAL;
- res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
+ res = regmap_read(priv->map, DS1343_STATUS_REG, &val);
if (res)
return res;
- control &= ~(DS1343_A0IE);
- stat &= ~(DS1343_IRQF0);
-
- res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
- if (res)
- return res;
+ alarm->pending = !!(val & DS1343_IRQF0);
- res = regmap_write(priv->map, DS1343_STATUS_REG, stat);
+ res = regmap_read(priv->map, DS1343_CONTROL_REG, &val);
if (res)
return res;
+ alarm->enabled = !!(val & DS1343_A0IE);
- buf[0] = priv->alarm_sec < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_sec) & 0x7F;
- buf[1] = priv->alarm_min < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_min) & 0x7F;
- buf[2] = priv->alarm_hour < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_hour) & 0x3F;
- buf[3] = priv->alarm_mday < 0 || (priv->irqen & RTC_UF) ?
- 0x80 : bin2bcd(priv->alarm_mday) & 0x7F;
-
- res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
+ res = regmap_bulk_read(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
if (res)
return res;
- if (priv->irqen) {
- control |= DS1343_A0IE;
- res = regmap_write(priv->map, DS1343_CONTROL_REG, control);
- }
+ alarm->time.tm_sec = bcd2bin(buf[0]) & 0x7f;
+ alarm->time.tm_min = bcd2bin(buf[1]) & 0x7f;
+ alarm->time.tm_hour = bcd2bin(buf[2]) & 0x3f;
+ alarm->time.tm_mday = bcd2bin(buf[3]) & 0x3f;
- return res;
+ return 0;
}
-static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
+ unsigned char buf[4];
int res = 0;
- unsigned int stat;
if (priv->irq <= 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
-
- res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG, DS1343_A0IE, 0);
if (res)
- goto out;
-
- alarm->enabled = !!(priv->irqen & RTC_AF);
- alarm->pending = !!(stat & DS1343_IRQF0);
-
- alarm->time.tm_sec = priv->alarm_sec < 0 ? 0 : priv->alarm_sec;
- alarm->time.tm_min = priv->alarm_min < 0 ? 0 : priv->alarm_min;
- alarm->time.tm_hour = priv->alarm_hour < 0 ? 0 : priv->alarm_hour;
- alarm->time.tm_mday = priv->alarm_mday < 0 ? 0 : priv->alarm_mday;
-
-out:
- mutex_unlock(&priv->mutex);
- return res;
-}
-
-static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res = 0;
-
- if (priv->irq <= 0)
- return -EINVAL;
+ return res;
- mutex_lock(&priv->mutex);
+ buf[0] = bin2bcd(alarm->time.tm_sec);
+ buf[1] = bin2bcd(alarm->time.tm_min);
+ buf[2] = bin2bcd(alarm->time.tm_hour);
+ buf[3] = bin2bcd(alarm->time.tm_mday);
- priv->alarm_sec = alarm->time.tm_sec;
- priv->alarm_min = alarm->time.tm_min;
- priv->alarm_hour = alarm->time.tm_hour;
- priv->alarm_mday = alarm->time.tm_mday;
+ res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4);
+ if (res)
+ return res;
if (alarm->enabled)
- priv->irqen |= RTC_AF;
-
- res = ds1343_update_alarm(dev);
-
- mutex_unlock(&priv->mutex);
+ res = regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, DS1343_A0IE);
return res;
}
@@ -401,32 +299,21 @@ static int ds1343_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
static int ds1343_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct ds1343_priv *priv = dev_get_drvdata(dev);
- int res = 0;
if (priv->irq <= 0)
return -EINVAL;
- mutex_lock(&priv->mutex);
-
- if (enabled)
- priv->irqen |= RTC_AF;
- else
- priv->irqen &= ~RTC_AF;
-
- res = ds1343_update_alarm(dev);
-
- mutex_unlock(&priv->mutex);
-
- return res;
+ return regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, enabled ? DS1343_A0IE : 0);
}
static irqreturn_t ds1343_thread(int irq, void *dev_id)
{
struct ds1343_priv *priv = dev_id;
- unsigned int stat, control;
+ unsigned int stat;
int res = 0;
- mutex_lock(&priv->mutex);
+ rtc_lock(priv->rtc);
res = regmap_read(priv->map, DS1343_STATUS_REG, &stat);
if (res)
@@ -436,23 +323,18 @@ static irqreturn_t ds1343_thread(int irq, void *dev_id)
stat &= ~DS1343_IRQF0;
regmap_write(priv->map, DS1343_STATUS_REG, stat);
- res = regmap_read(priv->map, DS1343_CONTROL_REG, &control);
- if (res)
- goto out;
-
- control &= ~DS1343_A0IE;
- regmap_write(priv->map, DS1343_CONTROL_REG, control);
-
rtc_update_irq(priv->rtc, 1, RTC_AF | RTC_IRQF);
+
+ regmap_update_bits(priv->map, DS1343_CONTROL_REG,
+ DS1343_A0IE, 0);
}
out:
- mutex_unlock(&priv->mutex);
+ rtc_unlock(priv->rtc);
return IRQ_HANDLED;
}
static const struct rtc_class_ops ds1343_rtc_ops = {
- .ioctl = ds1343_ioctl,
.read_time = ds1343_read_time,
.set_time = ds1343_set_time,
.read_alarm = ds1343_read_alarm,
@@ -481,7 +363,6 @@ static int ds1343_probe(struct spi_device *spi)
return -ENOMEM;
priv->spi = spi;
- mutex_init(&priv->mutex);
/* RTC DS1347 works in spi mode 3 and
* its chip select is active high
@@ -520,6 +401,13 @@ static int ds1343_probe(struct spi_device *spi)
priv->rtc->nvram_old_abi = true;
priv->rtc->ops = &ds1343_rtc_ops;
+ priv->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ priv->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ res = rtc_add_group(priv->rtc, &ds1343_attr_group);
+ if (res)
+ dev_err(&spi->dev,
+ "unable to create sysfs entries for rtc ds1343\n");
res = rtc_register_device(priv->rtc);
if (res)
@@ -544,31 +432,12 @@ static int ds1343_probe(struct spi_device *spi)
}
}
- res = ds1343_sysfs_register(&spi->dev);
- if (res)
- dev_err(&spi->dev,
- "unable to create sysfs entries for rtc ds1343\n");
-
return 0;
}
static int ds1343_remove(struct spi_device *spi)
{
- struct ds1343_priv *priv = spi_get_drvdata(spi);
-
- if (spi->irq) {
- mutex_lock(&priv->mutex);
- priv->irqen &= ~RTC_AF;
- mutex_unlock(&priv->mutex);
-
- dev_pm_clear_wake_irq(&spi->dev);
- device_init_wakeup(&spi->dev, false);
- devm_free_irq(&spi->dev, spi->irq, priv);
- }
-
- spi_set_drvdata(spi, NULL);
-
- ds1343_sysfs_unregister(&spi->dev);
+ dev_pm_clear_wake_irq(&spi->dev);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index d392a7bfdd1c..7025cf3fb9f8 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -26,9 +26,15 @@
#define DS1347_DAY_REG 0x0B
#define DS1347_YEAR_REG 0x0D
#define DS1347_CONTROL_REG 0x0F
+#define DS1347_CENTURY_REG 0x13
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
+#define DS1347_WP_BIT BIT(7)
+
+#define DS1347_NEOSC_BIT BIT(7)
+#define DS1347_OSF_BIT BIT(2)
+
static const struct regmap_range ds1347_ranges[] = {
{
.range_min = DS1347_SECONDS_REG,
@@ -43,35 +49,54 @@ static const struct regmap_access_table ds1347_access_table = {
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
- struct spi_device *spi = to_spi_device(dev);
- struct regmap *map;
- int err;
+ struct regmap *map = dev_get_drvdata(dev);
+ unsigned int status, century, secs;
unsigned char buf[8];
+ int err;
- map = spi_get_drvdata(spi);
-
- err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
+ err = regmap_read(map, DS1347_STATUS_REG, &status);
if (err)
return err;
+ if (status & DS1347_OSF_BIT)
+ return -EINVAL;
+
+ do {
+ err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
+ if (err)
+ return err;
+
+ err = regmap_read(map, DS1347_CENTURY_REG, &century);
+ if (err)
+ return err;
+
+ err = regmap_read(map, DS1347_SECONDS_REG, &secs);
+ if (err)
+ return err;
+ } while (buf[0] != secs);
+
dt->tm_sec = bcd2bin(buf[0]);
- dt->tm_min = bcd2bin(buf[1]);
+ dt->tm_min = bcd2bin(buf[1] & 0x7f);
dt->tm_hour = bcd2bin(buf[2] & 0x3F);
dt->tm_mday = bcd2bin(buf[3]);
dt->tm_mon = bcd2bin(buf[4]) - 1;
dt->tm_wday = bcd2bin(buf[5]) - 1;
- dt->tm_year = bcd2bin(buf[6]) + 100;
+ dt->tm_year = (bcd2bin(century) * 100) + bcd2bin(buf[6]) - 1900;
return 0;
}
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
- struct spi_device *spi = to_spi_device(dev);
- struct regmap *map;
+ struct regmap *map = dev_get_drvdata(dev);
+ unsigned int century;
unsigned char buf[8];
+ int err;
- map = spi_get_drvdata(spi);
+ err = regmap_update_bits(map, DS1347_STATUS_REG,
+ DS1347_NEOSC_BIT, DS1347_NEOSC_BIT);
+ if (err)
+ return err;
buf[0] = bin2bcd(dt->tm_sec);
buf[1] = bin2bcd(dt->tm_min);
@@ -79,16 +104,20 @@ static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
buf[3] = bin2bcd(dt->tm_mday);
buf[4] = bin2bcd(dt->tm_mon + 1);
buf[5] = bin2bcd(dt->tm_wday + 1);
+ buf[6] = bin2bcd(dt->tm_year % 100);
+ buf[7] = bin2bcd(0x00);
- /* year in linux is from 1900 i.e in range of 100
- in rtc it is from 00 to 99 */
- dt->tm_year = dt->tm_year % 100;
+ err = regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
+ if (err)
+ return err;
- buf[6] = bin2bcd(dt->tm_year);
- buf[7] = bin2bcd(0x00);
+ century = (dt->tm_year / 100) + 19;
+ err = regmap_write(map, DS1347_CENTURY_REG, century);
+ if (err)
+ return err;
- /* write the rtc settings */
- return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
+ return regmap_update_bits(map, DS1347_STATUS_REG,
+ DS1347_NEOSC_BIT | DS1347_OSF_BIT, 0);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -101,8 +130,7 @@ static int ds1347_probe(struct spi_device *spi)
struct rtc_device *rtc;
struct regmap_config config;
struct regmap *map;
- unsigned int data;
- int res;
+ int err;
memset(&config, 0, sizeof(config));
config.reg_bits = 8;
@@ -125,36 +153,20 @@ static int ds1347_probe(struct spi_device *spi)
spi_set_drvdata(spi, map);
- /* RTC Settings */
- res = regmap_read(map, DS1347_SECONDS_REG, &data);
- if (res)
- return res;
-
/* Disable the write protect of rtc */
- regmap_read(map, DS1347_CONTROL_REG, &data);
- data = data & ~(1<<7);
- regmap_write(map, DS1347_CONTROL_REG, data);
-
- /* Enable the oscillator , disable the oscillator stop flag,
- and glitch filter to reduce current consumption */
- regmap_read(map, DS1347_STATUS_REG, &data);
- data = data & 0x1B;
- regmap_write(map, DS1347_STATUS_REG, data);
-
- /* display the settings */
- regmap_read(map, DS1347_CONTROL_REG, &data);
- dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
-
- regmap_read(map, DS1347_STATUS_REG, &data);
- dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
-
- rtc = devm_rtc_device_register(&spi->dev, "ds1347",
- &ds1347_rtc_ops, THIS_MODULE);
+ err = regmap_update_bits(map, DS1347_CONTROL_REG, DS1347_WP_BIT, 0);
+ if (err)
+ return err;
+ rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- return 0;
+ rtc->ops = &ds1347_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
+ rtc->range_max = RTC_TIMESTAMP_END_9999;
+
+ return rtc_register_device(rtc);
}
static struct spi_driver ds1347_driver = {
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 367497914c10..6e9ddcd03992 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -439,14 +439,13 @@ static void ds1374_wdt_ping(void)
static void ds1374_wdt_disable(void)
{
- int ret = -ENOIOCTLCMD;
int cr;
cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR);
/* Disable watchdog timer */
cr &= ~DS1374_REG_CR_WACE;
- ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
+ i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr);
}
/*
@@ -586,6 +585,7 @@ static const struct file_operations ds1374_wdt_fops = {
.owner = THIS_MODULE,
.read = ds1374_wdt_read,
.unlocked_ioctl = ds1374_wdt_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.write = ds1374_wdt_write,
.open = ds1374_wdt_open,
.release = ds1374_wdt_release,
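Adding .compat_ioctl = compat_ptr_ioctl is the generic fix for drivers whose ioctl arguments are all pointers: it converts the 32-bit user pointer and forwards to the native handler. A minimal sketch of the pattern (the wdt names here are hypothetical):

	static long my_wdt_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg);

	static const struct file_operations my_wdt_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= my_wdt_ioctl,
		/*
		 * compat_ptr_ioctl() applies compat_ptr() to arg and calls
		 * ->unlocked_ioctl(); only correct when every ioctl argument
		 * is a pointer, never a raw integer packed into arg.
		 */
		.compat_ioctl	= compat_ptr_ioctl,
	};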
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index b6a477519280..a63872c4c76d 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -414,7 +414,6 @@ static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_plat_data *pdata;
int ret = 0;
struct nvmem_config ds1511_nvmem_cfg = {
@@ -431,8 +430,7 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ds1511_base = devm_ioremap_resource(&pdev->dev, res);
+ ds1511_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ds1511_base))
return PTR_ERR(ds1511_base);
pdata->ioaddr = ds1511_base;
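This same two-lines-to-one conversion recurs through most of the probe functions below; devm_platform_ioremap_resource() is simply the fused form of the old sequence. A sketch of the equivalence (error handling elided):

	/* before: fetch the MEM resource, then map it */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* after: one call, same ERR_PTR() semantics on failure */
	base = devm_platform_ioremap_resource(pdev, 0);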
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 219d6b520a69..cdf5e05b9489 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -249,7 +249,6 @@ static int ds1553_nvram_write(void *priv, unsigned int pos, void *val,
static int ds1553_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int cen, sec;
struct rtc_plat_data *pdata;
void __iomem *ioaddr;
@@ -268,8 +267,7 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 184e4a3e2bef..56c670af2e50 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -31,7 +31,10 @@
/* ----------------------------------------------------------------------- */
-/* Standard read/write functions if platform does not provide overrides */
+/*
+ * Standard read/write functions;
+ * all registers are mapped in the CPU address space
+ */
/**
* ds1685_read - read a value from an rtc register.
@@ -59,6 +62,35 @@ ds1685_write(struct ds1685_priv *rtc, int reg, u8 value)
}
/* ----------------------------------------------------------------------- */
+/*
+ * Indirect read/write functions;
+ * access happens via an address and a data register mapped in the
+ * CPU address space
+ */
+
+/**
+ * ds1685_indirect_read - read a value from an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to read.
+ */
+static u8
+ds1685_indirect_read(struct ds1685_priv *rtc, int reg)
+{
+ writeb(reg, rtc->regs);
+ return readb(rtc->data);
+}
+
+/**
+ * ds1685_indirect_write - write a value to an rtc register.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @reg: the register address to write.
+ * @value: value to write to the register.
+ */
+static void
+ds1685_indirect_write(struct ds1685_priv *rtc, int reg, u8 value)
+{
+ writeb(reg, rtc->regs);
+ writeb(value, rtc->data);
+}
/* ----------------------------------------------------------------------- */
/* Inlined functions */
@@ -229,7 +261,7 @@ static int
ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 ctrlb, century;
+ u8 century;
u8 seconds, minutes, hours, wday, mday, month, years;
/* Fetch the time info from the RTC registers. */
@@ -242,7 +274,6 @@ ds1685_rtc_read_time(struct device *dev, struct rtc_time *tm)
month = rtc->read(rtc, RTC_MONTH);
years = rtc->read(rtc, RTC_YEAR);
century = rtc->read(rtc, RTC_CENTURY);
- ctrlb = rtc->read(rtc, RTC_CTRL_B);
ds1685_rtc_end_data_access(rtc);
/* bcd2bin if needed, perform fixups, and store to rtc_time. */
@@ -723,7 +754,7 @@ static int
ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
- u8 ctrla, ctrlb, ctrlc, ctrld, ctrl4a, ctrl4b, ssn[8];
+ u8 ctrla, ctrlb, ctrld, ctrl4a, ctrl4b, ssn[8];
char *model;
/* Read all the relevant data from the control registers. */
@@ -731,7 +762,6 @@ ds1685_rtc_proc(struct device *dev, struct seq_file *seq)
ds1685_rtc_get_ssn(rtc, ssn);
ctrla = rtc->read(rtc, RTC_CTRL_A);
ctrlb = rtc->read(rtc, RTC_CTRL_B);
- ctrlc = rtc->read(rtc, RTC_CTRL_C);
ctrld = rtc->read(rtc, RTC_CTRL_D);
ctrl4a = rtc->read(rtc, RTC_EXT_CTRL_4A);
ctrl4b = rtc->read(rtc, RTC_EXT_CTRL_4B);
@@ -1009,7 +1039,7 @@ ds1685_rtc_sysfs_serial_show(struct device *dev,
}
static DEVICE_ATTR(serial, S_IRUGO, ds1685_rtc_sysfs_serial_show, NULL);
-/**
+/*
* struct ds1685_rtc_sysfs_misc_attrs - list for misc RTC features.
*/
static struct attribute*
@@ -1020,7 +1050,7 @@ ds1685_rtc_sysfs_misc_attrs[] = {
NULL,
};
-/**
+/*
* struct ds1685_rtc_sysfs_misc_grp - attr group for misc RTC features.
*/
static const struct attribute_group
@@ -1040,7 +1070,6 @@ static int
ds1685_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc_dev;
- struct resource *res;
struct ds1685_priv *rtc;
struct ds1685_rtc_platform_data *pdata;
u8 ctrla, ctrlb, hours;
@@ -1063,35 +1092,29 @@ ds1685_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- /*
- * Allocate/setup any IORESOURCE_MEM resources, if required. Not all
- * platforms put the RTC in an easy-access place. Like the SGI Octane,
- * which attaches the RTC to a "ByteBus", hooked to a SuperIO chip
- * that sits behind the IOC3 PCI metadevice.
- */
- if (pdata->alloc_io_resources) {
- /* Get the platform resources. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
- rtc->size = resource_size(res);
-
- /* Request a memory region. */
- /* XXX: mmio-only for now. */
- if (!devm_request_mem_region(&pdev->dev, res->start, rtc->size,
- pdev->name))
- return -EBUSY;
-
- /*
- * Set the base address for the rtc, and ioremap its
- * registers.
- */
- rtc->baseaddr = res->start;
- rtc->regs = devm_ioremap(&pdev->dev, res->start, rtc->size);
- if (!rtc->regs)
- return -ENOMEM;
+ /* Setup resources and access functions */
+ switch (pdata->access_type) {
+ case ds1685_reg_direct:
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ rtc->read = ds1685_read;
+ rtc->write = ds1685_write;
+ break;
+ case ds1685_reg_indirect:
+ rtc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rtc->regs))
+ return PTR_ERR(rtc->regs);
+ rtc->data = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(rtc->data))
+ return PTR_ERR(rtc->data);
+ rtc->read = ds1685_indirect_read;
+ rtc->write = ds1685_indirect_write;
+ break;
}
- rtc->alloc_io_resources = pdata->alloc_io_resources;
+
+ if (!rtc->read || !rtc->write)
+ return -ENXIO;
/* Get the register step size. */
if (pdata->regstep > 0)
@@ -1099,24 +1122,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
else
rtc->regstep = 1;
- /* Platform read function, else default if mmio setup */
- if (pdata->plat_read)
- rtc->read = pdata->plat_read;
- else
- if (pdata->alloc_io_resources)
- rtc->read = ds1685_read;
- else
- return -ENXIO;
-
- /* Platform write function, else default if mmio setup */
- if (pdata->plat_write)
- rtc->write = pdata->plat_write;
- else
- if (pdata->alloc_io_resources)
- rtc->write = ds1685_write;
- else
- return -ENXIO;
-
/* Platform pre-shutdown function, if defined. */
if (pdata->plat_prepare_poweroff)
rtc->prepare_poweroff = pdata->plat_prepare_poweroff;
@@ -1271,7 +1276,6 @@ ds1685_rtc_probe(struct platform_device *pdev)
/* See if the platform doesn't support UIE. */
if (pdata->uie_unsupported)
rtc_dev->uie_unsupported = 1;
- rtc->uie_unsupported = pdata->uie_unsupported;
rtc->dev = rtc_dev;
@@ -1351,7 +1355,7 @@ ds1685_rtc_remove(struct platform_device *pdev)
return 0;
}
-/**
+/*
* ds1685_rtc_driver - rtc driver properties.
*/
static struct platform_driver ds1685_rtc_driver = {
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
index 77cca1392253..9f176bce48ba 100644
--- a/drivers/rtc/rtc-em3027.c
+++ b/drivers/rtc/rtc-em3027.c
@@ -71,7 +71,7 @@ static int em3027_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_hour = bcd2bin(buf[2]);
tm->tm_mday = bcd2bin(buf[3]);
tm->tm_wday = bcd2bin(buf[4]);
- tm->tm_mon = bcd2bin(buf[5]);
+ tm->tm_mon = bcd2bin(buf[5]) - 1;
tm->tm_year = bcd2bin(buf[6]) + 100;
return 0;
@@ -94,7 +94,7 @@ static int em3027_set_time(struct device *dev, struct rtc_time *tm)
buf[3] = bin2bcd(tm->tm_hour);
buf[4] = bin2bcd(tm->tm_mday);
buf[5] = bin2bcd(tm->tm_wday);
- buf[6] = bin2bcd(tm->tm_mon);
+ buf[6] = bin2bcd(tm->tm_mon + 1);
buf[7] = bin2bcd(tm->tm_year % 100);
/* write time/date registers */
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1766496385fe..8ec9ea1ca72e 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -122,15 +122,13 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
static int ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
- struct resource *res;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
if (!ep93xx_rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep93xx_rtc->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ ep93xx_rtc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_rtc->mmio_base))
return PTR_ERR(ep93xx_rtc->mmio_base);
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 8df2075af9a2..9e6e994cce99 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -180,10 +180,7 @@ static int ftm_rtc_alarm_irq_enable(struct device *dev,
*/
static int ftm_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- rtc_time_to_tm(ts64.tv_sec, tm);
+ rtc_time64_to_tm(ktime_get_real_seconds(), tm);
return 0;
}
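rtc_time_to_tm() and rtc_tm_to_time() work on an unsigned long seconds count, which overflows on 32-bit systems; the time64_t based helpers used here avoid that. A minimal sketch of the converted alarm arithmetic, with alm and rtc as in the hunk below:

	time64_t now = ktime_get_real_seconds();
	time64_t alm_time = rtc_tm_to_time64(&alm->time);

	/* 64-bit seconds: the difference cannot wrap in 2038/2106 */
	unsigned long long cycle = (alm_time - now) * rtc->alarm_freq;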
@@ -206,16 +203,14 @@ static int ftm_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
*/
static int ftm_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
- struct rtc_time tm;
- unsigned long now, alm_time, cycle;
+ time64_t alm_time;
+ unsigned long long cycle;
struct ftm_rtc *rtc = dev_get_drvdata(dev);
- ftm_rtc_read_time(dev, &tm);
- rtc_tm_to_time(&tm, &now);
- rtc_tm_to_time(&alm->time, &alm_time);
+ alm_time = rtc_tm_to_time64(&alm->time);
ftm_clean_alarm(rtc);
- cycle = (alm_time - now) * rtc->alarm_freq;
+ cycle = (alm_time - ktime_get_real_seconds()) * rtc->alarm_freq;
if (cycle > MAX_COUNT_VAL) {
pr_err("Out of alarm range {0~262} seconds.\n");
return -ERANGE;
@@ -248,7 +243,6 @@ static const struct rtc_class_ops ftm_rtc_ops = {
static int ftm_rtc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
int irq;
int ret;
struct ftm_rtc *rtc;
@@ -265,13 +259,7 @@ static int ftm_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "cannot get resource for rtc\n");
- return -ENODEV;
- }
-
- rtc->base = devm_ioremap_resource(&pdev->dev, r);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
dev_err(&pdev->dev, "cannot ioremap resource for rtc\n");
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 1a3420ee6a4d..cb6b0ad7ec3f 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -165,7 +165,6 @@ static const struct rtc_class_ops goldfish_rtc_ops = {
static int goldfish_rtc_probe(struct platform_device *pdev)
{
struct goldfish_rtc *rtcdrv;
- struct resource *r;
int err;
rtcdrv = devm_kzalloc(&pdev->dev, sizeof(*rtcdrv), GFP_KERNEL);
@@ -173,12 +172,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, rtcdrv);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- rtcdrv->base = devm_ioremap_resource(&pdev->dev, r);
+ rtcdrv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtcdrv->base))
return -ENODEV;
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 3089645e0ce8..18023e472cbc 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -307,7 +307,6 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
{
int ret;
struct jz4740_rtc *rtc;
- struct resource *mem;
const struct platform_device_id *id = platform_get_device_id(pdev);
const struct of_device_id *of_id = of_match_device(
jz4740_rtc_of_match, &pdev->dev);
@@ -326,8 +325,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (rtc->irq < 0)
return -ENOENT;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, mem);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index a8bb15606ec8..00ef16ba9480 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -194,15 +194,13 @@ static const struct rtc_class_ops lpc24xx_rtc_ops = {
static int lpc24xx_rtc_probe(struct platform_device *pdev)
{
struct lpc24xx_rtc *rtc;
- struct resource *res;
int irq, ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtc_base))
return PTR_ERR(rtc->rtc_base);
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index ac393230e592..15d8abda81fe 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -185,7 +185,6 @@ static const struct rtc_class_ops lpc32xx_rtc_ops = {
static int lpc32xx_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct lpc32xx_rtc *rtc;
int err;
u32 tmp;
@@ -194,8 +193,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
if (unlikely(!rtc))
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->rtc_base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->rtc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->rtc_base))
return PTR_ERR(rtc->rtc_base);
@@ -266,16 +264,6 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int lpc32xx_rtc_remove(struct platform_device *pdev)
-{
- struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev);
-
- if (rtc->irq >= 0)
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int lpc32xx_rtc_suspend(struct device *dev)
{
@@ -357,7 +345,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
- .remove = lpc32xx_rtc_remove,
.driver = {
.name = "rtc-lpc32xx",
.pm = LPC32XX_RTC_PM_OPS,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5f46f85f814b..9b70b371bd0c 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -235,9 +235,6 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[8];
int err, flags;
- if (tm->tm_year < 100 || tm->tm_year > 199)
- return -EINVAL;
-
buf[M41T80_REG_SSEC] = 0;
buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec);
buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min);
@@ -705,7 +702,6 @@ static ssize_t wdt_read(struct file *file, char __user *buf,
/**
* wdt_ioctl:
- * @inode: inode of the device
* @file: file handle to the device
* @cmd: watchdog command
* @arg: argument pointer
@@ -840,6 +836,7 @@ static const struct file_operations wdt_fops = {
.owner = THIS_MODULE,
.read = wdt_read,
.unlocked_ioctl = wdt_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.write = wdt_write,
.open = wdt_open,
.release = wdt_release,
@@ -925,6 +922,8 @@ static int m41t80_probe(struct i2c_client *client,
}
m41t80_data->rtc->ops = &m41t80_rtc_ops;
+ m41t80_data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ m41t80_data->rtc->range_max = RTC_TIMESTAMP_END_2099;
if (client->irq <= 0) {
/* We cannot support UIE mode if we do not have an IRQ line */
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 59b54ed9b841..75a0e73071d8 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -218,7 +218,6 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
- struct resource *res;
unsigned char reg;
int err;
struct nvmem_config m48t86_nvmem_cfg = {
@@ -235,17 +234,11 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- info->index_reg = devm_ioremap_resource(&pdev->dev, res);
+ info->index_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->index_reg))
return PTR_ERR(info->index_reg);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
- info->data_reg = devm_ioremap_resource(&pdev->dev, res);
+ info->data_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(info->data_reg))
return PTR_ERR(info->data_reg);
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 2ecd8752b088..df2829dd55ad 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -172,7 +172,20 @@ int mc146818_set_time(struct rtc_time *time)
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
- CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_X86
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0x17) ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+ CMOS_WRITE((save_freq_select & (~RTC_DIV_RESET2)),
+ RTC_FREQ_SELECT);
+ save_freq_select &= ~RTC_DIV_RESET2;
+ } else
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2),
+ RTC_FREQ_SELECT);
+#else
+ CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);
+#endif
#ifdef CONFIG_MACH_DECSTATION
CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index e08b981dfc21..47ebcf834cc2 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -131,7 +131,7 @@ static u32 meson_rtc_get_data(struct meson_rtc *rtc)
static int meson_rtc_get_bus(struct meson_rtc *rtc)
{
- int ret, retries = 3;
+ int ret, retries;
u32 val;
/* prepare bus for transfers, set all lines low */
@@ -292,7 +292,6 @@ static int meson_rtc_probe(struct platform_device *pdev)
};
struct device *dev = &pdev->dev;
struct meson_rtc *rtc;
- struct resource *res;
void __iomem *base;
int ret;
u32 tm;
@@ -312,8 +311,7 @@ static int meson_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &meson_rtc_ops;
rtc->rtc->range_max = U32_MAX;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index 1c2d3c4a4963..80e364baac53 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -88,28 +88,16 @@ static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
__raw_writel(val, &priv->regs[reg]);
}
-static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
- unsigned int reg)
-{
- msm6242_write(priv, msm6242_read(priv, reg) | val, reg);
-}
-
-static inline void msm6242_clear(struct msm6242_priv *priv, unsigned int val,
- unsigned int reg)
-{
- msm6242_write(priv, msm6242_read(priv, reg) & ~val, reg);
-}
-
static void msm6242_lock(struct msm6242_priv *priv)
{
int cnt = 5;
- msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
while ((msm6242_read(priv, MSM6242_CD) & MSM6242_CD_BUSY) && cnt) {
- msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
udelay(70);
- msm6242_set(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_HOLD|MSM6242_CD_IRQ_FLAG, MSM6242_CD);
cnt--;
}
@@ -120,7 +108,7 @@ static void msm6242_lock(struct msm6242_priv *priv)
static void msm6242_unlock(struct msm6242_priv *priv)
{
- msm6242_clear(priv, MSM6242_CD_HOLD, MSM6242_CD);
+ msm6242_write(priv, MSM6242_CD_IRQ_FLAG, MSM6242_CD);
}
static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
@@ -133,7 +121,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
msm6242_read(priv, MSM6242_SECOND1);
tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
msm6242_read(priv, MSM6242_MINUTE1);
- tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 +
+ tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
+ MSM6242_HOUR10_HR_MASK) * 10 +
msm6242_read(priv, MSM6242_HOUR1);
tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
msm6242_read(priv, MSM6242_DAY1);
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 704229eb0cac..5249fc99fd5f 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -4,69 +4,19 @@
* Author: Tianping.Fang <tianping.fang@mediatek.com>
*/
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
-#include <linux/irqdomain.h>
-#include <linux/platform_device.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/mfd/mt6397/core.h>
-
-#define RTC_BBPU 0x0000
-#define RTC_BBPU_CBUSY BIT(6)
-
-#define RTC_WRTGR 0x003c
-
-#define RTC_IRQ_STA 0x0002
-#define RTC_IRQ_STA_AL BIT(0)
-#define RTC_IRQ_STA_LP BIT(3)
-
-#define RTC_IRQ_EN 0x0004
-#define RTC_IRQ_EN_AL BIT(0)
-#define RTC_IRQ_EN_ONESHOT BIT(2)
-#define RTC_IRQ_EN_LP BIT(3)
-#define RTC_IRQ_EN_ONESHOT_AL (RTC_IRQ_EN_ONESHOT | RTC_IRQ_EN_AL)
-
-#define RTC_AL_MASK 0x0008
-#define RTC_AL_MASK_DOW BIT(4)
-
-#define RTC_TC_SEC 0x000a
-/* Min, Hour, Dom... register offset to RTC_TC_SEC */
-#define RTC_OFFSET_SEC 0
-#define RTC_OFFSET_MIN 1
-#define RTC_OFFSET_HOUR 2
-#define RTC_OFFSET_DOM 3
-#define RTC_OFFSET_DOW 4
-#define RTC_OFFSET_MTH 5
-#define RTC_OFFSET_YEAR 6
-#define RTC_OFFSET_COUNT 7
-
-#define RTC_AL_SEC 0x0018
-
-#define RTC_PDN2 0x002e
-#define RTC_PDN2_PWRON_ALARM BIT(4)
-
-#define RTC_MIN_YEAR 1968
-#define RTC_BASE_YEAR 1900
-#define RTC_NUM_YEARS 128
-#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
-
-struct mt6397_rtc {
- struct device *dev;
- struct rtc_device *rtc_dev;
- struct mutex lock;
- struct regmap *regmap;
- int irq;
- u32 addr_base;
-};
+#include <linux/mfd/mt6397/rtc.h>
+#include <linux/mod_devicetable.h>
static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
{
- unsigned long timeout = jiffies + HZ;
int ret;
u32 data;
@@ -74,19 +24,13 @@ static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
if (ret < 0)
return ret;
- while (1) {
- ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_BBPU,
- &data);
- if (ret < 0)
- break;
- if (!(data & RTC_BBPU_CBUSY))
- break;
- if (time_after(jiffies, timeout)) {
- ret = -ETIMEDOUT;
- break;
- }
- cpu_relax();
- }
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->addr_base + RTC_BBPU, data,
+ !(data & RTC_BBPU_CBUSY),
+ MTK_RTC_POLL_DELAY_US,
+ MTK_RTC_POLL_TIMEOUT);
+ if (ret < 0)
+		dev_err(rtc->dev, "failed to write WRTGR: %d\n", ret);
return ret;
}
@@ -319,19 +263,19 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return rtc->irq;
rtc->regmap = mt6397_chip->regmap;
- rtc->dev = &pdev->dev;
mutex_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_allocate_device(rtc->dev);
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc_dev))
return PTR_ERR(rtc->rtc_dev);
- ret = request_threaded_irq(rtc->irq, NULL,
- mtk_rtc_irq_handler_thread,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
- "mt6397-rtc", rtc);
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ mtk_rtc_irq_handler_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ "mt6397-rtc", rtc);
+
if (ret) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
rtc->irq, ret);
@@ -353,15 +297,6 @@ out_free_irq:
return ret;
}
-static int mtk_rtc_remove(struct platform_device *pdev)
-{
- struct mt6397_rtc *rtc = platform_get_drvdata(pdev);
-
- free_irq(rtc->irq, rtc);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int mt6397_rtc_suspend(struct device *dev)
{
@@ -388,6 +323,7 @@ static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
mt6397_rtc_resume);
static const struct of_device_id mt6397_rtc_of_match[] = {
+ { .compatible = "mediatek,mt6323-rtc", },
{ .compatible = "mediatek,mt6397-rtc", },
{ }
};
@@ -400,7 +336,6 @@ static struct platform_driver mtk_rtc_driver = {
.pm = &mt6397_pm_ops,
},
.probe = mtk_rtc_probe,
- .remove = mtk_rtc_remove,
};
module_platform_driver(mtk_rtc_driver);
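The regmap_read_poll_timeout() conversion above replaces the hand-rolled jiffies loop: the macro re-reads the register, sleeps between attempts, and returns -ETIMEDOUT by itself. A minimal sketch of the call shape (the poll constants come from the new shared mt6397 rtc.h header):

	u32 data;
	int ret;

	/* poll RTC_BBPU until the busy bit clears or the timeout expires */
	ret = regmap_read_poll_timeout(rtc->regmap,
				       rtc->addr_base + RTC_BBPU, data,
				       !(data & RTC_BBPU_CBUSY),
				       MTK_RTC_POLL_DELAY_US,
				       MTK_RTC_POLL_TIMEOUT);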
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
index 16bd26b5aa6f..f1e356394814 100644
--- a/drivers/rtc/rtc-mt7622.c
+++ b/drivers/rtc/rtc-mt7622.c
@@ -303,7 +303,6 @@ MODULE_DEVICE_TABLE(of, mtk_rtc_match);
static int mtk_rtc_probe(struct platform_device *pdev)
{
struct mtk_rtc *hw;
- struct resource *res;
int ret;
hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
@@ -312,8 +311,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->base = devm_ioremap_resource(&pdev->dev, res);
+ hw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hw->base))
return PTR_ERR(hw->base);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index ab9db57a6834..d5f190e578e4 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -212,7 +212,6 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
static int __init mv_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct rtc_plat_data *pdata;
u32 rtc_time;
int ret = 0;
@@ -221,8 +220,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
return PTR_ERR(pdata->ioaddr);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index a2941c875a06..988a4dfcfaf8 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -727,7 +727,6 @@ static struct nvmem_config omap_rtc_nvmem_config = {
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
- struct resource *res;
u8 reg, mask, new_ctrl;
const struct platform_device_id *id_entry;
const struct of_device_id *of_id;
@@ -764,8 +763,7 @@ static int omap_rtc_probe(struct platform_device *pdev)
if (!IS_ERR(rtc->clk))
clk_prepare_enable(rtc->clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base)) {
clk_disable_unprepare(rtc->clk);
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 02b069caffd5..ba5baaca47be 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -417,6 +417,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
+ u32 wdd_timeout;
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -459,7 +460,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Watchdog timer enabled and reset pin /RST activated when timed out.
* Select 1Hz clock source for watchdog timer.
- * Timer is not started until WD_VAL is loaded with a valid value.
* Note: Countdown timer disabled and not available.
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_WD_CTL,
@@ -475,6 +475,14 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
+ /* Test if watchdog timer is started by bootloader */
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+
#ifdef CONFIG_WATCHDOG
ret = devm_watchdog_register_device(dev, &pcf2127->wdd);
if (ret)
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 2f435e533b10..b24c908f5f06 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,10 +35,6 @@
#define REG_OFFSET 0x0e
#define REG_OFFSET_MODE BIT(7)
-struct pcf8523 {
- struct rtc_device *rtc;
-};
-
static int pcf8523_read(struct i2c_client *client, u8 reg, u8 *valuep)
{
struct i2c_msg msgs[2];
@@ -345,16 +341,12 @@ static const struct rtc_class_ops pcf8523_rtc_ops = {
static int pcf8523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pcf8523 *pcf;
+ struct rtc_device *rtc;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
- if (!pcf)
- return -ENOMEM;
-
err = pcf8523_load_capacitance(client);
if (err < 0)
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
@@ -364,12 +356,10 @@ static int pcf8523_probe(struct i2c_client *client,
if (err < 0)
return err;
- pcf->rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
+ rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
&pcf8523_rtc_ops, THIS_MODULE);
- if (IS_ERR(pcf->rtc))
- return PTR_ERR(pcf->rtc);
-
- i2c_set_clientdata(client, pcf);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 24baa4767b11..3c322f3079b0 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -390,7 +390,7 @@ static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
#define clkout_hw_to_pcf8563(_hw) container_of(_hw, struct pcf8563, clkout_hw)
-static int clkout_rates[] = {
+static const int clkout_rates[] = {
32768,
1024,
32,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 17653ed52ebb..2b6946744654 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -298,7 +298,6 @@ static int pic32_rtc_remove(struct platform_device *pdev)
static int pic32_rtc_probe(struct platform_device *pdev)
{
struct pic32_rtc_dev *pdata;
- struct resource *res;
int ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
@@ -311,8 +310,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
if (pdata->alarm_irq < 0)
return pdata->alarm_irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base))
return PTR_ERR(pdata->reg_base);
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index f5a30e0f16c2..07ea1be3abb9 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -49,7 +49,7 @@ struct pm8xxx_rtc_regs {
* @regmap: regmap used to access RTC registers
* @allow_set_time: indicates whether writing to the RTC is allowed
* @rtc_alarm_irq: rtc alarm irq number.
- * @ctrl_reg: rtc control register.
+ * @regs: rtc registers description.
* @rtc_dev: device structure.
* @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
*/
diff --git a/drivers/rtc/rtc-r7301.c b/drivers/rtc/rtc-r7301.c
index 2498278853af..aaf1b95e3990 100644
--- a/drivers/rtc/rtc-r7301.c
+++ b/drivers/rtc/rtc-r7301.c
@@ -354,21 +354,16 @@ static void rtc7301_init(struct rtc7301_priv *priv)
static int __init rtc7301_rtc_probe(struct platform_device *dev)
{
- struct resource *res;
void __iomem *regs;
struct rtc7301_priv *priv;
struct rtc_device *rtc;
int ret;
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- regs = devm_ioremap_resource(&dev->dev, res);
+ regs = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/rtc/rtc-rtd119x.c b/drivers/rtc/rtc-rtd119x.c
index b233559d950b..bb98f2d574a5 100644
--- a/drivers/rtc/rtc-rtd119x.c
+++ b/drivers/rtc/rtc-rtd119x.c
@@ -167,7 +167,6 @@ static const struct of_device_id rtd119x_rtc_dt_ids[] = {
static int rtd119x_rtc_probe(struct platform_device *pdev)
{
struct rtd119x_rtc *data;
- struct resource *res;
u32 val;
int ret;
@@ -178,8 +177,7 @@ static int rtd119x_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->base_year = 2014;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 2b316661a578..6b7b3a69601a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/clk-provider.h>
#include <linux/bcd.h>
#include <linux/bitops.h>
#include <linux/i2c.h>
@@ -52,6 +53,11 @@
#define RV3028_STATUS_CLKF BIT(6)
#define RV3028_STATUS_EEBUSY BIT(7)
+#define RV3028_CLKOUT_FD_MASK GENMASK(2, 0)
+#define RV3028_CLKOUT_PORIE BIT(3)
+#define RV3028_CLKOUT_CLKSY BIT(6)
+#define RV3028_CLKOUT_CLKOE BIT(7)
+
#define RV3028_CTRL1_EERD BIT(3)
#define RV3028_CTRL1_WADA BIT(5)
@@ -84,6 +90,9 @@ struct rv3028_data {
struct regmap *regmap;
struct rtc_device *rtc;
enum rv3028_type type;
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clkout_hw;
+#endif
};
static u16 rv3028_trickle_resistors[] = {1000, 3000, 6000, 11000};
@@ -581,6 +590,140 @@ restore_eerd:
return ret;
}
+#ifdef CONFIG_COMMON_CLK
+#define clkout_hw_to_rv3028(hw) container_of(hw, struct rv3028_data, clkout_hw)
+
+static int clkout_rates[] = {
+ 32768,
+ 8192,
+ 1024,
+ 64,
+ 32,
+ 1,
+};
+
+static unsigned long rv3028_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ int clkout, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &clkout);
+ if (ret < 0)
+ return 0;
+
+ clkout &= RV3028_CLKOUT_FD_MASK;
+ return clkout_rates[clkout];
+}
+
+static long rv3028_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++)
+ if (clkout_rates[i] <= rate)
+ return clkout_rates[i];
+
+ return 0;
+}
+
+static int rv3028_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) {
+ if (clkout_rates[i] == rate) {
+ ret = regmap_update_bits(rv3028->regmap,
+ RV3028_CLKOUT,
+ RV3028_CLKOUT_FD_MASK, i);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(rv3028->regmap, RV3028_CLKOUT,
+ RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int rv3028_clkout_prepare(struct clk_hw *hw)
+{
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ return regmap_write(rv3028->regmap, RV3028_CLKOUT,
+ RV3028_CLKOUT_CLKSY | RV3028_CLKOUT_CLKOE);
+}
+
+static void rv3028_clkout_unprepare(struct clk_hw *hw)
+{
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ regmap_write(rv3028->regmap, RV3028_CLKOUT, 0x0);
+ regmap_update_bits(rv3028->regmap, RV3028_STATUS,
+ RV3028_STATUS_CLKF, 0);
+}
+
+static int rv3028_clkout_is_prepared(struct clk_hw *hw)
+{
+ int clkout, ret;
+ struct rv3028_data *rv3028 = clkout_hw_to_rv3028(hw);
+
+ ret = regmap_read(rv3028->regmap, RV3028_CLKOUT, &clkout);
+ if (ret < 0)
+ return ret;
+
+ return !!(clkout & RV3028_CLKOUT_CLKOE);
+}
+
+static const struct clk_ops rv3028_clkout_ops = {
+ .prepare = rv3028_clkout_prepare,
+ .unprepare = rv3028_clkout_unprepare,
+ .is_prepared = rv3028_clkout_is_prepared,
+ .recalc_rate = rv3028_clkout_recalc_rate,
+ .round_rate = rv3028_clkout_round_rate,
+ .set_rate = rv3028_clkout_set_rate,
+};
+
+static int rv3028_clkout_register_clk(struct rv3028_data *rv3028,
+ struct i2c_client *client)
+{
+ int ret;
+ struct clk *clk;
+ struct clk_init_data init;
+ struct device_node *node = client->dev.of_node;
+
+ ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS,
+ RV3028_STATUS_CLKF, 0);
+ if (ret < 0)
+ return ret;
+
+ init.name = "rv3028-clkout";
+ init.ops = &rv3028_clkout_ops;
+ init.flags = 0;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ rv3028->clkout_hw.init = &init;
+
+	/* optional override of the clock name */
+ of_property_read_string(node, "clock-output-names", &init.name);
+
+ /* register the clock */
+ clk = devm_clk_register(&client->dev, &rv3028->clkout_hw);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ return 0;
+}
+#endif
+
static struct rtc_class_ops rv3028_rtc_ops = {
.read_time = rv3028_get_time,
.set_time = rv3028_set_time,
@@ -708,6 +851,9 @@ static int rv3028_probe(struct i2c_client *client)
rv3028->rtc->max_user_freq = 1;
+#ifdef CONFIG_COMMON_CLK
+ rv3028_clkout_register_clk(rv3028, client);
+#endif
return 0;
}
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 71e20a6bd387..3a9eb7043f01 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the Epson RTC module RX-6110 SA
*
* Copyright(C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright(C) SEIKO EPSON CORPORATION 2013. All rights reserved.
- *
- * This driver software is distributed as is, without any warranty of any kind,
- * either express or implied as further specified in the GNU Public License.
- * This software may be used and distributed according to the terms of the GNU
- * Public License, version 2 as published by the Free Software Foundation.
- * See the file COPYING in the main directory of this archive for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bcd.h>
@@ -370,11 +362,6 @@ static int rx6110_probe(struct spi_device *spi)
return 0;
}
-static int rx6110_remove(struct spi_device *spi)
-{
- return 0;
-}
-
static const struct spi_device_id rx6110_id[] = {
{ "rx6110", 0 },
{ }
@@ -393,7 +380,6 @@ static struct spi_driver rx6110_driver = {
.of_match_table = of_match_ptr(rx6110_spi_of_match),
},
.probe = rx6110_probe,
- .remove = rx6110_remove,
.id_table = rx6110_id,
};
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index da34cfd70f95..03672a246356 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -423,8 +423,6 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
.ioctl = s35390a_rtc_ioctl,
};
-static struct i2c_driver s35390a_driver;
-
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -456,6 +454,10 @@ static int s35390a_probe(struct i2c_client *client,
}
}
+ s35390a->rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(s35390a->rtc))
+ return PTR_ERR(s35390a->rtc);
+
err_read = s35390a_read_status(s35390a, &status1);
if (err_read < 0) {
dev_err(dev, "error resetting chip\n");
@@ -485,11 +487,9 @@ static int s35390a_probe(struct i2c_client *client,
device_set_wakeup_capable(dev, 1);
- s35390a->rtc = devm_rtc_device_register(dev, s35390a_driver.driver.name,
- &s35390a_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(s35390a->rtc))
- return PTR_ERR(s35390a->rtc);
+ s35390a->rtc->ops = &s35390a_rtc_ops;
+ s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099;
/* supports per-minute alarms only, therefore set uie_unsupported */
s35390a->rtc->uie_unsupported = 1;
@@ -497,7 +497,7 @@ static int s35390a_probe(struct i2c_client *client,
if (status1 & S35390A_FLAG_INT2)
rtc_update_irq(s35390a->rtc, 1, RTC_AF);
- return 0;
+ return rtc_register_device(s35390a->rtc);
}
static struct i2c_driver s35390a_driver = {
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7801249c254b..e1b50e682fc4 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -444,7 +444,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
{
struct s3c_rtc *info = NULL;
struct rtc_time rtc_tm;
- struct resource *res;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -475,8 +474,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
info->irq_tick, info->irq_alarm);
/* get the memory region */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->base = devm_ioremap_resource(&pdev->dev, res);
+ info->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 86fa723b3b76..d37893f6eaee 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -252,7 +252,6 @@ EXPORT_SYMBOL_GPL(sa1100_rtc_init);
static int sa1100_rtc_probe(struct platform_device *pdev)
{
struct sa1100_rtc *info;
- struct resource *iores;
void __iomem *base;
int irq_1hz, irq_alarm;
int ret;
@@ -281,8 +280,7 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
return ret;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, iores);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index b95676899750..36810dd40cd3 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -661,12 +661,6 @@ static int sprd_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sprd_rtc_remove(struct platform_device *pdev)
-{
- device_init_wakeup(&pdev->dev, 0);
- return 0;
-}
-
static const struct of_device_id sprd_rtc_of_match[] = {
{ .compatible = "sprd,sc2731-rtc", },
{ },
@@ -679,7 +673,6 @@ static struct platform_driver sprd_rtc_driver = {
.of_match_table = sprd_rtc_of_match,
},
.probe = sprd_rtc_probe,
- .remove = sprd_rtc_remove,
};
module_platform_driver(sprd_rtc_driver);
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index c759c55359a1..a2c9c55667cd 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -365,13 +365,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
return 0;
}
-static int sirfsoc_rtc_remove(struct platform_device *pdev)
-{
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int sirfsoc_rtc_suspend(struct device *dev)
{
@@ -450,7 +443,6 @@ static struct platform_driver sirfsoc_rtc_driver = {
.of_match_table = sirfsoc_rtc_of_match,
},
.probe = sirfsoc_rtc_probe,
- .remove = sirfsoc_rtc_remove,
};
module_platform_driver(sirfsoc_rtc_driver);
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 9f23b24f466c..833daeb7b60e 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -347,7 +347,6 @@ static const struct rtc_class_ops spear_rtc_ops = {
static int spear_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct spear_rtc_config *config;
int status = 0;
int irq;
@@ -369,8 +368,7 @@ static int spear_rtc_probe(struct platform_device *pdev)
return status;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ config->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config->ioaddr))
return PTR_ERR(config->ioaddr);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 49474a31c66d..51041dc08af4 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -41,7 +41,6 @@
struct st_rtc {
struct rtc_device *rtc_dev;
struct rtc_wkalrm alarm;
- struct resource *res;
struct clk *clk;
unsigned long clkrate;
void __iomem *ioaddr;
@@ -186,7 +185,6 @@ static int st_rtc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct st_rtc *rtc;
- struct resource *res;
uint32_t mode;
int ret = 0;
@@ -210,8 +208,7 @@ static int st_rtc_probe(struct platform_device *pdev)
spin_lock_init(&rtc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ rtc->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->ioaddr))
return PTR_ERR(rtc->ioaddr);
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index a833ebc4ecb9..01a45044f468 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -256,7 +256,6 @@ static int stk17ta8_nvram_write(void *priv, unsigned int pos, void *val,
static int stk17ta8_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
unsigned int cal;
unsigned int flags;
struct rtc_plat_data *pdata;
@@ -275,8 +274,7 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
pdata->ioaddr = ioaddr;
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 2999e33a7e37..781cabb2afca 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -693,15 +693,13 @@ static int stm32_rtc_probe(struct platform_device *pdev)
{
struct stm32_rtc *rtc;
const struct stm32_rtc_registers *regs;
- struct resource *res;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 5e2bd9f1d01e..8dcd20b34dde 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data {
struct sun6i_rtc_dev {
struct rtc_device *rtc;
- struct device *dev;
const struct sun6i_rtc_clk_data *data;
void __iomem *base;
int irq;
@@ -669,7 +668,6 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
return -ENODEV;
platform_set_drvdata(pdev, chip);
- chip->dev = &pdev->dev;
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index 9b6f2483c1c6..f5d7f44550ce 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -422,7 +422,6 @@ MODULE_DEVICE_TABLE(of, sunxi_rtc_dt_ids);
static int sunxi_rtc_probe(struct platform_device *pdev)
{
struct sunxi_rtc_dev *chip;
- struct resource *res;
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
@@ -436,8 +435,7 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 69d695bf9500..7fbb1741692f 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -103,7 +103,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct tegra_rtc_info *info = dev_get_drvdata(dev);
unsigned long flags;
- u32 sec, msec;
+ u32 sec;
/*
* RTC hardware copies seconds to shadow seconds when a read of
@@ -111,7 +111,7 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
spin_lock_irqsave(&info->lock, flags);
- msec = readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS);
+ readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS);
sec = readl(info->base + TEGRA_RTC_REG_SHADOW_SECONDS);
spin_unlock_irqrestore(&info->lock, flags);
@@ -277,15 +277,13 @@ MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match);
static int tegra_rtc_probe(struct platform_device *pdev)
{
struct tegra_rtc_info *info;
- struct resource *res;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->base = devm_ioremap_resource(&pdev->dev, res);
+ info->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 2c0467a9e717..e3840386f430 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -361,6 +361,13 @@ static const struct rtc_class_ops tps65910_rtc_ops = {
.set_offset = tps65910_set_offset,
};
+static const struct rtc_class_ops tps65910_rtc_ops_noirq = {
+ .read_time = tps65910_rtc_read_time,
+ .set_time = tps65910_rtc_set_time,
+ .read_offset = tps65910_read_offset,
+ .set_offset = tps65910_set_offset,
+};
+
static int tps65910_rtc_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = NULL;
@@ -414,14 +421,16 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
dev_name(&pdev->dev), &pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "IRQ is not free.\n");
- return ret;
- }
+ if (ret < 0)
+ irq = -1;
+
tps_rtc->irq = irq;
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (irq != -1) {
+ device_set_wakeup_capable(&pdev->dev, 1);
+ tps_rtc->rtc->ops = &tps65910_rtc_ops;
+ } else
+ tps_rtc->rtc->ops = &tps65910_rtc_ops_noirq;
- tps_rtc->rtc->ops = &tps65910_rtc_ops;
tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 5a29915a06ec..715b82981279 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -236,7 +236,6 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct tx4939rtc_plat_data *pdata;
- struct resource *res;
int irq, ret;
struct nvmem_config nvmem_cfg = {
.name = "tx4939_nvram",
@@ -253,8 +252,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->rtcreg = devm_ioremap_resource(&pdev->dev, res);
+ pdata->rtcreg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->rtcreg))
return PTR_ERR(pdata->rtcreg);
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 63ffba21397b..d2da92187d56 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -284,7 +284,6 @@ static int rtc_probe(struct platform_device *pdev)
struct v3020 *chip;
int retval = -EBUSY;
int i;
- int temp;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -302,7 +301,7 @@ static int rtc_probe(struct platform_device *pdev)
/* Make sure the v3020 expects a communication cycle
* by reading 8 times */
for (i = 0; i < 8; i++)
- temp = chip->ops->read_bit(chip);
+ chip->ops->read_bit(chip);
/* Test chip by doing a write/read sequence
* to the chip ram */
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c75230562c0d..c3671043ace7 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*/
+#include <linux/compat.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -66,6 +67,9 @@ static void __iomem *rtc2_base;
#define rtc2_read(offset) readw(rtc2_base + (offset))
#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))
+/* 32-bit compat for ioctls that nobody else uses */
+#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)
+
static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
static DEFINE_SPINLOCK(rtc_lock);
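The extra define is needed because _IOR() folds the argument size into the command number: a 32-bit process passes a 4-byte long, so the RTC_EPOCH_READ value it sends differs from the one a 64-bit kernel computes. A sketch of the two encodings, with RTC_EPOCH_READ as defined in <linux/rtc.h>:

	/* direction, size, type and number are packed into one cmd word */
	#define RTC_EPOCH_READ	 _IOR('p', 0x0d, unsigned long)	/* size 8 on 64-bit */
	#define RTC_EPOCH_READ32 _IOR('p', 0x0d, __u32)		/* size 4, matching
								   32-bit userspace */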
@@ -179,6 +183,10 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
switch (cmd) {
case RTC_EPOCH_READ:
return put_user(epoch, (unsigned long __user *)arg);
+#ifdef CONFIG_64BIT
+ case RTC_EPOCH_READ32:
+ return put_user(epoch, (unsigned int __user *)arg);
+#endif
case RTC_EPOCH_SET:
/* Doesn't support before 1900 */
if (arg < 1900)
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index d5d14cf86e0d..e2588625025f 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -122,12 +122,6 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
- if (tm->tm_year < 100) {
- dev_warn(dev, "Only years 2000-2199 are supported by the "
- "hardware!\n");
- return -EINVAL;
- }
-
writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
| (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
| (bin2bcd(tm->tm_mday))
@@ -200,7 +194,6 @@ static const struct rtc_class_ops vt8500_rtc_ops = {
static int vt8500_rtc_probe(struct platform_device *pdev)
{
struct vt8500_rtc *vt8500_rtc;
- struct resource *res;
int ret;
vt8500_rtc = devm_kzalloc(&pdev->dev,
@@ -215,8 +208,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
if (vt8500_rtc->irq_alarm < 0)
return vt8500_rtc->irq_alarm;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vt8500_rtc->regbase = devm_ioremap_resource(&pdev->dev, res);
+ vt8500_rtc->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vt8500_rtc->regbase))
return PTR_ERR(vt8500_rtc->regbase);
@@ -224,27 +216,23 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
writel(VT8500_RTC_CR_ENABLE,
vt8500_rtc->regbase + VT8500_RTC_CR);
- vt8500_rtc->rtc = devm_rtc_device_register(&pdev->dev, "vt8500-rtc",
- &vt8500_rtc_ops, THIS_MODULE);
- if (IS_ERR(vt8500_rtc->rtc)) {
- ret = PTR_ERR(vt8500_rtc->rtc);
- dev_err(&pdev->dev,
- "Failed to register RTC device -> %d\n", ret);
- goto err_return;
- }
+ vt8500_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(vt8500_rtc->rtc))
+ return PTR_ERR(vt8500_rtc->rtc);
+
+ vt8500_rtc->rtc->ops = &vt8500_rtc_ops;
+ vt8500_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ vt8500_rtc->rtc->range_max = RTC_TIMESTAMP_END_2199;
ret = devm_request_irq(&pdev->dev, vt8500_rtc->irq_alarm,
vt8500_rtc_irq, 0, "rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
- goto err_return;
+ return ret;
}
- return 0;
-
-err_return:
- return ret;
+ return rtc_register_device(vt8500_rtc->rtc);
}
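This is the same modernization applied to ds1347, m41t80 and s35390a earlier in this section: allocate first, fill in ops and a supported time range, then register. Because the core rejects times outside range_min/range_max before calling the driver, the hand-written year checks these drivers used to carry (removed above) become redundant. A minimal sketch of the pattern, where my_rtc_ops is a hypothetical ops table:

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;
	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;	/* core enforces these */
	rtc->range_max = RTC_TIMESTAMP_END_2099;

	return rtc_register_device(rtc);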
static int vt8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index 8ad4c4e6d557..ff46066a68a4 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -110,10 +110,12 @@ static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm)
tm->tm_mday = rtc.day;
tm->tm_mon = rtc.month - 1;
tm->tm_year = rtc.year + (rtc.century * 100) - 1900;
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+ /* Ignore other tm fields, man rtc says userspace shouldn't use them. */
- /* Don't compute day of week, we don't need it. */
- tm->tm_wday = -1;
+ if (rtc_valid_tm(tm)) {
+ dev_err(dev, "Time from RTC is invalid: %ptRr\n", tm);
+ return -EIO;
+ }
return 0;
}
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 9683fbf7c78d..96db441f92b3 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -34,7 +34,6 @@
struct xgene_rtc_dev {
struct rtc_device *rtc;
- struct device *dev;
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
@@ -137,7 +136,6 @@ static irqreturn_t xgene_rtc_interrupt(int irq, void *id)
static int xgene_rtc_probe(struct platform_device *pdev)
{
struct xgene_rtc_dev *pdata;
- struct resource *res;
int ret;
int irq;
@@ -145,10 +143,8 @@ static int xgene_rtc_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
platform_set_drvdata(pdev, pdata);
- pdata->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->csr_base = devm_ioremap_resource(&pdev->dev, res);
+ pdata->csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->csr_base))
return PTR_ERR(pdata->csr_base);
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 2c762757fb54..539690568298 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -44,7 +44,7 @@ struct xlnx_rtc_dev {
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
- int calibval;
+ unsigned int calibval;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -195,7 +195,6 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
static int xlnx_rtc_probe(struct platform_device *pdev)
{
struct xlnx_rtc_dev *xrtcdev;
- struct resource *res;
int ret;
xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
@@ -211,9 +210,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
xrtcdev->rtc->ops = &xlnx_rtc_ops;
xrtcdev->rtc->range_max = U32_MAX;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- xrtcdev->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ xrtcdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xrtcdev->reg_base))
return PTR_ERR(xrtcdev->reg_base);
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index be3531e7f868..b7ca7d79fb28 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -103,8 +103,11 @@ static DEVICE_ATTR_RW(max_user_freq);
/**
* rtc_sysfs_show_hctosys - indicate if the given RTC set the system time
+ * @dev: The device that the attribute belongs to.
+ * @attr: The attribute being read.
+ * @buf: The result buffer.
*
- * Returns 1 if the system clock was set by this RTC at the last
+ * buf is "1" if the system clock was set by this RTC at the last
* boot or resume event.
*/
static ssize_t
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index ea4253939555..8abb42923307 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -341,14 +341,14 @@ tapechar_release(struct inode *inode, struct file *filp)
*/
static int
__tapechar_ioctl(struct tape_device *device,
- unsigned int no, unsigned long data)
+ unsigned int no, void __user *data)
{
int rc;
if (no == MTIOCTOP) {
struct mtop op;
- if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
+ if (copy_from_user(&op, data, sizeof(op)) != 0)
return -EFAULT;
if (op.mt_count < 0)
return -EINVAL;
@@ -392,9 +392,7 @@ __tapechar_ioctl(struct tape_device *device,
if (rc < 0)
return rc;
pos.mt_blkno = rc;
- if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
- return -EFAULT;
- return 0;
+ return put_user_mtpos(data, &pos);
}
if (no == MTIOCGET) {
/* MTIOCGET: query the tape drive status. */
@@ -424,15 +422,12 @@ __tapechar_ioctl(struct tape_device *device,
get.mt_blkno = rc;
}
- if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
- return -EFAULT;
-
- return 0;
+ return put_user_mtget(data, &get);
}
/* Try the discipline ioctl function. */
if (device->discipline->ioctl_fn == NULL)
return -EINVAL;
- return device->discipline->ioctl_fn(device, no, data);
+ return device->discipline->ioctl_fn(device, no, (unsigned long)data);
}
static long
@@ -445,7 +440,7 @@ tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
device = (struct tape_device *) filp->private_data;
mutex_lock(&device->mutex);
- rc = __tapechar_ioctl(device, no, data);
+ rc = __tapechar_ioctl(device, no, (void __user *)data);
mutex_unlock(&device->mutex);
return rc;
}
@@ -455,23 +450,17 @@ static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
- int rval = -ENOIOCTLCMD;
- unsigned long argp;
+ long rc;
- /* The 'arg' argument of any ioctl function may only be used for
- * pointers because of the compat pointer conversion.
- * Consider this when adding new ioctls.
- */
- argp = (unsigned long) compat_ptr(data);
- if (device->discipline->ioctl_fn) {
- mutex_lock(&device->mutex);
- rval = device->discipline->ioctl_fn(device, no, argp);
- mutex_unlock(&device->mutex);
- if (rval == -EINVAL)
- rval = -ENOIOCTLCMD;
- }
+ if (no == MTIOCPOS32)
+ no = MTIOCPOS;
+ else if (no == MTIOCGET32)
+ no = MTIOCGET;
- return rval;
+ mutex_lock(&device->mutex);
+ rc = __tapechar_ioctl(device, no, compat_ptr(data));
+ mutex_unlock(&device->mutex);
+ return rc;
}
#endif /* CONFIG_COMPAT */
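A condensed sketch of the compat-ioctl pattern adopted above: remap the 32-bit command numbers to their native equivalents, then reuse the native handler, with compat_ptr() converting the user pointer. The example_ names are hypothetical.

static long example_compat_ioctl(struct file *filp, unsigned int no,
				 unsigned long data)
{
	/* 32-bit userspace encodes a different struct size in the cmd */
	if (no == MTIOCPOS32)
		no = MTIOCPOS;
	else if (no == MTIOCGET32)
		no = MTIOCGET;

	/* compat_ptr() widens the 32-bit user pointer correctly */
	return example_do_ioctl(filp, no, compat_ptr(data));
}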
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index f34ee41cbed8..4f4dd9d727c9 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -61,6 +61,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@@ -91,6 +92,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index 9dda431ec8f3..352056eb0dd1 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -5,6 +5,6 @@
zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_dbf.o zfcp_erp.o \
zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
- zfcp_unit.o
+ zfcp_unit.o zfcp_diag.o
obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index e390f8c6d5f3..09ec846fe01d 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
/*
@@ -25,6 +25,7 @@
* Martin Petermann
* Sven Schuetz
* Steffen Maier
+ * Benjamin Block
*/
#define KMSG_COMPONENT "zfcp"
@@ -36,6 +37,7 @@
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_BUS_ID_SIZE 20
@@ -356,6 +358,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->erp_action.adapter = adapter;
+ if (zfcp_diag_adapter_setup(adapter))
+ goto failed;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -402,6 +407,9 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
&zfcp_sysfs_adapter_attrs))
goto failed;
+ if (zfcp_diag_sysfs_setup(adapter))
+ goto failed;
+
/* report size limit per scatter-gather segment */
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
@@ -426,6 +434,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
zfcp_fc_wka_ports_force_offline(adapter->gs);
zfcp_scsi_adapter_unregister(adapter);
+ zfcp_diag_sysfs_destroy(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
@@ -449,6 +458,7 @@ void zfcp_adapter_release(struct kref *ref)
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
zfcp_fc_gs_destroy(adapter);
zfcp_free_low_mem_buffers(adapter);
+ zfcp_diag_adapter_free(adapter);
kfree(adapter->req_list);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index dccdb41bed8c..1234294700c4 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -95,11 +95,9 @@ void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
FSF_STATUS_QUALIFIER_SIZE);
- if (q_head->fsf_command != FSF_QTCB_FCP_CMND) {
- rec->pl_len = q_head->log_length;
- zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
- rec->pl_len, "fsf_res", req->req_id);
- }
+ rec->pl_len = q_head->log_length;
+ zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
+ rec->pl_len, "fsf_res", req->req_id);
debug_event(dbf->hba, level, rec, sizeof(*rec));
spin_unlock_irqrestore(&dbf->hba_lock, flags);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 87d2f47a6990..8cc0eefe4ccc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -4,7 +4,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_DEF_H
@@ -86,6 +86,7 @@
#define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
#define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
#define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
+#define ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE 0x00020000
/************************* STRUCTURE DEFINITIONS *****************************/
@@ -197,6 +198,7 @@ struct zfcp_adapter {
struct device_dma_parameters dma_parms;
struct zfcp_fc_events events;
unsigned long next_port_scan;
+ struct zfcp_diag_adapter *diagnostics;
};
struct zfcp_port {
diff --git a/drivers/s390/scsi/zfcp_diag.c b/drivers/s390/scsi/zfcp_diag.c
new file mode 100644
index 000000000000..67a8f4e57db1
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * zfcp device driver
+ *
+ * Functions to handle diagnostics.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/kernfs.h>
+#include <linux/sysfs.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "zfcp_diag.h"
+#include "zfcp_ext.h"
+#include "zfcp_def.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(__zfcp_diag_publish_wait);
+
+/**
+ * zfcp_diag_adapter_setup() - Set up storage for adapter diagnostics.
+ * @adapter: the adapter to setup diagnostics for.
+ *
+ * Creates the data-structures to store the diagnostics for an adapter. This
+ * overwrites whatever was stored before at &zfcp_adapter->diagnostics!
+ *
+ * Return:
+ * * 0 - Everything is OK
+ * * -ENOMEM - Could not allocate all/parts of the data-structures;
+ * &zfcp_adapter->diagnostics remains unchanged
+ */
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter *diag;
+ struct zfcp_diag_header *hdr;
+
+ diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+ if (diag == NULL)
+ return -ENOMEM;
+
+ diag->max_age = (5 * 1000); /* default value: 5 s */
+
+ /* setup header for port_data */
+ hdr = &diag->port_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->port_data.data;
+ hdr->buffer_size = sizeof(diag->port_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ /* setup header for config_data */
+ hdr = &diag->config_data.header;
+
+ spin_lock_init(&hdr->access_lock);
+ hdr->buffer = &diag->config_data.data;
+ hdr->buffer_size = sizeof(diag->config_data.data);
+ /* set the timestamp so that the first test on age will always fail */
+ hdr->timestamp = jiffies - msecs_to_jiffies(diag->max_age);
+
+ adapter->diagnostics = diag;
+ return 0;
+}
+
+/**
+ * zfcp_diag_adapter_free() - Frees all adapter diagnostics allocations.
+ * @adapter: the adapter whose diagnostic structures should be freed.
+ *
+ * Frees all data-structures in the given adapter that store diagnostics
+ * information. Can safely be called with partially set up diagnostics.
+ */
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
+{
+ kfree(adapter->diagnostics);
+ adapter->diagnostics = NULL;
+}
+
+/**
+ * zfcp_diag_sysfs_setup() - Set up the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter to which the group should be added.
+ *
+ * Return: 0 on success; something else otherwise (see sysfs_create_group()).
+ */
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
+{
+ int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+ if (rc == 0)
+ adapter->diagnostics->sysfs_established = 1;
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
+ * @adapter: target adapter from which the group should be removed.
+ */
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
+{
+ if (adapter->diagnostics == NULL ||
+ !adapter->diagnostics->sysfs_established)
+ return;
+
+ /*
+ * We need this state-handling so we can prevent warnings being printed
+ * on the kernel-console in case we have to abort a halfway done
+ * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
+ * established. sysfs_remove_group() does this checking as well, but
+ * still prints a warning in case we try to remove a group that has not
+ * been established before.
+ */
+ adapter->diagnostics->sysfs_established = 0;
+ sysfs_remove_group(&adapter->ccw_device->dev.kobj,
+ &zfcp_sysfs_diag_attr_group);
+}
+
+
+/**
+ * zfcp_diag_update_xdata() - Update a diagnostics buffer.
+ * @hdr: the meta data to update.
+ * @data: data to use for the update.
+ * @incomplete: flag stating whether the data in @data is incomplete.
+ */
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete)
+{
+ const unsigned long capture_timestamp = jiffies;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ /* make sure we never go into the past with an update */
+ if (!time_after_eq(capture_timestamp, hdr->timestamp))
+ goto out;
+
+ hdr->timestamp = capture_timestamp;
+ hdr->incomplete = incomplete;
+ memcpy(hdr->buffer, data, hdr->buffer_size);
+out:
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+}
+
+/**
+ * zfcp_diag_update_port_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Port Data.
+ * @adapter: Adapter to collect Port Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new Diagnostics and updated the buffer;
+ * this also includes cases where data was retrieved, but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_port_data_sync() for possible error-codes (
+ * excluding -EAGAIN)
+ */
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_port_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_port_data_handler() */
+
+ return rc;
+}
+
+/**
+ * zfcp_diag_update_config_data_buffer() - Implementation of
+ * &typedef zfcp_diag_update_buffer_func
+ * to collect and update Config Data.
+ * @adapter: Adapter to collect Config Data from.
+ *
+ * This call is SYNCHRONOUS! It blocks until the respective command has
+ * finished completely, or has failed in some way.
+ *
+ * Return:
+ * * 0 - Successfully retrieved new Diagnostics and updated the buffer;
+ * this also includes cases where data was retrieved, but
+ * incomplete; you'll have to check the flag ``incomplete``
+ * of &struct zfcp_diag_header.
+ * * see zfcp_fsf_exchange_config_data_sync() for possible error-codes (
+ * excluding -EAGAIN)
+ */
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter)
+{
+ int rc;
+
+ rc = zfcp_fsf_exchange_config_data_sync(adapter->qdio, NULL);
+ if (rc == -EAGAIN)
+ rc = 0; /* signaling incomplete via struct zfcp_diag_header */
+
+ /* buffer-data was updated in zfcp_fsf_exchange_config_data_handler() */
+
+ return rc;
+}
+
+static int __zfcp_diag_update_buffer(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update,
+ unsigned long *const flags)
+ __must_hold(hdr->access_lock)
+{
+ int rc;
+
+ if (hdr->updating == 1) {
+ rc = wait_event_interruptible_lock_irq(__zfcp_diag_publish_wait,
+ hdr->updating == 0,
+ hdr->access_lock);
+ rc = (rc == 0 ? -EAGAIN : -EINTR);
+ } else {
+ hdr->updating = 1;
+ spin_unlock_irqrestore(&hdr->access_lock, *flags);
+
+ /* unlocked, because update function sleeps */
+ rc = buffer_update(adapter);
+
+ spin_lock_irqsave(&hdr->access_lock, *flags);
+ hdr->updating = 0;
+
+ /*
+ * every thread waiting here went via an interruptible wait,
+ * so it's fine to only wake those
+ */
+ wake_up_interruptible_all(&__zfcp_diag_publish_wait);
+ }
+
+ return rc;
+}
+
+static bool
+__zfcp_diag_test_buffer_age_isfresh(const struct zfcp_diag_adapter *const diag,
+ const struct zfcp_diag_header *const hdr)
+ __must_hold(hdr->access_lock)
+{
+ const unsigned long now = jiffies;
+
+ /*
+ * Should not happen (data is from the future). If it does, still
+ * signal that it needs a refresh.
+ */
+ if (!time_after_eq(now, hdr->timestamp))
+ return false;
+
+ if (jiffies_to_msecs(now - hdr->timestamp) >= diag->max_age)
+ return false;
+
+ return true;
+}
+
+/**
+ * zfcp_diag_update_buffer_limited() - Collect diagnostics and update a
+ * diagnostics buffer rate limited.
+ * @adapter: Adapter to collect the diagnostics from.
+ * @hdr: buffer-header to update with the collected diagnostics.
+ * @buffer_update: Specific implementation for collecting and updating.
+ *
+ * This function will cause an update of the given @hdr by calling the also
+ * given @buffer_update function. If called by multiple sources at the same
+ * time, it will synchronize the update by only allowing one source to call
+ * @buffer_update and the others to wait for that source to complete instead
+ * (the wait is interruptible).
+ *
+ * Additionally, this version is rate-limited: it only exits once the buffer
+ * is fresh enough (doing nothing if the buffer was fresh enough to begin
+ * with), or once the source/thread that started this update is the one that
+ * made the update (to prevent endless loops).
+ *
+ * Return:
+ * * 0 - If the update was successfully published and/or the buffer is
+ * fresh enough
+ * * -EINTR - If the thread went into the wait-state and was interrupted
+ * * whatever @buffer_update returns
+ */
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&hdr->access_lock, flags);
+
+ for (rc = 0;
+ !__zfcp_diag_test_buffer_age_isfresh(adapter->diagnostics, hdr);
+ rc = 0) {
+ rc = __zfcp_diag_update_buffer(adapter, hdr, buffer_update,
+ &flags);
+ if (rc != -EAGAIN)
+ break;
+ }
+
+ spin_unlock_irqrestore(&hdr->access_lock, flags);
+
+ return rc;
+}
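zfcp_diag_update_buffer_limited() above implements a single-flight refresh: one caller sets the updating flag and drops the lock while collecting, everyone else sleeps interruptibly and re-checks freshness after being woken. A condensed, hypothetical sketch of the same pattern:

struct example_cache {
	spinlock_t lock;
	wait_queue_head_t wq;
	bool updating;
	/* ... cached payload and freshness timestamp ... */
};

static int example_refresh_once(struct example_cache *c)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&c->lock, flags);
	while (!example_is_fresh(c)) {
		if (c->updating) {
			/* another thread refreshes; wait, then re-check */
			rc = wait_event_interruptible_lock_irq(c->wq,
						!c->updating, c->lock);
			if (rc)
				break;		/* interrupted by a signal */
			continue;
		}
		c->updating = true;
		spin_unlock_irqrestore(&c->lock, flags);
		rc = example_collect(c);	/* may sleep, lock dropped */
		spin_lock_irqsave(&c->lock, flags);
		c->updating = false;
		wake_up_interruptible_all(&c->wq);
		break;
	}
	spin_unlock_irqrestore(&c->lock, flags);
	return rc;
}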
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
new file mode 100644
index 000000000000..b9c93d15f67c
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * zfcp device driver
+ *
+ * Definitions for handling diagnostics in the zfcp device driver.
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef ZFCP_DIAG_H
+#define ZFCP_DIAG_H
+
+#include <linux/spinlock.h>
+
+#include "zfcp_fsf.h"
+#include "zfcp_def.h"
+
+/**
+ * struct zfcp_diag_header - general part of a diagnostic buffer.
+ * @access_lock: lock protecting all the data in this buffer.
+ * @updating: flag showing that an update for this buffer is currently running.
+ * @incomplete: flag showing that the data in @buffer is incomplete.
+ * @timestamp: time in jiffies when the data of this buffer was last captured.
+ * @buffer: implementation-dependent data of this buffer.
+ * @buffer_size: size of @buffer.
+ */
+struct zfcp_diag_header {
+ spinlock_t access_lock;
+
+ /* Flags */
+ u64 updating :1;
+ u64 incomplete :1;
+
+ unsigned long timestamp;
+
+ void *buffer;
+ size_t buffer_size;
+};
+
+/**
+ * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
+ * adapter.
+ * @sysfs_established: flag showing that the associated sysfs-group was created
+ * during run of zfcp_adapter_enqueue().
+ * @max_age: maximum age of data in diagnostic buffers before they need to be
+ * refreshed (in ms).
+ * @port_data: data retrieved using exchange port data.
+ * @port_data.header: header with metadata for the cache in @port_data.data.
+ * @port_data.data: cached QTCB Bottom of command exchange port data.
+ * @config_data: data retrieved using exchange config data.
+ * @config_data.header: header with metadata for the cache in @config_data.data.
+ * @config_data.data: cached QTCB Bottom of command exchange config data.
+ */
+struct zfcp_diag_adapter {
+ u64 sysfs_established :1;
+
+ unsigned long max_age;
+
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_port data;
+ } port_data;
+ struct {
+ struct zfcp_diag_header header;
+ struct fsf_qtcb_bottom_config data;
+ } config_data;
+};
+
+int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
+void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
+
+void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
+ const void *const data, const bool incomplete);
+
+/*
+ * Function type used in zfcp_diag_update_buffer_limited() for the function
+ * that does the buffer-implementation-dependent work.
+ */
+typedef int (*zfcp_diag_update_buffer_func)(struct zfcp_adapter *const adapter);
+
+int zfcp_diag_update_config_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_port_data_buffer(struct zfcp_adapter *const adapter);
+int zfcp_diag_update_buffer_limited(struct zfcp_adapter *const adapter,
+ struct zfcp_diag_header *const hdr,
+ zfcp_diag_update_buffer_func buffer_update);
+
+/**
+ * zfcp_diag_support_sfp() - Return %true if the @adapter supports reporting
+ * SFP Data.
+ * @adapter: adapter to test the availability of SFP Data reporting for.
+ */
+static inline bool
+zfcp_diag_support_sfp(const struct zfcp_adapter *const adapter)
+{
+ return !!(adapter->adapter_features & FSF_FEATURE_REPORT_SFP_DATA);
+}
+
+#endif /* ZFCP_DIAG_H */
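A minimal usage sketch for the interface declared above, assuming a hypothetical caller that wants a fresh port-data snapshot:

static int example_get_port_diag(struct zfcp_adapter *adapter)
{
	struct zfcp_diag_header *hdr =
		&adapter->diagnostics->port_data.header;
	int rc;

	rc = zfcp_diag_update_buffer_limited(
		adapter, hdr, zfcp_diag_update_port_data_buffer);
	if (rc)
		return rc;

	/* hdr->buffer now holds a struct fsf_qtcb_bottom_port snapshot;
	 * hold hdr->access_lock while reading it */
	return 0;
}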
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 96f0d34e9459..93655b85b73f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -174,7 +174,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
- p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
@@ -190,7 +190,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
a_status = atomic_read(&adapter->status);
if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
- a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
+ a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
return 0;
if (p_status & ZFCP_STATUS_COMMON_NOESC)
return need;
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 31e8a7240fd7..c8556787cfdc 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -167,6 +167,7 @@ extern const struct attribute_group *zfcp_port_attr_groups[];
extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+extern const struct attribute_group zfcp_sysfs_diag_attr_group;
bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
/* zfcp_unit.c */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index cf63916814cc..223a805f0b0b 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
@@ -19,6 +20,7 @@
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
@@ -554,6 +556,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_diag_header *const diag_hdr =
+ &adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
struct Scsi_Host *shost = adapter->scsi_host;
@@ -570,6 +574,12 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update till the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -585,6 +595,9 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
fc_host_node_name(shost) = 0;
fc_host_port_name(shost) = 0;
fc_host_port_id(shost) = 0;
@@ -653,16 +666,28 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
+ struct zfcp_diag_header *const diag_hdr =
+ &req->adapter->diagnostics->port_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
+ struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
switch (qtcb->header.fsf_status) {
case FSF_GOOD:
+ /*
+ * usually we wait with an update till the cache is too old,
+ * but because we have the data available, update it anyway
+ */
+ zfcp_diag_update_xdata(diag_hdr, bottom, false);
+
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
+ zfcp_diag_update_xdata(diag_hdr, bottom, true);
+ req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
+
zfcp_fsf_exchange_port_evaluate(req);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
@@ -1261,7 +1286,8 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
req->erp_action = erp_action;
req->handler = zfcp_fsf_exchange_config_data_handler;
erp_action->fsf_req_id = req->req_id;
@@ -1278,6 +1304,19 @@ out:
return retval;
}
+
+/**
+ * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Config Data was successful, @data is complete
+ * * -EIO - Exchange Config Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_config *data)
{
@@ -1301,7 +1340,8 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
req->qtcb->bottom.config.feature_selection =
FSF_FEATURE_NOTIFICATION_LOST |
- FSF_FEATURE_UPDATE_ALERT;
+ FSF_FEATURE_UPDATE_ALERT |
+ FSF_FEATURE_REQUEST_SFP_DATA;
if (data)
req->data = data;
@@ -1309,9 +1349,16 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
+
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
@@ -1369,10 +1416,17 @@ out:
}
/**
- * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @qdio: pointer to struct zfcp_qdio
- * @data: pointer to struct fsf_qtcb_bottom_port
- * Returns: 0 on success, error otherwise
+ * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
+ * @qdio: pointer to the QDIO-Queue to use for sending the command.
+ * @data: pointer to the QTCB-Bottom for storing the result of the command,
+ * might be %NULL.
+ *
+ * Returns:
+ * * 0 - Exchange Port Data was successful, @data is complete
+ * * -EIO - Exchange Port Data was not successful, @data is invalid
+ * * -EAGAIN - @data contains incomplete data
+ * * -ENOMEM - Some memory allocation failed along the way
+ * * -EOPNOTSUPP - This operation is not supported
*/
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
struct fsf_qtcb_bottom_port *data)
@@ -1408,10 +1462,15 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
if (!retval) {
/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+
+ if (req->status &
+ (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
+ retval = -EIO;
+ else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
+ retval = -EAGAIN;
}
zfcp_fsf_req_free(req);
-
return retval;
out_unlock:
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2c658b66318c..2b1e4da1944f 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -163,6 +163,8 @@
#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
#define FSF_FEATURE_UPDATE_ALERT 0x00000100
#define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+#define FSF_FEATURE_REQUEST_SFP_DATA 0x00000200
+#define FSF_FEATURE_REPORT_SFP_DATA 0x00000800
#define FSF_FEATURE_DIF_PROT_TYPE1 0x00010000
#define FSF_FEATURE_DIX_PROT_TCPIP 0x00020000
@@ -407,7 +409,24 @@ struct fsf_qtcb_bottom_port {
u8 cp_util;
u8 cb_util;
u8 a_util;
- u8 res2[253];
+ u8 res2;
+ u16 temperature;
+ u16 vcc;
+ u16 tx_bias;
+ u16 tx_power;
+ u16 rx_power;
+ union {
+ u16 raw;
+ struct {
+ u16 fec_active :1;
+ u16:7;
+ u16 connector_type :2;
+ u16 sfp_invalid :1;
+ u16 optical_port :1;
+ u16 port_tx_type :4;
+ };
+ } sfp_flags;
+ u8 res3[240];
} __attribute__ ((packed));
union fsf_qtcb_bottom {
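A hedged sketch of consuming the new sfp_flags union above, via either the named bitfields or the raw halfword; any byte-order handling the channel requires is elided, and the helper name is hypothetical.

static void example_dump_sfp(const struct fsf_qtcb_bottom_port *bottom)
{
	if (bottom->sfp_flags.sfp_invalid)
		return;				/* SFP readings unusable */

	pr_info("optical=%u fec_active=%u connector=%u flags=0x%04x\n",
		bottom->sfp_flags.optical_port,
		bottom->sfp_flags.fec_active,
		bottom->sfp_flags.connector_type,
		bottom->sfp_flags.raw);
}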
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e9ded2befa0d..3910d529c15a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -605,7 +605,7 @@ zfcp_scsi_get_fc_host_stats(struct Scsi_Host *host)
return NULL;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret) {
+ if (ret != 0 && ret != -EAGAIN) {
kfree(data);
return NULL;
}
@@ -634,7 +634,7 @@ static void zfcp_scsi_reset_fc_host_stats(struct Scsi_Host *shost)
return;
ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
- if (ret)
+ if (ret != 0 && ret != -EAGAIN)
kfree(data);
else {
adapter->stats_reset = jiffies/HZ;
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index af197e2b3e69..494b9fe9cc94 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
+#include "zfcp_diag.h"
#include "zfcp_ext.h"
#define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \
@@ -325,6 +326,50 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
zfcp_sysfs_port_remove_store);
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ /* ceil(log(2^64 - 1) / log(10)) = 20 */
+ rc = scnprintf(buf, 20 + 2, "%lu\n", adapter->diagnostics->max_age);
+
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+
+static ssize_t
+zfcp_sysfs_adapter_diag_max_age_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ unsigned long max_age;
+ ssize_t rc;
+
+ if (!adapter)
+ return -ENODEV;
+
+ rc = kstrtoul(buf, 10, &max_age);
+ if (rc != 0)
+ goto out;
+
+ adapter->diagnostics->max_age = max_age;
+
+ rc = count;
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter, diag_max_age, 0644,
+ zfcp_sysfs_adapter_diag_max_age_show,
+ zfcp_sysfs_adapter_diag_max_age_store);
+
static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_failed.attr,
&dev_attr_adapter_in_recovery.attr,
@@ -337,6 +382,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
&dev_attr_adapter_lic_version.attr,
&dev_attr_adapter_status.attr,
&dev_attr_adapter_hardware_version.attr,
+ &dev_attr_adapter_diag_max_age.attr,
NULL
};
@@ -577,7 +623,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
qtcb_port->cb_util, qtcb_port->a_util);
kfree(qtcb_port);
@@ -603,7 +649,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev,
return -ENOMEM;
retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
- if (!retval)
+ if (retval == 0 || retval == -EAGAIN)
*stat_inf = qtcb_config->stat_info;
kfree(qtcb_config);
@@ -664,3 +710,123 @@ struct device_attribute *zfcp_sysfs_shost_attrs[] = {
&dev_attr_queue_full,
NULL
};
+
+static ssize_t zfcp_sysfs_adapter_diag_b2b_credit_show(
+ struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(to_ccwdev(dev));
+ struct zfcp_diag_header *diag_hdr;
+ struct fc_els_flogi *nsp;
+ ssize_t rc = -ENOLINK;
+ unsigned long flags;
+ unsigned int status;
+
+ if (!adapter)
+ return -ENODEV;
+
+ status = atomic_read(&adapter->status);
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) ||
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) ||
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED))
+ goto out;
+
+ diag_hdr = &adapter->diagnostics->config_data.header;
+
+ rc = zfcp_diag_update_buffer_limited(
+ adapter, diag_hdr, zfcp_diag_update_config_data_buffer);
+ if (rc != 0)
+ goto out;
+
+ spin_lock_irqsave(&diag_hdr->access_lock, flags);
+ /* nport_serv_param doesn't contain the ELS_Command code */
+ nsp = (struct fc_els_flogi *)((unsigned long)
+ adapter->diagnostics->config_data
+ .data.nport_serv_param -
+ sizeof(u32));
+
+ rc = scnprintf(buf, 5 + 2, "%hu\n",
+ be16_to_cpu(nsp->fl_csp.sp_bb_cred));
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags);
+
+out:
+ zfcp_ccw_adapter_put(adapter);
+ return rc;
+}
+static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
+ zfcp_sysfs_adapter_diag_b2b_credit_show, NULL);
+
+#define ZFCP_DEFINE_DIAG_SFP_ATTR(_name, _qtcb_member, _prtsize, _prtfmt) \
+ static ssize_t zfcp_sysfs_adapter_diag_sfp_##_name##_show( \
+ struct device *dev, struct device_attribute *attr, char *buf) \
+ { \
+ struct zfcp_adapter *const adapter = \
+ zfcp_ccw_adapter_by_cdev(to_ccwdev(dev)); \
+ struct zfcp_diag_header *diag_hdr; \
+ ssize_t rc = -ENOLINK; \
+ unsigned long flags; \
+ unsigned int status; \
+ \
+ if (!adapter) \
+ return -ENODEV; \
+ \
+ status = atomic_read(&adapter->status); \
+ if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || \
+ 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || \
+ 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED)) \
+ goto out; \
+ \
+ if (!zfcp_diag_support_sfp(adapter)) { \
+ rc = -EOPNOTSUPP; \
+ goto out; \
+ } \
+ \
+ diag_hdr = &adapter->diagnostics->port_data.header; \
+ \
+ rc = zfcp_diag_update_buffer_limited( \
+ adapter, diag_hdr, zfcp_diag_update_port_data_buffer); \
+ if (rc != 0) \
+ goto out; \
+ \
+ spin_lock_irqsave(&diag_hdr->access_lock, flags); \
+ rc = scnprintf( \
+ buf, (_prtsize) + 2, _prtfmt "\n", \
+ adapter->diagnostics->port_data.data._qtcb_member); \
+ spin_unlock_irqrestore(&diag_hdr->access_lock, flags); \
+ \
+ out: \
+ zfcp_ccw_adapter_put(adapter); \
+ return rc; \
+ } \
+ static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
+ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
+
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(rx_power, rx_power, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(port_tx_type, sfp_flags.port_tx_type, 2, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(optical_port, sfp_flags.optical_port, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(sfp_invalid, sfp_flags.sfp_invalid, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(connector_type, sfp_flags.connector_type, 1, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(fec_active, sfp_flags.fec_active, 1, "%hu");
+
+static struct attribute *zfcp_sysfs_diag_attrs[] = {
+ &dev_attr_adapter_diag_sfp_temperature.attr,
+ &dev_attr_adapter_diag_sfp_vcc.attr,
+ &dev_attr_adapter_diag_sfp_tx_bias.attr,
+ &dev_attr_adapter_diag_sfp_tx_power.attr,
+ &dev_attr_adapter_diag_sfp_rx_power.attr,
+ &dev_attr_adapter_diag_sfp_port_tx_type.attr,
+ &dev_attr_adapter_diag_sfp_optical_port.attr,
+ &dev_attr_adapter_diag_sfp_sfp_invalid.attr,
+ &dev_attr_adapter_diag_sfp_connector_type.attr,
+ &dev_attr_adapter_diag_sfp_fec_active.attr,
+ &dev_attr_adapter_diag_b2b_credit.attr,
+ NULL,
+};
+
+const struct attribute_group zfcp_sysfs_diag_attr_group = {
+ .name = "diagnostics",
+ .attrs = zfcp_sysfs_diag_attrs,
+};
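Because the group above carries a .name, sysfs_create_group() places every attribute in a "diagnostics" subdirectory of the adapter's device node rather than alongside the other adapter attributes. An illustration, assuming file names follow from the _name argument to ZFCP_DEV_ATTR:

/* Illustrative only: the named group yields paths such as
 *   <device>/diagnostics/b2b_credit
 *   <device>/diagnostics/temperature
 * removal must use the same group, as zfcp_diag_sysfs_destroy() does
 */
int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
			    &zfcp_sysfs_diag_attr_group);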
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 971fe074d7c9..fad936eb845f 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -156,7 +156,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static const struct file_operations d7s_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = d7s_ioctl,
- .compat_ioctl = d7s_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = d7s_open,
.release = d7s_release,
.llseek = noop_llseek,
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index a63d5e402ff2..12d66aa61ede 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -715,9 +715,7 @@ static const struct file_operations envctrl_fops = {
.owner = THIS_MODULE,
.read = envctrl_read,
.unlocked_ioctl = envctrl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = envctrl_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = envctrl_open,
.release = envctrl_release,
.llseek = noop_llseek,
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2b1e0d503020..fb6444d0409c 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1049,9 +1049,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
static const struct file_operations tw_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = tw_chrdev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = tw_chrdev_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = tw_chrdev_open,
.release = NULL,
.llseek = noop_llseek,
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 536426f25e86..f2f7e6e76c07 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@@ -172,6 +175,19 @@ static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
}
}
+static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
+{
+ int resid = cmd->SCp.this_residual;
+ struct scatterlist *s = cmd->SCp.buffer;
+
+ if (s)
+ while (!sg_is_last(s)) {
+ s = sg_next(s);
+ resid += s->length;
+ }
+ scsi_set_resid(cmd, resid);
+}
+
/**
* NCR5380_poll_politely2 - wait for two chip register values
* @hostdata: host private data
@@ -954,7 +970,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
- cmd->cmnd[0] != REQUEST_SENSE;
+ cmd->cmnd[0] != REQUEST_SENSE &&
+ (disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -1379,7 +1396,7 @@ static void do_reset(struct Scsi_Host *instance)
* MESSAGE OUT phase and sending an ABORT message.
* @instance: relevant scsi host instance
*
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative error code on failure.
*/
static int do_abort(struct Scsi_Host *instance)
@@ -1404,7 +1421,7 @@ static int do_abort(struct Scsi_Host *instance)
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
@@ -1415,7 +1432,7 @@ static int do_abort(struct Scsi_Host *instance)
ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
if (rc < 0)
- goto timeout;
+ goto out;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
}
@@ -1424,17 +1441,17 @@ static int do_abort(struct Scsi_Host *instance)
len = 1;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+ if (len)
+ rc = -ENXIO;
/*
* If we got here, and the command completed successfully,
* we're about to go into bus free state.
*/
- return len ? -1 : 0;
-
-timeout:
+out:
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- return -1;
+ return rc;
}
/*
@@ -1803,6 +1820,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result |= cmd->SCp.Status;
cmd->result |= cmd->SCp.Message << 8;
+ set_resid_from_SCp(cmd);
+
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
@@ -2264,7 +2283,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
hostdata->dma_len = 0;
- if (do_abort(instance)) {
+ if (do_abort(instance) < 0) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
result = FAILED;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 0ed3f806ace5..e36608ce937a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1477,6 +1477,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
struct aac_srb * srbcmd;
u32 flag;
u32 timeout;
+ struct aac_dev *dev = fib->dev;
aac_fib_init(fib);
switch(cmd->sc_data_direction){
@@ -1503,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->flags = cpu_to_le32(flag);
timeout = cmd->request->timeout/HZ;
if (timeout == 0)
- timeout = 1;
+ timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
srbcmd->retry_limit = 0; /* Obsolete parameter */
srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
@@ -2467,13 +2468,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2559,13 +2560,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
- return 1;
+ return 0;
}
dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fa03230f6ba..e3e4ecbea726 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -85,7 +85,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BUILD 50983
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -108,6 +108,8 @@ enum {
#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define AAC_SA_TIMEOUT 180
+#define AAC_ARC_TIMEOUT 60
#define get_bus_number(x) (x/AAC_MAX_TARGETS)
#define get_target_number(x) (x%AAC_MAX_TARGETS)
@@ -1328,7 +1330,7 @@ struct fib {
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
+#define AAC_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
@@ -1601,6 +1603,7 @@ struct aac_dev
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
struct delayed_work safw_rescan_work;
+ struct delayed_work src_reinit_aif_worker;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1673,6 +1676,7 @@ struct aac_dev
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
+ u8 soft_reset_support;
};
#define aac_adapter_interrupt(dev) \
@@ -2644,7 +2648,12 @@ int aac_scan_host(struct aac_dev *dev);
static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
{
- schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY);
+}
+
+static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY);
}
static inline void aac_safw_rescan_worker(struct work_struct *work)
@@ -2658,10 +2667,10 @@ static inline void aac_safw_rescan_worker(struct work_struct *work)
aac_scan_host(dev);
}
-static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
{
- if (dev->sa_firmware)
- cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+ cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
/* SCp.phase values */
@@ -2671,6 +2680,7 @@ static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
#define AAC_OWNER_FIRMWARE 0x106
void aac_safw_rescan_worker(struct work_struct *work);
+void aac_src_reinit_aif_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
int aac_setup_safw_adapter(struct aac_dev *dev);
@@ -2728,6 +2738,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index);
static inline int aac_is_src(struct aac_dev *dev)
{
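A hedged sketch of the delayed-work lifecycle the aacraid changes above rely on: initialize once at probe, schedule as needed, and cancel both workers synchronously on every teardown path (remove, suspend, PCI error). The wrapper function is hypothetical; the called helpers are from the hunks above.

/* Hypothetical condensed lifecycle; error handling elided */
static void example_worker_lifecycle(struct aac_dev *dev)
{
	INIT_DELAYED_WORK(&dev->src_reinit_aif_worker,
			  aac_src_reinit_aif_worker);

	/* runs aac_src_reinit_aif_worker() after AAC_RESCAN_DELAY jiffies */
	aac_schedule_src_reinit_aif_worker(dev);

	/* teardown: must not return while a worker can still run */
	aac_cancel_rescan_worker(dev);
}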
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d4fcfa1e54e0..f75878d773cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -571,6 +571,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
else
dev->sa_firmware = 0;
+ if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
+ dev->soft_reset_support = 1;
+ else
+ dev->soft_reset_support = 0;
+
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
(status[2] > dev->base_size)) {
aac_adapter_ioremap(dev, 0);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 2142a649e865..5a8a999606ea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -232,6 +232,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
fibptr->type = FSAFS_NTC_FIB_CONTEXT;
fibptr->callback_data = NULL;
fibptr->callback = NULL;
+ fibptr->flags = 0;
return fibptr;
}
@@ -1463,6 +1464,14 @@ retry_next:
}
}
+static void aac_schedule_bus_scan(struct aac_dev *aac)
+{
+ if (aac->sa_firmware)
+ aac_schedule_safw_scan_worker(aac);
+ else
+ aac_schedule_src_reinit_aif_worker(aac);
+}
+
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
int index, quirks;
@@ -1638,7 +1647,7 @@ out:
*/
if (!retval && !is_kdump_kernel()) {
dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
- aac_schedule_safw_scan_worker(aac);
+ aac_schedule_bus_scan(aac);
}
if (jafo) {
@@ -1959,6 +1968,16 @@ int aac_scan_host(struct aac_dev *dev)
return rcode;
}
+void aac_src_reinit_aif_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, src_reinit_aif_worker);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+ aac_reinit_aif(dev, dev->cardtype);
+}
+
/**
* aac_handle_sa_aif Handle a message from the firmware
* @dev: Which adapter this fib is from
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4a858789e6c5..ee6bc2f9b80a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -391,6 +391,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
int chn, tid;
unsigned int depth = 0;
unsigned int set_timeout = 0;
+ int timeout = 0;
bool set_qd_dev_type = false;
u8 devtype = 0;
@@ -483,10 +484,13 @@ common_config:
/*
* Firmware has an individual device recovery time typically
- * of 35 seconds, give us a margin.
+ * of 35 seconds, give us a margin. Thor devices can take longer in
+ * error recovery, hence the different value.
*/
- if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
- blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ if (set_timeout) {
+ timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+ }
if (depth > 256)
depth = 256;
@@ -608,9 +612,13 @@ static struct device_attribute *aac_dev_attrs[] = {
static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg)
{
+ int retval;
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ retval = aac_adapter_check_health(dev);
+ if (retval)
+ return -EBUSY;
return aac_do_ioctl(dev, cmd, arg);
}
@@ -1585,6 +1593,19 @@ static void aac_init_char(void)
}
}
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
+{
+ /*
+ * Firmware may send AIF messages very early, and the driver may have
+ * ignored them because it was not fully ready to process messages.
+ * Send an AIF to the firmware now so that any unprocessed events can
+ * be processed.
+ */
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
+
+}
+
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
@@ -1682,6 +1703,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&aac->scan_mutex);
INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+ INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
+ aac_src_reinit_aif_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1872,7 +1895,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1931,7 +1954,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1989,7 +2012,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
- aac_cancel_safw_rescan_worker(aac);
+ aac_cancel_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b66e06726c8..787ec9baebb0 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -733,10 +733,20 @@ static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
return ctrl_up;
}
+static void aac_src_drop_io(struct aac_dev *dev)
+{
+ if (!dev->soft_reset_support)
+ return;
+
+ aac_adapter_sync_cmd(dev, DROP_IO,
+ 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
{
aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
NULL, NULL, NULL, NULL);
+ aac_src_drop_io(dev);
}
static void aac_send_iop_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 88053b15c363..db687ef8a99e 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1400,7 +1400,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
, pCCB->acb
, pCCB->startdone
, atomic_read(&acb->ccboutstandingcount));
- return;
+ return;
}
arcmsr_report_ccb_state(acb, pCCB, error);
}
@@ -3476,8 +3476,8 @@ polling_hbc_ccb_retry:
, pCCB->pcmd->device->id
, (u32)pCCB->pcmd->device->lun
, pCCB);
- pCCB->pcmd->result = DID_ABORT << 16;
- arcmsr_ccb_complete(pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
continue;
}
printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d12dd89538df..ddb52e7ba622 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1067,7 +1067,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
* Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
* Params : host - host to finish
* Notes : This is called when a command is:
- * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ * terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT
* : This must not return until all transfers are completed.
*/
static
@@ -1816,7 +1816,7 @@ int acornscsi_reconnect(AS_Host *host)
}
/*
- * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Function: int acornscsi_reconnect_finish(AS_Host *host)
* Purpose : finish reconnecting a command
* Params : host - host to complete
* Returns : 0 if failed
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index e809493d0d06..a82b63a66635 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
- atari_scsi_template.sg_tablesize = SG_NONE;
+ atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- /* Leave sg_tablesize at 0 on a Falcon! */
- if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+ /* Don't increase sg_tablesize on Falcon! */
+ if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e41f0bbdc9fd..c6a752309dda 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1680,7 +1680,7 @@ static struct scsi_host_template atp870u_template = {
.bios_param = atp870u_biosparam /* biosparm */,
.can_queue = qcnt /* can_queue */,
.this_id = 7 /* SCSI ID */,
- .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/ /*SG_NONE*/,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
.max_sectors = ATP870U_MAX_SECTORS,
};
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2f9213b257a4..eb0c76338295 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1487,8 +1487,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return ret;
}
-int
-restart_bfa(struct bfad_s *bfad)
+static int restart_bfa(struct bfad_s *bfad)
{
unsigned long flags;
struct pci_dev *pdev = bfad->pcidev;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 29ab81df75c0..fbfce02e5b93 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
fcstats, bfad_hcb_comp, &fcomp);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- if (rc != BFA_STATUS_OK)
+ if (rc != BFA_STATUS_OK) {
+ kfree(fcstats);
return NULL;
+ }
wait_for_completion(&fcomp.comp);
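The hunk above plugs a small leak in bfad_im_get_stats(): when bfa_port_get_stats() fails, the fcstats buffer allocated earlier in the function was never freed. A minimal userspace sketch of the same rule -- every exit path owns the buffer -- with illustrative names (backend_get_stats() is a stand-in, not a driver function):

#include <stdlib.h>

struct fc_stats { unsigned long long tx_frames, rx_frames; };

/* Hypothetical backend standing in for bfa_port_get_stats(); always fails. */
static int backend_get_stats(struct fc_stats *out)
{
	(void)out;
	return -1;
}

static struct fc_stats *get_stats(void)
{
	struct fc_stats *stats = calloc(1, sizeof(*stats));

	if (!stats)
		return NULL;

	if (backend_get_stats(stats) != 0) {
		free(stats);	/* the fix: release the buffer on the error path too */
		return NULL;
	}
	return stats;
}

int main(void)
{
	struct fc_stats *s = get_stats();

	free(s);	/* NULL-safe; nothing leaked whichever path was taken */
	return 0;
}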
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index e4469df9c469..698f5ebaa0c2 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -813,7 +813,7 @@ struct fcoe_confqe {
/*
- * FCoE conection data base
+ * FCoE connection data base
*/
struct fcoe_conn_db {
#if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 401743e2b429..4c8122a82322 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1242,7 +1242,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
time_left = wait_for_completion_timeout(&io_req->abts_done,
- (2 * rp->r_a_tov + 1) * HZ);
+ msecs_to_jiffies(2 * rp->r_a_tov + 1));
if (time_left)
BNX2FC_IO_DBG(io_req,
"Timed out in eh_abort waiting for abts_done");
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c5fa5f3b00e9..0b28d44d3573 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
- pci_dev_put(hba->pcidev);
if (hba->regview) {
pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
+ pci_dev_put(hba->pcidev);
bnx2i_free_mp_bdt(hba);
bnx2i_release_free_cid_que(hba);
iscsi_host_free(shost);
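The reorder above matters because pci_iounmap() still dereferences hba->pcidev; dropping the last reference first could free the device out from under it. A toy refcount sketch of the put-after-last-use rule (dev_get/dev_put are illustrative stand-ins for pci_dev_get/pci_dev_put):

#include <stdio.h>
#include <stdlib.h>

struct dev {
	int refcount;
	char name[16];
};

static struct dev *dev_get(struct dev *d) { d->refcount++; return d; }

static void dev_put(struct dev *d)
{
	if (--d->refcount == 0) {
		printf("freeing %s\n", d->name);
		free(d);	/* object is gone after the final put */
	}
}

static void use_dev(struct dev *d) { printf("using %s\n", d->name); }

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	snprintf(d->name, sizeof(d->name), "pcidev");
	dev_get(d);

	/* Correct order, as in the fix: finish every access, then drop the ref. */
	use_dev(d);	/* stands in for pci_iounmap(hba->pcidev, ...) */
	dev_put(d);	/* stands in for pci_dev_put(hba->pcidev) */
	return 0;
}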
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e51923886475..950f9cdf0577 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -793,10 +793,10 @@ csio_hw_get_flash_params(struct csio_hw *hw)
goto found;
}
- /* Decode Flash part size. The code below looks repetative with
+ /* Decode Flash part size. The code below looks repetitive with
* common encodings, but that's not guaranteed in the JEDEC
- * specification for the Read JADEC ID command. The only thing that
- * we're guaranteed by the JADEC specification is where the
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
* Manufacturer ID is in the returned result. After that each
* Manufacturer ~could~ encode things completely differently.
* Note, all Flash parts must have 64KB sectors.
@@ -983,8 +983,8 @@ retry:
waiting -= 50;
/*
- * If neither Error nor Initialialized are indicated
- * by the firmware keep waiting till we exaust our
+ * If neither Error nor Initialized are indicated
+ * by the firmware keep waiting till we exhaust our
* timeout ... and then retry if we haven't exhausted
* our retries ...
*/
@@ -1738,7 +1738,7 @@ static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
* Convert Common Code Forward Error Control settings into the
* Firmware's API. If the current Requested FEC has "Automatic"
* (IEEE 802.3) specified, then we use whatever the Firmware
- * sent us as part of it's IEEE 802.3-based interpratation of
+ * sent us as part of its IEEE 802.3-based interpretation of
* the Transceiver Module EPROM FEC parameters. Otherwise we
* use whatever is in the current Requested FEC settings.
*/
@@ -2834,7 +2834,7 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
}
/*
- * csio_hws_initializing - Initialiazing state
+ * csio_hws_initializing - Initializing state
* @hw - HW module
* @evt - Event
*
@@ -3049,7 +3049,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
if (!csio_is_hw_master(hw))
break;
/*
- * The BYE should have alerady been issued, so we cant
+ * The BYE should have already been issued, so we can't
* use the mailbox interface. Hence we use the PL_RST
* register directly.
*/
@@ -3104,7 +3104,7 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
* optionally emitting a warning or alert message. The table is terminated
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
@@ -4219,7 +4219,7 @@ csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
* @hw: Pointer to HW module.
*
* It is assumed that the initialization is a synchronous operation.
- * So when we return afer posting the event, the HW SM should be in
+ * So when we return after posting the event, the HW SM should be in
* the ready state, if there were no errors during init.
*/
int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index a6dd704d7f2d..2e8a3ac575cb 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -154,13 +154,10 @@ csio_dfs_create(struct csio_hw *hw)
/*
* csio_dfs_destroy - Destroys per-hw debugfs.
*/
-static int
+static void
csio_dfs_destroy(struct csio_hw *hw)
{
- if (hw->debugfs_root)
- debugfs_remove_recursive(hw->debugfs_root);
-
- return 0;
+ debugfs_remove_recursive(hw->debugfs_root);
}
/*
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index 66e58f0a75dc..74ff8adc41f7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
+ unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
- spin_lock_irq(&hw->lock);
+ spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
- spin_unlock_irq(&hw->lock);
+ spin_unlock_irqrestore(&hw->lock, flags);
}
/**
@@ -1989,7 +1992,7 @@ static int
csio_ln_init(struct csio_lnode *ln)
{
int rv = -EINVAL;
- struct csio_lnode *rln, *pln;
+ struct csio_lnode *pln;
struct csio_hw *hw = csio_lnode_to_hw(ln);
csio_init_state(&ln->sm, csio_lns_uninit);
@@ -2019,7 +2022,6 @@ csio_ln_init(struct csio_lnode *ln)
* THe rest is common for non-root physical and NPIV lnodes.
* Just get references to all other modules
*/
- rln = csio_root_lnode(ln);
if (csio_is_npiv_ln(ln)) {
/* NPIV */
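The FDMI callbacks patched above can be invoked with interrupts already disabled, and spin_unlock_irq() would unconditionally re-enable them on exit; the irqsave/irqrestore pair puts the interrupt state back exactly as it was found. A userspace sketch of the distinction, with a simulated interrupt flag (all names illustrative):

#include <stdio.h>
#include <stdbool.h>

static bool irqs_enabled = true;	/* simulated CPU interrupt flag */

static void lock_irq(void)            { irqs_enabled = false; }
static void unlock_irq(void)          { irqs_enabled = true; }	/* always re-enables */
static void lock_irqsave(bool *flags) { *flags = irqs_enabled; irqs_enabled = false; }
static void unlock_irqrestore(bool f) { irqs_enabled = f; }	/* restores prior state */

static void callback_irq_variant(void)
{
	lock_irq();
	unlock_irq();	/* bug: enables interrupts even if the caller had them off */
}

static void callback_irqsave_variant(void)
{
	bool flags;

	lock_irqsave(&flags);
	unlock_irqrestore(flags);	/* leaves the state as the caller had it */
}

int main(void)
{
	irqs_enabled = false;	/* caller runs with interrupts disabled */
	callback_irq_variant();
	printf("after _irq variant:     irqs_enabled=%d (wrongly re-enabled)\n",
	       irqs_enabled);

	irqs_enabled = false;
	callback_irqsave_variant();
	printf("after _irqsave variant: irqs_enabled=%d (still disabled)\n",
	       irqs_enabled);
	return 0;
}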
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 6f13673d6aa0..94810b19e747 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1210,7 +1210,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
!csio_is_hw_intr_enabled(hw)) {
csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
*((uint8_t *)mbp->mb));
- goto error_out;
+ goto error_out;
}
if (mbm->mcurrent != NULL) {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index da50e87921bc..bc1086ae6835 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -2073,7 +2073,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct net_device *ndev = cdev->ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int i, err;
if (!lldi->vr->iscsi.size) {
@@ -2082,7 +2081,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
}
cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3e17af8aedeb..0d044c165960 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2284,34 +2284,6 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
- cxgbi_sock_put(csk);
-
- return len;
-}
-
-static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
-{
- int len;
-
- cxgbi_sock_get(csk);
- if (csk->csk_family == AF_INET)
- len = sprintf(buf, "%pI4",
- &csk->daddr.sin_addr.s_addr);
- else
- len = sprintf(buf, "%pI6",
- &csk->daddr6.sin6_addr);
-
- cxgbi_sock_put(csk);
-
- return len;
-}
-
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
char *buf)
{
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 93ef97af22df..fbd2ae40dab4 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -44,14 +44,12 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb *ioarcb;
struct sisl_ioasa *ioasa;
u32 resid;
if (unlikely(!cmd))
return;
- ioarcb = &(cmd->rcb);
ioasa = &(cmd->sa);
if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
@@ -3593,7 +3591,7 @@ static const struct file_operations cxlflash_chr_fops = {
.owner = THIS_MODULE,
.open = cxlflash_chr_open,
.unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = cxlflash_chr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
/**
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 7bd376d95ed5..b02ac389e6c6 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
sizeof(struct esas2r_sas_nvram))) {
esas2r_hdebug("NVRAM read failed, using defaults");
+ up(&a->nvram_semaphore);
return false;
}
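esas2r_nvram_read_direct() holds a->nvram_semaphore at this point, and the failed-read return previously left it held, so the next NVRAM access would block forever. A POSIX-semaphore sketch of the release-on-error-path rule (read_flash_block() is a stand-in for the driver call):

#include <stdio.h>
#include <stdbool.h>
#include <semaphore.h>

static sem_t nvram_sem;

/* Hypothetical flash read standing in for esas2r_read_flash_block(). */
static bool read_flash_block(void) { return false; }

static bool nvram_read(void)
{
	sem_wait(&nvram_sem);		/* taken earlier in the real function */

	if (!read_flash_block()) {
		sem_post(&nvram_sem);	/* the fix: release before bailing out */
		return false;
	}

	sem_post(&nvram_sem);
	return true;
}

int main(void)
{
	sem_init(&nvram_sem, 0, 1);
	printf("read ok: %d\n", nvram_read());

	/* Without the error-path post, this second call would deadlock. */
	printf("read ok: %d\n", nvram_read());
	sem_destroy(&nvram_sem);
	return 0;
}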
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index fdbda5c05aa0..80c5a235d193 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -613,7 +613,7 @@ static int __init esas2r_init(void)
/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
static const struct file_operations esas2r_proc_fops = {
- .compat_ioctl = esas2r_proc_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.unlocked_ioctl = esas2r_proc_ioctl,
};
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 80608b53897b..8ef150dfb6f7 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1024,7 +1024,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
atomic64_inc(&fnic_stats->io_stats.io_completions);
- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
+ io_duration_time = jiffies_to_msecs(jiffies) -
+ jiffies_to_msecs(start_time);
if(io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 78af9cc2009b..1f55b9e4e74a 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -259,7 +259,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
- int dev_cmd_err[] = {
+ static const int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
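Marking the translation table static const (as the ips.c hunk further down also does for its options table) moves it out of the function's stack frame: it is emitted once as read-only data instead of being rebuilt on every call. A small sketch of the idiom:

#include <stdio.h>
#include <errno.h>

static int lookup_errno(unsigned int fw_err)
{
	/*
	 * static const: built once at compile time, shared, read-only;
	 * a plain local array would be re-initialized on every call.
	 */
	static const int dev_cmd_err[] = {
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
	};

	if (fw_err >= sizeof(dev_cmd_err) / sizeof(dev_cmd_err[0]))
		return -1;
	return dev_cmd_err[fw_err];
}

int main(void)
{
	printf("%d\n", lookup_errno(1));	/* prints EINVAL's value */
	return 0;
}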
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 720c4d6be939..233c73e01246 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/timer.h>
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
@@ -84,6 +85,7 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
+#define CLEAR_ITCT_TIMEOUT 20
struct hisi_hba;
@@ -167,6 +169,7 @@ struct hisi_sas_phy {
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
int enable;
+ atomic_t down_cnt;
};
struct hisi_sas_port {
@@ -296,8 +299,8 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*clear_itct)(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *dev);
+ int (*clear_itct)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *dev);
void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
@@ -321,6 +324,44 @@ struct hisi_sas_hw {
const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
+#define HISI_SAS_MAX_DEBUGFS_DUMP (50)
+
+struct hisi_sas_debugfs_cq {
+ struct hisi_sas_cq *cq;
+ void *complete_hdr;
+};
+
+struct hisi_sas_debugfs_dq {
+ struct hisi_sas_dq *dq;
+ struct hisi_sas_cmd_hdr *hdr;
+};
+
+struct hisi_sas_debugfs_regs {
+ struct hisi_hba *hisi_hba;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_port {
+ struct hisi_sas_phy *phy;
+ u32 *data;
+};
+
+struct hisi_sas_debugfs_iost {
+ struct hisi_sas_iost *iost;
+};
+
+struct hisi_sas_debugfs_itct {
+ struct hisi_sas_itct *itct;
+};
+
+struct hisi_sas_debugfs_iost_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
+struct hisi_sas_debugfs_itct_cache {
+ struct hisi_sas_iost_itct_cache *cache;
+};
+
struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;
@@ -402,19 +443,20 @@ struct hisi_hba {
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
- u32 *debugfs_regs[DEBUGFS_REGS_NUM];
- u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
- void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
- struct hisi_sas_iost *debugfs_iost;
- struct hisi_sas_itct *debugfs_itct;
- u64 *debugfs_iost_cache;
- u64 *debugfs_itct_cache;
-
+ struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM];
+ struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS];
+ struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES];
+ struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+ struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP];
+
+ u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP];
+ int debugfs_dump_index;
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
- bool debugfs_snapshot;
};
/* Generic HW DMA host memory structures */
@@ -556,6 +598,7 @@ struct hisi_sas_slot_dif_buf_table {
extern struct scsi_transport_template *hisi_sas_stt;
extern bool hisi_sas_debugfs_enable;
+extern u32 hisi_sas_debugfs_dump_count;
extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
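The header changes above replace single debugfs buffers with per-dump wrapper structs in arrays sized HISI_SAS_MAX_DEBUGFS_DUMP, so each triggered dump captures into its own slot with its own timestamp instead of overwriting one snapshot. A compact userspace sketch of that slot layout (field names simplified, not the driver's):

#include <stdio.h>
#include <time.h>

#define MAX_DUMPS 50	/* mirrors HISI_SAS_MAX_DEBUGFS_DUMP */

struct snapshot {
	unsigned long long timestamp_ms;
	char regs[64];		/* stand-in for the captured register data */
};

static struct snapshot dumps[MAX_DUMPS];
static int dump_index;		/* mirrors hisi_hba->debugfs_dump_index */

static int take_snapshot(const char *live_regs)
{
	if (dump_index >= MAX_DUMPS)	/* same bound check as trigger_dump */
		return -1;

	dumps[dump_index].timestamp_ms =
		(unsigned long long)time(NULL) * 1000ULL;
	snprintf(dumps[dump_index].regs, sizeof(dumps[dump_index].regs),
		 "%s", live_regs);
	return dump_index++;		/* each dump lands in its own slot */
}

int main(void)
{
	take_snapshot("regs at t0");
	take_snapshot("regs at t1");
	for (int i = 0; i < dump_index; i++)
		printf("dump %d @%llums: %s\n",
		       i, dumps[i].timestamp_ms, dumps[i].regs);
	return 0;
}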
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 0847e682797b..03588ec3c394 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -587,7 +587,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
dev = hisi_hba->dev;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (in_softirq())
+ /*
+ * I/Os from the upper layer may arrive with preemption
+ * already disabled along the I/O path. Sleeping in down()
+ * would then trigger a "scheduling while atomic" bug in
+ * schedule(), so check preemptible() before calling down().
+ */
+ if (!preemptible())
return -EINVAL;
down(&hisi_hba->sem);
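A tiny sketch of the rule the new comment states -- a context that cannot be preempted must not block -- using a may_sleep flag as a stand-in for preemptible() (POSIX semaphore, illustrative only):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <semaphore.h>

static sem_t sem;

/* may_sleep stands in for preemptible(): may this context schedule? */
static int acquire(bool may_sleep)
{
	if (!may_sleep)
		return -EINVAL;	/* mirror the fix: never block from atomic context */
	sem_wait(&sem);		/* blocking acquire is fine when sleeping is allowed */
	return 0;
}

int main(void)
{
	sem_init(&sem, 0, 1);
	printf("sleepable ctx:   %d\n", acquire(true));		/* 0: took the semaphore */
	printf("atomic-like ctx: %d\n", acquire(false));	/* -22: refuses to block */
	sem_destroy(&sem);
	return 0;
}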
@@ -968,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ struct hisi_sas_port *port;
unsigned long flags;
if (!sas_port)
return;
+ port = to_hisi_sas_port(sas_port);
spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
@@ -1045,6 +1052,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
+ int ret = 0;
dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
@@ -1056,13 +1064,16 @@ static void hisi_sas_dev_gone(struct domain_device *device)
hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
device->lldd_dev = NULL;
}
if (hisi_hba->hw->free_device)
hisi_hba->hw->free_device(sas_dev);
- sas_dev->dev_type = SAS_PHY_UNUSED;
+
+ /* Don't mark it as SAS_PHY_UNUSED if clearing the ITCT failed */
+ if (!ret)
+ sas_dev->dev_type = SAS_PHY_UNUSED;
sas_dev->sas_device = NULL;
up(&hisi_hba->sem);
}
@@ -1402,7 +1413,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- bool do_port_check = !!(_sas_port != sas_port);
+ bool do_port_check = _sas_port != sas_port;
if (!sas_phy->phy->enabled)
continue;
@@ -1563,7 +1574,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
struct Scsi_Host *shost = hisi_hba->shost;
int rc;
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!hisi_hba->hw->soft_reset)
@@ -2055,7 +2066,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
/* Internal abort timed out */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -2676,6 +2687,7 @@ int hisi_sas_probe(struct platform_device *pdev,
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
return rc;
@@ -2687,10 +2699,11 @@ struct dentry *hisi_sas_debugfs_dir;
static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = hisi_hba->hw->complete_hdr_size;
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++)
- memcpy(hisi_hba->debugfs_complete_hdr[i],
+ memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
hisi_hba->complete_hdr[i],
HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}
@@ -2698,13 +2711,14 @@ static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
+ int dump_index = hisi_hba->debugfs_dump_index;
int i;
for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
+ struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
int j;
- debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
+ debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
cmd_hdr = hisi_hba->cmd_hdr[i];
for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
@@ -2715,6 +2729,7 @@ static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
const struct hisi_sas_debugfs_reg *port =
hisi_hba->hw->debugfs_reg_port;
int i, phy_cnt;
@@ -2722,7 +2737,7 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
u32 *databuf;
for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
- databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
+ databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
for (i = 0; i < port->count; i++, databuf++) {
offset = port->base_off + 4 * i;
*databuf = port->read_port_reg(hisi_hba, phy_cnt,
@@ -2733,7 +2748,8 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *global =
hw->debugfs_reg_array[DEBUGFS_GLOBAL];
@@ -2745,7 +2761,8 @@ static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *axi =
hw->debugfs_reg_array[DEBUGFS_AXI];
@@ -2758,7 +2775,8 @@ static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+ int dump_index = hisi_hba->debugfs_dump_index;
+ u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *ras =
hw->debugfs_reg_array[DEBUGFS_RAS];
@@ -2771,8 +2789,9 @@ static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
- void *cachebuf = hisi_hba->debugfs_itct_cache;
- void *databuf = hisi_hba->debugfs_itct;
+ int dump_index = hisi_hba->debugfs_dump_index;
+ void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
struct hisi_sas_itct *itct;
int i;
@@ -2789,9 +2808,10 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
+ int dump_index = hisi_hba->debugfs_dump_index;
int max_command_entries = HISI_SAS_MAX_COMMANDS;
- void *cachebuf = hisi_hba->debugfs_iost_cache;
- void *databuf = hisi_hba->debugfs_iost;
+ void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
+ void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
struct hisi_sas_iost *iost;
int i;
@@ -2842,11 +2862,12 @@ static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *global = s->private;
+ struct hisi_hba *hisi_hba = global->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
+ hisi_sas_debugfs_print_reg(global->data,
reg_global, s);
return 0;
@@ -2868,11 +2889,12 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *axi = s->private;
+ struct hisi_hba *hisi_hba = axi->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+ hisi_sas_debugfs_print_reg(axi->data,
reg_axi, s);
return 0;
@@ -2894,11 +2916,12 @@ static const struct file_operations hisi_sas_debugfs_axi_fops = {
static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_debugfs_regs *ras = s->private;
+ struct hisi_hba *hisi_hba = ras->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+ hisi_sas_debugfs_print_reg(ras->data,
reg_ras, s);
return 0;
@@ -2920,13 +2943,13 @@ static const struct file_operations hisi_sas_debugfs_ras_fops = {
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
- struct hisi_sas_phy *phy = s->private;
+ struct hisi_sas_debugfs_port *port = s->private;
+ struct hisi_sas_phy *phy = port->phy;
struct hisi_hba *hisi_hba = phy->hisi_hba;
const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
- u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];
- hisi_sas_debugfs_print_reg(databuf, reg_port, s);
+ hisi_sas_debugfs_print_reg(port->data, reg_port, s);
return 0;
}
@@ -2975,13 +2998,13 @@ static void hisi_sas_show_row_32(struct seq_file *s, int index,
seq_puts(s, "\n");
}
-static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
+ struct hisi_sas_debugfs_cq *debugfs_cq)
{
- struct hisi_sas_cq *cq = cq_ptr;
+ struct hisi_sas_cq *cq = debugfs_cq->cq;
struct hisi_hba *hisi_hba = cq->hisi_hba;
- void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
- __le32 *complete_hdr = complete_queue +
- (hisi_hba->hw->complete_hdr_size * slot);
+ __le32 *complete_hdr = debugfs_cq->complete_hdr +
+ (hisi_hba->hw->complete_hdr_size * slot);
hisi_sas_show_row_32(s, slot,
hisi_hba->hw->complete_hdr_size,
@@ -2990,11 +3013,11 @@ static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
- struct hisi_sas_cq *cq = s->private;
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
int slot;
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- hisi_sas_cq_show_slot(s, slot, cq);
+ hisi_sas_cq_show_slot(s, slot, debugfs_cq);
}
return 0;
}
@@ -3014,9 +3037,8 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
- struct hisi_sas_dq *dq = dq_ptr;
- struct hisi_hba *hisi_hba = dq->hisi_hba;
- void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id];
+ struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
+ void *cmd_queue = debugfs_dq->hdr;
__le32 *cmd_hdr = cmd_queue +
sizeof(struct hisi_sas_cmd_hdr) * slot;
@@ -3048,14 +3070,14 @@ static const struct file_operations hisi_sas_debugfs_dq_fops = {
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
+ struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
- for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
- __le64 *iost = &debugfs_iost->qw0;
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
+ hisi_sas_show_row_64(s, i, sizeof(*iost), data);
}
return 0;
@@ -3076,9 +3098,8 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *iost_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+ struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *iost;
@@ -3117,13 +3138,13 @@ static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
int i;
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
- for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
- __le64 *itct = &debugfs_itct->qw0;
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
- hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
+ hisi_sas_show_row_64(s, i, sizeof(*itct), data);
}
return 0;
@@ -3144,9 +3165,8 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
- struct hisi_hba *hisi_hba = s->private;
- struct hisi_sas_iost_itct_cache *itct_cache =
- (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+ struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
int i, tab_idx;
__le64 *itct;
@@ -3184,6 +3204,8 @@ static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
+ u64 *debugfs_timestamp;
+ int dump_index = hisi_hba->debugfs_dump_index;
struct dentry *dump_dentry;
struct dentry *dentry;
char name[256];
@@ -3191,19 +3213,26 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
int c;
int d;
- /* Create dump dir inside device dir */
- dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir);
- hisi_hba->debugfs_dump_dentry = dump_dentry;
+ snprintf(name, 256, "%d", dump_index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
- debugfs_create_file("global", 0400, dump_dentry, hisi_hba,
- &hisi_sas_debugfs_global_fops);
+ debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
+ &hisi_sas_debugfs_global_fops);
/* Create port dir and files */
dentry = debugfs_create_dir("port", dump_dentry);
for (p = 0; p < hisi_hba->n_phy; p++) {
snprintf(name, 256, "%d", p);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_port_reg[dump_index][p],
&hisi_sas_debugfs_port_fops);
}
@@ -3212,7 +3241,8 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (c = 0; c < hisi_hba->queue_count; c++) {
snprintf(name, 256, "%d", c);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_cq[dump_index][c],
&hisi_sas_debugfs_cq_fops);
}
@@ -3221,26 +3251,33 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
for (d = 0; d < hisi_hba->queue_count; d++) {
snprintf(name, 256, "%d", d);
- debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d],
+ debugfs_create_file(name, 0400, dentry,
+ &hisi_hba->debugfs_dq[dump_index][d],
&hisi_sas_debugfs_dq_fops);
}
- debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost[dump_index],
&hisi_sas_debugfs_iost_fops);
- debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_iost_cache[dump_index],
&hisi_sas_debugfs_iost_cache_fops);
- debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct[dump_index],
&hisi_sas_debugfs_itct_fops);
- debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+ &hisi_hba->debugfs_itct_cache[dump_index],
&hisi_sas_debugfs_itct_cache_fops);
- debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("axi", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
&hisi_sas_debugfs_axi_fops);
- debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+ debugfs_create_file("ras", 0400, dump_dentry,
+ &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
&hisi_sas_debugfs_ras_fops);
return;
@@ -3271,8 +3308,7 @@ static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
struct hisi_hba *hisi_hba = file->f_inode->i_private;
char buf[8];
- /* A bit racy, but don't care too much since it's only debugfs */
- if (hisi_hba->debugfs_snapshot)
+ if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
return -EFAULT;
if (count > 8)
@@ -3539,7 +3575,7 @@ static const struct {
int value;
char *name;
} hisi_sas_debugfs_loop_modes[] = {
- { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digial" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};
@@ -3670,132 +3706,201 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
.owner = THIS_MODULE,
};
+static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = filp->private_data;
+ struct hisi_sas_phy *phy = s->private;
+ unsigned int set_val;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, &set_val);
+ if (res)
+ return res;
+
+ if (set_val > 0)
+ return -EINVAL;
+
+ atomic_set(&phy->down_cnt, 0);
+
+ return count;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
+ .open = hisi_sas_debugfs_phy_down_cnt_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_phy_down_cnt_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, debugfs_work);
+ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+ struct device *dev = hisi_hba->dev;
+ u64 timestamp = local_clock();
- if (hisi_hba->debugfs_snapshot)
+ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+ dev_warn(dev, "dump count exceeded!\n");
return;
- hisi_hba->debugfs_snapshot = true;
+ }
+
+ do_div(timestamp, NSEC_PER_MSEC);
+ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
hisi_sas_debugfs_snapshot_regs(hisi_hba);
+ hisi_hba->debugfs_dump_index++;
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
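The reworked handler stamps each snapshot with local_clock() scaled from nanoseconds to milliseconds via do_div() before bumping debugfs_dump_index. A userspace approximation of that stamping (CLOCK_MONOTONIC standing in for local_clock()):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000ULL

/* Userspace stand-in for the kernel's local_clock(): monotonic ns. */
static uint64_t local_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t timestamp = local_clock_ns();

	timestamp /= NSEC_PER_MSEC;	/* the do_div(timestamp, NSEC_PER_MSEC) step */
	printf("snapshot timestamp: %llu ms\n", (unsigned long long)timestamp);
	return 0;
}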
-static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
{
struct device *dev = hisi_hba->dev;
int i;
- devm_kfree(dev, hisi_hba->debugfs_iost_cache);
- devm_kfree(dev, hisi_hba->debugfs_itct_cache);
- devm_kfree(dev, hisi_hba->debugfs_iost);
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
for (i = 0; i < hisi_hba->queue_count; i++)
- devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
for (i = 0; i < DEBUGFS_REGS_NUM; i++)
- devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
for (i = 0; i < hisi_hba->n_phy; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
}
-static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
{
const struct hisi_sas_hw *hw = hisi_hba->hw;
struct device *dev = hisi_hba->dev;
- int p, c, d;
+ int p, c, d, r, i;
size_t sz;
- hisi_hba->debugfs_dump_dentry =
- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+ struct hisi_sas_debugfs_regs *regs =
+ &hisi_hba->debugfs_regs[dump_index][r];
- sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
- goto fail;
+ sz = hw->debugfs_reg_array[r]->count * 4;
+ regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!regs->data)
+ goto fail;
+ regs->hisi_hba = hisi_hba;
+ }
sz = hw->debugfs_reg_port->count * 4;
for (p = 0; p < hisi_hba->n_phy; p++) {
- hisi_hba->debugfs_port_reg[p] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_port *port =
+ &hisi_hba->debugfs_port_reg[dump_index][p];
- if (!hisi_hba->debugfs_port_reg[p])
+ port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!port->data)
goto fail;
+ port->phy = &hisi_hba->phy[p];
}
- sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_AXI] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
- goto fail;
-
- sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
- hisi_hba->debugfs_regs[DEBUGFS_RAS] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
-
- if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
- goto fail;
-
sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
for (c = 0; c < hisi_hba->queue_count; c++) {
- hisi_hba->debugfs_complete_hdr[c] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_cq *cq =
+ &hisi_hba->debugfs_cq[dump_index][c];
- if (!hisi_hba->debugfs_complete_hdr[c])
+ cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!cq->complete_hdr)
goto fail;
+ cq->cq = &hisi_hba->cq[c];
}
sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
for (d = 0; d < hisi_hba->queue_count; d++) {
- hisi_hba->debugfs_cmd_hdr[d] =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ struct hisi_sas_debugfs_dq *dq =
+ &hisi_hba->debugfs_dq[dump_index][d];
- if (!hisi_hba->debugfs_cmd_hdr[d])
+ dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!dq->hdr)
goto fail;
+ dq->dq = &hisi_hba->dq[d];
}
sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
- hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost)
+ hisi_hba->debugfs_iost[dump_index].iost =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost[dump_index].iost)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_iost_cache)
+ hisi_hba->debugfs_iost_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
goto fail;
sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
sizeof(struct hisi_sas_iost_itct_cache);
- hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct_cache)
+ hisi_hba->debugfs_itct_cache[dump_index].cache =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
goto fail;
/* New memory allocation must be locate before itct */
sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
- hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_itct)
+ hisi_hba->debugfs_itct[dump_index].itct =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct[dump_index].itct)
goto fail;
return 0;
fail:
- hisi_sas_debugfs_release(hisi_hba);
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+ hisi_sas_debugfs_release(hisi_hba, i);
return -ENOMEM;
}
+static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
+{
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+ hisi_hba->debugfs_dir);
+ char name[16];
+ int phy_no;
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ snprintf(name, 16, "%d", phy_no);
+ debugfs_create_file(name, 0600, dir,
+ &hisi_hba->phy[phy_no],
+ &hisi_sas_debugfs_phy_down_cnt_ops);
+ }
+}
+
static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
hisi_hba->debugfs_bist_dentry =
@@ -3827,6 +3932,7 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ int i;
hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
hisi_sas_debugfs_dir);
@@ -3838,9 +3944,17 @@ void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
/* create bist structures */
hisi_sas_debugfs_bist_init(hisi_hba);
- if (hisi_sas_debugfs_alloc(hisi_hba)) {
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
+
+ hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);
+
+ for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+ if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ break;
+ }
}
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
@@ -3874,14 +3988,24 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
+u32 hisi_sas_debugfs_dump_count = 1;
+EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
+module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
+MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
+
static __init int hisi_sas_init(void)
{
hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
if (!hisi_sas_stt)
return -ENOMEM;
- if (hisi_sas_debugfs_enable)
+ if (hisi_sas_debugfs_enable) {
hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
+ if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
+ pr_info("hisi_sas: Limiting debugfs dump count\n");
+ hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
+ }
+ }
return 0;
}
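hisi_sas_init() now clamps the new debugfs_dump_count module parameter to the compile-time ceiling rather than trusting the user-supplied value. The same guard in miniature:

#include <stdio.h>

#define MAX_DEBUGFS_DUMP 50	/* mirrors HISI_SAS_MAX_DEBUGFS_DUMP */

static unsigned int clamp_dump_count(unsigned int requested)
{
	if (requested > MAX_DEBUGFS_DUMP) {
		printf("hisi_sas: Limiting debugfs dump count\n");
		return MAX_DEBUGFS_DUMP;	/* silent upper clamp, as in the driver */
	}
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_dump_count(10));	/* unchanged */
	printf("%u\n", clamp_dump_count(500));	/* clamped to 50 */
	return 0;
}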
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index b861a0f14c9d..3af53cc42bd6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -531,8 +531,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
@@ -551,6 +551,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
qw0 = le64_to_cpu(itct->qw0);
qw0 &= ~ITCT_HDR_VALID_MSK;
itct->qw0 = cpu_to_le64(qw0);
+
+ return 0;
}
static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 8e96a257e439..61b1e2693b08 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -974,13 +974,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
int i;
sas_dev->completion = &completion;
@@ -990,13 +991,19 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
+ /* need to set register twice to clear ITCT for v2 hw */
for (i = 0; i < 2; i++) {
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
memset(itct, 0, sizeof(struct hisi_sas_itct));
}
+ return 0;
}
static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
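Both the v2 hunk above and the v3 one below replace an unbounded wait_for_completion() with a timed wait that surfaces -ETIMEDOUT, so a wedged controller no longer hangs device teardown. A pthread sketch of the bounded-wait shape (timeout value and names illustrative):

#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int done;	/* set by the "interrupt handler" when the clear completes */

/* Bounded wait: 0 on completion, -ETIMEDOUT if the HW never answers. */
static int wait_done_timeout(int seconds)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += seconds;

	pthread_mutex_lock(&lock);
	while (!done && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	return done ? 0 : -ETIMEDOUT;
}

int main(void)
{
	/* Nothing ever sets 'done', modelling a dead controller. */
	int rc = wait_done_timeout(1);

	printf("clear ITCT: %s\n", rc == -ETIMEDOUT ? "timed out" : "ok");
	return rc == -ETIMEDOUT ? 1 : 0;
}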
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index cb8d087762db..bf5d5f138437 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -795,13 +795,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_device *sas_dev)
+static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ struct device *dev = hisi_hba->dev;
sas_dev->completion = &completion;
@@ -814,8 +815,14 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- wait_for_completion(sas_dev->completion);
+ if (!wait_for_completion_timeout(sas_dev->completion,
+ CLEAR_ITCT_TIMEOUT * HZ)) {
+ dev_warn(dev, "failed to clear ITCT\n");
+ return -ETIMEDOUT;
+ }
+
memset(itct, 0, sizeof(struct hisi_sas_itct));
+ return 0;
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -1542,6 +1549,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct device *dev = hisi_hba->dev;
+ atomic_inc(&phy->down_cnt);
+
del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
@@ -3022,11 +3031,6 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CTRL, reg_val);
- mdelay(100);
- reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
-
/* set the bist init value */
hisi_sas_phy_write32(hisi_hba, phy_id,
SAS_PHY_BIST_CODE,
@@ -3035,6 +3039,11 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
SAS_PHY_BIST_CODE1,
SAS_PHY_BIST_CODE1_INIT);
+ mdelay(100);
+ reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL, reg_val);
+
/* clear error bit */
mdelay(100);
hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
@@ -3259,6 +3268,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_out_register_ha:
scsi_remove_host(shost);
err_out_ha:
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
err_out_regions:
pci_release_regions(pdev);
@@ -3292,8 +3302,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
- hisi_sas_debugfs_exit(hisi_hba);
-
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
@@ -3305,6 +3313,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
+ hisi_sas_debugfs_exit(hisi_hba);
scsi_host_put(shost);
}
@@ -3422,6 +3431,7 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
if (rc) {
scsi_remove_host(shost);
pci_disable_device(pdev);
+ return rc;
}
hisi_hba->hw->phys_init(hisi_hba);
sas_resume_ha(sha);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 55522b7162d3..1d669e47b692 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -38,6 +38,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_cmnd.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -554,13 +555,29 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
+static bool scsi_host_check_in_flight(struct request *rq, void *data,
+ bool reserved)
+{
+ int *count = data;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ (*count)++;
+
+ return true;
+}
+
/**
* scsi_host_busy - Return the host busy counter
* @shost: Pointer to Scsi_Host to inc.
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
- return atomic_read(&shost->host_busy);
+ int cnt = 0;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ scsi_host_check_in_flight, &cnt);
+ return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);
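scsi_host_busy() now derives the count from the block-mq tag set, checking each command's SCMD_STATE_INFLIGHT bit, instead of reading a per-host atomic that every I/O had to maintain; the bookkeeping cost moves to the rare reader. A plain-C sketch of the iterate-with-accumulator shape (for_each_request() loosely approximates blk_mq_tagset_busy_iter()):

#include <stdio.h>
#include <stdbool.h>

struct request { bool inflight; };

/* Walks every allocated request, like blk_mq_tagset_busy_iter(). */
static void for_each_request(struct request *reqs, int n,
			     bool (*fn)(struct request *, void *), void *data)
{
	for (int i = 0; i < n; i++)
		if (!fn(&reqs[i], data))
			break;
}

static bool check_in_flight(struct request *rq, void *data)
{
	int *count = data;

	if (rq->inflight)	/* mirrors test_bit(SCMD_STATE_INFLIGHT, ...) */
		(*count)++;
	return true;		/* keep iterating */
}

int main(void)
{
	struct request reqs[] = { {true}, {false}, {true}, {true} };
	int cnt = 0;

	for_each_request(reqs, 4, check_in_flight, &cnt);
	printf("host busy: %d\n", cnt);	/* prints 3 */
	return 0;
}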
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index e8bc8d328bab..f25672982c5f 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -498,7 +498,7 @@ ips_setup(char *ips_str)
int i;
char *key;
char *value;
- IPS_OPTION options[] = {
+ static const IPS_OPTION options[] = {
{"noi2o", &ips_force_i2o, 0},
{"nommap", &ips_force_memio, 0},
{"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE},
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 9e8de1462593..b1c197505579 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -147,7 +147,7 @@ static struct isci_port *sci_port_configuration_agent_find_port(
/**
*
* @controller: This is the controller object that contains the port agent
- * @port_agent: This is the port configruation agent for the controller.
+ * @port_agent: This is the port configuration agent for the controller.
*
* This routine will validate the port configuration is correct for the SCU
* hardware. The SCU hardware allows for port configurations as follows. LP0
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 49aa4e657c44..cd1e4b4d95bb 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1504,7 +1504,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
* @isci_host: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port conected to this device.
+ * @port: This parameter specifies the isci_port connected to this device.
*
* pointer to new isci_remote_device.
*/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7bedbe877704..0bc63a7ab41c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
+ if (!tcp_sw_conn->sock) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Transport not bound to socket!\n");
+ return -EINVAL;
+ }
+
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 691acbdcc46d..935f98804198 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -605,6 +605,12 @@ struct lpfc_epd_pool {
spinlock_t lock; /* lock for expedite pool */
};
+enum ras_state {
+ INACTIVE,
+ REG_INPROGRESS,
+ ACTIVE
+};
+
struct lpfc_ras_fwlog {
uint8_t *fwlog_buff;
uint32_t fw_buffcount; /* Buffer size posted to FW */
@@ -621,7 +627,7 @@ struct lpfc_ras_fwlog {
bool ras_enabled; /* Ras Enabled for the function */
#define LPFC_RAS_DISABLE_LOGGING 0x00
#define LPFC_RAS_ENABLE_LOGGING 0x01
- bool ras_active; /* RAS logging running state */
+ enum ras_state state; /* RAS logging running state */
};
struct lpfc_hba {
@@ -725,6 +731,7 @@ struct lpfc_hba {
#define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */
#define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/
#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
@@ -830,6 +837,7 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
+ uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -872,7 +880,6 @@ struct lpfc_hba {
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
- uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
uint32_t cfg_delay_discovery;
@@ -990,7 +997,6 @@ struct lpfc_hba {
struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
- struct dma_pool *txrdy_payload_pool;
struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
@@ -1055,6 +1061,7 @@ struct lpfc_hba {
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
+ struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1209,6 +1216,13 @@ struct lpfc_hba {
uint64_t ktime_seg10_min;
uint64_t ktime_seg10_max;
#endif
+
+ struct hlist_node cpuhp; /* used for cpuhp per hba callback */
+ struct timer_list cpuhp_poll_timer;
+ struct list_head poll_list; /* slowpath eq polling list */
+#define LPFC_POLL_HB 1 /* slowpath heartbeat */
+#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
+#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
};
static inline struct Scsi_Host *
@@ -1299,6 +1313,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
+ * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
+ * @numa_mask: Pointer to phba's numa_mask member.
+ * @start: starting cpu index
+ *
+ * Note: If no valid cpu found, then nr_cpu_ids is returned.
+ *
+ **/
+static inline unsigned int
+lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+{
+ unsigned int cpu_it;
+
+ for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ if (cpu_online(cpu_it))
+ break;
+ }
+
+ return cpu_it;
+}
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
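The new lpfc_next_online_numa_cpu() above wraps around the NUMA-local mask from an arbitrary start index and falls back to nr_cpu_ids when no CPU is online. The same wrap-around scan over a plain bitmap, in userspace C (NR_CPUS and the array contents are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

/* Wrap-around scan: first set bit at or after 'start', else NR_CPUS. */
static unsigned int next_online_cpu(const bool *online, unsigned int start)
{
	for (unsigned int i = 0; i < NR_CPUS; i++) {
		unsigned int cpu = (start + i) % NR_CPUS;

		if (online[cpu])
			return cpu;
	}
	return NR_CPUS;	/* mirrors the nr_cpu_ids "not found" convention */
}

int main(void)
{
	bool online[NR_CPUS] = { true, false, false, true, true,
				 false, false, false };

	printf("%u\n", next_online_cpu(online, 5));	/* wraps around: prints 0 */
	printf("%u\n", next_online_cpu(online, 3));	/* prints 3 */
	return 0;
}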
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 25aa7a53d255..4ff82b36a37a 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
- unsigned long iflags = 0;
if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
@@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
- rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d IO %d ELS %d\n",
phba->brd_no,
@@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
phba->sli4_hba.io_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto buffer_done;
+
+ spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
nrport = NULL;
- spin_lock_irqsave(&vport->phba->hbalock, iflags);
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
nrport = rport->remoteport;
- spin_unlock_irqrestore(&vport->phba->hbalock, iflags);
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
/* Tab in to show lport ownership. */
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
@@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
- goto rcu_unlock_buf_done;
+ goto unlock_buf_done;
}
- rcu_read_unlock();
+ spin_unlock_irq(shost->host_lock);
if (!lport)
goto buffer_done;
@@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);
- /* RCU is already unlocked. */
+ /* host_lock is already unlocked. */
goto buffer_done;
- rcu_unlock_buf_done:
- rcu_read_unlock();
+ unlock_buf_done:
+ spin_unlock_irq(shost->host_lock);
buffer_done:
len = strnlen(buf, PAGE_SIZE);
@@ -1475,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
int i;
msleep(100);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ return -EIO;
/* verify if privileged for the request operation */
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1486,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
/* wait for the SLI port firmware ready after firmware reset */
for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
msleep(10);
- lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
- &portstat_reg.word0);
+ if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0))
+ continue;
if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
continue;
if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1642,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
{
LPFC_MBOXQ_t *mbox = NULL;
unsigned long val = 0;
- char *pval = 0;
+ char *pval = NULL;
int rc = 0;
if (!strncmp("enable", buff_out,
@@ -3533,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2,
LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
LPFC_DELAY_INIT_LINK_INDEFINITELY,
"Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+ lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+ lpfc_pt_show, NULL);
+
/*
# lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
# 1 - (1024)
@@ -3580,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
- "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-
/*
# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -4096,7 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
- if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+ /*
+ * The 'topology' is not a configurable parameter if:
+ * - persistent topology enabled
+ * - G7 adapters
+ * - G6 with no private loop support
+ */
+
+ if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+ (!phba->sli4_hba.pc_sli4_params.pls &&
+ phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
val == 4) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -5298,7 +5331,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE - len,
"CPU %02d not present\n",
phba->sli4_hba.curr_disp_cpu);
- else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) {
+ else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5311,10 +5344,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "CPU %02d EQ %04d hdwq %04d "
+ "CPU %02d EQ None hdwq %04d "
"physid %d coreid %d ht %d ua %d\n",
phba->sli4_hba.curr_disp_cpu,
- cpup->eq, cpup->hdwq, cpup->phys_id,
+ cpup->hdwq, cpup->phys_id,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
@@ -5329,7 +5362,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
else
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -5340,7 +5373,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
cpup->core_id,
(cpup->flag & LPFC_CPU_MAP_HYPER),
(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
- cpup->irq);
+ lpfc_get_irq(cpup->eq));
}
phba->sli4_hba.curr_disp_cpu++;
@@ -5711,7 +5744,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
* the driver will advertise it supports to the SCSI layer.
*
* 0 = Set nr_hw_queues by the number of CPUs or HW queues.
- * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ * 1,256 = Manually specify the nr_hw_queue value to be advertised.
*
* Value range is [0,256]. Default value is 8.
*/
@@ -5729,30 +5762,130 @@ LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
* A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
*
* 0 = Configure the number of hdw queues to the number of active CPUs.
- * 1,128 = Manually specify how many hdw queues to use.
+ * 1,256 = Manually specify how many hdw queues to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_DEF,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
+static inline void
+lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+ /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ phba->cfg_irq_numa = 1;
+ else
+ phba->cfg_irq_numa = 0;
+#else
+ phba->cfg_irq_numa = 0;
+#endif
+}
+
/*
* lpfc_irq_chann: Set the number of IRQ vectors that are available
* for Hardware Queues to utilize. This also will map to the number
* of EQ / MSI-X vectors the driver will create. This should never be
* more than the number of Hardware Queues
*
- * 0 = Configure number of IRQ Channels to the number of active CPUs.
- * 1,128 = Manually specify how many IRQ Channels to use.
+ * 0 = Configure number of IRQ Channels to:
+ * if AMD architecture, number of CPUs on HBA's NUMA node
+ * otherwise, number of active CPUs.
+ * [1,256] = Manually specify how many IRQ Channels to use.
*
- * Value range is [0,128]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
*/
-LPFC_ATTR_R(irq_chann,
- LPFC_HBA_HDWQ_DEF,
- LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
- "Set the number of I/O IRQ Channels");
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/* lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+ const struct cpumask *numa_mask;
+
+ if (phba->cfg_use_msi != 2) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8532 use_msi = %u ignoring cfg_irq_numa\n",
+ phba->cfg_use_msi);
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return 0;
+ }
+
+ /* Check if default setting was passed */
+ if (val == LPFC_IRQ_CHANN_DEF)
+ lpfc_assign_default_irq_numa(phba);
+
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (cpumask_empty(numa_mask)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8533 Could not identify NUMA node, "
+ "ignoring cfg_irq_numa\n");
+ phba->cfg_irq_numa = 0;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ } else {
+ phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8543 lpfc_irq_chann set to %u "
+ "(numa)\n", phba->cfg_irq_chann);
+ }
+ } else {
+ if (val > LPFC_IRQ_CHANN_MAX) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "8545 lpfc_irq_chann attribute cannot "
+ "be set to %u, allowed range is "
+ "[%u,%u]\n",
+ val,
+ LPFC_IRQ_CHANN_MIN,
+ LPFC_IRQ_CHANN_MAX);
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ return -EINVAL;
+ }
+ phba->cfg_irq_chann = val;
+ }
+
+ return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the configured lpfc_irq_chann value
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -5933,7 +6066,53 @@ LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
* [1-4] = Multiple of 1/4th Mb of host memory for FW logging
* Value range [0..4]. Default value is 0
*/
-LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+lpfc_param_show(ras_fwlog_buffsize);
+
+static ssize_t
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
+{
+ int ret = 0;
+ enum ras_state state;
+
+ if (!lpfc_rangecheck(val, 0, 4))
+ return -EINVAL;
+
+ if (phba->cfg_ras_fwlog_buffsize == val)
+ return 0;
+
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
+ return -EINVAL;
+
+ spin_lock_irq(&phba->hbalock);
+ state = phba->ras_fwlog.state;
+ spin_unlock_irq(&phba->hbalock);
+
+ if (state == REG_INPROGRESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+ "registration is in progress\n");
+ return -EBUSY;
+ }
+
+ /* For disable logging: stop the logs and free the DMA.
+ * For a ras_fwlog_buffsize change we still need to free and
+ * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
+ */
+ phba->cfg_ras_fwlog_buffsize = val;
+ if (state == ACTIVE) {
+ lpfc_ras_stop_fwlog(phba);
+ lpfc_sli4_ras_dma_free(phba);
+ }
+
+ lpfc_sli4_ras_init(phba);
+ if (phba->ras_fwlog.ras_enabled)
+ ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ return ret;
+}
+
+lpfc_param_store(ras_fwlog_buffsize);
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
/*
* lpfc_ras_fwlog_level: Firmware logging verbosity level
@@ -6071,8 +6250,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sriov_nr_virtfn,
&dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
- &dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
+ &dev_attr_pls,
+ &dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
@@ -7085,11 +7265,22 @@ struct fc_function_template lpfc_vport_transport_functions = {
static void
lpfc_get_hba_function_mode(struct lpfc_hba *phba)
{
- /* If it's a SkyHawk FCoE adapter */
- if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+ /* If the adapter supports FCoE mode */
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_SKYHAWK:
+ case PCI_DEVICE_ID_SKYHAWK_VF:
+ case PCI_DEVICE_ID_LANCER_FCOE:
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
+ case PCI_DEVICE_ID_HORNET:
+ case PCI_DEVICE_ID_TIGERSHARK:
+ case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
- else
+ break;
+ default:
+ /* for others, clear the flag */
phba->hba_flag &= ~HBA_FCOE_MODE;
+ }
}
/**
@@ -7099,6 +7290,7 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
lpfc_ns_query_init(phba, lpfc_ns_query);
lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
@@ -7205,12 +7397,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
- lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
- lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
@@ -7256,11 +7446,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
}
if (!phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
- if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) {
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
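The pls and pt attributes added in this file follow the stock scsi_host sysfs pattern: recover the Scsi_Host from the struct device, reach the driver-private lpfc_hba through hostdata, and emit a single value with scnprintf() so output can never overrun the sysfs page. A minimal sketch of the same shape under a hypothetical attribute name:

    /* Hypothetical read-only attribute following the pls/pt pattern */
    static ssize_t
    my_flag_show(struct device *dev, struct device_attribute *attr, char *buf)
    {
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /* scnprintf() never writes past PAGE_SIZE and returns bytes used */
        return scnprintf(buf, PAGE_SIZE, "%d\n",
                         (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
    }
    static DEVICE_ATTR_RO(my_flag);  /* expands to dev_attr_my_flag, mode 0444 */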
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 39a736b887b1..d4e1b120cc9e 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5435,10 +5435,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job)
bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Current logging state */
- if (ras_fwlog->ras_active == true)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE)
ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
else
ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+ spin_unlock_irq(&phba->hbalock);
ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
@@ -5495,10 +5497,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
if (action == LPFC_RASACTION_STOP_LOGGING) {
/* Check if already disabled */
- if (ras_fwlog->ras_active == false) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -ESRCH;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
/* Disable logging */
lpfc_ras_stop_fwlog(phba);
@@ -5509,8 +5514,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job)
* FW-logging with new log-level. Return status
* "Logging already Running" to caller.
**/
- if (ras_fwlog->ras_active)
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state != INACTIVE)
action_status = -EINPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
/* Enable logging */
rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
@@ -5626,10 +5633,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
goto ras_job_error;
/* Logging to be stopped before reading */
- if (ras_fwlog->ras_active == true) {
+ spin_lock_irq(&phba->hbalock);
+ if (ras_fwlog->state == ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
rc = -EINPROGRESS;
goto ras_job_error;
}
+ spin_unlock_irq(&phba->hbalock);
if (job->request_len <
sizeof(struct fc_bsg_request) +
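All three bsg handlers converted above use the same discipline: hold hbalock only long enough to snapshot ras_fwlog.state, then drop it before doing anything slow (stopping the logging, freeing DMA buffers). Condensed into one helper for illustration:

    /* Condensed version of the state-check pattern used above */
    static int ras_check_active(struct lpfc_hba *phba)
    {
        enum ras_state state;

        spin_lock_irq(&phba->hbalock);
        state = phba->ras_fwlog.state;     /* snapshot under the lock */
        spin_unlock_irq(&phba->hbalock);

        /* Act on the snapshot; heavy follow-up work runs unlocked */
        return (state == ACTIVE) ? 0 : -ESRCH;
    }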
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b2ad8c750486..ee353c84a097 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
+int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
+void lpfc_sli4_poll_hbtimer(struct timer_list *t);
+void lpfc_sli4_start_polling(struct lpfc_queue *q);
+void lpfc_sli4_stop_polling(struct lpfc_queue *q);
+
void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -586,6 +592,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
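The lpfc_sli4_poll_* prototypes added here back a timer-driven slow-path EQ polling loop (see the cpuhp_poll_timer and poll_list members added to lpfc_hba earlier). The general kernel shape of such a heartbeat, sketched with a hypothetical poll_one_pass() doing the actual EQ scan and an illustrative period:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    #define POLL_INTERVAL_MS 100            /* illustrative period */

    static void poll_one_pass(struct lpfc_hba *phba); /* hypothetical EQ scan */

    static void poll_hbtimer(struct timer_list *t)
    {
        struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);

        poll_one_pass(phba);
        /* Re-arm only while EQs remain on the slow-path poll list */
        if (!list_empty(&phba->poll_list))
            mod_timer(t, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
    }

    /* One-time setup, e.g. during HBA init */
    static void poll_timer_init(struct lpfc_hba *phba)
    {
        INIT_LIST_HEAD(&phba->poll_list);
        timer_setup(&phba->cpuhp_poll_timer, poll_hbtimer, 0);
    }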
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 25e86706e207..99c9bb249758 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -763,9 +763,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0208 NameServer Rsp Data: x%x x%x "
- "sz x%x\n",
+ "x%x x%x sz x%x\n",
vport->fc_flag,
CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
@@ -961,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4105 NameServer Rsp Data: x%x x%x\n",
+ "4105 NameServer Rsp Data: x%x x%x "
+ "x%x x%x sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ vport->num_disc_nodes,
+ vport->gidft_inp,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -1025,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6450 GID_PT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1159,6 +1170,11 @@ out:
/* Link up / RSCN discovery */
if (vport->num_disc_nodes)
vport->num_disc_nodes--;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6451 GFF_ID cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
if (vport->num_disc_nodes == 0) {
/*
* The driver has cycled through all Nports in the RSCN payload.
@@ -1868,6 +1884,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
+ case IOERR_SLI_DOWN:
+ /* Driver aborted this IO. No retry as error
+ * is likely Offline->Online or some adapter
+ * error. Recovery will try again.
+ */
+ break;
case IOERR_ABORT_IN_PROGRESS:
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_ILLEGAL_FRAME:
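The lpfc_ct.c change above splits driver-initiated terminations (IOERR_SLI_ABORTED, IOERR_SLI_DOWN) out of the retry group, since the driver aborted the I/O itself and recovery will reissue it. Reduced to a hedged predicate:

    /* Illustrative: should a LOCAL_REJECT CT completion be retried? */
    static bool ct_should_retry(u32 ioerr)
    {
        switch (ioerr) {
        case IOERR_SLI_ABORTED:
        case IOERR_SLI_DOWN:
            /* Driver tore this I/O down itself; recovery reissues it */
            return false;
        case IOERR_ABORT_IN_PROGRESS:
        case IOERR_SEQUENCE_TIMEOUT:
        case IOERR_ILLEGAL_FRAME:
            return true;    /* transient fabric conditions */
        default:
            return false;
        }
    }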
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 8d34be60d379..2e6a68d9ea4f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -2078,6 +2079,96 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf,
}
#endif
+static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
+ char *buffer, int size)
+{
+ int copied = 0;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ return -EINVAL;
+ }
+ spin_unlock_irq(&phba->hbalock);
+
+ list_for_each_entry_safe(dmabuf, next,
+ &phba->ras_fwlog.fwlog_buff_list, list) {
+ memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
+ copied += LPFC_RAS_MAX_ENTRY_SIZE;
+ if (copied >= size)
+ break;
+ }
+ return copied;
+}
+
+static int
+lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ vfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer
+ * @inode: The inode pointer that contains an hba pointer.
+ * @file: The file pointer to attach the log output.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It gets
+ * the hba from the i_private field in @inode, allocates the necessary buffer
+ * for the log, fills the buffer from the in-memory RAS log, and then
+ * returns a pointer to that buffer in the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a negative
+ * error value.
+ **/
+static int
+lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int size;
+ int rc = -ENOMEM;
+
+ spin_lock_irq(&phba->hbalock);
+ if (phba->ras_fwlog.state != ACTIVE) {
+ spin_unlock_irq(&phba->hbalock);
+ rc = -EINVAL;
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+ size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize;
+ debug->buffer = vmalloc(size);
+ if (!debug->buffer)
+ goto free_debug;
+
+ debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size);
+ if (debug->len < 0) {
+ rc = -EINVAL;
+ goto free_buffer;
+ }
+ file->private_data = debug;
+
+ return 0;
+
+free_buffer:
+ vfree(debug->buffer);
+free_debug:
+ kfree(debug);
+out:
+ return rc;
+}
+
/**
* lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer
* @inode: The inode pointer that contains a vport pointer.
@@ -5286,6 +5377,16 @@ static const struct file_operations lpfc_debugfs_op_lockstat = {
};
#endif
+#undef lpfc_debugfs_ras_log
+static const struct file_operations lpfc_debugfs_ras_log = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_ras_log_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .release = lpfc_debugfs_ras_log_release,
+};
+#endif
+
#undef lpfc_debugfs_op_dumpHBASlim
static const struct file_operations lpfc_debugfs_op_dumpHBASlim = {
.owner = THIS_MODULE,
@@ -5457,7 +5558,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = {
.release = lpfc_idiag_cmd_release,
};
-#endif
/* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
* @phba: Pointer to HBA context object.
@@ -5707,6 +5807,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ /* RAS log */
+ snprintf(name, sizeof(name), "ras_log");
+ phba->debug_ras_log =
+ debugfs_create_file(name, 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
+ if (!phba->debug_ras_log) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "6148 Cannot create debugfs"
+ " ras_log\n");
+ goto debug_failed;
+ }
+
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
@@ -6117,6 +6230,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
+ debugfs_remove(phba->debug_ras_log);
+ phba->debug_ras_log = NULL;
+
#ifdef LPFC_HDWQ_LOCK_STAT
debugfs_remove(phba->debug_lockstat); /* lockstat */
phba->debug_lockstat = NULL;
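The ras_log node follows the standard debugfs lifecycle used throughout this file: create the file with the hba stashed in i_private so the open handler can find it, and remove it unconditionally at teardown, since debugfs_remove() is NULL-safe. The pairing in isolation:

    #include <linux/debugfs.h>

    static void ras_debugfs_setup(struct lpfc_hba *phba, struct dentry *root)
    {
        /* phba lands in inode->i_private for lpfc_debugfs_ras_log_open() */
        phba->debug_ras_log = debugfs_create_file("ras_log", 0644, root,
                                                  phba, &lpfc_debugfs_ras_log);
    }

    static void ras_debugfs_teardown(struct lpfc_hba *phba)
    {
        debugfs_remove(phba->debug_ras_log);   /* NULL-safe */
        phba->debug_ras_log = NULL;
    }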
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d5303994bfd6..42a2bf38eaea 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2236,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
+ char *mode;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2273,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* If we don't send GFT_ID to Fabric, a PRLI error
+ * could be expected.
+ */
+ if ((vport->fc_flag & FC_FABRIC) ||
+ (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH))
+ mode = KERN_ERR;
+ else
+ mode = KERN_INFO;
+
/* PRLI failed */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ lpfc_printf_vlog(vport, mode, LOG_ELS,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
@@ -4291,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &rspiocb->iocb;
+ if (!vport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "3177 ELS response failed\n");
+ goto out;
+ }
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -4430,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);
@@ -5260,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
}
}
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6452 Discover PLOGI %d flag x%x\n",
+ sentplogi, vport->fc_flag);
+
if (sentplogi) {
lpfc_set_disctmo(vport);
}
@@ -6455,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t payload_len, length, nportid, *cmd;
int rscn_cnt;
int rscn_id = 0, hba_id = 0;
- int i;
+ int i, tmo;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
@@ -6561,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DEFERRED;
+
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies + msecs_to_jiffies(1000 * tmo));
+ }
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!(vport->fc_flag & FC_RSCN_DISCOVERY)) {
vport->fc_flag |= FC_RSCN_MODE;
@@ -6663,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
/* RSCN processed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0215 RSCN processed Data: x%x x%x x%x x%x\n",
+ "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
vport->fc_flag, 0, vport->fc_rscn_id_cnt,
- vport->port_state);
+ vport->port_state, vport->num_disc_nodes,
+ vport->gidft_inp);
/* To process RSCN, first compare RSCN data with NameServer */
vport->fc_ns_retry = 0;
@@ -7986,20 +8014,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
+ unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
+
/*
* For SLI3, only the hbalock is required. But SLI4 needs to coordinate
* with the ring insert operation. Because lpfc_sli_issue_abort_iotag
* ultimately grabs the ring_lock, the driver must splice the list into
* a working list and release the locks before calling the abort.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
pring = lpfc_phba_elsring(phba);
/* Bail out if we've no ELS wq, like in PCI error recovery case. */
if (unlikely(!pring)) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -8014,6 +8044,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (piocb->vport != vport)
continue;
+ if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ continue;
+
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
@@ -8037,21 +8070,21 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"3387 abort list for txq not empty\n");
INIT_LIST_HEAD(&abort_list);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@@ -8091,7 +8124,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
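The lpfc_els_flush_cmd() conversion from spin_lock_irq() to spin_lock_irqsave() makes the flush safe to call from contexts where interrupts may already be disabled: saving and restoring the caller's flags is correct either way, whereas unconditionally re-enabling IRQs on unlock would not be. The canonical shape:

    static void flush_under_lock(struct lpfc_hba *phba)
    {
        unsigned long iflags;

        /* Correct whether or not the caller already disabled IRQs */
        spin_lock_irqsave(&phba->hbalock, iflags);
        /* ... splice pending iocbs onto a private abort list ... */
        spin_unlock_irqrestore(&phba->hbalock, iflags);
    }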
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 749286acdc17..85ada3deb47d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -700,7 +700,10 @@ lpfc_work_done(struct lpfc_hba *phba)
if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
set_bit(LPFC_DATA_READY, &phba->data_flags);
} else {
- if (phba->link_state >= LPFC_LINK_UP ||
+ /* Driver could have abort request completed in queue
+ * when link goes down. Allow for this transition.
+ */
+ if (phba->link_state >= LPFC_LINK_DOWN ||
phba->link_flag & LS_MDS_LOOPBACK) {
pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -1135,7 +1138,6 @@ void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- uint8_t bbscn = 0;
if (pmb->u.mb.mbxStatus)
goto out;
@@ -1162,17 +1164,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Start discovery by sending a FLOGI. port_state is identically
* LPFC_FLOGI while waiting for FLOGI cmpl
*/
- if (vport->port_state != LPFC_FLOGI) {
- if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
- bbscn = bf_get(lpfc_bbscn_def,
- &phba->sli4_hba.bbscn_params);
- vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
- vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4);
- }
+ if (vport->port_state != LPFC_FLOGI)
lpfc_initial_flogi(vport);
- } else if (vport->fc_flag & FC_PT2PT) {
+ else if (vport->fc_flag & FC_PT2PT)
lpfc_disc_start(vport);
- }
+
return;
out:
@@ -3456,8 +3452,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag);
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1313 Link Down UNEXP WWPN Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "1313 Link Down Unexpected FA WWPN Event x%x "
+ "received Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
@@ -4046,7 +4042,7 @@ out:
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -4575,8 +4571,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp;
free_rpi:
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_free_rpi(vport->phba, rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ }
return NULL;
}
@@ -4835,12 +4833,51 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
/*
+ * Sets the mailbox completion handler to be used for the
+ * unreg_rpi command. The handler varies based on the state of
+ * the port and what will be happening to the rpi next.
+ */
+static void
+lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+
+ } else if (phba->sli_rev == LPFC_SLI_REV4 &&
+ (!(vport->load_flag & FC_UNLOADING)) &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (kref_read(&ndlp->kref) > 0)) {
+ mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
+ } else {
+ if (vport->load_flag & FC_UNLOADING) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ lpfc_nlp_get(ndlp);
+ }
+ mbox->ctx_ndlp = ndlp;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+}
+
+/*
* Free rpi associated with LPFC_NODELIST entry.
* This routine is called from lpfc_freenode(), when we are removing
* a LPFC_NODELIST entry. It is also called if the driver initiates a
@@ -4860,7 +4897,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"3366 RPI x%x needs to be "
"unregistered nlp_flag x%x "
"did x%x\n",
@@ -4871,7 +4909,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* no need to queue up another one.
*/
if (ndlp->nlp_flag & NLP_UNREG_INP) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
"Data: x%px\n",
@@ -4890,39 +4929,19 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
mbox->vport = vport;
- if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4 &&
- (!(vport->load_flag & FC_UNLOADING)) &&
- (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) >=
- LPFC_SLI_INTF_IF_TYPE_2) &&
- (kref_read(&ndlp->kref) > 0)) {
- mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
- mbox->mbox_cmpl =
- lpfc_sli4_unreg_rpi_cmpl_clr;
- /*
- * accept PLOGIs after unreg_rpi_cmpl
- */
- acc_plogi = 0;
- } else if (vport->load_flag & FC_UNLOADING) {
- mbox->ctx_ndlp = NULL;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- } else {
- mbox->ctx_ndlp = ndlp;
- mbox->mbox_cmpl =
- lpfc_sli_def_mbox_cmpl;
- }
- }
+ lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
+ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
+ /*
+ * accept PLOGIs after unreg_rpi_cmpl
+ */
+ acc_plogi = 0;
if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
Fabric_DID_MASK) &&
(!(vport->fc_flag & FC_OFFLINE_MODE)))
ndlp->nlp_flag |= NLP_UNREG_INP;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
"NPort x%x deferred flg x%x "
"Data:x%px\n",
@@ -5057,6 +5076,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
+ unsigned long iflags;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5138,8 +5158,20 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
- lpfc_unreg_rpi(vport, ndlp);
-
+ if (!lpfc_unreg_rpi(vport, ndlp)) {
+ /* Clean up unregistered and non freed rpis */
+ if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
+ !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
+ lpfc_sli4_free_rpi(vport->phba,
+ ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock,
+ iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock,
+ iflags);
+ }
+ }
return 0;
}
@@ -5165,8 +5197,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
+ "ref %d map:x%x ndlp x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5203,8 +5237,9 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0940 removed node x%px DID x%x "
- " rport not null x%px\n",
- ndlp, ndlp->nlp_DID, ndlp->rport);
+ "rpi %d rport not null x%px\n",
+ ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
+ ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
rdata->pnode = NULL;
@@ -5362,6 +5397,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (!ndlp)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6453 Setup New Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5375,6 +5417,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
"0014 Could not enable ndlp\n");
return NULL;
}
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6454 Setup Enabled Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5394,6 +5442,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6455 Setup RSCN Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* NVME Target mode waits until rport is known to be
* impacted by the RSCN before it transitions. No
* active management - just go to NPR provided the
@@ -5405,15 +5459,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
- if (ndlp->nlp_flag & NLP_RCV_PLOGI)
+ if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
+ !(ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6456 Skip Setup RSCN Node x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
ndlp = NULL;
+ }
} else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6457 Setup Active Node 2B_DISC x%x "
+ "Data:x%x x%x x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_state, vport->fc_flag);
+
/* If the initiator received a PLOGI from this NPort or if the
* initiator is already in the process of discovery on it,
* there's no need to try to discover it again.
@@ -5565,10 +5636,10 @@ lpfc_disc_start(struct lpfc_vport *vport)
/* Start Discovery state <hba_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0202 Start Discovery hba state x%x "
- "Data: x%x x%x x%x\n",
+ "0202 Start Discovery port state x%x "
+ "flg x%x Data: x%x x%x x%x\n",
vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
- vport->fc_adisc_cnt);
+ vport->fc_adisc_cnt, vport->fc_npr_cnt);
/* First do ADISCs - if any */
num_sent = lpfc_els_disc_adisc(vport);
@@ -5996,7 +6067,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
"0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
@@ -6185,12 +6256,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
INIT_LIST_HEAD(&ndlp->nlp_listp);
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
- ndlp->nlp_flag,
- kref_read(&ndlp->kref),
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
+ "0007 Init New ndlp x%px, rpi:x%x DID:%x "
+ "flg:x%x refcnt:%d map:x%x\n",
+ ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map);
ndlp->active_rrqs_xri_bitmap =
mempool_alloc(vport->phba->active_rrq_pool,
@@ -6419,7 +6490,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
goto out;
} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
ret = 1;
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2624 RPI %x DID %x flag %x "
"still logged in\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
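Hoisting the completion-handler choice into lpfc_set_unreg_login_mbx_cmpl() lets lpfc_unreg_rpi() key its follow-up (deferring PLOGI acceptance) off the selected function pointer instead of re-testing every condition. A reduced sketch of the idiom; it deliberately omits the SLI4 interface-type and kref checks of the real helper:

    /* Reduced selection helper; handler names as in the patch above */
    static void pick_unreg_cmpl(struct lpfc_vport *vport,
                                struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
    {
        if (ndlp->nlp_flag & NLP_ISSUE_LOGO)
            mbox->mbox_cmpl = lpfc_nlp_logo_unreg;      /* LOGO in flight */
        else if (vport->load_flag & FC_UNLOADING)
            mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;   /* teardown path */
        else
            mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
        mbox->ctx_ndlp = ndlp;
    }

    /* The caller then branches on the choice, as lpfc_unreg_rpi() now does:
     *     if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
     *             acc_plogi = 0;
     */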
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bd533475c86a..25cdcbc2b02f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -210,7 +210,6 @@ struct lpfc_sli_intf {
#define LPFC_MAX_IMAX 5000000
#define LPFC_DEF_IMAX 0
-#define LPFC_IMAX_THRESHOLD 1000
#define LPFC_MAX_AUTO_EQ_DELAY 120
#define LPFC_EQ_DELAY_STEP 15
#define LPFC_EQD_ISR_TRIGGER 20000
@@ -2320,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B
+#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58
struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -2809,6 +2809,15 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_trunk_SHIFT 12
#define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F
#define lpfc_mbx_rd_conf_trunk_WORD word2
+#define lpfc_mbx_rd_conf_pt_SHIFT 20
+#define lpfc_mbx_rd_conf_pt_MASK 0x00000003
+#define lpfc_mbx_rd_conf_pt_WORD word2
+#define lpfc_mbx_rd_conf_tf_SHIFT 22
+#define lpfc_mbx_rd_conf_tf_MASK 0x00000001
+#define lpfc_mbx_rd_conf_tf_WORD word2
+#define lpfc_mbx_rd_conf_ptv_SHIFT 23
+#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001
+#define lpfc_mbx_rd_conf_ptv_WORD word2
#define lpfc_mbx_rd_conf_topology_SHIFT 24
#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -3479,6 +3488,9 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_pvl_SHIFT 13
+#define cfg_pvl_MASK 0x00000001
+#define cfg_pvl_WORD word19
#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
@@ -3518,6 +3530,7 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x11
+#define LPFC_SET_DUAL_DUMP 0x1e
struct lpfc_mbx_set_feature {
struct mbox_header header;
uint32_t feature;
@@ -3532,6 +3545,15 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+#define lpfc_mbx_set_feature_dd_SHIFT 0
+#define lpfc_mbx_set_feature_dd_MASK 0x00000001
+#define lpfc_mbx_set_feature_dd_WORD word6
+#define lpfc_mbx_set_feature_ddquery_SHIFT 1
+#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001
+#define lpfc_mbx_set_feature_ddquery_WORD word6
+#define LPFC_DISABLE_DUAL_DUMP 0
+#define LPFC_ENABLE_DUAL_DUMP 1
+#define LPFC_QUERY_OP_DUAL_DUMP 2
uint32_t word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
@@ -4261,6 +4283,8 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
#define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA
+#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
+#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
};
/*
@@ -4659,6 +4683,7 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
+#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4807,8 +4832,8 @@ union lpfc_wqe128 {
struct send_frame_wqe send_frame;
};
-#define MAGIC_NUMER_G6 0xFEAA0003
-#define MAGIC_NUMER_G7 0xFEAA0005
+#define MAGIC_NUMBER_G6 0xFEAA0003
+#define MAGIC_NUMBER_G7 0xFEAA0005
struct lpfc_grp_hdr {
uint32_t size;
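The pt/tf/ptv definitions added above follow lpfc's register-description convention: each field in a mailbox word is described by a SHIFT/MASK/WORD triplet and read back through bf_get(), which reduces to an ordinary shift-and-mask. A generic illustration (the helper macro and function here are ours, not lpfc's):

    /* Generic shift-and-mask read mirroring the SHIFT/MASK/WORD triplets */
    #define RD_FIELD(word, shift, mask)  (((word) >> (shift)) & (mask))

    static inline u8 read_conf_pt(u32 word2)
    {
        /* pt occupies bits 21:20 of word2 (SHIFT 20, MASK 0x3) */
        return RD_FIELD(word2, lpfc_mbx_rd_conf_pt_SHIFT,
                        lpfc_mbx_rd_conf_pt_MASK);
    }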
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e8813d26e594..dc6f7c4b54c6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -40,6 +40,8 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -66,9 +68,13 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
+static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -1235,10 +1241,9 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
struct lpfc_hba, eq_delay_work);
struct lpfc_eq_intr_info *eqi, *eqi_new;
struct lpfc_queue *eq, *eq_next;
- unsigned char *eqcnt = NULL;
+ unsigned char *ena_delay = NULL;
uint32_t usdelay;
int i;
- bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1247,44 +1252,36 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
phba->pport->fc_flag & FC_OFFLINE_MODE)
goto requeue;
- eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
- GFP_KERNEL);
- if (!eqcnt)
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+ GFP_KERNEL);
+ if (!ena_delay)
goto requeue;
- if (phba->cfg_irq_chann > 1) {
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (!eq)
- continue;
- if (eq->q_mode) {
- update = true;
- break;
- }
- if (eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+ ena_delay[eq->last_cpu] = 1;
}
- } else
- update = true;
+ }
for_each_present_cpu(i) {
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
- if (!update && eqcnt[i] < 2) {
- eqi->icnt = 0;
- continue;
+ if (ena_delay[i]) {
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+ } else {
+ usdelay = 0;
}
- usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
- LPFC_EQ_DELAY_STEP;
- if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
- usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
eqi->icnt = 0;
list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
- if (eq->last_cpu != i) {
+ if (unlikely(eq->last_cpu != i)) {
eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
eq->last_cpu);
list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1296,7 +1293,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
}
}
- kfree(eqcnt);
+ kfree(ena_delay);
requeue:
queue_delayed_work(phba->wq, &phba->eq_delay_work,
@@ -3053,11 +3050,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
continue;
}
ndlp->nlp_rpi = rpi;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0009 rpi:%x DID:%x "
- "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
- ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_usg_map, ndlp);
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0009 Assign RPI x%x to ndlp x%px "
+ "DID:x%06x flg:x%x map:x%x\n",
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3387,6 +3385,8 @@ lpfc_online(struct lpfc_hba *phba)
if (phba->cfg_xri_rebalancing)
lpfc_create_multixri_pools(phba);
+ lpfc_cpuhp_add(phba);
+
lpfc_unblock_mgmt_io(phba);
return 0;
}
@@ -3453,10 +3453,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
list_for_each_entry_safe(ndlp, next_ndlp,
&vports[i]->fc_nodes,
nlp_listp) {
- if (!NLP_CHK_NODE_ACT(ndlp))
- continue;
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ /* Driver must assume RPI is invalid for
+ * any unused or inactive node.
+ */
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
continue;
+ }
+
if (ndlp->nlp_type & NLP_FABRIC) {
lpfc_disc_state_machine(vports[i], ndlp,
NULL, NLP_EVT_DEVICE_RECOVERY);
@@ -3472,16 +3477,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
* comes back online.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- lpfc_printf_vlog(ndlp->vport,
- KERN_INFO, LOG_NODE,
- "0011 lpfc_offline: "
- "ndlp:x%px did %x "
- "usgmap:x%x rpi:%x\n",
- ndlp, ndlp->nlp_DID,
- ndlp->nlp_usg_map,
- ndlp->nlp_rpi);
-
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0011 Free RPI x%x on "
+ "ndlp:x%px did x%x "
+ "usgmap:x%x\n",
+ ndlp->nlp_rpi, ndlp,
+ ndlp->nlp_DID,
+ ndlp->nlp_usg_map);
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
}
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -3545,6 +3550,7 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -5283,10 +5289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
evt_type = bf_get(lpfc_trailer_type, acqe_sli);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
+ "x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- evt_type);
+ acqe_sli->reserved, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -5433,11 +5439,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"Event Data1:x%08x Event Data2: x%08x\n",
acqe_sli->event_data1, acqe_sli->event_data2);
break;
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
+ /* Misconfigured WWN. Reports that the SLI Port is configured
+ * to use FA-WWN, but the attached device doesn't support it.
+ * No driver action is required.
+ * Event Data1 - N.A, Event Data2 - N.A
+ */
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
+ "2699 Misconfigured FA-WWN - Attached device does "
+ "not support FA-WWN\n");
+ break;
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
+ /* EEPROM failure. No driver action is required */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2518 EEPROM failure - "
+ "Event Data1: x%08x Event Data2: x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d\n",
- acqe_sli->event_data1, acqe_sli->event_data2,
+ "3193 Unrecognized SLI event, type: 0x%x",
evt_type);
break;
}
@@ -5976,6 +5997,29 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
+ * lpfc_cpumask_of_node_init - initializes cpumask of phba's NUMA node
+ * @phba: Pointer to HBA context object.
+ *
+ **/
+static void
+lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, numa_node;
+ struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
+
+ cpumask_clear(numa_mask);
+
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE)
+ return;
+
+ for_each_possible_cpu(cpu)
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, numa_mask);
+}
+
+/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6418,6 +6462,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = num_possible_cpus();
phba->sli4_hba.curr_disp_cpu = 0;
+ lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6953,6 +6998,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
+ cpumask_clear(&phba->sli4_hba.numa_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -7126,7 +7172,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
if (iocbq_entry == NULL) {
printk(KERN_ERR "%s: only allocated %d iocbs of "
"expected %d count. Unloading driver.\n",
- __func__, i, LPFC_IOCB_LIST_CNT);
+ __func__, i, iocb_count);
goto out_free_iocbq;
}
@@ -7545,18 +7591,10 @@ lpfc_create_shost(struct lpfc_hba *phba)
if (phba->nvmet_support) {
/* Only 1 vport (pport) will support NVME target */
- if (phba->txrdy_payload_pool == NULL) {
- phba->txrdy_payload_pool = dma_pool_create(
- "txrdy_pool", &phba->pcidev->dev,
- TXRDY_PAYLOAD_LEN, 16, 0);
- if (phba->txrdy_payload_pool) {
- phba->targetport = NULL;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
- lpfc_printf_log(phba, KERN_INFO,
- LOG_INIT | LOG_NVME_DISC,
- "6076 NVME Target Found\n");
- }
- }
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
}
lpfc_debugfs_initialize(vport);
@@ -8235,6 +8273,94 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
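+/* Indexed by the FLAGS_TOPOLOGY_MODE_* value held in phba->cfg_topology */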
+static const char * const lpfc_topo_to_str[] = {
+ "Loop then P2P",
+ "Loopback",
+ "P2P Only",
+ "Unsupported",
+ "Loop Only",
+ "Unsupported",
+ "P2P then Loop",
+};
+
+/**
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
+ * @phba: pointer to lpfc hba data structure.
+ * @rdconf: pointer to read config data
+ *
+ * This routine is invoked to map the topology values as read
+ * from the read config mailbox command. If the persistent
+ * topology feature is supported, the firmware will provide the
+ * saved topology information to be used in INIT_LINK
+ *
+ **/
+#define LINK_FLAGS_DEF 0x0
+#define LINK_FLAGS_P2P 0x1
+#define LINK_FLAGS_LOOP 0x2
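+/* READ_CONFIG fields: ptv = persistent topology valid, tf = topology
+ * failover enabled, pt = the persisted topology value (P2P/loop flags)
+ */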
+static void
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
+{
+ u8 ptv, tf, pt;
+
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
+ ptv, tf, pt);
+ if (!ptv) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2019 FW does not support persistent topology "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ return;
+ }
+ /* FW supports persistent topology - override module parameter value */
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ if (tf || (pt == LINK_FLAGS_LOOP)) {
+ /* Invalid values from FW - use driver params */
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ } else {
+ /* Prism only supports PT2PT topology */
+ phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+ }
+ break;
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ if (!tf) {
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
+ ? FLAGS_TOPOLOGY_MODE_LOOP
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
+ } else {
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
+ }
+ break;
+ default: /* G5 */
+ if (tf) {
+ /* If topology failover set - pt is '0' or '1' */
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
+ } else {
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
+ : FLAGS_TOPOLOGY_MODE_LOOP);
+ }
+ break;
+ }
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2020 Using persistent topology value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "2021 Invalid topology values from FW "
+ "Using driver parameter defined value [%s]",
+ lpfc_topo_to_str[phba->cfg_topology]);
+ }
+}
+
/**
* lpfc_sli4_read_config - Get the config parameters.
* @phba: pointer to lpfc hba data structure.
@@ -8346,6 +8472,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
phba->max_vports = phba->max_vpi;
+ lpfc_map_topology(phba, rd_config);
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2003 cfg params Extents? %d "
"XRI(B:%d M:%d), "
@@ -8619,8 +8746,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
*/
if (phba->nvmet_support) {
- if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_irq_chann;
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
}
@@ -9160,6 +9287,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
}
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli4_cleanup_poll_list(phba);
+
/* Release HBA eqs */
if (phba->sli4_hba.hdwq)
lpfc_sli4_release_hdwq(phba);
@@ -10581,7 +10710,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
*/
if ((match == LPFC_FIND_BY_EQ) &&
(cpup->flag & LPFC_CPU_FIRST_IRQ) &&
- (cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
@@ -10619,6 +10747,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
}
#endif
+/**
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
+ * @phba: pointer to lpfc hba data structure.
+ * @eqidx: index for eq and irq vector
+ * @flag: flags to set for vector_map structure
+ * @cpu: cpu used to index vector_map structure
+ *
+ * The routine assigns eq info into vector_map structure
+ */
+static inline void
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
+ unsigned int cpu)
+{
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
+
+ cpup->eq = eqidx;
+ cpup->flag |= flag;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
+}
+
+/**
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the cpu_map array structure
+ */
+static void
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_eq_intr_info *eqi;
+ int cpu;
+
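+ /* Start every entry empty; eq/hdwq assignments are made at vector setup */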
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
+ INIT_LIST_HEAD(&eqi->list);
+ eqi->icnt = 0;
+ }
+}
+
+/**
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * The routine initializes the hba_eq_hdl array structure
+ */
+static void
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
+{
+ struct lpfc_hba_eq_hdl *eqhdl;
+ int i;
+
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ eqhdl = lpfc_get_eq_hdl(i);
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->phba = phba;
+ }
+}
+
/**
* lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
@@ -10637,22 +10834,10 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
struct lpfc_vector_map_info *new_cpup;
- const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- /* Init cpu_map array */
- for_each_possible_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
- cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
- cpup->eq = LPFC_VECTOR_MAP_EMPTY;
- cpup->irq = LPFC_VECTOR_MAP_EMPTY;
- cpup->flag = 0;
- }
-
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
@@ -10688,65 +10873,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
min_core_id = cpup->core_id;
}
- for_each_possible_cpu(i) {
- struct lpfc_eq_intr_info *eqi =
- per_cpu_ptr(phba->sli4_hba.eq_info, i);
-
- INIT_LIST_HEAD(&eqi->list);
- eqi->icnt = 0;
- }
-
- /* This loop sets up all CPUs that are affinitized with a
- * irq vector assigned to the driver. All affinitized CPUs
- * will get a link to that vectors IRQ and EQ.
- *
- * NULL affinity mask handling:
- * If irq count is greater than one, log an error message.
- * If the null mask is received for the first irq, find the
- * first present cpu, and assign the eq index to ensure at
- * least one EQ is assigned.
- */
- for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- /* Get a CPU mask for all CPUs affinitized to this vector */
- maskp = pci_irq_get_affinity(phba->pcidev, idx);
- if (!maskp) {
- if (phba->cfg_irq_chann > 1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 No affinity mask found "
- "for vector %d (%d)\n",
- idx, phba->cfg_irq_chann);
- if (!idx) {
- cpu = cpumask_first(cpu_present_mask);
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- }
- break;
- }
-
- i = 0;
- /* Loop through all CPUs associated with vector idx */
- for_each_cpu_and(cpu, maskp, cpu_present_mask) {
- /* Set the EQ index and IRQ for that vector */
- cpup = &phba->sli4_hba.cpu_map[cpu];
- cpup->eq = idx;
- cpup->irq = pci_irq_vector(phba->pcidev, idx);
-
- /* If this is the first CPU thats assigned to this
- * vector, set LPFC_CPU_FIRST_IRQ.
- */
- if (!i)
- cpup->flag |= LPFC_CPU_FIRST_IRQ;
- i++;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d flag x%x\n",
- cpu, cpup->irq, cpup->eq, cpup->flag);
- }
- }
-
/* After looking at each irq vector assigned to this pcidev, it's
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
@@ -10772,7 +10898,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
new_cpu = cpumask_next(
@@ -10785,7 +10911,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
found_same:
/* We found a matching phys_id, so copy the eq info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10797,9 +10922,10 @@ found_same:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
- "irq %d from id %d same "
+ "eq %d from peer cpu %d same "
"phys_id (%d)\n",
- cpu, cpup->irq, new_cpu, cpup->phys_id);
+ cpu, cpup->eq, new_cpu,
+ cpup->phys_id);
}
}
@@ -10823,7 +10949,7 @@ found_same:
for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
- (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
new_cpu = cpumask_next(
new_cpu, cpu_present_mask);
@@ -10833,13 +10959,12 @@ found_same:
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3339 Set Affinity: CPU %d "
- "irq %d UNASSIGNED\n",
- cpup->hdwq, cpup->irq);
+ "eq %d UNASSIGNED\n",
+ cpup->hdwq, cpup->eq);
continue;
found_any:
/* We found an available entry, copy the eq info */
cpup->eq = new_cpup->eq;
- cpup->irq = new_cpup->irq;
/* Bump start_cpu to the next slot to minimize the
* chance of having multiple unassigned CPU entries
@@ -10851,8 +10976,8 @@ found_any:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
- "irq %d from id %d (%d/%d)\n",
- cpu, cpup->irq, new_cpu,
+ "eq %d from peer cpu %d (%d/%d)\n",
+ cpu, cpup->eq, new_cpu,
new_cpup->phys_id, new_cpup->core_id);
}
}
@@ -10873,11 +10998,11 @@ found_any:
idx++;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3333 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
}
- /* Finally we need to associate a hdwq with each cpu_map entry
+ /* Associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are fewer
* hardware queues than CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
@@ -10951,9 +11076,26 @@ found_any:
logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
- "hdwq %d eq %d irq %d flg x%x\n",
+ "hdwq %d eq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
- cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ cpup->hdwq, cpup->eq, cpup->flag);
+ }
+
+ /*
+ * Initialize the cpu_map slots for not-present cpus in case
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
+ */
+ idx = 0;
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
+ continue;
+
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3340 Set Affinity: not present "
+ "CPU %d hdwq %d\n",
+ cpu, cpup->hdwq);
}
/* The cpu_map array will be used later during initialization
@@ -10963,11 +11105,280 @@ found_any:
}
/**
+ * lpfc_cpuhp_get_eq - get the eqs that must be polled once @cpu goes offline
+ *
+ * @phba: pointer to lpfc hba data structure.
+ * @cpu: cpu going offline
+ * @eqlist: list on which to collect the eqs whose vectors will be shut down
+ */
+static void
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
+ struct list_head *eqlist)
+{
+ const struct cpumask *maskp;
+ struct lpfc_queue *eq;
+ cpumask_t tmp;
+ u16 idx;
+
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
+ if (!maskp)
+ continue;
+ /*
+ * if irq is not affinitized to the cpu going offline,
+ * then we don't need to poll the eq attached
+ * to it.
+ */
+ if (!cpumask_and(&tmp, maskp, cpumask_of(cpu)))
+ continue;
+ /* Get the cpus that are online and affinitized to
+ * this irq vector. If the count is more than one,
+ * cpuhp is not going to shut down this vector.
+ * Since this cpu has not gone offline yet, we
+ * need a count greater than one.
+ */
+ cpumask_and(&tmp, maskp, cpu_online_mask);
+ if (cpumask_weight(&tmp) > 1)
+ continue;
+
+ /* Now that we have an irq to shut down, get the eq
+ * mapped to this irq. Note: multiple hdwq's in
+ * the software can share an eq, but eventually
+ * only one eq will be mapped to this vector.
+ */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ list_add(&eq->_poll_list, eqlist);
+ }
+}
+
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+ /*
+ * unregistering the instance doesn't stop the polling
+ * timer. Wait for the poll timer to retire.
+ */
+ synchronize_rcu();
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
+{
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ return;
+
+ __lpfc_cpuhp_remove(phba);
+}
+
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return;
+
+ rcu_read_lock();
+
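+ /* If any eqs remain on the poll list, re-arm the hotplug poll timer */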
+ if (!list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ rcu_read_unlock();
+
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
+ &phba->cpuhp);
+}
+
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
+{
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ *retval = -EAGAIN;
+ return true;
+ }
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ *retval = 0;
+ return true;
+ }
+
+ /* proceed with the hotplug */
+ return false;
+}
+
+/**
+ * lpfc_irq_set_aff - set IRQ affinity
+ * @eqhdl: EQ handle
+ * @cpu: cpu to set affinity
+ *
+ **/
+static inline void
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
+{
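+ /* Pin the vector to @cpu; IRQ_NO_BALANCING keeps the OS from moving it */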
+ cpumask_clear(&eqhdl->aff_mask);
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_clear_aff - clear IRQ affinity
+ * @eqhdl: EQ handle
+ *
+ **/
+static inline void
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
+{
+ cpumask_clear(&eqhdl->aff_mask);
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+}
+
+/**
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
+ * @phba: pointer to HBA context object.
+ * @cpu: cpu going offline/online
+ * @offline: true, cpu is going offline. false, cpu is coming online.
+ *
+ * If cpu is going offline, we make a best effort to find the next online
+ * cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ *
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
+ *
+ * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
+ *
+ **/
+static void
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
+{
+ struct lpfc_vector_map_info *cpup;
+ struct cpumask *aff_mask;
+ unsigned int cpu_select, cpu_next, idx;
+ const struct cpumask *numa_mask;
+
+ if (!phba->cfg_irq_numa)
+ return;
+
+ numa_mask = &phba->sli4_hba.numa_mask;
+
+ if (!cpumask_test_cpu(cpu, numa_mask))
+ return;
+
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ return;
+
+ if (offline) {
+ /* Find next online CPU on NUMA node */
+ cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+
+ /* Found a valid CPU */
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
+ /* Go through each eqhdl and ensure the offlining
+ * cpu's aff_mask is migrated
+ */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ aff_mask = lpfc_get_aff_mask(idx);
+
+ /* Migrate affinity */
+ if (cpumask_test_cpu(cpu, aff_mask))
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
+ cpu_select);
+ }
+ } else {
+ /* Rely on irqbalance if no online CPUs left on NUMA */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
+ }
+ } else {
+ /* Migrate affinity back to this CPU */
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
+ }
+}
+
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ LIST_HEAD(eqlist);
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, true);
+
+ lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
+
+ /* start polling on these eq's */
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
+ list_del_init(&eq->_poll_list);
+ lpfc_sli4_start_polling(eq);
+ }
+
+ return 0;
+}
+
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
+ struct lpfc_queue *eq, *next;
+ unsigned int n;
+ int retval;
+
+ if (!phba) {
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
+ return 0;
+ }
+
+ if (__lpfc_cpuhp_checks(phba, &retval))
+ return retval;
+
+ lpfc_irq_rebalance(phba, cpu, false);
+
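+ /* Stop polling eqs that the newly-onlined cpu services by interrupt */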
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
+ if (n == cpu)
+ lpfc_sli4_stop_polling(eq);
+ }
+
+ return 0;
+}
+
+/**
* lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
+ * to cpus on the system.
+ *
+ * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
+ * the number of cpus on the same numa node as this adapter. The vectors are
+ * allocated without requesting OS affinity mapping. A vector will be
+ * allocated and assigned to each online and offline cpu. If the cpu is
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
+ * affinity will be set to the nearest peer cpu within the numa node that is
+ * online. If there are no online cpus within the numa node, affinity is not
+ * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
+ * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
+ * configured.
+ *
+ * If numa mode is not enabled and there is more than 1 vector allocated, then
+ * the driver relies on the managed irq interface where the OS assigns vector to
+ * cpu affinity. The driver will then use that affinity mapping to setup its
+ * cpu mapping table.
*
* Return codes
* 0 - successful
@@ -10978,13 +11389,31 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
+ const struct cpumask *numa_mask = NULL;
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
+ struct lpfc_hba_eq_hdl *eqhdl;
+ const struct cpumask *maskp;
+ bool first;
+ unsigned int flags = PCI_IRQ_MSIX;
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- rc = pci_alloc_irq_vectors(phba->pcidev,
- 1,
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ if (phba->cfg_irq_numa) {
+ numa_mask = &phba->sli4_hba.numa_mask;
+ cpu_cnt = cpumask_weight(numa_mask);
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
+
+ /* cpu: iterates over numa_mask including offline or online
+ * cpu_select: iterates over online numa_mask to set affinity
+ */
+ cpu = cpumask_first(numa_mask);
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else {
+ flags |= PCI_IRQ_AFFINITY;
+ }
+
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -10994,23 +11423,61 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ eqhdl = lpfc_get_eq_hdl(index);
+ name = eqhdl->handler_name;
memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl->idx = index;
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- name,
- &phba->sli4_hba.hba_eq_hdl[index]);
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
"request_irq failed (%d)\n", index, rc);
goto cfg_fail_out;
}
+
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
+
+ if (phba->cfg_irq_numa) {
+ /* If found a neighboring online cpu, set affinity */
+ if (cpu_select < nr_cpu_ids)
+ lpfc_irq_set_aff(eqhdl, cpu_select);
+
+ /* Assign EQ to cpu_map */
+ lpfc_assign_eq_map_info(phba, index,
+ LPFC_CPU_FIRST_IRQ,
+ cpu);
+
+ /* Iterate to next offline or online cpu in numa_mask */
+ cpu = cpumask_next(cpu, numa_mask);
+
+ /* Find next online cpu in numa_mask to set affinity */
+ cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ } else if (vectors == 1) {
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
+ cpu);
+ } else {
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
+
+ first = true;
+ /* Loop through all CPUs associated with vector index */
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ /* If this is the first CPU that's assigned to
+ * this vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ lpfc_assign_eq_map_info(phba, index,
+ first ?
+ LPFC_CPU_FIRST_IRQ : 0,
+ cpu);
+ if (first)
+ first = false;
+ }
+ }
}
if (vectors != phba->cfg_irq_chann) {
@@ -11020,17 +11487,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
phba->cfg_irq_chann, vectors);
if (phba->cfg_irq_chann > vectors)
phba->cfg_irq_chann = vectors;
- if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors))
- phba->cfg_nvmet_mrq = vectors;
}
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--)
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ for (--index; index >= 0; index--) {
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
+ }
/* Unconfigure MSI-X capability structure */
pci_free_irq_vectors(phba->pcidev);
@@ -11057,6 +11525,8 @@ static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
+ unsigned int cpu;
+ struct lpfc_hba_eq_hdl *eqhdl;
rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
@@ -11078,9 +11548,15 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
+
for (index = 0; index < phba->cfg_irq_chann; index++) {
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ eqhdl = lpfc_get_eq_hdl(index);
+ eqhdl->idx = index;
}
return 0;
@@ -11138,15 +11614,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
struct lpfc_hba_eq_hdl *eqhdl;
+ unsigned int cpu;
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
+ eqhdl = lpfc_get_eq_hdl(0);
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+
+ cpu = cpumask_first(cpu_present_mask);
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
+ cpu);
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl = lpfc_get_eq_hdl(idx);
eqhdl->idx = idx;
- eqhdl->phba = phba;
}
}
}
@@ -11168,14 +11650,14 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
/* Disable the currently initialized interrupt mode */
if (phba->intr_type == MSIX) {
int index;
+ struct lpfc_hba_eq_hdl *eqhdl;
/* Free up MSI-X multi-message vectors */
for (index = 0; index < phba->cfg_irq_chann; index++) {
- irq_set_affinity_hint(
- pci_irq_vector(phba->pcidev, index),
- NULL);
- free_irq(pci_irq_vector(phba->pcidev, index),
- &phba->sli4_hba.hba_eq_hdl[index]);
+ eqhdl = lpfc_get_eq_hdl(index);
+ lpfc_irq_clear_aff(eqhdl);
+ irq_set_affinity_hint(eqhdl->irq, NULL);
+ free_irq(eqhdl->irq, eqhdl);
}
} else {
free_irq(phba->pcidev->irq, phba);
@@ -11367,6 +11849,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Wait for completion of device XRI exchange busy */
lpfc_sli4_xri_exchange_busy_wait(phba);
+ /* per-phba callback de-registration for hotplug event */
+ lpfc_cpuhp_remove(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
@@ -11538,6 +12023,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -11589,13 +12075,10 @@ fcponly:
}
/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
+ * accommodate 512K and 1M IOs in a single nvme buf.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
@@ -12312,35 +12795,57 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
}
-static void
+static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
const struct firmware *fw)
{
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
+ int rc;
+
+ /* Three cases: (1) FW was not supported on the detected adapter.
+ * (2) FW update has been locked out administratively.
+ * (3) Some other error during FW update.
+ * In each case, an unmaskable message is written to the console
+ * for admin diagnosis.
+ */
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
- magic_number != MAGIC_NUMER_G6) ||
+ magic_number != MAGIC_NUMBER_G6) ||
(phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
- magic_number != MAGIC_NUMER_G7))
+ magic_number != MAGIC_NUMBER_G7)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3030 This firmware version is not supported on "
- "this HBA model. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
- else
+ "3030 This firmware version is not supported on"
+ " this HBA model. Device:%x Magic:%x Type:%x "
+ "ID:%x Size %d %zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EINVAL;
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
- "ID:%x Size %d %zd\n",
- phba->pcidev->device, magic_number, ftype, fid,
- fsize, fw->size);
+ "3021 Firmware downloads have been prohibited "
+ "by a system configuration setting on "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ phba->pcidev->device, magic_number, ftype, fid,
+ fsize, fw->size);
+ rc = -EACCES;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3022 FW Download failed. Add Status x%x "
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
+ "%zd\n",
+ offset, phba->pcidev->device, magic_number,
+ ftype, fid, fsize, fw->size);
+ rc = -EIO;
+ }
+ return rc;
}
-
/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
- * @phba: pointer to lpfc hba data structure.
+ * @context: pointer to lpfc hba data structure (the opaque context).
*
**/
static void
@@ -12409,8 +12914,12 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
if (rc) {
- lpfc_log_write_firmware_error(phba, offset,
- magic_number, ftype, fid, fsize, fw);
+ rc = lpfc_log_write_firmware_error(phba, offset,
+ magic_number,
+ ftype,
+ fid,
+ fsize,
+ fw);
goto release_out;
}
}
@@ -12430,9 +12939,12 @@ release_out:
}
release_firmware(fw);
out:
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.\n", rc);
- return;
+ if (rc < 0)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3062 Firmware update error, status %d.\n", rc);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3024 Firmware update success: size %d.\n", rc);
}
/**
@@ -12551,6 +13063,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
phba->pport = NULL;
lpfc_stop_port(phba);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
+
+ /* Init hba_eq_hdl array */
+ lpfc_hba_eq_hdl_array_init(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -12632,6 +13150,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Enable RAS FW log support */
lpfc_sli4_ras_setup(phba);
+ INIT_LIST_HEAD(&phba->poll_list);
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
+
return 0;
out_free_sysfs_attr:
@@ -13344,8 +13865,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
phba->cfg_fof = 1;
} else {
phba->cfg_fof = 0;
- if (phba->device_data_mem_pool)
- mempool_destroy(phba->device_data_mem_pool);
+ mempool_destroy(phba->device_data_mem_pool);
phba->device_data_mem_pool = NULL;
}
@@ -13450,11 +13970,24 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_present_cpu = num_present_cpus();
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "lpfc/sli4:online",
+ lpfc_cpu_online, lpfc_cpu_offline);
+ if (error < 0)
+ goto cpuhp_failure;
+ lpfc_cpuhp_state = error;
+
error = pci_register_driver(&lpfc_driver);
- if (error) {
- fc_release_transport(lpfc_transport_template);
- fc_release_transport(lpfc_vport_transport_template);
- }
+ if (error)
+ goto unwind;
+
+ return error;
+
+unwind:
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
+cpuhp_failure:
+ fc_release_transport(lpfc_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
return error;
}
@@ -13471,6 +14004,7 @@ lpfc_exit(void)
{
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
idr_destroy(&lpfc_hba_index);
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index ea10f03437f5..148d02a27b58 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -46,6 +46,23 @@
#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
+/* generate message by verbose log setting or by severity (KERN_WARNING or more severe) */
+#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \
+{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
+ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); }
+
+#define lpfc_log_msg(phba, level, mask, fmt, arg...) \
+do { \
+ { uint32_t log_verbose = (phba)->pport ? \
+ (phba)->pport->cfg_log_verbose : \
+ (phba)->cfg_log_verbose; \
+ if (((mask) & log_verbose) || (level[1] <= '4')) \
+ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
+ fmt, phba->brd_no, ##arg); \
+ } \
+} while (0)
+
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8abe933bad09..d1773c01d2b3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba,
if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+ !(phba->sli4_hba.pc_sli4_params.pls) &&
mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ae09bb863497..7082279e4c01 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -230,9 +230,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- dma_pool_destroy(phba->txrdy_payload_pool);
- phba->txrdy_payload_pool = NULL;
-
dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index fc6e4546d738..ae4359013846 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -279,6 +279,55 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
}
+/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
+ * @phba: pointer to lpfc hba data structure.
+ * @link_mbox: pointer to CONFIG_LINK mailbox object
+ *
+ * This routine is only called if we are SLI3, direct connect pt2pt
+ * mode and the remote NPort issues the PLOGI after link up.
+ */
+static void
+lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
+{
+ LPFC_MBOXQ_t *login_mbox;
+ MAILBOX_t *mb = &link_mbox->u.mb;
+ struct lpfc_iocbq *save_iocb;
+ struct lpfc_nodelist *ndlp;
+ int rc;
+
+ ndlp = link_mbox->ctx_ndlp;
+ login_mbox = link_mbox->context3;
+ save_iocb = login_mbox->context3;
+ link_mbox->context3 = NULL;
+ login_mbox->context3 = NULL;
+
+ /* Check for CONFIG_LINK error */
+ if (mb->mbxStatus) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
+ mb->mbxStatus);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ return;
+ }
+
+ /* Now that CONFIG_LINK has completed and our SID is configured,
+ * we can proceed with sending the PLOGI ACC.
+ */
+ rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4576 PLOGI ACC fails pt2pt discovery: %x\n",
+ rc);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ }
+
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ lpfc_sli_release_iocbq(phba, save_iocb);
+}
+
static int
lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -291,10 +340,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
- LPFC_MBOXQ_t *mbox;
+ LPFC_MBOXQ_t *link_mbox;
+ LPFC_MBOXQ_t *login_mbox;
+ struct lpfc_iocbq *save_iocb;
struct ls_rjt stat;
uint32_t vid, flag;
- int rc;
+ int rc, defer_acc;
memset(&stat, 0, sizeof (struct ls_rjt));
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -343,6 +394,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
else
ndlp->nlp_fcp_info |= CLASS3;
+ defer_acc = 0;
ndlp->nlp_class_sup = 0;
if (sp->cls1.classValid)
ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -354,7 +406,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
-
/* if already logged in, do implicit logout */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
@@ -396,6 +447,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ login_mbox = NULL;
+ link_mbox = NULL;
+ save_iocb = NULL;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -423,17 +478,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_issue_reg_vfi(vport);
else {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox == NULL)
+ defer_acc = 1;
+ link_mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!link_mbox)
goto out;
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_config_link(phba, link_mbox);
+ link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
+ link_mbox->vport = vport;
+ link_mbox->ctx_ndlp = ndlp;
+
+ save_iocb = lpfc_sli_get_iocbq(phba);
+ if (!save_iocb)
goto out;
- }
+ /* Save info from cmd IOCB used in rsp */
+ memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
+ sizeof(struct lpfc_iocbq));
}
lpfc_can_disctmo(vport);
@@ -448,8 +508,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
}
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!login_mbox)
goto out;
/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
@@ -457,21 +517,19 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unreg_rpi(vport, ndlp);
rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
- (uint8_t *) sp, mbox, ndlp->nlp_rpi);
- if (rc) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
+ if (rc)
goto out;
- }
/* ACC PLOGI rsp command needs to execute first,
- * queue this mbox command to be processed later.
+ * queue this login_mbox command to be processed later.
*/
- mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
+ login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
/*
- * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
+ * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
* command issued in lpfc_cmpl_els_acc().
*/
- mbox->vport = vport;
+ login_mbox->vport = vport;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
spin_unlock_irq(shost->host_lock);
@@ -484,8 +542,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
+ * This only applies to a fabric environment.
*/
- if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
+ (vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}
@@ -504,16 +564,47 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, mbox);
+ ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+ return 1;
+ }
+ if (defer_acc) {
+ /* So the order here should be:
+ * Issue CONFIG_LINK mbox
+ * CONFIG_LINK cmpl
+ * Issue PLOGI ACC
+ * PLOGI ACC cmpl
+ * Issue REG_LOGIN mbox
+ */
+
+ /* Save the REG_LOGIN mbox and the rcv IOCB copy for later */
+ link_mbox->context3 = login_mbox;
+ login_mbox->context3 = save_iocb;
+
+ /* Start the ball rolling by issuing CONFIG_LINK here */
+ rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ goto out;
return 1;
}
- rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
+
+ rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
if (rc)
- mempool_free(mbox, phba->mbox_mem_pool);
+ mempool_free(login_mbox, phba->mbox_mem_pool);
return 1;
out:
+ if (defer_acc)
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+ "4577 pt2pt discovery failure: %p %p %p\n",
+ save_iocb, link_mbox, login_mbox);
+ if (save_iocb)
+ lpfc_sli_release_iocbq(phba, save_iocb);
+ if (link_mbox)
+ mempool_free(link_mbox, phba->mbox_mem_pool);
+ if (login_mbox)
+ mempool_free(login_mbox, phba->mbox_mem_pool);
+
stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
@@ -2030,7 +2121,9 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
- if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr) &&
+ bf_get_be32(prli_conf, nvpr))
ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
else
ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a227e36cbdc2..db4a04a207ec 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -196,6 +196,46 @@ lpfc_nvme_cmd_template(void)
}
/**
+ * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
+ * @pwqeq: Pointer to command iocb.
+ * @xritag: Tag that uniquely identifies the local exchange resource.
+ * @opt: Option bits -
+ * bit 0 = inhibit sending abts on the link
+ *
+ * This function is called with hbalock held.
+ **/
+void
+lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
+{
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (opt & INHIBIT_ABORT)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ /* Abort specified xri tag, with the mask deliberately zeroed */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* Abort the IO associated with this outstanding exchange ID. */
+ wqe->abort_cmd.wqe_com.abort_tag = xritag;
+
+ /* iotag for the wqe completion. */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
+
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
+
+/**
* lpfc_nvme_create_queue -
* @lpfc_pnvme: Pointer to the driver's nvme instance data
* @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
@@ -1791,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
- union lpfc_wqe128 *abts_wqe;
unsigned long flags;
int ret_val;
@@ -1912,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Ready - mark outstanding as aborted by driver. */
nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* Complete prepping the abort wqe and issue to the FW. */
- abts_wqe = &abts_buf->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
- nvmereq_wqe->iocb.ulpClass);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_buf->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_buf->iocb_flag |= LPFC_IO_NVME;
@@ -2084,7 +2093,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
qp = lpfc_ncmd->hdwq;
- if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
+ if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "
"ox_id x%x on reqtag x%x\n",
@@ -2139,12 +2148,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
- /* Advertise how many hw queues we support based on fcp_io_sched */
- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
- lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
- else
- lpfc_nvme_template.max_hw_queues =
- phba->sli4_hba.num_present_cpu;
+ /* Advertise how many hw queues we support based on cfg_hdw_queue,
+ * which will not exceed cpu count.
+ */
+ lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9884228800a5..9dc9afe1c255 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
int cpu;
unsigned long iflag;
- if (ctxp->txrdy) {
- dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
- ctxp->txrdy_phys);
- ctxp->txrdy = NULL;
- ctxp->txrdy_phys = 0;
- }
-
if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
@@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -1958,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t *payload;
uint32_t size, oxid, sid, rc;
- fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
- oxid = be16_to_cpu(fc_hdr->fh_ox_id);
- if (!phba->targetport) {
+ if (!nvmebuf || !phba->targetport) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6154 LS Drop IO x%x\n", oxid);
+ "6154 LS Drop IO\n");
oxid = 0;
size = 0;
sid = 0;
@@ -1971,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
goto dropit;
}
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
payload = (uint32_t *)(nvmebuf->dbuf.virt);
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
@@ -2326,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
- ctxp->txrdy = NULL;
ctxp->offset = 0;
ctxp->phba = phba;
ctxp->size = size;
@@ -2401,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
d_buf = piocb->context2;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3015 LS Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
return;
@@ -2429,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
uint64_t isr_timestamp,
uint8_t cqflag)
{
+ if (!nvmebuf) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "3167 NVMET FCP Drop IO\n");
+ return;
+ }
if (phba->nvmet_support == 0) {
lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
return;
@@ -2595,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
struct scatterlist *sgel;
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
- uint32_t *txrdy;
dma_addr_t physaddr;
int i, cnt;
int do_pbde;
@@ -2757,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
&lpfc_treceive_cmd_template.words[3],
sizeof(uint32_t) * 9);
- /* Words 0 - 2 : The first sg segment */
- txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
- GFP_KERNEL, &physaddr);
- if (!txrdy) {
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6041 Bad txrdy buffer: oxid x%x\n",
- ctxp->oxid);
- return NULL;
- }
- ctxp->txrdy = txrdy;
- ctxp->txrdy_phys = physaddr;
- wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
- wqe->fcp_treceive.bde.addrLow =
- cpu_to_le32(putPaddrLow(physaddr));
- wqe->fcp_treceive.bde.addrHigh =
- cpu_to_le32(putPaddrHigh(physaddr));
+ /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
+ wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
+ wqe->fcp_treceive.bde.addrLow = 0;
+ wqe->fcp_treceive.bde.addrHigh = 0;
/* Word 4 */
wqe->fcp_treceive.relative_offset = ctxp->offset;
@@ -2808,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Word 12 */
wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
- /* Setup 1 TXRDY and 1 SKIP SGE */
- txrdy[0] = 0;
- txrdy[1] = cpu_to_be32(rsp->transfer_length);
- txrdy[2] = 0;
-
- sgl->addr_hi = putPaddrHigh(physaddr);
- sgl->addr_lo = putPaddrLow(physaddr);
+ /* Setup 2 SKIP SGEs */
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
sgl->word2 = 0;
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+ sgl->sge_len = 0;
sgl++;
sgl->addr_hi = 0;
sgl->addr_lo = 0;
@@ -3239,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
{
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_iocbq *abts_wqeq;
- union lpfc_wqe128 *abts_wqe;
struct lpfc_nodelist *ndlp;
unsigned long flags;
+ u8 opt;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3280,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
return 0;
}
abts_wqeq = ctxp->abort_wqeq;
- abts_wqe = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;
+ opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3327,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* Ready - mark outstanding as aborted by driver. */
abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(abts_wqe, 0, sizeof(*abts_wqe));
-
- /* word 3 */
- bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
- /* word 7 */
- bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
- bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* word 8 - tell the FW to abort the IO associated with this
- * outstanding exchange ID.
- */
- abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
-
- /* word 9 - this is the iotag for the abts_wqe completion. */
- bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
- abts_wqeq->iotag);
-
- /* word 10 */
- bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- /* word 11 */
- bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
@@ -3495,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 8ff67deac10a..b80b1639b9a7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx {
struct lpfc_hba *phba;
struct lpfc_iocbq *wqeq;
struct lpfc_iocbq *abort_wqeq;
- dma_addr_t txrdy_phys;
spinlock_t ctxlock; /* protect flag access */
- uint32_t *txrdy;
uint32_t sid;
uint32_t offset;
uint16_t oxid;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6822cd9ff8f1..b138d9fee675 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -134,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
/**
* lpfc_update_stats - Update statistical data for the command completion
- * @phba: Pointer to HBA object.
+ * @vport: The virtual port on which this call is executing.
* @lpfc_cmd: lpfc scsi command object pointer.
*
* This function is called when there is a command completion and this
* function updates the statistical data for the command completion.
**/
static void
-lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
- struct Scsi_Host *shost = cmd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
unsigned long latency;
int i;
@@ -526,7 +526,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
&qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
list_del_init(&psb->list);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
@@ -566,7 +566,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
if (iocbq->sli4_xritag != xri)
continue;
psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
- psb->exch_busy = 0;
+ psb->flags &= ~LPFC_SBUF_XBUSY;
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (!list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
@@ -786,7 +786,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
psb->prot_seg_cnt = 0;
qp = psb->hdwq;
- if (psb->exch_busy) {
+ if (psb->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
@@ -3812,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
- if (!cmd) {
+ if (!cmd || !phba) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"2621 IO completion: Not an active IO\n");
spin_unlock(&lpfc_cmd->buf_lock);
@@ -3824,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
@@ -3835,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
- lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_cmd->prot_data_type) {
@@ -3869,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
#endif
- if (lpfc_cmd->status) {
+ if (unlikely(lpfc_cmd->status)) {
if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
(lpfc_cmd->result & IOERR_DRVR_MASK))
lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
@@ -4002,7 +4005,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(phba, lpfc_cmd);
+ lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4610,17 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err == 2) {
- cmnd->result = DID_ERROR << 16;
- goto out_fail_command_release_buf;
- } else if (err) {
+ if (unlikely(err)) {
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ }
goto out_host_busy_free_buf;
}
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
cpu = raw_smp_processor_id();
if (cpu < LPFC_CHECK_CPU_CNT) {
struct lpfc_sli4_hdw_queue *hdwq =
@@ -4843,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
abtsiocb, 0);
}
- /* no longer need the lock after this point */
- spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val == IOCB_ERROR) {
/* Indicate the IO is not being aborted by the driver. */
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
lpfc_cmd->waitq = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_sli_release_iocbq(phba, abtsiocb);
ret = FAILED;
goto out;
}
+ /* no longer need the lock after this point */
spin_unlock(&lpfc_cmd->buf_lock);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 614f78dddafe..c82b5792da98 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
+static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
+ struct lpfc_queue *cq,
+ struct lpfc_cqe *cqe);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
}
static void
-lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
- struct lpfc_eqe *eqe;
- uint32_t count = 0;
+ struct lpfc_eqe *eqe = NULL;
+ u32 eq_count = 0, cq_count = 0;
+ struct lpfc_cqe *cqe = NULL;
+ struct lpfc_queue *cq = NULL, *childq = NULL;
+ int cqid = 0;
/* walk all the EQ entries and drop on the floor */
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
+ /* Get the reference to the corresponding CQ */
+ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
+ cq = NULL;
+
+ list_for_each_entry(childq, &eq->child_list, list) {
+ if (childq->queue_id == cqid) {
+ cq = childq;
+ break;
+ }
+ }
+ /* If CQ is valid, iterate through it and drop all the CQEs */
+ if (cq) {
+ cqe = lpfc_sli4_cq_get(cq);
+ while (cqe) {
+ __lpfc_sli4_consume_cqe(phba, cq, cqe);
+ cq_count++;
+ cqe = lpfc_sli4_cq_get(cq);
+ }
+ /* Clear and re-arm the CQ */
+ phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
+ LPFC_QUEUE_REARM);
+ cq_count = 0;
+ }
__lpfc_sli4_consume_eqe(phba, eq, eqe);
- count++;
+ eq_count++;
eqe = lpfc_sli4_eq_get(eq);
}
/* Clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
-lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
+lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ uint8_t rearm)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
eq->queue_claimed = 0;
rearm_and_exit:
- /* Always clear and re-arm the EQ */
- phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
+ /* Always clear the EQ. */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
return count;
}
@@ -2526,6 +2557,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
__lpfc_sli_rpi_release(vport, ndlp);
}
+ if (vport->load_flag & FC_UNLOADING)
+ lpfc_nlp_put(ndlp);
pmb->ctx_ndlp = NULL;
}
}
@@ -2672,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x/x%x) Cmpl\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2693,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"(x%x/x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi : 0,
+ pmb->vport ? pmb->vport->vpi :
+ LPFC_VPORT_UNKNOWN,
pmbox->mbxCommand,
lpfc_sli_config_mbox_subsys_get(phba,
pmb),
@@ -2701,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
- pmb->vport->port_state);
+ pmb->vport ? pmb->vport->port_state :
+ LPFC_VPORT_UNKNOWN);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -6167,6 +6203,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
mbox->u.mqe.un.set_feature.param_len = 8;
break;
+ case LPFC_SET_DUAL_DUMP:
+ bf_set(lpfc_mbx_set_feature_dd,
+ &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
+ bf_set(lpfc_mbx_set_feature_ddquery,
+ &mbox->u.mqe.un.set_feature, 0);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
+ mbox->u.mqe.un.set_feature.param_len = 4;
+ break;
}
return;
@@ -6184,11 +6228,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
/* Disable FW logging to host memory */
writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Wait 10ms for firmware to stop using DMA buffer */
+ usleep_range(10 * 1000, 20 * 1000);
}
/**
@@ -6224,7 +6273,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
ras_fwlog->lwpd.virt = NULL;
}
- ras_fwlog->ras_active = false;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
}
/**
@@ -6326,7 +6377,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
goto disable_ras;
}
- ras_fwlog->ras_active = true;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
return;
@@ -6358,6 +6411,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
int rc = 0;
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = INACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+
fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
phba->cfg_ras_fwlog_buffsize);
fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
@@ -6417,6 +6474,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+ spin_lock_irq(&phba->hbalock);
+ ras_fwlog->state = REG_INPROGRESS;
+ spin_unlock_irq(&phba->hbalock);
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
@@ -7148,7 +7208,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc, i, cnt, len;
+ int rc, i, cnt, len, dd;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -7399,6 +7459,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
+ /* Always try to enable dual dump feature if we can */
+ lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
+ if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
+ "6448 Dual Dump is enabled\n");
+ else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
+ "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x dd:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(
+ phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(
+ phba, mboxq),
+ rc, dd);
/*
* Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
* calls depend on these resources to complete port setup.
@@ -7523,9 +7600,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
phba->sli4_hba.nvmet_xri_cnt = rc;
- cnt = phba->cfg_iocb_cnt * 1024;
- /* We need 1 iocbq for every SGL, for IO processing */
- cnt += phba->sli4_hba.nvmet_xri_cnt;
+ /* We allocate an iocbq for every receive context SGL.
+ * The additional allocation is for abort and ls handling.
+ */
+ cnt = phba->sli4_hba.nvmet_xri_cnt +
+ phba->sli4_hba.max_cfg_param.max_xri;
} else {
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_io_sgl_update(phba);
@@ -7547,14 +7626,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = -ENODEV;
goto out_destroy_queue;
}
- cnt = phba->cfg_iocb_cnt * 1024;
+ /* Each lpfc_io_buf job structure has an iocbq element.
+ * This cnt provides for abort, els, ct and ls requests.
+ */
+ cnt = phba->sli4_hba.max_cfg_param.max_xri;
}
if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
+ "2821 initialize iocb list with %d entries\n",
+ cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -7892,7 +7974,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -8964,7 +9046,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
- * This function is called with hbalock held to add a command
+ * This function is called with hbalock held for SLI3 ports or
+ * the ring lock held for SLI4 ports to add a command
* iocb to the txq when the SLI layer cannot submit the command iocb
* to the ring.
**/
@@ -8972,7 +9055,10 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- lockdep_assert_held(&phba->hbalock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lockdep_assert_held(&pring->ring_lock);
+ else
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
@@ -9863,7 +9949,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-4 interface spec.
*
- * This function is called with hbalock held. The function will return success
+ * This function is called with ringlock held. The function will return success
* after it successfully submit the iocb to firmware or after adding to the
* txq.
**/
@@ -10053,10 +10139,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *eq;
unsigned long iflags;
int rc;
if (phba->sli_rev == LPFC_SLI_REV4) {
+ eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
+
pring = lpfc_sli4_calc_ring(phba, piocb);
if (unlikely(pring == NULL))
return IOCB_ERROR;
@@ -10064,6 +10153,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -10678,14 +10769,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
prev_pring_flag = pring->flag;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txq, list) {
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
}
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
list_for_each_entry_safe(iocb, next_iocb,
&pring->txcmplq, list) {
if (iocb->vport != vport)
@@ -11050,9 +11141,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4]);
spin_unlock_irq(&phba->hbalock);
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
- lpfc_sli_release_iocbq(phba, abort_iocb);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11736,7 +11824,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
cur_iocbq);
- lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+ if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
}
pdone_q = cmdiocbq->context_un.wait_queue;
@@ -13158,13 +13249,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
+ return workposted;
+
out_no_mqe_complete:
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
- return workposted;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ return false;
}
/**
@@ -13217,7 +13314,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_sli_ring *pring = cq->pring;
int txq_cnt = 0;
int txcmplq_cnt = 0;
- int fcp_txcmplq_cnt = 0;
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
@@ -13239,9 +13335,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
- "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ "els_txcmplq_cnt=%d\n",
txq_cnt, phba->iocb_cnt,
- fcp_txcmplq_cnt,
txcmplq_cnt);
return false;
}
@@ -13592,6 +13687,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
LPFC_QUEUE_NOARM);
consumed = 0;
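+			/* mark the parent EQ as a candidate for interrupt coalescing */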
+ cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
}
if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -14220,7 +14316,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
- lpfc_sli4_eq_flush(phba, fpeq);
+ lpfc_sli4_eqcq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
@@ -14230,14 +14326,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
fpeq->last_cpu = raw_smp_processor_id();
if (icnt > LPFC_EQD_ISR_TRIGGER &&
- phba->cfg_irq_chann == 1 &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
/* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq);
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
fpeq->EQ_no_entry++;
@@ -14297,6 +14393,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
+void lpfc_sli4_poll_hbtimer(struct timer_list *t)
+{
+ struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
+ struct lpfc_queue *eq;
+ int i = 0;
+
+ rcu_read_lock();
+
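+	/* slow-path sweep: poll every eq currently on the poll list */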
+ list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
+ i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ if (!list_empty(&phba->poll_list))
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+
+ rcu_read_unlock();
+}
+
+inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
+{
+ struct lpfc_hba *phba = eq->phba;
+ int i = 0;
+
+	/*
+	 * Unlocking an irq is one of the entry points to check
+	 * for re-schedule, but we are fine on the io submission
+	 * path as the midlayer does a get_cpu to glue us in. Flush
+	 * out the invalidate queue so we can see the updated
+	 * value for the flag.
+	 */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+		/* We will likely not get the completion for the caller
+		 * during this iteration, but that's fine. Future io's
+		 * coming in on this eq should be able to pick it up.
+		 * Single io's will be handled through a sched from the
+		 * polling timer function, which is currently triggered
+		 * every 1 msec.
+		 */
+ i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+
+ return i;
+}
+
+static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ if (list_empty(&phba->poll_list)) {
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ /* kickstart slowpath processing for this eq */
+ mod_timer(&phba->cpuhp_poll_timer,
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
+ }
+
+ list_add_rcu(&eq->_poll_list, &phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+	/* Disable slowpath processing for this eq. Kick-start the eq
+	 * by re-arming it ASAP.
+	 */
+ list_del_rcu(&eq->_poll_list);
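+	/* wait until no rcu walker of the poll list can still see this eq */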
+ synchronize_rcu();
+
+ if (list_empty(&phba->poll_list))
+ del_timer_sync(&phba->cpuhp_poll_timer);
+}
+
+void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
+{
+ struct lpfc_queue *eq, *next;
+
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
+ list_del(&eq->_poll_list);
+
+ INIT_LIST_HEAD(&phba->poll_list);
+ synchronize_rcu();
+}
+
+static inline void
+__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
+{
+ if (mode == eq->mode)
+ return;
+	/*
+	 * Currently this function is only called during a hotplug
+	 * event and the cpu on which this function is executing
+	 * is going offline. By now the hotplug has instructed
+	 * the scheduler to remove this cpu from the cpu active mask.
+	 * So we don't need to worry about being put aside by the
+	 * scheduler for a high priority process. Yes, interrupts
+	 * could come in, but they are known to retire ASAP.
+	 */
+
+ /* Disable polling in the fastpath */
+ WRITE_ONCE(eq->mode, mode);
+ /* flush out the store buffer */
+ smp_wmb();
+
+	/*
+	 * Add this eq to the polling list and start polling. For
+	 * a grace period both the interrupt handler and the poller
+	 * will try to process the eq _but_ that's fine. We have a
+	 * synchronization mechanism in place (queue_claimed) to
+	 * deal with it. This is just a draining phase for the
+	 * interrupt handler (not the eq's) as we have guaranteed
+	 * through the barrier that all the CPUs have seen the new
+	 * CQ_POLLED state, which effectively disables the REARMING
+	 * of the EQ. The whole idea is that the eq's die off
+	 * eventually as we are not rearming EQ's anymore.
+	 */
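+	/* LPFC_EQ_POLL adds this eq to the poll list;
+	 * LPFC_EQ_INTERRUPT removes it.
+	 */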
+ mode ? lpfc_sli4_add_to_poll_list(eq) :
+ lpfc_sli4_remove_from_poll_list(eq);
+}
+
+void lpfc_sli4_start_polling(struct lpfc_queue *eq)
+{
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
+}
+
+void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
+
+	/* Kick start the pending io's in h/w.
+	 * Once we switch back to interrupt processing on an eq,
+	 * the io completion path will only arm the eq when it
+	 * receives a completion. But since the eq is in a
+	 * disarmed state it doesn't receive a completion; this
+	 * creates a deadlock scenario.
+	 */
+ phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
+}
+
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
@@ -14371,6 +14608,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
return NULL;
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->_poll_list);
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->wqfull_list);
INIT_LIST_HEAD(&queue->page_list);
@@ -18124,8 +18362,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "0001 rpi:%x max:%x lim:%x\n",
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
+ "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
(int) rpi, max_rpi, rpi_limit);
/*
@@ -18181,11 +18420,19 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
+ /*
+ * if the rpi value indicates a prior unreg has already
+ * been done, skip the unreg.
+ */
+ if (rpi == LPFC_RPI_ALLOC_ERROR)
+ return;
+
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
} else {
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NODE | LOG_DISCOVERY,
"2016 rpi %x not inuse\n",
rpi);
}
@@ -19683,6 +19930,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
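+	/* drain the eq inline if it is in polled mode
+	 * (no-op when interrupt driven)
+	 */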
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19703,6 +19952,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
@@ -19731,6 +19982,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
+ lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
return 0;
}
return WQE_ERROR;
@@ -20093,6 +20346,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
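+	/* return any extra SGL and cmd/rsp buffers to their per-hdwq
+	 * pools before the io_buf itself goes back on a free list
+	 */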
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
+
if (phba->cfg_xri_rebalancing) {
if (lpfc_ncmd->expedite) {
/* Return to expedite pool */
@@ -20157,13 +20417,6 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflag);
}
-
- if (phba->cfg_xpsgl && !phba->nvmet_support &&
- !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
- lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
-
- if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
- lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
}
/**
@@ -20399,8 +20652,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *allocated_sgl = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the sgl_list */
@@ -20412,9 +20666,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
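+		/* allocate on the NUMA node of this hdwq's channel rather
+		 * than the node of whatever cpu we happen to run on
+		 */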
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8353 error kmalloc memory for HDWQ "
@@ -20434,7 +20688,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
return NULL;
}
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
}
@@ -20442,7 +20696,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_sgl;
}
@@ -20466,8 +20720,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
struct sli4_hybrid_sgl *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->sgl_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20480,7 +20735,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20501,8 +20756,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->sgl_list;
struct sli4_hybrid_sgl *list_entry = NULL;
struct sli4_hybrid_sgl *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free sgl pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20514,7 +20770,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
@@ -20538,8 +20794,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *allocated_buf = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(buf_list))) {
/* break off 1 chunk from the list */
@@ -20552,9 +20809,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
}
} else {
/* allocate more */
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
- cpu_to_node(smp_processor_id()));
+ cpu_to_node(hdwq->io_wq->chann));
if (!tmp) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"8355 error kmalloc memory for HDWQ "
@@ -20579,7 +20836,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
sizeof(struct fcp_cmnd));
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
}
@@ -20587,7 +20844,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf,
list_node);
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return allocated_buf;
}
@@ -20612,8 +20869,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct fcp_cmd_rsp_buf *tmp = NULL;
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
list_for_each_entry_safe(list_entry, tmp,
@@ -20626,7 +20884,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
rc = -EINVAL;
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
return rc;
}
@@ -20647,8 +20905,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
struct fcp_cmd_rsp_buf *list_entry = NULL;
struct fcp_cmd_rsp_buf *tmp = NULL;
+ unsigned long iflags;
- spin_lock_irq(&hdwq->hdwq_lock);
+ spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
/* Free cmd_rsp buf pool */
list_for_each_entry_safe(list_entry, tmp,
@@ -20661,5 +20920,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
kfree(list_entry);
}
- spin_unlock_irq(&hdwq->hdwq_lock);
+ spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 37fbcb46387e..7bcf922a8be2 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -384,14 +384,13 @@ struct lpfc_io_buf {
struct lpfc_nodelist *ndlp;
uint32_t timeout;
- uint16_t flags; /* TBD convert exch_busy to flags */
+ uint16_t flags;
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */
- uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0d4882a9e634..d963ca871383 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -41,8 +41,13 @@
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_HDWQ_MIN 0
-#define LPFC_HBA_HDWQ_MAX 128
-#define LPFC_HBA_HDWQ_DEF 0
+#define LPFC_HBA_HDWQ_MAX 256
+#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN
+
+/* irq_chann range, values */
+#define LPFC_IRQ_CHANN_MIN 0
+#define LPFC_IRQ_CHANN_MAX 256
+#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN
/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
@@ -133,6 +138,23 @@ struct lpfc_rqb {
struct lpfc_queue {
struct list_head list;
struct list_head wq_list;
+
+	/*
+	 * If interrupts are in effect on _all_ the eq's, the footprint
+	 * of the polling code is zero (except for mode). This memory is
+	 * checked on every io to see if the io needs to be polled, and
+	 * on completion to check if the eq needs to be rearmed.
+	 * Keep it in the same cacheline as the queue ptr to avoid cpu
+	 * fetch stalls. Using 1B of memory would leave a 7B hole; fill
+	 * it with other frequently used members.
+	 */
+ uint16_t last_cpu; /* most recent cpu */
+ uint16_t hdwq;
+ uint8_t qe_valid;
+ uint8_t mode; /* interrupt or polling */
+#define LPFC_EQ_INTERRUPT 0
+#define LPFC_EQ_POLL 1
+
struct list_head wqfull_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
@@ -199,6 +221,7 @@ struct lpfc_queue {
uint8_t q_flag;
#define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
#define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */
#define LPFC_NVMET_CQ_NOTIFY 4
void __iomem *db_regaddr;
uint16_t dpp_enable;
@@ -239,10 +262,8 @@ struct lpfc_queue {
struct delayed_work sched_spwork;
uint64_t isr_timestamp;
- uint16_t hdwq;
- uint16_t last_cpu; /* most recent cpu */
- uint8_t qe_valid;
struct lpfc_queue *assoc_qp;
+ struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
};
@@ -451,11 +472,17 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ uint16_t irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
+ struct cpumask aff_mask;
};
+#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx])
+#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask)
+#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq)
+
/*BB Credit recovery value*/
struct lpfc_bbscn_params {
uint32_t word0;
@@ -513,6 +540,7 @@ struct lpfc_pc_sli4_params {
uint8_t cqav;
uint8_t wqsize;
uint8_t bv1s;
+ uint8_t pls;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
uint8_t wqpcnt;
@@ -544,11 +572,10 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-/* Used for IRQ vector to CPU mapping */
+/* Used for tracking CPU mapping attributes */
struct lpfc_vector_map_info {
uint16_t phys_id;
uint16_t core_id;
- uint16_t irq;
uint16_t eq;
uint16_t hdwq;
uint16_t flag;
@@ -891,6 +918,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
+ struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
uint32_t conf_trunk;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b8aae31ffda3..9e5ff58edaca 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.4.0.0"
+#define LPFC_DRIVER_VERSION "12.6.0.2"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 9c5566217ef6..b5dde9d0d054 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 59cca898f088..e83163c66884 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -41,10 +41,6 @@ static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
-#ifdef CONFIG_COMPAT
-static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
@@ -68,9 +64,7 @@ static wait_queue_head_t wait_q;
static const struct file_operations lsi_fops = {
.open = mraid_mm_open,
.unlocked_ioctl = mraid_mm_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mraid_mm_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
@@ -224,7 +218,6 @@ mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
{
int err;
- /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
mutex_lock(&mraid_mm_mutex);
err = mraid_mm_ioctl(filep, cmd, arg);
mutex_unlock(&mraid_mm_mutex);
@@ -1228,25 +1221,6 @@ mraid_mm_init(void)
}
-#ifdef CONFIG_COMPAT
-/**
- * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine
- * @filep : file operations pointer (ignored)
- * @cmd : ioctl command
- * @arg : user ioctl packet
- */
-static long
-mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg)
-{
- int err;
-
- err = mraid_mm_ioctl(filep, cmd, arg);
-
- return err;
-}
-#endif
-
/**
* mraid_mm_exit - Module exit point
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6e788c02ff4..bd8184072bed 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -24,6 +24,8 @@
#define MEGASAS_VERSION "07.710.50.00-rc1"
#define MEGASAS_RELDATE "June 28, 2019"
+#define MEGASAS_MSIX_NAME_LEN 32
+
/*
* Device IDs
*/
@@ -2203,6 +2205,7 @@ struct megasas_aen_event {
};
struct megasas_irq_context {
+ char name[MEGASAS_MSIX_NAME_LEN];
struct megasas_instance *instance;
u32 MSIxIndex;
u32 os_irq;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 42cf38c1ea99..c40fbea06cc5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5546,9 +5546,11 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
pdev = instance->pdev;
instance->irq_context[0].instance = instance;
instance->irq_context[0].MSIxIndex = 0;
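+	/* name the irq after the host number (e.g. "megasas0") so
+	 * adapters can be told apart in /proc/interrupts
+	 */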
+ snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
+ "megasas", instance->host->host_no);
if (request_irq(pci_irq_vector(pdev, 0),
instance->instancet->service_isr, IRQF_SHARED,
- "megasas", &instance->irq_context[0])) {
+ instance->irq_context->name, &instance->irq_context[0])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ from %s %d\n",
__func__, __LINE__);
@@ -5580,8 +5582,10 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
for (i = 0; i < instance->msix_vectors; i++) {
instance->irq_context[i].instance = instance;
instance->irq_context[i].MSIxIndex = i;
+ snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
+ "megasas", instance->host->host_no, i);
if (request_irq(pci_irq_vector(pdev, i),
- instance->instancet->service_isr, 0, "megasas",
+ instance->instancet->service_isr, 0, instance->irq_context[i].name,
&instance->irq_context[i])) {
dev_err(&instance->pdev->dev,
"Failed to register IRQ for vector %d.\n", i);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 50b8c1b12767..89c3685f5163 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -386,9 +386,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
le32_to_cpu(quad->diff))) == 0) {
if (span_blk != NULL) {
- u64 blk, debugBlk;
+ u64 blk;
blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
- debugBlk = blk;
blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
*span_blk = blk;
@@ -699,9 +698,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
u32 logArm, rowMod, armQ, arm;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
/* Get row and span from io_info for Uneven Span IO. */
@@ -801,9 +798,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u64 *pdBlock = &io_info->pdBlock;
__le16 *pDevHandle = &io_info->devHandle;
u8 *pPdInterface = &io_info->pd_interface;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
row = mega_div64_32(stripRow, raid->rowDataSize);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index fea3cb6a090b..848fbec7bda6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3044,11 +3044,11 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
descp = NULL;
ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
- ioc->msix_vector_count);
+ ioc->reply_queue_count);
i = pci_alloc_irq_vectors_affinity(ioc->pdev,
ioc->high_iops_queues,
- ioc->msix_vector_count, irq_flags, descp);
+ ioc->reply_queue_count, irq_flags, descp);
return i;
}
@@ -4242,10 +4242,12 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
- Mpi2FWImageHeader_t *FWImgHdr;
+ Mpi2FWImageHeader_t *fw_img_hdr;
+ Mpi26ComponentImageHeader_t *cmp_img_hdr;
Mpi25FWUploadRequest_t *mpi_request;
Mpi2FWUploadReply_t mpi_reply;
int r = 0;
+ u32 package_version = 0;
void *fwpkg_data = NULL;
dma_addr_t fwpkg_data_dma;
u16 smid, ioc_status;
@@ -4302,14 +4304,26 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
- if (FWImgHdr->PackageVersion.Word) {
- ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
- }
+ fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
+ if (le32_to_cpu(fw_img_hdr->Signature) ==
+ MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
+ cmp_img_hdr =
+ (Mpi26ComponentImageHeader_t *)
+ (fwpkg_data);
+ package_version =
+ le32_to_cpu(
+ cmp_img_hdr->ApplicationSpecific);
+ } else
+ package_version =
+ le32_to_cpu(
+ fw_img_hdr->PackageVersion.Word);
+ if (package_version)
+ ioc_info(ioc,
+ "FW Package Ver(%02d.%02d.%02d.%02d)\n",
+ ((package_version) & 0xFF000000) >> 24,
+ ((package_version) & 0x00FF0000) >> 16,
+ ((package_version) & 0x0000FF00) >> 8,
+ (package_version) & 0x000000FF);
} else {
_debug_dump_mf(&mpi_reply,
sizeof(Mpi2FWUploadReply_t)/4);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index faca0a5e71f8..4ebf81ea4d2f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "31.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 31
+#define MPT3SAS_DRIVER_VERSION "32.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 32
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -303,6 +303,8 @@ struct mpt3sas_nvme_cmd {
#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08)
+#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10)
/*
* HP HBA branding
@@ -391,9 +393,12 @@ struct Mpi2ManufacturingPage11_t {
u8 Reserved6; /* 2Fh */
__le32 Reserved7[7]; /* 30h - 4Bh */
u8 NVMeAbortTO; /* 4Ch */
- u8 Reserved8; /* 4Dh */
- u16 Reserved9; /* 4Eh */
- __le32 Reserved10[4]; /* 50h - 60h */
+ u8 NumPerDevEvents; /* 4Dh */
+ u8 HostTraceBufferDecrementSizeKB; /* 4Eh */
+ u8 HostTraceBufferFlags; /* 4Fh */
+ u16 HostTraceBufferMaxSizeKB; /* 50h */
+ u16 HostTraceBufferMinSizeKB; /* 52h */
+ __le32 Reserved10[2]; /* 54h - 5Bh */
};
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7d696952b376..6874cf017739 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -466,6 +466,13 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
+
+ /*
+ * add a log message to indicate the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer due to adapter reset.",
+ __func__);
mpt3sas_send_diag_release(ioc, i, &issue_reset);
}
}
@@ -778,6 +785,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ if (!ioc->pcie_sg_lookup) {
+ dtmprintk(ioc, ioc_info(ioc,
+ "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n"
+ ));
+
+ if (ioc->logging_level & MPT_DEBUG_TM)
+ _debug_dump_mf(nvme_encap_request,
+ ioc->request_sz/4);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
/*
* Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense
@@ -1484,6 +1503,26 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
return rc;
}
+/**
+ * _ctl_diag_get_bufftype - return diag buffer type
+ * either TRACE, SNAPSHOT, or EXTENDED
+ * @ioc: per adapter object
+ * @unique_id: specifies the unique_id for the buffer
+ *
+ * returns MPT3_DIAG_UID_NOT_FOUND if the id is not found
+ */
+static u8
+_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
+{
+ u8 index;
+
+ for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
+ if (ioc->unique_id[index] == unique_id)
+ return index;
+ }
+
+ return MPT3_DIAG_UID_NOT_FOUND;
+}
/**
* _ctl_diag_register_2 - wrapper for registering diag buffer support
@@ -1531,11 +1570,88 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
return -EPERM;
}
+ if (diag_register->unique_id == 0) {
+ ioc_err(ioc,
+ "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
+ diag_register->unique_id, buffer_type);
+ return -EINVAL;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
+ __func__, buffer_type, ioc->unique_id[buffer_type]);
+ return -EINVAL;
+ }
+
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- __func__, buffer_type);
- return -EINVAL;
+		/*
+		 * If the driver posts the buffer initially and an application
+		 * then wants to Register that buffer (own it) without
+		 * Releasing it first, the application's Register command MUST
+		 * use the same buffer type and size (obtained from the Query
+		 * command); otherwise the Register command will fail. If the
+		 * application has released the buffer but wants to
+		 * re-register it, that is allowed as long as the
+		 * Unique-Id/Size match.
+		 */
+
+ if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
+ ioc->diag_buffer_sz[buffer_type] ==
+ diag_register->requested_buffer_size) {
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ dctlprintk(ioc, ioc_info(ioc,
+ "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
+ __func__, buffer_type,
+ ioc->unique_id[buffer_type],
+ diag_register->unique_id));
+
+ /*
+ * Application wants to own the buffer with
+ * the same size.
+ */
+ ioc->unique_id[buffer_type] =
+ diag_register->unique_id;
+ rc = 0; /* success */
+ goto out;
+ }
+ } else if (ioc->unique_id[buffer_type] !=
+ MPT3DIAGBUFFUNIQUEID) {
+ if (ioc->unique_id[buffer_type] !=
+ diag_register->unique_id ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size ||
+ !(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED)) {
+ ioc_err(ioc,
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else {
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ } else if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+
+ if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
+ ioc->diag_buffer_sz[buffer_type] !=
+ diag_register->requested_buffer_size) {
+
+ ioc_err(ioc,
+			    "%s: a buffer is already allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with the same size\n",
+ __func__, buffer_type,
+ ioc->diag_buffer_sz[buffer_type]);
+ return -EINVAL;
+ }
}
if (diag_register->requested_buffer_size % 4) {
@@ -1560,7 +1676,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
request_data = ioc->diag_buffer[buffer_type];
request_data_sz = diag_register->requested_buffer_size;
ioc->unique_id[buffer_type] = diag_register->unique_id;
- ioc->diag_buffer_status[buffer_type] = 0;
+ ioc->diag_buffer_status[buffer_type] &=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
memcpy(ioc->product_specific[buffer_type],
diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
@@ -1584,7 +1701,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
__func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;
@@ -1649,9 +1767,12 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
- if (rc && request_data)
+ if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
return rc;
@@ -1669,6 +1790,10 @@ void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
struct mpt3_diag_register diag_register;
+ u32 ret_val;
+ u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
+ u32 min_trace_buff_size = 0;
+ u32 decr_trace_buff_size = 0;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
@@ -1677,10 +1802,68 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- /* register for 2MB buffers */
- diag_register.requested_buffer_size = 2 * (1024 * 1024);
- diag_register.unique_id = 0x7075900;
- _ctl_diag_register_2(ioc, &diag_register);
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
+
+ if (trace_buff_size != 0) {
+ diag_register.requested_buffer_size = trace_buff_size;
+ min_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
+ decr_trace_buff_size =
+ ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;
+
+ if (min_trace_buff_size > trace_buff_size) {
+ /* The buff size is not set correctly */
+ ioc_err(ioc,
+ "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
+ min_trace_buff_size>>10,
+ trace_buff_size>>10);
+ ioc_err(ioc,
+ "Using zero Min Trace Buff Size\n");
+ min_trace_buff_size = 0;
+ }
+
+ if (decr_trace_buff_size == 0) {
+			/*
+			 * If no decrement size is provided, retry
+			 * directly at the min size.
+			 */
+ decr_trace_buff_size =
+ trace_buff_size - min_trace_buff_size;
+ }
+ } else {
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ }
+
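+	/* retry the registration, shrinking the request by the decrement
+	 * size each pass, until it succeeds or the next step would fall
+	 * below the minimum size
+	 */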
+ do {
+ ret_val = _ctl_diag_register_2(ioc, &diag_register);
+
+ if (ret_val == -ENOMEM && min_trace_buff_size &&
+ (trace_buff_size - decr_trace_buff_size) >=
+ min_trace_buff_size) {
+ /* adjust the buffer size */
+ trace_buff_size -= decr_trace_buff_size;
+ diag_register.requested_buffer_size =
+ trace_buff_size;
+ } else
+ break;
+ } while (true);
+
+ if (ret_val == -ENOMEM)
+ ioc_err(ioc,
+ "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
+ diag_register.requested_buffer_size>>10);
+ else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
+ & MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
}
if (bits_to_register & 2) {
@@ -1723,6 +1906,12 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
rc = _ctl_diag_register_2(ioc, &karg);
+
+ if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ ioc->diag_buffer_status[karg.buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_APP_OWNED;
+
return rc;
}
@@ -1752,7 +1941,13 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -1785,12 +1980,21 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- request_data_sz = ioc->diag_buffer_sz[buffer_type];
- request_data_dma = ioc->diag_buffer_dma[buffer_type];
- dma_free_coherent(&ioc->pdev->dev, request_data_sz,
- request_data, request_data_dma);
- ioc->diag_buffer[buffer_type] = NULL;
- ioc->diag_buffer_status[buffer_type] = 0;
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
+ ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_REGISTERED;
+ } else {
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ }
return 0;
}
@@ -1829,14 +2033,17 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EPERM;
}
- if ((ioc->diag_buffer_status[buffer_type] &
- MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
- __func__, buffer_type);
- return -EINVAL;
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
}
- if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
__func__, karg.unique_id);
@@ -1851,13 +2058,21 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -ENOMEM;
}
- if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID);
- else
- karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
- MPT3_APP_FLAGS_BUFFER_VALID |
- MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;
+
+ if (!(ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
+ karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED))
+ karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
karg.product_specific[i] =
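
The query path now derives application_flags bit by bit from diag_buffer_status rather than choosing between two fixed combinations. The same mapping restated as a small pure function; the flag values come from mpt3sas_ctl.h below, while the status bit values here are illustrative only:

#include <linux/types.h>

#define ST_REGISTERED		0x01	/* illustrative status bits */
#define ST_RELEASED		0x02
#define ST_DRIVER_ALLOCATED	0x04
#define ST_APP_OWNED		0x08

#define APP_OWNED		0x0001	/* flag values from the header below */
#define BUFFER_VALID		0x0002
#define FW_BUFFER_ACCESS	0x0004
#define DYNAMIC_BUFFER_ALLOC	0x0008

static u16 status_to_app_flags(u32 st)
{
	u16 f = 0;

	if (st & ST_REGISTERED)
		f |= BUFFER_VALID;
	if (!(st & ST_RELEASED))
		f |= FW_BUFFER_ACCESS;		/* firmware still writing it */
	if (!(st & ST_DRIVER_ALLOCATED))
		f |= DYNAMIC_BUFFER_ALLOC;	/* app owns the allocation */
	if (st & ST_APP_OWNED)
		f |= APP_OWNED;
	return f;
}
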
@@ -2002,7 +2217,13 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2026,7 +2247,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED) {
ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
__func__, buffer_type);
- return 0;
+ return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
@@ -2086,7 +2307,13 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
dctlprintk(ioc, ioc_info(ioc, "%s\n",
__func__));
- buffer_type = karg.unique_id & 0x000000ff;
+ buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+ if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+ ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+ __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
if (!_ctl_diag_capability(ioc, buffer_type)) {
ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
__func__, buffer_type);
@@ -2210,6 +2437,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_RELEASED;
dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
@@ -3130,10 +3359,49 @@ host_trace_buffer_enable_store(struct device *cdev,
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
- diag_register.requested_buffer_size = (1024 * 1024);
- diag_register.unique_id = 0x7075900;
+
+ if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
+ /* post the same buffer allocated previously */
+ diag_register.requested_buffer_size =
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
+ } else {
+ /*
+ * Free the diag buffer memory which was previously
+ * allocated by an application.
+ */
+ if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
+ &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[
+ MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[
+ MPI2_DIAG_BUF_TYPE_TRACE]);
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
+ NULL;
+ }
+
+ diag_register.requested_buffer_size = (1024 * 1024);
+ }
+
+ diag_register.unique_id =
+ (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
_ctl_diag_register_2(ioc, &diag_register);
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ ioc_info(ioc,
+ "Trace buffer %d KB allocated through sysfs\n",
+ diag_register.requested_buffer_size>>10);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
+ ioc->diag_buffer_status[
+ MPI2_DIAG_BUF_TYPE_TRACE] |=
+ MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
+ }
} else if (!strcmp(str, "release")) {
/* exit out if host buffers are already released */
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
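
In prose, the "post" branch above now (1) reuses the previously negotiated size when the firmware advertises HostTraceBufferMaxSizeKB, (2) otherwise frees any leftover app-owned buffer and falls back to the 1 MB default, and (3) picks the default unique_id by MPI generation. A condensed sketch of that decision, with a hypothetical free helper:

#include <linux/types.h>

extern void free_prev_trace_buf(void);	/* hypothetical helper */

static u32 choose_trace_buf_size(u32 fw_max_kb, u32 prev_sz, bool app_owned)
{
	if (fw_max_kb && prev_sz)
		return prev_sz;		/* repost the same-sized buffer */
	if (prev_sz && app_owned)
		free_prev_trace_buf();	/* drop the app-owned leftover */
	return 1024 * 1024;		/* 1 MB default */
}
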
@@ -3702,12 +3970,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!ioc->diag_buffer[i])
continue;
- if (!(ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_REGISTERED))
- continue;
- if ((ioc->diag_buffer_status[i] &
- MPT3_DIAG_BUFFER_IS_RELEASED))
- continue;
dma_free_coherent(&ioc->pdev->dev,
ioc->diag_buffer_sz[i],
ioc->diag_buffer[i],
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 18b46faef6f1..0f7aa4ddade0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -95,6 +95,14 @@
#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
struct mpt3_diag_read_buffer)
+/* Trace Buffer default UniqueId */
+#define MPT2DIAGBUFFUNIQUEID (0x07075900)
+#define MPT3DIAGBUFFUNIQUEID (0x4252434D)
+
+/* UID not found */
+#define MPT3_DIAG_UID_NOT_FOUND (0xFF)
+
+
/**
* struct mpt3_ioctl_header - main header structure
* @ioc_number - IOC unit number
@@ -310,6 +318,7 @@ struct mpt3_ioctl_btdh_mapping {
#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008)
/* flags for mpt3_diag_read_buffer */
#define MPT3_FLAGS_REREGISTER (0x0001)
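
For completeness, a hypothetical userspace consumer: after the diag query ioctl, the new MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC bit tells an application whether it, rather than the driver, owns the backing allocation. Sketch with the ioctl plumbing elided:

#include <stdio.h>

#define MPT3_APP_FLAGS_APP_OWNED		(0x0001)
#define MPT3_APP_FLAGS_BUFFER_VALID		(0x0002)
#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS		(0x0004)
#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC	(0x0008)

/* Decode application_flags as filled in by the query path above. */
static void print_diag_flags(unsigned int flags)
{
	printf("valid:%d fw_access:%d app_owned:%d app_allocated:%d\n",
	       !!(flags & MPT3_APP_FLAGS_BUFFER_VALID),
	       !!(flags & MPT3_APP_FLAGS_FW_BUFFER_ACCESS),
	       !!(flags & MPT3_APP_FLAGS_APP_OWNED),
	       !!(flags & MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC));
}
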
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c8e512ba6d39..a038be8a0e90 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5161,7 +5161,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
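
The GFP change above matters because _scsih_smart_predicted_fault() can be reached from atomic context, where GFP_KERNEL is unsafe since it may sleep. A minimal illustration of the rule (generic kernel idiom, not driver code):

#include <linux/types.h>
#include <linux/slab.h>

/* GFP_KERNEL may sleep to reclaim memory; GFP_ATOMIC never sleeps and is
 * the only safe choice under a spinlock or in interrupt context. */
static void *alloc_event_reply(size_t sz, bool atomic_ctx)
{
	return kzalloc(sz, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}
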
@@ -10193,6 +10193,8 @@ scsih_scan_start(struct Scsi_Host *shost)
int rc;
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+ mpt3sas_enable_diag_buffer(ioc, 1);
if (disable_discovery > 0)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index 6ac453fd5937..8ec9bab20ec4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -113,15 +113,21 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
u8 issue_reset = 0;
+ u32 *trig_data = (u32 *)&event_data->u.master;
dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc,
- ioc_info(ioc, "%s: release trace diag buffer\n",
- __func__));
+ /*
+ * add a log message so that the user knows which event
+ * caused the release
+ */
+ ioc_info(ioc,
+ "%s: Releasing the trace buffer. Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n",
+ __func__, event_data->trigger_type,
+ trig_data[0], trig_data[1]);
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 3e0b8ebe257f..a920eced92ec 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1541,7 +1541,7 @@ out:
int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc;
struct mvs_tmf_task tmf_task;
tmf_task.tmf = TMF_ABORT_TASK_SET;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e0b427fdf818..11a2cb844ecb 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1722,7 +1722,7 @@ struct ncb {
** Miscellaneous configuration and status parameters.
**----------------------------------------------------------------
*/
- u_char disc; /* Diconnection allowed */
+ u_char disc; /* Disconnection allowed */
u_char scsi_mode; /* Current SCSI BUS mode */
u_char order; /* Tag order to use */
u_char verbose; /* Verbosity for this controller*/
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 70db79254155..b6e04d14292d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1542,7 +1542,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
* with ACK reply when below condition is matched:
* MsgIn 00: Command Complete.
* MsgIn 02: Save Data Pointer.
- * MsgIn 04: Diconnect.
+ * MsgIn 04: Disconnect.
* In other case, unexpected BUSFREE is detected.
*/
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig
index 2368f34efba3..dc9b74c9348a 100644
--- a/drivers/scsi/pcmcia/Kconfig
+++ b/drivers/scsi/pcmcia/Kconfig
@@ -32,7 +32,7 @@ config PCMCIA_FDOMAIN
config PCMCIA_NINJA_SCSI
tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support"
- depends on !64BIT
+ depends on !64BIT || COMPILE_TEST
help
If you intend to attach this type of PCMCIA SCSI host adapter to
your computer, say Y here and read
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 97416e1dcc5b..93616f9fd6d7 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -56,9 +56,7 @@
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
#include "nsp_io.h"
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 6b85016b4db3..7c6be2ec110d 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -70,6 +70,25 @@ static
DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
/**
+ * controller_fatal_error_show - check if the controller is under a fatal error
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read only' shost attribute.
+ */
+static ssize_t controller_fatal_error_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pm8001_ha->controller_fatal_error);
+}
+static DEVICE_ATTR_RO(controller_fatal_error);
+
+/**
* pm8001_ctl_fw_version_show - firmware version
* @cdev: pointer to embedded class device
* @buf: the buffer returned
@@ -804,6 +823,7 @@ static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_interface_rev,
+ &dev_attr_controller_fatal_error,
&dev_attr_fw_version,
&dev_attr_update_fw,
&dev_attr_aap_log,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 68a8217032d0..2328ff1349ac 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1186,7 +1186,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
{
s8 bar, logical = 0;
- for (bar = 0; bar < 6; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
@@ -1336,10 +1336,13 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
* @circularQ: the inbound queue we want to transfer to HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
+ * @nb: size in bytes of the command payload
+ * @responseQueue: queue to interrupt on w/ command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue)
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
@@ -1350,10 +1353,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
pm8001_printk("No free mpi buffer\n"));
return -ENOMEM;
}
- BUG_ON(!payload);
- /*Copy to the payload*/
- memcpy(pMessage, payload, (pm8001_ha->iomb_size -
- sizeof(struct mpi_msg_hdr)));
+
+ if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr)))
+ nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr);
+ memcpy(pMessage, payload, nb);
+ if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size)
+ memset(pMessage + nb, 0, pm8001_ha->iomb_size -
+ (nb + sizeof(struct mpi_msg_hdr)));
/*Build the header*/
Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
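
pm8001_mpi_build_cmd() now receives the payload size explicitly, clamps it to the IOMB slot, and zeroes the tail, where the old code always copied a full slot's worth from a possibly smaller source buffer. The core idiom isolated as a sketch (names are placeholders):

#include <linux/types.h>
#include <linux/string.h>

/* Copy at most 'slot' bytes of payload and zero any remainder, so stale
 * bytes from a previous message never leak into the new IOMB, and a
 * short payload never causes a read past its end. */
static void fill_iomb_payload(void *slot_buf, size_t slot,
			      const void *payload, size_t nb)
{
	if (nb > slot)
		nb = slot;
	memcpy(slot_buf, payload, nb);
	if (nb < slot)
		memset((char *)slot_buf + nb, 0, slot - nb);
}
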
@@ -1364,7 +1370,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
/*Update the PI to the firmware*/
pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
circularQ->pi_offset, circularQ->producer_idx);
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
responseQueue, opCode, circularQ->producer_idx,
circularQ->consumer_index));
@@ -1436,6 +1442,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
/* read header */
header_tmp = pm8001_read_32(msgHeader);
msgHeader_tmp = cpu_to_le32(header_tmp);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "outbound opcode msgheader:%x ci=%d pi=%d\n",
+ msgHeader_tmp, circularQ->consumer_idx,
+ circularQ->producer_index));
if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
if (OPC_OUB_SKIP_ENTRY !=
(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
@@ -1604,7 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
break;
default:
- pm8001_printk("...query task failed!!!\n");
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "...query task failed!!!\n"));
break;
});
@@ -1758,7 +1769,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
if (ret)
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1831,7 +1843,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1890,6 +1903,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("SAS Address of IO Failure Drive:"
"%016llx", SAS_ADDR(t->dev->sas_addr)));
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+
switch (status) {
case IO_SUCCESS:
PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
@@ -2072,7 +2090,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2125,7 +2143,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("port_id = %x,device_id = %x\n",
port_id, dev_id));
switch (event) {
@@ -2263,7 +2281,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2352,6 +2370,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (status)
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2652,7 +2676,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2723,7 +2747,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, dev_id, tag, event));
switch (event) {
@@ -2872,7 +2896,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_OPEN_TO;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2917,9 +2941,13 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
t = ccb->task;
ts = &t->task_status;
pm8001_dev = ccb->device;
- if (status)
+ if (status) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("smp IO status 0x%x\n", status));
+ PM8001_IOERR_DBG(pm8001_ha,
+ pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n",
+ status, tag, t));
+ }
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
@@ -3070,7 +3098,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -3355,7 +3383,8 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3416,7 +3445,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -3463,7 +3492,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
" phy id = %d\n", port_id, phy_id));
port->port_state = portstate;
@@ -3541,7 +3570,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" phy Down and(default) = %x\n",
portstate));
break;
@@ -3689,7 +3718,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
pm8001_printk(": FLASH_UPDATE_DISABLED\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("No matched status = %d\n", status));
break;
}
@@ -3805,8 +3834,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("outbound queue HW event & event type : "));
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n",
+ port_id, phy_id, eventType, status));
switch (eventType) {
case HW_EVENT_PHY_START_STATUS:
PM8001_MSG_DBG(pm8001_ha,
@@ -3990,7 +4020,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type = %x\n", eventType));
break;
}
@@ -4161,7 +4191,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n",
opc));
break;
@@ -4284,7 +4314,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
@@ -4352,7 +4382,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
return ret;
}
@@ -4461,7 +4492,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
return ret;
}
@@ -4496,7 +4528,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4551,8 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4577,7 +4611,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -4598,7 +4633,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
payload.device_id = cpu_to_le32(device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("unregister device device_id = %d\n", device_id));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4621,7 +4657,8 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4649,6 +4686,9 @@ static irqreturn_t
pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm8001_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm8001_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4672,7 +4712,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag = cpu_to_le32(cmd_tag);
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
return ret;
}
@@ -4729,7 +4770,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
if (pm8001_ha->chip_id != chip_8001)
sspTMCmd.ds_ads_m = 0x08;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
return ret;
}
@@ -4819,7 +4861,8 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4903,7 +4946,8 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
pm8001_tag_free(pm8001_ha, tag);
@@ -4938,7 +4982,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4960,6 +5005,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
if (!fw_control_context)
return -ENOMEM;
fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "dma fw_control context input length :%x\n", fw_control->len));
memcpy(buffer, fw_control->buffer, fw_control->len);
flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
@@ -5083,7 +5130,8 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
return rc;
}
@@ -5108,7 +5156,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3374f553c617..ff618ad80ebd 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -41,6 +41,19 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"
+#include "pm80xx_hwi.h"
+
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+module_param(logging_level, ulong, 0644);
+MODULE_PARM_DESC(logging_level, "bits for enabling logging info.");
+
+static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120;
+module_param(link_rate, ulong, 0644);
+MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
+ " 1: Link rate 1.5G\n"
+ " 2: Link rate 3.0G\n"
+ " 4: Link rate 6.0G\n"
+ " 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
@@ -401,7 +414,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
pdev = pm8001_ha->pdev;
/* map pci mem (PMC pci base 0-3)*/
- for (bar = 0; bar < 6; bar++) {
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
@@ -432,7 +445,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
- pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
+ pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL;
}
logicalBar++;
}
@@ -466,7 +479,15 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- pm8001_ha->logging_level = 0x01;
+ pm8001_ha->logging_level = logging_level;
+ if (link_rate >= 1 && link_rate <= 15)
+ pm8001_ha->link_rate = (link_rate << 8);
+ else {
+ pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 |
+ LINKRATE_60 | LINKRATE_120;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Setting link rate to default value\n"));
+ }
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
/* IOMB size is 128 for 8088/89 controllers */
if (pm8001_ha->chip_id != chip_8001)
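
link_rate is a bitmask (bit 0 = 1.5G, bit 1 = 3G, bit 2 = 6G, bit 3 = 12G); anything outside 1..15 falls back to all rates, and an accepted mask is shifted into the hardware field. The same validation as a standalone sketch, with illustrative bit values:

#define LINKRATE_15	0x01	/* illustrative bit values */
#define LINKRATE_30	0x02
#define LINKRATE_60	0x04
#define LINKRATE_120	0x08
#define LINKRATE_ALL	(LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120)

/* Clamp a user-supplied rate mask; out-of-range input enables all rates. */
static unsigned int sanitize_link_rate(unsigned long link_rate)
{
	if (link_rate >= 1 && link_rate <= 15)
		return link_rate << 8;	/* hardware field position, per the hunk */
	return LINKRATE_ALL;
}
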
@@ -873,7 +894,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
u32 number_of_intr;
int flag = 0;
int rc;
- static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
@@ -894,14 +914,16 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
rc, pm8001_ha->number_of_intr));
for (i = 0; i < number_of_intr; i++) {
- snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
- DRV_NAME"%d", i);
+ snprintf(pm8001_ha->intr_drvname[i],
+ sizeof(pm8001_ha->intr_drvname[0]),
+ "%s-%d", pm8001_ha->name, i);
pm8001_ha->irq_vector[i].irq_id = i;
pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ pm8001_ha->intr_drvname[i],
+ &(pm8001_ha->irq_vector[i]));
if (rc) {
for (j = 0; j < i; j++) {
free_irq(pci_irq_vector(pm8001_ha->pdev, i),
@@ -942,7 +964,7 @@ intx:
pm8001_ha->irq_vector[0].irq_id = 0;
pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
- DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
+ pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
}
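
Moving intr_drvname out of a function-local static array and into pm8001_hba_info matters as soon as a second adapter probes: a single static buffer would be re-snprintf'd by the later HBA, corrupting the strings that /proc/interrupts still references for the first. A sketch of the per-instance pattern with illustrative sizes:

#include <linux/kernel.h>

#define MAX_VEC		64	/* illustrative */
#define NAME_LEN	16	/* illustrative */

struct hba_stub {
	char name[NAME_LEN];
	char intr_drvname[MAX_VEC][NAME_LEN + 1 + 3 + 1];
};

/* Each HBA formats IRQ names into its own storage, so the strings stay
 * valid and unique per adapter for the lifetime of the irq. */
static void format_irq_names(struct hba_stub *h, int nvec)
{
	int i;

	for (i = 0; i < nvec; i++)
		snprintf(h->intr_drvname[i], sizeof(h->intr_drvname[0]),
			 "%s-%d", h->name, i);
}
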
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 7e48154e11c3..b7cbc312843e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -119,7 +119,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
&mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
- pm8001_printk("memory allocation error\n");
+ pr_err("pm80xx: memory allocation error\n");
return -1;
}
*pphys_addr = mem_dma_handle;
@@ -249,6 +249,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
return 0;
default:
+ PM8001_DEVIO_DBG(pm8001_ha,
+ pm8001_printk("func 0x%x\n", func));
rc = -EOPNOTSUPP;
}
msleep(300);
@@ -384,8 +386,9 @@ static int pm8001_task_exec(struct sas_task *task,
struct pm8001_port *port = NULL;
struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc, n_elem = 0;
+ u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
unsigned long flags = 0;
+ enum sas_protocol task_proto = t->task_proto;
if (!dev->port) {
struct task_status_struct *tsm = &t->task_status;
@@ -410,7 +413,7 @@ static int pm8001_task_exec(struct sas_task *task,
pm8001_dev = dev->lldd_dev;
port = &pm8001_ha->port[sas_find_local_port_id(dev)];
if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(t->task_proto)) {
+ if (sas_protocol_ata(task_proto)) {
struct task_status_struct *ts = &t->task_status;
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_PHY_DOWN;
@@ -432,7 +435,7 @@ static int pm8001_task_exec(struct sas_task *task,
goto err_out;
ccb = &pm8001_ha->ccb_info[tag];
- if (!sas_protocol_ata(t->task_proto)) {
+ if (!sas_protocol_ata(task_proto)) {
if (t->num_scatter) {
n_elem = dma_map_sg(pm8001_ha->dev,
t->scatter,
@@ -452,7 +455,7 @@ static int pm8001_task_exec(struct sas_task *task,
ccb->ccb_tag = tag;
ccb->task = t;
ccb->device = pm8001_dev;
- switch (t->task_proto) {
+ switch (task_proto) {
case SAS_PROTOCOL_SMP:
rc = pm8001_task_prep_smp(pm8001_ha, ccb);
break;
@@ -469,8 +472,7 @@ static int pm8001_task_exec(struct sas_task *task,
break;
default:
dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n",
- t->task_proto);
+ "unknown sas_task proto: 0x%x\n", task_proto);
rc = -EINVAL;
break;
}
@@ -493,7 +495,7 @@ err_out_tag:
pm8001_tag_free(pm8001_ha, tag);
err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(t->task_proto))
+ if (!sas_protocol_ata(task_proto))
if (n_elem)
dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
@@ -1179,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task)
break;
}
}
- pm8001_printk(":rc= %d\n", rc);
+ pr_err("pm80xx: rc= %d\n", rc);
return rc;
}
@@ -1202,8 +1204,8 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
+ ret = pm8001_find_tag(task, &tag);
+ if (ret == 0) {
pm8001_printk("no tag for task:%p\n", task);
return TMF_RESP_FUNC_FAILED;
}
@@ -1241,26 +1243,50 @@ int pm8001_abort_task(struct sas_task *task)
/* 2. Send Phy Control Hard Reset */
reinit_completion(&completion);
+ phy->port_reset_status = PORT_RESET_TMO;
phy->reset_success = false;
phy->enable_completion = &completion;
phy->reset_completion = &completion_reset;
ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
PHY_HARD_RESET);
- if (ret)
- goto out;
- PM8001_MSG_DBG(pm8001_ha,
- pm8001_printk("Waiting for local phy ctl\n"));
- wait_for_completion(&completion);
- if (!phy->reset_success)
+ if (ret) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
goto out;
+ }
- /* 3. Wait for Port Reset complete / Port reset TMO */
+ /* In the case of the reset timeout/fail we still
+ * abort the command at the firmware. The assumption
+ * here is that the drive is off doing something so
+ * that it's not processing requests, and we want to
+ * avoid getting a completion for this and either
+ * leaking the task in libsas or losing the race and
+ * getting a double free.
+ */
PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ ret = wait_for_completion_timeout(&completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret || !phy->reset_success) {
+ phy->enable_completion = NULL;
+ phy->reset_completion = NULL;
+ } else {
+ /* 3. Wait for Port Reset complete or
+ * Port reset TMO
+ */
+ PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
- wait_for_completion(&completion_reset);
- if (phy->port_reset_status) {
- pm8001_dev_gone_notify(dev);
- goto out;
+ ret = wait_for_completion_timeout(
+ &completion_reset,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ phy->reset_completion = NULL;
+ WARN_ON(phy->port_reset_status ==
+ PORT_RESET_TMO);
+ if (phy->port_reset_status == PORT_RESET_TMO) {
+ pm8001_dev_gone_notify(dev);
+ goto out;
+ }
}
/*
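
The abort path above swaps unbounded wait_for_completion() for wait_for_completion_timeout() and, on timeout or failure, NULLs the phy's completion pointers so a late firmware event cannot complete() a stack variable that has gone out of scope. The pattern in isolation (the struct is a stand-in for pm8001_phy):

#include <linux/completion.h>
#include <linux/errno.h>

struct phy_stub {
	struct completion *reset_completion;
};

/* Wait with a deadline; on timeout, detach the completion so a late
 * event handler sees NULL rather than a dangling stack pointer. */
static int wait_port_reset(struct phy_stub *phy, struct completion *done,
			   unsigned long tmo_jiffies)
{
	phy->reset_completion = done;
	if (!wait_for_completion_timeout(done, tmo_jiffies)) {
		phy->reset_completion = NULL;
		return -ETIMEDOUT;
	}
	return 0;
}
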
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ff17c6aff63d..93438c8f67da 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -66,8 +66,11 @@
#define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/
#define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */
#define PM8001_MSG_LOGGING 0x40 /* misc message logging */
-#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \
- format, __func__, __LINE__, ## arg)
+#define PM8001_DEV_LOGGING 0x80 /* development message logging */
+#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
+#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \
+ format, pm8001_ha->name, __func__, __LINE__, ## arg)
#define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \
do { \
if (unlikely(HBA->logging_level & LEVEL)) \
@@ -97,6 +100,14 @@ do { \
#define PM8001_MSG_DBG(HBA, CMD) \
PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD)
+#define PM8001_DEV_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD)
+
+#define PM8001_DEVIO_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD)
+
+#define PM8001_IOERR_DBG(HBA, CMD) \
+ PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD)
#define PM8001_USE_TASKLET
#define PM8001_USE_MSIX
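
The three new classes plug into the existing PM8001_CHECK_LOGGING gate, so a message is only evaluated and emitted when its bit is set in logging_level (now settable at load time via the new module parameter). A stripped-down model of the gating:

#include <linux/kernel.h>

#define DEVIO_LOGGING	0x100	/* matches PM8001_DEVIO_LOGGING above */

/* CMD is only evaluated when its class bit is enabled, mirroring
 * PM8001_CHECK_LOGGING. */
#define CHECK_LOGGING(level, mask, CMD)		\
do {						\
	if (unlikely((level) & (mask)))		\
		CMD;				\
} while (0)

static void example(u32 logging_level, int vec)
{
	CHECK_LOGGING(logging_level, DEVIO_LOGGING,
		      pr_info("irq vec %d\n", vec));
}

Loading with a mask that includes 0x100 would enable the DEVIO class under this scheme.
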
@@ -141,6 +152,8 @@ struct pm8001_ioctl_payload {
#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */
#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */
#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */
+#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */
+#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNATURE */
#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1
#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0
#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0
@@ -496,6 +509,7 @@ struct pm8001_hba_info {
u32 forensic_last_offset;
u32 fatal_forensic_shift_offset;
u32 forensic_fatal_step;
+ u32 forensic_preserved_accumulated_transfer;
u32 evtlog_ib_offset;
u32 evtlog_ob_offset;
void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/
@@ -530,11 +544,14 @@ struct pm8001_hba_info {
struct pm8001_ccb_info *ccb_info;
#ifdef PM8001_USE_MSIX
int number_of_intr;/*will be used in remove()*/
+ char intr_drvname[PM8001_MAX_MSIX_VEC]
+ [PM8001_NAME_LENGTH+1+3+1];
#endif
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC];
#endif
u32 logging_level;
+ u32 link_rate;
u32 fw_status;
u32 smp_exp_mode;
bool controller_fatal_error;
@@ -663,7 +680,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, u32 responseQueue);
+ u32 opCode, void *payload, size_t nb,
+ u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 73261902d75d..19601138e889 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -37,6 +37,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*
*/
+#include <linux/version.h>
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_hwi.h"
@@ -75,7 +76,7 @@ void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
destination1 = (u32 *)destination;
for (index = 0; index < dw_count; index += 4, destination1++) {
- offset = (soffset + index / 4);
+ offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
*destination1 = cpu_to_le32(value);
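
The one-line change above fixes an indexing bug: 'index' advances in bytes (4 per dword), so the register offset is soffset + index; the old soffset + index/4 kept re-reading overlapping registers. The corrected loop shape as a standalone sketch (read32() stands in for pm8001_cr32()):

#include <linux/types.h>
#include <asm/byteorder.h>

extern u32 read32(u32 bus, u32 byte_offset);	/* placeholder accessor */

/* Copy len_bytes of BAR data as dwords. 'index' counts BYTES, so it is
 * itself the offset delta; dividing it by 4 was the bug. */
static void bar_mem_copy(u32 bus, u32 soffset, __le32 *dst, u32 len_bytes)
{
	u32 index;

	for (index = 0; index < len_bytes; index += 4, dst++)
		*dst = cpu_to_le32(read32(bus, soffset + index));
}
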
@@ -92,9 +93,12 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr;
u32 accum_len , reg_val, index, *temp;
+ u32 status = 1;
unsigned long start;
u8 *direct_data;
char *fatal_error_data = buf;
+ u32 length_to_read;
+ u32 offset;
pm8001_ha->forensic_info.data_buf.direct_data = buf;
if (pm8001_ha->chip_id == chip_8001) {
@@ -104,16 +108,35 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* initialize variables for very first call from host application */
if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("forensic_info TYPE_NON_FATAL..............\n"));
direct_data = (u8 *)fatal_error_data;
pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL;
pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
pm8001_ha->forensic_info.data_buf.read_len = 0;
+ pm8001_ha->forensic_preserved_accumulated_transfer = 0;
- pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ /* Write signature to fatal dump table */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd);
+ pm8001_ha->forensic_info.data_buf.direct_data = direct_data;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: status1 %d\n", status));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("ossaHwCB: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+ }
+ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) {
/* start to get data */
/* Program the MEMBASE II Shifting Register with 0x00.*/
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
@@ -126,30 +149,66 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
/* Read until accum_len is retrieved */
accum_len = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
- PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n",
- accum_len));
+ /* Determine length of data between previously stored transfer length
+ * and current accumulated transfer length
+ */
+ length_to_read =
+ accum_len - pm8001_ha->forensic_preserved_accumulated_transfer;
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n",
+ length_to_read));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: last_offset 0x%x\n",
+ pm8001_ha->forensic_last_offset));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: read_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.read_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: direct_len 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_len));
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: direct_offset 0x%x\n",
+ pm8001_ha->forensic_info.data_buf.direct_offset));
+
+ /* If accumulated length failed to read correctly fail the attempt.*/
if (accum_len == 0xFFFFFFFF) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("Possible PCI issue 0x%x not expected\n",
- accum_len));
- return -EIO;
+ accum_len));
+ return status;
}
- if (accum_len == 0 || accum_len >= 0x100000) {
+ /* If accumulated length is zero fail the attempt */
+ if (accum_len == 0) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
- "%08x ", 0xFFFFFFFF);
+ "%08x ", 0xFFFFFFFF);
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
+ /* Accumulated length is good so start capturing the first data */
temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr;
if (pm8001_ha->forensic_fatal_step == 0) {
moreData:
+ /* If the data left to read is less than SYSFS_OFFSET,
+ * shrink direct_len to just the remainder.
+ */
+ if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET
+ > length_to_read) {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ length_to_read -
+ pm8001_ha->forensic_last_offset;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_len =
+ SYSFS_OFFSET;
+ }
if (pm8001_ha->forensic_info.data_buf.direct_data) {
/* Data is in bar, copy to host memory */
- pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc,
- pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
- pm8001_ha->forensic_info.data_buf.direct_len ,
- 1);
+ pm80xx_pci_mem_copy(pm8001_ha,
+ pm8001_ha->fatal_bar_loc,
+ pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr,
+ pm8001_ha->forensic_info.data_buf.direct_len, 1);
}
pm8001_ha->fatal_bar_loc +=
pm8001_ha->forensic_info.data_buf.direct_len;
@@ -160,21 +219,29 @@ moreData:
pm8001_ha->forensic_info.data_buf.read_len =
pm8001_ha->forensic_info.data_buf.direct_len;
- if (pm8001_ha->forensic_last_offset >= accum_len) {
+ if (pm8001_ha->forensic_last_offset >= length_to_read) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 3);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_bar_loc = 0;
pm8001_ha->forensic_fatal_step = 1;
pm8001_ha->fatal_forensic_shift_offset = 0;
pm8001_ha->forensic_last_offset = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -184,12 +251,20 @@ moreData:
sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < (SYSFS_OFFSET / 4); index++) {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4); index++) {
+ pm8001_ha->forensic_info.data_buf.direct_data
+ += sprintf(pm8001_ha->
forensic_info.data_buf.direct_data,
"%08x ", *(temp + index));
}
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset));
return (char *)pm8001_ha->
forensic_info.data_buf.direct_data -
(char *)buf;
@@ -199,63 +274,122 @@ moreData:
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 2);
- for (index = 0; index < 256; index++) {
+ for (index = 0; index <
+ (pm8001_ha->forensic_info.data_buf.direct_len
+ / 4) ; index++) {
pm8001_ha->forensic_info.data_buf.direct_data +=
sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", *(temp + index));
+ forensic_info.data_buf.direct_data,
+ "%08x ", *(temp + index));
}
pm8001_ha->fatal_forensic_shift_offset += 0x100;
pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
pm8001_ha->fatal_bar_loc = 0;
+ status = 0;
+ offset = (int)
+ ((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
if (pm8001_ha->forensic_fatal_step == 1) {
- pm8001_ha->fatal_forensic_shift_offset = 0;
- /* Read 64K of the debug data. */
- pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER,
- pm8001_ha->fatal_forensic_shift_offset);
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
+ /* store previous accumulated length before triggering next
+ * accumulated length update
+ */
+ pm8001_ha->forensic_preserved_accumulated_transfer =
+ pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN);
+
+ /* continue capturing the fatal log until Dump status is 0x3 */
+ if (pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS) <
+ MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
+
+ /* reset fddstat bit by writing to zero*/
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS, 0x0);
+
+ /* set dump control value to '1' so that new data will
+ * be transferred to shared memory
+ */
+ pm8001_mw32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
MPI_FATAL_EDUMP_HANDSHAKE_RDY);
- /* Poll FDDHSHK until clear */
- start = jiffies + (2 * HZ); /* 2 sec */
+ /*Poll FDDHSHK until clear */
+ start = jiffies + (2 * HZ); /* 2 sec */
- do {
- reg_val = pm8001_mr32(fatal_table_address,
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
- } while ((reg_val) && time_before(jiffies, start));
+ } while ((reg_val) && time_before(jiffies, start));
- if (reg_val != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER"
- " = 0x%x\n", reg_val));
- return -EIO;
- }
-
- /* Read the next 64K of the debug data. */
- pm8001_ha->forensic_fatal_step = 0;
- if (pm8001_mr32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_STATUS) !=
- MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) {
- pm8001_mw32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0);
- goto moreData;
- } else {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(pm8001_ha->
- forensic_info.data_buf.direct_data,
- "%08x ", 4);
- pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
- pm8001_ha->forensic_info.data_buf.direct_len = 0;
- pm8001_ha->forensic_info.data_buf.direct_offset = 0;
- pm8001_ha->forensic_info.data_buf.read_len = 0;
+ if (reg_val != 0) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ return((char *)
+ pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ }
+ /* Poll status register until set to 2 or
+ * 3 for up to 2 seconds
+ */
+ start = jiffies + (2 * HZ); /* 2 sec */
+
+ do {
+ reg_val = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_STATUS);
+ } while (((reg_val != 2) && (reg_val != 3)) &&
+ time_before(jiffies, start));
+
+ if (reg_val < 2) {
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n",
+ reg_val));
+ /* Fail the dump if a timeout occurs */
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0,
+ MEMBASE_II_SHIFT_REGISTER,
+ pm8001_ha->fatal_forensic_shift_offset);
+ }
+ /* Read the next block of the debug data.*/
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(
+ pm8001_ha->forensic_info.data_buf.direct_data,
+ "%08x ", 4);
+ pm8001_ha->forensic_info.data_buf.read_len
+ = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len
+ = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset
+ = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
+ }
}
}
-
+ offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
+ - (char *)buf);
+ PM8001_IO_DBG(pm8001_ha,
+ pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset));
return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
(char *)buf;
}
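
Both waits above share one idiom: arm a two-second jiffies deadline and re-read a fatal-dump table field until it reaches the wanted state or time runs out. Factored into a helper for clarity (table_read() stands in for pm8001_mr32()):

#include <linux/jiffies.h>
#include <linux/errno.h>

extern u32 table_read(u32 reg);	/* placeholder for pm8001_mr32() */

/* Poll 'reg' until it equals 'want' or roughly two seconds elapse. */
static int poll_table_reg(u32 reg, u32 want)
{
	unsigned long deadline = jiffies + 2 * HZ;
	u32 v;

	do {
		v = table_read(reg);
	} while (v != want && time_before(jiffies, deadline));

	return v == want ? 0 : -ETIMEDOUT;
}
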
@@ -317,6 +451,25 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Main cfg table; ila rev:%x Inactive fw rev:%x\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version,
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
}
/**
@@ -521,6 +674,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressib, (offsetib + 0x18));
pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i,
+ pm8001_ha->inbnd_q_tbl[i].pi_pci_bar,
+ pm8001_ha->inbnd_q_tbl[i].pi_offset));
}
for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
@@ -549,6 +707,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_mr32(addressob, (offsetob + 0x18));
pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i,
+ pm8001_ha->outbnd_q_tbl[i].ci_pci_bar,
+ pm8001_ha->outbnd_q_tbl[i].ci_offset));
}
}
@@ -582,6 +745,10 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Updated Fatal error interrupt vector 0x%x\n",
+ pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT)));
+
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
@@ -591,6 +758,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Programming DW 0x21 in main cfg table with 0x%x\n",
+ pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET)));
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
@@ -629,6 +799,21 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "CI upper base addr 0x%x CI lower base addr 0x%x\n",
+ pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr,
+ pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr));
}
/**
@@ -652,6 +837,21 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ %d: Element pri size 0x%x\n",
+ number,
+ pm8001_ha->outbnd_q_tbl[number].element_size_cnt));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].lower_base_addr));
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "PI upper base addr 0x%x PI lower base addr 0x%x\n",
+ pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr,
+ pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr));
}
/**
@@ -669,9 +869,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
/* wait until Inbound DoorBell Clear Register toggled */
if (IS_SPCV_12G(pm8001_ha->pdev)) {
- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
+ max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT;
} else {
- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
+ max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
udelay(1);
@@ -797,7 +997,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
- PM8001_INIT_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value));
pcilogic = (value & 0xFC000000) >> 26;
@@ -885,7 +1085,12 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
(THERMAL_ENABLE << 8) | page_code;
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
+ payload.cfg_pg[0], payload.cfg_pg[1]));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
return rc;
@@ -967,7 +1172,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1090,7 +1296,12 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
KEK_MGMT_SUBOP_KEYCARDUPDATE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "Saving Encryption info to flash. payload 0x%x\n",
+ payload.new_curidx_ksop));
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1241,7 +1452,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
pm8001_printk("reset register before write : 0x%x\n", regval));
pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
- mdelay(500);
+ msleep(500);
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -1443,7 +1654,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
task_abort.tag = cpu_to_le32(ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
+ sizeof(task_abort), 0);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("Executing abort task end\n"));
if (ret) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1519,7 +1733,9 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n"));
if (res) {
sas_free_task(task);
pm8001_tag_free(pm8001_ha, ccb_tag);
@@ -1570,6 +1786,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
+
+ PM8001_DEV_DBG(pm8001_ha, pm8001_printk(
+ "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW))
@@ -1772,7 +1992,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -1826,7 +2046,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -1963,7 +2183,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
ts->stat = SAS_DATA_OVERRUN;
break;
case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
/* TBC: used default set values */
ts->resp = SAS_TASK_COMPLETE;
@@ -1974,7 +2194,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
return;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", event));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2062,6 +2282,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("ts null\n"));
return;
}
+
+ if (unlikely(status))
+ PM8001_IOERR_DBG(pm8001_ha, pm8001_printk(
+ "status:0x%x, tag:0x%x, task::0x%p\n",
+ status, tag, t));
+
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
@@ -2365,7 +2591,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
@@ -2382,6 +2608,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -2435,7 +2663,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
}
ts = &t->task_status;
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_IOERR_DBG(pm8001_ha,
pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
port_id, tag, event));
switch (event) {
@@ -2655,6 +2883,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (unlikely(!t || !t->lldd_task || !t->dev))
return;
+ PM8001_DEV_DBG(pm8001_ha,
+ pm8001_printk("tag::0x%x status::0x%x\n", tag, status));
+
switch (status) {
case IO_SUCCESS:
@@ -2822,7 +3053,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
break;
default:
- PM8001_IO_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown status 0x%x\n", status));
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -2873,7 +3104,8 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -2964,7 +3196,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_lrate_mode(phy, link_rate);
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("unknown device type(%x)\n", deviceType));
break;
}
@@ -2984,7 +3216,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
if (pm8001_ha->flags == PM8001F_RUN_TIME)
- mdelay(200);/*delay a moment to wait disk to spinup*/
+ msleep(200);/*delay a moment to wait disk to spinup*/
pm8001_bytes_dmaed(pm8001_ha, phy_id);
}
@@ -3013,7 +3245,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"port id %d, phy id %d link_rate %d portstate 0x%x\n",
port_id, phy_id, link_rate, portstate));
@@ -3101,7 +3333,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk(" Phy Down and(default) = 0x%x\n",
portstate));
break;
@@ -3130,8 +3362,10 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (status == 0) {
phy->phy_state = PHY_LINK_DOWN;
if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
+ phy->enable_completion != NULL) {
complete(phy->enable_completion);
+ phy->enable_completion = NULL;
+ }
}
return 0;
@@ -3191,7 +3425,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEV_DBG(pm8001_ha,
pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
port_id, phy_id, eventType, status));
@@ -3376,7 +3610,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
break;
default:
- PM8001_MSG_DBG(pm8001_ha,
+ PM8001_DEVIO_DBG(pm8001_ha,
pm8001_printk("Unknown event type 0x%x\n", eventType));
break;
}
@@ -3758,7 +3992,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
ssp_coalesced_comp_resp(pm8001_ha, piomb);
break;
default:
- PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
"Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
break;
}
@@ -3991,8 +4225,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- (u32 *)&smp_cmd, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
@@ -4200,7 +4434,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, q_index);
+ &ssp_cmd, sizeof(ssp_cmd), q_index);
return ret;
}
@@ -4441,7 +4675,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, q_index);
+ &sata_cmd, sizeof(sata_cmd), q_index);
return ret;
}
@@ -4465,23 +4699,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
- /*
- ** [0:7] PHY Identifier
- ** [8:11] link rate 1.5G, 3G, 6G
- ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
- ** [14] 0b disable spin up hold; 1b enable spin up hold
- ** [15] ob no change in current PHY analig setup 1b enable using SPAST
- */
- if (!IS_SPCV_12G(pm8001_ha->pdev))
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | phy_id);
- else
- payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
- LINKMODE_AUTO | LINKRATE_15 |
- LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
- phy_id);
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | pm8001_ha->link_rate | phy_id);
/* SSC Disable and SAS Analog ST configuration */
/**
payload.ase_sh_lm_slr_phyid =
@@ -4494,9 +4714,10 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4518,7 +4739,8 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
+ sizeof(payload), 0);
return ret;
}
@@ -4584,7 +4806,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -4614,7 +4837,8 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4641,6 +4865,9 @@ static irqreturn_t
pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+ PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(
+ "irq vec %d, ODMR:0x%x\n",
+ vec, pm8001_cr32(pm8001_ha, 0, 0x30)));
process_oq(pm8001_ha, vec);
pm80xx_chip_interrupt_enable(pm8001_ha, vec);
return IRQ_HANDLED;
@@ -4669,7 +4896,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
@@ -4711,7 +4939,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
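One change repeats across every hunk in this file: pm8001_mpi_build_cmd() now takes the payload size next to the payload pointer. A sketch of the prototype this implies (the parameter name nb is an assumption; the authoritative declaration lives in the pm8001 headers):

    int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
                             struct inbound_queue_table *circularQ,
                             u32 opc, void *payload, size_t nb,
                             u32 responseQueue);

Carrying the explicit byte count lets the builder copy exactly nb bytes into the inbound-queue element instead of assuming a fixed command size, so shorter and longer IOMBs are both handled safely.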
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index dc9ab7689060..701951a0f715 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -220,6 +220,9 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
+#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */
+#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */
+
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
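These budgets feed the 1 us polling loop visible in the mpi_init_check() hunk above, so each macro value is effectively a count of microseconds. A condensed sketch of that wait pattern (simplified from the hunk, not verbatim driver code):

    u32 max_wait_count = IS_SPCV_12G(pm8001_ha->pdev) ?
            SPCV_DOORBELL_CLEAR_TIMEOUT : SPC_DOORBELL_CLEAR_TIMEOUT;
    do {
            udelay(1);      /* 1 us per iteration */
            value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET) &
                    SPCv_MSGU_CFG_TABLE_UPDATE;
    } while (value && --max_wait_count);
    if (!max_wait_count)
            return -1;      /* doorbell never cleared within the budget */

30 * 1000 * 1000 iterations of udelay(1) therefore bounds the wait at roughly 30 seconds for the 12G parts, up from the previous 4.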
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 398d2af60832..7eb88fe1eb0b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3973,9 +3973,7 @@ static const struct file_operations pmcraid_fops = {
.open = pmcraid_chr_open,
.fasync = pmcraid_chr_fasync,
.unlocked_ioctl = pmcraid_chr_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pmcraid_chr_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
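compat_ptr_ioctl() is the generic VFS helper for drivers whose native ioctl handler is compat-clean apart from the pointer conversion; the removed open-coded .compat_ioctl assignment skipped the compat_ptr() translation, which matters on s390. Its core is essentially this (paraphrased from fs/ioctl.c of this kernel generation):

    long compat_ptr_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
    {
            if (!file->f_op->unlocked_ioctl)
                    return -ENOIOCTLCMD;

            return file->f_op->unlocked_ioctl(file, cmd,
                            (unsigned long)compat_ptr(arg));
    }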
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index d979f095aeda..2386bfb73c46 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -42,7 +42,7 @@ extern uint qedf_debug;
#define QEDF_LOG_LPORT 0x4000 /* lport logs */
#define QEDF_LOG_ELS 0x8000 /* ELS logs */
#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDF_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDF_LOG_TID 0x80000 /*
* FW TID context acquire
* free
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 59ca98f12afd..604856e72cfb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1926,6 +1926,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
return 0;
}
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_port_id(shost) = lport->port_id;
+}
+
static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
*shost)
{
@@ -1996,6 +2003,7 @@ static struct fc_function_template qedf_fc_transport_fn = {
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
+ .get_host_port_id = qedf_get_host_port_id,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fc_get_host_speed,
diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h
index 243acc8b520a..37d084086fd4 100644
--- a/drivers/scsi/qedi/qedi_dbg.h
+++ b/drivers/scsi/qedi/qedi_dbg.h
@@ -44,7 +44,7 @@ extern uint qedi_dbg_log;
#define QEDI_LOG_LPORT 0x4000 /* lport logs */
#define QEDI_LOG_ELS 0x8000 /* ELS logs */
#define QEDI_LOG_NPIV 0x10000 /* NPIV logs */
-#define QEDI_LOG_SESS 0x20000 /* Conection setup, cleanup */
+#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */
#define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */
#define QEDI_LOG_TID 0x80000 /* FW TID context acquire,
* free
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7259bce85e0e..ae97e2f310a3 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -102,8 +102,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla8044_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
- } else
+ } else {
+ ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
+ }
break;
case 4:
if (IS_P3P_TYPE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6ffa9877c28b..460f443f6471 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -591,19 +591,23 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
- atomic_t ref_count;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+
uint32_t handle;
uint16_t flags;
uint16_t type;
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct srb *cmd_sp;
struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
@@ -2277,7 +2281,7 @@ typedef struct {
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
uint8_t fc4_type;
- uint8_t fc4f_nvme; /* nvme fc4 feature bits */
+ uint8_t fc4_features;
} sw_info_t;
/* FCP-4 types */
@@ -2445,7 +2449,7 @@ typedef struct fc_port {
u32 supported_classes;
uint8_t fc4_type;
- uint8_t fc4f_nvme;
+ uint8_t fc4_features;
uint8_t scan_state;
unsigned long last_queue_full;
@@ -2476,6 +2480,11 @@ typedef struct fc_port {
u16 n2n_chip_reset;
} fc_port_t;
+enum {
+ FC4_PRIORITY_NVME = 1,
+ FC4_PRIORITY_FCP = 2,
+};
+
#define QLA_FCPORT_SCAN 1
#define QLA_FCPORT_FOUND 2
@@ -4291,6 +4300,8 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+ uint8_t fc4_type_priority;
+
atomic_t zio_threshold;
uint16_t last_zio_threshold;
@@ -4816,6 +4827,23 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define NVME_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_NVME) \
+
+#define FCP_TYPE(fcport) \
+ (fcport->fc4_type & FS_FC4TYPE_FCP) \
+
+#define NVME_ONLY_TARGET(fcport) \
+ (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \
+
+#define NVME_FCP_TARGET(fcport) \
+ (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
+
+#define NVME_TARGET(ha, fcport) \
+ ((NVME_FCP_TARGET(fcport) && \
+ (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
+ NVME_ONLY_TARGET(fcport)) \
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 732bb871c433..59f6903e5abe 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -2101,4 +2101,6 @@ struct qla_fcp_prio_cfg {
#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
+#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d11416dcee4e..5b163ad85c34 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -917,4 +917,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5298ed10059f..67230688b05e 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -248,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
WWN_SIZE);
fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
- FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+ FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
@@ -2887,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
- uint8_t fcp_scsi_features = 0;
+ uint8_t fcp_scsi_features = 0, nvme_features = 0;
struct ct_arg arg;
for (i = 0; i < ha->max_fibre_devices; i++) {
@@ -2933,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fcp_scsi_features &= 0x0f;
- if (fcp_scsi_features)
- list[i].fc4_type = FC4_TYPE_FCP_SCSI;
- else
- list[i].fc4_type = FC4_TYPE_OTHER;
+ if (fcp_scsi_features) {
+ list[i].fc4_type = FS_FC4TYPE_FCP;
+ list[i].fc4_features = fcp_scsi_features;
+ }
- list[i].fc4f_nvme =
+ nvme_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- list[i].fc4f_nvme &= 0xf;
+ nvme_features &= 0xf;
+
+ if (nvme_features) {
+ list[i].fc4_type |= FS_FC4TYPE_NVME;
+ list[i].fc4_features = nvme_features;
+ }
}
/* Last device exit. */
@@ -3005,7 +3010,7 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == QLA_FUNCTION_TIMEOUT)
- return;
+ goto done;
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3435,6 +3440,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
struct event_arg ea;
+ uint8_t fc4_scsi_feat;
+ uint8_t fc4_nvme_feat;
ql_dbg(ql_dbg_disc, vha, 0x2133,
"Async done-%s res %x ID %x. %8phC\n",
@@ -3442,24 +3449,25 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
fcport->flags &= ~FCF_ASYNC_SENT;
ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
* The format of the FC-4 Features object, as defined by the FC-4,
* Shall be an array of 4-bit values, one for each type code value
*/
if (!res) {
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ if (fc4_scsi_feat & 0xf) {
/* w1 b00:03 */
- fcport->fc4_type =
- ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
- fcport->fc4_type &= 0xf;
- }
+ fcport->fc4_type = FS_FC4TYPE_FCP;
+ fcport->fc4_features = fc4_scsi_feat & 0xf;
+ }
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ if (fc4_nvme_feat & 0xf) {
/* w5 [00:03]/28h */
- fcport->fc4f_nvme =
- ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- fcport->fc4f_nvme &= 0xf;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ fcport->fc4_features = fc4_nvme_feat & 0xf;
}
}
@@ -3563,7 +3571,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
u8 recheck = 0;
u16 dup = 0, dup_cnt = 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
@@ -3580,8 +3588,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Fabric scan failed on all retries.\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Fabric scan failed for %d retries.\n",
+ __func__, vha->scan.scan_retry);
}
goto out;
}
@@ -4047,7 +4056,7 @@ done_free_sp:
void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4061,7 +4070,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
u32 rspsz;
unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s enter\n", __func__);
if (!vha->flags.online)
@@ -4070,14 +4079,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags & SF_SCANNING) {
spin_unlock_irqrestore(&vha->work_lock, flags);
- ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: scan active\n", __func__);
return rval;
}
vha->scan.scan_flags |= SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
if (fc4_type == FC4_TYPE_FCP_SCSI) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
if (sp)
@@ -4132,7 +4142,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
}
sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s scan list size %d\n", __func__, vha->scan.size);
memset(vha->scan.l, 0, vha->scan.size);
@@ -4197,8 +4207,8 @@ done_free_sp:
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
if (vha->scan.scan_flags == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s: schedule\n", __func__);
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s: Scan scheduled.\n", __func__);
vha->scan.scan_flags |= SF_QUEUED;
schedule_delayed_work(&vha->scan.scan_work, 5);
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1d041313ec52..1dbee8800218 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,7 +17,6 @@
#include <asm/prom.h>
#endif
-#include <target/target_core_base.h>
#include "qla_target.h"
/*
@@ -101,8 +100,22 @@ static void qla24xx_abort_iocb_timeout(void *data)
u32 handle;
unsigned long flags;
+ if (sp->cmd_sp)
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
+ sp->cmd_sp->handle, sp->cmd_sp->type,
+ sp->handle, sp->type);
+ else
+ ql_dbg(ql_dbg_async, sp->vha, 0x507c,
+ "Abort timeout 2 - hdl=%x, type=%x\n",
+ sp->handle, sp->type);
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
+ sp->cmd_sp))
+ qpair->req->outstanding_cmds[handle] = NULL;
+
/* removing the abort */
if (qpair->req->outstanding_cmds[handle] == sp) {
qpair->req->outstanding_cmds[handle] = NULL;
@@ -111,6 +124,9 @@ static void qla24xx_abort_iocb_timeout(void *data)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ if (sp->cmd_sp)
+ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+
abt->u.abt.comp_status = CS_TIMEOUT;
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
@@ -142,6 +158,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
sp->type = SRB_ABT_CMD;
sp->name = "abort";
sp->qpair = cmd_sp->qpair;
+ sp->cmd_sp = cmd_sp;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
@@ -328,7 +345,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
else
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x2072,
@@ -726,19 +743,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
current_login_state = e->current_login_state >> 4;
else
current_login_state = e->current_login_state & 0xf;
-
ql_dbg(ql_dbg_disc, vha, 0x20e2,
- "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
__func__, fcport->port_name,
e->current_login_state, fcport->fw_login_state,
- fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+ fcport->fc4_type, id.b24, fcport->d_id.b24,
+ loop_id, fcport->loop_id);
switch (fcport->disc_state) {
case DSC_DELETE_PEND:
@@ -1135,19 +1150,18 @@ static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- if (res == QLA_FUNCTION_TIMEOUT) {
- dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
- sp->u.iocb_cmd.u.mbx.in_dma);
- return;
- }
-
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
+ if (res == QLA_FUNCTION_TIMEOUT)
+ goto done;
+
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
ea.sp = sp;
qla24xx_handle_gpdb_event(vha, &ea);
+done:
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1225,13 +1239,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
ql_dbg(ql_dbg_disc, vha, 0x211b,
"Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
- fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
+ fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -1382,14 +1396,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
- fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
- ea->rc);
+ "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
+ fcport->port_name, fcport->disc_state, pd->current_login_state,
+ fcport->fc4_type, ea->rc);
if (fcport->disc_state == DSC_DELETE_PEND)
return;
- if (fcport->fc4f_nvme)
+ if (NVME_TARGET(vha->hw, fcport))
ls = pd->current_login_state >> 4;
else
ls = pd->current_login_state & 0xf;
@@ -1578,7 +1592,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post %s PRLI\n",
__func__, __LINE__, fcport->port_name,
- fcport->fc4f_nvme ? "NVME" : "FC");
+ NVME_TARGET(vha->hw, fcport) ? "NVME" :
+ "FC");
qla24xx_post_prli_work(vha, fcport);
}
break;
@@ -1701,6 +1716,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
qla24xx_fcport_handle_login(vha, fcport);
}
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post PRLI\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+}
+
/*
* RSCN(s) came in for this fcport, but the RSCN(s) was not able
* to be consumed by the fcport
@@ -1860,38 +1884,26 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
break;
}
- if (ea->fcport->fc4f_nvme) {
+ /*
+ * Retry PRLI with the other FC-4 type if the failure occurred on a
+ * dual FCP/NVMe port
+ */
+ if (NVME_FCP_TARGET(ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post fc4 prli\n",
- __func__, __LINE__, ea->fcport->port_name);
- ea->fcport->fc4f_nvme = 0;
- qla24xx_post_prli_work(vha, ea->fcport);
- return;
+ "%s %d %8phC post %s prli\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
+ "NVMe" : "FCP");
+ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+ else
+ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
}
- /* at this point both PRLI NVME & PRLI FCP failed */
- if (N2N_TOPO(vha->hw)) {
- if (ea->fcport->n2n_link_reset_cnt < 3) {
- ea->fcport->n2n_link_reset_cnt++;
- /*
- * remote port is not sending Plogi. Reset
- * link to kick start his state machine
- */
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- } else {
- ql_log(ql_log_warn, vha, 0x2119,
- "%s %d %8phC Unable to reconnect\n",
- __func__, __LINE__, ea->fcport->port_name);
- }
- } else {
- /*
- * switch connect. login failed. Take connection
- * down and allow relogin to retrigger
- */
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- ea->fcport->keep_nport_handle = 0;
- qlt_schedule_sess_for_deletion(ea->fcport);
- }
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->keep_nport_handle = 0;
+ ea->fcport->logout_on_delete = 1;
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
}
}
@@ -1952,7 +1964,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- if (ea->fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, ea->fcport)) {
ql_dbg(ql_dbg_disc, vha, 0x2117,
"%s %d %8phC post prli\n",
__func__, __LINE__, ea->fcport->port_name);
@@ -2206,8 +2218,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
+ /* Let priority default to FCP, can be overridden by nvram_config */
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
ha->isp_ops->nvram_config(vha);
+ if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
+ ha->fc4_type_priority != FC4_PRIORITY_NVME)
+ ha->fc4_type_priority = FC4_PRIORITY_FCP;
+
+ ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
+ ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
+
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
ql_log(ql_log_info, vha, 0x0077,
@@ -5382,7 +5404,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
@@ -5710,11 +5732,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
new_fcport->fc4_type = swl[swl_idx].fc4_type;
new_fcport->nvme_flag = 0;
- new_fcport->fc4f_nvme = 0;
if (vha->flags.nvme_enabled &&
- swl[swl_idx].fc4f_nvme) {
- new_fcport->fc4f_nvme =
- swl[swl_idx].fc4f_nvme;
+ swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
ql_log(ql_log_info, vha, 0x2131,
"FOUND: NVME port %8phC as FC Type 28h\n",
new_fcport->port_name);
@@ -5770,7 +5789,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Bypass ports whose FCP-4 type is not FCP_SCSI */
if (ql2xgffidenable &&
- (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+ (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
@@ -5839,7 +5858,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
fcport->disc_state = DSC_GNL;
vha->fcport_count--;
@@ -8514,6 +8533,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* N2N: driver will initiate Login instead of FW */
icb->firmware_options_3 |= BIT_8;
+ /* Determine NVMe/FCP priority for target ports */
+ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -9003,8 +9025,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
struct qla_hw_data *ha = qpair->hw;
qpair->delete_in_progress = 1;
- while (atomic_read(&qpair->ref_count))
- msleep(500);
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0c3d907af769..352aba4127f7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -307,3 +307,15 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
WRT_REG_DWORD(req->req_q_in, req->ring_index);
}
+
+static inline int
+qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
+{
+ uint32_t data;
+
+ data = ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
+
+ return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
+}
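Bit 6 of the NVRAM byte at NVRAM_DUAL_FCP_NVME_FLAG_OFFSET carries the dual-port preference, and (data >> 6) & BIT_0 isolates that single bit, with a set bit selecting FCP. Two illustrative values:

    /* data = 0x40: (0x40 >> 6) & 1 == 1 -> FC4_PRIORITY_FCP  */
    /* data = 0x00: (0x00 >> 6) & 1 == 0 -> FC4_PRIORITY_NVME */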
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 518eb954cf42..b25f87ff8cde 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2740,6 +2740,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct scsi_qla_host *vha = sp->vha;
struct event_arg ea;
struct qla_work_evt *e;
+ struct fc_port *conflict_fcport;
+ port_id_t cid; /* conflict Nport id */
+ u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
"%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
@@ -2751,14 +2755,101 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- if (res) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
+ switch (fw_status[0]) {
+ case CS_DATA_UNDERRUN:
+ case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
- ea.data[0] = MBS_COMMAND_COMPLETE;
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(vha, &ea);
+ ea.rc = res;
+ qla_handle_els_plogi_done(vha, &ea);
+ break;
+
+ case CS_IOCB_ERROR:
+ switch (fw_status[1]) {
+ case LSC_SCODE_PORTID_USED:
+ lid = fw_status[2] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(fcport->port_name),
+ fcport->d_id, lid, &conflict_fcport);
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same
+ * loop_id & nport id; conflict
+ * fcport needs to finish cleanup
+ * before this fcport can proceed
+ * to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ fcport->loop_id = lid;
+ fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ case LSC_SCODE_NPORT_USED:
+ cid.b.domain = (fw_status[2] >> 16) & 0xff;
+ cid.b.area = (fw_status[2] >> 8) & 0xff;
+ cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, cid.b24);
+ set_bit(fcport->loop_id,
+ vha->hw->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xd046,
+ "Exchange starvation. Resetting RISC\n");
+ vha->hw->exch_starvation = 0;
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ /* fall through */
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->disc_state = DSC_LOGIN_FAILED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
}
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
@@ -2792,11 +2883,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
return -ENOMEM;
}
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_LOGIN_PEND;
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 009fd5a33fcd..1b8f297449cf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1227,11 +1227,32 @@ global_port_update:
break;
case MBA_IDC_AEN:
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
- qla83xx_handle_8200_aen(vha, mb);
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->flags.fw_init_done = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+ "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) ||
+ (mb[2] & BIT_8)) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "MPI Heartbeat stop. FW dump needed\n");
+ ha->fw_dump_mpi = 1;
+ ha->isp_ops->fw_dump(vha, 1);
+ }
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (IS_QLA83XX(ha)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[5] = RD_REG_WORD(&reg24->mailbox5);
+ mb[6] = RD_REG_WORD(&reg24->mailbox6);
+ mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ qla83xx_handle_8200_aen(vha, mb);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
+ mb[0], mb[1], mb[2], mb[3]);
+ }
break;
case MBA_DPORT_DIAGNOSTICS:
@@ -2466,6 +2487,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ if (sp->abort)
+ sp->aborted = 1;
+ else
+ sp->completed = 1;
+
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
ql_dbg(ql_dbg_io, vha, 0x3015,
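Together with the qla_def.h hunk that drops atomic_t ref_count, the new abort/aborted/completed bits give each srb a small state machine that the completion path (here) and qla2xxx_eh_abort() both consult under the qpair lock. The intended race resolution, as inferred from the hunks in this patch:

    /* under qpair->qp_lock_ptr: */
    if (sp->abort)              /* eh_abort already claimed the command */
            sp->aborted = 1;    /* completion loses the race */
    else
            sp->completed = 1;  /* normal completion; abort must back off */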
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4a1f21c11758..0cf94f05f008 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1932,7 +1932,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(ha, fcport)) {
current_login_state = pd24->current_login_state >> 4;
last_login_state = pd24->last_login_state >> 4;
} else {
@@ -3899,8 +3899,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
+ fcport->fc4_type = FS_FC4TYPE_FCP;
if (vha->flags.nvme_enabled)
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
switch (fcport->disc_state) {
case DSC_DELETED:
@@ -6287,17 +6288,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
case QLA_SUCCESS:
ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
__func__, sp->name);
- sp->free(sp);
break;
default:
ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- sp->free(sp);
break;
}
- return rval;
-
done_free_sp:
sp->free(sp);
done:
@@ -6362,7 +6359,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
uint64_t zero = 0;
u8 current_login_state, last_login_state;
- if (fcport->fc4f_nvme) {
+ if (NVME_TARGET(vha->hw, fcport)) {
current_login_state = pd->current_login_state >> 4;
last_login_state = pd->last_login_state >> 4;
} else {
@@ -6397,8 +6394,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
- if (fcport->fc4f_nvme) {
- fcport->port_type = 0;
+ if (NVME_TARGET(vha->hw, fcport)) {
+ fcport->port_type = FCT_NVME;
if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
fcport->port_type |= FCT_NVME_INITIATOR;
if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 238240984bc1..eabc5127174e 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -946,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
- goto done;
+ return rval;
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
@@ -962,7 +962,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
ql_dbg(ql_dbg_async, vha, 0xffff,
"%s: %s Failed submission. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ goto done;
}
ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
@@ -980,16 +980,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
case QLA_SUCCESS:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
__func__, sp->name);
- goto done_free_sp;
+ break;
default:
ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
- goto done_free_sp;
+ break;
}
done:
- return rval;
-
-done_free_sp:
sp->free(sp);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 6cc19e060afc..941aa53363f5 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -224,8 +224,8 @@ static void qla_nvme_abort_work(struct work_struct *work)
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
- "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
- __func__, sp, sp->type, atomic_read(&sp->ref_count));
+ "%s Calling done on sp: %p, type: 0x%x\n",
+ __func__, sp, sp->type);
sp->done(sp, 0);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 726ad4cbf4a6..8b84bc4a6ac8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -698,11 +698,6 @@ void qla2x00_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -794,11 +789,6 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
- atomic_dec(&sp->ref_count);
-
sp->free(sp);
cmd->result = res;
CMD_SP(cmd) = NULL;
@@ -903,7 +893,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
+
CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
@@ -985,18 +975,16 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- atomic_set(&sp->ref_count, 1);
CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
- sp->qpair = qpair;
rval = ha->isp_ops->start_scsi_mq(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
if (rval == QLA_INTERFACE_ERROR)
- goto qc24_fail_command;
+ goto qc24_free_sp_fail_command;
goto qc24_host_busy_free_sp;
}
@@ -1008,6 +996,11 @@ qc24_host_busy_free_sp:
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+qc24_free_sp_fail_command:
+ sp->free(sp);
+ CMD_SP(cmd) = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+
qc24_fail_command:
cmd->scsi_done(cmd);
@@ -1184,16 +1177,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static int
-sp_get(struct srb *sp)
-{
- if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
- /* kref get fail */
- return ENXIO;
- else
- return 0;
-}
-
#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
* qla2x00_isp_reg_stat
@@ -1249,6 +1232,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
uint64_t lun;
int rval;
struct qla_hw_data *ha = vha->hw;
+ uint32_t ratov_j;
+ struct qla_qpair *qpair;
+ unsigned long flags;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1261,13 +1247,26 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
sp = scsi_cmd_priv(cmd);
+ qpair = sp->qpair;
- if (sp->fcport && sp->fcport->deleted)
+ if ((sp->fcport && sp->fcport->deleted) || !qpair)
return SUCCESS;
- /* Return if the command has already finished. */
- if (sp_get(sp))
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (sp->completed) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
+ }
+
+ if (sp->abort || sp->aborted) {
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return FAILED;
+ }
+
+ sp->abort = 1;
+ sp->comp = &comp;
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1276,47 +1275,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
+ /*
+ * Abort will release the original command/sp from the FW. Let the
+ * original command call scsi_done. In return, it will wake up
+ * this sleeping thread.
+ */
rval = ha->isp_ops->abort_command(sp);
+
ql_dbg(ql_dbg_taskm, vha, 0x8003,
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
+ /* Wait for the command completion. */
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- /*
- * The command has been aborted. That means that the firmware
- * won't report a completion.
- */
- sp->done(sp, DID_ABORT << 16);
- ret = SUCCESS;
- break;
- case QLA_FUNCTION_PARAMETER_ERROR: {
- /* Wait for the command completion. */
- uint32_t ratov = ha->r_a_tov/10;
- uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
-
- WARN_ON_ONCE(sp->comp);
- sp->comp = &comp;
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_dbg(ql_dbg_taskm, vha, 0xffff,
"%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
- __func__, ha->r_a_tov);
+ __func__, ha->r_a_tov/10);
ret = FAILED;
} else {
ret = SUCCESS;
}
break;
- }
default:
- /*
- * Either abort failed or abort and completion raced. Let
- * the SCSI core retry the abort in the former case.
- */
ret = FAILED;
break;
}
sp->comp = NULL;
- atomic_dec(&sp->ref_count);
+
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
@@ -1708,32 +1697,53 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
int rval;
+ bool ret_cmd;
+ uint32_t ratov_j;
- if (sp_get(sp))
+ if (qla2x00_chip_is_down(vha)) {
+ sp->done(sp, res);
return;
+ }
if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
(sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!qla2x00_isp_reg_stat(ha))) {
+ if (sp->comp) {
+ sp->done(sp, res);
+ return;
+ }
+
sp->comp = &comp;
+ sp->abort = 1;
spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
- rval = ha->isp_ops->abort_command(sp);
+ rval = ha->isp_ops->abort_command(sp);
+ /* Wait for command completion. */
+ ret_cmd = false;
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
+ ratov_j = msecs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
- sp->done(sp, res);
+ if (wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+ __func__, ha->r_a_tov/10);
+ ret_cmd = true;
+ }
+ /* else the FW returns the SP to the driver */
break;
- case QLA_FUNCTION_PARAMETER_ERROR:
- wait_for_completion(&comp);
+ default:
+ ret_cmd = true;
break;
}
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
- sp->comp = NULL;
+ if (ret_cmd && (!sp->completed || !sp->aborted))
+ sp->done(sp, res);
+ } else {
+ sp->done(sp, res);
}
-
- atomic_dec(&sp->ref_count);
}
static void
@@ -1755,7 +1765,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- req->outstanding_cmds[cnt] = NULL;
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
@@ -1777,6 +1786,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
default:
break;
}
+ req->outstanding_cmds[cnt] = NULL;
}
}
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
@@ -3492,6 +3502,29 @@ disable_device:
return ret;
}
+static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
+{
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ struct qla_hw_data *ha;
+
+ if (!base_vha)
+ return;
+
+ ha = base_vha->hw;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list)
+ set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
+
+ /*
+ * Indicate device removal to prevent future board_disable
+ * and wait until any pending board_disable has completed.
+ */
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+}
+
static void
qla2x00_shutdown(struct pci_dev *pdev)
{
@@ -3508,7 +3541,7 @@ qla2x00_shutdown(struct pci_dev *pdev)
* Prevent future board_disable and wait
* until any pending board_disable has completed.
*/
- set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
+ __qla_set_remove_flag(vha);
cancel_work_sync(&ha->board_disable);
if (!atomic_read(&pdev->enable_cnt))
@@ -3668,10 +3701,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha = base_vha->hw;
ql_log(ql_log_info, base_vha, 0xb079,
"Removing driver\n");
-
- /* Indicate device removal to prevent future board_disable and wait
- * until any pending board_disable has completed. */
- set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+ __qla_set_remove_flag(base_vha);
cancel_work_sync(&ha->board_disable);
/*
@@ -4666,7 +4696,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->sfp_data = NULL;
if (ha->flt)
- dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
+ dma_free_coherent(&ha->pdev->dev,
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
ha->flt, ha->flt_dma);
ha->flt = NULL;
ha->flt_dma = 0;
@@ -5042,19 +5073,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
- fcport->fc4_type = FC4_TYPE_FCP_SCSI;
-
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
- fcport->fc4_type = FC4_TYPE_OTHER;
- fcport->fc4f_nvme = FC4_TYPE_NVME;
- }
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
- if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+ fcport->fc4_type = e->u.new_sess.fc4_type;
+ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
+ if (vha->flags.nvme_enabled)
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
+ }
} else {
ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -5158,7 +5187,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->flags &= ~FCF_FABRIC_DEVICE;
fcport->keep_nport_handle = 1;
if (vha->flags.nvme_enabled) {
- fcport->fc4f_nvme = 1;
+ fcport->fc4_type =
+ (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
fcport->n2n_flag = 1;
}
fcport->fw_login_state = 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a06e56224a55..51b275a575a5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -463,7 +463,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
case IMMED_NOTIFY_TYPE:
{
- struct scsi_qla_host *host = vha;
+ struct scsi_qla_host *host;
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)pkt;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 294d77c02cdf..5b0c057def2b 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -10,6 +10,7 @@
#define ISPREG(vha) (&(vha)->hw->iobase->isp24)
#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha) IOBAR(ISPREG(vha))
+#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
@@ -261,6 +262,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ulong start = le32_to_cpu(ent->t262.start_addr);
ulong end = le32_to_cpu(ent->t262.end_addr);
ulong dwords;
+ int rc;
ql_dbg(ql_dbg_misc, vha, 0xd206,
"%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
@@ -308,7 +310,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
dwords = end - start + 1;
if (buf) {
buf += *len;
- qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
+ __func__, area, start, end);
+ return INVALID_ENTRY;
+ }
}
*len += dwords * sizeof(uint32_t);
done:
@@ -838,6 +846,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ent = qla27xx_find_entry(type)(vha, ent, buf, len);
if (!ent)
break;
+
+ if (ent == INVALID_ENTRY) {
+ *len = 0;
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Unable to capture FW dump");
+ goto bailout;
+ }
}
if (tmp->count)
@@ -847,6 +862,9 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
if (ent)
ql_dbg(ql_dbg_misc, vha, 0xd019,
"%s: missing end entry\n", __func__);
+
+bailout:
+ cpu_to_le32s(&tmp->count); /* endianize residual count */
}
static void
@@ -999,8 +1017,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
uint j;
ulong len;
void *buf = vha->hw->fw_dump;
+ uint count = vha->hw->fw_dump_mpi ? 2 : 1;
- for (j = 0; j < 2; j++, fwdt++, buf += len) {
+ for (j = 0; j < count; j++, fwdt++, buf += len) {
ql_log(ql_log_warn, vha, 0xd011,
"-> fwdt%u running...\n", j);
if (!fwdt->template) {
@@ -1010,7 +1029,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
len = qla27xx_execute_fwdt_template(vha,
fwdt->template, buf);
- if (len != fwdt->dump_size) {
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
ql_log(ql_log_warn, vha, 0xd013,
"-> fwdt%u fwdump residual=%+ld\n",
j, fwdt->dump_size - len);
@@ -1025,6 +1046,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
+bailout:
+ vha->hw->fw_dump_mpi = 0;
#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
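INVALID_ENTRY above is a sentinel pointer distinct from NULL: qla27xx_walk_template() can now tell a normal end of the template (NULL) from a failed dump that should abort the walk, without changing the callback return type. A standalone sketch of the pattern (hypothetical names):

#include <stdio.h>
#include <stdint.h>

struct entry { int id; };

/* all-ones pointer: never a valid allocation, distinct from NULL */
#define INVALID_ENTRY ((struct entry *)(uintptr_t)-1)

static struct entry *process(struct entry *e)
{
	if (e->id < 0)
		return INVALID_ENTRY;  /* hard failure: abort the walk */
	if (e->id == 0)
		return NULL;           /* normal end of list */
	return e + 1;                  /* continue */
}

int main(void)
{
	struct entry list[] = { {2}, {-1}, {0} };
	struct entry *e = list;

	while (e) {
		e = process(e);
		if (e == INVALID_ENTRY) {
			puts("walk aborted");
			return 1;
		}
	}
	puts("walk finished");
	return 0;
}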
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index a8f2a953ceff..03bd3b712b77 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.19-k"
+#define QLA2XXX_VERSION "10.01.00.21-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index dac9a7013208..02636b4785c5 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
- dma_free_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- init_fw_cb, init_fw_cb_dma);
goto exit_init_fw_cb;
}
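The ql4_mbx.c hunk removes a dma_free_coherent() from the failure branch because the code at the exit_init_fw_cb label frees the same buffer again, turning the error path into a double free. The conventional shape is a single cleanup path that every exit takes; a sketch with malloc() standing in for the DMA allocator (hypothetical helper names):

#include <stdlib.h>

static int do_op(void *buf) { return buf ? 0 : -1; }

int init_fw_cb_example(void)
{
	int rc = -1;
	void *cb = malloc(512);

	if (!cb)
		return -1;
	if (do_op(cb))
		goto exit_init_fw_cb;   /* no free here: the label owns it */
	rc = 0;
exit_init_fw_cb:
	free(cb);                       /* single point of release */
	return rc;
}

int main(void) { return init_fw_cb_example(); }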
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 7a1b6c76f263..930e4803d888 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -186,7 +186,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
struct scsi_driver *drv;
unsigned int good_bytes;
- scsi_device_unbusy(sdev);
+ scsi_device_unbusy(sdev, cmd);
/*
* Clear the flags that say that the device/target/host is no longer
@@ -465,10 +465,14 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
+ if (vpd_buf->data[i] == 0x0)
+ scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
if (vpd_buf->data[i] == 0x80)
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
if (vpd_buf->data[i] == 0x83)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
+ if (vpd_buf->data[i] == 0x89)
+ scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
}
kfree(vpd_buf);
}
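scsi_attach_vpd() walks the INQUIRY Supported VPD Pages payload, whose page codes start at byte 4, and with this change also caches pages 0x00 and 0x89 (ATA Information). A standalone sketch of that scan over a canned buffer (hypothetical data):

#include <stdio.h>

int main(void)
{
	/* fake VPD page 0 response: 4-byte header + page code list */
	unsigned char vpd[] = { 0x00, 0x00, 0x00, 0x04,  /* 4 pages follow */
	                        0x00, 0x80, 0x83, 0x89 };
	int len = sizeof(vpd);

	for (int i = 4; i < len; i++) {
		switch (vpd[i]) {
		case 0x00: puts("cache pg0  (supported pages)"); break;
		case 0x80: puts("cache pg80 (unit serial)"); break;
		case 0x83: puts("cache pg83 (device id)"); break;
		case 0x89: puts("cache pg89 (ATA information)"); break;
		}
	}
	return 0;
}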
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d323523f5f9d..44cb054d5e66 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1025,7 +1025,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
int arr_len, unsigned int off_dst)
{
- int act_len, n;
+ unsigned int act_len, n;
struct scsi_data_buffer *sdb = &scp->sdb;
off_t skip = off_dst;
@@ -1039,7 +1039,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
- n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
+ n = scsi_bufflen(scp) - (off_dst + act_len);
scsi_set_resid(scp, min(scsi_get_resid(scp), n));
return 0;
}
@@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
+ if (sdebug_num_tgts < 0) {
+ pr_err("num_tgts must be >= 0\n");
+ return -EINVAL;
+ }
+
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
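The p_fill_from_dev_buffer() change makes act_len and n unsigned to match scsi_bufflen(), avoiding mixed-sign arithmetic in the resid calculation (the kernel's type-checked min() also wants matching operand types). A small demonstration of why mixed-sign comparisons bite:

#include <stdio.h>

int main(void)
{
	int resid = -1;        /* pathological leftover count */
	unsigned int n = 100;

	/* mixed-sign comparison: resid converts to unsigned and
	 * becomes 4294967295, so it is NOT "less than" 100 here */
	if (resid < n)
		puts("resid looks smaller");
	else
		printf("resid compares as %u\n", (unsigned int)resid);

	/* keeping every operand unsigned, as the hunk above does,
	 * removes the mixed-sign comparison entirely */
	unsigned int a = 42;
	printf("min = %u\n", a < n ? a : n);
	return 0;
}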
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 91c007d26c1e..3e7a45d0daca 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -189,7 +189,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* active on the host/device.
*/
if (unbusy)
- scsi_device_unbusy(device);
+ scsi_device_unbusy(device, cmd);
/*
* Requeue this command. It will go before all other commands
@@ -321,20 +321,20 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
}
/*
- * Decrement the host_busy counter and wake up the error handler if necessary.
- * Avoid as follows that the error handler is not woken up if shost->host_busy
- * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
- * with an RCU read lock in this function to ensure that this function in its
- * entirety either finishes before scsi_eh_scmd_add() increases the
+ * Wake up the error handler if necessary. To avoid missing a wakeup when the
+ * number of host in-flight requests equals shost->host_failed, use call_rcu()
+ * in scsi_eh_scmd_add() in combination with an RCU read lock in this function
+ * to ensure that this function in its entirety either finishes before
+ * scsi_eh_scmd_add() increases the
* host_failed counter or that it notices the shost state change made by
* scsi_eh_scmd_add().
*/
-static void scsi_dec_host_busy(struct Scsi_Host *shost)
+static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
rcu_read_lock();
- atomic_dec(&shost->host_busy);
+ __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
@@ -344,12 +344,12 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
rcu_read_unlock();
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
@@ -430,9 +430,6 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if (shost->can_queue > 0 &&
- atomic_read(&shost->host_busy) >= shost->can_queue)
- return true;
if (atomic_read(&shost->host_blocked) > 0)
return true;
if (shost->host_self_blocked)
@@ -1139,6 +1136,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
unsigned long jiffies_at_alloc;
int retries;
+ bool in_flight;
if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
flags |= SCMD_INITIALIZED;
@@ -1147,6 +1145,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
jiffies_at_alloc = cmd->jiffies_at_alloc;
retries = cmd->retries;
+ in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
@@ -1158,6 +1157,8 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies_at_alloc;
cmd->retries = retries;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
scsi_add_cmd_to_list(cmd);
}
@@ -1367,16 +1368,14 @@ out_dec:
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ struct scsi_cmnd *cmd)
{
- unsigned int busy;
-
if (scsi_host_in_recovery(shost))
return 0;
- busy = atomic_inc_return(&shost->host_busy) - 1;
if (atomic_read(&shost->host_blocked) > 0) {
- if (busy)
+ if (scsi_host_busy(shost) > 0)
goto starved;
/*
@@ -1390,8 +1389,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
"unblocking host at zero depth\n"));
}
- if (shost->can_queue > 0 && busy >= shost->can_queue)
- goto starved;
if (shost->host_self_blocked)
goto starved;
@@ -1403,6 +1400,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
spin_unlock_irq(shost->host_lock);
}
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
return 1;
starved:
@@ -1411,7 +1410,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
return 0;
}
@@ -1665,7 +1664,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_RESOURCE;
if (!scsi_target_queue_ready(shost, sdev))
goto out_put_budget;
- if (!scsi_host_queue_ready(q, shost, sdev))
+ if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1697,7 +1696,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- scsi_dec_host_busy(shost);
+ scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
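The scsi_lib.c rework retires the shared shost->host_busy atomic from the fast path in favor of a per-command SCMD_STATE_INFLIGHT bit, so submission and completion stop bouncing one hot cacheline between CPUs; a total is computed only when someone actually asks (scsi_host_busy()). A standalone C11-atomics sketch of the idea (hypothetical structure, not the SCSI midlayer's):

#include <stdatomic.h>
#include <stdio.h>

#define NCMDS 8
#define STATE_INFLIGHT 0

struct cmd { atomic_uint state; };

static struct cmd cmds[NCMDS];

/* hot path: touch only the command's own word */
static void start_cmd(struct cmd *c)
{
	atomic_fetch_or(&c->state, 1u << STATE_INFLIGHT);
}

static void finish_cmd(struct cmd *c)
{
	atomic_fetch_and(&c->state, ~(1u << STATE_INFLIGHT));
}

/* slow path: walk the commands when a total is really wanted */
static unsigned int host_busy(void)
{
	unsigned int n = 0;
	for (int i = 0; i < NCMDS; i++)
		n += atomic_load(&cmds[i].state) >> STATE_INFLIGHT & 1u;
	return n;
}

int main(void)
{
	start_cmd(&cmds[1]);
	start_cmd(&cmds[5]);
	finish_cmd(&cmds[1]);
	printf("in flight: %u\n", host_busy());  /* 1 */
	return 0;
}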
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index c6ed0b12e807..c91fa3feb930 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -390,6 +390,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
const char *mlret_string = scsi_mlreturn_string(disposition);
const char *hb_string = scsi_hostbyte_string(cmd->result);
const char *db_string = scsi_driverbyte_string(cmd->result);
+ unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
@@ -431,10 +432,15 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg,
if (db_string)
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=%s", db_string);
+ "driverbyte=%s ", db_string);
else
off += scnprintf(logbuf + off, logbuf_len - off,
- "driverbyte=0x%02x", driver_byte(cmd->result));
+ "driverbyte=0x%02x ",
+ driver_byte(cmd->result));
+
+ off += scnprintf(logbuf + off, logbuf_len - off,
+ "cmd_age=%lus", cmd_age);
+
out_printk:
dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf);
scsi_log_release_buffer(logbuf);
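The added cmd_age field is simply the jiffies delta since allocation divided down to seconds; because the subtraction is done in unsigned arithmetic it stays correct even across a jiffies wraparound. A tiny illustration (HZ value assumed for the example):

#include <stdio.h>

#define HZ 250UL  /* assumed tick rate for the example */

int main(void)
{
	/* unsigned subtraction is wrap-safe: works even if the
	 * counter overflowed between alloc and completion */
	unsigned long jiffies_at_alloc = (unsigned long)-100; /* pre-wrap */
	unsigned long jiffies_now = 400;                      /* post-wrap */

	unsigned long cmd_age = (jiffies_now - jiffies_at_alloc) / HZ;
	printf("cmd_age=%lus\n", cmd_age);   /* 2s: (400+100)/250 */
	return 0;
}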
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index cc2859d76d81..3bff9f7aa684 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -87,7 +87,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
-extern void scsi_device_unbusy(struct scsi_device *sdev);
+extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index cc51f4756077..677b5c5403d2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -437,6 +437,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
+ struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
sdev = container_of(work, struct scsi_device, ew.work);
@@ -466,16 +467,24 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
sdev->request_queue = NULL;
mutex_lock(&sdev->inquiry_mutex);
+ vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0,
+ lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80,
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
+ if (vpd_pg0)
+ kfree_rcu(vpd_pg0, rcu);
if (vpd_pg83)
kfree_rcu(vpd_pg83, rcu);
if (vpd_pg80)
kfree_rcu(vpd_pg80, rcu);
+ if (vpd_pg89)
+ kfree_rcu(vpd_pg89, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -868,6 +877,8 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
+sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -1200,12 +1211,18 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
struct scsi_device *sdev = to_scsi_device(dev);
+ if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0)
+ return 0;
+
if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
return 0;
if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
return 0;
+ if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
+ return 0;
+
return S_IRUGO;
}
@@ -1248,8 +1265,10 @@ static struct attribute *scsi_sdev_attrs[] = {
};
static struct bin_attribute *scsi_sdev_bin_attrs[] = {
+ &dev_attr_vpd_pg0,
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
+ &dev_attr_vpd_pg89,
&dev_attr_inquiry,
NULL
};
@@ -1309,7 +1328,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
device_enable_async_suspend(&sdev->sdev_gendev);
scsi_autopm_get_target(starget);
pm_runtime_set_active(&sdev->sdev_gendev);
- pm_runtime_forbid(&sdev->sdev_gendev);
+ if (!sdev->rpm_autosuspend)
+ pm_runtime_forbid(&sdev->sdev_gendev);
pm_runtime_enable(&sdev->sdev_gendev);
scsi_autopm_put_target(starget);
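The release path above swaps NULL into each cached VPD pointer under inquiry_mutex via rcu_replace_pointer(), then defers the actual kfree with kfree_rcu() so lockless readers drain first. A mutex-only userspace analogy of the detach-then-free shape (no real RCU here; the deferred reclamation is exactly what RCU adds in the kernel):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct vpd { char data[16]; };

static struct vpd *vpd_pg80;
static pthread_mutex_t inquiry_mutex = PTHREAD_MUTEX_INITIALIZER;

static void release_vpd(void)
{
	struct vpd *old;

	pthread_mutex_lock(&inquiry_mutex);
	old = vpd_pg80;      /* detach under the lock... */
	vpd_pg80 = NULL;
	pthread_mutex_unlock(&inquiry_mutex);

	/* ...free outside it; in the kernel this is kfree_rcu(),
	 * deferred until readers are done */
	free(old);
}

int main(void)
{
	vpd_pg80 = malloc(sizeof(*vpd_pg80));
	strcpy(vpd_pg80->data, "pg80");
	release_vpd();
	printf("vpd_pg80=%p\n", (void *)vpd_pg80);
	return 0;
}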
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 0f17e7dac1b0..ac35c301c792 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -9,7 +9,7 @@
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
+#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8]))
static const char *
scsi_trace_misc(struct trace_seq *, unsigned char *, int);
@@ -18,15 +18,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
- txlen = cdb[4];
+ /*
+ * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be read (READ(6)) or written (WRITE(6)).
+ */
+ txlen = cdb[4] ? cdb[4] : 256;
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
+ trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
@@ -36,17 +39,12 @@ static const char *
scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
+ u32 lba, txlen;
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be16(&cdb[7]);
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME)
@@ -61,19 +59,12 @@ static const char *
scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u32 lba, txlen;
+
+ lba = get_unaligned_be32(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
trace_seq_putc(p, 0);
@@ -84,23 +75,13 @@ static const char *
scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
+ u64 lba;
+ u32 txlen;
+
+ lba = get_unaligned_be64(&cdb[2]);
+ txlen = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen,
cdb[1] >> 5);
if (cdb[0] == WRITE_SAME_16)
@@ -115,8 +96,8 @@ static const char *
scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
+ u64 lba;
+ u32 ei_lbrt, txlen;
switch (SERVICE_ACTION32(cdb)) {
case READ_32:
@@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
+ lba = get_unaligned_be64(&cdb[12]);
+ ei_lbrt = get_unaligned_be32(&cdb[20]);
+ txlen = get_unaligned_be32(&cdb[28]);
+
+ trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u",
+ cmd, lba, txlen, cdb[10] >> 5, ei_lbrt);
if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
@@ -170,7 +137,7 @@ static const char *
scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
- unsigned int regions = cdb[7] << 8 | cdb[8];
+ unsigned int regions = get_unaligned_be16(&cdb[7]);
trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
trace_seq_putc(p, 0);
@@ -182,8 +149,8 @@ static const char *
scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p), *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
+ u64 lba;
+ u32 alloc_len;
switch (SERVICE_ACTION16(cdb)) {
case SAI_READ_CAPACITY_16:
@@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
goto out;
}
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
+ lba = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+
+ trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len);
out:
trace_seq_putc(p, 0);
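The scsi_trace.c rewrite replaces hand-rolled shift-and-OR decoding with get_unaligned_be16/32/64() from <asm/unaligned.h>. Besides being shorter, it dodges a promotion trap: cdb[n] << 24 is evaluated as int, so a byte of 0x80 or above sign-extends when widened into a 64-bit LBA. A standalone demonstration plus a safe big-endian reader:

#include <stdio.h>
#include <stdint.h>

static uint32_t rd_be32(const unsigned char *p)
{
	/* byte-by-byte in unsigned arithmetic: no promotion
	 * surprises and no alignment requirements */
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8  | (uint32_t)p[3];
}

int main(void)
{
	unsigned char cdb[4] = { 0x80, 0x00, 0x00, 0x01 };
	uint64_t lba = 0;

	/* the pattern the hunk removes: the shift result is an int,
	 * and a top byte >= 0x80 sign-extends into the 64-bit lba */
	int bad = (int)((unsigned int)cdb[0] << 24);
	lba |= bad;
	lba |= cdb[3];
	printf("shift/OR : 0x%llx\n", (unsigned long long)lba);
	printf("be32 read: 0x%llx\n", (unsigned long long)rd_be32(cdb));
	return 0;
}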
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 470ee6dc3f7e..7dc17821f873 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1702,20 +1702,30 @@ static void sd_rescan(struct device *dev)
static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct gendisk *disk = bdev->bd_disk;
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdev = sdkp->device;
+ void __user *p = compat_ptr(arg);
int error;
+ error = scsi_verify_blk_ioctl(bdev, cmd);
+ if (error < 0)
+ return error;
+
error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
(mode & FMODE_NDELAY) != 0);
if (error)
return error;
+
+ if (is_sed_ioctl(cmd))
+ return sed_ioctl(sdkp->opal_dev, cmd, p);
/*
* Let the static ioctl translation table take care of it.
*/
if (!sdev->host->hostt->compat_ioctl)
return -ENOIOCTLCMD;
- return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
}
#endif
@@ -3380,6 +3390,10 @@ static int sd_probe(struct device *dev)
}
blk_pm_runtime_init(sdp->request_queue, dev);
+ if (sdp->rpm_autosuspend) {
+ pm_runtime_set_autosuspend_delay(dev,
+ sdp->host->hostt->rpm_autosuspend_delay);
+ }
device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
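sd_compat_ioctl() above decodes the argument with compat_ptr() because a 32-bit caller hands the kernel a 32-bit user pointer; structures that embed pointers also change layout between the two ABIs, which is what forces the separate compat_sg_req_info in the sg.c hunk further down. A quick standalone look at the layout drift (hypothetical struct names):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* native form: holds a real pointer */
struct req_info {
	int  pack_id;
	void *usr_ptr;
	unsigned int duration;
};

/* what a 32-bit caller laid out: pointers are 4 bytes there,
 * modeled with a fixed-width integer (like compat_uptr_t) */
struct compat_req_info {
	int      pack_id;
	uint32_t usr_ptr;
	unsigned int duration;
};

int main(void)
{
	printf("native: size=%zu duration@%zu\n",
	       sizeof(struct req_info),
	       offsetof(struct req_info, duration));
	printf("compat: size=%zu duration@%zu\n",
	       sizeof(struct compat_req_info),
	       offsetof(struct compat_req_info, duration));
	/* different offsets: a raw copy of the native struct would
	 * land fields at the wrong places for a 32-bit reader */
	return 0;
}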
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cce757506383..160748ad9c0f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -429,26 +429,33 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
"sg_read: count=%d\n", (int) count));
- if (!access_ok(buf, count))
- return -EFAULT;
if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
- old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
- if (!old_hdr)
- return -ENOMEM;
- if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
- retval = -EFAULT;
- goto free_old_hdr;
- }
+ old_hdr = memdup_user(buf, SZ_SG_HEADER);
+ if (IS_ERR(old_hdr))
+ return PTR_ERR(old_hdr);
if (old_hdr->reply_len < 0) {
if (count >= SZ_SG_IO_HDR) {
+ /*
+ * This is stupid.
+ *
+ * We're copying the whole sg_io_hdr_t from user
+ * space just to get the 'pack_id' field. But the
+ * field is at different offsets for the compat
+ * case, so we'll use "get_sg_io_hdr()" to copy
+ * the whole thing and convert it.
+ *
+ * We could do something like just calculating the
+ * offset based on 'in_compat_syscall()', but the
+ * 'compat_sg_io_hdr' definition is in the wrong
+ * place for that.
+ */
sg_io_hdr_t *new_hdr;
new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
if (!new_hdr) {
retval = -ENOMEM;
goto free_old_hdr;
}
- retval =__copy_from_user
- (new_hdr, buf, SZ_SG_IO_HDR);
+ retval = get_sg_io_hdr(new_hdr, buf);
req_pack_id = new_hdr->pack_id;
kfree(new_hdr);
if (retval) {
@@ -538,7 +545,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
/* Now copy the result back to the user buffer. */
if (count >= SZ_SG_HEADER) {
- if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
+ if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
retval = -EFAULT;
goto free_old_hdr;
}
@@ -589,10 +596,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
}
if (hp->masked_status || hp->host_status || hp->driver_status)
hp->info |= SG_INFO_CHECK;
- if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
- err = -EFAULT;
- goto err_out;
- }
+ err = put_sg_io_hdr(hp, buf);
err_out:
err2 = sg_finish_rem_req(srp);
sg_remove_request(sfp, srp);
@@ -627,11 +631,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
scsi_block_when_processing_errors(sdp->device)))
return -ENXIO;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
if (count < SZ_SG_HEADER)
return -EIO;
- if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
return -EFAULT;
blocking = !(filp->f_flags & O_NONBLOCK);
if (old_hdr.reply_len < 0)
@@ -640,13 +642,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (count < (SZ_SG_HEADER + 6))
return -EIO; /* The minimum scsi command length is 6 bytes. */
+ buf += SZ_SG_HEADER;
+ if (get_user(opcode, buf))
+ return -EFAULT;
+
if (!(srp = sg_add_request(sfp))) {
SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
"sg_write: queue full\n"));
return -EDOM;
}
- buf += SZ_SG_HEADER;
- __get_user(opcode, buf);
mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
@@ -689,7 +693,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
hp->flags = input_size; /* structure abuse ... */
hp->pack_id = old_hdr.pack_id;
hp->usr_ptr = NULL;
- if (__copy_from_user(cmnd, buf, cmd_size))
+ if (copy_from_user(cmnd, buf, cmd_size))
return -EFAULT;
/*
* SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
@@ -724,8 +728,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
if (count < SZ_SG_IO_HDR)
return -EINVAL;
- if (!access_ok(buf, count))
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
@@ -735,7 +737,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
}
srp->sg_io_owned = sg_io_owned;
hp = &srp->header;
- if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+ if (get_sg_io_hdr(hp, buf)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
@@ -763,11 +765,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EMSGSIZE;
}
- if (!access_ok(hp->cmdp, hp->cmd_len)) {
- sg_remove_request(sfp, srp);
- return -EFAULT; /* protects following copy_from_user()s + get_user()s */
- }
- if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
+ if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
sg_remove_request(sfp, srp);
return -EFAULT;
}
@@ -893,6 +891,33 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
}
}
+#ifdef CONFIG_COMPAT
+struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
+ char req_state;
+ char orphan;
+ char sg_io_owned;
+ char problem;
+ int pack_id;
+ compat_uptr_t usr_ptr;
+ unsigned int duration;
+ int unused;
+};
+
+static int put_compat_request_table(struct compat_sg_req_info __user *o,
+ struct sg_req_info *rinfo)
+{
+ int i;
+ for (i = 0; i < SG_MAX_QUEUE; i++) {
+ if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) ||
+ put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) ||
+ put_user(rinfo[i].duration, &o[i].duration) ||
+ put_user(rinfo[i].unused, &o[i].unused))
+ return -EFAULT;
+ }
+ return 0;
+}
+#endif
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -917,8 +942,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
- if (!access_ok(p, SZ_SG_IO_HDR))
- return -EFAULT;
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
if (result < 0)
@@ -963,26 +986,21 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SG_GET_LOW_DMA:
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
case SG_GET_SCSI_ID:
- if (!access_ok(p, sizeof (sg_scsi_id_t)))
- return -EFAULT;
- else {
- sg_scsi_id_t __user *sg_idp = p;
+ {
+ sg_scsi_id_t v;
if (atomic_read(&sdp->detaching))
return -ENODEV;
- __put_user((int) sdp->device->host->host_no,
- &sg_idp->host_no);
- __put_user((int) sdp->device->channel,
- &sg_idp->channel);
- __put_user((int) sdp->device->id, &sg_idp->scsi_id);
- __put_user((int) sdp->device->lun, &sg_idp->lun);
- __put_user((int) sdp->device->type, &sg_idp->scsi_type);
- __put_user((short) sdp->device->host->cmd_per_lun,
- &sg_idp->h_cmd_per_lun);
- __put_user((short) sdp->device->queue_depth,
- &sg_idp->d_queue_depth);
- __put_user(0, &sg_idp->unused[0]);
- __put_user(0, &sg_idp->unused[1]);
+ memset(&v, 0, sizeof(v));
+ v.host_no = sdp->device->host->host_no;
+ v.channel = sdp->device->channel;
+ v.scsi_id = sdp->device->id;
+ v.lun = sdp->device->lun;
+ v.scsi_type = sdp->device->type;
+ v.h_cmd_per_lun = sdp->device->host->cmd_per_lun;
+ v.d_queue_depth = sdp->device->queue_depth;
+ if (copy_to_user(p, &v, sizeof(sg_scsi_id_t)))
+ return -EFAULT;
return 0;
}
case SG_SET_FORCE_PACK_ID:
@@ -992,20 +1010,16 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
sfp->force_packid = val ? 1 : 0;
return 0;
case SG_GET_PACK_ID:
- if (!access_ok(ip, sizeof (int)))
- return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
- __put_user(srp->header.pack_id, ip);
- return 0;
+ return put_user(srp->header.pack_id, ip);
}
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- __put_user(-1, ip);
- return 0;
+ return put_user(-1, ip);
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
val = 0;
@@ -1073,9 +1087,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
val = (sdp->device ? 1 : 0);
return put_user(val, ip);
case SG_GET_REQUEST_TABLE:
- if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
- return -EFAULT;
- else {
+ {
sg_req_info_t *rinfo;
rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO,
@@ -1085,8 +1097,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
read_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
- SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+ #ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ result = put_compat_request_table(p, rinfo);
+ else
+ #endif
+ result = copy_to_user(p, rinfo,
+ SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
return result;
@@ -1797,7 +1814,14 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
struct iovec *iov = NULL;
struct iov_iter i;
- res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ res = compat_import_iovec(rw, hp->dxferp, iov_count,
+ 0, &iov, &i);
+ else
+#endif
+ res = import_iovec(rw, hp->dxferp, iov_count,
+ 0, &iov, &i);
if (res < 0)
return res;
@@ -1984,12 +2008,12 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
num = 1 << (PAGE_SHIFT + schp->page_order);
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(schp->pages[k]),
+ if (copy_to_user(outp, page_address(schp->pages[k]),
num))
return -EFAULT;
num_read_xfer -= num;
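Several sg.c hunks collapse the access_ok() + kmalloc() + __copy_from_user() sequence into memdup_user(), which allocates, copies and reports failure through an ERR_PTR in one step; the plain copy_*_user() calls perform their own access checks, which is why the standalone access_ok() calls could go. A userspace-flavored sketch of the helper's shape (malloc/memcpy standing in for the kernel primitives):

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

/* sketch: combined allocate-and-copy, error reported in the pointer */
static void *memdup_example(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return (void *)(intptr_t)-ENOMEM;  /* ERR_PTR analogue */
	memcpy(p, src, len);                       /* copy_from_user analogue */
	return p;
}

static int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;   /* kernel's IS_ERR range */
}

int main(void)
{
	char hdr[4] = "sg0";
	char *copy = memdup_example(hdr, sizeof(hdr));

	if (is_err(copy))
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}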
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 79d2af36f655..1129fe7a27ed 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -276,7 +276,9 @@ struct pqi_raid_path_request {
u8 reserved4 : 2;
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
- u8 cdb[32];
+ u8 cdb[16];
+ u8 reserved6[12];
+ __le32 timeout;
struct pqi_sg_descriptor
sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -385,7 +387,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[4];
+ u8 reserved[2];
+ __le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
__le16 outbound_queue_id_to_manage;
@@ -445,7 +448,7 @@ struct pqi_vendor_general_response {
struct pqi_ofa_memory {
__le64 signature; /* "OFA_QRM" */
- __le16 version; /* version of this struct(1 = 1st version) */
+ __le16 version; /* version of this struct (1 = 1st version) */
u8 reserved[62];
__le32 bytes_allocated; /* total allocated memory in bytes */
__le16 num_memory_descriptors;
@@ -761,6 +764,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_OFA 0
#define PQI_FIRMWARE_FEATURE_SMP 1
#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11
+#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13
+#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -826,10 +831,17 @@ union pqi_reset_register {
struct report_lun_header {
__be32 list_length;
- u8 extended_response;
+ u8 flags;
u8 reserved[3];
};
+/* for flags field of struct report_lun_header */
+#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0)
+#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5)
+#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6)
+
+#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1)
+
struct report_log_lun_extended_entry {
u8 lunid[8];
u8 volume_id[16];
@@ -851,7 +863,7 @@ struct report_phys_lun_extended_entry {
};
/* for device_flags field of struct report_phys_lun_extended_entry */
-#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
+#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8
struct report_phys_lun_extended {
struct report_lun_header header;
@@ -864,7 +876,7 @@ struct raid_map_disk_data {
u8 reserved[2];
};
-/* constants for flags field of RAID map */
+/* for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED 0x1
struct raid_map {
@@ -907,7 +919,6 @@ struct pqi_scsi_dev {
u8 scsi3addr[8];
__be64 wwid;
u8 volume_id[16];
- u8 unique_id[16];
u8 is_physical_device : 1;
u8 is_external_raid_device : 1;
u8 is_expander_smp_device : 1;
@@ -954,13 +965,9 @@ struct pqi_scsi_dev {
};
/* VPD inquiry pages */
-#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
-#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
-#define SCSI_VPD_HEADER_SZ 4
-#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */
#define VPD_PAGE (1 << 8)
@@ -1130,13 +1137,16 @@ struct pqi_ctrl_info {
struct mutex ofa_mutex; /* serialize ofa */
bool controller_online;
bool block_requests;
- bool in_shutdown;
+ bool block_device_reset;
bool in_ofa;
+ bool in_shutdown;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
u8 pqi_reset_quiesce_supported : 1;
u8 soft_reset_handshake_supported : 1;
+ u8 raid_iu_timeout_supported: 1;
+ u8 tmf_iu_timeout_supported: 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -1170,9 +1180,10 @@ struct pqi_ctrl_info {
spinlock_t raid_bypass_retry_list_lock;
struct work_struct raid_bypass_retry_work;
- struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
- dma_addr_t pqi_ofa_mem_dma_handle;
- void **pqi_ofa_chunk_virt_addr;
+ struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+ dma_addr_t pqi_ofa_mem_dma_handle;
+ void **pqi_ofa_chunk_virt_addr;
+ atomic_t sync_cmds_outstanding;
};
enum pqi_ctrl_mode {
@@ -1191,10 +1202,6 @@ enum pqi_ctrl_mode {
#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
#define CISS_GET_RAID_MAP 0xc8
-/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
-#define CISS_REPORT_LOG_EXTENDED 0x1
-#define CISS_REPORT_PHYS_EXTENDED 0x2
-
/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15
@@ -1208,7 +1215,7 @@ enum pqi_ctrl_mode {
#define BMIC_SET_DIAG_OPTIONS 0xf4
#define BMIC_SENSE_DIAG_OPTIONS 0xf5
-#define CSMI_CC_SAS_SMP_PASSTHRU 0X17
+#define CSMI_CC_SAS_SMP_PASSTHRU 0x17
#define SA_FLUSH_CACHE 0x1
@@ -1244,10 +1251,12 @@ struct bmic_sense_subsystem_info {
u8 ctrl_serial_number[16];
};
-#define SA_EXPANDER_SMP_DEVICE 0x05
-#define SA_CONTROLLER_DEVICE 0x07
-/*SCSI Invalid Device Type for SAS devices*/
-#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
+/* constants for device_type field */
+#define SA_DEVICE_TYPE_SATA 0x1
+#define SA_DEVICE_TYPE_SAS 0x2
+#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_CONTROLLER 0x7
+#define SA_DEVICE_TYPE_NVME 0x9
struct bmic_identify_physical_device {
u8 scsi_bus; /* SCSI Bus number on controller */
@@ -1273,7 +1282,7 @@ struct bmic_identify_physical_device {
__le32 rpm; /* drive rotational speed in RPM */
u8 device_type; /* type of drive */
u8 sata_version; /* only valid when device_type = */
- /* BMIC_DEVICE_TYPE_SATA */
+ /* SA_DEVICE_TYPE_SATA */
__le64 big_total_block_count;
__le64 ris_starting_lba;
__le32 ris_size;
@@ -1396,18 +1405,6 @@ struct bmic_diag_options {
#pragma pack()
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
- void *hostdata = shost_priv(shost);
-
- return *((struct pqi_ctrl_info **)hostdata);
-}
-
-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- return !ctrl_info->controller_online;
-}
-
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
atomic_inc(&ctrl_info->num_busy_threads);
@@ -1418,9 +1415,11 @@ static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
atomic_dec(&ctrl_info->num_busy_threads);
}
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
- return ctrl_info->block_requests;
+ void *hostdata = shost_priv(shost);
+
+ return *((struct pqi_ctrl_info **)hostdata);
}
void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
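The pqi_raid_path_request change carves the 32-byte cdb[] into cdb[16], reserved bytes and a __le32 timeout while keeping the overall request length unchanged, so the on-wire format stays compatible with firmware that ignores the new field. Invariants like that can be pinned at build time; a sketch with hypothetical struct names:

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct raid_path_request_v1 {
	uint8_t cdb[32];
};
struct raid_path_request_v2 {
	uint8_t  cdb[16];
	uint8_t  reserved[12];
	uint32_t timeout;   /* little-endian on the wire */
};
#pragma pack(pop)

/* refuse to compile if the carve-out changed the wire size */
_Static_assert(sizeof(struct raid_path_request_v2) ==
	       sizeof(struct raid_path_request_v1),
	       "request size must stay stable");

int main(void)
{
	printf("request size: %zu bytes\n",
	       sizeof(struct raid_path_request_v2));
	return 0;
}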
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ea5409bebf57..7b7ef3acb504 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.8-026"
+#define DRIVER_VERSION "1.2.10-025"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 8
-#define DRIVER_REVISION 26
+#define DRIVER_RELEASE 10
+#define DRIVER_REVISION 25
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -211,6 +211,11 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
return scsi3addr[2] != 0;
}
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ return !ctrl_info->controller_online;
+}
+
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
if (ctrl_info->controller_online)
@@ -235,6 +240,21 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_device_reset = true;
+}
+
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_device_reset;
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
ctrl_info->block_requests = true;
@@ -331,6 +351,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
return device->in_remove && !ctrl_info->in_shutdown;
}
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->in_shutdown;
+}
+
static inline void pqi_schedule_rescan_worker_with_delay(
struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
@@ -360,6 +390,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_work_sync(&ctrl_info->event_work);
+}
+
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
if (!ctrl_info->heartbeat_counter)
@@ -377,7 +412,7 @@ static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
}
static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
- u8 clear)
+ u8 clear)
{
u8 status;
@@ -462,9 +497,9 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
request->data_direction = SOP_READ_FLAG;
cdb[0] = cmd;
if (cmd == CISS_REPORT_PHYS)
- cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+ cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
else
- cdb[1] = CISS_REPORT_LOG_EXTENDED;
+ cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
put_unaligned_be32(cdb_length, &cdb[6]);
break;
case CISS_GET_RAID_MAP:
@@ -567,13 +602,12 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
- u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
- struct pqi_raid_error_info *error_info,
- unsigned long timeout_msecs)
+ u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+ struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
int rc;
- enum dma_data_direction dir;
struct pqi_raid_path_request request;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
cmd, scsi3addr, buffer,
@@ -581,44 +615,44 @@ static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
if (rc)
return rc;
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
- 0, error_info, timeout_msecs);
+ rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+ error_info, timeout_msecs);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
-/* Helper functions for pqi_send_scsi_raid_request */
+/* helper functions for pqi_send_scsi_raid_request */
static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length)
+ u8 cmd, void *buffer, size_t buffer_length)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+ buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}
static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
- u8 cmd, void *buffer, size_t buffer_length,
- struct pqi_raid_error_info *error_info)
+ u8 cmd, void *buffer, size_t buffer_length,
+ struct pqi_raid_error_info *error_info)
{
return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
- buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+ buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}
-
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
- struct bmic_identify_controller *buffer)
+ struct bmic_identify_controller *buffer)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
- buffer, sizeof(*buffer));
+ buffer, sizeof(*buffer));
}
static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
- struct bmic_sense_subsystem_info *sense_info)
+ struct bmic_sense_subsystem_info *sense_info)
{
return pqi_send_ctrl_raid_request(ctrl_info,
- BMIC_SENSE_SUBSYSTEM_INFORMATION,
- sense_info, sizeof(*sense_info));
+ BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+ sizeof(*sense_info));
}
static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
@@ -628,83 +662,9 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u16 vpd_page)
-{
- int rc;
- int i;
- int pages;
- unsigned char *buf, bufsize;
-
- buf = kzalloc(256, GFP_KERNEL);
- if (!buf)
- return false;
-
- /* Get the size of the page list first */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, SCSI_VPD_HEADER_SZ);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
- bufsize = pages + SCSI_VPD_HEADER_SZ;
- else
- bufsize = 255;
-
- /* Get the whole VPD page list */
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
- buf, bufsize);
- if (rc != 0)
- goto exit_unsupported;
-
- pages = buf[3];
- for (i = 1; i <= pages; i++)
- if (buf[3 + i] == vpd_page)
- goto exit_supported;
-
-exit_unsupported:
- kfree(buf);
- return false;
-
-exit_supported:
- kfree(buf);
- return true;
-}
-
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
- u8 *scsi3addr, u8 *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
- return 1; /* function not supported */
-
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
- VPD_PAGE | SCSI_VPD_DEVICE_ID,
- buf, 64);
- if (rc == 0) {
- if (buflen > 16)
- buflen = 16;
- memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
- }
-
- kfree(buf);
-
- return rc;
-}
-
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
- struct bmic_identify_physical_device *buffer,
- size_t buffer_length)
+ struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
int rc;
enum dma_data_direction dir;
@@ -725,6 +685,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
0, NULL, NO_TIMEOUT);
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+
return rc;
}
@@ -763,7 +724,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, error_info);
}
-#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+#define PQI_FETCH_PTRAID_DATA (1 << 31)
static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
@@ -775,14 +736,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
return -ENOMEM;
rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ diag, sizeof(*diag));
if (rc)
goto out;
diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
- rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
- diag, sizeof(*diag));
+ rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+ sizeof(*diag));
+
out:
kfree(diag);
@@ -793,7 +755,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
- buffer, buffer_length);
+ buffer, buffer_length);
}
#pragma pack(1)
@@ -946,7 +908,7 @@ static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
- buffer_length);
+ buffer_length);
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1280,9 +1242,9 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
-#define RAID_BYPASS_STATUS 4
-#define RAID_BYPASS_CONFIGURED 0x1
-#define RAID_BYPASS_ENABLED 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
bypass_status = buffer[RAID_BYPASS_STATUS];
device->raid_bypass_configured =
@@ -1385,14 +1347,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
}
}
- if (pqi_get_device_id(ctrl_info, device->scsi3addr,
- device->unique_id, sizeof(device->unique_id)) < 0)
- dev_warn(&ctrl_info->pci_dev->dev,
- "Can't get device id for scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target,
- device->lun);
-
out:
kfree(buffer);
@@ -1413,6 +1367,7 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
return;
}
+
device->box_index = id_phys->box_index;
device->phys_box_on_bus = id_phys->phys_box_on_bus;
device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
@@ -1828,7 +1783,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device = new_device_list[i];
find_result = pqi_scsi_find_entry(ctrl_info, device,
- &matching_device);
+ &matching_device);
switch (find_result) {
case DEVICE_SAME:
@@ -2057,9 +2012,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = -ENOMEM;
goto out;
}
- if (pqi_hide_vsep) {
- int i;
+ if (pqi_hide_vsep) {
for (i = num_physicals - 1; i >= 0; i--) {
phys_lun_ext_entry =
&physdev_list->lun_entries[i];
@@ -2132,7 +2086,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->is_physical_device = is_physical_device;
if (is_physical_device) {
if (phys_lun_ext_entry->device_type ==
- SA_EXPANDER_SMP_DEVICE)
+ SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
@@ -2169,16 +2123,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (device->is_physical_device) {
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
- REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+ CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun_ext_entry->aio_handle) {
device->aio_enabled = true;
- device->aio_handle =
- phys_lun_ext_entry->aio_handle;
+ device->aio_handle =
+ phys_lun_ext_entry->aio_handle;
}
-
- pqi_get_physical_disk_info(ctrl_info,
- device, id_phys);
-
+ pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -3158,7 +3109,7 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
- enum pqi_soft_reset_status reset_status)
+ enum pqi_soft_reset_status reset_status)
{
int rc;
@@ -3202,8 +3153,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
if (event_id == PQI_EVENT_OFA_QUIESCE) {
dev_info(&ctrl_info->pci_dev->dev,
- "Received Online Firmware Activation quiesce event for controller %u\n",
- ctrl_info->ctrl_id);
+ "Received Online Firmware Activation quiesce event for controller %u\n",
+ ctrl_info->ctrl_id);
pqi_ofa_ctrl_quiesce(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
if (ctrl_info->soft_reset_handshake_supported) {
@@ -3223,8 +3174,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
pqi_ofa_free_host_buffer(ctrl_info);
pqi_acknowledge_event(ctrl_info, event);
dev_info(&ctrl_info->pci_dev->dev,
- "Online Firmware Activation(%u) cancel reason : %u\n",
- ctrl_info->ctrl_id, event->ofa_cancel_reason);
+ "Online Firmware Activation(%u) cancel reason : %u\n",
+ ctrl_info->ctrl_id, event->ofa_cancel_reason);
}
mutex_unlock(&ctrl_info->ofa_mutex);
@@ -3403,7 +3354,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
#define PQI_LEGACY_INTX_MASK 0x1
static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
- bool enable_intx)
+ bool enable_intx)
{
u32 intx_mask;
struct pqi_device_registers __iomem *pqi_registers;
@@ -3841,7 +3792,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
&pqi_registers->admin_oq_pi_addr);
reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
- (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+ (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
(admin_queues->int_msg_num << 16);
writel(reg, &pqi_registers->admin_iq_num_elements);
writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
@@ -4048,8 +3999,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
- *error_info)
+static int pqi_process_raid_io_error_synchronous(
+ struct pqi_raid_error_info *error_info)
{
int rc = -EIO;
@@ -4122,6 +4073,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
goto out;
}
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -4168,6 +4121,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
up(&ctrl_info->sync_request_sem);
@@ -4665,11 +4619,11 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
- ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
- ctrl_info->error_buffer_length,
- &ctrl_info->error_buffer_dma_handle,
- GFP_KERNEL);
+ ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+ ctrl_info->error_buffer_length,
+ &ctrl_info->error_buffer_dma_handle,
+ GFP_KERNEL);
if (!ctrl_info->error_buffer)
return -ENOMEM;
@@ -5402,7 +5356,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
pqi_ctrl_busy(ctrl_info);
if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
- pqi_ctrl_in_ofa(ctrl_info)) {
+ pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
@@ -5419,7 +5373,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
if (device->raid_bypass_enabled &&
- !blk_rq_is_passthrough(scmd->request)) {
+ !blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
@@ -5650,6 +5604,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -5658,7 +5624,8 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
complete(waiting);
}
-#define PQI_LUN_RESET_TIMEOUT_SECS 10
+#define PQI_LUN_RESET_TIMEOUT_SECS 30
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
@@ -5667,7 +5634,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
rc = 0;
break;
}
@@ -5704,6 +5671,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+ if (ctrl_info->tmf_iu_timeout_supported)
+ put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
+ &request->timeout);
pqi_start_io(ctrl_info,
&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
@@ -5733,7 +5703,7 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
for (retries = 0;;) {
rc = pqi_lun_reset(ctrl_info, device);
- if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES)
+ if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -5787,17 +5757,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
shost->host_no, device->bus, device->target, device->lun);
pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "controller %u offlined - cannot send device reset\n",
- ctrl_info->ctrl_id);
+ if (pqi_ctrl_offline(ctrl_info) ||
+ pqi_device_reset_blocked(ctrl_info)) {
rc = FAILED;
goto out;
}
pqi_wait_until_ofa_finished(ctrl_info);
+ atomic_inc(&ctrl_info->sync_cmds_outstanding);
rc = pqi_device_reset(ctrl_info, device);
+ atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
dev_err(&ctrl_info->pci_dev->dev,
@@ -6066,6 +6036,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
put_unaligned_le16(iu_length, &request.header.iu_length);
+ if (ctrl_info->raid_iu_timeout_supported)
+ put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
+
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
@@ -6119,7 +6092,7 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
ctrl_info = shost_to_hba(sdev->host);
- if (pqi_ctrl_in_ofa(ctrl_info))
+ if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
return -EBUSY;
switch (cmd) {
@@ -6160,14 +6133,8 @@ static ssize_t pqi_firmware_version_show(struct device *dev,
static ssize_t pqi_driver_version_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- struct Scsi_Host *shost;
- struct pqi_ctrl_info *ctrl_info;
-
- shost = class_to_shost(dev);
- ctrl_info = shost_to_hba(shost);
-
- return snprintf(buffer, PAGE_SIZE,
- "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ DRIVER_VERSION BUILD_TIMESTAMP);
}
static ssize_t pqi_serial_number_show(struct device *dev,
@@ -6283,7 +6250,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- unsigned char uid[16];
+ u8 unique_id[16];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6296,16 +6263,22 @@ static ssize_t pqi_unique_id_show(struct device *dev,
flags);
return -ENODEV;
}
- memcpy(uid, device->unique_id, sizeof(uid));
+
+ if (device->is_physical_device) {
+ memset(unique_id, 0, 8);
+ memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+ } else {
+ memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+ }
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return snprintf(buffer, PAGE_SIZE,
"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
- uid[0], uid[1], uid[2], uid[3],
- uid[4], uid[5], uid[6], uid[7],
- uid[8], uid[9], uid[10], uid[11],
- uid[12], uid[13], uid[14], uid[15]);
+ unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+ unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+ unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+ unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
static ssize_t pqi_lunid_show(struct device *dev,
@@ -6328,6 +6301,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
flags);
return -ENODEV;
}
+
memcpy(lunid, device->scsi3addr, sizeof(lunid));
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6335,7 +6309,8 @@ static ssize_t pqi_lunid_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
-#define MAX_PATHS 8
+#define MAX_PATHS 8
+
static ssize_t pqi_path_info_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -6347,9 +6322,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
int output_len = 0;
u8 box;
u8 bay;
- u8 path_map_index = 0;
+ u8 path_map_index;
char *active;
- unsigned char phys_connector[2];
+ u8 phys_connector[2];
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -6365,7 +6340,7 @@ static ssize_t pqi_path_info_show(struct device *dev,
bay = device->bay;
for (i = 0; i < MAX_PATHS; i++) {
- path_map_index = 1<<i;
+ path_map_index = 1 << i;
if (i == device->active_path_index)
active = "Active";
else if (device->path_map & path_map_index)
@@ -6416,10 +6391,10 @@ end_buffer:
}
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
return output_len;
}
-
static ssize_t pqi_sas_address_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
@@ -6440,6 +6415,7 @@ static ssize_t pqi_sas_address_show(struct device *dev,
flags);
return -ENODEV;
}
+
sas_address = device->sas_address;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -6844,6 +6820,27 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
firmware_feature->feature_name);
}
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_firmware_feature *firmware_feature)
+{
+ switch (firmware_feature->feature_bit) {
+ case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
+ ctrl_info->soft_reset_handshake_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+ ctrl_info->raid_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+ ctrl_info->tmf_iu_timeout_supported =
+ firmware_feature->enabled;
+ break;
+ }
+
+ pqi_firmware_feature_status(ctrl_info, firmware_feature);
+}
+
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
struct pqi_firmware_feature *firmware_feature)
{
@@ -6867,7 +6864,17 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
{
.feature_name = "New Soft Reset Handshake",
.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
- .feature_status = pqi_firmware_feature_status,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "RAID IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
+ {
+ .feature_name = "TMF IU Timeout",
+ .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+ .feature_status = pqi_ctrl_update_feature_flags,
},
};
@@ -6921,7 +6928,6 @@ static void pqi_process_firmware_features(
return;
}
- ctrl_info->soft_reset_handshake_supported = false;
for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
if (!pqi_firmware_features[i].supported)
continue;
@@ -6929,10 +6935,6 @@ static void pqi_process_firmware_features(
firmware_features_iomem_addr,
pqi_firmware_features[i].feature_bit)) {
pqi_firmware_features[i].enabled = true;
- if (pqi_firmware_features[i].feature_bit ==
- PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE)
- ctrl_info->soft_reset_handshake_supported =
- true;
}
pqi_firmware_feature_update(ctrl_info,
&pqi_firmware_features[i]);
@@ -7074,13 +7076,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
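+/* delay after a soft reset before the controller MSGU is ready, in ms */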
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- rc = pqi_force_sis_mode(ctrl_info);
- if (rc)
- return rc;
+ if (reset_devices) {
+ sis_soft_reset(ctrl_info);
+ msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+ } else {
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+ }
/*
* Wait until the controller is ready to start accepting SIS
@@ -7386,7 +7395,7 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining product detail\n");
+ "error obtaining product details\n");
return rc;
}
@@ -7514,6 +7523,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
atomic_set(&ctrl_info->num_interrupts, 0);
+ atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7721,6 +7731,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
dev_err(dev, "Failed to allocate host buffer of size = %u",
bytes_requested);
}
+
+ return;
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
@@ -7787,8 +7799,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
0, NULL, NO_TIMEOUT);
}
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
-
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
@@ -7956,28 +7966,73 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
pqi_remove_ctrl(ctrl_info);
}
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+ scmd = io_request->scmd;
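+		/*
+		 * Exactly one of these warnings fires for every in-flight
+		 * request: scmd != NULL flags an I/O command from the SCSI
+		 * midlayer, scmd == NULL a non-I/O or driver-initiated request.
+		 */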
+ WARN_ON(scmd != NULL); /* IO command from SML */
+ WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
+ }
+}
+
static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
ctrl_info = pci_get_drvdata(pci_dev);
- if (!ctrl_info)
- goto error;
+ if (!ctrl_info) {
+ dev_err(&pci_dev->dev,
+ "cache could not be flushed\n");
+ return;
+ }
+
+ pqi_disable_events(ctrl_info);
+ pqi_wait_until_ofa_finished(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_event_worker(ctrl_info);
+
+ pqi_ctrl_shutdown_start(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending I/O failed\n");
+ return;
+ }
+
+ pqi_ctrl_block_device_reset(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
- pqi_free_interrupts(ctrl_info);
- pqi_reset(ctrl_info);
- if (rc == 0)
+ if (rc)
+ dev_err(&pci_dev->dev,
+ "unable to flush controller cache\n");
+
+ pqi_ctrl_block_requests(ctrl_info);
+
+ rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+ if (rc) {
+ dev_err(&pci_dev->dev,
+ "wait for pending sync cmds failed\n");
return;
+ }
-error:
- dev_warn(&pci_dev->dev,
- "unable to flush controller cache\n");
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
@@ -8686,6 +8741,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
cdb) != 32);
BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+ timeout) != 60);
+ BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
sg_descriptors) != 64);
BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
@@ -8840,6 +8897,8 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
nexus_id) != 10);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+ timeout) != 14);
+ BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
lun_number) != 16);
BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
protocol_specific) != 24);
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 6776dfc1d317..b7289112455c 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (pqi_sas_phy->added_to_port)
list_del(&pqi_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(pqi_sas_phy);
}
@@ -312,7 +312,6 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy)
static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
u64 *identifier)
{
-
int rc;
unsigned long flags;
struct Scsi_Host *shost;
@@ -361,7 +360,7 @@ static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
}
}
- if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
+ if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) {
rc = -EINVAL;
goto out;
}
@@ -382,12 +381,10 @@ out:
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return rc;
-
}
static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
{
-
int rc;
unsigned long flags;
struct pqi_ctrl_info *ctrl_info;
@@ -482,7 +479,6 @@ pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy,
req_size -= SMP_CRC_FIELD_LENGTH;
put_unaligned_le32(req_size, &parameters->request_length);
-
put_unaligned_le32(resp_size, &parameters->response_length);
sg_copy_to_buffer(job->request_payload.sg_list,
@@ -512,12 +508,12 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
int rc;
- struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+ struct pqi_ctrl_info *ctrl_info;
struct bmic_csmi_smp_passthru_buffer *smp_buf;
struct pqi_raid_error_info error_info;
unsigned int reslen = 0;
- pqi_ctrl_busy(ctrl_info);
+ ctrl_info = shost_to_hba(shost);
if (job->reply_payload.payload_len == 0) {
rc = -ENOMEM;
@@ -539,16 +535,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto out;
}
- if (pqi_ctrl_offline(ctrl_info)) {
- rc = -ENXIO;
- goto out;
- }
-
- if (pqi_ctrl_blocked(ctrl_info)) {
- rc = -EBUSY;
- goto out;
- }
-
smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job);
if (!smp_buf) {
rc = -ENOMEM;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index e3b0ce25162b..17a56c87d383 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -61,6 +61,7 @@
#define VENDOR_NEC 2
#define VENDOR_TOSHIBA 3
#define VENDOR_WRITER 4 /* pre-scsi3 writers */
+#define VENDOR_CYGNAL_85ED 5 /* CD-on-a-chip */
#define VENDOR_TIMEOUT 30*HZ
@@ -99,6 +100,23 @@ void sr_vendor_init(Scsi_CD *cd)
} else if (!strncmp(vendor, "TOSHIBA", 7)) {
cd->vendor = VENDOR_TOSHIBA;
+ } else if (!strncmp(vendor, "Beurer", 6) &&
+ !strncmp(model, "Gluco Memory", 12)) {
+ /*
+  * The Beurer GL50 evo uses a Cygnal-manufactured CD-on-a-chip
+  * that only accepts a subset of SCSI commands.  Most of the
+  * not-implemented commands are fine to fail, but a few,
+  * particularly around the MMC or Audio commands, will put the
+  * device into an unrecoverable state, so they need to be
+  * avoided at all costs.
+  */
+ cd->vendor = VENDOR_CYGNAL_85ED;
+ cd->cdi.mask |= (
+ CDC_MULTI_SESSION |
+ CDC_CLOSE_TRAY | CDC_OPEN_TRAY |
+ CDC_LOCK |
+ CDC_GENERIC_PACKET |
+ CDC_PLAY_AUDIO
+ );
}
#endif
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e3266a64a477..9e3fff2de83e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -22,6 +22,7 @@ static const char *verstr = "20160209";
#include <linux/module.h>
+#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
@@ -3800,14 +3801,11 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
if (STp->cleaning_req)
mt_status.mt_gstat |= GMT_CLN(0xffffffff);
- i = copy_to_user(p, &mt_status, sizeof(struct mtget));
- if (i) {
- retval = (-EFAULT);
+ retval = put_user_mtget(p, &mt_status);
+ if (retval)
goto out;
- }
STp->recover_reg = 0; /* Clear after read */
- retval = 0;
goto out;
} /* End of MTIOCGET */
if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
@@ -3821,9 +3819,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
goto out;
}
mt_pos.mt_blkno = blk;
- i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
- if (i)
- retval = (-EFAULT);
+ retval = put_user_mtpos(p, &mt_pos);
goto out;
}
mutex_unlock(&STp->lock);
@@ -3857,14 +3853,26 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
#ifdef CONFIG_COMPAT
-static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
{
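+	/* a unique_id of SDW_IGNORED_UNIQUE_ID acts as a wildcard match */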
+ void __user *p = compat_ptr(arg);
struct scsi_tape *STp = file->private_data;
struct scsi_device *sdev = STp->device;
int ret = -ENOIOCTLCMD;
+
+ /* argument conversion is handled using put_user_mtpos/put_user_mtget */
+ switch (cmd_in) {
+ case MTIOCTOP:
+ return st_ioctl(file, MTIOCTOP, (unsigned long)p);
+ case MTIOCPOS32:
+ return st_ioctl(file, MTIOCPOS, (unsigned long)p);
+ case MTIOCGET32:
+ return st_ioctl(file, MTIOCGET, (unsigned long)p);
+ }
+
if (sdev->host->hostt->compat_ioctl) {
- ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
}
return ret;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 542d2bac2922..f8faf8b3d965 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1727,6 +1727,13 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
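+/* identify a synthetic Fibre Channel device by its VMBus device-type GUID */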
+static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID };
+
+static bool hv_dev_is_fc(struct hv_device *hv_dev)
+{
+ return guid_equal(&fc_guid.guid, &hv_dev->dev_type);
+}
+
static int storvsc_probe(struct hv_device *device,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1934,11 +1941,45 @@ static int storvsc_remove(struct hv_device *dev)
return 0;
}
+static int storvsc_suspend(struct hv_device *hv_dev)
+{
+ struct storvsc_device *stor_device = hv_get_drvdata(hv_dev);
+ struct Scsi_Host *host = stor_device->host;
+ struct hv_host_device *host_dev = shost_priv(host);
+
+ storvsc_wait_to_drain(stor_device);
+
+ drain_workqueue(host_dev->handle_error_wq);
+
+ vmbus_close(hv_dev->channel);
+
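+	/* drop the cached per-CPU channel map; it is rebuilt on resume */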
+ memset(stor_device->stor_chns, 0,
+ num_possible_cpus() * sizeof(void *));
+
+ kfree(stor_device->stor_chns);
+ stor_device->stor_chns = NULL;
+
+ cpumask_clear(&stor_device->alloced_cpus);
+
+ return 0;
+}
+
+static int storvsc_resume(struct hv_device *hv_dev)
+{
+ int ret;
+
+ ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+ hv_dev_is_fc(hv_dev));
+ return ret;
+}
+
static struct hv_driver storvsc_drv = {
.name = KBUILD_MODNAME,
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
+ .suspend = storvsc_suspend,
+ .resume = storvsc_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 955e4c938d49..701b842296f0 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -501,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.cmd_size = NCR5380_CMD_SIZE,
@@ -523,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
- if (setup_sg_tablesize >= 0)
+ if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 0b845ab7c3bf..d14c2243e02a 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -132,6 +132,16 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+config SCSI_UFS_TI_J721E
+ tristate "TI glue layer for Cadence UFS Controller"
+ depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
+ help
+ This selects the driver for the TI glue layer for the Cadence
+ UFS Host Controller IP.
+
+ Select this if you have a TI platform with a UFS controller.
+ If unsure, say N.
+
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
depends on SCSI_UFSHCD
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2a9097939bcb..94c6c5d7334b 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
+obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
new file mode 100644
index 000000000000..5216d228cdd9
--- /dev/null
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define TI_UFS_SS_CTRL 0x4
+#define TI_UFS_SS_RST_N_PCS BIT(0)
+#define TI_UFS_SS_CLK_26MHZ BIT(4)
+
+static int ti_j721e_ufs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long clk_rate;
+ void __iomem *regbase;
+ struct clk *clk;
+ u32 reg = 0;
+ int ret;
+
+ regbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regbase))
+ return PTR_ERR(regbase);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ /* Select MPHY refclk frequency */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Cannot claim MPHY clock.\n");
+ return PTR_ERR(clk);
+ }
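+	/* only the refclk rate is needed here, so release the clock right away */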
+ clk_rate = clk_get_rate(clk);
+ if (clk_rate == 26000000)
+ reg |= TI_UFS_SS_CLK_26MHZ;
+ devm_clk_put(dev, clk);
+
+ /* Take UFS slave device out of reset */
+ reg |= TI_UFS_SS_RST_N_PCS;
+ writel(reg, regbase + TI_UFS_SS_CTRL);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
+ dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child nodes %d\n", ret);
+ pm_runtime_put_sync(dev);
+ }
+
+ return ret;
+}
+
+static int ti_j721e_ufs_remove(struct platform_device *pdev)
+{
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id ti_j721e_ufs_of_match[] = {
+ {
+ .compatible = "ti,j721e-ufs",
+ },
+ { },
+};
+
+static struct platform_driver ti_j721e_ufs_driver = {
+ .probe = ti_j721e_ufs_probe,
+ .remove = ti_j721e_ufs_remove,
+ .driver = {
+ .name = "ti-j721e-ufs",
+ .of_match_table = ti_j721e_ufs_of_match,
+ },
+};
+module_platform_driver(ti_j721e_ufs_driver);
+
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
+MODULE_DESCRIPTION("TI UFS host controller glue driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 6bbb1679bb91..5d6487350a6c 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -452,10 +452,7 @@ static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
/* get resource of ufs sys ctrl */
host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(host->ufs_sys_ctrl))
- return PTR_ERR(host->ufs_sys_ctrl);
-
- return 0;
+ return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 0f6ff33ce52e..83e28edc3ac5 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -147,6 +147,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
if (err)
goto out_variant_clear;
+ /* Enable runtime autosuspend */
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index a5b71487a206..c69c29a1ceb9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -246,6 +246,44 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
mb();
}
+/**
+ * ufs_qcom_host_reset - reset host controller and PHY
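+ * @hba: per-adapter instance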
+ */
+static int ufs_qcom_host_reset(struct ufs_hba *hba)
+{
+ int ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!host->core_reset) {
+ dev_warn(hba->dev, "%s: reset control not set\n", __func__);
+ goto out;
+ }
+
+ ret = reset_control_assert(host->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * The hardware requires a delay of at least 3-4 sleep clock
+ * (32.768 kHz) cycles between assert and deassert, which comes
+ * to ~125us (4/32768). To be on the safe side, add a 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(host->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+ usleep_range(1000, 1100);
+
+out:
+ return ret;
+}
+
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -254,6 +292,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ /* Reset UFS Host Controller and PHY */
+ ret = ufs_qcom_host_reset(hba);
+ if (ret)
+ dev_warn(hba->dev, "%s: host reset returned %d\n",
+ __func__, ret);
+
if (is_rate_B)
phy_set_mode(phy, PHY_MODE_UFS_HS_B);
@@ -1101,6 +1145,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Setup the reset control of HCI */
+ host->core_reset = devm_reset_control_get(hba->dev, "rst");
+ if (IS_ERR(host->core_reset)) {
+ err = PTR_ERR(host->core_reset);
+ dev_warn(dev, "Failed to get reset control %d\n", err);
+ host->core_reset = NULL;
+ err = 0;
+ }
+
/* Fire up the reset controller. Failure here is non-fatal. */
host->rcdev.of_node = dev->of_node;
host->rcdev.ops = &ufs_qcom_reset_ops;
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index d401f174bb70..2d95e7cc7187 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -6,6 +6,7 @@
#define UFS_QCOM_H_
#include <linux/reset-controller.h>
+#include <linux/reset.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
@@ -233,6 +234,8 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+ /* Reset control of HCI */
+ struct reset_control *core_reset;
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 969a36b15897..ad2abc96c0f1 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -126,13 +126,16 @@ static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit == ahit)
- goto out_unlock;
- hba->ahit = ahit;
- if (!pm_runtime_suspended(hba->dev))
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+ if (hba->ahit != ahit)
+ hba->ahit = ahit;
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!pm_runtime_suspended(hba->dev)) {
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ufshcd_auto_hibern8_enable(hba);
+ ufshcd_release(hba);
+ pm_runtime_put(hba->dev);
+ }
}
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index dc2f6d2b46ed..baeecee35d1e 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -162,6 +162,7 @@ out:
/**
* ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
*
* Should be called when unloading the driver.
*/
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
index fb9e2ff4f8d2..6a901da2d15a 100644
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -80,7 +80,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8d40dc918f4e..76f9be71c31b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -402,7 +402,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "IRQ resource not available\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 11a87f51c442..b5966faf3e98 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -88,6 +88,9 @@
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
+/* default delay of autosuspend: 2000 ms */
+#define RPM_AUTOSUSPEND_DELAY_MS 2000
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -114,7 +117,7 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
return -EINVAL;
- regs = kzalloc(len, GFP_KERNEL);
+ regs = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
@@ -237,7 +240,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
END_FIX
};
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
@@ -1607,7 +1610,7 @@ static void ufshcd_gate_work(struct work_struct *work)
* state to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state == REQ_CLKS_ON)) {
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
@@ -1935,8 +1938,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
} else {
dev_warn(hba->dev,
- "%s: Response size is bigger than buffer",
- __func__);
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, buf_len);
return -EINVAL;
}
}
@@ -2986,10 +2989,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@@ -3856,6 +3859,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
ret = ufshcd_host_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -3885,15 +3891,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
+ int err;
+
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
/*
- * If link recovery fails then return error so that caller
- * don't retry the hibern8 enter again.
+ * If link recovery fails then return error code returned from
+ * ufshcd_link_recovery().
+ * If link recovery succeeds then return -EAGAIN to attempt
+ * hibern8 enter retry again.
*/
- if (ufshcd_link_recovery(hba))
- ret = -ENOLINK;
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@@ -3907,7 +3922,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret || ret == -ENOLINK)
+ if (!ret)
goto out;
}
out:
@@ -3941,7 +3956,7 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
return ret;
}
-static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
@@ -4631,9 +4646,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
+ struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+
+ if (ufshcd_is_rpm_autosuspend_allowed(hba))
+ sdev->rpm_autosuspend = 1;
+
return 0;
}
@@ -4788,19 +4808,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+ if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ }
+ return retval;
}
/**
@@ -4856,8 +4886,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs;
u32 tr_doorbell;
@@ -4876,7 +4910,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- __ufshcd_transfer_req_compl(hba, completed_reqs);
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
@@ -5395,61 +5434,77 @@ out:
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
/* PHY layer lane error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
/* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg)
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg) {
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg) {
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg) {
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
@@ -5472,10 +5527,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
@@ -5484,7 +5544,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
@@ -5532,6 +5592,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
}
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -5539,44 +5600,62 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
hba->errors = UFSHCD_ERROR_MASK & intr_status;
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
if (hba->errors)
- ufshcd_check_errors(hba);
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -5584,8 +5663,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
@@ -5608,14 +5688,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (intr_status)
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- if (enabled_intr_status) {
- ufshcd_sl_intr(hba, enabled_intr_status);
- retval = IRQ_HANDLED;
- }
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
} while (intr_status && --retries);
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
+ }
+
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -5760,9 +5844,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* @hba: per-adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply
- * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
* @desc_buff: pointer to descriptor buffer, NULL if NA
* @buff_len: descriptor size, 0 if NA
+ * @cmd_type: specifies the type (NOP, Query...)
* @desc_op: descriptor operation
*
* Those type of requests uses UTP Transfer Request Descriptor - utrd.
@@ -5776,7 +5860,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
u8 *desc_buff, int *buff_len,
- int cmd_type,
+ enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
struct ufshcd_lrb *lrbp;
@@ -5856,7 +5940,9 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memcpy(desc_buff, descp, resp_len);
*buff_len = resp_len;
} else {
- dev_warn(hba->dev, "rsp size is bigger than buffer");
+ dev_warn(hba->dev,
+ "%s: rsp size %d is bigger than buffer size %d",
+ __func__, resp_len, *buff_len);
*buff_len = 0;
err = -EINVAL;
}
@@ -5891,7 +5977,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op)
{
int err;
- int cmd_type = DEV_CMD_TYPE_QUERY;
+ enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { { 0 }, };
int ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
@@ -6770,23 +6856,13 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
&hba->desc_size.geom_desc);
if (err)
hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+
err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
&hba->desc_size.hlth_desc);
if (err)
hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
}
-static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
-{
- hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
- hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
- hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
- hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
- hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
- hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
- hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
@@ -6881,9 +6957,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
ret = ufshcd_verify_dev_init(hba);
if (ret)
goto out;
@@ -6934,6 +7007,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ /* Enable Auto-Hibernate if configured */
+ ufshcd_auto_hibern8_enable(hba);
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7069,6 +7145,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
.dma_boundary = PAGE_SIZE - 1,
+ .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -7950,12 +8027,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
- /* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
-
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ /* Schedule clock gating in case of no access to UFS device yet */
+ ufshcd_release(hba);
+
goto out;
set_old_link_state:
@@ -8274,9 +8351,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- /* Set descriptor lengths to specification defaults */
- ufshcd_def_desc_sizes(hba);
-
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c94cfda52829..2740f6941ec6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -716,6 +716,12 @@ struct ufs_hba {
* the performance of ongoing read/write operations.
*/
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
+ /*
+ * This capability allows host controller driver to automatically
+ * enable runtime power management by itself instead of waiting
+ * for userspace to control the power management.
+ */
+#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
@@ -749,6 +755,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
+static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
+}
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
@@ -916,6 +926,8 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
+void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index dbb75cd28dc8..c2961d37cc1c 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -195,7 +195,7 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index ca8e3abeb2c7..a23a8e5794f5 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+ return dma_len > (1U << 16) ? (1U << 16) : dma_len;
+}
+
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+ u32 dma_len)
+{
+ /* The old driver used 0xfffc as limit, so do that here too */
+ return dma_len > 0xfffc ? 0xfffc : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
.irq_pending = fastlane_esp_irq_pending,
- .dma_length_limit = zorro_esp_dma_length_limit,
+ .dma_length_limit = fastlane_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
.dma_drain = zorro_esp_dma_drain,
.dma_invalidate = fastlane_esp_dma_invalidate,
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index c8c80df090d1..c725d0a8b288 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -24,7 +24,7 @@ config SOUNDWIRE_CADENCE
config SOUNDWIRE_INTEL
tristate "Intel SoundWire Master driver"
select SOUNDWIRE_CADENCE
- depends on X86 && ACPI && SND_SOC
+ depends on ACPI && SND_SOC
help
SoundWire Intel Master driver.
If you have an Intel platform which has a SoundWire Master then
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index fc53dbe57f85..be5d437058ed 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -422,10 +422,11 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
- if (slave->id.unique_id != id.unique_id ||
- slave->id.mfg_id != id.mfg_id ||
+ if (slave->id.mfg_id != id.mfg_id ||
slave->id.part_id != id.part_id ||
- slave->id.class_id != id.class_id)
+ slave->id.class_id != id.class_id ||
+ (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
+ slave->id.unique_id != id.unique_id))
return -ENODEV;
return 0;
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 502ed4ec8f07..fed21e2b2277 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -183,9 +183,6 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_DEFAULT_SSP_INTERVAL 0x18
#define CDNS_TX_TIMEOUT 2000
-#define CDNS_PCM_PDI_OFFSET 0x2
-#define CDNS_PDM_PDI_OFFSET 0x6
-
#define CDNS_SCP_RX_FIFOLEVEL 0x2
/*
@@ -232,6 +229,22 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
}
/*
+ * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
+ * need to be confirmed with a write to MCP_CONFIG_UPDATE
+ */
+static int cdns_update_config(struct sdw_cdns *cdns)
+{
+ int ret;
+
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
+ CDNS_MCP_CONFIG_UPDATE_BIT);
+ if (ret < 0)
+ dev_err(cdns->dev, "Config update timedout\n");
+
+ return ret;
+}
+
+/*
* debugfs
*/
#ifdef CONFIG_DEBUG_FS
@@ -279,11 +292,7 @@ static int cdns_reg_show(struct seq_file *s, void *data)
ret += scnprintf(buf + ret, RD_BUF - ret,
"\nDPn B0 Registers\n");
- /*
- * in sdw_cdns_pdi_init() we filter out the Bulk PDIs,
- * so the indices need to be corrected again
- */
- num_ports = cdns->num_ports + CDNS_PCM_PDI_OFFSET;
+ num_ports = cdns->num_ports;
for (i = 0; i < num_ports; i++) {
ret += scnprintf(buf + ret, RD_BUF - ret,
@@ -324,6 +333,26 @@ static int cdns_reg_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(cdns_reg);
+static int cdns_hw_reset(void *data, u64 value)
+{
+ struct sdw_cdns *cdns = data;
+ int ret;
+
+ if (value != 1)
+ return -EINVAL;
+
+ /* Userspace changed the hardware state behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ ret = sdw_cdns_exit_reset(cdns);
+
+ dev_dbg(cdns->dev, "link hw_reset done: %d\n", ret);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");
+
/**
* sdw_cdns_debugfs_init() - Cadence debugfs init
* @cdns: Cadence instance
@@ -332,6 +361,9 @@ DEFINE_SHOW_ATTRIBUTE(cdns_reg);
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
{
debugfs_create_file("cdns-registers", 0400, root, cdns, &cdns_reg_fops);
+
+ debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
+ &cdns_hw_reset_fops);
}
EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);
@@ -752,14 +784,48 @@ EXPORT_SYMBOL(sdw_cdns_thread);
/*
* init routines
*/
-static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
+
+/**
+ * sdw_cdns_exit_reset() - Program reset parameters and start bus operations
+ * @cdns: Cadence instance
+ */
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
{
- u32 mask;
+ /* program maximum length reset to be safe */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_RST_DELAY,
+ CDNS_MCP_CONTROL_RST_DELAY);
+
+ /* use hardware generated reset */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_HW_RST,
+ CDNS_MCP_CONTROL_HW_RST);
+
+ /* enable bus operations with clock and data */
+ cdns_updatel(cdns, CDNS_MCP_CONFIG,
+ CDNS_MCP_CONFIG_OP,
+ CDNS_MCP_CONFIG_OP_NORMAL);
+
+ /* commit changes */
+ return cdns_update_config(cdns);
+}
+EXPORT_SYMBOL(sdw_cdns_exit_reset);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
- CDNS_MCP_SLAVE_INTMASK0_MASK);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
- CDNS_MCP_SLAVE_INTMASK1_MASK);
+/**
+ * sdw_cdns_enable_interrupt() - Enable/disable SDW interrupts and update config
+ * @cdns: Cadence instance
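+ * @state: true to enable the interrupts, false to mask them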
+ */
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
+{
+ u32 slave_intmask0 = 0;
+ u32 slave_intmask1 = 0;
+ u32 mask = 0;
+
+ if (!state)
+ goto update_masks;
+
+ slave_intmask0 = CDNS_MCP_SLAVE_INTMASK0_MASK;
+ slave_intmask1 = CDNS_MCP_SLAVE_INTMASK1_MASK;
/* enable detection of all slave state changes */
mask = CDNS_MCP_INT_SLAVE_MASK;
@@ -782,26 +848,13 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
if (interrupt_mask) /* parameter override */
mask = interrupt_mask;
+update_masks:
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
- return 0;
-}
-
-/**
- * sdw_cdns_enable_interrupt() - Enable SDW interrupts and update config
- * @cdns: Cadence instance
- */
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
-{
- int ret;
-
- _cdns_enable_interrupt(cdns);
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
- CDNS_MCP_CONFIG_UPDATE_BIT);
- if (ret < 0)
- dev_err(cdns->dev, "Config update timedout\n");
-
- return ret;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
@@ -821,7 +874,6 @@ static int cdns_allocate_pdi(struct sdw_cdns *cdns,
for (i = 0; i < num; i++) {
pdi[i].num = i + pdi_offset;
- pdi[i].assigned = false;
}
*stream = pdi;
@@ -838,7 +890,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config)
{
struct sdw_cdns_streams *stream;
- int offset, i, ret;
+ int offset;
+ int ret;
cdns->pcm.num_bd = config.pcm_bd;
cdns->pcm.num_in = config.pcm_in;
@@ -850,11 +903,8 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PCMs */
stream = &cdns->pcm;
- /* First two PDIs are reserved for bulk transfers */
- if (stream->num_bd < CDNS_PCM_PDI_OFFSET)
- return -EINVAL;
- stream->num_bd -= CDNS_PCM_PDI_OFFSET;
- offset = CDNS_PCM_PDI_OFFSET;
+ /* we allocate PDI0 and PDI1 which are used for Bulk */
+ offset = 0;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
@@ -881,7 +931,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
/* Allocate PDIs for PDMs */
stream = &cdns->pdm;
- offset = CDNS_PDM_PDI_OFFSET;
ret = cdns_allocate_pdi(cdns, &stream->bd,
stream->num_bd, offset);
if (ret)
@@ -898,6 +947,9 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
ret = cdns_allocate_pdi(cdns, &stream->out,
stream->num_out, offset);
+
+ offset += stream->num_out;
+
if (ret)
return ret;
@@ -905,18 +957,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
cdns->num_ports += stream->num_pdi;
- cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports,
- sizeof(*cdns->ports), GFP_KERNEL);
- if (!cdns->ports) {
- ret = -ENOMEM;
- return ret;
- }
-
- for (i = 0; i < cdns->num_ports; i++) {
- cdns->ports[i].assigned = false;
- cdns->ports[i].num = i + 1; /* Port 0 reserved for bulk */
- }
-
return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);
@@ -939,7 +979,7 @@ static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
* sdw_cdns_init() - Cadence initialization
* @cdns: Cadence instance
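+ * @clock_stop_exit: set to exit the clock-stop mode before initialization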
*/
-int sdw_cdns_init(struct sdw_cdns *cdns)
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit)
{
struct sdw_bus *bus = &cdns->bus;
struct sdw_master_prop *prop = &bus->prop;
@@ -947,12 +987,13 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
int divider;
int ret;
- /* Exit clock stop */
- ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
- CDNS_MCP_CONTROL_CLK_STOP_CLR);
- if (ret < 0) {
- dev_err(cdns->dev, "Couldn't exit from clock stop\n");
- return ret;
+ if (clock_stop_exit) {
+ ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
+ CDNS_MCP_CONTROL_CLK_STOP_CLR);
+ if (ret < 0) {
+ dev_err(cdns->dev, "Couldn't exit from clock stop\n");
+ return ret;
+ }
}
/* Set clock divider */
@@ -975,6 +1016,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, CDNS_DEFAULT_SSP_INTERVAL);
cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, CDNS_DEFAULT_SSP_INTERVAL);
+ /* flush command FIFOs */
+ cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
+ CDNS_MCP_CONTROL_CMD_RST);
+
/* Set cmd accept mode */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
CDNS_MCP_CONTROL_CMD_ACCEPT);
@@ -997,13 +1042,10 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
/* Set cmd mode for Tx and Rx cmds */
val &= ~CDNS_MCP_CONFIG_CMD;
- /* Set operation to normal */
- val &= ~CDNS_MCP_CONFIG_OP;
- val |= CDNS_MCP_CONFIG_OP_NORMAL;
-
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
- return 0;
+ /* commit changes */
+ return cdns_update_config(cdns);
}
EXPORT_SYMBOL(sdw_cdns_init);
@@ -1185,20 +1227,20 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
* @num: Number of PDIs
* @pdi: PDI instances
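+ * @offset: PDI offset to start the search from
+ * @dai_id: DAI id to match against the PDI number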
*
- * Find and return a free PDI for a given PDI array
+ * Find a PDI in a given PDI array. The PDI num and dai_id are
+ * expected to match; NULL is returned otherwise.
*/
static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
+ unsigned int offset,
unsigned int num,
- struct sdw_cdns_pdi *pdi)
+ struct sdw_cdns_pdi *pdi,
+ int dai_id)
{
int i;
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
- pdi[i].assigned = true;
- return &pdi[i];
- }
+ for (i = offset; i < offset + num; i++)
+ if (pdi[i].num == dai_id)
+ return &pdi[i];
return NULL;
}
@@ -1207,13 +1249,11 @@ static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
* sdw_cdns_config_stream: Configure a stream
*
* @cdns: Cadence instance
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
* @pdi: PDI to be used
*/
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_port *port,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
u32 offset, val = 0;
@@ -1221,113 +1261,51 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
if (dir == SDW_DATA_DIR_RX)
val = CDNS_PORTCTRL_DIRN;
- offset = CDNS_PORTCTRL + port->num * CDNS_PORT_OFFSET;
+ offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
cdns_updatel(cdns, offset, CDNS_PORTCTRL_DIRN, val);
- val = port->num;
+ val = pdi->num;
val |= ((1 << ch) - 1) << SDW_REG_SHIFT(CDNS_PDI_CONFIG_CHANNEL);
cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);
/**
- * cdns_get_num_pdi() - Get number of PDIs required
- *
- * @cdns: Cadence instance
- * @pdi: PDI to be used
- * @num: Number of PDIs
- * @ch_count: Channel count
- */
-static int cdns_get_num_pdi(struct sdw_cdns *cdns,
- struct sdw_cdns_pdi *pdi,
- unsigned int num, u32 ch_count)
-{
- int i, pdis = 0;
-
- for (i = 0; i < num; i++) {
- if (pdi[i].assigned)
- continue;
-
- if (pdi[i].ch_count < ch_count)
- ch_count -= pdi[i].ch_count;
- else
- ch_count = 0;
-
- pdis++;
-
- if (!ch_count)
- break;
- }
-
- if (ch_count)
- return 0;
-
- return pdis;
-}
-
-/**
- * sdw_cdns_get_stream() - Get stream information
- *
- * @cdns: Cadence instance
- * @stream: Stream to be allocated
- * @ch: Channel count
- * @dir: Data direction
- */
-int sdw_cdns_get_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- u32 ch, u32 dir)
-{
- int pdis = 0;
-
- if (dir == SDW_DATA_DIR_RX)
- pdis = cdns_get_num_pdi(cdns, stream->in, stream->num_in, ch);
- else
- pdis = cdns_get_num_pdi(cdns, stream->out, stream->num_out, ch);
-
- /* check if we found PDI, else find in bi-directional */
- if (!pdis)
- pdis = cdns_get_num_pdi(cdns, stream->bd, stream->num_bd, ch);
-
- return pdis;
-}
-EXPORT_SYMBOL(sdw_cdns_get_stream);
-
-/**
- * sdw_cdns_alloc_stream() - Allocate a stream
+ * sdw_cdns_alloc_pdi() - Allocate a PDI
*
* @cdns: Cadence instance
* @stream: Stream to be allocated
- * @port: Cadence data port
* @ch: Channel count
* @dir: Data direction
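+ * @dai_id: DAI id used for the PDI lookup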
*/
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir)
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id)
{
struct sdw_cdns_pdi *pdi = NULL;
if (dir == SDW_DATA_DIR_RX)
- pdi = cdns_find_pdi(cdns, stream->num_in, stream->in);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_in, stream->in,
+ dai_id);
else
- pdi = cdns_find_pdi(cdns, stream->num_out, stream->out);
+ pdi = cdns_find_pdi(cdns, 0, stream->num_out, stream->out,
+ dai_id);
/* check if we found a PDI, else find in bi-directional */
if (!pdi)
- pdi = cdns_find_pdi(cdns, stream->num_bd, stream->bd);
-
- if (!pdi)
- return -EIO;
-
- port->pdi = pdi;
- pdi->l_ch_num = 0;
- pdi->h_ch_num = ch - 1;
- pdi->dir = dir;
- pdi->ch_count = ch;
+ pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
+ dai_id);
+
+ if (pdi) {
+ pdi->l_ch_num = 0;
+ pdi->h_ch_num = ch - 1;
+ pdi->dir = dir;
+ pdi->ch_count = ch;
+ }
- return 0;
+ return pdi;
}
-EXPORT_SYMBOL(sdw_cdns_alloc_stream);
+EXPORT_SYMBOL(sdw_cdns_alloc_pdi);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cadence Soundwire Library");
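To make the reworked allocation concrete: PDIs no longer carry an assigned flag, they are simply looked up by number, and the DAI id is expected to equal the PDI number. A minimal caller sketch using only names from this patch (the channel count is a made-up example):

	struct sdw_cdns_pdi *pdi;

	/* playback: search out[], then bd[] starting past the two Bulk PDIs */
	pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, 2, SDW_DATA_DIR_TX, dai->id);
	if (!pdi)
		return -EINVAL;	/* no PDI numbered dai->id is available */

	/* the PDI now carries direction and channel span; program the port */
	sdw_cdns_config_stream(cdns, 2, SDW_DATA_DIR_TX, pdi);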
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 0b72b7094735..001457cbe5ad 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -8,7 +8,6 @@
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
- * @assigned: pdi assigned
* @num: pdi number
* @intel_alh_id: link identifier
* @l_ch_num: low channel for PDI
@@ -18,7 +17,6 @@
* @type: stream type, PDM or PCM
*/
struct sdw_cdns_pdi {
- bool assigned;
int num;
int intel_alh_id;
int l_ch_num;
@@ -29,23 +27,6 @@ struct sdw_cdns_pdi {
};
/**
- * struct sdw_cdns_port: Cadence port structure
- *
- * @num: port number
- * @assigned: port assigned
- * @ch: channel count
- * @direction: data port direction
- * @pdi: pdi for this port
- */
-struct sdw_cdns_port {
- unsigned int num;
- bool assigned;
- unsigned int ch;
- enum sdw_data_direction direction;
- struct sdw_cdns_pdi *pdi;
-};
-
-/**
* struct sdw_cdns_streams: Cadence stream data structure
*
* @num_bd: number of bidirectional streams
@@ -95,8 +76,8 @@ struct sdw_cdns_stream_config {
* struct sdw_cdns_dma_data: Cadence DMA data
*
* @name: SoundWire stream name
- * @nr_ports: Number of ports
- * @port: Ports
+ * @stream: stream runtime
+ * @pdi: PDI used for this dai
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
@@ -104,8 +85,7 @@ struct sdw_cdns_stream_config {
struct sdw_cdns_dma_data {
char *name;
struct sdw_stream_runtime *stream;
- int nr_ports;
- struct sdw_cdns_port **port;
+ struct sdw_cdns_pdi *pdi;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
@@ -158,10 +138,11 @@ extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
-int sdw_cdns_init(struct sdw_cdns *cdns);
+int sdw_cdns_init(struct sdw_cdns *cdns, bool clock_stop_exit);
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
struct sdw_cdns_stream_config config);
-int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
+int sdw_cdns_exit_reset(struct sdw_cdns *cdns);
+int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state);
#ifdef CONFIG_DEBUG_FS
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
@@ -170,10 +151,10 @@ void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root);
int sdw_cdns_get_stream(struct sdw_cdns *cdns,
struct sdw_cdns_streams *stream,
u32 ch, u32 dir);
-int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
- struct sdw_cdns_streams *stream,
- struct sdw_cdns_port *port, u32 ch, u32 dir);
-void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port,
+struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
+ struct sdw_cdns_streams *stream,
+ u32 ch, u32 dir, int dai_id);
+void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
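The prototype changes above also encode a new bring-up order; a hedged sketch of the intended sequence, matching how intel.c uses it below (error handling elided):

	ret = sdw_cdns_init(cdns, false);		/* program the IP, no clock-stop exit */
	ret = sdw_cdns_pdi_init(cdns, config);
	ret = sdw_cdns_enable_interrupt(cdns, true);	/* unmask before releasing the bus */
	ret = sdw_cdns_exit_reset(cdns);		/* only now does the bus start */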
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 13c54eac0cc3..99dc61021211 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -479,7 +480,10 @@ intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
int pdi_conf = 0;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/*
* Program stream parameters to stream SHIM register
@@ -508,7 +512,10 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
unsigned int link_id = sdw->instance;
unsigned int conf;
- pdi->intel_alh_id = (link_id * 16) + pdi->num + 5;
+ /* the Bulk and PCM streams are not contiguous */
+ pdi->intel_alh_id = (link_id * 16) + pdi->num + 3;
+ if (pdi->num >= 2)
+ pdi->intel_alh_id += 2;
/* Program Stream config ALH register */
conf = intel_readl(alh, SDW_ALH_STRMZCFG(pdi->intel_alh_id));
@@ -603,66 +610,6 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
* DAI routines
*/
-static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw,
- u32 ch, u32 dir, bool pcm)
-{
- struct sdw_cdns *cdns = &sdw->cdns;
- struct sdw_cdns_port *port = NULL;
- int i, ret = 0;
-
- for (i = 0; i < cdns->num_ports; i++) {
- if (cdns->ports[i].assigned)
- continue;
-
- port = &cdns->ports[i];
- port->assigned = true;
- port->direction = dir;
- port->ch = ch;
- break;
- }
-
- if (!port) {
- dev_err(cdns->dev, "Unable to find a free port\n");
- return NULL;
- }
-
- if (pcm) {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pcm, port, ch, dir);
- if (ret)
- goto out;
-
- intel_pdi_shim_configure(sdw, port->pdi);
- sdw_cdns_config_stream(cdns, port, ch, dir, port->pdi);
-
- intel_pdi_alh_configure(sdw, port->pdi);
-
- } else {
- ret = sdw_cdns_alloc_stream(cdns, &cdns->pdm, port, ch, dir);
- }
-
-out:
- if (ret) {
- port->assigned = false;
- port = NULL;
- }
-
- return port;
-}
-
-static void intel_port_cleanup(struct sdw_cdns_dma_data *dma)
-{
- int i;
-
- for (i = 0; i < dma->nr_ports; i++) {
- if (dma->port[i]) {
- dma->port[i]->pdi->assigned = false;
- dma->port[i]->pdi = NULL;
- dma->port[i]->assigned = false;
- dma->port[i] = NULL;
- }
- }
-}
-
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -670,9 +617,11 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_cdns_dma_data *dma;
+ struct sdw_cdns_pdi *pdi;
struct sdw_stream_config sconfig;
struct sdw_port_config *pconfig;
- int ret, i, ch, dir;
+ int ch, dir;
+ int ret;
bool pcm = true;
dma = snd_soc_dai_get_dma_data(dai, substream);
@@ -685,38 +634,30 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
else
dir = SDW_DATA_DIR_TX;
- if (dma->stream_type == SDW_STREAM_PDM) {
- /* TODO: Check whether PDM decimator is already in use */
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pdm, ch, dir);
+ if (dma->stream_type == SDW_STREAM_PDM)
pcm = false;
- } else {
- dma->nr_ports = sdw_cdns_get_stream(cdns, &cdns->pcm, ch, dir);
- }
- if (!dma->nr_ports) {
- dev_err(dai->dev, "ports/resources not available\n");
- return -EINVAL;
+ if (pcm)
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
+ else
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
+
+ if (!pdi) {
+ ret = -EINVAL;
+ goto error;
}
- dma->port = kcalloc(dma->nr_ports, sizeof(*dma->port), GFP_KERNEL);
- if (!dma->port)
- return -ENOMEM;
+ /* do run-time configurations for SHIM, ALH and PDI/PORT */
+ intel_pdi_shim_configure(sdw, pdi);
+ intel_pdi_alh_configure(sdw, pdi);
+ sdw_cdns_config_stream(cdns, ch, dir, pdi);
- for (i = 0; i < dma->nr_ports; i++) {
- dma->port[i] = intel_alloc_port(sdw, ch, dir, pcm);
- if (!dma->port[i]) {
- ret = -EINVAL;
- goto port_error;
- }
- }
/* Inform DSP about PDI stream number */
- for (i = 0; i < dma->nr_ports; i++) {
- ret = intel_config_stream(sdw, substream, dai, params,
- dma->port[i]->pdi->intel_alh_id);
- if (ret)
- goto port_error;
- }
+ ret = intel_config_stream(sdw, substream, dai, params,
+ pdi->intel_alh_id);
+ if (ret)
+ goto error;
sconfig.direction = dir;
sconfig.ch_count = ch;
@@ -731,32 +672,22 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
}
/* Port configuration */
- pconfig = kcalloc(dma->nr_ports, sizeof(*pconfig), GFP_KERNEL);
+ pconfig = kcalloc(1, sizeof(*pconfig), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto port_error;
+ goto error;
}
- for (i = 0; i < dma->nr_ports; i++) {
- pconfig[i].num = dma->port[i]->num;
- pconfig[i].ch_mask = (1 << ch) - 1;
- }
+ pconfig->num = pdi->num;
+ pconfig->ch_mask = (1 << ch) - 1;
ret = sdw_stream_add_master(&cdns->bus, &sconfig,
- pconfig, dma->nr_ports, dma->stream);
- if (ret) {
+ pconfig, 1, dma->stream);
+ if (ret)
dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
- goto stream_error;
- }
kfree(pconfig);
- return ret;
-
-stream_error:
- kfree(pconfig);
-port_error:
- intel_port_cleanup(dma);
- kfree(dma->port);
+error:
return ret;
}
@@ -776,8 +707,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
dev_err(dai->dev, "remove master from stream %s failed: %d\n",
dma->stream->name, ret);
- intel_port_cleanup(dma);
- kfree(dma->port);
return ret;
}
@@ -842,14 +771,6 @@ static int intel_create_dai(struct sdw_cdns *cdns,
return -ENOMEM;
if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
- dais[i].playback.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Tx%d",
- cdns->instance, i);
- if (!dais[i].playback.stream_name) {
- kfree(dais[i].name);
- return -ENOMEM;
- }
-
dais[i].playback.channels_min = 1;
dais[i].playback.channels_max = max_ch;
dais[i].playback.rates = SNDRV_PCM_RATE_48000;
@@ -857,23 +778,12 @@ static int intel_create_dai(struct sdw_cdns *cdns,
}
if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
- dais[i].capture.stream_name =
- kasprintf(GFP_KERNEL, "SDW%d Rx%d",
- cdns->instance, i);
- if (!dais[i].capture.stream_name) {
- kfree(dais[i].name);
- kfree(dais[i].playback.stream_name);
- return -ENOMEM;
- }
-
dais[i].capture.channels_min = 1;
dais[i].capture.channels_max = max_ch;
dais[i].capture.rates = SNDRV_PCM_RATE_48000;
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
- dais[i].id = SDW_DAI_ID_RANGE_START + i;
-
if (pcm)
dais[i].ops = &intel_pcm_dai_ops;
else
@@ -993,6 +903,15 @@ static struct sdw_master_ops sdw_intel_ops = {
.post_bank_switch = intel_post_bank_switch,
};
+static int intel_init(struct sdw_intel *sdw)
+{
+ /* Initialize shim and controller */
+ intel_link_power_up(sdw);
+ intel_shim_init(sdw);
+
+ return sdw_cdns_init(&sdw->cdns, false);
+}
+
/*
* probe and init
*/
@@ -1026,7 +945,7 @@ static int intel_probe(struct platform_device *pdev)
ret = sdw_add_bus_master(&sdw->cdns.bus);
if (ret) {
dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
- goto err_master_reg;
+ return ret;
}
if (sdw->cdns.bus.prop.hw_disabled) {
@@ -1035,16 +954,11 @@ static int intel_probe(struct platform_device *pdev)
return 0;
}
- /* Initialize shim and controller */
- intel_link_power_up(sdw);
- intel_shim_init(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
+ /* Initialize shim, controller and Cadence IP */
+ ret = intel_init(sdw);
if (ret)
goto err_init;
- ret = sdw_cdns_enable_interrupt(&sdw->cdns);
-
/* Read the PDI config and initialize cadence PDI */
intel_pdi_init(sdw, &config);
ret = sdw_cdns_pdi_init(&sdw->cdns, config);
@@ -1062,23 +976,35 @@ static int intel_probe(struct platform_device *pdev)
goto err_init;
}
+ ret = sdw_cdns_enable_interrupt(&sdw->cdns, true);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "cannot enable interrupts\n");
+ goto err_init;
+ }
+
+ ret = sdw_cdns_exit_reset(&sdw->cdns);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "unable to exit bus reset sequence\n");
+ goto err_interrupt;
+ }
+
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
snd_soc_unregister_component(sdw->cdns.dev);
- goto err_dai;
+ goto err_interrupt;
}
intel_debugfs_init(sdw);
return 0;
-err_dai:
+err_interrupt:
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
err_init:
sdw_delete_bus_master(&sdw->cdns.bus);
-err_master_reg:
return ret;
}
@@ -1090,6 +1016,7 @@ static int intel_remove(struct platform_device *pdev)
if (!sdw->cdns.bus.prop.hw_disabled) {
intel_debugfs_exit(sdw);
+ sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
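A worked example of the ALH mapping introduced above, since the Bulk PDIs (num 0 and 1) and the PCM PDIs are no longer contiguous in ALH stream space. For link 0, using the constants from this patch:

	alh_id = (link_id * 16) + pdi_num + 3;	/* PDI 0 -> 3, PDI 1 -> 4 */
	if (pdi_num >= 2)
		alh_id += 2;			/* PDI 2 -> 7, PDI 3 -> 8, ... */

i.e. ALH streams 5 and 6 are left as a hole between the Bulk and PCM ranges.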
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index b74c2f144962..2a2b4d8df462 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soundwire/sdw_intel.h>
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index 6473fa602f82..19919975bb6d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -29,10 +29,17 @@ static int sdw_slave_add(struct sdw_bus *bus,
slave->dev.parent = bus->dev;
slave->dev.fwnode = fwnode;
- /* name shall be sdw:link:mfg:part:class:unique */
- dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
- bus->link_id, id->mfg_id, id->part_id,
- id->class_id, id->unique_id);
+ if (id->unique_id == SDW_IGNORED_UNIQUE_ID) {
+ /* name shall be sdw:link:mfg:part:class */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id);
+ } else {
+ /* name shall be sdw:link:mfg:part:class:unique */
+ dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
+ bus->link_id, id->mfg_id, id->part_id,
+ id->class_id, id->unique_id);
+ }
slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
@@ -64,6 +71,36 @@ static int sdw_slave_add(struct sdw_bus *bus,
}
#if IS_ENABLED(CONFIG_ACPI)
+
+static bool find_slave(struct sdw_bus *bus,
+ struct acpi_device *adev,
+ struct sdw_slave_id *id)
+{
+ unsigned long long addr;
+ unsigned int link_id;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle,
+ METHOD_NAME__ADR, NULL, &addr);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(bus->dev, "_ADR resolution failed: %x\n",
+ status);
+ return false;
+ }
+
+ /* Extract link id from ADR, Bit 51 to 48 (included) */
+ link_id = (addr >> 48) & GENMASK(3, 0);
+
+ /* Check for link_id match */
+ if (link_id != bus->link_id)
+ return false;
+
+ sdw_extract_slave_id(bus, addr, id);
+
+ return true;
+}
+
/*
* sdw_acpi_find_slaves() - Find Slave devices in Master ACPI node
* @bus: SDW bus instance
@@ -73,6 +110,7 @@ static int sdw_slave_add(struct sdw_bus *bus,
int sdw_acpi_find_slaves(struct sdw_bus *bus)
{
struct acpi_device *adev, *parent;
+ struct acpi_device *adev2, *parent2;
parent = ACPI_COMPANION(bus->dev);
if (!parent) {
@@ -81,28 +119,46 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
}
list_for_each_entry(adev, &parent->children, node) {
- unsigned long long addr;
struct sdw_slave_id id;
- unsigned int link_id;
- acpi_status status;
+ struct sdw_slave_id id2;
+ bool ignore_unique_id = true;
- status = acpi_evaluate_integer(adev->handle,
- METHOD_NAME__ADR, NULL, &addr);
+ if (!find_slave(bus, adev, &id))
+ continue;
- if (ACPI_FAILURE(status)) {
- dev_err(bus->dev, "_ADR resolution failed: %x\n",
- status);
- return status;
+ /* brute-force O(N^2) search for duplicates */
+ parent2 = parent;
+ list_for_each_entry(adev2, &parent2->children, node) {
+
+ if (adev == adev2)
+ continue;
+
+ if (!find_slave(bus, adev2, &id2))
+ continue;
+
+ if (id.sdw_version != id2.sdw_version ||
+ id.mfg_id != id2.mfg_id ||
+ id.part_id != id2.part_id ||
+ id.class_id != id2.class_id)
+ continue;
+
+ if (id.unique_id != id2.unique_id) {
+ dev_dbg(bus->dev,
+ "Valid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ ignore_unique_id = false;
+ } else {
+ dev_err(bus->dev,
+ "Invalid unique IDs %x %x for Slave mfg %x part %d\n",
+ id.unique_id, id2.unique_id,
+ id.mfg_id, id.part_id);
+ return -ENODEV;
+ }
}
- /* Extract link id from ADR, Bit 51 to 48 (included) */
- link_id = (addr >> 48) & GENMASK(3, 0);
-
- /* Check for link_id match */
- if (link_id != bus->link_id)
- continue;
-
- sdw_extract_slave_id(bus, addr, &id);
+ if (ignore_unique_id)
+ id.unique_id = SDW_IGNORED_UNIQUE_ID;
/*
* don't error check for sdw_slave_add as we want to continue
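The duplicate scan above determines the device-name format. An illustration of the three outcomes, with made-up mfg/part values (the dev_set_name() formats are the ones from this patch):

	/* two parts, same mfg/part/class, distinct unique_ids: keep the field */
	sdw:0:25d:711:0:0
	sdw:0:25d:711:0:1

	/* no colliding sibling on the link: unique_id is dropped from the name */
	sdw:0:25d:711:0

	/* identical unique_ids on one link: -ENODEV, enumeration aborts */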
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 333308fe807e..eaf753b70ec5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -127,4 +127,6 @@ source "drivers/staging/qlge/Kconfig"
source "drivers/staging/hp/Kconfig"
+source "drivers/staging/wfx/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e4943cd63e98..0a4396c9067b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
+obj-$(CONFIG_WFX) += wfx/
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e6b1ca141b93..c394686a8e7d 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -533,9 +533,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = ion_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ion_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
static int debug_shrink_set(void *data, u64 val)
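compat_ptr_ioctl() is the generic helper for drivers whose ioctl arguments are all pointers; roughly (a simplified sketch of its behavior, not the ion code):

	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));

Unlike the old #ifdef block, this runs the 32-bit userspace pointer through compat_ptr(), which matters on s390.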
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 805437fa249a..39e6c59df1e9 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -125,7 +125,6 @@ MODULE_PARM_DESC(write_timeout, "ms to wait before blocking write() timing out;
struct axis_fifo {
int irq; /* interrupt */
- struct resource *mem; /* physical memory */
void __iomem *base_addr; /* kernel space memory */
unsigned int rx_fifo_depth; /* max words in the receive fifo */
@@ -701,6 +700,68 @@ static int get_dts_property(struct axis_fifo *fifo,
return 0;
}
+static int axis_fifo_parse_dt(struct axis_fifo *fifo)
+{
+ int ret;
+ unsigned int value;
+
+ ret = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width", &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-rxd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-rxd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width", &value);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,axi-str-txd-tdata-width property\n");
+ goto end;
+ } else if (value != 32) {
+ dev_err(fifo->dt_device, "xlnx,axi-str-txd-tdata-width only supports 32 bits\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,rx-fifo-depth",
+ &fifo->rx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,rx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,tx-fifo-depth",
+ &fifo->tx_fifo_depth);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,tx-fifo-depth property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ /* IP sets TDFV to fifo depth - 4 so we will do the same */
+ fifo->tx_fifo_depth -= 4;
+
+ ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = get_dts_property(fifo, "xlnx,use-tx-data", &fifo->has_tx_fifo);
+ if (ret) {
+ dev_err(fifo->dt_device, "missing xlnx,use-tx-data property\n");
+ ret = -EIO;
+ goto end;
+ }
+
+end:
+ return ret;
+}
+
static int axis_fifo_probe(struct platform_device *pdev)
{
struct resource *r_irq; /* interrupt resources */
@@ -712,34 +773,6 @@ static int axis_fifo_probe(struct platform_device *pdev)
int rc = 0; /* error return value */
- /* IP properties from device tree */
- unsigned int rxd_tdata_width;
- unsigned int txc_tdata_width;
- unsigned int txd_tdata_width;
- unsigned int tdest_width;
- unsigned int tid_width;
- unsigned int tuser_width;
- unsigned int data_interface_type;
- unsigned int has_tdest;
- unsigned int has_tid;
- unsigned int has_tkeep;
- unsigned int has_tstrb;
- unsigned int has_tuser;
- unsigned int rx_fifo_depth;
- unsigned int rx_programmable_empty_threshold;
- unsigned int rx_programmable_full_threshold;
- unsigned int axi_id_width;
- unsigned int axi4_data_width;
- unsigned int select_xpm;
- unsigned int tx_fifo_depth;
- unsigned int tx_programmable_empty_threshold;
- unsigned int tx_programmable_full_threshold;
- unsigned int use_rx_cut_through;
- unsigned int use_rx_data;
- unsigned int use_tx_control;
- unsigned int use_tx_cut_through;
- unsigned int use_tx_data;
-
/* ----------------------------
* init wrapper device
* ----------------------------
@@ -772,32 +805,19 @@ static int axis_fifo_probe(struct platform_device *pdev)
goto err_initial;
}
- fifo->mem = r_mem;
-
/* request physical memory */
- if (!request_mem_region(fifo->mem->start, resource_size(fifo->mem),
- DRIVER_NAME)) {
- dev_err(fifo->dt_device,
- "couldn't lock memory region at 0x%pa\n",
- &fifo->mem->start);
- rc = -EBUSY;
+ fifo->base_addr = devm_ioremap_resource(fifo->dt_device, r_mem);
+ if (IS_ERR(fifo->base_addr)) {
+ rc = PTR_ERR(fifo->base_addr);
+ dev_err(fifo->dt_device, "can't remap IO resource (%d)\n", rc);
goto err_initial;
}
- dev_dbg(fifo->dt_device, "got memory location [0x%pa - 0x%pa]\n",
- &fifo->mem->start, &fifo->mem->end);
-
- /* map physical memory to kernel virtual address space */
- fifo->base_addr = ioremap(fifo->mem->start, resource_size(fifo->mem));
- if (!fifo->base_addr) {
- dev_err(fifo->dt_device, "couldn't map physical memory\n");
- rc = -ENOMEM;
- goto err_mem;
- }
+
dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
/* create unique device name */
snprintf(device_name, sizeof(device_name), "%s_%pa",
- DRIVER_NAME, &fifo->mem->start);
+ DRIVER_NAME, &r_mem->start);
dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
@@ -806,164 +826,9 @@ static int axis_fifo_probe(struct platform_device *pdev)
* ----------------------------
*/
- /* retrieve device tree properties */
- rc = get_dts_property(fifo, "xlnx,axi-str-rxd-tdata-width",
- &rxd_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axi-str-txc-tdata-width",
- &txc_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axi-str-txd-tdata-width",
- &txd_tdata_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tdest-width", &tdest_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tid-width", &tid_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,axis-tuser-width", &tuser_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,data-interface-type",
- &data_interface_type);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tdest", &has_tdest);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tid", &has_tid);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tkeep", &has_tkeep);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tstrb", &has_tstrb);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,has-axis-tuser", &has_tuser);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-depth", &rx_fifo_depth);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-pe-threshold",
- &rx_programmable_empty_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,rx-fifo-pf-threshold",
- &rx_programmable_full_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,s-axi-id-width", &axi_id_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,s-axi4-data-width", &axi4_data_width);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,select-xpm", &select_xpm);
+ rc = axis_fifo_parse_dt(fifo);
if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-depth", &tx_fifo_depth);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-pe-threshold",
- &tx_programmable_empty_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,tx-fifo-pf-threshold",
- &tx_programmable_full_threshold);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-rx-cut-through",
- &use_rx_cut_through);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-rx-data", &use_rx_data);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-ctrl", &use_tx_control);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-cut-through",
- &use_tx_cut_through);
- if (rc)
- goto err_unmap;
- rc = get_dts_property(fifo, "xlnx,use-tx-data", &use_tx_data);
- if (rc)
- goto err_unmap;
-
- /* check validity of device tree properties */
- if (rxd_tdata_width != 32) {
- dev_err(fifo->dt_device,
- "rxd_tdata_width width [%u] unsupported\n",
- rxd_tdata_width);
- rc = -EIO;
- goto err_unmap;
- }
- if (txd_tdata_width != 32) {
- dev_err(fifo->dt_device,
- "txd_tdata_width width [%u] unsupported\n",
- txd_tdata_width);
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tdest) {
- dev_err(fifo->dt_device, "tdest not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tid) {
- dev_err(fifo->dt_device, "tid not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tkeep) {
- dev_err(fifo->dt_device, "tkeep not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tstrb) {
- dev_err(fifo->dt_device, "tstrb not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (has_tuser) {
- dev_err(fifo->dt_device, "tuser not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_rx_cut_through) {
- dev_err(fifo->dt_device, "rx cut-through not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_tx_cut_through) {
- dev_err(fifo->dt_device, "tx cut-through not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
- if (use_tx_control) {
- dev_err(fifo->dt_device, "tx control not supported\n");
- rc = -EIO;
- goto err_unmap;
- }
-
- /* TODO
- * these exist in the device tree but it's unclear what they do
- * - select-xpm
- * - data-interface-type
- */
-
- /* set device wrapper properties based on IP config */
- fifo->rx_fifo_depth = rx_fifo_depth;
- /* IP sets TDFV to fifo depth - 4 so we will do the same */
- fifo->tx_fifo_depth = tx_fifo_depth - 4;
- fifo->has_rx_fifo = use_rx_data;
- fifo->has_tx_fifo = use_tx_data;
+ goto err_initial;
reset_ip_core(fifo);
@@ -976,18 +841,19 @@ static int axis_fifo_probe(struct platform_device *pdev)
r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!r_irq) {
dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
- &fifo->mem->start);
+ &r_mem->start);
rc = -EIO;
- goto err_unmap;
+ goto err_initial;
}
/* request IRQ */
fifo->irq = r_irq->start;
- rc = request_irq(fifo->irq, &axis_fifo_irq, 0, DRIVER_NAME, fifo);
+ rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
+ DRIVER_NAME, fifo);
if (rc) {
dev_err(fifo->dt_device, "couldn't allocate interrupt %i\n",
fifo->irq);
- goto err_unmap;
+ goto err_initial;
}
/* ----------------------------
@@ -998,7 +864,7 @@ static int axis_fifo_probe(struct platform_device *pdev)
/* allocate device number */
rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME);
if (rc < 0)
- goto err_irq;
+ goto err_initial;
dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n",
MAJOR(fifo->devt), MINOR(fifo->devt));
@@ -1022,14 +888,14 @@ static int axis_fifo_probe(struct platform_device *pdev)
}
/* create sysfs entries */
- rc = sysfs_create_group(&fifo->device->kobj, &axis_fifo_attrs_group);
+ rc = devm_device_add_group(fifo->device, &axis_fifo_attrs_group);
if (rc < 0) {
dev_err(fifo->dt_device, "couldn't register sysfs group\n");
goto err_cdev;
}
dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n",
- &fifo->mem->start, &fifo->base_addr, fifo->irq,
+ &r_mem->start, &fifo->base_addr, fifo->irq,
MAJOR(fifo->devt), MINOR(fifo->devt));
return 0;
@@ -1040,12 +906,6 @@ err_dev:
device_destroy(axis_fifo_driver_class, fifo->devt);
err_chrdev_region:
unregister_chrdev_region(fifo->devt, 1);
-err_irq:
- free_irq(fifo->irq, fifo);
-err_unmap:
- iounmap(fifo->base_addr);
-err_mem:
- release_mem_region(fifo->mem->start, resource_size(fifo->mem));
err_initial:
dev_set_drvdata(dev, NULL);
return rc;
@@ -1056,15 +916,12 @@ static int axis_fifo_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct axis_fifo *fifo = dev_get_drvdata(dev);
- sysfs_remove_group(&fifo->device->kobj, &axis_fifo_attrs_group);
cdev_del(&fifo->char_device);
dev_set_drvdata(fifo->device, NULL);
device_destroy(axis_fifo_driver_class, fifo->devt);
unregister_chrdev_region(fifo->devt, 1);
- free_irq(fifo->irq, fifo);
- iounmap(fifo->base_addr);
- release_mem_region(fifo->mem->start, resource_size(fifo->mem));
dev_set_drvdata(dev, NULL);
+
return 0;
}
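The error-path shrinkage above comes from device-managed resources: devm_ioremap_resource(), devm_request_irq() and devm_device_add_group() are all undone automatically, in reverse order, on probe failure or unbind, so the manual err_irq/err_unmap/err_mem labels and the matching remove-path calls can go. A minimal sketch of the pattern, with hypothetical names:

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* nothing to unwind, devm owns it */

	ret = devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
			       "my-driver", priv);
	if (ret)
		return ret;		/* the mapping above is released for us */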
diff --git a/drivers/staging/axis-fifo/axis-fifo.txt b/drivers/staging/axis-fifo/axis-fifo.txt
index 85d88c010e72..5828e1b8e822 100644
--- a/drivers/staging/axis-fifo/axis-fifo.txt
+++ b/drivers/staging/axis-fifo/axis-fifo.txt
@@ -25,10 +25,10 @@ Required properties:
- xlnx,axi-str-txc-tdata-width: Should be <0x20>
- xlnx,axi-str-txd-protocol: Should be "XIL_AXI_STREAM_ETH_DATA"
- xlnx,axi-str-txd-tdata-width: Should be <0x20>
-- xlnx,axis-tdest-width: AXI-Stream TDEST width
-- xlnx,axis-tid-width: AXI-Stream TID width
-- xlnx,axis-tuser-width: AXI-Stream TUSER width
-- xlnx,data-interface-type: Should be <0x0>
+- xlnx,axis-tdest-width: AXI-Stream TDEST width (ignored by the driver)
+- xlnx,axis-tid-width: AXI-Stream TID width (ignored by the driver)
+- xlnx,axis-tuser-width: AXI-Stream TUSER width (ignored by the driver)
+- xlnx,data-interface-type: Should be <0x0> (ignored by the driver)
- xlnx,has-axis-tdest: Should be <0x0> (this feature isn't supported)
- xlnx,has-axis-tid: Should be <0x0> (this feature isn't supported)
- xlnx,has-axis-tkeep: Should be <0x0> (this feature isn't supported)
@@ -36,13 +36,17 @@ Required properties:
- xlnx,has-axis-tuser: Should be <0x0> (this feature isn't supported)
- xlnx,rx-fifo-depth: Depth of RX FIFO in words
- xlnx,rx-fifo-pe-threshold: RX programmable empty interrupt threshold
+ (ignored by the driver)
- xlnx,rx-fifo-pf-threshold: RX programmable full interrupt threshold
-- xlnx,s-axi-id-width: Should be <0x4>
-- xlnx,s-axi4-data-width: Should be <0x20>
-- xlnx,select-xpm: Should be <0x0>
+ (ignored by the driver)
+- xlnx,s-axi-id-width: Should be <0x4> (ignored by the driver)
+- xlnx,s-axi4-data-width: Should be <0x20> (ignored by the driver)
+- xlnx,select-xpm: Should be <0x0> (ignored by the driver)
- xlnx,tx-fifo-depth: Depth of TX FIFO in words
- xlnx,tx-fifo-pe-threshold: TX programmable empty interrupt threshold
+ (ignored by the driver)
- xlnx,tx-fifo-pf-threshold: TX programmable full interrupt threshold
+ (ignored by the driver)
- xlnx,use-rx-cut-through: Should be <0x0> (this feature isn't supported)
- xlnx,use-rx-data: <0x1> if RX FIFO is enabled, <0x0> otherwise
- xlnx,use-tx-ctrl: Should be <0x0> (this feature isn't supported)
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 962cc0c79988..0225234dd7aa 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -50,16 +50,8 @@ static struct sh_mobile_lcdc_info lcdc0_info = {
};
static struct resource lcdc0_resources[] = {
- [0] = {
- .name = "LCD0",
- .start = 0xfe940000,
- .end = 0xfe943fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 177 + 32,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0"),
+ DEFINE_RES_IRQ(177 + 32),
};
static struct platform_device lcdc0_device = {
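The DEFINE_RES_* conversion is equivalence-preserving: DEFINE_RES_MEM_NAMED(start, size, name) sets .start = start and .end = start + size - 1 with IORESOURCE_MEM, so

	DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0")
	/* .start = 0xfe940000, .end = 0xfe943fff, matching the removed entry */

and DEFINE_RES_IRQ(177 + 32) likewise reproduces the removed IORESOURCE_IRQ entry.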
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 15b7a82f4b1e..e52a64be93f3 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -135,7 +135,6 @@ static int clk_wzrd_probe(struct platform_device *pdev)
unsigned long rate;
const char *clk_name;
struct clk_wzrd *clk_wzrd;
- struct resource *mem;
struct device_node *np = pdev->dev.of_node;
clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
@@ -143,8 +142,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, clk_wzrd);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- clk_wzrd->base = devm_ioremap_resource(&pdev->dev, mem);
+ clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(clk_wzrd->base))
return PTR_ERR(clk_wzrd->base);
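devm_platform_ioremap_resource(pdev, index) simply fuses the two calls it replaces; its behavior is roughly:

	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);

which is why the local struct resource * can be dropped here (and in the emxx_udc change further down).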
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index caf4d4df4bd3..f7c365b70106 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -508,12 +508,11 @@ static int dt3k_ai_insn_read(struct comedi_device *dev,
unsigned int *data)
{
int i;
- unsigned int chan, gain, aref;
+ unsigned int chan, gain;
chan = CR_CHAN(insn->chanspec);
gain = CR_RANGE(insn->chanspec);
/* XXX docs don't explain how to select aref */
- aref = CR_AREF(insn->chanspec);
for (i = 0; i < insn->n; i++)
data[i] = dt3k_readsingle(dev, DPR_SUBSYS_AI, chan, gain);
diff --git a/drivers/staging/comedi/drivers/ni_routes.c b/drivers/staging/comedi/drivers/ni_routes.c
index eb61494dc2bd..673d732dcb8f 100644
--- a/drivers/staging/comedi/drivers/ni_routes.c
+++ b/drivers/staging/comedi/drivers/ni_routes.c
@@ -49,8 +49,6 @@
/* Helper for accessing data. */
#define RVi(table, src, dest) ((table)[(dest) * NI_NUM_NAMES + (src)])
-static const size_t route_table_size = NI_NUM_NAMES * NI_NUM_NAMES;
-
/*
* Find the proper route_values and ni_device_routes tables for this particular
* device.
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 04bc488385e6..4af012968cb6 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
+ * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
*/
/*
@@ -8,7 +8,7 @@
* Description: University of Stirling USB DAQ & INCITE Technology Limited
* Devices: [ITL] USB-DUX-FAST (usbduxfast)
* Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
+ * Updated: 16 Nov 2019
* Status: stable
*/
@@ -22,6 +22,7 @@
*
*
* Revision history:
+ * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
* 0.9: Dropping the first data packet which seems to be from the last transfer.
* Buffer overflows in the FX2 are handed over to comedi.
* 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
@@ -350,6 +351,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
struct comedi_cmd *cmd)
{
int err = 0;
+ int err2 = 0;
unsigned int steps;
unsigned int arg;
@@ -399,11 +401,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
*/
steps = (cmd->convert_arg * 30) / 1000;
if (cmd->chanlist_len != 1)
- err |= comedi_check_trigger_arg_min(&steps,
- MIN_SAMPLING_PERIOD);
- err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
- arg = (steps * 1000) / 30;
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ err2 |= comedi_check_trigger_arg_min(&steps,
+ MIN_SAMPLING_PERIOD);
+ else
+ err2 |= comedi_check_trigger_arg_min(&steps, 1);
+ err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
+ if (err2) {
+ err |= err2;
+ arg = (steps * 1000) / 30;
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ }
if (cmd->stop_src == TRIG_COUNT)
err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
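The rounding issue fixed above: convert_arg is in ns but the hardware steps in 1/30 us units, so the ns -> steps -> ns round trip truncates. Worked example with convert_arg = 334:

	steps = 334 * 30 / 1000 = 10	/* 10.02 truncated */
	arg   = 10 * 1000 / 30  = 333	/* != 334 */

The old code unconditionally rewrote convert_arg to the round-tripped value, so comedi_check_trigger_arg_is() flagged an adjustment even when steps was already in range; the new code only does that when a limit was actually clamped (err2 != 0).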
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 147481bf680c..03929b9d3a8b 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -1072,9 +1072,8 @@ static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
if (i_word_length > 0) {
for (i = 0; i < i_word_length; i++) {
_nbu2ss_writel(
- &preg->EP_REGS[ep->epnum - 1].EP_WRITE
- , p_buf_32->dw
- );
+ &preg->EP_REGS[ep->epnum - 1].EP_WRITE,
+ p_buf_32->dw);
p_buf_32++;
}
@@ -2661,20 +2660,18 @@ static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req)
- break;
- }
- if (&req->req != _req) {
- spin_unlock_irqrestore(&udc->lock, flags);
- pr_debug("%s no queue(EINVAL)\n", __func__);
- return -EINVAL;
+ if (&req->req == _req) {
+ _nbu2ss_ep_done(ep, req, -ECONNRESET);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+ }
}
- _nbu2ss_ep_done(ep, req, -ECONNRESET);
-
spin_unlock_irqrestore(&udc->lock, flags);
- return 0;
+ pr_debug("%s no queue(EINVAL)\n", __func__);
+
+ return -EINVAL;
}
/*-------------------------------------------------------------------------*/
@@ -3078,7 +3075,6 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
{
int status = -ENODEV;
struct nbu2ss_udc *udc;
- struct resource *r;
int irq;
void __iomem *mmio_base;
@@ -3088,8 +3084,7 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, udc);
/* require I/O memory and IRQ to be provided as resources */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base))
return PTR_ERR(mmio_base);
diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig
index ce32dfe33bec..0130019cbec2 100644
--- a/drivers/staging/exfat/Kconfig
+++ b/drivers/staging/exfat/Kconfig
@@ -6,15 +6,6 @@ config EXFAT_FS
help
This adds support for the exFAT file system.
-config EXFAT_DONT_MOUNT_VFAT
- bool "Prohibit mounting of fat/vfat filesystems by exFAT"
- depends on EXFAT_FS
- default y
- help
- By default, the exFAT driver will only mount exFAT filesystems, and refuse
- to mount fat/vfat filesystems. Set this to 'n' to allow the exFAT driver
- to mount these filesystems.
-
config EXFAT_DISCARD
bool "enable discard support"
depends on EXFAT_FS
diff --git a/drivers/staging/exfat/TODO b/drivers/staging/exfat/TODO
index a3eb282f9efc..a283ce534cf4 100644
--- a/drivers/staging/exfat/TODO
+++ b/drivers/staging/exfat/TODO
@@ -1,8 +1,22 @@
+A laundry list of things that need looking at, most of which will
+require more work than the average checkpatch cleanup...
+
+Note that some of these entries may not be bugs - they're things
+that need to be looked at, and *possibly* fixed.
+
+Clean up the ffsCamelCase function names.
+
+Fix (thing)->flags to not use magic numbers - multiple offenders
+
+Sort out all the s32/u32/u8 nonsense - most of these should be plain int.
+
exfat_core.c - ffsReadFile - the goto err_out seem to leak a brelse().
same for ffsWriteFile.
-exfat_core.c - fs_sync(sb,0) all over the place looks fishy as hell.
-There's only one place that calls it with a non-zero argument.
+All the calls to fs_sync() need to be looked at, particularly in the
+context of EXFAT_DELAYED_SYNC. Currently, if that's defined, we only
+flush to disk when sync() gets called. We should be doing at least
+metadata flushes at appropriate times.
ffsTruncateFile - if (old_size <= new_size) {
That doesn't look right. How did it ever work? Are they relying on lazy
@@ -10,3 +24,46 @@ block allocation when actual writes happen? If nothing else, it never
does the 'fid->size = new_size' and do the inode update....
ffsSetAttr() is just dangling in the breeze, not wired up at all...
+
+Convert global mutexes to a per-superblock mutex.
+
+Right now, we load exactly one UTF-8 table. Check to see
+if that plays nice with different codepage and iocharset values
+for simultaneous mounts of different devices.
+
+exfat_rmdir() checks for -EBUSY but ffsRemoveDir() doesn't return it.
+In fact, there's a complete lack of -EBUSY testing anywhere.
+
+There are probably a few missing checks for -EEXIST
+
+check return codes of sync_dirty_buffer()
+
+Why is remove_file doing a num_entries++??
+
+Double check a lot of can't-happen parameter checks (for null pointers for
+things that have only one call site and can't pass a null, etc).
+
+All the DEBUG stuff can probably be tossed, including the ioctl(). Either
+that, or convert to a proper fault-injection system.
+
+exfat_remount does exactly one thing. Fix to actually deal with remount
+options, particularly handling R/O correctly. For that matter, allow
+R/O mounts in the first place.
+
+Figure out why the VFAT code used multi_sector_(read|write) but the
+exfat code doesn't use it. The difference matters on SSDs with wear leveling.
+
+exfat_fat_sync(), exfat_buf_sync(), and sync_alloc_bitmap()
+aren't called anyplace....
+
+Create a helper function for exfat_set_entry_time() and exfat_set_entry_type(),
+because it's sort of ugly to have some code calling the same function directly
+and other code calling through the fs_func struct pointers...
+
+clean up the remaining vol_type checks, which are of two types:
+some are ?: operators with magic numbers, and the rest are places
+where we're doing stuff with '.' and '..'.
+
+Patches to:
+ Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ Valdis Kletnieks <valdis.kletnieks@vt.edu>
diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h
index 3abab33e932c..2aac1e000977 100644
--- a/drivers/staging/exfat/exfat.h
+++ b/drivers/staging/exfat/exfat.h
@@ -30,6 +30,8 @@
#undef DEBUG
#endif
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
#define DENTRY_SIZE 32 /* dir entry size */
#define DENTRY_SIZE_BITS 5
@@ -206,28 +208,6 @@ static inline u16 get_row_index(u16 i)
#define FM_REGULAR 0x00
#define FM_SYMLINK 0x40
-/* return values */
-#define FFS_SUCCESS 0
-#define FFS_MEDIAERR 1
-#define FFS_FORMATERR 2
-#define FFS_MOUNTED 3
-#define FFS_NOTMOUNTED 4
-#define FFS_ALIGNMENTERR 5
-#define FFS_SEMAPHOREERR 6
-#define FFS_INVALIDPATH 7
-#define FFS_INVALIDFID 8
-#define FFS_NOTFOUND 9
-#define FFS_FILEEXIST 10
-#define FFS_PERMISSIONERR 11
-#define FFS_NOTOPENED 12
-#define FFS_MAXOPENED 13
-#define FFS_FULL 14
-#define FFS_EOF 15
-#define FFS_DIRBUSY 16
-#define FFS_MEMORYERR 17
-#define FFS_NAMETOOLONG 18
-#define FFS_ERROR 19
-
#define NUM_UPCASE 2918
#define DOS_CUR_DIR_NAME ". "
@@ -618,7 +598,7 @@ struct fs_info_t {
u32 dev_ejected; /* block device operation error flag */
struct fs_func *fs_func;
- struct semaphore v_sem;
+ struct mutex v_mutex;
/* FAT cache */
struct buf_cache_t FAT_cache_array[FAT_CACHE_SIZE];
@@ -749,14 +729,7 @@ static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
/* NLS management function */
u16 nls_upper(struct super_block *sb, u16 a);
-int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b);
int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b);
-void nls_uniname_to_dosname(struct super_block *sb,
- struct dos_name_t *p_dosname,
- struct uni_name_t *p_uniname, bool *p_lossy);
-void nls_dosname_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
struct uni_name_t *p_uniname);
void nls_cstring_to_uniname(struct super_block *sb,
@@ -764,48 +737,33 @@ void nls_cstring_to_uniname(struct super_block *sb,
bool *p_lossy);
/* buffer cache management */
-void buf_init(struct super_block *sb);
-void buf_shutdown(struct super_block *sb);
-int FAT_read(struct super_block *sb, u32 loc, u32 *content);
-s32 FAT_write(struct super_block *sb, u32 loc, u32 content);
-u8 *FAT_getblk(struct super_block *sb, sector_t sec);
-void FAT_modify(struct super_block *sb, sector_t sec);
-void FAT_release_all(struct super_block *sb);
-void FAT_sync(struct super_block *sb);
-u8 *buf_getblk(struct super_block *sb, sector_t sec);
-void buf_modify(struct super_block *sb, sector_t sec);
-void buf_lock(struct super_block *sb, sector_t sec);
-void buf_unlock(struct super_block *sb, sector_t sec);
-void buf_release(struct super_block *sb, sector_t sec);
-void buf_release_all(struct super_block *sb);
-void buf_sync(struct super_block *sb);
+void exfat_buf_init(struct super_block *sb);
+void exfat_buf_shutdown(struct super_block *sb);
+int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content);
+s32 exfat_fat_write(struct super_block *sb, u32 loc, u32 content);
+u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec);
+void exfat_fat_modify(struct super_block *sb, sector_t sec);
+void exfat_fat_release_all(struct super_block *sb);
+void exfat_fat_sync(struct super_block *sb);
+u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec);
+void exfat_buf_modify(struct super_block *sb, sector_t sec);
+void exfat_buf_lock(struct super_block *sb, sector_t sec);
+void exfat_buf_unlock(struct super_block *sb, sector_t sec);
+void exfat_buf_release(struct super_block *sb, sector_t sec);
+void exfat_buf_release_all(struct super_block *sb);
+void exfat_buf_sync(struct super_block *sb);
/* fs management functions */
void fs_set_vol_flags(struct super_block *sb, u32 new_flag);
void fs_error(struct super_block *sb);
/* cluster management functions */
-s32 clear_cluster(struct super_block *sb, u32 clu);
-s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain);
-void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse);
-u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain);
s32 count_num_clusters(struct super_block *sb, struct chain_t *dir);
-s32 fat_count_used_clusters(struct super_block *sb);
-s32 exfat_count_used_clusters(struct super_block *sb);
void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len);
/* allocation bitmap management functions */
s32 load_alloc_bitmap(struct super_block *sb);
void free_alloc_bitmap(struct super_block *sb);
-s32 set_alloc_bitmap(struct super_block *sb, u32 clu);
-s32 clr_alloc_bitmap(struct super_block *sb, u32 clu);
-u32 test_alloc_bitmap(struct super_block *sb, u32 clu);
void sync_alloc_bitmap(struct super_block *sb);
/* upcase table management functions */
@@ -813,63 +771,8 @@ s32 load_upcase_table(struct super_block *sb);
void free_upcase_table(struct super_block *sb);
/* dir entry management functions */
-u32 fat_get_entry_type(struct dentry_t *p_entry);
-u32 exfat_get_entry_type(struct dentry_t *p_entry);
-void fat_set_entry_type(struct dentry_t *p_entry, u32 type);
-void exfat_set_entry_type(struct dentry_t *p_entry, u32 type);
-u32 fat_get_entry_attr(struct dentry_t *p_entry);
-u32 exfat_get_entry_attr(struct dentry_t *p_entry);
-void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr);
-u8 fat_get_entry_flag(struct dentry_t *p_entry);
-u8 exfat_get_entry_flag(struct dentry_t *p_entry);
-void fat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flag);
-u32 fat_get_entry_clu0(struct dentry_t *p_entry);
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry);
-void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu);
-u64 fat_get_entry_size(struct dentry_t *p_entry);
-u64 exfat_get_entry_size(struct dentry_t *p_entry);
-void fat_set_entry_size(struct dentry_t *p_entry, u64 size);
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size);
struct timestamp_t *tm_current(struct timestamp_t *tm);
-void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode);
-s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- u32 type, u32 start_clu, u64 size);
-s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, u32 type, u32 start_clu, u64 size);
-s32 fat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
-s32 exfat_init_ext_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname);
-void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu);
-void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum,
- u16 *uniname);
-void init_file_entry(struct file_dentry_t *ep, u32 type);
-void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu,
- u64 size);
-void init_name_entry(struct name_dentry_t *ep, u16 *uniname);
-void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries);
-
-s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- sector_t *sector, s32 *offset);
-struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
- s32 offset);
+
struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
s32 entry, sector_t *sector);
struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
@@ -877,24 +780,6 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
u32 type,
struct dentry_t **file_ep);
void release_entry_set(struct entry_set_cache_t *es);
-s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es);
-s32 write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- struct dentry_t *ep, u32 count);
-s32 search_deleted_or_unused_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 num_entries);
-s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir,
- s32 num_entries);
-s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type);
-s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry);
s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
u32 type);
void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
@@ -907,36 +792,13 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir);
s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 *entries,
struct dos_name_t *p_dosname);
-void get_uni_name_from_dos_entry(struct super_block *sb,
- struct dos_dentry_t *ep,
- struct uni_name_t *p_uniname, u8 mode);
-void fat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname);
-s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep,
- u16 *uniname, s32 order);
-s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep,
- u16 *uniname, s32 order);
-s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct dos_name_t *p_dosname);
-void fat_attach_count_to_dos_name(u8 *dosname, s32 count);
-s32 fat_calc_num_entries(struct uni_name_t *p_uniname);
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname);
-u8 calc_checksum_1byte(void *data, s32 len, u8 chksum);
u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type);
-u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type);
/* name resolution functions */
s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
struct uni_name_t *p_uniname);
-s32 resolve_name(u8 *name, u8 **arg);
/* file operation functions */
-s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
-s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr);
s32 create_dir(struct inode *inode, struct chain_t *p_dir,
struct uni_name_t *p_uniname, struct file_id_t *fid);
@@ -959,13 +821,13 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
int multi_sector_write(struct super_block *sb, sector_t sec,
struct buffer_head *bh, s32 num_secs, bool sync);
-void bdev_open(struct super_block *sb);
-void bdev_close(struct super_block *sb);
-int bdev_read(struct super_block *sb, sector_t secno,
+void exfat_bdev_open(struct super_block *sb);
+void exfat_bdev_close(struct super_block *sb);
+int exfat_bdev_read(struct super_block *sb, sector_t secno,
struct buffer_head **bh, u32 num_secs, bool read);
-int bdev_write(struct super_block *sb, sector_t secno,
+int exfat_bdev_write(struct super_block *sb, sector_t secno,
struct buffer_head *bh, u32 num_secs, bool sync);
-int bdev_sync(struct super_block *sb);
+int exfat_bdev_sync(struct super_block *sb);
extern const u8 uni_upcase[];
#endif /* _EXFAT_H */
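
This trims exfat.h down to the exFAT-only API: prototypes for the FAT12/16/32 helpers are gone, functions that became file-local in the .c files are dropped, and everything still exported carries an exfat_ prefix. The prefix matters because these were ordinary global symbols with generic names; when the driver is built into the kernel image, a second object defining bdev_open() or buf_getblk() can collide at link time. A minimal illustration (all names hypothetical):

	/* Without a prefix, two built-in objects can collide:
	 *
	 *   drivers/foo/io.c:  int bdev_open(void) { ... }
	 *   drivers/bar/io.c:  int bdev_open(void) { ... }
	 *       -> ld: multiple definition of `bdev_open'
	 *
	 * Prefixing each driver's helpers keeps the symbols unique: */
	int foo_bdev_open(void);
	int bar_bdev_open(void);
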
diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c
index 81d20e6241c6..7bcd98b13109 100644
--- a/drivers/staging/exfat/exfat_blkdev.c
+++ b/drivers/staging/exfat/exfat_blkdev.c
@@ -8,7 +8,7 @@
#include <linux/fs.h>
#include "exfat.h"
-void bdev_open(struct super_block *sb)
+void exfat_bdev_open(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -23,14 +23,14 @@ void bdev_open(struct super_block *sb)
p_bd->opened = true;
}
-void bdev_close(struct super_block *sb)
+void exfat_bdev_close(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
p_bd->opened = false;
}
-int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
+int exfat_bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
u32 num_secs, bool read)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -40,11 +40,11 @@ int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
if (*bh)
__brelse(*bh);
@@ -62,10 +62,10 @@ int bdev_read(struct super_block *sb, sector_t secno, struct buffer_head **bh,
WARN(!p_fs->dev_ejected,
"[EXFAT] No bh, device seems wrong or to be ejected.\n");
- return FFS_MEDIAERR;
+ return -EIO;
}
-int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
+int exfat_bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
u32 num_secs, bool sync)
{
s32 count;
@@ -77,11 +77,11 @@ int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
if (secno == bh->b_blocknr) {
lock_buffer(bh);
@@ -89,7 +89,7 @@ int bdev_write(struct super_block *sb, sector_t secno, struct buffer_head *bh,
mark_buffer_dirty(bh);
unlock_buffer(bh);
if (sync && (sync_dirty_buffer(bh) != 0))
- return FFS_MEDIAERR;
+ return -EIO;
} else {
count = num_secs << p_bd->sector_size_bits;
@@ -115,10 +115,10 @@ no_bh:
WARN(!p_fs->dev_ejected,
"[EXFAT] No bh, device seems wrong or to be ejected.\n");
- return FFS_MEDIAERR;
+ return -EIO;
}
-int bdev_sync(struct super_block *sb)
+int exfat_bdev_sync(struct super_block *sb)
{
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
@@ -126,11 +126,11 @@ int bdev_sync(struct super_block *sb)
long flags = sbi->debug_flags;
if (flags & EXFAT_DEBUGFLAGS_ERROR_RW)
- return FFS_MEDIAERR;
+ return -EIO;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
if (!p_bd->opened)
- return FFS_MEDIAERR;
+ return -ENODEV;
return sync_blockdev(sb->s_bdev);
}
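
All four block-device helpers now follow one convention: 0 on success, a standard negative errno on failure, with -ENODEV for "the device was never opened" and -EIO for actual media errors, replacing the driver-private FFS_MEDIAERR. A hedged sketch of the shape this gives callers (exfat_demo_io() is hypothetical, not part of the driver):

	#include <linux/errno.h>

	/* Standard errnos let callers propagate errors straight to the
	 * VFS instead of translating driver-private FFS_* codes. */
	static int exfat_demo_io(bool opened, bool media_ok)
	{
		if (!opened)
			return -ENODEV;	/* device not opened */
		if (!media_ok)
			return -EIO;	/* I/O failure */
		return 0;		/* success is 0, not FFS_SUCCESS */
	}
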
diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c
index e1b001718709..3fd5604058a9 100644
--- a/drivers/staging/exfat/exfat_cache.c
+++ b/drivers/staging/exfat/exfat_cache.c
@@ -12,8 +12,8 @@
#define DIRTYBIT 0x02
/* Local variables */
-static DEFINE_SEMAPHORE(f_sem);
-static DEFINE_SEMAPHORE(b_sem);
+static DEFINE_MUTEX(f_mutex);
+static DEFINE_MUTEX(b_mutex);
static struct buf_cache_t *FAT_cache_find(struct super_block *sb, sector_t sec)
{
@@ -128,7 +128,7 @@ static void buf_cache_remove_hash(struct buf_cache_t *bp)
(bp->hash_next)->hash_prev = bp->hash_prev;
}
-void buf_init(struct super_block *sb)
+void exfat_buf_init(struct super_block *sb)
{
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -189,11 +189,11 @@ void buf_init(struct super_block *sb)
buf_cache_insert_hash(sb, &p_fs->buf_cache_array[i]);
}
-void buf_shutdown(struct super_block *sb)
+void exfat_buf_shutdown(struct super_block *sb)
{
}
-static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
+static int __exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
{
s32 off;
u32 _content;
@@ -202,107 +202,22 @@ static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_fs->vol_type == FAT12) {
- sec = p_fs->FAT1_start_sector +
- ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
- off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
+ sec = p_fs->FAT1_start_sector +
+ (loc >> (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
- if (off == (p_bd->sector_size - 1)) {
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
+ fat_sector = exfat_fat_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
- _content = (u32)fat_sector[off];
+ fat_entry = &fat_sector[off];
+ _content = GET32_A(fat_entry);
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
-
- _content |= (u32)fat_sector[0] << 8;
- } else {
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET16(fat_entry);
- }
-
- if (loc & 1)
- _content >>= 4;
-
- _content &= 0x00000FFF;
-
- if (_content >= CLUSTER_16(0x0FF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == FAT16) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 1));
- off = (loc << 1) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- _content = GET16_A(fat_entry);
-
- _content &= 0x0000FFFF;
-
- if (_content >= CLUSTER_16(0xFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == FAT32) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- _content = GET32_A(fat_entry);
-
- _content &= 0x0FFFFFFF;
-
- if (_content >= CLUSTER_32(0x0FFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
- return 0;
- } else if (p_fs->vol_type == EXFAT) {
- sec = p_fs->FAT1_start_sector +
- (loc >> (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
- _content = GET32_A(fat_entry);
-
- if (_content >= CLUSTER_32(0xFFFFFFF8)) {
- *content = CLUSTER_32(~0);
- return 0;
- }
- *content = CLUSTER_32(_content);
+ if (_content >= CLUSTER_32(0xFFFFFFF8)) {
+ *content = CLUSTER_32(~0);
return 0;
}
-
- /* Unknown volume type, throw in the towel and go home */
- *content = CLUSTER_32(~0);
+ *content = CLUSTER_32(_content);
return 0;
}
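
With the FAT12/16/32 branches gone, __exfat_fat_read() handles only the exFAT layout, where every FAT entry is a 4-byte little-endian value. The index math then reduces to two shifts: a sector holds 2^(sector_size_bits - 2) entries, so the entry's sector is FAT1_start_sector + (loc >> (sector_size_bits - 2)) and its byte offset is (loc << 2) masked to the sector size. A worked standalone sketch, with hypothetical geometry:

	#include <stdio.h>

	/* Hedged sketch of the exFAT FAT-entry addressing above.
	 * Assumes 512-byte sectors (sector_size_bits = 9), so each
	 * FAT sector holds 512 / 4 = 128 entries. */
	int main(void)
	{
		unsigned int sector_size_bits = 9;
		unsigned int sector_size_mask = (1u << sector_size_bits) - 1;
		unsigned int fat1_start = 2048;	/* hypothetical FAT start sector */
		unsigned int loc = 300;		/* cluster whose entry we want */

		unsigned int sec = fat1_start + (loc >> (sector_size_bits - 2));
		unsigned int off = (loc << 2) & sector_size_mask;

		/* 300 / 128 = 2, 300 * 4 mod 512 = 176 */
		printf("entry %u -> sector %u, byte offset %u\n", loc, sec, off);
		return 0;
	}
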
@@ -311,18 +226,18 @@ static int __FAT_read(struct super_block *sb, u32 loc, u32 *content)
* returns 0 on success
* -1 on error
*/
-int FAT_read(struct super_block *sb, u32 loc, u32 *content)
+int exfat_fat_read(struct super_block *sb, u32 loc, u32 *content)
{
s32 ret;
- down(&f_sem);
- ret = __FAT_read(sb, loc, content);
- up(&f_sem);
+ mutex_lock(&f_mutex);
+ ret = __exfat_fat_read(sb, loc, content);
+ mutex_unlock(&f_mutex);
return ret;
}
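
The cache locks are converted from semaphores to mutexes at the same time. f_sem and b_sem were binary semaphores used purely for mutual exclusion, and for that pattern a mutex is the preferred kernel primitive: it has an owner (so lockdep can check it), supports priority inheritance on RT, and has clearer sleeping-context semantics. A minimal sketch of the conversion pattern, mirroring the wrapper shape of exfat_fat_read() above (demo names are hypothetical):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_mutex); /* was: static DEFINE_SEMAPHORE(demo_sem); */

	static int demo_critical(int (*op)(void))
	{
		int ret;

		mutex_lock(&demo_mutex);	/* was: down(&demo_sem); */
		ret = op();
		mutex_unlock(&demo_mutex);	/* was: up(&demo_sem); */
		return ret;
	}
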
-static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
+static s32 __exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
{
s32 off;
sector_t sec;
@@ -330,118 +245,34 @@ static s32 __FAT_write(struct super_block *sb, u32 loc, u32 content)
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_fs->vol_type == FAT12) {
- content &= 0x00000FFF;
-
- sec = p_fs->FAT1_start_sector +
- ((loc + (loc >> 1)) >> p_bd->sector_size_bits);
- off = (loc + (loc >> 1)) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- if (loc & 1) { /* odd */
- content <<= 4;
-
- if (off == (p_bd->sector_size - 1)) {
- fat_sector[off] = (u8)(content |
- (fat_sector[off] &
- 0x0F));
- FAT_modify(sb, sec);
-
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
-
- fat_sector[0] = (u8)(content >> 8);
- } else {
- fat_entry = &fat_sector[off];
- content |= GET16(fat_entry) & 0x000F;
-
- SET16(fat_entry, content);
- }
- } else { /* even */
- fat_sector[off] = (u8)(content);
-
- if (off == (p_bd->sector_size - 1)) {
- fat_sector[off] = (u8)(content);
- FAT_modify(sb, sec);
-
- fat_sector = FAT_getblk(sb, ++sec);
- if (!fat_sector)
- return -1;
- fat_sector[0] = (u8)((fat_sector[0] & 0xF0) |
- (content >> 8));
- } else {
- fat_entry = &fat_sector[off];
- content |= GET16(fat_entry) & 0xF000;
-
- SET16(fat_entry, content);
- }
- }
- }
-
- else if (p_fs->vol_type == FAT16) {
- content &= 0x0000FFFF;
-
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 1));
- off = (loc << 1) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET16_A(fat_entry, content);
- } else if (p_fs->vol_type == FAT32) {
- content &= 0x0FFFFFFF;
+ sec = p_fs->FAT1_start_sector + (loc >>
+ (p_bd->sector_size_bits - 2));
+ off = (loc << 2) & p_bd->sector_size_mask;
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
+ fat_sector = exfat_fat_getblk(sb, sec);
+ if (!fat_sector)
+ return -1;
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
+ fat_entry = &fat_sector[off];
- fat_entry = &fat_sector[off];
-
- content |= GET32_A(fat_entry) & 0xF0000000;
-
- SET32_A(fat_entry, content);
- } else { /* p_fs->vol_type == EXFAT */
- sec = p_fs->FAT1_start_sector + (loc >>
- (p_bd->sector_size_bits - 2));
- off = (loc << 2) & p_bd->sector_size_mask;
-
- fat_sector = FAT_getblk(sb, sec);
- if (!fat_sector)
- return -1;
-
- fat_entry = &fat_sector[off];
-
- SET32_A(fat_entry, content);
- }
+ SET32_A(fat_entry, content);
- FAT_modify(sb, sec);
+ exfat_fat_modify(sb, sec);
return 0;
}
-int FAT_write(struct super_block *sb, u32 loc, u32 content)
+int exfat_fat_write(struct super_block *sb, u32 loc, u32 content)
{
s32 ret;
- down(&f_sem);
- ret = __FAT_write(sb, loc, content);
- up(&f_sem);
+ mutex_lock(&f_mutex);
+ ret = __exfat_fat_write(sb, loc, content);
+ mutex_unlock(&f_mutex);
return ret;
}
-u8 *FAT_getblk(struct super_block *sb, sector_t sec)
+u8 *exfat_fat_getblk(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -462,7 +293,7 @@ u8 *FAT_getblk(struct super_block *sb, sector_t sec)
FAT_cache_insert_hash(sb, bp);
- if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
FAT_cache_remove_hash(bp);
bp->drv = -1;
bp->sec = ~0;
@@ -476,7 +307,7 @@ u8 *FAT_getblk(struct super_block *sb, sector_t sec)
return bp->buf_bh->b_data;
}
-void FAT_modify(struct super_block *sb, sector_t sec)
+void exfat_fat_modify(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
@@ -485,12 +316,12 @@ void FAT_modify(struct super_block *sb, sector_t sec)
sector_write(sb, sec, bp->buf_bh, 0);
}
-void FAT_release_all(struct super_block *sb)
+void exfat_fat_release_all(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&f_sem);
+ mutex_lock(&f_mutex);
bp = p_fs->FAT_cache_lru_list.next;
while (bp != &p_fs->FAT_cache_lru_list) {
@@ -507,15 +338,15 @@ void FAT_release_all(struct super_block *sb)
bp = bp->next;
}
- up(&f_sem);
+ mutex_unlock(&f_mutex);
}
-void FAT_sync(struct super_block *sb)
+void exfat_fat_sync(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&f_sem);
+ mutex_lock(&f_mutex);
bp = p_fs->FAT_cache_lru_list.next;
while (bp != &p_fs->FAT_cache_lru_list) {
@@ -526,7 +357,7 @@ void FAT_sync(struct super_block *sb)
bp = bp->next;
}
- up(&f_sem);
+ mutex_unlock(&f_mutex);
}
static struct buf_cache_t *buf_cache_find(struct super_block *sb, sector_t sec)
@@ -561,7 +392,7 @@ static struct buf_cache_t *buf_cache_get(struct super_block *sb, sector_t sec)
return bp;
}
-static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
+static u8 *__exfat_buf_getblk(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -582,7 +413,7 @@ static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
buf_cache_insert_hash(sb, bp);
- if (sector_read(sb, sec, &bp->buf_bh, 1) != FFS_SUCCESS) {
+ if (sector_read(sb, sec, &bp->buf_bh, 1) != 0) {
buf_cache_remove_hash(bp);
bp->drv = -1;
bp->sec = ~0;
@@ -596,22 +427,22 @@ static u8 *__buf_getblk(struct super_block *sb, sector_t sec)
return bp->buf_bh->b_data;
}
-u8 *buf_getblk(struct super_block *sb, sector_t sec)
+u8 *exfat_buf_getblk(struct super_block *sb, sector_t sec)
{
u8 *buf;
- down(&b_sem);
- buf = __buf_getblk(sb, sec);
- up(&b_sem);
+ mutex_lock(&b_mutex);
+ buf = __exfat_buf_getblk(sb, sec);
+ mutex_unlock(&b_mutex);
return buf;
}
-void buf_modify(struct super_block *sb, sector_t sec)
+void exfat_buf_modify(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -620,14 +451,14 @@ void buf_modify(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_lock(struct super_block *sb, sector_t sec)
+void exfat_buf_lock(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -636,14 +467,14 @@ void buf_lock(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_unlock(struct super_block *sb, sector_t sec)
+void exfat_buf_unlock(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp))
@@ -652,15 +483,15 @@ void buf_unlock(struct super_block *sb, sector_t sec)
WARN(!bp, "[EXFAT] failed to find buffer_cache(sector:%llu).\n",
(unsigned long long)sec);
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_release(struct super_block *sb, sector_t sec)
+void exfat_buf_release(struct super_block *sb, sector_t sec)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = buf_cache_find(sb, sec);
if (likely(bp)) {
@@ -676,15 +507,15 @@ void buf_release(struct super_block *sb, sector_t sec)
move_to_lru(bp, &p_fs->buf_cache_lru_list);
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_release_all(struct super_block *sb)
+void exfat_buf_release_all(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = p_fs->buf_cache_lru_list.next;
while (bp != &p_fs->buf_cache_lru_list) {
@@ -701,15 +532,15 @@ void buf_release_all(struct super_block *sb)
bp = bp->next;
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
-void buf_sync(struct super_block *sb)
+void exfat_buf_sync(struct super_block *sb)
{
struct buf_cache_t *bp;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- down(&b_sem);
+ mutex_lock(&b_mutex);
bp = p_fs->buf_cache_lru_list.next;
while (bp != &p_fs->buf_cache_lru_list) {
@@ -720,5 +551,5 @@ void buf_sync(struct super_block *sb)
bp = bp->next;
}
- up(&b_sem);
+ mutex_unlock(&b_mutex);
}
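
Throughout exfat_cache.c the structure is unchanged by this series: every public helper is a thin wrapper that takes b_mutex (or f_mutex for the FAT cache), does a hash-table lookup keyed by sector, and moves hits to the head of an LRU list. A condensed sketch of that lookup shape (types and names simplified, not the driver's own):

	#include <stddef.h>

	struct demo_cache {			/* stand-in for buf_cache_t */
		unsigned long long sec;
		struct demo_cache *hash_next;
	};

	/* Hypothetical single-bucket lookup mirroring buf_cache_find():
	 * walk the hash chain and return the entry for sec, if cached. */
	static struct demo_cache *demo_find(struct demo_cache *bucket,
					    unsigned long long sec)
	{
		struct demo_cache *bp;

		for (bp = bucket; bp; bp = bp->hash_next)
			if (bp->sec == sec)
				return bp; /* caller then moves it to LRU head */
		return NULL;
	}
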
diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c
index 79174e5c4145..d2d3447083c7 100644
--- a/drivers/staging/exfat/exfat_core.c
+++ b/drivers/staging/exfat/exfat_core.c
@@ -20,15 +20,6 @@ static void __set_sb_dirty(struct super_block *sb)
static u8 name_buf[MAX_PATH_LENGTH * MAX_CHARSET_SIZE];
-static char *reserved_names[] = {
- "AUX ", "CON ", "NUL ", "PRN ",
- "COM1 ", "COM2 ", "COM3 ", "COM4 ",
- "COM5 ", "COM6 ", "COM7 ", "COM8 ", "COM9 ",
- "LPT1 ", "LPT2 ", "LPT3 ", "LPT4 ",
- "LPT5 ", "LPT6 ", "LPT7 ", "LPT8 ", "LPT9 ",
- NULL
-};
-
static u8 free_bit[] = {
0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, /* 0 ~ 19 */
0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, /* 20 ~ 39 */
@@ -99,25 +90,23 @@ void fs_set_vol_flags(struct super_block *sb, u32 new_flag)
p_fs->vol_flag = new_flag;
- if (p_fs->vol_type == EXFAT) {
- if (!p_fs->pbr_bh) {
- if (sector_read(sb, p_fs->PBR_sector,
- &p_fs->pbr_bh, 1) != FFS_SUCCESS)
- return;
- }
+ if (!p_fs->pbr_bh) {
+ if (sector_read(sb, p_fs->PBR_sector,
+ &p_fs->pbr_bh, 1) != 0)
+ return;
+ }
- p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
- p_bpb = (struct bpbex_t *)p_pbr->bpb;
- SET16(p_bpb->vol_flags, (u16)new_flag);
+ p_pbr = (struct pbr_sector_t *)p_fs->pbr_bh->b_data;
+ p_bpb = (struct bpbex_t *)p_pbr->bpb;
+ SET16(p_bpb->vol_flags, (u16)new_flag);
- /* XXX duyoung
- * what can we do here? (cuz fs_set_vol_flags() is void)
- */
- if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
- else
- sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
- }
+ /* XXX duyoung
+ * what can we do here? (cuz fs_set_vol_flags() is void)
+ */
+ if ((new_flag == VOL_DIRTY) && (!buffer_dirty(p_fs->pbr_bh)))
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 1);
+ else
+ sector_write(sb, p_fs->PBR_sector, p_fs->pbr_bh, 0);
}
void fs_error(struct super_block *sb)
@@ -136,10 +125,10 @@ void fs_error(struct super_block *sb)
* Cluster Management Functions
*/
-s32 clear_cluster(struct super_block *sb, u32 clu)
+static s32 clear_cluster(struct super_block *sb, u32 clu)
{
sector_t s, n;
- s32 ret = FFS_SUCCESS;
+ s32 ret = 0;
struct buffer_head *tmp_bh = NULL;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -154,12 +143,12 @@ s32 clear_cluster(struct super_block *sb, u32 clu)
for (; s < n; s++) {
ret = sector_read(sb, s, &tmp_bh, 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
memset((char *)tmp_bh->b_data, 0x0, p_bd->sector_size);
ret = sector_write(sb, s, tmp_bh, 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
break;
}
@@ -167,61 +156,98 @@ s32 clear_cluster(struct super_block *sb, u32 clu)
return ret;
}
-s32 fat_alloc_cluster(struct super_block *sb, s32 num_alloc,
- struct chain_t *p_chain)
+static s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
{
- int i, num_clusters = 0;
- u32 new_clu, last_clu = CLUSTER_32(~0), read_clu;
+ int i, b;
+ sector_t sector;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- new_clu = p_chain->dir;
- if (new_clu == CLUSTER_32(~0))
- new_clu = p_fs->clu_srch_ptr;
- else if (new_clu >= p_fs->num_clusters)
- new_clu = 2;
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
- __set_sb_dirty(sb);
+ sector = START_SECTOR(p_fs->map_clu) + i;
- p_chain->dir = CLUSTER_32(~0);
+ exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
- for (i = 2; i < p_fs->num_clusters; i++) {
- if (FAT_read(sb, new_clu, &read_clu) != 0)
- return -1;
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+}
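
set_alloc_bitmap(), moved here and made static, locates a cluster's bit in the on-disk allocation bitmap: each bitmap sector covers sector_size * 8 clusters, so i = clu >> (sector_size_bits + 3) picks the bitmap sector and b = clu & ((sector_size << 3) - 1) the bit within it. Worked numbers, assuming 512-byte sectors:

	#include <stdio.h>

	/* Hedged arithmetic check for the bitmap indexing above:
	 * with 512-byte sectors each bitmap sector covers 4096 clusters. */
	int main(void)
	{
		unsigned int sector_size_bits = 9;
		unsigned int sector_size = 1u << sector_size_bits;
		unsigned int clu = 5000;

		unsigned int i = clu >> (sector_size_bits + 3);  /* 5000/4096 = 1 */
		unsigned int b = clu & ((sector_size << 3) - 1); /* 5000%4096 = 904 */

		printf("cluster %u -> bitmap sector %u, bit %u\n", clu, i, b);
		return 0;
	}
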
- if (read_clu == CLUSTER_32(0)) {
- if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -1;
- num_clusters++;
+static s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, b;
+ sector_t sector;
+#ifdef CONFIG_EXFAT_DISCARD
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_mount_options *opts = &sbi->options;
+ int ret;
+#endif /* CONFIG_EXFAT_DISCARD */
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- if (p_chain->dir == CLUSTER_32(~0)) {
- p_chain->dir = new_clu;
- } else {
- if (FAT_write(sb, last_clu, new_clu) < 0)
- return -1;
- }
+ i = clu >> (p_bd->sector_size_bits + 3);
+ b = clu & ((p_bd->sector_size << 3) - 1);
- last_clu = new_clu;
+ sector = START_SECTOR(p_fs->map_clu) + i;
- if ((--num_alloc) == 0) {
- p_fs->clu_srch_ptr = new_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
+ exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
- return num_clusters;
- }
+ return sector_write(sb, sector, p_fs->vol_amap[i], 0);
+
+#ifdef CONFIG_EXFAT_DISCARD
+ if (opts->discard) {
+ ret = sb_issue_discard(sb, START_SECTOR(clu),
+ (1 << p_fs->sectors_per_clu_bits),
+ GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ pr_warn("discard not supported by device, disabling");
+ opts->discard = 0;
}
- if ((++new_clu) >= p_fs->num_clusters)
- new_clu = 2;
}
+#endif /* CONFIG_EXFAT_DISCARD */
+}
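
Note that in clr_alloc_bitmap() as moved above, the CONFIG_EXFAT_DISCARD block sits after the return sector_write(...), so the discard request can never be issued. If the discard behaviour is wanted, the write-back result has to be captured first (with ret declared outside the #ifdef). A corrected sketch of the function's tail, shape only, using the same helpers:

	exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);

	ret = sector_write(sb, sector, p_fs->vol_amap[i], 0);

	#ifdef CONFIG_EXFAT_DISCARD
	if (opts->discard) {
		int err = sb_issue_discard(sb, START_SECTOR(clu),
					   1 << p_fs->sectors_per_clu_bits,
					   GFP_NOFS, 0);
		if (err == -EOPNOTSUPP) {
			pr_warn("discard not supported by device, disabling\n");
			opts->discard = 0;
		}
	}
	#endif /* CONFIG_EXFAT_DISCARD */

	return ret;
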
- p_fs->clu_srch_ptr = new_clu;
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters += num_clusters;
+static u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
+{
+ int i, map_i, map_b;
+ u32 clu_base, clu_free;
+ u8 k, clu_mask;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- return num_clusters;
+ clu_base = (clu & ~(0x7)) + 2;
+ clu_mask = (1 << (clu - clu_base + 2)) - 1;
+
+ map_i = clu >> (p_bd->sector_size_bits + 3);
+ map_b = (clu >> 3) & p_bd->sector_size_mask;
+
+ for (i = 2; i < p_fs->num_clusters; i += 8) {
+ k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
+ if (clu_mask > 0) {
+ k |= clu_mask;
+ clu_mask = 0;
+ }
+ if (k < 0xFF) {
+ clu_free = clu_base + free_bit[k];
+ if (clu_free < p_fs->num_clusters)
+ return clu_free;
+ }
+ clu_base += 8;
+
+ if (((++map_b) >= p_bd->sector_size) ||
+ (clu_base >= p_fs->num_clusters)) {
+ if ((++map_i) >= p_fs->map_sectors) {
+ clu_base = 2;
+ map_i = 0;
+ }
+ map_b = 0;
+ }
+ }
+
+ return CLUSTER_32(~0);
}
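
test_alloc_bitmap() scans the bitmap a byte at a time: free_bit[k] (the lookup table kept at the top of exfat_core.c) gives the position of the lowest clear bit in byte k, so any byte below 0xFF immediately yields the first free cluster in that group of eight. The table's semantics can be cross-checked against a direct computation:

	#include <stdio.h>

	/* Hedged cross-check of the free_bit[] semantics: position of the
	 * lowest zero bit in a byte (the value for 0xFF is never used,
	 * since a fully allocated byte is skipped by the k < 0xFF test). */
	static unsigned int lowest_zero_bit(unsigned int k)
	{
		unsigned int n = 0;

		while (k & 1) {
			k >>= 1;
			n++;
		}
		return n;
	}

	int main(void)
	{
		/* 0x17 = 0b10111: bits 0..2 set, bit 3 clear */
		printf("%u\n", lowest_zero_bit(0x17));	/* prints 3 */
		return 0;
	}
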
-s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
+static s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
struct chain_t *p_chain)
{
s32 num_clusters = 0;
@@ -251,22 +277,22 @@ s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
}
}
- if (set_alloc_bitmap(sb, new_clu - 2) != FFS_SUCCESS)
- return -1;
+ if (set_alloc_bitmap(sb, new_clu - 2) != 0)
+ return -EIO;
num_clusters++;
if (p_chain->flags == 0x01) {
- if (FAT_write(sb, new_clu, CLUSTER_32(~0)) < 0)
- return -1;
+ if (exfat_fat_write(sb, new_clu, CLUSTER_32(~0)) < 0)
+ return -EIO;
}
if (p_chain->dir == CLUSTER_32(~0)) {
p_chain->dir = new_clu;
} else {
if (p_chain->flags == 0x01) {
- if (FAT_write(sb, last_clu, new_clu) < 0)
- return -1;
+ if (exfat_fat_write(sb, last_clu, new_clu) < 0)
+ return -EIO;
}
}
last_clu = new_clu;
@@ -300,48 +326,7 @@ s32 exfat_alloc_cluster(struct super_block *sb, s32 num_alloc,
return num_clusters;
}
-void fat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
- s32 do_relse)
-{
- s32 num_clusters = 0;
- u32 clu, prev;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int i;
- sector_t sector;
-
- if ((p_chain->dir == CLUSTER_32(0)) || (p_chain->dir == CLUSTER_32(~0)))
- return;
- __set_sb_dirty(sb);
- clu = p_chain->dir;
-
- if (p_chain->size <= 0)
- return;
-
- do {
- if (p_fs->dev_ejected)
- break;
-
- if (do_relse) {
- sector = START_SECTOR(clu);
- for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
- }
-
- prev = clu;
- if (FAT_read(sb, clu, &clu) == -1)
- break;
-
- if (FAT_write(sb, prev, CLUSTER_32(0)) < 0)
- break;
- num_clusters++;
-
- } while (clu != CLUSTER_32(~0));
-
- if (p_fs->used_clusters != UINT_MAX)
- p_fs->used_clusters -= num_clusters;
-}
-
-void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
+static void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
s32 do_relse)
{
s32 num_clusters = 0;
@@ -367,10 +352,10 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
if (do_relse) {
sector = START_SECTOR(clu);
for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
+ exfat_buf_release(sb, sector + i);
}
- if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ if (clr_alloc_bitmap(sb, clu - 2) != 0)
break;
clu++;
@@ -384,13 +369,13 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
if (do_relse) {
sector = START_SECTOR(clu);
for (i = 0; i < p_fs->sectors_per_clu; i++)
- buf_release(sb, sector + i);
+ exfat_buf_release(sb, sector + i);
}
- if (clr_alloc_bitmap(sb, clu - 2) != FFS_SUCCESS)
+ if (clr_alloc_bitmap(sb, clu - 2) != 0)
break;
- if (FAT_read(sb, clu, &clu) == -1)
+ if (exfat_fat_read(sb, clu, &clu) == -1)
break;
num_clusters++;
} while ((clu != CLUSTER_32(0)) && (clu != CLUSTER_32(~0)));
@@ -400,7 +385,7 @@ void exfat_free_cluster(struct super_block *sb, struct chain_t *p_chain,
p_fs->used_clusters -= num_clusters;
}
-u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
+static u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
{
u32 clu, next;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -410,7 +395,7 @@ u32 find_last_cluster(struct super_block *sb, struct chain_t *p_chain)
if (p_chain->flags == 0x03) {
clu += p_chain->size - 1;
} else {
- while ((FAT_read(sb, clu, &next) == 0) &&
+ while ((exfat_fat_read(sb, clu, &next) == 0) &&
(next != CLUSTER_32(~0))) {
if (p_fs->dev_ejected)
break;
@@ -437,7 +422,7 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
} else {
for (i = 2; i < p_fs->num_clusters; i++) {
count++;
- if (FAT_read(sb, clu, &clu) != 0)
+ if (exfat_fat_read(sb, clu, &clu) != 0)
return 0;
if (clu == CLUSTER_32(~0))
break;
@@ -447,30 +432,15 @@ s32 count_num_clusters(struct super_block *sb, struct chain_t *p_chain)
return count;
}
-s32 fat_count_used_clusters(struct super_block *sb)
-{
- int i, count = 0;
- u32 clu;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (i = 2; i < p_fs->num_clusters; i++) {
- if (FAT_read(sb, i, &clu) != 0)
- break;
- if (clu != CLUSTER_32(0))
- count++;
- }
-
- return count;
-}
-
-s32 exfat_count_used_clusters(struct super_block *sb)
+static s32 exfat_count_used_clusters(struct super_block *sb)
{
int i, map_i, map_b, count = 0;
u8 k;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- map_i = map_b = 0;
+ map_i = 0;
+ map_b = 0;
for (i = 2; i < p_fs->num_clusters; i += 8) {
k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
@@ -491,12 +461,12 @@ void exfat_chain_cont_cluster(struct super_block *sb, u32 chain, s32 len)
return;
while (len > 1) {
- if (FAT_write(sb, chain, chain + 1) < 0)
+ if (exfat_fat_write(sb, chain, chain + 1) < 0)
break;
chain++;
len--;
}
- FAT_write(sb, chain, CLUSTER_32(~0));
+ exfat_fat_write(sb, chain, CLUSTER_32(~0));
}
/*
@@ -525,7 +495,7 @@ s32 load_alloc_bitmap(struct super_block *sb)
ep = (struct bmap_dentry_t *)get_entry_in_dir(sb, &clu,
i, NULL);
if (!ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
@@ -544,14 +514,14 @@ s32 load_alloc_bitmap(struct super_block *sb)
sizeof(struct buffer_head *),
GFP_KERNEL);
if (!p_fs->vol_amap)
- return FFS_MEMORYERR;
+ return -ENOMEM;
sector = START_SECTOR(p_fs->map_clu);
for (j = 0; j < p_fs->map_sectors; j++) {
p_fs->vol_amap[j] = NULL;
- ret = sector_read(sb, sector + j, &(p_fs->vol_amap[j]), 1);
- if (ret != FFS_SUCCESS) {
+ ret = sector_read(sb, sector + j, &p_fs->vol_amap[j], 1);
+ if (ret != 0) {
/* release all buffers and free vol_amap */
i = 0;
while (i < j)
@@ -564,15 +534,15 @@ s32 load_alloc_bitmap(struct super_block *sb)
}
p_fs->pbr_bh = NULL;
- return FFS_SUCCESS;
+ return 0;
}
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
- return FFS_FORMATERR;
+ return -EFSCORRUPTED;
}
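
load_alloc_bitmap() shows the error-code mapping applied throughout the series: FFS_SUCCESS becomes 0, FFS_MEDIAERR becomes -EIO (or -ENOENT where a directory entry is simply missing), FFS_MEMORYERR becomes -ENOMEM, FFS_FORMATERR becomes -EFSCORRUPTED, and FFS_ERROR becomes -EINVAL. A hypothetical translation helper summarizing the conversions (the FFS_* numeric values below are illustrative; the driver returns errnos directly rather than translating at a boundary):

	#include <linux/errno.h>
	#include <linux/fs.h>	/* EFSCORRUPTED */

	static int ffs_to_errno(int ffs)
	{
		switch (ffs) {
		case 0: /* FFS_SUCCESS */	return 0;
		case 1: /* FFS_MEDIAERR */	return -EIO; /* or -ENOENT */
		case 2: /* FFS_FORMATERR */	return -EFSCORRUPTED;
		case 3: /* FFS_MEMORYERR */	return -ENOMEM;
		default: /* FFS_ERROR */	return -EINVAL;
		}
	}
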
void free_alloc_bitmap(struct super_block *sb)
@@ -589,97 +559,6 @@ void free_alloc_bitmap(struct super_block *sb)
p_fs->vol_amap = NULL;
}
-s32 set_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_set((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-}
-
-s32 clr_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, b;
- sector_t sector;
-#ifdef CONFIG_EXFAT_DISCARD
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
- int ret;
-#endif /* CONFIG_EXFAT_DISCARD */
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- i = clu >> (p_bd->sector_size_bits + 3);
- b = clu & ((p_bd->sector_size << 3) - 1);
-
- sector = START_SECTOR(p_fs->map_clu) + i;
-
- exfat_bitmap_clear((u8 *)p_fs->vol_amap[i]->b_data, b);
-
- return sector_write(sb, sector, p_fs->vol_amap[i], 0);
-
-#ifdef CONFIG_EXFAT_DISCARD
- if (opts->discard) {
- ret = sb_issue_discard(sb, START_SECTOR(clu),
- (1 << p_fs->sectors_per_clu_bits),
- GFP_NOFS, 0);
- if (ret == -EOPNOTSUPP) {
- pr_warn("discard not supported by device, disabling");
- opts->discard = 0;
- }
- }
-#endif /* CONFIG_EXFAT_DISCARD */
-}
-
-u32 test_alloc_bitmap(struct super_block *sb, u32 clu)
-{
- int i, map_i, map_b;
- u32 clu_base, clu_free;
- u8 k, clu_mask;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- clu_base = (clu & ~(0x7)) + 2;
- clu_mask = (1 << (clu - clu_base + 2)) - 1;
-
- map_i = clu >> (p_bd->sector_size_bits + 3);
- map_b = (clu >> 3) & p_bd->sector_size_mask;
-
- for (i = 2; i < p_fs->num_clusters; i += 8) {
- k = *(((u8 *)p_fs->vol_amap[map_i]->b_data) + map_b);
- if (clu_mask > 0) {
- k |= clu_mask;
- clu_mask = 0;
- }
- if (k < 0xFF) {
- clu_free = clu_base + free_bit[k];
- if (clu_free < p_fs->num_clusters)
- return clu_free;
- }
- clu_base += 8;
-
- if (((++map_b) >= p_bd->sector_size) ||
- (clu_base >= p_fs->num_clusters)) {
- if ((++map_i) >= p_fs->map_sectors) {
- clu_base = 2;
- map_i = 0;
- }
- map_b = 0;
- }
- }
-
- return CLUSTER_32(~0);
-}
-
void sync_alloc_bitmap(struct super_block *sb)
{
int i;
@@ -698,7 +577,7 @@ void sync_alloc_bitmap(struct super_block *sb)
static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
u32 num_sectors, u32 utbl_checksum)
{
- int i, ret = FFS_ERROR;
+ int i, ret = -EINVAL;
u32 j;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
@@ -712,15 +591,15 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
u32 checksum = 0;
- upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
- GFP_KERNEL);
+ upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
+ p_fs->vol_utbl = upcase_table;
if (!upcase_table)
- return FFS_MEMORYERR;
+ return -ENOMEM;
memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
while (sector < end_sector) {
ret = sector_read(sb, sector, &tmp_bh, 1);
- if (ret != FFS_SUCCESS) {
+ if (ret != 0) {
pr_debug("sector read (0x%llX)fail\n",
(unsigned long long)sector);
goto error;
@@ -755,7 +634,7 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
upcase_table[col_index] = kmalloc_array(UTBL_ROW_COUNT,
sizeof(u16), GFP_KERNEL);
if (!upcase_table[col_index]) {
- ret = FFS_MEMORYERR;
+ ret = -ENOMEM;
goto error;
}
@@ -771,9 +650,9 @@ static s32 __load_upcase_table(struct super_block *sb, sector_t sector,
if (index >= 0xFFFF && utbl_checksum == checksum) {
if (tmp_bh)
brelse(tmp_bh);
- return FFS_SUCCESS;
+ return 0;
}
- ret = FFS_ERROR;
+ ret = -EINVAL;
error:
if (tmp_bh)
brelse(tmp_bh);
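
The upcase-table allocation above also moves from an open-coded kmalloc(UTBL_COL_COUNT * sizeof(u16 *), ...) to kmalloc_array(), which returns NULL if the count-times-size multiplication would overflow instead of silently allocating a short buffer; the assignment to p_fs->vol_utbl is split out of the chained a = b = kmalloc(...) at the same time. The general shape of the conversion (demo_alloc() and its count are hypothetical):

	#include <linux/types.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static u16 **demo_alloc(size_t count)
	{
		u16 **table;

		/* was: table = kmalloc(count * sizeof(u16 *), GFP_KERNEL); */
		table = kmalloc_array(count, sizeof(u16 *), GFP_KERNEL);
		if (!table)
			return NULL;	/* also hit on multiplication overflow */

		memset(table, 0, count * sizeof(u16 *));
		return table;
	}
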
@@ -783,7 +662,7 @@ error:
static s32 __load_default_upcase_table(struct super_block *sb)
{
- int i, ret = FFS_ERROR;
+ int i, ret = -EINVAL;
u32 j;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
@@ -792,10 +671,10 @@ static s32 __load_default_upcase_table(struct super_block *sb)
u16 uni = 0;
u16 **upcase_table;
- upcase_table = p_fs->vol_utbl = kmalloc(UTBL_COL_COUNT * sizeof(u16 *),
- GFP_KERNEL);
+ upcase_table = kmalloc_array(UTBL_COL_COUNT, sizeof(u16 *), GFP_KERNEL);
+ p_fs->vol_utbl = upcase_table;
if (!upcase_table)
- return FFS_MEMORYERR;
+ return -ENOMEM;
memset(upcase_table, 0, UTBL_COL_COUNT * sizeof(u16 *));
for (i = 0; index <= 0xFFFF && i < NUM_UPCASE * 2; i += 2) {
@@ -818,7 +697,7 @@ static s32 __load_default_upcase_table(struct super_block *sb)
sizeof(u16),
GFP_KERNEL);
if (!upcase_table[col_index]) {
- ret = FFS_MEMORYERR;
+ ret = -ENOMEM;
goto error;
}
@@ -832,7 +711,7 @@ static s32 __load_default_upcase_table(struct super_block *sb)
}
if (index >= 0xFFFF)
- return FFS_SUCCESS;
+ return 0;
error:
/* FATAL error: default upcase table has error */
@@ -855,14 +734,14 @@ s32 load_upcase_table(struct super_block *sb)
clu.flags = 0x01;
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
+ return -EIO;
while (clu.dir != CLUSTER_32(~0)) {
for (i = 0; i < p_fs->dentries_per_clu; i++) {
ep = (struct case_dentry_t *)get_entry_in_dir(sb, &clu,
i, NULL);
if (!ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
type = p_fs->fs_func->get_entry_type((struct dentry_t *)ep);
@@ -877,12 +756,12 @@ s32 load_upcase_table(struct super_block *sb)
sector = START_SECTOR(tbl_clu);
num_sectors = ((tbl_size - 1) >> p_bd->sector_size_bits) + 1;
if (__load_upcase_table(sb, sector, num_sectors,
- GET32_A(ep->checksum)) != FFS_SUCCESS)
+ GET32_A(ep->checksum)) != 0)
break;
- return FFS_SUCCESS;
+ return 0;
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
/* load default upcase table */
return __load_default_upcase_table(sb);
@@ -906,29 +785,7 @@ void free_upcase_table(struct super_block *sb)
* Directory Entry Management Functions
*/
-u32 fat_get_entry_type(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- if (*(ep->name) == 0x0)
- return TYPE_UNUSED;
-
- else if (*(ep->name) == 0xE5)
- return TYPE_DELETED;
-
- else if (ep->attr == ATTR_EXTEND)
- return TYPE_EXTEND;
-
- else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_VOLUME)
- return TYPE_VOLUME;
-
- else if ((ep->attr & (ATTR_SUBDIR | ATTR_VOLUME)) == ATTR_SUBDIR)
- return TYPE_DIR;
-
- return TYPE_FILE;
-}
-
-u32 exfat_get_entry_type(struct dentry_t *p_entry)
+static u32 exfat_get_entry_type(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -973,30 +830,7 @@ u32 exfat_get_entry_type(struct dentry_t *p_entry)
return TYPE_BENIGN_SEC;
}
-void fat_set_entry_type(struct dentry_t *p_entry, u32 type)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- if (type == TYPE_UNUSED)
- *(ep->name) = 0x0;
-
- else if (type == TYPE_DELETED)
- *(ep->name) = 0xE5;
-
- else if (type == TYPE_EXTEND)
- ep->attr = ATTR_EXTEND;
-
- else if (type == TYPE_DIR)
- ep->attr = ATTR_SUBDIR;
-
- else if (type == TYPE_FILE)
- ep->attr = ATTR_ARCHIVE;
-
- else if (type == TYPE_SYMLINK)
- ep->attr = ATTR_ARCHIVE | ATTR_SYMLINK;
-}
-
-void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
+static void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
@@ -1026,109 +860,56 @@ void exfat_set_entry_type(struct dentry_t *p_entry, u32 type)
}
}
-u32 fat_get_entry_attr(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return (u32)ep->attr;
-}
-
-u32 exfat_get_entry_attr(struct dentry_t *p_entry)
+static u32 exfat_get_entry_attr(struct dentry_t *p_entry)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
return (u32)GET16_A(ep->attr);
}
-void fat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- ep->attr = (u8)attr;
-}
-
-void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
+static void exfat_set_entry_attr(struct dentry_t *p_entry, u32 attr)
{
struct file_dentry_t *ep = (struct file_dentry_t *)p_entry;
SET16_A(ep->attr, (u16)attr);
}
-u8 fat_get_entry_flag(struct dentry_t *p_entry)
-{
- return 0x01;
-}
-
-u8 exfat_get_entry_flag(struct dentry_t *p_entry)
+static u8 exfat_get_entry_flag(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return ep->flags;
}
-void fat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
-{
-}
-
-void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
+static void exfat_set_entry_flag(struct dentry_t *p_entry, u8 flags)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
ep->flags = flags;
}
-u32 fat_get_entry_clu0(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return ((u32)GET16_A(ep->start_clu_hi) << 16) |
- GET16_A(ep->start_clu_lo);
-}
-
-u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
+static u32 exfat_get_entry_clu0(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET32_A(ep->start_clu);
}
-void fat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
- SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
-}
-
-void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
+static void exfat_set_entry_clu0(struct dentry_t *p_entry, u32 start_clu)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
SET32_A(ep->start_clu, start_clu);
}
-u64 fat_get_entry_size(struct dentry_t *p_entry)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- return (u64)GET32_A(ep->size);
-}
-
-u64 exfat_get_entry_size(struct dentry_t *p_entry)
+static u64 exfat_get_entry_size(struct dentry_t *p_entry)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
return GET64_A(ep->valid_size);
}
-void fat_set_entry_size(struct dentry_t *p_entry, u64 size)
-{
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- SET32_A(ep->size, (u32)size);
-}
-
-void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
+static void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
{
struct strm_dentry_t *ep = (struct strm_dentry_t *)p_entry;
@@ -1136,32 +917,7 @@ void exfat_set_entry_size(struct dentry_t *p_entry, u64 size)
SET64_A(ep->size, size);
}
-void fat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t = 0x00, d = 0x21;
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- switch (mode) {
- case TM_CREATE:
- t = GET16_A(ep->create_time);
- d = GET16_A(ep->create_date);
- break;
- case TM_MODIFY:
- t = GET16_A(ep->modify_time);
- d = GET16_A(ep->modify_date);
- break;
- }
-
- tp->sec = (t & 0x001F) << 1;
- tp->min = (t >> 5) & 0x003F;
- tp->hour = (t >> 11);
- tp->day = (d & 0x001F);
- tp->mon = (d >> 5) & 0x000F;
- tp->year = (d >> 9);
-}
-
-void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+static void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t = 0x00, d = 0x21;
@@ -1190,28 +946,7 @@ void exfat_get_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
tp->year = (d >> 9);
}
-void fat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
- u8 mode)
-{
- u16 t, d;
- struct dos_dentry_t *ep = (struct dos_dentry_t *)p_entry;
-
- t = (tp->hour << 11) | (tp->min << 5) | (tp->sec >> 1);
- d = (tp->year << 9) | (tp->mon << 5) | tp->day;
-
- switch (mode) {
- case TM_CREATE:
- SET16_A(ep->create_time, t);
- SET16_A(ep->create_date, d);
- break;
- case TM_MODIFY:
- SET16_A(ep->modify_time, t);
- SET16_A(ep->modify_date, d);
- break;
- }
-}
-
-void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
+static void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
u8 mode)
{
u16 t, d;
@@ -1236,24 +971,46 @@ void exfat_set_entry_time(struct dentry_t *p_entry, struct timestamp_t *tp,
}
}
-s32 fat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir, s32 entry,
- u32 type, u32 start_clu, u64 size)
+static void init_file_entry(struct file_dentry_t *ep, u32 type)
{
- sector_t sector;
- struct dos_dentry_t *dos_ep;
+ struct timestamp_t tm, *tp;
- dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!dos_ep)
- return FFS_MEDIAERR;
+ exfat_set_entry_type((struct dentry_t *)ep, type);
- init_dos_entry(dos_ep, type, start_clu);
- buf_modify(sb, sector);
+ tp = tm_current(&tm);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
+ exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
+ ep->create_time_ms = 0;
+ ep->modify_time_ms = 0;
+ ep->access_time_ms = 0;
+}
- return FFS_SUCCESS;
+static void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
+{
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
+ ep->flags = flags;
+ SET32_A(ep->start_clu, start_clu);
+ SET64_A(ep->valid_size, size);
+ SET64_A(ep->size, size);
}
-s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+static void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
+{
+ int i;
+
+ exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
+ ep->flags = 0x0;
+
+	for (i = 0; i < 30; i += 2) {
+ SET16_A(ep->unicode_0_14 + i, *uniname);
+ if (*uniname == 0x0)
+ break;
+ uniname++;
+ }
+}
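
init_name_entry(), moved up and made static, packs up to 15 UTF-16 code units into one exFAT name entry: unicode_0_14 is a 30-byte field, the loop advances two bytes per character, and each unit is stored little-endian via SET16_A, stopping at the NUL (the caller advances uniname by 15 per entry). A standalone userspace sketch of the packing, with put_le16() standing in for the driver's SET16_A():

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void put_le16(uint8_t *p, uint16_t v)
	{
		p[0] = v & 0xFF;
		p[1] = v >> 8;
	}

	int main(void)
	{
		uint8_t unicode_0_14[30];
		const uint16_t name[] = { 'f', 'o', 'o', 0 };
		int i;

		memset(unicode_0_14, 0, sizeof(unicode_0_14));
		for (i = 0; i < 30; i += 2) {
			put_le16(unicode_0_14 + i, name[i / 2]);
			if (name[i / 2] == 0)
				break;
		}
		printf("first byte: 0x%02x\n", unicode_0_14[0]); /* 0x66 ('f') */
		return 0;
	}
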
+
+static s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
s32 entry, u32 type, u32 start_clu, u64 size)
{
sector_t sector;
@@ -1267,71 +1024,20 @@ s32 exfat_init_dir_entry(struct super_block *sb, struct chain_t *p_dir,
file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
&sector);
if (!file_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
&sector);
if (!strm_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
init_file_entry(file_ep, type);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
init_strm_entry(strm_ep, flags, start_clu, size);
- buf_modify(sb, sector);
-
- return FFS_SUCCESS;
-}
-
-static s32 fat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 num_entries,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i;
- sector_t sector;
- u8 chksum;
- u16 *uniname = p_uniname->name;
- struct dos_dentry_t *dos_ep;
- struct ext_dentry_t *ext_ep;
-
- dos_ep = (struct dos_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- &sector);
- if (!dos_ep)
- return FFS_MEDIAERR;
-
- dos_ep->lcase = p_dosname->name_case;
- memcpy(dos_ep->name, p_dosname->name, DOS_NAME_LENGTH);
- buf_modify(sb, sector);
-
- if ((--num_entries) > 0) {
- chksum = calc_checksum_1byte((void *)dos_ep->name,
- DOS_NAME_LENGTH, 0);
-
- for (i = 1; i < num_entries; i++) {
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb,
- p_dir,
- entry - i,
- &sector);
- if (!ext_ep)
- return FFS_MEDIAERR;
-
- init_ext_entry(ext_ep, i, chksum, uniname);
- buf_modify(sb, sector);
- uniname += 13;
- }
-
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry - i,
- &sector);
- if (!ext_ep)
- return FFS_MEDIAERR;
+ exfat_buf_modify(sb, sector);
- init_ext_entry(ext_ep, i + 0x40, chksum, uniname);
- buf_modify(sb, sector);
- }
-
- return FFS_SUCCESS;
+ return 0;
}
static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
@@ -1349,160 +1055,39 @@ static s32 exfat_init_ext_entry(struct super_block *sb, struct chain_t *p_dir,
file_ep = (struct file_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
&sector);
if (!file_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
file_ep->num_ext = (u8)(num_entries - 1);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
strm_ep = (struct strm_dentry_t *)get_entry_in_dir(sb, p_dir, entry + 1,
&sector);
if (!strm_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
strm_ep->name_len = p_uniname->name_len;
SET16_A(strm_ep->name_hash, p_uniname->name_hash);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
for (i = 2; i < num_entries; i++) {
name_ep = (struct name_dentry_t *)get_entry_in_dir(sb, p_dir,
entry + i,
&sector);
if (!name_ep)
- return FFS_MEDIAERR;
+ return -ENOENT;
init_name_entry(name_ep, uniname);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
uniname += 15;
}
update_dir_checksum(sb, p_dir, entry);
- return FFS_SUCCESS;
-}
-
-void init_dos_entry(struct dos_dentry_t *ep, u32 type, u32 start_clu)
-{
- struct timestamp_t tm, *tp;
-
- fat_set_entry_type((struct dentry_t *)ep, type);
- SET16_A(ep->start_clu_lo, CLUSTER_16(start_clu));
- SET16_A(ep->start_clu_hi, CLUSTER_16(start_clu >> 16));
- SET32_A(ep->size, 0);
-
- tp = tm_current(&tm);
- fat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- fat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- SET16_A(ep->access_date, 0);
- ep->create_time_ms = 0;
-}
-
-void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname)
-{
- int i;
- bool end = false;
-
- fat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->order = (u8)order;
- ep->sysid = 0;
- ep->checksum = chksum;
- SET16_A(ep->start_clu, 0);
-
- for (i = 0; i < 10; i += 2) {
- if (!end) {
- SET16(ep->unicode_0_4 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16(ep->unicode_0_4 + i, 0xFFFF);
- }
- }
-
- for (i = 0; i < 12; i += 2) {
- if (!end) {
- SET16_A(ep->unicode_5_10 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16_A(ep->unicode_5_10 + i, 0xFFFF);
- }
- }
-
- for (i = 0; i < 4; i += 2) {
- if (!end) {
- SET16_A(ep->unicode_11_12 + i, *uniname);
- if (*uniname == 0x0)
- end = true;
- else
- uniname++;
- } else {
- SET16_A(ep->unicode_11_12 + i, 0xFFFF);
- }
- }
-}
-
-void init_file_entry(struct file_dentry_t *ep, u32 type)
-{
- struct timestamp_t tm, *tp;
-
- exfat_set_entry_type((struct dentry_t *)ep, type);
-
- tp = tm_current(&tm);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_CREATE);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_MODIFY);
- exfat_set_entry_time((struct dentry_t *)ep, tp, TM_ACCESS);
- ep->create_time_ms = 0;
- ep->modify_time_ms = 0;
- ep->access_time_ms = 0;
-}
-
-void init_strm_entry(struct strm_dentry_t *ep, u8 flags, u32 start_clu, u64 size)
-{
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_STREAM);
- ep->flags = flags;
- SET32_A(ep->start_clu, start_clu);
- SET64_A(ep->valid_size, size);
- SET64_A(ep->size, size);
-}
-
-void init_name_entry(struct name_dentry_t *ep, u16 *uniname)
-{
- int i;
-
- exfat_set_entry_type((struct dentry_t *)ep, TYPE_EXTEND);
- ep->flags = 0x0;
-
- for (i = 0; i < 30; i++, i++) {
- SET16_A(ep->unicode_0_14 + i, *uniname);
- if (*uniname == 0x0)
- break;
- uniname++;
- }
-}
-
-void fat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
-{
- int i;
- sector_t sector;
- struct dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (i = num_entries - 1; i >= order; i--) {
- ep = get_entry_in_dir(sb, p_dir, entry - i, &sector);
- if (!ep)
- return;
-
- p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
- buf_modify(sb, sector);
- }
+ return 0;
}
-void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, s32 order, s32 num_entries)
+static void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+ s32 entry, s32 order, s32 num_entries)
{
int i;
sector_t sector;
@@ -1515,7 +1100,7 @@ void exfat_delete_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return;
p_fs->fs_func->set_entry_type(ep, TYPE_DELETED);
- buf_modify(sb, sector);
+ exfat_buf_modify(sb, sector);
}
}
@@ -1533,7 +1118,7 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
if (!file_ep)
return;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
num_entries = (s32)file_ep->num_ext + 1;
chksum = calc_checksum_2byte((void *)file_ep, DENTRY_SIZE, 0,
@@ -1542,7 +1127,7 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
for (i = 1; i < num_entries; i++) {
ep = get_entry_in_dir(sb, p_dir, entry + i, NULL);
if (!ep) {
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
return;
}
@@ -1551,8 +1136,75 @@ void update_dir_checksum(struct super_block *sb, struct chain_t *p_dir,
}
SET16_A(file_ep->checksum, chksum);
- buf_modify(sb, sector);
- buf_unlock(sb, sector);
+ exfat_buf_modify(sb, sector);
+ exfat_buf_unlock(sb, sector);
+}
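
update_dir_checksum() recomputes the 16-bit checksum covering a file entry and all of its secondary entries: the first entry is summed with type CS_DIR_ENTRY, which skips the two checksum bytes themselves, and the rest with CS_DEFAULT; calc_checksum_2byte() is the driver's rotate-and-add over each byte. A hedged userspace rendering of that algorithm, where byte offsets 2 and 3 of the first 32-byte entry are the excluded checksum field:

	#include <stdint.h>
	#include <stddef.h>

	#define DENTRY_SIZE 32

	/* Rotate right by one, add the next byte, skipping the checksum
	 * field (bytes 2-3) of the first entry in the set. */
	static uint16_t entry_set_checksum(const uint8_t *entries,
					   size_t num_entries)
	{
		uint16_t chksum = 0;
		size_t i, n = num_entries * DENTRY_SIZE;

		for (i = 0; i < n; i++) {
			if (i == 2 || i == 3)
				continue;	/* the checksum field itself */
			chksum = ((chksum << 15) | (chksum >> 1)) + entries[i];
		}
		return chksum;
	}
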
+
+static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
+ struct entry_set_cache_t *es,
+ sector_t sec, s32 off, u32 count)
+{
+ s32 num_entries, buf_off = (off - es->offset);
+ u32 remaining_byte_in_sector, copy_entries;
+ struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
+ struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
+ u32 clu;
+ u8 *buf, *esbuf = (u8 *)&es->__buf;
+
+ pr_debug("%s entered es %p sec %llu off %d count %d\n",
+ __func__, es, (unsigned long long)sec, off, count);
+ num_entries = count;
+
+ while (num_entries) {
+		/* write per sector base */
+ remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
+ copy_entries = min_t(s32,
+ remaining_byte_in_sector >> DENTRY_SIZE_BITS,
+ num_entries);
+ buf = exfat_buf_getblk(sb, sec);
+ if (!buf)
+ goto err_out;
+ pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
+ pr_debug("copying %d entries from %p to sector %llu\n",
+ copy_entries, (esbuf + buf_off),
+ (unsigned long long)sec);
+ memcpy(buf + off, esbuf + buf_off,
+ copy_entries << DENTRY_SIZE_BITS);
+ exfat_buf_modify(sb, sec);
+ num_entries -= copy_entries;
+
+ if (num_entries) {
+ /* get next sector */
+ if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
+ clu = GET_CLUSTER_FROM_SECTOR(sec);
+ if (es->alloc_flag == 0x03) {
+ clu++;
+ } else {
+ if (exfat_fat_read(sb, clu, &clu) == -1)
+ goto err_out;
+ }
+ sec = START_SECTOR(clu);
+ } else {
+ sec++;
+ }
+ off = 0;
+ buf_off += copy_entries << DENTRY_SIZE_BITS;
+ }
+ }
+
+ pr_debug("%s exited successfully\n", __func__);
+ return 0;
+err_out:
+ pr_debug("%s failed\n", __func__);
+ return -EINVAL;
+}
+
+/* write back all entries in entry set */
+static s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
+{
+ return __write_partial_entries_in_entry_set(sb, es, es->sector,
+ es->offset,
+ es->num_entries);
}
void update_dir_checksum_with_entry_set(struct super_block *sb,
@@ -1562,7 +1214,7 @@ void update_dir_checksum_with_entry_set(struct super_block *sb,
u16 chksum = 0;
s32 chksum_type = CS_DIR_ENTRY, i;
- ep = (struct dentry_t *)&(es->__buf);
+ ep = (struct dentry_t *)&es->__buf;
for (i = 0; i < es->num_entries; i++) {
pr_debug("%s ep %p\n", __func__, ep);
chksum = calc_checksum_2byte((void *)ep, DENTRY_SIZE, chksum,
@@ -1571,7 +1223,7 @@ void update_dir_checksum_with_entry_set(struct super_block *sb,
chksum_type = CS_DEFAULT;
}
- ep = (struct dentry_t *)&(es->__buf);
+ ep = (struct dentry_t *)&es->__buf;
SET16_A(((struct file_dentry_t *)ep)->checksum, chksum);
write_whole_entry_set(sb, es);
}
@@ -1590,18 +1242,18 @@ static s32 _walk_fat_chain(struct super_block *sb, struct chain_t *p_dir,
cur_clu += clu_offset;
} else {
while (clu_offset > 0) {
- if (FAT_read(sb, cur_clu, &cur_clu) == -1)
- return FFS_MEDIAERR;
+ if (exfat_fat_read(sb, cur_clu, &cur_clu) == -1)
+ return -EIO;
clu_offset--;
}
}
if (clu)
*clu = cur_clu;
- return FFS_SUCCESS;
+ return 0;
}
-s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
+static s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
sector_t *sector, s32 *offset)
{
s32 off, ret;
@@ -1617,7 +1269,7 @@ s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
*sector += p_fs->root_start_sector;
} else {
ret = _walk_fat_chain(sb, p_dir, off, &clu);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
/* byte offset in cluster */
@@ -1630,20 +1282,7 @@ s32 find_location(struct super_block *sb, struct chain_t *p_dir, s32 entry,
*sector = off >> p_bd->sector_size_bits;
*sector += START_SECTOR(clu);
}
- return FFS_SUCCESS;
-}
-
-struct dentry_t *get_entry_with_sector(struct super_block *sb, sector_t sector,
- s32 offset)
-{
- u8 *buf;
-
- buf = buf_getblk(sb, sector);
-
- if (!buf)
- return NULL;
-
- return (struct dentry_t *)(buf + offset);
+ return 0;
}
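
find_location() converts a directory-entry index into an on-disk (sector, offset) pair: the entry index times 32 gives a byte offset, _walk_fat_chain() resolves which cluster of the directory that byte falls in, and the remainder is split into a sector within the cluster plus a byte offset within the sector. Worked numbers, assuming 512-byte sectors and 4 KiB clusters (128 entries per cluster); the geometry is hypothetical:

	#include <stdio.h>

	/* Hedged arithmetic for the entry -> (sector, offset) split above;
	 * DENTRY_SIZE_BITS = 5 (32-byte entries). */
	int main(void)
	{
		unsigned int sector_size_bits = 9, cluster_size = 4096;
		unsigned int entry = 200;

		unsigned int off = entry << 5;			/* 6400 bytes in */
		unsigned int clu_index = off / cluster_size;	/* cluster index 1 */

		off &= cluster_size - 1;			/* 2304 bytes into it */
		printf("cluster index %u, sector %u, offset %u\n",
		       clu_index, off >> sector_size_bits,	/* sector 4 */
		       off & ((1u << sector_size_bits) - 1));	/* offset 256 */
		return 0;
	}
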
struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
@@ -1653,10 +1292,10 @@ struct dentry_t *get_entry_in_dir(struct super_block *sb, struct chain_t *p_dir,
sector_t sec;
u8 *buf;
- if (find_location(sb, p_dir, entry, &sec, &off) != FFS_SUCCESS)
+ if (find_location(sb, p_dir, entry, &sec, &off) != 0)
return NULL;
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
return NULL;
@@ -1703,11 +1342,11 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
size_t bufsize;
pr_debug("%s entered p_dir dir %u flags %x size %d\n",
- __func__, p_dir->dir, p_dir->flags, p_dir->size);
+ __func__, p_dir->dir, p_dir->flags, p_dir->size);
byte_offset = entry << DENTRY_SIZE_BITS;
ret = _walk_fat_chain(sb, p_dir, byte_offset, &clu);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return NULL;
/* byte offset in cluster */
@@ -1720,15 +1359,14 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
sec = byte_offset >> p_bd->sector_size_bits;
sec += START_SECTOR(clu);
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
goto err_out;
ep = (struct dentry_t *)(buf + off);
entry_type = p_fs->fs_func->get_entry_type(ep);
- if ((entry_type != TYPE_FILE)
- && (entry_type != TYPE_DIR))
+ if ((entry_type != TYPE_FILE) && (entry_type != TYPE_DIR))
goto err_out;
if (type == ES_ALL_ENTRIES)
@@ -1812,14 +1450,14 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
if (es->alloc_flag == 0x03) {
clu++;
} else {
- if (FAT_read(sb, clu, &clu) == -1)
+ if (exfat_fat_read(sb, clu, &clu) == -1)
goto err_out;
}
sec = START_SECTOR(clu);
} else {
sec++;
}
- buf = buf_getblk(sb, sec);
+ buf = exfat_buf_getblk(sb, sec);
if (!buf)
goto err_out;
off = 0;
@@ -1832,11 +1470,11 @@ struct entry_set_cache_t *get_entry_set_in_dir(struct super_block *sb,
}
if (file_ep)
- *file_ep = (struct dentry_t *)&(es->__buf);
+ *file_ep = (struct dentry_t *)&es->__buf;
pr_debug("%s exiting es %p sec %llu offset %d flags %d, num_entries %u buf ptr %p\n",
- __func__, es, (unsigned long long)es->sector, es->offset,
- es->alloc_flag, es->num_entries, &es->__buf);
+ __func__, es, (unsigned long long)es->sector, es->offset,
+ es->alloc_flag, es->num_entries, &es->__buf);
return es;
err_out:
pr_debug("%s exited NULL (es %p)\n", __func__, es);
@@ -1850,114 +1488,8 @@ void release_entry_set(struct entry_set_cache_t *es)
kfree(es);
}
-static s32 __write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es,
- sector_t sec, s32 off, u32 count)
-{
- s32 num_entries, buf_off = (off - es->offset);
- u32 remaining_byte_in_sector, copy_entries;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- u32 clu;
- u8 *buf, *esbuf = (u8 *)&(es->__buf);
-
- pr_debug("%s entered es %p sec %llu off %d count %d\n",
- __func__, es, (unsigned long long)sec, off, count);
- num_entries = count;
-
- while (num_entries) {
- /* white per sector base */
- remaining_byte_in_sector = (1 << p_bd->sector_size_bits) - off;
- copy_entries = min_t(s32,
- remaining_byte_in_sector >> DENTRY_SIZE_BITS,
- num_entries);
- buf = buf_getblk(sb, sec);
- if (!buf)
- goto err_out;
- pr_debug("es->buf %p buf_off %u\n", esbuf, buf_off);
- pr_debug("copying %d entries from %p to sector %llu\n",
- copy_entries, (esbuf + buf_off),
- (unsigned long long)sec);
- memcpy(buf + off, esbuf + buf_off,
- copy_entries << DENTRY_SIZE_BITS);
- buf_modify(sb, sec);
- num_entries -= copy_entries;
-
- if (num_entries) {
- /* get next sector */
- if (IS_LAST_SECTOR_IN_CLUSTER(sec)) {
- clu = GET_CLUSTER_FROM_SECTOR(sec);
- if (es->alloc_flag == 0x03) {
- clu++;
- } else {
- if (FAT_read(sb, clu, &clu) == -1)
- goto err_out;
- }
- sec = START_SECTOR(clu);
- } else {
- sec++;
- }
- off = 0;
- buf_off += copy_entries << DENTRY_SIZE_BITS;
- }
- }
-
- pr_debug("%s exited successfully\n", __func__);
- return FFS_SUCCESS;
-err_out:
- pr_debug("%s failed\n", __func__);
- return FFS_ERROR;
-}
-
-/* write back all entries in entry set */
-s32 write_whole_entry_set(struct super_block *sb, struct entry_set_cache_t *es)
-{
- return __write_partial_entries_in_entry_set(sb, es, es->sector,
- es->offset,
- es->num_entries);
-}
-
-/* write back some entries in entry set */
-s32 write_partial_entries_in_entry_set(struct super_block *sb,
- struct entry_set_cache_t *es, struct dentry_t *ep, u32 count)
-{
- s32 ret, byte_offset, off;
- u32 clu = 0;
- sector_t sec;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
- struct chain_t dir;
-
- /* validity check */
- if (ep + count > ((struct dentry_t *)&(es->__buf)) + es->num_entries)
- return FFS_ERROR;
-
- dir.dir = GET_CLUSTER_FROM_SECTOR(es->sector);
- dir.flags = es->alloc_flag;
- dir.size = 0xffffffff; /* XXX */
-
- byte_offset = (es->sector - START_SECTOR(dir.dir)) <<
- p_bd->sector_size_bits;
- byte_offset += ((void **)ep - &(es->__buf)) + es->offset;
-
- ret = _walk_fat_chain(sb, &dir, byte_offset, &clu);
- if (ret != FFS_SUCCESS)
- return ret;
-
- /* byte offset in cluster */
- byte_offset &= p_fs->cluster_size - 1;
-
- /* byte offset in sector */
- off = byte_offset & p_bd->sector_size_mask;
-
- /* sector offset in cluster */
- sec = byte_offset >> p_bd->sector_size_bits;
- sec += START_SECTOR(clu);
- return __write_partial_entries_in_entry_set(sb, es, sec, off, count);
-}
-
/* search EMPTY CONTINUOUS "num_entries" entries */
-s32 search_deleted_or_unused_entry(struct super_block *sb,
+static s32 search_deleted_or_unused_entry(struct super_block *sb,
struct chain_t *p_dir, s32 num_entries)
{
int i, dentry, num_empty = 0;
@@ -2043,7 +1575,7 @@ s32 search_deleted_or_unused_entry(struct super_block *sb,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
return -1;
}
}
@@ -2051,7 +1583,7 @@ s32 search_deleted_or_unused_entry(struct super_block *sb,
return -1;
}
-s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
+static s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries)
{
s32 ret, dentry;
u32 last_clu;
@@ -2070,10 +1602,8 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
if (p_fs->dev_ejected)
break;
- if (p_fs->vol_type == EXFAT) {
- if (p_dir->dir != p_fs->root_dir)
- size = i_size_read(inode);
- }
+ if (p_dir->dir != p_fs->root_dir)
+ size = i_size_read(inode);
last_clu = find_last_cluster(sb, p_dir);
clu.dir = last_clu + 1;
@@ -2083,10 +1613,10 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
/* (1) allocate a cluster */
ret = p_fs->fs_func->alloc_cluster(sb, 1, &clu);
if (ret < 1)
- return -1;
+ return -EIO;
- if (clear_cluster(sb, clu.dir) != FFS_SUCCESS)
- return -1;
+ if (clear_cluster(sb, clu.dir) != 0)
+ return -EIO;
/* (2) append to the FAT chain */
if (clu.flags != p_dir->flags) {
@@ -2095,8 +1625,8 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
p_fs->hint_uentry.clu.flags = 0x01;
}
if (clu.flags == 0x01)
- if (FAT_write(sb, last_clu, clu.dir) < 0)
- return -1;
+ if (exfat_fat_write(sb, last_clu, clu.dir) < 0)
+ return -EIO;
if (p_fs->hint_uentry.entry == -1) {
p_fs->hint_uentry.dir = p_dir->dir;
@@ -2110,21 +1640,19 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
p_dir->size++;
/* (3) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- if (p_dir->dir != p_fs->root_dir) {
- size += p_fs->cluster_size;
-
- ep = get_entry_in_dir(sb, &fid->dir,
- fid->entry + 1, &sector);
- if (!ep)
- return -1;
- p_fs->fs_func->set_entry_size(ep, size);
- p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
- buf_modify(sb, sector);
-
- update_dir_checksum(sb, &(fid->dir),
- fid->entry);
- }
+ if (p_dir->dir != p_fs->root_dir) {
+ size += p_fs->cluster_size;
+
+ ep = get_entry_in_dir(sb, &fid->dir,
+ fid->entry + 1, &sector);
+ if (!ep)
+ return -ENOENT;
+ p_fs->fs_func->set_entry_size(ep, size);
+ p_fs->fs_func->set_entry_flag(ep, p_dir->flags);
+ exfat_buf_modify(sb, sector);
+
+ update_dir_checksum(sb, &fid->dir,
+ fid->entry);
}
i_size_write(inode, i_size_read(inode) + p_fs->cluster_size);
@@ -2137,102 +1665,21 @@ s32 find_empty_entry(struct inode *inode, struct chain_t *p_dir, s32 num_entries
return dentry;
}
-/* return values of fat_find_dir_entry()
- * >= 0 : return dir entry position with the name in dir
- * -1 : (root dir, ".") it is the root dir itself
- * -2 : entry with the name does not exist
- */
-s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
- struct uni_name_t *p_uniname, s32 num_entries,
- struct dos_name_t *p_dosname, u32 type)
+static s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
+ s32 order)
{
- int i, dentry = 0, len;
- s32 order = 0;
- bool is_feasible_entry = true, has_ext_entry = false;
- s32 dentries_per_clu;
- u32 entry_type;
- u16 entry_uniname[14], *uniname = NULL, unichar;
- struct chain_t clu;
- struct dentry_t *ep;
- struct dos_dentry_t *dos_ep;
- struct ext_dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- if (p_dir->dir == p_fs->root_dir) {
- if ((!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_CUR_DIR_NAME)) ||
- (!nls_uniname_cmp(sb, p_uniname->name,
- (u16 *)UNI_PAR_DIR_NAME)))
- return -1; // special case, root directory itself
- }
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++, dentry++) {
- ep = get_entry_in_dir(sb, &clu, i, NULL);
- if (!ep)
- return -2;
-
- entry_type = p_fs->fs_func->get_entry_type(ep);
-
- if ((entry_type == TYPE_FILE) || (entry_type == TYPE_DIR)) {
- if ((type == TYPE_ALL) || (type == entry_type)) {
- if (is_feasible_entry && has_ext_entry)
- return dentry;
-
- dos_ep = (struct dos_dentry_t *)ep;
- if (!nls_dosname_cmp(sb, p_dosname->name, dos_ep->name))
- return dentry;
- }
- is_feasible_entry = true;
- has_ext_entry = false;
- } else if (entry_type == TYPE_EXTEND) {
- if (is_feasible_entry) {
- ext_ep = (struct ext_dentry_t *)ep;
- if (ext_ep->order > 0x40) {
- order = (s32)(ext_ep->order - 0x40);
- uniname = p_uniname->name + 13 * (order - 1);
- } else {
- order = (s32)ext_ep->order;
- uniname -= 13;
- }
-
- len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
-
- unichar = *(uniname + len);
- *(uniname + len) = 0x0;
-
- if (nls_uniname_cmp(sb, uniname, entry_uniname))
- is_feasible_entry = false;
-
- *(uniname + len) = unichar;
- }
- has_ext_entry = true;
- } else if (entry_type == TYPE_UNUSED) {
- return -2;
- }
- is_feasible_entry = true;
- has_ext_entry = false;
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
+ int i, len = 0;
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return -2;
+ for (i = 0; i < 30; i += 2) {
+ *uniname = GET16_A(ep->unicode_0_14 + i);
+ if (*uniname == 0x0)
+ return len;
+ uniname++;
+ len++;
}
- return -2;
+ *uniname = 0x0;
+ return len;
}
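
GET16_A() is presumably an unaligned little-endian 16-bit load, as needed for the UTF-16LE code units packed into the 30-byte unicode_0_14 field. A standalone sketch of an equivalent accessor, using the kernel's real get_unaligned_le16() helper (the name get16_a below is hypothetical):

#include <asm/unaligned.h>

/* Hypothetical equivalent of GET16_A(): fetch an unaligned
 * little-endian u16 from a directory entry field.
 */
static inline u16 get16_a(const u8 *p)
{
	return get_unaligned_le16(p);
}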
/* return values of exfat_find_dir_entry()
@@ -2240,7 +1687,7 @@ s32 fat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
* -1 : (root dir, ".") it is the root dir itself
* -2 : entry with the name does not exist
*/
-s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
+static s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 num_entries,
struct dos_name_t *p_dosname, u32 type)
{
@@ -2375,7 +1822,7 @@ s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
return -2;
}
}
@@ -2383,37 +1830,7 @@ s32 exfat_find_dir_entry(struct super_block *sb, struct chain_t *p_dir,
return -2;
}
-s32 fat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
- s32 entry, struct dentry_t *p_entry)
-{
- s32 count = 0;
- u8 chksum;
- struct dos_dentry_t *dos_ep = (struct dos_dentry_t *)p_entry;
- struct ext_dentry_t *ext_ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- chksum = calc_checksum_1byte((void *)dos_ep->name, DOS_NAME_LENGTH, 0);
-
- for (entry--; entry >= 0; entry--) {
- ext_ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir,
- entry, NULL);
- if (!ext_ep)
- return -1;
-
- if ((p_fs->fs_func->get_entry_type((struct dentry_t *)ext_ep) ==
- TYPE_EXTEND) && (ext_ep->checksum == chksum)) {
- count++;
- if (ext_ep->order > 0x40)
- return count;
- } else {
- return count;
- }
- }
-
- return count;
-}
-
-s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
+static s32 exfat_count_ext_entries(struct super_block *sb, struct chain_t *p_dir,
s32 entry, struct dentry_t *p_entry)
{
int i, count = 0;
@@ -2463,7 +1880,7 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
for (i = 0; i < dentries_per_clu; i++) {
ep = get_entry_in_dir(sb, &clu, i, NULL);
if (!ep)
- return -1;
+ return -ENOENT;
entry_type = p_fs->fs_func->get_entry_type(ep);
@@ -2486,8 +1903,8 @@ s32 count_dos_name_entries(struct super_block *sb, struct chain_t *p_dir,
else
clu.dir = CLUSTER_32(~0);
} else {
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return -1;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
+ return -EIO;
}
}
@@ -2546,7 +1963,7 @@ bool is_dir_empty(struct super_block *sb, struct chain_t *p_dir)
else
clu.dir = CLUSTER_32(~0);
}
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) != 0)
break;
}
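
is_dir_empty() above uses the cluster-chain walk idiom that recurs throughout this file; isolated into a hypothetical helper for clarity (CLUSTER_32(~0) marks end-of-chain, and exfat_fat_read() returns nonzero on failure):

/* Sketch only: walk an exFAT cluster chain via the FAT. */
static s32 walk_chain(struct super_block *sb, struct chain_t *p_dir)
{
	u32 clu = p_dir->dir;

	while (clu != CLUSTER_32(~0)) {
		/* ... examine the dentries in cluster "clu" ... */
		if (exfat_fat_read(sb, clu, &clu) != 0)
			return -EIO;	/* FAT lookup failed */
	}
	return 0;
}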
@@ -2564,84 +1981,19 @@ s32 get_num_entries_and_dos_name(struct super_block *sb, struct chain_t *p_dir,
struct uni_name_t *p_uniname, s32 *entries,
struct dos_name_t *p_dosname)
{
- s32 ret, num_entries;
- bool lossy = false;
- char **r;
+ s32 num_entries;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
num_entries = p_fs->fs_func->calc_num_entries(p_uniname);
if (num_entries == 0)
- return FFS_INVALIDPATH;
-
- if (p_fs->vol_type != EXFAT) {
- nls_uniname_to_dosname(sb, p_dosname, p_uniname, &lossy);
-
- if (lossy) {
- ret = fat_generate_dos_name(sb, p_dir, p_dosname);
- if (ret)
- return ret;
- } else {
- for (r = reserved_names; *r; r++) {
- if (!strncmp((void *)p_dosname->name, *r, 8))
- return FFS_INVALIDPATH;
- }
-
- if (p_dosname->name_case != 0xFF)
- num_entries = 1;
- }
-
- if (num_entries > 1)
- p_dosname->name_case = 0x0;
- }
+ return -EINVAL;
*entries = num_entries;
- return FFS_SUCCESS;
-}
-
-void get_uni_name_from_dos_entry(struct super_block *sb,
- struct dos_dentry_t *ep,
- struct uni_name_t *p_uniname, u8 mode)
-{
- struct dos_name_t dos_name;
-
- if (mode == 0x0)
- dos_name.name_case = 0x0;
- else
- dos_name.name_case = ep->lcase;
-
- memcpy(dos_name.name, ep->name, DOS_NAME_LENGTH);
- nls_dosname_to_uniname(sb, p_uniname, &dos_name);
-}
-
-void fat_get_uni_name_from_ext_entry(struct super_block *sb,
- struct chain_t *p_dir, s32 entry,
- u16 *uniname)
-{
- int i;
- struct ext_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- for (entry--, i = 1; entry >= 0; entry--, i++) {
- ep = (struct ext_dentry_t *)get_entry_in_dir(sb, p_dir, entry,
- NULL);
- if (!ep)
- return;
-
- if (p_fs->fs_func->get_entry_type((struct dentry_t *)ep) ==
- TYPE_EXTEND) {
- extract_uni_name_from_ext_entry(ep, uniname, i);
- if (ep->order > 0x40)
- return;
- } else {
- return;
- }
-
- uniname += 13;
- }
+ return 0;
}
-void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
+static void exfat_get_uni_name_from_ext_entry(struct super_block *sb,
struct chain_t *p_dir, s32 entry,
u16 *uniname)
{
@@ -2678,203 +2030,7 @@ out:
release_entry_set(es);
}
-s32 extract_uni_name_from_ext_entry(struct ext_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 10; i += 2) {
- *uniname = GET16(ep->unicode_0_4 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- if (order < 20) {
- for (i = 0; i < 12; i += 2) {
- *uniname = GET16_A(ep->unicode_5_10 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
- } else {
- for (i = 0; i < 8; i += 2) {
- *uniname = GET16_A(ep->unicode_5_10 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
- *uniname = 0x0; /* uniname[MAX_NAME_LENGTH-1] */
- return len;
- }
-
- for (i = 0; i < 4; i += 2) {
- *uniname = GET16_A(ep->unicode_11_12 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-s32 extract_uni_name_from_name_entry(struct name_dentry_t *ep, u16 *uniname,
- s32 order)
-{
- int i, len = 0;
-
- for (i = 0; i < 30; i += 2) {
- *uniname = GET16_A(ep->unicode_0_14 + i);
- if (*uniname == 0x0)
- return len;
- uniname++;
- len++;
- }
-
- *uniname = 0x0;
- return len;
-}
-
-s32 fat_generate_dos_name(struct super_block *sb, struct chain_t *p_dir,
- struct dos_name_t *p_dosname)
-{
- int i, j, count = 0;
- bool count_begin = false;
- s32 dentries_per_clu;
- u32 type;
- u8 bmap[128/* 1 ~ 1023 */];
- struct chain_t clu;
- struct dos_dentry_t *ep;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
-
- memset(bmap, 0, sizeof(bmap));
- exfat_bitmap_set(bmap, 0);
-
- if (p_dir->dir == CLUSTER_32(0)) /* FAT16 root_dir */
- dentries_per_clu = p_fs->dentries_in_root;
- else
- dentries_per_clu = p_fs->dentries_per_clu;
-
- clu.dir = p_dir->dir;
- clu.flags = p_dir->flags;
-
- while (clu.dir != CLUSTER_32(~0)) {
- if (p_fs->dev_ejected)
- break;
-
- for (i = 0; i < dentries_per_clu; i++) {
- ep = (struct dos_dentry_t *)get_entry_in_dir(sb, &clu,
- i, NULL);
- if (!ep)
- return FFS_MEDIAERR;
-
- type = p_fs->fs_func->get_entry_type((struct dentry_t *)
- ep);
-
- if (type == TYPE_UNUSED)
- break;
- if ((type != TYPE_FILE) && (type != TYPE_DIR))
- continue;
-
- count = 0;
- count_begin = false;
-
- for (j = 0; j < 8; j++) {
- if (ep->name[j] == ' ')
- break;
-
- if (ep->name[j] == '~') {
- count_begin = true;
- } else if (count_begin) {
- if ((ep->name[j] >= '0') &&
- (ep->name[j] <= '9')) {
- count = count * 10 +
- (ep->name[j] - '0');
- } else {
- count = 0;
- count_begin = false;
- }
- }
- }
-
- if ((count > 0) && (count < 1024))
- exfat_bitmap_set(bmap, count);
- }
-
- if (p_dir->dir == CLUSTER_32(0))
- break; /* FAT16 root_dir */
-
- if (FAT_read(sb, clu.dir, &clu.dir) != 0)
- return FFS_MEDIAERR;
- }
-
- count = 0;
- for (i = 0; i < 128; i++) {
- if (bmap[i] != 0xFF) {
- for (j = 0; j < 8; j++) {
- if (exfat_bitmap_test(&bmap[i], j) == 0) {
- count = (i << 3) + j;
- break;
- }
- }
- if (count != 0)
- break;
- }
- }
-
- if ((count == 0) || (count >= 1024))
- return FFS_FILEEXIST;
- fat_attach_count_to_dos_name(p_dosname->name, count);
-
- /* Now dos_name has DOS~????.EXT */
- return FFS_SUCCESS;
-}
-
-void fat_attach_count_to_dos_name(u8 *dosname, s32 count)
-{
- int i, j, length;
- char str_count[6];
-
- snprintf(str_count, sizeof(str_count), "~%d", count);
- length = strlen(str_count);
-
- i = 0;
- j = 0;
- while (j <= (8 - length)) {
- i = j;
- if (dosname[j] == ' ')
- break;
- if (dosname[j] & 0x80)
- j += 2;
- else
- j++;
- }
-
- for (j = 0; j < length; i++, j++)
- dosname[i] = (u8)str_count[j];
-
- if (i == 7)
- dosname[7] = ' ';
-}
-
-s32 fat_calc_num_entries(struct uni_name_t *p_uniname)
-{
- s32 len;
-
- len = p_uniname->name_len;
- if (len == 0)
- return 0;
-
- /* 1 dos name entry + extended entries */
- return (len - 1) / 13 + 2;
-}
-
-s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
+static s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
{
s32 len;
@@ -2886,17 +2042,6 @@ s32 exfat_calc_num_entries(struct uni_name_t *p_uniname)
return (len - 1) / 15 + 3;
}
-u8 calc_checksum_1byte(void *data, s32 len, u8 chksum)
-{
- int i;
- u8 *c = (u8 *)data;
-
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 7) | ((chksum & 0xFE) >> 1)) + *c;
-
- return chksum;
-}
-
u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
{
int i;
@@ -2921,30 +2066,6 @@ u16 calc_checksum_2byte(void *data, s32 len, u16 chksum, s32 type)
return chksum;
}
-u32 calc_checksum_4byte(void *data, s32 len, u32 chksum, s32 type)
-{
- int i;
- u8 *c = (u8 *)data;
-
- switch (type) {
- case CS_PBR_SECTOR:
- for (i = 0; i < len; i++, c++) {
- if ((i == 106) || (i == 107) || (i == 112))
- continue;
- chksum = (((chksum & 1) << 31) |
- ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
- }
- break;
- default:
- for (i = 0; i < len; i++, c++)
- chksum = (((chksum & 1) << 31) |
- ((chksum & 0xFFFFFFFE) >> 1)) + (u32)*c;
- }
-
- return chksum;
-}
-
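
calc_checksum_2byte(), which remains, applies the same rotate-right-then-add scheme in 16 bits; for directory entry sets the exFAT format skips the 2-byte SetChecksum field itself (offsets 2 and 3 of the first entry). A self-contained sketch of that variant (entry_set_checksum() is a hypothetical name, not the driver's function):

/* Sketch of the 16-bit entry-set checksum: rotate right through
 * the MSB, add each byte, skipping the SetChecksum field at
 * offsets 2-3 of the first directory entry.
 */
static u16 entry_set_checksum(const u8 *data, s32 len)
{
	u16 chksum = 0;
	s32 i;

	for (i = 0; i < len; i++) {
		if (i == 2 || i == 3)
			continue;
		chksum = (((chksum & 1) << 15) |
			  ((chksum & 0xFFFE) >> 1)) + (u16)data[i];
	}
	return chksum;
}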
/*
* Name Resolution Functions
*/
@@ -2962,11 +2083,11 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
if (strscpy(name_buf, path, sizeof(name_buf)) < 0)
- return FFS_INVALIDPATH;
+ return -EINVAL;
nls_cstring_to_uniname(sb, p_uniname, name_buf, &lossy);
if (lossy)
- return FFS_INVALIDPATH;
+ return -EINVAL;
fid->size = i_size_read(inode);
@@ -2974,154 +2095,12 @@ s32 resolve_path(struct inode *inode, char *path, struct chain_t *p_dir,
p_dir->size = (s32)(fid->size >> p_fs->cluster_size_bits);
p_dir->flags = fid->flags;
- return FFS_SUCCESS;
+ return 0;
}
/*
* File Operation Functions
*/
-static struct fs_func fat_fs_func = {
- .alloc_cluster = fat_alloc_cluster,
- .free_cluster = fat_free_cluster,
- .count_used_clusters = fat_count_used_clusters,
-
- .init_dir_entry = fat_init_dir_entry,
- .init_ext_entry = fat_init_ext_entry,
- .find_dir_entry = fat_find_dir_entry,
- .delete_dir_entry = fat_delete_dir_entry,
- .get_uni_name_from_ext_entry = fat_get_uni_name_from_ext_entry,
- .count_ext_entries = fat_count_ext_entries,
- .calc_num_entries = fat_calc_num_entries,
-
- .get_entry_type = fat_get_entry_type,
- .set_entry_type = fat_set_entry_type,
- .get_entry_attr = fat_get_entry_attr,
- .set_entry_attr = fat_set_entry_attr,
- .get_entry_flag = fat_get_entry_flag,
- .set_entry_flag = fat_set_entry_flag,
- .get_entry_clu0 = fat_get_entry_clu0,
- .set_entry_clu0 = fat_set_entry_clu0,
- .get_entry_size = fat_get_entry_size,
- .set_entry_size = fat_set_entry_size,
- .get_entry_time = fat_get_entry_time,
- .set_entry_time = fat_set_entry_time,
-};
-
-s32 fat16_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- s32 num_reserved, num_root_sectors;
- struct bpb16_t *p_bpb = (struct bpb16_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
-
- num_root_sectors = GET16(p_bpb->num_root_entries) << DENTRY_SIZE_BITS;
- num_root_sectors = ((num_root_sectors - 1) >>
- p_bd->sector_size_bits) + 1;
-
- p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
- p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET16(p_bpb->num_fat_sectors);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->FAT2_start_sector +
- p_fs->num_FAT_sectors;
- p_fs->data_start_sector = p_fs->root_start_sector + num_root_sectors;
-
- p_fs->num_sectors = GET16(p_bpb->num_sectors);
- if (p_fs->num_sectors == 0)
- p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
-
- num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
- p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
- p_fs->sectors_per_clu_bits) + 2;
- /* because the cluster index starts with 2 */
-
- if (p_fs->num_clusters < FAT12_THRESHOLD)
- p_fs->vol_type = FAT12;
- else
- p_fs->vol_type = FAT16;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = 0;
- p_fs->dentries_in_root = GET16(p_bpb->num_root_entries);
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = VOL_CLEAN;
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- p_fs->fs_func = &fat_fs_func;
-
- return FFS_SUCCESS;
-}
-
-s32 fat32_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
-{
- s32 num_reserved;
- struct bpb32_t *p_bpb = (struct bpb32_t *)p_pbr->bpb;
- struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
-
- if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
-
- p_fs->sectors_per_clu = p_bpb->sectors_per_clu;
- p_fs->sectors_per_clu_bits = ilog2(p_bpb->sectors_per_clu);
- p_fs->cluster_size_bits = p_fs->sectors_per_clu_bits +
- p_bd->sector_size_bits;
- p_fs->cluster_size = 1 << p_fs->cluster_size_bits;
-
- p_fs->num_FAT_sectors = GET32(p_bpb->num_fat32_sectors);
-
- p_fs->FAT1_start_sector = p_fs->PBR_sector + GET16(p_bpb->num_reserved);
- if (p_bpb->num_fats == 1)
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector;
- else
- p_fs->FAT2_start_sector = p_fs->FAT1_start_sector +
- p_fs->num_FAT_sectors;
-
- p_fs->root_start_sector = p_fs->FAT2_start_sector +
- p_fs->num_FAT_sectors;
- p_fs->data_start_sector = p_fs->root_start_sector;
-
- p_fs->num_sectors = GET32(p_bpb->num_huge_sectors);
- num_reserved = p_fs->data_start_sector - p_fs->PBR_sector;
-
- p_fs->num_clusters = ((p_fs->num_sectors - num_reserved) >>
- p_fs->sectors_per_clu_bits) + 2;
- /* because the cluster index starts with 2 */
-
- p_fs->vol_type = FAT32;
- p_fs->vol_id = GET32(p_bpb->vol_serial);
-
- p_fs->root_dir = GET32(p_bpb->root_cluster);
- p_fs->dentries_in_root = 0;
- p_fs->dentries_per_clu = 1 << (p_fs->cluster_size_bits -
- DENTRY_SIZE_BITS);
-
- p_fs->vol_flag = VOL_CLEAN;
- p_fs->clu_srch_ptr = 2;
- p_fs->used_clusters = UINT_MAX;
-
- p_fs->fs_func = &fat_fs_func;
-
- return FFS_SUCCESS;
-}
-
static struct fs_func exfat_fs_func = {
.alloc_cluster = exfat_alloc_cluster,
.free_cluster = exfat_free_cluster,
@@ -3156,7 +2135,7 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
if (p_bpb->num_fats == 0)
- return FFS_FORMATERR;
+ return -EFSCORRUPTED;
p_fs->sectors_per_clu = 1 << p_bpb->sectors_per_clu_bits;
p_fs->sectors_per_clu_bits = p_bpb->sectors_per_clu_bits;
@@ -3194,7 +2173,7 @@ s32 exfat_mount(struct super_block *sb, struct pbr_sector_t *p_pbr)
p_fs->fs_func = &exfat_fs_func;
- return FFS_SUCCESS;
+ return 0;
}
s32 create_dir(struct inode *inode, struct chain_t *p_dir,
@@ -3203,7 +2182,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
s32 ret, dentry, num_entries;
u64 size;
struct chain_t clu;
- struct dos_name_t dos_name, dot_name;
+ struct dos_name_t dos_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
struct fs_func *fs_func = p_fs->fs_func;
@@ -3216,7 +2195,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* find_empty_entry must be called before alloc_cluster */
dentry = find_empty_entry(inode, p_dir, num_entries);
if (dentry < 0)
- return FFS_FULL;
+ return -ENOSPC;
clu.dir = CLUSTER_32(~0);
clu.size = 0;
@@ -3225,64 +2204,26 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
/* (1) allocate a cluster */
ret = fs_func->alloc_cluster(sb, 1, &clu);
if (ret < 0)
- return FFS_MEDIAERR;
+ return ret;
else if (ret == 0)
- return FFS_FULL;
+ return -ENOSPC;
ret = clear_cluster(sb, clu.dir);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
- if (p_fs->vol_type == EXFAT) {
- size = p_fs->cluster_size;
- } else {
- size = 0;
-
- /* initialize the . and .. entry
- * Information for . points to itself
- * Information for .. points to parent dir
- */
-
- dot_name.name_case = 0x0;
- memcpy(dot_name.name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH);
-
- ret = fs_func->init_dir_entry(sb, &clu, 0, TYPE_DIR, clu.dir,
- 0);
- if (ret != FFS_SUCCESS)
- return ret;
-
- ret = fs_func->init_ext_entry(sb, &clu, 0, 1, NULL, &dot_name);
- if (ret != FFS_SUCCESS)
- return ret;
-
- memcpy(dot_name.name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH);
-
- if (p_dir->dir == p_fs->root_dir)
- ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
- CLUSTER_32(0), 0);
- else
- ret = fs_func->init_dir_entry(sb, &clu, 1, TYPE_DIR,
- p_dir->dir, 0);
-
- if (ret != FFS_SUCCESS)
- return ret;
-
- ret = p_fs->fs_func->init_ext_entry(sb, &clu, 1, 1, NULL,
- &dot_name);
- if (ret != FFS_SUCCESS)
- return ret;
- }
+ size = p_fs->cluster_size;
/* (2) update the directory entry */
/* make sub-dir entry in parent directory */
ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_DIR, clu.dir,
size);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fid->dir.dir = p_dir->dir;
@@ -3299,7 +2240,7 @@ s32 create_dir(struct inode *inode, struct chain_t *p_dir,
fid->rwoffset = 0;
fid->hint_last_off = -1;
- return FFS_SUCCESS;
+ return 0;
}
s32 create_file(struct inode *inode, struct chain_t *p_dir,
@@ -3319,7 +2260,7 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
/* find_empty_entry must be called before alloc_cluster() */
dentry = find_empty_entry(inode, p_dir, num_entries);
if (dentry < 0)
- return FFS_FULL;
+ return -ENOSPC;
/* (1) update the directory entry */
/* fill the dos name directory entry information of the created file.
@@ -3327,12 +2268,12 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
*/
ret = fs_func->init_dir_entry(sb, p_dir, dentry, TYPE_FILE | mode,
CLUSTER_32(0), 0);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
ret = fs_func->init_ext_entry(sb, p_dir, dentry, num_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fid->dir.dir = p_dir->dir;
@@ -3349,7 +2290,7 @@ s32 create_file(struct inode *inode, struct chain_t *p_dir,
fid->rwoffset = 0;
fid->hint_last_off = -1;
- return FFS_SUCCESS;
+ return 0;
}
void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
@@ -3365,17 +2306,17 @@ void remove_file(struct inode *inode, struct chain_t *p_dir, s32 entry)
if (!ep)
return;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_entries = fs_func->count_ext_entries(sb, p_dir, entry, ep);
if (num_entries < 0) {
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
return;
}
num_entries++;
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
/* (1) update the directory entry */
fs_func->delete_dir_entry(sb, p_dir, entry, 0, num_entries);
@@ -3394,37 +2335,37 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
epold = get_entry_in_dir(sb, p_dir, oldentry, &sector_old);
if (!epold)
- return FFS_MEDIAERR;
+ return -ENOENT;
- buf_lock(sb, sector_old);
+ exfat_buf_lock(sb, sector_old);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_old_entries = fs_func->count_ext_entries(sb, p_dir, oldentry,
epold);
if (num_old_entries < 0) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
num_old_entries++;
ret = get_num_entries_and_dos_name(sb, p_dir, p_uniname,
&num_new_entries, &dos_name);
if (ret) {
- buf_unlock(sb, sector_old);
+ exfat_buf_unlock(sb, sector_old);
return ret;
}
if (num_old_entries < num_new_entries) {
newentry = find_empty_entry(inode, p_dir, num_new_entries);
if (newentry < 0) {
- buf_unlock(sb, sector_old);
- return FFS_FULL;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOSPC;
}
epnew = get_entry_in_dir(sb, p_dir, newentry, &sector_new);
if (!epnew) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
@@ -3434,30 +2375,28 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_old);
-
- if (p_fs->vol_type == EXFAT) {
- epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
- &sector_old);
- buf_lock(sb, sector_old);
- epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
- &sector_new);
-
- if (!epold || !epnew) {
- buf_unlock(sb, sector_old);
- return FFS_MEDIAERR;
- }
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_old);
- memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_old);
+ epold = get_entry_in_dir(sb, p_dir, oldentry + 1,
+ &sector_old);
+ exfat_buf_lock(sb, sector_old);
+ epnew = get_entry_in_dir(sb, p_dir, newentry + 1,
+ &sector_new);
+
+ if (!epold || !epnew) {
+ exfat_buf_unlock(sb, sector_old);
+ return -ENOENT;
}
+ memcpy((void *)epnew, (void *)epold, DENTRY_SIZE);
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_old);
+
ret = fs_func->init_ext_entry(sb, p_dir, newentry,
num_new_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_dir, oldentry, 0,
@@ -3470,20 +2409,20 @@ s32 rename_file(struct inode *inode, struct chain_t *p_dir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_old);
- buf_unlock(sb, sector_old);
+ exfat_buf_modify(sb, sector_old);
+ exfat_buf_unlock(sb, sector_old);
ret = fs_func->init_ext_entry(sb, p_dir, oldentry,
num_new_entries, p_uniname,
&dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_dir, oldentry, num_new_entries,
num_old_entries);
}
- return FFS_SUCCESS;
+ return 0;
}
s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
@@ -3492,7 +2431,6 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
{
s32 ret, newentry, num_new_entries, num_old_entries;
sector_t sector_mov, sector_new;
- struct chain_t clu;
struct dos_name_t dos_name;
struct dentry_t *epmov, *epnew;
struct super_block *sb = inode->i_sb;
@@ -3501,41 +2439,41 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
epmov = get_entry_in_dir(sb, p_olddir, oldentry, &sector_mov);
if (!epmov)
- return FFS_MEDIAERR;
+ return -ENOENT;
/* check if the source and target directory is the same */
if (fs_func->get_entry_type(epmov) == TYPE_DIR &&
fs_func->get_entry_clu0(epmov) == p_newdir->dir)
- return FFS_INVALIDPATH;
+ return -EINVAL;
- buf_lock(sb, sector_mov);
+ exfat_buf_lock(sb, sector_mov);
- /* buf_lock() before call count_ext_entries() */
+ /* exfat_buf_lock() before call count_ext_entries() */
num_old_entries = fs_func->count_ext_entries(sb, p_olddir, oldentry,
epmov);
if (num_old_entries < 0) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
num_old_entries++;
ret = get_num_entries_and_dos_name(sb, p_newdir, p_uniname,
&num_new_entries, &dos_name);
if (ret) {
- buf_unlock(sb, sector_mov);
+ exfat_buf_unlock(sb, sector_mov);
return ret;
}
newentry = find_empty_entry(inode, p_newdir, num_new_entries);
if (newentry < 0) {
- buf_unlock(sb, sector_mov);
- return FFS_FULL;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOSPC;
}
epnew = get_entry_in_dir(sb, p_newdir, newentry, &sector_new);
if (!epnew) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
@@ -3544,42 +2482,26 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
ATTR_ARCHIVE);
fid->attr |= ATTR_ARCHIVE;
}
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_mov);
-
- if (p_fs->vol_type == EXFAT) {
- epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
- &sector_mov);
- buf_lock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
- &sector_new);
- if (!epmov || !epnew) {
- buf_unlock(sb, sector_mov);
- return FFS_MEDIAERR;
- }
-
- memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
- buf_modify(sb, sector_new);
- buf_unlock(sb, sector_mov);
- } else if (fs_func->get_entry_type(epnew) == TYPE_DIR) {
- /* change ".." pointer to new parent dir */
- clu.dir = fs_func->get_entry_clu0(epnew);
- clu.flags = 0x01;
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_mov);
- epnew = get_entry_in_dir(sb, &clu, 1, &sector_new);
- if (!epnew)
- return FFS_MEDIAERR;
-
- if (p_newdir->dir == p_fs->root_dir)
- fs_func->set_entry_clu0(epnew, CLUSTER_32(0));
- else
- fs_func->set_entry_clu0(epnew, p_newdir->dir);
- buf_modify(sb, sector_new);
+ epmov = get_entry_in_dir(sb, p_olddir, oldentry + 1,
+ &sector_mov);
+ exfat_buf_lock(sb, sector_mov);
+ epnew = get_entry_in_dir(sb, p_newdir, newentry + 1,
+ &sector_new);
+ if (!epmov || !epnew) {
+ exfat_buf_unlock(sb, sector_mov);
+ return -ENOENT;
}
+ memcpy((void *)epnew, (void *)epmov, DENTRY_SIZE);
+ exfat_buf_modify(sb, sector_new);
+ exfat_buf_unlock(sb, sector_mov);
+
ret = fs_func->init_ext_entry(sb, p_newdir, newentry, num_new_entries,
p_uniname, &dos_name);
- if (ret != FFS_SUCCESS)
+ if (ret != 0)
return ret;
fs_func->delete_dir_entry(sb, p_olddir, oldentry, 0, num_old_entries);
@@ -3590,7 +2512,7 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
fid->entry = newentry;
- return FFS_SUCCESS;
+ return 0;
}
/*
@@ -3600,7 +2522,7 @@ s32 move_file(struct inode *inode, struct chain_t *p_olddir, s32 oldentry,
int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
bool read)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if ((sec >= (p_fs->PBR_sector + p_fs->num_sectors)) &&
@@ -3612,8 +2534,8 @@ int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
}
if (!p_fs->dev_ejected) {
- ret = bdev_read(sb, sec, bh, 1, read);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_read(sb, sec, bh, 1, read);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3623,7 +2545,7 @@ int sector_read(struct super_block *sb, sector_t sec, struct buffer_head **bh,
int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
bool sync)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if (sec >= (p_fs->PBR_sector + p_fs->num_sectors) &&
@@ -3641,8 +2563,8 @@ int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
}
if (!p_fs->dev_ejected) {
- ret = bdev_write(sb, sec, bh, 1, sync);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_write(sb, sec, bh, 1, sync);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3652,7 +2574,7 @@ int sector_write(struct super_block *sb, sector_t sec, struct buffer_head *bh,
int multi_sector_read(struct super_block *sb, sector_t sec,
struct buffer_head **bh, s32 num_secs, bool read)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if (((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors)) &&
@@ -3664,8 +2586,8 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
}
if (!p_fs->dev_ejected) {
- ret = bdev_read(sb, sec, bh, num_secs, read);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_read(sb, sec, bh, num_secs, read);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
@@ -3675,7 +2597,7 @@ int multi_sector_read(struct super_block *sb, sector_t sec,
int multi_sector_write(struct super_block *sb, sector_t sec,
struct buffer_head *bh, s32 num_secs, bool sync)
{
- s32 ret = FFS_MEDIAERR;
+ s32 ret = -EIO;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
if ((sec + num_secs) > (p_fs->PBR_sector + p_fs->num_sectors) &&
@@ -3692,8 +2614,8 @@ int multi_sector_write(struct super_block *sb, sector_t sec,
}
if (!p_fs->dev_ejected) {
- ret = bdev_write(sb, sec, bh, num_secs, sync);
- if (ret != FFS_SUCCESS)
+ ret = exfat_bdev_write(sb, sec, bh, num_secs, sync);
+ if (ret != 0)
p_fs->dev_ejected = 1;
}
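
A hypothetical caller sketch for the sector helpers above, mirroring what ffsMountVol() does below with the PBR: read one sector through the buffer cache, check the new errno-style return, and release the buffer.

/* Sketch only; read_sector0() is a made-up name. */
static int read_sector0(struct super_block *sb)
{
	struct buffer_head *tmp_bh = NULL;

	if (sector_read(sb, 0, &tmp_bh, 1) != 0)
		return -EIO;
	/* ... parse tmp_bh->b_data ... */
	brelse(tmp_bh);
	return 0;
}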
diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c
index a5c4b68925fb..91e8b0c4dce7 100644
--- a/drivers/staging/exfat/exfat_nls.c
+++ b/drivers/staging/exfat/exfat_nls.c
@@ -7,13 +7,6 @@
#include <linux/nls.h>
#include "exfat.h"
-static u16 bad_dos_chars[] = {
- /* + , ; = [ ] */
- 0x002B, 0x002C, 0x003B, 0x003D, 0x005B, 0x005D,
- 0xFF0B, 0xFF0C, 0xFF1B, 0xFF1D, 0xFF3B, 0xFF3D,
- 0
-};
-
static u16 bad_uni_chars[] = {
/* " * / : < > ? \ | */
0x0022, 0x002A, 0x002F, 0x003A,
@@ -96,11 +89,6 @@ static u16 *nls_wstrchr(u16 *str, u16 wchar)
return NULL;
}
-int nls_dosname_cmp(struct super_block *sb, u8 *a, u8 *b)
-{
- return strncmp(a, b, DOS_NAME_LENGTH);
-}
-
int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
{
int i;
@@ -114,186 +102,6 @@ int nls_uniname_cmp(struct super_block *sb, u16 *a, u16 *b)
return 0;
}
-void nls_uniname_to_dosname(struct super_block *sb,
- struct dos_name_t *p_dosname,
- struct uni_name_t *p_uniname, bool *p_lossy)
-{
- int i, j, len;
- bool lossy = false;
- u8 buf[MAX_CHARSET_SIZE];
- u8 lower = 0, upper = 0;
- u8 *dosname = p_dosname->name;
- u16 *uniname = p_uniname->name;
- u16 *p, *last_period;
- struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
-
- for (i = 0; i < DOS_NAME_LENGTH; i++)
- *(dosname + i) = ' ';
-
- if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_CUR_DIR_NAME)) {
- *(dosname) = '.';
- p_dosname->name_case = 0x0;
- if (p_lossy)
- *p_lossy = false;
- return;
- }
-
- if (!nls_uniname_cmp(sb, uniname, (u16 *)UNI_PAR_DIR_NAME)) {
- *(dosname) = '.';
- *(dosname + 1) = '.';
- p_dosname->name_case = 0x0;
- if (p_lossy)
- *p_lossy = false;
- return;
- }
-
- /* search for the last embedded period */
- last_period = NULL;
- for (p = uniname; *p; p++) {
- if (*p == (u16)'.')
- last_period = p;
- }
-
- i = 0;
- while (i < DOS_NAME_LENGTH) {
- if (i == 8) {
- if (!last_period)
- break;
-
- if (uniname <= last_period) {
- if (uniname < last_period)
- lossy = true;
- uniname = last_period + 1;
- }
- }
-
- if (*uniname == (u16)'\0') {
- break;
- } else if (*uniname == (u16)' ') {
- lossy = true;
- } else if (*uniname == (u16)'.') {
- if (uniname < last_period)
- lossy = true;
- else
- i = 8;
- } else if (nls_wstrchr(bad_dos_chars, *uniname)) {
- lossy = true;
- *(dosname + i) = '_';
- i++;
- } else {
- len = convert_uni_to_ch(nls, buf, *uniname, &lossy);
-
- if (len > 1) {
- if ((i >= 8) && ((i + len) > DOS_NAME_LENGTH))
- break;
-
- if ((i < 8) && ((i + len) > 8)) {
- i = 8;
- continue;
- }
-
- lower = 0xFF;
-
- for (j = 0; j < len; j++, i++)
- *(dosname + i) = *(buf + j);
- } else { /* len == 1 */
- if ((*buf >= 'a') && (*buf <= 'z')) {
- *(dosname + i) = *buf - ('a' - 'A');
-
- if (i < 8)
- lower |= 0x08;
- else
- lower |= 0x10;
- } else if ((*buf >= 'A') && (*buf <= 'Z')) {
- *(dosname + i) = *buf;
-
- if (i < 8)
- upper |= 0x08;
- else
- upper |= 0x10;
- } else {
- *(dosname + i) = *buf;
- }
- i++;
- }
- }
-
- uniname++;
- }
-
- if (*dosname == 0xE5)
- *dosname = 0x05;
-
- if (*uniname != 0x0)
- lossy = true;
-
- if (upper & lower)
- p_dosname->name_case = 0xFF;
- else
- p_dosname->name_case = lower;
-
- if (p_lossy)
- *p_lossy = lossy;
-}
-
-void nls_dosname_to_uniname(struct super_block *sb,
- struct uni_name_t *p_uniname,
- struct dos_name_t *p_dosname)
-{
- int i = 0, j, n = 0;
- u8 buf[DOS_NAME_LENGTH + 2];
- u8 *dosname = p_dosname->name;
- u16 *uniname = p_uniname->name;
- struct nls_table *nls = EXFAT_SB(sb)->nls_disk;
-
- if (*dosname == 0x05) {
- *buf = 0xE5;
- i++;
- n++;
- }
-
- for (; i < 8; i++, n++) {
- if (*(dosname + i) == ' ')
- break;
-
- if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
- (p_dosname->name_case & 0x08))
- *(buf + n) = *(dosname + i) + ('a' - 'A');
- else
- *(buf + n) = *(dosname + i);
- }
- if (*(dosname + 8) != ' ') {
- *(buf + n) = '.';
- n++;
- }
-
- for (i = 8; i < DOS_NAME_LENGTH; i++, n++) {
- if (*(dosname + i) == ' ')
- break;
-
- if ((*(dosname + i) >= 'A') && (*(dosname + i) <= 'Z') &&
- (p_dosname->name_case & 0x10))
- *(buf + n) = *(dosname + i) + ('a' - 'A');
- else
- *(buf + n) = *(dosname + i);
- }
- *(buf + n) = '\0';
-
- i = 0;
- j = 0;
- while (j < (MAX_NAME_LENGTH - 1)) {
- if (*(buf + i) == '\0')
- break;
-
- i += convert_ch_to_uni(nls, uniname, (buf + i), NULL);
-
- uniname++;
- j++;
- }
-
- *uniname = (u16)'\0';
-}
-
void nls_uniname_to_cstring(struct super_block *sb, u8 *p_cstring,
struct uni_name_t *p_uniname)
{
diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c
index 3b2b0ceb7297..6e481908c59f 100644
--- a/drivers/staging/exfat/exfat_super.c
+++ b/drivers/staging/exfat/exfat_super.c
@@ -26,7 +26,7 @@
#include <linux/sched.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
-
+#include <linux/random.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/mutex.h>
@@ -284,12 +284,12 @@ static const struct dentry_operations exfat_dentry_ops = {
.d_compare = exfat_cmp,
};
-static DEFINE_SEMAPHORE(z_sem);
+static DEFINE_MUTEX(z_mutex);
static inline void fs_sync(struct super_block *sb, bool do_sync)
{
if (do_sync)
- bdev_sync(sb);
+ exfat_bdev_sync(sb);
}
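
The z_sem to z_mutex change above is the conversion pattern applied throughout this file; in isolation it amounts to this sketch (hypothetical names):

static DEFINE_MUTEX(example_mutex);	/* replaces a binary semaphore */

static void critical_section(void)
{
	mutex_lock(&example_mutex);	/* was down() */
	/* ... mount/unmount work ... */
	mutex_unlock(&example_mutex);	/* was up() */
}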
/*
@@ -353,26 +353,28 @@ static int ffsMountVol(struct super_block *sb)
pr_info("[EXFAT] trying to mount...\n");
- down(&z_sem);
+ mutex_lock(&z_mutex);
- buf_init(sb);
+ exfat_buf_init(sb);
- sema_init(&p_fs->v_sem, 1);
+ mutex_init(&p_fs->v_mutex);
p_fs->dev_ejected = 0;
/* open the block device */
- bdev_open(sb);
+ exfat_bdev_open(sb);
if (p_bd->sector_size < sb->s_blocksize) {
- ret = FFS_MEDIAERR;
+ printk(KERN_INFO "EXFAT: mount failed - sector size %d less than blocksize %ld\n",
+ p_bd->sector_size, sb->s_blocksize);
+ ret = -EINVAL;
goto out;
}
if (p_bd->sector_size > sb->s_blocksize)
sb_set_blocksize(sb, p_bd->sector_size);
/* read Sector 0 */
- if (sector_read(sb, 0, &tmp_bh, 1) != FFS_SUCCESS) {
- ret = FFS_MEDIAERR;
+ if (sector_read(sb, 0, &tmp_bh, 1) != 0) {
+ ret = -EIO;
goto out;
}
@@ -383,8 +385,8 @@ static int ffsMountVol(struct super_block *sb)
/* check the validity of PBR */
if (GET16_A(p_pbr->signature) != PBR_SIGNATURE) {
brelse(tmp_bh);
- bdev_close(sb);
- ret = FFS_FORMATERR;
+ exfat_bdev_close(sb);
+ ret = -EFSCORRUPTED;
goto out;
}
@@ -394,16 +396,10 @@ static int ffsMountVol(struct super_block *sb)
break;
if (i < 53) {
-#ifdef CONFIG_EXFAT_DONT_MOUNT_VFAT
+ /* Not sure how we'd get here, but complain if we do */
ret = -EINVAL;
- printk(KERN_INFO "EXFAT: Attempted to mount VFAT filesystem\n");
+ pr_info("EXFAT: Attempted to mount VFAT filesystem\n");
goto out;
-#else
- if (GET16(p_pbr->bpb + 11)) /* num_fat_sectors */
- ret = fat16_mount(sb, p_pbr);
- else
- ret = fat32_mount(sb, p_pbr);
-#endif
} else {
ret = exfat_mount(sb, p_pbr);
}
@@ -411,38 +407,34 @@ static int ffsMountVol(struct super_block *sb)
brelse(tmp_bh);
if (ret) {
- bdev_close(sb);
+ exfat_bdev_close(sb);
goto out;
}
- if (p_fs->vol_type == EXFAT) {
- ret = load_alloc_bitmap(sb);
- if (ret) {
- bdev_close(sb);
- goto out;
- }
- ret = load_upcase_table(sb);
- if (ret) {
- free_alloc_bitmap(sb);
- bdev_close(sb);
- goto out;
- }
+ ret = load_alloc_bitmap(sb);
+ if (ret) {
+ exfat_bdev_close(sb);
+ goto out;
+ }
+ ret = load_upcase_table(sb);
+ if (ret) {
+ free_alloc_bitmap(sb);
+ exfat_bdev_close(sb);
+ goto out;
}
if (p_fs->dev_ejected) {
- if (p_fs->vol_type == EXFAT) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- }
- bdev_close(sb);
- ret = FFS_MEDIAERR;
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
+ exfat_bdev_close(sb);
+ ret = -EIO;
goto out;
}
pr_info("[EXFAT] mounted successfully\n");
out:
- up(&z_sem);
+ mutex_unlock(&z_mutex);
return ret;
}
@@ -450,39 +442,37 @@ out:
static int ffsUmountVol(struct super_block *sb)
{
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int err = FFS_SUCCESS;
+ int err = 0;
pr_info("[EXFAT] trying to unmount...\n");
- down(&z_sem);
+ mutex_lock(&z_mutex);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
- fs_sync(sb, false);
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
- if (p_fs->vol_type == EXFAT) {
- free_upcase_table(sb);
- free_alloc_bitmap(sb);
- }
+ free_upcase_table(sb);
+ free_alloc_bitmap(sb);
- FAT_release_all(sb);
- buf_release_all(sb);
+ exfat_fat_release_all(sb);
+ exfat_buf_release_all(sb);
/* close the block device */
- bdev_close(sb);
+ exfat_bdev_close(sb);
if (p_fs->dev_ejected) {
pr_info("[EXFAT] unmounted with media errors. Device is already ejected.\n");
- err = FFS_MEDIAERR;
+ err = -EIO;
}
- buf_shutdown(sb);
+ exfat_buf_shutdown(sb);
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
- up(&z_sem);
+ mutex_unlock(&p_fs->v_mutex);
+ mutex_unlock(&z_mutex);
pr_info("[EXFAT] unmounted successfully\n");
@@ -491,15 +481,15 @@ static int ffsUmountVol(struct super_block *sb)
static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
{
- int err = FFS_SUCCESS;
+ int err = 0;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* check the validity of pointer parameters */
if (!info)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (p_fs->used_clusters == UINT_MAX)
p_fs->used_clusters = p_fs->fs_func->count_used_clusters(sb);
@@ -511,31 +501,31 @@ static int ffsGetVolInfo(struct super_block *sb, struct vol_info_t *info)
info->FreeClusters = info->NumClusters - info->UsedClusters;
if (p_fs->dev_ejected)
- err = FFS_MEDIAERR;
+ err = -EIO;
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return err;
}
static int ffsSyncVol(struct super_block *sb, bool do_sync)
{
- int err = FFS_SUCCESS;
+ int err = 0;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* synchronize the file system */
fs_sync(sb, do_sync);
fs_set_vol_flags(sb, VOL_CLEAN);
if (p_fs->dev_ejected)
- err = FFS_MEDIAERR;
+ err = -EIO;
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return err;
}
@@ -559,10 +549,10 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -578,7 +568,7 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
dentry = p_fs->fs_func->find_dir_entry(sb, &dir, &uni_name, num_entries,
&dos_name, TYPE_ALL);
if (dentry < -1) {
- ret = FFS_NOTFOUND;
+ ret = -ENOENT;
goto out;
}
@@ -597,22 +587,13 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
fid->size = 0;
fid->start_clu = p_fs->root_dir;
} else {
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &dir, dentry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &dir, dentry, NULL);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
+ es = get_entry_set_in_dir(sb, &dir, dentry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
fid->type = p_fs->fs_func->get_entry_type(ep);
fid->rwoffset = 0;
@@ -628,15 +609,14 @@ static int ffsLookupFile(struct inode *inode, char *path, struct file_id_t *fid)
fid->start_clu = p_fs->fs_func->get_entry_clu0(ep2);
}
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
}
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -648,14 +628,14 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
struct uni_name_t uni_name;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
- int ret;
+ int ret = 0;
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -667,17 +647,17 @@ static int ffsCreateFile(struct inode *inode, char *path, u8 mode,
/* create a new file */
ret = create_file(inode, &dir, &uni_name, mode, fid);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -697,18 +677,18 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!buffer)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -721,7 +701,7 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
if (count == 0) {
if (rcount)
*rcount = 0;
- ret = FFS_EOF;
+ ret = 0;
goto out;
}
@@ -742,9 +722,11 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
}
while (clu_offset > 0) {
- /* clu = FAT_read(sb, clu); */
- if (FAT_read(sb, clu, &clu) == -1)
- return FFS_MEDIAERR;
+ /* clu = exfat_fat_read(sb, clu); */
+ if (exfat_fat_read(sb, clu, &clu) == -1) {
+ ret = -EIO;
+ goto out;
+ }
clu_offset--;
}
@@ -772,13 +754,13 @@ static int ffsReadFile(struct inode *inode, struct file_id_t *fid, void *buffer,
if ((offset == 0) && (oneblkread == p_bd->sector_size)) {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)buffer + read_bytes,
(char *)tmp_bh->b_data, (s32)oneblkread);
} else {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)buffer + read_bytes,
(char *)tmp_bh->b_data + offset,
@@ -797,11 +779,11 @@ err_out:
*rcount = read_bytes;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -814,7 +796,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
s32 num_clusters, num_alloc, num_alloced = (s32)~0;
int ret = 0;
u32 clu, last_clu;
- sector_t LogSector, sector = 0;
+ sector_t LogSector;
u64 oneblkwrite, write_bytes;
struct chain_t new_clu;
struct timestamp_t tm;
@@ -827,18 +809,18 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!buffer)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -848,7 +830,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (count == 0) {
if (wcount)
*wcount = 0;
- ret = FFS_SUCCESS;
+ ret = 0;
goto out;
}
@@ -864,7 +846,8 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
while (count > 0) {
clu_offset = (s32)(fid->rwoffset >> p_fs->cluster_size_bits);
- clu = last_clu = fid->start_clu;
+ clu = fid->start_clu;
+ last_clu = fid->start_clu;
if (fid->flags == 0x03) {
if ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
@@ -885,9 +868,9 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
while ((clu_offset > 0) && (clu != CLUSTER_32(~0))) {
last_clu = clu;
- /* clu = FAT_read(sb, clu); */
- if (FAT_read(sb, clu, &clu) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu = exfat_fat_read(sb, clu); */
+ if (exfat_fat_read(sb, clu, &clu) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -909,7 +892,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (num_alloced == 0)
break;
if (num_alloced < 0) {
- ret = FFS_MEDIAERR;
+ ret = num_alloced;
goto out;
}
@@ -928,7 +911,7 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
modified = true;
}
if (new_clu.flags == 0x01)
- FAT_write(sb, last_clu, new_clu.dir);
+ exfat_fat_write(sb, last_clu, new_clu.dir);
}
num_clusters += num_alloced;
@@ -957,12 +940,12 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if ((offset == 0) && (oneblkwrite == p_bd->sector_size)) {
if (sector_read(sb, LogSector, &tmp_bh, 0) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
memcpy((char *)tmp_bh->b_data,
(char *)buffer + write_bytes, (s32)oneblkwrite);
if (sector_write(sb, LogSector, tmp_bh, 0) !=
- FFS_SUCCESS) {
+ 0) {
brelse(tmp_bh);
goto err_out;
}
@@ -970,18 +953,18 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if ((offset > 0) ||
((fid->rwoffset + oneblkwrite) < fid->size)) {
if (sector_read(sb, LogSector, &tmp_bh, 1) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
} else {
if (sector_read(sb, LogSector, &tmp_bh, 0) !=
- FFS_SUCCESS)
+ 0)
goto err_out;
}
memcpy((char *)tmp_bh->b_data + offset,
(char *)buffer + write_bytes, (s32)oneblkwrite);
if (sector_write(sb, LogSector, tmp_bh, 0) !=
- FFS_SUCCESS) {
+ 0) {
brelse(tmp_bh);
goto err_out;
}
@@ -1002,25 +985,15 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
brelse(tmp_bh);
/* (3) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es)
- goto err_out;
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep)
- goto err_out;
- ep2 = ep;
- }
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es)
+ goto err_out;
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
p_fs->fs_func->set_entry_attr(ep, fid->attr);
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
-
if (modified) {
if (p_fs->fs_func->get_entry_flag(ep2) != fid->flags)
p_fs->fs_func->set_entry_flag(ep2, fid->flags);
@@ -1030,18 +1003,13 @@ static int ffsWriteFile(struct inode *inode, struct file_id_t *fid,
if (p_fs->fs_func->get_entry_clu0(ep2) != fid->start_clu)
p_fs->fs_func->set_entry_clu0(ep2, fid->start_clu);
-
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
}
- if (p_fs->vol_type == EXFAT) {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
@@ -1051,14 +1019,14 @@ err_out:
*wcount = write_bytes;
if (num_alloced == 0)
- ret = FFS_FULL;
+ ret = -ENOSPC;
else if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1068,7 +1036,6 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
s32 num_clusters;
u32 last_clu = CLUSTER_32(0);
int ret = 0;
- sector_t sector = 0;
struct chain_t clu;
struct timestamp_t tm;
struct dentry_t *ep, *ep2;
@@ -1081,11 +1048,11 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
new_size);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check if the given file ID is opened */
if (fid->type != TYPE_FILE) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
@@ -1095,7 +1062,7 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
if (old_size <= new_size) {
- ret = FFS_SUCCESS;
+ ret = 0;
goto out;
}
@@ -1114,8 +1081,8 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
} else {
while (num_clusters > 0) {
last_clu = clu.dir;
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
num_clusters--;
@@ -1133,22 +1100,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
}
/* (1) update the directory entry */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
- ep2 = ep;
- }
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_time(ep, tm_current(&tm), TM_MODIFY);
p_fs->fs_func->set_entry_attr(ep, fid->attr);
@@ -1159,17 +1117,13 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
p_fs->fs_func->set_entry_clu0(ep2, CLUSTER_32(0));
}
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
/* (2) cut off from the FAT chain */
if (last_clu != CLUSTER_32(0)) {
if (fid->flags == 0x01)
- FAT_write(sb, last_clu, CLUSTER_32(~0));
+ exfat_fat_write(sb, last_clu, CLUSTER_32(~0));
}
/* (3) free the clusters */
@@ -1180,18 +1134,18 @@ static int ffsTruncateFile(struct inode *inode, u64 old_size, u64 new_size)
if (fid->rwoffset > fid->size)
fid->rwoffset = fid->size;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
pr_debug("%s exited (%d)\n", __func__, ret);
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1232,14 +1186,14 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* check the validity of pointer parameters */
if (!new_path || (*new_path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
update_parent_info(fid, old_parent_inode);
@@ -1252,19 +1206,19 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
/* check if the old file is "." or ".." */
if (p_fs->vol_type != EXFAT) {
if ((olddir.dir != p_fs->root_dir) && (dentry < 2)) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out2;
}
}
ep = get_entry_in_dir(sb, &olddir, dentry, NULL);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out2;
}
if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out2;
}
@@ -1272,12 +1226,12 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
if (new_inode) {
u32 entry_type;
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
new_fid = &EXFAT_I(new_inode)->fid;
update_parent_info(new_fid, new_parent_inode);
- p_dir = &(new_fid->dir);
+ p_dir = &new_fid->dir;
new_entry = new_fid->entry;
ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
if (!ep)
@@ -1294,7 +1248,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
new_clu.flags = new_fid->flags;
if (!is_dir_empty(sb, &new_clu)) {
- ret = FFS_FILEEXIST;
+ ret = -EEXIST;
goto out;
}
}
@@ -1314,7 +1268,7 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
ret = move_file(new_parent_inode, &olddir, dentry, &newdir,
&uni_name, fid);
- if ((ret == FFS_SUCCESS) && new_inode) {
+ if ((ret == 0) && new_inode) {
/* delete entries of new_dir */
ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
if (!ep)
@@ -1328,16 +1282,16 @@ static int ffsMoveFile(struct inode *old_parent_inode, struct file_id_t *fid,
num_entries + 1);
}
out:
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out2:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1345,7 +1299,7 @@ out2:
static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
{
s32 dentry;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir, clu_to_free;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
@@ -1353,10 +1307,10 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
dir.dir = fid->dir.dir;
dir.size = fid->dir.size;
@@ -1366,12 +1320,12 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
ep = get_entry_in_dir(sb, &dir, dentry, NULL);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out;
}
if (p_fs->fs_func->get_entry_attr(ep) & ATTR_READONLY) {
- ret = FFS_PERMISSIONERR;
+ ret = -EPERM;
goto out;
}
fs_set_vol_flags(sb, VOL_DIRTY);
@@ -1390,16 +1344,16 @@ static int ffsRemoveFile(struct inode *inode, struct file_id_t *fid)
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1409,7 +1363,7 @@ out:
static int ffsSetAttr(struct inode *inode, u32 attr)
{
u32 type;
- int ret = FFS_SUCCESS;
+ int ret = 0;
sector_t sector = 0;
struct dentry_t *ep;
struct super_block *sb = inode->i_sb;
@@ -1420,36 +1374,28 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
if (fid->attr == attr) {
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
- return FFS_SUCCESS;
+ return -EIO;
+ return 0;
}
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
(fid->entry == -1)) {
if (p_fs->dev_ejected)
- return FFS_MEDIAERR;
- return FFS_SUCCESS;
+ return -EIO;
+ return 0;
}
}
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* get the directory entry of given file */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
type = p_fs->fs_func->get_entry_type(ep);
@@ -1457,12 +1403,11 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
if (((type == TYPE_FILE) && (attr & ATTR_SUBDIR)) ||
((type == TYPE_DIR) && (!(attr & ATTR_SUBDIR)))) {
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
else
- ret = FFS_ERROR;
+ ret = -EINVAL;
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
goto out;
}
@@ -1472,23 +1417,19 @@ static int ffsSetAttr(struct inode *inode, u32 attr)
fid->attr = attr;
p_fs->fs_func->set_entry_attr(ep, attr);
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1496,9 +1437,8 @@ out:
static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
{
- sector_t sector = 0;
s32 count;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir;
struct uni_name_t uni_name;
struct timestamp_t tm;
@@ -1512,7 +1452,7 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
pr_debug("%s entered\n", __func__);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
@@ -1541,35 +1481,25 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = FFS_MEDIAERR;
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs = count;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
}
}
/* get the directory entry of given file or directory */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_2_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
- buf_lock(sb, sector);
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_2_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
/* set FILE_INFO structure using the acquired struct dentry_t */
info->Attr = p_fs->fs_func->get_entry_attr(ep);
@@ -1594,31 +1524,19 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
memset((char *)&info->AccessTimestamp, 0, sizeof(struct date_time_t));
- *(uni_name.name) = 0x0;
+ *uni_name.name = 0x0;
/* XXX this is very bad for exfat cuz name is already included in es.
* API should be revised
*/
- p_fs->fs_func->get_uni_name_from_ext_entry(sb, &(fid->dir), fid->entry,
+ p_fs->fs_func->get_uni_name_from_ext_entry(sb, &fid->dir, fid->entry,
uni_name.name);
- if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
- get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
- &uni_name, 0x1);
nls_uniname_to_cstring(sb, info->Name, &uni_name);
- if (p_fs->vol_type == EXFAT) {
- info->NumSubdirs = 2;
- } else {
- buf_unlock(sb, sector);
- get_uni_name_from_dos_entry(sb, (struct dos_dentry_t *)ep,
- &uni_name, 0x0);
- nls_uniname_to_cstring(sb, info->ShortName, &uni_name);
- info->NumSubdirs = 0;
- }
+ info->NumSubdirs = 2;
info->Size = p_fs->fs_func->get_entry_size(ep2);
- if (p_fs->vol_type == EXFAT)
- release_entry_set(es);
+ release_entry_set(es);
if (is_dir) {
dir.dir = fid->start_clu;
@@ -1630,18 +1548,18 @@ static int ffsReadStat(struct inode *inode, struct dir_entry_t *info)
count = count_dos_name_entries(sb, &dir, TYPE_DIR);
if (count < 0) {
- ret = FFS_MEDIAERR;
+ ret = count; /* propagate error upward */
goto out;
}
info->NumSubdirs += count;
}
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
pr_debug("%s exited successfully\n", __func__);
return ret;
@@ -1649,8 +1567,7 @@ out:
static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
{
- sector_t sector = 0;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct timestamp_t tm;
struct dentry_t *ep, *ep2;
struct entry_set_cache_t *es = NULL;
@@ -1662,14 +1579,14 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
pr_debug("%s entered (inode %p info %p\n", __func__, inode, info);
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (is_dir) {
if ((fid->dir.dir == p_fs->root_dir) &&
(fid->entry == -1)) {
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
- ret = FFS_SUCCESS;
+ ret = -EIO;
+ ret = 0;
goto out;
}
}
@@ -1677,23 +1594,13 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
fs_set_vol_flags(sb, VOL_DIRTY);
/* get the directory entry of given file or directory */
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &(fid->dir), fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep + 1;
- } else {
- /* for other than exfat */
- ep = get_entry_in_dir(sb, &(fid->dir), fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- ep2 = ep;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ ep2 = ep + 1;
p_fs->fs_func->set_entry_attr(ep, info->Attr);
@@ -1716,19 +1623,15 @@ static int ffsWriteStat(struct inode *inode, struct dir_entry_t *info)
p_fs->fs_func->set_entry_size(ep2, info->Size);
- if (p_fs->vol_type != EXFAT) {
- buf_modify(sb, sector);
- } else {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
pr_debug("%s exited (%d)\n", __func__, ret);
@@ -1740,8 +1643,7 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
s32 num_clusters, num_alloced;
bool modified = false;
u32 last_clu;
- int ret = FFS_SUCCESS;
- sector_t sector = 0;
+ int ret = 0;
struct chain_t new_clu;
struct dentry_t *ep;
struct entry_set_cache_t *es = NULL;
@@ -1751,10 +1653,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* check the validity of pointer parameters */
if (!clu)
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
fid->rwoffset = (s64)(clu_offset) << p_fs->cluster_size_bits;
@@ -1785,8 +1687,8 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
while ((clu_offset > 0) && (*clu != CLUSTER_32(~0))) {
last_clu = *clu;
- if (FAT_read(sb, *clu, clu) == -1) {
- ret = FFS_MEDIAERR;
+ if (exfat_fat_read(sb, *clu, clu) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -1804,10 +1706,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
/* (1) allocate a cluster */
num_alloced = p_fs->fs_func->alloc_cluster(sb, 1, &new_clu);
if (num_alloced < 0) {
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
} else if (num_alloced == 0) {
- ret = FFS_FULL;
+ ret = -ENOSPC;
goto out;
}
@@ -1825,34 +1727,23 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
modified = true;
}
if (new_clu.flags == 0x01)
- FAT_write(sb, last_clu, new_clu.dir);
+ exfat_fat_write(sb, last_clu, new_clu.dir);
}
num_clusters += num_alloced;
*clu = new_clu.dir;
- if (p_fs->vol_type == EXFAT) {
- es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
- ES_ALL_ENTRIES, &ep);
- if (!es) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- /* get stream entry */
- ep++;
+ es = get_entry_set_in_dir(sb, &fid->dir, fid->entry,
+ ES_ALL_ENTRIES, &ep);
+ if (!es) {
+ ret = -ENOENT;
+ goto out;
}
+ /* get stream entry */
+ ep++;
/* (3) update directory entry */
if (modified) {
- if (p_fs->vol_type != EXFAT) {
- ep = get_entry_in_dir(sb, &(fid->dir),
- fid->entry, &sector);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- }
-
if (p_fs->fs_func->get_entry_flag(ep) != fid->flags)
p_fs->fs_func->set_entry_flag(ep, fid->flags);
@@ -1860,14 +1751,10 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
p_fs->fs_func->set_entry_clu0(ep,
fid->start_clu);
- if (p_fs->vol_type != EXFAT)
- buf_modify(sb, sector);
}
- if (p_fs->vol_type == EXFAT) {
- update_dir_checksum_with_entry_set(sb, es);
- release_entry_set(es);
- }
+ update_dir_checksum_with_entry_set(sb, es);
+ release_entry_set(es);
/* add number of new blocks to inode */
inode->i_blocks += num_alloced << (p_fs->cluster_size_bits - 9);
@@ -1878,11 +1765,11 @@ static int ffsMapCluster(struct inode *inode, s32 clu_offset, u32 *clu)
fid->hint_last_clu = *clu;
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1893,7 +1780,7 @@ out:
static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
{
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir;
struct uni_name_t uni_name;
struct super_block *sb = inode->i_sb;
@@ -1903,10 +1790,10 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
/* check the validity of pointer parameters */
if (!fid || !path || (*path == '\0'))
- return FFS_ERROR;
+ return -EINVAL;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
/* check the validity of directory name in the given old pathname */
ret = resolve_path(inode, path, &dir, &uni_name);
@@ -1917,16 +1804,16 @@ static int ffsCreateDir(struct inode *inode, char *path, struct file_id_t *fid)
ret = create_dir(inode, &dir, &uni_name, fid);
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -1934,7 +1821,7 @@ out:
static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
{
int i, dentry, clu_offset;
- int ret = FFS_SUCCESS;
+ int ret = 0;
s32 dentries_per_clu, dentries_per_clu_bits = 0;
u32 type;
sector_t sector;
@@ -1949,14 +1836,14 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
/* check the validity of pointer parameters */
if (!dir_entry)
- return FFS_ERROR;
+ return -EINVAL;
/* check if the given file ID is opened */
if (fid->type != TYPE_DIR)
- return FFS_PERMISSIONERR;
+ return -ENOTDIR;
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
if (fid->entry == -1) {
dir.dir = p_fs->root_dir;
@@ -2001,9 +1888,9 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
}
while (clu_offset > 0) {
- /* clu.dir = FAT_read(sb, clu.dir); */
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu.dir = exfat_fat_read(sb, clu.dir); */
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
clu_offset--;
@@ -2023,7 +1910,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
for ( ; i < dentries_per_clu; i++, dentry++) {
ep = get_entry_in_dir(sb, &clu, i, &sector);
if (!ep) {
- ret = FFS_MEDIAERR;
+ ret = -ENOENT;
goto out;
}
type = fs_func->get_entry_type(ep);
@@ -2034,7 +1921,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
if ((type != TYPE_FILE) && (type != TYPE_DIR))
continue;
- buf_lock(sb, sector);
+ exfat_buf_lock(sb, sector);
dir_entry->Attr = fs_func->get_entry_attr(ep);
fs_func->get_entry_time(ep, &tm, TM_CREATE);
@@ -2058,28 +1945,16 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
memset((char *)&dir_entry->AccessTimestamp, 0,
sizeof(struct date_time_t));
- *(uni_name.name) = 0x0;
+ *uni_name.name = 0x0;
fs_func->get_uni_name_from_ext_entry(sb, &dir, dentry,
uni_name.name);
- if (*uni_name.name == 0x0 && p_fs->vol_type != EXFAT)
- get_uni_name_from_dos_entry(sb,
- (struct dos_dentry_t *)ep,
- &uni_name, 0x1);
nls_uniname_to_cstring(sb, dir_entry->Name, &uni_name);
- buf_unlock(sb, sector);
+ exfat_buf_unlock(sb, sector);
- if (p_fs->vol_type == EXFAT) {
- ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
- if (!ep) {
- ret = FFS_MEDIAERR;
- goto out;
- }
- } else {
- get_uni_name_from_dos_entry(sb,
- (struct dos_dentry_t *)ep,
- &uni_name, 0x0);
- nls_uniname_to_cstring(sb, dir_entry->ShortName,
- &uni_name);
+ ep = get_entry_in_dir(sb, &clu, i + 1, NULL);
+ if (!ep) {
+ ret = -ENOENT;
+ goto out;
}
dir_entry->Size = fs_func->get_entry_size(ep);
@@ -2095,7 +1970,7 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
fid->rwoffset = (s64)(++dentry);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
goto out;
}
@@ -2108,24 +1983,24 @@ static int ffsReadDir(struct inode *inode, struct dir_entry_t *dir_entry)
else
clu.dir = CLUSTER_32(~0);
} else {
- /* clu.dir = FAT_read(sb, clu.dir); */
- if (FAT_read(sb, clu.dir, &clu.dir) == -1) {
- ret = FFS_MEDIAERR;
+ /* clu.dir = exfat_fat_read(sb, clu.dir); */
+ if (exfat_fat_read(sb, clu.dir, &clu.dir) == -1) {
+ ret = -EIO;
goto out;
}
}
}
- *(dir_entry->Name) = '\0';
+ *dir_entry->Name = '\0';
fid->rwoffset = (s64)(++dentry);
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -2133,14 +2008,14 @@ out:
static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
{
s32 dentry;
- int ret = FFS_SUCCESS;
+ int ret = 0;
struct chain_t dir, clu_to_free;
struct super_block *sb = inode->i_sb;
struct fs_info_t *p_fs = &(EXFAT_SB(sb)->fs_info);
/* check the validity of the given file id */
if (!fid)
- return FFS_INVALIDFID;
+ return -EINVAL;
dir.dir = fid->dir.dir;
dir.size = fid->dir.size;
@@ -2151,18 +2026,18 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
/* check if the file is "." or ".." */
if (p_fs->vol_type != EXFAT) {
if ((dir.dir != p_fs->root_dir) && (dentry < 2))
- return FFS_PERMISSIONERR;
+ return -EPERM;
}
/* acquire the lock for file system critical section */
- down(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
clu_to_free.dir = fid->start_clu;
clu_to_free.size = (s32)((fid->size - 1) >> p_fs->cluster_size_bits) + 1;
clu_to_free.flags = fid->flags;
if (!is_dir_empty(sb, &clu_to_free)) {
- ret = FFS_FILEEXIST;
+ ret = -ENOTEMPTY;
goto out;
}
@@ -2178,17 +2053,17 @@ static int ffsRemoveDir(struct inode *inode, struct file_id_t *fid)
fid->start_clu = CLUSTER_32(~0);
fid->flags = (p_fs->vol_type == EXFAT) ? 0x03 : 0x01;
-#ifdef CONFIG_EXFAT_DELAYED_SYNC
- fs_sync(sb, false);
+#ifndef CONFIG_EXFAT_DELAYED_SYNC
+ fs_sync(sb, true);
fs_set_vol_flags(sb, VOL_CLEAN);
#endif
if (p_fs->dev_ejected)
- ret = FFS_MEDIAERR;
+ ret = -EIO;
out:
/* release the lock for file system critical section */
- up(&p_fs->v_sem);
+ mutex_unlock(&p_fs->v_mutex);
return ret;
}
@@ -2202,7 +2077,7 @@ static int exfat_readdir(struct file *filp, struct dir_context *ctx)
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
struct bd_info_t *p_bd = &(EXFAT_SB(sb)->bd_info);
struct dir_entry_t de;
unsigned long inum;
@@ -2244,12 +2119,11 @@ get_new:
/* at least we tried to read a sector
* move cpos to next sector position (should be aligned)
*/
- if (err == FFS_MEDIAERR) {
+ if (err == -EIO) {
cpos += 1 << p_bd->sector_size_bits;
cpos &= ~((1 << p_bd->sector_size_bits) - 1);
}
- err = -EIO;
goto end_of_dir;
}
@@ -2293,7 +2167,7 @@ static int exfat_ioctl_volume_id(struct inode *dir)
{
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
return p_fs->vol_id;
}
@@ -2301,7 +2175,7 @@ static int exfat_ioctl_volume_id(struct inode *dir)
static long exfat_generic_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
-struct inode *inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = filp->f_path.dentry->d_inode;
#ifdef CONFIG_EXFAT_KERNEL_DEBUG
unsigned int flags;
#endif /* CONFIG_EXFAT_KERNEL_DEBUG */
@@ -2351,6 +2225,7 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2361,21 +2236,14 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
pr_debug("%s entered\n", __func__);
err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_REGULAR, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else if (err == FFS_NAMETOOLONG)
- err = -ENAMETOOLONG;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2389,7 +2257,10 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/*
* timestamp is already written, so mark_inode_dirty() is unnecessary.
*/
@@ -2522,6 +2393,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2531,22 +2403,22 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
EXFAT_I(inode)->fid.size = i_size_read(inode);
err = ffsRemoveFile(dir, &(EXFAT_I(inode)->fid));
- if (err) {
- if (err == FFS_PERMISSIONERR)
- err = -EPERM;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
exfat_detach(inode);
remove_inode_hash(inode);
@@ -2560,6 +2432,7 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
const char *target)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2572,32 +2445,22 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
pr_debug("%s entered\n", __func__);
err = ffsCreateFile(dir, (u8 *)dentry->d_name.name, FM_SYMLINK, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
err = ffsWriteFile(dir, &fid, (char *)target, len, &ret);
if (err) {
ffsRemoveFile(dir, &fid);
-
- if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
goto out;
}
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2611,7 +2474,10 @@ static int exfat_symlink(struct inode *dir, struct dentry *dentry,
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
EXFAT_I(inode)->target = kmemdup(target, len + 1, GFP_KERNEL);
@@ -2632,6 +2498,7 @@ out:
static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
struct inode *inode;
struct file_id_t fid;
loff_t i_pos;
@@ -2642,21 +2509,14 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
pr_debug("%s entered\n", __func__);
err = ffsCreateDir(dir, (u8 *)dentry->d_name.name, &fid);
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else if (err == FFS_NAMETOOLONG)
- err = -ENAMETOOLONG;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_ctime = curtime;
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2671,7 +2531,10 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
}
INC_IVERSION(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = GET_IVERSION(dentry->d_parent->d_inode);
@@ -2687,6 +2550,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2696,21 +2560,13 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
EXFAT_I(inode)->fid.size = i_size_read(inode);
err = ffsRemoveDir(dir, &(EXFAT_I(inode)->fid));
- if (err) {
- if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -ENOTEMPTY;
- else if (err == FFS_NOTFOUND)
- err = -ENOENT;
- else if (err == FFS_DIRBUSY)
- err = -EBUSY;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ curtime = current_time(dir);
+ dir->i_mtime = curtime;
+ dir->i_atime = curtime;
if (IS_DIRSYNC(dir))
(void)exfat_sync_inode(dir);
else
@@ -2718,7 +2574,9 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
exfat_detach(inode);
remove_inode_hash(inode);
@@ -2734,6 +2592,7 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
{
struct inode *old_inode, *new_inode;
struct super_block *sb = old_dir->i_sb;
+ struct timespec64 curtime;
loff_t i_pos;
int err;
@@ -2751,24 +2610,15 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
err = ffsMoveFile(old_dir, &(EXFAT_I(old_inode)->fid), new_dir,
new_dentry);
- if (err) {
- if (err == FFS_PERMISSIONERR)
- err = -EPERM;
- else if (err == FFS_INVALIDPATH)
- err = -EINVAL;
- else if (err == FFS_FILEEXIST)
- err = -EEXIST;
- else if (err == FFS_NOTFOUND)
- err = -ENOENT;
- else if (err == FFS_FULL)
- err = -ENOSPC;
- else
- err = -EIO;
+ if (err)
goto out;
- }
+
INC_IVERSION(new_dir);
- new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
- current_time(new_dir);
+ curtime = current_time(new_dir);
+ new_dir->i_ctime = curtime;
+ new_dir->i_mtime = curtime;
+ new_dir->i_atime = curtime;
+
if (IS_DIRSYNC(new_dir))
(void)exfat_sync_inode(new_dir);
else
@@ -2790,7 +2640,9 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
inc_nlink(new_dir);
}
INC_IVERSION(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ curtime = current_time(old_dir);
+ old_dir->i_ctime = curtime;
+ old_dir->i_mtime = curtime;
if (IS_DIRSYNC(old_dir))
(void)exfat_sync_inode(old_dir);
else
@@ -2814,13 +2666,16 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
{
struct address_space *mapping = inode->i_mapping;
loff_t start = i_size_read(inode), count = size - i_size_read(inode);
+ struct timespec64 curtime;
int err, err2;
err = generic_cont_expand_simple(inode, size);
if (err != 0)
return err;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_ctime = curtime;
+ inode->i_mtime = curtime;
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
@@ -2895,7 +2750,8 @@ static void exfat_truncate(struct inode *inode, loff_t old_size)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
+ struct timespec64 curtime;
int err;
__lock_super(sb);
@@ -2914,7 +2770,9 @@ static void exfat_truncate(struct inode *inode, loff_t old_size)
if (err)
goto out;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_ctime = curtime;
+ inode->i_mtime = curtime;
if (IS_DIRSYNC(inode))
(void)exfat_sync_inode(inode);
else
@@ -2936,8 +2794,8 @@ static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
pr_debug("%s entered\n", __func__);
- if ((attr->ia_valid & ATTR_SIZE)
- && (attr->ia_size > i_size_read(inode))) {
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size > i_size_read(inode)) {
error = exfat_cont_expand(inode, attr->ia_size);
if (error || attr->ia_valid == ATTR_SIZE)
return error;
@@ -2946,8 +2804,8 @@ static int exfat_setattr(struct dentry *dentry, struct iattr *attr)
ia_valid = attr->ia_valid;
- if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET))
- && exfat_allow_set_time(sbi, inode)) {
+ if ((ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) &&
+ exfat_allow_set_time(sbi, inode)) {
attr->ia_valid &= ~(ATTR_MTIME_SET |
ATTR_ATIME_SET |
ATTR_TIMES_SET);
@@ -3073,8 +2931,7 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
- struct bd_info_t *p_bd = &(sbi->bd_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
const unsigned long blocksize = sb->s_blocksize;
const unsigned char blocksize_bits = sb->s_blocksize_bits;
sector_t last_block;
@@ -3084,18 +2941,6 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
*phys = 0;
*mapped_blocks = 0;
- if ((p_fs->vol_type == FAT12) || (p_fs->vol_type == FAT16)) {
- if (inode->i_ino == EXFAT_ROOT_INO) {
- if (sector <
- (p_fs->dentries_in_root >>
- (p_bd->sector_size_bits - DENTRY_SIZE_BITS))) {
- *phys = sector + p_fs->root_start_sector;
- *mapped_blocks = 1;
- }
- return 0;
- }
- }
-
last_block = (i_size_read(inode) + (blocksize - 1)) >> blocksize_bits;
if (sector >= last_block) {
if (*create == 0)
@@ -3114,12 +2959,7 @@ static int exfat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
err = ffsMapCluster(inode, clu_offset, &cluster);
- if (err) {
- if (err == FFS_FULL)
- return -ENOSPC;
- else
- return -EIO;
- } else if (cluster != CLUSTER_32(~0)) {
+ if (!err && (cluster != CLUSTER_32(~0))) {
*phys = START_SECTOR(cluster) + sec_offset;
*mapped_blocks = p_fs->sectors_per_clu - sec_offset;
}
@@ -3215,6 +3055,7 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct file_id_t *fid = &(EXFAT_I(inode)->fid);
+ struct timespec64 curtime;
int err;
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
@@ -3223,7 +3064,9 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
exfat_write_failed(mapping, pos + len);
if (!(err < 0) && !(fid->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_ctime = curtime;
fid->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -3302,7 +3145,7 @@ static struct inode *exfat_iget(struct super_block *sb, loff_t i_pos)
static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
{
struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
struct dir_entry_t info;
memcpy(&(EXFAT_I(inode)->fid), fid, sizeof(struct file_id_t));
@@ -3314,7 +3157,7 @@ static int exfat_fill_inode(struct inode *inode, struct file_id_t *fid)
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
INC_IVERSION(inode);
- inode->i_generation = get_seconds();
+ inode->i_generation = prandom_u32();
if (info.Attr & ATTR_SUBDIR) { /* directory */
inode->i_generation &= ~1;
@@ -3501,7 +3344,7 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct vol_info_t info;
if (p_fs->used_clusters == UINT_MAX) {
- if (ffsGetVolInfo(sb, &info) == FFS_MEDIAERR)
+ if (ffsGetVolInfo(sb, &info) == -EIO)
return -EIO;
} else {
@@ -3674,7 +3517,8 @@ static int parse_options(char *options, int silent, int *debug,
opts->fs_uid = current_uid();
opts->fs_gid = current_gid();
- opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+ opts->fs_fmask = current->fs->umask;
+ opts->fs_dmask = current->fs->umask;
opts->allow_utime = U16_MAX;
opts->codepage = exfat_default_codepage;
opts->iocharset = exfat_default_iocharset;
@@ -3787,7 +3631,8 @@ static int exfat_read_root(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct fs_info_t *p_fs = &(sbi->fs_info);
+ struct fs_info_t *p_fs = &sbi->fs_info;
+ struct timespec64 curtime;
struct dir_entry_t info;
EXFAT_I(inode)->fid.dir.dir = p_fs->root_dir;
@@ -3818,7 +3663,10 @@ static int exfat_read_root(struct inode *inode)
EXFAT_I(inode)->mmu_private = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ curtime = current_time(inode);
+ inode->i_mtime = curtime;
+ inode->i_atime = curtime;
+ inode->i_ctime = curtime;
set_nlink(inode, info.NumSubdirs + 2);
return 0;
@@ -3838,7 +3686,6 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
struct exfat_sb_info *sbi;
int debug, ret;
long error;
- char buf[50];
/*
* GFP_KERNEL is ok here, because while we do hold the
@@ -3885,17 +3732,6 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
* if (FAT_FIRST_ENT(sb, media) != first)
*/
- /* codepage is not meaningful in exfat */
- if (sbi->fs_info.vol_type != EXFAT) {
- error = -EINVAL;
- sprintf(buf, "cp%d", sbi->options.codepage);
- sbi->nls_disk = load_nls(buf);
- if (!sbi->nls_disk) {
- pr_err("[EXFAT] Codepage %s not found\n", buf);
- goto out_fail2;
- }
- }
-
sbi->nls_io = load_nls(sbi->options.iocharset);
error = -ENOMEM;
@@ -3984,10 +3820,10 @@ static void exfat_debug_kill_sb(struct super_block *sb)
* invalidate_bdev drops all device cache include
* dirty. We use this to simulate device removal.
*/
- down(&p_fs->v_sem);
- FAT_release_all(sb);
- buf_release_all(sb);
- up(&p_fs->v_sem);
+ mutex_lock(&p_fs->v_mutex);
+ exfat_fat_release_all(sb);
+ exfat_buf_release_all(sb);
+ mutex_unlock(&p_fs->v_mutex);
invalidate_bdev(bdev);
}
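
The exfat hunks above all apply one mechanical conversion: the driver's private FFS_* status codes (FFS_SUCCESS, FFS_MEDIAERR, FFS_FULL, ...) become standard negative errno values (0, -EIO, -ENOSPC, ...), and the v_sem semaphore taken around every filesystem critical section becomes the v_mutex mutex. A minimal sketch of the resulting convention, with an invented example_fs structure standing in for the driver's fs_info_t (only mutex_lock/mutex_unlock and the errno values are taken from the patch):

#include <linux/errno.h>
#include <linux/mutex.h>

struct example_fs {			/* illustrative stand-in for fs_info_t */
	struct mutex v_mutex;
	bool entry_found;
	bool dev_ejected;
};

static int example_locked_op(struct example_fs *fs)
{
	int ret = 0;			/* 0 replaces FFS_SUCCESS */

	mutex_lock(&fs->v_mutex);	/* was: down(&fs->v_sem) */
	if (!fs->entry_found) {
		ret = -ENOENT;		/* was: FFS_MEDIAERR and friends */
		goto out;
	}
	if (fs->dev_ejected)
		ret = -EIO;		/* was: FFS_MEDIAERR */
out:
	mutex_unlock(&fs->v_mutex);	/* was: up(&fs->v_sem) */
	return ret;
}

With errno returns in place, callers such as exfat_create() and exfat_unlink() above can drop their FFS_*-to-errno translation blocks and simply test: if (err) goto out;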
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index cb61c2a772bd..dad1ddcd7b0c 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
- depends on FB && SPI && OF
+ depends on FB && SPI
depends on GPIOLIB || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -95,8 +95,8 @@ config FB_TFT_PCD8544
Generic Framebuffer support for PCD8544
config FB_TFT_RA8875
- tristate "FB driver for the RA8875 LCD Controller"
- depends on FB_TFT
+ tristate "FB driver for the RA8875 LCD Controller"
+ depends on FB_TFT
help
Generic Framebuffer support for RA8875
@@ -112,6 +112,13 @@ config FB_TFT_S6D1121
help
Generic Framebuffer support for S6D1121
+config FB_TFT_SEPS525
+ tristate "FB driver for the SEPS525 LCD Controller"
+ depends on FB_TFT
+ help
+ Generic Framebuffer support for SEPS525
+ Say Y if you have such a display that utilizes this controller.
+
config FB_TFT_SH1106
tristate "FB driver for the SH1106 OLED Controller"
depends on FB_TFT
@@ -125,10 +132,10 @@ config FB_TFT_SSD1289
Framebuffer support for SSD1289
config FB_TFT_SSD1305
- tristate "FB driver for the SSD1305 OLED Controller"
- depends on FB_TFT
- help
- Framebuffer support for SSD1305
+ tristate "FB driver for the SSD1305 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1305
config FB_TFT_SSD1306
tristate "FB driver for the SSD1306 OLED Controller"
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 27af43f32f81..e87193f7df14 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_FB_TFT_PCD8544) += fb_pcd8544.o
obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
+obj-$(CONFIG_FB_TFT_SEPS525) += fb_seps525.o
obj-$(CONFIG_FB_TFT_SH1106) += fb_sh1106.o
obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o
diff --git a/drivers/staging/fbtft/fb_seps525.c b/drivers/staging/fbtft/fb_seps525.c
new file mode 100644
index 000000000000..05882e2cde7f
--- /dev/null
+++ b/drivers/staging/fbtft/fb_seps525.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FB driver for the NHD-1.69-160128UGC3 (Newhaven Display International, Inc.)
+ * using the SEPS525 (Syncoam) LCD Controller
+ *
+ * Copyright (C) 2016 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_seps525"
+#define WIDTH 160
+#define HEIGHT 128
+
+#define SEPS525_INDEX 0x00
+#define SEPS525_STATUS_RD 0x01
+#define SEPS525_OSC_CTL 0x02
+#define SEPS525_IREF 0x80
+#define SEPS525_CLOCK_DIV 0x03
+#define SEPS525_REDUCE_CURRENT 0x04
+#define SEPS525_SOFT_RST 0x05
+#define SEPS525_DISP_ONOFF 0x06
+#define SEPS525_PRECHARGE_TIME_R 0x08
+#define SEPS525_PRECHARGE_TIME_G 0x09
+#define SEPS525_PRECHARGE_TIME_B 0x0A
+#define SEPS525_PRECHARGE_CURRENT_R 0x0B
+#define SEPS525_PRECHARGE_CURRENT_G 0x0C
+#define SEPS525_PRECHARGE_CURRENT_B 0x0D
+#define SEPS525_DRIVING_CURRENT_R 0x10
+#define SEPS525_DRIVING_CURRENT_G 0x11
+#define SEPS525_DRIVING_CURRENT_B 0x12
+#define SEPS525_DISPLAYMODE_SET 0x13
+#define SEPS525_RGBIF 0x14
+#define SEPS525_RGB_POL 0x15
+#define SEPS525_MEMORY_WRITEMODE 0x16
+#define SEPS525_MX1_ADDR 0x17
+#define SEPS525_MX2_ADDR 0x18
+#define SEPS525_MY1_ADDR 0x19
+#define SEPS525_MY2_ADDR 0x1A
+#define SEPS525_MEMORY_ACCESS_POINTER_X 0x20
+#define SEPS525_MEMORY_ACCESS_POINTER_Y 0x21
+#define SEPS525_DDRAM_DATA_ACCESS_PORT 0x22
+#define SEPS525_GRAY_SCALE_TABLE_INDEX 0x50
+#define SEPS525_GRAY_SCALE_TABLE_DATA 0x51
+#define SEPS525_DUTY 0x28
+#define SEPS525_DSL 0x29
+#define SEPS525_D1_DDRAM_FAC 0x2E
+#define SEPS525_D1_DDRAM_FAR 0x2F
+#define SEPS525_D2_DDRAM_SAC 0x31
+#define SEPS525_D2_DDRAM_SAR 0x32
+#define SEPS525_SCR1_FX1 0x33
+#define SEPS525_SCR1_FX2 0x34
+#define SEPS525_SCR1_FY1 0x35
+#define SEPS525_SCR1_FY2 0x36
+#define SEPS525_SCR2_SX1 0x37
+#define SEPS525_SCR2_SX2 0x38
+#define SEPS525_SCR2_SY1 0x39
+#define SEPS525_SCR2_SY2 0x3A
+#define SEPS525_SCREEN_SAVER_CONTEROL 0x3B
+#define SEPS525_SS_SLEEP_TIMER 0x3C
+#define SEPS525_SCREEN_SAVER_MODE 0x3D
+#define SEPS525_SS_SCR1_FU 0x3E
+#define SEPS525_SS_SCR1_MXY 0x3F
+#define SEPS525_SS_SCR2_FU 0x40
+#define SEPS525_SS_SCR2_MXY 0x41
+#define SEPS525_MOVING_DIRECTION 0x42
+#define SEPS525_SS_SCR2_SX1 0x47
+#define SEPS525_SS_SCR2_SX2 0x48
+#define SEPS525_SS_SCR2_SY1 0x49
+#define SEPS525_SS_SCR2_SY2 0x4A
+
+/* SEPS525_DISPLAYMODE_SET */
+#define MODE_SWAP_BGR BIT(7)
+#define MODE_SM BIT(6)
+#define MODE_RD BIT(5)
+#define MODE_CD BIT(4)
+
+#define seps525_use_window 0 /* FBTFT doesn't really use it today */
+
+/* Init sequence taken from: Arduino Library for the Adafruit 2.2" display */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ usleep_range(1000, 5000);
+
+ /* Disable Oscillator Power Down */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x03);
+ usleep_range(1000, 5000);
+ /* Set Normal Driving Current */
+ write_reg(par, SEPS525_REDUCE_CURRENT, 0x00);
+ usleep_range(1000, 5000);
+
+ write_reg(par, SEPS525_SCREEN_SAVER_CONTEROL, 0x00);
+ /* Set EXPORT1 Pin at Internal Clock */
+ write_reg(par, SEPS525_OSC_CTL, 0x01);
+ /* Set Clock as 120 Frames/Sec */
+ write_reg(par, SEPS525_CLOCK_DIV, 0x90);
+ /* Set Reference Voltage Controlled by External Resister */
+ write_reg(par, SEPS525_IREF, 0x01);
+
+ /* precharge time R G B */
+ write_reg(par, SEPS525_PRECHARGE_TIME_R, 0x04);
+ write_reg(par, SEPS525_PRECHARGE_TIME_G, 0x05);
+ write_reg(par, SEPS525_PRECHARGE_TIME_B, 0x05);
+
+ /* precharge current R G B (uA) */
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_R, 0x9D);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_G, 0x8C);
+ write_reg(par, SEPS525_PRECHARGE_CURRENT_B, 0x57);
+
+ /* driving current R G B (uA) */
+ write_reg(par, SEPS525_DRIVING_CURRENT_R, 0x56);
+ write_reg(par, SEPS525_DRIVING_CURRENT_G, 0x4D);
+ write_reg(par, SEPS525_DRIVING_CURRENT_B, 0x46);
+ /* Set Color Sequence */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, 0xA0);
+ write_reg(par, SEPS525_RGBIF, 0x01); /* Set MCU Interface Mode */
+ /* Set Memory Write Mode */
+ write_reg(par, SEPS525_MEMORY_WRITEMODE, 0x66);
+ write_reg(par, SEPS525_DUTY, 0x7F); /* 1/128 Duty (0x0F~0x7F) */
+ /* Set Mapping RAM Display Start Line (0x00~0x7F) */
+ write_reg(par, SEPS525_DSL, 0x00);
+ write_reg(par, SEPS525_DISP_ONOFF, 0x01); /* Display On (0x00/0x01) */
+ /* Set All Internal Register Value as Normal Mode */
+ write_reg(par, SEPS525_SOFT_RST, 0x00);
+ /* Set RGB Interface Polarity as Active Low */
+ write_reg(par, SEPS525_RGB_POL, 0x00);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ if (seps525_use_window) {
+ /* Set Window Xs,Ys Xe,Ye */
+ write_reg(par, SEPS525_MX1_ADDR, xs);
+ write_reg(par, SEPS525_MX2_ADDR, xe);
+ write_reg(par, SEPS525_MY1_ADDR, ys);
+ write_reg(par, SEPS525_MY2_ADDR, ye);
+ }
+ /* start position X,Y */
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_X, xs);
+ write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_Y, ys);
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+}
+
+static int set_var(struct fbtft_par *par)
+{
+ u8 val;
+
+ switch (par->info->var.rotate) {
+ case 0:
+ val = 0;
+ break;
+ case 180:
+ val = MODE_RD | MODE_CD;
+ break;
+ case 90:
+ case 270:
+ default:
+ return -EINVAL;
+ }
+ /* Memory Access Control */
+ write_reg(par, SEPS525_DISPLAYMODE_SET, val |
+ (par->bgr ? MODE_SWAP_BGR : 0));
+
+ write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
+
+ return 0;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .fbtftops = {
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .set_var = set_var,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "syncoam,seps525", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:seps525");
+MODULE_ALIAS("platform:seps525");
+
+MODULE_DESCRIPTION("FB driver for the SEPS525 LCD Controller");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_LICENSE("GPL");
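
The new driver ends with FBTFT_REGISTER_DRIVER(DRVNAME, "syncoam,seps525", &display), which (per the fbtft.h hunks later in this patch) registers both an SPI and a platform driver, and the MODULE_ALIAS lines let the module auto-load on either bus. For a board without device tree, one could in principle instantiate the panel from board code; a hedged sketch, in which every field value is an assumption rather than something this patch specifies:

#include <linux/spi/spi.h>
#include "fbtft.h"

/* All values illustrative; consult the panel wiring for real ones. */
static struct fbtft_platform_data seps525_pdata = {
	.display = {
		.buswidth = 8,		/* assumption */
	},
	.rotate = 0,
	.bgr = false,
};

static struct spi_board_info seps525_board_info __initdata = {
	.modalias = "fb_seps525",	/* matches MODULE_ALIAS("spi:" DRVNAME) */
	.max_speed_hz = 32000000,	/* assumption */
	.bus_num = 0,			/* assumption */
	.chip_select = 0,		/* assumption */
	.platform_data = &seps525_pdata,
};
/* Call spi_register_board_info(&seps525_board_info, 1) from board init. */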
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 65681d0fe200..e763205e9e4f 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -91,7 +91,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x2C | (pump & 0x03));
/* Set inverse display */
- write_reg(par, 0xA6 | (0x01 & 0x01));
+ write_reg(par, 0xA6 | 0x01);
/* Set 4-bit grayscale mode */
write_reg(par, 0xD0 | (0x02 & 0x03));
@@ -157,8 +157,8 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
@@ -171,11 +171,11 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
+ | (0x1 << 2) /* Mirror Y ON */
| (0x0 & 0x1) << 1 /* Mirror X OFF */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
@@ -183,13 +183,13 @@ static int set_var(struct fbtft_par *par)
/* Set RAM address control */
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
- | (0x1 & 0x1) << 1 /* Increment page first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | (0x1 << 1) /* Increment page first */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
- | (0x1 & 0x1) << 2 /* Mirror Y ON */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 2) /* Mirror Y ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
default:
@@ -197,12 +197,12 @@ static int set_var(struct fbtft_par *par)
write_reg(par, 0x88
| (0x0 & 0x1) << 2 /* Increment positively */
| (0x0 & 0x1) << 1 /* Increment column first */
- | (0x1 & 0x1)); /* Wrap around (default) */
+ | 0x1); /* Wrap around (default) */
/* Set LCD mapping */
write_reg(par, 0xC0
| (0x0 & 0x1) << 2 /* Mirror Y OFF */
- | (0x1 & 0x1) << 1 /* Mirror X ON */
+ | (0x1 << 1) /* Mirror X ON */
| (0x0 & 0x1)); /* MS nibble last (default) */
break;
}
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index a0a67aa517f0..ffb84987dd86 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -22,8 +22,9 @@
#include <linux/uaccess.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
-#include <linux/of.h>
+
#include <video/mipi_display.h>
#include "fbtft.h"
@@ -70,7 +71,6 @@ void fbtft_dbg_hex(const struct device *dev, int groupsize,
}
EXPORT_SYMBOL(fbtft_dbg_hex);
-#ifdef CONFIG_OF
static int fbtft_request_one_gpio(struct fbtft_par *par,
const char *name, int index,
struct gpio_desc **gpiop)
@@ -92,14 +92,11 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
return ret;
}
-static int fbtft_request_gpios_dt(struct fbtft_par *par)
+static int fbtft_request_gpios(struct fbtft_par *par)
{
int i;
int ret;
- if (!par->info->device->of_node)
- return -EINVAL;
-
ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
if (ret)
return ret;
@@ -135,7 +132,6 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
return 0;
}
-#endif
#ifdef CONFIG_FB_BACKLIGHT
static int fbtft_backlight_update_status(struct backlight_device *bd)
@@ -317,7 +313,7 @@ static void fbtft_mkdirty(struct fb_info *info, int y, int height)
/* special case, needed ? */
if (y == -1) {
y = 0;
- height = info->var.yres - 1;
+ height = info->var.yres;
}
/* Mark display lines/area as dirty */
@@ -529,6 +525,7 @@ static void fbtft_merge_fbtftops(struct fbtft_ops *dst, struct fbtft_ops *src)
*
* @display: pointer to structure describing the display
* @dev: pointer to the device for this fb, this can be NULL
+ * @pdata: platform data for the display in use
*
* Creates a new frame buffer info structure.
*
@@ -666,7 +663,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
fbdefio->deferred_io = fbtft_deferred_io;
fb_deferred_io_init(info);
- strncpy(info->fix.id, dev->driver->name, 16);
+ snprintf(info->fix.id, sizeof(info->fix.id), "%s", dev->driver->name);
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.xpanstep = 0;
@@ -897,46 +894,54 @@ int fbtft_unregister_framebuffer(struct fb_info *fb_info)
}
EXPORT_SYMBOL(fbtft_unregister_framebuffer);
-#ifdef CONFIG_OF
/**
- * fbtft_init_display_dt() - Device Tree init_display() function
+ * fbtft_init_display_from_property() - Device Tree init_display() function
* @par: Driver data
*
* Return: 0 if successful, negative if error
*/
-static int fbtft_init_display_dt(struct fbtft_par *par)
+static int fbtft_init_display_from_property(struct fbtft_par *par)
{
- struct device_node *node = par->info->device->of_node;
- struct property *prop;
- const __be32 *p;
+ struct device *dev = par->info->device;
+ int buf[64], count, index, i, j, ret;
+ u32 *values;
u32 val;
- int buf[64], i, j;
- if (!node)
+ count = device_property_count_u32(dev, "init");
+ if (count < 0)
+ return count;
+ if (count == 0)
return -EINVAL;
- prop = of_find_property(node, "init", NULL);
- p = of_prop_next_u32(prop, NULL, &val);
- if (!p)
- return -EINVAL;
+ values = kmalloc_array(count, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = device_property_read_u32_array(dev, "init", values, count);
+ if (ret)
+ goto out_free;
par->fbtftops.reset(par);
if (par->gpio.cs)
gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
- while (p) {
+ index = -1;
+ while (index < count) {
+ val = values[++index];
+
if (val & FBTFT_OF_INIT_CMD) {
val &= 0xFFFF;
i = 0;
- while (p && !(val & 0xFFFF0000)) {
+ while ((index < count) && !(val & 0xFFFF0000)) {
if (i > 63) {
- dev_err(par->info->device,
+ dev_err(dev,
"%s: Maximum register values exceeded\n",
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free;
}
buf[i++] = val;
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
}
/* make debug message */
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
@@ -966,17 +971,18 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"init: msleep(%u)\n", val & 0xFFFF);
msleep(val & 0xFFFF);
- p = of_prop_next_u32(prop, p, &val);
+ val = values[++index];
} else {
- dev_err(par->info->device, "illegal init value 0x%X\n",
- val);
- return -EINVAL;
+ dev_err(dev, "illegal init value 0x%X\n", val);
+ ret = -EINVAL;
+ goto out_free;
}
}
- return 0;
+out_free:
+ kfree(values);
+ return ret;
}
-#endif
/**
* fbtft_init_display() - Generic init_display() function
@@ -1137,27 +1143,25 @@ static int fbtft_verify_gpios(struct fbtft_par *par)
return 0;
}
-#ifdef CONFIG_OF
/* returns 0 if the property is not present */
-static u32 fbtft_of_value(struct device_node *node, const char *propname)
+static u32 fbtft_property_value(struct device *dev, const char *propname)
{
int ret;
u32 val = 0;
- ret = of_property_read_u32(node, propname, &val);
+ ret = device_property_read_u32(dev, propname, &val);
if (ret == 0)
- pr_info("%s: %s = %u\n", __func__, propname, val);
+ dev_info(dev, "%s: %s = %u\n", __func__, propname, val);
return val;
}
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
+static struct fbtft_platform_data *fbtft_properties_read(struct device *dev)
{
- struct device_node *node = dev->of_node;
struct fbtft_platform_data *pdata;
- if (!node) {
- dev_err(dev, "Missing platform data or DT\n");
+ if (!dev_fwnode(dev)) {
+ dev_err(dev, "Missing platform data or properties\n");
return ERR_PTR(-EINVAL);
}
@@ -1165,35 +1169,28 @@ static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- pdata->display.width = fbtft_of_value(node, "width");
- pdata->display.height = fbtft_of_value(node, "height");
- pdata->display.regwidth = fbtft_of_value(node, "regwidth");
- pdata->display.buswidth = fbtft_of_value(node, "buswidth");
- pdata->display.backlight = fbtft_of_value(node, "backlight");
- pdata->display.bpp = fbtft_of_value(node, "bpp");
- pdata->display.debug = fbtft_of_value(node, "debug");
- pdata->rotate = fbtft_of_value(node, "rotate");
- pdata->bgr = of_property_read_bool(node, "bgr");
- pdata->fps = fbtft_of_value(node, "fps");
- pdata->txbuflen = fbtft_of_value(node, "txbuflen");
- pdata->startbyte = fbtft_of_value(node, "startbyte");
- of_property_read_string(node, "gamma", (const char **)&pdata->gamma);
-
- if (of_find_property(node, "led-gpios", NULL))
+ pdata->display.width = fbtft_property_value(dev, "width");
+ pdata->display.height = fbtft_property_value(dev, "height");
+ pdata->display.regwidth = fbtft_property_value(dev, "regwidth");
+ pdata->display.buswidth = fbtft_property_value(dev, "buswidth");
+ pdata->display.backlight = fbtft_property_value(dev, "backlight");
+ pdata->display.bpp = fbtft_property_value(dev, "bpp");
+ pdata->display.debug = fbtft_property_value(dev, "debug");
+ pdata->rotate = fbtft_property_value(dev, "rotate");
+ pdata->bgr = device_property_read_bool(dev, "bgr");
+ pdata->fps = fbtft_property_value(dev, "fps");
+ pdata->txbuflen = fbtft_property_value(dev, "txbuflen");
+ pdata->startbyte = fbtft_property_value(dev, "startbyte");
+ device_property_read_string(dev, "gamma", (const char **)&pdata->gamma);
+
+ if (device_property_present(dev, "led-gpios"))
pdata->display.backlight = 1;
- if (of_find_property(node, "init", NULL))
- pdata->display.fbtftops.init_display = fbtft_init_display_dt;
- pdata->display.fbtftops.request_gpios = fbtft_request_gpios_dt;
+ if (device_property_present(dev, "init"))
+ pdata->display.fbtftops.init_display = fbtft_init_display_from_property;
+ pdata->display.fbtftops.request_gpios = fbtft_request_gpios;
return pdata;
}
-#else
-static struct fbtft_platform_data *fbtft_probe_dt(struct device *dev)
-{
- dev_err(dev, "Missing platform data\n");
- return ERR_PTR(-EINVAL);
-}
-#endif
/**
* fbtft_probe_common() - Generic device probe() helper function
@@ -1227,7 +1224,7 @@ int fbtft_probe_common(struct fbtft_display *display,
pdata = dev->platform_data;
if (!pdata) {
- pdata = fbtft_probe_dt(dev);
+ pdata = fbtft_properties_read(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
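
The fbtft changes above replace the OF-only of_property_*()/of_prop_next_u32() calls with the unified device property API, so the same probe path serves DT, ACPI and software-node platforms. A minimal sketch of the array-read pattern used for the "init" sequence (the "init" property name matches the driver's; everything else here is illustrative):

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

static int example_read_init_sequence(struct device *dev)
{
	u32 *values;
	int count, ret, i;

	/* Counts elements without reading them */
	count = device_property_count_u32(dev, "init");
	if (count <= 0)
		return count ? count : -ENODATA;

	values = kcalloc(count, sizeof(*values), GFP_KERNEL);
	if (!values)
		return -ENOMEM;

	ret = device_property_read_u32_array(dev, "init", values, count);
	if (ret)
		goto out_free;

	for (i = 0; i < count; i++)
		dev_dbg(dev, "init[%d] = 0x%08x\n", i, values[i]);

out_free:
	kfree(values);
	return ret;
}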
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 9b6bdb62093d..5f782da51959 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -309,7 +309,7 @@ MODULE_DEVICE_TABLE(of, dt_ids); \
static struct spi_driver fbtft_driver_spi_driver = { \
.driver = { \
.name = _name, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_spi, \
.remove = fbtft_driver_remove_spi, \
@@ -319,7 +319,7 @@ static struct platform_driver fbtft_driver_platform_driver = { \
.driver = { \
.name = _name, \
.owner = THIS_MODULE, \
- .of_match_table = of_match_ptr(dt_ids), \
+ .of_match_table = dt_ids, \
}, \
.probe = fbtft_driver_probe_pdev, \
.remove = fbtft_driver_remove_pdev, \
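
Dropping of_match_ptr() in the fbtft.h macros keeps dt_ids referenced even when CONFIG_OF is disabled, avoiding a defined-but-unused table and allowing ACPI to match via the PRP0001 mechanism. A sketch of the resulting pattern, with illustrative names:

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-display" },	/* illustrative */
	{ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);

static struct spi_driver example_spi_driver = {
	.driver = {
		.name = "example-display",
		/* referenced directly; no of_match_ptr() wrapper */
		.of_match_table = example_dt_ids,
	},
};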
diff --git a/drivers/staging/fieldbus/anybuss/anybuss-client.h b/drivers/staging/fieldbus/anybuss/anybuss-client.h
index 0c4b6a1ffe10..8ee1f1baccf1 100644
--- a/drivers/staging/fieldbus/anybuss/anybuss-client.h
+++ b/drivers/staging/fieldbus/anybuss/anybuss-client.h
@@ -12,6 +12,9 @@
#include <linux/types.h>
#include <linux/poll.h>
+/* move to <linux/fieldbus_dev.h> when taking this out of staging */
+#include "../fieldbus_dev.h"
+
struct anybuss_host;
struct anybuss_client {
@@ -61,12 +64,6 @@ anybuss_set_drvdata(struct anybuss_client *client, void *data)
int anybuss_set_power(struct anybuss_client *client, bool power_on);
-enum anybuss_offl_mode {
- AB_OFFL_MODE_CLEAR = 0,
- AB_OFFL_MODE_FREEZE,
- AB_OFFL_MODE_SET
-};
-
struct anybuss_memcfg {
u16 input_io;
u16 input_dpram;
@@ -76,7 +73,7 @@ struct anybuss_memcfg {
u16 output_dpram;
u16 output_total;
- enum anybuss_offl_mode offl_mode;
+ enum fieldbus_dev_offl_mode offl_mode;
};
int anybuss_start_init(struct anybuss_client *client,
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 2ecffa42e561..5b8d0bae9ff3 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -127,12 +127,10 @@ static const struct regmap_config arcx_regmap_cfg = {
static struct regmap *create_parallel_regmap(struct platform_device *pdev,
int idx)
{
- struct resource *res;
void __iomem *base;
struct device *dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, idx + 1);
if (IS_ERR(base))
return ERR_CAST(base);
return devm_regmap_init_mmio(dev, base, &arcx_regmap_cfg);
@@ -230,7 +228,6 @@ static int controller_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev *regulator;
int err, id;
- struct resource *res;
struct anybuss_host *host;
u8 status1, cap;
@@ -244,8 +241,7 @@ static int controller_probe(struct platform_device *pdev)
return PTR_ERR(cd->reset_gpiod);
/* CPLD control memory, sits at index 0 */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cd->cpld_base = devm_ioremap_resource(dev, res);
+ cd->cpld_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cd->cpld_base)) {
dev_err(dev,
"failed to map cpld base address\n");
diff --git a/drivers/staging/fieldbus/anybuss/hms-profinet.c b/drivers/staging/fieldbus/anybuss/hms-profinet.c
index 5446843e35f4..31c43a0a5776 100644
--- a/drivers/staging/fieldbus/anybuss/hms-profinet.c
+++ b/drivers/staging/fieldbus/anybuss/hms-profinet.c
@@ -96,7 +96,7 @@ static int __profi_enable(struct profi_priv *priv)
.output_io = 220,
.output_dpram = PROFI_DPRAM_SIZE,
.output_total = PROFI_DPRAM_SIZE,
- .offl_mode = AB_OFFL_MODE_CLEAR,
+ .offl_mode = FIELDBUS_DEV_OFFL_MODE_CLEAR,
};
/*
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index f69dc4930457..549cb7d51af8 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1022,13 +1022,13 @@ int anybuss_start_init(struct anybuss_client *client,
};
switch (cfg->offl_mode) {
- case AB_OFFL_MODE_CLEAR:
+ case FIELDBUS_DEV_OFFL_MODE_CLEAR:
op_mode = 0;
break;
- case AB_OFFL_MODE_FREEZE:
+ case FIELDBUS_DEV_OFFL_MODE_FREEZE:
op_mode = OP_MODE_FBFC;
break;
- case AB_OFFL_MODE_SET:
+ case FIELDBUS_DEV_OFFL_MODE_SET:
op_mode = OP_MODE_FBS;
break;
default:
diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c
index f6f5b92ba914..1ba0234cc60d 100644
--- a/drivers/staging/fieldbus/dev_core.c
+++ b/drivers/staging/fieldbus/dev_core.c
@@ -23,9 +23,6 @@ static dev_t fieldbus_devt;
static DEFINE_IDA(fieldbus_ida);
static DEFINE_MUTEX(fieldbus_mtx);
-static const char ctrl_enabled[] = "enabled";
-static const char ctrl_disabled[] = "disabled";
-
static ssize_t online_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/staging/fieldbus/fieldbus_dev.h b/drivers/staging/fieldbus/fieldbus_dev.h
index a10fc3b446dc..301dca3b8d71 100644
--- a/drivers/staging/fieldbus/fieldbus_dev.h
+++ b/drivers/staging/fieldbus/fieldbus_dev.h
@@ -15,6 +15,12 @@ enum fieldbus_dev_type {
FIELDBUS_DEV_TYPE_PROFINET,
};
+enum fieldbus_dev_offl_mode {
+ FIELDBUS_DEV_OFFL_MODE_CLEAR = 0,
+ FIELDBUS_DEV_OFFL_MODE_FREEZE,
+ FIELDBUS_DEV_OFFL_MODE_SET
+};
+
/**
* struct fieldbus_dev - Fieldbus device
* @read_area: [DRIVER] function to read the process data area of the
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 14a9eebf687e..39c0fe347188 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -18,8 +18,6 @@
#include "ethsw.h"
-static struct workqueue_struct *ethsw_owq;
-
/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR 8
#define DPSW_MIN_VER_MINOR 1
@@ -1174,10 +1172,6 @@ static int port_netdevice_event(struct notifier_block *unused,
return notifier_from_errno(err);
}
-static struct notifier_block port_nb __read_mostly = {
- .notifier_call = port_netdevice_event,
-};
-
struct ethsw_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
@@ -1233,8 +1227,10 @@ static int port_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
struct ethsw_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
if (!ethsw_port_dev_check(dev))
return NOTIFY_DONE;
@@ -1270,7 +1266,7 @@ static int port_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
- queue_work(ethsw_owq, &switchdev_work->work);
+ queue_work(ethsw->workqueue, &switchdev_work->work);
return NOTIFY_DONE;
@@ -1318,31 +1314,27 @@ static int port_switchdev_blocking_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct notifier_block port_switchdev_nb = {
- .notifier_call = port_switchdev_event,
-};
-
-static struct notifier_block port_switchdev_blocking_nb = {
- .notifier_call = port_switchdev_blocking_event,
-};
-
static int ethsw_register_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
- err = register_netdevice_notifier(&port_nb);
+ ethsw->port_nb.notifier_call = port_netdevice_event;
+ err = register_netdevice_notifier(&ethsw->port_nb);
if (err) {
dev_err(dev, "Failed to register netdev notifier\n");
return err;
}
- err = register_switchdev_notifier(&port_switchdev_nb);
+ ethsw->port_switchdev_nb.notifier_call = port_switchdev_event;
+ err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err) {
dev_err(dev, "Failed to register switchdev notifier\n");
goto err_switchdev_nb;
}
- err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
+ ethsw->port_switchdevb_nb.notifier_call = port_switchdev_blocking_event;
+ err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
if (err) {
dev_err(dev, "Failed to register switchdev blocking notifier\n");
goto err_switchdev_blocking_nb;
@@ -1351,9 +1343,9 @@ static int ethsw_register_notifier(struct device *dev)
return 0;
err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&port_switchdev_nb);
+ unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
err_switchdev_nb:
- unregister_netdevice_notifier(&port_nb);
+ unregister_netdevice_notifier(&ethsw->port_nb);
return err;
}
@@ -1435,9 +1427,10 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
}
}
- ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
- "ethsw");
- if (!ethsw_owq) {
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
err = -ENOMEM;
goto err_close;
}
@@ -1449,7 +1442,7 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
return 0;
err_destroy_ordered_workqueue:
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
err_close:
dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -1491,21 +1484,22 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
static void ethsw_unregister_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct notifier_block *nb;
int err;
- nb = &port_switchdev_blocking_nb;
+ nb = &ethsw->port_switchdevb_nb;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev blocking notifier (%d)\n", err);
- err = unregister_switchdev_notifier(&port_switchdev_nb);
+ err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev notifier (%d)\n", err);
- err = unregister_netdevice_notifier(&port_nb);
+ err = unregister_netdevice_notifier(&ethsw->port_nb);
if (err)
dev_err(dev,
"Failed to unregister netdev notifier (%d)\n", err);
@@ -1536,7 +1530,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
ethsw_teardown_irqs(sw_dev);
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index 3ea8a0ad8c10..a0244f7d5003 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -66,6 +66,11 @@ struct ethsw_core {
u8 vlans[VLAN_VID_MASK + 1];
bool learning;
+
+ struct notifier_block port_nb;
+ struct notifier_block port_switchdev_nb;
+ struct notifier_block port_switchdevb_nb;
+ struct workqueue_struct *workqueue;
};
#endif /* __ETHSW_H */
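
The ethsw changes move the three global notifier blocks into struct ethsw_core so each switch instance registers its own. A generic sketch of the embedded-notifier pattern, recovering per-instance state with container_of() (illustrative names; the ethsw callbacks themselves reach their state via netdev_priv()):

#include <linux/kernel.h>
#include <linux/notifier.h>

struct example_core {
	struct notifier_block port_nb;
	/* ... per-instance state ... */
};

static int example_port_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	/* nb is embedded, so container_of() yields the owning instance */
	struct example_core *core = container_of(nb, struct example_core,
						 port_nb);
	(void)core;
	return NOTIFY_DONE;
}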
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
index 9543f8454af9..6964aac2a7ed 100644
--- a/drivers/staging/fwserial/Kconfig
+++ b/drivers/staging/fwserial/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config FIREWIRE_SERIAL
- tristate "TTY over Firewire"
- depends on FIREWIRE && TTY
- help
- This enables TTY over IEEE 1394, providing high-speed serial
+ tristate "TTY over Firewire"
+ depends on FIREWIRE && TTY
+ help
+ This enables TTY over IEEE 1394, providing high-speed serial
connectivity to cabled peers. This driver implements an
ad-hoc transport protocol and is currently limited to
Linux-to-Linux communication.
@@ -14,18 +14,18 @@ config FIREWIRE_SERIAL
if FIREWIRE_SERIAL
config FWTTY_MAX_TOTAL_PORTS
- int "Maximum number of serial ports supported"
- default "64"
- help
- Set this to the maximum number of serial ports you want the
+ int "Maximum number of serial ports supported"
+ default "64"
+ help
+ Set this to the maximum number of serial ports you want the
firewire-serial driver to support.
config FWTTY_MAX_CARD_PORTS
- int "Maximum number of serial ports supported per adapter"
- range 0 FWTTY_MAX_TOTAL_PORTS
- default "32"
- help
- Set this to the maximum number of serial ports each firewire
+ int "Maximum number of serial ports supported per adapter"
+ range 0 FWTTY_MAX_TOTAL_PORTS
+ default "32"
+ help
+ Set this to the maximum number of serial ports each firewire
adapter supports. The actual number of serial ports registered
is set with the module parameter "ttys".
diff --git a/drivers/staging/gasket/gasket_constants.h b/drivers/staging/gasket/gasket_constants.h
index 50d87c7b178c..9ea9c8833f27 100644
--- a/drivers/staging/gasket/gasket_constants.h
+++ b/drivers/staging/gasket/gasket_constants.h
@@ -13,9 +13,6 @@
/* The maximum number of devices per type. */
#define GASKET_DEV_MAX 256
-/* The number of supported (and possible) PCI BARs. */
-#define GASKET_NUM_BARS 6
-
/* The number of supported Gasket page tables per device. */
#define GASKET_MAX_NUM_PAGE_TABLES 1
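
The gasket hunks replace the driver-local GASKET_NUM_BARS with PCI_STD_NUM_BARS, the generic count (6) of standard PCI BARs from <linux/pci.h>. A sketch of the loop shape (illustrative function):

#include <linux/pci.h>

static void example_print_bars(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!pci_resource_len(pdev, i))
			continue;	/* unimplemented BAR */
		dev_info(&pdev->dev, "BAR%d: %pR\n", i,
			 &pdev->resource[i]);
	}
}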
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index 13179f063a61..cd8be80d2076 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -371,7 +371,7 @@ static int gasket_setup_pci(struct pci_dev *pci_dev,
{
int i, mapped_bars, ret;
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
ret = gasket_map_pci_bar(gasket_dev, i);
if (ret) {
mapped_bars = i;
@@ -393,7 +393,7 @@ static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
{
int i;
- for (i = 0; i < GASKET_NUM_BARS; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
gasket_unmap_pci_bar(gasket_dev, i);
}
@@ -493,7 +493,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
(enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
switch (sysfs_type) {
case ATTR_BAR_OFFSETS:
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_desc = &driver_desc->bar_descriptions[i];
if (bar_desc->size == 0)
continue;
@@ -505,7 +505,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
}
break;
case ATTR_BAR_SIZES:
- for (i = 0; i < GASKET_NUM_BARS; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
bar_desc = &driver_desc->bar_descriptions[i];
if (bar_desc->size == 0)
continue;
@@ -556,7 +556,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
break;
case ATTR_USER_MEM_RANGES:
- for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
current_written =
gasket_write_mappable_regions(buf, driver_desc,
i);
@@ -736,7 +736,7 @@ static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
const struct gasket_driver_desc *driver_desc;
driver_desc = gasket_dev->internal_desc->driver_desc;
- for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
struct gasket_bar_desc bar_desc =
driver_desc->bar_descriptions[i];
diff --git a/drivers/staging/gasket/gasket_core.h b/drivers/staging/gasket/gasket_core.h
index be44ac1e3118..c417acadb0d5 100644
--- a/drivers/staging/gasket/gasket_core.h
+++ b/drivers/staging/gasket/gasket_core.h
@@ -268,7 +268,7 @@ struct gasket_dev {
char kobj_name[GASKET_NAME_MAX];
/* Virtual address of mapped BAR memory range. */
- struct gasket_bar_data bar_data[GASKET_NUM_BARS];
+ struct gasket_bar_data bar_data[PCI_STD_NUM_BARS];
/* Coherent buffer. */
struct gasket_coherent_buffer coherent_buffer;
@@ -369,7 +369,7 @@ struct gasket_driver_desc {
/* Set of 6 bar descriptions that describe all PCIe bars.
* Note that BUS/AXI devices (i.e. non PCI devices) use those.
*/
- struct gasket_bar_desc bar_descriptions[GASKET_NUM_BARS];
+ struct gasket_bar_desc bar_descriptions[PCI_STD_NUM_BARS];
/*
* Coherent buffer description.
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
index 240f9bb10b71..e3047d36d8db 100644
--- a/drivers/staging/gasket/gasket_ioctl.c
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -34,8 +34,8 @@ static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
- return gasket_interrupt_set_eventfd(
- gasket_dev->interrupt_data, die.interrupt, die.event_fd);
+ return gasket_interrupt_set_eventfd(gasket_dev->interrupt_data,
+ die.interrupt, die.event_fd);
}
/* Read the size of the page table. */
@@ -54,9 +54,9 @@ static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
ibuf.size = gasket_page_table_num_entries(
gasket_dev->page_table[ibuf.page_table_index]);
- trace_gasket_ioctl_page_table_data(
- ibuf.page_table_index, ibuf.size, ibuf.host_address,
- ibuf.device_address);
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
@@ -101,9 +101,9 @@ static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
- trace_gasket_ioctl_page_table_data(
- ibuf.page_table_index, ibuf.size, ibuf.host_address,
- ibuf.device_address);
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 82099db4bf0c..a480409090c0 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index e6b660489165..bf3e2a9cc07f 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -155,6 +155,11 @@
* The DOUT/RDY output must also be wired to an interrupt capable GPIO.
*/
+enum {
+ AD7192_SYSCALIB_ZERO_SCALE,
+ AD7192_SYSCALIB_FULL_SCALE,
+};
+
struct ad7192_state {
struct regulator *avdd;
struct regulator *dvdd;
@@ -169,10 +174,80 @@ struct ad7192_state {
u8 devid;
u8 clock_sel;
struct mutex lock; /* protect sensor state */
+ u8 syscalib_mode[8];
struct ad_sigma_delta sd;
};
+static const char * const ad7192_syscalib_modes[] = {
+ [AD7192_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7192_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7192_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ st->syscalib_mode[chan->channel] = mode;
+
+ return 0;
+}
+
+static int ad7192_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+
+ return st->syscalib_mode[chan->channel];
+}
+
+static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7192_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret, temp;
+
+ ret = strtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ temp = st->syscalib_mode[chan->channel];
+ if (sys_calib) {
+ if (temp == AD7192_SYSCALIB_ZERO_SCALE)
+ ret = ad_sd_calibrate(&st->sd, AD7192_MODE_CAL_SYS_ZERO,
+ chan->address);
+ else
+ ret = ad_sd_calibrate(&st->sd, AD7192_MODE_CAL_SYS_FULL,
+ chan->address);
+ }
+
+ return ret ? ret : len;
+}
+
+static const struct iio_enum ad7192_syscalib_mode_enum = {
+ .items = ad7192_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7192_syscalib_modes),
+ .set = ad7192_set_syscalib_mode,
+ .get = ad7192_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7192_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7192_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", &ad7192_syscalib_mode_enum),
+ {}
+};
+
static struct ad7192_state *ad_sigma_delta_to_ad7192(struct ad_sigma_delta *sd)
{
return container_of(sd, struct ad7192_state, sd);
@@ -770,9 +845,11 @@ static int ad7192_channels_config(struct iio_dev *indio_dev)
*chan = channels[i];
chan->info_mask_shared_by_all |=
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY);
- if (chan->type != IIO_TEMP)
+ if (chan->type != IIO_TEMP) {
chan->info_mask_shared_by_type_available |=
BIT(IIO_CHAN_INFO_SCALE);
+ chan->ext_info = ad7192_calibsys_ext_info;
+ }
chan++;
}
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 038d6732c3fd..23026978a5a5 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -417,6 +417,10 @@ static int ad9834_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->mclk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(st->mclk)) {
+ ret = PTR_ERR(st->mclk);
+ goto error_disable_reg;
+ }
ret = clk_prepare_enable(st->mclk);
if (ret) {
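
devm_clk_get() returns an ERR_PTR() on failure -- including -EPROBE_DEFER -- so its result must be checked before clk_prepare_enable(); the ad9834 hunk adds exactly that. A minimal sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev)
{
	struct clk *mclk;

	mclk = devm_clk_get(dev, NULL);
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);	/* may be -EPROBE_DEFER */

	return clk_prepare_enable(mclk);
}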
diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c
index 40ca1e8fa09f..32ec8cf31fd0 100644
--- a/drivers/staging/isdn/avm/b1.c
+++ b/drivers/staging/isdn/avm/b1.c
@@ -261,9 +261,10 @@ int b1_loaded(avmcard *card)
b1_put_byte(base, SEND_POLL);
for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
if (b1_rx_full(base)) {
- if ((ans = b1_get_byte(base)) == RECEIVE_POLL) {
+ ans = b1_get_byte(base);
+ if (ans == RECEIVE_POLL)
return 1;
- }
+
printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
card->name, ans);
return 0;
@@ -284,8 +285,9 @@ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
int retval;
b1_reset(port);
+ retval = b1_load_t4file(card, &data->firmware);
- if ((retval = b1_load_t4file(card, &data->firmware))) {
+ if (retval) {
b1_reset(port);
printk(KERN_ERR "%s: failed to load t4file!!\n",
card->name);
@@ -295,7 +297,8 @@ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
b1_disable_irq(port);
if (data->configuration.len > 0 && data->configuration.data) {
- if ((retval = b1_load_config(card, &data->configuration))) {
+ retval = b1_load_config(card, &data->configuration);
+ if (retval) {
b1_reset(port);
printk(KERN_ERR "%s: failed to load config!!\n",
card->name);
@@ -525,7 +528,9 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
MsgLen = 30;
CAPIMSG_SETLEN(card->msgbuf, 30);
}
- if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
+
+ skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC);
+ if (!skb) {
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
} else {
@@ -539,7 +544,9 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
ApplId = (unsigned) b1_get_word(card->port);
MsgLen = b1_get_slice(card->port, card->msgbuf);
- if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
+ skb = alloc_skb(MsgLen, GFP_ATOMIC);
+
+ if (!skb) {
printk(KERN_ERR "%s: incoming packet dropped\n",
card->name);
spin_unlock_irqrestore(&card->lock, flags);
@@ -663,11 +670,17 @@ int b1_proc_show(struct seq_file *m, void *v)
seq_printf(m, "%-16s %s\n", "type", s);
if (card->cardtype == avm_t1isa)
seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
- if ((s = cinfo->version[VER_DRIVER]) != NULL)
+
+ s = cinfo->version[VER_DRIVER];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_driver", s);
- if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
+
+ s = cinfo->version[VER_CARDTYPE];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
- if ((s = cinfo->version[VER_SERIAL]) != NULL)
+
+ s = cinfo->version[VER_SERIAL];
+ if (s)
seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
@@ -784,13 +797,15 @@ static int __init b1_init(void)
char *p;
char rev[32];
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
+ p = strchr(revision, ':');
+ if (p && p[1]) {
strlcpy(rev, p + 2, 32);
- if ((p = strchr(rev, '$')) != NULL && p > rev)
+ p = strchr(rev, '$');
+ if (p && p > rev)
*(p - 1) = 0;
- } else
+ } else {
strcpy(rev, "1.0");
-
+ }
printk(KERN_INFO "b1: revision %s\n", rev);
return 0;
diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c
index 17fa615a8c68..9ddadd07e707 100644
--- a/drivers/staging/isdn/gigaset/interface.c
+++ b/drivers/staging/isdn/gigaset/interface.c
@@ -518,7 +518,7 @@ void gigaset_if_init(struct cardstate *cs)
if (!IS_ERR(cs->tty_dev))
dev_set_drvdata(cs->tty_dev, cs);
else {
- pr_warning("could not register device to the tty subsystem\n");
+ pr_warn("could not register device to the tty subsystem\n");
cs->tty_dev = NULL;
}
mutex_unlock(&cs->mutex);
diff --git a/drivers/staging/kpc2000/kpc2000_i2c.c b/drivers/staging/kpc2000/kpc2000_i2c.c
index bc02534d8dc3..5460bf973c9c 100644
--- a/drivers/staging/kpc2000/kpc2000_i2c.c
+++ b/drivers/staging/kpc2000/kpc2000_i2c.c
@@ -99,7 +99,8 @@ struct i2c_device {
#define SMBHSTSTS_INTR 0x02
#define SMBHSTSTS_HOST_BUSY 0x01
-#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
+#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | \
+ SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | SMBHSTSTS_INTR)
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
@@ -136,17 +137,18 @@ static int i801_check_pre(struct i2c_device *priv)
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&priv->adapter.dev, "SMBus is busy, can't use it! (status=%x)\n", status);
+ dev_err(&priv->adapter.dev,
+ "SMBus is busy, can't use it! (status=%x)\n", status);
return -EBUSY;
}
status &= STATUS_FLAGS;
if (status) {
- //dev_dbg(&priv->adapter.dev, "Clearing status flags (%02x)\n", status);
outb_p(status, SMBHSTSTS(priv));
status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
if (status) {
- dev_err(&priv->adapter.dev, "Failed clearing status flags (%02x)\n", status);
+ dev_err(&priv->adapter.dev,
+ "Failed clearing status flags (%02x)\n", status);
return -EBUSY;
}
}
@@ -162,15 +164,20 @@ static int i801_check_post(struct i2c_device *priv, int status, int timeout)
if (timeout) {
dev_err(&priv->adapter.dev, "Transaction timeout\n");
/* try to stop the current command */
- dev_dbg(&priv->adapter.dev, "Terminating the current operation\n");
- outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, SMBHSTCNT(priv));
+ dev_dbg(&priv->adapter.dev,
+ "Terminating the current operation\n");
+ outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+ SMBHSTCNT(priv));
usleep_range(1000, 2000);
- outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), SMBHSTCNT(priv));
+ outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+ SMBHSTCNT(priv));
/* Check if it worked */
status = inb_p(SMBHSTSTS(priv));
- if ((status & SMBHSTSTS_HOST_BUSY) || !(status & SMBHSTSTS_FAILED))
- dev_err(&priv->adapter.dev, "Failed terminating the transaction\n");
+ if ((status & SMBHSTSTS_HOST_BUSY) ||
+ !(status & SMBHSTSTS_FAILED))
+ dev_err(&priv->adapter.dev,
+ "Failed terminating the transaction\n");
outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
return -ETIMEDOUT;
}
@@ -244,7 +251,9 @@ static void i801_wait_hwpec(struct i2c_device *priv)
outb_p(status, SMBHSTSTS(priv));
}
-static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int hwpec)
+static int i801_block_transaction_by_block(struct i2c_device *priv,
+ union i2c_smbus_data *data,
+ char read_write, int hwpec)
{
int i, len;
int status;
@@ -259,7 +268,8 @@ static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_sm
outb_p(data->block[i + 1], SMBBLKDAT(priv));
}
- status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
+ status = i801_transaction(priv,
+ I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec);
if (status)
return status;
@@ -275,7 +285,10 @@ static int i801_block_transaction_by_block(struct i2c_device *priv, union i2c_sm
return 0;
}
-static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+static int i801_block_transaction_byte_by_byte(struct i2c_device *priv,
+ union i2c_smbus_data *data,
+ char read_write, int command,
+ int hwpec)
{
int i, len;
int smbcmd;
@@ -301,7 +314,8 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2
else
smbcmd = I801_BLOCK_LAST;
} else {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_READ)
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_READ)
smbcmd = I801_I2C_BLOCK_DATA;
else
smbcmd = I801_BLOCK_DATA;
@@ -309,25 +323,33 @@ static int i801_block_transaction_byte_by_byte(struct i2c_device *priv, union i2
outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | I801_START, SMBHSTCNT(priv));
+ outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+ SMBHSTCNT(priv));
/* We will always wait for a fraction of a second! */
timeout = 0;
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while ((!(status & SMBHSTSTS_BYTE_DONE)) && (timeout++ < MAX_RETRIES));
+ } while (!(status & SMBHSTSTS_BYTE_DONE) &&
+ (timeout++ < MAX_RETRIES));
result = i801_check_post(priv, status, timeout > MAX_RETRIES);
if (result < 0)
return result;
- if (i == 1 && read_write == I2C_SMBUS_READ && command != I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (i == 1 && read_write == I2C_SMBUS_READ &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA) {
len = inb_p(SMBHSTDAT0(priv));
if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
- dev_err(&priv->adapter.dev, "Illegal SMBus block read size %d\n", len);
+ dev_err(&priv->adapter.dev,
+ "Illegal SMBus block read size %d\n",
+ len);
/* Recover */
- while (inb_p(SMBHSTSTS(priv)) & SMBHSTSTS_HOST_BUSY)
- outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ while (inb_p(SMBHSTSTS(priv)) &
+ SMBHSTSTS_HOST_BUSY)
+ outb_p(SMBHSTSTS_BYTE_DONE,
+ SMBHSTSTS(priv));
+ outb_p(SMBHSTSTS_INTR,
+ SMBHSTSTS(priv));
return -EPROTO;
}
data->block[0] = len;
@@ -354,7 +376,9 @@ static int i801_set_block_buffer_mode(struct i2c_device *priv)
}
/* Block transaction function */
-static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data *data, char read_write, int command, int hwpec)
+static int i801_block_transaction(struct i2c_device *priv,
+ union i2c_smbus_data *data, char read_write,
+ int command, int hwpec)
{
int result = 0;
//unsigned char hostc;
@@ -366,12 +390,14 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
//pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
//pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN);
} else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
- dev_err(&priv->adapter.dev, "I2C block read is unsupported!\n");
+ dev_err(&priv->adapter.dev,
+ "I2C block read is unsupported!\n");
return -EOPNOTSUPP;
}
}
- if (read_write == I2C_SMBUS_WRITE || command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ if (read_write == I2C_SMBUS_WRITE ||
+ command == I2C_SMBUS_I2C_BLOCK_DATA) {
if (data->block[0] < 1)
data->block[0] = 1;
if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
@@ -384,13 +410,21 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
* SMBus (not I2C) block transactions, even though the datasheet
* doesn't mention this limitation.
*/
- if ((priv->features & FEATURE_BLOCK_BUFFER) && command != I2C_SMBUS_I2C_BLOCK_DATA && i801_set_block_buffer_mode(priv) == 0)
- result = i801_block_transaction_by_block(priv, data, read_write, hwpec);
- else
- result = i801_block_transaction_byte_by_byte(priv, data, read_write, command, hwpec);
+ if ((priv->features & FEATURE_BLOCK_BUFFER) &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA &&
+ i801_set_block_buffer_mode(priv) == 0) {
+ result = i801_block_transaction_by_block(priv, data,
+ read_write, hwpec);
+ } else {
+ result = i801_block_transaction_byte_by_byte(priv, data,
+ read_write,
+ command, hwpec);
+ }
+
if (result == 0 && hwpec)
i801_wait_hwpec(priv);
- if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_WRITE) {
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_WRITE) {
/* restore saved configuration register value */
//TODO: Figure out the right thing to do here...
//pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
@@ -399,32 +433,41 @@ static int i801_block_transaction(struct i2c_device *priv, union i2c_smbus_data
}
/* Return negative errno on error. */
-static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data)
+static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
{
int hwpec;
int block = 0;
int ret, xact = 0;
struct i2c_device *priv = i2c_get_adapdata(adap);
- hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
+ hwpec = (priv->features & FEATURE_SMBUS_PEC) &&
+ (flags & I2C_CLIENT_PEC) &&
+ size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA;
switch (size) {
case I2C_SMBUS_QUICK:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_QUICK\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
xact = I801_QUICK;
break;
case I2C_SMBUS_BYTE:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_WRITE)
outb_p(command, SMBHSTCMD(priv));
xact = I801_BYTE;
break;
case I2C_SMBUS_BYTE_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BYTE_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE)
outb_p(data->byte, SMBHSTDAT0(priv));
@@ -432,7 +475,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_WORD_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_WORD_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
if (read_write == I2C_SMBUS_WRITE) {
outb_p(data->word & 0xff, SMBHSTDAT0(priv));
@@ -442,7 +487,9 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_BLOCK_DATA:
dev_dbg(&priv->adapter.dev, " [acc] SMBUS_BLOCK_DATA\n");
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), SMBHSTADD(priv));
+ outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
+ SMBHSTADD(priv));
+
outb_p(command, SMBHSTCMD(priv));
block = 1;
break;
@@ -463,7 +510,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
block = 1;
break;
default:
- dev_dbg(&priv->adapter.dev, " [acc] Unsupported transaction %d\n", size);
+ dev_dbg(&priv->adapter.dev,
+ " [acc] Unsupported transaction %d\n", size);
return -EOPNOTSUPP;
}
@@ -472,13 +520,14 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
} else {
dev_dbg(&priv->adapter.dev, " [acc] hwpec: no\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
+ outb_p(inb_p(SMBAUXCTL(priv)) &
+ (~SMBAUXCTL_CRC), SMBAUXCTL(priv));
}
if (block) {
- //ret = 0;
dev_dbg(&priv->adapter.dev, " [acc] block: yes\n");
- ret = i801_block_transaction(priv, data, read_write, size, hwpec);
+ ret = i801_block_transaction(priv, data, read_write, size,
+ hwpec);
} else {
dev_dbg(&priv->adapter.dev, " [acc] block: no\n");
ret = i801_transaction(priv, xact | ENABLE_INT9);
@@ -490,7 +539,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
*/
if (hwpec || block) {
dev_dbg(&priv->adapter.dev, " [acc] hwpec || block\n");
- outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ outb_p(inb_p(SMBAUXCTL(priv)) & ~(SMBAUXCTL_CRC |
+ SMBAUXCTL_E32B), SMBAUXCTL(priv));
}
if (block) {
dev_dbg(&priv->adapter.dev, " [acc] block\n");
@@ -501,19 +551,22 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
return ret;
}
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK)) {
- dev_dbg(&priv->adapter.dev, " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
+ dev_dbg(&priv->adapter.dev,
+ " [acc] I2C_SMBUS_WRITE || I801_QUICK -> ret 0\n");
return 0;
}
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
case I801_BYTE_DATA:
- dev_dbg(&priv->adapter.dev, " [acc] I801_BYTE or I801_BYTE_DATA\n");
+ dev_dbg(&priv->adapter.dev,
+ " [acc] I801_BYTE or I801_BYTE_DATA\n");
data->byte = inb_p(SMBHSTDAT0(priv));
break;
case I801_WORD_DATA:
dev_dbg(&priv->adapter.dev, " [acc] I801_WORD_DATA\n");
- data->word = inb_p(SMBHSTDAT0(priv)) + (inb_p(SMBHSTDAT1(priv)) << 8);
+ data->word = inb_p(SMBHSTDAT0(priv)) +
+ (inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
return 0;
@@ -535,30 +588,47 @@ static u32 i801_func(struct i2c_adapter *adapter)
// http://lxr.free-electrons.com/source/include/uapi/linux/i2c.h#L85
u32 f =
- I2C_FUNC_I2C | /* 0x00000001 (I enabled this one) */
- !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
- !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
- ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
- !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
- I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
- !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
- !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
- !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
- !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
- !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
- !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
- !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
- !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
- ((priv->features & FEATURE_I2C_BLOCK_READ) ? I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
+ I2C_FUNC_I2C | /* 0x00000001(I enabled this
+ * one)
+ */
+ !I2C_FUNC_10BIT_ADDR | /* 0x00000002 */
+ !I2C_FUNC_PROTOCOL_MANGLING | /* 0x00000004 */
+ ((priv->features & FEATURE_SMBUS_PEC) ?
+ I2C_FUNC_SMBUS_PEC : 0) | /* 0x00000008 */
+ !I2C_FUNC_SMBUS_BLOCK_PROC_CALL | /* 0x00008000 */
+ I2C_FUNC_SMBUS_QUICK | /* 0x00010000 */
+ !I2C_FUNC_SMBUS_READ_BYTE | /* 0x00020000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE | /* 0x00040000 */
+ !I2C_FUNC_SMBUS_READ_BYTE_DATA | /* 0x00080000 */
+ !I2C_FUNC_SMBUS_WRITE_BYTE_DATA | /* 0x00100000 */
+ !I2C_FUNC_SMBUS_READ_WORD_DATA | /* 0x00200000 */
+ !I2C_FUNC_SMBUS_WRITE_WORD_DATA | /* 0x00400000 */
+ !I2C_FUNC_SMBUS_PROC_CALL | /* 0x00800000 */
+ !I2C_FUNC_SMBUS_READ_BLOCK_DATA | /* 0x01000000 */
+ !I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | /* 0x02000000 */
+ ((priv->features & FEATURE_I2C_BLOCK_READ) ?
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) | /* 0x04000000 */
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | /* 0x08000000 */
I2C_FUNC_SMBUS_BYTE | /* _READ_BYTE _WRITE_BYTE */
- I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA _WRITE_BYTE_DATA */
- I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA _WRITE_WORD_DATA */
- I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA _WRITE_BLOCK_DATA */
- !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK _WRITE_I2C_BLOCK */
- !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE _BYTE_DATA _WORD_DATA _PROC_CALL _WRITE_BLOCK_DATA _I2C_BLOCK _PEC */
+ I2C_FUNC_SMBUS_BYTE_DATA | /* _READ_BYTE_DATA
+ * _WRITE_BYTE_DATA
+ */
+ I2C_FUNC_SMBUS_WORD_DATA | /* _READ_WORD_DATA
+ * _WRITE_WORD_DATA
+ */
+ I2C_FUNC_SMBUS_BLOCK_DATA | /* _READ_BLOCK_DATA
+ * _WRITE_BLOCK_DATA
+ */
+ !I2C_FUNC_SMBUS_I2C_BLOCK | /* _READ_I2C_BLOCK
+ * _WRITE_I2C_BLOCK
+ */
+ !I2C_FUNC_SMBUS_EMUL; /* _QUICK _BYTE
+ * _BYTE_DATA _WORD_DATA
+ * _PROC_CALL
+ * _WRITE_BLOCK_DATA
+ * _I2C_BLOCK _PEC
+ */
return f;
}
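
A note on the functionality mask above: !I2C_FUNC_X on a nonzero flag constant evaluates to 0, so those terms contribute nothing to the OR; they only document which features are unsupported. A stand-alone (userspace) illustration:

#include <stdio.h>

#define FEATURE_A 0x1
#define FEATURE_B 0x2

int main(void)
{
	unsigned int f = FEATURE_A | !FEATURE_B;	/* !0x2 == 0 */

	printf("f = %#x\n", f);	/* prints f = 0x1 */
	return 0;
}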
@@ -610,8 +680,8 @@ static int pi2c_probe(struct platform_device *pldev)
/* Retry up to 3 times on lost arbitration */
priv->adapter.retries = 3;
- //snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter at %04lx", priv->smba);
- snprintf(priv->adapter.name, sizeof(priv->adapter.name), "Fake SMBus I801 adapter");
+ snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+ "Fake SMBus I801 adapter");
err = i2c_add_adapter(&priv->adapter);
if (err) {
diff --git a/drivers/staging/kpc2000/kpc2000_spi.c b/drivers/staging/kpc2000/kpc2000_spi.c
index 3be33c450cab..8becf972af9c 100644
--- a/drivers/staging/kpc2000/kpc2000_spi.c
+++ b/drivers/staging/kpc2000/kpc2000_spi.c
@@ -50,6 +50,7 @@ static struct flash_platform_data p2kr0_spi0_pdata = {
.nr_parts = ARRAY_SIZE(p2kr0_spi0_parts),
.parts = p2kr0_spi0_parts,
};
+
static struct flash_platform_data p2kr0_spi1_pdata = {
.name = "SPI1",
.nr_parts = ARRAY_SIZE(p2kr0_spi1_parts),
@@ -162,14 +163,12 @@ union kp_spi_ffctrl {
kp_spi_read_reg(struct kp_spi_controller_state *cs, int idx)
{
u64 __iomem *addr = cs->base;
- u64 val;
addr += idx;
if ((idx == KP_SPI_REG_CONFIG) && (cs->conf_cache >= 0))
return cs->conf_cache;
- val = readq(addr);
- return val;
+ return readq(addr);
}
static inline void
@@ -227,8 +226,7 @@ kp_spi_txrx_pio(struct spi_device *spidev, struct spi_transfer *transfer)
kp_spi_write_reg(cs, KP_SPI_REG_TXDATA, val);
processed++;
}
- }
- else if (rx) {
+ } else if (rx) {
for (i = 0 ; i < c ; i++) {
char test = 0;
@@ -315,19 +313,19 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
if (transfer->speed_hz > KP_SPI_CLK ||
(len && !(rx_buf || tx_buf))) {
dev_dbg(kpspi->dev, " transfer: %d Hz, %d %s%s, %d bpw\n",
- transfer->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- transfer->bits_per_word);
+ transfer->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ transfer->bits_per_word);
dev_dbg(kpspi->dev, " transfer -EINVAL\n");
return -EINVAL;
}
if (transfer->speed_hz &&
transfer->speed_hz < (KP_SPI_CLK >> 15)) {
dev_dbg(kpspi->dev, "speed_hz %d below minimum %d Hz\n",
- transfer->speed_hz,
- KP_SPI_CLK >> 15);
+ transfer->speed_hz,
+ KP_SPI_CLK >> 15);
dev_dbg(kpspi->dev, " speed_hz -EINVAL\n");
return -EINVAL;
}
@@ -478,7 +476,7 @@ kp_spi_probe(struct platform_device *pldev)
/* register the slave boards */
#define NEW_SPI_DEVICE_FROM_BOARD_INFO_TABLE(table) \
for (i = 0 ; i < ARRAY_SIZE(table) ; i++) { \
- spi_new_device(master, &(table[i])); \
+ spi_new_device(master, &table[i]); \
}
switch ((drvdata->card_id & 0xFFFF0000) >> 16) {
diff --git a/drivers/staging/media/allegro-dvt/nal-h264.c b/drivers/staging/media/allegro-dvt/nal-h264.c
index 4e14b77851e1..bd48b8883572 100644
--- a/drivers/staging/media/allegro-dvt/nal-h264.c
+++ b/drivers/staging/media/allegro-dvt/nal-h264.c
@@ -235,7 +235,7 @@ static inline int rbsp_write_bit(struct rbsp *rbsp, bool value)
rbsp->pos++;
- if (value == 1 ||
+ if (value ||
(rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) {
rbsp->num_consecutive_zeros = 0;
} else {
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h
index f670bbde4159..deb90ae37859 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/staging/media/hantro/hantro.h
@@ -26,21 +26,9 @@
#include "hantro_hw.h"
-#define VP8_MB_DIM 16
-#define VP8_MB_WIDTH(w) DIV_ROUND_UP(w, VP8_MB_DIM)
-#define VP8_MB_HEIGHT(h) DIV_ROUND_UP(h, VP8_MB_DIM)
-
-#define H264_MB_DIM 16
-#define H264_MB_WIDTH(w) DIV_ROUND_UP(w, H264_MB_DIM)
-#define H264_MB_HEIGHT(h) DIV_ROUND_UP(h, H264_MB_DIM)
-
-#define MPEG2_MB_DIM 16
-#define MPEG2_MB_WIDTH(w) DIV_ROUND_UP(w, MPEG2_MB_DIM)
-#define MPEG2_MB_HEIGHT(h) DIV_ROUND_UP(h, MPEG2_MB_DIM)
-
-#define JPEG_MB_DIM 16
-#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
-#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
+#define MB_DIM 16
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
struct hantro_ctx;
struct hantro_codec_ops;
@@ -379,7 +367,7 @@ static inline void hantro_reg_write(struct hantro_dev *vpu,
bool hantro_is_encoder_ctx(const struct hantro_ctx *ctx);
void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts);
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts);
static inline struct vb2_v4l2_buffer *
hantro_get_src_buf(struct hantro_ctx *ctx)
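
The four per-codec macro triplets removed above were identical, since VP8, H.264, MPEG-2 and JPEG all operate on 16x16 macroblocks here; a single MB_DIM set suffices. A compile-time sanity sketch of the consolidated macros:

#include <linux/build_bug.h>
#include <linux/kernel.h>

#define MB_DIM		16
#define MB_WIDTH(w)	DIV_ROUND_UP(w, MB_DIM)
#define MB_HEIGHT(h)	DIV_ROUND_UP(h, MB_DIM)

/* 1920x1080 is 120x68 macroblocks (1080 rounds up to 68) */
static_assert(MB_WIDTH(1920) == 120);
static_assert(MB_HEIGHT(1080) == 68);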
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 6d9d41170832..26108c96b674 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -43,8 +43,9 @@ void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
return ctrl ? ctrl->p_cur.p : NULL;
}
-dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts)
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
{
+ struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
struct vb2_buffer *buf;
int index;
@@ -413,20 +414,18 @@ static int hantro_open(struct file *filp)
if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
ctx->buf_finish = hantro_enc_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
ctx->buf_finish = hantro_dec_buf_finish;
- ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
- queue_init);
} else {
- ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto err_ctx_free;
}
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
- kfree(ctx);
- return ret;
+ goto err_ctx_free;
}
v4l2_fh_init(&ctx->fh, vdev);
@@ -447,6 +446,7 @@ static int hantro_open(struct file *filp)
err_fh_free:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+err_ctx_free:
kfree(ctx);
return ret;
}
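
The hantro_open() hunk replaces stashing an ERR_PTR in ctx->fh.m2m_ctx with the usual goto-unwind ladder, so every failure path frees the context exactly once. A generic sketch of the pattern (all names hypothetical):

#include <linux/slab.h>

struct example {			/* hypothetical per-open state */
	int dummy;
};

static int example_setup(struct example *ex)	/* hypothetical helper */
{
	return 0;
}

static int example_open(void)
{
	struct example *ex;
	int ret;

	ex = kzalloc(sizeof(*ex), GFP_KERNEL);
	if (!ex)
		return -ENOMEM;

	ret = example_setup(ex);
	if (ret)
		goto err_free;

	/* real code would store ex before returning */
	return 0;

err_free:
	kfree(ex);
	return ret;
}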
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 7ab534936843..3cd40a8f0daa 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -34,9 +34,11 @@ static void set_params(struct hantro_ctx *ctx)
reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
- reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
- if (dec_param->nal_ref_idc)
- reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ if (sps->profile_idc > 66) {
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ }
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
@@ -49,8 +51,8 @@ static void set_params(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Decoder control register 1. */
- reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(sps->pic_width_in_mbs_minus1 + 1) |
- G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(sps->pic_height_in_map_units_minus1 + 1) |
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
@@ -61,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx)
/* always use the matrix sent from userspace */
reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
- if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
@@ -79,7 +81,7 @@ static void set_params(struct hantro_ctx *ctx)
reg |= G1_REG_DEC_CTRL4_CABAC_E;
if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
- if (sps->chroma_format_idc == 0)
+ if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
@@ -220,10 +222,9 @@ static void set_ref(struct hantro_ctx *ctx)
/* Set up addresses of DPB buffers. */
for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
- struct vb2_buffer *buf = hantro_h264_get_ref_buf(ctx, i);
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
- vdpu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(buf, 0),
- G1_REG_ADDR_REF(i));
+ vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
}
}
@@ -233,6 +234,7 @@ static void set_buffers(struct hantro_ctx *ctx)
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
@@ -243,18 +245,30 @@ static void set_buffers(struct hantro_ctx *ctx)
/* Destination (decoded frame) buffer. */
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+ /* Adjust dma addr to start at second line for bottom field */
+ if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
/* Higher profiles require DMV buffer appended to reference frames. */
- if (ctrls->sps->profile_idc > 66) {
- size_t pic_size = ctx->h264_dec.pic_size;
- size_t mv_offset = round_up(pic_size, 8);
-
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
+ unsigned int bytes_per_mb = 384;
+
+ /* DMV buffer for monochrome starts directly after Y-plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+ /*
+ * DMV buffer is split in two for field encoded frames,
+ * adjust offset for bottom field
+ */
if (ctrls->slices[0].flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
- mv_offset += 32 * H264_MB_WIDTH(ctx->dst_fmt.width);
-
- vdpu_write_relaxed(vpu, dst_dma + mv_offset,
- G1_REG_ADDR_DIR_MV);
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
}
/* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
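
The rewritten offset computation sizes the reference picture in macroblock units: a 4:2:0 macroblock occupies 384 bytes (256 luma + 128 chroma), a monochrome one only 256, so the DMV (direct motion vector) area begins right after the pixel data. A worked check, assuming a 1920x1088 coded frame:

/*
 * For an assumed 1920x1088 4:2:0 frame (384 bytes per 16x16 MB):
 *
 *   MB_WIDTH(1920)  = 120
 *   MB_HEIGHT(1088) = 68
 *   offset = 384 * 120 * 68 = 3133440
 *
 * which equals 1920 * 1088 * 3 / 2 -- exactly the NV12 pixel-data
 * size, so the DMV buffer starts immediately after the frame.
 */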
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
index 80f0e94f8afa..f3bf67d8a289 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
@@ -105,17 +105,14 @@ hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -207,8 +204,8 @@ void hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
G1_REG_DEC_AXI_WR_ID(0);
vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
- reg = G1_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- G1_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
G1_REG_ALT_SCAN_E(picture->alternate_scan) |
G1_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 6d99c2be01cf..cad18094fee0 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -370,19 +370,18 @@ static void cfg_tap(struct hantro_ctx *ctx,
static void cfg_ref(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame_header *hdr)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
dma_addr_t ref;
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -390,7 +389,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= G1_REG_ADDR_REF_TOPC_E;
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -470,8 +469,8 @@ void hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
index ecd34a7db190..938b48d4d3d9 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
@@ -116,8 +116,8 @@ void hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
/* Make sure that all registers are written at this point. */
vepu_write(vpu, reg, H1_REG_AXI_CTRL);
- reg = H1_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | H1_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| H1_REG_ENC_CTRL_ENC_MODE_JPEG
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
index 0d758e0c0f99..568640eab3a6 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/staging/media/hantro/hantro_h264.c
@@ -20,9 +20,9 @@
/* Size with u32 units. */
#define CABAC_INIT_BUFFER_SIZE (460 * 2)
#define POC_BUFFER_SIZE 34
-#define SCALING_LIST_SIZE (6 * 16 + 6 * 64)
+#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
-#define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1)
+#define HANTRO_CMP(a, b) ((a) < (b) ? -1 : 1)
/* Data structure describing auxiliary buffer format. */
struct hantro_h264_dec_priv_tbl {
@@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = {
0x1f0c2517, 0x1f261440
};
-/*
- * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process
- * to get the values in matrix order. In addition, the hardware requires bytes
- * swapped within each subsequent 4 bytes. Both arrays below include both
- * transformations.
- */
-static const u32 zig_zag_4x4[] = {
- 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12
-};
-
-static const u32 zig_zag_8x8[] = {
- 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6,
- 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31,
- 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48,
- 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60
-};
-
static void
reorder_scaling_list(struct hantro_ctx *ctx)
{
@@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx)
const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
- const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
- u8 *dst = tbl->scaling_list;
- const u8 *src;
+ u32 *dst = (u32 *)tbl->scaling_list;
+ const u32 *src;
int i, j;
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
- BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) !=
- num_list_4x4 * list_len_4x4 +
- num_list_8x8 * list_len_8x8);
-
- src = &scaling->scaling_list_4x4[0][0];
- for (i = 0; i < num_list_4x4; ++i) {
- for (j = 0; j < list_len_4x4; ++j)
- dst[zig_zag_4x4[j]] = src[j];
- src += list_len_4x4;
- dst += list_len_4x4;
+ for (i = 0; i < num_list_4x4; i++) {
+ src = (u32 *)&scaling->scaling_list_4x4[i];
+ for (j = 0; j < list_len_4x4 / 4; j++)
+ *dst++ = swab32(src[j]);
}
- src = &scaling->scaling_list_8x8[0][0];
- for (i = 0; i < num_list_8x8; ++i) {
- for (j = 0; j < list_len_8x8; ++j)
- dst[zig_zag_8x8[j]] = src[j];
- src += list_len_8x8;
- dst += list_len_8x8;
+ /* Only the Intra/Inter Y 8x8 lists are used. */
+ for (i = 0; i < 2; i++) {
+ src = (u32 *)&scaling->scaling_list_8x8[i];
+ for (j = 0; j < list_len_8x8 / 4; j++)
+ *dst++ = swab32(src[j]);
}
}
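The zig-zag lookup tables go away because the new loop keeps the scaling lists in the order the control provides them and only swaps bytes within each consecutive 32-bit word. A small illustration of the swab32() transformation, with a made-up value:

#include <linux/swab.h>

static u32 example_swab(void)
{
	/* swab32() reverses the byte order within one 32-bit word. */
	return swab32(0x00010203);	/* yields 0x03020100 */
}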
@@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder {
const struct v4l2_h264_dpb_entry *dpb;
s32 pocs[HANTRO_H264_DPB_SIZE];
u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
+ int frame_nums[HANTRO_H264_DPB_SIZE];
s32 curpoc;
u8 num_valid;
};
@@ -294,13 +268,20 @@ static void
init_reflist_builder(struct hantro_ctx *ctx,
struct hantro_h264_reflist_builder *b)
{
+ const struct v4l2_ctrl_h264_slice_params *slice_params;
const struct v4l2_ctrl_h264_decode_params *dec_param;
+ const struct v4l2_ctrl_h264_sps *sps;
struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ int cur_frame_num, max_frame_num;
unsigned int i;
dec_param = ctx->h264_dec.ctrls.decode;
+ slice_params = &ctx->h264_dec.ctrls.slices[0];
+ sps = ctx->h264_dec.ctrls.sps;
+ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
+ cur_frame_num = slice_params->frame_num;
memset(b, 0, sizeof(*b));
b->dpb = dpb;
@@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx,
continue;
buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
+
+ /*
+ * Handle frame_num wraparound as described in section
+ * '8.2.4.1 Decoding process for picture numbers' of the spec.
+ * TODO: This logic will have to be adjusted when we start
+ * supporting interlaced content.
+ */
+ if (dpb[i].frame_num > cur_frame_num)
+ b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num;
+ else
+ b->frame_nums[i] = dpb[i].frame_num;
+
b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
dpb[i].bottom_field_order_cnt);
b->unordered_reflist[b->num_valid] = i;
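A hypothetical helper mirroring the adjustment above, with worked numbers that do not come from the driver: with log2_max_frame_num_minus4 = 0, max_frame_num is 16, so a DPB entry carrying frame_num 14 seen while decoding frame_num 2 is treated as 14 - 16 = -2 and sorts before the current frame, as the spec requires.

/* Illustration only; mirrors the wraparound handling above. */
static int adjusted_frame_num(int frame_num, int cur_frame_num,
			      int max_frame_num)
{
	/* Entries numerically ahead of the current frame have wrapped. */
	if (frame_num > cur_frame_num)
		return frame_num - max_frame_num;
	return frame_num;
}

/* adjusted_frame_num(14, 2, 16) == -2; adjusted_frame_num(1, 2, 16) == 1. */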
@@ -353,9 +346,10 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* ascending order.
*/
if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
- return b->frame_num - a->frame_num;
+ return HANTRO_CMP(builder->frame_nums[idxb],
+ builder->frame_nums[idxa]);
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
}
static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
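One likely reason for replacing the subtraction-style comparators with HANTRO_CMP: with 32-bit operands, a - b can overflow and flip sign, handing sort_r() an inconsistent ordering, while the ternary form cannot. A contrived demonstration with made-up values:

#include <limits.h>

#define HANTRO_CMP(a, b) ((a) < (b) ? -1 : 1)

static int cmp_demo(void)
{
	int a = INT_MIN, b = 1;

	/*
	 * a - b wraps around to INT_MAX and wrongly reports a > b;
	 * HANTRO_CMP(a, b) correctly yields -1.
	 */
	return HANTRO_CMP(a, b);
}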
@@ -381,7 +375,7 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
@@ -392,11 +386,11 @@ static int b0_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
@@ -422,22 +416,22 @@ static int b1_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
/* Long term pics in ascending pic num order. */
if (a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
- return a->pic_num - b->pic_num;
+ return HANTRO_CMP(a->pic_num, b->pic_num);
poca = builder->pocs[idxa];
pocb = builder->pocs[idxb];
/*
* Short term pics with POC > cur POC first in POC ascending order
- * followed by short term pics with POC > cur POC in POC descending
+ * followed by short term pics with POC < cur POC in POC descending
* order.
*/
if ((poca < builder->curpoc) != (pocb < builder->curpoc))
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
else if (poca < builder->curpoc)
- return POC_CMP(pocb, poca);
+ return HANTRO_CMP(pocb, poca);
- return POC_CMP(poca, pocb);
+ return HANTRO_CMP(poca, pocb);
}
static void
@@ -537,22 +531,18 @@ static void update_dpb(struct hantro_ctx *ctx)
}
}
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx)
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
{
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
- struct vb2_buffer *buf;
- int buf_idx = -1;
+ dma_addr_t dma_addr = 0;
if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
- buf_idx = vb2_find_timestamp(cap_q,
- dpb[dpb_idx].reference_ts, 0);
+ dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
- if (buf_idx >= 0) {
- buf = vb2_get_buffer(cap_q, buf_idx);
- } else {
+ if (!dma_addr) {
struct vb2_v4l2_buffer *dst_buf;
+ struct vb2_buffer *buf;
/*
* If a DPB entry is unused or invalid, address of current
@@ -560,9 +550,10 @@ struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
*/
dst_buf = hantro_get_dst_buf(ctx);
buf = &dst_buf->vb2_buf;
+ dma_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
}
- return buf;
+ return dma_addr;
}
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
@@ -627,7 +618,6 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
struct hantro_aux_buf *priv = &h264_dec->priv;
struct hantro_h264_dec_priv_tbl *tbl;
- struct v4l2_pix_format_mplane pix_mp;
priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
GFP_KERNEL);
@@ -638,9 +628,5 @@ int hantro_h264_dec_init(struct hantro_ctx *ctx)
tbl = priv->cpu;
memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
- v4l2_fill_pixfmt_mp(&pix_mp, ctx->dst_fmt.pixelformat,
- ctx->dst_fmt.width, ctx->dst_fmt.height);
- h264_dec->pic_size = pix_mp.plane_fmt[0].sizeimage;
-
return 0;
}
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index 2fab655bf098..fa91dd1848b7 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -80,15 +80,12 @@ struct hantro_h264_dec_reflists {
* @dpb: DPB
* @reflists: P/B0/B1 reflists
* @ctrls: V4L2 controls attached to a run
- * @pic_size: Size in bytes of decoded picture, this is needed
- * to pass the location of motion vectors.
*/
struct hantro_h264_dec_hw_ctx {
struct hantro_aux_buf priv;
struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
struct hantro_h264_dec_reflists reflists;
struct hantro_h264_dec_ctrls ctrls;
- size_t pic_size;
};
/**
@@ -158,8 +155,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx);
int hantro_jpeg_enc_init(struct hantro_ctx *ctx);
void hantro_jpeg_enc_exit(struct hantro_ctx *ctx);
-struct vb2_buffer *hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
- unsigned int dpb_idx);
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
void hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
int hantro_h264_dec_init(struct hantro_ctx *ctx);
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c
index 3dae52abb96c..1dae76f20034 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/staging/media/hantro/hantro_v4l2.c
@@ -240,14 +240,30 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f,
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
/*
+ * A decoded 8-bit 4:2:0 NV12 frame may need up to 448 bytes of
+ * memory per macroblock, with an additional 32 bytes on
+ * multi-core variants.
+ *
* The H264 decoder needs extra space on the output buffers
* to store motion vectors. This is needed for reference
* frames.
+ *
+ * The memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
*/
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
pix_mp->plane_fmt[0].sizeimage +=
- 128 * DIV_ROUND_UP(pix_mp->width, 16) *
- DIV_ROUND_UP(pix_mp->height, 16);
+ 64 * MB_WIDTH(pix_mp->width) *
+ MB_HEIGHT(pix_mp->height) + 32;
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
@@ -367,20 +383,27 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
const struct hantro_fmt *formats;
unsigned int num_fmts;
- struct vb2_queue *vq;
int ret;
- /* Change not allowed if queue is busy. */
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (vb2_is_busy(vq))
- return -EBUSY;
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
if (!hantro_is_encoder_ctx(ctx)) {
struct vb2_queue *peer_vq;
/*
+ * To support dynamic resolution change, the decoder
+ * allows a resolution change as long as the pixelformat
+ * remains the same. This cannot be done while streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ pix_mp->pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
* Since format change on the OUTPUT queue will reset
* the CAPTURE queue, we can't allow doing so
* when the CAPTURE queue has buffers allocated.
@@ -389,12 +412,15 @@ vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(peer_vq))
return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't allow a format change while
+ * OUTPUT buffers are allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
}
- ret = vidioc_try_fmt_out_mplane(file, priv, f);
- if (ret)
- return ret;
-
formats = hantro_get_formats(ctx, &num_fmts);
ctx->vpu_src_fmt = hantro_find_format(formats, num_fmts,
pix_mp->pixelformat);
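Putting the layout comment and the new arithmetic together, the extra capture-buffer space for H264 can be sketched as below. This assumes the MB_WIDTH()/MB_HEIGHT() helpers; the Y and UV planes themselves are already covered by v4l2_fill_pixfmt_mp(), so only the motion-vector and sync areas are appended:

/* Extra bytes appended after the NV12 planes of an H264 capture buffer. */
static size_t h264_mv_extra_size(unsigned int width, unsigned int height)
{
	size_t mbs = (size_t)MB_WIDTH(width) * MB_HEIGHT(height);

	return mbs * 64 + 32;	/* MV buffer per MB plus MC sync area */
}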
diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c
index 6bfcc47d1e58..f8db6fcaad73 100644
--- a/drivers/staging/media/hantro/rk3288_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c
@@ -48,10 +48,10 @@ static const struct hantro_fmt rk3288_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,11 +67,11 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.max_depth = 2,
.frmsize = {
.min_width = 48,
- .max_width = 3840,
- .step_width = H264_MB_DIM,
+ .max_width = 4096,
+ .step_width = MB_DIM,
.min_height = 48,
- .max_height = 2160,
- .step_height = H264_MB_DIM,
+ .max_height = 2304,
+ .step_height = MB_DIM,
},
},
{
@@ -81,10 +81,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -94,10 +94,10 @@ static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw.c b/drivers/staging/media/hantro/rk3399_vpu_hw.c
index 14d14bc6b12b..9ac1f2cb6a16 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw.c
@@ -47,10 +47,10 @@ static const struct hantro_fmt rk3399_vpu_enc_fmts[] = {
.frmsize = {
.min_width = 96,
.max_width = 8192,
- .step_width = JPEG_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
- .step_height = JPEG_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
@@ -67,10 +67,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 1920,
- .step_width = MPEG2_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 1088,
- .step_height = MPEG2_MB_DIM,
+ .step_height = MB_DIM,
},
},
{
@@ -80,10 +80,10 @@ static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
.frmsize = {
.min_width = 48,
.max_width = 3840,
- .step_width = VP8_MB_DIM,
+ .step_width = MB_DIM,
.min_height = 48,
.max_height = 2160,
- .step_height = VP8_MB_DIM,
+ .step_height = MB_DIM,
},
},
};
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
index 06162f569b5e..067892345b5d 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_jpeg_enc.c
@@ -149,8 +149,8 @@ void rk3399_vpu_jpeg_enc_run(struct hantro_ctx *ctx)
reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
- reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
- | VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| VEPU_REG_FRAME_TYPE_INTRA
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
index e7ba5c0441cc..b40d2cdf832f 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_mpeg2_dec.c
@@ -107,17 +107,14 @@ rk3399_vpu_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
- struct vb2_queue *vq;
-
- vq = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
switch (picture->picture_coding_type) {
case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
- backward_addr = hantro_get_ref(vq,
+ backward_addr = hantro_get_ref(ctx,
slice_params->backward_ref_ts);
/* fall-through */
case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
- forward_addr = hantro_get_ref(vq,
+ forward_addr = hantro_get_ref(ctx,
slice_params->forward_ref_ts);
}
@@ -223,8 +220,8 @@ void rk3399_vpu_mpeg2_dec_run(struct hantro_ctx *ctx)
VDPU_REG_DEC_CLK_GATE_E(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
- reg = VDPU_REG_PIC_MB_WIDTH(MPEG2_MB_WIDTH(ctx->dst_fmt.width)) |
- VDPU_REG_PIC_MB_HEIGHT_P(MPEG2_MB_HEIGHT(ctx->dst_fmt.height)) |
+ reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
VDPU_REG_ALT_SCAN_E(picture->alternate_scan) |
VDPU_REG_TOPFIELDFIRST_E(picture->top_field_first);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
diff --git a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
index f17e32620b08..76d7ed3fd69a 100644
--- a/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
+++ b/drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c
@@ -449,18 +449,16 @@ static void cfg_ref(struct hantro_ctx *ctx,
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
- struct vb2_queue *cap_q;
dma_addr_t ref;
- cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_dst = hantro_get_dst_buf(ctx);
- ref = hantro_get_ref(cap_q, hdr->last_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
- ref = hantro_get_ref(cap_q, hdr->golden_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
WARN_ON(!ref && hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -468,7 +466,7 @@ static void cfg_ref(struct hantro_ctx *ctx,
ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
- ref = hantro_get_ref(cap_q, hdr->alt_frame_ts);
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
WARN_ON(!ref && hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
@@ -563,8 +561,8 @@ void rk3399_vpu_vp8_dec_run(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
/* Frame dimensions */
- mb_width = VP8_MB_WIDTH(width);
- mb_height = VP8_MB_HEIGHT(height);
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index 35e60a120dc1..2a4f77e83ed3 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -428,32 +428,19 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
- int i, ret;
u32 code;
- for (i = 0; i < PRP_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRP_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
-
/* init default frame interval */
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
/* set a default mbus format */
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
- ret = imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
- V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, PRP_NUM_PADS, priv->pad);
+ return imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
+ V4L2_FIELD_NONE, NULL);
}
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
@@ -487,6 +474,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -496,7 +484,12 @@ static int prp_init(struct imx_ic_priv *ic_priv)
ic_priv->task_priv = priv;
priv->ic_priv = ic_priv;
- return 0;
+ for (i = 0; i < PRP_NUM_PADS; i++)
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
+ priv->pad);
}
static void prp_remove(struct imx_ic_priv *ic_priv)
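The same refactor repeats across the imx subdevices in the rest of this series: pad flags and media_entity_pads_init() move from the .registered() callback into init/probe, so the entity is fully populated before the media device walks the OF graph. A minimal sketch of the pattern, with hypothetical names:

static int example_subdev_init(struct v4l2_subdev *sd, struct media_pad *pads,
			       unsigned int num_pads, unsigned int sink_pad)
{
	unsigned int i;

	for (i = 0; i < num_pads; i++)
		pads[i].flags = (i == sink_pad) ? MEDIA_PAD_FL_SINK
						: MEDIA_PAD_FL_SOURCE;

	return media_entity_pads_init(&sd->entity, num_pads, pads);
}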
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 67ffa46a8e96..09c4e3f33807 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1240,21 +1240,16 @@ static int prp_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int prp_registered(struct v4l2_subdev *sd)
{
struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
int i, ret;
u32 code;
+ /* set a default mbus format */
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
- priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
- /* set a default mbus format */
- imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
640, 480, code, V4L2_FIELD_NONE,
&priv->cc[i]);
@@ -1266,22 +1261,26 @@ static int prp_registered(struct v4l2_subdev *sd)
priv->frame_interval.numerator = 1;
priv->frame_interval.denominator = 30;
- ret = media_entity_pads_init(&sd->entity, PRPENCVF_NUM_PADS,
- priv->pad);
- if (ret)
- return ret;
+ priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
+ &ic_priv->sd,
+ PRPENCVF_SRC_PAD);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- return ret;
+ goto remove_vdev;
ret = prp_init_controls(priv);
if (ret)
- goto unreg;
+ goto unreg_vdev;
return 0;
-unreg:
+
+unreg_vdev:
imx_media_capture_device_unregister(priv->vdev);
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -1290,6 +1289,8 @@ static void prp_unregistered(struct v4l2_subdev *sd)
struct prp_priv *priv = sd_to_priv(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
+
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
@@ -1325,6 +1326,7 @@ static const struct v4l2_subdev_internal_ops prp_internal_ops = {
static int prp_init(struct imx_ic_priv *ic_priv)
{
struct prp_priv *priv;
+ int i, ret;
priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1336,15 +1338,19 @@ static int prp_init(struct imx_ic_priv *ic_priv)
spin_lock_init(&priv->irqlock);
timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
- priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
- &ic_priv->sd,
- PRPENCVF_SRC_PAD);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
-
mutex_init(&priv->lock);
- return 0;
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ mutex_destroy(&priv->lock);
+
+ return ret;
}
static void prp_remove(struct imx_ic_priv *ic_priv)
@@ -1352,7 +1358,6 @@ static void prp_remove(struct imx_ic_priv *ic_priv)
struct prp_priv *priv = ic_priv->task_priv;
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
}
struct imx_ic_ops imx_ic_prpencvf_ops = {
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b33a07bc9105..7712e7be8625 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -26,6 +26,8 @@
#include <media/imx.h>
#include "imx-media.h"
+#define IMX_CAPTURE_NAME "imx-capture"
+
struct capture_priv {
struct imx_media_video_dev vdev;
@@ -69,8 +71,8 @@ static int vidioc_querycap(struct file *file, void *fh,
{
struct capture_priv *priv = video_drvdata(file);
- strscpy(cap->driver, "imx-media-capture", sizeof(cap->driver));
- strscpy(cap->card, "imx-media-capture", sizeof(cap->card));
+ strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", priv->src_sd->name);
@@ -765,13 +767,6 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
INIT_LIST_HEAD(&priv->ready_q);
- priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
- ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
- if (ret) {
- v4l2_err(sd, "failed to init dev pad\n");
- goto unreg;
- }
-
/* create the link from the src_sd devnode pad to device node */
ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
&vfd->entity, 0, 0);
@@ -834,6 +829,7 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
{
struct capture_priv *priv;
struct video_device *vfd;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -858,6 +854,13 @@ imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
vfd->queue = &priv->q;
priv->vdev.vfd = vfd;
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ video_device_release(vfd);
+ return ERR_PTR(ret);
+ }
+
INIT_LIST_HEAD(&priv->vdev.list);
video_set_drvdata(vfd, priv);
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 367e39f5b382..b60ed4f22f6d 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -627,8 +627,8 @@ static int csi_idmac_start(struct csi_priv *priv)
}
priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
- priv->idmac_ch,
- IPU_IRQ_NFB4EOF);
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
csi_idmac_nfb4eof_interrupt, 0,
"imx-smfc-nfb4eof", priv);
@@ -1472,7 +1472,7 @@ static void csi_try_fmt(struct csi_priv *priv,
imx_media_enum_mbus_format(&code, 0,
CS_SEL_ANY, false);
*cc = imx_media_find_mbus_format(code,
- CS_SEL_ANY, false);
+ CS_SEL_ANY, false);
sdformat->format.code = (*cc)->codes[0];
}
@@ -1740,9 +1740,6 @@ static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_unsubscribe(fh, sub);
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi_registered(struct v4l2_subdev *sd)
{
struct csi_priv *priv = v4l2_get_subdevdata(sd);
@@ -1759,9 +1756,6 @@ static int csi_registered(struct v4l2_subdev *sd)
priv->csi = csi;
for (i = 0; i < CSI_NUM_PADS; i++) {
- priv->pad[i].flags = (i == CSI_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
code = 0;
if (i != CSI_SINK_PAD)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -1793,16 +1787,22 @@ static int csi_registered(struct v4l2_subdev *sd)
goto put_csi;
}
- ret = media_entity_pads_init(&sd->entity, CSI_NUM_PADS, priv->pad);
- if (ret)
+ priv->vdev = imx_media_capture_device_init(priv->sd.dev,
+ &priv->sd,
+ CSI_SRC_PAD_IDMAC);
+ if (IS_ERR(priv->vdev)) {
+ ret = PTR_ERR(priv->vdev);
goto free_fim;
+ }
ret = imx_media_capture_device_register(priv->vdev);
if (ret)
- goto free_fim;
+ goto remove_vdev;
return 0;
+remove_vdev:
+ imx_media_capture_device_remove(priv->vdev);
free_fim:
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1816,6 +1816,7 @@ static void csi_unregistered(struct v4l2_subdev *sd)
struct csi_priv *priv = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(priv->vdev);
+ imx_media_capture_device_remove(priv->vdev);
if (priv->fim)
imx_media_fim_free(priv->fim);
@@ -1923,7 +1924,7 @@ static int imx_csi_probe(struct platform_device *pdev)
struct ipu_client_platformdata *pdata;
struct pinctrl *pinctrl;
struct csi_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -1963,10 +1964,14 @@ static int imx_csi_probe(struct platform_device *pdev)
imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
priv->sd.grp_id, ipu_get_num(priv->ipu));
- priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
- CSI_SRC_PAD_IDMAC);
- if (IS_ERR(priv->vdev))
- return PTR_ERR(priv->vdev);
+ for (i = 0; i < CSI_NUM_PADS; i++)
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
mutex_init(&priv->lock);
@@ -1997,7 +2002,6 @@ static int imx_csi_probe(struct platform_device *pdev)
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
return ret;
}
@@ -2008,7 +2012,6 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- imx_media_capture_device_remove(priv->vdev);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
index 4cc6a7462ae2..0788a1874557 100644
--- a/drivers/staging/media/imx/imx-media-utils.c
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -184,7 +184,15 @@ static const struct imx_media_pixfmt rgb_formats[] = {
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 24,
}, {
- .fourcc = V4L2_PIX_FMT_BGR32,
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGRX32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGBX32,
.cs = IPUV3_COLORSPACE_RGB,
.bpp = 32,
},
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index cfad65a16917..0d83c2c41606 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -841,9 +841,6 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int vdic_registered(struct v4l2_subdev *sd)
{
struct vdic_priv *priv = v4l2_get_subdevdata(sd);
@@ -851,9 +848,6 @@ static int vdic_registered(struct v4l2_subdev *sd)
u32 code;
for (i = 0; i < VDIC_NUM_PADS; i++) {
- priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
-
code = 0;
if (i != VDIC_SINK_PAD_IDMAC)
imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
@@ -874,15 +868,7 @@ static int vdic_registered(struct v4l2_subdev *sd)
priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
- ret = vdic_init_controls(priv);
- if (ret)
- return ret;
-
- ret = media_entity_pads_init(&sd->entity, VDIC_NUM_PADS, priv->pad);
- if (ret)
- v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
-
- return ret;
+ return vdic_init_controls(priv);
}
static void vdic_unregistered(struct v4l2_subdev *sd)
@@ -927,7 +913,7 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
u32 grp_id)
{
struct vdic_priv *priv;
- int ret;
+ int i, ret;
priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -949,6 +935,15 @@ struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
mutex_init(&priv->lock);
+ for (i = 0; i < VDIC_NUM_PADS; i++)
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
+ priv->pad);
+ if (ret)
+ goto free;
+
ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
if (ret)
goto free;
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index bfa4b254c4e4..cd3dd6e33ef0 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -497,26 +497,13 @@ out:
return ret;
}
-/*
- * retrieve our pads parsed from the OF graph by the media device
- */
static int csi2_registered(struct v4l2_subdev *sd)
{
struct csi2_dev *csi2 = sd_to_dev(sd);
- int i, ret;
-
- for (i = 0; i < CSI2_NUM_PADS; i++) {
- csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* set a default mbus format */
- ret = imx_media_init_mbus_fmt(&csi2->format_mbus,
+ return imx_media_init_mbus_fmt(&csi2->format_mbus,
640, 480, 0, V4L2_FIELD_NONE, NULL);
- if (ret)
- return ret;
-
- return media_entity_pads_init(&sd->entity, CSI2_NUM_PADS, csi2->pad);
}
static const struct media_entity_operations csi2_entity_ops = {
@@ -573,7 +560,7 @@ static int csi2_probe(struct platform_device *pdev)
unsigned int sink_port = 0;
struct csi2_dev *csi2;
struct resource *res;
- int ret;
+ int i, ret;
csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
if (!csi2)
@@ -592,6 +579,16 @@ static int csi2_probe(struct platform_device *pdev)
csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
+ csi2->pad);
+ if (ret)
+ return ret;
+
csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
if (IS_ERR(csi2->pllref_clk)) {
v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index bfd6b5fbf484..db30e2c70f2f 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1100,9 +1100,6 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
int i;
for (i = 0; i < IMX7_CSI_PADS_NUM; i++) {
- csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
-
/* set a default mbus format */
ret = imx_media_init_mbus_fmt(&csi->format_mbus[i],
800, 600, 0, V4L2_FIELD_NONE,
@@ -1115,11 +1112,16 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
csi->frame_interval[i].denominator = 30;
}
- ret = media_entity_pads_init(&sd->entity, IMX7_CSI_PADS_NUM, csi->pad);
- if (ret < 0)
- return ret;
+ csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
+ IMX7_CSI_PAD_SRC);
+ if (IS_ERR(csi->vdev))
+ return PTR_ERR(csi->vdev);
+
+ ret = imx_media_capture_device_register(csi->vdev);
+ if (ret)
+ imx_media_capture_device_remove(csi->vdev);
- return imx_media_capture_device_register(csi->vdev);
+ return ret;
}
static void imx7_csi_unregistered(struct v4l2_subdev *sd)
@@ -1127,6 +1129,7 @@ static void imx7_csi_unregistered(struct v4l2_subdev *sd)
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
imx_media_capture_device_unregister(csi->vdev);
+ imx_media_capture_device_remove(csi->vdev);
}
static int imx7_csi_init_cfg(struct v4l2_subdev *sd,
@@ -1189,7 +1192,7 @@ static int imx7_csi_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct imx_media_dev *imxmd;
struct imx7_csi *csi;
- int ret;
+ int i, ret;
csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
if (!csi)
@@ -1251,14 +1254,18 @@ static int imx7_csi_probe(struct platform_device *pdev)
csi->sd.grp_id = IMX_MEDIA_GRP_ID_CSI;
snprintf(csi->sd.name, sizeof(csi->sd.name), "csi");
- csi->vdev = imx_media_capture_device_init(csi->sd.dev, &csi->sd,
- IMX7_CSI_PAD_SRC);
- if (IS_ERR(csi->vdev))
- return PTR_ERR(csi->vdev);
-
v4l2_ctrl_handler_init(&csi->ctrl_hdlr, 0);
csi->sd.ctrl_handler = &csi->ctrl_hdlr;
+ for (i = 0; i < IMX7_CSI_PADS_NUM; i++)
+ csi->pad[i].flags = (i == IMX7_CSI_PAD_SINK) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&csi->sd.entity, IMX7_CSI_PADS_NUM,
+ csi->pad);
+ if (ret < 0)
+ goto free;
+
ret = v4l2_async_register_fwnode_subdev(&csi->sd,
sizeof(struct v4l2_async_subdev),
NULL, 0,
@@ -1269,8 +1276,6 @@ static int imx7_csi_probe(struct platform_device *pdev)
return 0;
free:
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
cleanup:
@@ -1298,9 +1303,6 @@ static int imx7_csi_remove(struct platform_device *pdev)
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
- imx_media_capture_device_unregister(csi->vdev);
- imx_media_capture_device_remove(csi->vdev);
-
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&csi->ctrl_hdlr);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 73d8354e618c..99166afca071 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -293,7 +293,7 @@ static int mipi_csis_dump_regs(struct csi_state *state)
struct device *dev = &state->pdev->dev;
unsigned int i;
u32 cfg;
- struct {
+ static const struct {
u32 offset;
const char * const name;
} registers[] = {
@@ -350,6 +350,8 @@ static void mipi_csis_sw_reset(struct csi_state *state)
static int mipi_csis_phy_init(struct csi_state *state)
{
state->mipi_phy_regulator = devm_regulator_get(state->dev, "phy");
+ if (IS_ERR(state->mipi_phy_regulator))
+ return PTR_ERR(state->mipi_phy_regulator);
return regulator_set_voltage(state->mipi_phy_regulator, 1000000,
1000000);
@@ -780,17 +782,6 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mipi_csis_registered(struct v4l2_subdev *mipi_sd)
-{
- struct csi_state *state = mipi_sd_to_csis_state(mipi_sd);
-
- state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
- state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
-
- return media_entity_pads_init(&state->mipi_sd.entity, CSIS_PADS_NUM,
- state->pads);
-}
-
static const struct v4l2_subdev_core_ops mipi_csis_core_ops = {
.log_status = mipi_csis_log_status,
};
@@ -816,10 +807,6 @@ static const struct v4l2_subdev_ops mipi_csis_subdev_ops = {
.pad = &mipi_csis_pad_ops,
};
-static const struct v4l2_subdev_internal_ops mipi_csis_internal_ops = {
- .registered = mipi_csis_registered,
-};
-
static int mipi_csis_parse_dt(struct platform_device *pdev,
struct csi_state *state)
{
@@ -880,7 +867,6 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
mipi_sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
mipi_sd->entity.ops = &mipi_csis_entity_ops;
- mipi_sd->internal_ops = &mipi_csis_internal_ops;
mipi_sd->dev = &pdev->dev;
@@ -892,6 +878,13 @@ static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
v4l2_set_subdevdata(mipi_sd, &pdev->dev);
+ state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&mipi_sd->entity, CSIS_PADS_NUM,
+ state->pads);
+ if (ret)
+ return ret;
+
ret = v4l2_async_register_fwnode_subdev(mipi_sd,
sizeof(struct v4l2_async_subdev),
&sink_port, 1,
@@ -947,7 +940,6 @@ static void mipi_csis_debugfs_exit(struct csi_state *state)
static int mipi_csis_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csi_state *state;
int ret;
@@ -966,11 +958,13 @@ static int mipi_csis_probe(struct platform_device *pdev)
return ret;
}
- mipi_csis_phy_init(state);
+ ret = mipi_csis_phy_init(state);
+ if (ret < 0)
+ return ret;
+
mipi_csis_phy_reset(state);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
index cc288ae6d5f2..9def80ef28f3 100644
--- a/drivers/staging/media/ipu3/Makefile
+++ b/drivers/staging/media/ipu3/Makefile
@@ -10,9 +10,3 @@ ipu3-imgu-objs += \
ipu3-css.o ipu3-v4l2.o ipu3.o
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, packed-not-aligned)
-ccflags-y += $(call cc-disable-warning, type-limits)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
index 5e55baeaea1a..b44bb4a72ca7 100644
--- a/drivers/staging/media/ipu3/TODO
+++ b/drivers/staging/media/ipu3/TODO
@@ -9,12 +9,9 @@ staging directory.
relevant. (Sakari)
- IPU3 driver documentation (Laurent)
- Add diagram in driver rst to describe output capability.
Comments on configuring v4l2 subdevs for CIO2 and ImgU.
- uAPI documentation:
- Further clarification on some ambiguities such as data type conversion of
- IEFD CU inputs. (Sakari)
Move acronyms to doc-rst file. (Mauro)
- Switch to yavta from v4l2n in driver docs.
@@ -27,5 +24,3 @@ staging directory.
- Document different operation modes, and which buffer queues are relevant
in each mode. To process an image, which queues require a buffer and in
which ones is it optional?
-
-- Make sure it builds fine with no warnings with W=1
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
index c7cd27efac8a..08eaa0bad0de 100644
--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -1217,6 +1217,11 @@ struct ipu3_uapi_shd_config {
*
* All CU inputs are unsigned; they will be converted to signed when written
* to the register, i.e. a01 will be written to a 9-bit register in s4.4 format.
+ * The s4.4 precision means 4 bits for the integer part and 4 bits for the
+ * fractional part, with the leading bit indicating the sign.
+ * For userspace software (commonly the imaging library), the computation of
+ * the CU slope values should be based on the slope resolution of 1/16 (binary
+ * 0.0001, the minimal interval value); the slope value range is [-256, +255].
* This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
* &ipu3_uapi_iefd_cux2_1, &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
*/
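A hypothetical userspace helper making the s4.4 description concrete; ipu3_slope_to_s44() is not part of the uAPI, just an illustration of quantizing at the 1/16 resolution and clamping to the 9-bit signed range:

static inline int ipu3_slope_to_s44(double slope)
{
	int v = (int)(slope * 16.0);	/* quantize at the 1/16 resolution */

	if (v < -256)			/* clamp to the 9-bit signed range */
		v = -256;
	else if (v > 255)
		v = 255;
	return v;
}

/* ipu3_slope_to_s44(1.5) == 24, i.e. the s4.4 bit pattern 0 0001.1000. */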
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 1a966cb2f3a6..6fb60b58447a 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -908,11 +908,7 @@ static int iss_map_mem_resource(struct platform_device *pdev,
struct iss_device *iss,
enum iss_mem_resources res)
{
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, res);
-
- iss->regs[res] = devm_ioremap_resource(iss->dev, mem);
+ iss->regs[res] = devm_platform_ioremap_resource(pdev, res);
return PTR_ERR_OR_ZERO(iss->regs[res]);
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 54144dc9f509..673aa3a5f2bd 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -671,7 +671,7 @@ iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
/*
@@ -726,7 +726,7 @@ iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
return -EINVAL;
}
subdev = iss_video_remote_subdev(video, &pad);
- if (subdev == NULL)
+ if (!subdev)
return -EINVAL;
sdsel.pad = pad;
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
index c85ac6db0302..1bce49d3e7e2 100644
--- a/drivers/staging/media/sunxi/cedrus/Makefile
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o \
- cedrus_mpeg2.o cedrus_h264.o
+ cedrus_mpeg2.o cedrus_h264.o cedrus_h265.o
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 2d3ea8b74dfd..c6ddd46eff82 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -95,6 +95,45 @@ static const struct cedrus_control cedrus_controls[] = {
.codec = CEDRUS_CODEC_H264,
.required = false,
},
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = true,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ .def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE,
+ .max = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ .def = V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ .required = false,
+ },
};
#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
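With these controls registered, a stateless HEVC client attaches the parsed bitstream metadata to each media request. A hedged sketch of the userspace side (queue_slice_params() and its parameters are placeholders, and the HEVC stateless controls still live in a staging-only header at this point):

#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int queue_slice_params(int video_fd, int request_fd,
			      struct v4l2_ctrl_hevc_slice_params *slice_params)
{
	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS,
		.ptr = slice_params,
		.size = sizeof(*slice_params),
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_REQUEST_VAL,	/* apply per request */
		.request_fd = request_fd,
		.count = 1,
		.controls = &ctrl,
	};

	return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}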
@@ -241,6 +280,16 @@ static int cedrus_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_ctrls;
}
+ ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ cedrus_prepare_format(&ctx->dst_fmt);
+ ctx->src_fmt.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
+ /*
+ * TILED_NV12 has stricter requirements, so copy the width and
+ * height to src_fmt to ensure that it matches the dst_fmt resolution.
+ */
+ ctx->src_fmt.width = ctx->dst_fmt.width;
+ ctx->src_fmt.height = ctx->dst_fmt.height;
+ cedrus_prepare_format(&ctx->src_fmt);
v4l2_fh_add(&ctx->fh);
@@ -330,6 +379,7 @@ static int cedrus_probe(struct platform_device *pdev)
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
dev->dec_ops[CEDRUS_CODEC_H264] = &cedrus_dec_ops_h264;
+ dev->dec_ops[CEDRUS_CODEC_H265] = &cedrus_dec_ops_h265;
mutex_init(&dev->dev_mutex);
@@ -357,6 +407,8 @@ static int cedrus_probe(struct platform_device *pdev)
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+ strscpy(dev->mdev.bus_info, "platform:" CEDRUS_NAME,
+ sizeof(dev->mdev.bus_info));
media_device_init(&dev->mdev);
dev->mdev.ops = &cedrus_m2m_media_ops;
@@ -438,22 +490,26 @@ static const struct cedrus_variant sun8i_a33_cedrus_variant = {
};
static const struct cedrus_variant sun8i_h3_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_a64_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h5_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.mod_rate = 402000000,
};
static const struct cedrus_variant sun50i_h6_cedrus_variant = {
- .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED |
+ CEDRUS_CAPABILITY_H265_DEC,
.quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 2f017a651848..96765555ab8a 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -27,12 +27,14 @@
#define CEDRUS_NAME "cedrus"
#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+#define CEDRUS_CAPABILITY_H265_DEC BIT(1)
#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
+ CEDRUS_CODEC_H265,
CEDRUS_CODEC_LAST,
};
@@ -67,6 +69,12 @@ struct cedrus_mpeg2_run {
const struct v4l2_ctrl_mpeg2_quantization *quantization;
};
+struct cedrus_h265_run {
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+};
+
struct cedrus_run {
struct vb2_v4l2_buffer *src;
struct vb2_v4l2_buffer *dst;
@@ -74,6 +82,7 @@ struct cedrus_run {
union {
struct cedrus_h264_run h264;
struct cedrus_mpeg2_run mpeg2;
+ struct cedrus_h265_run h265;
};
};
@@ -107,9 +116,24 @@ struct cedrus_ctx {
ssize_t mv_col_buf_size;
void *pic_info_buf;
dma_addr_t pic_info_buf_dma;
+ ssize_t pic_info_buf_size;
void *neighbor_info_buf;
dma_addr_t neighbor_info_buf_dma;
+ void *deblk_buf;
+ dma_addr_t deblk_buf_dma;
+ ssize_t deblk_buf_size;
+ void *intra_pred_buf;
+ dma_addr_t intra_pred_buf_dma;
+ ssize_t intra_pred_buf_size;
} h264;
+ struct {
+ void *mv_col_buf;
+ dma_addr_t mv_col_buf_addr;
+ ssize_t mv_col_buf_size;
+ ssize_t mv_col_buf_unit_size;
+ void *neighbor_info_buf;
+ dma_addr_t neighbor_info_buf_addr;
+ } h265;
} codec;
};
@@ -155,6 +179,7 @@ struct cedrus_dev {
extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
extern struct cedrus_dec_ops cedrus_dec_ops_h264;
+extern struct cedrus_dec_ops cedrus_dec_ops_h265;
static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
{
@@ -179,12 +204,16 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
int index, unsigned int plane)
{
- struct vb2_buffer *buf;
+ struct vb2_buffer *buf = NULL;
+ struct vb2_queue *vq;
if (index < 0)
return 0;
- buf = ctx->fh.m2m_ctx->cap_q_ctx.q.bufs[index];
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (vq)
+ buf = vb2_get_buffer(vq, index);
+
return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 56ca4c9ad01c..4a2fc33a1d79 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -59,6 +59,15 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_H264_SPS);
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ run.h265.sps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SPS);
+ run.h265.pps = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_PPS);
+ run.h265.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
+ break;
+
default:
break;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index d6a782703c9b..bfb4a4820a67 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -6,6 +6,7 @@
* Copyright (c) 2018 Bootlin
*/
+#include <linux/delay.h>
#include <linux/types.h>
#include <media/videobuf2-dma-contig.h>
@@ -38,7 +39,7 @@ struct cedrus_h264_sram_ref_pic {
#define CEDRUS_H264_FRAME_NUM 18
#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K)
-#define CEDRUS_PIC_INFO_BUF_SIZE (128 * SZ_1K)
+#define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K)
static void cedrus_h264_write_sram(struct cedrus_dev *dev,
enum cedrus_h264_sram_off off,
@@ -96,7 +97,7 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
const struct v4l2_ctrl_h264_slice_params *slice = run->h264.slice_params;
const struct v4l2_ctrl_h264_sps *sps = run->h264.sps;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_buffer *output_buf;
struct cedrus_dev *dev = ctx->dev;
unsigned long used_dpbs = 0;
@@ -104,6 +105,8 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
unsigned int output = 0;
unsigned int i;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(pic_list, 0, sizeof(pic_list));
for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
@@ -167,12 +170,14 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
enum cedrus_h264_sram_off sram)
{
const struct v4l2_ctrl_h264_decode_params *decode = run->h264.decode_params;
- struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
+ struct vb2_queue *cap_q;
struct cedrus_dev *dev = ctx->dev;
u8 sram_array[CEDRUS_MAX_REF_IDX];
unsigned int i;
size_t size;
+ cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
memset(sram_array, 0, sizeof(sram_array));
for (i = 0; i < num_ref; i++) {
@@ -240,8 +245,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
sizeof(scaling->scaling_list_8x8[0]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
- scaling->scaling_list_8x8[3],
- sizeof(scaling->scaling_list_8x8[3]));
+ scaling->scaling_list_8x8[1],
+ sizeof(scaling->scaling_list_8x8[1]));
cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
scaling->scaling_list_4x4,
@@ -289,6 +294,28 @@ static void cedrus_write_pred_weight_table(struct cedrus_ctx *ctx,
}
}
+/*
+ * It turns out that using VE_H264_VLD_OFFSET to skip bits is not reliable. In
+ * rare cases the frame is not decoded correctly. However, setting the offset
+ * to 0 and skipping the appropriate number of bits with the flush-bits
+ * trigger always works.
+ */
+static void cedrus_skip_bits(struct cedrus_dev *dev, int num)
+{
+ int count = 0;
+
+ while (count < num) {
+ int tmp = min(num - count, 32);
+
+ cedrus_write(dev, VE_H264_TRIGGER_TYPE,
+ VE_H264_TRIGGER_TYPE_FLUSH_BITS |
+ VE_H264_TRIGGER_TYPE_N_BITS(tmp));
+ while (cedrus_read(dev, VE_H264_STATUS) & VE_H264_STATUS_VLD_BUSY)
+ udelay(1);
+
+ count += tmp;
+ }
+}
+
static void cedrus_set_params(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -299,12 +326,13 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
struct vb2_buffer *src_buf = &run->src->vb2_buf;
struct cedrus_dev *dev = ctx->dev;
dma_addr_t src_buf_addr;
- u32 offset = slice->header_bit_size;
- u32 len = (slice->size * 8) - offset;
+ u32 len = slice->size * 8;
+ unsigned int pic_width_in_mbs;
+ bool mbaff_pic;
u32 reg;
cedrus_write(dev, VE_H264_VLD_LEN, len);
- cedrus_write(dev, VE_H264_VLD_OFFSET, offset);
+ cedrus_write(dev, VE_H264_VLD_OFFSET, 0);
src_buf_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
cedrus_write(dev, VE_H264_VLD_END,
@@ -314,6 +342,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
VE_H264_VLD_ADDR_FIRST | VE_H264_VLD_ADDR_VALID |
VE_H264_VLD_ADDR_LAST);
+ if (ctx->src_fmt.width > 2048) {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_MIXED_RAM |
+ VE_BUF_CTRL_DBLK_MIXED_RAM);
+ cedrus_write(dev, VE_DBLK_DRAM_BUF_ADDR,
+ ctx->codec.h264.deblk_buf_dma);
+ cedrus_write(dev, VE_INTRAPRED_DRAM_BUF_ADDR,
+ ctx->codec.h264.intra_pred_buf_dma);
+ } else {
+ cedrus_write(dev, VE_BUF_CTRL,
+ VE_BUF_CTRL_INTRAPRED_INT_SRAM |
+ VE_BUF_CTRL_DBLK_INT_SRAM);
+ }
+
/*
* FIXME: Since the bitstream parsing is done in software, and
* in userspace, this shouldn't be needed anymore. But it
@@ -323,6 +365,8 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
cedrus_write(dev, VE_H264_TRIGGER_TYPE,
VE_H264_TRIGGER_TYPE_INIT_SWDEC);
+ cedrus_skip_bits(dev, slice->header_bit_size);
+
if (((pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) &&
(slice->slice_type == V4L2_H264_SLICE_TYPE_P ||
slice->slice_type == V4L2_H264_SLICE_TYPE_SP)) ||
@@ -370,12 +414,20 @@ static void cedrus_set_params(struct cedrus_ctx *ctx,
reg |= VE_H264_SPS_DIRECT_8X8_INFERENCE;
cedrus_write(dev, VE_H264_SPS, reg);
+ mbaff_pic = !(slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ pic_width_in_mbs = sps->pic_width_in_mbs_minus1 + 1;
+
// slice parameters
reg = 0;
+ reg |= ((slice->first_mb_in_slice % pic_width_in_mbs) & 0xff) << 24;
+ reg |= (((slice->first_mb_in_slice / pic_width_in_mbs) *
+ (mbaff_pic + 1)) & 0xff) << 16;
reg |= decode->nal_ref_idc ? BIT(12) : 0;
reg |= (slice->slice_type & 0xf) << 8;
reg |= slice->cabac_init_idc & 0x3;
- reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
+ if (ctx->fh.m2m_ctx->new_frame)
+ reg |= VE_H264_SHS_FIRST_SLICE_IN_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
reg |= VE_H264_SHS_FIELD_PIC;
if (slice->flags & V4L2_H264_SLICE_FLAG_BOTTOM_FIELD)
@@ -447,7 +499,7 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
{
struct cedrus_dev *dev = ctx->dev;
- cedrus_engine_enable(dev, CEDRUS_CODEC_H264);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H264);
cedrus_write(dev, VE_H264_SDROT_CTRL, 0);
cedrus_write(dev, VE_H264_EXTRA_BUFFER1,
@@ -464,18 +516,30 @@ static void cedrus_h264_setup(struct cedrus_ctx *ctx,
static int cedrus_h264_start(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
+ unsigned int pic_info_size;
unsigned int field_size;
unsigned int mv_col_size;
int ret;
+ /* The formula for the picture buffer size is taken from the CedarX source. */
+
+ if (ctx->src_fmt.width > 2048)
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x4000;
+ else
+ pic_info_size = CEDRUS_H264_FRAME_NUM * 0x1000;
+
/*
- * FIXME: It seems that the H6 cedarX code is using a formula
- * here based on the size of the frame, while all the older
- * code is using a fixed size, so that might need to be
- * changed at some point.
+ * FIXME: If V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is set,
+ * there is no need to multiply by 2.
*/
+ pic_info_size += ctx->src_fmt.height * 2 * 64;
+
+ if (pic_info_size < CEDRUS_MIN_PIC_INFO_BUF_SIZE)
+ pic_info_size = CEDRUS_MIN_PIC_INFO_BUF_SIZE;
+
+ ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_alloc_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
&ctx->codec.h264.pic_info_buf_dma,
GFP_KERNEL);
if (!ctx->codec.h264.pic_info_buf)
@@ -528,15 +592,56 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
goto err_neighbor_buf;
}
+ if (ctx->src_fmt.width > 2048) {
+ /*
+ * The formulas for the deblock and intra prediction buffer
+ * sizes are taken from the CedarX source.
+ */
+
+ ctx->codec.h264.deblk_buf_size =
+ ALIGN(ctx->src_fmt.width, 32) * 12;
+ ctx->codec.h264.deblk_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.deblk_buf) {
+ ret = -ENOMEM;
+ goto err_mv_col_buf;
+ }
+
+ ctx->codec.h264.intra_pred_buf_size =
+ ALIGN(ctx->src_fmt.width, 64) * 5;
+ ctx->codec.h264.intra_pred_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL);
+ if (!ctx->codec.h264.intra_pred_buf) {
+ ret = -ENOMEM;
+ goto err_deblk_buf;
+ }
+ }
+
return 0;
+err_deblk_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+
+err_mv_col_buf:
+ dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma);
+
err_neighbor_buf:
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
err_pic_buf:
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
return ret;
@@ -552,9 +657,17 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
ctx->codec.h264.neighbor_info_buf,
ctx->codec.h264.neighbor_info_buf_dma);
- dma_free_coherent(dev->dev, CEDRUS_PIC_INFO_BUF_SIZE,
+ dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
ctx->codec.h264.pic_info_buf,
ctx->codec.h264.pic_info_buf_dma);
+ if (ctx->codec.h264.deblk_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma);
+ if (ctx->codec.h264.intra_pred_buf_size)
+ dma_free_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
new file mode 100644
index 000000000000..6945dc74e1d7
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2013 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <linux/types.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/*
+ * These are the sizes of the side buffers required by the hardware for
+ * storing internal decoding metadata. They match the values used by the
+ * early BSP implementations, which were initially exposed in
+ * libvdpau-sunxi. Subsequent BSP implementations seem to double the
+ * neighbor info buffer size for the H6 SoC, which may be related to
+ * 10-bit H265 support.
+ */
+#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K)
+#define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K)
+#define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160
+
+struct cedrus_h265_sram_frame_info {
+ __le32 top_pic_order_cnt;
+ __le32 bottom_pic_order_cnt;
+ __le32 top_mv_col_buf_addr;
+ __le32 bottom_mv_col_buf_addr;
+ __le32 luma_addr;
+ __le32 chroma_addr;
+} __packed;
+
+struct cedrus_h265_sram_pred_weight {
+ __s8 delta_weight;
+ __s8 offset;
+} __packed;
+
+static enum cedrus_irq_status cedrus_h265_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_H265_STATUS);
+ reg &= VE_DEC_H265_STATUS_CHECK_MASK;
+
+ if (reg & VE_DEC_H265_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_H265_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_h265_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_CHECK_MASK);
+}
+
+static void cedrus_h265_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_H265_CTRL);
+
+ reg &= ~VE_DEC_H265_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_H265_CTRL, reg);
+}
+
+static void cedrus_h265_sram_write_offset(struct cedrus_dev *dev, u32 offset)
+{
+ cedrus_write(dev, VE_DEC_H265_SRAM_OFFSET, offset);
+}
+
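+/*
+ * Consecutive writes to the SRAM data port land at consecutive offsets,
+ * so the offset register only needs to be set once per transfer.
+ */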
+static void cedrus_h265_sram_write_data(struct cedrus_dev *dev, void *data,
+ unsigned int size)
+{
+ u32 *word = data;
+
+ while (size >= sizeof(u32)) {
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, *word++);
+ size -= sizeof(u32);
+ }
+}
+
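+/*
+ * The MV column buffer is one allocation split into a fixed-size unit per
+ * capture buffer; the top field uses the first half of a unit and the
+ * bottom field the second half.
+ */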
+static inline dma_addr_t
+cedrus_h265_frame_info_mv_col_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int index, unsigned int field)
+{
+ return ctx->codec.h265.mv_col_buf_addr + index *
+ ctx->codec.h265.mv_col_buf_unit_size +
+ field * ctx->codec.h265.mv_col_buf_unit_size / 2;
+}
+
+static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
+ unsigned int index,
+ bool field_pic,
+ u32 pic_order_cnt[],
+ int buffer_index)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 0);
+ dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 1);
+ dma_addr_t mv_col_buf_addr[2] = {
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index, 0),
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index,
+ field_pic ? 1 : 0)
+ };
+ u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
+ VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT * index;
+ struct cedrus_h265_sram_frame_info frame_info = {
+ .top_pic_order_cnt = cpu_to_le32(pic_order_cnt[0]),
+ .bottom_pic_order_cnt = cpu_to_le32(field_pic ?
+ pic_order_cnt[1] :
+ pic_order_cnt[0]),
+ .top_mv_col_buf_addr =
+ cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .bottom_mv_col_buf_addr = cpu_to_le32(field_pic ?
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[1]) :
+ VE_DEC_H265_SRAM_DATA_ADDR_BASE(mv_col_buf_addr[0])),
+ .luma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_luma_addr)),
+ .chroma_addr = cpu_to_le32(VE_DEC_H265_SRAM_DATA_ADDR_BASE(dst_chroma_addr)),
+ };
+
+ cedrus_h265_sram_write_offset(dev, offset);
+ cedrus_h265_sram_write_data(dev, &frame_info, sizeof(frame_info));
+}
+
+static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ u8 num_active_dpb_entries)
+{
+ struct vb2_queue *vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ unsigned int i;
+
+ for (i = 0; i < num_active_dpb_entries; i++) {
+ int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0);
+ u32 pic_order_cnt[2] = {
+ dpb[i].pic_order_cnt[0],
+ dpb[i].pic_order_cnt[1]
+ };
+
+ cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
+ pic_order_cnt,
+ buffer_index);
+ }
+}
+
+static void cedrus_h265_ref_pic_list_write(struct cedrus_dev *dev,
+ const struct v4l2_hevc_dpb_entry *dpb,
+ const u8 list[],
+ u8 num_ref_idx_active,
+ u32 sram_offset)
+{
+ unsigned int i;
+ u32 word = 0;
+
+ cedrus_h265_sram_write_offset(dev, sram_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int shift = (i % 4) * 8;
+ unsigned int index = list[i];
+ u8 value = list[i];
+
+ if (dpb[index].rps == V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR)
+ value |= VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF;
+
+ /* Each SRAM word gathers up to 4 references. */
+ word |= value << shift;
+
+ /* Write the word to SRAM and clear it for the next batch. */
+ if ((i % 4) == 3 || i == (num_ref_idx_active - 1)) {
+ cedrus_h265_sram_write_data(dev, &word, sizeof(word));
+ word = 0;
+ }
+ }
+}
+
+static void cedrus_h265_pred_weight_write(struct cedrus_dev *dev,
+ const s8 delta_luma_weight[],
+ const s8 luma_offset[],
+ const s8 delta_chroma_weight[][2],
+ const s8 chroma_offset[][2],
+ u8 num_ref_idx_active,
+ u32 sram_luma_offset,
+ u32 sram_chroma_offset)
+{
+ struct cedrus_h265_sram_pred_weight pred_weight[2] = { { 0 } };
+ unsigned int i, j;
+
+ cedrus_h265_sram_write_offset(dev, sram_luma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ unsigned int index = i % 2;
+
+ pred_weight[index].delta_weight = delta_luma_weight[i];
+ pred_weight[index].offset = luma_offset[i];
+
+ if (index == 1 || i == (num_ref_idx_active - 1))
+ cedrus_h265_sram_write_data(dev, (u32 *)&pred_weight,
+ sizeof(pred_weight));
+ }
+
+ cedrus_h265_sram_write_offset(dev, sram_chroma_offset);
+
+ for (i = 0; i < num_ref_idx_active; i++) {
+ for (j = 0; j < 2; j++) {
+ pred_weight[j].delta_weight = delta_chroma_weight[i][j];
+ pred_weight[j].offset = chroma_offset[i][j];
+ }
+
+ cedrus_h265_sram_write_data(dev, &pred_weight,
+ sizeof(pred_weight));
+ }
+}
+
+static void cedrus_h265_setup(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ const struct v4l2_ctrl_hevc_slice_params *slice_params;
+ const struct v4l2_hevc_pred_weight_table *pred_weight_table;
+ dma_addr_t src_buf_addr;
+ dma_addr_t src_buf_end_addr;
+ u32 chroma_log2_weight_denom;
+ u32 output_pic_list_index;
+ u32 pic_order_cnt[2];
+ u32 reg;
+
+ sps = run->h265.sps;
+ pps = run->h265.pps;
+ slice_params = run->h265.slice_params;
+ pred_weight_table = &slice_params->pred_weight_table;
+
+ /* MV column buffer size and allocation. */
+ if (!ctx->codec.h265.mv_col_buf_size) {
+ unsigned int num_buffers =
+ run->dst->vb2_buf.vb2_queue->num_buffers;
+ unsigned int log2_max_luma_coding_block_size =
+ sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ unsigned int ctb_size_luma =
+ 1UL << log2_max_luma_coding_block_size;
+
+ /*
+ * Each CTB requires an MV col buffer with a specific unit size.
+ * Since the address is given with its LSBs missing, 1 KiB is
+ * added to each buffer to ensure proper alignment.
+ */
+ ctx->codec.h265.mv_col_buf_unit_size =
+ DIV_ROUND_UP(ctx->src_fmt.width, ctb_size_luma) *
+ DIV_ROUND_UP(ctx->src_fmt.height, ctb_size_luma) *
+ CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE + SZ_1K;
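+ /*
+ * For instance, a 1920x1080 stream with 64-pixel CTBs needs
+ * 30 * 17 * 160 + 1024 = 82624 bytes per unit.
+ */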
+
+ ctx->codec.h265.mv_col_buf_size = num_buffers *
+ ctx->codec.h265.mv_col_buf_unit_size;
+
+ ctx->codec.h265.mv_col_buf =
+ dma_alloc_coherent(dev->dev,
+ ctx->codec.h265.mv_col_buf_size,
+ &ctx->codec.h265.mv_col_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.mv_col_buf) {
+ ctx->codec.h265.mv_col_buf_size = 0;
+ // TODO: Abort the decoding run here instead of returning silently.
+ return;
+ }
+ }
+
+ /* Activate H265 engine. */
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_H265);
+
+ /* Source offset and length in bits. */
+
+ reg = slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, reg);
+
+ reg = slice_params->bit_size - slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_H265_BITS_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA;
+ reg |= VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA;
+
+ cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
+
+ src_buf_end_addr = src_buf_addr +
+ DIV_ROUND_UP(slice_params->bit_size, 8);
+
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
+ cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
+
+ /* Coding tree block address: start at the beginning. */
+ reg = VE_DEC_H265_DEC_CTB_ADDR_X(0) | VE_DEC_H265_DEC_CTB_ADDR_Y(0);
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_ADDR, reg);
+
+ cedrus_write(dev, VE_DEC_H265_TILE_START_CTB, 0);
+ cedrus_write(dev, VE_DEC_H265_TILE_END_CTB, 0);
+
+ /* Clear the number of correctly-decoded coding tree blocks. */
+ cedrus_write(dev, VE_DEC_H265_DEC_CTB_NUM, 0);
+
+ /* Initialize bitstream access. */
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_INIT_SWDEC);
+
+ /* Bitstream parameters. */
+
+ reg = VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(slice_params->nal_unit_type) |
+ VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(slice_params->nuh_temporal_id_plus1);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_NAL_HDR, reg);
+
+ /* SPS. */
+
+ reg = VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(sps->max_transform_hierarchy_depth_intra) |
+ VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(sps->max_transform_hierarchy_depth_inter) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(sps->log2_diff_max_min_luma_transform_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(sps->log2_min_luma_transform_block_size_minus2) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_luma_coding_block_size) |
+ VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(sps->bit_depth_chroma_minus8) |
+ VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(sps->chroma_format_idc);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE,
+ V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED,
+ V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET,
+ sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED,
+ V4L2_HEVC_SPS_FLAG_AMP_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE,
+ V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SPS_HDR, reg);
+
+ reg = VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(sps->log2_diff_max_min_pcm_luma_coding_block_size) |
+ VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(sps->log2_min_pcm_luma_coding_block_size_minus3) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(sps->pcm_sample_bit_depth_chroma_minus1) |
+ VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(sps->pcm_sample_bit_depth_luma_minus1);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_ENABLED, sps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED,
+ V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED,
+ sps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PCM_CTRL, reg);
+
+ /* PPS. */
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(pps->pps_cr_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(pps->pps_cb_qp_offset) |
+ VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(pps->init_qp_minus26) |
+ VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(pps->diff_cu_qp_delta_depth);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED,
+ V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED,
+ V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED,
+ V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED,
+ pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL0, reg);
+
+ reg = VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(pps->log2_parallel_merge_level_minus2);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED,
+ pps->flags);
+
+ /* TODO: VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED */
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED,
+ pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED, pps->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED,
+ V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED, pps->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PPS_CTRL1, reg);
+
+ /* Slice Parameters. */
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(slice_params->pic_struct) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(slice_params->five_minus_max_num_merge_cand) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(slice_params->num_ref_idx_l1_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(slice_params->num_ref_idx_l0_active_minus1) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(slice_params->collocated_ref_idx) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(slice_params->colour_plane_id) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(slice_params->slice_type);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT,
+ V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT,
+ pps->flags);
+
+ /* FIXME: For multi-slice support. */
+ reg |= VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC;
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO0, reg);
+
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(slice_params->slice_tc_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(slice_params->slice_beta_offset_div2) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(slice_params->num_rps_poc_st_curr_after == 0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(slice_params->slice_cr_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(slice_params->slice_cb_qp_offset) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(slice_params->slice_qp_delta);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED,
+ slice_params->flags);
+
+ reg |= VE_DEC_H265_FLAG(VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED,
+ slice_params->flags);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO1, reg);
+
+ chroma_log2_weight_denom = pred_weight_table->luma_log2_weight_denom +
+ pred_weight_table->delta_chroma_log2_weight_denom;
+ reg = VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(0) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(chroma_log2_weight_denom) |
+ VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(pred_weight_table->luma_log2_weight_denom);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_SLICE_HDR_INFO2, reg);
+
+ /* Decoded picture size. */
+
+ reg = VE_DEC_H265_DEC_PIC_SIZE_WIDTH(ctx->src_fmt.width) |
+ VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_H265_DEC_PIC_SIZE, reg);
+
+ /* Scaling list. */
+
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
+
+ /* Neighbor information address. */
+ reg = VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(ctx->codec.h265.neighbor_info_buf_addr);
+ cedrus_write(dev, VE_DEC_H265_NEIGHBOR_INFO_ADDR, reg);
+
+ /* Write the decoded picture buffer entries into the picture list. */
+ cedrus_h265_frame_info_write_dpb(ctx, slice_params->dpb,
+ slice_params->num_active_dpb_entries);
+
+ /* Output frame. */
+
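+ /*
+ * The output picture uses the frame info SRAM slot right after the
+ * last possible DPB entry.
+ */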
+ output_pic_list_index = V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
+ pic_order_cnt[0] = slice_params->slice_pic_order_cnt;
+ pic_order_cnt[1] = slice_params->slice_pic_order_cnt;
+
+ cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
+ slice_params->pic_struct != 0,
+ pic_order_cnt,
+ run->dst->vb2_buf.index);
+
+ cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
+
+ /* Reference picture list 0 (for P/B frames). */
+ if (slice_params->slice_type != V4L2_HEVC_SLICE_TYPE_I) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0);
+
+ if ((pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED) ||
+ (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED))
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l0,
+ pred_weight_table->luma_offset_l0,
+ pred_weight_table->delta_chroma_weight_l0,
+ pred_weight_table->chroma_offset_l0,
+ slice_params->num_ref_idx_l0_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0);
+ }
+
+ /* Reference picture list 1 (for B frames). */
+ if (slice_params->slice_type == V4L2_HEVC_SLICE_TYPE_B) {
+ cedrus_h265_ref_pic_list_write(dev, slice_params->dpb,
+ slice_params->ref_idx_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1);
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED)
+ cedrus_h265_pred_weight_write(dev,
+ pred_weight_table->delta_luma_weight_l1,
+ pred_weight_table->luma_offset_l1,
+ pred_weight_table->delta_chroma_weight_l1,
+ pred_weight_table->chroma_offset_l1,
+ slice_params->num_ref_idx_l1_active_minus1 + 1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1,
+ VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1);
+ }
+
+ /* Enable the relevant decoder interrupts. */
+ cedrus_write(dev, VE_DEC_H265_CTRL, VE_DEC_H265_CTRL_IRQ_MASK);
+}
+
+static int cedrus_h265_start(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ /* The MV column buffer size is calculated at setup time. */
+ ctx->codec.h265.mv_col_buf_size = 0;
+
+ ctx->codec.h265.neighbor_info_buf =
+ dma_alloc_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL);
+ if (!ctx->codec.h265.neighbor_info_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cedrus_h265_stop(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (ctx->codec.h265.mv_col_buf_size > 0) {
+ dma_free_coherent(dev->dev, ctx->codec.h265.mv_col_buf_size,
+ ctx->codec.h265.mv_col_buf,
+ ctx->codec.h265.mv_col_buf_addr);
+
+ ctx->codec.h265.mv_col_buf_size = 0;
+ }
+
+ dma_free_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr);
+}
+
+static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_H265_TRIGGER, VE_DEC_H265_TRIGGER_DEC_SLICE);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_h265 = {
+ .irq_clear = cedrus_h265_irq_clear,
+ .irq_disable = cedrus_h265_irq_disable,
+ .irq_status = cedrus_h265_irq_status,
+ .setup = cedrus_h265_setup,
+ .start = cedrus_h265_start,
+ .stop = cedrus_h265_stop,
+ .trigger = cedrus_h265_trigger,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index a942cd9bed57..daf5f244f93b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -30,7 +30,7 @@
#include "cedrus_hw.h"
#include "cedrus_regs.h"
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec)
{
u32 reg = 0;
@@ -50,11 +50,20 @@ int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
reg |= VE_MODE_DEC_H264;
break;
+ case CEDRUS_CODEC_H265:
+ reg |= VE_MODE_DEC_H265;
+ break;
+
default:
return -EINVAL;
}
- cedrus_write(dev, VE_MODE, reg);
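+ /*
+ * Wide pictures need extra mode hints: a 4096-pixel-wide picture sets
+ * both the is-4096 and the more-than-2048 bits.
+ */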
+ if (ctx->src_fmt.width == 4096)
+ reg |= VE_MODE_PIC_WIDTH_IS_4096;
+ if (ctx->src_fmt.width > 2048)
+ reg |= VE_MODE_PIC_WIDTH_MORE_2048;
+
+ cedrus_write(ctx->dev, VE_MODE, reg);
return 0;
}
@@ -103,7 +112,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
{
struct cedrus_dev *dev = data;
struct cedrus_ctx *ctx;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state state;
enum cedrus_irq_status status;
@@ -121,24 +129,13 @@ static irqreturn_t cedrus_irq(int irq, void *data)
dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
- src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
-
- if (!src_buf || !dst_buf) {
- v4l2_err(&dev->v4l2_dev,
- "Missing source and/or destination buffers\n");
- return IRQ_HANDLED;
- }
-
if (status == CEDRUS_IRQ_ERROR)
state = VB2_BUF_STATE_ERROR;
else
state = VB2_BUF_STATE_DONE;
- v4l2_m2m_buf_done(src_buf, state);
- v4l2_m2m_buf_done(dst_buf, state);
-
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
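+ /*
+ * This m2m helper completes both source and destination buffers and
+ * finishes the job in one call, honouring the hold-capture-buffer flag
+ * used for multi-slice decoding.
+ */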
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ state);
return IRQ_HANDLED;
}
@@ -146,7 +143,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
int cedrus_hw_probe(struct cedrus_dev *dev)
{
const struct cedrus_variant *variant;
- struct resource *res;
int irq_dec;
int ret;
@@ -225,8 +221,7 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
goto err_sram;
}
- res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(dev->dev, res);
+ dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
if (IS_ERR(dev->base)) {
dev_err(dev->dev, "Failed to map registers\n");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
index 27d0882397aa..604ff932fbf5 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -16,7 +16,7 @@
#ifndef _CEDRUS_HW_H_
#define _CEDRUS_HW_H_
-int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
+int cedrus_engine_enable(struct cedrus_ctx *ctx, enum cedrus_codec codec);
void cedrus_engine_disable(struct cedrus_dev *dev);
void cedrus_dst_format_set(struct cedrus_dev *dev,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 13c34927bad5..8bcd6b8f9e2d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -96,7 +96,7 @@ static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
quantization = run->mpeg2.quantization;
/* Activate MPEG engine. */
- cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2);
+ cedrus_engine_enable(ctx, CEDRUS_CODEC_MPEG2);
/* Set intra quantization matrix. */
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index ddd29788d685..7beb03d3bb39 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -10,6 +10,9 @@
#ifndef _CEDRUS_REGS_H_
#define _CEDRUS_REGS_H_
+#define SHIFT_AND_MASK_BITS(v, h, l) \
+ (((unsigned long)(v) << (l)) & GENMASK(h, l))
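+/*
+ * For example, SHIFT_AND_MASK_BITS(0x3, 9, 8) evaluates to 0x300; bits of
+ * (v) that would fall outside the [h:l] field are masked off, and the
+ * unsigned long cast avoids undefined behaviour when shifting into bit 31.
+ */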
+
/*
* Common acronyms and contractions used in register descriptions:
* * VLD : Variable-Length Decoder
@@ -18,13 +21,22 @@
* * MC: Motion Compensation
* * STCD: Start Code Detect
* * SDRT: Scale Down and Rotate
+ * * WB: Writeback
+ * * BITS/BS: Bitstream
+ * * MB: Macroblock
+ * * CTU: Coding Tree Unit
+ * * CTB: Coding Tree Block
+ * * IDX: Index
*/
#define VE_ENGINE_DEC_MPEG 0x100
#define VE_ENGINE_DEC_H264 0x200
+#define VE_ENGINE_DEC_H265 0x500
#define VE_MODE 0x00
+#define VE_MODE_PIC_WIDTH_IS_4096 BIT(22)
+#define VE_MODE_PIC_WIDTH_MORE_2048 BIT(21)
#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
@@ -34,11 +46,22 @@
#define VE_MODE_DEC_H264 (0x01 << 0)
#define VE_MODE_DEC_MPEG (0x00 << 0)
+#define VE_BUF_CTRL 0x50
+
+#define VE_BUF_CTRL_INTRAPRED_EXT_RAM (0x02 << 2)
+#define VE_BUF_CTRL_INTRAPRED_MIXED_RAM (0x01 << 2)
+#define VE_BUF_CTRL_INTRAPRED_INT_SRAM (0x00 << 2)
+#define VE_BUF_CTRL_DBLK_EXT_RAM (0x02 << 0)
+#define VE_BUF_CTRL_DBLK_MIXED_RAM (0x01 << 0)
+#define VE_BUF_CTRL_DBLK_INT_SRAM (0x00 << 0)
+
+#define VE_DBLK_DRAM_BUF_ADDR 0x54
+#define VE_INTRAPRED_DRAM_BUF_ADDR 0x58
#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
-#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
-#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) SHIFT_AND_MASK_BITS(s, 31, 16)
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) SHIFT_AND_MASK_BITS(s, 15, 0)
#define VE_CHROMA_BUF_LEN 0xe8
@@ -46,7 +69,7 @@
#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
-#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
+#define VE_CHROMA_BUF_LEN_SDRT(l) SHIFT_AND_MASK_BITS(l, 27, 0)
#define VE_PRIMARY_OUT_FMT 0xec
@@ -69,15 +92,15 @@
#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
-#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) SHIFT_AND_MASK_BITS(t, 30, 28)
#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
- (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+ (((unsigned long)(__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
- (((p) << 10) & GENMASK(11, 10))
+ SHIFT_AND_MASK_BITS(p, 11, 10)
#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
- (((s) << 8) & GENMASK(9, 8))
+ SHIFT_AND_MASK_BITS(s, 9, 8)
#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
((v) ? BIT(7) : 0)
#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
@@ -98,19 +121,19 @@
#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
- ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(w, 16), 15, 8)
#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
- ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
+ SHIFT_AND_MASK_BITS(DIV_ROUND_UP(h, 16), 7, 0)
#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
-#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
-#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) SHIFT_AND_MASK_BITS(w, 27, 16)
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) SHIFT_AND_MASK_BITS(h, 11, 0)
#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
-#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
-#define VE_DEC_MPEG_MBADDR_Y(h) (((h) << 0) & GENMASK(7, 0))
+#define VE_DEC_MPEG_MBADDR_X(w) SHIFT_AND_MASK_BITS(w, 15, 8)
+#define VE_DEC_MPEG_MBADDR_Y(h) SHIFT_AND_MASK_BITS(h, 7, 0)
#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
@@ -225,13 +248,277 @@
#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
- (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
+ (SHIFT_AND_MASK_BITS(i, 13, 8) | SHIFT_AND_MASK_BITS(v, 7, 0))
#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+#define VE_DEC_H265_DEC_NAL_HDR (VE_ENGINE_DEC_H265 + 0x00)
+
+#define VE_DEC_H265_DEC_NAL_HDR_NUH_TEMPORAL_ID_PLUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_NAL_HDR_NAL_UNIT_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 0)
+
+#define VE_DEC_H265_FLAG(reg_flag, ctrl_flag, flags) \
+ (((flags) & (ctrl_flag)) ? reg_flag : 0)
+
+#define VE_DEC_H265_DEC_SPS_HDR (VE_ENGINE_DEC_H265 + 0x04)
+
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_STRONG_INTRA_SMOOTHING_ENABLE BIT(26)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SPS_TEMPORAL_MVP_ENABLED BIT(25)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SAMPLE_ADAPTIVE_OFFSET_ENABLED BIT(24)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_AMP_ENABLED BIT(23)
+#define VE_DEC_H265_DEC_SPS_HDR_FLAG_SEPARATE_COLOUR_PLANE BIT(2)
+
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTRA(v) \
+ SHIFT_AND_MASK_BITS(v, 22, 20)
+#define VE_DEC_H265_DEC_SPS_HDR_MAX_TRANSFORM_HIERARCHY_DEPTH_INTER(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 17)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_TRANSFORM_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 16, 15)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_TRANSFORM_BLOCK_SIZE_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 13)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_DIFF_MAX_MIN_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 11)
+#define VE_DEC_H265_DEC_SPS_HDR_LOG2_MIN_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 9)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_CHROMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 8, 6)
+#define VE_DEC_H265_DEC_SPS_HDR_BIT_DEPTH_LUMA_MINUS8(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 3)
+#define VE_DEC_H265_DEC_SPS_HDR_CHROMA_FORMAT_IDC(v) \
+ SHIFT_AND_MASK_BITS(v, 1, 0)
+
+#define VE_DEC_H265_DEC_PIC_SIZE (VE_ENGINE_DEC_H265 + 0x08)
+
+#define VE_DEC_H265_DEC_PIC_SIZE_WIDTH(w) (((w) << 0) & GENMASK(13, 0))
+#define VE_DEC_H265_DEC_PIC_SIZE_HEIGHT(h) (((h) << 16) & GENMASK(29, 16))
+
+#define VE_DEC_H265_DEC_PCM_CTRL (VE_ENGINE_DEC_H265 + 0x0c)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_ENABLED BIT(15)
+#define VE_DEC_H265_DEC_PCM_CTRL_FLAG_PCM_LOOP_FILTER_DISABLED BIT(14)
+
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_DIFF_MAX_MIN_PCM_LUMA_CODING_BLOCK_SIZE(v) \
+ SHIFT_AND_MASK_BITS(v, 11, 10)
+#define VE_DEC_H265_DEC_PCM_CTRL_LOG2_MIN_PCM_LUMA_CODING_BLOCK_SIZE_MINUS3(v) \
+ SHIFT_AND_MASK_BITS(v, 9, 8)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_CHROMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 7, 4)
+#define VE_DEC_H265_DEC_PCM_CTRL_PCM_SAMPLE_BIT_DEPTH_LUMA_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0 (VE_ENGINE_DEC_H265 + 0x10)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CU_QP_DELTA_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_TRANSFORM_SKIP_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_CONSTRAINED_INTRA_PRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL0_FLAG_SIGN_DATA_HIDING_ENABLED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 24)
+#define VE_DEC_H265_DEC_PPS_CTRL0_PPS_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 16)
+#define VE_DEC_H265_DEC_PPS_CTRL0_INIT_QP_MINUS26(v) \
+ SHIFT_AND_MASK_BITS(v, 14, 8)
+#define VE_DEC_H265_DEC_PPS_CTRL0_DIFF_CU_QP_DELTA_DEPTH(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1 (VE_ENGINE_DEC_H265 + 0x14)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(6)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED BIT(5)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_ENTROPY_CODING_SYNC_ENABLED BIT(4)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TILES_ENABLED BIT(3)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_TRANSQUANT_BYPASS_ENABLED BIT(2)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_BIPRED BIT(1)
+#define VE_DEC_H265_DEC_PPS_CTRL1_FLAG_WEIGHTED_PRED BIT(0)
+
+#define VE_DEC_H265_DEC_PPS_CTRL1_LOG2_PARALLEL_MERGE_LEVEL_MINUS2(v) \
+ SHIFT_AND_MASK_BITS(v, 10, 8)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0 (VE_ENGINE_DEC_H265 + 0x18)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED BIT(31)
+
+#define VE_DEC_H265_SCALING_LIST_CTRL0_SRAM (0 << 30)
+#define VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT (1 << 30)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0 (VE_ENGINE_DEC_H265 + 0x20)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_COLLOCATED_FROM_L0 BIT(11)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_CABAC_INIT BIT(10)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_MVD_L1_ZERO BIT(9)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_CHROMA BIT(8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_SAO_LUMA BIT(7)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_SLICE_TEMPORAL_MVP_ENABLE BIT(6)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_DEPENDENT_SLICE_SEGMENT BIT(1)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FLAG_FIRST_SLICE_SEGMENT_IN_PIC BIT(0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_PICTURE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 29, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_FIVE_MINUS_MAX_NUM_MERGE_CAND(v) \
+ SHIFT_AND_MASK_BITS(v, 26, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L1_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 23, 20)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_NUM_REF_IDX_L0_ACTIVE_MINUS1(v) \
+ SHIFT_AND_MASK_BITS(v, 19, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLLOCATED_REF_IDX(v) \
+ SHIFT_AND_MASK_BITS(v, 15, 12)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_COLOUR_PLANE_ID(v) \
+ SHIFT_AND_MASK_BITS(v, 5, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO0_SLICE_TYPE(v) \
+ SHIFT_AND_MASK_BITS(v, 3, 2)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1 (VE_ENGINE_DEC_H265 + 0x24)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED BIT(23)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED BIT(22)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_TC_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 31, 28)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_BETA_OFFSET_DIV2(v) \
+ SHIFT_AND_MASK_BITS(v, 27, 24)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_POC_BIGEST_IN_RPS_ST(v) \
+ ((v) ? BIT(21) : 0)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CR_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 20, 16)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_CB_QP_OFFSET(v) \
+ SHIFT_AND_MASK_BITS(v, 12, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO1_SLICE_QP_DELTA(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 0)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2 (VE_ENGINE_DEC_H265 + 0x28)
+
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_NUM_ENTRY_POINT_OFFSETS(v) \
+ SHIFT_AND_MASK_BITS(v, 21, 8)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_CHROMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 6, 4)
+#define VE_DEC_H265_DEC_SLICE_HDR_INFO2_LUMA_LOG2_WEIGHT_DENOM(v) \
+ SHIFT_AND_MASK_BITS(v, 2, 0)
+
+#define VE_DEC_H265_DEC_CTB_ADDR (VE_ENGINE_DEC_H265 + 0x2c)
+
+#define VE_DEC_H265_DEC_CTB_ADDR_Y(y) SHIFT_AND_MASK_BITS(y, 25, 16)
+#define VE_DEC_H265_DEC_CTB_ADDR_X(x) SHIFT_AND_MASK_BITS(x, 9, 0)
+
+#define VE_DEC_H265_CTRL (VE_ENGINE_DEC_H265 + 0x30)
+
+#define VE_DEC_H265_CTRL_DDR_CONSISTENCY_EN BIT(31)
+#define VE_DEC_H265_CTRL_STCD_EN BIT(25)
+#define VE_DEC_H265_CTRL_EPTB_DEC_BYPASS_EN BIT(24)
+#define VE_DEC_H265_CTRL_TQ_BYPASS_EN BIT(12)
+#define VE_DEC_H265_CTRL_VLD_BYPASS_EN BIT(11)
+#define VE_DEC_H265_CTRL_NCRI_CACHE_DISABLE BIT(10)
+#define VE_DEC_H265_CTRL_ROTATE_SCALE_OUT_EN BIT(9)
+#define VE_DEC_H265_CTRL_MC_NO_WRITEBACK BIT(8)
+#define VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN BIT(2)
+#define VE_DEC_H265_CTRL_ERROR_IRQ_EN BIT(1)
+#define VE_DEC_H265_CTRL_FINISH_IRQ_EN BIT(0)
+#define VE_DEC_H265_CTRL_IRQ_MASK \
+ (VE_DEC_H265_CTRL_FINISH_IRQ_EN | VE_DEC_H265_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_H265_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_H265_TRIGGER (VE_ENGINE_DEC_H265 + 0x34)
+
+#define VE_DEC_H265_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_AVS (0x01 << 4)
+#define VE_DEC_H265_TRIGGER_STCD_HEVC (0x00 << 4)
+#define VE_DEC_H265_TRIGGER_DEC_SLICE (0x08 << 0)
+#define VE_DEC_H265_TRIGGER_INIT_SWDEC (0x07 << 0)
+#define VE_DEC_H265_TRIGGER_BYTE_ALIGN (0x06 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCUE (0x05 << 0)
+#define VE_DEC_H265_TRIGGER_GET_VLCSE (0x04 << 0)
+#define VE_DEC_H265_TRIGGER_FLUSH_BITS (0x03 << 0)
+#define VE_DEC_H265_TRIGGER_GET_BITS (0x02 << 0)
+#define VE_DEC_H265_TRIGGER_SHOW_BITS (0x01 << 0)
+
+#define VE_DEC_H265_STATUS (VE_ENGINE_DEC_H265 + 0x38)
+
+#define VE_DEC_H265_STATUS_STCD BIT(24)
+#define VE_DEC_H265_STATUS_STCD_BUSY BIT(21)
+#define VE_DEC_H265_STATUS_WB_BUSY BIT(20)
+#define VE_DEC_H265_STATUS_BS_DMA_BUSY BIT(19)
+#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(18)
+#define VE_DEC_H265_STATUS_INTER_BUSY BIT(17)
+#define VE_DEC_H265_STATUS_MORE_DATA BIT(16)
+#define VE_DEC_H265_STATUS_VLD_BUSY BIT(14)
+#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY BIT(13)
+#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY BIT(12)
+#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(11)
+#define VE_DEC_H265_STATUS_SAO_BUSY BIT(10)
+#define VE_DEC_H265_STATUS_MVP_BUSY BIT(9)
+#define VE_DEC_H265_STATUS_SWDEC_BUSY BIT(8)
+#define VE_DEC_H265_STATUS_OVER_TIME BIT(3)
+#define VE_DEC_H265_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_H265_STATUS_ERROR BIT(1)
+#define VE_DEC_H265_STATUS_SUCCESS BIT(0)
+#define VE_DEC_H265_STATUS_STCD_TYPE_MASK GENMASK(23, 22)
+#define VE_DEC_H265_STATUS_CHECK_MASK \
+ (VE_DEC_H265_STATUS_SUCCESS | VE_DEC_H265_STATUS_ERROR | \
+ VE_DEC_H265_STATUS_VLD_DATA_REQ)
+#define VE_DEC_H265_STATUS_CHECK_ERROR \
+ (VE_DEC_H265_STATUS_ERROR | VE_DEC_H265_STATUS_VLD_DATA_REQ)
+
+#define VE_DEC_H265_DEC_CTB_NUM (VE_ENGINE_DEC_H265 + 0x3c)
+
+#define VE_DEC_H265_BITS_ADDR (VE_ENGINE_DEC_H265 + 0x40)
+
+#define VE_DEC_H265_BITS_ADDR_FIRST_SLICE_DATA BIT(30)
+#define VE_DEC_H265_BITS_ADDR_LAST_SLICE_DATA BIT(29)
+#define VE_DEC_H265_BITS_ADDR_VALID_SLICE_DATA BIT(28)
+#define VE_DEC_H265_BITS_ADDR_BASE(a) (((a) >> 8) & GENMASK(27, 0))
+
+#define VE_DEC_H265_BITS_OFFSET (VE_ENGINE_DEC_H265 + 0x44)
+#define VE_DEC_H265_BITS_LEN (VE_ENGINE_DEC_H265 + 0x48)
+
+#define VE_DEC_H265_BITS_END_ADDR (VE_ENGINE_DEC_H265 + 0x4c)
+
+#define VE_DEC_H265_BITS_END_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_SDRT_CTRL (VE_ENGINE_DEC_H265 + 0x50)
+#define VE_DEC_H265_SDRT_LUMA_ADDR (VE_ENGINE_DEC_H265 + 0x54)
+#define VE_DEC_H265_SDRT_CHROMA_ADDR (VE_ENGINE_DEC_H265 + 0x58)
+
+#define VE_DEC_H265_OUTPUT_FRAME_IDX (VE_ENGINE_DEC_H265 + 0x5c)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR (VE_ENGINE_DEC_H265 + 0x60)
+
+#define VE_DEC_H265_NEIGHBOR_INFO_ADDR_BASE(a) ((a) >> 8)
+
+#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
+#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
+#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+
+#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
+
+#define VE_DEC_H265_LOW_ADDR_PRIMARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 31, 24)
+#define VE_DEC_H265_LOW_ADDR_SECONDARY_CHROMA(a) \
+ SHIFT_AND_MASK_BITS(a, 23, 16)
+#define VE_DEC_H265_LOW_ADDR_ENTRY_POINTS_BUF(a) \
+ SHIFT_AND_MASK_BITS(a, 7, 0)
+
+#define VE_DEC_H265_SRAM_OFFSET (VE_ENGINE_DEC_H265 + 0xe0)
+
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L0 0x00
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L0 0x20
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_LUMA_L1 0x60
+#define VE_DEC_H265_SRAM_OFFSET_PRED_WEIGHT_CHROMA_L1 0x80
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO 0x400
+#define VE_DEC_H265_SRAM_OFFSET_FRAME_INFO_UNIT 0x20
+#define VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS 0x800
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST0 0xc00
+#define VE_DEC_H265_SRAM_OFFSET_REF_PIC_LIST1 0xc10
+
+#define VE_DEC_H265_SRAM_DATA (VE_ENGINE_DEC_H265 + 0xe4)
+
+#define VE_DEC_H265_SRAM_DATA_ADDR_BASE(a) ((a) >> 8)
+#define VE_DEC_H265_SRAM_REF_PIC_LIST_LT_REF BIT(7)
+
#define VE_H264_SPS 0x200
#define VE_H264_SPS_MBS_ONLY BIT(18)
#define VE_H264_SPS_MB_ADAPTIVE_FRAME_FIELD BIT(17)
@@ -267,13 +554,16 @@
VE_H264_CTRL_SLICE_DECODE_INT)
#define VE_H264_TRIGGER_TYPE 0x224
+#define VE_H264_TRIGGER_TYPE_N_BITS(x) (((x) & 0x3f) << 8)
#define VE_H264_TRIGGER_TYPE_AVC_SLICE_DECODE (8 << 0)
#define VE_H264_TRIGGER_TYPE_INIT_SWDEC (7 << 0)
+#define VE_H264_TRIGGER_TYPE_FLUSH_BITS (3 << 0)
#define VE_H264_STATUS 0x228
#define VE_H264_STATUS_VLD_DATA_REQ_INT VE_H264_CTRL_VLD_DATA_REQ_INT
#define VE_H264_STATUS_DECODE_ERR_INT VE_H264_CTRL_DECODE_ERR_INT
#define VE_H264_STATUS_SLICE_DECODE_INT VE_H264_CTRL_SLICE_DECODE_INT
+#define VE_H264_STATUS_VLD_BUSY BIT(8)
#define VE_H264_STATUS_INT_MASK VE_H264_CTRL_INT_MASK
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index eeee3efd247b..15cf1f10221b 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -29,8 +29,8 @@
#define CEDRUS_MIN_WIDTH 16U
#define CEDRUS_MIN_HEIGHT 16U
-#define CEDRUS_MAX_WIDTH 3840U
-#define CEDRUS_MAX_HEIGHT 2160U
+#define CEDRUS_MAX_WIDTH 4096U
+#define CEDRUS_MAX_HEIGHT 2304U
static struct cedrus_format cedrus_formats[] = {
{
@@ -42,6 +42,11 @@ static struct cedrus_format cedrus_formats[] = {
.directions = CEDRUS_DECODE_SRC,
},
{
+ .pixelformat = V4L2_PIX_FMT_HEVC_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ .capabilities = CEDRUS_CAPABILITY_H265_DEC,
+ },
+ {
.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
.directions = CEDRUS_DECODE_DST,
},
@@ -62,34 +67,31 @@ static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions,
unsigned int capabilities)
{
+ struct cedrus_format *first_valid_fmt = NULL;
struct cedrus_format *fmt;
unsigned int i;
for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
fmt = &cedrus_formats[i];
- if (fmt->capabilities && (fmt->capabilities & capabilities) !=
- fmt->capabilities)
+ if ((fmt->capabilities & capabilities) != fmt->capabilities ||
+ !(fmt->directions & directions))
continue;
- if (fmt->pixelformat == pixelformat &&
- (fmt->directions & directions) != 0)
+ if (fmt->pixelformat == pixelformat)
break;
+
+ if (!first_valid_fmt)
+ first_valid_fmt = fmt;
}
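+ /*
+ * If no exact match was found, fall back to the first format that fits
+ * the requested direction and device capabilities, so try_fmt always
+ * proposes something usable.
+ */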
if (i == CEDRUS_FORMATS_COUNT)
- return NULL;
+ return first_valid_fmt;
return &cedrus_formats[i];
}
-static bool cedrus_check_format(u32 pixelformat, u32 directions,
- unsigned int capabilities)
-{
- return cedrus_find_format(pixelformat, directions, capabilities);
-}
-
-static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
{
unsigned int width = pix_fmt->width;
unsigned int height = pix_fmt->height;
@@ -105,9 +107,11 @@ static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
switch (pix_fmt->pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
/* Zero bytes per line for encoded source. */
bytesperline = 0;
-
+ /* Choose some minimum size, since this cannot be 0. */
+ sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
case V4L2_PIX_FMT_SUNXI_TILED_NV12:
@@ -214,16 +218,7 @@ static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to dummy default by lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->dst_fmt;
-
return 0;
}
@@ -232,17 +227,7 @@ static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
{
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
- /* Fall back to dummy default by lack of hardware configuration. */
- if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
- f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
- f->fmt.pix.sizeimage = SZ_1K;
- cedrus_prepare_format(&f->fmt.pix);
-
- return 0;
- }
-
f->fmt.pix = ctx->src_fmt;
-
return 0;
}
@@ -252,11 +237,14 @@ static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
- dev->capabilities))
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -268,15 +256,14 @@ static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
struct cedrus_ctx *ctx = cedrus_file2ctx(file);
struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+ struct cedrus_format *fmt =
+ cedrus_find_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
+ dev->capabilities);
- if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
- dev->capabilities))
- return -EINVAL;
-
- /* Source image size has to be provided by userspace. */
- if (pix_fmt->sizeimage == 0)
+ if (!fmt)
return -EINVAL;
+ pix_fmt->pixelformat = fmt->pixelformat;
cedrus_prepare_format(pix_fmt);
return 0;
@@ -322,6 +309,17 @@ static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
ctx->src_fmt = f->fmt.pix;
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ vq->subsystem_flags |=
+ VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ vq->subsystem_flags &=
+ ~VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ }
+
/* Propagate colorspace information to capture. */
ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
@@ -355,6 +353,9 @@ const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd,
+ .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd,
+
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -364,21 +365,12 @@ static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
struct device *alloc_devs[])
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
- struct cedrus_dev *dev = ctx->dev;
struct v4l2_pix_format *pix_fmt;
- u32 directions;
- if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
- directions = CEDRUS_DECODE_SRC;
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
- } else {
- directions = CEDRUS_DECODE_DST;
+ else
pix_fmt = &ctx->dst_fmt;
- }
-
- if (!cedrus_check_format(pix_fmt->pixelformat, directions,
- dev->capabilities))
- return -EINVAL;
if (*nplanes) {
if (sizes[0] < pix_fmt->sizeimage)
@@ -453,6 +445,10 @@ static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
ctx->current_codec = CEDRUS_CODEC_H264;
break;
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ ctx->current_codec = CEDRUS_CODEC_H265;
+ break;
+
default:
return -EINVAL;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
index 0e4f7a8cccf2..05050c0a0921 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -26,5 +26,6 @@ extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
+void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt);
#endif
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index 8948d5246409..6262eb25c80b 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig MOST
- tristate "MOST support"
+ tristate "MOST support"
depends on HAS_DMA && CONFIGFS_FS
- default n
- help
+ default n
+ help
Say Y here if you want to enable MOST support.
This driver needs at least one additional component to enable the
desired access from userspace (e.g. character devices) and one that
@@ -12,7 +12,7 @@ menuconfig MOST
To compile this driver as a module, choose M here: the
module will be called most_core.
- If in doubt, say N here.
+ If in doubt, say N here.
diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
index 724d098aeef0..f880147c82fd 100644
--- a/drivers/staging/most/cdev/cdev.c
+++ b/drivers/staging/most/cdev/cdev.c
@@ -494,6 +494,7 @@ err_remove_ida:
static struct cdev_component comp = {
.cc = {
+ .mod = THIS_MODULE,
.name = "cdev",
.probe_channel = comp_probe,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/most/configfs.c b/drivers/staging/most/configfs.c
index 025495657b68..34a9fb53985c 100644
--- a/drivers/staging/most/configfs.c
+++ b/drivers/staging/most/configfs.c
@@ -164,6 +164,7 @@ static ssize_t mdev_link_direction_store(struct config_item *item,
!sysfs_streq(page, "dir_tx") && !sysfs_streq(page, "tx"))
return -EINVAL;
strcpy(mdev_link->direction, page);
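+ /* Strip the trailing newline that echo(1) appends, so later string
+ * comparisons against the stored value match.
+ */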
+ strim(mdev_link->direction);
return count;
}
@@ -182,6 +183,7 @@ static ssize_t mdev_link_datatype_store(struct config_item *item,
!sysfs_streq(page, "isoc_avp"))
return -EINVAL;
strcpy(mdev_link->datatype, page);
+ strim(mdev_link->datatype);
return count;
}
@@ -196,6 +198,7 @@ static ssize_t mdev_link_device_store(struct config_item *item,
struct mdev_link *mdev_link = to_mdev_link(item);
strcpy(mdev_link->device, page);
+ strim(mdev_link->device);
return count;
}
@@ -210,6 +213,7 @@ static ssize_t mdev_link_channel_store(struct config_item *item,
struct mdev_link *mdev_link = to_mdev_link(item);
strcpy(mdev_link->channel, page);
+ strim(mdev_link->channel);
return count;
}
@@ -391,22 +395,29 @@ static const struct config_item_type mdev_link_type = {
struct most_common {
struct config_group group;
+ struct module *mod;
+ struct configfs_subsystem subsys;
};
-static struct most_common *to_most_common(struct config_item *item)
+static struct most_common *to_most_common(struct configfs_subsystem *subsys)
{
- return container_of(to_config_group(item), struct most_common, group);
+ return container_of(subsys, struct most_common, subsys);
}
static struct config_item *most_common_make_item(struct config_group *group,
const char *name)
{
struct mdev_link *mdev_link;
+ struct most_common *mc = to_most_common(group->cg_subsys);
mdev_link = kzalloc(sizeof(*mdev_link), GFP_KERNEL);
if (!mdev_link)
return ERR_PTR(-ENOMEM);
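+ /* Pin the owning component module so it cannot be unloaded while
+ * this configfs item exists; the reference is dropped in the
+ * disconnect_notify callback.
+ */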
+ if (!try_module_get(mc->mod)) {
+ kfree(mdev_link);
+ return ERR_PTR(-ENOLCK);
+ }
config_item_init_type_name(&mdev_link->item, name,
&mdev_link_type);
@@ -422,15 +433,26 @@ static struct config_item *most_common_make_item(struct config_group *group,
static void most_common_release(struct config_item *item)
{
- kfree(to_most_common(item));
+ struct config_group *group = to_config_group(item);
+
+ kfree(to_most_common(group->cg_subsys));
}
static struct configfs_item_operations most_common_item_ops = {
.release = most_common_release,
};
+static void most_common_disconnect(struct config_group *group,
+ struct config_item *item)
+{
+ struct most_common *mc = to_most_common(group->cg_subsys);
+
+ module_put(mc->mod);
+}
+
static struct configfs_group_operations most_common_group_ops = {
.make_item = most_common_make_item,
+ .disconnect_notify = most_common_disconnect,
};
static const struct config_item_type most_common_type = {
@@ -439,29 +461,35 @@ static const struct config_item_type most_common_type = {
.ct_owner = THIS_MODULE,
};
-static struct configfs_subsystem most_cdev_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_cdev",
- .ci_type = &most_common_type,
+static struct most_common most_cdev = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_cdev",
+ .ci_type = &most_common_type,
+ },
},
},
};
-static struct configfs_subsystem most_net_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_net",
- .ci_type = &most_common_type,
+static struct most_common most_net = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_net",
+ .ci_type = &most_common_type,
+ },
},
},
};
-static struct configfs_subsystem most_video_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "most_video",
- .ci_type = &most_common_type,
+static struct most_common most_video = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "most_video",
+ .ci_type = &most_common_type,
+ },
},
},
};
@@ -487,7 +515,7 @@ static struct config_item *most_snd_grp_make_item(struct config_group *group,
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&mdev_link->item, name, &mdev_link_type);
- mdev_link->create_link = 0;
+ mdev_link->create_link = false;
strcpy(mdev_link->name, name);
strcpy(mdev_link->comp, "sound");
return &mdev_link->item;
@@ -545,13 +573,14 @@ static const struct config_item_type most_snd_grp_type = {
struct most_sound {
struct configfs_subsystem subsys;
struct list_head soundcard_list;
+ struct module *mod;
};
static struct config_group *most_sound_make_group(struct config_group *group,
const char *name)
{
struct most_snd_grp *most;
- struct most_sound *ms = container_of(to_configfs_subsystem(group),
+ struct most_sound *ms = container_of(group->cg_subsys,
struct most_sound, subsys);
list_for_each_entry(most, &ms->soundcard_list, list) {
@@ -560,17 +589,29 @@ static struct config_group *most_sound_make_group(struct config_group *group,
return ERR_PTR(-EPROTO);
}
}
+ if (!try_module_get(ms->mod))
+ return ERR_PTR(-ENOLCK);
most = kzalloc(sizeof(*most), GFP_KERNEL);
- if (!most)
+ if (!most) {
+ module_put(ms->mod);
return ERR_PTR(-ENOMEM);
-
+ }
config_group_init_type_name(&most->group, name, &most_snd_grp_type);
list_add_tail(&most->list, &ms->soundcard_list);
return &most->group;
}
+static void most_sound_disconnect(struct config_group *group,
+ struct config_item *item)
+{
+ struct most_sound *ms = container_of(group->cg_subsys,
+ struct most_sound, subsys);
+ module_put(ms->mod);
+}
+
static struct configfs_group_operations most_sound_group_ops = {
.make_group = most_sound_make_group,
+ .disconnect_notify = most_sound_disconnect,
};
static const struct config_item_type most_sound_type = {
@@ -593,16 +634,21 @@ int most_register_configfs_subsys(struct core_component *c)
{
int ret;
- if (!strcmp(c->name, "cdev"))
- ret = configfs_register_subsystem(&most_cdev_subsys);
- else if (!strcmp(c->name, "net"))
- ret = configfs_register_subsystem(&most_net_subsys);
- else if (!strcmp(c->name, "video"))
- ret = configfs_register_subsystem(&most_video_subsys);
- else if (!strcmp(c->name, "sound"))
+ if (!strcmp(c->name, "cdev")) {
+ most_cdev.mod = c->mod;
+ ret = configfs_register_subsystem(&most_cdev.subsys);
+ } else if (!strcmp(c->name, "net")) {
+ most_net.mod = c->mod;
+ ret = configfs_register_subsystem(&most_net.subsys);
+ } else if (!strcmp(c->name, "video")) {
+ most_video.mod = c->mod;
+ ret = configfs_register_subsystem(&most_video.subsys);
+ } else if (!strcmp(c->name, "sound")) {
+ most_sound_subsys.mod = c->mod;
ret = configfs_register_subsystem(&most_sound_subsys.subsys);
- else
+ } else {
return -ENODEV;
+ }
if (ret) {
pr_err("Error %d while registering subsystem %s\n",
@@ -631,11 +677,11 @@ void most_interface_register_notify(const char *mdev)
void most_deregister_configfs_subsys(struct core_component *c)
{
if (!strcmp(c->name, "cdev"))
- configfs_unregister_subsystem(&most_cdev_subsys);
+ configfs_unregister_subsystem(&most_cdev.subsys);
else if (!strcmp(c->name, "net"))
- configfs_unregister_subsystem(&most_net_subsys);
+ configfs_unregister_subsystem(&most_net.subsys);
else if (!strcmp(c->name, "video"))
- configfs_unregister_subsystem(&most_video_subsys);
+ configfs_unregister_subsystem(&most_video.subsys);
else if (!strcmp(c->name, "sound"))
configfs_unregister_subsystem(&most_sound_subsys.subsys);
}
@@ -643,14 +689,14 @@ EXPORT_SYMBOL_GPL(most_deregister_configfs_subsys);
int __init configfs_init(void)
{
- config_group_init(&most_cdev_subsys.su_group);
- mutex_init(&most_cdev_subsys.su_mutex);
+ config_group_init(&most_cdev.subsys.su_group);
+ mutex_init(&most_cdev.subsys.su_mutex);
- config_group_init(&most_net_subsys.su_group);
- mutex_init(&most_net_subsys.su_mutex);
+ config_group_init(&most_net.subsys.su_group);
+ mutex_init(&most_net.subsys.su_mutex);
- config_group_init(&most_video_subsys.su_group);
- mutex_init(&most_video_subsys.su_mutex);
+ config_group_init(&most_video.subsys.su_group);
+ mutex_init(&most_video.subsys.su_mutex);
config_group_init(&most_sound_subsys.subsys.su_group);
mutex_init(&most_sound_subsys.subsys.su_mutex);
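
The restructuring above embeds each configfs_subsystem in a wrapper
struct that also carries a module pointer, so the owning component
module can be pinned while user-created items exist: make_item takes a
reference with try_module_get(), and the .disconnect_notify callback,
which configfs invokes on rmdir before the item is released, drops it
with module_put(). A condensed sketch of the pairing, mirroring the
hunks above:

	static struct config_item *make_item(struct config_group *group,
					     const char *name)
	{
		struct most_common *mc = to_most_common(group->cg_subsys);
		struct mdev_link *link = kzalloc(sizeof(*link), GFP_KERNEL);

		if (!link)
			return ERR_PTR(-ENOMEM);
		if (!try_module_get(mc->mod)) {		/* pin the component */
			kfree(link);
			return ERR_PTR(-ENOLCK);
		}
		config_item_init_type_name(&link->item, name, &mdev_link_type);
		return &link->item;
	}

	static void disconnect(struct config_group *group,
			       struct config_item *item)
	{
		module_put(to_most_common(group->cg_subsys)->mod);	/* unpin */
	}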
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 8e9a0b67c6ed..51a6b41d5b82 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -52,7 +52,7 @@ struct most_channel {
u16 channel_id;
char name[STRING_SIZE];
bool is_poisoned;
- struct mutex start_mutex;
+ struct mutex start_mutex; /* channel activation synchronization */
struct mutex nq_mutex; /* nq thread synchronization */
int is_starving;
struct most_interface *iface;
@@ -60,7 +60,7 @@ struct most_channel {
bool keep_mbo;
bool enqueue_halt;
struct list_head fifo;
- spinlock_t fifo_lock;
+ spinlock_t fifo_lock; /* fifo access synchronization */
struct list_head halt_fifo;
struct list_head list;
struct pipe pipe0;
@@ -84,11 +84,11 @@ static const struct {
int most_ch_data_type;
const char *name;
} ch_data_type[] = {
- { MOST_CH_CONTROL, "control\n" },
- { MOST_CH_ASYNC, "async\n" },
- { MOST_CH_SYNC, "sync\n" },
- { MOST_CH_ISOC, "isoc\n"},
- { MOST_CH_ISOC, "isoc_avp\n"},
+ { MOST_CH_CONTROL, "control" },
+ { MOST_CH_ASYNC, "async" },
+ { MOST_CH_SYNC, "sync" },
+ { MOST_CH_ISOC, "isoc"},
+ { MOST_CH_ISOC, "isoc_avp"},
};
/**
@@ -521,48 +521,6 @@ static ssize_t components_show(struct device_driver *drv, char *buf)
}
/**
- * split_string - parses buf and extracts ':' separated substrings.
- *
- * @buf: complete string from attribute 'add_channel'
- * @a: storage for 1st substring (=interface name)
- * @b: storage for 2nd substring (=channel name)
- * @c: storage for 3rd substring (=component name)
- * @d: storage optional 4th substring (=user defined name)
- *
- * Examples:
- *
- * Input: "mdev0:ch6:cdev:my_channel\n" or
- * "mdev0:ch6:cdev:my_channel"
- *
- * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
- *
- * Input: "mdev1:ep81:cdev\n"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
- *
- * Input: "mdev1:ep81"
- * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
- */
-static int split_string(char *buf, char **a, char **b, char **c, char **d)
-{
- *a = strsep(&buf, ":");
- if (!*a)
- return -EIO;
-
- *b = strsep(&buf, ":\n");
- if (!*b)
- return -EIO;
-
- *c = strsep(&buf, ":\n");
- if (!*c)
- return -EIO;
-
- if (d)
- *d = strsep(&buf, ":\n");
-
- return 0;
-}
-
-/**
* get_channel - get pointer to channel
* @mdev: name of the device interface
* @mdev_ch: name of channel
@@ -675,13 +633,13 @@ int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
if (!c)
return -ENODEV;
- if (!strcmp(buf, "dir_rx\n")) {
+ if (!strcmp(buf, "dir_rx")) {
c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "rx\n")) {
+ } else if (!strcmp(buf, "rx")) {
c->cfg.direction = MOST_CH_RX;
- } else if (!strcmp(buf, "dir_tx\n")) {
+ } else if (!strcmp(buf, "dir_tx")) {
c->cfg.direction = MOST_CH_TX;
- } else if (!strcmp(buf, "tx\n")) {
+ } else if (!strcmp(buf, "tx")) {
c->cfg.direction = MOST_CH_TX;
} else {
pr_info("Invalid direction\n");
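
With the configfs path now strim()ing its input, the "\n"-suffixed
string variants are gone. Where a store callback may still see either
form, the kernel also provides sysfs_streq() in <linux/string.h>, which
compares two strings while treating a single trailing newline on either
side as insignificant; a sketch of that alternative, not part of this
patch:

	if (sysfs_streq(buf, "dir_rx") || sysfs_streq(buf, "rx"))
		c->cfg.direction = MOST_CH_RX;
	else if (sysfs_streq(buf, "dir_tx") || sysfs_streq(buf, "tx"))
		c->cfg.direction = MOST_CH_TX;
	else
		return -EINVAL;		/* invalid direction string */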
@@ -723,48 +681,6 @@ int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
return link_channel_to_component(c, comp, link_name, comp_param);
}
-/**
- * remove_link_store - store function for remove_link attribute
- * @drv: device driver
- * @buf: buffer
- * @len: buffer length
- *
- * Example:
- * echo "mdev0:ep81" >remove_link
- */
-static ssize_t remove_link_store(struct device_driver *drv,
- const char *buf,
- size_t len)
-{
- struct most_channel *c;
- struct core_component *comp;
- char buffer[STRING_SIZE];
- char *mdev;
- char *mdev_ch;
- char *comp_name;
- int ret;
- size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
-
- strlcpy(buffer, buf, max_len);
- ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
- if (ret)
- return ret;
- comp = match_component(comp_name);
- if (!comp)
- return -ENODEV;
- c = get_channel(mdev, mdev_ch);
- if (!c)
- return -ENODEV;
-
- if (comp->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
- if (c->pipe0.comp == comp)
- c->pipe0.comp = NULL;
- if (c->pipe1.comp == comp)
- c->pipe1.comp = NULL;
- return len;
-}
-
int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
struct most_channel *c;
@@ -790,12 +706,10 @@ int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);
-static DRIVER_ATTR_WO(remove_link);
static struct attribute *mc_attrs[] = {
DRV_ATTR(links),
DRV_ATTR(components),
- DRV_ATTR(remove_link),
NULL,
};
diff --git a/drivers/staging/most/core.h b/drivers/staging/most/core.h
index 652aaa771029..49859aef98df 100644
--- a/drivers/staging/most/core.h
+++ b/drivers/staging/most/core.h
@@ -265,6 +265,7 @@ struct most_interface {
struct core_component {
struct list_head list;
const char *name;
+ struct module *mod;
int (*probe_channel)(struct most_interface *iface, int channel_idx,
struct most_channel_config *cfg, char *name,
char *param);
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index 26a31854c636..6cab1bb8956e 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -498,6 +498,7 @@ put_nd:
}
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = "net",
.probe_channel = comp_probe_channel,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/most/sound/sound.c b/drivers/staging/most/sound/sound.c
index 79817061fcfa..723d0bd1cc21 100644
--- a/drivers/staging/most/sound/sound.c
+++ b/drivers/staging/most/sound/sound.c
@@ -344,8 +344,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
pr_err("Requested number of channels not supported.\n");
return -EINVAL;
}
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/**
@@ -359,7 +358,7 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
*/
static int pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/**
@@ -469,7 +468,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int split_arg_list(char *buf, u16 *ch_num, char **sample_res)
@@ -663,6 +661,8 @@ skip_adpt_alloc:
pcm->private_data = channel;
strscpy(pcm->name, device_name, sizeof(pcm->name));
snd_pcm_set_ops(pcm, direction, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
@@ -782,6 +782,7 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
* Initialization of the struct core_component
*/
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = DRIVER_NAME,
.probe_channel = audio_probe_channel,
.disconnect_channel = audio_disconnect_channel,
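
The sound.c hunks above move the PCM away from the driver-side vmalloc
buffer helpers to the core's preallocation API: declaring
SNDRV_DMA_TYPE_VMALLOC once at PCM creation lets the generic
snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages() pair service
hw_params/hw_free, and the .page callback can be dropped because the
core already knows how to map vmalloc-backed buffers. A sketch of the
pattern, with hypothetical function names:

	static void sketch_setup_buffers(struct snd_pcm *pcm)
	{
		/* NULL device and 0/0 sizes: vmalloc buffers, no preset limit */
		snd_pcm_lib_preallocate_pages_for_all(pcm,
						      SNDRV_DMA_TYPE_VMALLOC,
						      NULL, 0, 0);
	}

	static int sketch_hw_params(struct snd_pcm_substream *ss,
				    struct snd_pcm_hw_params *params)
	{
		return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
	}

	static int sketch_hw_free(struct snd_pcm_substream *ss)
	{
		return snd_pcm_lib_free_pages(ss);
	}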
diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c
index 250af9fb704d..10c1ef7e3a3e 100644
--- a/drivers/staging/most/video/video.c
+++ b/drivers/staging/most/video/video.c
@@ -528,6 +528,7 @@ static int comp_disconnect_channel(struct most_interface *iface,
}
static struct core_component comp = {
+ .mod = THIS_MODULE,
.name = "video",
.probe_channel = comp_probe_channel,
.disconnect_channel = comp_disconnect_channel,
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d964642d95a3..20b898954416 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -208,8 +208,8 @@ static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
{
- dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
- "tctx %08x, tdtx: %08x, rbase %08x, " \
+ dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, "
+ "tctx %08x, tdtx: %08x, rbase %08x, "
"rcnt %08x, rctx %08x, rdtx %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
@@ -220,7 +220,7 @@ static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
- dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
+ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, "
"intr_stat %08x, intr_mask %08x\n",
mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
@@ -243,9 +243,9 @@ static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
tx_desc = &chan->tx_ring[i];
rx_desc = &chan->rx_ring[i];
- dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
+ dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, "
"tx addr1: %08x, rx addr0 %08x, flags %08x\n",
- i, tx_desc->addr0, tx_desc->flags, \
+ i, tx_desc->addr0, tx_desc->flags,
tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
}
}
@@ -548,7 +548,8 @@ static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
int i;
chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+ 2 * HSDMA_DESCS_NUM *
+ sizeof(*chan->tx_ring),
&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
if (!chan->tx_ring)
goto no_mem;
@@ -569,8 +570,8 @@ static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
{
if (chan->tx_ring) {
dma_free_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
- chan->tx_ring, chan->desc_addr);
+ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+ chan->tx_ring, chan->desc_addr);
chan->tx_ring = NULL;
chan->rx_ring = NULL;
}
@@ -650,7 +651,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_chan *chan;
struct mtk_hsdam_engine *hsdma;
struct dma_device *dd;
- struct resource *res;
int ret;
int irq;
void __iomem *base;
@@ -667,8 +667,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
if (!hsdma)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
hsdma->base = base + HSDMA_BASE_OFFSET;
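
devm_platform_ioremap_resource(), used here and in the xlr_net change
further below, folds the platform_get_resource() plus
devm_ioremap_resource() pair into a single call. Roughly what the helper
does internally, simplified from drivers/base/platform.c:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}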
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index af928b75a940..ce58042f2f21 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -2,7 +2,6 @@
config PCI_MT7621
tristate "MediaTek MT7621 PCI Controller"
depends on RALINK
- depends on PCI
select PCI_DRIVERS_GENERIC
help
This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 6b98827da57f..3633c924848e 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -29,15 +29,14 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/sys_soc.h>
#include <mt7621.h>
#include <ralink_regs.h>
#include "../../pci/pci.h"
/* sysctl */
-#define MT7621_CHIP_REV_ID 0x0c
#define MT7621_GPIO_MODE 0x60
-#define CHIP_REV_MT7621_E2 0x0101
/* MediaTek specific configuration registers */
#define PCIE_FTS_NUM 0x70c
@@ -126,6 +125,8 @@ struct mt7621_pcie_port {
* @ports: pointer to PCIe port information
* @perst: gpio reset
* @rst: pointer to pcie reset
+ * @resets_inverted: whether the reset lines are inverted,
+ * depending on the chip revision
*/
struct mt7621_pcie {
void __iomem *base;
@@ -140,6 +141,7 @@ struct mt7621_pcie {
struct list_head ports;
struct gpio_desc *perst;
struct reset_control *rst;
+ bool resets_inverted;
};
static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
@@ -229,9 +231,9 @@ static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
+ struct mt7621_pcie *pcie = port->pcie;
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (pcie->resets_inverted)
reset_control_assert(port->pcie_rst);
else
reset_control_deassert(port->pcie_rst);
@@ -239,9 +241,9 @@ static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
{
- u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);
+ struct mt7621_pcie *pcie = port->pcie;
- if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
+ if (pcie->resets_inverted)
reset_control_deassert(port->pcie_rst);
else
reset_control_assert(port->pcie_rst);
@@ -641,9 +643,14 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host,
return pci_host_probe(host);
}
+static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
+ { .soc_id = "mt7621", .revision = "E2" }
+};
+
static int mt7621_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
struct mt7621_pcie *pcie;
struct pci_host_bridge *bridge;
int err;
@@ -661,6 +668,10 @@ static int mt7621_pci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
INIT_LIST_HEAD(&pcie->ports);
+ attr = soc_device_match(mt7621_pci_quirks_match);
+ if (attr)
+ pcie->resets_inverted = true;
+
err = mt7621_pcie_parse_dt(pcie);
if (err) {
dev_err(dev, "Parsing DT failed\n");
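
Instead of reading MT7621_CHIP_REV_ID on every reset toggle, the
revision quirk is now resolved once at probe time: soc_device_match()
walks a soc_device_attribute table, glob-matching each populated field
against the registered SoC device, and returns the first matching entry
or NULL. The walk stops at an all-empty entry, so such tables are
conventionally terminated with a sentinel; a sketch of the pattern with
the sentinel included:

	static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
		{ .soc_id = "mt7621", .revision = "E2" },
		{ /* sentinel */ }
	};

	/* at probe time */
	if (soc_device_match(mt7621_pci_quirks_match))
		pcie->resets_inverted = true;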
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
index 8f172b017b94..20e22ecb9903 100644
--- a/drivers/staging/netlogic/TODO
+++ b/drivers/staging/netlogic/TODO
@@ -1,6 +1,6 @@
* Implementing 64bit stat counter in software
* All memory allocation should be changed to DMA allocations
-* Changing comments in to linux standred format
+* Changing comments into linux standard format
Please send patches
To:
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 05079f7be841..204fcdfc022f 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -976,8 +976,7 @@ static int xlr_net_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->port_id = (pdev->id * 4) + port;
priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, port);
- priv->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->base_addr = devm_platform_ioremap_resource(pdev, port);
if (IS_ERR(priv->base_addr)) {
err = PTR_ERR(priv->base_addr);
goto err_gmac;
diff --git a/drivers/staging/nvec/Kconfig b/drivers/staging/nvec/Kconfig
index 5c12cacf75e1..9fa98c16f1d9 100644
--- a/drivers/staging/nvec/Kconfig
+++ b/drivers/staging/nvec/Kconfig
@@ -8,7 +8,7 @@ config MFD_NVEC
controller.
To compile this driver as a module, say M here: the module will be
- called mfd-nvec
+ called mfd-nvec
config KEYBOARD_NVEC
tristate "Keyboard on nVidia compliant EC"
@@ -18,7 +18,7 @@ config KEYBOARD_NVEC
a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called keyboard-nvec
+ called keyboard-nvec
config SERIO_NVEC_PS2
tristate "PS2 on nVidia EC"
@@ -28,7 +28,7 @@ config SERIO_NVEC_PS2
to a nVidia compliant embedded controller.
To compile this driver as a module, say M here: the module will be
- called serio-nvec-ps2
+ called serio-nvec-ps2
config NVEC_POWER
@@ -39,7 +39,7 @@ config NVEC_POWER
nVidia compliant embedded controllers.
To compile this driver as a module, say M here: the module will be
- called nvec-power
+ called nvec-power
config NVEC_PAZ00
@@ -50,5 +50,5 @@ config NVEC_PAZ00
devices, e.g. Toshiba AC100 and Dynabook AZ netbooks.
To compile this driver as a module, say M here: the module will be
- called nvec-paz00
+ called nvec-paz00
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index a5321cc692c5..582c9187559d 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -1836,8 +1836,7 @@ static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
*
* Returns: Pipe or NULL if none are ready
*/
-static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
- struct octeon_hcd *usb,
+static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb,
enum cvmx_usb_transfer xfer_type)
{
struct list_head *list = usb->active_pipes + xfer_type;
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index ffac0c4b3f5c..c798672d61b2 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -65,7 +65,7 @@ int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li)
+ union cvmx_helper_link_info li)
{
if (li.s.link_up) {
pr_notice_ratelimited("%s: %u Mbps %s duplex, port %d, queue %d\n",
@@ -81,7 +81,7 @@ void cvm_oct_note_carrier(struct octeon_ethernet *priv,
void cvm_oct_adjust_link(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info.u64 = 0;
link_info.s.link_up = dev->phydev->link ? 1 : 0;
@@ -106,7 +106,7 @@ int cvm_oct_common_stop(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int index = INDEX(priv->port);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d91fd5ce9e68..0c4fac31540a 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -53,7 +53,7 @@ static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
static void cvm_oct_check_preamble_errors(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
unsigned long flags;
link_info.u64 = priv->link_info;
@@ -103,7 +103,7 @@ static void cvm_oct_check_preamble_errors(struct net_device *dev)
static void cvm_oct_rgmii_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
bool status_change;
link_info = cvmx_helper_link_get(priv->port);
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 0e65955c746b..2c16230f993c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -60,7 +60,7 @@ static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
*
* Returns Non-zero if the packet can be dropped, zero otherwise.
*/
-static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
+static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
int port;
@@ -135,7 +135,7 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
return 0;
}
-static void copy_segments_to_skb(cvmx_wqe_t *work, struct sk_buff *skb)
+static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
{
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -215,7 +215,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
struct sk_buff *skb = NULL;
struct sk_buff **pskb = NULL;
int skb_in_hw;
- cvmx_wqe_t *work;
+ struct cvmx_wqe *work;
int port;
if (USE_ASYNC_IOBDMA && did_work_request)
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 83469061a542..b334cf89794e 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -127,7 +127,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
*/
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
- cvmx_pko_command_word0_t pko_command;
+ union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr hw_buffer;
u64 old_scratch;
u64 old_scratch2;
@@ -514,7 +514,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
void *copy_location;
/* Get a work queue entry */
- cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+ struct cvmx_wqe *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(!work)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
@@ -598,7 +598,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
#endif
work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
(ip_hdr(skb)->frag_off ==
- 1 << 14));
+ cpu_to_be16(1 << 14)));
#if 0
/* Assume Linux is sending a good packet */
work->word2.s.IP_exc = 0;
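
The cpu_to_be16() fix above matters because iph->frag_off is a
big-endian field: comparing it against the host-order constant 1 << 14,
which is the DF bit (IP_DF), only happened to work on big-endian
machines. A packet is unfragmented when frag_off is zero or carries
only DF; a sketch of the corrected test (helper name hypothetical;
mainline expresses the same idea as ip_is_fragment() in <net/ip.h>):

	#include <net/ip.h>	/* struct iphdr, IP_DF, cpu_to_be16() */

	static bool sketch_ip_is_frag(const struct iphdr *iph)
	{
		return !(iph->frag_off == 0 ||
			 iph->frag_off == cpu_to_be16(IP_DF));
	}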
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index cf8e9a23ebf9..f42c3816ce49 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -172,7 +172,7 @@ static void cvm_oct_configure_common_hw(void)
*/
int cvm_oct_free_work(void *work_queue_entry)
{
- cvmx_wqe_t *work = work_queue_entry;
+ struct cvmx_wqe *work = work_queue_entry;
int segments = work->word2.s.bufs;
union cvmx_buf_ptr segment_ptr = work->packet_ptr;
@@ -460,7 +460,7 @@ int cvm_oct_common_open(struct net_device *dev,
struct octeon_ethernet *priv = netdev_priv(dev);
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
int rv;
rv = cvm_oct_phy_setup_device(dev);
@@ -496,7 +496,7 @@ int cvm_oct_common_open(struct net_device *dev,
void cvm_oct_link_poll(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- cvmx_helper_link_info_t link_info;
+ union cvmx_helper_link_info link_info;
link_info = cvmx_helper_link_get(priv->port);
if (link_info.u64 == priv->link_info)
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index a8a864b40913..a6140705706f 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -14,7 +14,7 @@
#include <linux/of.h>
#include <linux/phy.h>
-#ifdef CONFIG_MIPS
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
@@ -91,7 +91,7 @@ int cvm_oct_common_stop(struct net_device *dev);
int cvm_oct_common_open(struct net_device *dev,
void (*link_poll)(struct net_device *));
void cvm_oct_note_carrier(struct octeon_ethernet *priv,
- cvmx_helper_link_info_t li);
+ union cvmx_helper_link_info li);
void cvm_oct_link_poll(struct net_device *dev);
extern int always_use_pow;
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index b78ce9eaab85..79213c045504 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1,5 +1,8 @@
#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
-#define XKPHYS_TO_PHYS(p) (p)
+
+#ifndef XKPHYS_TO_PHYS
+# define XKPHYS_TO_PHYS(p) (p)
+#endif
#define OCTEON_IRQ_WORKQ0 0
#define OCTEON_IRQ_RML 0
@@ -38,7 +41,7 @@
#define CVMX_NPI_RSL_INT_BLOCKS 0
#define CVMX_POW_WQ_INT_PC 0
-typedef union {
+union cvmx_pip_wqe_word2 {
uint64_t u64;
struct {
uint64_t bufs:8;
@@ -114,13 +117,13 @@ typedef union {
uint64_t err_code:8;
} snoip;
-} cvmx_pip_wqe_word2;
+};
union cvmx_pip_wqe_word0 {
struct {
uint64_t next_ptr:40;
uint8_t unused;
- uint16_t hw_chksum;
+ __wsum hw_chksum;
} cn38xx;
struct {
uint64_t pknd:6; /* 0..5 */
@@ -180,15 +183,15 @@ union cvmx_buf_ptr {
} s;
};
-typedef struct {
+struct cvmx_wqe {
union cvmx_wqe_word0 word0;
union cvmx_wqe_word1 word1;
- cvmx_pip_wqe_word2 word2;
+ union cvmx_pip_wqe_word2 word2;
union cvmx_buf_ptr packet_ptr;
uint8_t packet_data[96];
-} cvmx_wqe_t;
+};
-typedef union {
+union cvmx_helper_link_info {
uint64_t u64;
struct {
uint64_t reserved_20_63:44;
@@ -196,18 +199,18 @@ typedef union {
uint64_t full_duplex:1; /**< 1 if the link is full duplex */
uint64_t speed:18; /**< Speed of the link in Mbps */
} s;
-} cvmx_helper_link_info_t;
+};
-typedef enum {
+enum cvmx_fau_reg_32 {
CVMX_FAU_REG_32_START = 0,
-} cvmx_fau_reg_32_t;
+};
-typedef enum {
+enum cvmx_fau_op_size {
CVMX_FAU_OP_SIZE_8 = 0,
CVMX_FAU_OP_SIZE_16 = 1,
CVMX_FAU_OP_SIZE_32 = 2,
CVMX_FAU_OP_SIZE_64 = 3
-} cvmx_fau_op_size_t;
+};
typedef enum {
CVMX_SPI_MODE_UNKNOWN = 0,
@@ -1134,27 +1137,27 @@ union cvmx_npi_rsl_int_blocks {
} cn50xx;
};
-typedef union {
+union cvmx_pko_command_word0 {
uint64_t u64;
struct {
- uint64_t total_bytes:16;
- uint64_t segs:6;
- uint64_t dontfree:1;
- uint64_t ignore_i:1;
- uint64_t ipoffp1:7;
- uint64_t gather:1;
- uint64_t rsp:1;
- uint64_t wqp:1;
- uint64_t n2:1;
- uint64_t le:1;
- uint64_t reg0:11;
- uint64_t subone0:1;
- uint64_t reg1:11;
- uint64_t subone1:1;
- uint64_t size0:2;
- uint64_t size1:2;
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
} s;
-} cvmx_pko_command_word0_t;
+};
union cvmx_ciu_timx {
uint64_t u64;
@@ -1175,16 +1178,18 @@ union cvmx_gmxx_rxx_rx_inbnd {
} s;
};
-static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
+static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg,
int32_t value)
{
return value;
}
-static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
{ }
-static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg,
+ int32_t value)
{ }
static inline uint64_t cvmx_scratch_read64(uint64_t address)
@@ -1195,7 +1200,7 @@ static inline uint64_t cvmx_scratch_read64(uint64_t address)
static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
{ }
-static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work)
{
return 0;
}
@@ -1264,15 +1269,15 @@ static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
return 0;
}
-static inline cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+static inline union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
{
- cvmx_helper_link_info_t ret = { .u64 = 0 };
+ union cvmx_helper_link_info ret = { .u64 = 0 };
return ret;
}
static inline int cvmx_helper_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
+ union cvmx_helper_link_info link_info)
{
return 0;
}
@@ -1342,14 +1347,14 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
cvmx_pow_wait_t wait)
{ }
-static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
+static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr)
{
- cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr;
+ struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr;
return wqe;
}
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
{
return (void *)(unsigned long)wait;
}
@@ -1361,7 +1366,7 @@ static inline int cvmx_spi_restart_interface(int interface,
}
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
+ enum cvmx_fau_reg_32 reg,
int32_t value)
{ }
@@ -1370,6 +1375,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
int port)
{
union cvmx_gmxx_rxx_rx_inbnd r;
+
r.u64 = 0;
return r;
}
@@ -1379,29 +1385,27 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
{ }
static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
- uint64_t queue, cvmx_pko_command_word0_t pko_command,
+ uint64_t queue, union cvmx_pko_command_word0 pko_command,
union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
{
- cvmx_pko_status_t ret = 0;
-
- return ret;
+ return 0;
}
-static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port)
{ }
-static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos)
{ }
-static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work)
{
return 0;
}
-static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp)
{ }
-static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
+static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag,
enum cvmx_pow_tag_type tag_type,
uint64_t qos, uint64_t grp)
{ }
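
The stubs conversion above follows the kernel coding-style rule against
typedef'd structs, unions and enums: a plainly named type says what it
is at every use site and, unlike an anonymous typedef, can be
forward-declared by headers that only pass pointers around. A two-line
sketch of that benefit:

	struct cvmx_wqe;			/* forward declaration suffices */
	int handle_work(struct cvmx_wqe *work);	/* hypothetical prototype */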
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index f5c716bb3413..d1a0dea09ef0 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -3,7 +3,7 @@ config FB_OLPC_DCON
tristate "One Laptop Per Child Display CONtroller support"
depends on OLPC && FB
depends on I2C
- depends on (GPIO_CS5535 || GPIO_CS5535=n)
+ depends on GPIO_CS5535 && ACPI
select BACKLIGHT_CLASS_DEVICE
help
In order to support very low power operation, the XO laptop uses a
@@ -15,22 +15,3 @@ config FB_OLPC_DCON
This controller is only available on OLPC platforms. Unless you have
one of these platforms, you will want to say 'N'.
-config FB_OLPC_DCON_1
- bool "OLPC XO-1 DCON support"
- depends on FB_OLPC_DCON && GPIO_CS5535
- default y
- help
- Enable support for the DCON in XO-1 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1 (or if you're unsure what model you have), you should
- say 'Y'.
-
-config FB_OLPC_DCON_1_5
- bool "OLPC XO-1.5 DCON support"
- depends on FB_OLPC_DCON && ACPI
- default y
- help
- Enable support for the DCON in XO-1.5 model laptops. The kernel
- communicates with the DCON using model-specific code. If you
- have an XO-1.5 (or if you're unsure what model you have), you
- should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile
index cb1248c5c162..734b2ce26066 100644
--- a/drivers/staging/olpc_dcon/Makefile
+++ b/drivers/staging/olpc_dcon/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-olpc-dcon-objs += olpc_dcon.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o
-olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o
+olpc-dcon-objs += olpc_dcon.o olpc_dcon_xo_1.o olpc_dcon_xo_1_5.o
obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index d8296f2ae872..7c263358b44a 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -8,7 +8,6 @@ TODO:
internals, but isn't properly integrated, is not the correct solution.
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- - allow simultaneous XO-1 and XO-1.5 support
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index a254238be181..a0d6d90f4cc8 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -790,15 +790,11 @@ static struct i2c_driver dcon_driver = {
static int __init olpc_dcon_init(void)
{
-#ifdef CONFIG_FB_OLPC_DCON_1_5
/* XO-1.5 */
if (olpc_board_at_least(olpc_board(0xd0)))
pdata = &dcon_pdata_xo_1_5;
-#endif
-#ifdef CONFIG_FB_OLPC_DCON_1
- if (!pdata)
+ else
pdata = &dcon_pdata_xo_1;
-#endif
return i2c_add_driver(&dcon_driver);
}
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 22d976a09785..41bd1360b56e 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -106,12 +106,7 @@ struct dcon_gpio {
irqreturn_t dcon_interrupt(int irq, void *id);
-#ifdef CONFIG_FB_OLPC_DCON_1
extern struct dcon_platform_data dcon_pdata_xo_1;
-#endif
-
-#ifdef CONFIG_FB_OLPC_DCON_1_5
extern struct dcon_platform_data dcon_pdata_xo_1_5;
-#endif
#endif
diff --git a/drivers/staging/pi433/Kconfig b/drivers/staging/pi433/Kconfig
index 8acde0814206..dd9e4709d1a8 100644
--- a/drivers/staging/pi433/Kconfig
+++ b/drivers/staging/pi433/Kconfig
@@ -1,17 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
config PI433
- tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
- depends on SPI
- help
- This option allows you to enable support for the radio module Pi433.
+ tristate "Pi433 - a 433MHz radio module for Raspberry Pi"
+ depends on SPI
+ help
+ This option allows you to enable support for the radio module Pi433.
- Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
- or compatible. It extends the Raspberry Pi with the option, to
- send and receive data in the 433MHz ISM band - for example to
- communicate between two systems without using ethernet or bluetooth
- or for control or read sockets, actors, sensors, widely available
- for low price.
+	  Pi433 is a shield that fits onto the GPIO header of a Raspberry Pi
+	  or compatible board. It extends the Raspberry Pi with the option to
+	  send and receive data in the 433MHz ISM band - for example, to
+	  communicate between two systems without using Ethernet or Bluetooth,
+	  or to control and read widely and cheaply available sockets,
+	  actuators and sensors.
- For details or the option to buy, please visit https://pi433.de/en.html
+ For details or the option to buy, please visit https://pi433.de/en.html
- If in doubt, say N here, but saying yes most probably won't hurt
+ If in doubt, say N here, but saying yes most probably won't hurt
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 40c6f4e7632f..313d22f6210f 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -928,16 +928,6 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return 0;
}
-#ifdef CONFIG_COMPAT
-static long
-pi433_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- return pi433_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
-}
-#else
-#define pi433_compat_ioctl NULL
-#endif /* CONFIG_COMPAT */
-
/*-------------------------------------------------------------------------*/
static int pi433_open(struct inode *inode, struct file *filp)
@@ -1094,7 +1084,7 @@ static const struct file_operations pi433_fops = {
.write = pi433_write,
.read = pi433_read,
.unlocked_ioctl = pi433_ioctl,
- .compat_ioctl = pi433_compat_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pi433_open,
.release = pi433_release,
.llseek = no_llseek,
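
compat_ptr_ioctl() is the generic fallback for drivers whose ioctls
only take pointer arguments: it converts the 32-bit user pointer with
compat_ptr() and forwards to ->unlocked_ioctl, which makes the
hand-rolled wrapper and its CONFIG_COMPAT ifdeffery removed above
redundant. Roughly what the helper does, simplified from fs/ioctl.c:

	long compat_ptr_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
	{
		if (!file->f_op->unlocked_ioctl)
			return -ENOIOCTLCMD;
		return file->f_op->unlocked_ioctl(file, cmd,
						  (unsigned long)compat_ptr(arg));
	}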
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
index 51c509084e80..f93f7428f5d5 100644
--- a/drivers/staging/qlge/TODO
+++ b/drivers/staging/qlge/TODO
@@ -1,6 +1,3 @@
-* reception stalls permanently (until admin intervention) if the rx buffer
- queues become empty because of allocation failures (ex. under memory
- pressure)
* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1)
introduced dead code in the receive routines, which should be rewritten
anyways by the admission of the author himself, see the comment above
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index ad7c5eb8a3b6..6ec7e3ce3863 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -34,8 +34,13 @@
#define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
+/* Use the same len for sbq and lbq. Note that it seems like the device might
+ * support different sizes.
+ */
+#define QLGE_BQ_SHIFT 9
+#define QLGE_BQ_LEN BIT(QLGE_BQ_SHIFT)
+#define QLGE_BQ_SIZE (QLGE_BQ_LEN * sizeof(__le64))
+
#define DB_PAGE_SIZE 4096
/* Calculate the number of (4k) pages required to
@@ -46,8 +51,8 @@
(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \
+ MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64))
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
@@ -77,6 +82,11 @@
#define LSD(x) ((u32)((u64)(x)))
#define MSD(x) ((u32)((((u64)(x)) >> 32)))
+/* In some cases, the device interprets a value of 0x0000 as 65536. These
+ * cases are marked using the following macro.
+ */
+#define QLGE_FIT16(value) ((u16)(value))
+
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
@@ -1358,25 +1368,6 @@ struct tx_ring_desc {
struct tx_ring_desc *next;
};
-struct page_chunk {
- struct page *page; /* master page */
- char *va; /* virt addr for this chunk */
- u64 map; /* mapping for master */
- unsigned int offset; /* offset for this chunk */
- unsigned int last_flag; /* flag set for last chunk in page */
-};
-
-struct bq_desc {
- union {
- struct page_chunk pg_chunk;
- struct sk_buff *skb;
- } p;
- __le64 *addr;
- u32 index;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
struct tx_ring {
@@ -1406,15 +1397,68 @@ struct tx_ring {
u64 tx_errors;
};
-/*
- * Type of inbound queue.
- */
-enum {
- DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
- TX_Q = 3, /* Handles outbound completions. */
- RX_Q = 4, /* Handles inbound completions. */
+struct qlge_page_chunk {
+ struct page *page;
+ void *va; /* virt addr including offset */
+ unsigned int offset;
};
+struct qlge_bq_desc {
+ union {
+ /* for large buffers */
+ struct qlge_page_chunk pg_chunk;
+ /* for small buffers */
+ struct sk_buff *skb;
+ } p;
+ dma_addr_t dma_addr;
+ /* address in ring where the buffer address is written for the device */
+ __le64 *buf_ptr;
+ u32 index;
+};
+
+/* buffer queue */
+struct qlge_bq {
+ __le64 *base;
+ dma_addr_t base_dma;
+ __le64 *base_indirect;
+ dma_addr_t base_indirect_dma;
+ struct qlge_bq_desc *queue;
+ /* prod_idx is the index of the first buffer that may NOT be used by
+	 * hw, i.e. one after the last. Advanced by sw.
+ */
+ void __iomem *prod_idx_db_reg;
+ /* next index where sw should refill a buffer for hw */
+ u16 next_to_use;
+ /* next index where sw expects to find a buffer filled by hw */
+ u16 next_to_clean;
+ enum {
+ QLGE_SB, /* small buffer */
+ QLGE_LB, /* large buffer */
+ } type;
+};
+
+#define QLGE_BQ_CONTAINER(bq) \
+({ \
+ typeof(bq) _bq = bq; \
+ (struct rx_ring *)((char *)_bq - (_bq->type == QLGE_SB ? \
+ offsetof(struct rx_ring, sbq) : \
+ offsetof(struct rx_ring, lbq))); \
+})
+
+/* Experience shows that the device ignores the low 4 bits of the tail index.
+ * Refill up to a x16 multiple.
+ */
+#define QLGE_BQ_ALIGN(index) ALIGN_DOWN(index, 16)
+
+#define QLGE_BQ_WRAP(index) ((index) & (QLGE_BQ_LEN - 1))
+
+#define QLGE_BQ_HW_OWNED(bq) \
+({ \
+ typeof(bq) _bq = bq; \
+ QLGE_BQ_WRAP(QLGE_BQ_ALIGN((_bq)->next_to_use) - \
+ (_bq)->next_to_clean); \
+})
+
struct rx_ring {
struct cqicb cqicb; /* The chip's completion queue init control block. */
@@ -1432,40 +1476,17 @@ struct rx_ring {
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */
- u32 lbq_len; /* entry count */
- u32 lbq_size; /* size in bytes of queue */
- u32 lbq_buf_size;
- void *lbq_base;
- dma_addr_t lbq_base_dma;
- void *lbq_base_indirect;
- dma_addr_t lbq_base_indirect_dma;
- struct page_chunk pg_chunk; /* current page for chunks */
- struct bq_desc *lbq; /* array of control blocks */
- void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
- u32 lbq_prod_idx; /* current sw prod idx */
- u32 lbq_curr_idx; /* next entry we expect */
- u32 lbq_clean_idx; /* beginning of new descs */
- u32 lbq_free_cnt; /* free buffer desc cnt */
+ struct qlge_bq lbq;
+ struct qlge_page_chunk master_chunk;
+ dma_addr_t chunk_dma_addr;
/* Small buffer queue elements. */
- u32 sbq_len; /* entry count */
- u32 sbq_size; /* size in bytes of queue */
- u32 sbq_buf_size;
- void *sbq_base;
- dma_addr_t sbq_base_dma;
- void *sbq_base_indirect;
- dma_addr_t sbq_base_indirect_dma;
- struct bq_desc *sbq; /* array of control blocks */
- void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
- u32 sbq_prod_idx; /* current sw prod idx */
- u32 sbq_curr_idx; /* next entry we expect */
- u32 sbq_clean_idx; /* beginning of new descs */
- u32 sbq_free_cnt; /* free buffer desc cnt */
+ struct qlge_bq sbq;
/* Misc. handler elements. */
- u32 type; /* Type of queue, tx, rx. */
u32 irq; /* Which vector this ring is assigned. */
u32 cpu; /* Which CPU this should run on. */
+ struct delayed_work refill_work;
char name[IFNAMSIZ + 5];
struct napi_struct napi;
u8 reserved;
@@ -1982,11 +2003,6 @@ struct intr_context {
u32 intr_dis_mask; /* value/mask used to disable this intr */
u32 intr_read_mask; /* value/mask used to read this intr */
char name[IFNAMSIZ * 2];
- atomic_t irq_cnt; /* irq_cnt is used in single vector
- * environment. It's incremented for each
- * irq handler that is scheduled. When each
- * handler finishes it decrements irq_cnt and
- * enables interrupts if it's zero. */
irq_handler_t handler;
};
@@ -2074,7 +2090,6 @@ struct ql_adapter {
u32 port; /* Port number this adapter */
spinlock_t adapter_lock;
- spinlock_t hw_lock;
spinlock_t stats_lock;
/* PCI Bus Relative Register Addresses */
@@ -2115,6 +2130,7 @@ struct ql_adapter {
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
unsigned int lbq_buf_order;
+ u32 lbq_buf_size;
int rx_csum;
u32 default_rx_queue;
@@ -2235,7 +2251,6 @@ void ql_mpi_reset_work(struct work_struct *work);
void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
void ql_mpi_idc_work(struct work_struct *work);
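
The new buffer-queue macros rely on QLGE_BQ_LEN being a power of two
(512): QLGE_BQ_WRAP() reduces an index modulo the ring size with a
mask, QLGE_BQ_ALIGN() rounds the producer index down to a multiple of
16 because the device ignores the low 4 bits of the tail, and
QLGE_BQ_HW_OWNED() is the wrapped distance from next_to_clean to the
aligned next_to_use. A standalone illustration of the arithmetic in
userspace C, using a power-of-two simplification of ALIGN_DOWN:

	#include <stdio.h>

	#define QLGE_BQ_LEN		512
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define QLGE_BQ_ALIGN(i)	ALIGN_DOWN(i, 16u)
	#define QLGE_BQ_WRAP(i)		((i) & (QLGE_BQ_LEN - 1))

	int main(void)
	{
		unsigned int next_to_use = 27, next_to_clean = 500;
		unsigned int hw_owned =
			QLGE_BQ_WRAP(QLGE_BQ_ALIGN(next_to_use) - next_to_clean);

		/* ALIGN_DOWN(27, 16) = 16; (16 - 500) mod 512 = 28 */
		printf("hw owns %u buffers\n", hw_owned);
		return 0;
	}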
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 31389ab8bdf7..83f34ca43aa4 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -7,7 +7,7 @@
/* Read a NIC register from the alternate function. */
static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
- u32 reg)
+ u32 reg)
{
u32 register_to_read;
u32 reg_val;
@@ -26,7 +26,7 @@ static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
/* Write a NIC register from the alternate function. */
static int ql_write_other_func_reg(struct ql_adapter *qdev,
- u32 reg, u32 reg_val)
+ u32 reg, u32 reg_val)
{
u32 register_to_read;
int status = 0;
@@ -41,7 +41,7 @@ static int ql_write_other_func_reg(struct ql_adapter *qdev,
}
static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
+ u32 bit, u32 err_bit)
{
u32 temp;
int count = 10;
@@ -61,22 +61,22 @@ static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
}
static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+ u32 *data)
{
int status;
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
/* set up for reg read */
- ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
+ XG_SERDES_ADDR_RDY, 0);
if (status)
goto exit;
@@ -111,8 +111,8 @@ exit:
}
static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- unsigned int direct_valid, unsigned int indirect_valid)
+ u32 *direct_ptr, u32 *indirect_ptr,
+ bool direct_valid, bool indirect_valid)
{
unsigned int status;
@@ -133,16 +133,15 @@ static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
}
static int ql_get_serdes_regs(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump)
+ struct ql_mpi_coredump *mpi_coredump)
{
int status;
- unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
- unsigned int xaui_indirect_valid, i;
+ bool xfi_direct_valid = false, xfi_indirect_valid = false;
+ bool xaui_direct_valid = true, xaui_indirect_valid = true;
+ unsigned int i;
u32 *direct_ptr, temp;
u32 *indirect_ptr;
- xfi_direct_valid = xfi_indirect_valid = 0;
- xaui_direct_valid = xaui_indirect_valid = 1;
/* The XAUI needs to be read out per port */
status = ql_read_other_func_serdes_reg(qdev,
@@ -152,7 +151,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_indirect_valid = 0;
+ xaui_indirect_valid = false;
status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
@@ -161,7 +160,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_direct_valid = 0;
+ xaui_direct_valid = false;
/*
* XFI register is shared so only need to read one
@@ -176,18 +175,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
/* now see if i'm NIC 1 or NIC 2 */
if (qdev->func & 1)
/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_indirect_valid = 1;
+ xfi_indirect_valid = true;
else
- xfi_direct_valid = 1;
+ xfi_direct_valid = true;
}
if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
XG_SERDES_ADDR_XFI2_PWR_UP) {
/* now see if i'm NIC 1 or NIC 2 */
if (qdev->func & 1)
/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_direct_valid = 1;
+ xfi_direct_valid = true;
else
- xfi_indirect_valid = 1;
+ xfi_indirect_valid = true;
}
/* Get XAUI_AN register block. */
@@ -203,7 +202,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -220,7 +219,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
+ xaui_direct_valid, xaui_indirect_valid);
/* Get XAUI_XFI_AN register block. */
if (qdev->func & 1) {
@@ -233,7 +232,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_TRAIN register block. */
if (qdev->func & 1) {
@@ -248,7 +247,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PCS register block. */
if (qdev->func & 1) {
@@ -265,7 +264,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_TX register block. */
if (qdev->func & 1) {
@@ -280,7 +279,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_RX register block. */
if (qdev->func & 1) {
@@ -296,7 +295,7 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
/* Get XAUI_XFI_HSS_PLL register block. */
@@ -313,18 +312,18 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev,
}
for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
+ xfi_direct_valid, xfi_indirect_valid);
return 0;
}
static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
+ u32 *data)
{
int status = 0;
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
@@ -333,7 +332,7 @@ static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
/* wait for reg to come ready */
status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
if (status)
goto exit;
@@ -347,17 +346,17 @@ exit:
* skipping unused locations.
*/
static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
- unsigned int other_function)
+ unsigned int other_function)
{
int status = 0;
int i;
for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
/* We're reading 400 xgmac registers, but we filter out
- * serveral locations that are non-responsive to reads.
+ * several locations that are non-responsive to reads.
*/
if ((i == 0x00000114) ||
- (i == 0x00000118) ||
+ (i == 0x00000118) ||
(i == 0x0000013c) ||
(i == 0x00000140) ||
(i > 0x00000150 && i < 0x000001fc) ||
@@ -389,7 +388,6 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
- int status = 0;
int i;
for (i = 0; i < 8; i++, buf++) {
@@ -402,7 +400,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
*buf = ql_read32(qdev, CNA_ETS);
}
- return status;
+ return 0;
}
static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
@@ -411,7 +409,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
ql_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
+ qdev->intr_context[i].intr_read_mask);
*buf = ql_read32(qdev, INTR_EN);
}
}
@@ -427,7 +425,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
for (i = 0; i < 16; i++) {
status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
+ MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -439,7 +437,7 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
}
for (i = 0; i < 32; i++) {
status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_MULTI_MAC, i, value);
+ MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed read of mac index register\n");
@@ -498,7 +496,7 @@ end:
/* Read the MPI Processor core registers */
static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
+ u32 offset, u32 count)
{
int i, status = 0;
for (i = 0; i < count; i++, buf++) {
@@ -511,7 +509,7 @@ static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
/* Read the ASIC probe dump */
static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
+ u32 valid, u32 *buf)
{
u32 module, mux_sel, probe, lo_val, hi_val;
@@ -546,13 +544,13 @@ static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
/* First we have to enable the probe mux */
ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
return 0;
}
@@ -667,7 +665,7 @@ static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
result_index = 0;
while ((result_index & MAC_ADDR_MR) == 0) {
result_index = ql_read32(qdev,
- MAC_ADDR_IDX);
+ MAC_ADDR_IDX);
}
result_data = ql_read32(qdev, MAC_ADDR_DATA);
*buf = result_index;
@@ -741,7 +739,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Insert the global header */
memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
+ sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.headerSize =
sizeof(struct mpi_coredump_global_header);
@@ -752,23 +750,23 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get generic NIC reg dump */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
+ NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
+ NIC2_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
/* Get XGMac registers. (Segment 18, Rev C. step 21) */
ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
+ NIC1_XGMAC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
+ NIC2_XGMAC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
@@ -799,97 +797,97 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Rev C. Step 20a */
ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
+ XAUI_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xaui_an),
"XAUI AN Registers");
/* Rev C. Step 20b */
ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
+ XAUI_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xaui_hss_pcs),
"XAUI HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
+ sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_an),
"XFI AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
+ XFI_TRAIN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_train),
"XFI TRAIN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
+ XFI_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_pcs),
"XFI HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
+ XFI_HSS_TX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_tx),
"XFI HSS TX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
+ XFI_HSS_RX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_rx),
"XFI HSS RX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
+ XFI_HSS_PLL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes_xfi_hss_pll),
"XFI HSS PLL Registers");
ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
+ XAUI2_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xaui_an),
"XAUI2 AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
+ XAUI2_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
"XAUI2 HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
+ XFI2_AN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_an),
"XFI2 AN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
+ XFI2_TRAIN_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_train),
"XFI2 TRAIN Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
+ XFI2_HSS_PCS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
"XFI2 HSS PCS Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
+ XFI2_HSS_TX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_tx),
"XFI2 HSS TX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
+ XFI2_HSS_RX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_rx),
"XFI2 HSS RX Registers");
ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
+ XFI2_HSS_PLL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->serdes2_xfi_hss_pll),
"XFI2 HSS PLL Registers");
@@ -903,7 +901,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
+ CORE_SEG_NUM,
sizeof(mpi_coredump->core_regs_seg_hdr) +
sizeof(mpi_coredump->mpi_core_regs) +
sizeof(mpi_coredump->mpi_core_sh_regs),
@@ -922,7 +920,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the Test Logic Registers */
ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
+ TEST_LOGIC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->test_logic_regs),
"Test Logic Regs");
@@ -933,7 +931,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the RMII Registers */
ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
+ RMII_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->rmii_regs),
"RMII Registers");
@@ -944,7 +942,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FCMAC1 Registers */
ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
+ FCMAC1_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fcmac1_regs),
"FCMAC1 Registers");
@@ -956,7 +954,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FCMAC2 Registers */
ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
+ FCMAC2_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fcmac2_regs),
"FCMAC2 Registers");
@@ -968,7 +966,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FC1 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
+ FC1_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fc1_mbx_regs),
"FC1 MBox Regs");
@@ -979,7 +977,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the IDE Registers */
ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
+ IDE_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ide_regs),
"IDE Registers");
@@ -990,7 +988,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the NIC1 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
+ NIC1_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic1_mbx_regs),
"NIC1 MBox Regs");
@@ -1001,7 +999,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the SMBus Registers */
ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
+ SMBUS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->smbus_regs),
"SMBus Registers");
@@ -1012,7 +1010,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the FC2 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
+ FC2_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->fc2_mbx_regs),
"FC2 MBox Regs");
@@ -1023,7 +1021,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the NIC2 MBX Registers */
ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
+ NIC2_MBOX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic2_mbx_regs),
"NIC2 MBox Regs");
@@ -1034,7 +1032,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the I2C Registers */
ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
+ I2C_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->i2c_regs),
"I2C Registers");
@@ -1045,7 +1043,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the MEMC Registers */
ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
+ MEMC_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->memc_regs),
"MEMC Registers");
@@ -1056,7 +1054,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the PBus Registers */
ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
+ PBUS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->pbus_regs),
"PBUS Registers");
@@ -1067,7 +1065,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the MDE Registers */
ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
+ MDE_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->mde_regs),
"MDE Registers");
@@ -1077,7 +1075,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
+ MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
@@ -1089,14 +1087,14 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Segment 31 */
/* Get indexed register values. */
ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
+ INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
+ CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
@@ -1105,18 +1103,18 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
+ ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
goto err;
/* Segment 34 (Rev C. step 23) */
ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
+ ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
@@ -1125,24 +1123,24 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
+ PROBE_DUMP_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->probe_dump),
"Probe Dump");
ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
+ ROUTING_INDEX_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->routing_regs),
"Routing Regs");
status = ql_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
+ &mpi_coredump->routing_regs[0]);
if (status)
goto err;
ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
+ MAC_PROTOCOL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->mac_prot_regs),
"MAC Prot Regs");
@@ -1150,7 +1148,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Get the semaphore registers for all 5 functions */
ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
+ SEM_REGS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header) +
sizeof(mpi_coredump->sem_regs), "Sem Registers");
@@ -1176,12 +1174,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
}
ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
+ WCS_RAM_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->code_ram),
"WCS RAM");
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
+ CODE_RAM_ADDR, CODE_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of CODE RAM. Status = 0x%.08x\n",
@@ -1191,12 +1189,12 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* Insert the segment header */
ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
+ MEMC_RAM_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->memc_ram),
"MEMC RAM");
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
if (status) {
netif_err(qdev, drv, qdev->ndev,
"Failed Dump of MEMC RAM. Status = 0x%.08x\n",
@@ -1231,7 +1229,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
+ sizeof(struct mpi_coredump_global_header));
mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
mpi_coredump->mpi_global_header.headerSize =
sizeof(struct mpi_coredump_global_header);
@@ -1243,7 +1241,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* segment 16 */
ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
+ MISC_NIC_INFO_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->misc_nic_info),
"MISC NIC INFO");
@@ -1254,7 +1252,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* Segment 16, Rev C. Step 18 */
ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
+ NIC1_CONTROL_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_regs),
"NIC Registers");
@@ -1265,14 +1263,14 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
/* Segment 31 */
/* Get indexed register values. */
ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
+ INTR_STATES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->intr_states),
"INTR States");
ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
+ CAM_ENTRIES_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->cam_entries),
"CAM Entries");
@@ -1281,18 +1279,18 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
return;
ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
+ ROUTING_WORDS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->nic_routing_words),
"Routing Words");
status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
+ &mpi_coredump->nic_routing_words[0]);
if (status)
return;
/* Segment 34 (Rev C. step 23) */
ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
+ ETS_SEG_NUM,
sizeof(struct mpi_coredump_segment_header)
+ sizeof(mpi_coredump->ets),
"ETS Registers");
@@ -1630,6 +1628,7 @@ void ql_dump_qdev(struct ql_adapter *qdev)
DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
+ DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
}
#endif
@@ -1650,7 +1649,7 @@ void ql_dump_wqicb(struct wqicb *wqicb)
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
- if (tx_ring == NULL)
+ if (!tx_ring)
return;
pr_err("===================== Dumping tx_ring %d ===============\n",
tx_ring->wq_id);
@@ -1730,16 +1729,24 @@ void ql_dump_cqicb(struct cqicb *cqicb)
le16_to_cpu(cqicb->sbq_len));
}
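+/* Descriptive note (inferred from the test below and its other uses in this
+ * series): rings with cq_id below rss_ring_count carry inbound (RX)
+ * completions; the remaining rings service outbound (TX) completions.
+ */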
+static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+
+ if (rx_ring->cq_id < qdev->rss_ring_count)
+ return "RX COMPLETION";
+ else
+ return "TX COMPLETION";
+}
+
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
- if (rx_ring == NULL)
+ if (!rx_ring)
return;
pr_err("===================== Dumping rx_ring %d ===============\n",
rx_ring->cq_id);
- pr_err("Dumping rx_ring %d, type = %s%s%s\n",
- rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
- rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
- rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
+ pr_err("Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
+ qlge_rx_ring_type_name(rx_ring));
pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
pr_err("rx_ring->cq_base_dma = %llx\n",
@@ -1758,41 +1765,33 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
- pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
- pr_err("rx_ring->lbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_dma);
- pr_err("rx_ring->lbq_base_indirect = %p\n",
- rx_ring->lbq_base_indirect);
- pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_indirect_dma);
- pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
- pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
- pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
- pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
- rx_ring->lbq_prod_idx_db_reg);
- pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
- pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
+ pr_err("rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
+ pr_err("rx_ring->lbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_dma);
+ pr_err("rx_ring->lbq.base_indirect = %p\n",
+ rx_ring->lbq.base_indirect);
+ pr_err("rx_ring->lbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->lbq.base_indirect_dma);
+ pr_err("rx_ring->lbq = %p\n", rx_ring->lbq.queue);
+ pr_err("rx_ring->lbq.prod_idx_db_reg = %p\n",
+ rx_ring->lbq.prod_idx_db_reg);
+ pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
+ pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
- pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
-
- pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
- pr_err("rx_ring->sbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_dma);
- pr_err("rx_ring->sbq_base_indirect = %p\n",
- rx_ring->sbq_base_indirect);
- pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_indirect_dma);
- pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
- pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
- pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
- pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
- rx_ring->sbq_prod_idx_db_reg);
- pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
- pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
- pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
- pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
- pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
+
+ pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
+ pr_err("rx_ring->sbq.base_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_dma);
+ pr_err("rx_ring->sbq.base_indirect = %p\n",
+ rx_ring->sbq.base_indirect);
+ pr_err("rx_ring->sbq.base_indirect_dma = %llx\n",
+ (unsigned long long)rx_ring->sbq.base_indirect_dma);
+ pr_err("rx_ring->sbq = %p\n", rx_ring->sbq.queue);
+ pr_err("rx_ring->sbq.prod_idx_db_reg addr = %p\n",
+ rx_ring->sbq.prod_idx_db_reg);
+ pr_err("rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
+ pr_err("rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
pr_err("rx_ring->irq = %d\n", rx_ring->irq);
pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
@@ -1806,7 +1805,7 @@ void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
pr_err("%s: Enter\n", __func__);
ptr = kmalloc(size, GFP_ATOMIC);
- if (ptr == NULL)
+ if (!ptr)
return;
if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
@@ -1992,7 +1991,7 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
le16_to_cpu(ib_mac_rsp->vlan_id));
pr_err("flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
+ ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6cae33072496..6ad4515311f7 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -167,9 +167,9 @@ void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
u32 temp;
- int count = UDELAY_COUNT;
+ int count;
- while (count) {
+ for (count = 0; count < UDELAY_COUNT; count++) {
temp = ql_read32(qdev, reg);
/* check for errors */
@@ -181,7 +181,6 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
- count--;
}
netif_alert(qdev, probe, qdev->ndev,
"Timed out waiting for reg %x to come ready.\n", reg);
@@ -193,17 +192,16 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
*/
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
- int count = UDELAY_COUNT;
+ int count;
u32 temp;
- while (count) {
+ for (count = 0; count < UDELAY_COUNT; count++) {
temp = ql_read32(qdev, CFG);
if (temp & CFG_LE)
return -EIO;
if (!(temp & bit))
return 0;
udelay(UDELAY_DELAY);
- count--;
}
return -ETIMEDOUT;
}
@@ -625,75 +623,26 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
-/* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes. Once it hits zero we enable the interrupt.
- */
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- u32 var = 0;
- unsigned long hw_flags = 0;
- struct intr_context *ctx = qdev->intr_context + intr;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
- /* Always enable if we're MSIX multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- return var;
- }
+ struct intr_context *ctx = &qdev->intr_context[intr];
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- if (atomic_dec_and_test(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- }
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return var;
+ ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}
-static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- u32 var = 0;
- struct intr_context *ctx;
-
- /* HW disables for us if we're MSIX multi interrupts and
- * it's not the default (zeroeth) interrupt.
- */
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
- return 0;
+ struct intr_context *ctx = &qdev->intr_context[intr];
- ctx = qdev->intr_context + intr;
- spin_lock(&qdev->hw_lock);
- if (!atomic_read(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_dis_mask);
- var = ql_read32(qdev, STS);
- }
- atomic_inc(&ctx->irq_cnt);
- spin_unlock(&qdev->hw_lock);
- return var;
+ ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
int i;
- for (i = 0; i < qdev->intr_count; i++) {
- /* The enable call does a atomic_dec_and_test
- * and enables only if the result is zero.
- * So we precharge it here.
- */
- if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
- i == 0))
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
- ql_enable_completion_interrupt(qdev, i);
- }
+ for (i = 0; i < qdev->intr_count; i++)
+ ql_enable_completion_interrupt(qdev, i);
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
@@ -1027,48 +976,32 @@ static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
return PAGE_SIZE << qdev->lbq_buf_order;
}
-/* Get the next large buffer. */
-static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
- struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
- rx_ring->lbq_curr_idx++;
- if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_free_cnt++;
- return lbq_desc;
+ struct qlge_bq_desc *bq_desc;
+
+ bq_desc = &bq->queue[bq->next_to_clean];
+ bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
+
+ return bq_desc;
}
-static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring)
{
- struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
+ struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(lbq_desc, mapaddr),
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
- /* If it's the last chunk of our master page then
- * we unmap it.
- */
- if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
- == ql_lbq_block_size(qdev))
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- return lbq_desc;
-}
+ if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
+ ql_lbq_block_size(qdev)) {
+ /* last chunk of the master page */
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ }
-/* Get the next small buffer. */
-static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
- rx_ring->sbq_curr_idx++;
- if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_free_cnt++;
- return sbq_desc;
+ return lbq_desc;
}
/* Update an rx ring index. */
@@ -1087,178 +1020,192 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
-static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
- struct bq_desc *lbq_desc)
+static const char * const bq_type_name[] = {
+ [QLGE_SB] = "sbq",
+ [QLGE_LB] = "lbq",
+};
+
+/* return 0 or negative error */
+static int qlge_refill_sb(struct rx_ring *rx_ring,
+ struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
- if (!rx_ring->pg_chunk.page) {
- u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
- qdev->lbq_buf_order);
- if (unlikely(!rx_ring->pg_chunk.page)) {
- netif_err(qdev, drv, qdev->ndev,
- "page allocation failed.\n");
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct sk_buff *skb;
+
+ if (sbq_desc->p.skb)
+ return 0;
+
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u sbq: getting new skb for index %d.\n",
+ rx_ring->cq_id, sbq_desc->index);
+
+ skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, QLGE_SB_PAD);
+
+ sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+ SMALL_BUF_MAP_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+ netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
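+ /* Publish the mapped address in the buffer-queue element consumed by
+ * the device (buf_ptr points into the DMA-coherent bq->base array).
+ */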
+ *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
+
+ sbq_desc->p.skb = skb;
+ return 0;
+}
+
+/* return 0 or negative error */
+static int qlge_refill_lb(struct rx_ring *rx_ring,
+ struct qlge_bq_desc *lbq_desc, gfp_t gfp)
+{
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
+
+ if (!master_chunk->page) {
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
+ if (unlikely(!page))
return -ENOMEM;
- }
- rx_ring->pg_chunk.offset = 0;
- map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
- 0, ql_lbq_block_size(qdev),
+ dma_addr = pci_map_page(qdev->pdev, page, 0,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- __free_pages(rx_ring->pg_chunk.page,
- qdev->lbq_buf_order);
- rx_ring->pg_chunk.page = NULL;
+ if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+ __free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
- return -ENOMEM;
+ return -EIO;
}
- rx_ring->pg_chunk.map = map;
- rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
+ master_chunk->page = page;
+ master_chunk->va = page_address(page);
+ master_chunk->offset = 0;
+ rx_ring->chunk_dma_addr = dma_addr;
}
- /* Copy the current master pg_chunk info
- * to the current descriptor.
- */
- lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
+ lbq_desc->p.pg_chunk = *master_chunk;
+ lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
+ *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
+ lbq_desc->p.pg_chunk.offset);
/* Adjust the master page chunk for next
* buffer get.
*/
- rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
- if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
- rx_ring->pg_chunk.page = NULL;
- lbq_desc->p.pg_chunk.last_flag = 1;
+ master_chunk->offset += qdev->lbq_buf_size;
+ if (master_chunk->offset == ql_lbq_block_size(qdev)) {
+ master_chunk->page = NULL;
} else {
- rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
- get_page(rx_ring->pg_chunk.page);
- lbq_desc->p.pg_chunk.last_flag = 0;
+ master_chunk->va += qdev->lbq_buf_size;
+ get_page(master_chunk->page);
}
+
return 0;
}
-/* Process (refill) a large buffer queue. */
-static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+
+/* return 0 or negative error */
+static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
- u32 clean_idx = rx_ring->lbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *lbq_desc;
- u64 map;
+ struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_bq_desc *bq_desc;
+ int refill_count;
+ int retval;
int i;
- while (rx_ring->lbq_free_cnt > 32) {
- for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- lbq_desc = &rx_ring->lbq[clean_idx];
- if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- rx_ring->lbq_clean_idx = clean_idx;
- netif_err(qdev, ifup, qdev->ndev,
- "Could not get a page chunk, i=%d, clean_idx =%d .\n",
- i, clean_idx);
- return;
- }
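+ /* Entries free for refill: from next_to_use up to the
+ * QLGE_BQ_ALIGN()-rounded slot just short of next_to_clean, taken
+ * modulo the queue length.
+ */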
+ refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
+ bq->next_to_use);
+ if (!refill_count)
+ return 0;
+
+ i = bq->next_to_use;
+ bq_desc = &bq->queue[i];
+ i -= QLGE_BQ_LEN;
+ do {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u %s: try cleaning idx %d\n",
+ rx_ring->cq_id, bq_type_name[bq->type], i);
- map = lbq_desc->p.pg_chunk.map +
- lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
- dma_unmap_len_set(lbq_desc, maplen,
- rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
-
- pci_dma_sync_single_for_device(qdev->pdev, map,
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- clean_idx++;
- if (clean_idx == rx_ring->lbq_len)
- clean_idx = 0;
+ if (bq->type == QLGE_SB)
+ retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
+ else
+ retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
+ if (retval < 0) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "ring %u %s: Could not get a page chunk, idx %d\n",
+ rx_ring->cq_id, bq_type_name[bq->type], i);
+ break;
}
- rx_ring->lbq_clean_idx = clean_idx;
- rx_ring->lbq_prod_idx += 16;
- if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_free_cnt -= 16;
- }
+ bq_desc++;
+ i++;
+ if (unlikely(!i)) {
+ bq_desc = &bq->queue[0];
+ i -= QLGE_BQ_LEN;
+ }
+ refill_count--;
+ } while (refill_count);
+ i += QLGE_BQ_LEN;
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
+ if (bq->next_to_use != i) {
+ if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "ring %u %s: updating prod idx = %d.\n",
+ rx_ring->cq_id, bq_type_name[bq->type],
+ i);
+ ql_write_db_reg(i, bq->prod_idx_db_reg);
+ }
+ bq->next_to_use = i;
}
+
+ return retval;
}
-/* Process (refill) a small buffer queue. */
-static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
+ unsigned long delay)
{
- u32 clean_idx = rx_ring->sbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *sbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->sbq_free_cnt > 16) {
- for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
- sbq_desc = &rx_ring->sbq[clean_idx];
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- if (sbq_desc->p.skb == NULL) {
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
- sbq_desc->p.skb =
- netdev_alloc_skb(qdev->ndev,
- SMALL_BUFFER_SIZE);
- if (sbq_desc->p.skb == NULL) {
- rx_ring->sbq_clean_idx = clean_idx;
- return;
- }
- skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
- map = pci_map_single(qdev->pdev,
- sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev,
- "PCI mapping failed.\n");
- rx_ring->sbq_clean_idx = clean_idx;
- dev_kfree_skb_any(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- return;
- }
- dma_unmap_addr_set(sbq_desc, mapaddr, map);
- dma_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size);
- *sbq_desc->addr = cpu_to_le64(map);
- }
+ bool sbq_fail, lbq_fail;
- clean_idx++;
- if (clean_idx == rx_ring->sbq_len)
- clean_idx = 0;
- }
- rx_ring->sbq_clean_idx = clean_idx;
- rx_ring->sbq_prod_idx += 16;
- if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_free_cnt -= 16;
- }
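+ /* qlge_refill_bq() returns 0 or a negative errno; reduce to a bool. */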
+ sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
+ lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
- }
+ /* Minimum number of buffers needed to be able to receive at least one
+ * frame of any format:
+ * sbq: 1 for header + 1 for data
+ * lbq: mtu 9000 / lb size
+ * Below this, the queue might stall.
+ */
+ if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
+ (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
+ DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
+ /* Allocations can take a long time in certain cases (e.g.
+ * reclaim). Therefore, use a workqueue for long-running
+ * work items.
+ */
+ queue_delayed_work_on(smp_processor_id(), system_long_wq,
+ &rx_ring->refill_work, delay);
}
-static void ql_update_buffer_queues(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static void qlge_slow_refill(struct work_struct *work)
{
- ql_update_sbq(qdev, rx_ring);
- ql_update_lbq(qdev, rx_ring);
+ struct rx_ring *rx_ring = container_of(work, struct rx_ring,
+ refill_work.work);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi_disable(napi);
+ ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
+ napi_enable(napi);
+
+ local_bh_disable();
+ /* napi_disable() might have prevented incomplete napi work from being
+ * rescheduled.
+ */
+ napi_schedule(napi);
+ /* trigger softirq processing */
+ local_bh_enable();
}
/* Unmaps tx buffers. Can be called from send() if a pci mapping
@@ -1495,7 +1442,7 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
u16 vlan_id)
{
struct sk_buff *skb;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
/* Frame error, so drop the packet. */
@@ -1544,7 +1491,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
void *addr;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
size_t hlen = ETH_HLEN;
@@ -1634,31 +1581,24 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
u32 length,
u16 vlan_id)
{
+ struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- struct sk_buff *new_skb = NULL;
- struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+ struct sk_buff *skb, *new_skb;
skb = sbq_desc->p.skb;
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (new_skb == NULL) {
+ if (!new_skb) {
rx_ring->rx_dropped++;
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
skb_put_data(new_skb, skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */
@@ -1759,11 +1699,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
- struct bq_desc *lbq_desc;
- struct bq_desc *sbq_desc;
- struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ struct qlge_bq_desc *lbq_desc, *sbq_desc;
+ struct sk_buff *skb = NULL;
size_t hlen = ETH_HLEN;
/*
@@ -1776,11 +1715,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
/*
* Headers fit nicely into a small buffer.
*/
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
@@ -1808,35 +1745,22 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* from the "data" small buffer to the "header" small
* buffer.
*/
- sbq_desc = ql_get_curr_sbuf(rx_ring);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr
- (sbq_desc, mapaddr),
- dma_unmap_len
- (sbq_desc, maplen),
+ sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
skb_put_data(skb, sbq_desc->p.skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr
- (sbq_desc,
- mapaddr),
- dma_unmap_len
- (sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes in a single small buffer.\n",
length);
- sbq_desc = ql_get_curr_sbuf(rx_ring);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc,
- mapaddr),
- dma_unmap_len(sbq_desc,
- maplen),
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
sbq_desc->p.skb = NULL;
}
@@ -1868,15 +1792,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
- if (skb == NULL) {
+ if (!skb) {
netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
"No skb available, drop the packet.\n");
return NULL;
}
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(lbq_desc,
- mapaddr),
- dma_unmap_len(lbq_desc, maplen),
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size,
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1907,11 +1829,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* eventually be in trouble.
*/
int size, i = 0;
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
+ sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
* This is an non TCP/UDP IP frame, so
@@ -1931,8 +1851,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
do {
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- size = (length < rx_ring->lbq_buf_size) ? length :
- rx_ring->lbq_buf_size;
+ size = min(length, qdev->lbq_buf_size);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"Adding page %d to skb for %d bytes.\n",
@@ -2286,7 +2205,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
if (count == budget)
break;
}
- ql_update_buffer_queues(qdev, rx_ring);
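+ /* napi context: atomic allocation; on failure, refill_work retries
+ * with no delay.
+ */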
+ ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
ql_write_cq_idx(rx_ring);
return count;
}
@@ -2500,21 +2419,22 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
u32 var;
int work_done = 0;
- spin_lock(&qdev->hw_lock);
- if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "Shared Interrupt, Not ours!\n");
- spin_unlock(&qdev->hw_lock);
- return IRQ_NONE;
- }
- spin_unlock(&qdev->hw_lock);
+ /* Experience shows that when using INTx interrupts, interrupts must
+ * be masked manually.
+ * When using MSI mode, INTR_EN_EN must be explicitly disabled
+ * (even though it is auto-masked), otherwise a later command to
+ * enable it is not effective.
+ */
+ if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
+ ql_disable_completion_interrupt(qdev, 0);
- var = ql_disable_completion_interrupt(qdev, intr_context->intr);
+ var = ql_read32(qdev, STS);
/*
* Check for fatal error.
*/
if (var & STS_FE) {
+ ql_disable_completion_interrupt(qdev, 0);
ql_queue_asic_error(qdev);
netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
@@ -2534,7 +2454,6 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
netif_err(qdev, intr, qdev->ndev,
"Got MPI processor interrupt.\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
qdev->workqueue, &qdev->mpi_work, 0);
@@ -2550,11 +2469,18 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
if (var & intr_context->irq_mask) {
netif_info(qdev, intr, qdev->ndev,
"Waking handler for rx_ring[0].\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
+ } else {
+ /* Experience shows that the device sometimes signals an
+ * interrupt but no work is scheduled from this function.
+ * Nevertheless, the interrupt is auto-masked. Therefore, we
+ * systematically re-enable the interrupt if we didn't
+ * schedule napi.
+ */
+ ql_enable_completion_interrupt(qdev, 0);
}
- ql_enable_completion_interrupt(qdev, intr_context->intr);
+
return work_done ? IRQ_HANDLED : IRQ_NONE;
}
@@ -2737,7 +2663,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev->rx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->rx_ring_shadow_reg_dma);
- if (qdev->rx_ring_shadow_reg_area == NULL) {
+ if (!qdev->rx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
return -ENOMEM;
@@ -2746,7 +2672,7 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev->tx_ring_shadow_reg_area =
pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
- if (qdev->tx_ring_shadow_reg_area == NULL) {
+ if (!qdev->tx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
@@ -2798,14 +2724,14 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
&tx_ring->wq_base_dma);
- if ((tx_ring->wq_base == NULL) ||
+ if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
goto pci_alloc_err;
tx_ring->q =
kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
GFP_KERNEL);
- if (tx_ring->q == NULL)
+ if (!tx_ring->q)
goto err;
return 0;
@@ -2820,54 +2746,46 @@ pci_alloc_err:
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
- struct bq_desc *lbq_desc;
+ struct qlge_bq *lbq = &rx_ring->lbq;
+ unsigned int last_offset;
- uint32_t curr_idx, clean_idx;
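+ /* Only the descriptor holding the last chunk carved from a master
+ * page unmaps the whole page (matching qlge_refill_lb() above).
+ */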
+ last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
+ while (lbq->next_to_clean != lbq->next_to_use) {
+ struct qlge_bq_desc *lbq_desc =
+ &lbq->queue[lbq->next_to_clean];
- curr_idx = rx_ring->lbq_curr_idx;
- clean_idx = rx_ring->lbq_clean_idx;
- while (curr_idx != clean_idx) {
- lbq_desc = &rx_ring->lbq[curr_idx];
-
- if (lbq_desc->p.pg_chunk.last_flag) {
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
+ if (lbq_desc->p.pg_chunk.offset == last_offset)
+ pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev),
PCI_DMA_FROMDEVICE);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
-
put_page(lbq_desc->p.pg_chunk.page);
- lbq_desc->p.pg_chunk.page = NULL;
-
- if (++curr_idx == rx_ring->lbq_len)
- curr_idx = 0;
+ lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
}
- if (rx_ring->pg_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
- put_page(rx_ring->pg_chunk.page);
- rx_ring->pg_chunk.page = NULL;
+
+ if (rx_ring->master_chunk.page) {
+ pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
+ ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ put_page(rx_ring->master_chunk.page);
+ rx_ring->master_chunk.page = NULL;
}
}
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
int i;
- struct bq_desc *sbq_desc;
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- if (sbq_desc == NULL) {
+ for (i = 0; i < QLGE_BQ_LEN; i++) {
+ struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
+
+ if (!sbq_desc) {
netif_err(qdev, ifup, qdev->ndev,
"sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
+ pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -2881,89 +2799,83 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
int i;
- struct rx_ring *rx_ring;
for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->lbq)
+ struct rx_ring *rx_ring = &qdev->rx_ring[i];
+
+ if (rx_ring->lbq.queue)
ql_free_lbq_buffers(qdev, rx_ring);
- if (rx_ring->sbq)
+ if (rx_ring->sbq.queue)
ql_free_sbq_buffers(qdev, rx_ring);
}
}
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
- struct rx_ring *rx_ring;
int i;
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->type != TX_Q)
- ql_update_buffer_queues(qdev, rx_ring);
- }
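+ /* Only the first rss_ring_count rings own buffer queues; this runs in
+ * process context, hence GFP_KERNEL.
+ */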
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
+ HZ / 2);
}
-static void ql_init_lbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
+static int qlge_init_bq(struct qlge_bq *bq)
{
+ struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
+ struct ql_adapter *qdev = rx_ring->qdev;
+ struct qlge_bq_desc *bq_desc;
+ __le64 *buf_ptr;
int i;
- struct bq_desc *lbq_desc;
- __le64 *bq = rx_ring->lbq_base;
- memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- memset(lbq_desc, 0, sizeof(*lbq_desc));
- lbq_desc->index = i;
- lbq_desc->addr = bq;
- bq++;
+ bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ &bq->base_dma);
+ if (!bq->base) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "ring %u %s allocation failed.\n", rx_ring->cq_id,
+ bq_type_name[bq->type]);
+ return -ENOMEM;
}
-}
-static void ql_init_sbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
- __le64 *bq = rx_ring->sbq_base;
+ bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
+ GFP_KERNEL);
+ if (!bq->queue)
+ return -ENOMEM;
- memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- memset(sbq_desc, 0, sizeof(*sbq_desc));
- sbq_desc->index = i;
- sbq_desc->addr = bq;
- bq++;
+ buf_ptr = bq->base;
+ bq_desc = &bq->queue[0];
+ for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
+ bq_desc->p.skb = NULL;
+ bq_desc->index = i;
+ bq_desc->buf_ptr = buf_ptr;
}
+
+ return 0;
}
static void ql_free_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
/* Free the small buffer queue. */
- if (rx_ring->sbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->sbq_size,
- rx_ring->sbq_base, rx_ring->sbq_base_dma);
- rx_ring->sbq_base = NULL;
+ if (rx_ring->sbq.base) {
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ rx_ring->sbq.base, rx_ring->sbq.base_dma);
+ rx_ring->sbq.base = NULL;
}
/* Free the small buffer queue control blocks. */
- kfree(rx_ring->sbq);
- rx_ring->sbq = NULL;
+ kfree(rx_ring->sbq.queue);
+ rx_ring->sbq.queue = NULL;
/* Free the large buffer queue. */
- if (rx_ring->lbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->lbq_size,
- rx_ring->lbq_base, rx_ring->lbq_base_dma);
- rx_ring->lbq_base = NULL;
+ if (rx_ring->lbq.base) {
+ pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
+ rx_ring->lbq.base, rx_ring->lbq.base_dma);
+ rx_ring->lbq.base = NULL;
}
/* Free the large buffer queue control blocks. */
- kfree(rx_ring->lbq);
- rx_ring->lbq = NULL;
+ kfree(rx_ring->lbq.queue);
+ rx_ring->lbq.queue = NULL;
/* Free the rx queue. */
if (rx_ring->cq_base) {
@@ -2987,67 +2899,18 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
&rx_ring->cq_base_dma);
- if (rx_ring->cq_base == NULL) {
+ if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
- if (rx_ring->sbq_len) {
- /*
- * Allocate small buffer queue.
- */
- rx_ring->sbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
- &rx_ring->sbq_base_dma);
-
- if (rx_ring->sbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue allocation failed.\n");
- goto err_mem;
- }
-
- /*
- * Allocate small buffer queue control blocks.
- */
- rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL)
- goto err_mem;
-
- ql_init_sbq_ring(qdev, rx_ring);
- }
-
- if (rx_ring->lbq_len) {
- /*
- * Allocate large buffer queue.
- */
- rx_ring->lbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
- &rx_ring->lbq_base_dma);
-
- if (rx_ring->lbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue allocation failed.\n");
- goto err_mem;
- }
- /*
- * Allocate large buffer queue control blocks.
- */
- rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL)
- goto err_mem;
-
- ql_init_lbq_ring(qdev, rx_ring);
+ if (rx_ring->cq_id < qdev->rss_ring_count &&
+ (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
+ ql_free_rx_resources(qdev, rx_ring);
+ return -ENOMEM;
}
return 0;
-
-err_mem:
- ql_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
@@ -3133,7 +2996,6 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
- u16 bq_len;
u64 tmp;
__le64 *base_indirect_ptr;
int page_entries;
@@ -3144,12 +3006,12 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
*rx_ring->prod_idx_sh_reg = 0;
shadow_reg += sizeof(u64);
shadow_reg_dma += sizeof(u64);
- rx_ring->lbq_base_indirect = shadow_reg;
- rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- rx_ring->sbq_base_indirect = shadow_reg;
- rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
+ rx_ring->lbq.base_indirect = shadow_reg;
+ rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ rx_ring->sbq.base_indirect = shadow_reg;
+ rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
@@ -3160,16 +3022,16 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->valid_db_reg = doorbell_area + 0x04;
/* PCI doorbell mem area + 0x18 for large buffer consumer */
- rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
+ rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
/* PCI doorbell mem area + 0x1c */
- rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
+ rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
memset((void *)cqicb, 0, sizeof(struct cqicb));
cqicb->msix_vect = rx_ring->irq;
- bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
- cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
+ cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
+ LEN_CPP_CONT);
cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
@@ -3181,59 +3043,42 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->flags = FLAGS_LC | /* Load queue base address */
FLAGS_LV | /* Load MSI-X vector */
FLAGS_LI; /* Load irq delay values */
- if (rx_ring->lbq_len) {
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = rx_ring->lbq_base_indirect;
+ tmp = (u64)rx_ring->lbq.base_dma;
+ base_indirect_ptr = rx_ring->lbq.base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- cqicb->lbq_addr =
- cpu_to_le64(rx_ring->lbq_base_indirect_dma);
- bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
- (u16) rx_ring->lbq_buf_size;
- cqicb->lbq_buf_size = cpu_to_le16(bq_len);
- bq_len = (rx_ring->lbq_len == 65536) ? 0 :
- (u16) rx_ring->lbq_len;
- cqicb->lbq_len = cpu_to_le16(bq_len);
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_clean_idx = 0;
- rx_ring->lbq_free_cnt = rx_ring->lbq_len;
- }
- if (rx_ring->sbq_len) {
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
+ cqicb->lbq_buf_size =
+ cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
+ cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
+ rx_ring->lbq.next_to_use = 0;
+ rx_ring->lbq.next_to_clean = 0;
+
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = rx_ring->sbq_base_indirect;
+ tmp = (u64)rx_ring->sbq.base_dma;
+ base_indirect_ptr = rx_ring->sbq.base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq_base_indirect_dma);
- cqicb->sbq_buf_size =
- cpu_to_le16((u16)(rx_ring->sbq_buf_size));
- bq_len = (rx_ring->sbq_len == 65536) ? 0 :
- (u16) rx_ring->sbq_len;
- cqicb->sbq_len = cpu_to_le16(bq_len);
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_clean_idx = 0;
- rx_ring->sbq_free_cnt = rx_ring->sbq_len;
- }
- switch (rx_ring->type) {
- case TX_Q:
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
- break;
- case RX_Q:
+ cpu_to_le64(rx_ring->sbq.base_indirect_dma);
+ cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
+ cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
+ rx_ring->sbq.next_to_use = 0;
+ rx_ring->sbq.next_to_clean = 0;
+ }
+ if (rx_ring->cq_id < qdev->rss_ring_count) {
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
@@ -3241,10 +3086,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
64);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
- break;
- default:
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Invalid rx_ring->type = %d.\n", rx_ring->type);
+ } else {
+ cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
+ cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
}
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
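[Editor's note] QLGE_FIT16() replaces the repeated "(len == 65536) ? 0 : (u16)len" dance this hunk removes. A plain u16 truncation already implements the hardware convention that 0 encodes 65536, so a minimal sketch of the helper (assuming it is defined essentially as a cast, which the call sites above are consistent with) is:

	/* 65536 truncates to 0, which the chip reads back as 65536;
	 * every smaller value passes through unchanged.
	 */
	#define QLGE_FIT16(value) ((u16)(value))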
@@ -3366,6 +3210,7 @@ msi:
}
}
qlge_irq_type = LEG_IRQ;
+ set_bit(QL_LEGACY_ENABLED, &qdev->flags);
netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
"Running with legacy interrupts.\n");
}
@@ -3509,6 +3354,16 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
intr_context->intr_dis_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
INTR_EN_TYPE_DISABLE;
+ if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
+ /* Experience shows that when using INTx interrupts,
+ * the device does not always auto-mask INTR_EN_EN.
+ * Moreover, masking INTR_EN_EN manually does not
+ * immediately prevent interrupt generation.
+ */
+ intr_context->intr_en_mask |= INTR_EN_EI << 16 |
+ INTR_EN_EI;
+ intr_context->intr_dis_mask |= INTR_EN_EI << 16;
+ }
intr_context->intr_read_mask =
INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
/*
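[Editor's note] For context on the INTR_EN_EI bits added above: the high 16 bits of the value written to the interrupt register act as a write-enable mask for the low 16 bits, which is why the enable mask arms both halves while the disable mask only arms the mask half. A hedged sketch of how the driver consumes these fields (example_intr_toggle is hypothetical; the real consumers are helpers such as ql_enable_completion_interrupt, seen removed from qlge_mpi.c below):

	static void example_intr_toggle(struct ql_adapter *qdev,
					struct intr_context *ctx, bool enable)
	{
		/* one register write; low bits not armed in the top half
		 * are ignored by the hardware
		 */
		ql_write32(qdev, INTR_EN,
			   enable ? ctx->intr_en_mask : ctx->intr_dis_mask);
	}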
@@ -3557,7 +3412,6 @@ static int ql_request_irq(struct ql_adapter *qdev)
ql_resolve_queues_to_irqs(qdev);
for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- atomic_set(&intr_context->irq_cnt, 0);
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
status = request_irq(qdev->msi_x_entry[i].vector,
intr_context->handler,
@@ -3591,12 +3445,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
goto err_irq;
netif_err(qdev, ifup, qdev->ndev,
- "Hooked intr %d, queue type %s, with name %s.\n",
- i,
- qdev->rx_ring[0].type == DEFAULT_Q ?
- "DEFAULT_Q" :
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ "Hooked intr 0, queue type RX_Q, with name %s.\n",
intr_context->name);
}
intr_context->hooked = 1;
@@ -4072,6 +3921,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ int i;
/* If we hit pci_channel_io_perm_failure
* failure condition, then we already
@@ -4089,21 +3939,31 @@ static int qlge_close(struct net_device *ndev)
*/
while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
msleep(1);
+
+ /* Make sure refill_work doesn't re-enable napi */
+ for (i = 0; i < qdev->rss_ring_count; i++)
+ cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
+
ql_adapter_down(qdev);
ql_release_adapter_resources(qdev);
return 0;
}
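[Editor's note] The cancel_delayed_work_sync() loop added above closes a race with the new slow-refill path. Illustrative only (qlge_slow_refill is defined earlier in this file and is not quoted here; this sketch only shows why the work must not outlive the close path): a refill that ran after ql_adapter_down() could re-enable NAPI on a torn-down ring.

	static void example_slow_refill(struct work_struct *work)
	{
		struct rx_ring *rx_ring = container_of(to_delayed_work(work),
						       struct rx_ring,
						       refill_work);

		napi_disable(&rx_ring->napi);
		/* ...repost fresh rx buffers with GFP_KERNEL here... */
		napi_enable(&rx_ring->napi);	/* the re-enable qlge_close()
						 * must not race with
						 */
	}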
+static void qlge_set_lb_size(struct ql_adapter *qdev)
+{
+ if (qdev->ndev->mtu <= 1500)
+ qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
+ else
+ qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
+ qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
+}
+
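[Editor's note] Worked example for the helper above, assuming the usual qlge constants (LARGE_BUFFER_MIN_SIZE = 2048, LARGE_BUFFER_MAX_SIZE = 8192) and 4 KiB pages:

	/* mtu <= 1500: lbq_buf_size = 2048 -> lbq_buf_order = get_order(2048) = 0
	 * mtu >  1500: lbq_buf_size = 8192 -> lbq_buf_order = get_order(8192) = 1
	 */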
static int ql_configure_rings(struct ql_adapter *qdev)
{
int i;
struct rx_ring *rx_ring;
struct tx_ring *tx_ring;
int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
- unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
-
- qdev->lbq_buf_order = get_order(lbq_buf_len);
/* In a perfect world we have one RSS ring for each CPU
* and each has it's own vector. To do that we ask for
@@ -4148,15 +4008,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->cq_len = qdev->rx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = NUM_LARGE_BUFFERS;
- rx_ring->lbq_size =
- rx_ring->lbq_len * sizeof(__le64);
- rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- rx_ring->sbq_len = NUM_SMALL_BUFFERS;
- rx_ring->sbq_size =
- rx_ring->sbq_len * sizeof(__le64);
- rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
- rx_ring->type = RX_Q;
+ rx_ring->lbq.type = QLGE_LB;
+ rx_ring->sbq.type = QLGE_SB;
+ INIT_DELAYED_WORK(&rx_ring->refill_work,
+ &qlge_slow_refill);
} else {
/*
* Outbound queue handles outbound completions only.
@@ -4165,13 +4020,6 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->cq_len = qdev->tx_ring_size;
rx_ring->cq_size =
rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = 0;
- rx_ring->lbq_size = 0;
- rx_ring->lbq_buf_size = 0;
- rx_ring->sbq_len = 0;
- rx_ring->sbq_size = 0;
- rx_ring->sbq_buf_size = 0;
- rx_ring->type = TX_Q;
}
}
return 0;
@@ -4186,6 +4034,7 @@ static int qlge_open(struct net_device *ndev)
if (err)
return err;
+ qlge_set_lb_size(qdev);
err = ql_configure_rings(qdev);
if (err)
return err;
@@ -4207,9 +4056,7 @@ error_up:
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
- struct rx_ring *rx_ring;
- int i, status;
- u32 lbq_buf_len;
+ int status;
/* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
@@ -4232,16 +4079,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (status)
goto error;
- /* Get the new rx buffer size. */
- lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- for (i = 0; i < qdev->rss_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- /* Set the new size. */
- rx_ring->lbq_buf_size = lbq_buf_len;
- }
+ qlge_set_lb_size(qdev);
status = ql_adapter_up(qdev);
if (status)
@@ -4642,13 +4480,12 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
qdev->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
if (qlge_mpi_coredump) {
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
- if (qdev->mpi_coredump == NULL) {
+ if (!qdev->mpi_coredump) {
err = -ENOMEM;
goto err_out2;
}
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 957c72985a06..9e422bbbb6ab 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -1257,7 +1257,6 @@ void ql_mpi_work(struct work_struct *work)
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
mutex_unlock(&qdev->mpi_mutex);
- ql_enable_completion_interrupt(qdev, 0);
}
void ql_mpi_reset_work(struct work_struct *work)
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
index 900424db9b97..eabf1093328e 100644
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ b/drivers/staging/ralink-gdma/ralink-gdma.c
@@ -796,7 +796,6 @@ static int gdma_dma_probe(struct platform_device *pdev)
struct gdma_dma_dev *dma_dev;
struct dma_device *dd;
unsigned int i;
- struct resource *res;
int ret;
int irq;
void __iomem *base;
@@ -818,8 +817,7 @@ static int gdma_dma_probe(struct platform_device *pdev)
return -EINVAL;
dma_dev->data = data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
dma_dev->base = base;
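[Editor's note] devm_platform_ioremap_resource() is the managed shorthand for exactly the two-step lookup it replaces here; an equivalent open-coded form (example_ioremap is a hypothetical name):

	static void __iomem *example_ioremap(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);
	}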
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 51a5b71f8c25..88e42cc1d837 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -440,15 +440,9 @@ static void update_bmc_sta(struct adapter *padapter)
tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i] & 0x7f);
}
- if (pcur_network->Configuration.DSConfig > 14) {
- /* force to A mode. 5G doesn't support CCK rates */
- network_type = WIRELESS_11A;
- tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
- } else {
- /* force to b mode */
- network_type = WIRELESS_11B;
- tx_ra_bitmap = 0xf;
- }
+ /* force to b mode */
+ network_type = WIRELESS_11B;
+ tx_ra_bitmap = 0xf;
raid = networktype_to_raid(network_type);
init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
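[Editor's note] Decoding the rate bitmaps above (bit i selects rate index i; hedged, but consistent with the comment the hunk removes):

	/* 0x00f -> CCK 1, 2, 5.5, 11 Mbps (the 802.11b set kept here)
	 * 0x150 -> OFDM 6, 12, 24 Mbps    (the dropped 5 GHz branch)
	 */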
@@ -560,29 +554,24 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
static void update_hw_ht_param(struct adapter *padapter)
{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ u8 max_ampdu_len;
+ u8 min_mpdu_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
DBG_88E("%s\n", __func__);
- /* handle A-MPDU parameter field */
- /*
- ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- ampdu_params_info [4:2]:Min MPDU Start Spacing
- */
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
-
- min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+ /* handle A-MPDU parameter field
+ * ampdu_params_info [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ * ampdu_params_info [4:2]:Min MPDU Start Spacing
+ */
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ min_mpdu_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, &min_mpdu_spacing);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, &max_ampdu_len);
- /* */
- /* Config SM Power Save setting */
- /* */
+ /* Config SM Power Save setting */
pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.cap_info) & 0x0C) >> 2;
if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 02c476f45b33..d9b0f9e6235c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -25,7 +25,7 @@ enum{
* When we want to enable write operation, we should change to pwr on state.
* When we stop writing, we should switch to 500k mode and disable LDO 2.5V.
*/
-void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
+static void efuse_power_switch(struct adapter *pAdapter, u8 write, u8 pwrstate)
{
u8 tempval;
u16 tmpv16;
@@ -615,10 +615,9 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt)
{
u16 efuse_addr = *pAddr;
- u8 badworden = 0;
+ u8 badworden;
u32 PgWriteSuccess = 0;
- badworden = 0x0f;
badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data);
if (badworden == 0x0F) {
/* write ok */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 1ec3b237212e..e764436e120f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -2045,9 +2045,9 @@ void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
while (1) {
do_join_r = rtw_do_join(padapter);
- if (do_join_r == _SUCCESS) {
+ if (do_join_r == _SUCCESS)
break;
- }
+
DBG_88E("roaming do_join return %d\n", do_join_r);
pmlmepriv->to_roaming--;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 18dc9fc1c04a..e984b4605e91 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -507,7 +507,7 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pwps_ie = rtw_get_wps_ie(cur_network->ies+_FIXED_IE_LENGTH_, cur_network->ie_length-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
/* insert & update wps_probe_resp_ie */
- if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ if (pmlmepriv->wps_probe_resp_ie && pwps_ie && wps_ielen > 0) {
uint wps_offset, remainder_ielen;
u8 *premainder_ie;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 7b16632048b7..03dc7e5fcc38 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -514,7 +514,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -621,7 +621,7 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
}
} else {
ret = -EINVAL;
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 91a30142c567..73f2cb5ebaa6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -83,7 +83,8 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
INIT_LIST_HEAD(&pstapriv->sta_hash[i]);
- list_add_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+ list_add_tail(&psta->list,
+ get_list_head(&pstapriv->free_sta_queue));
psta++;
}
@@ -134,31 +135,30 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
struct recv_reorder_ctrl *preorder_ctrl;
int index;
- if (pstapriv) {
- /* delete all reordering_ctrl_timer */
- spin_lock_bh(&pstapriv->sta_hash_lock);
- for (index = 0; index < NUM_STA; index++) {
- phead = &pstapriv->sta_hash[index];
- plist = phead->next;
+ if (!pstapriv)
+ return _SUCCESS;
- while (phead != plist) {
- int i;
+ /* delete all reordering_ctrl_timer */
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &pstapriv->sta_hash[index];
+ plist = phead->next;
- psta = container_of(plist, struct sta_info,
- hash_list);
- plist = plist->next;
+ while (phead != plist) {
+ int i;
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
- }
+ psta = container_of(plist, struct sta_info, hash_list);
+ plist = plist->next;
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
}
}
- spin_unlock_bh(&pstapriv->sta_hash_lock);
- /*===============================*/
-
- vfree(pstapriv->pallocated_stainfo_buf);
}
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ vfree(pstapriv->pallocated_stainfo_buf);
return _SUCCESS;
}
@@ -167,7 +167,7 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
s32 index;
struct list_head *phash_list;
- struct sta_info *psta;
+ struct sta_info *psta;
struct __queue *pfree_sta_queue;
struct recv_reorder_ctrl *preorder_ctrl;
int i = 0;
@@ -180,65 +180,70 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
struct sta_info, list);
if (!psta) {
spin_unlock_bh(&pfree_sta_queue->lock);
- } else {
- list_del_init(&psta->list);
- spin_unlock_bh(&pfree_sta_queue->lock);
- _rtw_init_stainfo(psta);
- memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
- index = wifi_mac_hash(hwaddr);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("%s: index=%x", __func__, index));
- if (index >= NUM_STA) {
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => %s: index >= NUM_STA", __func__));
- psta = NULL;
- goto exit;
- }
- phash_list = &pstapriv->sta_hash[index];
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
- list_add_tail(&psta->hash_list, phash_list);
- pstapriv->asoc_sta_count++;
- spin_unlock_bh(&pstapriv->sta_hash_lock);
+ return NULL;
+ }
-/* Commented by Albert 2009/08/13 */
-/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
-/* In this case, this packet will be dropped by recv_decache function if we use the 0x00 as the default value for tid_rxseq variable. */
-/* So, we initialize the tid_rxseq variable as the 0xffff. */
+ list_del_init(&psta->list);
+ spin_unlock_bh(&pfree_sta_queue->lock);
+ _rtw_init_stainfo(psta);
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+ index = wifi_mac_hash(hwaddr);
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("%s: index=%x", __func__, index));
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("ERROR => %s: index >= NUM_STA", __func__));
+ return NULL;
+ }
+ phash_list = &pstapriv->sta_hash[index];
- for (i = 0; i < 16; i++)
- memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
+ list_add_tail(&psta->hash_list, phash_list);
+ pstapriv->asoc_sta_count++;
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
- ("alloc number_%d stainfo with hwaddr = %pM\n",
- pstapriv->asoc_sta_count, hwaddr));
+ /* Commented by Albert 2009/08/13
+ * For the SMC router, the sequence number of first packet of
+ * WPS handshake will be 0. In this case, this packet will be
+ * dropped by recv_decache function if we use the 0x00 as the
+ * default value for tid_rxseq variable. So, we initialize the
+ * tid_rxseq variable as the 0xffff.
+ */
- init_addba_retry_timer(pstapriv->padapter, psta);
+ for (i = 0; i < 16; i++)
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i],
+ &wRxSeqInitialValue, 2);
- /* for A-MPDU Rx reordering buffer control */
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("alloc number_%d stainfo with hwaddr = %pM\n",
+ pstapriv->asoc_sta_count, hwaddr));
- preorder_ctrl->padapter = pstapriv->padapter;
+ init_addba_retry_timer(pstapriv->padapter, psta);
- preorder_ctrl->enable = false;
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
- preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->wend_b = 0xffff;
- preorder_ctrl->wsize_b = 64;/* 64; */
+ preorder_ctrl->padapter = pstapriv->padapter;
- _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+ preorder_ctrl->enable = false;
- rtw_init_recv_timer(preorder_ctrl);
- }
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* 64; */
- /* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
- psta->rssi_stat.UndecoratedSmoothedCCK = -1;
+ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
- /* init for the sequence number of received management frame */
- psta->RxMgmtFrameSeqNum = 0xffff;
+ rtw_init_recv_timer(preorder_ctrl);
}
-exit:
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
+ psta->rssi_stat.UndecoratedSmoothedCCK = -1;
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+
return psta;
}
@@ -296,7 +301,9 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
del_timer_sync(&psta->addba_retry_timer);
- /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+ /* for A-MPDU Rx reordering buffer control, cancel
+ * reordering_ctrl_timer
+ */
for (i = 0; i < 16; i++) {
struct list_head *phead, *plist;
struct recv_frame *prframe;
@@ -311,7 +318,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&ppending_recvframe_queue->lock);
- phead = get_list_head(ppending_recvframe_queue);
+ phead = get_list_head(ppending_recvframe_queue);
plist = phead->next;
while (!list_empty(phead)) {
@@ -444,23 +451,21 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
struct sta_info *psta;
- u32 res = _SUCCESS;
- unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct sta_priv *pstapriv = &padapter->stapriv;
- psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
+ psta = rtw_alloc_stainfo(pstapriv, bc_addr);
if (!psta) {
- res = _FAIL;
- RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
- goto exit;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
+ ("rtw_alloc_stainfo fail"));
+ return _FAIL;
}
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
-exit:
- return res;
+ return _SUCCESS;
}
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
@@ -471,13 +476,13 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
return rtw_get_stainfo(pstapriv, bc_addr);
}
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
+bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
- u8 res = true;
+ bool res = true;
#ifdef CONFIG_88EU_AP_MODE
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
- u8 match = false;
+ bool match = false;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -499,9 +504,9 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
spin_unlock_bh(&pacl_node_q->lock);
if (pacl_list->mode == 1)/* accept unless in deny list */
- res = (match) ? false : true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match) ? true : false;
+ res = match;
else
res = true;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index c985b1468d41..af8a79ce8736 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -667,7 +667,7 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var
void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
{
unsigned int i;
- u8 max_AMPDU_len, min_MPDU_spacing;
+ u8 max_ampdu_len, min_mpdu_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -689,16 +689,16 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
} else {
/* modify from fw by Thomas 2010/11/17 */
if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x3) > (pIE->data[i] & 0x3))
- max_AMPDU_len = pIE->data[i] & 0x3;
+ max_ampdu_len = pIE->data[i] & 0x3;
else
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x3;
if ((pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) > (pIE->data[i] & 0x1c))
- min_MPDU_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
+ min_mpdu_spacing = pmlmeinfo->HT_caps.ampdu_params_info & 0x1c;
else
- min_MPDU_spacing = pIE->data[i] & 0x1c;
+ min_mpdu_spacing = pIE->data[i] & 0x1c;
- pmlmeinfo->HT_caps.ampdu_params_info = max_AMPDU_len | min_MPDU_spacing;
+ pmlmeinfo->HT_caps.ampdu_params_info = max_ampdu_len | min_mpdu_spacing;
}
}
@@ -729,8 +729,8 @@ void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
void HTOnAssocRsp(struct adapter *padapter)
{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
+ u8 max_ampdu_len;
+ u8 min_mpdu_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -748,13 +748,11 @@ void HTOnAssocRsp(struct adapter *padapter)
* AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
* AMPDU_para [4:2]:Min MPDU Start Spacing
*/
- max_AMPDU_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ max_ampdu_len = pmlmeinfo->HT_caps.ampdu_params_info & 0x03;
+ min_mpdu_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
- min_MPDU_spacing = (pmlmeinfo->HT_caps.ampdu_params_info & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, &min_mpdu_spacing);
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, &max_ampdu_len);
}
void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 952f2ab51347..c37591657bac 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -776,7 +776,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -784,7 +784,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
- if (psta->qos_option)
+ if (psta && psta->qos_option)
qos_option = true;
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 47352f210c0b..7646167a0b36 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -47,8 +47,6 @@ static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
******************************************/
static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
{
- u8 bcmd_down = false;
- s32 retry_cnts = 100;
u8 h2c_box_num;
u32 msgbox_addr;
u32 msgbox_ex_addr;
@@ -71,39 +69,34 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
goto exit;
/* pay attention to whether a race condition happens in H2C cmd setting. */
- do {
- h2c_box_num = adapt->HalData->LastHMEBoxNum;
-
- if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
- DBG_88E(" fw read cmd failed...\n");
- goto exit;
- }
-
- *(u8 *)(&h2c_cmd) = ElementID;
-
- if (CmdLen <= 3) {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
- } else {
- memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, 3);
- ext_cmd_len = CmdLen-3;
- memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer+3, ext_cmd_len);
+ h2c_box_num = adapt->HalData->LastHMEBoxNum;
- /* Write Ext command */
- msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++)
- usb_write8(adapt, msgbox_ex_addr+cmd_idx, *((u8 *)(&h2c_cmd_ex)+cmd_idx));
- }
- /* Write command */
- msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
- for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++)
- usb_write8(adapt, msgbox_addr+cmd_idx, *((u8 *)(&h2c_cmd)+cmd_idx));
+ if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
+ DBG_88E(" fw read cmd failed...\n");
+ goto exit;
+ }
- bcmd_down = true;
+ *(u8 *)(&h2c_cmd) = ElementID;
- adapt->HalData->LastHMEBoxNum =
- (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
+ if (CmdLen <= 3) {
+ memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, CmdLen);
+ } else {
+ memcpy((u8 *)(&h2c_cmd) + 1, pCmdBuffer, 3);
+ ext_cmd_len = CmdLen - 3;
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer + 3, ext_cmd_len);
+
+ /* Write Ext command */
+ msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++)
+ usb_write8(adapt, msgbox_ex_addr + cmd_idx, *((u8 *)(&h2c_cmd_ex) + cmd_idx));
+ }
+ /* Write command */
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++)
+ usb_write8(adapt, msgbox_addr + cmd_idx, *((u8 *)(&h2c_cmd) + cmd_idx));
- } while ((!bcmd_down) && (retry_cnts--));
+ adapt->HalData->LastHMEBoxNum =
+ (h2c_box_num + 1) % RTL88E_MAX_H2C_BOX_NUMS;
ret = _SUCCESS;
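[Editor's note] Unrolling the do/while is behavior-preserving: bcmd_down was set unconditionally on the first pass, so the removed loop never actually retried. After a successful write the mailbox index still rotates as before (assuming the usual four H2C boxes on this chip):

	/* LastHMEBoxNum cycles 0 -> 1 -> 2 -> 3 -> 0 -> ... via
	 * (h2c_box_num + 1) % RTL88E_MAX_H2C_BOX_NUMS
	 */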
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 086f98d38cba..57ae0e83dd3e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -552,7 +552,6 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
} else {
pHalData->AntDivCfg = 0;
- pHalData->TRxAntDivType = pHalData->TRxAntDivType; /* The value in the driver setting of device manager. */
}
DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index c0d51ba70a75..1cf8cff9a2a4 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -22,8 +22,7 @@ int rtw_hal_init_recv_priv(struct adapter *padapter)
int i, res = _SUCCESS;
struct recv_buf *precvbuf;
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8188eu_recv_tasklet,
+ tasklet_init(&precvpriv->recv_tasklet, rtl8188eu_recv_tasklet,
(unsigned long)padapter);
/* init recv_buf */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index ab94ad9d608a..2808f2b119bf 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -17,8 +17,7 @@ s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
- tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
+ tasklet_init(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet,
(unsigned long)adapt);
return _SUCCESS;
}
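[Editor's note] This hunk and the analogous rtl8192e/rtl8192u conversions below all move to the proper pre-tasklet_setup callback signature instead of casting the function pointer type, which relies on undefined behaviour. A minimal sketch of the resulting pattern (example_* names are hypothetical):

	#include <linux/interrupt.h>

	static void example_tasklet_fn(unsigned long priv)
	{
		struct adapter *adapt = (struct adapter *)priv;

		/* ...bottom-half work on adapt... */
	}

	static void example_tasklet_setup(struct adapter *adapt,
					  struct tasklet_struct *t)
	{
		tasklet_init(t, example_tasklet_fn, (unsigned long)adapt);
	}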
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index c2c7ef974dc5..23251ffa8404 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -43,7 +43,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_tasklet(void *priv);
+void rtl8188eu_recv_tasklet(unsigned long priv);
void rtl8188e_process_phy_info(struct adapter *padapter,
struct recv_frame *prframe);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 421e9f45306f..c6c2ad20d9cf 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -148,7 +148,7 @@ void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
#define hal_xmit_handler rtl8188eu_xmit_buf_handler
-void rtl8188eu_xmit_tasklet(void *priv);
+void rtl8188eu_xmit_tasklet(unsigned long priv);
bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 3ec53761e9fd..7a9c8ff0daa9 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -82,7 +82,6 @@ u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data);
void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset,
u16 _size_byte, u8 *pbuf);
-void efuse_power_switch(struct adapter *adapt, u8 write, u8 pwrstate);
int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data);
bool Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data);
void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index dc685a14aeb8..6165adafc451 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -354,6 +354,6 @@ void rtw_free_all_stainfo(struct adapter *adapt);
struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
-u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+bool rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
#endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index ec5835d1aa8c..710c33fd4965 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -148,17 +148,10 @@ static char *translate_scan(struct adapter *padapter,
else
snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
} else {
- if (pnetwork->network.Configuration.DSConfig > 14) {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
- } else {
- if (ht_cap)
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
- }
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
}
start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
@@ -650,17 +643,10 @@ static int rtw_wx_get_name(struct net_device *dev,
else
snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
} else {
- if (pcur_bss->Configuration.DSConfig > 14) {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
- } else {
- if (ht_cap)
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
- else
- snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
- }
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
}
} else {
snprintf(wrqu->name, IFNAMSIZ, "unassociated");
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index aaab0d577453..3cd6da1f843d 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -773,10 +773,10 @@ void usb_write_port_cancel(struct adapter *padapter)
}
}
-void rtl8188eu_recv_tasklet(void *priv)
+void rtl8188eu_recv_tasklet(unsigned long priv)
{
struct sk_buff *pskb;
- struct adapter *adapt = priv;
+ struct adapter *adapt = (struct adapter *)priv;
struct recv_priv *precvpriv = &adapt->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
@@ -792,9 +792,9 @@ void rtl8188eu_recv_tasklet(void *priv)
}
}
-void rtl8188eu_xmit_tasklet(void *priv)
+void rtl8188eu_xmit_tasklet(unsigned long priv)
{
- struct adapter *adapt = priv;
+ struct adapter *adapt = (struct adapter *)priv;
struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index ef92ce957466..980b850d729a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -1686,11 +1686,10 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
static u32 last_beacon_adc_pwdb;
struct rtllib_hdr_3addr *hdr;
u16 sc;
- unsigned int frag, seq;
+ unsigned int seq;
hdr = (struct rtllib_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
- frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
curr_st->Seq_Num = seq;
if (!prev_st->bIsAMPDU)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index f932cb15e4e5..dace81a7d1ba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -715,8 +715,8 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
if ((wireless_mode == WIRELESS_MODE_N_24G) ||
(wireless_mode == WIRELESS_MODE_N_5G)) {
priv->rtllib->pHTInfo->bEnableHT = 1;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
- __func__, wireless_mode);
+ RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
+ __func__, wireless_mode);
} else {
priv->rtllib->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
@@ -1616,14 +1616,15 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
skb_push(skb, priv->rtllib->tx_headroom);
ret = _rtl92e_tx(dev, skb);
- if (ret != 0)
- kfree_skb(skb);
if (queue_index != MGNT_QUEUE) {
priv->rtllib->stats.tx_bytes += (skb->len -
priv->rtllib->tx_headroom);
priv->rtllib->stats.tx_packets++;
}
+
+ if (ret != 0)
+ kfree_skb(skb);
}
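[Editor's note] The reordering above fixes a use-after-free: the old code could kfree_skb(skb) and then read skb->len for the byte counters. Condensed, the fixed ordering is:

	/* ret = _rtl92e_tx(dev, skb);
	 * stats.tx_bytes += skb->len - tx_headroom;	// read before any free
	 * if (ret != 0)
	 *	kfree_skb(skb);				// free strictly last
	 */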
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index f2f7529e7c80..6e2f620afd14 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2044,8 +2044,9 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
}
-static inline void rtllib_sta_ps(struct rtllib_device *ieee)
+static inline void rtllib_sta_ps(unsigned long data)
{
+ struct rtllib_device *ieee = (struct rtllib_device *)data;
u64 time;
short sleep;
unsigned long flags, flags2;
@@ -3027,9 +3028,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) rtllib_sta_ps,
- (unsigned long)ieee);
+ tasklet_init(&ieee->ps_task, rtllib_sta_ps, (unsigned long)ieee);
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 5c33bcb0db2e..00fea127bdc3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1620,7 +1620,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
for (i = 0; i < network->rates_len; i++) {
network->rates[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates[i]);
#endif
@@ -1647,7 +1647,7 @@ int ieee80211_parse_info_param(struct ieee80211_device *ieee,
for (i = 0; i < network->rates_ex_len; i++) {
network->rates_ex[i] = info_element->data[i];
#ifdef CONFIG_IEEE80211_DEBUG
- p += snprintf(p, sizeof(rates_str) -
+ p += scnprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
network->rates_ex[i]);
#endif
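[Editor's note] Why scnprintf() here and in the wx hunks below: snprintf() returns the length that *would* have been written, so the "p += snprintf(p, size - (p - buf), ...)" idiom can advance p past the buffer once it fills, making the next size argument wrap. scnprintf() returns the bytes actually stored (excluding the NUL), so p stays in bounds. A hedged standalone illustration:

	char buf[8], *p = buf;

	p += scnprintf(p, sizeof(buf) - (p - buf), "%s", "hello ");
	p += scnprintf(p, sizeof(buf) - (p - buf), "%s", "world");
	/* p == buf + 7 here, never past buf + 8 */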
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 33a6af7aad22..90692db81b71 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1683,8 +1683,9 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 1;
}
-static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
+static inline void ieee80211_sta_ps(unsigned long data)
{
+ struct ieee80211_device *ieee = (struct ieee80211_device *)data;
u32 th, tl;
short sleep;
@@ -2331,7 +2332,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee)
/* ensure no-one starts an associating process (thus setting
* the ieee->state to ieee80211_ASSOCIATING) while we
- * have just cheked it and we are going to enable scan.
+ * have just checked it and we are going to enable scan.
* The ieee80211_new_net function is always called with
* lock held (from both ieee80211_softmac_check_all_nets and
* the rx path), so we cannot be in the middle of such function
@@ -2593,9 +2594,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
- tasklet_init(&ieee->ps_task,
- (void(*)(unsigned long)) ieee80211_sta_ps,
- (unsigned long)ieee);
+ tasklet_init(&ieee->ps_task, ieee80211_sta_ps, (unsigned long)ieee);
}
void ieee80211_softmac_free(struct ieee80211_device *ieee)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 9dd5c04181ea..33c596f9ec96 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -109,7 +109,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
/* Add basic and extended rates */
max_rate = 0;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
for (i = 0, j = 0; i < network->rates_len; ) {
if (j < network->rates_ex_len &&
((network->rates_ex[j] & 0x7F) <
@@ -119,12 +119,12 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
rate = network->rates[i++] & 0x7F;
if (rate > max_rate)
max_rate = rate;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
}
for (; j < network->rates_ex_len; j++) {
rate = network->rates_ex[j] & 0x7F;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
"%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
if (rate > max_rate)
max_rate = rate;
@@ -214,7 +214,7 @@ static inline char *rtl819x_translate_scan(struct ieee80211_device *ieee,
* for given network. */
iwe.cmd = IWEVCUSTOM;
p = custom;
- p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ p += scnprintf(p, MAX_CUSTOM_LEN - (p - custom),
" Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
iwe.u.data.length = p - custom;
if (iwe.u.data.length)
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index b169460b9f26..63e0f7b1b852 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -43,8 +43,8 @@ void phy_set_rf8256_bandwidth(struct net_device *dev, enum ht_channel_width Band
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
if (priv->card_8192_version == VERSION_819XU_A ||
- priv->card_8192_version ==
- VERSION_819XU_B) { /* 8256 D-cut, E-cut, xiong: consider it later! */
+ priv->card_8192_version == VERSION_819XU_B) {
+ /* 8256 D-cut, E-cut, xiong: consider it later! */
rtl8192_phy_SetRFReg(dev,
(enum rf90_radio_path_e)eRFPath,
0x0b, bMask12Bits, 0x100); /* phy para:1ba */
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 2821411878ce..7e2cabd16e88 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -98,8 +98,6 @@ static char *ifname = "wlan%d";
static int hwwep = 1; /* default use hw. set 0 to use software security */
static int channels = 0x3fff;
-
-
module_param(ifname, charp, 0644);
module_param(hwwep, int, 0644);
module_param(channels, int, 0644);
@@ -112,7 +110,6 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void rtl8192_usb_disconnect(struct usb_interface *intf);
-
static struct usb_driver rtl8192_usb_driver = {
.name = RTL819XU_MODULE_NAME, /* Driver name */
.id_table = rtl8192_usb_id_tbl, /* PCI_ID table */
@@ -122,7 +119,6 @@ static struct usb_driver rtl8192_usb_driver = {
.resume = NULL, /* PM resume fn */
};
-
struct CHANNEL_LIST {
u8 Channel[32];
u8 Len;
@@ -207,9 +203,6 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
}
}
-
-
-
static void CamResetAllEntry(struct net_device *dev)
{
u32 ulcommand = 0;
@@ -297,7 +290,6 @@ int write_nic_byte(struct net_device *dev, int indx, u8 data)
return 0;
}
-
int write_nic_word(struct net_device *dev, int indx, u16 data)
{
int status;
@@ -324,7 +316,6 @@ int write_nic_word(struct net_device *dev, int indx, u16 data)
return 0;
}
-
int write_nic_dword(struct net_device *dev, int indx, u32 data)
{
int status;
@@ -343,7 +334,6 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
usbdata, 4, HZ / 2);
kfree(usbdata);
-
if (status < 0) {
netdev_err(dev, "%s TimeOut! status: %d\n", __func__, status);
return status;
@@ -352,8 +342,6 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
return 0;
}
-
-
int read_nic_byte(struct net_device *dev, int indx, u8 *data)
{
int status;
@@ -379,8 +367,6 @@ int read_nic_byte(struct net_device *dev, int indx, u8 *data)
return 0;
}
-
-
int read_nic_word(struct net_device *dev, int indx, u16 *data)
{
int status;
@@ -628,13 +614,13 @@ static void rtl8192_proc_init_one(struct net_device *dev)
return;
proc_create_single("stats-rx", S_IFREG | S_IRUGO, dir,
- proc_get_stats_rx);
+ proc_get_stats_rx);
proc_create_single("stats-tx", S_IFREG | S_IRUGO, dir,
- proc_get_stats_tx);
+ proc_get_stats_tx);
proc_create_single("stats-ap", S_IFREG | S_IRUGO, dir,
- proc_get_stats_ap);
+ proc_get_stats_ap);
proc_create_single("registers", S_IFREG | S_IRUGO, dir,
- proc_get_registers);
+ proc_get_registers);
}
static void rtl8192_proc_remove_one(struct net_device *dev)
@@ -788,7 +774,6 @@ void rtl8192_set_rxconf(struct net_device *dev)
rxconf = rxconf | RCR_CBSSID;
}
-
if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
rxconf = rxconf | RCR_AICV;
rxconf = rxconf | RCR_APWRMGT;
@@ -797,7 +782,6 @@ void rtl8192_set_rxconf(struct net_device *dev)
if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
rxconf = rxconf | RCR_ACRC32;
-
rxconf = rxconf & ~RX_FIFO_THRESHOLD_MASK;
rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE << RX_FIFO_THRESHOLD_SHIFT);
rxconf = rxconf & ~MAX_RX_DMA_MASK;
@@ -901,13 +885,11 @@ static u32 rtl819xusb_rx_command_packet(struct net_device *dev,
return status;
}
-
static void rtl8192_data_hard_stop(struct net_device *dev)
{
/* FIXME !! */
}
-
static void rtl8192_data_hard_resume(struct net_device *dev)
{
/* FIXME !! */
@@ -951,7 +933,6 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
-
spin_lock_irqsave(&priv->tx_lock, flags);
memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
@@ -1123,7 +1104,6 @@ static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
}
}
-
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
@@ -1188,7 +1168,6 @@ static void rtl8192_net_update(struct net_device *dev)
*/
void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
-
}
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
@@ -1232,6 +1211,8 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
return 0;
DMESGE("Error TX CMD URB, error %d", status);
+ dev_kfree_skb(skb);
+ usb_free_urb(tx_urb);
return -1;
}
@@ -1389,7 +1370,6 @@ static u8 MRateToHwRate8190Pci(u8 rate)
return ret;
}
-
static u8 QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
{
u8 tmp_Short;
@@ -1422,7 +1402,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
(struct tx_fwinfo_819x_usb *)(skb->data + USB_HWDESC_HEADER_LEN);
struct usb_device *udev = priv->udev;
int pend;
- int status;
+ int status, rt = -1;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
unsigned int idx_pipe;
@@ -1566,8 +1546,10 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
}
if (bSend0Byte) {
tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
- if (!tx_urb_zero)
- return -ENOMEM;
+ if (!tx_urb_zero) {
+ rt = -ENOMEM;
+ goto error;
+ }
usb_fill_bulk_urb(tx_urb_zero, udev,
usb_sndbulkpipe(udev, idx_pipe),
&zero, 0, tx_zero_isr, dev);
@@ -1577,7 +1559,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
"Error TX URB for zero byte %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+ goto error;
}
}
netif_trans_update(dev);
@@ -1588,7 +1570,12 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d",
atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
status);
- return -1;
+
+error:
+ dev_kfree_skb_any(skb);
+ usb_free_urb(tx_urb);
+ usb_free_urb(tx_urb_zero);
+ return rt;
}
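[Editor's note] The single error label above works because usb_free_urb(NULL) is documented as a no-op and skb is always valid at that point, so both URB pointers can be freed unconditionally no matter which allocation or submission failed. The rtl819xU_tx_cmd fix earlier in this file uses the same idea:

	/* error path invariant for the hunk above:
	 *   dev_kfree_skb_any(skb);	skb always owned by us here
	 *   usb_free_urb(tx_urb);	may be NULL: no-op
	 *   usb_free_urb(tx_urb_zero);	may be NULL: no-op
	 */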
static short rtl8192_usb_initendpoints(struct net_device *dev)
@@ -1742,7 +1729,6 @@ static const struct ieee80211_qos_parameters def_qos_parameters = {
{0, 0, 0, 0} /* tx_op_limit */
};
-
static void rtl8192_update_beacon(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv,
@@ -1913,15 +1899,12 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
-
return 0;
}
-
-static int rtl8192_handle_assoc_response(
- struct net_device *dev,
- struct ieee80211_assoc_response_frame *resp,
- struct ieee80211_network *network)
+static int rtl8192_handle_assoc_response(struct net_device *dev,
+ struct ieee80211_assoc_response_frame *resp,
+ struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1929,7 +1912,6 @@ static int rtl8192_handle_assoc_response(
return 0;
}
-
static void rtl8192_update_ratr_table(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2211,14 +2193,13 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
static void rtl819x_watchdog_wqcallback(struct work_struct *work);
-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+static void rtl8192_irq_rx_tasklet(unsigned long data);
/* init tasklet and wait_queue here. only 2.6 above kernel is considered */
#define DRV_NAME "wlan0"
static void rtl8192_init_priv_task(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-
INIT_WORK(&priv->reset_wq, rtl8192_restart);
INIT_DELAYED_WORK(&priv->watch_dog_wq,
@@ -2233,8 +2214,7 @@ static void rtl8192_init_priv_task(struct net_device *dev)
InitialGainOperateWorkItemCallBack);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
- tasklet_init(&priv->irq_rx_tasklet,
- (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
+ tasklet_init(&priv->irq_rx_tasklet, rtl8192_irq_rx_tasklet,
(unsigned long)priv);
}
@@ -2515,7 +2495,6 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
break;
}
-
if (priv->rf_type == RF_1T2R)
RT_TRACE(COMP_EPROM, "\n1T2R config\n");
else
@@ -2668,7 +2647,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
/* Set Auto Rate fallback control */
}
-
/* InitializeAdapter and PhyCfg */
static bool rtl8192_adapter_start(struct net_device *dev)
{
@@ -2803,14 +2781,12 @@ static bool rtl8192_adapter_start(struct net_device *dev)
RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __func__);
}
-
if (priv->ieee80211->FwRWRF)
/* We can force firmware to do RF-R/W */
priv->Rf_Mode = RF_OP_By_FW;
else
priv->Rf_Mode = RF_OP_By_SW_3wire;
-
rtl8192_phy_updateInitGain(dev);
/*--set CCK and OFDM Block "ON"--*/
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
@@ -2865,7 +2841,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
}
write_nic_byte(dev, 0x87, 0x0);
-
return init_status;
}
@@ -2996,7 +2971,6 @@ static RESET_TYPE RxCheckStuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-
/**
* This function is called by Checkforhang to check whether we should
* ask OS to reset driver
@@ -3052,8 +3026,6 @@ static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl8192_up(struct net_device *dev);
static int rtl8192_close(struct net_device *dev);
-
-
static void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
@@ -3070,7 +3042,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
RT_TRACE(COMP_SEC, "%s:\n", __func__);
-
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
for (EntryId = 0; EntryId < 4; EntryId++) {
@@ -3096,8 +3067,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
MacAddr, 0, NULL);
}
-
-
if (priv->ieee80211->group_key_type == KEY_TYPE_TKIP) {
MacAddr = CAM_CONST_BROAD;
for (EntryId = 1; EntryId < 4; EntryId++) {
@@ -3134,7 +3103,6 @@ static void rtl819x_ifsilentreset(struct net_device *dev)
int reset_status = 0;
struct ieee80211_device *ieee = priv->ieee80211;
-
/* If we need to check CCK stop, please uncomment this line. */
/* bStuck = Adapter->HalFunc.CheckHWStopHandler(Adapter); */
@@ -3258,7 +3226,6 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
}
}
-
static void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -3369,7 +3336,6 @@ static int _rtl8192_up(struct net_device *dev)
return 0;
}
-
static int rtl8192_open(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3381,7 +3347,6 @@ static int rtl8192_open(struct net_device *dev)
return ret;
}
-
int rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3392,7 +3357,6 @@ int rtl8192_up(struct net_device *dev)
return _rtl8192_up(dev);
}
-
static int rtl8192_close(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3440,7 +3404,6 @@ int rtl8192_down(struct net_device *dev)
deinit_hal_dm(dev);
del_timer_sync(&priv->watch_dog_timer);
-
ieee80211_softmac_stop_protocol(priv->ieee80211);
memset(&priv->ieee80211->current_network, 0,
offsetof(struct ieee80211_network, list));
@@ -3449,7 +3412,6 @@ int rtl8192_down(struct net_device *dev)
return 0;
}
-
void rtl8192_commit(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3495,7 +3457,6 @@ static void r8192_set_multicast(struct net_device *dev)
priv->promisc = promisc;
}
-
static int r8192_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3525,7 +3486,6 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
mutex_lock(&priv->wx_mutex);
-
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
ret = -EINVAL;
goto out;
@@ -3778,7 +3738,6 @@ static long rtl819x_translate_todbm(u8 signal_strength_index)
return signal_power;
}
-
/* We can not declare RSSI/EVM total value of sliding window to
* be a local static. Otherwise, it may increase when we return from S3/S4. The
* value will be kept in memory or disk. Declare the value in the adaptor
@@ -3841,7 +3800,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
-
/* only rtl8190 supported
* rtl8190_process_cck_rxpathsel(priv,pprevious_stats);
*/
@@ -3851,17 +3809,15 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
/* record the general signal strength to the sliding window. */
-
/* <2> Showed on UI for engineering
* hardware does not provide rssi information for each rf path in CCK
*/
if (!pprevious_stats->bIsCCK &&
(pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA)) {
for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++) {
- if (!rtl8192_phy_CheckIsLegalRFPath(
- priv->ieee80211->dev, rfpath))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev,
+ rfpath))
continue;
-
if (priv->stats.rx_rssi_percentage[rfpath] == 0)
priv->stats.rx_rssi_percentage[rfpath] =
pprevious_stats->RxMIMOSignalStrength[rfpath];
@@ -3881,7 +3837,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
}
}
-
/* Check PWDB. */
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
pprevious_stats->bIsCCK ? "CCK" : "OFDM",
@@ -3908,7 +3863,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
pprevious_stats->bIsCCK ? "CCK" : "OFDM",
pprevious_stats->RxPWDBAll);
-
if (pprevious_stats->bPacketToSelf ||
pprevious_stats->bPacketBeacon ||
pprevious_stats->bToSelfBA) {
@@ -4083,7 +4037,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
u8 rf_rx_num = 0;
u8 sq;
-
priv->stats.numqry_phystatus++;
is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
@@ -4192,8 +4145,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
else
continue;
- if (!rtl8192_phy_CheckIsLegalRFPath(
- priv->ieee80211->dev, i))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
continue;
rx_pwr[i] =
@@ -4214,7 +4166,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
precord_stats->RxMIMOSignalStrength[i] = (u8)RSSI;
}
-
/* (2)PWDB, Average PWDB calculated by hardware
* (for rate adaptive)
*/
@@ -4259,7 +4210,6 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
evm & 0xff;
}
-
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
@@ -4288,16 +4238,14 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
}
} /* QueryRxPhyStatus8190Pci */
-static void rtl8192_record_rxdesc_forlateruse(
- struct ieee80211_rx_stats *psrc_stats,
- struct ieee80211_rx_stats *ptarget_stats)
+static void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
+ struct ieee80211_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
ptarget_stats->Seq_Num = psrc_stats->Seq_Num;
}
-
static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
struct ieee80211_rx_stats *pstats,
struct rx_drvinfo_819x_usb *pdrvinfo)
@@ -4341,8 +4289,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
bToSelfBA = true;
}
-
-
if (bpacket_match_bssid)
priv->stats.numpacket_matchbssid++;
if (bpacket_toself)
@@ -4383,7 +4329,6 @@ UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
/* 1: short preamble/GI, 0: long preamble/GI */
u32 preamble_guardinterval;
-
if (stats->bCRC)
rcvType = 2;
else if (stats->bICV)
@@ -4491,7 +4436,6 @@ UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
-
static void query_rxdesc_status(struct sk_buff *skb,
struct ieee80211_rx_stats *stats,
bool bIsRxAggrSubframe)
@@ -4526,8 +4470,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
* Driver info are written to the RxBuffer following rx desc
*/
if (stats->RxDrvInfoSize != 0) {
- driver_info = (struct rx_drvinfo_819x_usb *)(
- skb->data
+ driver_info = (struct rx_drvinfo_819x_usb *)(skb->data
+ sizeof(struct rx_desc_819x_usb)
+ stats->RxBufShift
);
@@ -4556,7 +4499,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
stats->bShortPreamble = driver_info->SPLCP;
-
UpdateReceivedRateHistogramStatistics8190(dev, stats);
stats->bIsAMPDU = (driver_info->PartAggr == 1);
@@ -4569,7 +4511,7 @@ static void query_rxdesc_status(struct sk_buff *skb,
/* Rx A-MPDU */
if (driver_info->FirstAGGR == 1 || driver_info->PartAggr == 1)
RT_TRACE(COMP_RXDESC,
- "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
+ "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
}
@@ -4636,9 +4578,8 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
}
}
-static void rtl819xusb_process_received_packet(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+static void rtl819xusb_process_received_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -4671,8 +4612,6 @@ static void rtl819xusb_process_received_packet(
#ifdef SW_CRC_CHECK
SwCrcCheck();
#endif
-
-
}
static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
@@ -4691,7 +4630,6 @@ static void query_rx_cmdpkt_desc_status(struct sk_buff *skb,
stats->ntotalfrag = 1;
}
-
static void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
@@ -4716,8 +4654,9 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
}
}
-static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_rx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
struct sk_buff *skb;
struct rtl8192_rx_info *info;
@@ -4759,7 +4698,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_start_xmit = ieee80211_xmit,
};
-
/****************************************************************************
* ---------------------------- USB_STUFF---------------------------
*****************************************************************************/
@@ -4815,7 +4753,6 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
rtl8192_proc_init_one(dev);
-
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -4843,7 +4780,6 @@ static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
cancel_work_sync(&priv->qos_activate);
}
-
static void rtl8192_usb_disconnect(struct usb_interface *intf)
{
struct net_device *dev = usb_get_intfdata(intf);
@@ -4907,7 +4843,6 @@ static int __init rtl8192_usb_module_init(void)
return usb_register(&rtl8192_usb_driver);
}
-
static void __exit rtl8192_usb_module_exit(void)
{
usb_deregister(&rtl8192_usb_driver);
@@ -4949,7 +4884,6 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
write_nic_byte(dev, SECR, SECR_value);
}
-
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
{
@@ -4970,7 +4904,6 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
else
usConfig |= BIT(15) | (KeyType << 2) | KeyIndex;
-
for (i = 0; i < CAM_CONTENT_COUNT; i++) {
TargetCommand = i + CAM_CONTENT_COUNT * EntryNo;
TargetCommand |= BIT(31) | BIT(16);
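The r8192U_core.c hunks above also convert the rx tasklet to the conventional tasklet calling style: the callback takes the unsigned long cookie that was passed to tasklet_init() and casts it back to the private pointer itself, instead of the driver casting the function pointer at registration time. A minimal sketch of the pattern (struct and function names hypothetical):

        #include <linux/interrupt.h>

        struct my_priv {
                struct tasklet_struct irq_rx_tasklet;
                /* rx queue, locks, ... */
        };

        /* Tasklet callbacks receive the cookie given to tasklet_init(). */
        static void my_irq_rx_tasklet(unsigned long data)
        {
                struct my_priv *priv = (struct my_priv *)data;

                /* drain priv's rx queue here */
        }

        static void my_setup(struct my_priv *priv)
        {
                /* No (void (*)(unsigned long)) cast needed any more: the
                 * callback already has the prototype the API expects.
                 */
                tasklet_init(&priv->irq_rx_tasklet, my_irq_rx_tasklet,
                             (unsigned long)priv);
        }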
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index e064f43fd8b6..bc98cdaf61ec 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -169,19 +169,20 @@ static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
- /* 87B have to S/W beacon for DTM encryption_cmn. */
- if (priv->ieee80211->current_network.mode == IEEE_A ||
- priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G &&
- (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
- tx_rate = 60;
- DMESG("send beacon frame tx rate is 6Mbpm\n");
- } else {
- tx_rate = 10;
- DMESG("send beacon frame tx rate is 1Mbpm\n");
- }
- rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
+ /* 87B have to S/W beacon for DTM encryption_cmn. */
+ if (priv->ieee80211->current_network.mode == IEEE_A ||
+ priv->ieee80211->current_network.mode == IEEE_N_5G ||
+ (priv->ieee80211->current_network.mode == IEEE_N_24G &&
+ (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
+ tx_rate = 60;
+ DMESG("send beacon frame tx rate is 6Mbpm\n");
+ } else {
+ tx_rate = 10;
+ DMESG("send beacon frame tx rate is 1Mbpm\n");
+ }
+
+ rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
}
/*-----------------------------------------------------------------------------
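The reindented cmdpkt_beacontimerinterrupt_819xusb() body is unchanged logic: pure 802.11a/n networks (or 11n at 2.4 GHz with CCK support off) get an OFDM basic rate, everything else falls back to the 1 Mb/s CCK rate. The driver appears to encode rates in units of 100 kb/s, which is why 60 and 10 correspond to the "6Mbpm"/"1Mbpm" (sic) debug messages; a hypothetical conversion helper:

        /* Assumption: tx_rate is in 100 kb/s units, so 60 -> 6 Mb/s (OFDM)
         * and 10 -> 1 Mb/s (CCK).
         */
        static inline unsigned int tx_rate_to_kbps(u16 tx_rate)
        {
                return tx_rate * 100;
        }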
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index db99129d3169..5901026949f2 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -75,7 +75,7 @@ static void BlinkWorkItemCallback(struct work_struct *work);
* Initialize an LED_871x object.
*/
static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
- enum LED_PIN_871x LedPin)
+ enum LED_PIN_871x LedPin)
{
pLed->padapter = padapter;
pLed->LedPin = LedPin;
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index 9901815604f4..00ea0beb12c9 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -33,7 +33,7 @@ static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8};
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
-static void recv_tasklet(void *priv);
+static void recv_tasklet(unsigned long priv);
void r8712_init_recv_priv(struct recv_priv *precvpriv,
struct _adapter *padapter)
@@ -61,13 +61,12 @@ void r8712_init_recv_priv(struct recv_priv *precvpriv,
precvbuf->ref_cnt = 0;
precvbuf->adapter = padapter;
list_add_tail(&precvbuf->list,
- &(precvpriv->free_recv_buf_queue.queue));
+ &(precvpriv->free_recv_buf_queue.queue));
precvbuf++;
}
precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- tasklet_init(&precvpriv->recv_tasklet,
- (void(*)(unsigned long))recv_tasklet,
- (unsigned long)padapter);
+ tasklet_init(&precvpriv->recv_tasklet, recv_tasklet,
+ (unsigned long)padapter);
skb_queue_head_init(&precvpriv->rx_skb_queue);
skb_queue_head_init(&precvpriv->free_recv_skb_queue);
@@ -119,7 +118,7 @@ void r8712_init_recvbuf(struct _adapter *padapter, struct recv_buf *precvbuf)
}
void r8712_free_recvframe(union recv_frame *precvframe,
- struct __queue *pfree_recv_queue)
+ struct __queue *pfree_recv_queue)
{
unsigned long irqL;
struct _adapter *padapter = precvframe->u.hdr.adapter;
@@ -140,7 +139,7 @@ void r8712_free_recvframe(union recv_frame *precvframe,
}
static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
- struct recv_stat *prxstat)
+ struct recv_stat *prxstat)
{
u16 drvinfo_sz;
@@ -177,7 +176,7 @@ static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib,
/*perform defrag*/
static union recv_frame *recvframe_defrag(struct _adapter *adapter,
- struct __queue *defrag_q)
+ struct __queue *defrag_q)
{
struct list_head *plist, *phead;
u8 wlanhdr_offset;
@@ -289,7 +288,6 @@ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter,
r8712_free_recvframe(precv_frame, pfree_recv_queue);
prtnframe = NULL;
}
-
}
if ((ismfrag == 0) && (fragnum != 0)) {
/* the last fragment frame
@@ -379,26 +377,26 @@ static void amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe)
/* convert hdr + possible LLC headers into Ethernet header */
eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7];
if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
- eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType
*/
skb_pull(sub_skb, SNAP_SIZE);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
- ETH_ALEN);
+ ETH_ALEN);
} else {
__be16 len;
/* Leave Ethernet header part of hdr and full payload */
len = htons(sub_skb->len);
memcpy(skb_push(sub_skb, 2), &len, 2);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src,
- ETH_ALEN);
+ ETH_ALEN);
memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst,
- ETH_ALEN);
+ ETH_ALEN);
}
/* Indicate the packets to upper layer */
if (sub_skb) {
@@ -438,7 +436,6 @@ void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf)
r8712_event_handle(padapter, (__le32 *)poffset);
poffset += (cmd_len + 8);/*8 bytes alignment*/
} while (le32_to_cpu(voffset) & BIT(31));
-
}
static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
@@ -472,7 +469,7 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl,
}
static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
struct list_head *phead, *plist;
union recv_frame *pnextrframe;
@@ -499,8 +496,8 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
}
int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
- struct recv_reorder_ctrl *preorder_ctrl,
- int bforced)
+ struct recv_reorder_ctrl *preorder_ctrl,
+ int bforced)
{
struct list_head *phead, *plist;
union recv_frame *prframe;
@@ -530,7 +527,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
plist = plist->next;
list_del_init(&(prframe->u.hdr.list));
if (SN_EQUAL(preorder_ctrl->indicate_seq,
- pattrib->seq_num))
+ pattrib->seq_num))
preorder_ctrl->indicate_seq =
(preorder_ctrl->indicate_seq + 1) % 4096;
/*indicate this recv_frame*/
@@ -555,7 +552,7 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
}
static int recv_indicatepkt_reorder(struct _adapter *padapter,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
unsigned long irql;
struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
@@ -624,7 +621,7 @@ void r8712_reordering_ctrl_timeout_handler(void *pcontext)
}
static int r8712_process_recv_indicatepkts(struct _adapter *padapter,
- union recv_frame *prframe)
+ union recv_frame *prframe)
{
int retval = _SUCCESS;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1080,10 +1077,10 @@ static void recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
} while ((transfer_len > 0) && pkt_cnt > 0);
}
-static void recv_tasklet(void *priv)
+static void recv_tasklet(unsigned long priv)
{
struct sk_buff *pskb;
- struct _adapter *padapter = priv;
+ struct _adapter *padapter = (struct _adapter *)priv;
struct recv_priv *precvpriv = &padapter->recvpriv;
while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
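The reindented condition in amsdu_to_msdu() is the standard A-MSDU subframe to Ethernet conversion: a payload that starts with an RFC 1042 or bridge-tunnel SNAP header has the encapsulation stripped so only the EtherType is kept, except for AARP and IPX, which must keep the bridge-tunnel form; anything else gets an 802.3 length field instead. A self-contained sketch of the test (helper name hypothetical; SNAP_SIZE is 6 in this driver):

        #include <linux/if_ether.h>
        #include <linux/string.h>

        #define SNAP_SIZE 6

        static bool strip_snap_header(const u8 *data, unsigned int len, u16 eth_type)
        {
                static const u8 rfc1042[SNAP_SIZE] =
                        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
                static const u8 bridge_tunnel[SNAP_SIZE] =
                        { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

                if (len < 8)
                        return false;   /* too short for SNAP + EtherType */

                /* AARP and IPX stay bridge-tunnel encapsulated on purpose */
                if (!memcmp(data, rfc1042, SNAP_SIZE) &&
                    eth_type != ETH_P_AARP && eth_type != ETH_P_IPX)
                        return true;

                return !memcmp(data, bridge_tunnel, SNAP_SIZE);
        }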
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 944336e0d2e2..363b82e3e7c6 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -142,9 +142,9 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "wpa_ie=");
for (i = 0; i < wpa_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ n += scnprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", wpa_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
+ if (n == MAX_WPA_IE_LEN-1)
break;
}
memset(iwe, 0, sizeof(*iwe));
@@ -162,9 +162,9 @@ static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
memset(buf, 0, MAX_WPA_IE_LEN);
n = sprintf(buf, "rsn_ie=");
for (i = 0; i < rsn_len; i++) {
- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+ n += scnprintf(buf + n, MAX_WPA_IE_LEN - n,
"%02x", rsn_ie[i]);
- if (n >= MAX_WPA_IE_LEN)
+ if (n == MAX_WPA_IE_LEN-1)
break;
}
memset(iwe, 0, sizeof(*iwe));
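The snprintf-to-scnprintf switch above is the substantive fix in this file: snprintf() returns the length the output would have needed, so once the buffer is full n keeps growing past MAX_WPA_IE_LEN and buf + n in the next iteration points outside the buffer. scnprintf() returns the number of bytes actually stored (excluding the trailing NUL), so n saturates at MAX_WPA_IE_LEN - 1 and the new equality test exits the loop exactly when the buffer fills. A minimal sketch of the corrected loop shape:

        char buf[MAX_WPA_IE_LEN];
        int n, i;

        n = scnprintf(buf, sizeof(buf), "wpa_ie=");
        for (i = 0; i < wpa_len; i++) {
                /* scnprintf() reports what was written, so buf + n always
                 * stays inside buf and n never exceeds sizeof(buf) - 1.
                 */
                n += scnprintf(buf + n, sizeof(buf) - n, "%02x", wpa_ie[i]);
                if (n == sizeof(buf) - 1)
                        break;  /* buffer full */
        }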
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index aa8f8500cbb2..29b85330815f 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -231,8 +231,7 @@ end_of_mp_stop_test:
return _SUCCESS;
}
-uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -283,17 +282,15 @@ uint oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- uint status = RNDIS_STATUS_SUCCESS;
if (poid_par_priv->type_of_oid != SET_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
if (mp_stop_test(Adapter) == _FAIL)
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
+ return RNDIS_STATUS_NOT_ACCEPTED;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -328,8 +325,7 @@ uint oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_tx_power_control_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -347,71 +343,61 @@ uint oid_rt_pro_set_tx_power_control_hdl(
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_tx_packet_sent_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.tx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_rx_packet_received_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.rx_pktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_query_rx_packet_crc32_error_hdl(
- struct oid_par_priv *poid_par_priv)
+uint oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = RNDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return RNDIS_STATUS_NOT_ACCEPTED;
+
if (poid_par_priv->information_buf_len == sizeof(u32)) {
*(u32 *)poid_par_priv->information_buf =
Adapter->mppriv.rx_crcerrpktcount;
*poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -422,10 +408,8 @@ uint oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
- uint status = RNDIS_STATUS_SUCCESS;
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -435,13 +419,12 @@ uint oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv
Adapter->mppriv.rx_pktcount = 0;
Adapter->mppriv.rx_crcerrpktcount = 0;
} else {
- status = RNDIS_STATUS_INVALID_LENGTH;
+ return RNDIS_STATUS_INVALID_LENGTH;
}
- return status;
+ return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -452,8 +435,7 @@ uint oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -468,8 +450,7 @@ uint oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -484,8 +465,7 @@ uint oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -497,8 +477,7 @@ uint oid_rt_pro_set_modulation_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -511,8 +490,7 @@ uint oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -525,8 +503,7 @@ uint oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -539,8 +516,7 @@ uint oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -553,8 +529,7 @@ uint oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_pro_read_register_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -735,8 +710,7 @@ uint oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
}
/*----------------------------------------------------------------------*/
-uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
@@ -829,8 +803,7 @@ uint oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
return RNDIS_STATUS_SUCCESS;
}
-uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv
- *poid_par_priv)
+uint oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
{
struct _adapter *Adapter = (struct _adapter *)
(poid_par_priv->adapter_context);
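The rtl871x_mp_ioctl.c hunks apply one refactor to a whole family of OID handlers: drop the status accumulator and return at each decision point. Behavior is identical; the win is one fewer local and flatter control flow. A condensed before/after sketch (handler names hypothetical):

        /* before: status threaded through every branch */
        static uint query_hdl_old(struct oid_par_priv *p)
        {
                uint status = RNDIS_STATUS_SUCCESS;

                if (p->type_of_oid != QUERY_OID) {
                        status = RNDIS_STATUS_NOT_ACCEPTED;
                        return status;
                }
                if (p->information_buf_len != sizeof(u32))
                        status = RNDIS_STATUS_INVALID_LENGTH;
                return status;
        }

        /* after: each exit returns its own status */
        static uint query_hdl_new(struct oid_par_priv *p)
        {
                if (p->type_of_oid != QUERY_OID)
                        return RNDIS_STATUS_NOT_ACCEPTED;
                if (p->information_buf_len != sizeof(u32))
                        return RNDIS_STATUS_INVALID_LENGTH;
                return RNDIS_STATUS_SUCCESS;
        }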
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index cc5809e49e35..f0b85338b567 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -143,9 +143,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
INIT_WORK(&padapter->wk_filter_rx_ff0, r8712_SetFilter);
alloc_hwxmits(padapter);
init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- tasklet_init(&pxmitpriv->xmit_tasklet,
- (void(*)(unsigned long))r8712_xmit_bh,
- (unsigned long)padapter);
+ tasklet_init(&pxmitpriv->xmit_tasklet, r8712_xmit_bh,
+ (unsigned long)padapter);
return 0;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index b14da38bf652..f227828094bf 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -277,7 +277,7 @@ int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe);
int r8712_xmit_enqueue(struct _adapter *padapter,
struct xmit_frame *pxmitframe);
void r8712_xmit_direct(struct _adapter *padapter, struct xmit_frame *pxmitframe);
-void r8712_xmit_bh(void *priv);
+void r8712_xmit_bh(unsigned long priv);
void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
struct xmit_buf *pxmitbuf);
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 9d290bc2fdb7..0045da3bb69a 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -308,10 +308,10 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter)
}
}
-void r8712_xmit_bh(void *priv)
+void r8712_xmit_bh(unsigned long priv)
{
int ret = false;
- struct _adapter *padapter = priv;
+ struct _adapter *padapter = (struct _adapter *)priv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
if (padapter->driver_stopped ||
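Note that the r8712_xmit_bh() prototype in rtl871x_xmit.h and its definition here change in lockstep with the tasklet_init() call in rtl871x_xmit.c: once the function-pointer cast is gone, a stale prototype would be a compile error rather than a silently mismatched cast. Roughly:

        /* rtl871x_xmit.h: declaration now matches the tasklet ABI */
        void r8712_xmit_bh(unsigned long priv);

        /* usb_ops_linux.c: definition recovers the adapter from the cookie */
        void r8712_xmit_bh(unsigned long priv)
        {
                struct _adapter *padapter = (struct _adapter *)priv;

                /* push pending xmit frames for padapter */
        }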
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 6d18d23acdc0..7117d16a30f9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -216,8 +216,9 @@ void expire_timeout_chk(struct adapter *padapter)
/* check auth_queue */
#ifdef DBG_EXPIRATION_CHK
if (phead != plist) {
- DBG_871X(FUNC_NDEV_FMT" auth_list, cnt:%u\n"
- , FUNC_NDEV_ARG(padapter->pnetdev), pstapriv->auth_list_cnt);
+ DBG_871X(FUNC_NDEV_FMT " auth_list, cnt:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ pstapriv->auth_list_cnt);
}
#endif
while (phead != plist) {
@@ -1446,14 +1447,14 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1496,7 +1497,7 @@ static int rtw_ap_set_key(
}
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
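The rtw_ap.c changes are cast and NULL-test cleanups: kfree() takes a const void *, so casting the pointer to u8 * or unsigned char * first does nothing, and kernel style prefers !ptr to ptr == NULL. A minimal sketch of the resulting allocation pattern (assuming the driver's rtw_zmalloc() wrapper and _SUCCESS/_FAIL codes):

        static u8 alloc_two(struct cmd_obj **pph2c, struct set_stakey_parm **ppparm)
        {
                struct cmd_obj *ph2c = rtw_zmalloc(sizeof(*ph2c));
                struct set_stakey_parm *parm;

                if (!ph2c)                      /* preferred over "== NULL" */
                        return _FAIL;

                parm = rtw_zmalloc(sizeof(*parm));
                if (!parm) {
                        kfree(ph2c);            /* no (u8 *) cast needed */
                        return _FAIL;
                }

                *pph2c = ph2c;
                *ppparm = parm;
                return _SUCCESS;
        }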
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 8d93c2f26890..13a9b54b4561 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -372,13 +372,13 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
if ((pcmd->cmdcode != _JoinBss_CMD_) &&
(pcmd->cmdcode != _CreateBss_CMD_)) {
/* free parmbuf in cmd_obj */
- kfree((unsigned char *)pcmd->parmbuf);
+ kfree(pcmd->parmbuf);
}
if (pcmd->rsp != NULL) {
if (pcmd->rspsz != 0) {
/* free rsp in cmd_obj */
- kfree((unsigned char *)pcmd->rsp);
+ kfree(pcmd->rsp);
}
}
@@ -507,19 +507,9 @@ post_process:
cmd_process_time = jiffies_to_msecs(jiffies - cmd_start_time);
if (cmd_process_time > 1000) {
- if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
- DBG_871X(ADPT_FMT" cmd =%d process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- } else if (pcmd->cmdcode == GEN_CMD_CODE(_Set_MLME_EVT)) {
- DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- } else {
- DBG_871X(ADPT_FMT" cmd =%d, process_time =%lu > 1 sec\n",
- ADPT_ARG(pcmd->padapter), pcmd->cmdcode, cmd_process_time);
- /* rtw_warn_on(1); */
- }
+ DBG_871X(ADPT_FMT "cmd= %d process_time= %lu > 1 sec\n",
+ ADPT_ARG(pcmd->padapter), pcmd->cmdcode,
+ cmd_process_time);
}
/* call callback function for post-processed */
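In rtw_cmd.c the three if/else if/else arms printed effectively the same warning, so they collapse into a single DBG_871X() call. The timing itself is the usual jiffies arithmetic; a sketch of the surviving logic (helper name hypothetical):

        static void warn_if_cmd_slow(struct cmd_obj *pcmd, unsigned long start)
        {
                /* unsigned subtraction handles jiffies wraparound */
                unsigned long ms = jiffies_to_msecs(jiffies - start);

                if (ms > 1000)
                        DBG_871X(ADPT_FMT "cmd= %d process_time= %lu > 1 sec\n",
                                 ADPT_ARG(pcmd->padapter), pcmd->cmdcode, ms);
        }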
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 34adf5789c98..71fcb466019a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -19,7 +19,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
int i;
u8 *pbuf;
struct wlan_network *pnetwork;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int res = _SUCCESS;
pmlmepriv->nic_hdl = (u8 *)padapter;
@@ -40,7 +40,7 @@ int rtw_init_mlme_priv(struct adapter *padapter)
pbuf = vzalloc(array_size(MAX_BSS_CNT, sizeof(struct wlan_network)));
- if (pbuf == NULL) {
+ if (!pbuf) {
res = _FAIL;
goto exit;
}
@@ -112,9 +112,8 @@ void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
if (pmlmepriv) {
rtw_free_mlme_priv_ie_data(pmlmepriv);
- if (pmlmepriv->free_bss_buf) {
+ if (pmlmepriv->free_bss_buf)
vfree(pmlmepriv->free_bss_buf);
- }
}
}
@@ -185,10 +184,10 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
/* _irqL irqL; */
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
- if (pnetwork->fixed == true)
+ if (pnetwork->fixed)
return;
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
@@ -209,7 +208,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
pmlmepriv->num_of_scanned--;
-
/* DBG_871X("_rtw_free_network:SSID =%s\n", pnetwork->network.Ssid.Ssid); */
spin_unlock_bh(&free_queue->lock);
@@ -220,10 +218,10 @@ void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
- if (pnetwork == NULL)
+ if (!pnetwork)
return;
- if (pnetwork->fixed == true)
+ if (pnetwork->fixed)
return;
/* spin_lock_irqsave(&free_queue->lock, irqL); */
@@ -301,12 +299,8 @@ void rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
spin_unlock_bh(&scanned_queue->lock);
}
-
-
-
sint rtw_if_up(struct adapter *padapter)
{
-
sint res;
if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
@@ -318,7 +312,6 @@ sint rtw_if_up(struct adapter *padapter)
return res;
}
-
void rtw_generate_random_ibss(u8 *pibss)
{
unsigned long curtime = jiffies;
@@ -329,7 +322,6 @@ void rtw_generate_random_ibss(u8 *pibss)
pibss[3] = (u8)(curtime & 0xff) ;/* p[0]; */
pibss[4] = (u8)((curtime>>8) & 0xff) ;/* p[1]; */
pibss[5] = (u8)((curtime>>16) & 0xff) ;/* p[2]; */
- return;
}
u8 *rtw_get_capability_from_ie(u8 *ie)
@@ -337,7 +329,6 @@ u8 *rtw_get_capability_from_ie(u8 *ie)
return ie + 8 + 2;
}
-
u16 rtw_get_capability(struct wlan_bssid_ex *bss)
{
__le16 val;
@@ -425,7 +416,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea
memcpy((u8 *)&tmps, rtw_get_capability_from_ie(src->IEs), 2);
memcpy((u8 *)&tmpd, rtw_get_capability_from_ie(dst->IEs), 2);
-
s_cap = le16_to_cpu(tmps);
d_cap = le16_to_cpu(tmpd);
@@ -467,7 +457,6 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
{
struct list_head *plist, *phead;
-
struct wlan_network *pwlan = NULL;
struct wlan_network *oldest = NULL;
@@ -482,7 +471,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
- if (pwlan->fixed != true) {
+ if (!pwlan->fixed) {
if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
@@ -579,12 +568,8 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
}
}
-
/*
-
Caller must hold pmlmepriv->lock first.
-
-
*/
void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
{
@@ -625,7 +610,6 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
}
-
/* If we didn't find a match, then get a new network slot to initialize
* with this beacon's information */
/* if (phead == plist) { */
@@ -634,7 +618,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* If there are no more slots, expire the oldest */
/* list_del_init(&oldest->list); */
pnetwork = oldest;
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -655,7 +639,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
goto exit;
}
@@ -739,7 +723,7 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
privacy = pnetwork->network.Privacy;
if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
- if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen))
return true;
else
return false;
@@ -754,15 +738,13 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
if (psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPA2PSK) {
p = rtw_get_ie(pnetwork->network.IEs + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pnetwork->network.IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0) {
+ if (p && ie_len > 0)
bselected = true;
- } else {
+ else
bselected = false;
- }
}
}
-
if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
DBG_871X("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
bselected = false;
@@ -773,7 +755,6 @@ int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwor
bselected = false;
}
-
return bselected;
}
@@ -783,7 +764,6 @@ void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_event\n"));
}
-
void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
{
u32 len;
@@ -800,7 +780,6 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
return;
}
-
spin_lock_bh(&pmlmepriv->lock);
/* update IBSS_network 's timestamp */
@@ -823,21 +802,16 @@ void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
/* lock pmlmepriv->lock when you accessing network_q */
if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) {
- if (pnetwork->Ssid.Ssid[0] == 0) {
+ if (pnetwork->Ssid.Ssid[0] == 0)
pnetwork->Ssid.SsidLength = 0;
- }
rtw_add_network(adapter, pnetwork);
}
exit:
spin_unlock_bh(&pmlmepriv->lock);
-
- return;
}
-
-
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
u8 timer_cancelled = false;
@@ -868,12 +842,11 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
if (timer_cancelled)
_cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
-
spin_lock_bh(&pmlmepriv->lock);
rtw_set_signal_stat_timer(&adapter->recvpriv);
- if (pmlmepriv->to_join == true) {
+ if (pmlmepriv->to_join) {
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
@@ -896,9 +869,8 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
- if (rtw_createbss_cmd(adapter) != _SUCCESS) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
- }
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error =>rtw_createbss_cmd status FAIL\n"));
pmlmepriv->to_join = false;
}
@@ -1009,7 +981,6 @@ static void find_network(struct adapter *adapter)
else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources : pwlan == NULL\n\n"));
-
if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) &&
(adapter->stapriv.asoc_sta_count == 1))
rtw_free_network_nolock(adapter, pwlan);
@@ -1169,9 +1140,8 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
- if (psta == NULL) {
+ if (!psta)
psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
- }
if (psta) { /* update ptarget_sta */
@@ -1189,7 +1159,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta->wireless_mode = pmlmeext->cur_wireless_mode;
psta->raid = networktype_to_raid_ex(padapter, psta);
-
/* sta mode */
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
@@ -1221,7 +1190,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
padapter->securitypriv.wps_ie_len = 0;
}
-
/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
/* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
/* todo: check if AP can send A-MPDU packets */
@@ -1238,7 +1206,6 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
preorder_ctrl->wsize_b = 64;/* max_ampdu_sz;ex. 32(kbytes) -> wsize_b =32 */
}
-
bmc_sta = rtw_get_bcmc_stainfo(padapter);
if (bmc_sta) {
for (i = 0; i < 16 ; i++) {
@@ -1272,7 +1239,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nfw_state:%x, BSSID:"MAC_FMT"\n"
, get_fwstate(pmlmepriv), MAC_ARG(pnetwork->network.MacAddress)));
-
/* why not use ptarget_wlan?? */
memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
/* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
@@ -1281,7 +1247,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
cur_network->aid = pnetwork->join_res;
-
rtw_set_signal_stat_timer(&padapter->recvpriv);
padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
@@ -1349,12 +1314,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_get_encrypt_decrypt_from_registrypriv(adapter);
-
- if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
- } else {
+ else
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- }
the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
@@ -1377,7 +1340,7 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
/* s1. find ptarget_wlan */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- if (the_same_macaddr == true) {
+ if (the_same_macaddr) {
ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
} else {
pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
@@ -1412,11 +1375,10 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
goto ignore_joinbss_callback;
}
-
/* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
- if (ptarget_sta == NULL) {
+ if (!ptarget_sta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
goto ignore_joinbss_callback;
@@ -1432,7 +1394,6 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
}
-
/* s5. Cancel assoc_timer */
_cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
@@ -1506,7 +1467,7 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta, u3
{
u16 media_status_rpt;
- if (psta == NULL)
+ if (!psta)
return;
media_status_rpt = (u16)((psta->mac_id<<8)|mstatus); /* MACID|OPMODE:1 connect */
@@ -1564,7 +1525,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
/* for AD-HOC mode */
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta != NULL) {
+ if (psta) {
/* the sta have been in sta_info_queue => do nothing */
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
@@ -1573,7 +1534,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
}
psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
- if (psta == NULL) {
+ if (!psta) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
return;
}
@@ -1591,7 +1552,6 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
-
psta->ieee8021x_blocked = false;
spin_lock_bh(&pmlmepriv->lock);
@@ -1612,7 +1572,6 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
spin_unlock_bh(&pmlmepriv->lock);
-
mlmeext_sta_add_event_callback(adapter, psta);
}
@@ -1648,7 +1607,6 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
return;
-
mlmeext_sta_del_event_callback(adapter);
spin_lock_bh(&pmlmepriv->lock);
@@ -1726,13 +1684,8 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
_clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
}
- if (rtw_createbss_cmd(adapter) != _SUCCESS) {
-
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
-
- }
-
-
}
}
@@ -1750,7 +1703,6 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
cpwm_int_hdl(padapter, preportpwrstate);
}
-
void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
{
WMMOnAssocRsp(padapter);
@@ -1840,8 +1792,6 @@ void rtw_mlme_reset_auto_scan_int(struct adapter *adapter)
mlme->auto_scan_int_ms = mlme->roam_scan_int_ms;
} else
mlme->auto_scan_int_ms = 0; /* disabled */
-
- return;
}
static void rtw_auto_scan_handler(struct adapter *padapter)
@@ -1859,7 +1809,7 @@ static void rtw_auto_scan_handler(struct adapter *padapter)
goto exit;
}
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic == true) {
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
DBG_871X(FUNC_ADPT_FMT" exit BusyTraffic\n", FUNC_ADPT_ARG(padapter));
goto exit;
}
@@ -1879,20 +1829,20 @@ void rtw_dynamic_check_timer_handler(struct adapter *adapter)
if (!adapter)
return;
- if (adapter->hw_init_completed == false)
+ if (!adapter->hw_init_completed)
return;
- if ((adapter->bDriverStopped == true) || (adapter->bSurpriseRemoved == true))
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- if (adapter->net_closed == true)
+ if (adapter->net_closed)
return;
if (is_primary_adapter(adapter))
DBG_871X("IsBtDisabled =%d, IsBtControlLps =%d\n", hal_btcoex_IsBtDisabled(adapter), hal_btcoex_IsBtControlLps(adapter));
- if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode == true)
- && (hal_btcoex_IsBtControlLps(adapter) == false)
+ if ((adapter_to_pwrctl(adapter)->bFwCurrentInPSMode)
+ && !(hal_btcoex_IsBtControlLps(adapter))
) {
u8 bEnterPS;
@@ -1907,16 +1857,14 @@ void rtw_dynamic_check_timer_handler(struct adapter *adapter)
}
} else {
- if (is_primary_adapter(adapter)) {
+ if (is_primary_adapter(adapter))
rtw_dynamic_chk_wk_cmd(adapter);
- }
}
/* auto site survey */
rtw_auto_scan_handler(adapter);
}
-
inline bool rtw_is_scan_deny(struct adapter *adapter)
{
struct mlme_priv *mlmepriv = &adapter->mlmepriv;
@@ -1994,26 +1942,24 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
{
int ret = _FAIL;
struct list_head *phead;
- struct adapter *adapter;
struct __queue *queue = &(mlme->scanned_queue);
struct wlan_network *pnetwork = NULL;
struct wlan_network *candidate = NULL;
- if (mlme->cur_network_scanned == NULL) {
+ if (!mlme->cur_network_scanned) {
rtw_warn_on(1);
return ret;
}
spin_lock_bh(&(mlme->scanned_queue.lock));
phead = get_list_head(queue);
- adapter = (struct adapter *)mlme->nic_hdl;
mlme->pscanned = get_next(phead);
while (phead != mlme->pscanned) {
pnetwork = LIST_CONTAINOR(mlme->pscanned, struct wlan_network, list);
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
ret = _FAIL;
goto exit;
@@ -2031,7 +1977,7 @@ int rtw_select_roaming_candidate(struct mlme_priv *mlme)
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
ret = _FAIL;
goto exit;
@@ -2064,9 +2010,8 @@ static int rtw_check_join_candidate(struct mlme_priv *mlme
int updated = false;
struct adapter *adapter = container_of(mlme, struct adapter, mlmepriv);
-
/* check bssid, if needed */
- if (mlme->assoc_by_bssid == true) {
+ if (mlme->assoc_by_bssid) {
if (memcmp(competitor->network.MacAddress, mlme->assoc_bssid, ETH_ALEN))
goto exit;
}
@@ -2115,12 +2060,8 @@ exit:
/*
Calling context:
The caller of the sub-routine will be in critical section...
-
The caller must hold the following spinlock
-
pmlmepriv->lock
-
-
*/
int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
@@ -2148,7 +2089,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
while (phead != pmlmepriv->pscanned) {
pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
- if (pnetwork == NULL) {
+ if (!pnetwork) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork == NULL)\n", __func__));
ret = _FAIL;
goto exit;
@@ -2166,7 +2107,7 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
}
- if (candidate == NULL) {
+ if (!candidate) {
DBG_871X("%s: return _FAIL(candidate == NULL)\n", __func__);
#ifdef CONFIG_WOWLAN
_clr_fwstate_(pmlmepriv, _FW_LINKED|_FW_UNDER_LINKING);
@@ -2207,14 +2148,14 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
sint res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
psetauthparm = rtw_zmalloc(sizeof(struct setauth_parm));
- if (psetauthparm == NULL) {
- kfree((unsigned char *)pcmd);
+ if (!psetauthparm) {
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
@@ -2227,7 +2168,6 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
pcmd->rsp = NULL;
pcmd->rspsz = 0;
-
INIT_LIST_HEAD(&pcmd->list);
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("after enqueue set_auth_cmd, auth_mode =%x\n", psecuritypriv->dot11AuthAlgrthm));
@@ -2247,7 +2187,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
sint res = _SUCCESS;
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
res = _FAIL;
goto exit;
}
@@ -2291,15 +2231,14 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
default:
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm = %x (must be 1 or 2 or 4 or 5)\n", psecuritypriv->dot11PrivacyAlgrthm));
res = _FAIL;
- kfree((unsigned char *)psetkeyparm);
+ kfree(psetkeyparm);
goto exit;
}
-
if (enqueue) {
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
- kfree((unsigned char *)psetkeyparm);
+ if (!pcmd) {
+ kfree(psetkeyparm);
res = _FAIL; /* try again */
goto exit;
}
@@ -2315,7 +2254,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
res = rtw_enqueue_cmd(pcmdpriv, pcmd);
} else {
setkey_hdl(adapter, (u8 *)psetkeyparm);
- kfree((u8 *) psetkeyparm);
+ kfree(psetkeyparm);
}
exit:
return res;
@@ -2350,7 +2289,6 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
}
-
/* */
/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
/* Added by Annie, 2006-05-07. */
@@ -2679,7 +2617,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
ht_capie.cap_info |= cpu_to_le16(IEEE80211_HT_CAP_SGI_20);
/* Get HT BW */
- if (in_ie == NULL) {
+ if (!in_ie) {
/* TDLS: TODO 20/40 issue */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
operation_bw = padapter->mlmeextpriv.cur_bwmode;
@@ -2794,7 +2732,7 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
phtpriv->ht_option = true;
- if (in_ie != NULL) {
+ if (in_ie) {
p = rtw_get_ie(in_ie, _HT_ADD_INFO_IE_, &ielen, in_len);
if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
out_len = *pout_len;
@@ -2824,7 +2762,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
u8 cbw40_enable = 0;
-
if (!phtpriv->ht_option)
return;
@@ -2834,7 +2771,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
DBG_871X("+rtw_update_ht_cap()\n");
/* maybe needs check if ap supports rx ampdu. */
- if ((phtpriv->ampdu_enable == false) && (pregistrypriv->ampdu_enable == 1)) {
+ if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) {
if (pregistrypriv->wifi_spec == 1) {
/* remove this part because testbed AP should disable RX AMPDU */
/* phtpriv->ampdu_enable = false; */
@@ -2847,7 +2784,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
/* phtpriv->ampdu_enable = true; */
}
-
/* check Max Rx A-MPDU Size */
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
@@ -2861,7 +2797,6 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
}
-
len = 0;
p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fix_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fix_ie));
if (p && len > 0) {
@@ -2961,7 +2896,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
return;
}
- if (psta == NULL) {
+ if (!psta) {
DBG_871X("%s, psta ==NUL\n", __func__);
return;
}
@@ -2971,10 +2906,9 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
return;
}
-
phtpriv = &psta->htpriv;
- if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ if (phtpriv->ht_option && phtpriv->ampdu_enable) {
issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
@@ -2994,10 +2928,8 @@ void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len
u8 cap_content[8] = {0};
u8 *pframe;
-
- if (phtpriv->bss_coexist) {
+ if (phtpriv->bss_coexist)
SET_EXT_CAPABILITY_ELE_BSS_COEXIST(cap_content, 1);
- }
pframe = rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
}
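Almost everything in the rtw_mlme.c diff is mechanical checkpatch-style cleanup: comparisons against true, false, and NULL become direct tests, braces around single statements go away, trailing return; statements in void functions are dropped, and runs of blank lines are squeezed. The boolean rewrites all follow one template:

        /* before */
        if (pnetwork == NULL)
                return;
        if (pnetwork->fixed == true)
                return;

        /* after: test the value directly */
        if (!pnetwork)
                return;
        if (pnetwork->fixed)
                return;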
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 2128886c9924..5e687f6d2c3e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -344,7 +344,7 @@ static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel
struct p2p_channels *channel_list)
{
- struct p2p_oper_class_map op_class[] = {
+ static const struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
{ IEEE80211G, 82, 14, 14, 1, BW20 },
{ IEEE80211A, 115, 36, 48, 4, BW20 },
@@ -363,7 +363,7 @@ static void init_channel_list(struct adapter *padapter, RT_CHANNEL_INFO *channel
for (op = 0; op_class[op].op_class; op++) {
u8 ch;
- struct p2p_oper_class_map *o = &op_class[op];
+ const struct p2p_oper_class_map *o = &op_class[op];
struct p2p_reg_class *reg = NULL;
for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
@@ -2922,7 +2922,8 @@ int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
int i = 0;
do {
- ret = _issue_probereq(padapter, pssid, da, ch, append_wps, wait_ms > 0?true:false);
+ ret = _issue_probereq(padapter, pssid, da, ch, append_wps,
+ wait_ms > 0);
i++;
@@ -3086,8 +3087,6 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
DBG_871X("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
@@ -3405,8 +3404,6 @@ exit:
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */

@@ -3513,7 +3510,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
}
do {
- ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0?true:false);
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
i++;
@@ -3661,7 +3658,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
da = get_my_bssid(&(pmlmeinfo->network));
do {
- ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0?true:false);
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
i++;
@@ -3769,7 +3766,7 @@ int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int
int i = 0;
do {
- ret = _issue_deauth(padapter, da, reason, wait_ms > 0?true:false);
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
i++;
@@ -5260,8 +5257,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
DBG_871X("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -5306,8 +5301,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
DBG_871X("report_add_sta_event: add STA\n");
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
/****************************************************************************
@@ -5869,8 +5862,6 @@ void link_timer_hdl(struct timer_list *t)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
-
- return;
}
void addba_timer_hdl(struct timer_list *t)
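Two distinct cleanups in rtw_mlme_ext.c: the P2P operating-class table becomes static const, so it lives in rodata and is no longer rebuilt on the stack every time init_channel_list() runs, and the wait_ms > 0 ? true : false ternaries lose the redundant ? true : false, since the comparison already yields the boolean. Sketched (table shortened; the all-zero entry terminates the scan loop):

        static const struct p2p_oper_class_map op_class[] = {
                { IEEE80211G,  81,  1, 13, 1, BW20 },
                { IEEE80211A, 115, 36, 48, 4, BW20 },
                { 0, 0, 0, 0, 0, BW20 },        /* op_class == 0 ends the loop */
        };

        /* "wait_ms > 0" is already a bool; no ternary needed */
        ret = _issue_deauth(padapter, da, reason, wait_ms > 0);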
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index 4075de07e0a9..30137f0bd984 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -190,7 +190,6 @@ void rtw_ps_processor(struct adapter *padapter)
}
exit:
pwrpriv->ps_processing = false;
- return;
}
static void pwr_state_check_handler(struct timer_list *t)
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 687ff3c6f09f..7fa8c84cf5f4 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -1400,10 +1400,8 @@ static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *prec
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
u8 *ptr = precv_frame->u.hdr.rx_data;
- u8 type;
u8 subtype;
- type = GetFrameType(ptr);
subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
/* only support station mode */
@@ -1412,9 +1410,8 @@ static sint validate_80211w_mgmt(struct adapter *adapter, union recv_frame *prec
/* unicast management frame decrypt */
if (pattrib->privacy && !(IS_MCAST(GetAddr1Ptr(ptr))) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_ACTION)) {
- u8 *ppp, *mgmt_DATA;
+ u8 *mgmt_DATA;
u32 data_len = 0;
- ppp = GetAddr2Ptr(ptr);
pattrib->bdecrypted = 0;
pattrib->encrypt = _AES_;
@@ -1709,7 +1706,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
- u8 *data, wlanhdr_offset;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame_hdr *pfhdr, *pnfhdr;
union recv_frame *prframe, *pnextrframe;
@@ -1739,8 +1736,6 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
plist = get_next(plist);
- data = get_recvframe_data(prframe);
-
while (phead != plist) {
pnextrframe = (union recv_frame *)plist;
pnfhdr = &pnextrframe->u.hdr;
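The rtw_recv.c hunks delete locals that were assigned but never read (type in validate_80211w_mgmt(), ppp and data in recvframe_defrag()). Besides shrinking the functions, this is exactly what -Wunused-but-set-variable (enabled by make W=1 builds) complains about:

        /* before: 'type' is computed, then never used again */
        u8 type, subtype;

        type = GetFrameType(ptr);
        subtype = GetFrameSubType(ptr);

        /* after: keep only what the function reads */
        u8 subtype;

        subtype = GetFrameSubType(ptr);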
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 57cfe06d7d73..9c4607114cea 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -303,13 +303,18 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
*((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
- crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
+ __func__,
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
}
WEP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
}
- return;
}
/* 3 =====TKIP related ===== */
@@ -657,11 +662,9 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 hw_hdr_offset = 0;
struct arc4context mycontext;
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
- /* struct sta_info *stainfo; */
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -676,36 +679,14 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _TKIP_) {
-/*
- if (pattrib->psta)
- {
- stainfo = pattrib->psta;
- }
- else
{
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
- }
-*/
- /* if (stainfo!= NULL) */
- {
-/*
- if (!(stainfo->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-*/
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
- /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
prwskey = pattrib->dot118021x_UncstKey.skey;
- prwskeylen = 16;
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
iv = pframe+pattrib->hdrlen;
payload = pframe+pattrib->iv_len+pattrib->hdrlen;
@@ -742,13 +723,6 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
TKIP_SW_ENC_CNT_INC(psecuritypriv, pattrib->ra);
}
-/*
- else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo == NULL!!!\n"));
- DBG_871X("%s, psta ==NUL\n", __func__);
- res = _FAIL;
- }
-*/
}
return res;
@@ -765,14 +739,12 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
u8 crc[4];
struct arc4context mycontext;
sint length;
- u32 prwskeylen;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-/* struct recv_priv *precvpriv =&padapter->recvpriv; */
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -817,13 +789,9 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
no_gkey_bc_cnt = 0;
no_gkey_mc_cnt = 0;
- /* DBG_871X("rx bc/mc packets, to perform sw rtw_tkip_decrypt\n"); */
- /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
- prwskeylen = 16;
} else {
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
- prwskeylen = 16;
}
iv = pframe+prxattrib->hdrlen;
@@ -846,15 +814,19 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
*((u32 *)crc) = le32_to_cpu(getcrc32(payload, length-4));
if (crc[3] != payload[length-1] || crc[2] != payload[length-2] || crc[1] != payload[length-3] || crc[0] != payload[length-4]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
("rtw_wep_decrypt:icv error crc[3](%x)!=payload[length-1](%x) || crc[2](%x)!=payload[length-2](%x) || crc[1](%x)!=payload[length-3](%x) || crc[0](%x)!=payload[length-4](%x)\n",
- crc[3], payload[length-1], crc[2], payload[length-2], crc[1], payload[length-3], crc[0], payload[length-4]));
+ crc[3], payload[length - 1],
+ crc[2], payload[length - 2],
+ crc[1], payload[length - 3],
+ crc[0], payload[length - 4]));
res = _FAIL;
}
TKIP_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo == NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
@@ -1426,7 +1398,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
aes128k128d(key, chain_buffer, aes_out);
for (i = 0; i < num_blocks; i++) {
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
payload_index += 16;
aes128k128d(key, chain_buffer, aes_out);
@@ -1437,7 +1409,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++) {
- padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
+ padded_buffer[j] = pframe[payload_index++];
}
bitwise_xor(aes_out, padded_buffer, chain_buffer);
aes128k128d(key, chain_buffer, aes_out);
@@ -1449,7 +1421,7 @@ static sint aes_cipher(u8 *key, uint hdrlen,
/* Insert MIC into payload */
for (j = 0; j < 8; j++)
- pframe[payload_index+j] = mic[j]; /* message[payload_index+j] = mic[j]; */
+ pframe[payload_index+j] = mic[j];
payload_index = hdrlen + 8;
for (i = 0; i < num_blocks; i++) {
@@ -1463,9 +1435,9 @@ static sint aes_cipher(u8 *key, uint hdrlen,
frtype
); /* add for CONFIG_IEEE80211W, none 11w also can use */
aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<16;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
}
if (payload_remainder > 0) {
@@ -1484,12 +1456,12 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < payload_remainder; j++)
- padded_buffer[j] = pframe[payload_index+j];/* padded_buffer[j] = message[payload_index+j]; */
+ padded_buffer[j] = pframe[payload_index+j];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < payload_remainder; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<payload_remainder;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
}
/* Encrypt the MIC */
@@ -1506,12 +1478,12 @@ static sint aes_cipher(u8 *key, uint hdrlen,
for (j = 0; j < 16; j++)
padded_buffer[j] = 0x00;
for (j = 0; j < 8; j++)
- padded_buffer[j] = pframe[j+hdrlen+8+plen];/* padded_buffer[j] = message[j+hdrlen+8+plen]; */
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];
aes128k128d(key, ctr_preload, aes_out);
bitwise_xor(aes_out, padded_buffer, chain_buffer);
for (j = 0; j < 8; j++)
- pframe[payload_index++] = chain_buffer[j];/* for (j = 0; j<8;j++) message[payload_index++] = chain_buffer[j]; */
+ pframe[payload_index++] = chain_buffer[j];
return _SUCCESS;
}
@@ -1525,15 +1497,12 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* Intermediate Buffers */
sint curfragnum, length;
- u32 prwskeylen;
u8 *pframe, *prwskey; /* *payload,*iv */
u8 hw_hdr_offset = 0;
- /* struct sta_info *stainfo = NULL; */
struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-/* uint offset = 0; */
u32 res = _SUCCESS;
if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
@@ -1544,16 +1513,13 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _AES_) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
- /* prwskey =&stainfo->dot118021x_UncstKey.skey[0]; */
prwskey = pattrib->dot118021x_UncstKey.skey;
- prwskeylen = 16;
-
for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
@@ -1574,10 +1540,10 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
}
static sint aes_decipher(u8 *key, uint hdrlen,
- u8 *pframe, uint plen)
+ u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
- uint qc_exists, a4_exists, i, j, payload_remainder,
+ uint qc_exists, a4_exists, i, j, payload_remainder,
num_blocks, payload_index;
sint res = _SUCCESS;
u8 pn_vector[6];
@@ -1593,9 +1559,8 @@ static sint aes_decipher(u8 *key, uint hdrlen,
u8 mic[8];
-/* uint offset = 0; */
- uint frtype = GetFrameType(pframe);
- uint frsubtype = GetFrameSubType(pframe);
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
frsubtype = frsubtype>>4;
@@ -1615,11 +1580,11 @@ static sint aes_decipher(u8 *key, uint hdrlen,
payload_remainder = (plen-8) % 16;
pn_vector[0] = pframe[hdrlen];
- pn_vector[1] = pframe[hdrlen+1];
- pn_vector[2] = pframe[hdrlen+4];
- pn_vector[3] = pframe[hdrlen+5];
- pn_vector[4] = pframe[hdrlen+6];
- pn_vector[5] = pframe[hdrlen+7];
+ pn_vector[1] = pframe[hdrlen + 1];
+ pn_vector[2] = pframe[hdrlen + 4];
+ pn_vector[3] = pframe[hdrlen + 5];
+ pn_vector[4] = pframe[hdrlen + 6];
+ pn_vector[5] = pframe[hdrlen + 7];
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
@@ -1651,22 +1616,17 @@ static sint aes_decipher(u8 *key, uint hdrlen,
payload_index = hdrlen + 8; /* 8 is for extiv */
for (i = 0; i < num_blocks; i++) {
- construct_ctr_preload(
- ctr_preload,
- a4_exists,
- qc_exists,
- pframe,
- pn_vector,
- i+1,
- frtype /* add for CONFIG_IEEE80211W, none 11w also can use */
- );
-
- aes128k128d(key, ctr_preload, aes_out);
- bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
-
- for (j = 0; j < 16; j++)
- pframe[payload_index++] = chain_buffer[j];
- }
+ construct_ctr_preload(ctr_preload, a4_exists,
+ qc_exists, pframe,
+ pn_vector, i + 1,
+ frtype); /* add for CONFIG_IEEE80211W, none 11w also can use */
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
if (payload_remainder > 0) {
/* If there is a short final block, then pad it,*/
@@ -1835,10 +1795,18 @@ static sint aes_decipher(u8 *key, uint hdrlen,
/* compare the mic */
for (i = 0; i < 8; i++) {
if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
- DBG_871X("aes_decipher:mic check error mic[%d]: pframe(%x) != message(%x)\n",
- i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ __func__,
+ i,
+ pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]));
+ DBG_871X("%s:mic check error mic[%d]: pframe(%x) != message(%x)\n",
+ __func__,
+ i,
+ pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]);
res = _FAIL;
}
}
@@ -1861,7 +1829,6 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-/* struct recv_priv *precvpriv =&padapter->recvpriv; */
u32 res = _SUCCESS;
pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
@@ -1869,15 +1836,15 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo != NULL) {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s: stainfo!= NULL!!!\n", __func__));
if (IS_MCAST(prxattrib->ra)) {
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- /* DBG_871X("rx bc/mc packets, to perform sw rtw_aes_decrypt\n"); */
- /* prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey; */
if (psecuritypriv->binstallGrpkey == false) {
res = _FAIL;
@@ -1927,7 +1894,9 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
AES_SW_DEC_CNT_INC(psecuritypriv, prxattrib->ra);
} else {
- RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo == NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_security_c_,
+ _drv_err_,
+ ("%s: stainfo == NULL!!!\n", __func__));
res = _FAIL;
}
}
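
Two patterns dominate the rtw_security.c hunks: long RT_TRACE calls are re-wrapped one argument per line, and hard-coded function names inside log strings give way to the predefined __func__ identifier, which cannot drift out of date when code is renamed or copy-pasted. (One pre-existing oddity survives the cleanup: the TKIP ICV-error message still says "rtw_wep_decrypt".) A sketch of the __func__ idiom with a stand-in logging macro, since the real RT_TRACE also takes module and level arguments:

#include <stdio.h>

#define log_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

static void *lookup_sta(void) { return NULL; }	/* stand-in for rtw_get_stainfo() */

static int aes_decrypt_frame(void)
{
	if (!lookup_sta()) {
		/* __func__ expands to "aes_decrypt_frame" here */
		log_err("%s: stainfo == NULL!!!\n", __func__);
		return -1;
	}
	return 0;
}

int main(void)
{
	return aes_decrypt_frame() ? 1 : 0;
}
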
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index bdc52d8d5625..09d2ca30d653 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -187,7 +187,6 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
/* struct sta_info *rtw_alloc_stainfo(_queue *pfree_sta_queue, unsigned char *hwaddr) */
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- uint tmp_aid;
s32 index;
struct list_head *phash_list;
struct sta_info *psta;
@@ -211,8 +210,6 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
/* spin_unlock_bh(&(pfree_sta_queue->lock)); */
- tmp_aid = psta->aid;
-
_rtw_init_stainfo(psta);
psta->padapter = pstapriv->padapter;
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index ea3ea2a6b314..9590e6f351c1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -606,19 +606,6 @@ inline void clear_cam_entry(struct adapter *adapter, u8 id)
clear_cam_cache(adapter, id);
}
-inline void write_cam_from_cache(struct adapter *adapter, u8 id)
-{
- struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
- struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
- struct cam_entry_cache cache;
-
- spin_lock_bh(&cam_ctl->lock);
- memcpy(&cache, &dvobj->cam_cache[id], sizeof(struct cam_entry_cache));
- spin_unlock_bh(&cam_ctl->lock);
-
- _write_cam(adapter, id, cache.ctrl, cache.mac, cache.key);
-}
-
void write_cam_cache(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
{
struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
@@ -1170,8 +1157,6 @@ void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
pmlmeinfo->HT_info_enable = 1;
memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->Length);
-
- return;
}
void HTOnAssocRsp(struct adapter *padapter)
@@ -1481,11 +1466,11 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
}
- kfree((u8 *)bssid);
+ kfree(bssid);
return _SUCCESS;
_mismatch:
- kfree((u8 *)bssid);
+ kfree(bssid);
if (pmlmepriv->NumOfBcnInfoChkFail == 0)
pmlmepriv->timeBcnInfoChkStart = jiffies;
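
The rtw_wlan_util.c change from kfree((u8 *)bssid) to kfree(bssid) works because kfree() takes a const void *, so casting the argument buys nothing; the conversion to void * is implicit in C. The userspace analogue with free() behaves the same way:

#include <stdlib.h>

struct wlan_bssid_ex {		/* shape only; the driver's struct is larger */
	unsigned char mac[6];
	unsigned int length;
};

int main(void)
{
	struct wlan_bssid_ex *bssid = malloc(sizeof(*bssid));

	free(bssid);	/* no cast: any object pointer converts to void * */
	return 0;
}
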
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index b5dcb78fb4f4..fdb585ff5925 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -25,9 +25,6 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
spin_lock_init(&psta_xmitpriv->lock);
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _init_txservq(&(psta_xmitpriv->blk_q[i])); */
-
_init_txservq(&psta_xmitpriv->be_q);
_init_txservq(&psta_xmitpriv->bk_q);
_init_txservq(&psta_xmitpriv->vi_q);
@@ -54,18 +51,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->adapter = padapter;
- /* for (i = 0 ; i < MAX_NUMBLKS; i++) */
- /* _rtw_init_queue(&pxmitpriv->blk_strms[i]); */
-
_rtw_init_queue(&pxmitpriv->be_pending);
_rtw_init_queue(&pxmitpriv->bk_pending);
_rtw_init_queue(&pxmitpriv->vi_pending);
_rtw_init_queue(&pxmitpriv->vo_pending);
_rtw_init_queue(&pxmitpriv->bm_pending);
- /* _rtw_init_queue(&pxmitpriv->legacy_dz_queue); */
- /* _rtw_init_queue(&pxmitpriv->apsd_queue); */
-
_rtw_init_queue(&pxmitpriv->free_xmit_queue);
/*
@@ -83,13 +74,11 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
- /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
- /* ((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3); */
pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
for (i = 0; i < NR_XMITFRAME; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -99,7 +88,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xmit_queue.queue);
pxframe++;
}
@@ -108,7 +98,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
-
/* init xmit_buf */
_rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
_rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
@@ -122,8 +111,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
}
pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
- /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
- /* ((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3); */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
@@ -150,13 +137,13 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->flags = XMIT_VO_QUEUE;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmitbuf_queue.queue);
#ifdef DBG_XMIT_BUF
pxmitbuf->no = i;
#endif
pxmitbuf++;
-
}
pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
@@ -176,7 +163,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -188,7 +175,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->ext_tag = 1;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
+ list_add_tail(&pxframe->list,
+ &pxmitpriv->free_xframe_ext_queue.queue);
pxframe++;
}
@@ -227,12 +215,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->len = 0;
pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list,
+ &pxmitpriv->free_xmit_extbuf_queue.queue);
#ifdef DBG_XMIT_BUF_EXT
pxmitbuf->no = i;
#endif
pxmitbuf++;
-
}
pxmitpriv->free_xmit_extbuf_cnt = NR_XMIT_EXTBUFF;
@@ -265,9 +253,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i++)
pxmitpriv->wmm_para_seq[i] = i;
- }
pxmitpriv->ack_tx = false;
mutex_init(&pxmitpriv->ack_tx_mutex);
@@ -306,7 +293,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
if (pxmitpriv->pallocated_frame_buf)
vfree(pxmitpriv->pallocated_frame_buf);
-
if (pxmitpriv->pallocated_xmitbuf)
vfree(pxmitpriv->pallocated_xmitbuf);
@@ -329,9 +315,8 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf++;
}
- if (pxmitpriv->pallocated_xmit_extbuf) {
+ if (pxmitpriv->pallocated_xmit_extbuf)
vfree(pxmitpriv->pallocated_xmit_extbuf);
- }
for (i = 0; i < CMDBUF_MAX; i++) {
pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
@@ -372,8 +357,8 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
u32 sz;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
/* struct sta_info *psta = pattrib->psta; */
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pattrib->nr_frags != 1)
sz = padapter->xmitpriv.frag_len;
@@ -404,7 +389,6 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
break;
}
-
/* check ERP protection */
if (pattrib->rtsen || pattrib->cts2self) {
if (pattrib->rtsen)
@@ -485,20 +469,12 @@ static void update_attrib_phy_info(struct adapter *padapter, struct pkt_attrib *
else
pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing;
- /* if (pattrib->ht_en && psta->htpriv.ampdu_enable) */
- /* */
- /* if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) */
- /* pattrib->ampdu_en = true; */
- /* */
-
-
pattrib->retry_ctrl = false;
#ifdef CONFIG_AUTO_AP_MODE
if (psta->isrc && psta->pid > 0)
pattrib->pctrl = true;
#endif
-
}
static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
@@ -548,7 +524,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
/* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
if (((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) && (pattrib->ether_type == 0x888e))
pattrib->encrypt = _NO_PRIVACY_;
-
}
switch (pattrib->encrypt) {
@@ -576,7 +551,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
else
TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
-
memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);
break;
@@ -620,7 +594,6 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
exit:
return res;
-
}
u8 qos_acm(u8 acm_mask, u8 priority)
@@ -658,14 +631,12 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
struct iphdr ip_hdr;
s32 UserPriority = 0;
-
_rtw_open_pktfile(ppktfile->pkt, ppktfile);
_rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
/* get UserPriority from IP hdr */
if (pattrib->ether_type == 0x0800) {
_rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
-/* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
UserPriority = ip_hdr.tos >> 5;
}
pattrib->priority = UserPriority;
@@ -675,7 +646,6 @@ static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib *pattrib)
{
- uint i;
struct pkt_file pktfile;
struct sta_info *psta = NULL;
struct ethhdr etherhdr;
@@ -689,15 +659,13 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);
_rtw_open_pktfile(pkt, &pktfile);
- i = _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+ _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
-
memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
-
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
@@ -748,8 +716,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
}
}
-
-
} else if (0x888e == pattrib->ether_type) {
DBG_871X_LEVEL(_drv_always_, "send eapol packet\n");
}
@@ -804,8 +770,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
return _FAIL;
}
-
-
/* TODO:_lock */
if (update_attrib_sec_info(padapter, pattrib, psta) == _FAIL) {
DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
@@ -815,8 +779,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
update_attrib_phy_info(padapter, pattrib, psta);
- /* DBG_8192C("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
-
pattrib->psta = psta;
/* TODO:_unlock */
@@ -839,7 +801,6 @@ static s32 update_attrib(struct adapter *padapter, _pkt *pkt, struct pkt_attrib
if (pmlmepriv->acm_mask != 0)
pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
-
}
}
@@ -854,7 +815,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
sint curfragnum, length;
u8 *pframe, *payload, mic[8];
struct mic_data micdata;
- /* struct sta_info *stainfo; */
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -862,54 +822,23 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
u8 hw_hdr_offset = 0;
sint bmcst = IS_MCAST(pattrib->ra);
-/*
- if (pattrib->psta)
- {
- stainfo = pattrib->psta;
- }
- else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- stainfo =rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
- }
-
- if (stainfo == NULL)
- {
- DBG_871X("%s, psta ==NUL\n", __func__);
- return _FAIL;
- }
-
- if (!(stainfo->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, stainfo->state);
- return _FAIL;
- }
-*/
-
hw_hdr_offset = TXDESC_OFFSET;
- if (pattrib->encrypt == _TKIP_) { /* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
+ if (pattrib->encrypt == _TKIP_) {
/* encode mic code */
- /* if (stainfo!= NULL) */
{
u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
pframe = pxmitframe->buf_addr + hw_hdr_offset;
if (bmcst) {
- if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16)) {
- /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* msleep(10); */
+ if (!memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
return _FAIL;
- }
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
} else {
- if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16)) {
- /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
- /* msleep(10); */
+ if (!memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16))
return _FAIL;
- }
/* start to calculate the mic code */
rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
}
@@ -926,14 +855,11 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
rtw_secmicappend(&micdata, &pframe[16], 6);
else
rtw_secmicappend(&micdata, &pframe[10], 6);
-
}
- /* if (pqospriv->qos_option == 1) */
if (pattrib->qos_en)
priority[0] = (u8)pxmitframe->attrib.priority;
-
rtw_secmicappend(&micdata, &priority[0], 4);
payload = pframe;
@@ -956,7 +882,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum =%d length =%d pattrib->icv_len =%d", curfragnum, length, pattrib->icv_len));
}
}
- rtw_secgetmic(&micdata, &(mic[0]));
+ rtw_secgetmic(&micdata, &mic[0]);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz =%d!!!\n", pattrib->last_txcmdsz));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]= 0x%.2x , mic[1]= 0x%.2x , mic[2]= 0x%.2x , mic[3]= 0x%.2x\n\
@@ -964,7 +890,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
/* add mic code and add the mic code length in last_txcmdsz */
- memcpy(payload, &(mic[0]), 8);
+ memcpy(payload, &mic[0], 8);
pattrib->last_txcmdsz += 8;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ========last pkt ========\n"));
@@ -975,9 +901,6 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
*(payload+curfragnum+4), *(payload+curfragnum+5), *(payload+curfragnum+6), *(payload+curfragnum+7)));
}
/*
- else {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo == NULL!!!\n"));
- }
*/
}
return _SUCCESS;
@@ -985,13 +908,9 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
-
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- /* struct security_priv *psecuritypriv =&padapter->securitypriv; */
- /* if ((psecuritypriv->sw_encrypt)||(pattrib->bswenc)) */
if (pattrib->bswenc) {
- /* DBG_871X("start xmitframe_swencrypt\n"); */
RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
switch (pattrib->encrypt) {
case _WEP40_:
@@ -1007,7 +926,6 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
default:
break;
}
-
} else
RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
@@ -1030,7 +948,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
{
@@ -1044,8 +962,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (pqospriv->qos_option)
qos_option = true;
-
- } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)) {
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
/* to_ds = 0, fr_ds = 1; */
SetFrDs(fctrl);
memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
@@ -1106,7 +1023,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
return _FAIL;
}
-
if (psta) {
psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
@@ -1119,7 +1035,6 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
pattrib->ampdu_en = true;
-
/* re-check if enable ampdu by BA_starting_seqctrl */
if (pattrib->ampdu_en == true) {
u16 tx_seq;
@@ -1128,24 +1043,19 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
/* check BA_starting_seqctrl */
if (SN_LESS(pattrib->seqnum, tx_seq)) {
- /* DBG_871X("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
pattrib->ampdu_en = false;/* AGG BK */
} else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
pattrib->ampdu_en = true;/* AGG EN */
} else {
- /* DBG_871X("tx ampdu over run\n"); */
psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
pattrib->ampdu_en = true;/* AGG EN */
}
-
}
}
}
-
} else {
-
}
exit:
@@ -1203,9 +1113,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
u8 *pframe, *mem_start;
u8 hw_hdr_offset;
- /* struct sta_info *psta; */
- /* struct sta_priv *pstapriv = &padapter->stapriv; */
- /* struct mlme_priv *pmlmepriv = &padapter->mlmepriv; */
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
@@ -1215,30 +1122,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
s32 bmcst = IS_MCAST(pattrib->ra);
s32 res = _SUCCESS;
-/*
- if (pattrib->psta)
- {
- psta = pattrib->psta;
- } else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
- }
-
- if (psta == NULL)
- {
-
- DBG_871X("%s, psta ==NUL\n", __func__);
- return _FAIL;
- }
-
-
- if (!(psta->state &_FW_LINKED))
- {
- DBG_871X("%s, psta->state(0x%x) != _FW_LINKED\n", __func__, psta->state);
- return _FAIL;
- }
-*/
if (!pxmitframe->buf_addr) {
DBG_8192C("==> %s buf_addr == NULL\n", __func__);
return _FAIL;
@@ -1293,10 +1176,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mpdu_len -= llc_sz;
}
- if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc))
mpdu_len -= pattrib->icv_len;
- }
-
if (bmcst) {
/* don't do fragment to broadcat/multicast packets */
@@ -1330,7 +1211,6 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_fram
mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
-
}
if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
@@ -1410,7 +1290,8 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
pmlmeext->mgnt_80211w_IPN++;
/* add MME IE with MIC all zero, MME string doesn't include element id and length */
- pframe = rtw_set_ie(pframe, _MME_IE_, 16, MME, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _MME_IE_, 16,
+ MME, &pattrib->pktlen);
pattrib->last_txcmdsz = pattrib->pktlen;
/* total frame length - header length */
frame_body_len = pattrib->pktlen - sizeof(struct ieee80211_hdr_3addr);
@@ -1441,7 +1322,6 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (!psta) {
-
DBG_871X("%s, psta ==NUL\n", __func__);
goto xmitframe_coalesce_fail;
}
@@ -1451,7 +1331,6 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit
goto xmitframe_coalesce_fail;
}
- /* DBG_871X("%s, action frame category =%d\n", __func__, pframe[WLAN_HDR_A3_LEN]); */
/* according 802.11-2012 standard, these five types are not robust types */
if (subtype == WIFI_ACTION &&
(pframe[WLAN_HDR_A3_LEN] == RTW_WLAN_CATEGORY_PUBLIC ||
@@ -1550,7 +1429,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
{
-
uint protection;
u8 *perp;
sint erp_len;
@@ -1582,7 +1460,6 @@ void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
}
break;
-
}
}
@@ -1666,7 +1543,6 @@ struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
pxmitbuf->priv_data = pcmdframe;
return pcmdframe;
-
}
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
@@ -1681,14 +1557,13 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
if (list_empty(&pfree_queue->queue)) {
pxmitbuf = NULL;
} else {
-
phead = get_list_head(pfree_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
+ list_del_init(&pxmitbuf->list);
}
if (pxmitbuf) {
@@ -1697,7 +1572,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
DBG_871X("DBG_XMIT_BUF_EXT ALLOC no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
#endif
-
pxmitbuf->priv_data = NULL;
pxmitbuf->len = 0;
@@ -1708,7 +1582,6 @@ struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
DBG_871X("%s pxmitbuf->sctx is not NULL\n", __func__);
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
-
}
spin_unlock_irqrestore(&pfree_queue->lock, irqL);
@@ -1728,7 +1601,7 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
#ifdef DBG_XMIT_BUF_EXT
DBG_871X("DBG_XMIT_BUF_EXT FREE no =%d, free_xmit_extbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
@@ -1746,21 +1619,18 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
struct list_head *plist, *phead;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- /* DBG_871X("+rtw_alloc_xmitbuf\n"); */
-
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
if (list_empty(&pfree_xmitbuf_queue->queue)) {
pxmitbuf = NULL;
} else {
-
phead = get_list_head(pfree_xmitbuf_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
+ list_del_init(&pxmitbuf->list);
}
if (pxmitbuf) {
@@ -1768,7 +1638,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
#ifdef DBG_XMIT_BUF
DBG_871X("DBG_XMIT_BUF ALLOC no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
#endif
- /* DBG_871X("alloc, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
pxmitbuf->priv_data = NULL;
@@ -1797,8 +1666,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
_irqL irqL;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
- /* DBG_871X("+rtw_free_xmitbuf\n"); */
-
if (!pxmitbuf)
return _FAIL;
@@ -1815,10 +1682,10 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+ list_add_tail(&pxmitbuf->list,
+ get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
- /* DBG_871X("FREE, free_xmitbuf_cnt =%d\n", pxmitpriv->free_xmitbuf_cnt); */
#ifdef DBG_XMIT_BUF
DBG_871X("DBG_XMIT_BUF FREE no =%d, free_xmitbuf_cnt =%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
#endif
@@ -1834,7 +1701,6 @@ static void rtw_init_xmitframe(struct xmit_frame *pxframe)
pxframe->pxmitbuf = NULL;
memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
- /* pxframe->attrib.psta = NULL; */
pxframe->frame_tag = DATA_FRAMETAG;
@@ -1879,7 +1745,7 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
+ list_del_init(&pxframe->list);
pxmitpriv->free_xmitframe_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt =%d\n", pxmitpriv->free_xmitframe_cnt));
}
@@ -1906,7 +1772,7 @@ struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv)
plist = get_next(phead);
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
+ list_del_init(&pxframe->list);
pxmitpriv->free_xframe_ext_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe_ext():free_xmitframe_cnt =%d\n", pxmitpriv->free_xframe_ext_cnt));
}
@@ -1974,7 +1840,6 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
else if (pxmitframe->ext_tag == 1)
queue = &pxmitpriv->free_xframe_ext_queue;
else {
-
}
spin_lock_bh(&queue->lock);
@@ -2006,21 +1871,19 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead;
struct xmit_frame *pxmitframe;
- spin_lock_bh(&(pframequeue->lock));
+ spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
plist = get_next(phead);
while (phead != plist) {
-
pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
plist = get_next(plist);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
}
- spin_unlock_bh(&(pframequeue->lock));
+ spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -2029,7 +1892,6 @@ s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitfram
if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n"));
-/* pxmitframe->pkt = NULL; */
return _FAIL;
}
@@ -2043,21 +1905,21 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
switch (up) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
*(ac) = 3;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
*(ac) = 1;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
*(ac) = 0;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
break;
@@ -2065,11 +1927,10 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
*(ac) = 2;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
break;
-
}
return ptxservq;
@@ -2081,7 +1942,6 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
*/
s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
- /* _irqL irqL0; */
u8 ac_index;
struct sta_info *psta;
struct tx_servq *ptxservq;
@@ -2091,15 +1951,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class);
-/*
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta = rtw_get_stainfo(pstapriv, pattrib->ra);
- }
-*/
-
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (pattrib->psta != psta) {
DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta);
@@ -2123,22 +1974,13 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
- /* spin_lock_irqsave(&pstapending->lock, irqL0); */
-
- if (list_empty(&ptxservq->tx_pending)) {
+ if (list_empty(&ptxservq->tx_pending))
list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
- }
-
- /* spin_lock_irqsave(&ptxservq->sta_pending.lock, irqL1); */
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
- /* spin_unlock_irqrestore(&ptxservq->sta_pending.lock, irqL1); */
-
- /* spin_unlock_irqrestore(&pstapending->lock, irqL0); */
-
exit:
return res;
@@ -2161,45 +2003,24 @@ s32 rtw_alloc_hwxmits(struct adapter *padapter)
hwxmits = pxmitpriv->hwxmits;
if (pxmitpriv->hwxmit_entry == 5) {
- /* pxmitpriv->bmc_txqueue.head = 0; */
- /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
hwxmits[0] .sta_queue = &pxmitpriv->bm_pending;
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
hwxmits[1] .sta_queue = &pxmitpriv->vo_pending;
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
hwxmits[2] .sta_queue = &pxmitpriv->vi_pending;
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
hwxmits[4] .sta_queue = &pxmitpriv->be_pending;
-
} else if (pxmitpriv->hwxmit_entry == 4) {
-
- /* pxmitpriv->vo_txqueue.head = 0; */
- /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
hwxmits[0] .sta_queue = &pxmitpriv->vo_pending;
- /* pxmitpriv->vi_txqueue.head = 0; */
- /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
- /* pxmitpriv->be_txqueue.head = 0; */
- /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
- /* pxmitpriv->bk_txqueue.head = 0; */
- /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
} else {
-
}
return _SUCCESS;
@@ -2207,24 +2028,17 @@ s32 rtw_alloc_hwxmits(struct adapter *padapter)
void rtw_free_hwxmits(struct adapter *padapter)
{
- struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- hwxmits = pxmitpriv->hwxmits;
- if (hwxmits)
- kfree((u8 *)hwxmits);
+ kfree(pxmitpriv->hwxmits);
}
void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry)
{
sint i;
- for (i = 0; i < entry; i++, phwxmit++) {
- /* spin_lock_init(&phwxmit->xmit_lock); */
- /* INIT_LIST_HEAD(&phwxmit->pending); */
- /* phwxmit->txcmdcnt = 0; */
+ for (i = 0; i < entry; i++, phwxmit++)
phwxmit->accnt = 0;
- }
}
u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
@@ -2259,11 +2073,9 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
default:
addr = MGT_QUEUE_INX;
break;
-
}
return addr;
-
}
static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
@@ -2310,7 +2122,7 @@ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
if (!pxmitframe) {
drop_cnt++;
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n"));
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: no more pxmitframe\n", __func__));
DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe);
return -1;
}
@@ -2318,7 +2130,7 @@ s32 rtw_xmit(struct adapter *padapter, _pkt **ppkt)
res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
if (res == _FAIL) {
- RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n"));
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("%s: update attrib fail\n", __func__));
#ifdef DBG_TX_DROP_FRAME
DBG_871X("DBG_TX_DROP_FRAME %s update attrib fail\n", __func__);
#endif
@@ -2355,7 +2167,6 @@ inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe)
struct registry_priv *registry = &adapter->registrypriv;
if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) {
-
struct pkt_attrib *attrib = &xmitframe->attrib;
if (attrib->ether_type == 0x0806
@@ -2389,17 +2200,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
return ret;
}
-/*
- if (pattrib->psta)
- {
- psta = pattrib->psta;
- }
- else
- {
- DBG_871X("%s, call rtw_get_stainfo()\n", __func__);
- psta =rtw_get_stainfo(pstapriv, pattrib->ra);
- }
-*/
psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
if (pattrib->psta != psta) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta);
@@ -2421,16 +2221,13 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (pattrib->triggered == 1) {
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger);
- /* DBG_871X("directly xmit pspoll_triggered packet\n"); */
- /* pattrib->triggered = 0; */
if (bmcst && xmitframe_hiq_filter(pxmitframe))
pattrib->qsel = 0x11;/* HIQ */
return ret;
}
-
if (bmcst) {
spin_lock_bh(&psta->sleep_q.lock);
@@ -2439,8 +2236,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
list_del_init(&pxmitframe->list);
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
psta->sleepq_len++;
@@ -2448,32 +2243,24 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (!(pstapriv->tim_bitmap & BIT(0)))
update_tim = true;
- pstapriv->tim_bitmap |= BIT(0);/* */
+ pstapriv->tim_bitmap |= BIT(0);
pstapriv->sta_dz_bitmap |= BIT(0);
- /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
- if (update_tim) {
+ if (update_tim)
update_beacon(padapter, _TIM_IE_, NULL, true);
- } else {
+ else
chk_bmc_sleepq_cmd(padapter);
- }
-
- /* spin_unlock_bh(&psta->sleep_q.lock); */
ret = true;
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast);
-
}
spin_unlock_bh(&psta->sleep_q.lock);
return ret;
-
}
-
spin_lock_bh(&psta->sleep_q.lock);
if (psta->state&WIFI_SLEEP_STATE) {
@@ -2482,8 +2269,6 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
if (pstapriv->sta_dz_bitmap & BIT(psta->aid)) {
list_del_init(&pxmitframe->list);
- /* spin_lock_bh(&psta->sleep_q.lock); */
-
list_add_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
psta->sleepq_len++;
@@ -2517,32 +2302,20 @@ sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fr
pstapriv->tim_bitmap |= BIT(psta->aid);
- /* DBG_871X("enqueue, sq_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
if (update_tim)
- /* DBG_871X("sleepq_len == 1, update BCNTIM\n"); */
/* upate BCN for TIM IE */
update_beacon(padapter, _TIM_IE_, NULL, true);
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
-
- /* if (psta->sleepq_len > (NR_XMITFRAME>>3)) */
- /* */
- /* wakeup_sta_to_xmit(padapter, psta); */
- /* */
-
ret = true;
DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast);
}
-
}
spin_unlock_bh(&psta->sleep_q.lock);
return ret;
-
}
static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
@@ -2575,11 +2348,8 @@ static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struc
ptxservq->qcnt--;
phwxmits[ac_index].accnt--;
} else {
- /* DBG_871X("xmitframe_enqueue_for_sleeping_sta return false\n"); */
}
-
}
-
}
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
@@ -2594,34 +2364,28 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
spin_lock_bh(&pxmitpriv->lock);
psta->state |= WIFI_SLEEP_STATE;
pstapriv->sta_dz_bitmap |= BIT(psta->aid);
-
-
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
-
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
/* for BC/MC Frames */
pstaxmitpriv = &psta_bmc->sta_xmitpriv;
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2637,8 +2401,6 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
psta_bmc = rtw_get_bcmc_stainfo(padapter);
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
@@ -2690,26 +2452,12 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
-/*
- spin_unlock_bh(&psta->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe) == true)
- {
- rtw_os_xmit_complete(padapter, pxmitframe);
- }
- spin_lock_bh(&psta->sleep_q.lock);
-*/
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
-
}
if (psta->sleepq_len == 0) {
- if (pstapriv->tim_bitmap & BIT(psta->aid)) {
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
+ if (pstapriv->tim_bitmap & BIT(psta->aid))
update_mask = BIT(0);
- }
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -2746,44 +2494,25 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
else
pxmitframe->attrib.mdata = 0;
-
pxmitframe->attrib.triggered = 1;
-/*
- spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe) == true)
- {
- rtw_os_xmit_complete(padapter, pxmitframe);
- }
- spin_lock_bh(&psta_bmc->sleep_q.lock);
-
-*/
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
}
if (psta_bmc->sleepq_len == 0) {
- if (pstapriv->tim_bitmap & BIT(0)) {
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
+ if (pstapriv->tim_bitmap & BIT(0))
update_mask |= BIT(1);
- }
+
pstapriv->tim_bitmap &= ~BIT(0);
pstapriv->sta_dz_bitmap &= ~BIT(0);
}
-
}
_exit:
- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
if (update_mask)
- /* update_BCNTIM(padapter); */
- /* printk("%s => call update_beacon\n", __func__); */
update_beacon(padapter, _TIM_IE_, NULL, true);
-
}
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
@@ -2794,8 +2523,6 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- /* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
@@ -2848,16 +2575,10 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
- /* DBG_871X("wakeup to xmit, qlen == 0, update_BCNTIM, tim =%x\n", pstapriv->tim_bitmap); */
- /* upate BCN for TIM IE */
- /* update_BCNTIM(padapter); */
update_beacon(padapter, _TIM_IE_, NULL, true);
- /* update_mask = BIT(0); */
}
-
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2875,7 +2596,7 @@ void enqueue_pending_xmitbuf(
list_add_tail(&pxmitbuf->list, get_list_head(pqueue));
spin_unlock_bh(&pqueue->lock);
- complete(&(pri_adapter->xmitpriv.xmit_comp));
+ complete(&pri_adapter->xmitpriv.xmit_comp);
}
void enqueue_pending_xmitbuf_to_head(
@@ -2898,7 +2619,6 @@ struct xmit_buf *dequeue_pending_xmitbuf(
struct xmit_buf *pxmitbuf;
struct __queue *pqueue;
-
pxmitbuf = NULL;
pqueue = &pxmitpriv->pending_xmitbuf_queue;
@@ -2924,7 +2644,6 @@ struct xmit_buf *dequeue_pending_xmitbuf_under_survey(
struct xmit_buf *pxmitbuf;
struct __queue *pqueue;
-
pxmitbuf = NULL;
pqueue = &pxmitpriv->pending_xmitbuf_queue;
@@ -2983,7 +2702,6 @@ int rtw_xmit_thread(void *context)
s32 err;
struct adapter *padapter;
-
err = _SUCCESS;
padapter = context;
@@ -2992,7 +2710,7 @@ int rtw_xmit_thread(void *context)
do {
err = rtw_hal_xmit_thread_handler(padapter);
flush_signals_thread();
- } while (_SUCCESS == err);
+ } while (err == _SUCCESS);
complete(&padapter->xmitpriv.terminate_xmitthread_comp);
@@ -3022,9 +2740,8 @@ int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
status = sctx->status;
}
- if (status == RTW_SCTX_DONE_SUCCESS) {
+ if (status == RTW_SCTX_DONE_SUCCESS)
ret = _SUCCESS;
- }
return ret;
}
@@ -3075,9 +2792,8 @@ void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
- if (pxmitpriv->ack_tx) {
+ if (pxmitpriv->ack_tx)
rtw_sctx_done_err(&pack_tx_ops, status);
- } else {
+ else
DBG_871X("%s ack_tx not set\n", __func__);
- }
}
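
The long rtw_xmit.c diff bundles several more checkpatch items: braces dropped around single-statement branches and loop bodies, constant-first comparisons such as "_SUCCESS == err" flipped to read "err == _SUCCESS", and redundant parentheses stripped from address-of expressions like "&(pxmitbuf->list)". A small sketch exercising all three, with invented helpers standing in for the driver's thread handler:

#define _SUCCESS 1

static int run_once(int *budget)
{
	return (*budget)-- > 0 ? _SUCCESS : 0;	/* stand-in for rtw_hal_xmit_thread_handler() */
}

static void xmit_loop(void)
{
	int budget = 3;
	int err;

	do {
		err = run_once(&budget);	/* &budget, not &(budget) */
	} while (err == _SUCCESS);		/* variable first, constant second */
}

static void wmm_seq_init(unsigned char seq[4])
{
	int i;

	for (i = 0; i < 4; i++)			/* one statement: no braces */
		seq[i] = i;
}

int main(void)
{
	unsigned char seq[4];

	xmit_loop();
	wmm_seq_init(seq);
	return 0;
}

The constant-on-the-left style was once a guard against typing "=" for "=="; kernel style drops it because modern compilers already warn when an assignment is used as a condition.
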
diff --git a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
index 3239d37087c8..1ca9063a269f 100644
--- a/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
+++ b/drivers/staging/rtl8723bs/hal/HalPhyRf_8723B.c
@@ -402,8 +402,6 @@ static void GetDeltaSwingTable_8723B(
*TemperatureUP_B = (u8 *)DeltaSwingTableIdx_2GA_P_8188E;
*TemperatureDOWN_B = (u8 *)DeltaSwingTableIdx_2GA_N_8188E;
}
-
- return;
}
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 6e4a1fcb8790..d5793e4614bf 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1315,7 +1315,7 @@ void EXhalbtcoutsrc_DisplayBtCoexInfo(PBTC_COEXIST pBtCoexist)
/*
* Description:
- *Run BT-Coexist mechansim or not
+ *Run BT-Coexist mechanism or not
*
*/
void hal_btcoex_SetBTCoexist(struct adapter *padapter, u8 bBtExist)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index eddd56abbb2d..109bd85b0cd8 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -30,7 +30,6 @@ void rtw_hal_data_deinit(struct adapter *padapter)
{
if (is_primary_adapter(padapter)) { /* if (padapter->isprimary) */
if (padapter->HalData) {
- phy_free_filebuf(padapter);
vfree(padapter->HalData);
padapter->HalData = NULL;
padapter->hal_data_sz = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 6539bee9b5ba..eb7de3617d83 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -2202,1079 +2202,3 @@ void Hal_ChannelPlanToRegulation(struct adapter *Adapter, u16 ChannelPlan)
break;
}
}
-
-
-static char file_path_bs[PATH_MAX];
-
-#define GetLineFromBuffer(buffer) strsep(&buffer, "\n")
-
-int phy_ConfigMACWithParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_MAC_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->mac_reg_len == 0) && !pHalData->mac_reg) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->mac_reg = vzalloc(rlen);
- if (pHalData->mac_reg) {
- memcpy(pHalData->mac_reg, pHalData->para_file_buf, rlen);
- pHalData->mac_reg_len = rlen;
- } else
- DBG_871X("%s mac_reg alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->mac_reg_len != 0) && (pHalData->mac_reg != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->mac_reg, pHalData->mac_reg_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove))
- rtw_write8(Adapter, u4bRegOffset, (u8)u4bRegValue);
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-int phy_ConfigBBWithParaFile(
- struct adapter *Adapter, char *pFileName, u32 ConfigType
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
- char *pBuf = NULL;
- u32 *pBufLen = NULL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PARA_FILE))
- return rtStatus;
-
- switch (ConfigType) {
- case CONFIG_BB_PHY_REG:
- pBuf = pHalData->bb_phy_reg;
- pBufLen = &pHalData->bb_phy_reg_len;
- break;
- case CONFIG_BB_AGC_TAB:
- pBuf = pHalData->bb_agc_tab;
- pBufLen = &pHalData->bb_agc_tab_len;
- break;
- default:
- DBG_871X("Unknown ConfigType!! %d\r\n", ConfigType);
- break;
- }
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pBuf = vzalloc(rlen);
- if (pBuf) {
- memcpy(pBuf, pHalData->para_file_buf, rlen);
- *pBufLen = rlen;
-
- switch (ConfigType) {
- case CONFIG_BB_PHY_REG:
- pHalData->bb_phy_reg = pBuf;
- break;
- case CONFIG_BB_AGC_TAB:
- pHalData->bb_agc_tab = pBuf;
- break;
- }
- } else
- DBG_871X("%s(): ConfigType %d alloc fail !\n", __func__, ConfigType);
- }
- }
- } else {
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
- else if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe)
- msleep(50);
- else if (u4bRegOffset == 0xfd)
- mdelay(5);
- else if (u4bRegOffset == 0xfc)
- mdelay(1);
- else if (u4bRegOffset == 0xfb)
- udelay(50);
- else if (u4bRegOffset == 0xfa)
- udelay(5);
- else if (u4bRegOffset == 0xf9)
- udelay(1);
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- /* DBG_871X("[BB-ADDR]%03lX =%08lX\n", u4bRegOffset, u4bRegValue); */
- PHY_SetBBReg(Adapter, u4bRegOffset, bMaskDWord, u4bRegValue);
-
- if (u4bRegOffset == 0xa24)
- pHalData->odmpriv.RFCalibrateInfo.RegA24 = u4bRegValue;
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static void phy_DecryptBBPgParaFile(struct adapter *Adapter, char *buffer)
-{
- u32 i = 0, j = 0;
- u8 map[95] = {0};
- u8 currentChar;
- char *BufOfLines, *ptmp;
-
- /* DBG_871X("=====>phy_DecryptBBPgParaFile()\n"); */
- /* 32 the ascii code of the first visable char, 126 the last one */
- for (i = 0; i < 95; ++i)
- map[i] = (u8) (94 - i);
-
- ptmp = buffer;
- i = 0;
- for (BufOfLines = GetLineFromBuffer(ptmp); BufOfLines != NULL; BufOfLines = GetLineFromBuffer(ptmp)) {
- /* DBG_871X("Encrypted Line: %s\n", BufOfLines); */
-
- for (j = 0; j < strlen(BufOfLines); ++j) {
- currentChar = BufOfLines[j];
-
- if (currentChar == '\0')
- break;
-
- currentChar -= (u8) ((((i + j) * 3) % 128));
-
- BufOfLines[j] = map[currentChar - 32] + 32;
- }
- /* DBG_871X("Decrypted Line: %s\n", BufOfLines); */
- if (strlen(BufOfLines) != 0)
- i++;
- BufOfLines[strlen(BufOfLines)] = '\n';
- }
-}
-
-static int phy_ParseBBPgParaFile(struct adapter *Adapter, char *buffer)
-{
- int rtStatus = _SUCCESS;
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegMask, u4bRegValue;
- u32 u4bMove;
- bool firstLine = true;
- u8 tx_num = 0;
- u8 band = 0, rf_path = 0;
-
- /* DBG_871X("=====>phy_ParseBBPgParaFile()\n"); */
-
- if (Adapter->registrypriv.RegDecryptCustomFile == 1)
- phy_DecryptBBPgParaFile(Adapter, buffer);
-
- ptmp = buffer;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- if (isAllSpaceOrTab(szLine, sizeof(*szLine)))
- continue;
-
- /* Get header info (relative value or exact value) */
- if (firstLine) {
- if (eqNByte(szLine, (u8 *)("#[v1]"), 5)) {
-
- pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
- /* DBG_871X("This is a new format PHY_REG_PG.txt\n"); */
- } else if (eqNByte(szLine, (u8 *)("#[v0]"), 5)) {
- pHalData->odmpriv.PhyRegPgVersion = szLine[3] - '0';
- /* DBG_871X("This is a old format PHY_REG_PG.txt ok\n"); */
- } else {
- DBG_871X("The format in PHY_REG_PG are invalid %s\n", szLine);
- return _FAIL;
- }
-
- if (eqNByte(szLine + 5, (u8 *)("[Exact]#"), 8)) {
- pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
- /* DBG_871X("The values in PHY_REG_PG are exact values ok\n"); */
- firstLine = false;
- continue;
- } else if (eqNByte(szLine + 5, (u8 *)("[Relative]#"), 11)) {
- pHalData->odmpriv.PhyRegPgValueType = PHY_REG_PG_RELATIVE_VALUE;
- /* DBG_871X("The values in PHY_REG_PG are relative values ok\n"); */
- firstLine = false;
- continue;
- } else {
- DBG_871X("The values in PHY_REG_PG are invalid %s\n", szLine);
- return _FAIL;
- }
- }
-
- if (pHalData->odmpriv.PhyRegPgVersion == 0) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- szLine += u4bMove;
- if (u4bRegOffset == 0xffff) /* Ending. */
- break;
-
- /* Get 2nd hex value as register mask. */
- if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
- /* Get 3rd hex value as register value. */
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, u4bRegValue);
- /* DBG_871X("[ADDR] %03X =%08X Mask =%08x\n", u4bRegOffset, u4bRegValue, u4bRegMask); */
- } else
- return _FAIL;
- } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
- u32 combineValue = 0;
- u8 integer = 0, fraction = 0;
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
- PHY_StoreTxPowerByRate(Adapter, 0, 0, 1, u4bRegOffset, u4bRegMask, combineValue);
-
- /* DBG_871X("[ADDR] 0x%3x = 0x%4x\n", u4bRegOffset, combineValue); */
- }
- }
- } else if (pHalData->odmpriv.PhyRegPgVersion > 0) {
- u32 index = 0;
-
- if (eqNByte(szLine, "0xffff", 6))
- break;
-
- if (!eqNByte("#[END]#", szLine, 7)) {
- /* load the table label info */
- if (szLine[0] == '#') {
- index = 0;
- if (eqNByte(szLine, "#[2.4G]", 7)) {
- band = BAND_ON_2_4G;
- index += 8;
- } else if (eqNByte(szLine, "#[5G]", 5)) {
- band = BAND_ON_5G;
- index += 6;
- } else {
- DBG_871X("Invalid band %s in PHY_REG_PG.txt\n", szLine);
- return _FAIL;
- }
-
- rf_path = szLine[index] - 'A';
- /* DBG_871X(" Table label Band %d, RfPath %d\n", band, rf_path); */
- } else { /* load rows of tables */
- if (szLine[1] == '1')
- tx_num = RF_1TX;
- else if (szLine[1] == '2')
- tx_num = RF_2TX;
- else if (szLine[1] == '3')
- tx_num = RF_3TX;
- else if (szLine[1] == '4')
- tx_num = RF_4TX;
- else {
- DBG_871X("Invalid row in PHY_REG_PG.txt %c\n", szLine[1]);
- return _FAIL;
- }
-
- while (szLine[index] != ']')
- ++index;
- ++index;/* skip ] */
-
- /* Get 2nd hex value as register offset. */
- szLine += index;
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- /* Get 2nd hex value as register mask. */
- if (GetHexValueFromString(szLine, &u4bRegMask, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_RELATIVE_VALUE) {
- /* Get 3rd hex value as register value. */
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, u4bRegValue);
- /* DBG_871X("[ADDR] %03X (tx_num %d) =%08X Mask =%08x\n", u4bRegOffset, tx_num, u4bRegValue, u4bRegMask); */
- } else
- return _FAIL;
- } else if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE) {
- u32 combineValue = 0;
- u8 integer = 0, fraction = 0;
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
-
- if (GetFractionValueFromString(szLine, &integer, &fraction, &u4bMove))
- szLine += u4bMove;
- else
- return _FAIL;
-
- integer *= 2;
- if (fraction == 5)
- integer += 1;
- combineValue <<= 8;
- combineValue |= (((integer / 10) << 4) + (integer % 10));
- /* DBG_871X(" %d", integer); */
- PHY_StoreTxPowerByRate(Adapter, band, rf_path, tx_num, u4bRegOffset, u4bRegMask, combineValue);
-
- /* DBG_871X("[ADDR] 0x%3x (tx_num %d) = 0x%4x\n", u4bRegOffset, tx_num, combineValue); */
- }
- }
- }
- }
- }
- }
- /* DBG_871X("<=====phy_ParseBBPgParaFile()\n"); */
- return rtStatus;
-}
-
-int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_BB_PG_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->bb_phy_reg_pg_len == 0) && !pHalData->bb_phy_reg_pg) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->bb_phy_reg_pg = vzalloc(rlen);
- if (pHalData->bb_phy_reg_pg) {
- memcpy(pHalData->bb_phy_reg_pg, pHalData->para_file_buf, rlen);
- pHalData->bb_phy_reg_pg_len = rlen;
- } else
- DBG_871X("%s bb_phy_reg_pg alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->bb_phy_reg_pg_len != 0) && (pHalData->bb_phy_reg_pg != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->bb_phy_reg_pg, pHalData->bb_phy_reg_pg_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("phy_ConfigBBWithPgParaFile(): read %s ok\n", pFileName); */
- phy_ParseBBPgParaFile(Adapter, pHalData->para_file_buf);
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-int PHY_ConfigRFWithParaFile(
- struct adapter *Adapter, char *pFileName, u8 eRFPath
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
- u32 u4bRegOffset, u4bRegValue, u4bMove;
- u16 i;
- char *pBuf = NULL;
- u32 *pBufLen = NULL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_PARA_FILE))
- return rtStatus;
-
- switch (eRFPath) {
- case ODM_RF_PATH_A:
- pBuf = pHalData->rf_radio_a;
- pBufLen = &pHalData->rf_radio_a_len;
- break;
- case ODM_RF_PATH_B:
- pBuf = pHalData->rf_radio_b;
- pBufLen = &pHalData->rf_radio_b_len;
- break;
- default:
- DBG_871X("Unknown RF path!! %d\r\n", eRFPath);
- break;
- }
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pBuf = vzalloc(rlen);
- if (pBuf) {
- memcpy(pBuf, pHalData->para_file_buf, rlen);
- *pBufLen = rlen;
-
- switch (eRFPath) {
- case ODM_RF_PATH_A:
- pHalData->rf_radio_a = pBuf;
- break;
- case ODM_RF_PATH_B:
- pHalData->rf_radio_b = pBuf;
- break;
- }
- } else
- DBG_871X("%s(): eRFPath =%d alloc fail !\n", __func__, eRFPath);
- }
- }
- } else {
- if (pBufLen && (*pBufLen == 0) && !pBuf) {
- memcpy(pHalData->para_file_buf, pBuf, *pBufLen);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
-
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- /* Get 1st hex value as register offset. */
- if (GetHexValueFromString(szLine, &u4bRegOffset, &u4bMove)) {
- if (u4bRegOffset == 0xfe || u4bRegOffset == 0xffe) /* Deay specific ms. Only RF configuration require delay. */
- msleep(50);
- else if (u4bRegOffset == 0xfd) {
- /* mdelay(5); */
- for (i = 0; i < 100; i++)
- udelay(MAX_STALL_TIME);
- } else if (u4bRegOffset == 0xfc) {
- /* mdelay(1); */
- for (i = 0; i < 20; i++)
- udelay(MAX_STALL_TIME);
- } else if (u4bRegOffset == 0xfb)
- udelay(50);
- else if (u4bRegOffset == 0xfa)
- udelay(5);
- else if (u4bRegOffset == 0xf9)
- udelay(1);
- else if (u4bRegOffset == 0xffff)
- break;
-
- /* Get 2nd hex value as register value. */
- szLine += u4bMove;
- if (GetHexValueFromString(szLine, &u4bRegValue, &u4bMove)) {
- PHY_SetRFReg(Adapter, eRFPath, u4bRegOffset, bRFRegOffsetMask, u4bRegValue);
-
- /* Temp add, for frequency lock, if no delay, that may cause */
- /* frequency shift, ex: 2412MHz => 2417MHz */
- /* If frequency shift, the following action may works. */
- /* Fractional-N table in radio_a.txt */
- /* 0x2a 0x00001 channel 1 */
- /* 0x2b 0x00808 frequency divider. */
- /* 0x2b 0x53333 */
- /* 0x2c 0x0000c */
- udelay(1);
- }
- }
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static void initDeltaSwingIndexTables(
- struct adapter *Adapter,
- char *Band,
- char *Path,
- char *Sign,
- char *Channel,
- char *Rate,
- char *Data
-)
-{
- #define STR_EQUAL_5G(_band, _path, _sign, _rate, _chnl) \
- ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
- (strcmp(Rate, _rate) == 0) && (strcmp(Channel, _chnl) == 0)\
- )
- #define STR_EQUAL_2G(_band, _path, _sign, _rate) \
- ((strcmp(Band, _band) == 0) && (strcmp(Path, _path) == 0) && (strcmp(Sign, _sign) == 0) &&\
- (strcmp(Rate, _rate) == 0)\
- )
-
- #define STORE_SWING_TABLE(_array, _iteratedIdx) \
- for (token = strsep(&Data, delim); token != NULL; token = strsep(&Data, delim)) {\
- sscanf(token, "%d", &idx);\
- _array[_iteratedIdx++] = (u8)idx;\
- } \
-
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- PDM_ODM_T pDM_Odm = &pHalData->odmpriv;
- PODM_RF_CAL_T pRFCalibrateInfo = &(pDM_Odm->RFCalibrateInfo);
- u32 j = 0;
- char *token;
- char delim[] = ",";
- u32 idx = 0;
-
- /* DBG_871X("===>initDeltaSwingIndexTables(): Band: %s;\nPath: %s;\nSign: %s;\nChannel: %s;\nRate: %s;\n, Data: %s;\n", */
- /* Band, Path, Sign, Channel, Rate, Data); */
-
- if (STR_EQUAL_2G("2G", "A", "+", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_P, j);
- } else if (STR_EQUAL_2G("2G", "A", "-", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKA_N, j);
- } else if (STR_EQUAL_2G("2G", "B", "+", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_P, j);
- } else if (STR_EQUAL_2G("2G", "B", "-", "CCK")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GCCKB_N, j);
- } else if (STR_EQUAL_2G("2G", "A", "+", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_P, j);
- } else if (STR_EQUAL_2G("2G", "A", "-", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GA_N, j);
- } else if (STR_EQUAL_2G("2G", "B", "+", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_P, j);
- } else if (STR_EQUAL_2G("2G", "B", "-", "ALL")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_2GB_N, j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[0], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[0], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[0], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "0")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[0], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[1], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[1], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[1], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "1")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[1], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[2], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[2], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[2], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "2")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[2], j);
- } else if (STR_EQUAL_5G("5G", "A", "+", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_P[3], j);
- } else if (STR_EQUAL_5G("5G", "A", "-", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GA_N[3], j);
- } else if (STR_EQUAL_5G("5G", "B", "+", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_P[3], j);
- } else if (STR_EQUAL_5G("5G", "B", "-", "ALL", "3")) {
- STORE_SWING_TABLE(pRFCalibrateInfo->DeltaSwingTableIdx_5GB_N[3], j);
- } else
- DBG_871X("===>initDeltaSwingIndexTables(): The input is invalid!!\n");
-}
-
-int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char *pFileName)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
- char *szLine, *ptmp;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_TRACK_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->rf_tx_pwr_track_len == 0) && !pHalData->rf_tx_pwr_track) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->rf_tx_pwr_track = vzalloc(rlen);
- if (pHalData->rf_tx_pwr_track) {
- memcpy(pHalData->rf_tx_pwr_track, pHalData->para_file_buf, rlen);
- pHalData->rf_tx_pwr_track_len = rlen;
- } else
- DBG_871X("%s rf_tx_pwr_track alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->rf_tx_pwr_track_len != 0) && (pHalData->rf_tx_pwr_track != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_track, pHalData->rf_tx_pwr_track_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s successfully\n", __func__, pFileName); */
-
- ptmp = pHalData->para_file_buf;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- if (!IsCommentString(szLine)) {
- char band[5] = "", path[5] = "", sign[5] = "";
- char chnl[5] = "", rate[10] = "";
- char data[300] = ""; /* 100 is too small */
- const int len = strlen(szLine);
- int i;
-
- if (len < 10 || szLine[0] != '[')
- continue;
-
- strncpy(band, szLine+1, 2);
- strncpy(path, szLine+5, 1);
- strncpy(sign, szLine+8, 1);
-
- i = 10; /* szLine+10 */
- if (!ParseQualifiedString(szLine, &i, rate, '[', ']')) {
- /* DBG_871X("Fail to parse rate!\n"); */
- }
- if (!ParseQualifiedString(szLine, &i, chnl, '[', ']')) {
- /* DBG_871X("Fail to parse channel group!\n"); */
- }
- while (i < len && szLine[i] != '{')
- i++;
- if (!ParseQualifiedString(szLine, &i, data, '{', '}')) {
- /* DBG_871X("Fail to parse data!\n"); */
- }
-
- initDeltaSwingIndexTables(Adapter, band, path, sign, chnl, rate, data);
- }
- }
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-static int phy_ParsePowerLimitTableFile(struct adapter *Adapter, char *buffer)
-{
- u32 i = 0, forCnt = 0;
- u8 loadingStage = 0, limitValue = 0, fraction = 0;
- char *szLine, *ptmp;
- int rtStatus = _SUCCESS;
- char band[10], bandwidth[10], rateSection[10],
- regulation[TXPWR_LMT_MAX_REGULATION_NUM][10], rfPath[10], colNumBuf[10];
- u8 colNum = 0;
-
- DBG_871X("===>phy_ParsePowerLimitTableFile()\n");
-
- if (Adapter->registrypriv.RegDecryptCustomFile == 1)
- phy_DecryptBBPgParaFile(Adapter, buffer);
-
- ptmp = buffer;
- for (szLine = GetLineFromBuffer(ptmp); szLine != NULL; szLine = GetLineFromBuffer(ptmp)) {
- /* skip comment */
- if (IsCommentString(szLine)) {
- continue;
- }
-
- if (loadingStage == 0) {
- for (forCnt = 0; forCnt < TXPWR_LMT_MAX_REGULATION_NUM; ++forCnt)
- memset((void *) regulation[forCnt], 0, 10);
-
- memset((void *) band, 0, 10);
- memset((void *) bandwidth, 0, 10);
- memset((void *) rateSection, 0, 10);
- memset((void *) rfPath, 0, 10);
- memset((void *) colNumBuf, 0, 10);
-
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- szLine[--i] = ' '; /* return the space in front of the regulation info */
-
- /* Parse the label of the table */
- if (!ParseQualifiedString(szLine, &i, band, ' ', ',')) {
- DBG_871X("Fail to parse band!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, bandwidth, ' ', ',')) {
- DBG_871X("Fail to parse bandwidth!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, rfPath, ' ', ',')) {
- DBG_871X("Fail to parse rf path!\n");
- return _FAIL;
- }
- if (!ParseQualifiedString(szLine, &i, rateSection, ' ', ',')) {
- DBG_871X("Fail to parse rate!\n");
- return _FAIL;
- }
-
- loadingStage = 1;
- } else if (loadingStage == 1) {
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (!eqNByte((u8 *)(szLine + i), (u8 *)("START"), 5)) {
- DBG_871X("Lost \"## START\" label\n");
- return _FAIL;
- }
-
- loadingStage = 2;
- } else if (loadingStage == 2) {
- if (szLine[0] != '#' || szLine[1] != '#')
- continue;
-
- /* skip the space */
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (!ParseQualifiedString(szLine, &i, colNumBuf, '#', '#')) {
- DBG_871X("Fail to parse column number!\n");
- return _FAIL;
- }
-
- if (!GetU1ByteIntegerFromStringInDecimal(colNumBuf, &colNum))
- return _FAIL;
-
- if (colNum > TXPWR_LMT_MAX_REGULATION_NUM) {
- DBG_871X(
- "invalid col number %d (greater than max %d)\n",
- colNum, TXPWR_LMT_MAX_REGULATION_NUM
- );
- return _FAIL;
- }
-
- for (forCnt = 0; forCnt < colNum; ++forCnt) {
- u8 regulation_name_cnt = 0;
-
- /* skip the space */
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- while (szLine[i] != ' ' && szLine[i] != '\t' && szLine[i] != '\0')
- regulation[forCnt][regulation_name_cnt++] = szLine[i++];
- /* DBG_871X("regulation %s!\n", regulation[forCnt]); */
-
- if (regulation_name_cnt == 0) {
- DBG_871X("invalid number of regulation!\n");
- return _FAIL;
- }
- }
-
- loadingStage = 3;
- } else if (loadingStage == 3) {
- char channel[10] = {0}, powerLimit[10] = {0};
- u8 cnt = 0;
-
- /* the table ends */
- if (szLine[0] == '#' && szLine[1] == '#') {
- i = 2;
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- if (eqNByte((u8 *)(szLine + i), (u8 *)("END"), 3)) {
- loadingStage = 0;
- continue;
- } else {
- DBG_871X("Wrong format\n");
- DBG_871X("<===== phy_ParsePowerLimitTableFile()\n");
- return _FAIL;
- }
- }
-
- if ((szLine[0] != 'c' && szLine[0] != 'C') ||
- (szLine[1] != 'h' && szLine[1] != 'H')) {
- DBG_871X("Meet wrong channel => power limt pair\n");
- continue;
- }
- i = 2;/* move to the location behind 'h' */
-
- /* load the channel number */
- cnt = 0;
- while (szLine[i] >= '0' && szLine[i] <= '9') {
- channel[cnt] = szLine[i];
- ++cnt;
- ++i;
- }
- /* DBG_871X("chnl %s!\n", channel); */
-
- for (forCnt = 0; forCnt < colNum; ++forCnt) {
- /* skip the space between channel number and the power limit value */
- while (szLine[i] == ' ' || szLine[i] == '\t')
- ++i;
-
- /* load the power limit value */
- cnt = 0;
- fraction = 0;
- memset((void *) powerLimit, 0, 10);
- while ((szLine[i] >= '0' && szLine[i] <= '9') || szLine[i] == '.') {
- if (szLine[i] == '.') {
- if ((szLine[i+1] >= '0' && szLine[i+1] <= '9')) {
- fraction = szLine[i+1];
- i += 2;
- } else {
- DBG_871X("Wrong fraction in TXPWR_LMT.txt\n");
- return _FAIL;
- }
-
- break;
- }
-
- powerLimit[cnt] = szLine[i];
- ++cnt;
- ++i;
- }
-
- if (powerLimit[0] == '\0') {
- powerLimit[0] = '6';
- powerLimit[1] = '3';
- i += 2;
- } else {
- if (!GetU1ByteIntegerFromStringInDecimal(powerLimit, &limitValue))
- return _FAIL;
-
- limitValue *= 2;
- cnt = 0;
- if (fraction == '5')
- ++limitValue;
-
- /* the value is greater or equal to 100 */
- if (limitValue >= 100) {
- powerLimit[cnt++] = limitValue/100 + '0';
- limitValue %= 100;
-
- if (limitValue >= 10) {
- powerLimit[cnt++] = limitValue/10 + '0';
- limitValue %= 10;
- } else
- powerLimit[cnt++] = '0';
-
- powerLimit[cnt++] = limitValue + '0';
- } else if (limitValue >= 10) { /* the value is greater or equal to 10 */
- powerLimit[cnt++] = limitValue/10 + '0';
- limitValue %= 10;
- powerLimit[cnt++] = limitValue + '0';
- }
- /* the value is less than 10 */
- else
- powerLimit[cnt++] = limitValue + '0';
-
- powerLimit[cnt] = '\0';
- }
-
- /* DBG_871X("ch%s => %s\n", channel, powerLimit); */
-
- /* store the power limit value */
- PHY_SetTxPowerLimit(Adapter, (u8 *)regulation[forCnt], (u8 *)band,
- (u8 *)bandwidth, (u8 *)rateSection, (u8 *)rfPath, (u8 *)channel, (u8 *)powerLimit);
-
- }
- } else {
- DBG_871X("Abnormal loading stage in phy_ParsePowerLimitTableFile()!\n");
- rtStatus = _FAIL;
- break;
- }
- }
-
- DBG_871X("<===phy_ParsePowerLimitTableFile()\n");
- return rtStatus;
-}
-
-int PHY_ConfigRFWithPowerLimitTableParaFile(
- struct adapter *Adapter, char *pFileName
-)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rlen = 0, rtStatus = _FAIL;
-
- if (!(Adapter->registrypriv.load_phy_file & LOAD_RF_TXPWR_LMT_PARA_FILE))
- return rtStatus;
-
- memset(pHalData->para_file_buf, 0, MAX_PARA_FILE_BUF_LEN);
-
- if ((pHalData->rf_tx_pwr_lmt_len == 0) && !pHalData->rf_tx_pwr_lmt) {
- rtw_merge_string(file_path_bs, PATH_MAX, rtw_phy_file_path, pFileName);
-
- if (rtw_is_file_readable(file_path_bs) == true) {
- rlen = rtw_retrive_from_file(file_path_bs, pHalData->para_file_buf, MAX_PARA_FILE_BUF_LEN);
- if (rlen > 0) {
- rtStatus = _SUCCESS;
- pHalData->rf_tx_pwr_lmt = vzalloc(rlen);
- if (pHalData->rf_tx_pwr_lmt) {
- memcpy(pHalData->rf_tx_pwr_lmt, pHalData->para_file_buf, rlen);
- pHalData->rf_tx_pwr_lmt_len = rlen;
- } else
- DBG_871X("%s rf_tx_pwr_lmt alloc fail !\n", __func__);
- }
- }
- } else {
- if ((pHalData->rf_tx_pwr_lmt_len != 0) && (pHalData->rf_tx_pwr_lmt != NULL)) {
- memcpy(pHalData->para_file_buf, pHalData->rf_tx_pwr_lmt, pHalData->rf_tx_pwr_lmt_len);
- rtStatus = _SUCCESS;
- } else
- DBG_871X("%s(): Critical Error !!!\n", __func__);
- }
-
- if (rtStatus == _SUCCESS) {
- /* DBG_871X("%s(): read %s ok\n", __func__, pFileName); */
- rtStatus = phy_ParsePowerLimitTableFile(Adapter, pHalData->para_file_buf);
- } else
- DBG_871X("%s(): No File %s, Load from HWImg Array!\n", __func__, pFileName);
-
- return rtStatus;
-}
-
-void phy_free_filebuf(struct adapter *padapter)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(padapter);
-
- if (pHalData->mac_reg)
- vfree(pHalData->mac_reg);
- if (pHalData->bb_phy_reg)
- vfree(pHalData->bb_phy_reg);
- if (pHalData->bb_agc_tab)
- vfree(pHalData->bb_agc_tab);
- if (pHalData->bb_phy_reg_pg)
- vfree(pHalData->bb_phy_reg_pg);
- if (pHalData->bb_phy_reg_mp)
- vfree(pHalData->bb_phy_reg_mp);
- if (pHalData->rf_radio_a)
- vfree(pHalData->rf_radio_a);
- if (pHalData->rf_radio_b)
- vfree(pHalData->rf_radio_b);
- if (pHalData->rf_tx_pwr_track)
- vfree(pHalData->rf_tx_pwr_track);
- if (pHalData->rf_tx_pwr_lmt)
- vfree(pHalData->rf_tx_pwr_lmt);
-
-}
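
Note: this is the heart of the series. hal_com_phycfg.c loses the whole external-parameter-file path: phy_ConfigMACWithParaFile(), phy_ConfigBBWithParaFile(), PHY_ConfigRFWithParaFile() and friends all followed the same shape, reading a text table from the filesystem into para_file_buf and then walking it line by line with strsep(), skipping comments and writing offset/value pairs until a 0xffff sentinel. With the loaders gone, configuration always comes from the built-in HWImg header arrays (see the rtl8723b_phycfg.c and rtl8723b_rf6052.c hunks below). A minimal, self-contained sketch of that shared parse loop, with illustrative names rather than the driver's:

#define _DEFAULT_SOURCE	/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

/* Toy version of the loop the deleted loaders shared: one
 * "offset value" pair per line, "//" starts a comment, and an
 * offset of 0xffff ends the table. strsep() is the same helper
 * the driver's GetLineFromBuffer() macro wrapped. */
static void parse_reg_table(char *buf)
{
	char *line;

	while ((line = strsep(&buf, "\n")) != NULL) {
		unsigned int off, val;

		if (line[0] == '\0' || line[0] == '/')	/* blank or comment */
			continue;
		if (sscanf(line, "%x %x", &off, &val) != 2)
			continue;
		if (off == 0xffff)			/* end marker */
			break;
		printf("write8(0x%03x) = 0x%02x\n", off, val);
	}
}

int main(void)
{
	char text[] = "0x010 0x3f\n// comment\n0x024 0x80\n0xffff 0x00\n";

	parse_reg_table(text);
	return 0;
}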
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 70d98c58ca97..40fe43c62c45 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -1074,7 +1074,6 @@ void odm_FAThresholdCheck(
dm_FA_thres[1] = 4000;
dm_FA_thres[2] = 5000;
}
- return;
}
u8 odm_ForbiddenIGICheck(void *pDM_VOID, u8 DIG_Dynamic_MIN, u8 CurrentIGI)
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
index 7760fd0eb6c9..71b5a50b6ef6 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c
@@ -2071,8 +2071,6 @@ static void ConstructBtNullFunctionData(
struct ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
u32 pktlen;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
u8 bssid[ETH_ALEN];
@@ -2080,8 +2078,6 @@ static void ConstructBtNullFunctionData(
FUNC_ADPT_ARG(padapter), bQoS, bEosp, bForcePowerSave);
pwlanhdr = (struct ieee80211_hdr *)pframe;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
if (!StaAddr) {
memcpy(bssid, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -2122,12 +2118,9 @@ static void ConstructBtNullFunctionData(
static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
struct xmit_frame *pcmdframe;
struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
- struct mlme_ext_priv *pmlmeext;
- struct mlme_ext_info *pmlmeinfo;
u32 BeaconLength = 0;
u32 BTQosNullLength = 0;
u8 *ReservedPagePacket;
@@ -2140,10 +2133,7 @@ static void SetFwRsvdPagePkt_BTCoex(struct adapter *padapter)
/* DBG_8192C("+" FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(padapter)); */
- pHalData = GET_HAL_DATA(padapter);
pxmitpriv = &padapter->xmitpriv;
- pmlmeext = &padapter->mlmeextpriv;
- pmlmeinfo = &pmlmeext->mlmext_info;
TxDescLen = TXDESC_SIZE;
TxDescOffset = TXDESC_OFFSET;
PageSize = PAGE_SIZE_TX_8723B;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
index c514cb735afd..650fbedd34e8 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_dm.c
@@ -188,7 +188,8 @@ void rtl8723b_HalDmWatchDog(struct adapter *Adapter)
bBtDisabled = hal_btcoex_IsBtDisabled(Adapter);
- ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED, ((bBtDisabled == true)?false:true));
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_BT_ENABLED,
+ !bBtDisabled);
ODM_DMWatchdog(&pHalData->odmpriv);
}
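
Note: ((bBtDisabled == true) ? false : true) is just !bBtDisabled; the replacement says the same thing in one negation and brings the line back under the length limit.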
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index faeaf24fa833..66127f6c8e4d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -2234,12 +2234,8 @@ void rtl8723b_set_hal_ops(struct hal_ops *pHalFunc)
void rtl8723b_InitAntenna_Selection(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
u8 val;
-
- pHalData = GET_HAL_DATA(padapter);
-
val = rtw_read8(padapter, REG_LEDCFG2);
 /* Let 8051 take control of antenna setting */
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
@@ -3053,7 +3049,6 @@ static void rtl8723b_fill_default_txdesc(
{
struct adapter *padapter;
struct hal_com_data *pHalData;
- struct dm_priv *pdmpriv;
struct mlme_ext_priv *pmlmeext;
struct mlme_ext_info *pmlmeinfo;
struct pkt_attrib *pattrib;
@@ -3064,7 +3059,6 @@ static void rtl8723b_fill_default_txdesc(
padapter = pxmitframe->padapter;
pHalData = GET_HAL_DATA(padapter);
- pdmpriv = &pHalData->dmpriv;
pmlmeext = &padapter->mlmeextpriv;
pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -3773,7 +3767,6 @@ void C2HPacketHandler_8723B(struct adapter *padapter, u8 *pbuffer, u16 length)
process_c2h_event(padapter, &C2hEvent, tmpBuf);
/* c2h_handler_8723b(padapter,&C2hEvent); */
- return;
}
void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
@@ -4157,9 +4150,8 @@ void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
break;
}
- /* The value of ((usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B) */
- /* is getting the upper integer. */
- usNavUpper = (usNavUpper + HAL_NAV_UPPER_UNIT_8723B - 1) / HAL_NAV_UPPER_UNIT_8723B;
+ usNavUpper = DIV_ROUND_UP(usNavUpper,
+ HAL_NAV_UPPER_UNIT_8723B);
rtw_write8(padapter, REG_NAV_UPPER, (u8)usNavUpper);
}
break;
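
Note: DIV_ROUND_UP(n, d) is the kernel's ceiling-division macro, ((n) + (d) - 1) / (d), so it encodes exactly what the deleted two-line comment had to explain. For example, assuming a 128 us unit for HAL_NAV_UPPER_UNIT_8723B, usNavUpper = 300 becomes DIV_ROUND_UP(300, 128) = (300 + 127) / 128 = 3.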
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
index 6df2b58bdc67..cf23414d7224 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c
@@ -362,24 +362,10 @@ void PHY_SetRFReg_8723B(
*/
s32 PHY_MACConfig8723B(struct adapter *Adapter)
{
- int rtStatus = _SUCCESS;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- s8 *pszMACRegFile;
- s8 sz8723MACRegFile[] = RTL8723B_PHY_MACREG;
-
-
- pszMACRegFile = sz8723MACRegFile;
-
- /* */
- /* Config MAC */
- /* */
- rtStatus = phy_ConfigMACWithParaFile(Adapter, pszMACRegFile);
- if (rtStatus == _FAIL) {
- ODM_ReadAndConfig_MP_8723B_MAC_REG(&pHalData->odmpriv);
- rtStatus = _SUCCESS;
- }
- return rtStatus;
+ ODM_ReadAndConfig_MP_8723B_MAC_REG(&pHalData->odmpriv);
+ return _SUCCESS;
}
/**
@@ -427,17 +413,6 @@ static void phy_InitBBRFRegisterDefinition(struct adapter *Adapter)
static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
{
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- int rtStatus = _SUCCESS;
- u8 sz8723BBRegFile[] = RTL8723B_PHY_REG;
- u8 sz8723AGCTableFile[] = RTL8723B_AGC_TAB;
- u8 sz8723BBBRegPgFile[] = RTL8723B_PHY_REG_PG;
- u8 sz8723BRFTxPwrLmtFile[] = RTL8723B_TXPWR_LMT;
- u8 *pszBBRegFile = NULL, *pszAGCTableFile = NULL, *pszBBRegPgFile = NULL, *pszRFTxPwrLmtFile = NULL;
-
- pszBBRegFile = sz8723BBRegFile;
- pszAGCTableFile = sz8723AGCTableFile;
- pszBBRegPgFile = sz8723BBBRegPgFile;
- pszRFTxPwrLmtFile = sz8723BRFTxPwrLmtFile;
/* Read Tx Power Limit File */
PHY_InitTxPowerLimit(Adapter);
@@ -445,30 +420,14 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
Adapter->registrypriv.RegEnableTxPowerLimit == 1 ||
(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
) {
- if (PHY_ConfigRFWithPowerLimitTableParaFile(Adapter, pszRFTxPwrLmtFile) == _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_TXPWR_LMT, (ODM_RF_RADIO_PATH_E)0))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_871X("%s():Read Tx power limit fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
+ ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_RF_TXPWR_LMT, 0);
}
/* */
/* 1. Read PHY_REG.TXT BB INIT!! */
/* */
- if (phy_ConfigBBWithParaFile(Adapter, pszBBRegFile, CONFIG_BB_PHY_REG) ==
- _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():Write BB Reg Fail!!", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG);
/* If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
PHY_InitTxPowerByRate(Adapter);
@@ -476,11 +435,8 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
Adapter->registrypriv.RegEnableTxPowerByRate == 1 ||
(Adapter->registrypriv.RegEnableTxPowerByRate == 2 && pHalData->EEPROMRegulatory != 2)
) {
- if (phy_ConfigBBWithPgParaFile(Adapter, pszBBRegPgFile) ==
- _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
- rtStatus = _FAIL;
- }
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_BB_PHY_REG_PG);
if (pHalData->odmpriv.PhyRegPgValueType == PHY_REG_PG_EXACT_VALUE)
PHY_TxPowerByRateConfiguration(Adapter);
@@ -490,29 +446,14 @@ static int phy_BB8723b_Config_ParaFile(struct adapter *Adapter)
(Adapter->registrypriv.RegEnableTxPowerLimit == 2 && pHalData->EEPROMRegulatory == 1)
)
PHY_ConvertTxPowerLimitToPowerIndex(Adapter);
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():BB_PG Reg Fail!!\n", __func__);
- }
}
/* */
/* 2. Read BB AGC table Initialization */
/* */
- if (phy_ConfigBBWithParaFile(Adapter, pszAGCTableFile,
- CONFIG_BB_AGC_TAB) == _FAIL) {
- if (HAL_STATUS_SUCCESS != ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
- rtStatus = _FAIL;
- }
-
- if (rtStatus != _SUCCESS) {
- DBG_8192C("%s():AGC Table Fail\n", __func__);
- goto phy_BB8190_Config_ParaFile_Fail;
- }
-
-phy_BB8190_Config_ParaFile_Fail:
+ ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB);
- return rtStatus;
+ return _SUCCESS;
}
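
Note: with the file loaders gone, PHY_MACConfig8723B() and phy_BB8723b_Config_ParaFile() collapse to unconditional ODM_ReadAndConfig_MP_8723B_MAC_REG() / ODM_ConfigBBWithHeaderFile() calls. The fall-back-on-_FAIL logic, the rtStatus bookkeeping, and the phy_BB8190_Config_ParaFile_Fail label were all dead once the header-file path became the only path, so both functions can simply return _SUCCESS.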
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
index d0ffe0af5339..aafceaf9b139 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_rf6052.c
@@ -85,19 +85,8 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
u32 u4RegValue = 0;
u8 eRFPath;
struct bb_register_def *pPhyReg;
-
- int rtStatus = _SUCCESS;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- static char sz8723RadioAFile[] = RTL8723B_PHY_RADIO_A;
- static char sz8723RadioBFile[] = RTL8723B_PHY_RADIO_B;
- static s8 sz8723BTxPwrTrackFile[] = RTL8723B_TXPWR_TRACK;
- char *pszRadioAFile, *pszRadioBFile, *pszTxPwrTrackFile;
-
- pszRadioAFile = sz8723RadioAFile;
- pszRadioBFile = sz8723RadioBFile;
- pszTxPwrTrackFile = sz8723BTxPwrTrackFile;
-
/* 3----------------------------------------------------------------- */
/* 3 <2> Initialize RF */
/* 3----------------------------------------------------------------- */
@@ -136,21 +125,11 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
 /*----Initialize RF from configuration file----*/
switch (eRFPath) {
case RF_PATH_A:
- if (PHY_ConfigRFWithParaFile(Adapter, pszRadioAFile,
- eRFPath) == _FAIL) {
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
- rtStatus = _FAIL;
- }
- break;
case RF_PATH_B:
- if (PHY_ConfigRFWithParaFile(Adapter, pszRadioBFile,
- eRFPath) == _FAIL) {
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, CONFIG_RF_RADIO, (ODM_RF_RADIO_PATH_E)eRFPath))
- rtStatus = _FAIL;
- }
+ ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv,
+ CONFIG_RF_RADIO, eRFPath);
break;
case RF_PATH_C:
- break;
case RF_PATH_D:
break;
}
@@ -166,28 +145,16 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
break;
}
-
- if (rtStatus != _SUCCESS) {
- /* RT_TRACE(COMP_FPGA, DBG_LOUD, ("phy_RF6052_Config_ParaFile():Radio[%d] Fail!!", eRFPath)); */
- goto phy_RF6052_Config_ParaFile_Fail;
- }
-
}
/* 3 ----------------------------------------------------------------- */
/* 3 Configuration of Tx Power Tracking */
/* 3 ----------------------------------------------------------------- */
- if (PHY_ConfigRFWithTxPwrTrackParaFile(Adapter, pszTxPwrTrackFile) ==
- _FAIL) {
- ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
- }
+ ODM_ConfigRFWithTxPwrTrackHeaderFile(&pHalData->odmpriv);
/* RT_TRACE(COMP_INIT, DBG_LOUD, ("<---phy_RF6052_Config_ParaFile()\n")); */
- return rtStatus;
-
-phy_RF6052_Config_ParaFile_Fail:
- return rtStatus;
+ return _SUCCESS;
}
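
Note: same simplification on the RF side. RF_PATH_A now deliberately falls through to the RF_PATH_B case so both paths share one ODM_ConfigRFWithHeaderFile() call, and RF_PATH_C falls through to RF_PATH_D's empty break. The unreachable phy_RF6052_Config_ParaFile_Fail label goes with it.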
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 0f3301091258..1e8b61443408 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -179,8 +179,6 @@ static void rtl8723bs_c2h_packet_handler(struct adapter *padapter,
kfree(tmp);
/* DBG_871X("-%s res(%d)\n", __func__, res); */
-
- return;
}
static inline union recv_frame *try_alloc_recvframe(struct recv_priv *precvpriv,
@@ -232,7 +230,7 @@ static inline bool pkt_exceeds_tail(struct recv_priv *precvpriv,
return false;
}
-static void rtl8723bs_recv_tasklet(void *priv)
+static void rtl8723bs_recv_tasklet(unsigned long priv)
{
struct adapter *padapter;
struct hal_com_data *p_hal_data;
@@ -246,7 +244,7 @@ static void rtl8723bs_recv_tasklet(void *priv)
_pkt *pkt_copy = NULL;
u8 shift_sz = 0, rx_report_sz = 0;
- padapter = priv;
+ padapter = (struct adapter *)priv;
p_hal_data = GET_HAL_DATA(padapter);
precvpriv = &padapter->recvpriv;
recv_buf_queue = &precvpriv->recv_buf_pending_queue;
@@ -464,11 +462,8 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
goto initbuferror;
/* 3 2. init tasklet */
- tasklet_init(
- &precvpriv->recv_tasklet,
- (void(*)(unsigned long))rtl8723bs_recv_tasklet,
- (unsigned long)padapter
- );
+ tasklet_init(&precvpriv->recv_tasklet, rtl8723bs_recv_tasklet,
+ (unsigned long)padapter);
goto exit;
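
Note: tasklet_init() expects a callback of type void (*)(unsigned long), so giving rtl8723bs_recv_tasklet() that exact signature, and casting the data argument inside the handler instead, removes the function-pointer cast at the registration site. A small userspace analogue of the same pattern (the names below are illustrative, not the kernel API):

#include <stdio.h>

/* The callback slot expects void (*)(unsigned long): declaring the
 * handler with exactly that signature means no function-pointer cast,
 * only a data cast inside the handler, as in the hunk above. */
typedef void (*cb_t)(unsigned long);

struct ctx {
	int id;
};

static void handler(unsigned long data)
{
	struct ctx *c = (struct ctx *)data;	/* cast the data, not the function */

	printf("ctx id = %d\n", c->id);
}

int main(void)
{
	struct ctx c = { .id = 7 };
	cb_t cb = handler;			/* assigns cleanly, no cast */

	cb((unsigned long)&c);
	return 0;
}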
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 0f5dd4629e6f..e813382e78a6 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -570,14 +570,11 @@ static void HalRxAggr8723BSdio(struct adapter *padapter)
static void sdio_AggSettingRxUpdate(struct adapter *padapter)
{
- struct hal_com_data *pHalData;
u8 valueDMA;
u8 valueRxAggCtrl = 0;
u8 aggBurstNum = 3; /* 0:1, 1:2, 2:3, 3:4 */
u8 aggBurstSize = 0; /* 0:1K, 1:512Byte, 2:256Byte... */
- pHalData = GET_HAL_DATA(padapter);
-
valueDMA = rtw_read8(padapter, REG_TRXDMA_CTRL);
valueDMA |= RXDMA_AGG_EN;
rtw_write8(padapter, REG_TRXDMA_CTRL, valueDMA);
@@ -713,13 +710,11 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
s32 ret;
struct hal_com_data *pHalData;
struct pwrctrl_priv *pwrctrlpriv;
- struct registry_priv *pregistrypriv;
u32 NavUpper = WiFiNavUpperUs;
u8 u1bTmp;
pHalData = GET_HAL_DATA(padapter);
pwrctrlpriv = adapter_to_pwrctl(padapter);
- pregistrypriv = &padapter->registrypriv;
if (
adapter_to_pwrctl(padapter)->bips_processing == true &&
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 301d327d0624..b6b4adb5a28a 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -15,7 +15,7 @@
/* */
/* Description: */
-/* The following mapping is for SDIO host local register space. */
+/* The following mapping is for SDIO host local register space. */
/* */
 /* Created by Roger, 2011.01.31. */
/* */
@@ -61,7 +61,6 @@ static u8 get_deviceid(u32 addr)
u8 devide_id;
u16 pseudo_id;
-
pseudo_id = (u16)(addr >> 16);
switch (pseudo_id) {
case 0x1025:
@@ -72,10 +71,6 @@ static u8 get_deviceid(u32 addr)
devide_id = WLAN_IOREG_DEVICE_ID;
break;
-/* case 0x1027: */
-/* devide_id = SDIO_FIRMWARE_FIFO; */
-/* break; */
-
case 0x1031:
devide_id = WLAN_TX_HIQ_DEVICE_ID;
break;
@@ -93,7 +88,6 @@ static u8 get_deviceid(u32 addr)
break;
default:
-/* devide_id = (u8)((addr >> 13) & 0xF); */
devide_id = WLAN_IOREG_DEVICE_ID;
break;
}
@@ -111,7 +105,6 @@ static u32 _cvrt2ftaddr(const u32 addr, u8 *pdevice_id, u16 *poffset)
u16 offset;
u32 ftaddr;
-
device_id = get_deviceid(addr);
offset = 0;
@@ -427,20 +420,16 @@ static u32 sdio_read_port(
struct adapter *adapter;
struct sdio_data *psdio;
struct hal_com_data *hal;
- u32 oldcnt;
s32 err;
-
adapter = intfhdl->padapter;
psdio = &adapter_to_dvobj(adapter)->intf_data;
hal = GET_HAL_DATA(adapter);
HalSdioGetCmdAddr8723BSdio(adapter, addr, hal->SdioRxFIFOCnt++, &addr);
- oldcnt = cnt;
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
-/* cnt = sdio_align_size(cnt); */
err = _sd_read(intfhdl, addr, cnt, mem);
@@ -490,7 +479,6 @@ static u32 sdio_write_port(
if (cnt > psdio->block_transfer_len)
cnt = _RND(cnt, psdio->block_transfer_len);
-/* cnt = sdio_align_size(cnt); */
err = sd_write(intfhdl, addr, cnt, xmitbuf->pdata);
@@ -538,7 +526,6 @@ static s32 _sdio_local_read(
u8 *tmpbuf;
u32 n;
-
intfhdl = &adapter->iopriv.intf;
HalSdioGetCmdAddr8723BSdio(adapter, SDIO_LOCAL_DEVICE_ID, addr, &addr);
@@ -711,7 +698,6 @@ static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
u32 hisr, himr;
u8 val8, hisr_len;
-
if (!phisr)
return false;
@@ -737,73 +723,48 @@ static s32 ReadInterrupt8723BSdio(struct adapter *adapter, u32 *phisr)
}
/* */
-/* Description: */
-/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
+/* Description: */
+/* Initialize SDIO Host Interrupt Mask configuration variables for future use. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void InitInterrupt8723BSdio(struct adapter *adapter)
{
struct hal_com_data *haldata;
-
haldata = GET_HAL_DATA(adapter);
- haldata->sdio_himr = (u32)( \
- SDIO_HIMR_RX_REQUEST_MSK |
- SDIO_HIMR_AVAL_MSK |
-/* SDIO_HIMR_TXERR_MSK | */
-/* SDIO_HIMR_RXERR_MSK | */
-/* SDIO_HIMR_TXFOVW_MSK | */
-/* SDIO_HIMR_RXFOVW_MSK | */
-/* SDIO_HIMR_TXBCNOK_MSK | */
-/* SDIO_HIMR_TXBCNERR_MSK | */
-/* SDIO_HIMR_BCNERLY_INT_MSK | */
-/* SDIO_HIMR_C2HCMD_MSK | */
-/* SDIO_HIMR_HSISR_IND_MSK | */
-/* SDIO_HIMR_GTINT3_IND_MSK | */
-/* SDIO_HIMR_GTINT4_IND_MSK | */
-/* SDIO_HIMR_PSTIMEOUT_MSK | */
-/* SDIO_HIMR_OCPINT_MSK | */
-/* SDIO_HIMR_ATIMEND_MSK | */
-/* SDIO_HIMR_ATIMEND_E_MSK | */
-/* SDIO_HIMR_CTWEND_MSK | */
- 0);
+ haldata->sdio_himr = (u32)(SDIO_HIMR_RX_REQUEST_MSK |
+ SDIO_HIMR_AVAL_MSK |
+ 0);
}
/* */
-/* Description: */
-/* Initialize System Host Interrupt Mask configuration variables for future use. */
+/* Description: */
+/* Initialize System Host Interrupt Mask configuration variables for future use. */
/* */
-/* Created by Roger, 2011.08.03. */
+/* Created by Roger, 2011.08.03. */
/* */
void InitSysInterrupt8723BSdio(struct adapter *adapter)
{
struct hal_com_data *haldata;
-
haldata = GET_HAL_DATA(adapter);
- haldata->SysIntrMask = ( \
-/* HSIMR_GPIO12_0_INT_EN | */
-/* HSIMR_SPS_OCP_INT_EN | */
-/* HSIMR_RON_INT_EN | */
-/* HSIMR_PDNINT_EN | */
-/* HSIMR_GPIO9_INT_EN | */
- 0);
+ haldata->SysIntrMask = (0);
}
/* */
-/* Description: */
-/* Enalbe SDIO Host Interrupt Mask configuration on SDIO local domain. */
+/* Description: */
+/* Enable SDIO Host Interrupt Mask configuration on SDIO local domain. */
/* */
-/* Assumption: */
-/* 1. Using SDIO Local register ONLY for configuration. */
-/* 2. PASSIVE LEVEL */
+/* Assumption: */
+/* 1. Using SDIO Local register ONLY for configuration. */
+/* 2. PASSIVE LEVEL */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void EnableInterrupt8723BSdio(struct adapter *adapter)
{
@@ -849,13 +810,13 @@ void EnableInterrupt8723BSdio(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
+/* Description: */
+/* Disable SDIO Host IMR configuration to mask unnecessary interrupt service. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Roger, 2011.02.11. */
+/* Created by Roger, 2011.02.11. */
/* */
void DisableInterrupt8723BSdio(struct adapter *adapter)
{
@@ -866,13 +827,13 @@ void DisableInterrupt8723BSdio(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Using 0x100 to check the power status of FW. */
+/* Description: */
+/* Using 0x100 to check the power status of FW. */
/* */
-/* Assumption: */
-/* Using SDIO Local register ONLY for configuration. */
+/* Assumption: */
+/* Using SDIO Local register ONLY for configuration. */
/* */
-/* Created by Isaac, 2013.09.10. */
+/* Created by Isaac, 2013.09.10. */
/* */
u8 CheckIPSStatus(struct adapter *adapter)
{
@@ -896,7 +857,6 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
struct recv_priv *recv_priv;
struct recv_buf *recvbuf;
-
 /* Patch for the 4-byte alignment issue on some SDIO hosts */
/* ex. RK3188 */
readsize = RND4(size);
@@ -938,7 +898,6 @@ static struct recv_buf *sd_recv_rxfifo(struct adapter *adapter, u32 size)
return NULL;
}
-
/* 3 4. init recvbuf */
recvbuf->len = size;
recvbuf->phead = recvbuf->pskb->head;
@@ -972,7 +931,6 @@ void sd_int_dpc(struct adapter *adapter)
struct intf_hdl *intfhdl = &adapter->iopriv.intf;
struct pwrctrl_priv *pwrctl;
-
hal = GET_HAL_DATA(adapter);
dvobj = adapter_to_dvobj(adapter);
pwrctl = dvobj_to_pwrctl(dvobj);
@@ -992,7 +950,6 @@ void sd_int_dpc(struct adapter *adapter)
report.state = SdioLocalCmd52Read1Byte(adapter, SDIO_REG_HCPWM1_8723B);
- /* cpwm_int_hdl(adapter, &report); */
_set_workitem(&(pwrctl->cpwm_event));
}
@@ -1029,7 +986,7 @@ void sd_int_dpc(struct adapter *adapter)
if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
/* Handle CCX report here */
rtw_hal_c2h_handler(adapter, (u8 *)c2h_evt);
- kfree((u8 *)c2h_evt);
+ kfree(c2h_evt);
} else {
rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
}
@@ -1049,13 +1006,11 @@ void sd_int_dpc(struct adapter *adapter)
if (hal->sdio_hisr & SDIO_HISR_RXERR)
DBG_8192C("%s: Rx Error\n", __func__);
-
if (hal->sdio_hisr & SDIO_HISR_RX_REQUEST) {
struct recv_buf *recvbuf;
int alloc_fail_time = 0;
u32 hisr;
-/* DBG_8192C("%s: RX Request, size =%d\n", __func__, hal->SdioRxFIFOSize); */
hal->sdio_hisr ^= SDIO_HISR_RX_REQUEST;
do {
hal->SdioRxFIFOSize = SdioLocalCmd52Read2Byte(adapter, SDIO_REG_RX0_REQ_LEN);
@@ -1090,7 +1045,6 @@ void sd_int_hdl(struct adapter *adapter)
{
struct hal_com_data *hal;
-
if (
(adapter->bDriverStopped) || (adapter->bSurpriseRemoved)
)
@@ -1120,27 +1074,24 @@ void sd_int_hdl(struct adapter *adapter)
}
/* */
-/* Description: */
-/* Query SDIO Local register to query current the number of Free TxPacketBuffer page. */
+/* Description: */
+/* Query SDIO Local register for the current number of free TxPacketBuffer pages. */
/* */
-/* Assumption: */
-/* 1. Running at PASSIVE_LEVEL */
-/* 2. RT_TX_SPINLOCK is NOT acquired. */
+/* Assumption: */
+/* 1. Running at PASSIVE_LEVEL */
+/* 2. RT_TX_SPINLOCK is NOT acquired. */
/* */
-/* Created by Roger, 2011.01.28. */
+/* Created by Roger, 2011.01.28. */
/* */
u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
{
struct hal_com_data *hal;
u32 numof_free_page;
- /* _irql irql; */
-
hal = GET_HAL_DATA(adapter);
numof_free_page = SdioLocalCmd53Read4Byte(adapter, SDIO_REG_FREE_TXPG);
- /* spin_lock_bh(&phal->SdioTxFIFOFreePageLock); */
memcpy(hal->SdioTxFIFOFreePage, &numof_free_page, 4);
RT_TRACE(_module_hci_ops_c_, _drv_notice_,
("%s: Free page for HIQ(%#x), MIDQ(%#x), LOWQ(%#x), PUBQ(%#x)\n",
@@ -1149,14 +1100,13 @@ u8 HalQueryTxBufferStatus8723BSdio(struct adapter *adapter)
hal->SdioTxFIFOFreePage[MID_QUEUE_IDX],
hal->SdioTxFIFOFreePage[LOW_QUEUE_IDX],
hal->SdioTxFIFOFreePage[PUBLIC_QUEUE_IDX]));
- /* spin_unlock_bh(&hal->SdioTxFIFOFreePageLock); */
return true;
}
/* */
-/* Description: */
-/* Query SDIO Local register to get the current number of TX OQT Free Space. */
+/* Description: */
+/* Query SDIO Local register to get the current amount of TX OQT free space. */
/* */
void HalQueryTxOQTBufferStatus8723BSdio(struct adapter *adapter)
{
@@ -1190,7 +1140,6 @@ u8 RecvOnePkt(struct adapter *adapter, u32 size)
recvbuf = sd_recv_rxfifo(adapter, size);
if (recvbuf) {
- /* printk("Completed Recv One Pkt.\n"); */
sd_rxhandler(adapter, recvbuf);
res = true;
} else {
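
Note: the sdio_ops.c changes are again mechanical: the cast in kfree((u8 *)c2h_evt) goes away because kfree() takes a void pointer, the write-only oldcnt variable and the commented-out sdio_align_size() calls are deleted, and the big comment banners are re-indented. The header hunks below then drop everything only the removed file loaders referenced: the LOAD_*_PARA_FILE bits and loader prototypes in hal_com_phycfg.h, para_file_buf and the per-table buffers in struct hal_com_data, the rtw_is_file_readable()/rtw_retrive_from_file() wrappers, and the rtw_merge_string() helper.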
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 8d7fce1e39b7..6ec9087f2eb1 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -197,9 +197,6 @@ struct registry_priv
u8 RFE_Type;
u8 check_fw_ps;
- u8 load_phy_file;
- u8 RegDecryptCustomFile;
-
#ifdef CONFIG_MULTI_VIR_IFACES
u8 ext_iface_num;/* primary/secondary iface is excluded */
#endif
@@ -693,7 +690,6 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
void indicate_wx_scan_complete_event(struct adapter *padapter);
int rtw_change_ifname(struct adapter *padapter, const char *ifname);
-extern char *rtw_phy_file_path;
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
extern int rtw_ht_enable;
diff --git a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
index 9167f1e7827f..e9a3006a3e20 100644
--- a/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
+++ b/drivers/staging/rtl8723bs/include/hal_com_phycfg.h
@@ -219,30 +219,4 @@ struct adapter * Adapter,
u16 ChannelPlan
);
-#define MAX_PARA_FILE_BUF_LEN 25600
-
-#define LOAD_MAC_PARA_FILE BIT0
-#define LOAD_BB_PARA_FILE BIT1
-#define LOAD_BB_PG_PARA_FILE BIT2
-#define LOAD_BB_MP_PARA_FILE BIT3
-#define LOAD_RF_PARA_FILE BIT4
-#define LOAD_RF_TXPWR_TRACK_PARA_FILE BIT5
-#define LOAD_RF_TXPWR_LMT_PARA_FILE BIT6
-
-int phy_ConfigMACWithParaFile(struct adapter *Adapter, char*pFileName);
-
-int phy_ConfigBBWithParaFile(struct adapter *Adapter, char*pFileName, u32 ConfigType);
-
-int phy_ConfigBBWithPgParaFile(struct adapter *Adapter, char*pFileName);
-
-int phy_ConfigBBWithMpParaFile(struct adapter *Adapter, char*pFileName);
-
-int PHY_ConfigRFWithParaFile(struct adapter *Adapter, char*pFileName, u8 eRFPath);
-
-int PHY_ConfigRFWithTxPwrTrackParaFile(struct adapter *Adapter, char*pFileName);
-
-int PHY_ConfigRFWithPowerLimitTableParaFile(struct adapter *Adapter, char*pFileName);
-
-void phy_free_filebuf(struct adapter *padapter);
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index 7d782659a84f..e5e667df6154 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -440,27 +440,6 @@ struct hal_com_data {
u32 SysIntrStatus;
u32 SysIntrMask;
-
- char para_file_buf[MAX_PARA_FILE_BUF_LEN];
- char *mac_reg;
- u32 mac_reg_len;
- char *bb_phy_reg;
- u32 bb_phy_reg_len;
- char *bb_agc_tab;
- u32 bb_agc_tab_len;
- char *bb_phy_reg_pg;
- u32 bb_phy_reg_pg_len;
- char *bb_phy_reg_mp;
- u32 bb_phy_reg_mp_len;
- char *rf_radio_a;
- u32 rf_radio_a_len;
- char *rf_radio_b;
- u32 rf_radio_b_len;
- char *rf_tx_pwr_track;
- u32 rf_tx_pwr_track_len;
- char *rf_tx_pwr_lmt;
- u32 rf_tx_pwr_lmt_len;
-
#ifdef CONFIG_BACKGROUND_NOISE_MONITOR
s16 noise[ODM_MAX_CHANNEL_NUM];
#endif
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index 81a9c19ecc6a..a40cf7b60a69 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -171,10 +171,6 @@ extern void rtw_softap_lock_suspend(void);
extern void rtw_softap_unlock_suspend(void);
#endif
-/* File operation APIs, just for linux now */
-extern int rtw_is_file_readable(char *path);
-extern int rtw_retrive_from_file(char *path, u8 *buf, u32 sz);
-
extern void rtw_free_netdev(struct net_device * netdev);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index c582ede1ac12..a2d9de866c4b 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -127,13 +127,6 @@ static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
netif_tx_stop_all_queues(pnetdev);
}
-static inline void rtw_merge_string(char *dst, int dst_len, char *src1, char *src2)
-{
- int len = 0;
- len += snprintf(dst+len, dst_len - len, "%s", src1);
- len += snprintf(dst+len, dst_len - len, "%s", src2);
-}
-
#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)), (sig), 1)
#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index 8f00ced1c697..f36516fa84c7 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -21,21 +21,6 @@
#include "hal_phy_cfg.h"
/* */
-/* RTL8723B From file */
-/* */
-#define RTL8723B_FW_IMG "rtl8723b/FW_NIC.bin"
-#define RTL8723B_FW_WW_IMG "rtl8723b/FW_WoWLAN.bin"
-#define RTL8723B_PHY_REG "rtl8723b/PHY_REG.txt"
-#define RTL8723B_PHY_RADIO_A "rtl8723b/RadioA.txt"
-#define RTL8723B_PHY_RADIO_B "rtl8723b/RadioB.txt"
-#define RTL8723B_TXPWR_TRACK "rtl8723b/TxPowerTrack.txt"
-#define RTL8723B_AGC_TAB "rtl8723b/AGC_TAB.txt"
-#define RTL8723B_PHY_MACREG "rtl8723b/MAC_REG.txt"
-#define RTL8723B_PHY_REG_PG "rtl8723b/PHY_REG_PG.txt"
-#define RTL8723B_PHY_REG_MP "rtl8723b/PHY_REG_MP.txt"
-#define RTL8723B_TXPWR_LMT "rtl8723b/TXPWR_LMT.txt"
-
-/* */
/* RTL8723B From header */
/* */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index fd3cf955c9f8..73e8ec09b6e1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -576,7 +576,6 @@ void read_cam(struct adapter *padapter , u8 entry, u8 *get_key);
/* modify HW only */
void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void _clear_cam_entry(struct adapter *padapter, u8 entry);
-void write_cam_from_cache(struct adapter *adapter, u8 id);
/* modify both HW and cache */
void write_cam(struct adapter *padapter, u8 id, u16 ctrl, u8 *mac, u8 *key);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index f819abb756dc..322cabb97b99 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -165,7 +165,7 @@ static void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
+ sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
+ sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
}
- kfree((u8 *)spt_band);
+ kfree(spt_band);
}
static const struct ieee80211_txrx_stypes
@@ -240,10 +240,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
u16 channel;
u32 freq;
u64 notify_timestamp;
- u16 notify_capability;
- u16 notify_interval;
- u8 *notify_ie;
- size_t notify_ielen;
s32 notify_signal;
u8 *buf = NULL, *pbuf;
size_t len, bssinf_len = 0;
@@ -324,12 +320,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
notify_timestamp = ktime_to_us(ktime_get_boottime());
- notify_interval = le16_to_cpu(*(__le16 *)rtw_get_beacon_interval_from_ie(pnetwork->network.IEs));
- notify_capability = le16_to_cpu(*(__le16 *)rtw_get_capability_from_ie(pnetwork->network.IEs));
-
- notify_ie = pnetwork->network.IEs+_FIXED_IE_LENGTH_;
- notify_ielen = pnetwork->network.IELength-_FIXED_IE_LENGTH_;
-
/* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) */
if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
@@ -1156,7 +1146,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
addkey_end:
- kfree((u8 *)param);
+ kfree(param);
return ret;
@@ -1305,7 +1295,6 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
struct wireless_dev *rtw_wdev = padapter->rtw_wdev;
struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
int ret = 0;
- u8 change = false;
DBG_871X(FUNC_NDEV_FMT" type =%d\n", FUNC_NDEV_ARG(ndev), type);
@@ -1336,7 +1325,6 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
if (old_type != type)
{
- change = true;
pmlmeext->action_public_rxseq = 0xffff;
pmlmeext->action_public_dialog_token = 0xff;
}
@@ -1410,19 +1398,19 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
struct wireless_dev *pwdev = padapter->rtw_wdev;
struct wiphy *wiphy = pwdev->wiphy;
struct cfg80211_bss *bss = NULL;
- struct wlan_bssid_ex select_network = pnetwork->network;
+ struct wlan_bssid_ex *select_network = &pnetwork->network;
bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
- select_network.MacAddress, select_network.Ssid.Ssid,
- select_network.Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
+ select_network->MacAddress, select_network->Ssid.Ssid,
+ select_network->Ssid.SsidLength, 0/*WLAN_CAPABILITY_ESS*/,
0/*WLAN_CAPABILITY_ESS*/);
if (bss) {
cfg80211_unlink_bss(wiphy, bss);
- DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__, select_network.Ssid.Ssid);
+ DBG_8192C("%s(): cfg80211_unlink %s!! () ", __func__,
+ select_network->Ssid.Ssid);
cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
}
- return;
}
void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter)
@@ -1513,7 +1501,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
int i;
u8 _status = false;
int ret = 0;
- struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct ndis_802_11_ssid *ssid = NULL;
struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
u8 survey_times =3;
u8 survey_times_for_one_ch =6;
@@ -1604,7 +1592,13 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+ ssid = kzalloc(RTW_SSID_SCAN_AMOUNT * sizeof(struct ndis_802_11_ssid),
+ GFP_KERNEL);
+ if (!ssid) {
+ ret = -ENOMEM;
+ goto check_need_indicate_scan_done;
+ }
+
/* parsing request ssids, n_ssids */
for (i = 0; i < request->n_ssids && i < RTW_SSID_SCAN_AMOUNT; i++) {
#ifdef DEBUG_CFG80211
@@ -1648,6 +1642,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
}
check_need_indicate_scan_done:
+ kfree(ssid);
if (need_indicate_scan_done)
{
rtw_cfg80211_surveydone_event_callback(padapter);
@@ -1798,7 +1793,7 @@ static int rtw_cfg80211_set_key_mgt(struct security_priv *psecuritypriv, u32 key
static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t ielen)
{
- u8 *buf = NULL, *pos = NULL;
+ u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
int wpa_ielen = 0;
@@ -1833,7 +1828,6 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
DBG_8192C("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
}
- pos = buf;
if (ielen < RSN_HEADER_LEN) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
ret = -1;
@@ -2194,7 +2188,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
ret = -EOPNOTSUPP ;
}
- kfree((u8 *)pwep);
+ kfree(pwep);
if (ret < 0)
goto exit;
@@ -2433,7 +2427,6 @@ void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter, unsigned char
static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
{
- int ret = 0;
int rtap_len;
int qos_len = 0;
int dot11_hdr_len = 24;
@@ -2499,9 +2492,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
DBG_8192C("should be eapol packet\n");
/* Use the real net device to transmit the packet */
- ret = _rtw_xmit_entry(skb, padapter->pnetdev);
-
- return ret;
+ return _rtw_xmit_entry(skb, padapter->pnetdev);
}
else if ((frame_control & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE))
@@ -2647,7 +2638,7 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
out:
if (ret && mon_wdev) {
- kfree((u8 *)mon_wdev);
+ kfree(mon_wdev);
mon_wdev = NULL;
}
@@ -2808,14 +2799,11 @@ static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_beacon_data *info)
{
- int ret = 0;
struct adapter *adapter = (struct adapter *)rtw_netdev_priv(ndev);
DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
- ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
-
- return ret;
+ return rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
}
static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
@@ -3503,7 +3491,7 @@ void rtw_wdev_free(struct wireless_dev *wdev)
wiphy_free(wdev->wiphy);
- kfree((u8 *)wdev);
+ kfree(wdev);
}
void rtw_wdev_unregister(struct wireless_dev *wdev)
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index d1b199e3e5bd..db6528a01229 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2425,19 +2425,13 @@ static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
static int rtw_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
- u32 cnt = 0, wpa_ielen;
+ int wpa_ielen;
+ u32 cnt = 0;
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
@@ -2793,7 +2787,7 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_871X("oper_ch =%d\n", rtw_get_oper_ch(padapter));
DBG_871X("oper_bw =%d\n", rtw_get_oper_bw(padapter));
- DBG_871X("oper_ch_offet =%d\n", rtw_get_oper_choffset(padapter));
+ DBG_871X("oper_ch_offset =%d\n", rtw_get_oper_choffset(padapter));
break;
case 0x05:
@@ -4458,43 +4452,6 @@ static int rtw_pm_set(struct net_device *dev,
return ret;
}
-static int rtw_mp_efuse_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- int err = 0;
- return err;
-}
-
-static int rtw_mp_efuse_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- int err = 0;
- return err;
-}
-
-static int rtw_tdls(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
-
-static int rtw_tdls_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- return ret;
-}
-
-
-
-
-
static int rtw_test(
struct net_device *dev,
struct iw_request_info *info,
@@ -4744,7 +4701,7 @@ static iw_handler rtw_private_handler[] = {
rtw_wx_write32, /* 0x00 */
rtw_wx_read32, /* 0x01 */
rtw_drvext_hdl, /* 0x02 */
- rtw_mp_ioctl_hdl, /* 0x03 */
+ NULL, /* 0x03 */
/* for MM DTV platform */
rtw_get_ap_info, /* 0x04 */
@@ -4771,15 +4728,15 @@ static iw_handler rtw_private_handler[] = {
NULL, /* 0x12 */
rtw_p2p_get2, /* 0x13 */
- rtw_tdls, /* 0x14 */
- rtw_tdls_get, /* 0x15 */
+ NULL, /* 0x14 */
+ NULL, /* 0x15 */
rtw_pm_set, /* 0x16 */
rtw_wx_priv_null, /* 0x17 */
rtw_rereg_nd_name, /* 0x18 */
rtw_wx_priv_null, /* 0x19 */
- rtw_mp_efuse_set, /* 0x1A */
- rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1A */
+ NULL, /* 0x1B */
NULL, /* 0x1C is reserved for hostapd */
rtw_test, /* 0x1D */
};
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index ec3a75485233..47e984d5b7cb 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -201,24 +201,6 @@ MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable, "0:Disable, 1:Enable, 2: Depend on efuse
module_param(rtw_tx_pwr_by_rate, int, 0644);
MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse");
-char *rtw_phy_file_path = "";
-module_param(rtw_phy_file_path, charp, 0644);
-MODULE_PARM_DESC(rtw_phy_file_path, "The path of phy parameter");
-/* PHY FILE Bit Map */
-/* BIT0 - MAC, 0: non-support, 1: support */
-/* BIT1 - BB, 0: non-support, 1: support */
-/* BIT2 - BB_PG, 0: non-support, 1: support */
-/* BIT3 - BB_MP, 0: non-support, 1: support */
-/* BIT4 - RF, 0: non-support, 1: support */
-/* BIT5 - RF_TXPWR_TRACK, 0: non-support, 1: support */
-/* BIT6 - RF_TXPWR_LMT, 0: non-support, 1: support */
-static int rtw_load_phy_file = (BIT2 | BIT6);
-module_param(rtw_load_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_load_phy_file, "PHY File Bit Map");
-static int rtw_decrypt_phy_file;
-module_param(rtw_decrypt_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_decrypt_phy_file, "Enable Decrypt PHY File");
-
int _netdev_open(struct net_device *pnetdev);
int netdev_open (struct net_device *pnetdev);
static int netdev_close (struct net_device *pnetdev);
@@ -321,8 +303,6 @@ static void loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->bEn_RFE = 1;
registry_par->RFE_Type = 64;
- registry_par->load_phy_file = (u8)rtw_load_phy_file;
- registry_par->RegDecryptCustomFile = (u8)rtw_decrypt_phy_file;
registry_par->qos_opt_enable = (u8)rtw_qos_opt_enable;
registry_par->hiq_filter = (u8)rtw_hiq_filter;
@@ -1141,8 +1121,7 @@ void rtw_ndev_destructor(struct net_device *ndev)
{
DBG_871X(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
- if (ndev->ieee80211_ptr)
- kfree((u8 *)ndev->ieee80211_ptr);
+ kfree(ndev->ieee80211_ptr);
}
void rtw_dev_unload(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 25a80041ce87..f5614e56371e 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -65,142 +65,6 @@ void _rtw_init_queue(struct __queue *pqueue)
spin_lock_init(&(pqueue->lock));
}
-/*
-* Open a file with the specific @param path, @param flag, @param mode
-* @param fpp the pointer of struct file pointer to get struct file pointer while file opening is success
-* @param path the path of the file to open
-* @param flag file operation flags, please refer to linux document
-* @param mode please refer to linux document
-* @return Linux specific error code
-*/
-static int openFile(struct file **fpp, char *path, int flag, int mode)
-{
- struct file *fp;
-
- fp = filp_open(path, flag, mode);
- if (IS_ERR(fp)) {
- *fpp = NULL;
- return PTR_ERR(fp);
- }
- else {
- *fpp = fp;
- return 0;
- }
-}
-
-/*
-* Close the file with the specific @param fp
-* @param fp the pointer of struct file to close
-* @return always 0
-*/
-static int closeFile(struct file *fp)
-{
- filp_close(fp, NULL);
- return 0;
-}
-
-static int readFile(struct file *fp, char *buf, int len)
-{
- int rlen = 0, sum = 0;
-
- if (!fp->f_op || !fp->f_op->read)
- return -EPERM;
-
- while (sum < len) {
- rlen = kernel_read(fp, buf + sum, len - sum, &fp->f_pos);
- if (rlen > 0)
- sum += rlen;
- else if (0 != rlen)
- return rlen;
- else
- break;
- }
-
- return sum;
-
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return Linux specific error code
-*/
-static int isFileReadable(char *path)
-{
- struct file *fp;
- int ret = 0;
- char buf;
-
- fp = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(fp))
- return PTR_ERR(fp);
-
- if (readFile(fp, &buf, 1) != 1)
- ret = -EINVAL;
-
- filp_close(fp, NULL);
- return ret;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read, or Linux specific error code
-*/
-static int retriveFromFile(char *path, u8 *buf, u32 sz)
-{
- int ret = -1;
- struct file *fp;
-
- if (path && buf) {
- ret = openFile(&fp, path, O_RDONLY, 0);
-
- if (ret == 0) {
- DBG_871X("%s openFile path:%s fp =%p\n", __func__, path , fp);
-
- ret = readFile(fp, buf, sz);
- closeFile(fp);
-
- DBG_871X("%s readFile, ret:%d\n", __func__, ret);
-
- } else {
- DBG_871X("%s openFile path:%s Fail, ret:%d\n", __func__, path, ret);
- }
- } else {
- DBG_871X("%s NULL pointer\n", __func__);
- ret = -EINVAL;
- }
- return ret;
-}
-
-/*
-* Test if the specifi @param path is a file and readable
-* @param path the path of the file to test
-* @return true or false
-*/
-int rtw_is_file_readable(char *path)
-{
- if (isFileReadable(path) == 0)
- return true;
- else
- return false;
-}
-
-/*
-* Open the file with @param path and retrive the file content into memory starting from @param buf for @param sz at most
-* @param path the path of the file to open and read
-* @param buf the starting address of the buffer to store file content
-* @param sz how many bytes to read at most
-* @return the byte we've read
-*/
-int rtw_retrive_from_file(char *path, u8 *buf, u32 sz)
-{
- int ret = retriveFromFile(path, buf, sz);
- return ret >= 0 ? ret : 0;
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
{
struct net_device *pnetdev;
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index d3784c44f6d0..859f4a0afb95 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -18,18 +18,13 @@
static const struct sdio_device_id sdio_ids[] =
{
{ SDIO_DEVICE(0x024c, 0x0523), },
+ { SDIO_DEVICE(0x024c, 0x0525), },
{ SDIO_DEVICE(0x024c, 0x0623), },
{ SDIO_DEVICE(0x024c, 0x0626), },
{ SDIO_DEVICE(0x024c, 0xb723), },
{ /* end: all zeroes */ },
};
-static const struct acpi_device_id acpi_ids[] = {
- {"OBDA8723", 0x0000},
- {}
-};
-
MODULE_DEVICE_TABLE(sdio, sdio_ids);
-MODULE_DEVICE_TABLE(acpi, acpi_ids);
static int rtw_drv_init(struct sdio_func *func, const struct sdio_device_id *id);
static void rtw_dev_remove(struct sdio_func *func);
@@ -281,7 +276,6 @@ static void sdio_dvobj_deinit(struct sdio_func *func)
sdio_deinit(dvobj);
devobj_deinit(dvobj);
}
- return;
}
void rtw_set_hal_ops(struct adapter *padapter)
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index e853fa9cc950..d53dd138a356 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -590,7 +590,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
int retval, i;
u8 val;
- retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
+ retval = ms_set_rw_reg_addr(chip, PRO_STATUS_REG, 6, SYSTEM_PARAM, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -840,7 +840,7 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
ms_cleanup_work(chip);
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -885,7 +885,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
int found_sys_info = 0, found_model_name = 0;
#endif
- retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, PRO_INT_REG, 2, PRO_SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1232,7 +1232,7 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
int retval;
u8 val[2];
- retval = ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
+ retval = ms_set_rw_reg_addr(chip, STATUS_REG0, 2, 0, 0);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1255,8 +1255,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
int retval, i;
u8 val, data[10];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1307,8 +1307,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm,
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM,
6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1339,8 +1339,8 @@ static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
if (!buf || (buf_len < MS_EXTRA_SIZE))
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6 + MS_EXTRA_SIZE);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6 + MS_EXTRA_SIZE);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1392,8 +1392,8 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
int retval;
u8 val, data[6];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1465,8 +1465,8 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1519,8 +1519,8 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
int retval, i = 0;
u8 val, data[6];
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1579,7 +1579,7 @@ static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
memset(extra, 0xFF, MS_EXTRA_SIZE);
- if (type == setPS_NG) {
+ if (type == set_PS_NG) {
/* set page status as 1:NG,and block status keep 1:OK */
extra[0] = 0xB8;
} else {
@@ -1670,8 +1670,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1725,7 +1725,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
return STATUS_FAIL;
if (uncorrect_flag) {
- ms_set_page_status(log_blk, setPS_NG,
+ ms_set_page_status(log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
if (i == 0)
@@ -1738,8 +1738,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
i, extra[0]);
MS_SET_BAD_BLOCK_FLG(ms_card);
- ms_set_page_status(log_blk, setPS_Error,
- extra,
+ ms_set_page_status(log_blk,
+ set_PS_error, extra,
MS_EXTRA_SIZE);
ms_write_extra_data(chip, new_blk, i,
extra,
@@ -1767,8 +1767,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
ms_set_err_code(chip, MS_NO_ERROR);
@@ -1822,8 +1822,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
}
if (i == 0) {
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
- MS_EXTRA_SIZE, SystemParm,
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG,
+ MS_EXTRA_SIZE, SYSTEM_PARAM,
7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -1980,8 +1980,8 @@ RE_SEARCH:
for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++)
rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
- rtsx_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
- rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_device_type, 0, 0);
+ rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_support, 0, 0);
retval = rtsx_send_cmd(chip, MS_CARD, 100);
if (retval < 0)
@@ -2057,7 +2057,7 @@ RE_SEARCH:
/* Switch I/F Mode */
if (ptr[15]) {
- retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, SYSTEM_PARAM, 1);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -2887,7 +2887,7 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01);
+ retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, PRO_TPC_PARM, 0x01);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -2970,8 +2970,8 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
}
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 6);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3026,7 +3026,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
if (!(chip->card_wp & MS_CARD)) {
reset_ms(chip);
ms_set_page_status
- (log_blk, setPS_NG,
+ (log_blk, set_PS_NG,
extra,
MS_EXTRA_SIZE);
ms_write_extra_data
@@ -3131,8 +3131,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
u8 *ptr;
if (!start_page) {
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, 7);
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, 7);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3165,8 +3165,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
return STATUS_FAIL;
}
- retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
- SystemParm, (6 + MS_EXTRA_SIZE));
+ retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
+ SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -3773,9 +3773,9 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
u8 buf[6];
if (type == 0)
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_TPC_PARM, 1);
else
- retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
+ retval = ms_set_rw_reg_addr(chip, 0, 0, PRO_DATA_COUNT1, 6);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
@@ -4154,7 +4154,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- goto SetICVFinish;
+ goto set_ICV_finish;
}
#ifdef MG_SET_ICV_SLOW
@@ -4195,7 +4195,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
SENSE_TYPE_MG_WRITE_ERR);
}
retval = STATUS_FAIL;
- goto SetICVFinish;
+ goto set_ICV_finish;
}
}
#else
@@ -4214,11 +4214,11 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
} else {
set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
}
- goto SetICVFinish;
+ goto set_ICV_finish;
}
#endif
-SetICVFinish:
+set_ICV_finish:
kfree(buf);
return retval;
}
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index 952cc14dd079..33bda9ce36b6 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -92,37 +92,37 @@
#define PRO_FORMAT 0x10
#define PRO_SLEEP 0x11
-#define IntReg 0x01
-#define StatusReg0 0x02
-#define StatusReg1 0x03
-
-#define SystemParm 0x10
-#define BlockAdrs 0x11
-#define CMDParm 0x14
-#define PageAdrs 0x15
-
-#define OverwriteFlag 0x16
-#define ManagemenFlag 0x17
-#define LogicalAdrs 0x18
-#define ReserveArea 0x1A
-
-#define Pro_IntReg 0x01
-#define Pro_StatusReg 0x02
-#define Pro_TypeReg 0x04
-#define Pro_IFModeReg 0x05
-#define Pro_CatagoryReg 0x06
-#define Pro_ClassReg 0x07
-
-#define Pro_SystemParm 0x10
-#define Pro_DataCount1 0x11
-#define Pro_DataCount0 0x12
-#define Pro_DataAddr3 0x13
-#define Pro_DataAddr2 0x14
-#define Pro_DataAddr1 0x15
-#define Pro_DataAddr0 0x16
-
-#define Pro_TPCParm 0x17
-#define Pro_CMDParm 0x18
+#define INT_REG 0x01
+#define STATUS_REG0 0x02
+#define STATUS_REG1 0x03
+
+#define SYSTEM_PARAM 0x10
+#define BLOCK_ADRS 0x11
+#define CMD_PARM 0x14
+#define PAGE_ADRS 0x15
+
+#define OVERWRITE_FLAG 0x16
+#define MANAGEMEN_FLAG 0x17
+#define LOGICAL_ADRS 0x18
+#define RESERVE_AREA 0x1A
+
+#define PRO_INT_REG 0x01
+#define PRO_STATUS_REG 0x02
+#define PRO_TYPE_REG 0x04
+#define PRO_IF_MODE_REG 0x05
+#define PRO_CATEGORY_REG 0x06
+#define PRO_CLASS_REG 0x07
+
+#define PRO_SYSTEM_PARAM 0x10
+#define PRO_DATA_COUNT1 0x11
+#define PRO_DATA_COUNT0 0x12
+#define PRO_DATA_ADDR3 0x13
+#define PRO_DATA_ADDR2 0x14
+#define PRO_DATA_ADDR1 0x15
+#define PRO_DATA_ADDR0 0x16
+
+#define PRO_TPC_PARM 0x17
+#define PRO_CMD_PARM 0x18
#define INT_REG_CED 0x80
#define INT_REG_ERR 0x40
@@ -152,12 +152,12 @@
#define PAGE_SIZE_0 (PPBUF_BASE2 + 0x1a0 + 8)
#define PAGE_SIZE_1 (PPBUF_BASE2 + 0x1a0 + 9)
-#define MS_Device_Type (PPBUF_BASE2 + 0x1D8)
+#define MS_device_type (PPBUF_BASE2 + 0x1D8)
-#define MS_4bit_Support (PPBUF_BASE2 + 0x1D3)
+#define MS_4bit_support (PPBUF_BASE2 + 0x1D3)
-#define setPS_NG 1
-#define setPS_Error 0
+#define set_PS_NG 1
+#define set_PS_error 0
#define PARALLEL_8BIT_IF 0x40
#define PARALLEL_4BIT_IF 0x00
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index fa597953e9a0..cb95ad6fa4f9 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -873,7 +873,8 @@ static int rtsx_probe(struct pci_dev *pci,
(unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));
dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
- &dev->rtsx_resv_buf_addr, GFP_KERNEL);
+ &dev->rtsx_resv_buf_addr,
+ GFP_KERNEL);
if (!dev->rtsx_resv_buf) {
dev_err(&pci->dev, "alloc dma buffer fail\n");
err = -ENXIO;
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 561851cc8780..5f1eefe80f1e 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -677,8 +677,8 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
spin_unlock_irq(&rtsx->reg_lock);
/* Wait for TRANS_OK_INT */
- timeleft = wait_for_completion_interruptible_timeout(
- &trans_done, msecs_to_jiffies(timeout));
+ timeleft = wait_for_completion_interruptible_timeout(&trans_done,
+ msecs_to_jiffies(timeout));
if (timeleft <= 0) {
dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
__func__, __LINE__);
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index dc9e8cad7a74..f4ff62653b56 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -232,7 +232,7 @@
#define DCM_LOW_FREQUENCY_MODE 0x01
#define DCM_HIGH_FREQUENCY_MODE_SET 0x0C
-#define DCM_Low_FREQUENCY_MODE_SET 0x00
+#define DCM_LOW_FREQUENCY_MODE_SET 0x00
#define MULTIPLY_BY_1 0x00
#define MULTIPLY_BY_2 0x01
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index f3dc96a4c59d..0f369935fb6c 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -630,13 +630,13 @@ static int reset_xd(struct rtsx_chip *chip)
xd_card->zone_cnt = 32;
xd_card->capacity = 1024000;
break;
- case xD_1G_X8_512:
+ case XD_1G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 64;
xd_card->capacity = 2048000;
break;
- case xD_2G_X8_512:
+ case XD_2G_X8_512:
XD_PAGE_512(xd_card);
xd_card->addr_cycle = 4;
xd_card->zone_cnt = 128;
@@ -669,10 +669,10 @@ static int reset_xd(struct rtsx_chip *chip)
return STATUS_FAIL;
}
- retval = xd_read_id(chip, READ_xD_ID, id_buf, 4);
+ retval = xd_read_id(chip, READ_XD_ID, id_buf, 4);
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- dev_dbg(rtsx_dev(chip), "READ_xD_ID: 0x%x 0x%x 0x%x 0x%x\n",
+ dev_dbg(rtsx_dev(chip), "READ_XD_ID: 0x%x 0x%x 0x%x 0x%x\n",
id_buf[0], id_buf[1], id_buf[2], id_buf[3]);
if (id_buf[2] != XD_ID_CODE)
return STATUS_FAIL;
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 57b94129b26f..98c00f268e56 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -36,7 +36,7 @@
#define BLK_ERASE_1 0x60
#define BLK_ERASE_2 0xD0
#define READ_STS 0x70
-#define READ_xD_ID 0x9A
+#define READ_XD_ID 0x9A
#define COPY_BACK_512 0x8A
#define COPY_BACK_2K 0x85
#define READ1_1_2 0x30
@@ -72,8 +72,8 @@
#define XD_128M_X16_2048 0xC1
#define XD_4M_X8_512_1 0xE3
#define XD_4M_X8_512_2 0xE5
-#define xD_1G_X8_512 0xD3
-#define xD_2G_X8_512 0xD5
+#define XD_1G_X8_512 0xD3
+#define XD_2G_X8_512 0xD5
#define XD_ID_CODE 0xB5
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 5a317cc98a4b..02860d3ec365 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -56,7 +56,6 @@ static unsigned int get_mxclk_freq(void)
static void set_chip_clock(unsigned int frequency)
{
struct pll_value pll;
- unsigned int actual_mx_clk;
/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
if (sm750_get_chip_type() == SM750LE)
@@ -66,8 +65,8 @@ static void set_chip_clock(unsigned int frequency)
/*
* Set up PLL structure to hold the value to be set in clocks.
*/
- pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
- pll.clockType = MXCLK_PLL;
+ pll.input_freq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
+ pll.clock_type = MXCLK_PLL;
/*
* Call sm750_calc_pll_value() to fill the other fields
@@ -76,7 +75,7 @@ static void set_chip_clock(unsigned int frequency)
* Return value of sm750_calc_pll_value gives the actual
* possible clock.
*/
- actual_mx_clk = sm750_calc_pll_value(frequency, &pll);
+ sm750_calc_pll_value(frequency, &pll);
/* Master Clock Control: MXCLK_PLL */
poke32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
@@ -211,13 +210,13 @@ unsigned int ddk750_get_vm_size(void)
return data;
}
-int ddk750_init_hw(struct initchip_param *pInitParam)
+int ddk750_init_hw(struct initchip_param *p_init_param)
{
unsigned int reg;
- if (pInitParam->powerMode != 0)
- pInitParam->powerMode = 0;
- sm750_set_power_mode(pInitParam->powerMode);
+ if (p_init_param->power_mode != 0)
+ p_init_param->power_mode = 0;
+ sm750_set_power_mode(p_init_param->power_mode);
/* Enable display power gate & LOCALMEM power gate*/
reg = peek32(CURRENT_GATE);
@@ -238,13 +237,13 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
}
/* Set the Main Chip Clock */
- set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
+ set_chip_clock(MHz((unsigned int)p_init_param->chip_clock));
/* Set up memory clock. */
- set_memory_clock(MHz(pInitParam->memClock));
+ set_memory_clock(MHz(p_init_param->mem_clock));
/* Set up master clock */
- set_master_clock(MHz(pInitParam->masterClock));
+ set_master_clock(MHz(p_init_param->master_clock));
/*
* Reset the memory controller.
@@ -252,7 +251,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
* the system might hang when sw accesses the memory.
* The memory should be resetted after changing the MXCLK.
*/
- if (pInitParam->resetMemory == 1) {
+ if (p_init_param->reset_memory == 1) {
reg = peek32(MISC_CTRL);
reg &= ~MISC_CTRL_LOCALMEM_RESET;
poke32(MISC_CTRL, reg);
@@ -261,7 +260,7 @@ int ddk750_init_hw(struct initchip_param *pInitParam)
poke32(MISC_CTRL, reg);
}
- if (pInitParam->setAllEngOff == 1) {
+ if (p_init_param->set_all_eng_off == 1) {
sm750_enable_2d_engine(0);
/* Disable Overlay, if a former application left it on */
@@ -337,13 +336,13 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
ret = 0;
mini_diff = ~0;
request = request_orig / 1000;
- input = pll->inputFreq / 1000;
+ input = pll->input_freq / 1000;
/*
* for MXCLK register,
* no POD provided, so need be treated differently
*/
- if (pll->clockType == MXCLK_PLL)
+ if (pll->clock_type == MXCLK_PLL)
max_d = 3;
for (N = 15; N > 1; N--) {
@@ -365,7 +364,7 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
if (M < 256 && M > 0) {
unsigned int diff;
- tmp_clock = pll->inputFreq * M / N / X;
+ tmp_clock = pll->input_freq * M / N / X;
diff = abs(tmp_clock - request_orig);
if (diff < mini_diff) {
pll->M = M;
@@ -383,14 +382,14 @@ unsigned int sm750_calc_pll_value(unsigned int request_orig,
return ret;
}
-unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
+unsigned int sm750_format_pll_reg(struct pll_value *p_PLL)
{
#ifndef VALIDATION_CHIP
- unsigned int POD = pPLL->POD;
+ unsigned int POD = p_PLL->POD;
#endif
- unsigned int OD = pPLL->OD;
- unsigned int M = pPLL->M;
- unsigned int N = pPLL->N;
+ unsigned int OD = p_PLL->OD;
+ unsigned int M = p_PLL->M;
+ unsigned int N = p_PLL->N;
/*
* Note that all PLL's have the same format. Here, we just use
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 3e92b3297160..ee2e9d90f7dd 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -40,8 +40,8 @@ enum clock_type {
};
struct pll_value {
- enum clock_type clockType;
- unsigned long inputFreq; /* Input clock frequency to the PLL */
+ enum clock_type clock_type;
+ unsigned long input_freq; /* Input clock frequency to the PLL */
/* Use this when clockType = PANEL_PLL */
unsigned long M;
@@ -53,41 +53,41 @@ struct pll_value {
/* input struct to initChipParam() function */
struct initchip_param {
/* Use power mode 0 or 1 */
- unsigned short powerMode;
+ unsigned short power_mode;
/*
* Speed of main chip clock in MHz unit
* 0 = keep the current clock setting
* Others = the new main chip clock
*/
- unsigned short chipClock;
+ unsigned short chip_clock;
/*
* Speed of memory clock in MHz unit
* 0 = keep the current clock setting
* Others = the new memory clock
*/
- unsigned short memClock;
+ unsigned short mem_clock;
/*
* Speed of master clock in MHz unit
* 0 = keep the current clock setting
* Others = the new master clock
*/
- unsigned short masterClock;
+ unsigned short master_clock;
/*
* 0 = leave all engine state untouched.
* 1 = make sure they are off: 2D, Overlay,
* video alpha, alpha, hardware cursors
*/
- unsigned short setAllEngOff;
+ unsigned short set_all_eng_off;
/*
* 0 = Do not reset the memory controller
* 1 = Reset the memory controller
*/
- unsigned char resetMemory;
+ unsigned char reset_memory;
/* More initialization parameter can be added if needed */
};
@@ -95,7 +95,7 @@ struct initchip_param {
enum logical_chip_type sm750_get_chip_type(void);
void sm750_set_chip_type(unsigned short dev_id, u8 rev_id);
unsigned int sm750_calc_pll_value(unsigned int request, struct pll_value *pll);
-unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
+unsigned int sm750_format_pll_reg(struct pll_value *p_PLL);
unsigned int ddk750_get_vm_size(void);
int ddk750_init_hw(struct initchip_param *pinit_param);
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 887ea8aef43f..172624ff98b0 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -147,8 +147,8 @@ void ddk750_set_logical_disp_out(enum disp_output output)
if (output & PNL_SEQ_USAGE) {
/* set panel sequence */
- sw_panel_power_sequence((output & PNL_SEQ_MASK) >> PNL_SEQ_OFFSET,
- 4);
+ sw_panel_power_sequence((output & PNL_SEQ_MASK) >>
+ PNL_SEQ_OFFSET, 4);
}
if (output & DAC_USAGE)
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 4dac691ad1b1..e00a6cb31947 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -13,8 +13,9 @@
* HW only supports 7 predefined pixel clocks, and clock select is
* in bit 29:27 of Display Control register.
*/
-static unsigned long displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
- unsigned long dispControl)
+static unsigned long
+displayControlAdjust_SM750LE(struct mode_parameter *pModeParam,
+ unsigned long dispControl)
{
unsigned long x, y;
@@ -81,7 +82,7 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
int cnt = 0;
unsigned int tmp, reg;
- if (pll->clockType == SECONDARY_PLL) {
+ if (pll->clock_type == SECONDARY_PLL) {
/* programe secondary pixel clock */
poke32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
@@ -134,7 +135,7 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
poke32(CRT_DISPLAY_CTRL, tmp | reg);
}
- } else if (pll->clockType == PRIMARY_PLL) {
+ } else if (pll->clock_type == PRIMARY_PLL) {
unsigned int reserved;
poke32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
@@ -209,12 +210,11 @@ static int programModeRegisters(struct mode_parameter *pModeParam,
int ddk750_setModeTiming(struct mode_parameter *parm, enum clock_type clock)
{
struct pll_value pll;
- unsigned int uiActualPixelClk;
- pll.inputFreq = DEFAULT_INPUT_CLOCK;
- pll.clockType = clock;
+ pll.input_freq = DEFAULT_INPUT_CLOCK;
+ pll.clock_type = clock;
- uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
+ sm750_calc_pll_value(parm->pixel_clock, &pll);
if (sm750_get_chip_type() == SM750LE) {
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index c8e856c13912..73e0e9f41ec5 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -39,8 +39,10 @@ unsigned short sii164GetVendorID(void)
{
unsigned short vendorID;
- vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
+ vendorID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_VENDOR_ID_HIGH) << 8) |
+ (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_VENDOR_ID_LOW);
return vendorID;
}
@@ -56,13 +58,18 @@ unsigned short sii164GetDeviceID(void)
{
unsigned short deviceID;
- deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
- (unsigned short)i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
+ deviceID = ((unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_DEVICE_ID_HIGH) << 8) |
+ (unsigned short)i2cReadReg(SII164_I2C_ADDRESS,
+ SII164_DEVICE_ID_LOW);
return deviceID;
}
-/* DVI.C will handle all SiI164 chip stuffs and try it best to make code minimal and useful */
+/*
+ * DVI.C will handle all SiI164 chip stuffs and try its best to make code
+ * minimal and useful
+ */
/*
* sii164InitChip
@@ -133,7 +140,8 @@ long sii164InitChip(unsigned char edge_select,
#endif
/* Check if SII164 Chip exists */
- if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID)) {
+ if ((sii164GetVendorID() == SII164_VENDOR_ID) &&
+ (sii164GetDeviceID() == SII164_DEVICE_ID)) {
/*
* Initialize SII164 controller chip.
*/
@@ -254,7 +262,9 @@ void sii164ResetChip(void)
/*
* sii164GetChipString
- * This function returns a char string name of the current DVI Controller chip.
+ * This function returns a char string name of the current DVI Controller
+ * chip.
+ *
* It's convenient for application need to display the chip name.
*/
char *sii164GetChipString(void)
@@ -330,8 +340,8 @@ void sii164EnableHotPlugDetection(unsigned char enableHotPlug)
detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
- /* Depending on each DVI controller, need to enable the hot plug based on each
- * individual chip design.
+ /* Depending on each DVI controller, need to enable the hot plug based
+ * on each individual chip design.
*/
if (enableHotPlug != 0)
sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 862e7bf27353..d940cb729066 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -6,10 +6,13 @@
/* Hot Plug detection mode structure */
enum sii164_hot_plug_mode {
- SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
- SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
- SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
- SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
+ SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit
+ * (always high).
+ */
+
+ SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
+ SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
+ SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
};
/* Silicon Image SiI164 chip prototype */
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index dbcbbd1055da..8faa601c700b 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -130,20 +130,28 @@ int sm750_hw_fillrect(struct lynx_accel *accel,
return 0;
}
-int sm750_hw_copyarea(
-struct lynx_accel *accel,
-unsigned int sBase, /* Address of source: offset in frame buffer */
-unsigned int sPitch, /* Pitch value of source surface in BYTE */
-unsigned int sx,
-unsigned int sy, /* Starting coordinate of source surface */
-unsigned int dBase, /* Address of destination: offset in frame buffer */
-unsigned int dPitch, /* Pitch value of destination surface in BYTE */
-unsigned int Bpp, /* Color depth of destination surface */
-unsigned int dx,
-unsigned int dy, /* Starting coordinate of destination surface */
-unsigned int width,
-unsigned int height, /* width and height of rectangle in pixel value */
-unsigned int rop2) /* ROP value */
+/**
+ * sm750_hw_copyarea
+ * @sBase: Address of source: offset in frame buffer
+ * @sPitch: Pitch value of source surface in BYTE
+ * @sx: Starting x coordinate of source surface
+ * @sy: Starting y coordinate of source surface
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @Bpp: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @rop2: ROP value
+ */
+int sm750_hw_copyarea(struct lynx_accel *accel,
+ unsigned int sBase, unsigned int sPitch,
+ unsigned int sx, unsigned int sy,
+ unsigned int dBase, unsigned int dPitch,
+ unsigned int Bpp, unsigned int dx, unsigned int dy,
+ unsigned int width, unsigned int height,
+ unsigned int rop2)
{
unsigned int nDirection, de_ctrl;
@@ -216,7 +224,7 @@ unsigned int rop2) /* ROP value */
/*
* Note:
- * DE_FOREGROUND are DE_BACKGROUND are don't care.
+ * DE_FOREGROUND and DE_BACKGROUND are don't care.
* DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS
* are set by set deSetTransparency().
*/
@@ -235,21 +243,21 @@ unsigned int rop2) /* ROP value */
*/
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
- /*
- * Program pitch (distance between the 1st points of two adjacent lines).
- * Note that input pitch is BYTE value, but the 2D Pitch register uses
- * pixel values. Need Byte to pixel conversion.
- */
+ /*
+ * Program pitch (distance between the 1st points of two adjacent lines).
+ * Note that input pitch is BYTE value, but the 2D Pitch register uses
+ * pixel values. Need Byte to pixel conversion.
+ */
write_dpr(accel, DE_PITCH,
((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
DE_PITCH_DESTINATION_MASK) |
(sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
- /*
- * Screen Window width in Pixels.
- * 2D engine uses this value to calculate the linear address in frame buffer
- * for a given point.
- */
+ /*
+ * Screen Window width in Pixels.
+ * 2D engine uses this value to calculate the linear address in frame buffer
+ * for a given point.
+ */
write_dpr(accel, DE_WINDOW_WIDTH,
((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
DE_WINDOW_WIDTH_DST_MASK) |
@@ -288,20 +296,28 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
return de_ctrl;
}
-int sm750_hw_imageblit(struct lynx_accel *accel,
- const char *pSrcbuf, /* pointer to start of source buffer in system memory */
- u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
- u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
- u32 dBase, /* Address of destination: offset in frame buffer */
- u32 dPitch, /* Pitch value of destination surface in BYTE */
- u32 bytePerPixel, /* Color depth of destination surface */
- u32 dx,
- u32 dy, /* Starting coordinate of destination surface */
- u32 width,
- u32 height, /* width and height of rectangle in pixel value */
- u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */
- u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
- u32 rop2) /* ROP value */
+/**
+ * sm750_hw_imageblit
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
+ * and -ive means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should be
+ * 0 to 7
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bytePerPixel: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @fColor: Foreground color (corresponding to a 1 in the monochrome data)
+ * @bColor: Background color (corresponding to a 0 in the monochrome data)
+ * @rop2: ROP value
+ */
+int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf,
+ u32 srcDelta, u32 startBit, u32 dBase, u32 dPitch,
+ u32 bytePerPixel, u32 dx, u32 dy, u32 width,
+ u32 height, u32 fColor, u32 bColor, u32 rop2)
{
unsigned int ulBytesPerScan;
unsigned int ul4BytesPerScan;
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index c4f42002a50f..2c79cb730a0a 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -190,37 +190,54 @@ void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
void sm750_hw_de_init(struct lynx_accel *accel);
int sm750_hw_fillrect(struct lynx_accel *accel,
- u32 base, u32 pitch, u32 Bpp,
- u32 x, u32 y, u32 width, u32 height,
- u32 color, u32 rop);
-
-int sm750_hw_copyarea(
-struct lynx_accel *accel,
-unsigned int sBase, /* Address of source: offset in frame buffer */
-unsigned int sPitch, /* Pitch value of source surface in BYTE */
-unsigned int sx,
-unsigned int sy, /* Starting coordinate of source surface */
-unsigned int dBase, /* Address of destination: offset in frame buffer */
-unsigned int dPitch, /* Pitch value of destination surface in BYTE */
-unsigned int bpp, /* Color depth of destination surface */
-unsigned int dx,
-unsigned int dy, /* Starting coordinate of destination surface */
-unsigned int width,
-unsigned int height, /* width and height of rectangle in pixel value */
-unsigned int rop2);
-
-int sm750_hw_imageblit(struct lynx_accel *accel,
- const char *pSrcbuf, /* pointer to start of source buffer in system memory */
- u32 srcDelta, /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
- u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
- u32 dBase, /* Address of destination: offset in frame buffer */
- u32 dPitch, /* Pitch value of destination surface in BYTE */
- u32 bytePerPixel, /* Color depth of destination surface */
- u32 dx,
- u32 dy, /* Starting coordinate of destination surface */
- u32 width,
- u32 height, /* width and height of rectangle in pixel value */
- u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data */
- u32 bColor, /* Background color (corresponding to a 0 in the monochrome data */
- u32 rop2);
+ u32 base, u32 pitch, u32 Bpp,
+ u32 x, u32 y, u32 width, u32 height,
+ u32 color, u32 rop);
+
+/**
+ * sm750_hw_copyarea
+ * @sBase: Address of source: offset in frame buffer
+ * @sPitch: Pitch value of source surface in BYTE
+ * @sx: Starting x coordinate of source surface
+ * @sy: Starting y coordinate of source surface
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @Bpp: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @rop2: ROP value
+ */
+int sm750_hw_copyarea(struct lynx_accel *accel,
+ unsigned int sBase, unsigned int sPitch,
+ unsigned int sx, unsigned int sy,
+ unsigned int dBase, unsigned int dPitch,
+ unsigned int Bpp, unsigned int dx, unsigned int dy,
+ unsigned int width, unsigned int height,
+ unsigned int rop2);
+
+/**
+ * sm750_hw_imageblit
+ * @pSrcbuf: pointer to start of source buffer in system memory
+ * @srcDelta: Pitch value (in bytes) of the source buffer, +ive means top down
+ * and -ive means bottom up
+ * @startBit: Mono data can start at any bit in a byte, this value should be
+ *>----- 0 to 7
+ * @dBase: Address of destination: offset in frame buffer
+ * @dPitch: Pitch value of destination surface in BYTE
+ * @bytePerPixel: Color depth of destination surface
+ * @dx: Starting x coordinate of destination surface
+ * @dy: Starting y coordinate of destination surface
+ * @width: width of rectangle in pixel value
+ * @height: height of rectangle in pixel value
+ * @fColor: Foreground color (corresponding to a 1 in the monochrome data)
+ * @bColor: Background color (corresponding to a 0 in the monochrome data)
+ * @rop2: ROP value
+ */
+int sm750_hw_imageblit(struct lynx_accel *accel, const char *pSrcbuf,
+ u32 srcDelta, u32 startBit, u32 dBase, u32 dPitch,
+ u32 bytePerPixel, u32 dx, u32 dy, u32 width,
+ u32 height, u32 fColor, u32 bColor, u32 rop2);
+
#endif
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 16ac07eb58d6..b59643dd61ed 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -5,14 +5,11 @@
/* hw_cursor_xxx works for voyager,718 and 750 */
void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
-void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
- int w, int h);
-void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
- int x, int y);
-void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
- u32 fg, u32 bg);
-void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
- u16 rop, const u8 *data, const u8 *mask);
-void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
- u16 rop, const u8 *data, const u8 *mask);
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor, int w, int h);
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor, int x, int y);
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor, u32 fg, u32 bg);
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
+ const u8 *data, const u8 *mask);
#endif
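The cursor prototypes now fit the usual one-declaration-per-line shape. A hedged usage sketch, assuming a populated struct lynx_cursor and valid monochrome data/mask buffers; the coordinates, colors and rop value are made up for illustration:

/* Illustrative only: program and show a 32x32 hardware cursor. */
static void show_cursor(struct lynx_cursor *cursor,
                        const u8 *data, const u8 *mask)
{
        sm750_hw_cursor_setSize(cursor, 32, 32);
        sm750_hw_cursor_setPos(cursor, 100, 100);
        sm750_hw_cursor_setColor(cursor, 0x00ffffff, 0); /* fg, bg */
        sm750_hw_cursor_setData(cursor, 0xC, data, mask); /* rop assumed */
        sm750_hw_cursor_enable(cursor);
}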
diff --git a/drivers/staging/uwb/rsv.c b/drivers/staging/uwb/rsv.c
index f45a04ff7275..d593a41c3d8d 100644
--- a/drivers/staging/uwb/rsv.c
+++ b/drivers/staging/uwb/rsv.c
@@ -614,7 +614,7 @@ int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
struct uwb_rsv_move *mv;
int ret = 0;
- if (bow->can_reserve_extra_mases == false)
+ if (!bow->can_reserve_extra_mases)
return -EBUSY;
mv = &rsv->mv;
@@ -643,7 +643,7 @@ void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
struct uwb_rsv *rsv;
struct uwb_mas_bm mas;
- if (bow->can_reserve_extra_mases == false)
+ if (!bow->can_reserve_extra_mases)
return;
list_for_each_entry(rsv, &rc->reservations, rc_node) {
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Kconfig b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
index f66319512faf..d32ea348e846 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Kconfig
+++ b/drivers/staging/vc04_services/bcm2835-audio/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config SND_BCM2835
- tristate "BCM2835 Audio"
- depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
- select SND_PCM
- select BCM2835_VCHIQ
- help
- Say Y or M if you want to support BCM2835 built in audio
+ tristate "BCM2835 Audio"
+ depends on (ARCH_BCM2835 || COMPILE_TEST) && SND
+ select SND_PCM
+ select BCM2835_VCHIQ
+ help
+ Say Y or M if you want to support BCM2835 built-in audio
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index c6f9cf1913d2..73144f1ce45e 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -9,7 +9,7 @@
struct bcm2835_audio_instance {
struct device *dev;
- VCHI_SERVICE_HANDLE_T vchi_handle;
+ struct vchi_service_handle *vchi_handle;
struct completion msg_avail_comp;
struct mutex vchi_mutex;
struct bcm2835_alsa_stream *alsa_stream;
@@ -90,7 +90,7 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
}
static void audio_vchi_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
+ const enum vchi_callback_reason reason,
void *msg_handle)
{
struct bcm2835_audio_instance *instance = param;
@@ -103,6 +103,9 @@ static void audio_vchi_callback(void *param,
status = vchi_msg_dequeue(instance->vchi_handle,
&m, sizeof(m), &msg_len, VCHI_FLAGS_NONE);
+ if (status)
+ return;
+
if (m.type == VC_AUDIO_MSG_TYPE_RESULT) {
instance->result = m.result.success;
complete(&instance->msg_avail_comp);
@@ -119,7 +122,7 @@ static void audio_vchi_callback(void *param,
}
static int
-vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
+vc_vchi_audio_init(struct vchi_instance_handle *vchi_instance,
struct bcm2835_audio_instance *instance)
{
struct service_creation params = {
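The new "if (status) return;" guard above matters because vchi_msg_dequeue() only fills the caller's buffer on success. A minimal sketch of the pattern, assuming the driver's struct vc_audio_msg from its service-definition header:

/* Sketch: never inspect the message unless the dequeue succeeded. */
static void drain_one_msg(struct bcm2835_audio_instance *instance)
{
        struct vc_audio_msg m;
        uint32_t msg_len;

        if (vchi_msg_dequeue(instance->vchi_handle, &m, sizeof(m),
                             &msg_len, VCHI_FLAGS_NONE))
                return; /* nothing dequeued; 'm' is uninitialised */

        /* ... safe to look at m.type from here on ... */
}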
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index ed0feb34b6c8..d2fe8d36ab7d 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -44,7 +44,7 @@ enum snd_bcm2835_ctrl {
};
struct bcm2835_vchi_ctx {
- VCHI_INSTANCE_T vchi_instance;
+ struct vchi_instance_handle *vchi_instance;
};
/* definition of the chip-specific record */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index d4d1e44b16b2..beb6a0063bb8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1090,8 +1090,8 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
ret = vchiq_mmal_port_set_format(dev->instance, camera_port);
- if (!ret
- && camera_port ==
+ if (!ret &&
+ camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO]) {
bool overlay_enabled =
!!dev->component[COMP_PREVIEW]->enabled;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 1c180ead4a20..de03b90021a8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -155,7 +155,7 @@ struct mmal_msg_context {
};
struct vchiq_mmal_instance {
- VCHI_SERVICE_HANDLE_T handle;
+ struct vchi_service_handle *handle;
/* ensure serialised access to service */
struct mutex vchiq_mutex;
@@ -535,7 +535,7 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
/* incoming event service callback */
static void service_callback(void *param,
- const VCHI_CALLBACK_REASON_T reason,
+ const enum vchi_callback_reason reason,
void *bulk_ctx)
{
struct vchiq_mmal_instance *instance = param;
@@ -1814,7 +1814,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
{
int status;
struct vchiq_mmal_instance *instance;
- static VCHI_INSTANCE_T vchi_instance;
+ static struct vchi_instance_handle *vchi_instance;
struct service_creation params = {
.version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
.service_id = VC_MMAL_SERVER_NAME,
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index f85562b9ba9e..56b1037d8e25 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -8,17 +8,17 @@
#include "interface/vchi/vchi_common.h"
/******************************************************************************
- Global defs
+ * Global defs
*****************************************************************************/
-#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x))+VCHI_BULK_ALIGN-1) & ~(VCHI_BULK_ALIGN-1))
-#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN-1))
-#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN-1))))
+#define VCHI_BULK_ROUND_UP(x) ((((unsigned long)(x)) + VCHI_BULK_ALIGN - 1) & ~(VCHI_BULK_ALIGN - 1))
+#define VCHI_BULK_ROUND_DOWN(x) (((unsigned long)(x)) & ~(VCHI_BULK_ALIGN - 1))
+#define VCHI_BULK_ALIGN_NBYTES(x) (VCHI_BULK_ALIGNED(x) ? 0 : (VCHI_BULK_ALIGN - ((unsigned long)(x) & (VCHI_BULK_ALIGN - 1))))
#ifdef USE_VCHIQ_ARM
#define VCHI_BULK_ALIGNED(x) 1
#else
-#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN-1)) == 0)
+#define VCHI_BULK_ALIGNED(x) (((unsigned long)(x) & (VCHI_BULK_ALIGN - 1)) == 0)
#endif
struct vchi_version {
@@ -45,18 +45,19 @@ struct vchi_held_msg {
struct service_creation {
struct vchi_version version;
int32_t service_id;
- VCHI_CALLBACK_T callback;
+ vchi_callback callback;
void *callback_param;
};
// Opaque handle for a VCHI instance
-typedef struct opaque_vchi_instance_handle_t *VCHI_INSTANCE_T;
+struct vchi_instance_handle;
// Opaque handle for a server or client
-typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
+struct vchi_service_handle;
/******************************************************************************
- Global funcs - implementation is specific to which side you are on (local / remote)
+ * Global funcs - implementation is specific to which side you are on
+ * (local / remote)
*****************************************************************************/
#ifdef __cplusplus
@@ -64,98 +65,99 @@ extern "C" {
#endif
// Routine used to initialise the vchi on both local + remote connections
-extern int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle);
+extern int32_t vchi_initialise(struct vchi_instance_handle **instance_handle);
extern int32_t vchi_exit(void);
-extern int32_t vchi_connect(VCHI_INSTANCE_T instance_handle);
+extern int32_t vchi_connect(struct vchi_instance_handle *instance_handle);
//When this is called, ensure that all services have no data pending.
//Bulk transfers can remain 'queued'
-extern int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle);
+extern int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle);
// helper functions
-extern void *vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *length);
-extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
-extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
+extern void *vchi_allocate_buffer(struct vchi_service_handle *handle, uint32_t *length);
+extern void vchi_free_buffer(struct vchi_service_handle *handle, void *address);
+extern uint32_t vchi_current_time(struct vchi_instance_handle *instance_handle);
/******************************************************************************
- Global service API
+ * Global service API
*****************************************************************************/
// Routine to destroy a service
-extern int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_destroy(const struct vchi_service_handle *handle);
// Routine to open a named service
-extern int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+extern int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
- VCHI_SERVICE_HANDLE_T *handle);
+ struct vchi_service_handle **handle);
-extern int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_get_peer_version(const struct vchi_service_handle *handle,
short *peer_version);
// Routine to close a named service
-extern int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_close(const struct vchi_service_handle *handle);
// Routine to increment ref count on a named service
-extern int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_use(const struct vchi_service_handle *handle);
// Routine to decrement ref count on a named service
-extern int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_service_release(const struct vchi_service_handle *handle);
// Routine to set a control option for a named service
-extern int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
- VCHI_SERVICE_OPTION_T option,
- int value);
+extern int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
+ enum vchi_service_option option,
+ int value);
/* Routine to send a message from kernel memory across a service */
extern int
-vchi_queue_kernel_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size);
/* Routine to send a message from user memory across a service */
extern int
-vchi_queue_user_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_user_message(struct vchi_service_handle *handle,
void __user *data,
unsigned int size);
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
-extern int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_dequeue(struct vchi_service_handle *handle,
void *data,
uint32_t max_data_size_to_read,
uint32_t *actual_msg_size,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
// Routine to look at a message in place.
// The message is not dequeued, so a subsequent call to peek or dequeue
// will return the same message.
-extern int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_peek(struct vchi_service_handle *handle,
void **data,
uint32_t *msg_size,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
// Routine to remove a message after it has been read in place with peek
// The first message on the queue is dequeued.
-extern int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle);
+extern int32_t vchi_msg_remove(struct vchi_service_handle *handle);
// Routine to look at a message in place.
// The message is dequeued, so the caller is left holding it; the descriptor is
// filled in and must be released when the user has finished with the message.
-extern int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_hold(struct vchi_service_handle *handle,
void **data, // } may be NULL, as info can be
uint32_t *msg_size, // } obtained from HELD_MSG_T
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
struct vchi_held_msg *message_descriptor);
// Initialise an iterator to look through messages in place
-extern int32_t vchi_msg_look_ahead(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_msg_look_ahead(struct vchi_service_handle *handle,
struct vchi_msg_iter *iter,
- VCHI_FLAGS_T flags);
+ enum vchi_flags flags);
-/******************************************************************************
- Global service support API - operations on held messages and message iterators
- *****************************************************************************/
+/*******************************************************************************
+ * Global service support API - operations on held messages
+ * and message iterators
+ ******************************************************************************/
// Routine to get the address of a held message
extern void *vchi_held_msg_ptr(const struct vchi_held_msg *message);
@@ -196,42 +198,42 @@ extern int32_t vchi_msg_iter_hold_next(struct vchi_msg_iter *iter,
struct vchi_held_msg *message);
/******************************************************************************
- Global bulk API
+ * Global bulk API
*****************************************************************************/
// Routine to prepare interface for a transfer from the other side
-extern int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle,
void *data_dst,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
// Prepare interface for a transfer from the other side into relocatable memory.
-int32_t vchi_bulk_queue_receive_reloc(const VCHI_SERVICE_HANDLE_T handle,
+int32_t vchi_bulk_queue_receive_reloc(const struct vchi_service_handle *handle,
uint32_t offset,
uint32_t data_size,
- const VCHI_FLAGS_T flags,
+ const enum vchi_flags flags,
void * const bulk_handle);
// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
-extern int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
const void *data_src,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
/******************************************************************************
- Configuration plumbing
+ * Configuration plumbing
*****************************************************************************/
#ifdef __cplusplus
}
#endif
-extern int32_t vchi_bulk_queue_transmit_reloc(VCHI_SERVICE_HANDLE_T handle,
+extern int32_t vchi_bulk_queue_transmit_reloc(struct vchi_service_handle *handle,
uint32_t offset,
uint32_t data_size,
- VCHI_FLAGS_T flags,
+ enum vchi_flags flags,
void *transfer_handle);
#endif /* VCHI_H_ */
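The whole header follows one mechanical conversion: typedef'd opaque pointer types become bare forward-declared structs, per the kernel's no-typedef convention. The pattern in miniature, with one subtlety worth noting:

/* Before: the typedef hides a pointer, so 'const' binds to the pointer. */
typedef struct opaque_vchi_service_handle_t *VCHI_SERVICE_HANDLE_T;
int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle);

/* After: a plain forward declaration; 'const' now binds to the pointee. */
struct vchi_service_handle;
int32_t vchi_service_close(const struct vchi_service_handle *handle);

The const shift is benign for an opaque handle the callee never defines, but it is a real semantic difference, not just a spelling change.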
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
index 89aa4e6122cd..138c36151a22 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -4,13 +4,17 @@
#ifndef VCHI_CFG_H_
#define VCHI_CFG_H_
-/****************************************************************************************
- * Defines in this first section are part of the VCHI API and may be examined by VCHI
- * services.
- ***************************************************************************************/
-
-/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*******************************************************************************
+ * Defines in this first section are part of the VCHI API and may be examined by
+ * VCHI services.
+ ******************************************************************************/
+
+/*
+ * Required alignment of base addresses for bulk transfer, if unaligned
+ * transfers are not enabled
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
#ifndef VCHI_BULK_ALIGN
# if __VCCOREVER__ >= 0x04000000
# define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
@@ -19,9 +23,13 @@
# endif
#endif
-/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
-/* May be less than or greater than VCHI_BULK_ALIGN */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*
+ * Required length multiple for bulk transfers, if unaligned transfers are
+ * not enabled
+ * May be less than or greater than VCHI_BULK_ALIGN
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
#ifndef VCHI_BULK_GRANULARITY
# if __VCCOREVER__ >= 0x04000000
# define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
@@ -39,19 +47,24 @@
# endif
#endif
-/******************************************************************************************
- * Defines below are system configuration options, and should not be used by VCHI services.
- *****************************************************************************************/
+/******************************************************************************
+ * Defines below are system configuration options, and should not be used by
+ * VCHI services.
+ ******************************************************************************/
-/* How many connections can we support? A localhost implementation uses 2 connections,
- * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
- * driver. */
+/*
+ * How many connections can we support? A localhost implementation uses
+ * 2 connections, 1 for host-app, 1 for VMCS, and these are hooked together
+ * by a loopback MPHI VCFW driver.
+ */
#ifndef VCHI_MAX_NUM_CONNECTIONS
# define VCHI_MAX_NUM_CONNECTIONS 3
#endif
-/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
- * amount of static memory. */
+/*
+ * How many services can we open per connection? Extending this doesn't cost
+ * processing time, just a small amount of static memory.
+ */
#ifndef VCHI_MAX_SERVICES_PER_CONNECTION
# define VCHI_MAX_SERVICES_PER_CONNECTION 36
#endif
@@ -66,8 +79,10 @@
# define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
#endif
-/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
- * receive queue space, less message headers. */
+/*
+ * How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the
+ * effective receive queue space, less message headers.
+ */
#ifndef VCHI_NUM_READ_SLOTS
# if defined(VCHI_LOCAL_HOST_PORT)
# define VCHI_NUM_READ_SLOTS 4
@@ -76,112 +91,141 @@
# endif
#endif
-/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
- * performance. Only define on VideoCore end, talking to host.
+/*
+ * Do we utilise overrun facility for receive message slots? Can aid peer
+ * transmit performance. Only define on VideoCore end, talking to host.
*/
//#define VCHI_MSG_RX_OVERRUN
-/* How many transmit slots do we use. Generally don't need many, as the hardware driver
- * underneath VCHI will usually have its own buffering. */
+/*
+ * How many transmit slots do we use. Generally don't need many,
+ * as the hardware driver underneath VCHI will usually have its own buffering.
+ */
#ifndef VCHI_NUM_WRITE_SLOTS
# define VCHI_NUM_WRITE_SLOTS 4
#endif
-/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
- * then it's taking up too much buffer space, and the peer service will be told to stop
- * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
- * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
- * is too high. */
+/*
+ * If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or
+ * more slots, then it's taking up too much buffer space,
+ * and the peer service will be told to stop transmitting with an XOFF message.
+ * For this to be effective, the VCHI_NUM_READ_SLOTS needs to be considerably
+ * bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency is too high.
+ */
#ifndef VCHI_XOFF_THRESHOLD
# define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
#endif
-/* After we've sent an XOFF, the peer will be told to resume transmission once the local
- * service has dequeued/released enough messages that it's now occupying
- * VCHI_XON_THRESHOLD slots or fewer. */
+/*
+ * After we've sent an XOFF, the peer will be told to resume transmission
+ * once the local service has dequeued/released enough messages that it's now
+ * occupying VCHI_XON_THRESHOLD slots or fewer.
+ */
#ifndef VCHI_XON_THRESHOLD
# define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
#endif
-/* A size below which a bulk transfer omits the handshake completely and always goes
- * via the message channel, if bulk auxiliary is being sent on that service. (The user
- * can guarantee this by enabling unaligned transmits).
- * Not API. */
+/*
+ * A size below which a bulk transfer omits the handshake completely and always
+ * goes via the message channel, if bulk auxiliary is being sent on that
+ * service. (The user can guarantee this by enabling unaligned transmits).
+ * Not API.
+ */
#ifndef VCHI_MIN_BULK_SIZE
# define VCHI_MIN_BULK_SIZE (VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096)
#endif
-/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
- * speed and latency; the smaller the chunk size the better change of messages and other
- * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
- * break transmissions into chunks.
+/*
+ * Maximum size of bulk transmission chunks, for each interface type.
+ * A trade-off between speed and latency; the smaller the chunk size the better
+ * chance of messages and other bulk transmissions getting in when big bulk
+ * transfers are happening. Set to 0 to not break transmissions into chunks.
*/
#ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
# define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
#endif
-/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
- * with multiple-line frames. Only use if the receiver can cope. */
+/*
+ * NB Chunked CCP2 transmissions violate the letter of the CCP2 spec
+ * by using "JPEG8" mode with multiple-line frames. Only use if the receiver
+ * can cope.
+ */
#ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
# define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
#endif
-/* How many TX messages can we have pending in our transmit slots. Once exhausted,
- * vchi_msg_queue will be blocked. */
+/*
+ * How many TX messages can we have pending in our transmit slots.
+ * Once exhausted, vchi_msg_queue will be blocked.
+ */
#ifndef VCHI_TX_MSG_QUEUE_SIZE
# define VCHI_TX_MSG_QUEUE_SIZE 256
#endif
-/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
- * will be suspended until older messages are dequeued/released. */
+/*
+ * How many RX messages can we have parsed in the receive slots. Once exhausted,
+ * parsing will be suspended until older messages are dequeued/released.
+ */
#ifndef VCHI_RX_MSG_QUEUE_SIZE
# define VCHI_RX_MSG_QUEUE_SIZE 256
#endif
-/* Really should be able to cope if we run out of received message descriptors, by
- * suspending parsing as the comment above says, but we don't. This sweeps the issue
- * under the carpet. */
-#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+/*
+ * Really should be able to cope if we run out of received message descriptors,
+ * by suspending parsing as the comment above says, but we don't.
+ * This sweeps the issue under the carpet.
+ */
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS
# undef VCHI_RX_MSG_QUEUE_SIZE
-# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
+# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS)
#endif
-/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
- * will be blocked. */
+/*
+ * How many bulk transmits can we have pending. Once exhausted,
+ * vchi_bulk_queue_transmit will be blocked.
+ */
#ifndef VCHI_TX_BULK_QUEUE_SIZE
# define VCHI_TX_BULK_QUEUE_SIZE 64
#endif
-/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
- * will be blocked. */
+/*
+ * How many bulk receives can we have pending. Once exhausted,
+ * vchi_bulk_queue_receive will be blocked.
+ */
#ifndef VCHI_RX_BULK_QUEUE_SIZE
# define VCHI_RX_BULK_QUEUE_SIZE 64
#endif
-/* A limit on how many outstanding bulk requests we expect the peer to give us. If
- * the peer asks for more than this, VCHI will fail and assert. The number is determined
- * by the peer's hardware - it's the number of outstanding requests that can be queued
- * on all bulk channels. VC3's MPHI peripheral allows 16. */
+/*
+ * A limit on how many outstanding bulk requests we expect the peer to give us.
+ * If the peer asks for more than this, VCHI will fail and assert.
+ * The number is determined by the peer's hardware - it's the number of
+ * outstanding requests that can be queued on all bulk channels.
+ * VC3's MPHI peripheral allows 16.
+ */
#ifndef VCHI_MAX_PEER_BULK_REQUESTS
# define VCHI_MAX_PEER_BULK_REQUESTS 32
#endif
-/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
+/*
+ * Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
* transmitter on and off.
*/
/*#define VCHI_CCP2TX_MANUAL_POWER*/
#ifndef VCHI_CCP2TX_MANUAL_POWER
-/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
- * negative for no IDLE.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state.
+ * Set negative for no IDLE.
*/
# ifndef VCHI_CCP2TX_IDLE_TIMEOUT
# define VCHI_CCP2TX_IDLE_TIMEOUT 5
# endif
-/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
- * negative for no OFF.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into OFF state.
+ * Set negative for no OFF.
*/
# ifndef VCHI_CCP2TX_OFF_TIMEOUT
# define VCHI_CCP2TX_OFF_TIMEOUT 1000
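With the defaults visible in this hunk, the XOFF/XON pair forms a classic hysteresis window around the receive-slot count. A hedged compile-time restatement of those relationships, assuming no board overrides the defaults and using static_assert from linux/build_bug.h:

#include <linux/build_bug.h>

/* XOFF tells the peer to stop once half the read slots are occupied... */
static_assert(VCHI_XOFF_THRESHOLD == VCHI_NUM_READ_SLOTS / 2);
/* ...and XON resumes it at a quarter, so flow control has hysteresis. */
static_assert(VCHI_XON_THRESHOLD == VCHI_NUM_READ_SLOTS / 4);
static_assert(VCHI_XON_THRESHOLD < VCHI_XOFF_THRESHOLD);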
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index e7955cbaf26a..141af16ce031 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -5,7 +5,7 @@
#define VCHI_COMMON_H_
//flags used when sending messages (must be bitmapped)
-typedef enum {
+enum vchi_flags {
VCHI_FLAGS_NONE = 0x0,
VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE = 0x1, // waits for message to be received, or sent (NB. not the same as being seen on other side)
VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE = 0x2, // run a callback when message sent
@@ -20,17 +20,17 @@ typedef enum {
VCHI_FLAGS_BULK_DATA_QUEUED = 0x040000, // internal use only
VCHI_FLAGS_BULK_DATA_COMPLETE = 0x080000, // internal use only
VCHI_FLAGS_INTERNAL = 0xFF0000
-} VCHI_FLAGS_T;
+};
// constants for vchi_crc_control()
-typedef enum {
+enum vchi_crc_control {
VCHI_CRC_NOTHING = -1,
VCHI_CRC_PER_SERVICE = 0,
VCHI_CRC_EVERYTHING = 1,
-} VCHI_CRC_CONTROL_T;
+};
//callback reasons when an event occurs on a service
-typedef enum {
+enum vchi_callback_reason {
VCHI_CALLBACK_REASON_MIN,
//This indicates that there is data available
@@ -73,22 +73,22 @@ typedef enum {
VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
VCHI_CALLBACK_REASON_MAX
-} VCHI_CALLBACK_REASON_T;
+};
// service control options
-typedef enum {
+enum vchi_service_option {
VCHI_SERVICE_OPTION_MIN,
VCHI_SERVICE_OPTION_TRACE,
VCHI_SERVICE_OPTION_SYNCHRONOUS,
VCHI_SERVICE_OPTION_MAX
-} VCHI_SERVICE_OPTION_T;
+};
//Callback used by all services / bulk transfers
-typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
- VCHI_CALLBACK_REASON_T reason,
- void *handle); //for transmitting msg's only
+typedef void (*vchi_callback)(void *callback_param, //my service local param
+ enum vchi_callback_reason reason,
+ void *handle); //for transmitting msg's only
/*
* Define vector struct for scatter-gather (vector) operations
@@ -112,12 +112,6 @@ struct vchi_msg_vector {
int32_t vec_len;
};
-// Opaque type for a connection API
-typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
-
-// Opaque type for a message driver
-typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
-
// Iterator structure for reading ahead through received message queue. Allocated by client,
// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
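Dropping the _T typedefs leaves the enumerators untouched, so the flags stay a bitmap and still combine with OR; only the type spelling at call sites changes. A tiny sketch:

/* The named enum types the parameter; the values still OR together. */
static enum vchi_flags blocking_with_callback(void)
{
        return VCHI_FLAGS_BLOCK_UNTIL_OP_COMPLETE |
               VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE;
}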
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 8dc730cfe7a6..ca30bfd52919 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -81,7 +81,6 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
struct rpi_firmware *fw = drvdata->fw;
struct vchiq_slot_zero *vchiq_slot_zero;
- struct resource *res;
void *slot_mem;
dma_addr_t slot_phys;
u32 channelbase;
@@ -135,8 +134,7 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g_regs = devm_ioremap_resource(&pdev->dev, res);
+ g_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g_regs))
return PTR_ERR(g_regs);
@@ -170,10 +168,10 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
return 0;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_2835_state *platform_state;
state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
@@ -216,7 +214,7 @@ remote_event_signal(struct remote_event *event)
writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
int dir)
{
@@ -249,24 +247,23 @@ vchiq_complete_bulk(struct vchiq_bulk *bulk)
bulk->actual);
}
-void
-vchiq_dump_platform_state(void *dump_context)
+int vchiq_dump_platform_state(void *dump_context)
{
char buf[80];
int len;
len = snprintf(buf, sizeof(buf),
" Platform: 2835 (VC master)");
- vchiq_dump(dump_context, buf, len + 1);
+ return vchiq_dump(dump_context, buf, len + 1);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_suspend(struct vchiq_state *state)
{
return VCHIQ_ERROR;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_platform_resume(struct vchiq_state *state)
{
return VCHIQ_SUCCESS;
@@ -526,11 +523,11 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
return NULL;
}
- WARN_ON(g_free_fragments == NULL);
+ WARN_ON(!g_free_fragments);
down(&g_free_fragments_mutex);
fragments = g_free_fragments;
- WARN_ON(fragments == NULL);
+ WARN_ON(!fragments);
g_free_fragments = *(char **) g_free_fragments;
up(&g_free_fragments_mutex);
pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
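devm_platform_ioremap_resource() is the stock wrapper for the get-resource-then-ioremap pair, which is why the local struct resource pointer disappears above. Its effect in a generic probe routine, sketched with a hypothetical 'base'; the IS_ERR()/PTR_ERR() error path is unchanged:

static int example_probe(struct platform_device *pdev)
{
        void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... use 'base' as before; devm handles the unmap ... */
        return 0;
}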
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index b1595b13dea8..02148a24818a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -84,7 +84,7 @@ static void suspend_timer_callback(struct timer_list *t);
struct user_service {
struct vchiq_service *service;
void *userdata;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
char is_vchi;
char dequeue_pending;
char close_pending;
@@ -103,7 +103,7 @@ struct bulk_waiter_node {
struct list_head list;
};
-struct vchiq_instance_struct {
+struct vchiq_instance {
struct vchiq_state *state;
struct vchiq_completion_data completions[MAX_COMPLETIONS];
int completion_insert;
@@ -172,16 +172,16 @@ static const char *const ioctl_names[] = {
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
(VCHIQ_IOC_MAX + 1));
-static VCHIQ_STATUS_T
-vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, VCHIQ_BULK_DIR_T dir);
+static enum vchiq_status
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir);
#define VCHIQ_INIT_RETRIES 10
-VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
+enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state;
- VCHIQ_INSTANCE_T instance = NULL;
+ struct vchiq_instance *instance = NULL;
int i;
vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
@@ -230,9 +230,9 @@ failed:
}
EXPORT_SYMBOL(vchiq_initialise);
-VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
+enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
vchiq_log_trace(vchiq_core_log_level,
@@ -267,14 +267,14 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
}
EXPORT_SYMBOL(vchiq_shutdown);
-static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
+static int vchiq_is_connected(struct vchiq_instance *instance)
{
return instance->connected;
}
-VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
+enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
vchiq_log_trace(vchiq_core_log_level,
@@ -301,12 +301,12 @@ failed:
}
EXPORT_SYMBOL(vchiq_connect);
-VCHIQ_STATUS_T vchiq_add_service(
- VCHIQ_INSTANCE_T instance,
+enum vchiq_status vchiq_add_service(
+ struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *phandle)
+ unsigned int *phandle)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
int srvstate;
@@ -340,12 +340,12 @@ VCHIQ_STATUS_T vchiq_add_service(
}
EXPORT_SYMBOL(vchiq_add_service);
-VCHIQ_STATUS_T vchiq_open_service(
- VCHIQ_INSTANCE_T instance,
+enum vchiq_status vchiq_open_service(
+ struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *phandle)
+ unsigned int *phandle)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
@@ -380,11 +380,11 @@ failed:
}
EXPORT_SYMBOL(vchiq_open_service);
-VCHIQ_STATUS_T
-vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
- unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+enum vchiq_status
+vchiq_bulk_transmit(unsigned int handle, const void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
@@ -405,11 +405,11 @@ vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
-VCHIQ_STATUS_T
-vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
+enum vchiq_status
+vchiq_bulk_receive(unsigned int handle, void *data,
+ unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
switch (mode) {
case VCHIQ_BULK_MODE_NOCALLBACK:
@@ -429,13 +429,13 @@ vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
}
EXPORT_SYMBOL(vchiq_bulk_receive);
-static VCHIQ_STATUS_T
-vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
- unsigned int size, VCHIQ_BULK_DIR_T dir)
+static enum vchiq_status
+vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
+ unsigned int size, enum vchiq_bulk_dir dir)
{
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
struct vchiq_service *service;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
struct bulk_waiter_node *waiter = NULL;
service = find_service_by_handle(handle);
@@ -515,8 +515,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
*
***************************************************************************/
-static VCHIQ_STATUS_T
-add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
+static enum vchiq_status
+add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_header *header, struct user_service *user_service,
void *bulk_userdata)
{
@@ -582,9 +582,9 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
*
***************************************************************************/
-static VCHIQ_STATUS_T
-service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
+static enum vchiq_status
+service_callback(enum vchiq_reason reason, struct vchiq_header *header,
+ unsigned int handle, void *bulk_userdata)
{
/* How do we ensure the callback goes to the right client?
** The service_user data points to a user_service record
@@ -593,7 +593,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
*/
struct user_service *user_service;
struct vchiq_service *service;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
bool skip_completion = false;
DEBUG_INITIALISE(g_state.local)
@@ -630,7 +630,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
*/
if ((user_service->message_available_pos -
instance->completion_remove) < 0) {
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
vchiq_log_info(vchiq_arm_log_level,
"Inserting extra MESSAGE_AVAILABLE");
@@ -772,8 +772,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
* vchiq_ioc_queue_message
*
**************************************************************************/
-static VCHIQ_STATUS_T
-vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+static enum vchiq_status
+vchiq_ioc_queue_message(unsigned int handle,
struct vchiq_element *elements,
unsigned long count)
{
@@ -804,8 +804,8 @@ vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- VCHIQ_INSTANCE_T instance = file->private_data;
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ struct vchiq_instance *instance = file->private_data;
+ enum vchiq_status status = VCHIQ_SUCCESS;
struct vchiq_service *service = NULL;
long ret = 0;
int i, rc;
@@ -827,7 +827,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Remove all services */
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
status = vchiq_remove_service(service->handle);
unlock_service(service);
if (status != VCHIQ_SUCCESS)
@@ -907,7 +907,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
&args.params, srvstate,
instance, user_service_free);
- if (service != NULL) {
+ if (service) {
user_service->service = service;
user_service->userdata = userdata;
user_service->instance = instance;
@@ -952,7 +952,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_CLOSE_SERVICE:
case VCHIQ_IOC_REMOVE_SERVICE: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
struct user_service *user_service;
service = find_service_for_instance(instance, handle);
@@ -985,10 +985,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VCHIQ_IOC_USE_SERVICE:
case VCHIQ_IOC_RELEASE_SERVICE: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
service = find_service_for_instance(instance, handle);
- if (service != NULL) {
+ if (service) {
status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
vchiq_use_service_internal(service) :
vchiq_release_service_internal(service);
@@ -1021,7 +1021,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
service = find_service_for_instance(instance, args.handle);
- if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
+ if (service && (args.count <= MAX_ELEMENTS)) {
/* Copy elements into kernel space */
struct vchiq_element elements[MAX_ELEMENTS];
@@ -1042,7 +1042,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct vchiq_queue_bulk_transfer args;
struct bulk_waiter_node *waiter = NULL;
- VCHIQ_BULK_DIR_T dir =
+ enum vchiq_bulk_dir dir =
(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
@@ -1107,7 +1107,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
kfree(waiter);
} else {
- const VCHIQ_BULK_MODE_T mode_waiting =
+ const enum vchiq_bulk_mode mode_waiting =
VCHIQ_BULK_MODE_WAITING;
waiter->pid = current->pid;
mutex_lock(&instance->bulk_waiter_list_mutex);
@@ -1343,11 +1343,11 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_unlock(&msg_queue_spinlock);
complete(&user_service->remove_event);
- if (header == NULL)
+ if (!header)
ret = -ENOTCONN;
else if (header->size <= args.bufsize) {
/* Copy to user space if msgbuf is not NULL */
- if ((args.buf == NULL) ||
+ if (!args.buf ||
(copy_to_user((void __user *)args.buf,
header->data,
header->size) == 0)) {
@@ -1368,7 +1368,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} break;
case VCHIQ_IOC_GET_CLIENT_ID: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
ret = vchiq_get_client_id(handle);
} break;
@@ -1423,10 +1423,10 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} break;
case VCHIQ_IOC_CLOSE_DELIVERED: {
- VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
+ unsigned int handle = (unsigned int)arg;
service = find_closed_service_for_instance(instance, handle);
- if (service != NULL) {
+ if (service) {
struct user_service *user_service =
(struct user_service *)service->base.userdata;
close_delivered(user_service);
@@ -1611,7 +1611,7 @@ struct vchiq_queue_bulk_transfer32 {
compat_uptr_t data;
unsigned int size;
compat_uptr_t userdata;
- VCHIQ_BULK_MODE_T mode;
+ enum vchiq_bulk_mode mode;
};
#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
@@ -1666,7 +1666,7 @@ vchiq_compat_ioctl_queue_bulk(struct file *file,
}
struct vchiq_completion_data32 {
- VCHIQ_REASON_T reason;
+ enum vchiq_reason reason;
compat_uptr_t header;
compat_uptr_t service_userdata;
compat_uptr_t bulk_userdata;
@@ -1919,7 +1919,7 @@ vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int vchiq_open(struct inode *inode, struct file *file)
{
struct vchiq_state *state = vchiq_get_state();
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
@@ -1951,7 +1951,7 @@ static int vchiq_open(struct inode *inode, struct file *file)
static int vchiq_release(struct inode *inode, struct file *file)
{
- VCHIQ_INSTANCE_T instance = file->private_data;
+ struct vchiq_instance *instance = file->private_data;
struct vchiq_state *state = vchiq_get_state();
struct vchiq_service *service;
int ret = 0;
@@ -2072,43 +2072,45 @@ out:
*
***************************************************************************/
-void
-vchiq_dump(void *dump_context, const char *str, int len)
+int vchiq_dump(void *dump_context, const char *str, int len)
{
struct dump_context *context = (struct dump_context *)dump_context;
+ int copy_bytes;
- if (context->actual < context->space) {
- int copy_bytes;
+ if (context->actual >= context->space)
+ return 0;
- if (context->offset > 0) {
- int skip_bytes = min(len, (int)context->offset);
+ if (context->offset > 0) {
+ int skip_bytes = min_t(int, len, context->offset);
- str += skip_bytes;
- len -= skip_bytes;
- context->offset -= skip_bytes;
- if (context->offset > 0)
- return;
- }
- copy_bytes = min(len, (int)(context->space - context->actual));
- if (copy_bytes == 0)
- return;
- if (copy_to_user(context->buf + context->actual, str,
- copy_bytes))
- context->actual = -EFAULT;
- context->actual += copy_bytes;
- len -= copy_bytes;
-
- /* If tne terminating NUL is included in the length, then it
- ** marks the end of a line and should be replaced with a
- ** carriage return. */
- if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
- char cr = '\n';
-
- if (copy_to_user(context->buf + context->actual - 1,
- &cr, 1))
- context->actual = -EFAULT;
- }
+ str += skip_bytes;
+ len -= skip_bytes;
+ context->offset -= skip_bytes;
+ if (context->offset > 0)
+ return 0;
+ }
+ copy_bytes = min_t(int, len, context->space - context->actual);
+ if (copy_bytes == 0)
+ return 0;
+ if (copy_to_user(context->buf + context->actual, str,
+ copy_bytes))
+ return -EFAULT;
+ context->actual += copy_bytes;
+ len -= copy_bytes;
+
+ /*
+ * If the terminating NUL is included in the length, then it
+ * marks the end of a line and should be replaced with a
+ * carriage return.
+ */
+ if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
+ char cr = '\n';
+
+ if (copy_to_user(context->buf + context->actual - 1,
+ &cr, 1))
+ return -EFAULT;
}
+ return 0;
}
/****************************************************************************
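Two ideas land together in this rewrite: vchiq_dump() now reports -EFAULT instead of smuggling it through context->actual, and min_t() replaces the casted min(). In sketch form, with err and skip_bytes as in the rewritten body:

/* Callers can now stop at the first failed copy_to_user(): */
int err = vchiq_dump(dump_context, buf, len + 1);

if (err)
        return err;

/* min_t(type, a, b) casts both operands, replacing min(a, (int)b): */
int skip_bytes = min_t(int, len, context->offset);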
@@ -2117,8 +2119,7 @@ vchiq_dump(void *dump_context, const char *str, int len)
*
***************************************************************************/
-void
-vchiq_dump_platform_instances(void *dump_context)
+int vchiq_dump_platform_instances(void *dump_context)
{
struct vchiq_state *state = vchiq_get_state();
char buf[80];
@@ -2130,37 +2131,43 @@ vchiq_dump_platform_instances(void *dump_context)
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = state->services[i];
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
- if (service && (service->base.callback == service_callback)) {
- instance = service->instance;
- if (instance)
- instance->mark = 0;
- }
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (instance)
+ instance->mark = 0;
}
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = state->services[i];
- VCHIQ_INSTANCE_T instance;
-
- if (service && (service->base.callback == service_callback)) {
- instance = service->instance;
- if (instance && !instance->mark) {
- len = snprintf(buf, sizeof(buf),
- "Instance %pK: pid %d,%s completions %d/%d",
- instance, instance->pid,
- instance->connected ? " connected, " :
- "",
- instance->completion_insert -
- instance->completion_remove,
- MAX_COMPLETIONS);
-
- vchiq_dump(dump_context, buf, len + 1);
-
- instance->mark = 1;
- }
- }
+ struct vchiq_instance *instance;
+ int err;
+
+ if (!service || service->base.callback != service_callback)
+ continue;
+
+ instance = service->instance;
+ if (!instance || instance->mark)
+ continue;
+
+ len = snprintf(buf, sizeof(buf),
+ "Instance %pK: pid %d,%s completions %d/%d",
+ instance, instance->pid,
+ instance->connected ? " connected, " :
+ "",
+ instance->completion_insert -
+ instance->completion_remove,
+ MAX_COMPLETIONS);
+
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
+ instance->mark = 1;
}
+ return 0;
}
/****************************************************************************
@@ -2169,9 +2176,8 @@ vchiq_dump_platform_instances(void *dump_context)
*
***************************************************************************/
-void
-vchiq_dump_platform_service_state(void *dump_context,
- struct vchiq_service *service)
+int vchiq_dump_platform_service_state(void *dump_context,
+ struct vchiq_service *service)
{
struct user_service *user_service =
(struct user_service *)service->base.userdata;
@@ -2192,7 +2198,7 @@ vchiq_dump_platform_service_state(void *dump_context,
" (dequeue pending)");
}
- vchiq_dump(dump_context, buf, len + 1);
+ return vchiq_dump(dump_context, buf, len + 1);
}
/****************************************************************************
@@ -2206,13 +2212,16 @@ vchiq_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct dump_context context;
+ int err;
context.buf = buf;
context.actual = 0;
context.space = count;
context.offset = *ppos;
- vchiq_dump_state(&context, &g_state);
+ err = vchiq_dump_state(&context, &g_state);
+ if (err)
+ return err;
*ppos += context.actual;
@@ -2223,13 +2232,13 @@ struct vchiq_state *
vchiq_get_state(void)
{
- if (g_state.remote == NULL)
+ if (!g_state.remote)
printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
else if (g_state.remote->initialised != 1)
printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
__func__, g_state.remote->initialised);
- return ((g_state.remote != NULL) &&
+ return (g_state.remote &&
(g_state.remote->initialised == 1)) ? &g_state : NULL;
}
@@ -2270,10 +2279,10 @@ vchiq_videocore_wanted(struct vchiq_state *state)
return 1;
}
-static VCHIQ_STATUS_T
-vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
+static enum vchiq_status
+vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T service_user,
+ unsigned int service_user,
void *bulk_user)
{
vchiq_log_error(vchiq_susp_log_level,
@@ -2287,9 +2296,9 @@ vchiq_keepalive_thread_func(void *v)
struct vchiq_state *state = (struct vchiq_state *)v;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T status;
- VCHIQ_INSTANCE_T instance;
- VCHIQ_SERVICE_HANDLE_T ka_handle;
+ enum vchiq_status status;
+ struct vchiq_instance *instance;
+ unsigned int ka_handle;
struct vchiq_service_params params = {
.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
@@ -2361,7 +2370,7 @@ exit:
return 0;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state)
{
@@ -2563,10 +2572,10 @@ unblock_resume(struct vchiq_arm_state *arm_state)
/* Initiate suspend via slot handler. Should be called with the write lock
* held */
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_arm_vcsuspend(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
if (!arm_state)
@@ -2684,12 +2693,12 @@ out:
return resume;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
int local_uc, local_entity_uc;
@@ -2798,7 +2807,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
}
if (ret == VCHIQ_SUCCESS) {
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
while (ack_cnt && (status == VCHIQ_SUCCESS)) {
@@ -2817,11 +2826,11 @@ out:
return ret;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
+ enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
@@ -2898,33 +2907,33 @@ vchiq_on_remote_release(struct vchiq_state *state)
complete(&arm_state->ka_evt);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service)
{
return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service)
{
return vchiq_release_internal(service->state, service);
}
struct vchiq_debugfs_node *
-vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
return &instance->debugfs_node;
}
int
-vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
struct vchiq_service *service;
int use_count = 0, i;
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
use_count += service->service_use_count;
unlock_service(service);
}
@@ -2932,26 +2941,26 @@ vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
}
int
-vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_pid(struct vchiq_instance *instance)
{
return instance->pid;
}
int
-vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
+vchiq_instance_get_trace(struct vchiq_instance *instance)
{
return instance->trace;
}
void
-vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
struct vchiq_service *service;
int i;
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i)) != NULL) {
+ instance, &i))) {
service->trace = trace;
unlock_service(service);
}
@@ -2969,10 +2978,10 @@ static void suspend_timer_callback(struct timer_list *t)
vchiq_check_suspend(state);
}
-VCHIQ_STATUS_T
-vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_use_service(unsigned int handle)
{
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
@@ -2983,10 +2992,10 @@ vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
return ret;
}
-VCHIQ_STATUS_T
-vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_release_service(unsigned int handle)
{
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
@@ -3088,11 +3097,11 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
vchiq_dump_platform_use_state(state);
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state;
- VCHIQ_STATUS_T ret = VCHIQ_ERROR;
+ enum vchiq_status ret = VCHIQ_ERROR;
if (!service || !service->state)
goto out;
@@ -3128,35 +3137,36 @@ void vchiq_on_remote_use_active(struct vchiq_state *state)
}
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
- VCHIQ_CONNSTATE_T oldstate,
- VCHIQ_CONNSTATE_T newstate)
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ char threadname[16];
vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
get_conn_state_name(oldstate), get_conn_state_name(newstate));
- if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
- write_lock_bh(&arm_state->susp_res_lock);
- if (!arm_state->first_connect) {
- char threadname[16];
+ if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
+ return;
- arm_state->first_connect = 1;
- write_unlock_bh(&arm_state->susp_res_lock);
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- arm_state->ka_thread = kthread_create(
- &vchiq_keepalive_thread_func,
- (void *)state,
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->first_connect) {
+ write_unlock_bh(&arm_state->susp_res_lock);
+ return;
+ }
+
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (IS_ERR(arm_state->ka_thread)) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "vchiq: FATAL: couldn't create thread %s",
threadname);
- if (IS_ERR(arm_state->ka_thread)) {
- vchiq_log_error(vchiq_susp_log_level,
- "vchiq: FATAL: couldn't create thread %s",
- threadname);
- } else {
- wake_up_process(arm_state->ka_thread);
- }
- } else
- write_unlock_bh(&arm_state->susp_res_lock);
+ } else {
+ wake_up_process(arm_state->ka_thread);
}
}
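
The conn-state-changed hunk above replaces the nested "if connected / if not first_connect" block with guard clauses, so the keepalive-thread setup runs at a single indentation level. A standalone sketch of the same early-return shape (all names invented for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	struct conn {
		bool connected;
		bool first_connect;
	};

	static void on_state_change(struct conn *c)
	{
		if (!c->connected)
			return;		/* guard replaces the outer if */

		if (c->first_connect)
			return;		/* guard replaces the inner else */

		c->first_connect = true;
		puts("starting keepalive thread");
	}

	int main(void)
	{
		struct conn c = { true, false };

		on_state_change(&c);	/* prints once */
		on_state_change(&c);	/* now a no-op */
		return 0;
	}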
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index b424323e9613..19d2a2eefb6a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -109,13 +109,13 @@ int vchiq_platform_init(struct platform_device *pdev,
extern struct vchiq_state *
vchiq_get_state(void);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_vcsuspend(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_vcresume(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_arm_init_state(struct vchiq_state *state,
struct vchiq_arm_state *arm_state);
@@ -124,16 +124,16 @@ vchiq_check_resume(struct vchiq_state *state);
extern void
vchiq_check_suspend(struct vchiq_state *state);
-VCHIQ_STATUS_T
-vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle);
+enum vchiq_status
+vchiq_use_service(unsigned int handle);
-extern VCHIQ_STATUS_T
-vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle);
+extern enum vchiq_status
+vchiq_release_service(unsigned int handle);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_suspend(struct vchiq_state *state);
extern int
@@ -154,27 +154,27 @@ vchiq_platform_get_arm_state(struct vchiq_state *state);
extern int
vchiq_videocore_wanted(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_release_internal(struct vchiq_state *state,
struct vchiq_service *service);
extern struct vchiq_debugfs_node *
-vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_use_count(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_pid(struct vchiq_instance *instance);
extern int
-vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance);
+vchiq_instance_get_trace(struct vchiq_instance *instance);
extern void
-vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace);
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace);
extern void
set_suspend_state(struct vchiq_arm_state *arm_state,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 56a23a297fa4..76351078affb 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -132,7 +132,7 @@ vchiq_set_service_state(struct vchiq_service *service, int newstate)
}
struct vchiq_service *
-find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle)
+find_service_by_handle(unsigned int handle)
{
struct vchiq_service *service;
@@ -177,8 +177,8 @@ find_service_by_port(struct vchiq_state *state, int localport)
}
struct vchiq_service *
-find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle)
+find_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle)
{
struct vchiq_service *service;
@@ -201,8 +201,8 @@ find_service_for_instance(VCHIQ_INSTANCE_T instance,
}
struct vchiq_service *
-find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle)
+find_closed_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle)
{
struct vchiq_service *service;
@@ -227,7 +227,7 @@ find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
}
struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
+next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
int *pidx)
{
struct vchiq_service *service = NULL;
@@ -295,7 +295,7 @@ unlock:
}
int
-vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_client_id(unsigned int handle)
{
struct vchiq_service *service = find_service_by_handle(handle);
int id;
@@ -308,7 +308,7 @@ vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T handle)
}
void *
-vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_service_userdata(unsigned int handle)
{
struct vchiq_service *service = handle_to_service(handle);
@@ -316,7 +316,7 @@ vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T handle)
}
int
-vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T handle)
+vchiq_get_service_fourcc(unsigned int handle)
{
struct vchiq_service *service = handle_to_service(handle);
@@ -354,11 +354,11 @@ mark_service_closing(struct vchiq_service *service)
mark_service_closing_internal(service, 0);
}
-static inline VCHIQ_STATUS_T
-make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
+static inline enum vchiq_status
+make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
struct vchiq_header *header, void *bulk_userdata)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
@@ -375,9 +375,9 @@ make_service_callback(struct vchiq_service *service, VCHIQ_REASON_T reason,
}
inline void
-vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate)
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
- VCHIQ_CONNSTATE_T oldstate = state->conn_state;
+ enum vchiq_connstate oldstate = state->conn_state;
vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
conn_state_names[oldstate],
@@ -542,7 +542,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
if (space > slot_space) {
struct vchiq_header *header;
/* Fill the remaining space with padding */
- WARN_ON(state->tx_data == NULL);
+ WARN_ON(!state->tx_data);
header = (struct vchiq_header *)
(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
header->msgid = VCHIQ_MSGID_PADDING;
@@ -779,7 +779,7 @@ copy_message_data(
}
/* Called by the slot handler and application threads */
-static VCHIQ_STATUS_T
+static enum vchiq_status
queue_message(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1027,7 +1027,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
}
/* Called by the slot handler and application threads */
-static VCHIQ_STATUS_T
+static enum vchiq_status
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1178,11 +1178,11 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
}
/* Called by the slot handler - don't hold the bulk mutex */
-static VCHIQ_STATUS_T
+static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
int retry_poll)
{
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
vchiq_log_trace(vchiq_core_log_level,
"%d: nb:%d %cx - p=%x rn=%x r=%x",
@@ -1230,7 +1230,7 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
spin_unlock(&bulk_waiter_spinlock);
} else if (bulk->mode ==
VCHIQ_BULK_MODE_CALLBACK) {
- VCHIQ_REASON_T reason = (bulk->dir ==
+ enum vchiq_reason reason = (bulk->dir ==
VCHIQ_BULK_TRANSMIT) ?
((bulk->actual ==
VCHIQ_BULK_ACTUAL_ABORTED) ?
@@ -2078,7 +2078,7 @@ init_bulk_queue(struct vchiq_bulk_queue *queue)
}
inline const char *
-get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
+get_conn_state_name(enum vchiq_connstate conn_state)
{
return conn_state_names[conn_state];
}
@@ -2123,18 +2123,15 @@ vchiq_init_slots(void *mem_base, int mem_size)
return slot_zero;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
{
struct vchiq_shared_state *local;
struct vchiq_shared_state *remote;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
char threadname[16];
int i;
- vchiq_log_warning(vchiq_core_log_level,
- "%s: slot_zero = %pK", __func__, slot_zero);
-
if (vchiq_states[0]) {
pr_err("%s: VCHIQ state already initialized\n", __func__);
return VCHIQ_ERROR;
@@ -2283,8 +2280,8 @@ fail_free_handler_thread:
struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params *params,
- int srvstate, VCHIQ_INSTANCE_T instance,
- VCHIQ_USERDATA_TERM_T userdata_term)
+ int srvstate, struct vchiq_instance *instance,
+ vchiq_userdata_term userdata_term)
{
struct vchiq_service *service;
struct vchiq_service **pservice = NULL;
@@ -2412,7 +2409,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
return service;
}
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
{
struct vchiq_open_payload payload = {
@@ -2421,7 +2418,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
service->version,
service->version_min
};
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
service->client_id = client_id;
vchiq_use_service_internal(service);
@@ -2519,7 +2516,7 @@ release_service_messages(struct vchiq_service *service)
static int
do_abort_bulks(struct vchiq_service *service)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
/* Abort any outstanding bulk transfers */
if (mutex_lock_killable(&service->bulk_mutex))
@@ -2535,10 +2532,10 @@ do_abort_bulks(struct vchiq_service *service)
return (status == VCHIQ_SUCCESS);
}
-static VCHIQ_STATUS_T
+static enum vchiq_status
close_service_complete(struct vchiq_service *service, int failstate)
{
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
int newstate;
@@ -2597,11 +2594,11 @@ close_service_complete(struct vchiq_service *service, int failstate)
}
/* Called by the slot handler */
-VCHIQ_STATUS_T
+enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
struct vchiq_state *state = service->state;
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
@@ -2777,8 +2774,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
unlock_service(service);
}
-VCHIQ_STATUS_T
-vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+enum vchiq_status
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
@@ -2813,8 +2810,8 @@ vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
return VCHIQ_SUCCESS;
}
-VCHIQ_STATUS_T
-vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+enum vchiq_status
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
int i;
@@ -2830,12 +2827,12 @@ vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
return VCHIQ_SUCCESS;
}
-VCHIQ_STATUS_T
-vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_close_service(unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
return VCHIQ_ERROR;
@@ -2889,12 +2886,12 @@ vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
return status;
}
-VCHIQ_STATUS_T
-vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
+enum vchiq_status
+vchiq_remove_service(unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ enum vchiq_status status = VCHIQ_SUCCESS;
if (!service)
return VCHIQ_ERROR;
@@ -2955,10 +2952,10 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
void *offset, int size, void *userdata,
- VCHIQ_BULK_MODE_T mode,
- VCHIQ_BULK_DIR_T dir)
+ enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(handle);
struct vchiq_bulk_queue *queue;
@@ -2968,7 +2965,7 @@ VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
int payload[2];
if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
@@ -3103,15 +3100,15 @@ error_exit:
return status;
}
-VCHIQ_STATUS_T
-vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+enum vchiq_status
+vchiq_queue_message(unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size)
{
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
if (!service ||
(vchiq_check_service(service) != VCHIQ_SUCCESS))
@@ -3156,7 +3153,7 @@ error_exit:
}
void
-vchiq_release_message(VCHIQ_SERVICE_HANDLE_T handle,
+vchiq_release_message(unsigned int handle,
struct vchiq_header *header)
{
struct vchiq_service *service = find_service_by_handle(handle);
@@ -3195,10 +3192,10 @@ release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
remote_event_signal(&state->remote->sync_release);
}
-VCHIQ_STATUS_T
-vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle, short *peer_version)
+enum vchiq_status
+vchiq_get_peer_version(unsigned int handle, short *peer_version)
{
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_service *service = find_service_by_handle(handle);
if (!service ||
@@ -3224,12 +3221,12 @@ void vchiq_get_config(struct vchiq_config *config)
config->version_min = VCHIQ_VERSION_MIN;
}
-VCHIQ_STATUS_T
-vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
- VCHIQ_SERVICE_OPTION_T option, int value)
+enum vchiq_status
+vchiq_set_service_option(unsigned int handle,
+ enum vchiq_service_option option, int value)
{
struct vchiq_service *service = find_service_by_handle(handle);
- VCHIQ_STATUS_T status = VCHIQ_ERROR;
+ enum vchiq_status status = VCHIQ_ERROR;
if (service) {
switch (option) {
@@ -3301,7 +3298,7 @@ vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T handle,
return status;
}
-static void
+static int
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
struct vchiq_shared_state *shared, const char *label)
{
@@ -3321,16 +3318,21 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
int i;
char buf[80];
int len;
+ int err;
len = scnprintf(buf, sizeof(buf),
" %s: slots %d-%d tx_pos=%x recycle=%x",
label, shared->slot_first, shared->slot_last,
shared->tx_pos, shared->slot_queue_recycle);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Slots claimed:");
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
for (i = shared->slot_first; i <= shared->slot_last; i++) {
struct vchiq_slot_info slot_info =
@@ -3339,27 +3341,34 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
len = scnprintf(buf, sizeof(buf),
" %d: %d/%d", i, slot_info.use_count,
slot_info.release_count);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
}
for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
debug_names[i], shared->debug[i], shared->debug[i]);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
+ return 0;
}
-void
-vchiq_dump_state(void *dump_context, struct vchiq_state *state)
+int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
char buf[80];
int len;
int i;
+ int err;
len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
conn_state_names[state->conn_state]);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
@@ -3367,12 +3376,16 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
state->rx_pos,
state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Version: %d (min %d)",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
if (VCHIQ_ENABLE_STATS) {
len = scnprintf(buf, sizeof(buf),
@@ -3380,7 +3393,9 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
"error_count=%d",
state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
state->stats.error_count);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
}
len = scnprintf(buf, sizeof(buf),
@@ -3391,30 +3406,49 @@ vchiq_dump_state(void *dump_context, struct vchiq_state *state)
state->data_quota - state->data_use_count,
state->local->slot_queue_recycle - state->slot_queue_available,
state->stats.slot_stalls, state->stats.data_stalls);
- vchiq_dump(dump_context, buf, len + 1);
-
- vchiq_dump_platform_state(dump_context);
-
- vchiq_dump_shared_state(dump_context, state, state->local, "Local");
- vchiq_dump_shared_state(dump_context, state, state->remote, "Remote");
-
- vchiq_dump_platform_instances(dump_context);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
+
+ err = vchiq_dump_platform_state(dump_context);
+ if (err)
+ return err;
+
+ err = vchiq_dump_shared_state(dump_context,
+ state,
+ state->local,
+ "Local");
+ if (err)
+ return err;
+ err = vchiq_dump_shared_state(dump_context,
+ state,
+ state->remote,
+ "Remote");
+ if (err)
+ return err;
+
+ err = vchiq_dump_platform_instances(dump_context);
+ if (err)
+ return err;
for (i = 0; i < state->unused_service; i++) {
struct vchiq_service *service = find_service_by_port(state, i);
if (service) {
- vchiq_dump_service_state(dump_context, service);
+ err = vchiq_dump_service_state(dump_context, service);
unlock_service(service);
+ if (err)
+ return err;
}
}
+ return 0;
}
-void
-vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
+int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
char buf[80];
int len;
+ int err;
len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
service->localport, srvstate_names[service->srvstate],
@@ -3447,7 +3481,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service_quota->slot_use_count,
service_quota->slot_quota);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
tx_pending = service->bulk_tx.local_insert -
service->bulk_tx.remote_insert;
@@ -3466,7 +3502,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
BULK_INDEX(service->bulk_rx.remove)].size : 0);
if (VCHIQ_ENABLE_STATS) {
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Ctrl: tx_count=%d, tx_bytes=%llu, "
@@ -3475,7 +3513,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.ctrl_tx_bytes,
service->stats.ctrl_rx_count,
service->stats.ctrl_rx_bytes);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" Bulk: tx_count=%d, tx_bytes=%llu, "
@@ -3484,7 +3524,9 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->stats.bulk_tx_bytes,
service->stats.bulk_rx_count,
service->stats.bulk_rx_bytes);
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
len = scnprintf(buf, sizeof(buf),
" %d quota stalls, %d slot stalls, "
@@ -3497,10 +3539,13 @@ vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
}
}
- vchiq_dump(dump_context, buf, len + 1);
+ err = vchiq_dump(dump_context, buf, len + 1);
+ if (err)
+ return err;
if (service->srvstate != VCHIQ_SRVSTATE_FREE)
- vchiq_dump_platform_service_state(dump_context, service);
+ err = vchiq_dump_platform_service_state(dump_context, service);
+ return err;
}
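
These dump hunks convert vchiq_dump_state and vchiq_dump_service_state from void to int so the first failing vchiq_dump call aborts the whole dump and its error reaches the caller instead of being silently discarded. A self-contained sketch of the propagation pattern (emit stands in for vchiq_dump; names are illustrative):

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for vchiq_dump(): 0 on success, negative errno on failure. */
	static int emit(const char *str)
	{
		if (!str)
			return -EINVAL;
		puts(str);
		return 0;
	}

	/* Formerly a void function; each emit error now short-circuits. */
	static int dump_state(void)
	{
		int err;

		err = emit("State 0: CONNECTED");
		if (err)
			return err;

		err = emit("  tx_pos=0x40 rx_pos=0x80");
		if (err)
			return err;

		return 0;
	}

	int main(void)
	{
		return dump_state() ? 1 : 0;
	}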
void
@@ -3527,9 +3572,9 @@ vchiq_loud_error_footer(void)
"================");
}
-VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
+enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ enum vchiq_status status = VCHIQ_RETRY;
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
@@ -3538,9 +3583,9 @@ VCHIQ_STATUS_T vchiq_send_remote_use(struct vchiq_state *state)
return status;
}
-VCHIQ_STATUS_T vchiq_send_remote_use_active(struct vchiq_state *state)
+enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
- VCHIQ_STATUS_T status = VCHIQ_RETRY;
+ enum vchiq_status status = VCHIQ_RETRY;
if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
status = queue_message(state, NULL,
@@ -3578,7 +3623,7 @@ void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
}
*s++ = '\0';
- if ((label != NULL) && (*label != '\0'))
+ if (label && (*label != '\0'))
vchiq_log_trace(VCHIQ_LOG_TRACE,
"%s: %08x: %s", label, addr, line_buf);
else
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 63f71b2a492f..c31f953a9986 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -60,8 +60,8 @@ vchiq_static_assert(IS_POW2(VCHIQ_MAX_SLOTS_PER_SIDE));
#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
-#define VCHIQ_SLOT_ZERO_SLOTS ((sizeof(struct vchiq_slot_zero) + \
- VCHIQ_SLOT_SIZE - 1) / VCHIQ_SLOT_SIZE)
+#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
+ VCHIQ_SLOT_SIZE)
#define VCHIQ_MSG_PADDING 0 /* - */
#define VCHIQ_MSG_CONNECT 1 /* - */
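
Swapping the open-coded rounding for DIV_ROUND_UP states the intent directly and saves the reader from re-deriving the "(n + d - 1) / d" trick. A standalone sketch; the macro body matches the kernel's include/linux/kernel.h definition:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	#define SLOT_SIZE 4096

	int main(void)
	{
		/* Suppose sizeof(struct vchiq_slot_zero) were 5000 bytes: */
		unsigned int zero_size = 5000;

		/* 5000 bytes occupy two 4096-byte slots. */
		printf("%u\n", DIV_ROUND_UP(zero_size, SLOT_SIZE));
		return 0;
	}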
@@ -169,7 +169,7 @@ enum {
#endif /* VCHIQ_ENABLE_DEBUG */
-typedef enum {
+enum vchiq_connstate {
VCHIQ_CONNSTATE_DISCONNECTED,
VCHIQ_CONNSTATE_CONNECTING,
VCHIQ_CONNSTATE_CONNECTED,
@@ -179,7 +179,7 @@ typedef enum {
VCHIQ_CONNSTATE_RESUMING,
VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
VCHIQ_CONNSTATE_RESUME_TIMEOUT
-} VCHIQ_CONNSTATE_T;
+};
enum {
VCHIQ_SRVSTATE_FREE,
@@ -202,12 +202,12 @@ enum {
VCHIQ_POLL_COUNT
};
-typedef enum {
+enum vchiq_bulk_dir {
VCHIQ_BULK_TRANSMIT,
VCHIQ_BULK_RECEIVE
-} VCHIQ_BULK_DIR_T;
+};
-typedef void (*VCHIQ_USERDATA_TERM_T)(void *userdata);
+typedef void (*vchiq_userdata_term)(void *userdata);
struct vchiq_bulk {
short mode;
@@ -236,7 +236,7 @@ struct remote_event {
u32 __unused;
};
-typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
+struct opaque_platform_state;
struct vchiq_slot {
char data[VCHIQ_SLOT_SIZE];
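
Replacing the typedef'd pointer VCHIQ_PLATFORM_STATE_T with a bare forward declaration keeps the platform state just as opaque while making the pointer-ness visible at every use site. A toy sketch of the pattern (names invented):

	/* Header side: the struct is declared but never defined here. */
	struct opaque_state;

	struct owner {
		struct opaque_state *platform_state;	/* visibly a pointer */
	};

	/* Implementation side: only this file defines, and may
	 * dereference, the struct. */
	struct opaque_state {
		int private_data;
	};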
@@ -250,10 +250,10 @@ struct vchiq_slot_info {
struct vchiq_service {
struct vchiq_service_base base;
- VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int handle;
unsigned int ref_count;
int srvstate;
- VCHIQ_USERDATA_TERM_T userdata_term;
+ vchiq_userdata_term userdata_term;
unsigned int localport;
unsigned int remoteport;
int public_fourcc;
@@ -268,7 +268,7 @@ struct vchiq_service {
short peer_version;
struct vchiq_state *state;
- VCHIQ_INSTANCE_T instance;
+ struct vchiq_instance *instance;
int service_use_count;
@@ -367,7 +367,7 @@ struct vchiq_slot_zero {
struct vchiq_state {
int id;
int initialised;
- VCHIQ_CONNSTATE_T conn_state;
+ enum vchiq_connstate conn_state;
short version_common;
struct vchiq_shared_state *local;
@@ -382,7 +382,7 @@ struct vchiq_state {
/* Mutex protecting services */
struct mutex mutex;
- VCHIQ_INSTANCE_T *instance;
+ struct vchiq_instance **instance;
/* Processes incoming messages */
struct task_struct *slot_handler_thread;
@@ -468,7 +468,7 @@ struct vchiq_state {
struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];
- VCHIQ_PLATFORM_STATE_T platform_state;
+ struct opaque_platform_state *platform_state;
};
struct bulk_waiter {
@@ -486,27 +486,27 @@ extern int vchiq_sync_log_level;
extern struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
extern const char *
-get_conn_state_name(VCHIQ_CONNSTATE_T conn_state);
+get_conn_state_name(enum vchiq_connstate conn_state);
extern struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero);
-extern VCHIQ_STATUS_T
-vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params *params,
- int srvstate, VCHIQ_INSTANCE_T instance,
- VCHIQ_USERDATA_TERM_T userdata_term);
+ int srvstate, struct vchiq_instance *instance,
+ vchiq_userdata_term userdata_term);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);
extern void
@@ -515,21 +515,21 @@ vchiq_terminate_service_internal(struct vchiq_service *service);
extern void
vchiq_free_service_internal(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
-vchiq_shutdown_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);
extern void
remote_event_pollall(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
-vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *offset, int size,
- void *userdata, VCHIQ_BULK_MODE_T mode,
- VCHIQ_BULK_DIR_T dir);
+extern enum vchiq_status
+vchiq_bulk_transfer(unsigned int handle, void *offset, int size,
+ void *userdata, enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir);
-extern void
+extern int
vchiq_dump_state(void *dump_context, struct vchiq_state *state);
-extern void
+extern int
vchiq_dump_service_state(void *dump_context, struct vchiq_service *service);
extern void
@@ -543,7 +543,7 @@ request_poll(struct vchiq_state *state, struct vchiq_service *service,
int poll_type);
static inline struct vchiq_service *
-handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
+handle_to_service(unsigned int handle)
{
struct vchiq_state *state = vchiq_states[(handle / VCHIQ_MAX_SERVICES) &
(VCHIQ_MAX_STATES - 1)];
@@ -554,21 +554,21 @@ handle_to_service(VCHIQ_SERVICE_HANDLE_T handle)
}
extern struct vchiq_service *
-find_service_by_handle(VCHIQ_SERVICE_HANDLE_T handle);
+find_service_by_handle(unsigned int handle);
extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport);
extern struct vchiq_service *
-find_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle);
+find_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle);
extern struct vchiq_service *
-find_closed_service_for_instance(VCHIQ_INSTANCE_T instance,
- VCHIQ_SERVICE_HANDLE_T handle);
+find_closed_service_for_instance(struct vchiq_instance *instance,
+ unsigned int handle);
extern struct vchiq_service *
-next_service_by_instance(struct vchiq_state *state, VCHIQ_INSTANCE_T instance,
+next_service_by_instance(struct vchiq_state *state, struct vchiq_instance *instance,
int *pidx);
extern void
@@ -580,7 +580,7 @@ unlock_service(struct vchiq_service *service);
/* The following functions are called from vchiq_core, and external
** implementations must be provided. */
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
int dir);
@@ -596,29 +596,29 @@ vchiq_platform_check_suspend(struct vchiq_state *state);
extern void
vchiq_platform_paused(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_resume(struct vchiq_state *state);
extern void
vchiq_platform_resumed(struct vchiq_state *state);
-extern void
+extern int
vchiq_dump(void *dump_context, const char *str, int len);
-extern void
+extern int
vchiq_dump_platform_state(void *dump_context);
-extern void
+extern int
vchiq_dump_platform_instances(void *dump_context);
-extern void
+extern int
vchiq_dump_platform_service_state(void *dump_context,
struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service);
extern void
@@ -627,31 +627,31 @@ vchiq_on_remote_use(struct vchiq_state *state);
extern void
vchiq_on_remote_release(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_check_service(struct vchiq_service *service);
extern void
vchiq_on_remote_use_active(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_send_remote_use(struct vchiq_state *state);
-extern VCHIQ_STATUS_T
+extern enum vchiq_status
vchiq_send_remote_use_active(struct vchiq_state *state);
extern void
vchiq_platform_conn_state_changed(struct vchiq_state *state,
- VCHIQ_CONNSTATE_T oldstate,
- VCHIQ_CONNSTATE_T newstate);
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate);
extern void
vchiq_platform_handle_timeout(struct vchiq_state *state);
extern void
-vchiq_set_conn_state(struct vchiq_state *state, VCHIQ_CONNSTATE_T newstate);
+vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
extern void
vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index f217b78d95a0..89cc52211de4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -117,7 +117,7 @@ static const struct file_operations debugfs_log_fops = {
static int debugfs_usecount_show(struct seq_file *f, void *offset)
{
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
int use_count;
use_count = vchiq_instance_get_use_count(instance);
@@ -129,7 +129,7 @@ DEFINE_SHOW_ATTRIBUTE(debugfs_usecount);
static int debugfs_trace_show(struct seq_file *f, void *offset)
{
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
int trace;
trace = vchiq_instance_get_trace(instance);
@@ -148,7 +148,7 @@ static ssize_t debugfs_trace_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *f = (struct seq_file *)file->private_data;
- VCHIQ_INSTANCE_T instance = f->private;
+ struct vchiq_instance *instance = f->private;
char firstchar;
if (copy_from_user(&firstchar, buffer, 1))
@@ -184,7 +184,7 @@ static const struct file_operations debugfs_trace_fops = {
};
/* add an instance (process) to the debugfs entries */
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
char pidstr[16];
struct dentry *top;
@@ -201,7 +201,7 @@ void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
vchiq_instance_get_debugfs_node(instance)->dentry = top;
}
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
struct vchiq_debugfs_node *node =
vchiq_instance_get_debugfs_node(instance);
@@ -242,11 +242,11 @@ void vchiq_debugfs_deinit(void)
{
}
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance)
{
}
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance)
{
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
index 9b563d105fdb..ec2f033cdf32 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h
@@ -14,8 +14,8 @@ void vchiq_debugfs_init(void);
void vchiq_debugfs_deinit(void);
-void vchiq_debugfs_add_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance);
-void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance);
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance);
#endif /* VCHIQ_DEBUGFS_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index c23bd105c40f..07c6a3db5ab6 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -15,7 +15,7 @@
#define VCHIQ_GET_SERVICE_USERDATA(service) vchiq_get_service_userdata(service)
#define VCHIQ_GET_SERVICE_FOURCC(service) vchiq_get_service_fourcc(service)
-typedef enum {
+enum vchiq_reason {
VCHIQ_SERVICE_OPENED, /* service, -, - */
VCHIQ_SERVICE_CLOSED, /* service, -, - */
VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
@@ -23,28 +23,28 @@ typedef enum {
VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
-} VCHIQ_REASON_T;
+};
-typedef enum {
+enum vchiq_status {
VCHIQ_ERROR = -1,
VCHIQ_SUCCESS = 0,
VCHIQ_RETRY = 1
-} VCHIQ_STATUS_T;
+};
-typedef enum {
+enum vchiq_bulk_mode {
VCHIQ_BULK_MODE_CALLBACK,
VCHIQ_BULK_MODE_BLOCKING,
VCHIQ_BULK_MODE_NOCALLBACK,
VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
-} VCHIQ_BULK_MODE_T;
+};
-typedef enum {
+enum vchiq_service_option {
VCHIQ_SERVICE_OPTION_AUTOCLOSE,
VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
VCHIQ_SERVICE_OPTION_TRACE
-} VCHIQ_SERVICE_OPTION_T;
+};
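
Each anonymous "typedef enum { ... } FOO_T;" in this header becomes a tagged enum, in line with the kernel's rule against new typedefs. The mechanical shape of the conversion on a toy type (enumerators renamed only so both forms can coexist in one sketch):

	/* Before: anonymous enum hidden behind a typedef. */
	typedef enum {
		DEMO_ERROR = -1,
		DEMO_SUCCESS = 0,
		DEMO_RETRY = 1
	} DEMO_STATUS_T;

	/* After: a plain tagged enum; use sites spell out "enum demo_status". */
	enum demo_status {
		DEMO_STATUS_ERROR = -1,
		DEMO_STATUS_SUCCESS = 0,
		DEMO_STATUS_RETRY = 1
	};

	static enum demo_status demo_try(void)
	{
		return DEMO_STATUS_SUCCESS;
	}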
struct vchiq_header {
/* The message identifier - opaque to applications. */
@@ -61,21 +61,19 @@ struct vchiq_element {
unsigned int size;
};
-typedef unsigned int VCHIQ_SERVICE_HANDLE_T;
-
-typedef VCHIQ_STATUS_T (*VCHIQ_CALLBACK_T)(VCHIQ_REASON_T,
- struct vchiq_header *,
- VCHIQ_SERVICE_HANDLE_T, void *);
+typedef enum vchiq_status (*vchiq_callback)(enum vchiq_reason,
+ struct vchiq_header *,
+ unsigned int, void *);
struct vchiq_service_base {
int fourcc;
- VCHIQ_CALLBACK_T callback;
+ vchiq_callback callback;
void *userdata;
};
struct vchiq_service_params {
int fourcc;
- VCHIQ_CALLBACK_T callback;
+ vchiq_callback callback;
void *userdata;
short version; /* Increment for non-trivial changes */
short version_min; /* Update for incompatible changes */
@@ -92,57 +90,57 @@ struct vchiq_config {
short version_min; /* The minimum compatible version of VCHIQ */
};
-typedef struct vchiq_instance_struct *VCHIQ_INSTANCE_T;
-typedef void (*VCHIQ_REMOTE_USE_CALLBACK_T)(void *cb_arg);
+struct vchiq_instance;
+typedef void (*vchiq_remote_callback)(void *cb_arg);
-extern VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *pinstance);
-extern VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance);
-extern VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance);
-extern VCHIQ_STATUS_T vchiq_add_service(VCHIQ_INSTANCE_T instance,
+extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
+extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
+extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
+extern enum vchiq_status vchiq_add_service(struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *pservice);
-extern VCHIQ_STATUS_T vchiq_open_service(VCHIQ_INSTANCE_T instance,
+ unsigned int *pservice);
+extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
const struct vchiq_service_params *params,
- VCHIQ_SERVICE_HANDLE_T *pservice);
-extern VCHIQ_STATUS_T vchiq_close_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
-extern VCHIQ_STATUS_T
-vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+ unsigned int *pservice);
+extern enum vchiq_status vchiq_close_service(unsigned int service);
+extern enum vchiq_status vchiq_remove_service(unsigned int service);
+extern enum vchiq_status vchiq_use_service(unsigned int service);
+extern enum vchiq_status vchiq_release_service(unsigned int service);
+extern enum vchiq_status
+vchiq_queue_message(unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
size_t size);
-extern void vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
+extern void vchiq_release_message(unsigned int service,
struct vchiq_header *header);
-extern VCHIQ_STATUS_T vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
+extern enum vchiq_status vchiq_bulk_transmit(unsigned int service,
const void *data, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T service,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive(unsigned int service,
void *data, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_transmit_handle(VCHIQ_SERVICE_HANDLE_T service,
+ enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_transmit_handle(unsigned int service,
const void *offset, unsigned int size,
- void *userdata, VCHIQ_BULK_MODE_T mode);
-extern VCHIQ_STATUS_T vchiq_bulk_receive_handle(VCHIQ_SERVICE_HANDLE_T service,
+ void *userdata, enum vchiq_bulk_mode mode);
+extern enum vchiq_status vchiq_bulk_receive_handle(unsigned int service,
void *offset, unsigned int size, void *userdata,
- VCHIQ_BULK_MODE_T mode);
-extern int vchiq_get_client_id(VCHIQ_SERVICE_HANDLE_T service);
-extern void *vchiq_get_service_userdata(VCHIQ_SERVICE_HANDLE_T service);
-extern int vchiq_get_service_fourcc(VCHIQ_SERVICE_HANDLE_T service);
+ enum vchiq_bulk_mode mode);
+extern int vchiq_get_client_id(unsigned int service);
+extern void *vchiq_get_service_userdata(unsigned int service);
+extern int vchiq_get_service_fourcc(unsigned int service);
extern void vchiq_get_config(struct vchiq_config *config);
-extern VCHIQ_STATUS_T vchiq_set_service_option(VCHIQ_SERVICE_HANDLE_T service,
- VCHIQ_SERVICE_OPTION_T option, int value);
+extern enum vchiq_status vchiq_set_service_option(unsigned int service,
+ enum vchiq_service_option option, int value);
-extern VCHIQ_STATUS_T vchiq_remote_use(VCHIQ_INSTANCE_T instance,
- VCHIQ_REMOTE_USE_CALLBACK_T callback, void *cb_arg);
-extern VCHIQ_STATUS_T vchiq_remote_release(VCHIQ_INSTANCE_T instance);
+extern enum vchiq_status vchiq_remote_use(struct vchiq_instance *instance,
+ vchiq_remote_callback callback, void *cb_arg);
+extern enum vchiq_status vchiq_remote_release(struct vchiq_instance *instance);
-extern VCHIQ_STATUS_T vchiq_dump_phys_mem(VCHIQ_SERVICE_HANDLE_T service,
+extern enum vchiq_status vchiq_dump_phys_mem(unsigned int service,
void *ptr, size_t num_bytes);
-extern VCHIQ_STATUS_T vchiq_get_peer_version(VCHIQ_SERVICE_HANDLE_T handle,
+extern enum vchiq_status vchiq_get_peer_version(unsigned int handle,
short *peer_version);
#endif /* VCHIQ_IF_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 460ccea088bf..202889b3774f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -28,11 +28,11 @@ struct vchiq_queue_bulk_transfer {
void *data;
unsigned int size;
void *userdata;
- VCHIQ_BULK_MODE_T mode;
+ enum vchiq_bulk_mode mode;
};
struct vchiq_completion_data {
- VCHIQ_REASON_T reason;
+ enum vchiq_reason reason;
struct vchiq_header *header;
void *service_userdata;
void *bulk_userdata;
@@ -60,7 +60,7 @@ struct vchiq_get_config {
struct vchiq_set_service_option {
unsigned int handle;
- VCHIQ_SERVICE_OPTION_T option;
+ enum vchiq_service_option option;
int value;
};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 17a4f2c8d8b1..0ce3b08b3441 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -12,22 +12,22 @@
#define vchiq_status_to_vchi(status) ((int32_t)status)
struct shim_service {
- VCHIQ_SERVICE_HANDLE_T handle;
+ unsigned int handle;
struct vchiu_queue queue;
- VCHI_CALLBACK_T callback;
+ vchi_callback callback;
void *callback_param;
};
/***********************************************************
* Name: vchi_msg_peek
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void **data,
* uint32_t *msg_size,
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
*
* Description: Routine to return a pointer to the current message (to allow in
* place processing). The message can be removed using
@@ -36,10 +36,10 @@ struct shim_service {
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
- void **data,
- uint32_t *msg_size,
- VCHI_FLAGS_T flags)
+int32_t vchi_msg_peek(struct vchi_service_handle *handle,
+ void **data,
+ uint32_t *msg_size,
+ enum vchi_flags flags)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
/***********************************************************
* Name: vchi_msg_remove
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
*
* Description: Routine to remove a message (after it has been read with
* vchi_msg_peek)
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_msg_remove(struct vchi_service_handle *handle)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(vchi_msg_remove);
/***********************************************************
* Name: vchi_msg_queue
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* ssize_t (*copy_callback)(void *context, void *dest,
* size_t offset, size_t maxsize),
* void *context,
@@ -99,14 +99,14 @@ EXPORT_SYMBOL(vchi_msg_remove);
*
***********************************************************/
static
-int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+int32_t vchi_msg_queue(struct vchi_service_handle *handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
void *context,
uint32_t data_size)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
while (1) {
status = vchiq_queue_message(service->handle,
@@ -139,7 +139,7 @@ vchi_queue_kernel_message_callback(void *context,
}
int
-vchi_queue_kernel_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size)
{
@@ -169,7 +169,7 @@ vchi_queue_user_message_callback(void *context,
}
int
-vchi_queue_user_message(VCHI_SERVICE_HANDLE_T handle,
+vchi_queue_user_message(struct vchi_service_handle *handle,
void __user *data,
unsigned int size)
{
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(vchi_queue_user_message);
* Arguments: VCHI_BULK_HANDLE_T handle,
* void *data_dst,
* const uint32_t data_size,
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
* void *bulk_handle
*
* Description: Routine to setup a rcv buffer
@@ -198,15 +198,13 @@ EXPORT_SYMBOL(vchi_queue_user_message);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
- void *data_dst,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *bulk_handle)
+int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_dst,
+ uint32_t data_size, enum vchi_flags flags,
+ void *bulk_handle)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_BULK_MODE_T mode;
- VCHIQ_STATUS_T status;
+ enum vchiq_bulk_mode mode;
+ enum vchiq_status status;
switch ((int)flags) {
case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
@@ -250,7 +248,7 @@ EXPORT_SYMBOL(vchi_bulk_queue_receive);
* Arguments: VCHI_BULK_HANDLE_T handle,
* const void *data_src,
* uint32_t data_size,
- * VCHI_FLAGS_T flags,
+ * enum vchi_flags flags,
* void *bulk_handle
*
* Description: Routine to transmit some data
@@ -258,15 +256,15 @@ EXPORT_SYMBOL(vchi_bulk_queue_receive);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
- const void *data_src,
- uint32_t data_size,
- VCHI_FLAGS_T flags,
- void *bulk_handle)
+int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
+ const void *data_src,
+ uint32_t data_size,
+ enum vchi_flags flags,
+ void *bulk_handle)
{
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_BULK_MODE_T mode;
- VCHIQ_STATUS_T status;
+ enum vchiq_bulk_mode mode;
+ enum vchiq_status status;
switch ((int)flags) {
case VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE
@@ -309,22 +307,20 @@ EXPORT_SYMBOL(vchi_bulk_queue_transmit);
/***********************************************************
* Name: vchi_msg_dequeue
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void *data,
* uint32_t max_data_size_to_read,
* uint32_t *actual_msg_size
- * VCHI_FLAGS_T flags
+ * enum vchi_flags flags
*
* Description: Routine to dequeue a message into the supplied buffer
*
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
- void *data,
- uint32_t max_data_size_to_read,
- uint32_t *actual_msg_size,
- VCHI_FLAGS_T flags)
+int32_t vchi_msg_dequeue(struct vchi_service_handle *handle, void *data,
+ uint32_t max_data_size_to_read,
+ uint32_t *actual_msg_size, enum vchi_flags flags)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -364,13 +360,13 @@ int32_t vchi_held_msg_release(struct vchi_held_msg *message)
{
/*
* Convert the service field pointer back to an
- * VCHIQ_SERVICE_HANDLE_T which is an int.
+	 * unsigned int handle.
* This pointer is opaque to everything except
* vchi_msg_hold which simply upcasted the int
* to a pointer.
*/
- vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
+ vchiq_release_message((unsigned int)(long)message->service,
(struct vchiq_header *)message->message);
return 0;
@@ -380,10 +376,10 @@ EXPORT_SYMBOL(vchi_held_msg_release);
/***********************************************************
* Name: vchi_msg_hold
*
- * Arguments: VCHI_SERVICE_HANDLE_T handle,
+ * Arguments: struct vchi_service_handle *handle,
* void **data,
* uint32_t *msg_size,
- * VCHI_FLAGS_T flags,
+ * enum vchi_flags flags,
* struct vchi_held_msg *message_handle
*
* Description: Routine to return a pointer to the current message (to allow
@@ -394,11 +390,9 @@ EXPORT_SYMBOL(vchi_held_msg_release);
* Returns: int32_t - success == 0
*
***********************************************************/
-int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
- void **data,
- uint32_t *msg_size,
- VCHI_FLAGS_T flags,
- struct vchi_held_msg *message_handle)
+int32_t vchi_msg_hold(struct vchi_service_handle *handle, void **data,
+ uint32_t *msg_size, enum vchi_flags flags,
+ struct vchi_held_msg *message_handle)
{
struct shim_service *service = (struct shim_service *)handle;
struct vchiq_header *header;
@@ -416,7 +410,7 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
*msg_size = header->size;
/*
- * upcast the VCHIQ_SERVICE_HANDLE_T which is an int
+	 * upcast the unsigned int handle
* to a pointer and stuff it in the held message.
* This pointer is opaque to everything except
* vchi_held_msg_release which simply downcasts it back
@@ -434,7 +428,7 @@ EXPORT_SYMBOL(vchi_msg_hold);
/***********************************************************
* Name: vchi_initialise
*
- * Arguments: VCHI_INSTANCE_T *instance_handle
+ * Arguments: struct vchi_instance_handle **instance_handle
*
* Description: Initialises the hardware but does not transmit anything
* When run as a Host App this will be called twice hence the need
@@ -444,14 +438,14 @@ EXPORT_SYMBOL(vchi_msg_hold);
*
***********************************************************/
-int32_t vchi_initialise(VCHI_INSTANCE_T *instance_handle)
+int32_t vchi_initialise(struct vchi_instance_handle **instance_handle)
{
- VCHIQ_INSTANCE_T instance;
- VCHIQ_STATUS_T status;
+ struct vchiq_instance *instance;
+ enum vchiq_status status;
status = vchiq_initialise(&instance);
- *instance_handle = (VCHI_INSTANCE_T)instance;
+ *instance_handle = (struct vchi_instance_handle *)instance;
return vchiq_status_to_vchi(status);
}
@@ -460,7 +454,7 @@ EXPORT_SYMBOL(vchi_initialise);
/***********************************************************
* Name: vchi_connect
*
- * Arguments: VCHI_INSTANCE_T instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
*
* Description: Starts the command service on each connection,
* causing INIT messages to be pinged back and forth
@@ -468,9 +462,9 @@ EXPORT_SYMBOL(vchi_initialise);
* Returns: 0 if successful, failure otherwise
*
***********************************************************/
-int32_t vchi_connect(VCHI_INSTANCE_T instance_handle)
+int32_t vchi_connect(struct vchi_instance_handle *instance_handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
return vchiq_connect(instance);
}
@@ -479,7 +473,7 @@ EXPORT_SYMBOL(vchi_connect);
/***********************************************************
* Name: vchi_disconnect
*
- * Arguments: VCHI_INSTANCE_T instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
*
* Description: Stops the command service on each connection,
* causing DE-INIT messages to be pinged back and forth
@@ -487,9 +481,9 @@ EXPORT_SYMBOL(vchi_connect);
* Returns: 0 if successful, failure otherwise
*
***********************************************************/
-int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
+int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
return vchiq_status_to_vchi(vchiq_shutdown(instance));
}
@@ -499,9 +493,9 @@ EXPORT_SYMBOL(vchi_disconnect);
* Name: vchi_service_open
* Name: vchi_service_create
*
- * Arguments: VCHI_INSTANCE_T *instance_handle
+ * Arguments: struct vchi_instance_handle *instance_handle
* struct service_creation *setup,
- * VCHI_SERVICE_HANDLE_T *handle
+ * struct vchi_service_handle **handle
*
* Description: Routine to open a service
*
@@ -509,9 +503,9 @@ EXPORT_SYMBOL(vchi_disconnect);
*
***********************************************************/
-static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
+static enum vchiq_status shim_callback(enum vchiq_reason reason,
struct vchiq_header *header,
- VCHIQ_SERVICE_HANDLE_T handle,
+ unsigned int handle,
void *bulk_user)
{
struct shim_service *service =
@@ -571,7 +565,7 @@ done:
return VCHIQ_SUCCESS;
}
-static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
+static struct shim_service *service_alloc(struct vchiq_instance *instance,
struct service_creation *setup)
{
struct shim_service *service = kzalloc(sizeof(struct shim_service), GFP_KERNEL);
@@ -579,7 +573,7 @@ static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
(void)instance;
if (service) {
- if (vchiu_queue_init(&service->queue, 64)) {
+ if (!vchiu_queue_init(&service->queue, 64)) {
service->callback = setup->callback;
service->callback_param = setup->callback_param;
} else {
@@ -599,18 +593,18 @@ static void service_free(struct shim_service *service)
}
}
-int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
+int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
- VCHI_SERVICE_HANDLE_T *handle)
+ struct vchi_service_handle **handle)
{
- VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
+ struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
struct shim_service *service = service_alloc(instance, setup);
- *handle = (VCHI_SERVICE_HANDLE_T)service;
+ *handle = (struct vchi_service_handle *)service;
if (service) {
struct vchiq_service_params params;
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
memset(&params, 0, sizeof(params));
params.fourcc = setup->service_id;
@@ -628,17 +622,17 @@ int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
}
}
- return (service != NULL) ? 0 : -1;
+ return service ? 0 : -1;
}
EXPORT_SYMBOL(vchi_service_open);
-int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_close(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
+ enum vchiq_status status = vchiq_close_service(service->handle);
if (status == VCHIQ_SUCCESS)
service_free(service);
@@ -648,13 +642,13 @@ int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
}
EXPORT_SYMBOL(vchi_service_close);
-int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_destroy(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
+ enum vchiq_status status = vchiq_remove_service(service->handle);
if (status == VCHIQ_SUCCESS) {
service_free(service);
@@ -667,13 +661,13 @@ int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
}
EXPORT_SYMBOL(vchi_service_destroy);
-int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
- VCHI_SERVICE_OPTION_T option,
+int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
+ enum vchi_service_option option,
int value)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
- VCHIQ_SERVICE_OPTION_T vchiq_option;
+ enum vchiq_service_option vchiq_option;
switch (option) {
case VCHI_SERVICE_OPTION_TRACE:
@@ -687,7 +681,7 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
break;
}
if (service) {
- VCHIQ_STATUS_T status =
+ enum vchiq_status status =
vchiq_set_service_option(service->handle,
vchiq_option,
value);
@@ -698,13 +692,13 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
}
EXPORT_SYMBOL(vchi_service_set_option);
-int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
+int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *peer_version)
{
int32_t ret = -1;
struct shim_service *service = (struct shim_service *)handle;
if (service) {
- VCHIQ_STATUS_T status;
+ enum vchiq_status status;
status = vchiq_get_peer_version(service->handle, peer_version);
ret = vchiq_status_to_vchi(status);
@@ -716,14 +710,14 @@ EXPORT_SYMBOL(vchi_get_peer_version);
/***********************************************************
* Name: vchi_service_use
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ * Arguments: const struct vchi_service_handle *handle
*
* Description: Routine to increment refcount on a service
*
* Returns: void
*
***********************************************************/
-int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_use(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
@@ -737,14 +731,14 @@ EXPORT_SYMBOL(vchi_service_use);
/***********************************************************
* Name: vchi_service_release
*
- * Arguments: const VCHI_SERVICE_HANDLE_T handle
+ * Arguments: const struct vchi_service_handle *handle
*
* Description: Routine to decrement refcount on a service
*
* Returns: void
*
***********************************************************/
-int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
+int32_t vchi_service_release(const struct vchi_service_handle *handle)
{
int32_t ret = -1;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 5e6d3035dc05..644844d88fed 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -24,9 +24,9 @@ int vchiu_queue_init(struct vchiu_queue *queue, int size)
GFP_KERNEL);
if (!queue->storage) {
vchiu_queue_delete(queue);
- return 0;
+ return -ENOMEM;
}
- return 1;
+ return 0;
}
void vchiu_queue_delete(struct vchiu_queue *queue)
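The vchiu_queue_init() change above adopts the usual kernel convention:
return 0 on success and a negative errno on failure. That is why the caller
in service_alloc() now treats a zero return as success. A minimal sketch of
the updated caller pattern (illustrative only, not part of the patch):

	int ret = vchiu_queue_init(&queue, 64);

	if (ret)		/* negative errno, e.g. -ENOMEM */
		return ret;
	/* 0: queue is initialized and ready to use */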
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 6a33aaa1a49f..fd0ea4dbcb91 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -494,7 +494,7 @@ static const struct file_operations vme_user_fops = {
.write = vme_user_write,
.llseek = vme_user_llseek,
.unlocked_ioctl = vme_user_unlocked_ioctl,
- .compat_ioctl = vme_user_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = vme_user_mmap,
};
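compat_ptr_ioctl() is the generic compat handler for drivers whose ioctl
arguments are all pointers: unlike reusing the unlocked handler directly, it
converts the 32-bit user pointer with compat_ptr() before forwarding. Roughly
(see fs/ioctl.c for the authoritative version):

	long compat_ptr_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
	{
		if (!file->f_op->unlocked_ioctl)
			return -ENOIOCTLCMD;
		return file->f_op->unlocked_ioctl(file, cmd,
				(unsigned long)compat_ptr(arg));
	}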
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index eba4ee0750dc..e65c9825ea5a 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -79,14 +79,10 @@ static void s_vCalculateOFDMRParameter(unsigned char byRate, u8 bb_type,
*
* Return Value: none
*/
-static
-void
-s_vCalculateOFDMRParameter(
- unsigned char byRate,
- u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime
-)
+static void s_vCalculateOFDMRParameter(unsigned char byRate,
+ u8 bb_type,
+ unsigned char *pbyTxRate,
+ unsigned char *pbyRsvTime)
{
switch (byRate) {
case RATE_6M:
@@ -736,8 +732,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_36M),
bb_type,
&byTxRate,
@@ -745,8 +740,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_48M),
bb_type,
&byTxRate,
@@ -754,8 +748,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
@@ -763,8 +756,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
- (void *)priv,
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
RATE_54M),
bb_type,
&byTxRate,
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 887c1692e05b..25867cb9d13d 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -45,7 +45,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode);
+ unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 082302944c37..f69fc687d4c3 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -295,7 +295,8 @@ static void device_init_registers(struct vnt_private *priv)
/* Get Desire Power Value */
priv->byCurPwr = 0xFF;
priv->byCCKPwr = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_CCK);
- priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_OFDMG);
+ priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset,
+ EEP_OFS_PWR_OFDMG);
/* Load power Table */
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
@@ -373,7 +374,7 @@ static void device_init_registers(struct vnt_private *priv)
priv->bRadioOff = false;
priv->byRadioCtl = SROMbyReadEmbedded(priv->PortOffset,
- EEP_OFS_RADIOCTL);
+ EEP_OFS_RADIOCTL);
priv->bHWRadioOff = false;
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
@@ -659,12 +660,13 @@ static int device_init_td0_ring(struct vnt_private *priv)
desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
- desc->next = &(priv->apTD0Rings[(i+1) % priv->opts.tx_descs[0]]);
- desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
+ desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
+ desc->next_desc = cpu_to_le32(curr +
+ sizeof(struct vnt_tx_desc));
}
if (i > 0)
- priv->apTD0Rings[i-1].next_desc = cpu_to_le32(priv->td0_pool_dma);
+ priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
return 0;
@@ -704,7 +706,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
}
if (i > 0)
- priv->apTD1Rings[i-1].next_desc = cpu_to_le32(priv->td1_pool_dma);
+ priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
return 0;
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 9725de3bca6a..bfd598a93b04 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -97,10 +97,7 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
*
*/
-void
-PSvDisablePowerSaving(
- struct vnt_private *priv
-)
+void PSvDisablePowerSaving(struct vnt_private *priv)
{
/* disable power saving hw function */
MACbPSWakeup(priv);
@@ -126,10 +123,7 @@ PSvDisablePowerSaving(
*
*/
-bool
-PSbIsNextTBTTWakeUp(
- struct vnt_private *priv
-)
+bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
{
struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &hw->conf;
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index e80fed69bafe..fb2855e686a7 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -436,7 +436,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -558,7 +558,8 @@ static bool RFbAL2230Init(struct vnt_private *priv)
MACvTimer0MicroSDelay(priv, 30);/* 30us */
ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
- ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv,
+ dwAL2230InitTable[CB_AL2230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 042ac67a9709..affb70eba10f 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -61,23 +61,14 @@
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
-bool RFbInit(
- struct vnt_private *priv
-);
+bool RFbInit(struct vnt_private *priv);
bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, u16 uChannel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
-bool RFbRawSetPower(
- struct vnt_private *priv,
- unsigned char byPwr,
- unsigned int rate
-);
+bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
+ unsigned int rate);
-void
-RFvRSSITodBm(
- struct vnt_private *priv,
- unsigned char byCurrRSSI,
- long *pldBm
-);
+void RFvRSSITodBm(struct vnt_private *priv, unsigned char byCurrRSSI,
+ long *pldBm);
/* {{ RobertYu: 20050104 */
bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv, u16 byOldChannel, u16 byNewChannel);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index a14908895b9e..37fcc42ed000 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1289,7 +1289,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
current_rate = rate->hw_value;
if (priv->wCurrentRate != current_rate &&
- !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+ !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
priv->wCurrentRate = current_rate;
RFbSetPower(priv, priv->wCurrentRate,
@@ -1396,7 +1396,8 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
tx_key = info->control.hw_key;
if (tx_key->keylen > 0)
vnt_fill_txkey(hdr, tx_buffer_head->tx_key,
- tx_key, skb, tx_body_size, td_info->mic_hdr);
+ tx_key, skb, tx_body_size,
+ td_info->mic_hdr);
}
return 0;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 856ba97aec4f..4ac85ecb0921 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -249,10 +249,10 @@ static int vnt_init_registers(struct vnt_private *priv)
} else {
priv->tx_antenna_mode = ANT_B;
- if (priv->tx_rx_ant_inv)
- priv->rx_antenna_mode = ANT_A;
- else
- priv->rx_antenna_mode = ANT_B;
+ if (priv->tx_rx_ant_inv)
+ priv->rx_antenna_mode = ANT_A;
+ else
+ priv->rx_antenna_mode = ANT_B;
}
}
@@ -362,7 +362,6 @@ static int vnt_init_registers(struct vnt_private *priv)
goto end;
}
-
ret = vnt_mac_set_led(priv, LEDSTS_TMLEN, 0x38);
if (ret)
goto end;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 4e9cfacf75f2..f9020a4f7bbf 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -112,11 +112,11 @@ static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
frame_length, rate);
if (pkt_type == PK_TYPE_11B)
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, (u16)priv->top_cck_basic_rate);
+ ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
+ (u16)priv->top_cck_basic_rate);
else
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, (u16)priv->top_ofdm_basic_rate);
+ ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
+ (u16)priv->top_ofdm_basic_rate);
if (need_ack)
return data_time + priv->sifs + ack_time;
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
new file mode 100644
index 000000000000..26de6762b942
--- /dev/null
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
@@ -0,0 +1,97 @@
+The WFxxx chip series can be connected via SPI or via SDIO.
+
+SPI
+---
+
+You have to declare the WFxxx chip in your device tree.
+
+Required properties:
+ - compatible: Should be "silabs,wfx-spi"
+ - reg: Chip select address of device
+ - spi-max-frequency: Maximum SPI clocking speed of device in Hz
+ - interrupts-extended: Should contain the interrupt line (interrupt-parent +
+   interrupt can also be used). Trigger should be `IRQ_TYPE_EDGE_RISING`.
+
+Optional properties:
+ - reset-gpios: phandle of the GPIO used to reset the chip during probe.
+ Without this property, you may encounter issues with warm boot.
+
+Please consult Documentation/devicetree/bindings/spi/spi-bus.txt for optional
+SPI connection related properties.
+
+Example:
+
+&spi1 {
+ wfx {
+ compatible = "silabs,wfx-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_irq &wfx_gpios>;
+ interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_HIGH>;
+ reg = <0>;
+ spi-max-frequency = <42000000>;
+ };
+};
+
+
+SDIO
+----
+
+The driver is able to detect a WFxxx chip on the SDIO bus by matching its
+Vendor ID and Product ID. However, in this case the driver only provides
+limited features. Thus, declaring the WFxxx chip in the device tree is
+strongly recommended (and may become mandatory in the future).
+
+Required properties:
+ - compatible: Should be "silabs,wfx-sdio"
+ - reg: Should be 1
+
+In addition, it is recommended to declare an mmc-pwrseq on the SDIO host above
+the WFx. Without it, you may encounter issues with warm boot. The mmc-pwrseq
+should be compatible with mmc-pwrseq-simple. Please consult
+Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more
+information.
+
+Example:
+
+/ {
+ wfx_pwrseq: wfx_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_reset>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc1 {
+ mmc-pwrseq = <&wfx_pwrseq>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mmc@1 {
+ compatible = "silabs,wfx-sdio";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&wfx_wakeup>;
+ wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+Note that #address-cells and #size-cells should already be defined in node
+mmc1, but it is rarely the case.
+
+Common properties
+-----------------
+
+Some properties are recognized by both the SPI and SDIO versions:
+ - wakeup-gpios: phandle of the GPIO used to wake up the chip. Without this
+   property, the driver disables most of the power saving features.
+ - config-file: Use an alternative file as PDS. Default is `wf200.pds`. Only
+   necessary for development/debug purposes.
+ - slk_key: String representing the hexadecimal value of the secure link key
+   to use. Must contain 64 hexadecimal digits. Not supported in the current
+   version.
+
+The WFx driver also supports `mac-address` and `local-mac-address` as
+described in Documentation/devicetree/bindings/net/ethernet.txt
+
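For illustration, the common properties combine with the SPI example above as
follows (a sketch only; the PDS file name and MAC address are placeholders):

&spi1 {
	wfx {
		compatible = "silabs,wfx-spi";
		reg = <0>;
		spi-max-frequency = <42000000>;
		interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
		wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
		config-file = "wf200_custom.pds";
		local-mac-address = [02 00 00 00 00 01];
	};
};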
diff --git a/drivers/staging/wfx/Kconfig b/drivers/staging/wfx/Kconfig
new file mode 100644
index 000000000000..83ee4d0ca8c6
--- /dev/null
+++ b/drivers/staging/wfx/Kconfig
@@ -0,0 +1,8 @@
+config WFX
+ tristate "Silicon Labs wireless chips WF200 and further"
+ depends on MAC80211
+ depends on MMC || !MMC # do not allow WFX=y if MMC=m
+ depends on (SPI || MMC)
+ help
+ This is a driver for Silicon Labs WFxxx series (WF200 and further)
+ chipsets. These chips can be found on SPI or SDIO buses.
diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile
new file mode 100644
index 000000000000..0d9c1ed092f6
--- /dev/null
+++ b/drivers/staging/wfx/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Necessary for CREATE_TRACE_POINTS
+CFLAGS_debug.o = -I$(src)
+
+wfx-y := \
+ bh.o \
+ hwio.o \
+ fwio.o \
+ hif_tx.o \
+ hif_rx.o \
+ queue.o \
+ data_tx.o \
+ data_rx.o \
+ scan.o \
+ sta.o \
+ key.o \
+ main.o \
+ debug.o
+wfx-$(CONFIG_SPI) += bus_spi.o
+wfx-$(subst m,y,$(CONFIG_MMC)) += bus_sdio.o
+
+obj-$(CONFIG_WFX) += wfx.o
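A subtlety in the Makefile above: bus_sdio.o must be linked into wfx whether
CONFIG_MMC is y or m, and kbuild only links objects listed in wfx-y into a
composite module, so $(subst m,y,...) rewrites the tristate first. The Kconfig
dependency `MMC || !MMC` already excludes the one broken combination (WFX=y
with MMC=m). Spelled out:

# CONFIG_MMC=y     ->  wfx-y += bus_sdio.o
# CONFIG_MMC=m     ->  wfx-y += bus_sdio.o   (m rewritten to y)
# CONFIG_MMC unset ->  wfx-  += bus_sdio.o   (list is never used)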
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
new file mode 100644
index 000000000000..e44772289af8
--- /dev/null
+++ b/drivers/staging/wfx/TODO
@@ -0,0 +1,17 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+
+ - I have to make a decision about secure link support. I can:
+ - drop completely
+ - keep it in an external patch (my preferred option)
+ - replace calls to mbedtls with the kernel crypto API (necessitates a
+   bunch of work)
+ - pull mbedtls into the kernel (not realistic)
+
+ - The mac80211 interface does not (yet) have the expected quality to be
+   placed outside of staging:
+ - Some processing is redundant with mac80211's
+ - Many members from wfx_dev/wfx_vif can be retrieved from mac80211
+ structures
+ - Some functions are too complex
+ - ...
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
new file mode 100644
index 000000000000..2432ba95c2f5
--- /dev/null
+++ b/drivers/staging/wfx/bh.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Interrupt bottom half (BH).
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/gpio/consumer.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "traces.h"
+#include "secure_link.h"
+#include "hif_rx.h"
+#include "hif_api_cmd.h"
+
+static void device_wakeup(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+ if (gpiod_get_value(wdev->pdata.gpio_wakeup))
+ return;
+
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ if (wfx_api_older_than(wdev, 1, 4)) {
+ if (!completion_done(&wdev->hif.ctrl_ready))
+ udelay(2000);
+ } else {
+ // completion.h does not provide any function to wait for a
+ // completion without consuming it (a kind of
+ // wait_for_completion_done_timeout()). So we have to emulate
+ // it.
+ if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
+ msecs_to_jiffies(2) + 1))
+ complete(&wdev->hif.ctrl_ready);
+ else
+ dev_err(wdev->dev, "timeout while waking up chip\n");
+ }
+}
+
+static void device_release(struct wfx_dev *wdev)
+{
+ if (!wdev->pdata.gpio_wakeup)
+ return;
+
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+}
+
+static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif;
+ size_t alloc_len;
+ size_t computed_len;
+ int release_count;
+ int piggyback = 0;
+
+ WARN(read_len < 4, "corrupted read");
+ WARN(read_len > round_down(0xFFF, 2) * sizeof(u16),
+ "%s: request exceeds WFx capability", __func__);
+
+ // Add 2 to take into account piggyback size
+ alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
+ skb = dev_alloc_skb(alloc_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (wfx_data_read(wdev, skb->data, alloc_len))
+ goto err;
+
+ piggyback = le16_to_cpup((u16 *)(skb->data + alloc_len - 2));
+ _trace_piggyback(piggyback, false);
+
+ hif = (struct hif_msg *)skb->data;
+ WARN(hif->encrypted & 0x1, "unsupported encryption type");
+ if (hif->encrypted == 0x2) {
+ if (wfx_sl_decode(wdev, (void *)hif)) {
+ dev_kfree_skb(skb);
+ // If the frame was a confirmation, expect trouble in the
+ // next exchange. However, it is harmless to fail to decode
+ // an indication frame, so try to continue. Anyway, the
+ // piggyback is probably correct.
+ return piggyback;
+ }
+ le16_to_cpus(&hif->len);
+ computed_len = round_up(hif->len - sizeof(hif->len), 16)
+ + sizeof(struct hif_sl_msg)
+ + sizeof(struct hif_sl_tag);
+ } else {
+ le16_to_cpus(&hif->len);
+ computed_len = round_up(hif->len, 2);
+ }
+ if (computed_len != read_len) {
+ dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
+ computed_len, read_len);
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
+ hif, read_len, true);
+ goto err;
+ }
+
+ if (!(hif->id & HIF_ID_IS_INDICATION)) {
+ (*is_cnf)++;
+ if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
+ release_count = le32_to_cpu(((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs);
+ else
+ release_count = 1;
+ WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
+ wdev->hif.tx_buffers_used -= release_count;
+ if (!wdev->hif.tx_buffers_used)
+ wake_up(&wdev->hif.tx_buffers_empty);
+ }
+ _trace_hif_recv(hif, wdev->hif.tx_buffers_used);
+
+ if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
+ if (hif->seqnum != wdev->hif.rx_seqnum)
+ dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
+ hif->seqnum, wdev->hif.rx_seqnum);
+ wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
+ }
+
+ skb_put(skb, hif->len);
+ // wfx_handle_rx takes care of the SKB lifetime
+ wfx_handle_rx(wdev, skb);
+
+ return piggyback;
+
+err:
+ if (skb)
+ dev_kfree_skb(skb);
+ return -EIO;
+}
+
+static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
+{
+ size_t len;
+ int i;
+ int ctrl_reg, piggyback;
+
+ piggyback = 0;
+ for (i = 0; i < max_msg; i++) {
+ if (piggyback & CTRL_NEXT_LEN_MASK)
+ ctrl_reg = piggyback;
+ else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
+ else
+ ctrl_reg = 0;
+ if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
+ return i;
+ // ctrl_reg is in units of 16-bit words
+ len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
+ piggyback = rx_helper(wdev, len, num_cnf);
+ if (piggyback < 0)
+ return i;
+ if (!(piggyback & CTRL_WLAN_READY))
+ dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
+ piggyback);
+ }
+ if (piggyback & CTRL_NEXT_LEN_MASK) {
+ ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
+ complete(&wdev->hif.ctrl_ready);
+ if (ctrl_reg)
+ dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
+ ctrl_reg, piggyback);
+ }
+ return i;
+}
+
+static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
+{
+ int ret;
+ void *data;
+ bool is_encrypted = false;
+ size_t len = le16_to_cpu(hif->len);
+
+ WARN(len < sizeof(*hif), "try to send corrupted data");
+
+ hif->seqnum = wdev->hif.tx_seqnum;
+ wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
+
+ if (wfx_is_secure_command(wdev, hif->id)) {
+ len = round_up(len - sizeof(hif->len), 16) + sizeof(hif->len) +
+ sizeof(struct hif_sl_msg_hdr) +
+ sizeof(struct hif_sl_tag);
+ // AES supports in-place encryption. However, mac80211 accesses
+ // the 802.11 header after the frame is sent (to get the MAC
+ // addresses). So, keep the original buffer intact.
+ data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ goto end;
+ is_encrypted = true;
+ ret = wfx_sl_encode(wdev, hif, data);
+ if (ret)
+ goto end;
+ } else {
+ data = hif;
+ }
+ WARN(len > wdev->hw_caps.size_inp_ch_buf,
+ "%s: request exceeds WFx capability: %zu > %d\n", __func__,
+ len, wdev->hw_caps.size_inp_ch_buf);
+ len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
+ ret = wfx_data_write(wdev, data, len);
+ if (ret)
+ goto end;
+
+ wdev->hif.tx_buffers_used++;
+ _trace_hif_send(hif, wdev->hif.tx_buffers_used);
+end:
+ if (is_encrypted)
+ kfree(data);
+}
+
+static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
+{
+ struct hif_msg *hif;
+ int i;
+
+ for (i = 0; i < max_msg; i++) {
+ hif = NULL;
+ if (wdev->hif.tx_buffers_used < wdev->hw_caps.num_inp_ch_bufs) {
+ if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+ hif = wdev->hif_cmd.buf_send;
+ } else {
+ hif = wfx_tx_queues_get(wdev);
+ }
+ }
+ if (!hif)
+ return i;
+ tx_helper(wdev, hif);
+ }
+ return i;
+}
+
+/* In SDIO mode, it is necessary to access a register to acknowledge the
+ * last received message. This acknowledgement could be restricted to SDIO
+ * mode and done only when the last operation was an rx.
+ */
+static void ack_sdio_data(struct wfx_dev *wdev)
+{
+ u32 cfg_reg;
+
+ config_reg_read(wdev, &cfg_reg);
+ if (cfg_reg & 0xFF) {
+ dev_warn(wdev->dev, "chip reports errors: %02x\n",
+ cfg_reg & 0xFF);
+ config_reg_write_bits(wdev, 0xFF, 0x00);
+ }
+}
+
+static void bh_work(struct work_struct *work)
+{
+ struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
+ int stats_req = 0, stats_cnf = 0, stats_ind = 0;
+ bool release_chip = false, last_op_is_rx = false;
+ int num_tx, num_rx;
+
+ device_wakeup(wdev);
+ do {
+ num_tx = bh_work_tx(wdev, 32);
+ stats_req += num_tx;
+ if (num_tx)
+ last_op_is_rx = false;
+ num_rx = bh_work_rx(wdev, 32, &stats_cnf);
+ stats_ind += num_rx;
+ if (num_rx)
+ last_op_is_rx = true;
+ } while (num_rx || num_tx);
+ stats_ind -= stats_cnf;
+
+ if (last_op_is_rx)
+ ack_sdio_data(wdev);
+ if (!wdev->hif.tx_buffers_used && !work_pending(work) &&
+ !atomic_read(&wdev->scan_in_progress)) {
+ device_release(wdev);
+ release_chip = true;
+ }
+ _trace_bh_stats(stats_ind, stats_req, stats_cnf,
+ wdev->hif.tx_buffers_used, release_chip);
+}
+
+/*
+ * An IRQ from the chip occurred
+ */
+void wfx_bh_request_rx(struct wfx_dev *wdev)
+{
+ u32 cur, prev;
+
+ control_reg_read(wdev, &cur);
+ prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
+ complete(&wdev->hif.ctrl_ready);
+ queue_work(system_highpri_wq, &wdev->hif.bh);
+
+ if (!(cur & CTRL_NEXT_LEN_MASK))
+ dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
+ cur);
+ if (prev != 0)
+ dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
+ prev, cur);
+}
+
+/*
+ * The driver wants to send data
+ */
+void wfx_bh_request_tx(struct wfx_dev *wdev)
+{
+ queue_work(system_highpri_wq, &wdev->hif.bh);
+}
+
+void wfx_bh_register(struct wfx_dev *wdev)
+{
+ INIT_WORK(&wdev->hif.bh, bh_work);
+ init_completion(&wdev->hif.ctrl_ready);
+ init_waitqueue_head(&wdev->hif.tx_buffers_empty);
+}
+
+void wfx_bh_unregister(struct wfx_dev *wdev)
+{
+ flush_work(&wdev->hif.bh);
+}
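A pattern worth noting in device_wakeup() above: completion.h has no
peek-style wait, so the code waits with a timeout and, on success, immediately
re-completes to restore the state. Extracted as a standalone sketch (the
helper name is made up):

	/* Wait for a completion without consuming it (illustrative only). */
	static bool wait_for_completion_peek_timeout(struct completion *c,
						     unsigned long timeout)
	{
		if (!wait_for_completion_timeout(c, timeout))
			return false;	/* timed out, still not completed */
		complete(c);		/* restore the "done" state */
		return true;
	}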
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
new file mode 100644
index 000000000000..93ca98424e0b
--- /dev/null
+++ b/drivers/staging/wfx/bh.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Interrupt bottom half.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BH_H
+#define WFX_BH_H
+
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+struct wfx_dev;
+
+struct wfx_hif {
+ struct work_struct bh;
+ struct completion ctrl_ready;
+ wait_queue_head_t tx_buffers_empty;
+ atomic_t ctrl_reg;
+ int rx_seqnum;
+ int tx_seqnum;
+ int tx_buffers_used;
+};
+
+void wfx_bh_register(struct wfx_dev *wdev);
+void wfx_bh_unregister(struct wfx_dev *wdev);
+void wfx_bh_request_rx(struct wfx_dev *wdev);
+void wfx_bh_request_tx(struct wfx_dev *wdev);
+
+#endif /* WFX_BH_H */
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
new file mode 100644
index 000000000000..62d6ecabe4cb
--- /dev/null
+++ b/drivers/staging/wfx/bus.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common bus abstraction layer.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_BUS_H
+#define WFX_BUS_H
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+
+#define WFX_REG_CONFIG 0x0
+#define WFX_REG_CONTROL 0x1
+#define WFX_REG_IN_OUT_QUEUE 0x2
+#define WFX_REG_AHB_DPORT 0x3
+#define WFX_REG_BASE_ADDR 0x4
+#define WFX_REG_SRAM_DPORT 0x5
+#define WFX_REG_SET_GEN_R_W 0x6
+#define WFX_REG_FRAME_OUT 0x7
+
+struct hwbus_ops {
+ int (*copy_from_io)(void *bus_priv, unsigned int addr,
+ void *dst, size_t count);
+ int (*copy_to_io)(void *bus_priv, unsigned int addr,
+ const void *src, size_t count);
+ void (*lock)(void *bus_priv);
+ void (*unlock)(void *bus_priv);
+ size_t (*align_size)(void *bus_priv, size_t size);
+};
+
+extern struct sdio_driver wfx_sdio_driver;
+extern struct spi_driver wfx_spi_driver;
+
+#endif
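struct hwbus_ops above is the entire contract between the core and the two
bus back ends. A sketch of how a caller is expected to drive it (an
assumption from the API shape; the real users live in hwio.c, which is not
shown in this excerpt):

	static int example_read_reg32(const struct hwbus_ops *ops,
				      void *bus_priv, unsigned int reg,
				      u32 *val)
	{
		int ret;

		ops->lock(bus_priv);
		ret = ops->copy_from_io(bus_priv, reg, val, sizeof(*val));
		ops->unlock(bus_priv);
		return ret;
	}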
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
new file mode 100644
index 000000000000..f8901164c206
--- /dev/null
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SDIO interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static const struct wfx_platform_data wfx_sdio_pdata = {
+ .file_fw = "wfm_wf200",
+ .file_pds = "wf200.pds",
+};
+
+struct wfx_sdio_priv {
+ struct sdio_func *func;
+ struct wfx_dev *core;
+ u8 buf_id_tx;
+ u8 buf_id_rx;
+ int of_irq;
+};
+
+static int wfx_sdio_copy_from_io(void *priv, unsigned int reg_id,
+ void *dst, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+ WARN(reg_id > 7, "chip only has 8 registers");
+ WARN(((uintptr_t)dst) & 3, "unaligned buffer address");
+ WARN(count & 3, "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= (bus->buf_id_rx + 1) << 7;
+ ret = sdio_memcpy_fromio(bus->func, dst, sdio_addr, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_rx = (bus->buf_id_rx + 1) % 4;
+
+ return ret;
+}
+
+static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id,
+ const void *src, size_t count)
+{
+ struct wfx_sdio_priv *bus = priv;
+ unsigned int sdio_addr = reg_id << 2;
+ int ret;
+
+ WARN(reg_id > 7, "chip only has 8 registers");
+ WARN(((uintptr_t)src) & 3, "unaligned buffer address");
+ WARN(count & 3, "unaligned buffer size");
+
+ /* Use queue mode buffers */
+ if (reg_id == WFX_REG_IN_OUT_QUEUE)
+ sdio_addr |= bus->buf_id_tx << 7;
+ // FIXME: discards 'const' qualifier for src
+ ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
+ if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
+ bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
+
+ return ret;
+}
+
+static void wfx_sdio_lock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_claim_host(bus->func);
+}
+
+static void wfx_sdio_unlock(void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ sdio_release_host(bus->func);
+}
+
+static void wfx_sdio_irq_handler(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ if (bus->core)
+ wfx_bh_request_rx(bus->core);
+ else
+ WARN(!bus->core, "race condition in driver init/deinit");
+}
+
+static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ if (!bus->core) {
+ WARN(!bus->core, "race condition in driver init/deinit");
+ return IRQ_NONE;
+ }
+ sdio_claim_host(bus->func);
+ wfx_bh_request_rx(bus->core);
+ sdio_release_host(bus->func);
+ return IRQ_HANDLED;
+}
+
+static int wfx_sdio_irq_subscribe(struct wfx_sdio_priv *bus)
+{
+ int ret;
+
+ if (bus->of_irq) {
+ ret = request_irq(bus->of_irq, wfx_sdio_irq_handler_ext,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ } else {
+ sdio_claim_host(bus->func);
+ ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
+ sdio_release_host(bus->func);
+ }
+ return ret;
+}
+
+static int wfx_sdio_irq_unsubscribe(struct wfx_sdio_priv *bus)
+{
+ int ret;
+
+ if (bus->of_irq) {
+ free_irq(bus->of_irq, bus);
+ ret = 0;
+ } else {
+ sdio_claim_host(bus->func);
+ ret = sdio_release_irq(bus->func);
+ sdio_release_host(bus->func);
+ }
+ return ret;
+}
+
+static size_t wfx_sdio_align_size(void *priv, size_t size)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ return sdio_align_size(bus->func, size);
+}
+
+static const struct hwbus_ops wfx_sdio_hwbus_ops = {
+ .copy_from_io = wfx_sdio_copy_from_io,
+ .copy_to_io = wfx_sdio_copy_to_io,
+ .lock = wfx_sdio_lock,
+ .unlock = wfx_sdio_unlock,
+ .align_size = wfx_sdio_align_size,
+};
+
+static const struct of_device_id wfx_sdio_of_match[];
+static int wfx_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct device_node *np = func->dev.of_node;
+ struct wfx_sdio_priv *bus;
+ int ret;
+
+ if (func->num != 1) {
+ dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", func->num);
+ return -ENODEV;
+ }
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ if (np) {
+ if (!of_match_node(wfx_sdio_of_match, np)) {
+ dev_warn(&func->dev, "no compatible device found in DT\n");
+ return -ENODEV;
+ }
+ bus->of_irq = irq_of_parse_and_map(np, 0);
+ } else {
+ dev_warn(&func->dev,
+ "device is not declared in DT, features will be limited\n");
+ // FIXME: ignore VID/PID and only rely on device tree
+ // return -ENODEV;
+ }
+
+ bus->func = func;
+ sdio_set_drvdata(func, bus);
+ func->card->quirks |= MMC_QUIRK_LENIENT_FN0 |
+ MMC_QUIRK_BLKSZ_FOR_BYTE_MODE |
+ MMC_QUIRK_BROKEN_BYTE_MODE_512;
+
+ sdio_claim_host(func);
+ ret = sdio_enable_func(func);
+ // Block of 64 bytes is more efficient than 512B for frame sizes < 4k
+ sdio_set_block_size(func, 64);
+ sdio_release_host(func);
+ if (ret)
+ goto err0;
+
+ ret = wfx_sdio_irq_subscribe(bus);
+ if (ret)
+ goto err1;
+
+ bus->core = wfx_init_common(&func->dev, &wfx_sdio_pdata,
+ &wfx_sdio_hwbus_ops, bus);
+ if (!bus->core) {
+ ret = -EIO;
+ goto err2;
+ }
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ goto err3;
+
+ return 0;
+
+err3:
+ wfx_free_common(bus->core);
+err2:
+ wfx_sdio_irq_unsubscribe(bus);
+err1:
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+err0:
+ return ret;
+}
+
+static void wfx_sdio_remove(struct sdio_func *func)
+{
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ wfx_release(bus->core);
+ wfx_free_common(bus->core);
+ wfx_sdio_irq_unsubscribe(bus);
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+}
+
+#define SDIO_VENDOR_ID_SILABS 0x0000
+#define SDIO_DEVICE_ID_SILABS_WF200 0x1000
+static const struct sdio_device_id wfx_sdio_ids[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_SILABS, SDIO_DEVICE_ID_SILABS_WF200) },
+ // FIXME: ignore VID/PID and only rely on device tree
+ // { SDIO_DEVICE(SDIO_ANY_ID, SDIO_ANY_ID) },
+ { },
+};
+MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_sdio_of_match[] = {
+ { .compatible = "silabs,wfx-sdio" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+#endif
+
+struct sdio_driver wfx_sdio_driver = {
+ .name = "wfx-sdio",
+ .id_table = wfx_sdio_ids,
+ .probe = wfx_sdio_probe,
+ .remove = wfx_sdio_remove,
+ .drv = {
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(wfx_sdio_of_match),
+ }
+};
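As can be inferred from wfx_sdio_copy_from_io() and wfx_sdio_copy_to_io()
above (an inference from the code, not from a datasheet), the SDIO address
carries the register id in bits [4:2] and, for WFX_REG_IN_OUT_QUEUE only, a
rotating buffer index from bit 7 upward:

	/* Sketch of the address encoding used by the two copy helpers. */
	static unsigned int example_sdio_addr(unsigned int reg_id,
					      unsigned int buf_id)
	{
		unsigned int addr = reg_id << 2; /* register id, bits [4:2] */

		if (reg_id == WFX_REG_IN_OUT_QUEUE)
			addr |= buf_id << 7;	/* queue-mode buffer index */
		return addr;
	}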
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
new file mode 100644
index 000000000000..ab0cda1e124f
--- /dev/null
+++ b/drivers/staging/wfx/bus_spi.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, Sagrad Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+#include "bus.h"
+#include "wfx.h"
+#include "hwio.h"
+#include "main.h"
+#include "bh.h"
+
+static int gpio_reset = -2;
+module_param(gpio_reset, int, 0644);
+MODULE_PARM_DESC(gpio_reset, "gpio number for reset. -1 for none.");
+
+#define SET_WRITE 0x7FFF /* usage: and operation */
+#define SET_READ 0x8000 /* usage: or operation */
+
+static const struct wfx_platform_data wfx_spi_pdata = {
+ .file_fw = "wfm_wf200",
+ .file_pds = "wf200.pds",
+ .use_rising_clk = true,
+};
+
+struct wfx_spi_priv {
+ struct spi_device *func;
+ struct wfx_dev *core;
+ struct gpio_desc *gpio_reset;
+ struct work_struct request_rx;
+ bool need_swab;
+};
+
+/*
+ * The WFx chip reads data 16 bits at a time and places them directly into a
+ * (little endian) CPU register. So, the chip expects a byte order like
+ * "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is "B3 B2 B1 B0").
+ *
+ * A little endian host with bits_per_word == 16 should do the right job
+ * natively. The code below supports big endian hosts and the commonly used
+ * 8-bit SPI.
+ */
+static int wfx_spi_copy_from_io(void *priv, unsigned int addr,
+ void *dst, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2) | SET_READ;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .rx_buf = dst,
+ .len = count,
+ };
+ u16 *dst16 = dst;
+ int ret, i;
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+
+ cpu_to_le16s(&regaddr);
+ if (bus->need_swab)
+ swab16s(&regaddr);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&dst16[i]);
+ return ret;
+}
+
+static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
+ const void *src, size_t count)
+{
+ struct wfx_spi_priv *bus = priv;
+ u16 regaddr = (addr << 12) | (count / 2);
+ // FIXME: use a bounce buffer
+ u16 *src16 = (void *)src;
+ int ret, i;
+ struct spi_message m;
+ struct spi_transfer t_addr = {
+ .tx_buf = &regaddr,
+ .len = sizeof(regaddr),
+ };
+ struct spi_transfer t_msg = {
+ .tx_buf = src,
+ .len = count,
+ };
+
+ WARN(count % 2, "buffer size must be a multiple of 2");
+ WARN(regaddr & SET_READ, "bad addr or size overflow");
+
+ cpu_to_le16s(&regaddr);
+
+ if (bus->need_swab)
+ swab16s(&regaddr);
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t_addr, &m);
+ spi_message_add_tail(&t_msg, &m);
+ ret = spi_sync(bus->func, &m);
+
+ if (bus->need_swab && addr == WFX_REG_CONFIG)
+ for (i = 0; i < count / 2; i++)
+ swab16s(&src16[i]);
+ return ret;
+}
+
+static void wfx_spi_lock(void *priv)
+{
+}
+
+static void wfx_spi_unlock(void *priv)
+{
+}
+
+static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ if (!bus->core) {
+ WARN(!bus->core, "race condition in driver init/deinit");
+ return IRQ_NONE;
+ }
+ queue_work(system_highpri_wq, &bus->request_rx);
+ return IRQ_HANDLED;
+}
+
+static void wfx_spi_request_rx(struct work_struct *work)
+{
+ struct wfx_spi_priv *bus =
+ container_of(work, struct wfx_spi_priv, request_rx);
+
+ wfx_bh_request_rx(bus->core);
+}
+
+static size_t wfx_spi_align_size(void *priv, size_t size)
+{
+ // Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned
+ return ALIGN(size, 4);
+}
+
+static const struct hwbus_ops wfx_spi_hwbus_ops = {
+ .copy_from_io = wfx_spi_copy_from_io,
+ .copy_to_io = wfx_spi_copy_to_io,
+ .lock = wfx_spi_lock,
+ .unlock = wfx_spi_unlock,
+ .align_size = wfx_spi_align_size,
+};
+
+static int wfx_spi_probe(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus;
+ int ret;
+
+ if (!func->bits_per_word)
+ func->bits_per_word = 16;
+ ret = spi_setup(func);
+ if (ret)
+ return ret;
+ // Trace below is also displayed by spi_setup() if compiled with DEBUG
+ dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n",
+ func->chip_select, func->mode, func->bits_per_word,
+ func->max_speed_hz);
+ if (func->bits_per_word != 16 && func->bits_per_word != 8)
+ dev_warn(&func->dev, "unusual bits/word value: %d\n",
+ func->bits_per_word);
+ if (func->max_speed_hz > 49000000)
+ dev_warn(&func->dev, "%dHz is a very high speed\n",
+ func->max_speed_hz);
+
+ bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ bus->func = func;
+ if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ bus->need_swab = true;
+ spi_set_drvdata(func, bus);
+
+ bus->gpio_reset = wfx_get_gpio(&func->dev, gpio_reset, "reset");
+ if (!bus->gpio_reset) {
+ dev_warn(&func->dev, "trying to load firmware anyway\n");
+ } else {
+ gpiod_set_value(bus->gpio_reset, 0);
+ udelay(100);
+ gpiod_set_value(bus->gpio_reset, 1);
+ udelay(2000);
+ }
+
+ ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
+ IRQF_TRIGGER_RISING, "wfx", bus);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&bus->request_rx, wfx_spi_request_rx);
+ bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata,
+ &wfx_spi_hwbus_ops, bus);
+ if (!bus->core)
+ return -EIO;
+
+ ret = wfx_probe(bus->core);
+ if (ret)
+ wfx_free_common(bus->core);
+
+ return ret;
+}
+
+/* Disconnect function called by the SPI stack when the device is disconnected */
+static int wfx_spi_disconnect(struct spi_device *func)
+{
+ struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+ wfx_release(bus->core);
+ wfx_free_common(bus->core);
+ // A few IRQs will be sent during device release. Hopefully, no IRQ
+ // should happen after wdev/wvif are released.
+ devm_free_irq(&func->dev, func->irq, bus);
+ flush_work(&bus->request_rx);
+ return 0;
+}
+
+/*
+ * For dynamic driver binding, the kernel does not use OF to match the driver.
+ * It only uses the modalias, which is a copy of the 'compatible' DT property
+ * with the vendor prefix stripped.
+ */
+static const struct spi_device_id wfx_spi_id[] = {
+ { "wfx-spi", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, wfx_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id wfx_spi_of_match[] = {
+ { .compatible = "silabs,wfx-spi" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
+#endif
+
+struct spi_driver wfx_spi_driver = {
+ .driver = {
+ .name = "wfx-spi",
+ .of_match_table = of_match_ptr(wfx_spi_of_match),
+ },
+ .id_table = wfx_spi_id,
+ .probe = wfx_spi_probe,
+ .remove = wfx_spi_disconnect,
+};
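To make the byte-order comment at the top of bus_spi.c concrete: with an
8-bit SPI controller (or a big endian host), every 16-bit word must be
swapped so the chip sees "B1 B0 B3 B2" on the wire. The per-buffer swap the
driver performs, as a minimal sketch (the helper name is illustrative):

	/* Swap every 16-bit word in place; count is in bytes, must be even. */
	static void example_swab_words(u16 *buf, size_t count)
	{
		int i;

		for (i = 0; i < count / 2; i++)
			swab16s(&buf[i]);
	}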
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
new file mode 100644
index 000000000000..e7fcce8d0cc4
--- /dev/null
+++ b/drivers/staging/wfx/data_rx.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "data_rx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+
+static int wfx_handle_pspoll(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
+ int link_id = 0;
+ u32 pspoll_mask = 0;
+ int i;
+
+ if (wvif->state != WFX_STATE_AP)
+ return 1;
+ if (!ether_addr_equal(wvif->vif->addr, pspoll->bssid))
+ return 1;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, pspoll->ta);
+ if (sta)
+ link_id = ((struct wfx_sta_priv *)&sta->drv_priv)->link_id;
+ rcu_read_unlock();
+ if (link_id)
+ pspoll_mask = BIT(link_id);
+ else
+ return 1;
+
+ wvif->pspoll_mask |= pspoll_mask;
+ /* Do not report pspolls if data for the given link id is already queued. */
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ if (wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ pspoll_mask)) {
+ wfx_bh_request_tx(wvif->wdev);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int wfx_drop_encrypt_data(struct wfx_dev *wdev, struct hif_ind_rx *arg, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *) skb->data;
+ size_t hdrlen = ieee80211_hdrlen(frame->frame_control);
+ size_t iv_len, icv_len;
+
+ /* Oops... There is no fast way to ask mac80211 about
+ * IV/ICV lengths. Even defines are not exposed.
+ */
+ switch (arg->rx_flags.encryp) {
+ case HIF_RI_FLAGS_WEP_ENCRYPTED:
+ iv_len = 4 /* WEP_IV_LEN */;
+ icv_len = 4 /* WEP_ICV_LEN */;
+ break;
+ case HIF_RI_FLAGS_TKIP_ENCRYPTED:
+ iv_len = 8 /* TKIP_IV_LEN */;
+ icv_len = 4 /* TKIP_ICV_LEN */
+ + 8 /*MICHAEL_MIC_LEN*/;
+ break;
+ case HIF_RI_FLAGS_AES_ENCRYPTED:
+ iv_len = 8 /* CCMP_HDR_LEN */;
+ icv_len = 8 /* CCMP_MIC_LEN */;
+ break;
+ case HIF_RI_FLAGS_WAPI_ENCRYPTED:
+ iv_len = 18 /* WAPI_HDR_LEN */;
+ icv_len = 16 /* WAPI_MIC_LEN */;
+ break;
+ default:
+ dev_err(wdev->dev, "unknown encryption type %d\n",
+ arg->rx_flags.encryp);
+ return -EIO;
+ }
+
+ /* Firmware strips ICV in case of MIC failure. */
+ if (arg->status == HIF_STATUS_MICFAILURE)
+ icv_len = 0;
+
+ if (skb->len < hdrlen + iv_len + icv_len) {
+ dev_warn(wdev->dev, "malformed SDU received\n");
+ return -EIO;
+ }
+
+ /* Remove IV, ICV and MIC */
+ skb_trim(skb, skb->len - icv_len);
+ memmove(skb->data + iv_len, skb->data, hdrlen);
+ skb_pull(skb, iv_len);
+ return 0;
+
+}
+
+void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
+ struct sk_buff *skb)
+{
+ int link_id = arg->rx_flags.peer_sta_id;
+ struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct wfx_link_entry *entry = NULL;
+ bool early_data = false;
+
+ memset(hdr, 0, sizeof(*hdr));
+
+ // FIXME: Why do we drop these frames?
+ if (!arg->rcpi_rssi &&
+ (ieee80211_is_probe_resp(frame->frame_control) ||
+ ieee80211_is_beacon(frame->frame_control)))
+ goto drop;
+
+ if (link_id && link_id <= WFX_MAX_STA_IN_AP_MODE) {
+ entry = &wvif->link_id_db[link_id - 1];
+ entry->timestamp = jiffies;
+ if (entry->status == WFX_LINK_SOFT &&
+ ieee80211_is_data(frame->frame_control))
+ early_data = true;
+ }
+
+ if (arg->status == HIF_STATUS_MICFAILURE)
+ hdr->flag |= RX_FLAG_MMIC_ERROR;
+ else if (arg->status)
+ goto drop;
+
+ if (skb->len < sizeof(struct ieee80211_pspoll)) {
+ dev_warn(wvif->wdev->dev, "malformed SDU received\n");
+ goto drop;
+ }
+
+ if (ieee80211_is_pspoll(frame->frame_control))
+ if (wfx_handle_pspoll(wvif, skb))
+ goto drop;
+
+ hdr->band = NL80211_BAND_2GHZ;
+ hdr->freq = ieee80211_channel_to_frequency(arg->channel_number,
+ hdr->band);
+
+ if (arg->rxed_rate >= 14) {
+ hdr->encoding = RX_ENC_HT;
+ hdr->rate_idx = arg->rxed_rate - 14;
+ } else if (arg->rxed_rate >= 4) {
+ hdr->rate_idx = arg->rxed_rate - 2;
+ } else {
+ hdr->rate_idx = arg->rxed_rate;
+ }
+
+ hdr->signal = arg->rcpi_rssi / 2 - 110;
+ hdr->antenna = 0;
+
+ if (arg->rx_flags.encryp) {
+ if (wfx_drop_encrypt_data(wvif->wdev, arg, skb))
+ goto drop;
+ hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+ if (arg->rx_flags.encryp == HIF_RI_FLAGS_TKIP_ENCRYPTED)
+ hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+
+ /* Filter block ACK negotiation: fully controlled by firmware */
+ if (ieee80211_is_action(frame->frame_control) &&
+ arg->rx_flags.match_uc_addr &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK)
+ goto drop;
+ if (ieee80211_is_beacon(frame->frame_control) &&
+ !arg->status && wvif->vif &&
+ ether_addr_equal(ieee80211_get_SA(frame),
+ wvif->vif->bss_conf.bssid)) {
+ const u8 *tim_ie;
+ u8 *ies = mgmt->u.beacon.variable;
+ size_t ies_len = skb->len - (ies - skb->data);
+
+ tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+ if (tim_ie) {
+ struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2];
+
+ if (wvif->dtim_period != tim->dtim_period) {
+ wvif->dtim_period = tim->dtim_period;
+ schedule_work(&wvif->set_beacon_wakeup_period_work);
+ }
+ }
+
+ /* Disable beacon filter once we're associated... */
+ if (wvif->disable_beacon_filter &&
+ (wvif->vif->bss_conf.assoc ||
+ wvif->vif->bss_conf.ibss_joined)) {
+ wvif->disable_beacon_filter = false;
+ schedule_work(&wvif->update_filtering_work);
+ }
+ }
+
+ if (early_data) {
+ spin_lock_bh(&wvif->ps_state_lock);
+ /* Double-check status with lock held */
+ if (entry->status == WFX_LINK_SOFT)
+ skb_queue_tail(&entry->rx_queue, skb);
+ else
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ } else {
+ ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
+ }
+
+ return;
+
+drop:
+ dev_kfree_skb(skb);
+}
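The signal value computed in wfx_rx_cb() follows the standard 802.11 RCPI
encoding: half-dB steps offset by -110 dBm, so rcpi_rssi == 0 maps to
-110 dBm and 220 maps to 0 dBm. Factored out as a sketch:

	/* RCPI to dBm, as used above. */
	static int example_rcpi_to_dbm(int rcpi)
	{
		return rcpi / 2 - 110;
	}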
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
new file mode 100644
index 000000000000..a50ce352bc5e
--- /dev/null
+++ b/drivers/staging/wfx/data_rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_RX_H
+#define WFX_DATA_RX_H
+
+#include "hif_api_cmd.h"
+
+struct wfx_vif;
+struct sk_buff;
+
+void wfx_rx_cb(struct wfx_vif *wvif, struct hif_ind_rx *arg,
+ struct sk_buff *skb);
+
+#endif /* WFX_DATA_RX_H */
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
new file mode 100644
index 000000000000..b722e9773232
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.c
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "data_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "sta.h"
+#include "queue.h"
+#include "debug.h"
+#include "traces.h"
+#include "hif_tx_mib.h"
+
+#define WFX_INVALID_RATE_ID (0xFF)
+#define WFX_LINK_ID_NO_ASSOC 15
+#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+static int wfx_get_hw_rate(struct wfx_dev *wdev,
+ const struct ieee80211_tx_rate *rate)
+{
+ if (rate->idx < 0)
+ return -1;
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ if (rate->idx > 7) {
+ WARN(1, "wrong rate->idx value: %d", rate->idx);
+ return -1;
+ }
+ return rate->idx + 14;
+ }
+ // WFx only supports 2GHz; otherwise, the band information should be
+ // retrieved from ieee80211_tx_info
+ return wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates[rate->idx].hw_value;
+}
+
+/* TX policy cache implementation */
+
+static void wfx_tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
+ struct ieee80211_tx_rate *rates)
+{
+ int i;
+ size_t count;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ WARN(rates[0].idx < 0, "invalid rate policy");
+ memset(policy, 0, sizeof(*policy));
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ if (rates[i].idx < 0)
+ break;
+ count = i;
+
+ /* HACK!!! The device has problems (at least) switching from
+ * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+ * of time (100-200 ms), leading to a noticeable throughput drop.
+ * As a workaround, additional g-rates are injected into the
+ * policy.
+ */
+ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+ rates[0].idx > 4 && rates[0].count > 2 &&
+ rates[1].idx < 2) {
+ int mid_rate = (rates[0].idx + 4) >> 1;
+
+ /* Decrease number of retries for the initial rate */
+ rates[0].count -= 2;
+
+ if (mid_rate != 4) {
+ /* Keep fallback rate at 1Mbps. */
+ rates[3] = rates[1];
+
+ /* Inject 1 transmission on lowest g-rate */
+ rates[2].idx = 4;
+ rates[2].count = 1;
+ rates[2].flags = rates[1].flags;
+
+ /* Inject 1 transmission on mid-rate */
+ rates[1].idx = mid_rate;
+ rates[1].count = 1;
+
+ /* Fallback to 1 Mbps is a really bad thing,
+ * so let's try to increase the probability of
+ * successful transmission on the lowest g rate
+ * even more
+ */
+ if (rates[0].count >= 3) {
+ --rates[0].count;
+ ++rates[2].count;
+ }
+
+ /* Adjust amount of rates defined */
+ count += 2;
+ } else {
+ /* Keep fallback rate at 1Mbps. */
+ rates[2] = rates[1];
+
+ /* Inject 2 transmissions on lowest g-rate */
+ rates[1].idx = 4;
+ rates[1].count = 2;
+
+ /* Adjust amount of rates defined */
+ count += 1;
+ }
+ }
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+ int rateid;
+ u8 count;
+
+ if (rates[i].idx < 0)
+ break;
+ WARN_ON(rates[i].count > 15);
+ rateid = wfx_get_hw_rate(wdev, &rates[i]);
+ // Pack two values in each byte of policy->rates
+ count = rates[i].count;
+ if (rateid % 2)
+ count <<= 4;
+ policy->rates[rateid / 2] |= count;
+ }
+}
+
+static bool tx_policy_is_equal(const struct tx_policy *a,
+ const struct tx_policy *b)
+{
+ return !memcmp(a->rates, b->rates, sizeof(a->rates));
+}
+
+static int wfx_tx_policy_find(struct tx_policy_cache *cache,
+ struct tx_policy *wanted)
+{
+ struct tx_policy *it;
+
+ list_for_each_entry(it, &cache->used, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ list_for_each_entry(it, &cache->free, link)
+ if (tx_policy_is_equal(wanted, it))
+ return it - cache->cache;
+ return -1;
+}
+
+static void wfx_tx_policy_use(struct tx_policy_cache *cache,
+ struct tx_policy *entry)
+{
+ ++entry->usage_count;
+ list_move(&entry->link, &cache->used);
+}
+
+static int wfx_tx_policy_release(struct tx_policy_cache *cache,
+ struct tx_policy *entry)
+{
+ int ret = --entry->usage_count;
+
+ if (!ret)
+ list_move(&entry->link, &cache->free);
+ return ret;
+}
+
+static int wfx_tx_policy_get(struct wfx_vif *wvif,
+ struct ieee80211_tx_rate *rates,
+ bool *renew)
+{
+ int idx;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct tx_policy wanted;
+
+ wfx_tx_policy_build(wvif, &wanted, rates);
+
+ spin_lock_bh(&cache->lock);
+ if (WARN_ON(list_empty(&cache->free))) {
+ spin_unlock_bh(&cache->lock);
+ return WFX_INVALID_RATE_ID;
+ }
+ idx = wfx_tx_policy_find(cache, &wanted);
+ if (idx >= 0) {
+ *renew = false;
+ } else {
+ struct tx_policy *entry;
+ *renew = true;
+ /* If the policy is not found, create a new one
+ * using the oldest entry in the "free" list
+ */
+ entry = list_entry(cache->free.prev, struct tx_policy, link);
+ memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
+ entry->uploaded = 0;
+ entry->usage_count = 0;
+ idx = entry - cache->cache;
+ }
+ wfx_tx_policy_use(cache, &cache->cache[idx]);
+ if (list_empty(&cache->free)) {
+ /* Lock TX queues. */
+ wfx_tx_queues_lock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+ return idx;
+}
+
+static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
+{
+ int usage, locked;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+
+ spin_lock_bh(&cache->lock);
+ locked = list_empty(&cache->free);
+ usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
+ if (locked && !usage) {
+ /* Unlock TX queues. */
+ wfx_tx_queues_unlock(wvif->wdev);
+ }
+ spin_unlock_bh(&cache->lock);
+}
+
+static int wfx_tx_policy_upload(struct wfx_vif *wvif)
+{
+ int i;
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ struct hif_mib_set_tx_rate_retry_policy *arg =
+ kzalloc(struct_size(arg,
+ tx_rate_retry_policy,
+ HIF_MIB_NUM_TX_RATE_RETRY_POLICIES),
+ GFP_KERNEL);
+ struct hif_mib_tx_rate_retry_policy *dst;
+
+ spin_lock_bh(&cache->lock);
+ /* Upload only modified entries. */
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i) {
+ struct tx_policy *src = &cache->cache[i];
+
+ if (!src->uploaded && memzcmp(src->rates, sizeof(src->rates))) {
+ dst = arg->tx_rate_retry_policy +
+ arg->num_tx_rate_policies;
+
+ dst->policy_index = i;
+ dst->short_retry_count = 255;
+ dst->long_retry_count = 255;
+ dst->first_rate_sel = 1;
+ dst->terminate = 1;
+ dst->count_init = 1;
+ memcpy(&dst->rates, src->rates, sizeof(src->rates));
+ src->uploaded = 1;
+ arg->num_tx_rate_policies++;
+ }
+ }
+ spin_unlock_bh(&cache->lock);
+ hif_set_tx_rate_retry_policy(wvif, arg);
+ kfree(arg);
+ return 0;
+}
+
+static void wfx_tx_policy_upload_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, tx_policy_upload_work);
+
+ wfx_tx_policy_upload(wvif);
+
+ wfx_tx_unlock(wvif->wdev);
+ wfx_tx_queues_unlock(wvif->wdev);
+}
+
+void wfx_tx_policy_init(struct wfx_vif *wvif)
+{
+ struct tx_policy_cache *cache = &wvif->tx_policy_cache;
+ int i;
+
+ memset(cache, 0, sizeof(*cache));
+
+ spin_lock_init(&cache->lock);
+ INIT_LIST_HEAD(&cache->used);
+ INIT_LIST_HEAD(&cache->free);
+ INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
+
+ for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ list_add(&cache->cache[i].link, &cache->free);
+}
+
+/* Link ID related functions */
+
+static int wfx_alloc_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+ unsigned long max_inactivity = 0;
+ unsigned long now = jiffies;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (!wvif->link_id_db[i].status) {
+ ret = i + 1;
+ break;
+ } else if (wvif->link_id_db[i].status != WFX_LINK_HARD &&
+ !wvif->wdev->tx_queue_stats.link_map_cache[i + 1]) {
+ unsigned long inactivity =
+ now - wvif->link_id_db[i].timestamp;
+
+ if (inactivity < max_inactivity)
+ continue;
+ max_inactivity = inactivity;
+ ret = i + 1;
+ }
+ }
+
+ if (ret) {
+ struct wfx_link_entry *entry = &wvif->link_id_db[ret - 1];
+
+ entry->status = WFX_LINK_RESERVE;
+ ether_addr_copy(entry->mac, mac);
+ memset(&entry->buffered, 0, WFX_MAX_TID);
+ skb_queue_head_init(&entry->rx_queue);
+ wfx_tx_lock(wvif->wdev);
+
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+		dev_info(wvif->wdev->dev, "no more link IDs available\n");
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac)
+{
+ int i, ret = 0;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ if (ether_addr_equal(mac, wvif->link_id_db[i].mac) &&
+ wvif->link_id_db[i].status) {
+ wvif->link_id_db[i].timestamp = jiffies;
+ ret = i + 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return ret;
+}
+
+static int wfx_map_link(struct wfx_vif *wvif,
+ struct wfx_link_entry *link_entry, int sta_id)
+{
+ int ret;
+
+ ret = hif_map_link(wvif, link_entry->mac, 0, sta_id);
+
+ if (ret == 0)
+		/* Save the MAC address currently associated with the peer
+		 * for a future unmap request
+		 */
+ ether_addr_copy(link_entry->old_mac, link_entry->mac);
+
+ return ret;
+}
+
+int wfx_unmap_link(struct wfx_vif *wvif, int sta_id)
+{
+ u8 *mac_addr = NULL;
+
+ if (sta_id)
+ mac_addr = wvif->link_id_db[sta_id - 1].old_mac;
+
+ return hif_map_link(wvif, mac_addr, 1, sta_id);
+}
+
+void wfx_link_id_gc_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_gc_work.work);
+ unsigned long now = jiffies;
+ unsigned long next_gc = -1;
+ long ttl;
+ u32 mask;
+ int i;
+
+ if (wvif->state != WFX_STATE_AP)
+ return;
+
+ wfx_tx_lock_flush(wvif->wdev);
+ spin_lock_bh(&wvif->ps_state_lock);
+ for (i = 0; i < WFX_MAX_STA_IN_AP_MODE; ++i) {
+ bool need_reset = false;
+
+ mask = BIT(i + 1);
+ if (wvif->link_id_db[i].status == WFX_LINK_RESERVE ||
+ (wvif->link_id_db[i].status == WFX_LINK_HARD &&
+ !(wvif->link_id_map & mask))) {
+ if (wvif->link_id_map & mask) {
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ need_reset = true;
+ }
+ wvif->link_id_map |= mask;
+ if (wvif->link_id_db[i].status != WFX_LINK_HARD)
+ wvif->link_id_db[i].status = WFX_LINK_SOFT;
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (need_reset)
+ wfx_unmap_link(wvif, i + 1);
+ wfx_map_link(wvif, &wvif->link_id_db[i], i + 1);
+ next_gc = min(next_gc, WFX_LINK_ID_GC_TIMEOUT);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else if (wvif->link_id_db[i].status == WFX_LINK_SOFT) {
+ ttl = wvif->link_id_db[i].timestamp - now +
+ WFX_LINK_ID_GC_TIMEOUT;
+ if (ttl <= 0) {
+ need_reset = true;
+ wvif->link_id_db[i].status = WFX_LINK_OFF;
+ wvif->link_id_map &= ~mask;
+ wvif->sta_asleep_mask &= ~mask;
+ wvif->pspoll_mask &= ~mask;
+ spin_unlock_bh(&wvif->ps_state_lock);
+ wfx_unmap_link(wvif, i + 1);
+ spin_lock_bh(&wvif->ps_state_lock);
+ } else {
+ next_gc = min_t(unsigned long, next_gc, ttl);
+ }
+ }
+ if (need_reset)
+ skb_queue_purge(&wvif->link_id_db[i].rx_queue);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (next_gc != -1)
+ schedule_delayed_work(&wvif->link_id_gc_work, next_gc);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+void wfx_link_id_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, link_id_work);
+
+ wfx_tx_flush(wvif->wdev);
+ wfx_link_id_gc_work(&wvif->link_id_gc_work.work);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+/* Tx implementation */
+
+static bool ieee80211_is_action_back(struct ieee80211_hdr *hdr)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+ if (mgmt->u.action.category != WLAN_CATEGORY_BACK)
+ return false;
+ return true;
+}
+
+static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
+ struct wfx_tx_priv *tx_priv,
+ struct ieee80211_sta *sta)
+{
+ u32 mask = ~BIT(tx_priv->raw_link_id);
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (ieee80211_is_auth(hdr->frame_control)) {
+ wvif->sta_asleep_mask &= mask;
+ wvif->pspoll_mask &= mask;
+ }
+
+ if (tx_priv->link_id == WFX_LINK_ID_AFTER_DTIM &&
+ !wvif->mcast_buffered) {
+ wvif->mcast_buffered = true;
+ if (wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ }
+
+ if (tx_priv->raw_link_id) {
+ wvif->link_id_db[tx_priv->raw_link_id - 1].timestamp = jiffies;
+ if (tx_priv->tid < WFX_MAX_TID)
+ wvif->link_id_db[tx_priv->raw_link_id - 1].buffered[tx_priv->tid]++;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tx_priv->tid, true);
+}
+
+static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
+{
+ struct wfx_sta_priv *sta_priv =
+ sta ? (struct wfx_sta_priv *) &sta->drv_priv : NULL;
+ const u8 *da = ieee80211_get_DA(hdr);
+ int ret;
+
+ if (sta_priv && sta_priv->link_id)
+ return sta_priv->link_id;
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+ if (is_multicast_ether_addr(da))
+ return 0;
+ ret = wfx_find_link_id(wvif, da);
+ if (!ret)
+ ret = wfx_alloc_link_id(wvif, da);
+ if (!ret) {
+		dev_err(wvif->wdev->dev, "no more link IDs available\n");
+ return WFX_LINK_ID_NO_ASSOC;
+ }
+ return ret;
+}
+
+static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
+{
+ int i;
+ bool finished;
+
+	// Firmware is not able to mix rates with different flags
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_SHORT_GI))
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+ if (!(rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS))
+ rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
+ }
+
+ // Sort rates and remove duplicates
+ do {
+ finished = true;
+ for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
+ if (rates[i + 1].idx == rates[i].idx &&
+ rates[i].idx != -1) {
+ rates[i].count =
+ max_t(int, rates[i].count,
+ rates[i + 1].count);
+ rates[i + 1].idx = -1;
+ rates[i + 1].count = 0;
+
+ finished = false;
+ }
+ if (rates[i + 1].idx > rates[i].idx) {
+ swap(rates[i + 1], rates[i]);
+ finished = false;
+ }
+ }
+ } while (!finished);
+ // All retries use long GI
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
+}
+
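+/* Worked example (illustrative values): the normalization above turns
+ *	{ {idx = 7, count = 2}, {idx = 7, count = 3}, {idx = 5, count = 1} }
+ * into
+ *	{ {idx = 7, count = 3}, {idx = 5, count = 1}, {idx = -1, count = 0} }
+ * duplicates are merged (keeping the largest count), entries are sorted by
+ * descending index, and only the first rate may keep the short GI flag.
+ */
+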
+static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
+ struct ieee80211_tx_info *tx_info)
+{
+ bool tx_policy_renew = false;
+ u8 rate_id;
+
+ rate_id = wfx_tx_policy_get(wvif,
+ tx_info->driver_rates, &tx_policy_renew);
+ WARN(rate_id == WFX_INVALID_RATE_ID, "unable to get a valid Tx policy");
+
+ if (tx_policy_renew) {
+ /* FIXME: It's not so optimal to stop TX queues every now and
+ * then. Better to reimplement task scheduling with a counter.
+ */
+ wfx_tx_lock(wvif->wdev);
+ wfx_tx_queues_lock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work)) {
+ wfx_tx_queues_unlock(wvif->wdev);
+ wfx_tx_unlock(wvif->wdev);
+ }
+ }
+ return rate_id;
+}
+
+static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info)
+{
+ struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
+ struct hif_ht_tx_parameters ret = { };
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ ret.frame_format = HIF_FRAME_FORMAT_NON_HT;
+ else if (!(rate->flags & IEEE80211_TX_RC_GREEN_FIELD))
+ ret.frame_format = HIF_FRAME_FORMAT_MIXED_FORMAT_HT;
+ else
+ ret.frame_format = HIF_FRAME_FORMAT_GF_HT_11N;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ ret.short_gi = 1;
+ if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+ ret.stbc = 0; // FIXME: Not yet supported by firmware?
+ return ret;
+}
+
+static u8 wfx_tx_get_tid(struct ieee80211_hdr *hdr)
+{
+ // FIXME: ieee80211_get_tid(hdr) should be sufficient for all cases.
+ if (!ieee80211_is_data(hdr->frame_control))
+ return WFX_MAX_TID;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ return ieee80211_get_tid(hdr);
+ else
+ return 0;
+}
+
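+/* Illustrative mapping: a QoS data frame carries its own TID (0-7), a non-QoS
+ * data frame maps to TID 0, and anything else (e.g. management frames) maps
+ * to the out-of-band value WFX_MAX_TID.
+ */
+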
+static int wfx_tx_get_icv_len(struct ieee80211_key_conf *hw_key)
+{
+ int mic_space;
+
+ if (!hw_key)
+ return 0;
+ mic_space = (hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) ? 8 : 0;
+ return hw_key->icv_len + mic_space;
+}
+
+static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct hif_msg *hif_msg;
+ struct hif_req_tx *req;
+ struct wfx_tx_priv *tx_priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int queue_id = tx_info->hw_queue;
+ size_t offset = (size_t) skb->data & 3;
+ int wmsg_len = sizeof(struct hif_msg) +
+ sizeof(struct hif_req_tx) + offset;
+
+ WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
+ wfx_tx_fixup_rates(tx_info->driver_rates);
+
+	// From now on, tx_info->control is unusable
+ memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
+ // Fill tx_priv
+ tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
+ tx_priv->tid = wfx_tx_get_tid(hdr);
+ tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
+ tx_priv->link_id = tx_priv->raw_link_id;
+ if (ieee80211_has_protected(hdr->frame_control))
+ tx_priv->hw_key = hw_key;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ tx_priv->link_id = WFX_LINK_ID_AFTER_DTIM;
+ if (sta && (sta->uapsd_queues & BIT(queue_id)))
+ tx_priv->link_id = WFX_LINK_ID_UAPSD;
+
+ // Fill hif_msg
+ WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
+ WARN(offset & 1, "attempt to transmit an unaligned frame");
+ skb_put(skb, wfx_tx_get_icv_len(tx_priv->hw_key));
+ skb_push(skb, wmsg_len);
+ memset(skb->data, 0, wmsg_len);
+ hif_msg = (struct hif_msg *)skb->data;
+ hif_msg->len = cpu_to_le16(skb->len);
+ hif_msg->id = HIF_REQ_ID_TX;
+ hif_msg->interface = wvif->id;
+ if (skb->len > wvif->wdev->hw_caps.size_inp_ch_buf) {
+ dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
+ skb->len, wvif->wdev->hw_caps.size_inp_ch_buf);
+ skb_pull(skb, wmsg_len);
+ return -EIO;
+ }
+
+ // Fill tx request
+ req = (struct hif_req_tx *)hif_msg->body;
+ req->packet_id = queue_id << 16 |
+ IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ req->data_flags.fc_offset = offset;
+ req->queue_id.peer_sta_id = tx_priv->raw_link_id;
+	// Queue indexes are inverted between firmware and Linux
+ req->queue_id.queue_id = 3 - queue_id;
+ req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
+ req->tx_flags.retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+
+ // Auxiliary operations
+ wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
+ wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ wfx_bh_request_tx(wvif->wdev);
+ return 0;
+}
+
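+/* Note on packet_id (illustrative): the value built above packs the queue in
+ * the high 16 bits and the 802.11 sequence number in the low bits, e.g.
+ * queue_id = 2 and SN = 0x123 give packet_id = 0x00020123. The confirmation
+ * path relies on this value to match a hif_cnf_tx with its pending frame.
+ */
+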
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+ struct ieee80211_sta *sta = control ? control->sta : NULL;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info,
+ rate_driver_data);
+
+ compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
+ "struct tx_priv is too large");
+ WARN(skb->next || skb->prev, "skb is already member of a list");
+ // control.vif can be NULL for injected frames
+ if (tx_info->control.vif)
+ wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
+ else
+ wvif = wvif_iterate(wdev, NULL);
+ if (WARN_ON(!wvif))
+ goto drop;
+ // FIXME: why?
+ if (ieee80211_is_action_back(hdr)) {
+ dev_info(wdev->dev, "drop BA action\n");
+ goto drop;
+ }
+ if (wfx_tx_inner(wvif, sta, skb))
+ goto drop;
+
+ return;
+
+drop:
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
+
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg)
+{
+ int i;
+ int tx_count;
+ struct sk_buff *skb;
+ struct ieee80211_tx_rate *rate;
+ struct ieee80211_tx_info *tx_info;
+ const struct wfx_tx_priv *tx_priv;
+
+ skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wvif->wdev->dev,
+ "received unknown packet_id (%#.8x) from chip\n",
+ arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ _trace_tx_stats(arg, skb,
+ wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+
+	// You can touch tx_priv, but don't touch tx_info->status.
+ tx_count = arg->ack_failures;
+ if (!arg->status || arg->ack_failures)
+ tx_count += 1; // Also report success
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ rate = &tx_info->status.rates[i];
+ if (rate->idx < 0)
+ break;
+ if (tx_count < rate->count && arg->status && arg->ack_failures)
+			dev_dbg(wvif->wdev->dev, "not all retries were consumed: %d != %d\n",
+ rate->count, tx_count);
+ if (tx_count <= rate->count && tx_count &&
+ arg->txed_rate != wfx_get_hw_rate(wvif->wdev, rate))
+ dev_dbg(wvif->wdev->dev,
+ "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate,
+ wfx_get_hw_rate(wvif->wdev, rate));
+ if (tx_count > rate->count) {
+ tx_count -= rate->count;
+ } else if (!tx_count) {
+ rate->count = 0;
+ rate->idx = -1;
+ } else {
+ rate->count = tx_count;
+ tx_count = 0;
+ }
+ }
+ if (tx_count)
+ dev_dbg(wvif->wdev->dev,
+ "%d more retries than expected\n", tx_count);
+ skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
+
+	// From now on, you can touch tx_info->status, but do not touch
+	// tx_priv anymore
+ // FIXME: use ieee80211_tx_info_clear_status()
+ memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
+ memset(tx_info->pad, 0, sizeof(tx_info->pad));
+
+ if (!arg->status) {
+ if (wvif->bss_loss_state &&
+ arg->packet_id == wvif->bss_loss_confirm_id)
+ wfx_cqm_bssloss_sm(wvif, 0, 1, 0);
+ tx_info->status.tx_time =
+ arg->media_delay - arg->tx_queue_delay;
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+ tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+ } else if (arg->status == HIF_REQUEUE) {
+ /* "REQUEUE" means "implicit suspend" */
+ struct hif_ind_suspend_resume_tx suspend = {
+ .suspend_resume_flags.resume = 0,
+ .suspend_resume_flags.bc_mc_only = 1,
+ };
+
+ WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
+ wfx_suspend_resume(wvif, &suspend);
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ } else {
+ if (wvif->bss_loss_state &&
+ arg->packet_id == wvif->bss_loss_confirm_id)
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 1);
+ }
+ wfx_pending_remove(wvif->wdev, skb);
+}
+
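+/* Retry accounting example (illustrative values): with a successful status
+ * and ack_failures = 3, tx_count = 4 attempts are spread over the rate
+ * table, so
+ *	{ {idx = 7, count = 3}, {idx = 5, count = 2}, ... }
+ * becomes
+ *	{ {idx = 7, count = 3}, {idx = 5, count = 1}, ... }
+ * i.e. three attempts at rate index 7 followed by one at rate index 5.
+ */
+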
+static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct hif_req_tx *req)
+{
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int tid = wfx_tx_get_tid(hdr);
+ int raw_link_id = req->queue_id.peer_sta_id;
+ u8 *buffered;
+
+ if (raw_link_id && tid < WFX_MAX_TID) {
+ buffered = wvif->link_id_db[raw_link_id - 1].buffered;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ WARN(!buffered[tid], "inconsistent notification");
+ buffered[tid]--;
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (!buffered[tid]) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta)
+ ieee80211_sta_set_buffered(sta, tid, false);
+ rcu_read_unlock();
+ }
+ }
+}
+
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ unsigned int offset = sizeof(struct hif_req_tx) +
+ sizeof(struct hif_msg) +
+ req->data_flags.fc_offset;
+
+ WARN_ON(!wvif);
+ skb_pull(skb, offset);
+ wfx_notify_buffered_tx(wvif, skb, req);
+ wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ ieee80211_tx_status_irqsafe(wdev->hw, skb);
+}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
new file mode 100644
index 000000000000..29faa5640516
--- /dev/null
+++ b/drivers/staging/wfx/data_tx.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Datapath implementation.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_DATA_TX_H
+#define WFX_DATA_TX_H
+
+#include <linux/list.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+// FIXME: use IEEE80211_NUM_TIDS
+#define WFX_MAX_TID 8
+
+struct wfx_tx_priv;
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_link_status {
+ WFX_LINK_OFF,
+ WFX_LINK_RESERVE,
+ WFX_LINK_SOFT,
+ WFX_LINK_HARD,
+};
+
+struct wfx_link_entry {
+ unsigned long timestamp;
+ enum wfx_link_status status;
+ u8 mac[ETH_ALEN];
+ u8 old_mac[ETH_ALEN];
+ u8 buffered[WFX_MAX_TID];
+ struct sk_buff_head rx_queue;
+};
+
+struct tx_policy {
+ struct list_head link;
+ u8 rates[12];
+ u8 usage_count;
+ u8 uploaded;
+};
+
+struct tx_policy_cache {
+ struct tx_policy cache[HIF_MIB_NUM_TX_RATE_RETRY_POLICIES];
+	// FIXME: use a tree and drop the hash from tx_policy
+ struct list_head used;
+ struct list_head free;
+ spinlock_t lock;
+};
+
+struct wfx_tx_priv {
+ ktime_t xmit_timestamp;
+ struct ieee80211_key_conf *hw_key;
+ u8 link_id;
+ u8 raw_link_id;
+ u8 tid;
+} __packed;
+
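+/* wfx_tx_priv lives in ieee80211_tx_info.rate_driver_data, so it must fit in
+ * that field; wfx_tx() enforces this with compiletime_assert(). A minimal
+ * sketch of the same check using BUILD_BUG_ON (illustrative only):
+ *
+ *	BUILD_BUG_ON(sizeof(struct wfx_tx_priv) >
+ *		     FIELD_SIZEOF(struct ieee80211_tx_info, rate_driver_data));
+ */
+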
+void wfx_tx_policy_init(struct wfx_vif *wvif);
+
+void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, struct hif_cnf_tx *arg);
+void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
+
+int wfx_unmap_link(struct wfx_vif *wvif, int link_id);
+void wfx_link_id_work(struct work_struct *work);
+void wfx_link_id_gc_work(struct work_struct *work);
+int wfx_find_link_id(struct wfx_vif *wvif, const u8 *mac);
+
+static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info;
+
+ if (!skb)
+ return NULL;
+ tx_info = IEEE80211_SKB_CB(skb);
+ return (struct wfx_tx_priv *)tx_info->rate_driver_data;
+}
+
+static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *) hif->body;
+
+ return req;
+}
+
+#endif /* WFX_DATA_TX_H */
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
new file mode 100644
index 000000000000..d17a75242365
--- /dev/null
+++ b/drivers/staging/wfx/debug.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+
+#include "debug.h"
+#include "wfx.h"
+#include "sta.h"
+#include "main.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
+#define CREATE_TRACE_POINTS
+#include "traces.h"
+
+static const struct trace_print_flags hif_msg_print_map[] = {
+ hif_msg_list,
+};
+
+static const struct trace_print_flags hif_mib_print_map[] = {
+ hif_mib_list,
+};
+
+static const struct trace_print_flags wfx_reg_print_map[] = {
+ wfx_reg_list,
+};
+
+static const char *get_symbol(unsigned long val,
+ const struct trace_print_flags *symbol_array)
+{
+ int i;
+
+ for (i = 0; symbol_array[i].mask != -1; i++) {
+ if (val == symbol_array[i].mask)
+ return symbol_array[i].name;
+ }
+
+ return "unknown";
+}
+
+const char *get_hif_name(unsigned long id)
+{
+ return get_symbol(id, hif_msg_print_map);
+}
+
+const char *get_mib_name(unsigned long id)
+{
+ return get_symbol(id, hif_mib_print_map);
+}
+
+const char *get_reg_name(unsigned long id)
+{
+ return get_symbol(id, wfx_reg_print_map);
+}
+
+static int wfx_counters_show(struct seq_file *seq, void *v)
+{
+ int ret;
+ struct wfx_dev *wdev = seq->private;
+ struct hif_mib_extended_count_table counters;
+
+ ret = hif_get_counters_table(wdev, &counters);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -EIO;
+
+#define PUT_COUNTER(name) \
+ seq_printf(seq, "%24s %d\n", #name ":",\
+ le32_to_cpu(counters.count_##name))
+
+ PUT_COUNTER(tx_packets);
+ PUT_COUNTER(tx_multicast_frames);
+ PUT_COUNTER(tx_frames_success);
+ PUT_COUNTER(tx_frame_failures);
+ PUT_COUNTER(tx_frames_retried);
+ PUT_COUNTER(tx_frames_multi_retried);
+
+ PUT_COUNTER(rts_success);
+ PUT_COUNTER(rts_failures);
+ PUT_COUNTER(ack_failures);
+
+ PUT_COUNTER(rx_packets);
+ PUT_COUNTER(rx_frames_success);
+ PUT_COUNTER(rx_packet_errors);
+ PUT_COUNTER(plcp_errors);
+ PUT_COUNTER(fcs_errors);
+ PUT_COUNTER(rx_decryption_failures);
+ PUT_COUNTER(rx_mic_failures);
+ PUT_COUNTER(rx_no_key_failures);
+ PUT_COUNTER(rx_frame_duplicates);
+ PUT_COUNTER(rx_multicast_frames);
+ PUT_COUNTER(rx_cmacicv_errors);
+ PUT_COUNTER(rx_cmac_replays);
+ PUT_COUNTER(rx_mgmt_ccmp_replays);
+
+ PUT_COUNTER(rx_beacon);
+ PUT_COUNTER(miss_beacon);
+
+#undef PUT_COUNTER
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_counters);
+
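+/* Each PUT_COUNTER() expansion prints one right-aligned "name: value" line,
+ * e.g. (illustrative output):
+ *	             tx_packets: 1234
+ *	    tx_multicast_frames: 56
+ */
+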
+static const char * const channel_names[] = {
+ [0] = "1M",
+ [1] = "2M",
+ [2] = "5.5M",
+ [3] = "11M",
+	/* Entries 4 and 5 do not exist */
+ [6] = "6M",
+ [7] = "9M",
+ [8] = "12M",
+ [9] = "18M",
+ [10] = "24M",
+ [11] = "36M",
+ [12] = "48M",
+ [13] = "54M",
+ [14] = "MCS0",
+ [15] = "MCS1",
+ [16] = "MCS2",
+ [17] = "MCS3",
+ [18] = "MCS4",
+ [19] = "MCS5",
+ [20] = "MCS6",
+ [21] = "MCS7",
+};
+
+static int wfx_rx_stats_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct hif_rx_stats *st = &wdev->rx_stats;
+ int i;
+
+ mutex_lock(&wdev->rx_stats_lock);
+ seq_printf(seq, "Timestamp: %dus\n", st->date);
+ seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
+ st->pwr_clk_freq,
+ st->is_ext_pwr_clk ? "yes" : "no");
+ seq_printf(seq,
+		   "N. of frames: %d, PER (x10e4): %d, Throughput: %dKbps\n",
+ st->nb_rx_frame, st->per_total, st->throughput);
+ seq_puts(seq, " Num. of PER RSSI SNR CFO\n");
+ seq_puts(seq, " frames (x10e4) (dBm) (dB) (kHz)\n");
+ for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
+ if (channel_names[i])
+ seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
+ channel_names[i], st->nb_rx_by_rate[i],
+ st->per[i], st->rssi[i] / 100,
+ st->snr[i] / 100, st->cfo[i]);
+ }
+ mutex_unlock(&wdev->rx_stats_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
+
+static ssize_t wfx_send_pds_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+ char *buf;
+ int ret;
+
+ if (*ppos != 0) {
+		dev_dbg(wdev->dev, "PDS data must be written in one transaction\n");
+ return -EBUSY;
+ }
+ buf = memdup_user(user_buf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+ *ppos = *ppos + count;
+ ret = wfx_send_pds(wdev, buf, count);
+ kfree(buf);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static const struct file_operations wfx_send_pds_fops = {
+ .open = simple_open,
+ .write = wfx_send_pds_write,
+};
+
+static ssize_t wfx_burn_slk_key_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wfx_dev *wdev = file->private_data;
+
+ dev_info(wdev->dev, "this driver does not support secure link\n");
+ return -EINVAL;
+}
+
+static const struct file_operations wfx_burn_slk_key_fops = {
+ .open = simple_open,
+ .write = wfx_burn_slk_key_write,
+};
+
+struct dbgfs_hif_msg {
+ struct wfx_dev *wdev;
+ struct completion complete;
+ u8 reply[1024];
+ int ret;
+};
+
+static ssize_t wfx_send_hif_msg_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ struct wfx_dev *wdev = context->wdev;
+ struct hif_msg *request;
+
+ if (completion_done(&context->complete)) {
+		dev_dbg(wdev->dev, "read the previous result before starting a new one\n");
+ return -EBUSY;
+ }
+ if (count < sizeof(struct hif_msg))
+ return -EINVAL;
+
+	// wfx_cmd_send() checks that the reply buffer is wide enough, but does
+	// not return the precise number of bytes read. The user has to know how
+	// many bytes should be read. Filling the reply buffer with a memory
+	// pattern may help the user.
+ memset(context->reply, 0xFF, sizeof(context->reply));
+ request = memdup_user(user_buf, count);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
+ if (request->len != count) {
+ kfree(request);
+ return -EINVAL;
+ }
+ context->ret = wfx_cmd_send(wdev, request, context->reply,
+ sizeof(context->reply), false);
+
+ kfree(request);
+ complete(&context->complete);
+ return count;
+}
+
+static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+ int ret;
+
+ if (count > sizeof(context->reply))
+ return -EINVAL;
+ ret = wait_for_completion_interruptible(&context->complete);
+ if (ret)
+ return ret;
+ if (context->ret < 0)
+ return context->ret;
+	// Be careful: write() expects a full message while read()
+	// only returns the payload
+ if (copy_to_user(user_buf, context->reply, count))
+ return -EFAULT;
+
+ return count;
+}
+
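+/* Typical usage from user space (illustrative): write one complete hif_msg to
+ * the "send_hif_msg" debugfs file, then read back up to 1024 bytes. Since the
+ * reply buffer is pre-filled with 0xFF, the caller can tell how much of it
+ * the command actually overwrote.
+ */
+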
+static int wfx_send_hif_msg_open(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+ if (!context)
+ return -ENOMEM;
+ context->wdev = inode->i_private;
+ init_completion(&context->complete);
+ file->private_data = context;
+ return 0;
+}
+
+static int wfx_send_hif_msg_release(struct inode *inode, struct file *file)
+{
+ struct dbgfs_hif_msg *context = file->private_data;
+
+ kfree(context);
+ return 0;
+}
+
+static const struct file_operations wfx_send_hif_msg_fops = {
+ .open = wfx_send_hif_msg_open,
+ .release = wfx_send_hif_msg_release,
+ .write = wfx_send_hif_msg_write,
+ .read = wfx_send_hif_msg_read,
+};
+
+int wfx_debug_init(struct wfx_dev *wdev)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
+ debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
+ debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
+ debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
+ debugfs_create_file("burn_slk_key", 0200, d, wdev,
+ &wfx_burn_slk_key_fops);
+ debugfs_create_file("send_hif_msg", 0600, d, wdev,
+ &wfx_send_hif_msg_fops);
+
+ return 0;
+}
diff --git a/drivers/staging/wfx/debug.h b/drivers/staging/wfx/debug.h
new file mode 100644
index 000000000000..6f2f84d64c9e
--- /dev/null
+++ b/drivers/staging/wfx/debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Debugfs interface.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2011, ST-Ericsson
+ */
+#ifndef WFX_DEBUG_H
+#define WFX_DEBUG_H
+
+struct wfx_dev;
+
+int wfx_debug_init(struct wfx_dev *wdev);
+
+const char *get_hif_name(unsigned long id);
+const char *get_mib_name(unsigned long id);
+const char *get_reg_name(unsigned long id);
+
+#endif /* WFX_DEBUG_H */
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
new file mode 100644
index 000000000000..dbf8bda71ff7
--- /dev/null
+++ b/drivers/staging/wfx/fwio.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitfield.h>
+
+#include "fwio.h"
+#include "wfx.h"
+#include "hwio.h"
+
+// Addresses below are in SRAM area
+#define WFX_DNLD_FIFO 0x09004000
+#define DNLD_BLOCK_SIZE 0x0400
+#define DNLD_FIFO_SIZE 0x8000 // (32 * DNLD_BLOCK_SIZE)
+// Download Control Area (DCA)
+#define WFX_DCA_IMAGE_SIZE 0x0900C000
+#define WFX_DCA_PUT 0x0900C004
+#define WFX_DCA_GET 0x0900C008
+#define WFX_DCA_HOST_STATUS 0x0900C00C
+#define HOST_READY 0x87654321
+#define HOST_INFO_READ 0xA753BD99
+#define HOST_UPLOAD_PENDING 0xABCDDCBA
+#define HOST_UPLOAD_COMPLETE 0xD4C64A99
+#define HOST_OK_TO_JUMP 0x174FC882
+#define WFX_DCA_NCP_STATUS 0x0900C010
+#define NCP_NOT_READY 0x12345678
+#define NCP_READY 0x87654321
+#define NCP_INFO_READY 0xBD53EF99
+#define NCP_DOWNLOAD_PENDING 0xABCDDCBA
+#define NCP_DOWNLOAD_COMPLETE 0xCAFEFECA
+#define NCP_AUTH_OK 0xD4C64A99
+#define NCP_AUTH_FAIL 0x174FC882
+#define NCP_PUB_KEY_RDY 0x7AB41D19
+#define WFX_DCA_FW_SIGNATURE 0x0900C014
+#define FW_SIGNATURE_SIZE 0x40
+#define WFX_DCA_FW_HASH 0x0900C054
+#define FW_HASH_SIZE 0x08
+#define WFX_DCA_FW_VERSION 0x0900C05C
+#define FW_VERSION_SIZE 0x04
+#define WFX_DCA_RESERVED 0x0900C060
+#define DCA_RESERVED_SIZE 0x20
+#define WFX_STATUS_INFO 0x0900C080
+#define WFX_BOOTLOADER_LABEL 0x0900C084
+#define BOOTLOADER_LABEL_SIZE 0x3C
+#define WFX_PTE_INFO 0x0900C0C0
+#define PTE_INFO_KEYSET_IDX 0x0D
+#define PTE_INFO_SIZE 0x10
+#define WFX_ERR_INFO 0x0900C0D0
+#define ERR_INVALID_SEC_TYPE 0x05
+#define ERR_SIG_VERIF_FAILED 0x0F
+#define ERR_AES_CTRL_KEY 0x10
+#define ERR_ECC_PUB_KEY 0x11
+#define ERR_MAC_KEY 0x18
+
+#define DCA_TIMEOUT 50 // milliseconds
+#define WAKEUP_TIMEOUT 200 // milliseconds
+
+static const char * const fwio_error_strings[] = {
+ [ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
+ [ERR_SIG_VERIF_FAILED] = "Signature verification failed",
+ [ERR_AES_CTRL_KEY] = "AES control key not initialized",
+ [ERR_ECC_PUB_KEY] = "ECC public key not initialized",
+ [ERR_MAC_KEY] = "MAC key not initialized",
+};
+
+/*
+ * request_firmware() allocates data using vmalloc(). This is not compatible
+ * with underlying hardware that uses DMA. The function below detects this
+ * case and allocates a bounce buffer if necessary.
+ *
+ * Note that, if in doubt, you can enable CONFIG_DEBUG_SG to ask the kernel to
+ * detect this problem at runtime (otherwise, the kernel fails silently).
+ *
+ * NOTE: it may also be possible to use 'pages' from struct firmware and avoid
+ * the bounce buffer.
+ */
+static int sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf,
+ size_t len)
+{
+ int ret;
+ const u8 *tmp;
+
+ if (!virt_addr_valid(buf)) {
+ tmp = kmemdup(buf, len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ } else {
+ tmp = buf;
+ }
+ ret = sram_buf_write(wdev, addr, tmp, len);
+ if (!virt_addr_valid(buf))
+ kfree(tmp);
+ return ret;
+}
+
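+/* Illustrative call: fw->data returned by request_firmware() is usually
+ * vmalloc()ed, so virt_addr_valid() is false and the write goes through a
+ * kmemdup() bounce buffer, while a kmalloc()ed buffer is written directly:
+ *
+ *	sram_write_dma_safe(wdev, WFX_DNLD_FIFO, fw->data, DNLD_BLOCK_SIZE);
+ */
+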
+int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
+ const struct firmware **fw, int *file_offset)
+{
+ int keyset_file;
+ char filename[256];
+ const char *data;
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s_%02X.sec", wdev->pdata.file_fw,
+ keyset_chip);
+ ret = firmware_request_nowarn(fw, filename, wdev->dev);
+ if (ret) {
+ dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
+ filename, wdev->pdata.file_fw);
+ snprintf(filename, sizeof(filename), "%s.sec",
+ wdev->pdata.file_fw);
+ ret = request_firmware(fw, filename, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load %s\n", filename);
+ *fw = NULL;
+ return ret;
+ }
+ }
+
+ data = (*fw)->data;
+ if (memcmp(data, "KEYSET", 6) != 0) {
+ // Legacy firmware format
+ *file_offset = 0;
+ keyset_file = 0x90;
+ } else {
+ *file_offset = 8;
+ keyset_file = (hex_to_bin(data[6]) * 16) | hex_to_bin(data[7]);
+ if (keyset_file < 0) {
+ dev_err(wdev->dev, "%s corrupted\n", filename);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -EINVAL;
+ }
+ }
+ if (keyset_file != keyset_chip) {
+ dev_err(wdev->dev, "firmware keyset is incompatible with chip (file: 0x%02X, chip: 0x%02X)\n",
+ keyset_file, keyset_chip);
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENODEV;
+ }
+ wdev->keyset = keyset_file;
+ return 0;
+}
+
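+/* Firmware naming example (hypothetical values): with pdata.file_fw set to
+ * "wfm_fw" and a chip keyset of 0xC0, the driver first tries "wfm_fw_C0.sec"
+ * and then falls back to "wfm_fw.sec". Keyed files start with the 8-byte
+ * header "KEYSETxy", where "xy" is the keyset in hexadecimal.
+ */
+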
+static int wait_ncp_status(struct wfx_dev *wdev, u32 status)
+{
+ ktime_t now, start;
+ u32 reg;
+ int ret;
+
+ start = ktime_get();
+ for (;;) {
+ ret = sram_reg_read(wdev, WFX_DCA_NCP_STATUS, &reg);
+ if (ret < 0)
+ return -EIO;
+ now = ktime_get();
+ if (reg == status)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "chip answer after %lldus\n",
+ ktime_us_delta(now, start));
+ else
+ dev_dbg(wdev->dev, "chip answer immediately\n");
+ return 0;
+}
+
+static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
+{
+ int ret;
+ u32 offs, bytes_done;
+ ktime_t now, start;
+
+ if (len % DNLD_BLOCK_SIZE) {
+ dev_err(wdev->dev, "firmware size is not aligned. Buffer overrun will occur\n");
+ return -EIO;
+ }
+ offs = 0;
+ while (offs < len) {
+ start = ktime_get();
+ for (;;) {
+ ret = sram_reg_read(wdev, WFX_DCA_GET, &bytes_done);
+ if (ret < 0)
+ return ret;
+ now = ktime_get();
+ if (offs +
+ DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, DCA_TIMEOUT)))
+ return -ETIMEDOUT;
+ }
+ if (ktime_compare(now, start))
+ dev_dbg(wdev->dev, "answer after %lldus\n",
+ ktime_us_delta(now, start));
+
+ ret = sram_write_dma_safe(wdev, WFX_DNLD_FIFO +
+ (offs % DNLD_FIFO_SIZE),
+ data + offs, DNLD_BLOCK_SIZE);
+ if (ret < 0)
+ return ret;
+
+		// The WFx does not seem to support writing 0 to this register
+		// during the first iteration
+ offs += DNLD_BLOCK_SIZE;
+ ret = sram_reg_write(wdev, WFX_DCA_PUT, offs);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
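+/* Flow-control reading (illustrative): with DNLD_FIFO_SIZE = 32 KiB and
+ * DNLD_BLOCK_SIZE = 1 KiB, the check
+ *	offs + DNLD_BLOCK_SIZE - bytes_done < DNLD_FIFO_SIZE
+ * lets the host stay at most 31 blocks ahead of the chip's read pointer, so
+ * the circular download FIFO never overruns.
+ */
+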
+static void print_boot_status(struct wfx_dev *wdev)
+{
+ u32 val32;
+
+ sram_reg_read(wdev, WFX_STATUS_INFO, &val32);
+ if (val32 == 0x12345678) {
+ dev_info(wdev->dev, "no error reported by secure boot\n");
+ } else {
+ sram_reg_read(wdev, WFX_ERR_INFO, &val32);
+ if (val32 < ARRAY_SIZE(fwio_error_strings) &&
+ fwio_error_strings[val32])
+ dev_info(wdev->dev, "secure boot error: %s\n",
+ fwio_error_strings[val32]);
+ else
+ dev_info(wdev->dev,
+ "secure boot error: Unknown (0x%02x)\n",
+ val32);
+ }
+}
+
+static int load_firmware_secure(struct wfx_dev *wdev)
+{
+ const struct firmware *fw = NULL;
+ int header_size;
+ int fw_offset;
+ ktime_t start;
+ u8 *buf;
+ int ret;
+
+ BUILD_BUG_ON(PTE_INFO_SIZE > BOOTLOADER_LABEL_SIZE);
+ buf = kmalloc(BOOTLOADER_LABEL_SIZE + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_READY);
+ ret = wait_ncp_status(wdev, NCP_INFO_READY);
+ if (ret)
+ goto error;
+
+ sram_buf_read(wdev, WFX_BOOTLOADER_LABEL, buf, BOOTLOADER_LABEL_SIZE);
+ buf[BOOTLOADER_LABEL_SIZE] = 0;
+ dev_dbg(wdev->dev, "bootloader: \"%s\"\n", buf);
+
+ sram_buf_read(wdev, WFX_PTE_INFO, buf, PTE_INFO_SIZE);
+ ret = get_firmware(wdev, buf[PTE_INFO_KEYSET_IDX], &fw, &fw_offset);
+ if (ret)
+ goto error;
+ header_size = fw_offset + FW_SIGNATURE_SIZE + FW_HASH_SIZE;
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_INFO_READ);
+ ret = wait_ncp_status(wdev, NCP_READY);
+ if (ret)
+ goto error;
+
+ sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); // Fifo init
+ sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00",
+ FW_VERSION_SIZE);
+ sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset,
+ FW_SIGNATURE_SIZE);
+ sram_write_dma_safe(wdev, WFX_DCA_FW_HASH,
+ fw->data + fw_offset + FW_SIGNATURE_SIZE,
+ FW_HASH_SIZE);
+ sram_reg_write(wdev, WFX_DCA_IMAGE_SIZE, fw->size - header_size);
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_PENDING);
+ ret = wait_ncp_status(wdev, NCP_DOWNLOAD_PENDING);
+ if (ret)
+ goto error;
+
+ start = ktime_get();
+ ret = upload_firmware(wdev, fw->data + header_size,
+ fw->size - header_size);
+ if (ret)
+ goto error;
+ dev_dbg(wdev->dev, "firmware load after %lldus\n",
+ ktime_us_delta(ktime_get(), start));
+
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE);
+ ret = wait_ncp_status(wdev, NCP_AUTH_OK);
+ // Legacy ROM support
+ if (ret < 0)
+ ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY);
+ if (ret < 0)
+ goto error;
+ sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_OK_TO_JUMP);
+
+error:
+ kfree(buf);
+ if (fw)
+ release_firmware(fw);
+ if (ret)
+ print_boot_status(wdev);
+ return ret;
+}
+
+static int init_gpr(struct wfx_dev *wdev)
+{
+ int ret, i;
+ static const struct {
+ int index;
+ u32 value;
+ } gpr_init[] = {
+ { 0x07, 0x208775 },
+ { 0x08, 0x2EC020 },
+ { 0x09, 0x3C3C3C },
+ { 0x0B, 0x322C44 },
+ { 0x0C, 0xA06497 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(gpr_init); i++) {
+ ret = igpr_reg_write(wdev, gpr_init[i].index,
+ gpr_init[i].value);
+ if (ret < 0)
+ return ret;
+ dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index,
+ gpr_init[i].value);
+ }
+ return 0;
+}
+
+int wfx_init_device(struct wfx_dev *wdev)
+{
+ int ret;
+ int hw_revision, hw_type;
+ int wakeup_timeout = 50; // ms
+ ktime_t now, start;
+ u32 reg;
+
+ reg = CFG_DIRECT_ACCESS_MODE | CFG_CPU_RESET | CFG_WORD_MODE2;
+ if (wdev->pdata.use_rising_clk)
+ reg |= CFG_CLK_RISE_EDGE;
+ ret = config_reg_write(wdev, reg);
+ if (ret < 0) {
+		dev_err(wdev->dev, "bus returned an error during the first write access. Host configuration error?\n");
+ return -EIO;
+ }
+
+ ret = config_reg_read(wdev, &reg);
+ if (ret < 0) {
+		dev_err(wdev->dev, "bus returned an error during the first read access. Bus configuration error?\n");
+ return -EIO;
+ }
+ if (reg == 0 || reg == ~0) {
+		dev_err(wdev->dev, "chip is mute. Bus configuration error or chip wasn't reset?\n");
+ return -EIO;
+ }
+ dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
+
+ hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
+ if (hw_revision == 0 || hw_revision > 2) {
+ dev_err(wdev->dev, "bad hardware revision number: %d\n",
+ hw_revision);
+ return -ENODEV;
+ }
+ hw_type = FIELD_GET(CFG_DEVICE_ID_TYPE, reg);
+ if (hw_type == 1) {
+ dev_notice(wdev->dev, "development hardware detected\n");
+ wakeup_timeout = 2000;
+ }
+
+ ret = init_gpr(wdev);
+ if (ret < 0)
+ return ret;
+
+ ret = control_reg_write(wdev, CTRL_WLAN_WAKEUP);
+ if (ret < 0)
+ return -EIO;
+ start = ktime_get();
+ for (;;) {
+ ret = control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & CTRL_WLAN_READY)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, wakeup_timeout))) {
+ dev_err(wdev->dev, "chip didn't wake up. Chip wasn't reset?\n");
+ return -ETIMEDOUT;
+ }
+ }
+ dev_dbg(wdev->dev, "chip wake up after %lldus\n",
+ ktime_us_delta(now, start));
+
+ ret = config_reg_write_bits(wdev, CFG_CPU_RESET, 0);
+ if (ret < 0)
+ return ret;
+ ret = load_firmware_secure(wdev);
+ if (ret < 0)
+ return ret;
+ ret = config_reg_write_bits(wdev,
+ CFG_DIRECT_ACCESS_MODE |
+ CFG_IRQ_ENABLE_DATA |
+ CFG_IRQ_ENABLE_WRDY,
+ CFG_IRQ_ENABLE_DATA);
+ return ret;
+}
diff --git a/drivers/staging/wfx/fwio.h b/drivers/staging/wfx/fwio.h
new file mode 100644
index 000000000000..6028f92503fe
--- /dev/null
+++ b/drivers/staging/wfx/fwio.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Firmware loading.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_FWIO_H
+#define WFX_FWIO_H
+
+struct wfx_dev;
+
+int wfx_init_device(struct wfx_dev *wdev);
+
+#endif /* WFX_FWIO_H */
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
new file mode 100644
index 000000000000..c15831de4ff4
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_CMD_H
+#define WFX_HIF_API_CMD_H
+
+#include "hif_api_general.h"
+
+#define HIF_NUM_AC 4
+
+#define HIF_API_SSID_SIZE API_SSID_SIZE
+
+enum hif_requests_ids {
+ HIF_REQ_ID_RESET = 0x0a,
+ HIF_REQ_ID_READ_MIB = 0x05,
+ HIF_REQ_ID_WRITE_MIB = 0x06,
+ HIF_REQ_ID_START_SCAN = 0x07,
+ HIF_REQ_ID_STOP_SCAN = 0x08,
+ HIF_REQ_ID_TX = 0x04,
+ HIF_REQ_ID_JOIN = 0x0b,
+ HIF_REQ_ID_SET_PM_MODE = 0x10,
+ HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
+ HIF_REQ_ID_ADD_KEY = 0x0c,
+ HIF_REQ_ID_REMOVE_KEY = 0x0d,
+ HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_REQ_ID_START = 0x17,
+ HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
+ HIF_REQ_ID_UPDATE_IE = 0x1b,
+ HIF_REQ_ID_MAP_LINK = 0x1c,
+};
+
+enum hif_confirmations_ids {
+ HIF_CNF_ID_RESET = 0x0a,
+ HIF_CNF_ID_READ_MIB = 0x05,
+ HIF_CNF_ID_WRITE_MIB = 0x06,
+ HIF_CNF_ID_START_SCAN = 0x07,
+ HIF_CNF_ID_STOP_SCAN = 0x08,
+ HIF_CNF_ID_TX = 0x04,
+ HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
+ HIF_CNF_ID_JOIN = 0x0b,
+ HIF_CNF_ID_SET_PM_MODE = 0x10,
+ HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
+ HIF_CNF_ID_ADD_KEY = 0x0c,
+ HIF_CNF_ID_REMOVE_KEY = 0x0d,
+ HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_CNF_ID_START = 0x17,
+ HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
+ HIF_CNF_ID_UPDATE_IE = 0x1b,
+ HIF_CNF_ID_MAP_LINK = 0x1c,
+};
+
+enum hif_indications_ids {
+ HIF_IND_ID_RX = 0x84,
+ HIF_IND_ID_SCAN_CMPL = 0x86,
+ HIF_IND_ID_JOIN_COMPLETE = 0x8f,
+ HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
+ HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
+ HIF_IND_ID_EVENT = 0x85
+};
+
+union hif_commands_ids {
+ enum hif_requests_ids request;
+ enum hif_confirmations_ids confirmation;
+ enum hif_indications_ids indication;
+};
+
+enum hif_status {
+ HIF_STATUS_SUCCESS = 0x0,
+ HIF_STATUS_FAILURE = 0x1,
+ HIF_INVALID_PARAMETER = 0x2,
+ HIF_STATUS_WARNING = 0x3,
+ HIF_ERROR_UNSUPPORTED_MSG_ID = 0x4,
+ HIF_STATUS_DECRYPTFAILURE = 0x10,
+ HIF_STATUS_MICFAILURE = 0x11,
+ HIF_STATUS_NO_KEY_FOUND = 0x12,
+ HIF_STATUS_RETRY_EXCEEDED = 0x13,
+ HIF_STATUS_TX_LIFETIME_EXCEEDED = 0x14,
+ HIF_REQUEUE = 0x15,
+ HIF_STATUS_REFUSED = 0x16,
+ HIF_STATUS_BUSY = 0x17
+};
+
+struct hif_reset_flags {
+ u8 reset_stat:1;
+ u8 reset_all_int:1;
+ u8 reserved1:6;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_req_reset {
+ struct hif_reset_flags reset_flags;
+} __packed;
+
+struct hif_cnf_reset {
+ u32 status;
+} __packed;
+
+struct hif_req_read_mib {
+ u16 mib_id;
+ u16 reserved;
+} __packed;
+
+struct hif_cnf_read_mib {
+ u32 status;
+ u16 mib_id;
+ u16 length;
+ u8 mib_data[];
+} __packed;
+
+struct hif_req_write_mib {
+ u16 mib_id;
+ u16 length;
+ u8 mib_data[];
+} __packed;
+
+struct hif_cnf_write_mib {
+ u32 status;
+} __packed;
+
+struct hif_ie_flags {
+ u8 beacon:1;
+ u8 probe_resp:1;
+ u8 probe_req:1;
+ u8 reserved1:5;
+ u8 reserved2;
+} __packed;
+
+struct hif_ie_tlv {
+ u8 type;
+ u8 length;
+ u8 data[];
+} __packed;
+
+struct hif_req_update_ie {
+ struct hif_ie_flags ie_flags;
+ u16 num_i_es;
+ struct hif_ie_tlv ie[];
+} __packed;
+
+struct hif_cnf_update_ie {
+ u32 status;
+} __packed;
+
+struct hif_scan_type {
+ u8 type:1;
+ u8 mode:1;
+ u8 reserved:6;
+} __packed;
+
+struct hif_scan_flags {
+ u8 fbg:1;
+ u8 reserved1:1;
+ u8 pre:1;
+ u8 reserved2:5;
+} __packed;
+
+struct hif_auto_scan_param {
+ u16 interval;
+ u8 reserved;
+ s8 rssi_thr;
+} __packed;
+
+struct hif_ssid_def {
+ u32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+} __packed;
+
+#define HIF_API_MAX_NB_SSIDS 2
+#define HIF_API_MAX_NB_CHANNELS 14
+
+struct hif_req_start_scan {
+ u8 band;
+ struct hif_scan_type scan_type;
+ struct hif_scan_flags scan_flags;
+ u8 max_transmit_rate;
+ struct hif_auto_scan_param auto_scan_param;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssi_ds;
+ u8 num_of_channels;
+ u32 min_channel_time;
+ u32 max_channel_time;
+ s32 tx_power_level;
+ u8 ssid_and_channel_lists[];
+} __packed;
+
+struct hif_start_scan_req_cstnbssid_body {
+ u8 band;
+ struct hif_scan_type scan_type;
+ struct hif_scan_flags scan_flags;
+ u8 max_transmit_rate;
+ struct hif_auto_scan_param auto_scan_param;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssi_ds;
+ u8 num_of_channels;
+ u32 min_channel_time;
+ u32 max_channel_time;
+ s32 tx_power_level;
+ struct hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
+ u8 channel_list[];
+} __packed;
+
+struct hif_cnf_start_scan {
+ u32 status;
+} __packed;
+
+struct hif_cnf_stop_scan {
+ u32 status;
+} __packed;
+
+enum hif_pm_mode_status {
+ HIF_PM_MODE_ACTIVE = 0x0,
+ HIF_PM_MODE_PS = 0x1,
+ HIF_PM_MODE_UNDETERMINED = 0x2
+};
+
+struct hif_ind_scan_cmpl {
+ u32 status;
+ u8 pm_mode;
+ u8 num_channels_completed;
+ u16 reserved;
+} __packed;
+
+enum hif_queue_id {
+ HIF_QUEUE_ID_BACKGROUND = 0x0,
+ HIF_QUEUE_ID_BESTEFFORT = 0x1,
+ HIF_QUEUE_ID_VIDEO = 0x2,
+ HIF_QUEUE_ID_VOICE = 0x3
+};
+
+enum hif_frame_format {
+ HIF_FRAME_FORMAT_NON_HT = 0x0,
+ HIF_FRAME_FORMAT_MIXED_FORMAT_HT = 0x1,
+ HIF_FRAME_FORMAT_GF_HT_11N = 0x2
+};
+
+enum hif_stbc {
+ HIF_STBC_NOT_ALLOWED = 0x0,
+ HIF_STBC_ALLOWED = 0x1
+};
+
+struct hif_queue {
+ u8 queue_id:2;
+ u8 peer_sta_id:4;
+ u8 reserved:2;
+} __packed;
+
+struct hif_data_flags {
+ u8 more:1;
+ u8 fc_offset:3;
+ u8 reserved:4;
+} __packed;
+
+struct hif_tx_flags {
+ u8 start_exp:1;
+ u8 reserved:3;
+ u8 retry_policy_index:4;
+} __packed;
+
+struct hif_ht_tx_parameters {
+ u8 frame_format:4;
+ u8 fec_coding:1;
+ u8 short_gi:1;
+ u8 reserved1:1;
+ u8 stbc:1;
+ u8 reserved2;
+ u8 aggregation:1;
+ u8 reserved3:7;
+ u8 reserved4;
+} __packed;
+
+struct hif_req_tx {
+ u32 packet_id;
+ u8 max_tx_rate;
+ struct hif_queue queue_id;
+ struct hif_data_flags data_flags;
+ struct hif_tx_flags tx_flags;
+ u32 reserved;
+ u32 expire_time;
+ struct hif_ht_tx_parameters ht_tx_parameters;
+ u8 frame[];
+} __packed;
+
+enum hif_qos_ackplcy {
+ HIF_QOS_ACKPLCY_NORMAL = 0x0,
+ HIF_QOS_ACKPLCY_TXNOACK = 0x1,
+ HIF_QOS_ACKPLCY_NOEXPACK = 0x2,
+ HIF_QOS_ACKPLCY_BLCKACK = 0x3
+};
+
+struct hif_tx_result_flags {
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
+} __packed;
+
+struct hif_cnf_tx {
+ u32 status;
+ u32 packet_id;
+ u8 txed_rate;
+ u8 ack_failures;
+ struct hif_tx_result_flags tx_result_flags;
+ u32 media_delay;
+ u32 tx_queue_delay;
+} __packed;
+
+struct hif_cnf_multi_transmit {
+ u32 num_tx_confs;
+ struct hif_cnf_tx tx_conf_payload[];
+} __packed;
+
+enum hif_ri_flags_encrypt {
+ HIF_RI_FLAGS_UNENCRYPTED = 0x0,
+ HIF_RI_FLAGS_WEP_ENCRYPTED = 0x1,
+ HIF_RI_FLAGS_TKIP_ENCRYPTED = 0x2,
+ HIF_RI_FLAGS_AES_ENCRYPTED = 0x3,
+ HIF_RI_FLAGS_WAPI_ENCRYPTED = 0x4
+};
+
+struct hif_rx_flags {
+ u8 encryp:3;
+ u8 in_aggr:1;
+ u8 first_aggr:1;
+ u8 last_aggr:1;
+ u8 defrag:1;
+ u8 beacon:1;
+ u8 tim:1;
+ u8 bitmap:1;
+ u8 match_ssid:1;
+ u8 match_bssid:1;
+ u8 more:1;
+ u8 reserved1:1;
+ u8 ht:1;
+ u8 stbc:1;
+ u8 match_uc_addr:1;
+ u8 match_mc_addr:1;
+ u8 match_bc_addr:1;
+ u8 key_type:1;
+ u8 key_index:4;
+ u8 reserved2:1;
+ u8 peer_sta_id:4;
+ u8 reserved3:2;
+ u8 reserved4:1;
+} __packed;
+
+struct hif_ind_rx {
+ u32 status;
+ u16 channel_number;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
+ struct hif_rx_flags rx_flags;
+ u8 frame[];
+} __packed;
+
+struct hif_req_edca_queue_params {
+ u8 queue_id;
+ u8 reserved1;
+ u8 aifsn;
+ u8 reserved2;
+ u16 cw_min;
+ u16 cw_max;
+ u16 tx_op_limit;
+ u16 allowed_medium_time;
+ u32 reserved3;
+} __packed;
+
+struct hif_cnf_edca_queue_params {
+ u32 status;
+} __packed;
+
+enum hif_ap_mode {
+ HIF_MODE_IBSS = 0x0,
+ HIF_MODE_BSS = 0x1
+};
+
+enum hif_preamble {
+ HIF_PREAMBLE_LONG = 0x0,
+ HIF_PREAMBLE_SHORT = 0x1,
+ HIF_PREAMBLE_SHORT_LONG12 = 0x2
+};
+
+struct hif_join_flags {
+ u8 reserved1:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved2:4;
+} __packed;
+
+struct hif_req_join {
+ u8 mode;
+ u8 band;
+ u16 channel_number;
+ u8 bssid[ETH_ALEN];
+ u16 atim_window;
+ u8 preamble_type;
+ u8 probe_for_join;
+ u8 reserved;
+ struct hif_join_flags join_flags;
+ u32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ u32 beacon_interval;
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_cnf_join {
+ u32 status;
+} __packed;
+
+struct hif_ind_join_complete {
+ u32 status;
+} __packed;
+
+struct hif_bss_flags {
+ u8 lost_count_only:1;
+ u8 reserved:7;
+} __packed;
+
+struct hif_req_set_bss_params {
+ struct hif_bss_flags bss_flags;
+ u8 beacon_lost_count;
+ u16 aid;
+ u32 operational_rate_set;
+} __packed;
+
+struct hif_cnf_set_bss_params {
+ u32 status;
+} __packed;
+
+struct hif_pm_mode {
+ u8 enter_psm:1;
+ u8 reserved:6;
+ u8 fast_psm:1;
+} __packed;
+
+struct hif_req_set_pm_mode {
+ struct hif_pm_mode pm_mode;
+ u8 fast_psm_idle_period;
+ u8 ap_psm_change_period;
+ u8 min_auto_ps_poll_period;
+} __packed;
+
+struct hif_cnf_set_pm_mode {
+ u32 status;
+} __packed;
+
+struct hif_ind_set_pm_mode_cmpl {
+ u32 status;
+ u8 pm_mode;
+ u8 reserved[3];
+} __packed;
+
+struct hif_req_start {
+ u8 mode;
+ u8 band;
+ u16 channel_number;
+ u32 reserved1;
+ u32 beacon_interval;
+ u8 dtim_period;
+ u8 preamble_type;
+ u8 reserved2;
+ u8 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_cnf_start {
+ u32 status;
+} __packed;
+
+enum hif_beacon {
+ HIF_BEACON_STOP = 0x0,
+ HIF_BEACON_START = 0x1
+};
+
+struct hif_req_beacon_transmit {
+ u8 enable_beaconing;
+ u8 reserved[3];
+} __packed;
+
+struct hif_cnf_beacon_transmit {
+ u32 status;
+} __packed;
+
+enum hif_sta_map_direction {
+ HIF_STA_MAP = 0x0,
+ HIF_STA_UNMAP = 0x1
+};
+
+struct hif_map_link_flags {
+ u8 map_direction:1;
+ u8 mfpc:1;
+ u8 reserved:6;
+} __packed;
+
+struct hif_req_map_link {
+ u8 mac_addr[ETH_ALEN];
+ struct hif_map_link_flags map_link_flags;
+ u8 peer_sta_id;
+} __packed;
+
+struct hif_cnf_map_link {
+ u32 status;
+} __packed;
+
+struct hif_suspend_resume_flags {
+ u8 resume:1;
+ u8 reserved1:2;
+ u8 bc_mc_only:1;
+ u8 reserved2:4;
+ u8 reserved3;
+} __packed;
+
+struct hif_ind_suspend_resume_tx {
+ struct hif_suspend_resume_flags suspend_resume_flags;
+ u16 peer_sta_set;
+} __packed;
+
+#define MAX_KEY_ENTRIES 24
+#define HIF_API_WEP_KEY_DATA_SIZE 16
+#define HIF_API_TKIP_KEY_DATA_SIZE 16
+#define HIF_API_RX_MIC_KEY_SIZE 8
+#define HIF_API_TX_MIC_KEY_SIZE 8
+#define HIF_API_AES_KEY_DATA_SIZE 16
+#define HIF_API_WAPI_KEY_DATA_SIZE 16
+#define HIF_API_MIC_KEY_DATA_SIZE 16
+#define HIF_API_IGTK_KEY_DATA_SIZE 16
+#define HIF_API_RX_SEQUENCE_COUNTER_SIZE 8
+#define HIF_API_IPN_SIZE 8
+
+enum hif_key_type {
+ HIF_KEY_TYPE_WEP_DEFAULT = 0x0,
+ HIF_KEY_TYPE_WEP_PAIRWISE = 0x1,
+ HIF_KEY_TYPE_TKIP_GROUP = 0x2,
+ HIF_KEY_TYPE_TKIP_PAIRWISE = 0x3,
+ HIF_KEY_TYPE_AES_GROUP = 0x4,
+ HIF_KEY_TYPE_AES_PAIRWISE = 0x5,
+ HIF_KEY_TYPE_WAPI_GROUP = 0x6,
+ HIF_KEY_TYPE_WAPI_PAIRWISE = 0x7,
+ HIF_KEY_TYPE_IGTK_GROUP = 0x8,
+ HIF_KEY_TYPE_NONE = 0x9
+};
+
+struct hif_wep_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved;
+ u8 key_length;
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_wep_group_key {
+ u8 key_id;
+ u8 key_length;
+ u8 reserved[2];
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_tkip_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
+} __packed;
+
+struct hif_tkip_group_key {
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct hif_aes_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_aes_group_key {
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+} __packed;
+
+struct hif_wapi_pairwise_key {
+ u8 peer_address[ETH_ALEN];
+ u8 key_id;
+ u8 reserved;
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+} __packed;
+
+struct hif_wapi_group_key {
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+} __packed;
+
+struct hif_igtk_group_key {
+ u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 ipn[HIF_API_IPN_SIZE];
+} __packed;
+
+union hif_privacy_key_data {
+ struct hif_wep_pairwise_key wep_pairwise_key;
+ struct hif_wep_group_key wep_group_key;
+ struct hif_tkip_pairwise_key tkip_pairwise_key;
+ struct hif_tkip_group_key tkip_group_key;
+ struct hif_aes_pairwise_key aes_pairwise_key;
+ struct hif_aes_group_key aes_group_key;
+ struct hif_wapi_pairwise_key wapi_pairwise_key;
+ struct hif_wapi_group_key wapi_group_key;
+ struct hif_igtk_group_key igtk_group_key;
+};
+
+struct hif_req_add_key {
+ u8 type;
+ u8 entry_index;
+ u8 int_id:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ union hif_privacy_key_data key;
+} __packed;
+
+struct hif_cnf_add_key {
+ u32 status;
+} __packed;
+
+struct hif_req_remove_key {
+ u8 entry_index;
+ u8 reserved[3];
+} __packed;
+
+struct hif_cnf_remove_key {
+ u32 status;
+} __packed;
+
+enum hif_event_ind {
+ HIF_EVENT_IND_BSSLOST = 0x1,
+ HIF_EVENT_IND_BSSREGAINED = 0x2,
+ HIF_EVENT_IND_RCPI_RSSI = 0x3,
+ HIF_EVENT_IND_PS_MODE_ERROR = 0x4,
+ HIF_EVENT_IND_INACTIVITY = 0x5
+};
+
+enum hif_ps_mode_error {
+ HIF_PS_ERROR_NO_ERROR = 0,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_POLL = 1,
+ HIF_PS_ERROR_AP_NOT_RESP_TO_UAPSD_TRIGGER = 2,
+ HIF_PS_ERROR_AP_SENT_UNICAST_IN_DOZE = 3,
+ HIF_PS_ERROR_AP_NO_DATA_AFTER_TIM = 4
+};
+
+union hif_event_data {
+ u8 rcpi_rssi;
+ u32 ps_mode_error;
+ u32 peer_sta_set;
+};
+
+struct hif_ind_event {
+ u32 event_id;
+ union hif_event_data event_data;
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
new file mode 100644
index 000000000000..a069c3a21b4d
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_GENERAL_H
+#define WFX_HIF_API_GENERAL_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#else
+#include <net/ethernet.h>
+#include <stdint.h>
+#define __packed __attribute__((__packed__))
+#endif
+
+#define API_SSID_SIZE 32
+
+#define HIF_ID_IS_INDICATION 0x80
+#define HIF_COUNTER_MAX 7
+
+struct hif_msg {
+ u16 len;
+ u8 id;
+ u8 reserved:1;
+ u8 interface:2;
+ u8 seqnum:3;
+ u8 encrypted:2;
+ u8 body[];
+} __packed;
+
+enum hif_general_requests_ids {
+ HIF_REQ_ID_CONFIGURATION = 0x09,
+ HIF_REQ_ID_CONTROL_GPIO = 0x26,
+ HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_REQ_ID_SL_CONFIGURE = 0x29,
+ HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_REQ_ID_PTA_SETTINGS = 0x2b,
+ HIF_REQ_ID_PTA_PRIORITY = 0x2c,
+ HIF_REQ_ID_PTA_STATE = 0x2d,
+ HIF_REQ_ID_SHUT_DOWN = 0x32,
+};
+
+enum hif_general_confirmations_ids {
+ HIF_CNF_ID_CONFIGURATION = 0x09,
+ HIF_CNF_ID_CONTROL_GPIO = 0x26,
+ HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_CNF_ID_SL_CONFIGURE = 0x29,
+ HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_CNF_ID_PTA_SETTINGS = 0x2b,
+ HIF_CNF_ID_PTA_PRIORITY = 0x2c,
+ HIF_CNF_ID_PTA_STATE = 0x2d,
+ HIF_CNF_ID_SHUT_DOWN = 0x32,
+};
+
+enum hif_general_indications_ids {
+ HIF_IND_ID_EXCEPTION = 0xe0,
+ HIF_IND_ID_STARTUP = 0xe1,
+ HIF_IND_ID_WAKEUP = 0xe2,
+ HIF_IND_ID_GENERIC = 0xe3,
+ HIF_IND_ID_ERROR = 0xe4,
+ HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
+};
+
+enum hif_hi_status {
+ HI_STATUS_SUCCESS = 0x0000,
+ HI_STATUS_FAILURE = 0x0001,
+ HI_INVALID_PARAMETER = 0x0002,
+ HI_STATUS_GPIO_WARNING = 0x0003,
+ HI_ERROR_UNSUPPORTED_MSG_ID = 0x0004,
+ SL_MAC_KEY_STATUS_SUCCESS = 0x005A,
+ SL_MAC_KEY_STATUS_FAILED_KEY_ALREADY_BURNED = 0x006B,
+ SL_MAC_KEY_STATUS_FAILED_RAM_MODE_NOT_ALLOWED = 0x007C,
+ SL_MAC_KEY_STATUS_FAILED_UNKNOWN_MODE = 0x008D,
+ SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS = 0x009E,
+ SL_PUB_KEY_EXCHANGE_STATUS_FAILED = 0x00AF,
+ PREVENT_ROLLBACK_CNF_SUCCESS = 0x1234,
+ PREVENT_ROLLBACK_CNF_WRONG_MAGIC_WORD = 0x1256
+};
+
+enum hif_api_rate_index {
+ API_RATE_INDEX_B_1MBPS = 0,
+ API_RATE_INDEX_B_2MBPS = 1,
+ API_RATE_INDEX_B_5P5MBPS = 2,
+ API_RATE_INDEX_B_11MBPS = 3,
+ API_RATE_INDEX_PBCC_22MBPS = 4,
+ API_RATE_INDEX_PBCC_33MBPS = 5,
+ API_RATE_INDEX_G_6MBPS = 6,
+ API_RATE_INDEX_G_9MBPS = 7,
+ API_RATE_INDEX_G_12MBPS = 8,
+ API_RATE_INDEX_G_18MBPS = 9,
+ API_RATE_INDEX_G_24MBPS = 10,
+ API_RATE_INDEX_G_36MBPS = 11,
+ API_RATE_INDEX_G_48MBPS = 12,
+ API_RATE_INDEX_G_54MBPS = 13,
+ API_RATE_INDEX_N_6P5MBPS = 14,
+ API_RATE_INDEX_N_13MBPS = 15,
+ API_RATE_INDEX_N_19P5MBPS = 16,
+ API_RATE_INDEX_N_26MBPS = 17,
+ API_RATE_INDEX_N_39MBPS = 18,
+ API_RATE_INDEX_N_52MBPS = 19,
+ API_RATE_INDEX_N_58P5MBPS = 20,
+ API_RATE_INDEX_N_65MBPS = 21,
+ API_RATE_NUM_ENTRIES = 22
+};
+
+enum hif_fw_type {
+ HIF_FW_TYPE_ETF = 0x0,
+ HIF_FW_TYPE_WFM = 0x1,
+ HIF_FW_TYPE_WSM = 0x2
+};
+
+struct hif_capabilities {
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
+} __packed;
+
+struct hif_otp_regul_sel_mode_info {
+ u8 region_sel_mode:4;
+ u8 reserved:4;
+} __packed;
+
+struct hif_otp_phy_info {
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
+} __packed;
+
+#define API_OPN_SIZE 14
+#define API_UID_SIZE 8
+#define API_DISABLED_CHANNEL_LIST_SIZE 2
+#define API_FIRMWARE_LABEL_SIZE 128
+
+struct hif_ind_startup {
+ u32 status;
+ u16 hardware_id;
+ u8 opn[API_OPN_SIZE];
+ u8 uid[API_UID_SIZE];
+ u16 num_inp_ch_bufs;
+ u16 size_inp_ch_buf;
+ u8 num_links_ap;
+ u8 num_interfaces;
+ u8 mac_addr[2][ETH_ALEN];
+ u8 api_version_minor;
+ u8 api_version_major;
+ struct hif_capabilities capabilities;
+ u8 firmware_build;
+ u8 firmware_minor;
+ u8 firmware_major;
+ u8 firmware_type;
+ u8 disabled_channel_list[API_DISABLED_CHANNEL_LIST_SIZE];
+ struct hif_otp_regul_sel_mode_info regul_sel_mode_info;
+ struct hif_otp_phy_info otp_phy_info;
+ u32 supported_rate_mask;
+ u8 firmware_label[API_FIRMWARE_LABEL_SIZE];
+} __packed;
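+
+/*
+ * Example (a sketch): hif_startup_indication() in hif_rx.c copies this
+ * structure into wdev->hw_caps, after which the firmware version could be
+ * reported as "major.minor.build":
+ *
+ *   dev_info(wdev->dev, "firmware %d.%d.%d\n",
+ *            wdev->hw_caps.firmware_major,
+ *            wdev->hw_caps.firmware_minor,
+ *            wdev->hw_caps.firmware_build);
+ */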
+
+struct hif_ind_wakeup {
+} __packed;
+
+struct hif_req_configuration {
+ u16 length;
+ u8 pds_data[];
+} __packed;
+
+struct hif_cnf_configuration {
+ u32 status;
+} __packed;
+
+enum hif_gpio_mode {
+ HIF_GPIO_MODE_D0 = 0x0,
+ HIF_GPIO_MODE_D1 = 0x1,
+ HIF_GPIO_MODE_OD0 = 0x2,
+ HIF_GPIO_MODE_OD1 = 0x3,
+ HIF_GPIO_MODE_TRISTATE = 0x4,
+ HIF_GPIO_MODE_TOGGLE = 0x5,
+ HIF_GPIO_MODE_READ = 0x6
+};
+
+struct hif_req_control_gpio {
+ u8 gpio_label;
+ u8 gpio_mode;
+} __packed;
+
+enum hif_gpio_error {
+ HIF_GPIO_ERROR_0 = 0x0,
+ HIF_GPIO_ERROR_1 = 0x1,
+ HIF_GPIO_ERROR_2 = 0x2
+};
+
+struct hif_cnf_control_gpio {
+ u32 status;
+ u32 value;
+} __packed;
+
+enum hif_generic_indication_type {
+ HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
+ HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
+ HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2
+};
+
+struct hif_rx_stats {
+ u32 nb_rx_frame;
+ u32 nb_crc_frame;
+ u32 per_total;
+ u32 throughput;
+ u32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
+ u16 per[API_RATE_NUM_ENTRIES];
+ s16 snr[API_RATE_NUM_ENTRIES];
+ s16 rssi[API_RATE_NUM_ENTRIES];
+ s16 cfo[API_RATE_NUM_ENTRIES];
+ u32 date;
+ u32 pwr_clk_freq;
+ u8 is_ext_pwr_clk;
+ s8 current_temp;
+} __packed;
+
+union hif_indication_data {
+ struct hif_rx_stats rx_stats;
+ u8 raw_data[1];
+};
+
+struct hif_ind_generic {
+ u32 indication_type;
+ union hif_indication_data indication_data;
+} __packed;
+
+#define HIF_EXCEPTION_DATA_SIZE 124
+
+struct hif_ind_exception {
+ u8 data[HIF_EXCEPTION_DATA_SIZE];
+} __packed;
+
+enum hif_error {
+ HIF_ERROR_FIRMWARE_ROLLBACK = 0x0,
+ HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x1,
+ HIF_ERROR_OUTDATED_SESSION_KEY = 0x2,
+ HIF_ERROR_INVALID_SESSION_KEY = 0x3,
+ HIF_ERROR_OOR_VOLTAGE = 0x4,
+ HIF_ERROR_PDS_VERSION = 0x5,
+ HIF_ERROR_OOR_TEMPERATURE = 0x6,
+ HIF_ERROR_REQ_DURING_KEY_EXCHANGE = 0x7,
+ HIF_ERROR_MULTI_TX_CNF_SECURELINK = 0x8,
+ HIF_ERROR_SECURELINK_OVERFLOW = 0x9,
+ HIF_ERROR_SECURELINK_DECRYPTION = 0xa
+};
+
+struct hif_ind_error {
+ u32 type;
+ u8 data[];
+} __packed;
+
+enum hif_secure_link_state {
+ SEC_LINK_UNAVAILABLE = 0x0,
+ SEC_LINK_RESERVED = 0x1,
+ SEC_LINK_EVAL = 0x2,
+ SEC_LINK_ENFORCED = 0x3
+};
+
+enum hif_sl_encryption_type {
+ NO_ENCRYPTION = 0,
+ TX_ENCRYPTION = 1,
+ RX_ENCRYPTION = 2,
+ HP_ENCRYPTION = 3
+};
+
+struct hif_sl_msg_hdr {
+ u32 seqnum:30;
+ u32 encrypted:2;
+} __packed;
+
+struct hif_sl_msg {
+ struct hif_sl_msg_hdr hdr;
+ u16 len;
+ u8 payload[];
+} __packed;
+
+#define AES_CCM_TAG_SIZE 16
+
+struct hif_sl_tag {
+ u8 tag[AES_CCM_TAG_SIZE];
+} __packed;
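+
+/*
+ * Presumed on-the-wire layout of a secure link frame (an assumption drawn
+ * from the two structures above): the encrypted payload sits between the
+ * header and a 16-byte AES-CCM tag:
+ *
+ *   [hif_sl_msg_hdr][len][payload ...][hif_sl_tag]
+ */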
+
+enum hif_sl_mac_key_dest {
+ SL_MAC_KEY_DEST_OTP = 0x78,
+ SL_MAC_KEY_DEST_RAM = 0x87
+};
+
+#define API_KEY_VALUE_SIZE 32
+
+struct hif_req_set_sl_mac_key {
+ u8 otp_or_ram;
+ u8 key_value[API_KEY_VALUE_SIZE];
+} __packed;
+
+struct hif_cnf_set_sl_mac_key {
+ u32 status;
+} __packed;
+
+#define API_HOST_PUB_KEY_SIZE 32
+#define API_HOST_PUB_KEY_MAC_SIZE 64
+
+enum hif_sl_session_key_alg {
+ HIF_SL_CURVE25519 = 0x01,
+ HIF_SL_KDF = 0x02
+};
+
+struct hif_req_sl_exchange_pub_keys {
+ u8 algorithm:2;
+ u8 reserved1:6;
+ u8 reserved2[3];
+ u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
+ u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
+} __packed;
+
+struct hif_cnf_sl_exchange_pub_keys {
+ u32 status;
+} __packed;
+
+#define API_NCP_PUB_KEY_SIZE 32
+#define API_NCP_PUB_KEY_MAC_SIZE 64
+
+struct hif_ind_sl_exchange_pub_keys {
+ u32 status;
+ u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
+ u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
+} __packed;
+
+#define API_ENCR_BMP_SIZE 32
+
+struct hif_req_sl_configure {
+ u8 encr_bmp[API_ENCR_BMP_SIZE];
+ u8 disable_session_key_protection:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_cnf_sl_configure {
+ u32 status;
+} __packed;
+
+struct hif_req_prevent_rollback {
+ u32 magic_word;
+} __packed;
+
+struct hif_cnf_prevent_rollback {
+ u32 status;
+} __packed;
+
+enum hif_pta_mode {
+ PTA_1W_WLAN_MASTER = 0,
+ PTA_1W_COEX_MASTER = 1,
+ PTA_2W = 2,
+ PTA_3W = 3,
+ PTA_4W = 4
+};
+
+enum hif_signal_level {
+ SIGNAL_LOW = 0,
+ SIGNAL_HIGH = 1
+};
+
+enum hif_coex_type {
+ COEX_TYPE_GENERIC = 0,
+ COEX_TYPE_BLE = 1
+};
+
+enum hif_grant_state {
+ NO_GRANT = 0,
+ GRANT = 1
+};
+
+struct hif_req_pta_settings {
+ u8 pta_mode;
+ u8 request_signal_active_level;
+ u8 priority_signal_active_level;
+ u8 freq_signal_active_level;
+ u8 grant_signal_active_level;
+ u8 coex_type;
+ u8 default_grant_state;
+ u8 simultaneous_rx_accesses;
+ u8 priority_sampling_time;
+ u8 tx_rx_sampling_time;
+ u8 freq_sampling_time;
+ u8 grant_valid_time;
+ u8 fem_control_time;
+ u8 first_slot_time;
+ u16 periodic_tx_rx_sampling_time;
+ u16 coex_quota;
+ u16 wlan_quota;
+} __packed;
+
+struct hif_cnf_pta_settings {
+ u32 status;
+} __packed;
+
+enum hif_pta_priority {
+ HIF_PTA_PRIORITY_COEX_MAXIMIZED = 0x00000562,
+ HIF_PTA_PRIORITY_COEX_HIGH = 0x00000462,
+ HIF_PTA_PRIORITY_BALANCED = 0x00001461,
+ HIF_PTA_PRIORITY_WLAN_HIGH = 0x00001851,
+ HIF_PTA_PRIORITY_WLAN_MAXIMIZED = 0x00001A51
+};
+
+struct hif_req_pta_priority {
+ u32 priority;
+} __packed;
+
+struct hif_cnf_pta_priority {
+ u32 status;
+} __packed;
+
+enum hif_pta_state {
+ PTA_OFF = 0,
+ PTA_ON = 1
+};
+
+struct hif_req_pta_state {
+ u32 pta_state;
+} __packed;
+
+struct hif_cnf_pta_state {
+ u32 status;
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
new file mode 100644
index 000000000000..94b789ceb4ff
--- /dev/null
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * WFx hardware interface definitions
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories Inc.
+ */
+
+#ifndef WFX_HIF_API_MIB_H
+#define WFX_HIF_API_MIB_H
+
+#include "hif_api_general.h"
+
+#define HIF_API_IPV4_ADDRESS_SIZE 4
+#define HIF_API_IPV6_ADDRESS_SIZE 16
+
+enum hif_mib_ids {
+ HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
+ HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
+ HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
+ HIF_MIB_ID_CCA_CONFIG = 0x2003,
+ HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
+ HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
+ HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
+ HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
+ HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
+ HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
+ HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
+ HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
+ HIF_MIB_ID_RX_FILTER = 0x201B,
+ HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
+ HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
+ HIF_MIB_ID_TSF_COUNTER = 0x2031,
+ HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
+ HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
+ HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
+ HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
+ HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041,
+ HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
+ HIF_MIB_ID_SLOT_TIME = 0x2045,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
+ HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
+ HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
+ HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
+ HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
+ HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
+ HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
+ HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
+ HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
+ HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
+ HIF_MIB_ID_BEACON_STATS = 0x2056,
+};
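+
+/*
+ * These identifiers are passed to hif_read_mib() and hif_write_mib()
+ * (declared in hif_tx.h). For instance, a sketch based on
+ * hif_set_output_power() in hif_tx_mib.h:
+ *
+ *   __le32 val = cpu_to_le32(power_level);
+ *
+ *   hif_write_mib(wdev, vif_id, HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ *                 &val, sizeof(val));
+ */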
+
+#define HIF_OP_POWER_MODE_MASK 0xf
+
+enum hif_op_power_mode {
+ HIF_OP_POWER_MODE_ACTIVE = 0x0,
+ HIF_OP_POWER_MODE_DOZE = 0x1,
+ HIF_OP_POWER_MODE_QUIESCENT = 0x2
+};
+
+struct hif_mib_gl_operational_power_mode {
+ u8 power_mode:4;
+ u8 reserved1:3;
+ u8 wup_ind_activation:1;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_gl_block_ack_info {
+ u8 rx_buffer_size;
+ u8 rx_max_num_agreements;
+ u8 tx_buffer_size;
+ u8 tx_max_num_agreements;
+} __packed;
+
+struct hif_mib_gl_set_multi_msg {
+ u8 enable_multi_tx_conf:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum hif_cca_thr_mode {
+ HIF_CCA_THR_MODE_RELATIVE = 0x0,
+ HIF_CCA_THR_MODE_ABSOLUTE = 0x1
+};
+
+struct hif_mib_gl_cca_config {
+ u8 cca_thr_mode;
+ u8 reserved[3];
+} __packed;
+
+#define MAX_NUMBER_DATA_FILTERS 0xA
+
+#define MAX_NUMBER_IPV4_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_IPV6_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_MAC_ADDR_CONDITIONS 0x4
+#define MAX_NUMBER_UC_MC_BC_CONDITIONS 0x4
+#define MAX_NUMBER_ETHER_TYPE_CONDITIONS 0x4
+#define MAX_NUMBER_PORT_CONDITIONS 0x4
+#define MAX_NUMBER_MAGIC_CONDITIONS 0x4
+#define MAX_NUMBER_ARP_CONDITIONS 0x2
+#define MAX_NUMBER_NS_CONDITIONS 0x2
+
+struct hif_mib_ethertype_data_frame_condition {
+ u8 condition_idx;
+ u8 reserved;
+ u16 ether_type;
+} __packed;
+
+enum hif_udp_tcp_protocol {
+ HIF_PROTOCOL_UDP = 0x0,
+ HIF_PROTOCOL_TCP = 0x1,
+ HIF_PROTOCOL_BOTH_UDP_TCP = 0x2
+};
+
+enum hif_which_port {
+ HIF_PORT_DST = 0x0,
+ HIF_PORT_SRC = 0x1,
+ HIF_PORT_SRC_OR_DST = 0x2
+};
+
+struct hif_mib_ports_data_frame_condition {
+ u8 condition_idx;
+ u8 protocol;
+ u8 which_port;
+ u8 reserved1;
+ u16 port_number;
+ u8 reserved2[2];
+} __packed;
+
+#define HIF_API_MAGIC_PATTERN_SIZE 32
+
+struct hif_mib_magic_data_frame_condition {
+ u8 condition_idx;
+ u8 offset;
+ u8 magic_pattern_length;
+ u8 reserved;
+ u8 magic_pattern[HIF_API_MAGIC_PATTERN_SIZE];
+} __packed;
+
+enum hif_mac_addr_type {
+ HIF_MAC_ADDR_A1 = 0x0,
+ HIF_MAC_ADDR_A2 = 0x1,
+ HIF_MAC_ADDR_A3 = 0x2
+};
+
+struct hif_mib_mac_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_type;
+ u8 mac_address[ETH_ALEN];
+} __packed;
+
+enum hif_ip_addr_mode {
+ HIF_IP_ADDR_SRC = 0x0,
+ HIF_IP_ADDR_DST = 0x1
+};
+
+struct hif_mib_ipv4_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_mode;
+ u8 reserved[2];
+ u8 i_pv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_ipv6_addr_data_frame_condition {
+ u8 condition_idx;
+ u8 address_mode;
+ u8 reserved[2];
+ u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+} __packed;
+
+union hif_addr_type {
+ u8 value;
+ struct {
+ u8 type_unicast:1;
+ u8 type_multicast:1;
+ u8 type_broadcast:1;
+ u8 reserved:5;
+ } bits;
+};
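+
+/*
+ * The union lets the filter byte be set either as a raw value or bit by
+ * bit, e.g. (illustrative only):
+ *
+ *   union hif_addr_type t = { };
+ *
+ *   t.bits.type_multicast = 1;
+ *   t.bits.type_broadcast = 1;
+ *   // t.value now carries both flags in a single byte
+ */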
+
+struct hif_mib_uc_mc_bc_data_frame_condition {
+ u8 condition_idx;
+ union hif_addr_type param;
+ u8 reserved[2];
+} __packed;
+
+struct hif_mib_config_data_filter {
+ u8 filter_idx;
+ u8 enable;
+ u8 reserved1[2];
+ u8 eth_type_cond;
+ u8 port_cond;
+ u8 magic_cond;
+ u8 mac_cond;
+ u8 ipv4_cond;
+ u8 ipv6_cond;
+ u8 uc_mc_bc_cond;
+ u8 reserved2;
+} __packed;
+
+struct hif_mib_set_data_filtering {
+ u8 default_filter;
+ u8 enable;
+ u8 reserved[2];
+} __packed;
+
+enum hif_arp_ns_frame_treatment {
+ HIF_ARP_NS_FILTERING_DISABLE = 0x0,
+ HIF_ARP_NS_FILTERING_ENABLE = 0x1,
+ HIF_ARP_NS_REPLY_ENABLE = 0x2
+};
+
+struct hif_mib_arp_ip_addr_table {
+ u8 condition_idx;
+ u8 arp_enable;
+ u8 reserved[2];
+ u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_ns_ip_addr_table {
+ u8 condition_idx;
+ u8 ns_enable;
+ u8 reserved[2];
+ u8 ipv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_rx_filter {
+ u8 reserved1:1;
+ u8 bssid_filter:1;
+ u8 reserved2:1;
+ u8 fwd_probe_req:1;
+ u8 keep_alive_filter:1;
+ u8 reserved3:3;
+ u8 reserved4[3];
+} __packed;
+
+#define HIF_API_OUI_SIZE 3
+#define HIF_API_MATCH_DATA_SIZE 3
+
+struct hif_ie_table_entry {
+ u8 ie_id;
+ u8 has_changed:1;
+ u8 no_longer:1;
+ u8 has_appeared:1;
+ u8 reserved:1;
+ u8 num_match_data:4;
+ u8 oui[HIF_API_OUI_SIZE];
+ u8 match_data[HIF_API_MATCH_DATA_SIZE];
+} __packed;
+
+struct hif_mib_bcn_filter_table {
+ u32 num_of_info_elmts;
+ struct hif_ie_table_entry ie_table[];
+} __packed;
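+
+/*
+ * ie_table[] is a flexible array member, so this MIB is allocated with its
+ * entries appended, as hif_set_beacon_filter_table() in hif_tx_mib.h does:
+ *
+ *   val = kzalloc(struct_size(val, ie_table, tbl_len), GFP_KERNEL);
+ */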
+
+enum hif_beacon_filter {
+ HIF_BEACON_FILTER_DISABLE = 0x0,
+ HIF_BEACON_FILTER_ENABLE = 0x1,
+ HIF_BEACON_FILTER_AUTO_ERP = 0x2
+};
+
+struct hif_mib_bcn_filter_enable {
+ u32 enable;
+ u32 bcn_count;
+} __packed;
+
+struct hif_mib_group_seq_counter {
+ u32 bits4716;
+ u16 bits1500;
+ u16 reserved;
+} __packed;
+
+struct hif_mib_tsf_counter {
+ u32 tsf_counterlo;
+ u32 tsf_counterhi;
+} __packed;
+
+struct hif_mib_stats_table {
+ s16 latest_snr;
+ u8 latest_rcpi;
+ s8 latest_rssi;
+} __packed;
+
+struct hif_mib_extended_count_table {
+ u32 count_plcp_errors;
+ u32 count_fcs_errors;
+ u32 count_tx_packets;
+ u32 count_rx_packets;
+ u32 count_rx_packet_errors;
+ u32 count_rx_decryption_failures;
+ u32 count_rx_mic_failures;
+ u32 count_rx_no_key_failures;
+ u32 count_tx_multicast_frames;
+ u32 count_tx_frames_success;
+ u32 count_tx_frame_failures;
+ u32 count_tx_frames_retried;
+ u32 count_tx_frames_multi_retried;
+ u32 count_rx_frame_duplicates;
+ u32 count_rts_success;
+ u32 count_rts_failures;
+ u32 count_ack_failures;
+ u32 count_rx_multicast_frames;
+ u32 count_rx_frames_success;
+ u32 count_rx_cmacicv_errors;
+ u32 count_rx_cmac_replays;
+ u32 count_rx_mgmt_ccmp_replays;
+ u32 count_rx_bipmic_errors;
+ u32 count_rx_beacon;
+ u32 count_miss_beacon;
+ u32 reserved[15];
+} __packed;
+
+struct hif_mib_count_table {
+ u32 count_plcp_errors;
+ u32 count_fcs_errors;
+ u32 count_tx_packets;
+ u32 count_rx_packets;
+ u32 count_rx_packet_errors;
+ u32 count_rx_decryption_failures;
+ u32 count_rx_mic_failures;
+ u32 count_rx_no_key_failures;
+ u32 count_tx_multicast_frames;
+ u32 count_tx_frames_success;
+ u32 count_tx_frame_failures;
+ u32 count_tx_frames_retried;
+ u32 count_tx_frames_multi_retried;
+ u32 count_rx_frame_duplicates;
+ u32 count_rts_success;
+ u32 count_rts_failures;
+ u32 count_ack_failures;
+ u32 count_rx_multicast_frames;
+ u32 count_rx_frames_success;
+ u32 count_rx_cmacicv_errors;
+ u32 count_rx_cmac_replays;
+ u32 count_rx_mgmt_ccmp_replays;
+ u32 count_rx_bipmic_errors;
+} __packed;
+
+struct hif_mib_max_tx_power_level {
+ s32 max_tx_power_level_rf_port1;
+ s32 max_tx_power_level_rf_port2;
+} __packed;
+
+struct hif_mib_beacon_stats {
+ s32 latest_tbtt_diff;
+ u32 reserved[4];
+} __packed;
+
+struct hif_mib_mac_address {
+ u8 mac_addr[ETH_ALEN];
+ u16 reserved;
+} __packed;
+
+struct hif_mib_dot11_max_transmit_msdu_lifetime {
+ u32 max_life_time;
+} __packed;
+
+struct hif_mib_dot11_max_receive_lifetime {
+ u32 max_life_time;
+} __packed;
+
+struct hif_mib_wep_default_key_id {
+ u8 wep_default_key_id;
+ u8 reserved[3];
+} __packed;
+
+struct hif_mib_dot11_rts_threshold {
+ u32 threshold;
+} __packed;
+
+struct hif_mib_slot_time {
+ u32 slot_time;
+} __packed;
+
+struct hif_mib_current_tx_power_level {
+ s32 power_level;
+} __packed;
+
+struct hif_mib_non_erp_protection {
+ u8 use_cts_to_self:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+enum hif_tx_mode {
+ HIF_TX_MODE_MIXED = 0x0,
+ HIF_TX_MODE_GREENFIELD = 0x1
+};
+
+enum hif_tmplt {
+ HIF_TMPLT_PRBREQ = 0x0,
+ HIF_TMPLT_BCN = 0x1,
+ HIF_TMPLT_NULL = 0x2,
+ HIF_TMPLT_QOSNUL = 0x3,
+ HIF_TMPLT_PSPOLL = 0x4,
+ HIF_TMPLT_PRBRES = 0x5,
+ HIF_TMPLT_ARP = 0x6,
+ HIF_TMPLT_NA = 0x7
+};
+
+#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
+
+struct hif_mib_template_frame {
+ u8 frame_type;
+ u8 init_rate:7;
+ u8 mode:1;
+ u16 frame_length;
+ u8 frame[HIF_API_MAX_TEMPLATE_FRAME_SIZE];
+} __packed;
+
+struct hif_mib_beacon_wake_up_period {
+ u8 wakeup_period_min;
+ u8 receive_dtim:1;
+ u8 reserved1:7;
+ u8 wakeup_period_max;
+ u8 reserved2;
+} __packed;
+
+struct hif_mib_rcpi_rssi_threshold {
+ u8 detection:1;
+ u8 rcpi_rssi:1;
+ u8 upperthresh:1;
+ u8 lowerthresh:1;
+ u8 reserved:4;
+ u8 lower_threshold;
+ u8 upper_threshold;
+ u8 rolling_average_count;
+} __packed;
+
+#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16
+
+struct hif_mib_block_ack_policy {
+ u8 block_ack_tx_tid_policy;
+ u8 reserved1;
+ u8 block_ack_rx_tid_policy;
+ u8 block_ack_rx_max_buffer_size;
+} __packed;
+
+struct hif_mib_override_int_rate {
+ u8 internal_tx_rate;
+ u8 non_erp_internal_tx_rate;
+ u8 reserved[2];
+} __packed;
+
+enum hif_mpdu_start_spacing {
+ HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
+ HIF_MPDU_START_SPACING_QUARTER = 0x1,
+ HIF_MPDU_START_SPACING_HALF = 0x2,
+ HIF_MPDU_START_SPACING_ONE = 0x3,
+ HIF_MPDU_START_SPACING_TWO = 0x4,
+ HIF_MPDU_START_SPACING_FOUR = 0x5,
+ HIF_MPDU_START_SPACING_EIGHT = 0x6,
+ HIF_MPDU_START_SPACING_SIXTEEN = 0x7
+};
+
+struct hif_mib_set_association_mode {
+ u8 preambtype_use:1;
+ u8 mode:1;
+ u8 rateset:1;
+ u8 spacing:1;
+ u8 reserved:4;
+ u8 preamble_type;
+ u8 mixed_or_greenfield_type;
+ u8 mpdu_start_spacing;
+ u32 basic_rate_set;
+} __packed;
+
+struct hif_mib_set_uapsd_information {
+ u8 trig_bckgrnd:1;
+ u8 trig_be:1;
+ u8 trig_video:1;
+ u8 trig_voice:1;
+ u8 reserved1:4;
+ u8 deliv_bckgrnd:1;
+ u8 deliv_be:1;
+ u8 deliv_video:1;
+ u8 deliv_voice:1;
+ u8 reserved2:4;
+ u16 min_auto_trigger_interval;
+ u16 max_auto_trigger_interval;
+ u16 auto_trigger_step;
+} __packed;
+
+struct hif_mib_tx_rate_retry_policy {
+ u8 policy_index;
+ u8 short_retry_count;
+ u8 long_retry_count;
+ u8 first_rate_sel:2;
+ u8 terminate:1;
+ u8 count_init:1;
+ u8 reserved1:4;
+ u8 rate_recovery_count;
+ u8 reserved2[3];
+ u8 rates[12];
+} __packed;
+
+#define HIF_MIB_NUM_TX_RATE_RETRY_POLICIES 15
+
+struct hif_mib_set_tx_rate_retry_policy {
+ u8 num_tx_rate_policies;
+ u8 reserved[3];
+ struct hif_mib_tx_rate_retry_policy tx_rate_retry_policy[];
+} __packed;
+
+struct hif_mib_protected_mgmt_policy {
+ u8 pmf_enable:1;
+ u8 unpmf_allowed:1;
+ u8 host_enc_auth_frames:1;
+ u8 reserved1:5;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_set_ht_protection {
+ u8 dual_cts_prot:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+struct hif_mib_keep_alive_period {
+ u16 keep_alive_period;
+ u8 reserved[2];
+} __packed;
+
+struct hif_mib_arp_keep_alive_period {
+ u16 arp_keep_alive_period;
+ u8 encr_type;
+ u8 reserved;
+ u8 sender_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+ u8 target_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
+} __packed;
+
+struct hif_mib_inactivity_timer {
+ u8 min_active_time;
+ u8 max_active_time;
+ u16 reserved;
+} __packed;
+
+struct hif_mib_interface_protection {
+ u8 use_cts_prot:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+} __packed;
+
+#endif
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
new file mode 100644
index 000000000000..820de216be0c
--- /dev/null
+++ b/drivers/staging/wfx/hif_rx.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of the chip-to-host events (aka indications) of the WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_rx.h"
+#include "wfx.h"
+#include "scan.h"
+#include "bh.h"
+#include "sta.h"
+#include "data_rx.h"
+#include "secure_link.h"
+#include "hif_api_cmd.h"
+
+static int hif_generic_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ // All confirm messages start with status
+ int status = le32_to_cpu(*((__le32 *) buf));
+ int cmd = hif->id;
+ int len = hif->len - 4; // drop header
+
+ WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
+
+ if (!wdev->hif_cmd.buf_send) {
+ dev_warn(wdev->dev, "unexpected confirmation: 0x%.2x\n", cmd);
+ return -EINVAL;
+ }
+
+ if (cmd != wdev->hif_cmd.buf_send->id) {
+ dev_warn(wdev->dev,
+ "chip response mismatch request: 0x%.2x vs 0x%.2x\n",
+ cmd, wdev->hif_cmd.buf_send->id);
+ return -EINVAL;
+ }
+
+ if (wdev->hif_cmd.buf_recv) {
+ if (wdev->hif_cmd.len_recv >= len)
+ memcpy(wdev->hif_cmd.buf_recv, buf, len);
+ else
+ status = -ENOMEM;
+ }
+ wdev->hif_cmd.ret = status;
+
+ if (!wdev->hif_cmd.async) {
+ complete(&wdev->hif_cmd.done);
+ } else {
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_unlock(&wdev->hif_cmd.key_renew_lock);
+ }
+ return status;
+}
+
+static int hif_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif, void *buf)
+{
+ struct hif_cnf_tx *body = buf;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ wfx_tx_confirm_cb(wvif, body);
+ return 0;
+}
+
+static int hif_multi_tx_confirm(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_cnf_multi_transmit *body = buf;
+ struct hif_cnf_tx *buf_loc = (struct hif_cnf_tx *) &body->tx_conf_payload;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ int count = body->num_tx_confs;
+ int i;
+
+ WARN(count <= 0, "corrupted message");
+ WARN_ON(!wvif);
+ if (!wvif)
+ return -EFAULT;
+
+ for (i = 0; i < count; ++i) {
+ wfx_tx_confirm_cb(wvif, buf_loc);
+ buf_loc++;
+ }
+ return 0;
+}
+
+static int hif_startup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_startup *body = buf;
+
+ if (body->status || body->firmware_type > 4) {
+ dev_err(wdev->dev, "received invalid startup indication");
+ return -EINVAL;
+ }
+ memcpy(&wdev->hw_caps, body, sizeof(struct hif_ind_startup));
+ le32_to_cpus(&wdev->hw_caps.status);
+ le16_to_cpus(&wdev->hw_caps.hardware_id);
+ le16_to_cpus(&wdev->hw_caps.num_inp_ch_bufs);
+ le16_to_cpus(&wdev->hw_caps.size_inp_ch_buf);
+
+ complete(&wdev->firmware_ready);
+ return 0;
+}
+
+static int hif_wakeup_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ if (!wdev->pdata.gpio_wakeup
+ || !gpiod_get_value(wdev->pdata.gpio_wakeup)) {
+ dev_warn(wdev->dev, "unexpected wake-up indication\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int hif_keys_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_sl_exchange_pub_keys *body = buf;
+
+ // Compatibility with legacy secure link
+ if (body->status == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ body->status = 0;
+ if (body->status)
+ dev_warn(wdev->dev, "secure link negociation error\n");
+ wfx_sl_check_pubkey(wdev, body->ncp_pub_key, body->ncp_pub_key_mac);
+ return 0;
+}
+
+static int hif_receive_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf, struct sk_buff *skb)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_rx *body = buf;
+
+ if (!wvif) {
+ dev_warn(wdev->dev, "ignore rx data for non-existent vif %d\n",
+ hif->interface);
+ return 0;
+ }
+ skb_pull(skb, sizeof(struct hif_msg) + sizeof(struct hif_ind_rx));
+ wfx_rx_cb(wvif, body, skb);
+
+ return 0;
+}
+
+static int hif_event_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_event *body = buf;
+ struct wfx_hif_event *event;
+ int first;
+
+ WARN_ON(!wvif);
+ if (!wvif)
+ return 0;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ memcpy(&event->evt, body, sizeof(struct hif_ind_event));
+ spin_lock(&wvif->event_queue_lock);
+ first = list_empty(&wvif->event_queue);
+ list_add_tail(&event->link, &wvif->event_queue);
+ spin_unlock(&wvif->event_queue_lock);
+
+ if (first)
+ schedule_work(&wvif->event_handler_work);
+
+ return 0;
+}
+
+static int hif_pm_mode_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ complete(&wvif->set_pm_mode_complete);
+
+ return 0;
+}
+
+static int hif_scan_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_scan_cmpl *body = buf;
+
+ WARN_ON(!wvif);
+ wfx_scan_complete_cb(wvif, body);
+
+ return 0;
+}
+
+static int hif_join_complete_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+
+ WARN_ON(!wvif);
+ dev_warn(wdev->dev, "unattended JoinCompleteInd\n");
+
+ return 0;
+}
+
+static int hif_suspend_resume_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ struct hif_ind_suspend_resume_tx *body = buf;
+
+ WARN_ON(!wvif);
+ wfx_suspend_resume(wvif, body);
+
+ return 0;
+}
+
+static int hif_error_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_error *body = buf;
+ u8 *rollback = (u8 *) body->data;
+ u32 *status = (u32 *) body->data;
+
+ switch (body->type) {
+ case HIF_ERROR_FIRMWARE_ROLLBACK:
+ dev_err(wdev->dev,
+ "asynchronous error: firmware rollback error %d\n",
+ *rollback);
+ break;
+ case HIF_ERROR_FIRMWARE_DEBUG_ENABLED:
+ dev_err(wdev->dev, "asynchronous error: firmware debug feature enabled\n");
+ break;
+ case HIF_ERROR_OUTDATED_SESSION_KEY:
+ dev_err(wdev->dev, "asynchronous error: secure link outdated key: %#.8x\n",
+ *status);
+ break;
+ case HIF_ERROR_INVALID_SESSION_KEY:
+ dev_err(wdev->dev, "asynchronous error: invalid session key\n");
+ break;
+ case HIF_ERROR_OOR_VOLTAGE:
+ dev_err(wdev->dev, "asynchronous error: out-of-range overvoltage: %#.8x\n",
+ *pStatus);
+ break;
+ case HIF_ERROR_PDS_VERSION:
+ dev_err(wdev->dev,
+ "asynchronous error: wrong PDS payload or version: %#.8x\n",
+ *status);
+ break;
+ default:
+ dev_err(wdev->dev, "asynchronous error: unknown (%d)\n",
+ body->type);
+ break;
+ }
+ return 0;
+}
+
+static int hif_generic_indication(struct wfx_dev *wdev, struct hif_msg *hif,
+ void *buf)
+{
+ struct hif_ind_generic *body = buf;
+
+ switch (body->indication_type) {
+ case HIF_GENERIC_INDICATION_TYPE_RAW:
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_STRING:
+ dev_info(wdev->dev, "firmware says: %s\n",
+ (char *) body->indication_data.raw_data);
+ return 0;
+ case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
+ mutex_lock(&wdev->rx_stats_lock);
+ // Older firmware sends a generic indication alongside the RxStats
+ if (!wfx_api_older_than(wdev, 1, 4))
+ dev_info(wdev->dev, "Rx test ongoing. Temperature: %d°C\n",
+ body->indication_data.rx_stats.current_temp);
+ memcpy(&wdev->rx_stats, &body->indication_data.rx_stats,
+ sizeof(wdev->rx_stats));
+ mutex_unlock(&wdev->rx_stats_lock);
+ return 0;
+ default:
+ dev_err(wdev->dev,
+ "generic_indication: unknown indication type: %#.8x\n",
+ body->indication_type);
+ return -EIO;
+ }
+}
+
+static int hif_exception_indication(struct wfx_dev *wdev,
+ struct hif_msg *hif, void *buf)
+{
+ size_t len = hif->len - 4; // drop header
+
+ dev_err(wdev->dev, "firmware exception\n");
+ print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
+ wdev->chip_frozen = 1;
+
+ return -1;
+}
+
+static const struct {
+ int msg_id;
+ int (*handler)(struct wfx_dev *wdev, struct hif_msg *hif, void *buf);
+} hif_handlers[] = {
+ /* Confirmations */
+ { HIF_CNF_ID_TX, hif_tx_confirm },
+ { HIF_CNF_ID_MULTI_TRANSMIT, hif_multi_tx_confirm },
+ /* Indications */
+ { HIF_IND_ID_STARTUP, hif_startup_indication },
+ { HIF_IND_ID_WAKEUP, hif_wakeup_indication },
+ { HIF_IND_ID_JOIN_COMPLETE, hif_join_complete_indication },
+ { HIF_IND_ID_SET_PM_MODE_CMPL, hif_pm_mode_complete_indication },
+ { HIF_IND_ID_SCAN_CMPL, hif_scan_complete_indication },
+ { HIF_IND_ID_SUSPEND_RESUME_TX, hif_suspend_resume_indication },
+ { HIF_IND_ID_SL_EXCHANGE_PUB_KEYS, hif_keys_indication },
+ { HIF_IND_ID_EVENT, hif_event_indication },
+ { HIF_IND_ID_GENERIC, hif_generic_indication },
+ { HIF_IND_ID_ERROR, hif_error_indication },
+ { HIF_IND_ID_EXCEPTION, hif_exception_indication },
+ // FIXME: allocate skb_p from hif_receive_indication and make it generic
+ //{ HIF_IND_ID_RX, hif_receive_indication },
+};
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ int i;
+ struct hif_msg *hif = (struct hif_msg *) skb->data;
+ int hif_id = hif->id;
+
+ if (hif_id == HIF_IND_ID_RX) {
+ // hif_receive_indication() takes care of the skb lifetime
+ hif_receive_indication(wdev, hif, hif->body, skb);
+ return;
+ }
+ // Note: mutex_is_locked() causes an implicit memory barrier that
+ // protects buf_send
+ if (mutex_is_locked(&wdev->hif_cmd.lock)
+ && wdev->hif_cmd.buf_send
+ && wdev->hif_cmd.buf_send->id == hif_id) {
+ hif_generic_confirm(wdev, hif, hif->body);
+ goto free;
+ }
+ for (i = 0; i < ARRAY_SIZE(hif_handlers); i++) {
+ if (hif_handlers[i].msg_id == hif_id) {
+ if (hif_handlers[i].handler)
+ hif_handlers[i].handler(wdev, hif, hif->body);
+ goto free;
+ }
+ }
+ dev_err(wdev->dev, "unsupported HIF ID %02x\n", hif_id);
+free:
+ dev_kfree_skb(skb);
+}
diff --git a/drivers/staging/wfx/hif_rx.h b/drivers/staging/wfx/hif_rx.h
new file mode 100644
index 000000000000..f07c10c8c6bd
--- /dev/null
+++ b/drivers/staging/wfx/hif_rx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the chip-to-host events (aka indications) of the WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_RX_H
+#define WFX_HIF_RX_H
+
+struct wfx_dev;
+struct sk_buff;
+
+void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
new file mode 100644
index 000000000000..cb7cddcb9815
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
+ * Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+
+#include "hif_tx.h"
+#include "wfx.h"
+#include "bh.h"
+#include "hwio.h"
+#include "debug.h"
+#include "sta.h"
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
+{
+ init_completion(&hif_cmd->ready);
+ init_completion(&hif_cmd->done);
+ mutex_init(&hif_cmd->lock);
+ mutex_init(&hif_cmd->key_renew_lock);
+}
+
+static void wfx_fill_header(struct hif_msg *hif, int if_id, unsigned int cmd,
+ size_t size)
+{
+ if (if_id == -1)
+ if_id = 2;
+
+ WARN(cmd > 0x3f, "invalid WSM command %#.2x", cmd);
+ WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size);
+ WARN(if_id > 0x3, "invalid interface ID %d", if_id);
+
+ hif->len = cpu_to_le16(size + 4);
+ hif->id = cmd;
+ hif->interface = if_id;
+}
+
+static void *wfx_alloc_hif(size_t body_len, struct hif_msg **hif)
+{
+ *hif = kzalloc(sizeof(struct hif_msg) + body_len, GFP_KERNEL);
+ if (*hif)
+ return (*hif)->body;
+ else
+ return NULL;
+}
+
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
+ size_t reply_len, bool async)
+{
+ const char *mib_name = "";
+ const char *mib_sep = "";
+ int cmd = request->id;
+ int vif = request->interface;
+ int ret;
+
+ WARN(wdev->hif_cmd.buf_recv && wdev->hif_cmd.async, "API usage error");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return -ETIMEDOUT;
+
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_lock(&wdev->hif_cmd.key_renew_lock);
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ WARN(wdev->hif_cmd.buf_send, "data locking error");
+
+ // Note: the call to complete() below has an implicit memory barrier
+ // that hopefully protects buf_send
+ wdev->hif_cmd.buf_send = request;
+ wdev->hif_cmd.buf_recv = reply;
+ wdev->hif_cmd.len_recv = reply_len;
+ wdev->hif_cmd.async = async;
+ complete(&wdev->hif_cmd.ready);
+
+ wfx_bh_request_tx(wdev);
+
+ // NOTE: no timeout is caught when async is enabled
+ if (async)
+ return 0;
+
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
+ if (!ret) {
+ dev_err(wdev->dev, "chip is abnormally long to answer\n");
+ reinit_completion(&wdev->hif_cmd.ready);
+ ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 3 * HZ);
+ }
+ if (!ret) {
+ dev_err(wdev->dev, "chip did not answer\n");
+ wfx_pending_dump_old_frames(wdev, 3000);
+ wdev->chip_frozen = 1;
+ reinit_completion(&wdev->hif_cmd.done);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = wdev->hif_cmd.ret;
+ }
+
+ wdev->hif_cmd.buf_send = NULL;
+ mutex_unlock(&wdev->hif_cmd.lock);
+
+ if (ret &&
+ (cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
+ mib_name = get_mib_name(((u16 *) request)[2]);
+ mib_sep = "/";
+ }
+ if (ret < 0)
+ dev_err(wdev->dev,
+ "WSM request %s%s%s (%#.2x) on vif %d returned error %d\n",
+ get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+ if (ret > 0)
+ dev_warn(wdev->dev,
+ "WSM request %s%s%s (%#.2x) on vif %d returned status %d\n",
+ get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
+
+ if (cmd != HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS)
+ mutex_unlock(&wdev->hif_cmd.key_renew_lock);
+ return ret;
+}
+
+// This function is special. After HIF_REQ_ID_SHUT_DOWN, the chip won't reply
+// to any request anymore. We need to slightly hack struct wfx_hif_cmd for that
+// job. Be careful to only call this function during device unregister.
+int hif_shutdown(struct wfx_dev *wdev)
+{
+ int ret;
+ struct hif_msg *hif;
+
+ wfx_alloc_hif(0, &hif);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
+ // After this command, the chip won't reply. Be sure to give the bottom
+ // half enough time to send the buffer:
+ msleep(100);
+ wdev->hif_cmd.buf_send = NULL;
+ if (wdev->pdata.gpio_wakeup)
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 0);
+ else
+ control_reg_write(wdev, 0);
+ mutex_unlock(&wdev->hif_cmd.lock);
+ kfree(hif);
+ return ret;
+}
+
+int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
+{
+ int ret;
+ size_t buf_len = sizeof(struct hif_req_configuration) + len;
+ struct hif_msg *hif;
+ struct hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
+
+ body->length = cpu_to_le16(len);
+ memcpy(body->pds_data, conf, len);
+ wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_reset(struct wfx_vif *wvif, bool reset_stat)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ body->reset_flags.reset_stat = reset_stat;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
+ size_t val_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_cnf_read_mib) + val_len;
+ struct hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
+ struct hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
+
+ body->mib_id = cpu_to_le16(mib_id);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
+
+ if (!ret && mib_id != reply->mib_id) {
+ dev_warn(wdev->dev,
+ "%s: confirmation mismatch request\n", __func__);
+ ret = -EIO;
+ }
+ if (ret == -ENOMEM)
+ dev_err(wdev->dev,
+ "buffer is too small to receive %s (%zu < %d)\n",
+ get_mib_name(mib_id), val_len, reply->length);
+ if (!ret)
+ memcpy(val, &reply->mib_data, reply->length);
+ else
+ memset(val, 0xFF, val_len);
+ kfree(hif);
+ kfree(reply);
+ return ret;
+}
+
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
+ size_t val_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_req_write_mib) + val_len;
+ struct hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
+
+ body->mib_id = cpu_to_le16(mib_id);
+ body->length = cpu_to_le16(val_len);
+ memcpy(&body->mib_data, val, val_len);
+ wfx_fill_header(hif, vif_id, HIF_REQ_ID_WRITE_MIB, buf_len);
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg)
+{
+ int ret, i;
+ struct hif_msg *hif;
+ struct hif_ssid_def *ssids;
+ size_t buf_len = sizeof(struct hif_req_start_scan) +
+ arg->scan_req.num_of_channels * sizeof(u8) +
+ arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
+ struct hif_req_start_scan *body = wfx_alloc_hif(buf_len, &hif);
+ u8 *ptr = (u8 *) body + sizeof(*body);
+
+ WARN(arg->scan_req.num_of_channels > HIF_API_MAX_NB_CHANNELS, "invalid params");
+ WARN(arg->scan_req.num_of_ssi_ds > 2, "invalid params");
+ WARN(arg->scan_req.band > 1, "invalid params");
+
+ // FIXME: This API is unnecessarily complex; fixing NumOfChannels and
+ // adding a SsidDef member at the end of struct hif_req_start_scan would
+ // simplify it a lot.
+ memcpy(body, &arg->scan_req, sizeof(*body));
+ cpu_to_le32s(&body->min_channel_time);
+ cpu_to_le32s(&body->max_channel_time);
+ cpu_to_le32s(&body->tx_power_level);
+ memcpy(ptr, arg->ssids,
+ arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def));
+ ssids = (struct hif_ssid_def *) ptr;
+ for (i = 0; i < body->num_of_ssi_ds; ++i)
+ cpu_to_le32s(&ssids[i].ssid_length);
+ ptr += arg->scan_req.num_of_ssi_ds * sizeof(struct hif_ssid_def);
+ memcpy(ptr, arg->ch, arg->scan_req.num_of_channels * sizeof(u8));
+ ptr += arg->scan_req.num_of_channels * sizeof(u8);
+ WARN(buf_len != ptr - (u8 *) body, "allocation size mismatch");
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_stop_scan(struct wfx_vif *wvif)
+{
+ int ret;
+ struct hif_msg *hif;
+ // the body associated with HIF_REQ_ID_STOP_SCAN is empty
+ wfx_alloc_hif(0, &hif);
+
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(struct hif_req_join));
+ cpu_to_le16s(&body->channel_number);
+ cpu_to_le16s(&body->atim_window);
+ cpu_to_le32s(&body->ssid_length);
+ cpu_to_le32s(&body->beacon_interval);
+ cpu_to_le32s(&body->basic_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_bss_params(struct wfx_vif *wvif,
+ const struct hif_req_set_bss_params *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->aid);
+ cpu_to_le32s(&body->operational_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ // FIXME: only send necessary bits
+ struct hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ // FIXME: swap bytes as necessary in body
+ memcpy(body, arg, sizeof(*body));
+ if (wfx_api_older_than(wdev, 1, 5))
+ // Legacy firmware expects add_key to be sent on the right
+ // interface.
+ wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY,
+ sizeof(*body));
+ else
+ wfx_fill_header(hif, -1, HIF_REQ_ID_ADD_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_remove_key(struct wfx_dev *wdev, int idx)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ body->entry_index = idx;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_edca_queue_params(struct wfx_vif *wvif,
+ const struct hif_req_edca_queue_params *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_edca_queue_params *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ // NOTE: queue numbering is not the same between WFx and Linux
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->cw_min);
+ cpu_to_le16s(&body->cw_max);
+ cpu_to_le16s(&body->tx_op_limit);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_EDCA_QUEUE_PARAMS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_pm_mode *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_PM_MODE, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body, arg, sizeof(*body));
+ cpu_to_le16s(&body->channel_number);
+ cpu_to_le32s(&body->beacon_interval);
+ cpu_to_le32s(&body->basic_rate_set);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable_beaconing)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ body->enable_beaconing = enable_beaconing ? 1 : 0;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
+ sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ if (mac_addr)
+ ether_addr_copy(body->mac_addr, mac_addr);
+ body->map_link_flags = *(struct hif_map_link_flags *) &flags;
+ body->peer_sta_id = sta_id;
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
+ const u8 *ies, size_t ies_len)
+{
+ int ret;
+ struct hif_msg *hif;
+ int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
+ struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
+
+ memcpy(&body->ie_flags, target_frame, sizeof(struct hif_ie_flags));
+ body->num_i_es = cpu_to_le16(1);
+ memcpy(body->ie, ies, ies_len);
+ wfx_fill_header(hif, wvif->id, HIF_REQ_ID_UPDATE_IE, buf_len);
+ ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_sl_send_pub_keys(struct wfx_dev *wdev, const uint8_t *pubkey,
+ const uint8_t *pubkey_hmac)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_sl_exchange_pub_keys *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ body->algorithm = HIF_SL_CURVE25519;
+ memcpy(body->host_pub_key, pubkey, sizeof(body->host_pub_key));
+ memcpy(body->host_pub_key_mac, pubkey_hmac,
+ sizeof(body->host_pub_key_mac));
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS,
+ sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ // Compatibility with legacy secure link
+ if (ret == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ ret = 0;
+ return ret;
+}
+
+int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_sl_configure *body = wfx_alloc_hif(sizeof(*body), &hif);
+
+ memcpy(body->encr_bmp, bitmap, sizeof(body->encr_bmp));
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SL_CONFIGURE, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ return ret;
+}
+
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
+ int destination)
+{
+ int ret;
+ struct hif_msg *hif;
+ struct hif_req_set_sl_mac_key *body = wfx_alloc_hif(sizeof(*body),
+ &hif);
+
+ memcpy(body->key_value, slk_key, sizeof(body->key_value));
+ body->otp_or_ram = destination;
+ wfx_fill_header(hif, -1, HIF_REQ_ID_SET_SL_MAC_KEY, sizeof(*body));
+ ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
+ kfree(hif);
+ // Compatibility with legacy secure link
+ if (ret == SL_MAC_KEY_STATUS_SUCCESS)
+ ret = 0;
+ return ret;
+}
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
new file mode 100644
index 000000000000..f61ae7b0d41c
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the host-to-chip commands (aka requests/confirmations)
+ * of the WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_H
+#define WFX_HIF_TX_H
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_scan_params {
+ struct hif_req_start_scan scan_req;
+ struct hif_ssid_def *ssids;
+ u8 *ch;
+};
+
+struct wfx_hif_cmd {
+ struct mutex lock;
+ struct mutex key_renew_lock;
+ struct completion ready;
+ struct completion done;
+ bool async;
+ struct hif_msg *buf_send;
+ void *buf_recv;
+ size_t len_recv;
+ int ret;
+};
+
+void wfx_init_hif_cmd(struct wfx_hif_cmd *wfx_hif_cmd);
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
+ void *reply, size_t reply_len, bool async);
+
+int hif_shutdown(struct wfx_dev *wdev);
+int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len);
+int hif_reset(struct wfx_vif *wvif, bool reset_stat);
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *buf, size_t buf_size);
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *buf, size_t buf_size);
+int hif_scan(struct wfx_vif *wvif, const struct wfx_scan_params *arg);
+int hif_stop_scan(struct wfx_vif *wvif);
+int hif_join(struct wfx_vif *wvif, const struct hif_req_join *arg);
+int hif_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int hif_set_bss_params(struct wfx_vif *wvif,
+ const struct hif_req_set_bss_params *arg);
+int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
+int hif_remove_key(struct wfx_dev *wdev, int idx);
+int hif_set_edca_queue_params(struct wfx_vif *wvif,
+ const struct hif_req_edca_queue_params *arg);
+int hif_start(struct wfx_vif *wvif, const struct hif_req_start *arg);
+int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
+int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
+int hif_update_ie(struct wfx_vif *wvif, const struct hif_ie_flags *target_frame,
+ const u8 *ies, size_t ies_len);
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
+ int destination);
+int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
+int hif_sl_send_pub_keys(struct wfx_dev *wdev,
+ const u8 *pubkey, const u8 *pubkey_hmac);
+
+#endif
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
new file mode 100644
index 000000000000..bb091e395ff5
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of the host-to-chip MIBs of the WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+#ifndef WFX_HIF_TX_MIB_H
+#define WFX_HIF_TX_MIB_H
+
+#include <linux/etherdevice.h>
+
+#include "wfx.h"
+#include "hif_tx.h"
+#include "hif_api_mib.h"
+
+static inline int hif_set_output_power(struct wfx_vif *wvif, int power_level)
+{
+ __le32 val = cpu_to_le32(power_level);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval)
+{
+ struct hif_mib_beacon_wake_up_period val = {
+ .wakeup_period_min = dtim_interval,
+ .receive_dtim = 0,
+ .wakeup_period_max = listen_interval,
+ };
+
+ if (dtim_interval > 0xFF || listen_interval > 0xFF)
+ return -EINVAL;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ struct hif_mib_rcpi_rssi_threshold *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, arg, sizeof(*arg));
+}
+
+static inline int hif_get_counters_table(struct wfx_dev *wdev,
+ struct hif_mib_extended_count_table *arg)
+{
+ if (wfx_api_older_than(wdev, 1, 3)) {
+ // extended_count_table is wider than count_table
+ memset(arg, 0xFF, sizeof(*arg));
+ return hif_read_mib(wdev, 0, HIF_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(struct hif_mib_count_table));
+ } else {
+ return hif_read_mib(wdev, 0,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
+ sizeof(struct hif_mib_extended_count_table));
+ }
+}
+
+static inline int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
+{
+ struct hif_mib_mac_address msg = { };
+
+ if (mac)
+ ether_addr_copy(msg.mac_addr, mac);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
+ &msg, sizeof(msg));
+}
+
+static inline int hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid,
+ bool fwd_probe_req)
+{
+ struct hif_mib_rx_filter val = { };
+
+ if (filter_bssid)
+ val.bssid_filter = 1;
+ if (fwd_probe_req)
+ val.fwd_probe_req = 1;
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_beacon_filter_table(struct wfx_vif *wvif,
+ int tbl_len,
+ struct hif_ie_table_entry *tbl)
+{
+ int ret;
+ struct hif_mib_bcn_filter_table *val;
+ int buf_len = struct_size(val, ie_table, tbl_len);
+
+ val = kzalloc(buf_len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ val->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
+ kfree(val);
+ return ret;
+}
+
+static inline int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count)
+{
+ struct hif_mib_bcn_filter_enable arg = {
+ .enable = cpu_to_le32(enable),
+ .bcn_count = cpu_to_le32(beacon_count),
+ };
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_operational_mode(struct wfx_dev *wdev,
+ enum hif_op_power_mode mode)
+{
+ struct hif_mib_gl_operational_power_mode val = {
+ .power_mode = mode,
+ .wup_ind_activation = 1,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_template_frame(struct wfx_vif *wvif,
+ struct hif_mib_template_frame *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
+{
+ struct hif_mib_protected_mgmt_policy val = { };
+
+ WARN(required && !capable, "incoherent arguments");
+ if (capable) {
+ val.pmf_enable = 1;
+ val.host_enc_auth_frames = 1;
+ }
+ if (!required)
+ val.unpmf_allowed = 1;
+ cpu_to_le32s((u32 *) &val);
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy)
+{
+ struct hif_mib_block_ack_policy val = {
+ .block_ack_tx_tid_policy = tx_tid_policy,
+ .block_ack_rx_tid_policy = rx_tid_policy,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
+ &val, sizeof(val));
+}
+
+static inline int hif_set_association_mode(struct wfx_vif *wvif,
+ struct hif_mib_set_association_mode *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, arg, sizeof(*arg));
+}
+
+static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ struct hif_mib_set_tx_rate_retry_policy *arg)
+{
+ size_t size = struct_size(arg, tx_rate_retry_policy,
+ arg->num_tx_rate_policies);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+}
+
+static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ struct hif_mib_mac_addr_data_frame_condition *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
+ struct hif_mib_uc_mc_bc_data_frame_condition *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_set_config_data_filter(struct wfx_vif *wvif,
+ struct hif_mib_config_data_filter *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CONFIG_DATA_FILTER, arg, sizeof(*arg));
+}
+
+static inline int hif_set_data_filtering(struct wfx_vif *wvif,
+ struct hif_mib_set_data_filtering *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_DATA_FILTERING, arg, sizeof(*arg));
+}
+
+static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
+{
+ struct hif_mib_keep_alive_period arg = {
+ .keep_alive_period = cpu_to_le16(period),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif,
+ struct hif_mib_arp_ip_addr_table *fp)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ fp, sizeof(*fp));
+}
+
+static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev,
+ bool enabled)
+{
+ __le32 arg = enabled ? cpu_to_le32(1) : 0;
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_set_uapsd_info(struct wfx_vif *wvif,
+ struct hif_mib_set_uapsd_information *arg)
+{
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION,
+ arg, sizeof(*arg));
+}
+
+static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
+{
+ __le32 arg = enable ? cpu_to_le32(1) : 0;
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
+}
+
+static inline int hif_slot_time(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val);
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool val)
+{
+ struct hif_mib_set_ht_protection arg = {
+ .dual_cts_prot = val,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &arg, sizeof(arg));
+}
+
+static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
+{
+ __le32 arg = cpu_to_le32(val > 0 ? val : 0xFFFF);
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
+}
+
+#endif
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
new file mode 100644
index 000000000000..47e04c59ed93
--- /dev/null
+++ b/drivers/staging/wfx/hwio.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level I/O functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "hwio.h"
+#include "wfx.h"
+#include "bus.h"
+#include "traces.h"
+
+/*
+ * Internal helpers.
+ *
+ * About CONFIG_VMAP_STACK:
+ * When CONFIG_VMAP_STACK is enabled, it is not possible to run DMA on stack
+ * allocated data. Functions below that work with registers (aka functions
+ * ending with "32") automatically reallocate buffers with kmalloc. However,
+ * functions that work with arbitrary length buffers let the caller handle
+ * the memory location. If in doubt, enable CONFIG_DEBUG_SG to detect badly
+ * located buffers.
+ */
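+
+/*
+ * For illustration (a sketch of the pitfall, not driver code): with
+ * CONFIG_VMAP_STACK a local variable may live in vmalloc space, out of
+ * reach of DMA:
+ *
+ *   u32 val;                                        // on the vmapped stack
+ *   copy_from_io(priv, reg, &val, sizeof(val));     // DMA may fail here
+ *
+ * read32() and write32() below therefore bounce through a kmalloc'ed
+ * buffer.
+ */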
+
+static int read32(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ *val = ~0; // Never return undefined value
+ if (!tmp)
+ return -ENOMEM;
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp,
+ sizeof(u32));
+ if (ret >= 0)
+ *val = le32_to_cpu(*tmp);
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int write32(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, tmp,
+ sizeof(u32));
+ kfree(tmp);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static int read32_locked(struct wfx_dev *wdev, int reg, u32 *val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = read32(wdev, reg, val);
+ _trace_io_read32(reg, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int write32_locked(struct wfx_dev *wdev, int reg, u32 val)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = write32(wdev, reg, val);
+ _trace_io_write32(reg, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
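+/*
+ * Masked read-modify-write: new = (old & ~mask) | (val & mask). The
+ * write is skipped when the masked bits already hold the requested
+ * value, which saves one bus transaction.
+ */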
+static int write32_bits_locked(struct wfx_dev *wdev, int reg, u32 mask, u32 val)
+{
+ int ret;
+ u32 val_r, val_w;
+
+ WARN_ON(~mask & val);
+ val &= mask;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = read32(wdev, reg, &val_r);
+ _trace_io_read32(reg, val_r);
+ if (ret < 0)
+ goto err;
+ val_w = (val_r & ~mask) | val;
+ if (val_w != val_r) {
+ ret = write32(wdev, reg, val_w);
+ _trace_io_write32(reg, val_w);
+ }
+err:
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
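+/*
+ * Indirect (DPORT) read handshake, as implemented below: program
+ * WFX_REG_BASE_ADDR, set the matching prefetch bit in WFX_REG_CONFIG,
+ * poll until the chip clears it (at most 20 x 200us here), then fetch
+ * the data through the data port register.
+ */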
+static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf,
+ size_t len)
+{
+ int ret;
+ int i;
+ u32 cfg;
+ u32 prefetch;
+
+ WARN_ON(len >= 0x2000);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+
+ if (reg == WFX_REG_AHB_DPORT)
+ prefetch = CFG_PREFETCH_AHB;
+ else if (reg == WFX_REG_SRAM_DPORT)
+ prefetch = CFG_PREFETCH_SRAM;
+ else
+ return -ENODEV;
+
+ ret = write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ goto err;
+
+ ret = read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+
+ ret = write32(wdev, WFX_REG_CONFIG, cfg | prefetch);
+ if (ret < 0)
+ goto err;
+
+ for (i = 0; i < 20; i++) {
+ ret = read32(wdev, WFX_REG_CONFIG, &cfg);
+ if (ret < 0)
+ goto err;
+ if (!(cfg & prefetch))
+ break;
+ udelay(200);
+ }
+ if (i == 20) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, buf, len);
+
+err:
+ if (ret < 0)
+ memset(buf, 0xFF, len); // Never return undefined value
+ return ret;
+}
+
+static int indirect_write(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ WARN_ON(len >= 0x2000);
+ WARN_ON(reg != WFX_REG_AHB_DPORT && reg != WFX_REG_SRAM_DPORT);
+ ret = write32(wdev, WFX_REG_BASE_ADDR, addr);
+ if (ret < 0)
+ return ret;
+
+ return wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv, reg, buf, len);
+}
+
+static int indirect_read_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_read(wdev, reg, addr, buf, len);
+ _trace_io_ind_read(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ const void *buf, size_t len)
+{
+ int ret;
+
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_write(wdev, reg, addr, buf, len);
+ _trace_io_ind_write(reg, addr, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ return ret;
+}
+
+static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ u32 *val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_read(wdev, reg, addr, tmp, sizeof(u32));
+ *val = le32_to_cpu(*tmp);
+ _trace_io_ind_read32(reg, addr, *val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+static int indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr,
+ u32 val)
+{
+ int ret;
+ __le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+ *tmp = cpu_to_le32(val);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = indirect_write(wdev, reg, addr, tmp, sizeof(u32));
+ _trace_io_ind_write32(reg, addr, val);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ kfree(tmp);
+ return ret;
+}
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
+{
+ int ret;
+
+ WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv,
+ WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_read(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
+{
+ int ret;
+
+ WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ wdev->hwbus_ops->lock(wdev->hwbus_priv);
+ ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv,
+ WFX_REG_IN_OUT_QUEUE, buf, len);
+ _trace_io_write(WFX_REG_IN_OUT_QUEUE, buf, len);
+ wdev->hwbus_ops->unlock(wdev->hwbus_priv);
+ if (ret)
+ dev_err(wdev->dev, "%s: bus communication error: %d\n",
+ __func__, ret);
+ return ret;
+}
+
+int sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return indirect_read_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len)
+{
+ return indirect_read_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return indirect_write_locked(wdev, WFX_REG_SRAM_DPORT, addr, buf, len);
+}
+
+int ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len)
+{
+ return indirect_write_locked(wdev, WFX_REG_AHB_DPORT, addr, buf, len);
+}
+
+int sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return indirect_read32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val)
+{
+ return indirect_read32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return indirect_write32_locked(wdev, WFX_REG_SRAM_DPORT, addr, val);
+}
+
+int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val)
+{
+ return indirect_write32_locked(wdev, WFX_REG_AHB_DPORT, addr, val);
+}
+
+int config_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return read32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int config_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_CONFIG, val);
+}
+
+int config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return write32_bits_locked(wdev, WFX_REG_CONFIG, mask, val);
+}
+
+int control_reg_read(struct wfx_dev *wdev, u32 *val)
+{
+ return read32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int control_reg_write(struct wfx_dev *wdev, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_CONTROL, val);
+}
+
+int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val)
+{
+ return write32_bits_locked(wdev, WFX_REG_CONTROL, mask, val);
+}
+
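+/*
+ * Layout of the WFX_REG_SET_GEN_R_W command word (see hwio.h): bit 31
+ * (IGPR_RW) flags the access as a read, bits 30..24 (IGPR_INDEX) select
+ * the register and bits 23..0 (IGPR_VALUE) carry the value. A read is
+ * thus a write of the command word followed by a read-back of the same
+ * register.
+ */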
+int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val)
+{
+ int ret;
+
+ *val = ~0; // Never return undefined value
+ ret = write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24);
+ if (ret)
+ return ret;
+ ret = read32_locked(wdev, WFX_REG_SET_GEN_R_W, val);
+ if (ret)
+ return ret;
+ *val &= IGPR_VALUE;
+ return ret;
+}
+
+int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val)
+{
+ return write32_locked(wdev, WFX_REG_SET_GEN_R_W, index << 24 | val);
+}
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
new file mode 100644
index 000000000000..b2c1a66de963
--- /dev/null
+++ b/drivers/staging/wfx/hwio.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Low-level API.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_HWIO_H
+#define WFX_HWIO_H
+
+#include <linux/types.h>
+
+struct wfx_dev;
+
+int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t buf_len);
+int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t buf_len);
+
+int sram_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int sram_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int ahb_buf_read(struct wfx_dev *wdev, u32 addr, void *buf, size_t len);
+int ahb_buf_write(struct wfx_dev *wdev, u32 addr, const void *buf, size_t len);
+
+int sram_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
+int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
+
+#define CFG_ERR_SPI_FRAME 0x00000001 // only with SPI
+#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 // only with SDIO
+#define CFG_ERR_BUF_UNDERRUN 0x00000002
+#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004
+#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008
+#define CFG_ERR_BUF_OVERRUN 0x00000010
+#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020
+#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
+#define CFG_ERR_HOST_CRC_MISS 0x00000080 // only with SDIO
+#define CFG_SPI_IGNORE_CS 0x00000080 // only with SPI
+/* Bytes ordering (only writable in SPI): */
+#define CFG_WORD_MODE_MASK 0x00000300
+/*
+ * B1,B0,B3,B2 (In SPI, register address and
+ * CONFIG data always use this mode)
+ */
+#define CFG_WORD_MODE0 0x00000000
+#define CFG_WORD_MODE1 0x00000100 // B3,B2,B1,B0
+#define CFG_WORD_MODE2 0x00000200 // B0,B1,B2,B3 (SDIO)
+#define CFG_DIRECT_ACCESS_MODE 0x00000400 // Direct or queue access mode
+#define CFG_PREFETCH_AHB 0x00000800
+#define CFG_DISABLE_CPU_CLK 0x00001000
+#define CFG_PREFETCH_SRAM 0x00002000
+#define CFG_CPU_RESET 0x00004000
+#define CFG_SDIO_DISABLE_IRQ 0x00008000 // only with SDIO
+#define CFG_IRQ_ENABLE_DATA 0x00010000
+#define CFG_IRQ_ENABLE_WRDY 0x00020000
+#define CFG_CLK_RISE_EDGE 0x00040000
+#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 // only with SDIO
+#define CFG_RESERVED 0x00F00000
+#define CFG_DEVICE_ID_MAJOR 0x07000000
+#define CFG_DEVICE_ID_RESERVED 0x78000000
+#define CFG_DEVICE_ID_TYPE 0x80000000
+int config_reg_read(struct wfx_dev *wdev, u32 *val);
+int config_reg_write(struct wfx_dev *wdev, u32 val);
+int config_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define CTRL_NEXT_LEN_MASK 0x00000FFF
+#define CTRL_WLAN_WAKEUP 0x00001000
+#define CTRL_WLAN_READY 0x00002000
+int control_reg_read(struct wfx_dev *wdev, u32 *val);
+int control_reg_write(struct wfx_dev *wdev, u32 val);
+int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
+
+#define IGPR_RW 0x80000000
+#define IGPR_INDEX 0x7F000000
+#define IGPR_VALUE 0x00FFFFFF
+int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val);
+int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val);
+
+#endif /* WFX_HWIO_H */
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
new file mode 100644
index 000000000000..96adfa330604
--- /dev/null
+++ b/drivers/staging/wfx/key.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Key management related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "key.h"
+#include "wfx.h"
+#include "hif_tx_mib.h"
+
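+/*
+ * Hardware key slots are tracked in the wdev->key_map bitmask:
+ * ffs(~key_map) - 1 yields the first free slot (e.g. key_map == 0x07
+ * gives idx == 3).
+ */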
+static int wfx_alloc_key(struct wfx_dev *wdev)
+{
+ int idx;
+
+ idx = ffs(~wdev->key_map) - 1;
+ if (idx < 0 || idx >= MAX_KEY_ENTRIES)
+ return -1;
+
+ wdev->key_map |= BIT(idx);
+ wdev->keys[idx].entry_index = idx;
+ return idx;
+}
+
+static void wfx_free_key(struct wfx_dev *wdev, int idx)
+{
+ WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
+ memset(&wdev->keys[idx], 0, sizeof(wdev->keys[idx]));
+ wdev->key_map &= ~BIT(idx);
+}
+
+static u8 fill_wep_pair(struct hif_wep_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_WEP_PAIRWISE;
+}
+
+static u8 fill_wep_group(struct hif_wep_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
+ msg->key_id = key->keyidx;
+ msg->key_length = key->keylen;
+ memcpy(msg->key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_WEP_DEFAULT;
+}
+
+static u8 fill_tkip_pair(struct hif_tkip_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data)
+ + sizeof(msg->tx_mic_key)
+ + sizeof(msg->rx_mic_key), "inconsistent data");
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ memcpy(msg->tx_mic_key, keybuf, sizeof(msg->tx_mic_key));
+ keybuf += sizeof(msg->tx_mic_key);
+ memcpy(msg->rx_mic_key, keybuf, sizeof(msg->rx_mic_key));
+ ether_addr_copy(msg->peer_address, peer_addr);
+ return HIF_KEY_TYPE_TKIP_PAIRWISE;
+}
+
+static u8 fill_tkip_group(struct hif_tkip_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq,
+ enum nl80211_iftype iftype)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->tkip_key_data)
+ + 2 * sizeof(msg->rx_mic_key), "inconsistent data");
+ msg->key_id = key->keyidx;
+ memcpy(msg->rx_sequence_counter,
+ &seq->tkip.iv16, sizeof(seq->tkip.iv16));
+ memcpy(msg->rx_sequence_counter + sizeof(u16),
+ &seq->tkip.iv32, sizeof(seq->tkip.iv32));
+ memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
+ keybuf += sizeof(msg->tkip_key_data);
+ if (iftype == NL80211_IFTYPE_AP)
+ // Use Tx MIC Key
+ memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key));
+ else
+ // Use Rx MIC Key
+ memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key));
+ return HIF_KEY_TYPE_TKIP_GROUP;
+}
+
+static u8 fill_ccmp_pair(struct hif_aes_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ return HIF_KEY_TYPE_AES_PAIRWISE;
+}
+
+static u8 fill_ccmp_group(struct hif_aes_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
+ memcpy(msg->aes_key_data, key->key, key->keylen);
+ memcpy(msg->rx_sequence_counter, seq->ccmp.pn, sizeof(seq->ccmp.pn));
+ memreverse(msg->rx_sequence_counter, sizeof(seq->ccmp.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_AES_GROUP;
+}
+
+static u8 fill_sms4_pair(struct hif_wapi_pairwise_key *msg,
+ struct ieee80211_key_conf *key, u8 *peer_addr)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data)
+ + sizeof(msg->mic_key_data), "inconsistent data");
+ ether_addr_copy(msg->peer_address, peer_addr);
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_PAIRWISE;
+}
+
+static u8 fill_sms4_group(struct hif_wapi_group_key *msg,
+ struct ieee80211_key_conf *key)
+{
+ u8 *keybuf = key->key;
+
+ WARN(key->keylen != sizeof(msg->wapi_key_data)
+ + sizeof(msg->mic_key_data), "inconsistent data");
+ memcpy(msg->wapi_key_data, keybuf, sizeof(msg->wapi_key_data));
+ keybuf += sizeof(msg->wapi_key_data);
+ memcpy(msg->mic_key_data, keybuf, sizeof(msg->mic_key_data));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_WAPI_GROUP;
+}
+
+static u8 fill_aes_cmac_group(struct hif_igtk_group_key *msg,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data");
+ memcpy(msg->igtk_key_data, key->key, key->keylen);
+ memcpy(msg->ipn, seq->aes_cmac.pn, sizeof(seq->aes_cmac.pn));
+ memreverse(msg->ipn, sizeof(seq->aes_cmac.pn));
+ msg->key_id = key->keyidx;
+ return HIF_KEY_TYPE_IGTK_GROUP;
+}
+
+static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret;
+ struct hif_req_add_key *k;
+ struct ieee80211_key_seq seq;
+ struct wfx_dev *wdev = wvif->wdev;
+ int idx = wfx_alloc_key(wvif->wdev);
+ bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+
+ WARN(pairwise && !sta, "inconsistent data");
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+ if (idx < 0)
+ return -EINVAL;
+ k = &wdev->keys[idx];
+ k->int_id = wvif->id;
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) {
+ if (pairwise)
+ k->type = fill_wep_pair(&k->key.wep_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_wep_group(&k->key.wep_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ if (pairwise)
+ k->type = fill_tkip_pair(&k->key.tkip_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_tkip_group(&k->key.tkip_group_key, key,
+ &seq, wvif->vif->type);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
+ if (pairwise)
+ k->type = fill_ccmp_pair(&k->key.aes_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_ccmp_group(&k->key.aes_group_key, key,
+ &seq);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
+ if (pairwise)
+ k->type = fill_sms4_pair(&k->key.wapi_pairwise_key, key,
+ sta->addr);
+ else
+ k->type = fill_sms4_group(&k->key.wapi_group_key, key);
+ } else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ k->type = fill_aes_cmac_group(&k->key.igtk_group_key, key,
+ &seq);
+ } else {
+ dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ ret = hif_add_key(wdev, k);
+ if (ret) {
+ wfx_free_key(wdev, idx);
+ return -EOPNOTSUPP;
+ }
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+ key->hw_key_idx = idx;
+ return 0;
+}
+
+static int wfx_remove_key(struct wfx_vif *wvif, struct ieee80211_key_conf *key)
+{
+ WARN(key->hw_key_idx >= MAX_KEY_ENTRIES, "corrupted hw_key_idx");
+ wfx_free_key(wvif->wdev, key->hw_key_idx);
+ return hif_remove_key(wvif->wdev, key->hw_key_idx);
+}
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int ret = -EOPNOTSUPP;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ if (cmd == SET_KEY)
+ ret = wfx_add_key(wvif, sta, key);
+ if (cmd == DISABLE_KEY)
+ ret = wfx_remove_key(wvif, key);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return ret;
+}
+
+int wfx_upload_keys(struct wfx_vif *wvif)
+{
+ int i;
+ struct hif_req_add_key *key;
+ struct wfx_dev *wdev = wvif->wdev;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->keys); i++) {
+ if (wdev->key_map & BIT(i)) {
+ key = &wdev->keys[i];
+ if (key->int_id == wvif->id)
+ hif_add_key(wdev, key);
+ }
+ }
+ return 0;
+}
+
+void wfx_wep_key_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, wep_key_work);
+
+ wfx_tx_flush(wvif->wdev);
+ hif_wep_default_key_id(wvif, wvif->wep_default_key_id);
+ wfx_pending_requeue(wvif->wdev, wvif->wep_pending_skb);
+ wvif->wep_pending_skb = NULL;
+ wfx_tx_unlock(wvif->wdev);
+}
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
new file mode 100644
index 000000000000..9436ccdf4d3b
--- /dev/null
+++ b/drivers/staging/wfx/key.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_KEY_H
+#define WFX_KEY_H
+
+#include <net/mac80211.h>
+
+struct wfx_dev;
+struct wfx_vif;
+
+int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int wfx_upload_keys(struct wfx_vif *wvif);
+void wfx_wep_key_work(struct work_struct *work);
+
+#endif /* WFX_KEY_H */
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
new file mode 100644
index 000000000000..986a2ef678b9
--- /dev/null
+++ b/drivers/staging/wfx/main.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/spi/spi.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+
+#include "main.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bus.h"
+#include "bh.h"
+#include "sta.h"
+#include "key.h"
+#include "debug.h"
+#include "data_tx.h"
+#include "secure_link.h"
+#include "hif_tx_mib.h"
+#include "hif_api_cmd.h"
+
+#define WFX_PDS_MAX_SIZE 1500
+
+MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WFx");
+MODULE_AUTHOR("Jérôme Pouiller <jerome.pouiller@silabs.com>");
+MODULE_LICENSE("GPL");
+
+static int gpio_wakeup = -2;
+module_param(gpio_wakeup, int, 0644);
+MODULE_PARM_DESC(gpio_wakeup, "gpio number for wakeup. -1 for none.");
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .hw_value = (_rateid), \
+ .flags = (_flags), \
+}
+
+static struct ieee80211_rate wfx_rates[] = {
+ RATETAB_ENT(10, 0, 0),
+ RATETAB_ENT(20, 1, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(55, 2, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(110, 3, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(60, 6, 0),
+ RATETAB_ENT(90, 7, 0),
+ RATETAB_ENT(120, 8, 0),
+ RATETAB_ENT(180, 9, 0),
+ RATETAB_ENT(240, 10, 0),
+ RATETAB_ENT(360, 11, 0),
+ RATETAB_ENT(480, 12, 0),
+ RATETAB_ENT(540, 13, 0),
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_channel wfx_2ghz_chantable[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_supported_band wfx_band_2ghz = {
+ .channels = wfx_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(wfx_2ghz_chantable),
+ .bitrates = wfx_rates,
+ .n_bitrates = ARRAY_SIZE(wfx_rates),
+ .ht_cap = {
+ // Receive caps
+ .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
+ .ht_supported = 1,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+ .mcs = {
+ .rx_mask = { 0xFF }, // MCS0 to MCS7
+ .rx_highest = 65,
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static const struct ieee80211_iface_limit wdev_iface_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 2,
+ .limits = wdev_iface_limits,
+ .n_limits = ARRAY_SIZE(wdev_iface_limits),
+ }
+};
+
+static const struct ieee80211_ops wfx_ops = {
+ .start = wfx_start,
+ .stop = wfx_stop,
+ .add_interface = wfx_add_interface,
+ .remove_interface = wfx_remove_interface,
+ .config = wfx_config,
+ .tx = wfx_tx,
+ .conf_tx = wfx_conf_tx,
+ .hw_scan = wfx_hw_scan,
+ .sta_add = wfx_sta_add,
+ .sta_remove = wfx_sta_remove,
+ .sta_notify = wfx_sta_notify,
+ .set_tim = wfx_set_tim,
+ .set_key = wfx_set_key,
+ .set_rts_threshold = wfx_set_rts_threshold,
+ .bss_info_changed = wfx_bss_info_changed,
+ .prepare_multicast = wfx_prepare_multicast,
+ .configure_filter = wfx_configure_filter,
+ .ampdu_action = wfx_ampdu_action,
+ .flush = wfx_flush,
+ .add_chanctx = wfx_add_chanctx,
+ .remove_chanctx = wfx_remove_chanctx,
+ .change_chanctx = wfx_change_chanctx,
+ .assign_vif_chanctx = wfx_assign_vif_chanctx,
+ .unassign_vif_chanctx = wfx_unassign_vif_chanctx,
+};
+
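+/*
+ * Lexicographic comparison of (major, minor) against the firmware API
+ * version: e.g. wfx_api_older_than(wdev, 1, 0) is true only for
+ * pre-1.0 APIs.
+ */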
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
+{
+ if (wdev->hw_caps.api_version_major < major)
+ return true;
+ if (wdev->hw_caps.api_version_major > major)
+ return false;
+ if (wdev->hw_caps.api_version_minor < minor)
+ return true;
+ return false;
+}
+
+struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
+ const char *label)
+{
+ struct gpio_desc *ret;
+ char label_buf[256];
+
+ if (override >= 0) {
+ snprintf(label_buf, sizeof(label_buf), "wfx_%s", label);
+ ret = ERR_PTR(devm_gpio_request_one(dev, override,
+ GPIOF_OUT_INIT_LOW,
+ label_buf));
+ if (!ret)
+ ret = gpio_to_desc(override);
+ } else if (override == -1) {
+ ret = NULL;
+ } else {
+ ret = devm_gpiod_get(dev, label, GPIOD_OUT_LOW);
+ }
+ if (IS_ERR(ret) || !ret) {
+ if (!ret || PTR_ERR(ret) == -ENOENT)
+ dev_warn(dev, "gpio %s is not defined\n", label);
+ else
+ dev_warn(dev,
+ "error while requesting gpio %s\n", label);
+ ret = NULL;
+ } else {
+ dev_dbg(dev,
+ "using gpio %d for %s\n", desc_to_gpio(ret), label);
+ }
+ return ret;
+}
+
+/* NOTE: wfx_send_pds() destroys buf */
+int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
+{
+ int ret;
+ int start, brace_level, i;
+
+ start = 0;
+ brace_level = 0;
+ if (buf[0] != '{') {
+ dev_err(wdev->dev, "valid PDS start with '{'. Did you forget to compress it?\n");
+ return -EINVAL;
+ }
+ for (i = 1; i < len - 1; i++) {
+ if (buf[i] == '{')
+ brace_level++;
+ if (buf[i] == '}')
+ brace_level--;
+ if (buf[i] == '}' && !brace_level) {
+ i++;
+ if (i - start + 1 > WFX_PDS_MAX_SIZE)
+ return -EFBIG;
+ buf[start] = '{';
+ buf[i] = 0;
+ dev_dbg(wdev->dev, "send PDS '%s}'\n", buf + start);
+ buf[i] = '}';
+ ret = hif_configuration(wdev, buf + start,
+ i - start + 1);
+ if (ret == HIF_STATUS_FAILURE) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n", start, i);
+ return -EINVAL;
+ }
+ if (ret == -ETIMEDOUT) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n", start, i);
+ return ret;
+ }
+ if (ret) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n", start, i);
+ return -EIO;
+ }
+ buf[i] = ',';
+ start = i;
+ }
+ }
+ return 0;
+}
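+
+/*
+ * Example (illustrative): a compressed PDS such as "{a:{1},b:{2}}" is
+ * split at each top-level "}," boundary and sent as two independent
+ * chunks, "{a:{1}}" then "{b:{2}}"; the separator bytes are rewritten
+ * in place rather than copying the buffer.
+ */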
+
+static int wfx_send_pdata_pds(struct wfx_dev *wdev)
+{
+ int ret = 0;
+ const struct firmware *pds;
+ unsigned char *tmp_buf;
+
+ ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
+ if (ret) {
+ dev_err(wdev->dev, "can't load PDS file %s\n",
+ wdev->pdata.file_pds);
+ return ret;
+ }
+ tmp_buf = kmemdup(pds->data, pds->size, GFP_KERNEL);
+ if (!tmp_buf) {
+ release_firmware(pds);
+ return -ENOMEM;
+ }
+ ret = wfx_send_pds(wdev, tmp_buf, pds->size);
+ kfree(tmp_buf);
+ release_firmware(pds);
+ return ret;
+}
+
+struct wfx_dev *wfx_init_common(struct device *dev,
+ const struct wfx_platform_data *pdata,
+ const struct hwbus_ops *hwbus_ops,
+ void *hwbus_priv)
+{
+ struct ieee80211_hw *hw;
+ struct wfx_dev *wdev;
+
+ hw = ieee80211_alloc_hw(sizeof(struct wfx_dev), &wfx_ops);
+ if (!hw)
+ return NULL;
+
+ SET_IEEE80211_DEV(hw, dev);
+
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+
+ hw->vif_data_size = sizeof(struct wfx_vif);
+ hw->sta_data_size = sizeof(struct wfx_sta_priv);
+ hw->queues = 4;
+ hw->max_rates = 8;
+ hw->max_rate_tries = 15;
+ hw->extra_tx_headroom = sizeof(struct hif_sl_msg_hdr) +
+ sizeof(struct hif_msg) +
+ sizeof(struct hif_req_tx) +
+ 4 /* alignment */ + 8 /* TKIP IV */;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
+ hw->wiphy->max_scan_ssids = 2;
+ hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
+ hw->wiphy->iface_combinations = wfx_iface_combinations;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL);
+ if (!hw->wiphy->bands[NL80211_BAND_2GHZ]) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+ // FIXME: also copy wfx_rates and wfx_2ghz_chantable
+ memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz,
+ sizeof(wfx_band_2ghz));
+
+ wdev = hw->priv;
+ wdev->hw = hw;
+ wdev->dev = dev;
+ wdev->hwbus_ops = hwbus_ops;
+ wdev->hwbus_priv = hwbus_priv;
+ memcpy(&wdev->pdata, pdata, sizeof(*pdata));
+ of_property_read_string(dev->of_node, "config-file",
+ &wdev->pdata.file_pds);
+ wdev->pdata.gpio_wakeup = wfx_get_gpio(dev, gpio_wakeup, "wakeup");
+ wfx_sl_fill_pdata(dev, &wdev->pdata);
+
+ mutex_init(&wdev->conf_mutex);
+ mutex_init(&wdev->rx_stats_lock);
+ init_completion(&wdev->firmware_ready);
+ wfx_init_hif_cmd(&wdev->hif_cmd);
+ wfx_tx_queues_init(wdev);
+
+ return wdev;
+}
+
+void wfx_free_common(struct wfx_dev *wdev)
+{
+ mutex_destroy(&wdev->rx_stats_lock);
+ mutex_destroy(&wdev->conf_mutex);
+ wfx_tx_queues_deinit(wdev);
+ ieee80211_free_hw(wdev->hw);
+}
+
+int wfx_probe(struct wfx_dev *wdev)
+{
+ int i;
+ int err;
+ const void *macaddr;
+ struct gpio_desc *gpio_saved;
+
+ // During the first part of boot, gpio_wakeup cannot be used yet, so
+ // prevent bh() from touching it.
+ gpio_saved = wdev->pdata.gpio_wakeup;
+ wdev->pdata.gpio_wakeup = NULL;
+
+ wfx_bh_register(wdev);
+
+ err = wfx_init_device(wdev);
+ if (err)
+ goto err1;
+
+ err = wait_for_completion_interruptible_timeout(&wdev->firmware_ready,
+ 10 * HZ);
+ if (err <= 0) {
+ if (err == 0) {
+ dev_err(wdev->dev, "timeout while waiting for startup indication. IRQ configuration error?\n");
+ err = -ETIMEDOUT;
+ } else if (err == -ERESTARTSYS) {
+ dev_info(wdev->dev, "probe interrupted by user\n");
+ }
+ goto err1;
+ }
+
+ // FIXME: fill wiphy::hw_version
+ dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
+ wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
+ wdev->hw_caps.api_version_major,
+ wdev->hw_caps.api_version_minor,
+ wdev->keyset, *((u32 *) &wdev->hw_caps.capabilities));
+ snprintf(wdev->hw->wiphy->fw_version,
+ sizeof(wdev->hw->wiphy->fw_version),
+ "%d.%d.%d",
+ wdev->hw_caps.firmware_major,
+ wdev->hw_caps.firmware_minor,
+ wdev->hw_caps.firmware_build);
+
+ if (wfx_api_older_than(wdev, 1, 0)) {
+ dev_err(wdev->dev,
+ "unsupported firmware API version (expect 1 while firmware returns %d)\n",
+ wdev->hw_caps.api_version_major);
+ err = -ENOTSUPP;
+ goto err1;
+ }
+
+ err = wfx_sl_init(wdev);
+ if (err && wdev->hw_caps.capabilities.link_mode == SEC_LINK_ENFORCED) {
+ dev_err(wdev->dev,
+ "chip require secure_link, but can't negociate it\n");
+ goto err1;
+ }
+
+ if (wdev->hw_caps.regul_sel_mode_info.region_sel_mode) {
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[11].flags |= IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[12].flags |= IEEE80211_CHAN_NO_IR;
+ wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[13].flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ dev_dbg(wdev->dev, "sending configuration file %s\n",
+ wdev->pdata.file_pds);
+ err = wfx_send_pdata_pds(wdev);
+ if (err < 0)
+ goto err1;
+
+ wdev->pdata.gpio_wakeup = gpio_saved;
+ if (wdev->pdata.gpio_wakeup) {
+ dev_dbg(wdev->dev,
+ "enable 'quiescent' power mode with gpio %d and PDS file %s\n",
+ desc_to_gpio(wdev->pdata.gpio_wakeup),
+ wdev->pdata.file_pds);
+ gpiod_set_value(wdev->pdata.gpio_wakeup, 1);
+ control_reg_write(wdev, 0);
+ hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_QUIESCENT);
+ } else {
+ hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
+ }
+
+ hif_use_multi_tx_conf(wdev, true);
+
+ for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
+ eth_zero_addr(wdev->addresses[i].addr);
+ macaddr = of_get_mac_address(wdev->dev->of_node);
+ if (!IS_ERR_OR_NULL(macaddr)) {
+ ether_addr_copy(wdev->addresses[i].addr, macaddr);
+ wdev->addresses[i].addr[ETH_ALEN - 1] += i;
+ } else {
+ ether_addr_copy(wdev->addresses[i].addr,
+ wdev->hw_caps.mac_addr[i]);
+ }
+ if (!is_valid_ether_addr(wdev->addresses[i].addr)) {
+ dev_warn(wdev->dev, "using random MAC address\n");
+ eth_random_addr(wdev->addresses[i].addr);
+ }
+ dev_info(wdev->dev, "MAC address %d: %pM\n", i,
+ wdev->addresses[i].addr);
+ }
+ wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses);
+ wdev->hw->wiphy->addresses = wdev->addresses;
+
+ err = ieee80211_register_hw(wdev->hw);
+ if (err)
+ goto err1;
+
+ err = wfx_debug_init(wdev);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ ieee80211_unregister_hw(wdev->hw);
+ ieee80211_free_hw(wdev->hw);
+err1:
+ wfx_bh_unregister(wdev);
+ return err;
+}
+
+void wfx_release(struct wfx_dev *wdev)
+{
+ ieee80211_unregister_hw(wdev->hw);
+ hif_shutdown(wdev);
+ wfx_bh_unregister(wdev);
+ wfx_sl_deinit(wdev);
+}
+
+static int __init wfx_core_init(void)
+{
+ int ret = 0;
+
+ if (IS_ENABLED(CONFIG_SPI))
+ ret = spi_register_driver(&wfx_spi_driver);
+ if (IS_ENABLED(CONFIG_MMC) && !ret)
+ ret = sdio_register_driver(&wfx_sdio_driver);
+ return ret;
+}
+module_init(wfx_core_init);
+
+static void __exit wfx_core_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MMC))
+ sdio_unregister_driver(&wfx_sdio_driver);
+ if (IS_ENABLED(CONFIG_SPI))
+ spi_unregister_driver(&wfx_spi_driver);
+}
+module_exit(wfx_core_exit);
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
new file mode 100644
index 000000000000..875f8c227803
--- /dev/null
+++ b/drivers/staging/wfx/main.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device probe and register.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_MAIN_H
+#define WFX_MAIN_H
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+
+#include "bus.h"
+#include "hif_api_general.h"
+
+struct wfx_dev;
+
+struct wfx_platform_data {
+ /* Keyset and ".sec" extention will appended to this string */
+ const char *file_fw;
+ const char *file_pds;
+ struct gpio_desc *gpio_wakeup;
+ /*
+ * If true, HIF D_out is sampled on the rising edge of the clock
+ * (intended to be used with 50MHz SDIO)
+ */
+ bool use_rising_clk;
+};
+
+struct wfx_dev *wfx_init_common(struct device *dev,
+ const struct wfx_platform_data *pdata,
+ const struct hwbus_ops *hwbus_ops,
+ void *hwbus_priv);
+void wfx_free_common(struct wfx_dev *wdev);
+
+int wfx_probe(struct wfx_dev *wdev);
+void wfx_release(struct wfx_dev *wdev);
+
+struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
+ const char *label);
+bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
+int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len);
+
+#endif
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
new file mode 100644
index 000000000000..c7ee90888f69
--- /dev/null
+++ b/drivers/staging/wfx/queue.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wfx.h"
+#include "sta.h"
+#include "data_tx.h"
+
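+/*
+ * wdev->tx_lock is a counting lock: each wfx_tx_lock() increments it,
+ * and tx is kicked again (wfx_bh_request_tx()) only when the last
+ * wfx_tx_unlock() brings the count back to zero.
+ */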
+void wfx_tx_lock(struct wfx_dev *wdev)
+{
+ atomic_inc(&wdev->tx_lock);
+}
+
+void wfx_tx_unlock(struct wfx_dev *wdev)
+{
+ int tx_lock = atomic_dec_return(&wdev->tx_lock);
+
+ WARN(tx_lock < 0, "inconsistent tx_lock value");
+ if (!tx_lock)
+ wfx_bh_request_tx(wdev);
+}
+
+void wfx_tx_flush(struct wfx_dev *wdev)
+{
+ int ret;
+
+ WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
+
+ // Do not wait for any reply if chip is frozen
+ if (wdev->chip_frozen)
+ return;
+
+ mutex_lock(&wdev->hif_cmd.lock);
+ ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
+ !wdev->hif.tx_buffers_used,
+ msecs_to_jiffies(3000));
+ if (!ret) {
+ dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
+ wdev->hif.tx_buffers_used);
+ wfx_pending_dump_old_frames(wdev, 3000);
+ // FIXME: drop pending frames here
+ wdev->chip_frozen = 1;
+ }
+ mutex_unlock(&wdev->hif_cmd.lock);
+}
+
+void wfx_tx_lock_flush(struct wfx_dev *wdev)
+{
+ wfx_tx_lock(wdev);
+ wfx_tx_flush(wdev);
+}
+
+void wfx_tx_queues_lock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ if (queue->tx_locked_cnt++ == 0)
+ ieee80211_stop_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+void wfx_tx_queues_unlock(struct wfx_dev *wdev)
+{
+ int i;
+ struct wfx_queue *queue;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ WARN(!queue->tx_locked_cnt, "queue already unlocked");
+ if (--queue->tx_locked_cnt == 0)
+ ieee80211_wake_queue(wdev->hw, queue->queue_id);
+ spin_unlock_bh(&queue->queue.lock);
+ }
+}
+
+/* If successful, LOCKS the TX queue! */
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
+{
+ int i;
+ bool done;
+ struct wfx_queue *queue;
+ struct sk_buff *item;
+ struct wfx_dev *wdev = wvif->wdev;
+ struct hif_msg *hif;
+
+ if (wvif->wdev->chip_frozen) {
+ wfx_tx_lock_flush(wdev);
+ wfx_tx_queues_clear(wdev);
+ return;
+ }
+
+ do {
+ done = true;
+ wfx_tx_lock_flush(wdev);
+ for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
+ queue = &wdev->tx_queue[i];
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ hif = (struct hif_msg *) item->data;
+ if (hif->interface == wvif->id)
+ done = false;
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ }
+ if (!done) {
+ wfx_tx_unlock(wdev);
+ msleep(20);
+ }
+ } while (!done);
+}
+
+static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff_head *gc_list)
+{
+ int i;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&queue->queue.lock);
+ while ((item = __skb_dequeue(&queue->queue)) != NULL)
+ skb_queue_head(gc_list, item);
+ spin_lock_bh(&stats->pending.lock);
+ for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
+ stats->link_map_cache[i] -= queue->link_map_cache[i];
+ queue->link_map_cache[i] = 0;
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+void wfx_tx_queues_clear(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff *item;
+ struct sk_buff_head gc_list;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ skb_queue_head_init(&gc_list);
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
+ wake_up(&stats->wait_link_id_empty);
+ while ((item = skb_dequeue(&gc_list)) != NULL)
+ wfx_skb_dtor(wdev, item);
+}
+
+void wfx_tx_queues_init(struct wfx_dev *wdev)
+{
+ int i;
+
+ memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
+ memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
+ skb_queue_head_init(&wdev->tx_queue_stats.pending);
+ init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ wdev->tx_queue[i].queue_id = i;
+ skb_queue_head_init(&wdev->tx_queue[i].queue);
+ }
+}
+
+void wfx_tx_queues_deinit(struct wfx_dev *wdev)
+{
+ WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
+ wfx_tx_queues_clear(wdev);
+}
+
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ size_t ret;
+ int i, bit;
+
+ if (!link_id_map)
+ return 0;
+
+ spin_lock_bh(&queue->queue.lock);
+ if (link_id_map == (u32)-1) {
+ ret = skb_queue_len(&queue->queue);
+ } else {
+ ret = 0;
+ for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
+ ++i, bit <<= 1) {
+ if (link_id_map & bit)
+ ret += queue->link_map_cache[i];
+ }
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ return ret;
+}
+
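+/*
+ * The per-queue and global link_map_cache[] arrays count queued frames
+ * per link-id. They are updated on every put/get/requeue so that
+ * wfx_tx_queue_get_num_queued() only has to sum a small array instead
+ * of walking the skb list.
+ */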
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
+ spin_lock_bh(&queue->queue.lock);
+ __skb_queue_tail(&queue->queue, skb);
+
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&queue->queue.lock);
+}
+
+static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
+ struct wfx_queue *queue,
+ u32 link_id_map)
+{
+ struct sk_buff *skb = NULL;
+ struct sk_buff *item;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv;
+ bool wakeup_stats = false;
+
+ spin_lock_bh(&queue->queue.lock);
+ skb_queue_walk(&queue->queue, item) {
+ tx_priv = wfx_skb_tx_priv(item);
+ if (link_id_map & BIT(tx_priv->link_id)) {
+ skb = item;
+ break;
+ }
+ }
+ WARN_ON(!skb);
+ if (skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ __skb_unlink(skb, &queue->queue);
+ --queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_queue_tail(&stats->pending, skb);
+ if (!--stats->link_map_cache[tx_priv->link_id])
+ wakeup_stats = true;
+ spin_unlock_bh(&stats->pending.lock);
+ }
+ spin_unlock_bh(&queue->queue.lock);
+ if (wakeup_stats)
+ wake_up(&stats->wait_link_id_empty);
+ return skb;
+}
+
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ spin_lock_bh(&queue->queue.lock);
+ ++queue->link_map_cache[tx_priv->link_id];
+
+ spin_lock_bh(&stats->pending.lock);
+ ++stats->link_map_cache[tx_priv->link_id];
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ __skb_queue_tail(&queue->queue, skb);
+ spin_unlock_bh(&queue->queue.lock);
+ return 0;
+}
+
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ __skb_unlink(skb, &stats->pending);
+ spin_unlock_bh(&stats->pending.lock);
+ wfx_skb_dtor(wdev, skb);
+
+ return 0;
+}
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
+{
+ struct sk_buff *skb;
+ struct hif_req_tx *req;
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ req = wfx_skb_txreq(skb);
+ if (req->packet_id == packet_id) {
+ spin_unlock_bh(&stats->pending.lock);
+ return skb;
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+ WARN(1, "cannot find packet in pending queue");
+ return NULL;
+}
+
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
+{
+ struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv;
+ struct hif_req_tx *req;
+ struct sk_buff *skb;
+ bool first = true;
+
+ spin_lock_bh(&stats->pending.lock);
+ skb_queue_walk(&stats->pending, skb) {
+ tx_priv = wfx_skb_tx_priv(skb);
+ req = wfx_skb_txreq(skb);
+ if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
+ limit_ms))) {
+ if (first) {
+ dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
+ limit_ms);
+ first = false;
+ }
+ dev_info(wdev->dev, " id %08x sent %lldms ago\n",
+ req->packet_id,
+ ktime_ms_delta(now, tx_priv->xmit_timestamp));
+ }
+ }
+ spin_unlock_bh(&stats->pending.lock);
+}
+
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
+ struct sk_buff *skb)
+{
+ ktime_t now = ktime_get();
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+
+ return ktime_us_delta(now, tx_priv->xmit_timestamp);
+}
+
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
+{
+ int i;
+ struct sk_buff_head *queue;
+ bool ret = true;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ queue = &wdev->tx_queue[i].queue;
+ spin_lock_bh(&queue->lock);
+ if (!skb_queue_empty(queue))
+ ret = false;
+ spin_unlock_bh(&queue->lock);
+ }
+ return ret;
+}
+
+static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
+ struct wfx_queue *queue)
+{
+ bool handled = false;
+ struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
+ struct hif_req_tx *req = wfx_skb_txreq(skb);
+ struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+
+ enum {
+ do_probe,
+ do_drop,
+ do_wep,
+ do_tx,
+ } action = do_tx;
+
+ switch (wvif->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (wvif->state < WFX_STATE_PRE_STA)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (!wvif->state) {
+ action = do_drop;
+ } else if (!(BIT(tx_priv->raw_link_id) &
+ (BIT(0) | wvif->link_id_map))) {
+ dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
+ action = do_drop;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (wvif->state != WFX_STATE_IBSS)
+ action = do_drop;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ default:
+ action = do_drop;
+ break;
+ }
+
+ if (action == do_tx) {
+ if (ieee80211_is_nullfunc(frame->frame_control)) {
+ mutex_lock(&wvif->bss_loss_lock);
+ if (wvif->bss_loss_state) {
+ wvif->bss_loss_confirm_id = req->packet_id;
+ req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
+ }
+ mutex_unlock(&wvif->bss_loss_lock);
+ } else if (ieee80211_has_protected(frame->frame_control) &&
+ tx_priv->hw_key &&
+ tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
+ (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ action = do_wep;
+ }
+ }
+
+ switch (action) {
+ case do_drop:
+ wfx_pending_remove(wvif->wdev, skb);
+ handled = true;
+ break;
+ case do_wep:
+ wfx_tx_lock(wvif->wdev);
+ wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
+ wvif->wep_pending_skb = skb;
+ if (!schedule_work(&wvif->wep_key_work))
+ wfx_tx_unlock(wvif->wdev);
+ handled = true;
+ break;
+ case do_tx:
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ return handled;
+}
+
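+/*
+ * Contention-inspired arbitration: each non-empty AC is given the score
+ *   ((aifsn + cw_min) << 16) + (cw_max - cw_min) * rand16
+ * and the lowest score wins, so ACs with more aggressive EDCA
+ * parameters are statistically served first.
+ */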
+static int wfx_get_prio_queue(struct wfx_vif *wvif,
+ u32 tx_allowed_mask, int *total)
+{
+ static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
+ BIT(WFX_LINK_ID_UAPSD);
+ struct hif_req_edca_queue_params *edca;
+ unsigned int score, best = -1;
+ int winner = -1;
+ int i;
+
+ /* search for a winner using edca params */
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ int queued;
+
+ edca = &wvif->edca.params[i];
+ queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
+ tx_allowed_mask);
+ if (!queued)
+ continue;
+ *total += queued;
+ score = ((edca->aifsn + edca->cw_min) << 16) +
+ ((edca->cw_max - edca->cw_min) *
+ (get_random_int() & 0xFFFF));
+ if (score < best && (winner < 0 || i != 3)) {
+ best = score;
+ winner = i;
+ }
+ }
+
+ /* override winner if bursting */
+ if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
+ winner != wvif->wdev->tx_burst_idx &&
+ !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
+ tx_allowed_mask & urgent) &&
+ wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
+ winner = wvif->wdev->tx_burst_idx;
+
+ return winner;
+}
+
+static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
+ struct wfx_queue **queue_p,
+ u32 *tx_allowed_mask_p,
+ bool *more)
+{
+ int idx;
+ u32 tx_allowed_mask;
+ int total = 0;
+
+ /* Search for a queue with multicast frames buffered */
+ if (wvif->mcast_tx) {
+ tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx >= 0) {
+ *more = total > 1;
+ goto found;
+ }
+ }
+
+ /* Search for unicast traffic */
+ tx_allowed_mask = ~wvif->sta_asleep_mask;
+ tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
+ if (wvif->sta_asleep_mask) {
+ tx_allowed_mask |= wvif->pspoll_mask;
+ tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
+ } else {
+ tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
+ }
+ idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
+ if (idx < 0)
+ return -ENOENT;
+
+found:
+ *queue_p = &wvif->wdev->tx_queue[idx];
+ *tx_allowed_mask_p = tx_allowed_mask;
+ return 0;
+}
+
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
+{
+ struct sk_buff *skb;
+ struct hif_msg *hif = NULL;
+ struct hif_req_tx *req = NULL;
+ struct wfx_queue *queue = NULL;
+ struct wfx_queue *vif_queue = NULL;
+ u32 tx_allowed_mask = 0;
+ u32 vif_tx_allowed_mask = 0;
+ const struct wfx_tx_priv *tx_priv = NULL;
+ struct wfx_vif *wvif;
+ /* More is used only for broadcasts. */
+ bool more = false;
+ bool vif_more = false;
+ int not_found;
+ int burst;
+
+ for (;;) {
+ int ret = -ENOENT;
+ int queue_num;
+ struct ieee80211_hdr *hdr;
+
+ if (atomic_read(&wdev->tx_lock))
+ return NULL;
+
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ spin_lock_bh(&wvif->ps_state_lock);
+
+ not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
+ &vif_tx_allowed_mask,
+ &vif_more);
+
+ if (wvif->mcast_buffered && (not_found || !vif_more) &&
+ (wvif->mcast_tx ||
+ !wvif->sta_asleep_mask)) {
+ wvif->mcast_buffered = false;
+ if (wvif->mcast_tx) {
+ wvif->mcast_tx = false;
+ schedule_work(&wvif->mcast_stop_work);
+ }
+ }
+
+ spin_unlock_bh(&wvif->ps_state_lock);
+
+ if (vif_more) {
+ more = true;
+ tx_allowed_mask = vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ break;
+ } else if (!not_found) {
+ if (queue && queue != vif_queue)
+ dev_info(wdev->dev, "vifs disagree about queue priority\n");
+ tx_allowed_mask |= vif_tx_allowed_mask;
+ queue = vif_queue;
+ ret = 0;
+ }
+ }
+
+ if (ret)
+ return NULL;
+
+ queue_num = queue - wdev->tx_queue;
+
+ skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
+ if (!skb)
+ continue;
+ tx_priv = wfx_skb_tx_priv(skb);
+ hif = (struct hif_msg *) skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ WARN_ON(!wvif);
+
+ if (hif_handle_tx_data(wvif, skb, queue))
+ continue; /* Handled by WSM */
+
+ wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);
+
+ /* allow bursting if txop is set */
+ if (wvif->edca.params[queue_num].tx_op_limit)
+ burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
+ else
+ burst = 1;
+
+ /* store index of bursting queue */
+ if (burst > 1)
+ wdev->tx_burst_idx = queue_num;
+ else
+ wdev->tx_burst_idx = -1;
+
+ /* more buffered multicast/broadcast frames
+ * ==> set MoreData flag in IEEE 802.11 header
+ * to inform PS STAs
+ */
+ if (more) {
+ req = (struct hif_req_tx *) hif->body;
+ hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ }
+ return hif;
+ }
+}
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
new file mode 100644
index 000000000000..21566e48b2c2
--- /dev/null
+++ b/drivers/staging/wfx/queue.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * O(1) TX queue with built-in allocator.
+ *
+ * Copyright (c) 2017-2018, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_QUEUE_H
+#define WFX_QUEUE_H
+
+#include <linux/skbuff.h>
+
+#include "hif_api_cmd.h"
+
+#define WFX_MAX_STA_IN_AP_MODE 14
+#define WFX_LINK_ID_AFTER_DTIM (WFX_MAX_STA_IN_AP_MODE + 1)
+#define WFX_LINK_ID_UAPSD (WFX_MAX_STA_IN_AP_MODE + 2)
+#define WFX_LINK_ID_MAX (WFX_MAX_STA_IN_AP_MODE + 3)
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_queue {
+ struct sk_buff_head queue;
+ int tx_locked_cnt;
+ int link_map_cache[WFX_LINK_ID_MAX];
+ u8 queue_id;
+};
+
+struct wfx_queue_stats {
+ int link_map_cache[WFX_LINK_ID_MAX];
+ struct sk_buff_head pending;
+ wait_queue_head_t wait_link_id_empty;
+};
+
+void wfx_tx_lock(struct wfx_dev *wdev);
+void wfx_tx_unlock(struct wfx_dev *wdev);
+void wfx_tx_flush(struct wfx_dev *wdev);
+void wfx_tx_lock_flush(struct wfx_dev *wdev);
+
+void wfx_tx_queues_init(struct wfx_dev *wdev);
+void wfx_tx_queues_deinit(struct wfx_dev *wdev);
+void wfx_tx_queues_lock(struct wfx_dev *wdev);
+void wfx_tx_queues_unlock(struct wfx_dev *wdev);
+void wfx_tx_queues_clear(struct wfx_dev *wdev);
+bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
+void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
+struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
+
+void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
+ struct sk_buff *skb);
+size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+
+struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
+int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
+int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
+unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
+ struct sk_buff *skb);
+void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
+
+#endif /* WFX_QUEUE_H */
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
new file mode 100644
index 000000000000..35fcf9119f96
--- /dev/null
+++ b/drivers/staging/wfx/scan.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "scan.h"
+#include "wfx.h"
+#include "sta.h"
+#include "hif_tx_mib.h"
+
+static void __ieee80211_scan_completed_compat(struct ieee80211_hw *hw,
+ bool aborted)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted ? 1 : 0,
+ };
+
+ ieee80211_scan_completed(hw, &info);
+}
+
+static void wfx_scan_restart_delayed(struct wfx_vif *wvif)
+{
+ if (wvif->delayed_unjoin) {
+ wvif->delayed_unjoin = false;
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else if (wvif->delayed_link_loss) {
+ wvif->delayed_link_loss = 0;
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ }
+}
+
+static int wfx_scan_start(struct wfx_vif *wvif, struct wfx_scan_params *scan)
+{
+ int ret;
+ int tmo = 500;
+
+ if (wvif->state == WFX_STATE_PRE_STA)
+ return -EBUSY;
+
+ tmo += scan->scan_req.num_of_channels *
+ ((20 * (scan->scan_req.max_channel_time)) + 10);
+ atomic_set(&wvif->scan.in_progress, 1);
+ atomic_set(&wvif->wdev->scan_in_progress, 1);
+
+ schedule_delayed_work(&wvif->scan.timeout, msecs_to_jiffies(tmo));
+ ret = hif_scan(wvif, scan);
+ if (ret) {
+ wfx_scan_failed_cb(wvif);
+ atomic_set(&wvif->scan.in_progress, 0);
+ atomic_set(&wvif->wdev->scan_in_progress, 0);
+ cancel_delayed_work_sync(&wvif->scan.timeout);
+ wfx_scan_restart_delayed(wvif);
+ }
+ return ret;
+}
+
+int wfx_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct sk_buff *skb;
+ int i, ret;
+ struct hif_mib_template_frame *p;
+
+ if (!wvif)
+ return -EINVAL;
+
+ if (wvif->state == WFX_STATE_AP)
+ return -EOPNOTSUPP;
+
+ if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+ req->n_ssids = 0;
+
+ if (req->n_ssids > HIF_API_MAX_NB_SSIDS)
+ return -EINVAL;
+
+ skb = ieee80211_probereq_get(hw, wvif->vif->addr, NULL, 0, req->ie_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (req->ie_len)
+ memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+
+ mutex_lock(&wdev->conf_mutex);
+
+ p = (struct hif_mib_template_frame *)skb_push(skb, 4);
+ p->frame_type = HIF_TMPLT_PRBREQ;
+ p->frame_length = cpu_to_le16(skb->len - 4);
+ ret = hif_set_template_frame(wvif, p);
+ skb_pull(skb, 4);
+
+ if (!ret)
+ /* Host want to be the probe responder. */
+ ret = wfx_fwd_probe_req(wvif, true);
+ if (ret) {
+ mutex_unlock(&wdev->conf_mutex);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ wfx_tx_lock_flush(wdev);
+
+ WARN(wvif->scan.req, "unexpected concurrent scan");
+ wvif->scan.req = req;
+ wvif->scan.n_ssids = 0;
+ wvif->scan.status = 0;
+ wvif->scan.begin = &req->channels[0];
+ wvif->scan.curr = wvif->scan.begin;
+ wvif->scan.end = &req->channels[req->n_channels];
+ wvif->scan.output_power = wdev->output_power;
+
+ for (i = 0; i < req->n_ssids; ++i) {
+ struct hif_ssid_def *dst = &wvif->scan.ssids[wvif->scan.n_ssids];
+
+ memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
+ dst->ssid_length = req->ssids[i].ssid_len;
+ ++wvif->scan.n_ssids;
+ }
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ dev_kfree_skb(skb);
+ schedule_work(&wvif->scan.work);
+ return 0;
+}
+
+void wfx_scan_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan.work);
+ struct ieee80211_channel **it;
+ struct wfx_scan_params scan = {
+ .scan_req.scan_type.type = 0, /* Foreground */
+ };
+ struct ieee80211_channel *first;
+ bool first_run = (wvif->scan.begin == wvif->scan.curr &&
+ wvif->scan.begin != wvif->scan.end);
+ int i;
+
+ down(&wvif->scan.lock);
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ if (first_run) {
+ if (wvif->state == WFX_STATE_STA &&
+ !(wvif->powersave_mode.pm_mode.enter_psm)) {
+ struct hif_req_set_pm_mode pm = wvif->powersave_mode;
+
+ pm.pm_mode.enter_psm = 1;
+ wfx_set_pm(wvif, &pm);
+ }
+ }
+
+ if (!wvif->scan.req || wvif->scan.curr == wvif->scan.end) {
+ if (wvif->scan.output_power != wvif->wdev->output_power)
+ hif_set_output_power(wvif,
+ wvif->wdev->output_power * 10);
+
+ if (wvif->scan.status < 0)
+ dev_warn(wvif->wdev->dev, "scan failed\n");
+ else if (wvif->scan.req)
+ dev_dbg(wvif->wdev->dev, "scan completed\n");
+ else
+ dev_dbg(wvif->wdev->dev, "scan canceled\n");
+
+ wvif->scan.req = NULL;
+ wfx_scan_restart_delayed(wvif);
+ wfx_tx_unlock(wvif->wdev);
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ __ieee80211_scan_completed_compat(wvif->wdev->hw,
+ wvif->scan.status ? 1 : 0);
+ up(&wvif->scan.lock);
+ if (wvif->state == WFX_STATE_STA &&
+ !(wvif->powersave_mode.pm_mode.enter_psm))
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ return;
+ }
+ first = *wvif->scan.curr;
+
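+ /* Batch as many consecutive channels as possible into a single
+ * firmware request: they must share the same band, the same NO_IR
+ * flag and, for actively scanned channels, the same maximum transmit
+ * power.
+ */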
+ for (it = wvif->scan.curr + 1, i = 1;
+ it != wvif->scan.end && i < HIF_API_MAX_NB_CHANNELS;
+ ++it, ++i) {
+ if ((*it)->band != first->band)
+ break;
+ if (((*it)->flags ^ first->flags) & IEEE80211_CHAN_NO_IR)
+ break;
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
+ (*it)->max_power != first->max_power)
+ break;
+ }
+ scan.scan_req.band = first->band;
+
+ if (wvif->scan.req->no_cck)
+ scan.scan_req.max_transmit_rate = API_RATE_INDEX_G_6MBPS;
+ else
+ scan.scan_req.max_transmit_rate = API_RATE_INDEX_B_1MBPS;
+ scan.scan_req.num_of_probe_requests =
+ (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2;
+ scan.scan_req.num_of_ssi_ds = wvif->scan.n_ssids;
+ scan.ssids = &wvif->scan.ssids[0];
+ scan.scan_req.num_of_channels = it - wvif->scan.curr;
+ scan.scan_req.probe_delay = 100;
+ // FIXME: Check if FW can do active scan while joined.
+ if (wvif->state == WFX_STATE_STA) {
+ scan.scan_req.scan_type.type = 1;
+ scan.scan_req.scan_flags.fbg = 1;
+ }
+
+ scan.ch = kcalloc(scan.scan_req.num_of_channels,
+ sizeof(u8), GFP_KERNEL);
+
+ if (!scan.ch) {
+ wvif->scan.status = -ENOMEM;
+ goto fail;
+ }
+ for (i = 0; i < scan.scan_req.num_of_channels; ++i)
+ scan.ch[i] = wvif->scan.curr[i]->hw_value;
+
+ if (wvif->scan.curr[0]->flags & IEEE80211_CHAN_NO_IR) {
+ scan.scan_req.min_channel_time = 50;
+ scan.scan_req.max_channel_time = 150;
+ } else {
+ scan.scan_req.min_channel_time = 10;
+ scan.scan_req.max_channel_time = 50;
+ }
+ if (!(first->flags & IEEE80211_CHAN_NO_IR) &&
+ wvif->scan.output_power != first->max_power) {
+ wvif->scan.output_power = first->max_power;
+ hif_set_output_power(wvif, wvif->scan.output_power * 10);
+ }
+ wvif->scan.status = wfx_scan_start(wvif, &scan);
+ kfree(scan.ch);
+ if (wvif->scan.status)
+ goto fail;
+ wvif->scan.curr = it;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ return;
+
+fail:
+ wvif->scan.curr = wvif->scan.end;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ up(&wvif->scan.lock);
+ schedule_work(&wvif->scan.work);
+}
+
+static void wfx_scan_complete(struct wfx_vif *wvif)
+{
+ up(&wvif->scan.lock);
+ atomic_set(&wvif->wdev->scan_in_progress, 0);
+
+ wfx_scan_work(&wvif->scan.work);
+}
+
+void wfx_scan_failed_cb(struct wfx_vif *wvif)
+{
+ if (cancel_delayed_work_sync(&wvif->scan.timeout)) {
+ wvif->scan.status = -EIO;
+ schedule_work(&wvif->scan.timeout.work);
+ }
+}
+
+void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg)
+{
+ if (cancel_delayed_work_sync(&wvif->scan.timeout)) {
+ wvif->scan.status = 1;
+ schedule_work(&wvif->scan.timeout.work);
+ }
+}
+
+void wfx_scan_timeout(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ scan.timeout.work);
+
+ if (atomic_xchg(&wvif->scan.in_progress, 0)) {
+ if (wvif->scan.status > 0) {
+ wvif->scan.status = 0;
+ } else if (!wvif->scan.status) {
+ dev_warn(wvif->wdev->dev, "timeout waiting for scan complete notification\n");
+ wvif->scan.status = -ETIMEDOUT;
+ wvif->scan.curr = wvif->scan.end;
+ hif_stop_scan(wvif);
+ }
+ wfx_scan_complete(wvif);
+ }
+}
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
new file mode 100644
index 000000000000..b4ddd0771a9b
--- /dev/null
+++ b/drivers/staging/wfx/scan.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Scan related functions.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_SCAN_H
+#define WFX_SCAN_H
+
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+struct wfx_scan {
+ struct semaphore lock;
+ struct work_struct work;
+ struct delayed_work timeout;
+ struct cfg80211_scan_request *req;
+ struct ieee80211_channel **begin;
+ struct ieee80211_channel **curr;
+ struct ieee80211_channel **end;
+ struct hif_ssid_def ssids[HIF_API_MAX_NB_SSIDS];
+ int output_power;
+ int n_ssids;
+ int status;
+ atomic_t in_progress;
+};
+
+int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *req);
+void wfx_scan_work(struct work_struct *work);
+void wfx_scan_timeout(struct work_struct *work);
+void wfx_scan_complete_cb(struct wfx_vif *wvif, struct hif_ind_scan_cmpl *arg);
+void wfx_scan_failed_cb(struct wfx_vif *wvif);
+
+#endif /* WFX_SCAN_H */
diff --git a/drivers/staging/wfx/secure_link.h b/drivers/staging/wfx/secure_link.h
new file mode 100644
index 000000000000..666b26e5308d
--- /dev/null
+++ b/drivers/staging/wfx/secure_link.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, Silicon Laboratories, Inc.
+ */
+#ifndef WFX_SECURE_LINK_H
+#define WFX_SECURE_LINK_H
+
+#include <linux/of.h>
+
+#include "hif_api_general.h"
+
+struct wfx_dev;
+
+struct sl_context {
+};
+
+static inline bool wfx_is_secure_command(struct wfx_dev *wdev, int cmd_id)
+{
+ return false;
+}
+
+static inline int wfx_sl_decode(struct wfx_dev *wdev, struct hif_sl_msg *m)
+{
+ return -EIO;
+}
+
+static inline int wfx_sl_encode(struct wfx_dev *wdev, struct hif_msg *input,
+ struct hif_sl_msg *output)
+{
+ return -EIO;
+}
+
+static inline int wfx_sl_check_pubkey(struct wfx_dev *wdev, u8 *ncp_pubkey,
+ u8 *ncp_pubmac)
+{
+ return -EIO;
+}
+
+static inline void wfx_sl_fill_pdata(struct device *dev,
+ struct wfx_platform_data *pdata)
+{
+ if (of_find_property(dev->of_node, "slk_key", NULL))
+ dev_err(dev, "secure link is not supported by this driver, ignoring provided key\n");
+}
+
+static inline int wfx_sl_init(struct wfx_dev *wdev)
+{
+ return -EIO;
+}
+
+static inline void wfx_sl_deinit(struct wfx_dev *wdev)
+{
+}
+
+#endif /* WFX_SECURE_LINK_H */
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
new file mode 100644
index 000000000000..29848a202ab4
--- /dev/null
+++ b/drivers/staging/wfx/sta.c
@@ -0,0 +1,1684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#include <net/mac80211.h>
+
+#include "sta.h"
+#include "wfx.h"
+#include "fwio.h"
+#include "bh.h"
+#include "key.h"
+#include "scan.h"
+#include "debug.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+
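+// mac80211 expresses TXOP limits in units of 32 us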
+#define TXOP_UNIT 32
+#define HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES 2
+
+static u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
+{
+ int i;
+ u32 ret = 0;
+ // The WFx chip only supports the 2.4 GHz band
+ struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+ for (i = 0; i < 32; i++) {
+ if (rates & BIT(i)) {
+ if (i >= sband->n_bitrates)
+ dev_warn(wdev->dev, "unsupported basic rate\n");
+ else
+ ret |= BIT(sband->bitrates[i].hw_value);
+ }
+ }
+ return ret;
+}
+
+static void __wfx_free_event_queue(struct list_head *list)
+{
+ struct wfx_hif_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, list, link) {
+ list_del(&event->link);
+ kfree(event);
+ }
+}
+
+static void wfx_free_event_queue(struct wfx_vif *wvif)
+{
+ LIST_HEAD(list);
+
+ spin_lock(&wvif->event_queue_lock);
+ list_splice_init(&wvif->event_queue, &list);
+ spin_unlock(&wvif->event_queue_lock);
+
+ __wfx_free_event_queue(&list);
+}
+
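+/* Connection quality monitoring state machine. "init" arms the BSS loss
+ * watchdog and probes the AP with a NULL frame, "good" confirms the link and
+ * disarms the watchdog, "bad" sends up to three more probes before the
+ * watchdog finally reports a connection loss to mac80211.
+ */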
+void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
+{
+ int tx = 0;
+
+ mutex_lock(&wvif->bss_loss_lock);
+ wvif->delayed_link_loss = 0;
+ cancel_work_sync(&wvif->bss_params_work);
+
+ /* If we have a pending unjoin */
+ if (wvif->delayed_unjoin)
+ goto end;
+
+ if (init) {
+ schedule_delayed_work(&wvif->bss_loss_work, HZ);
+ wvif->bss_loss_state = 0;
+
+ if (!atomic_read(&wvif->wdev->tx_lock))
+ tx = 1;
+ } else if (good) {
+ cancel_delayed_work_sync(&wvif->bss_loss_work);
+ wvif->bss_loss_state = 0;
+ schedule_work(&wvif->bss_params_work);
+ } else if (bad) {
+ /* FIXME Should we just keep going until we time out? */
+ if (wvif->bss_loss_state < 3)
+ tx = 1;
+ } else {
+ cancel_delayed_work_sync(&wvif->bss_loss_work);
+ wvif->bss_loss_state = 0;
+ }
+
+ /* Spit out a NULL packet to our AP if necessary */
+ // FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
+ if (tx) {
+ struct sk_buff *skb;
+
+ wvif->bss_loss_state++;
+
+ skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
+ if (!skb)
+ goto end;
+ memset(IEEE80211_SKB_CB(skb), 0,
+ sizeof(*IEEE80211_SKB_CB(skb)));
+ IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
+ IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
+ IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
+ IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
+ wfx_tx(wvif->wdev->hw, NULL, skb);
+ }
+end:
+ mutex_unlock(&wvif->bss_loss_lock);
+}
+
+static int wfx_set_uapsd_param(struct wfx_vif *wvif,
+ const struct wfx_edca_params *arg)
+{
+ /* Here's the mapping AC [queue, bit]
+ * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0]
+ */
+
+ if (arg->uapsd_enable[IEEE80211_AC_VO])
+ wvif->uapsd_info.trig_voice = 1;
+ else
+ wvif->uapsd_info.trig_voice = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_VI])
+ wvif->uapsd_info.trig_video = 1;
+ else
+ wvif->uapsd_info.trig_video = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_BE])
+ wvif->uapsd_info.trig_be = 1;
+ else
+ wvif->uapsd_info.trig_be = 0;
+
+ if (arg->uapsd_enable[IEEE80211_AC_BK])
+ wvif->uapsd_info.trig_bckgrnd = 1;
+ else
+ wvif->uapsd_info.trig_bckgrnd = 0;
+
+ /* Currently pseudo U-APSD operation is not supported, so setting
+ * MinAutoTriggerInterval, MaxAutoTriggerInterval and
+ * AutoTriggerStep to 0
+ */
+ wvif->uapsd_info.min_auto_trigger_interval = 0;
+ wvif->uapsd_info.max_auto_trigger_interval = 0;
+ wvif->uapsd_info.auto_trigger_step = 0;
+
+ return hif_set_uapsd_info(wvif, &wvif->uapsd_info);
+}
+
+int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
+{
+ wvif->fwd_probe_req = enable;
+ return hif_set_rx_filter(wvif, wvif->filter_bssid,
+ wvif->fwd_probe_req);
+}
+
+static int wfx_set_mcast_filter(struct wfx_vif *wvif,
+ struct wfx_grp_addr_table *fp)
+{
+ int i, ret;
+ struct hif_mib_config_data_filter config = { };
+ struct hif_mib_set_data_filtering filter_data = { };
+ struct hif_mib_mac_addr_data_frame_condition filter_addr_val = { };
+ struct hif_mib_uc_mc_bc_data_frame_condition filter_addr_type = { };
+
+ // Temporary workaround: always disable data filtering; the code below
+ // is currently bypassed.
+ return hif_set_data_filtering(wvif, &filter_data);
+
+ if (!fp->enable) {
+ filter_data.enable = 0;
+ return hif_set_data_filtering(wvif, &filter_data);
+ }
+
+ // A1 Address match on list
+ for (i = 0; i < fp->num_addresses; i++) {
+ filter_addr_val.condition_idx = i;
+ filter_addr_val.address_type = HIF_MAC_ADDR_A1;
+ ether_addr_copy(filter_addr_val.mac_address,
+ fp->address_list[i]);
+ ret = hif_set_mac_addr_condition(wvif,
+ &filter_addr_val);
+ if (ret)
+ return ret;
+ config.mac_cond |= 1 << i;
+ }
+
+ // Accept unicast and broadcast
+ filter_addr_type.condition_idx = 0;
+ filter_addr_type.param.bits.type_unicast = 1;
+ filter_addr_type.param.bits.type_broadcast = 1;
+ ret = hif_set_uc_mc_bc_condition(wvif, &filter_addr_type);
+ if (ret)
+ return ret;
+
+ config.uc_mc_bc_cond = 1;
+ config.filter_idx = 0; // TODO #define MULTICAST_FILTERING 0
+ config.enable = 1;
+ ret = hif_set_config_data_filter(wvif, &config);
+ if (ret)
+ return ret;
+
+ // discard all data frames except match filter
+ filter_data.enable = 1;
+ filter_data.default_filter = 1; // discard all
+ ret = hif_set_data_filtering(wvif, &filter_data);
+
+ return ret;
+}
+
+void wfx_update_filtering(struct wfx_vif *wvif)
+{
+ int ret;
+ bool is_sta = wvif->vif && wvif->vif->type == NL80211_IFTYPE_STATION;
+ bool filter_bssid = wvif->filter_bssid;
+ bool fwd_probe_req = wvif->fwd_probe_req;
+ struct hif_mib_bcn_filter_enable bf_ctrl;
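+ // Beacons are forwarded to the host when one of the IEs below
+ // appears, disappears or changes (0x50-6F-9A is the Wi-Fi Alliance
+ // OUI).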
+ struct hif_ie_table_entry filter_ies[] = {
+ {
+ .ie_id = WLAN_EID_VENDOR_SPECIFIC,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ .oui = { 0x50, 0x6F, 0x9A },
+ }, {
+ .ie_id = WLAN_EID_HT_OPERATION,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }, {
+ .ie_id = WLAN_EID_ERP_INFO,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
+ }
+ };
+ int n_filter_ies;
+
+ if (wvif->state == WFX_STATE_PASSIVE)
+ return;
+
+ if (wvif->disable_beacon_filter) {
+ bf_ctrl.enable = 0;
+ bf_ctrl.bcn_count = 1;
+ n_filter_ies = 0;
+ } else if (!is_sta) {
+ bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE |
+ HIF_BEACON_FILTER_AUTO_ERP;
+ bf_ctrl.bcn_count = 0;
+ n_filter_ies = 2;
+ } else {
+ bf_ctrl.enable = HIF_BEACON_FILTER_ENABLE;
+ bf_ctrl.bcn_count = 0;
+ n_filter_ies = 3;
+ }
+
+ ret = hif_set_rx_filter(wvif, filter_bssid, fwd_probe_req);
+ if (!ret)
+ ret = hif_set_beacon_filter_table(wvif, n_filter_ies,
+ filter_ies);
+ if (!ret)
+ ret = hif_beacon_filter_control(wvif, bf_ctrl.enable,
+ bf_ctrl.bcn_count);
+ if (!ret)
+ ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
+ if (ret)
+ dev_err(wvif->wdev->dev, "update filtering failed: %d\n", ret);
+}
+
+static void wfx_update_filtering_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ update_filtering_work);
+
+ wfx_update_filtering(wvif);
+}
+
+u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ int i;
+ struct netdev_hw_addr *ha;
+ struct wfx_vif *wvif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+ int count = netdev_hw_addr_list_count(mc_list);
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ memset(&wvif->mcast_filter, 0x00, sizeof(wvif->mcast_filter));
+ if (!count ||
+ count > ARRAY_SIZE(wvif->mcast_filter.address_list))
+ continue;
+
+ i = 0;
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ ether_addr_copy(wvif->mcast_filter.address_list[i],
+ ha->addr);
+ i++;
+ }
+ wvif->mcast_filter.enable = true;
+ wvif->mcast_filter.num_addresses = count;
+ }
+
+ return 0;
+}
+
+void wfx_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 unused)
+{
+ struct wfx_vif *wvif = NULL;
+ struct wfx_dev *wdev = hw->priv;
+
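+ // Only the filter flags kept below are handled by the hardware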
+ *total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ down(&wvif->scan.lock);
+ wvif->filter_bssid = (*total_flags &
+ (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
+ wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
+ wfx_fwd_probe_req(wvif, true);
+ wfx_update_filtering(wvif);
+ up(&wvif->scan.lock);
+ }
+}
+
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int ret = 0;
+ /* To prevent re-applying the PM request OID again and again */
+ u16 old_uapsd_flags, new_uapsd_flags;
+ struct hif_req_edca_queue_params *edca;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ if (queue < hw->queues) {
+ old_uapsd_flags = *((u16 *) &wvif->uapsd_info);
+ edca = &wvif->edca.params[queue];
+
+ wvif->edca.uapsd_enable[queue] = params->uapsd;
+ edca->aifsn = params->aifs;
+ edca->cw_min = params->cw_min;
+ edca->cw_max = params->cw_max;
+ edca->tx_op_limit = params->txop * TXOP_UNIT;
+ edca->allowed_medium_time = 0;
+ ret = hif_set_edca_queue_params(wvif, edca);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (wvif->vif->type == NL80211_IFTYPE_STATION) {
+ ret = wfx_set_uapsd_param(wvif, &wvif->edca);
+ new_uapsd_flags = *((u16 *) &wvif->uapsd_info);
+ if (!ret && wvif->setbssparams_done &&
+ wvif->state == WFX_STATE_STA &&
+ old_uapsd_flags != new_uapsd_flags)
+ ret = wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&wdev->conf_mutex);
+ return ret;
+}
+
+int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg)
+{
+ struct hif_req_set_pm_mode pm = *arg;
+ u16 uapsd_flags;
+ int ret;
+
+ if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
+ return 0;
+
+ memcpy(&uapsd_flags, &wvif->uapsd_info, sizeof(uapsd_flags));
+
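+ /* Fast power save is not used when U-APSD is enabled on any access
+ * category.
+ */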
+ if (uapsd_flags != 0)
+ pm.pm_mode.fast_psm = 0;
+
+ // The kernel disables power save when multiple vifs are in use. On the
+ // contrary, it is absolutely necessary to enable power save for the WF200.
+ if (wvif_count(wvif->wdev) > 1) {
+ pm.pm_mode.enter_psm = 1;
+ pm.pm_mode.fast_psm = 0;
+ }
+
+ if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
+ msecs_to_jiffies(300)))
+ dev_warn(wvif->wdev->dev,
+ "timeout while waiting of set_pm_mode_complete\n");
+ ret = hif_set_pm(wvif, &pm);
+ // FIXME: why?
+ if (wvif->scan.status == -ETIMEDOUT)
+ wvif->scan.status = 1;
+ return ret;
+}
+
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = NULL;
+
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ hif_rts_threshold(wvif, value);
+ return 0;
+}
+
+/* If successful, LOCKS the TX queue! */
+static int __wfx_flush(struct wfx_dev *wdev, bool drop)
+{
+ int ret;
+
+ for (;;) {
+ if (drop) {
+ wfx_tx_queues_clear(wdev);
+ } else {
+ ret = wait_event_timeout(
+ wdev->tx_queue_stats.wait_link_id_empty,
+ wfx_tx_queues_is_empty(wdev),
+ 2 * HZ);
+ }
+
+ if (!drop && ret <= 0) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ ret = 0;
+
+ wfx_tx_lock_flush(wdev);
+ if (!wfx_tx_queues_is_empty(wdev)) {
+ /* Highly unlikely: WSM requeued frames. */
+ wfx_tx_unlock(wdev);
+ continue;
+ }
+ break;
+ }
+ return ret;
+}
+
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif;
+
+ if (vif) {
+ wvif = (struct wfx_vif *) vif->drv_priv;
+ if (wvif->vif->type == NL80211_IFTYPE_MONITOR)
+ drop = true;
+ if (wvif->vif->type == NL80211_IFTYPE_AP &&
+ !wvif->enable_beacon)
+ drop = true;
+ }
+
+ // FIXME: only flush requested vif
+ if (!__wfx_flush(wdev, drop))
+ wfx_tx_unlock(wdev);
+}
+
+/* WSM callbacks */
+
+static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
+{
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
+ int rcpi_rssi;
+ int cqm_evt;
+
+ rcpi_rssi = raw_rcpi_rssi / 2 - 110;
+ if (rcpi_rssi <= wvif->cqm_rssi_thold)
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+ else
+ cqm_evt = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
+}
+
+static void wfx_event_handler_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif =
+ container_of(work, struct wfx_vif, event_handler_work);
+ struct wfx_hif_event *event;
+
+ LIST_HEAD(list);
+
+ spin_lock(&wvif->event_queue_lock);
+ list_splice_init(&wvif->event_queue, &list);
+ spin_unlock(&wvif->event_queue_lock);
+
+ list_for_each_entry(event, &list, link) {
+ switch (event->evt.event_id) {
+ case HIF_EVENT_IND_BSSLOST:
+ cancel_work_sync(&wvif->unjoin_work);
+ if (!down_trylock(&wvif->scan.lock)) {
+ wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
+ up(&wvif->scan.lock);
+ } else {
+ /* Scan is in progress. Delay reporting.
+ * Scan complete will trigger bss_loss_work
+ */
+ wvif->delayed_link_loss = 1;
+ /* Also start a watchdog. */
+ schedule_delayed_work(&wvif->bss_loss_work,
+ 5 * HZ);
+ }
+ break;
+ case HIF_EVENT_IND_BSSREGAINED:
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+ break;
+ case HIF_EVENT_IND_RCPI_RSSI:
+ wfx_event_report_rssi(wvif,
+ event->evt.event_data.rcpi_rssi);
+ break;
+ case HIF_EVENT_IND_PS_MODE_ERROR:
+ dev_warn(wvif->wdev->dev,
+ "error while processing power save request\n");
+ break;
+ default:
+ dev_warn(wvif->wdev->dev,
+ "unhandled event indication: %.2x\n",
+ event->evt.event_id);
+ break;
+ }
+ }
+ __wfx_free_event_queue(&list);
+}
+
+static void wfx_bss_loss_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ bss_loss_work.work);
+
+ ieee80211_connection_loss(wvif->vif);
+}
+
+static void wfx_bss_params_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ bss_params_work);
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ wvif->bss_params.bss_flags.lost_count_only = 1;
+ hif_set_bss_params(wvif, &wvif->bss_params);
+ wvif->bss_params.bss_flags.lost_count_only = 0;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+}
+
+static void wfx_set_beacon_wakeup_period_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ set_beacon_wakeup_period_work);
+
+ hif_set_beacon_wakeup_period(wvif, wvif->dtim_period,
+ wvif->dtim_period);
+}
+
+static void wfx_do_unjoin(struct wfx_vif *wvif)
+{
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ if (atomic_read(&wvif->scan.in_progress)) {
+ if (wvif->delayed_unjoin)
+ dev_dbg(wvif->wdev->dev,
+ "delayed unjoin is already scheduled\n");
+ else
+ wvif->delayed_unjoin = true;
+ goto done;
+ }
+
+ wvif->delayed_link_loss = false;
+
+ if (!wvif->state)
+ goto done;
+
+ if (wvif->state == WFX_STATE_AP)
+ goto done;
+
+ cancel_work_sync(&wvif->update_filtering_work);
+ cancel_work_sync(&wvif->set_beacon_wakeup_period_work);
+ wvif->state = WFX_STATE_PASSIVE;
+
+ /* Unjoin is a reset. */
+ wfx_tx_flush(wvif->wdev);
+ hif_keep_alive_period(wvif, 0);
+ hif_reset(wvif, false);
+ hif_set_output_power(wvif, wvif->wdev->output_power * 10);
+ wvif->dtim_period = 0;
+ hif_set_macaddr(wvif, wvif->vif->addr);
+ wfx_free_event_queue(wvif);
+ cancel_work_sync(&wvif->event_handler_work);
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+
+ /* Disable Block ACKs */
+ hif_set_block_ack_policy(wvif, 0, 0);
+
+ wvif->disable_beacon_filter = false;
+ wfx_update_filtering(wvif);
+ memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
+ wvif->setbssparams_done = false;
+ memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+
+done:
+ mutex_unlock(&wvif->wdev->conf_mutex);
+}
+
+static void wfx_set_mfp(struct wfx_vif *wvif,
+ struct cfg80211_bss *bss)
+{
+ const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
+ const int pairwise_cipher_suite_size = 4 / sizeof(u16);
+ const int akm_suite_size = 4 / sizeof(u16);
+ const u16 *ptr = NULL;
+ bool mfpc = false;
+ bool mfpr = false;
+
+ /* 802.11w protected mgmt frames */
+
+ /* retrieve MFPC and MFPR flags from the beacon or probe response */
+
+ rcu_read_lock();
+ if (bss)
+ ptr = (const u16 *) ieee80211_bss_get_ie(bss,
+ WLAN_EID_RSN);
+
+ if (ptr) {
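+ /* Walk the RSN element in u16 steps: skip the element header,
+ * version and group cipher suite (8 bytes), then the pairwise
+ * cipher suite count and list, then the AKM suite count and
+ * list, to land on the RSN capabilities field where bit 6 is
+ * MFPR and bit 7 is MFPC.
+ */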
+ ptr += pairwise_cipher_suite_count_offset;
+ ptr += 1 + pairwise_cipher_suite_size * *ptr;
+ ptr += 1 + akm_suite_size * *ptr;
+ mfpr = *ptr & BIT(6);
+ mfpc = *ptr & BIT(7);
+ }
+ rcu_read_unlock();
+
+ hif_set_mfp(wvif, mfpc, mfpr);
+}
+
+/* MUST be called with tx_lock held! It will be unlocked for us. */
+static void wfx_do_join(struct wfx_vif *wvif)
+{
+ const u8 *bssid;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct cfg80211_bss *bss = NULL;
+ struct hif_req_join join = {
+ .mode = conf->ibss_joined ? HIF_MODE_IBSS : HIF_MODE_BSS,
+ .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
+ .probe_for_join = 1,
+ .atim_window = 0,
+ .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
+ conf->basic_rates),
+ };
+
+ if (wvif->channel->flags & IEEE80211_CHAN_NO_IR)
+ join.probe_for_join = 0;
+
+ if (wvif->state)
+ wfx_do_unjoin(wvif);
+
+ bssid = wvif->vif->bss_conf.bssid;
+
+ bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
+ bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (!bss && !conf->ibss_joined) {
+ wfx_tx_unlock(wvif->wdev);
+ return;
+ }
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+
+ /* Under the conf lock: check scan status and
+ * bail out if it is in progress.
+ */
+ if (atomic_read(&wvif->scan.in_progress)) {
+ wfx_tx_unlock(wvif->wdev);
+ goto done_put;
+ }
+
+ /* Sanity check basic rates: fall back to 1, 2 and 5.5 Mbit/s */
+ if (!join.basic_rate_set)
+ join.basic_rate_set = 7;
+
+ /* Sanity check beacon interval */
+ if (!wvif->beacon_int)
+ wvif->beacon_int = 1;
+
+ join.beacon_interval = wvif->beacon_int;
+
+ // DTIM period will be set on first Beacon
+ wvif->dtim_period = 0;
+
+ join.channel_number = wvif->channel->hw_value;
+ memcpy(join.bssid, bssid, sizeof(join.bssid));
+
+ if (!conf->ibss_joined) {
+ const u8 *ssidie;
+
+ rcu_read_lock();
+ ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+ if (ssidie) {
+ join.ssid_length = ssidie[1];
+ memcpy(join.ssid, &ssidie[2], join.ssid_length);
+ }
+ rcu_read_unlock();
+ }
+
+ wfx_tx_flush(wvif->wdev);
+
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+
+ wfx_set_mfp(wvif, bss);
+
+ /* Perform actual join */
+ wvif->wdev->tx_burst_idx = -1;
+ if (hif_join(wvif, &join)) {
+ ieee80211_connection_loss(wvif->vif);
+ wvif->join_complete_status = -1;
+ /* Tx lock still held, unjoin will clear it. */
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wvif->wdev);
+ } else {
+ wvif->join_complete_status = 0;
+ if (wvif->vif->type == NL80211_IFTYPE_ADHOC)
+ wvif->state = WFX_STATE_IBSS;
+ else
+ wvif->state = WFX_STATE_PRE_STA;
+ wfx_tx_unlock(wvif->wdev);
+
+ /* Upload keys */
+ wfx_upload_keys(wvif);
+
+ /* Due to beacon filtering it is possible that the
+ * AP's beacon is not known to the mac80211 stack.
+ * Disable filtering temporarily to make sure the stack
+ * receives at least one.
+ */
+ wvif->disable_beacon_filter = true;
+ }
+ wfx_update_filtering(wvif);
+
+done_put:
+ mutex_unlock(&wvif->wdev->conf_mutex);
+ if (bss)
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
+}
+
+static void wfx_unjoin_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, unjoin_work);
+
+ wfx_do_unjoin(wvif);
+ wfx_tx_unlock(wvif->wdev);
+}
+
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_link_entry *entry;
+ struct sk_buff *skb;
+
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
+ return 0;
+
+ sta_priv->vif_id = wvif->id;
+ sta_priv->link_id = wfx_find_link_id(wvif, sta->addr);
+ if (!sta_priv->link_id) {
+ dev_warn(wdev->dev, "mo more link-id available\n");
+ return -ENOENT;
+ }
+
+ entry = &wvif->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&wvif->ps_state_lock);
+ if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
+ entry->status = WFX_LINK_HARD;
+ while ((skb = skb_dequeue(&entry->rx_queue)))
+ ieee80211_rx_irqsafe(wdev->hw, skb);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ return 0;
+}
+
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_link_entry *entry;
+
+ if (wvif->vif->type != NL80211_IFTYPE_AP || !sta_priv->link_id)
+ return 0;
+
+ entry = &wvif->link_id_db[sta_priv->link_id - 1];
+ spin_lock_bh(&wvif->ps_state_lock);
+ entry->status = WFX_LINK_RESERVE;
+ entry->timestamp = jiffies;
+ wfx_tx_lock(wdev);
+ if (!schedule_work(&wvif->link_id_work))
+ wfx_tx_unlock(wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+ flush_work(&wvif->link_id_work);
+ return 0;
+}
+
+static void wfx_set_cts_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_cts_work);
+ u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+
+ mutex_lock(&wvif->wdev->conf_mutex);
+ erp_ie[2] = wvif->erp_info;
+ mutex_unlock(&wvif->wdev->conf_mutex);
+
+ hif_erp_use_protection(wvif, erp_ie[2] & WLAN_ERP_USE_PROTECTION);
+
+ if (wvif->vif->type != NL80211_IFTYPE_STATION)
+ hif_update_ie(wvif, &target_frame, erp_ie, sizeof(erp_ie));
+}
+
+static int wfx_start_ap(struct wfx_vif *wvif)
+{
+ int ret;
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+ struct hif_req_start start = {
+ .channel_number = wvif->channel->hw_value,
+ .beacon_interval = conf->beacon_int,
+ .dtim_period = conf->dtim_period,
+ .preamble_type = conf->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG,
+ .basic_rate_set = wfx_rate_mask_to_hw(wvif->wdev,
+ conf->basic_rates),
+ };
+
+ memset(start.ssid, 0, sizeof(start.ssid));
+ if (!conf->hidden_ssid) {
+ start.ssid_length = conf->ssid_len;
+ memcpy(start.ssid, conf->ssid, start.ssid_length);
+ }
+
+ wvif->beacon_int = conf->beacon_int;
+ wvif->dtim_period = conf->dtim_period;
+
+ memset(&wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+
+ wvif->wdev->tx_burst_idx = -1;
+ ret = hif_start(wvif, &start);
+ if (!ret)
+ ret = wfx_upload_keys(wvif);
+ if (!ret) {
+ if (wvif_count(wvif->wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wvif->state = WFX_STATE_AP;
+ wfx_update_filtering(wvif);
+ }
+ return ret;
+}
+
+static int wfx_update_beaconing(struct wfx_vif *wvif)
+{
+ struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
+
+ if (wvif->vif->type == NL80211_IFTYPE_AP) {
+ /* TODO: check if changed channel, band */
+ if (wvif->state != WFX_STATE_AP ||
+ wvif->beacon_int != conf->beacon_int) {
+ wfx_tx_lock_flush(wvif->wdev);
+ if (wvif->state != WFX_STATE_PASSIVE)
+ hif_reset(wvif, false);
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_start_ap(wvif);
+ wfx_tx_unlock(wvif->wdev);
+ }
+ }
+ return 0;
+}
+
+static int wfx_upload_beacon(struct wfx_vif *wvif)
+{
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ struct ieee80211_mgmt *mgmt;
+ struct hif_mib_template_frame *p;
+
+ if (wvif->vif->type == NL80211_IFTYPE_STATION ||
+ wvif->vif->type == NL80211_IFTYPE_MONITOR ||
+ wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
+ goto done;
+
+ skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
+
+ if (!skb)
+ return -ENOMEM;
+
+ p = (struct hif_mib_template_frame *) skb_push(skb, 4);
+ p->frame_type = HIF_TMPLT_BCN;
+ p->init_rate = API_RATE_INDEX_B_1MBPS; /* 1Mbps DSSS */
+ p->frame_length = cpu_to_le16(skb->len - 4);
+
+ ret = hif_set_template_frame(wvif, p);
+
+ skb_pull(skb, 4);
+
+ if (ret)
+ goto done;
+ /* TODO: Distill probe resp; remove TIM and any other beacon-specific
+ * IEs
+ */
+ mgmt = (void *)skb->data;
+ mgmt->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
+
+ p->frame_type = HIF_TMPLT_PRBRES;
+
+ ret = hif_set_template_frame(wvif, p);
+ wfx_fwd_probe_req(wvif, false);
+
+done:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wfx_is_ht(const struct wfx_ht_info *ht_info)
+{
+ return ht_info->channel_type != NL80211_CHAN_NO_HT;
+}
+
+static int wfx_ht_greenfield(const struct wfx_ht_info *ht_info)
+{
+ return wfx_is_ht(ht_info) &&
+ (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ht_info->operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+}
+
+static int wfx_ht_ampdu_density(const struct wfx_ht_info *ht_info)
+{
+ if (!wfx_is_ht(ht_info))
+ return 0;
+ return ht_info->ht_cap.ampdu_density;
+}
+
+static void wfx_join_finalize(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct hif_mib_set_association_mode association_mode = { };
+
+ if (info->dtim_period)
+ wvif->dtim_period = info->dtim_period;
+ wvif->beacon_int = info->beacon_int;
+
+ rcu_read_lock();
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(wvif->vif, info->bssid);
+ if (sta) {
+ wvif->ht_info.ht_cap = sta->ht_cap;
+ wvif->bss_params.operational_rate_set =
+ wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
+ wvif->ht_info.operation_mode = info->ht_operation_mode;
+ } else {
+ memset(&wvif->ht_info, 0, sizeof(wvif->ht_info));
+ wvif->bss_params.operational_rate_set = -1;
+ }
+ rcu_read_unlock();
+
+ /* Non Greenfield stations present */
+ if (wvif->ht_info.operation_mode &
+ IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+ hif_dual_cts_protection(wvif, true);
+ else
+ hif_dual_cts_protection(wvif, false);
+
+ association_mode.preambtype_use = 1;
+ association_mode.mode = 1;
+ association_mode.rateset = 1;
+ association_mode.spacing = 1;
+ association_mode.preamble_type = info->use_short_preamble ? HIF_PREAMBLE_SHORT : HIF_PREAMBLE_LONG;
+ association_mode.basic_rate_set = cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates));
+ association_mode.mixed_or_greenfield_type = wfx_ht_greenfield(&wvif->ht_info);
+ association_mode.mpdu_start_spacing = wfx_ht_ampdu_density(&wvif->ht_info);
+
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+
+ wvif->bss_params.beacon_lost_count = 20;
+ wvif->bss_params.aid = info->aid;
+
+ if (wvif->dtim_period < 1)
+ wvif->dtim_period = 1;
+
+ hif_set_association_mode(wvif, &association_mode);
+
+ if (!info->ibss_joined) {
+ hif_keep_alive_period(wvif, 30 /* sec */);
+ hif_set_bss_params(wvif, &wvif->bss_params);
+ wvif->setbssparams_done = true;
+ wfx_set_beacon_wakeup_period_work(&wvif->set_beacon_wakeup_period_work);
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+}
+
+void wfx_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ bool do_join = false;
+ int i;
+ int nb_arp_addr;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ /* TODO: BSS_CHANGED_QOS */
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ struct hif_mib_arp_ip_addr_table filter = { };
+
+ nb_arp_addr = info->arp_addr_cnt;
+ if (nb_arp_addr <= 0 || nb_arp_addr > HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES)
+ nb_arp_addr = 0;
+
+ for (i = 0; i < HIF_MAX_ARP_IP_ADDRTABLE_ENTRIES; i++) {
+ filter.condition_idx = i;
+ if (i < nb_arp_addr) {
+ // Caution: type of arp_addr_list[i] is __be32
+ memcpy(filter.ipv4_address,
+ &info->arp_addr_list[i],
+ sizeof(filter.ipv4_address));
+ filter.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ } else {
+ filter.arp_enable = HIF_ARP_NS_FILTERING_DISABLE;
+ }
+ hif_set_arp_ipv4_filter(wvif, &filter);
+ }
+ }
+
+ if (changed &
+ (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP |
+ BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) {
+ wvif->beacon_int = info->beacon_int;
+ wfx_update_beaconing(wvif);
+ wfx_upload_beacon(wvif);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED &&
+ wvif->state != WFX_STATE_IBSS) {
+ if (wvif->enable_beacon != info->enable_beacon) {
+ hif_beacon_transmit(wvif, info->enable_beacon);
+ wvif->enable_beacon = info->enable_beacon;
+ }
+ }
+
+ /* assoc/disassoc, or maybe AID changed */
+ if (changed & BSS_CHANGED_ASSOC) {
+ wfx_tx_lock_flush(wdev);
+ wvif->wep_default_key_id = -1;
+ wfx_tx_unlock(wdev);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC && !info->assoc &&
+ (wvif->state == WFX_STATE_STA || wvif->state == WFX_STATE_IBSS)) {
+ /* Schedule unjoin work */
+ wfx_tx_lock(wdev);
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wdev);
+ } else {
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ if (info->ibss_joined)
+ do_join = true;
+ else if (wvif->state == WFX_STATE_AP)
+ wfx_update_beaconing(wvif);
+ }
+
+ if (changed & BSS_CHANGED_BSSID)
+ do_join = true;
+
+ if (changed &
+ (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID |
+ BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_HT)) {
+ if (info->assoc) {
+ if (wvif->state < WFX_STATE_PRE_STA) {
+ ieee80211_connection_loss(vif);
+ mutex_unlock(&wdev->conf_mutex);
+ return;
+ } else if (wvif->state == WFX_STATE_PRE_STA) {
+ wvif->state = WFX_STATE_STA;
+ }
+ } else {
+ do_join = true;
+ }
+
+ if (info->assoc || info->ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else
+ memset(&wvif->bss_params, 0,
+ sizeof(wvif->bss_params));
+ }
+ }
+
+ /* ERP Protection */
+ if (changed & (BSS_CHANGED_ASSOC |
+ BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_ERP_PREAMBLE)) {
+ u32 prev_erp_info = wvif->erp_info;
+
+ if (info->use_cts_prot)
+ wvif->erp_info |= WLAN_ERP_USE_PROTECTION;
+ else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+ wvif->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+ if (info->use_short_preamble)
+ wvif->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+ else
+ wvif->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+ if (prev_erp_info != wvif->erp_info)
+ schedule_work(&wvif->set_cts_work);
+ }
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT))
+ hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
+
+ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+ struct hif_mib_rcpi_rssi_threshold th = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ wvif->cqm_rssi_thold = info->cqm_rssi_thold;
+
+ if (!info->cqm_rssi_thold && !info->cqm_rssi_hyst) {
+ th.upperthresh = 1;
+ th.lowerthresh = 1;
+ } else {
+ /* FIXME: this is not the correct way of setting the
+ * threshold. Upper and lower must be set equal here
+ * and adjusted in the callback. However, the current
+ * implementation is much more reliable and stable.
+ */
+ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+ * RSSI = RCPI / 2 - 110
+ */
+ th.upper_threshold = info->cqm_rssi_thold + info->cqm_rssi_hyst;
+ th.upper_threshold = (th.upper_threshold + 110) * 2;
+ th.lower_threshold = info->cqm_rssi_thold;
+ th.lower_threshold = (th.lower_threshold + 110) * 2;
+ }
+ hif_set_rcpi_rssi_threshold(wvif, &th);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER &&
+ info->txpower != wdev->output_power) {
+ wdev->output_power = info->txpower;
+ hif_set_output_power(wvif, wdev->output_power * 10);
+ }
+ mutex_unlock(&wdev->conf_mutex);
+
+ if (do_join) {
+ wfx_tx_lock_flush(wdev);
+ wfx_do_join(wvif); /* Will unlock it for us */
+ }
+}
+
+static void wfx_ps_notify(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd,
+ int link_id)
+{
+ u32 bit, prev;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ /* Zero link id means "for all link IDs" */
+ if (link_id) {
+ bit = BIT(link_id);
+ } else if (notify_cmd != STA_NOTIFY_AWAKE) {
+ dev_warn(wvif->wdev->dev, "unsupported notify command\n");
+ bit = 0;
+ } else {
+ bit = wvif->link_id_map;
+ }
+ prev = wvif->sta_asleep_mask & bit;
+
+ switch (notify_cmd) {
+ case STA_NOTIFY_SLEEP:
+ if (!prev) {
+ if (wvif->mcast_buffered && !wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_start_work);
+ wvif->sta_asleep_mask |= bit;
+ }
+ break;
+ case STA_NOTIFY_AWAKE:
+ if (prev) {
+ wvif->sta_asleep_mask &= ~bit;
+ wvif->pspoll_mask &= ~bit;
+ if (link_id && !wvif->sta_asleep_mask)
+ schedule_work(&wvif->mcast_stop_work);
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ break;
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+
+ wfx_ps_notify(wvif, notify_cmd, sta_priv->link_id);
+}
+
+static int wfx_set_tim_impl(struct wfx_vif *wvif, bool aid0_bit_set)
+{
+ struct sk_buff *skb;
+ struct hif_ie_flags target_frame = {
+ .beacon = 1,
+ };
+ u16 tim_offset, tim_length;
+ u8 *tim_ptr;
+
+ skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
+ &tim_offset, &tim_length);
+ if (!skb) {
+ if (!__wfx_flush(wvif->wdev, true))
+ wfx_tx_unlock(wvif->wdev);
+ return -ENOENT;
+ }
+ tim_ptr = skb->data + tim_offset;
+
+ if (tim_offset && tim_length >= 6) {
+ /* Ignore DTIM count from mac80211:
+ * firmware handles DTIM internally.
+ */
+ tim_ptr[2] = 0;
+
+ /* Set/reset aid0 bit */
+ if (aid0_bit_set)
+ tim_ptr[4] |= 1;
+ else
+ tim_ptr[4] &= ~1;
+ }
+
+ hif_update_ie(wvif, &target_frame, tim_ptr, tim_length);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static void wfx_set_tim_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif, set_tim_work);
+
+ wfx_set_tim_impl(wvif, wvif->aid0_bit_set);
+}
+
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_sta_priv *sta_dev = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = wdev_to_wvif(wdev, sta_dev->vif_id);
+
+ schedule_work(&wvif->set_tim_work);
+ return 0;
+}
+
+static void wfx_mcast_start_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ mcast_start_work);
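+ /* The multicast delivery window lasts one DTIM period, counting 20 TU
+ * of margin on each beacon interval.
+ */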
+ long tmo = wvif->dtim_period * TU_TO_JIFFIES(wvif->beacon_int + 20);
+
+ cancel_work_sync(&wvif->mcast_stop_work);
+ if (!wvif->aid0_bit_set) {
+ wfx_tx_lock_flush(wvif->wdev);
+ wfx_set_tim_impl(wvif, true);
+ wvif->aid0_bit_set = true;
+ mod_timer(&wvif->mcast_timeout, jiffies + tmo);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_stop_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ mcast_stop_work);
+
+ if (wvif->aid0_bit_set) {
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_tx_lock_flush(wvif->wdev);
+ wvif->aid0_bit_set = false;
+ wfx_set_tim_impl(wvif, false);
+ wfx_tx_unlock(wvif->wdev);
+ }
+}
+
+static void wfx_mcast_timeout(struct timer_list *t)
+{
+ struct wfx_vif *wvif = from_timer(wvif, t, mcast_timeout);
+
+ dev_warn(wvif->wdev->dev, "multicast delivery timeout\n");
+ spin_lock_bh(&wvif->ps_state_lock);
+ wvif->mcast_tx = wvif->aid0_bit_set && wvif->mcast_buffered;
+ if (wvif->mcast_tx)
+ wfx_bh_request_tx(wvif->wdev);
+ spin_unlock_bh(&wvif->ps_state_lock);
+}
+
+int wfx_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ /* Aggregation is implemented fully in firmware,
+ * including block ack negotiation. Do not allow
+ * mac80211 stack to do anything: it interferes with
+ * the firmware.
+ */
+
+ /* Note that we still need this function stubbed. */
+
+ return -EOPNOTSUPP;
+}
+
+void wfx_suspend_resume(struct wfx_vif *wvif,
+ struct hif_ind_suspend_resume_tx *arg)
+{
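+ /* The indication either opens/closes the broadcast/multicast delivery
+ * window (bc_mc_only) or suspends/resumes transmission to all stations
+ * at once.
+ */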
+ if (arg->suspend_resume_flags.bc_mc_only) {
+ bool cancel_tmo = false;
+
+ spin_lock_bh(&wvif->ps_state_lock);
+ if (!arg->suspend_resume_flags.resume)
+ wvif->mcast_tx = false;
+ else
+ wvif->mcast_tx = wvif->aid0_bit_set &&
+ wvif->mcast_buffered;
+ if (wvif->mcast_tx) {
+ cancel_tmo = true;
+ wfx_bh_request_tx(wvif->wdev);
+ }
+ spin_unlock_bh(&wvif->ps_state_lock);
+ if (cancel_tmo)
+ del_timer_sync(&wvif->mcast_timeout);
+ } else if (arg->suspend_resume_flags.resume) {
+ // FIXME: should change each station status independently
+ wfx_ps_notify(wvif, STA_NOTIFY_AWAKE, 0);
+ wfx_bh_request_tx(wvif->wdev);
+ } else {
+ // FIXME: should change each station status independently
+ wfx_ps_notify(wvif, STA_NOTIFY_SLEEP, 0);
+ }
+}
+
+int wfx_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+ return 0;
+}
+
+void wfx_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf)
+{
+}
+
+void wfx_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ u32 changed)
+{
+}
+
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel, "channel overwrite");
+ wvif->channel = ch;
+ wvif->ht_info.channel_type = cfg80211_get_chandef_type(&conf->def);
+
+ return 0;
+}
+
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct ieee80211_channel *ch = conf->def.chan;
+
+ WARN(wvif->channel != ch, "channel mismatch");
+ wvif->channel = NULL;
+}
+
+int wfx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ int ret = 0;
+ struct wfx_dev *wdev = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct wfx_vif *wvif;
+
+ // FIXME: the interface id should not be hardcoded
+ wvif = wdev_to_wvif(wdev, 0);
+ if (!wvif) {
+ WARN(1, "interface 0 does not exist anymore");
+ return 0;
+ }
+
+ down(&wvif->scan.lock);
+ mutex_lock(&wdev->conf_mutex);
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ wdev->output_power = conf->power_level;
+ hif_set_output_power(wvif, wdev->output_power * 10);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ memset(&wvif->powersave_mode, 0,
+ sizeof(wvif->powersave_mode));
+ if (conf->flags & IEEE80211_CONF_PS) {
+ wvif->powersave_mode.pm_mode.enter_psm = 1;
+ if (conf->dynamic_ps_timeout > 0) {
+ wvif->powersave_mode.pm_mode.fast_psm = 1;
+ /*
+ * The firmware does not support idle
+ * periods longer than 128 ms (the
+ * field unit is 0.5 ms).
+ */
+ wvif->powersave_mode.fast_psm_idle_period =
+ min(conf->dynamic_ps_timeout *
+ 2, 255);
+ }
+ }
+ if (wvif->state == WFX_STATE_STA && wvif->bss_params.aid)
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ wvif = wdev_to_wvif(wdev, 0);
+ }
+
+ mutex_unlock(&wdev->conf_mutex);
+ up(&wvif->scan.lock);
+ return ret;
+}
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ int i;
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ // FIXME: parameters are set by the kernel just after interface_add.
+ // Keep struct hif_req_edca_queue_params blank?
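+ // These values match the default WMM EDCA parameters for each access
+ // category.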
+ struct hif_req_edca_queue_params default_edca_params[] = {
+ [IEEE80211_AC_VO] = {
+ .queue_id = HIF_QUEUE_ID_VOICE,
+ .aifsn = 2,
+ .cw_min = 3,
+ .cw_max = 7,
+ .tx_op_limit = TXOP_UNIT * 47,
+ },
+ [IEEE80211_AC_VI] = {
+ .queue_id = HIF_QUEUE_ID_VIDEO,
+ .aifsn = 2,
+ .cw_min = 7,
+ .cw_max = 15,
+ .tx_op_limit = TXOP_UNIT * 94,
+ },
+ [IEEE80211_AC_BE] = {
+ .queue_id = HIF_QUEUE_ID_BESTEFFORT,
+ .aifsn = 3,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ [IEEE80211_AC_BK] = {
+ .queue_id = HIF_QUEUE_ID_BACKGROUND,
+ .aifsn = 7,
+ .cw_min = 15,
+ .cw_max = 1023,
+ .tx_op_limit = TXOP_UNIT * 0,
+ },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(default_edca_params) != ARRAY_SIZE(wvif->edca.params));
+ if (wfx_api_older_than(wdev, 2, 0)) {
+ default_edca_params[IEEE80211_AC_BE].queue_id = HIF_QUEUE_ID_BACKGROUND;
+ default_edca_params[IEEE80211_AC_BK].queue_id = HIF_QUEUE_ID_BESTEFFORT;
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_UAPSD |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+ mutex_lock(&wdev->conf_mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ if (!wdev->vif[i]) {
+ wdev->vif[i] = vif;
+ wvif->id = i;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(wdev->vif)) {
+ mutex_unlock(&wdev->conf_mutex);
+ return -EOPNOTSUPP;
+ }
+ // FIXME: prefer using container_of() to get the vif
+ wvif->vif = vif;
+ wvif->wdev = wdev;
+
+ INIT_WORK(&wvif->link_id_work, wfx_link_id_work);
+ INIT_DELAYED_WORK(&wvif->link_id_gc_work, wfx_link_id_gc_work);
+
+ spin_lock_init(&wvif->ps_state_lock);
+ INIT_WORK(&wvif->set_tim_work, wfx_set_tim_work);
+
+ INIT_WORK(&wvif->mcast_start_work, wfx_mcast_start_work);
+ INIT_WORK(&wvif->mcast_stop_work, wfx_mcast_stop_work);
+ timer_setup(&wvif->mcast_timeout, wfx_mcast_timeout, 0);
+
+ wvif->setbssparams_done = false;
+ mutex_init(&wvif->bss_loss_lock);
+ INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
+
+ wvif->wep_default_key_id = -1;
+ INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
+
+ sema_init(&wvif->scan.lock, 1);
+ INIT_WORK(&wvif->scan.work, wfx_scan_work);
+ INIT_DELAYED_WORK(&wvif->scan.timeout, wfx_scan_timeout);
+
+ spin_lock_init(&wvif->event_queue_lock);
+ INIT_LIST_HEAD(&wvif->event_queue);
+ INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
+
+ init_completion(&wvif->set_pm_mode_complete);
+ complete(&wvif->set_pm_mode_complete);
+ INIT_WORK(&wvif->set_beacon_wakeup_period_work,
+ wfx_set_beacon_wakeup_period_work);
+ INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
+ INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
+ INIT_WORK(&wvif->set_cts_work, wfx_set_cts_work);
+ INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+
+ mutex_unlock(&wdev->conf_mutex);
+
+ hif_set_macaddr(wvif, vif->addr);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ memcpy(&wvif->edca.params[i], &default_edca_params[i],
+ sizeof(default_edca_params[i]));
+ wvif->edca.uapsd_enable[i] = false;
+ hif_set_edca_queue_params(wvif, &wvif->edca.params[i]);
+ }
+ wfx_set_uapsd_param(wvif, &wvif->edca);
+
+ wfx_tx_policy_init(wvif);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ // Block ACKs are not supported in combo mode; enable them
+ // only when a single vif is in use
+ if (wvif_count(wdev) == 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ hif_set_block_ack_policy(wvif, 0x00, 0x00);
+ // Combo mode forces power save; re-apply the PM settings now
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+ return 0;
+}
+
+void wfx_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ int i;
+
+ // If a scan is in progress, wait for it to complete
+ while (down_trylock(&wvif->scan.lock))
+ schedule();
+ up(&wvif->scan.lock);
+ wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
+
+ mutex_lock(&wdev->conf_mutex);
+ switch (wvif->state) {
+ case WFX_STATE_PRE_STA:
+ case WFX_STATE_STA:
+ case WFX_STATE_IBSS:
+ wfx_tx_lock_flush(wdev);
+ if (!schedule_work(&wvif->unjoin_work))
+ wfx_tx_unlock(wdev);
+ break;
+ case WFX_STATE_AP:
+ for (i = 0; wvif->link_id_map; ++i) {
+ if (wvif->link_id_map & BIT(i)) {
+ wfx_unmap_link(wvif, i);
+ wvif->link_id_map &= ~BIT(i);
+ }
+ }
+ memset(wvif->link_id_db, 0, sizeof(wvif->link_id_db));
+ wvif->sta_asleep_mask = 0;
+ wvif->enable_beacon = false;
+ wvif->mcast_tx = false;
+ wvif->aid0_bit_set = false;
+ wvif->mcast_buffered = false;
+ wvif->pspoll_mask = 0;
+ /* reset.link_id = 0; */
+ hif_reset(wvif, false);
+ break;
+ default:
+ break;
+ }
+
+ wvif->state = WFX_STATE_PASSIVE;
+ wfx_tx_queues_wait_empty_vif(wvif);
+ wfx_tx_unlock(wdev);
+
+ /* FIXME: in addition to resetting the MAC address, try to reset the interface */
+ hif_set_macaddr(wvif, NULL);
+
+ cancel_delayed_work_sync(&wvif->scan.timeout);
+
+ wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ cancel_work_sync(&wvif->unjoin_work);
+ cancel_delayed_work_sync(&wvif->link_id_gc_work);
+ del_timer_sync(&wvif->mcast_timeout);
+ wfx_free_event_queue(wvif);
+
+ wdev->vif[wvif->id] = NULL;
+ wvif->vif = NULL;
+
+ mutex_unlock(&wdev->conf_mutex);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ // Block ACKs are not supported in combo mode; enable them
+ // only when a single vif is in use
+ if (wvif_count(wdev) == 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ else
+ hif_set_block_ack_policy(wvif, 0x00, 0x00);
+ // Combo mode forces power save; re-apply the PM settings now
+ wfx_set_pm(wvif, &wvif->powersave_mode);
+ }
+}
+
+int wfx_start(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+
+void wfx_stop(struct ieee80211_hw *hw)
+{
+ struct wfx_dev *wdev = hw->priv;
+
+ wfx_tx_lock_flush(wdev);
+ mutex_lock(&wdev->conf_mutex);
+ wfx_tx_queues_clear(wdev);
+ mutex_unlock(&wdev->conf_mutex);
+ wfx_tx_unlock(wdev);
+ WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
+}
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
new file mode 100644
index 000000000000..4ccf1b17632b
--- /dev/null
+++ b/drivers/staging/wfx/sta.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Implementation of mac80211 API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ */
+#ifndef WFX_STA_H
+#define WFX_STA_H
+
+#include <net/mac80211.h>
+
+#include "hif_api_cmd.h"
+
+struct wfx_dev;
+struct wfx_vif;
+
+enum wfx_state {
+ WFX_STATE_PASSIVE = 0,
+ WFX_STATE_PRE_STA,
+ WFX_STATE_STA,
+ WFX_STATE_IBSS,
+ WFX_STATE_AP,
+};
+
+struct wfx_ht_info {
+ struct ieee80211_sta_ht_cap ht_cap;
+ enum nl80211_channel_type channel_type;
+ u16 operation_mode;
+};
+
+struct wfx_hif_event {
+ struct list_head link;
+ struct hif_ind_event evt;
+};
+
+struct wfx_edca_params {
+ /* NOTE: index is a linux queue id. */
+ struct hif_req_edca_queue_params params[IEEE80211_NUM_ACS];
+ bool uapsd_enable[IEEE80211_NUM_ACS];
+};
+
+struct wfx_grp_addr_table {
+ bool enable;
+ int num_addresses;
+ u8 address_list[8][ETH_ALEN];
+};
+
+struct wfx_sta_priv {
+ int link_id;
+ int vif_id;
+};
+
+// mac80211 interface
+int wfx_start(struct ieee80211_hw *hw);
+void wfx_stop(struct ieee80211_hw *hw);
+int wfx_config(struct ieee80211_hw *hw, u32 changed);
+int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list);
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused);
+
+int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed);
+int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta);
+int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+int wfx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params);
+int wfx_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf, u32 changed);
+int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf);
+void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *conf);
+
+// WSM Callbacks
+void wfx_suspend_resume(struct wfx_vif *wvif,
+ struct hif_ind_suspend_resume_tx *arg);
+
+// Other Helpers
+void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
+void wfx_update_filtering(struct wfx_vif *wvif);
+int wfx_set_pm(struct wfx_vif *wvif, const struct hif_req_set_pm_mode *arg);
+int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+
+#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
new file mode 100644
index 000000000000..3f6198ab2235
--- /dev/null
+++ b/drivers/staging/wfx/traces.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints definitions.
+ *
+ * Copyright (c) 2018-2019, Silicon Laboratories, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wfx
+
+#if !defined(_WFX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _WFX_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+
+#include "bus.h"
+#include "hif_api_cmd.h"
+#include "hif_api_mib.h"
+
+/* The hell below needs some explanation. For each symbolic number, we need to
+ * define it with TRACE_DEFINE_ENUM() and list it for __print_symbolic().
+ *
+ * 1. Define a new macro that calls TRACE_DEFINE_ENUM():
+ *
+ *    #define xxx_name(sym) TRACE_DEFINE_ENUM(sym);
+ *
+ * 2. Define the list of all symbols:
+ *
+ *    #define list_names \
+ *        ... \
+ *        xxx_name(XXX) \
+ *        ...
+ *
+ * 3. Instantiate that list_names:
+ *
+ *    list_names
+ *
+ * 4. Redefine xxx_name() as an entry of the array for __print_symbolic():
+ *
+ *    #undef xxx_name
+ *    #define xxx_name(msg) { msg, #msg },
+ *
+ * 5. list_names can now nearly be used with __print_symbolic(), but
+ *    __print_symbolic() dislikes the trailing comma of the list. So we define
+ *    a new list with a dummy element (a standalone sketch of this whole trick
+ *    follows this file):
+ *
+ *    #define list_for_print_symbolic list_names { -1, NULL }
+ */
+
+#define _hif_msg_list \
+ hif_cnf_name(ADD_KEY) \
+ hif_cnf_name(BEACON_TRANSMIT) \
+ hif_cnf_name(EDCA_QUEUE_PARAMS) \
+ hif_cnf_name(JOIN) \
+ hif_cnf_name(MAP_LINK) \
+ hif_cnf_name(READ_MIB) \
+ hif_cnf_name(REMOVE_KEY) \
+ hif_cnf_name(RESET) \
+ hif_cnf_name(SET_BSS_PARAMS) \
+ hif_cnf_name(SET_PM_MODE) \
+ hif_cnf_name(START) \
+ hif_cnf_name(START_SCAN) \
+ hif_cnf_name(STOP_SCAN) \
+ hif_cnf_name(TX) \
+ hif_cnf_name(MULTI_TRANSMIT) \
+ hif_cnf_name(UPDATE_IE) \
+ hif_cnf_name(WRITE_MIB) \
+ hif_cnf_name(CONFIGURATION) \
+ hif_cnf_name(CONTROL_GPIO) \
+ hif_cnf_name(PREVENT_ROLLBACK) \
+ hif_cnf_name(SET_SL_MAC_KEY) \
+ hif_cnf_name(SL_CONFIGURE) \
+ hif_cnf_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_cnf_name(SHUT_DOWN) \
+ hif_ind_name(EVENT) \
+ hif_ind_name(JOIN_COMPLETE) \
+ hif_ind_name(RX) \
+ hif_ind_name(SCAN_CMPL) \
+ hif_ind_name(SET_PM_MODE_CMPL) \
+ hif_ind_name(SUSPEND_RESUME_TX) \
+ hif_ind_name(SL_EXCHANGE_PUB_KEYS) \
+ hif_ind_name(ERROR) \
+ hif_ind_name(EXCEPTION) \
+ hif_ind_name(GENERIC) \
+ hif_ind_name(WAKEUP) \
+ hif_ind_name(STARTUP)
+
+#define hif_msg_list_enum _hif_msg_list
+
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) TRACE_DEFINE_ENUM(HIF_CNF_ID_##msg);
+#define hif_ind_name(msg) TRACE_DEFINE_ENUM(HIF_IND_ID_##msg);
+hif_msg_list_enum
+#undef hif_cnf_name
+#undef hif_ind_name
+#define hif_cnf_name(msg) { HIF_CNF_ID_##msg, #msg },
+#define hif_ind_name(msg) { HIF_IND_ID_##msg, #msg },
+#define hif_msg_list hif_msg_list_enum { -1, NULL }
+
+#define _hif_mib_list \
+ hif_mib_name(ARP_IP_ADDRESSES_TABLE) \
+ hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \
+ hif_mib_name(BEACON_FILTER_ENABLE) \
+ hif_mib_name(BEACON_FILTER_TABLE) \
+ hif_mib_name(BEACON_WAKEUP_PERIOD) \
+ hif_mib_name(BLOCK_ACK_POLICY) \
+ hif_mib_name(CONFIG_DATA_FILTER) \
+ hif_mib_name(COUNTERS_TABLE) \
+ hif_mib_name(CURRENT_TX_POWER_LEVEL) \
+ hif_mib_name(DOT11_MAC_ADDRESS) \
+ hif_mib_name(DOT11_MAX_RECEIVE_LIFETIME) \
+ hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \
+ hif_mib_name(DOT11_RTS_THRESHOLD) \
+ hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \
+ hif_mib_name(GL_BLOCK_ACK_INFO) \
+ hif_mib_name(GL_OPERATIONAL_POWER_MODE) \
+ hif_mib_name(GL_SET_MULTI_MSG) \
+ hif_mib_name(INACTIVITY_TIMER) \
+ hif_mib_name(INTERFACE_PROTECTION) \
+ hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(KEEP_ALIVE_PERIOD) \
+ hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(NON_ERP_PROTECTION) \
+ hif_mib_name(NS_IP_ADDRESSES_TABLE) \
+ hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \
+ hif_mib_name(PROTECTED_MGMT_POLICY) \
+ hif_mib_name(RX_FILTER) \
+ hif_mib_name(RCPI_RSSI_THRESHOLD) \
+ hif_mib_name(SET_ASSOCIATION_MODE) \
+ hif_mib_name(SET_DATA_FILTERING) \
+ hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
+ hif_mib_name(SET_HT_PROTECTION) \
+ hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
+ hif_mib_name(SET_TX_RATE_RETRY_POLICY) \
+ hif_mib_name(SET_UAPSD_INFORMATION) \
+ hif_mib_name(PORT_DATAFRAME_CONDITION) \
+ hif_mib_name(SLOT_TIME) \
+ hif_mib_name(STATISTICS_TABLE) \
+ hif_mib_name(TEMPLATE_FRAME) \
+ hif_mib_name(TSF_COUNTER) \
+ hif_mib_name(UC_MC_BC_DATAFRAME_CONDITION)
+
+#define hif_mib_list_enum _hif_mib_list
+
+#undef hif_mib_name
+#define hif_mib_name(mib) TRACE_DEFINE_ENUM(HIF_MIB_ID_##mib);
+hif_mib_list_enum
+#undef hif_mib_name
+#define hif_mib_name(mib) { HIF_MIB_ID_##mib, #mib },
+#define hif_mib_list hif_mib_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(hif_data,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv),
+ TP_STRUCT__entry(
+ __field(int, tx_fill_level)
+ __field(int, msg_id)
+ __field(const char *, msg_type)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __field(int, if_id)
+ __field(int, mib)
+ __array(u8, buf, 128)
+ ),
+ TP_fast_assign(
+ int header_len;
+
+ __entry->tx_fill_level = tx_fill_level;
+ __entry->msg_len = hif->len;
+ __entry->msg_id = hif->id;
+ __entry->if_id = hif->interface;
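+ /* On the receive path, bit 0x80 of the message id distinguishes
+  * indications (IND) from confirmations (CNF).
+  */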
+ if (is_recv)
+ __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF";
+ else
+ __entry->msg_type = "REQ";
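+ /* MIB requests begin with the MIB identifier; record it and skip
+  * the 4-byte MIB header when dumping the payload.
+  */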
+ if (!is_recv &&
+ (__entry->msg_id == HIF_REQ_ID_READ_MIB ||
+ __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
+ __entry->mib = le16_to_cpup((u16 *) hif->body);
+ header_len = 4;
+ } else {
+ __entry->mib = -1;
+ header_len = 0;
+ }
+ __entry->buf_len = min_t(int, __entry->msg_len,
+ sizeof(__entry->buf))
+ - sizeof(struct hif_msg) - header_len;
+ memcpy(__entry->buf, hif->body + header_len, __entry->buf_len);
+ ),
+ TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)",
+ __entry->tx_fill_level,
+ __entry->if_id,
+ __print_symbolic(__entry->msg_id, hif_msg_list),
+ __entry->msg_type,
+ __entry->mib != -1 ? "/" : "",
+ __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(hif_data, hif_send,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_send(hif, tx_fill_level)\
+ trace_hif_send(hif, tx_fill_level, false)
+DEFINE_EVENT(hif_data, hif_recv,
+ TP_PROTO(struct hif_msg *hif, int tx_fill_level, bool is_recv),
+ TP_ARGS(hif, tx_fill_level, is_recv));
+#define _trace_hif_recv(hif, tx_fill_level)\
+ trace_hif_recv(hif, tx_fill_level, true)
+
+#define wfx_reg_list_enum \
+ wfx_reg_name(WFX_REG_CONFIG, "CONFIG") \
+ wfx_reg_name(WFX_REG_CONTROL, "CONTROL") \
+ wfx_reg_name(WFX_REG_IN_OUT_QUEUE, "QUEUE") \
+ wfx_reg_name(WFX_REG_AHB_DPORT, "AHB") \
+ wfx_reg_name(WFX_REG_BASE_ADDR, "BASE_ADDR") \
+ wfx_reg_name(WFX_REG_SRAM_DPORT, "SRAM") \
+ wfx_reg_name(WFX_REG_SET_GEN_R_W, "SET_GEN_R_W") \
+ wfx_reg_name(WFX_REG_FRAME_OUT, "FRAME_OUT")
+
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) TRACE_DEFINE_ENUM(sym);
+wfx_reg_list_enum
+#undef wfx_reg_name
+#define wfx_reg_name(sym, name) { sym, name },
+#define wfx_reg_list wfx_reg_list_enum { -1, NULL }
+
+DECLARE_EVENT_CLASS(io_data,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, msg_len)
+ __field(int, buf_len)
+ __array(u8, buf, 32)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->msg_len = len;
+ __entry->buf_len = min_t(int, sizeof(__entry->buf),
+ __entry->msg_len);
+ memcpy(__entry->buf, io_buf, __entry->buf_len);
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %s%s (%d bytes)",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __print_hex(__entry->buf, __entry->buf_len),
+ __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
+ __entry->msg_len
+ )
+);
+DEFINE_EVENT(io_data, io_write,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_write(reg, addr, io_buf, len)\
+ trace_io_write(reg, addr, io_buf, len)
+#define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len)
+DEFINE_EVENT(io_data, io_read,
+ TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
+ TP_ARGS(reg, addr, io_buf, len));
+#define _trace_io_ind_read(reg, addr, io_buf, len)\
+ trace_io_read(reg, addr, io_buf, len)
+#define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len)
+
+DECLARE_EVENT_CLASS(io_data32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val),
+ TP_STRUCT__entry(
+ __field(int, reg)
+ __field(int, addr)
+ __field(int, val)
+ __array(u8, addr_str, 10)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->addr = addr;
+ __entry->val = val;
+ if (addr >= 0)
+ snprintf(__entry->addr_str, 10, "/%08x", addr);
+ else
+ __entry->addr_str[0] = 0;
+ ),
+ TP_printk("%s%s: %08x",
+ __print_symbolic(__entry->reg, wfx_reg_list),
+ __entry->addr_str,
+ __entry->val
+ )
+);
+DEFINE_EVENT(io_data32, io_write32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val)
+#define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val)
+DEFINE_EVENT(io_data32, io_read32,
+ TP_PROTO(int reg, int addr, u32 val),
+ TP_ARGS(reg, addr, val));
+#define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val)
+#define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val)
+
+DECLARE_EVENT_CLASS(piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored),
+ TP_STRUCT__entry(
+ __field(int, val)
+ __field(bool, ignored)
+ ),
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->ignored = ignored;
+ ),
+ TP_printk("CONTROL: %08x%s",
+ __entry->val,
+ __entry->ignored ? " (ignored)" : ""
+ )
+);
+DEFINE_EVENT(piggyback, piggyback,
+ TP_PROTO(u32 val, bool ignored),
+ TP_ARGS(val, ignored));
+#define _trace_piggyback(val, ignored) trace_piggyback(val, ignored)
+
+TRACE_EVENT(bh_stats,
+ TP_PROTO(int ind, int req, int cnf, int busy, bool release),
+ TP_ARGS(ind, req, cnf, busy, release),
+ TP_STRUCT__entry(
+ __field(int, ind)
+ __field(int, req)
+ __field(int, cnf)
+ __field(int, busy)
+ __field(bool, release)
+ ),
+ TP_fast_assign(
+ __entry->ind = ind;
+ __entry->req = req;
+ __entry->cnf = cnf;
+ __entry->busy = busy;
+ __entry->release = release;
+ ),
+ TP_printk("IND/REQ/CNF:%3d/%3d/%3d, REQ in progress:%3d, WUP: %s",
+ __entry->ind,
+ __entry->req,
+ __entry->cnf,
+ __entry->busy,
+ __entry->release ? "release" : "keep"
+ )
+);
+#define _trace_bh_stats(ind, req, cnf, busy, release)\
+ trace_bh_stats(ind, req, cnf, busy, release)
+
+TRACE_EVENT(tx_stats,
+ TP_PROTO(struct hif_cnf_tx *tx_cnf, struct sk_buff *skb, int delay),
+ TP_ARGS(tx_cnf, skb, delay),
+ TP_STRUCT__entry(
+ __field(int, pkt_id)
+ __field(int, delay_media)
+ __field(int, delay_queue)
+ __field(int, delay_fw)
+ __field(int, ack_failures)
+ __field(int, flags)
+ __array(int, rate, 4)
+ __array(int, tx_count, 4)
+ ),
+ TP_fast_assign(
+ // Keep in sync with the wfx_rates definition in main.c
+ static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
+ 10, 11, 12, 13 };
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rates = tx_info->driver_rates;
+ int i;
+
+ __entry->pkt_id = tx_cnf->packet_id;
+ __entry->delay_media = tx_cnf->media_delay;
+ __entry->delay_queue = tx_cnf->tx_queue_delay;
+ __entry->delay_fw = delay;
+ __entry->ack_failures = tx_cnf->ack_failures;
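+ /* Report attempts rather than failures: also count the final
+  * (successful or last recorded) transmission.
+  */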
+ if (!tx_cnf->status || __entry->ack_failures)
+ __entry->ack_failures += 1;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->rate[i] = rates[i].idx;
+ else
+ __entry->rate[i] = hw_rate[rates[i].idx];
+ __entry->tx_count[i] = rates[i].count;
+ }
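+ /* Pack rate-control and status bits into a mask that TP_printk()
+  * below decodes as the M/S/G/R/D/F/Q flags.
+  */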
+ __entry->flags = 0;
+ if (rates[0].flags & IEEE80211_TX_RC_MCS)
+ __entry->flags |= 0x01;
+ if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ __entry->flags |= 0x02;
+ if (rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ __entry->flags |= 0x04;
+ if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ __entry->flags |= 0x08;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ __entry->flags |= 0x10;
+ if (tx_cnf->status)
+ __entry->flags |= 0x20;
+ if (tx_cnf->status == HIF_REQUEUE)
+ __entry->flags |= 0x40;
+ ),
+ TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
+ __entry->pkt_id,
+ __print_flags(__entry->flags, NULL,
+ { 0x01, "M" }, { 0x02, "S" }, { 0x04, "G" },
+ { 0x08, "R" }, { 0x10, "D" }, { 0x20, "F" },
+ { 0x40, "Q" }),
+ __entry->rate[0],
+ __entry->tx_count[0],
+ __entry->rate[1],
+ __entry->tx_count[1],
+ __entry->rate[2],
+ __entry->tx_count[2],
+ __entry->rate[3],
+ __entry->tx_count[3],
+ __entry->ack_failures,
+ __entry->delay_media,
+ __entry->delay_queue,
+ __entry->delay_fw
+ )
+);
+#define _trace_tx_stats(tx_cnf, skb, delay) trace_tx_stats(tx_cnf, skb, delay)
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE traces
+
+#include <trace/define_trace.h>
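For reference, the five-step dance documented at the top of this file is a plain X-macro expansion. A minimal standalone sketch of the same trick, outside the kernel tracing machinery (all names here are illustrative, not part of the driver):

#include <stdio.h>

enum color { RED = 1, GREEN = 2, BLUE = 4 };

struct sym { int val; const char *name; };

/* Pass 1: in the kernel this would emit TRACE_DEFINE_ENUM(sym); here a no-op. */
#define color_name(sym)
#define list_colors \
	color_name(RED) \
	color_name(GREEN) \
	color_name(BLUE)
list_colors

/* Pass 2: redefine the per-symbol macro as a table entry; the dummy
 * { -1, NULL } element makes the trailing comma harmless.
 */
#undef color_name
#define color_name(sym) { sym, #sym },
static const struct sym color_table[] = { list_colors { -1, NULL } };

int main(void)
{
	const struct sym *s;

	for (s = color_table; s->name; s++)
		printf("%d = %s\n", s->val, s->name);
	return 0;
}

The same list macro is expanded twice: once so each symbol can be registered, once so it becomes a { value, "name" } lookup entry for __print_symbolic().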
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
new file mode 100644
index 000000000000..781a8c8ba982
--- /dev/null
+++ b/drivers/staging/wfx/wfx.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common private data for Silicon Labs WFx chips.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ */
+#ifndef WFX_H
+#define WFX_H
+
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/nospec.h>
+#include <net/mac80211.h>
+
+#include "bh.h"
+#include "data_tx.h"
+#include "main.h"
+#include "queue.h"
+#include "secure_link.h"
+#include "sta.h"
+#include "scan.h"
+#include "hif_tx.h"
+#include "hif_api_general.h"
+
+struct hwbus_ops;
+
+struct wfx_dev {
+ struct wfx_platform_data pdata;
+ struct device *dev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif[2];
+ struct mac_address addresses[2];
+ const struct hwbus_ops *hwbus_ops;
+ void *hwbus_priv;
+
+ u8 keyset;
+ struct completion firmware_ready;
+ struct hif_ind_startup hw_caps;
+ struct wfx_hif hif;
+ struct sl_context sl;
+ int chip_frozen;
+ struct mutex conf_mutex;
+
+ struct wfx_hif_cmd hif_cmd;
+ struct wfx_queue tx_queue[4];
+ struct wfx_queue_stats tx_queue_stats;
+ int tx_burst_idx;
+ atomic_t tx_lock;
+
+ u32 key_map;
+ struct hif_req_add_key keys[MAX_KEY_ENTRIES];
+
+ struct hif_rx_stats rx_stats;
+ struct mutex rx_stats_lock;
+
+ int output_power;
+ atomic_t scan_in_progress;
+};
+
+struct wfx_vif {
+ struct wfx_dev *wdev;
+ struct ieee80211_vif *vif;
+ struct ieee80211_channel *channel;
+ int id;
+ enum wfx_state state;
+
+ int delayed_link_loss;
+ int bss_loss_state;
+ u32 bss_loss_confirm_id;
+ struct mutex bss_loss_lock;
+ struct delayed_work bss_loss_work;
+
+ u32 link_id_map;
+ struct wfx_link_entry link_id_db[WFX_MAX_STA_IN_AP_MODE];
+ struct delayed_work link_id_gc_work;
+ struct work_struct link_id_work;
+
+ bool aid0_bit_set;
+ bool mcast_tx;
+ bool mcast_buffered;
+ struct wfx_grp_addr_table mcast_filter;
+ struct timer_list mcast_timeout;
+ struct work_struct mcast_start_work;
+ struct work_struct mcast_stop_work;
+
+ s8 wep_default_key_id;
+ struct sk_buff *wep_pending_skb;
+ struct work_struct wep_key_work;
+
+ struct tx_policy_cache tx_policy_cache;
+ struct work_struct tx_policy_upload_work;
+
+ u32 sta_asleep_mask;
+ u32 pspoll_mask;
+ spinlock_t ps_state_lock;
+ struct work_struct set_tim_work;
+
+ int dtim_period;
+ int beacon_int;
+ bool enable_beacon;
+ struct work_struct set_beacon_wakeup_period_work;
+
+ bool filter_bssid;
+ bool fwd_probe_req;
+ bool disable_beacon_filter;
+ struct work_struct update_filtering_work;
+
+ u32 erp_info;
+ int cqm_rssi_thold;
+ bool setbssparams_done;
+ struct wfx_ht_info ht_info;
+ struct wfx_edca_params edca;
+ struct hif_mib_set_uapsd_information uapsd_info;
+ struct hif_req_set_bss_params bss_params;
+ struct work_struct bss_params_work;
+ struct work_struct set_cts_work;
+
+ int join_complete_status;
+ bool delayed_unjoin;
+ struct work_struct unjoin_work;
+
+ struct wfx_scan scan;
+
+ struct hif_req_set_pm_mode powersave_mode;
+ struct completion set_pm_mode_complete;
+
+ struct list_head event_queue;
+ spinlock_t event_queue_lock;
+ struct work_struct event_handler_work;
+};
+
+static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
+{
+ if (vif_id >= ARRAY_SIZE(wdev->vif)) {
+ dev_dbg(wdev->dev, "requesting non-existent vif: %d\n", vif_id);
+ return NULL;
+ }
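+ /* Clamp vif_id under speculation (Spectre-v1 hardening) before it
+  * is used as an array index.
+  */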
+ vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
+ if (!wdev->vif[vif_id]) {
+ dev_dbg(wdev->dev, "requesting non-allocated vif: %d\n",
+ vif_id);
+ return NULL;
+ }
+ return (struct wfx_vif *) wdev->vif[vif_id]->drv_priv;
+}
+
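+/* Return the next allocated vif after @cur, or the first one when @cur is
+ * NULL. Returns NULL once the array is exhausted, so it can back a simple
+ * loop over the active interfaces.
+ */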
+static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev,
+ struct wfx_vif *cur)
+{
+ int i;
+ int mark = 0;
+ struct wfx_vif *tmp;
+
+ if (!cur)
+ mark = 1;
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ tmp = wdev_to_wvif(wdev, i);
+ if (mark && tmp)
+ return tmp;
+ if (tmp == cur)
+ mark = 1;
+ }
+ return NULL;
+}
+
+static inline int wvif_count(struct wfx_dev *wdev)
+{
+ int i;
+ int ret = 0;
+ struct wfx_vif *wvif;
+
+ for (i = 0; i < ARRAY_SIZE(wdev->vif); i++) {
+ wvif = wdev_to_wvif(wdev, i);
+ if (wvif)
+ ret++;
+ }
+ return ret;
+}
+
+static inline void memreverse(u8 *src, u8 length)
+{
+ u8 *lo = src;
+ u8 *hi = src + length - 1;
+ u8 swap;
+
+ while (lo < hi) {
+ swap = *lo;
+ *lo++ = *hi;
+ *hi-- = swap;
+ }
+}
+
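+/* Test whether a buffer is all zeros without a scratch copy: when the first
+ * byte is zero, buf[0..size-2] only matches buf[1..size-1] if every byte is
+ * zero. Returns non-zero when any byte is set.
+ */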
+static inline int memzcmp(void *src, unsigned int size)
+{
+ u8 *buf = src;
+
+ if (!size)
+ return 0;
+ if (*buf)
+ return 1;
+ return memcmp(buf, buf + 1, size - 1);
+}
+
+#endif /* WFX_H */
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index a5a8e806b98e..a3305a0a888a 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -4,11 +4,11 @@ obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
-wilc1000-objs := wilc_wfi_cfgoperations.o wilc_netdev.o wilc_mon.o \
- wilc_hif.o wilc_wlan_cfg.o wilc_wlan.o
+wilc1000-objs := cfg80211.o netdev.o mon.o \
+ hif.o wlan_cfg.o wlan.o
obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += wilc_sdio.o
+wilc1000-sdio-objs += sdio.o
obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += wilc_spi.o
+wilc1000-spi-objs += spi.o
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/cfg80211.c
index 22f21831649b..4863e516ff13 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/cfg80211.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_cfgoperations.h"
+#include "cfg80211.h"
#define FRAME_TYPE_ID 0
#define ACTION_CAT_ID 24
@@ -137,6 +137,7 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
struct wilc *wl = vif->wilc;
struct host_if_drv *wfi_drv = priv->hif_drv;
struct wilc_conn_info *conn_info = &wfi_drv->conn_info;
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
vif->connecting = false;
@@ -158,12 +159,16 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
memcpy(priv->associated_bss, conn_info->bssid,
ETH_ALEN);
- cfg80211_connect_result(dev, conn_info->bssid,
- conn_info->req_ies,
- conn_info->req_ies_len,
- conn_info->resp_ies,
- conn_info->resp_ies_len, connect_status,
- GFP_KERNEL);
+ cfg80211_ref_bss(wiphy, vif->bss);
+ cfg80211_connect_bss(dev, conn_info->bssid, vif->bss,
+ conn_info->req_ies,
+ conn_info->req_ies_len,
+ conn_info->resp_ies,
+ conn_info->resp_ies_len,
+ connect_status, GFP_KERNEL,
+ NL80211_TIMEOUT_UNSPECIFIED);
+
+ vif->bss = NULL;
} else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
u16 reason = 0;
@@ -186,15 +191,15 @@ static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status,
}
}
-static struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl)
+struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl)
{
- int i;
+ struct wilc_vif *vif;
- for (i = 0; i < wl->vif_num; i++)
- if (wl->vif[i])
- return wl->vif[i];
+ vif = list_first_or_null_rcu(&wl->vif_list, typeof(*vif), list);
+ if (!vif)
+ return ERR_PTR(-EINVAL);
- return ERR_PTR(-EINVAL);
+ return vif;
}
static int set_channel(struct wiphy *wiphy,
@@ -204,11 +209,12 @@ static int set_channel(struct wiphy *wiphy,
struct wilc_vif *vif;
u32 channelnum;
int result;
+ int srcu_idx;
- mutex_lock(&wl->vif_mutex);
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return PTR_ERR(vif);
}
@@ -219,7 +225,7 @@ static int set_channel(struct wiphy *wiphy,
if (result)
netdev_err(vif->ndev, "Error in setting channel\n");
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return result;
}
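The hunk above is the first of several that swap vif_mutex for SRCU on the read side. A minimal sketch of the idiom used throughout this patch (use_vif() is a placeholder, the other names are from this driver):

	int srcu_idx;
	struct wilc_vif *vif;

	srcu_idx = srcu_read_lock(&wl->srcu);
	vif = wilc_get_wl_to_vif(wl);
	if (!IS_ERR(vif))
		use_vif(vif);	/* vif stays valid until the unlock */
	srcu_read_unlock(&wl->srcu, srcu_idx);

Writers still take vif_mutex, mutate the list with list_add_tail_rcu()/list_del_rcu(), and then call synchronize_srcu() so that sleeping readers drain before a vif can go away (see del_virtual_intf and wilc_netdev_ifc_init below).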
@@ -405,6 +411,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
goto out_put_bss;
}
kfree(join_params);
+ vif->bss = bss;
cfg80211_put_bss(wiphy, bss);
return 0;
@@ -450,6 +457,8 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev,
ret = -EINVAL;
}
+ vif->bss = NULL;
+
return ret;
}
@@ -620,29 +629,26 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
bool pairwise,
const u8 *mac_addr)
{
- struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif = netdev_priv(netdev);
struct wilc_priv *priv = &vif->priv;
- if (netdev == wl->vif[0]->ndev) {
- if (priv->wilc_gtk[key_index]) {
- kfree(priv->wilc_gtk[key_index]->key);
- priv->wilc_gtk[key_index]->key = NULL;
- kfree(priv->wilc_gtk[key_index]->seq);
- priv->wilc_gtk[key_index]->seq = NULL;
+ if (priv->wilc_gtk[key_index]) {
+ kfree(priv->wilc_gtk[key_index]->key);
+ priv->wilc_gtk[key_index]->key = NULL;
+ kfree(priv->wilc_gtk[key_index]->seq);
+ priv->wilc_gtk[key_index]->seq = NULL;
- kfree(priv->wilc_gtk[key_index]);
- priv->wilc_gtk[key_index] = NULL;
- }
+ kfree(priv->wilc_gtk[key_index]);
+ priv->wilc_gtk[key_index] = NULL;
+ }
- if (priv->wilc_ptk[key_index]) {
- kfree(priv->wilc_ptk[key_index]->key);
- priv->wilc_ptk[key_index]->key = NULL;
- kfree(priv->wilc_ptk[key_index]->seq);
- priv->wilc_ptk[key_index]->seq = NULL;
- kfree(priv->wilc_ptk[key_index]);
- priv->wilc_ptk[key_index] = NULL;
- }
+ if (priv->wilc_ptk[key_index]) {
+ kfree(priv->wilc_ptk[key_index]->key);
+ priv->wilc_ptk[key_index]->key = NULL;
+ kfree(priv->wilc_ptk[key_index]->seq);
+ priv->wilc_ptk[key_index]->seq = NULL;
+ kfree(priv->wilc_ptk[key_index]);
+ priv->wilc_ptk[key_index] = NULL;
}
if (key_index <= 3 && priv->wep_key_len[key_index]) {
@@ -752,33 +758,19 @@ static int change_bss(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-struct wilc_vif *wilc_get_interface(struct wilc *wl)
-{
- int i;
- struct wilc_vif *vif = NULL;
-
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- if (wl->vif[i]) {
- vif = wl->vif[i];
- break;
- }
- }
- mutex_unlock(&wl->vif_mutex);
- return vif;
-}
-
static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- int ret;
+ int ret = -EINVAL;
struct cfg_param_attr cfg_param_val;
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
struct wilc_priv *priv;
+ int srcu_idx;
- vif = wilc_get_interface(wl);
- if (!vif)
- return -EINVAL;
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_wl_to_vif(wl);
+ if (IS_ERR(vif))
+ goto out;
priv = &vif->priv;
cfg_param_val.flag = 0;
@@ -808,7 +800,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
} else {
netdev_err(vif->ndev,
"Fragmentation threshold out of range\n");
- return -EINVAL;
+ goto out;
}
}
@@ -821,7 +813,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
cfg_param_val.rts_threshold = wiphy->rts_threshold;
} else {
netdev_err(vif->ndev, "RTS threshold out of range\n");
- return -EINVAL;
+ goto out;
}
}
@@ -829,6 +821,8 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
if (ret)
netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
+out:
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1144,7 +1138,7 @@ static int remain_on_channel(struct wiphy *wiphy,
cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL);
mod_timer(&vif->hif_drv->remain_on_ch_timer,
- jiffies + msecs_to_jiffies(duration));
+ jiffies + msecs_to_jiffies(duration + 1000));
return ret;
}
@@ -1419,8 +1413,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE)
wilc_wfi_deinit_mon_interface(wl, true);
vif->iftype = WILC_STATION_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_STATION_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
memset(priv->assoc_stainfo.sta_associated_bss, 0,
WILC_MAX_NUM_STA * ETH_ALEN);
@@ -1432,8 +1428,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
priv->wdev.iftype = type;
vif->monitor_flag = 0;
vif->iftype = WILC_CLIENT_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_STATION_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_STATION_MODE, vif->idx);
break;
case NL80211_IFTYPE_AP:
@@ -1450,8 +1448,10 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
dev->ieee80211_ptr->iftype = type;
priv->wdev.iftype = type;
vif->iftype = WILC_GO_MODE;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- WILC_AP_MODE, vif->idx);
+
+ if (wl->initialized)
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
+ WILC_AP_MODE, vif->idx);
break;
default:
@@ -1557,20 +1557,16 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static int wilc_get_vif_from_type(struct wilc *wl, int type)
+static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
- int i;
+ struct wilc_vif *vif;
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- if (wl->vif[i]->iftype == type) {
- mutex_unlock(&wl->vif_mutex);
- return i;
- }
+ list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ if (vif->iftype == type)
+ return vif;
}
- mutex_unlock(&wl->vif_mutex);
- return -EINVAL;
+ return NULL;
}
static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
@@ -1583,29 +1579,36 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
struct wilc_vif *vif;
struct wireless_dev *wdev;
int iftype;
- int ret;
if (type == NL80211_IFTYPE_MONITOR) {
struct net_device *ndev;
- int ap_index = wilc_get_vif_from_type(wl, WILC_AP_MODE);
-
- if (ap_index < 0) {
- ap_index = wilc_get_vif_from_type(wl, WILC_GO_MODE);
- if (ap_index < 0)
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
+ if (!vif) {
+ vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
+ if (!vif) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
+ }
}
- vif = wl->vif[ap_index];
- if (vif->monitor_flag)
+ if (vif->monitor_flag) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
goto validate_interface;
+ }
ndev = wilc_wfi_init_mon_interface(wl, name, vif->ndev);
- if (ndev)
+ if (ndev) {
vif->monitor_flag = 1;
- else
+ } else {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ERR_PTR(-EINVAL);
+ }
wdev = &vif->priv.wdev;
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return wdev;
}
@@ -1613,9 +1616,10 @@ validate_interface:
mutex_lock(&wl->vif_mutex);
if (wl->vif_num == WILC_NUM_CONCURRENT_IFC) {
pr_err("Reached maximum number of interface\n");
- ret = -EINVAL;
- goto out_err;
+ mutex_unlock(&wl->vif_mutex);
+ return ERR_PTR(-EINVAL);
}
+ mutex_unlock(&wl->vif_mutex);
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -1625,30 +1629,20 @@ validate_interface:
iftype = WILC_AP_MODE;
break;
default:
- ret = -EOPNOTSUPP;
- goto out_err;
+ return ERR_PTR(-EOPNOTSUPP);
}
vif = wilc_netdev_ifc_init(wl, name, iftype, type, true);
- if (IS_ERR(vif)) {
- ret = PTR_ERR(vif);
- goto out_err;
- }
-
- mutex_unlock(&wl->vif_mutex);
+ if (IS_ERR(vif))
+ return ERR_CAST(vif);
return &vif->priv.wdev;
-
-out_err:
- mutex_unlock(&wl->vif_mutex);
- return ERR_PTR(ret);
}
static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
- int i;
if (wdev->iftype == NL80211_IFTYPE_AP ||
wdev->iftype == NL80211_IFTYPE_P2P_GO)
@@ -1658,22 +1652,12 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- mutex_lock(&wl->vif_mutex);
wilc_set_operation_mode(vif, 0, 0, 0);
- for (i = vif->idx; i < wl->vif_num; i++) {
- if ((i + 1) >= wl->vif_num) {
- wl->vif[i] = NULL;
- } else {
- vif = wl->vif[i + 1];
- vif->idx = i;
- wl->vif[i] = vif;
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif),
- vif->iftype, vif->idx);
- }
- }
+ mutex_lock(&wl->vif_mutex);
+ list_del_rcu(&vif->list);
wl->vif_num--;
mutex_unlock(&wl->vif_mutex);
-
+ synchronize_srcu(&wl->srcu);
return 0;
}
@@ -1698,25 +1682,39 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
struct wilc_vif *vif;
+ int srcu_idx;
- mutex_lock(&wl->vif_mutex);
+ srcu_idx = srcu_read_lock(&wl->srcu);
vif = wilc_get_wl_to_vif(wl);
if (IS_ERR(vif)) {
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return;
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
enum nl80211_tx_power_setting type, int mbm)
{
int ret;
+ int srcu_idx;
s32 tx_power = MBM_TO_DBM(mbm);
- struct wilc_vif *vif = netdev_priv(wdev->netdev);
+ struct wilc *wl = wiphy_priv(wiphy);
+ struct wilc_vif *vif;
+
+ if (!wl->initialized)
+ return -EIO;
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ vif = wilc_get_wl_to_vif(wl);
+ if (IS_ERR(vif)) {
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+ return -EINVAL;
+ }
+
+ netdev_info(vif->ndev, "Setting tx power %d\n", tx_power);
if (tx_power < 0)
tx_power = 0;
else if (tx_power > 18)
@@ -1724,6 +1722,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
ret = wilc_set_tx_power(vif, tx_power);
if (ret)
netdev_err(vif->ndev, "Failed to set tx power\n");
+ srcu_read_unlock(&wl->srcu, srcu_idx);
return ret;
}
@@ -1803,6 +1802,17 @@ static void wlan_init_locks(struct wilc *wl)
init_completion(&wl->cfg_event);
init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
+ init_srcu_struct(&wl->srcu);
+}
+
+void wlan_deinit_locks(struct wilc *wilc)
+{
+ mutex_destroy(&wilc->hif_cs);
+ mutex_destroy(&wilc->rxq_cs);
+ mutex_destroy(&wilc->cfg_cmd_lock);
+ mutex_destroy(&wilc->txq_add_to_head_cs);
+ mutex_destroy(&wilc->vif_mutex);
+ cleanup_srcu_struct(&wilc->srcu);
}
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
@@ -1816,6 +1826,8 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
if (!wl)
return -EINVAL;
+ wlan_init_locks(wl);
+
ret = wilc_wlan_cfg_init(wl);
if (ret)
goto free_wl;
@@ -1826,6 +1838,7 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
wl->chip_ps_state = WILC_CHIP_WAKEDUP;
INIT_LIST_HEAD(&wl->txq_head.list);
INIT_LIST_HEAD(&wl->rxq_head.list);
+ INIT_LIST_HEAD(&wl->vif_list);
wl->hif_workqueue = create_singlethread_workqueue("WILC_wq");
if (!wl->hif_workqueue) {
@@ -1839,8 +1852,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
goto free_hq;
}
- wlan_init_locks(wl);
-
return 0;
free_hq:
@@ -1850,6 +1861,7 @@ free_cfg:
wilc_wlan_cfg_deinit(wl);
free_wl:
+ wlan_deinit_locks(wl);
wiphy_unregister(wl->wiphy);
wiphy_free(wl->wiphy);
return ret;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/cfg80211.h
index 234faaabdb82..5e5d63f70df2 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/cfg80211.h
@@ -4,9 +4,9 @@
* All rights reserved.
*/
-#ifndef NM_WFI_CFGOPERATIONS
-#define NM_WFI_CFGOPERATIONS
-#include "wilc_wfi_netdevice.h"
+#ifndef WILC_CFG80211_H
+#define WILC_CFG80211_H
+#include "netdev.h"
struct wiphy *wilc_cfg_alloc(void);
int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
@@ -24,4 +24,6 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
u16 frame_type, bool reg);
struct wilc_vif *wilc_get_interface(struct wilc *wl);
+struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl);
+void wlan_deinit_locks(struct wilc *wilc);
#endif
diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/hif.c
index f2b7d5a1be17..349e45d58ec9 100644
--- a/drivers/staging/wilc1000/wilc_hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_netdevice.h"
+#include "netdev.h"
#define WILC_HIF_SCAN_TIMEOUT_MS 5000
#define WILC_HIF_CONNECT_TIMEOUT_MS 9500
@@ -32,7 +32,7 @@ struct wilc_op_mode {
};
struct wilc_reg_frame {
- bool reg;
+ u8 reg;
u8 reg_id;
__le16 frame_type;
} __packed;
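Switching reg from bool to u8 keeps the packed wire structure well defined: sizeof(bool) is implementation-specific in C, while u8 is exactly one byte.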
@@ -183,11 +183,17 @@ int wilc_get_vif_idx(struct wilc_vif *vif)
static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
{
int index = idx - 1;
+ struct wilc_vif *vif;
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- return wilc->vif[index];
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->idx == index)
+ return vif;
+ }
+
+ return NULL;
}
static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt)
@@ -473,20 +479,27 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
if (rates_ie) {
rates_len = rates_ie[1];
+ if (rates_len > WILC_MAX_RATES_SUPPORTED)
+ rates_len = WILC_MAX_RATES_SUPPORTED;
param->supp_rates[0] = rates_len;
memcpy(&param->supp_rates[1], rates_ie + 2, rates_len);
}
- supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data,
- ies->len);
- if (supp_rates_ie) {
- if (supp_rates_ie[1] > (WILC_MAX_RATES_SUPPORTED - rates_len))
- param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
- else
- param->supp_rates[0] += supp_rates_ie[1];
+ if (rates_len < WILC_MAX_RATES_SUPPORTED) {
+ supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+ ies->data, ies->len);
+ if (supp_rates_ie) {
+ u8 ext_rates = supp_rates_ie[1];
- memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2,
- (param->supp_rates[0] - rates_len));
+ if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len))
+ param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED;
+ else
+ param->supp_rates[0] += ext_rates;
+
+ memcpy(&param->supp_rates[rates_len + 1],
+ supp_rates_ie + 2,
+ (param->supp_rates[0] - rates_len));
+ }
}
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
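Clamping rates_len to WILC_MAX_RATES_SUPPORTED before the first memcpy(), and only parsing the extended rates IE when room remains, bounds both copies into param->supp_rates and closes a potential overflow when a BSS advertises more rates than the structure can hold.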
@@ -544,7 +557,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->mode_802_11i = 2;
param->rsn_found = true;
- //extract RSN capabilities
+ /* extract RSN capabilities */
offset += (rsn_ie[offset] * 4) + 2;
offset += (rsn_ie[offset] * 4) + 2;
memcpy(param->rsn_cap, &rsn_ie[offset], 2);
@@ -1776,7 +1789,9 @@ void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
wid.val = (u8 *)&reg_frame;
memset(&reg_frame, 0x0, sizeof(reg_frame));
- reg_frame.reg = reg;
+
+ if (reg)
+ reg_frame.reg = 1;
switch (frame_type) {
case IEEE80211_STYPE_ACTION:
diff --git a/drivers/staging/wilc1000/wilc_hif.h b/drivers/staging/wilc1000/hif.h
index ac5fe57f872b..22ee6fffd599 100644
--- a/drivers/staging/wilc1000/wilc_hif.h
+++ b/drivers/staging/wilc1000/hif.h
@@ -4,10 +4,10 @@
* All rights reserved.
*/
-#ifndef HOST_INT_H
-#define HOST_INT_H
+#ifndef WILC_HIF_H
+#define WILC_HIF_H
#include <linux/ieee80211.h>
-#include "wilc_wlan_if.h"
+#include "wlan_if.h"
enum {
WILC_IDLE_MODE = 0x0,
diff --git a/drivers/staging/wilc1000/wilc_mon.c b/drivers/staging/wilc1000/mon.c
index d6f14f69ad64..48ac33f06f63 100644
--- a/drivers/staging/wilc1000/wilc_mon.c
+++ b/drivers/staging/wilc1000/mon.c
@@ -4,7 +4,7 @@
* All rights reserved.
*/
-#include "wilc_wfi_cfgoperations.h"
+#include "cfg80211.h"
struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
@@ -220,7 +220,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
{
struct wilc_wfi_mon_priv *priv;
- /*If monitor interface is already initialized, return it*/
+ /* If monitor interface is already initialized, return it */
if (wl->monitor_dev)
return wl->monitor_dev;
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/netdev.c
index 508acb8bb089..d2c0b0f7cf63 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/netdev.c
@@ -10,8 +10,8 @@
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_cfg.h"
+#include "cfg80211.h"
+#include "wlan_cfg.h"
#define WILC_MULTICAST_TABLE_SIZE 8
@@ -97,29 +97,25 @@ void wilc_mac_indicate(struct wilc *wilc)
static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
{
u8 *bssid, *bssid1;
- int i = 0;
struct net_device *ndev = NULL;
+ struct wilc_vif *vif;
bssid = mac_header + 10;
bssid1 = mac_header + 4;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i]->mode == WILC_STATION_MODE)
- if (ether_addr_equal_unaligned(bssid,
- wilc->vif[i]->bssid)) {
- ndev = wilc->vif[i]->ndev;
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->mode == WILC_STATION_MODE)
+ if (ether_addr_equal_unaligned(bssid, vif->bssid)) {
+ ndev = vif->ndev;
goto out;
}
- if (wilc->vif[i]->mode == WILC_AP_MODE)
- if (ether_addr_equal_unaligned(bssid1,
- wilc->vif[i]->bssid)) {
- ndev = wilc->vif[i]->ndev;
+ if (vif->mode == WILC_AP_MODE)
+ if (ether_addr_equal_unaligned(bssid1, vif->bssid)) {
+ ndev = vif->ndev;
goto out;
}
}
out:
- mutex_unlock(&wilc->vif_mutex);
return ndev;
}
@@ -137,13 +133,16 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
{
- u8 i = 0;
+ int srcu_idx;
u8 ret_val = 0;
+ struct wilc_vif *vif;
- for (i = 0; i < wilc->vif_num; i++)
- if (!is_zero_ether_addr(wilc->vif[i]->bssid))
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (!is_zero_ether_addr(vif->bssid))
ret_val++;
-
+ }
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
return ret_val;
}
@@ -167,16 +166,16 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int i;
+ int srcu_idx;
struct wilc_vif *ifc;
- mutex_lock(&wl->vif_mutex);
- for (i = 0; i < wl->vif_num; i++) {
- ifc = wl->vif[i];
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ list_for_each_entry_rcu(ifc, &wl->vif_list,
+ list) {
if (ifc->mac_opened && ifc->ndev)
netif_wake_queue(ifc->ndev);
}
- mutex_unlock(&wl->vif_mutex);
+ srcu_read_unlock(&wl->srcu, srcu_idx);
}
} while (ret == -ENOBUFS && !wl->close);
}
@@ -424,18 +423,6 @@ fail:
return -1;
}
-static void wlan_deinit_locks(struct net_device *dev)
-{
- struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
-
- mutex_destroy(&wilc->hif_cs);
- mutex_destroy(&wilc->rxq_cs);
- mutex_destroy(&wilc->cfg_cmd_lock);
- mutex_destroy(&wilc->txq_add_to_head_cs);
- mutex_destroy(&wilc->vif_mutex);
-}
-
static void wlan_deinitialize_threads(struct net_device *dev)
{
struct wilc_vif *vif = netdev_priv(dev);
@@ -477,7 +464,6 @@ static void wilc_wlan_deinitialize(struct net_device *dev)
wilc_wlan_stop(wl, vif);
wilc_wlan_cleanup(dev);
- wlan_deinit_locks(dev);
wl->initialized = false;
@@ -738,14 +724,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
wilc_tx_complete);
if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
- int i;
+ int srcu_idx;
+ struct wilc_vif *vif;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i]->mac_opened)
- netif_stop_queue(wilc->vif[i]->ndev);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->mac_opened)
+ netif_stop_queue(vif->ndev);
}
- mutex_unlock(&wilc->vif_mutex);
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
return 0;
@@ -823,26 +810,22 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
{
- int i = 0;
+ int srcu_idx;
struct wilc_vif *vif;
- mutex_lock(&wilc->vif_mutex);
- for (i = 0; i < wilc->vif_num; i++) {
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
u16 type = le16_to_cpup((__le16 *)buff);
- vif = netdev_priv(wilc->vif[i]->ndev);
- if ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
- (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)) {
+ if (vif->priv.p2p_listen_state &&
+ ((type == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
+ (type == vif->frame_reg[1].type && vif->frame_reg[1].reg)))
wilc_wfi_p2p_rx(vif, buff, size);
- break;
- }
- if (vif->monitor_flag) {
+ if (vif->monitor_flag)
wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
- break;
- }
}
- mutex_unlock(&wilc->vif_mutex);
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
static const struct net_device_ops wilc_netdev_ops = {
@@ -856,7 +839,8 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- int i;
+ struct wilc_vif *vif;
+ int srcu_idx;
if (!wilc)
return;
@@ -866,21 +850,57 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- for (i = 0; i < wilc->vif_num; i++) {
- if (wilc->vif[i] && wilc->vif[i]->ndev)
- unregister_netdev(wilc->vif[i]->ndev);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
+ list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ if (vif->ndev)
+ unregister_netdev(vif->ndev);
}
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
+
+ do {
+ mutex_lock(&wilc->vif_mutex);
+ if (wilc->vif_num <= 0) {
+ mutex_unlock(&wilc->vif_mutex);
+ break;
+ }
+ vif = wilc_get_wl_to_vif(wilc);
+ if (!IS_ERR(vif))
+ list_del_rcu(&vif->list);
+
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
+ } while (1);
+
wilc_wlan_cfg_deinit(wilc);
+ wlan_deinit_locks(wilc);
kfree(wilc->bus_data);
wiphy_unregister(wilc->wiphy);
wiphy_free(wilc->wiphy);
}
EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
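+/* With at most WILC_NUM_CONCURRENT_IFC (two) interfaces, the free index is
+ * the complement of the one already in use, or 0 when the list is empty.
+ */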
+static u8 wilc_get_available_idx(struct wilc *wl)
+{
+ int idx = 0;
+ struct wilc_vif *vif;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ if (vif->idx == 0)
+ idx = 1;
+ else
+ idx = 0;
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+ return idx;
+}
+
struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
int vif_type, enum nl80211_iftype type,
bool rtnl_locked)
@@ -921,10 +941,14 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
- vif->wilc->vif[wl->vif_num] = vif;
- vif->idx = wl->vif_num;
- wl->vif_num += 1;
+ vif->idx = wilc_get_available_idx(wl);
vif->mac_opened = 0;
+ mutex_lock(&wl->vif_mutex);
+ list_add_tail_rcu(&vif->list, &wl->vif_list);
+ wl->vif_num += 1;
+ mutex_unlock(&wl->vif_mutex);
+ synchronize_srcu(&wl->srcu);
+
return vif;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/netdev.h
index 978a8bdbfc40..cd8f0d72caaa 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/netdev.h
@@ -4,8 +4,8 @@
* All rights reserved.
*/
-#ifndef WILC_WFI_NETDEVICE
-#define WILC_WFI_NETDEVICE
+#ifndef WILC_NETDEV_H
+#define WILC_NETDEV_H
#include <linux/tcp.h>
#include <linux/ieee80211.h>
@@ -14,9 +14,9 @@
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
-#include "wilc_hif.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_cfg.h"
+#include "hif.h"
+#include "wlan.h"
+#include "wlan_cfg.h"
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
@@ -60,7 +60,7 @@ struct sta_info {
u8 sta_associated_bss[WILC_MAX_NUM_STA][ETH_ALEN];
};
-/*Parameters needed for host interface for remaining on channel*/
+/* Parameters needed for host interface for remaining on channel */
struct wilc_wfi_p2p_listen_params {
struct ieee80211_channel *listen_ch;
u32 listen_duration;
@@ -145,11 +145,13 @@ struct wilc_priv {
struct wilc_pmkid_attr pmkid_list;
u8 wep_key[4][WLAN_KEY_LEN_WEP104];
u8 wep_key_len[4];
+
/* The real interface that the monitor is on */
struct net_device *real_ndev;
struct wilc_wfi_key *wilc_gtk[WILC_MAX_NUM_STA];
struct wilc_wfi_key *wilc_ptk[WILC_MAX_NUM_STA];
u8 wilc_groupkey;
+
/* mutexes */
struct mutex scan_req_lock;
bool p2p_listen_state;
@@ -208,6 +210,8 @@ struct wilc_vif {
struct tcp_ack_filter ack_filter;
bool connecting;
struct wilc_priv priv;
+ struct list_head list;
+ struct cfg80211_bss *bss;
};
struct wilc {
@@ -221,16 +225,22 @@ struct wilc {
int dev_irq_num;
int close;
u8 vif_num;
- struct wilc_vif *vif[WILC_NUM_CONCURRENT_IFC];
- /*protect vif list*/
+ struct list_head vif_list;
+
+ /* protect vif list */
struct mutex vif_mutex;
+ struct srcu_struct srcu;
u8 open_ifcs;
- /*protect head of transmit queue*/
+
+ /* protect head of transmit queue */
struct mutex txq_add_to_head_cs;
- /*protect txq_entry_t transmit queue*/
+
+ /* protect txq_entry_t transmit queue */
spinlock_t txq_spinlock;
- /*protect rxq_entry_t receiver queue*/
+
+ /* protect rxq_entry_t receiver queue */
struct mutex rxq_cs;
+
/* lock to protect hif access */
struct mutex hif_cs;
@@ -242,6 +252,7 @@ struct wilc {
struct task_struct *txq_thread;
int quit;
+
/* lock to protect issue of wid command to firmware */
struct mutex cfg_cmd_lock;
struct wilc_cfg_frame cfg_frame;
@@ -268,6 +279,7 @@ struct wilc {
struct wilc_cfg cfg;
void *bus_data;
struct net_device *monitor_dev;
+
/* deinit lock */
struct mutex deinit_lock;
u8 sta_ch;
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/sdio.c
index c787c5da8f2b..319e039380b0 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/sdio.c
@@ -8,8 +8,8 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/host.h>
-#include "wilc_wfi_netdevice.h"
-#include "wilc_wfi_cfgoperations.h"
+#include "netdev.h"
+#include "cfg80211.h"
#define SDIO_MODALIAS "wilc1000_sdio"
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/spi.c
index 3c1ae9e9f9aa..55f8757325f0 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/spi.c
@@ -4,10 +4,11 @@
* All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/spi/spi.h>
-#include "wilc_wfi_netdevice.h"
-#include "wilc_wfi_cfgoperations.h"
+#include "netdev.h"
+#include "cfg80211.h"
struct wilc_spi {
int crc_off;
@@ -132,6 +133,12 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->bus_data = spi_priv;
wilc->gpio_irq = gpio;
+ wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (!IS_ERR(wilc->rtc_clk))
+ clk_prepare_enable(wilc->rtc_clk);
+
return 0;
}
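Requesting the optional "rtc_clk" clock with devm_clk_get() propagates -EPROBE_DEFER so the probe is retried once the clock provider appears; any other error (including a missing clock) simply leaves rtc_clk unused, and the remove path below only disables it when it was actually obtained.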
@@ -142,6 +149,10 @@ static int wilc_bus_remove(struct spi_device *spi)
/* free the GPIO in module remove */
if (wilc->gpio_irq)
gpiod_put(wilc->gpio_irq);
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
wilc_netdev_cleanup(wilc);
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wlan.c
index 771d8cb68dc1..d3de76126b78 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wlan.c
@@ -6,8 +6,8 @@
#include <linux/if_ether.h>
#include <linux/ip.h>
-#include "wilc_wfi_cfgoperations.h"
-#include "wilc_wlan_cfg.h"
+#include "cfg80211.h"
+#include "wlan_cfg.h"
static inline bool is_wilc1000(u32 id)
{
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wlan.h
index 7469fa47d588..1f6957cf2e9c 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wlan.h
@@ -190,7 +190,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
-/*time for expiring the completion of cfg packets*/
+/* time for expiring the completion of cfg packets */
#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
#define IS_MANAGMEMENT 0x100
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wlan_cfg.c
index 3f53807cee0f..6f6b286788d1 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wlan_cfg.c
@@ -4,10 +4,10 @@
* All rights reserved.
*/
-#include "wilc_wlan_if.h"
-#include "wilc_wlan.h"
-#include "wilc_wlan_cfg.h"
-#include "wilc_wfi_netdevice.h"
+#include "wlan_if.h"
+#include "wlan.h"
+#include "wlan_cfg.h"
+#include "netdev.h"
enum cfg_cmd_type {
CFG_BYTE_CMD = 0,
@@ -44,6 +44,11 @@ static const struct wilc_cfg_str g_cfg_str[] = {
{WID_NIL, NULL}
};
+#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
+#define WILC_RESP_MSG_TYPE_STATUS_INFO 'I'
+#define WILC_RESP_MSG_TYPE_NETWORK_INFO 'N'
+#define WILC_RESP_MSG_TYPE_SCAN_COMPLETE 'S'
+
/********************************************
*
* Configuration Functions
@@ -360,33 +365,26 @@ void wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
size -= 4;
rsp->type = 0;
- /*
- * The valid types of response messages are
- * 'R' (Response),
- * 'I' (Information), and
- * 'N' (Network Information)
- */
-
switch (msg_type) {
- case 'R':
+ case WILC_RESP_MSG_TYPE_CONFIG_REPLY:
wilc_wlan_parse_response_frame(wilc, frame, size);
rsp->type = WILC_CFG_RSP;
rsp->seq_no = msg_id;
break;
- case 'I':
+ case WILC_RESP_MSG_TYPE_STATUS_INFO:
wilc_wlan_parse_info_frame(wilc, frame);
rsp->type = WILC_CFG_RSP_STATUS;
rsp->seq_no = msg_id;
- /*call host interface info parse as well*/
+ /* call host interface info parse as well */
wilc_gnrl_async_info_received(wilc, frame - 4, size + 4);
break;
- case 'N':
+ case WILC_RESP_MSG_TYPE_NETWORK_INFO:
wilc_network_info_received(wilc, frame - 4, size + 4);
break;
- case 'S':
+ case WILC_RESP_MSG_TYPE_SCAN_COMPLETE:
wilc_scan_complete_received(wilc, frame - 4, size + 4);
break;
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wlan_cfg.h
index 614c5673f232..614c5673f232 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wlan_cfg.h
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wlan_if.h
index 70eac586f80c..7c7ee66c35f5 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wlan_if.h
@@ -750,10 +750,10 @@ enum {
WID_REMOVE_KEY = 0x301E,
WID_ASSOC_REQ_INFO = 0x301F,
WID_ASSOC_RES_INFO = 0x3020,
- WID_MANUFACTURER = 0x3026, /*Added for CAPI tool */
- WID_MODEL_NAME = 0x3027, /*Added for CAPI tool */
- WID_MODEL_NUM = 0x3028, /*Added for CAPI tool */
- WID_DEVICE_NAME = 0x3029, /*Added for CAPI tool */
+ WID_MANUFACTURER = 0x3026, /* Added for CAPI tool */
+ WID_MODEL_NAME = 0x3027, /* Added for CAPI tool */
+ WID_MODEL_NUM = 0x3028, /* Added for CAPI tool */
+ WID_DEVICE_NAME = 0x3029, /* Added for CAPI tool */
/* NMAC String WID list */
WID_SET_OPERATION_MODE = 0x3079,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 5ff740a8837d..bdd7f414fdbb 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1181,8 +1181,6 @@ struct hfa384x_usbctlx {
ctlx_cmdcb_t cmdcb; /* Async command callback */
ctlx_usercb_t usercb; /* Async user callback, */
void *usercb_data; /* at CTLX completion */
-
- int variant; /* Identifies cmd variant */
};
struct hfa384x_usbctlxq {
@@ -1337,7 +1335,9 @@ struct hfa384x {
* interface
*/
- struct hfa384x_caplevel cap_act_sta_mfi; /* sta f/w to modem interface */
+ struct hfa384x_caplevel cap_act_sta_mfi; /*
+ * sta f/w to modem interface
+ */
struct hfa384x_caplevel cap_act_ap_cfi; /*
* ap f/w to controller
@@ -1359,7 +1359,9 @@ struct hfa384x {
struct hfa384x_inf_frame *scanresults;
- struct prism2sta_authlist authlist; /* Authenticated station list. */
+ struct prism2sta_authlist authlist; /*
+ * Authenticated station list.
+ */
unsigned int accessmode; /* Access mode. */
struct prism2sta_accesslist allow; /* Allowed station list. */
struct prism2sta_accesslist deny; /* Denied station list. */
@@ -1370,12 +1372,13 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb);
void hfa384x_destroy(struct hfa384x *hw);
int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime,
- int genesis);
+ int genesis);
int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport);
int hfa384x_drvr_flashdl_enable(struct hfa384x *hw);
int hfa384x_drvr_flashdl_disable(struct hfa384x *hw);
-int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf,
+ u32 len);
int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr);
int hfa384x_drvr_ramdl_disable(struct hfa384x *hw);
@@ -1383,7 +1386,8 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len);
int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len);
int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len);
-static inline int hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
+static inline int
+hfa384x_drvr_getconfig16(struct hfa384x *hw, u16 rid, void *val)
{
int result = 0;
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 28d372a0663a..b71756ab0394 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -293,13 +293,11 @@ void dbprint_urb(struct urb *urb)
pr_debug("urb->transfer_buffer_length=0x%08x\n",
urb->transfer_buffer_length);
pr_debug("urb->actual_length=0x%08x\n", urb->actual_length);
- pr_debug("urb->bandwidth=0x%08x\n", urb->bandwidth);
pr_debug("urb->setup_packet(ctl)=0x%08x\n",
(unsigned int)urb->setup_packet);
pr_debug("urb->start_frame(iso/irq)=0x%08x\n", urb->start_frame);
pr_debug("urb->interval(irq)=0x%08x\n", urb->interval);
pr_debug("urb->error_count(iso)=0x%08x\n", urb->error_count);
- pr_debug("urb->timeout=0x%08x\n", urb->timeout);
pr_debug("urb->context=0x%08x\n", (unsigned int)urb->context);
pr_debug("urb->complete=0x%08x\n", (unsigned int)urb->complete);
}
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 8bd92bba0ac1..51d917c8cdc8 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -49,6 +49,7 @@
/*================================================================*/
/* System Includes */
+#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/random.h>
@@ -61,61 +62,6 @@
#define WEP_KEY(x) (((x) & 0xC0) >> 6)
-static const u32 wep_crc32_table[256] = {
- 0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
- 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
- 0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
- 0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
- 0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
- 0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
- 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
- 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
- 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
- 0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
- 0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
- 0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
- 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
- 0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
- 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
- 0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
- 0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
- 0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
- 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
- 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
- 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
- 0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
- 0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
- 0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
- 0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
- 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
- 0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
- 0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
- 0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
- 0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
- 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
- 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
- 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
- 0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
- 0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
- 0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
- 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
- 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
- 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
- 0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
- 0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
- 0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
- 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
- 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
- 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
- 0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
- 0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
- 0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
- 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
- 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
- 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
- 0x2d02ef8dL
-};
-
/* keylen in bytes! */
int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
@@ -184,7 +130,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
}
/* Apply the RC4 to the data, update the CRC32 */
- crc = ~0;
i = 0;
j = 0;
for (k = 0; k < len; k++) {
@@ -192,9 +137,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
j = (j + s[i]) & 0xff;
swap(i, j);
buf[k] ^= s[(s[i] + s[j]) & 0xff];
- crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
}
- crc = ~crc;
+ crc = ~crc32_le(~0, buf, len);
/* now let's check the crc */
c_crc[0] = crc;
@@ -257,17 +201,15 @@ int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
}
/* Update CRC32 then apply RC4 to the data */
- crc = ~0;
i = 0;
j = 0;
for (k = 0; k < len; k++) {
- crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
i = (i + 1) & 0xff;
j = (j + s[i]) & 0xff;
swap(i, j);
dst[k] = buf[k] ^ s[(s[i] + s[j]) & 0xff];
}
- crc = ~crc;
+ crc = ~crc32_le(~0, buf, len);
/* now let's encrypt the crc */
icv[0] = crc;
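
The table-driven CRC removed above and the new ~crc32_le(~0, buf, len) call compute the same IEEE CRC-32: the kernel's crc32_le() is the reflected polynomial 0xedb88320, with the seed and final inversion left to the caller. A minimal user-space sketch (bitwise, no table) demonstrating the equivalence on the standard check string:

#include <stdint.h>
#include <stdio.h>

/* Bit-at-a-time reflected CRC-32 (poly 0xedb88320); same contract as
 * the kernel's crc32_le(): the caller supplies the seed and applies
 * the final inversion, exactly like ~crc32_le(~0, buf, len) above. */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t buf[] = "123456789";

	/* Equivalent to the removed per-byte update:
	 *   crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8)
	 * seeded with ~0 and inverted at the end. */
	printf("%08x\n", ~crc32_le_bitwise(~0u, buf, 9)); /* cbf43926 */
	return 0;
}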
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index b5ba176004c1..352556f6870a 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -137,7 +137,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
{
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (wlandev) {
LIST_HEAD(cleanlist);
struct hfa384x_usbctlx *ctlx, *temp;
@@ -222,7 +222,7 @@ static int prism2sta_suspend(struct usb_interface *interface,
struct hfa384x *hw = NULL;
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
@@ -245,7 +245,7 @@ static int prism2sta_resume(struct usb_interface *interface)
struct hfa384x *hw = NULL;
struct wlandevice *wlandev;
- wlandev = (struct wlandevice *)usb_get_intfdata(interface);
+ wlandev = usb_get_intfdata(interface);
if (!wlandev)
return -ENODEV;
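
The dropped casts are pure noise: usb_get_intfdata() returns void *, which converts implicitly to any object pointer in C, and an explicit cast would only hide a future type mismatch. A minimal sketch of the idiom (assumes the driver's p80211netdev.h is in scope for struct wlandevice):

#include <linux/usb.h>
#include "p80211netdev.h"	/* struct wlandevice (wlan-ng driver header) */

static void demo_disconnect(struct usb_interface *interface)
{
	/* void * converts implicitly: no cast needed or wanted. */
	struct wlandevice *wlandev = usb_get_intfdata(interface);

	if (!wlandev)
		return;
	/* ... teardown ... */
}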
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 54bb1ebd8eb5..af35251232eb 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -297,7 +297,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
struct cxgb4_lld_info *lldi = &cdev->lldi;
struct net_device *ndev = cdev->lldi.ports[0];
struct cxgbi_tag_format tformat;
- unsigned int ppmax;
int ret, i;
if (!lldi->vr->iscsi.size) {
@@ -305,8 +304,6 @@ int cxgbit_ddp_init(struct cxgbit_device *cdev)
return -EACCES;
}
- ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
-
memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d19e051f2bc2..7251a87bb576 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1165,7 +1165,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2002,7 +2004,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- target_get_sess_cmd(&cmd->se_cmd, true);
+ if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -2189,24 +2193,22 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
goto empty_sendtargets;
}
- if (strncmp("SendTargets", text_in, 11) != 0) {
+ if (strncmp("SendTargets=", text_in, 12) != 0) {
pr_err("Received Text Data that is not"
" SendTargets, cannot continue.\n");
goto reject;
}
+ /* '=' confirmed in strncmp */
text_ptr = strchr(text_in, '=');
- if (!text_ptr) {
- pr_err("No \"=\" separator found in Text Data,"
- " cannot continue.\n");
- goto reject;
- }
- if (!strncmp("=All", text_ptr, 4)) {
+ BUG_ON(!text_ptr);
+ if (!strncmp("=All", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
- pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+ pr_err("Unable to locate valid SendTargets%s value\n",
+ text_ptr);
goto reject;
}
@@ -4232,6 +4234,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
+ target_sess_cmd_list_set_waiting(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
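
The two calls added to iscsit_close_connection() give teardown a quiesce step: first flag every command on the session so no new references are taken, then block until existing references drain. The pairing in isolation (both are existing target-core APIs, shown here as a hedged sketch rather than driver code):

#include <target/target_core_fabric.h>

/* Quiesce all outstanding se_cmds on a session before freeing the
 * resources they might still touch. */
static void drain_session_cmds(struct se_session *se_sess)
{
	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);
}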
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 51ddca2033e0..0e54627d9aa8 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -18,6 +18,22 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
+static char *chap_get_digest_name(const int digest_type)
+{
+ switch (digest_type) {
+ case CHAP_DIGEST_MD5:
+ return "md5";
+ case CHAP_DIGEST_SHA1:
+ return "sha1";
+ case CHAP_DIGEST_SHA256:
+ return "sha256";
+ case CHAP_DIGEST_SHA3_256:
+ return "sha3-256";
+ default:
+ return NULL;
+ }
+}
+
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -25,16 +41,21 @@ static int chap_gen_challenge(
unsigned int *c_len)
{
int ret;
- unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ unsigned char *challenge_asciihex;
struct iscsi_chap *chap = conn->auth_protocol;
- memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+ challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL);
+ if (!challenge_asciihex)
+ return -ENOMEM;
- ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN);
+
+ ret = get_random_bytes_wait(chap->challenge, chap->challenge_len);
if (unlikely(ret))
- return ret;
+ goto out;
+
bin2hex(challenge_asciihex, chap->challenge,
- CHAP_CHALLENGE_LENGTH);
+ chap->challenge_len);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
@@ -43,12 +64,29 @@ static int chap_gen_challenge(
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
+
+out:
+ kfree(challenge_asciihex);
+ return ret;
+}
+
+static int chap_test_algorithm(const char *name)
+{
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(tfm))
+ return -1;
+
+ crypto_free_shash(tfm);
return 0;
}
static int chap_check_algorithm(const char *a_str)
{
- char *tmp, *orig, *token;
+ char *tmp, *orig, *token, *digest_name;
+ long digest_type;
+ int r = CHAP_DIGEST_UNKNOWN;
tmp = kstrdup(a_str, GFP_KERNEL);
if (!tmp) {
@@ -70,15 +108,24 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
- if (!strncmp(token, "5", 1)) {
- pr_debug("Selected MD5 Algorithm\n");
- kfree(orig);
- return CHAP_DIGEST_MD5;
+ if (kstrtol(token, 10, &digest_type))
+ continue;
+
+ digest_name = chap_get_digest_name(digest_type);
+ if (!digest_name)
+ continue;
+
+ pr_debug("Selected %s Algorithm\n", digest_name);
+ if (chap_test_algorithm(digest_name) < 0) {
+ pr_err("failed to allocate %s algo\n", digest_name);
+ } else {
+ r = digest_type;
+ goto out;
}
}
out:
kfree(orig);
- return CHAP_DIGEST_UNKNOWN;
+ return r;
}
static void chap_close(struct iscsi_conn *conn)
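
chap_check_algorithm() now walks the initiator's comma-separated CHAP_A list and accepts the first digest it both recognizes and can instantiate. A stand-alone sketch of just the parsing half (user space; digest ids 5-8 as defined in iscsi_target_auth.h; pick_digest is a made-up name, not the driver's):

#define _DEFAULT_SOURCE		/* strsep() with glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the first recognized digest id from "CHAP_A=5,7,6,...",
 * or 0 (CHAP_DIGEST_UNKNOWN) when nothing usable is listed. */
static int pick_digest(const char *a_str)
{
	char *tmp = strdup(a_str), *orig = tmp, *token;
	int r = 0;

	while ((token = strsep(&tmp, ","))) {
		long id;

		if (!strncmp(token, "CHAP_A=", 7))
			token += 7;	/* first token carries the key */
		id = strtol(token, NULL, 10);
		if (id == 5 || (id >= 6 && id <= 8)) { /* MD5, SHA1, SHA256, SHA3-256 */
			r = (int)id;
			break;
		}
	}
	free(orig);
	return r;
}

int main(void)
{
	printf("%d\n", pick_digest("CHAP_A=7,5"));	/* 7: first match wins */
	return 0;
}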
@@ -94,7 +141,7 @@ static struct iscsi_chap *chap_server_open(
char *aic_str,
unsigned int *aic_len)
{
- int ret;
+ int digest_type;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -109,17 +156,19 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- ret = chap_check_algorithm(a_str);
- switch (ret) {
+ digest_type = chap_check_algorithm(a_str);
+ switch (digest_type) {
case CHAP_DIGEST_MD5:
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
- *aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ chap->digest_size = MD5_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA1:
+ chap->digest_size = SHA1_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA256:
+ chap->digest_size = SHA256_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA3_256:
+ chap->digest_size = SHA3_256_SIGNATURE_SIZE;
break;
case CHAP_DIGEST_UNKNOWN:
default:
@@ -128,6 +177,16 @@ static struct iscsi_chap *chap_server_open(
return NULL;
}
+ chap->digest_name = chap_get_digest_name(digest_type);
+
+ /* Tie the challenge length to the digest size */
+ chap->challenge_len = chap->digest_size;
+
+ pr_debug("[server] Got CHAP_A=%d\n", digest_type);
+ *aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_A=%d\n", digest_type);
+
/*
* Set Identifier.
*/
@@ -146,7 +205,7 @@ static struct iscsi_chap *chap_server_open(
return chap;
}
-static int chap_server_compute_md5(
+static int chap_server_compute_hash(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
@@ -155,36 +214,57 @@ static int chap_server_compute_md5(
{
unsigned long id;
unsigned char id_as_uchar;
- unsigned char digest[MD5_SIGNATURE_SIZE];
- unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
- unsigned char identifier[10], *challenge = NULL;
- unsigned char *challenge_binhex = NULL;
- unsigned char client_digest[MD5_SIGNATURE_SIZE];
- unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char type;
+ unsigned char identifier[10], *initiatorchg = NULL;
+ unsigned char *initiatorchg_binhex = NULL;
+ unsigned char *digest = NULL;
+ unsigned char *response = NULL;
+ unsigned char *client_digest = NULL;
+ unsigned char *server_digest = NULL;
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_shash *tfm = NULL;
struct shash_desc *desc = NULL;
- int auth_ret = -1, ret, challenge_len;
+ int auth_ret = -1, ret, initiatorchg_len;
+
+ digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!digest) {
+ pr_err("Unable to allocate the digest buffer\n");
+ goto out;
+ }
+
+ response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL);
+ if (!response) {
+ pr_err("Unable to allocate the response buffer\n");
+ goto out;
+ }
+
+ client_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!client_digest) {
+ pr_err("Unable to allocate the client_digest buffer\n");
+ goto out;
+ }
+
+ server_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!server_digest) {
+ pr_err("Unable to allocate the server_digest buffer\n");
+ goto out;
+ }
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
- memset(digest, 0, MD5_SIGNATURE_SIZE);
- memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
- memset(client_digest, 0, MD5_SIGNATURE_SIZE);
- memset(server_digest, 0, MD5_SIGNATURE_SIZE);
- challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge) {
+ initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
- challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge_binhex) {
- pr_err("Unable to allocate challenge_binhex buffer\n");
+ initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg_binhex) {
+ pr_err("Unable to allocate initiatorchg_binhex buffer\n");
goto out;
}
/*
@@ -219,18 +299,18 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ if (strlen(chap_r) != chap->digest_size * 2) {
pr_err("Malformed CHAP_R\n");
goto out;
}
- if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
pr_err("Malformed CHAP_R\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- tfm = crypto_alloc_shash("md5", 0, 0);
+ tfm = crypto_alloc_shash(chap->digest_name, 0, 0);
if (IS_ERR(tfm)) {
tfm = NULL;
pr_err("Unable to allocate struct crypto_shash\n");
@@ -265,21 +345,23 @@ static int chap_server_compute_md5(
}
ret = crypto_shash_finup(desc, chap->challenge,
- CHAP_CHALLENGE_LENGTH, server_digest);
+ chap->challenge_len, server_digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
- bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
- pr_debug("[server] MD5 Server Digest: %s\n", response);
+ bin2hex(response, server_digest, chap->digest_size);
+ pr_debug("[server] %s Server Digest: %s\n",
+ chap->digest_name, response);
- if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
- pr_debug("[server] MD5 Digests do not match!\n\n");
+ if (memcmp(server_digest, client_digest, chap->digest_size) != 0) {
+ pr_debug("[server] %s Digests do not match!\n\n",
+ chap->digest_name);
goto out;
} else
- pr_debug("[server] MD5 Digests match, CHAP connection"
- " successful.\n\n");
+ pr_debug("[server] %s Digests match, CHAP connection"
+ " successful.\n\n", chap->digest_name);
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
@@ -317,7 +399,7 @@ static int chap_server_compute_md5(
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
- challenge, &type) < 0) {
+ initiatorchg, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
@@ -326,26 +408,28 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
- if (!challenge_len) {
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
}
- if (challenge_len > 1024) {
+ if (initiatorchg_len > 1024) {
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
- if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ if (hex2bin(initiatorchg_binhex, initiatorchg, initiatorchg_len) < 0) {
pr_err("Malformed CHAP_C\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
* the target.
*/
- if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+ if (initiatorchg_len == chap->challenge_len &&
+ !memcmp(initiatorchg_binhex, chap->challenge,
+ initiatorchg_len)) {
pr_err("initiator CHAP_C matches target CHAP_C, failing"
" login attempt\n");
goto out;
@@ -377,7 +461,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
- ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+ ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len,
digest);
if (ret < 0) {
pr_err("crypto_shash_finup() failed for ma challenge\n");
@@ -393,7 +477,7 @@ static int chap_server_compute_md5(
/*
 * Convert response from binary hex to ascii hex.
*/
- bin2hex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, chap->digest_size);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
@@ -403,33 +487,15 @@ out:
kzfree(desc);
if (tfm)
crypto_free_shash(tfm);
- kfree(challenge);
- kfree(challenge_binhex);
+ kfree(initiatorchg);
+ kfree(initiatorchg_binhex);
+ kfree(digest);
+ kfree(response);
+ kfree(server_digest);
+ kfree(client_digest);
return auth_ret;
}
-static int chap_got_response(
- struct iscsi_conn *conn,
- struct iscsi_node_auth *auth,
- char *nr_in_ptr,
- char *nr_out_ptr,
- unsigned int *nr_out_len)
-{
- struct iscsi_chap *chap = conn->auth_protocol;
-
- switch (chap->digest_type) {
- case CHAP_DIGEST_MD5:
- if (chap_server_compute_md5(conn, auth, nr_in_ptr,
- nr_out_ptr, nr_out_len) < 0)
- return -1;
- return 0;
- default:
- pr_err("Unknown CHAP digest type %d!\n",
- chap->digest_type);
- return -1;
- }
-}
-
u32 chap_main_loop(
struct iscsi_conn *conn,
struct iscsi_node_auth *auth,
@@ -448,7 +514,7 @@ u32 chap_main_loop(
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
- if (chap_got_response(conn, auth, in_text, out_text,
+ if (chap_server_compute_hash(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
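
The rewritten helper drives any of the four CHAP digests through one crypto_shash pattern. The skeleton of that pattern, stripped of the CHAP bookkeeping (a hedged sketch; chap_digest_sketch is not a function in this driver):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* One-shot digest of id || secret || challenge with a runtime-chosen
 * algorithm ("md5", "sha1", "sha256", "sha3-256"), mirroring the flow
 * of chap_server_compute_hash() above. */
static int chap_digest_sketch(const char *alg, u8 id,
			      const u8 *secret, size_t secret_len,
			      const u8 *chal, size_t chal_len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash(alg, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, &id, 1);
	if (!ret)
		ret = crypto_shash_update(desc, secret, secret_len);
	if (!ret)
		ret = crypto_shash_finup(desc, chal, chal_len, out);

	kzfree(desc);		/* desc may hold key material */
	crypto_free_shash(tfm);
	return ret;
}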
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index d5600ac30b53..fc75c1c20e23 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -6,14 +6,19 @@
#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
-#define CHAP_DIGEST_SHA 6
+#define CHAP_DIGEST_SHA1 6
+#define CHAP_DIGEST_SHA256 7
+#define CHAP_DIGEST_SHA3_256 8
-#define CHAP_CHALLENGE_LENGTH 16
+#define MAX_CHAP_CHALLENGE_LEN 32
#define CHAP_CHALLENGE_STR_LEN 4096
-#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_RESPONSE_LENGTH 128 /* sufficient for SHA3 256 */
#define MAX_CHAP_N_SIZE 512
#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+#define SHA1_SIGNATURE_SIZE 20 /* 20 bytes in a SHA1 message digest */
+#define SHA256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA256 message digest */
+#define SHA3_256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA3 256 message digest */
#define CHAP_STAGE_CLIENT_A 1
#define CHAP_STAGE_SERVER_AIC 2
@@ -28,9 +33,11 @@ extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *,
int *, int *);
struct iscsi_chap {
- unsigned char digest_type;
unsigned char id;
- unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned char challenge[MAX_CHAP_CHALLENGE_LEN];
+ unsigned int challenge_len;
+ unsigned char *digest_name;
+ unsigned int digest_size;
unsigned int authenticate_target;
unsigned int chap_state;
} ____cacheline_aligned;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index daf47f38e081..240c4c4344f6 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -93,9 +93,6 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
-#define X_EXTENSIONKEY "X-com.sbei.version"
-#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
-#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
 * Parameter names of iSCSI Extensions for RDMA (iSER). See RFC-5046
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 3c79411c4cd0..6b4b354c88aa 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -118,7 +118,7 @@ static int srp_get_pr_transport_id(
memset(buf + 8, 0, leading_zero_bytes);
rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
if (rc < 0) {
- pr_debug("hex2bin failed for %s: %d\n", __func__, rc);
+ pr_debug("hex2bin failed for %s: %d\n", p, rc);
return rc;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index e5a71addbb06..d24e0a3ba3ff 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -32,9 +32,6 @@
extern struct se_device *g_lun0_dev;
-static DEFINE_SPINLOCK(tpg_lock);
-static LIST_HEAD(tpg_list);
-
/* __core_tpg_get_initiator_node_acl():
*
* mutex_lock(&tpg->acl_node_mutex); must be held when calling
@@ -475,7 +472,6 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->session_lock);
mutex_init(&se_tpg->tpg_lun_mutex);
@@ -494,10 +490,6 @@ int core_tpg_register(
}
}
- spin_lock_bh(&tpg_lock);
- list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
- spin_unlock_bh(&tpg_lock);
-
pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
@@ -519,10 +511,6 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&tpg_lock);
- list_del(&se_tpg->se_tpg_node);
- spin_unlock_bh(&tpg_lock);
-
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7f06a62f8661..ea482d4b1f00 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -584,6 +584,15 @@ void transport_free_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(transport_free_session);
+static int target_release_res(struct se_device *dev, void *data)
+{
+ struct se_session *sess = data;
+
+ if (dev->reservation_holder == sess)
+ target_release_reservation(dev);
+ return 0;
+}
+
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
@@ -600,6 +609,12 @@ void transport_deregister_session(struct se_session *se_sess)
se_sess->fabric_sess_ptr = NULL;
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+ /*
+ * Since the session is being removed, release SPC-2
+ * reservations held by the session that is disappearing.
+ */
+ target_for_each_device(target_release_res, se_sess);
+
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->fabric_name);
/*
@@ -1243,6 +1258,19 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
return TCM_NO_SENSE;
}
+/**
+ * target_cmd_size_check - Check whether there will be a residual.
+ * @cmd: SCSI command.
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
+ * the SCSI transport driver is available in @cmd->data_length.
+ *
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
+ *
+ * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ *
+ * Return: TCM_NO_SENSE
+ */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
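
The new kernel-doc states the residual rule tersely; spelled out as a hedged user-space sketch (SCF_* values here are placeholders, the real bits live in target_core_base.h, and this is not the function body itself):

#include <stdio.h>

#define SCF_OVERFLOW_BIT  (1u << 0)	/* placeholder values for the demo */
#define SCF_UNDERFLOW_BIT (1u << 1)

/* size comes from the CDB, data_length from the transport header. */
static unsigned int residual_check(unsigned int size, unsigned int data_length,
				   unsigned int *flags)
{
	if (size > data_length) {
		*flags |= SCF_OVERFLOW_BIT;	/* CDB wants more than offered */
		return size - data_length;
	}
	if (size < data_length) {
		*flags |= SCF_UNDERFLOW_BIT;	/* extra buffer goes unused */
		return data_length - size;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = 0;

	printf("residual=%u flags=%#x\n",
	       residual_check(512, 4096, &flags), flags);
	return 0;	/* residual=3584, SCF_UNDERFLOW_BIT set */
}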
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 35be1be87d2a..0b9dfa6b17bc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -499,7 +499,7 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
schedule_delayed_work(&tcmu_unmap_work, 0);
/* try to get new page from the mm */
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
goto err_alloc;
@@ -573,7 +573,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
+ tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
if (!tcmu_cmd)
return NULL;
@@ -584,7 +584,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
- GFP_KERNEL);
+ GFP_NOIO);
if (!tcmu_cmd->dbi) {
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
return NULL;
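
Switching these allocations to GFP_NOIO matters because they sit on the block-I/O dispatch path: GFP_KERNEL may enter direct reclaim, which can issue I/O back to the very device being serviced and deadlock. A scoped alternative, shown only for illustration (not what this patch does), is the memalloc_noio bracket:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Everything allocated inside the bracket behaves as GFP_NOIO, even
 * deep inside helpers that pass GFP_KERNEL. */
static struct page *alloc_on_io_path(void)
{
	unsigned int noio_flags = memalloc_noio_save();
	struct page *page;

	page = alloc_page(GFP_KERNEL);	/* implicitly ~__GFP_IO here */
	memalloc_noio_restore(noio_flags);
	return page;
}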
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index b9b1e92c6f8d..425c1070de08 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -467,7 +467,6 @@ int target_xcopy_setup_pt(void)
}
memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
- INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 0f16d9ffd8d1..37d22e39fd8d 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -675,7 +675,7 @@ static const struct file_operations tee_fops = {
.open = tee_open,
.release = tee_release,
.unlocked_ioctl = tee_ioctl,
- .compat_ioctl = tee_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static void tee_release_device(struct device *dev)
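
Pointing .compat_ioctl at the native handler was subtly wrong: 32-bit user pointers must pass through compat_ptr() before a 64-bit handler dereferences them. compat_ptr_ioctl() is the generic helper that does exactly that and then forwards to .unlocked_ioctl. Minimal wiring sketch (demo_ioctl is a placeholder):

#include <linux/fs.h>
#include <linux/module.h>

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* placeholder: all arguments are pointers */
}

/* Correct for any driver whose ioctl arguments are all pointers. */
static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* applies compat_ptr(), then forwards */
};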
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index e0575d29023a..b831fc77cf64 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -2,7 +2,7 @@
/*
* gov_bang_bang.c - A simple thermal throttling governor using hysteresis
*
- * Copyright (C) 2014 Peter Feuerer <peter@piie.net>
+ * Copyright (C) 2014 Peter Kaestle <peter@piie.net>
*
* Based on step_wise.c with following Copyrights:
* Copyright (C) 2012 Intel Corp
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 8bf8e031f0bc..fdd77bb4628d 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -33,9 +33,9 @@ static int tb_port_enable_tmu(struct tb_port *port, bool enable)
* Legacy devices need to have TMU access enabled before port
* space can be fully accessed.
*/
- if (tb_switch_is_lr(sw))
+ if (tb_switch_is_light_ridge(sw))
offset = 0x26;
- else if (tb_switch_is_er(sw))
+ else if (tb_switch_is_eagle_ridge(sw))
offset = 0x2a;
else
return 0;
@@ -60,7 +60,7 @@ static void tb_port_dummy_read(struct tb_port *port)
* reading stale data on next read perform one dummy read after
* port capabilities are walked.
*/
- if (tb_switch_is_lr(port->sw)) {
+ if (tb_switch_is_light_ridge(port->sw)) {
u32 dummy;
tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 2ec1af8f7968..d97813e80e5f 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -962,8 +962,8 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
+ route, space, offset);
break;
default:
@@ -988,8 +988,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
- tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n",
- space, offset);
+ tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
+ route, space, offset);
break;
default:
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index ee5196479854..8dd7de0cc826 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -514,17 +514,6 @@ int tb_drom_read(struct tb_switch *sw)
* no entries). Hardcode the configuration here.
*/
tb_drom_read_uid_only(sw, &sw->uid);
-
- sw->ports[1].link_nr = 0;
- sw->ports[2].link_nr = 1;
- sw->ports[1].dual_link_port = &sw->ports[2];
- sw->ports[2].dual_link_port = &sw->ports[1];
-
- sw->ports[3].link_nr = 0;
- sw->ports[4].link_nr = 1;
- sw->ports[3].dual_link_port = &sw->ports[4];
- sw->ports[4].dual_link_port = &sw->ports[3];
-
return 0;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 245588f691e7..13e88109742e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
@@ -43,6 +44,10 @@
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
+static bool start_icm;
+module_param(start_icm, bool, 0444);
+MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
+
/**
* struct icm - Internal connection manager private data
* @request_lock: Makes sure only one message is send to ICM at time
@@ -147,6 +152,17 @@ static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
return NULL;
}
+static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
+{
+ const struct intel_vss *vss;
+
+ vss = parse_intel_vss(ep_name, size);
+ if (vss)
+ return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+ return false;
+}
+
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
@@ -339,6 +355,14 @@ static void icm_veto_end(struct tb *tb)
}
}
+static bool icm_firmware_running(const struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_ICM_EN);
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -562,58 +586,42 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
return 0;
}
-static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route,
- const uuid_t *uuid, const u8 *ep_name,
- size_t ep_name_size, u8 connection_id,
- u8 connection_key, u8 link, u8 depth,
- enum tb_security_level security_level,
- bool authorized, bool boot)
+static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid)
{
- const struct intel_vss *vss;
+ struct tb *tb = parent_sw->tb;
struct tb_switch *sw;
- int ret;
-
- pm_runtime_get_sync(&parent_sw->dev);
- sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
- if (IS_ERR(sw))
- goto out;
+ sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+ if (IS_ERR(sw)) {
+ tb_warn(tb, "failed to allocate switch at %llx\n", route);
+ return sw;
+ }
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
if (!sw->uuid) {
- tb_sw_warn(sw, "cannot allocate memory for switch\n");
tb_switch_put(sw);
- goto out;
+ return ERR_PTR(-ENOMEM);
}
- sw->connection_id = connection_id;
- sw->connection_key = connection_key;
- sw->link = link;
- sw->depth = depth;
- sw->authorized = authorized;
- sw->security_level = security_level;
- sw->boot = boot;
+
init_completion(&sw->rpm_complete);
+ return sw;
+}
- vss = parse_intel_vss(ep_name, ep_name_size);
- if (vss)
- sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
+{
+ u64 route = tb_route(sw);
+ int ret;
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
ret = tb_switch_add(sw);
- if (ret) {
+ if (ret)
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
- tb_switch_put(sw);
- sw = ERR_PTR(ret);
- }
-
-out:
- pm_runtime_mark_last_busy(&parent_sw->dev);
- pm_runtime_put_autosuspend(&parent_sw->dev);
- return sw;
+ return ret;
}
static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
@@ -697,11 +705,11 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
(const struct icm_fr_event_device_connected *)hdr;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
+ bool boot, dual_lane, speed_gen3;
struct icm *icm = tb_priv(tb);
bool authorized = false;
struct tb_xdomain *xd;
u8 link, depth;
- bool boot;
u64 route;
int ret;
@@ -714,6 +722,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
@@ -811,10 +821,27 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
return;
}
- add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id,
- pkg->connection_key, link, depth, security_level,
- authorized, boot);
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
@@ -1142,10 +1169,10 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
+ bool authorized, boot, dual_lane, speed_gen3;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
struct tb_xdomain *xd;
- bool authorized, boot;
u64 route;
icm_postpone_rescan(tb);
@@ -1163,6 +1190,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
@@ -1205,11 +1234,27 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
return;
}
- sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name,
- sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0,
- security_level, authorized, boot);
- if (!IS_ERR(sw) && force_rtd3)
- sw->rpm = true;
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = force_rtd3;
+ if (!sw->rpm)
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+ sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
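
Both device_connected paths now share one runtime-PM shape: pin the parent active across child allocation and registration, then drop the reference with autosuspend accounting so an idle parent may suspend again. The bracket in isolation, as a generic sketch:

#include <linux/pm_runtime.h>

/* Keep @parent runtime-active while a child device is added; the
 * _autosuspend variant restarts the idle timer instead of suspending
 * immediately. */
static void add_child_sketch(struct device *parent)
{
	pm_runtime_get_sync(parent);

	/* ... alloc_switch()/add_switch() equivalent work here ... */

	pm_runtime_mark_last_busy(parent);
	pm_runtime_put_autosuspend(parent);
}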
@@ -1349,9 +1394,12 @@ static bool icm_ar_is_supported(struct tb *tb)
/*
* Starting from Alpine Ridge we can use ICM on Apple machines
* as well. We just need to reset and re-enable it first.
+ * However, only start it if explicitly asked by the user.
*/
- if (!x86_apple_machine)
+ if (icm_firmware_running(tb->nhi))
return true;
+ if (!start_icm)
+ return false;
/*
* Find the upstream PCIe port in case we need to do reset
@@ -1704,8 +1752,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
u32 val;
/* Check if the ICM firmware is already running */
- val = ioread32(nhi->iobase + REG_FW_STS);
- if (val & REG_FW_STS_ICM_EN)
+ if (icm_firmware_running(nhi))
return 0;
dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
@@ -1893,14 +1940,12 @@ static int icm_suspend(struct tb *tb)
*/
static void icm_unplug_children(struct tb_switch *sw)
{
- unsigned int i;
+ struct tb_port *port;
if (tb_route(sw))
sw->is_unplugged = true;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain)
port->xdomain->is_unplugged = true;
else if (tb_port_has_remote(port))
@@ -1936,11 +1981,9 @@ static void remove_unplugged_switch(struct tb_switch *sw)
static void icm_free_unplugged_children(struct tb_switch *sw)
{
- unsigned int i;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (port->xdomain && port->xdomain->is_unplugged) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
@@ -2216,7 +2259,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
- icm->is_supported = icm_ar_is_supported;
+ icm->is_supported = icm_fr_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
icm->device_connected = icm_icl_device_connected;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index ae1e92611c3e..bd44d50246d2 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -94,7 +94,7 @@ int tb_lc_configure_link(struct tb_switch *sw)
struct tb_port *up, *down;
int ret;
- if (!sw->config.enabled || !tb_route(sw))
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
@@ -124,7 +124,7 @@ void tb_lc_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
+ if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw))
return;
up = tb_upstream_port(sw);
@@ -177,3 +177,192 @@ int tb_lc_set_sleep(struct tb_switch *sw)
return 0;
}
+
+/**
+ * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
+ * @sw: Switch to check
+ *
+ * Checks whether lane bonding from the parent switch to @sw is
+ * possible.
+ */
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int cap, ret;
+ u32 val;
+
+ if (sw->generation < 2)
+ return false;
+
+ up = tb_upstream_port(sw);
+ cap = find_port_lc_cap(up);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_PORT_ATTR_BE);
+}
+
+static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
+ struct tb_port *in)
+{
+ struct tb_port *port;
+
+	/* The first DP IN port is sink 0 and the second is sink 1 */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_dpin(port))
+ return in != port;
+ }
+
+ return -EINVAL;
+}
+
+static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
+{
+ u32 val, alloc;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ /*
+	 * Sink is available for CM/SW to use if the allocation value is
+ * either 0 or 1.
+ */
+ if (!sink) {
+ alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
+ return 0;
+ } else {
+ alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
+ * @sw: Switch whose DP sink is queried
+ * @in: DP IN port to check
+ *
+ * Queries through LC SNK_ALLOCATION registers whether DP sink is available
+ * for the given DP IN port or not.
+ */
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
+{
+ int sink;
+
+ /*
+ * For older generations sink is always available as there is no
+ * allocation mechanism.
+ */
+ if (sw->generation < 3)
+ return true;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return false;
+
+ return !tb_lc_dp_sink_available(sw, sink);
+}
+
+/**
+ * tb_lc_dp_sink_alloc() - Allocate DP sink
+ * @sw: Switch whose DP sink is allocated
+ * @in: DP IN port the DP sink is allocated for
+ *
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
+ * resource is available and allocation is successful returns %0. In all
+ * other cases returns negative errno. In particular %-EBUSY is returned if
+ * the resource was not available.
+ */
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink) {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
+ } else {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ }
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d allocated\n", sink);
+ return 0;
+}
+
+/**
+ * tb_lc_dp_sink_dealloc() - De-allocate DP sink
+ * @sw: Switch whose DP sink is de-allocated
+ * @in: DP IN port whose DP sink is de-allocated
+ *
+ * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ /* Needs to be owned by CM/SW */
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink)
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ else
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d de-allocated\n", sink);
+ return 0;
+}
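
The helpers above treat SNK_ALLOCATION as one small owner field per sink, where 0 means unallocated and the CM value means "owned by the connection manager". A user-space illustration of that read-out logic (field widths and values here are assumptions for the demo, not the TB_LC_SNK_ALLOCATION_* register definitions):

#include <stdint.h>
#include <stdio.h>

#define SNK0_MASK	0x0fu
#define SNK1_SHIFT	4
#define SNK1_MASK	(0x0fu << SNK1_SHIFT)
#define SNK_CM		0x01u

static int sink_available(uint32_t reg, int sink)
{
	uint32_t alloc = sink ? (reg & SNK1_MASK) >> SNK1_SHIFT
			      : (reg & SNK0_MASK);

	return !alloc || alloc == SNK_CM;	/* free, or already ours */
}

int main(void)
{
	uint32_t reg = SNK_CM | (0x2u << SNK1_SHIFT);	/* sink0 ours, sink1 foreign */

	printf("sink0:%d sink1:%d\n",
	       sink_available(reg, 0), sink_available(reg, 1)); /* sink0:1 sink1:0 */
	return 0;
}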
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index afe5f8391ebf..ad58559ea88e 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -220,7 +220,8 @@ err:
* Creates path between two ports starting with given @src_hopid. Reserves
* HopIDs for each port (they can be different from @src_hopid depending on
* how many HopIDs each port already have reserved). If there are dual
- * links on the path, prioritizes using @link_nr.
+ * links on the path, prioritizes using @link_nr but takes into account
+ * that the lanes may be bonded.
*
* Return: Returns a tb_path on success or NULL on failure.
*/
@@ -259,7 +260,9 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!in_port)
goto err;
- if (in_port->dual_link_port && in_port->link_nr != link_nr)
+ /* When lanes are bonded primary link must be used */
+ if (!in_port->bonded && in_port->dual_link_port &&
+ in_port->link_nr != link_nr)
in_port = in_port->dual_link_port;
ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
@@ -271,8 +274,27 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
if (!out_port)
goto err;
- if (out_port->dual_link_port && out_port->link_nr != link_nr)
- out_port = out_port->dual_link_port;
+ /*
+ * Pick up right port when going from non-bonded to
+ * bonded or from bonded to non-bonded.
+ */
+ if (out_port->dual_link_port) {
+ if (!in_port->bonded && out_port->bonded &&
+ out_port->link_nr) {
+ /*
+ * Use primary link when going from
+ * non-bonded to bonded.
+ */
+ out_port = out_port->dual_link_port;
+ } else if (!out_port->bonded &&
+ out_port->link_nr != link_nr) {
+ /*
+ * If out port is not bonded follow
+ * link_nr.
+ */
+ out_port = out_port->dual_link_port;
+ }
+ }
if (i == num_hops - 1)
ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
@@ -535,3 +557,25 @@ bool tb_path_is_invalid(struct tb_path *path)
}
return false;
}
+
+/**
+ * tb_path_switch_on_path() - Does the path go through a certain switch
+ * @path: Path to check
+ * @sw: Switch to check
+ *
+ * Walks all hops on the path and checks whether any of them belongs to @sw.
+ * Direction does not matter.
+ */
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port->sw == sw ||
+ path->hops[i].out_port->sw == sw)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 5ea8db667e83..ca86a8e09c77 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -168,7 +168,7 @@ static int nvm_validate_and_write(struct tb_switch *sw)
static int nvm_authenticate_host(struct tb_switch *sw)
{
- int ret;
+ int ret = 0;
/*
* Root switch NVM upgrade requires that we disconnect the
@@ -176,6 +176,8 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* already).
*/
if (!sw->safe_mode) {
+ u32 status;
+
ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
@@ -184,7 +186,16 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* everything goes well so getting timeout is expected.
*/
ret = dma_port_flash_update_auth(sw->dma_port);
- return ret == -ETIMEDOUT ? 0 : ret;
+ if (!ret || ret == -ETIMEDOUT)
+ return 0;
+
+ /*
+ * Any error from update auth operation requires power
+ * cycling of the host router.
+ */
+ tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
+ if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
+ nvm_set_auth_status(sw, status);
}
/*
@@ -192,7 +203,7 @@ static int nvm_authenticate_host(struct tb_switch *sw)
* switch.
*/
dma_port_power_cycle(sw->dma_port);
- return 0;
+ return ret;
}
static int nvm_authenticate_device(struct tb_switch *sw)
@@ -200,8 +211,16 @@ static int nvm_authenticate_device(struct tb_switch *sw)
int ret, retries = 10;
ret = dma_port_flash_update_auth(sw->dma_port);
- if (ret && ret != -ETIMEDOUT)
+ switch (ret) {
+ case 0:
+ case -ETIMEDOUT:
+ case -EACCES:
+ case -EINVAL:
+ /* Power cycle is required */
+ break;
+ default:
return ret;
+ }
/*
* Poll here for the authentication status. It takes some time
@@ -553,17 +572,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
if (credits == 0 || port->sw->is_unplugged)
return 0;
- nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
nfc_credits += credits;
- tb_port_dbg(port, "adding %d NFC credits to %lu",
- credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
+ tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
+ port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
- port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
+ port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
port->config.nfc_credits |= nfc_credits;
return tb_port_write(port, &port->config.nfc_credits,
- TB_CFG_PORT, 4, 1);
+ TB_CFG_PORT, ADP_CS_4, 1);
}
/**
@@ -578,14 +597,14 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
- data &= ~TB_PORT_LCA_MASK;
- data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
+ data &= ~ADP_CS_5_LCA_MASK;
+ data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
- return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
+ return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}
/**
@@ -645,6 +664,7 @@ static int tb_init_port(struct tb_port *port)
ida_init(&port->out_hopids);
}
+ INIT_LIST_HEAD(&port->list);
return 0;
}
@@ -775,6 +795,132 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
return next;
}
+static int tb_port_get_link_speed(struct tb_port *port)
+{
+ u32 val, speed;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+ LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+ return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+}
+
+static int tb_port_get_link_width(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
+ LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+}
+
+static bool tb_port_is_width_supported(struct tb_port *port, int width)
+{
+ u32 phy, widths;
+ int ret;
+
+ if (!port->cap_phy)
+ return false;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+		return false;
+
+ widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+
+ return !!(widths & width);
+}
+
+static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
+ switch (width) {
+ case 1:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ case 2:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= LANE_ADP_CS_1_LB;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_lane_bonding_enable(struct tb_port *port)
+{
+ int ret;
+
+ /*
+	 * Enable lane bonding on both links if not already enabled by,
+	 * for example, the boot firmware.
+ */
+ ret = tb_port_get_link_width(port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port, 2);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_port_get_link_width(port->dual_link_port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port->dual_link_port, 2);
+ if (ret) {
+ tb_port_set_link_width(port, 1);
+ return ret;
+ }
+ }
+
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
+
+ return 0;
+}
+
+static void tb_port_lane_bonding_disable(struct tb_port *port)
+{
+ port->dual_link_port->bonded = false;
+ port->bonded = false;
+
+ tb_port_set_link_width(port->dual_link_port, 1);
+ tb_port_set_link_width(port, 1);
+}
+
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
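
The lane-bonding helpers added above report per-lane speed (Gen2 = 10 Gb/s, Gen3 = 20 Gb/s) and lane count (1, or 2 when bonded) separately; total link bandwidth is simply their product. Trivially:

#include <stdio.h>

int main(void)
{
	/* Mirrors the sw->link_speed / sw->link_width semantics above. */
	int link_speed = 20;	/* Gb/s per lane, Gen3 */
	int link_width = 2;	/* bonded dual-lane */

	printf("%d Gb/s x%d lanes = %d Gb/s\n",
	       link_speed, link_width, link_speed * link_width);
	return 0;	/* 20 Gb/s x2 lanes = 40 Gb/s */
}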
@@ -803,10 +949,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
{
u32 data;
- if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1))
return false;
- return !!(data & TB_PCI_EN);
+ return !!(data & ADP_PCIE_CS_0_PE);
}
/**
@@ -816,10 +963,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
*/
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
- u32 word = enable ? TB_PCI_EN : 0x0;
+ u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
if (!port->cap_adap)
return -ENXIO;
- return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1);
}
/**
@@ -833,11 +981,12 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
- return !!(data & TB_DP_HDP);
+ return !!(data & ADP_DP_CS_2_HDP);
}
/**
@@ -851,12 +1000,14 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
u32 data;
int ret;
- ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
if (ret)
return ret;
- data |= TB_DP_HPDC;
- return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
+ data |= ADP_DP_CS_3_HDPC;
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
}
/**
@@ -874,20 +1025,23 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
- data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
- data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
- data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
- data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
- data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+ ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+ ADP_DP_CS_1_AUX_RX_HOPID_MASK;
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/**
@@ -898,11 +1052,11 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
{
u32 data[2];
- if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
ARRAY_SIZE(data)))
return false;
- return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
+ return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
/**
@@ -918,18 +1072,18 @@ int tb_dp_port_enable(struct tb_port *port, bool enable)
u32 data[2];
int ret;
- ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
if (enable)
- data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
+ data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
else
- data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
+ data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
- return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
- ARRAY_SIZE(data));
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/* switch utility functions */
@@ -986,7 +1140,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
u32 data;
int res;
- if (!sw->config.enabled)
+ if (tb_switch_is_icm(sw))
return 0;
sw->config.plug_events_delay = 0xff;
@@ -1114,6 +1268,15 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(device_name);
+static ssize_t
+generation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->generation);
+}
+static DEVICE_ATTR_RO(generation);
+
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1166,6 +1329,36 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+}
+
+/*
+ * Currently all lanes must run at the same speed, but we expose both
+ * directions separately to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->link_width);
+}
+
+/*
+ * Currently the link has the same number of lanes in both directions (1
+ * or 2), but we expose them separately to allow possible asymmetric links
+ * in the future.
+ */
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+
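For illustration, a minimal userspace sketch reading one of the new attributes (the device name 0-1 is hypothetical and depends on the actual topology):

    #include <stdio.h>

    int main(void)
    {
    	char buf[32];
    	/* hypothetical device name; real names depend on the topology */
    	FILE *f = fopen("/sys/bus/thunderbolt/devices/0-1/tx_speed", "r");

    	if (f && fgets(buf, sizeof(buf), f))
    		printf("%s", buf);	/* e.g. "20.0 Gb/s" on a gen 3 link */
    	if (f)
    		fclose(f);
    	return 0;
    }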
static void nvm_authenticate_start(struct tb_switch *sw)
{
struct pci_dev *root_port;
@@ -1246,8 +1439,6 @@ static ssize_t nvm_authenticate_store(struct device *dev,
*/
nvm_authenticate_start(sw);
ret = nvm_authenticate_host(sw);
- if (ret)
- nvm_authenticate_complete(sw);
} else {
ret = nvm_authenticate_device(sw);
}
@@ -1319,9 +1510,14 @@ static struct attribute *switch_attrs[] = {
&dev_attr_boot.attr,
&dev_attr_device.attr,
&dev_attr_device_name.attr,
+ &dev_attr_generation.attr,
&dev_attr_key.attr,
&dev_attr_nvm_authenticate.attr,
&dev_attr_nvm_version.attr,
+ &dev_attr_rx_speed.attr,
+ &dev_attr_rx_lanes.attr,
+ &dev_attr_tx_speed.attr,
+ &dev_attr_tx_lanes.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
&dev_attr_unique_id.attr,
@@ -1352,6 +1548,13 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
+ } else if (attr == &dev_attr_rx_speed.attr ||
+ attr == &dev_attr_rx_lanes.attr ||
+ attr == &dev_attr_tx_speed.attr ||
+ attr == &dev_attr_tx_lanes.attr) {
+ if (tb_route(sw))
+ return attr->mode;
+ return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
if (sw->dma_port && !sw->no_nvm_upgrade)
return attr->mode;
@@ -1382,14 +1585,14 @@ static const struct attribute_group *switch_groups[] = {
static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
- int i;
+ struct tb_port *port;
dma_port_free(sw->dma_port);
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (!sw->ports[i].disabled) {
- ida_destroy(&sw->ports[i].in_hopids);
- ida_destroy(&sw->ports[i].out_hopids);
+ tb_switch_for_each_port(sw, port) {
+ if (!port->disabled) {
+ ida_destroy(&port->in_hopids);
+ ida_destroy(&port->out_hopids);
}
}
@@ -1690,13 +1893,16 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
int ret;
switch (sw->generation) {
- case 3:
- break;
-
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
return 0;
+
+ /* fallthrough */
+ case 3:
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
break;
default:
@@ -1710,7 +1916,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
}
/* Root switch DMA port requires running firmware */
- if (!tb_route(sw) && sw->config.enabled)
+ if (!tb_route(sw) && !tb_switch_is_icm(sw))
return 0;
sw->dma_port = dma_port_alloc(sw);
@@ -1721,6 +1927,19 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return 0;
/*
+ * If a status is already set then authentication failed when
+ * dma_port_flash_update_auth() returned. Power cycling is not
+ * needed (it was already done) so the only thing we do here is
+ * to unblock runtime PM of the root port.
+ */
+ nvm_get_auth_status(sw, &status);
+ if (status) {
+ if (!tb_route(sw))
+ nvm_authenticate_complete(sw);
+ return 0;
+ }
+
+ /*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
* it functional again.
@@ -1735,9 +1954,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
- ret = tb_switch_set_uuid(sw);
- if (ret)
- return ret;
nvm_set_auth_status(sw, status);
}
@@ -1751,6 +1967,153 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
return -ESHUTDOWN;
}
+static void tb_switch_default_link_ports(struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i += 2) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_port *subordinate;
+
+ if (!tb_port_is_null(port))
+ continue;
+
+ /* Check for the subordinate port */
+ if (i == sw->config.max_port_number ||
+ !tb_port_is_null(&sw->ports[i + 1]))
+ continue;
+
+ /* Link them if not already done so (by DROM) */
+ subordinate = &sw->ports[i + 1];
+ if (!port->dual_link_port && !subordinate->dual_link_port) {
+ port->link_nr = 0;
+ port->dual_link_port = subordinate;
+ subordinate->link_nr = 1;
+ subordinate->dual_link_port = port;
+
+ tb_sw_dbg(sw, "linked ports %d <-> %d\n",
+ port->port, subordinate->port);
+ }
+ }
+}
+
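For illustration, the pairing produced on a hypothetical switch whose ports 1-4 are all lane adapters (pairs already linked by the DROM are left untouched):

    /*
     * port 1 (link_nr 0) <-> port 2 (link_nr 1)
     * port 3 (link_nr 0) <-> port 4 (link_nr 1)
     */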
+static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ const struct tb_port *up = tb_upstream_port(sw);
+
+ if (!up->dual_link_port || !up->dual_link_port->remote)
+ return false;
+
+ return tb_lc_lane_bonding_possible(sw);
+}
+
+static int tb_switch_update_link_attributes(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ bool change = false;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+
+ ret = tb_port_get_link_speed(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_speed != ret)
+ change = true;
+ sw->link_speed = ret;
+
+ ret = tb_port_get_link_width(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_width != ret)
+ change = true;
+ sw->link_width = ret;
+
+ /* Notify userspace that there is possible link attribute change */
+ if (device_is_registered(&sw->dev) && change)
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+ *
+ * The connection manager can call this function to enable lane bonding of a
+ * switch. If conditions are correct and both switches support the feature,
+ * lanes are bonded. It is safe to call this for any switch.
+ */
+int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+ u64 route = tb_route(sw);
+ int ret;
+
+ if (!route)
+ return 0;
+
+ if (!tb_switch_lane_bonding_possible(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(route, parent);
+
+ if (!tb_port_is_width_supported(up, 2) ||
+ !tb_port_is_width_supported(down, 2))
+ return 0;
+
+ ret = tb_port_lane_bonding_enable(up);
+ if (ret) {
+ tb_port_warn(up, "failed to enable lane bonding\n");
+ return ret;
+ }
+
+ ret = tb_port_lane_bonding_enable(down);
+ if (ret) {
+ tb_port_warn(down, "failed to enable lane bonding\n");
+ tb_port_lane_bonding_disable(up);
+ return ret;
+ }
+
+ tb_switch_update_link_attributes(sw);
+
+ tb_sw_dbg(sw, "lane bonding enabled\n");
+ return ret;
+}
+
+/**
+ * tb_switch_lane_bonding_disable() - Disable lane bonding
+ * @sw: Switch whose lane bonding to disable
+ *
+ * Disables lane bonding between @sw and its parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+
+ if (!tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (!up->bonded)
+ return;
+
+ down = tb_port_at(tb_route(sw), parent);
+
+ tb_port_lane_bonding_disable(up);
+ tb_port_lane_bonding_disable(down);
+
+ tb_switch_update_link_attributes(sw);
+ tb_sw_dbg(sw, "lane bonding disabled\n");
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -1775,21 +2138,25 @@ int tb_switch_add(struct tb_switch *sw)
* configuration based mailbox.
*/
ret = tb_switch_add_dma_port(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add DMA port\n");
return ret;
+ }
if (!sw->safe_mode) {
/* read drom */
ret = tb_drom_read(sw);
if (ret) {
- tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+ dev_err(&sw->dev, "reading DROM failed\n");
return ret;
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
ret = tb_switch_set_uuid(sw);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to set UUID\n");
return ret;
+ }
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
@@ -1797,14 +2164,24 @@ int tb_switch_add(struct tb_switch *sw)
continue;
}
ret = tb_init_port(&sw->ports[i]);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to initialize port %d\n", i);
return ret;
+ }
}
+
+ tb_switch_default_link_ports(sw);
+
+ ret = tb_switch_update_link_attributes(sw);
+ if (ret)
+ return ret;
}
ret = device_add(&sw->dev);
- if (ret)
+ if (ret) {
+ dev_err(&sw->dev, "failed to add device: %d\n", ret);
return ret;
+ }
if (tb_route(sw)) {
dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
@@ -1816,6 +2193,7 @@ int tb_switch_add(struct tb_switch *sw)
ret = tb_switch_nvm_add(sw);
if (ret) {
+ dev_err(&sw->dev, "failed to add NVM devices\n");
device_del(&sw->dev);
return ret;
}
@@ -1842,7 +2220,7 @@ int tb_switch_add(struct tb_switch *sw)
*/
void tb_switch_remove(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
@@ -1850,13 +2228,13 @@ void tb_switch_remove(struct tb_switch *sw)
}
/* port 0 is the switch itself and never has a remote */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i])) {
- tb_switch_remove(sw->ports[i].remote->sw);
- sw->ports[i].remote = NULL;
- } else if (sw->ports[i].xdomain) {
- tb_xdomain_remove(sw->ports[i].xdomain);
- sw->ports[i].xdomain = NULL;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
}
}
@@ -1876,7 +2254,8 @@ void tb_switch_remove(struct tb_switch *sw)
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
- int i;
+ struct tb_port *port;
+
if (sw == sw->tb->root_switch) {
tb_sw_WARN(sw, "cannot unplug root switch\n");
return;
@@ -1886,17 +2265,19 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
return;
}
sw->is_unplugged = true;
- for (i = 0; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_sw_set_unplugged(sw->ports[i].remote->sw);
- else if (sw->ports[i].xdomain)
- sw->ports[i].xdomain->is_unplugged = true;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
}
}
int tb_switch_resume(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
tb_sw_dbg(sw, "resuming switch\n");
/*
@@ -1944,9 +2325,7 @@ int tb_switch_resume(struct tb_switch *sw)
return err;
/* check for surviving downstream switches */
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
-
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;
@@ -1970,19 +2349,64 @@ int tb_switch_resume(struct tb_switch *sw)
void tb_switch_suspend(struct tb_switch *sw)
{
- int i, err;
+ struct tb_port *port;
+ int err;
+
err = tb_plug_events_active(sw, false);
if (err)
return;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_switch_suspend(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_switch_suspend(port->remote->sw);
}
tb_lc_set_sleep(sw);
}
+/**
+ * tb_switch_query_dp_resource() - Query availability of DP resource
+ * @sw: Switch whose DP resource is queried
+ * @in: DP IN port
+ *
+ * Queries availability of a DP resource for DP tunneling using
+ * switch-specific means. Returns %true if the resource is available.
+ */
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_query(sw, in);
+}
+
+/**
+ * tb_switch_alloc_dp_resource() - Allocate available DP resource
+ * @sw: Switch whose DP resource is allocated
+ * @in: DP IN port
+ *
+ * Allocates DP resource for DP tunneling. The resource must be
+ * available for this to succeed (see tb_switch_query_dp_resource()).
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ return tb_lc_dp_sink_alloc(sw, in);
+}
+
+/**
+ * tb_switch_dealloc_dp_resource() - De-allocate DP resource
+ * @sw: Switch whose DP resource is de-allocated
+ * @in: DP IN port
+ *
+ * De-allocates DP resource that was previously allocated for DP
+ * tunneling.
+ */
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ if (tb_lc_dp_sink_dealloc(sw, in)) {
+ tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
+ in->port);
+ }
+}
+
struct tb_sw_lookup {
struct tb *tb;
u8 link;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1f7a9e1cc09c..ea8727f769d6 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
@@ -18,6 +17,7 @@
/**
* struct tb_cm - Simple Thunderbolt connection manager
* @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
* @hotplug_active: tb_handle_hotplug will stop progressing plug
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain wq
@@ -25,6 +25,7 @@
*/
struct tb_cm {
struct list_head tunnel_list;
+ struct list_head dp_resources;
bool hotplug_active;
};
@@ -56,17 +57,51 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
/* enumeration & hot plug handling */
+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (!tb_switch_query_dp_resource(sw, port))
+ continue;
+
+ list_add_tail(&port->list, &tcm->dp_resources);
+ tb_port_dbg(port, "DP IN resource available\n");
+ }
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port, *tmp;
+
+ /* Clear children resources first */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_remove_dp_resources(port->remote->sw);
+ }
+
+ list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+ if (port->sw == sw) {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ list_del_init(&port->list);
+ }
+ }
+}
+
static void tb_discover_tunnels(struct tb_switch *sw)
{
struct tb *tb = sw->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port;
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
+ tb_switch_for_each_port(sw, port) {
struct tb_tunnel *tunnel = NULL;
- port = &sw->ports[i];
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port);
@@ -95,9 +130,9 @@ static void tb_discover_tunnels(struct tb_switch *sw)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
}
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_port_has_remote(&sw->ports[i]))
- tb_discover_tunnels(sw->ports[i].remote->sw);
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_discover_tunnels(port->remote->sw);
}
}
@@ -130,9 +165,10 @@ static void tb_scan_port(struct tb_port *port);
*/
static void tb_scan_switch(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- tb_scan_port(&sw->ports[i]);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_scan_port(port);
}
/**
@@ -217,11 +253,16 @@ static void tb_scan_port(struct tb_port *port)
upstream_port->dual_link_port->remote = port->dual_link_port;
}
+ /* Enable lane bonding if supported */
+ if (tb_switch_lane_bonding_enable(sw))
+ tb_sw_warn(sw, "failed to enable lane bonding\n");
+
tb_scan_switch(sw);
}
-static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
- struct tb_port *src_port, struct tb_port *dst_port)
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
@@ -230,14 +271,32 @@ static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
if (tunnel->type == type &&
((src_port && src_port == tunnel->src_port) ||
(dst_port && dst_port == tunnel->dst_port))) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- return 0;
+ return tunnel;
}
}
- return -ENODEV;
+ return NULL;
+}
+
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+{
+ if (!tunnel)
+ return;
+
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+
+ /*
+ * In case of DP tunnel make sure the DP IN resource is deallocated
+ * properly.
+ */
+ if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+
+ tb_switch_dealloc_dp_resource(in->sw, in);
+ }
+
+ tb_tunnel_free(tunnel);
}
/**
@@ -250,11 +309,8 @@ static void tb_free_invalid_tunnels(struct tb *tb)
struct tb_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_invalid(tunnel)) {
- tb_tunnel_deactivate(tunnel);
- list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
- }
+ if (tb_tunnel_is_invalid(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
}
}
@@ -263,14 +319,15 @@ static void tb_free_invalid_tunnels(struct tb *tb)
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
if (port->remote->sw->is_unplugged) {
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -289,10 +346,13 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
static struct tb_port *tb_find_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
- for (i = 1; i <= sw->config.max_port_number; i++)
- if (sw->ports[i].config.type == type)
- return &sw->ports[i];
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
return NULL;
}
@@ -304,18 +364,18 @@ static struct tb_port *tb_find_port(struct tb_switch *sw,
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
enum tb_port_type type)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- if (tb_is_upstream_port(&sw->ports[i]))
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
continue;
- if (sw->ports[i].config.type != type)
+ if (port->config.type != type)
continue;
- if (!sw->ports[i].cap_adap)
+ if (!port->cap_adap)
continue;
- if (tb_port_is_enabled(&sw->ports[i]))
+ if (tb_port_is_enabled(port))
continue;
- return &sw->ports[i];
+ return port;
}
return NULL;
}
@@ -336,10 +396,13 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
* Hard-coded Thunderbolt port to PCIe down port mapping
* per controller.
*/
- if (tb_switch_is_cr(sw))
+ if (tb_switch_is_cactus_ridge(sw) ||
+ tb_switch_is_alpine_ridge(sw))
index = !phy_port ? 6 : 7;
- else if (tb_switch_is_fr(sw))
+ else if (tb_switch_is_falcon_ridge(sw))
index = !phy_port ? 6 : 8;
+ else if (tb_switch_is_titan_ridge(sw))
+ index = !phy_port ? 8 : 9;
else
goto out;
@@ -358,42 +421,162 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
+static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
{
- struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw = out->sw;
struct tb_tunnel *tunnel;
- struct tb_port *in;
+ int bw, available_bw = 40000;
- if (tb_port_is_enabled(out))
- return 0;
+ while (sw && sw != in->sw) {
+ bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */
+ /* Leave 10% guard band */
+ bw -= bw / 10;
+
+ /*
+ * Check for any active DP tunnels that go through this
+ * switch and reduce their consumed bandwidth from
+ * available.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int consumed_bw;
+
+ if (!tb_tunnel_switch_on_path(tunnel, sw))
+ continue;
+
+ consumed_bw = tb_tunnel_consumed_bandwidth(tunnel);
+ if (consumed_bw < 0)
+ return consumed_bw;
+
+ bw -= consumed_bw;
+ }
- do {
- sw = tb_to_switch(sw->dev.parent);
- if (!sw)
- return 0;
- in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
- } while (!in);
+ if (bw < available_bw)
+ available_bw = bw;
- tunnel = tb_tunnel_alloc_dp(tb, in, out);
+ sw = tb_switch_parent(sw);
+ }
+
+ return available_bw;
+}
+
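For illustration, the arithmetic for a single hop on a bonded gen 3 link (20 Gb/s x2) with one existing HBR2 x4 DP tunnel on the path (assumed numbers):

    /*
     * raw link bandwidth:    20 * 2 * 1000 = 40000 Mb/s
     * minus 10% guard band:  40000 - 4000  = 36000 Mb/s
     * minus HBR2 x4 tunnel:  36000 - 17280 = 18720 Mb/s available
     */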
+static void tb_tunnel_dp(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+ struct tb_tunnel *tunnel;
+ int available_bw;
+
+ /*
+ * Find a pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "in use\n");
+ continue;
+ }
+
+ tb_port_dbg(port, "available\n");
+
+ if (!in && tb_port_is_dpin(port))
+ in = port;
+ else if (!out && tb_port_is_dpout(port))
+ out = port;
+ }
+
+ if (!in) {
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+ return;
+ }
+ if (!out) {
+ tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+ return;
+ }
+
+ if (tb_switch_alloc_dp_resource(in->sw, in)) {
+ tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+ return;
+ }
+
+ /* Calculate available bandwidth between in and out */
+ available_bw = tb_available_bw(tcm, in, out);
+ if (available_bw < 0) {
+ tb_warn(tb, "failed to determine available bandwidth\n");
+ goto dealloc_dp;
+ }
+
+ tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n",
+ available_bw);
+
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw);
if (!tunnel) {
- tb_port_dbg(out, "DP tunnel allocation failed\n");
- return -ENOMEM;
+ tb_port_dbg(out, "could not allocate DP tunnel\n");
+ goto dealloc_dp;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
tb_tunnel_free(tunnel);
- return -EIO;
+ goto dealloc_dp;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- return 0;
+ return;
+
+dealloc_dp:
+ tb_switch_dealloc_dp_resource(in->sw, in);
}
-static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
- tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ if (tb_port_is_dpin(port)) {
+ tb_port_dbg(port, "DP IN resource unavailable\n");
+ in = port;
+ out = NULL;
+ } else {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ in = NULL;
+ out = port;
+ }
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+ tb_deactivate_and_free_tunnel(tunnel);
+ list_del_init(&port->list);
+
+ /*
+ * See if there is another DP OUT port that can be used to
+ * create another tunnel.
+ */
+ tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ if (tb_port_is_enabled(port))
+ return;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+
+ /* Look for suitable DP IN <-> DP OUT pairs now */
+ tb_tunnel_dp(tb);
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
@@ -468,6 +651,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
struct tb_port *dst_port;
+ struct tb_tunnel *tunnel;
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
@@ -478,7 +662,8 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
* case of cable disconnect) so it is fine if we cannot find it
* here anymore.
*/
- tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tb_deactivate_and_free_tunnel(tunnel);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
@@ -533,10 +718,14 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_port_dbg(port, "switch unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
+ /* Maybe we can create another DP tunnel */
+ tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@@ -553,8 +742,8 @@ static void tb_handle_hotplug(struct work_struct *work)
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd);
tb_xdomain_put(xd);
- } else if (tb_port_is_dpout(port)) {
- tb_teardown_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_unavailable(tb, port);
} else {
tb_port_dbg(port,
"got unplug event for disconnected port, ignoring\n");
@@ -567,8 +756,8 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_scan_port(port);
if (!port->remote)
tb_port_dbg(port, "hotplug: no switch found\n");
- } else if (tb_port_is_dpout(port)) {
- tb_tunnel_dp(tb, port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_available(tb, port);
}
}
@@ -681,6 +870,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb->root_switch);
+ /* Add DP IN resources for the root switch */
+ tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -702,6 +893,21 @@ static int tb_suspend_noirq(struct tb *tb)
return 0;
}
+static void tb_restore_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (tb_switch_lane_bonding_enable(port->remote->sw))
+ dev_warn(&sw->dev, "failed to restore lane bonding\n");
+
+ tb_restore_children(port->remote->sw);
+ }
+}
+
static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
@@ -715,6 +921,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
if (!list_empty(&tcm->tunnel_list)) {
@@ -734,11 +941,10 @@ static int tb_resume_noirq(struct tb *tb)
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
- int i, ret = 0;
-
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ struct tb_port *port;
+ int ret = 0;
+ tb_switch_for_each_port(sw, port) {
if (tb_is_upstream_port(port))
continue;
if (port->xdomain && port->xdomain->is_unplugged) {
@@ -783,9 +989,6 @@ struct tb *tb_probe(struct tb_nhi *nhi)
struct tb_cm *tcm;
struct tb *tb;
- if (!x86_apple_machine)
- return NULL;
-
tb = tb_domain_alloc(nhi, sizeof(*tcm));
if (!tb)
return NULL;
@@ -795,6 +998,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tcm = tb_priv(tb);
INIT_LIST_HEAD(&tcm->tunnel_list);
+ INIT_LIST_HEAD(&tcm->dp_resources);
return tb;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 6407d529871d..ec851f20c571 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -61,6 +61,8 @@ struct tb_switch_nvm {
* @device: Device ID of the switch
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
* @cap_lc: Offset to the link controller capability (%0 if not found)
@@ -97,6 +99,8 @@ struct tb_switch {
u16 device;
const char *vendor_name;
const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
unsigned int generation;
int cap_plug_events;
int cap_lc;
@@ -127,11 +131,13 @@ struct tb_switch {
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @port: Port number on switch
* @disabled: Disabled by eeprom
+ * @bonded: true if the port is bonded (two lanes combined as one)
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
* @link_nr: Is this primary or secondary port on the dual_link.
* @in_hopids: Currently allocated input HopIDs
* @out_hopids: Currently allocated output HopIDs
+ * @list: Used to link ports to DP resources list
*/
struct tb_port {
struct tb_regs_port_header config;
@@ -142,10 +148,12 @@ struct tb_port {
int cap_adap;
u8 port;
bool disabled;
+ bool bonded;
struct tb_port *dual_link_port;
u8 link_nr:1;
struct ida in_hopids;
struct ida out_hopids;
+ struct list_head list;
};
/**
@@ -399,7 +407,7 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
length);
}
-static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
if (sw->is_unplugged)
@@ -530,6 +538,17 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+/**
+ * tb_switch_for_each_port() - Iterate over each switch port
+ * @sw: Switch whose ports to iterate
+ * @p: Port used as iterator
+ *
+ * Iterates over each switch port skipping the control port (port %0).
+ */
+#define tb_switch_for_each_port(sw, p) \
+ for ((p) = &(sw)->ports[1]; \
+ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
+
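For illustration, a sketch of the macro in use; the helper below is hypothetical and not part of this series:

    static int tb_count_lane_adapters(struct tb_switch *sw)
    {
    	struct tb_port *port;
    	int count = 0;

    	tb_switch_for_each_port(sw, port) {
    		if (tb_port_is_null(port))	/* lane (null) adapter */
    			count++;
    	}

    	return count;
    }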
static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
{
if (sw)
@@ -559,17 +578,17 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
return tb_to_switch(sw->dev.parent);
}
-static inline bool tb_switch_is_lr(const struct tb_switch *sw)
+static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
}
-static inline bool tb_switch_is_er(const struct tb_switch *sw)
+static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
{
return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
}
-static inline bool tb_switch_is_cr(const struct tb_switch *sw)
+static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
@@ -580,7 +599,7 @@ static inline bool tb_switch_is_cr(const struct tb_switch *sw)
}
}
-static inline bool tb_switch_is_fr(const struct tb_switch *sw)
+static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
{
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
@@ -591,6 +610,52 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw)
}
}
+static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * tb_switch_is_icm() - Is the switch handled by ICM firmware
+ * @sw: Switch to check
+ *
+ * This function can be called when there is a need to differentiate
+ * whether the ICM firmware or the SW CM is handling @sw. It is valid to
+ * call this after tb_switch_alloc() and tb_switch_configure() have been
+ * called (the latter only in the SW CM case).
+ */
+static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+{
+ return !sw->config.enabled;
+}
+
+int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
@@ -626,6 +691,8 @@ void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
+bool tb_path_switch_on_path(const struct tb_path *path,
+ const struct tb_switch *sw);
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
@@ -634,6 +701,10 @@ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
int tb_lc_configure_link(struct tb_switch *sw);
void tb_lc_unconfigure_link(struct tb_switch *sw);
int tb_lc_set_sleep(struct tb_switch *sw);
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
static inline int tb_route_length(u64 route)
{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 4b641e4ee0c5..3705057723b6 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -122,6 +122,8 @@ struct icm_pkg_header {
#define ICM_FLAGS_NO_KEY BIT(1)
#define ICM_FLAGS_SLEVEL_SHIFT 3
#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3)
+#define ICM_FLAGS_DUAL_LANE BIT(5)
+#define ICM_FLAGS_SPEED_GEN3 BIT(7)
#define ICM_FLAGS_WRITE BIT(7)
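For illustration, an assumed decode of the new flags into the link attributes added to struct tb_switch (the ICM-side handling is not part of this hunk, so the field names here are hypothetical):

    u32 flags = pkg->hdr.flags;	/* hypothetical packet header field */

    sw->link_speed = (flags & ICM_FLAGS_SPEED_GEN3) ? 20 : 10;
    sw->link_width = (flags & ICM_FLAGS_DUAL_LANE) ? 2 : 1;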
struct icm_pkg_driver_ready {
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index deb9d4a977b9..7ee45b73c7f7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -211,37 +211,71 @@ struct tb_regs_port_header {
} __packed;
-/* DWORD 4 */
-#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
-#define TB_PORT_MAX_CREDITS_SHIFT 20
-#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
-/* DWORD 5 */
-#define TB_PORT_LCA_SHIFT 22
-#define TB_PORT_LCA_MASK GENMASK(28, 22)
+/* Basic adapter configuration registers */
+#define ADP_CS_4 0x04
+#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
+#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_5 0x05
+#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
+#define ADP_CS_5_LCA_SHIFT 22
+
+/* Lane adapter registers */
+#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
+#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_LB BIT(15)
+#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
+#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
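For illustration, a sketch of decoding the current link speed from LANE_ADP_CS_1 (hypothetical helper; the real accessors live elsewhere in the series):

    static int lane_speed_gbps(u32 val)
    {
    	u32 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
    		    LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

    	switch (speed) {
    	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
    		return 20;
    	case LANE_ADP_CS_1_CURRENT_SPEED_GEN2:
    		return 10;
    	default:
    		return -EINVAL;
    	}
    }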
/* Display Port adapter registers */
-
-/* DWORD 0 */
-#define TB_DP_VIDEO_HOPID_SHIFT 16
-#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16)
-#define TB_DP_AUX_EN BIT(30)
-#define TB_DP_VIDEO_EN BIT(31)
-/* DWORD 1 */
-#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0)
-#define TB_DP_AUX_RX_HOPID_SHIFT 11
-#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11)
-/* DWORD 2 */
-#define TB_DP_HDP BIT(6)
-/* DWORD 3 */
-#define TB_DP_HPDC BIT(9)
-/* DWORD 4 */
-#define TB_DP_LOCAL_CAP 0x4
-/* DWORD 5 */
-#define TB_DP_REMOTE_CAP 0x5
+#define ADP_DP_CS_0 0x00
+#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
+#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16
+#define ADP_DP_CS_0_AE BIT(30)
+#define ADP_DP_CS_0_VE BIT(31)
+#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0)
+#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
+#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
+#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_3 0x03
+#define ADP_DP_CS_3_HDPC BIT(9)
+#define DP_LOCAL_CAP 0x04
+#define DP_REMOTE_CAP 0x05
+#define DP_STATUS_CTRL 0x06
+#define DP_STATUS_CTRL_CMHS BIT(25)
+#define DP_STATUS_CTRL_UF BIT(26)
+#define DP_COMMON_CAP 0x07
+/*
+ * The DP_COMMON_CAP offsets also apply to DP_LOCAL_CAP and DP_REMOTE_CAP,
+ * with the exception of DPRX done.
+ */
+#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8)
+#define DP_COMMON_CAP_RATE_SHIFT 8
+#define DP_COMMON_CAP_RATE_RBR 0x0
+#define DP_COMMON_CAP_RATE_HBR 0x1
+#define DP_COMMON_CAP_RATE_HBR2 0x2
+#define DP_COMMON_CAP_RATE_HBR3 0x3
+#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12)
+#define DP_COMMON_CAP_LANES_SHIFT 12
+#define DP_COMMON_CAP_1_LANE 0x0
+#define DP_COMMON_CAP_2_LANES 0x1
+#define DP_COMMON_CAP_4_LANES 0x2
+#define DP_COMMON_CAP_DPRX_DONE BIT(31)
/* PCIe adapter registers */
-
-#define TB_PCI_EN BIT(31)
+#define ADP_PCIE_CS_0 0x00
+#define ADP_PCIE_CS_0_PE BIT(31)
/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
struct tb_regs_hop {
@@ -278,8 +312,17 @@ struct tb_regs_hop {
#define TB_LC_DESC_PORT_SIZE_SHIFT 16
#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
/* Link controller registers */
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
#define TB_LC_SX_CTRL 0x96
#define TB_LC_SX_CTRL_L1C BIT(16)
#define TB_LC_SX_CTRL_L2C BIT(20)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 5a99234826e7..0d3463c4e24a 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -6,6 +6,7 @@
* Copyright (C) 2019, Intel Corporation
*/
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -90,6 +91,22 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
return 0;
}
+static int tb_initial_credits(const struct tb_switch *sw)
+{
+ /* If the path is complete, sw is not NULL */
+ if (sw) {
+ /* More credits for faster link */
+ switch (sw->link_speed * sw->link_width) {
+ case 40:
+ return 32;
+ case 20:
+ return 24;
+ }
+ }
+
+ return 16;
+}
+
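For illustration, the resulting mapping (speed in Gb/s times width):

    /*
     * 20 x2 (bonded gen 3)   -> 40 -> 32 credits
     * 20 x1 or 10 x2         -> 20 -> 24 credits
     * anything else/unknown  ->       16 credits
     */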
static void tb_pci_init_path(struct tb_path *path)
{
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@@ -101,7 +118,8 @@ static void tb_pci_init_path(struct tb_path *path)
path->drop_packages = 0;
path->nfc_credits = 0;
path->hops[0].initial_credits = 7;
- path->hops[1].initial_credits = 16;
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
}
/**
@@ -225,11 +243,174 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
}
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ /* Both ends need to support this */
+ if (!tb_switch_is_titan_ridge(in->sw) ||
+ !tb_switch_is_titan_ridge(out->sw))
+ return 0;
+
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
+
+ ret = tb_port_write(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+ if (!(val & DP_STATUS_CTRL_CMHS))
+ return 0;
+ usleep_range(10, 100);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static inline u32 tb_dp_cap_get_rate(u32 val)
+{
+ u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
+
+ switch (rate) {
+ case DP_COMMON_CAP_RATE_RBR:
+ return 1620;
+ case DP_COMMON_CAP_RATE_HBR:
+ return 2700;
+ case DP_COMMON_CAP_RATE_HBR2:
+ return 5400;
+ case DP_COMMON_CAP_RATE_HBR3:
+ return 8100;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
+{
+ val &= ~DP_COMMON_CAP_RATE_MASK;
+ switch (rate) {
+ default:
+ WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
+ /* Fallthrough */
+ case 1620:
+ val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 2700:
+ val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 5400:
+ val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 8100:
+ val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static inline u32 tb_dp_cap_get_lanes(u32 val)
+{
+ u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
+
+ switch (lanes) {
+ case DP_COMMON_CAP_1_LANE:
+ return 1;
+ case DP_COMMON_CAP_2_LANES:
+ return 2;
+ case DP_COMMON_CAP_4_LANES:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
+{
+ val &= ~DP_COMMON_CAP_LANES_MASK;
+ switch (lanes) {
+ default:
+ WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
+ lanes);
+ /* Fallthrough */
+ case 1:
+ val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
+{
+ /* Tunneling removes the DP 8b/10b encoding */
+ return rate * lanes * 8 / 10;
+}
+
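For illustration, two worked values:

    /*
     * HBR2 x4: 5400 * 4 * 8 / 10 = 17280 Mb/s
     * RBR  x1: 1620 * 1 * 8 / 10 =  1296 Mb/s
     */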
+static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
+ u32 out_rate, u32 out_lanes, u32 *new_rate,
+ u32 *new_lanes)
+{
+ static const u32 dp_bw[][2] = {
+ /* Mb/s, lanes */
+ { 8100, 4 }, /* 25920 Mb/s */
+ { 5400, 4 }, /* 17280 Mb/s */
+ { 8100, 2 }, /* 12960 Mb/s */
+ { 2700, 4 }, /* 8640 Mb/s */
+ { 5400, 2 }, /* 8640 Mb/s */
+ { 8100, 1 }, /* 6480 Mb/s */
+ { 1620, 4 }, /* 5184 Mb/s */
+ { 5400, 1 }, /* 4320 Mb/s */
+ { 2700, 2 }, /* 4320 Mb/s */
+ { 1620, 2 }, /* 2592 Mb/s */
+ { 2700, 1 }, /* 2160 Mb/s */
+ { 1620, 1 }, /* 1296 Mb/s */
+ };
+ unsigned int i;
+
+ /*
+ * Find a combination that can fit into max_bw and does not
+ * exceed the maximum rate and lanes supported by the DP OUT and
+ * DP IN adapters.
+ */
+ for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
+ if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
+ continue;
+
+ if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
+ continue;
+
+ if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
+ *new_rate = dp_bw[i][0];
+ *new_lanes = dp_bw[i][1];
+ return 0;
+ }
+ }
+
+ return -ENOSR;
+}
+
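The table is ordered by decreasing bandwidth, so the first entry that both adapters support and that fits max_bw wins. For illustration, an assumed call with DP IN caps HBR3 x4, DP OUT caps HBR2 x4 and 8640 Mb/s available:

    u32 rate, lanes;
    int ret = tb_dp_reduce_bandwidth(8640, 8100, 4, 5400, 4, &rate, &lanes);
    /*
     * {8100,4} and {8100,2} exceed the OUT rate, {5400,4} needs 17280 Mb/s,
     * so ret == 0 with rate == 2700 and lanes == 4 (exactly 8640 Mb/s).
     */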
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
+ u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- u32 in_dp_cap, out_dp_cap;
int ret;
/*
@@ -239,25 +420,71 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
if (in->sw->generation < 2 || out->sw->generation < 2)
return 0;
+ /*
+ * Perform connection manager handshake between IN and OUT ports
+ * before capabilities exchange can take place.
+ */
+ ret = tb_dp_cm_handshake(in, out);
+ if (ret)
+ return ret;
+
/* Read both DP_LOCAL_CAP registers */
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_LOCAL_CAP, 1);
+ in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_LOCAL_CAP, 1);
+ out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
/* Write IN local caps to OUT remote caps */
ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
- out->cap_adap + TB_DP_REMOTE_CAP, 1);
+ out->cap_adap + DP_REMOTE_CAP, 1);
if (ret)
return ret;
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+ /*
+ * If the tunnel bandwidth is limited (max_bw is set) then see
+ * if we need to reduce bandwidth to fit there.
+ */
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+ bw = tb_dp_bandwidth(out_rate, out_lanes);
+ tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
+
+ if (tunnel->max_bw && bw > tunnel->max_bw) {
+ u32 new_rate, new_lanes, new_bw;
+
+ ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
+ out_rate, out_lanes, &new_rate,
+ &new_lanes);
+ if (ret) {
+ tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ return ret;
+ }
+
+ new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+ tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
+
+ /*
+ * Set new rate and number of lanes before writing it to
+ * the IN port remote caps.
+ */
+ out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
+ out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
+ }
+
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
- in->cap_adap + TB_DP_REMOTE_CAP, 1);
+ in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
@@ -297,6 +524,56 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
+static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+ const struct tb_switch *sw = in->sw;
+ u32 val, rate = 0, lanes = 0;
+ int ret;
+
+ if (tb_switch_is_titan_ridge(sw)) {
+ int timeout = 10;
+
+ /*
+ * Wait for DPRX done. Normally it should already be set
+ * for an active tunnel.
+ */
+ do {
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ break;
+ }
+ msleep(250);
+ } while (timeout--);
+
+ if (timeout < 0)
+ return -ETIMEDOUT;
+ } else if (sw->generation >= 2) {
+ /*
+ * Read from the copied remote cap so that we take into
+ * account if capabilities were reduced during exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ } else {
+ /* No bandwidth management for legacy devices */
+ return 0;
+ }
+
+ return tb_dp_bandwidth(rate, lanes);
+}
+
static void tb_dp_init_aux_path(struct tb_path *path)
{
int i;
@@ -324,12 +601,12 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover)
path->weight = 1;
if (discover) {
- path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
+ path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
} else {
u32 max_credits;
- max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
/* Leave some credits for AUX path */
path->nfc_credits = min(max_credits - 2, 12U);
}
@@ -361,6 +638,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
@@ -419,6 +697,7 @@ err_free:
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
+ * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
*
* Allocates a tunnel between @in and @out that is capable of tunneling
* Display Port traffic.
@@ -426,7 +705,7 @@ err_free:
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out)
+ struct tb_port *out, int max_bw)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -441,8 +720,10 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->init = tb_dp_xchg_caps;
tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
+ tunnel->max_bw = max_bw;
paths = tunnel->paths;
@@ -478,8 +759,8 @@ static u32 tb_dma_credits(struct tb_port *nhi)
{
u32 max_credits;
- max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
- TB_PORT_MAX_CREDITS_SHIFT;
+ max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
return min(max_credits, 13U);
}
@@ -689,3 +970,62 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
tb_path_deactivate(tunnel->paths[i]);
}
}
+
+/**
+ * tb_tunnel_switch_on_path() - Does the tunnel go through switch
+ * @tunnel: Tunnel to check
+ * @sw: Switch to check
+ *
+ * Returns true if @tunnel goes through @sw (direction does not matter),
+ * false otherwise.
+ */
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ continue;
+ if (tb_path_switch_on_path(tunnel->paths[i], sw))
+ return true;
+ }
+
+ return false;
+}
+
+static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ return false;
+ if (!tunnel->paths[i]->activated)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
+ * @tunnel: Tunnel to check
+ *
+ * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
+ * is not active or does not consume bandwidth.
+ */
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return 0;
+
+ if (tunnel->consumed_bandwidth) {
+ int ret = tunnel->consumed_bandwidth(tunnel);
+
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index c68bbcd3a62c..ba888da005f5 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -27,8 +27,11 @@ enum tb_tunnel_type {
* @npaths: Number of paths in @paths
* @init: Optional tunnel specific initialization
* @activate: Optional tunnel specific activation/deactivation
+ * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP).
+ * Only set if the bandwidth needs to be limited.
*/
struct tb_tunnel {
struct tb *tb;
@@ -38,8 +41,10 @@ struct tb_tunnel {
size_t npaths;
int (*init)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*consumed_bandwidth)(struct tb_tunnel *tunnel);
struct list_head list;
enum tb_tunnel_type type;
+ unsigned int max_bw;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
@@ -47,7 +52,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
- struct tb_port *out);
+ struct tb_port *out, int max_bw);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_ring,
int transmit_path, int receive_ring,
@@ -58,6 +63,9 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel);
int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_switch *sw);
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel);
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 4e17a7c7bf0a..880d784398a3 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1404,10 +1404,9 @@ struct tb_xdomain_lookup {
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
const struct tb_xdomain_lookup *lookup)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ tb_switch_for_each_port(sw, port) {
struct tb_xdomain *xd;
if (port->xdomain) {
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index c7623f99ac0f..a312cb33a99b 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -82,20 +82,20 @@ config HW_CONSOLE
default y
config VT_HW_CONSOLE_BINDING
- bool "Support for binding and unbinding console drivers"
- depends on HW_CONSOLE
- ---help---
- The virtual terminal is the device that interacts with the physical
- terminal through console drivers. On these systems, at least one
- console driver is loaded. In other configurations, additional console
- drivers may be enabled, such as the framebuffer console. If more than
- 1 console driver is enabled, setting this to 'y' will allow you to
- select the console driver that will serve as the backend for the
- virtual terminals.
-
- See <file:Documentation/driver-api/console.rst> for more
- information. For framebuffer console users, please refer to
- <file:Documentation/fb/fbcon.rst>.
+ bool "Support for binding and unbinding console drivers"
+ depends on HW_CONSOLE
+ ---help---
+ The virtual terminal is the device that interacts with the physical
+ terminal through console drivers. On these systems, at least one
+ console driver is loaded. In other configurations, additional console
+ drivers may be enabled, such as the framebuffer console. If more than
+ 1 console driver is enabled, setting this to 'y' will allow you to
+ select the console driver that will serve as the backend for the
+ virtual terminals.
+
+ See <file:Documentation/driver-api/console.rst> for more
+ information. For framebuffer console users, please refer to
+ <file:Documentation/fb/fbcon.rst>.
config UNIX98_PTYS
bool "Unix98 PTY support" if EXPERT
@@ -173,15 +173,15 @@ config ROCKETPORT
depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
help
This driver supports Comtrol RocketPort and RocketModem PCI boards.
- These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
- modems. For information about the RocketPort/RocketModem boards
- and this driver read <file:Documentation/driver-api/serial/rocket.rst>.
+ These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
+ modems. For information about the RocketPort/RocketModem boards
+ and this driver read <file:Documentation/driver-api/serial/rocket.rst>.
To compile this driver as a module, choose M here: the
module will be called rocket.
If you want to compile this driver into the kernel, say Y here. If
- you don't have a Comtrol RocketPort/RocketModem card installed, say N.
+ you don't have a Comtrol RocketPort/RocketModem card installed, say N.
config CYCLADES
tristate "Cyclades async mux support"
@@ -437,8 +437,8 @@ config MIPS_EJTAG_FDC_KGDB
depends on MIPS_EJTAG_FDC_TTY && KGDB
default y
help
- This enables the use of KGDB over an FDC channel, allowing KGDB to be
- used remotely or when a serial port isn't available.
+ This enables the use of KGDB over an FDC channel, allowing KGDB to be
+ used remotely or when a serial port isn't available.
config MIPS_EJTAG_FDC_KGDB_CHAN
int "KGDB FDC channel"
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 8330fd809a05..13f63c01c589 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -22,18 +22,8 @@
*
*/
-/*
- * Serial driver configuration section. Here are the various options:
- *
- * SERIAL_PARANOIA_CHECK
- * Check the magic number for the async_structure where
- * ever possible.
- */
-
#include <linux/delay.h>
-#undef SERIAL_PARANOIA_CHECK
-
/* Set of debugging defines */
#undef SERIAL_DEBUG_INTR
@@ -132,28 +122,6 @@ static struct serial_state rs_table[1];
#define serial_isroot() (capable(CAP_SYS_ADMIN))
-
-static inline int serial_paranoia_check(struct serial_state *info,
- char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- static const char *badmagic =
- "Warning: bad magic number for serial struct (%s) in %s\n";
- static const char *badinfo =
- "Warning: null async_struct for (%s) in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (info->magic != SERIAL_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
/* some serial hardware definitions */
#define SDR_OVRUN (1<<15)
#define SDR_RBF (1<<14)
@@ -189,9 +157,6 @@ static void rs_stop(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_stop"))
- return;
-
local_irq_save(flags);
if (info->IER & UART_IER_THRI) {
info->IER &= ~UART_IER_THRI;
@@ -209,9 +174,6 @@ static void rs_start(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_start"))
- return;
-
local_irq_save(flags);
if (info->xmit.head != info->xmit.tail
&& info->xmit.buf
@@ -783,9 +745,6 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch)
info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_put_char"))
- return 0;
-
if (!info->xmit.buf)
return 0;
@@ -808,9 +767,6 @@ static void rs_flush_chars(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
- return;
-
if (info->xmit.head == info->xmit.tail
|| tty->stopped
|| tty->hw_stopped
@@ -833,9 +789,6 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_write"))
- return 0;
-
if (!info->xmit.buf)
return 0;
@@ -878,8 +831,6 @@ static int rs_write_room(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_write_room"))
- return 0;
return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
@@ -887,8 +838,6 @@ static int rs_chars_in_buffer(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
- return 0;
return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
}
@@ -897,8 +846,6 @@ static void rs_flush_buffer(struct tty_struct *tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
- return;
local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
local_irq_restore(flags);
@@ -914,9 +861,6 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_send_xchar"))
- return;
-
info->x_char = ch;
if (ch) {
/* Make sure transmit interrupts are on */
@@ -952,9 +896,6 @@ static void rs_throttle(struct tty_struct * tty)
printk("throttle %s ....\n", tty_name(tty));
#endif
- if (serial_paranoia_check(info, tty->name, "rs_throttle"))
- return;
-
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
@@ -974,9 +915,6 @@ static void rs_unthrottle(struct tty_struct * tty)
printk("unthrottle %s ....\n", tty_name(tty));
#endif
- if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
- return;
-
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
@@ -1109,8 +1047,6 @@ static int rs_tiocmget(struct tty_struct *tty)
unsigned char control, status;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
if (tty_io_error(tty))
return -EIO;
@@ -1131,8 +1067,6 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
if (tty_io_error(tty))
return -EIO;
@@ -1155,12 +1089,8 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
*/
static int rs_break(struct tty_struct *tty, int break_state)
{
- struct serial_state *info = tty->driver_data;
unsigned long flags;
- if (serial_paranoia_check(info, tty->name, "rs_break"))
- return -EINVAL;
-
local_irq_save(flags);
if (break_state == -1)
custom.adkcon = AC_SETCLR | AC_UARTBRK;
@@ -1212,9 +1142,6 @@ static int rs_ioctl(struct tty_struct *tty,
DEFINE_WAIT(wait);
int ret;
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
-
if ((cmd != TIOCSERCONFIG) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
if (tty_io_error(tty))
@@ -1333,9 +1260,6 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
struct serial_state *state = tty->driver_data;
struct tty_port *port = &state->tport;
- if (serial_paranoia_check(state, tty->name, "rs_close"))
- return;
-
if (tty_port_close_start(port, tty, filp) == 0)
return;
@@ -1379,9 +1303,6 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
unsigned long orig_jiffies, char_time;
int lsr;
- if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent"))
- return;
-
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
@@ -1440,9 +1361,6 @@ static void rs_hangup(struct tty_struct *tty)
{
struct serial_state *info = tty->driver_data;
- if (serial_paranoia_check(info, tty->name, "rs_hangup"))
- return;
-
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
@@ -1467,8 +1385,6 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
port->tty = tty;
tty->driver_data = info;
tty->port = port;
- if (serial_paranoia_check(info, tty->name, "rs_open"))
- return -ENODEV;
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 4d22b911111f..6a3c97d345a0 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -70,26 +70,26 @@ config HVC_XEN_FRONTEND
Xen driver for secondary virtual consoles
config HVC_UDBG
- bool "udbg based fake hypervisor console"
- depends on PPC
- select HVC_DRIVER
- help
- This is meant to be used during HW bring up or debugging when
- no other console mechanism exist but udbg, to get you a quick
- console for userspace. Do NOT enable in production kernels.
+ bool "udbg based fake hypervisor console"
+ depends on PPC
+ select HVC_DRIVER
+ help
+ This is meant to be used during HW bring up or debugging when
+ no other console mechanism exists but udbg, to get you a quick
+ console for userspace. Do NOT enable in production kernels.
config HVC_DCC
- bool "ARM JTAG DCC console"
- depends on ARM || ARM64
- select HVC_DRIVER
- help
- This console uses the JTAG DCC on ARM to create a console under the HVC
- driver. This console is used through a JTAG only on ARM. If you don't have
- a JTAG then you probably don't want this option.
+ bool "ARM JTAG DCC console"
+ depends on ARM || ARM64
+ select HVC_DRIVER
+ help
+ This console uses the JTAG DCC on ARM to create a console under the HVC
+ driver. This console is used through a JTAG only on ARM. If you don't have
+ a JTAG then you probably don't want this option.
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV
+ depends on RISCV_SBI
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 02629a1f193d..8e0edb7d93fd 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,7 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
+#include <linux/console.h>
#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <asm/dcc.h>
#include <asm/processor.h>
@@ -12,6 +15,31 @@
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
+static void dcc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (__dcc_getstatus() & DCC_STATUS_TX)
+ cpu_relax();
+
+ __dcc_putchar(ch);
+}
+
+static void dcc_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, dcc_uart_console_putchar);
+}
+
+static int __init dcc_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = dcc_early_write;
+
+ return 0;
+}
+
+EARLYCON_DECLARE(dcc, dcc_early_console_setup);
+
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
int i;
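
The new hvc_dcc earlycon path is a classic polled putchar: spin until the TX-busy status bit clears, then emit the byte, with uart_console_write() looping that over the whole string. A minimal userspace model of the polling loop (the variable and bit below are stand-ins for __dcc_getstatus()/__dcc_putchar()):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t status;     /* stand-in for __dcc_getstatus() */
#define STATUS_TX (1u << 29)         /* mirrors DCC_STATUS_TX */

static void dcc_putchar(char ch)
{
    while (status & STATUS_TX)
        ;                            /* cpu_relax() in the kernel version */
    putchar(ch);                     /* __dcc_putchar(ch) in the kernel version */
}

int main(void)
{
    const char *s = "hello, earlycon\n";

    while (*s)
        dcc_putchar(*s++);
    return 0;
}

With the EARLYCON_DECLARE() hook in place, booting with earlycon=dcc on the kernel command line should select this console long before the regular tty layer is up.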
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 5ba6816ebf81..fbaa4ec85560 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1222,22 +1222,28 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
*/
static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
{
- struct rocket_ports tmp;
- int board;
+ struct rocket_ports *tmp;
+ int board, ret = 0;
- memset(&tmp, 0, sizeof (tmp));
- tmp.tty_major = rocket_driver->major;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->tty_major = rocket_driver->major;
for (board = 0; board < 4; board++) {
- tmp.rocketModel[board].model = rocketModel[board].model;
- strcpy(tmp.rocketModel[board].modelString, rocketModel[board].modelString);
- tmp.rocketModel[board].numPorts = rocketModel[board].numPorts;
- tmp.rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
- tmp.rocketModel[board].startingPortNumber = rocketModel[board].startingPortNumber;
- }
- if (copy_to_user(retports, &tmp, sizeof (*retports)))
- return -EFAULT;
- return 0;
+ tmp->rocketModel[board].model = rocketModel[board].model;
+ strcpy(tmp->rocketModel[board].modelString,
+ rocketModel[board].modelString);
+ tmp->rocketModel[board].numPorts = rocketModel[board].numPorts;
+ tmp->rocketModel[board].loadrm2 = rocketModel[board].loadrm2;
+ tmp->rocketModel[board].startingPortNumber =
+ rocketModel[board].startingPortNumber;
+ }
+ if (copy_to_user(retports, tmp, sizeof(*retports)))
+ ret = -EFAULT;
+ kfree(tmp);
+ return ret;
}
static int reset_rm2(struct r_port *info, void __user *arg)
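
The get_ports() rework above is the usual cure for a struct too large for the kernel stack: allocate it with kzalloc(), fill it, copy it out, and free it on every exit path. A toy userspace model of the shape (the copy helper stands in for copy_to_user(), which returns the number of bytes left uncopied):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct big { char payload[4096]; };  /* stand-in for struct rocket_ports */

/* Returns 0 on success, like copy_to_user(): 0 bytes left uncopied. */
static size_t copy_out(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static int get_ports(struct big *user_buf)
{
    struct big *tmp;
    int ret = 0;

    tmp = calloc(1, sizeof(*tmp));   /* kzalloc(..., GFP_KERNEL) */
    if (!tmp)
        return -ENOMEM;

    memset(tmp->payload, 'x', sizeof(tmp->payload));
    if (copy_out(user_buf, tmp, sizeof(*tmp)))
        ret = -EFAULT;

    free(tmp);                       /* kfree() on every path after alloc */
    return ret;
}

int main(void)
{
    struct big out;

    printf("get_ports -> %d\n", get_ports(&out));
    return 0;
}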
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index a0ac16ee6575..226adeec2aed 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -552,16 +552,97 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
}
#ifdef CONFIG_ACPI
+
+#define SERDEV_ACPI_MAX_SCAN_DEPTH 32
+
+struct acpi_serdev_lookup {
+ acpi_handle device_handle;
+ acpi_handle controller_handle;
+ int n;
+ int index;
+};
+
+static int acpi_serdev_parse_resource(struct acpi_resource *ares, void *data)
+{
+ struct acpi_serdev_lookup *lookup = data;
+ struct acpi_resource_uart_serialbus *sb;
+ acpi_status status;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return 1;
+
+ if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+ return 1;
+
+ if (lookup->index != -1 && lookup->n++ != lookup->index)
+ return 1;
+
+ sb = &ares->data.uart_serial_bus;
+
+ status = acpi_get_handle(lookup->device_handle,
+ sb->resource_source.string_ptr,
+ &lookup->controller_handle);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ /*
+ * NOTE: Ideally, we would also want to retrieve other properties here,
+ * once setting them before opening the device is supported by serdev.
+ */
+
+ return 1;
+}
+
+static int acpi_serdev_do_lookup(struct acpi_device *adev,
+ struct acpi_serdev_lookup *lookup)
+{
+ struct list_head resource_list;
+ int ret;
+
+ lookup->device_handle = acpi_device_handle(adev);
+ lookup->controller_handle = NULL;
+ lookup->n = 0;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_serdev_parse_resource, lookup);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
+ struct acpi_device *adev)
+{
+ struct acpi_serdev_lookup lookup;
+ int ret;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return -EINVAL;
+
+ /* Look for UARTSerialBusV2 resource */
+ lookup.index = -1; // we only care for the last device
+
+ ret = acpi_serdev_do_lookup(adev, &lookup);
+ if (ret)
+ return ret;
+
+ /* Make sure controller and ResourceSource handle match */
+ if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
+ return -ENODEV;
+
+ return 0;
+}
+
static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
- struct acpi_device *adev)
+ struct acpi_device *adev)
{
- struct serdev_device *serdev = NULL;
+ struct serdev_device *serdev;
int err;
- if (acpi_bus_get_status(adev) || !adev->status.present ||
- acpi_device_enumerated(adev))
- return AE_OK;
-
serdev = serdev_device_alloc(ctrl);
if (!serdev) {
dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
@@ -583,7 +664,7 @@ static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
}
static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+ void *data, void **return_value)
{
struct serdev_controller *ctrl = data;
struct acpi_device *adev;
@@ -591,22 +672,28 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
if (acpi_bus_get_device(handle, &adev))
return AE_OK;
+ if (acpi_device_enumerated(adev))
+ return AE_OK;
+
+ if (acpi_serdev_check_resources(ctrl, adev))
+ return AE_OK;
+
return acpi_serdev_register_device(ctrl, adev);
}
+
static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
{
acpi_status status;
- acpi_handle handle;
- handle = ACPI_HANDLE(ctrl->dev.parent);
- if (!handle)
+ if (!has_acpi_companion(ctrl->dev.parent))
return -ENODEV;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ SERDEV_ACPI_MAX_SCAN_DEPTH,
acpi_serdev_add_device, NULL, ctrl, NULL);
if (ACPI_FAILURE(status))
- dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
+ dev_warn(&ctrl->dev, "failed to enumerate serdev slaves\n");
if (!ctrl->serdev)
return -ENODEV;
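
The serdev enumeration change is bigger than it looks: the walk now starts at ACPI_ROOT_OBJECT with a depth of 32 rather than at the controller's own handle with depth 1, because client devices are not required to be children of the UART node in the ACPI namespace. What keeps this sane is acpi_serdev_check_resources(): only devices whose UARTSerialBusV2 ResourceSource resolves back to this controller get registered. A toy model of that filter, with handles modeled as strings:

#include <stdio.h>
#include <string.h>

struct dev { const char *resource_source; };

/* Model of acpi_serdev_check_resources(): enumerate a device only when
 * its UART resource points back at this controller's handle. */
static int check_resources(const char *ctrl_handle, const struct dev *d)
{
    if (!d->resource_source)
        return -1;                   /* no UARTSerialBusV2 resource at all */
    return strcmp(ctrl_handle, d->resource_source) ? -1 : 0;
}

int main(void)
{
    struct dev a = { "\\_SB.UART0" }, b = { "\\_SB.UART1" };

    printf("a: %d  b: %d\n",
           check_resources("\\_SB.UART0", &a),   /* 0: enumerate */
           check_resources("\\_SB.UART0", &b));  /* -1: skip */
    return 0;
}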
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 0438d9a905ce..6e67fd89445a 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -14,6 +14,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/clk.h>
@@ -22,6 +24,7 @@
#define ASPEED_VUART_GCRA 0x20
#define ASPEED_VUART_GCRA_VUART_EN BIT(0)
+#define ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY BIT(1)
#define ASPEED_VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5)
#define ASPEED_VUART_GCRB 0x24
#define ASPEED_VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4)
@@ -131,8 +134,53 @@ static ssize_t sirq_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(sirq);
+static ssize_t sirq_polarity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+ u8 reg;
+
+ reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+ reg &= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%u\n", reg ? 1 : 0);
+}
+
+static void aspeed_vuart_set_sirq_polarity(struct aspeed_vuart *vuart,
+ bool polarity)
+{
+ u8 reg = readb(vuart->regs + ASPEED_VUART_GCRA);
+
+ if (polarity)
+ reg |= ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+ else
+ reg &= ~ASPEED_VUART_GCRA_HOST_SIRQ_POLARITY;
+
+ writeb(reg, vuart->regs + ASPEED_VUART_GCRA);
+}
+
+static ssize_t sirq_polarity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_vuart *vuart = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ aspeed_vuart_set_sirq_polarity(vuart, val != 0);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sirq_polarity);
+
static struct attribute *aspeed_vuart_attrs[] = {
&dev_attr_sirq.attr,
+ &dev_attr_sirq_polarity.attr,
&dev_attr_lpc_address.attr,
NULL,
};
@@ -302,8 +350,30 @@ static int aspeed_vuart_handle_irq(struct uart_port *port)
return 1;
}
+static void aspeed_vuart_auto_configure_sirq_polarity(
+ struct aspeed_vuart *vuart, struct device_node *syscon_np,
+ u32 reg_offset, u32 reg_mask)
+{
+ struct regmap *regmap;
+ u32 value;
+
+ regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(regmap)) {
+ dev_warn(vuart->dev,
+ "could not get regmap for aspeed,sirq-polarity-sense\n");
+ return;
+ }
+ if (regmap_read(regmap, reg_offset, &value)) {
+ dev_warn(vuart->dev, "could not read hw strap table\n");
+ return;
+ }
+
+ aspeed_vuart_set_sirq_polarity(vuart, (value & reg_mask) == 0);
+}
+
static int aspeed_vuart_probe(struct platform_device *pdev)
{
+ struct of_phandle_args sirq_polarity_sense_args;
struct uart_8250_port port;
struct aspeed_vuart *vuart;
struct device_node *np;
@@ -402,6 +472,20 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
vuart->line = rc;
+ rc = of_parse_phandle_with_fixed_args(
+ np, "aspeed,sirq-polarity-sense", 2, 0,
+ &sirq_polarity_sense_args);
+ if (rc < 0) {
+ dev_dbg(&pdev->dev,
+ "aspeed,sirq-polarity-sense property not found\n");
+ } else {
+ aspeed_vuart_auto_configure_sirq_polarity(
+ vuart, sirq_polarity_sense_args.np,
+ sirq_polarity_sense_args.args[0],
+ BIT(sirq_polarity_sense_args.args[1]));
+ of_node_put(sirq_polarity_sense_args.np);
+ }
+
aspeed_vuart_set_enabled(vuart, true);
aspeed_vuart_set_host_tx_discard(vuart, true);
platform_set_drvdata(pdev, vuart);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1c72fdc2dd37..aab3cccc6789 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -280,9 +280,6 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
- if (IS_ERR(d->clk))
- goto out;
-
clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, baud * 16);
if (rate < 0)
@@ -293,8 +290,10 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, rate);
clk_prepare_enable(d->clk);
- if (!ret)
- p->uartclk = rate;
+ if (ret)
+ goto out;
+
+ p->uartclk = rate;
out:
p->status &= ~UPSTAT_AUTOCTS;
@@ -386,10 +385,10 @@ static int dw8250_probe(struct platform_device *pdev)
{
struct uart_8250_port uart = {}, *up = &uart;
struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int irq = platform_get_irq(pdev, 0);
struct uart_port *p = &up->port;
struct device *dev = &pdev->dev;
struct dw8250_data *data;
+ int irq;
int err;
u32 val;
@@ -398,11 +397,9 @@ static int dw8250_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "cannot get irq\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return irq;
- }
spin_lock_init(&p->lock);
p->mapbase = regs->start;
@@ -472,19 +469,18 @@ static int dw8250_probe(struct platform_device *pdev)
device_property_read_u32(dev, "clock-frequency", &p->uartclk);
/* If there is separate baudclk, get the rate from it. */
- data->clk = devm_clk_get(dev, "baudclk");
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER)
- data->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR_OR_NULL(data->clk)) {
- err = clk_prepare_enable(data->clk);
- if (err)
- dev_warn(dev, "could not enable optional baudclk: %d\n",
- err);
- else
- p->uartclk = clk_get_rate(data->clk);
- }
+ data->clk = devm_clk_get_optional(dev, "baudclk");
+ if (data->clk == NULL)
+ data->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ err = clk_prepare_enable(data->clk);
+ if (err)
+ dev_warn(dev, "could not enable optional baudclk: %d\n", err);
+
+ if (data->clk)
+ p->uartclk = clk_get_rate(data->clk);
/* If no clock rate is defined, fail. */
if (!p->uartclk) {
@@ -493,17 +489,16 @@ static int dw8250_probe(struct platform_device *pdev)
goto err_clk;
}
- data->pclk = devm_clk_get(dev, "apb_pclk");
- if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
+ data->pclk = devm_clk_get_optional(dev, "apb_pclk");
+ if (IS_ERR(data->pclk)) {
+ err = PTR_ERR(data->pclk);
goto err_clk;
}
- if (!IS_ERR(data->pclk)) {
- err = clk_prepare_enable(data->pclk);
- if (err) {
- dev_err(dev, "could not enable apb_pclk\n");
- goto err_clk;
- }
+
+ err = clk_prepare_enable(data->pclk);
+ if (err) {
+ dev_err(dev, "could not enable apb_pclk\n");
+ goto err_clk;
}
data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
@@ -546,12 +541,10 @@ err_reset:
reset_control_assert(data->rst);
err_pclk:
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
err_clk:
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
return err;
}
@@ -567,11 +560,9 @@ static int dw8250_remove(struct platform_device *pdev)
reset_control_assert(data->rst);
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
@@ -604,11 +595,9 @@ static int dw8250_runtime_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- if (!IS_ERR(data->clk))
- clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk);
- if (!IS_ERR(data->pclk))
- clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->pclk);
return 0;
}
@@ -617,11 +606,9 @@ static int dw8250_runtime_resume(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
- if (!IS_ERR(data->pclk))
- clk_prepare_enable(data->pclk);
+ clk_prepare_enable(data->pclk);
- if (!IS_ERR(data->clk))
- clk_prepare_enable(data->clk);
+ clk_prepare_enable(data->clk);
return 0;
}
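
All the IS_ERR() guards disappear from 8250_dw because of two contracts the cleanup leans on: devm_clk_get_optional() returns NULL rather than an error pointer when the clock is simply absent, and clk_prepare_enable()/clk_disable_unprepare() treat a NULL clock as a successful no-op. A userspace model of that contract (names and behavior are stand-ins):

#include <stdio.h>
#include <string.h>

struct clk { const char *name; };

static struct clk apb = { "apb_pclk" };

/* Absent clock: NULL, not an error pointer, so no IS_ERR() dance. */
static struct clk *clk_get_optional(const char *name)
{
    if (name && !strcmp(name, "apb_pclk"))
        return &apb;
    return NULL;
}

/* NULL clock is a successful no-op, mirroring the real clk API. */
static int clk_prepare_enable(struct clk *clk)
{
    if (!clk)
        return 0;
    printf("enabling %s\n", clk->name);
    return 0;
}

int main(void)
{
    struct clk *baud = clk_get_optional("baudclk");   /* absent -> NULL */
    struct clk *pclk = clk_get_optional("apb_pclk");

    clk_prepare_enable(baud);    /* silently does nothing */
    clk_prepare_enable(pclk);    /* actually enables apb_pclk */
    return 0;
}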
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 597eb9d16f21..108cd55f9c4d 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -166,6 +166,23 @@ static void xr17v35x_set_divisor(struct uart_port *p, unsigned int baud,
serial_port_out(p, 0x2, quot_frac);
}
+static int xr17v35x_startup(struct uart_port *port)
+{
+ /*
+ * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
+ * MCR [7:5] and MSR [7:0]
+ */
+ serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
+
+ /*
+ * Make sure all interrupts are masked until initialization is
+ * complete and the FIFOs are cleared
+ */
+ serial_port_out(port, UART_IER, 0);
+
+ return serial8250_do_startup(port);
+}
+
static void exar_shutdown(struct uart_port *port)
{
unsigned char lsr;
@@ -212,6 +229,8 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
port->port.get_divisor = xr17v35x_get_divisor;
port->port.set_divisor = xr17v35x_set_divisor;
+
+ port->port.startup = xr17v35x_startup;
} else {
port->port.type = PORT_XR17D15X;
}
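
Moving the XR17V35x register unlock out of serial8250_do_startup() and into a driver-owned .startup hook follows the core/driver split used throughout the 8250 layer: the driver performs its chip-specific quirks, then delegates to the generic helper. A minimal model of that wrapper pattern:

#include <stdio.h>

struct uart_port {
    const char *name;
    int (*startup)(struct uart_port *port);
};

static int do_generic_startup(struct uart_port *port)
{
    printf("%s: generic 8250 startup\n", port->name);
    return 0;
}

static int xr17v35x_startup(struct uart_port *port)
{
    printf("%s: unlock extended registers, mask interrupts\n", port->name);
    return do_generic_startup(port);   /* then fall through to the core */
}

int main(void)
{
    struct uart_port p = { .name = "xr17v35x", .startup = xr17v35x_startup };

    return p.startup(&p);
}

The matching 8250_port.c hunk further down removes the PORT_XR17V35X special case from the core, so the generic path no longer needs to know about this chip at all.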
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 5f72ef3ea574..60eff3240c8a 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -221,17 +221,6 @@ static void qrk_serial_exit_dma(struct lpss8250 *lpss) {}
static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
{
- struct pci_dev *pdev = to_pci_dev(port->dev);
- int ret;
-
- pci_set_master(pdev);
-
- ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
- if (ret < 0)
- return ret;
-
- port->irq = pci_irq_vector(pdev, 0);
-
qrk_serial_setup_dma(lpss, port);
return 0;
}
@@ -293,16 +282,22 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ pci_set_master(pdev);
+
lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
if (!lpss)
return -ENOMEM;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
lpss->board = (struct lpss8250_board *)id->driver_data;
memset(&uart, 0, sizeof(struct uart_8250_port));
uart.port.dev = &pdev->dev;
- uart.port.irq = pdev->irq;
+ uart.port.irq = pci_irq_vector(pdev, 0);
uart.port.private_data = &lpss->data;
uart.port.type = PORT_16550A;
uart.port.iotype = UPIO_MEM;
@@ -337,6 +332,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_exit:
if (lpss->board->exit)
lpss->board->exit(lpss);
+ pci_free_irq_vectors(pdev);
return ret;
}
@@ -348,6 +344,7 @@ static void lpss8250_remove(struct pci_dev *pdev)
if (lpss->board->exit)
lpss->board->exit(lpss);
+ pci_free_irq_vectors(pdev);
}
static const struct lpss8250_board byt_board = {
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 8df89e9cd254..e985f344b2dd 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -174,3 +174,4 @@ MODULE_AUTHOR("Michael Moese <michael.moese@men.de");
MODULE_ALIAS("mcb:16z125");
MODULE_ALIAS("mcb:16z025");
MODULE_ALIAS("mcb:16z057");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b411ba4eb5e9..4d067f515f74 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -544,7 +544,7 @@ static int mtk8250_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- data->rx_wakeup_irq = platform_get_irq(pdev, 1);
+ data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 0826cfdbd406..92fbf46ce3bd 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -48,6 +48,36 @@ static inline void tegra_serial_handle_break(struct uart_port *port)
}
#endif
+static int of_8250_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
+ port->rs485 = *rs485;
+
+ /*
+ * Both serial8250_em485_init and serial8250_em485_destroy
+ * are idempotent
+ */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ int ret = serial8250_em485_init(up);
+
+ if (ret) {
+ rs485->flags &= ~SER_RS485_ENABLED;
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
+ return ret;
+ }
+
+ serial8250_em485_destroy(up);
+
+ return 0;
+}
+
/*
* Fill a struct uart_port for a given device node
*/
@@ -178,6 +208,7 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->flags |= UPF_SKIP_TEST;
port->dev = &ofdev->dev;
+ port->rs485_config = of_8250_rs485_config;
switch (type) {
case PORT_TEGRA:
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6adbadd6a56a..022924d5ad54 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -48,8 +48,6 @@ struct f815xxa_data {
int idx;
};
-#define PCI_NUM_BAR_RESOURCES 6
-
struct serial_private {
struct pci_dev *dev;
unsigned int nr;
@@ -89,7 +87,7 @@ setup_port(struct serial_private *priv, struct uart_8250_port *port,
{
struct pci_dev *dev = priv->dev;
- if (bar >= PCI_NUM_BAR_RESOURCES)
+ if (bar >= PCI_STD_NUM_BARS)
return -EINVAL;
if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
@@ -745,16 +743,8 @@ static int pci_ni8430_init(struct pci_dev *dev)
}
/* UART Port Control Register */
-#define NI16550_PCR_OFFSET 0x0f
-#define NI16550_PCR_RS422 0x00
-#define NI16550_PCR_ECHO_RS485 0x01
-#define NI16550_PCR_DTR_RS485 0x02
-#define NI16550_PCR_AUTO_RS485 0x03
-#define NI16550_PCR_WIRE_MODE_MASK 0x03
-#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
-#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
-#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3)
-#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3)
+#define NI8430_PORTCON 0x0f
+#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
static int
pci_ni8430_setup(struct serial_private *priv,
@@ -776,117 +766,14 @@ pci_ni8430_setup(struct serial_private *priv,
return -ENOMEM;
/* enable the transceiver */
- writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
- p + offset + NI16550_PCR_OFFSET);
+ writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
+ p + offset + NI8430_PORTCON);
iounmap(p);
return setup_port(priv, port, bar, offset, board->reg_shift);
}
-static int pci_ni8431_config_rs485(struct uart_port *port,
- struct serial_rs485 *rs485)
-{
- u8 pcr, acr;
- struct uart_8250_port *up;
-
- up = container_of(port, struct uart_8250_port, port);
- acr = up->acr;
- pcr = port->serial_in(port, NI16550_PCR_OFFSET);
- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
-
- if (rs485->flags & SER_RS485_ENABLED) {
- /* RS-485 */
- if ((rs485->flags & SER_RS485_RX_DURING_TX) &&
- (rs485->flags & SER_RS485_RTS_ON_SEND)) {
- dev_dbg(port->dev, "Invalid 2-wire mode\n");
- return -EINVAL;
- }
-
- if (rs485->flags & SER_RS485_RX_DURING_TX) {
- /* Echo */
- dev_vdbg(port->dev, "2-wire DTR with echo\n");
- pcr |= NI16550_PCR_ECHO_RS485;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- } else {
- /* Auto or DTR */
- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
- /* Auto */
- dev_vdbg(port->dev, "2-wire Auto\n");
- pcr |= NI16550_PCR_AUTO_RS485;
- acr |= NI16550_ACR_DTR_AUTO_DTR;
- } else {
- /* DTR-controlled */
- /* No Echo */
- dev_vdbg(port->dev, "2-wire DTR no echo\n");
- pcr |= NI16550_PCR_DTR_RS485;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- }
- }
- } else {
- /* RS-422 */
- dev_vdbg(port->dev, "4-wire\n");
- pcr |= NI16550_PCR_RS422;
- acr |= NI16550_ACR_DTR_MANUAL_DTR;
- }
-
- dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr);
- port->serial_out(port, NI16550_PCR_OFFSET, pcr);
-
- up->acr = acr;
- port->serial_out(port, UART_SCR, UART_ACR);
- port->serial_out(port, UART_ICR, up->acr);
-
- /* Update the cache. */
- port->rs485 = *rs485;
-
- return 0;
-}
-
-static int pci_ni8431_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *uart, int idx)
-{
- u8 pcr, acr;
- struct pci_dev *dev = priv->dev;
- void __iomem *addr;
- unsigned int bar, offset = board->first_offset;
-
- if (idx >= board->num_ports)
- return 1;
-
- bar = FL_GET_BASE(board->flags);
- offset += idx * board->uart_offset;
-
- addr = pci_ioremap_bar(dev, bar);
- if (!addr)
- return -ENOMEM;
-
- /* enable the transceiver */
- writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
- addr + NI16550_PCR_OFFSET);
-
- pcr = readb(addr + NI16550_PCR_OFFSET);
- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
-
- /* set wire mode to default RS-422 */
- pcr |= NI16550_PCR_RS422;
- acr = NI16550_ACR_DTR_MANUAL_DTR;
-
- /* write port configuration to register */
- writeb(pcr, addr + NI16550_PCR_OFFSET);
-
- /* access and write to UART acr register */
- writeb(UART_ACR, addr + UART_SCR);
- writeb(acr, addr + UART_ICR);
-
- uart->port.rs485_config = &pci_ni8431_config_rs485;
-
- iounmap(addr);
-
- return setup_port(priv, uart, bar, offset, board->reg_shift);
-}
-
static int pci_netmos_9900_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -2023,15 +1910,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
-#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2
-#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1
-#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081
-#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE
-#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3
-#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9
-#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED
-#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4
-#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
@@ -2269,87 +2147,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_ni8430_setup,
.exit = pci_ni8430_exit,
},
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8430_2328,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8430_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8430_23216,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8430_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4852,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4854,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8431_4858,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8433_4852,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCI_DEVICE_ID_NI_PXI8433_4854,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8431_4858,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
- {
- .vendor = PCI_VENDOR_ID_NI,
- .device = PCIE_DEVICE_ID_NI_PXIE8431_48516,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .init = pci_ni8430_init,
- .setup = pci_ni8431_setup,
- .exit = pci_ni8430_exit,
- },
/* Quatech */
{
.vendor = PCI_VENDOR_ID_QUATECH,
@@ -3106,13 +2903,6 @@ enum pci_board_num_t {
pbn_ni8430_4,
pbn_ni8430_8,
pbn_ni8430_16,
- pbn_ni8430_pxie_8,
- pbn_ni8430_pxie_16,
- pbn_ni8431_2,
- pbn_ni8431_4,
- pbn_ni8431_8,
- pbn_ni8431_pxie_8,
- pbn_ni8431_pxie_16,
pbn_ADDIDATA_PCIe_1_3906250,
pbn_ADDIDATA_PCIe_2_3906250,
pbn_ADDIDATA_PCIe_4_3906250,
@@ -3765,55 +3555,6 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 0x10,
.first_offset = 0x800,
},
- [pbn_ni8430_pxie_16] = {
- .flags = FL_BASE0,
- .num_ports = 16,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8430_pxie_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_4] = {
- .flags = FL_BASE0,
- .num_ports = 4,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_2] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 3686400,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_pxie_16] = {
- .flags = FL_BASE0,
- .num_ports = 16,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
- [pbn_ni8431_pxie_8] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 3125000,
- .uart_offset = 0x10,
- .first_offset = 0x800,
- },
/*
* ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
*/
@@ -4060,7 +3801,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
return -ENODEV;
num_iomem = num_port = 0;
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
num_port++;
if (first_port == -1)
@@ -4088,7 +3829,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
*/
first_port = -1;
num_port = 0;
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_flags(dev, i) & IORESOURCE_IO &&
pci_resource_len(dev, i) == 8 &&
(first_port == -1 || (first_port + num_port) == i)) {
@@ -5567,33 +5308,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_ni8430_4 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8430_pxie_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8430_pxie_16 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_2 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_4 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_pxie_8 },
- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_pxie_16 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_2 },
- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_ni8431_4 },
/*
* MOXA
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8407166610ce..90655910b0c7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2114,20 +2114,6 @@ int serial8250_do_startup(struct uart_port *port)
enable_rsa(up);
#endif
- if (port->type == PORT_XR17V35X) {
- /*
- * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
- * MCR [7:5] and MSR [7:0]
- */
- serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
-
- /*
- * Make sure all interrups are masked until initialization is
- * complete and the FIFOs are cleared
- */
- serial_port_out(port, UART_IER, 0);
- }
-
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 7ef60f8b6e2c..fab3d4f20667 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -243,6 +243,7 @@ config SERIAL_8250_ASPEED_VUART
tristate "Aspeed Virtual UART"
depends on SERIAL_8250
depends on OF
+ depends on REGMAP && MFD_SYSCON
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
@@ -334,7 +335,7 @@ config SERIAL_8250_BCM2835AUX
Features and limitations of the UART are
Registers are similar to 16650 registers,
- set bits in the control registers that are unsupported
+ set bits in the control registers that are unsupported
are ignored and read back as 0
7/8 bit operation with 1 start and 1 stop bit
8 symbols deep fifo for rx and tx
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67a9eb3f94ce..99f5da3bf913 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -88,7 +88,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
- depends on RISCV
+ depends on RISCV_SBI
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
@@ -287,26 +287,26 @@ config SERIAL_SAMSUNG_CONSOLE
boot time.)
config SERIAL_SIRFSOC
- tristate "SiRF SoC Platform Serial port support"
- depends on ARCH_SIRF
- select SERIAL_CORE
- help
- Support for the on-chip UART on the CSR SiRFprimaII series,
- providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
- provide all of these ports, depending on how the serial port
- pins are configured).
+ tristate "SiRF SoC Platform Serial port support"
+ depends on ARCH_SIRF
+ select SERIAL_CORE
+ help
+ Support for the on-chip UART on the CSR SiRFprimaII series,
+ providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
config SERIAL_SIRFSOC_CONSOLE
- bool "Support for console on SiRF SoC serial port"
- depends on SERIAL_SIRFSOC=y
- select SERIAL_CORE_CONSOLE
- help
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttySiRFx". (Try "man bootparam" or see the documentation of
- your boot loader about how to pass options to the kernel at
- boot time.)
+ bool "Support for console on SiRF SoC serial port"
+ depends on SERIAL_SIRFSOC=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
config SERIAL_TEGRA
tristate "NVIDIA Tegra20/30 SoC serial controller"
@@ -1078,41 +1078,41 @@ config SERIAL_SCCNXP_CONSOLE
Support for console on SCCNXP serial ports.
config SERIAL_SC16IS7XX_CORE
- tristate
+ tristate
config SERIAL_SC16IS7XX
- tristate "SC16IS7xx serial support"
- select SERIAL_CORE
- depends on (SPI_MASTER && !I2C) || I2C
- help
- This selects support for SC16IS7xx serial ports.
- Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
- SC16IS760 and SC16IS762. Select supported buses using options below.
+ tristate "SC16IS7xx serial support"
+ select SERIAL_CORE
+ depends on (SPI_MASTER && !I2C) || I2C
+ help
+ This selects support for SC16IS7xx serial ports.
+ Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
+ SC16IS760 and SC16IS762. Select supported buses using options below.
config SERIAL_SC16IS7XX_I2C
- bool "SC16IS7xx for I2C interface"
- depends on SERIAL_SC16IS7XX
- depends on I2C
- select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
- select REGMAP_I2C if I2C
- default y
- help
- Enable SC16IS7xx driver on I2C bus,
- If required say y, and say n to i2c if not required,
- Enabled by default to support oldconfig.
- You must select at least one bus for the driver to be built.
+ bool "SC16IS7xx for I2C interface"
+ depends on SERIAL_SC16IS7XX
+ depends on I2C
+ select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
+ select REGMAP_I2C if I2C
+ default y
+ help
+ Enable SC16IS7xx driver on I2C bus,
+ If required say y, and say n to i2c if not required,
+ Enabled by default to support oldconfig.
+ You must select at least one bus for the driver to be built.
config SERIAL_SC16IS7XX_SPI
- bool "SC16IS7xx for spi interface"
- depends on SERIAL_SC16IS7XX
- depends on SPI_MASTER
- select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
- select REGMAP_SPI if SPI_MASTER
- help
- Enable SC16IS7xx driver on SPI bus,
- If required say y, and say n to spi if not required,
- This is additional support to exsisting driver.
- You must select at least one bus for the driver to be built.
+ bool "SC16IS7xx for spi interface"
+ depends on SERIAL_SC16IS7XX
+ depends on SPI_MASTER
+ select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Enable SC16IS7xx driver on SPI bus,
+ If required say y, and say n to spi if not required,
+ This is additional support to the existing driver.
+ You must select at least one bus for the driver to be built.
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
@@ -1212,7 +1212,7 @@ config SERIAL_ALTERA_UART_CONSOLE
Enable a Altera UART port to be the system console.
config SERIAL_IFX6X60
- tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
+ tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
depends on GPIOLIB || COMPILE_TEST
depends on SPI && HAS_DMA
help
@@ -1392,19 +1392,19 @@ config SERIAL_FSL_LPUART_CONSOLE
you can make it the console by answering Y to this option.
config SERIAL_FSL_LINFLEXUART
- tristate "Freescale linflexuart serial port support"
+ tristate "Freescale LINFlexD UART serial port support"
depends on PRINTK
select SERIAL_CORE
help
- Support for the on-chip linflexuart on some Freescale SOCs.
+ Support for the on-chip LINFlexD UART on some Freescale SOCs.
config SERIAL_FSL_LINFLEXUART_CONSOLE
- bool "Console on Freescale linflexuart serial port"
+ bool "Console on Freescale LINFlexD UART serial port"
depends on SERIAL_FSL_LINFLEXUART=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
- If you have enabled the linflexuart serial port on the Freescale
+ If you have enabled the LINFlexD UART serial port on the Freescale
SoCs, you can make it the console by answering Y to this option.
config SERIAL_CONEXANT_DIGICOLOR
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 863f47056539..d056ee6cca33 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
-obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
+obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 3a7d1a66f79c..4b28134d596a 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -414,7 +414,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
dma_cap_mask_t mask;
uap->dma_probed = true;
- chan = dma_request_slave_channel_reason(dev, "tx");
+ chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
if (PTR_ERR(chan) == -EPROBE_DEFER) {
uap->dma_probed = false;
@@ -813,10 +813,8 @@ __acquires(&uap->port.lock)
if (!uap->using_tx_dma)
return;
- /* Avoid deadlock with the DMA engine callback */
- spin_unlock(&uap->port.lock);
- dmaengine_terminate_all(uap->dmatx.chan);
- spin_lock(&uap->port.lock);
+ dmaengine_terminate_async(uap->dmatx.chan);
+
if (uap->dmatx.queued) {
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
DMA_TO_DEVICE);
@@ -1236,10 +1234,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct uart_amba_port *uap)
-{
-}
-
static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}
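
The pl011 hunk above drops the unlock/terminate/relock dance because a synchronous terminate can end up waiting for (or invoking) the transfer's completion callback, and that callback takes the same port lock — a classic self-deadlock. dmaengine_terminate_async() only issues the request and returns, so it is safe under the lock; a later dmaengine_synchronize() in sleepable context can wait it out. A toy model of the safe half, under those assumptions:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int terminate_requested;

/* Like dmaengine_terminate_async(): record the request, never wait for
 * the callback, so holding port_lock here cannot deadlock. */
static void terminate_async(void)
{
    terminate_requested = 1;
}

static void stop_tx_dma(void)
{
    pthread_mutex_lock(&port_lock);
    terminate_async();
    pthread_mutex_unlock(&port_lock);
}

int main(void)
{
    stop_tx_dma();
    printf("terminate requested: %d\n", terminate_requested);
    return 0;
}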
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index a32f0d2afd59..205c31a61684 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Freescale linflexuart serial port driver
+ * Freescale LINFlexD UART serial port driver
*
* Copyright 2012-2016 Freescale Semiconductor, Inc.
* Copyright 2017-2019 NXP
@@ -940,5 +940,5 @@ static void __exit linflex_serial_exit(void)
module_init(linflex_serial_init);
module_exit(linflex_serial_exit);
-MODULE_DESCRIPTION("Freescale linflex serial port driver");
+MODULE_DESCRIPTION("Freescale LINFlexD serial port driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 537896c4d887..4e128d19e0ad 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -437,8 +437,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
}
sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
- sport->dma_tx_nents,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ ret, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
if (!sport->dma_tx_desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
dev_err(dev, "Cannot prepare TX slave DMA!\n");
@@ -1280,6 +1280,57 @@ static int lpuart_config_rs485(struct uart_port *port,
return 0;
}
+static int lpuart32_config_rs485(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct lpuart_port *sport = container_of(port,
+ struct lpuart_port, port);
+
+ unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
+ & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
+ lpuart32_write(&sport->port, modem, UARTMODIR);
+
+ /* clear unsupported configurations */
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+ rs485->flags &= ~SER_RS485_RX_DURING_TX;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ /* Enable auto RS-485 RTS mode */
+ modem |= UARTMODEM_TXRTSE;
+
+ /*
+ * RTS needs to be logic HIGH either during transfer _or_ after
+ * transfer, other variants are not supported by the hardware.
+ */
+
+ if (!(rs485->flags & (SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND)))
+ rs485->flags |= SER_RS485_RTS_ON_SEND;
+
+ if (rs485->flags & SER_RS485_RTS_ON_SEND &&
+ rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+
+ /*
+ * The hardware defaults to RTS logic HIGH while transfer.
+ * Switch polarity in case RTS shall be logic HIGH
+ * after transfer.
+ * Note: UART is assumed to be active high.
+ */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem |= UARTMODEM_TXRTSPOL;
+ }
+
+ /* Store the new configuration */
+ sport->port.rs485 = *rs485;
+
+ lpuart32_write(&sport->port, modem, UARTMODIR);
+ return 0;
+}
+
static unsigned int lpuart_get_mctrl(struct uart_port *port)
{
unsigned int temp = 0;
@@ -1333,18 +1384,7 @@ static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- unsigned long temp;
-
- temp = lpuart32_read(port, UARTMODIR) &
- ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
-
- if (mctrl & TIOCM_RTS)
- temp |= UARTMODIR_RXRTSE;
-
- if (mctrl & TIOCM_CTS)
- temp |= UARTMODIR_TXCTSE;
- lpuart32_write(port, temp, UARTMODIR);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
@@ -1889,11 +1929,18 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
ctrl |= UARTCTRL_M;
}
+ /*
+ * When auto RS-485 RTS mode is enabled,
+ * hardware flow control needs to be disabled.
+ */
+ if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ termios->c_cflag &= ~CRTSCTS;
+
if (termios->c_cflag & CRTSCTS) {
- modem |= UARTMODEM_RXRTSE | UARTMODEM_TXCTSE;
+ modem |= (UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
} else {
termios->c_cflag &= ~CRTSCTS;
- modem &= ~(UARTMODEM_RXRTSE | UARTMODEM_TXCTSE);
+ modem &= ~(UARTMODIR_RXRTSE | UARTMODIR_TXCTSE);
}
if (termios->c_cflag & CSTOPB)
@@ -2416,7 +2463,10 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.ops = &lpuart_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
- sport->port.rs485_config = lpuart_config_rs485;
+ if (lpuart_is_32(sport))
+ sport->port.rs485_config = lpuart32_config_rs485;
+ else
+ sport->port.rs485_config = lpuart_config_rs485;
sport->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->ipg_clk)) {
@@ -2470,7 +2520,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.rs485.delay_rts_after_send)
dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
- lpuart_config_rs485(&sport->port, &sport->port.rs485);
+ sport->port.rs485_config(&sport->port, &sport->port.rs485);
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
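
The lpuart hunk near the top of this file (and the imx hunk below) fix the same subtle dma_map_sg() misuse: the mapper is allowed to coalesce scatterlist entries, so dmaengine_prep_slave_sg() must be given the count dma_map_sg() returned, not the nents originally passed in. A toy model of why the two counts diverge:

#include <stdio.h>

/* Pretend the IOMMU merged two adjacent entries, as dma_map_sg() may. */
static int dma_map_sg_model(int nents)
{
    return nents > 1 ? nents - 1 : nents;
}

int main(void)
{
    int nents = 3;                        /* sport->dma_tx_nents */
    int mapped = dma_map_sg_model(nents);

    printf("prep_slave_sg must use %d, not %d\n", mapped, nents);
    return 0;
}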
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index ffefd218761e..31033d517e82 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
/* stop activity */
tasklet_kill(&ifx_dev->io_work_tasklet);
+
+ pm_runtime_disable(&spi->dev);
+
/* free irq */
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 5e08f2657b90..a9e20e6c63ad 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -619,7 +619,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
dev_err(dev, "DMA mapping error for TX.\n");
return;
}
- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ desc = dmaengine_prep_slave_sg(chan, sgl, ret,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc) {
dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
@@ -1034,8 +1034,6 @@ static void imx_uart_timeout(struct timer_list *t)
}
}
-#define RX_BUF_SIZE (PAGE_SIZE)
-
/*
* There are two kinds of RX DMA interrupts(such as in the MX6Q):
* [1] the RX DMA buffer is full.
@@ -1118,7 +1116,8 @@ static void imx_uart_dma_rx_callback(void *data)
}
/* RX DMA buffer periods */
-#define RX_DMA_PERIODS 4
+#define RX_DMA_PERIODS 16
+#define RX_BUF_SIZE (RX_DMA_PERIODS * PAGE_SIZE / 4)
static int imx_uart_start_rx_dma(struct imx_port *sport)
{
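
The imx RX buffer rework keeps the DMA period size constant while quadrupling the depth: the old layout was PAGE_SIZE split into 4 periods; the new one is RX_DMA_PERIODS * PAGE_SIZE / 4 split into 16 — the same 1 KiB period (assuming 4 KiB pages) with four times the headroom before data is dropped. The arithmetic, checked:

#include <stdio.h>

#define PAGE_SIZE      4096u   /* assuming 4 KiB pages */
#define RX_DMA_PERIODS 16
#define RX_BUF_SIZE    (RX_DMA_PERIODS * PAGE_SIZE / 4)

int main(void)
{
    /* prints: total 16384, 16 periods of 1024 bytes */
    printf("total %u, %d periods of %u bytes\n",
           RX_BUF_SIZE, RX_DMA_PERIODS, RX_BUF_SIZE / RX_DMA_PERIODS);
    return 0;
}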
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index e5d3ebab6dae..4f53a4caabf6 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -930,3 +930,4 @@ MODULE_AUTHOR("Johannes Thumshirn <johannes.thumshirn@men.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MEN 16z135 High Speed UART");
MODULE_ALIAS("mcb:16z135");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 3657a24913fc..1cbae0768b1f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -301,7 +301,7 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
dma = &msm_port->tx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "tx");
+ dma->chan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan))
goto no_tx;
@@ -344,7 +344,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
dma = &msm_port->rx_dma;
/* allocate DMA resources, if available */
- dma->chan = dma_request_slave_channel_reason(dev, "rx");
+ dma->chan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->chan))
goto no_rx;
@@ -980,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int mr;
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
@@ -987,7 +988,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ mr = msm_read(port, UART_MR1);
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 6157213a8359..c16234bca78f 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -233,6 +233,7 @@ struct eg20t_port {
struct dma_chan *chan_rx;
struct scatterlist *sg_tx_p;
int nent;
+ int orig_nent;
struct scatterlist sg_rx;
int tx_dma_use;
void *rx_buf_virt;
@@ -787,9 +788,10 @@ static void pch_dma_tx_complete(void *arg)
}
xmit->tail &= UART_XMIT_SIZE - 1;
async_tx_ack(priv->desc_tx);
- dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
+ dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
+ priv->orig_nent = 0;
kfree(priv->sg_tx_p);
pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
}
@@ -1010,6 +1012,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
return 0;
}
+ priv->orig_nent = num;
priv->nent = nent;
for (i = 0; i < nent; i++, sg++) {
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 14c6306bc462..ff63728a95f4 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -9,10 +9,12 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/qcom-geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -115,6 +117,7 @@ struct qcom_geni_serial_port {
bool brk;
unsigned int tx_remaining;
+ int wakeup_irq;
};
static const struct uart_ops qcom_geni_console_pops;
@@ -754,6 +757,15 @@ out_write_wakeup:
uart_write_wakeup(uport);
}
+static irqreturn_t qcom_geni_serial_wakeup_isr(int isr, void *dev)
+{
+ struct uart_port *uport = dev;
+
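+	/*
+	 * Hold a 2 s wakeup reference so the resume path can process the
+	 * incoming data before the system is allowed to suspend again
+	 * (assumed rationale).
+	 */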
+ pm_wakeup_event(uport->dev, 2000);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
@@ -830,7 +842,7 @@ static void qcom_geni_serial_shutdown(struct uart_port *uport)
if (uart_console(uport))
console_stop(uport->cons);
- free_irq(uport->irq, uport);
+ disable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
qcom_geni_serial_stop_tx(uport);
qcom_geni_serial_stop_rx(uport);
@@ -890,21 +902,14 @@ static int qcom_geni_serial_startup(struct uart_port *uport)
int ret;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- scnprintf(port->name, sizeof(port->name),
- "qcom_serial_%s%d",
- (uart_console(uport) ? "console" : "uart"), uport->line);
-
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
if (ret)
return ret;
}
+ enable_irq(uport->irq);
- ret = request_irq(uport->irq, qcom_geni_serial_isr, IRQF_TRIGGER_HIGH,
- port->name, uport);
- if (ret)
- dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
- return ret;
+ return 0;
}
static unsigned long get_clk_cfg(unsigned long clk_freq)
@@ -1297,11 +1302,44 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
+ scnprintf(port->name, sizeof(port->name), "qcom_geni_serial_%s%d",
+ (uart_console(uport) ? "console" : "uart"), uport->line);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
uport->irq = irq;
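+	/* request the IRQ now but leave it disabled; startup()/shutdown() enable and disable it */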
+ irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
+ IRQF_TRIGGER_HIGH, port->name, uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
+ return ret;
+ }
+
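+	/* the wakeup interrupt is optional and only requested for non-console ports */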
+ if (!console) {
+ port->wakeup_irq = platform_get_irq(pdev, 1);
+ if (port->wakeup_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get wakeup IRQ %d\n",
+ port->wakeup_irq);
+ } else {
+ irq_set_status_flags(port->wakeup_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(uport->dev, port->wakeup_irq,
+ qcom_geni_serial_wakeup_isr,
+ IRQF_TRIGGER_FALLING, "uart_wakeup", uport);
+ if (ret) {
+ dev_err(uport->dev, "Failed to register wakeup IRQ ret %d\n",
+ ret);
+ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, port->wakeup_irq);
+ if (unlikely(ret))
+ dev_err(uport->dev, "%s:Failed to set IRQ wake:%d\n",
+ __func__, ret);
+ }
+ }
uport->private_data = drv;
platform_set_drvdata(pdev, port);
port->handle_rx = console ? handle_rx_console : handle_rx_uart;
@@ -1324,7 +1362,12 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
- return uart_suspend_port(uport->private_data, uport);
+ uart_suspend_port(uport->private_data, uport);
+
+ if (port->wakeup_irq > 0)
+ enable_irq(port->wakeup_irq);
+
+ return 0;
}
static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
@@ -1332,6 +1375,9 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
struct uart_port *uport = &port->uport;
+ if (port->wakeup_irq > 0)
+ disable_irq(port->wakeup_irq);
+
return uart_resume_port(uport->private_data, uport);
}
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung_tty.c
index 83fd51607741..83fd51607741 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung_tty.c
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 2f599515c133..b6ace6290e23 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1122,8 +1122,7 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
int ret;
struct dma_slave_config dma_sconfig;
- dma_chan = dma_request_slave_channel_reason(tup->uport.dev,
- dma_to_memory ? "rx" : "tx");
+ dma_chan = dma_request_chan(tup->uport.dev, dma_to_memory ? "rx" : "tx");
if (IS_ERR(dma_chan)) {
ret = PTR_ERR(dma_chan);
dev_err(tup->uport.dev,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index c4a414a46c7f..b0a6eb106edb 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1111,7 +1111,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
if (!uport)
goto out;
- if (uport->type != PORT_UNKNOWN)
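+	/* not every driver implements break_ctl(), so check before calling it */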
+ if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
uport->ops->break_ctl(uport, break_state);
ret = 0;
out:
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 22e5d4e13714..58bf9d496ba5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -54,6 +54,7 @@
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
+#include <asm/platform_early.h>
#endif
#include "serial_mctrl_gpio.h"
@@ -3090,6 +3091,7 @@ static struct console serial_console = {
.data = &sci_uart_driver,
};
+#ifdef CONFIG_SUPERH
static struct console early_serial_console = {
.name = "early_ttySC",
.write = serial_console_write,
@@ -3118,6 +3120,7 @@ static int sci_probe_earlyprintk(struct platform_device *pdev)
register_console(&early_serial_console);
return 0;
}
+#endif
#define SCI_CONSOLE (&serial_console)
@@ -3318,8 +3321,10 @@ static int sci_probe(struct platform_device *dev)
* the special early probe. We don't have sufficient device state
* to make it beyond this yet.
*/
- if (is_early_platform_device(dev))
+#ifdef CONFIG_SUPERH
+ if (is_sh_early_platform_device(dev))
return sci_probe_earlyprintk(dev);
+#endif
if (dev->dev.of_node) {
p = sci_parse_dt(dev, &dev_id);
@@ -3414,8 +3419,8 @@ static void __exit sci_exit(void)
uart_unregister_driver(&sci_uart_driver);
}
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-early_platform_init_buffer("earlyprintk", &sci_driver,
+#if defined(CONFIG_SUPERH) && defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
+sh_early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 004ca684d3ae..637b09d3fe79 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -120,7 +120,8 @@ static u32 uart_usp_ff_empty_mask(struct uart_port *port)
empty_bit = ilog2(port->fifosize) + 1;
return (1 << empty_bit);
}
-struct sirfsoc_uart_register sirfsoc_usp = {
+
+static struct sirfsoc_uart_register sirfsoc_usp = {
.uart_reg = {
.sirfsoc_mode1 = 0x0000,
.sirfsoc_mode2 = 0x0004,
@@ -186,7 +187,7 @@ struct sirfsoc_uart_register sirfsoc_usp = {
},
};
-struct sirfsoc_uart_register sirfsoc_uart = {
+static struct sirfsoc_uart_register sirfsoc_uart = {
.uart_reg = {
.sirfsoc_line_ctrl = 0x0040,
.sirfsoc_tx_rx_en = 0x004c,
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 771d11196523..31df23502562 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -919,6 +919,34 @@ static void sprd_pm(struct uart_port *port, unsigned int state,
}
}
+#ifdef CONFIG_CONSOLE_POLL
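+/* polled-mode hooks, e.g. for kgdboc: busy-wait on the FIFO status bits */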
+static int sprd_poll_init(struct uart_port *port)
+{
+ if (port->state->pm_state != UART_PM_STATE_ON) {
+ sprd_pm(port, UART_PM_STATE_ON, 0);
+ port->state->pm_state = UART_PM_STATE_ON;
+ }
+
+ return 0;
+}
+
+static int sprd_poll_get_char(struct uart_port *port)
+{
+ while (!(serial_in(port, SPRD_STS1) & SPRD_RX_FIFO_CNT_MASK))
+ cpu_relax();
+
+ return serial_in(port, SPRD_RXD);
+}
+
+static void sprd_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (serial_in(port, SPRD_STS1) & SPRD_TX_FIFO_CNT_MASK)
+ cpu_relax();
+
+ serial_out(port, SPRD_TXD, ch);
+}
+#endif
+
static const struct uart_ops serial_sprd_ops = {
.tx_empty = sprd_tx_empty,
.get_mctrl = sprd_get_mctrl,
@@ -936,6 +964,11 @@ static const struct uart_ops serial_sprd_ops = {
.config_port = sprd_config_port,
.verify_port = sprd_verify_port,
.pm = sprd_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = sprd_poll_init,
+ .poll_get_char = sprd_poll_get_char,
+ .poll_put_char = sprd_poll_put_char,
+#endif
};
#ifdef CONFIG_SERIAL_SPRD_CONSOLE
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index df90747ee3a8..2f72514d63ed 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -240,8 +240,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
* cleared by the sequence [read SR - read DR].
*/
if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
- stm32_clr_bits(port, ofs->icr, USART_ICR_ORECF |
- USART_ICR_PECF | USART_ICR_FECF);
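+		/* the ICR bits are write-one-to-clear: clear exactly the error flags seen in SR */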
+ writel_relaxed(sr & USART_SR_ERR_MASK,
+ port->membase + ofs->icr);
c = stm32_get_char(port, &sr, &stm32_port->last_res);
port->icount.rx++;
@@ -435,7 +435,7 @@ static void stm32_transmit_chars(struct uart_port *port)
if (ofs->icr == UNDEF_REG)
stm32_clr_bits(port, ofs->isr, USART_SR_TC);
else
- stm32_set_bits(port, ofs->icr, USART_ICR_TCCF);
+ writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
if (stm32_port->tx_ch)
stm32_transmit_chars_dma(port);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 06e79c11141d..7dbd0c471d92 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -22,7 +22,6 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
-#include <linux/pm_runtime.h>
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
@@ -55,7 +54,6 @@
#define ULITE_CONTROL_RST_TX 0x01
#define ULITE_CONTROL_RST_RX 0x02
#define ULITE_CONTROL_IE 0x10
-#define UART_AUTOSUSPEND_TIMEOUT 3000
/* Static pointer to console port */
#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
@@ -65,7 +63,6 @@ static struct uart_port *console_port;
struct uartlite_data {
const struct uartlite_reg_ops *reg_ops;
struct clk *clk;
- struct uart_driver *ulite_uart_driver;
};
struct uartlite_reg_ops {
@@ -393,12 +390,12 @@ static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
static void ulite_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
- if (!state) {
- pm_runtime_get_sync(port->dev);
- } else {
- pm_runtime_mark_last_busy(port->dev);
- pm_runtime_put_autosuspend(port->dev);
- }
+ struct uartlite_data *pdata = port->private_data;
+
+ if (!state)
+ clk_enable(pdata->clk);
+ else
+ clk_disable(pdata->clk);
}
#ifdef CONFIG_CONSOLE_POLL
@@ -697,9 +694,7 @@ static int ulite_release(struct device *dev)
int rc = 0;
if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- rc = uart_remove_one_port(pdata->ulite_uart_driver, port);
+ rc = uart_remove_one_port(&ulite_uart_driver, port);
dev_set_drvdata(dev, NULL);
port->mapbase = 0;
}
@@ -717,11 +712,8 @@ static int __maybe_unused ulite_suspend(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- uart_suspend_port(pdata->ulite_uart_driver, port);
- }
+ if (port)
+ uart_suspend_port(&ulite_uart_driver, port);
return 0;
}
@@ -736,41 +728,17 @@ static int __maybe_unused ulite_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
- if (port) {
- struct uartlite_data *pdata = port->private_data;
-
- uart_resume_port(pdata->ulite_uart_driver, port);
- }
+ if (port)
+ uart_resume_port(&ulite_uart_driver, port);
return 0;
}
-static int __maybe_unused ulite_runtime_suspend(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct uartlite_data *pdata = port->private_data;
-
- clk_disable(pdata->clk);
- return 0;
-};
-
-static int __maybe_unused ulite_runtime_resume(struct device *dev)
-{
- struct uart_port *port = dev_get_drvdata(dev);
- struct uartlite_data *pdata = port->private_data;
-
- clk_enable(pdata->clk);
- return 0;
-}
/* ---------------------------------------------------------------------
* Platform bus binding
*/
-static const struct dev_pm_ops ulite_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ulite_suspend, ulite_resume)
- SET_RUNTIME_PM_OPS(ulite_runtime_suspend,
- ulite_runtime_resume, NULL)
-};
+static SIMPLE_DEV_PM_OPS(ulite_pm_ops, ulite_suspend, ulite_resume);
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
@@ -795,22 +763,6 @@ static int ulite_probe(struct platform_device *pdev)
if (prop)
id = be32_to_cpup(prop);
#endif
- if (id < 0) {
- /* Look for a serialN alias */
- id = of_alias_get_id(pdev->dev.of_node, "serial");
- if (id < 0)
- id = 0;
- }
-
- if (!ulite_uart_driver.state) {
- dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
- ret = uart_register_driver(&ulite_uart_driver);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register driver\n");
- return ret;
- }
- }
-
pdata = devm_kzalloc(&pdev->dev, sizeof(struct uartlite_data),
GFP_KERNEL);
if (!pdata)
@@ -836,22 +788,24 @@ static int ulite_probe(struct platform_device *pdev)
pdata->clk = NULL;
}
- pdata->ulite_uart_driver = &ulite_uart_driver;
ret = clk_prepare_enable(pdata->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare clock\n");
return ret;
}
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ if (!ulite_uart_driver.state) {
+ dev_dbg(&pdev->dev, "uartlite: calling uart_register_driver()\n");
+ ret = uart_register_driver(&ulite_uart_driver);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register driver\n");
+ return ret;
+ }
+ }
ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ clk_disable(pdata->clk);
return ret;
}
@@ -860,14 +814,9 @@ static int ulite_remove(struct platform_device *pdev)
{
struct uart_port *port = dev_get_drvdata(&pdev->dev);
struct uartlite_data *pdata = port->private_data;
- int rc;
- clk_unprepare(pdata->clk);
- rc = ulite_release(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- return rc;
+ clk_disable_unprepare(pdata->clk);
+ return ulite_release(&pdev->dev);
}
/* work with hotplug and coldplug */
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 802c1210558f..d9f54c7d94f2 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -87,6 +87,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
+#include <linux/ppp-ioctl.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -1344,9 +1345,12 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
if (!tty->port)
tty->port = driver->ports[idx];
- WARN_RATELIMIT(!tty->port,
- "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
- __func__, tty->driver->name);
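+	/* fail the open with -EINVAL instead of crashing later on a NULL tty->port */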
+ if (WARN_RATELIMIT(!tty->port,
+ "%s: %s driver does not set tty->port. This would crash the kernel. Fix the driver!\n",
+ __func__, tty->driver->name)) {
+ retval = -EINVAL;
+ goto err_release_lock;
+ }
retval = tty_ldisc_lock(tty, 5 * HZ);
if (retval)
@@ -1924,7 +1928,6 @@ EXPORT_SYMBOL_GPL(tty_kopen);
/**
* tty_open_by_driver - open a tty device
* @device: dev_t of device to open
- * @inode: inode of device file
* @filp: file pointer to tty
*
* Performs the driver lookup, checks for a reopen, or otherwise
@@ -1937,7 +1940,7 @@ EXPORT_SYMBOL_GPL(tty_kopen);
* - concurrent tty driver removal w/ lookup
* - concurrent tty removal from driver table
*/
-static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+static struct tty_struct *tty_open_by_driver(dev_t device,
struct file *filp)
{
struct tty_struct *tty;
@@ -2029,7 +2032,7 @@ retry_open:
tty = tty_open_current_tty(device, filp);
if (!tty)
- tty = tty_open_by_driver(device, inode, filp);
+ tty = tty_open_by_driver(device, filp);
if (IS_ERR(tty)) {
tty_free_file(filp);
@@ -2755,6 +2758,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
int retval = -ENOIOCTLCMD;
switch (cmd) {
+ case TIOCOUTQ:
case TIOCSTI:
case TIOCGWINSZ:
case TIOCSWINSZ:
@@ -2810,6 +2814,9 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
#endif
case TIOCGSOFTCAR:
case TIOCSSOFTCAR:
+
+ case PPPIOCGCHAN:
+ case PPPIOCGUNIT:
return tty_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
case TIOCCONS:
case TIOCEXCL:
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4c49f53afa3e..ec1f6a48121e 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -156,12 +156,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* takes tty_ldiscs_lock to guard against ldisc races
*/
-#if defined(CONFIG_LDISC_AUTOLOAD)
- #define INITIAL_AUTOLOAD_STATE 1
-#else
- #define INITIAL_AUTOLOAD_STATE 0
-#endif
-static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
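+/* IS_BUILTIN() evaluates to 1 only when CONFIG_LDISC_AUTOLOAD=y, matching the old #ifdef */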
+static int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 515fc095e3b4..15d33fa0c925 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1491,7 +1491,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
kbd_rawcode(value);
- if (event_type == EV_KEY)
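+	/* keycodes above KEY_MAX would index past the keymap/key_down bitmaps (assumed reasoning) */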
+ if (event_type == EV_KEY && event_code <= KEY_MAX)
kbd_keycode(event_code, value, HW_RAW(handle->dev));
spin_unlock(&kbd_event_lock);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 1f042346e722..778f83ea2249 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -456,6 +456,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
size_t ret;
char *con_buf;
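+	/* writes through the unicode (vcsu) devices are not supported */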
+ if (use_unicode(inode))
+ return -EOPNOTSUPP;
+
con_buf = (char *) __get_free_page(GFP_KERNEL);
if (!con_buf)
return -ENOMEM;
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index ebcf1434e296..81c88f7bbbcb 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -151,8 +151,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
int i;
if (pdev->dev.of_node) {
- int irq;
-
/* alloc uioinfo for one device */
uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
if (!uioinfo) {
@@ -163,13 +161,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
pdev->dev.of_node);
uioinfo->version = "devicetree";
-
- /* Multiple IRQs are not supported */
- irq = platform_get_irq(pdev, 0);
- if (irq == -ENXIO)
- uioinfo->irq = UIO_IRQ_NONE;
- else
- uioinfo->irq = irq;
}
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
@@ -199,8 +190,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
mutex_init(&priv->alloc_lock);
if (!uioinfo->irq) {
+ /* Multiple IRQs are not supported */
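+		/* for DT nodes, -ENXIO just means the node has no interrupt property */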
ret = platform_get_irq(pdev, 0);
- if (ret < 0)
+ if (ret == -ENXIO && pdev->dev.of_node)
+ ret = UIO_IRQ_NONE;
+ else if (ret < 0)
goto bad1;
uioinfo->irq = ret;
}
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index d0331613a355..2a1e89d12ed9 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -43,4 +43,14 @@ config USB_CDNS3_PCI_WRAP
If you choose to build this driver as module it will
be dynamically linked and module will be called cdns3-pci.ko
+config USB_CDNS3_TI
+ tristate "Cadence USB3 support on TI platforms"
+ depends on ARCH_K3 || COMPILE_TEST
+ default USB_CDNS3
+ help
+	  Say 'Y' or 'M' here if you are building for Texas Instruments
+	  platforms that contain the Cadence USB3 controller core.
+
+ e.g. J721e.
+
endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index a703547350bb..948e6b88d1a9 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -14,3 +14,4 @@ endif
cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
new file mode 100644
index 000000000000..c6a79ca15858
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cdns3-ti.c - TI-specific glue layer for the Cadence USB controller
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+/* USB Wrapper register offsets */
+#define USBSS_PID 0x0
+#define USBSS_W1 0x4
+#define USBSS_STATIC_CONFIG 0x8
+#define USBSS_PHY_TEST 0xc
+#define USBSS_DEBUG_CTRL 0x10
+#define USBSS_DEBUG_INFO 0x14
+#define USBSS_DEBUG_LINK_STATE 0x18
+#define USBSS_DEVICE_CTRL 0x1c
+
+/* Wrapper 1 register bits */
+#define USBSS_W1_PWRUP_RST BIT(0)
+#define USBSS_W1_OVERCURRENT_SEL BIT(8)
+#define USBSS_W1_MODESTRAP_SEL BIT(9)
+#define USBSS_W1_OVERCURRENT BIT(16)
+#define USBSS_W1_MODESTRAP_MASK GENMASK(18, 17)
+#define USBSS_W1_MODESTRAP_SHIFT 17
+#define USBSS_W1_USB2_ONLY BIT(19)
+
+/* Static config register bits */
+#define USBSS1_STATIC_PLL_REF_SEL_MASK GENMASK(8, 5)
+#define USBSS1_STATIC_PLL_REF_SEL_SHIFT 5
+#define USBSS1_STATIC_LOOPBACK_MODE_MASK GENMASK(4, 3)
+#define USBSS1_STATIC_LOOPBACK_MODE_SHIFT 3
+#define USBSS1_STATIC_VBUS_SEL_MASK GENMASK(2, 1)
+#define USBSS1_STATIC_VBUS_SEL_SHIFT 1
+#define USBSS1_STATIC_LANE_REVERSE BIT(0)
+
+/* Modestrap modes */
+enum modestrap_mode {
+	USBSS_MODESTRAP_MODE_NONE,
+	USBSS_MODESTRAP_MODE_HOST,
+	USBSS_MODESTRAP_MODE_PERIPHERAL,
+};
+
+struct cdns_ti {
+ struct device *dev;
+ void __iomem *usbss;
+	unsigned int usb2_only:1;
+	unsigned int vbus_divider:1;
+ struct clk *usb2_refclk;
+ struct clk *lpm_clk;
+};
+
+static const int cdns_ti_rate_table[] = {	/* in kHz */
+ 9600,
+ 10000,
+ 12000,
+ 19200,
+ 20000,
+ 24000,
+ 25000,
+ 26000,
+ 38400,
+ 40000,
+ 58000,
+ 50000,
+ 52000,
+};
+
+static inline u32 cdns_ti_readl(struct cdns_ti *data, u32 offset)
+{
+ return readl(data->usbss + offset);
+}
+
+static inline void cdns_ti_writel(struct cdns_ti *data, u32 offset, u32 value)
+{
+ writel(value, data->usbss + offset);
+}
+
+static int cdns_ti_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = pdev->dev.of_node;
+ struct cdns_ti *data;
+ int error;
+ u32 reg;
+ int rate_code, i;
+ unsigned long rate;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, data);
+
+ data->dev = dev;
+
+ data->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->usbss)) {
+ dev_err(dev, "can't map IOMEM resource\n");
+ return PTR_ERR(data->usbss);
+ }
+
+ data->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(data->usb2_refclk)) {
+ dev_err(dev, "can't get usb2_refclk\n");
+ return PTR_ERR(data->usb2_refclk);
+ }
+
+ data->lpm_clk = devm_clk_get(dev, "lpm");
+ if (IS_ERR(data->lpm_clk)) {
+ dev_err(dev, "can't get lpm_clk\n");
+ return PTR_ERR(data->lpm_clk);
+ }
+
+ rate = clk_get_rate(data->usb2_refclk);
+	rate /= 1000;	/* to kHz */
+ for (i = 0; i < ARRAY_SIZE(cdns_ti_rate_table); i++) {
+ if (cdns_ti_rate_table[i] == rate)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cdns_ti_rate_table)) {
+ dev_err(dev, "unsupported usb2_refclk rate: %lu KHz\n", rate);
+ return -EINVAL;
+ }
+
+ rate_code = i;
+
+ pm_runtime_enable(dev);
+ error = pm_runtime_get_sync(dev);
+ if (error < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
+ goto err_get;
+ }
+
+ /* assert RESET */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ reg &= ~USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* set static config */
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+ reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
+ reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
+
+ reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ if (data->vbus_divider)
+ reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
+
+ cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+
+ /* set USB2_ONLY mode if requested */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
+ if (data->usb2_only)
+ reg |= USBSS_W1_USB2_ONLY;
+
+ /* set default modestrap */
+ reg |= USBSS_W1_MODESTRAP_SEL;
+ reg &= ~USBSS_W1_MODESTRAP_MASK;
+ reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* de-assert RESET */
+ reg |= USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ error = of_platform_populate(node, NULL, NULL, dev);
+ if (error) {
+ dev_err(dev, "failed to create children: %d\n", error);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_put_sync(data->dev);
+err_get:
+ pm_runtime_disable(data->dev);
+
+ return error;
+}
+
+static int cdns_ti_remove_core(struct device *dev, void *c)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+
+ return 0;
+}
+
+static int cdns_ti_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ device_for_each_child(dev, NULL, cdns_ti_remove_core);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_ti_of_match[] = {
+ { .compatible = "ti,j721e-usb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
+
+static struct platform_driver cdns_ti_driver = {
+ .probe = cdns_ti_probe,
+ .remove = cdns_ti_remove,
+ .driver = {
+ .name = "cdns3-ti",
+ .of_match_table = cdns_ti_of_match,
+ },
+};
+
+module_platform_driver(cdns_ti_driver);
+
+MODULE_ALIAS("platform:cdns3-ti");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 TI Glue Layer");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index df8812c30640..d8e7eb2f97b9 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -274,11 +274,14 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
switch (event) {
case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
- ret = pinctrl_select_state(data->pinctrl,
- data->pinctrl_hsic_active);
- if (ret)
- dev_err(dev, "hsic_active select failed, err=%d\n",
- ret);
+ if (data->pinctrl) {
+ ret = pinctrl_select_state(data->pinctrl,
+ data->pinctrl_hsic_active);
+ if (ret)
+ dev_err(dev,
+ "hsic_active select failed, err=%d\n",
+ ret);
+ }
break;
case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
ret = imx_usbmisc_hsic_set_connect(data->usbmisc_data);
@@ -306,7 +309,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct pinctrl_state *pinctrl_hsic_idle;
of_id = of_match_device(ci_hdrc_imx_dt_ids, dev);
if (!of_id)
@@ -330,12 +332,42 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
pdata.flags |= CI_HDRC_IMX_IS_HSIC;
data->usbmisc_data->hsic = 1;
data->pinctrl = devm_pinctrl_get(dev);
- if (IS_ERR(data->pinctrl)) {
- dev_err(dev, "pinctrl get failed, err=%ld\n",
+ if (PTR_ERR(data->pinctrl) == -ENODEV)
+ data->pinctrl = NULL;
+ else if (IS_ERR(data->pinctrl)) {
+ if (PTR_ERR(data->pinctrl) != -EPROBE_DEFER)
+ dev_err(dev, "pinctrl get failed, err=%ld\n",
PTR_ERR(data->pinctrl));
return PTR_ERR(data->pinctrl);
}
+ data->hsic_pad_regulator =
+ devm_regulator_get_optional(dev, "hsic");
+ if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
+		/* no pad regulator is needed */
+ data->hsic_pad_regulator = NULL;
+ } else if (IS_ERR(data->hsic_pad_regulator)) {
+ if (PTR_ERR(data->hsic_pad_regulator) != -EPROBE_DEFER)
+ dev_err(dev,
+ "Get HSIC pad regulator error: %ld\n",
+ PTR_ERR(data->hsic_pad_regulator));
+ return PTR_ERR(data->hsic_pad_regulator);
+ }
+
+ if (data->hsic_pad_regulator) {
+ ret = regulator_enable(data->hsic_pad_regulator);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable HSIC pad regulator\n");
+ return ret;
+ }
+ }
+ }
+
+ /* HSIC pinctrl handling */
+ if (data->pinctrl) {
+ struct pinctrl_state *pinctrl_hsic_idle;
+
pinctrl_hsic_idle = pinctrl_lookup_state(data->pinctrl, "idle");
if (IS_ERR(pinctrl_hsic_idle)) {
dev_err(dev,
@@ -358,27 +390,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
PTR_ERR(data->pinctrl_hsic_active));
return PTR_ERR(data->pinctrl_hsic_active);
}
-
- data->hsic_pad_regulator = devm_regulator_get(dev, "hsic");
- if (PTR_ERR(data->hsic_pad_regulator) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) {
- /* no pad regualator is needed */
- data->hsic_pad_regulator = NULL;
- } else if (IS_ERR(data->hsic_pad_regulator)) {
- dev_err(dev, "Get HSIC pad regulator error: %ld\n",
- PTR_ERR(data->hsic_pad_regulator));
- return PTR_ERR(data->hsic_pad_regulator);
- }
-
- if (data->hsic_pad_regulator) {
- ret = regulator_enable(data->hsic_pad_regulator);
- if (ret) {
- dev_err(dev,
- "Failed to enable HSIC pad regulator\n");
- return ret;
- }
- }
}
if (pdata.flags & CI_HDRC_PMQOS)
@@ -433,6 +444,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
goto err_clk;
}
+ if (data->usbmisc_data) {
+ if (!IS_ERR(pdata.id_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_id = 1;
+
+ if (!IS_ERR(pdata.vbus_extcon.edev) ||
+ of_property_read_bool(np, "usb-role-switch"))
+ data->usbmisc_data->ext_vbus = 1;
+ }
+
ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
dev_err(dev, "usbmisc post failed, ret=%d\n", ret);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c842e03f8767..de2aac9a2868 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -22,6 +22,8 @@ struct imx_usbmisc_data {
unsigned int evdo:1; /* set external vbus divider option */
unsigned int ulpi:1; /* connected to an ULPI phy */
unsigned int hsic:1; /* HSIC controller */
+ unsigned int ext_id:1; /* ID from external event */
+ unsigned int ext_vbus:1; /* VBUS from external event */
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 12025358bb3c..0c9911d44ee5 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -24,35 +24,23 @@ struct tegra_udc_soc_info {
unsigned long flags;
};
-static const struct tegra_udc_soc_info tegra20_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra30_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra114_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
-};
-
-static const struct tegra_udc_soc_info tegra124_udc_soc_info = {
+static const struct tegra_udc_soc_info tegra_udc_soc_info = {
.flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
};
static const struct of_device_id tegra_udc_of_match[] = {
{
.compatible = "nvidia,tegra20-udc",
- .data = &tegra20_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra30-udc",
- .data = &tegra30_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra114-udc",
- .data = &tegra114_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
.compatible = "nvidia,tegra124-udc",
- .data = &tegra124_udc_soc_info,
+ .data = &tegra_udc_soc_info,
}, {
/* sentinel */
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 98ee575ee500..dce5db41501c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -683,7 +683,7 @@ static int ci_get_platdata(struct device *dev,
if (platdata->dr_mode != USB_DR_MODE_PERIPHERAL) {
/* Get the vbus regulator */
- platdata->reg_vbus = devm_regulator_get(dev, "vbus");
+ platdata->reg_vbus = devm_regulator_get_optional(dev, "vbus");
if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index fcc91a338875..e0376ee646ad 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -342,7 +342,7 @@ DEFINE_SHOW_ATTRIBUTE(ci_registers);
*/
void dbg_create_files(struct ci_hdrc *ci)
{
- ci->debugfs = debugfs_create_dir(dev_name(ci->dev), NULL);
+ ci->debugfs = debugfs_create_dir(dev_name(ci->dev), usb_debug_root);
debugfs_create_file("device", S_IRUGO, ci->debugfs, ci,
&ci_device_fops);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8f18e7b6cadf..ffaf46f5d062 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1524,42 +1524,53 @@ static const struct usb_ep_ops usb_ep_ops = {
/******************************************************************************
* GADGET block
*****************************************************************************/
+/*
+ * ci_hdrc_gadget_connect: the caller must ensure the gadget driver is bound
+ */
+static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
+{
+ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+ if (is_active) {
+ pm_runtime_get_sync(&_gadget->dev);
+ hw_device_reset(ci);
+ spin_lock_irqsave(&ci->lock, flags);
+ if (ci->driver) {
+ hw_device_state(ci, ci->ep0out->qh.dma);
+ usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+ usb_udc_vbus_handler(_gadget, true);
+ }
+ spin_unlock_irqrestore(&ci->lock, flags);
+ } else {
+ usb_udc_vbus_handler(_gadget, false);
+ if (ci->driver)
+ ci->driver->disconnect(&ci->gadget);
+ hw_device_state(ci, 0);
+ if (ci->platdata->notify_event)
+ ci->platdata->notify_event(ci,
+ CI_HDRC_CONTROLLER_STOPPED_EVENT);
+ _gadget_stop_activity(&ci->gadget);
+ pm_runtime_put_sync(&_gadget->dev);
+ usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
+ }
+}
+
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
- int gadget_ready = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
- if (ci->driver)
- gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
if (ci->usb_phy)
usb_phy_set_charger_state(ci->usb_phy, is_active ?
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
- if (gadget_ready) {
- if (is_active) {
- pm_runtime_get_sync(&_gadget->dev);
- hw_device_reset(ci);
- hw_device_state(ci, ci->ep0out->qh.dma);
- usb_gadget_set_state(_gadget, USB_STATE_POWERED);
- usb_udc_vbus_handler(_gadget, true);
- } else {
- usb_udc_vbus_handler(_gadget, false);
- if (ci->driver)
- ci->driver->disconnect(&ci->gadget);
- hw_device_state(ci, 0);
- if (ci->platdata->notify_event)
- ci->platdata->notify_event(ci,
- CI_HDRC_CONTROLLER_STOPPED_EVENT);
- _gadget_stop_activity(&ci->gadget);
- pm_runtime_put_sync(&_gadget->dev);
- usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
- }
- }
+ if (ci->driver)
+ ci_hdrc_gadget_connect(_gadget, is_active);
return 0;
}
@@ -1612,7 +1623,7 @@ static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
@@ -1785,18 +1796,10 @@ static int ci_udc_start(struct usb_gadget *gadget,
return retval;
}
- pm_runtime_get_sync(&ci->gadget.dev);
- if (ci->vbus_active) {
- hw_device_reset(ci);
- } else {
+ if (ci->vbus_active)
+ ci_hdrc_gadget_connect(gadget, 1);
+ else
usb_udc_vbus_handler(&ci->gadget, false);
- pm_runtime_put_sync(&ci->gadget.dev);
- return retval;
- }
-
- retval = hw_device_state(ci, ci->ep0out->qh.dma);
- if (retval)
- pm_runtime_put_sync(&ci->gadget.dev);
return retval;
}
@@ -1826,6 +1829,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
+ ci->driver = NULL;
if (ci->vbus_active) {
hw_device_state(ci, 0);
@@ -1838,7 +1842,6 @@ static int ci_udc_stop(struct usb_gadget *gadget)
pm_runtime_put(&ci->gadget.dev);
}
- ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
ci_udc_stop_for_otg_fsm(ci);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 078c1fdce493..e81e33c26e6c 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -100,6 +100,9 @@
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
+#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+ MX6_BM_ID_WAKEUP)
+
struct usbmisc_ops {
/* It's called once when probe a usb device */
int (*init)(struct imx_usbmisc_data *data);
@@ -330,14 +333,25 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
return 0;
}
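+
+/*
+ * If the ID or VBUS signal is supplied by an external source (extcon or
+ * usb-role-switch), the corresponding controller wakeup source is left
+ * disabled.
+ */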
+static u32 usbmisc_wakeup_setting(struct imx_usbmisc_data *data)
+{
+ u32 wakeup_setting = MX6_USB_OTG_WAKEUP_BITS;
+
+ if (data->ext_id)
+ wakeup_setting &= ~MX6_BM_ID_WAKEUP;
+
+ if (data->ext_vbus)
+ wakeup_setting &= ~MX6_BM_VBUS_WAKEUP;
+
+ return wakeup_setting;
+}
+
static int usbmisc_imx6q_set_wakeup
(struct imx_usbmisc_data *data, bool enabled)
{
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
int ret = 0;
if (data->index > 3)
@@ -346,11 +360,12 @@ static int usbmisc_imx6q_set_wakeup
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base + data->index * 4);
if (enabled) {
- val |= wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
} else {
if (val & MX6_BM_WAKEUP_INTR)
pr_debug("wakeup int at ci_hdrc.%d\n", data->index);
- val &= ~wakeup_setting;
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
}
writel(val, usbmisc->base + data->index * 4);
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -547,17 +562,17 @@ static int usbmisc_imx7d_set_wakeup
struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
unsigned long flags;
u32 val;
- u32 wakeup_setting = (MX6_BM_WAKEUP_ENABLE |
- MX6_BM_VBUS_WAKEUP | MX6_BM_ID_WAKEUP);
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(usbmisc->base);
if (enabled) {
- writel(val | wakeup_setting, usbmisc->base);
+ val &= ~MX6_USB_OTG_WAKEUP_BITS;
+ val |= usbmisc_wakeup_setting(data);
+ writel(val, usbmisc->base);
} else {
if (val & MX6_BM_WAKEUP_INTR)
dev_dbg(data->dev, "wakeup int\n");
- writel(val & ~wakeup_setting, usbmisc->base);
+ writel(val & ~MX6_USB_OTG_WAKEUP_BITS, usbmisc->base);
}
spin_unlock_irqrestore(&usbmisc->lock, flags);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 70afb2ca1eab..e3db6fbeadef 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -734,7 +734,7 @@ static const struct file_operations wdm_fops = {
.release = wdm_release,
.poll = wdm_poll,
.unlocked_ioctl = wdm_ioctl,
- .compat_ioctl = wdm_ioctl,
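+	/* wdm ioctl arguments are pointers only, so compat_ptr() translation suffices (assumed) */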
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index dcd7066ffba2..ffc9c6fdd7e1 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -2217,9 +2217,7 @@ static const struct file_operations fops = {
.release = usbtmc_release,
.flush = usbtmc_flush,
.unlocked_ioctl = usbtmc_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = usbtmc_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.fasync = usbtmc_fasync,
.poll = usbtmc_poll,
.llseek = default_llseek,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1ac1095bfeac..5f40117e68e7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -805,10 +805,10 @@ int usb_get_configuration(struct usb_device *dev)
{
struct device *ddev = &dev->dev;
int ncfg = dev->descriptor.bNumConfigurations;
- int result = -ENOMEM;
unsigned int cfgno, length;
unsigned char *bigbuffer;
struct usb_config_descriptor *desc;
+ int result;
if (ncfg > USB_MAXCONFIG) {
dev_warn(ddev, "too many configurations: %d, "
@@ -824,16 +824,16 @@ int usb_get_configuration(struct usb_device *dev)
length = ncfg * sizeof(struct usb_host_config);
dev->config = kzalloc(length, GFP_KERNEL);
if (!dev->config)
- goto err2;
+ return -ENOMEM;
length = ncfg * sizeof(char *);
dev->rawdescriptors = kzalloc(length, GFP_KERNEL);
if (!dev->rawdescriptors)
- goto err2;
+ return -ENOMEM;
desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL);
if (!desc)
- goto err2;
+ return -ENOMEM;
for (cfgno = 0; cfgno < ncfg; cfgno++) {
/* We grab just the first descriptor so we know how long
@@ -895,9 +895,7 @@ int usb_get_configuration(struct usb_device *dev)
err:
kfree(desc);
dev->descriptor.bNumConfigurations = cfgno;
-err2:
- if (result == -ENOMEM)
- dev_err(ddev, "out of memory\n");
+
return result;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 3f899552f6e3..12bb5722b420 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -764,8 +764,15 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
- else
+ else {
+ unsigned int old_suppress;
+
+ /* suppress uevents while claiming interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
+ }
if (err == 0)
set_bit(ifnum, &ps->ifclaimed);
return err;
@@ -785,7 +792,13 @@ static int releaseintf(struct usb_dev_state *ps, unsigned int ifnum)
if (!intf)
err = -ENOENT;
else if (test_and_clear_bit(ifnum, &ps->ifclaimed)) {
+ unsigned int old_suppress;
+
+ /* suppress uevents while releasing interface */
+ old_suppress = dev_get_uevent_suppress(&intf->dev);
+ dev_set_uevent_suppress(&intf->dev, 1);
usb_driver_release_interface(&usbfs_driver, intf);
+ dev_set_uevent_suppress(&intf->dev, old_suppress);
err = 0;
}
return err;
@@ -1550,10 +1563,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
uurb->buffer_length = le16_to_cpu(dr->wLength);
uurb->buffer += 8;
if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
- is_in = 1;
+ is_in = true;
uurb->endpoint |= USB_DIR_IN;
} else {
- is_in = 0;
+ is_in = false;
uurb->endpoint &= ~USB_DIR_IN;
}
if (is_in)
@@ -2685,18 +2698,6 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
-
- return ret;
-}
-#endif
-
/* No kernel lock - fine */
static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
@@ -2720,9 +2721,7 @@ const struct file_operations usbdev_file_operations = {
.read = usbdev_read,
.poll = usbdev_poll,
.unlocked_ioctl = usbdev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = usbdev_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = usbdev_mmap,
.open = usbdev_open,
.release = usbdev_release,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9e26b0143a59..9ae2a7a93df2 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -234,7 +234,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* UHCI */
int region;
- for (region = 0; region < PCI_ROM_RESOURCE; region++) {
+ for (region = 0; region < PCI_STD_NUM_BARS; region++) {
if (!(pci_resource_flags(dev, region) &
IORESOURCE_IO))
continue;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f225eaa98ff8..281568d464f9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1410,10 +1410,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (hcd->self.uses_pio_for_control)
return ret;
if (hcd_uses_dma(hcd)) {
- if (is_vmalloc_addr(urb->setup_packet)) {
- WARN_ONCE(1, "setup packet is not dma capable\n");
- return -EAGAIN;
- } else if (object_is_on_stack(urb->setup_packet)) {
+ if (object_is_on_stack(urb->setup_packet)) {
WARN_ONCE(1, "setup packet is on stack\n");
return -EAGAIN;
}
@@ -1479,9 +1476,6 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_PAGE;
- } else if (is_vmalloc_addr(urb->transfer_buffer)) {
- WARN_ONCE(1, "transfer buffer not dma capable\n");
- ret = -EAGAIN;
} else if (object_is_on_stack(urb->transfer_buffer)) {
WARN_ONCE(1, "transfer buffer is on stack\n");
ret = -EAGAIN;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 236313f41f4a..1709895387b9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4930,6 +4930,91 @@ hub_power_remaining(struct usb_hub *hub)
return remaining;
}
+
+static int descriptors_changed(struct usb_device *udev,
+ struct usb_device_descriptor *old_device_descriptor,
+ struct usb_host_bos *old_bos)
+{
+ int changed = 0;
+ unsigned index;
+ unsigned serial_len = 0;
+ unsigned len;
+ unsigned old_length;
+ int length;
+ char *buf;
+
+ if (memcmp(&udev->descriptor, old_device_descriptor,
+ sizeof(*old_device_descriptor)) != 0)
+ return 1;
+
+ if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+ return 1;
+ if (udev->bos) {
+ len = le16_to_cpu(udev->bos->desc->wTotalLength);
+ if (len != le16_to_cpu(old_bos->desc->wTotalLength))
+ return 1;
+ if (memcmp(udev->bos->desc, old_bos->desc, len))
+ return 1;
+ }
+
+ /* Since the idVendor, idProduct, and bcdDevice values in the
+ * device descriptor haven't changed, we will assume the
+ * Manufacturer and Product strings haven't changed either.
+ * But the SerialNumber string could be different (e.g., a
+ * different flash card of the same brand).
+ */
+ if (udev->serial)
+ serial_len = strlen(udev->serial) + 1;
+
+ len = serial_len;
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ len = max(len, old_length);
+ }
+
+ buf = kmalloc(len, GFP_NOIO);
+ if (!buf)
+ /* assume the worst */
+ return 1;
+
+ for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
+ old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
+ length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
+ old_length);
+ if (length != old_length) {
+ dev_dbg(&udev->dev, "config index %d, error %d\n",
+ index, length);
+ changed = 1;
+ break;
+ }
+ if (memcmp(buf, udev->rawdescriptors[index], old_length)
+ != 0) {
+ dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
+ index,
+ ((struct usb_config_descriptor *) buf)->
+ bConfigurationValue);
+ changed = 1;
+ break;
+ }
+ }
+
+ if (!changed && serial_len) {
+ length = usb_string(udev, udev->descriptor.iSerialNumber,
+ buf, serial_len);
+ if (length + 1 != serial_len) {
+ dev_dbg(&udev->dev, "serial string error %d\n",
+ length);
+ changed = 1;
+ } else if (memcmp(buf, udev->serial, length) != 0) {
+ dev_dbg(&udev->dev, "serial string changed\n");
+ changed = 1;
+ }
+ }
+
+ kfree(buf);
+ return changed;
+}
+
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
@@ -5167,7 +5252,9 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
+ struct usb_device_descriptor descriptor;
int status = -ENODEV;
+ int retval;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
@@ -5188,7 +5275,30 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
if ((portstatus & USB_PORT_STAT_CONNECTION) && udev &&
udev->state != USB_STATE_NOTATTACHED) {
if (portstatus & USB_PORT_STAT_ENABLE) {
- status = 0; /* Nothing to do */
+ /*
+ * USB-3 connections are initialized automatically by
+ * the host controller hardware. Therefore check for
+ * changed device descriptors before resuscitating the
+ * device.
+ */
+ descriptor = udev->descriptor;
+ retval = usb_get_device_descriptor(udev,
+ sizeof(udev->descriptor));
+ if (retval < 0) {
+ dev_dbg(&udev->dev,
+ "can't read device descriptor %d\n",
+ retval);
+ } else {
+ if (descriptors_changed(udev, &descriptor,
+ udev->bos)) {
+ dev_dbg(&udev->dev,
+ "device descriptor has changed\n");
+ /* for disconnect() calls */
+ udev->descriptor = descriptor;
+ } else {
+ status = 0; /* Nothing to do */
+ }
+ }
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
udev->persist_enabled) {
@@ -5550,90 +5660,6 @@ void usb_hub_cleanup(void)
usb_deregister(&hub_driver);
} /* usb_hub_cleanup() */
-static int descriptors_changed(struct usb_device *udev,
- struct usb_device_descriptor *old_device_descriptor,
- struct usb_host_bos *old_bos)
-{
- int changed = 0;
- unsigned index;
- unsigned serial_len = 0;
- unsigned len;
- unsigned old_length;
- int length;
- char *buf;
-
- if (memcmp(&udev->descriptor, old_device_descriptor,
- sizeof(*old_device_descriptor)) != 0)
- return 1;
-
- if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
- return 1;
- if (udev->bos) {
- len = le16_to_cpu(udev->bos->desc->wTotalLength);
- if (len != le16_to_cpu(old_bos->desc->wTotalLength))
- return 1;
- if (memcmp(udev->bos->desc, old_bos->desc, len))
- return 1;
- }
-
- /* Since the idVendor, idProduct, and bcdDevice values in the
- * device descriptor haven't changed, we will assume the
- * Manufacturer and Product strings haven't changed either.
- * But the SerialNumber string could be different (e.g., a
- * different flash card of the same brand).
- */
- if (udev->serial)
- serial_len = strlen(udev->serial) + 1;
-
- len = serial_len;
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- len = max(len, old_length);
- }
-
- buf = kmalloc(len, GFP_NOIO);
- if (!buf)
- /* assume the worst */
- return 1;
-
- for (index = 0; index < udev->descriptor.bNumConfigurations; index++) {
- old_length = le16_to_cpu(udev->config[index].desc.wTotalLength);
- length = usb_get_descriptor(udev, USB_DT_CONFIG, index, buf,
- old_length);
- if (length != old_length) {
- dev_dbg(&udev->dev, "config index %d, error %d\n",
- index, length);
- changed = 1;
- break;
- }
- if (memcmp(buf, udev->rawdescriptors[index], old_length)
- != 0) {
- dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
- index,
- ((struct usb_config_descriptor *) buf)->
- bConfigurationValue);
- changed = 1;
- break;
- }
- }
-
- if (!changed && serial_len) {
- length = usb_string(udev, udev->descriptor.iSerialNumber,
- buf, serial_len);
- if (length + 1 != serial_len) {
- dev_dbg(&udev->dev, "serial string error %d\n",
- length);
- changed = 1;
- } else if (memcmp(buf, udev->serial, length) != 0) {
- dev_dbg(&udev->dev, "serial string changed\n");
- changed = 1;
- }
- }
-
- kfree(buf);
- return changed;
-}
-
/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
@@ -5814,7 +5840,7 @@ re_enumerate_no_bos:
/**
* usb_reset_device - warn interface drivers and perform a USB port reset
- * @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
+ * @udev: device to reset (not in NOTATTACHED state)
*
* Warns all drivers bound to registered interfaces (using their pre_reset
* method), performs the port reset, and then lets the drivers know that
@@ -5842,8 +5868,7 @@ int usb_reset_device(struct usb_device *udev)
struct usb_host_config *config = udev->actconfig;
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
- if (udev->state == USB_STATE_NOTATTACHED ||
- udev->state == USB_STATE_SUSPENDED) {
+ if (udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&udev->dev, "device reset not allowed in state %d\n",
udev->state);
return -EINVAL;
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8e41d70fd298..78a4925aa118 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -524,7 +524,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 50)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
__func__);
return -EBUSY;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index d08d070a0fb6..968e03b89d04 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -134,7 +134,7 @@ struct dwc2_hsotg_req;
* @target_frame: Targeted frame num to setup next ISOC transfer
* @frame_overrun: Indicates SOF number overrun in DSTS
*
- * This is the driver's state for each registered enpoint, allowing it
+ * This is the driver's state for each registered endpoint, allowing it
* to keep track of transactions that need doing. Each endpoint has a
* lock to protect the state, to try and avoid using an overall lock
* for the host controller as much as possible.
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 7f62f4cdc265..b8f2790abf91 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -770,7 +770,7 @@ int dwc2_debugfs_init(struct dwc2_hsotg *hsotg)
int ret;
struct dentry *root;
- root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
+ root = debugfs_create_dir(dev_name(hsotg->dev), usb_debug_root);
hsotg->debug_root = root;
debugfs_create_file("params", 0444, root, hsotg, &params_fops);
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 556a876c7896..206caa0ea1c6 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -97,24 +97,24 @@ config USB_DWC3_KEYSTONE
Say 'Y' or 'M' here if you have one such device
config USB_DWC3_MESON_G12A
- tristate "Amlogic Meson G12A Platforms"
- depends on OF && COMMON_CLK
- depends on ARCH_MESON || COMPILE_TEST
- default USB_DWC3
- select USB_ROLE_SWITCH
+ tristate "Amlogic Meson G12A Platforms"
+ depends on OF && COMMON_CLK
+ depends on ARCH_MESON || COMPILE_TEST
+ default USB_DWC3
+ select USB_ROLE_SWITCH
select REGMAP_MMIO
- help
- Support USB2/3 functionality in Amlogic G12A platforms.
- Say 'Y' or 'M' if you have one such device.
+ help
+ Support USB2/3 functionality in Amlogic G12A platforms.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_OF_SIMPLE
- tristate "Generic OF Simple Glue Layer"
- depends on OF && COMMON_CLK
- default USB_DWC3
- help
- Support USB2/3 functionality in simple SoC integrations.
- Currently supports Xilinx and Qualcomm DWC USB3 IP.
- Say 'Y' or 'M' if you have one such device.
+ tristate "Generic OF Simple Glue Layer"
+ depends on OF && COMMON_CLK
+ default USB_DWC3
+ help
+ Support USB2/3 functionality in simple SoC integrations.
+ Currently supports Xilinx and Qualcomm DWC USB3 IP.
+ Say 'Y' or 'M' if you have one such device.
config USB_DWC3_ST
tristate "STMicroelectronics Platforms"
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 97d6ae3c4df2..f561c6c9e8a9 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -566,8 +566,11 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc)
*/
static int dwc3_phy_setup(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
/*
@@ -585,6 +588,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
if (dwc->u2ss_inp3_quirk)
reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
@@ -668,6 +679,14 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->revision > DWC3_REVISION_194A)
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ /*
+ * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after
+ * power-on reset, and it can be set after core initialization, which is
+ * after device soft-reset during initialization.
+ */
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD)
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+
if (dwc->dis_u2_susphy_quirk)
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -902,9 +921,12 @@ static void dwc3_set_incr_burst_type(struct dwc3 *dwc)
*/
static int dwc3_core_init(struct dwc3 *dwc)
{
+ unsigned int hw_mode;
u32 reg;
int ret;
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
/*
* Write Linux Version Code to our GUID register so it's easy to figure
* out which kernel version a bug was found.
@@ -940,6 +962,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0a;
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
+ dwc->revision > DWC3_REVISION_194A) {
+ if (!dwc->dis_u3_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ }
+
+ if (!dwc->dis_u2_susphy_quirk) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+ }
+
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
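
Taken together, the dwc3 core.c hunks encode an ordering rule for dual-role (DRD) controllers: GUSB2PHYCFG.SUSPHY and GUSB3PIPECTL.SUSPENDENABLE must stay clear from power-on reset through core initialization, and may only be set once the device soft-reset in dwc3_core_init() has completed. The clear-early/set-late read-modify-write shape, as a self-contained sketch (DEMO_SUSPHY is a stand-in bit, not the dwc3 register layout):

#include <linux/bits.h>
#include <linux/io.h>

#define DEMO_SUSPHY	BIT(6)	/* illustrative suspend-PHY enable bit */

static void demo_phy_setup(void __iomem *phycfg, bool is_drd)
{
	u32 reg = readl(phycfg);

	if (is_drd)
		reg &= ~DEMO_SUSPHY;	/* must stay clear until core init is done */
	writel(reg, phycfg);
}

static void demo_post_core_init(void __iomem *phycfg, bool is_drd, bool quirk)
{
	u32 reg;

	if (!is_drd || quirk)
		return;

	reg = readl(phycfg);
	reg |= DEMO_SUSPHY;		/* safe again after the device soft-reset */
	writel(reg, phycfg);
}
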
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 9baabed87d61..e56beb9d1e36 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -112,7 +112,7 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
@@ -141,7 +141,7 @@ dwc3_gadget_hs_link_string(enum dwc3_link_state link_state)
case DWC3_LINK_STATE_RESUME:
return "Resume";
default:
- return "UNKNOWN link state\n";
+ return "UNKNOWN link state";
}
}
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 1c792710348f..4fe8b1e1485c 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -916,7 +916,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
- root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->root = root;
debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index bdac3e7d7b18..e64754be47b4 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -110,12 +110,9 @@ err_resetc_put:
return ret;
}
-static int dwc3_of_simple_remove(struct platform_device *pdev)
+static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
{
- struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
-
- of_platform_depopulate(dev);
+ of_platform_depopulate(simple->dev);
clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
clk_bulk_put_all(simple->num_clocks, simple->clks);
@@ -126,13 +123,27 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
reset_control_put(simple->resets);
- pm_runtime_disable(dev);
- pm_runtime_put_noidle(dev);
- pm_runtime_set_suspended(dev);
+ pm_runtime_disable(simple->dev);
+ pm_runtime_put_noidle(simple->dev);
+ pm_runtime_set_suspended(simple->dev);
+}
+
+static int dwc3_of_simple_remove(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
return 0;
}
+static void dwc3_of_simple_shutdown(struct platform_device *pdev)
+{
+ struct dwc3_of_simple *simple = platform_get_drvdata(pdev);
+
+ __dwc3_of_simple_teardown(simple);
+}
+
static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -190,6 +201,7 @@ MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
static struct platform_driver dwc3_of_simple_driver = {
.probe = dwc3_of_simple_probe,
.remove = dwc3_of_simple_remove,
+ .shutdown = dwc3_of_simple_shutdown,
.driver = {
.name = "dwc3-of-simple",
.of_match_table = of_dwc3_simple_match,
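
Factoring the old remove() body into __dwc3_of_simple_teardown() lets the new .shutdown callback reuse it, so clocks and resets are released on reboot as well as on unbind. The shape of that refactor as a minimal platform driver (all demo_* names hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv { /* clocks, resets, child devices, ... */ };

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	platform_set_drvdata(pdev, priv);
	return 0;
}

/* single place that undoes probe(); shared by both paths below */
static void demo_teardown(struct demo_priv *priv)
{
	/* disable clocks, put resets, depopulate children, ... */
}

static int demo_remove(struct platform_device *pdev)
{
	demo_teardown(platform_get_drvdata(pdev));
	return 0;
}

static void demo_shutdown(struct platform_device *pdev)
{
	demo_teardown(platform_get_drvdata(pdev));	/* note: returns void */
}

static struct platform_driver demo_driver = {
	.probe    = demo_probe,
	.remove   = demo_remove,
	.shutdown = demo_shutdown,
	.driver   = { .name = "demo" },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");
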
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5ec54b69c29c..3b4f67000315 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -794,9 +794,9 @@ static int set_config(struct usb_composite_dev *cdev,
result = 0;
}
- INFO(cdev, "%s config #%d: %s\n",
- usb_speed_string(gadget->speed),
- number, c ? c->label : "unconfigured");
+ DBG(cdev, "%s config #%d: %s\n",
+ usb_speed_string(gadget->speed),
+ number, c ? c->label : "unconfigured");
if (!c)
goto done;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 33852c2b29d1..ab9ac48a751a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1544,6 +1544,7 @@ static struct config_group *gadgets_make(
gi->composite.resume = NULL;
gi->composite.max_speed = USB_SPEED_SUPER;
+ spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
INIT_LIST_HEAD(&gi->string_list);
INIT_LIST_HEAD(&gi->available_func);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 9fc98de83624..7c152c28b26c 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -771,6 +771,24 @@ static struct configfs_item_operations acm_item_ops = {
.release = acm_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_acm_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_acm_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_acm_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -779,6 +797,9 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_acm_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_acm_attr_console,
+#endif
&f_acm_attr_port_num,
NULL,
};
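
The console attribute added above follows the stock configfs recipe: define <prefix><name>_show() and <prefix><name>_store(), then let CONFIGFS_ATTR(<prefix>, <name>) emit the struct configfs_attribute named <prefix>attr_<name> that goes into the attrs[] table. A reduced sketch with hypothetical demo_ names (the real bodies defer to gserial_set_console()/gserial_get_console(), introduced later in this series):

#include <linux/configfs.h>
#include <linux/module.h>

static ssize_t demo_console_show(struct config_item *item, char *page)
{
	return sprintf(page, "%u\n", 0);	/* report whether the console is on */
}

static ssize_t demo_console_store(struct config_item *item,
				  const char *page, size_t count)
{
	/* parse @page as a bool and enable/disable the console */
	return count;
}

CONFIGFS_ATTR(demo_, console);	/* emits demo_attr_console */

static struct configfs_attribute *demo_attrs[] = {
	&demo_attr_console,
	NULL,
};
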
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 59d9d512dcda..ce1d0235969c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1352,14 +1352,6 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long ffs_epfile_compat_ioctl(struct file *file, unsigned code,
- unsigned long value)
-{
- return ffs_epfile_ioctl(file, code, value);
-}
-#endif
-
static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
@@ -1368,9 +1360,7 @@ static const struct file_operations ffs_epfile_operations = {
.read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ffs_epfile_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
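
compat_ptr_ioctl() is the generic helper for drivers whose compat handler did nothing but forward to the native ioctl: it applies compat_ptr() to the argument and calls ->unlocked_ioctl(), which is correct whenever the argument is a pointer (or unused). Because <linux/fs.h> defines it as NULL when CONFIG_COMPAT is off, the #ifdef guards disappear too. Sketched wiring (demo_ioctl() body hypothetical):

#include <linux/fs.h>
#include <linux/module.h>

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* dispatch on @cmd here */
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,	/* no #ifdef CONFIG_COMPAT needed */
};
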
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index 55b7f57d2dc7..ab26d84ed95e 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -432,7 +432,7 @@ static struct usb_function_instance *obex_alloc_inst(void)
return ERR_PTR(-ENOMEM);
opts->func_inst.free_func_inst = obex_free_inst;
- ret = gserial_alloc_line(&opts->port_num);
+ ret = gserial_alloc_line_no_console(&opts->port_num);
if (ret) {
kfree(opts);
return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index c860f30a0ea2..1406255d0865 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -266,6 +266,24 @@ static struct configfs_item_operations serial_item_ops = {
.release = serial_attr_release,
};
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+static ssize_t f_serial_console_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return gserial_set_console(to_f_serial_opts(item)->port_num,
+ page, count);
+}
+
+static ssize_t f_serial_console_show(struct config_item *item, char *page)
+{
+ return gserial_get_console(to_f_serial_opts(item)->port_num, page);
+}
+
+CONFIGFS_ATTR(f_serial_, console);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
{
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
@@ -274,6 +292,9 @@ static ssize_t f_serial_port_num_show(struct config_item *item, char *page)
CONFIGFS_ATTR_RO(f_serial_, port_num);
static struct configfs_attribute *acm_attrs[] = {
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ &f_serial_attr_console,
+#endif
&f_serial_attr_port_num,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 7f01f78b1d23..36504931b2d1 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -846,7 +846,7 @@ static void uasp_set_alt(struct f_uas *fu)
fu->flags = USBG_IS_UAS;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed >= USB_SPEED_SUPER)
fu->flags |= USBG_USE_STREAMS;
config_ep_by_speed(gadget, f, fu->ep_in);
@@ -2093,6 +2093,16 @@ static void tcm_delayed_set_alt(struct work_struct *wq)
usb_composite_setup_continue(fu->function.config->cdev);
}
+static int tcm_get_alt(struct usb_function *f, unsigned intf)
+{
+ if (intf == bot_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_BBB;
+ if (intf == uasp_intf_desc.bInterfaceNumber)
+ return USB_G_ALT_INT_UAS;
+
+ return -EOPNOTSUPP;
+}
+
static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_uas *fu = to_f_uas(f);
@@ -2300,6 +2310,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
fu->function.bind = tcm_bind;
fu->function.unbind = tcm_unbind;
fu->function.set_alt = tcm_set_alt;
+ fu->function.get_alt = tcm_get_alt;
fu->function.setup = tcm_setup;
fu->function.disable = tcm_disable;
fu->function.free_func = tcm_free;
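
Two behavioral fixes in f_tcm: streams are now enabled at any speed at or above SuperSpeed (so SuperSpeedPlus hosts get UAS streams too, hence `>=` instead of `==`), and the new tcm_get_alt() lets GET_INTERFACE requests report whether the BOT or UAS alternate setting is active. The speed comparison generalizes like this (sketch, names hypothetical):

#include <linux/usb/gadget.h>

static bool demo_use_streams(struct usb_gadget *g)
{
	/* enum usb_device_speed is ordered, so >= also covers SUPER_PLUS */
	return g->speed >= USB_SPEED_SUPER;
}
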
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 56906d15fb55..7ec6a996af26 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -585,7 +585,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX);
+ NULL, 0, BUFF_SIZE_MAX);
err = snd_card_register(card);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 65f634ec7fc2..f986e5c55974 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -82,14 +82,13 @@
#define GS_CONSOLE_BUF_SIZE 8192
/* console info */
-struct gscons_info {
- struct gs_port *port;
- struct task_struct *console_thread;
- struct kfifo con_buf;
- /* protect the buf and busy flag */
- spinlock_t con_lock;
- int req_busy;
- struct usb_request *console_req;
+struct gs_console {
+ struct console console;
+ struct work_struct work;
+ spinlock_t lock;
+ struct usb_request *req;
+ struct kfifo buf;
+ size_t missed;
};
/*
@@ -101,8 +100,10 @@ struct gs_port {
spinlock_t port_lock; /* guard port_* access */
struct gserial *port_usb;
+#ifdef CONFIG_U_SERIAL_CONSOLE
+ struct gs_console *console;
+#endif
- bool openclose; /* open/close in progress */
u8 port_num;
struct list_head read_pool;
@@ -586,82 +587,45 @@ static int gs_open(struct tty_struct *tty, struct file *file)
{
int port_num = tty->index;
struct gs_port *port;
- int status;
-
- do {
- mutex_lock(&ports[port_num].lock);
- port = ports[port_num].port;
- if (!port)
- status = -ENODEV;
- else {
- spin_lock_irq(&port->port_lock);
-
- /* already open? Great. */
- if (port->port.count) {
- status = 0;
- port->port.count++;
-
- /* currently opening/closing? wait ... */
- } else if (port->openclose) {
- status = -EBUSY;
-
- /* ... else we do the work */
- } else {
- status = -EAGAIN;
- port->openclose = true;
- }
- spin_unlock_irq(&port->port_lock);
- }
- mutex_unlock(&ports[port_num].lock);
+ int status = 0;
- switch (status) {
- default:
- /* fully handled */
- return status;
- case -EAGAIN:
- /* must do the work */
- break;
- case -EBUSY:
- /* wait for EAGAIN task to finish */
- msleep(1);
- /* REVISIT could have a waitchannel here, if
- * concurrent open performance is important
- */
- break;
- }
- } while (status != -EAGAIN);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+ if (!port) {
+ status = -ENODEV;
+ goto out;
+ }
- /* Do the "real open" */
spin_lock_irq(&port->port_lock);
/* allocate circular buffer on first open */
if (!kfifo_initialized(&port->port_write_buf)) {
spin_unlock_irq(&port->port_lock);
+
+ /*
+ * portmaster's mutex still protects from simultaneous open(),
+ * and close() can't happen, yet.
+ */
+
status = kfifo_alloc(&port->port_write_buf,
WRITE_BUF_SIZE, GFP_KERNEL);
- spin_lock_irq(&port->port_lock);
-
if (status) {
pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
- port->port_num, tty, file);
- port->openclose = false;
- goto exit_unlock_port;
+ port_num, tty, file);
+ goto out;
}
- }
- /* REVISIT if REMOVED (ports[].port NULL), abort the open
- * to let rmmod work faster (but this way isn't wrong).
- */
+ spin_lock_irq(&port->port_lock);
+ }
- /* REVISIT maybe wait for "carrier detect" */
+ /* already open? Great. */
+ if (port->port.count++)
+ goto exit_unlock_port;
tty->driver_data = port;
port->port.tty = tty;
- port->port.count = 1;
- port->openclose = false;
-
/* if connected, start the I/O stream */
if (port->port_usb) {
struct gserial *gser = port->port_usb;
@@ -675,20 +639,21 @@ static int gs_open(struct tty_struct *tty, struct file *file)
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
- status = 0;
-
exit_unlock_port:
spin_unlock_irq(&port->port_lock);
+out:
+ mutex_unlock(&ports[port_num].lock);
return status;
}
-static int gs_writes_finished(struct gs_port *p)
+static int gs_close_flush_done(struct gs_port *p)
{
int cond;
- /* return true on disconnect or empty buffer */
+ /* return true on disconnect or empty buffer or if raced with open() */
spin_lock_irq(&p->port_lock);
- cond = (p->port_usb == NULL) || !kfifo_len(&p->port_write_buf);
+ cond = p->port_usb == NULL || !kfifo_len(&p->port_write_buf) ||
+ p->port.count > 1;
spin_unlock_irq(&p->port_lock);
return cond;
@@ -702,6 +667,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
spin_lock_irq(&port->port_lock);
if (port->port.count != 1) {
+raced_with_open:
if (port->port.count == 0)
WARN_ON(1);
else
@@ -711,12 +677,6 @@ static void gs_close(struct tty_struct *tty, struct file *file)
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
- /* mark port as closing but in use; we can drop port lock
- * and sleep if necessary
- */
- port->openclose = true;
- port->port.count = 0;
-
gser = port->port_usb;
if (gser && gser->disconnect)
gser->disconnect(gser);
@@ -727,9 +687,13 @@ static void gs_close(struct tty_struct *tty, struct file *file)
if (kfifo_len(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
- gs_writes_finished(port),
+ gs_close_flush_done(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+
+ if (port->port.count != 1)
+ goto raced_with_open;
+
gser = port->port_usb;
}
@@ -742,10 +706,9 @@ static void gs_close(struct tty_struct *tty, struct file *file)
else
kfifo_reset(&port->port_write_buf);
+ port->port.count = 0;
port->port.tty = NULL;
- port->openclose = false;
-
pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
port->port_num, tty, file);
@@ -889,36 +852,9 @@ static struct tty_driver *gs_tty_driver;
#ifdef CONFIG_U_SERIAL_CONSOLE
-static struct gscons_info gscons_info;
-static struct console gserial_cons;
-
-static struct usb_request *gs_request_new(struct usb_ep *ep)
-{
- struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
- if (!req)
- return NULL;
-
- req->buf = kmalloc(ep->maxpacket, GFP_ATOMIC);
- if (!req->buf) {
- usb_ep_free_request(ep, req);
- return NULL;
- }
-
- return req;
-}
-
-static void gs_request_free(struct usb_request *req, struct usb_ep *ep)
+static void gs_console_complete_out(struct usb_ep *ep, struct usb_request *req)
{
- if (!req)
- return;
-
- kfree(req->buf);
- usb_ep_free_request(ep, req);
-}
-
-static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
-{
- struct gscons_info *info = &gscons_info;
+ struct gs_console *cons = req->context;
switch (req->status) {
default:
@@ -927,12 +863,12 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
/* fall through */
case 0:
/* normal completion */
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
-
- wake_up_process(info->console_thread);
+ spin_lock(&cons->lock);
+ req->length = 0;
+ schedule_work(&cons->work);
+ spin_unlock(&cons->lock);
break;
+ case -ECONNRESET:
case -ESHUTDOWN:
/* disconnect */
pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
@@ -940,190 +876,250 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
}
}
-static int gs_console_connect(int port_num)
+static void __gs_console_push(struct gs_console *cons)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct usb_request *req = cons->req;
struct usb_ep *ep;
+ size_t size;
- if (port_num != gserial_cons.index) {
- pr_err("%s: port num [%d] is not support console\n",
- __func__, port_num);
- return -ENXIO;
- }
+ if (!req)
+ return; /* disconnected */
- port = ports[port_num].port;
- ep = port->port_usb->in;
- if (!info->console_req) {
- info->console_req = gs_request_new(ep);
- if (!info->console_req)
- return -ENOMEM;
- info->console_req->complete = gs_complete_out;
+ if (req->length)
+ return; /* busy */
+
+ ep = cons->console.data;
+ size = kfifo_out(&cons->buf, req->buf, ep->maxpacket);
+ if (!size)
+ return;
+
+ if (cons->missed && ep->maxpacket >= 64) {
+ char buf[64];
+ size_t len;
+
+ len = sprintf(buf, "\n[missed %zu bytes]\n", cons->missed);
+ kfifo_in(&cons->buf, buf, len);
+ cons->missed = 0;
}
- info->port = port;
- spin_lock(&info->con_lock);
- info->req_busy = 0;
- spin_unlock(&info->con_lock);
- pr_vdebug("port[%d] console connect!\n", port_num);
- return 0;
+ req->length = size;
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ req->length = 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_work(struct work_struct *work)
{
- struct gscons_info *info = &gscons_info;
- struct usb_request *req = info->console_req;
+ struct gs_console *cons = container_of(work, struct gs_console, work);
+
+ spin_lock_irq(&cons->lock);
- gs_request_free(req, ep);
- info->console_req = NULL;
+ __gs_console_push(cons);
+
+ spin_unlock_irq(&cons->lock);
}
-static int gs_console_thread(void *data)
+static void gs_console_write(struct console *co,
+ const char *buf, unsigned count)
{
- struct gscons_info *info = &gscons_info;
- struct gs_port *port;
+ struct gs_console *cons = container_of(co, struct gs_console, console);
+ unsigned long flags;
+ size_t n;
+
+ spin_lock_irqsave(&cons->lock, flags);
+
+ n = kfifo_in(&cons->buf, buf, count);
+ if (n < count)
+ cons->missed += count - n;
+
+ if (cons->req && !cons->req->length)
+ schedule_work(&cons->work);
+
+ spin_unlock_irqrestore(&cons->lock, flags);
+}
+
+static struct tty_driver *gs_console_device(struct console *co, int *index)
+{
+ *index = co->index;
+ return gs_tty_driver;
+}
+
+static int gs_console_connect(struct gs_port *port)
+{
+ struct gs_console *cons = port->console;
struct usb_request *req;
struct usb_ep *ep;
- int xfer, ret, count, size;
-
- do {
- port = info->port;
- set_current_state(TASK_INTERRUPTIBLE);
- if (!port || !port->port_usb
- || !port->port_usb->in || !info->console_req)
- goto sched;
-
- req = info->console_req;
- ep = port->port_usb->in;
-
- spin_lock_irq(&info->con_lock);
- count = kfifo_len(&info->con_buf);
- size = ep->maxpacket;
-
- if (count > 0 && !info->req_busy) {
- set_current_state(TASK_RUNNING);
- if (count < size)
- size = count;
-
- xfer = kfifo_out(&info->con_buf, req->buf, size);
- req->length = xfer;
-
- spin_unlock(&info->con_lock);
- ret = usb_ep_queue(ep, req, GFP_ATOMIC);
- spin_lock(&info->con_lock);
- if (ret < 0)
- info->req_busy = 0;
- else
- info->req_busy = 1;
-
- spin_unlock_irq(&info->con_lock);
- } else {
- spin_unlock_irq(&info->con_lock);
-sched:
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- schedule();
- }
- } while (1);
+
+ if (!cons)
+ return 0;
+
+ ep = port->port_usb->in;
+ req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->complete = gs_console_complete_out;
+ req->context = cons;
+ req->length = 0;
+
+ spin_lock(&cons->lock);
+ cons->req = req;
+ cons->console.data = ep;
+ spin_unlock(&cons->lock);
+
+ pr_debug("ttyGS%d: console connected!\n", port->port_num);
+
+ schedule_work(&cons->work);
return 0;
}
-static int gs_console_setup(struct console *co, char *options)
+static void gs_console_disconnect(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- int status;
+ struct gs_console *cons = port->console;
+ struct usb_request *req;
+ struct usb_ep *ep;
+
+ if (!cons)
+ return;
- info->port = NULL;
- info->console_req = NULL;
- info->req_busy = 0;
- spin_lock_init(&info->con_lock);
+ spin_lock(&cons->lock);
- status = kfifo_alloc(&info->con_buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
- if (status) {
- pr_err("%s: allocate console buffer failed\n", __func__);
- return status;
- }
+ req = cons->req;
+ ep = cons->console.data;
+ cons->req = NULL;
+
+ spin_unlock(&cons->lock);
- info->console_thread = kthread_create(gs_console_thread,
- co, "gs_console");
- if (IS_ERR(info->console_thread)) {
- pr_err("%s: cannot create console thread\n", __func__);
- kfifo_free(&info->con_buf);
- return PTR_ERR(info->console_thread);
+ if (!req)
+ return;
+
+ usb_ep_dequeue(ep, req);
+ gs_free_req(ep, req);
+}
+
+static int gs_console_init(struct gs_port *port)
+{
+ struct gs_console *cons;
+ int err;
+
+ if (port->console)
+ return 0;
+
+ cons = kzalloc(sizeof(*port->console), GFP_KERNEL);
+ if (!cons)
+ return -ENOMEM;
+
+ strcpy(cons->console.name, "ttyGS");
+ cons->console.write = gs_console_write;
+ cons->console.device = gs_console_device;
+ cons->console.flags = CON_PRINTBUFFER;
+ cons->console.index = port->port_num;
+
+ INIT_WORK(&cons->work, gs_console_work);
+ spin_lock_init(&cons->lock);
+
+ err = kfifo_alloc(&cons->buf, GS_CONSOLE_BUF_SIZE, GFP_KERNEL);
+ if (err) {
+ pr_err("ttyGS%d: allocate console buffer failed\n", port->port_num);
+ kfree(cons);
+ return err;
}
- wake_up_process(info->console_thread);
+
+ port->console = cons;
+ register_console(&cons->console);
+
+ spin_lock_irq(&port->port_lock);
+ if (port->port_usb)
+ gs_console_connect(port);
+ spin_unlock_irq(&port->port_lock);
return 0;
}
-static void gs_console_write(struct console *co,
- const char *buf, unsigned count)
+static void gs_console_exit(struct gs_port *port)
{
- struct gscons_info *info = &gscons_info;
- unsigned long flags;
+ struct gs_console *cons = port->console;
+
+ if (!cons)
+ return;
+
+ unregister_console(&cons->console);
- spin_lock_irqsave(&info->con_lock, flags);
- kfifo_in(&info->con_buf, buf, count);
- spin_unlock_irqrestore(&info->con_lock, flags);
+ spin_lock_irq(&port->port_lock);
+ if (cons->req)
+ gs_console_disconnect(port);
+ spin_unlock_irq(&port->port_lock);
- wake_up_process(info->console_thread);
+ cancel_work_sync(&cons->work);
+ kfifo_free(&cons->buf);
+ kfree(cons);
+ port->console = NULL;
}
-static struct tty_driver *gs_console_device(struct console *co, int *index)
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count)
{
- struct tty_driver **p = (struct tty_driver **)co->data;
+ struct gs_port *port;
+ bool enable;
+ int ret;
- if (!*p)
- return NULL;
+ ret = strtobool(page, &enable);
+ if (ret)
+ return ret;
- *index = co->index;
- return *p;
-}
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
-static struct console gserial_cons = {
- .name = "ttyGS",
- .write = gs_console_write,
- .device = gs_console_device,
- .setup = gs_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &gs_tty_driver,
-};
+ if (WARN_ON(port == NULL)) {
+ ret = -ENXIO;
+ goto out;
+ }
-static void gserial_console_init(void)
-{
- register_console(&gserial_cons);
+ if (enable)
+ ret = gs_console_init(port);
+ else
+ gs_console_exit(port);
+out:
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret < 0 ? ret : count;
}
+EXPORT_SYMBOL_GPL(gserial_set_console);
-static void gserial_console_exit(void)
+ssize_t gserial_get_console(unsigned char port_num, char *page)
{
- struct gscons_info *info = &gscons_info;
+ struct gs_port *port;
+ ssize_t ret;
- unregister_console(&gserial_cons);
- if (!IS_ERR_OR_NULL(info->console_thread))
- kthread_stop(info->console_thread);
- kfifo_free(&info->con_buf);
+ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+
+ if (WARN_ON(port == NULL))
+ ret = -ENXIO;
+ else
+ ret = sprintf(page, "%u\n", !!port->console);
+
+ mutex_unlock(&ports[port_num].lock);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(gserial_get_console);
#else
-static int gs_console_connect(int port_num)
+static int gs_console_connect(struct gs_port *port)
{
return 0;
}
-static void gs_console_disconnect(struct usb_ep *ep)
+static void gs_console_disconnect(struct gs_port *port)
{
}
-static void gserial_console_init(void)
+static int gs_console_init(struct gs_port *port)
{
+ return -ENOSYS;
}
-static void gserial_console_exit(void)
+static void gs_console_exit(struct gs_port *port)
{
}
@@ -1172,8 +1168,9 @@ static int gs_closed(struct gs_port *port)
int cond;
spin_lock_irq(&port->port_lock);
- cond = (port->port.count == 0) && !port->openclose;
+ cond = port->port.count == 0;
spin_unlock_irq(&port->port_lock);
+
return cond;
}
@@ -1197,18 +1194,19 @@ void gserial_free_line(unsigned char port_num)
return;
}
port = ports[port_num].port;
+ gs_console_exit(port);
ports[port_num].port = NULL;
mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
tty_unregister_device(gs_tty_driver, port_num);
- gserial_console_exit();
}
EXPORT_SYMBOL_GPL(gserial_free_line);
-int gserial_alloc_line(unsigned char *line_num)
+int gserial_alloc_line_no_console(unsigned char *line_num)
{
struct usb_cdc_line_coding coding;
+ struct gs_port *port;
struct device *tty_dev;
int ret;
int port_num;
@@ -1231,24 +1229,35 @@ int gserial_alloc_line(unsigned char *line_num)
/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
- tty_dev = tty_port_register_device(&ports[port_num].port->port,
+ port = ports[port_num].port;
+ tty_dev = tty_port_register_device(&port->port,
gs_tty_driver, port_num, NULL);
if (IS_ERR(tty_dev)) {
- struct gs_port *port;
pr_err("%s: failed to register tty for port %d, err %ld\n",
__func__, port_num, PTR_ERR(tty_dev));
ret = PTR_ERR(tty_dev);
- port = ports[port_num].port;
+ mutex_lock(&ports[port_num].lock);
ports[port_num].port = NULL;
+ mutex_unlock(&ports[port_num].lock);
gserial_free_port(port);
goto err;
}
*line_num = port_num;
- gserial_console_init();
err:
return ret;
}
+EXPORT_SYMBOL_GPL(gserial_alloc_line_no_console);
+
+int gserial_alloc_line(unsigned char *line_num)
+{
+ int ret = gserial_alloc_line_no_console(line_num);
+
+ if (!ret && !*line_num)
+ gs_console_init(ports[*line_num].port);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(gserial_alloc_line);
/**
@@ -1327,7 +1336,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
gser->disconnect(gser);
}
- status = gs_console_connect(port_num);
+ status = gs_console_connect(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return status;
@@ -1359,12 +1368,14 @@ void gserial_disconnect(struct gserial *gser)
/* tell the TTY glue not to do I/O here any more */
spin_lock_irqsave(&port->port_lock, flags);
+ gs_console_disconnect(port);
+
/* REVISIT as above: how best to track this? */
port->port_line_coding = gser->port_line_coding;
port->port_usb = NULL;
gser->ioport = NULL;
- if (port->port.count > 0 || port->openclose) {
+ if (port->port.count > 0) {
wake_up_interruptible(&port->drain_wait);
if (port->port.tty)
tty_hangup(port->port.tty);
@@ -1377,7 +1388,7 @@ void gserial_disconnect(struct gserial *gser)
/* finally, free any unused/unusable I/O buffers */
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port.count == 0 && !port->openclose)
+ if (port->port.count == 0)
kfifo_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool, NULL);
gs_free_requests(gser->out, &port->read_queue, NULL);
@@ -1386,7 +1397,6 @@ void gserial_disconnect(struct gserial *gser)
port->read_allocated = port->read_started =
port->write_allocated = port->write_started = 0;
- gs_console_disconnect(gser->in);
spin_unlock_irqrestore(&port->port_lock, flags);
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
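
The console rework above swaps a dedicated kthread for a kfifo drained by a work item: the console write() path may run in atomic context, so it only copies into the fifo under the lock (counting overflow in ->missed), while the workqueue callback pushes fifo contents to the IN endpoint when the single request is idle. The producer/consumer skeleton in isolation (demo_* names hypothetical; kfifo_alloc() and INIT_WORK() at init time are not shown, and the transport push is a stub):

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_con {
	spinlock_t		lock;
	struct kfifo		buf;
	struct work_struct	work;
	size_t			missed;
};

/* producer: safe in atomic context, never blocks */
static void demo_con_write(struct demo_con *c, const char *s, unsigned int n)
{
	unsigned long flags;
	unsigned int copied;

	spin_lock_irqsave(&c->lock, flags);
	copied = kfifo_in(&c->buf, s, n);
	if (copied < n)
		c->missed += n - copied;	/* fifo full: record the drop */
	spin_unlock_irqrestore(&c->lock, flags);

	schedule_work(&c->work);
}

/* consumer: process context, drains whatever accumulated */
static void demo_con_work(struct work_struct *w)
{
	struct demo_con *c = container_of(w, struct demo_con, work);
	char chunk[64];
	unsigned int len;

	spin_lock_irq(&c->lock);
	while ((len = kfifo_out(&c->buf, chunk, sizeof(chunk))) > 0)
		;	/* hand chunk[0..len) to the transport here */
	spin_unlock_irq(&c->lock);
}
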
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 9acaac1cbb75..e5b08ab8cf7a 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -54,9 +54,17 @@ struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
void gs_free_req(struct usb_ep *, struct usb_request *req);
/* management of individual TTY ports */
+int gserial_alloc_line_no_console(unsigned char *port_line);
int gserial_alloc_line(unsigned char *port_line);
void gserial_free_line(unsigned char port_line);
+#ifdef CONFIG_U_SERIAL_CONSOLE
+
+ssize_t gserial_set_console(unsigned char port_num, const char *page, size_t count);
+ssize_t gserial_get_console(unsigned char port_num, char *page);
+
+#endif /* CONFIG_U_SERIAL_CONSOLE */
+
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 69ff7f8c86f5..119a4e47681f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -149,21 +149,21 @@ config USB_ETH_RNDIS
is given in comments found in that info file.
config USB_ETH_EEM
- bool "Ethernet Emulation Model (EEM) support"
- depends on USB_ETH
+ bool "Ethernet Emulation Model (EEM) support"
+ depends on USB_ETH
select USB_LIBCOMPOSITE
select USB_F_EEM
- help
- CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
- and therefore can be supported by more hardware. Technically ECM and
- EEM are designed for different applications. The ECM model extends
- the network interface to the target (e.g. a USB cable modem), and the
- EEM model is for mobile devices to communicate with hosts using
- ethernet over USB. For Linux gadgets, however, the interface with
- the host is the same (a usbX device), so the differences are minimal.
-
- If you say "y" here, the Ethernet gadget driver will use the EEM
- protocol rather than ECM. If unsure, say "n".
+ help
+ CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
+ and therefore can be supported by more hardware. Technically ECM and
+ EEM are designed for different applications. The ECM model extends
+ the network interface to the target (e.g. a USB cable modem), and the
+ EEM model is for mobile devices to communicate with hosts using
+ ethernet over USB. For Linux gadgets, however, the interface with
+ the host is the same (a usbX device), so the differences are minimal.
+
+ If you say "y" here, the Ethernet gadget driver will use the EEM
+ protocol rather than ECM. If unsure, say "n".
config USB_G_NCM
tristate "Network Control Model (NCM) support"
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index af16672d5118..59be2d8417c9 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -105,7 +105,6 @@ static struct usb_function *f_msg;
*/
static int acm_ms_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int status;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_acm = usb_get_function(f_acm_inst);
if (IS_ERR(f_acm))
return PTR_ERR(f_acm);
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index fd5595ac5bf7..f18f77584fc2 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -105,7 +105,6 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static int msg_do_config(struct usb_configuration *c)
{
- struct fsg_opts *opts;
int ret;
if (gadget_is_otg(c->cdev->gadget)) {
@@ -113,8 +112,6 @@ static int msg_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
- opts = fsg_opts_from_func_inst(fi_msg);
-
f_msg = usb_get_function(fi_msg);
if (IS_ERR(f_msg))
return PTR_ERR(f_msg);
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index de30d7628eef..da44f89f5e73 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -97,6 +97,36 @@ static unsigned n_ports = 1;
module_param(n_ports, uint, 0);
MODULE_PARM_DESC(n_ports, "number of ports to create, default=1");
+static bool enable = true;
+
+static int switch_gserial_enable(bool do_enable);
+
+static int enable_set(const char *s, const struct kernel_param *kp)
+{
+ bool do_enable;
+ int ret;
+
+ if (!s) /* called for no-arg enable == default */
+ return 0;
+
+ ret = strtobool(s, &do_enable);
+ if (ret || enable == do_enable)
+ return ret;
+
+ ret = switch_gserial_enable(do_enable);
+ if (!ret)
+ enable = do_enable;
+
+ return ret;
+}
+
+static const struct kernel_param_ops enable_ops = {
+ .set = enable_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(enable, &enable_ops, &enable, 0644);
+
/*-------------------------------------------------------------------------*/
static struct usb_configuration serial_config_driver = {
@@ -240,6 +270,19 @@ static struct usb_composite_driver gserial_driver = {
.unbind = gs_unbind,
};
+static int switch_gserial_enable(bool do_enable)
+{
+ if (!serial_config_driver.label)
+ /* init() was not called, yet */
+ return 0;
+
+ if (do_enable)
+ return usb_composite_probe(&gserial_driver);
+
+ usb_composite_unregister(&gserial_driver);
+ return 0;
+}
+
static int __init init(void)
{
/* We *could* export two configs; that'd be much cleaner...
@@ -266,12 +309,16 @@ static int __init init(void)
}
strings_dev[STRING_DESCRIPTION_IDX].s = serial_config_driver.label;
+ if (!enable)
+ return 0;
+
return usb_composite_probe(&gserial_driver);
}
module_init(init);
static void __exit cleanup(void)
{
- usb_composite_unregister(&gserial_driver);
+ if (enable)
+ usb_composite_unregister(&gserial_driver);
}
module_exit(cleanup);
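
The writable enable parameter relies on module_param_cb(), which pairs the backing variable with custom kernel_param_ops so that writes to /sys/module/g_serial/parameters/enable can register or unregister the composite driver at runtime. The mechanism in miniature (demo_apply() stands in for the real side effect):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>	/* strtobool() */

static bool enabled = true;

static int demo_apply(bool on)
{
	/* register or unregister the real driver here */
	return 0;
}

static int enabled_set(const char *s, const struct kernel_param *kp)
{
	bool want;
	int ret;

	if (!s)				/* bare flag on the cmdline: keep default */
		return 0;

	ret = strtobool(s, &want);
	if (ret || want == enabled)
		return ret;

	ret = demo_apply(want);
	if (!ret)
		enabled = want;		/* commit only if the switch worked */
	return ret;
}

static const struct kernel_param_ops enabled_ops = {
	.set = enabled_set,
	.get = param_get_bool,
};
module_param_cb(enable, &enabled_ops, &enabled, 0644);
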
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index d354036ff6c8..ae70ce29d5e4 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -120,10 +120,10 @@ config USB_FOTG210_UDC
dynamically linked module called "fotg210_udc".
config USB_GR_UDC
- tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
- depends on HAS_DMA
- help
- Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
+ tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
+ depends on HAS_DMA
+ help
+ Select this to support Aeroflex Gaisler GRUSBDC cores from the GRLIB
VHDL IP core library.
config USB_OMAP
@@ -441,6 +441,17 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.
+config USB_TEGRA_XUDC
+ tristate "NVIDIA Tegra Superspeed USB 3.0 Device Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on PHY_TEGRA_XUSB
+ help
+ Enables NVIDIA Tegra USB 3.0 device mode controller driver.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "tegra_xudc" and force all
+ gadget drivers to also be dynamically linked.
+
source "drivers/usb/gadget/udc/aspeed-vhub/Kconfig"
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 897f648f3cf1..f6777e654a8e 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
+obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 194ffb1ed462..1b2b548c59a0 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1808,7 +1808,6 @@ static int at91udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct at91_udc *udc;
int retval;
- struct resource *res;
struct at91_ep *ep;
int i;
@@ -1839,8 +1838,7 @@ static int at91udc_probe(struct platform_device *pdev)
ep->is_pingpong = 1;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr))
return PTR_ERR(udc->udp_baseaddr);
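
This probe, like several below (bcm63xx, bdc, gr_udc, lpc32xx, pxa25x/27x, r8a66597, renesas_usb3, s3c-hsudc), converts the platform_get_resource() + devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call, which performs both steps and returns either a mapped pointer or an ERR_PTR. The resulting idiom:

#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* looks up IORESOURCE_MEM index 0 and ioremaps it, devres-managed */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* program @regs ... */
	return 0;
}
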
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 1d0d8952a74b..8a42768e3213 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/ctype.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/delay.h>
@@ -226,7 +227,7 @@ static void usba_init_debugfs(struct usba_udc *udc)
struct dentry *root;
struct resource *regs_resource;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 97b16463f3ef..54501814dc3f 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2248,7 +2248,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
return;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
@@ -2282,7 +2282,6 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
struct bcm63xx_udc *udc;
- struct resource *res;
int rc = -ENOMEM, i, irq;
udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
@@ -2298,13 +2297,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->usbd_regs = devm_ioremap_resource(dev, res);
+ udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->usbd_regs))
return PTR_ERR(udc->usbd_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- udc->iudma_regs = devm_ioremap_resource(dev, res);
+ udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(udc->iudma_regs))
return PTR_ERR(udc->iudma_regs);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index cc4a16e253ac..02a3a774670b 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -480,7 +480,6 @@ static void bdc_phy_exit(struct bdc *bdc)
static int bdc_probe(struct platform_device *pdev)
{
struct bdc *bdc;
- struct resource *res;
int ret = -ENOMEM;
int irq;
u32 temp;
@@ -508,8 +507,7 @@ static int bdc_probe(struct platform_device *pdev)
bdc->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdc->regs = devm_ioremap_resource(dev, res);
+ bdc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdc->regs)) {
dev_err(dev, "ioremap error\n");
return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7bfd58c846f7..248426a3e88a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -195,7 +195,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc)
break;
case BDC_LINK_STATE_U0:
if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
- bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
if (bdc->gadget.speed == USB_SPEED_SUPER) {
bdc_function_wake_fh(bdc, 0);
bdc->devstatus |= FUNC_WAKE_ISSUED;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 3d499d93c083..4c9d1e49d5ed 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1321,7 +1321,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
u32 this_sg;
bool next_sg;
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
rbuf = req->req.buf + req->req.actual;
if (!urb->num_sgs) {
@@ -1409,7 +1409,7 @@ top:
/* FIXME update emulated data toggle too */
- to_host = usb_pipein(urb->pipe);
+ to_host = usb_urb_dir_in(urb);
if (unlikely(len == 0))
is_short = 1;
else {
@@ -1830,7 +1830,7 @@ restart:
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
- if (usb_pipein(urb->pipe))
+ if (usb_urb_dir_in(urb))
address |= USB_DIR_IN;
ep = find_endpoint(dum, address);
if (!ep) {
@@ -2385,7 +2385,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
s = "?";
break;
} s; }),
- ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+ ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype(urb->pipe)) { \
case PIPE_CONTROL: \
@@ -2725,7 +2725,7 @@ static struct platform_driver dummy_hcd_driver = {
};
/*-------------------------------------------------------------------------*/
-#define MAX_NUM_UDC 2
+#define MAX_NUM_UDC 32
static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
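
The usb_pipein() to usb_urb_dir_in() substitutions above matter for control endpoints: the pipe encodes a fixed direction bit, but a control transfer's data stage direction comes from the setup packet, and usb_urb_dir_in() reads the direction recorded in the URB itself. A small illustration (demo name hypothetical):

#include <linux/usb.h>

static bool demo_data_stage_is_in(struct urb *urb)
{
	/*
	 * usb_pipein(urb->pipe) would report only the pipe's direction bit;
	 * usb_urb_dir_in() checks URB_DIR_IN in urb->transfer_flags, which
	 * is valid for control transfers as well.
	 */
	return usb_urb_dir_in(urb);
}
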
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
index 2c537a904ee7..53ca0ff7c2cb 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.h
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -333,8 +333,8 @@ struct qe_udc {
u32 resume_state; /* USB state to resume*/
u32 usb_state; /* USB current state */
u32 usb_next_state; /* USB next state */
- u32 ep0_state; /* Enpoint zero state */
- u32 ep0_dir; /* Enpoint zero direction: can be
+ u32 ep0_state; /* Endpoint zero state */
+ u32 ep0_dir; /* Endpoint zero direction: can be
USB_DIR_IN or USB_DIR_OUT*/
u32 usb_sof_count; /* SOF count */
u32 errors; /* USB ERRORs count */
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 9a05863b2876..ec6eda426223 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1052,10 +1052,11 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
u32 bitmask;
struct ep_queue_head *qh;
- ep = container_of(_ep, struct fsl_ep, ep);
- if (!_ep || (!ep->ep.desc && ep_index(ep) != 0))
+	if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF))
return -ENODEV;
+ ep = container_of(_ep, struct fsl_ep, ep);
+
udc = (struct fsl_udc *)ep->udc;
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
@@ -1208,7 +1209,7 @@ static int fsl_vbus_draw(struct usb_gadget *gadget, unsigned mA)
}
/* Change Data+ pullup status
- * this func is used by usb_gadget_connect/disconnet
+ * this func is used by usb_gadget_connect/disconnect
*/
static int fsl_pullup(struct usb_gadget *gadget, int is_on)
{
@@ -1595,14 +1596,13 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
struct fsl_req *curr_req)
{
struct ep_td_struct *curr_td;
- int td_complete, actual, remaining_length, j, tmp;
+ int actual, remaining_length, j, tmp;
int status = 0;
int errors = 0;
struct ep_queue_head *curr_qh = &udc->ep_qh[pipe];
int direction = pipe % 2;
curr_td = curr_req->head;
- td_complete = 0;
actual = curr_req->req.length;
for (j = 0; j < curr_req->dtd_count; j++) {
@@ -1647,11 +1647,9 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
status = -EPROTO;
break;
} else {
- td_complete++;
break;
}
} else {
- td_complete++;
VDBG("dTD transmitted successful");
}
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 7a0e9a58c2d8..64d80c65bb96 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -29,6 +29,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
@@ -208,7 +209,7 @@ static void gr_dfs_create(struct gr_udc *dev)
{
const char *name = "gr_udc_state";
- dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
+ dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
}
@@ -2118,7 +2119,6 @@ static int gr_request_irq(struct gr_udc *dev, int irq)
static int gr_probe(struct platform_device *pdev)
{
struct gr_udc *dev;
- struct resource *res;
struct gr_regs __iomem *regs;
int retval;
u32 status;
@@ -2128,8 +2128,7 @@ static int gr_probe(struct platform_device *pdev)
return -ENOMEM;
dev->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index bf6c81e2f8cc..d14b2bb3f67c 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -3000,7 +3000,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct lpc32xx_udc *udc;
int retval, i;
- struct resource *res;
dma_addr_t dma_handle;
struct device_node *isp1301_node;
@@ -3048,9 +3047,6 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
* IORESOURCE_IRQ, USB device interrupt number
* IORESOURCE_IRQ, USB transceiver interrupt number
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
spin_lock_init(&udc->lock);
@@ -3061,7 +3057,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
return udc->udp_irq[i];
}
- udc->udp_baseaddr = devm_ioremap_resource(dev, res);
+ udc->udp_baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->udp_baseaddr)) {
dev_err(udc->dev, "IO map failure\n");
return PTR_ERR(udc->udp_baseaddr);
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
index 982625b7197a..66b84f792f64 100644
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -138,7 +138,7 @@ struct mv_u3d_op_regs {
u32 doorbell; /* doorbell register */
};
-/* control enpoint enable registers */
+/* control endpoint enable registers */
struct epxcr {
u32 epxoutcr0; /* ep out control 0 register */
u32 epxoutcr1; /* ep out control 1 register */
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 265dab2bbfac..3344fb8c4181 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1519,7 +1519,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
td = phys_to_virt(addr);
addr2 = (dma_addr_t)td->next;
dma_pool_free(dev->data_requests, td, addr);
- td->next = 0x00;
addr = addr2;
}
req->chain_len = 1;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index d4be53559f2e..cfafdd92c2a8 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2321,7 +2321,6 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
struct pxa25x_udc *dev = &memory;
int retval, irq;
u32 chiprev;
- struct resource *res;
pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
@@ -2367,8 +2366,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 014233252299..78902d13fc27 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -207,7 +207,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
{
struct dentry *root;
- root = debugfs_create_dir(udc->gadget.name, NULL);
+ root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
udc->debugfs_root = root;
debugfs_create_file("udcstate", 0400, root, udc, &state_dbg_fops);
@@ -2356,7 +2356,6 @@ MODULE_DEVICE_TABLE(of, udc_pxa_dt_ids);
*/
static int pxa_udc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct pxa_udc *udc = &memory;
int retval = 0, gpio;
struct pxa2xx_udc_mach_info *mach = dev_get_platdata(&pdev->dev);
@@ -2378,8 +2377,7 @@ static int pxa_udc_probe(struct platform_device *pdev)
udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->regs))
return PTR_ERR(udc->regs);
udc->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 11e25a3f4f1f..582a16165ea9 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1838,7 +1838,7 @@ static int r8a66597_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
char clk_name[8];
- struct resource *res, *ires;
+ struct resource *ires;
int irq;
void __iomem *reg = NULL;
struct r8a66597 *r8a66597 = NULL;
@@ -1846,8 +1846,7 @@ static int r8a66597_probe(struct platform_device *pdev)
int i;
unsigned long irq_trigger;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 33703140233a..c5c3c14df67a 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -775,6 +775,18 @@ static void usb3_irq_epc_int_1_resume(struct renesas_usb3 *usb3)
usb3_transition_to_default_state(usb3, false);
}
+static void usb3_irq_epc_int_1_suspend(struct renesas_usb3 *usb3)
+{
+ usb3_disable_irq_1(usb3, USB_INT_1_B2_SPND);
+
+ if (usb3->gadget.speed != USB_SPEED_UNKNOWN &&
+ usb3->gadget.state != USB_STATE_NOTATTACHED) {
+ if (usb3->driver && usb3->driver->suspend)
+ usb3->driver->suspend(&usb3->gadget);
+ usb_gadget_set_state(&usb3->gadget, USB_STATE_SUSPENDED);
+ }
+}
+
static void usb3_irq_epc_int_1_disable(struct renesas_usb3 *usb3)
{
usb3_stop_usb3_connection(usb3);
@@ -860,6 +872,9 @@ static void usb3_irq_epc_int_1(struct renesas_usb3 *usb3, u32 int_sta_1)
if (int_sta_1 & USB_INT_1_B2_RSUM)
usb3_irq_epc_int_1_resume(usb3);
+ if (int_sta_1 & USB_INT_1_B2_SPND)
+ usb3_irq_epc_int_1_suspend(usb3);
+
if (int_sta_1 & USB_INT_1_SPEED)
usb3_irq_epc_int_1_speed(usb3);
@@ -2536,7 +2551,7 @@ static const struct file_operations renesas_usb3_b_device_fops = {
static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
struct device *dev)
{
- usb3->dentry = debugfs_create_dir(dev_name(dev), NULL);
+ usb3->dentry = debugfs_create_dir(dev_name(dev), usb_debug_root);
debugfs_create_file("b_device", 0644, usb3->dentry, usb3,
&renesas_usb3_b_device_fops);
@@ -2733,7 +2748,6 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- struct resource *res;
int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2752,8 +2766,7 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (!usb3)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- usb3->reg = devm_ioremap_resource(&pdev->dev, res);
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usb3->reg))
return PTR_ERR(usb3->reg);
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 858993c73442..21252fbc0319 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1263,7 +1263,6 @@ static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
static int s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
int ret, i;
@@ -1290,9 +1289,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
goto err_supplies;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- hsudc->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsudc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsudc->regs)) {
ret = PTR_ERR(hsudc->regs);
goto err_res;
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index f82208fbc249..0507a2ca0f55 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1978,7 +1978,8 @@ static int __init udc_init(void)
dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
- s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name, NULL);
+ s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name,
+ usb_debug_root);
retval = platform_driver_register(&udc_driver_24x0);
if (retval)
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
new file mode 100644
index 000000000000..634c2c19a176
--- /dev/null
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -0,0 +1,3810 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra XUSB device mode controller
+ *
+ * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/pm_domain.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/role.h>
+#include <linux/workqueue.h>
+
+/* XUSB_DEV registers */
+#define SPARAM 0x000
+#define SPARAM_ERSTMAX_MASK GENMASK(20, 16)
+#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK)
+#define DB 0x004
+#define DB_TARGET_MASK GENMASK(15, 8)
+#define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK)
+#define DB_STREAMID_MASK GENMASK(31, 16)
+#define DB_STREAMID(x) (((x) << 16) & DB_STREAMID_MASK)
+#define ERSTSZ 0x008
+#define ERSTSZ_ERSTXSZ_SHIFT(x) ((x) * 16)
+#define ERSTSZ_ERSTXSZ_MASK GENMASK(15, 0)
+#define ERSTXBALO(x) (0x010 + 8 * (x))
+#define ERSTXBAHI(x) (0x014 + 8 * (x))
+#define ERDPLO 0x020
+#define ERDPLO_EHB BIT(3)
+#define ERDPHI 0x024
+#define EREPLO 0x028
+#define EREPLO_ECS BIT(0)
+#define EREPLO_SEGI BIT(1)
+#define EREPHI 0x02c
+#define CTRL 0x030
+#define CTRL_RUN BIT(0)
+#define CTRL_LSE BIT(1)
+#define CTRL_IE BIT(4)
+#define CTRL_SMI_EVT BIT(5)
+#define CTRL_SMI_DSE BIT(6)
+#define CTRL_EWE BIT(7)
+#define CTRL_DEVADDR_MASK GENMASK(30, 24)
+#define CTRL_DEVADDR(x) (((x) << 24) & CTRL_DEVADDR_MASK)
+#define CTRL_ENABLE BIT(31)
+#define ST 0x034
+#define ST_RC BIT(0)
+#define ST_IP BIT(4)
+#define RT_IMOD 0x038
+#define RT_IMOD_IMODI_MASK GENMASK(15, 0)
+#define RT_IMOD_IMODI(x) ((x) & RT_IMOD_IMODI_MASK)
+#define RT_IMOD_IMODC_MASK GENMASK(31, 16)
+#define RT_IMOD_IMODC(x) (((x) << 16) & RT_IMOD_IMODC_MASK)
+#define PORTSC 0x03c
+#define PORTSC_CCS BIT(0)
+#define PORTSC_PED BIT(1)
+#define PORTSC_PR BIT(4)
+#define PORTSC_PLS_SHIFT 5
+#define PORTSC_PLS_MASK GENMASK(8, 5)
+#define PORTSC_PLS_U0 0x0
+#define PORTSC_PLS_U2 0x2
+#define PORTSC_PLS_U3 0x3
+#define PORTSC_PLS_DISABLED 0x4
+#define PORTSC_PLS_RXDETECT 0x5
+#define PORTSC_PLS_INACTIVE 0x6
+#define PORTSC_PLS_RESUME 0xf
+#define PORTSC_PLS(x) (((x) << PORTSC_PLS_SHIFT) & PORTSC_PLS_MASK)
+#define PORTSC_PS_SHIFT 10
+#define PORTSC_PS_MASK GENMASK(13, 10)
+#define PORTSC_PS_UNDEFINED 0x0
+#define PORTSC_PS_FS 0x1
+#define PORTSC_PS_LS 0x2
+#define PORTSC_PS_HS 0x3
+#define PORTSC_PS_SS 0x4
+#define PORTSC_LWS BIT(16)
+#define PORTSC_CSC BIT(17)
+#define PORTSC_WRC BIT(19)
+#define PORTSC_PRC BIT(21)
+#define PORTSC_PLC BIT(22)
+#define PORTSC_CEC BIT(23)
+#define PORTSC_WPR BIT(30)
+#define PORTSC_CHANGE_MASK (PORTSC_CSC | PORTSC_WRC | PORTSC_PRC | \
+ PORTSC_PLC | PORTSC_CEC)
+#define ECPLO 0x040
+#define ECPHI 0x044
+#define MFINDEX 0x048
+#define MFINDEX_FRAME_SHIFT 3
+#define MFINDEX_FRAME_MASK GENMASK(13, 3)
+#define PORTPM 0x04c
+#define PORTPM_L1S_MASK GENMASK(1, 0)
+#define PORTPM_L1S_DROP 0x0
+#define PORTPM_L1S_ACCEPT 0x1
+#define PORTPM_L1S_NYET 0x2
+#define PORTPM_L1S_STALL 0x3
+#define PORTPM_L1S(x) ((x) & PORTPM_L1S_MASK)
+#define PORTPM_RWE BIT(3)
+#define PORTPM_U2TIMEOUT_MASK GENMASK(15, 8)
+#define PORTPM_U1TIMEOUT_MASK GENMASK(23, 16)
+#define PORTPM_FLA BIT(24)
+#define PORTPM_VBA BIT(25)
+#define PORTPM_WOC BIT(26)
+#define PORTPM_WOD BIT(27)
+#define PORTPM_U1E BIT(28)
+#define PORTPM_U2E BIT(29)
+#define PORTPM_FRWE BIT(30)
+#define PORTPM_PNG_CYA BIT(31)
+#define EP_HALT 0x050
+#define EP_PAUSE 0x054
+#define EP_RELOAD 0x058
+#define EP_STCHG 0x05c
+#define DEVNOTIF_LO 0x064
+#define DEVNOTIF_LO_TRIG BIT(0)
+#define DEVNOTIF_LO_TYPE_MASK GENMASK(7, 4)
+#define DEVNOTIF_LO_TYPE(x) (((x) << 4) & DEVNOTIF_LO_TYPE_MASK)
+#define DEVNOTIF_LO_TYPE_FUNCTION_WAKE 0x1
+#define DEVNOTIF_HI 0x068
+#define PORTHALT 0x06c
+#define PORTHALT_HALT_LTSSM BIT(0)
+#define PORTHALT_HALT_REJECT BIT(1)
+#define PORTHALT_STCHG_REQ BIT(20)
+#define PORTHALT_STCHG_INTR_EN BIT(24)
+#define PORT_TM 0x070
+#define EP_THREAD_ACTIVE 0x074
+#define EP_STOPPED 0x078
+#define HSFSPI_COUNT0 0x100
+#define HSFSPI_COUNT13 0x134
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK GENMASK(29, 0)
+#define HSFSPI_COUNT13_U2_RESUME_K_DURATION(x) ((x) & \
+ HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK)
+#define BLCG 0x840
+#define SSPX_CORE_CNT0 0x610
+#define SSPX_CORE_CNT0_PING_TBURST_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT0_PING_TBURST(x) ((x) & SSPX_CORE_CNT0_PING_TBURST_MASK)
+#define SSPX_CORE_CNT30 0x688
+#define SSPX_CORE_CNT30_LMPITP_TIMER_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT30_LMPITP_TIMER(x) ((x) & \
+ SSPX_CORE_CNT30_LMPITP_TIMER_MASK)
+#define SSPX_CORE_CNT32 0x690
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
+#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
+ SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_PADCTL4 0x750
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
+#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
+ SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK)
+#define BLCG_DFPCI BIT(0)
+#define BLCG_UFPCI BIT(1)
+#define BLCG_FE BIT(2)
+#define BLCG_COREPLL_PWRDN BIT(8)
+#define BLCG_IOPLL_0_PWRDN BIT(9)
+#define BLCG_IOPLL_1_PWRDN BIT(10)
+#define BLCG_IOPLL_2_PWRDN BIT(11)
+#define BLCG_ALL 0x1ff
+#define CFG_DEV_SSPI_XFER 0x858
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK GENMASK(31, 0)
+#define CFG_DEV_SSPI_XFER_ACKTIMEOUT(x) ((x) & \
+ CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK)
+#define CFG_DEV_FE 0x85c
+#define CFG_DEV_FE_PORTREGSEL_MASK GENMASK(1, 0)
+#define CFG_DEV_FE_PORTREGSEL_SS_PI 1
+#define CFG_DEV_FE_PORTREGSEL_HSFS_PI 2
+#define CFG_DEV_FE_PORTREGSEL(x) ((x) & CFG_DEV_FE_PORTREGSEL_MASK)
+#define CFG_DEV_FE_INFINITE_SS_RETRY BIT(29)
+
+/* FPCI registers */
+#define XUSB_DEV_CFG_1 0x004
+#define XUSB_DEV_CFG_1_IO_SPACE_EN BIT(0)
+#define XUSB_DEV_CFG_1_MEMORY_SPACE_EN BIT(1)
+#define XUSB_DEV_CFG_1_BUS_MASTER_EN BIT(2)
+#define XUSB_DEV_CFG_4 0x010
+#define XUSB_DEV_CFG_4_BASE_ADDR_MASK GENMASK(31, 15)
+#define XUSB_DEV_CFG_5 0x014
+
+/* IPFS registers */
+#define XUSB_DEV_CONFIGURATION_0 0x180
+#define XUSB_DEV_CONFIGURATION_0_EN_FPCI BIT(0)
+#define XUSB_DEV_INTR_MASK_0 0x188
+#define XUSB_DEV_INTR_MASK_0_IP_INT_MASK BIT(16)
+
+struct tegra_xudc_ep_context {
+ __le32 info0;
+ __le32 info1;
+ __le32 deq_lo;
+ __le32 deq_hi;
+ __le32 tx_info;
+ __le32 rsvd[11];
+};
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+
+#define EP_TYPE_INVALID 0
+#define EP_TYPE_ISOCH_OUT 1
+#define EP_TYPE_BULK_OUT 2
+#define EP_TYPE_INTERRUPT_OUT 3
+#define EP_TYPE_CONTROL 4
+#define EP_TYPE_ISOCH_IN 5
+#define EP_TYPE_BULK_IN 6
+#define EP_TYPE_INTERRUPT_IN 7
+
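+/*
+ * Generate endianness-safe read/modify/write accessors for a bitfield in
+ * one of the little-endian endpoint context words, e.g. ep_ctx_read_state()
+ * and ep_ctx_write_state().
+ */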
+#define BUILD_EP_CONTEXT_RW(name, member, shift, mask) \
+static inline u32 ep_ctx_read_##name(struct tegra_xudc_ep_context *ctx) \
+{ \
+ return (le32_to_cpu(ctx->member) >> (shift)) & (mask); \
+} \
+static inline void \
+ep_ctx_write_##name(struct tegra_xudc_ep_context *ctx, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(ctx->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ ctx->member = cpu_to_le32(tmp); \
+}
+
+BUILD_EP_CONTEXT_RW(state, info0, 0, 0x7)
+BUILD_EP_CONTEXT_RW(mult, info0, 8, 0x3)
+BUILD_EP_CONTEXT_RW(max_pstreams, info0, 10, 0x1f)
+BUILD_EP_CONTEXT_RW(lsa, info0, 15, 0x1)
+BUILD_EP_CONTEXT_RW(interval, info0, 16, 0xff)
+BUILD_EP_CONTEXT_RW(cerr, info1, 1, 0x3)
+BUILD_EP_CONTEXT_RW(type, info1, 3, 0x7)
+BUILD_EP_CONTEXT_RW(hid, info1, 7, 0x1)
+BUILD_EP_CONTEXT_RW(max_burst_size, info1, 8, 0xff)
+BUILD_EP_CONTEXT_RW(max_packet_size, info1, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(dcs, deq_lo, 0, 0x1)
+BUILD_EP_CONTEXT_RW(deq_lo, deq_lo, 4, 0xfffffff)
+BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff)
+BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff)
+BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff)
+BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff)
+BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff)
+BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1)
+BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3)
+BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff)
+BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f)
+BUILD_EP_CONTEXT_RW(devaddr, rsvd[6], 0, 0x7f)
+
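+/*
+ * The hardware stores the dequeue pointer shifted right by four bits: bit 0
+ * of deq_lo holds the dequeue cycle state (DCS), so transfer rings must be
+ * at least 16-byte aligned.
+ */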
+static inline u64 ep_ctx_read_deq_ptr(struct tegra_xudc_ep_context *ctx)
+{
+ return ((u64)ep_ctx_read_deq_hi(ctx) << 32) |
+ (ep_ctx_read_deq_lo(ctx) << 4);
+}
+
+static inline void
+ep_ctx_write_deq_ptr(struct tegra_xudc_ep_context *ctx, u64 addr)
+{
+ ep_ctx_write_deq_lo(ctx, lower_32_bits(addr) >> 4);
+ ep_ctx_write_deq_hi(ctx, upper_32_bits(addr));
+}
+
+struct tegra_xudc_trb {
+ __le32 data_lo;
+ __le32 data_hi;
+ __le32 status;
+ __le32 control;
+};
+
+#define TRB_TYPE_RSVD 0
+#define TRB_TYPE_NORMAL 1
+#define TRB_TYPE_SETUP_STAGE 2
+#define TRB_TYPE_DATA_STAGE 3
+#define TRB_TYPE_STATUS_STAGE 4
+#define TRB_TYPE_ISOCH 5
+#define TRB_TYPE_LINK 6
+#define TRB_TYPE_TRANSFER_EVENT 32
+#define TRB_TYPE_PORT_STATUS_CHANGE_EVENT 34
+#define TRB_TYPE_STREAM 48
+#define TRB_TYPE_SETUP_PACKET_EVENT 63
+
+#define TRB_CMPL_CODE_INVALID 0
+#define TRB_CMPL_CODE_SUCCESS 1
+#define TRB_CMPL_CODE_DATA_BUFFER_ERR 2
+#define TRB_CMPL_CODE_BABBLE_DETECTED_ERR 3
+#define TRB_CMPL_CODE_USB_TRANS_ERR 4
+#define TRB_CMPL_CODE_TRB_ERR 5
+#define TRB_CMPL_CODE_STALL 6
+#define TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR 10
+#define TRB_CMPL_CODE_SHORT_PACKET 13
+#define TRB_CMPL_CODE_RING_UNDERRUN 14
+#define TRB_CMPL_CODE_RING_OVERRUN 15
+#define TRB_CMPL_CODE_EVENT_RING_FULL_ERR 21
+#define TRB_CMPL_CODE_STOPPED 26
+#define TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN 31
+#define TRB_CMPL_CODE_STREAM_NUMP_ERROR 219
+#define TRB_CMPL_CODE_PRIME_PIPE_RECEIVED 220
+#define TRB_CMPL_CODE_HOST_REJECTED 221
+#define TRB_CMPL_CODE_CTRL_DIR_ERR 222
+#define TRB_CMPL_CODE_CTRL_SEQNUM_ERR 223
+
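+/*
+ * As with the endpoint context, generate accessors for the TRB words. Note
+ * that several control-word fields share bit 16 (stream_id, endpoint_id,
+ * tlbpc, data_stage_dir); which one applies depends on the TRB type.
+ */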
+#define BUILD_TRB_RW(name, member, shift, mask) \
+static inline u32 trb_read_##name(struct tegra_xudc_trb *trb) \
+{ \
+ return (le32_to_cpu(trb->member) >> (shift)) & (mask); \
+} \
+static inline void \
+trb_write_##name(struct tegra_xudc_trb *trb, u32 val) \
+{ \
+ u32 tmp; \
+ \
+ tmp = le32_to_cpu(trb->member) & ~((mask) << (shift)); \
+ tmp |= (val & (mask)) << (shift); \
+ trb->member = cpu_to_le32(tmp); \
+}
+
+BUILD_TRB_RW(data_lo, data_lo, 0, 0xffffffff)
+BUILD_TRB_RW(data_hi, data_hi, 0, 0xffffffff)
+BUILD_TRB_RW(seq_num, status, 0, 0xffff)
+BUILD_TRB_RW(transfer_len, status, 0, 0xffffff)
+BUILD_TRB_RW(td_size, status, 17, 0x1f)
+BUILD_TRB_RW(cmpl_code, status, 24, 0xff)
+BUILD_TRB_RW(cycle, control, 0, 0x1)
+BUILD_TRB_RW(toggle_cycle, control, 1, 0x1)
+BUILD_TRB_RW(isp, control, 2, 0x1)
+BUILD_TRB_RW(chain, control, 4, 0x1)
+BUILD_TRB_RW(ioc, control, 5, 0x1)
+BUILD_TRB_RW(type, control, 10, 0x3f)
+BUILD_TRB_RW(stream_id, control, 16, 0xffff)
+BUILD_TRB_RW(endpoint_id, control, 16, 0x1f)
+BUILD_TRB_RW(tlbpc, control, 16, 0xf)
+BUILD_TRB_RW(data_stage_dir, control, 16, 0x1)
+BUILD_TRB_RW(frame_id, control, 20, 0x7ff)
+BUILD_TRB_RW(sia, control, 31, 0x1)
+
+static inline u64 trb_read_data_ptr(struct tegra_xudc_trb *trb)
+{
+ return ((u64)trb_read_data_hi(trb) << 32) |
+ trb_read_data_lo(trb);
+}
+
+static inline void trb_write_data_ptr(struct tegra_xudc_trb *trb, u64 addr)
+{
+ trb_write_data_lo(trb, lower_32_bits(addr));
+ trb_write_data_hi(trb, upper_32_bits(addr));
+}
+
+struct tegra_xudc_request {
+ struct usb_request usb_req;
+
+ size_t buf_queued;
+ unsigned int trbs_queued;
+ unsigned int trbs_needed;
+ bool need_zlp;
+
+ struct tegra_xudc_trb *first_trb;
+ struct tegra_xudc_trb *last_trb;
+
+ struct list_head list;
+};
+
+struct tegra_xudc_ep {
+ struct tegra_xudc *xudc;
+ struct usb_ep usb_ep;
+ unsigned int index;
+ char name[8];
+
+ struct tegra_xudc_ep_context *context;
+
+#define XUDC_TRANSFER_RING_SIZE 64
+ struct tegra_xudc_trb *transfer_ring;
+ dma_addr_t transfer_ring_phys;
+
+ unsigned int enq_ptr;
+ unsigned int deq_ptr;
+ bool pcs;
+ bool ring_full;
+ bool stream_rejected;
+
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+};
+
+struct tegra_xudc_sel_timing {
+ __u8 u1sel;
+ __u8 u1pel;
+ __le16 u2sel;
+ __le16 u2pel;
+};
+
+enum tegra_xudc_setup_state {
+ WAIT_FOR_SETUP,
+ DATA_STAGE_XFER,
+ DATA_STAGE_RECV,
+ STATUS_STAGE_XFER,
+ STATUS_STAGE_RECV,
+};
+
+struct tegra_xudc_setup_packet {
+ struct usb_ctrlrequest ctrl_req;
+ unsigned int seq_num;
+};
+
+struct tegra_xudc_save_regs {
+ u32 ctrl;
+ u32 portpm;
+};
+
+struct tegra_xudc {
+ struct device *dev;
+ const struct tegra_xudc_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+
+ spinlock_t lock;
+
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+
+#define XUDC_NR_EVENT_RINGS 2
+#define XUDC_EVENT_RING_SIZE 4096
+ struct tegra_xudc_trb *event_ring[XUDC_NR_EVENT_RINGS];
+ dma_addr_t event_ring_phys[XUDC_NR_EVENT_RINGS];
+ unsigned int event_ring_index;
+ unsigned int event_ring_deq_ptr;
+ bool ccs;
+
+#define XUDC_NR_EPS 32
+ struct tegra_xudc_ep ep[XUDC_NR_EPS];
+ struct tegra_xudc_ep_context *ep_context;
+ dma_addr_t ep_context_phys;
+
+ struct device *genpd_dev_device;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_device;
+ struct device_link *genpd_dl_ss;
+
+ struct dma_pool *transfer_ring_pool;
+
+ bool queued_setup_packet;
+ struct tegra_xudc_setup_packet setup_packet;
+ enum tegra_xudc_setup_state setup_state;
+ u16 setup_seq_num;
+
+ u16 dev_addr;
+ u16 isoch_delay;
+ struct tegra_xudc_sel_timing sel_timing;
+ u8 test_mode_pattern;
+ u16 status_buf;
+ struct tegra_xudc_request *ep0_req;
+
+ bool pullup;
+
+ unsigned int nr_enabled_eps;
+ unsigned int nr_isoch_eps;
+
+ unsigned int device_state;
+ unsigned int resume_state;
+
+ int irq;
+
+ void __iomem *base;
+ resource_size_t phys_base;
+ void __iomem *ipfs;
+ void __iomem *fpci;
+
+ struct regulator_bulk_data *supplies;
+
+ struct clk_bulk_data *clks;
+
+ enum usb_role device_mode;
+ struct usb_role_switch *usb_role_sw;
+ struct work_struct usb_role_sw_work;
+
+ struct phy *usb3_phy;
+ struct phy *utmi_phy;
+
+ struct tegra_xudc_save_regs saved_regs;
+ bool suspended;
+ bool powergated;
+
+ struct completion disconnect_complete;
+
+ bool selfpowered;
+
+#define TOGGLE_VBUS_WAIT_MS 100
+ struct delayed_work plc_reset_work;
+ bool wait_csc;
+
+ struct delayed_work port_reset_war_work;
+ bool wait_for_sec_prc;
+};
+
+#define XUDC_TRB_MAX_BUFFER_SIZE 65536
+#define XUDC_MAX_ISOCH_EPS 4
+#define XUDC_INTERRUPT_MODERATION_US 0
+
+static struct usb_endpoint_descriptor tegra_xudc_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 0,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+struct tegra_xudc_soc {
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ const char * const *clock_names;
+ unsigned int num_clks;
+ bool u1_enable;
+ bool u2_enable;
+ bool lpm_enable;
+ bool invalid_seq_num;
+ bool pls_quirk;
+ bool port_reset_quirk;
+ bool has_ipfs;
+};
+
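+/*
+ * The controller exposes three register apertures: the XUSB_DEV core
+ * registers (base), the FPCI configuration space (fpci) and, on SoCs that
+ * have it, the IPFS wrapper (ipfs). Each aperture gets its own accessor
+ * pair below.
+ */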
+static inline u32 fpci_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->fpci + offset);
+}
+
+static inline void fpci_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->fpci + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->ipfs + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->ipfs + offset);
+}
+
+static inline u32 xudc_readl(struct tegra_xudc *xudc, unsigned int offset)
+{
+ return readl(xudc->base + offset);
+}
+
+static inline void xudc_writel(struct tegra_xudc *xudc, u32 val,
+ unsigned int offset)
+{
+ writel(val, xudc->base + offset);
+}
+
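+/*
+ * Poll a device register until (value & mask) == val, sampling every
+ * microsecond for up to 100 us. Callers may hold xudc->lock, hence the
+ * atomic poll variant.
+ */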
+static inline int xudc_readl_poll(struct tegra_xudc *xudc,
+ unsigned int offset, u32 mask, u32 val)
+{
+ u32 regval;
+
+ return readl_poll_timeout_atomic(xudc->base + offset, regval,
+ (regval & mask) == val, 1, 100);
+}
+
+static inline struct tegra_xudc *to_xudc(struct usb_gadget *gadget)
+{
+ return container_of(gadget, struct tegra_xudc, gadget);
+}
+
+static inline struct tegra_xudc_ep *to_xudc_ep(struct usb_ep *ep)
+{
+ return container_of(ep, struct tegra_xudc_ep, usb_ep);
+}
+
+static inline struct tegra_xudc_request *to_xudc_req(struct usb_request *req)
+{
+ return container_of(req, struct tegra_xudc_request, usb_req);
+}
+
+static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(xudc->dev,
+ "%s: %p, lo = %#x, hi = %#x, status = %#x, control = %#x\n",
+ type, trb, trb->data_lo, trb->data_hi, trb->status,
+ trb->control);
+}
+
+static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
+{
+ int err;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ err = phy_power_on(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi power on failed %d\n", err);
+
+ err = phy_power_on(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3 phy power on failed %d\n", err);
+
+ dev_dbg(xudc->dev, "device mode on\n");
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+
+ xudc->device_mode = USB_ROLE_DEVICE;
+}
+
+static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
+{
+ bool connected = false;
+ u32 pls, val;
+ int err;
+
+ dev_dbg(xudc->dev, "device mode off\n");
+
+ connected = !!(xudc_readl(xudc, PORTSC) & PORTSC_CCS);
+
+ reinit_completion(&xudc->disconnect_complete);
+
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, false);
+
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ /* Direct link to U0 if disconnected in RESUME or U2. */
+ if (xudc->soc->pls_quirk && xudc->gadget.speed == USB_SPEED_SUPER &&
+ (pls == PORTSC_PLS_RESUME || pls == PORTSC_PLS_U2)) {
+ val = xudc_readl(xudc, PORTPM);
+ val |= PORTPM_FRWE;
+ xudc_writel(xudc, val, PORTPM);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ xudc->device_mode = USB_ROLE_NONE;
+
+ /* Wait for disconnect event. */
+ if (connected)
+ wait_for_completion(&xudc->disconnect_complete);
+
+ /* Make sure interrupt handler has completed before powergating. */
+ synchronize_irq(xudc->irq);
+
+ err = phy_power_off(xudc->utmi_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "utmi_phy power off failed %d\n", err);
+
+ err = phy_power_off(xudc->usb3_phy);
+ if (err < 0)
+ dev_err(xudc->dev, "usb3_phy power off failed %d\n", err);
+
+ pm_runtime_put(xudc->dev);
+}
+
+static void tegra_xudc_usb_role_sw_work(struct work_struct *work)
+{
+ struct tegra_xudc *xudc = container_of(work, struct tegra_xudc,
+ usb_role_sw_work);
+
+ if (!xudc->usb_role_sw ||
+ usb_role_switch_get_role(xudc->usb_role_sw) == USB_ROLE_DEVICE)
+ tegra_xudc_device_mode_on(xudc);
+ else
+ tegra_xudc_device_mode_off(xudc);
+}
+
+static int tegra_xudc_usb_role_sw_set(struct device *dev, enum usb_role role)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ dev_dbg(dev, "%s role is %d\n", __func__, role);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (!xudc->suspended)
+ schedule_work(&xudc->usb_role_sw_work);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return 0;
+}
+
+static void tegra_xudc_plc_reset_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc = container_of(dwork, struct tegra_xudc,
+ plc_reset_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->wait_csc) {
+ u32 pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+
+ if (pls == PORTSC_PLS_INACTIVE) {
+ dev_info(xudc->dev, "PLS = Inactive. Toggle VBUS\n");
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl,
+ false);
+ tegra_xusb_padctl_set_vbus_override(xudc->padctl, true);
+ xudc->wait_csc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static void tegra_xudc_port_reset_war_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tegra_xudc *xudc =
+ container_of(dwork, struct tegra_xudc, port_reset_war_work);
+ unsigned long flags;
+ u32 pls;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+	if ((xudc->device_mode == USB_ROLE_DEVICE) &&
+	    xudc->wait_for_sec_prc) {
+ pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
+ PORTSC_PLS_SHIFT;
+ dev_dbg(xudc->dev, "pls = %x\n", pls);
+
+ if (pls == PORTSC_PLS_DISABLED) {
+ dev_dbg(xudc->dev, "toggle vbus\n");
+ /* PRC doesn't complete in 100ms, toggle the vbus */
+ ret = tegra_phy_xusb_utmi_port_reset(xudc->utmi_phy);
+ if (ret == 1)
+				xudc->wait_for_sec_prc = false;
+ }
+ }
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+}
+
+static dma_addr_t trb_virt_to_phys(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ unsigned int index;
+
+ index = trb - ep->transfer_ring;
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return 0;
+
+ return (ep->transfer_ring_phys + index * sizeof(*trb));
+}
+
+static struct tegra_xudc_trb *trb_phys_to_virt(struct tegra_xudc_ep *ep,
+ dma_addr_t addr)
+{
+ struct tegra_xudc_trb *trb;
+ unsigned int index;
+
+ index = (addr - ep->transfer_ring_phys) / sizeof(*trb);
+
+ if (WARN_ON(index >= XUDC_TRANSFER_RING_SIZE))
+ return NULL;
+
+ trb = &ep->transfer_ring[index];
+
+ return trb;
+}
+
+static void ep_reload(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_writel(xudc, BIT(ep), EP_RELOAD);
+ xudc_readl_poll(xudc, EP_RELOAD, BIT(ep), 0);
+}
+
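+/*
+ * Endpoint state changes (pause/unpause, halt/unhalt) follow a common
+ * handshake: update the control register, wait for the hardware to set the
+ * matching EP_STCHG bit, then write that bit back to acknowledge the
+ * change.
+ */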
+static void ep_pause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+
+ xudc_writel(xudc, val, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unpause_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_PAUSE);
+
+ xudc_writel(xudc, 0, EP_PAUSE);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_halt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (val & BIT(ep))
+ return;
+ val |= BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt(struct tegra_xudc *xudc, unsigned int ep)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!(val & BIT(ep)))
+ return;
+ val &= ~BIT(ep);
+ xudc_writel(xudc, val, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, BIT(ep), BIT(ep));
+
+ xudc_writel(xudc, BIT(ep), EP_STCHG);
+}
+
+static void ep_unhalt_all(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ val = xudc_readl(xudc, EP_HALT);
+ if (!val)
+ return;
+ xudc_writel(xudc, 0, EP_HALT);
+
+ xudc_readl_poll(xudc, EP_STCHG, val, val);
+
+ xudc_writel(xudc, val, EP_STCHG);
+}
+
+static void ep_wait_for_stopped(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_STOPPED, BIT(ep), BIT(ep));
+ xudc_writel(xudc, BIT(ep), EP_STOPPED);
+}
+
+static void ep_wait_for_inactive(struct tegra_xudc *xudc, unsigned int ep)
+{
+ xudc_readl_poll(xudc, EP_THREAD_ACTIVE, BIT(ep), 0);
+}
+
+static void tegra_xudc_req_done(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req, int status)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ dev_dbg(xudc->dev, "completing request %p on EP %u with status %d\n",
+ req, ep->index, status);
+
+ if (likely(req->usb_req.status == -EINPROGRESS))
+ req->usb_req.status = status;
+
+ list_del_init(&req->list);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ usb_gadget_unmap_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ spin_unlock(&xudc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&xudc->lock);
+}
+
+static void tegra_xudc_ep_nuke(struct tegra_xudc_ep *ep, int status)
+{
+ struct tegra_xudc_request *req;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ tegra_xudc_req_done(ep, req, status);
+ }
+}
+
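+/*
+ * Number of TRBs that can still be enqueued: the last ring slot is reserved
+ * for the link TRB, and one further slot is always left empty so that a
+ * full ring can be distinguished from an empty one.
+ */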
+static unsigned int ep_available_trbs(struct tegra_xudc_ep *ep)
+{
+ if (ep->ring_full)
+ return 0;
+
+ if (ep->deq_ptr > ep->enq_ptr)
+ return ep->deq_ptr - ep->enq_ptr - 1;
+
+ return XUDC_TRANSFER_RING_SIZE - (ep->enq_ptr - ep->deq_ptr) - 2;
+}
+
+static void tegra_xudc_queue_one_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb,
+ bool ioc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ dma_addr_t buf_addr;
+ size_t len;
+
+ len = min_t(size_t, XUDC_TRB_MAX_BUFFER_SIZE, req->usb_req.length -
+ req->buf_queued);
+ if (len > 0)
+ buf_addr = req->usb_req.dma + req->buf_queued;
+ else
+ buf_addr = 0;
+
+ trb_write_data_ptr(trb, buf_addr);
+
+ trb_write_transfer_len(trb, len);
+ trb_write_td_size(trb, req->trbs_needed - req->trbs_queued - 1);
+
+ if (req->trbs_queued == req->trbs_needed - 1 ||
+ (req->need_zlp && req->trbs_queued == req->trbs_needed - 2))
+ trb_write_chain(trb, 0);
+ else
+ trb_write_chain(trb, 1);
+
+ trb_write_ioc(trb, ioc);
+
+ if (usb_endpoint_dir_out(ep->desc) ||
+ (usb_endpoint_xfer_control(ep->desc) &&
+ (xudc->setup_state == DATA_STAGE_RECV)))
+ trb_write_isp(trb, 1);
+ else
+ trb_write_isp(trb, 0);
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == DATA_STAGE_RECV)
+ trb_write_type(trb, TRB_TYPE_DATA_STAGE);
+ else
+ trb_write_type(trb, TRB_TYPE_STATUS_STAGE);
+
+ if (xudc->setup_state == DATA_STAGE_XFER ||
+ xudc->setup_state == STATUS_STAGE_XFER)
+ trb_write_data_stage_dir(trb, 1);
+ else
+ trb_write_data_stage_dir(trb, 0);
+ } else if (usb_endpoint_xfer_isoc(ep->desc)) {
+ trb_write_type(trb, TRB_TYPE_ISOCH);
+ trb_write_sia(trb, 1);
+ trb_write_frame_id(trb, 0);
+ trb_write_tlbpc(trb, 0);
+ } else if (usb_ss_max_streams(ep->comp_desc)) {
+ trb_write_type(trb, TRB_TYPE_STREAM);
+ trb_write_stream_id(trb, req->usb_req.stream_id);
+ } else {
+ trb_write_type(trb, TRB_TYPE_NORMAL);
+ trb_write_stream_id(trb, 0);
+ }
+
+ trb_write_cycle(trb, ep->pcs);
+
+ req->trbs_queued++;
+ req->buf_queued += len;
+
+ dump_trb(xudc, "TRANSFER", trb);
+}
+
+static unsigned int tegra_xudc_queue_trbs(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ unsigned int i, count, available;
+ bool wait_td = false;
+
+ available = ep_available_trbs(ep);
+ count = req->trbs_needed - req->trbs_queued;
+ if (available < count) {
+ count = available;
+ ep->ring_full = true;
+ }
+
+ /*
+	 * To generate a zero-length packet on the USB bus, SW needs to
+	 * schedule a standalone zero-length TD. According to the HW's
+	 * behavior, SW needs to schedule TDs in different ways for
+	 * different endpoint types.
+ *
+ * For control endpoint:
+ * - Data stage TD (IOC = 1, CH = 0)
+	 * - Ring doorbell and wait for the transfer event
+ * - Data stage TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ *
+ * For bulk and interrupt endpoints:
+ * - Normal transfer TD (IOC = 0, CH = 0)
+ * - Normal transfer TD for ZLP (IOC = 1, CH = 0)
+ * - Ring doorbell
+ */
+
+ if (req->need_zlp && usb_endpoint_xfer_control(ep->desc) && count > 1)
+ wait_td = true;
+
+ if (!req->first_trb)
+ req->first_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ for (i = 0; i < count; i++) {
+ struct tegra_xudc_trb *trb = &ep->transfer_ring[ep->enq_ptr];
+ bool ioc = false;
+
+ if ((i == count - 1) || (wait_td && i == count - 2))
+ ioc = true;
+
+ tegra_xudc_queue_one_trb(ep, req, trb, ioc);
+ req->last_trb = trb;
+
+ ep->enq_ptr++;
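+		/*
+		 * Reached the link TRB at the end of the ring: hand it to
+		 * the hardware with the current cycle bit, then toggle the
+		 * producer cycle state and wrap to the start of the ring.
+		 */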
+ if (ep->enq_ptr == XUDC_TRANSFER_RING_SIZE - 1) {
+ trb = &ep->transfer_ring[ep->enq_ptr];
+ trb_write_cycle(trb, ep->pcs);
+ ep->pcs = !ep->pcs;
+ ep->enq_ptr = 0;
+ }
+
+ if (ioc)
+ break;
+ }
+
+ return count;
+}
+
+static void tegra_xudc_ep_ring_doorbell(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ u32 val;
+
+ if (list_empty(&ep->queue))
+ return;
+
+ val = DB_TARGET(ep->index);
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ val |= DB_STREAMID(xudc->setup_seq_num);
+ } else if (usb_ss_max_streams(ep->comp_desc) > 0) {
+ struct tegra_xudc_request *req;
+
+ /* Don't ring doorbell if the stream has been rejected. */
+ if (ep->stream_rejected)
+ return;
+
+ req = list_first_entry(&ep->queue, struct tegra_xudc_request,
+ list);
+ val |= DB_STREAMID(req->usb_req.stream_id);
+ }
+
+ dev_dbg(xudc->dev, "ring doorbell: %#x\n", val);
+ xudc_writel(xudc, val, DB);
+}
+
+static void tegra_xudc_ep_kick_queue(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc_request *req;
+ bool trbs_queued = false;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (ep->ring_full)
+ break;
+
+ if (tegra_xudc_queue_trbs(ep, req) > 0)
+ trbs_queued = true;
+ }
+
+ if (trbs_queued)
+ tegra_xudc_ep_ring_doorbell(ep);
+}
+
+static int
+__tegra_xudc_ep_queue(struct tegra_xudc_ep *ep, struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ int err;
+
+ if (usb_endpoint_xfer_control(ep->desc) && !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "control EP has pending transfers\n");
+ return -EINVAL;
+ }
+
+ if (usb_endpoint_xfer_control(ep->desc)) {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ (xudc->setup_state ==
+ DATA_STAGE_XFER));
+ } else {
+ err = usb_gadget_map_request(&xudc->gadget, &req->usb_req,
+ usb_endpoint_dir_in(ep->desc));
+ }
+
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to map request: %d\n", err);
+ return err;
+ }
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ req->need_zlp = false;
+ req->trbs_needed = DIV_ROUND_UP(req->usb_req.length,
+ XUDC_TRB_MAX_BUFFER_SIZE);
+ if (req->usb_req.length == 0)
+ req->trbs_needed++;
+
+ if (!usb_endpoint_xfer_isoc(ep->desc) &&
+ req->usb_req.zero && req->usb_req.length &&
+ ((req->usb_req.length % ep->usb_ep.maxpacket) == 0)) {
+ req->trbs_needed++;
+ req->need_zlp = true;
+ }
+
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ list_add_tail(&req->list, &ep->queue);
+
+ tegra_xudc_ep_kick_queue(ep);
+
+ return 0;
+}
+
+static int
+tegra_xudc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req,
+ gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_queue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void squeeze_transfer_ring(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc_trb *trb = req->first_trb;
+ bool pcs_enq = trb_read_cycle(trb);
+ bool pcs;
+
+ /*
+	 * Clear out all the TRBs that are part of or after the cancelled
+	 * request, and restore each TRB's cycle bit to the last un-enqueued
+	 * state.
+ */
+ while (trb != &ep->transfer_ring[ep->enq_ptr]) {
+ pcs = trb_read_cycle(trb);
+ memset(trb, 0, sizeof(*trb));
+ trb_write_cycle(trb, !pcs);
+ trb++;
+
+ if (trb_read_type(trb) == TRB_TYPE_LINK)
+ trb = ep->transfer_ring;
+ }
+
+ /* Requests will be re-queued at the start of the cancelled request. */
+ ep->enq_ptr = req->first_trb - ep->transfer_ring;
+ /*
+ * Retrieve the correct cycle bit state from the first trb of
+ * the cancelled request.
+ */
+ ep->pcs = pcs_enq;
+ ep->ring_full = false;
+ list_for_each_entry_continue(req, &ep->queue, list) {
+ req->usb_req.status = -EINPROGRESS;
+ req->usb_req.actual = 0;
+
+ req->first_trb = NULL;
+ req->last_trb = NULL;
+ req->buf_queued = 0;
+ req->trbs_queued = 0;
+ }
+}
+
+/*
+ * Determine if the given TRB is in the range [first trb, last trb] for the
+ * given request.
+ */
+static bool trb_in_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; trb %p\n", __func__,
+ req->first_trb, req->last_trb, trb);
+
+ if (trb >= req->first_trb && (trb <= req->last_trb ||
+ req->last_trb < req->first_trb))
+ return true;
+
+ if (trb < req->first_trb && trb <= req->last_trb &&
+ req->last_trb < req->first_trb)
+ return true;
+
+ return false;
+}
+
+/*
+ * Determine if the given TRB is in the range [EP enqueue pointer, first TRB)
+ * for the given endpoint and request.
+ */
+static bool trb_before_request(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req,
+ struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_trb *enq_trb = &ep->transfer_ring[ep->enq_ptr];
+
+ dev_dbg(ep->xudc->dev, "%s: request %p -> %p; enq ptr: %p; trb %p\n",
+ __func__, req->first_trb, req->last_trb, enq_trb, trb);
+
+ if (trb < req->first_trb && (enq_trb <= trb ||
+ req->first_trb < enq_trb))
+ return true;
+
+ if (trb > req->first_trb && req->first_trb < enq_trb && enq_trb <= trb)
+ return true;
+
+ return false;
+}
+
+static int
+__tegra_xudc_ep_dequeue(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_request *req)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ struct tegra_xudc_request *r;
+ struct tegra_xudc_trb *deq_trb;
+ bool busy, kick_queue = false;
+ int ret = 0;
+
+ /* Make sure the request is actually queued to this endpoint. */
+ list_for_each_entry(r, &ep->queue, list) {
+ if (r == req)
+ break;
+ }
+
+ if (r != req)
+ return -EINVAL;
+
+ /* Request hasn't been queued in the transfer ring yet. */
+ if (!req->trbs_queued) {
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ return 0;
+ }
+
+	/* Halt DMA for this endpoint. */
+ if (ep_ctx_read_state(ep->context) == EP_STATE_RUNNING) {
+ ep_pause(xudc, ep->index);
+ ep_wait_for_inactive(xudc, ep->index);
+ }
+
+ deq_trb = trb_phys_to_virt(ep, ep_ctx_read_deq_ptr(ep->context));
+ /* Is the hardware processing the TRB at the dequeue pointer? */
+ busy = (trb_read_cycle(deq_trb) == ep_ctx_read_dcs(ep->context));
+
+ if (trb_in_request(ep, req, deq_trb) && busy) {
+ /*
+ * Request has been partially completed or it hasn't
+ * started processing yet.
+ */
+ dma_addr_t deq_ptr;
+
+ squeeze_transfer_ring(ep, req);
+
+ req->usb_req.actual = ep_ctx_read_edtla(ep->context);
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+
+ /* EDTLA is > 0: request has been partially completed */
+ if (req->usb_req.actual > 0) {
+ /*
+ * Abort the pending transfer and update the dequeue
+ * pointer
+ */
+ ep_ctx_write_edtla(ep->context, 0);
+ ep_ctx_write_partial_td(ep->context, 0);
+ ep_ctx_write_data_offset(ep->context, 0);
+
+ deq_ptr = trb_virt_to_phys(ep,
+ &ep->transfer_ring[ep->enq_ptr]);
+
+ if (dma_mapping_error(xudc->dev, deq_ptr)) {
+ ret = -EINVAL;
+ } else {
+ ep_ctx_write_deq_ptr(ep->context, deq_ptr);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+ ep_reload(xudc, ep->index);
+ }
+ }
+ } else if (trb_before_request(ep, req, deq_trb) && busy) {
+ /* Request hasn't started processing yet. */
+ squeeze_transfer_ring(ep, req);
+
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ kick_queue = true;
+ } else {
+ /*
+ * Request has completed, but we haven't processed the
+ * completion event yet.
+ */
+ tegra_xudc_req_done(ep, req, -ECONNRESET);
+ ret = -EINVAL;
+ }
+
+ /* Resume the endpoint. */
+ ep_unpause(xudc, ep->index);
+
+ if (kick_queue)
+ tegra_xudc_ep_kick_queue(ep);
+
+ return ret;
+}
+
+static int
+tegra_xudc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !usb_req)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ req = to_xudc_req(usb_req);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated || !ep->desc) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_dequeue(ep, req);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (!ep->desc)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
+ dev_err(xudc->dev, "can't halt isoc EP\n");
+ return -ENOTSUPP;
+ }
+
+ if (!!(xudc_readl(xudc, EP_HALT) & BIT(ep->index)) == halt) {
+ dev_dbg(xudc->dev, "EP %u already %s\n", ep->index,
+ halt ? "halted" : "not halted");
+ return 0;
+ }
+
+ if (halt) {
+ ep_halt(xudc, ep->index);
+ } else {
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_seq_num(ep->context, 0);
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ tegra_xudc_ep_ring_doorbell(ep);
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_ep_set_halt(struct usb_ep *usb_ep, int value)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ if (value && usb_endpoint_dir_in(ep->desc) &&
+ !list_empty(&ep->queue)) {
+ dev_err(xudc->dev, "can't halt EP with requests pending\n");
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_set_halt(ep, value);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_ep_context_setup(struct tegra_xudc_ep *ep)
+{
+ const struct usb_endpoint_descriptor *desc = ep->desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+ struct tegra_xudc *xudc = ep->xudc;
+ u16 maxpacket, maxburst = 0, esit = 0;
+ u32 val;
+
+ maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (!usb_endpoint_xfer_control(desc))
+ maxburst = comp_desc->bMaxBurst;
+
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc))
+ esit = le16_to_cpu(comp_desc->wBytesPerInterval);
+ } else if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc))) {
+ if (xudc->gadget.speed == USB_SPEED_HIGH) {
+ maxburst = (usb_endpoint_maxp(desc) >> 11) & 0x3;
+ if (maxburst == 0x3) {
+ dev_warn(xudc->dev,
+ "invalid endpoint maxburst\n");
+ maxburst = 0x2;
+ }
+ }
+ esit = maxpacket * (maxburst + 1);
+ }
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_ctx_write_state(ep->context, EP_STATE_RUNNING);
+ ep_ctx_write_interval(ep->context, desc->bInterval);
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (usb_endpoint_xfer_isoc(desc)) {
+ ep_ctx_write_mult(ep->context,
+ comp_desc->bmAttributes & 0x3);
+ }
+
+ if (usb_endpoint_xfer_bulk(desc)) {
+ ep_ctx_write_max_pstreams(ep->context,
+ comp_desc->bmAttributes &
+ 0x1f);
+ ep_ctx_write_lsa(ep->context, 1);
+ }
+ }
+
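+	/*
+	 * Map the descriptor transfer type to the hardware endpoint type:
+	 * OUT endpoints use the raw value, while control and IN endpoints
+	 * are offset by EP_TYPE_CONTROL (4), matching the EP_TYPE_* table
+	 * above.
+	 */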
+ if (!usb_endpoint_xfer_control(desc) && usb_endpoint_dir_out(desc))
+ val = usb_endpoint_type(desc);
+ else
+ val = usb_endpoint_type(desc) + EP_TYPE_CONTROL;
+
+ ep_ctx_write_type(ep->context, val);
+ ep_ctx_write_cerr(ep->context, 0x3);
+ ep_ctx_write_max_packet_size(ep->context, maxpacket);
+ ep_ctx_write_max_burst_size(ep->context, maxburst);
+
+ ep_ctx_write_deq_ptr(ep->context, ep->transfer_ring_phys);
+ ep_ctx_write_dcs(ep->context, ep->pcs);
+
+ /* Select a reasonable average TRB length based on endpoint type. */
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ val = 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ val = 1024;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_ISOC:
+ default:
+ val = 3072;
+ break;
+ }
+
+ ep_ctx_write_avg_trb_len(ep->context, val);
+ ep_ctx_write_max_esit_payload(ep->context, esit);
+
+ ep_ctx_write_cerrcnt(ep->context, 0x3);
+}
+
+static void setup_link_trb(struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *trb)
+{
+ trb_write_data_ptr(trb, ep->transfer_ring_phys);
+ trb_write_type(trb, TRB_TYPE_LINK);
+ trb_write_toggle_cycle(trb, 1);
+}
+
+static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_err(xudc->dev, "endpoint %u already disabled\n",
+ ep->index);
+ return -EINVAL;
+ }
+
+ ep_ctx_write_state(ep->context, EP_STATE_DISABLED);
+
+ ep_reload(xudc, ep->index);
+
+ tegra_xudc_ep_nuke(ep, -ESHUTDOWN);
+
+ xudc->nr_enabled_eps--;
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ xudc->nr_isoch_eps--;
+
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+
+ memset(ep->context, 0, sizeof(*ep->context));
+
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+ if (xudc_readl(xudc, EP_STOPPED) & BIT(ep->index))
+ xudc_writel(xudc, BIT(ep->index), EP_STOPPED);
+
+ /*
+ * If this is the last endpoint disabled in a de-configure request,
+ * switch back to address state.
+ */
+ if ((xudc->device_state == USB_STATE_CONFIGURED) &&
+ (xudc->nr_enabled_eps == 1)) {
+ u32 val;
+
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ dev_info(xudc->dev, "ep %u disabled\n", ep->index);
+
+ return 0;
+}
+
+static int tegra_xudc_ep_disable(struct usb_ep *usb_ep)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep)
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_disable(ep);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc *xudc = ep->xudc;
+ unsigned int i;
+ u32 val;
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER &&
+ !usb_endpoint_xfer_control(desc) && !ep->usb_ep.comp_desc)
+ return -EINVAL;
+
+ /* Disable the EP if it is not disabled */
+ if (ep_ctx_read_state(ep->context) != EP_STATE_DISABLED)
+ __tegra_xudc_ep_disable(ep);
+
+ ep->desc = desc;
+ ep->comp_desc = ep->usb_ep.comp_desc;
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+		if (xudc->nr_isoch_eps >= XUDC_MAX_ISOCH_EPS) {
+ dev_err(xudc->dev, "too many isoch endpoints\n");
+ return -EBUSY;
+ }
+ xudc->nr_isoch_eps++;
+ }
+
+ memset(ep->transfer_ring, 0, XUDC_TRANSFER_RING_SIZE *
+ sizeof(*ep->transfer_ring));
+ setup_link_trb(ep, &ep->transfer_ring[XUDC_TRANSFER_RING_SIZE - 1]);
+
+ ep->enq_ptr = 0;
+ ep->deq_ptr = 0;
+ ep->pcs = true;
+ ep->ring_full = false;
+ xudc->nr_enabled_eps++;
+
+ tegra_xudc_ep_context_setup(ep);
+
+ /*
+ * No need to reload and un-halt EP0. This will be done automatically
+ * once a valid SETUP packet is received.
+ */
+ if (usb_endpoint_xfer_control(desc))
+ goto out;
+
+ /*
+ * Transition to configured state once the first non-control
+ * endpoint is enabled.
+ */
+ if (xudc->device_state == USB_STATE_ADDRESS) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_RUN;
+ xudc_writel(xudc, val, CTRL);
+
+ xudc->device_state = USB_STATE_CONFIGURED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ /*
+ * Pause all bulk endpoints when enabling an isoch endpoint
+ * to ensure the isoch endpoint is allocated enough bandwidth.
+ */
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_pause(xudc, i);
+ }
+ }
+
+ ep_reload(xudc, ep->index);
+ ep_unpause(xudc, ep->index);
+ ep_unhalt(xudc, ep->index);
+
+ if (usb_endpoint_xfer_isoc(desc)) {
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ if (xudc->ep[i].desc &&
+ usb_endpoint_xfer_bulk(xudc->ep[i].desc))
+ ep_unpause(xudc, i);
+ }
+ }
+
+out:
+ dev_info(xudc->dev, "EP %u (type: %s, dir: %s) enabled\n", ep->index,
+ usb_ep_type_string(usb_endpoint_type(ep->desc)),
+ usb_endpoint_dir_in(ep->desc) ? "in" : "out");
+
+ return 0;
+}
+
+static int tegra_xudc_ep_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct tegra_xudc_ep *ep;
+ struct tegra_xudc *xudc;
+ unsigned long flags;
+ int ret;
+
+ if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT))
+ return -EINVAL;
+
+ ep = to_xudc_ep(usb_ep);
+ xudc = ep->xudc;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = __tegra_xudc_ep_enable(ep, desc);
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *
+tegra_xudc_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp)
+{
+ struct tegra_xudc_request *req;
+
+ req = kzalloc(sizeof(*req), gfp);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->list);
+
+ return &req->usb_req;
+}
+
+static void tegra_xudc_ep_free_request(struct usb_ep *usb_ep,
+ struct usb_request *usb_req)
+{
+ struct tegra_xudc_request *req = to_xudc_req(usb_req);
+
+ kfree(req);
+}
+
+static struct usb_ep_ops tegra_xudc_ep_ops = {
+ .enable = tegra_xudc_ep_enable,
+ .disable = tegra_xudc_ep_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
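+/*
+ * EP0 is enabled and disabled internally by the UDC (see
+ * tegra_xudc_gadget_start() and tegra_xudc_gadget_stop()), so reject any
+ * attempt by the gadget driver to do so.
+ */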
+static int tegra_xudc_ep0_enable(struct usb_ep *usb_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ return -EBUSY;
+}
+
+static int tegra_xudc_ep0_disable(struct usb_ep *usb_ep)
+{
+ return -EBUSY;
+}
+
+static struct usb_ep_ops tegra_xudc_ep0_ops = {
+ .enable = tegra_xudc_ep0_enable,
+ .disable = tegra_xudc_ep0_disable,
+ .alloc_request = tegra_xudc_ep_alloc_request,
+ .free_request = tegra_xudc_ep_free_request,
+ .queue = tegra_xudc_ep_queue,
+ .dequeue = tegra_xudc_ep_dequeue,
+ .set_halt = tegra_xudc_ep_set_halt,
+};
+
+static int tegra_xudc_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+
+ ret = (xudc_readl(xudc, MFINDEX) & MFINDEX_FRAME_MASK) >>
+ MFINDEX_FRAME_SHIFT;
+unlock:
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static void tegra_xudc_resume_device_state(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ ep_unpause_all(xudc);
+
+ /* Direct link to U0. */
+ val = xudc_readl(xudc, PORTSC);
+ if (((val & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT) != PORTSC_PLS_U0) {
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_U0);
+ xudc_writel(xudc, val, PORTSC);
+ }
+
+ if (xudc->device_state == USB_STATE_SUSPENDED) {
+ xudc->device_state = xudc->resume_state;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ xudc->resume_state = 0;
+ }
+
+ /*
+ * Doorbells may be dropped if they are sent too soon (< ~200ns)
+ * after unpausing the endpoint. Wait for 500ns just to be safe.
+ */
+ ndelay(500);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_ring_doorbell(&xudc->ep[i]);
+}
+
+static int tegra_xudc_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->powergated) {
+ ret = -ESHUTDOWN;
+ goto unlock;
+ }
+ val = xudc_readl(xudc, PORTPM);
+ dev_dbg(xudc->dev, "%s: PORTPM=%#x, speed=%x\n", __func__,
+ val, gadget->speed);
+
+ if (((xudc->gadget.speed <= USB_SPEED_HIGH) &&
+ (val & PORTPM_RWE)) ||
+ ((xudc->gadget.speed == USB_SPEED_SUPER) &&
+ (val & PORTPM_FRWE))) {
+ tegra_xudc_resume_device_state(xudc);
+
+ /* Send Device Notification packet. */
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ val = DEVNOTIF_LO_TYPE(DEVNOTIF_LO_TYPE_FUNCTION_WAKE)
+ | DEVNOTIF_LO_TRIG;
+ xudc_writel(xudc, 0, DEVNOTIF_HI);
+ xudc_writel(xudc, val, DEVNOTIF_LO);
+ }
+ }
+
+unlock:
+	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (is_on != xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ if (is_on)
+ val |= CTRL_ENABLE;
+ else
+ val &= ~CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->pullup = is_on;
+	dev_dbg(xudc->dev, "%s: pullup:%d\n", __func__, is_on);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (!driver)
+ return -EINVAL;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ if (xudc->driver) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ret = __tegra_xudc_ep_enable(&xudc->ep[0], &tegra_xudc_ep0_desc);
+ if (ret < 0)
+ goto unlock;
+
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_IE | CTRL_LSE;
+ xudc_writel(xudc, val, CTRL);
+
+ val = xudc_readl(xudc, PORTHALT);
+ val |= PORTHALT_STCHG_INTR_EN;
+ xudc_writel(xudc, val, PORTHALT);
+
+ if (xudc->pullup) {
+ val = xudc_readl(xudc, CTRL);
+ val |= CTRL_ENABLE;
+ xudc_writel(xudc, val, CTRL);
+ }
+
+ xudc->driver = driver;
+unlock:
+	dev_dbg(xudc->dev, "%s: ret value is %d\n", __func__, ret);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return ret;
+}
+
+static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+ unsigned long flags;
+ u32 val;
+
+ pm_runtime_get_sync(xudc->dev);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_IE | CTRL_ENABLE);
+ xudc_writel(xudc, val, CTRL);
+
+ __tegra_xudc_ep_disable(&xudc->ep[0]);
+
+ xudc->driver = NULL;
+	dev_dbg(xudc->dev, "Gadget stopped\n");
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ pm_runtime_put(xudc->dev);
+
+ return 0;
+}
+
+static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
+{
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %d\n", __func__, is_on);
+ xudc->selfpowered = !!is_on;
+
+ return 0;
+}
+
+static struct usb_gadget_ops tegra_xudc_gadget_ops = {
+ .get_frame = tegra_xudc_gadget_get_frame,
+ .wakeup = tegra_xudc_gadget_wakeup,
+ .pullup = tegra_xudc_gadget_pullup,
+ .udc_start = tegra_xudc_gadget_start,
+ .udc_stop = tegra_xudc_gadget_stop,
+ .set_selfpowered = tegra_xudc_set_selfpowered,
+};
+
+static void no_op_complete(struct usb_ep *ep, struct usb_request *req)
+{
+}
+
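+/*
+ * Queue a response on EP0 using the preallocated ep0_req; a zero-length
+ * request serves as the status stage of a control transfer.
+ */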
+static int
+tegra_xudc_ep0_queue_status(struct tegra_xudc *xudc,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = NULL;
+ xudc->ep0_req->usb_req.dma = 0;
+ xudc->ep0_req->usb_req.length = 0;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static int
+tegra_xudc_ep0_queue_data(struct tegra_xudc *xudc, void *buf, size_t len,
+ void (*cmpl)(struct usb_ep *, struct usb_request *))
+{
+ xudc->ep0_req->usb_req.buf = buf;
+ xudc->ep0_req->usb_req.length = len;
+ xudc->ep0_req->usb_req.complete = cmpl;
+ xudc->ep0_req->usb_req.context = xudc;
+
+ return __tegra_xudc_ep_queue(&xudc->ep[0], xudc->ep0_req);
+}
+
+static void tegra_xudc_ep0_req_done(struct tegra_xudc *xudc)
+{
+ switch (xudc->setup_state) {
+ case DATA_STAGE_XFER:
+ xudc->setup_state = STATUS_STAGE_RECV;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ case DATA_STAGE_RECV:
+ xudc->setup_state = STATUS_STAGE_XFER;
+ tegra_xudc_ep0_queue_status(xudc, no_op_complete);
+ break;
+ default:
+ xudc->setup_state = WAIT_FOR_SETUP;
+ break;
+ }
+}
+
+static int tegra_xudc_ep0_delegate_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&xudc->lock);
+ ret = xudc->driver->setup(&xudc->gadget, ctrl);
+ spin_lock(&xudc->lock);
+
+ return ret;
+}
+
+static void set_feature_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
+ if (xudc->test_mode_pattern) {
+ xudc_writel(xudc, xudc->test_mode_pattern, PORT_TM);
+ xudc->test_mode_pattern = 0;
+ }
+}
+
+static int tegra_xudc_ep0_set_feature(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
+ u32 feature = le16_to_cpu(ctrl->wValue);
+ u32 index = le16_to_cpu(ctrl->wIndex);
+ u32 val, ep;
+ int ret;
+
+ if (le16_to_cpu(ctrl->wLength) != 0)
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (feature) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ if ((xudc->gadget.speed == USB_SPEED_SUPER) ||
+ (xudc->device_state == USB_STATE_DEFAULT))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if (set)
+ val |= PORTPM_RWE;
+ else
+ val &= ~PORTPM_RWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ if ((xudc->device_state != USB_STATE_CONFIGURED) ||
+ (xudc->gadget.speed != USB_SPEED_SUPER))
+ return -EINVAL;
+
+ val = xudc_readl(xudc, PORTPM);
+ if ((feature == USB_DEVICE_U1_ENABLE) &&
+ xudc->soc->u1_enable) {
+ if (set)
+ val |= PORTPM_U1E;
+ else
+ val &= ~PORTPM_U1E;
+ }
+
+ if ((feature == USB_DEVICE_U2_ENABLE) &&
+ xudc->soc->u2_enable) {
+ if (set)
+ val |= PORTPM_U2E;
+ else
+ val &= ~PORTPM_U2E;
+ }
+
+ xudc_writel(xudc, val, PORTPM);
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (xudc->gadget.speed != USB_SPEED_HIGH)
+ return -EINVAL;
+
+ if (!set)
+ return -EINVAL;
+
+ xudc->test_mode_pattern = index >> 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->device_state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ switch (feature) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (set) {
+ val = xudc_readl(xudc, PORTPM);
+
+ if (index & USB_INTRF_FUNC_SUSPEND_RW)
+ val |= PORTPM_FRWE;
+ else
+ val &= ~PORTPM_FRWE;
+
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ return tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+
+ if ((xudc->device_state == USB_STATE_DEFAULT) ||
+ ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (index != 0)))
+ return -EINVAL;
+
+ ret = __tegra_xudc_ep_set_halt(&xudc->ep[ep], set);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tegra_xudc_ep0_queue_status(xudc, set_feature_complete);
+}
+
+static int tegra_xudc_ep0_get_status(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep_context *ep_ctx;
+ u32 val, ep, index = le16_to_cpu(ctrl->wIndex);
+ u16 status = 0;
+
+ if (!(ctrl->bRequestType & USB_DIR_IN))
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 2))
+ return -EINVAL;
+
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ val = xudc_readl(xudc, PORTPM);
+
+ if (xudc->selfpowered)
+ status |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if ((xudc->gadget.speed < USB_SPEED_SUPER) &&
+ (val & PORTPM_RWE))
+ status |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ if (val & PORTPM_U1E)
+ status |= BIT(USB_DEV_STAT_U1_ENABLED);
+ if (val & PORTPM_U2E)
+ status |= BIT(USB_DEV_STAT_U2_ENABLED);
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ if (xudc->gadget.speed == USB_SPEED_SUPER) {
+ status |= USB_INTRF_STAT_FUNC_RW_CAP;
+ val = xudc_readl(xudc, PORTPM);
+ if (val & PORTPM_FRWE)
+ status |= USB_INTRF_STAT_FUNC_RW;
+ }
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = (index & USB_ENDPOINT_NUMBER_MASK) * 2 +
+ ((index & USB_DIR_IN) ? 1 : 0);
+ ep_ctx = &xudc->ep_context[ep];
+
+ if ((xudc->device_state != USB_STATE_CONFIGURED) &&
+ ((xudc->device_state != USB_STATE_ADDRESS) || (ep != 0)))
+ return -EINVAL;
+
+ if (ep_ctx_read_state(ep_ctx) == EP_STATE_DISABLED)
+ return -EINVAL;
+
+ if (xudc_readl(xudc, EP_HALT) & BIT(ep))
+ status |= BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xudc->status_buf = cpu_to_le16(status);
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->status_buf,
+ sizeof(xudc->status_buf),
+ no_op_complete);
+}
+
+static void set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* Nothing to do with SEL values */
+}
+
+static int tegra_xudc_ep0_set_sel(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ if ((le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wValue) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 6))
+ return -EINVAL;
+
+ return tegra_xudc_ep0_queue_data(xudc, &xudc->sel_timing,
+ sizeof(xudc->sel_timing),
+ set_sel_complete);
+}
+
+static void set_isoch_delay_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ /* Nothing to do with isoch delay */
+}
+
+static int tegra_xudc_ep0_set_isoch_delay(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ u32 delay = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((delay > 65535) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ xudc->isoch_delay = delay;
+
+ return tegra_xudc_ep0_queue_status(xudc, set_isoch_delay_complete);
+}
+
+static void set_address_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct tegra_xudc *xudc = req->context;
+
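+ /*
+ * A non-zero address moves the device from Default to Address;
+ * address zero moves it back to Default.
+ */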
+ if ((xudc->device_state == USB_STATE_DEFAULT) &&
+ (xudc->dev_addr != 0)) {
+ xudc->device_state = USB_STATE_ADDRESS;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ } else if ((xudc->device_state == USB_STATE_ADDRESS) &&
+ (xudc->dev_addr == 0)) {
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+ }
+}
+
+static int tegra_xudc_ep0_set_address(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u32 val, addr = le16_to_cpu(ctrl->wValue);
+
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE |
+ USB_TYPE_STANDARD))
+ return -EINVAL;
+
+ if ((addr > 127) || (le16_to_cpu(ctrl->wIndex) != 0) ||
+ (le16_to_cpu(ctrl->wLength) != 0))
+ return -EINVAL;
+
+ if (xudc->device_state == USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ dev_dbg(xudc->dev, "set address: %u\n", addr);
+
+ xudc->dev_addr = addr;
+ val = xudc_readl(xudc, CTRL);
+ val &= ~(CTRL_DEVADDR_MASK);
+ val |= CTRL_DEVADDR(addr);
+ xudc_writel(xudc, val, CTRL);
+
+ ep_ctx_write_devaddr(ep0->context, addr);
+
+ return tegra_xudc_ep0_queue_status(xudc, set_address_complete);
+}
+
+static int tegra_xudc_ep0_standard_req(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ dev_dbg(xudc->dev, "USB_REQ_GET_STATUS\n");
+ ret = tegra_xudc_ep0_get_status(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = tegra_xudc_ep0_set_address(xudc, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ dev_dbg(xudc->dev, "USB_REQ_SET_SEL\n");
+ ret = tegra_xudc_ep0_set_sel(xudc, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_dbg(xudc->dev, "USB_REQ_SET_ISOCH_DELAY\n");
+ ret = tegra_xudc_ep0_set_isoch_delay(xudc, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(xudc->dev, "USB_REQ_CLEAR/SET_FEATURE\n");
+ ret = tegra_xudc_ep0_set_feature(xudc, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(xudc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ /*
+ * In theory, the RUN bit should be cleared before the status stage
+ * of a deconfigure request is sent, but doing so appears to cause
+ * problems. Instead, clear RUN once all endpoints are disabled.
+ */
+ fallthrough;
+ default:
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_xudc_handle_ep0_setup_packet(struct tegra_xudc *xudc,
+ struct usb_ctrlrequest *ctrl,
+ u16 seq_num)
+{
+ int ret;
+
+ xudc->setup_seq_num = seq_num;
+
+ /* Ensure EP0 is unhalted. */
+ ep_unhalt(xudc, 0);
+
+ /*
+ * On Tegra210, setup packets with sequence numbers 0xfffe or 0xffff
+ * are invalid. Halt EP0 until we get a valid packet.
+ */
+ if (xudc->soc->invalid_seq_num &&
+ (seq_num == 0xfffe || seq_num == 0xffff)) {
+ dev_warn(xudc->dev, "invalid sequence number detected\n");
+ ep_halt(xudc, 0);
+ return;
+ }
+
+ if (ctrl->wLength)
+ xudc->setup_state = (ctrl->bRequestType & USB_DIR_IN) ?
+ DATA_STAGE_XFER : DATA_STAGE_RECV;
+ else
+ xudc->setup_state = STATUS_STAGE_XFER;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = tegra_xudc_ep0_standard_req(xudc, ctrl);
+ else
+ ret = tegra_xudc_ep0_delegate_req(xudc, ctrl);
+
+ if (ret < 0) {
+ dev_warn(xudc->dev, "setup request failed: %d\n", ret);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ ep_halt(xudc, 0);
+ }
+}
+
+static void tegra_xudc_handle_ep0_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *)event;
+ u16 seq_num = trb_read_seq_num(event);
+
+ if (xudc->setup_state != WAIT_FOR_SETUP) {
+ /*
+ * The controller is in the process of handling another
+ * setup request. Queue subsequent requests and handle
+ * the last one once the controller reports a sequence
+ * number error.
+ */
+ memcpy(&xudc->setup_packet.ctrl_req, ctrl, sizeof(*ctrl));
+ xudc->setup_packet.seq_num = seq_num;
+ xudc->queued_setup_packet = true;
+ } else {
+ tegra_xudc_handle_ep0_setup_packet(xudc, ctrl, seq_num);
+ }
+}
+
+static struct tegra_xudc_request *
+trb_to_request(struct tegra_xudc_ep *ep, struct tegra_xudc_trb *trb)
+{
+ struct tegra_xudc_request *req;
+
+ list_for_each_entry(req, &ep->queue, list) {
+ if (!req->trbs_queued)
+ break;
+
+ if (trb_in_request(ep, req, trb))
+ return req;
+ }
+
+ return NULL;
+}
+
+static void tegra_xudc_handle_transfer_completion(struct tegra_xudc *xudc,
+ struct tegra_xudc_ep *ep,
+ struct tegra_xudc_trb *event)
+{
+ struct tegra_xudc_request *req;
+ struct tegra_xudc_trb *trb;
+ bool short_packet;
+
+ short_packet = (trb_read_cmpl_code(event) ==
+ TRB_CMPL_CODE_SHORT_PACKET);
+
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ req = trb_to_request(ep, trb);
+
+ /*
+ * TDs are complete on short packet or when the completed TRB is the
+ * last TRB in the TD (the CHAIN bit is unset).
+ */
+ if (req && (short_packet || (!trb_read_chain(trb) &&
+ (req->trbs_needed == req->trbs_queued)))) {
+ struct tegra_xudc_trb *last = req->last_trb;
+ unsigned int residual;
+
+ residual = trb_read_transfer_len(event);
+ req->usb_req.actual = req->usb_req.length - residual;
+
+ dev_dbg(xudc->dev, "bytes transferred %u / %u\n",
+ req->usb_req.actual, req->usb_req.length);
+
+ tegra_xudc_req_done(ep, req, 0);
+
+ if (ep->desc && usb_endpoint_xfer_control(ep->desc))
+ tegra_xudc_ep0_req_done(xudc);
+
+ /*
+ * Advance the dequeue pointer past the end of the current TD
+ * on short packet completion.
+ */
+ if (short_packet) {
+ ep->deq_ptr = (last - ep->transfer_ring) + 1;
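+ /* The final ring entry is a link TRB; skip it when wrapping. */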
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ }
+ } else if (!req) {
+ dev_warn(xudc->dev, "transfer event on dequeued request\n");
+ }
+
+ if (ep->desc)
+ tegra_xudc_ep_kick_queue(ep);
+}
+
+static void tegra_xudc_handle_transfer_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ unsigned int ep_index = trb_read_endpoint_id(event);
+ struct tegra_xudc_ep *ep = &xudc->ep[ep_index];
+ struct tegra_xudc_trb *trb;
+ u16 comp_code;
+
+ if (ep_ctx_read_state(ep->context) == EP_STATE_DISABLED) {
+ dev_warn(xudc->dev, "transfer event on disabled EP %u\n",
+ ep_index);
+ return;
+ }
+
+ /* Update transfer ring dequeue pointer. */
+ trb = trb_phys_to_virt(ep, trb_read_data_ptr(event));
+ comp_code = trb_read_cmpl_code(event);
+ if (comp_code != TRB_CMPL_CODE_BABBLE_DETECTED_ERR) {
+ ep->deq_ptr = (trb - ep->transfer_ring) + 1;
+
+ if (ep->deq_ptr == XUDC_TRANSFER_RING_SIZE - 1)
+ ep->deq_ptr = 0;
+ ep->ring_full = false;
+ }
+
+ switch (comp_code) {
+ case TRB_CMPL_CODE_SUCCESS:
+ case TRB_CMPL_CODE_SHORT_PACKET:
+ tegra_xudc_handle_transfer_completion(xudc, ep, event);
+ break;
+ case TRB_CMPL_CODE_HOST_REJECTED:
+ dev_info(xudc->dev, "stream rejected on EP %u\n", ep_index);
+
+ ep->stream_rejected = true;
+ break;
+ case TRB_CMPL_CODE_PRIME_PIPE_RECEIVED:
+ dev_info(xudc->dev, "prime pipe received on EP %u\n", ep_index);
+
+ if (ep->stream_rejected) {
+ ep->stream_rejected = false;
+ /*
+ * An EP is stopped when a stream is rejected. Wait
+ * for the EP to report that it is stopped and then
+ * un-stop it.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ }
+ tegra_xudc_ep_ring_doorbell(ep);
+ break;
+ case TRB_CMPL_CODE_BABBLE_DETECTED_ERR:
+ /*
+ * Wait for the EP to be stopped so the controller stops
+ * processing doorbells.
+ */
+ ep_wait_for_stopped(xudc, ep_index);
+ ep->enq_ptr = ep->deq_ptr;
+ tegra_xudc_ep_nuke(ep, -EIO);
+ fallthrough;
+ case TRB_CMPL_CODE_STREAM_NUMP_ERROR:
+ case TRB_CMPL_CODE_CTRL_DIR_ERR:
+ case TRB_CMPL_CODE_INVALID_STREAM_TYPE_ERR:
+ case TRB_CMPL_CODE_RING_UNDERRUN:
+ case TRB_CMPL_CODE_RING_OVERRUN:
+ case TRB_CMPL_CODE_ISOCH_BUFFER_OVERRUN:
+ case TRB_CMPL_CODE_USB_TRANS_ERR:
+ case TRB_CMPL_CODE_TRB_ERR:
+ dev_err(xudc->dev, "completion error %#x on EP %u\n",
+ comp_code, ep_index);
+
+ ep_halt(xudc, ep_index);
+ break;
+ case TRB_CMPL_CODE_CTRL_SEQNUM_ERR:
+ dev_info(xudc->dev, "sequence number error\n");
+
+ /*
+ * Kill any queued control request and skip to the last
+ * setup packet we received.
+ */
+ tegra_xudc_ep_nuke(ep, -EINVAL);
+ xudc->setup_state = WAIT_FOR_SETUP;
+ if (!xudc->queued_setup_packet)
+ break;
+
+ tegra_xudc_handle_ep0_setup_packet(xudc,
+ &xudc->setup_packet.ctrl_req,
+ xudc->setup_packet.seq_num);
+ xudc->queued_setup_packet = false;
+ break;
+ case TRB_CMPL_CODE_STOPPED:
+ dev_dbg(xudc->dev, "stop completion code on EP %u\n",
+ ep_index);
+
+ /* Disconnected. */
+ tegra_xudc_ep_nuke(ep, -ECONNREFUSED);
+ break;
+ default:
+ dev_dbg(xudc->dev, "completion event %#x on EP %u\n",
+ comp_code, ep_index);
+ break;
+ }
+}
+
+static void tegra_xudc_reset(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ dma_addr_t deq_ptr;
+ unsigned int i;
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ ep_unpause_all(xudc);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_ep_nuke(&xudc->ep[i], -ESHUTDOWN);
+
+ /*
+ * Reset sequence number and dequeue pointer to flush the transfer
+ * ring.
+ */
+ ep0->deq_ptr = ep0->enq_ptr;
+ ep0->ring_full = false;
+
+ xudc->setup_seq_num = 0;
+ xudc->queued_setup_packet = false;
+
+ ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num);
+
+ deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]);
+
+ if (!dma_mapping_error(xudc->dev, deq_ptr)) {
+ ep_ctx_write_deq_ptr(ep0->context, deq_ptr);
+ ep_ctx_write_dcs(ep0->context, ep0->pcs);
+ }
+
+ ep_unhalt_all(xudc);
+ ep_reload(xudc, 0);
+ ep_unpause(xudc, 0);
+}
+
+static void tegra_xudc_port_connect(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_ep *ep0 = &xudc->ep[0];
+ u16 maxpacket;
+ u32 val;
+
+ val = (xudc_readl(xudc, PORTSC) & PORTSC_PS_MASK) >> PORTSC_PS_SHIFT;
+ switch (val) {
+ case PORTSC_PS_LS:
+ xudc->gadget.speed = USB_SPEED_LOW;
+ break;
+ case PORTSC_PS_FS:
+ xudc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case PORTSC_PS_HS:
+ xudc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case PORTSC_PS_SS:
+ xudc->gadget.speed = USB_SPEED_SUPER;
+ break;
+ default:
+ xudc->gadget.speed = USB_SPEED_UNKNOWN;
+ break;
+ }
+
+ xudc->device_state = USB_STATE_DEFAULT;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ xudc->setup_state = WAIT_FOR_SETUP;
+
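+ /*
+ * Per the USB specification, the EP0 maximum packet size is 512
+ * for SuperSpeed and at most 64 for the other speeds.
+ */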
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ maxpacket = 512;
+ else
+ maxpacket = 64;
+
+ ep_ctx_write_max_packet_size(ep0->context, maxpacket);
+ tegra_xudc_ep0_desc.wMaxPacketSize = cpu_to_le16(maxpacket);
+ usb_ep_set_maxpacket_limit(&ep0->usb_ep, maxpacket);
+
+ if (!xudc->soc->u1_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U1TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (!xudc->soc->u2_enable) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_U2TIMEOUT_MASK);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ if (xudc->gadget.speed <= USB_SPEED_HIGH) {
+ val = xudc_readl(xudc, PORTPM);
+ val &= ~(PORTPM_L1S_MASK);
+ if (xudc->soc->lpm_enable)
+ val |= PORTPM_L1S(PORTPM_L1S_ACCEPT);
+ else
+ val |= PORTPM_L1S(PORTPM_L1S_NYET);
+ xudc_writel(xudc, val, PORTPM);
+ }
+
+ val = xudc_readl(xudc, ST);
+ if (val & ST_RC)
+ xudc_writel(xudc, ST_RC, ST);
+}
+
+static void tegra_xudc_port_disconnect(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
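+ /*
+ * Drop the lock around the gadget driver callback, which may call
+ * back into the UDC.
+ */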
+ if (xudc->driver && xudc->driver->disconnect) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->disconnect(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+
+ xudc->device_state = USB_STATE_NOTATTACHED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ complete(&xudc->disconnect_complete);
+}
+
+static void tegra_xudc_port_reset(struct tegra_xudc *xudc)
+{
+ tegra_xudc_reset(xudc);
+
+ if (xudc->driver) {
+ spin_unlock(&xudc->lock);
+ usb_gadget_udc_reset(&xudc->gadget, xudc->driver);
+ spin_lock(&xudc->lock);
+ }
+
+ tegra_xudc_port_connect(xudc);
+}
+
+static void tegra_xudc_port_suspend(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port suspend\n");
+
+ xudc->resume_state = xudc->device_state;
+ xudc->device_state = USB_STATE_SUSPENDED;
+ usb_gadget_set_state(&xudc->gadget, xudc->device_state);
+
+ if (xudc->driver->suspend) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->suspend(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static void tegra_xudc_port_resume(struct tegra_xudc *xudc)
+{
+ dev_dbg(xudc->dev, "port resume\n");
+
+ tegra_xudc_resume_device_state(xudc);
+
+ if (xudc->driver->resume) {
+ spin_unlock(&xudc->lock);
+ xudc->driver->resume(&xudc->gadget);
+ spin_lock(&xudc->lock);
+ }
+}
+
+static inline void clear_port_change(struct tegra_xudc *xudc, u32 flag)
+{
+ u32 val;
+
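+ /*
+ * The change bits are write-1-to-clear; mask them all out first so
+ * that only the requested flag is acknowledged.
+ */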
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~PORTSC_CHANGE_MASK;
+ val |= flag;
+ xudc_writel(xudc, val, PORTSC);
+}
+
+static void __tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ u32 portsc, porthalt;
+
+ porthalt = xudc_readl(xudc, PORTHALT);
+ if ((porthalt & PORTHALT_STCHG_REQ) &&
+ (porthalt & PORTHALT_HALT_LTSSM)) {
+ dev_dbg(xudc->dev, "STCHG_REQ, PORTHALT = %#x\n", porthalt);
+ porthalt &= ~PORTHALT_HALT_LTSSM;
+ xudc_writel(xudc, porthalt, PORTHALT);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if ((portsc & PORTSC_PRC) && (portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+#define TOGGLE_VBUS_WAIT_MS 100
+ if (xudc->soc->port_reset_quirk) {
+ schedule_delayed_work(&xudc->port_reset_war_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_for_sec_prc = 1;
+ }
+ }
+
+ if ((portsc & PORTSC_PRC) && !(portsc & PORTSC_PR)) {
+ dev_dbg(xudc->dev, "PRC, Not PR, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PRC | PORTSC_PED);
+ tegra_xudc_port_reset(xudc);
+ cancel_delayed_work(&xudc->port_reset_war_work);
+ xudc->wait_for_sec_prc = 0;
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_WRC) {
+ dev_dbg(xudc->dev, "WRC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_WRC | PORTSC_PED);
+ if (!(xudc_readl(xudc, PORTSC) & PORTSC_WPR))
+ tegra_xudc_port_reset(xudc);
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_CSC) {
+ dev_dbg(xudc->dev, "CSC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CSC);
+
+ if (portsc & PORTSC_CCS)
+ tegra_xudc_port_connect(xudc);
+ else
+ tegra_xudc_port_disconnect(xudc);
+
+ if (xudc->wait_csc) {
+ cancel_delayed_work(&xudc->plc_reset_work);
+ xudc->wait_csc = false;
+ }
+ }
+
+ portsc = xudc_readl(xudc, PORTSC);
+ if (portsc & PORTSC_PLC) {
+ u32 pls = (portsc & PORTSC_PLS_MASK) >> PORTSC_PLS_SHIFT;
+
+ dev_dbg(xudc->dev, "PLC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_PLC);
+ switch (pls) {
+ case PORTSC_PLS_U3:
+ tegra_xudc_port_suspend(xudc);
+ break;
+ case PORTSC_PLS_U0:
+ if (xudc->gadget.speed < USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_RESUME:
+ if (xudc->gadget.speed == USB_SPEED_SUPER)
+ tegra_xudc_port_resume(xudc);
+ break;
+ case PORTSC_PLS_INACTIVE:
+ schedule_delayed_work(&xudc->plc_reset_work,
+ msecs_to_jiffies(TOGGLE_VBUS_WAIT_MS));
+ xudc->wait_csc = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (portsc & PORTSC_CEC) {
+ dev_warn(xudc->dev, "CEC, PORTSC = %#x\n", portsc);
+ clear_port_change(xudc, PORTSC_CEC);
+ }
+
+ dev_dbg(xudc->dev, "PORTSC = %#x\n", xudc_readl(xudc, PORTSC));
+}
+
+static void tegra_xudc_handle_port_status(struct tegra_xudc *xudc)
+{
+ while ((xudc_readl(xudc, PORTSC) & PORTSC_CHANGE_MASK) ||
+ (xudc_readl(xudc, PORTHALT) & PORTHALT_STCHG_REQ))
+ __tegra_xudc_handle_port_status(xudc);
+}
+
+static void tegra_xudc_handle_event(struct tegra_xudc *xudc,
+ struct tegra_xudc_trb *event)
+{
+ u32 type = trb_read_type(event);
+
+ dump_trb(xudc, "EVENT", event);
+
+ switch (type) {
+ case TRB_TYPE_PORT_STATUS_CHANGE_EVENT:
+ tegra_xudc_handle_port_status(xudc);
+ break;
+ case TRB_TYPE_TRANSFER_EVENT:
+ tegra_xudc_handle_transfer_event(xudc, event);
+ break;
+ case TRB_TYPE_SETUP_PACKET_EVENT:
+ tegra_xudc_handle_ep0_event(xudc, event);
+ break;
+ default:
+ dev_info(xudc->dev, "Unrecognized TRB type = %#x\n", type);
+ break;
+ }
+}
+
+static void tegra_xudc_process_event_ring(struct tegra_xudc *xudc)
+{
+ struct tegra_xudc_trb *event;
+ dma_addr_t erdp;
+
+ while (true) {
+ event = xudc->event_ring[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr;
+
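+ /*
+ * A cycle bit that does not match the consumer cycle state means
+ * the controller has not yet written this TRB.
+ */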
+ if (trb_read_cycle(event) != xudc->ccs)
+ break;
+
+ tegra_xudc_handle_event(xudc, event);
+
+ xudc->event_ring_deq_ptr++;
+ if (xudc->event_ring_deq_ptr == XUDC_EVENT_RING_SIZE) {
+ xudc->event_ring_deq_ptr = 0;
+ xudc->event_ring_index++;
+ }
+
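+ /*
+ * Toggle the consumer cycle state after a full pass over all
+ * event ring segments.
+ */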
+ if (xudc->event_ring_index == XUDC_NR_EVENT_RINGS) {
+ xudc->event_ring_index = 0;
+ xudc->ccs = !xudc->ccs;
+ }
+ }
+
+ erdp = xudc->event_ring_phys[xudc->event_ring_index] +
+ xudc->event_ring_deq_ptr * sizeof(*event);
+
+ xudc_writel(xudc, upper_32_bits(erdp), ERDPHI);
+ xudc_writel(xudc, lower_32_bits(erdp) | ERDPLO_EHB, ERDPLO);
+}
+
+static irqreturn_t tegra_xudc_irq(int irq, void *data)
+{
+ struct tegra_xudc *xudc = data;
+ unsigned long flags;
+ u32 val;
+
+ val = xudc_readl(xudc, ST);
+ if (!(val & ST_IP))
+ return IRQ_NONE;
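+ /* Acknowledge the interrupt; ST_IP is write-1-to-clear. */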
+ xudc_writel(xudc, ST_IP, ST);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ tegra_xudc_process_event_ring(xudc);
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_xudc_alloc_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ ep->xudc = xudc;
+ ep->index = index;
+ ep->context = &xudc->ep_context[index];
+ INIT_LIST_HEAD(&ep->queue);
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return 0;
+
+ ep->transfer_ring = dma_pool_alloc(xudc->transfer_ring_pool,
+ GFP_KERNEL,
+ &ep->transfer_ring_phys);
+ if (!ep->transfer_ring)
+ return -ENOMEM;
+
+ if (index) {
+ snprintf(ep->name, sizeof(ep->name), "ep%u%s", index / 2,
+ (index % 2 == 0) ? "out" : "in");
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 16;
+ ep->usb_ep.ops = &tegra_xudc_ep_ops;
+ ep->usb_ep.caps.type_bulk = true;
+ ep->usb_ep.caps.type_int = true;
+ if (index & 1)
+ ep->usb_ep.caps.dir_in = true;
+ else
+ ep->usb_ep.caps.dir_out = true;
+ list_add_tail(&ep->usb_ep.ep_list, &xudc->gadget.ep_list);
+ } else {
+ strscpy(ep->name, "ep0", sizeof(ep->name));
+ ep->usb_ep.name = ep->name;
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 512);
+ ep->usb_ep.ops = &tegra_xudc_ep0_ops;
+ ep->usb_ep.caps.type_control = true;
+ ep->usb_ep.caps.dir_in = true;
+ ep->usb_ep.caps.dir_out = true;
+ }
+
+ return 0;
+}
+
+static void tegra_xudc_free_ep(struct tegra_xudc *xudc, unsigned int index)
+{
+ struct tegra_xudc_ep *ep = &xudc->ep[index];
+
+ /*
+ * EP1 would be the input endpoint corresponding to EP0, but since
+ * EP0 is bi-directional, EP1 is unused.
+ */
+ if (index == 1)
+ return;
+
+ dma_pool_free(xudc->transfer_ring_pool, ep->transfer_ring,
+ ep->transfer_ring_phys);
+}
+
+static int tegra_xudc_alloc_eps(struct tegra_xudc *xudc)
+{
+ struct usb_request *req;
+ unsigned int i;
+ int err;
+
+ xudc->ep_context =
+ dma_alloc_coherent(xudc->dev, XUDC_NR_EPS *
+ sizeof(*xudc->ep_context),
+ &xudc->ep_context_phys, GFP_KERNEL);
+ if (!xudc->ep_context)
+ return -ENOMEM;
+
+ xudc->transfer_ring_pool =
+ dmam_pool_create(dev_name(xudc->dev), xudc->dev,
+ XUDC_TRANSFER_RING_SIZE *
+ sizeof(struct tegra_xudc_trb),
+ sizeof(struct tegra_xudc_trb), 0);
+ if (!xudc->transfer_ring_pool) {
+ err = -ENOMEM;
+ goto free_ep_context;
+ }
+
+ INIT_LIST_HEAD(&xudc->gadget.ep_list);
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++) {
+ err = tegra_xudc_alloc_ep(xudc, i);
+ if (err < 0)
+ goto free_eps;
+ }
+
+ req = tegra_xudc_ep_alloc_request(&xudc->ep[0].usb_ep, GFP_KERNEL);
+ if (!req) {
+ err = -ENOMEM;
+ goto free_eps;
+ }
+ xudc->ep0_req = to_xudc_req(req);
+
+ return 0;
+
+free_eps:
+ for (; i > 0; i--)
+ tegra_xudc_free_ep(xudc, i - 1);
+free_ep_context:
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+ return err;
+}
+
+static void tegra_xudc_init_eps(struct tegra_xudc *xudc)
+{
+ xudc_writel(xudc, lower_32_bits(xudc->ep_context_phys), ECPLO);
+ xudc_writel(xudc, upper_32_bits(xudc->ep_context_phys), ECPHI);
+}
+
+static void tegra_xudc_free_eps(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ tegra_xudc_ep_free_request(&xudc->ep[0].usb_ep,
+ &xudc->ep0_req->usb_req);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->ep); i++)
+ tegra_xudc_free_ep(xudc, i);
+
+ dma_free_coherent(xudc->dev, XUDC_NR_EPS * sizeof(*xudc->ep_context),
+ xudc->ep_context, xudc->ep_context_phys);
+}
+
+static int tegra_xudc_alloc_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ xudc->event_ring[i] =
+ dma_alloc_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ &xudc->event_ring_phys[i],
+ GFP_KERNEL);
+ if (!xudc->event_ring[i])
+ goto free_dma;
+ }
+
+ return 0;
+
+free_dma:
+ for (; i > 0; i--) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i - 1]),
+ xudc->event_ring[i - 1],
+ xudc->event_ring_phys[i - 1]);
+ }
+ return -ENOMEM;
+}
+
+static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+ u32 val;
+
+ val = xudc_readl(xudc, SPARAM);
+ val &= ~(SPARAM_ERSTMAX_MASK);
+ val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS);
+ xudc_writel(xudc, val, SPARAM);
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]));
+
+ val = xudc_readl(xudc, ERSTSZ);
+ val &= ~(ERSTSZ_ERSTXSZ_MASK << ERSTSZ_ERSTXSZ_SHIFT(i));
+ val |= XUDC_EVENT_RING_SIZE << ERSTSZ_ERSTXSZ_SHIFT(i);
+ xudc_writel(xudc, val, ERSTSZ);
+
+ xudc_writel(xudc, lower_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBALO(i));
+ xudc_writel(xudc, upper_32_bits(xudc->event_ring_phys[i]),
+ ERSTXBAHI(i));
+ }
+
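+ /*
+ * Point both the dequeue (ERDP) and enqueue (EREP) pointers at the
+ * first segment; EREP also carries the initial cycle state.
+ */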
+ val = lower_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPLO);
+ val |= EREPLO_ECS;
+ xudc_writel(xudc, val, EREPLO);
+
+ val = upper_32_bits(xudc->event_ring_phys[0]);
+ xudc_writel(xudc, val, ERDPHI);
+ xudc_writel(xudc, val, EREPHI);
+
+ xudc->ccs = true;
+ xudc->event_ring_index = 0;
+ xudc->event_ring_deq_ptr = 0;
+}
+
+static void tegra_xudc_free_event_ring(struct tegra_xudc *xudc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) {
+ dma_free_coherent(xudc->dev, XUDC_EVENT_RING_SIZE *
+ sizeof(*xudc->event_ring[i]),
+ xudc->event_ring[i],
+ xudc->event_ring_phys[i]);
+ }
+}
+
+static void tegra_xudc_fpci_ipfs_init(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ if (xudc->soc->has_ipfs) {
+ val = ipfs_readl(xudc, XUSB_DEV_CONFIGURATION_0);
+ val |= XUSB_DEV_CONFIGURATION_0_EN_FPCI;
+ ipfs_writel(xudc, val, XUSB_DEV_CONFIGURATION_0);
+ usleep_range(10, 15);
+ }
+
+ /* Enable bus master */
+ val = XUSB_DEV_CFG_1_IO_SPACE_EN | XUSB_DEV_CFG_1_MEMORY_SPACE_EN |
+ XUSB_DEV_CFG_1_BUS_MASTER_EN;
+ fpci_writel(xudc, val, XUSB_DEV_CFG_1);
+
+ /* Program BAR0 space */
+ val = fpci_readl(xudc, XUSB_DEV_CFG_4);
+ val &= ~(XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+ val |= xudc->phys_base & (XUSB_DEV_CFG_4_BASE_ADDR_MASK);
+
+ fpci_writel(xudc, val, XUSB_DEV_CFG_4);
+ fpci_writel(xudc, upper_32_bits(xudc->phys_base), XUSB_DEV_CFG_5);
+
+ usleep_range(100, 200);
+
+ if (xudc->soc->has_ipfs) {
+ /* Enable interrupt assertion */
+ val = ipfs_readl(xudc, XUSB_DEV_INTR_MASK_0);
+ val |= XUSB_DEV_INTR_MASK_0_IP_INT_MASK;
+ ipfs_writel(xudc, val, XUSB_DEV_INTR_MASK_0);
+ }
+}
+
+static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
+{
+ u32 val, imod;
+
+ if (xudc->soc->has_ipfs) {
+ val = xudc_readl(xudc, BLCG);
+ val |= BLCG_ALL;
+ val &= ~(BLCG_DFPCI | BLCG_UFPCI | BLCG_FE |
+ BLCG_COREPLL_PWRDN);
+ val |= BLCG_IOPLL_0_PWRDN;
+ val |= BLCG_IOPLL_1_PWRDN;
+ val |= BLCG_IOPLL_2_PWRDN;
+
+ xudc_writel(xudc, val, BLCG);
+ }
+
+ /* Set a reasonable U3 exit timer value. */
+ val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
+ val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
+ val |= SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(0x5dc0);
+ xudc_writel(xudc, val, SSPX_CORE_PADCTL4);
+
+ /* Default ping LFPS tBurst is too large. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT0);
+ val &= ~(SSPX_CORE_CNT0_PING_TBURST_MASK);
+ val |= SSPX_CORE_CNT0_PING_TBURST(0xa);
+ xudc_writel(xudc, val, SSPX_CORE_CNT0);
+
+ /* Default tPortConfiguration timeout is too small. */
+ val = xudc_readl(xudc, SSPX_CORE_CNT30);
+ val &= ~(SSPX_CORE_CNT30_LMPITP_TIMER_MASK);
+ val |= SSPX_CORE_CNT30_LMPITP_TIMER(0x978);
+ xudc_writel(xudc, val, SSPX_CORE_CNT30);
+
+ if (xudc->soc->lpm_enable) {
+ /* Set L1 resume duration to 95 us. */
+ val = xudc_readl(xudc, HSFSPI_COUNT13);
+ val &= ~(HSFSPI_COUNT13_U2_RESUME_K_DURATION_MASK);
+ val |= HSFSPI_COUNT13_U2_RESUME_K_DURATION(0x2c88);
+ xudc_writel(xudc, val, HSFSPI_COUNT13);
+ }
+
+ /*
+ * Compliance suite appears to violate the polling LFPS tBurst
+ * maximum of 1.4us. Send 1.45us instead.
+ */
+ val = xudc_readl(xudc, SSPX_CORE_CNT32);
+ val &= ~(SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK);
+ val |= SSPX_CORE_CNT32_POLL_TBURST_MAX(0xb0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT32);
+
+ /* Direct HS/FS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL(CFG_DEV_FE_PORTREGSEL_HSFS_PI);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Direct SS port instance to RxDetect. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ val |= CFG_DEV_FE_PORTREGSEL_SS_PI & CFG_DEV_FE_PORTREGSEL_MASK;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ val = xudc_readl(xudc, PORTSC);
+ val &= ~(PORTSC_CHANGE_MASK | PORTSC_PLS_MASK);
+ val |= PORTSC_LWS | PORTSC_PLS(PORTSC_PLS_RXDETECT);
+ xudc_writel(xudc, val, PORTSC);
+
+ /* Restore port instance. */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val &= ~(CFG_DEV_FE_PORTREGSEL_MASK);
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /*
+ * Enable INFINITE_SS_RETRY to prevent device from entering
+ * Disabled.Error when attached to buggy SuperSpeed hubs.
+ */
+ val = xudc_readl(xudc, CFG_DEV_FE);
+ val |= CFG_DEV_FE_INFINITE_SS_RETRY;
+ xudc_writel(xudc, val, CFG_DEV_FE);
+
+ /* Set interrupt moderation. */
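+ /* The interval is programmed in 250 ns units, hence microseconds * 4. */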
+ imod = XUDC_INTERRUPT_MODERATION_US * 4;
+ val = xudc_readl(xudc, RT_IMOD);
+ val &= ~((RT_IMOD_IMODI_MASK) | (RT_IMOD_IMODC_MASK));
+ val |= (RT_IMOD_IMODI(imod) | RT_IMOD_IMODC(imod));
+ xudc_writel(xudc, val, RT_IMOD);
+
+ /* Increase the SSPI transaction timeout from 32us to 512us. */
+ val = xudc_readl(xudc, CFG_DEV_SSPI_XFER);
+ val &= ~(CFG_DEV_SSPI_XFER_ACKTIMEOUT_MASK);
+ val |= CFG_DEV_SSPI_XFER_ACKTIMEOUT(0xf000);
+ xudc_writel(xudc, val, CFG_DEV_SSPI_XFER);
+}
+
+static int tegra_xudc_phy_init(struct tegra_xudc *xudc)
+{
+ int err;
+
+ err = phy_init(xudc->utmi_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "utmi phy init failed: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(xudc->usb3_phy);
+ if (err < 0) {
+ dev_err(xudc->dev, "usb3 phy init failed: %d\n", err);
+ goto exit_utmi_phy;
+ }
+
+ return 0;
+
+exit_utmi_phy:
+ phy_exit(xudc->utmi_phy);
+ return err;
+}
+
+static void tegra_xudc_phy_exit(struct tegra_xudc *xudc)
+{
+ phy_exit(xudc->usb3_phy);
+ phy_exit(xudc->utmi_phy);
+}
+
+static const char * const tegra210_xudc_supply_names[] = {
+ "hvdd-usb",
+ "avddio-usb",
+};
+
+static const char * const tegra210_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "hs_src",
+ "fs_src",
+};
+
+static const char * const tegra186_xudc_clock_names[] = {
+ "dev",
+ "ss",
+ "ss_src",
+ "fs_src",
+};
+
+static struct tegra_xudc_soc tegra210_xudc_soc_data = {
+ .supply_names = tegra210_xudc_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra210_xudc_supply_names),
+ .clock_names = tegra210_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra210_xudc_clock_names),
+ .u1_enable = false,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = true,
+ .pls_quirk = true,
+ .port_reset_quirk = true,
+ .has_ipfs = true,
+};
+
+static struct tegra_xudc_soc tegra186_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = false,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .has_ipfs = false,
+};
+
+static const struct of_device_id tegra_xudc_of_match[] = {
+ {
+ .compatible = "nvidia,tegra210-xudc",
+ .data = &tegra210_xudc_soc_data
+ },
+ {
+ .compatible = "nvidia,tegra186-xudc",
+ .data = &tegra186_xudc_soc_data
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
+
+static void tegra_xudc_powerdomain_remove(struct tegra_xudc *xudc)
+{
+ if (xudc->genpd_dl_ss)
+ device_link_del(xudc->genpd_dl_ss);
+ if (xudc->genpd_dl_device)
+ device_link_del(xudc->genpd_dl_device);
+ /* Attach failures leave ERR_PTR values behind; only detach valid handles. */
+ if (!IS_ERR_OR_NULL(xudc->genpd_dev_ss))
+ dev_pm_domain_detach(xudc->genpd_dev_ss, true);
+ if (!IS_ERR_OR_NULL(xudc->genpd_dev_device))
+ dev_pm_domain_detach(xudc->genpd_dev_device, true);
+}
+
+static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc)
+{
+ struct device *dev = xudc->dev;
+ int err;
+
+ xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev,
+ "dev");
+ if (IS_ERR(xudc->genpd_dev_device)) {
+ err = PTR_ERR(xudc->genpd_dev_device);
+ dev_err(dev, "failed to get dev pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss");
+ if (IS_ERR(xudc->genpd_dev_ss)) {
+ err = PTR_ERR(xudc->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_device) {
+ dev_err(dev, "adding usb device device link failed!\n");
+ return -ENODEV;
+ }
+
+ xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!xudc->genpd_dl_ss) {
+ dev_err(dev, "adding superspeed device link failed!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tegra_xudc_probe(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc;
+ struct resource *res;
+ struct usb_role_switch_desc role_sx_desc = { 0 };
+ unsigned int i;
+ int err;
+
+ xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL);
+ if (!xudc)
+ return -ENOMEM;
+
+ xudc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xudc);
+
+ xudc->soc = of_device_get_match_data(&pdev->dev);
+ if (!xudc->soc)
+ return -ENODEV;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ xudc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->base))
+ return PTR_ERR(xudc->base);
+ xudc->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fpci");
+ xudc->fpci = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->fpci))
+ return PTR_ERR(xudc->fpci);
+
+ if (xudc->soc->has_ipfs) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ipfs");
+ xudc->ipfs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xudc->ipfs))
+ return PTR_ERR(xudc->ipfs);
+ }
+
+ xudc->irq = platform_get_irq(pdev, 0);
+ if (xudc->irq < 0) {
+ dev_err(xudc->dev, "failed to get IRQ: %d\n",
+ xudc->irq);
+ return xudc->irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, xudc->irq, tegra_xudc_irq, 0,
+ dev_name(&pdev->dev), xudc);
+ if (err < 0) {
+ dev_err(xudc->dev, "failed to claim IRQ#%u: %d\n", xudc->irq,
+ err);
+ return err;
+ }
+
+ xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks,
+ sizeof(*xudc->clks), GFP_KERNEL);
+ if (!xudc->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_clks; i++)
+ xudc->clks[i].id = xudc->soc->clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks,
+ xudc->clks);
+ if (err) {
+ dev_err(xudc->dev, "failed to request clks %d\n", err);
+ return err;
+ }
+
+ xudc->supplies = devm_kcalloc(&pdev->dev, xudc->soc->num_supplies,
+ sizeof(*xudc->supplies), GFP_KERNEL);
+ if (!xudc->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < xudc->soc->num_supplies; i++)
+ xudc->supplies[i].supply = xudc->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to request regulators %d\n", err);
+ return err;
+ }
+
+ xudc->padctl = tegra_xusb_padctl_get(&pdev->dev);
+ if (IS_ERR(xudc->padctl))
+ return PTR_ERR(xudc->padctl);
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies);
+ if (err) {
+ dev_err(xudc->dev, "failed to enable regulators %d\n", err);
+ goto put_padctl;
+ }
+
+ xudc->usb3_phy = devm_phy_optional_get(&pdev->dev, "usb3");
+ if (IS_ERR(xudc->usb3_phy)) {
+ err = PTR_ERR(xudc->usb3_phy);
+ dev_err(xudc->dev, "failed to get usb3 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ xudc->utmi_phy = devm_phy_optional_get(&pdev->dev, "usb2");
+ if (IS_ERR(xudc->utmi_phy)) {
+ err = PTR_ERR(xudc->utmi_phy);
+ dev_err(xudc->dev, "failed to get usb2 phy: %d\n", err);
+ goto disable_regulator;
+ }
+
+ err = tegra_xudc_powerdomain_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_phy_init(xudc);
+ if (err)
+ goto put_powerdomains;
+
+ err = tegra_xudc_alloc_event_ring(xudc);
+ if (err)
+ goto disable_phy;
+
+ err = tegra_xudc_alloc_eps(xudc);
+ if (err)
+ goto free_event_ring;
+
+ spin_lock_init(&xudc->lock);
+
+ init_completion(&xudc->disconnect_complete);
+
+ INIT_WORK(&xudc->usb_role_sw_work, tegra_xudc_usb_role_sw_work);
+
+ INIT_DELAYED_WORK(&xudc->plc_reset_work, tegra_xudc_plc_reset_work);
+
+ INIT_DELAYED_WORK(&xudc->port_reset_war_work,
+ tegra_xudc_port_reset_war_work);
+
+ if (of_property_read_bool(xudc->dev->of_node, "usb-role-switch")) {
+ role_sx_desc.set = tegra_xudc_usb_role_sw_set;
+ role_sx_desc.fwnode = dev_fwnode(xudc->dev);
+
+ xudc->usb_role_sw = usb_role_switch_register(xudc->dev,
+ &role_sx_desc);
+ if (IS_ERR(xudc->usb_role_sw)) {
+ err = PTR_ERR(xudc->usb_role_sw);
+ dev_err(xudc->dev, "Failed to register USB role SW: %d",
+ err);
+ goto free_eps;
+ }
+ } else {
+ /* Default to device mode; this keeps the PHY always powered on. */
+ dev_info(xudc->dev, "defaulting to USB device mode\n");
+ schedule_work(&xudc->usb_role_sw_work);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ xudc->gadget.ops = &tegra_xudc_gadget_ops;
+ xudc->gadget.ep0 = &xudc->ep[0].usb_ep;
+ xudc->gadget.name = "tegra-xudc";
+ xudc->gadget.max_speed = USB_SPEED_SUPER;
+
+ err = usb_add_gadget_udc(&pdev->dev, &xudc->gadget);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add USB gadget: %d\n", err);
+ pm_runtime_disable(&pdev->dev);
+ goto free_eps;
+ }
+
+ return 0;
+
+free_eps:
+ tegra_xudc_free_eps(xudc);
+free_event_ring:
+ tegra_xudc_free_event_ring(xudc);
+disable_phy:
+ tegra_xudc_phy_exit(xudc);
+put_powerdomains:
+ tegra_xudc_powerdomain_remove(xudc);
+disable_regulator:
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+put_padctl:
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return err;
+}
+
+static int tegra_xudc_remove(struct platform_device *pdev)
+{
+ struct tegra_xudc *xudc = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(xudc->dev);
+
+ cancel_delayed_work(&xudc->plc_reset_work);
+
+ if (xudc->usb_role_sw) {
+ usb_role_switch_unregister(xudc->usb_role_sw);
+ cancel_work_sync(&xudc->usb_role_sw_work);
+ }
+
+ usb_del_gadget_udc(&xudc->gadget);
+
+ tegra_xudc_free_eps(xudc);
+ tegra_xudc_free_event_ring(xudc);
+
+ tegra_xudc_powerdomain_remove(xudc);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ phy_power_off(xudc->utmi_phy);
+ phy_power_off(xudc->usb3_phy);
+
+ tegra_xudc_phy_exit(xudc);
+
+ pm_runtime_disable(xudc->dev);
+ pm_runtime_put(xudc->dev);
+
+ tegra_xusb_padctl_put(xudc->padctl);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_powergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+
+ dev_dbg(xudc->dev, "entering ELPG\n");
+
+ spin_lock_irqsave(&xudc->lock, flags);
+
+ xudc->powergated = true;
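+ /*
+ * Save registers that are lost across powergating; clearing CTRL
+ * stops the controller before power is removed.
+ */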
+ xudc->saved_regs.ctrl = xudc_readl(xudc, CTRL);
+ xudc->saved_regs.portpm = xudc_readl(xudc, PORTPM);
+ xudc_writel(xudc, 0, CTRL);
+
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ clk_bulk_disable_unprepare(xudc->soc->num_clks, xudc->clks);
+
+ regulator_bulk_disable(xudc->soc->num_supplies, xudc->supplies);
+
+ dev_dbg(xudc->dev, "entering ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_unpowergate(struct tegra_xudc *xudc)
+{
+ unsigned long flags;
+ int err;
+
+ dev_dbg(xudc->dev, "exiting ELPG\n");
+
+ err = regulator_bulk_enable(xudc->soc->num_supplies,
+ xudc->supplies);
+ if (err < 0)
+ return err;
+
+ err = clk_bulk_prepare_enable(xudc->soc->num_clks, xudc->clks);
+ if (err < 0)
+ return err;
+
+ tegra_xudc_fpci_ipfs_init(xudc);
+
+ tegra_xudc_device_params_init(xudc);
+
+ tegra_xudc_init_event_ring(xudc);
+
+ tegra_xudc_init_eps(xudc);
+
+ xudc_writel(xudc, xudc->saved_regs.portpm, PORTPM);
+ xudc_writel(xudc, xudc->saved_regs.ctrl, CTRL);
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->powergated = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ dev_dbg(xudc->dev, "exiting ELPG done\n");
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = true;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ flush_work(&xudc->usb_role_sw_work);
+
+ /* Forcibly disconnect before powergating. */
+ tegra_xudc_device_mode_off(xudc);
+
+ if (!pm_runtime_status_suspended(dev))
+ tegra_xudc_powergate(xudc);
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+
+ err = tegra_xudc_unpowergate(xudc);
+ if (err < 0)
+ return err;
+
+ spin_lock_irqsave(&xudc->lock, flags);
+ xudc->suspended = false;
+ spin_unlock_irqrestore(&xudc->lock, flags);
+
+ schedule_work(&xudc->usb_role_sw_work);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_xudc_runtime_suspend(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_powergate(xudc);
+}
+
+static int __maybe_unused tegra_xudc_runtime_resume(struct device *dev)
+{
+ struct tegra_xudc *xudc = dev_get_drvdata(dev);
+
+ return tegra_xudc_unpowergate(xudc);
+}
+
+static const struct dev_pm_ops tegra_xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_xudc_suspend, tegra_xudc_resume)
+ SET_RUNTIME_PM_OPS(tegra_xudc_runtime_suspend,
+ tegra_xudc_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra_xudc_driver = {
+ .probe = tegra_xudc_probe,
+ .remove = tegra_xudc_remove,
+ .driver = {
+ .name = "tegra-xudc",
+ .pm = &tegra_xudc_pm_ops,
+ .of_match_table = tegra_xudc_of_match,
+ },
+};
+module_platform_driver(tegra_xudc_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB Device Controller");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_AUTHOR("Hui Fu <hfu@nvidia.com>");
+MODULE_AUTHOR("Nagarjuna Kristam <nkristam@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 79b2e79dddd0..8d730180db06 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -38,9 +38,9 @@ config USB_XHCI_DBGCAP
before enabling this option. If unsure, say 'N'.
config USB_XHCI_PCI
- tristate
- depends on USB_PCI
- default y
+ tristate
+ depends on USB_PCI
+ default y
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
@@ -220,12 +220,12 @@ config USB_EHCI_HCD_ORION
Marvell PXA/MMP USB controller" for those.
config USB_EHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ ST SPEAr chips.
config USB_EHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip EHCI USB controller"
@@ -237,21 +237,21 @@ config USB_EHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_EHCI_HCD_AT91
- tristate "Support for Atmel on-chip EHCI USB controller"
- depends on USB_EHCI_HCD && ARCH_AT91
- default y
- ---help---
- Enables support for the on-chip EHCI controller on
- Atmel chips.
+ tristate "Support for Atmel on-chip EHCI USB controller"
+ depends on USB_EHCI_HCD && ARCH_AT91
+ default y
+ ---help---
+ Enables support for the on-chip EHCI controller on
+ Atmel chips.
config USB_EHCI_TEGRA
- tristate "NVIDIA Tegra HCD support"
- depends on ARCH_TEGRA
- select USB_EHCI_ROOT_HUB_TT
- select USB_TEGRA_PHY
- help
- This driver enables support for the internal USB Host Controllers
- found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
+ tristate "NVIDIA Tegra HCD support"
+ depends on ARCH_TEGRA
+ select USB_EHCI_ROOT_HUB_TT
+ select USB_TEGRA_PHY
+ help
+ This driver enables support for the internal USB Host Controllers
+ found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
config USB_EHCI_HCD_PPC_OF
bool "EHCI support for PPC USB controller on OF platform bus"
@@ -269,10 +269,10 @@ config USB_EHCI_SH
If you use the PCI EHCI controller, this option is not necessary.
config USB_EHCI_EXYNOS
- tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
- help
- Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+ tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
+ depends on ARCH_S5PV210 || ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
@@ -409,12 +409,12 @@ config USB_OHCI_HCD_OMAP1
Enables support for the OHCI controller on OMAP1/2 chips.
config USB_OHCI_HCD_SPEAR
- tristate "Support for ST SPEAr on-chip OHCI USB controller"
- depends on USB_OHCI_HCD && PLAT_SPEAR
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- ST SPEAr chips.
+ tristate "Support for ST SPEAr on-chip OHCI USB controller"
+ depends on USB_OHCI_HCD && PLAT_SPEAR
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ ST SPEAr chips.
config USB_OHCI_HCD_STI
tristate "Support for ST STiHxxx on-chip OHCI USB controller"
@@ -426,12 +426,12 @@ config USB_OHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_OHCI_HCD_S3C2410
- tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
- default y
- ---help---
- Enables support for the on-chip OHCI controller on
- S3C24xx/S3C64xx chips.
+ tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX)
+ default y
+ ---help---
+ Enables support for the on-chip OHCI controller on
+ S3C24xx/S3C64xx chips.
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
@@ -440,8 +440,8 @@ config USB_OHCI_HCD_LPC32XX
depends on USB_ISP1301
default y
---help---
- Enables support for the on-chip OHCI controller on
- NXP chips.
+ Enables support for the on-chip OHCI controller on
+ NXP chips.
config USB_OHCI_HCD_PXA27X
tristate "Support for PXA27X/PXA3XX on-chip OHCI USB controller"
@@ -456,8 +456,8 @@ config USB_OHCI_HCD_AT91
depends on USB_OHCI_HCD && ARCH_AT91 && OF
default y
---help---
- Enables support for the on-chip OHCI controller on
- Atmel chips.
+ Enables support for the on-chip OHCI controller on
+ Atmel chips.
config USB_OHCI_HCD_OMAP3
tristate "OHCI support for OMAP3 and later chips"
@@ -545,7 +545,7 @@ config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS
help
- Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
@@ -609,8 +609,8 @@ config USB_UHCI_PLATFORM
default y if (ARCH_VT8500 || ARCH_ASPEED)
config USB_UHCI_ASPEED
- bool
- default y if ARCH_ASPEED
+ bool
+ default y if ARCH_ASPEED
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
@@ -713,14 +713,14 @@ config USB_RENESAS_USBHS_HCD
module will be called renesas-usbhs.
config USB_IMX21_HCD
- tristate "i.MX21 HCD support"
- depends on ARM && ARCH_MXC
- help
- This driver enables support for the on-chip USB host in the
- i.MX21 processor.
-
- To compile this driver as a module, choose M here: the
- module will be called "imx21-hcd".
+ tristate "i.MX21 HCD support"
+ depends on ARM && ARCH_MXC
+ help
+ This driver enables support for the on-chip USB host in the
+ i.MX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
config USB_HCD_BCMA
tristate "BCMA usb host driver"
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 2400a826397a..652fa29beb27 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -406,9 +406,12 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node)
+ if (core->dev.of_node) {
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
GPIOD_OUT_HIGH);
+ if (IS_ERR(usb_dev->gpio_desc))
+ return PTR_ERR(usb_dev->gpio_desc);
+ }
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 9e0c98d6bdb0..f967adf2d8df 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5646,8 +5646,10 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
return retval;
failed_dis_clk:
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
@@ -5665,8 +5667,10 @@ static int fotg210_hcd_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!IS_ERR(fotg210->pclk))
+ if (!IS_ERR(fotg210->pclk)) {
clk_disable_unprepare(fotg210->pclk);
+ clk_put(fotg210->pclk);
+ }
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 7fcf1d9dd7f3..02a1344fbd6a 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -419,7 +419,7 @@ static void create_debug_files(struct imx21 *imx21)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ root = debugfs_create_dir(dev_name(imx21->dev), usb_debug_root);
imx21->debug_root = root;
debugfs_create_file("status", S_IRUGO, root, imx21, &debug_status_fops);
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 96f8daa11f25..4a3a2852523f 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2627,7 +2627,7 @@ static int isp1362_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct isp1362_hcd *isp1362_hcd;
- struct resource *addr, *data, *irq_res;
+ struct resource *data, *irq_res;
void __iomem *addr_reg;
void __iomem *data_reg;
int irq;
@@ -2651,8 +2651,7 @@ static int isp1362_probe(struct platform_device *pdev)
irq = irq_res->start;
- addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- addr_reg = devm_ioremap_resource(&pdev->dev, addr);
+ addr_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(addr_reg))
return PTR_ERR(addr_reg);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index fc35a7993b7b..b635c6a1b1a9 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -115,7 +115,6 @@ static void at91_start_hc(struct platform_device *pdev)
static void at91_stop_hc(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct ohci_regs __iomem *regs = hcd->regs;
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
dev_dbg(&pdev->dev, "stop\n");
@@ -123,7 +122,7 @@ static void at91_stop_hc(struct platform_device *pdev)
/*
* Put the USB host controller into reset.
*/
- writel(0, &regs->control);
+ usb_hcd_platform_shutdown(pdev);
/*
* Stop the USB clocks.
@@ -628,6 +627,7 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
/* flush the writes */
(void) ohci_readl (ohci, &ohci->regs->control);
+ msleep(1);
at91_stop_clock(ohci_at91);
}
@@ -642,8 +642,8 @@ ohci_hcd_at91_drv_resume(struct device *dev)
if (ohci_at91->wakeup)
disable_irq_wake(hcd->irq);
-
- at91_start_clock(ohci_at91);
+ else
+ at91_start_clock(ohci_at91);
ohci_resume(hcd, false);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index c561881d0e79..85878e8ad331 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -150,7 +150,7 @@ static void ohci_nxp_stop_hc(void)
static int ohci_hcd_nxp_probe(struct platform_device *pdev)
{
- struct usb_hcd *hcd = 0;
+ struct usb_hcd *hcd = NULL;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index e67242e437ed..fe09b8626329 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -676,12 +676,12 @@ static int oxu_hub_control(struct usb_hcd *hcd,
*/
/* Low level read/write registers functions */
-static inline u32 oxu_readl(void *base, u32 reg)
+static inline u32 oxu_readl(void __iomem *base, u32 reg)
{
return readl(base + reg);
}
-static inline void oxu_writel(void *base, u32 reg, u32 val)
+static inline void oxu_writel(void __iomem *base, u32 reg, u32 val)
{
writel(val, base + reg);
}
@@ -4063,7 +4063,7 @@ static const struct hc_driver oxu_hc_driver = {
* Module stuff
*/
-static void oxu_configuration(struct platform_device *pdev, void *base)
+static void oxu_configuration(struct platform_device *pdev, void __iomem *base)
{
u32 tmp;
@@ -4093,7 +4093,7 @@ static void oxu_configuration(struct platform_device *pdev, void *base)
oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
-static int oxu_verify_id(struct platform_device *pdev, void *base)
+static int oxu_verify_id(struct platform_device *pdev, void __iomem *base)
{
u32 id;
static const char * const bo[] = {
@@ -4121,7 +4121,7 @@ static int oxu_verify_id(struct platform_device *pdev, void *base)
static const struct hc_driver oxu_hc_driver;
static struct usb_hcd *oxu_create(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq, int otg)
+ void __iomem *base, int irq, int otg)
{
struct device *dev = &pdev->dev;
@@ -4158,7 +4158,7 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
static int oxu_init(struct platform_device *pdev,
unsigned long memstart, unsigned long memlen,
- void *base, int irq)
+ void __iomem *base, int irq)
{
struct oxu_info *info = platform_get_drvdata(pdev);
struct usb_hcd *hcd;
@@ -4207,7 +4207,7 @@ error_create_otg:
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
- void *base;
+ void __iomem *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f6d04491df60..6c7f0a876b96 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -728,7 +728,7 @@ static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
if (!pio_enabled(pdev))
return;
- for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
base = pci_resource_start(pdev, i);
break;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 4efee34f154f..e9209e3e6248 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -71,7 +71,7 @@ INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
static bool distrust_firmware = true;
module_param(distrust_firmware, bool, 0);
-MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
- "t setup");
+MODULE_PARM_DESC(distrust_firmware,
+ "true to distrust firmware power/overcurrent setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1e0236e90687..a0025d23b257 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
+#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -212,7 +213,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e7aab31fd9a5..6475c3d3b43b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -280,6 +280,9 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
return;
xhci_dbg(xhci, "// Ding dong!\n");
+
+ trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
+
writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
/* Flush PCI posted writes */
readl(&xhci->dba->doorbell[0]);
@@ -401,6 +404,9 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
return;
+
+ trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
+
writel(DB_VALUE(ep_index, stream_id), db_addr);
/* The CPU has better things to do at this point than wait for a
* write-posting flush. It'll get there soon enough.
@@ -651,10 +657,8 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
xhci_urb_free_priv(urb_priv);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock(&xhci->lock);
trace_xhci_urb_giveback(urb);
usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&xhci->lock);
}
static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
@@ -2741,6 +2745,42 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
}
/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
+ union xhci_trb *event_ring_deq)
+{
+ u64 temp_64;
+ dma_addr_t deq;
+
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != xhci->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall
+ * always advance the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
+ ((u64) deq & (u64) ~ERST_PTR_MASK))
+ return;
+
+ /* Update HC event ring dequeue pointer */
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp_64 |= ERST_EHB;
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+}
+
+/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
* indicators of an event TRB error, but we check the status *first* to be safe.
@@ -2751,9 +2791,9 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
union xhci_trb *event_ring_deq;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
- dma_addr_t deq;
u64 temp_64;
u32 status;
+ int event_loop = 0;
spin_lock_irqsave(&xhci->lock, flags);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -2807,24 +2847,14 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- while (xhci_handle_event(xhci) > 0) {}
-
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != xhci->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event "
- "ring dequeue ptr.\n");
- /* Update HC event ring dequeue pointer */
- temp_64 &= ERST_PTR_MASK;
- temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
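+	/* write ERDP every TRBS_PER_SEGMENT / 2 events to avoid Event Ring Full */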
+ while (xhci_handle_event(xhci) > 0) {
+ if (event_loop++ < TRBS_PER_SEGMENT / 2)
+ continue;
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_loop = 0;
}
- /* Clear the event handler busy flag (RW1C); event ring is empty. */
- temp_64 |= ERST_EHB;
- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_update_erst_dequeue(xhci, event_ring_deq);
ret = IRQ_HANDLED;
out:
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 2ff7c911fbd0..bf9065438320 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -42,19 +42,18 @@
#define XUSB_CFG_CSB_BASE_ADDR 0x800
/* FPCI mailbox registers */
-#define XUSB_CFG_ARU_MBOX_CMD 0x0e4
+/* XUSB_CFG_ARU_MBOX_CMD */
#define MBOX_DEST_FALC BIT(27)
#define MBOX_DEST_PME BIT(28)
#define MBOX_DEST_SMI BIT(29)
#define MBOX_DEST_XHCI BIT(30)
#define MBOX_INT_EN BIT(31)
-#define XUSB_CFG_ARU_MBOX_DATA_IN 0x0e8
+/* XUSB_CFG_ARU_MBOX_DATA_IN and XUSB_CFG_ARU_MBOX_DATA_OUT */
#define CMD_DATA_SHIFT 0
#define CMD_DATA_MASK 0xffffff
#define CMD_TYPE_SHIFT 24
#define CMD_TYPE_MASK 0xff
-#define XUSB_CFG_ARU_MBOX_DATA_OUT 0x0ec
-#define XUSB_CFG_ARU_MBOX_OWNER 0x0f0
+/* XUSB_CFG_ARU_MBOX_OWNER */
#define MBOX_OWNER_NONE 0
#define MBOX_OWNER_FW 1
#define MBOX_OWNER_SW 2
@@ -146,6 +145,13 @@ struct tegra_xusb_phy_type {
unsigned int num;
};
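+/* FPCI mailbox register offsets; Tegra194 moved these, hence per-SoC values */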
+struct tegra_xusb_mbox_regs {
+ u16 cmd;
+ u16 data_in;
+ u16 data_out;
+ u16 owner;
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
@@ -160,6 +166,8 @@ struct tegra_xusb_soc {
} usb2, ulpi, hsic, usb3;
} ports;
+	struct tegra_xusb_mbox_regs mbox;
+
bool scale_ss_clock;
bool has_ipfs;
};
@@ -395,15 +403,15 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
- fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
@@ -413,17 +421,17 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
value = tegra_xusb_mbox_pack(msg);
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+ fpci_writel(tegra, value, tegra->soc->mbox.data_in);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
@@ -431,7 +439,7 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
if (time_after(jiffies, timeout))
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ value = fpci_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
@@ -598,16 +606,16 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
mutex_lock(&tegra->lock);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+ value = fpci_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
- value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value = fpci_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
- fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+ fpci_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
- fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+ fpci_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
@@ -755,7 +763,6 @@ static int tegra_xusb_runtime_suspend(struct device *dev)
{
struct tegra_xusb *tegra = dev_get_drvdata(dev);
- tegra_xusb_phy_disable(tegra);
regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
tegra_xusb_clk_disable(tegra);
@@ -779,16 +786,8 @@ static int tegra_xusb_runtime_resume(struct device *dev)
goto disable_clk;
}
- err = tegra_xusb_phy_enable(tegra);
- if (err < 0) {
- dev_err(dev, "failed to enable PHYs: %d\n", err);
- goto disable_regulator;
- }
-
return 0;
-disable_regulator:
- regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
disable_clk:
tegra_xusb_clk_disable(tegra);
return err;
@@ -970,7 +969,7 @@ static int tegra_xusb_powerdomain_init(struct device *dev,
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
- struct resource *res, *regs;
+ struct resource *regs;
struct tegra_xusb *tegra;
struct xhci_hcd *xhci;
unsigned int i, j, k;
@@ -992,14 +991,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (IS_ERR(tegra->regs))
return PTR_ERR(tegra->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->fpci_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->fpci_base))
return PTR_ERR(tegra->fpci_base);
if (tegra->soc->has_ipfs) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+ tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
}
@@ -1128,8 +1125,9 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_powerdomains;
}
- for (i = 0; i < tegra->soc->num_supplies; i++)
- tegra->supplies[i].supply = tegra->soc->supply_names[i];
+ regulator_bulk_set_supply_names(tegra->supplies,
+ tegra->soc->supply_names,
+ tegra->soc->num_supplies);
err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
tegra->supplies);
@@ -1181,6 +1179,12 @@ static int tegra_xusb_probe(struct platform_device *pdev)
*/
platform_set_drvdata(pdev, tegra);
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
+ goto put_hcd;
+ }
+
pm_runtime_enable(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
err = pm_runtime_get_sync(&pdev->dev);
@@ -1189,7 +1193,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to enable device: %d\n", err);
- goto disable_rpm;
+ goto disable_phy;
}
tegra_xusb_config(tegra, regs);
@@ -1275,9 +1279,11 @@ remove_usb2:
put_rpm:
if (!pm_runtime_status_suspended(&pdev->dev))
tegra_xusb_runtime_suspend(&pdev->dev);
-disable_rpm:
- pm_runtime_disable(&pdev->dev);
+put_hcd:
usb_put_hcd(tegra->hcd);
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+ pm_runtime_disable(&pdev->dev);
put_powerdomains:
if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
@@ -1314,6 +1320,8 @@ static int tegra_xusb_remove(struct platform_device *pdev)
tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
}
+ tegra_xusb_phy_disable(tegra);
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
@@ -1375,6 +1383,12 @@ static const struct tegra_xusb_soc tegra124_soc = {
},
.scale_ss_clock = true,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -1407,6 +1421,12 @@ static const struct tegra_xusb_soc tegra210_soc = {
},
.scale_ss_clock = false,
.has_ipfs = true,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
@@ -1432,12 +1452,48 @@ static const struct tegra_xusb_soc tegra186_soc = {
},
.scale_ss_clock = false,
.has_ipfs = false,
+ .mbox = {
+ .cmd = 0xe4,
+ .data_in = 0xe8,
+ .data_out = 0xec,
+ .owner = 0xf0,
+ },
+};
+
+static const char * const tegra194_supply_names[] = {
+};
+
+static const struct tegra_xusb_phy_type tegra194_phy_types[] = {
+ { .name = "usb3", .num = 4, },
+ { .name = "usb2", .num = 4, },
+};
+
+static const struct tegra_xusb_soc tegra194_soc = {
+ .firmware = "nvidia/tegra194/xusb.bin",
+ .supply_names = tegra194_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_supply_names),
+ .phy_types = tegra194_phy_types,
+ .num_types = ARRAY_SIZE(tegra194_phy_types),
+ .ports = {
+ .usb3 = { .offset = 0, .count = 4, },
+ .usb2 = { .offset = 4, .count = 4, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+ .mbox = {
+ .cmd = 0x68,
+ .data_in = 0x6c,
+ .data_out = 0x70,
+ .owner = 0x74,
+ },
};
+MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
+ { .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 052a269d86f2..56eb867803a6 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -560,6 +560,32 @@ DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
TP_ARGS(portnum, portsc)
);
+DECLARE_EVENT_CLASS(xhci_log_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell),
+ TP_STRUCT__entry(
+ __field(u32, slot)
+ __field(u32, doorbell)
+ ),
+ TP_fast_assign(
+ __entry->slot = slot;
+ __entry->doorbell = doorbell;
+ ),
+ TP_printk("Ring doorbell for %s",
+ xhci_decode_doorbell(__entry->slot, __entry->doorbell)
+ )
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_ep_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
+DEFINE_EVENT(xhci_log_doorbell, xhci_ring_host_doorbell,
+ TP_PROTO(u32 slot, u32 doorbell),
+ TP_ARGS(slot, doorbell)
+);
+
DECLARE_EVENT_CLASS(xhci_dbc_log_request,
TP_PROTO(struct dbc_request *req),
TP_ARGS(req),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6c17e3fe181a..6721d059f58a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -5301,7 +5301,8 @@ static const struct hc_driver xhci_hc_driver = {
* generic hardware linkage
*/
.irq = xhci_irq,
- .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED,
+ .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
+ HCD_BH,
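+		/* HCD_BH: give back URBs from a tasklet rather than hard irq context */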
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f9f88626a57a..dc6f62a4b197 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2580,6 +2580,35 @@ static inline const char *xhci_decode_portsc(u32 portsc)
return str;
}
+static inline const char *xhci_decode_doorbell(u32 slot, u32 doorbell)
+{
+ static char str[256];
+ u8 ep;
+ u16 stream;
+ int ret;
+
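+	/* doorbell register layout: DB target in bits 7:0, stream ID in bits 31:16 */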
+ ep = (doorbell & 0xff);
+ stream = doorbell >> 16;
+
+ if (slot == 0) {
+ sprintf(str, "Command Ring %d", doorbell);
+ return str;
+ }
+	ret = sprintf(str, "Slot %d ", slot);
+	if (ep > 0 && ep < 32)
+		ret += sprintf(str + ret, "ep%d%s",
+				ep / 2,
+				ep % 2 ? "in" : "out");
+	else if (ep == 0 || ep < 248)
+		ret += sprintf(str + ret, "Reserved %d", ep);
+	else
+		ret += sprintf(str + ret, "Vendor Defined %d", ep);
+	if (stream)
+		ret += sprintf(str + ret, " Stream %d", stream);
+
+ return str;
+}
+
static inline const char *xhci_ep_state_string(u8 state)
{
switch (state) {
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 7a6b122c833f..360416680e82 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -566,7 +566,6 @@ static int
mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback)
{
struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]);
- int err = 0;
int res;
MTS_DEBUG_GOT_HERE();
@@ -613,7 +612,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback
}
out:
- return err;
+ return 0;
}
static DEF_SCSI_QCMD(mts_scsi_queuecommand)
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 320fc4739835..579a21bd70ad 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1032,8 +1032,6 @@ static int check_atl_transfer(struct usb_hcd *hcd, struct ptd *ptd,
urb->status = -EOVERFLOW;
else if (FROM_DW3_CERR(ptd->dw3))
urb->status = -EPIPE; /* Stall */
- else if (ptd->dw3 & DW3_ERROR_BIT)
- urb->status = -EPROTO; /* XactErr */
else
urb->status = -EPROTO; /* Unknown */
/*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 9bce583aada3..834b2494da73 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -181,8 +181,8 @@ config USB_TEST
including sample test device firmware and "how to use it".
config USB_EHSET_TEST_FIXTURE
- tristate "USB EHSET Test Fixture driver"
- help
+ tristate "USB EHSET Test Fixture driver"
+ help
Say Y here if you want to support the special test fixture device
used for the USB-IF Embedded Host High-Speed Electrical Test procedure.
@@ -233,17 +233,17 @@ config USB_HUB_USB251XB
Say Y or M here if you need to configure such a device via SMBus.
config USB_HSIC_USB3503
- tristate "USB3503 HSIC to USB20 Driver"
- depends on I2C
- select REGMAP_I2C
- help
- This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
+ tristate "USB3503 HSIC to USB20 Driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
config USB_HSIC_USB4604
- tristate "USB4604 HSIC to USB20 Driver"
- depends on I2C
- help
- This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
+ tristate "USB4604 HSIC to USB20 Driver"
+ depends on I2C
+ help
+ This option enables support for SMSC USB4604 HSIC to USB 2.0 Driver.
config USB_LINK_LAYER_TEST
tristate "USB Link Layer Test driver"
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index ac92725458b5..ba1eaabc7796 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -164,7 +164,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
0,
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
- brightness = pdata->msgdata[1];
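+	/* a short read means we did not get both message bytes */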
+ if (retval < 2) {
+ if (retval >= 0)
+ retval = -EMSGSIZE;
+ } else {
+ brightness = pdata->msgdata[1];
+ }
mutex_unlock(&pdata->sysfslock);
if (retval < 0)
@@ -299,6 +304,7 @@ error:
if (pdata) {
if (pdata->urb) {
usb_kill_urb(pdata->urb);
+ cancel_delayed_work_sync(&pdata->work);
if (pdata->urbdata)
usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
pdata->urbdata, pdata->urb->transfer_dma);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 34e6cd6f40d3..87067c3d6109 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -384,13 +384,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
!dev->reading,
(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
- if (result < 0)
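+	/* the urb may still be in flight on error or timeout; reap it */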
+ if (result < 0) {
+ usb_kill_urb(dev->urb);
goto out;
+ }
- if (result == 0)
+ if (result == 0) {
result = -ETIMEDOUT;
- else
+ usb_kill_urb(dev->urb);
+ } else {
result = dev->valid;
+ }
out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
@@ -526,7 +530,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
static int chaoskey_resume(struct usb_interface *interface)
{
+ struct chaoskey *dev;
+ struct usb_device *udev = interface_to_usbdev(interface);
+
usb_dbg(interface, "resume");
+ dev = usb_get_intfdata(interface);
+
+	/*
+	 * We may have lost power. In that case a device that needs a
+	 * long time for its first requests must be given the extended
+	 * timeout again.
+	 */
+ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
+ dev->reads_started = false;
+
return 0;
}
#else
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index cdee3af33ad7..8a3d9c0c8d8b 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -333,7 +333,8 @@ static void ftdi_elan_abandon_completions(struct usb_ftdi *ftdi)
*respond->result = -ESHUTDOWN;
*respond->value = 0;
complete(&respond->wait_completion);
- } mutex_unlock(&ftdi->u132_lock);
+ }
+ mutex_unlock(&ftdi->u132_lock);
}
static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi)
@@ -763,7 +764,8 @@ static int ftdi_elan_total_command_size(struct usb_ftdi *ftdi, int command_size)
struct u132_command *command = &ftdi->command[COMMAND_MASK &
i++];
total_size += 5 + command->follows;
- } return total_size;
+ }
+ return total_size;
}
static int ftdi_elan_command_engine(struct usb_ftdi *ftdi)
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 20b0f91a5d9b..4afb5ddfd361 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -56,11 +56,10 @@ static const struct usb_device_id idmouse_table[] = {
#define FTIP_SCROLL 0x24
#define ftip_command(dev, command, value, index) \
- usb_control_msg (dev->udev, usb_sndctrlpipe (dev->udev, 0), command, \
+ usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), command, \
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, value, index, NULL, 0, 1000)
MODULE_DEVICE_TABLE(usb, idmouse_table);
-static DEFINE_MUTEX(open_disc_mutex);
/* structure to hold all of our device specific stuff */
struct usb_idmouse {
@@ -158,8 +157,8 @@ static int idmouse_create_image(struct usb_idmouse *dev)
/* loop over a blocking bulk read to get data from the device */
while (bytes_read < IMGSIZE) {
- result = usb_bulk_msg (dev->udev,
- usb_rcvbulkpipe (dev->udev, dev->bulk_in_endpointAddr),
+ result = usb_bulk_msg(dev->udev,
+ usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
dev->bulk_in_buffer + bytes_read,
dev->bulk_in_size, &bulk_read, 5000);
if (result < 0) {
@@ -223,21 +222,17 @@ static int idmouse_open(struct inode *inode, struct file *file)
int result;
/* get the interface from minor number and driver information */
- interface = usb_find_interface (&idmouse_driver, iminor (inode));
+ interface = usb_find_interface(&idmouse_driver, iminor(inode));
if (!interface)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* get the device information block from the interface */
dev = usb_get_intfdata(interface);
- if (!dev) {
- mutex_unlock(&open_disc_mutex);
+ if (!dev)
return -ENODEV;
- }
/* lock this device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* check if already open */
if (dev->open) {
@@ -251,7 +246,7 @@ static int idmouse_open(struct inode *inode, struct file *file)
result = usb_autopm_get_interface(interface);
if (result)
goto error;
- result = idmouse_create_image (dev);
+ result = idmouse_create_image(dev);
usb_autopm_put_interface(interface);
if (result)
goto error;
@@ -280,27 +275,17 @@ static int idmouse_release(struct inode *inode, struct file *file)
if (dev == NULL)
return -ENODEV;
- mutex_lock(&open_disc_mutex);
/* lock our device */
mutex_lock(&dev->lock);
- /* are we really open? */
- if (dev->open <= 0) {
- mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
- return -ENODEV;
- }
-
--dev->open;
if (!dev->present) {
/* the device was unplugged before the file was released */
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
idmouse_delete(dev);
} else {
mutex_unlock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
}
return 0;
}
@@ -379,7 +364,6 @@ static int idmouse_probe(struct usb_interface *interface,
if (result) {
/* something prevented us from registering this device */
dev_err(&interface->dev, "Unable to allocate minor number.\n");
- usb_set_intfdata(interface, NULL);
idmouse_delete(dev);
return result;
}
@@ -392,19 +376,13 @@ static int idmouse_probe(struct usb_interface *interface,
static void idmouse_disconnect(struct usb_interface *interface)
{
- struct usb_idmouse *dev;
-
- /* get device structure */
- dev = usb_get_intfdata(interface);
+ struct usb_idmouse *dev = usb_get_intfdata(interface);
/* give back our minor */
usb_deregister_dev(interface, &idmouse_class);
- mutex_lock(&open_disc_mutex);
- usb_set_intfdata(interface, NULL);
/* lock the device */
mutex_lock(&dev->lock);
- mutex_unlock(&open_disc_mutex);
/* prevent device read, write and ioctl */
dev->present = 0;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 23061f1526b4..ab4b98b04115 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -157,19 +157,19 @@ MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms");
#define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD
struct tower_reset_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
-} __attribute__ ((packed));
+};
struct tower_get_version_reply {
- __le16 size; /* little-endian */
+ __le16 size;
__u8 err_code;
__u8 spare;
__u8 major;
__u8 minor;
- __le16 build_no; /* little-endian */
-} __attribute__ ((packed));
+ __le16 build_no;
+};
/* table of devices that work with this driver */
@@ -178,7 +178,7 @@ static const struct usb_device_id tower_table[] = {
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, tower_table);
+MODULE_DEVICE_TABLE(usb, tower_table);
#define LEGO_USB_TOWER_MINOR_BASE 160
@@ -186,13 +186,13 @@ MODULE_DEVICE_TABLE (usb, tower_table);
/* Structure to hold all of our device specific stuff */
struct lego_usb_tower {
struct mutex lock; /* locks this structure */
- struct usb_device* udev; /* save off the usb device pointer */
+ struct usb_device *udev; /* save off the usb device pointer */
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
unsigned long disconnected:1;
- char* read_buffer;
+ char *read_buffer;
size_t read_buffer_length; /* this much came in */
size_t read_packet_length; /* this much will be returned on read */
spinlock_t read_buffer_lock;
@@ -202,16 +202,15 @@ struct lego_usb_tower {
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
- char* interrupt_in_buffer;
- struct usb_endpoint_descriptor* interrupt_in_endpoint;
- struct urb* interrupt_in_urb;
+ char *interrupt_in_buffer;
+ struct usb_endpoint_descriptor *interrupt_in_endpoint;
+ struct urb *interrupt_in_urb;
int interrupt_in_interval;
- int interrupt_in_running;
int interrupt_in_done;
- char* interrupt_out_buffer;
- struct usb_endpoint_descriptor* interrupt_out_endpoint;
- struct urb* interrupt_out_urb;
+ char *interrupt_out_buffer;
+ struct usb_endpoint_descriptor *interrupt_out_endpoint;
+ struct urb *interrupt_out_urb;
int interrupt_out_interval;
int interrupt_out_busy;
@@ -219,21 +218,20 @@ struct lego_usb_tower {
/* local function prototypes */
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos);
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
-static inline void tower_delete (struct lego_usb_tower *dev);
-static int tower_open (struct inode *inode, struct file *file);
-static int tower_release (struct inode *inode, struct file *file);
-static __poll_t tower_poll (struct file *file, poll_table *wait);
-static loff_t tower_llseek (struct file *file, loff_t off, int whence);
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos);
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
+static inline void tower_delete(struct lego_usb_tower *dev);
+static int tower_open(struct inode *inode, struct file *file);
+static int tower_release(struct inode *inode, struct file *file);
+static __poll_t tower_poll(struct file *file, poll_table *wait);
+static loff_t tower_llseek(struct file *file, loff_t off, int whence);
-static void tower_abort_transfers (struct lego_usb_tower *dev);
-static void tower_check_for_read_packet (struct lego_usb_tower *dev);
-static void tower_interrupt_in_callback (struct urb *urb);
-static void tower_interrupt_out_callback (struct urb *urb);
+static void tower_check_for_read_packet(struct lego_usb_tower *dev);
+static void tower_interrupt_in_callback(struct urb *urb);
+static void tower_interrupt_out_callback(struct urb *urb);
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id);
-static void tower_disconnect (struct usb_interface *interface);
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id);
+static void tower_disconnect(struct usb_interface *interface);
/* file operations needed when we register this driver */
@@ -288,23 +286,23 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
/**
* tower_delete
*/
-static inline void tower_delete (struct lego_usb_tower *dev)
+static inline void tower_delete(struct lego_usb_tower *dev)
{
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
- kfree (dev->read_buffer);
- kfree (dev->interrupt_in_buffer);
- kfree (dev->interrupt_out_buffer);
+ kfree(dev->read_buffer);
+ kfree(dev->interrupt_in_buffer);
+ kfree(dev->interrupt_out_buffer);
usb_put_dev(dev->udev);
- kfree (dev);
+ kfree(dev);
}
/**
* tower_open
*/
-static int tower_open (struct inode *inode, struct file *file)
+static int tower_open(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev = NULL;
int subminor;
@@ -314,7 +312,6 @@ static int tower_open (struct inode *inode, struct file *file)
int result;
reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
-
if (!reset_reply) {
retval = -ENOMEM;
goto exit;
@@ -323,8 +320,7 @@ static int tower_open (struct inode *inode, struct file *file)
nonseekable_open(inode, file);
subminor = iminor(inode);
- interface = usb_find_interface (&tower_driver, subminor);
-
+ interface = usb_find_interface(&tower_driver, subminor);
if (!interface) {
pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
@@ -351,15 +347,15 @@ static int tower_open (struct inode *inode, struct file *file)
}
/* reset the tower */
- result = usb_control_msg (dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- LEGO_USB_TOWER_REQUEST_RESET,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- reset_reply,
- sizeof(*reset_reply),
- 1000);
+ result = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ LEGO_USB_TOWER_REQUEST_RESET,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ reset_reply,
+ sizeof(*reset_reply),
+ 1000);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
@@ -370,24 +366,22 @@ static int tower_open (struct inode *inode, struct file *file)
/* initialize in direction */
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
- usb_fill_int_urb (dev->interrupt_in_urb,
- dev->udev,
- usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
- dev->interrupt_in_buffer,
- usb_endpoint_maxp(dev->interrupt_in_endpoint),
- tower_interrupt_in_callback,
- dev,
- dev->interrupt_in_interval);
-
- dev->interrupt_in_running = 1;
+ usb_fill_int_urb(dev->interrupt_in_urb,
+ dev->udev,
+ usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
+ dev->interrupt_in_buffer,
+ usb_endpoint_maxp(dev->interrupt_in_endpoint),
+ tower_interrupt_in_callback,
+ dev,
+ dev->interrupt_in_interval);
+
dev->interrupt_in_done = 0;
mb();
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
- dev->interrupt_in_running = 0;
goto unlock_exit;
}
@@ -407,13 +401,12 @@ exit:
/**
* tower_release
*/
-static int tower_release (struct inode *inode, struct file *file)
+static int tower_release(struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev;
int retval = 0;
dev = file->private_data;
-
if (dev == NULL) {
retval = -ENODEV;
goto exit;
@@ -421,56 +414,32 @@ static int tower_release (struct inode *inode, struct file *file)
mutex_lock(&dev->lock);
- if (dev->open_count != 1) {
- dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
- __func__);
- retval = -ENODEV;
- goto unlock_exit;
- }
-
if (dev->disconnected) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy) {
- wait_event_interruptible_timeout (dev->write_wait, !dev->interrupt_out_busy, 2 * HZ);
+ wait_event_interruptible_timeout(dev->write_wait, !dev->interrupt_out_busy,
+ 2 * HZ);
}
- tower_abort_transfers (dev);
+
+ /* shutdown transfers */
+ usb_kill_urb(dev->interrupt_in_urb);
+ usb_kill_urb(dev->interrupt_out_urb);
+
dev->open_count = 0;
-unlock_exit:
mutex_unlock(&dev->lock);
exit:
return retval;
}
-
-/**
- * tower_abort_transfers
- * aborts transfers and frees associated data structures
- */
-static void tower_abort_transfers (struct lego_usb_tower *dev)
-{
- if (dev == NULL)
- return;
-
- /* shutdown transfer */
- if (dev->interrupt_in_running) {
- dev->interrupt_in_running = 0;
- mb();
- usb_kill_urb(dev->interrupt_in_urb);
- }
- if (dev->interrupt_out_busy)
- usb_kill_urb(dev->interrupt_out_urb);
-}
-
-
/**
* tower_check_for_read_packet
*
@@ -479,23 +448,23 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
* until it has been there unchanged for at least
* dev->packet_timeout_jiffies, or until the buffer is full.
*/
-static void tower_check_for_read_packet (struct lego_usb_tower *dev)
+static void tower_check_for_read_packet(struct lego_usb_tower *dev)
{
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
if (!packet_timeout
|| time_after(jiffies, dev->read_last_arrival + dev->packet_timeout_jiffies)
|| dev->read_buffer_length == read_buffer_size) {
dev->read_packet_length = dev->read_buffer_length;
}
dev->interrupt_in_done = 0;
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
}
/**
* tower_poll
*/
-static __poll_t tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll(struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
__poll_t mask = 0;
@@ -509,12 +478,10 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
poll_wait(file, &dev->write_wait, wait);
tower_check_for_read_packet(dev);
- if (dev->read_packet_length > 0) {
+ if (dev->read_packet_length > 0)
mask |= EPOLLIN | EPOLLRDNORM;
- }
- if (!dev->interrupt_out_busy) {
+ if (!dev->interrupt_out_busy)
mask |= EPOLLOUT | EPOLLWRNORM;
- }
return mask;
}
@@ -523,7 +490,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
/**
* tower_llseek
*/
-static loff_t tower_llseek (struct file *file, loff_t off, int whence)
+static loff_t tower_llseek(struct file *file, loff_t off, int whence)
{
return -ESPIPE; /* unseekable */
}
@@ -532,7 +499,7 @@ static loff_t tower_llseek (struct file *file, loff_t off, int whence)
/**
* tower_read
*/
-static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_read;
@@ -551,7 +518,6 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -561,21 +527,19 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
goto unlock_exit;
}
- if (read_timeout) {
+ if (read_timeout)
timeout = jiffies + msecs_to_jiffies(read_timeout);
- }
/* wait for data */
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
while (dev->read_packet_length == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies);
- if (retval < 0) {
+ if (retval < 0)
goto unlock_exit;
- }
/* reset read timeout during read or write activity */
if (read_timeout
@@ -583,28 +547,27 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* check for read timeout */
- if (read_timeout && time_after (jiffies, timeout)) {
+ if (read_timeout && time_after(jiffies, timeout)) {
retval = -ETIMEDOUT;
goto unlock_exit;
}
- tower_check_for_read_packet (dev);
+ tower_check_for_read_packet(dev);
}
/* copy the data from read_buffer into userspace */
bytes_to_read = min(count, dev->read_packet_length);
- if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) {
+ if (copy_to_user(buffer, dev->read_buffer, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
- spin_lock_irq (&dev->read_buffer_lock);
+ spin_lock_irq(&dev->read_buffer_lock);
dev->read_buffer_length -= bytes_to_read;
dev->read_packet_length -= bytes_to_read;
- for (i=0; i<dev->read_buffer_length; i++) {
+ for (i = 0; i < dev->read_buffer_length; i++)
dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read];
- }
- spin_unlock_irq (&dev->read_buffer_lock);
+ spin_unlock_irq(&dev->read_buffer_lock);
retval = bytes_to_read;
@@ -620,7 +583,7 @@ exit:
/**
* tower_write
*/
-static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t tower_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_write;
@@ -637,7 +600,6 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
/* verify that the device wasn't unplugged */
if (dev->disconnected) {
retval = -ENODEV;
- pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
@@ -653,10 +615,10 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
retval = -EAGAIN;
goto unlock_exit;
}
- retval = wait_event_interruptible (dev->write_wait, !dev->interrupt_out_busy);
- if (retval) {
+ retval = wait_event_interruptible(dev->write_wait,
+ !dev->interrupt_out_busy);
+ if (retval)
goto unlock_exit;
- }
}
/* write the data into interrupt_out_buffer from userspace */
@@ -664,7 +626,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev_dbg(&dev->udev->dev, "%s: count = %zd, bytes_to_write = %zd\n",
__func__, count, bytes_to_write);
- if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) {
+ if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
goto unlock_exit;
}
@@ -682,7 +644,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
dev->interrupt_out_busy = 1;
wmb();
- retval = usb_submit_urb (dev->interrupt_out_urb, GFP_KERNEL);
+ retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL);
if (retval) {
dev->interrupt_out_busy = 0;
dev_err(&dev->udev->dev,
@@ -703,7 +665,7 @@ exit:
/**
* tower_interrupt_in_callback
*/
-static void tower_interrupt_in_callback (struct urb *urb)
+static void tower_interrupt_in_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -729,9 +691,9 @@ static void tower_interrupt_in_callback (struct urb *urb)
if (urb->actual_length > 0) {
spin_lock_irqsave(&dev->read_buffer_lock, flags);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
- memcpy (dev->read_buffer + dev->read_buffer_length,
- dev->interrupt_in_buffer,
- urb->actual_length);
+ memcpy(dev->read_buffer + dev->read_buffer_length,
+ dev->interrupt_in_buffer,
+ urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev->read_last_arrival = jiffies;
dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
@@ -744,25 +706,21 @@ static void tower_interrupt_in_callback (struct urb *urb)
}
resubmit:
- /* resubmit if we're still running */
- if (dev->interrupt_in_running) {
- retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
- if (retval)
- dev_err(&dev->udev->dev,
- "%s: usb_submit_urb failed (%d)\n",
- __func__, retval);
+ retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
+ if (retval) {
+ dev_err(&dev->udev->dev, "%s: usb_submit_urb failed (%d)\n",
+ __func__, retval);
}
-
exit:
dev->interrupt_in_done = 1;
- wake_up_interruptible (&dev->read_wait);
+ wake_up_interruptible(&dev->read_wait);
}
/**
* tower_interrupt_out_callback
*/
-static void tower_interrupt_out_callback (struct urb *urb)
+static void tower_interrupt_out_callback(struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
@@ -790,48 +748,27 @@ static void tower_interrupt_out_callback (struct urb *urb)
* Called by the usb core when a new device is connected that it thinks
* this driver might be interested in.
*/
-static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
+static int tower_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct device *idev = &interface->dev;
struct usb_device *udev = interface_to_usbdev(interface);
- struct lego_usb_tower *dev = NULL;
+ struct lego_usb_tower *dev;
struct tower_get_version_reply *get_version_reply = NULL;
int retval = -ENOMEM;
int result;
/* allocate memory for our device state and initialize it */
-
- dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
-
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto exit;
mutex_init(&dev->lock);
-
dev->udev = usb_get_dev(udev);
- dev->open_count = 0;
- dev->disconnected = 0;
-
- dev->read_buffer = NULL;
- dev->read_buffer_length = 0;
- dev->read_packet_length = 0;
- spin_lock_init (&dev->read_buffer_lock);
+ spin_lock_init(&dev->read_buffer_lock);
dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
dev->read_last_arrival = jiffies;
-
- init_waitqueue_head (&dev->read_wait);
- init_waitqueue_head (&dev->write_wait);
-
- dev->interrupt_in_buffer = NULL;
- dev->interrupt_in_endpoint = NULL;
- dev->interrupt_in_urb = NULL;
- dev->interrupt_in_running = 0;
- dev->interrupt_in_done = 0;
-
- dev->interrupt_out_buffer = NULL;
- dev->interrupt_out_endpoint = NULL;
- dev->interrupt_out_urb = NULL;
- dev->interrupt_out_busy = 0;
+ init_waitqueue_head(&dev->read_wait);
+ init_waitqueue_head(&dev->write_wait);
result = usb_find_common_endpoints_reverse(interface->cur_altsetting,
NULL, NULL,
@@ -843,16 +780,16 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
goto error;
}
- dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
+ dev->read_buffer = kmalloc(read_buffer_size, GFP_KERNEL);
if (!dev->read_buffer)
goto error;
- dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
+ dev->interrupt_in_buffer = kmalloc(usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
if (!dev->interrupt_in_buffer)
goto error;
dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->interrupt_in_urb)
goto error;
- dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
+ dev->interrupt_out_buffer = kmalloc(write_buffer_size, GFP_KERNEL);
if (!dev->interrupt_out_buffer)
goto error;
dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -862,22 +799,21 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
-
if (!get_version_reply) {
retval = -ENOMEM;
goto error;
}
/* get the firmware version and log it */
- result = usb_control_msg (udev,
- usb_rcvctrlpipe(udev, 0),
- LEGO_USB_TOWER_REQUEST_GET_VERSION,
- USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- get_version_reply,
- sizeof(*get_version_reply),
- 1000);
+ result = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ LEGO_USB_TOWER_REQUEST_GET_VERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ 0,
+ get_version_reply,
+ sizeof(*get_version_reply),
+ 1000);
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
@@ -892,10 +828,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
le16_to_cpu(get_version_reply->build_no));
/* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
+ usb_set_intfdata(interface, dev);
+ retval = usb_register_dev(interface, &tower_class);
if (retval) {
/* something prevented us from registering this driver */
dev_err(idev, "Not able to get a minor for this device.\n");
@@ -924,17 +859,17 @@ error:
*
* Called by the usb core when the device is removed from the system.
*/
-static void tower_disconnect (struct usb_interface *interface)
+static void tower_disconnect(struct usb_interface *interface)
{
struct lego_usb_tower *dev;
int minor;
- dev = usb_get_intfdata (interface);
+ dev = usb_get_intfdata(interface);
minor = dev->minor;
/* give back our minor and prevent further open() */
- usb_deregister_dev (interface, &tower_class);
+ usb_deregister_dev(interface, &tower_class);
/* stop I/O */
usb_poison_urb(dev->interrupt_in_urb);
@@ -945,7 +880,7 @@ static void tower_disconnect (struct usb_interface *interface)
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
- tower_delete (dev);
+ tower_delete(dev);
} else {
dev->disconnected = 1;
/* wake up pollers */
@@ -962,6 +897,4 @@ module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
-#endif
diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
index 9b632ab24f03..c16121276a21 100644
--- a/drivers/usb/misc/sisusbvga/Kconfig
+++ b/drivers/usb/misc/sisusbvga/Kconfig
@@ -4,7 +4,7 @@ config USB_SISUSBVGA
tristate "USB 2.0 SVGA dongle support (Net2280/SiS315)"
depends on (USB_MUSB_HDRC || USB_EHCI_HCD)
select FONT_SUPPORT if USB_SISUSBVGA_CON
- ---help---
+ ---help---
Say Y here if you intend to attach a USB2VGA dongle based on a
Net2280 and a SiS315 chip.
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 6ca9111d150a..10c9e7f6273e 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
@@ -26,10 +27,6 @@
#define USB251XB_ADDR_PRODUCT_ID_LSB 0x02
#define USB251XB_ADDR_PRODUCT_ID_MSB 0x03
-#define USB251XB_DEF_PRODUCT_ID_12 0x2512 /* USB2512B/12Bi */
-#define USB251XB_DEF_PRODUCT_ID_13 0x2513 /* USB2513B/13Bi */
-#define USB251XB_DEF_PRODUCT_ID_14 0x2514 /* USB2514B/14Bi */
-#define USB251XB_DEF_PRODUCT_ID_17 0x2517 /* USB2517/17i */
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
@@ -74,7 +71,6 @@
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
-#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi/7i"
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
@@ -116,6 +112,7 @@
struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
+ struct regulator *vdd;
u8 skip_config;
struct gpio_desc *gpio_reset;
u16 vendor_id;
@@ -159,6 +156,14 @@ struct usb251xb_data {
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
+static const struct usb251xb_data usb2422_data = {
+ .product_id = 0x2422,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
+ .product_str = "USB2422",
+};
+
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
.port_cnt = 2,
@@ -261,20 +266,19 @@ static int usb251x_check_gpio_chip(struct usb251xb *hub)
}
#endif
-static void usb251xb_reset(struct usb251xb *hub, int state)
+static void usb251xb_reset(struct usb251xb *hub)
{
if (!hub->gpio_reset)
return;
i2c_lock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
- gpiod_set_value_cansleep(hub->gpio_reset, state);
+ gpiod_set_value_cansleep(hub->gpio_reset, 1);
+ usleep_range(1, 10); /* >=1us RESET_N asserted */
+ gpiod_set_value_cansleep(hub->gpio_reset, 0);
/* wait for hub recovery/stabilization */
- if (!state)
- usleep_range(500, 750); /* >=500us at power on */
- else
- usleep_range(1, 10); /* >=1us at power down */
+ usleep_range(500, 750); /* >=500us after RESET_N deasserted */
i2c_unlock_bus(hub->i2c->adapter, I2C_LOCK_SEGMENT);
}
@@ -292,7 +296,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
@@ -342,7 +346,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 0);
+ usb251xb_reset(hub);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
@@ -420,6 +424,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return err;
}
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -593,6 +601,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
static const struct of_device_id usb251xb_of_match[] = {
{
+ .compatible = "microchip,usb2422",
+ .data = &usb2422_data,
+ }, {
.compatible = "microchip,usb2512b",
.data = &usb2512b_data,
}, {
@@ -665,6 +676,10 @@ static int usb251xb_probe(struct usb251xb *hub)
if (err)
return err;
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
@@ -692,7 +707,31 @@ static int usb251xb_i2c_probe(struct i2c_client *i2c,
return usb251xb_probe(hub);
}
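+/* vdd may be cut while suspended, so resume must reprogram the hub over I2C */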
+static int __maybe_unused usb251xb_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+
+ return regulator_disable(hub->vdd);
+}
+
+static int __maybe_unused usb251xb_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct usb251xb *hub = i2c_get_clientdata(client);
+ int err;
+
+ err = regulator_enable(hub->vdd);
+ if (err)
+ return err;
+
+ return usb251xb_connect(hub);
+}
+
+static SIMPLE_DEV_PM_OPS(usb251xb_pm_ops, usb251xb_suspend, usb251xb_resume);
+
static const struct i2c_device_id usb251xb_id[] = {
+ { "usb2422", 0 },
{ "usb2512b", 0 },
{ "usb2512bi", 0 },
{ "usb2513b", 0 },
@@ -709,6 +748,7 @@ static struct i2c_driver usb251xb_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(usb251xb_of_match),
+ .pm = &usb251xb_pm_ops,
},
.probe = usb251xb_i2c_probe,
.id_table = usb251xb_id,
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 4da216c99726..2be182bd793a 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -153,6 +153,15 @@ static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
}
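+/* kick off the STATUS stage of the current control transfer */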
+static void ep0_do_status_stage(struct mtu3 *mtu)
+{
+ void __iomem *mbase = mtu->mac_base;
+ u32 value;
+
+ value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+ mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+}
+
static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
@@ -297,8 +306,7 @@ static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
ep0_load_test_packet(mtu);
/* send status before entering test mode. */
- value = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
- mtu3_writel(mbase, U3D_EP0CSR, value | EP0_SETUPPKTRDY | EP0_DATAEND);
+ ep0_do_status_stage(mtu);
/* wait for ACK status sent by host */
readl_poll_timeout_atomic(mbase + U3D_EP0CSR, value,
@@ -632,7 +640,6 @@ __acquires(mtu->lock)
{
struct usb_ctrlrequest setup;
struct mtu3_request *mreq;
- void __iomem *mbase = mtu->mac_base;
int handled = 0;
ep0_read_setup(mtu, &setup);
@@ -664,14 +671,19 @@ finish:
if (mtu->test_mode) {
; /* nothing to do */
} else if (handled == USB_GADGET_DELAYED_STATUS) {
- /* handle the delay STATUS phase till receive ep_queue on ep0 */
- mtu->delayed_status = true;
- } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
- mtu3_writel(mbase, U3D_EP0CSR,
- (mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
- | EP0_SETUPPKTRDY | EP0_DATAEND);
+ mreq = next_ep0_request(mtu);
+ if (mreq) {
+			/* the gadget already queued a request to finish the delayed STATUS */
+ ep0_do_status_stage(mtu);
+ ep0_req_giveback(mtu, &mreq->request);
+ } else {
+			/* defer the STATUS stage until ep0_queue() is called */
+ mtu->delayed_status = true;
+ }
+ } else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+ ep0_do_status_stage(mtu);
/* complete zlp request directly */
mreq = next_ep0_request(mtu);
if (mreq && !mreq->request.length)
@@ -802,12 +814,9 @@ static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
}
if (mtu->delayed_status) {
- u32 csr;
mtu->delayed_status = false;
- csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
- csr |= EP0_SETUPPKTRDY | EP0_DATAEND;
- mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+ ep0_do_status_stage(mtu);
/* needn't giveback the request for handling delay STATUS */
return 0;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bd63450af76a..15cca912c53e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2431,14 +2431,12 @@ static int musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq_byname(pdev, "mc");
- struct resource *iomem;
void __iomem *base;
if (irq <= 0)
return -ENODEV;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, iomem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index f42858e2b54c..7b6281ab62ed 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -325,7 +325,7 @@ void musb_init_debugfs(struct musb *musb)
{
struct dentry *root;
- root = debugfs_create_dir(dev_name(musb->controller), NULL);
+ root = debugfs_create_dir(dev_name(musb->controller), usb_debug_root);
musb->debugfs_root = root;
debugfs_create_file("regdump", S_IRUGO, root, musb, &musb_regdump_fops);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 327d4f7baaf7..88923175f71e 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -411,7 +411,7 @@ static int dsps_musb_dbg_init(struct musb *musb, struct dsps_glue *glue)
char buf[128];
sprintf(buf, "%s.dsps", dev_name(musb->controller));
- root = debugfs_create_dir(buf, NULL);
+ root = debugfs_create_dir(buf, usb_debug_root);
glue->dbgfs_root = root;
glue->regset.regs = dsps_musb_regs;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ffe462a657b1..f62ffaede1ab 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1085,7 +1085,6 @@ static int musb_gadget_disable(struct usb_ep *ep)
u8 epnum;
struct musb_ep *musb_ep;
void __iomem *epio;
- int status = 0;
musb_ep = to_musb_ep(ep);
musb = musb_ep->musb;
@@ -1118,7 +1117,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
musb_dbg(musb, "%s", musb_ep->end_point.name);
- return status;
+ return 0;
}
/*
@@ -1316,7 +1315,7 @@ done:
}
/*
- * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
* data but will queue requests.
*
* exported to ep0 code
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 19871266312d..110e6e9ad621 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -66,15 +66,13 @@ static int keystone_usbphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_usbphy *k_phy;
- struct resource *res;
int ret;
k_phy = devm_kzalloc(dev, sizeof(*k_phy), GFP_KERNEL);
if (!k_phy)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- k_phy->phy_ctrl = devm_ioremap_resource(dev, res);
+ k_phy->phy_ctrl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(k_phy->phy_ctrl))
return PTR_ERR(k_phy->phy_ctrl);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 70b8c8248caf..67b39dc62b37 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -710,7 +710,6 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
static int mxs_phy_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *base;
struct clk *clk;
struct mxs_phy *mxs_phy;
@@ -723,8 +722,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
if (!of_id)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index a3c30b609433..d438b7871446 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -590,7 +590,7 @@ static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
- struct resource *res, *irq_res;
+ struct resource *irq_res;
struct device *dev = &pdev->dev;
int ret, gpio;
u32 tmp;
@@ -619,8 +619,7 @@ static int usbhs_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 0824099b905e..ef1735d014da 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -161,11 +161,12 @@ struct usbhs_priv;
#define VBSTS (1 << 7) /* VBUS_0 and VBUSIN_0 Input Status */
#define VALID (1 << 3) /* USB Request Receive */
-#define DVSQ_MASK (0x3 << 4) /* Device State */
+#define DVSQ_MASK (0x7 << 4) /* Device State */
#define POWER_STATE (0 << 4)
#define DEFAULT_STATE (1 << 4)
#define ADDRESS_STATE (2 << 4)
#define CONFIGURATION_STATE (3 << 4)
+#define SUSPENDED_STATE (4 << 4)
#define CTSQ_MASK (0x7) /* Control Transfer Stage */
#define IDLE_SETUP_STAGE 0 /* Idle stage or setup stage */
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 86637cd066cf..01c6a48c41bc 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1273,11 +1273,11 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
*/
snprintf(name, sizeof(name), "ch%d", channel);
if (channel & 1) {
- fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->tx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->tx_chan))
fifo->tx_chan = NULL;
} else {
- fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+ fifo->rx_chan = dma_request_chan(dev, name);
if (IS_ERR(fifo->rx_chan))
fifo->rx_chan = NULL;
}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 10fc65596014..b98112cefaa4 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -169,17 +169,7 @@ void usbhs_mod_remove(struct usbhs_priv *priv)
*/
int usbhs_status_get_device_state(struct usbhs_irq_state *irq_state)
{
- int state = irq_state->intsts0 & DVSQ_MASK;
-
- switch (state) {
- case POWER_STATE:
- case DEFAULT_STATE:
- case ADDRESS_STATE:
- case CONFIGURATION_STATE:
- return state;
- }
-
- return -EIO;
+ return (int)irq_state->intsts0 & DVSQ_MASK;
}
int usbhs_status_get_ctrl_stage(struct usbhs_irq_state *irq_state)
@@ -348,10 +338,6 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
* usbhs_interrupt
*/
- /*
- * it don't enable DVSE (intenb0) here
- * but "mod->irq_dev_state" will be called.
- */
if (info->irq_vbus)
intenb0 |= VBSE;
@@ -362,6 +348,9 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_ctrl_stage)
intenb0 |= CTRE;
+ if (mod->irq_dev_state)
+ intenb0 |= DVSE;
+
if (mod->irq_empty && mod->irq_bempsts) {
usbhs_write(priv, BEMPENB, mod->irq_bempsts);
intenb0 |= BEMPE;
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index cd38d74b3223..53489cafecc1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -457,12 +457,18 @@ static int usbhsg_irq_dev_state(struct usbhs_priv *priv,
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ int state = usbhs_status_get_device_state(irq_state);
gpriv->gadget.speed = usbhs_bus_get_speed(priv);
- dev_dbg(dev, "state = %x : speed : %d\n",
- usbhs_status_get_device_state(irq_state),
- gpriv->gadget.speed);
+ dev_dbg(dev, "state = %x : speed : %d\n", state, gpriv->gadget.speed);
+
+ if (gpriv->gadget.speed != USB_SPEED_UNKNOWN &&
+ (state & SUSPENDED_STATE)) {
+ if (gpriv->driver && gpriv->driver->suspend)
+ gpriv->driver->suspend(&gpriv->gadget);
+ usb_gadget_set_state(&gpriv->gadget, USB_STATE_SUSPENDED);
+ }
return 0;
}
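The new branch forwards a detected bus suspend to the function driver before recording USB_STATE_SUSPENDED, so the function driver can quiesce itself first. On the function-driver side, the callback that gets invoked has this shape (hypothetical driver, shown only to illustrate the call direction):

static void example_function_suspend(struct usb_gadget *gadget)
{
	/* Quiesce pending transfers and drop to low power here. */
	dev_dbg(&gadget->dev, "bus suspend\n");
}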
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 94b4e7db2b94..8273126ffdf4 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -175,6 +175,27 @@ void usb_role_switch_put(struct usb_role_switch *sw)
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
+/**
+ * usb_role_switch_find_by_fwnode - Find USB role switch with its fwnode
+ * @fwnode: fwnode of the USB Role Switch
+ *
+ * Finds and returns role switch with @fwnode. The reference count for the
+ * found switch is incremented.
+ */
+struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = class_find_device_by_fwnode(role_class, fwnode);
+
+ return dev ? to_role_switch(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
+
static umode_t
usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
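A sketch of how a consumer might use the new lookup; the fwnode typically comes from a device-tree or ACPI reference, and the reference taken by the lookup must be dropped with usb_role_switch_put() (hypothetical caller):

struct usb_role_switch *sw;

sw = usb_role_switch_find_by_fwnode(fwnode);
if (sw) {
	usb_role_switch_set_role(sw, USB_ROLE_DEVICE);
	usb_role_switch_put(sw);	/* drop the lookup's reference */
}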
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 67279c6bce33..ed4a18b435a0 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -269,19 +269,19 @@ config USB_SERIAL_F8153X
config USB_SERIAL_GARMIN
- tristate "USB Garmin GPS driver"
- help
- Say Y here if you want to connect to your Garmin GPS.
- Should work with most Garmin GPS devices which have a native USB port.
+ tristate "USB Garmin GPS driver"
+ help
+ Say Y here if you want to connect to your Garmin GPS.
+ Should work with most Garmin GPS devices which have a native USB port.
- See <http://sourceforge.net/projects/garmin-gps> for the latest
- version of the driver.
+ See <http://sourceforge.net/projects/garmin-gps> for the latest
+ version of the driver.
- To compile this driver as a module, choose M here: the
- module will be called garmin_gps.
+ To compile this driver as a module, choose M here: the
+ module will be called garmin_gps.
config USB_SERIAL_IPW
- tristate "USB IPWireless (3G UMTS TDD) Driver"
+ tristate "USB IPWireless (3G UMTS TDD) Driver"
select USB_SERIAL_WWAN
help
Say Y here if you want to use a IPWireless USB modem such as
@@ -341,20 +341,20 @@ config USB_SERIAL_KLSI
module will be called kl5kusb105.
config USB_SERIAL_KOBIL_SCT
- tristate "USB KOBIL chipcard reader"
- ---help---
- Say Y here if you want to use one of the following KOBIL USB chipcard
- readers:
-
- - USB TWIN
- - KAAN Standard Plus
- - KAAN SIM
- - SecOVID Reader Plus
- - B1 Professional
- - KAAN Professional
-
- Note that you need a current CT-API.
- To compile this driver as a module, choose M here: the
+ tristate "USB KOBIL chipcard reader"
+ ---help---
+ Say Y here if you want to use one of the following KOBIL USB chipcard
+ readers:
+
+ - USB TWIN
+ - KAAN Standard Plus
+ - KAAN SIM
+ - SecOVID Reader Plus
+ - B1 Professional
+ - KAAN Professional
+
+ Note that you need a current CT-API.
+ To compile this driver as a module, choose M here: the
module will be called kobil_sct.
config USB_SERIAL_MCT_U232
@@ -458,7 +458,7 @@ config USB_SERIAL_OTI6858
tristate "USB Ours Technology Inc. OTi-6858 USB To RS232 Bridge Controller"
help
Say Y here if you want to use the OTi-6858 single port USB to serial
- converter device.
+ converter device.
To compile this driver as a module, choose M here: the
module will be called oti6858.
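These hunks only reflow whitespace to the conventional Kconfig style: attributes indented with one tab, help text with one tab plus two spaces. For reference, the canonical shape (hypothetical entry):

config USB_SERIAL_EXAMPLE
	tristate "USB Example driver"
	help
	  Say Y here if you want to use the hypothetical example device.

	  To compile this driver as a module, choose M here: the
	  module will be called example.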
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 3bb1fff02bed..df582fe855f0 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -48,12 +48,6 @@
#define CH341_BIT_DCD 0x08
#define CH341_BITS_MODEM_STAT 0x0f /* all bits */
-/*******************************/
-/* baudrate calculation factor */
-/*******************************/
-#define CH341_BAUDBASE_FACTOR 1532620800
-#define CH341_BAUDBASE_DIVMAX 3
-
/* Break support - the information used to implement this was gleaned from
* the Net/FreeBSD uchcom.c driver by Takanori Watanabe. Domo arigato.
*/
@@ -144,37 +138,96 @@ static int ch341_control_in(struct usb_device *dev,
return 0;
}
+#define CH341_CLKRATE 48000000
+#define CH341_CLK_DIV(ps, fact) (1 << (12 - 3 * (ps) - (fact)))
+#define CH341_MIN_RATE(ps) (CH341_CLKRATE / (CH341_CLK_DIV((ps), 1) * 512))
+
+static const speed_t ch341_min_rates[] = {
+ CH341_MIN_RATE(0),
+ CH341_MIN_RATE(1),
+ CH341_MIN_RATE(2),
+ CH341_MIN_RATE(3),
+};
+
+/*
+ * The device line speed is given by the following equation:
+ *
+ * baudrate = 48000000 / (2^(12 - 3 * ps - fact) * div), where
+ *
+ * 0 <= ps <= 3,
+ * 0 <= fact <= 1,
+ * 2 <= div <= 256 if fact = 0, or
+ * 9 <= div <= 256 if fact = 1
+ */
+static int ch341_get_divisor(speed_t speed)
+{
+ unsigned int fact, div, clk_div;
+ int ps;
+
+ /*
+ * Clamp to supported range, this makes the (ps < 0) and (div < 2)
+ * sanity checks below redundant.
+ */
+ speed = clamp(speed, 46U, 3000000U);
+
+ /*
+ * Start with highest possible base clock (fact = 1) that will give a
+ * divisor strictly less than 512.
+ */
+ fact = 1;
+ for (ps = 3; ps >= 0; ps--) {
+ if (speed > ch341_min_rates[ps])
+ break;
+ }
+
+ if (ps < 0)
+ return -EINVAL;
+
+ /* Determine corresponding divisor, rounding down. */
+ clk_div = CH341_CLK_DIV(ps, fact);
+ div = CH341_CLKRATE / (clk_div * speed);
+
+ /* Halve base clock (fact = 0) if required. */
+ if (div < 9 || div > 255) {
+ div /= 2;
+ clk_div *= 2;
+ fact = 0;
+ }
+
+ if (div < 2)
+ return -EINVAL;
+
+ /*
+ * Pick next divisor if resulting rate is closer to the requested one,
+ * scale up to avoid rounding errors on low rates.
+ */
+ if (16 * CH341_CLKRATE / (clk_div * div) - 16 * speed >=
+ 16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
+ div++;
+
+ return (0x100 - div) << 8 | fact << 2 | ps;
+}
+
static int ch341_set_baudrate_lcr(struct usb_device *dev,
struct ch341_private *priv, u8 lcr)
{
- short a;
+ int val;
int r;
- unsigned long factor;
- short divisor;
if (!priv->baud_rate)
return -EINVAL;
- factor = (CH341_BAUDBASE_FACTOR / priv->baud_rate);
- divisor = CH341_BAUDBASE_DIVMAX;
-
- while ((factor > 0xfff0) && divisor) {
- factor >>= 3;
- divisor--;
- }
- if (factor > 0xfff0)
+ val = ch341_get_divisor(priv->baud_rate);
+ if (val < 0)
return -EINVAL;
- factor = 0x10000 - factor;
- a = (factor & 0xff00) | divisor;
-
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
*/
- a |= BIT(7);
+ val |= BIT(7);
- r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, val);
if (r)
return r;
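To make the new divisor algorithm concrete, here is one rate worked through by hand (arithmetic only, not part of the patch):

/*
 * Worked example for 115200 baud:
 *
 *   115200 > CH341_MIN_RATE(3), so ps = 3 with fact = 1
 *   clk_div = 1 << (12 - 3 * 3 - 1) = 4
 *   div     = 48000000 / (4 * 115200) = 104  (within [9, 255], keep fact = 1)
 *   rate    = 48000000 / (4 * 104) ~= 115385 (closer than div = 105 ~= 114286,
 *             so div is not bumped)
 *
 *   returned divisor = (0x100 - 104) << 8 | 1 << 2 | 3 = 0x9807
 */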
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 979bef9bfb6b..f5143eedbc48 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -125,6 +125,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+ { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 25e81faf4c24..9ad44a96dfe3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1033,6 +1033,9 @@ static const struct usb_device_id id_table_combined[] = {
/* Sienna devices */
{ USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
{ USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
+ /* U-Blox devices */
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
+ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 22d66217cb41..e8373528264c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1558,3 +1558,10 @@
*/
#define UNJO_VID 0x22B7
#define UNJO_ISODEBUG_V1_PID 0x150D
+
+/*
+ * U-Blox products (http://www.u-blox.com).
+ */
+#define UBLOX_VID 0x1546
+#define UBLOX_C099F9P_ZED_PID 0x0502
+#define UBLOX_C099F9P_ODIN_PID 0x0503
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 18110225d506..2ec4eeacebc7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1833,10 +1833,6 @@ static int mos7720_startup(struct usb_serial *serial)
product = le16_to_cpu(serial->dev->descriptor.idProduct);
dev = serial->dev;
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
-
if (product == MOSCHIP_DEVICE_ID_7715) {
struct urb *urb = serial->port[0]->interrupt_in_urb;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a698d46ba773..23f91d658cb4 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -89,17 +89,10 @@
/* For higher baud Rates use TIOCEXBAUD */
#define TIOCEXBAUD 0x5462
-/* vendor id and device id defines */
-
-/* The native mos7840/7820 component */
-#define USB_VENDOR_ID_MOSCHIP 0x9710
-#define MOSCHIP_DEVICE_ID_7840 0x7840
-#define MOSCHIP_DEVICE_ID_7843 0x7843
-#define MOSCHIP_DEVICE_ID_7820 0x7820
-#define MOSCHIP_DEVICE_ID_7810 0x7810
-/* The native component can have its vendor/device id's overridden
- * in vendor-specific implementations. Such devices can be handled
- * by making a change here, in id_table.
+/*
+ * Vendor id and device id defines
+ *
+ * NOTE: Do not add new defines, add entries directly to the id_table instead.
*/
#define USB_VENDOR_ID_BANDB 0x0856
#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
@@ -116,14 +109,6 @@
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
-/* This driver also supports
- * ATEN UC2324 device using Moschip MCS7840
- * ATEN UC2322 device using Moschip MCS7820
- */
-#define USB_VENDOR_ID_ATENINTL 0x0557
-#define ATENINTL_DEVICE_ID_UC2324 0x2011
-#define ATENINTL_DEVICE_ID_UC2322 0x7820
-
/* Interrupt Routine Defines */
#define SERIAL_IIR_RLS 0x06
@@ -171,30 +156,37 @@
#define LED_OFF_MS 500
enum mos7840_flag {
- MOS7840_FLAG_CTRL_BUSY,
MOS7840_FLAG_LED_BUSY,
};
+#define MCS_PORT_MASK GENMASK(2, 0)
+#define MCS_PORTS(nr) ((nr) & MCS_PORT_MASK)
+#define MCS_LED BIT(3)
+
+#define MCS_DEVICE(vid, pid, flags) \
+ USB_DEVICE((vid), (pid)), .driver_info = (flags)
+
static const struct usb_device_id id_table[] = {
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7843)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
- {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
- {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
- {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
+ { MCS_DEVICE(0x0557, 0x2011, MCS_PORTS(4)) }, /* ATEN UC2324 */
+ { MCS_DEVICE(0x0557, 0x7820, MCS_PORTS(2)) }, /* ATEN UC2322 */
+ { MCS_DEVICE(0x110a, 0x2210, MCS_PORTS(2)) }, /* Moxa UPort 2210 */
+ { MCS_DEVICE(0x9710, 0x7810, MCS_PORTS(1) | MCS_LED) }, /* ASIX MCS7810 */
+ { MCS_DEVICE(0x9710, 0x7820, MCS_PORTS(2)) }, /* MosChip MCS7820 */
+ { MCS_DEVICE(0x9710, 0x7840, MCS_PORTS(4)) }, /* MosChip MCS7840 */
+ { MCS_DEVICE(0x9710, 0x7843, MCS_PORTS(3)) }, /* ASIX MCS7840 3 port */
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
+ { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
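With the port count (and LED capability) encoded in driver_info, supporting a new MosChip-based design becomes a one-line table entry instead of new defines plus probe-time detection; for example (hypothetical IDs, for illustration only):

{ MCS_DEVICE(0x1234, 0x5678, MCS_PORTS(2) | MCS_LED) },	/* hypothetical 2-port device with LED */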
@@ -206,19 +198,12 @@ struct moschip_port {
struct urb *read_urb; /* read URB for this port */
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
- char open;
- char open_ports;
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
__u8 SpRegOffset;
__u8 ControlRegOffset;
__u8 DcrRegOffset;
- /* for processing control URBS in interrupt context */
- struct urb *control_urb;
- struct usb_ctrlrequest *dr;
- char *ctrl_buf;
- int MsrLsr;
spinlock_t pool_lock;
struct urb *write_urb_pool[NUM_URBS];
@@ -360,150 +345,11 @@ static void mos7840_dump_serial_port(struct usb_serial_port *port,
/************************************************************************/
/************************************************************************/
-/* I N T E R F A C E F U N C T I O N S */
-/* I N T E R F A C E F U N C T I O N S */
-/************************************************************************/
-/************************************************************************/
-
-static inline void mos7840_set_port_private(struct usb_serial_port *port,
- struct moschip_port *data)
-{
- usb_set_serial_port_data(port, (void *)data);
-}
-
-static inline struct moschip_port *mos7840_get_port_private(struct
- usb_serial_port
- *port)
-{
- return (struct moschip_port *)usb_get_serial_port_data(port);
-}
-
-static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
-{
- struct moschip_port *mos7840_port;
- struct async_icount *icount;
- mos7840_port = port;
- if (new_msr &
- (MOS_MSR_DELTA_CTS | MOS_MSR_DELTA_DSR | MOS_MSR_DELTA_RI |
- MOS_MSR_DELTA_CD)) {
- icount = &mos7840_port->port->icount;
-
- /* update input line counters */
- if (new_msr & MOS_MSR_DELTA_CTS)
- icount->cts++;
- if (new_msr & MOS_MSR_DELTA_DSR)
- icount->dsr++;
- if (new_msr & MOS_MSR_DELTA_CD)
- icount->dcd++;
- if (new_msr & MOS_MSR_DELTA_RI)
- icount->rng++;
-
- wake_up_interruptible(&port->port->port.delta_msr_wait);
- }
-}
-
-static void mos7840_handle_new_lsr(struct moschip_port *port, __u8 new_lsr)
-{
- struct async_icount *icount;
-
- if (new_lsr & SERIAL_LSR_BI) {
- /*
- * Parity and Framing errors only count if they
- * occur exclusive of a break being
- * received.
- */
- new_lsr &= (__u8) (SERIAL_LSR_OE | SERIAL_LSR_BI);
- }
-
- /* update input line counters */
- icount = &port->port->icount;
- if (new_lsr & SERIAL_LSR_BI)
- icount->brk++;
- if (new_lsr & SERIAL_LSR_OE)
- icount->overrun++;
- if (new_lsr & SERIAL_LSR_PE)
- icount->parity++;
- if (new_lsr & SERIAL_LSR_FE)
- icount->frame++;
-}
-
-/************************************************************************/
-/************************************************************************/
/* U S B C A L L B A C K F U N C T I O N S */
/* U S B C A L L B A C K F U N C T I O N S */
/************************************************************************/
/************************************************************************/
-static void mos7840_control_callback(struct urb *urb)
-{
- unsigned char *data;
- struct moschip_port *mos7840_port;
- struct device *dev = &urb->dev->dev;
- __u8 regval = 0x0;
- int status = urb->status;
-
- mos7840_port = urb->context;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
- goto out;
- default:
- dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- goto out;
- }
-
- dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
- if (urb->actual_length < 1)
- goto out;
-
- dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
- mos7840_port->MsrLsr, mos7840_port->port_num);
- data = urb->transfer_buffer;
- regval = (__u8) data[0];
- dev_dbg(dev, "%s data is %x\n", __func__, regval);
- if (mos7840_port->MsrLsr == 0)
- mos7840_handle_new_msr(mos7840_port, regval);
- else if (mos7840_port->MsrLsr == 1)
- mos7840_handle_new_lsr(mos7840_port, regval);
-out:
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
-}
-
-static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
- __u16 *val)
-{
- struct usb_device *dev = mcs->port->serial->dev;
- struct usb_ctrlrequest *dr = mcs->dr;
- unsigned char *buffer = mcs->ctrl_buf;
- int ret;
-
- if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
- return -EBUSY;
-
- dr->bRequestType = MCS_RD_RTYPE;
- dr->bRequest = MCS_RDREQ;
- dr->wValue = cpu_to_le16(Wval); /* 0 */
- dr->wIndex = cpu_to_le16(reg);
- dr->wLength = cpu_to_le16(2);
-
- usb_fill_control_urb(mcs->control_urb, dev, usb_rcvctrlpipe(dev, 0),
- (unsigned char *)dr, buffer, 2,
- mos7840_control_callback, mcs);
- mcs->control_urb->transfer_buffer_length = 2;
- ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
- if (ret)
- clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
-
- return ret;
-}
-
static void mos7840_set_led_callback(struct urb *urb)
{
switch (urb->status) {
@@ -580,146 +426,6 @@ static void mos7840_led_activity(struct usb_serial_port *port)
}
/*****************************************************************************
- * mos7840_interrupt_callback
- * this is the callback function for when we have received data on the
- * interrupt endpoint.
- *****************************************************************************/
-
-static void mos7840_interrupt_callback(struct urb *urb)
-{
- int result;
- int length;
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
- __u16 Data;
- unsigned char *data;
- __u8 sp[5];
- int i, rv = 0;
- __u16 wval, wreg = 0;
- int status = urb->status;
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
- __func__, status);
- return;
- default:
- dev_dbg(&urb->dev->dev, "%s - nonzero urb status received: %d\n",
- __func__, status);
- goto exit;
- }
-
- length = urb->actual_length;
- data = urb->transfer_buffer;
-
- serial = urb->context;
-
- /* Moschip get 5 bytes
- * Byte 1 IIR Port 1 (port.number is 0)
- * Byte 2 IIR Port 2 (port.number is 1)
- * Byte 3 IIR Port 3 (port.number is 2)
- * Byte 4 IIR Port 4 (port.number is 3)
- * Byte 5 FIFO status for both */
-
- if (length > 5) {
- dev_dbg(&urb->dev->dev, "%s", "Wrong data !!!\n");
- return;
- }
-
- sp[0] = (__u8) data[0];
- sp[1] = (__u8) data[1];
- sp[2] = (__u8) data[2];
- sp[3] = (__u8) data[3];
-
- for (i = 0; i < serial->num_ports; i++) {
- mos7840_port = mos7840_get_port_private(serial->port[i]);
- wval = ((__u16)serial->port[i]->port_number + 1) << 8;
- if (mos7840_port->open) {
- if (sp[i] & 0x01) {
- dev_dbg(&urb->dev->dev, "SP%d No Interrupt !!!\n", i);
- } else {
- switch (sp[i] & 0x0f) {
- case SERIAL_IIR_RLS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Receiver status error or \n", i);
- dev_dbg(&urb->dev->dev, "address bit detected in 9-bit mode\n");
- mos7840_port->MsrLsr = 1;
- wreg = LINE_STATUS_REGISTER;
- break;
- case SERIAL_IIR_MS:
- dev_dbg(&urb->dev->dev, "Serial Port %d: Modem status change\n", i);
- mos7840_port->MsrLsr = 0;
- wreg = MODEM_STATUS_REGISTER;
- break;
- }
- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
- }
- }
- }
- if (!(rv < 0))
- /* the completion handler for the control urb will resubmit */
- return;
-exit:
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result) {
- dev_err(&urb->dev->dev,
- "%s - Error %d submitting interrupt urb\n",
- __func__, result);
- }
-}
-
-static int mos7840_port_paranoia_check(struct usb_serial_port *port,
- const char *function)
-{
- if (!port) {
- pr_debug("%s - port == NULL\n", function);
- return -1;
- }
- if (!port->serial) {
- pr_debug("%s - port->serial == NULL\n", function);
- return -1;
- }
-
- return 0;
-}
-
-/* Inline functions to check the sanity of a pointer that is passed to us */
-static int mos7840_serial_paranoia_check(struct usb_serial *serial,
- const char *function)
-{
- if (!serial) {
- pr_debug("%s - serial == NULL\n", function);
- return -1;
- }
- if (!serial->type) {
- pr_debug("%s - serial->type == NULL!\n", function);
- return -1;
- }
-
- return 0;
-}
-
-static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
- const char *function)
-{
- /* if no port was specified, or it fails a paranoia check */
- if (!port ||
- mos7840_port_paranoia_check(port, function) ||
- mos7840_serial_paranoia_check(port->serial, function)) {
- /* then say that we don't have a valid usb_serial thing,
- * which will end up genrating -ENODEV return values */
- return NULL;
- }
-
- return port->serial;
-}
-
-/*****************************************************************************
* mos7840_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -727,35 +433,18 @@ static struct usb_serial *mos7840_get_usb_serial(struct usb_serial_port *port,
static void mos7840_bulk_in_callback(struct urb *urb)
{
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int retval;
unsigned char *data;
- struct usb_serial *serial;
- struct usb_serial_port *port;
- struct moschip_port *mos7840_port;
int status = urb->status;
- mos7840_port = urb->context;
- if (!mos7840_port)
- return;
-
if (status) {
dev_dbg(&urb->dev->dev, "nonzero read bulk status received: %d\n", status);
mos7840_port->read_urb_busy = false;
return;
}
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__)) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial) {
- mos7840_port->read_urb_busy = false;
- return;
- }
-
data = urb->transfer_buffer;
usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
@@ -767,12 +456,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
dev_dbg(&port->dev, "icount.rx is %d:\n", port->icount.rx);
}
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!\n");
- mos7840_port->read_urb_busy = false;
- return;
- }
-
if (mos7840_port->has_led)
mos7840_led_activity(port);
@@ -793,14 +476,12 @@ static void mos7840_bulk_in_callback(struct urb *urb)
static void mos7840_bulk_out_data_callback(struct urb *urb)
{
- struct moschip_port *mos7840_port;
- struct usb_serial_port *port;
+ struct moschip_port *mos7840_port = urb->context;
+ struct usb_serial_port *port = mos7840_port->port;
int status = urb->status;
unsigned long flags;
int i;
- mos7840_port = urb->context;
- port = mos7840_port->port;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; i++) {
if (urb == mos7840_port->write_urb_pool[i]) {
@@ -815,11 +496,7 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
return;
}
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port->open)
- tty_port_tty_wakeup(&port->port);
+ tty_port_tty_wakeup(&port->port);
}
@@ -836,32 +513,16 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int response;
int j;
- struct usb_serial *serial;
struct urb *urb;
__u16 Data;
int status;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -ENODEV;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -ENODEV;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- port0->open_ports++;
/* Initialising the write urb pool */
for (j = 0; j < NUM_URBS; ++j) {
@@ -1012,41 +673,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
Data);
- /* Check to see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the structures *
- * were not set up at that time.) */
- if (port0->open_ports == 1) {
- /* FIXME: Buffer never NULL, so URB is not submitted. */
- if (serial->port[0]->interrupt_in_buffer == NULL) {
- /* set up interrupt urb */
- usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
- serial->dev,
- usb_rcvintpipe(serial->dev,
- serial->port[0]->interrupt_in_endpointAddress),
- serial->port[0]->interrupt_in_buffer,
- serial->port[0]->interrupt_in_urb->
- transfer_buffer_length,
- mos7840_interrupt_callback,
- serial,
- serial->port[0]->interrupt_in_urb->interval);
-
- /* start interrupt read for mos7840 */
- response =
- usb_submit_urb(serial->port[0]->interrupt_in_urb,
- GFP_KERNEL);
- if (response) {
- dev_err(&port->dev, "%s - Error %d submitting "
- "interrupt urb\n", __func__, response);
- }
-
- }
-
- }
-
- /* see if we've set up our endpoint info yet *
- * (can't set it up in mos7840_startup as the *
- * structures were not set up at that time.) */
-
dev_dbg(&port->dev, "port number is %d\n", port->port_number);
dev_dbg(&port->dev, "minor number is %d\n", port->minor);
dev_dbg(&port->dev, "Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
@@ -1086,9 +712,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
- /* send a open port command */
- mos7840_port->open = 1;
- /* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
err:
@@ -1115,17 +738,10 @@ err:
static int mos7840_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int chars = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return 0;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return 0;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1147,25 +763,10 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
static void mos7840_close(struct usb_serial_port *port)
{
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
- struct moschip_port *port0;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int j;
__u16 Data;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
- port0 = mos7840_get_port_private(serial->port[0]);
-
- if (mos7840_port == NULL || port0 == NULL)
- return;
-
for (j = 0; j < NUM_URBS; ++j)
usb_kill_urb(mos7840_port->write_urb_pool[j]);
@@ -1180,22 +781,11 @@ static void mos7840_close(struct usb_serial_port *port)
usb_kill_urb(mos7840_port->read_urb);
mos7840_port->read_urb_busy = false;
- port0->open_ports--;
- dev_dbg(&port->dev, "%s in close%d\n", __func__, port0->open_ports);
- if (port0->open_ports == 0) {
- if (serial->port[0]->interrupt_in_urb) {
- dev_dbg(&port->dev, "Shutdown interrupt_in_urb\n");
- usb_kill_urb(serial->port[0]->interrupt_in_urb);
- }
- }
-
Data = 0x0;
mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
Data = 0x00;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
-
- mos7840_port->open = 0;
}
/*****************************************************************************
@@ -1205,21 +795,8 @@ static void mos7840_close(struct usb_serial_port *port)
static void mos7840_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned char data;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = mos7840_get_usb_serial(port, __func__);
- if (!serial)
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
@@ -1244,17 +821,10 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
static int mos7840_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int i;
int room = 0;
unsigned long flags;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
for (i = 0; i < NUM_URBS; ++i) {
@@ -1280,29 +850,17 @@ static int mos7840_write_room(struct tty_struct *tty)
static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
int status;
int i;
int bytes_sent = 0;
int transfer_size;
unsigned long flags;
-
- struct moschip_port *mos7840_port;
- struct usb_serial *serial;
struct urb *urb;
/* __u16 Data; */
const unsigned char *current_position = data;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- serial = port->serial;
- if (mos7840_serial_paranoia_check(serial, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
- if (mos7840_port == NULL)
- return -1;
-
/* try to find a free urb in the list */
urb = NULL;
@@ -1383,22 +941,9 @@ exit:
static void mos7840_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s", "port not opened\n");
- return;
- }
-
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
@@ -1425,19 +970,8 @@ static void mos7840_throttle(struct tty_struct *tty)
static void mos7840_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
@@ -1460,15 +994,10 @@ static void mos7840_unthrottle(struct tty_struct *tty)
static int mos7840_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
unsigned int result;
__u16 msr;
__u16 mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
if (status < 0)
@@ -1493,15 +1022,10 @@ static int mos7840_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port;
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
unsigned int mcr;
int status;
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -ENODEV;
-
/* FIXME: What locks the port registers ? */
mcr = mos7840_port->shadowMCR;
if (clear & TIOCM_RTS)
@@ -1578,21 +1102,11 @@ static int mos7840_calc_baud_rate_divisor(struct usb_serial_port *port,
static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
int baudRate)
{
+ struct usb_serial_port *port = mos7840_port->port;
int divisor = 0;
int status;
__u16 Data;
__u16 clk_sel_val;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return -1;
-
- port = mos7840_port->port;
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return -1;
dev_dbg(&port->dev, "%s - baud = %d\n", __func__, baudRate);
/* reset clk_uart_sel in spregOffset */
@@ -1681,6 +1195,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
static void mos7840_change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7840_port, struct ktermios *old_termios)
{
+ struct usb_serial_port *port = mos7840_port->port;
int baud;
unsigned cflag;
__u8 lData;
@@ -1688,23 +1203,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
__u8 lStop;
int status;
__u16 Data;
- struct usb_serial_port *port;
-
- if (mos7840_port == NULL)
- return;
-
- port = mos7840_port->port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- if (mos7840_serial_paranoia_check(port->serial, __func__))
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
lData = LCR_BITS_8;
lStop = LCR_STOP_1;
@@ -1839,37 +1337,13 @@ static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
- struct usb_serial *serial;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return;
-
- serial = port->serial;
-
- if (mos7840_serial_paranoia_check(serial, __func__))
- return;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return;
-
- if (!mos7840_port->open) {
- dev_dbg(&port->dev, "%s - port not opened\n", __func__);
- return;
- }
/* change the port settings to the new ones specified */
mos7840_change_port_settings(tty, mos7840_port, old_termios);
- if (!mos7840_port->read_urb) {
- dev_dbg(&port->dev, "%s", "URB KILLED !!!!!\n");
- return;
- }
-
if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
@@ -1916,7 +1390,7 @@ static int mos7840_get_serial_info(struct tty_struct *tty,
struct serial_struct *ss)
{
struct usb_serial_port *port = tty->driver_data;
- struct moschip_port *mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
ss->type = PORT_16550A;
ss->line = mos7840_port->port->minor;
@@ -1939,15 +1413,6 @@ static int mos7840_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
- struct moschip_port *mos7840_port;
-
- if (mos7840_port_paranoia_check(port, __func__))
- return -1;
-
- mos7840_port = mos7840_get_port_private(port);
-
- if (mos7840_port == NULL)
- return -1;
switch (cmd) {
/* return number of bytes available */
@@ -1962,6 +1427,13 @@ static int mos7840_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+/*
+ * Check if GPO (pin 42) is connected to GPI (pin 33) as recommended by ASIX
+ * for MCS7810 by bit-banging a 16-bit word.
+ *
+ * Note that GPO is really RTS of the third port so this will toggle RTS of
+ * port two or three on two- and four-port devices.
+ */
static int mos7810_check(struct usb_serial *serial)
{
int i, pass_count = 0;
@@ -2019,16 +1491,12 @@ static int mos7810_check(struct usb_serial *serial)
static int mos7840_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ unsigned long device_flags = id->driver_info;
u8 *buf;
- int device_type;
- if (product == MOSCHIP_DEVICE_ID_7810 ||
- product == MOSCHIP_DEVICE_ID_7820 ||
- product == MOSCHIP_DEVICE_ID_7843) {
- device_type = product;
+ /* Skip device-type detection if we already have device flags. */
+ if (device_flags)
goto out;
- }
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
if (!buf)
@@ -2040,15 +1508,15 @@ static int mos7840_probe(struct usb_serial *serial,
/* For a MCS7840 device GPIO0 must be set to 1 */
if (buf[0] & 0x01)
- device_type = MOSCHIP_DEVICE_ID_7840;
+ device_flags = MCS_PORTS(4);
else if (mos7810_check(serial))
- device_type = MOSCHIP_DEVICE_ID_7810;
+ device_flags = MCS_PORTS(1) | MCS_LED;
else
- device_type = MOSCHIP_DEVICE_ID_7820;
+ device_flags = MCS_PORTS(2);
kfree(buf);
out:
- usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2056,19 +1524,10 @@ out:
static int mos7840_calc_num_ports(struct usb_serial *serial,
struct usb_serial_endpoints *epds)
{
- int device_type = (unsigned long)usb_get_serial_data(serial);
- int num_ports;
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
+ int num_ports = MCS_PORTS(device_flags);
- if (device_type == MOSCHIP_DEVICE_ID_7843)
- num_ports = 3;
- else
- num_ports = (device_type >> 4) & 0x000F;
-
- /*
- * num_ports is currently never zero as device_type is one of
- * MOSCHIP_DEVICE_ID_78{1,2,4}0.
- */
- if (num_ports == 0)
+ if (num_ports == 0 || num_ports > 4)
return -ENODEV;
if (epds->num_bulk_in < num_ports || epds->num_bulk_out < num_ports) {
@@ -2079,10 +1538,27 @@ static int mos7840_calc_num_ports(struct usb_serial *serial,
return num_ports;
}
+static int mos7840_attach(struct usb_serial *serial)
+{
+ struct device *dev = &serial->interface->dev;
+ int status;
+ u16 val;
+
+ /* Zero Length flag enable */
+ val = 0x0f;
+ status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, val);
+ if (status < 0)
+ dev_dbg(dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
+ else
+ dev_dbg(dev, "ZLP_REG5 Writing success status%d\n", status);
+
+ return status;
+}
+
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- int device_type = (unsigned long)usb_get_serial_data(serial);
+ unsigned long device_flags = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
@@ -2103,7 +1579,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
* common to all port */
mos7840_port->port = port;
- mos7840_set_port_private(port, mos7840_port);
spin_lock_init(&mos7840_port->pool_lock);
/* minor is not initialised until later by
@@ -2129,14 +1604,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->DcrRegOffset = 0x16 + 3 * (phy_num - 2);
}
mos7840_dump_serial_port(port, mos7840_port);
- mos7840_set_port_private(port, mos7840_port);
+ usb_set_serial_port_data(port, mos7840_port);
/* enable rx_disable bit in control register */
status = mos7840_get_reg_sync(port,
mos7840_port->ControlRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading ControlReg failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Reading success val is %x, status%d\n", Data, status);
Data |= 0x08; /* setting driver done bit */
@@ -2148,7 +1623,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing ControlReg failed(rx_disable) status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ControlReg Writing success(rx_disable) status%d\n", status);
@@ -2159,7 +1634,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 0), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR0 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR0 Writing success status%d\n", status);
@@ -2168,7 +1643,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 1), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR1 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR1 Writing success status%d\n", status);
@@ -2177,7 +1652,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16) (mos7840_port->DcrRegOffset + 2), Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing DCR2 failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "DCR2 Writing success status%d\n", status);
@@ -2186,7 +1661,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_reg_sync(port, CLK_START_VALUE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing CLK_START_VALUE_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "CLK_START_VALUE_REGISTER Writing success status%d\n", status);
@@ -2203,7 +1678,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, SCRATCH_PAD_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing SCRATCH_PAD_REGISTER failed status-0x%x\n", status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "SCRATCH_PAD_REGISTER Writing success status%d\n", status);
@@ -2217,7 +1692,7 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num)));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 2, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 2, status);
} else {
@@ -2229,27 +1704,16 @@ static int mos7840_port_probe(struct usb_serial_port *port)
(__u16)(ZLP_REG1 + ((__u16) mos7840_port->port_num) - 0x1));
if (status < 0) {
dev_dbg(&port->dev, "Writing ZLP_REG%d failed status-0x%x\n", pnum + 1, status);
- goto out;
+ goto error;
} else
dev_dbg(&port->dev, "ZLP_REG%d Writing success status%d\n", pnum + 1, status);
}
- mos7840_port->control_urb = usb_alloc_urb(0, GFP_KERNEL);
- mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
- mos7840_port->dr = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
- if (!mos7840_port->control_urb || !mos7840_port->ctrl_buf ||
- !mos7840_port->dr) {
- status = -ENOMEM;
- goto error;
- }
- mos7840_port->has_led = false;
+ mos7840_port->has_led = device_flags & MCS_LED;
/* Initialize LED timers */
- if (device_type == MOSCHIP_DEVICE_ID_7810) {
- mos7840_port->has_led = true;
-
+ if (mos7840_port->has_led) {
mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
GFP_KERNEL);
@@ -2269,29 +1733,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
-out:
- if (pnum == serial->num_ports - 1) {
- /* Zero Length flag enable */
- Data = 0x0f;
- status = mos7840_set_reg_sync(serial->port[0], ZLP_REG5, Data);
- if (status < 0) {
- dev_dbg(&port->dev, "Writing ZLP_REG5 failed status-0x%x\n", status);
- goto error;
- } else
- dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
- /* setting configuration feature to one */
- usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
- MOS_WDR_TIMEOUT);
- }
return 0;
error:
kfree(mos7840_port->led_dr);
usb_free_urb(mos7840_port->led_urb);
- kfree(mos7840_port->dr);
- kfree(mos7840_port->ctrl_buf);
- usb_free_urb(mos7840_port->control_urb);
kfree(mos7840_port);
return status;
@@ -2299,9 +1745,7 @@ error:
static int mos7840_port_remove(struct usb_serial_port *port)
{
- struct moschip_port *mos7840_port;
-
- mos7840_port = mos7840_get_port_private(port);
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
if (mos7840_port->has_led) {
/* Turn off LED */
@@ -2314,10 +1758,7 @@ static int mos7840_port_remove(struct usb_serial_port *port)
usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->led_dr);
}
- usb_kill_urb(mos7840_port->control_urb);
- usb_free_urb(mos7840_port->control_urb);
- kfree(mos7840_port->ctrl_buf);
- kfree(mos7840_port->dr);
+
kfree(mos7840_port);
return 0;
@@ -2340,18 +1781,17 @@ static struct usb_serial_driver moschip7840_4port_device = {
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
.probe = mos7840_probe,
+ .attach = mos7840_attach,
.ioctl = mos7840_ioctl,
.get_serial = mos7840_get_serial_info,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
.tiocmget = mos7840_tiocmget,
.tiocmset = mos7840_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
.get_icount = usb_serial_generic_get_icount,
.port_probe = mos7840_port_probe,
.port_remove = mos7840_port_remove,
.read_bulk_callback = mos7840_bulk_in_callback,
- .read_int_callback = mos7840_interrupt_callback,
};
static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 06ab016be0b6..e9491d400a24 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -197,6 +197,7 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
#define DELL_PRODUCT_5821E 0x81d7
+#define DELL_PRODUCT_5821E_ESIM 0x81e0
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1044,6 +1045,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1990,6 +1993,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9d27b76c5c6e..aab737e1e7b6 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -47,6 +47,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GC) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GB) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GT) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GL) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GE) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GS) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
@@ -130,9 +136,11 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define VENDOR_WRITE_REQUEST_TYPE 0x40
#define VENDOR_WRITE_REQUEST 0x01
+#define VENDOR_WRITE_NREQUEST 0x80
#define VENDOR_READ_REQUEST_TYPE 0xc0
#define VENDOR_READ_REQUEST 0x01
+#define VENDOR_READ_NREQUEST 0x81
#define UART_STATE_INDEX 8
#define UART_STATE_MSR_MASK 0x8b
@@ -148,11 +156,24 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define PL2303_FLOWCTRL_MASK 0xf0
+#define PL2303_READ_TYPE_HX_STATUS 0x8080
+
+#define PL2303_HXN_RESET_REG 0x07
+#define PL2303_HXN_RESET_UPSTREAM_PIPE 0x02
+#define PL2303_HXN_RESET_DOWNSTREAM_PIPE 0x01
+
+#define PL2303_HXN_FLOWCTRL_REG 0x0a
+#define PL2303_HXN_FLOWCTRL_MASK 0x1c
+#define PL2303_HXN_FLOWCTRL_NONE 0x1c
+#define PL2303_HXN_FLOWCTRL_RTS_CTS 0x18
+#define PL2303_HXN_FLOWCTRL_XON_XOFF 0x0c
+
static void pl2303_set_break(struct usb_serial_port *port, bool enable);
enum pl2303_type {
TYPE_01, /* Type 0 and 1 (difference unknown) */
TYPE_HX, /* HX version of the pl2303 chip */
+ TYPE_HXN, /* HXN version of the pl2303 chip */
TYPE_COUNT
};
@@ -184,16 +205,26 @@ static const struct pl2303_type_data pl2303_type_data[TYPE_COUNT] = {
[TYPE_HX] = {
.max_baud_rate = 12000000,
},
+ [TYPE_HXN] = {
+ .max_baud_rate = 12000000,
+ },
};
static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
unsigned char buf[1])
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_READ_NREQUEST;
+ else
+ request = VENDOR_READ_REQUEST;
+
res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ request, VENDOR_READ_REQUEST_TYPE,
value, 0, buf, 1, 100);
if (res != 1) {
dev_err(dev, "%s - failed to read [%04x]: %d\n", __func__,
@@ -211,13 +242,20 @@ static int pl2303_vendor_read(struct usb_serial *serial, u16 value,
static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct device *dev = &serial->interface->dev;
+ u8 request;
int res;
dev_dbg(dev, "%s - [%04x] = %02x\n", __func__, value, index);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ request = VENDOR_WRITE_NREQUEST;
+ else
+ request = VENDOR_WRITE_REQUEST;
+
res = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- VENDOR_WRITE_REQUEST, VENDOR_WRITE_REQUEST_TYPE,
+ request, VENDOR_WRITE_REQUEST_TYPE,
value, index, NULL, 0, 100);
if (res) {
dev_err(dev, "%s - failed to write [%04x]: %d\n", __func__,
@@ -230,6 +268,7 @@ static int pl2303_vendor_write(struct usb_serial *serial, u16 value, u16 index)
static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
{
+ struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int ret = 0;
u8 *buf;
@@ -237,7 +276,11 @@ static int pl2303_update_reg(struct usb_serial *serial, u8 reg, u8 mask, u8 val)
if (!buf)
return -ENOMEM;
- ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN])
+ ret = pl2303_vendor_read(serial, reg, buf);
+ else
+ ret = pl2303_vendor_read(serial, reg | 0x80, buf);
+
if (ret)
goto out_free;
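pl2303_update_reg() is a read-modify-write helper: it reads the register back (using the plain register number on HXN, or reg | 0x80 on the older types), clears the bits in mask, merges val, and writes the result. The termios code later in this patch uses it like so to select RTS/CTS flow control on HXN:

pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
		  PL2303_HXN_FLOWCTRL_MASK,
		  PL2303_HXN_FLOWCTRL_RTS_CTS);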
@@ -320,6 +363,7 @@ static int pl2303_startup(struct usb_serial *serial)
struct pl2303_serial_private *spriv;
enum pl2303_type type = TYPE_01;
unsigned char *buf;
+ int res;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
if (!spriv)
@@ -341,26 +385,37 @@ static int pl2303_startup(struct usb_serial *serial)
type = TYPE_01; /* type 1 */
dev_dbg(&serial->interface->dev, "device type: %d\n", type);
+ if (type == TYPE_HX) {
+ res = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
+ PL2303_READ_TYPE_HX_STATUS, 0, buf, 1, 100);
+ if (res != 1)
+ type = TYPE_HXN;
+ }
+
spriv->type = &pl2303_type_data[type];
spriv->quirks = (unsigned long)usb_get_serial_data(serial);
spriv->quirks |= spriv->type->quirks;
usb_set_serial_data(serial, spriv);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 0);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_write(serial, 0x0404, 1);
- pl2303_vendor_read(serial, 0x8484, buf);
- pl2303_vendor_read(serial, 0x8383, buf);
- pl2303_vendor_write(serial, 0, 1);
- pl2303_vendor_write(serial, 1, 0);
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
- pl2303_vendor_write(serial, 2, 0x24);
- else
- pl2303_vendor_write(serial, 2, 0x44);
+ if (type != TYPE_HXN) {
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 0);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_write(serial, 0x0404, 1);
+ pl2303_vendor_read(serial, 0x8484, buf);
+ pl2303_vendor_read(serial, 0x8383, buf);
+ pl2303_vendor_write(serial, 0, 1);
+ pl2303_vendor_write(serial, 1, 0);
+ if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ pl2303_vendor_write(serial, 2, 0x24);
+ else
+ pl2303_vendor_write(serial, 2, 0x44);
+ }
kfree(buf);
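The HX/HXN split is detected behaviorally rather than from a descriptor field: only legacy HX parts answer the one-byte vendor read at 0x8080, so a short or failed transfer reclassifies the device as HXN, which then also skips the legacy 0x8484/0x8383 init sequence above. Distilled into a hypothetical helper for clarity:

static bool example_is_hxn(struct usb_serial *serial, unsigned char *buf)
{
	int res;

	/* Only legacy HX chips answer this one-byte status read. */
	res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
			      VENDOR_READ_REQUEST, VENDOR_READ_REQUEST_TYPE,
			      PL2303_READ_TYPE_HX_STATUS, 0, buf, 1, 100);

	return res != 1;
}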
@@ -719,14 +774,31 @@ static void pl2303_set_termios(struct tty_struct *tty,
}
if (C_CRTSCTS(tty)) {
- if (spriv->quirks & PL2303_QUIRK_LEGACY)
+ if (spriv->quirks & PL2303_QUIRK_LEGACY) {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x40);
- else
+ } else if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_RTS_CTS);
+ } else {
pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0x60);
+ }
} else if (pl2303_enable_xonxoff(tty, spriv->type)) {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_XON_XOFF);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0xc0);
+ }
} else {
- pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
+ PL2303_HXN_FLOWCTRL_MASK,
+ PL2303_HXN_FLOWCTRL_NONE);
+ } else {
+ pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK, 0);
+ }
}
kfree(buf);
@@ -767,8 +839,14 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
/* reset upstream data pipes */
- pl2303_vendor_write(serial, 8, 0);
- pl2303_vendor_write(serial, 9, 0);
+ if (spriv->type == &pl2303_type_data[TYPE_HXN]) {
+ pl2303_vendor_write(serial, PL2303_HXN_RESET_REG,
+ PL2303_HXN_RESET_UPSTREAM_PIPE |
+ PL2303_HXN_RESET_DOWNSTREAM_PIPE);
+ } else {
+ pl2303_vendor_write(serial, 8, 0);
+ pl2303_vendor_write(serial, 9, 0);
+ }
}
/* Setup termios */
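
The set_termios hunk above handles the same three flow-control modes twice over, once per register layout. A consolidated sketch of that mapping, for illustration only: the PL2303_HXN_* constants are introduced by this patch in pl2303.c outside this excerpt, and the magic values are the ones visible in the hunks.

	/* Illustrative helper: legacy types program register 0 under
	 * PL2303_FLOWCTRL_MASK, HXN uses its own flow-control register. */
	enum pl2303_flowctrl_mode { FLOW_NONE, FLOW_RTS_CTS, FLOW_XON_XOFF };

	static int pl2303_set_flowctrl(struct usb_serial *serial,
				       struct pl2303_serial_private *spriv,
				       enum pl2303_flowctrl_mode mode)
	{
		static const u8 hxn_val[] = {
			[FLOW_NONE]	= PL2303_HXN_FLOWCTRL_NONE,
			[FLOW_RTS_CTS]	= PL2303_HXN_FLOWCTRL_RTS_CTS,
			[FLOW_XON_XOFF]	= PL2303_HXN_FLOWCTRL_XON_XOFF,
		};
		static const u8 legacy_val[] = {
			[FLOW_NONE]	= 0,
			[FLOW_RTS_CTS]	= 0x60,
			[FLOW_XON_XOFF]	= 0xc0,
		};

		if (spriv->type == &pl2303_type_data[TYPE_HXN])
			return pl2303_update_reg(serial, PL2303_HXN_FLOWCTRL_REG,
						 PL2303_HXN_FLOWCTRL_MASK,
						 hxn_val[mode]);

		/* Legacy parts use a different RTS/CTS value. */
		if (mode == FLOW_RTS_CTS && (spriv->quirks & PL2303_QUIRK_LEGACY))
			return pl2303_update_reg(serial, 0,
						 PL2303_FLOWCTRL_MASK, 0x40);

		return pl2303_update_reg(serial, 0, PL2303_FLOWCTRL_MASK,
					 legacy_val[mode]);
	}
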
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index b0175f17d1a2..a019ea7e6e0e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -9,6 +9,12 @@
#define PL2303_VENDOR_ID 0x067b
#define PL2303_PRODUCT_ID 0x2303
#define PL2303_PRODUCT_ID_TB 0x2304
+#define PL2303_PRODUCT_ID_GC 0x23a3
+#define PL2303_PRODUCT_ID_GB 0x23b3
+#define PL2303_PRODUCT_ID_GT 0x23c3
+#define PL2303_PRODUCT_ID_GL 0x23d3
+#define PL2303_PRODUCT_ID_GE 0x23e3
+#define PL2303_PRODUCT_ID_GS 0x23f3
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 8b1b73065421..98c1aa594e6c 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -561,7 +561,7 @@ static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
residue = min(residue, transfer_length);
if (us->srb != NULL)
scsi_set_resid(us->srb, max(scsi_get_resid(us->srb),
- (int)residue));
+ residue));
}
if (bcs->Status != US_BULK_STAT_OK)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 54a3c8195c96..66a4dcbbb1fc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -369,8 +369,8 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* check for state-transition errors */
if (us->srb != NULL) {
- printk(KERN_ERR "usb-storage: Error in %s: us->srb = %p\n",
- __func__, us->srb);
+ dev_err(&us->pusb_intf->dev,
+ "Error in %s: us->srb = %p\n", __func__, us->srb);
return SCSI_MLQUEUE_HOST_BUSY;
}
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 96cb0409dd89..238a8088e17f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1284,8 +1284,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
} else {
residue = min(residue, transfer_length);
- scsi_set_resid(srb, max(scsi_get_resid(srb),
- (int) residue));
+ scsi_set_resid(srb, max(scsi_get_resid(srb), residue));
}
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 34538253f12c..95bba3ba6ac6 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -825,6 +825,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->wce_default_on = 1;
}
+ /* Some disks cannot handle READ_CAPACITY_16 */
+ if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
+ sdev->no_read_capacity_16 = 1;
+
/*
* Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
@@ -834,6 +838,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->fix_capacity = 1;
/*
+ * In some cases we have to guess
+ */
+ if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
+ sdev->guess_capacity = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
@@ -859,7 +869,6 @@ static struct scsi_host_template uas_host_template = {
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.this_id = -1,
- .sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
.dma_boundary = PAGE_SIZE - 1,
};
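
For context, the two new checks above are driven by the same US_FL_* quirk flags that unusual_uas.h (next hunk) assigns per device. A hypothetical entry and the resulting SCSI-layer hints, as a sketch only; the 0x1234/0x5678 IDs and names are made up:

	UNUSUAL_DEV(0x1234, 0x5678, 0x0000, 0x9999,
		    "Example Vendor",
		    "Example Product",
		    USB_SC_DEVICE, USB_PR_DEVICE, NULL,
		    US_FL_NO_READ_CAPACITY_16 | US_FL_CAPACITY_HEURISTICS),

	/* ...which uas_slave_configure() now turns into: */
	sdev->no_read_capacity_16 = 1;	/* avoid READ CAPACITY(16) */
	sdev->guess_capacity = 1;	/* capacity may be off by one */
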
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d0bdebd87ce3..1b23741036ee 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -87,12 +87,15 @@ UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
-/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+/*
+ * Initially Reported-by: Takeo Nakayama <javhera@gmx.com>
+ * UAS Ignore Reported by Steven Ellis <sellis@redhat.com>
+ */
UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
"JMicron",
"JMS566",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_REPORT_OPCODES),
+ US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS),
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 895e2418de53..b4f2aac7ae8a 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -50,6 +50,17 @@ source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
+config TYPEC_HD3SS3220
+ tristate "TI HD3SS3220 Type-C DRP Port controller driver"
+ depends on I2C
+ depends on USB_ROLE_SWITCH
+ help
+ Say Y or M here if your system has a TI HD3SS3220 Type-C DRP port
+ controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called hd3ss3220.ko.
+
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 6696b7263d61..7753a5c3cd46 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -4,5 +4,6 @@ typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
+obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 94a3eda62add..7ece6ca6e690 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -53,6 +53,7 @@ struct typec_port {
struct typec_mux *mux;
const struct typec_capability *cap;
+ const struct typec_operations *ops;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
@@ -955,7 +956,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EOPNOTSUPP;
}
- if (!port->cap->try_role) {
+ if (!port->ops || !port->ops->try_role) {
dev_dbg(dev, "Setting preferred role not supported\n");
return -EOPNOTSUPP;
}
@@ -968,7 +969,7 @@ preferred_role_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- ret = port->cap->try_role(port->cap, role);
+ ret = port->ops->try_role(port, role);
if (ret)
return ret;
@@ -999,7 +1000,7 @@ static ssize_t data_role_store(struct device *dev,
struct typec_port *port = to_typec_port(dev);
int ret;
- if (!port->cap->dr_set) {
+ if (!port->ops || !port->ops->dr_set) {
dev_dbg(dev, "data role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1014,7 +1015,7 @@ static ssize_t data_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->dr_set(port->cap, ret);
+ ret = port->ops->dr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1049,7 +1050,7 @@ static ssize_t power_role_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->pr_set) {
+ if (!port->ops || !port->ops->pr_set) {
dev_dbg(dev, "power role swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1071,7 +1072,7 @@ static ssize_t power_role_store(struct device *dev,
goto unlock_and_ret;
}
- ret = port->cap->pr_set(port->cap, ret);
+ ret = port->ops->pr_set(port, ret);
if (ret)
goto unlock_and_ret;
@@ -1102,7 +1103,8 @@ port_type_store(struct device *dev, struct device_attribute *attr,
int ret;
enum typec_port_type type;
- if (!port->cap->port_type_set || port->cap->type != TYPEC_PORT_DRP) {
+ if (port->cap->type != TYPEC_PORT_DRP ||
+ !port->ops || !port->ops->port_type_set) {
dev_dbg(dev, "changing port type not supported\n");
return -EOPNOTSUPP;
}
@@ -1119,7 +1121,7 @@ port_type_store(struct device *dev, struct device_attribute *attr,
goto unlock_and_ret;
}
- ret = port->cap->port_type_set(port->cap, type);
+ ret = port->ops->port_type_set(port, type);
if (ret)
goto unlock_and_ret;
@@ -1175,7 +1177,7 @@ static ssize_t vconn_source_store(struct device *dev,
return -EOPNOTSUPP;
}
- if (!port->cap->vconn_set) {
+ if (!port->ops || !port->ops->vconn_set) {
dev_dbg(dev, "VCONN swapping not supported\n");
return -EOPNOTSUPP;
}
@@ -1184,7 +1186,7 @@ static ssize_t vconn_source_store(struct device *dev,
if (ret)
return ret;
- ret = port->cap->vconn_set(port->cap, (enum typec_role)source);
+ ret = port->ops->vconn_set(port, (enum typec_role)source);
if (ret)
return ret;
@@ -1278,6 +1280,7 @@ static void typec_release(struct device *dev)
ida_destroy(&port->mode_ids);
typec_switch_put(port->sw);
typec_mux_put(port->mux);
+ kfree(port->cap);
kfree(port);
}
@@ -1487,6 +1490,16 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
/* --------------------------------------- */
/**
+ * typec_get_drvdata - Return private driver data pointer
+ * @port: USB Type-C port
+ */
+void *typec_get_drvdata(struct typec_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+EXPORT_SYMBOL_GPL(typec_get_drvdata);
+
+/**
* typec_port_register_altmode - Register USB Type-C Port Alternate Mode
* @port: USB Type-C Port that supports the alternate mode
* @desc: Description of the alternate mode
@@ -1579,7 +1592,7 @@ struct typec_port *typec_register_port(struct device *parent,
mutex_init(&port->port_type_lock);
port->id = id;
- port->cap = cap;
+ port->ops = cap->ops;
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
@@ -1589,6 +1602,13 @@ struct typec_port *typec_register_port(struct device *parent,
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
+ dev_set_drvdata(&port->dev, cap->driver_data);
+
+ port->cap = kmemdup(cap, sizeof(*cap), GFP_KERNEL);
+ if (!port->cap) {
+ put_device(&port->dev);
+ return ERR_PTR(-ENOMEM);
+ }
port->sw = typec_switch_get(&port->dev);
if (IS_ERR(port->sw)) {
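
Taken together, the class.c changes above move the port callbacks out of struct typec_capability into a shared const struct typec_operations, pass the struct typec_port itself to each callback, and have typec_register_port() copy the capability (the kmemdup() above), so callers may build it on the stack. A minimal registration sketch under those assumptions; the "foo" driver and its fields are hypothetical:

	#include <linux/err.h>
	#include <linux/usb/typec.h>

	struct foo {
		struct typec_port *port;
		/* hardware handles would live here */
	};

	static int foo_dr_set(struct typec_port *port, enum typec_data_role role)
	{
		struct foo *foo = typec_get_drvdata(port); /* driver_data cookie */

		/* program foo's hardware for @role here, then report it back */
		typec_set_data_role(foo->port, role);

		return 0;
	}

	static const struct typec_operations foo_ops = {
		.dr_set = foo_dr_set,
	};

	static int foo_register(struct device *dev, struct foo *foo)
	{
		struct typec_capability cap = { };	/* copied by the core */

		cap.type = TYPEC_PORT_DRP;
		cap.data = TYPEC_PORT_DRD;
		cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
		cap.driver_data = foo;		/* for typec_get_drvdata() */
		cap.ops = &foo_ops;

		foo->port = typec_register_port(dev, &cap);
		return PTR_ERR_OR_ZERO(foo->port);
	}

The hd3ss3220, tcpm, tps6598x and ucsi conversions below all follow this shape.
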
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
new file mode 100644
index 000000000000..323dfa8160ab
--- /dev/null
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TI HD3SS3220 Type-C DRP Port Controller Driver
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/usb/role.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/usb/typec.h>
+#include <linux/delay.h>
+
+#define HD3SS3220_REG_CN_STAT_CTRL 0x09
+#define HD3SS3220_REG_GEN_CTRL 0x0A
+#define HD3SS3220_REG_DEV_REV 0xA0
+
+/* Register HD3SS3220_REG_CN_STAT_CTRL */
+#define HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_DFP BIT(6)
+#define HD3SS3220_REG_CN_STAT_CTRL_AS_UFP BIT(7)
+#define HD3SS3220_REG_CN_STAT_CTRL_TO_ACCESSORY (BIT(7) | BIT(6))
+#define HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS BIT(4)
+
+/* Register HD3SS3220_REG_GEN_CTRL */
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK (BIT(2) | BIT(1))
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT 0x00
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK BIT(1)
+#define HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC (BIT(2) | BIT(1))
+
+struct hd3ss3220 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_role_switch *role_sw;
+ struct typec_port *port;
+};
+
+static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
+{
+ return regmap_update_bits(hd3ss3220->regmap, HD3SS3220_REG_GEN_CTRL,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_MASK,
+ src_pref);
+}
+
+static enum usb_role hd3ss3220_get_attached_state(struct hd3ss3220 *hd3ss3220)
+{
+ unsigned int reg_val;
+ enum usb_role attached_state;
+ int ret;
+
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
+ &reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (reg_val & HD3SS3220_REG_CN_STAT_CTRL_ATTACHED_STATE_MASK) {
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_DFP:
+ attached_state = USB_ROLE_HOST;
+ break;
+ case HD3SS3220_REG_CN_STAT_CTRL_AS_UFP:
+ attached_state = USB_ROLE_DEVICE;
+ break;
+ default:
+ attached_state = USB_ROLE_NONE;
+ break;
+ }
+
+ return attached_state;
+}
+
+static int hd3ss3220_dr_set(struct typec_port *port, enum typec_data_role role)
+{
+ struct hd3ss3220 *hd3ss3220 = typec_get_drvdata(port);
+ enum usb_role role_val;
+ int pref, ret = 0;
+
+ if (role == TYPEC_HOST) {
+ role_val = USB_ROLE_HOST;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SRC;
+ } else {
+ role_val = USB_ROLE_DEVICE;
+ pref = HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_TRY_SNK;
+ }
+
+ ret = hd3ss3220_set_source_pref(hd3ss3220, pref);
+ usleep_range(10, 100);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_val);
+ typec_set_data_role(hd3ss3220->port, role);
+
+ return ret;
+}
+
+static const struct typec_operations hd3ss3220_ops = {
+ .dr_set = hd3ss3220_dr_set
+};
+
+static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
+{
+ enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
+
+ usb_role_switch_set_role(hd3ss3220->role_sw, role_state);
+ if (role_state == USB_ROLE_NONE)
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+
+ switch (role_state) {
+ case USB_ROLE_HOST:
+ typec_set_data_role(hd3ss3220->port, TYPEC_HOST);
+ break;
+ case USB_ROLE_DEVICE:
+ typec_set_data_role(hd3ss3220->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+}
+
+static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
+{
+ int err;
+
+ hd3ss3220_set_role(hd3ss3220);
+ err = regmap_update_bits_base(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ NULL, false, true);
+ if (err < 0)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hd3ss3220_irq_handler(int irq, void *data)
+{
+ struct i2c_client *client = to_i2c_client(data);
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ return hd3ss3220_irq(hd3ss3220);
+}
+
+static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0A,
+};
+
+static int hd3ss3220_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct typec_capability typec_cap = { };
+ struct hd3ss3220 *hd3ss3220;
+ struct fwnode_handle *connector;
+ int ret;
+ unsigned int data;
+
+ hd3ss3220 = devm_kzalloc(&client->dev, sizeof(struct hd3ss3220),
+ GFP_KERNEL);
+ if (!hd3ss3220)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, hd3ss3220);
+
+ hd3ss3220->dev = &client->dev;
+ hd3ss3220->regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(hd3ss3220->regmap))
+ return PTR_ERR(hd3ss3220->regmap);
+
+ hd3ss3220_set_source_pref(hd3ss3220,
+ HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT);
+ connector = device_get_named_child_node(hd3ss3220->dev, "connector");
+ if (!connector)
+ return -ENODEV;
+
+ hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector);
+ if (IS_ERR(hd3ss3220->role_sw)) {
+ ret = PTR_ERR(hd3ss3220->role_sw);
+ goto err_put_fwnode;
+ }
+
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = hd3ss3220;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.ops = &hd3ss3220_ops;
+ typec_cap.fwnode = connector;
+
+ hd3ss3220->port = typec_register_port(&client->dev, &typec_cap);
+ if (IS_ERR(hd3ss3220->port)) {
+ ret = PTR_ERR(hd3ss3220->port);
+ goto err_put_role;
+ }
+
+ hd3ss3220_set_role(hd3ss3220);
+ ret = regmap_read(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL, &data);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ if (data & HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS) {
+ ret = regmap_write(hd3ss3220->regmap,
+ HD3SS3220_REG_CN_STAT_CTRL,
+ data | HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
+ if (ret < 0)
+ goto err_unreg_port;
+ }
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ hd3ss3220_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "hd3ss3220", &client->dev);
+ if (ret)
+ goto err_unreg_port;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
+ if (ret < 0)
+ goto err_unreg_port;
+
+ fwnode_handle_put(connector);
+
+ dev_info(&client->dev, "probed revision=0x%x\n", ret);
+
+ return 0;
+err_unreg_port:
+ typec_unregister_port(hd3ss3220->port);
+err_put_role:
+ usb_role_switch_put(hd3ss3220->role_sw);
+err_put_fwnode:
+ fwnode_handle_put(connector);
+
+ return ret;
+}
+
+static int hd3ss3220_remove(struct i2c_client *client)
+{
+ struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+
+ typec_unregister_port(hd3ss3220->port);
+ usb_role_switch_put(hd3ss3220->role_sw);
+
+ return 0;
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "ti,hd3ss3220"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver hd3ss3220_driver = {
+ .driver = {
+ .name = "hd3ss3220",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = hd3ss3220_probe,
+ .remove = hd3ss3220_remove,
+};
+
+module_i2c_driver(hd3ss3220_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das@bp.renesas.com>");
+MODULE_DESCRIPTION("TI HD3SS3220 DRP Port Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 5f61d9977a15..56fc356bc55c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -380,9 +380,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SNK_UNATTACHED;
else if (port->try_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- else if (port->tcpc->config &&
- port->tcpc->config->default_role == TYPEC_SINK)
- return SNK_UNATTACHED;
/* Fall through to return SRC_UNATTACHED */
} else if (port->port_type == TYPEC_PORT_SNK) {
return SNK_UNATTACHED;
@@ -390,12 +387,6 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SRC_UNATTACHED;
}
-static inline
-struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
-{
- return container_of(cap, struct tcpm_port, typec_caps);
-}
-
static bool tcpm_port_is_disconnected(struct tcpm_port *port)
{
return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
@@ -3970,10 +3961,9 @@ void tcpm_pd_hard_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
-static int tcpm_dr_set(const struct typec_capability *cap,
- enum typec_data_role data)
+static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4038,10 +4028,9 @@ swap_unlock:
return ret;
}
-static int tcpm_pr_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4082,10 +4071,9 @@ swap_unlock:
return ret;
}
-static int tcpm_vconn_set(const struct typec_capability *cap,
- enum typec_role role)
+static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
int ret;
mutex_lock(&port->swap_lock);
@@ -4122,16 +4110,16 @@ swap_unlock:
return ret;
}
-static int tcpm_try_role(const struct typec_capability *cap, int role)
+static int tcpm_try_role(struct typec_port *p, int role)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
struct tcpc_dev *tcpc = port->tcpc;
int ret = 0;
mutex_lock(&port->lock);
if (tcpc->try_role)
ret = tcpc->try_role(tcpc, role);
- if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
+ if (!ret)
port->try_role = role;
port->try_src_count = 0;
port->try_snk_count = 0;
@@ -4331,10 +4319,9 @@ static void tcpm_init(struct tcpm_port *port)
tcpm_set_state(port, PORT_RESET, 0);
}
-static int tcpm_port_type_set(const struct typec_capability *cap,
- enum typec_port_type type)
+static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
{
- struct tcpm_port *port = typec_cap_to_tcpm(cap);
+ struct tcpm_port *port = typec_get_drvdata(p);
mutex_lock(&port->lock);
if (type == port->port_type)
@@ -4359,6 +4346,14 @@ port_unlock:
return 0;
}
+static const struct typec_operations tcpm_ops = {
+ .try_role = tcpm_try_role,
+ .dr_set = tcpm_dr_set,
+ .pr_set = tcpm_pr_set,
+ .vconn_set = tcpm_vconn_set,
+ .port_type_set = tcpm_port_type_set
+};
+
void tcpm_tcpc_reset(struct tcpm_port *port)
{
mutex_lock(&port->lock);
@@ -4368,34 +4363,6 @@ void tcpm_tcpc_reset(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
-static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
- unsigned int nr_pdo)
-{
- unsigned int i;
-
- if (nr_pdo > PDO_MAX_OBJECTS)
- nr_pdo = PDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_pdo; i++)
- dest_pdo[i] = src_pdo[i];
-
- return nr_pdo;
-}
-
-static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
- unsigned int nr_vdo)
-{
- unsigned int i;
-
- if (nr_vdo > VDO_MAX_OBJECTS)
- nr_vdo = VDO_MAX_OBJECTS;
-
- for (i = 0; i < nr_vdo; i++)
- dest_vdo[i] = src_vdo[i];
-
- return nr_vdo;
-}
-
static int tcpm_fw_get_caps(struct tcpm_port *port,
struct fwnode_handle *fwnode)
{
@@ -4698,35 +4665,10 @@ static int devm_tcpm_psy_register(struct tcpm_port *port)
return PTR_ERR_OR_ZERO(port->psy);
}
-static int tcpm_copy_caps(struct tcpm_port *port,
- const struct tcpc_config *tcfg)
-{
- if (tcpm_validate_caps(port, tcfg->src_pdo, tcfg->nr_src_pdo) ||
- tcpm_validate_caps(port, tcfg->snk_pdo, tcfg->nr_snk_pdo))
- return -EINVAL;
-
- port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcfg->src_pdo,
- tcfg->nr_src_pdo);
- port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcfg->snk_pdo,
- tcfg->nr_snk_pdo);
-
- port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcfg->snk_vdo,
- tcfg->nr_snk_vdo);
-
- port->operating_snk_mw = tcfg->operating_snk_mw;
-
- port->typec_caps.prefer_role = tcfg->default_role;
- port->typec_caps.type = tcfg->type;
- port->typec_caps.data = tcfg->data;
- port->self_powered = tcfg->self_powered;
-
- return 0;
-}
-
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
- int i, err;
+ int err;
if (!dev || !tcpc ||
!tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
@@ -4759,24 +4701,16 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
tcpm_debugfs_init(port);
err = tcpm_fw_get_caps(port, tcpc->fwnode);
- if ((err < 0) && tcpc->config)
- err = tcpm_copy_caps(port, tcpc->config);
if (err < 0)
goto out_destroy_wq;
- if (!tcpc->config || !tcpc->config->try_role_hw)
- port->try_role = port->typec_caps.prefer_role;
- else
- port->try_role = TYPEC_NO_PREFERRED_ROLE;
+ port->try_role = port->typec_caps.prefer_role;
port->typec_caps.fwnode = tcpc->fwnode;
port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
- port->typec_caps.dr_set = tcpm_dr_set;
- port->typec_caps.pr_set = tcpm_pr_set;
- port->typec_caps.vconn_set = tcpm_vconn_set;
- port->typec_caps.try_role = tcpm_try_role;
- port->typec_caps.port_type_set = tcpm_port_type_set;
+ port->typec_caps.driver_data = port;
+ port->typec_caps.ops = &tcpm_ops;
port->partner_desc.identity = &port->partner_ident;
port->port_type = port->typec_caps.type;
@@ -4797,29 +4731,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
goto out_role_sw_put;
}
- if (tcpc->config && tcpc->config->alt_modes) {
- const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
-
- i = 0;
- while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
- struct typec_altmode *alt;
-
- alt = typec_port_register_altmode(port->typec_port,
- paltmode);
- if (IS_ERR(alt)) {
- tcpm_log(port,
- "%s: failed to register port alternate mode 0x%x",
- dev_name(dev), paltmode->svid);
- break;
- }
- typec_altmode_set_drvdata(alt, port);
- alt->ops = &tcpm_altmode_ops;
- port->port_altmode[i] = alt;
- i++;
- paltmode++;
- }
- }
-
mutex_lock(&port->lock);
tcpm_init(port);
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index a38d1409f15b..0698addd1185 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -94,7 +94,6 @@ struct tps6598x {
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
- struct typec_capability typec_cap;
};
/*
@@ -307,11 +306,10 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
return 0;
}
-static int
-tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role)
+static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -338,11 +336,10 @@ out_unlock:
return ret;
}
-static int
-tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role)
+static int tps6598x_pr_set(struct typec_port *port, enum typec_role role)
{
- struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";
+ struct tps6598x *tps = typec_get_drvdata(port);
u32 status;
int ret;
@@ -369,6 +366,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations tps6598x_ops = {
+ .dr_set = tps6598x_dr_set,
+ .pr_set = tps6598x_pr_set,
+};
+
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
@@ -448,6 +450,7 @@ static const struct regmap_config tps6598x_regmap_config = {
static int tps6598x_probe(struct i2c_client *client)
{
+ struct typec_capability typec_cap = { };
struct tps6598x *tps;
u32 status;
u32 conf;
@@ -492,40 +495,40 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- tps->typec_cap.revision = USB_TYPEC_REV_1_2;
- tps->typec_cap.pd_revision = 0x200;
- tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
- tps->typec_cap.pr_set = tps6598x_pr_set;
- tps->typec_cap.dr_set = tps6598x_dr_set;
+ typec_cap.revision = USB_TYPEC_REV_1_2;
+ typec_cap.pd_revision = 0x200;
+ typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ typec_cap.driver_data = tps;
+ typec_cap.ops = &tps6598x_ops;
switch (TPS_SYSCONF_PORTINFO(conf)) {
case TPS_PORTINFO_SINK_ACCESSORY:
case TPS_PORTINFO_SINK:
- tps->typec_cap.type = TYPEC_PORT_SNK;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_SNK;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_UFP_DRD:
case TPS_PORTINFO_DRP_DFP_DRD:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DRD;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DRD;
break;
case TPS_PORTINFO_DRP_UFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_UFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_UFP;
break;
case TPS_PORTINFO_DRP_DFP:
- tps->typec_cap.type = TYPEC_PORT_DRP;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_DRP;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
case TPS_PORTINFO_SOURCE:
- tps->typec_cap.type = TYPEC_PORT_SRC;
- tps->typec_cap.data = TYPEC_PORT_DFP;
+ typec_cap.type = TYPEC_PORT_SRC;
+ typec_cap.data = TYPEC_PORT_DFP;
break;
default:
return -ENODEV;
}
- tps->port = typec_register_port(&client->dev, &tps->typec_cap);
+ tps->port = typec_register_port(&client->dev, &typec_cap);
if (IS_ERR(tps->port))
return PTR_ERR(tps->port);
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index d99700cb4dca..d4d5189edfb8 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -48,7 +48,8 @@ struct ucsi_dp {
static int ucsi_displayport_enter(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ struct ucsi *ucsi = dp->con->ucsi;
+ u64 command;
u8 cur = 0;
int ret;
@@ -59,25 +60,21 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dev_warn(&p->dev,
"firmware doesn't support alternate mode overriding\n");
- mutex_unlock(&dp->con->lock);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto err_unlock;
}
- UCSI_CMD_GET_CURRENT_CAM(ctrl, dp->con->num);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
+ ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (dp->con->ucsi->ppm->data->version > 0x0100) {
- mutex_unlock(&dp->con->lock);
- return ret;
- }
+ if (ucsi->version > 0x0100)
+ goto err_unlock;
cur = 0xff;
}
if (cur != 0xff) {
- mutex_unlock(&dp->con->lock);
- if (dp->con->port_altmode[cur] == alt)
- return 0;
- return -EBUSY;
+ ret = dp->con->port_altmode[cur] == alt ? 0 : -EBUSY;
+ goto err_unlock;
}
/*
@@ -94,16 +91,17 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
dp->vdo_size = 1;
schedule_work(&dp->work);
-
+ ret = 0;
+err_unlock:
mutex_unlock(&dp->con->lock);
- return 0;
+ return ret;
}
static int ucsi_displayport_exit(struct typec_altmode *alt)
{
struct ucsi_dp *dp = typec_altmode_get_drvdata(alt);
- struct ucsi_control ctrl;
+ u64 command;
int ret = 0;
mutex_lock(&dp->con->lock);
@@ -117,8 +115,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
goto out_unlock;
}
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
- ret = ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
+ ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
if (ret < 0)
goto out_unlock;
@@ -172,14 +170,14 @@ static int ucsi_displayport_status_update(struct ucsi_dp *dp)
static int ucsi_displayport_configure(struct ucsi_dp *dp)
{
u32 pins = DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
- struct ucsi_control ctrl;
+ u64 command;
if (!dp->override)
return 0;
- ctrl.raw_cmd = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
+ command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
- return ucsi_send_command(dp->con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
}
static int ucsi_displayport_vdm(struct typec_altmode *alt,
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 1dabafb74320..48ad1dc1b1b2 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -33,17 +33,6 @@ const char *ucsi_cmd_str(u64 raw_cmd)
return ucsi_cmd_strs[(cmd >= ARRAY_SIZE(ucsi_cmd_strs)) ? 0 : cmd];
}
-static const char * const ucsi_ack_strs[] = {
- [0] = "",
- [UCSI_ACK_EVENT] = "event",
- [UCSI_ACK_CMD] = "command",
-};
-
-const char *ucsi_ack_str(u8 ack)
-{
- return ucsi_ack_strs[(ack >= ARRAY_SIZE(ucsi_ack_strs)) ? 0 : ack];
-}
-
const char *ucsi_cci_str(u32 cci)
{
if (cci & GENMASK(7, 0)) {
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index 783ec9c72055..a0d3a934d3d9 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -10,54 +10,18 @@
#include <linux/usb/typec_altmode.h>
const char *ucsi_cmd_str(u64 raw_cmd);
-const char *ucsi_ack_str(u8 ack);
const char *ucsi_cci_str(u32 cci);
const char *ucsi_recipient_str(u8 recipient);
-DECLARE_EVENT_CLASS(ucsi_log_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack),
- TP_STRUCT__entry(
- __field(u8, ack)
- ),
- TP_fast_assign(
- __entry->ack = ack;
- ),
- TP_printk("ACK %s", ucsi_ack_str(__entry->ack))
-);
-
-DEFINE_EVENT(ucsi_log_ack, ucsi_ack,
- TP_PROTO(u8 ack),
- TP_ARGS(ack)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_control,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl),
- TP_STRUCT__entry(
- __field(u64, ctrl)
- ),
- TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
- ),
- TP_printk("control=%08llx (%s)", __entry->ctrl,
- ucsi_cmd_str(__entry->ctrl))
-);
-
-DEFINE_EVENT(ucsi_log_control, ucsi_command,
- TP_PROTO(struct ucsi_control *ctrl),
- TP_ARGS(ctrl)
-);
-
DECLARE_EVENT_CLASS(ucsi_log_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret),
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret),
TP_STRUCT__entry(
__field(u64, ctrl)
__field(int, ret)
),
TP_fast_assign(
- __entry->ctrl = ctrl->raw_cmd;
+ __entry->ctrl = command;
__entry->ret = ret;
),
TP_printk("%s -> %s (err=%d)", ucsi_cmd_str(__entry->ctrl),
@@ -66,30 +30,13 @@ DECLARE_EVENT_CLASS(ucsi_log_command,
);
DEFINE_EVENT(ucsi_log_command, ucsi_run_command,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DEFINE_EVENT(ucsi_log_command, ucsi_reset_ppm,
- TP_PROTO(struct ucsi_control *ctrl, int ret),
- TP_ARGS(ctrl, ret)
-);
-
-DECLARE_EVENT_CLASS(ucsi_log_cci,
- TP_PROTO(u32 cci),
- TP_ARGS(cci),
- TP_STRUCT__entry(
- __field(u32, cci)
- ),
- TP_fast_assign(
- __entry->cci = cci;
- ),
- TP_printk("CCI=%08x %s", __entry->cci, ucsi_cci_str(__entry->cci))
-);
-
-DEFINE_EVENT(ucsi_log_cci, ucsi_notify,
- TP_PROTO(u32 cci),
- TP_ARGS(cci)
+ TP_PROTO(u64 command, int ret),
+ TP_ARGS(command, ret)
);
DECLARE_EVENT_CLASS(ucsi_log_connector_status,
@@ -109,13 +56,13 @@ DECLARE_EVENT_CLASS(ucsi_log_connector_status,
TP_fast_assign(
__entry->port = port - 1;
__entry->change = status->change;
- __entry->opmode = status->pwr_op_mode;
- __entry->connected = status->connected;
- __entry->pwr_dir = status->pwr_dir;
- __entry->partner_flags = status->partner_flags;
- __entry->partner_type = status->partner_type;
+ __entry->opmode = UCSI_CONSTAT_PWR_OPMODE(status->flags);
+ __entry->connected = !!(status->flags & UCSI_CONSTAT_CONNECTED);
+ __entry->pwr_dir = !!(status->flags & UCSI_CONSTAT_PWR_DIR);
+ __entry->partner_flags = UCSI_CONSTAT_PARTNER_FLAGS(status->flags);
+ __entry->partner_type = UCSI_CONSTAT_PARTNER_TYPE(status->flags);
__entry->request_data_obj = status->request_data_obj;
- __entry->bc_status = status->bc_status;
+ __entry->bc_status = UCSI_CONSTAT_BC_STATUS(status->pwr_status);
),
TP_printk("port%d status: change=%04x, opmode=%x, connected=%d, "
"sourcing=%d, partner_flags=%x, partner_type=%x, "
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index ba288b964dc8..4459bc68aa33 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -17,9 +17,6 @@
#include "ucsi.h"
#include "trace.h"
-#define to_ucsi_connector(_cap_) container_of(_cap_, struct ucsi_connector, \
- typec_cap)
-
/*
* UCSI_TIMEOUT_MS - PPM communication timeout
*
@@ -39,169 +36,148 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
-static inline int ucsi_sync(struct ucsi *ucsi)
+static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
- if (ucsi->ppm && ucsi->ppm->sync)
- return ucsi->ppm->sync(ucsi->ppm);
- return 0;
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_COMMAND_COMPLETE;
+
+ return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+}
+
+static int ucsi_acknowledge_connector_change(struct ucsi *ucsi)
+{
+ u64 ctrl;
+
+ ctrl = UCSI_ACK_CC_CI;
+ ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
+
+ return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
}
-static int ucsi_command(struct ucsi *ucsi, struct ucsi_control *ctrl)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
+
+static int ucsi_read_error(struct ucsi *ucsi)
{
+ u16 error;
int ret;
- trace_ucsi_command(ctrl);
+ /* Acknowledge the command that failed */
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- set_bit(COMMAND_PENDING, &ucsi->flags);
+ ret = ucsi_exec_command(ucsi, UCSI_GET_ERROR_STATUS);
+ if (ret < 0)
+ return ret;
- ret = ucsi->ppm->cmd(ucsi->ppm, ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
if (ret)
- goto err_clear_flag;
+ return ret;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS))) {
- dev_warn(ucsi->dev, "PPM NOT RESPONDING\n");
- ret = -ETIMEDOUT;
+ switch (error) {
+ case UCSI_ERROR_INCOMPATIBLE_PARTNER:
+ return -EOPNOTSUPP;
+ case UCSI_ERROR_CC_COMMUNICATION_ERR:
+ return -ECOMM;
+ case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
+ return -EPROTO;
+ case UCSI_ERROR_DEAD_BATTERY:
+ dev_warn(ucsi->dev, "Dead battery condition!\n");
+ return -EPERM;
+ case UCSI_ERROR_INVALID_CON_NUM:
+ case UCSI_ERROR_UNREGONIZED_CMD:
+ case UCSI_ERROR_INVALID_CMD_ARGUMENT:
+ dev_err(ucsi->dev, "possible UCSI driver bug %u\n", error);
+ return -EINVAL;
+ case UCSI_ERROR_OVERCURRENT:
+ dev_warn(ucsi->dev, "Overcurrent condition\n");
+ break;
+ case UCSI_ERROR_PARTNER_REJECTED_SWAP:
+ dev_warn(ucsi->dev, "Partner rejected swap\n");
+ break;
+ case UCSI_ERROR_HARD_RESET:
+ dev_warn(ucsi->dev, "Hard reset occurred\n");
+ break;
+ case UCSI_ERROR_PPM_POLICY_CONFLICT:
+ dev_warn(ucsi->dev, "PPM Policy conflict\n");
+ break;
+ case UCSI_ERROR_SWAP_REJECTED:
+ dev_warn(ucsi->dev, "Swap rejected\n");
+ break;
+ case UCSI_ERROR_UNDEFINED:
+ default:
+ dev_err(ucsi->dev, "unknown error %u\n", error);
+ break;
}
-err_clear_flag:
- clear_bit(COMMAND_PENDING, &ucsi->flags);
-
- return ret;
+ return -EIO;
}
-static int ucsi_ack(struct ucsi *ucsi, u8 ack)
+static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
{
- struct ucsi_control ctrl;
+ u32 cci;
int ret;
- trace_ucsi_ack(ack);
-
- set_bit(ACK_PENDING, &ucsi->flags);
+ ret = ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
- UCSI_CMD_ACK(ctrl, ack);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
if (ret)
- goto out_clear_bit;
+ return ret;
- /* Waiting for ACK with ACK CMD, but not with EVENT for now */
- if (ack == UCSI_ACK_EVENT)
- goto out_clear_bit;
+ if (cci & UCSI_CCI_BUSY)
+ return -EBUSY;
- if (!wait_for_completion_timeout(&ucsi->complete,
- msecs_to_jiffies(UCSI_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
+ return -EIO;
-out_clear_bit:
- clear_bit(ACK_PENDING, &ucsi->flags);
+ if (cci & UCSI_CCI_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
- if (ret)
- dev_err(ucsi->dev, "%s: failed\n", __func__);
+ if (cci & UCSI_CCI_ERROR) {
+ if (cmd == UCSI_GET_ERROR_STATUS)
+ return -EIO;
+ return ucsi_read_error(ucsi);
+ }
- return ret;
+ return UCSI_CCI_LENGTH(cci);
}
-static int ucsi_run_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+static int ucsi_run_command(struct ucsi *ucsi, u64 command,
void *data, size_t size)
{
- struct ucsi_control _ctrl;
- u8 data_length;
- u16 error;
+ u8 length;
int ret;
- ret = ucsi_command(ucsi, ctrl);
- if (ret)
- goto err;
-
- switch (ucsi->status) {
- case UCSI_IDLE:
- ret = ucsi_sync(ucsi);
- if (ret)
- dev_warn(ucsi->dev, "%s: sync failed\n", __func__);
-
- if (data)
- memcpy(data, ucsi->ppm->data->message_in, size);
-
- data_length = ucsi->ppm->data->cci.data_length;
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (!ret)
- ret = data_length;
- break;
- case UCSI_BUSY:
- /* The caller decides whether to cancel or not */
- ret = -EBUSY;
- break;
- case UCSI_ERROR:
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
- if (ret)
- break;
-
- _ctrl.raw_cmd = 0;
- _ctrl.cmd.cmd = UCSI_GET_ERROR_STATUS;
- ret = ucsi_command(ucsi, &_ctrl);
- if (ret) {
- dev_err(ucsi->dev, "reading error failed!\n");
- break;
- }
+ ret = ucsi_exec_command(ucsi, command);
+ if (ret < 0)
+ return ret;
- memcpy(&error, ucsi->ppm->data->message_in, sizeof(error));
+ length = ret;
- /* Something has really gone wrong */
- if (WARN_ON(ucsi->status == UCSI_ERROR)) {
- ret = -ENODEV;
- break;
- }
-
- ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ if (data) {
+ ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
if (ret)
- break;
-
- switch (error) {
- case UCSI_ERROR_INCOMPATIBLE_PARTNER:
- ret = -EOPNOTSUPP;
- break;
- case UCSI_ERROR_CC_COMMUNICATION_ERR:
- ret = -ECOMM;
- break;
- case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
- ret = -EPROTO;
- break;
- case UCSI_ERROR_DEAD_BATTERY:
- dev_warn(ucsi->dev, "Dead battery condition!\n");
- ret = -EPERM;
- break;
- /* The following mean a bug in this driver */
- case UCSI_ERROR_INVALID_CON_NUM:
- case UCSI_ERROR_UNREGONIZED_CMD:
- case UCSI_ERROR_INVALID_CMD_ARGUMENT:
- dev_warn(ucsi->dev,
- "%s: possible UCSI driver bug - error 0x%x\n",
- __func__, error);
- ret = -EINVAL;
- break;
- default:
- dev_warn(ucsi->dev,
- "%s: error without status\n", __func__);
- ret = -EIO;
- break;
- }
- break;
+ return ret;
}
-err:
- trace_ucsi_run_command(ctrl, ret);
+ ret = ucsi_acknowledge_command(ucsi);
+ if (ret)
+ return ret;
- return ret;
+ return length;
}
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size)
{
int ret;
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, ctrl, retval, size);
+ ret = ucsi_run_command(ucsi, command, retval, size);
mutex_unlock(&ucsi->ppm_lock);
return ret;
@@ -210,11 +186,12 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
int ucsi_resume(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command;
/* Restore UCSI notification enable mask after system resume */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- return ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+
+ return ucsi_send_command(ucsi, command, NULL, 0);
}
EXPORT_SYMBOL_GPL(ucsi_resume);
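/*
 * (Editorial sketch, not part of the patch.) With struct ucsi_control
 * gone, a command is now a plain u64: the command code in the low bits,
 * with argument fields OR'ed in through helpers such as
 * UCSI_CONNECTOR_NUMBER() from ucsi.h (outside this excerpt). For
 * example, fetching a connector's status the way the rewritten code
 * below does:
 *
 *	u64 command;
 *	int ret;
 *
 *	command = UCSI_GET_CONNECTOR_STATUS;
 *	command |= UCSI_CONNECTOR_NUMBER(con->num);
 *	ret = ucsi_send_command(ucsi, command, &con->status,
 *				sizeof(con->status));
 *	if (ret < 0)
 *		return ret;
 *
 * A non-negative return value is the length of the MESSAGE_IN data.
 */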
/* -------------------------------------------------------------------------- */
@@ -222,15 +199,15 @@ EXPORT_SYMBOL_GPL(ucsi_resume);
void ucsi_altmode_update_active(struct ucsi_connector *con)
{
const struct typec_altmode *altmode = NULL;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
u8 cur;
int i;
- UCSI_CMD_GET_CURRENT_CAM(ctrl, con->num);
- ret = ucsi_run_command(con->ucsi, &ctrl, &cur, sizeof(cur));
+ command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(con->ucsi, command, &cur, sizeof(cur));
if (ret < 0) {
- if (con->ucsi->ppm->data->version > 0x0100) {
+ if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
"GET_CURRENT_CAM command failed\n");
return;
@@ -346,7 +323,7 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
int max_altmodes = UCSI_MAX_ALTMODES;
struct typec_altmode_desc desc;
struct ucsi_altmode alt[2];
- struct ucsi_control ctrl;
+ u64 command;
int num = 1;
int ret;
int len;
@@ -364,8 +341,11 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
for (i = 0; i < max_altmodes;) {
memset(alt, 0, sizeof(alt));
- UCSI_CMD_GET_ALTERNATE_MODES(ctrl, recipient, con->num, i, 1);
- len = ucsi_run_command(con->ucsi, &ctrl, alt, sizeof(alt));
+ command = UCSI_GET_ALTERNATE_MODES;
+ command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
+ command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_ALTMODE_OFFSET(i);
+ len = ucsi_run_command(con->ucsi, command, alt, sizeof(alt));
if (len <= 0)
return len;
@@ -427,7 +407,7 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
- switch (con->status.pwr_op_mode) {
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
case UCSI_CONSTAT_PWR_OPMODE_PD:
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
break;
@@ -445,6 +425,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
static int ucsi_register_partner(struct ucsi_connector *con)
{
+ u8 pwr_opmode = UCSI_CONSTAT_PWR_OPMODE(con->status.flags);
struct typec_partner_desc desc;
struct typec_partner *partner;
@@ -453,7 +434,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
memset(&desc, 0, sizeof(desc));
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_DEBUG:
desc.accessory = TYPEC_ACCESSORY_DEBUG;
break;
@@ -464,7 +445,7 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
- desc.usb_pd = con->status.pwr_op_mode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -496,7 +477,7 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (!con->partner)
return;
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -521,29 +502,33 @@ static void ucsi_partner_change(struct ucsi_connector *con)
ucsi_altmode_update_active(con);
}
-static void ucsi_connector_change(struct work_struct *work)
+static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
work);
struct ucsi *ucsi = con->ucsi;
- struct ucsi_control ctrl;
+ enum typec_role role;
+ u64 command;
int ret;
mutex_lock(&con->lock);
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_send_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
goto out_unlock;
}
+ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
ucsi_pwr_opmode_change(con);
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
/* Complete pending power role swap */
if (!completion_done(&con->complete))
@@ -551,9 +536,9 @@ static void ucsi_connector_change(struct work_struct *work)
}
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
- typec_set_pwr_role(con->port, con->status.pwr_dir);
+ typec_set_pwr_role(con->port, role);
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -564,7 +549,7 @@ static void ucsi_connector_change(struct work_struct *work)
break;
}
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED)
ucsi_register_partner(con);
else
ucsi_unregister_partner(con);
@@ -576,14 +561,15 @@ static void ucsi_connector_change(struct work_struct *work)
* Running GET_CAM_SUPPORTED command just to make sure the PPM
* does not get stuck in case it assumes we do so.
*/
- UCSI_CMD_GET_CAM_SUPPORTED(ctrl, con->num);
- ucsi_run_command(con->ucsi, &ctrl, NULL, 0);
+ command = UCSI_GET_CAM_SUPPORTED;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ucsi_run_command(con->ucsi, command, NULL, 0);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
ucsi_partner_change(con);
- ret = ucsi_ack(ucsi, UCSI_ACK_EVENT);
+ ret = ucsi_acknowledge_connector_change(ucsi);
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -595,117 +581,83 @@ out_unlock:
}
/**
- * ucsi_notify - PPM notification handler
- * @ucsi: Source UCSI Interface for the notifications
- *
- * Handle notifications from PPM of @ucsi.
+ * ucsi_connector_change - Process Connector Change Event
+ * @ucsi: UCSI Interface
+ * @num: Connector number
*/
-void ucsi_notify(struct ucsi *ucsi)
+void ucsi_connector_change(struct ucsi *ucsi, u8 num)
{
- struct ucsi_cci *cci;
-
- /* There is no requirement to sync here, but no harm either. */
- ucsi_sync(ucsi);
-
- cci = &ucsi->ppm->data->cci;
-
- if (cci->error)
- ucsi->status = UCSI_ERROR;
- else if (cci->busy)
- ucsi->status = UCSI_BUSY;
- else
- ucsi->status = UCSI_IDLE;
+ struct ucsi_connector *con = &ucsi->connector[num - 1];
- if (cci->cmd_complete && test_bit(COMMAND_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->ack_complete && test_bit(ACK_PENDING, &ucsi->flags)) {
- complete(&ucsi->complete);
- } else if (cci->connector_change) {
- struct ucsi_connector *con;
-
- con = &ucsi->connector[cci->connector_change - 1];
-
- if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
- schedule_work(&con->work);
- }
-
- trace_ucsi_notify(ucsi->ppm->data->raw_cci);
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ schedule_work(&con->work);
}
-EXPORT_SYMBOL_GPL(ucsi_notify);
+EXPORT_SYMBOL_GPL(ucsi_connector_change);
/* -------------------------------------------------------------------------- */
static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
{
- struct ucsi_control ctrl;
+ u64 command;
- UCSI_CMD_CONNECTOR_RESET(ctrl, con, hard);
+ command = UCSI_CONNECTOR_RESET | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= hard ? UCSI_CONNECTOR_RESET_HARD : 0;
- return ucsi_send_command(con->ucsi, &ctrl, NULL, 0);
+ return ucsi_send_command(con->ucsi, command, NULL, 0);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 command = UCSI_PPM_RESET;
unsigned long tmo;
+ u32 cci;
int ret;
- ctrl.raw_cmd = 0;
- ctrl.cmd.cmd = UCSI_PPM_RESET;
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- /* Here sync is critical. */
- ret = ucsi_sync(ucsi);
- if (ret)
- goto err;
+ if (time_is_before_jiffies(tmo))
+ return -ETIMEDOUT;
- if (ucsi->ppm->data->cci.reset_complete)
- break;
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return ret;
/* If the PPM is still doing something else, reset it again. */
- if (ucsi->ppm->data->raw_cci) {
- dev_warn_ratelimited(ucsi->dev,
- "Failed to reset PPM! Trying again..\n");
-
- trace_ucsi_command(&ctrl);
- ret = ucsi->ppm->cmd(ucsi->ppm, &ctrl);
- if (ret)
- goto err;
+ if (cci & ~UCSI_CCI_RESET_COMPLETE) {
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL,
+ &command,
+ sizeof(command));
+ if (ret < 0)
+ return ret;
}
- /* Letting the PPM settle down. */
msleep(20);
+ } while (!(cci & UCSI_CCI_RESET_COMPLETE));
- ret = -ETIMEDOUT;
- } while (time_is_after_jiffies(tmo));
-
-err:
- trace_ucsi_reset_ppm(&ctrl, ret);
-
- return ret;
+ return 0;
}
-static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
+static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
int ret;
- ret = ucsi_send_command(con->ucsi, ctrl, NULL, 0);
+ ret = ucsi_send_command(con->ucsi, command, NULL, 0);
if (ret == -ETIMEDOUT) {
- struct ucsi_control c;
+ u64 c;
/* PPM most likely stopped responding. Resetting everything. */
mutex_lock(&con->ucsi->ppm_lock);
ucsi_reset_ppm(con->ucsi);
mutex_unlock(&con->ucsi->ppm_lock);
- UCSI_CMD_SET_NTFY_ENABLE(c, UCSI_ENABLE_NTFY_ALL);
- ucsi_send_command(con->ucsi, &c, NULL, 0);
+ c = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ucsi_send_command(con->ucsi, c, NULL, 0);
ucsi_reset_connector(con, true);
}
@@ -713,11 +665,11 @@ static int ucsi_role_cmd(struct ucsi_connector *con, struct ucsi_control *ctrl)
return ret;
}
-static int
-ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
+static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ u8 partner_type;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -727,14 +679,17 @@ ucsi_dr_swap(const struct typec_capability *cap, enum typec_data_role role)
goto out_unlock;
}
- if ((con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
+ partner_type = UCSI_CONSTAT_PARTNER_TYPE(con->status.flags);
+ if ((partner_type == UCSI_CONSTAT_PARTNER_TYPE_DFP &&
role == TYPEC_DEVICE) ||
- (con->status.partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
+ (partner_type == UCSI_CONSTAT_PARTNER_TYPE_UFP &&
role == TYPEC_HOST))
goto out_unlock;
- UCSI_CMD_SET_UOR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_UOR_ROLE(role);
+ command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -748,11 +703,11 @@ out_unlock:
return ret < 0 ? ret : 0;
}
-static int
-ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
+static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
{
- struct ucsi_connector *con = to_ucsi_connector(cap);
- struct ucsi_control ctrl;
+ struct ucsi_connector *con = typec_get_drvdata(port);
+ enum typec_role cur_role;
+ u64 command;
int ret = 0;
mutex_lock(&con->lock);
@@ -762,11 +717,15 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
goto out_unlock;
}
- if (con->status.pwr_dir == role)
+ cur_role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
+ if (cur_role == role)
goto out_unlock;
- UCSI_CMD_SET_PDR(ctrl, con, role);
- ret = ucsi_role_cmd(con, &ctrl);
+ command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_SET_PDR_ROLE(role);
+ command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
+ ret = ucsi_role_cmd(con, command);
if (ret < 0)
goto out_unlock;
@@ -777,7 +736,8 @@ ucsi_pr_swap(const struct typec_capability *cap, enum typec_role role)
}
/* Something has gone wrong while swapping the role */
- if (con->status.pwr_op_mode != UCSI_CONSTAT_PWR_OPMODE_PD) {
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_reset_connector(con, true);
ret = -EPROTO;
}
@@ -788,6 +748,11 @@ out_unlock:
return ret;
}
+static const struct typec_operations ucsi_ops = {
+ .dr_set = ucsi_dr_swap,
+ .pr_set = ucsi_pr_swap
+};
+
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
struct fwnode_handle *fwnode;
@@ -804,18 +769,19 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
- INIT_WORK(&con->work, ucsi_connector_change);
+ INIT_WORK(&con->work, ucsi_handle_connector_change);
init_completion(&con->complete);
mutex_init(&con->lock);
con->num = index + 1;
con->ucsi = ucsi;
/* Get connector capability */
- UCSI_CMD_GET_CONNECTOR_CAPABILITY(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->cap, sizeof(con->cap));
+ command = UCSI_GET_CONNECTOR_CAPABILITY;
+ command |= UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
return ret;
@@ -826,11 +792,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
else if (con->cap.op_mode & UCSI_CONCAP_OPMODE_UFP)
cap->data = TYPEC_PORT_UFP;
- if (con->cap.provider && con->cap.consumer)
+ if ((con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER) &&
+ (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER))
cap->type = TYPEC_PORT_DRP;
- else if (con->cap.provider)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_PROVIDER)
cap->type = TYPEC_PORT_SRC;
- else if (con->cap.consumer)
+ else if (con->cap.flags & UCSI_CONCAP_FLAG_CONSUMER)
cap->type = TYPEC_PORT_SNK;
cap->revision = ucsi->cap.typec_version;
@@ -843,8 +810,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
*accessory = TYPEC_ACCESSORY_DEBUG;
cap->fwnode = ucsi_find_fwnode(con);
- cap->dr_set = ucsi_dr_swap;
- cap->pr_set = ucsi_pr_swap;
+ cap->driver_data = con;
+ cap->ops = &ucsi_ops;
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
@@ -858,17 +825,15 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
con->num);
/* Get the status */
- UCSI_CMD_GET_CONNECTOR_STATUS(ctrl, con->num);
- ret = ucsi_run_command(ucsi, &ctrl, &con->status, sizeof(con->status));
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_run_command(ucsi, command, &con->status,
+ sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "con%d: failed to get status\n", con->num);
return 0;
}
- ucsi_pwr_opmode_change(con);
- typec_set_pwr_role(con->port, con->status.pwr_dir);
-
- switch (con->status.partner_type) {
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
@@ -880,8 +845,12 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
}
/* Check if there is already something connected */
- if (con->status.connected)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
+ typec_set_pwr_role(con->port,
+ !!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
+ ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ }
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
@@ -898,11 +867,16 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
return 0;
}
-static void ucsi_init(struct work_struct *work)
+/**
+ * ucsi_init - Initialize UCSI interface
+ * @ucsi: UCSI to be initialized
+ *
+ * Registers all ports @ucsi has and enables all notification events.
+ */
+int ucsi_init(struct ucsi *ucsi)
{
- struct ucsi *ucsi = container_of(work, struct ucsi, work);
struct ucsi_connector *con;
- struct ucsi_control ctrl;
+ u64 command;
int ret;
int i;
@@ -916,15 +890,15 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable basic notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE |
- UCSI_ENABLE_NTFY_ERROR);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ command |= UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
- UCSI_CMD_GET_CAPABILITY(ctrl);
- ret = ucsi_run_command(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
+ command = UCSI_GET_CAPABILITY;
+ ret = ucsi_run_command(ucsi, command, &ucsi->cap, sizeof(ucsi->cap));
if (ret < 0)
goto err_reset;
@@ -949,14 +923,14 @@ static void ucsi_init(struct work_struct *work)
}
/* Enable all notifications */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_ALL);
- ret = ucsi_run_command(ucsi, &ctrl, NULL, 0);
+ command = UCSI_SET_NOTIFICATION_ENABLE | UCSI_ENABLE_NTFY_ALL;
+ ret = ucsi_run_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;
mutex_unlock(&ucsi->ppm_lock);
- return;
+ return 0;
err_unregister:
for (con = ucsi->connector; con->port; con++) {
@@ -970,59 +944,115 @@ err_reset:
ucsi_reset_ppm(ucsi);
err:
mutex_unlock(&ucsi->ppm_lock);
- dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ucsi_init);
+
+static void ucsi_init_work(struct work_struct *work)
+{
+ struct ucsi *ucsi = container_of(work, struct ucsi, work);
+ int ret;
+
+ ret = ucsi_init(ucsi);
+ if (ret)
+ dev_err(ucsi->dev, "PPM init failed (%d)\n", ret);
}
/**
- * ucsi_register_ppm - Register UCSI PPM Interface
- * @dev: Device interface to the PPM
- * @ppm: The PPM interface
- *
- * Allocates UCSI instance, associates it with @ppm and returns it to the
- * caller, and schedules initialization of the interface.
+ * ucsi_get_drvdata - Return private driver data pointer
+ * @ucsi: UCSI interface
+ */
+void *ucsi_get_drvdata(struct ucsi *ucsi)
+{
+ return ucsi->driver_data;
+}
+EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
+
+/**
+ * ucsi_set_drvdata - Assign private driver data pointer
+ * @ucsi: UCSI interface
+ * @data: Private data pointer
*/
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm)
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data)
+{
+ ucsi->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(ucsi_set_drvdata);
+
+/**
+ * ucsi_create - Allocate UCSI instance
+ * @dev: Device interface to the PPM (Platform Policy Manager)
+ * @ops: I/O routines
+ */
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
{
struct ucsi *ucsi;
+ if (!ops || !ops->read || !ops->sync_write || !ops->async_write)
+ return ERR_PTR(-EINVAL);
+
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
if (!ucsi)
return ERR_PTR(-ENOMEM);
- INIT_WORK(&ucsi->work, ucsi_init);
- init_completion(&ucsi->complete);
+ INIT_WORK(&ucsi->work, ucsi_init_work);
mutex_init(&ucsi->ppm_lock);
-
ucsi->dev = dev;
- ucsi->ppm = ppm;
+ ucsi->ops = ops;
+
+ return ucsi;
+}
+EXPORT_SYMBOL_GPL(ucsi_create);
+
+/**
+ * ucsi_destroy - Free UCSI instance
+ * @ucsi: UCSI instance to be freed
+ */
+void ucsi_destroy(struct ucsi *ucsi)
+{
+ kfree(ucsi);
+}
+EXPORT_SYMBOL_GPL(ucsi_destroy);
+
+/**
+ * ucsi_register - Register UCSI interface
+ * @ucsi: UCSI instance
+ */
+int ucsi_register(struct ucsi *ucsi)
+{
+ int ret;
+
+ ret = ucsi->ops->read(ucsi, UCSI_VERSION, &ucsi->version,
+ sizeof(ucsi->version));
+ if (ret)
+ return ret;
+
+ if (!ucsi->version)
+ return -ENODEV;
- /*
- * Communication with the PPM takes a lot of time. It is not reasonable
- * to initialize the driver here. Using a work for now.
- */
queue_work(system_long_wq, &ucsi->work);
- return ucsi;
+ return 0;
}
-EXPORT_SYMBOL_GPL(ucsi_register_ppm);
+EXPORT_SYMBOL_GPL(ucsi_register);
/**
- * ucsi_unregister_ppm - Unregister UCSI PPM Interface
- * @ucsi: struct ucsi associated with the PPM
+ * ucsi_unregister - Unregister UCSI interface
+ * @ucsi: UCSI interface to be unregistered
*
- * Unregister UCSI PPM that was created with ucsi_register().
+ * Unregister UCSI interface that was registered with ucsi_register().
*/
-void ucsi_unregister_ppm(struct ucsi *ucsi)
+void ucsi_unregister(struct ucsi *ucsi)
{
- struct ucsi_control ctrl;
+ u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_work_sync(&ucsi->work);
- /* Disable everything except command complete notification */
- UCSI_CMD_SET_NTFY_ENABLE(ctrl, UCSI_ENABLE_NTFY_CMD_COMPLETE)
- ucsi_send_command(ucsi, &ctrl, NULL, 0);
+ /* Disable notifications */
+ ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
@@ -1032,12 +1062,9 @@ void ucsi_unregister_ppm(struct ucsi *ucsi)
typec_unregister_port(ucsi->connector[i].port);
}
- ucsi_reset_ppm(ucsi);
-
kfree(ucsi->connector);
- kfree(ucsi);
}
-EXPORT_SYMBOL_GPL(ucsi_unregister_ppm);
+EXPORT_SYMBOL_GPL(ucsi_unregister);
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_LICENSE("GPL v2");
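Taken together, ucsi_create(), ucsi_set_drvdata(), ucsi_register(), ucsi_unregister() and ucsi_destroy() give a glue driver a conventional lifecycle. A minimal probe/remove pairing might look like the sketch below; the foo names and foo_ucsi_ops table are assumptions, and the flow mirrors the ucsi_acpi conversion further down in this patch.

struct foo {
	struct ucsi *ucsi;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ucsi = ucsi_create(&pdev->dev, &foo_ucsi_ops);
	if (IS_ERR(priv->ucsi))
		return PTR_ERR(priv->ucsi);

	ucsi_set_drvdata(priv->ucsi, priv);

	ret = ucsi_register(priv->ucsi);
	if (ret) {
		ucsi_destroy(priv->ucsi);
		return ret;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo *priv = platform_get_drvdata(pdev);

	ucsi_unregister(priv->ucsi);
	ucsi_destroy(priv->ucsi);
	return 0;
}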
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index de87d0b8319d..8569bbd3762f 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,177 +10,55 @@
/* -------------------------------------------------------------------------- */
-/* Command Status and Connector Change Indication (CCI) data structure */
-struct ucsi_cci {
- u8:1; /* reserved */
- u8 connector_change:7;
- u8 data_length;
- u16:9; /* reserved */
- u16 not_supported:1;
- u16 cancel_complete:1;
- u16 reset_complete:1;
- u16 busy:1;
- u16 ack_complete:1;
- u16 error:1;
- u16 cmd_complete:1;
-} __packed;
-
-/* Default fields in CONTROL data structure */
-struct ucsi_command {
- u8 cmd;
- u8 length;
- u64 data:48;
-} __packed;
-
-/* ACK Command structure */
-struct ucsi_ack_cmd {
- u8 cmd;
- u8 length;
- u8 cci_ack:1;
- u8 cmd_ack:1;
- u8:6; /* reserved */
-} __packed;
-
-/* Connector Reset Command structure */
-struct ucsi_con_rst {
- u8 cmd;
- u8 length;
- u8 con_num:7;
- u8 hard_reset:1;
-} __packed;
-
-/* Set USB Operation Mode Command structure */
-struct ucsi_uor_cmd {
- u8 cmd;
- u8 length;
- u16 con_num:7;
- u16 role:3;
-#define UCSI_UOR_ROLE_DFP BIT(0)
-#define UCSI_UOR_ROLE_UFP BIT(1)
-#define UCSI_UOR_ROLE_DRP BIT(2)
- u16:6; /* reserved */
-} __packed;
-
-/* Get Alternate Modes Command structure */
-struct ucsi_altmode_cmd {
- u8 cmd;
- u8 length;
- u8 recipient;
-#define UCSI_RECIPIENT_CON 0
-#define UCSI_RECIPIENT_SOP 1
-#define UCSI_RECIPIENT_SOP_P 2
-#define UCSI_RECIPIENT_SOP_PP 3
- u8 con_num;
- u8 offset;
- u8 num_altmodes;
-} __packed;
+struct ucsi;
-struct ucsi_control {
- union {
- u64 raw_cmd;
- struct ucsi_command cmd;
- struct ucsi_uor_cmd uor;
- struct ucsi_ack_cmd ack;
- struct ucsi_con_rst con_rst;
- struct ucsi_altmode_cmd alt;
- };
+/* UCSI offsets (Bytes) */
+#define UCSI_VERSION 0
+#define UCSI_CCI 4
+#define UCSI_CONTROL 8
+#define UCSI_MESSAGE_IN 16
+#define UCSI_MESSAGE_OUT 32
+
+/* Command Status and Connector Change Indication (CCI) bits */
+#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1)
+#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8)
+#define UCSI_CCI_NOT_SUPPORTED BIT(25)
+#define UCSI_CCI_CANCEL_COMPLETE BIT(26)
+#define UCSI_CCI_RESET_COMPLETE BIT(27)
+#define UCSI_CCI_BUSY BIT(28)
+#define UCSI_CCI_ACK_COMPLETE BIT(29)
+#define UCSI_CCI_ERROR BIT(30)
+#define UCSI_CCI_COMMAND_COMPLETE BIT(31)
+
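A glue driver's notification handler typically reads the CCI word and dispatches on these bits; a sketch, with foo_read() standing in for the driver's @read routine and priv->complete for an assumed driver-private completion (compare ucsi_acpi_notify() further down):

static void foo_notify(struct foo *priv)
{
	u32 cci;

	if (foo_read(priv->ucsi, UCSI_CCI, &cci, sizeof(cci)))
		return;

	/* A non-zero connector field signals a connector change event */
	if (UCSI_CCI_CONNECTOR(cci))
		ucsi_connector_change(priv->ucsi, UCSI_CCI_CONNECTOR(cci));

	/* Wake up a sync_write that is waiting for command completion */
	if (cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
		complete(&priv->complete);
}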
+/**
+ * struct ucsi_operations - UCSI I/O operations
+ * @read: Read operation
+ * @sync_write: Blocking write operation
+ * @async_write: Non-blocking write operation
+ *
+ * Read and write routines for the UCSI interface. @sync_write must wait for the
+ * Command Completion Event from the PPM before returning, and @async_write must
+ * return immediately after sending the data to the PPM.
+ */
+struct ucsi_operations {
+ int (*read)(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len);
+ int (*sync_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
+ int (*async_write)(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len);
};
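One way to satisfy the @sync_write contract is a completion that the notification path signals on Command Completion. A minimal sketch, assuming a driver-private struct foo with such a completion and a foo_async_write() implementing @async_write (the ACPI and CCGx glue below follow this pattern):

static int foo_sync_write(struct ucsi *ucsi, unsigned int offset,
			  const void *val, size_t val_len)
{
	struct foo *priv = ucsi_get_drvdata(ucsi);
	int ret;

	ret = foo_async_write(ucsi, offset, val, val_len);
	if (ret)
		return ret;

	/* The notify/IRQ path completes this on UCSI_CCI_COMMAND_COMPLETE */
	if (!wait_for_completion_timeout(&priv->complete,
					 msecs_to_jiffies(5000)))
		return -ETIMEDOUT;

	return 0;
}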
-#define __UCSI_CMD(_ctrl_, _cmd_) \
-{ \
- (_ctrl_).raw_cmd = 0; \
- (_ctrl_).cmd.cmd = _cmd_; \
-}
-
-/* Helper for preparing ucsi_control for CONNECTOR_RESET command. */
-#define UCSI_CMD_CONNECTOR_RESET(_ctrl_, _con_, _hard_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_CONNECTOR_RESET) \
- (_ctrl_).con_rst.con_num = (_con_)->num; \
- (_ctrl_).con_rst.hard_reset = _hard_; \
-}
-
-/* Helper for preparing ucsi_control for ACK_CC_CI command. */
-#define UCSI_CMD_ACK(_ctrl_, _ack_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_ACK_CC_CI) \
- (_ctrl_).ack.cci_ack = ((_ack_) == UCSI_ACK_EVENT); \
- (_ctrl_).ack.cmd_ack = ((_ack_) == UCSI_ACK_CMD); \
-}
-
-/* Helper for preparing ucsi_control for SET_NOTIFY_ENABLE command. */
-#define UCSI_CMD_SET_NTFY_ENABLE(_ctrl_, _ntfys_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_SET_NOTIFICATION_ENABLE) \
- (_ctrl_).cmd.data = _ntfys_; \
-}
-
-/* Helper for preparing ucsi_control for GET_CAPABILITY command. */
-#define UCSI_CMD_GET_CAPABILITY(_ctrl_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CAPABILITY) \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_CAPABILITY command. */
-#define UCSI_CMD_GET_CONNECTOR_CAPABILITY(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_CAPABILITY) \
- (_ctrl_).cmd.data = _con_; \
-}
+struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
+void ucsi_destroy(struct ucsi *ucsi);
+int ucsi_register(struct ucsi *ucsi);
+void ucsi_unregister(struct ucsi *ucsi);
+void *ucsi_get_drvdata(struct ucsi *ucsi);
+void ucsi_set_drvdata(struct ucsi *ucsi, void *data);
-/* Helper for preparing ucsi_control for GET_ALTERNATE_MODES command. */
-#define UCSI_CMD_GET_ALTERNATE_MODES(_ctrl_, _r_, _con_num_, _o_, _num_)\
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_ALTERNATE_MODES) \
- _ctrl_.alt.recipient = (_r_); \
- _ctrl_.alt.con_num = (_con_num_); \
- _ctrl_.alt.offset = (_o_); \
- _ctrl_.alt.num_altmodes = (_num_) - 1; \
-}
+void ucsi_connector_change(struct ucsi *ucsi, u8 num);
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CAM_SUPPORTED(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CAM_SUPPORTED) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CAM_SUPPORTED command. */
-#define UCSI_CMD_GET_CURRENT_CAM(_ctrl_, _con_) \
-{ \
- __UCSI_CMD((_ctrl_), UCSI_GET_CURRENT_CAM) \
- _ctrl_.cmd.data = (_con_); \
-}
-
-/* Helper for preparing ucsi_control for GET_CONNECTOR_STATUS command. */
-#define UCSI_CMD_GET_CONNECTOR_STATUS(_ctrl_, _con_) \
-{ \
- __UCSI_CMD(_ctrl_, UCSI_GET_CONNECTOR_STATUS) \
- (_ctrl_).cmd.data = _con_; \
-}
-
-#define __UCSI_ROLE(_ctrl_, _cmd_, _con_num_) \
-{ \
- __UCSI_CMD(_ctrl_, _cmd_) \
- (_ctrl_).uor.con_num = _con_num_; \
- (_ctrl_).uor.role = UCSI_UOR_ROLE_DRP; \
-}
-
-/* Helper for preparing ucsi_control for SET_UOR command. */
-#define UCSI_CMD_SET_UOR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_UOR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_HOST ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
-
-/* Helper for preparing ucsi_control for SET_PDR command. */
-#define UCSI_CMD_SET_PDR(_ctrl_, _con_, _role_) \
-{ \
- __UCSI_ROLE(_ctrl_, UCSI_SET_PDR, (_con_)->num) \
- (_ctrl_).uor.role |= (_role_) == TYPEC_SOURCE ? UCSI_UOR_ROLE_DFP : \
- UCSI_UOR_ROLE_UFP; \
-}
+/* -------------------------------------------------------------------------- */
/* Commands */
#define UCSI_PPM_RESET 0x01
@@ -203,24 +81,49 @@ struct ucsi_control {
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
-/* ACK_CC_CI commands */
-#define UCSI_ACK_EVENT 1
-#define UCSI_ACK_CMD 2
-
-/* Bits for SET_NOTIFICATION_ENABLE command */
-#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0)
-#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1)
-#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(2)
-#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(5)
-#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(6)
-#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(7)
-#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(8)
-#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(9)
-#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(11)
-#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(12)
-#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(14)
-#define UCSI_ENABLE_NTFY_ERROR BIT(15)
-#define UCSI_ENABLE_NTFY_ALL 0xdbe7
+#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
+
+/* CONNECTOR_RESET command bits */
+#define UCSI_CONNECTOR_RESET_HARD BIT(23) /* Deprecated in v1.1 */
+
+/* ACK_CC_CI bits */
+#define UCSI_ACK_CONNECTOR_CHANGE BIT(16)
+#define UCSI_ACK_COMMAND_COMPLETE BIT(17)
+
+/* SET_NOTIFICATION_ENABLE command bits */
+#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(16)
+#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(17)
+#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(18)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(19)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(20)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(21)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(22)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(23)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(24)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(25)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(26)
+#define UCSI_ENABLE_NTFY_ERROR BIT(27)
+#define UCSI_ENABLE_NTFY_ALL 0xdbe70000
+
+/* SET_UOR command bits */
+#define UCSI_SET_UOR_ROLE(_r_) (((_r_) == TYPEC_HOST ? 1 : 2) << 23)
+#define UCSI_SET_UOR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* SET_PDR command bits */
+#define UCSI_SET_PDR_ROLE(_r_) (((_r_) == TYPEC_SOURCE ? 1 : 2) << 23)
+#define UCSI_SET_PDR_ACCEPT_ROLE_SWAPS BIT(25)
+
+/* GET_ALTERNATE_MODES command bits */
+#define UCSI_GET_ALTMODE_RECIPIENT(_r_) ((u64)(_r_) << 16)
+#define UCSI_RECIPIENT_CON 0
+#define UCSI_RECIPIENT_SOP 1
+#define UCSI_RECIPIENT_SOP_P 2
+#define UCSI_RECIPIENT_SOP_PP 3
+#define UCSI_GET_ALTMODE_CONNECTOR_NUMBER(_r_) ((u64)(_r_) << 24)
+#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
+#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
+
+/* -------------------------------------------------------------------------- */
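With struct ucsi_control gone, a command is a single u64 composed from these fields. A data-role swap request, for instance, is built roughly the way ucsi_dr_swap() now does it:

	u64 command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
	int ret;

	command |= UCSI_SET_UOR_ROLE(role);
	command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
	ret = ucsi_send_command(con->ucsi, command, NULL, 0);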
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
#define UCSI_ERROR_UNREGONIZED_CMD BIT(0)
@@ -230,6 +133,12 @@ struct ucsi_control {
#define UCSI_ERROR_CC_COMMUNICATION_ERR BIT(4)
#define UCSI_ERROR_DEAD_BATTERY BIT(5)
#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL BIT(6)
+#define UCSI_ERROR_OVERCURRENT BIT(7)
+#define UCSI_ERROR_UNDEFINED BIT(8)
+#define UCSI_ERROR_PARTNER_REJECTED_SWAP BIT(9)
+#define UCSI_ERROR_HARD_RESET BIT(10)
+#define UCSI_ERROR_PPM_POLICY_CONFLICT BIT(11)
+#define UCSI_ERROR_SWAP_REJECTED BIT(12)
/* Data structure filled by PPM in response to GET_CAPABILITY command. */
struct ucsi_capability {
@@ -241,8 +150,8 @@ struct ucsi_capability {
#define UCSI_CAP_ATTR_POWER_AC_SUPPLY BIT(8)
#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
- u32 num_connectors:8;
- u32 features:24;
+ u8 num_connectors;
+ u8 features;
#define UCSI_CAP_SET_UOM BIT(0)
#define UCSI_CAP_SET_PDM BIT(1)
#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
@@ -251,8 +160,9 @@ struct ucsi_capability {
#define UCSI_CAP_CABLE_DETAILS BIT(5)
#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
#define UCSI_CAP_PD_RESET BIT(7)
+ u16 reserved_1;
u8 num_alt_modes;
- u8 reserved;
+ u8 reserved_2;
u16 bc_version;
u16 pd_version;
u16 typec_version;
@@ -269,9 +179,9 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 provider:1;
- u8 consumer:1;
- u8:6; /* reserved */
+ u8 flags;
+#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
+#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
} __packed;
struct ucsi_altmode {
@@ -283,18 +193,17 @@ struct ucsi_altmode {
struct ucsi_cable_property {
u16 speed_supported;
u8 current_capability;
- u8 vbus_in_cable:1;
- u8 active_cable:1;
- u8 directionality:1;
- u8 plug_type:2;
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
-#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
-#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
- u8 mode_support:1;
- u8:2; /* reserved */
- u8 latency:4;
- u8:4; /* reserved */
+ u8 flags;
+#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
+#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
+#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+ u8 latency;
} __packed;
/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
@@ -311,83 +220,47 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12)
#define UCSI_CONSTAT_CONNECT_CHANGE BIT(14)
#define UCSI_CONSTAT_ERROR BIT(15)
- u16 pwr_op_mode:3;
-#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
-#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
-#define UCSI_CONSTAT_PWR_OPMODE_BC 2
-#define UCSI_CONSTAT_PWR_OPMODE_PD 3
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
-#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
- u16 connected:1;
- u16 pwr_dir:1;
- u16 partner_flags:8;
-#define UCSI_CONSTAT_PARTNER_FLAG_USB BIT(0)
-#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE BIT(1)
- u16 partner_type:3;
-#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
-#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
-#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
-#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
+ u16 flags;
+#define UCSI_CONSTAT_PWR_OPMODE(_f_) ((_f_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
+#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
+#define UCSI_CONSTAT_PWR_OPMODE_BC 2
+#define UCSI_CONSTAT_PWR_OPMODE_PD 3
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5 4
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
+#define UCSI_CONSTAT_CONNECTED BIT(3)
+#define UCSI_CONSTAT_PWR_DIR BIT(4)
+#define UCSI_CONSTAT_PARTNER_FLAGS(_f_) (((_f_) & GENMASK(12, 5)) >> 5)
+#define UCSI_CONSTAT_PARTNER_FLAG_USB 1
+#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE 2
+#define UCSI_CONSTAT_PARTNER_TYPE(_f_) (((_f_) & GENMASK(15, 13)) >> 13)
+#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
+#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE 3 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
+#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 bc_status:2;
-#define UCSI_CONSTAT_BC_NOT_CHARGING 0
-#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
-#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
-#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
- u8 provider_cap_limit_reason:4;
-#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
-#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
- u8:2; /* reserved */
+ u8 pwr_status;
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+#define UCSI_CONSTAT_BC_NOT_CHARGING 0
+#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
+#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
+#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
+#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
} __packed;
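With the bitfields gone, status decoding is plain masking; for example, checking for an established USB PD contract might look like this sketch using the macros above:

	if ((con->status.flags & UCSI_CONSTAT_CONNECTED) &&
	    UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
					UCSI_CONSTAT_PWR_OPMODE_PD)
		typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);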
/* -------------------------------------------------------------------------- */
-struct ucsi;
-
-struct ucsi_data {
- u16 version;
- u16 reserved;
- union {
- u32 raw_cci;
- struct ucsi_cci cci;
- };
- struct ucsi_control ctrl;
- u32 message_in[4];
- u32 message_out[4];
-} __packed;
-
-/*
- * struct ucsi_ppm - Interface to UCSI Platform Policy Manager
- * @data: memory location to the UCSI data structures
- * @cmd: UCSI command execution routine
- * @sync: Refresh UCSI mailbox (the data structures)
- */
-struct ucsi_ppm {
- struct ucsi_data *data;
- int (*cmd)(struct ucsi_ppm *, struct ucsi_control *);
- int (*sync)(struct ucsi_ppm *);
-};
-
-struct ucsi *ucsi_register_ppm(struct device *dev, struct ucsi_ppm *ppm);
-void ucsi_unregister_ppm(struct ucsi *ucsi);
-void ucsi_notify(struct ucsi *ucsi);
-
-/* -------------------------------------------------------------------------- */
-
-enum ucsi_status {
- UCSI_IDLE = 0,
- UCSI_BUSY,
- UCSI_ERROR,
-};
-
struct ucsi {
+ u16 version;
struct device *dev;
- struct ucsi_ppm *ppm;
+ struct driver_data *driver_data;
+
+ const struct ucsi_operations *ops;
- enum ucsi_status status;
- struct completion complete;
struct ucsi_capability cap;
struct ucsi_connector *connector;
@@ -426,7 +299,7 @@ struct ucsi_connector {
struct ucsi_connector_capability cap;
};
-int ucsi_send_command(struct ucsi *ucsi, struct ucsi_control *ctrl,
+int ucsi_send_command(struct ucsi *ucsi, u64 command,
void *retval, size_t size);
void ucsi_altmode_update_active(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index a18112a83fae..3f1786170098 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -19,7 +19,9 @@
struct ucsi_acpi {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
+ void __iomem *base;
+ struct completion complete;
+ unsigned long flags;
guid_t guid;
};
@@ -39,27 +41,73 @@ static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
return 0;
}
-static int ucsi_acpi_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ memcpy(val, (const void __force *)(ua->base + offset), val_len);
+
+ return 0;
+}
+
+static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ memcpy((void __force *)(ua->base + offset), val, val_len);
return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE);
}
-static int ucsi_acpi_sync(struct ucsi_ppm *ppm)
+static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- struct ucsi_acpi *ua = container_of(ppm, struct ucsi_acpi, ppm);
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ set_bit(COMMAND_PENDING, &ua->flags);
+
+ ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto out_clear_bit;
- return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
+
+out_clear_bit:
+ clear_bit(COMMAND_PENDING, &ua->flags);
+
+ return ret;
}
+static const struct ucsi_operations ucsi_acpi_ops = {
+ .read = ucsi_acpi_read,
+ .sync_write = ucsi_acpi_sync_write,
+ .async_write = ucsi_acpi_async_write
+};
+
static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
{
struct ucsi_acpi *ua = data;
+ u32 cci;
+ int ret;
+
+ ret = ucsi_acpi_read(ua->ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret)
+ return;
- ucsi_notify(ua->ucsi);
+ if (test_bit(COMMAND_PENDING, &ua->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&ua->complete);
+ else if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
}
static int ucsi_acpi_probe(struct platform_device *pdev)
@@ -90,35 +138,39 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
* it can not be requested here, and we can not use
* devm_ioremap_resource().
*/
- ua->ppm.data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ua->ppm.data)
+ ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ua->base)
return -ENOMEM;
- if (!ua->ppm.data->version)
- return -ENODEV;
-
ret = guid_parse(UCSI_DSM_UUID, &ua->guid);
if (ret)
return ret;
- ua->ppm.cmd = ucsi_acpi_cmd;
- ua->ppm.sync = ucsi_acpi_sync;
+ init_completion(&ua->complete);
ua->dev = &pdev->dev;
+ ua->ucsi = ucsi_create(&pdev->dev, &ucsi_acpi_ops);
+ if (IS_ERR(ua->ucsi))
+ return PTR_ERR(ua->ucsi);
+
+ ucsi_set_drvdata(ua->ucsi, ua);
+
status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify, ua);
if (ACPI_FAILURE(status)) {
dev_err(&pdev->dev, "failed to install notify handler\n");
+ ucsi_destroy(ua->ucsi);
return -ENODEV;
}
- ua->ucsi = ucsi_register_ppm(&pdev->dev, &ua->ppm);
- if (IS_ERR(ua->ucsi)) {
+ ret = ucsi_register(ua->ucsi);
+ if (ret) {
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
- return PTR_ERR(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
+ return ret;
}
platform_set_drvdata(pdev, ua);
@@ -130,7 +182,8 @@ static int ucsi_acpi_remove(struct platform_device *pdev)
{
struct ucsi_acpi *ua = platform_get_drvdata(pdev);
- ucsi_unregister_ppm(ua->ucsi);
+ ucsi_unregister(ua->ucsi);
+ ucsi_destroy(ua->ucsi);
acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), ACPI_DEVICE_NOTIFY,
ucsi_acpi_notify);
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index d772fce51905..3370b3fc37b1 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -176,8 +176,8 @@ struct ccg_resp {
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
- struct ucsi_ppm ppm;
struct i2c_client *client;
+
struct ccg_dev_info info;
/* version info for boot, primary and secondary */
struct version_info version[FW2 + 1];
@@ -196,6 +196,8 @@ struct ucsi_ccg {
/* fw build with vendor information */
u16 fw_build;
struct work_struct pm_work;
+
+ struct completion complete;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -243,7 +245,7 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
return 0;
}
-static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
struct i2c_client *client = uc->client;
unsigned char *buf;
@@ -317,88 +319,85 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
return -ETIMEDOUT;
}
-static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
+ void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
- status = ccg_write(uc, rab, ppm +
- offsetof(struct ucsi_data, message_out),
- sizeof(uc->ppm.data->message_out));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
- return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
- sizeof(uc->ppm.data->ctrl));
+ return ccg_read(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- u8 *ppm = (u8 *)uc->ppm.data;
- int status;
- u16 rab;
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
- status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
- sizeof(uc->ppm.data->cci));
- if (status < 0)
- return status;
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
- return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
- sizeof(uc->ppm.data->message_in));
+ return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
}
-static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
+ const void *val, size_t val_len)
{
- int status;
- unsigned char data;
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
+ int ret;
- status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
- if (status < 0)
- return status;
+ mutex_lock(&uc->lock);
+ pm_runtime_get_sync(uc->dev);
+ set_bit(DEV_CMD_PENDING, &uc->flags);
- return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
-}
+ ret = ucsi_ccg_async_write(ucsi, offset, val, val_len);
+ if (ret)
+ goto err_clear_bit;
-static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
- int status;
+ if (!wait_for_completion_timeout(&uc->complete, msecs_to_jiffies(5000)))
+ ret = -ETIMEDOUT;
- status = ucsi_ccg_recv_data(uc);
- if (status < 0)
- return status;
+err_clear_bit:
+ clear_bit(DEV_CMD_PENDING, &uc->flags);
+ pm_runtime_put_sync(uc->dev);
+ mutex_unlock(&uc->lock);
- /* ack interrupt to allow next command to run */
- return ucsi_ccg_ack_interrupt(uc);
+ return ret;
}
-static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
-{
- struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
-
- ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
- return ucsi_ccg_send_data(uc);
-}
+static const struct ucsi_operations ucsi_ccg_ops = {
+ .read = ucsi_ccg_read,
+ .sync_write = ucsi_ccg_sync_write,
+ .async_write = ucsi_ccg_async_write
+};
static irqreturn_t ccg_irq_handler(int irq, void *data)
{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
+ u8 intr_reg;
+ u32 cci;
+ int ret;
+
+ ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (ret)
+ return ret;
+
+ ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
+ if (ret)
+ goto err_clear_irq;
+
+ if (UCSI_CCI_CONNECTOR(cci))
+ ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
+
+ if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
- ucsi_notify(uc->ucsi);
+err_clear_irq:
+ ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
return IRQ_HANDLED;
}
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
- struct ucsi_ccg *uc = container_of(pm_work, struct ucsi_ccg, pm_work);
-
- ucsi_notify(uc->ucsi);
+ ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
}
static int get_fw_info(struct ucsi_ccg *uc)
@@ -1027,10 +1026,10 @@ static int ccg_restart(struct ucsi_ccg *uc)
return status;
}
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
+ status = ucsi_register(uc->ucsi);
+ if (status) {
+ dev_err(uc->dev, "failed to register the interface\n");
+ return status;
}
return 0;
@@ -1047,7 +1046,7 @@ static void ccg_update_firmware(struct work_struct *work)
return;
if (flash_mode != FLASH_NOT_NEEDED) {
- ucsi_unregister_ppm(uc->ucsi);
+ ucsi_unregister(uc->ucsi);
free_irq(uc->irq, uc);
ccg_fw_update(uc, flash_mode);
@@ -1091,21 +1090,15 @@ static int ucsi_ccg_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ucsi_ccg *uc;
int status;
- u16 rab;
uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
if (!uc)
return -ENOMEM;
- uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
- if (!uc->ppm.data)
- return -ENOMEM;
-
- uc->ppm.cmd = ucsi_ccg_cmd;
- uc->ppm.sync = ucsi_ccg_sync;
uc->dev = dev;
uc->client = client;
mutex_init(&uc->lock);
+ init_completion(&uc->complete);
INIT_WORK(&uc->work, ccg_update_firmware);
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1133,30 +1126,25 @@ static int ucsi_ccg_probe(struct i2c_client *client,
if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
uc->port_num++;
+ uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
+ if (IS_ERR(uc->ucsi))
+ return PTR_ERR(uc->ucsi);
+
+ ucsi_set_drvdata(uc->ucsi, uc);
+
status = request_threaded_irq(client->irq, NULL, ccg_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
dev_name(dev), uc);
if (status < 0) {
dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
- return status;
+ goto out_ucsi_destroy;
}
uc->irq = client->irq;
- uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
- if (IS_ERR(uc->ucsi)) {
- dev_err(uc->dev, "ucsi_register_ppm failed\n");
- return PTR_ERR(uc->ucsi);
- }
-
- rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
- status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
- offsetof(struct ucsi_data, version),
- sizeof(uc->ppm.data->version));
- if (status < 0) {
- ucsi_unregister_ppm(uc->ucsi);
- return status;
- }
+ status = ucsi_register(uc->ucsi);
+ if (status)
+ goto out_free_irq;
i2c_set_clientdata(client, uc);
@@ -1167,6 +1155,13 @@ static int ucsi_ccg_probe(struct i2c_client *client,
pm_runtime_idle(uc->dev);
return 0;
+
+out_free_irq:
+ free_irq(uc->irq, uc);
+out_ucsi_destroy:
+ ucsi_destroy(uc->ucsi);
+
+ return status;
}
static int ucsi_ccg_remove(struct i2c_client *client)
@@ -1175,8 +1170,9 @@ static int ucsi_ccg_remove(struct i2c_client *client)
cancel_work_sync(&uc->pm_work);
cancel_work_sync(&uc->work);
- ucsi_unregister_ppm(uc->ucsi);
pm_runtime_disable(uc->dev);
+ ucsi_unregister(uc->ucsi);
+ ucsi_destroy(uc->ucsi);
free_irq(uc->irq, uc);
return 0;
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index 2f86b28fa3da..7bbae7a08642 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -4,6 +4,7 @@ config USBIP_CORE
tristate "USB/IP support"
depends on NET
select USB_COMMON
+ select SGL_ALLOC
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 66edfeea68fe..e2b019532234 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -470,18 +470,50 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
if (pipe == -1)
return;
+ /*
+ * Smatch reported the error case where use_sg is true and buf_len is 0.
+	 * In this case, it adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
+	 * released by the stub event handler and the connection will be shut
+	 * down.
+ */
priv = stub_priv_alloc(sdev, pdu);
if (!priv)
return;
buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
+ if (use_sg && !buf_len) {
+ dev_err(&udev->dev, "sg buffer with zero length\n");
+ goto err_malloc;
+ }
+
/* allocate urb transfer buffer, if needed */
if (buf_len) {
if (use_sg) {
sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
if (!sgl)
goto err_malloc;
+
+ /* Check if the server's HCD supports SG */
+ if (!udev->bus->sg_tablesize) {
+ /*
+				 * If the server's HCD doesn't support SG, break
+				 * a single SG request into several URBs and map
+				 * each SG list entry to the corresponding URB
+				 * buffer. The previously allocated SG list is
+				 * stored in priv->sgl (if the server's HCD
+				 * supports SG, the SG list is stored only in
+				 * urb->sg) and is used as an indicator that the
+				 * server split a single SG request into several
+				 * URBs. Later, priv->sgl is used by
+				 * stub_complete() and stub_send_ret_submit() to
+				 * reassemble the divided URBs.
+ */
+ support_sg = 0;
+ num_urbs = nents;
+ priv->completed_urbs = 0;
+ pdu->u.cmd_submit.transfer_flags &=
+ ~URB_DMA_MAP_SG;
+ }
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
if (!buffer)
@@ -489,24 +521,6 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
}
}
- /* Check if the server's HCD supports SG */
- if (use_sg && !udev->bus->sg_tablesize) {
- /*
- * If the server's HCD doesn't support SG, break a single SG
- * request into several URBs and map each SG list entry to
- * corresponding URB buffer. The previously allocated SG
- * list is stored in priv->sgl (If the server's HCD support SG,
- * SG list is stored only in urb->sg) and it is used as an
- * indicator that the server split single SG request into
- * several URBs. Later, priv->sgl is used by stub_complete() and
- * stub_send_ret_submit() to reassemble the divied URBs.
- */
- support_sg = 0;
- num_urbs = nents;
- priv->completed_urbs = 0;
- pdu->u.cmd_submit.transfer_flags &= ~URB_DMA_MAP_SG;
- }
-
/* allocate urb array */
priv->num_urbs = num_urbs;
priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
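When support_sg is cleared above, each of the nents URBs allocated here is later given one scatterlist entry as its transfer buffer. Conceptually, with assumed local names and the standard scatterlist helpers:

	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		priv->urbs[i]->transfer_buffer = sg_virt(sg);
		priv->urbs[i]->transfer_buffer_length = sg->length;
	}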
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 36010a82b359..b1c2f6781cb3 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -291,7 +291,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
kfree(iov);
usbip_event_add(&sdev->ud,
SDEV_EVENT_ERROR_TCP);
- return -1;
+ return -1;
}
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 02206162eaa9..379a02c36e37 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -110,13 +110,15 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
struct resource *res;
- int bar;
+ int i;
struct vfio_pci_dummy_resource *dummy_res;
INIT_LIST_HEAD(&vdev->dummy_resources_list);
- for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
- res = vdev->pdev->resource + bar;
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ res = &vdev->pdev->resource[bar];
if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
goto no_mmap;
@@ -399,7 +401,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vfio_config_free(vdev);
- for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ bar = i + PCI_STD_RESOURCES;
if (!vdev->barmap[bar])
continue;
pci_iounmap(pdev, vdev->barmap[bar]);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index f0891bd8444c..90c0b80f8acf 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -450,30 +450,32 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
int i;
- __le32 *bar;
+ __le32 *vbar;
u64 mask;
- bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
+ vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
- for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
- if (!pci_resource_start(pdev, i)) {
- *bar = 0; /* Unmapped by host = unimplemented to user */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
+ int bar = i + PCI_STD_RESOURCES;
+
+ if (!pci_resource_start(pdev, bar)) {
+ *vbar = 0; /* Unmapped by host = unimplemented to user */
continue;
}
- mask = ~(pci_resource_len(pdev, i) - 1);
+ mask = ~(pci_resource_len(pdev, bar) - 1);
- *bar &= cpu_to_le32((u32)mask);
- *bar |= vfio_generate_bar_flags(pdev, i);
+ *vbar &= cpu_to_le32((u32)mask);
+ *vbar |= vfio_generate_bar_flags(pdev, bar);
- if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
- bar++;
- *bar &= cpu_to_le32((u32)(mask >> 32));
+ if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ vbar++;
+ *vbar &= cpu_to_le32((u32)(mask >> 32));
i++;
}
}
- bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
+ vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
/*
* NB. REGION_INFO will have reported zero size if we weren't able
@@ -483,14 +485,14 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
- *bar &= cpu_to_le32((u32)mask);
+ *vbar &= cpu_to_le32((u32)mask);
} else if (pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW) {
mask = ~(0x20000 - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
- *bar &= cpu_to_le32((u32)mask);
+ *vbar &= cpu_to_le32((u32)mask);
} else
- *bar = 0;
+ *vbar = 0;
vdev->bardirty = false;
}
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index ee6ee91718a4..8a2c7607d513 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -86,8 +86,8 @@ struct vfio_pci_reflck {
struct vfio_pci_device {
struct pci_dev *pdev;
- void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
- bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1];
+ void __iomem *barmap[PCI_STD_NUM_BARS];
+ bool bar_mmap_supported[PCI_STD_NUM_BARS];
u8 *pci_config_map;
u8 *vconfig;
struct perm_bits *msi_perm;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 388597930b64..c8482624ca34 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1184,15 +1184,6 @@ static long vfio_fops_unl_ioctl(struct file *filep,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long vfio_fops_compat_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return vfio_fops_unl_ioctl(filep, cmd, arg);
-}
-#endif /* CONFIG_COMPAT */
-
static int vfio_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_container *container;
@@ -1275,9 +1266,7 @@ static const struct file_operations vfio_fops = {
.read = vfio_fops_read,
.write = vfio_fops_write,
.unlocked_ioctl = vfio_fops_unl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vfio_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = vfio_fops_mmap,
};
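All of the removed wrappers followed the same pattern, which is why they can be replaced by the generic compat_ptr_ioctl() helper. That helper is, roughly, the very boilerplate being deleted here — a sketch of its behavior, not a verbatim copy of the fs/ioctl.c implementation:

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* Convert the 32-bit user pointer, then reuse the native handler */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}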
@@ -1556,15 +1545,6 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
return ret;
}
-#ifdef CONFIG_COMPAT
-static long vfio_group_fops_compat_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return vfio_group_fops_unl_ioctl(filep, cmd, arg);
-}
-#endif /* CONFIG_COMPAT */
-
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
struct vfio_group *group;
@@ -1620,9 +1600,7 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep)
static const struct file_operations vfio_group_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = vfio_group_fops_unl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vfio_group_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vfio_group_fops_open,
.release = vfio_group_fops_release,
};
@@ -1687,24 +1665,13 @@ static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
return device->ops->mmap(device->device_data, vma);
}
-#ifdef CONFIG_COMPAT
-static long vfio_device_fops_compat_ioctl(struct file *filep,
- unsigned int cmd, unsigned long arg)
-{
- arg = (unsigned long)compat_ptr(arg);
- return vfio_device_fops_unl_ioctl(filep, cmd, arg);
-}
-#endif /* CONFIG_COMPAT */
-
static const struct file_operations vfio_device_fops = {
.owner = THIS_MODULE,
.release = vfio_device_fops_release,
.read = vfio_device_fops_read,
.write = vfio_device_fops_write,
.unlocked_ioctl = vfio_device_fops_unl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vfio_device_fops_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.mmap = vfio_device_fops_mmap,
};
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1a2dd53caade..e158159671fa 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1751,14 +1751,6 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
@@ -1794,9 +1786,7 @@ static const struct file_operations vhost_net_fops = {
.write_iter = vhost_net_chr_write_iter,
.poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_net_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_net_open,
.llseek = noop_llseek,
};
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a9caf1bc3c3e..0b949a14bce3 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1727,21 +1727,11 @@ vhost_scsi_ioctl(struct file *f,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_scsi_fops = {
.owner = THIS_MODULE,
.release = vhost_scsi_release,
.unlocked_ioctl = vhost_scsi_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_scsi_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_scsi_open,
.llseek = noop_llseek,
};
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 056308008288..e37c92d4d7ad 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -304,21 +304,11 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_test_fops = {
.owner = THIS_MODULE,
.release = vhost_test_release,
.unlocked_ioctl = vhost_test_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_test_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.open = vhost_test_open,
.llseek = noop_llseek,
};
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index dde392b91bb3..50de0642dea6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -810,23 +810,13 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
-#ifdef CONFIG_COMPAT
-static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
- unsigned long arg)
-{
- return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
.release = vhost_vsock_dev_release,
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = vhost_vsock_dev_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice vhost_vsock_misc = {
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 40676be2e46a..403707a3e503 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -99,7 +99,7 @@ config LCD_TOSA
config LCD_HP700
tristate "HP Jornada 700 series LCD Driver"
- depends on SA1100_JORNADA720_SSP && !PREEMPT
+ depends on SA1100_JORNADA720_SSP && !PREEMPTION
default y
help
If you have an HP Jornada 700 series handheld (710/720/728)
@@ -228,7 +228,7 @@ config BACKLIGHT_HP680
config BACKLIGHT_HP700
tristate "HP Jornada 700 series Backlight Driver"
- depends on SA1100_JORNADA720_SSP && !PREEMPT
+ depends on SA1100_JORNADA720_SSP && !PREEMPTION
default y
help
If you have an HP Jornada 700 series,
@@ -282,12 +282,12 @@ config BACKLIGHT_TOSA
	  If you have a Sharp SL-6000 Zaurus say Y to enable a driver
for its backlight
-config BACKLIGHT_PM8941_WLED
- tristate "Qualcomm PM8941 WLED Driver"
+config BACKLIGHT_QCOM_WLED
+ tristate "Qualcomm PMIC WLED Driver"
select REGMAP
help
- If you have the Qualcomm PM8941, say Y to enable a driver for the
- WLED block.
+	  If you have a Qualcomm PMIC, say Y to enable a driver for the
+	  WLED block. Currently it supports PM8941 and PMI8998.
config BACKLIGHT_SAHARA
tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 63c507c07437..6f8777037c37 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -48,8 +48,8 @@ obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
-obj-$(CONFIG_BACKLIGHT_PM8941_WLED) += pm8941-wled.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
+obj-$(CONFIG_BACKLIGHT_QCOM_WLED) += qcom-wled.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_SKY81452) += sky81452-backlight.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 18e053e4716c..75409ddfba3e 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -6,29 +6,23 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/fb.h>
-#include <linux/gpio.h> /* Only for legacy support */
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/gpio_backlight.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
struct gpio_backlight {
- struct device *dev;
struct device *fbdev;
-
struct gpio_desc *gpiod;
- int def_value;
};
-static int gpio_backlight_update_status(struct backlight_device *bl)
+static int gpio_backlight_get_next_brightness(struct backlight_device *bl)
{
- struct gpio_backlight *gbl = bl_get_data(bl);
int brightness = bl->props.brightness;
if (bl->props.power != FB_BLANK_UNBLANK ||
@@ -36,6 +30,14 @@ static int gpio_backlight_update_status(struct backlight_device *bl)
bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
brightness = 0;
+ return brightness;
+}
+
+static int gpio_backlight_update_status(struct backlight_device *bl)
+{
+ struct gpio_backlight *gbl = bl_get_data(bl);
+ int brightness = gpio_backlight_get_next_brightness(bl);
+
gpiod_set_value_cansleep(gbl->gpiod, brightness);
return 0;
@@ -55,105 +57,63 @@ static const struct backlight_ops gpio_backlight_ops = {
.check_fb = gpio_backlight_check_fb,
};
-static int gpio_backlight_probe_dt(struct platform_device *pdev,
- struct gpio_backlight *gbl)
-{
- struct device *dev = &pdev->dev;
- int ret;
-
- gbl->def_value = device_property_read_bool(dev, "default-on");
-
- gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
- if (IS_ERR(gbl->gpiod)) {
- ret = PTR_ERR(gbl->gpiod);
-
- if (ret != -EPROBE_DEFER) {
- dev_err(dev,
- "Error: The gpios parameter is missing or invalid.\n");
- }
- return ret;
- }
-
- return 0;
-}
-
-static int gpio_backlight_initial_power_state(struct gpio_backlight *gbl)
-{
- struct device_node *node = gbl->dev->of_node;
-
- /* Not booted with device tree or no phandle link to the node */
- if (!node || !node->phandle)
- return gbl->def_value ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
-
- /* if the enable GPIO is disabled, do not enable the backlight */
- if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
- return FB_BLANK_POWERDOWN;
-
- return FB_BLANK_UNBLANK;
-}
-
-
static int gpio_backlight_probe(struct platform_device *pdev)
{
- struct gpio_backlight_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct gpio_backlight_platform_data *pdata = dev_get_platdata(dev);
+ struct device_node *of_node = dev->of_node;
struct backlight_properties props;
struct backlight_device *bl;
struct gpio_backlight *gbl;
- int ret;
+ int ret, init_brightness, def_value;
- gbl = devm_kzalloc(&pdev->dev, sizeof(*gbl), GFP_KERNEL);
+ gbl = devm_kzalloc(dev, sizeof(*gbl), GFP_KERNEL);
if (gbl == NULL)
return -ENOMEM;
- gbl->dev = &pdev->dev;
+ if (pdata)
+ gbl->fbdev = pdata->fbdev;
- if (pdev->dev.fwnode) {
- ret = gpio_backlight_probe_dt(pdev, gbl);
- if (ret)
- return ret;
- } else if (pdata) {
- /*
- * Legacy platform data GPIO retrieveal. Do not expand
- * the use of this code path, currently only used by one
- * SH board.
- */
- unsigned long flags = GPIOF_DIR_OUT;
+ def_value = device_property_read_bool(dev, "default-on");
- gbl->fbdev = pdata->fbdev;
- gbl->def_value = pdata->def_value;
- flags |= gbl->def_value ? GPIOF_INIT_HIGH : GPIOF_INIT_LOW;
-
- ret = devm_gpio_request_one(gbl->dev, pdata->gpio, flags,
- pdata ? pdata->name : "backlight");
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to request GPIO\n");
- return ret;
- }
- gbl->gpiod = gpio_to_desc(pdata->gpio);
- if (!gbl->gpiod)
- return -EINVAL;
- } else {
- dev_err(&pdev->dev,
- "failed to find platform data or device tree node.\n");
- return -ENODEV;
+ gbl->gpiod = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
+ if (IS_ERR(gbl->gpiod)) {
+ ret = PTR_ERR(gbl->gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev,
+ "Error: The gpios parameter is missing or invalid.\n");
+ return ret;
}
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = 1;
- bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev),
- &pdev->dev, gbl, &gpio_backlight_ops,
- &props);
+ bl = devm_backlight_device_register(dev, dev_name(dev), dev, gbl,
+ &gpio_backlight_ops, &props);
if (IS_ERR(bl)) {
- dev_err(&pdev->dev, "failed to register backlight\n");
+ dev_err(dev, "failed to register backlight\n");
return PTR_ERR(bl);
}
- bl->props.power = gpio_backlight_initial_power_state(gbl);
+ /* Set the initial power state */
+ if (!of_node || !of_node->phandle)
+ /* Not booted with device tree or no phandle link to the node */
+ bl->props.power = def_value ? FB_BLANK_UNBLANK
+ : FB_BLANK_POWERDOWN;
+ else if (gpiod_get_direction(gbl->gpiod) == 0 &&
+ gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ bl->props.power = FB_BLANK_POWERDOWN;
+ else
+ bl->props.power = FB_BLANK_UNBLANK;
+
bl->props.brightness = 1;
- backlight_update_status(bl);
+ init_brightness = gpio_backlight_get_next_brightness(bl);
+ ret = gpiod_direction_output(gbl->gpiod, init_brightness);
+ if (ret) {
+ dev_err(dev, "failed to set initial brightness\n");
+ return ret;
+ }
platform_set_drvdata(pdev, bl);
return 0;
diff --git a/drivers/video/backlight/ipaq_micro_bl.c b/drivers/video/backlight/ipaq_micro_bl.c
index 1123f67c12b3..85b16cc82878 100644
--- a/drivers/video/backlight/ipaq_micro_bl.c
+++ b/drivers/video/backlight/ipaq_micro_bl.c
@@ -44,7 +44,7 @@ static const struct backlight_ops micro_bl_ops = {
.update_status = micro_bl_update_status,
};
-static struct backlight_properties micro_bl_props = {
+static const struct backlight_properties micro_bl_props = {
.type = BACKLIGHT_RAW,
.max_brightness = 255,
.power = FB_BLANK_UNBLANK,
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 2d8e8192e4e2..ee320883b710 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
#include <linux/pwm.h>
#include <linux/platform_data/lm3630a_bl.h>
@@ -48,6 +49,7 @@ struct lm3630a_chip {
struct lm3630a_platform_data *pdata;
struct backlight_device *bleda;
struct backlight_device *bledb;
+ struct gpio_desc *enable_gpio;
struct regmap *regmap;
struct pwm_device *pwmd;
};
@@ -534,6 +536,13 @@ static int lm3630a_probe(struct i2c_client *client,
}
pchip->pdata = pdata;
+ pchip->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pchip->enable_gpio)) {
+ rval = PTR_ERR(pchip->enable_gpio);
+ return rval;
+ }
+
/* chip initialize */
rval = lm3630a_chip_init(pchip);
if (rval < 0) {
@@ -598,12 +607,14 @@ static const struct i2c_device_id lm3630a_id[] = {
{}
};
+MODULE_DEVICE_TABLE(i2c, lm3630a_id);
+
static const struct of_device_id lm3630a_match_table[] = {
{ .compatible = "ti,lm3630a", },
{ },
};
-MODULE_DEVICE_TABLE(i2c, lm3630a_id);
+MODULE_DEVICE_TABLE(of, lm3630a_match_table);
static struct i2c_driver lm3630a_i2c_driver = {
.driver = {
diff --git a/drivers/video/backlight/pm8941-wled.c b/drivers/video/backlight/pm8941-wled.c
deleted file mode 100644
index 82b85725a22a..000000000000
--- a/drivers/video/backlight/pm8941-wled.c
+++ /dev/null
@@ -1,424 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015, Sony Mobile Communications, AB.
- */
-
-#include <linux/kernel.h>
-#include <linux/backlight.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-/* From DT binding */
-#define PM8941_WLED_DEFAULT_BRIGHTNESS 2048
-
-#define PM8941_WLED_REG_VAL_BASE 0x40
-#define PM8941_WLED_REG_VAL_MAX 0xFFF
-
-#define PM8941_WLED_REG_MOD_EN 0x46
-#define PM8941_WLED_REG_MOD_EN_BIT BIT(7)
-#define PM8941_WLED_REG_MOD_EN_MASK BIT(7)
-
-#define PM8941_WLED_REG_SYNC 0x47
-#define PM8941_WLED_REG_SYNC_MASK 0x07
-#define PM8941_WLED_REG_SYNC_LED1 BIT(0)
-#define PM8941_WLED_REG_SYNC_LED2 BIT(1)
-#define PM8941_WLED_REG_SYNC_LED3 BIT(2)
-#define PM8941_WLED_REG_SYNC_ALL 0x07
-#define PM8941_WLED_REG_SYNC_CLEAR 0x00
-
-#define PM8941_WLED_REG_FREQ 0x4c
-#define PM8941_WLED_REG_FREQ_MASK 0x0f
-
-#define PM8941_WLED_REG_OVP 0x4d
-#define PM8941_WLED_REG_OVP_MASK 0x03
-
-#define PM8941_WLED_REG_BOOST 0x4e
-#define PM8941_WLED_REG_BOOST_MASK 0x07
-
-#define PM8941_WLED_REG_SINK 0x4f
-#define PM8941_WLED_REG_SINK_MASK 0xe0
-#define PM8941_WLED_REG_SINK_SHFT 0x05
-
-/* Per-'string' registers below */
-#define PM8941_WLED_REG_STR_OFFSET 0x10
-
-#define PM8941_WLED_REG_STR_MOD_EN_BASE 0x60
-#define PM8941_WLED_REG_STR_MOD_MASK BIT(7)
-#define PM8941_WLED_REG_STR_MOD_EN BIT(7)
-
-#define PM8941_WLED_REG_STR_SCALE_BASE 0x62
-#define PM8941_WLED_REG_STR_SCALE_MASK 0x1f
-
-#define PM8941_WLED_REG_STR_MOD_SRC_BASE 0x63
-#define PM8941_WLED_REG_STR_MOD_SRC_MASK 0x01
-#define PM8941_WLED_REG_STR_MOD_SRC_INT 0x00
-#define PM8941_WLED_REG_STR_MOD_SRC_EXT 0x01
-
-#define PM8941_WLED_REG_STR_CABC_BASE 0x66
-#define PM8941_WLED_REG_STR_CABC_MASK BIT(7)
-#define PM8941_WLED_REG_STR_CABC_EN BIT(7)
-
-struct pm8941_wled_config {
- u32 i_boost_limit;
- u32 ovp;
- u32 switch_freq;
- u32 num_strings;
- u32 i_limit;
- bool cs_out_en;
- bool ext_gen;
- bool cabc_en;
-};
-
-struct pm8941_wled {
- const char *name;
- struct regmap *regmap;
- u16 addr;
-
- struct pm8941_wled_config cfg;
-};
-
-static int pm8941_wled_update_status(struct backlight_device *bl)
-{
- struct pm8941_wled *wled = bl_get_data(bl);
- u16 val = bl->props.brightness;
- u8 ctrl = 0;
- int rc;
- int i;
-
- if (bl->props.power != FB_BLANK_UNBLANK ||
- bl->props.fb_blank != FB_BLANK_UNBLANK ||
- bl->props.state & BL_CORE_FBBLANK)
- val = 0;
-
- if (val != 0)
- ctrl = PM8941_WLED_REG_MOD_EN_BIT;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_MOD_EN,
- PM8941_WLED_REG_MOD_EN_MASK, ctrl);
- if (rc)
- return rc;
-
- for (i = 0; i < wled->cfg.num_strings; ++i) {
- u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
-
- rc = regmap_bulk_write(wled->regmap,
- wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
- v, 2);
- if (rc)
- return rc;
- }
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_SYNC,
- PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_SYNC,
- PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
- return rc;
-}
-
-static int pm8941_wled_setup(struct pm8941_wled *wled)
-{
- int rc;
- int i;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_OVP,
- PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_BOOST,
- PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_FREQ,
- PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
- if (rc)
- return rc;
-
- if (wled->cfg.cs_out_en) {
- u8 all = (BIT(wled->cfg.num_strings) - 1)
- << PM8941_WLED_REG_SINK_SHFT;
-
- rc = regmap_update_bits(wled->regmap,
- wled->addr + PM8941_WLED_REG_SINK,
- PM8941_WLED_REG_SINK_MASK, all);
- if (rc)
- return rc;
- }
-
- for (i = 0; i < wled->cfg.num_strings; ++i) {
- u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
-
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
- PM8941_WLED_REG_STR_MOD_MASK,
- PM8941_WLED_REG_STR_MOD_EN);
- if (rc)
- return rc;
-
- if (wled->cfg.ext_gen) {
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
- PM8941_WLED_REG_STR_MOD_SRC_MASK,
- PM8941_WLED_REG_STR_MOD_SRC_EXT);
- if (rc)
- return rc;
- }
-
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_SCALE_BASE,
- PM8941_WLED_REG_STR_SCALE_MASK,
- wled->cfg.i_limit);
- if (rc)
- return rc;
-
- rc = regmap_update_bits(wled->regmap,
- addr + PM8941_WLED_REG_STR_CABC_BASE,
- PM8941_WLED_REG_STR_CABC_MASK,
- wled->cfg.cabc_en ?
- PM8941_WLED_REG_STR_CABC_EN : 0);
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
-static const struct pm8941_wled_config pm8941_wled_config_defaults = {
- .i_boost_limit = 3,
- .i_limit = 20,
- .ovp = 2,
- .switch_freq = 5,
- .num_strings = 0,
- .cs_out_en = false,
- .ext_gen = false,
- .cabc_en = false,
-};
-
-struct pm8941_wled_var_cfg {
- const u32 *values;
- u32 (*fn)(u32);
- int size;
-};
-
-static const u32 pm8941_wled_i_boost_limit_values[] = {
- 105, 385, 525, 805, 980, 1260, 1400, 1680,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
- .values = pm8941_wled_i_boost_limit_values,
- .size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
-};
-
-static const u32 pm8941_wled_ovp_values[] = {
- 35, 32, 29, 27,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
- .values = pm8941_wled_ovp_values,
- .size = ARRAY_SIZE(pm8941_wled_ovp_values),
-};
-
-static u32 pm8941_wled_num_strings_values_fn(u32 idx)
-{
- return idx + 1;
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
- .fn = pm8941_wled_num_strings_values_fn,
- .size = 3,
-};
-
-static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
-{
- return 19200 / (2 * (1 + idx));
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
- .fn = pm8941_wled_switch_freq_values_fn,
- .size = 16,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
- .size = 26,
-};
-
-static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
-{
- if (idx >= cfg->size)
- return UINT_MAX;
- if (cfg->fn)
- return cfg->fn(idx);
- if (cfg->values)
- return cfg->values[idx];
- return idx;
-}
-
-static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
-{
- struct pm8941_wled_config *cfg = &wled->cfg;
- u32 val;
- int rc;
- u32 c;
- int i;
- int j;
-
- const struct {
- const char *name;
- u32 *val_ptr;
- const struct pm8941_wled_var_cfg *cfg;
- } u32_opts[] = {
- {
- "qcom,current-boost-limit",
- &cfg->i_boost_limit,
- .cfg = &pm8941_wled_i_boost_limit_cfg,
- },
- {
- "qcom,current-limit",
- &cfg->i_limit,
- .cfg = &pm8941_wled_i_limit_cfg,
- },
- {
- "qcom,ovp",
- &cfg->ovp,
- .cfg = &pm8941_wled_ovp_cfg,
- },
- {
- "qcom,switching-freq",
- &cfg->switch_freq,
- .cfg = &pm8941_wled_switch_freq_cfg,
- },
- {
- "qcom,num-strings",
- &cfg->num_strings,
- .cfg = &pm8941_wled_num_strings_cfg,
- },
- };
- const struct {
- const char *name;
- bool *val_ptr;
- } bool_opts[] = {
- { "qcom,cs-out", &cfg->cs_out_en, },
- { "qcom,ext-gen", &cfg->ext_gen, },
- { "qcom,cabc", &cfg->cabc_en, },
- };
-
- rc = of_property_read_u32(dev->of_node, "reg", &val);
- if (rc || val > 0xffff) {
- dev_err(dev, "invalid IO resources\n");
- return rc ? rc : -EINVAL;
- }
- wled->addr = val;
-
- rc = of_property_read_string(dev->of_node, "label", &wled->name);
- if (rc)
- wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
-
- *cfg = pm8941_wled_config_defaults;
- for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
- rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
- if (rc == -EINVAL) {
- continue;
- } else if (rc) {
- dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
- return rc;
- }
-
- c = UINT_MAX;
- for (j = 0; c != val; j++) {
- c = pm8941_wled_values(u32_opts[i].cfg, j);
- if (c == UINT_MAX) {
- dev_err(dev, "invalid value for '%s'\n",
- u32_opts[i].name);
- return -EINVAL;
- }
- }
-
- dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
- *u32_opts[i].val_ptr = j;
- }
-
- for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
- if (of_property_read_bool(dev->of_node, bool_opts[i].name))
- *bool_opts[i].val_ptr = true;
- }
-
- cfg->num_strings = cfg->num_strings + 1;
-
- return 0;
-}
-
-static const struct backlight_ops pm8941_wled_ops = {
- .update_status = pm8941_wled_update_status,
-};
-
-static int pm8941_wled_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct backlight_device *bl;
- struct pm8941_wled *wled;
- struct regmap *regmap;
- u32 val;
- int rc;
-
- regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!regmap) {
- dev_err(&pdev->dev, "Unable to get regmap\n");
- return -EINVAL;
- }
-
- wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
- if (!wled)
- return -ENOMEM;
-
- wled->regmap = regmap;
-
- rc = pm8941_wled_configure(wled, &pdev->dev);
- if (rc)
- return rc;
-
- rc = pm8941_wled_setup(wled);
- if (rc)
- return rc;
-
- val = PM8941_WLED_DEFAULT_BRIGHTNESS;
- of_property_read_u32(pdev->dev.of_node, "default-brightness", &val);
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.brightness = val;
- props.max_brightness = PM8941_WLED_REG_VAL_MAX;
- bl = devm_backlight_device_register(&pdev->dev, wled->name,
- &pdev->dev, wled,
- &pm8941_wled_ops, &props);
- return PTR_ERR_OR_ZERO(bl);
-};
-
-static const struct of_device_id pm8941_wled_match_table[] = {
- { .compatible = "qcom,pm8941-wled" },
- {}
-};
-MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
-
-static struct platform_driver pm8941_wled_driver = {
- .probe = pm8941_wled_probe,
- .driver = {
- .name = "pm8941-wled",
- .of_match_table = pm8941_wled_match_table,
- },
-};
-
-module_platform_driver(pm8941_wled_driver);
-
-MODULE_DESCRIPTION("pm8941 wled driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 746eebc411df..efb4efc2a13d 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -125,8 +125,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
state.duty_cycle = compute_duty_cycle(pb, brightness);
pwm_apply_state(pb->pwm, &state);
pwm_backlight_power_on(pb);
- } else
+ } else {
pwm_backlight_power_off(pb);
+ }
if (pb->notify_after)
pb->notify_after(pb->dev, brightness);
@@ -148,15 +149,16 @@ static const struct backlight_ops pwm_backlight_ops = {
};
#ifdef CONFIG_OF
-#define PWM_LUMINANCE_SCALE 10000 /* luminance scale */
+#define PWM_LUMINANCE_SHIFT 16
+#define PWM_LUMINANCE_SCALE (1 << PWM_LUMINANCE_SHIFT) /* luminance scale */
/*
* CIE lightness to PWM conversion.
*
* The CIE 1931 lightness formula is what actually describes how we perceive
* light:
- * Y = (L* / 902.3) if L* ≤ 0.08856
- * Y = ((L* + 16) / 116)^3 if L* > 0.08856
+ * Y = (L* / 903.3) if L* ≤ 8
+ * Y = ((L* + 16) / 116)^3 if L* > 8
*
* Where Y is the luminance, the amount of light coming out of the screen, and
* is a number between 0.0 and 1.0; and L* is the lightness, how bright a human
@@ -165,16 +167,25 @@ static const struct backlight_ops pwm_backlight_ops = {
* The following function does the fixed point maths needed to implement the
* above formula.
*/
-static u64 cie1931(unsigned int lightness, unsigned int scale)
+static u64 cie1931(unsigned int lightness)
{
u64 retval;
+ /*
+ * @lightness is given as a number between 0 and 1, expressed as a
+ * fixed-point number in scale PWM_LUMINANCE_SCALE. Convert it to a
+ * percentage, still expressed as a fixed-point number, so the above
+ * formulas can be applied.
+ */
lightness *= 100;
- if (lightness <= (8 * scale)) {
- retval = DIV_ROUND_CLOSEST_ULL(lightness * 10, 9023);
+ if (lightness <= (8 * PWM_LUMINANCE_SCALE)) {
+ retval = DIV_ROUND_CLOSEST(lightness * 10, 9033);
} else {
- retval = int_pow((lightness + (16 * scale)) / 116, 3);
- retval = DIV_ROUND_CLOSEST_ULL(retval, (scale * scale));
+ retval = (lightness + (16 * PWM_LUMINANCE_SCALE)) / 116;
+ retval *= retval * retval;
+ retval += 1ULL << (2*PWM_LUMINANCE_SHIFT - 1);
+ retval >>= 2*PWM_LUMINANCE_SHIFT;
}
return retval;
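
The fixed-point path above is easy to sanity-check outside the kernel. Below is a minimal standalone C sketch of the same arithmetic (an assumed userspace harness, not driver code), with DIV_ROUND_CLOSEST open-coded:

#include <stdio.h>
#include <stdint.h>

#define SHIFT	16			/* PWM_LUMINANCE_SHIFT */
#define SCALE	(1u << SHIFT)		/* PWM_LUMINANCE_SCALE */

static uint64_t cie1931(unsigned int lightness)
{
	uint64_t retval;

	lightness *= 100;		/* 0..SCALE -> percentage */
	if (lightness <= 8 * SCALE) {
		/* Y = L* / 903.3, rounded to nearest */
		retval = (lightness * 10ull + 9033 / 2) / 9033;
	} else {
		/* Y = ((L* + 16) / 116)^3, with a rounding bias */
		retval = (lightness + 16 * SCALE) / 116;
		retval = retval * retval * retval;
		retval += 1ull << (2 * SHIFT - 1);
		retval >>= 2 * SHIFT;
	}
	return retval;
}

int main(void)
{
	/* 50% lightness maps to ~18% luminance, as CIE 1931 predicts */
	printf("%.4f\n", (double)cie1931(SCALE / 2) / SCALE);
	return 0;
}
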
@@ -208,8 +219,7 @@ int pwm_backlight_brightness_default(struct device *dev,
/* Fill the table using the cie1931 algorithm */
for (i = 0; i < data->max_brightness; i++) {
retval = cie1931((i * PWM_LUMINANCE_SCALE) /
- data->max_brightness, PWM_LUMINANCE_SCALE) *
- period;
+ data->max_brightness) * period;
retval = DIV_ROUND_CLOSEST_ULL(retval, PWM_LUMINANCE_SCALE);
if (retval > UINT_MAX)
return -EINVAL;
@@ -564,18 +574,17 @@ static int pwm_backlight_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
if (data->levels) {
+ pb->levels = data->levels;
+
/*
* For the DT case, only when brightness levels is defined
* data->levels is filled. For the non-DT case, data->levels
* can come from platform data, however is not usual.
*/
- for (i = 0; i <= data->max_brightness; i++) {
+ for (i = 0; i <= data->max_brightness; i++)
if (data->levels[i] > pb->scale)
pb->scale = data->levels[i];
- pb->levels = data->levels;
- }
-
if (pwm_backlight_is_linear(data))
props.scale = BACKLIGHT_SCALE_LINEAR;
else
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
new file mode 100644
index 000000000000..d46052d8ff41
--- /dev/null
+++ b/drivers/video/backlight/qcom-wled.c
@@ -0,0 +1,1296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015, Sony Mobile Communications, AB.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/kernel.h>
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+/* From DT binding */
+#define WLED_MAX_STRINGS 4
+
+#define WLED_DEFAULT_BRIGHTNESS 2048
+#define WLED_SOFT_START_DLY_US 10000
+#define WLED3_SINK_REG_BRIGHT_MAX 0xFFF
+
+/* WLED3/WLED4 control registers */
+#define WLED3_CTRL_REG_FAULT_STATUS 0x08
+#define WLED3_CTRL_REG_ILIM_FAULT_BIT BIT(0)
+#define WLED3_CTRL_REG_OVP_FAULT_BIT BIT(1)
+#define WLED4_CTRL_REG_SC_FAULT_BIT BIT(2)
+
+#define WLED3_CTRL_REG_INT_RT_STS 0x10
+#define WLED3_CTRL_REG_OVP_FAULT_STATUS BIT(1)
+
+#define WLED3_CTRL_REG_MOD_EN 0x46
+#define WLED3_CTRL_REG_MOD_EN_MASK BIT(7)
+#define WLED3_CTRL_REG_MOD_EN_SHIFT 7
+
+#define WLED3_CTRL_REG_FEEDBACK_CONTROL 0x48
+
+#define WLED3_CTRL_REG_FREQ 0x4c
+#define WLED3_CTRL_REG_FREQ_MASK GENMASK(3, 0)
+
+#define WLED3_CTRL_REG_OVP 0x4d
+#define WLED3_CTRL_REG_OVP_MASK GENMASK(1, 0)
+
+#define WLED3_CTRL_REG_ILIMIT 0x4e
+#define WLED3_CTRL_REG_ILIMIT_MASK GENMASK(2, 0)
+
+/* WLED3/WLED4 sink registers */
+#define WLED3_SINK_REG_SYNC 0x47
+#define WLED3_SINK_REG_SYNC_CLEAR 0x00
+
+#define WLED3_SINK_REG_CURR_SINK 0x4f
+#define WLED3_SINK_REG_CURR_SINK_MASK GENMASK(7, 5)
+#define WLED3_SINK_REG_CURR_SINK_SHFT 5
+
+/* WLED3 specific per-'string' registers below */
+#define WLED3_SINK_REG_BRIGHT(n) (0x40 + n)
+
+#define WLED3_SINK_REG_STR_MOD_EN(n) (0x60 + (n * 0x10))
+#define WLED3_SINK_REG_STR_MOD_MASK BIT(7)
+
+#define WLED3_SINK_REG_STR_FULL_SCALE_CURR(n) (0x62 + (n * 0x10))
+#define WLED3_SINK_REG_STR_FULL_SCALE_CURR_MASK GENMASK(4, 0)
+
+#define WLED3_SINK_REG_STR_MOD_SRC(n) (0x63 + (n * 0x10))
+#define WLED3_SINK_REG_STR_MOD_SRC_MASK BIT(0)
+#define WLED3_SINK_REG_STR_MOD_SRC_INT 0x00
+#define WLED3_SINK_REG_STR_MOD_SRC_EXT 0x01
+
+#define WLED3_SINK_REG_STR_CABC(n) (0x66 + (n * 0x10))
+#define WLED3_SINK_REG_STR_CABC_MASK BIT(7)
+
+/* WLED4 specific control registers */
+#define WLED4_CTRL_REG_SHORT_PROTECT 0x5e
+#define WLED4_CTRL_REG_SHORT_EN_MASK BIT(7)
+
+#define WLED4_CTRL_REG_SEC_ACCESS 0xd0
+#define WLED4_CTRL_REG_SEC_UNLOCK 0xa5
+
+#define WLED4_CTRL_REG_TEST1 0xe2
+#define WLED4_CTRL_REG_TEST1_EXT_FET_DTEST2 0x09
+
+/* WLED4 specific sink registers */
+#define WLED4_SINK_REG_CURR_SINK 0x46
+#define WLED4_SINK_REG_CURR_SINK_MASK GENMASK(7, 4)
+#define WLED4_SINK_REG_CURR_SINK_SHFT 4
+
+/* WLED4 specific per-'string' registers below */
+#define WLED4_SINK_REG_STR_MOD_EN(n) (0x50 + (n * 0x10))
+#define WLED4_SINK_REG_STR_MOD_MASK BIT(7)
+
+#define WLED4_SINK_REG_STR_FULL_SCALE_CURR(n) (0x52 + (n * 0x10))
+#define WLED4_SINK_REG_STR_FULL_SCALE_CURR_MASK GENMASK(3, 0)
+
+#define WLED4_SINK_REG_STR_MOD_SRC(n) (0x53 + (n * 0x10))
+#define WLED4_SINK_REG_STR_MOD_SRC_MASK BIT(0)
+#define WLED4_SINK_REG_STR_MOD_SRC_INT 0x00
+#define WLED4_SINK_REG_STR_MOD_SRC_EXT 0x01
+
+#define WLED4_SINK_REG_STR_CABC(n) (0x56 + (n * 0x10))
+#define WLED4_SINK_REG_STR_CABC_MASK BIT(7)
+
+#define WLED4_SINK_REG_BRIGHT(n) (0x57 + (n * 0x10))
+
+struct wled_var_cfg {
+ const u32 *values;
+ u32 (*fn)(u32);
+ int size;
+};
+
+struct wled_u32_opts {
+ const char *name;
+ u32 *val_ptr;
+ const struct wled_var_cfg *cfg;
+};
+
+struct wled_bool_opts {
+ const char *name;
+ bool *val_ptr;
+};
+
+struct wled_config {
+ u32 boost_i_limit;
+ u32 ovp;
+ u32 switch_freq;
+ u32 num_strings;
+ u32 string_i_limit;
+ u32 enabled_strings[WLED_MAX_STRINGS];
+ bool cs_out_en;
+ bool ext_gen;
+ bool cabc;
+ bool external_pfet;
+ bool auto_detection_enabled;
+};
+
+struct wled {
+ const char *name;
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock; /* Lock to avoid races with the threaded IRQ handler */
+ ktime_t last_short_event;
+ ktime_t start_ovp_fault_time;
+ u16 ctrl_addr;
+ u16 sink_addr;
+ u16 max_string_count;
+ u16 auto_detection_ovp_count;
+ u32 brightness;
+ u32 max_brightness;
+ u32 short_count;
+ u32 auto_detect_count;
+ bool disabled_by_short;
+ bool has_short_detect;
+ int short_irq;
+ int ovp_irq;
+
+ struct wled_config cfg;
+ struct delayed_work ovp_work;
+ int (*wled_set_brightness)(struct wled *wled, u16 brightness);
+};
+
+static int wled3_set_brightness(struct wled *wled, u16 brightness)
+{
+ int rc, i;
+ u8 v[2];
+
+ v[0] = brightness & 0xff;
+ v[1] = (brightness >> 8) & 0xf;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ rc = regmap_bulk_write(wled->regmap, wled->ctrl_addr +
+ WLED3_SINK_REG_BRIGHT(i), v, 2);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
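
The two-byte split above packs a 12-bit brightness as a low byte plus a high nibble; an illustrative standalone round-trip check (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t brightness = 0xABC;	/* any 12-bit value */
	uint8_t v[2];

	v[0] = brightness & 0xff;	/* low byte    -> 0xBC */
	v[1] = (brightness >> 8) & 0xf;	/* high nibble -> 0x0A */

	assert(((uint16_t)v[1] << 8 | v[0]) == brightness);
	return 0;
}
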
+
+static int wled4_set_brightness(struct wled *wled, u16 brightness)
+{
+ int rc, i;
+ u16 low_limit = wled->max_brightness * 4 / 1000;
+ u8 v[2];
+
+ /* WLED4's lower limit of operation is 0.4% */
+ if (brightness > 0 && brightness < low_limit)
+ brightness = low_limit;
+
+ v[0] = brightness & 0xff;
+ v[1] = (brightness >> 8) & 0xf;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ rc = regmap_bulk_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_BRIGHT(i), v, 2);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
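
For the default 12-bit range (WLED3_SINK_REG_BRIGHT_MAX = 0xFFF), the 0.4% floor evaluates to 16; an illustrative standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int max_brightness = 0xFFF;			/* 4095 */
	unsigned int low_limit = max_brightness * 4 / 1000;	/* 16 */

	/* requests in (0, 16) are raised to 16; 0 still means off */
	printf("low_limit = %u\n", low_limit);
	return 0;
}
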
+
+static void wled_ovp_work(struct work_struct *work)
+{
+ struct wled *wled = container_of(work,
+ struct wled, ovp_work.work);
+ enable_irq(wled->ovp_irq);
+}
+
+static int wled_module_enable(struct wled *wled, int val)
+{
+ int rc;
+
+ if (wled->disabled_by_short)
+ return -ENXIO;
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ val << WLED3_CTRL_REG_MOD_EN_SHIFT);
+ if (rc < 0)
+ return rc;
+
+ if (wled->ovp_irq > 0) {
+ if (val) {
+ /*
+ * The hardware generates a storm of spurious OVP
+ * interrupts during soft start operations. So defer
+ * enabling the IRQ for 10ms to ensure that the
+ * soft start is complete.
+ */
+ schedule_delayed_work(&wled->ovp_work, HZ / 100);
+ } else {
+ if (!cancel_delayed_work_sync(&wled->ovp_work))
+ disable_irq(wled->ovp_irq);
+ }
+ }
+
+ return 0;
+}
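
The 10 ms figure in the comment corresponds to the HZ / 100 delay passed to schedule_delayed_work(); a standalone sketch (illustrative only) of how that rounds for common CONFIG_HZ choices:

#include <stdio.h>

int main(void)
{
	const unsigned int hz_values[] = { 100, 250, 300, 1000 };
	const int n = sizeof(hz_values) / sizeof(hz_values[0]);

	for (int i = 0; i < n; i++) {
		unsigned int hz = hz_values[i];
		unsigned int jiffies = hz / 100;

		printf("HZ=%-4u -> %u jiffies = %u ms\n",
		       hz, jiffies, jiffies * 1000 / hz);
	}
	return 0;
}
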
+
+static int wled_sync_toggle(struct wled *wled)
+{
+ int rc;
+ unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_SINK_REG_SYNC,
+ mask, mask);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_SINK_REG_SYNC,
+ mask, WLED3_SINK_REG_SYNC_CLEAR);
+
+ return rc;
+}
+
+static int wled_update_status(struct backlight_device *bl)
+{
+ struct wled *wled = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int rc = 0;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
+ brightness = 0;
+
+ mutex_lock(&wled->lock);
+ if (brightness) {
+ rc = wled->wled_set_brightness(wled, brightness);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled failed to set brightness rc:%d\n",
+ rc);
+ goto unlock_mutex;
+ }
+
+ rc = wled_sync_toggle(wled);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
+ goto unlock_mutex;
+ }
+ }
+
+ if (!!brightness != !!wled->brightness) {
+ rc = wled_module_enable(wled, !!brightness);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled enable failed rc:%d\n", rc);
+ goto unlock_mutex;
+ }
+ }
+
+ wled->brightness = brightness;
+
+unlock_mutex:
+ mutex_unlock(&wled->lock);
+
+ return rc;
+}
+
+#define WLED_SHORT_DLY_MS 20
+#define WLED_SHORT_CNT_MAX 5
+#define WLED_SHORT_RESET_CNT_DLY_US USEC_PER_SEC
+
+static irqreturn_t wled_short_irq_handler(int irq, void *_wled)
+{
+ struct wled *wled = _wled;
+ int rc;
+ s64 elapsed_time;
+
+ wled->short_count++;
+ mutex_lock(&wled->lock);
+ rc = wled_module_enable(wled, false);
+ if (rc < 0) {
+ dev_err(wled->dev, "wled disable failed rc:%d\n", rc);
+ goto unlock_mutex;
+ }
+
+ elapsed_time = ktime_us_delta(ktime_get(),
+ wled->last_short_event);
+ if (elapsed_time > WLED_SHORT_RESET_CNT_DLY_US)
+ wled->short_count = 1;
+
+ if (wled->short_count > WLED_SHORT_CNT_MAX) {
+ dev_err(wled->dev, "Short triggered %d times, disabling WLED forever!\n",
+ wled->short_count);
+ wled->disabled_by_short = true;
+ goto unlock_mutex;
+ }
+
+ wled->last_short_event = ktime_get();
+
+ msleep(WLED_SHORT_DLY_MS);
+ rc = wled_module_enable(wled, true);
+ if (rc < 0)
+ dev_err(wled->dev, "wled enable failed rc:%d\n", rc);
+
+unlock_mutex:
+ mutex_unlock(&wled->lock);
+
+ return IRQ_HANDLED;
+}
+
+#define AUTO_DETECT_BRIGHTNESS 200
+
+static void wled_auto_string_detection(struct wled *wled)
+{
+ int rc = 0, i;
+ u32 sink_config = 0, int_sts;
+ u8 sink_test = 0, sink_valid = 0, val;
+
+ /* Read the current sink configuration */
+ rc = regmap_read(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_CURR_SINK, &sink_config);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read SINK configuration rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ /* Disable the module before starting detection */
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to disable WLED module rc=%d\n", rc);
+ goto failed_detect;
+ }
+
+ /* Set low brightness across all sinks */
+ rc = wled4_set_brightness(wled, AUTO_DETECT_BRIGHTNESS);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to set brightness for auto detection rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ if (wled->cfg.cabc) {
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(i),
+ WLED4_SINK_REG_STR_CABC_MASK,
+ 0);
+ if (rc < 0)
+ goto failed_detect;
+ }
+ }
+
+ /* Disable all sinks */
+ rc = regmap_write(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to disable all sinks rc=%d\n", rc);
+ goto failed_detect;
+ }
+
+ /* Iterate through the strings one by one */
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + i));
+
+ /* Enable feedback control */
+ rc = regmap_write(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_FEEDBACK_CONTROL, i + 1);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to enable feedback for SINK %d rc = %d\n",
+ i + 1, rc);
+ goto failed_detect;
+ }
+
+ /* Enable the sink */
+ rc = regmap_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_CURR_SINK, sink_test);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to configure SINK %d rc=%d\n",
+ i + 1, rc);
+ goto failed_detect;
+ }
+
+ /* Enable the module */
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ WLED3_CTRL_REG_MOD_EN_MASK);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to enable WLED module rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ usleep_range(WLED_SOFT_START_DLY_US,
+ WLED_SOFT_START_DLY_US + 1000);
+
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_INT_RT_STS, &int_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Error in reading WLED3_CTRL_INT_RT_STS rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ if (int_sts & WLED3_CTRL_REG_OVP_FAULT_STATUS)
+ dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n",
+ i + 1);
+ else
+ sink_valid |= sink_test;
+
+ /* Disable the module */
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to disable WLED module rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+ }
+
+ if (!sink_valid) {
+ dev_err(wled->dev, "No valid WLED sinks found\n");
+ wled->disabled_by_short = true;
+ goto failed_detect;
+ }
+
+ if (sink_valid != sink_config) {
+ dev_warn(wled->dev, "%x is not a valid sink configuration - using %x instead\n",
+ sink_config, sink_valid);
+ sink_config = sink_valid;
+ }
+
+ /* Write the new sink configuration */
+ rc = regmap_write(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ sink_config);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to reconfigure the default sink rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ /* Enable valid sinks */
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ if (wled->cfg.cabc) {
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(i),
+ WLED4_SINK_REG_STR_CABC_MASK,
+ WLED4_SINK_REG_STR_CABC_MASK);
+ if (rc < 0)
+ goto failed_detect;
+ }
+
+ if (sink_config & BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
+ val = WLED4_SINK_REG_STR_MOD_MASK;
+ else
+ val = 0x0; /* Disable modulator_en for unused sink */
+
+ rc = regmap_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_MOD_EN(i), val);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+ }
+
+ /* Restore the feedback setting */
+ rc = regmap_write(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FEEDBACK_CONTROL, 0);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to restore feedback setting rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ /* Restore brightness */
+ rc = wled4_set_brightness(wled, wled->brightness);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to set brightness after auto detection rc=%d\n",
+ rc);
+ goto failed_detect;
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ WLED3_CTRL_REG_MOD_EN_MASK);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to enable WLED module rc=%d\n", rc);
+ goto failed_detect;
+ }
+
+failed_detect:
+ return;
+}
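
The probe loop above reduces to accumulating a bitmask of sinks that survive an individual OVP test; a standalone sketch where string_faulted() is a hypothetical stand-in for the fault-status read:

#include <stdbool.h>
#include <stdio.h>

#define SINK_SHIFT 4	/* WLED4_SINK_REG_CURR_SINK_SHFT */

static bool string_faulted(int i)
{
	return i == 2;	/* pretend string 3 is open or shorted */
}

int main(void)
{
	unsigned int sink_valid = 0;

	for (int i = 0; i < 4; i++) {
		unsigned int sink_test = 1u << (SINK_SHIFT + i);

		/* keep the sink only if it did not trip OVP */
		if (!string_faulted(i))
			sink_valid |= sink_test;
	}
	printf("sink config: 0x%02x\n", sink_valid);	/* 0xb0 */
	return 0;
}
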
+
+#define WLED_AUTO_DETECT_OVP_COUNT 5
+#define WLED_AUTO_DETECT_CNT_DLY_US USEC_PER_SEC
+static bool wled_auto_detection_required(struct wled *wled)
+{
+ s64 elapsed_time_us;
+
+ if (!wled->cfg.auto_detection_enabled)
+ return false;
+
+ /*
+ * Check if the OVP fault was an occasional one or if it is
+ * firing continuously; the latter qualifies for an
+ * auto-detection check.
+ */
+ if (!wled->auto_detection_ovp_count) {
+ wled->start_ovp_fault_time = ktime_get();
+ wled->auto_detection_ovp_count++;
+ } else {
+ elapsed_time_us = ktime_us_delta(ktime_get(),
+ wled->start_ovp_fault_time);
+ if (elapsed_time_us > WLED_AUTO_DETECT_CNT_DLY_US)
+ wled->auto_detection_ovp_count = 0;
+ else
+ wled->auto_detection_ovp_count++;
+
+ if (wled->auto_detection_ovp_count >=
+ WLED_AUTO_DETECT_OVP_COUNT) {
+ wled->auto_detection_ovp_count = 0;
+ return true;
+ }
+ }
+
+ return false;
+}
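
The heuristic above counts OVP faults inside a one-second window, restarting the window when the faults stop; a standalone sketch of the same logic with a fake microsecond clock (illustrative only):

#include <stdbool.h>
#include <stdint.h>

#define OVP_COUNT_MAX	5		/* WLED_AUTO_DETECT_OVP_COUNT */
#define WINDOW_US	1000000		/* WLED_AUTO_DETECT_CNT_DLY_US */

static unsigned int count;
static int64_t window_start_us;

static bool detection_required(int64_t now_us)
{
	if (!count) {
		window_start_us = now_us;
		count++;
	} else {
		if (now_us - window_start_us > WINDOW_US)
			count = 0;	/* faults stopped; restart window */
		else
			count++;

		if (count >= OVP_COUNT_MAX) {
			count = 0;
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* five faults 10 ms apart -> detection fires on the fifth */
	for (int i = 0; i < 5; i++)
		if (detection_required(i * 10000))
			return 0;
	return 1;
}
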
+
+static int wled_auto_detection_at_init(struct wled *wled)
+{
+ int rc;
+ u32 fault_status, rt_status;
+
+ if (!wled->cfg.auto_detection_enabled)
+ return 0;
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS,
+ &rt_status);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read RT status rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FAULT_STATUS,
+ &fault_status);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read fault status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((rt_status & WLED3_CTRL_REG_OVP_FAULT_STATUS) ||
+ (fault_status & WLED3_CTRL_REG_OVP_FAULT_BIT)) {
+ mutex_lock(&wled->lock);
+ wled_auto_string_detection(wled);
+ mutex_unlock(&wled->lock);
+ }
+
+ return rc;
+}
+
+static irqreturn_t wled_ovp_irq_handler(int irq, void *_wled)
+{
+ struct wled *wled = _wled;
+ int rc;
+ u32 int_sts, fault_sts;
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS, &int_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Error in reading WLED3_INT_RT_STS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_FAULT_STATUS, &fault_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Error in reading WLED_FAULT_STATUS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ if (fault_sts & (WLED3_CTRL_REG_OVP_FAULT_BIT |
+ WLED3_CTRL_REG_ILIM_FAULT_BIT))
+ dev_dbg(wled->dev, "WLED OVP fault detected, int_sts=%x fault_sts=%x\n",
+ int_sts, fault_sts);
+
+ if (fault_sts & WLED3_CTRL_REG_OVP_FAULT_BIT) {
+ if (wled_auto_detection_required(wled)) {
+ mutex_lock(&wled->lock);
+ wled_auto_string_detection(wled);
+ mutex_unlock(&wled->lock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int wled3_setup(struct wled *wled)
+{
+ u16 addr;
+ u8 sink_en = 0;
+ int rc, i, j;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_OVP,
+ WLED3_CTRL_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_ILIMIT,
+ WLED3_CTRL_REG_ILIMIT_MASK,
+ wled->cfg.boost_i_limit);
+ if (rc)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FREQ,
+ WLED3_CTRL_REG_FREQ_MASK,
+ wled->cfg.switch_freq);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ j = wled->cfg.enabled_strings[i];
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_MOD_EN(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_MOD_MASK,
+ WLED3_SINK_REG_STR_MOD_MASK);
+ if (rc)
+ return rc;
+
+ if (wled->cfg.ext_gen) {
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_MOD_SRC(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_MOD_SRC_MASK,
+ WLED3_SINK_REG_STR_MOD_SRC_EXT);
+ if (rc)
+ return rc;
+ }
+
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_FULL_SCALE_CURR(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_FULL_SCALE_CURR_MASK,
+ wled->cfg.string_i_limit);
+ if (rc)
+ return rc;
+
+ addr = wled->ctrl_addr + WLED3_SINK_REG_STR_CABC(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED3_SINK_REG_STR_CABC_MASK,
+ wled->cfg.cabc ?
+ WLED3_SINK_REG_STR_CABC_MASK : 0);
+ if (rc)
+ return rc;
+
+ sink_en |= BIT(j + WLED3_SINK_REG_CURR_SINK_SHFT);
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_SINK_REG_CURR_SINK,
+ WLED3_SINK_REG_CURR_SINK_MASK, sink_en);
+ if (rc)
+ return rc;
+
+ return 0;
+}
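
The per-string registers sit in 0x10-sized banks starting at 0x60, so the WLED3_SINK_REG_STR_* macros used above reduce to simple stride arithmetic; an illustrative standalone check:

#include <stdio.h>

#define WLED3_SINK_REG_STR_MOD_EN(n)	(0x60 + (n) * 0x10)

int main(void)
{
	for (int n = 0; n < 3; n++)
		printf("string %d: MOD_EN at 0x%02x\n",
		       n, (unsigned int)WLED3_SINK_REG_STR_MOD_EN(n));
	/* prints 0x60, 0x70, 0x80 */
	return 0;
}
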
+
+static const struct wled_config wled3_config_defaults = {
+ .boost_i_limit = 3,
+ .string_i_limit = 20,
+ .ovp = 2,
+ .num_strings = 3,
+ .switch_freq = 5,
+ .cs_out_en = false,
+ .ext_gen = false,
+ .cabc = false,
+ .enabled_strings = {0, 1, 2, 3},
+};
+
+static int wled4_setup(struct wled *wled)
+{
+ int rc, temp, i, j;
+ u16 addr;
+ u8 sink_en = 0;
+ u32 sink_cfg;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_OVP,
+ WLED3_CTRL_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_ILIMIT,
+ WLED3_CTRL_REG_ILIMIT_MASK,
+ wled->cfg.boost_i_limit);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FREQ,
+ WLED3_CTRL_REG_FREQ_MASK,
+ wled->cfg.switch_freq);
+ if (rc < 0)
+ return rc;
+
+ if (wled->cfg.external_pfet) {
+ /* Unlock the secure register access */
+ rc = regmap_write(wled->regmap, wled->ctrl_addr +
+ WLED4_CTRL_REG_SEC_ACCESS,
+ WLED4_CTRL_REG_SEC_UNLOCK);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_write(wled->regmap,
+ wled->ctrl_addr + WLED4_CTRL_REG_TEST1,
+ WLED4_CTRL_REG_TEST1_EXT_FET_DTEST2);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = regmap_read(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_CURR_SINK, &sink_cfg);
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
+ temp = j + WLED4_SINK_REG_CURR_SINK_SHFT;
+ sink_en |= 1 << temp;
+ }
+
+ if (sink_cfg == sink_en) {
+ rc = wled_auto_detection_at_init(wled);
+ return rc;
+ }
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ WLED4_SINK_REG_CURR_SINK_MASK, 0);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK, 0);
+ if (rc < 0)
+ return rc;
+
+ /* Per sink/string configuration */
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
+
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_MOD_EN(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_MOD_MASK,
+ WLED4_SINK_REG_STR_MOD_MASK);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR_MASK,
+ wled->cfg.string_i_limit);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_CABC_MASK,
+ wled->cfg.cabc ?
+ WLED4_SINK_REG_STR_CABC_MASK : 0);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN,
+ WLED3_CTRL_REG_MOD_EN_MASK,
+ WLED3_CTRL_REG_MOD_EN_MASK);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ WLED4_SINK_REG_CURR_SINK_MASK, sink_en);
+ if (rc < 0)
+ return rc;
+
+ rc = wled_sync_toggle(wled);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to toggle sync reg rc:%d\n", rc);
+ return rc;
+ }
+
+ rc = wled_auto_detection_at_init(wled);
+
+ return rc;
+}
+
+static const struct wled_config wled4_config_defaults = {
+ .boost_i_limit = 4,
+ .string_i_limit = 10,
+ .ovp = 1,
+ .num_strings = 4,
+ .switch_freq = 11,
+ .cabc = false,
+ .external_pfet = false,
+ .auto_detection_enabled = false,
+};
+
+static const u32 wled3_boost_i_limit_values[] = {
+ 105, 385, 525, 805, 980, 1260, 1400, 1680,
+};
+
+static const struct wled_var_cfg wled3_boost_i_limit_cfg = {
+ .values = wled3_boost_i_limit_values,
+ .size = ARRAY_SIZE(wled3_boost_i_limit_values),
+};
+
+static const u32 wled4_boost_i_limit_values[] = {
+ 105, 280, 450, 620, 970, 1150, 1300, 1500,
+};
+
+static const struct wled_var_cfg wled4_boost_i_limit_cfg = {
+ .values = wled4_boost_i_limit_values,
+ .size = ARRAY_SIZE(wled4_boost_i_limit_values),
+};
+
+static const u32 wled3_ovp_values[] = {
+ 35, 32, 29, 27,
+};
+
+static const struct wled_var_cfg wled3_ovp_cfg = {
+ .values = wled3_ovp_values,
+ .size = ARRAY_SIZE(wled3_ovp_values),
+};
+
+static const u32 wled4_ovp_values[] = {
+ 31100, 29600, 19600, 18100,
+};
+
+static const struct wled_var_cfg wled4_ovp_cfg = {
+ .values = wled4_ovp_values,
+ .size = ARRAY_SIZE(wled4_ovp_values),
+};
+
+static u32 wled3_num_strings_values_fn(u32 idx)
+{
+ return idx + 1;
+}
+
+static const struct wled_var_cfg wled3_num_strings_cfg = {
+ .fn = wled3_num_strings_values_fn,
+ .size = 3,
+};
+
+static const struct wled_var_cfg wled4_num_strings_cfg = {
+ .fn = wled3_num_strings_values_fn,
+ .size = 4,
+};
+
+static u32 wled3_switch_freq_values_fn(u32 idx)
+{
+ return 19200 / (2 * (1 + idx));
+}
+
+static const struct wled_var_cfg wled3_switch_freq_cfg = {
+ .fn = wled3_switch_freq_values_fn,
+ .size = 16,
+};
+
+static const struct wled_var_cfg wled3_string_i_limit_cfg = {
+ .size = 26,
+};
+
+static const u32 wled4_string_i_limit_values[] = {
+ 0, 2500, 5000, 7500, 10000, 12500, 15000, 17500, 20000,
+ 22500, 25000, 27500, 30000,
+};
+
+static const struct wled_var_cfg wled4_string_i_limit_cfg = {
+ .values = wled4_string_i_limit_values,
+ .size = ARRAY_SIZE(wled4_string_i_limit_values),
+};
+
+static const struct wled_var_cfg wled3_string_cfg = {
+ .size = 8,
+};
+
+static const struct wled_var_cfg wled4_string_cfg = {
+ .size = 16,
+};
+
+static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx)
+{
+ if (idx >= cfg->size)
+ return UINT_MAX;
+ if (cfg->fn)
+ return cfg->fn(idx);
+ if (cfg->values)
+ return cfg->values[idx];
+ return idx;
+}
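
A decoded entry can come from a lookup table, a generator function, or the index itself; the search loop in wled_configure() below inverts the mapping by scanning indices until the decoded value matches the DT property. A standalone sketch using the wled3 OVP table (illustrative only):

#include <stdio.h>
#include <limits.h>

static const unsigned int ovp_values[] = { 35, 32, 29, 27 };

static unsigned int decode(unsigned int idx)
{
	if (idx >= 4)
		return UINT_MAX;	/* out of range */
	return ovp_values[idx];
}

int main(void)
{
	unsigned int val = 29;		/* e.g. qcom,ovp = <29> in DT */

	for (unsigned int j = 0; ; j++) {
		unsigned int c = decode(j);

		if (c == UINT_MAX)
			return 1;	/* invalid property value */
		if (c == val) {
			printf("register index = %u\n", j);	/* 2 */
			return 0;
		}
	}
}
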
+
+static int wled_configure(struct wled *wled, int version)
+{
+ struct wled_config *cfg = &wled->cfg;
+ struct device *dev = wled->dev;
+ const __be32 *prop_addr;
+ u32 size, val, c, string_len;
+ int rc, i, j;
+
+ const struct wled_u32_opts *u32_opts = NULL;
+ const struct wled_u32_opts wled3_opts[] = {
+ {
+ .name = "qcom,current-boost-limit",
+ .val_ptr = &cfg->boost_i_limit,
+ .cfg = &wled3_boost_i_limit_cfg,
+ },
+ {
+ .name = "qcom,current-limit",
+ .val_ptr = &cfg->string_i_limit,
+ .cfg = &wled3_string_i_limit_cfg,
+ },
+ {
+ .name = "qcom,ovp",
+ .val_ptr = &cfg->ovp,
+ .cfg = &wled3_ovp_cfg,
+ },
+ {
+ .name = "qcom,switching-freq",
+ .val_ptr = &cfg->switch_freq,
+ .cfg = &wled3_switch_freq_cfg,
+ },
+ {
+ .name = "qcom,num-strings",
+ .val_ptr = &cfg->num_strings,
+ .cfg = &wled3_num_strings_cfg,
+ },
+ };
+
+ const struct wled_u32_opts wled4_opts[] = {
+ {
+ .name = "qcom,current-boost-limit",
+ .val_ptr = &cfg->boost_i_limit,
+ .cfg = &wled4_boost_i_limit_cfg,
+ },
+ {
+ .name = "qcom,current-limit-microamp",
+ .val_ptr = &cfg->string_i_limit,
+ .cfg = &wled4_string_i_limit_cfg,
+ },
+ {
+ .name = "qcom,ovp-millivolt",
+ .val_ptr = &cfg->ovp,
+ .cfg = &wled4_ovp_cfg,
+ },
+ {
+ .name = "qcom,switching-freq",
+ .val_ptr = &cfg->switch_freq,
+ .cfg = &wled3_switch_freq_cfg,
+ },
+ {
+ .name = "qcom,num-strings",
+ .val_ptr = &cfg->num_strings,
+ .cfg = &wled4_num_strings_cfg,
+ },
+ };
+
+ const struct wled_bool_opts bool_opts[] = {
+ { "qcom,cs-out", &cfg->cs_out_en, },
+ { "qcom,ext-gen", &cfg->ext_gen, },
+ { "qcom,cabc", &cfg->cabc, },
+ { "qcom,external-pfet", &cfg->external_pfet, },
+ { "qcom,auto-string-detection", &cfg->auto_detection_enabled, },
+ };
+
+ prop_addr = of_get_address(dev->of_node, 0, NULL, NULL);
+ if (!prop_addr) {
+ dev_err(wled->dev, "invalid IO resources\n");
+ return -EINVAL;
+ }
+ wled->ctrl_addr = be32_to_cpu(*prop_addr);
+
+ rc = of_property_read_string(dev->of_node, "label", &wled->name);
+ if (rc)
+ wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+
+ switch (version) {
+ case 3:
+ u32_opts = wled3_opts;
+ size = ARRAY_SIZE(wled3_opts);
+ *cfg = wled3_config_defaults;
+ wled->wled_set_brightness = wled3_set_brightness;
+ wled->max_string_count = 3;
+ wled->sink_addr = wled->ctrl_addr;
+ break;
+
+ case 4:
+ u32_opts = wled4_opts;
+ size = ARRAY_SIZE(wled4_opts);
+ *cfg = wled4_config_defaults;
+ wled->wled_set_brightness = wled4_set_brightness;
+ wled->max_string_count = 4;
+
+ prop_addr = of_get_address(dev->of_node, 1, NULL, NULL);
+ if (!prop_addr) {
+ dev_err(wled->dev, "invalid IO resources\n");
+ return -EINVAL;
+ }
+ wled->sink_addr = be32_to_cpu(*prop_addr);
+ break;
+
+ default:
+ dev_err(wled->dev, "Invalid WLED version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; ++i) {
+ rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
+ if (rc == -EINVAL) {
+ continue;
+ } else if (rc) {
+ dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
+ return rc;
+ }
+
+ c = UINT_MAX;
+ for (j = 0; c != val; j++) {
+ c = wled_values(u32_opts[i].cfg, j);
+ if (c == UINT_MAX) {
+ dev_err(dev, "invalid value for '%s'\n",
+ u32_opts[i].name);
+ return -EINVAL;
+ }
+
+ if (c == val)
+ break;
+ }
+
+ dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
+ *u32_opts[i].val_ptr = j;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
+ if (of_property_read_bool(dev->of_node, bool_opts[i].name))
+ *bool_opts[i].val_ptr = true;
+ }
+
+ cfg->num_strings = cfg->num_strings + 1;
+
+ string_len = of_property_count_elems_of_size(dev->of_node,
+ "qcom,enabled-strings",
+ sizeof(u32));
+ if (string_len > 0)
+ of_property_read_u32_array(dev->of_node,
+ "qcom,enabled-strings",
+ wled->cfg.enabled_strings,
+ string_len);
+
+ return 0;
+}
+
+static int wled_configure_short_irq(struct wled *wled,
+ struct platform_device *pdev)
+{
+ int rc;
+
+ if (!wled->has_short_detect)
+ return 0;
+
+ rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
+ WLED4_CTRL_REG_SHORT_PROTECT,
+ WLED4_CTRL_REG_SHORT_EN_MASK,
+ WLED4_CTRL_REG_SHORT_EN_MASK);
+ if (rc < 0)
+ return rc;
+
+ wled->short_irq = platform_get_irq_byname(pdev, "short");
+ if (wled->short_irq < 0) {
+ dev_dbg(&pdev->dev, "short irq is not used\n");
+ return 0;
+ }
+
+ rc = devm_request_threaded_irq(wled->dev, wled->short_irq,
+ NULL, wled_short_irq_handler,
+ IRQF_ONESHOT,
+ "wled_short_irq", wled);
+ if (rc < 0)
+ dev_err(wled->dev, "Unable to request short_irq (err:%d)\n",
+ rc);
+
+ return rc;
+}
+
+static int wled_configure_ovp_irq(struct wled *wled,
+ struct platform_device *pdev)
+{
+ int rc;
+ u32 val;
+
+ wled->ovp_irq = platform_get_irq_byname(pdev, "ovp");
+ if (wled->ovp_irq < 0) {
+ dev_dbg(&pdev->dev, "OVP IRQ not found - disabling automatic string detection\n");
+ return 0;
+ }
+
+ rc = devm_request_threaded_irq(wled->dev, wled->ovp_irq, NULL,
+ wled_ovp_irq_handler, IRQF_ONESHOT,
+ "wled_ovp_irq", wled);
+ if (rc < 0) {
+ dev_err(wled->dev, "Unable to request ovp_irq (err:%d)\n",
+ rc);
+ wled->ovp_irq = 0;
+ return 0;
+ }
+
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED3_CTRL_REG_MOD_EN, &val);
+ if (rc < 0)
+ return rc;
+
+ /* Keep OVP irq disabled until module is enabled */
+ if (!(val & WLED3_CTRL_REG_MOD_EN_MASK))
+ disable_irq(wled->ovp_irq);
+
+ return 0;
+}
+
+static const struct backlight_ops wled_ops = {
+ .update_status = wled_update_status,
+};
+
+static int wled_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props;
+ struct backlight_device *bl;
+ struct wled *wled;
+ struct regmap *regmap;
+ int version;
+ u32 val;
+ int rc;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "Unable to get regmap\n");
+ return -EINVAL;
+ }
+
+ wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+ if (!wled)
+ return -ENOMEM;
+
+ wled->regmap = regmap;
+ wled->dev = &pdev->dev;
+
+ version = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (!version) {
+ dev_err(&pdev->dev, "Unknown device version\n");
+ return -ENODEV;
+ }
+
+ mutex_init(&wled->lock);
+ rc = wled_configure(wled, version);
+ if (rc)
+ return rc;
+
+ switch (version) {
+ case 3:
+ wled->cfg.auto_detection_enabled = false;
+ rc = wled3_setup(wled);
+ if (rc) {
+ dev_err(&pdev->dev, "wled3_setup failed\n");
+ return rc;
+ }
+ break;
+
+ case 4:
+ wled->has_short_detect = true;
+ rc = wled4_setup(wled);
+ if (rc) {
+ dev_err(&pdev->dev, "wled4_setup failed\n");
+ return rc;
+ }
+ break;
+
+ default:
+ dev_err(wled->dev, "Invalid WLED version\n");
+ break;
+ }
+
+ INIT_DELAYED_WORK(&wled->ovp_work, wled_ovp_work);
+
+ rc = wled_configure_short_irq(wled, pdev);
+ if (rc < 0)
+ return rc;
+
+ rc = wled_configure_ovp_irq(wled, pdev);
+ if (rc < 0)
+ return rc;
+
+ val = WLED_DEFAULT_BRIGHTNESS;
+ of_property_read_u32(pdev->dev.of_node, "default-brightness", &val);
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.brightness = val;
+ props.max_brightness = WLED3_SINK_REG_BRIGHT_MAX;
+ bl = devm_backlight_device_register(&pdev->dev, wled->name,
+ &pdev->dev, wled,
+ &wled_ops, &props);
+ return PTR_ERR_OR_ZERO(bl);
+}
+
+static int wled_remove(struct platform_device *pdev)
+{
+ struct wled *wled = dev_get_drvdata(&pdev->dev);
+
+ mutex_destroy(&wled->lock);
+ cancel_delayed_work_sync(&wled->ovp_work);
+ disable_irq(wled->short_irq);
+ disable_irq(wled->ovp_irq);
+
+ return 0;
+}
+
+static const struct of_device_id wled_match_table[] = {
+ { .compatible = "qcom,pm8941-wled", .data = (void *)3 },
+ { .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
+ { .compatible = "qcom,pm660l-wled", .data = (void *)4 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, wled_match_table);
+
+static struct platform_driver wled_driver = {
+ .probe = wled_probe,
+ .remove = wled_remove,
+ .driver = {
+ .name = "qcom,wled",
+ .of_match_table = wled_match_table,
+ },
+};
+
+module_platform_driver(wled_driver);
+
+MODULE_DESCRIPTION("Qualcomm WLED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 1275e815bd86..cff5e96fd988 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -18,7 +18,7 @@
#include <asm/mach/sharpsl_param.h>
-#include <mach/tosa.h>
+#include "tosa_bl.h"
#define COMADJ_DEFAULT 97
@@ -28,6 +28,7 @@
struct tosa_bl_data {
struct i2c_client *i2c;
struct backlight_device *bl;
+ struct gpio_desc *gpio;
int comadj;
};
@@ -42,7 +43,7 @@ static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
i2c_smbus_write_byte_data(data->i2c, DAC_CH2, (u8)(brightness & 0xff));
/* SetBacklightVR */
- gpio_set_value(TOSA_GPIO_BL_C20MA, brightness & 0x100);
+ gpiod_set_value(data->gpio, brightness & 0x100);
tosa_bl_enable(spi, brightness);
}
@@ -87,9 +88,8 @@ static int tosa_bl_probe(struct i2c_client *client,
return -ENOMEM;
data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
-
- ret = devm_gpio_request_one(&client->dev, TOSA_GPIO_BL_C20MA,
- GPIOF_OUT_INIT_LOW, "backlight");
+ data->gpio = devm_gpiod_get(&client->dev, "backlight", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(data->gpio);
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
return ret;
diff --git a/drivers/video/backlight/tosa_bl.h b/drivers/video/backlight/tosa_bl.h
new file mode 100644
index 000000000000..589e17e6fdb2
--- /dev/null
+++ b/drivers/video/backlight/tosa_bl.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TOSA_BL_H
+#define _TOSA_BL_H
+
+struct spi_device;
+extern int tosa_bl_enable(struct spi_device *spi, int enable);
+
+#endif
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 29af8e27b6e5..e8ab583e5098 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -19,7 +19,7 @@
#include <asm/mach/sharpsl_param.h>
-#include <mach/tosa.h>
+#include "tosa_bl.h"
#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
@@ -28,12 +28,26 @@
#define TG_REG0_UD 0x0004
#define TG_REG0_LR 0x0008
+/*
+ * Timing Generator
+ */
+#define TG_PNLCTL 0x00
+#define TG_TPOSCTL 0x01
+#define TG_DUTYCTL 0x02
+#define TG_GPOSR 0x03
+#define TG_GPODR1 0x04
+#define TG_GPODR2 0x05
+#define TG_PINICTL 0x06
+#define TG_HPOSCTL 0x07
+
+
#define DAC_BASE 0x4e
struct tosa_lcd_data {
struct spi_device *spi;
struct lcd_device *lcd;
struct i2c_client *i2c;
+ struct gpio_desc *gpiod_tg;
int lcd_power;
bool is_vga;
@@ -66,7 +80,7 @@ EXPORT_SYMBOL(tosa_bl_enable);
static void tosa_lcd_tg_init(struct tosa_lcd_data *data)
{
/* TG on */
- gpio_set_value(TOSA_GPIO_TG_ON, 0);
+ gpiod_set_value(data->gpiod_tg, 0);
mdelay(60);
@@ -100,6 +114,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
*/
struct i2c_adapter *adap = i2c_get_adapter(0);
struct i2c_board_info info = {
+ .dev_name = "tosa-bl",
.type = "tosa-bl",
.addr = DAC_BASE,
.platform_data = data->spi,
@@ -121,7 +136,7 @@ static void tosa_lcd_tg_off(struct tosa_lcd_data *data)
mdelay(50);
/* TG Off */
- gpio_set_value(TOSA_GPIO_TG_ON, 1);
+ gpiod_set_value(data->gpiod_tg, 1);
mdelay(100);
}
@@ -191,10 +206,9 @@ static int tosa_lcd_probe(struct spi_device *spi)
data->spi = spi;
spi_set_drvdata(spi, data);
- ret = devm_gpio_request_one(&spi->dev, TOSA_GPIO_TG_ON,
- GPIOF_OUT_INIT_LOW, "tg #pwr");
- if (ret < 0)
- return ret;
+ data->gpiod_tg = devm_gpiod_get(&spi->dev, "tg #pwr", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpiod_tg))
+ return PTR_ERR(data->gpiod_tg);
mdelay(60);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 1e70e838530e..aa9541bf964b 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2214,6 +2214,7 @@ config FB_HYPERV
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_DEFERRED_IO
help
This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 6dda5d885a03..79d548746efd 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -48,7 +48,7 @@
******************************************************************************/
-
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -235,6 +235,13 @@ static int atyfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
+#ifdef CONFIG_COMPAT
+static int atyfb_compat_ioctl(struct fb_info *info, u_int cmd, u_long arg)
+{
+ return atyfb_ioctl(info, cmd, (u_long)compat_ptr(arg));
+}
+#endif
+
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
@@ -290,6 +297,9 @@ static struct fb_ops atyfb_ops = {
.fb_pan_display = atyfb_pan_display,
.fb_blank = atyfb_blank,
.fb_ioctl = atyfb_ioctl,
+#ifdef CONFIG_COMPAT
+ .fb_compat_ioctl = atyfb_compat_ioctl,
+#endif
.fb_fillrect = atyfb_fillrect,
.fb_copyarea = atyfb_copyarea,
.fb_imageblit = atyfb_imageblit,
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 2dc5703eac51..7c4483c7f313 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -2593,7 +2593,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
* calling pci_set_power_state()
*/
radeonfb_whack_power_state(rinfo, PCI_D2);
- __pci_complete_power_transition(rinfo->pdev, PCI_D2);
+ pci_platform_power_transition(rinfo->pdev, PCI_D2);
} else {
printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n",
pci_name(rinfo->pdev));
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index e6a1c805064f..6f6fc785b545 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1758,23 +1758,21 @@ EXPORT_SYMBOL(remove_conflicting_framebuffers);
/**
* remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
* @pdev: PCI device
- * @res_id: index of PCI BAR configuring framebuffer memory
* @name: requesting driver name
*
* This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for @pdev's BAR @res_id.
+ * using the memory range configured for any of @pdev's memory BARs.
*
* The function assumes that PCI device with shadowed ROM drives a primary
* display and so kicks out vga16fb.
*/
-int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const char *name)
+int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
{
struct apertures_struct *ap;
bool primary = false;
int err, idx, bar;
- bool res_id_found = false;
- for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
idx++;
@@ -1784,21 +1782,16 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
if (!ap)
return -ENOMEM;
- for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+ for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
ap->ranges[idx].base = pci_resource_start(pdev, bar);
ap->ranges[idx].size = pci_resource_len(pdev, bar);
- pci_info(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
- (unsigned long)pci_resource_start(pdev, bar),
- (unsigned long)pci_resource_end(pdev, bar));
+ pci_dbg(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
+ (unsigned long)pci_resource_start(pdev, bar),
+ (unsigned long)pci_resource_end(pdev, bar));
idx++;
- if (res_id == bar)
- res_id_found = true;
}
- if (!res_id_found)
- pci_warn(pdev, "%s: passed res_id (%d) is not a memory bar\n",
- __func__, res_id);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 51d97ec4f58f..1caa3726cb45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -653,7 +653,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
if (!base)
return;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *res = &dev->resource[i];
if (!(res->flags & IORESOURCE_MEM))
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 2dcb7c58b31e..4cd27e5172a1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -23,6 +23,14 @@
*
* Portrait orientation is also supported:
* For example: video=hyperv_fb:864x1152
+ *
+ * When a Windows 10 RS5+ host is used, the virtual machine screen
+ * resolution is obtained from the host. The "video=hyperv_fb" option is
+ * not needed, but can still be used to override what the host specifies.
+ * The VM resolution on the host can be set by executing the PowerShell
+ * "set-vmvideo" command. For example:
+ * set-vmvideo -vmname name -horizontalresolution:1920 \
+ * -verticalresolution:1200 -resolutiontype single
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -34,6 +42,7 @@
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/efi.h>
+#include <linux/console.h>
#include <linux/hyperv.h>
@@ -44,6 +53,10 @@
#define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
#define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
#define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
+#define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
+
+#define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
+#define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
#define SYNTHVID_DEPTH_WIN7 16
#define SYNTHVID_DEPTH_WIN8 32
@@ -82,16 +95,25 @@ enum synthvid_msg_type {
SYNTHVID_POINTER_SHAPE = 8,
SYNTHVID_FEATURE_CHANGE = 9,
SYNTHVID_DIRT = 10,
+ SYNTHVID_RESOLUTION_REQUEST = 13,
+ SYNTHVID_RESOLUTION_RESPONSE = 14,
- SYNTHVID_MAX = 11
+ SYNTHVID_MAX = 15
};
+#define SYNTHVID_EDID_BLOCK_SIZE 128
+#define SYNTHVID_MAX_RESOLUTION_COUNT 64
+
+struct hvd_screen_info {
+ u16 width;
+ u16 height;
+} __packed;
+
struct synthvid_msg_hdr {
u32 type;
u32 size; /* size of this header + payload after this field*/
} __packed;
-
struct synthvid_version_req {
u32 version;
} __packed;
@@ -102,6 +124,19 @@ struct synthvid_version_resp {
u8 max_video_outputs;
} __packed;
+struct synthvid_supported_resolution_req {
+ u8 maximum_resolution_count;
+} __packed;
+
+struct synthvid_supported_resolution_resp {
+ u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
+ u8 resolution_count;
+ u8 default_resolution_index;
+ u8 is_standard;
+ struct hvd_screen_info
+ supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
+} __packed;
+
struct synthvid_vram_location {
u64 user_ctx;
u8 is_vram_gpa_specified;
@@ -187,6 +222,8 @@ struct synthvid_msg {
struct synthvid_pointer_shape ptr_shape;
struct synthvid_feature_change feature_chg;
struct synthvid_dirt dirt;
+ struct synthvid_supported_resolution_req resolution_req;
+ struct synthvid_supported_resolution_resp resolution_resp;
};
} __packed;
@@ -201,6 +238,7 @@ struct synthvid_msg {
#define RING_BUFSIZE (256 * 1024)
#define VSP_TIMEOUT (10 * HZ)
#define HVFB_UPDATE_DELAY (HZ / 20)
+#define HVFB_ONDEMAND_THROTTLE (HZ / 20)
struct hvfb_par {
struct fb_info *info;
@@ -211,6 +249,7 @@ struct hvfb_par {
struct delayed_work dwork;
bool update;
+ bool update_saved; /* The value of 'update' before hibernation */
u32 pseudo_palette[16];
u8 init_buf[MAX_VMBUS_PKT_SIZE];
@@ -220,12 +259,25 @@ struct hvfb_par {
bool synchronous_fb;
struct notifier_block hvfb_panic_nb;
+
+ /* Memory for deferred IO and frame buffer itself */
+ unsigned char *dio_vp;
+ unsigned char *mmio_vp;
+ unsigned long mmio_pp;
+
+ /* Dirty rectangle, protected by delayed_refresh_lock */
+ int x1, y1, x2, y2;
+ bool delayed_refresh;
+ spinlock_t delayed_refresh_lock;
};
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
+static uint screen_width_max = HVFB_WIDTH;
+static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
+static uint dio_fb_size; /* FB size for deferred IO */
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
@@ -312,28 +364,88 @@ static int synthvid_send_ptr(struct hv_device *hdev)
}
/* Send updated screen area (dirty rectangle) location to host */
-static int synthvid_update(struct fb_info *info)
+static int
+synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
{
struct hv_device *hdev = device_to_hv_device(info->device);
struct synthvid_msg msg;
memset(&msg, 0, sizeof(struct synthvid_msg));
+ if (x2 == INT_MAX)
+ x2 = info->var.xres;
+ if (y2 == INT_MAX)
+ y2 = info->var.yres;
msg.vid_hdr.type = SYNTHVID_DIRT;
msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_dirt);
msg.dirt.video_output = 0;
msg.dirt.dirt_count = 1;
- msg.dirt.rect[0].x1 = 0;
- msg.dirt.rect[0].y1 = 0;
- msg.dirt.rect[0].x2 = info->var.xres;
- msg.dirt.rect[0].y2 = info->var.yres;
+ msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
+ msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
+ msg.dirt.rect[0].x2 =
+ (x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
+ msg.dirt.rect[0].y2 =
+ (y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;
synthvid_send(hdev, &msg);
return 0;
}
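
synthvid_update() now takes a caller-supplied rectangle and sanitises it: INT_MAX means "whole screen", and inverted or oversized coordinates fall back to the full visible area. A minimal userspace sketch of the same clamping rules (struct and function names are illustrative, not from the driver):

#include <limits.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Mirror of the dirty-rectangle clamping in synthvid_update() */
static struct rect clamp_rect(int x1, int y1, int x2, int y2,
			      int xres, int yres)
{
	struct rect r;

	if (x2 == INT_MAX)
		x2 = xres;
	if (y2 == INT_MAX)
		y2 = yres;

	r.x1 = (x1 > x2) ? 0 : x1;
	r.y1 = (y1 > y2) ? 0 : y1;
	r.x2 = (x2 < x1 || x2 > xres) ? xres : x2;
	r.y2 = (y2 < y1 || y2 > yres) ? yres : y2;
	return r;
}

int main(void)
{
	struct rect r = clamp_rect(0, 0, INT_MAX, INT_MAX, 1152, 864);

	printf("%d,%d-%d,%d\n", r.x1, r.y1, r.x2, r.y2); /* 0,0-1152,864 */
	return 0;
}
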
+static void hvfb_docopy(struct hvfb_par *par,
+ unsigned long offset,
+ unsigned long size)
+{
+ if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
+ size == 0 || offset >= dio_fb_size)
+ return;
+
+ if (offset + size > dio_fb_size)
+ size = dio_fb_size - offset;
+
+ memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
+}
+
+/* Deferred IO callback */
+static void synthvid_deferred_io(struct fb_info *p,
+ struct list_head *pagelist)
+{
+ struct hvfb_par *par = p->par;
+ struct page *page;
+ unsigned long start, end;
+ int y1, y2, miny, maxy;
+
+ miny = INT_MAX;
+ maxy = 0;
+
+ /*
+	 * Merge dirty pages. It is possible that the last page crosses
+	 * over the end of the frame buffer row yres; synthvid_update()
+	 * takes care of that by clamping the y2 value to yres.
+ */
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ y1 = start / p->fix.line_length;
+ y2 = end / p->fix.line_length;
+ miny = min_t(int, miny, y1);
+ maxy = max_t(int, maxy, y2);
+
+ /* Copy from dio space to mmio address */
+ if (par->fb_ready)
+ hvfb_docopy(par, start, PAGE_SIZE);
+ }
+
+ if (par->fb_ready && par->update)
+ synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
+}
+
+static struct fb_deferred_io synthvid_defio = {
+ .delay = HZ / 20,
+ .deferred_io = synthvid_deferred_io,
+};
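
For reference, the row arithmetic in the deferred IO callback: each dirty page's byte offset is divided by line_length to get the rows it covers, and the spans are merged into a single miny..maxy update. A standalone sketch assuming 4 KiB pages and a hypothetical 1152x864, 32bpp mode:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long line_length = 1152 * 4;	/* xres * bytes per pixel */
	unsigned long index = 3;		/* page->index from the list */
	unsigned long start = index << PAGE_SHIFT;
	unsigned long end = start + PAGE_SIZE - 1;
	int y1 = start / line_length;
	int y2 = end / line_length;

	/* page 3 dirties rows 2..3; the driver folds these into miny/maxy */
	printf("page %lu dirties rows %d..%d\n", index, y1, y2);
	return 0;
}
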
/*
* Actions on received messages from host:
@@ -354,6 +466,7 @@ static void synthvid_recv_sub(struct hv_device *hdev)
/* Complete the wait event */
if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
+ msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
complete(&par->wait);
@@ -400,6 +513,17 @@ static void synthvid_receive(void *ctx)
} while (bytes_recvd > 0 && ret == 0);
}
+/* Check if ver1 is greater than or equal to ver2 */
+static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
+{
+ if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
+ (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
+ SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
+ return true;
+
+ return false;
+}
+
/* Check synthetic video protocol version with the host */
static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
{
@@ -428,6 +552,64 @@ static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
}
par->synthvid_version = ver;
+ pr_info("Synthvid Version major %d, minor %d\n",
+ SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
+
+out:
+ return ret;
+}
+
+/* Get the list of supported resolutions from the host */
+static int synthvid_get_supported_resolution(struct hv_device *hdev)
+{
+ struct fb_info *info = hv_get_drvdata(hdev);
+ struct hvfb_par *par = info->par;
+ struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
+ int ret = 0;
+ unsigned long t;
+ u8 index;
+ int i;
+
+ memset(msg, 0, sizeof(struct synthvid_msg));
+ msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
+ msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
+ sizeof(struct synthvid_supported_resolution_req);
+
+ msg->resolution_req.maximum_resolution_count =
+ SYNTHVID_MAX_RESOLUTION_COUNT;
+ synthvid_send(hdev, msg);
+
+ t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
+ if (!t) {
+		pr_err("Timed out waiting for resolution response\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (msg->resolution_resp.resolution_count == 0) {
+ pr_err("No supported resolutions\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ index = msg->resolution_resp.default_resolution_index;
+ if (index >= msg->resolution_resp.resolution_count) {
+ pr_err("Invalid resolution index: %d\n", index);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
+ screen_width_max = max_t(unsigned int, screen_width_max,
+ msg->resolution_resp.supported_resolution[i].width);
+ screen_height_max = max_t(unsigned int, screen_height_max,
+ msg->resolution_resp.supported_resolution[i].height);
+ }
+
+ screen_width =
+ msg->resolution_resp.supported_resolution[index].width;
+ screen_height =
+ msg->resolution_resp.supported_resolution[index].height;
out:
return ret;
@@ -448,11 +630,27 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
}
/* Negotiate the protocol version with host */
- if (vmbus_proto_version == VERSION_WS2008 ||
- vmbus_proto_version == VERSION_WIN7)
- ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
- else
+ switch (vmbus_proto_version) {
+ case VERSION_WIN10:
+ case VERSION_WIN10_V5:
+ ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case VERSION_WIN8:
+ case VERSION_WIN8_1:
ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
+ if (!ret)
+ break;
+ /* Fallthrough */
+ case VERSION_WS2008:
+ case VERSION_WIN7:
+ ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
+ break;
+ default:
+ ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
+ break;
+ }
if (ret) {
pr_err("Synthetic video device version not accepted\n");
@@ -464,6 +662,12 @@ static int synthvid_connect_vsp(struct hv_device *hdev)
else
screen_depth = SYNTHVID_DEPTH_WIN8;
+ if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
+ ret = synthvid_get_supported_resolution(hdev);
+ if (ret)
+			pr_info("Failed to get supported resolutions from host, using defaults\n");
+ }
+
screen_fb_size = hdev->channel->offermsg.offer.
mmio_megabytes * 1024 * 1024;
@@ -488,7 +692,7 @@ static int synthvid_send_config(struct hv_device *hdev)
msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
sizeof(struct synthvid_vram_location);
- msg->vram.user_ctx = msg->vram.vram_gpa = info->fix.smem_start;
+ msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
msg->vram.is_vram_gpa_specified = 1;
synthvid_send(hdev, msg);
@@ -498,7 +702,7 @@ static int synthvid_send_config(struct hv_device *hdev)
ret = -ETIMEDOUT;
goto out;
}
- if (msg->vram_ack.user_ctx != info->fix.smem_start) {
+ if (msg->vram_ack.user_ctx != par->mmio_pp) {
pr_err("Unable to set VRAM location\n");
ret = -ENODEV;
goto out;
@@ -515,19 +719,77 @@ out:
/*
* Delayed work callback:
- * It is called at HVFB_UPDATE_DELAY or longer time interval to process
- * screen updates. It is re-scheduled if further update is necessary.
+ * It is scheduled whenever an update request is received, and runs at
+ * most once per HVFB_ONDEMAND_THROTTLE interval.
*/
static void hvfb_update_work(struct work_struct *w)
{
struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
struct fb_info *info = par->info;
+ unsigned long flags;
+ int x1, x2, y1, y2;
+ int j;
+
+ spin_lock_irqsave(&par->delayed_refresh_lock, flags);
+ /* Reset the request flag */
+ par->delayed_refresh = false;
+
+ /* Store the dirty rectangle to local variables */
+ x1 = par->x1;
+ x2 = par->x2;
+ y1 = par->y1;
+ y2 = par->y2;
+
+ /* Clear dirty rectangle */
+ par->x1 = par->y1 = INT_MAX;
+ par->x2 = par->y2 = 0;
+
+ spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
+
+ if (x1 > info->var.xres || x2 > info->var.xres ||
+ y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
+ return;
+
+ /* Copy the dirty rectangle to frame buffer memory */
+ for (j = y1; j < y2; j++) {
+ hvfb_docopy(par,
+ j * info->fix.line_length +
+ (x1 * screen_depth / 8),
+ (x2 - x1) * screen_depth / 8);
+ }
+
+ /* Refresh */
+ if (par->fb_ready && par->update)
+ synthvid_update(info, x1, y1, x2, y2);
+}
- if (par->fb_ready)
- synthvid_update(info);
+/*
+ * Throttle the on-demand refresh rate: schedule a delayed screen
+ * update if one is not already pending.
+ */
+static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
+ int x1, int y1, int w, int h)
+{
+ unsigned long flags;
+ int x2 = x1 + w;
+ int y2 = y1 + h;
+
+ spin_lock_irqsave(&par->delayed_refresh_lock, flags);
+
+ /* Merge dirty rectangle */
+ par->x1 = min_t(int, par->x1, x1);
+ par->y1 = min_t(int, par->y1, y1);
+ par->x2 = max_t(int, par->x2, x2);
+ par->y2 = max_t(int, par->y2, y2);
+
+ /* Schedule a delayed screen update if not yet */
+	if (!par->delayed_refresh) {
+ schedule_delayed_work(&par->dwork,
+ HVFB_ONDEMAND_THROTTLE);
+ par->delayed_refresh = true;
+ }
- if (par->update)
- schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
+ spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
}
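
The throttle keeps a single pending rectangle per device: every request widens it via min/max, and only the first request in an HVFB_ONDEMAND_THROTTLE window schedules the worker, which then resets the rectangle to the INT_MAX/0 sentinel. A lockless userspace sketch of just the merge step (names illustrative):

#include <limits.h>
#include <stdio.h>

struct dirty { int x1, y1, x2, y2; };

/* Sentinel "empty" rectangle, as initialised in hvfb_probe() */
static void dirty_reset(struct dirty *d)
{
	d->x1 = d->y1 = INT_MAX;
	d->x2 = d->y2 = 0;
}

/* Widen the pending rectangle, as in hvfb_ondemand_refresh_throttle() */
static void dirty_merge(struct dirty *d, int x, int y, int w, int h)
{
	d->x1 = d->x1 < x ? d->x1 : x;
	d->y1 = d->y1 < y ? d->y1 : y;
	d->x2 = d->x2 > x + w ? d->x2 : x + w;
	d->y2 = d->y2 > y + h ? d->y2 : y + h;
}

int main(void)
{
	struct dirty d;

	dirty_reset(&d);
	dirty_merge(&d, 100, 100, 50, 20);	/* first request */
	dirty_merge(&d, 10, 200, 30, 30);	/* merged with the first */
	printf("%d,%d-%d,%d\n", d.x1, d.y1, d.x2, d.y2); /* 10,100-150,230 */
	return 0;
}
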
static int hvfb_on_panic(struct notifier_block *nb,
@@ -539,7 +801,8 @@ static int hvfb_on_panic(struct notifier_block *nb,
par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
par->synchronous_fb = true;
info = par->info;
- synthvid_update(info);
+ hvfb_docopy(par, 0, dio_fb_size);
+ synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
return NOTIFY_DONE;
}
@@ -600,7 +863,10 @@ static void hvfb_cfb_fillrect(struct fb_info *p,
cfb_fillrect(p, rect);
if (par->synchronous_fb)
- synthvid_update(p);
+ synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
+ else
+ hvfb_ondemand_refresh_throttle(par, rect->dx, rect->dy,
+ rect->width, rect->height);
}
static void hvfb_cfb_copyarea(struct fb_info *p,
@@ -610,7 +876,10 @@ static void hvfb_cfb_copyarea(struct fb_info *p,
cfb_copyarea(p, area);
if (par->synchronous_fb)
- synthvid_update(p);
+ synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
+ else
+ hvfb_ondemand_refresh_throttle(par, area->dx, area->dy,
+ area->width, area->height);
}
static void hvfb_cfb_imageblit(struct fb_info *p,
@@ -620,7 +889,10 @@ static void hvfb_cfb_imageblit(struct fb_info *p,
cfb_imageblit(p, image);
if (par->synchronous_fb)
- synthvid_update(p);
+ synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
+ else
+ hvfb_ondemand_refresh_throttle(par, image->dx, image->dy,
+ image->width, image->height);
}
static struct fb_ops hvfb_ops = {
@@ -653,6 +925,8 @@ static void hvfb_get_option(struct fb_info *info)
}
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
+ (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
+ (x > screen_width_max || y > screen_height_max)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@@ -677,6 +951,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
resource_size_t pot_start, pot_end;
int ret;
+ dio_fb_size =
+ screen_width * screen_height * screen_depth / 8;
+
if (gen2vm) {
pot_start = 0;
pot_end = -1;
@@ -689,8 +966,12 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) < screen_fb_size)
+ pci_resource_len(pdev, 0) < screen_fb_size) {
+		pr_err("Resource not available or too small (0x%lx < 0x%lx)\n",
+ (unsigned long) pci_resource_len(pdev, 0),
+ (unsigned long) screen_fb_size);
goto err1;
+ }
pot_end = pci_resource_end(pdev, 0);
pot_start = pot_end - screen_fb_size + 1;
@@ -707,9 +988,14 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
if (!fb_virt)
goto err2;
+ /* Allocate memory for deferred IO */
+ par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
+ if (par->dio_vp == NULL)
+ goto err3;
+
info->apertures = alloc_apertures(1);
if (!info->apertures)
- goto err3;
+ goto err4;
if (gen2vm) {
info->apertures->ranges[0].base = screen_info.lfb_base;
@@ -721,16 +1007,23 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
}
+ /* Physical address of FB device */
+ par->mmio_pp = par->mem->start;
+ /* Virtual address of FB device */
+ par->mmio_vp = (unsigned char *) fb_virt;
+
info->fix.smem_start = par->mem->start;
- info->fix.smem_len = screen_fb_size;
- info->screen_base = fb_virt;
- info->screen_size = screen_fb_size;
+ info->fix.smem_len = dio_fb_size;
+ info->screen_base = par->dio_vp;
+ info->screen_size = dio_fb_size;
if (!gen2vm)
pci_dev_put(pdev);
return 0;
+err4:
+ vfree(par->dio_vp);
err3:
iounmap(fb_virt);
err2:
@@ -748,6 +1041,7 @@ static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
+ vfree(par->dio_vp);
iounmap(info->screen_base);
vmbus_free_mmio(par->mem->start, screen_fb_size);
par->mem = NULL;
@@ -771,6 +1065,11 @@ static int hvfb_probe(struct hv_device *hdev,
init_completion(&par->wait);
INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
+ par->delayed_refresh = false;
+ spin_lock_init(&par->delayed_refresh_lock);
+ par->x1 = par->y1 = INT_MAX;
+ par->x2 = par->y2 = 0;
+
/* Connect to VSP */
hv_set_drvdata(hdev, info);
ret = synthvid_connect_vsp(hdev);
@@ -779,17 +1078,16 @@ static int hvfb_probe(struct hv_device *hdev,
goto error1;
}
+ hvfb_get_option(info);
+ pr_info("Screen resolution: %dx%d, Color depth: %d\n",
+ screen_width, screen_height, screen_depth);
+
ret = hvfb_getmem(hdev, info);
if (ret) {
pr_err("No memory for framebuffer\n");
goto error2;
}
- hvfb_get_option(info);
- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
- screen_width, screen_height, screen_depth);
-
-
/* Set up fb_info */
info->flags = FBINFO_DEFAULT;
@@ -823,6 +1121,10 @@ static int hvfb_probe(struct hv_device *hdev,
info->fbops = &hvfb_ops;
info->pseudo_palette = par->pseudo_palette;
+ /* Initialize deferred IO */
+ info->fbdefio = &synthvid_defio;
+ fb_deferred_io_init(info);
+
/* Send config to host */
ret = synthvid_send_config(hdev);
if (ret)
@@ -844,6 +1146,7 @@ static int hvfb_probe(struct hv_device *hdev,
return 0;
error:
+ fb_deferred_io_cleanup(info);
hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
@@ -866,6 +1169,8 @@ static int hvfb_remove(struct hv_device *hdev)
par->update = false;
par->fb_ready = false;
+ fb_deferred_io_cleanup(info);
+
unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
@@ -878,6 +1183,61 @@ static int hvfb_remove(struct hv_device *hdev)
return 0;
}
+static int hvfb_suspend(struct hv_device *hdev)
+{
+ struct fb_info *info = hv_get_drvdata(hdev);
+ struct hvfb_par *par = info->par;
+
+ console_lock();
+
+ /* 1 means do suspend */
+ fb_set_suspend(info, 1);
+
+ cancel_delayed_work_sync(&par->dwork);
+
+ par->update_saved = par->update;
+ par->update = false;
+ par->fb_ready = false;
+
+ vmbus_close(hdev->channel);
+
+ console_unlock();
+
+ return 0;
+}
+
+static int hvfb_resume(struct hv_device *hdev)
+{
+ struct fb_info *info = hv_get_drvdata(hdev);
+ struct hvfb_par *par = info->par;
+ int ret;
+
+ console_lock();
+
+ ret = synthvid_connect_vsp(hdev);
+ if (ret != 0)
+ goto out;
+
+ ret = synthvid_send_config(hdev);
+ if (ret != 0) {
+ vmbus_close(hdev->channel);
+ goto out;
+ }
+
+ par->fb_ready = true;
+ par->update = par->update_saved;
+
+ schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
+
+ /* 0 means do resume */
+ fb_set_suspend(info, 0);
+
+out:
+ console_unlock();
+
+ return ret;
+}
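
Note that fb_set_suspend() must be called with the console lock held, which is why both paths bracket their work in console_lock()/console_unlock(); resume also re-runs the full synthvid_connect_vsp()/synthvid_send_config() handshake because suspend closed the VMBus channel.
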
+
static const struct pci_device_id pci_stub_id_table[] = {
{
@@ -901,6 +1261,8 @@ static struct hv_driver hvfb_drv = {
.id_table = id_table,
.probe = hvfb_probe,
.remove = hvfb_remove,
+ .suspend = hvfb_suspend,
+ .resume = hvfb_resume,
.driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
diff --git a/drivers/video/fbdev/matrox/i2c-matroxfb.c b/drivers/video/fbdev/matrox/i2c-matroxfb.c
index 34e2659c3189..e2e4705e3fe0 100644
--- a/drivers/video/fbdev/matrox/i2c-matroxfb.c
+++ b/drivers/video/fbdev/matrox/i2c-matroxfb.c
@@ -191,8 +191,8 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
0x1b, I2C_CLIENT_END
};
- i2c_new_probed_device(&m2info->maven.adapter,
- &maven_info, addr_list, NULL);
+ i2c_new_scanned_device(&m2info->maven.adapter,
+ &maven_info, addr_list, NULL);
}
}
return m2info;
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index ae2bcfee338a..81ad3aa1ca06 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -968,19 +968,6 @@ static void sa1100fb_task(struct work_struct *w)
#ifdef CONFIG_CPU_FREQ
/*
- * Calculate the minimum DMA period over all displays that we own.
- * This, together with the SDRAM bandwidth defines the slowest CPU
- * frequency that can be selected.
- */
-static unsigned int sa1100fb_min_dma_period(struct sa1100fb_info *fbi)
-{
- /*
- * FIXME: we need to verify _all_ consoles.
- */
- return sa1100fb_display_dma_period(&fbi->fb.var);
-}
-
-/*
* CPU clock speed change handler. We need to adjust the LCD timing
* parameters when the CPU clock is adjusted by the power management
* subsystem.
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index b939bc28d886..9c82e2a0a411 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -1576,12 +1576,12 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (ptr[0] & 0x10)
frame->active_aspect = ptr[1] & 0xf;
if (ptr[0] & 0x8) {
- frame->top_bar = (ptr[5] << 8) + ptr[6];
- frame->bottom_bar = (ptr[7] << 8) + ptr[8];
+ frame->top_bar = (ptr[6] << 8) | ptr[5];
+ frame->bottom_bar = (ptr[8] << 8) | ptr[7];
}
if (ptr[0] & 0x4) {
- frame->left_bar = (ptr[9] << 8) + ptr[10];
- frame->right_bar = (ptr[11] << 8) + ptr[12];
+ frame->left_bar = (ptr[10] << 8) | ptr[9];
+ frame->right_bar = (ptr[12] << 8) | ptr[11];
}
frame->scan_mode = ptr[0] & 0x3;
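
The old unpack read the bar fields high byte first; the pack side emits them low byte first, so the corrected form is (high << 8) | low. A standalone byte-order check (indices simplified from the patch's ptr[5..12]):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* bar-field bytes as they arrive on the wire: low byte first */
	uint8_t ptr[2] = { 0x34, 0x12 };
	uint16_t bar = (ptr[1] << 8) | ptr[0]; /* as in (ptr[6] << 8) | ptr[5] */

	assert(bar == 0x1234);
	return 0;
}
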
diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore
index e48355f538fa..9dda1b26b2e4 100644
--- a/drivers/video/logo/.gitignore
+++ b/drivers/video/logo/.gitignore
@@ -5,3 +5,4 @@
*_vga16.c
*_clut224.c
*_gray256.c
+pnmtologo
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 16f60c1e1766..bcda657493a4 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -18,24 +18,19 @@ obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
# How to generate logo's
-pnmtologo := scripts/pnmtologo
+hostprogs-y := pnmtologo
# Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
quiet_cmd_logo = LOGO $@
- cmd_logo = $(pnmtologo) \
- -t $(patsubst $*_%,%,$(notdir $(basename $<))) \
- -n $(notdir $(basename $<)) -o $@ $<
+ cmd_logo = $(obj)/pnmtologo -t $(lastword $(subst _, ,$*)) -n $* -o $@ $<
-$(obj)/%_mono.c: $(src)/%_mono.pbm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.pbm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
-$(obj)/%_vga16.c: $(src)/%_vga16.ppm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.ppm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
-$(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
- $(call if_changed,logo)
-
-$(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
+$(obj)/%.c: $(src)/%.pgm $(obj)/pnmtologo FORCE
$(call if_changed,logo)
# generated C files
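
As a worked example of the consolidated rule: building logo_linux_clut224.c from logo_linux_clut224.ppm matches the %.c: %.ppm pattern, $* expands to logo_linux_clut224, $(lastword $(subst _, ,$*)) yields clut224, and the command becomes $(obj)/pnmtologo -t clut224 -n logo_linux_clut224 -o $@ $<, the same invocation the old per-suffix rules produced.
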
diff --git a/scripts/pnmtologo.c b/drivers/video/logo/pnmtologo.c
index 4718d7895f0b..4718d7895f0b 100644
--- a/scripts/pnmtologo.c
+++ b/drivers/video/logo/pnmtologo.c
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 93d5bebf9572..1b0b11b55d2a 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -706,7 +706,7 @@ static const struct file_operations fsl_hv_fops = {
.poll = fsl_hv_poll,
.read = fsl_hv_read,
.unlocked_ioctl = fsl_hv_ioctl,
- .compat_ioctl = fsl_hv_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice fsl_hv_misc_dev = {
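
compat_ptr_ioctl is the generic helper for drivers whose ioctl argument is a plain pointer with an identical layout for 32-bit and 64-bit processes; it amounts to roughly the following (a sketch of the in-kernel helper, see fs/ioctl.c for the authoritative version):

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}

The same one-line .compat_ioctl hookup recurs throughout the watchdog drivers further down.
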
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index 1b2d96b945be..e8c7fa68d3cc 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -77,15 +77,13 @@ static int sgi_w1_probe(struct platform_device *pdev)
{
struct sgi_w1_device *sdev;
struct sgi_w1_platform_data *pdata;
- struct resource *res;
sdev = devm_kzalloc(&pdev->dev, sizeof(struct sgi_w1_device),
GFP_KERNEL);
if (!sdev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->mcr = devm_ioremap_resource(&pdev->dev, res);
+ sdev->mcr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mcr))
return PTR_ERR(sdev->mcr);
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index b7847636501d..687753889c34 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -74,6 +74,14 @@ config W1_SLAVE_DS2805
organized as 7 pages of 16 bytes each with 64bit
unique number. Requires OverDrive Speed to talk to.
+config W1_SLAVE_DS2430
+ tristate "256b EEPROM family support (DS2430)"
+ help
+ Say Y here if you want to use a 1-wire 256bit EEPROM
+ family device (DS2430).
+ This EEPROM is organized as one page of 32 bytes for random
+ access.
+
config W1_SLAVE_DS2431
tristate "1kb EEPROM family support (DS2431)"
help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 8e9655eaa478..278bcf2a9bfd 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o
obj-$(CONFIG_W1_SLAVE_DS2413) += w1_ds2413.o
obj-$(CONFIG_W1_SLAVE_DS2406) += w1_ds2406.o
obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o
+obj-$(CONFIG_W1_SLAVE_DS2430) += w1_ds2430.o
obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o
obj-$(CONFIG_W1_SLAVE_DS2805) += w1_ds2805.o
obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
new file mode 100644
index 000000000000..6fb0563fb2ae
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * w1_ds2430.c - w1 family 14 (DS2430) driver
+ *
+ * Copyright (c) 2019 Angelo Dureghello <angelo.dureghello@timesys.com>
+ *
+ * Cloned and modified from ds2431
+ * Copyright (c) 2008 Bernhard Weirich <bernhard.weirich@riedel.net>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/w1.h>
+
+#define W1_EEPROM_DS2430 0x14
+
+#define W1_F14_EEPROM_SIZE 32
+#define W1_F14_PAGE_COUNT 1
+#define W1_F14_PAGE_BITS 5
+#define W1_F14_PAGE_SIZE (1 << W1_F14_PAGE_BITS)
+#define W1_F14_PAGE_MASK 0x1F
+
+#define W1_F14_SCRATCH_BITS 5
+#define W1_F14_SCRATCH_SIZE (1 << W1_F14_SCRATCH_BITS)
+#define W1_F14_SCRATCH_MASK (W1_F14_SCRATCH_SIZE-1)
+
+#define W1_F14_READ_EEPROM 0xF0
+#define W1_F14_WRITE_SCRATCH 0x0F
+#define W1_F14_READ_SCRATCH 0xAA
+#define W1_F14_COPY_SCRATCH 0x55
+#define W1_F14_VALIDATION_KEY 0xa5
+
+#define W1_F14_TPROG_MS 11
+#define W1_F14_READ_RETRIES 10
+#define W1_F14_READ_MAXLEN W1_F14_SCRATCH_SIZE
+
+/*
+ * Checks the file size bounds and adjusts count as needed.
+ * This would not be needed if the file size didn't reset to 0 after a write.
+ */
+static inline size_t w1_f14_fix_count(loff_t off, size_t count, size_t size)
+{
+ if (off > size)
+ return 0;
+
+ if ((off + count) > size)
+ return size - off;
+
+ return count;
+}
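
Worked examples of the clamping (standalone sketch; loff_t approximated by long long):

#include <assert.h>
#include <stddef.h>

static size_t fix_count(long long off, size_t count, size_t size)
{
	if (off > (long long)size)
		return 0;
	if (off + count > size)
		return size - off;
	return count;
}

int main(void)
{
	assert(fix_count(28, 8, 32) == 4);	/* clamped to the end */
	assert(fix_count(40, 8, 32) == 0);	/* entirely past the end */
	assert(fix_count(0, 16, 32) == 16);	/* untouched */
	return 0;
}
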
+
+/*
+ * Reads a block from the W1 ROM twice and compares the results.
+ * If they are equal they are returned, otherwise the read
+ * is repeated W1_F14_READ_RETRIES times.
+ *
+ * count must not exceed W1_F14_READ_MAXLEN.
+ */
+static int w1_f14_readblock(struct w1_slave *sl, int off, int count, char *buf)
+{
+ u8 wrbuf[2];
+ u8 cmp[W1_F14_READ_MAXLEN];
+ int tries = W1_F14_READ_RETRIES;
+
+ do {
+ wrbuf[0] = W1_F14_READ_EEPROM;
+ wrbuf[1] = off & 0xff;
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, buf, count);
+
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_read_block(sl->master, cmp, count);
+
+ if (!memcmp(cmp, buf, count))
+ return 0;
+ } while (--tries);
+
+	dev_err(&sl->dev, "verification read failed %d times\n",
+		W1_F14_READ_RETRIES);
+
+ return -1;
+}
+
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+	int todo;
+
+	count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+	if (count == 0)
+		return 0;
+
+	/* walk only the clamped range, not the caller's raw count */
+	todo = count;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* read directly from the EEPROM in chunks of W1_F14_READ_MAXLEN */
+ while (todo > 0) {
+ int block_read;
+
+ if (todo >= W1_F14_READ_MAXLEN)
+ block_read = W1_F14_READ_MAXLEN;
+ else
+ block_read = todo;
+
+ if (w1_f14_readblock(sl, off, block_read, buf) < 0)
+ count = -EIO;
+
+ todo -= W1_F14_READ_MAXLEN;
+ buf += W1_F14_READ_MAXLEN;
+ off += W1_F14_READ_MAXLEN;
+ }
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
+
+/*
+ * Writes to the scratchpad and reads it back for verification.
+ * Then copies the scratchpad to EEPROM.
+ * The data must be aligned at W1_F14_SCRATCH_SIZE bytes and
+ * must be W1_F14_SCRATCH_SIZE bytes long.
+ * The master must be locked.
+ *
+ * @param sl The slave structure
+ * @param addr Address for the write
+ * @param len length must be <= (W1_F14_PAGE_SIZE - (addr & W1_F14_PAGE_MASK))
+ * @param data The data to write
+ * @return 0=Success -1=failure
+ */
+static int w1_f14_write(struct w1_slave *sl, int addr, int len, const u8 *data)
+{
+ int tries = W1_F14_READ_RETRIES;
+ u8 wrbuf[2];
+ u8 rdbuf[W1_F14_SCRATCH_SIZE + 3];
+
+retry:
+
+ /* Write the data to the scratchpad */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_WRITE_SCRATCH;
+ wrbuf[1] = addr & 0xff;
+
+ w1_write_block(sl->master, wrbuf, 2);
+ w1_write_block(sl->master, data, len);
+
+ /* Read the scratchpad and verify */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ w1_write_8(sl->master, W1_F14_READ_SCRATCH);
+ w1_read_block(sl->master, rdbuf, len + 2);
+
+ /*
+ * Compare what was read against the data written
+	 * Note: on read scratchpad, the device returns 2 leading 0xff
+	 * bytes, which must be discarded.
+ */
+	if (memcmp(data, &rdbuf[2], len) != 0) {
+
+ if (--tries)
+ goto retry;
+
+ dev_err(&sl->dev,
+ "could not write to eeprom, scratchpad compare failed %d times\n",
+ W1_F14_READ_RETRIES);
+
+ return -1;
+ }
+
+ /* Copy the scratchpad to EEPROM */
+ if (w1_reset_select_slave(sl))
+ return -1;
+
+ wrbuf[0] = W1_F14_COPY_SCRATCH;
+ wrbuf[1] = W1_F14_VALIDATION_KEY;
+ w1_write_block(sl->master, wrbuf, 2);
+
+ /* Sleep for tprog ms to wait for the write to complete */
+ msleep(W1_F14_TPROG_MS);
+
+ /* Reset the bus to wake up the EEPROM */
+ w1_reset_bus(sl->master);
+
+ return 0;
+}
+
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int addr, len;
+ int copy;
+
+ count = w1_f14_fix_count(off, count, W1_F14_EEPROM_SIZE);
+ if (count == 0)
+ return 0;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Can only write data in blocks of the size of the scratchpad */
+ addr = off;
+ len = count;
+ while (len > 0) {
+
+ /* if len too short or addr not aligned */
+ if (len < W1_F14_SCRATCH_SIZE || addr & W1_F14_SCRATCH_MASK) {
+ char tmp[W1_F14_SCRATCH_SIZE];
+
+ /* read the block and update the parts to be written */
+ if (w1_f14_readblock(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp)) {
+ count = -EIO;
+ goto out_up;
+ }
+
+			/* copy at most to the scratchpad boundary, or len */
+ copy = W1_F14_SCRATCH_SIZE -
+ (addr & W1_F14_SCRATCH_MASK);
+
+ if (copy > len)
+ copy = len;
+
+ memcpy(&tmp[addr & W1_F14_SCRATCH_MASK], buf, copy);
+ if (w1_f14_write(sl, addr & ~W1_F14_SCRATCH_MASK,
+ W1_F14_SCRATCH_SIZE, tmp) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ } else {
+
+ copy = W1_F14_SCRATCH_SIZE;
+ if (w1_f14_write(sl, addr, copy, buf) < 0) {
+ count = -EIO;
+ goto out_up;
+ }
+ }
+ buf += copy;
+ addr += copy;
+ len -= copy;
+ }
+
+out_up:
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return count;
+}
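
The first partial block of an unaligned write is handled by read-modify-write: fetch the aligned 32-byte block, patch the affected bytes, and write the whole block back. The copy computation in isolation (standalone check):

#include <assert.h>

#define SCRATCH_SIZE 32
#define SCRATCH_MASK (SCRATCH_SIZE - 1)

int main(void)
{
	int addr = 10, len = 40;
	/* bytes we may patch before hitting the block boundary */
	int copy = SCRATCH_SIZE - (addr & SCRATCH_MASK);

	if (copy > len)
		copy = len;

	assert(copy == 22);	/* bytes 10..31 of the first block */
	return 0;
}
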
+
+static BIN_ATTR_RW(eeprom, W1_F14_EEPROM_SIZE);
+
+static struct bin_attribute *w1_f14_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL,
+};
+
+static const struct attribute_group w1_f14_group = {
+ .bin_attrs = w1_f14_bin_attrs,
+};
+
+static const struct attribute_group *w1_f14_groups[] = {
+ &w1_f14_group,
+ NULL,
+};
+
+static struct w1_family_ops w1_f14_fops = {
+ .groups = w1_f14_groups,
+};
+
+static struct w1_family w1_family_14 = {
+ .fid = W1_EEPROM_DS2430,
+ .fops = &w1_f14_fops,
+};
+module_w1_family(w1_family_14);
+
+MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
+MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256b EEPROM");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 58e7c100b6ad..1679e0dc869b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1485,6 +1485,7 @@ config W83627HF_WDT
NCT6791
NCT6792
NCT6102D/04D/06D
+ NCT6116D
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
@@ -1641,8 +1642,10 @@ config INDYDOG
config JZ4740_WDT
tristate "Ingenic jz4740 SoC hardware watchdog"
- depends on MACH_JZ4740 || MACH_JZ4780
+ depends on MIPS
+ depends on COMMON_CLK
select WATCHDOG_CORE
+ select MFD_SYSCON
help
Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
diff --git a/drivers/watchdog/acquirewdt.c b/drivers/watchdog/acquirewdt.c
index 848db958411e..bc6f333565d3 100644
--- a/drivers/watchdog/acquirewdt.c
+++ b/drivers/watchdog/acquirewdt.c
@@ -221,6 +221,7 @@ static const struct file_operations acq_fops = {
.llseek = no_llseek,
.write = acq_write,
.unlocked_ioctl = acq_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = acq_open,
.release = acq_close,
};
diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c
index 0d02bb275b3d..0e4c18a2aa42 100644
--- a/drivers/watchdog/advantechwdt.c
+++ b/drivers/watchdog/advantechwdt.c
@@ -220,6 +220,7 @@ static const struct file_operations advwdt_fops = {
.llseek = no_llseek,
.write = advwdt_write,
.unlocked_ioctl = advwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = advwdt_open,
.release = advwdt_close,
};
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index c157dd3d92a3..42338c7d4540 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -362,6 +362,7 @@ static const struct file_operations ali_fops = {
.llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ali_open,
.release = ali_release,
};
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index c8e3ab056767..5af0358f4390 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -294,6 +294,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 668a1c704f28..c087027ffd5d 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -250,6 +250,7 @@ static const struct file_operations ar7_wdt_fops = {
.owner = THIS_MODULE,
.write = ar7_wdt_write,
.unlocked_ioctl = ar7_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ar7_wdt_open,
.release = ar7_wdt_release,
.llseek = no_llseek,
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
index 4ec0906bf12c..7e00960651fa 100644
--- a/drivers/watchdog/aspeed_wdt.c
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -258,11 +258,6 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- /*
- * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only
- * runs at 1MHz. We chose to always run at 1MHz, as there's no
- * good reason to have a faster watchdog counter.
- */
wdt->wdd.info = &aspeed_wdt_info;
wdt->wdd.ops = &aspeed_wdt_ops;
wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
@@ -278,7 +273,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
return -EINVAL;
config = ofdid->data;
- wdt->ctrl = WDT_CTRL_1MHZ_CLK;
+ /*
+ * On clock rates:
+ * - ast2400 wdt can run at PCLK, or 1MHz
+ * - ast2500 only runs at 1MHz, hard coding bit 4 to 1
+ * - ast2600 always runs at 1MHz
+ *
+ * Set the ast2400 to run at 1MHz as it simplifies the driver.
+ */
+ if (of_device_is_compatible(np, "aspeed,ast2400-wdt"))
+ wdt->ctrl = WDT_CTRL_1MHZ_CLK;
/*
* Control reset on a per-device basis to ensure the
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 907a4545dee6..6d751eb8191d 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -213,6 +213,7 @@ static const struct file_operations at91wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = at91_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = at91_wdt_open,
.release = at91_wdt_close,
.write = at91_wdt_write,
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index 390941c65eee..abfe34dd760a 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -4,33 +4,37 @@
*
* Copyright (C) 2007 Andrew Victor
* Copyright (C) 2007 Atmel Corporation.
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
*
* Watchdog Timer (WDT) - System peripherals regsters.
* Based on AT91SAM9261 datasheet revision D.
+ * Based on SAM9X60 datasheet.
*
*/
#ifndef AT91_WDT_H
#define AT91_WDT_H
+#include <linux/bits.h>
+
#define AT91_WDT_CR 0x00 /* Watchdog Control Register */
-#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
-#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
+#define AT91_WDT_WDRSTT BIT(0) /* Restart */
+#define AT91_WDT_KEY (0xa5UL << 24) /* KEY Password */
#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
-#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
-#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
-#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
-#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
-#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
-#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
-#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
-#define AT91_WDT_SET_WDD(x) (((x) << 16) & AT91_WDT_WDD)
-#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
-#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
+#define AT91_WDT_WDV (0xfffUL << 0) /* Counter Value */
+#define AT91_WDT_SET_WDV(x) ((x) & AT91_WDT_WDV)
+#define AT91_WDT_WDFIEN BIT(12) /* Fault Interrupt Enable */
+#define AT91_WDT_WDRSTEN BIT(13) /* Reset Processor */
+#define AT91_WDT_WDRPROC BIT(14) /* Timer Restart */
+#define AT91_WDT_WDDIS BIT(15) /* Watchdog Disable */
+#define AT91_WDT_WDD (0xfffUL << 16) /* Delta Value */
+#define AT91_WDT_SET_WDD(x) (((x) << 16) & AT91_WDT_WDD)
+#define AT91_WDT_WDDBGHLT BIT(28) /* Debug Halt */
+#define AT91_WDT_WDIDLEHLT BIT(29) /* Idle Halt */
-#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
-#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
-#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
+#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
+#define AT91_WDT_WDUNF BIT(0) /* Watchdog Underflow */
+#define AT91_WDT_WDERR BIT(1) /* Watchdog Error */
#endif
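
The converted helpers still assemble the same Mode Register value; for instance, a full 12-bit counter and delta with processor reset enabled (standalone check, BIT() redefined for userspace):

#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1UL << (n))
#define AT91_WDT_WDV		(0xfffUL << 0)
#define AT91_WDT_SET_WDV(x)	((x) & AT91_WDT_WDV)
#define AT91_WDT_WDRSTEN	BIT(13)
#define AT91_WDT_WDD		(0xfffUL << 16)
#define AT91_WDT_SET_WDD(x)	(((x) << 16) & AT91_WDT_WDD)

int main(void)
{
	uint32_t mr = AT91_WDT_SET_WDV(0xfff) | AT91_WDT_SET_WDD(0xfff) |
		      AT91_WDT_WDRSTEN;

	assert(mr == 0x0fff2fffUL);
	return 0;
}
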
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 75de664ef4b0..d6dff97c280b 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -234,6 +234,7 @@ static const struct file_operations ath79_wdt_fops = {
.llseek = no_llseek,
.write = ath79_wdt_write,
.unlocked_ioctl = ath79_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ath79_wdt_open,
.release = ath79_wdt_release,
};
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index e2af37c9a266..8a043b52aa2f 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -221,6 +221,7 @@ static const struct file_operations bcm63xx_wdt_fops = {
.llseek = no_llseek,
.write = bcm63xx_wdt_write,
.unlocked_ioctl = bcm63xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = bcm63xx_wdt_open,
.release = bcm63xx_wdt_release,
};
diff --git a/drivers/watchdog/bd70528_wdt.c b/drivers/watchdog/bd70528_wdt.c
index bc60e036627a..0170b37e6674 100644
--- a/drivers/watchdog/bd70528_wdt.c
+++ b/drivers/watchdog/bd70528_wdt.c
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(bd70528_wdt_set);
/**
* bd70528_wdt_lock - take WDT lock
*
- * @bd70528: device data for the PMIC instance we want to operate on
+ * @data: device data for the PMIC instance we want to operate on
*
* Lock WDT for arming/disarming in order to avoid race condition caused
* by WDT state changes initiated by WDT and RTC drivers.
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(bd70528_wdt_lock);
/**
* bd70528_wdt_unlock - unlock WDT lock
*
- * @bd70528: device data for the PMIC instance we want to operate on
+ * @data: device data for the PMIC instance we want to operate on
*
* Unlock WDT lock which has previously been taken by call to
* bd70528_wdt_lock.
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index f8d4e91d0383..06bd4e1a5923 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -335,8 +335,10 @@ static int cdns_wdt_probe(struct platform_device *pdev)
wdt->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdt->clk)) {
- dev_err(dev, "input clock not found\n");
- return PTR_ERR(wdt->clk);
+ ret = PTR_ERR(wdt->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "input clock not found\n");
+ return ret;
}
ret = clk_prepare_enable(wdt->clk);
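
This is the standard -EPROBE_DEFER pattern: if the clock provider has not probed yet, devm_clk_get() returns -EPROBE_DEFER and the driver core retries the probe later, so printing an error for that transient case would only add log noise.
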
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index d6d53014cb68..9867a3a936df 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -187,6 +187,7 @@ static const struct file_operations cpu5wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = cpu5wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = cpu5wdt_open,
.write = cpu5wdt_write,
.release = cpu5wdt_release,
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 3a83a48abcae..f5ffa7be066e 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -371,6 +371,7 @@ static const struct file_operations eurwdt_fops = {
.llseek = no_llseek,
.write = eurwdt_write,
.unlocked_ioctl = eurwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = eurwdt_open,
.release = eurwdt_release,
};
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index e46104c2fd94..a3c44d75d80e 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -669,6 +669,7 @@ static const struct file_operations watchdog_fops = {
.release = watchdog_release,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice watchdog_miscdev = {
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 7d5f56994f09..f6541d1b65e3 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -248,6 +248,7 @@ static const struct file_operations gef_wdt_fops = {
.llseek = no_llseek,
.write = gef_wdt_write,
.unlocked_ioctl = gef_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = gef_wdt_open,
.release = gef_wdt_release,
};
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 8d105d98908e..9914a4283cb2 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -201,6 +201,7 @@ static const struct file_operations geodewdt_fops = {
.llseek = no_llseek,
.write = geodewdt_write,
.unlocked_ioctl = geodewdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = geodewdt_open,
.release = geodewdt_release,
};
diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c
index 92fd7f33bc4d..2b65ea9451d1 100644
--- a/drivers/watchdog/ib700wdt.c
+++ b/drivers/watchdog/ib700wdt.c
@@ -259,6 +259,7 @@ static const struct file_operations ibwdt_fops = {
.llseek = no_llseek,
.write = ibwdt_write,
.unlocked_ioctl = ibwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ibwdt_open,
.release = ibwdt_close,
};
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 897f7eda9e6a..4a22fe152086 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -344,6 +344,7 @@ static const struct file_operations asr_fops = {
.llseek = no_llseek,
.write = asr_write,
.unlocked_ioctl = asr_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = asr_open,
.release = asr_release,
};
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 8d019a961ccc..f8d58bf0bf66 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -72,7 +72,6 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
static unsigned timeout;
module_param(timeout, uint, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
@@ -247,13 +246,14 @@ static const struct regmap_config imx2_wdt_regmap_config = {
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct imx2_wdt_device *wdev;
struct watchdog_device *wdog;
void __iomem *base;
int ret;
u32 val;
- wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL);
+ wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL);
if (!wdev)
return -ENOMEM;
@@ -261,16 +261,16 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- wdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
+ wdev->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
&imx2_wdt_regmap_config);
if (IS_ERR(wdev->regmap)) {
- dev_err(&pdev->dev, "regmap init failed\n");
+ dev_err(dev, "regmap init failed\n");
return PTR_ERR(wdev->regmap);
}
- wdev->clk = devm_clk_get(&pdev->dev, NULL);
+ wdev->clk = devm_clk_get(dev, NULL);
if (IS_ERR(wdev->clk)) {
- dev_err(&pdev->dev, "can't get Watchdog clock\n");
+ dev_err(dev, "can't get Watchdog clock\n");
return PTR_ERR(wdev->clk);
}
@@ -280,12 +280,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->min_timeout = 1;
wdog->timeout = IMX2_WDT_DEFAULT_TIME;
wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
- wdog->parent = &pdev->dev;
+ wdog->parent = dev;
ret = platform_get_irq(pdev, 0);
if (ret > 0)
- if (!devm_request_irq(&pdev->dev, ret, imx2_wdt_isr, 0,
- dev_name(&pdev->dev), wdog))
+ if (!devm_request_irq(dev, ret, imx2_wdt_isr, 0,
+ dev_name(dev), wdog))
wdog->info = &imx2_wdt_pretimeout_info;
ret = clk_prepare_enable(wdev->clk);
@@ -295,13 +295,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
- wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
+ wdev->ext_reset = of_property_read_bool(dev->of_node,
"fsl,ext-reset-output");
platform_set_drvdata(pdev, wdog);
watchdog_set_drvdata(wdog, wdev);
watchdog_set_nowayout(wdog, nowayout);
watchdog_set_restart_priority(wdog, 128);
- watchdog_init_timeout(wdog, timeout, &pdev->dev);
+ watchdog_init_timeout(wdog, timeout, dev);
if (imx2_wdt_is_running(wdev)) {
imx2_wdt_set_timeout(wdog, wdog->timeout);
@@ -319,7 +319,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (ret)
goto disable_clk;
- dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n",
+ dev_info(dev, "timeout %d sec (nowayout=%d)\n",
wdog->timeout, nowayout);
return 0;
@@ -359,9 +359,8 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
}
-#ifdef CONFIG_PM_SLEEP
/* Disable watchdog if it is active or non-active but still running */
-static int imx2_wdt_suspend(struct device *dev)
+static int __maybe_unused imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -382,7 +381,7 @@ static int imx2_wdt_suspend(struct device *dev)
}
/* Enable watchdog and configure it if necessary */
-static int imx2_wdt_resume(struct device *dev)
+static int __maybe_unused imx2_wdt_resume(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -407,7 +406,6 @@ static int imx2_wdt_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(imx2_wdt_pm_ops, imx2_wdt_suspend,
imx2_wdt_resume);
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 5ce51026989a..0a87c6f4bab2 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -17,6 +17,9 @@
#define WDOG_CS_CMD32EN BIT(13)
#define WDOG_CS_ULK BIT(11)
#define WDOG_CS_RCS BIT(10)
+#define LPO_CLK 0x1
+#define LPO_CLK_SHIFT 8
+#define WDOG_CS_CLK (LPO_CLK << LPO_CLK_SHIFT)
#define WDOG_CS_EN BIT(7)
#define WDOG_CS_UPDATE BIT(5)
@@ -41,24 +44,25 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
struct imx7ulp_wdt_device {
- struct notifier_block restart_handler;
struct watchdog_device wdd;
void __iomem *base;
struct clk *clk;
};
-static inline void imx7ulp_wdt_enable(void __iomem *base, bool enable)
+static void imx7ulp_wdt_enable(struct watchdog_device *wdog, bool enable)
{
- u32 val = readl(base + WDOG_CS);
+ struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- writel(UNLOCK, base + WDOG_CNT);
+ u32 val = readl(wdt->base + WDOG_CS);
+
+ writel(UNLOCK, wdt->base + WDOG_CNT);
if (enable)
- writel(val | WDOG_CS_EN, base + WDOG_CS);
+ writel(val | WDOG_CS_EN, wdt->base + WDOG_CS);
else
- writel(val & ~WDOG_CS_EN, base + WDOG_CS);
+ writel(val & ~WDOG_CS_EN, wdt->base + WDOG_CS);
}
-static inline bool imx7ulp_wdt_is_enabled(void __iomem *base)
+static bool imx7ulp_wdt_is_enabled(void __iomem *base)
{
u32 val = readl(base + WDOG_CS);
@@ -76,18 +80,15 @@ static int imx7ulp_wdt_ping(struct watchdog_device *wdog)
static int imx7ulp_wdt_start(struct watchdog_device *wdog)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
- imx7ulp_wdt_enable(wdt->base, true);
+ imx7ulp_wdt_enable(wdog, true);
return 0;
}
static int imx7ulp_wdt_stop(struct watchdog_device *wdog)
{
- struct imx7ulp_wdt_device *wdt = watchdog_get_drvdata(wdog);
-
- imx7ulp_wdt_enable(wdt->base, false);
+ imx7ulp_wdt_enable(wdog, false);
return 0;
}
@@ -106,12 +107,28 @@ static int imx7ulp_wdt_set_timeout(struct watchdog_device *wdog,
return 0;
}
+static int imx7ulp_wdt_restart(struct watchdog_device *wdog,
+ unsigned long action, void *data)
+{
+	imx7ulp_wdt_enable(wdog, true);
+	imx7ulp_wdt_set_timeout(wdog, 1);
+
+ /* wait for wdog to fire */
+ while (true)
+ ;
+
+ return NOTIFY_DONE;
+}
+
static const struct watchdog_ops imx7ulp_wdt_ops = {
.owner = THIS_MODULE,
.start = imx7ulp_wdt_start,
.stop = imx7ulp_wdt_stop,
.ping = imx7ulp_wdt_ping,
.set_timeout = imx7ulp_wdt_set_timeout,
+ .restart = imx7ulp_wdt_restart,
};
static const struct watchdog_info imx7ulp_wdt_info = {
@@ -120,7 +137,7 @@ static const struct watchdog_info imx7ulp_wdt_info = {
WDIOF_MAGICCLOSE,
};
-static inline void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
+static void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
{
u32 val;
@@ -131,7 +148,7 @@ static inline void imx7ulp_wdt_init(void __iomem *base, unsigned int timeout)
/* set an initial timeout value in TOVAL */
writel(timeout, base + WDOG_TOVAL);
/* enable 32bit command sequence and reconfigure */
- val = BIT(13) | BIT(8) | BIT(5);
+ val = WDOG_CS_CMD32EN | WDOG_CS_CLK | WDOG_CS_UPDATE;
writel(val, base + WDOG_CS);
}
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 550358528084..9857bb74a723 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -152,6 +152,7 @@ static const struct file_operations indydog_fops = {
.llseek = no_llseek,
.write = indydog_write,
.unlocked_ioctl = indydog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = indydog_open,
.release = indydog_release,
};
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 2cdbd37c700c..470213abfd3d 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -134,6 +134,7 @@ static int mid_wdt_probe(struct platform_device *pdev)
wdt_dev->timeout = MID_WDT_DEFAULT_TIMEOUT;
wdt_dev->parent = dev;
+ watchdog_set_nowayout(wdt_dev, WATCHDOG_NOWAYOUT);
watchdog_set_drvdata(wdt_dev, dev);
ret = devm_request_irq(dev, pdata->irq, mid_wdt_irq,
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 1c85103b750b..6ad5bf3451ec 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -412,6 +412,7 @@ static const struct file_operations intel_scu_fops = {
.llseek = no_llseek,
.write = intel_scu_write,
.unlocked_ioctl = intel_scu_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = intel_scu_open,
.release = intel_scu_release,
};
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index a9ccdb9a9159..6bf68d4750de 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -202,6 +202,7 @@ static const struct file_operations iop_wdt_fops = {
.llseek = no_llseek,
.write = iop_wdt_write,
.unlocked_ioctl = iop_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = iop_wdt_open,
.release = iop_wdt_release,
};
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 2fe1a3c499ed..2fed40d14007 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -345,6 +345,7 @@ static const struct file_operations it8712f_wdt_fops = {
.llseek = no_llseek,
.write = it8712f_wdt_write,
.unlocked_ioctl = it8712f_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = it8712f_wdt_open,
.release = it8712f_wdt_release,
};
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 9067998759e3..09886616fd21 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -163,6 +163,7 @@ static const struct file_operations ixp4xx_wdt_fops = {
.llseek = no_llseek,
.write = ixp4xx_wdt_write,
.unlocked_ioctl = ixp4xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = ixp4xx_wdt_open,
.release = ixp4xx_wdt_release,
};
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index c6052ae54f32..bdf9564efa29 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -5,6 +5,7 @@
*/
#include <linux/mfd/ingenic-tcu.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -17,19 +18,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-
-#include <asm/mach-jz4740/timer.h>
-
-#define JZ_WDT_CLOCK_PCLK 0x1
-#define JZ_WDT_CLOCK_RTC 0x2
-#define JZ_WDT_CLOCK_EXT 0x4
-
-#define JZ_WDT_CLOCK_DIV_1 (0 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_4 (1 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_16 (2 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_64 (3 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_256 (4 << TCU_TCSR_PRESCALE_LSB)
-#define JZ_WDT_CLOCK_DIV_1024 (5 << TCU_TCSR_PRESCALE_LSB)
+#include <linux/regmap.h>
#define DEFAULT_HEARTBEAT 5
#define MAX_HEARTBEAT 2048
@@ -49,15 +38,17 @@ MODULE_PARM_DESC(heartbeat,
struct jz4740_wdt_drvdata {
struct watchdog_device wdt;
- void __iomem *base;
- struct clk *rtc_clk;
+ struct regmap *map;
+ struct clk *clk;
+ unsigned long clk_rate;
};
static int jz4740_wdt_ping(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writew(0x0, drvdata->base + TCU_REG_WDT_TCNT);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
+
return 0;
}
@@ -65,35 +56,17 @@ static int jz4740_wdt_set_timeout(struct watchdog_device *wdt_dev,
unsigned int new_timeout)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- unsigned int rtc_clk_rate;
- unsigned int timeout_value;
- unsigned short clock_div = JZ_WDT_CLOCK_DIV_1;
- u8 tcer;
-
- rtc_clk_rate = clk_get_rate(drvdata->rtc_clk);
-
- timeout_value = rtc_clk_rate * new_timeout;
- while (timeout_value > 0xffff) {
- if (clock_div == JZ_WDT_CLOCK_DIV_1024) {
- /* Requested timeout too high;
- * use highest possible value. */
- timeout_value = 0xffff;
- break;
- }
- timeout_value >>= 2;
- clock_div += (1 << TCU_TCSR_PRESCALE_LSB);
- }
+ u16 timeout_value = (u16)(drvdata->clk_rate * new_timeout);
+ unsigned int tcer;
- tcer = readb(drvdata->base + TCU_REG_WDT_TCER);
- writeb(0x0, drvdata->base + TCU_REG_WDT_TCER);
- writew(clock_div, drvdata->base + TCU_REG_WDT_TCSR);
+ regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
- writew((u16)timeout_value, drvdata->base + TCU_REG_WDT_TDR);
- writew(0x0, drvdata->base + TCU_REG_WDT_TCNT);
- writew(clock_div | JZ_WDT_CLOCK_RTC, drvdata->base + TCU_REG_WDT_TCSR);
+ regmap_write(drvdata->map, TCU_REG_WDT_TDR, timeout_value);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCNT, 0);
if (tcer & TCU_WDT_TCER_TCEN)
- writeb(TCU_WDT_TCER_TCEN, drvdata->base + TCU_REG_WDT_TCER);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
wdt_dev->timeout = new_timeout;
return 0;
@@ -102,16 +75,20 @@ static int jz4740_wdt_set_timeout(struct watchdog_device *wdt_dev,
static int jz4740_wdt_start(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- u8 tcer;
+ unsigned int tcer;
+ int ret;
- tcer = readb(drvdata->base + TCU_REG_WDT_TCER);
+ ret = clk_prepare_enable(drvdata->clk);
+ if (ret)
+ return ret;
+
+ regmap_read(drvdata->map, TCU_REG_WDT_TCER, &tcer);
- jz4740_timer_enable_watchdog();
jz4740_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
/* Start watchdog if it wasn't started already */
if (!(tcer & TCU_WDT_TCER_TCEN))
- writeb(TCU_WDT_TCER_TCEN, drvdata->base + TCU_REG_WDT_TCER);
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, TCU_WDT_TCER_TCEN);
return 0;
}
@@ -120,8 +97,8 @@ static int jz4740_wdt_stop(struct watchdog_device *wdt_dev)
{
struct jz4740_wdt_drvdata *drvdata = watchdog_get_drvdata(wdt_dev);
- writeb(0x0, drvdata->base + TCU_REG_WDT_TCER);
- jz4740_timer_disable_watchdog();
+ regmap_write(drvdata->map, TCU_REG_WDT_TCER, 0);
+ clk_disable_unprepare(drvdata->clk);
return 0;
}
@@ -162,33 +139,46 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct jz4740_wdt_drvdata *drvdata;
struct watchdog_device *jz4740_wdt;
+ long rate;
+ int ret;
drvdata = devm_kzalloc(dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
- heartbeat = DEFAULT_HEARTBEAT;
+	drvdata->clk = devm_clk_get(dev, "wdt");
+	if (IS_ERR(drvdata->clk)) {
+		dev_err(dev, "cannot find WDT clock\n");
+		return PTR_ERR(drvdata->clk);
+ }
+
+ /* Set smallest clock possible */
+ rate = clk_round_rate(drvdata->clk, 1);
+ if (rate < 0)
+ return rate;
+ ret = clk_set_rate(drvdata->clk, rate);
+ if (ret)
+ return ret;
+
+ drvdata->clk_rate = rate;
jz4740_wdt = &drvdata->wdt;
jz4740_wdt->info = &jz4740_wdt_info;
jz4740_wdt->ops = &jz4740_wdt_ops;
- jz4740_wdt->timeout = heartbeat;
jz4740_wdt->min_timeout = 1;
- jz4740_wdt->max_timeout = MAX_HEARTBEAT;
+ jz4740_wdt->max_timeout = 0xffff / rate;
+ jz4740_wdt->timeout = clamp(heartbeat,
+ jz4740_wdt->min_timeout,
+ jz4740_wdt->max_timeout);
jz4740_wdt->parent = dev;
watchdog_set_nowayout(jz4740_wdt, nowayout);
watchdog_set_drvdata(jz4740_wdt, drvdata);
- drvdata->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(drvdata->base))
- return PTR_ERR(drvdata->base);
-
- drvdata->rtc_clk = devm_clk_get(dev, "rtc");
- if (IS_ERR(drvdata->rtc_clk)) {
- dev_err(dev, "cannot find RTC clock\n");
- return PTR_ERR(drvdata->rtc_clk);
+	drvdata->map = device_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(drvdata->map)) {
+		dev_err(dev, "regmap not found\n");
+		return PTR_ERR(drvdata->map);
+	}
return devm_watchdog_register_device(dev, &drvdata->wdt);
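
With the regmap conversion the watchdog is a plain 16-bit down-counter clocked at clk_rate, so max_timeout is 0xffff / rate and the programmed value is rate * seconds. Worked numbers for a hypothetical 750 Hz slowest rate (standalone sketch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	long rate = 750;			/* hypothetical slowest WDT clock */
	unsigned int max_timeout = 0xffff / rate;	/* longest timeout, 87 s */
	uint16_t timeout_value = (uint16_t)(rate * 5);	/* 5 s heartbeat */

	assert(max_timeout == 87);
	assert(timeout_value == 3750);
	return 0;
}
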
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 752d03620f0a..22f335e1e164 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -183,6 +183,7 @@ static const struct file_operations m54xx_wdt_fops = {
.llseek = no_llseek,
.write = m54xx_wdt_write,
.unlocked_ioctl = m54xx_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = m54xx_wdt_open,
.release = m54xx_wdt_release,
};
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index cef2baf59dda..80ff94688487 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -361,6 +361,7 @@ static const struct file_operations zf_fops = {
.llseek = no_llseek,
.write = zf_write,
.unlocked_ioctl = zf_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = zf_open,
.release = zf_close,
};
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index ed18238c5407..8973f98bc6a5 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -168,3 +168,4 @@ module_mcb_driver(men_z069_driver);
MODULE_AUTHOR("Johannes Thumshirn <jth@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z069");
+MODULE_IMPORT_NS(MCB);
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index a86faa5000f1..d387bad377c4 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -227,6 +227,7 @@ static const struct file_operations mixcomwd_fops = {
.llseek = no_llseek,
.write = mixcomwd_write,
.unlocked_ioctl = mixcomwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mixcomwd_open,
.release = mixcomwd_release,
};
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 25a92857b217..8aa1cb4a295f 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -181,6 +181,7 @@ static const struct file_operations mtx1_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mtx1_wdt_open,
.write = mtx1_wdt_write,
.release = mtx1_wdt_release,
diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c
index 74bf7144a970..0bc72dd69b70 100644
--- a/drivers/watchdog/mv64x60_wdt.c
+++ b/drivers/watchdog/mv64x60_wdt.c
@@ -241,6 +241,7 @@ static const struct file_operations mv64x60_wdt_fops = {
.llseek = no_llseek,
.write = mv64x60_wdt_write,
.unlocked_ioctl = mv64x60_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mv64x60_wdt_open,
.release = mv64x60_wdt_release,
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 5f0082e300bd..d7a560e348d5 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -267,6 +267,7 @@ static const struct file_operations nv_tco_fops = {
.llseek = no_llseek,
.write = nv_tco_write,
.unlocked_ioctl = nv_tco_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = nv_tco_open,
.release = nv_tco_release,
};
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 2af1a8b3f973..73fbfc99083b 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -473,6 +473,7 @@ static const struct file_operations pc87413_fops = {
.llseek = no_llseek,
.write = pc87413_write,
.unlocked_ioctl = pc87413_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pc87413_open,
.release = pc87413_release,
};
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index c3c93e00b320..7a0587fdc52c 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -752,6 +752,7 @@ static const struct file_operations pcwd_fops = {
.llseek = no_llseek,
.write = pcwd_write,
.unlocked_ioctl = pcwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pcwd_open,
.release = pcwd_close,
};
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index e30c1f762045..81508a42a90c 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -646,6 +646,7 @@ static const struct file_operations pcipcwd_fops = {
.llseek = no_llseek,
.write = pcipcwd_write,
.unlocked_ioctl = pcipcwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pcipcwd_open,
.release = pcipcwd_release,
};
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 6727f8ab2d18..2f44af1831d0 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -550,6 +550,7 @@ static const struct file_operations usb_pcwd_fops = {
.llseek = no_llseek,
.write = usb_pcwd_write,
.unlocked_ioctl = usb_pcwd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = usb_pcwd_open,
.release = usb_pcwd_release,
};
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 205c3c68fca1..a98abd0d3146 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -214,6 +214,7 @@ static const struct file_operations pikawdt_fops = {
.release = pikawdt_release,
.write = pikawdt_write,
.unlocked_ioctl = pikawdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice pikawdt_miscdev = {
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
index aa53babf2bab..4097d076aab8 100644
--- a/drivers/watchdog/pnx833x_wdt.c
+++ b/drivers/watchdog/pnx833x_wdt.c
@@ -215,6 +215,7 @@ static const struct file_operations pnx833x_wdt_fops = {
.llseek = no_llseek,
.write = pnx833x_wdt_write,
.unlocked_ioctl = pnx833x_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = pnx833x_wdt_open,
.release = pnx833x_wdt_release,
};
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index a8a4b3a41a90..1dfede0abf18 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -245,6 +245,7 @@ static const struct file_operations rc32434_wdt_fops = {
.llseek = no_llseek,
.write = rc32434_wdt_write,
.unlocked_ioctl = rc32434_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = rc32434_wdt_open,
.release = rc32434_wdt_release,
};
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 2e608ae6cbc7..57187efeb86f 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -199,6 +199,7 @@ static const struct file_operations rdc321x_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = rdc321x_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = rdc321x_wdt_open,
.write = rdc321x_wdt_write,
.release = rdc321x_wdt_release,
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index b35f7be20c00..dc3c06a92f93 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -163,6 +163,7 @@ static const struct file_operations riowd_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = riowd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = riowd_open,
.write = riowd_write,
.release = riowd_release,
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index cbd8c957182f..9b93be00109f 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -141,6 +141,7 @@ static const struct file_operations sa1100dog_fops = {
.llseek = no_llseek,
.write = sa1100dog_write,
.unlocked_ioctl = sa1100dog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sa1100dog_open,
.release = sa1100dog_release,
};
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 202fc8d8ca5f..da2dad00d473 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -237,6 +237,7 @@ static const struct file_operations sbwdog_fops = {
.llseek = no_llseek,
.write = sbwdog_write,
.unlocked_ioctl = sbwdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sbwdog_open,
.release = sbwdog_release,
};
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index c3151642694c..f2cbe6d880a8 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -280,6 +280,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index 12cdee7d5069..520b8dd77ed4 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -194,9 +194,8 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (wdt_set_timeout(new_timeout))
return -EINVAL;
-
- /* Fall through */
}
+ /* Fall through */
case WDIOC_GETTIMEOUT:
return put_user(timeout, (int __user *)arg);
default:
@@ -211,6 +210,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
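
The sbc7240 change is purely about where the fall-through annotation sits: GCC's -Wimplicit-fallthrough does not recognize the comment when it is nested inside the braced case body, only when it immediately precedes the next case label. Schematically (WDIOC_* and the helpers mirror the patched driver; the surrounding state is illustrative):

#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	static int timeout = 30;	/* illustrative state */

	switch (cmd) {
	case WDIOC_SETTIMEOUT: {
		int new_timeout;

		if (get_user(new_timeout, (int __user *)arg))
			return -EFAULT;
		timeout = new_timeout;
	}
	/* Fall through */	/* recognized here, outside the braces */
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, (int __user *)arg);
	default:
		return -ENOTTY;
	}
}
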
diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c
index 86828c28843f..5e3a9ddb952e 100644
--- a/drivers/watchdog/sbc_epx_c3.c
+++ b/drivers/watchdog/sbc_epx_c3.c
@@ -156,6 +156,7 @@ static const struct file_operations epx_c3_fops = {
.llseek = no_llseek,
.write = epx_c3_write,
.unlocked_ioctl = epx_c3_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = epx_c3_open,
.release = epx_c3_release,
};
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 3822a60a8d2b..1b20b33879c4 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -186,6 +186,7 @@ static const struct file_operations fitpc2_wdt_fops = {
.llseek = no_llseek,
.write = fitpc2_wdt_write,
.unlocked_ioctl = fitpc2_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = fitpc2_wdt_open,
.release = fitpc2_wdt_release,
};
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 960385a766b3..9673eb12dacd 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -307,6 +307,7 @@ static const struct file_operations sc1200wdt_fops = {
.llseek = no_llseek,
.write = sc1200wdt_write,
.unlocked_ioctl = sc1200wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sc1200wdt_open,
.release = sc1200wdt_release,
};
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index a612128c5f80..fbe79bcc9297 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -336,6 +336,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 3612f1df381b..83949a385f62 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -337,6 +337,7 @@ static const struct file_operations sch311x_wdt_fops = {
.llseek = no_llseek,
.write = sch311x_wdt_write,
.unlocked_ioctl = sch311x_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sch311x_wdt_open,
.release = sch311x_wdt_close,
};
diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c
index 46268309ee9b..c94098acb78f 100644
--- a/drivers/watchdog/scx200_wdt.c
+++ b/drivers/watchdog/scx200_wdt.c
@@ -201,6 +201,7 @@ static const struct file_operations scx200_wdt_fops = {
.llseek = no_llseek,
.write = scx200_wdt_write,
.unlocked_ioctl = scx200_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = scx200_wdt_open,
.release = scx200_wdt_release,
};
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index f5713030d0f7..43de56acd767 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -505,6 +505,7 @@ static const struct file_operations wb_smsc_wdt_fops = {
.llseek = no_llseek,
.write = wb_smsc_wdt_write,
.unlocked_ioctl = wb_smsc_wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wb_smsc_wdt_open,
.release = wb_smsc_wdt_release,
};
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index 0bb17b046140..65cb55f3916f 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -327,10 +327,9 @@ static int sprd_wdt_probe(struct platform_device *pdev)
static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
{
- struct watchdog_device *wdd = dev_get_drvdata(dev);
struct sprd_wdt *wdt = dev_get_drvdata(dev);
- if (watchdog_active(wdd))
+ if (watchdog_active(&wdt->wdd))
sprd_wdt_stop(&wdt->wdd);
sprd_wdt_disable(wdt);
@@ -339,7 +338,6 @@ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
{
- struct watchdog_device *wdd = dev_get_drvdata(dev);
struct sprd_wdt *wdt = dev_get_drvdata(dev);
int ret;
@@ -347,7 +345,7 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
if (ret)
return ret;
- if (watchdog_active(wdd)) {
+ if (watchdog_active(&wdt->wdd)) {
ret = sprd_wdt_start(&wdt->wdd);
if (ret) {
sprd_wdt_disable(wdt);
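
The sprd_wdt hunks fix a type confusion: dev_get_drvdata() was read twice, once as a struct watchdog_device * and once as a struct sprd_wdt *, but probe can only have stored one of those. With the wrapper struct in drvdata, the embedded watchdog_device is reached by member access instead. The general shape, with hypothetical foo types and a stubbed stop routine:

#include <linux/device.h>
#include <linux/watchdog.h>

struct foo_wdt {
	struct watchdog_device wdd;
	void __iomem *base;
};

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	return 0;	/* stub: hardware stop would go here */
}

static int __maybe_unused foo_wdt_pm_suspend(struct device *dev)
{
	/* probe stored the wrapper: dev_set_drvdata(dev, wdt) */
	struct foo_wdt *wdt = dev_get_drvdata(dev);

	/* one retrieval, one type; the wdd is reached by member access */
	if (watchdog_active(&wdt->wdd))
		foo_wdt_stop(&wdt->wdd);
	return 0;
}
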
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 38b31e9947aa..fdf533fe0bb2 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -49,7 +49,7 @@ static int wdt_cfg_leave = 0xAA;/* key to lock configuration space */
enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
- nct6795, nct6796, nct6102 };
+ nct6795, nct6796, nct6102, nct6116 };
static int timeout; /* in seconds */
module_param(timeout, int, 0);
@@ -94,6 +94,7 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
#define NCT6775_ID 0xb4
#define NCT6776_ID 0xc3
#define NCT6102_ID 0xc4
+#define NCT6116_ID 0xd2
#define NCT6779_ID 0xc5
#define NCT6791_ID 0xc8
#define NCT6792_ID 0xc9
@@ -211,6 +212,7 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
case nct6795:
case nct6796:
case nct6102:
+ case nct6116:
/*
* These chips have a fixed WDTO# output pin (W83627UHG),
* or support more than one WDTO# output pin.
@@ -417,6 +419,12 @@ static int wdt_find(int addr)
cr_wdt_control = NCT6102D_WDT_CONTROL;
cr_wdt_csr = NCT6102D_WDT_CSR;
break;
+ case NCT6116_ID:
+ ret = nct6102;
+ cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
+ cr_wdt_control = NCT6102D_WDT_CONTROL;
+ cr_wdt_csr = NCT6102D_WDT_CSR;
+ break;
case 0xff:
ret = -ENODEV;
break;
@@ -482,6 +490,7 @@ static int __init wdt_init(void)
"NCT6795",
"NCT6796",
"NCT6102",
+ "NCT6116",
};
/* Apply system-specific quirks */
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index 6eb5185d6ea6..6b3b667e6f23 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -304,6 +304,7 @@ static const struct file_operations wdt_fops = {
.open = fop_open,
.release = fop_close,
.unlocked_ioctl = fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
static struct miscdevice wdt_miscdev = {
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 16e9cbe72acc..5212e68c6b01 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -446,6 +446,7 @@ static const struct file_operations wdt_fops = {
.llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdt_open,
.release = wdt_release,
};
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 6d2071a0590d..a6925847f76f 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -230,6 +230,7 @@ static const struct file_operations wafwdt_fops = {
.llseek = no_llseek,
.write = wafwdt_write,
.unlocked_ioctl = wafwdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wafwdt_open,
.release = wafwdt_close,
};
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index dbd2ad4c9294..4b2a85438478 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -34,7 +34,6 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/hrtimer.h> /* For hrtimers */
#include <linux/kernel.h> /* For printk/panic/... */
-#include <linux/kref.h> /* For data references */
#include <linux/kthread.h> /* For kthread_work */
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
@@ -52,14 +51,14 @@
/*
* struct watchdog_core_data - watchdog core internal data
- * @kref: Reference count.
+ * @dev: The watchdog's internal device
* @cdev: The watchdog's Character device.
* @wdd: Pointer to watchdog device.
* @lock: Lock for watchdog core.
* @status: Watchdog core internal status bits.
*/
struct watchdog_core_data {
- struct kref kref;
+ struct device dev;
struct cdev cdev;
struct watchdog_device *wdd;
struct mutex lock;
@@ -158,7 +157,8 @@ static inline void watchdog_update_worker(struct watchdog_device *wdd)
ktime_t t = watchdog_next_keepalive(wdd);
if (t > 0)
- hrtimer_start(&wd_data->timer, t, HRTIMER_MODE_REL);
+ hrtimer_start(&wd_data->timer, t,
+ HRTIMER_MODE_REL_HARD);
} else {
hrtimer_cancel(&wd_data->timer);
}
@@ -177,7 +177,7 @@ static int __watchdog_ping(struct watchdog_device *wdd)
if (ktime_after(earliest_keepalive, now)) {
hrtimer_start(&wd_data->timer,
ktime_sub(earliest_keepalive, now),
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_HARD);
return 0;
}
@@ -452,7 +452,26 @@ static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT, &wdd->status));
}
-static DEVICE_ATTR_RO(nowayout);
+
+static ssize_t nowayout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+ /* nowayout cannot be disabled once set */
+ if (test_bit(WDOG_NO_WAY_OUT, &wdd->status) && !value)
+ return -EPERM;
+ watchdog_set_nowayout(wdd, value);
+ return len;
+}
+static DEVICE_ATTR_RW(nowayout);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -839,7 +858,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
file->private_data = wd_data;
if (!hw_running)
- kref_get(&wd_data->kref);
+ get_device(&wd_data->dev);
/*
* open_timeout only applies for the first open from
@@ -860,11 +879,11 @@ out_clear:
return err;
}
-static void watchdog_core_data_release(struct kref *kref)
+static void watchdog_core_data_release(struct device *dev)
{
struct watchdog_core_data *wd_data;
- wd_data = container_of(kref, struct watchdog_core_data, kref);
+ wd_data = container_of(dev, struct watchdog_core_data, dev);
kfree(wd_data);
}
@@ -924,7 +943,7 @@ done:
*/
if (!running) {
module_put(wd_data->cdev.owner);
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
return 0;
}
@@ -933,6 +952,7 @@ static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = watchdog_open,
.release = watchdog_release,
};
@@ -943,17 +963,22 @@ static struct miscdevice watchdog_miscdev = {
.fops = &watchdog_fops,
};
+static struct class watchdog_class = {
+ .name = "watchdog",
+ .owner = THIS_MODULE,
+ .dev_groups = wdt_groups,
+};
+
/*
* watchdog_cdev_register: register watchdog character device
* @wdd: watchdog device
- * @devno: character device number
*
* Register a watchdog character device including handling the legacy
* /dev/watchdog node. /dev/watchdog is actually a miscdevice and
* thus we set it up like that.
*/
-static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
+static int watchdog_cdev_register(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data;
int err;
@@ -961,7 +986,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
if (!wd_data)
return -ENOMEM;
- kref_init(&wd_data->kref);
mutex_init(&wd_data->lock);
wd_data->wdd = wdd;
@@ -971,7 +995,7 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
return -ENODEV;
kthread_init_work(&wd_data->work, watchdog_ping_work);
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
wd_data->timer.function = watchdog_timer_expired;
if (wdd->id == 0) {
@@ -990,23 +1014,33 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
}
}
+ device_initialize(&wd_data->dev);
+ wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
+ wd_data->dev.class = &watchdog_class;
+ wd_data->dev.parent = wdd->parent;
+ wd_data->dev.groups = wdd->groups;
+ wd_data->dev.release = watchdog_core_data_release;
+ dev_set_drvdata(&wd_data->dev, wdd);
+ dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
+
/* Fill in the data structures */
cdev_init(&wd_data->cdev, &watchdog_fops);
- wd_data->cdev.owner = wdd->ops->owner;
/* Add the device */
- err = cdev_add(&wd_data->cdev, devno, 1);
+ err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
if (err) {
pr_err("watchdog%d unable to add device %d:%d\n",
wdd->id, MAJOR(watchdog_devt), wdd->id);
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
return err;
}
+ wd_data->cdev.owner = wdd->ops->owner;
+
/* Record time of most recent heartbeat as 'just before now'. */
wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);
watchdog_set_open_deadline(wd_data);
@@ -1017,9 +1051,10 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
*/
if (watchdog_hw_running(wdd)) {
__module_get(wdd->ops->owner);
- kref_get(&wd_data->kref);
+ get_device(&wd_data->dev);
if (handle_boot_enabled)
- hrtimer_start(&wd_data->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&wd_data->timer, 0,
+ HRTIMER_MODE_REL_HARD);
else
pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
wdd->id);
@@ -1040,7 +1075,7 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
- cdev_del(&wd_data->cdev);
+ cdev_device_del(&wd_data->cdev, &wd_data->dev);
if (wdd->id == 0) {
misc_deregister(&watchdog_miscdev);
old_wd_data = NULL;
@@ -1059,15 +1094,9 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
hrtimer_cancel(&wd_data->timer);
kthread_cancel_work_sync(&wd_data->work);
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ put_device(&wd_data->dev);
}
-static struct class watchdog_class = {
- .name = "watchdog",
- .owner = THIS_MODULE,
- .dev_groups = wdt_groups,
-};
-
static int watchdog_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *data)
{
@@ -1098,27 +1127,14 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
int watchdog_dev_register(struct watchdog_device *wdd)
{
- struct device *dev;
- dev_t devno;
int ret;
- devno = MKDEV(MAJOR(watchdog_devt), wdd->id);
-
- ret = watchdog_cdev_register(wdd, devno);
+ ret = watchdog_cdev_register(wdd);
if (ret)
return ret;
- dev = device_create_with_groups(&watchdog_class, wdd->parent,
- devno, wdd, wdd->groups,
- "watchdog%d", wdd->id);
- if (IS_ERR(dev)) {
- watchdog_cdev_unregister(wdd);
- return PTR_ERR(dev);
- }
-
ret = watchdog_register_pretimeout(wdd);
if (ret) {
- device_destroy(&watchdog_class, devno);
watchdog_cdev_unregister(wdd);
return ret;
}
@@ -1126,7 +1142,8 @@ int watchdog_dev_register(struct watchdog_device *wdd)
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
- ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
+ ret = devm_register_reboot_notifier(&wdd->wd_data->dev,
+ &wdd->reboot_nb);
if (ret) {
pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
wdd->id, ret);
@@ -1148,7 +1165,6 @@ int watchdog_dev_register(struct watchdog_device *wdd)
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
watchdog_unregister_pretimeout(wdd);
- device_destroy(&watchdog_class, wdd->wd_data->cdev.dev);
watchdog_cdev_unregister(wdd);
}
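
The watchdog core rework above swaps a bare kref for an embedded struct device, which both carries the reference count (get_device()/put_device()) and lets cdev_device_add() register the character device and the sysfs device in one step instead of the old cdev_add() plus device_create_with_groups() pair. The skeleton of that pattern, reduced to hypothetical foo types:

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/slab.h>

extern const struct file_operations foo_fops;	/* assumed defined elsewhere */

struct foo_data {
	struct device dev;	/* its refcount owns foo_data */
	struct cdev cdev;
};

static void foo_data_release(struct device *dev)
{
	/* runs when the last get_device() reference is dropped */
	kfree(container_of(dev, struct foo_data, dev));
}

static int foo_register(struct class *cls, dev_t devt, int id)
{
	struct foo_data *d;
	int err;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	device_initialize(&d->dev);	/* refcount = 1, nothing visible yet */
	d->dev.devt = devt;
	d->dev.class = cls;
	d->dev.release = foo_data_release;
	dev_set_name(&d->dev, "foo%d", id);

	cdev_init(&d->cdev, &foo_fops);
	err = cdev_device_add(&d->cdev, &d->dev);	/* cdev + sysfs together */
	if (err)
		put_device(&d->dev);	/* drops to 0, release() frees d */
	return err;
}

Teardown is the mirror image: cdev_device_del() followed by put_device(), exactly as watchdog_cdev_unregister() now does.
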
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e7cf41aa26c3..b069349b52f5 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -202,7 +202,7 @@ static int wdat_wdt_enable_reboot(struct wdat_wdt *wdat)
* WDAT specification says that the watchdog is required to reboot
* the system when it fires. However, it also states that it is
 * recommended to make it configurable through hardware register. We
- * enable reboot now if it is configrable, just in case.
+ * enable reboot now if it is configurable, just in case.
*/
ret = wdat_wdt_run_action(wdat, ACPI_WDAT_SET_REBOOT, 0, NULL);
if (ret && ret != -EOPNOTSUPP) {
diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c
index 6ad7edb4a712..184a06a74f83 100644
--- a/drivers/watchdog/wdrtas.c
+++ b/drivers/watchdog/wdrtas.c
@@ -472,6 +472,7 @@ static const struct file_operations wdrtas_fops = {
.llseek = no_llseek,
.write = wdrtas_write,
.unlocked_ioctl = wdrtas_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdrtas_open,
.release = wdrtas_close,
};
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 7d278b37e083..f9054cb0f8e2 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -523,6 +523,7 @@ static const struct file_operations wdt_fops = {
.llseek = no_llseek,
.write = wdt_write,
.unlocked_ioctl = wdt_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdt_open,
.release = wdt_release,
};
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index eb729d704836..e60993d0767e 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -181,6 +181,7 @@ static const struct file_operations watchdog_fops = {
.llseek = no_llseek,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = watchdog_open,
.release = watchdog_release,
};
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 5c52c73e1839..066a4fb4d75b 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -422,6 +422,7 @@ static const struct file_operations wdt977_fops = {
.llseek = no_llseek,
.write = wdt977_write,
.unlocked_ioctl = wdt977_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdt977_open,
.release = wdt977_release,
};
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 66303ab95685..e528024faa41 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -566,6 +566,7 @@ static const struct file_operations wdtpci_fops = {
.llseek = no_llseek,
.write = wdtpci_write,
.unlocked_ioctl = wdtpci_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = wdtpci_open,
.release = wdtpci_release,
};
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 5bae515c8e25..4f2e78a5e4db 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -374,7 +374,6 @@ static void xen_online_page(struct page *page, unsigned int order)
mutex_lock(&balloon_mutex);
for (i = 0; i < size; i++) {
p = pfn_to_page(start_pfn + i);
- __online_page_set_limits(p);
balloon_append(p);
}
mutex_unlock(&balloon_mutex);
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 2f8b949c3eeb..91e44c04f787 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -21,15 +21,8 @@ struct gntdev_dmabuf_priv;
struct gntdev_priv {
/* Maps with visible offsets in the file descriptor. */
struct list_head maps;
- /*
- * Maps that are not visible; will be freed on munmap.
- * Only populated if populate_freeable_maps == 1
- */
- struct list_head freeable_maps;
/* lock protects maps and freeable_maps. */
struct mutex lock;
- struct mm_struct *mm;
- struct mmu_notifier mn;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/* Device for which DMA memory is allocated. */
@@ -49,6 +42,7 @@ struct gntdev_unmap_notify {
};
struct gntdev_grant_map {
+ struct mmu_interval_notifier notifier;
struct list_head next;
struct vm_area_struct *vma;
int index;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 81401f386c9c..a04ddf2a68af 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -63,7 +63,6 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
static atomic_t pages_mapped = ATOMIC_INIT(0);
static int use_ptemod;
-#define populate_freeable_maps use_ptemod
static int unmap_grant_pages(struct gntdev_grant_map *map,
int offset, int pages);
@@ -249,12 +248,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
evtchn_put(map->notify.event);
}
- if (populate_freeable_maps && priv) {
- mutex_lock(&priv->lock);
- list_del(&map->next);
- mutex_unlock(&priv->lock);
- }
-
if (map->pages && !use_ptemod)
unmap_grant_pages(map, 0, map->count);
gntdev_free_map(map);
@@ -444,16 +437,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
pr_debug("gntdev_vma_close %p\n", vma);
if (use_ptemod) {
- /* It is possible that an mmu notifier could be running
- * concurrently, so take priv->lock to ensure that the vma won't
- * vanishing during the unmap_grant_pages call, since we will
- * spin here until that completes. Such a concurrent call will
- * not do any unmapping, since that has been done prior to
- * closing the vma, but it may still iterate the unmap_ops list.
- */
- mutex_lock(&priv->lock);
+ WARN_ON(map->vma != vma);
+ mmu_interval_notifier_remove(&map->notifier);
map->vma = NULL;
- mutex_unlock(&priv->lock);
}
vma->vm_private_data = NULL;
gntdev_put_map(priv, map);
@@ -475,109 +461,44 @@ static const struct vm_operations_struct gntdev_vmops = {
/* ------------------------------------------------------------------ */
-static bool in_range(struct gntdev_grant_map *map,
- unsigned long start, unsigned long end)
-{
- if (!map->vma)
- return false;
- if (map->vma->vm_start >= end)
- return false;
- if (map->vma->vm_end <= start)
- return false;
-
- return true;
-}
-
-static int unmap_if_in_range(struct gntdev_grant_map *map,
- unsigned long start, unsigned long end,
- bool blockable)
+static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
{
+ struct gntdev_grant_map *map =
+ container_of(mn, struct gntdev_grant_map, notifier);
unsigned long mstart, mend;
int err;
- if (!in_range(map, start, end))
- return 0;
+ if (!mmu_notifier_range_blockable(range))
+ return false;
- if (!blockable)
- return -EAGAIN;
+ /*
+ * If the VMA is split or otherwise changed the notifier is not
+ * updated, but we don't want to process VA's outside the modified
+ * VMA. FIXME: It would be much more understandable to just prevent
+ * modifying the VMA in the first place.
+ */
+ if (map->vma->vm_start >= range->end ||
+ map->vma->vm_end <= range->start)
+ return true;
- mstart = max(start, map->vma->vm_start);
- mend = min(end, map->vma->vm_end);
+ mstart = max(range->start, map->vma->vm_start);
+ mend = min(range->end, map->vma->vm_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
- start, end, mstart, mend);
+ range->start, range->end, mstart, mend);
err = unmap_grant_pages(map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
WARN_ON(err);
- return 0;
-}
-
-static int mn_invl_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct gntdev_grant_map *map;
- int ret = 0;
-
- if (mmu_notifier_range_blockable(range))
- mutex_lock(&priv->lock);
- else if (!mutex_trylock(&priv->lock))
- return -EAGAIN;
-
- list_for_each_entry(map, &priv->maps, next) {
- ret = unmap_if_in_range(map, range->start, range->end,
- mmu_notifier_range_blockable(range));
- if (ret)
- goto out_unlock;
- }
- list_for_each_entry(map, &priv->freeable_maps, next) {
- ret = unmap_if_in_range(map, range->start, range->end,
- mmu_notifier_range_blockable(range));
- if (ret)
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&priv->lock);
-
- return ret;
-}
-
-static void mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
- struct gntdev_grant_map *map;
- int err;
-
- mutex_lock(&priv->lock);
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- pr_debug("map %d+%d (%lx %lx)\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
- }
- list_for_each_entry(map, &priv->freeable_maps, next) {
- if (!map->vma)
- continue;
- pr_debug("map %d+%d (%lx %lx)\n",
- map->index, map->count,
- map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
- }
- mutex_unlock(&priv->lock);
+ return true;
}
-static const struct mmu_notifier_ops gntdev_mmu_ops = {
- .release = mn_release,
- .invalidate_range_start = mn_invl_range_start,
+static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
+ .invalidate = gntdev_invalidate,
};
/* ------------------------------------------------------------------ */
@@ -592,7 +513,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
return -ENOMEM;
INIT_LIST_HEAD(&priv->maps);
- INIT_LIST_HEAD(&priv->freeable_maps);
mutex_init(&priv->lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
@@ -604,17 +524,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
}
#endif
- if (use_ptemod) {
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
- }
- priv->mn.ops = &gntdev_mmu_ops;
- ret = mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
- }
-
if (ret) {
kfree(priv);
return ret;
@@ -644,16 +553,12 @@ static int gntdev_release(struct inode *inode, struct file *flip)
list_del(&map->next);
gntdev_put_map(NULL /* already removed */, map);
}
- WARN_ON(!list_empty(&priv->freeable_maps));
mutex_unlock(&priv->lock);
#ifdef CONFIG_XEN_GNTDEV_DMABUF
gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif
- if (use_ptemod)
- mmu_notifier_unregister(&priv->mn, priv->mm);
-
kfree(priv);
return 0;
}
@@ -714,8 +619,6 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
if (map) {
list_del(&map->next);
- if (populate_freeable_maps)
- list_add_tail(&map->next, &priv->freeable_maps);
err = 0;
}
mutex_unlock(&priv->lock);
@@ -1087,11 +990,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
if (use_ptemod && map->vma)
goto unlock_out;
- if (use_ptemod && priv->mm != vma->vm_mm) {
- pr_warn("Huh? Other mm?\n");
- goto unlock_out;
- }
-
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
@@ -1102,10 +1000,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_flags |= VM_DONTCOPY;
vma->vm_private_data = map;
-
- if (use_ptemod)
- map->vma = vma;
-
if (map->flags) {
if ((vma->vm_flags & VM_WRITE) &&
(map->flags & GNTMAP_readonly))
@@ -1116,8 +1010,28 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->flags |= GNTMAP_readonly;
}
+ if (use_ptemod) {
+ map->vma = vma;
+ err = mmu_interval_notifier_insert_locked(
+ &map->notifier, vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
+ if (err)
+ goto out_unlock_put;
+ }
mutex_unlock(&priv->lock);
+ /*
+ * gntdev takes the address of the PTE in find_grant_ptes() and passes
+ * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
+ * the notifier is to prevent the hypervisor pointer to the PTE from
+ * going stale.
+ *
+ * Since this vma's mappings can't be touched without the mmap_sem,
+ * and we are holding it now, there is no need for the notifier_range
+ * locking pattern.
+ */
+ mmu_interval_read_begin(&map->notifier);
+
if (use_ptemod) {
map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -1166,8 +1080,11 @@ out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
if (use_ptemod) {
- map->vma = NULL;
unmap_grant_pages(map, 0, map->count);
+ if (map->vma) {
+ mmu_interval_notifier_remove(&map->notifier);
+ map->vma = NULL;
+ }
}
gntdev_put_map(priv, map);
return err;
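
The gntdev rework replaces a per-process mmu_notifier (and the freeable_maps bookkeeping it forced) with a per-mapping mmu_interval_notifier, so invalidations arrive only for the VA range a grant map actually covers. Note that gntdev skips the usual sequence-count step for the reason given in its mmap comment above; a more typical user records the sequence in the invalidate callback. A generic sketch under that caveat, with hypothetical foo types:

#include <linux/mm.h>
#include <linux/mmu_notifier.h>

struct foo_map {
	struct mmu_interval_notifier notifier;
	/* ... device mapping state ... */
};

static bool foo_invalidate(struct mmu_interval_notifier *mni,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq)
{
	/* struct foo_map *map = container_of(mni, struct foo_map, notifier); */

	if (!mmu_notifier_range_blockable(range))
		return false;	/* caller retries in blockable context */

	mmu_interval_set_seq(mni, cur_seq);	/* typical users record the seq */
	/* ... tear down device mappings covering the invalidated range ... */
	return true;
}

static const struct mmu_interval_notifier_ops foo_mmu_ops = {
	.invalidate = foo_invalidate,
};

static int foo_map_attach(struct foo_map *map, struct vm_area_struct *vma)
{
	/* cover exactly the VMA, as gntdev_mmap() does above */
	return mmu_interval_notifier_insert(&map->notifier, vma->vm_mm,
					    vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    &foo_mmu_ops);
}
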
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 5e30602fdbad..59e85e408c23 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -74,7 +74,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
"xen-platform-pci", pdev);
}
-static int platform_pci_resume(struct pci_dev *pdev)
+static int platform_pci_resume(struct device *dev)
{
int err;
@@ -83,7 +83,7 @@ static int platform_pci_resume(struct pci_dev *pdev)
err = xen_set_callback_via(callback_via);
if (err) {
- dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ dev_err(dev, "platform_pci_resume failure!\n");
return err;
}
return 0;
@@ -168,13 +168,17 @@ static const struct pci_device_id platform_pci_tbl[] = {
{0,}
};
+static struct dev_pm_ops platform_pm_ops = {
+ .resume_noirq = platform_pci_resume,
+};
+
static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
-#ifdef CONFIG_PM
- .resume_early = platform_pci_resume,
-#endif
+ .driver = {
+ .pm = &platform_pm_ops,
+ },
};
builtin_pci_driver(platform_driver);
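
The platform-pci change moves from the legacy PCI-driver .resume_early hook to a generic dev_pm_ops table reached through .driver.pm; resume_noirq is the matching phase (it runs before device interrupts are re-enabled), and the callback now takes a struct device * rather than a struct pci_dev *. The shape of the conversion, with stubbed hypothetical foo pieces and a made-up device ID:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;	/* stub */
}

static int foo_resume_noirq(struct device *dev)
{
	/* early resume work, device interrupts still disabled */
	return 0;
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical ID */
	{ 0, }
};

static const struct dev_pm_ops foo_pm_ops = {
	.resume_noirq = foo_resume_noirq,
};

static struct pci_driver foo_driver = {
	.name = "foo",
	.probe = foo_probe,
	.id_table = foo_ids,
	.driver = {
		.pm = &foo_pm_ops,	/* no #ifdef CONFIG_PM required */
	},
};
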
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index bd3a10dfac15..b6d27762c6f8 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -375,7 +375,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) &&
+ if (dma_capable(dev, dev_addr, size, true) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
swiotlb_force != SWIOTLB_FORCE)
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
/*
* Ensure that the address returned is DMA'ble
*/
- if (unlikely(!dma_capable(dev, dev_addr, size))) {
+ if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, map, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
@@ -405,7 +405,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
done:
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+ xen_dma_sync_for_device(dev_addr, phys, size, dir);
return dev_addr;
}
@@ -425,7 +425,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr))
@@ -439,7 +439,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
phys_addr_t paddr = xen_bus_to_phys(dma_addr);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
if (is_xen_swiotlb_buffer(dma_addr))
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -455,7 +455,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+ xen_dma_sync_for_device(dma_addr, paddr, size, dir);
}
/*
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index b86195e4dc6c..ff3994a6be23 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -342,14 +342,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (call->count2 != call->count && call->count2 != 0)
return afs_protocol_error(call, -EBADMSG,
afs_eproto_cb_count);
- call->_iter = &call->iter;
- iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
+ call->iter = &call->def_iter;
+ iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
call->unmarshall++;
/* Fall through */
case 4:
_debug("extract discard %zu/%u",
- iov_iter_count(&call->iter), call->count2 * 3 * 4);
+ iov_iter_count(call->iter), call->count2 * 3 * 4);
ret = afs_extract_data(call, false);
if (ret < 0)
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index d4fbe5f85f1b..b108528bf010 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -68,13 +68,11 @@ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_
static void afs_set_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
- u64 mask, before, after;
+ u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
- before = *(u64 *)block->hdr.bitmap;
-
block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
@@ -83,8 +81,6 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
-
- after = *(u64 *)block->hdr.bitmap;
}
/*
@@ -93,13 +89,11 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
int bit, unsigned int nr_slots)
{
- u64 mask, before, after;
+ u64 mask;
mask = (1 << nr_slots) - 1;
mask <<= bit;
- before = *(u64 *)block->hdr.bitmap;
-
block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
@@ -108,8 +102,6 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
-
- after = *(u64 *)block->hdr.bitmap;
}
/*
diff --git a/fs/afs/file.c b/fs/afs/file.c
index dd3c55c9101c..8415733f7bc1 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -223,7 +223,7 @@ static void afs_file_readpage_read_complete(struct page *page,
/*
* Fetch file data from the volume.
*/
-int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
+int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
{
struct afs_fs_cursor fc;
struct afs_status_cb *scb;
@@ -246,7 +246,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
while (afs_select_fileserver(&fc)) {
fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_data(&fc, scb, desc);
+ afs_fs_fetch_data(&fc, scb, req);
}
afs_check_for_remote_deletion(&fc, vnode);
@@ -257,7 +257,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
if (ret == 0) {
afs_stat_v(vnode, n_fetches);
- atomic_long_add(desc->actual_len,
+ atomic_long_add(req->actual_len,
&afs_v2net(vnode)->n_fetch_bytes);
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 6f84231f11a5..1f9c5d8e6fe5 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -323,7 +323,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
int ret;
_enter("{%u,%zu/%llu}",
- call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
+ call->unmarshall, iov_iter_count(call->iter), req->actual_len);
switch (call->unmarshall) {
case 0:
@@ -363,14 +363,14 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->bvec[0].bv_len = size;
call->bvec[0].bv_offset = req->offset;
call->bvec[0].bv_page = req->pages[req->index];
- iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+ iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
/* Fall through */
/* extract the returned data */
case 2:
_debug("extract data %zu/%llu",
- iov_iter_count(&call->iter), req->remain);
+ iov_iter_count(call->iter), req->remain);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -398,7 +398,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(&call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), req->actual_len - req->len);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -490,7 +490,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
call->key = fc->key;
call->out_scb = scb;
call->out_volsync = NULL;
- call->read_request = req;
+ call->read_request = afs_get_read(req);
/* marshall the parameters */
bp = call->request;
@@ -503,7 +503,6 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
bp[6] = 0;
bp[7] = htonl(lower_32_bits(req->len));
- refcount_inc(&req->usage);
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_set_fc_call(call, fc);
@@ -540,7 +539,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc,
call->key = fc->key;
call->out_scb = scb;
call->out_volsync = NULL;
- call->read_request = req;
+ call->read_request = afs_get_read(req);
/* marshall the parameters */
bp = call->request;
@@ -551,7 +550,6 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc,
bp[4] = htonl(lower_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->len));
- refcount_inc(&req->usage);
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_set_fc_call(call, fc);
@@ -1852,7 +1850,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
u32 count;
int ret;
- _enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter));
+ _enter("{%u,%zu}", call->unmarshall, iov_iter_count(call->iter));
switch (call->unmarshall) {
case 0:
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 759e0578012c..1d81fc4c3058 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -115,9 +115,9 @@ struct afs_call {
struct afs_vnode *lvnode; /* vnode being locked */
void *request; /* request data (first part) */
struct address_space *mapping; /* Pages being written from */
- struct iov_iter iter; /* Buffer iterator */
- struct iov_iter *_iter; /* Iterator currently in use */
- union { /* Convenience for ->iter */
+ struct iov_iter def_iter; /* Default buffer/data iterator */
+ struct iov_iter *iter; /* Iterator currently in use */
+ union { /* Convenience for ->def_iter */
struct kvec kvec[1];
struct bio_vec bvec[1];
};
@@ -934,6 +934,12 @@ extern int afs_fetch_data(struct afs_vnode *, struct key *, struct afs_read *);
extern int afs_page_filler(void *, struct page *);
extern void afs_put_read(struct afs_read *);
+static inline struct afs_read *afs_get_read(struct afs_read *req)
+{
+ refcount_inc(&req->usage);
+ return req;
+}
+
/*
* flock.c
*/
@@ -1136,7 +1142,7 @@ static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t si
{
call->kvec[0].iov_base = buf;
call->kvec[0].iov_len = size;
- iov_iter_kvec(&call->iter, READ, call->kvec, 1, size);
+ iov_iter_kvec(&call->def_iter, READ, call->kvec, 1, size);
}
static inline void afs_extract_to_tmp(struct afs_call *call)
@@ -1151,7 +1157,7 @@ static inline void afs_extract_to_tmp64(struct afs_call *call)
static inline void afs_extract_discard(struct afs_call *call, size_t size)
{
- iov_iter_discard(&call->iter, READ, size);
+ iov_iter_discard(&call->def_iter, READ, size);
}
static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
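
The new afs_get_read() helper above replaces the open-coded refcount_inc() at each fetch-data call site (see the fsclient.c and yfsclient.c hunks), and because it takes the reference as part of the assignment, call->read_request can never hold an uncounted pointer. The pattern generalizes to any refcounted object; a sketch with a hypothetical foo type:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo_req {
	refcount_t usage;
	/* ... */
};

/* Take a reference and hand the object back, so the get can sit
 * directly in an assignment: call->req = foo_get_req(req); */
static inline struct foo_req *foo_get_req(struct foo_req *req)
{
	refcount_inc(&req->usage);
	return req;
}

static inline void foo_put_req(struct foo_req *req)
{
	if (refcount_dec_and_test(&req->usage))
		kfree(req);
}
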
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 61498d9f06ef..58d396592250 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -152,7 +152,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
INIT_WORK(&call->async_work, afs_process_async_call);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
- call->_iter = &call->iter;
+ call->iter = &call->def_iter;
o = atomic_inc_return(&net->nr_outstanding_calls);
trace_afs_call(call, afs_call_trace_alloc, 1, o,
@@ -513,12 +513,12 @@ static void afs_deliver_to_call(struct afs_call *call)
state == AFS_CALL_SV_AWAIT_ACK
) {
if (state == AFS_CALL_SV_AWAIT_ACK) {
- iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
+ iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0);
ret = rxrpc_kernel_recv_data(call->net->socket,
- call->rxcall, &call->iter,
+ call->rxcall, &call->def_iter,
false, &remote_abort,
&call->service_id);
- trace_afs_receive_data(call, &call->iter, false, ret);
+ trace_afs_receive_data(call, &call->def_iter, false, ret);
if (ret == -EINPROGRESS || ret == -EAGAIN)
return;
@@ -859,7 +859,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
{
int ret;
- _enter("{%zu}", iov_iter_count(call->_iter));
+ _enter("{%zu}", iov_iter_count(call->iter));
/* the operation ID forms the first four bytes of the request data */
ret = afs_extract_data(call, true);
@@ -975,7 +975,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
int afs_extract_data(struct afs_call *call, bool want_more)
{
struct afs_net *net = call->net;
- struct iov_iter *iter = call->_iter;
+ struct iov_iter *iter = call->iter;
enum afs_call_state state;
u32 remote_abort = 0;
int ret;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 64d440aaabc0..1686bf188ccd 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -151,7 +151,7 @@ static struct afs_server *afs_install_server(struct afs_net *net,
const struct afs_addr_list *alist;
struct afs_server *server;
struct rb_node **pp, *p;
- int ret = -EEXIST, diff;
+ int diff;
_enter("%p", candidate);
@@ -196,7 +196,6 @@ static struct afs_server *afs_install_server(struct afs_net *net,
hlist_add_head_rcu(&server->addr6_link, &net->fs_addresses6);
write_sequnlock(&net->fs_addr_lock);
- ret = 0;
exists:
afs_get_server(server, afs_server_trace_get_install);
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index cfb0ac4bd039..516e9a3bb5b4 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -185,7 +185,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
int i, ret;
_enter("{%u,%zu/%u}",
- call->unmarshall, iov_iter_count(call->_iter), call->count);
+ call->unmarshall, iov_iter_count(call->iter), call->count);
switch (call->unmarshall) {
case 0:
@@ -316,7 +316,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call)
int ret;
_enter("{%u,%zu/%u}",
- call->unmarshall, iov_iter_count(call->_iter), call->count);
+ call->unmarshall, iov_iter_count(call->iter), call->count);
switch (call->unmarshall) {
case 0:
@@ -425,7 +425,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
int ret;
_enter("{%u,%zu,%u}",
- call->unmarshall, iov_iter_count(call->_iter), call->count2);
+ call->unmarshall, iov_iter_count(call->iter), call->count2);
switch (call->unmarshall) {
case 0:
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 5552d034090a..7af41fd5f3ee 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -228,11 +228,11 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
break;
case 1:
data = buf;
- dsize = snprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
break;
case 2:
data = buf;
- dsize = snprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
break;
case 3:
data = yacl->vol_acl->data;
@@ -370,13 +370,15 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler,
/* The volume ID is 64-bit, the vnode ID is 96-bit and the
* uniquifier is 32-bit.
*/
- len = sprintf(text, "%llx:", vnode->fid.vid);
+ len = scnprintf(text, sizeof(text), "%llx:", vnode->fid.vid);
if (vnode->fid.vnode_hi)
- len += sprintf(text + len, "%x%016llx",
- vnode->fid.vnode_hi, vnode->fid.vnode);
+ len += scnprintf(text + len, sizeof(text) - len, "%x%016llx",
+ vnode->fid.vnode_hi, vnode->fid.vnode);
else
- len += sprintf(text + len, "%llx", vnode->fid.vnode);
- len += sprintf(text + len, ":%x", vnode->fid.unique);
+ len += scnprintf(text + len, sizeof(text) - len, "%llx",
+ vnode->fid.vnode);
+ len += scnprintf(text + len, sizeof(text) - len, ":%x",
+ vnode->fid.unique);
if (size == 0)
return len;
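
The xattr hunks switch to scnprintf() for a reason worth spelling out: snprintf() returns the length the output would have had, so chaining len += snprintf(text + len, sizeof(text) - len, ...) can push len past the buffer on truncation, at which point sizeof(text) - len underflows to a huge size_t. scnprintf() returns only what was actually written, keeping the arithmetic safe. A sketch with hypothetical arguments:

#include <linux/kernel.h>

static size_t foo_format_fid(char *text, size_t size, u64 vid, u64 vn)
{
	size_t len;

	/* scnprintf() never reports more than it wrote, so text + len
	 * stays inside the buffer and size - len cannot underflow. */
	len = scnprintf(text, size, "%llx:", vid);
	len += scnprintf(text + len, size - len, "%llx", vn);
	return len;
}
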
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 9ac035c17dc4..a26126ac7bf1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -441,7 +441,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
int ret;
_enter("{%u,%zu/%llu}",
- call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
+ call->unmarshall, iov_iter_count(call->iter), req->actual_len);
switch (call->unmarshall) {
case 0:
@@ -476,14 +476,14 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
call->bvec[0].bv_len = size;
call->bvec[0].bv_offset = req->offset;
call->bvec[0].bv_page = req->pages[req->index];
- iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+ iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
/* Fall through */
/* extract the returned data */
case 2:
_debug("extract data %zu/%llu",
- iov_iter_count(&call->iter), req->remain);
+ iov_iter_count(call->iter), req->remain);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -511,7 +511,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(&call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), req->actual_len - req->len);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -605,7 +605,7 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
call->key = fc->key;
call->out_scb = scb;
call->out_volsync = NULL;
- call->read_request = req;
+ call->read_request = afs_get_read(req);
/* marshall the parameters */
bp = call->request;
@@ -616,7 +616,6 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
bp = xdr_encode_u64(bp, req->len);
yfs_check_req(call, bp);
- refcount_inc(&req->usage);
afs_use_fs_server(call, fc->cbi);
trace_afs_make_fs_call(call, &vnode->fid);
afs_set_fc_call(call, fc);
diff --git a/fs/aio.c b/fs/aio.c
index 0d9a559d488c..a9fbad2ce5e6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2056,7 +2056,7 @@ static long do_io_getevents(aio_context_t ctx_id,
* specifies an infinite timeout. Note that the timeout pointed to by
* timeout is relative. Will fail with -ENOSYS if not implemented.
*/
-#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
+#ifdef CONFIG_64BIT
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
long, min_nr,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index c5642bcb6b46..5372eabd276a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1489,18 +1489,18 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
* group-wide total, not its individual thread total.
*/
thread_group_cputime(p, &cputime);
- prstatus->pr_utime = ns_to_timeval(cputime.utime);
- prstatus->pr_stime = ns_to_timeval(cputime.stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
} else {
u64 utime, stime;
task_cputime(p, &utime, &stime);
- prstatus->pr_utime = ns_to_timeval(utime);
- prstatus->pr_stime = ns_to_timeval(stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
}
- prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
- prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
+ prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
+ prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d86ebd0dcc3d..240f66663543 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1359,17 +1359,17 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
* group-wide total, not its individual thread total.
*/
thread_group_cputime(p, &cputime);
- prstatus->pr_utime = ns_to_timeval(cputime.utime);
- prstatus->pr_stime = ns_to_timeval(cputime.stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
} else {
u64 utime, stime;
task_cputime(p, &utime, &stime);
- prstatus->pr_utime = ns_to_timeval(utime);
- prstatus->pr_stime = ns_to_timeval(stime);
+ prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
+ prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
}
- prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
- prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
+ prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
+ prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a98c3c71fc54..f452a94abdc3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2299,7 +2299,7 @@ static const struct super_operations btrfs_super_ops = {
static const struct file_operations btrfs_ctl_fops = {
.open = btrfs_control_open,
.unlocked_ioctl = btrfs_control_ioctl,
- .compat_ioctl = btrfs_control_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
diff --git a/fs/buffer.c b/fs/buffer.c
index 86a38b979323..d8c7242426bb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -47,6 +47,9 @@
#include <linux/pagevec.h>
#include <linux/sched/mm.h>
#include <trace/events/block.h>
+#include <linux/fscrypt.h>
+
+#include "internal.h"
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@@ -246,10 +249,6 @@ out:
return ret;
}
-/*
- * I/O completion handler for block_read_full_page() - pages
- * which come unlocked at the end of I/O.
- */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
unsigned long flags;
@@ -307,6 +306,47 @@ still_busy:
return;
}
+struct decrypt_bh_ctx {
+ struct work_struct work;
+ struct buffer_head *bh;
+};
+
+static void decrypt_bh(struct work_struct *work)
+{
+ struct decrypt_bh_ctx *ctx =
+ container_of(work, struct decrypt_bh_ctx, work);
+ struct buffer_head *bh = ctx->bh;
+ int err;
+
+ err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
+ bh_offset(bh));
+ end_buffer_async_read(bh, err == 0);
+ kfree(ctx);
+}
+
+/*
+ * I/O completion handler for block_read_full_page() - pages
+ * which come unlocked at the end of I/O.
+ */
+static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
+{
+ /* Decrypt if needed */
+ if (uptodate && IS_ENABLED(CONFIG_FS_ENCRYPTION) &&
+ IS_ENCRYPTED(bh->b_page->mapping->host) &&
+ S_ISREG(bh->b_page->mapping->host->i_mode)) {
+ struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+
+ if (ctx) {
+ INIT_WORK(&ctx->work, decrypt_bh);
+ ctx->bh = bh;
+ fscrypt_enqueue_decrypt_work(&ctx->work);
+ return;
+ }
+ uptodate = 0;
+ }
+ end_buffer_async_read(bh, uptodate);
+}
+
/*
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
@@ -379,7 +419,7 @@ EXPORT_SYMBOL(end_buffer_async_write);
*/
static void mark_buffer_async_read(struct buffer_head *bh)
{
- bh->b_end_io = end_buffer_async_read;
+ bh->b_end_io = end_buffer_async_read_io;
set_buffer_async_read(bh);
}
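
end_buffer_async_read_io() runs in I/O completion context, where sleeping is forbidden, so the decryption is packaged into a decrypt_bh_ctx allocated with GFP_ATOMIC and handed to the fscrypt workqueue; if the allocation fails, the buffer is simply reported as not up to date. The same punt-to-workqueue pattern in a self-contained form (the demo_* names are hypothetical, not kernel API, and schedule_work() stands in for fscrypt's dedicated queue):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_ctx {
	struct work_struct work;
	void *payload;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	/* May sleep here: decrypt, verify, etc. */
	kfree(ctx);
}

/* Called from (soft)irq completion context; must not sleep. */
static void demo_complete(void *payload)
{
	struct demo_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return;	/* fall back to treating the I/O as failed */
	ctx->payload = payload;
	INIT_WORK(&ctx->work, demo_worker);
	schedule_work(&ctx->work);
}
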
@@ -1385,10 +1425,10 @@ static bool has_bh_in_lru(int cpu, void *dummy)
for (i = 0; i < BH_LRU_SIZE; i++) {
if (b->bhs[i])
- return 1;
+ return true;
}
- return 0;
+ return false;
}
void invalidate_bh_lrus(void)
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d17a789fd856..2e4764fd1872 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1809,6 +1809,7 @@ const struct file_operations ceph_dir_fops = {
.open = ceph_open,
.release = ceph_release,
.unlocked_ioctl = ceph_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fsync = ceph_fsync,
.lock = ceph_lock,
.flock = ceph_flock,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 8de633964dc3..11929d2bb594 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2188,7 +2188,7 @@ const struct file_operations ceph_file_fops = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = ceph_ioctl,
- .compat_ioctl = ceph_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.fallocate = ceph_fallocate,
.copy_file_range = ceph_copy_file_range,
};
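
btrfs_control_ioctl() and ceph_ioctl() take only pointer arguments, so their compat path can be the generic helper rather than reusing the native handler directly. From memory, the upstream compat_ptr_ioctl() in fs/ioctl.c is roughly the following: convert the 32-bit user pointer with compat_ptr() and forward to ->unlocked_ioctl (sketch; see the upstream source for the authoritative version):

#include <linux/fs.h>
#include <linux/compat.h>

#ifdef CONFIG_COMPAT
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* compat_ptr() widens the 32-bit pointer correctly per-arch */
	return file->f_op->unlocked_ioctl(file, cmd,
					  (unsigned long)compat_ptr(arg));
}
#endif
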
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 0b4eee3bed66..19f6e592b941 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -122,6 +122,27 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
}
static void
+cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
+{
+ struct TCP_Server_Info *server = chan->server;
+
+ seq_printf(m, "\t\tChannel %d Number of credits: %d Dialect 0x%x "
+ "TCP status: %d Instance: %d Local Users To Server: %d "
+ "SecMode: 0x%x Req On Wire: %d In Send: %d "
+ "In MaxReq Wait: %d\n",
+ i+1,
+ server->credits,
+ server->dialect,
+ server->tcpStatus,
+ server->reconnect_instance,
+ server->srv_count,
+ server->sec_mode,
+ in_flight(server),
+ atomic_read(&server->in_send),
+ atomic_read(&server->num_waiters));
+}
+
+static void
cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
{
struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
@@ -256,6 +277,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
if (!server->rdma)
goto skip_rdma;
+ if (!server->smbd_conn) {
+ seq_printf(m, "\nSMBDirect transport not available");
+ goto skip_rdma;
+ }
+
seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
"transport status: %x",
server->smbd_conn->protocol,
@@ -360,11 +386,10 @@ skip_rdma:
server->srv_count,
server->sec_mode, in_flight(server));
-#ifdef CONFIG_CIFS_STATS2
seq_printf(m, " In Send: %d In MaxReq Wait: %d",
atomic_read(&server->in_send),
atomic_read(&server->num_waiters));
-#endif
+
/* dump session id helpful for use with network trace */
seq_printf(m, " SessionId: 0x%llx", ses->Suid);
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
@@ -372,6 +397,13 @@ skip_rdma:
if (ses->sign)
seq_puts(m, " signed");
+ if (ses->chan_count > 1) {
+ seq_printf(m, "\n\n\tExtra Channels: %zu\n",
+ ses->chan_count-1);
+ for (j = 1; j < ses->chan_count; j++)
+ cifs_dump_channel(m, j, &ses->chans[j]);
+ }
+
seq_puts(m, "\n\tShares:");
j = 0;
@@ -410,8 +442,13 @@ skip_rdma:
seq_printf(m, "\n\tServer interfaces: %zu\n",
ses->iface_count);
for (j = 0; j < ses->iface_count; j++) {
+ struct cifs_server_iface *iface;
+
+ iface = &ses->iface_list[j];
seq_printf(m, "\t%d)", j);
- cifs_dump_iface(m, &ses->iface_list[j]);
+ cifs_dump_iface(m, iface);
+ if (is_ses_using_iface(ses, iface))
+ seq_puts(m, "\t\t[CONNECTED]\n");
}
spin_unlock(&ses->iface_lock);
}
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 7f01c6e60791..7b9b876b513b 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -98,7 +98,7 @@ struct key_type cifs_spnego_key_type = {
struct key *
cifs_get_spnego_key(struct cifs_ses *sesInfo)
{
- struct TCP_Server_Info *server = sesInfo->server;
+ struct TCP_Server_Info *server = cifs_ses_server(sesInfo);
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
char *description, *dp;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index f842944a5c76..06ffe52bdcfa 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -39,8 +39,6 @@ static const struct cifs_sid sid_everyone = {
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
-/* group users */
-static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/* S-1-22-1 Unmapped Unix users */
static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 1a135d1b85bd..1d1051d31513 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -119,6 +119,7 @@ extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
+struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
__u32 cifs_lock_secret;
@@ -613,6 +614,10 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
/* convert actimeo and display it in seconds */
seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
+ if (tcon->ses->chan_max > 1)
+ seq_printf(s, ",multichannel,max_channel=%zu",
+ tcon->ses->chan_max);
+
return 0;
}
@@ -1219,6 +1224,7 @@ const struct file_operations cifs_file_ops = {
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
+ .flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
.mmap = cifs_file_mmap,
@@ -1238,6 +1244,7 @@ const struct file_operations cifs_file_strict_ops = {
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
+ .flock = cifs_flock,
.fsync = cifs_strict_fsync,
.flush = cifs_flush,
.mmap = cifs_file_strict_mmap,
@@ -1257,6 +1264,7 @@ const struct file_operations cifs_file_direct_ops = {
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
+ .flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
.mmap = cifs_file_mmap,
@@ -1543,7 +1551,7 @@ init_cifs(void)
/*
* Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
* so that we don't launch too many worker threads but
- * Documentation/workqueue.txt recommends setting it to 0
+ * Documentation/core-api/workqueue.rst recommends setting it to 0
*/
/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
@@ -1554,11 +1562,18 @@ init_cifs(void)
goto out_destroy_cifsiod_wq;
}
+ fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
+ WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!fileinfo_put_wq) {
+ rc = -ENOMEM;
+ goto out_destroy_decrypt_wq;
+ }
+
cifsoplockd_wq = alloc_workqueue("cifsoplockd",
WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
if (!cifsoplockd_wq) {
rc = -ENOMEM;
- goto out_destroy_decrypt_wq;
+ goto out_destroy_fileinfo_put_wq;
}
rc = cifs_fscache_register();
@@ -1624,6 +1639,8 @@ out_unreg_fscache:
cifs_fscache_unregister();
out_destroy_cifsoplockd_wq:
destroy_workqueue(cifsoplockd_wq);
+out_destroy_fileinfo_put_wq:
+ destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
@@ -1653,6 +1670,7 @@ exit_cifs(void)
cifs_fscache_unregister();
destroy_workqueue(cifsoplockd_wq);
destroy_workqueue(decrypt_wq);
+ destroy_workqueue(fileinfo_put_wq);
destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}
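
The init_cifs() hunks slot fileinfo_put_wq into the existing goto-unwind ladder: every allocation that can fail jumps to the label that tears down everything allocated before it, and exit_cifs() destroys in reverse order. A minimal, self-contained module sketch of that discipline (demo names, not cifs code):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq_a, *wq_b, *wq_c;

static int __init demo_init(void)
{
	wq_a = alloc_workqueue("demo_a", WQ_MEM_RECLAIM, 0);
	if (!wq_a)
		return -ENOMEM;
	wq_b = alloc_workqueue("demo_b", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!wq_b)
		goto out_destroy_a;
	wq_c = alloc_workqueue("demo_c", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	if (!wq_c)
		goto out_destroy_b;
	return 0;

out_destroy_b:
	destroy_workqueue(wq_b);
out_destroy_a:
	destroy_workqueue(wq_a);
	return -ENOMEM;
}

static void __exit demo_exit(void)
{
	/* reverse order of allocation */
	destroy_workqueue(wq_c);
	destroy_workqueue(wq_b);
	destroy_workqueue(wq_a);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
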
@@ -1663,17 +1681,17 @@ MODULE_DESCRIPTION
("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
-MODULE_SOFTDEP("pre: ecb");
-MODULE_SOFTDEP("pre: hmac");
-MODULE_SOFTDEP("pre: md4");
-MODULE_SOFTDEP("pre: md5");
-MODULE_SOFTDEP("pre: nls");
-MODULE_SOFTDEP("pre: aes");
-MODULE_SOFTDEP("pre: cmac");
-MODULE_SOFTDEP("pre: sha256");
-MODULE_SOFTDEP("pre: sha512");
-MODULE_SOFTDEP("pre: aead2");
-MODULE_SOFTDEP("pre: ccm");
-MODULE_SOFTDEP("pre: gcm");
+MODULE_SOFTDEP("ecb");
+MODULE_SOFTDEP("hmac");
+MODULE_SOFTDEP("md4");
+MODULE_SOFTDEP("md5");
+MODULE_SOFTDEP("nls");
+MODULE_SOFTDEP("aes");
+MODULE_SOFTDEP("cmac");
+MODULE_SOFTDEP("sha256");
+MODULE_SOFTDEP("sha512");
+MODULE_SOFTDEP("aead2");
+MODULE_SOFTDEP("ccm");
+MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index bc4ca94137f2..b59dc7478130 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -108,6 +108,7 @@ extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
+extern int cifs_flock(struct file *pfile, int cmd, struct file_lock *plock);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
@@ -152,5 +153,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.23"
+#define CIFS_VERSION "2.24"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index d78bfcc19156..d34a4ed8c57d 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -230,7 +230,8 @@ struct smb_version_operations {
bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
/* setup request: allocate mid, sign message */
struct mid_q_entry *(*setup_request)(struct cifs_ses *,
- struct smb_rqst *);
+ struct TCP_Server_Info *,
+ struct smb_rqst *);
/* setup async request: allocate mid, sign message */
struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *,
struct smb_rqst *);
@@ -268,8 +269,9 @@ struct smb_version_operations {
int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
- void (*downgrade_oplock)(struct TCP_Server_Info *,
- struct cifsInodeInfo *, bool);
+ void (*downgrade_oplock)(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache);
/* process transaction2 response */
bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
char *, int);
@@ -591,6 +593,10 @@ struct smb_vol {
bool resilient:1; /* noresilient not required since not forced for CA */
bool domainauto:1;
bool rdma:1;
+ bool multichannel:1;
+ bool use_client_guid:1;
+ /* reuse existing guid for multichannel */
+ u8 client_guid[SMB2_CLIENT_GUID_SIZE];
unsigned int bsize;
unsigned int rsize;
unsigned int wsize;
@@ -607,6 +613,7 @@ struct smb_vol {
__u64 snapshot_time; /* needed for timewarp tokens */
__u32 handle_timeout; /* persistent and durable handle timeout in ms */
unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
+ unsigned int max_channels;
__u16 compression; /* compression algorithm 0xFFFF default 0=disabled */
bool rootfs:1; /* if it's a SMB root file system */
};
@@ -736,12 +743,12 @@ struct TCP_Server_Info {
/* Total size of this PDU. Only valid from cifs_demultiplex_thread */
unsigned int pdu_size;
unsigned int total_read; /* total amount of data read in this pass */
+ atomic_t in_send; /* requests trying to send */
+ atomic_t num_waiters; /* blocked waiting to get in sendrecv */
#ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
#ifdef CONFIG_CIFS_STATS2
- atomic_t in_send; /* requests trying to send */
- atomic_t num_waiters; /* blocked waiting to get in sendrecv */
atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
__u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */
@@ -953,6 +960,11 @@ struct cifs_server_iface {
struct sockaddr_storage sockaddr;
};
+struct cifs_chan {
+ struct TCP_Server_Info *server;
+ __u8 signkey[SMB3_SIGN_KEY_SIZE];
+};
+
/*
* Session structure. One of these for each uid session with a particular host
*/
@@ -983,12 +995,15 @@ struct cifs_ses {
bool sign; /* is signing required? */
bool need_reconnect:1; /* connection reset, uid now invalid */
bool domainAuto:1;
+ bool binding:1; /* are we binding the session? */
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+
/*
* Network interfaces available on the server this session is
* connected to.
@@ -1002,8 +1017,37 @@ struct cifs_ses {
struct cifs_server_iface *iface_list;
size_t iface_count;
unsigned long iface_last_update; /* jiffies */
+
+#define CIFS_MAX_CHANNELS 16
+ struct cifs_chan chans[CIFS_MAX_CHANNELS];
+ size_t chan_count;
+ size_t chan_max;
+ atomic_t chan_seq; /* round robin state */
};
+/*
+ * When binding a new channel, we need to access the channel which isn't fully
+ * established yet (one past the established count)
+ */
+
+static inline
+struct cifs_chan *cifs_ses_binding_channel(struct cifs_ses *ses)
+{
+ if (ses->binding)
+ return &ses->chans[ses->chan_count];
+ else
+ return NULL;
+}
+
+static inline
+struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
+{
+ if (ses->binding)
+ return ses->chans[ses->chan_count].server;
+ else
+ return ses->server;
+}
+
static inline bool
cap_unix(struct cifs_ses *ses)
{
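
chan_seq is documented above as round-robin state but no consumer appears in this hunk; one plausible way a sender could use it to spread requests across the established channels is sketched below (hypothetical helper, not part of this patch):

/* Hypothetical: pick the next established channel round-robin. */
static inline struct cifs_chan *demo_pick_channel(struct cifs_ses *ses)
{
	unsigned int idx = (unsigned int)atomic_inc_return(&ses->chan_seq);

	return &ses->chans[idx % ses->chan_count];
}
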
@@ -1260,11 +1304,14 @@ struct cifsFileInfo {
unsigned int f_flags;
bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1;
+ unsigned int oplock_epoch; /* epoch from the lease break */
+ __u32 oplock_level; /* oplock/lease level from the lease break */
int count;
spinlock_t file_info_lock; /* protects four flag/count fields above */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
+ struct work_struct put; /* work for the final part of _put */
};
struct cifs_io_parms {
@@ -1370,7 +1417,8 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
}
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr,
+ bool offload);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
#define CIFS_CACHE_READ_FLG 1
@@ -1405,7 +1453,7 @@ struct cifsInodeInfo {
unsigned int epoch; /* used to track lease state changes */
#define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
#define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
+#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
#define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
@@ -1524,6 +1572,7 @@ struct mid_q_entry {
struct TCP_Server_Info *server; /* server corresponding to this mid */
__u64 mid; /* multiplex id */
__u16 credits; /* number of credits consumed by this mid */
+ __u16 credits_received; /* number of credits from the response */
__u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
unsigned long when_alloc; /* when mid was created */
@@ -1551,12 +1600,12 @@ struct close_cancelled_open {
struct cifs_fid fid;
struct cifs_tcon *tcon;
struct work_struct work;
+ __u64 mid;
+ __u16 cmd;
};
/* Make code in transport.c a little cleaner by moving
update of optional stats into function below */
-#ifdef CONFIG_CIFS_STATS2
-
static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
{
atomic_inc(&server->in_send);
@@ -1577,26 +1626,12 @@ static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
atomic_dec(&server->num_waiters);
}
+#ifdef CONFIG_CIFS_STATS2
static inline void cifs_save_when_sent(struct mid_q_entry *mid)
{
mid->when_sent = jiffies;
}
#else
-static inline void cifs_in_send_inc(struct TCP_Server_Info *server)
-{
-}
-static inline void cifs_in_send_dec(struct TCP_Server_Info *server)
-{
-}
-
-static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server)
-{
-}
-
-static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server)
-{
-}
-
static inline void cifs_save_when_sent(struct mid_q_entry *mid)
{
}
@@ -1907,6 +1942,7 @@ void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
extern struct workqueue_struct *decrypt_wq;
+extern struct workqueue_struct *fileinfo_put_wq;
extern struct workqueue_struct *cifsoplockd_wq;
extern __u32 cifs_lock_secret;
@@ -1937,4 +1973,10 @@ extern struct smb_version_values smb302_values;
#define ALT_SMB311_VERSION_STRING "3.11"
extern struct smb_version_operations smb311_operations;
extern struct smb_version_values smb311_values;
+
+static inline bool is_smb1_server(struct TCP_Server_Info *server)
+{
+ return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0;
+}
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index fe597d3d5208..1ed695336f62 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -109,6 +109,7 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
+ struct TCP_Server_Info *,
struct smb_rqst *);
extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
struct smb_rqst *);
@@ -242,6 +243,7 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
struct tcon_link *tlink,
struct cifs_pending_open *open);
extern void cifs_del_pending_open(struct cifs_pending_open *open);
+extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb_vol *vol);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
extern void cifs_put_tcon(struct cifs_tcon *tcon);
@@ -584,6 +586,12 @@ void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset);
+int cifs_try_adding_channels(struct cifs_ses *ses);
+int cifs_ses_add_channel(struct cifs_ses *ses,
+ struct cifs_server_iface *iface);
+bool is_server_using_iface(struct TCP_Server_Info *server,
+ struct cifs_server_iface *iface);
+bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ccaa8bad336f..86d1baedf21c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -97,6 +97,7 @@ enum {
Opt_persistent, Opt_nopersistent,
Opt_resilient, Opt_noresilient,
Opt_domainauto, Opt_rdma, Opt_modesid, Opt_rootfs,
+ Opt_multichannel, Opt_nomultichannel,
Opt_compress,
/* Mount options which take numeric value */
@@ -106,7 +107,7 @@ enum {
Opt_min_enc_offload,
Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
- Opt_snapshot,
+ Opt_snapshot, Opt_max_channels,
/* Mount options which take string value */
Opt_user, Opt_pass, Opt_ip,
@@ -199,6 +200,8 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_noresilient, "noresilienthandles"},
{ Opt_domainauto, "domainauto"},
{ Opt_rdma, "rdma"},
+ { Opt_multichannel, "multichannel" },
+ { Opt_nomultichannel, "nomultichannel" },
{ Opt_backupuid, "backupuid=%s" },
{ Opt_backupgid, "backupgid=%s" },
@@ -218,6 +221,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_echo_interval, "echo_interval=%s" },
{ Opt_max_credits, "max_credits=%s" },
{ Opt_snapshot, "snapshot=%s" },
+ { Opt_max_channels, "max_channels=%s" },
{ Opt_compress, "compress=%s" },
{ Opt_blank_user, "user=" },
@@ -387,7 +391,7 @@ static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
#ifdef CONFIG_CIFS_DFS_UPCALL
struct super_cb_data {
struct TCP_Server_Info *server;
- struct cifs_sb_info *cifs_sb;
+ struct super_block *sb;
};
/* These functions must be called with server->srv_mutex held */
@@ -398,25 +402,39 @@ static void super_cb(struct super_block *sb, void *arg)
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
- if (d->cifs_sb)
+ if (d->sb)
return;
cifs_sb = CIFS_SB(sb);
tcon = cifs_sb_master_tcon(cifs_sb);
if (tcon->ses->server == d->server)
- d->cifs_sb = cifs_sb;
+ d->sb = sb;
}
-static inline struct cifs_sb_info *
-find_super_by_tcp(struct TCP_Server_Info *server)
+static struct super_block *get_tcp_super(struct TCP_Server_Info *server)
{
struct super_cb_data d = {
.server = server,
- .cifs_sb = NULL,
+ .sb = NULL,
};
iterate_supers_type(&cifs_fs_type, super_cb, &d);
- return d.cifs_sb ? d.cifs_sb : ERR_PTR(-ENOENT);
+
+ if (unlikely(!d.sb))
+ return ERR_PTR(-ENOENT);
+ /*
+	 * Grab an active reference in order to prevent automounts (DFS links)
+	 * from expiring and then freeing up our cifs superblock pointer while
+	 * we're doing failover.
+ */
+ cifs_sb_active(d.sb);
+ return d.sb;
+}
+
+static inline void put_tcp_super(struct super_block *sb)
+{
+ if (!IS_ERR_OR_NULL(sb))
+ cifs_sb_deactive(sb);
}
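
get_tcp_super()/put_tcp_super() bracket the DFS failover with an active superblock reference so an automount expiry cannot free the superblock mid-reconnect. The underlying hold-across-slow-work pattern, reduced to a self-contained kref sketch (demo_* names are illustrative):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
};

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		kref_init(&obj->ref);
	return obj;
}

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

/* Hold the object across slow, sleeping work so it cannot vanish. */
static void demo_use(struct demo_obj *obj)
{
	kref_get(&obj->ref);
	/* ... failover/reconnect work that may sleep ... */
	kref_put(&obj->ref, demo_release);
}
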
static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
@@ -480,6 +498,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
struct mid_q_entry *mid_entry;
struct list_head retry_list;
#ifdef CONFIG_CIFS_DFS_UPCALL
+ struct super_block *sb = NULL;
struct cifs_sb_info *cifs_sb = NULL;
struct dfs_cache_tgt_list tgt_list = {0};
struct dfs_cache_tgt_iterator *tgt_it = NULL;
@@ -489,13 +508,15 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->nr_targets = 1;
#ifdef CONFIG_CIFS_DFS_UPCALL
spin_unlock(&GlobalMid_Lock);
- cifs_sb = find_super_by_tcp(server);
- if (IS_ERR(cifs_sb)) {
- rc = PTR_ERR(cifs_sb);
+ sb = get_tcp_super(server);
+ if (IS_ERR(sb)) {
+ rc = PTR_ERR(sb);
cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
__func__, rc);
- cifs_sb = NULL;
+ sb = NULL;
} else {
+ cifs_sb = CIFS_SB(sb);
+
rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
if (rc && (rc != -EOPNOTSUPP)) {
cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
@@ -512,6 +533,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* the demux thread will exit normally
next time through the loop */
spin_unlock(&GlobalMid_Lock);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ dfs_cache_free_tgts(&tgt_list);
+ put_tcp_super(sb);
+#endif
return rc;
} else
server->tcpStatus = CifsNeedReconnect;
@@ -638,7 +663,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
__func__, rc);
}
dfs_cache_free_tgts(&tgt_list);
+
}
+
+ put_tcp_super(sb);
#endif
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
@@ -905,6 +933,20 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
spin_unlock(&GlobalMid_Lock);
}
+static unsigned int
+smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
+{
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buffer;
+
+ /*
+ * SMB1 does not use credits.
+ */
+ if (server->vals->header_preamble_size)
+ return 0;
+
+ return le16_to_cpu(shdr->CreditRequest);
+}
+
static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
char *buf, int malformed)
@@ -912,6 +954,7 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
if (server->ops->check_trans2 &&
server->ops->check_trans2(mid, server, buf, malformed))
return;
+ mid->credits_received = smb2_get_credits_from_hdr(buf, server);
mid->resp_buf = buf;
mid->large_buf = server->large_buf;
/* Was previous buf put in mpx struct for multi-rsp? */
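
smb2_get_credits_from_hdr() reads the 16-bit little-endian credit grant out of the response header, and handle_mid() records it so the demultiplex path no longer has to re-parse the buffer (SMB1 is detected by its preamble and reports 0 since it has no credits). A userspace sketch of the byte-level decode; offset 14 matches the CreditRequest/CreditResponse position in the SMB2 header layout, but treat the framing here as illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_at(const unsigned char *buf, size_t off)
{
	return (uint16_t)(buf[off] | (buf[off + 1] << 8));
}

int main(void)
{
	unsigned char hdr[64] = {0};

	hdr[14] = 0x02;	/* CreditRequest/CreditResponse, little-endian */
	printf("credits granted: %u\n", le16_at(hdr, 14)); /* prints 2 */
	return 0;
}
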
@@ -1222,12 +1265,6 @@ next_pdu:
for (i = 0; i < num_mids; i++) {
if (mids[i] != NULL) {
mids[i]->resp_buf_size = server->pdu_size;
- if ((mids[i]->mid_flags & MID_WAIT_CANCELLED) &&
- mids[i]->mid_state == MID_RESPONSE_RECEIVED &&
- server->ops->handle_cancelled_mid)
- server->ops->handle_cancelled_mid(
- mids[i]->resp_buf,
- server);
if (!mids[i]->multiRsp || mids[i]->multiEnd)
mids[i]->callback(mids[i]);
@@ -1672,6 +1709,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
+ /* default to no multichannel (single server connection) */
+ vol->multichannel = false;
+ vol->max_channels = 1;
+
if (!mountdata)
goto cifs_parse_mount_err;
@@ -1965,6 +2006,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_rdma:
vol->rdma = true;
break;
+ case Opt_multichannel:
+ vol->multichannel = true;
+ break;
+ case Opt_nomultichannel:
+ vol->multichannel = false;
+ break;
case Opt_compress:
vol->compression = UNKNOWN_TYPE;
cifs_dbg(VFS,
@@ -2128,6 +2175,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
vol->max_credits = option;
break;
+ case Opt_max_channels:
+ if (get_option_ul(args, &option) || option < 1 ||
+ option > CIFS_MAX_CHANNELS) {
+ cifs_dbg(VFS, "%s: Invalid max_channels value, needs to be 1-%d\n",
+ __func__, CIFS_MAX_CHANNELS);
+ goto cifs_parse_mount_err;
+ }
+ vol->max_channels = option;
+ break;
/* String Arguments */
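
Opt_max_channels follows the same validate-then-store shape as the other numeric mount options: parse, range-check against CIFS_MAX_CHANNELS, and fail the whole mount on bad input. A userspace rendering of the rule (hypothetical helper, standing in for get_option_ul()):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define CIFS_MAX_CHANNELS 16

static int parse_max_channels(const char *s, unsigned long *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 10);
	if (errno || *end != '\0' || v < 1 || v > CIFS_MAX_CHANNELS)
		return -EINVAL;
	*out = v;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("ok=%d\n", parse_max_channels("4", &v) == 0);	/* ok=1 */
	printf("ok=%d\n", parse_max_channels("99", &v) == 0);	/* ok=0 */
	return 0;
}
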
@@ -2713,7 +2769,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
send_sig(SIGKILL, task, 1);
}
-static struct TCP_Server_Info *
+struct TCP_Server_Info *
cifs_get_tcp_session(struct smb_vol *volume_info)
{
struct TCP_Server_Info *tcp_ses = NULL;
@@ -2772,7 +2828,11 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
sizeof(tcp_ses->dstaddr));
- generate_random_uuid(tcp_ses->client_guid);
+ if (volume_info->use_client_guid)
+ memcpy(tcp_ses->client_guid, volume_info->client_guid,
+ SMB2_CLIENT_GUID_SIZE);
+ else
+ generate_random_uuid(tcp_ses->client_guid);
/*
* at this point we are the only ones with the pointer
* to the struct since the kernel thread not created yet
@@ -2861,6 +2921,13 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
vol->sectype != ses->sectype)
return 0;
+ /*
+	 * If an existing session is limited to fewer channels than
+	 * requested, it should not be reused
+ */
+ if (ses->chan_max < vol->max_channels)
+ return 0;
+
switch (ses->sectype) {
case Kerberos:
if (!uid_eq(vol->cred_uid, ses->cred_uid))
@@ -3031,6 +3098,14 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ /* close any extra channels */
+ if (ses->chan_count > 1) {
+ int i;
+
+ for (i = 1; i < ses->chan_count; i++)
+ cifs_put_tcp_session(ses->chans[i].server, 0);
+ }
+
sesInfoFree(ses);
cifs_put_tcp_session(server, 0);
}
@@ -3277,14 +3352,25 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
ses->sectype = volume_info->sectype;
ses->sign = volume_info->sign;
mutex_lock(&ses->session_mutex);
+
+ /* add server as first channel */
+ ses->chans[0].server = server;
+ ses->chan_count = 1;
+ ses->chan_max = volume_info->multichannel ? volume_info->max_channels:1;
+
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
rc = cifs_setup_session(xid, ses, volume_info->local_nls);
+
+ /* each channel uses a different signing key */
+ memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+ sizeof(ses->smb3signingkey));
+
mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
- /* success, put it on the list */
+ /* success, put it on the list and add it as first channel */
spin_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -4700,6 +4786,17 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
+static inline void set_root_tcon(struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon,
+ struct cifs_tcon **root)
+{
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tc_count++;
+ tcon->remap = cifs_remap(cifs_sb);
+ spin_unlock(&cifs_tcp_ses_lock);
+ *root = tcon;
+}
+
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
{
int rc = 0;
@@ -4801,18 +4898,10 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
/* Cache out resolved root server */
(void)dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
root_path + 1, NULL, NULL);
- /*
- * Save root tcon for additional DFS requests to update or create a new
- * DFS cache entry, or even perform DFS failover.
- */
- spin_lock(&cifs_tcp_ses_lock);
- tcon->tc_count++;
- tcon->dfs_path = root_path;
+ kfree(root_path);
root_path = NULL;
- tcon->remap = cifs_remap(cifs_sb);
- spin_unlock(&cifs_tcp_ses_lock);
- root_tcon = tcon;
+ set_root_tcon(cifs_sb, tcon, &root_tcon);
for (count = 1; ;) {
if (!rc && tcon) {
@@ -4849,6 +4938,15 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
mount_put_conns(cifs_sb, xid, server, ses, tcon);
rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses,
&tcon);
+ /*
+ * Ensure that DFS referrals go through new root server.
+ */
+ if (!rc && tcon &&
+ (tcon->share_flags & (SHI1005_FLAGS_DFS |
+ SHI1005_FLAGS_DFS_ROOT))) {
+ cifs_put_tcon(root_tcon);
+ set_root_tcon(cifs_sb, tcon, &root_tcon);
+ }
}
if (rc) {
if (rc == -EACCES || rc == -EOPNOTSUPP)
@@ -4897,6 +4995,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol)
cifs_autodisable_serverino(cifs_sb);
out:
free_xid(xid);
+ cifs_try_adding_channels(ses);
return mount_setup_tlink(cifs_sb, ses, tcon);
error:
@@ -5142,7 +5241,7 @@ int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
if (!server->ops->need_neg || !server->ops->negotiate)
return -ENOSYS;
@@ -5169,23 +5268,25 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct nls_table *nls_info)
{
int rc = -ENOSYS;
- struct TCP_Server_Info *server = ses->server;
-
- ses->capabilities = server->capabilities;
- if (linuxExtEnabled == 0)
- ses->capabilities &= (~server->vals->cap_unix);
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
+
+ if (!ses->binding) {
+ ses->capabilities = server->capabilities;
+ if (linuxExtEnabled == 0)
+ ses->capabilities &= (~server->vals->cap_unix);
+
+ if (ses->auth_key.response) {
+ cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
+ ses->auth_key.response);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ ses->auth_key.len = 0;
+ }
+ }
cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
server->sec_mode, server->capabilities, server->timeAdj);
- if (ses->auth_key.response) {
- cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
- ses->auth_key.response);
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
- ses->auth_key.len = 0;
- }
-
if (server->ops->sess_setup)
rc = server->ops->sess_setup(xid, ses, nls_info);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 1692c0c6c23a..2faa05860a48 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1317,7 +1317,6 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
int rc;
struct dfs_info3_param ref = {0};
char *mdata = NULL, *devname = NULL;
- bool is_smb3 = tcon->ses->server->vals->header_preamble_size == 0;
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct smb_vol vol;
@@ -1344,7 +1343,7 @@ static struct cifs_ses *find_root_ses(struct dfs_cache_vol_info *vi,
goto out;
}
- rc = cifs_setup_volume_info(&vol, mdata, devname, is_smb3);
+ rc = cifs_setup_volume_info(&vol, mdata, devname, false);
kfree(devname);
if (rc) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 7ce689d31aa2..f3b79012ff29 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -244,10 +244,8 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
*oplock = REQ_OPLOCK;
full_path = build_path_from_dentry(direntry);
- if (full_path == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!full_path)
+ return -ENOMEM;
if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa7b0fa72bb3..f1fe9c44d298 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -288,6 +288,8 @@ cifs_down_write(struct rw_semaphore *sem)
msleep(10);
}
+static void cifsFileInfo_put_work(struct work_struct *work);
+
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock)
@@ -325,6 +327,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->invalidHandle = false;
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
+ INIT_WORK(&cfile->put, cifsFileInfo_put_work);
mutex_init(&cfile->fh_mutex);
spin_lock_init(&cfile->file_info_lock);
@@ -375,6 +378,41 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
return cifs_file;
}
+static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
+{
+ struct inode *inode = d_inode(cifs_file->dentry);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifsLockInfo *li, *tmp;
+ struct super_block *sb = inode->i_sb;
+
+ /*
+ * Delete any outstanding lock records. We'll lose them when the file
+ * is closed anyway.
+ */
+ cifs_down_write(&cifsi->lock_sem);
+ list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
+ list_del(&li->llist);
+ cifs_del_lock_waiters(li);
+ kfree(li);
+ }
+ list_del(&cifs_file->llist->llist);
+ kfree(cifs_file->llist);
+ up_write(&cifsi->lock_sem);
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
+ cifs_sb_deactive(sb);
+ kfree(cifs_file);
+}
+
+static void cifsFileInfo_put_work(struct work_struct *work)
+{
+ struct cifsFileInfo *cifs_file = container_of(work,
+ struct cifsFileInfo, put);
+
+ cifsFileInfo_put_final(cifs_file);
+}
+
/**
* cifsFileInfo_put - release a reference of file priv data
*
@@ -382,15 +420,15 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
- _cifsFileInfo_put(cifs_file, true);
+ _cifsFileInfo_put(cifs_file, true, true);
}
/**
* _cifsFileInfo_put - release a reference of file priv data
*
* This may involve closing the filehandle @cifs_file out on the
- * server. Must be called without holding tcon->open_file_lock and
- * cifs_file->file_info_lock.
+ * server. Must be called without holding tcon->open_file_lock,
+ * cinode->open_file_lock and cifs_file->file_info_lock.
*
* If @wait_for_oplock_handler is true and we are releasing the last
* reference, wait for any running oplock break handler of the file
@@ -398,7 +436,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
* oplock break handler, you need to pass false.
*
*/
-void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
+ bool wait_oplock_handler, bool offload)
{
struct inode *inode = d_inode(cifs_file->dentry);
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
@@ -406,7 +445,6 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsLockInfo *li, *tmp;
struct cifs_fid fid;
struct cifs_pending_open open;
bool oplock_break_cancelled;
@@ -467,24 +505,10 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
cifs_del_pending_open(&open);
- /*
- * Delete any outstanding lock records. We'll lose them when the file
- * is closed anyway.
- */
- cifs_down_write(&cifsi->lock_sem);
- list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
- list_del(&li->llist);
- cifs_del_lock_waiters(li);
- kfree(li);
- }
- list_del(&cifs_file->llist->llist);
- kfree(cifs_file->llist);
- up_write(&cifsi->lock_sem);
-
- cifs_put_tlink(cifs_file->tlink);
- dput(cifs_file->dentry);
- cifs_sb_deactive(sb);
- kfree(cifs_file);
+ if (offload)
+ queue_work(fileinfo_put_wq, &cifs_file->put);
+ else
+ cifsFileInfo_put_final(cifs_file);
}
int cifs_open(struct inode *inode, struct file *file)
@@ -728,6 +752,13 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (cfile->f_flags & O_SYNC)
+ create_options |= CREATE_WRITE_THROUGH;
+
+ if (cfile->f_flags & O_DIRECT)
+ create_options |= CREATE_NO_BUFFER;
+
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
@@ -808,7 +839,7 @@ reopen_error_exit:
int cifs_close(struct inode *inode, struct file *file)
{
if (file->private_data != NULL) {
- cifsFileInfo_put(file->private_data);
+ _cifsFileInfo_put(file->private_data, true, false);
file->private_data = NULL;
}
@@ -1681,7 +1712,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
rc = server->ops->mand_unlock_range(cfile, flock, xid);
out:
- if (flock->fl_flags & FL_POSIX) {
+ if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
/*
* If this is a request to remove all locks because we
* are closing the file, it doesn't matter if the
@@ -1698,6 +1729,52 @@ out:
return rc;
}
+int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
+{
+ int rc, xid;
+ int lock = 0, unlock = 0;
+ bool wait_flag = false;
+ bool posix_lck = false;
+ struct cifs_sb_info *cifs_sb;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
+ __u32 type;
+
+ rc = -EACCES;
+ xid = get_xid();
+
+	if (!(fl->fl_flags & FL_FLOCK)) {
+		free_xid(xid);
+		return -ENOLCK;
+	}
+
+ cfile = (struct cifsFileInfo *)file->private_data;
+ tcon = tlink_tcon(cfile->tlink);
+
+ cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
+ tcon->ses->server);
+ cifs_sb = CIFS_FILE_SB(file);
+
+ if (cap_unix(tcon->ses) &&
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+ ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+ posix_lck = true;
+
+ if (!lock && !unlock) {
+ /*
+ * if no lock or unlock then nothing to do since we do not
+ * know what it is
+ */
+ free_xid(xid);
+ return -EOPNOTSUPP;
+ }
+
+ rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
+ xid);
+ free_xid(xid);
+ return rc;
+}
+
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
int rc, xid;
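
With .flock wired into the three file_operations tables in cifsfs.c, BSD flock() on a cifs file now reaches cifs_flock() above instead of being handled purely locally. A quick userspace exercise (the mount path is illustrative):

#include <sys/file.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cifs/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	if (flock(fd, LOCK_EX) == 0) {	/* exclusive lock, may block */
		puts("exclusive flock taken");
		flock(fd, LOCK_UN);
	}
	close(fd);
	return 0;
}
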
@@ -2757,9 +2834,17 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
if (!rc) {
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
- else
+ else {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ wdata->mr->need_invalidate = true;
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
+ }
}
/* If the write was successfully sent, we are done */
@@ -3482,8 +3567,16 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
- else
+ else {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (rdata->mr) {
+ rdata->mr->need_invalidate = true;
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
rc = server->ops->async_readv(rdata);
+ }
}
/* If the read was successfully sent, we are done */
@@ -4637,12 +4730,13 @@ void cifs_oplock_break(struct work_struct *work)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
int rc = 0;
+ bool purge_cache = false;
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
- server->ops->downgrade_oplock(server, cinode,
- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
cifs_has_mand_locks(cinode)) {
@@ -4657,18 +4751,21 @@ void cifs_oplock_break(struct work_struct *work)
else
break_lease(inode, O_WRONLY);
rc = filemap_fdatawrite(inode->i_mapping);
- if (!CIFS_CACHE_READ(cinode)) {
+ if (!CIFS_CACHE_READ(cinode) || purge_cache) {
rc = filemap_fdatawait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
cifs_zap_mapping(inode);
}
cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
+ if (CIFS_CACHE_WRITE(cinode))
+ goto oplock_break_ack;
}
rc = cifs_push_locks(cfile);
if (rc)
cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+oplock_break_ack:
/*
* releasing stale oplock after recent reconnect of smb session using
* a now incorrect file handle is not a data integrity issue but do
@@ -4680,7 +4777,7 @@ void cifs_oplock_break(struct work_struct *work)
cinode);
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
}
- _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
+ _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
cifs_done_oplock_break(cinode);
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index df9377828e2f..8a76195e8a69 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -727,22 +727,138 @@ static __u64 simple_hashstr(const char *str)
return hash;
}
+/**
+ * cifs_backup_query_path_info - SMB1 fallback code to get ino
+ *
+ * Fallback code to get file metadata when we don't have access to
+ * @full_path (EACCES) and have backup creds.
+ *
+ * @data will be set to search info result buffer
+ * @resp_buf will be set to cifs resp buf and needs to be freed with
+ * cifs_buf_release() when done with @data.
+ */
+static int
+cifs_backup_query_path_info(int xid,
+ struct cifs_tcon *tcon,
+ struct super_block *sb,
+ const char *full_path,
+ void **resp_buf,
+ FILE_ALL_INFO **data)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_search_info info = {0};
+ u16 flags;
+ int rc;
+
+ *resp_buf = NULL;
+ info.endOfSearch = false;
+ if (tcon->unix_ext)
+ info.info_level = SMB_FIND_FILE_UNIX;
+ else if ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0)
+ info.info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ else /* no srvino useful for fallback to some netapp */
+ info.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+
+ flags = CIFS_SEARCH_CLOSE_ALWAYS |
+ CIFS_SEARCH_CLOSE_AT_END |
+ CIFS_SEARCH_BACKUP_SEARCH;
+
+ rc = CIFSFindFirst(xid, tcon, full_path,
+ cifs_sb, NULL, flags, &info, false);
+ if (rc)
+ return rc;
+
+ *resp_buf = (void *)info.ntwrk_buf_start;
+ *data = (FILE_ALL_INFO *)info.srch_entries_start;
+ return 0;
+}
+
+static void
+cifs_set_fattr_ino(int xid,
+ struct cifs_tcon *tcon,
+ struct super_block *sb,
+ struct inode **inode,
+ const char *full_path,
+ FILE_ALL_INFO *data,
+ struct cifs_fattr *fattr)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct TCP_Server_Info *server = tcon->ses->server;
+ int rc;
+
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ if (*inode)
+ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ else
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ return;
+ }
+
+ /*
+ * If we have an inode pass a NULL tcon to ensure we don't
+ * make a round trip to the server. This only works for SMB2+.
+ */
+ rc = server->ops->get_srv_inum(xid,
+ *inode ? NULL : tcon,
+ cifs_sb, full_path,
+ &fattr->cf_uniqueid,
+ data);
+ if (rc) {
+ /*
+ * If that fails reuse existing ino or generate one
+ * and disable server ones
+ */
+ if (*inode)
+ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ else {
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ cifs_autodisable_serverino(cifs_sb);
+ }
+ return;
+ }
+
+ /* If no errors, check for zero root inode (invalid) */
+ if (fattr->cf_uniqueid == 0 && strlen(full_path) == 0) {
+ cifs_dbg(FYI, "Invalid (0) inodenum\n");
+ if (*inode) {
+ /* reuse */
+ fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ } else {
+ /* make an ino by hashing the UNC */
+ fattr->cf_flags |= CIFS_FATTR_FAKE_ROOT_INO;
+ fattr->cf_uniqueid = simple_hashstr(tcon->treeName);
+ }
+ }
+}
+
+static inline bool is_inode_cache_good(struct inode *ino)
+{
+ return ino && CIFS_CACHE_READ(CIFS_I(ino)) && CIFS_I(ino)->time != 0;
+}
+
int
-cifs_get_inode_info(struct inode **inode, const char *full_path,
- FILE_ALL_INFO *data, struct super_block *sb, int xid,
+cifs_get_inode_info(struct inode **inode,
+ const char *full_path,
+ FILE_ALL_INFO *in_data,
+ struct super_block *sb, int xid,
const struct cifs_fid *fid)
{
- __u16 srchflgs;
- int rc = 0, tmprc = ENOSYS;
+
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- char *buf = NULL;
bool adjust_tz = false;
- struct cifs_fattr fattr;
- struct cifs_search_info *srchinf = NULL;
+ struct cifs_fattr fattr = {0};
bool symlink = false;
+ FILE_ALL_INFO *data = in_data;
+ FILE_ALL_INFO *tmp_data = NULL;
+ void *smb1_backup_rsp_buf = NULL;
+ int rc = 0;
+ int tmprc = 0;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -750,142 +866,88 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
- cifs_dbg(FYI, "Getting info on %s\n", full_path);
+ /*
+ * 1. Fetch file metadata if not provided (data)
+ */
- if ((data == NULL) && (*inode != NULL)) {
- if (CIFS_CACHE_READ(CIFS_I(*inode)) &&
- CIFS_I(*inode)->time != 0) {
+ if (!data) {
+ if (is_inode_cache_good(*inode)) {
cifs_dbg(FYI, "No need to revalidate cached inode sizes\n");
- goto cgii_exit;
- }
- }
-
- /* if inode info is not passed, get it from server */
- if (data == NULL) {
- if (!server->ops->query_path_info) {
- rc = -ENOSYS;
- goto cgii_exit;
+ goto out;
}
- buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL) {
+ tmp_data = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (!tmp_data) {
rc = -ENOMEM;
- goto cgii_exit;
+ goto out;
}
- data = (FILE_ALL_INFO *)buf;
- rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path,
- data, &adjust_tz, &symlink);
+ rc = server->ops->query_path_info(xid, tcon, cifs_sb,
+ full_path, tmp_data,
+ &adjust_tz, &symlink);
+ data = tmp_data;
}
- if (!rc) {
- cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz,
- symlink);
- } else if (rc == -EREMOTE) {
+ /*
+ * 2. Convert it to internal cifs metadata (fattr)
+ */
+
+ switch (rc) {
+ case 0:
+ cifs_all_info_to_fattr(&fattr, data, sb, adjust_tz, symlink);
+ break;
+ case -EREMOTE:
+ /* DFS link, no metadata available on this server */
cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
- } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
- (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
- == 0)) {
+ break;
+ case -EACCES:
/*
- * For SMB2 and later the backup intent flag is already
- * sent if needed on open and there is no path based
- * FindFirst operation to use to retry with
+ * perm errors, try again with backup flags if possible
+ *
+ * For SMB2 and later the backup intent flag
+ * is already sent if needed on open and there
+ * is no path based FindFirst operation to use
+ * to retry with
*/
+ if (backup_cred(cifs_sb) && is_smb1_server(server)) {
+ /* for easier reading */
+ FILE_DIRECTORY_INFO *fdi;
+ SEARCH_ID_FULL_DIR_INFO *si;
+
+ rc = cifs_backup_query_path_info(xid, tcon, sb,
+ full_path,
+ &smb1_backup_rsp_buf,
+ &data);
+ if (rc)
+ goto out;
- srchinf = kzalloc(sizeof(struct cifs_search_info),
- GFP_KERNEL);
- if (srchinf == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
- }
+ fdi = (FILE_DIRECTORY_INFO *)data;
+ si = (SEARCH_ID_FULL_DIR_INFO *)data;
- srchinf->endOfSearch = false;
- if (tcon->unix_ext)
- srchinf->info_level = SMB_FIND_FILE_UNIX;
- else if ((tcon->ses->capabilities &
- tcon->ses->server->vals->cap_nt_find) == 0)
- srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
- else /* no srvino useful for fallback to some netapp */
- srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
-
- srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
- CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_BACKUP_SEARCH;
-
- rc = CIFSFindFirst(xid, tcon, full_path,
- cifs_sb, NULL, srchflgs, srchinf, false);
- if (!rc) {
- data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
+ cifs_dir_info_to_fattr(&fattr, fdi, cifs_sb);
+ fattr.cf_uniqueid = le64_to_cpu(si->UniqueId);
+ /* uniqueid set, skip get inum step */
+ goto handle_mnt_opt;
+ } else {
+ /* nothing we can do, bail out */
+ goto out;
+ }
+ break;
+ default:
+ cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc);
+ goto out;
+ }
- cifs_dir_info_to_fattr(&fattr,
- (FILE_DIRECTORY_INFO *)data, cifs_sb);
- fattr.cf_uniqueid = le64_to_cpu(
- ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
+ /*
+ * 3. Get or update inode number (fattr.cf_uniqueid)
+ */
- cifs_buf_release(srchinf->ntwrk_buf_start);
- }
- kfree(srchinf);
- if (rc)
- goto cgii_exit;
- } else
- goto cgii_exit;
+ cifs_set_fattr_ino(xid, tcon, sb, inode, full_path, data, &fattr);
/*
- * If an inode wasn't passed in, then get the inode number
- *
- * Is an i_ino of zero legal? Can we use that to check if the server
- * supports returning inode numbers? Are there other sanity checks we
- * can use to ensure that the server is really filling in that field?
+ * 4. Tweak fattr based on mount options
*/
- if (*inode == NULL) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- if (server->ops->get_srv_inum)
- tmprc = server->ops->get_srv_inum(xid,
- tcon, cifs_sb, full_path,
- &fattr.cf_uniqueid, data);
- if (tmprc) {
- cifs_dbg(FYI, "GetSrvInodeNum rc %d\n",
- tmprc);
- fattr.cf_uniqueid = iunique(sb, ROOT_I);
- cifs_autodisable_serverino(cifs_sb);
- } else if ((fattr.cf_uniqueid == 0) &&
- strlen(full_path) == 0) {
- /* some servers ret bad root ino ie 0 */
- cifs_dbg(FYI, "Invalid (0) inodenum\n");
- fattr.cf_flags |=
- CIFS_FATTR_FAKE_ROOT_INO;
- fattr.cf_uniqueid =
- simple_hashstr(tcon->treeName);
- }
- } else
- fattr.cf_uniqueid = iunique(sb, ROOT_I);
- } else {
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- && server->ops->get_srv_inum) {
- /*
- * Pass a NULL tcon to ensure we don't make a round
- * trip to the server. This only works for SMB2+.
- */
- tmprc = server->ops->get_srv_inum(xid,
- NULL, cifs_sb, full_path,
- &fattr.cf_uniqueid, data);
- if (tmprc)
- fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
- else if ((fattr.cf_uniqueid == 0) &&
- strlen(full_path) == 0) {
- /*
- * Reuse existing root inode num since
- * inum zero for root causes ls of . and .. to
- * not be returned
- */
- cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
- fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
- }
- } else
- fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
- }
+handle_mnt_opt:
/* query for SFU type info if supported and needed */
if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
@@ -900,8 +962,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
full_path, fid);
if (rc) {
cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
- __func__, rc);
- goto cgii_exit;
+ __func__, rc);
+ goto out;
}
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
@@ -909,7 +971,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
if (rc) {
cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
__func__, rc);
- goto cgii_exit;
+ goto out;
}
}
@@ -925,6 +987,10 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
}
+ /*
+ * 5. Update inode with final fattr data
+ */
+
if (!*inode) {
*inode = cifs_iget(sb, &fattr);
if (!*inode)
@@ -937,7 +1003,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
- goto cgii_exit;
+ goto out;
}
/* if filetype is different, return error */
@@ -945,18 +1011,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
(fattr.cf_mode & S_IFMT))) {
CIFS_I(*inode)->time = 0; /* force reval */
rc = -ESTALE;
- goto cgii_exit;
+ goto out;
}
cifs_fattr_to_inode(*inode, &fattr);
}
-
-cgii_exit:
- if ((*inode) && ((*inode)->i_ino == 0))
- cifs_dbg(FYI, "inode number of zero returned\n");
-
- kfree(buf);
+out:
+ cifs_buf_release(smb1_backup_rsp_buf);
cifs_put_tlink(tlink);
+ kfree(tmp_data);
return rc;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 5ad83bdb9bea..40ca394fd5de 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -488,21 +488,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&pCifsInode->flags);
- /*
- * Set flag if the server downgrades the oplock
- * to L2 else clear.
- */
- if (pSMB->OplockLevel)
- set_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &pCifsInode->flags);
- else
- clear_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &pCifsInode->flags);
-
- cifs_queue_oplock_break(netfile);
+ netfile->oplock_epoch = 0;
+ netfile->oplock_level = pSMB->OplockLevel;
netfile->oplock_break_cancelled = false;
+ cifs_queue_oplock_break(netfile);
spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 85bd644f9773..fb3bdc44775c 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -31,6 +31,231 @@
#include <linux/utsname.h>
#include <linux/slab.h>
#include "cifs_spnego.h"
+#include "smb2proto.h"
+
+bool
+is_server_using_iface(struct TCP_Server_Info *server,
+ struct cifs_server_iface *iface)
+{
+ struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr;
+ struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr;
+ struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr;
+ struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr;
+
+ if (server->dstaddr.ss_family != iface->sockaddr.ss_family)
+ return false;
+ if (server->dstaddr.ss_family == AF_INET) {
+ if (s4->sin_addr.s_addr != i4->sin_addr.s_addr)
+ return false;
+ } else if (server->dstaddr.ss_family == AF_INET6) {
+ if (memcmp(&s6->sin6_addr, &i6->sin6_addr,
+ sizeof(i6->sin6_addr)) != 0)
+ return false;
+ } else {
+ /* unknown family.. */
+ return false;
+ }
+ return true;
+}
+
+bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
+{
+ int i;
+
+ for (i = 0; i < ses->chan_count; i++) {
+ if (is_server_using_iface(ses->chans[i].server, iface))
+ return true;
+ }
+ return false;
+}
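
The two helpers above compare only the address payload, never the port, and bail out on unknown address families. The same comparison as a self-contained userspace function:

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdbool.h>
#include <string.h>

/* Family-aware address comparison, ports ignored. */
static bool same_addr(const struct sockaddr_storage *a,
		      const struct sockaddr_storage *b)
{
	if (a->ss_family != b->ss_family)
		return false;
	if (a->ss_family == AF_INET) {
		const struct sockaddr_in *a4 = (const void *)a;
		const struct sockaddr_in *b4 = (const void *)b;

		return a4->sin_addr.s_addr == b4->sin_addr.s_addr;
	}
	if (a->ss_family == AF_INET6) {
		const struct sockaddr_in6 *a6 = (const void *)a;
		const struct sockaddr_in6 *b6 = (const void *)b;

		return memcmp(&a6->sin6_addr, &b6->sin6_addr,
			      sizeof(a6->sin6_addr)) == 0;
	}
	return false;
}
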
+
+/* returns number of channels added */
+int cifs_try_adding_channels(struct cifs_ses *ses)
+{
+ int old_chan_count = ses->chan_count;
+ int left = ses->chan_max - ses->chan_count;
+ int i = 0;
+ int rc = 0;
+ int tries = 0;
+
+ if (left <= 0) {
+ cifs_dbg(FYI,
+ "ses already at max_channels (%zu), nothing to open\n",
+ ses->chan_max);
+ return 0;
+ }
+
+ if (ses->server->dialect < SMB30_PROT_ID) {
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ return 0;
+ }
+
+ /*
+ * Keep connecting to the same, fastest iface for all channels as
+ * long as it's RSS-capable. Try the next fastest one if it's not,
+ * or if channel creation fails.
+ */
+ while (left > 0) {
+ struct cifs_server_iface *iface;
+
+ tries++;
+ if (tries > 3*ses->chan_max) {
+ cifs_dbg(FYI, "too many attempt at opening channels (%d channels left to open)\n",
+ left);
+ break;
+ }
+
+ iface = &ses->iface_list[i];
+ if (is_ses_using_iface(ses, iface) && !iface->rss_capable) {
+ i = (i+1) % ses->iface_count;
+ continue;
+ }
+
+ rc = cifs_ses_add_channel(ses, iface);
+ if (rc) {
+ cifs_dbg(FYI, "failed to open extra channel on iface#%d rc=%d\n",
+ i, rc);
+ i = (i+1) % ses->iface_count;
+ continue;
+ }
+
+ cifs_dbg(FYI, "successfully opened new channel on iface#%d\n",
+ i);
+ left--;
+ }
+
+ return ses->chan_count - old_chan_count;
+}
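
A hedged userspace sketch of the selection policy implemented above: stay on the fastest interface while it is RSS-capable, rotate to the next one otherwise or on failure, and give up after 3 * chan_max attempts. The struct iface fields and the try_open_channel() stub are assumptions for illustration, not the cifs data structures.

#include <stdbool.h>

struct iface { bool rss_capable; bool in_use; };

/* stub standing in for cifs_ses_add_channel() */
static int try_open_channel(int idx) { return idx % 2 ? -1 : 0; }

static int add_channels(struct iface *ifaces, int iface_count,
			int chan_count, int chan_max)
{
	int left = chan_max - chan_count;
	int i = 0, tries = 0, opened = 0;

	while (left > 0) {
		if (++tries > 3 * chan_max)
			break;				/* give up */
		if (ifaces[i].in_use && !ifaces[i].rss_capable) {
			i = (i + 1) % iface_count;	/* non-RSS, move on */
			continue;
		}
		if (try_open_channel(i)) {
			i = (i + 1) % iface_count;	/* failed, move on */
			continue;
		}
		ifaces[i].in_use = true;
		opened++;
		left--;
	}
	return opened;		/* number of channels added */
}
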
+
+int
+cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
+{
+ struct cifs_chan *chan;
+ struct smb_vol vol = {NULL};
+ static const char unc_fmt[] = "\\%s\\foo";
+ char unc[sizeof(unc_fmt)+SERVER_NAME_LEN_WITH_NULL] = {0};
+ struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+ struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+ int rc;
+ unsigned int xid = get_xid();
+
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ",
+ ses, iface->speed, iface->rdma_capable ? "yes" : "no");
+ if (iface->sockaddr.ss_family == AF_INET)
+ cifs_dbg(FYI, "ip:%pI4)\n", &ipv4->sin_addr);
+ else
+ cifs_dbg(FYI, "ip:%pI6)\n", &ipv6->sin6_addr);
+
+ /*
+ * Set up an smb_vol with mostly the same info as the existing
+ * session and overwrite it with the requested iface data.
+ *
+ * We need to set up at least the fields used for negprot and
+ * session setup.
+ *
+ * We only need the volume here, so we can reuse memory from
+ * the session and server without caring about memory
+ * management.
+ */
+
+ /* Always make a new connection for now (TODO?) */
+ vol.nosharesock = true;
+
+ /* Auth */
+ vol.domainauto = ses->domainAuto;
+ vol.domainname = ses->domainName;
+ vol.username = ses->user_name;
+ vol.password = ses->password;
+ vol.sectype = ses->sectype;
+ vol.sign = ses->sign;
+
+ /* UNC and paths */
+ /* XXX: Use ses->server->hostname? */
+ sprintf(unc, unc_fmt, ses->serverName);
+ vol.UNC = unc;
+ vol.prepath = "";
+
+ /* Re-use same version as master connection */
+ vol.vals = ses->server->vals;
+ vol.ops = ses->server->ops;
+
+ vol.noblocksnd = ses->server->noblocksnd;
+ vol.noautotune = ses->server->noautotune;
+ vol.sockopt_tcp_nodelay = ses->server->tcp_nodelay;
+ vol.echo_interval = ses->server->echo_interval / HZ;
+
+ /*
+ * This will be used for encoding/decoding user/domain/pw
+ * during sess setup auth.
+ *
+ * XXX: We use the default for simplicity but the proper way
+ * would be to use the one that ses used, which is not
+ * stored. This might break when dealing with non-ascii
+ * strings.
+ */
+ vol.local_nls = load_nls_default();
+
+ /* Use RDMA if possible */
+ vol.rdma = iface->rdma_capable;
+ memcpy(&vol.dstaddr, &iface->sockaddr, sizeof(struct sockaddr_storage));
+
+ /* reuse master con client guid */
+ memcpy(&vol.client_guid, ses->server->client_guid,
+ SMB2_CLIENT_GUID_SIZE);
+ vol.use_client_guid = true;
+
+ mutex_lock(&ses->session_mutex);
+
+ chan = &ses->chans[ses->chan_count];
+ chan->server = cifs_get_tcp_session(&vol);
+ if (IS_ERR(chan->server)) {
+ rc = PTR_ERR(chan->server);
+ chan->server = NULL;
+ goto out;
+ }
+
+ /*
+ * We need to allocate the server crypto now as we will need
+ * to sign packets before we generate the channel signing key
+ * (we sign with the session key)
+ */
+ rc = smb311_crypto_shash_allocate(chan->server);
+ if (rc) {
+ cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+ goto out;
+ }
+
+ ses->binding = true;
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (rc)
+ goto out;
+
+ rc = cifs_setup_session(xid, ses, vol.local_nls);
+ if (rc)
+ goto out;
+
+ /* success, put it on the list
+ * XXX: sharing a ses between 2 tcp servers is not possible, the
+ * way "internal" linked lists work in linux makes an element
+ * only able to belong to one list
+ *
+ * the binding session is already established so the rest of
+ * the code should be able to look it up, no need to add the
+ * ses to the new server.
+ */
+
+ ses->chan_count++;
+ atomic_set(&ses->chan_seq, 0);
+out:
+ ses->binding = false;
+ mutex_unlock(&ses->session_mutex);
+
+ if (rc && chan->server)
+ cifs_put_tcp_session(chan->server, 0);
+ unload_nls(vol.local_nls);
+
+ return rc;
+}
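
The ordering in cifs_ses_add_channel() above matters: the extra transport is created under the session mutex, ses->binding gates the special-cased negotiate and session-setup paths, and the flag is always cleared before the mutex is dropped. A simplified sketch of that sequence, with hypothetical stubs standing in for the kernel calls:

#include <pthread.h>
#include <stddef.h>

struct session { pthread_mutex_t lock; int binding; int chan_count; };

static void *get_transport(void) { static int t; return &t; }      /* stub */
static int negotiate(struct session *s) { (void)s; return 0; }     /* stub */
static int session_setup(struct session *s) { (void)s; return 0; } /* stub */

static int add_channel(struct session *ses)
{
	void *server;
	int rc;

	pthread_mutex_lock(&ses->lock);
	server = get_transport();
	if (!server) {
		rc = -1;
		goto out;
	}

	ses->binding = 1;	/* reuse ids/keys of the master session */
	rc = negotiate(ses);
	if (rc)
		goto out;
	rc = session_setup(ses);
	if (rc)
		goto out;
	ses->chan_count++;
out:
	ses->binding = 0;	/* always cleared, success or not */
	pthread_mutex_unlock(&ses->lock);
	return rc;
}
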
static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
{
@@ -342,6 +567,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
struct cifs_ses *ses)
{
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
__u32 flags;
@@ -354,9 +580,9 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
NTLMSSP_NEGOTIATE_SEAL;
- if (ses->server->sign)
+ if (server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+ if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
sec_blob->NegotiateFlags = cpu_to_le32(flags);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 514810694c0f..d70a2bb062df 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -369,12 +369,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
static void
cifs_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- if (set_level2)
- cifs_set_oplock_level(cinode, OPLOCK_READ);
- else
- cifs_set_oplock_level(cinode, 0);
+ cifs_set_oplock_level(cinode, oplock);
}
static bool
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index e311f58dc1c8..0516fc482d43 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -29,6 +29,7 @@
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
+#include "nterr.h"
static int
check_smb2_hdr(struct smb2_sync_hdr *shdr, __u64 mid)
@@ -249,16 +250,10 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
* of junk. Other servers match RFC1001 len to actual
* SMB2/SMB3 frame length (header + smb2 response specific data)
* Some windows servers also pad up to 8 bytes when compounding.
- * If pad is longer than eight bytes, log the server behavior
- * (once), since may indicate a problem but allow it and continue
- * since the frame is parseable.
*/
- if (clc_len < len) {
- pr_warn_once(
- "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
- len, clc_len, command, mid);
+ if (clc_len < len)
return 0;
- }
+
pr_warn_once(
"srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
len, clc_len, command, mid);
@@ -534,7 +529,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
cifs_dbg(FYI, "found in the open list\n");
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
- le32_to_cpu(rsp->NewLeaseState));
+ lease_state);
if (ack_req)
cfile->oplock_break_cancelled = false;
@@ -543,17 +538,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
- /*
- * Set or clear flags depending on the lease state being READ.
- * HANDLE caching flag should be added when the client starts
- * to defer closing remote file handles with HANDLE leases.
- */
- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
- else
- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
+ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
+ cfile->oplock_level = lease_state;
cifs_queue_oplock_break(cfile);
kfree(lw);
@@ -576,7 +562,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
cifs_dbg(FYI, "found in the pending open list\n");
cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
- le32_to_cpu(rsp->NewLeaseState));
+ lease_state);
open->oplock = lease_state;
}
@@ -673,10 +659,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+
list_for_each(tmp1, &ses->tcon_list) {
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
cfile = list_entry(tmp2, struct cifsFileInfo,
@@ -688,6 +674,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
continue;
cifs_dbg(FYI, "file id match, oplock break\n");
+ cifs_stats_inc(
+ &tcon->stats.cifs_stats.num_oplock_brks);
cinode = CIFS_I(d_inode(cfile->dentry));
spin_lock(&cfile->file_info_lock);
if (!CIFS_CACHE_WRITE(cinode) &&
@@ -699,18 +687,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
&cinode->flags);
- /*
- * Set flag if the server downgrades the oplock
- * to L2 else clear.
- */
- if (rsp->OplockLevel)
- set_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
- else
- clear_bit(
- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
- &cinode->flags);
+ cfile->oplock_epoch = 0;
+ cfile->oplock_level = rsp->OplockLevel;
+
spin_unlock(&cfile->file_info_lock);
cifs_queue_oplock_break(cfile);
@@ -720,9 +699,6 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
return true;
}
spin_unlock(&tcon->open_file_lock);
- spin_unlock(&cifs_tcp_ses_lock);
- cifs_dbg(FYI, "No matching file for oplock break\n");
- return true;
}
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -735,45 +711,98 @@ smb2_cancelled_close_fid(struct work_struct *work)
{
struct close_cancelled_open *cancelled = container_of(work,
struct close_cancelled_open, work);
+ struct cifs_tcon *tcon = cancelled->tcon;
+ int rc;
- cifs_dbg(VFS, "Close unmatched open\n");
+ if (cancelled->mid)
+ cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llx\n",
+ cancelled->mid);
+ else
+ cifs_tcon_dbg(VFS, "Close interrupted close\n");
- SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
- cancelled->fid.volatile_fid);
- cifs_put_tcon(cancelled->tcon);
+ rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
+ cancelled->fid.volatile_fid);
+ if (rc)
+ cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
+
+ cifs_put_tcon(tcon);
kfree(cancelled);
}
+/*
+ * Caller should already have an extra reference to @tcon.
+ * This function is used to queue work to close a handle to prevent leaks
+ * on the server.
+ * We handle two cases: first, an open that was interrupted after we sent
+ * the SMB2_CREATE to the server but before we processed the reply; and
+ * second, a close that was interrupted before we sent the SMB2_CLOSE to
+ * the server.
+ */
+static int
+__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
+ __u64 persistent_fid, __u64 volatile_fid)
+{
+ struct close_cancelled_open *cancelled;
+
+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+ cancelled->fid.persistent_fid = persistent_fid;
+ cancelled->fid.volatile_fid = volatile_fid;
+ cancelled->tcon = tcon;
+ cancelled->cmd = cmd;
+ cancelled->mid = mid;
+ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+ WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);
+
+ return 0;
+}
+
+int
+smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
+ __u64 volatile_fid)
+{
+ int rc;
+
+ cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon->tc_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
+ persistent_fid, volatile_fid);
+ if (rc)
+ cifs_put_tcon(tcon);
+
+ return rc;
+}
+
int
smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
{
struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
struct cifs_tcon *tcon;
- struct close_cancelled_open *cancelled;
+ int rc;
if (sync_hdr->Command != SMB2_CREATE ||
sync_hdr->Status != STATUS_SUCCESS)
return 0;
- cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
- if (!cancelled)
- return -ENOMEM;
-
tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
sync_hdr->TreeId);
- if (!tcon) {
- kfree(cancelled);
+ if (!tcon)
return -ENOENT;
- }
- cancelled->fid.persistent_fid = rsp->PersistentFileId;
- cancelled->fid.volatile_fid = rsp->VolatileFileId;
- cancelled->tcon = tcon;
- INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
- queue_work(cifsiod_wq, &cancelled->work);
+ rc = __smb2_handle_cancelled_cmd(tcon,
+ le16_to_cpu(sync_hdr->Command),
+ le64_to_cpu(sync_hdr->MessageId),
+ rsp->PersistentFileId,
+ rsp->VolatileFileId);
+ if (rc)
+ cifs_put_tcon(tcon);
- return 0;
+ return rc;
}
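
Both helpers above follow the same take-reference / queue-work / drop-reference-on-failure shape. A self-contained userspace sketch of that pattern follows; tcon_get/tcon_put and queue_work_item are illustrative stubs, not kernel APIs.

#include <stdint.h>
#include <stdlib.h>

struct tcon { int refcount; };
struct close_work { struct tcon *tcon; uint64_t pfid, vfid; };

static void tcon_get(struct tcon *t) { t->refcount++; }
static void tcon_put(struct tcon *t) { t->refcount--; }
static int queue_work_item(struct close_work *w) { (void)w; return 0; } /* stub */

static int handle_cancelled_close(struct tcon *t, uint64_t pfid, uint64_t vfid)
{
	struct close_work *w;
	int rc;

	tcon_get(t);			/* the worker owns a reference */

	w = calloc(1, sizeof(*w));
	if (!w) {
		tcon_put(t);		/* undo on allocation failure */
		return -1;
	}
	w->tcon = t;
	w->pfid = pfid;
	w->vfid = vfid;

	rc = queue_work_item(w);	/* worker sends the close, then puts tcon */
	if (rc) {
		free(w);
		tcon_put(t);
	}
	return rc;
}
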
/**
@@ -788,23 +817,37 @@ smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
int i, rc;
struct sdesc *d;
struct smb2_sync_hdr *hdr;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
- if (ses->server->tcpStatus == CifsGood) {
- /* skip non smb311 connections */
- if (ses->server->dialect != SMB311_PROT_ID)
- return 0;
+ hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
+ /* neg prot is always taken */
+ if (hdr->Command == SMB2_NEGOTIATE)
+ goto ok;
- /* skip last sess setup response */
- hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
- if (hdr->Flags & SMB2_FLAGS_SIGNED)
- return 0;
- }
+ /*
+ * If we process a command which wasn't a negprot it means the
+ * neg prot was already done, so the server dialect was set
+ * and we can test it. Preauth requires 3.1.1 for now.
+ */
+ if (server->dialect != SMB311_PROT_ID)
+ return 0;
+
+ if (hdr->Command != SMB2_SESSION_SETUP)
+ return 0;
+
+ /* skip last sess setup response */
+ if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+ && (hdr->Status == NT_STATUS_OK
+ || (hdr->Status !=
+ cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
+ return 0;
- rc = smb311_crypto_shash_allocate(ses->server);
+ok:
+ rc = smb311_crypto_shash_allocate(server);
if (rc)
return rc;
- d = ses->server->secmech.sdescsha512;
+ d = server->secmech.sdescsha512;
rc = crypto_shash_init(&d->shash);
if (rc) {
cifs_dbg(VFS, "%s: could not init sha512 shash\n", __func__);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index cd55af9b7cc5..a7f328f79c6f 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -10,6 +10,7 @@
#include <linux/falloc.h>
#include <linux/scatterlist.h>
#include <linux/uuid.h>
+#include <linux/sort.h>
#include <crypto/aead.h>
#include "cifsglob.h"
#include "smb2pdu.h"
@@ -151,13 +152,7 @@ smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
static unsigned int
smb2_get_credits(struct mid_q_entry *mid)
{
- struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
-
- if (mid->mid_state == MID_RESPONSE_RECEIVED
- || mid->mid_state == MID_RESPONSE_MALFORMED)
- return le16_to_cpu(shdr->CreditRequest);
-
- return 0;
+ return mid->credits_received;
}
static int
@@ -315,7 +310,7 @@ smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
int rc;
- ses->server->CurrentMid = 0;
+ cifs_ses_server(ses)->CurrentMid = 0;
rc = SMB2_negotiate(xid, ses);
/* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN)
@@ -558,6 +553,13 @@ out:
return rc;
}
+static int compare_iface(const void *ia, const void *ib)
+{
+ const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
+ const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
+
+ return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
+}
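
compare_iface() above is handed to the kernel's sort() (from <linux/sort.h>) to order interfaces fastest-first. A userspace analog using qsort(3) with the same comparator shape:

#include <stdio.h>
#include <stdlib.h>

struct iface { unsigned long speed; };

static int compare_iface(const void *ia, const void *ib)
{
	const struct iface *a = ia, *b = ib;

	/* descending by speed: the fastest interface ends up first */
	return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
}

int main(void)
{
	struct iface list[] = { { 1000 }, { 40000 }, { 10000 } };
	size_t n = sizeof(list) / sizeof(list[0]);

	qsort(list, n, sizeof(list[0]), compare_iface);
	for (size_t i = 0; i < n; i++)
		printf("%lu\n", list[i].speed);	/* prints 40000 10000 1000 */
	return 0;
}
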
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
@@ -587,6 +589,9 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
if (rc)
goto out;
+ /* sort interfaces from fastest to slowest */
+ sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL);
+
spin_lock(&ses->iface_lock);
kfree(ses->iface_list);
ses->iface_list = iface_list;
@@ -1402,15 +1407,10 @@ smb2_ioctl_query_info(const unsigned int xid,
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
- if (buffer == NULL)
- return -ENOMEM;
-
- if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
- qi.output_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ buffer = memdup_user(arg + sizeof(struct smb_query_info),
+ qi.output_buffer_length);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
/* Open */
memset(&open_iov, 0, sizeof(open_iov));
@@ -1529,35 +1529,32 @@ smb2_ioctl_query_info(const unsigned int xid,
if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
if (qi.input_buffer_length > 0 &&
- le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length > rsp_iov[1].iov_len) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
- sizeof(qi.input_buffer_length))) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
+ > rsp_iov[1].iov_len)
+ goto e_fault;
+
+ if (copy_to_user(&pqi->input_buffer_length,
+ &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length)))
+ goto e_fault;
+
if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
(const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
- qi.input_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ qi.input_buffer_length))
+ goto e_fault;
} else {
pqi = (struct smb_query_info __user *)arg;
qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
- if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
- sizeof(qi.input_buffer_length))) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
- if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) {
- rc = -EFAULT;
- goto iqinf_exit;
- }
+ if (copy_to_user(&pqi->input_buffer_length,
+ &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length)))
+ goto e_fault;
+
+ if (copy_to_user(pqi + 1, qi_rsp->Buffer,
+ qi.input_buffer_length))
+ goto e_fault;
}
iqinf_exit:
@@ -1573,6 +1570,10 @@ smb2_ioctl_query_info(const unsigned int xid,
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
return rc;
+
+e_fault:
+ rc = -EFAULT;
+ goto iqinf_exit;
}
static ssize_t
@@ -3281,22 +3282,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- if (set_level2)
- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
- 0, NULL);
- else
- server->ops->set_oplock_level(cinode, 0, 0, NULL);
+ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
static void
-smb21_downgrade_oplock(struct TCP_Server_Info *server,
- struct cifsInodeInfo *cinode, bool set_level2)
+smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache);
+
+static void
+smb3_downgrade_oplock(struct TCP_Server_Info *server,
+ struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
{
- server->ops->set_oplock_level(cinode,
- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
- 0, 0, NULL);
+ unsigned int old_state = cinode->oplock;
+ unsigned int old_epoch = cinode->epoch;
+ unsigned int new_state;
+
+ if (epoch > old_epoch) {
+ smb21_set_oplock_level(cinode, oplock, 0, NULL);
+ cinode->epoch = epoch;
+ }
+
+ new_state = cinode->oplock;
+ *purge_cache = false;
+
+ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
+ (new_state & CIFS_CACHE_READ_FLG) == 0)
+ *purge_cache = true;
+ else if (old_state == new_state && (epoch - old_epoch > 1))
+ *purge_cache = true;
}
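
smb3_downgrade_oplock() above purges the page cache in two situations: read caching was revoked, or the epoch advanced by more than one while the state looks unchanged (meaning a lease break was missed). A small standalone restatement of that decision, with a simplified flag constant:

#include <stdbool.h>

#define CACHE_READ_FLG 0x1	/* illustrative stand-in for CIFS_CACHE_READ_FLG */

static bool must_purge_cache(unsigned int old_state, unsigned int new_state,
			     unsigned int old_epoch, unsigned int new_epoch)
{
	if ((old_state & CACHE_READ_FLG) && !(new_state & CACHE_READ_FLG))
		return true;	/* read caching revoked: cached data is stale */
	if (old_state == new_state && new_epoch - old_epoch > 1)
		return true;	/* missed at least one lease break in between */
	return false;
}
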
static void
@@ -3598,14 +3615,16 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
u8 *ses_enc_key;
spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- if (ses->Suid != ses_id)
- continue;
- ses_enc_key = enc ? ses->smb3encryptionkey :
- ses->smb3decryptionkey;
- memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
- spin_unlock(&cifs_tcp_ses_lock);
- return 0;
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (ses->Suid == ses_id) {
+ ses_enc_key = enc ? ses->smb3encryptionkey :
+ ses->smb3decryptionkey;
+ memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+ }
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -4556,7 +4575,7 @@ struct smb_version_operations smb21_operations = {
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb2_negotiate_wsize,
@@ -4656,7 +4675,7 @@ struct smb_version_operations smb30_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb3_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb3_negotiate_wsize,
@@ -4764,7 +4783,7 @@ struct smb_version_operations smb311_operations = {
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
.handle_cancelled_mid = smb2_handle_cancelled_mid,
- .downgrade_oplock = smb21_downgrade_oplock,
+ .downgrade_oplock = smb3_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
.negotiate_wsize = smb3_negotiate_wsize,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 05149862aea4..ed77f94dbf1d 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (tcon == NULL)
return 0;
- if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
+ if (smb2_command == SMB2_TREE_CONNECT)
return 0;
if (tcon->tidStatus == CifsExiting) {
@@ -426,16 +426,9 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
* SMB information in the SMB header. If the return code is zero, this
* function must have filled in request_buf pointer.
*/
-static int
-smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf, unsigned int *total_len)
+static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
{
- int rc;
-
- rc = smb2_reconnect(smb2_command, tcon);
- if (rc)
- return rc;
-
/* BB eventually switch this to SMB2 specific small buf size */
if (smb2_command == SMB2_SET_INFO)
*request_buf = cifs_buf_get();
@@ -456,7 +449,31 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
cifs_stats_inc(&tcon->num_smbs_sent);
}
- return rc;
+ return 0;
+}
+
+static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
+{
+ int rc;
+
+ rc = smb2_reconnect(smb2_command, tcon);
+ if (rc)
+ return rc;
+
+ return __smb2_plain_req_init(smb2_command, tcon, request_buf,
+ total_len);
+}
+
+static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
+{
+ /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
+ return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf,
+ total_len);
+ }
+ return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len);
}
/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
@@ -791,7 +808,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
@@ -813,7 +830,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
- if (strcmp(ses->server->vals->version_string,
+ if (strcmp(server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
@@ -829,7 +846,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
total_len += 8;
} else {
/* otherwise send specific dialect */
- req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
+ req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
req->DialectCount = cpu_to_le16(1);
total_len += 2;
}
@@ -1171,7 +1188,7 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
unsigned int total_len;
rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
@@ -1179,13 +1196,21 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
if (rc)
return rc;
- /* First session, not a reauthenticate */
- req->sync_hdr.SessionId = 0;
-
- /* if reconnect, we need to send previous sess id, otherwise it is 0 */
- req->PreviousSessionId = sess_data->previous_session;
-
- req->Flags = 0; /* MBZ */
+ if (sess_data->ses->binding) {
+ req->sync_hdr.SessionId = sess_data->ses->Suid;
+ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ req->PreviousSessionId = 0;
+ req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
+ } else {
+ /* First session, not a reauthenticate */
+ req->sync_hdr.SessionId = 0;
+ /*
+ * if reconnect, we need to send previous sess id
+ * otherwise it is 0
+ */
+ req->PreviousSessionId = sess_data->previous_session;
+ req->Flags = 0; /* MBZ */
+ }
/* enough to enable echoes and oplocks and one max size write */
req->sync_hdr.CreditRequest = cpu_to_le16(130);
@@ -1258,28 +1283,33 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
int rc = 0;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
- mutex_lock(&ses->server->srv_mutex);
- if (ses->server->ops->generate_signingkey) {
- rc = ses->server->ops->generate_signingkey(ses);
+ mutex_lock(&server->srv_mutex);
+ if (server->ops->generate_signingkey) {
+ rc = server->ops->generate_signingkey(ses);
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
return rc;
}
}
- if (!ses->server->session_estab) {
- ses->server->sequence_number = 0x2;
- ses->server->session_estab = true;
+ if (!server->session_estab) {
+ server->sequence_number = 0x2;
+ server->session_estab = true;
}
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "SMB2/3 session established successfully\n");
- spin_lock(&GlobalMid_Lock);
- ses->status = CifsGood;
- ses->need_reconnect = false;
- spin_unlock(&GlobalMid_Lock);
+ /* keep existing ses state if binding */
+ if (!ses->binding) {
+ spin_lock(&GlobalMid_Lock);
+ ses->status = CifsGood;
+ ses->need_reconnect = false;
+ spin_unlock(&GlobalMid_Lock);
+ }
+
return rc;
}
@@ -1317,16 +1347,19 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
goto out_put_spnego_key;
}
- ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
- GFP_KERNEL);
- if (!ses->auth_key.response) {
- cifs_dbg(VFS,
- "Kerberos can't allocate (%u bytes) memory",
- msg->sesskey_len);
- rc = -ENOMEM;
- goto out_put_spnego_key;
+ /* keep session key if binding */
+ if (!ses->binding) {
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+ cifs_dbg(VFS,
+ "Kerberos can't allocate (%u bytes) memory",
+ msg->sesskey_len);
+ rc = -ENOMEM;
+ goto out_put_spnego_key;
+ }
+ ses->auth_key.len = msg->sesskey_len;
}
- ses->auth_key.len = msg->sesskey_len;
sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
sess_data->iov[1].iov_len = msg->secblob_len;
@@ -1336,9 +1369,11 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
goto out_put_spnego_key;
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
- ses->Suid = rsp->sync_hdr.SessionId;
-
- ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ /* keep session id and flags if binding */
+ if (!ses->binding) {
+ ses->Suid = rsp->sync_hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ }
rc = SMB2_sess_establish_session(sess_data);
out_put_spnego_key:
@@ -1432,9 +1467,11 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
-
- ses->Suid = rsp->sync_hdr.SessionId;
- ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ /* keep existing ses id and flags if binding */
+ if (!ses->binding) {
+ ses->Suid = rsp->sync_hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ }
out:
kfree(ntlmssp_blob);
@@ -1491,8 +1528,11 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
- ses->Suid = rsp->sync_hdr.SessionId;
- ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ /* keep existing ses id and flags if binding */
+ if (!ses->binding) {
+ ses->Suid = rsp->sync_hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ }
rc = SMB2_sess_establish_session(sess_data);
out:
@@ -1509,7 +1549,7 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
{
int type;
- type = smb2_select_sectype(ses->server, ses->sectype);
+ type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
cifs_dbg(VFS,
@@ -1537,7 +1577,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server = cifs_ses_server(ses);
struct SMB2_sess_data *sess_data;
cifs_dbg(FYI, "Session Setup\n");
@@ -1563,7 +1603,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
/*
* Initialize the session hash with the server one.
*/
- memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
+ memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
while (sess_data->func)
@@ -1807,6 +1847,8 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
+ close_shroot(&tcon->crfid);
+
rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
&total_len);
if (rc)
@@ -2661,7 +2703,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
int rc;
char *in_data_buf;
- rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
+ rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
if (rc)
return rc;
@@ -2972,7 +3014,21 @@ int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
{
- return SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
+ int rc;
+ int tmp_rc;
+
+ rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
+
+ /* retry close in a worker thread if this one is interrupted */
+ if (rc == -EINTR) {
+ tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
+ volatile_fid);
+ if (tmp_rc)
+ cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
+ persistent_fid, tmp_rc);
+ }
+
+ return rc;
}
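
SMB2_close() above preserves the original -EINTR result while still guaranteeing that the server-side handle is released by a worker. A hedged sketch of that fallback shape, with sync_close() and queue_async_close() as illustrative stubs:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int sync_close(uint64_t fid) { (void)fid; return -EINTR; }   /* stub */
static int queue_async_close(uint64_t fid) { (void)fid; return 0; } /* stub */

static int close_handle(uint64_t fid)
{
	int rc = sync_close(fid);

	/* retry the close in the background if this one was interrupted */
	if (rc == -EINTR) {
		int tmp_rc = queue_async_close(fid);

		if (tmp_rc)
			fprintf(stderr,
				"async close of fid 0x%llx failed: %d\n",
				(unsigned long long)fid, tmp_rc);
	}
	return rc;		/* the original result is still reported */
}
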
int
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 0abfde6d0b05..f264e1d36fe1 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -1386,7 +1386,7 @@ struct smb2_oplock_break {
struct smb2_lease_break {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 44 */
- __le16 Reserved;
+ __le16 Epoch;
__le32 Flags;
__u8 LeaseKey[16];
__le32 CurrentLeaseState;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 71b2930b8e0b..d21a5fcc8d06 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -46,7 +46,8 @@ extern int smb2_verify_signature(struct smb_rqst *, struct TCP_Server_Info *);
extern int smb2_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
- struct smb_rqst *rqst);
+ struct TCP_Server_Info *,
+ struct smb_rqst *rqst);
extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
@@ -212,6 +213,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
const __u8 oplock_level);
+extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
+ __u64 persistent_fid,
+ __u64 volatile_fid);
extern int smb2_handle_cancelled_mid(char *buffer,
struct TCP_Server_Info *server);
void smb2_cancelled_close_fid(struct work_struct *work);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 148d7942c796..387c88704c52 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -98,6 +98,61 @@ err:
return rc;
}
+
+static
+int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+{
+ struct cifs_chan *chan;
+ struct cifs_ses *ses = NULL;
+ int i;
+ int rc = 0;
+
+ spin_lock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (ses->Suid == ses_id)
+ goto found;
+ }
+ }
+ cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n",
+ __func__, ses_id);
+ rc = -ENOENT;
+ goto out;
+
+found:
+ if (ses->binding) {
+ /*
+ * If we are in the process of binding a new channel
+ * to an existing session, use the master connection
+ * session key
+ */
+ memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
+ goto out;
+ }
+
+ /*
+ * Otherwise, use the channel key.
+ */
+
+ for (i = 0; i < ses->chan_count; i++) {
+ chan = ses->chans + i;
+ if (chan->server == server) {
+ memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
+ goto out;
+ }
+ }
+
+ cifs_dbg(VFS,
+ "%s: Could not find channel signing key for session 0x%llx\n",
+ __func__, ses_id);
+ rc = -ENOENT;
+
+out:
+ spin_unlock(&cifs_tcp_ses_lock);
+ return rc;
+}
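
smb2_get_sign_key() above picks the master session key while a channel is being bound and the per-channel key otherwise. A simplified sketch of that lookup order; types and sizes here are illustrative, not the cifs structures.

#include <stdbool.h>
#include <string.h>

#define KEY_SIZE 16
#define MAX_CHANS 4

struct chan { void *server; unsigned char signkey[KEY_SIZE]; };
struct session {
	bool binding;
	unsigned char master_key[KEY_SIZE];
	struct chan chans[MAX_CHANS];
	int chan_count;
};

static int get_sign_key(struct session *ses, void *server, unsigned char *key)
{
	if (ses->binding) {
		/* bind in progress: sign with the master session key */
		memcpy(key, ses->master_key, KEY_SIZE);
		return 0;
	}
	for (int i = 0; i < ses->chan_count; i++) {
		if (ses->chans[i].server == server) {
			memcpy(key, ses->chans[i].signkey, KEY_SIZE);
			return 0;
		}
	}
	return -1;		/* no channel on this transport */
}
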
+
static struct cifs_ses *
smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
{
@@ -328,21 +383,45 @@ generate_smb3signingkey(struct cifs_ses *ses,
{
int rc;
- rc = generate_key(ses, ptriplet->signing.label,
- ptriplet->signing.context, ses->smb3signingkey,
- SMB3_SIGN_KEY_SIZE);
- if (rc)
- return rc;
+ /*
+ * All channels use the same encryption/decryption keys but
+ * each has its own signing key.
+ *
+ * When we generate the keys, check if it is for a new channel
+ * (binding), in which case we only need to generate a signing
+ * key and store it in the channel so as not to overwrite the
+ * master connection signing key stored in the session.
+ */
- rc = generate_key(ses, ptriplet->encryption.label,
- ptriplet->encryption.context, ses->smb3encryptionkey,
- SMB3_SIGN_KEY_SIZE);
- if (rc)
- return rc;
+ if (ses->binding) {
+ rc = generate_key(ses, ptriplet->signing.label,
+ ptriplet->signing.context,
+ cifs_ses_binding_channel(ses)->signkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
+ } else {
+ rc = generate_key(ses, ptriplet->signing.label,
+ ptriplet->signing.context,
+ ses->smb3signingkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
- rc = generate_key(ses, ptriplet->decryption.label,
- ptriplet->decryption.context,
- ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+ memcpy(ses->chans[0].signkey, ses->smb3signingkey,
+ SMB3_SIGN_KEY_SIZE);
+
+ rc = generate_key(ses, ptriplet->encryption.label,
+ ptriplet->encryption.context,
+ ses->smb3encryptionkey,
+ SMB3_SIGN_KEY_SIZE);
+ rc = generate_key(ses, ptriplet->decryption.label,
+ ptriplet->decryption.context,
+ ses->smb3decryptionkey,
+ SMB3_SIGN_KEY_SIZE);
+ if (rc)
+ return rc;
+ }
if (rc)
return rc;
@@ -431,21 +510,19 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
- struct cifs_ses *ses;
struct shash_desc *shash = &server->secmech.sdesccmacaes->shash;
struct smb_rqst drqst;
+ u8 key[SMB3_SIGN_KEY_SIZE];
- ses = smb2_find_smb_ses(server, shdr->SessionId);
- if (!ses) {
- cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
+ rc = smb2_get_sign_key(shdr->SessionId, server, key);
+ if (rc)
return 0;
- }
memset(smb3_signature, 0x0, SMB2_CMACAES_SIZE);
memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
rc = crypto_shash_setkey(server->secmech.cmacaes,
- ses->smb3signingkey, SMB2_CMACAES_SIZE);
+ key, SMB2_CMACAES_SIZE);
if (rc) {
cifs_server_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
return rc;
@@ -494,16 +571,25 @@ static int
smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
int rc = 0;
- struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
+ struct smb2_sync_hdr *shdr;
+ struct smb2_sess_setup_req *ssr;
+ bool is_binding;
+ bool is_signed;
- if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
- server->tcpStatus == CifsNeedNegotiate)
- return rc;
+ shdr = (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
+ ssr = (struct smb2_sess_setup_req *)shdr;
+
+ is_binding = shdr->Command == SMB2_SESSION_SETUP &&
+ (ssr->Flags & SMB2_SESSION_REQ_FLAG_BINDING);
+ is_signed = shdr->Flags & SMB2_FLAGS_SIGNED;
- if (!server->session_estab) {
+ if (!is_signed)
+ return 0;
+ if (server->tcpStatus == CifsNeedNegotiate)
+ return 0;
+ if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8);
- return rc;
+ return 0;
}
rc = server->ops->calc_signature(rqst, server);
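
The rewritten smb2_sign_rqst() above distinguishes three outcomes: skip signing, stamp the "BSRSPYL" placeholder, or compute a real signature; a session-bind request is signed even though session establishment has not completed. A compact restatement of that decision:

#include <stdbool.h>

enum sign_action { SKIP, DUMMY_SIGNATURE, REAL_SIGNATURE };

static enum sign_action sign_decision(bool is_signed, bool negotiating,
				      bool session_estab, bool is_binding)
{
	if (!is_signed)
		return SKIP;		/* request not flagged for signing */
	if (negotiating)
		return SKIP;		/* connection still in negprot */
	if (!is_binding && !session_estab)
		return DUMMY_SIGNATURE;	/* the "BSRSPYL" placeholder */
	return REAL_SIGNATURE;		/* binding setups are always signed */
}
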
@@ -610,18 +696,18 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
}
static int
-smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr,
- struct mid_q_entry **mid)
+smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
+ struct smb2_sync_hdr *shdr, struct mid_q_entry **mid)
{
- if (ses->server->tcpStatus == CifsExiting)
+ if (server->tcpStatus == CifsExiting)
return -ENOENT;
- if (ses->server->tcpStatus == CifsNeedReconnect) {
+ if (server->tcpStatus == CifsNeedReconnect) {
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN;
}
- if (ses->server->tcpStatus == CifsNeedNegotiate &&
+ if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE)
return -EAGAIN;
@@ -638,11 +724,11 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_sync_hdr *shdr,
/* else ok - we are shutting down the session */
}
- *mid = smb2_mid_entry_alloc(shdr, ses->server);
+ *mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL)
return -ENOMEM;
spin_lock(&GlobalMid_Lock);
- list_add_tail(&(*mid)->qhead, &ses->server->pending_mid_q);
+ list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
return 0;
@@ -675,24 +761,25 @@ smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
}
struct mid_q_entry *
-smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
+smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst)
{
int rc;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
- smb2_seq_num_into_buf(ses->server, shdr);
+ smb2_seq_num_into_buf(server, shdr);
- rc = smb2_get_mid_entry(ses, shdr, &mid);
+ rc = smb2_get_mid_entry(ses, server, shdr, &mid);
if (rc) {
- revert_current_mid_from_hdr(ses->server, shdr);
+ revert_current_mid_from_hdr(server, shdr);
return ERR_PTR(rc);
}
- rc = smb2_sign_rqst(rqst, ses->server);
+ rc = smb2_sign_rqst(rqst, server);
if (rc) {
- revert_current_mid_from_hdr(ses->server, shdr);
+ revert_current_mid_from_hdr(server, shdr);
cifs_delete_mid(mid);
return ERR_PTR(rc);
}
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 3c91fa97c9a8..5b1b97e9e0c9 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -1069,7 +1069,7 @@ static int smbd_post_send_data(
if (n_vec > SMBDIRECT_MAX_SGE) {
cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
- return -ENOMEM;
+ return -EINVAL;
}
sg_init_table(sgl, n_vec);
@@ -1476,6 +1476,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
info->transport_status = SMBD_DESTROYED;
destroy_workqueue(info->workqueue);
+ log_rdma_event(INFO, "rdma session destroyed\n");
kfree(info);
}
@@ -1505,8 +1506,9 @@ create_conn:
log_rdma_event(INFO, "creating rdma session\n");
server->smbd_conn = smbd_get_connection(
server, (struct sockaddr *) &server->dstaddr);
- log_rdma_event(INFO, "created rdma session info=%p\n",
- server->smbd_conn);
+
+ if (server->smbd_conn)
+ cifs_dbg(VFS, "RDMA transport re-established\n");
return server->smbd_conn ? 0 : -ENOENT;
}
@@ -1970,7 +1972,7 @@ read_rfc1002_done:
if (info->transport_status != SMBD_CONNECTED) {
log_read(ERR, "disconnected\n");
- return 0;
+ return -ECONNABORTED;
}
goto again;
@@ -2269,12 +2271,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
int rc;
list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
- if (smbdirect_mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgl,
- smbdirect_mr->sgl_count,
- smbdirect_mr->dir);
- else if (smbdirect_mr->state == MR_ERROR) {
+ if (smbdirect_mr->state == MR_ERROR) {
/* recover this MR entry */
rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2602,11 +2599,20 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
*/
smbdirect_mr->state = MR_INVALIDATED;
- /*
- * Schedule the work to do MR recovery for future I/Os
- * MR recovery is slow and we don't want it to block the current I/O
- */
- queue_work(info->workqueue, &info->mr_recovery_work);
+ if (smbdirect_mr->state == MR_INVALIDATED) {
+ ib_dma_unmap_sg(
+ info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count,
+ smbdirect_mr->dir);
+ smbdirect_mr->state = MR_READY;
+ if (atomic_inc_return(&info->mr_ready_count) == 1)
+ wake_up_interruptible(&info->wait_mr);
+ } else
+ /*
+ * Schedule the work to do MR recovery for future I/Os. MR
+ * recovery is slow and we don't want it to block the current I/O
+ */
+ queue_work(info->workqueue, &info->mr_recovery_work);
done:
if (atomic_dec_and_test(&info->mr_used_count))
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index ca3de62688d6..3d2e11f85cba 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -93,8 +93,14 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
__u16 smb_cmd = le16_to_cpu(midEntry->command);
unsigned long now;
unsigned long roundtrip_time;
- struct TCP_Server_Info *server = midEntry->server;
#endif
+ struct TCP_Server_Info *server = midEntry->server;
+
+ if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+ midEntry->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
+
midEntry->mid_state = MID_FREE;
atomic_dec(&midCount);
if (midEntry->large_buf)
@@ -319,8 +325,11 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
int val = 1;
__be32 rfc1002_marker;
- if (cifs_rdma_enabled(server) && server->smbd_conn) {
- rc = smbd_send(server, num_rqst, rqst);
+ if (cifs_rdma_enabled(server)) {
+ /* return -EAGAIN when connecting or reconnecting */
+ rc = -EAGAIN;
+ if (server->smbd_conn)
+ rc = smbd_send(server, num_rqst, rqst);
goto smbd_done;
}
@@ -927,7 +936,8 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
}
struct mid_q_entry *
-cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
+cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
+ struct smb_rqst *rqst)
{
int rc;
struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
@@ -999,7 +1009,18 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- server = ses->server;
+ if (!ses->binding) {
+ uint index = 0;
+
+ if (ses->chan_count > 1) {
+ index = (uint)atomic_inc_return(&ses->chan_seq);
+ index %= ses->chan_count;
+ }
+ server = ses->chans[index].server;
+ } else {
+ server = cifs_ses_server(ses);
+ }
+
if (server->tcpStatus == CifsExiting)
return -ENOENT;
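
The channel selection added to compound_send_recv() above rotates requests across established channels with a shared sequence counter, falling back to the master connection while a bind is in flight. A hedged C11 sketch of that round-robin pick; the struct layout is illustrative only.

#include <stdatomic.h>

struct chan { int server; };
struct session {
	int binding;
	atomic_uint chan_seq;
	unsigned int chan_count;
	struct chan chans[4];
};

static int pick_server(struct session *ses)
{
	unsigned int index = 0;

	if (ses->binding)
		return ses->chans[0].server;	/* master connection only */

	if (ses->chan_count > 1)
		index = atomic_fetch_add(&ses->chan_seq, 1) % ses->chan_count;
	return ses->chans[index].server;	/* spread load across channels */
}
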
@@ -1044,7 +1065,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
for (i = 0; i < num_rqst; i++) {
- midQ[i] = server->ops->setup_request(ses, &rqst[i]);
+ midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
if (IS_ERR(midQ[i])) {
revert_current_mid(server, i);
for (j = 0; j < i; j++)
@@ -1119,8 +1140,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
+ midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
- midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
@@ -1287,7 +1308,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = allocate_mid(ses, in_buf, &midQ);
if (rc) {
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
/* Update # of requests on wire to server */
add_credits(server, &credits, 0);
return rc;
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index b7f9ffa1d5f1..aaad4ca1217e 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -48,8 +48,8 @@
#define elf_prstatus compat_elf_prstatus
#define elf_prpsinfo compat_elf_prpsinfo
-#undef ns_to_timeval
-#define ns_to_timeval ns_to_old_timeval32
+#undef ns_to_kernel_old_timeval
+#define ns_to_kernel_old_timeval ns_to_old_timeval32
/*
* To use this file, asm/elf.h must define compat_elf_check_arch.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a7ec2d3dff92..358ea2ecf36b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -11,8 +11,6 @@
* ioctls.
*/
-#include <linux/joystick.h>
-
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/kernel.h>
@@ -27,13 +25,9 @@
#include <linux/file.h>
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
-#include <linux/mtio.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
-#include <linux/raw.h>
#include <linux/blkdev.h>
-#include <linux/rtc.h>
-#include <linux/pci.h>
#include <linux/serial.h>
#include <linux/ctype.h>
#include <linux/syscalls.h>
@@ -42,13 +36,6 @@
#include "internal.h"
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_sock.h>
-#include <net/bluetooth/rfcomm.h>
-
-#include <linux/capi.h>
-#include <linux/gigaset_dev.h>
-
#ifdef CONFIG_BLOCK
#include <linux/cdrom.h>
#include <linux/fd.h>
@@ -60,448 +47,11 @@
#include <linux/uaccess.h>
#include <linux/watchdog.h>
-#include <linux/soundcard.h>
-
#include <linux/hiddev.h>
#include <linux/sort.h>
-#ifdef CONFIG_SPARC
-#include <linux/fb.h>
-#include <asm/fbio.h>
-#endif
-
-#define convert_in_user(srcptr, dstptr) \
-({ \
- typeof(*srcptr) val; \
- \
- get_user(val, srcptr) || put_user(val, dstptr); \
-})
-
-static int do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int err;
-
- err = security_file_ioctl(file, cmd, arg);
- if (err)
- return err;
-
- return vfs_ioctl(file, cmd, arg);
-}
-
-#ifdef CONFIG_BLOCK
-typedef struct sg_io_hdr32 {
- compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
- compat_int_t dxfer_direction; /* [i] data transfer direction */
- unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
- unsigned char mx_sb_len; /* [i] max length to write to sbp */
- unsigned short iovec_count; /* [i] 0 implies no scatter gather */
- compat_uint_t dxfer_len; /* [i] byte count of data transfer */
- compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
- or scatter gather list */
- compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
- compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
- compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
- compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
- compat_int_t pack_id; /* [i->o] unused internally (normally) */
- compat_uptr_t usr_ptr; /* [i->o] unused internally */
- unsigned char status; /* [o] scsi status */
- unsigned char masked_status; /* [o] shifted, masked scsi status */
- unsigned char msg_status; /* [o] messaging level data (optional) */
- unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
- unsigned short host_status; /* [o] errors from host adapter */
- unsigned short driver_status; /* [o] errors from software driver */
- compat_int_t resid; /* [o] dxfer_len - actual_transferred */
- compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
- compat_uint_t info; /* [o] auxiliary information */
-} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
-
-typedef struct sg_iovec32 {
- compat_uint_t iov_base;
- compat_uint_t iov_len;
-} sg_iovec32_t;
-
-static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
-{
- sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
- sg_iovec32_t __user *iov32 = dxferp;
- int i;
-
- for (i = 0; i < iovec_count; i++) {
- u32 base, len;
-
- if (get_user(base, &iov32[i].iov_base) ||
- get_user(len, &iov32[i].iov_len) ||
- put_user(compat_ptr(base), &iov[i].iov_base) ||
- put_user(len, &iov[i].iov_len))
- return -EFAULT;
- }
-
- if (put_user(iov, &sgio->dxferp))
- return -EFAULT;
- return 0;
-}
-
-static int sg_ioctl_trans(struct file *file, unsigned int cmd,
- sg_io_hdr32_t __user *sgio32)
-{
- sg_io_hdr_t __user *sgio;
- u16 iovec_count;
- u32 data;
- void __user *dxferp;
- int err;
- int interface_id;
-
- if (get_user(interface_id, &sgio32->interface_id))
- return -EFAULT;
- if (interface_id != 'S')
- return do_ioctl(file, cmd, (unsigned long)sgio32);
-
- if (get_user(iovec_count, &sgio32->iovec_count))
- return -EFAULT;
-
- {
- void __user *top = compat_alloc_user_space(0);
- void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
- (iovec_count * sizeof(sg_iovec_t)));
- if (new > top)
- return -EINVAL;
-
- sgio = new;
- }
-
- /* Ok, now construct. */
- if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
- (2 * sizeof(int)) +
- (2 * sizeof(unsigned char)) +
- (1 * sizeof(unsigned short)) +
- (1 * sizeof(unsigned int))))
- return -EFAULT;
-
- if (get_user(data, &sgio32->dxferp))
- return -EFAULT;
- dxferp = compat_ptr(data);
- if (iovec_count) {
- if (sg_build_iovec(sgio, dxferp, iovec_count))
- return -EFAULT;
- } else {
- if (put_user(dxferp, &sgio->dxferp))
- return -EFAULT;
- }
-
- {
- unsigned char __user *cmdp;
- unsigned char __user *sbp;
-
- if (get_user(data, &sgio32->cmdp))
- return -EFAULT;
- cmdp = compat_ptr(data);
-
- if (get_user(data, &sgio32->sbp))
- return -EFAULT;
- sbp = compat_ptr(data);
-
- if (put_user(cmdp, &sgio->cmdp) ||
- put_user(sbp, &sgio->sbp))
- return -EFAULT;
- }
-
- if (copy_in_user(&sgio->timeout, &sgio32->timeout,
- 3 * sizeof(int)))
- return -EFAULT;
-
- if (get_user(data, &sgio32->usr_ptr))
- return -EFAULT;
- if (put_user(compat_ptr(data), &sgio->usr_ptr))
- return -EFAULT;
-
- err = do_ioctl(file, cmd, (unsigned long) sgio);
-
- if (err >= 0) {
- void __user *datap;
-
- if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
- sizeof(int)) ||
- get_user(datap, &sgio->usr_ptr) ||
- put_user((u32)(unsigned long)datap,
- &sgio32->usr_ptr) ||
- copy_in_user(&sgio32->status, &sgio->status,
- (4 * sizeof(unsigned char)) +
- (2 * sizeof(unsigned short)) +
- (3 * sizeof(int))))
- err = -EFAULT;
- }
-
- return err;
-}
-
-struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
- char req_state;
- char orphan;
- char sg_io_owned;
- char problem;
- int pack_id;
- compat_uptr_t usr_ptr;
- unsigned int duration;
- int unused;
-};
-
-static int sg_grt_trans(struct file *file,
- unsigned int cmd, struct compat_sg_req_info __user *o)
-{
- int err, i;
- sg_req_info_t __user *r;
- r = compat_alloc_user_space(sizeof(sg_req_info_t)*SG_MAX_QUEUE);
- err = do_ioctl(file, cmd, (unsigned long)r);
- if (err < 0)
- return err;
- for (i = 0; i < SG_MAX_QUEUE; i++) {
- void __user *ptr;
- int d;
-
- if (copy_in_user(o + i, r + i, offsetof(sg_req_info_t, usr_ptr)) ||
- get_user(ptr, &r[i].usr_ptr) ||
- get_user(d, &r[i].duration) ||
- put_user((u32)(unsigned long)(ptr), &o[i].usr_ptr) ||
- put_user(d, &o[i].duration))
- return -EFAULT;
- }
- return err;
-}
-#endif /* CONFIG_BLOCK */
-
-struct sock_fprog32 {
- unsigned short len;
- compat_caddr_t filter;
-};
-
-#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
-#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)
-
-static int ppp_sock_fprog_ioctl_trans(struct file *file,
- unsigned int cmd, struct sock_fprog32 __user *u_fprog32)
-{
- struct sock_fprog __user *u_fprog64 = compat_alloc_user_space(sizeof(struct sock_fprog));
- void __user *fptr64;
- u32 fptr32;
- u16 flen;
-
- if (get_user(flen, &u_fprog32->len) ||
- get_user(fptr32, &u_fprog32->filter))
- return -EFAULT;
-
- fptr64 = compat_ptr(fptr32);
-
- if (put_user(flen, &u_fprog64->len) ||
- put_user(fptr64, &u_fprog64->filter))
- return -EFAULT;
-
- if (cmd == PPPIOCSPASS32)
- cmd = PPPIOCSPASS;
- else
- cmd = PPPIOCSACTIVE;
-
- return do_ioctl(file, cmd, (unsigned long) u_fprog64);
-}
-
-struct ppp_option_data32 {
- compat_caddr_t ptr;
- u32 length;
- compat_int_t transmit;
-};
-#define PPPIOCSCOMPRESS32 _IOW('t', 77, struct ppp_option_data32)
-
-struct ppp_idle32 {
- compat_time_t xmit_idle;
- compat_time_t recv_idle;
-};
-#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32)
-
-static int ppp_gidle(struct file *file, unsigned int cmd,
- struct ppp_idle32 __user *idle32)
-{
- struct ppp_idle __user *idle;
- __kernel_time_t xmit, recv;
- int err;
-
- idle = compat_alloc_user_space(sizeof(*idle));
-
- err = do_ioctl(file, PPPIOCGIDLE, (unsigned long) idle);
-
- if (!err) {
- if (get_user(xmit, &idle->xmit_idle) ||
- get_user(recv, &idle->recv_idle) ||
- put_user(xmit, &idle32->xmit_idle) ||
- put_user(recv, &idle32->recv_idle))
- err = -EFAULT;
- }
- return err;
-}
-
-static int ppp_scompress(struct file *file, unsigned int cmd,
- struct ppp_option_data32 __user *odata32)
-{
- struct ppp_option_data __user *odata;
- __u32 data;
- void __user *datap;
-
- odata = compat_alloc_user_space(sizeof(*odata));
-
- if (get_user(data, &odata32->ptr))
- return -EFAULT;
-
- datap = compat_ptr(data);
- if (put_user(datap, &odata->ptr))
- return -EFAULT;
-
- if (copy_in_user(&odata->length, &odata32->length,
- sizeof(__u32) + sizeof(int)))
- return -EFAULT;
-
- return do_ioctl(file, PPPIOCSCOMPRESS, (unsigned long) odata);
-}
-
-#ifdef CONFIG_BLOCK
-struct mtget32 {
- compat_long_t mt_type;
- compat_long_t mt_resid;
- compat_long_t mt_dsreg;
- compat_long_t mt_gstat;
- compat_long_t mt_erreg;
- compat_daddr_t mt_fileno;
- compat_daddr_t mt_blkno;
-};
-#define MTIOCGET32 _IOR('m', 2, struct mtget32)
-
-struct mtpos32 {
- compat_long_t mt_blkno;
-};
-#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
-
-static int mt_ioctl_trans(struct file *file,
- unsigned int cmd, void __user *argp)
-{
- /* NULL initialization to make gcc shut up */
- struct mtget __user *get = NULL;
- struct mtget32 __user *umget32;
- struct mtpos __user *pos = NULL;
- struct mtpos32 __user *upos32;
- unsigned long kcmd;
- void *karg;
- int err = 0;
-
- switch(cmd) {
- case MTIOCPOS32:
- kcmd = MTIOCPOS;
- pos = compat_alloc_user_space(sizeof(*pos));
- karg = pos;
- break;
- default: /* MTIOCGET32 */
- kcmd = MTIOCGET;
- get = compat_alloc_user_space(sizeof(*get));
- karg = get;
- break;
- }
- if (karg == NULL)
- return -EFAULT;
- err = do_ioctl(file, kcmd, (unsigned long)karg);
- if (err)
- return err;
- switch (cmd) {
- case MTIOCPOS32:
- upos32 = argp;
- err = convert_in_user(&pos->mt_blkno, &upos32->mt_blkno);
- break;
- case MTIOCGET32:
- umget32 = argp;
- err = convert_in_user(&get->mt_type, &umget32->mt_type);
- err |= convert_in_user(&get->mt_resid, &umget32->mt_resid);
- err |= convert_in_user(&get->mt_dsreg, &umget32->mt_dsreg);
- err |= convert_in_user(&get->mt_gstat, &umget32->mt_gstat);
- err |= convert_in_user(&get->mt_erreg, &umget32->mt_erreg);
- err |= convert_in_user(&get->mt_fileno, &umget32->mt_fileno);
- err |= convert_in_user(&get->mt_blkno, &umget32->mt_blkno);
- break;
- }
-	return err ? -EFAULT : 0;
-}
-
-#endif /* CONFIG_BLOCK */
-
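mt_ioctl_trans() above leans on convert_in_user(), which is defined earlier in this file (outside this hunk). It is essentially the following get_user()/put_user() pair (paraphrased, not quoted verbatim); note that the implicit narrowing from the native long fields to the compat_long_t fields happens in the put_user() step:

#define convert_in_user(srcptr, dstptr)                         \
({                                                              \
        typeof(*srcptr) val;                                    \
                                                                \
        get_user(val, srcptr) || put_user(val, dstptr);         \
})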
-/* Bluetooth ioctls */
-#define HCIUARTSETPROTO _IOW('U', 200, int)
-#define HCIUARTGETPROTO _IOR('U', 201, int)
-#define HCIUARTGETDEVICE _IOR('U', 202, int)
-#define HCIUARTSETFLAGS _IOW('U', 203, int)
-#define HCIUARTGETFLAGS _IOR('U', 204, int)
-
-#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
-#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
-#define RTC_EPOCH_SET32 _IOW('p', 0x0e, compat_ulong_t)
-
-static int rtc_ioctl(struct file *file,
- unsigned cmd, void __user *argp)
-{
- unsigned long __user *valp = compat_alloc_user_space(sizeof(*valp));
- int ret;
-
- if (valp == NULL)
- return -EFAULT;
- switch (cmd) {
- case RTC_IRQP_READ32:
- case RTC_EPOCH_READ32:
- ret = do_ioctl(file, (cmd == RTC_IRQP_READ32) ?
- RTC_IRQP_READ : RTC_EPOCH_READ,
- (unsigned long)valp);
- if (ret)
- return ret;
- return convert_in_user(valp, (unsigned int __user *)argp);
- case RTC_IRQP_SET32:
- return do_ioctl(file, RTC_IRQP_SET, (unsigned long)argp);
- case RTC_EPOCH_SET32:
- return do_ioctl(file, RTC_EPOCH_SET, (unsigned long)argp);
- }
-
- return -ENOIOCTLCMD;
-}
-
-/* on ia32 l_start is on a 32-bit boundary */
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
-struct space_resv_32 {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start __attribute__((packed));
- /* len == 0 means until end of file */
- __s64 l_len __attribute__((packed));
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-};
-
-#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
-#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
-
-/* just account for different alignment */
-static int compat_ioctl_preallocate(struct file *file,
- struct space_resv_32 __user *p32)
-{
- struct space_resv __user *p = compat_alloc_user_space(sizeof(*p));
-
- if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
- copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
- copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
- copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
- copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
- copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
- return -EFAULT;
-
- return ioctl_preallocate(file, p);
-}
-#endif
-
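The field-by-field copy above exists purely because of the alignment difference the comment mentions. A standalone illustration (hypothetical mirror structs, GCC/Clang attribute syntax, LP64 assumed):

#include <stddef.h>

struct resv_native { short t, w; long long start; };  /* natural alignment */
struct resv_ia32   { short t, w; long long start; } __attribute__((packed));

_Static_assert(offsetof(struct resv_native, start) == 8, "8-byte aligned");
_Static_assert(offsetof(struct resv_ia32, start)  == 4, "ia32 packing");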
/*
* simple reversible transform to make our table more evenly
* distributed after sorting.
@@ -509,33 +59,7 @@ static int compat_ioctl_preallocate(struct file *file,
#define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
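Since everything below is keyed by XFORM(), a tiny userspace demonstration may help (an editor's sketch): nearby ioctl numbers, which differ only in their low bits, are spread across the 32-bit space, and applying the transform twice returns the original value, so distinct commands can never collide in the table.

#include <stdio.h>
#include <stdint.h>

static uint32_t xform(uint32_t i)
{
        return (i ^ (i << 27) ^ (i << 17)) & 0xffffffff;
}

int main(void)
{
        /* a few consecutive tty-range ioctl numbers, purely as a demo */
        for (uint32_t cmd = 0x5450; cmd < 0x5454; cmd++)
                printf("%#010x -> %#010x (round trip %#010x)\n",
                       cmd, xform(cmd), xform(xform(cmd)));
        return 0;
}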
-/* ioctl should not be warned about even if it's not implemented.
- Valid reasons to use this:
- - It is implemented with ->compat_ioctl on some device, but programs
- call it on others too.
- - The ioctl is not implemented in the native kernel, but programs
- call it commonly anyways.
- Most other reasons are not valid. */
-#define IGNORE_IOCTL(cmd) COMPATIBLE_IOCTL(cmd)
-
static unsigned int ioctl_pointer[] = {
-/* compatible ioctls first */
-/* Little t */
-COMPATIBLE_IOCTL(TIOCOUTQ)
-/* Little f */
-COMPATIBLE_IOCTL(FIOCLEX)
-COMPATIBLE_IOCTL(FIONCLEX)
-COMPATIBLE_IOCTL(FIOASYNC)
-COMPATIBLE_IOCTL(FIONBIO)
-COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
-COMPATIBLE_IOCTL(FS_IOC_FIEMAP)
-/* 0x00 */
-COMPATIBLE_IOCTL(FIBMAP)
-COMPATIBLE_IOCTL(FIGETBSZ)
-/* 'X' - originally XFS but some now in the VFS */
-COMPATIBLE_IOCTL(FIFREEZE)
-COMPATIBLE_IOCTL(FITHAW)
-COMPATIBLE_IOCTL(FITRIM)
#ifdef CONFIG_BLOCK
/* Big S */
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
@@ -547,43 +71,10 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
-/* Big V (don't complain on serial console) */
-IGNORE_IOCTL(VT_OPENQRY)
-IGNORE_IOCTL(VT_GETMODE)
-/* Little p (/dev/rtc, /dev/envctrl, etc.) */
-COMPATIBLE_IOCTL(RTC_AIE_ON)
-COMPATIBLE_IOCTL(RTC_AIE_OFF)
-COMPATIBLE_IOCTL(RTC_UIE_ON)
-COMPATIBLE_IOCTL(RTC_UIE_OFF)
-COMPATIBLE_IOCTL(RTC_PIE_ON)
-COMPATIBLE_IOCTL(RTC_PIE_OFF)
-COMPATIBLE_IOCTL(RTC_WIE_ON)
-COMPATIBLE_IOCTL(RTC_WIE_OFF)
-COMPATIBLE_IOCTL(RTC_ALM_SET)
-COMPATIBLE_IOCTL(RTC_ALM_READ)
-COMPATIBLE_IOCTL(RTC_RD_TIME)
-COMPATIBLE_IOCTL(RTC_SET_TIME)
-COMPATIBLE_IOCTL(RTC_WKALM_SET)
-COMPATIBLE_IOCTL(RTC_WKALM_RD)
-/*
- * These two are only for the sbus rtc driver, but
- * hwclock tries them on every rtc device first when
- * running on sparc. On other architectures the entries
- * are useless but harmless.
- */
-COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
-COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
-/* Little m */
-COMPATIBLE_IOCTL(MTIOCTOP)
-/* Socket level stuff */
-COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
-/* md calls this on random blockdevs */
-IGNORE_IOCTL(RAID_VERSION)
-/* qemu/qemu-img might call these two on plain files for probing */
-IGNORE_IOCTL(CDROM_DRIVE_STATUS)
-IGNORE_IOCTL(FDGETPRM32)
/* SG stuff */
+COMPATIBLE_IOCTL(SG_IO)
+COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
COMPATIBLE_IOCTL(SG_EMULATED_HOST)
@@ -607,314 +98,6 @@ COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
#endif
-/* PPP stuff */
-COMPATIBLE_IOCTL(PPPIOCGFLAGS)
-COMPATIBLE_IOCTL(PPPIOCSFLAGS)
-COMPATIBLE_IOCTL(PPPIOCGASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCGUNIT)
-COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCGMRU)
-COMPATIBLE_IOCTL(PPPIOCSMRU)
-COMPATIBLE_IOCTL(PPPIOCSMAXCID)
-COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCXFERUNIT)
-/* PPPIOCSCOMPRESS is translated */
-COMPATIBLE_IOCTL(PPPIOCGNPMODE)
-COMPATIBLE_IOCTL(PPPIOCSNPMODE)
-COMPATIBLE_IOCTL(PPPIOCGDEBUG)
-COMPATIBLE_IOCTL(PPPIOCSDEBUG)
-/* PPPIOCSPASS is translated */
-/* PPPIOCSACTIVE is translated */
-/* PPPIOCGIDLE is translated */
-COMPATIBLE_IOCTL(PPPIOCNEWUNIT)
-COMPATIBLE_IOCTL(PPPIOCATTACH)
-COMPATIBLE_IOCTL(PPPIOCDETACH)
-COMPATIBLE_IOCTL(PPPIOCSMRRU)
-COMPATIBLE_IOCTL(PPPIOCCONNECT)
-COMPATIBLE_IOCTL(PPPIOCDISCONN)
-COMPATIBLE_IOCTL(PPPIOCATTCHAN)
-COMPATIBLE_IOCTL(PPPIOCGCHAN)
-COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* Big A */
-/* sparc only */
-/* Big Q for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE)
-COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL)
-COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE)
-/* Big T for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_START)
-COMPATIBLE_IOCTL(SNDCTL_TMR_STOP)
-COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO)
-COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME)
-COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT)
-/* Little m for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD)
-/* Big P for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_DSP_RESET)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED)
-COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS)
-COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_POST)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR)
-/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
-/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY)
-COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER)
-/* Big C for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_COPR_RESET)
-COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE)
-COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA)
-COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RUN)
-COMPATIBLE_IOCTL(SNDCTL_COPR_HALT)
-COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG)
-/* Big M for sound/OSS */
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3)
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR))
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE)
-/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
-/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3)
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR))
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE)
-/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
-/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC)
-COMPATIBLE_IOCTL(SOUND_MIXER_INFO)
-COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO)
-COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS)
-COMPATIBLE_IOCTL(SOUND_MIXER_AGC)
-COMPATIBLE_IOCTL(SOUND_MIXER_3DSE)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
-COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
-COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
-COMPATIBLE_IOCTL(OSS_GETVERSION)
-/* Raw devices */
-COMPATIBLE_IOCTL(RAW_SETBIND)
-COMPATIBLE_IOCTL(RAW_GETBIND)
-/* Watchdog */
-COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
-COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
-COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS)
-COMPATIBLE_IOCTL(WDIOC_GETTEMP)
-COMPATIBLE_IOCTL(WDIOC_SETOPTIONS)
-COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
-COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_SETPRETIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_GETPRETIMEOUT)
-/* Big R */
-COMPATIBLE_IOCTL(RNDGETENTCNT)
-COMPATIBLE_IOCTL(RNDADDTOENTCNT)
-COMPATIBLE_IOCTL(RNDGETPOOL)
-COMPATIBLE_IOCTL(RNDADDENTROPY)
-COMPATIBLE_IOCTL(RNDZAPENTCNT)
-COMPATIBLE_IOCTL(RNDCLEARPOOL)
-/* Bluetooth */
-COMPATIBLE_IOCTL(HCIDEVUP)
-COMPATIBLE_IOCTL(HCIDEVDOWN)
-COMPATIBLE_IOCTL(HCIDEVRESET)
-COMPATIBLE_IOCTL(HCIDEVRESTAT)
-COMPATIBLE_IOCTL(HCIGETDEVLIST)
-COMPATIBLE_IOCTL(HCIGETDEVINFO)
-COMPATIBLE_IOCTL(HCIGETCONNLIST)
-COMPATIBLE_IOCTL(HCIGETCONNINFO)
-COMPATIBLE_IOCTL(HCIGETAUTHINFO)
-COMPATIBLE_IOCTL(HCISETRAW)
-COMPATIBLE_IOCTL(HCISETSCAN)
-COMPATIBLE_IOCTL(HCISETAUTH)
-COMPATIBLE_IOCTL(HCISETENCRYPT)
-COMPATIBLE_IOCTL(HCISETPTYPE)
-COMPATIBLE_IOCTL(HCISETLINKPOL)
-COMPATIBLE_IOCTL(HCISETLINKMODE)
-COMPATIBLE_IOCTL(HCISETACLMTU)
-COMPATIBLE_IOCTL(HCISETSCOMTU)
-COMPATIBLE_IOCTL(HCIBLOCKADDR)
-COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
-COMPATIBLE_IOCTL(HCIINQUIRY)
-COMPATIBLE_IOCTL(HCIUARTSETPROTO)
-COMPATIBLE_IOCTL(HCIUARTGETPROTO)
-COMPATIBLE_IOCTL(HCIUARTGETDEVICE)
-COMPATIBLE_IOCTL(HCIUARTSETFLAGS)
-COMPATIBLE_IOCTL(HCIUARTGETFLAGS)
-COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
-COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
-COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
-COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
-COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
-/* CAPI */
-COMPATIBLE_IOCTL(CAPI_REGISTER)
-COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
-COMPATIBLE_IOCTL(CAPI_GET_VERSION)
-COMPATIBLE_IOCTL(CAPI_GET_SERIAL)
-COMPATIBLE_IOCTL(CAPI_GET_PROFILE)
-COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD)
-COMPATIBLE_IOCTL(CAPI_GET_ERRCODE)
-COMPATIBLE_IOCTL(CAPI_INSTALLED)
-COMPATIBLE_IOCTL(CAPI_GET_FLAGS)
-COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
-COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
-COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
-COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
-/* Misc. */
-COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
-COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
-COMPATIBLE_IOCTL(PCIIOC_CONTROLLER)
-COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO)
-COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM)
-COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE)
-/* hiddev */
-COMPATIBLE_IOCTL(HIDIOCGVERSION)
-COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
-COMPATIBLE_IOCTL(HIDIOCGDEVINFO)
-COMPATIBLE_IOCTL(HIDIOCGSTRING)
-COMPATIBLE_IOCTL(HIDIOCINITREPORT)
-COMPATIBLE_IOCTL(HIDIOCGREPORT)
-COMPATIBLE_IOCTL(HIDIOCSREPORT)
-COMPATIBLE_IOCTL(HIDIOCGREPORTINFO)
-COMPATIBLE_IOCTL(HIDIOCGFIELDINFO)
-COMPATIBLE_IOCTL(HIDIOCGUSAGE)
-COMPATIBLE_IOCTL(HIDIOCSUSAGE)
-COMPATIBLE_IOCTL(HIDIOCGUCODE)
-COMPATIBLE_IOCTL(HIDIOCGFLAG)
-COMPATIBLE_IOCTL(HIDIOCSFLAG)
-COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX)
-COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO)
-/* joystick */
-COMPATIBLE_IOCTL(JSIOCGVERSION)
-COMPATIBLE_IOCTL(JSIOCGAXES)
-COMPATIBLE_IOCTL(JSIOCGBUTTONS)
-COMPATIBLE_IOCTL(JSIOCGNAME(0))
-
-/* fat 'r' ioctls. These are handled by fat with ->compat_ioctl,
- but we don't want warnings on other file systems. So declare
- them as compatible here. */
-#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2])
-#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2])
-
-IGNORE_IOCTL(VFAT_IOCTL_READDIR_BOTH32)
-IGNORE_IOCTL(VFAT_IOCTL_READDIR_SHORT32)
-
-#ifdef CONFIG_SPARC
-/* Sparc framebuffers, handled in sbusfb_compat_ioctl() */
-IGNORE_IOCTL(FBIOGTYPE)
-IGNORE_IOCTL(FBIOSATTR)
-IGNORE_IOCTL(FBIOGATTR)
-IGNORE_IOCTL(FBIOSVIDEO)
-IGNORE_IOCTL(FBIOGVIDEO)
-IGNORE_IOCTL(FBIOSCURPOS)
-IGNORE_IOCTL(FBIOGCURPOS)
-IGNORE_IOCTL(FBIOGCURMAX)
-IGNORE_IOCTL(FBIOPUTCMAP32)
-IGNORE_IOCTL(FBIOGETCMAP32)
-IGNORE_IOCTL(FBIOSCURSOR32)
-IGNORE_IOCTL(FBIOGCURSOR32)
-#endif
};
/*
@@ -927,51 +110,12 @@ IGNORE_IOCTL(FBIOGCURSOR32)
static long do_ioctl_trans(unsigned int cmd,
unsigned long arg, struct file *file)
{
- void __user *argp = compat_ptr(arg);
-
- switch (cmd) {
- case PPPIOCGIDLE32:
- return ppp_gidle(file, cmd, argp);
- case PPPIOCSCOMPRESS32:
- return ppp_scompress(file, cmd, argp);
- case PPPIOCSPASS32:
- case PPPIOCSACTIVE32:
- return ppp_sock_fprog_ioctl_trans(file, cmd, argp);
-#ifdef CONFIG_BLOCK
- case SG_IO:
- return sg_ioctl_trans(file, cmd, argp);
- case SG_GET_REQUEST_TABLE:
- return sg_grt_trans(file, cmd, argp);
- case MTIOCGET32:
- case MTIOCPOS32:
- return mt_ioctl_trans(file, cmd, argp);
-#endif
- /* Not implemented in the native kernel */
- case RTC_IRQP_READ32:
- case RTC_IRQP_SET32:
- case RTC_EPOCH_READ32:
- case RTC_EPOCH_SET32:
- return rtc_ioctl(file, cmd, argp);
- }
-
- /*
- * These take an integer instead of a pointer as 'arg',
- * so we must not do a compat_ptr() translation.
- */
- switch (cmd) {
- /* RAID */
- case HOT_REMOVE_DISK:
- case HOT_ADD_DISK:
- case SET_DISK_FAULTY:
- case SET_BITMAP_FILE:
- return vfs_ioctl(file, cmd, arg);
- }
-
return -ENOIOCTLCMD;
}
static int compat_ioctl_check_table(unsigned int xcmd)
{
+#ifdef CONFIG_BLOCK
int i;
const int max = ARRAY_SIZE(ioctl_pointer) - 1;
@@ -990,6 +134,9 @@ static int compat_ioctl_check_table(unsigned int xcmd)
i--;
return ioctl_pointer[i] == xcmd;
+#else
+ return 0;
+#endif
}
COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
@@ -1006,44 +153,62 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
if (error)
goto out_fput;
- /*
- * To allow the compat_ioctl handlers to be self contained
- * we need to check the common ioctls here first.
- * Just handle them with the standard handlers below.
- */
switch (cmd) {
+ /* these are never seen by ->ioctl(), no argument or int argument */
case FIOCLEX:
case FIONCLEX:
+ case FIFREEZE:
+ case FITHAW:
+ case FICLONE:
+ goto do_ioctl;
+ /* these are never seen by ->ioctl(), pointer argument */
case FIONBIO:
case FIOASYNC:
case FIOQSIZE:
- break;
-
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+ case FS_IOC_FIEMAP:
+ case FIGETBSZ:
+ case FICLONERANGE:
+ case FIDEDUPERANGE:
+ goto found_handler;
+ /*
+ * The next group is the stuff handled inside file_ioctl().
+ * For regular files these never reach ->ioctl(); for
+ * devices, sockets, etc. they do and one (FIONREAD) is
+ * even accepted in some cases. In all those cases
+	 * the argument has the same type, so we can handle these
+ * here, shunting them towards do_vfs_ioctl().
+ * ->compat_ioctl() will never see any of those.
+ */
+ /* pointer argument, never actually handled by ->ioctl() */
+ case FIBMAP:
+ goto found_handler;
+ /* handled by some ->ioctl(); always a pointer to int */
+ case FIONREAD:
+ goto found_handler;
+ /* these get messy on amd64 due to alignment differences */
+#if defined(CONFIG_X86_64)
case FS_IOC_RESVSP_32:
case FS_IOC_RESVSP64_32:
- error = compat_ioctl_preallocate(f.file, compat_ptr(arg));
+ error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
+ goto out_fput;
+ case FS_IOC_UNRESVSP_32:
+ case FS_IOC_UNRESVSP64_32:
+ error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
+ compat_ptr(arg));
+ goto out_fput;
+ case FS_IOC_ZERO_RANGE_32:
+ error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
+ compat_ptr(arg));
goto out_fput;
#else
case FS_IOC_RESVSP:
case FS_IOC_RESVSP64:
- error = ioctl_preallocate(f.file, compat_ptr(arg));
- goto out_fput;
+ case FS_IOC_UNRESVSP:
+ case FS_IOC_UNRESVSP64:
+ case FS_IOC_ZERO_RANGE:
+ goto found_handler;
#endif
- case FICLONE:
- case FICLONERANGE:
- case FIDEDUPERANGE:
- case FS_IOC_FIEMAP:
- goto do_ioctl;
-
- case FIBMAP:
- case FIGETBSZ:
- case FIONREAD:
- if (S_ISREG(file_inode(f.file)->i_mode))
- break;
- /*FALL THROUGH*/
-
default:
if (f.file->f_op->compat_ioctl) {
error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
diff --git a/fs/dax.c b/fs/dax.c
index 2cc43cd914eb..1f1f0201cad1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1091,7 +1091,7 @@ EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct block_device *bdev = iomap->bdev;
struct dax_device *dax_dev = iomap->dax_dev;
@@ -1248,7 +1248,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
unsigned flags = IOMAP_FAULT;
int error, major = 0;
bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -1293,7 +1294,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
* the file system block size to be equal the page size, which means
* that we never have to deal with more than a single extent here.
*/
- error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
+ error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
if (iomap_errp)
*iomap_errp = error;
if (error) {
@@ -1472,7 +1473,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
struct inode *inode = mapping->host;
vm_fault_t result = VM_FAULT_FALLBACK;
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
pgoff_t max_pgoff;
void *entry;
loff_t pos;
@@ -1547,7 +1549,8 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
* to look up our filesystem block.
*/
pos = (loff_t)xas.xa_index << PAGE_SHIFT;
- error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
+ error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
+ &srcmap);
if (error)
goto unlock_entry;
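For readers tracking the extra parameter through these hunks: the iomap_ops callback this file implements has grown a source-map argument, so its prototype is now roughly the following (paraphrased from the call sites above; srcmap describes the mapping to read from in copy-on-write situations):

int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
                   unsigned flags, struct iomap *iomap,
                   struct iomap *srcmap);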
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 87846aad594b..dede25247b81 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -420,20 +420,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value)
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8,
&fops_u8_ro, &fops_u8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u8);
@@ -465,20 +456,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value)
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16,
&fops_u16_ro, &fops_u16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u16);
@@ -556,20 +538,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
* This function creates a file in debugfs with the given name that
* contains the value of the variable @value. If the @mode variable is so
* set, it can be read from, and written to.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value %ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64,
&fops_u64_ro, &fops_u64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u64);
@@ -660,10 +633,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n");
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value)
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8,
&fops_x8_ro, &fops_x8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x8);
@@ -678,10 +651,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value)
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16,
&fops_x16_ro, &fops_x16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x16);
@@ -696,10 +669,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value)
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32,
&fops_x32_ro, &fops_x32_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x32);
@@ -714,10 +687,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_x32);
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64,
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64,
&fops_x64_ro, &fops_x64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x64);
@@ -748,12 +721,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n");
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent, size_t *value)
+void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value,
- &fops_size_t, &fops_size_t_ro,
- &fops_size_t_wo);
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_size_t,
+ &fops_size_t_ro, &fops_size_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_size_t);
@@ -785,12 +757,11 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
* @value: a pointer to the variable that the file should read to and write
* from.
*/
-struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
+void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
{
- return debugfs_create_mode_unsafe(name, mode, parent, value,
- &fops_atomic_t, &fops_atomic_t_ro,
- &fops_atomic_t_wo);
+ debugfs_create_mode_unsafe(name, mode, parent, value, &fops_atomic_t,
+ &fops_atomic_t_ro, &fops_atomic_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 9329ced91f1d..0ec4f270139f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -221,27 +221,6 @@ static inline struct page *dio_get_page(struct dio *dio,
}
/*
- * Warn about a page cache invalidation failure during a direct io write.
- */
-void dio_warn_stale_pagecache(struct file *filp)
-{
- static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
- char pathname[128];
- struct inode *inode = file_inode(filp);
- char *path;
-
- errseq_set(&inode->i_mapping->wb_err, -EIO);
- if (__ratelimit(&_rs)) {
- path = file_path(filp, pathname, sizeof(pathname));
- if (IS_ERR(path))
- path = "(unknown)";
- pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
- pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
- current->comm);
- }
-}
-
-/*
* dio_complete() - called when all DIO BIO I/O has been completed
*
* This drops i_dio_count, lets interested parties know that a DIO operation
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index feecb57defa7..5fb45d865ce5 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -378,6 +378,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return rc;
switch (cmd) {
+ case FITRIM:
case FS_IOC32_GETFLAGS:
case FS_IOC32_SETFLAGS:
case FS_IOC32_GETVERSION:
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 9d634d3a1845..74b0aaa7114c 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -3,6 +3,7 @@
config EROFS_FS
tristate "EROFS filesystem support"
depends on BLOCK
+ select LIBCRC32C
help
EROFS (Enhanced Read-Only File System) is a lightweight
read-only file system with modern designs (eg. page-sized
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 19f89f9fb10c..2890a67a1ded 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
victim = availables[--top];
get_page(victim);
} else {
- victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+ victim = erofs_allocpage(pagepool, GFP_KERNEL);
if (!victim)
return -ENOMEM;
victim->mapping = Z_EROFS_MAPPING_STAGING;
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index b1ee5654750d..385fa49c7749 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -11,6 +11,8 @@
#define EROFS_SUPER_OFFSET 1024
+#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
+
/*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
* be incompatible with this kernel version.
@@ -37,7 +39,6 @@ struct erofs_super_block {
__u8 uuid[16]; /* 128-bit uuid for volume */
__u8 volume_name[16]; /* volume name */
__le32 feature_incompat;
-
__u8 reserved2[44];
};
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 544a453f3076..1ed5beff7d11 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -85,6 +85,7 @@ struct erofs_sb_info {
u8 uuid[16]; /* 128-bit uuid for volume */
u8 volume_name[16]; /* volume name */
+ u32 feature_compat;
u32 feature_incompat;
unsigned int mount_opt;
@@ -278,9 +279,7 @@ static inline unsigned int erofs_inode_datalayout(unsigned int value)
extern const struct super_operations erofs_sops;
extern const struct address_space_operations erofs_raw_access_aops;
-#ifdef CONFIG_EROFS_FS_ZIP
-extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
-#endif
+extern const struct address_space_operations z_erofs_aops;
/*
* Logical to physical block mapping, used by erofs_map_blocks()
@@ -382,7 +381,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
extern const struct file_operations erofs_dir_fops;
/* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
#if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr);
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 0e369494f2f2..057e6d7b5b7f 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -9,6 +9,7 @@
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
+#include <linux/crc32c.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
@@ -46,6 +47,30 @@ void _erofs_info(struct super_block *sb, const char *function,
va_end(args);
}
+static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
+{
+ struct erofs_super_block *dsb;
+ u32 expected_crc, crc;
+
+ dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
+ EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
+ if (!dsb)
+ return -ENOMEM;
+
+ expected_crc = le32_to_cpu(dsb->checksum);
+ dsb->checksum = 0;
+ /* to allow for x86 boot sectors and other oddities. */
+ crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
+ kfree(dsb);
+
+ if (crc != expected_crc) {
+ erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
+ crc, expected_crc);
+ return -EBADMSG;
+ }
+ return 0;
+}
+
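The verify path above implies the producer-side rule: whatever builds the image must hash the same EROFS_BLKSIZ - EROFS_SUPER_OFFSET bytes with the checksum field zeroed. A hypothetical mkfs-style helper, not part of this patch (crc32c() here stands for any CRC-32C implementation seeded the same way):

/* hypothetical image-builder helper; caller stores cpu_to_le32(result) */
static uint32_t erofs_sb_mkchecksum(struct erofs_super_block *dsb)
{
        dsb->checksum = 0;      /* field must be zero while hashing */
        return crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
}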
static void erofs_inode_init_once(void *ptr)
{
struct erofs_inode *vi = ptr;
@@ -112,7 +137,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi = EROFS_SB(sb);
- data = kmap_atomic(page);
+ data = kmap(page);
dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
ret = -EINVAL;
@@ -121,6 +146,13 @@ static int erofs_read_superblock(struct super_block *sb)
goto out;
}
+ sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
+ if (sbi->feature_compat & EROFS_FEATURE_COMPAT_SB_CHKSUM) {
+ ret = erofs_superblock_csum_verify(sb, data);
+ if (ret)
+ goto out;
+ }
+
blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) {
@@ -155,7 +187,7 @@ static int erofs_read_superblock(struct super_block *sb)
}
ret = 0;
out:
- kunmap_atomic(data);
+ kunmap(page);
put_page(page);
return ret;
}
@@ -566,9 +598,6 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",cache_strategy=readahead");
} else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
seq_puts(seq, ",cache_strategy=readaround");
- } else {
- seq_puts(seq, ",cache_strategy=(unknown)");
- DBG_BUGON(1);
}
#endif
return 0;
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index d92b3e753a6f..1e8e1450d5b0 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -7,7 +7,7 @@
#include "internal.h"
#include <linux/pagevec.h>
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
struct page *page;
@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
DBG_BUGON(page_ref_count(page) != 1);
list_del(&page->lru);
} else {
- page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
+ page = alloc_page(gfp);
}
return page;
}
@@ -149,8 +149,7 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
}
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
- struct erofs_workgroup *grp,
- bool cleanup)
+ struct erofs_workgroup *grp)
{
/*
* If managed cache is on, refcount of workgroups
@@ -188,8 +187,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
}
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
- unsigned long nr_shrink,
- bool cleanup)
+ unsigned long nr_shrink)
{
pgoff_t first_index = 0;
void *batch[PAGEVEC_SIZE];
@@ -208,7 +206,7 @@ repeat:
first_index = grp->index + 1;
/* try to shrink each valid workgroup */
- if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
+ if (!erofs_try_to_release_workgroup(sbi, grp))
continue;
++freed;
@@ -245,7 +243,8 @@ void erofs_shrinker_unregister(struct super_block *sb)
struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex);
- erofs_shrink_workstation(sbi, ~0UL, true);
+ /* clean up all remaining workgroups in memory */
+ erofs_shrink_workstation(sbi, ~0UL);
spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list);
@@ -294,7 +293,7 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
spin_unlock(&erofs_sb_list_lock);
sbi->shrinker_run_no = run_no;
- freed += erofs_shrink_workstation(sbi, nr, false);
+ freed += erofs_shrink_workstation(sbi, nr);
spin_lock(&erofs_sb_list_lock);
/* Get the next list element before we move this one */
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index fad80c97d247..ca99425a4536 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -337,9 +337,9 @@ retry:
return COLLECT_PRIMARY; /* :( better luck next time */
}
-static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
{
struct erofs_workgroup *grp;
struct z_erofs_pcluster *pcl;
@@ -349,20 +349,20 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
if (!grp)
- return NULL;
+ return -ENOENT;
pcl = container_of(grp, struct z_erofs_pcluster, obj);
if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
DBG_BUGON(1);
erofs_workgroup_put(grp);
- return ERR_PTR(-EFSCORRUPTED);
+ return -EFSCORRUPTED;
}
cl = z_erofs_primarycollection(pcl);
if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
DBG_BUGON(1);
erofs_workgroup_put(grp);
- return ERR_PTR(-EFSCORRUPTED);
+ return -EFSCORRUPTED;
}
length = READ_ONCE(pcl->length);
@@ -370,7 +370,7 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
DBG_BUGON(1);
erofs_workgroup_put(grp);
- return ERR_PTR(-EFSCORRUPTED);
+ return -EFSCORRUPTED;
}
} else {
unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
@@ -394,12 +394,12 @@ static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
clt->tailpcl = NULL;
clt->pcl = pcl;
clt->cl = cl;
- return cl;
+ return 0;
}
-static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
- struct inode *inode,
- struct erofs_map_blocks *map)
+static int z_erofs_register_collection(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
{
struct z_erofs_pcluster *pcl;
struct z_erofs_collection *cl;
@@ -408,7 +408,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
/* no available workgroup, let's allocate one */
pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
if (!pcl)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
z_erofs_pcluster_init_always(pcl);
pcl->obj.index = map->m_pa >> PAGE_SHIFT;
@@ -442,7 +442,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
if (err) {
mutex_unlock(&cl->lock);
kmem_cache_free(pcluster_cachep, pcl);
- return ERR_PTR(-EAGAIN);
+ return -EAGAIN;
}
/* used to check tail merging loop due to corrupted images */
if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
@@ -450,14 +450,14 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
clt->owned_head = &pcl->next;
clt->pcl = pcl;
clt->cl = cl;
- return cl;
+ return 0;
}
static int z_erofs_collector_begin(struct z_erofs_collector *clt,
struct inode *inode,
struct erofs_map_blocks *map)
{
- struct z_erofs_collection *cl;
+ int ret;
DBG_BUGON(clt->cl);
@@ -471,19 +471,22 @@ static int z_erofs_collector_begin(struct z_erofs_collector *clt,
}
repeat:
- cl = cllookup(clt, inode, map);
- if (!cl) {
- cl = clregister(clt, inode, map);
+ ret = z_erofs_lookup_collection(clt, inode, map);
+ if (ret == -ENOENT) {
+ ret = z_erofs_register_collection(clt, inode, map);
- if (cl == ERR_PTR(-EAGAIN))
+ /* someone registered at the same time, give another try */
+ if (ret == -EAGAIN) {
+ cond_resched();
goto repeat;
+ }
}
- if (IS_ERR(cl))
- return PTR_ERR(cl);
+ if (ret)
+ return ret;
z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
- cl->pagevec, cl->vcnt);
+ clt->cl->pagevec, clt->cl->vcnt);
clt->compressedpages = clt->pcl->compressed_pages;
if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
@@ -543,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
return true;
}
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
- gfp_t gfp)
-{
- struct page *page = erofs_allocpage(pagepool, gfp, true);
-
- page->mapping = Z_EROFS_MAPPING_STAGING;
- return page;
-}
-
static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
unsigned int cachestrategy,
erofs_off_t la)
@@ -571,7 +565,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct list_head *pagepool)
{
struct inode *const inode = fe->inode;
- struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct erofs_map_blocks *const map = &fe->map;
struct z_erofs_collector *const clt = &fe->clt;
const loff_t offset = page_offset(page);
@@ -658,8 +652,9 @@ retry:
/* should allocate an additional staging page for pagevec */
if (err == -EAGAIN) {
struct page *const newpage =
- __stagingpage_alloc(pagepool, GFP_NOFS);
+ erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
+ newpage->mapping = Z_EROFS_MAPPING_STAGING;
err = z_erofs_attach_page(clt, newpage,
Z_EROFS_PAGE_TYPE_EXCLUSIVE);
if (!err)
@@ -698,13 +693,11 @@ err_out:
goto out;
}
-static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ bool sync, int bios)
{
- tagptr1_t t = tagptr_init(tagptr1_t, ptr);
- struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
- bool background = tagptr_unfold_tags(t);
-
- if (!background) {
+ /* wake up the caller thread for sync decompression */
+ if (sync) {
unsigned long flags;
spin_lock_irqsave(&io->u.wait.lock, flags);
@@ -718,37 +711,30 @@ static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
queue_work(z_erofs_workqueue, &io->u.work);
}
-static inline void z_erofs_vle_read_endio(struct bio *bio)
+static void z_erofs_decompressqueue_endio(struct bio *bio)
{
- struct erofs_sb_info *sbi = NULL;
+ tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+ struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
blk_status_t err = bio->bi_status;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
- bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
DBG_BUGON(!page->mapping);
- if (!sbi && !z_erofs_page_is_staging(page))
- sbi = EROFS_SB(page->mapping->host->i_sb);
-
- /* sbi should already be gotten if the page is managed */
- if (sbi)
- cachemngd = erofs_page_is_managed(sbi, page);
-
if (err)
SetPageError(page);
- else if (cachemngd)
- SetPageUptodate(page);
- if (cachemngd)
+ if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+ if (!err)
+ SetPageUptodate(page);
unlock_page(page);
+ }
}
-
- z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
+ z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
bio_put(bio);
}
@@ -953,9 +939,8 @@ out:
return err;
}
-static void z_erofs_vle_unzip_all(struct super_block *sb,
- struct z_erofs_unzip_io *io,
- struct list_head *pagepool)
+static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
+ struct list_head *pagepool)
{
z_erofs_next_pcluster_t owned = io->head;
@@ -971,21 +956,21 @@ static void z_erofs_vle_unzip_all(struct super_block *sb,
pcl = container_of(owned, struct z_erofs_pcluster, next);
owned = READ_ONCE(pcl->next);
- z_erofs_decompress_pcluster(sb, pcl, pagepool);
+ z_erofs_decompress_pcluster(io->sb, pcl, pagepool);
}
}
-static void z_erofs_vle_unzip_wq(struct work_struct *work)
+static void z_erofs_decompressqueue_work(struct work_struct *work)
{
- struct z_erofs_unzip_io_sb *iosb =
- container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
+ struct z_erofs_decompressqueue *bgq =
+ container_of(work, struct z_erofs_decompressqueue, u.work);
LIST_HEAD(pagepool);
- DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
+ DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ z_erofs_decompress_queue(bgq, &pagepool);
put_pages_list(&pagepool);
- kvfree(iosb);
+ kvfree(bgq);
}
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
@@ -994,8 +979,6 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
struct address_space *mc,
gfp_t gfp)
{
- /* determined at compile time to avoid too many #ifdefs */
- const bool nocache = __builtin_constant_p(mc) ? !mc : false;
const pgoff_t index = pcl->obj.index;
bool tocache = false;
@@ -1016,7 +999,7 @@ repeat:
* the cached page has not been allocated and
* a placeholder is out there, prepare it now.
*/
- if (!nocache && page == PAGE_UNALLOCATED) {
+ if (page == PAGE_UNALLOCATED) {
tocache = true;
goto out_allocpage;
}
@@ -1029,21 +1012,6 @@ repeat:
mapping = READ_ONCE(page->mapping);
/*
- * if managed cache is disabled, it's no way to
- * get such a cached-like page.
- */
- if (nocache) {
- /* if managed cache is disabled, it is impossible `justfound' */
- DBG_BUGON(justfound);
-
- /* and it should be locked, not uptodate, and not truncated */
- DBG_BUGON(!PageLocked(page));
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(!mapping);
- goto out;
- }
-
- /*
* unmanaged (file) pages are all locked solidly,
* therefore it is impossible for `mapping' to be NULL.
*/
@@ -1093,50 +1061,52 @@ repeat:
unlock_page(page);
put_page(page);
out_allocpage:
- page = __stagingpage_alloc(pagepool, gfp);
- if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
- list_add(&page->lru, pagepool);
- cpu_relax();
- goto repeat;
- }
- if (nocache || !tocache)
- goto out;
- if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+ page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
+ if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+ /* non-LRU / non-movable temporary page is needed */
page->mapping = Z_EROFS_MAPPING_STAGING;
- goto out;
+ tocache = false;
}
+ if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+ if (tocache) {
+ /* since it added to managed cache successfully */
+ unlock_page(page);
+ put_page(page);
+ } else {
+ list_add(&page->lru, pagepool);
+ }
+ cond_resched();
+ goto repeat;
+ }
set_page_private(page, (unsigned long)pcl);
SetPagePrivate(page);
out: /* the only exit (for tracing and debugging) */
return page;
}
-static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
- struct z_erofs_unzip_io *io,
- bool foreground)
+static struct z_erofs_decompressqueue *
+jobqueue_init(struct super_block *sb,
+ struct z_erofs_decompressqueue *fgq, bool *fg)
{
- struct z_erofs_unzip_io_sb *iosb;
-
- if (foreground) {
- /* waitqueue available for foreground io */
- DBG_BUGON(!io);
+ struct z_erofs_decompressqueue *q;
- init_waitqueue_head(&io->u.wait);
- atomic_set(&io->pending_bios, 0);
- goto out;
+ if (fg && !*fg) {
+ q = kvzalloc(sizeof(*q), GFP_KERNEL | __GFP_NOWARN);
+ if (!q) {
+ *fg = true;
+ goto fg_out;
+ }
+ INIT_WORK(&q->u.work, z_erofs_decompressqueue_work);
+ } else {
+fg_out:
+ q = fgq;
+ init_waitqueue_head(&fgq->u.wait);
+ atomic_set(&fgq->pending_bios, 0);
}
-
- iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
- DBG_BUGON(!iosb);
-
- /* initialize fields in the allocated descriptor */
- io = &iosb->io;
- iosb->sb = sb;
- INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
-out:
- io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
- return io;
+ q->sb = sb;
+ q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+ return q;
}
/* define decompression jobqueue types */
@@ -1147,22 +1117,17 @@ enum {
};
static void *jobqueueset_init(struct super_block *sb,
- z_erofs_next_pcluster_t qtail[],
- struct z_erofs_unzip_io *q[],
- struct z_erofs_unzip_io *fgq,
- bool forcefg)
+ struct z_erofs_decompressqueue *q[],
+ struct z_erofs_decompressqueue *fgq, bool *fg)
{
/*
* if managed cache is enabled, bypass jobqueue is needed,
* no need to read from device for all pclusters in this queue.
*/
- q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
- qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
-
- q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
- qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL);
+ q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, fg);
- return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
+ return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], *fg));
}
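jobqueueset_init() above returns the submit queue pointer folded together with the foreground flag into bi_private, and z_erofs_decompressqueue_endio() unfolds both. A minimal model of the tagptr helpers, assuming the usual encoding of one tag bit in the low bit of an aligned pointer (names here are hypothetical; the real definitions live in erofs's tagptr header):

static inline void *tag_fold(void *ptr, unsigned long tag)
{
        return (void *)((unsigned long)ptr | (tag & 1));
}

static inline void *tag_unfold_ptr(void *t)
{
        return (void *)((unsigned long)t & ~1UL);
}

static inline unsigned long tag_unfold_tags(void *t)
{
        return (unsigned long)t & 1;
}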
static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
@@ -1184,9 +1149,8 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
qtail[JQ_BYPASS] = &pcl->next;
}
-static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
- unsigned int nr_bios,
- bool force_fg)
+static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
+ unsigned int nr_bios, bool force_fg)
{
/*
* although background is preferred, no one is pending for submission.
@@ -1195,19 +1159,19 @@ static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
if (force_fg || nr_bios)
return false;
- kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
+ kvfree(q[JQ_SUBMIT]);
return true;
}
-static bool z_erofs_vle_submit_all(struct super_block *sb,
- z_erofs_next_pcluster_t owned_head,
- struct list_head *pagepool,
- struct z_erofs_unzip_io *fgq,
- bool force_fg)
+static bool z_erofs_submit_queue(struct super_block *sb,
+ z_erofs_next_pcluster_t owned_head,
+ struct list_head *pagepool,
+ struct z_erofs_decompressqueue *fgq,
+ bool *force_fg)
{
- struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
- struct z_erofs_unzip_io *q[NR_JOBQUEUES];
+ struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
struct bio *bio;
void *bi_private;
/* since bio will be NULL, no need to initialize last_index */
@@ -1221,7 +1185,9 @@ static bool z_erofs_vle_submit_all(struct super_block *sb,
force_submit = false;
bio = NULL;
nr_bios = 0;
- bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
+ bi_private = jobqueueset_init(sb, q, fgq, force_fg);
+ qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
+ qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
/* by default, all need io submission */
q[JQ_SUBMIT]->head = owned_head;
@@ -1268,7 +1234,7 @@ submit_bio_retry:
if (!bio) {
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- bio->bi_end_io = z_erofs_vle_read_endio;
+ bio->bi_end_io = z_erofs_decompressqueue_endio;
bio_set_dev(bio, sb->s_bdev);
bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
LOG_SECTORS_PER_BLOCK;
@@ -1297,40 +1263,38 @@ skippage:
if (bio)
submit_bio(bio);
- if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
+ if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
return true;
- z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
+ z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
return true;
}
-static void z_erofs_submit_and_unzip(struct super_block *sb,
- struct z_erofs_collector *clt,
- struct list_head *pagepool,
- bool force_fg)
+static void z_erofs_runqueue(struct super_block *sb,
+ struct z_erofs_collector *clt,
+ struct list_head *pagepool, bool force_fg)
{
- struct z_erofs_unzip_io io[NR_JOBQUEUES];
+ struct z_erofs_decompressqueue io[NR_JOBQUEUES];
- if (!z_erofs_vle_submit_all(sb, clt->owned_head,
- pagepool, io, force_fg))
+ if (!z_erofs_submit_queue(sb, clt->owned_head,
+ pagepool, io, &force_fg))
return;
- /* decompress no I/O pclusters immediately */
- z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
+ /* handle bypass queue (no i/o pclusters) immediately */
+ z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
if (!force_fg)
return;
/* wait until all bios are completed */
- wait_event(io[JQ_SUBMIT].u.wait,
- !atomic_read(&io[JQ_SUBMIT].pending_bios));
+ io_wait_event(io[JQ_SUBMIT].u.wait,
+ !atomic_read(&io[JQ_SUBMIT].pending_bios));
- /* let's synchronous decompression */
- z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
+ /* handle synchronous decompress queue in the caller context */
+ z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
}
-static int z_erofs_vle_normalaccess_readpage(struct file *file,
- struct page *page)
+static int z_erofs_readpage(struct file *file, struct page *page)
{
struct inode *const inode = page->mapping->host;
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
@@ -1345,7 +1309,7 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
(void)z_erofs_collector_end(&f.clt);
/* if some compressed cluster ready, need submit them anyway */
- z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
+ z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, true);
if (err)
erofs_err(inode->i_sb, "failed to read, err [%d]", err);
@@ -1364,10 +1328,8 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
return nr <= sbi->max_sync_decompress_pages;
}
-static int z_erofs_vle_normalaccess_readpages(struct file *filp,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned int nr_pages)
+static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned int nr_pages)
{
struct inode *const inode = mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
@@ -1422,7 +1384,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
(void)z_erofs_collector_end(&f.clt);
- z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
+ z_erofs_runqueue(inode->i_sb, &f.clt, &pagepool, sync);
if (f.map.mpage)
put_page(f.map.mpage);
@@ -1432,8 +1394,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
return 0;
}
-const struct address_space_operations z_erofs_vle_normalaccess_aops = {
- .readpage = z_erofs_vle_normalaccess_readpage,
- .readpages = z_erofs_vle_normalaccess_readpages,
+const struct address_space_operations z_erofs_aops = {
+ .readpage = z_erofs_readpage,
+ .readpages = z_erofs_readpages,
};
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index faf950189bd7..7824f5563a55 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -84,7 +84,8 @@ struct z_erofs_pcluster {
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
-struct z_erofs_unzip_io {
+struct z_erofs_decompressqueue {
+ struct super_block *sb;
atomic_t pending_bios;
z_erofs_next_pcluster_t head;
@@ -94,11 +95,6 @@ struct z_erofs_unzip_io {
} u;
};
-struct z_erofs_unzip_io_sb {
- struct z_erofs_unzip_io io;
- struct super_block *sb;
-};
-
#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
struct page *page)
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 6a26c293ae2d..736db3a4cdef 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -22,11 +22,11 @@ int z_erofs_fill_inode(struct inode *inode)
set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
}
- inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
+ inode->i_mapping->a_ops = &z_erofs_aops;
return 0;
}
-static int fill_inode_lazy(struct inode *inode)
+static int z_erofs_fill_inode_lazy(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
struct super_block *const sb = inode->i_sb;
@@ -138,8 +138,8 @@ static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
return 0;
}
-static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
+static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
@@ -311,13 +311,13 @@ out:
return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
}
-static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned int lcn)
+static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn)
{
const unsigned int datamode = EROFS_I(m->inode)->datalayout;
if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
- return vle_legacy_load_cluster_from_disk(m, lcn);
+ return legacy_load_cluster_from_disk(m, lcn);
if (datamode == EROFS_INODE_FLAT_COMPRESSION)
return compacted_load_cluster_from_disk(m, lcn);
@@ -325,8 +325,8 @@ static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
return -EINVAL;
}
-static int vle_extent_lookback(struct z_erofs_maprecorder *m,
- unsigned int lookback_distance)
+static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
+ unsigned int lookback_distance)
{
struct erofs_inode *const vi = EROFS_I(m->inode);
struct erofs_map_blocks *const map = m->map;
@@ -343,7 +343,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
/* load extent head logical cluster if needed */
lcn -= lookback_distance;
- err = vle_load_cluster_from_disk(m, lcn);
+ err = z_erofs_load_cluster_from_disk(m, lcn);
if (err)
return err;
@@ -356,7 +356,7 @@ static int vle_extent_lookback(struct z_erofs_maprecorder *m,
DBG_BUGON(1);
return -EFSCORRUPTED;
}
- return vle_extent_lookback(m, m->delta[0]);
+ return z_erofs_extent_lookback(m, m->delta[0]);
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
map->m_flags &= ~EROFS_MAP_ZIPPED;
/* fallthrough */
@@ -396,7 +396,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
goto out;
}
- err = fill_inode_lazy(inode);
+ err = z_erofs_fill_inode_lazy(inode);
if (err)
goto out;
@@ -405,7 +405,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
m.lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
- err = vle_load_cluster_from_disk(&m, m.lcn);
+ err = z_erofs_load_cluster_from_disk(&m, m.lcn);
if (err)
goto unmap_out;
@@ -436,7 +436,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
/* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
 /* get the corresponding first chunk */
- err = vle_extent_lookback(&m, m.delta[0]);
+ err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err)
goto unmap_out;
break;
diff --git a/fs/exec.c b/fs/exec.c
index c27231234764..74d88dab98dd 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -59,7 +59,6 @@
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
-#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
@@ -1132,7 +1131,7 @@ static int de_thread(struct task_struct *tsk)
* also take its birthdate (always earlier than our own).
*/
tsk->start_time = leader->start_time;
- tsk->real_start_time = leader->real_start_time;
+ tsk->start_boottime = leader->start_boottime;
BUG_ON(!same_thread_group(leader, tsk));
BUG_ON(has_group_leader_pid(tsk));
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index e0cc55164505..fa9c951d3471 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -269,7 +269,7 @@ goal_in_my_reservation(struct ext2_reserve_window *rsv, ext2_grpblk_t grp_goal,
ext2_fsblk_t group_first_block, group_last_block;
group_first_block = ext2_group_first_block_no(sb, group);
- group_last_block = group_first_block + EXT2_BLOCKS_PER_GROUP(sb) - 1;
+ group_last_block = ext2_group_last_block_no(sb, group);
if ((rsv->_rsv_start > group_last_block) ||
(rsv->_rsv_end < group_first_block))
@@ -666,37 +666,24 @@ ext2_try_to_allocate(struct super_block *sb, int group,
unsigned long *count,
struct ext2_reserve_window *my_rsv)
{
- ext2_fsblk_t group_first_block;
+ ext2_fsblk_t group_first_block = ext2_group_first_block_no(sb, group);
+ ext2_fsblk_t group_last_block = ext2_group_last_block_no(sb, group);
ext2_grpblk_t start, end;
unsigned long num = 0;
+ start = 0;
+ end = group_last_block - group_first_block + 1;
/* we do allocation within the reservation window if we have a window */
if (my_rsv) {
- group_first_block = ext2_group_first_block_no(sb, group);
if (my_rsv->_rsv_start >= group_first_block)
start = my_rsv->_rsv_start - group_first_block;
- else
- /* reservation window cross group boundary */
- start = 0;
- end = my_rsv->_rsv_end - group_first_block + 1;
- if (end > EXT2_BLOCKS_PER_GROUP(sb))
- /* reservation window crosses group boundary */
- end = EXT2_BLOCKS_PER_GROUP(sb);
- if ((start <= grp_goal) && (grp_goal < end))
- start = grp_goal;
- else
+ if (my_rsv->_rsv_end < group_last_block)
+ end = my_rsv->_rsv_end - group_first_block + 1;
+ if (grp_goal < start || grp_goal >= end)
grp_goal = -1;
- } else {
- if (grp_goal > 0)
- start = grp_goal;
- else
- start = 0;
- end = EXT2_BLOCKS_PER_GROUP(sb);
}
-
BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
-repeat:
if (grp_goal < 0) {
grp_goal = find_next_usable_block(start, bitmap_bh, end);
if (grp_goal < 0)
@@ -711,32 +698,23 @@ repeat:
;
}
}
- start = grp_goal;
- if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group), grp_goal,
- bitmap_bh->b_data)) {
- /*
- * The block was allocated by another thread, or it was
- * allocated and then freed by another thread
- */
- start++;
- grp_goal++;
- if (start >= end)
- goto fail_access;
- goto repeat;
- }
- num++;
- grp_goal++;
- while (num < *count && grp_goal < end
- && !ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
+ for (; num < *count && grp_goal < end; grp_goal++) {
+ if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
grp_goal, bitmap_bh->b_data)) {
+ if (num == 0)
+ continue;
+ break;
+ }
num++;
- grp_goal++;
}
+
+ if (num == 0)
+ goto fail_access;
+
*count = num;
return grp_goal - num;
fail_access:
- *count = num;
return -1;
}
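The rewritten scan loop can be exercised outside the kernel; below is a compilable user-space model of its claim semantics (everything here is an assumption for illustration — real ext2 uses atomic bitmap ops and find_next_usable_block()):

	#include <stdio.h>

	/* Non-atomic stand-in for ext2_set_bit_atomic(). */
	static int test_and_set(unsigned char *map, int bit)
	{
		int old = (map[bit / 8] >> (bit % 8)) & 1;

		map[bit / 8] |= 1u << (bit % 8);
		return old;
	}

	/* Model of the new loop: claim up to *count bits in [start, end);
	 * a busy bit before the first success keeps the search going, a
	 * busy bit after the first success ends the run. */
	static int try_to_allocate(unsigned char *map, int start, int end,
				   int grp_goal, unsigned long *count)
	{
		unsigned long num = 0;

		if (grp_goal < 0)
			grp_goal = start; /* stand-in for find_next_usable_block() */
		for (; num < *count && grp_goal < end; grp_goal++) {
			if (test_and_set(map, grp_goal)) {
				if (num == 0)
					continue;
				break;
			}
			num++;
		}
		if (num == 0)
			return -1;
		*count = num;
		return grp_goal - num; /* first bit of the allocated run */
	}

	int main(void)
	{
		unsigned char map[4] = { 0x05 }; /* bits 0 and 2 already busy */
		unsigned long count = 4;
		int first = try_to_allocate(map, 0, 32, -1, &count);

		printf("first=%d count=%lu\n", first, count); /* first=1 count=1 */
		return 0;
	}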
@@ -754,10 +732,9 @@ fail_access:
* but we will shift to the place where start_block is,
* then start from there, when looking for a reservable space.
*
- * @size: the target new reservation window size
+ * @sb: the super block.
*
- * @group_first_block: the first block we consider to start
- * the real search from
+ * @start_block: the first block we consider to start the real search from
*
* @last_block:
* the maximum block number that our goal reservable space
@@ -908,7 +885,7 @@ static int alloc_new_reservation(struct ext2_reserve_window_node *my_rsv,
spinlock_t *rsv_lock = &EXT2_SB(sb)->s_rsv_window_lock;
group_first_block = ext2_group_first_block_no(sb, group);
- group_end_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+ group_end_block = ext2_group_last_block_no(sb, group);
if (grp_goal < 0)
start_block = group_first_block;
@@ -1115,7 +1092,7 @@ ext2_try_to_allocate_with_rsv(struct super_block *sb, unsigned int group,
* first block is the block number of the first block in this group
*/
group_first_block = ext2_group_first_block_no(sb, group);
- group_last_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+ group_last_block = ext2_group_last_block_no(sb, group);
/*
* Basically we will allocate a new block from inode's reservation
@@ -1313,6 +1290,13 @@ retry_alloc:
if (free_blocks > 0) {
grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
EXT2_BLOCKS_PER_GROUP(sb));
+ /*
+	 * In case we retry allocation (due to the fs reservation not
+	 * working out or fs corruption), bitmap_bh is a non-NULL
+	 * pointer and we have to release it before calling
+	 * read_block_bitmap().
+ */
+ brelse(bitmap_bh);
bitmap_bh = read_block_bitmap(sb, group_no);
if (!bitmap_bh)
goto io_error;
@@ -1404,6 +1388,7 @@ allocated:
* use. So we may want to selectively mark some of the blocks
* as free
*/
+ num = *count;
goto retry_alloc;
}
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 10ab238de9a6..8178bd38a9d6 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -813,6 +813,18 @@ ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block);
}
+static inline ext2_fsblk_t
+ext2_group_last_block_no(struct super_block *sb, unsigned long group_no)
+{
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+ if (group_no == sbi->s_groups_count - 1)
+ return le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
+ else
+ return ext2_group_first_block_no(sb, group_no) +
+ EXT2_BLOCKS_PER_GROUP(sb) - 1;
+}
+
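A worked example of the new helper (the geometry below is an assumption, not from the patch):

	/*
	 * s_first_data_block = 1, EXT2_BLOCKS_PER_GROUP(sb) = 8192,
	 * s_blocks_count = 20000  =>  s_groups_count = 3
	 *
	 *	group 0: first = 1,     last = 8192
	 *	group 1: first = 8193,  last = 16384
	 *	group 2: first = 16385, last = 19999  (shorter final group,
	 *	         the case the helper special-cases)
	 */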
#define ext2_set_bit __test_and_set_bit_le
#define ext2_clear_bit __test_and_clear_bit_le
#define ext2_test_bit test_bit_le
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 7004ce581a32..119667e65890 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -701,10 +701,13 @@ static int ext2_get_blocks(struct inode *inode,
if (!partial) {
count++;
mutex_unlock(&ei->truncate_mutex);
- if (err)
- goto cleanup;
goto got_it;
}
+
+ if (err) {
+ mutex_unlock(&ei->truncate_mutex);
+ goto cleanup;
+ }
}
/*
@@ -801,7 +804,7 @@ int ext2_get_block(struct inode *inode, sector_t iblock,
#ifdef CONFIG_FS_DAX
static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
- unsigned flags, struct iomap *iomap)
+ unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
unsigned int blkbits = inode->i_blkbits;
unsigned long first_block = offset >> blkbits;
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 1b853fb0b163..32a8d10b579d 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -145,10 +145,13 @@ setversion_out:
if (ei->i_block_alloc_info){
struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
rsv->rsv_goal_size = rsv_window_size;
+ } else {
+ ret = -ENOMEM;
}
+
mutex_unlock(&ei->truncate_mutex);
mnt_drop_write_file(filp);
- return 0;
+ return ret;
}
default:
return -ENOTTY;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 30c630d73f0f..bcffe25da2f0 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -702,13 +702,7 @@ static int ext2_check_descriptors(struct super_block *sb)
for (i = 0; i < sbi->s_groups_count; i++) {
struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
- ext2_fsblk_t last_block;
-
- if (i == sbi->s_groups_count - 1)
- last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
- else
- last_block = first_block +
- (EXT2_BLOCKS_PER_GROUP(sb) - 1);
+ ext2_fsblk_t last_block = ext2_group_last_block_no(sb, i);
if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
le32_to_cpu(gdp->bg_block_bitmap) > last_block)
@@ -806,7 +800,6 @@ static unsigned long descriptor_loc(struct super_block *sb,
{
struct ext2_sb_info *sbi = EXT2_SB(sb);
unsigned long bg, first_meta_bg;
- int has_super = 0;
first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
@@ -814,10 +807,8 @@ static unsigned long descriptor_loc(struct super_block *sb,
nr < first_meta_bg)
return (logic_sb_block + nr + 1);
bg = sbi->s_desc_per_block * nr;
- if (ext2_bg_has_super(sb, bg))
- has_super = 1;
- return ext2_group_first_block_no(sb, bg) + has_super;
+ return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
}
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b3a2cc7c0252..f8578caba40d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -198,6 +198,12 @@ struct ext4_system_blocks {
*/
#define EXT4_IO_END_UNWRITTEN 0x0001
+struct ext4_io_end_vec {
+ struct list_head list; /* list of io_end_vec */
+ loff_t offset; /* offset in the file */
+ ssize_t size; /* size of the extent */
+};
+
/*
* For converting unwritten extents on a work queue. 'handle' is used for
* buffered writeback.
@@ -211,8 +217,7 @@ typedef struct ext4_io_end {
* bios covering the extent */
unsigned int flag; /* unwritten or not */
atomic_t count; /* reference counter */
- loff_t offset; /* offset in the file */
- ssize_t size; /* size of the extent */
+ struct list_head list_vec; /* list of ext4_io_end_vec */
} ext4_io_end_t;
struct ext4_io_submit {
@@ -1579,7 +1584,6 @@ enum {
EXT4_STATE_NO_EXPAND, /* No space for expansion */
EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
- EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
EXT4_STATE_NEWENTRY, /* File just added to dir */
EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */
EXT4_STATE_EXT_PRECACHED, /* extents have been precached */
@@ -2562,8 +2566,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
int ext4_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
-int ext4_dio_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create);
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create);
int ext4_walk_page_buffers(handle_t *handle,
@@ -2606,7 +2608,6 @@ extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
extern int ext4_break_layouts(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
-extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
extern void ext4_set_inode_flags(struct inode *);
extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
@@ -3266,6 +3267,8 @@ extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len);
+extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
+ ext4_io_end_t *io_end);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern int ext4_ext_calc_metadata_amount(struct inode *inode,
@@ -3298,6 +3301,10 @@ extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
ext4_lblk_t lblk2, ext4_lblk_t count,
int mark_unwritten,int *err);
extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
+extern int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
+ int check_cred, int restart_cred,
+ int revoke_cred);
+
/* move_extent.c */
extern void ext4_double_down_write_data_sem(struct inode *first,
@@ -3324,6 +3331,8 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
int len,
struct writeback_control *wbc,
bool keep_towrite);
+extern struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end);
+extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
/* mmp.c */
extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
@@ -3381,6 +3390,7 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
}
extern const struct iomap_ops ext4_iomap_ops;
+extern const struct iomap_ops ext4_iomap_report_ops;
static inline int ext4_buffer_uptodate(struct buffer_head *bh)
{
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 7c70b08d104c..d3b8cdea5df7 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -65,12 +65,14 @@ static int ext4_journal_check_start(struct super_block *sb)
}
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks)
+ int type, int blocks, int rsv_blocks,
+ int revoke_creds)
{
journal_t *journal;
int err;
- trace_ext4_journal_start(sb, blocks, rsv_blocks, _RET_IP_);
+ trace_ext4_journal_start(sb, blocks, rsv_blocks, revoke_creds,
+ _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0)
return ERR_PTR(err);
@@ -78,8 +80,8 @@ handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
journal = EXT4_SB(sb)->s_journal;
if (!journal)
return ext4_get_nojournal();
- return jbd2__journal_start(journal, blocks, rsv_blocks, GFP_NOFS,
- type, line);
+ return jbd2__journal_start(journal, blocks, rsv_blocks, revoke_creds,
+ GFP_NOFS, type, line);
}
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
@@ -119,8 +121,8 @@ handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
return ext4_get_nojournal();
sb = handle->h_journal->j_private;
- trace_ext4_journal_start_reserved(sb, handle->h_buffer_credits,
- _RET_IP_);
+ trace_ext4_journal_start_reserved(sb,
+ jbd2_handle_buffer_credits(handle), _RET_IP_);
err = ext4_journal_check_start(sb);
if (err < 0) {
jbd2_journal_free_reserved(handle);
@@ -133,6 +135,19 @@ handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
return handle;
}
+int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
+ int extend_cred, int revoke_cred)
+{
+ if (!ext4_handle_valid(handle))
+ return 0;
+ if (jbd2_handle_buffer_credits(handle) >= check_cred &&
+ handle->h_revoke_credits >= revoke_cred)
+ return 0;
+ extend_cred = max(0, extend_cred - jbd2_handle_buffer_credits(handle));
+ revoke_cred = max(0, revoke_cred - handle->h_revoke_credits);
+ return ext4_journal_extend(handle, extend_cred, revoke_cred);
+}
+
static void ext4_journal_abort_handle(const char *caller, unsigned int line,
const char *err_fn,
struct buffer_head *bh,
@@ -278,7 +293,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
- handle->h_buffer_credits, err);
+ jbd2_handle_buffer_credits(handle), err);
return err;
}
ext4_error_inode(inode, where, line,
@@ -289,7 +304,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
handle->h_type,
handle->h_line_no,
handle->h_requested_credits,
- handle->h_buffer_credits, err);
+ jbd2_handle_buffer_credits(handle),
+ err);
}
} else {
if (inode)
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index ef8fcf7d0d3b..a6b9b66dbfad 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -261,7 +261,8 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
__ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
- int type, int blocks, int rsv_blocks);
+ int type, int blocks, int rsv_blocks,
+ int revoke_creds);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -288,28 +289,41 @@ static inline int ext4_handle_is_aborted(handle_t *handle)
return 0;
}
-static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
+static inline int ext4_free_metadata_revoke_credits(struct super_block *sb,
+ int blocks)
{
- if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
- return 0;
- return 1;
+ /* Freeing each metadata block can result in freeing one cluster */
+ return blocks * EXT4_SB(sb)->s_cluster_ratio;
+}
+
+static inline int ext4_trans_default_revoke_credits(struct super_block *sb)
+{
+ return ext4_free_metadata_revoke_credits(sb, 8);
}
#define ext4_journal_start_sb(sb, type, nblocks) \
- __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0)
+ __ext4_journal_start_sb((sb), __LINE__, (type), (nblocks), 0, \
+ ext4_trans_default_revoke_credits(sb))
#define ext4_journal_start(inode, type, nblocks) \
- __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0)
+ __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \
+ ext4_trans_default_revoke_credits((inode)->i_sb))
+
+#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\
+ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\
+ ext4_trans_default_revoke_credits((inode)->i_sb))
-#define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks) \
- __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks))
+#define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \
+ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \
+ (revoke_creds))
static inline handle_t *__ext4_journal_start(struct inode *inode,
unsigned int line, int type,
- int blocks, int rsv_blocks)
+ int blocks, int rsv_blocks,
+ int revoke_creds)
{
return __ext4_journal_start_sb(inode->i_sb, line, type, blocks,
- rsv_blocks);
+ rsv_blocks, revoke_creds);
}
#define ext4_journal_stop(handle) \
@@ -332,20 +346,68 @@ static inline handle_t *ext4_journal_current_handle(void)
return journal_current_handle();
}
-static inline int ext4_journal_extend(handle_t *handle, int nblocks)
+static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_extend(handle, nblocks);
+ return jbd2_journal_extend(handle, nblocks, revoke);
return 0;
}
-static inline int ext4_journal_restart(handle_t *handle, int nblocks)
+static inline int ext4_journal_restart(handle_t *handle, int nblocks,
+ int revoke)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_restart(handle, nblocks);
+ return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS);
return 0;
}
+int __ext4_journal_ensure_credits(handle_t *handle, int check_cred,
+ int extend_cred, int revoke_cred);
+
+
+/*
+ * Ensure @handle has at least @check_cred credits available. If not, the
+ * transaction will be extended or restarted to contain at least @extend_cred
+ * credits. Before restarting the transaction, @fn is executed to allow for
+ * any necessary cleanup.
+ *
+ * The return value is < 0 in case of error, 0 in case the handle has enough
+ * credits or the transaction extension succeeded, and 1 in case the
+ * transaction had to be restarted.
+ */
+#define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \
+ revoke_cred, fn) \
+({ \
+ __label__ __ensure_end; \
+ int err = __ext4_journal_ensure_credits((handle), (check_cred), \
+ (extend_cred), (revoke_cred)); \
+ \
+ if (err <= 0) \
+ goto __ensure_end; \
+ err = (fn); \
+ if (err < 0) \
+ goto __ensure_end; \
+ err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \
+ if (err == 0) \
+ err = 1; \
+__ensure_end: \
+ err; \
+})
+
+/*
+ * Ensure the given handle has at least the requested amount of credits
+ * available, possibly restarting the transaction if needed. We also make
+ * sure the transaction has space for at least
+ * ext4_trans_default_revoke_credits(sb) revoke records, as freeing one or
+ * two blocks is a very common pattern and requesting this is very cheap.
+ */
+static inline int ext4_journal_ensure_credits(handle_t *handle, int credits,
+ int revoke_creds)
+{
+ return ext4_journal_ensure_credits_fn(handle, credits, credits,
+ revoke_creds, 0);
+}
+
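A sketch of the calling pattern the macro is built for, with CHECK and EXTEND standing for caller-chosen credit counts (the lowercase helpers are hypothetical, not ext4 functions):

	static int free_block_run(handle_t *handle, struct inode *inode)
	{
		int err;

		while (more_blocks_to_free(inode)) {		/* hypothetical */
			err = ext4_journal_ensure_credits_fn(handle, CHECK,
				EXTEND,
				ext4_free_data_revoke_credits(inode, 1),
				cleanup_before_restart(inode));	/* hypothetical */
			if (err < 0)
				return err;	/* fatal, handle unusable */
			if (err > 0)
				resync_state(inode); /* handle was restarted */
			free_one_block(handle, inode);	/* hypothetical */
		}
		return 0;
	}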
static inline int ext4_journal_blocks_per_page(struct inode *inode)
{
if (EXT4_JOURNAL(inode) != NULL)
@@ -407,6 +469,7 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
/* We do not support data journalling with delayed allocation */
if (!S_ISREG(inode->i_mode) ||
+ ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) ||
test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
(ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
!test_opt(inode->i_sb, DELALLOC))) {
@@ -437,6 +500,19 @@ static inline int ext4_should_writeback_data(struct inode *inode)
return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE;
}
+static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
+{
+ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ return 0;
+ if (!ext4_should_journal_data(inode))
+ return 0;
+ /*
+ * Data blocks in one extent are contiguous, just account for partial
+ * clusters at extent boundaries
+ */
+ return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1);
+}
+
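Worked numbers for the two revoke-credit helpers (the cluster ratio is an assumption):

	/*
	 * With s_cluster_ratio = 16:
	 *
	 *	ext4_free_metadata_revoke_credits(sb, 2)  = 2 * 16 = 32
	 *	ext4_free_data_revoke_credits(inode, 100) = 100 + 2 * (16 - 1)
	 *	                                          = 130
	 *
	 * The data variant is nonzero only for an inode that journals its
	 * data on a filesystem not mounted data=journal; the two extra
	 * partial clusters cover the extent's edges.
	 */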
/*
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index fb0f99dc8c22..0e8708b77da6 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -100,29 +100,41 @@ static int ext4_split_extent_at(handle_t *handle,
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes);
-static int ext4_ext_truncate_extend_restart(handle_t *handle,
- struct inode *inode,
- int needed)
+static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
- int err;
-
- if (!ext4_handle_valid(handle))
- return 0;
- if (handle->h_buffer_credits >= needed)
- return 0;
/*
- * If we need to extend the journal get a few extra blocks
- * while we're at it for efficiency's sake.
+ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
+ * moment, get_block can be called only for blocks inside i_size since
+ * page cache has been already dropped and writes are blocked by
+ * i_mutex. So we can safely drop the i_data_sem here.
*/
- needed += 3;
- err = ext4_journal_extend(handle, needed - handle->h_buffer_credits);
- if (err <= 0)
- return err;
- err = ext4_truncate_restart_trans(handle, inode, needed);
- if (err == 0)
- err = -EAGAIN;
+ BUG_ON(EXT4_JOURNAL(inode) == NULL);
+ ext4_discard_preallocations(inode);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ *dropped = 1;
+ return 0;
+}
- return err;
+/*
+ * Make sure 'handle' has at least 'check_cred' credits. If not, restart
+ * transaction with 'restart_cred' credits. The function drops i_data_sem
+ * when restarting transaction and gets it after transaction is restarted.
+ *
+ * The function returns 0 on success, 1 if transaction had to be restarted,
+ * and < 0 in case of fatal error.
+ */
+int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
+ int check_cred, int restart_cred,
+ int revoke_cred)
+{
+ int ret;
+ int dropped = 0;
+
+ ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
+ revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
+ if (dropped)
+ down_write(&EXT4_I(inode)->i_data_sem);
+ return ret;
}
/*
@@ -1753,16 +1765,9 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
*/
if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
return 0;
- /*
- * The check for IO to unwritten extent is somewhat racy as we
- * increment i_unwritten / set EXT4_STATE_DIO_UNWRITTEN only after
- * dropping i_data_sem. But reserved blocks should save us in that
- * case.
- */
+
if (ext4_ext_is_unwritten(ex1) &&
- (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
- atomic_read(&EXT4_I(inode)->i_unwritten) ||
- (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
+ ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
return 0;
#ifdef AGGRESSIVE_TEST
if (ext1_ee_len >= 4)
@@ -1840,7 +1845,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
* group descriptor to release the extent tree block. If we
* can't get the journal credits, give up.
*/
- if (ext4_journal_extend(handle, 2))
+ if (ext4_journal_extend(handle, 2,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
return;
/*
@@ -2727,7 +2733,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int err = 0, correct_index = 0;
- int depth = ext_depth(inode), credits;
+ int depth = ext_depth(inode), credits, revoke_credits;
struct ext4_extent_header *eh;
ext4_lblk_t a, b;
unsigned num;
@@ -2819,10 +2825,23 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
credits += (ext_depth(inode)) + 1;
}
credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-
- err = ext4_ext_truncate_extend_restart(handle, inode, credits);
- if (err)
+ /*
+ * We may end up freeing some index blocks and data from the
+ * punched range. Note that partial clusters are accounted for
+ * by ext4_free_data_revoke_credits().
+ */
+ revoke_credits =
+ ext4_free_metadata_revoke_credits(inode->i_sb,
+ ext_depth(inode)) +
+ ext4_free_data_revoke_credits(inode, b - a + 1);
+
+ err = ext4_datasem_ensure_credits(handle, inode, credits,
+ credits, revoke_credits);
+ if (err) {
+ if (err > 0)
+ err = -EAGAIN;
goto out;
+ }
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
@@ -2948,7 +2967,9 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext_debug("truncate since %u to %u\n", start, end);
 /* probably the first extent we're going to free will be the last in the block */
- handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
+ handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
+ depth + 1,
+ ext4_free_metadata_revoke_credits(inode->i_sb, depth));
if (IS_ERR(handle))
return PTR_ERR(handle);
@@ -4962,23 +4983,13 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
int ret = 0;
int ret2 = 0;
struct ext4_map_blocks map;
- unsigned int credits, blkbits = inode->i_blkbits;
+ unsigned int blkbits = inode->i_blkbits;
+ unsigned int credits = 0;
map.m_lblk = offset >> blkbits;
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
- /*
- * This is somewhat ugly but the idea is clear: When transaction is
- * reserved, everything goes into it. Otherwise we rather start several
- * smaller transactions for conversion of each extent separately.
- */
- if (handle) {
- handle = ext4_journal_start_reserved(handle,
- EXT4_HT_EXT_CONVERT);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- credits = 0;
- } else {
+ if (!handle) {
/*
* credits to insert 1 extent into extent tree
*/
@@ -5009,11 +5020,40 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
if (ret <= 0 || ret2)
break;
}
- if (!credits)
- ret2 = ext4_journal_stop(handle);
return ret > 0 ? ret2 : ret;
}
+int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
+{
+ int ret, err = 0;
+ struct ext4_io_end_vec *io_end_vec;
+
+ /*
+	 * This is somewhat ugly but the idea is clear: when a transaction is
+	 * reserved, everything goes into it. Otherwise we would rather start
+	 * several smaller transactions for converting each extent separately.
+ */
+ if (handle) {
+ handle = ext4_journal_start_reserved(handle,
+ EXT4_HT_EXT_CONVERT);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ }
+
+ list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
+ ret = ext4_convert_unwritten_extents(handle, io_end->inode,
+ io_end_vec->offset,
+ io_end_vec->size);
+ if (ret)
+ break;
+ }
+
+ if (handle)
+ err = ext4_journal_stop(handle);
+
+ return ret < 0 ? ret : err;
+}
+
/*
* If newes is not existing extent (newes->ec_pblk equals zero) find
* delayed extent at start of newes and update newes accordingly and
@@ -5206,13 +5246,10 @@ ext4_access_path(handle_t *handle, struct inode *inode,
* descriptor) for each block group; assume two block
* groups
*/
- if (handle->h_buffer_credits < 7) {
- credits = ext4_writepage_trans_blocks(inode);
- err = ext4_ext_truncate_extend_restart(handle, inode, credits);
- /* EAGAIN is success */
- if (err && err != -EAGAIN)
- return err;
- }
+ credits = ext4_writepage_trans_blocks(inode);
+ err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0);
+ if (err < 0)
+ return err;
err = ext4_ext_get_access(handle, inode, path);
return err;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8d2bbcc2d813..6a7293a5cda2 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -29,10 +29,58 @@
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/mman.h>
+#include <linux/backing-dev.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
+#include "truncate.h"
+
+static bool ext4_dio_supported(struct inode *inode)
+{
+ if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
+ return false;
+ if (fsverity_active(inode))
+ return false;
+ if (ext4_should_journal_data(inode))
+ return false;
+ if (ext4_has_inline_data(inode))
+ return false;
+ return true;
+}
+
+static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ ssize_t ret;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
+ if (!ext4_dio_supported(inode)) {
+ inode_unlock_shared(inode);
+ /*
+		 * Fall back to buffered I/O if the operation being performed on
+ * the inode is not supported by direct I/O. The IOCB_DIRECT
+ * flag needs to be cleared here in order to ensure that the
+ * direct I/O path within generic_file_read_iter() is not
+ * taken.
+ */
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ return generic_file_read_iter(iocb, to);
+ }
+
+ ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
+ inode_unlock_shared(inode);
+
+ file_accessed(iocb->ki_filp);
+ return ret;
+}
#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -64,16 +112,21 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
if (!iov_iter_count(to))
return 0; /* skip atime */
#ifdef CONFIG_FS_DAX
- if (IS_DAX(file_inode(iocb->ki_filp)))
+ if (IS_DAX(inode))
return ext4_dax_read_iter(iocb, to);
#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext4_dio_read_iter(iocb, to);
+
return generic_file_read_iter(iocb, to);
}
@@ -103,13 +156,6 @@ static int ext4_release_file(struct inode *inode, struct file *filp)
return 0;
}
-static void ext4_unwritten_wait(struct inode *inode)
-{
- wait_queue_head_t *wq = ext4_ioend_wq(inode);
-
- wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
-}
-
/*
* This tests whether the IO in question is block-aligned or not.
* Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
@@ -162,13 +208,13 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
ret = generic_write_checks(iocb, from);
if (ret <= 0)
return ret;
- if (unlikely(IS_IMMUTABLE(inode)))
- return -EPERM;
-
/*
* If we have encountered a bitmap-format file, the size limit
* is smaller than s_maxbytes, which is for extent-mapped files.
@@ -180,56 +226,266 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
return -EFBIG;
iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
}
+
+ ret = file_modified(iocb->ki_filp);
+ if (ret)
+ return ret;
+
return iov_iter_count(from);
}
-#ifdef CONFIG_FS_DAX
-static ssize_t
-ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
{
- struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
+ struct inode *inode = file_inode(iocb->ki_filp);
- if (!inode_trylock(inode)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
- return -EAGAIN;
- inode_lock(inode);
- }
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EOPNOTSUPP;
+
+ inode_lock(inode);
ret = ext4_write_checks(iocb, from);
if (ret <= 0)
goto out;
- ret = file_remove_privs(iocb->ki_filp);
- if (ret)
- goto out;
- ret = file_update_time(iocb->ki_filp);
- if (ret)
- goto out;
- ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+ current->backing_dev_info = inode_to_bdi(inode);
+ ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
+ current->backing_dev_info = NULL;
+
out:
inode_unlock(inode);
- if (ret > 0)
+ if (likely(ret > 0)) {
+ iocb->ki_pos += ret;
ret = generic_write_sync(iocb, ret);
+ }
+
return ret;
}
-#endif
-static ssize_t
-ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
+ ssize_t written, size_t count)
{
+ handle_t *handle;
+ bool truncate = false;
+ u8 blkbits = inode->i_blkbits;
+ ext4_lblk_t written_blk, end_blk;
+
+ /*
+	 * Note that EXT4_I(inode)->i_disksize can get extended up to
+	 * inode->i_size while the I/O is running due to writeback of delalloc
+	 * blocks. But the code in ext4_iomap_alloc() is careful to use
+	 * zeroed/unwritten extents if this is possible; thus we won't leave
+	 * uninitialized blocks in a file even if we didn't succeed in writing
+	 * as much as we intended.
+ */
+ WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
+ if (offset + count <= EXT4_I(inode)->i_disksize) {
+ /*
+ * We need to ensure that the inode is removed from the orphan
+ * list if it has been added prematurely, due to writeback of
+ * delalloc blocks.
+ */
+ if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+
+ if (IS_ERR(handle)) {
+ ext4_orphan_del(NULL, inode);
+ return PTR_ERR(handle);
+ }
+
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+ }
+
+ return written;
+ }
+
+ if (written < 0)
+ goto truncate;
+
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ written = PTR_ERR(handle);
+ goto truncate;
+ }
+
+ if (ext4_update_inode_size(inode, offset + written))
+ ext4_mark_inode_dirty(handle, inode);
+
+ /*
+ * We may need to truncate allocated but not written blocks beyond EOF.
+ */
+ written_blk = ALIGN(offset + written, 1 << blkbits);
+ end_blk = ALIGN(offset + count, 1 << blkbits);
+ if (written_blk < end_blk && ext4_can_truncate(inode))
+ truncate = true;
+
+ /*
+ * Remove the inode from the orphan list if it has been extended and
+ * everything went OK.
+ */
+ if (!truncate && inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+ ext4_journal_stop(handle);
+
+ if (truncate) {
+truncate:
+ ext4_truncate_failed_write(inode);
+ /*
+ * If the truncate operation failed early, then the inode may
+		 * still be on the orphan list. In that case, we need to try to
+		 * remove the inode from the in-memory linked list.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ }
+
+ return written;
+}
+
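A worked instance of the truncate decision above (4 KiB blocks, i.e. blkbits = 12, is an assumption):

	/*
	 * offset = 0, count = 10000, written = 4096:
	 *
	 *	written_blk = ALIGN(0 + 4096,  4096) =  4096
	 *	end_blk     = ALIGN(0 + 10000, 4096) = 12288
	 *
	 * written_blk < end_blk, so the blocks allocated for the unwritten
	 * tail beyond EOF are truncated away (when ext4_can_truncate()).
	 */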
+static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ loff_t offset = iocb->ki_pos;
struct inode *inode = file_inode(iocb->ki_filp);
- int o_direct = iocb->ki_flags & IOCB_DIRECT;
- int unaligned_aio = 0;
- int overwrite = 0;
+
+ if (error)
+ return error;
+
+ if (size && flags & IOMAP_DIO_UNWRITTEN)
+ return ext4_convert_unwritten_extents(NULL, inode,
+ offset, size);
+
+ return 0;
+}
+
+static const struct iomap_dio_ops ext4_dio_write_ops = {
+ .end_io = ext4_dio_write_end_io,
+};
+
+static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
ssize_t ret;
+ size_t count;
+ loff_t offset;
+ handle_t *handle;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ bool extend = false, overwrite = false, unaligned_aio = false;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
- return -EIO;
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ if (!ext4_dio_supported(inode)) {
+ inode_unlock(inode);
+ /*
+		 * Fall back to buffered I/O if the inode does not support
+ * direct I/O.
+ */
+ return ext4_buffered_write_iter(iocb, from);
+ }
+
+ ret = ext4_write_checks(iocb, from);
+ if (ret <= 0) {
+ inode_unlock(inode);
+ return ret;
+ }
+
+ /*
+	 * Unaligned asynchronous direct I/O writes must be serialized with
+	 * one another, as the zeroing of partial blocks of two competing
+	 * unaligned asynchronous direct I/O writes can result in data
+	 * corruption.
+ */
+ offset = iocb->ki_pos;
+ count = iov_iter_count(from);
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+ !is_sync_kiocb(iocb) && ext4_unaligned_aio(inode, from, offset)) {
+ unaligned_aio = true;
+ inode_dio_wait(inode);
+ }
+
+ /*
+ * Determine whether the I/O will overwrite allocated and initialized
+ * blocks. If so, check to see whether it is possible to take the
+ * dioread_nolock path.
+ */
+ if (!unaligned_aio && ext4_overwrite_io(inode, offset, count) &&
+ ext4_should_dioread_nolock(inode)) {
+ overwrite = true;
+ downgrade_write(&inode->i_rwsem);
+ }
+
+ if (offset + count > EXT4_I(inode)->i_disksize) {
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+
+ ret = ext4_orphan_add(handle, inode);
+ if (ret) {
+ ext4_journal_stop(handle);
+ goto out;
+ }
+
+ extend = true;
+ ext4_journal_stop(handle);
+ }
+
+ ret = iomap_dio_rw(iocb, from, &ext4_iomap_ops, &ext4_dio_write_ops,
+ is_sync_kiocb(iocb) || unaligned_aio || extend);
+
+ if (extend)
+ ret = ext4_handle_inode_extension(inode, offset, ret, count);
+
+out:
+ if (overwrite)
+ inode_unlock_shared(inode);
+ else
+ inode_unlock(inode);
+
+ if (ret >= 0 && iov_iter_count(from)) {
+ ssize_t err;
+ loff_t endbyte;
+
+ offset = iocb->ki_pos;
+ err = ext4_buffered_write_iter(iocb, from);
+ if (err < 0)
+ return err;
+
+ /*
+ * We need to ensure that the pages within the page cache for
+ * the range covered by this I/O are written to disk and
+		 * invalidated. This is an attempt to preserve the expected
+		 * direct I/O semantics in the case we fall back to buffered
+		 * I/O to complete the I/O request.
+ */
+ ret += err;
+ endbyte = offset + err - 1;
+ err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
+ offset, endbyte);
+ if (!err)
+ invalidate_mapping_pages(iocb->ki_filp->f_mapping,
+ offset >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+ }
+
+ return ret;
+}
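Condensed decision ladder of ext4_dio_write_iter() above (a summary, not patch text):

	/*
	 *	IOCB_NOWAIT && !trylock          -> -EAGAIN
	 *	!ext4_dio_supported()            -> buffered write path
	 *	unaligned async extent write     -> inode_dio_wait(), stays
	 *	                                    under the exclusive lock
	 *	overwrite && dioread_nolock      -> downgrade to shared lock
	 *	offset + count > i_disksize      -> orphan-add, fix up via
	 *	                                    ext4_handle_inode_extension()
	 *	short direct write               -> finish buffered, then write
	 *	                                    back + invalidate that range
	 */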
#ifdef CONFIG_FS_DAX
- if (IS_DAX(inode))
- return ext4_dax_write_iter(iocb, from);
-#endif
+static ssize_t
+ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ ssize_t ret;
+ size_t count;
+ loff_t offset;
+ handle_t *handle;
+ bool extend = false;
+ struct inode *inode = file_inode(iocb->ki_filp);
if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -241,49 +497,55 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret <= 0)
goto out;
- /*
- * Unaligned direct AIO must be serialized among each other as zeroing
- * of partial blocks of two competing unaligned AIOs can result in data
- * corruption.
- */
- if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
- !is_sync_kiocb(iocb) &&
- ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
- unaligned_aio = 1;
- ext4_unwritten_wait(inode);
- }
+ offset = iocb->ki_pos;
+ count = iov_iter_count(from);
- iocb->private = &overwrite;
- /* Check whether we do a DIO overwrite or not */
- if (o_direct && !unaligned_aio) {
- if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
- if (ext4_should_dioread_nolock(inode))
- overwrite = 1;
- } else if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
+ if (offset + count > EXT4_I(inode)->i_disksize) {
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
goto out;
}
- }
- ret = __generic_file_write_iter(iocb, from);
- /*
- * Unaligned direct AIO must be the only IO in flight. Otherwise
- * overlapping aligned IO after unaligned might result in data
- * corruption.
- */
- if (ret == -EIOCBQUEUED && unaligned_aio)
- ext4_unwritten_wait(inode);
- inode_unlock(inode);
+ ret = ext4_orphan_add(handle, inode);
+ if (ret) {
+ ext4_journal_stop(handle);
+ goto out;
+ }
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
+ extend = true;
+ ext4_journal_stop(handle);
+ }
- return ret;
+ ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+ if (extend)
+ ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
inode_unlock(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
return ret;
}
+#endif
+
+static ssize_t
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
+#ifdef CONFIG_FS_DAX
+ if (IS_DAX(inode))
+ return ext4_dax_write_iter(iocb, from);
+#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext4_dio_write_iter(iocb, from);
+
+ return ext4_buffered_write_iter(iocb, from);
+}
#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
@@ -494,12 +756,14 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
maxbytes, i_size_read(inode));
case SEEK_HOLE:
inode_lock_shared(inode);
- offset = iomap_seek_hole(inode, offset, &ext4_iomap_ops);
+ offset = iomap_seek_hole(inode, offset,
+ &ext4_iomap_report_ops);
inode_unlock_shared(inode);
break;
case SEEK_DATA:
inode_lock_shared(inode);
- offset = iomap_seek_data(inode, offset, &ext4_iomap_ops);
+ offset = iomap_seek_data(inode, offset,
+ &ext4_iomap_report_ops);
inode_unlock_shared(inode);
break;
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 5508baa11bb6..e10206e7f4bb 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -80,6 +80,43 @@ static int ext4_sync_parent(struct inode *inode)
return ret;
}
+static int ext4_fsync_nojournal(struct inode *inode, bool datasync,
+ bool *needs_barrier)
+{
+ int ret, err;
+
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (!(inode->i_state & I_DIRTY_ALL))
+ return ret;
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ return ret;
+
+ err = sync_inode_metadata(inode, 1);
+ if (!ret)
+ ret = err;
+
+ if (!ret)
+ ret = ext4_sync_parent(inode);
+ if (test_opt(inode->i_sb, BARRIER))
+ *needs_barrier = true;
+
+ return ret;
+}
+
+static int ext4_fsync_journal(struct inode *inode, bool datasync,
+ bool *needs_barrier)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
+
+ if (journal->j_flags & JBD2_BARRIER &&
+ !jbd2_trans_will_send_data_barrier(journal, commit_tid))
+ *needs_barrier = true;
+
+ return jbd2_complete_transaction(journal, commit_tid);
+}
+
/*
* akpm: A new design for ext4_sync_file().
*
@@ -91,17 +128,14 @@ static int ext4_sync_parent(struct inode *inode)
* What we do is just kick off a commit and wait on it. This will snapshot the
* inode to disk.
*/
-
int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
- journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
int ret = 0, err;
- tid_t commit_tid;
bool needs_barrier = false;
+ struct inode *inode = file->f_mapping->host;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(sbi)))
return -EIO;
J_ASSERT(ext4_journal_current_handle() == NULL);
@@ -111,23 +145,15 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (sb_rdonly(inode->i_sb)) {
/* Make sure that we read updated s_mount_flags value */
smp_rmb();
- if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ret = -EROFS;
goto out;
}
- if (!journal) {
- ret = __generic_file_fsync(file, start, end, datasync);
- if (!ret)
- ret = ext4_sync_parent(inode);
- if (test_opt(inode->i_sb, BARRIER))
- goto issue_flush;
- goto out;
- }
-
ret = file_write_and_wait_range(file, start, end);
if (ret)
return ret;
+
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
@@ -142,18 +168,14 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* (they were dirtied by commit). But that's OK - the blocks are
* safe in-journal, which is all fsync() needs to ensure.
*/
- if (ext4_should_journal_data(inode)) {
+ if (!sbi->s_journal)
+ ret = ext4_fsync_nojournal(inode, datasync, &needs_barrier);
+ else if (ext4_should_journal_data(inode))
ret = ext4_force_commit(inode->i_sb);
- goto out;
- }
+ else
+ ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
- commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
- if (journal->j_flags & JBD2_BARRIER &&
- !jbd2_trans_will_send_data_barrier(journal, commit_tid))
- needs_barrier = true;
- ret = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
- issue_flush:
err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
if (!ret)
ret = err;
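The resulting dispatch in ext4_sync_file() (a summary of the hunks above):

	/*
	 *	no journal             -> ext4_fsync_nojournal()
	 *	data-journalled inode  -> ext4_force_commit()
	 *	otherwise              -> ext4_fsync_journal()
	 *
	 * followed by a single blkdev_issue_flush() when a barrier is
	 * still needed.
	 */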
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 764ff4c56233..dc333e8e51e8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -265,13 +265,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
ext4_debug("freeing inode %lu\n", ino);
trace_ext4_free_inode(inode);
- /*
- * Note: we must free any quota before locking the superblock,
- * as writing the quota to disk may need the lock as well.
- */
dquot_initialize(inode);
dquot_free_inode(inode);
- dquot_drop(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -927,7 +922,7 @@ repeat_in_this_group:
BUG_ON(nblocks <= 0);
handle = __ext4_journal_start_sb(dir->i_sb, line_no,
handle_type, nblocks,
- 0);
+ 0, 0);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
ext4_std_error(sb, err);
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 36699a131168..3a4ab70fe9e0 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -331,11 +331,14 @@ static int ext4_alloc_branch(handle_t *handle,
for (i = 0; i <= indirect_blks; i++) {
if (i == indirect_blks) {
new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
- } else
+ } else {
ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
ar->inode, ar->goal,
ar->flags & EXT4_MB_DELALLOC_RESERVED,
NULL, &err);
+ /* Simplify error cleanup... */
+ branch[i+1].bh = NULL;
+ }
if (err) {
i--;
goto failed;
@@ -377,18 +380,25 @@ static int ext4_alloc_branch(handle_t *handle,
}
return 0;
failed:
+ if (i == indirect_blks) {
+ /* Free data blocks */
+ ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+ ar->len, 0);
+ i--;
+ }
for (; i >= 0; i--) {
/*
* We want to ext4_forget() only freshly allocated indirect
- * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
- * buffer at branch[0].bh is indirect block / inode already
- * existing before ext4_alloc_branch() was called.
+ * blocks. Buffer for new_blocks[i] is at branch[i+1].bh
+ * (buffer at branch[0].bh is indirect block / inode already
+ * existing before ext4_alloc_branch() was called). Also
+ * because blocks are freshly allocated, we don't need to
+		 * revoke them, which is why we don't set
+ * EXT4_FREE_BLOCKS_METADATA.
*/
- if (i > 0 && i != indirect_blks && branch[i].bh)
- ext4_forget(handle, 1, ar->inode, branch[i].bh,
- branch[i].bh->b_blocknr);
- ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
- (i == indirect_blks) ? ar->len : 1, 0);
+ ext4_free_blocks(handle, ar->inode, branch[i+1].bh,
+ new_blocks[i], 1,
+ branch[i+1].bh ? EXT4_FREE_BLOCKS_FORGET : 0);
}
return err;
}
@@ -689,27 +699,63 @@ int ext4_ind_trans_blocks(struct inode *inode, int nrblocks)
return DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
+static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, int *dropped)
+{
+ int err;
+
+ if (bh) {
+ BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (unlikely(err))
+ return err;
+ }
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err))
+ return err;
+ /*
+ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
+ * moment, get_block can be called only for blocks inside i_size since
+ * page cache has been already dropped and writes are blocked by
+ * i_mutex. So we can safely drop the i_data_sem here.
+ */
+ BUG_ON(EXT4_JOURNAL(inode) == NULL);
+ ext4_discard_preallocations(inode);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ *dropped = 1;
+ return 0;
+}
+
/*
* Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
* sure we don't overflow the journal.
*
* Try to extend this transaction for the purposes of truncation. If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- *
- * Returns 0 if we managed to create more room. If we can't create more
- * room, and the transaction must be restarted we return 1.
+ * extend fails, we restart the transaction.
*/
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+static int ext4_ind_truncate_ensure_credits(handle_t *handle,
+ struct inode *inode,
+ struct buffer_head *bh,
+ int revoke_creds)
{
- if (!ext4_handle_valid(handle))
- return 0;
- if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
- return 0;
- if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
- return 0;
- return 1;
+ int ret;
+ int dropped = 0;
+
+ ret = ext4_journal_ensure_credits_fn(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_blocks_for_truncate(inode), revoke_creds,
+ ext4_ind_trunc_restart_fn(handle, inode, bh, &dropped));
+ if (dropped)
+ down_write(&EXT4_I(inode)->i_data_sem);
+ if (ret <= 0)
+ return ret;
+ if (bh) {
+ BUFFER_TRACE(bh, "retaking write access");
+ ret = ext4_journal_get_write_access(handle, bh);
+ if (unlikely(ret))
+ return ret;
+ }
+ return 0;
}
/*
@@ -844,27 +890,10 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
return 1;
}
- if (try_to_extend_transaction(handle, inode)) {
- if (bh) {
- BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, inode, bh);
- if (unlikely(err))
- goto out_err;
- }
- err = ext4_mark_inode_dirty(handle, inode);
- if (unlikely(err))
- goto out_err;
- err = ext4_truncate_restart_trans(handle, inode,
- ext4_blocks_for_truncate(inode));
- if (unlikely(err))
- goto out_err;
- if (bh) {
- BUFFER_TRACE(bh, "retaking write access");
- err = ext4_journal_get_write_access(handle, bh);
- if (unlikely(err))
- goto out_err;
- }
- }
+ err = ext4_ind_truncate_ensure_credits(handle, inode, bh,
+ ext4_free_data_revoke_credits(inode, count));
+ if (err < 0)
+ goto out_err;
for (p = first; p < last; p++)
*p = 0;
@@ -1047,11 +1076,11 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
*/
if (ext4_handle_is_aborted(handle))
return;
- if (try_to_extend_transaction(handle, inode)) {
- ext4_mark_inode_dirty(handle, inode);
- ext4_truncate_restart_trans(handle, inode,
- ext4_blocks_for_truncate(inode));
- }
+ if (ext4_ind_truncate_ensure_credits(handle, inode,
+ NULL,
+ ext4_free_metadata_revoke_credits(
+ inode->i_sb, 1)) < 0)
+ return;
/*
* The forget flag here is critical because if
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a7ca65177980..28f28de0c1b6 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -164,39 +164,18 @@ int ext4_inode_is_fast_symlink(struct inode *inode)
}
/*
- * Restart the transaction associated with *handle. This does a commit,
- * so before we call here everything must be consistently dirtied against
- * this transaction.
- */
-int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
- int nblocks)
-{
- int ret;
-
- /*
- * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
- * moment, get_block can be called only for blocks inside i_size since
- * page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
- */
- BUG_ON(EXT4_JOURNAL(inode) == NULL);
- jbd_debug(2, "restarting handle %p\n", handle);
- up_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_journal_restart(handle, nblocks);
- down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
-
- return ret;
-}
-
-/*
* Called at the last iput() if i_nlink is zero.
*/
void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
- int extra_credits = 3;
+ /*
+ * Credits for final inode cleanup and freeing:
+ * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
+ * (xattr block freeing), bitmap, group descriptor (inode freeing)
+ */
+ int extra_credits = 6;
struct ext4_xattr_inode_array *ea_inode_array = NULL;
trace_ext4_evict_inode(inode);
@@ -252,8 +231,12 @@ void ext4_evict_inode(struct inode *inode)
if (!IS_NOQUOTA(inode))
extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
+ /*
+ * Block bitmap, group descriptor, and inode are accounted in both
+ * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
+ */
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
- ext4_blocks_for_truncate(inode)+extra_credits);
+ ext4_blocks_for_truncate(inode) + extra_credits - 3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
@@ -827,136 +810,6 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
#define DIO_MAX_BLOCKS 4096
/*
- * Get blocks function for the cases that need to start a transaction -
- * generally difference cases of direct IO and DAX IO. It also handles retries
- * in case of ENOSPC.
- */
-static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int flags)
-{
- int dio_credits;
- handle_t *handle;
- int retries = 0;
- int ret;
-
- /* Trim mapping request to maximum we can map at once for DIO */
- if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
- bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
- dio_credits = ext4_chunk_trans_blocks(inode,
- bh_result->b_size >> inode->i_blkbits);
-retry:
- handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- ret = _ext4_get_block(inode, iblock, bh_result, flags);
- ext4_journal_stop(handle);
-
- if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
- return ret;
-}
-
-/* Get block function for DIO reads and writes to inodes without extents */
-int ext4_dio_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create)
-{
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- if (!create)
- return _ext4_get_block(inode, iblock, bh, 0);
- return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
-}
-
-/*
- * Get block function for AIO DIO writes when we create unwritten extent if
- * blocks are not allocated yet. The extent will be converted to written
- * after IO is complete.
- */
-static int ext4_dio_get_block_unwritten_async(struct inode *inode,
- sector_t iblock, struct buffer_head *bh_result, int create)
-{
- int ret;
-
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- ret = ext4_get_block_trans(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
-
- /*
- * When doing DIO using unwritten extents, we need io_end to convert
- * unwritten extents to written on IO completion. We allocate io_end
- * once we spot unwritten extent and store it in b_private. Generic
- * DIO code keeps b_private set and furthermore passes the value to
- * our completion callback in 'private' argument.
- */
- if (!ret && buffer_unwritten(bh_result)) {
- if (!bh_result->b_private) {
- ext4_io_end_t *io_end;
-
- io_end = ext4_init_io_end(inode, GFP_KERNEL);
- if (!io_end)
- return -ENOMEM;
- bh_result->b_private = io_end;
- ext4_set_io_unwritten_flag(inode, io_end);
- }
- set_buffer_defer_completion(bh_result);
- }
-
- return ret;
-}
-
-/*
- * Get block function for non-AIO DIO writes when we create unwritten extent if
- * blocks are not allocated yet. The extent will be converted to written
- * after IO is complete by ext4_direct_IO_write().
- */
-static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
- sector_t iblock, struct buffer_head *bh_result, int create)
-{
- int ret;
-
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- ret = ext4_get_block_trans(inode, iblock, bh_result,
- EXT4_GET_BLOCKS_IO_CREATE_EXT);
-
- /*
- * Mark inode as having pending DIO writes to unwritten extents.
- * ext4_direct_IO_write() checks this flag and converts extents to
- * written.
- */
- if (!ret && buffer_unwritten(bh_result))
- ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
-
- return ret;
-}
-
-static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- int ret;
-
- ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
- inode->i_ino, create);
- /* We don't expect handle for direct IO */
- WARN_ON_ONCE(ext4_journal_current_handle());
-
- ret = _ext4_get_block(inode, iblock, bh_result, 0);
- /*
- * Blocks should have been preallocated! ext4_file_write_iter() checks
- * that.
- */
- WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));
-
- return ret;
-}
-
-
-/*
* `handle' can be NULL if create is zero
*/
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
@@ -2341,6 +2194,79 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
}
/*
+ * mpage_process_page - update page buffers corresponding to a changed extent
+ * and possibly submit a fully mapped page for IO
+ *
+ * @mpd - description of extent to map, on return next extent to map
+ * @m_lblk - logical block mapping.
+ * @m_pblk - corresponding physical mapping.
+ * @map_bh - determines on return whether this page requires any further
+ * mapping or not.
+ * Scan the page buffers corresponding to the changed extent and update the
+ * buffer state according to the new extent state.
+ * We map delalloc buffers to their physical location and clear unwritten bits.
+ * If the given page is not fully mapped, we update @mpd->map to the next
+ * extent in the page that needs mapping and return @map_bh as true.
+ */
+static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
+ ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
+ bool *map_bh)
+{
+ struct buffer_head *head, *bh;
+ ext4_io_end_t *io_end = mpd->io_submit.io_end;
+ ext4_lblk_t lblk = *m_lblk;
+ ext4_fsblk_t pblock = *m_pblk;
+ int err = 0;
+ int blkbits = mpd->inode->i_blkbits;
+ ssize_t io_end_size = 0;
+ struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
+
+ bh = head = page_buffers(page);
+ do {
+ if (lblk < mpd->map.m_lblk)
+ continue;
+ if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
+ /*
+ * Buffer after end of mapped extent.
+ * Find next buffer in the page to map.
+ */
+ mpd->map.m_len = 0;
+ mpd->map.m_flags = 0;
+ io_end_vec->size += io_end_size;
+ io_end_size = 0;
+
+ err = mpage_process_page_bufs(mpd, head, bh, lblk);
+ if (err > 0)
+ err = 0;
+ if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
+ io_end_vec = ext4_alloc_io_end_vec(io_end);
+ if (IS_ERR(io_end_vec)) {
+ err = PTR_ERR(io_end_vec);
+ goto out;
+ }
+ io_end_vec->offset = mpd->map.m_lblk << blkbits;
+ }
+ *map_bh = true;
+ goto out;
+ }
+ if (buffer_delay(bh)) {
+ clear_buffer_delay(bh);
+ bh->b_blocknr = pblock++;
+ }
+ clear_buffer_unwritten(bh);
+ io_end_size += (1 << blkbits);
+ } while (lblk++, (bh = bh->b_this_page) != head);
+
+ io_end_vec->size += io_end_size;
+ io_end_size = 0;
+ *map_bh = false;
+out:
+ *m_lblk = lblk;
+ *m_pblk = pblock;
+ return err;
+}
+
+/*
* mpage_map_buffers - update buffers corresponding to changed extent and
* submit fully mapped pages for IO
*
@@ -2359,12 +2285,12 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
struct pagevec pvec;
int nr_pages, i;
struct inode *inode = mpd->inode;
- struct buffer_head *head, *bh;
int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
- sector_t pblock;
+ ext4_fsblk_t pblock;
int err;
+ bool map_bh = false;
start = mpd->map.m_lblk >> bpp_bits;
end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
@@ -2380,50 +2306,19 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- bh = head = page_buffers(page);
- do {
- if (lblk < mpd->map.m_lblk)
- continue;
- if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
- /*
- * Buffer after end of mapped extent.
- * Find next buffer in the page to map.
- */
- mpd->map.m_len = 0;
- mpd->map.m_flags = 0;
- /*
- * FIXME: If dioread_nolock supports
- * blocksize < pagesize, we need to make
- * sure we add size mapped so far to
- * io_end->size as the following call
- * can submit the page for IO.
- */
- err = mpage_process_page_bufs(mpd, head,
- bh, lblk);
- pagevec_release(&pvec);
- if (err > 0)
- err = 0;
- return err;
- }
- if (buffer_delay(bh)) {
- clear_buffer_delay(bh);
- bh->b_blocknr = pblock++;
- }
- clear_buffer_unwritten(bh);
- } while (lblk++, (bh = bh->b_this_page) != head);
-
+ err = mpage_process_page(mpd, page, &lblk, &pblock,
+ &map_bh);
/*
- * FIXME: This is going to break if dioread_nolock
- * supports blocksize < pagesize as we will try to
- * convert potentially unmapped parts of inode.
+			 * If map_bh is true, the page may require further bh
+			 * mapping, or it may have been submitted for IO,
+			 * so return to the caller for further extent mapping.
*/
- mpd->io_submit.io_end->size += PAGE_SIZE;
+ if (err < 0 || map_bh == true)
+ goto out;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
- if (err < 0) {
- pagevec_release(&pvec);
- return err;
- }
+ if (err < 0)
+ goto out;
}
pagevec_release(&pvec);
}
@@ -2431,6 +2326,9 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
return 0;
+out:
+ pagevec_release(&pvec);
+ return err;
}
static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
@@ -2510,9 +2408,13 @@ static int mpage_map_and_submit_extent(handle_t *handle,
int err;
loff_t disksize;
int progress = 0;
+ ext4_io_end_t *io_end = mpd->io_submit.io_end;
+ struct ext4_io_end_vec *io_end_vec;
- mpd->io_submit.io_end->offset =
- ((loff_t)map->m_lblk) << inode->i_blkbits;
+ io_end_vec = ext4_alloc_io_end_vec(io_end);
+ if (IS_ERR(io_end_vec))
+ return PTR_ERR(io_end_vec);
+ io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
do {
err = mpage_map_one_extent(handle, mpd);
if (err < 0) {
@@ -3406,473 +3308,235 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
return inode->i_state & I_DIRTY_DATASYNC;
}
-static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
- unsigned flags, struct iomap *iomap)
+static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
+ struct ext4_map_blocks *map, loff_t offset,
+ loff_t length)
{
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned int blkbits = inode->i_blkbits;
- unsigned long first_block, last_block;
- struct ext4_map_blocks map;
- bool delalloc = false;
- int ret;
-
- if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
- return -EINVAL;
- first_block = offset >> blkbits;
- last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
- EXT4_MAX_LOGICAL_BLOCK);
-
- if (flags & IOMAP_REPORT) {
- if (ext4_has_inline_data(inode)) {
- ret = ext4_inline_data_iomap(inode, iomap);
- if (ret != -EAGAIN) {
- if (ret == 0 && offset >= iomap->length)
- ret = -ENOENT;
- return ret;
- }
- }
- } else {
- if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
- return -ERANGE;
- }
-
- map.m_lblk = first_block;
- map.m_len = last_block - first_block + 1;
-
- if (flags & IOMAP_REPORT) {
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0)
- return ret;
-
- if (ret == 0) {
- ext4_lblk_t end = map.m_lblk + map.m_len - 1;
- struct extent_status es;
-
- ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
- map.m_lblk, end, &es);
-
- if (!es.es_len || es.es_lblk > end) {
- /* entire range is a hole */
- } else if (es.es_lblk > map.m_lblk) {
- /* range starts with a hole */
- map.m_len = es.es_lblk - map.m_lblk;
- } else {
- ext4_lblk_t offs = 0;
-
- if (es.es_lblk < map.m_lblk)
- offs = map.m_lblk - es.es_lblk;
- map.m_lblk = es.es_lblk + offs;
- map.m_len = es.es_len - offs;
- delalloc = true;
- }
- }
- } else if (flags & IOMAP_WRITE) {
- int dio_credits;
- handle_t *handle;
- int retries = 0;
-
- /* Trim mapping request to maximum we can map at once for DIO */
- if (map.m_len > DIO_MAX_BLOCKS)
- map.m_len = DIO_MAX_BLOCKS;
- dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
-retry:
- /*
- * Either we allocate blocks and then we don't get unwritten
- * extent so we have reserved enough credits, or the blocks
- * are already allocated and unwritten and in that case
- * extent conversion fits in the credits as well.
- */
- handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
- dio_credits);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- ret = ext4_map_blocks(handle, inode, &map,
- EXT4_GET_BLOCKS_CREATE_ZERO);
- if (ret < 0) {
- ext4_journal_stop(handle);
- if (ret == -ENOSPC &&
- ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry;
- return ret;
- }
-
- /*
- * If we added blocks beyond i_size, we need to make sure they
- * will get truncated if we crash before updating i_size in
- * ext4_iomap_end(). For faults we don't need to do that (and
- * even cannot because for orphan list operations inode_lock is
- * required) - if we happen to instantiate block beyond i_size,
- * it is because we race with truncate which has already added
- * the inode to the orphan list.
- */
- if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
- (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
- int err;
-
- err = ext4_orphan_add(handle, inode);
- if (err < 0) {
- ext4_journal_stop(handle);
- return err;
- }
- }
- ext4_journal_stop(handle);
- } else {
- ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0)
- return ret;
- }
+ u8 blkbits = inode->i_blkbits;
+ /*
+ * Writes that span EOF might trigger an I/O size update on completion,
+	 * so consider them to be dirty for the purposes of O_DSYNC, even if
+	 * there are no other metadata changes being made or pending.
+ */
iomap->flags = 0;
- if (ext4_inode_datasync_dirty(inode))
+ if (ext4_inode_datasync_dirty(inode) ||
+ offset + length > i_size_read(inode))
iomap->flags |= IOMAP_F_DIRTY;
+
+ if (map->m_flags & EXT4_MAP_NEW)
+ iomap->flags |= IOMAP_F_NEW;
+
iomap->bdev = inode->i_sb->s_bdev;
- iomap->dax_dev = sbi->s_daxdev;
- iomap->offset = (u64)first_block << blkbits;
- iomap->length = (u64)map.m_len << blkbits;
+ iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
+ iomap->offset = (u64) map->m_lblk << blkbits;
+ iomap->length = (u64) map->m_len << blkbits;
- if (ret == 0) {
- iomap->type = delalloc ? IOMAP_DELALLOC : IOMAP_HOLE;
- iomap->addr = IOMAP_NULL_ADDR;
+ /*
+ * Flags passed to ext4_map_blocks() for direct I/O writes can result
+ * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
+ * set. In order for any allocated unwritten extents to be converted
+	 * need to ensure that the iomap->type is set appropriately. Hence, we
+	 * check whether the EXT4_MAP_UNWRITTEN bit has been set first.
+ * been set first.
+ */
+ if (map->m_flags & EXT4_MAP_UNWRITTEN) {
+ iomap->type = IOMAP_UNWRITTEN;
+ iomap->addr = (u64) map->m_pblk << blkbits;
+ } else if (map->m_flags & EXT4_MAP_MAPPED) {
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = (u64) map->m_pblk << blkbits;
} else {
- if (map.m_flags & EXT4_MAP_MAPPED) {
- iomap->type = IOMAP_MAPPED;
- } else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
- iomap->type = IOMAP_UNWRITTEN;
- } else {
- WARN_ON_ONCE(1);
- return -EIO;
- }
- iomap->addr = (u64)map.m_pblk << blkbits;
+ iomap->type = IOMAP_HOLE;
+ iomap->addr = IOMAP_NULL_ADDR;
}
-
- if (map.m_flags & EXT4_MAP_NEW)
- iomap->flags |= IOMAP_F_NEW;
-
- return 0;
}
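
A note on the ordering in ext4_set_iomap(): a direct I/O write can leave both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN set, and the unwritten case must win so the ->end_io() handler converts the extent. The decision table reduces to this sketch (flag and type values here are illustrative, not the kernel's definitions):

	#include <stdio.h>

	/* Illustrative flag bits standing in for EXT4_MAP_*. */
	#define MAP_MAPPED	0x1
	#define MAP_UNWRITTEN	0x2

	enum iomap_type { TYPE_HOLE, TYPE_MAPPED, TYPE_UNWRITTEN };

	static enum iomap_type classify(unsigned int m_flags)
	{
		/* Unwritten must be checked first: both bits can be set at once. */
		if (m_flags & MAP_UNWRITTEN)
			return TYPE_UNWRITTEN;
		if (m_flags & MAP_MAPPED)
			return TYPE_MAPPED;
		return TYPE_HOLE;
	}

	int main(void)
	{
		printf("%d\n", classify(MAP_MAPPED | MAP_UNWRITTEN)); /* 2: unwritten wins */
		printf("%d\n", classify(MAP_MAPPED));                 /* 1: mapped */
		printf("%d\n", classify(0));                          /* 0: hole */
		return 0;
	}
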
-static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
- ssize_t written, unsigned flags, struct iomap *iomap)
+static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
+ unsigned int flags)
{
- int ret = 0;
handle_t *handle;
- int blkbits = inode->i_blkbits;
- bool truncate = false;
+ u8 blkbits = inode->i_blkbits;
+ int ret, dio_credits, m_flags = 0, retries = 0;
- if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
- return 0;
-
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto orphan_del;
- }
- if (ext4_update_inode_size(inode, offset + written))
- ext4_mark_inode_dirty(handle, inode);
/*
- * We may need to truncate allocated but not written blocks beyond EOF.
+ * Trim the mapping request to the maximum value that we can map at
+ * once for direct I/O.
*/
- if (iomap->offset + iomap->length >
- ALIGN(inode->i_size, 1 << blkbits)) {
- ext4_lblk_t written_blk, end_blk;
+ if (map->m_len > DIO_MAX_BLOCKS)
+ map->m_len = DIO_MAX_BLOCKS;
+ dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
- written_blk = (offset + written) >> blkbits;
- end_blk = (offset + length) >> blkbits;
- if (written_blk < end_blk && ext4_can_truncate(inode))
- truncate = true;
- }
+retry:
/*
- * Remove inode from orphan list if we were extending a inode and
- * everything went fine.
+	 * Either we allocate blocks and then do not get an unwritten extent,
+	 * in which case we have reserved enough credits; or the blocks are
+	 * already allocated and unwritten, in which case the extent
+	 * conversion fits into the credits as well.
*/
- if (!truncate && inode->i_nlink &&
- !list_empty(&EXT4_I(inode)->i_orphan))
- ext4_orphan_del(handle, inode);
- ext4_journal_stop(handle);
- if (truncate) {
- ext4_truncate_failed_write(inode);
-orphan_del:
- /*
- * If truncate failed early the inode might still be on the
- * orphan list; we need to make sure the inode is removed from
- * the orphan list in that case.
- */
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
- }
- return ret;
-}
-
-const struct iomap_ops ext4_iomap_ops = {
- .iomap_begin = ext4_iomap_begin,
- .iomap_end = ext4_iomap_end,
-};
-
-static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
- ssize_t size, void *private)
-{
- ext4_io_end_t *io_end = private;
+ handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- /* if not async direct IO just return */
- if (!io_end)
- return 0;
+ /*
+ * DAX and direct I/O are the only two operations that are currently
+ * supported with IOMAP_WRITE.
+ */
+ WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
+ if (IS_DAX(inode))
+ m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
+ /*
+ * We use i_size instead of i_disksize here because delalloc writeback
+ * can complete at any point during the I/O and subsequently push the
+ * i_disksize out to i_size. This could be beyond where direct I/O is
+ * happening and thus expose allocated blocks to direct I/O reads.
+ */
+ else if ((map->m_lblk * (1 << blkbits)) >= i_size_read(inode))
+ m_flags = EXT4_GET_BLOCKS_CREATE;
+ else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
- ext_debug("ext4_end_io_dio(): io_end 0x%p "
- "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
- io_end, io_end->inode->i_ino, iocb, offset, size);
+ ret = ext4_map_blocks(handle, inode, map, m_flags);
/*
- * Error during AIO DIO. We cannot convert unwritten extents as the
- * data was not written. Just clear the unwritten flag and drop io_end.
+	 * We cannot fill holes in indirect-tree-based inodes as that could
+	 * expose stale data in the case of a crash. Use the magic error code
+	 * to fall back to buffered I/O.
*/
- if (size <= 0) {
- ext4_clear_io_unwritten_flag(io_end);
- size = 0;
- }
- io_end->offset = offset;
- io_end->size = size;
- ext4_put_io_end(io_end);
+ if (!m_flags && !ret)
+ ret = -ENOTBLK;
- return 0;
+ ext4_journal_stop(handle);
+ if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
+
+ return ret;
}
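
The creation-flag ladder in ext4_iomap_alloc() has a deliberate fall-through: an indirect-tree inode writing inside i_size gets no creation flag at all, and an unallocated range then surfaces as -ENOTBLK to force buffered I/O. A hedged sketch of that decision (the mode names are illustrative stand-ins for the EXT4_GET_BLOCKS_* flags):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum create_mode { CREATE_NONE, CREATE_ZERO, CREATE, CREATE_UNWRITTEN };

	static enum create_mode pick_mode(bool is_dax, bool past_eof, bool has_extents)
	{
		if (is_dax)
			return CREATE_ZERO;	/* DAX wants zeroed blocks */
		if (past_eof)
			return CREATE;		/* nothing can read past EOF yet */
		if (has_extents)
			return CREATE_UNWRITTEN; /* converted to written at end_io */
		return CREATE_NONE;		/* indirect inode inside i_size */
	}

	int main(void)
	{
		/* CREATE_NONE plus an unallocated range maps to -ENOTBLK upstream. */
		if (pick_mode(false, false, false) == CREATE_NONE)
			printf("would return %d to force buffered I/O\n", -ENOTBLK);
		return 0;
	}
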
-/*
- * Handling of direct IO writes.
- *
- * For ext4 extent files, ext4 will do direct-io write even to holes,
- * preallocated extents, and those write extend the file, no need to
- * fall back to buffered IO.
- *
- * For holes, we fallocate those blocks, mark them as unwritten
- * If those blocks were preallocated, we mark sure they are split, but
- * still keep the range to write as unwritten.
- *
- * The unwritten extents will be converted to written when DIO is completed.
- * For async direct IO, since the IO may still pending when return, we
- * set up an end_io call back function, which will do the conversion
- * when async direct IO completed.
- *
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list. So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- */
-static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
+
+static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned flags, struct iomap *iomap, struct iomap *srcmap)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct ext4_inode_info *ei = EXT4_I(inode);
- ssize_t ret;
- loff_t offset = iocb->ki_pos;
- size_t count = iov_iter_count(iter);
- int overwrite = 0;
- get_block_t *get_block_func = NULL;
- int dio_flags = 0;
- loff_t final_size = offset + count;
- int orphan = 0;
- handle_t *handle;
+ int ret;
+ struct ext4_map_blocks map;
+ u8 blkbits = inode->i_blkbits;
- if (final_size > inode->i_size || final_size > ei->i_disksize) {
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
- ret = ext4_orphan_add(handle, inode);
- if (ret) {
- ext4_journal_stop(handle);
- goto out;
- }
- orphan = 1;
- ext4_update_i_disksize(inode, inode->i_size);
- ext4_journal_stop(handle);
- }
+ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+ return -EINVAL;
- BUG_ON(iocb->private == NULL);
+ if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+ return -ERANGE;
/*
- * Make all waiters for direct IO properly wait also for extent
- * conversion. This also disallows race between truncate() and
- * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
+ * Calculate the first and last logical blocks respectively.
*/
- inode_dio_begin(inode);
+ map.m_lblk = offset >> blkbits;
+ map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+ EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
+
+ if (flags & IOMAP_WRITE)
+ ret = ext4_iomap_alloc(inode, &map, flags);
+ else
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+
+ if (ret < 0)
+ return ret;
- /* If we do a overwrite dio, i_mutex locking can be released */
- overwrite = *((int *)iocb->private);
+ ext4_set_iomap(inode, iomap, &map, offset, length);
- if (overwrite)
- inode_unlock(inode);
+ return 0;
+}
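
The first/last-block arithmetic in ext4_iomap_begin() is easiest to see with numbers. A throwaway sketch, assuming an illustrative 4 KiB block size and logical-block cap:

	#include <stdio.h>

	#define MAX_LBLK 0xffffffffULL	/* illustrative cap, like EXT4_MAX_LOGICAL_BLOCK */

	int main(void)
	{
		unsigned char blkbits = 12;		/* 4 KiB blocks */
		long long offset = 5000, length = 10000;
		unsigned long long first = offset >> blkbits;
		unsigned long long last = (offset + length - 1) >> blkbits;

		if (last > MAX_LBLK)
			last = MAX_LBLK;
		/* bytes [5000, 15000) span blocks 1..3 -> m_lblk=1, m_len=3 */
		printf("m_lblk=%llu m_len=%llu\n", first, last - first + 1);
		return 0;
	}
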
+static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
+{
/*
- * For extent mapped files we could direct write to holes and fallocate.
- *
- * Allocated blocks to fill the hole are marked as unwritten to prevent
- * parallel buffered read to expose the stale data before DIO complete
- * the data IO.
- *
- * As to previously fallocated extents, ext4 get_block will just simply
- * mark the buffer mapped but still keep the extents unwritten.
- *
- * For non AIO case, we will convert those unwritten extents to written
- * after return back from blockdev_direct_IO. That way we save us from
- * allocating io_end structure and also the overhead of offloading
- * the extent convertion to a workqueue.
- *
- * For async DIO, the conversion needs to be deferred when the
- * IO is completed. The ext4 end_io callback function will be
- * called to take care of the conversion work. Here for async
- * case, we allocate an io_end structure to hook to the iocb.
+ * Check to see whether an error occurred while writing out the data to
+ * the allocated blocks. If so, return the magic error code so that we
+	 * fall back to buffered I/O and attempt to complete the remainder of
+ * the I/O. Any blocks that may have been allocated in preparation for
+ * the direct I/O will be reused during buffered I/O.
*/
- iocb->private = NULL;
- if (overwrite)
- get_block_func = ext4_dio_get_block_overwrite;
- else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
- round_down(offset, i_blocksize(inode)) >= inode->i_size) {
- get_block_func = ext4_dio_get_block;
- dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
- } else if (is_sync_kiocb(iocb)) {
- get_block_func = ext4_dio_get_block_unwritten_sync;
- dio_flags = DIO_LOCKING;
- } else {
- get_block_func = ext4_dio_get_block_unwritten_async;
- dio_flags = DIO_LOCKING;
- }
- ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block_func, ext4_end_io_dio, NULL,
- dio_flags);
+ if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
+ return -ENOTBLK;
- if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
- EXT4_STATE_DIO_UNWRITTEN)) {
- int err;
- /*
- * for non AIO case, since the IO is already
- * completed, we could do the conversion right here
- */
- err = ext4_convert_unwritten_extents(NULL, inode,
- offset, ret);
- if (err < 0)
- ret = err;
- ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
- }
+ return 0;
+}
- inode_dio_end(inode);
- /* take i_mutex locking again if we do a ovewrite dio */
- if (overwrite)
- inode_lock(inode);
+const struct iomap_ops ext4_iomap_ops = {
+ .iomap_begin = ext4_iomap_begin,
+ .iomap_end = ext4_iomap_end,
+};
- if (ret < 0 && final_size > inode->i_size)
- ext4_truncate_failed_write(inode);
+static bool ext4_iomap_is_delalloc(struct inode *inode,
+ struct ext4_map_blocks *map)
+{
+ struct extent_status es;
+ ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
- /* Handle extending of i_size after direct IO write */
- if (orphan) {
- int err;
+ ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
+ map->m_lblk, end, &es);
- /* Credits for sb + inode write */
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
- if (IS_ERR(handle)) {
- /*
- * We wrote the data but cannot extend
- * i_size. Bail out. In async io case, we do
- * not return error here because we have
- * already submmitted the corresponding
- * bio. Returning error here makes the caller
- * think that this IO is done and failed
- * resulting in race with bio's completion
- * handler.
- */
- if (!ret)
- ret = PTR_ERR(handle);
- if (inode->i_nlink)
- ext4_orphan_del(NULL, inode);
+ if (!es.es_len || es.es_lblk > end)
+ return false;
- goto out;
- }
- if (inode->i_nlink)
- ext4_orphan_del(handle, inode);
- if (ret > 0) {
- loff_t end = offset + ret;
- if (end > inode->i_size || end > ei->i_disksize) {
- ext4_update_i_disksize(inode, end);
- if (end > inode->i_size)
- i_size_write(inode, end);
- /*
- * We're going to return a positive `ret'
- * here due to non-zero-length I/O, so there's
- * no way of reporting error returns from
- * ext4_mark_inode_dirty() to userspace. So
- * ignore it.
- */
- ext4_mark_inode_dirty(handle, inode);
- }
- }
- err = ext4_journal_stop(handle);
- if (ret == 0)
- ret = err;
+ if (es.es_lblk > map->m_lblk) {
+ map->m_len = es.es_lblk - map->m_lblk;
+ return false;
}
-out:
- return ret;
-}
-static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = mapping->host;
- size_t count = iov_iter_count(iter);
- ssize_t ret;
+ offset = map->m_lblk - es.es_lblk;
+ map->m_len = es.es_len - offset;
- /*
- * Shared inode_lock is enough for us - it protects against concurrent
- * writes & truncates and since we take care of writing back page cache,
- * we are protected against page writeback as well.
- */
- inode_lock_shared(inode);
- ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
- iocb->ki_pos + count - 1);
- if (ret)
- goto out_unlock;
- ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
- iter, ext4_dio_get_block, NULL, NULL, 0);
-out_unlock:
- inode_unlock_shared(inode);
- return ret;
+ return true;
}
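
ext4_iomap_is_delalloc() classifies the queried range against one cached extent-status entry: pure hole, hole up to the delalloc start (trim the length), or inside the delalloc extent. A self-contained model of that logic (the struct here is a stand-in, not the kernel's extent-status cache):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative extent-status entry: one cached delayed-allocation range. */
	struct es { unsigned lblk, len; };	/* len == 0 means nothing cached */

	/*
	 * Decide whether the queried range [*lblk, *lblk + *len) starts with a
	 * delayed allocation, trimming *len the way ext4_iomap_is_delalloc() does.
	 */
	static bool starts_delalloc(struct es *es, unsigned *lblk, unsigned *len)
	{
		unsigned end = *lblk + *len - 1;

		if (!es->len || es->lblk > end)
			return false;			/* whole range is a hole */
		if (es->lblk > *lblk) {
			*len = es->lblk - *lblk;	/* hole up to the delalloc */
			return false;
		}
		*len = es->len - (*lblk - es->lblk);	/* inside delalloc extent */
		return true;
	}

	int main(void)
	{
		struct es cached = { .lblk = 10, .len = 5 };
		unsigned lblk, len;

		lblk = 8, len = 10;
		printf("%d len=%u\n", starts_delalloc(&cached, &lblk, &len), len);
		/* prints "0 len=2": a hole up to block 10 */

		lblk = 12, len = 10;
		printf("%d len=%u\n", starts_delalloc(&cached, &lblk, &len), len);
		/* prints "1 len=3": inside the cached delalloc extent */
		return 0;
	}
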
-static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
+ loff_t length, unsigned int flags,
+ struct iomap *iomap, struct iomap *srcmap)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- size_t count = iov_iter_count(iter);
- loff_t offset = iocb->ki_pos;
- ssize_t ret;
+ int ret;
+ bool delalloc = false;
+ struct ext4_map_blocks map;
+ u8 blkbits = inode->i_blkbits;
-#ifdef CONFIG_FS_ENCRYPTION
- if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
- return 0;
-#endif
- if (fsverity_active(inode))
- return 0;
+ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+ return -EINVAL;
+
+ if (ext4_has_inline_data(inode)) {
+ ret = ext4_inline_data_iomap(inode, iomap);
+ if (ret != -EAGAIN) {
+ if (ret == 0 && offset >= iomap->length)
+ ret = -ENOENT;
+ return ret;
+ }
+ }
/*
- * If we are doing data journalling we don't support O_DIRECT
+	 * Calculate the first and last logical blocks respectively.
*/
- if (ext4_should_journal_data(inode))
- return 0;
+ map.m_lblk = offset >> blkbits;
+ map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
+ EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
- /* Let buffer I/O handle the inline data case. */
- if (ext4_has_inline_data(inode))
- return 0;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ delalloc = ext4_iomap_is_delalloc(inode, &map);
- trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
- if (iov_iter_rw(iter) == READ)
- ret = ext4_direct_IO_read(iocb, iter);
- else
- ret = ext4_direct_IO_write(iocb, iter);
- trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
- return ret;
+ ext4_set_iomap(inode, iomap, &map, offset, length);
+ if (delalloc && iomap->type == IOMAP_HOLE)
+ iomap->type = IOMAP_DELALLOC;
+
+ return 0;
}
+const struct iomap_ops ext4_iomap_report_ops = {
+ .iomap_begin = ext4_iomap_begin_report,
+};
+
/*
* Pages can be marked dirty completely asynchronously from ext4's journalling
* activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
@@ -3910,7 +3574,7 @@ static const struct address_space_operations ext4_aops = {
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
- .direct_IO = ext4_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -3927,7 +3591,7 @@ static const struct address_space_operations ext4_journalled_aops = {
.bmap = ext4_bmap,
.invalidatepage = ext4_journalled_invalidatepage,
.releasepage = ext4_releasepage,
- .direct_IO = ext4_direct_IO,
+ .direct_IO = noop_direct_IO,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
};
@@ -3943,7 +3607,7 @@ static const struct address_space_operations ext4_da_aops = {
.bmap = ext4_bmap,
.invalidatepage = ext4_invalidatepage,
.releasepage = ext4_releasepage,
- .direct_IO = ext4_direct_IO,
+ .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
@@ -5450,11 +5114,15 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
offset = inode->i_size & (PAGE_SIZE - 1);
/*
- * All buffers in the last page remain valid? Then there's nothing to
- * do. We do the check mainly to optimize the common PAGE_SIZE ==
- * blocksize case
+	 * If the page is fully truncated, we don't need to wait for any commit
+	 * (and we even should not, as __ext4_journalled_invalidatepage() may
+	 * strip all buffers from the page but keep the page dirty, which can
+	 * then confuse e.g. a concurrent ext4_writepage() seeing a dirty page
+	 * without buffers). We also don't need to wait for any commit if all
+	 * buffers in the page remain valid. This is most beneficial for the
+	 * common case of blocksize == PAGE_SIZE.
*/
- if (offset > PAGE_SIZE - i_blocksize(inode))
+ if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
return;
while (1) {
page = find_lock_page(inode->i_mapping,
@@ -5915,8 +5583,23 @@ static int __ext4_expand_extra_isize(struct inode *inode,
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
+ unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
+ struct ext4_inode_info *ei = EXT4_I(inode);
int error;
+ /* this was checked at iget time, but double check for good measure */
+ if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
+ (ei->i_extra_isize & 3)) {
+ EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
+ ei->i_extra_isize,
+ EXT4_INODE_SIZE(inode->i_sb));
+ return -EFSCORRUPTED;
+ }
+ if ((new_extra_isize < ei->i_extra_isize) ||
+ (new_extra_isize < 4) ||
+ (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
+ return -EINVAL; /* Should never happen */
+
raw_inode = ext4_raw_inode(iloc);
header = IHDR(inode, raw_inode);
@@ -5968,9 +5651,8 @@ static int ext4_try_to_expand_extra_isize(struct inode *inode,
* If this is felt to be critical, then e2fsck should be run to
* force a large enough s_min_extra_isize.
*/
- if (ext4_handle_valid(handle) &&
- jbd2_journal_extend(handle,
- EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) != 0)
+ if (ext4_journal_extend(handle,
+ EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
return -ENOSPC;
if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0b7f316fd30f..e8870fff8224 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1360,6 +1360,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
case EXT4_IOC_MOVE_EXT:
case EXT4_IOC_RESIZE_FS:
+ case FITRIM:
case EXT4_IOC_PRECACHE_EXTENTS:
case EXT4_IOC_SET_ENCRYPTION_POLICY:
case EXT4_IOC_GET_ENCRYPTION_PWSALT:
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b1e4d359f73b..89725fa42573 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -50,29 +50,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
needed = ext4_ext_calc_credits_for_single_extent(inode,
lb->last_block - lb->first_block + 1, path);
- /*
- * Make sure the credit we accumalated is not really high
- */
- if (needed && ext4_handle_has_enough_credits(handle,
- EXT4_RESERVE_TRANS_BLOCKS)) {
- up_write((&EXT4_I(inode)->i_data_sem));
- retval = ext4_journal_restart(handle, needed);
- down_write((&EXT4_I(inode)->i_data_sem));
- if (retval)
- goto err_out;
- } else if (needed) {
- retval = ext4_journal_extend(handle, needed);
- if (retval) {
- /*
- * IF not able to extend the journal restart the journal
- */
- up_write((&EXT4_I(inode)->i_data_sem));
- retval = ext4_journal_restart(handle, needed);
- down_write((&EXT4_I(inode)->i_data_sem));
- if (retval)
- goto err_out;
- }
- }
+ retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
+ if (retval < 0)
+ goto err_out;
retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
up_write((&EXT4_I(inode)->i_data_sem));
@@ -196,42 +176,30 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
}
-static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
-{
- int retval = 0, needed;
-
- if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
- return 0;
- /*
- * We are freeing a blocks. During this we touch
- * superblock, group descriptor and block bitmap.
- * So allocate a credit of 3. We may update
- * quota (user and group).
- */
- needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-
- if (ext4_journal_extend(handle, needed) != 0)
- retval = ext4_journal_restart(handle, needed);
-
- return retval;
-}
-
static int free_dind_blocks(handle_t *handle,
struct inode *inode, __le32 i_data)
{
int i;
__le32 *tmp_idata;
struct buffer_head *bh;
+ struct super_block *sb = inode->i_sb;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
+ int err;
- bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+ bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
tmp_idata = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
if (tmp_idata[i]) {
- extend_credit_for_blkdel(handle, inode);
+ err = ext4_journal_ensure_credits(handle,
+ EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(sb, 1));
+ if (err < 0) {
+ put_bh(bh);
+ return err;
+ }
ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(tmp_idata[i]), 1,
EXT4_FREE_BLOCKS_METADATA |
@@ -239,7 +207,10 @@ static int free_dind_blocks(handle_t *handle,
}
}
put_bh(bh);
- extend_credit_for_blkdel(handle, inode);
+ err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(sb, 1));
+ if (err < 0)
+ return err;
ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -270,7 +241,10 @@ static int free_tind_blocks(handle_t *handle,
}
}
put_bh(bh);
- extend_credit_for_blkdel(handle, inode);
+ retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (retval < 0)
+ return retval;
ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
EXT4_FREE_BLOCKS_METADATA |
EXT4_FREE_BLOCKS_FORGET);
@@ -283,7 +257,11 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
/* ei->i_data[EXT4_IND_BLOCK] */
if (i_data[0]) {
- extend_credit_for_blkdel(handle, inode);
+ retval = ext4_journal_ensure_credits(handle,
+ EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (retval < 0)
+ return retval;
ext4_free_blocks(handle, inode, NULL,
le32_to_cpu(i_data[0]), 1,
EXT4_FREE_BLOCKS_METADATA |
@@ -318,12 +296,9 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* One credit accounted for writing the
* i_data field of the original inode
*/
- retval = ext4_journal_extend(handle, 1);
- if (retval) {
- retval = ext4_journal_restart(handle, 1);
- if (retval)
- goto err_out;
- }
+ retval = ext4_journal_ensure_credits(handle, 1, 0);
+ if (retval < 0)
+ goto err_out;
i_data[0] = ei->i_data[EXT4_IND_BLOCK];
i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
@@ -391,15 +366,20 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
ix = EXT_FIRST_INDEX(eh);
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
retval = free_ext_idx(handle, inode, ix);
- if (retval)
- break;
+ if (retval) {
+ put_bh(bh);
+ return retval;
+ }
}
}
put_bh(bh);
- extend_credit_for_blkdel(handle, inode);
+ retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (retval < 0)
+ return retval;
ext4_free_blocks(handle, inode, NULL, block, 1,
EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
- return retval;
+ return 0;
}
/*
@@ -574,9 +554,9 @@ err_out:
}
/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
- if (ext4_journal_extend(handle, 1) != 0)
- ext4_journal_restart(handle, 1);
-
+ retval = ext4_journal_ensure_credits(handle, 1, 0);
+ if (retval < 0)
+ goto out_stop;
/*
* Mark the tmp_inode as of size zero
*/
@@ -594,6 +574,7 @@ err_out:
/* Reset the extent details */
ext4_ext_tree_init(handle, tmp_inode);
+out_stop:
ext4_journal_stop(handle);
out:
unlock_new_inode(tmp_inode);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a427d2031a8d..a856997d87b5 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2547,18 +2547,29 @@ static void ext4_dec_count(handle_t *handle, struct inode *inode)
}
+/*
+ * Add a non-directory inode to a directory. On success, the inode reference
+ * is consumed by the dentry's instantiation; this is also indicated by
+ * clearing the *inodep pointer. On failure, the caller is responsible for
+ * dropping the inode reference in a safe context.
+ */
static int ext4_add_nondir(handle_t *handle,
- struct dentry *dentry, struct inode *inode)
+ struct dentry *dentry, struct inode **inodep)
{
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct inode *inode = *inodep;
int err = ext4_add_entry(handle, dentry, inode);
if (!err) {
ext4_mark_inode_dirty(handle, inode);
+ if (IS_DIRSYNC(dir))
+ ext4_handle_sync(handle);
d_instantiate_new(dentry, inode);
+ *inodep = NULL;
return 0;
}
drop_nlink(inode);
+ ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
- iput(inode);
return err;
}
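
The reworked ext4_add_nondir() transfers inode ownership through a double pointer: on success it clears *inodep so the caller's unconditional iput() becomes a no-op. The idiom in miniature, using a generic heap resource instead of an inode:

	#include <stdio.h>
	#include <stdlib.h>

	struct res { int id; };

	static void put_res(struct res *r)
	{
		if (r) {
			printf("releasing res %d\n", r->id);
			free(r);
		}
	}

	/* On success, consume the reference and clear *rp so the caller won't. */
	static int attach(struct res **rp, int should_fail)
	{
		if (should_fail)
			return -1;	/* caller still owns *rp and must release it */
		printf("attached res %d\n", (*rp)->id);
		free(*rp);		/* toy consumer releases it immediately */
		*rp = NULL;		/* signal: ownership was transferred */
		return 0;
	}

	int main(void)
	{
		struct res *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->id = 1;
		attach(&r, 0);
		put_res(r);	/* no-op: attach() consumed the reference */
		return 0;
	}
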
@@ -2592,12 +2603,12 @@ retry:
inode->i_op = &ext4_file_inode_operations;
inode->i_fop = &ext4_file_operations;
ext4_set_aops(inode);
- err = ext4_add_nondir(handle, dentry, inode);
- if (!err && IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
+ err = ext4_add_nondir(handle, dentry, &inode);
}
if (handle)
ext4_journal_stop(handle);
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2624,12 +2635,12 @@ retry:
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &ext4_special_inode_operations;
- err = ext4_add_nondir(handle, dentry, inode);
- if (!err && IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
+ err = ext4_add_nondir(handle, dentry, &inode);
}
if (handle)
ext4_journal_stop(handle);
+ if (!IS_ERR_OR_NULL(inode))
+ iput(inode);
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -2779,10 +2790,12 @@ retry:
if (err) {
out_clear_inode:
clear_nlink(inode);
+ ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
iput(inode);
- goto out_stop;
+ goto out_retry;
}
ext4_inc_count(handle, dir);
ext4_update_dx_flag(dir);
@@ -2796,6 +2809,7 @@ out_clear_inode:
out_stop:
if (handle)
ext4_journal_stop(handle);
+out_retry:
if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
@@ -3182,18 +3196,17 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- if (inode->i_nlink == 0) {
- ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
- dentry->d_name.len, dentry->d_name.name);
- set_nlink(inode, 1);
- }
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
ext4_mark_inode_dirty(handle, dir);
- drop_nlink(inode);
+ if (inode->i_nlink == 0)
+ ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
+ dentry->d_name.len, dentry->d_name.name);
+ else
+ drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = current_time(inode);
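
Rather than faking i_nlink = 1 so a later drop_nlink() lands on zero, ext4_unlink() now skips the decrement when the count is already zero, which avoids underflow on a corrupted image. The guard reduces to this sketch:

	#include <stdio.h>

	static unsigned int nlink = 0;	/* corrupted on-disk count, already zero */

	static void drop_nlink(void) { nlink--; }

	int main(void)
	{
		if (nlink == 0)
			printf("warning: deleting file with no links\n");
		else
			drop_nlink();	/* normal case */
		/* either way nlink is 0 now and the inode goes on the orphan list */
		printf("nlink=%u\n", nlink);
		return 0;
	}
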
@@ -3328,12 +3341,11 @@ static int ext4_symlink(struct inode *dir,
inode->i_size = disk_link.len - 1;
}
EXT4_I(inode)->i_disksize = inode->i_size;
- err = ext4_add_nondir(handle, dentry, inode);
- if (!err && IS_DIRSYNC(dir))
- ext4_handle_sync(handle);
-
+ err = ext4_add_nondir(handle, dentry, &inode);
if (handle)
ext4_journal_stop(handle);
+ if (inode)
+ iput(inode);
goto out_free_encrypted_link;
err_drop_inode:
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 12ceadef32c5..24aeedb8fc75 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -31,18 +31,56 @@
#include "acl.h"
static struct kmem_cache *io_end_cachep;
+static struct kmem_cache *io_end_vec_cachep;
int __init ext4_init_pageio(void)
{
io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
if (io_end_cachep == NULL)
return -ENOMEM;
+
+ io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
+ if (io_end_vec_cachep == NULL) {
+ kmem_cache_destroy(io_end_cachep);
+ return -ENOMEM;
+ }
return 0;
}
void ext4_exit_pageio(void)
{
kmem_cache_destroy(io_end_cachep);
+ kmem_cache_destroy(io_end_vec_cachep);
+}
+
+struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
+{
+ struct ext4_io_end_vec *io_end_vec;
+
+ io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
+ if (!io_end_vec)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&io_end_vec->list);
+ list_add_tail(&io_end_vec->list, &io_end->list_vec);
+ return io_end_vec;
+}
+
+static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
+{
+ struct ext4_io_end_vec *io_end_vec, *tmp;
+
+ if (list_empty(&io_end->list_vec))
+ return;
+ list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
+ list_del(&io_end_vec->list);
+ kmem_cache_free(io_end_vec_cachep, io_end_vec);
+ }
+}
+
+struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
+{
+ BUG_ON(list_empty(&io_end->list_vec));
+ return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}
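
Replacing the io_end's single offset/size pair with a list of vectors lets one io_end describe several discontiguous ranges, which blocksize < pagesize writeback can produce. A rough userspace model of the accumulation (struct and field names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for ext4_io_end_vec: one contiguous write range. */
	struct vec {
		long long offset;
		long long size;
		struct vec *next;
	};

	struct io_end { struct vec *head, *tail; };

	static struct vec *alloc_vec(struct io_end *ioe, long long offset)
	{
		struct vec *v = calloc(1, sizeof(*v));

		if (!v)
			return NULL;
		v->offset = offset;
		if (ioe->tail)
			ioe->tail->next = v;
		else
			ioe->head = v;
		ioe->tail = v;
		return v;
	}

	int main(void)
	{
		struct io_end ioe = { 0 };
		struct vec *v = alloc_vec(&ioe, 0);

		if (!v)
			return 1;
		v->size += 4096;		/* first extent: one page */
		v = alloc_vec(&ioe, 8192);	/* discontiguous extent starts here */
		if (!v)
			return 1;
		v->size += 4096;

		for (v = ioe.head; v; v = v->next)
			printf("convert range [%lld, %lld)\n", v->offset,
			       v->offset + v->size);
		/* ext4 frees the vectors when the io_end is released */
		while ((v = ioe.head)) {
			ioe.head = v->next;
			free(v);
		}
		return 0;
	}
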
/*
@@ -125,6 +163,7 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
ext4_finish_bio(bio);
bio_put(bio);
}
+ ext4_free_io_end_vec(io_end);
kmem_cache_free(io_end_cachep, io_end);
}
@@ -136,29 +175,26 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
* cannot get to ext4_ext_truncate() before all IOs overlapping that range are
* completed (happens from ext4_free_ioend()).
*/
-static int ext4_end_io(ext4_io_end_t *io)
+static int ext4_end_io_end(ext4_io_end_t *io_end)
{
- struct inode *inode = io->inode;
- loff_t offset = io->offset;
- ssize_t size = io->size;
- handle_t *handle = io->handle;
+ struct inode *inode = io_end->inode;
+ handle_t *handle = io_end->handle;
int ret = 0;
- ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
+ ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
"list->prev 0x%p\n",
- io, inode->i_ino, io->list.next, io->list.prev);
+ io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
- io->handle = NULL; /* Following call will use up the handle */
- ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
+ io_end->handle = NULL; /* Following call will use up the handle */
+ ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
ext4_msg(inode->i_sb, KERN_EMERG,
"failed to convert unwritten extents to written "
"extents -- potential data loss! "
- "(inode %lu, offset %llu, size %zd, error %d)",
- inode->i_ino, offset, size, ret);
+ "(inode %lu, error %d)", inode->i_ino, ret);
}
- ext4_clear_io_unwritten_flag(io);
- ext4_release_io_end(io);
+ ext4_clear_io_unwritten_flag(io_end);
+ ext4_release_io_end(io_end);
return ret;
}
@@ -166,21 +202,21 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
struct list_head *cur, *before, *after;
- ext4_io_end_t *io, *io0, *io1;
+ ext4_io_end_t *io_end, *io_end0, *io_end1;
if (list_empty(head))
return;
ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
- list_for_each_entry(io, head, list) {
- cur = &io->list;
+ list_for_each_entry(io_end, head, list) {
+ cur = &io_end->list;
before = cur->prev;
- io0 = container_of(before, ext4_io_end_t, list);
+ io_end0 = container_of(before, ext4_io_end_t, list);
after = cur->next;
- io1 = container_of(after, ext4_io_end_t, list);
+ io_end1 = container_of(after, ext4_io_end_t, list);
ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
- io, inode->i_ino, io0, io1);
+ io_end, inode->i_ino, io_end0, io_end1);
}
#endif
}
@@ -207,7 +243,7 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end)
static int ext4_do_flush_completed_IO(struct inode *inode,
struct list_head *head)
{
- ext4_io_end_t *io;
+ ext4_io_end_t *io_end;
struct list_head unwritten;
unsigned long flags;
struct ext4_inode_info *ei = EXT4_I(inode);
@@ -219,11 +255,11 @@ static int ext4_do_flush_completed_IO(struct inode *inode,
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
while (!list_empty(&unwritten)) {
- io = list_entry(unwritten.next, ext4_io_end_t, list);
- BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
- list_del_init(&io->list);
+ io_end = list_entry(unwritten.next, ext4_io_end_t, list);
+ BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ list_del_init(&io_end->list);
- err = ext4_end_io(io);
+ err = ext4_end_io_end(io_end);
if (unlikely(!ret && err))
ret = err;
}
@@ -242,19 +278,22 @@ void ext4_end_io_rsv_work(struct work_struct *work)
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
- ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
- if (io) {
- io->inode = inode;
- INIT_LIST_HEAD(&io->list);
- atomic_set(&io->count, 1);
+ ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);
+
+ if (io_end) {
+ io_end->inode = inode;
+ INIT_LIST_HEAD(&io_end->list);
+ INIT_LIST_HEAD(&io_end->list_vec);
+ atomic_set(&io_end->count, 1);
}
- return io;
+ return io_end;
}
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
if (atomic_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
+ list_empty(&io_end->list_vec)) {
ext4_release_io_end(io_end);
return;
}
@@ -268,9 +307,8 @@ int ext4_put_io_end(ext4_io_end_t *io_end)
if (atomic_dec_and_test(&io_end->count)) {
if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
- err = ext4_convert_unwritten_extents(io_end->handle,
- io_end->inode, io_end->offset,
- io_end->size);
+ err = ext4_convert_unwritten_io_end_vec(io_end->handle,
+ io_end);
io_end->handle = NULL;
ext4_clear_io_unwritten_flag(io_end);
}
@@ -307,10 +345,8 @@ static void ext4_end_bio(struct bio *bio)
struct inode *inode = io_end->inode;
ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
- "(offset %llu size %ld starting block %llu)",
+ "starting block %llu)",
bio->bi_status, inode->i_ino,
- (unsigned long long) io_end->offset,
- (long) io_end->size,
(unsigned long long)
bi_sector >> (inode->i_blkbits - 9));
mapping_set_error(inode->i_mapping,
@@ -358,14 +394,16 @@ void ext4_io_submit_init(struct ext4_io_submit *io,
io->io_end = NULL;
}
-static int io_submit_init_bio(struct ext4_io_submit *io,
- struct buffer_head *bh)
+static void io_submit_init_bio(struct ext4_io_submit *io,
+ struct buffer_head *bh)
{
struct bio *bio;
+ /*
+ * bio_alloc will _always_ be able to allocate a bio if
+ * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
+ */
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- if (!bio)
- return -ENOMEM;
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
@@ -373,13 +411,12 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
wbc_init_bio(io->io_wbc, bio);
- return 0;
}
-static int io_submit_add_bh(struct ext4_io_submit *io,
- struct inode *inode,
- struct page *page,
- struct buffer_head *bh)
+static void io_submit_add_bh(struct ext4_io_submit *io,
+ struct inode *inode,
+ struct page *page,
+ struct buffer_head *bh)
{
int ret;
@@ -388,9 +425,7 @@ submit_and_retry:
ext4_io_submit(io);
}
if (io->io_bio == NULL) {
- ret = io_submit_init_bio(io, bh);
- if (ret)
- return ret;
+ io_submit_init_bio(io, bh);
io->io_bio->bi_write_hint = inode->i_write_hint;
}
ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
@@ -398,7 +433,6 @@ submit_and_retry:
goto submit_and_retry;
wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
io->io_next_block++;
- return 0;
}
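
io_submit_add_bh() keeps one open bio and flushes it whenever the next buffer head does not fit; now that bio allocation cannot fail, the helper returns void. The submit-and-retry control flow, sketched without the block-layer API:

	#include <stdio.h>

	#define BIO_CAP 4	/* illustrative pages-per-bio limit */

	struct bio { int pages; };

	static void submit(struct bio *b)
	{
		if (b->pages)
			printf("submitting bio with %d pages\n", b->pages);
		b->pages = 0;
	}

	static int add_page(struct bio *b)
	{
		if (b->pages >= BIO_CAP)
			return 0;	/* bio full: caller must submit and retry */
		b->pages++;
		return 1;
	}

	static void add_with_retry(struct bio *b)
	{
	retry:
		if (!add_page(b)) {
			submit(b);	/* flush the full bio, then retry the page */
			goto retry;
		}
	}

	int main(void)
	{
		struct bio b = { 0 };
		int i;

		for (i = 0; i < 6; i++)
			add_with_retry(&b);
		submit(&b);	/* flush the 2-page tail; the 4-page bio went above */
		return 0;
	}
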
int ext4_bio_write_page(struct ext4_io_submit *io,
@@ -491,8 +525,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
- bounce_page = NULL;
- goto out;
+
+ printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
+ redirty_page_for_writepage(wbc, page);
+ do {
+ clear_buffer_async_write(bh);
+ bh = bh->b_this_page;
+ } while (bh != head);
+ goto unlock;
}
}
@@ -500,30 +540,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
do {
if (!buffer_async_write(bh))
continue;
- ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
- if (ret) {
- /*
- * We only get here on ENOMEM. Not much else
- * we can do but mark the page as dirty, and
- * better luck next time.
- */
- break;
- }
+ io_submit_add_bh(io, inode,
+ bounce_page ? bounce_page : page, bh);
nr_submitted++;
clear_buffer_dirty(bh);
} while ((bh = bh->b_this_page) != head);
- /* Error stopped previous loop? Clean up buffers... */
- if (ret) {
- out:
- fscrypt_free_bounce_page(bounce_page);
- printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
- redirty_page_for_writepage(wbc, page);
- do {
- clear_buffer_async_write(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
+unlock:
unlock_page(page);
/* Nothing submitted - we have to end page writeback */
if (!nr_submitted)
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index a30b203fa461..fef7755300c3 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -360,10 +360,12 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (bio == NULL) {
struct bio_post_read_ctx *ctx;
+ /*
+ * bio_alloc will _always_ be able to allocate a bio if
+ * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
+ */
bio = bio_alloc(GFP_KERNEL,
min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio)
- goto set_error_page;
ctx = get_bio_post_read_ctx(inode, bio, page->index);
if (IS_ERR(ctx)) {
bio_put(bio);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c0e9aef376a7..a8c0f2b5b6e1 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -388,28 +388,10 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
return bh;
}
-/*
- * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA.
- * If that fails, restart the transaction & regain write access for the
- * buffer head which is used for block_bitmap modifications.
- */
-static int extend_or_restart_transaction(handle_t *handle, int thresh)
+static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
- int err;
-
- if (ext4_handle_has_enough_credits(handle, thresh))
- return 0;
-
- err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA);
- if (err < 0)
- return err;
- if (err) {
- err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
- if (err)
- return err;
- }
-
- return 0;
+ return ext4_journal_ensure_credits_fn(handle, credits,
+ EXT4_MAX_TRANS_DATA, 0, 0);
}
/*
@@ -451,8 +433,8 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
continue;
}
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
return err;
bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
@@ -544,8 +526,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
struct buffer_head *gdb;
ext4_debug("update backup group %#04llx\n", block);
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
goto out;
gdb = sb_getblk(sb, block);
@@ -602,8 +584,8 @@ handle_bb:
/* Initialize block bitmap of the @group */
block = group_data[i].block_bitmap;
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
goto out;
bh = bclean(handle, sb, block);
@@ -631,8 +613,8 @@ handle_ib:
/* Initialize inode bitmap of the @group */
block = group_data[i].inode_bitmap;
- err = extend_or_restart_transaction(handle, 1);
- if (err)
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
goto out;
/* Mark unused entries in inode bitmap used */
bh = bclean(handle, sb, block);
@@ -1109,10 +1091,8 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
ext4_fsblk_t backup_block;
/* Out of journal space, and can't get more - abort - so sad */
- if (ext4_handle_valid(handle) &&
- handle->h_buffer_credits == 0 &&
- ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
- (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
+ err = ext4_resize_ensure_credits_batch(handle, 1);
+ if (err < 0)
break;
if (meta_bg == 0)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b3cbf8622eab..1d82b56d9b11 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1172,9 +1172,9 @@ void ext4_clear_inode(struct inode *inode)
{
invalidate_inode_buffers(inode);
clear_inode(inode);
- dquot_drop(inode);
ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+ dquot_drop(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
@@ -1388,7 +1388,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
static int ext4_enable_quotas(struct super_block *sb);
-static int ext4_get_next_id(struct super_block *sb, struct kqid *qid);
static struct dquot **ext4_get_dquots(struct inode *inode)
{
@@ -1406,7 +1405,7 @@ static const struct dquot_operations ext4_quota_operations = {
.destroy_dquot = dquot_destroy,
.get_projid = ext4_get_projid,
.get_inode_usage = ext4_get_inode_usage,
- .get_next_id = ext4_get_next_id,
+ .get_next_id = dquot_get_next_id,
};
static const struct quotactl_ops ext4_qctl_operations = {
@@ -2065,7 +2064,7 @@ static int parse_options(char *options, struct super_block *sb,
unsigned int *journal_ioprio,
int is_remount)
{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_sb_info __maybe_unused *sbi = EXT4_SB(sb);
char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
substring_t args[MAX_OPT_ARGS];
int token;
@@ -2119,16 +2118,6 @@ static int parse_options(char *options, struct super_block *sb,
}
}
#endif
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- int blocksize =
- BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
-
- if (blocksize < PAGE_SIZE) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "dioread_nolock if block size != PAGE_SIZE");
- return 0;
- }
- }
return 1;
}
@@ -3569,12 +3558,15 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ unsigned def_extra_isize = sizeof(struct ext4_inode) -
+ EXT4_GOOD_OLD_INODE_SIZE;
- /* determine the minimum size of new large inodes, if present */
- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
- sbi->s_want_extra_isize == 0) {
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
- EXT4_GOOD_OLD_INODE_SIZE;
+ if (sbi->s_inode_size == EXT4_GOOD_OLD_INODE_SIZE) {
+ sbi->s_want_extra_isize = 0;
+ return;
+ }
+ if (sbi->s_want_extra_isize < 4) {
+ sbi->s_want_extra_isize = def_extra_isize;
if (ext4_has_feature_extra_isize(sb)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
@@ -3587,10 +3579,10 @@ static void ext4_clamp_want_extra_isize(struct super_block *sb)
}
}
/* Check if enough inode space is available */
- if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
- sbi->s_inode_size) {
- sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
- EXT4_GOOD_OLD_INODE_SIZE;
+ if ((sbi->s_want_extra_isize > sbi->s_inode_size) ||
+ (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
+ sbi->s_inode_size)) {
+ sbi->s_want_extra_isize = def_extra_isize;
ext4_msg(sb, KERN_INFO,
"required extra inode space not available");
}
@@ -4453,13 +4445,6 @@ no_journal:
}
}
- if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
- (blocksize != PAGE_SIZE)) {
- ext4_msg(sb, KERN_ERR,
- "Unsupported blocksize for fs encryption");
- goto failed_mount_wq;
- }
-
if (ext4_has_feature_verity(sb) && blocksize != PAGE_SIZE) {
ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs-verity");
goto failed_mount_wq;
@@ -5849,7 +5834,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
- err = dquot_enable(qf_inode, type, format_id, flags);
+ err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
iput(qf_inode);
@@ -6033,18 +6018,6 @@ out:
}
return len;
}
-
-static int ext4_get_next_id(struct super_block *sb, struct kqid *qid)
-{
- const struct quota_format_ops *ops;
-
- if (!sb_has_quota_loaded(sb, qid->type))
- return -ESRCH;
- ops = sb_dqopt(sb)->ops[qid->type];
- if (!ops || !ops->get_next_id)
- return -ENOSYS;
- return dquot_get_next_id(sb, qid);
-}
#endif
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
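
ext4_clamp_want_extra_isize() now resets s_want_extra_isize to the computed default whenever the requested value is either tiny (less than 4 bytes) or would not fit between the old 128-byte inode core and s_inode_size. A hedged sketch of the same clamping, with illustrative constants:

#include <stdio.h>

#define GOOD_OLD_INODE_SIZE 128u

/* Clamp a requested extra_isize into what the inode can actually hold. */
static unsigned clamp_extra_isize(unsigned requested, unsigned inode_size,
                                  unsigned def_extra)
{
    if (inode_size == GOOD_OLD_INODE_SIZE)
        return 0;                          /* no room beyond the old core */
    if (requested < 4)
        requested = def_extra;             /* too small to be meaningful */
    if (requested > inode_size ||
        GOOD_OLD_INODE_SIZE + requested > inode_size)
        requested = def_extra;             /* does not fit, fall back */
    return requested;
}

int main(void)
{
    printf("%u\n", clamp_extra_isize(2, 256, 32));   /* -> 32 */
    printf("%u\n", clamp_extra_isize(400, 256, 32)); /* -> 32 */
    return 0;
}
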
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 491f9ee4040e..8966a5439a22 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -967,55 +967,6 @@ int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
return credits;
}
-static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
- int credits, struct buffer_head *bh,
- bool dirty, bool block_csum)
-{
- int error;
-
- if (!ext4_handle_valid(handle))
- return 0;
-
- if (handle->h_buffer_credits >= credits)
- return 0;
-
- error = ext4_journal_extend(handle, credits - handle->h_buffer_credits);
- if (!error)
- return 0;
- if (error < 0) {
- ext4_warning(inode->i_sb, "Extend journal (error %d)", error);
- return error;
- }
-
- if (bh && dirty) {
- if (block_csum)
- ext4_xattr_block_csum_set(inode, bh);
- error = ext4_handle_dirty_metadata(handle, NULL, bh);
- if (error) {
- ext4_warning(inode->i_sb, "Handle metadata (error %d)",
- error);
- return error;
- }
- }
-
- error = ext4_journal_restart(handle, credits);
- if (error) {
- ext4_warning(inode->i_sb, "Restart journal (error %d)", error);
- return error;
- }
-
- if (bh) {
- error = ext4_journal_get_write_access(handle, bh);
- if (error) {
- ext4_warning(inode->i_sb,
- "Get write access failed (error %d)",
- error);
- return error;
- }
- }
- return 0;
-}
-
static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
int ref_change)
{
@@ -1149,6 +1100,24 @@ cleanup:
return saved_err;
}
+static int ext4_xattr_restart_fn(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, bool block_csum, bool dirty)
+{
+ int error;
+
+ if (bh && dirty) {
+ if (block_csum)
+ ext4_xattr_block_csum_set(inode, bh);
+ error = ext4_handle_dirty_metadata(handle, NULL, bh);
+ if (error) {
+ ext4_warning(inode->i_sb, "Handle metadata (error %d)",
+ error);
+ return error;
+ }
+ }
+ return 0;
+}
+
static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
struct buffer_head *bh,
@@ -1185,13 +1154,24 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
continue;
}
- err = ext4_xattr_ensure_credits(handle, parent, credits, bh,
- dirty, block_csum);
- if (err) {
+ err = ext4_journal_ensure_credits_fn(handle, credits, credits,
+ ext4_free_metadata_revoke_credits(parent->i_sb, 1),
+ ext4_xattr_restart_fn(handle, parent, bh, block_csum,
+ dirty));
+ if (err < 0) {
ext4_warning_inode(ea_inode, "Ensure credits err=%d",
err);
continue;
}
+ if (err > 0) {
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err) {
+ ext4_warning_inode(ea_inode,
+ "Re-get write access err=%d",
+ err);
+ continue;
+ }
+ }
err = ext4_xattr_inode_dec_ref(handle, ea_inode);
if (err) {
@@ -2335,7 +2315,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
flags & XATTR_CREATE);
brelse(bh);
- if (!ext4_handle_has_enough_credits(handle, credits)) {
+ if (jbd2_handle_buffer_credits(handle) < credits) {
error = -ENOSPC;
goto cleanup;
}
@@ -2862,11 +2842,9 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
struct inode *ea_inode;
int error;
- error = ext4_xattr_ensure_credits(handle, inode, extra_credits,
- NULL /* bh */,
- false /* dirty */,
- false /* block_csum */);
- if (error) {
+ error = ext4_journal_ensure_credits(handle, extra_credits,
+ ext4_free_metadata_revoke_credits(inode->i_sb, 1));
+ if (error < 0) {
EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
goto cleanup;
}
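
The xattr conversion splits the old ext4_xattr_ensure_credits() in two: ext4_xattr_restart_fn() flushes (and checksums) the dirty block before the journal restart, and the new `err > 0` branch at the call site re-acquires write access afterwards. A toy model of that callback-around-restart protocol, with made-up names:

#include <stdio.h>

struct buf { int dirty; };

typedef int (*restart_fn)(struct buf *b);

static int flush_before_restart(struct buf *b)
{
    if (b && b->dirty) {
        b->dirty = 0;               /* model csum + write-out of the block */
        printf("flushed\n");
    }
    return 0;
}

/* <0 error, 0 no restart needed, >0 restarted (callback ran first). */
static int ensure_with_restart(int have, int need, struct buf *b,
                               restart_fn fn)
{
    if (have >= need)
        return 0;
    int err = fn(b);                /* let the caller flush before restart */
    return err < 0 ? err : 1;
}

int main(void)
{
    struct buf b = { .dirty = 1 };
    int err = ensure_with_restart(1, 4, &b, flush_before_restart);
    if (err > 0)
        printf("re-get write access here\n"); /* mirrors the new call site */
    return err < 0;
}
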
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index a0eef95b9e0e..ffdaba0c55d2 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -581,7 +581,7 @@ int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
- f2fs_show_injection_info(FAULT_ORPHAN);
+ f2fs_show_injection_info(sbi, FAULT_ORPHAN);
return -ENOSPC;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5755e897a5f0..a034cd0ce021 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -29,6 +29,7 @@
#define NUM_PREALLOC_POST_READ_CTXS 128
static struct kmem_cache *bio_post_read_ctx_cache;
+static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static bool __is_cp_guaranteed(struct page *page)
@@ -167,9 +168,10 @@ static bool f2fs_bio_post_read_required(struct bio *bio)
static void f2fs_read_end_io(struct bio *bio)
{
- if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
- FAULT_READ_IO)) {
- f2fs_show_injection_info(FAULT_READ_IO);
+ struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
+
+ if (time_to_inject(sbi, FAULT_READ_IO)) {
+ f2fs_show_injection_info(sbi, FAULT_READ_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -191,7 +193,7 @@ static void f2fs_write_end_io(struct bio *bio)
struct bvec_iter_all iter_all;
if (time_to_inject(sbi, FAULT_WRITE_IO)) {
- f2fs_show_injection_info(FAULT_WRITE_IO);
+ f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -543,6 +545,126 @@ static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
return io_type_is_mergeable(io, fio);
}
+static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
+ struct page *page, enum temp_type temp)
+{
+ struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ struct bio_entry *be;
+
+ be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+ be->bio = bio;
+ bio_get(bio);
+
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
+ f2fs_bug_on(sbi, 1);
+
+ down_write(&io->bio_list_lock);
+ list_add_tail(&be->list, &io->bio_list);
+ up_write(&io->bio_list_lock);
+}
+
+static void del_bio_entry(struct bio_entry *be)
+{
+ list_del(&be->list);
+ kmem_cache_free(bio_entry_slab, be);
+}
+
+static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio,
+ struct page *page)
+{
+ enum temp_type temp;
+ bool found = false;
+ int ret = -EAGAIN;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+ struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ struct list_head *head = &io->bio_list;
+ struct bio_entry *be;
+
+ down_write(&io->bio_list_lock);
+ list_for_each_entry(be, head, list) {
+ if (be->bio != *bio)
+ continue;
+
+ found = true;
+
+ if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+ ret = 0;
+ break;
+ }
+
+ /* bio is full */
+ del_bio_entry(be);
+ __submit_bio(sbi, *bio, DATA);
+ break;
+ }
+ up_write(&io->bio_list_lock);
+ }
+
+ if (ret) {
+ bio_put(*bio);
+ *bio = NULL;
+ }
+
+ return ret;
+}
+
+void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+ struct bio **bio, struct page *page)
+{
+ enum temp_type temp;
+ bool found = false;
+ struct bio *target = bio ? *bio : NULL;
+
+ for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+ struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ struct list_head *head = &io->bio_list;
+ struct bio_entry *be;
+
+ if (list_empty(head))
+ continue;
+
+ down_read(&io->bio_list_lock);
+ list_for_each_entry(be, head, list) {
+ if (target)
+ found = (target == be->bio);
+ else
+ found = __has_merged_page(be->bio, NULL,
+ page, 0);
+ if (found)
+ break;
+ }
+ up_read(&io->bio_list_lock);
+
+ if (!found)
+ continue;
+
+ found = false;
+
+ down_write(&io->bio_list_lock);
+ list_for_each_entry(be, head, list) {
+ if (target)
+ found = (target == be->bio);
+ else
+ found = __has_merged_page(be->bio, NULL,
+ page, 0);
+ if (found) {
+ target = be->bio;
+ del_bio_entry(be);
+ break;
+ }
+ }
+ up_write(&io->bio_list_lock);
+ }
+
+ if (found)
+ __submit_bio(sbi, target, DATA);
+ if (bio && *bio) {
+ bio_put(*bio);
+ *bio = NULL;
+ }
+}
+
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio = *fio->bio;
@@ -557,20 +679,17 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
f2fs_trace_ios(fio, 0);
if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
- fio->new_blkaddr)) {
- __submit_bio(fio->sbi, bio, fio->type);
- bio = NULL;
- }
+ fio->new_blkaddr))
+ f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_PAGES);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
- }
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- __submit_bio(fio->sbi, bio, fio->type);
- bio = NULL;
- goto alloc_new;
+ add_bio_entry(fio->sbi, bio, page, fio->temp);
+ } else {
+ if (add_ipu_page(fio->sbi, &bio, page))
+ goto alloc_new;
}
if (fio->io_wbc)
@@ -584,19 +703,6 @@ alloc_new:
return 0;
}
-static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
- struct page *page)
-{
- if (!bio)
- return;
-
- if (!__has_merged_page(*bio, NULL, page, 0))
- return;
-
- __submit_bio(sbi, *bio, DATA);
- *bio = NULL;
-}
-
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
@@ -2098,7 +2204,7 @@ static int __write_data_page(struct page *page, bool *submitted,
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
>> PAGE_SHIFT;
- loff_t psize = (page->index + 1) << PAGE_SHIFT;
+ loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
@@ -2215,14 +2321,12 @@ out:
unlock_page(page);
if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
- !F2FS_I(inode)->cp_task) {
- f2fs_submit_ipu_bio(sbi, bio, page);
+ !F2FS_I(inode)->cp_task)
f2fs_balance_fs(sbi, need_balance_fs);
- }
if (unlikely(f2fs_cp_error(sbi))) {
- f2fs_submit_ipu_bio(sbi, bio, page);
f2fs_submit_merged_write(sbi, DATA);
+ f2fs_submit_merged_ipu_write(sbi, bio, NULL);
submitted = NULL;
}
@@ -2342,13 +2446,11 @@ continue_unlock:
}
if (PageWriteback(page)) {
- if (wbc->sync_mode != WB_SYNC_NONE) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
f2fs_wait_on_page_writeback(page,
DATA, true, true);
- f2fs_submit_ipu_bio(sbi, &bio, page);
- } else {
+ else
goto continue_unlock;
- }
}
if (!clear_page_dirty_for_io(page))
@@ -2406,7 +2508,7 @@ continue_unlock:
NULL, 0, DATA);
/* submit cached bio of IPU write */
if (bio)
- __submit_bio(sbi, bio, DATA);
+ f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
return ret;
}
@@ -3211,8 +3313,22 @@ fail:
return -ENOMEM;
}
-void __exit f2fs_destroy_post_read_processing(void)
+void f2fs_destroy_post_read_processing(void)
{
mempool_destroy(bio_post_read_ctx_pool);
kmem_cache_destroy(bio_post_read_ctx_cache);
}
+
+int __init f2fs_init_bio_entry_cache(void)
+{
+ bio_entry_slab = f2fs_kmem_cache_create("bio_entry_slab",
+ sizeof(struct bio_entry));
+ if (!bio_entry_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void __exit f2fs_destroy_bio_entry_cache(void)
+{
+ kmem_cache_destroy(bio_entry_slab);
+}
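
In-place-update writes now park their bios on a per-temperature bio_list protected by an rwsem: add_ipu_page() appends to a cached bio and submits it once full, while f2fs_submit_merged_ipu_write() first scans under the read lock and only retakes the write lock when an entry actually has to be unlinked and submitted. A user-space sketch of that read-probe/write-remove pattern, with pthreads standing in for the rwsem (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int pending[8];
static int npending;

static int probe(int key)               /* cheap check under the read lock */
{
    int found = 0;
    pthread_rwlock_rdlock(&lock);
    for (int i = 0; i < npending; i++)
        if (pending[i] == key)
            found = 1;
    pthread_rwlock_unlock(&lock);
    return found;
}

static int remove_entry(int key)        /* unlink under the write lock */
{
    int found = 0;
    pthread_rwlock_wrlock(&lock);
    for (int i = 0; i < npending; i++)
        if (pending[i] == key) {
            pending[i] = pending[--npending];
            found = 1;
            break;
        }
    pthread_rwlock_unlock(&lock);
    return found;
}

int main(void)
{
    pending[npending++] = 42;
    if (probe(42) && remove_entry(42))  /* probe first, then take the write lock */
        printf("submit bio 42\n");
    return 0;
}
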
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4033778bcbbf..c967cacf979e 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -628,7 +628,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
start:
if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
- f2fs_show_injection_info(FAULT_DIR_DEPTH);
+ f2fs_show_injection_info(F2FS_I_SB(dir), FAULT_DIR_DEPTH);
return -ENOSPC;
}
@@ -919,8 +919,9 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
bit_pos++;
ctx->pos = start_pos + bit_pos;
printk_ratelimited(
- "%s, invalid namelen(0), ino:%u, run fsck to fix.",
- KERN_WARNING, le32_to_cpu(de->ino));
+ "%sF2FS-fs (%s): invalid namelen(0), ino:%u, run fsck to fix.",
+ KERN_WARNING, sbi->sb->s_id,
+ le32_to_cpu(de->ino));
set_sbi_flag(sbi, SBI_NEED_FSCK);
continue;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 4024790028aa..5a888a063c7f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -890,6 +890,7 @@ enum {
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
NO_CHECK_TYPE,
+ CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
};
struct flush_cmd {
@@ -1068,6 +1069,11 @@ struct f2fs_io_info {
unsigned char version; /* version of the node */
};
+struct bio_entry {
+ struct bio *bio;
+ struct list_head list;
+};
+
#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
struct f2fs_sb_info *sbi; /* f2fs superblock */
@@ -1077,6 +1083,8 @@ struct f2fs_bio_info {
struct rw_semaphore io_rwsem; /* blocking op for bio */
spinlock_t io_lock; /* serialize DATA/NODE IOs */
struct list_head io_list; /* track fios */
+ struct list_head bio_list; /* bio entry list head */
+ struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */
};
#define FDEV(i) (sbi->devs[i])
@@ -1289,11 +1297,13 @@ struct f2fs_sb_info {
unsigned int gc_mode; /* current GC state */
unsigned int next_victim_seg[2]; /* next segment in victim section */
/* for skip statistic */
+ unsigned int atomic_files; /* # of opened atomic files */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
u64 gc_pin_file_threshold;
+ struct rw_semaphore pin_sem;
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
@@ -1365,9 +1375,10 @@ struct f2fs_private_dio {
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
-#define f2fs_show_injection_info(type) \
- printk_ratelimited("%sF2FS-fs : inject %s in %s of %pS\n", \
- KERN_INFO, f2fs_fault_name[type], \
+#define f2fs_show_injection_info(sbi, type) \
+ printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
+ KERN_INFO, sbi->sb->s_id, \
+ f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
@@ -1387,7 +1398,7 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
return false;
}
#else
-#define f2fs_show_injection_info(type) do { } while (0)
+#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
return false;
@@ -1772,7 +1783,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
return ret;
if (time_to_inject(sbi, FAULT_BLOCK)) {
- f2fs_show_injection_info(FAULT_BLOCK);
+ f2fs_show_injection_info(sbi, FAULT_BLOCK);
release = *count;
goto release_quota;
}
@@ -2024,7 +2035,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
}
if (time_to_inject(sbi, FAULT_BLOCK)) {
- f2fs_show_injection_info(FAULT_BLOCK);
+ f2fs_show_injection_info(sbi, FAULT_BLOCK);
goto enospc;
}
@@ -2139,7 +2150,8 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
return page;
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
- f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+ f2fs_show_injection_info(F2FS_M_SB(mapping),
+ FAULT_PAGE_ALLOC);
return NULL;
}
}
@@ -2154,7 +2166,7 @@ static inline struct page *f2fs_pagecache_get_page(
int fgp_flags, gfp_t gfp_mask)
{
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
- f2fs_show_injection_info(FAULT_PAGE_GET);
+ f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
return NULL;
}
@@ -2223,7 +2235,7 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
return bio;
}
if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
- f2fs_show_injection_info(FAULT_ALLOC_BIO);
+ f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
return NULL;
}
@@ -2704,6 +2716,20 @@ static inline void clear_file(struct inode *inode, int type)
f2fs_mark_inode_dirty_sync(inode, true);
}
+static inline bool f2fs_is_time_consistent(struct inode *inode)
+{
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ return false;
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
+ &F2FS_I(inode)->i_crtime))
+ return false;
+ return true;
+}
+
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
bool ret;
@@ -2721,14 +2747,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
i_size_read(inode) & ~PAGE_MASK)
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
- return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
- &F2FS_I(inode)->i_crtime))
+ if (!f2fs_is_time_consistent(inode))
return false;
down_read(&F2FS_I(inode)->i_sem);
@@ -2783,7 +2802,7 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
void *ret;
if (time_to_inject(sbi, FAULT_KMALLOC)) {
- f2fs_show_injection_info(FAULT_KMALLOC);
+ f2fs_show_injection_info(sbi, FAULT_KMALLOC);
return NULL;
}
@@ -2804,7 +2823,7 @@ static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
if (time_to_inject(sbi, FAULT_KVMALLOC)) {
- f2fs_show_injection_info(FAULT_KVMALLOC);
+ f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
return NULL;
}
@@ -3102,7 +3121,7 @@ void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
unsigned int start, unsigned int end);
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
struct cp_control *cpc);
@@ -3188,10 +3207,14 @@ void f2fs_destroy_checkpoint_caches(void);
*/
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
+int f2fs_init_bio_entry_cache(void);
+void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page,
nid_t ino, enum page_type type);
+void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+ struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
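
f2fs_show_injection_info() now takes the sbi so each rate-limited injection message can carry the device name (sb->s_id), which keeps the output attributable when several f2fs instances are mounted. A hedged user-space sketch of instance-tagged, rate-limited logging (the one-second limiter is deliberately simplistic):

#include <stdio.h>
#include <time.h>

struct instance { const char *id; time_t last_log; };

/* Log at most once per second per instance, always naming the instance. */
static void log_ratelimited(struct instance *inst, const char *what)
{
    time_t now = time(NULL);
    if (now == inst->last_log)
        return;                     /* suppressed, like printk_ratelimited */
    inst->last_log = now;
    fprintf(stderr, "fs (%s): inject %s\n", inst->id, what);
}

int main(void)
{
    struct instance a = { .id = "sda1" }, b = { .id = "sdb1" };
    log_ratelimited(&a, "FAULT_KMALLOC");
    log_ratelimited(&b, "FAULT_KMALLOC"); /* devices stay distinguishable */
    return 0;
}
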
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 6a2e5b7d8fc7..85af112e868d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -681,7 +681,7 @@ int f2fs_truncate(struct inode *inode)
trace_f2fs_truncate(inode);
if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
- f2fs_show_injection_info(FAULT_TRUNCATE);
+ f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
return -EIO;
}
@@ -1142,7 +1142,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
}
dn.ofs_in_node++;
i++;
- new_size = (dst + i) << PAGE_SHIFT;
+ new_size = (loff_t)(dst + i) << PAGE_SHIFT;
if (dst_inode->i_size < new_size)
f2fs_i_size_write(dst_inode, new_size);
} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
@@ -1548,12 +1548,44 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
if (off_end)
map.m_len++;
- if (f2fs_is_pinned_file(inode))
- map.m_seg_type = CURSEG_COLD_DATA;
+ if (!map.m_len)
+ return 0;
+
+ if (f2fs_is_pinned_file(inode)) {
+ block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
+ sbi->log_blocks_per_seg;
+ block_t done = 0;
+
+ if (map.m_len % sbi->blocks_per_seg)
+ len += sbi->blocks_per_seg;
+
+ map.m_len = sbi->blocks_per_seg;
+next_alloc:
+ if (has_not_enough_free_secs(sbi, 0,
+ GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+ mutex_lock(&sbi->gc_mutex);
+ err = f2fs_gc(sbi, true, false, NULL_SEGNO);
+ if (err && err != -ENODATA && err != -EAGAIN)
+ goto out_err;
+ }
+
+ down_write(&sbi->pin_sem);
+ map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+ f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+ up_write(&sbi->pin_sem);
+
+ done += map.m_len;
+ len -= map.m_len;
+ map.m_lblk += map.m_len;
+ if (!err && len)
+ goto next_alloc;
- err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
- F2FS_GET_BLOCK_PRE_DIO :
- F2FS_GET_BLOCK_PRE_AIO));
+ map.m_len = done;
+ } else {
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ }
+out_err:
if (err) {
pgoff_t last_off;
@@ -1893,6 +1925,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
if (list_empty(&fi->inmem_ilist))
list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
+ sbi->atomic_files++;
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
/* add inode in inmem_list first and set atomic_file */
@@ -3406,6 +3439,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_RELEASE_VOLATILE_WRITE:
case F2FS_IOC_ABORT_VOLATILE_WRITE:
case F2FS_IOC_SHUTDOWN:
+ case FITRIM:
case F2FS_IOC_SET_ENCRYPTION_POLICY:
case F2FS_IOC_GET_ENCRYPTION_PWSALT:
case F2FS_IOC_GET_ENCRYPTION_POLICY:
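
Two hunks in this file fix the same latent truncation: shifting a 32-bit page index left by PAGE_SHIFT wraps in 32-bit arithmetic before the result ever reaches the 64-bit loff_t, so the cast must widen the operand before the shift rather than the result after it. A minimal demonstration on an LP64 machine:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t index = 0x00200000;              /* a page index past 4 GiB */
    int64_t wrong = index << 12;              /* wraps in 32 bits: 0 */
    int64_t right = (int64_t)index << 12;     /* widen first: 8 GiB */
    printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
    return 0;
}
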
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 5877bd729689..b3d399623290 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -54,7 +54,7 @@ static int gc_thread_func(void *data)
}
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
- f2fs_show_injection_info(FAULT_CHECKPOINT);
+ f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
@@ -1012,8 +1012,14 @@ next_step:
block_t start_bidx;
nid_t nid = le32_to_cpu(entry->nid);
- /* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ /*
+ * stop BG_GC if there are not enough free sections.
+ * Also stop GC if the segment has become fully valid due to
+ * a race with SSR block allocation.
+ */
+ if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
+ get_valid_blocks(sbi, segno, false) ==
+ sbi->blocks_per_seg)
return submitted;
if (check_valid_map(sbi, segno, off) == 0)
@@ -1437,11 +1443,20 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
raw_sb->block_count = cpu_to_le64(block_count +
(long long)segs * sbi->blocks_per_seg);
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+ int dev_segs =
+ le32_to_cpu(raw_sb->devs[last_dev].total_segments);
+
+ raw_sb->devs[last_dev].total_segments =
+ cpu_to_le32(dev_segs + segs);
+ }
}
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
int segs = secs * sbi->segs_per_sec;
+ long long blks = (long long)segs * sbi->blocks_per_seg;
long long user_block_count =
le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
@@ -1449,8 +1464,20 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
- F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
- (long long)segs * sbi->blocks_per_seg);
+ F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
+
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+
+ FDEV(last_dev).total_segments =
+ (int)FDEV(last_dev).total_segments + segs;
+ FDEV(last_dev).end_blk =
+ (long long)FDEV(last_dev).end_blk + blks;
+#ifdef CONFIG_BLK_DEV_ZONED
+ FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
+ (int)(blks >> sbi->log_blocks_per_blkz);
+#endif
+ }
}
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
@@ -1465,6 +1492,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
if (block_count > old_block_count)
return -EINVAL;
+ if (f2fs_is_multi_device(sbi)) {
+ int last_dev = sbi->s_ndevs - 1;
+ __u64 last_segs = FDEV(last_dev).total_segments;
+
+ if (block_count + last_segs * sbi->blocks_per_seg <=
+ old_block_count)
+ return -EINVAL;
+ }
+
/* new fs size should align to section size */
div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
if (rem)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index db4fec30c30d..502bd491336a 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -615,7 +615,11 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
+ /*
+ * atime can be updated without dirtying the f2fs inode in lazytime mode
+ */
+ if (f2fs_is_time_consistent(inode) &&
+ !is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
if (!f2fs_is_checkpoint_ready(sbi))
@@ -677,7 +681,7 @@ retry:
err = f2fs_truncate(inode);
if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
- f2fs_show_injection_info(FAULT_EVICT_INODE);
+ f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
err = -EIO;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 4faf06e8bf89..a1c507b0b4ac 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -981,7 +981,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!old_dir_entry || whiteout)
file_lost_pino(old_inode);
else
- F2FS_I(old_inode)->i_pino = new_dir->i_ino;
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem);
old_inode->i_ctime = current_time(old_inode);
@@ -1141,7 +1142,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_set_link(old_dir, old_entry, old_page, new_inode);
down_write(&F2FS_I(old_inode)->i_sem);
- file_lost_pino(old_inode);
+ if (!old_dir_entry)
+ file_lost_pino(old_inode);
+ else
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem);
old_dir->i_ctime = current_time(old_dir);
@@ -1156,7 +1161,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
down_write(&F2FS_I(new_inode)->i_sem);
- file_lost_pino(new_inode);
+ if (!new_dir_entry)
+ file_lost_pino(new_inode);
+ else
+ /* adjust dir's i_pino to pass fsck check */
+ f2fs_i_pino_write(new_inode, old_dir->i_ino);
up_write(&F2FS_I(new_inode)->i_sem);
new_dir->i_ctime = current_time(new_dir);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8b66bc4c004b..3314a0f3405e 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2349,7 +2349,6 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (ret) {
up_read(&nm_i->nat_tree_lock);
- f2fs_bug_on(sbi, !mount);
f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
return ret;
}
@@ -2399,7 +2398,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct free_nid *i = NULL;
retry:
if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
- f2fs_show_injection_info(FAULT_ALLOC_NID);
+ f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
return false;
}
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 783773e4560d..76477f71d4ee 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -711,7 +711,7 @@ next:
f2fs_put_page(page, 1);
}
if (!err)
- f2fs_allocate_new_segments(sbi);
+ f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE);
return err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 2c997f94a3b2..56e81447e2f3 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -288,6 +288,8 @@ void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
struct inode *inode;
struct f2fs_inode_info *fi;
+ unsigned int count = sbi->atomic_files;
+ unsigned int looped = 0;
next:
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
if (list_empty(head)) {
@@ -296,22 +298,26 @@ next:
}
fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
inode = igrab(&fi->vfs_inode);
+ if (inode)
+ list_move_tail(&fi->inmem_ilist, head);
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
if (inode) {
if (gc_failure) {
- if (fi->i_gc_failures[GC_FAILURE_ATOMIC])
- goto drop;
- goto skip;
+ if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
+ goto skip;
}
-drop:
set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
f2fs_drop_inmem_pages(inode);
+skip:
iput(inode);
}
-skip:
congestion_wait(BLK_RW_ASYNC, HZ/50);
cond_resched();
+ if (gc_failure) {
+ if (++looped >= count)
+ return;
+ }
goto next;
}
@@ -327,13 +333,16 @@ void f2fs_drop_inmem_pages(struct inode *inode)
mutex_unlock(&fi->inmem_lock);
}
- clear_inode_flag(inode, FI_ATOMIC_FILE);
fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
stat_dec_atomic_write(inode);
spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
if (!list_empty(&fi->inmem_ilist))
list_del_init(&fi->inmem_ilist);
+ if (f2fs_is_atomic_file(inode)) {
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ sbi->atomic_files--;
+ }
spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}
@@ -480,7 +489,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
- f2fs_show_injection_info(FAULT_CHECKPOINT);
+ f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
@@ -1008,8 +1017,9 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
if (dc->error)
printk_ratelimited(
- "%sF2FS-fs: Issue discard(%u, %u, %u) failed, ret: %d",
- KERN_INFO, dc->lstart, dc->start, dc->len, dc->error);
+ "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
+ KERN_INFO, sbi->sb->s_id,
+ dc->lstart, dc->start, dc->len, dc->error);
__detach_discard_cmd(dcc, dc);
}
@@ -1149,7 +1159,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
dc->len += len;
if (time_to_inject(sbi, FAULT_DISCARD)) {
- f2fs_show_injection_info(FAULT_DISCARD);
+ f2fs_show_injection_info(sbi, FAULT_DISCARD);
err = -EIO;
goto submit;
}
@@ -2691,7 +2701,7 @@ unlock:
up_read(&SM_I(sbi)->curseg_lock);
}
-void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
+void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg;
unsigned int old_segno;
@@ -2700,10 +2710,17 @@ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
down_write(&SIT_I(sbi)->sentry_lock);
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ if (type != NO_CHECK_TYPE && i != type)
+ continue;
+
curseg = CURSEG_I(sbi, i);
- old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
- locate_dirty_segment(sbi, old_segno);
+ if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
+ get_valid_blocks(sbi, curseg->segno, false) ||
+ get_ckpt_valid_blocks(sbi, curseg->segno)) {
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_segno);
+ }
}
up_write(&SIT_I(sbi)->sentry_lock);
@@ -3069,6 +3086,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
+ bool put_pin_sem = false;
+
+ if (type == CURSEG_COLD_DATA) {
+ /* GC during CURSEG_COLD_DATA_PINNED allocation */
+ if (down_read_trylock(&sbi->pin_sem)) {
+ put_pin_sem = true;
+ } else {
+ type = CURSEG_WARM_DATA;
+ curseg = CURSEG_I(sbi, type);
+ }
+ } else if (type == CURSEG_COLD_DATA_PINNED) {
+ type = CURSEG_COLD_DATA;
+ }
down_read(&SM_I(sbi)->curseg_lock);
@@ -3134,6 +3164,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
mutex_unlock(&curseg->curseg_mutex);
up_read(&SM_I(sbi)->curseg_lock);
+
+ if (put_pin_sem)
+ up_read(&sbi->pin_sem);
}
static void update_device_state(struct f2fs_io_info *fio)
@@ -3380,7 +3413,10 @@ void f2fs_wait_on_page_writeback(struct page *page,
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
+ /* submit cached LFS IO */
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
+ /* submit cached IPU IO */
+ f2fs_submit_merged_ipu_write(sbi, NULL, page);
if (ordered) {
wait_on_page_writeback(page);
f2fs_bug_on(sbi, locked && PageWriteback(page));
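
f2fs_drop_inmem_pages_all() can revisit an inode it failed to drop; to keep that from looping forever it now rotates each visited inode to the list tail and bounds the walk by the atomic_files count sampled at entry. The bounded rotate-and-visit shape, sketched on a plain array:

#include <stdio.h>

int main(void)
{
    int list[4] = { 1, 2, 3, 4 };
    int count = 4;                          /* snapshot taken at entry */

    for (int looped = 0; looped < count; looped++) {
        int head = list[0];
        for (int i = 0; i < count - 1; i++) /* like list_move_tail(head) */
            list[i] = list[i + 1];
        list[count - 1] = head;
        printf("visit %d\n", head);         /* may fail and stay queued */
    }
    return 0;                               /* every entry seen exactly once */
}
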
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 325781a1ae4d..a95467b202ea 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -313,6 +313,8 @@ struct sit_entry_set {
*/
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
+ if (type == CURSEG_COLD_DATA_PINNED)
+ type = CURSEG_COLD_DATA;
return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 197ad6b314de..5111e1ffe58a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1213,9 +1213,13 @@ static int f2fs_statfs_project(struct super_block *sb,
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
- limit = (dquot->dq_dqb.dqb_bsoftlimit ?
- dquot->dq_dqb.dqb_bsoftlimit :
- dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+ limit = 0;
+ if (dquot->dq_dqb.dqb_bsoftlimit)
+ limit = dquot->dq_dqb.dqb_bsoftlimit;
+ if (dquot->dq_dqb.dqb_bhardlimit &&
+ (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
+ limit = dquot->dq_dqb.dqb_bhardlimit;
+
if (limit && buf->f_blocks > limit) {
curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
buf->f_blocks = limit;
@@ -1224,9 +1228,13 @@ static int f2fs_statfs_project(struct super_block *sb,
(buf->f_blocks - curblock) : 0;
}
- limit = dquot->dq_dqb.dqb_isoftlimit ?
- dquot->dq_dqb.dqb_isoftlimit :
- dquot->dq_dqb.dqb_ihardlimit;
+ limit = 0;
+ if (dquot->dq_dqb.dqb_isoftlimit)
+ limit = dquot->dq_dqb.dqb_isoftlimit;
+ if (dquot->dq_dqb.dqb_ihardlimit &&
+ (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
+ limit = dquot->dq_dqb.dqb_ihardlimit;
+
if (limit && buf->f_files > limit) {
buf->f_files = limit;
buf->f_ffree =
@@ -1932,7 +1940,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
/* Don't account quota for quota files to avoid recursion */
qf_inode->i_flags |= S_NOQUOTA;
- err = dquot_enable(qf_inode, type, format_id, flags);
+ err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
iput(qf_inode);
return err;
}
@@ -2618,6 +2626,21 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED;
}
+ if (RDEV(0).path[0]) {
+ block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
+ int i = 1;
+
+ while (i < MAX_DEVICES && RDEV(i).path[0]) {
+ dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
+ i++;
+ }
+ if (segment_count != dev_seg_count) {
+ f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
+ segment_count, dev_seg_count);
+ return -EFSCORRUPTED;
+ }
+ }
+
if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
@@ -2852,6 +2875,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
spin_lock_init(&sbi->dev_lock);
init_rwsem(&sbi->sb_lock);
+ init_rwsem(&sbi->pin_sem);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -2946,6 +2970,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
f2fs_err(sbi, "Unable to read %dth superblock",
block + 1);
err = -EIO;
+ *recovery = 1;
continue;
}
@@ -2955,6 +2980,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
brelse(bh);
+ *recovery = 1;
continue;
}
@@ -2967,10 +2993,6 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
brelse(bh);
}
- /* Fail to read any one of the superblocks*/
- if (err < 0)
- *recovery = 1;
-
/* No valid superblock */
if (!*raw_super)
kvfree(super);
@@ -3324,6 +3346,8 @@ try_onemore:
sbi->write_io[i][j].bio = NULL;
spin_lock_init(&sbi->write_io[i][j].io_lock);
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+ INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
+ init_rwsem(&sbi->write_io[i][j].bio_list_lock);
}
}
@@ -3735,8 +3759,13 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_post_read_processing();
if (err)
goto free_root_stats;
+ err = f2fs_init_bio_entry_cache();
+ if (err)
+ goto free_post_read;
return 0;
+free_post_read:
+ f2fs_destroy_post_read_processing();
free_root_stats:
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
@@ -3760,6 +3789,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_bio_entry_cache();
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
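
f2fs_statfs_project() used to report the soft limit whenever one was set, even if the hard limit was smaller; it now reports the stricter of the two, i.e. the smallest non-zero limit. That selection reduces to a min-over-nonzero helper:

#include <stdio.h>

/* Return the smaller non-zero limit; 0 means "no limit set". */
static unsigned long long effective_limit(unsigned long long soft,
                                          unsigned long long hard)
{
    unsigned long long limit = 0;
    if (soft)
        limit = soft;
    if (hard && (!limit || hard < limit))
        limit = hard;
    return limit;
}

int main(void)
{
    printf("%llu\n", effective_limit(0, 500));    /* -> 500 */
    printf("%llu\n", effective_limit(1000, 500)); /* -> 500, hard is stricter */
    printf("%llu\n", effective_limit(300, 0));    /* -> 300 */
    return 0;
}
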
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index b558b64a4c9c..70945ceb9c0c 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -154,6 +154,8 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_casefold(sbi))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "casefold");
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "pin_file");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
@@ -443,6 +445,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, main_blkaddr, main_blkaddr);
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
@@ -510,6 +513,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_idle),
ATTR_LIST(gc_urgent),
ATTR_LIST(reclaim_segments),
+ ATTR_LIST(main_blkaddr),
ATTR_LIST(max_small_discards),
ATTR_LIST(discard_granularity),
ATTR_LIST(batched_trim_sections),
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 181900af2576..296b3189448a 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -539,8 +539,9 @@ out:
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
struct f2fs_xattr_entry *entry;
- void *base_addr;
+ void *base_addr, *last_base_addr;
int error = 0;
size_t rest = buffer_size;
@@ -550,6 +551,8 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if (error)
return error;
+ last_base_addr = (void *)base_addr + XATTR_SIZE(xnid, inode);
+
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
f2fs_xattr_handler(entry->e_name_index);
@@ -557,6 +560,15 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
size_t prefix_len;
size_t size;
+ if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+ f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+ inode->i_ino);
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+ goto cleanup;
+ }
+
if (!handler || (handler->list && !handler->list(dentry)))
continue;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 4614c0ba5f1c..bdc4503c00a3 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -172,15 +172,6 @@ long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
-#ifdef CONFIG_COMPAT
-static long fat_generic_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-
-{
- return fat_generic_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
static int fat_file_release(struct inode *inode, struct file *filp)
{
if ((filp->f_mode & FMODE_WRITE) &&
@@ -215,9 +206,7 @@ const struct file_operations fat_file_operations = {
.mmap = generic_file_mmap,
.release = fat_file_release,
.unlocked_ioctl = fat_generic_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = fat_generic_compat_ioctl,
-#endif
+ .compat_ioctl = compat_ptr_ioctl,
.fsync = fat_file_fsync,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ed1abc9e33cf..d4e6691d2d92 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -705,7 +705,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
cs->pipebufs++;
cs->nr_segs--;
} else {
- if (cs->nr_segs == cs->pipe->buffers)
+ if (cs->nr_segs >= cs->pipe->max_usage)
return -EIO;
page = alloc_page(GFP_HIGHUSER);
@@ -881,7 +881,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
struct pipe_buffer *buf;
int err;
- if (cs->nr_segs == cs->pipe->buffers)
+ if (cs->nr_segs >= cs->pipe->max_usage)
return -EIO;
err = unlock_request(cs->req);
@@ -1343,7 +1343,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (!fud)
return -EPERM;
- bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
+ bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
GFP_KERNEL);
if (!bufs)
return -ENOMEM;
@@ -1355,7 +1355,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
if (ret < 0)
goto out;
- if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
+ if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
ret = -EIO;
goto out;
}
@@ -1937,6 +1937,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos,
size_t len, unsigned int flags)
{
+ unsigned int head, tail, mask, count;
unsigned nbuf;
unsigned idx;
struct pipe_buffer *bufs;
@@ -1951,8 +1952,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
- bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer),
- GFP_KERNEL);
+ head = pipe->head;
+ tail = pipe->tail;
+ mask = pipe->ring_size - 1;
+ count = head - tail;
+
+ bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
if (!bufs) {
pipe_unlock(pipe);
return -ENOMEM;
@@ -1960,8 +1965,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
nbuf = 0;
rem = 0;
- for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
- rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
+ for (idx = tail; idx < head && rem < len; idx++)
+ rem += pipe->bufs[idx & mask].len;
ret = -EINVAL;
if (rem < len)
@@ -1972,16 +1977,16 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- BUG_ON(nbuf >= pipe->buffers);
- BUG_ON(!pipe->nrbufs);
- ibuf = &pipe->bufs[pipe->curbuf];
+ BUG_ON(nbuf >= pipe->ring_size);
+ BUG_ON(tail == head);
+ ibuf = &pipe->bufs[tail & mask];
obuf = &bufs[nbuf];
if (rem >= ibuf->len) {
*obuf = *ibuf;
ibuf->ops = NULL;
- pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
- pipe->nrbufs--;
+ tail++;
+ pipe->tail = tail;
} else {
if (!pipe_buf_get(pipe, ibuf))
goto out_free;
@@ -2262,7 +2267,7 @@ const struct file_operations fuse_dev_operations = {
.release = fuse_dev_release,
.fasync = fuse_dev_fasync,
.unlocked_ioctl = fuse_dev_ioctl,
- .compat_ioctl = fuse_dev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
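
These fuse hunks track the pipe rework from a cursor plus count (curbuf/nrbufs) to free-running head and tail counters masked with `ring_size - 1` on each slot access, so occupancy is simply `head - tail`. A self-contained sketch of that indexing scheme, assuming a power-of-two ring:

#include <stdio.h>

#define RING_SIZE 8                     /* must be a power of two */

struct ring {
    int slots[RING_SIZE];
    unsigned head, tail;                /* free-running, never wrapped */
};

static int ring_push(struct ring *r, int v)
{
    if (r->head - r->tail >= RING_SIZE)
        return -1;                      /* full: occupancy == capacity */
    r->slots[r->head++ & (RING_SIZE - 1)] = v;
    return 0;
}

static int ring_pop(struct ring *r, int *v)
{
    if (r->head == r->tail)
        return -1;                      /* empty */
    *v = r->slots[r->tail++ & (RING_SIZE - 1)];
    return 0;
}

int main(void)
{
    struct ring r = { .head = 0, .tail = 0 };
    int v;
    ring_push(&r, 7);
    ring_pop(&r, &v);
    printf("%d occupancy=%u\n", v, r.head - r.tail);
    return 0;
}

Because head and tail never wrap back to zero, the full and empty tests stay correct across unsigned overflow, which is the property the fuse splice code now relies on.
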
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index f63df54a08c6..516103248272 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1149,7 +1149,8 @@ static inline bool gfs2_iomap_need_write_lock(unsigned flags)
}
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, struct iomap *iomap)
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct metapath mp = { .mp_aheight = 1, };
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 997b326247e2..d07a295f9cac 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
@@ -354,6 +355,31 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -ENOTTY;
}
+#ifdef CONFIG_COMPAT
+static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ /* These are just misnamed; they actually get/put an int from/to user */
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ /* Keep this list in sync with gfs2_ioctl */
+ case FITRIM:
+ case FS_IOC_GETFSLABEL:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define gfs2_compat_ioctl NULL
+#endif
+
/**
* gfs2_size_hint - Give a hint to the size of a write request
* @filep: The struct file
@@ -732,7 +758,8 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to)
if (ret)
goto out_uninit;
- ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL);
+ ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
gfs2_glock_dq(&gh);
out_uninit:
@@ -767,7 +794,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
if (offset + len > i_size_read(&ip->i_inode))
goto out;
- ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL);
+ ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
out:
gfs2_glock_dq(&gh);
@@ -1293,6 +1321,7 @@ const struct file_operations gfs2_file_fops = {
.write_iter = gfs2_file_write_iter,
.iopoll = iomap_dio_iopoll,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
.release = gfs2_release,
@@ -1308,6 +1337,7 @@ const struct file_operations gfs2_file_fops = {
const struct file_operations gfs2_dir_fops = {
.iterate_shared = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
@@ -1324,6 +1354,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.write_iter = gfs2_file_write_iter,
.iopoll = iomap_dio_iopoll,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
.release = gfs2_release,
@@ -1337,6 +1368,7 @@ const struct file_operations gfs2_file_fops_nolock = {
const struct file_operations gfs2_dir_fops_nolock = {
.iterate_shared = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
+ .compat_ioctl = gfs2_compat_ioctl,
.open = gfs2_open,
.release = gfs2_release,
.fsync = gfs2_fsync,
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index d85230c84ef2..f32f15669996 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -325,4 +325,5 @@ const struct file_operations hpfs_dir_ops =
.release = hpfs_dir_release,
.fsync = hpfs_file_fsync,
.unlocked_ioctl = hpfs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 1ecec124e76f..b36abf9cb345 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -215,6 +215,7 @@ const struct file_operations hpfs_file_ops =
.fsync = hpfs_file_fsync,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = hpfs_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
};
const struct inode_operations hpfs_file_iops =
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a478df035651..d5c2a3158610 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -440,7 +440,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
u32 hash;
index = page->index;
- hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
+ hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
@@ -644,7 +644,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
addr = index * hpage_size;
/* mutex taken here, fault path and hole punch */
- hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
+ hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
/* See if already present in mapping to avoid alloc/free */
@@ -815,8 +815,11 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
/*
* File creation. Allocate an inode, and we're done..
*/
-static int hugetlbfs_mknod(struct inode *dir,
- struct dentry *dentry, umode_t mode, dev_t dev)
+static int do_hugetlbfs_mknod(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ dev_t dev,
+ bool tmpfile)
{
struct inode *inode;
int error = -ENOSPC;
@@ -824,13 +827,23 @@ static int hugetlbfs_mknod(struct inode *dir,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (inode) {
dir->i_ctime = dir->i_mtime = current_time(dir);
- d_instantiate(dentry, inode);
- dget(dentry); /* Extra count - pin the dentry in core */
+ if (tmpfile) {
+ d_tmpfile(dentry, inode);
+ } else {
+ d_instantiate(dentry, inode);
+ dget(dentry);/* Extra count - pin the dentry in core */
+ }
error = 0;
}
return error;
}
+static int hugetlbfs_mknod(struct inode *dir,
+ struct dentry *dentry, umode_t mode, dev_t dev)
+{
+ return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
+}
+
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
@@ -844,6 +857,12 @@ static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mo
return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
+static int hugetlbfs_tmpfile(struct inode *dir,
+ struct dentry *dentry, umode_t mode)
+{
+ return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
+}
+
static int hugetlbfs_symlink(struct inode *dir,
struct dentry *dentry, const char *symname)
{
@@ -1102,6 +1121,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations = {
.mknod = hugetlbfs_mknod,
.rename = simple_rename,
.setattr = hugetlbfs_setattr,
+ .tmpfile = hugetlbfs_tmpfile,
};
static const struct inode_operations hugetlbfs_inode_operations = {
@@ -1461,28 +1481,41 @@ static int __init init_hugetlbfs_fs(void)
sizeof(struct hugetlbfs_inode_info),
0, SLAB_ACCOUNT, init_once);
if (hugetlbfs_inode_cachep == NULL)
- goto out2;
+ goto out;
error = register_filesystem(&hugetlbfs_fs_type);
if (error)
- goto out;
+ goto out_free;
+ /* default hstate mount is required */
+ mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
+ if (IS_ERR(mnt)) {
+ error = PTR_ERR(mnt);
+ goto out_unreg;
+ }
+ hugetlbfs_vfsmount[default_hstate_idx] = mnt;
+
+ /* other hstates are optional */
i = 0;
for_each_hstate(h) {
+ if (i == default_hstate_idx)
+ continue;
+
mnt = mount_one_hugetlbfs(h);
- if (IS_ERR(mnt) && i == 0) {
- error = PTR_ERR(mnt);
- goto out;
- }
- hugetlbfs_vfsmount[i] = mnt;
+ if (IS_ERR(mnt))
+ hugetlbfs_vfsmount[i] = NULL;
+ else
+ hugetlbfs_vfsmount[i] = mnt;
i++;
}
return 0;
- out:
+ out_unreg:
+ (void)unregister_filesystem(&hugetlbfs_fs_type);
+ out_free:
kmem_cache_destroy(hugetlbfs_inode_cachep);
- out2:
+ out:
return error;
}
fs_initcall(init_hugetlbfs_fs)
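
init_hugetlbfs_fs() now treats the default-hstate mount as mandatory and gains a proper unwind ladder: each failure jumps to the label that tears down exactly what was already set up, in reverse order. The idiom in miniature (resources intentionally stay live on success, as in module init):

#include <stdio.h>
#include <stdlib.h>

static int init_everything(int fail_mount)
{
    char *cache;
    FILE *reg;

    cache = malloc(64);                 /* step 1: create the inode cache */
    if (!cache)
        goto out;
    reg = tmpfile();                    /* step 2: register the filesystem */
    if (!reg)
        goto out_free;
    if (fail_mount)                     /* step 3: mandatory default mount */
        goto out_unreg;
    return 0;                           /* success: everything stays live */

out_unreg:
    fclose(reg);                        /* undo step 2 */
out_free:
    free(cache);                        /* undo step 1 */
out:
    return -1;
}

int main(void)
{
    printf("%d\n", init_everything(1)); /* -> -1, fully unwound */
    return 0;
}
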
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 9174007ce107..91b85df0861e 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -33,6 +33,7 @@ enum {
enum {
IO_WQ_BIT_EXIT = 0, /* wq exiting */
IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
+ IO_WQ_BIT_ERROR = 2, /* error on setup */
};
enum {
@@ -56,6 +57,7 @@ struct io_worker {
struct rcu_head rcu;
struct mm_struct *mm;
+ const struct cred *creds;
struct files_struct *restore_files;
};
@@ -82,7 +84,7 @@ enum {
struct io_wqe {
struct {
spinlock_t lock;
- struct list_head work_list;
+ struct io_wq_work_list work_list;
unsigned long hash_map;
unsigned flags;
} ____cacheline_aligned_in_smp;
@@ -103,13 +105,13 @@ struct io_wqe {
struct io_wq {
struct io_wqe **wqes;
unsigned long state;
- unsigned nr_wqes;
get_work_fn *get_work;
put_work_fn *put_work;
struct task_struct *manager;
struct user_struct *user;
+ struct cred *creds;
struct mm_struct *mm;
refcount_t refs;
struct completion done;
@@ -135,6 +137,11 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
bool dropped_lock = false;
+ if (worker->creds) {
+ revert_creds(worker->creds);
+ worker->creds = NULL;
+ }
+
if (current->files != worker->restore_files) {
__acquire(&wqe->lock);
spin_unlock_irq(&wqe->lock);
@@ -229,7 +236,8 @@ static void io_worker_exit(struct io_worker *worker)
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
__must_hold(wqe->lock)
{
- if (!list_empty(&wqe->work_list) && !(wqe->flags & IO_WQE_FLAG_STALLED))
+ if (!wq_list_empty(&wqe->work_list) &&
+ !(wqe->flags & IO_WQE_FLAG_STALLED))
return true;
return false;
}
@@ -327,9 +335,9 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
* If worker is moving from bound to unbound (or vice versa), then
* ensure we update the running accounting.
*/
- worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
- work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
- if (worker_bound != work_bound) {
+ worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
+ work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
+ if (worker_bound != work_bound) {
io_wqe_dec_running(wqe, worker);
if (work_bound) {
worker->flags |= IO_WORKER_F_BOUND;
@@ -368,12 +376,15 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
__must_hold(wqe->lock)
{
+ struct io_wq_work_node *node, *prev;
struct io_wq_work *work;
- list_for_each_entry(work, &wqe->work_list, list) {
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
/* not hashed, can run anytime */
if (!(work->flags & IO_WQ_WORK_HASHED)) {
- list_del(&work->list);
+ wq_node_del(&wqe->work_list, node, prev);
return work;
}
@@ -381,7 +392,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
*hash = work->flags >> IO_WQ_HASH_SHIFT;
if (!(wqe->hash_map & BIT_ULL(*hash))) {
wqe->hash_map |= BIT_ULL(*hash);
- list_del(&work->list);
+ wq_node_del(&wqe->work_list, node, prev);
return work;
}
}
@@ -409,7 +420,7 @@ static void io_worker_handle_work(struct io_worker *worker)
work = io_get_next_work(wqe, &hash);
if (work)
__io_worker_busy(wqe, worker, work);
- else if (!list_empty(&wqe->work_list))
+ else if (!wq_list_empty(&wqe->work_list))
wqe->flags |= IO_WQE_FLAG_STALLED;
spin_unlock_irq(&wqe->lock);
@@ -426,6 +437,9 @@ next:
worker->cur_work = work;
spin_unlock_irq(&worker->lock);
+ if (work->flags & IO_WQ_WORK_CB)
+ work->func(&work);
+
if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
current->files != work->files) {
task_lock(current);
@@ -438,6 +452,8 @@ next:
set_fs(USER_DS);
worker->mm = wq->mm;
}
+ if (!worker->creds)
+ worker->creds = override_creds(wq->creds);
if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
work->flags |= IO_WQ_WORK_CANCEL;
if (worker->mm)
@@ -514,7 +530,7 @@ static int io_wqe_worker(void *data)
if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
spin_lock_irq(&wqe->lock);
- if (!list_empty(&wqe->work_list))
+ if (!wq_list_empty(&wqe->work_list))
io_worker_handle_work(worker);
else
spin_unlock_irq(&wqe->lock);
@@ -562,14 +578,14 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
spin_unlock_irq(&wqe->lock);
}
-static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
struct io_wqe_acct *acct = &wqe->acct[index];
struct io_worker *worker;
- worker = kcalloc_node(1, sizeof(*worker), GFP_KERNEL, wqe->node);
+ worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
if (!worker)
- return;
+ return false;
refcount_set(&worker->ref, 1);
worker->nulls_node.pprev = NULL;
@@ -581,7 +597,7 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
"io_wqe_worker-%d/%d", index, wqe->node);
if (IS_ERR(worker->task)) {
kfree(worker);
- return;
+ return false;
}
spin_lock_irq(&wqe->lock);
@@ -599,6 +615,7 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
atomic_inc(&wq->user->processes);
wake_up_process(worker->task);
+ return true;
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
@@ -606,9 +623,6 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
{
struct io_wqe_acct *acct = &wqe->acct[index];
- /* always ensure we have one bounded worker */
- if (index == IO_WQ_ACCT_BOUND && !acct->nr_workers)
- return true;
/* if we have available workers or no work, no need */
if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
return false;
@@ -621,12 +635,22 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
static int io_wq_manager(void *data)
{
struct io_wq *wq = data;
+ int workers_to_create = num_possible_nodes();
+ int node;
- while (!kthread_should_stop()) {
- int i;
+ /* create fixed workers */
+ refcount_set(&wq->refs, workers_to_create);
+ for_each_node(node) {
+ if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
+ goto err;
+ workers_to_create--;
+ }
+
+ complete(&wq->done);
- for (i = 0; i < wq->nr_wqes; i++) {
- struct io_wqe *wqe = wq->wqes[i];
+ while (!kthread_should_stop()) {
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
bool fork_worker[2] = { false, false };
spin_lock_irq(&wqe->lock);
@@ -645,6 +669,12 @@ static int io_wq_manager(void *data)
}
return 0;
+err:
+ set_bit(IO_WQ_BIT_ERROR, &wq->state);
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ if (refcount_sub_and_test(workers_to_create, &wq->refs))
+ complete(&wq->done);
+ return 0;
}
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
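
Worth calling out in the manager rework: wq->refs is preset to the node count, each node that successfully starts its bound worker gives up one unit of workers_to_create, and the error path subtracts the remainder in a single refcount_sub_and_test() so wq->done fires exactly once whether startup succeeds or fails partway. A hedged userspace sketch of that partial-initialization pattern, with stdatomic standing in for the kernel refcount API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int refs;          /* stand-in for refcount_t wq->refs */

    static bool start_one(int node)
    {
        return node != 2;            /* pretend node 2 fails to start */
    }

    int main(void)
    {
        int to_create = 4;

        atomic_store(&refs, to_create);
        for (int node = 0; node < 4; node++) {
            if (!start_one(node))
                goto err;
            to_create--;             /* this node's ref is now owned by its worker */
        }
        puts("all workers started");
        return 0;
    err:
        /* drop the refs of the workers that were never created */
        if (atomic_fetch_sub(&refs, to_create) == to_create)
            puts("last ref dropped: complete(&wq->done)");
        return 1;
    }
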
@@ -688,7 +718,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
}
spin_lock_irqsave(&wqe->lock, flags);
- list_add_tail(&work->list, &wqe->work_list);
+ wq_list_add_tail(&work->list, &wqe->work_list);
wqe->flags &= ~IO_WQE_FLAG_STALLED;
spin_unlock_irqrestore(&wqe->lock, flags);
@@ -750,7 +780,7 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
void io_wq_cancel_all(struct io_wq *wq)
{
- int i;
+ int node;
set_bit(IO_WQ_BIT_CANCEL, &wq->state);
@@ -759,8 +789,8 @@ void io_wq_cancel_all(struct io_wq *wq)
* to a worker and the worker putting itself on the busy_list
*/
rcu_read_lock();
- for (i = 0; i < wq->nr_wqes; i++) {
- struct io_wqe *wqe = wq->wqes[i];
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
}
@@ -803,14 +833,17 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
.cancel = cancel,
.caller_data = cancel_data,
};
+ struct io_wq_work_node *node, *prev;
struct io_wq_work *work;
unsigned long flags;
bool found = false;
spin_lock_irqsave(&wqe->lock, flags);
- list_for_each_entry(work, &wqe->work_list, list) {
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
if (cancel(work, cancel_data)) {
- list_del(&work->list);
+ wq_node_del(&wqe->work_list, node, prev);
found = true;
break;
}
@@ -833,10 +866,10 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
void *data)
{
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
- int i;
+ int node;
- for (i = 0; i < wq->nr_wqes; i++) {
- struct io_wqe *wqe = wq->wqes[i];
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
ret = io_wqe_cancel_cb_work(wqe, cancel, data);
if (ret != IO_WQ_CANCEL_NOTFOUND)
@@ -868,6 +901,7 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
struct io_wq_work *cwork)
{
+ struct io_wq_work_node *node, *prev;
struct io_wq_work *work;
unsigned long flags;
bool found = false;
@@ -880,9 +914,11 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
* no completion will be posted for it.
*/
spin_lock_irqsave(&wqe->lock, flags);
- list_for_each_entry(work, &wqe->work_list, list) {
+ wq_list_for_each(node, prev, &wqe->work_list) {
+ work = container_of(node, struct io_wq_work, list);
+
if (work == cwork) {
- list_del(&work->list);
+ wq_node_del(&wqe->work_list, node, prev);
found = true;
break;
}
@@ -910,10 +946,10 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
- int i;
+ int node;
- for (i = 0; i < wq->nr_wqes; i++) {
- struct io_wqe *wqe = wq->wqes[i];
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
ret = io_wqe_cancel_work(wqe, cwork);
if (ret != IO_WQ_CANCEL_NOTFOUND)
@@ -944,10 +980,10 @@ static void io_wq_flush_func(struct io_wq_work **workptr)
void io_wq_flush(struct io_wq *wq)
{
struct io_wq_flush_data data;
- int i;
+ int node;
- for (i = 0; i < wq->nr_wqes; i++) {
- struct io_wqe *wqe = wq->wqes[i];
+ for_each_node(node) {
+ struct io_wqe *wqe = wq->wqes[node];
init_completion(&data.done);
INIT_IO_WORK(&data.work, io_wq_flush_func);
@@ -957,43 +993,39 @@ void io_wq_flush(struct io_wq *wq)
}
}
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
- struct user_struct *user, get_work_fn *get_work,
- put_work_fn *put_work)
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
- int ret = -ENOMEM, i, node;
+ int ret = -ENOMEM, node;
struct io_wq *wq;
- wq = kcalloc(1, sizeof(*wq), GFP_KERNEL);
+ wq = kzalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
return ERR_PTR(-ENOMEM);
- wq->nr_wqes = num_online_nodes();
- wq->wqes = kcalloc(wq->nr_wqes, sizeof(struct io_wqe *), GFP_KERNEL);
+ wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
if (!wq->wqes) {
kfree(wq);
return ERR_PTR(-ENOMEM);
}
- wq->get_work = get_work;
- wq->put_work = put_work;
+ wq->get_work = data->get_work;
+ wq->put_work = data->put_work;
/* caller must already hold a reference to this */
- wq->user = user;
+ wq->user = data->user;
+ wq->creds = data->creds;
- i = 0;
- refcount_set(&wq->refs, wq->nr_wqes);
- for_each_online_node(node) {
+ for_each_node(node) {
struct io_wqe *wqe;
- wqe = kcalloc_node(1, sizeof(struct io_wqe), GFP_KERNEL, node);
+ wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
if (!wqe)
- break;
- wq->wqes[i] = wqe;
+ goto err;
+ wq->wqes[node] = wqe;
wqe->node = node;
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
- if (user) {
+ if (wq->user) {
wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
task_rlimit(current, RLIMIT_NPROC);
}
@@ -1001,33 +1033,36 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
wqe->node = node;
wqe->wq = wq;
spin_lock_init(&wqe->lock);
- INIT_LIST_HEAD(&wqe->work_list);
+ INIT_WQ_LIST(&wqe->work_list);
INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
INIT_LIST_HEAD(&wqe->all_list);
-
- i++;
}
init_completion(&wq->done);
- if (i != wq->nr_wqes)
- goto err;
-
/* caller must have already done mmgrab() on this mm */
- wq->mm = mm;
+ wq->mm = data->mm;
wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
if (!IS_ERR(wq->manager)) {
wake_up_process(wq->manager);
+ wait_for_completion(&wq->done);
+ if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ reinit_completion(&wq->done);
return wq;
}
ret = PTR_ERR(wq->manager);
- wq->manager = NULL;
-err:
complete(&wq->done);
- io_wq_destroy(wq);
+err:
+ for_each_node(node)
+ kfree(wq->wqes[node]);
+ kfree(wq->wqes);
+ kfree(wq);
return ERR_PTR(ret);
}
@@ -1039,27 +1074,21 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
void io_wq_destroy(struct io_wq *wq)
{
- int i;
+ int node;
- if (wq->manager) {
- set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ set_bit(IO_WQ_BIT_EXIT, &wq->state);
+ if (wq->manager)
kthread_stop(wq->manager);
- }
rcu_read_lock();
- for (i = 0; i < wq->nr_wqes; i++) {
- struct io_wqe *wqe = wq->wqes[i];
-
- if (!wqe)
- continue;
- io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
- }
+ for_each_node(node)
+ io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
rcu_read_unlock();
wait_for_completion(&wq->done);
- for (i = 0; i < wq->nr_wqes; i++)
- kfree(wq->wqes[i]);
+ for_each_node(node)
+ kfree(wq->wqes[node]);
kfree(wq->wqes);
kfree(wq);
}
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 4b29f922f80c..600e0158cba7 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -11,6 +11,7 @@ enum {
IO_WQ_WORK_NEEDS_FILES = 16,
IO_WQ_WORK_UNBOUND = 32,
IO_WQ_WORK_INTERNAL = 64,
+ IO_WQ_WORK_CB = 128,
IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
};
@@ -21,15 +22,60 @@ enum io_wq_cancel {
IO_WQ_CANCEL_NOTFOUND, /* work not found */
};
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+static inline void wq_list_add_tail(struct io_wq_work_node *node,
+ struct io_wq_work_list *list)
+{
+ if (!list->first) {
+ list->first = list->last = node;
+ } else {
+ list->last->next = node;
+ list->last = node;
+ }
+}
+
+static inline void wq_node_del(struct io_wq_work_list *list,
+ struct io_wq_work_node *node,
+ struct io_wq_work_node *prev)
+{
+ if (node == list->first)
+ list->first = node->next;
+ if (node == list->last)
+ list->last = prev;
+ if (prev)
+ prev->next = node->next;
+}
+
+#define wq_list_for_each(pos, prv, head) \
+ for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
+
+#define wq_list_empty(list) ((list)->first == NULL)
+#define INIT_WQ_LIST(list) do { \
+ (list)->first = NULL; \
+ (list)->last = NULL; \
+} while (0)
+
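
A note on the new list primitives just above: work items move from a doubly linked list_head to a two-pointer singly linked list, so unlinking needs the predecessor, which is exactly what wq_list_for_each carries along in prv. A standalone compile-and-run sketch of the same shape (types re-declared locally; next is cleared on insert here for simplicity, where the kernel relies on INIT_IO_WORK):

    #include <stdio.h>

    struct node { struct node *next; };
    struct list { struct node *first, *last; };

    static void add_tail(struct list *l, struct node *n)
    {
        n->next = NULL;
        if (!l->first) {
            l->first = l->last = n;
        } else {
            l->last->next = n;
            l->last = n;
        }
    }

    /* mirrors wq_node_del(): unlink n given its predecessor (NULL if head) */
    static void del(struct list *l, struct node *n, struct node *prev)
    {
        if (n == l->first)
            l->first = n->next;
        if (n == l->last)
            l->last = prev;
        if (prev)
            prev->next = n->next;
    }

    int main(void)
    {
        struct list l = { NULL, NULL };
        struct node a, b, c;

        add_tail(&l, &a);
        add_tail(&l, &b);
        add_tail(&l, &c);

        /* iterate with a trailing prev pointer, as wq_list_for_each does */
        for (struct node *prev = NULL, *pos = l.first; pos;
             prev = pos, pos = pos->next) {
            if (pos == &b) {
                del(&l, pos, prev);
                break;
            }
        }
        printf("head=%s tail=%s\n", l.first == &a ? "a" : "?",
               l.last == &c ? "c" : "?");
        return 0;
    }
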
struct io_wq_work {
- struct list_head list;
+ union {
+ struct io_wq_work_node list;
+ void *data;
+ };
void (*func)(struct io_wq_work **);
- unsigned flags;
struct files_struct *files;
+ unsigned flags;
};
#define INIT_IO_WORK(work, _func) \
do { \
+ (work)->list.next = NULL; \
(work)->func = _func; \
(work)->flags = 0; \
(work)->files = NULL; \
@@ -38,9 +84,16 @@ struct io_wq_work {
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
-struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
- struct user_struct *user,
- get_work_fn *get_work, put_work_fn *put_work);
+struct io_wq_data {
+ struct mm_struct *mm;
+ struct user_struct *user;
+ struct cred *creds;
+
+ get_work_fn *get_work;
+ put_work_fn *put_work;
+};
+
+struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
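
With the parameter block, a caller bundles everything up front; the io_uring hookup later in this patch does effectively the following (designated initializers shown here for brevity, where the patch assigns field by field):

    struct io_wq_data data = {
        .mm       = ctx->sqo_mm,
        .user     = ctx->user,
        .creds    = ctx->creds,
        .get_work = io_get_work,
        .put_work = io_put_work,
    };

    ctx->io_wq = io_wq_create(concurrency, &data);
    if (IS_ERR(ctx->io_wq)) {
        ret = PTR_ERR(ctx->io_wq);
        ctx->io_wq = NULL;
    }
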
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 4c030a92de79..ec53aa7cdc94 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -69,6 +69,7 @@
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
+#include <linux/highmem.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -186,6 +187,7 @@ struct io_ring_ctx {
bool compat;
bool account_mem;
bool cq_overflow_flushed;
+ bool drain_next;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -236,6 +238,8 @@ struct io_ring_ctx {
struct user_struct *user;
+ struct cred *creds;
+
/* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
struct completion *completions;
@@ -278,16 +282,6 @@ struct io_ring_ctx {
} ____cacheline_aligned_in_smp;
};
-struct sqe_submit {
- const struct io_uring_sqe *sqe;
- struct file *ring_file;
- int ring_fd;
- u32 sequence;
- bool has_user;
- bool in_async;
- bool needs_fixed_file;
-};
-
/*
* First field must be the file pointer in all the
* iocb unions! See also 'struct kiocb' in <linux/fs.h>
@@ -298,12 +292,20 @@ struct io_poll_iocb {
__poll_t events;
bool done;
bool canceled;
- struct wait_queue_entry wait;
+ struct wait_queue_entry *wait;
+};
+
+struct io_timeout_data {
+ struct io_kiocb *req;
+ struct hrtimer timer;
+ struct timespec64 ts;
+ enum hrtimer_mode mode;
+ u32 seq_offset;
};
struct io_timeout {
struct file *file;
- struct hrtimer timer;
+ struct io_timeout_data *data;
};
/*
@@ -320,7 +322,12 @@ struct io_kiocb {
struct io_timeout timeout;
};
- struct sqe_submit submit;
+ const struct io_uring_sqe *sqe;
+ struct file *ring_file;
+ int ring_fd;
+ bool has_user;
+ bool in_async;
+ bool needs_fixed_file;
struct io_ring_ctx *ctx;
union {
@@ -333,19 +340,20 @@ struct io_kiocb {
#define REQ_F_NOWAIT 1 /* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
#define REQ_F_FIXED_FILE 4 /* ctx owns file */
-#define REQ_F_SEQ_PREV 8 /* sequential with previous */
+#define REQ_F_LINK_NEXT 8 /* already grabbed next link */
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
#define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
-#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
+#define REQ_F_DRAIN_LINK 512 /* link should be fully drained */
#define REQ_F_TIMEOUT 1024 /* timeout request */
#define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
#define REQ_F_INFLIGHT 16384 /* on inflight list */
#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
+#define REQ_F_FREE_SQE 65536 /* free sqe if not async queued */
u64 user_data;
u32 result;
u32 sequence;
@@ -383,6 +391,9 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void __io_free_req(struct io_kiocb *req);
static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);
+static void __io_double_put_req(struct io_kiocb *req);
+static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
+static void io_queue_linked_timeout(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -521,12 +532,13 @@ static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
opcode == IORING_OP_WRITE_FIXED);
}
-static inline bool io_prep_async_work(struct io_kiocb *req)
+static inline bool io_prep_async_work(struct io_kiocb *req,
+ struct io_kiocb **link)
{
bool do_hashed = false;
- if (req->submit.sqe) {
- switch (req->submit.sqe->opcode) {
+ if (req->sqe) {
+ switch (req->sqe->opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
do_hashed = true;
@@ -537,6 +549,7 @@ static inline bool io_prep_async_work(struct io_kiocb *req)
case IORING_OP_RECVMSG:
case IORING_OP_ACCEPT:
case IORING_OP_POLL_ADD:
+ case IORING_OP_CONNECT:
/*
* We know REQ_F_ISREG is not set on some of these
* opcodes, but this enables us to keep the check in
@@ -546,17 +559,21 @@ static inline bool io_prep_async_work(struct io_kiocb *req)
req->work.flags |= IO_WQ_WORK_UNBOUND;
break;
}
- if (io_sqe_needs_user(req->submit.sqe))
+ if (io_sqe_needs_user(req->sqe))
req->work.flags |= IO_WQ_WORK_NEEDS_USER;
}
+ *link = io_prep_linked_timeout(req);
return do_hashed;
}
static inline void io_queue_async_work(struct io_kiocb *req)
{
- bool do_hashed = io_prep_async_work(req);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *link;
+ bool do_hashed;
+
+ do_hashed = io_prep_async_work(req, &link);
trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
req->flags);
@@ -566,13 +583,16 @@ static inline void io_queue_async_work(struct io_kiocb *req)
io_wq_enqueue_hashed(ctx->io_wq, &req->work,
file_inode(req->file));
}
+
+ if (link)
+ io_queue_linked_timeout(link);
}
static void io_kill_timeout(struct io_kiocb *req)
{
int ret;
- ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
if (ret != -1) {
atomic_inc(&req->ctx->cq_timeouts);
list_del_init(&req->list);
@@ -601,11 +621,6 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
__io_commit_cqring(ctx);
while ((req = io_get_deferred_req(ctx)) != NULL) {
- if (req->flags & REQ_F_SHADOW_DRAIN) {
- /* Just for drain, free it. */
- __io_free_req(req);
- continue;
- }
req->flags |= REQ_F_IO_DRAINED;
io_queue_async_work(req);
}
@@ -639,7 +654,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
eventfd_signal(ctx->cq_ev_fd, 1);
}
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
+/* Returns true if there are no backlogged entries after the flush */
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
struct io_rings *rings = ctx->rings;
struct io_uring_cqe *cqe;
@@ -649,10 +665,10 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
if (!force) {
if (list_empty_careful(&ctx->cq_overflow_list))
- return;
+ return true;
if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
rings->cq_ring_entries))
- return;
+ return false;
}
spin_lock_irqsave(&ctx->completion_lock, flags);
@@ -661,6 +677,7 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
if (force)
ctx->cq_overflow_flushed = true;
+ cqe = NULL;
while (!list_empty(&ctx->cq_overflow_list)) {
cqe = io_get_cqring(ctx);
if (!cqe && !force)
@@ -688,6 +705,8 @@ static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
list_del(&req->list);
io_put_req(req);
}
+
+ return cqe != NULL;
}
static void io_cqring_fill_event(struct io_kiocb *req, long res)
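
The boolean return is what lets submission apply back-pressure: rather than silently overflowing, io_submit_sqes() (further down in this patch) now bails out when the backlog cannot be fully drained:

    /* if we have a backlog and couldn't flush it all, return BUSY */
    if (!list_empty(&ctx->cq_overflow_list) &&
        !io_cqring_overflow_flush(ctx, false))
        return -EBUSY;
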
@@ -787,6 +806,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
}
got_it:
+ req->ring_file = NULL;
req->file = NULL;
req->ctx = ctx;
req->flags = 0;
@@ -816,6 +836,8 @@ static void __io_free_req(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
+ if (req->flags & REQ_F_FREE_SQE)
+ kfree(req->sqe);
if (req->file && !(req->flags & REQ_F_FIXED_FILE))
fput(req->file);
if (req->flags & REQ_F_INFLIGHT) {
@@ -827,6 +849,8 @@ static void __io_free_req(struct io_kiocb *req)
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
+ if (req->flags & REQ_F_TIMEOUT)
+ kfree(req->timeout.data);
percpu_ref_put(&ctx->refs);
if (likely(!io_is_fallback_req(req)))
kmem_cache_free(req_cachep, req);
@@ -839,7 +863,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
struct io_ring_ctx *ctx = req->ctx;
int ret;
- ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
if (ret != -1) {
io_cqring_fill_event(req, -ECANCELED);
io_commit_cqring(ctx);
@@ -857,6 +881,10 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
struct io_kiocb *nxt;
bool wake_ev = false;
+ /* Already got next link */
+ if (req->flags & REQ_F_LINK_NEXT)
+ return;
+
/*
* The list should never be empty when we are called here. But could
* potentially happen if the chain is messed up, check to be on the
@@ -865,31 +893,26 @@ static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
while (nxt) {
list_del_init(&nxt->list);
+
+ if ((req->flags & REQ_F_LINK_TIMEOUT) &&
+ (nxt->flags & REQ_F_TIMEOUT)) {
+ wake_ev |= io_link_cancel_timeout(nxt);
+ nxt = list_first_entry_or_null(&req->link_list,
+ struct io_kiocb, list);
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
+ continue;
+ }
if (!list_empty(&req->link_list)) {
INIT_LIST_HEAD(&nxt->link_list);
list_splice(&req->link_list, &nxt->link_list);
nxt->flags |= REQ_F_LINK;
}
- /*
- * If we're in async work, we can continue processing the chain
- * in this context instead of having to queue up new async work.
- */
- if (req->flags & REQ_F_LINK_TIMEOUT) {
- wake_ev = io_link_cancel_timeout(nxt);
-
- /* we dropped this link, get next */
- nxt = list_first_entry_or_null(&req->link_list,
- struct io_kiocb, list);
- } else if (nxtptr && io_wq_current_is_worker()) {
- *nxtptr = nxt;
- break;
- } else {
- io_queue_async_work(nxt);
- break;
- }
+ *nxtptr = nxt;
+ break;
}
+ req->flags |= REQ_F_LINK_NEXT;
if (wake_ev)
io_cqring_ev_posted(ctx);
}
@@ -912,12 +935,13 @@ static void io_fail_links(struct io_kiocb *req)
trace_io_uring_fail_link(req, link);
if ((req->flags & REQ_F_LINK_TIMEOUT) &&
- link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+ link->sqe->opcode == IORING_OP_LINK_TIMEOUT) {
io_link_cancel_timeout(link);
} else {
io_cqring_fill_event(link, -ECANCELED);
- io_double_put_req(link);
+ __io_double_put_req(link);
}
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
}
io_commit_cqring(ctx);
@@ -925,12 +949,10 @@ static void io_fail_links(struct io_kiocb *req)
io_cqring_ev_posted(ctx);
}
-static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
+static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{
- if (likely(!(req->flags & REQ_F_LINK))) {
- __io_free_req(req);
+ if (likely(!(req->flags & REQ_F_LINK)))
return;
- }
/*
* If LINK is set, we have dependent requests in this chain. If we
@@ -956,32 +978,30 @@ static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
} else {
io_req_link_next(req, nxt);
}
-
- __io_free_req(req);
}
static void io_free_req(struct io_kiocb *req)
{
- io_free_req_find_next(req, NULL);
+ struct io_kiocb *nxt = NULL;
+
+ io_req_find_next(req, &nxt);
+ __io_free_req(req);
+
+ if (nxt)
+ io_queue_async_work(nxt);
}
/*
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
*/
+__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
- struct io_kiocb *nxt = NULL;
+ io_req_find_next(req, nxtptr);
if (refcount_dec_and_test(&req->refs))
- io_free_req_find_next(req, &nxt);
-
- if (nxt) {
- if (nxtptr)
- *nxtptr = nxt;
- else
- io_queue_async_work(nxt);
- }
+ __io_free_req(req);
}
static void io_put_req(struct io_kiocb *req)
@@ -990,13 +1010,24 @@ static void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
-static void io_double_put_req(struct io_kiocb *req)
+/*
+ * Must only be used if we don't need to care about links, usually from
+ * within the completion handling itself.
+ */
+static void __io_double_put_req(struct io_kiocb *req)
{
/* drop both submit and complete references */
if (refcount_sub_and_test(2, &req->refs))
__io_free_req(req);
}
+static void io_double_put_req(struct io_kiocb *req)
+{
+ /* drop both submit and complete references */
+ if (refcount_sub_and_test(2, &req->refs))
+ io_free_req(req);
+}
+
static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
{
struct io_rings *rings = ctx->rings;
@@ -1048,7 +1079,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
* completions for those, only batch free for fixed
* file and non-linked commands.
*/
- if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
+ if (((req->flags &
+ (REQ_F_FIXED_FILE|REQ_F_LINK|REQ_F_FREE_SQE)) ==
REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
reqs[to_free++] = req;
if (to_free == ARRAY_SIZE(reqs))
@@ -1366,7 +1398,7 @@ static bool io_file_supports_async(struct file *file)
static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
{
- const struct io_uring_sqe *sqe = req->submit.sqe;
+ const struct io_uring_sqe *sqe = req->sqe;
struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw;
unsigned ioprio;
@@ -1453,15 +1485,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
bool in_async)
{
- if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
+ if (in_async && ret >= 0 && kiocb->ki_complete == io_complete_rw)
*nxt = __io_complete_rw(kiocb, ret);
else
io_rw_done(kiocb, ret);
}
-static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
- const struct io_uring_sqe *sqe,
- struct iov_iter *iter)
+static ssize_t io_import_fixed(struct io_ring_ctx *ctx, int rw,
+ const struct io_uring_sqe *sqe,
+ struct iov_iter *iter)
{
size_t len = READ_ONCE(sqe->len);
struct io_mapped_ubuf *imu;
@@ -1533,11 +1565,10 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
return len;
}
-static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
- const struct sqe_submit *s, struct iovec **iovec,
- struct iov_iter *iter)
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+ struct iovec **iovec, struct iov_iter *iter)
{
- const struct io_uring_sqe *sqe = s->sqe;
+ const struct io_uring_sqe *sqe = req->sqe;
void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
size_t sqe_len = READ_ONCE(sqe->len);
u8 opcode;
@@ -1551,18 +1582,16 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
* flag.
*/
opcode = READ_ONCE(sqe->opcode);
- if (opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED) {
- ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
+ if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
- return ret;
+ return io_import_fixed(req->ctx, rw, sqe, iter);
}
- if (!s->has_user)
+ if (!req->has_user)
return -EFAULT;
#ifdef CONFIG_COMPAT
- if (ctx->compat)
+ if (req->ctx->compat)
return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
iovec, iter);
#endif
@@ -1590,9 +1619,19 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
return -EAGAIN;
while (iov_iter_count(iter)) {
- struct iovec iovec = iov_iter_iovec(iter);
+ struct iovec iovec;
ssize_t nr;
+ if (!iov_iter_is_bvec(iter)) {
+ iovec = iov_iter_iovec(iter);
+ } else {
+ /* fixed buffers import bvec */
+ iovec.iov_base = kmap(iter->bvec->bv_page)
+ + iter->iov_offset;
+ iovec.iov_len = min(iter->count,
+ iter->bvec->bv_len - iter->iov_offset);
+ }
+
if (rw == READ) {
nr = file->f_op->read(file, iovec.iov_base,
iovec.iov_len, &kiocb->ki_pos);
@@ -1601,6 +1640,9 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
iovec.iov_len, &kiocb->ki_pos);
}
+ if (iov_iter_is_bvec(iter))
+ kunmap(iter->bvec->bv_page);
+
if (nr < 0) {
if (!ret)
ret = nr;
@@ -1633,7 +1675,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF;
- ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
+ ret = io_import_iovec(READ, req, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1665,7 +1707,7 @@ static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN)
- kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
+ kiocb_done(kiocb, ret2, nxt, req->in_async);
else
ret = -EAGAIN;
}
@@ -1691,7 +1733,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF;
- ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
+ ret = io_import_iovec(WRITE, req, &iovec, &iter);
if (ret < 0)
return ret;
@@ -1728,7 +1770,7 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
else
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
if (!force_nonblock || ret2 != -EAGAIN)
- kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
+ kiocb_done(kiocb, ret2, nxt, req->in_async);
else
ret = -EAGAIN;
}
@@ -1918,7 +1960,7 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
+ if (sqe->ioprio || sqe->len || sqe->buf_index)
return -EINVAL;
addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
@@ -1943,6 +1985,38 @@ static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
#endif
}
+static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_kiocb **nxt, bool force_nonblock)
+{
+#if defined(CONFIG_NET)
+ struct sockaddr __user *addr;
+ unsigned file_flags;
+ int addr_len, ret;
+
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+ return -EINVAL;
+
+ addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
+ addr_len = READ_ONCE(sqe->addr2);
+ file_flags = force_nonblock ? O_NONBLOCK : 0;
+
+ ret = __sys_connect_file(req->file, addr, addr_len, file_flags);
+ if (ret == -EAGAIN && force_nonblock)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret < 0 && (req->flags & REQ_F_LINK))
+ req->flags |= REQ_F_FAIL_LINK;
+ io_cqring_add_event(req, ret);
+ io_put_req_find_next(req, nxt);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
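
For reference, IORING_OP_CONNECT takes the sockaddr pointer in sqe->addr and the address length in sqe->addr2 (no ->len, ->ioprio, ->buf_index, or ->rw_flags allowed, per the checks above). A hedged userspace sketch of filling the raw sqe, since no liburing prep helper exists at this point; the opcode value is assumed from the uapi enum order:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/io_uring.h>

    #ifndef IORING_OP_CONNECT
    #define IORING_OP_CONNECT 16   /* assumption: next slot after LINK_TIMEOUT */
    #endif

    static void prep_connect(struct io_uring_sqe *sqe, int sockfd,
                             const struct sockaddr *addr, socklen_t addrlen)
    {
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_CONNECT;
        sqe->fd     = sockfd;
        sqe->addr   = (unsigned long) addr;  /* sockaddr __user * */
        sqe->addr2  = addrlen;               /* read via sqe->addr2 in io_connect() */
    }
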
static inline void io_poll_remove_req(struct io_kiocb *req)
{
if (!RB_EMPTY_NODE(&req->rb_node)) {
@@ -1957,8 +2031,8 @@ static void io_poll_remove_one(struct io_kiocb *req)
spin_lock(&poll->head->lock);
WRITE_ONCE(poll->canceled, true);
- if (!list_empty(&poll->wait.entry)) {
- list_del_init(&poll->wait.entry);
+ if (!list_empty(&poll->wait->entry)) {
+ list_del_init(&poll->wait->entry);
io_queue_async_work(req);
}
spin_unlock(&poll->head->lock);
@@ -2026,12 +2100,16 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
struct io_ring_ctx *ctx = req->ctx;
req->poll.done = true;
- io_cqring_fill_event(req, mangle_poll(mask));
+ kfree(req->poll.wait);
+ if (error)
+ io_cqring_fill_event(req, error);
+ else
+ io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx);
}
@@ -2044,11 +2122,16 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt = NULL;
__poll_t mask = 0;
+ int ret = 0;
- if (work->flags & IO_WQ_WORK_CANCEL)
+ if (work->flags & IO_WQ_WORK_CANCEL) {
WRITE_ONCE(poll->canceled, true);
+ ret = -ECANCELED;
+ } else if (READ_ONCE(poll->canceled)) {
+ ret = -ECANCELED;
+ }
- if (!READ_ONCE(poll->canceled))
+ if (ret != -ECANCELED)
mask = vfs_poll(poll->file, &pt) & poll->events;
/*
@@ -2059,17 +2142,19 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
* avoid further branches in the fast path.
*/
spin_lock_irq(&ctx->completion_lock);
- if (!mask && !READ_ONCE(poll->canceled)) {
- add_wait_queue(poll->head, &poll->wait);
+ if (!mask && ret != -ECANCELED) {
+ add_wait_queue(poll->head, poll->wait);
spin_unlock_irq(&ctx->completion_lock);
return;
}
io_poll_remove_req(req);
- io_poll_complete(req, mask);
+ io_poll_complete(req, mask, ret);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
+ if (ret < 0 && req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
io_put_req_find_next(req, &nxt);
if (nxt)
*workptr = &nxt->work;
@@ -2078,8 +2163,7 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
- struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
- wait);
+ struct io_poll_iocb *poll = wait->private;
struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
struct io_ring_ctx *ctx = req->ctx;
__poll_t mask = key_to_poll(key);
@@ -2089,7 +2173,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & poll->events))
return 0;
- list_del_init(&poll->wait.entry);
+ list_del_init(&poll->wait->entry);
/*
* Run completion inline if we can. We're using trylock here because
@@ -2099,7 +2183,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
*/
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
io_poll_remove_req(req);
- io_poll_complete(req, mask);
+ io_poll_complete(req, mask, 0);
req->flags |= REQ_F_COMP_LOCKED;
io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2130,7 +2214,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
pt->error = 0;
pt->req->poll.head = head;
- add_wait_queue(head, &pt->req->poll.wait);
+ add_wait_queue(head, pt->req->poll.wait);
}
static void io_poll_req_insert(struct io_kiocb *req)
@@ -2169,7 +2253,11 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (!poll->file)
return -EBADF;
- req->submit.sqe = NULL;
+ poll->wait = kmalloc(sizeof(*poll->wait), GFP_KERNEL);
+ if (!poll->wait)
+ return -ENOMEM;
+
+ req->sqe = NULL;
INIT_IO_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
@@ -2185,8 +2273,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
/* initialized the list so that we can do list_empty checks */
- INIT_LIST_HEAD(&poll->wait.entry);
- init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+ INIT_LIST_HEAD(&poll->wait->entry);
+ init_waitqueue_func_entry(poll->wait, io_poll_wake);
+ poll->wait->private = poll;
INIT_LIST_HEAD(&req->list);
@@ -2195,14 +2284,14 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
spin_lock_irq(&ctx->completion_lock);
if (likely(poll->head)) {
spin_lock(&poll->head->lock);
- if (unlikely(list_empty(&poll->wait.entry))) {
+ if (unlikely(list_empty(&poll->wait->entry))) {
if (ipt.error)
cancel = true;
ipt.error = 0;
mask = 0;
}
if (mask || ipt.error)
- list_del_init(&poll->wait.entry);
+ list_del_init(&poll->wait->entry);
else if (cancel)
WRITE_ONCE(poll->canceled, true);
else if (!poll->done) /* actually waiting for an event */
@@ -2211,7 +2300,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
}
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(req, mask);
+ io_poll_complete(req, mask, 0);
}
spin_unlock_irq(&ctx->completion_lock);
@@ -2224,12 +2313,12 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
- struct io_ring_ctx *ctx;
- struct io_kiocb *req;
+ struct io_timeout_data *data = container_of(timer,
+ struct io_timeout_data, timer);
+ struct io_kiocb *req = data->req;
+ struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- req = container_of(timer, struct io_kiocb, timeout.timer);
- ctx = req->ctx;
atomic_inc(&ctx->cq_timeouts);
spin_lock_irqsave(&ctx->completion_lock, flags);
@@ -2279,10 +2368,12 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
if (ret == -ENOENT)
return ret;
- ret = hrtimer_try_to_cancel(&req->timeout.timer);
+ ret = hrtimer_try_to_cancel(&req->timeout.data->timer);
if (ret == -1)
return -EALREADY;
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
io_cqring_fill_event(req, -ECANCELED);
io_put_req(req);
return 0;
@@ -2319,34 +2410,54 @@ static int io_timeout_remove(struct io_kiocb *req,
return 0;
}
-static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_timeout_setup(struct io_kiocb *req)
{
- unsigned count;
- struct io_ring_ctx *ctx = req->ctx;
- struct list_head *entry;
- enum hrtimer_mode mode;
- struct timespec64 ts;
- unsigned span = 0;
+ const struct io_uring_sqe *sqe = req->sqe;
+ struct io_timeout_data *data;
unsigned flags;
- if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
+ if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
return -EINVAL;
flags = READ_ONCE(sqe->timeout_flags);
if (flags & ~IORING_TIMEOUT_ABS)
return -EINVAL;
- if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
+ data = kzalloc(sizeof(struct io_timeout_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->req = req;
+ req->timeout.data = data;
+ req->flags |= REQ_F_TIMEOUT;
+
+ if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
if (flags & IORING_TIMEOUT_ABS)
- mode = HRTIMER_MODE_ABS;
+ data->mode = HRTIMER_MODE_ABS;
else
- mode = HRTIMER_MODE_REL;
+ data->mode = HRTIMER_MODE_REL;
- hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
- req->flags |= REQ_F_TIMEOUT;
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
+ return 0;
+}
+
+static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ unsigned count;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_timeout_data *data;
+ struct list_head *entry;
+ unsigned span = 0;
+ int ret;
+
+ ret = io_timeout_setup(req);
+ /* common setup allows flags (like links) set, we don't */
+ if (!ret && sqe->flags)
+ ret = -EINVAL;
+ if (ret)
+ return ret;
/*
* sqe->off holds how many events that need to occur for this
@@ -2362,8 +2473,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
req->sequence = ctx->cached_sq_head + count - 1;
- /* reuse it to store the count */
- req->submit.sequence = count;
+ req->timeout.data->seq_offset = count;
/*
* Insertion sort, ensuring the first entry in the list is always
@@ -2374,6 +2484,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
unsigned nxt_sq_head;
long long tmp, tmp_nxt;
+ u32 nxt_offset = nxt->timeout.data->seq_offset;
if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
continue;
@@ -2383,8 +2494,8 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
* long to store it.
*/
tmp = (long long)ctx->cached_sq_head + count - 1;
- nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
- tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
+ nxt_sq_head = nxt->sequence - nxt_offset + 1;
+ tmp_nxt = (long long)nxt_sq_head + nxt_offset - 1;
/*
* cached_sq_head may overflow, and it will never overflow twice
@@ -2406,8 +2517,9 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->sequence -= span;
add:
list_add(&req->list, entry);
- req->timeout.timer.function = io_timeout_fn;
- hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
+ data = req->timeout.data;
+ data->timer.function = io_timeout_fn;
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
spin_unlock_irq(&ctx->completion_lock);
return 0;
}
@@ -2442,7 +2554,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
struct io_kiocb *req, __u64 sqe_addr,
- struct io_kiocb **nxt)
+ struct io_kiocb **nxt, int success_ret)
{
unsigned long flags;
int ret;
@@ -2459,6 +2571,8 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
goto done;
ret = io_poll_cancel(ctx, sqe_addr);
done:
+ if (!ret)
+ ret = success_ret;
io_cqring_fill_event(req, ret);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
@@ -2480,13 +2594,12 @@ static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe->cancel_flags)
return -EINVAL;
- io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
+ io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), nxt, 0);
return 0;
}
static int io_req_defer(struct io_kiocb *req)
{
- const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_uring_sqe *sqe_copy;
struct io_ring_ctx *ctx = req->ctx;
@@ -2505,34 +2618,35 @@ static int io_req_defer(struct io_kiocb *req)
return 0;
}
- memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
- req->submit.sqe = sqe_copy;
+ memcpy(sqe_copy, req->sqe, sizeof(*sqe_copy));
+ req->flags |= REQ_F_FREE_SQE;
+ req->sqe = sqe_copy;
- trace_io_uring_defer(ctx, req, false);
+ trace_io_uring_defer(ctx, req, req->user_data);
list_add_tail(&req->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
return -EIOCBQUEUED;
}
-static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
- bool force_nonblock)
+__attribute__((nonnull))
+static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
+ bool force_nonblock)
{
int ret, opcode;
- struct sqe_submit *s = &req->submit;
struct io_ring_ctx *ctx = req->ctx;
- opcode = READ_ONCE(s->sqe->opcode);
+ opcode = READ_ONCE(req->sqe->opcode);
switch (opcode) {
case IORING_OP_NOP:
ret = io_nop(req);
break;
case IORING_OP_READV:
- if (unlikely(s->sqe->buf_index))
+ if (unlikely(req->sqe->buf_index))
return -EINVAL;
ret = io_read(req, nxt, force_nonblock);
break;
case IORING_OP_WRITEV:
- if (unlikely(s->sqe->buf_index))
+ if (unlikely(req->sqe->buf_index))
return -EINVAL;
ret = io_write(req, nxt, force_nonblock);
break;
@@ -2543,34 +2657,37 @@ static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
ret = io_write(req, nxt, force_nonblock);
break;
case IORING_OP_FSYNC:
- ret = io_fsync(req, s->sqe, nxt, force_nonblock);
+ ret = io_fsync(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_POLL_ADD:
- ret = io_poll_add(req, s->sqe, nxt);
+ ret = io_poll_add(req, req->sqe, nxt);
break;
case IORING_OP_POLL_REMOVE:
- ret = io_poll_remove(req, s->sqe);
+ ret = io_poll_remove(req, req->sqe);
break;
case IORING_OP_SYNC_FILE_RANGE:
- ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
+ ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_SENDMSG:
- ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
+ ret = io_sendmsg(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_RECVMSG:
- ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
+ ret = io_recvmsg(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_TIMEOUT:
- ret = io_timeout(req, s->sqe);
+ ret = io_timeout(req, req->sqe);
break;
case IORING_OP_TIMEOUT_REMOVE:
- ret = io_timeout_remove(req, s->sqe);
+ ret = io_timeout_remove(req, req->sqe);
break;
case IORING_OP_ACCEPT:
- ret = io_accept(req, s->sqe, nxt, force_nonblock);
+ ret = io_accept(req, req->sqe, nxt, force_nonblock);
+ break;
+ case IORING_OP_CONNECT:
+ ret = io_connect(req, req->sqe, nxt, force_nonblock);
break;
case IORING_OP_ASYNC_CANCEL:
- ret = io_async_cancel(req, s->sqe, nxt);
+ ret = io_async_cancel(req, req->sqe, nxt);
break;
default:
ret = -EINVAL;
@@ -2585,22 +2702,29 @@ static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
return -EAGAIN;
/* workqueue context doesn't hold uring_lock, grab it now */
- if (s->in_async)
+ if (req->in_async)
mutex_lock(&ctx->uring_lock);
io_iopoll_req_issued(req);
- if (s->in_async)
+ if (req->in_async)
mutex_unlock(&ctx->uring_lock);
}
return 0;
}
+static void io_link_work_cb(struct io_wq_work **workptr)
+{
+ struct io_wq_work *work = *workptr;
+ struct io_kiocb *link = work->data;
+
+ io_queue_linked_timeout(link);
+ work->func = io_wq_submit_work;
+}
+
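
The trampoline relies on the new union in struct io_wq_work: while IO_WQ_WORK_CB is set, ->data carries the linked-timeout request instead of the list node, the worker's early work->func(&work) call (see the handler hunk near the top) arms the timeout, and func is restored so the normal execution path runs next. A minimal standalone model of that one-shot swap, with hypothetical names:

    #include <stdio.h>

    struct work {
        void (*func)(struct work **);
        void *data;                  /* stashed link while the CB is pending */
    };

    static void real_handler(struct work **wp)
    {
        printf("running work, data=%p\n", (*wp)->data);
    }

    /* one-shot: consume the stashed pointer, then restore the real handler */
    static void trampoline(struct work **wp)
    {
        struct work *w = *wp;

        printf("arming linked timeout %p\n", w->data);
        w->func = real_handler;
    }

    int main(void)
    {
        struct work w = { .func = trampoline, .data = (void *)0x1 };
        struct work *wp = &w;

        w.func(&wp);   /* CB pass: arms timeout, restores handler */
        w.func(&wp);   /* normal pass: executes the work */
        return 0;
    }
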
static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct sqe_submit *s = &req->submit;
- const struct io_uring_sqe *sqe = s->sqe;
struct io_kiocb *nxt = NULL;
int ret = 0;
@@ -2611,10 +2735,10 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
ret = -ECANCELED;
if (!ret) {
- s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
- s->in_async = true;
+ req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
+ req->in_async = true;
do {
- ret = __io_submit_sqe(req, &nxt, false);
+ ret = io_issue_sqe(req, &nxt, false);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
@@ -2636,13 +2760,17 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
io_put_req(req);
}
- /* async context always use a copy of the sqe */
- kfree(sqe);
-
/* if a dependent link is ready, pass it back */
if (!ret && nxt) {
- io_prep_async_work(nxt);
+ struct io_kiocb *link;
+
+ io_prep_async_work(nxt, &link);
*workptr = &nxt->work;
+ if (link) {
+ nxt->work.flags |= IO_WQ_WORK_CB;
+ nxt->work.func = io_link_work_cb;
+ nxt->work.data = link;
+ }
}
}
@@ -2674,24 +2802,17 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
{
- struct sqe_submit *s = &req->submit;
struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int fd;
- flags = READ_ONCE(s->sqe->flags);
- fd = READ_ONCE(s->sqe->fd);
+ flags = READ_ONCE(req->sqe->flags);
+ fd = READ_ONCE(req->sqe->fd);
if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
- /*
- * All io need record the previous position, if LINK vs DARIN,
- * it can be used to mark the position of the first IO in the
- * link list.
- */
- req->sequence = s->sequence;
- if (!io_op_needs_file(s->sqe))
+ if (!io_op_needs_file(req->sqe))
return 0;
if (flags & IOSQE_FIXED_FILE) {
@@ -2704,7 +2825,7 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
return -EBADF;
req->flags |= REQ_F_FIXED_FILE;
} else {
- if (s->needs_fixed_file)
+ if (req->needs_fixed_file)
return -EBADF;
trace_io_uring_file_get(ctx, fd);
req->file = io_file_get(state, fd);
@@ -2728,7 +2849,7 @@ static int io_grab_files(struct io_kiocb *req)
* the fd has changed since we started down this path, and disallow
* this operation if it has.
*/
- if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
+ if (fcheck(req->ring_fd) == req->ring_file) {
list_add(&req->inflight_entry, &ctx->inflight_list);
req->flags |= REQ_F_INFLIGHT;
req->work.files = current->files;
@@ -2742,8 +2863,9 @@ static int io_grab_files(struct io_kiocb *req)
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
- struct io_kiocb *req = container_of(timer, struct io_kiocb,
- timeout.timer);
+ struct io_timeout_data *data = container_of(timer,
+ struct io_timeout_data, timer);
+ struct io_kiocb *req = data->req;
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *prev = NULL;
unsigned long flags;
@@ -2756,16 +2878,20 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
*/
if (!list_empty(&req->list)) {
prev = list_entry(req->list.prev, struct io_kiocb, link_list);
- if (refcount_inc_not_zero(&prev->refs))
+ if (refcount_inc_not_zero(&prev->refs)) {
list_del_init(&req->list);
- else
+ prev->flags &= ~REQ_F_LINK_TIMEOUT;
+ } else
prev = NULL;
}
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) {
- io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
+ if (prev->flags & REQ_F_LINK)
+ prev->flags |= REQ_F_FAIL_LINK;
+ io_async_find_and_cancel(ctx, req, prev->user_data, NULL,
+ -ETIME);
io_put_req(prev);
} else {
io_cqring_add_event(req, -ETIME);
@@ -2774,8 +2900,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
- enum hrtimer_mode *mode)
+static void io_queue_linked_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -2785,9 +2910,11 @@ static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
*/
spin_lock_irq(&ctx->completion_lock);
if (!list_empty(&req->list)) {
- req->timeout.timer.function = io_link_timeout_fn;
- hrtimer_start(&req->timeout.timer, timespec64_to_ktime(*ts),
- *mode);
+ struct io_timeout_data *data = req->timeout.data;
+
+ data->timer.function = io_link_timeout_fn;
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
+ data->mode);
}
spin_unlock_irq(&ctx->completion_lock);
@@ -2795,66 +2922,30 @@ static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
io_put_req(req);
}
-static int io_validate_link_timeout(const struct io_uring_sqe *sqe,
- struct timespec64 *ts)
-{
- if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
- return -EINVAL;
- if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
- return -EINVAL;
- if (get_timespec64(ts, u64_to_user_ptr(sqe->addr)))
- return -EFAULT;
-
- return 0;
-}
-
-static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
- struct timespec64 *ts,
- enum hrtimer_mode *mode)
+static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
struct io_kiocb *nxt;
- int ret;
if (!(req->flags & REQ_F_LINK))
return NULL;
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
- if (!nxt || nxt->submit.sqe->opcode != IORING_OP_LINK_TIMEOUT)
+ if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
return NULL;
- ret = io_validate_link_timeout(nxt->submit.sqe, ts);
- if (ret) {
- list_del_init(&nxt->list);
- io_cqring_add_event(nxt, ret);
- io_double_put_req(nxt);
- return ERR_PTR(-ECANCELED);
- }
-
- if (nxt->submit.sqe->timeout_flags & IORING_TIMEOUT_ABS)
- *mode = HRTIMER_MODE_ABS;
- else
- *mode = HRTIMER_MODE_REL;
-
req->flags |= REQ_F_LINK_TIMEOUT;
- hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, *mode);
return nxt;
}
-static int __io_queue_sqe(struct io_kiocb *req)
+static void __io_queue_sqe(struct io_kiocb *req)
{
- enum hrtimer_mode mode;
- struct io_kiocb *nxt;
- struct timespec64 ts;
+ struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
+ struct io_kiocb *nxt = NULL;
int ret;
- nxt = io_prep_linked_timeout(req, &ts, &mode);
- if (IS_ERR(nxt)) {
- ret = PTR_ERR(nxt);
- nxt = NULL;
- goto err;
- }
-
- ret = __io_submit_sqe(req, NULL, true);
+ ret = io_issue_sqe(req, &nxt, true);
+ if (nxt)
+ io_queue_async_work(nxt);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2862,42 +2953,38 @@ static int __io_queue_sqe(struct io_kiocb *req)
*/
if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
(req->flags & REQ_F_MUST_PUNT))) {
- struct sqe_submit *s = &req->submit;
struct io_uring_sqe *sqe_copy;
- sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
- if (sqe_copy) {
- s->sqe = sqe_copy;
- if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
- ret = io_grab_files(req);
- if (ret) {
- kfree(sqe_copy);
- goto err;
- }
- }
-
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- io_queue_async_work(req);
+ sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL);
+ if (!sqe_copy)
+ goto err;
- if (nxt)
- io_queue_linked_timeout(nxt, &ts, &mode);
+ req->sqe = sqe_copy;
+ req->flags |= REQ_F_FREE_SQE;
- return 0;
+ if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
+ ret = io_grab_files(req);
+ if (ret)
+ goto err;
}
+
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually submitted.
+ */
+ io_queue_async_work(req);
+ return;
}
err:
/* drop submission reference */
io_put_req(req);
- if (nxt) {
+ if (linked_timeout) {
if (!ret)
- io_queue_linked_timeout(nxt, &ts, &mode);
+ io_queue_linked_timeout(linked_timeout);
else
- io_put_req(nxt);
+ io_put_req(linked_timeout);
}
/* and drop final reference, if we failed */
@@ -2907,83 +2994,52 @@ err:
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
}
-
- return ret;
}
-static int io_queue_sqe(struct io_kiocb *req)
+static void io_queue_sqe(struct io_kiocb *req)
{
int ret;
- ret = io_req_defer(req);
- if (ret) {
- if (ret != -EIOCBQUEUED) {
- io_cqring_add_event(req, ret);
- io_double_put_req(req);
- }
- return 0;
+ if (unlikely(req->ctx->drain_next)) {
+ req->flags |= REQ_F_IO_DRAIN;
+ req->ctx->drain_next = false;
}
+ req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
- return __io_queue_sqe(req);
-}
-
-static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
-{
- int ret;
- int need_submit = false;
- struct io_ring_ctx *ctx = req->ctx;
-
- if (!shadow)
- return io_queue_sqe(req);
-
- /*
- * Mark the first IO in link list as DRAIN, let all the following
- * IOs enter the defer list. all IO needs to be completed before link
- * list.
- */
- req->flags |= REQ_F_IO_DRAIN;
ret = io_req_defer(req);
if (ret) {
if (ret != -EIOCBQUEUED) {
io_cqring_add_event(req, ret);
+ if (req->flags & REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
io_double_put_req(req);
- __io_free_req(shadow);
- return 0;
}
- } else {
- /*
- * If ret == 0 means that all IOs in front of link io are
- * running done. let's queue link head.
- */
- need_submit = true;
- }
-
- /* Insert shadow req to defer_list, blocking next IOs */
- spin_lock_irq(&ctx->completion_lock);
- trace_io_uring_defer(ctx, shadow, true);
- list_add_tail(&shadow->list, &ctx->defer_list);
- spin_unlock_irq(&ctx->completion_lock);
-
- if (need_submit)
- return __io_queue_sqe(req);
+ } else
+ __io_queue_sqe(req);
+}
- return 0;
+static inline void io_queue_link_head(struct io_kiocb *req)
+{
+ if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
+ io_cqring_add_event(req, -ECANCELED);
+ io_double_put_req(req);
+ } else
+ io_queue_sqe(req);
}
+
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
struct io_kiocb **link)
{
- struct io_uring_sqe *sqe_copy;
- struct sqe_submit *s = &req->submit;
struct io_ring_ctx *ctx = req->ctx;
int ret;
- req->user_data = s->sqe->user_data;
+ req->user_data = req->sqe->user_data;
/* enforce forwards compatibility on users */
- if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
+ if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
goto err_req;
}
@@ -3005,25 +3061,37 @@ err_req:
*/
if (*link) {
struct io_kiocb *prev = *link;
+ struct io_uring_sqe *sqe_copy;
- sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
+ if (req->sqe->flags & IOSQE_IO_DRAIN)
+ (*link)->flags |= REQ_F_DRAIN_LINK | REQ_F_IO_DRAIN;
+
+ if (READ_ONCE(req->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
+ ret = io_timeout_setup(req);
+ /* common setup allows offset being set, we don't */
+ if (!ret && req->sqe->off)
+ ret = -EINVAL;
+ if (ret) {
+ prev->flags |= REQ_F_FAIL_LINK;
+ goto err_req;
+ }
+ }
+
+ sqe_copy = kmemdup(req->sqe, sizeof(*sqe_copy), GFP_KERNEL);
if (!sqe_copy) {
ret = -EAGAIN;
goto err_req;
}
- s->sqe = sqe_copy;
+ req->sqe = sqe_copy;
+ req->flags |= REQ_F_FREE_SQE;
trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->list, &prev->link_list);
- } else if (s->sqe->flags & IOSQE_IO_LINK) {
+ } else if (req->sqe->flags & IOSQE_IO_LINK) {
req->flags |= REQ_F_LINK;
INIT_LIST_HEAD(&req->link_list);
*link = req;
- } else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
- /* Only valid as a linked SQE */
- ret = -EINVAL;
- goto err_req;
} else {
io_queue_sqe(req);
}
@@ -3075,7 +3143,7 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
* used, it's important that those reads are done through READ_ONCE() to
* prevent a re-load down the line.
*/
-static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
+static bool io_get_sqring(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
struct io_rings *rings = ctx->rings;
u32 *sq_array = ctx->sq_array;
@@ -3091,14 +3159,18 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
*/
head = ctx->cached_sq_head;
/* make sure SQ entry isn't read before tail */
- if (head == smp_load_acquire(&rings->sq.tail))
+ if (unlikely(head == smp_load_acquire(&rings->sq.tail)))
return false;
head = READ_ONCE(sq_array[head & ctx->sq_mask]);
- if (head < ctx->sq_entries) {
- s->ring_file = NULL;
- s->sqe = &ctx->sq_sqes[head];
- s->sequence = ctx->cached_sq_head;
+ if (likely(head < ctx->sq_entries)) {
+ /*
+ * All IO needs to record the previous position; for LINK vs DRAIN,
+ * it can be used to mark the position of the first IO in the
+ * link list.
+ */
+ req->sequence = ctx->cached_sq_head;
+ req->sqe = &ctx->sq_sqes[head];
ctx->cached_sq_head++;
return true;
}
@@ -3116,14 +3188,13 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
- struct io_kiocb *shadow_req = NULL;
int i, submitted = 0;
bool mm_fault = false;
- if (!list_empty(&ctx->cq_overflow_list)) {
- io_cqring_overflow_flush(ctx, false);
+ /* if we have a backlog and couldn't flush it all, return BUSY */
+ if (!list_empty(&ctx->cq_overflow_list) &&
+ !io_cqring_overflow_flush(ctx, false))
return -EBUSY;
- }
if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, nr);
@@ -3140,12 +3211,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
submitted = -EAGAIN;
break;
}
- if (!io_get_sqring(ctx, &req->submit)) {
+ if (!io_get_sqring(ctx, req)) {
__io_free_req(req);
break;
}
- if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
+ if (io_sqe_needs_user(req->sqe) && !*mm) {
mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
if (!mm_fault) {
use_mm(ctx->sqo_mm);
@@ -3153,26 +3224,14 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
}
}
- sqe_flags = req->submit.sqe->flags;
-
- if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
- if (!shadow_req) {
- shadow_req = io_get_req(ctx, NULL);
- if (unlikely(!shadow_req))
- goto out;
- shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
- refcount_dec(&shadow_req->refs);
- }
- shadow_req->sequence = req->submit.sequence;
- }
+ sqe_flags = req->sqe->flags;
-out:
- req->submit.ring_file = ring_file;
- req->submit.ring_fd = ring_fd;
- req->submit.has_user = *mm != NULL;
- req->submit.in_async = async;
- req->submit.needs_fixed_file = async;
- trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
+ req->ring_file = ring_file;
+ req->ring_fd = ring_fd;
+ req->has_user = *mm != NULL;
+ req->in_async = async;
+ req->needs_fixed_file = async;
+ trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
true, async);
io_submit_sqe(req, statep, &link);
submitted++;
@@ -3182,14 +3241,13 @@ out:
* that's the end of the chain. Submit the previous link.
*/
if (!(sqe_flags & IOSQE_IO_LINK) && link) {
- io_queue_link_head(link, shadow_req);
+ io_queue_link_head(link);
link = NULL;
- shadow_req = NULL;
}
}
if (link)
- io_queue_link_head(link, shadow_req);
+ io_queue_link_head(link);
if (statep)
io_submit_state_end(&state);
@@ -3203,6 +3261,7 @@ static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
struct mm_struct *cur_mm = NULL;
+ const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned inflight;
@@ -3213,6 +3272,7 @@ static int io_sq_thread(void *data)
old_fs = get_fs();
set_fs(USER_DS);
+ old_cred = override_creds(ctx->creds);
ret = timeout = inflight = 0;
while (!kthread_should_park()) {
@@ -3319,6 +3379,7 @@ static int io_sq_thread(void *data)
unuse_mm(cur_mm);
mmput(cur_mm);
}
+ revert_creds(old_cred);
kthread_parkme();
@@ -3898,6 +3959,7 @@ static void io_get_work(struct io_wq_work *work)
static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
+ struct io_wq_data data;
unsigned concurrency;
int ret;
@@ -3942,10 +4004,15 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err;
}
+ data.mm = ctx->sqo_mm;
+ data.user = ctx->user;
+ data.creds = ctx->creds;
+ data.get_work = io_get_work;
+ data.put_work = io_put_work;
+
/* Do QD, or 4 * CPUS, whatever is smallest */
concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
- ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
- io_get_work, io_put_work);
+ ctx->io_wq = io_wq_create(concurrency, &data);
if (IS_ERR(ctx->io_wq)) {
ret = PTR_ERR(ctx->io_wq);
ctx->io_wq = NULL;
@@ -4294,6 +4361,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_unaccount_mem(ctx->user,
ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user);
+ put_cred(ctx->creds);
kfree(ctx->completions);
kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx);
@@ -4402,12 +4470,11 @@ static int io_uring_flush(struct file *file, void *data)
return 0;
}
-static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+static void *io_uring_validate_mmap_request(struct file *file,
+ loff_t pgoff, size_t sz)
{
- loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
- unsigned long sz = vma->vm_end - vma->vm_start;
struct io_ring_ctx *ctx = file->private_data;
- unsigned long pfn;
+ loff_t offset = pgoff << PAGE_SHIFT;
struct page *page;
void *ptr;
@@ -4420,17 +4487,59 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
ptr = ctx->sq_sqes;
break;
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
page = virt_to_head_page(ptr);
if (sz > page_size(page))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
+
+ return ptr;
+}
+
+#ifdef CONFIG_MMU
+
+static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t sz = vma->vm_end - vma->vm_start;
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
+#else /* !CONFIG_MMU */
+
+static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
+}
+
+static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+}
+
+static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ void *ptr;
+
+ ptr = io_uring_validate_mmap_request(file, pgoff, len);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ return (unsigned long) ptr;
+}
+
+#endif /* !CONFIG_MMU */
+
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
u32, min_complete, u32, flags, const sigset_t __user *, sig,
size_t, sigsz)
@@ -4501,6 +4610,10 @@ static const struct file_operations io_uring_fops = {
.release = io_uring_release,
.flush = io_uring_flush,
.mmap = io_uring_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = io_uring_nommu_get_unmapped_area,
+ .mmap_capabilities = io_uring_nommu_mmap_capabilities,
+#endif
.poll = io_uring_poll,
.fasync = io_uring_fasync,
};
@@ -4531,12 +4644,18 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
ctx->cq_entries = rings->cq_ring_entries;
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
- if (size == SIZE_MAX)
+ if (size == SIZE_MAX) {
+ io_mem_free(ctx->rings);
+ ctx->rings = NULL;
return -EOVERFLOW;
+ }
ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes)
+ if (!ctx->sq_sqes) {
+ io_mem_free(ctx->rings);
+ ctx->rings = NULL;
return -ENOMEM;
+ }
return 0;
}
@@ -4640,6 +4759,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
ctx->compat = in_compat_syscall();
ctx->account_mem = account_mem;
ctx->user = user;
+ ctx->creds = prepare_creds();
ret = io_allocate_scq_urings(ctx, p);
if (ret)
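
A minimal sketch of the credential handoff introduced above, with hypothetical
foo_* names; prepare_creds(), override_creds() and revert_creds() are the
stock <linux/cred.h> helpers:

	#include <linux/cred.h>

	struct foo_ctx {
		const struct cred *creds;	/* saved with prepare_creds() at setup */
	};

	static int foo_worker(void *data)
	{
		struct foo_ctx *ctx = data;
		const struct cred *old_cred;

		old_cred = override_creds(ctx->creds);	/* act as the submitter */
		/* ... perform the offloaded I/O with the submitter's permissions ... */
		revert_creds(old_cred);			/* restore the thread's own identity */
		return 0;
	}
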
diff --git a/fs/ioctl.c b/fs/ioctl.c
index fef3a6bf7c78..2f5e4e5b97e1 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
+#include <linux/compat.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -174,10 +175,9 @@ static int fiemap_check_ranges(struct super_block *sb,
return 0;
}
-static int ioctl_fiemap(struct file *filp, unsigned long arg)
+static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
{
struct fiemap fiemap;
- struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
struct fiemap_extent_info fieinfo = { 0, };
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
@@ -244,7 +244,8 @@ fdput:
return ret;
}
-static long ioctl_file_clone_range(struct file *file, void __user *argp)
+static long ioctl_file_clone_range(struct file *file,
+ struct file_clone_range __user *argp)
{
struct file_clone_range args;
@@ -466,7 +467,7 @@ EXPORT_SYMBOL(generic_block_fiemap);
* Only the l_start, l_len and l_whence fields of the 'struct space_resv'
 * are used here, the rest are ignored.
*/
-int ioctl_preallocate(struct file *filp, void __user *argp)
+int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
{
struct inode *inode = file_inode(filp);
struct space_resv sr;
@@ -487,9 +488,39 @@ int ioctl_preallocate(struct file *filp, void __user *argp)
return -EINVAL;
}
- return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+ return vfs_fallocate(filp, mode | FALLOC_FL_KEEP_SIZE, sr.l_start,
+ sr.l_len);
}
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined CONFIG_COMPAT && defined(CONFIG_X86_64)
+/* just account for different alignment */
+int compat_ioctl_preallocate(struct file *file, int mode,
+ struct space_resv_32 __user *argp)
+{
+ struct inode *inode = file_inode(file);
+ struct space_resv_32 sr;
+
+ if (copy_from_user(&sr, argp, sizeof(sr)))
+ return -EFAULT;
+
+ switch (sr.l_whence) {
+ case SEEK_SET:
+ break;
+ case SEEK_CUR:
+ sr.l_start += file->f_pos;
+ break;
+ case SEEK_END:
+ sr.l_start += i_size_read(inode);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return vfs_fallocate(file, mode | FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
+}
+#endif
+
static int file_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -503,7 +534,12 @@ static int file_ioctl(struct file *filp, unsigned int cmd,
return put_user(i_size_read(inode) - filp->f_pos, p);
case FS_IOC_RESVSP:
case FS_IOC_RESVSP64:
- return ioctl_preallocate(filp, p);
+ return ioctl_preallocate(filp, 0, p);
+ case FS_IOC_UNRESVSP:
+ case FS_IOC_UNRESVSP64:
+ return ioctl_preallocate(filp, FALLOC_FL_PUNCH_HOLE, p);
+ case FS_IOC_ZERO_RANGE:
+ return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p);
}
return vfs_ioctl(filp, cmd, arg);
@@ -584,9 +620,9 @@ static int ioctl_fsthaw(struct file *filp)
return thaw_super(sb);
}
-static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
+static int ioctl_file_dedupe_range(struct file *file,
+ struct file_dedupe_range __user *argp)
{
- struct file_dedupe_range __user *argp = arg;
struct file_dedupe_range *same = NULL;
int ret;
unsigned long size;
@@ -635,7 +671,7 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg)
{
int error = 0;
- int __user *argp = (int __user *)arg;
+ void __user *argp = (void __user *)arg;
struct inode *inode = file_inode(filp);
switch (cmd) {
@@ -674,13 +710,13 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
break;
case FS_IOC_FIEMAP:
- return ioctl_fiemap(filp, arg);
+ return ioctl_fiemap(filp, argp);
case FIGETBSZ:
/* anon_bdev filesystems may not have a block size */
if (!inode->i_sb->s_blocksize)
return -EINVAL;
- return put_user(inode->i_sb->s_blocksize, argp);
+ return put_user(inode->i_sb->s_blocksize, (int __user *)argp);
case FICLONE:
return ioctl_file_clone(filp, arg, 0, 0, 0);
@@ -719,3 +755,37 @@ SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
return ksys_ioctl(fd, cmd, arg);
}
+
+#ifdef CONFIG_COMPAT
+/**
+ * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
+ *
+ * This is not normally called as a function, but instead set in struct
+ * file_operations as
+ *
+ * .compat_ioctl = compat_ptr_ioctl,
+ *
+ * On most architectures, compat_ptr_ioctl() just passes all arguments
+ * to the corresponding ->ioctl handler. The exception is arch/s390, where
+ * compat_ptr() clears the top bit of a 32-bit pointer value, so user space
+ * pointers to the second 2GB alias the first 2GB, as is the case for
+ * native 32-bit s390 user space.
+ *
+ * The compat_ptr_ioctl() function must therefore be used only with ioctl
+ * functions that either ignore the argument or pass a pointer to a
+ * compatible data type.
+ *
+ * If any ioctl command handled by fops->unlocked_ioctl passes a plain
+ * integer instead of a pointer, or any of the passed data types
+ * is incompatible between 32-bit and 64-bit architectures, a proper
+ * handler is required instead of compat_ptr_ioctl.
+ */
+long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ if (!file->f_op->unlocked_ioctl)
+ return -ENOIOCTLCMD;
+
+ return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+EXPORT_SYMBOL(compat_ptr_ioctl);
+#endif
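
A minimal sketch, with hypothetical foo_* names, of a driver adopting the
helper documented above:

	static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		/* @arg is always a user pointer for the commands handled here */
		return -ENOIOCTLCMD;
	}

	static const struct file_operations foo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= foo_ioctl,
		.compat_ioctl	= compat_ptr_ioctl,	/* generic pointer conversion */
	};
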
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 93cd11938bf5..eef2722d93a1 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -3,13 +3,15 @@
# Copyright (c) 2019 Oracle.
# All Rights Reserved.
#
-obj-$(CONFIG_FS_IOMAP) += iomap.o
-iomap-y += \
- apply.o \
- buffered-io.o \
- direct-io.o \
- fiemap.o \
- seek.o
+ccflags-y += -I $(srctree)/$(src) # needed for trace events
+
+obj-$(CONFIG_FS_IOMAP) += iomap.o
+iomap-y += trace.o \
+ apply.o \
+ buffered-io.o \
+ direct-io.o \
+ fiemap.o \
+ seek.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c
index 54c02aecf3cd..76925b40b5fd 100644
--- a/fs/iomap/apply.c
+++ b/fs/iomap/apply.c
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
+#include "trace.h"
/*
 * Execute an iomap write on a segment of the mapping that spans a
@@ -23,8 +24,12 @@ loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
- struct iomap iomap = { 0 };
+ struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap srcmap = { .type = IOMAP_HOLE };
loff_t written = 0, ret;
+ u64 end;
+
+ trace_iomap_apply(inode, pos, length, flags, ops, actor, _RET_IP_);
/*
* Need to map a range from start position for length bytes. This can
@@ -38,7 +43,7 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
* expose transient stale data. If the reserve fails, we can safely
* back out at this point as there is nothing to undo.
*/
- ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
+ ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
if (ret)
return ret;
if (WARN_ON(iomap.offset > pos))
@@ -46,19 +51,34 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
if (WARN_ON(iomap.length == 0))
return -EIO;
+ trace_iomap_apply_dstmap(inode, &iomap);
+ if (srcmap.type != IOMAP_HOLE)
+ trace_iomap_apply_srcmap(inode, &srcmap);
+
/*
* Cut down the length to the one actually provided by the filesystem,
* as it might not be able to give us the whole size that we requested.
*/
- if (iomap.offset + iomap.length < pos + length)
- length = iomap.offset + iomap.length - pos;
+ end = iomap.offset + iomap.length;
+ if (srcmap.type != IOMAP_HOLE)
+ end = min(end, srcmap.offset + srcmap.length);
+ if (pos + length > end)
+ length = end - pos;
/*
- * Now that we have guaranteed that the space allocation will succeed.
+ * Now that we have guaranteed that the space allocation will succeed,
* we can do the copy-in page by page without having to worry about
* failures exposing transient data.
+ *
+	 * To support COW operations, we read in data for partially written
+	 * blocks from the srcmap if the file system filled it in. In that
+	 * case the length needs to be limited to the earlier of the ends of
+	 * the two iomaps. If the file system did not provide a srcmap we
+	 * pass the normal iomap into the actors so that they don't need
+	 * special handling for the two cases.
*/
- written = actor(inode, pos, length, data, &iomap);
+ written = actor(inode, pos, length, data, &iomap,
+ srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
/*
* Now the data has been copied, commit the range we've copied. This
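
A minimal sketch of the ->iomap_begin() shape implied by the new srcmap
argument; foo_iomap_begin is a hypothetical name and the filesystem-specific
extent lookup is elided:

	static int foo_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap, struct iomap *srcmap)
	{
		/* destination of the write: a freshly allocated COW extent */
		iomap->offset = pos;
		iomap->length = length;
		iomap->type = IOMAP_MAPPED;

		/* where the old (shared) data lives, so partial blocks are read in */
		srcmap->offset = pos;
		srcmap->length = length;
		srcmap->type = IOMAP_MAPPED;

		/* ->addr and ->bdev for both would come from the extent lookup */
		return 0;
	}
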
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e25901ae3ff4..d33c7bc5ee92 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Red Hat, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
+ * Copyright (C) 2016-2019 Christoph Hellwig.
*/
#include <linux/module.h>
#include <linux/compiler.h>
@@ -12,13 +12,34 @@
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
+#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
+#include "trace.h"
#include "../internal.h"
+/*
+ * Structure allocated for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+ if (page_has_private(page))
+ return (struct iomap_page *)page_private(page);
+ return NULL;
+}
+
+static struct bio_set iomap_ioend_bioset;
+
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
@@ -203,9 +224,17 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
SetPageUptodate(page);
}
+static inline bool iomap_block_needs_zeroing(struct inode *inode,
+ struct iomap *iomap, loff_t pos)
+{
+ return iomap->type != IOMAP_MAPPED ||
+ (iomap->flags & IOMAP_F_NEW) ||
+ pos >= i_size_read(inode);
+}
+
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_readpage_ctx *ctx = data;
struct page *page = ctx->cur_page;
@@ -226,7 +255,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (plen == 0)
goto done;
- if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
+ if (iomap_block_needs_zeroing(inode, iomap, pos)) {
zero_user(page, poff, plen);
iomap_set_range_uptodate(page, poff, plen);
goto done;
@@ -293,6 +322,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
unsigned poff;
loff_t ret;
+ trace_iomap_readpage(page->mapping->host, 1);
+
for (poff = 0; poff < PAGE_SIZE; poff += ret) {
ret = iomap_apply(inode, page_offset(page) + poff,
PAGE_SIZE - poff, 0, ops, &ctx,
@@ -351,7 +382,7 @@ iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_readpage_ctx *ctx = data;
loff_t done, ret;
@@ -371,7 +402,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
ctx->cur_page_in_bio = false;
}
ret = iomap_readpage_actor(inode, pos + done, length - done,
- ctx, iomap);
+ ctx, iomap, srcmap);
}
return done;
@@ -389,6 +420,8 @@ iomap_readpages(struct address_space *mapping, struct list_head *pages,
loff_t last = page_offset(list_entry(pages->next, struct page, lru));
loff_t length = last - pos + PAGE_SIZE, ret = 0;
+ trace_iomap_readpages(mapping->host, nr_pages);
+
while (length > 0) {
ret = iomap_apply(mapping->host, pos, length, 0, ops,
&ctx, iomap_readpages_actor);
@@ -455,6 +488,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
+ trace_iomap_releasepage(page->mapping->host, page, 0, 0);
+
/*
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
@@ -470,6 +505,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
+ trace_iomap_invalidatepage(page->mapping->host, page, offset, len);
+
/*
* If we are invalidating the entire page, clear the dirty state from it
* and release it to avoid unnecessary buildup of the LRU.
@@ -511,6 +548,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
+enum {
+ IOMAP_WRITE_F_UNSHARE = (1 << 0),
+};
+
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
@@ -525,19 +566,12 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
}
static int
-iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
- unsigned poff, unsigned plen, unsigned from, unsigned to,
- struct iomap *iomap)
+iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
+ unsigned plen, struct iomap *iomap)
{
struct bio_vec bvec;
struct bio bio;
- if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
- zero_user_segments(page, poff, from, to, poff + plen);
- iomap_set_range_uptodate(page, poff, plen);
- return 0;
- }
-
bio_init(&bio, &bvec, 1);
bio.bi_opf = REQ_OP_READ;
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
@@ -547,15 +581,15 @@ iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
}
static int
-__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
- struct page *page, struct iomap *iomap)
+__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
+ struct page *page, struct iomap *srcmap)
{
struct iomap_page *iop = iomap_page_create(inode, page);
loff_t block_size = i_blocksize(inode);
loff_t block_start = pos & ~(block_size - 1);
loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
unsigned from = offset_in_page(pos), to = from + len, poff, plen;
- int status = 0;
+ int status;
if (PageUptodate(page))
return 0;
@@ -566,29 +600,39 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
if (plen == 0)
break;
- if ((from > poff && from < poff + plen) ||
- (to > poff && to < poff + plen)) {
- status = iomap_read_page_sync(inode, block_start, page,
- poff, plen, from, to, iomap);
- if (status)
- break;
+ if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
+ (from <= poff || from >= poff + plen) &&
+ (to <= poff || to >= poff + plen))
+ continue;
+
+ if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
+ if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
+ return -EIO;
+ zero_user_segments(page, poff, from, to, poff + plen);
+ iomap_set_range_uptodate(page, poff, plen);
+ continue;
}
+ status = iomap_read_page_sync(block_start, page, poff, plen,
+ srcmap);
+ if (status)
+ return status;
} while ((block_start += plen) < block_end);
- return status;
+ return 0;
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, struct iomap *iomap)
+ struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
const struct iomap_page_ops *page_ops = iomap->page_ops;
- pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int status = 0;
BUG_ON(pos + len > iomap->offset + iomap->length);
+ if (srcmap != iomap)
+ BUG_ON(pos + len > srcmap->offset + srcmap->length);
if (fatal_signal_pending(current))
return -EINTR;
@@ -599,18 +643,20 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
return status;
}
- page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+ page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
+ AOP_FLAG_NOFS);
if (!page) {
status = -ENOMEM;
goto out_no_page;
}
- if (iomap->type == IOMAP_INLINE)
- iomap_read_inline_data(inode, page, iomap);
+ if (srcmap->type == IOMAP_INLINE)
+ iomap_read_inline_data(inode, page, srcmap);
else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ status = __block_write_begin_int(page, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(inode, pos, len, page, iomap);
+ status = __iomap_write_begin(inode, pos, len, flags, page,
+ srcmap);
if (unlikely(status))
goto out_unlock;
@@ -656,7 +702,7 @@ EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page, struct iomap *iomap)
+ unsigned copied, struct page *page)
{
flush_dcache_page(page);
@@ -696,20 +742,20 @@ iomap_write_end_inline(struct inode *inode, struct page *page,
}
static int
-iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page, struct iomap *iomap)
+iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied,
+ struct page *page, struct iomap *iomap, struct iomap *srcmap)
{
const struct iomap_page_ops *page_ops = iomap->page_ops;
loff_t old_size = inode->i_size;
int ret;
- if (iomap->type == IOMAP_INLINE) {
+ if (srcmap->type == IOMAP_INLINE) {
ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
- } else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
+ } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
page, NULL);
} else {
- ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
+ ret = __iomap_write_end(inode, pos, len, copied, page);
}
/*
@@ -736,12 +782,11 @@ iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct iov_iter *i = data;
long status = 0;
ssize_t written = 0;
- unsigned int flags = AOP_FLAG_NOFS;
do {
struct page *page;
@@ -771,8 +816,8 @@ again:
break;
}
- status = iomap_write_begin(inode, pos, bytes, flags, &page,
- iomap);
+ status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
+ srcmap);
if (unlikely(status))
break;
@@ -783,8 +828,8 @@ again:
flush_dcache_page(page);
- status = iomap_write_end(inode, pos, bytes, copied, page,
- iomap);
+ status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
+ srcmap);
if (unlikely(status < 0))
break;
copied = status;
@@ -835,50 +880,32 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
-static struct page *
-__iomap_read_page(struct inode *inode, loff_t offset)
-{
- struct address_space *mapping = inode->i_mapping;
- struct page *page;
-
- page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
- if (IS_ERR(page))
- return page;
- if (!PageUptodate(page)) {
- put_page(page);
- return ERR_PTR(-EIO);
- }
- return page;
-}
-
static loff_t
-iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap, struct iomap *srcmap)
{
long status = 0;
ssize_t written = 0;
- do {
- struct page *page, *rpage;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
-
- offset = offset_in_page(pos);
- bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+ /* don't bother with blocks that are not shared to start with */
+ if (!(iomap->flags & IOMAP_F_SHARED))
+ return length;
+ /* don't bother with holes or unwritten extents */
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+ return length;
- rpage = __iomap_read_page(inode, pos);
- if (IS_ERR(rpage))
- return PTR_ERR(rpage);
+ do {
+ unsigned long offset = offset_in_page(pos);
+ unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
+ struct page *page;
status = iomap_write_begin(inode, pos, bytes,
- AOP_FLAG_NOFS, &page, iomap);
- put_page(rpage);
+ IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
if (unlikely(status))
return status;
- WARN_ON_ONCE(!PageUptodate(page));
-
- status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+ status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
+ srcmap);
if (unlikely(status <= 0)) {
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -898,14 +925,14 @@ iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
int
-iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops)
{
loff_t ret;
while (len) {
ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
- iomap_dirty_actor);
+ iomap_unshare_actor);
if (ret <= 0)
return ret;
pos += ret;
@@ -914,23 +941,22 @@ iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
return 0;
}
-EXPORT_SYMBOL_GPL(iomap_file_dirty);
+EXPORT_SYMBOL_GPL(iomap_file_unshare);
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
- unsigned bytes, struct iomap *iomap)
+ unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
{
struct page *page;
int status;
- status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
- iomap);
+ status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
if (status)
return status;
zero_user(page, offset, bytes);
mark_page_accessed(page);
- return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
+ return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
@@ -942,14 +968,14 @@ static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
bool *did_zero = data;
loff_t written = 0;
int status;
/* already zeroed? we're done. */
- if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
return count;
do {
@@ -961,7 +987,8 @@ iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
if (IS_DAX(inode))
status = iomap_dax_zero(pos, offset, bytes, iomap);
else
- status = iomap_zero(inode, pos, offset, bytes, iomap);
+ status = iomap_zero(inode, pos, offset, bytes, iomap,
+ srcmap);
if (status < 0)
return status;
@@ -1011,7 +1038,7 @@ EXPORT_SYMBOL_GPL(iomap_truncate_page);
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
struct page *page = data;
int ret;
@@ -1040,20 +1067,19 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
lock_page(page);
size = i_size_read(inode);
- if ((page->mapping != inode->i_mapping) ||
- (page_offset(page) > size)) {
+ offset = page_offset(page);
+ if (page->mapping != inode->i_mapping || offset > size) {
/* We overload EFAULT to mean page got truncated */
ret = -EFAULT;
goto out_unlock;
}
/* page is wholly or partially inside EOF */
- if (((page->index + 1) << PAGE_SHIFT) > size)
+ if (offset > size - PAGE_SIZE)
length = offset_in_page(size);
else
length = PAGE_SIZE;
- offset = page_offset(page);
while (length > 0) {
ret = iomap_apply(inode, offset, length,
IOMAP_WRITE | IOMAP_FAULT, ops, page,
@@ -1071,3 +1097,551 @@ out_unlock:
return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
+
+static void
+iomap_finish_page_writeback(struct inode *inode, struct page *page,
+ int error)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+
+ if (error) {
+ SetPageError(page);
+ mapping_set_error(inode->i_mapping, -EIO);
+ }
+
+ WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
+
+ if (!iop || atomic_dec_and_test(&iop->write_count))
+ end_page_writeback(page);
+}
+
+/*
+ * We're now finished for good with this ioend structure. Update the page
+ * state, release holds on bios, and finally free up memory. Do not use the
+ * ioend after this.
+ */
+static void
+iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+{
+ struct inode *inode = ioend->io_inode;
+ struct bio *bio = &ioend->io_inline_bio;
+ struct bio *last = ioend->io_bio, *next;
+ u64 start = bio->bi_iter.bi_sector;
+ bool quiet = bio_flagged(bio, BIO_QUIET);
+
+ for (bio = &ioend->io_inline_bio; bio; bio = next) {
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+
+ /*
+ * For the last bio, bi_private points to the ioend, so we
+ * need to explicitly end the iteration here.
+ */
+ if (bio == last)
+ next = NULL;
+ else
+ next = bio->bi_private;
+
+ /* walk each page on bio, ending page IO on them */
+ bio_for_each_segment_all(bv, bio, iter_all)
+ iomap_finish_page_writeback(inode, bv->bv_page, error);
+ bio_put(bio);
+ }
+
+ if (unlikely(error && !quiet)) {
+ printk_ratelimited(KERN_ERR
+"%s: writeback error on inode %lu, offset %lld, sector %llu",
+ inode->i_sb->s_id, inode->i_ino, ioend->io_offset,
+ start);
+ }
+}
+
+void
+iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+{
+ struct list_head tmp;
+
+ list_replace_init(&ioend->io_list, &tmp);
+ iomap_finish_ioend(ioend, error);
+
+ while (!list_empty(&tmp)) {
+ ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
+ list_del_init(&ioend->io_list);
+ iomap_finish_ioend(ioend, error);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_finish_ioends);
+
+/*
+ * We can merge two adjacent ioends if they have the same set of work to do.
+ */
+static bool
+iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
+{
+ if (ioend->io_bio->bi_status != next->io_bio->bi_status)
+ return false;
+ if ((ioend->io_flags & IOMAP_F_SHARED) ^
+ (next->io_flags & IOMAP_F_SHARED))
+ return false;
+ if ((ioend->io_type == IOMAP_UNWRITTEN) ^
+ (next->io_type == IOMAP_UNWRITTEN))
+ return false;
+ if (ioend->io_offset + ioend->io_size != next->io_offset)
+ return false;
+ return true;
+}
+
+void
+iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
+ void (*merge_private)(struct iomap_ioend *ioend,
+ struct iomap_ioend *next))
+{
+ struct iomap_ioend *next;
+
+ INIT_LIST_HEAD(&ioend->io_list);
+
+ while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
+ io_list))) {
+ if (!iomap_ioend_can_merge(ioend, next))
+ break;
+ list_move_tail(&next->io_list, &ioend->io_list);
+ ioend->io_size += next->io_size;
+ if (next->io_private && merge_private)
+ merge_private(ioend, next);
+ }
+}
+EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
+
+static int
+iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
+ struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
+
+ if (ia->io_offset < ib->io_offset)
+ return -1;
+ if (ia->io_offset > ib->io_offset)
+ return 1;
+ return 0;
+}
+
+void
+iomap_sort_ioends(struct list_head *ioend_list)
+{
+ list_sort(NULL, ioend_list, iomap_ioend_compare);
+}
+EXPORT_SYMBOL_GPL(iomap_sort_ioends);
+
+static void iomap_writepage_end_bio(struct bio *bio)
+{
+ struct iomap_ioend *ioend = bio->bi_private;
+
+ iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
+}
+
+/*
+ * Submit the final bio for an ioend.
+ *
+ * If @error is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked pages for writeback
+ * and unlocked them. In this situation, we need to fail the bio instead of
+ * submitting it. This typically only happens on a filesystem shutdown.
+ */
+static int
+iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
+ int error)
+{
+ ioend->io_bio->bi_private = ioend;
+ ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
+
+ if (wpc->ops->prepare_ioend)
+ error = wpc->ops->prepare_ioend(ioend, error);
+ if (error) {
+ /*
+ * If we are failing the IO now, just mark the ioend with an
+ * error and finish it. This will run IO completion immediately
+ * as there is only one reference to the ioend at this point in
+ * time.
+ */
+ ioend->io_bio->bi_status = errno_to_blk_status(error);
+ bio_endio(ioend->io_bio);
+ return error;
+ }
+
+ submit_bio(ioend->io_bio);
+ return 0;
+}
+
+static struct iomap_ioend *
+iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
+ loff_t offset, sector_t sector, struct writeback_control *wbc)
+{
+ struct iomap_ioend *ioend;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
+ bio_set_dev(bio, wpc->iomap.bdev);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+ bio->bi_write_hint = inode->i_write_hint;
+ wbc_init_bio(wbc, bio);
+
+ ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_type = wpc->iomap.type;
+ ioend->io_flags = wpc->iomap.flags;
+ ioend->io_inode = inode;
+ ioend->io_size = 0;
+ ioend->io_offset = offset;
+ ioend->io_private = NULL;
+ ioend->io_bio = bio;
+ return ioend;
+}
+
+/*
+ * Allocate a new bio, and chain the old bio to the new one.
+ *
+ * Note that we have to perform the chaining in this unintuitive order
+ * so that the bi_private linkage is set up in the right direction for the
+ * traversal in iomap_finish_ioend().
+ */
+static struct bio *
+iomap_chain_bio(struct bio *prev)
+{
+ struct bio *new;
+
+ new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ bio_copy_dev(new, prev);/* also copies over blkcg information */
+ new->bi_iter.bi_sector = bio_end_sector(prev);
+ new->bi_opf = prev->bi_opf;
+ new->bi_write_hint = prev->bi_write_hint;
+
+ bio_chain(prev, new);
+ bio_get(prev); /* for iomap_finish_ioend */
+ submit_bio(prev);
+ return new;
+}
+
+static bool
+iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
+ sector_t sector)
+{
+ if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
+ (wpc->ioend->io_flags & IOMAP_F_SHARED))
+ return false;
+ if (wpc->iomap.type != wpc->ioend->io_type)
+ return false;
+ if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
+ return false;
+ if (sector != bio_end_sector(wpc->ioend->io_bio))
+ return false;
+ return true;
+}
+
+/*
+ * Test to see if we have an existing ioend structure that we could append to
+ * first, otherwise finish off the current ioend and start another.
+ */
+static void
+iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+ struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct list_head *iolist)
+{
+ sector_t sector = iomap_sector(&wpc->iomap, offset);
+ unsigned len = i_blocksize(inode);
+ unsigned poff = offset & (PAGE_SIZE - 1);
+ bool merged, same_page = false;
+
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+ if (wpc->ioend)
+ list_add(&wpc->ioend->io_list, iolist);
+ wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+ }
+
+ merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
+ &same_page);
+ if (iop && !same_page)
+ atomic_inc(&iop->write_count);
+
+ if (!merged) {
+ if (bio_full(wpc->ioend->io_bio, len)) {
+ wpc->ioend->io_bio =
+ iomap_chain_bio(wpc->ioend->io_bio);
+ }
+ bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ }
+
+ wpc->ioend->io_size += len;
+ wbc_account_cgroup_owner(wbc, page, len);
+}
+
+/*
+ * We implement an immediate ioend submission policy here to avoid needing to
+ * chain multiple ioends and hence nest mempool allocations which can violate
+ * forward progress guarantees we need to provide. The current ioend we are
+ * adding blocks to is cached on the writepage context, and if the new block
+ * does not append to the cached ioend it will create a new ioend and cache that
+ * instead.
+ *
+ * If a new ioend is created and cached, the old ioend is returned and queued
+ * locally for submission once the entire page is processed or an error has been
+ * detected. While ioends are submitted immediately after they are completed,
+ * batching optimisations are provided by higher level block plugging.
+ *
+ * At the end of a writeback pass, there will be a cached ioend remaining on the
+ * writepage context that the caller will need to submit.
+ */
+static int
+iomap_writepage_map(struct iomap_writepage_ctx *wpc,
+ struct writeback_control *wbc, struct inode *inode,
+ struct page *page, u64 end_offset)
+{
+ struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_ioend *ioend, *next;
+ unsigned len = i_blocksize(inode);
+ u64 file_offset; /* file offset of page */
+ int error = 0, count = 0, i;
+ LIST_HEAD(submit_list);
+
+ WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
+
+ /*
+ * Walk through the page to find areas to write back. If we run off the
+ * end of the current map or find the current map invalid, grab a new
+ * one.
+ */
+ for (i = 0, file_offset = page_offset(page);
+ i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
+ i++, file_offset += len) {
+ if (iop && !test_bit(i, iop->uptodate))
+ continue;
+
+ error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ if (error)
+ break;
+ if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
+ continue;
+ if (wpc->iomap.type == IOMAP_HOLE)
+ continue;
+ iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ &submit_list);
+ count++;
+ }
+
+ WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
+ WARN_ON_ONCE(!PageLocked(page));
+ WARN_ON_ONCE(PageWriteback(page));
+
+ /*
+ * We cannot cancel the ioend directly here on error. We may have
+ * already set other pages under writeback and hence we have to run I/O
+ * completion to mark the error state of the pages under writeback
+ * appropriately.
+ */
+ if (unlikely(error)) {
+ if (!count) {
+ /*
+ * If the current page hasn't been added to ioend, it
+ * won't be affected by I/O completions and we must
+ * discard and unlock it right here.
+ */
+ if (wpc->ops->discard_page)
+ wpc->ops->discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
+ goto done;
+ }
+
+ /*
+ * If the page was not fully cleaned, we need to ensure that the
+ * higher layers come back to it correctly. That means we need
+ * to keep the page dirty, and for WB_SYNC_ALL writeback we need
+ * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
+ * so another attempt to write this page in this writeback sweep
+ * will be made.
+ */
+ set_page_writeback_keepwrite(page);
+ } else {
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
+ }
+
+ unlock_page(page);
+
+ /*
+ * Preserve the original error if there was one, otherwise catch
+ * submission errors here and propagate into subsequent ioend
+ * submissions.
+ */
+ list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
+ int error2;
+
+ list_del_init(&ioend->io_list);
+ error2 = iomap_submit_ioend(wpc, ioend, error);
+ if (error2 && !error)
+ error = error2;
+ }
+
+ /*
+ * We can end up here with no error and nothing to write only if we race
+ * with a partial page truncate on a sub-page block sized filesystem.
+ */
+ if (!count)
+ end_page_writeback(page);
+done:
+ mapping_set_error(page->mapping, error);
+ return error;
+}
+
+/*
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ */
+static int
+iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
+{
+ struct iomap_writepage_ctx *wpc = data;
+ struct inode *inode = page->mapping->host;
+ pgoff_t end_index;
+ u64 end_offset;
+ loff_t offset;
+
+ trace_iomap_writepage(inode, page, 0, 0);
+
+ /*
+ * Refuse to write the page out if we are called from reclaim context.
+ *
+	 * This avoids stack overflows when called on already-deep stacks by
+	 * random callers doing direct reclaim or memcg reclaim. We explicitly
+ * allow reclaim from kswapd as the stack usage there is relatively low.
+ *
+ * This should never happen except in the case of a VM regression so
+ * warn about it.
+ */
+ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
+ PF_MEMALLOC))
+ goto redirty;
+
+ /*
+ * Given that we do not allow direct reclaim to call us, we should
+ * never be called in a recursive filesystem reclaim context.
+ */
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
+ goto redirty;
+
+ /*
+ * Is this page beyond the end of the file?
+ *
+ * The page index is less than the end_index, adjust the end_offset
+ * to the highest offset that this page should represent.
+ * -----------------------------------------------------
+ * | file mapping | <EOF> |
+ * -----------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | |
+ * ^--------------------------------^----------|--------
+ * | desired writeback range | see else |
+ * ---------------------------------^------------------|
+ */
+ offset = i_size_read(inode);
+ end_index = offset >> PAGE_SHIFT;
+ if (page->index < end_index)
+ end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ else {
+ /*
+ * Check whether the page to write out is beyond or straddles
+ * i_size or not.
+ * -------------------------------------------------------
+ * | file mapping | <EOF> |
+ * -------------------------------------------------------
+ * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
+ * ^--------------------------------^-----------|---------
+ * | | Straddles |
+ * ---------------------------------^-----------|--------|
+ */
+ unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+
+ /*
+ * Skip the page if it is fully outside i_size, e.g. due to a
+ * truncate operation that is in progress. We must redirty the
+ * page so that reclaim stops reclaiming it. Otherwise
+ * iomap_vm_releasepage() is called on it and gets confused.
+ *
+		 * Note that end_index is an unsigned long: it would overflow
+		 * if the given offset is greater than 16TB on a 32-bit system,
+		 * and if we checked whether the page is fully outside i_size
+		 * via "if (page->index >= end_index + 1)", "end_index + 1"
+		 * would evaluate to 0. The page would then be redirtied and
+		 * written out repeatedly, resulting in an infinite loop: the
+		 * user program performing this operation would hang. Instead,
+		 * we verify the situation by checking whether the page to
+		 * write is totally beyond i_size or whether its offset is
+		 * exactly equal to the EOF.
+ */
+ if (page->index > end_index ||
+ (page->index == end_index && offset_into_page == 0))
+ goto redirty;
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+ * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining
+ * memory is zeroed when mapped, and writes to that region are
+ * not written out to the file."
+ */
+ zero_user_segment(page, offset_into_page, PAGE_SIZE);
+
+ /* Adjust the end_offset to the end of file */
+ end_offset = offset;
+ }
+
+ return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
+
+redirty:
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+}
+
+int
+iomap_writepage(struct page *page, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops)
+{
+ int ret;
+
+ wpc->ops = ops;
+ ret = iomap_do_writepage(page, wbc, wpc);
+ if (!wpc->ioend)
+ return ret;
+ return iomap_submit_ioend(wpc, wpc->ioend, ret);
+}
+EXPORT_SYMBOL_GPL(iomap_writepage);
+
+int
+iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops)
+{
+ int ret;
+
+ wpc->ops = ops;
+ ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
+ if (!wpc->ioend)
+ return ret;
+ return iomap_submit_ioend(wpc, wpc->ioend, ret);
+}
+EXPORT_SYMBOL_GPL(iomap_writepages);
+
+static int __init iomap_init(void)
+{
+ return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_inline_bio),
+ BIOSET_NEED_BVECS);
+}
+fs_initcall(iomap_init);
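
A minimal sketch of a filesystem wiring up the new writeback entry points;
foo_* names are hypothetical, and only the mandatory ->map_blocks hook is
shown (->prepare_ioend and ->discard_page are optional):

	static int foo_map_blocks(struct iomap_writepage_ctx *wpc,
			struct inode *inode, loff_t offset)
	{
		/* fill wpc->iomap with the extent covering @offset */
		return 0;
	}

	static const struct iomap_writeback_ops foo_writeback_ops = {
		.map_blocks	= foo_map_blocks,
	};

	static int foo_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
	{
		struct iomap_writepage_ctx wpc = { };

		return iomap_writepages(mapping, wbc, &wpc, &foo_writeback_ops);
	}
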
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 1fc28c2da279..23837926c0c5 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -201,12 +201,12 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad;
unsigned int align = iov_iter_alignment(dio->submit.iter);
- struct iov_iter iter;
struct bio *bio;
bool need_zeroout = false;
bool use_fua = false;
int nr_pages, ret = 0;
size_t copied = 0;
+ size_t orig_count;
if ((pos | length | align) & ((1 << blkbits) - 1))
return -EINVAL;
@@ -236,15 +236,18 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
}
/*
- * Operate on a partial iter trimmed to the extent we were called for.
- * We'll update the iter in the dio once we're done with this extent.
+ * Save the original count and trim the iter to just the extent we
+ * are operating on right now. The iter will be re-expanded once
+ * we are done.
*/
- iter = *dio->submit.iter;
- iov_iter_truncate(&iter, length);
+ orig_count = iov_iter_count(dio->submit.iter);
+ iov_iter_truncate(dio->submit.iter, length);
- nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
- if (nr_pages <= 0)
- return nr_pages;
+ nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
+ if (nr_pages <= 0) {
+ ret = nr_pages;
+ goto out;
+ }
if (need_zeroout) {
/* zero out from the start of the block to the write offset */
@@ -257,7 +260,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
size_t n;
if (dio->error) {
iov_iter_revert(dio->submit.iter, copied);
- return 0;
+ copied = ret = 0;
+ goto out;
}
bio = bio_alloc(GFP_KERNEL, nr_pages);
@@ -268,7 +272,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
- ret = bio_iov_iter_get_pages(bio, &iter);
+ ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
if (unlikely(ret)) {
/*
* We have to stop part way through an IO. We must fall
@@ -294,13 +298,11 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio_set_pages_dirty(bio);
}
- iov_iter_advance(dio->submit.iter, n);
-
dio->size += n;
pos += n;
copied += n;
- nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
+ nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
iomap_dio_submit_bio(dio, iomap, bio);
} while (nr_pages);
@@ -318,7 +320,12 @@ zero_tail:
if (pad)
iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
}
- return copied ? copied : ret;
+out:
+ /* Undo iter limitation to current extent */
+ iov_iter_reexpand(dio->submit.iter, orig_count - copied);
+ if (copied)
+ return copied;
+ return ret;
}
static loff_t
@@ -358,7 +365,7 @@ iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
struct iomap_dio *dio = data;
@@ -392,15 +399,15 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
*/
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, const struct iomap_dio_ops *dops)
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ bool wait_for_completion)
{
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = file_inode(iocb->ki_filp);
size_t count = iov_iter_count(iter);
- loff_t pos = iocb->ki_pos, start = pos;
+ loff_t pos = iocb->ki_pos;
loff_t end = iocb->ki_pos + count - 1, ret = 0;
unsigned int flags = IOMAP_DIRECT;
- bool wait_for_completion = is_sync_kiocb(iocb);
struct blk_plug plug;
struct iomap_dio *dio;
@@ -409,6 +416,9 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!count)
return 0;
+ if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
+ return -EIO;
+
dio = kmalloc(sizeof(*dio), GFP_KERNEL);
if (!dio)
return -ENOMEM;
@@ -430,7 +440,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (pos >= dio->i_size)
goto out_free_dio;
- if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
+ if (iter_is_iovec(iter))
dio->flags |= IOMAP_DIO_DIRTY;
} else {
flags |= IOMAP_WRITE;
@@ -451,14 +461,14 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_has_page(mapping, start, end)) {
+ if (filemap_range_has_page(mapping, pos, end)) {
ret = -EAGAIN;
goto out_free_dio;
}
flags |= IOMAP_NOWAIT;
}
- ret = filemap_write_and_wait_range(mapping, start, end);
+ ret = filemap_write_and_wait_range(mapping, pos, end);
if (ret)
goto out_free_dio;
@@ -469,7 +479,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
* pretty crazy thing to do, so we don't support it 100%.
*/
ret = invalidate_inode_pages2_range(mapping,
- start >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
if (ret)
dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
@@ -497,8 +507,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
}
pos += ret;
- if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
+ if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
+ /*
+ * We only report that we've read data up to i_size.
+ * Revert iter to a state corresponding to that as
+			 * some callers (such as the splice code) rely on it.
+ */
+ iov_iter_revert(iter, pos - dio->i_size);
break;
+ }
} while ((count = iov_iter_count(iter)) > 0);
blk_finish_plug(&plug);
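
A minimal sketch of an updated caller: the completion-wait decision now comes
from the caller rather than being derived inside iomap_dio_rw(); the foo_*
names are hypothetical:

	static ssize_t foo_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		/* synchronous kiocbs must ask for completion to be awaited */
		return iomap_dio_rw(iocb, to, &foo_iomap_ops, &foo_dio_ops,
				is_sync_kiocb(iocb));
	}
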
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index f26fdd36e383..bccf305ea9ce 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -44,7 +44,7 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
- struct iomap *iomap)
+ struct iomap *iomap, struct iomap *srcmap)
{
struct fiemap_ctx *ctx = data;
loff_t ret = length;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL_GPL(iomap_fiemap);
static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
sector_t *bno = data, addr;
@@ -133,12 +133,16 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
struct inode *inode = mapping->host;
loff_t pos = bno << inode->i_blkbits;
unsigned blocksize = i_blocksize(inode);
+ int ret;
if (filemap_write_and_wait(mapping))
return 0;
bno = 0;
- iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
+ ret = iomap_apply(inode, pos, blocksize, 0, ops, &bno,
+ iomap_bmap_actor);
+ if (ret)
+ return 0;
return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index c04bad4b2b43..89f61d93c0bc 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -119,7 +119,7 @@ out:
static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
switch (iomap->type) {
case IOMAP_UNWRITTEN:
@@ -165,7 +165,7 @@ EXPORT_SYMBOL_GPL(iomap_seek_hole);
static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
- void *data, struct iomap *iomap)
+ void *data, struct iomap *iomap, struct iomap *srcmap)
{
switch (iomap->type) {
case IOMAP_HOLE:
diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
index 152a230f668d..a648dbf6991e 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -76,7 +76,8 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
* distinction between written and unwritten extents.
*/
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
- loff_t count, void *data, struct iomap *iomap)
+ loff_t count, void *data, struct iomap *iomap,
+ struct iomap *srcmap)
{
struct iomap_swapfile_info *isi = data;
int error;
diff --git a/fs/iomap/trace.c b/fs/iomap/trace.c
new file mode 100644
index 000000000000..da217246b1a9
--- /dev/null
+++ b/fs/iomap/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Christoph Hellwig
+ */
+#include <linux/iomap.h>
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h
new file mode 100644
index 000000000000..6dc227b8c47e
--- /dev/null
+++ b/fs/iomap/trace.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2009-2019 Christoph Hellwig
+ *
+ * NOTE: none of these tracepoints shall be considered a stable kernel ABI
+ * as they can change at any time.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iomap
+
+#if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IOMAP_TRACE_H
+
+#include <linux/tracepoint.h>
+
+struct inode;
+
+DECLARE_EVENT_CLASS(iomap_readpage_class,
+ TP_PROTO(struct inode *inode, int nr_pages),
+ TP_ARGS(inode, nr_pages),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(int, nr_pages)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->nr_pages = nr_pages;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->nr_pages)
+)
+
+#define DEFINE_READPAGE_EVENT(name) \
+DEFINE_EVENT(iomap_readpage_class, name, \
+ TP_PROTO(struct inode *inode, int nr_pages), \
+ TP_ARGS(inode, nr_pages))
+DEFINE_READPAGE_EVENT(iomap_readpage);
+DEFINE_READPAGE_EVENT(iomap_readpages);
+
+DECLARE_EVENT_CLASS(iomap_page_class,
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
+ unsigned int len),
+ TP_ARGS(inode, page, off, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(pgoff_t, pgoff)
+ __field(loff_t, size)
+ __field(unsigned long, offset)
+ __field(unsigned int, length)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgoff = page_offset(page);
+ __entry->size = i_size_read(inode);
+ __entry->offset = off;
+ __entry->length = len;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
+ "length %x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pgoff,
+ __entry->size,
+ __entry->offset,
+ __entry->length)
+)
+
+#define DEFINE_PAGE_EVENT(name) \
+DEFINE_EVENT(iomap_page_class, name, \
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
+ unsigned int len), \
+ TP_ARGS(inode, page, off, len))
+DEFINE_PAGE_EVENT(iomap_writepage);
+DEFINE_PAGE_EVENT(iomap_releasepage);
+DEFINE_PAGE_EVENT(iomap_invalidatepage);
+
+#define IOMAP_TYPE_STRINGS \
+ { IOMAP_HOLE, "HOLE" }, \
+ { IOMAP_DELALLOC, "DELALLOC" }, \
+ { IOMAP_MAPPED, "MAPPED" }, \
+ { IOMAP_UNWRITTEN, "UNWRITTEN" }, \
+ { IOMAP_INLINE, "INLINE" }
+
+#define IOMAP_FLAGS_STRINGS \
+ { IOMAP_WRITE, "WRITE" }, \
+ { IOMAP_ZERO, "ZERO" }, \
+ { IOMAP_REPORT, "REPORT" }, \
+ { IOMAP_FAULT, "FAULT" }, \
+ { IOMAP_DIRECT, "DIRECT" }, \
+ { IOMAP_NOWAIT, "NOWAIT" }
+
+#define IOMAP_F_FLAGS_STRINGS \
+ { IOMAP_F_NEW, "NEW" }, \
+ { IOMAP_F_DIRTY, "DIRTY" }, \
+ { IOMAP_F_SHARED, "SHARED" }, \
+ { IOMAP_F_MERGED, "MERGED" }, \
+ { IOMAP_F_BUFFER_HEAD, "BH" }, \
+ { IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }
+
+DECLARE_EVENT_CLASS(iomap_class,
+ TP_PROTO(struct inode *inode, struct iomap *iomap),
+ TP_ARGS(inode, iomap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(u64, addr)
+ __field(loff_t, offset)
+ __field(u64, length)
+ __field(u16, type)
+ __field(u16, flags)
+ __field(dev_t, bdev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->addr = iomap->addr;
+ __entry->offset = iomap->offset;
+ __entry->length = iomap->length;
+ __entry->type = iomap->type;
+ __entry->flags = iomap->flags;
+ __entry->bdev = iomap->bdev ? iomap->bdev->bd_dev : 0;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx bdev %d:%d addr %lld offset %lld "
+ "length %llu type %s flags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ MAJOR(__entry->bdev), MINOR(__entry->bdev),
+ __entry->addr,
+ __entry->offset,
+ __entry->length,
+ __print_symbolic(__entry->type, IOMAP_TYPE_STRINGS),
+ __print_flags(__entry->flags, "|", IOMAP_F_FLAGS_STRINGS))
+)
+
+#define DEFINE_IOMAP_EVENT(name) \
+DEFINE_EVENT(iomap_class, name, \
+ TP_PROTO(struct inode *inode, struct iomap *iomap), \
+ TP_ARGS(inode, iomap))
+DEFINE_IOMAP_EVENT(iomap_apply_dstmap);
+DEFINE_IOMAP_EVENT(iomap_apply_srcmap);
+
+TRACE_EVENT(iomap_apply,
+ TP_PROTO(struct inode *inode, loff_t pos, loff_t length,
+ unsigned int flags, const void *ops, void *actor,
+ unsigned long caller),
+ TP_ARGS(inode, pos, length, flags, ops, actor, caller),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u64, ino)
+ __field(loff_t, pos)
+ __field(loff_t, length)
+ __field(unsigned int, flags)
+ __field(const void *, ops)
+ __field(void *, actor)
+ __field(unsigned long, caller)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pos = pos;
+ __entry->length = length;
+ __entry->flags = flags;
+ __entry->ops = ops;
+ __entry->actor = actor;
+ __entry->caller = caller;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx pos %lld length %lld flags %s (0x%x) "
+ "ops %ps caller %pS actor %ps",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pos,
+ __entry->length,
+ __print_flags(__entry->flags, "|", IOMAP_FLAGS_STRINGS),
+ __entry->flags,
+ __entry->ops,
+ (void *)__entry->caller,
+ __entry->actor)
+);
+
+#endif /* _IOMAP_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
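
The new header uses the standard two-stage tracepoint layout: any file may
include trace.h for the declarations, while exactly one translation unit
(trace.c above) defines CREATE_TRACE_POINTS before the include to emit the
event bodies; TRACE_INCLUDE_PATH "." makes define_trace.h look for the header
next to its includer. A hedged sketch of a call site (the surrounding
function is hypothetical, not from this patch):

    #include "trace.h"

    /* hypothetical caller somewhere in fs/iomap/ */
    static void example_read_one_page(struct inode *inode)
    {
    	trace_iomap_readpage(inode, 1);	/* fires the event defined above */
    }
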
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index a1909066bde6..8fff6677a5da 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -110,7 +110,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
int nblocks, space_left;
/* assert_spin_locked(&journal->j_state_lock); */
- nblocks = jbd2_space_needed(journal);
+ nblocks = journal->j_max_transaction_buffers;
while (jbd2_log_space_left(journal) < nblocks) {
write_unlock(&journal->j_state_lock);
mutex_lock_io(&journal->j_checkpoint_mutex);
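
The checkpoint path stops padding its estimate here: descriptor blocks are now
carried in each transaction's credit count, so waiting for
j_max_transaction_buffers of log space suffices. For contrast, a sketch of the
dropped helper as it previously looked (reconstructed from its users, so an
assumption; the shift pads by 1/32 for control blocks):

    static inline int jbd2_space_needed(journal_t *journal)
    {
    	int nblocks = journal->j_max_transaction_buffers;

    	/* old estimate: transaction buffers plus ~3% for descriptors */
    	return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
    }
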
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 132fb92098c7..7f0b362b3842 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -482,10 +482,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
if (jh->b_committed_data) {
struct buffer_head *bh = jh2bh(jh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
jbd2_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
}
jbd2_journal_refile_buffer(journal, jh);
}
@@ -560,8 +560,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.run.rs_logging = jiffies;
stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
stats.run.rs_logging);
- stats.run.rs_blocks =
- atomic_read(&commit_transaction->t_outstanding_credits);
+ stats.run.rs_blocks = commit_transaction->t_nr_buffers;
stats.run.rs_blocks_logged = 0;
J_ASSERT(commit_transaction->t_nr_buffers <=
@@ -642,8 +641,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/*
* start_this_handle() uses t_outstanding_credits to determine
- * the free space in the log, but this counter is changed
- * by jbd2_journal_next_log_block() also.
+ * the free space in the log.
*/
atomic_dec(&commit_transaction->t_outstanding_credits);
@@ -727,7 +725,6 @@ start_journal_io:
submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
}
cond_resched();
- stats.run.rs_blocks_logged += bufs;
/* Force a new descriptor to be generated next
time round the loop. */
@@ -814,6 +811,7 @@ start_journal_io:
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
/*
* The list contains temporary buffer heads created by
@@ -859,6 +857,7 @@ start_journal_io:
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
+ stats.run.rs_blocks_logged++;
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@@ -880,6 +879,7 @@ start_journal_io:
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
+ stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
@@ -888,6 +888,9 @@ start_journal_io:
if (err)
jbd2_journal_abort(journal, err);
+ WARN_ON_ONCE(
+ atomic_read(&commit_transaction->t_outstanding_credits) < 0);
+
/*
* Now disk caches for filesystem device are flushed so we are safe to
* erase checkpointed transactions from the log by updating journal
@@ -918,6 +921,7 @@ restart_loop:
transaction_t *cp_transaction;
struct buffer_head *bh;
int try_to_free = 0;
+ bool drop_ref;
jh = commit_transaction->t_forget;
spin_unlock(&journal->j_list_lock);
@@ -927,7 +931,7 @@ restart_loop:
* done with it.
*/
get_bh(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
/*
@@ -1022,8 +1026,10 @@ restart_loop:
try_to_free = 1;
}
JBUFFER_TRACE(jh, "refile or unfile buffer");
- __jbd2_journal_refile_buffer(jh);
- jbd_unlock_bh_state(bh);
+ drop_ref = __jbd2_journal_refile_buffer(jh);
+ spin_unlock(&jh->b_state_lock);
+ if (drop_ref)
+ jbd2_journal_put_journal_head(jh);
if (try_to_free)
release_buffer_page(bh); /* Drops bh reference */
else
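
Two conversions run through this file: the per-buffer bit-spinlock
jbd_lock_bh_state() becomes a regular jh->b_state_lock spinlock, and
__jbd2_journal_refile_buffer() now reports whether the caller owns the final
journal_head reference. The resulting idiom, sketched in isolation:

    bool drop_ref;

    spin_lock(&jh->b_state_lock);
    drop_ref = __jbd2_journal_refile_buffer(jh);	/* true: jh unfiled */
    spin_unlock(&jh->b_state_lock);
    if (drop_ref)
    	jbd2_journal_put_journal_head(jh);	/* may free jh */
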
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1c58859aa592..5e408ee24a1a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -363,7 +363,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
/* keep subsequent assertions sane */
atomic_set(&new_bh->b_count, 1);
- jbd_lock_bh_state(bh_in);
+ spin_lock(&jh_in->b_state_lock);
repeat:
/*
* If a new transaction has already done a buffer copy-out, then
@@ -405,13 +405,13 @@ repeat:
if (need_copy_out && !done_copy_out) {
char *tmp;
- jbd_unlock_bh_state(bh_in);
+ spin_unlock(&jh_in->b_state_lock);
tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
if (!tmp) {
brelse(new_bh);
return -ENOMEM;
}
- jbd_lock_bh_state(bh_in);
+ spin_lock(&jh_in->b_state_lock);
if (jh_in->b_frozen_data) {
jbd2_free(tmp, bh_in->b_size);
goto repeat;
@@ -464,7 +464,7 @@ repeat:
__jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
spin_unlock(&journal->j_list_lock);
set_buffer_shadow(bh_in);
- jbd_unlock_bh_state(bh_in);
+ spin_unlock(&jh_in->b_state_lock);
return do_escape | (done_copy_out << 1);
}
@@ -840,6 +840,7 @@ jbd2_journal_get_descriptor_buffer(transaction_t *transaction, int type)
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh)
return NULL;
+ atomic_dec(&transaction->t_outstanding_credits);
lock_buffer(bh);
memset(bh->b_data, 0, journal->j_blocksize);
header = (journal_header_t *)bh->b_data;
@@ -1098,6 +1099,16 @@ static void jbd2_stats_proc_exit(journal_t *journal)
remove_proc_entry(journal->j_devname, proc_jbd2_stats);
}
+/* Minimum size of descriptor tag */
+static int jbd2_min_tag_size(void)
+{
+ /*
+ * A tag with 32-bit block numbers does not use the last four bytes
+ * of the structure.
+ */
+ return sizeof(journal_block_tag_t) - 4;
+}
+
/*
* Management for journal control blocks: functions to create and
* destroy journal_t structures, and to initialise and read existing
@@ -1156,7 +1167,8 @@ static journal_t *journal_init_common(struct block_device *bdev,
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
- n = journal->j_blocksize / sizeof(journal_block_tag_t);
+ /* We need enough buffers to write out a full descriptor block. */
+ n = journal->j_blocksize / jbd2_min_tag_size();
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
GFP_KERNEL);
@@ -1488,6 +1500,21 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
}
EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
+static int journal_revoke_records_per_block(journal_t *journal)
+{
+ int record_size;
+ int space = journal->j_blocksize - sizeof(jbd2_journal_revoke_header_t);
+
+ if (jbd2_has_feature_64bit(journal))
+ record_size = 8;
+ else
+ record_size = 4;
+
+ if (jbd2_journal_has_csum_v2or3(journal))
+ space -= sizeof(struct jbd2_journal_block_tail);
+ return space / record_size;
+}
+
/*
* Read the superblock for a given journal, performing initial
* validation of the format.
@@ -1596,6 +1623,8 @@ static int journal_get_superblock(journal_t *journal)
sizeof(sb->s_uuid));
}
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
set_buffer_verified(bh);
return 0;
@@ -1916,6 +1945,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
sb->s_feature_ro_compat |= cpu_to_be32(ro);
sb->s_feature_incompat |= cpu_to_be32(incompat);
unlock_buffer(journal->j_sb_buffer);
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
return 1;
#undef COMPAT_FEATURE_ON
@@ -1946,6 +1977,8 @@ void jbd2_journal_clear_features(journal_t *journal, unsigned long compat,
sb->s_feature_compat &= ~cpu_to_be32(compat);
sb->s_feature_ro_compat &= ~cpu_to_be32(ro);
sb->s_feature_incompat &= ~cpu_to_be32(incompat);
+ journal->j_revoke_records_per_block =
+ journal_revoke_records_per_block(journal);
}
EXPORT_SYMBOL(jbd2_journal_clear_features);
@@ -2410,6 +2443,8 @@ static struct journal_head *journal_alloc_journal_head(void)
ret = kmem_cache_zalloc(jbd2_journal_head_cache,
GFP_NOFS | __GFP_NOFAIL);
}
+ if (ret)
+ spin_lock_init(&ret->b_state_lock);
return ret;
}
@@ -2529,17 +2564,23 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
J_ASSERT_BH(bh, buffer_jbd(bh));
J_ASSERT_BH(bh, jh2bh(jh) == bh);
BUFFER_TRACE(bh, "remove journal_head");
+
+ /* Unlink before dropping the lock */
+ bh->b_private = NULL;
+ jh->b_bh = NULL; /* debug, really */
+ clear_buffer_jbd(bh);
+}
+
+static void journal_release_journal_head(struct journal_head *jh, size_t b_size)
+{
if (jh->b_frozen_data) {
printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
- jbd2_free(jh->b_frozen_data, bh->b_size);
+ jbd2_free(jh->b_frozen_data, b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
- jbd2_free(jh->b_committed_data, bh->b_size);
+ jbd2_free(jh->b_committed_data, b_size);
}
- bh->b_private = NULL;
- jh->b_bh = NULL; /* debug, really */
- clear_buffer_jbd(bh);
journal_free_journal_head(jh);
}
@@ -2557,9 +2598,11 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
if (!jh->b_jcount) {
__journal_remove_journal_head(bh);
jbd_unlock_bh_journal_head(bh);
+ journal_release_journal_head(jh, bh->b_size);
__brelse(bh);
- } else
+ } else {
jbd_unlock_bh_journal_head(bh);
+ }
}
/*
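
To make journal_revoke_records_per_block() concrete, a worked example under
assumed sizes (4096-byte journal blocks, a 16-byte
jbd2_journal_revoke_header_t, no v2/v3 checksums, 32-bit block numbers; the
header size is an assumption for illustration):

    int space = 4096 - 16;		/* 4080 bytes of record space */
    int record_size = 4;		/* 32-bit block numbers       */
    int records = space / record_size;	/* 1020 revoke records/block  */

With 64-bit block numbers the same block would hold 4080 / 8 = 510 records.
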
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index f08073d7bbf5..fa608788b93d 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -371,6 +371,11 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
}
#endif
+ if (WARN_ON_ONCE(handle->h_revoke_credits <= 0)) {
+ if (!bh_in)
+ brelse(bh);
+ return -EIO;
+ }
/* We really ought not ever to revoke twice in a row without
first having the revoke cancelled: it's illegal to free a
block twice without allocating it in between! */
@@ -391,6 +396,7 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
__brelse(bh);
}
}
+ handle->h_revoke_credits--;
jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
err = insert_revoke_hash(journal, blocknr,
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index b25ebdcabfa3..27b9f9dee434 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -63,6 +63,28 @@ void jbd2_journal_free_transaction(transaction_t *transaction)
}
/*
+ * Base number of descriptor blocks we reserve for each transaction.
+ */
+static int jbd2_descriptor_blocks_per_trans(journal_t *journal)
+{
+ int tag_space = journal->j_blocksize - sizeof(journal_header_t);
+ int tags_per_block;
+
+ /* Subtract UUID */
+ tag_space -= 16;
+ if (jbd2_journal_has_csum_v2or3(journal))
+ tag_space -= sizeof(struct jbd2_journal_block_tail);
+ /* Commit code leaves a slack space of 16 bytes at the end of the block */
+ tags_per_block = (tag_space - 16) / journal_tag_bytes(journal);
+ /*
+ * Revoke descriptors are accounted separately so we need to reserve
+ * space for the commit block and normal transaction descriptor blocks.
+ */
+ return 1 + DIV_ROUND_UP(journal->j_max_transaction_buffers,
+ tags_per_block);
+}
+
+/*
* jbd2_get_transaction: obtain a new transaction_t object.
*
* Simply initialise a new transaction. Initialize it in
@@ -88,7 +110,9 @@ static void jbd2_get_transaction(journal_t *journal,
spin_lock_init(&transaction->t_handle_lock);
atomic_set(&transaction->t_updates, 0);
atomic_set(&transaction->t_outstanding_credits,
+ jbd2_descriptor_blocks_per_trans(journal) +
atomic_read(&journal->j_reserved_credits));
+ atomic_set(&transaction->t_outstanding_revokes, 0);
atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
INIT_LIST_HEAD(&transaction->t_private_list);
@@ -258,12 +282,13 @@ static int add_transaction_credits(journal_t *journal, int blocks,
* *before* starting to dirty potentially checkpointed buffers
* in the new transaction.
*/
- if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
+ if (jbd2_log_space_left(journal) < journal->j_max_transaction_buffers) {
atomic_sub(total, &t->t_outstanding_credits);
read_unlock(&journal->j_state_lock);
jbd2_might_wait_for_commit(journal);
write_lock(&journal->j_state_lock);
- if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
+ if (jbd2_log_space_left(journal) <
+ journal->j_max_transaction_buffers)
__jbd2_log_wait_for_space(journal);
write_unlock(&journal->j_state_lock);
return 1;
@@ -299,12 +324,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
gfp_t gfp_mask)
{
transaction_t *transaction, *new_transaction = NULL;
- int blocks = handle->h_buffer_credits;
+ int blocks = handle->h_total_credits;
int rsv_blocks = 0;
unsigned long ts = jiffies;
if (handle->h_rsv_handle)
- rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
+ rsv_blocks = handle->h_rsv_handle->h_total_credits;
/*
* Limit the number of reserved credits to 1/2 of maximum transaction
@@ -405,6 +430,7 @@ repeat:
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
handle->h_requested_credits = blocks;
+ handle->h_revoke_credits_requested = handle->h_revoke_credits;
handle->h_start_jiffies = jiffies;
atomic_inc(&transaction->t_updates);
atomic_inc(&transaction->t_handle_count);
@@ -431,15 +457,15 @@ static handle_t *new_handle(int nblocks)
handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
if (!handle)
return NULL;
- handle->h_buffer_credits = nblocks;
+ handle->h_total_credits = nblocks;
handle->h_ref = 1;
return handle;
}
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no)
+ int revoke_records, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no)
{
handle_t *handle = journal_current_handle();
int err;
@@ -453,6 +479,8 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
return handle;
}
+ nblocks += DIV_ROUND_UP(revoke_records,
+ journal->j_revoke_records_per_block);
handle = new_handle(nblocks);
if (!handle)
return ERR_PTR(-ENOMEM);
@@ -468,6 +496,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
rsv_handle->h_journal = journal;
handle->h_rsv_handle = rsv_handle;
}
+ handle->h_revoke_credits = revoke_records;
err = start_this_handle(journal, handle, gfp_mask);
if (err < 0) {
@@ -508,16 +537,21 @@ EXPORT_SYMBOL(jbd2__journal_start);
*/
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
- return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
+ return jbd2__journal_start(journal, nblocks, 0, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
-void jbd2_journal_free_reserved(handle_t *handle)
+static void __jbd2_journal_unreserve_handle(handle_t *handle)
{
journal_t *journal = handle->h_journal;
WARN_ON(!handle->h_reserved);
- sub_reserved_credits(journal, handle->h_buffer_credits);
+ sub_reserved_credits(journal, handle->h_total_credits);
+}
+
+void jbd2_journal_free_reserved(handle_t *handle)
+{
+ __jbd2_journal_unreserve_handle(handle);
jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);
@@ -571,7 +605,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
handle->h_line_no = line_no;
trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
handle->h_transaction->t_tid, type,
- line_no, handle->h_buffer_credits);
+ line_no, handle->h_total_credits);
return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
@@ -580,6 +614,7 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved);
* int jbd2_journal_extend() - extend buffer credits.
* @handle: handle to 'extend'
* @nblocks: nr blocks to try to extend by.
+ * @revoke_records: number of revoke records to try to extend by.
*
* Some transactions, such as large extends and truncates, can be done
* atomically all at once or in several stages. The operation requests
@@ -596,7 +631,7 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved);
* return code < 0 implies an error
* return code > 0 implies normal transaction-full status.
*/
-int jbd2_journal_extend(handle_t *handle, int nblocks)
+int jbd2_journal_extend(handle_t *handle, int nblocks, int revoke_records)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
@@ -618,6 +653,12 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
goto error_out;
}
+ nblocks += DIV_ROUND_UP(
+ handle->h_revoke_credits_requested + revoke_records,
+ journal->j_revoke_records_per_block) -
+ DIV_ROUND_UP(
+ handle->h_revoke_credits_requested,
+ journal->j_revoke_records_per_block);
spin_lock(&transaction->t_handle_lock);
wanted = atomic_add_return(nblocks,
&transaction->t_outstanding_credits);
@@ -629,22 +670,16 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
goto unlock;
}
- if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
- jbd2_log_space_left(journal)) {
- jbd_debug(3, "denied handle %p %d blocks: "
- "insufficient log space\n", handle, nblocks);
- atomic_sub(nblocks, &transaction->t_outstanding_credits);
- goto unlock;
- }
-
trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
transaction->t_tid,
handle->h_type, handle->h_line_no,
- handle->h_buffer_credits,
+ handle->h_total_credits,
nblocks);
- handle->h_buffer_credits += nblocks;
+ handle->h_total_credits += nblocks;
handle->h_requested_credits += nblocks;
+ handle->h_revoke_credits += revoke_records;
+ handle->h_revoke_credits_requested += revoke_records;
result = 0;
jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
@@ -655,11 +690,55 @@ error_out:
return result;
}
+static void stop_this_handle(handle_t *handle)
+{
+ transaction_t *transaction = handle->h_transaction;
+ journal_t *journal = transaction->t_journal;
+ int revokes;
+
+ J_ASSERT(journal_current_handle() == handle);
+ J_ASSERT(atomic_read(&transaction->t_updates) > 0);
+ current->journal_info = NULL;
+ /*
+ * Subtract necessary revoke descriptor blocks from handle credits. We
+ * take care to account only for revoke descriptor blocks the
+ * transaction will really need, since long sequences of transactions
+ * with only a few revokes each are relatively common.
+ */
+ revokes = handle->h_revoke_credits_requested - handle->h_revoke_credits;
+ if (revokes) {
+ int t_revokes, revoke_descriptors;
+ int rr_per_blk = journal->j_revoke_records_per_block;
+
+ WARN_ON_ONCE(DIV_ROUND_UP(revokes, rr_per_blk)
+ > handle->h_total_credits);
+ t_revokes = atomic_add_return(revokes,
+ &transaction->t_outstanding_revokes);
+ revoke_descriptors =
+ DIV_ROUND_UP(t_revokes, rr_per_blk) -
+ DIV_ROUND_UP(t_revokes - revokes, rr_per_blk);
+ handle->h_total_credits -= revoke_descriptors;
+ }
+ atomic_sub(handle->h_total_credits,
+ &transaction->t_outstanding_credits);
+ if (handle->h_rsv_handle)
+ __jbd2_journal_unreserve_handle(handle->h_rsv_handle);
+ if (atomic_dec_and_test(&transaction->t_updates))
+ wake_up(&journal->j_wait_updates);
+
+ rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
+ /*
+ * Scope of the GFP_NOFS context is over here and so we can restore the
+ * original alloc context.
+ */
+ memalloc_nofs_restore(handle->saved_alloc_context);
+}
/**
* int jbd2_journal_restart() - restart a handle .
* @handle: handle to restart
* @nblocks: nr credits requested
+ * @revoke_records: number of revoke record credits requested
* @gfp_mask: memory allocation flags (for start_this_handle)
*
* Restart a handle for a multi-transaction filesystem
@@ -672,56 +751,48 @@ error_out:
* credits. We preserve reserved handle if there's any attached to the
* passed in handle.
*/
-int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
+int jbd2__journal_restart(handle_t *handle, int nblocks, int revoke_records,
+ gfp_t gfp_mask)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
tid_t tid;
- int need_to_start, ret;
+ int need_to_start;
+ int ret;
/* If we've had an abort of any type, don't even think about
* actually doing the restart! */
if (is_handle_aborted(handle))
return 0;
journal = transaction->t_journal;
+ tid = transaction->t_tid;
/*
* First unlink the handle from its current transaction, and start the
* commit on that.
*/
- J_ASSERT(atomic_read(&transaction->t_updates) > 0);
- J_ASSERT(journal_current_handle() == handle);
-
- read_lock(&journal->j_state_lock);
- spin_lock(&transaction->t_handle_lock);
- atomic_sub(handle->h_buffer_credits,
- &transaction->t_outstanding_credits);
- if (handle->h_rsv_handle) {
- sub_reserved_credits(journal,
- handle->h_rsv_handle->h_buffer_credits);
- }
- if (atomic_dec_and_test(&transaction->t_updates))
- wake_up(&journal->j_wait_updates);
- tid = transaction->t_tid;
- spin_unlock(&transaction->t_handle_lock);
+ jbd_debug(2, "restarting handle %p\n", handle);
+ stop_this_handle(handle);
handle->h_transaction = NULL;
- current->journal_info = NULL;
- jbd_debug(2, "restarting handle %p\n", handle);
+ /*
+ * TODO: If we use READ_ONCE / WRITE_ONCE for j_commit_request we can
+ * get rid of pointless j_state_lock traffic like this.
+ */
+ read_lock(&journal->j_state_lock);
need_to_start = !tid_geq(journal->j_commit_request, tid);
read_unlock(&journal->j_state_lock);
if (need_to_start)
jbd2_log_start_commit(journal, tid);
-
- rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
- handle->h_buffer_credits = nblocks;
- /*
- * Restore the original nofs context because the journal restart
- * is basically the same thing as journal stop and start.
- * start_this_handle will start a new nofs context.
- */
- memalloc_nofs_restore(handle->saved_alloc_context);
+ handle->h_total_credits = nblocks +
+ DIV_ROUND_UP(revoke_records,
+ journal->j_revoke_records_per_block);
+ handle->h_revoke_credits = revoke_records;
ret = start_this_handle(journal, handle, gfp_mask);
+ trace_jbd2_handle_restart(journal->j_fs_dev->bd_dev,
+ ret ? 0 : handle->h_transaction->t_tid,
+ handle->h_type, handle->h_line_no,
+ handle->h_total_credits);
return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);
@@ -729,7 +800,7 @@ EXPORT_SYMBOL(jbd2__journal_restart);
int jbd2_journal_restart(handle_t *handle, int nblocks)
{
- return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
+ return jbd2__journal_restart(handle, nblocks, 0, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
@@ -879,7 +950,7 @@ repeat:
start_lock = jiffies;
lock_buffer(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
/* If it takes too long to lock the buffer, trace it */
time_lock = jbd2_time_diff(start_lock, jiffies);
@@ -929,7 +1000,7 @@ repeat:
error = -EROFS;
if (is_handle_aborted(handle)) {
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
goto out;
}
error = 0;
@@ -993,7 +1064,7 @@ repeat:
*/
if (buffer_shadow(bh)) {
JBUFFER_TRACE(jh, "on shadow: sleep");
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
goto repeat;
}
@@ -1014,7 +1085,7 @@ repeat:
JBUFFER_TRACE(jh, "generate frozen data");
if (!frozen_buffer) {
JBUFFER_TRACE(jh, "allocate memory for buffer");
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS | __GFP_NOFAIL);
goto repeat;
@@ -1033,7 +1104,7 @@ attach_next:
jh->b_next_transaction = transaction;
done:
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
/*
* If we are about to journal a buffer, then any revoke pending on it is
@@ -1172,7 +1243,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
* that case: the transaction must have deleted the buffer for it to be
* reused here.
*/
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
jh->b_transaction == NULL ||
(jh->b_transaction == journal->j_committing_transaction &&
@@ -1207,7 +1278,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
jh->b_next_transaction = transaction;
spin_unlock(&journal->j_list_lock);
}
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
/*
* akpm: I added this. ext3_alloc_branch can pick up new indirect
@@ -1275,13 +1346,13 @@ repeat:
committed_data = jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS|__GFP_NOFAIL);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
if (!jh->b_committed_data) {
/* Copy out the current buffer contents into the
* preserved, committed copy. */
JBUFFER_TRACE(jh, "generate b_committed data");
if (!committed_data) {
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
goto repeat;
}
@@ -1289,7 +1360,7 @@ repeat:
committed_data = NULL;
memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
}
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
out:
jbd2_journal_put_journal_head(jh);
if (unlikely(committed_data))
@@ -1390,16 +1461,16 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
*/
if (jh->b_transaction != transaction &&
jh->b_next_transaction != transaction) {
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
J_ASSERT_JH(jh, jh->b_transaction == transaction ||
jh->b_next_transaction == transaction);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
}
if (jh->b_modified == 1) {
/* If it's in our transaction it must be in BJ_Metadata list. */
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata) {
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata)
pr_err("JBD2: assertion failure: h_type=%u "
@@ -1409,13 +1480,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
jh->b_jlist);
J_ASSERT_JH(jh, jh->b_transaction != transaction ||
jh->b_jlist == BJ_Metadata);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
}
goto out;
}
journal = transaction->t_journal;
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
if (jh->b_modified == 0) {
/*
@@ -1423,12 +1494,12 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the transaction. This needs to be done
* once a transaction -bzzz
*/
- if (handle->h_buffer_credits <= 0) {
+ if (WARN_ON_ONCE(jbd2_handle_buffer_credits(handle) <= 0)) {
ret = -ENOSPC;
goto out_unlock_bh;
}
jh->b_modified = 1;
- handle->h_buffer_credits--;
+ handle->h_total_credits--;
}
/*
@@ -1501,7 +1572,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
spin_unlock(&journal->j_list_lock);
out_unlock_bh:
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
out:
JBUFFER_TRACE(jh, "exit");
return ret;
@@ -1539,18 +1610,20 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
BUFFER_TRACE(bh, "entry");
- jbd_lock_bh_state(bh);
+ jh = jbd2_journal_grab_journal_head(bh);
+ if (!jh) {
+ __bforget(bh);
+ return 0;
+ }
- if (!buffer_jbd(bh))
- goto not_jbd;
- jh = bh2jh(bh);
+ spin_lock(&jh->b_state_lock);
/* Critical error: attempting to delete a bitmap buffer, maybe?
* Don't do any jbd operations, and return an error. */
if (!J_EXPECT_JH(jh, !jh->b_committed_data,
"inconsistent data on disk")) {
err = -EIO;
- goto not_jbd;
+ goto drop;
}
/* keep track of whether or not this transaction modified us */
@@ -1598,10 +1671,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
} else {
__jbd2_journal_unfile_buffer(jh);
- if (!buffer_jbd(bh)) {
- spin_unlock(&journal->j_list_lock);
- goto not_jbd;
- }
+ jbd2_journal_put_journal_head(jh);
}
spin_unlock(&journal->j_list_lock);
} else if (jh->b_transaction) {
@@ -1643,7 +1713,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
if (!jh->b_cp_transaction) {
JBUFFER_TRACE(jh, "belongs to none transaction");
spin_unlock(&journal->j_list_lock);
- goto not_jbd;
+ goto drop;
}
/*
@@ -1653,7 +1723,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
if (!buffer_dirty(bh)) {
__jbd2_journal_remove_checkpoint(jh);
spin_unlock(&journal->j_list_lock);
- goto not_jbd;
+ goto drop;
}
/*
@@ -1666,20 +1736,15 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
spin_unlock(&journal->j_list_lock);
}
-
- jbd_unlock_bh_state(bh);
- __brelse(bh);
drop:
+ __brelse(bh);
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_put_journal_head(jh);
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */
- handle->h_buffer_credits++;
+ handle->h_total_credits++;
}
return err;
-
-not_jbd:
- jbd_unlock_bh_state(bh);
- __bforget(bh);
- goto drop;
}
/**
@@ -1706,45 +1771,34 @@ int jbd2_journal_stop(handle_t *handle)
tid_t tid;
pid_t pid;
+ if (--handle->h_ref > 0) {
+ jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+ handle->h_ref);
+ if (is_handle_aborted(handle))
+ return -EIO;
+ return 0;
+ }
if (!transaction) {
/*
- * Handle is already detached from the transaction so
- * there is nothing to do other than decrease a refcount,
- * or free the handle if refcount drops to zero
+ * Handle is already detached from the transaction so there is
+ * nothing to do other than free the handle.
*/
- if (--handle->h_ref > 0) {
- jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
- handle->h_ref);
- return err;
- } else {
- if (handle->h_rsv_handle)
- jbd2_free_handle(handle->h_rsv_handle);
- goto free_and_exit;
- }
+ memalloc_nofs_restore(handle->saved_alloc_context);
+ goto free_and_exit;
}
journal = transaction->t_journal;
-
- J_ASSERT(journal_current_handle() == handle);
+ tid = transaction->t_tid;
if (is_handle_aborted(handle))
err = -EIO;
- else
- J_ASSERT(atomic_read(&transaction->t_updates) > 0);
-
- if (--handle->h_ref > 0) {
- jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
- handle->h_ref);
- return err;
- }
jbd_debug(4, "Handle %p going down\n", handle);
trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
- transaction->t_tid,
- handle->h_type, handle->h_line_no,
+ tid, handle->h_type, handle->h_line_no,
jiffies - handle->h_start_jiffies,
handle->h_sync, handle->h_requested_credits,
(handle->h_requested_credits -
- handle->h_buffer_credits));
+ handle->h_total_credits));
/*
* Implement synchronous transaction batching. If the handle
@@ -1804,19 +1858,13 @@ int jbd2_journal_stop(handle_t *handle)
if (handle->h_sync)
transaction->t_synchronous_commit = 1;
- current->journal_info = NULL;
- atomic_sub(handle->h_buffer_credits,
- &transaction->t_outstanding_credits);
/*
* If the handle is marked SYNC, we need to set another commit
- * going! We also want to force a commit if the current
- * transaction is occupying too much of the log, or if the
- * transaction is too old now.
+ * going! We also want to force a commit if the transaction is too
+ * old now.
*/
if (handle->h_sync ||
- (atomic_read(&transaction->t_outstanding_credits) >
- journal->j_max_transaction_buffers) ||
time_after_eq(jiffies, transaction->t_expires)) {
/* Do this even for aborted journals: an abort still
* completes the commit thread, it just doesn't write
@@ -1825,7 +1873,7 @@ int jbd2_journal_stop(handle_t *handle)
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
- jbd2_log_start_commit(journal, transaction->t_tid);
+ jbd2_log_start_commit(journal, tid);
/*
* Special case: JBD2_SYNC synchronous updates require us
@@ -1836,31 +1884,19 @@ int jbd2_journal_stop(handle_t *handle)
}
/*
- * Once we drop t_updates, if it goes to zero the transaction
- * could start committing on us and eventually disappear. So
- * once we do this, we must not dereference transaction
- * pointer again.
+ * Once stop_this_handle() drops t_updates, the transaction could start
+ * committing on us and eventually disappear. So we must not
+ * dereference transaction pointer again after calling
+ * stop_this_handle().
*/
- tid = transaction->t_tid;
- if (atomic_dec_and_test(&transaction->t_updates)) {
- wake_up(&journal->j_wait_updates);
- if (journal->j_barrier_count)
- wake_up(&journal->j_wait_transaction_locked);
- }
-
- rwsem_release(&journal->j_trans_commit_map, _THIS_IP_);
+ stop_this_handle(handle);
if (wait_for_commit)
err = jbd2_log_wait_commit(journal, tid);
- if (handle->h_rsv_handle)
- jbd2_journal_free_reserved(handle->h_rsv_handle);
free_and_exit:
- /*
- * Scope of the GFP_NOFS context is over here and so we can restore the
- * original alloc context.
- */
- memalloc_nofs_restore(handle->saved_alloc_context);
+ if (handle->h_rsv_handle)
+ jbd2_free_handle(handle->h_rsv_handle);
jbd2_free_handle(handle);
return err;
}
@@ -1878,7 +1914,7 @@ free_and_exit:
*
* j_list_lock is held.
*
- * jbd_lock_bh_state(jh2bh(jh)) is held.
+ * jh->b_state_lock is held.
*/
static inline void
@@ -1902,7 +1938,7 @@ __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
*
* Called with j_list_lock held, and the journal may not be locked.
*
- * jbd_lock_bh_state(jh2bh(jh)) is held.
+ * jh->b_state_lock is held.
*/
static inline void
@@ -1934,7 +1970,7 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
transaction_t *transaction;
struct buffer_head *bh = jh2bh(jh);
- J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+ lockdep_assert_held(&jh->b_state_lock);
transaction = jh->b_transaction;
if (transaction)
assert_spin_locked(&transaction->t_journal->j_list_lock);
@@ -1971,17 +2007,15 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
}
/*
- * Remove buffer from all transactions.
+ * Remove buffer from all transactions. The caller is responsible for dropping
+ * the jh reference that belonged to the transaction.
*
* Called with bh_state lock and j_list_lock
- *
- * jh and bh may be already freed when this function returns.
*/
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
__jbd2_journal_temp_unlink_buffer(jh);
jh->b_transaction = NULL;
- jbd2_journal_put_journal_head(jh);
}
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
@@ -1990,18 +2024,19 @@ void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
/* Get reference so that buffer cannot be freed before we unlock it */
get_bh(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
__jbd2_journal_unfile_buffer(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_put_journal_head(jh);
__brelse(bh);
}
/*
* Called from jbd2_journal_try_to_free_buffers().
*
- * Called under jbd_lock_bh_state(bh)
+ * Called under jh->b_state_lock
*/
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
@@ -2088,10 +2123,10 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
if (!jh)
continue;
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
__journal_try_to_free_buffer(journal, bh);
+ spin_unlock(&jh->b_state_lock);
jbd2_journal_put_journal_head(jh);
- jbd_unlock_bh_state(bh);
if (buffer_jbd(bh))
goto busy;
} while ((bh = bh->b_this_page) != head);
@@ -2112,7 +2147,7 @@ busy:
*
* Called under j_list_lock.
*
- * Called under jbd_lock_bh_state(bh).
+ * Called under jh->b_state_lock.
*/
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
@@ -2133,6 +2168,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
} else {
JBUFFER_TRACE(jh, "on running transaction");
__jbd2_journal_unfile_buffer(jh);
+ jbd2_journal_put_journal_head(jh);
}
return may_free;
}
@@ -2199,18 +2235,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* holding the page lock. --sct
*/
- if (!buffer_jbd(bh))
+ jh = jbd2_journal_grab_journal_head(bh);
+ if (!jh)
goto zap_buffer_unlocked;
/* OK, we have data buffer in journaled mode */
write_lock(&journal->j_state_lock);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
- jh = jbd2_journal_grab_journal_head(bh);
- if (!jh)
- goto zap_buffer_no_jh;
-
/*
* We cannot remove the buffer from checkpoint lists until the
* transaction adding inode to orphan list (let's call it T)
@@ -2289,10 +2322,10 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
* for commit and try again.
*/
if (partial_page) {
- jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
+ jbd2_journal_put_journal_head(jh);
return -EBUSY;
}
/*
@@ -2304,10 +2337,10 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
- jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
+ jbd2_journal_put_journal_head(jh);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
@@ -2331,11 +2364,10 @@ zap_buffer:
* here.
*/
jh->b_modified = 0;
- jbd2_journal_put_journal_head(jh);
-zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
+ spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
+ jbd2_journal_put_journal_head(jh);
zap_buffer_unlocked:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
@@ -2422,7 +2454,7 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
int was_dirty = 0;
struct buffer_head *bh = jh2bh(jh);
- J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+ lockdep_assert_held(&jh->b_state_lock);
assert_spin_locked(&transaction->t_journal->j_list_lock);
J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
@@ -2484,11 +2516,11 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
void jbd2_journal_file_buffer(struct journal_head *jh,
transaction_t *transaction, int jlist)
{
- jbd_lock_bh_state(jh2bh(jh));
+ spin_lock(&jh->b_state_lock);
spin_lock(&transaction->t_journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, jlist);
spin_unlock(&transaction->t_journal->j_list_lock);
- jbd_unlock_bh_state(jh2bh(jh));
+ spin_unlock(&jh->b_state_lock);
}
/*
@@ -2498,23 +2530,25 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
* buffer on that transaction's metadata list.
*
* Called under j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh))
+ * Called under jh->b_state_lock
*
- * jh and bh may be already free when this function returns
+ * When this function returns true, there's no next transaction to refile to
+ * and the caller has to drop the jh reference through
+ * jbd2_journal_put_journal_head().
*/
-void __jbd2_journal_refile_buffer(struct journal_head *jh)
+bool __jbd2_journal_refile_buffer(struct journal_head *jh)
{
int was_dirty, jlist;
struct buffer_head *bh = jh2bh(jh);
- J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+ lockdep_assert_held(&jh->b_state_lock);
if (jh->b_transaction)
assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
/* If the buffer is now unused, just drop it. */
if (jh->b_next_transaction == NULL) {
__jbd2_journal_unfile_buffer(jh);
- return;
+ return true;
}
/*
@@ -2542,6 +2576,7 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
if (was_dirty)
set_buffer_jbddirty(bh);
+ return false;
}
/*
@@ -2552,16 +2587,15 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
*/
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
- struct buffer_head *bh = jh2bh(jh);
+ bool drop;
- /* Get reference so that buffer cannot be freed before we unlock it */
- get_bh(bh);
- jbd_lock_bh_state(bh);
+ spin_lock(&jh->b_state_lock);
spin_lock(&journal->j_list_lock);
- __jbd2_journal_refile_buffer(jh);
- jbd_unlock_bh_state(bh);
+ drop = __jbd2_journal_refile_buffer(jh);
+ spin_unlock(&jh->b_state_lock);
spin_unlock(&journal->j_list_lock);
- __brelse(bh);
+ if (drop)
+ jbd2_journal_put_journal_head(jh);
}
/*
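
Taken together, the transaction.c changes thread a revoke-record budget
through the whole handle lifecycle: reserved at start, grown by extend,
re-established by restart, and trued up in stop_this_handle(). A hedged sketch
of the extended API from a filesystem's point of view (the credit numbers are
arbitrary):

    handle_t *handle;
    int err;

    /* 8 buffer credits plus room for up to 16 revoke records */
    handle = jbd2__journal_start(journal, 8, 0, 16, GFP_NOFS, 0, 0);
    if (IS_ERR(handle))
    	return PTR_ERR(handle);

    /* grow by 4 blocks and 8 more revoke records */
    err = jbd2_journal_extend(handle, 4, 8);
    if (err > 0)	/* transaction full: restart with fresh credits */
    	err = jbd2__journal_restart(handle, 8, 16, GFP_NOFS);
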
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 021a4a2190ee..b86c78d178c6 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -226,7 +226,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
lastend = this->ofs + this->size;
} else {
dbg_fragtree2("lookup gave no frag\n");
- return -EINVAL;
+ lastend = 0;
}
/* See if we ran off the end of the fragtree */
diff --git a/fs/namei.c b/fs/namei.c
index 671c3c1a3425..2dda552bcf7a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -925,7 +925,7 @@ static inline int may_follow_link(struct nameidata *nd)
return -ECHILD;
audit_inode(nd->name, nd->stack[0].link.dentry, 0);
- audit_log_link_denied("follow_link");
+ audit_log_path_denied(AUDIT_ANOM_LINK, "follow_link");
return -EACCES;
}
@@ -993,7 +993,7 @@ static int may_linkat(struct path *link)
if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
return 0;
- audit_log_link_denied("linkat");
+ audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
return -EPERM;
}
@@ -1031,6 +1031,10 @@ static int may_create_in_sticky(struct dentry * const dir,
(dir->d_inode->i_mode & 0020 &&
((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
(sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
+ const char *operation = S_ISFIFO(inode->i_mode) ?
+ "sticky_create_fifo" :
+ "sticky_create_regular";
+ audit_log_path_denied(AUDIT_ANOM_CREAT, operation);
return -EACCES;
}
return 0;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 91b9dac6b2cc..4ba73dbf3e8d 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1354,6 +1354,7 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NILFS_IOCTL_SYNC:
case NILFS_IOCTL_RESIZE:
case NILFS_IOCTL_SET_ALLOC_RANGE:
+ case FITRIM:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 8508ab575017..0aa362b88550 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -523,7 +523,7 @@ static const struct file_operations fanotify_fops = {
.fasync = NULL,
.release = fanotify_release,
.unlocked_ioctl = fanotify_ioctl,
- .compat_ioctl = fanotify_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
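
fanotify_ioctl() only takes pointer arguments, so the generic
compat_ptr_ioctl() helper is sufficient here. Its body is roughly the
following sketch (converting the 32-bit user pointer and forwarding to the
native handler):

    long compat_ptr_ioctl(struct file *file, unsigned int cmd,
    		      unsigned long arg)
    {
    	if (!file->f_op->unlocked_ioctl)
    		return -ENOIOCTLCMD;
    	/* compat_ptr() normalizes the 32-bit pointer representation */
    	return file->f_op->unlocked_ioctl(file, cmd,
    					  (unsigned long)compat_ptr(arg));
    }
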
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 1e2bfd26b352..ef83f4020554 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -50,7 +50,7 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
f.handle.handle_bytes = sizeof(f.pad);
size = f.handle.handle_bytes >> 2;
- ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
+ ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, NULL);
if ((ret == FILEID_INVALID) || (ret < 0)) {
WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
return;
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 2ecef6155fc0..3e77b728a22b 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -381,8 +381,6 @@ out:
}
EXPORT_SYMBOL_GPL(fsnotify);
-extern struct kmem_cache *fsnotify_mark_connector_cachep;
-
static __init int fsnotify_init(void)
{
int ret;
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index f3462828a0e2..ff2063ec6b0f 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -65,4 +65,6 @@ extern void __fsnotify_update_child_dentry_flags(struct inode *inode);
extern struct fsnotify_event_holder *fsnotify_alloc_event_holder(void);
extern void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder);
+extern struct kmem_cache *fsnotify_mark_connector_cachep;
+
#endif /* __FS_NOTIFY_FSNOTIFY_H_ */
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 3e7da392aa6f..bb981ec76456 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -327,8 +327,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
- if (IS_ERR(acl) || !acl)
- return PTR_ERR(acl);
+ if (IS_ERR_OR_NULL(acl))
+ return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;
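
The replacement is behavior-preserving: PTR_ERR(NULL) already evaluated to 0,
which the combined macros make explicit:

    long ret = PTR_ERR_OR_ZERO(acl);	/* NULL -> 0, ERR_PTR(e) -> e */
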
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f9baefc76cf9..88534eb0e7c2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -2288,9 +2288,9 @@ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int ret = 0;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
- if (handle->h_buffer_credits < credits)
+ if (jbd2_handle_buffer_credits(handle) < credits)
ret = ocfs2_extend_trans(handle,
- credits - handle->h_buffer_credits);
+ credits - jbd2_handle_buffer_credits(handle));
return ret;
}
@@ -2367,7 +2367,7 @@ static int ocfs2_rotate_tree_right(handle_t *handle,
struct ocfs2_path *right_path,
struct ocfs2_path **ret_left_path)
{
- int ret, start, orig_credits = handle->h_buffer_credits;
+ int ret, start, orig_credits = jbd2_handle_buffer_credits(handle);
u32 cpos;
struct ocfs2_path *left_path = NULL;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
@@ -3148,7 +3148,7 @@ static int ocfs2_rotate_tree_left(handle_t *handle,
struct ocfs2_path *path,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
- int ret, orig_credits = handle->h_buffer_credits;
+ int ret, orig_credits = jbd2_handle_buffer_credits(handle);
struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
struct ocfs2_extent_block *eb;
struct ocfs2_extent_list *el;
@@ -3386,8 +3386,8 @@ static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
- handle->h_buffer_credits,
- right_path);
+ jbd2_handle_buffer_credits(handle),
+ right_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3548,8 +3548,8 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
right_path);
ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
- handle->h_buffer_credits,
- left_path);
+ jbd2_handle_buffer_credits(handle),
+ left_path);
if (ret) {
mlog_errno(ret);
goto out;
@@ -3623,7 +3623,7 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
le16_to_cpu(el->l_next_free_rec) == 1) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
right_path);
if (ret) {
mlog_errno(ret);
@@ -3669,7 +3669,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -3725,7 +3725,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -3755,7 +3755,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -3799,7 +3799,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
if (ctxt->c_split_covers_rec) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -5358,7 +5358,7 @@ static int ocfs2_truncate_rec(handle_t *handle,
if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
/* extend credit for ocfs2_remove_rightmost_path */
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
+ jbd2_handle_buffer_credits(handle),
path);
if (ret) {
mlog_errno(ret);
@@ -5427,8 +5427,8 @@ static int ocfs2_truncate_rec(handle_t *handle,
}
ret = ocfs2_extend_rotate_transaction(handle, 0,
- handle->h_buffer_credits,
- path);
+ jbd2_handle_buffer_credits(handle),
+ path);
if (ret) {
mlog_errno(ret);
goto out;
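
Every ocfs2 hunk in this file swaps a direct read of the removed
h_buffer_credits field for the new accessor. A sketch of its likely shape,
consistent with the h_total_credits rename (the exact body lives in
include/linux/jbd2.h and is an assumption here):

    static inline int jbd2_handle_buffer_credits(handle_t *handle)
    {
    	journal_t *journal;

    	if (!handle->h_reserved)
    		journal = handle->h_transaction->t_journal;
    	else
    		journal = handle->h_journal;
    	/* total credits minus those earmarked for revoke descriptors */
    	return handle->h_total_credits -
    		DIV_ROUND_UP(handle->h_revoke_credits_requested,
    			     journal->j_revoke_records_per_block);
    }
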
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 9cd0a6815933..3a67a6518ddf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -11,7 +11,6 @@
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
-#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index efeea208fdeb..89984172fc4a 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -985,6 +985,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EFAULT;
return ocfs2_info_handle(inode, &info, 1);
+ case FITRIM:
case OCFS2_IOC_MOVE_EXT:
break;
default:
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 699a560efbb0..1afe57f425a0 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -420,14 +420,14 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks)
if (!nblocks)
return 0;
- old_nblocks = handle->h_buffer_credits;
+ old_nblocks = jbd2_handle_buffer_credits(handle);
trace_ocfs2_extend_trans(old_nblocks, nblocks);
#ifdef CONFIG_OCFS2_DEBUG_FS
status = 1;
#else
- status = jbd2_journal_extend(handle, nblocks);
+ status = jbd2_journal_extend(handle, nblocks, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -461,13 +461,13 @@ int ocfs2_allocate_extend_trans(handle_t *handle, int thresh)
BUG_ON(!handle);
- old_nblks = handle->h_buffer_credits;
+ old_nblks = jbd2_handle_buffer_credits(handle);
trace_ocfs2_allocate_extend_trans(old_nblks, thresh);
if (old_nblks < thresh)
return 0;
- status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA);
+ status = jbd2_journal_extend(handle, OCFS2_MAX_TRANS_DATA, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 7a922190a8c7..eda83487c9ec 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -728,7 +728,7 @@ static int ocfs2_release_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
- if (atomic_read(&dquot->dq_count) > 1)
+ if (dquot_is_busy(dquot))
goto out;
/* Running from downconvert thread? Postpone quota processing to wq */
if (current == osb->dc_task) {
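
dquot_is_busy() replaces the open-coded reference-count test; a plausible
definition (an assumption — the real helper lives in the quota headers):

    static inline bool dquot_is_busy(struct dquot *dquot)
    {
    	/* busy if dirty or if someone besides us holds a reference */
    	return test_bit(DQ_MOD_B, &dquot->dq_flags) ||
    	       atomic_read(&dquot->dq_count) > 1;
    }
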
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 69c21a3843af..4180c3ef0a68 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1252,6 +1252,7 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
int nr)
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct journal_head *jh;
int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
@@ -1260,13 +1261,14 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
if (!buffer_jbd(bg_bh))
return 1;
- jbd_lock_bh_state(bg_bh);
- bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
+ jh = bh2jh(bg_bh);
+ spin_lock(&jh->b_state_lock);
+ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
if (bg)
ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
else
ret = 1;
- jbd_unlock_bh_state(bg_bh);
+ spin_unlock(&jh->b_state_lock);
return ret;
}
@@ -2387,6 +2389,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
int status;
unsigned int tmp;
struct ocfs2_group_desc *undo_bg = NULL;
+ struct journal_head *jh;
/* The caller got this descriptor from
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
@@ -2405,10 +2408,10 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
goto bail;
}
+ jh = bh2jh(group_bh);
if (undo_fn) {
- jbd_lock_bh_state(group_bh);
- undo_bg = (struct ocfs2_group_desc *)
- bh2jh(group_bh)->b_committed_data;
+ spin_lock(&jh->b_state_lock);
+ undo_bg = (struct ocfs2_group_desc *) jh->b_committed_data;
BUG_ON(!undo_bg);
}
@@ -2423,7 +2426,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
if (undo_fn)
- jbd_unlock_bh_state(group_bh);
+ spin_unlock(&jh->b_state_lock);
return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. num_bits %d\n",
(unsigned long long)le64_to_cpu(bg->bg_blkno),
le16_to_cpu(bg->bg_bits),
@@ -2432,7 +2435,7 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
}
if (undo_fn)
- jbd_unlock_bh_state(group_bh);
+ spin_unlock(&jh->b_state_lock);
ocfs2_journal_dirty(handle, group_bh);
bail:
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c81e86c62380..05dd68ade293 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -926,8 +926,8 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
status = -ENOENT;
goto out_quota_off;
}
- status = dquot_enable(inode[type], type, QFMT_OCFS2,
- DQUOT_USAGE_ENABLED);
+ status = dquot_load_quota_inode(inode[type], type, QFMT_OCFS2,
+ DQUOT_USAGE_ENABLED);
if (status < 0)
goto out_quota_off;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index a9149199e0e7..648ce440ca85 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -43,10 +43,12 @@ unsigned long pipe_user_pages_hard;
unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
/*
- * We use a start+len construction, which provides full use of the
- * allocated memory.
- * -- Florian Coosmann (FGC)
- *
+ * We use head and tail indices that aren't masked off, except at the point of
+ * dereference, but rather they're allowed to wrap naturally. This means there
+ * isn't a dead spot in the buffer, but the ring has to be a power of two and
+ * <= 2^31.
+ * -- David Howells 2019-09-23.
+ *
* Reads with count = 0 should always return 0.
* -- Julian Bradfield 1999-06-07.
*
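
The head/tail scheme relies on unsigned wraparound: both indices increment
without bound and are masked only when indexing pipe->bufs, so with a
power-of-two ring the occupancy test is a single subtraction. A sketch of the
helpers the code below calls (assumed to match include/linux/pipe_fs_i.h):

    static inline bool pipe_empty(unsigned int head, unsigned int tail)
    {
    	return head == tail;		/* everything written was read */
    }

    static inline bool pipe_full(unsigned int head, unsigned int tail,
    			     unsigned int limit)
    {
    	/* head - tail is the occupancy even after either index wraps */
    	return head - tail >= limit;
    }
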
@@ -285,10 +287,12 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
ret = 0;
__pipe_lock(pipe);
for (;;) {
- int bufs = pipe->nrbufs;
- if (bufs) {
- int curbuf = pipe->curbuf;
- struct pipe_buffer *buf = pipe->bufs + curbuf;
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
+
+ if (!pipe_empty(head, tail)) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t chars = buf->len;
size_t written;
int error;
@@ -320,18 +324,27 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
}
if (!buf->len) {
+ bool wake;
pipe_buf_release(pipe, buf);
- curbuf = (curbuf + 1) & (pipe->buffers - 1);
- pipe->curbuf = curbuf;
- pipe->nrbufs = --bufs;
+ spin_lock_irq(&pipe->wait.lock);
+ tail++;
+ pipe->tail = tail;
do_wakeup = 1;
+ wake = head - (tail - 1) == pipe->max_usage / 2;
+ if (wake)
+ wake_up_locked_poll(
+ &pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ spin_unlock_irq(&pipe->wait.lock);
+ if (wake)
+ kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
+ if (!pipe_empty(head, tail)) /* More to do? */
+ continue;
}
- if (bufs) /* More to do? */
- continue;
+
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
@@ -352,17 +365,13 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
ret = -ERESTARTSYS;
break;
}
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
- kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
- }
pipe_wait(pipe);
}
__pipe_unlock(pipe);
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
+ wake_up_interruptible_poll(&pipe->wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
@@ -380,6 +389,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
+ unsigned int head, max_usage, mask;
ssize_t ret = 0;
int do_wakeup = 0;
size_t total_len = iov_iter_count(from);
@@ -397,12 +407,14 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
+ head = pipe->head;
+ max_usage = pipe->max_usage;
+ mask = pipe->ring_size - 1;
+
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
- if (pipe->nrbufs && chars != 0) {
- int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
- (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + lastbuf;
+ if (!pipe_empty(head, pipe->tail) && chars != 0) {
+ struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;
if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
@@ -423,18 +435,16 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
}
for (;;) {
- int bufs;
-
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
- bufs = pipe->nrbufs;
- if (bufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
+
+ head = pipe->head;
+ if (!pipe_full(head, pipe->tail, max_usage)) {
+ struct pipe_buffer *buf = &pipe->bufs[head & mask];
struct page *page = pipe->tmp_page;
int copied;
@@ -446,38 +456,64 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
}
pipe->tmp_page = page;
}
+
+ /* Allocate a slot in the ring in advance and attach an
+ * empty buffer. If we fault or otherwise fail to use
+ * it, either the reader will consume it or it'll still
+ * be there for the next write.
+ */
+ spin_lock_irq(&pipe->wait.lock);
+
+ head = pipe->head;
+ if (pipe_full(head, pipe->tail, max_usage)) {
+ spin_unlock_irq(&pipe->wait.lock);
+ continue;
+ }
+
+ pipe->head = head + 1;
+
/* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
*/
- do_wakeup = 1;
- copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
- if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
- ret += copied;
+ wake_up_locked_poll(
+ &pipe->wait, EPOLLIN | EPOLLRDNORM);
+
+ spin_unlock_irq(&pipe->wait.lock);
+ kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
/* Insert it into the buffer array */
+ buf = &pipe->bufs[head & mask];
buf->page = page;
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
- buf->len = copied;
+ buf->len = 0;
buf->flags = 0;
if (is_packetized(filp)) {
buf->ops = &packet_pipe_buf_ops;
buf->flags = PIPE_BUF_FLAG_PACKET;
}
- pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
+ copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
+ if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ ret += copied;
+ buf->offset = 0;
+ buf->len = copied;
+
if (!iov_iter_count(from))
break;
}
- if (bufs < pipe->buffers)
+
+ if (!pipe_full(head, pipe->tail, max_usage))
continue;
+
+ /* Wait for buffer space to become available. */
if (filp->f_flags & O_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
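
The hunk above pre-reserves a ring slot before the fault-prone copy: head
is advanced with a zero-length buffer attached, and len is only set once
copy_page_from_iter() succeeds, so a fault leaves a harmless empty slot
rather than exposing partial data. A hedged userspace sketch of that
reserve-then-commit shape (struct slot, reserve() and commit() are
illustrative names, not kernel API):

    #include <stdbool.h>
    #include <string.h>

    #define N 8u

    struct slot { char data[64]; unsigned int len; };

    struct wring {
            unsigned int head, tail;
            struct slot slots[N];
    };

    /* Publish an empty slot first; if the copy later fails, readers
     * just see a zero-length buffer they can skip, or the next write
     * can reuse it. */
    static struct slot *reserve(struct wring *r)
    {
            struct slot *s;

            if (r->head - r->tail >= N)
                    return NULL;      /* ring full */
            s = &r->slots[r->head & (N - 1)];
            s->len = 0;               /* valid but empty */
            r->head++;                /* slot now belongs to the ring */
            return s;
    }

    static bool commit(struct slot *s, const void *buf, unsigned int len)
    {
            if (len > sizeof(s->data))
                    return false;
            memcpy(s->data, buf, len); /* the step that may fail */
            s->len = len;              /* only now is the data visible */
            return true;
    }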
@@ -488,11 +524,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
ret = -ERESTARTSYS;
break;
}
- if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
- kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
- do_wakeup = 0;
- }
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
@@ -500,7 +531,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
out:
__pipe_unlock(pipe);
if (do_wakeup) {
- wake_up_interruptible_sync_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
+ wake_up_interruptible_poll(&pipe->wait, EPOLLIN | EPOLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
@@ -515,17 +546,19 @@ out:
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
- int count, buf, nrbufs;
+ int count, head, tail, mask;
switch (cmd) {
case FIONREAD:
__pipe_lock(pipe);
count = 0;
- buf = pipe->curbuf;
- nrbufs = pipe->nrbufs;
- while (--nrbufs >= 0) {
- count += pipe->bufs[buf].len;
- buf = (buf+1) & (pipe->buffers - 1);
+ head = pipe->head;
+ tail = pipe->tail;
+ mask = pipe->ring_size - 1;
+
+ while (tail != head) {
+ count += pipe->bufs[tail & mask].len;
+ tail++;
}
__pipe_unlock(pipe);
@@ -541,21 +574,25 @@ pipe_poll(struct file *filp, poll_table *wait)
{
__poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
- int nrbufs;
+ unsigned int head = READ_ONCE(pipe->head);
+ unsigned int tail = READ_ONCE(pipe->tail);
poll_wait(filp, &pipe->wait, wait);
+ BUG_ON(pipe_occupancy(head, tail) > pipe->ring_size);
+
/* Reading only -- no need for acquiring the semaphore. */
- nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
- mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
+ if (!pipe_empty(head, tail))
+ mask |= EPOLLIN | EPOLLRDNORM;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= EPOLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
- mask |= (nrbufs < pipe->buffers) ? EPOLLOUT | EPOLLWRNORM : 0;
+ if (!pipe_full(head, tail, pipe->max_usage))
+ mask |= EPOLLOUT | EPOLLWRNORM;
/*
* Most Unices do not set EPOLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
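
pipe_poll() above takes one READ_ONCE() snapshot of head and one of tail
and derives both the readable and writable answers from that pair; the
snapshot may be stale, but it cannot be torn by concurrent updates in the
middle of the evaluation. A sketch of the same pattern using C11 relaxed
atomic loads as a stand-in for READ_ONCE() (the type and field names are
illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct pipe_state {
            _Atomic unsigned int head;
            _Atomic unsigned int tail;
            unsigned int max_usage;
    };

    struct poll_result { bool readable; bool writable; };

    static struct poll_result poll_snapshot(const struct pipe_state *p)
    {
            /* One load each, like READ_ONCE(): both answers come from
             * the same (possibly stale) snapshot, never from torn or
             * repeated reads. */
            unsigned int head = atomic_load_explicit(&p->head,
                                                     memory_order_relaxed);
            unsigned int tail = atomic_load_explicit(&p->tail,
                                                     memory_order_relaxed);

            return (struct poll_result){
                    .readable = head != tail,               /* !pipe_empty() */
                    .writable = head - tail < p->max_usage, /* !pipe_full() */
            };
    }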
@@ -679,7 +716,8 @@ struct pipe_inode_info *alloc_pipe_info(void)
if (pipe->bufs) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
- pipe->buffers = pipe_bufs;
+ pipe->max_usage = pipe_bufs;
+ pipe->ring_size = pipe_bufs;
pipe->user = user;
mutex_init(&pipe->mutex);
return pipe;
@@ -697,9 +735,9 @@ void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
- (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
+ (void) account_pipe_buffers(pipe->user, pipe->ring_size, 0);
free_uid(pipe->user);
- for (i = 0; i < pipe->buffers; i++) {
+ for (i = 0; i < pipe->ring_size; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
pipe_buf_release(pipe, buf);
@@ -882,7 +920,7 @@ SYSCALL_DEFINE1(pipe, int __user *, fildes)
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
- int cur = *cnt;
+ int cur = *cnt;
while (cur == *cnt) {
pipe_wait(pipe);
@@ -957,7 +995,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
}
}
break;
-
+
case FMODE_WRITE:
/*
* O_WRONLY
@@ -977,7 +1015,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
goto err_wr;
}
break;
-
+
case FMODE_READ | FMODE_WRITE:
/*
* O_RDWR
@@ -1056,14 +1094,14 @@ unsigned int round_pipe_size(unsigned long size)
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
{
struct pipe_buffer *bufs;
- unsigned int size, nr_pages;
+ unsigned int size, nr_slots, head, tail, mask, n;
unsigned long user_bufs;
long ret = 0;
size = round_pipe_size(arg);
- nr_pages = size >> PAGE_SHIFT;
+ nr_slots = size >> PAGE_SHIFT;
- if (!nr_pages)
+ if (!nr_slots)
return -EINVAL;
/*
@@ -1073,13 +1111,13 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
* Decreasing the pipe capacity is always permitted, even
* if the user is currently over a limit.
*/
- if (nr_pages > pipe->buffers &&
+ if (nr_slots > pipe->ring_size &&
size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
return -EPERM;
- user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+ user_bufs = account_pipe_buffers(pipe->user, pipe->ring_size, nr_slots);
- if (nr_pages > pipe->buffers &&
+ if (nr_slots > pipe->ring_size &&
(too_many_pipe_buffers_hard(user_bufs) ||
too_many_pipe_buffers_soft(user_bufs)) &&
is_unprivileged_user()) {
@@ -1088,17 +1126,21 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
}
/*
- * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
- * expect a lot of shrink+grow operations, just free and allocate
- * again like we would do for growing. If the pipe currently
+ * We can shrink the pipe, if arg is greater than the ring occupancy.
+ * Since we don't expect a lot of shrink+grow operations, just free and
+ * allocate again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
- if (nr_pages < pipe->nrbufs) {
+ mask = pipe->ring_size - 1;
+ head = pipe->head;
+ tail = pipe->tail;
+ n = pipe_occupancy(pipe->head, pipe->tail);
+ if (nr_slots < n) {
ret = -EBUSY;
goto out_revert_acct;
}
- bufs = kcalloc(nr_pages, sizeof(*bufs),
+ bufs = kcalloc(nr_slots, sizeof(*bufs),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs)) {
ret = -ENOMEM;
@@ -1107,33 +1149,37 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
/*
* The pipe array wraps around, so just start the new one at zero
- * and adjust the indexes.
+ * and adjust the indices.
*/
- if (pipe->nrbufs) {
- unsigned int tail;
- unsigned int head;
-
- tail = pipe->curbuf + pipe->nrbufs;
- if (tail < pipe->buffers)
- tail = 0;
- else
- tail &= (pipe->buffers - 1);
-
- head = pipe->nrbufs - tail;
- if (head)
- memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
- if (tail)
- memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
+ if (n > 0) {
+ unsigned int h = head & mask;
+ unsigned int t = tail & mask;
+ if (h > t) {
+ memcpy(bufs, pipe->bufs + t,
+ n * sizeof(struct pipe_buffer));
+ } else {
+ unsigned int tsize = pipe->ring_size - t;
+ if (h > 0)
+ memcpy(bufs + tsize, pipe->bufs,
+ h * sizeof(struct pipe_buffer));
+ memcpy(bufs, pipe->bufs + t,
+ tsize * sizeof(struct pipe_buffer));
+ }
}
- pipe->curbuf = 0;
+ head = n;
+ tail = 0;
+
kfree(pipe->bufs);
pipe->bufs = bufs;
- pipe->buffers = nr_pages;
- return nr_pages * PAGE_SIZE;
+ pipe->ring_size = nr_slots;
+ pipe->max_usage = nr_slots;
+ pipe->tail = tail;
+ pipe->head = head;
+ return pipe->max_usage * PAGE_SIZE;
out_revert_acct:
- (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
+ (void) account_pipe_buffers(pipe->user, nr_slots, pipe->ring_size);
return ret;
}
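
The copy above linearizes the old ring into the new allocation: when the
occupied span does not wrap (h > t) a single memcpy suffices; otherwise
the tail-to-end segment goes first and the zero-to-head segment after it,
so the new ring can start at tail = 0, head = n. The same logic as a
standalone sketch (function and parameter names are illustrative):

    #include <string.h>

    /* Copy the n occupied slots of a power-of-two ring 'src' of
     * old_size entries into 'dst', in order, starting at index 0. */
    static void unwrap_copy(int *dst, const int *src, unsigned int old_size,
                            unsigned int head, unsigned int tail,
                            unsigned int n)
    {
            unsigned int mask = old_size - 1;
            unsigned int h = head & mask, t = tail & mask;

            if (n == 0)
                    return;
            if (h > t) {
                    /* occupied span is contiguous: [t, h) */
                    memcpy(dst, src + t, n * sizeof(*src));
            } else {
                    /* span wraps: [t, old_size) first, then [0, h) */
                    unsigned int tsize = old_size - t;

                    memcpy(dst, src + t, tsize * sizeof(*src));
                    if (h > 0)
                            memcpy(dst + tsize, src, h * sizeof(*src));
            }
    }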
@@ -1163,7 +1209,7 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
ret = pipe_set_size(pipe, arg);
break;
case F_GETPIPE_SZ:
- ret = pipe->buffers * PAGE_SIZE;
+ ret = pipe->max_usage * PAGE_SIZE;
break;
default:
ret = -EINVAL;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 46dcb6f0eccf..5efaf3708ec6 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -533,7 +533,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
nice = task_nice(task);
/* convert nsec -> ticks */
- start_time = nsec_to_clock_t(task->real_start_time);
+ start_time = nsec_to_clock_t(task->start_boottime);
seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns));
seq_puts(m, " (");
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 3d7024662d29..d896457e7c11 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -793,7 +793,7 @@ static void pstore_timefunc(struct timer_list *unused)
jiffies + msecs_to_jiffies(pstore_update_ms));
}
-void __init pstore_choose_compression(void)
+static void __init pstore_choose_compression(void)
{
const struct pstore_zbackend *step;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 6e826b454082..4639d53e96a3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -497,7 +497,7 @@ int dquot_release(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
/* Check whether we are not racing with some other dqget() */
- if (atomic_read(&dquot->dq_count) > 1)
+ if (dquot_is_busy(dquot))
goto out_dqlock;
if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
@@ -595,7 +595,6 @@ int dquot_scan_active(struct super_block *sb,
/* Now we have active dquot so we can just increase use count */
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
/*
@@ -623,7 +622,7 @@ EXPORT_SYMBOL(dquot_scan_active);
/* Write all dquot structures to quota files */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
- struct list_head *dirty;
+ struct list_head dirty;
struct dquot *dquot;
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
@@ -637,9 +636,10 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
if (!sb_has_quota_active(sb, cnt))
continue;
spin_lock(&dq_list_lock);
- dirty = &dqopt->info[cnt].dqi_dirty_list;
- while (!list_empty(dirty)) {
- dquot = list_first_entry(dirty, struct dquot,
+ /* Move list away to avoid livelock. */
+ list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty);
+ while (!list_empty(&dirty)) {
+ dquot = list_first_entry(&dirty, struct dquot,
dq_dirty);
WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
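
The list_replace_init() above is the livelock fix: the whole dirty list
is detached under dq_list_lock, so dquots re-dirtied while writeback runs
land on the (now empty) original list rather than the one being walked,
and the loop is guaranteed to terminate. A userspace sketch of that
detach-then-drain move with pthreads and a toy singly-linked list (all
names are illustrative):

    #include <pthread.h>
    #include <stddef.h>

    struct node { struct node *next; };

    struct dirty_list {
            pthread_mutex_t lock;     /* caller initializes */
            struct node *first;
    };

    static void writeback_all(struct dirty_list *d,
                              void (*write_one)(struct node *))
    {
            struct node *batch;

            /* Detach everything under the lock, then walk unlocked.
             * Entries dirtied meanwhile go onto d->first, not onto
             * 'batch', so this cannot spin forever. */
            pthread_mutex_lock(&d->lock);
            batch = d->first;         /* analogue of list_replace_init() */
            d->first = NULL;
            pthread_mutex_unlock(&d->lock);

            while (batch) {
                    struct node *n = batch;

                    batch = n->next;
                    write_one(n);
            }
    }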
@@ -649,7 +649,6 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
* use count */
dqgrab(dquot);
spin_unlock(&dq_list_lock);
- dqstats_inc(DQST_LOOKUPS);
err = sb->dq_op->write_dquot(dquot);
if (err) {
/*
@@ -2162,14 +2161,29 @@ int dquot_file_open(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(dquot_file_open);
+static void vfs_cleanup_quota_inode(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct inode *inode = dqopt->files[type];
+
+ if (!inode)
+ return;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ inode_lock(inode);
+ inode->i_flags &= ~S_NOQUOTA;
+ inode_unlock(inode);
+ }
+ dqopt->files[type] = NULL;
+ iput(inode);
+}
+
/*
* Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
*/
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
- int cnt, ret = 0;
+ int cnt;
struct quota_info *dqopt = sb_dqopt(sb);
- struct inode *toputinode[MAXQUOTAS];
/* s_umount should be held in exclusive mode */
if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
@@ -2191,7 +2205,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- toputinode[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_loaded(sb, cnt))
@@ -2211,8 +2224,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
dqopt->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
spin_unlock(&dq_state_lock);
- iput(dqopt->files[cnt]);
- dqopt->files[cnt] = NULL;
+ vfs_cleanup_quota_inode(sb, cnt);
continue;
}
spin_unlock(&dq_state_lock);
@@ -2234,10 +2246,6 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
if (dqopt->ops[cnt]->free_file_info)
dqopt->ops[cnt]->free_file_info(sb, cnt);
put_quota_format(dqopt->info[cnt].dqi_format);
-
- toputinode[cnt] = dqopt->files[cnt];
- if (!sb_has_quota_loaded(sb, cnt))
- dqopt->files[cnt] = NULL;
dqopt->info[cnt].dqi_flags = 0;
dqopt->info[cnt].dqi_igrace = 0;
dqopt->info[cnt].dqi_bgrace = 0;
@@ -2259,32 +2267,22 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
* must also discard the blockdev buffers so that we see the
* changes done by userspace on the next quotaon() */
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- /* This can happen when suspending quotas on remount-ro... */
- if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
- inode_lock(toputinode[cnt]);
- toputinode[cnt]->i_flags &= ~S_NOQUOTA;
- truncate_inode_pages(&toputinode[cnt]->i_data, 0);
- inode_unlock(toputinode[cnt]);
- mark_inode_dirty_sync(toputinode[cnt]);
+ if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) {
+ inode_lock(dqopt->files[cnt]);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ inode_unlock(dqopt->files[cnt]);
}
if (sb->s_bdev)
invalidate_bdev(sb->s_bdev);
put_inodes:
+ /* We are done when suspending quotas */
+ if (flags & DQUOT_SUSPENDED)
+ return 0;
+
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (toputinode[cnt]) {
- /* On remount RO, we keep the inode pointer so that we
- * can reenable quota on the subsequent remount RW. We
- * have to check 'flags' variable and not use sb_has_
- * function because another quotaon / quotaoff could
- * change global state before we got here. We refuse
- * to suspend quotas when there is pending delete on
- * the quota file... */
- if (!(flags & DQUOT_SUSPENDED))
- iput(toputinode[cnt]);
- else if (!toputinode[cnt]->i_nlink)
- ret = -EBUSY;
- }
- return ret;
+ if (!sb_has_quota_loaded(sb, cnt))
+ vfs_cleanup_quota_inode(sb, cnt);
+ return 0;
}
EXPORT_SYMBOL(dquot_disable);
@@ -2299,28 +2297,52 @@ EXPORT_SYMBOL(dquot_quota_off);
* Turn quotas on on a device
*/
-/*
- * Helper function to turn quotas on when we already have the inode of
- * quota file and no quota information is loaded.
- */
-static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
+static int vfs_setup_quota_inode(struct inode *inode, int type)
+{
+ struct super_block *sb = inode->i_sb;
+ struct quota_info *dqopt = sb_dqopt(sb);
+
+ if (!S_ISREG(inode->i_mode))
+ return -EACCES;
+ if (IS_RDONLY(inode))
+ return -EROFS;
+ if (sb_has_quota_loaded(sb, type))
+ return -EBUSY;
+
+ dqopt->files[type] = igrab(inode);
+ if (!dqopt->files[type])
+ return -EIO;
+ if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
+ /* We don't want quota and atime on quota files (deadlocks
+ * possible) Also nobody should write to the file - we use
+ * special IO operations which ignore the immutable bit. */
+ inode_lock(inode);
+ inode->i_flags |= S_NOQUOTA;
+ inode_unlock(inode);
+ /*
+ * When S_NOQUOTA is set, remove dquot references as no more
+ * references can be added
+ */
+ __dquot_drop(inode);
+ }
+ return 0;
+}
+
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
unsigned int flags)
{
struct quota_format_type *fmt = find_quota_format(format_id);
- struct super_block *sb = inode->i_sb;
struct quota_info *dqopt = sb_dqopt(sb);
int error;
+ /* Just unsuspend quotas? */
+ BUG_ON(flags & DQUOT_SUSPENDED);
+ /* s_umount should be held in exclusive mode */
+ if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
+ up_read(&sb->s_umount);
+
if (!fmt)
return -ESRCH;
- if (!S_ISREG(inode->i_mode)) {
- error = -EACCES;
- goto out_fmt;
- }
- if (IS_RDONLY(inode)) {
- error = -EROFS;
- goto out_fmt;
- }
if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
(type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
error = -EINVAL;
@@ -2352,27 +2374,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
invalidate_bdev(sb->s_bdev);
}
- if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
- /* We don't want quota and atime on quota files (deadlocks
- * possible) Also nobody should write to the file - we use
- * special IO operations which ignore the immutable bit. */
- inode_lock(inode);
- inode->i_flags |= S_NOQUOTA;
- inode_unlock(inode);
- /*
- * When S_NOQUOTA is set, remove dquot references as no more
- * references can be added
- */
- __dquot_drop(inode);
- }
-
- error = -EIO;
- dqopt->files[type] = igrab(inode);
- if (!dqopt->files[type])
- goto out_file_flags;
error = -EINVAL;
if (!fmt->qf_ops->check_quota_file(sb, type))
- goto out_file_init;
+ goto out_fmt;
dqopt->ops[type] = fmt->qf_ops;
dqopt->info[type].dqi_format = fmt;
@@ -2380,7 +2384,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
error = dqopt->ops[type]->read_file_info(sb, type);
if (error < 0)
- goto out_file_init;
+ goto out_fmt;
if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
spin_lock(&dq_data_lock);
dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
@@ -2395,24 +2399,36 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
dquot_disable(sb, type, flags);
return error;
-out_file_init:
- dqopt->files[type] = NULL;
- iput(inode);
-out_file_flags:
- inode_lock(inode);
- inode->i_flags &= ~S_NOQUOTA;
- inode_unlock(inode);
out_fmt:
put_quota_format(fmt);
return error;
}
+EXPORT_SYMBOL(dquot_load_quota_sb);
+
+/*
+ * More powerful function for turning on quotas on a given quota inode,
+ * allowing individual quota flags to be set
+ */
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
+ unsigned int flags)
+{
+ int err;
+
+ err = vfs_setup_quota_inode(inode, type);
+ if (err < 0)
+ return err;
+ err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags);
+ if (err < 0)
+ vfs_cleanup_quota_inode(inode->i_sb, type);
+ return err;
+}
+EXPORT_SYMBOL(dquot_load_quota_inode);
/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
struct quota_info *dqopt = sb_dqopt(sb);
- struct inode *inode;
int ret = 0, cnt;
unsigned int flags;
@@ -2426,8 +2442,6 @@ int dquot_resume(struct super_block *sb, int type)
if (!sb_has_quota_suspended(sb, cnt))
continue;
- inode = dqopt->files[cnt];
- dqopt->files[cnt] = NULL;
spin_lock(&dq_state_lock);
flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED,
@@ -2436,9 +2450,10 @@ int dquot_resume(struct super_block *sb, int type)
spin_unlock(&dq_state_lock);
flags = dquot_generic_flag(flags, cnt);
- ret = vfs_load_quota_inode(inode, cnt,
- dqopt->info[cnt].dqi_fmt_id, flags);
- iput(inode);
+ ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id,
+ flags);
+ if (ret < 0)
+ vfs_cleanup_quota_inode(sb, type);
}
return ret;
@@ -2455,7 +2470,7 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
if (path->dentry->d_sb != sb)
error = -EXDEV;
else
- error = vfs_load_quota_inode(d_inode(path->dentry), type,
+ error = dquot_load_quota_inode(d_inode(path->dentry), type,
format_id, DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED);
return error;
@@ -2463,41 +2478,6 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
EXPORT_SYMBOL(dquot_quota_on);
/*
- * More powerful function for turning on quotas allowing setting
- * of individual quota flags
- */
-int dquot_enable(struct inode *inode, int type, int format_id,
- unsigned int flags)
-{
- struct super_block *sb = inode->i_sb;
-
- /* Just unsuspend quotas? */
- BUG_ON(flags & DQUOT_SUSPENDED);
- /* s_umount should be held in exclusive mode */
- if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
- up_read(&sb->s_umount);
-
- if (!flags)
- return 0;
- /* Just updating flags needed? */
- if (sb_has_quota_loaded(sb, type)) {
- if (flags & DQUOT_USAGE_ENABLED &&
- sb_has_quota_usage_enabled(sb, type))
- return -EBUSY;
- if (flags & DQUOT_LIMITS_ENABLED &&
- sb_has_quota_limits_enabled(sb, type))
- return -EBUSY;
- spin_lock(&dq_state_lock);
- sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
- spin_unlock(&dq_state_lock);
- return 0;
- }
-
- return vfs_load_quota_inode(inode, type, format_id, flags);
-}
-EXPORT_SYMBOL(dquot_enable);
-
-/*
* This function is used when filesystem needs to initialize quotas
* during mount time.
*/
@@ -2518,7 +2498,7 @@ int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
error = security_quota_on(dentry);
if (!error)
- error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
+ error = dquot_load_quota_inode(d_inode(dentry), type, format_id,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
out:
@@ -2543,13 +2523,17 @@ static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
if (!(flags & qtype_enforce_flag(type)))
continue;
/* Can't enforce without accounting */
- if (!sb_has_quota_usage_enabled(sb, type))
- return -EINVAL;
- ret = dquot_enable(dqopt->files[type], type,
- dqopt->info[type].dqi_fmt_id,
- DQUOT_LIMITS_ENABLED);
- if (ret < 0)
+ if (!sb_has_quota_usage_enabled(sb, type)) {
+ ret = -EINVAL;
goto out_err;
+ }
+ if (sb_has_quota_limits_enabled(sb, type)) {
+ ret = -EBUSY;
+ goto out_err;
+ }
+ spin_lock(&dq_state_lock);
+ dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+ spin_unlock(&dq_state_lock);
}
return 0;
out_err:
@@ -2599,10 +2583,12 @@ static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
out_err:
/* Backout enforcement disabling we already did */
for (type--; type >= 0; type--) {
- if (flags & qtype_enforce_flag(type))
- dquot_enable(dqopt->files[type], type,
- dqopt->info[type].dqi_fmt_id,
- DQUOT_LIMITS_ENABLED);
+ if (flags & qtype_enforce_flag(type)) {
+ spin_lock(&dq_state_lock);
+ dqopt->flags |=
+ dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+ spin_unlock(&dq_state_lock);
+ }
}
return ret;
}
@@ -2800,8 +2786,10 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state)
tstate->flags |= QCI_LIMITS_ENFORCED;
tstate->spc_timelimit = mi->dqi_bgrace;
tstate->ino_timelimit = mi->dqi_igrace;
- tstate->ino = dqopt->files[type]->i_ino;
- tstate->blocks = dqopt->files[type]->i_blocks;
+ if (dqopt->files[type]) {
+ tstate->ino = dqopt->files[type]->i_ino;
+ tstate->blocks = dqopt->files[type]->i_blocks;
+ }
tstate->nextents = 1; /* We don't know... */
spin_unlock(&dq_data_lock);
}
@@ -2860,68 +2848,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- unsigned int type = (int *)table->data - dqstats.stat;
+ unsigned int type = (unsigned long *)table->data - dqstats.stat;
+ s64 value = percpu_counter_sum(&dqstats.counter[type]);
+
+ /* Filter negative values for non-monotonic counters */
+ if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
+ type == DQST_FREE_DQUOTS))
+ value = 0;
/* Update global table */
- dqstats.stat[type] =
- percpu_counter_sum_positive(&dqstats.counter[type]);
- return proc_dointvec(table, write, buffer, lenp, ppos);
+ dqstats.stat[type] = value;
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
@@ -2983,11 +2976,7 @@ static int __init dquot_init(void)
/* Find power-of-two hlist_heads which can fit into allocation */
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
- dq_hash_bits = 0;
- do {
- dq_hash_bits++;
- } while (nr_hash >> dq_hash_bits);
- dq_hash_bits--;
+ dq_hash_bits = ilog2(nr_hash);
nr_hash = 1UL << dq_hash_bits;
dq_hash_mask = nr_hash - 1;
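
The ilog2() call above replaces the removed count-the-shifts loop; both
compute floor(log2(nr_hash)). A quick userspace equivalence check, using
the GCC/Clang __builtin_clzl() builtin as a stand-in for the kernel's
ilog2() (an assumption of this sketch, not part of the patch):

    #include <assert.h>

    static unsigned int ilog2_sketch(unsigned long n)
    {
            /* floor(log2(n)); n must be nonzero */
            return (unsigned int)(sizeof(n) * 8 - 1) -
                   (unsigned int)__builtin_clzl(n);
    }

    static unsigned int ilog2_loop(unsigned long nr_hash)
    {
            unsigned int bits = 0;

            do {                      /* the loop the patch removes */
                    bits++;
            } while (nr_hash >> bits);
            return bits - 1;
    }

    int main(void)
    {
            for (unsigned long n = 1; n < (1ul << 20); n++)
                    assert(ilog2_sketch(n) == ilog2_loop(n));
            return 0;
    }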
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index cb13fb76dbee..5444d3c4d93f 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -60,8 +60,6 @@ static int quota_sync_all(int type)
{
int ret;
- if (type >= MAXQUOTAS)
- return -EINVAL;
ret = security_quotactl(Q_SYNC, type, 0, NULL);
if (!ret)
iterate_supers(quota_sync_one, &type);
@@ -686,8 +684,6 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
{
int ret;
- if (type >= MAXQUOTAS)
- return -EINVAL;
type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
@@ -831,6 +827,9 @@ int kernel_quotactl(unsigned int cmd, const char __user *special,
cmds = cmd >> SUBCMDSHIFT;
type = cmd & SUBCMDMASK;
+ if (type >= MAXQUOTAS)
+ return -EINVAL;
+
/*
* As a special case Q_SYNC can be called without a specific device.
* It will iterate all superblocks that have quota enabled and call
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index c740e5572eb8..cd92e5fa0062 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -217,7 +217,6 @@ static const struct quota_format_ops v1_format_ops = {
.check_quota_file = v1_check_quota_file,
.read_file_info = v1_read_file_info,
.write_file_info = v1_write_file_info,
- .free_file_info = NULL,
.read_dqblk = v1_read_dqblk,
.commit_dqblk = v1_commit_dqblk,
};
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 843aadcc123c..84cf8bdbec9c 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -38,16 +38,10 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
BUG_ON(!S_ISREG(inode->i_mode));
- if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
+ if (!atomic_dec_and_mutex_lock(&REISERFS_I(inode)->openers,
+ &REISERFS_I(inode)->tailpack))
return 0;
- mutex_lock(&REISERFS_I(inode)->tailpack);
-
- if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
- mutex_unlock(&REISERFS_I(inode)->tailpack);
- return 0;
- }
-
/* fast out for when nothing needs to be done */
if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
!tail_has_to_be_packed(inode)) &&
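
atomic_dec_and_mutex_lock() folds the removed open-coded sequence into
one helper: decrement the counter, and return true with the mutex held
only for the caller that took it to zero, skipping the lock entirely
while other openers remain. A userspace sketch of those semantics with
C11 atomics and pthreads (illustrative, not the kernel implementation):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Decrement *cnt; return true with *lock held iff it hit zero. */
    static bool dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
    {
            int old = atomic_load(cnt);

            /* Fast path: not the last reference, avoid the lock.
             * Mirrors the old atomic_add_unless(cnt, -1, 1) fast-out. */
            while (old > 1) {
                    if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                            return false;
            }

            /* Slow path: take the lock, then do the final decrement
             * under it so the zero transition and the locked section
             * cannot race. */
            pthread_mutex_lock(lock);
            if (atomic_fetch_sub(cnt, 1) == 1)
                    return true;      /* caller cleans up, then unlocks */
            pthread_mutex_unlock(lock);
            return false;
    }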
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 132ec4406ed0..6419e6dacc39 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2097,6 +2097,15 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
goto out_inserted_sd;
}
+ /*
+ * Mark it private if we're creating the privroot
+ * or something under it.
+ */
+ if (IS_PRIVATE(dir) || dentry == REISERFS_SB(sb)->priv_root) {
+ inode->i_flags |= S_PRIVATE;
+ inode->i_opflags &= ~IOP_XATTR;
+ }
+
if (reiserfs_posixacl(inode->i_sb)) {
reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
@@ -2111,8 +2120,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
reiserfs_warning(inode->i_sb, "jdm-13090",
"ACLs aren't enabled in the fs, "
"but vfs thinks they are!");
- } else if (IS_PRIVATE(dir))
- inode->i_flags |= S_PRIVATE;
+ }
if (security->name) {
reiserfs_write_unlock(inode->i_sb);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 97f3fc4fdd79..959a066b7bb0 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -377,10 +377,13 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
/*
* Propagate the private flag so we know we're
- * in the priv tree
+ * in the priv tree. Also clear IOP_XATTR
+ * since we don't have xattrs on xattr files.
*/
- if (IS_PRIVATE(dir))
+ if (IS_PRIVATE(dir)) {
inode->i_flags |= S_PRIVATE;
+ inode->i_opflags &= ~IOP_XATTR;
+ }
}
reiserfs_write_unlock(dir->i_sb);
if (retval == IO_ERROR) {
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index e5ca9ed79e54..726580114d55 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1168,6 +1168,8 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
return bmap_nr > ((1LL << 16) - 1);
}
+extern const struct xattr_handler *reiserfs_xattr_handlers[];
+
/*
* this says about version of key of all items (but stat data) the
* object consists of
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index d69b4ac0ae2f..3244037b1286 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2049,6 +2049,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
if (replay_only(s))
goto error_unlocked;
+ s->s_xattr = reiserfs_xattr_handlers;
+
if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
SWARN(silent, s, "clm-7000",
"Detected readonly device, marking FS readonly");
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index b5b26d8a192c..62b40df36c98 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -122,13 +122,13 @@ static struct dentry *open_xa_root(struct super_block *sb, int flags)
struct dentry *xaroot;
if (d_really_is_negative(privroot))
- return ERR_PTR(-ENODATA);
+ return ERR_PTR(-EOPNOTSUPP);
inode_lock_nested(d_inode(privroot), I_MUTEX_XATTR);
xaroot = dget(REISERFS_SB(sb)->xattr_root);
if (!xaroot)
- xaroot = ERR_PTR(-ENODATA);
+ xaroot = ERR_PTR(-EOPNOTSUPP);
else if (d_really_is_negative(xaroot)) {
int err = -ENODATA;
@@ -619,6 +619,10 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
int error, error2;
size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size);
+ /* Check before we start a transaction and then do nothing. */
+ if (!d_really_is_positive(REISERFS_SB(inode->i_sb)->priv_root))
+ return -EOPNOTSUPP;
+
if (!(flags & XATTR_REPLACE))
jbegin_count += reiserfs_xattr_jcreate_nblocks(inode);
@@ -841,8 +845,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
if (d_really_is_negative(dentry))
return -EINVAL;
- if (!dentry->d_sb->s_xattr ||
- get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
+ if (get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1)
return -EOPNOTSUPP;
dir = open_xa_dir(d_inode(dentry), XATTR_REPLACE);
@@ -882,6 +885,7 @@ static int create_privroot(struct dentry *dentry)
}
d_inode(dentry)->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_opflags &= ~IOP_XATTR;
reiserfs_info(dentry->d_sb, "Created %s - reserved for xattr "
"storage.\n", PRIVROOT_NAME);
@@ -895,7 +899,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
#endif
/* Actual operations that are exported to VFS-land */
-static const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler *reiserfs_xattr_handlers[] = {
#ifdef CONFIG_REISERFS_FS_XATTR
&reiserfs_xattr_user_handler,
&reiserfs_xattr_trusted_handler,
@@ -966,8 +970,10 @@ int reiserfs_lookup_privroot(struct super_block *s)
if (!IS_ERR(dentry)) {
REISERFS_SB(s)->priv_root = dentry;
d_set_d_op(dentry, &xattr_lookup_poison_ops);
- if (d_really_is_positive(dentry))
+ if (d_really_is_positive(dentry)) {
d_inode(dentry)->i_flags |= S_PRIVATE;
+ d_inode(dentry)->i_opflags &= ~IOP_XATTR;
+ }
} else
err = PTR_ERR(dentry);
inode_unlock(d_inode(s->s_root));
@@ -996,7 +1002,6 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
}
if (d_really_is_positive(privroot)) {
- s->s_xattr = reiserfs_xattr_handlers;
inode_lock(d_inode(privroot));
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index aa9380bac196..05f666794561 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -320,10 +320,8 @@ reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
* would be useless since permissions are ignored, and a pain because
* it introduces locking cycles
*/
- if (IS_PRIVATE(dir)) {
- inode->i_flags |= S_PRIVATE;
+ if (IS_PRIVATE(inode))
goto apply_umask;
- }
err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (err)
diff --git a/fs/select.c b/fs/select.c
index 53a0c149f528..11d0285d46b7 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -321,7 +321,7 @@ static int poll_select_finish(struct timespec64 *end_time,
switch (pt_type) {
case PT_TIMEVAL:
{
- struct timeval rtv;
+ struct __kernel_old_timeval rtv;
if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
memset(&rtv, 0, sizeof(rtv));
@@ -698,10 +698,10 @@ out_nofds:
}
static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timeval __user *tvp)
+ fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
struct timespec64 end_time, *to = NULL;
- struct timeval tv;
+ struct __kernel_old_timeval tv;
int ret;
if (tvp) {
@@ -720,7 +720,7 @@ static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
}
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
- fd_set __user *, exp, struct timeval __user *, tvp)
+ fd_set __user *, exp, struct __kernel_old_timeval __user *, tvp)
{
return kern_select(n, inp, outp, exp, tvp);
}
@@ -810,7 +810,7 @@ SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *,
struct sel_arg_struct {
unsigned long n;
fd_set __user *inp, *outp, *exp;
- struct timeval __user *tvp;
+ struct __kernel_old_timeval __user *tvp;
};
SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
diff --git a/fs/splice.c b/fs/splice.c
index 98412721f056..f2400ce7d528 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
unsigned int spd_pages = spd->nr_pages;
+ unsigned int tail = pipe->tail;
+ unsigned int head = pipe->head;
+ unsigned int mask = pipe->ring_size - 1;
int ret = 0, page_nr = 0;
if (!spd_pages)
@@ -196,9 +199,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
goto out;
}
- while (pipe->nrbufs < pipe->buffers) {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- struct pipe_buffer *buf = pipe->bufs + newbuf;
+ while (!pipe_full(head, tail, pipe->max_usage)) {
+ struct pipe_buffer *buf = &pipe->bufs[head & mask];
buf->page = spd->pages[page_nr];
buf->offset = spd->partial[page_nr].offset;
@@ -207,7 +209,8 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
buf->ops = spd->ops;
buf->flags = 0;
- pipe->nrbufs++;
+ head++;
+ pipe->head = head;
page_nr++;
ret += buf->len;
@@ -228,17 +231,19 @@ EXPORT_SYMBOL_GPL(splice_to_pipe);
ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
int ret;
if (unlikely(!pipe->readers)) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
- } else if (pipe->nrbufs == pipe->buffers) {
+ } else if (pipe_full(head, tail, pipe->max_usage)) {
ret = -EAGAIN;
} else {
- int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- pipe->bufs[newbuf] = *buf;
- pipe->nrbufs++;
+ pipe->bufs[head & mask] = *buf;
+ pipe->head = head + 1;
return buf->len;
}
pipe_buf_release(pipe, buf);
@@ -252,14 +257,14 @@ EXPORT_SYMBOL(add_to_pipe);
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
- unsigned int buffers = READ_ONCE(pipe->buffers);
+ unsigned int max_usage = READ_ONCE(pipe->max_usage);
- spd->nr_pages_max = buffers;
- if (buffers <= PIPE_DEF_BUFFERS)
+ spd->nr_pages_max = max_usage;
+ if (max_usage <= PIPE_DEF_BUFFERS)
return 0;
- spd->pages = kmalloc_array(buffers, sizeof(struct page *), GFP_KERNEL);
- spd->partial = kmalloc_array(buffers, sizeof(struct partial_page),
+ spd->pages = kmalloc_array(max_usage, sizeof(struct page *), GFP_KERNEL);
+ spd->partial = kmalloc_array(max_usage, sizeof(struct partial_page),
GFP_KERNEL);
if (spd->pages && spd->partial)
@@ -298,10 +303,11 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
{
struct iov_iter to;
struct kiocb kiocb;
- int idx, ret;
+ unsigned int i_head;
+ int ret;
iov_iter_pipe(&to, READ, pipe, len);
- idx = to.idx;
+ i_head = to.head;
init_sync_kiocb(&kiocb, in);
kiocb.ki_pos = *ppos;
ret = call_read_iter(in, &kiocb, &to);
@@ -309,7 +315,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
*ppos = kiocb.ki_pos;
file_accessed(in);
} else if (ret < 0) {
- to.idx = idx;
+ to.head = i_head;
to.iov_offset = 0;
iov_iter_advance(&to, 0); /* to free what was emitted */
/*
@@ -370,11 +376,12 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct iov_iter to;
struct page **pages;
unsigned int nr_pages;
+ unsigned int mask;
size_t offset, base, copied = 0;
ssize_t res;
int i;
- if (pipe->nrbufs == pipe->buffers)
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return -EAGAIN;
/*
@@ -400,8 +407,9 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
}
}
- pipe->bufs[to.idx].offset = offset;
- pipe->bufs[to.idx].len -= offset;
+ mask = pipe->ring_size - 1;
+ pipe->bufs[to.head & mask].offset = offset;
+ pipe->bufs[to.head & mask].len -= offset;
for (i = 0; i < nr_pages; i++) {
size_t this_len = min_t(size_t, len, PAGE_SIZE - offset);
@@ -443,7 +451,8 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
- if (sd->len < sd->total_len && pipe->nrbufs > 1)
+ if (sd->len < sd->total_len &&
+ pipe_occupancy(pipe->head, pipe->tail) > 1)
more |= MSG_SENDPAGE_NOTLAST;
return file->f_op->sendpage(file, buf->page, buf->offset,
@@ -481,10 +490,13 @@ static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
splice_actor *actor)
{
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
int ret;
- while (pipe->nrbufs) {
- struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
+ while (!pipe_empty(tail, head)) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
sd->len = buf->len;
if (sd->len > sd->total_len)
@@ -511,8 +523,8 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
if (!buf->len) {
pipe_buf_release(pipe, buf);
- pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
- pipe->nrbufs--;
+ tail++;
+ pipe->tail = tail;
if (pipe->files)
sd->need_wakeup = true;
}
@@ -543,7 +555,7 @@ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_des
if (signal_pending(current))
return -ERESTARTSYS;
- while (!pipe->nrbufs) {
+ while (pipe_empty(pipe->head, pipe->tail)) {
if (!pipe->writers)
return 0;
@@ -686,7 +698,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
.pos = *ppos,
.u.file = out,
};
- int nbufs = pipe->buffers;
+ int nbufs = pipe->max_usage;
struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
ssize_t ret;
@@ -699,16 +711,19 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
splice_from_pipe_begin(&sd);
while (sd.total_len) {
struct iov_iter from;
+ unsigned int head = pipe->head;
+ unsigned int tail = pipe->tail;
+ unsigned int mask = pipe->ring_size - 1;
size_t left;
- int n, idx;
+ int n;
ret = splice_from_pipe_next(pipe, &sd);
if (ret <= 0)
break;
- if (unlikely(nbufs < pipe->buffers)) {
+ if (unlikely(nbufs < pipe->max_usage)) {
kfree(array);
- nbufs = pipe->buffers;
+ nbufs = pipe->max_usage;
array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
if (!array) {
@@ -719,16 +734,13 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
/* build the vector */
left = sd.total_len;
- for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) {
- struct pipe_buffer *buf = pipe->bufs + idx;
+ for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++, n++) {
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t this_len = buf->len;
if (this_len > left)
this_len = left;
- if (idx == pipe->buffers - 1)
- idx = -1;
-
ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
@@ -752,14 +764,15 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
*ppos = sd.pos;
/* dismiss the fully eaten buffers, adjust the partial one */
+ tail = pipe->tail;
while (ret) {
- struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
+ struct pipe_buffer *buf = &pipe->bufs[tail & mask];
if (ret >= buf->len) {
ret -= buf->len;
buf->len = 0;
pipe_buf_release(pipe, buf);
- pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
- pipe->nrbufs--;
+ tail++;
+ pipe->tail = tail;
if (pipe->files)
sd.need_wakeup = true;
} else {
@@ -942,15 +955,17 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
sd->flags &= ~SPLICE_F_NONBLOCK;
more = sd->flags & SPLICE_F_MORE;
- WARN_ON_ONCE(pipe->nrbufs != 0);
+ WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail));
while (len) {
+ unsigned int p_space;
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
/* Don't try to read more than the pipe has space for. */
- read_len = min_t(size_t, len,
- (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+ p_space = pipe->max_usage -
+ pipe_occupancy(pipe->head, pipe->tail);
+ read_len = min_t(size_t, len, p_space << PAGE_SHIFT);
ret = do_splice_to(in, &pos, pipe, read_len, flags);
if (unlikely(ret <= 0))
goto out_release;
@@ -989,7 +1004,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
}
done:
- pipe->nrbufs = pipe->curbuf = 0;
+ pipe->tail = pipe->head = 0;
file_accessed(in);
return bytes;
@@ -998,8 +1013,8 @@ out_release:
* If we did an incomplete transfer we must release
* the pipe buffers in question:
*/
- for (i = 0; i < pipe->buffers; i++) {
- struct pipe_buffer *buf = pipe->bufs + i;
+ for (i = 0; i < pipe->ring_size; i++) {
+ struct pipe_buffer *buf = &pipe->bufs[i];
if (buf->ops)
pipe_buf_release(pipe, buf);
@@ -1075,7 +1090,7 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags)
send_sig(SIGPIPE, current, 0);
return -EPIPE;
}
- if (pipe->nrbufs != pipe->buffers)
+ if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return 0;
if (flags & SPLICE_F_NONBLOCK)
return -EAGAIN;
@@ -1180,8 +1195,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
pipe_lock(opipe);
ret = wait_for_space(opipe, flags);
- if (!ret)
+ if (!ret) {
+ unsigned int p_space;
+
+ /* Don't try to read more than the pipe has space for. */
+ p_space = opipe->max_usage - pipe_occupancy(opipe->head, opipe->tail);
+ len = min_t(size_t, len, p_space << PAGE_SHIFT);
+
ret = do_splice_to(in, &offset, opipe, len, flags);
+ }
pipe_unlock(opipe);
if (ret > 0)
wakeup_pipe_readers(opipe);
@@ -1442,16 +1464,16 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
int ret;
/*
- * Check ->nrbufs without the inode lock first. This function
+ * Check the pipe occupancy without the inode lock first. This function
* is speculative anyways, so missing one is ok.
*/
- if (pipe->nrbufs)
+ if (!pipe_empty(pipe->head, pipe->tail))
return 0;
ret = 0;
pipe_lock(pipe);
- while (!pipe->nrbufs) {
+ while (pipe_empty(pipe->head, pipe->tail)) {
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -1480,16 +1502,16 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
int ret;
/*
- * Check ->nrbufs without the inode lock first. This function
+ * Check pipe occupancy without the inode lock first. This function
* is speculative anyways, so missing one is ok.
*/
- if (pipe->nrbufs < pipe->buffers)
+ if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
return 0;
ret = 0;
pipe_lock(pipe);
- while (pipe->nrbufs >= pipe->buffers) {
+ while (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
@@ -1520,7 +1542,10 @@ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
- int ret = 0, nbuf;
+ unsigned int i_head, o_head;
+ unsigned int i_tail, o_tail;
+ unsigned int i_mask, o_mask;
+ int ret = 0;
bool input_wakeup = false;
@@ -1540,7 +1565,14 @@ retry:
*/
pipe_double_lock(ipipe, opipe);
+ i_tail = ipipe->tail;
+ i_mask = ipipe->ring_size - 1;
+ o_head = opipe->head;
+ o_mask = opipe->ring_size - 1;
+
do {
+ size_t o_len;
+
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
@@ -1548,14 +1580,18 @@ retry:
break;
}
- if (!ipipe->nrbufs && !ipipe->writers)
+ i_head = ipipe->head;
+ o_tail = opipe->tail;
+
+ if (pipe_empty(i_head, i_tail) && !ipipe->writers)
break;
/*
* Cannot make any progress, because either the input
* pipe is empty or the output pipe is full.
*/
- if (!ipipe->nrbufs || opipe->nrbufs >= opipe->buffers) {
+ if (pipe_empty(i_head, i_tail) ||
+ pipe_full(o_head, o_tail, opipe->max_usage)) {
/* Already processed some buffers, break */
if (ret)
break;
@@ -1575,9 +1611,8 @@ retry:
goto retry;
}
- ibuf = ipipe->bufs + ipipe->curbuf;
- nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
- obuf = opipe->bufs + nbuf;
+ ibuf = &ipipe->bufs[i_tail & i_mask];
+ obuf = &opipe->bufs[o_head & o_mask];
if (len >= ibuf->len) {
/*
@@ -1585,10 +1620,12 @@ retry:
*/
*obuf = *ibuf;
ibuf->ops = NULL;
- opipe->nrbufs++;
- ipipe->curbuf = (ipipe->curbuf + 1) & (ipipe->buffers - 1);
- ipipe->nrbufs--;
+ i_tail++;
+ ipipe->tail = i_tail;
input_wakeup = true;
+ o_len = obuf->len;
+ o_head++;
+ opipe->head = o_head;
} else {
/*
* Get a reference to this pipe buffer,
@@ -1610,12 +1647,14 @@ retry:
pipe_buf_mark_unmergeable(obuf);
obuf->len = len;
- opipe->nrbufs++;
- ibuf->offset += obuf->len;
- ibuf->len -= obuf->len;
+ ibuf->offset += len;
+ ibuf->len -= len;
+ o_len = len;
+ o_head++;
+ opipe->head = o_head;
}
- ret += obuf->len;
- len -= obuf->len;
+ ret += o_len;
+ len -= o_len;
} while (len);
pipe_unlock(ipipe);
@@ -1641,7 +1680,10 @@ static int link_pipe(struct pipe_inode_info *ipipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
- int ret = 0, i = 0, nbuf;
+ unsigned int i_head, o_head;
+ unsigned int i_tail, o_tail;
+ unsigned int i_mask, o_mask;
+ int ret = 0;
/*
* Potential ABBA deadlock, work around it by ordering lock
@@ -1650,6 +1692,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
*/
pipe_double_lock(ipipe, opipe);
+ i_tail = ipipe->tail;
+ i_mask = ipipe->ring_size - 1;
+ o_head = opipe->head;
+ o_mask = opipe->ring_size - 1;
+
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
@@ -1658,15 +1705,19 @@ static int link_pipe(struct pipe_inode_info *ipipe,
break;
}
+ i_head = ipipe->head;
+ o_tail = opipe->tail;
+
/*
- * If we have iterated all input buffers or ran out of
+ * If we have iterated all input buffers or run out of
* output room, break.
*/
- if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
+ if (pipe_empty(i_head, i_tail) ||
+ pipe_full(o_head, o_tail, opipe->max_usage))
break;
- ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
- nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
+ ibuf = &ipipe->bufs[i_tail & i_mask];
+ obuf = &opipe->bufs[o_head & o_mask];
/*
* Get a reference to this pipe buffer,
@@ -1678,7 +1729,6 @@ static int link_pipe(struct pipe_inode_info *ipipe,
break;
}
- obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
/*
@@ -1691,11 +1741,12 @@ static int link_pipe(struct pipe_inode_info *ipipe,
if (obuf->len > len)
obuf->len = len;
-
- opipe->nrbufs++;
ret += obuf->len;
len -= obuf->len;
- i++;
+
+ o_head++;
+ opipe->head = o_head;
+ i_tail++;
} while (len);
/*
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 48305ba41e3c..ac7f59a58f94 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -302,11 +302,11 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
static void timerfd_show(struct seq_file *m, struct file *file)
{
struct timerfd_ctx *ctx = file->private_data;
- struct itimerspec t;
+ struct timespec64 value, interval;
spin_lock_irq(&ctx->wqh.lock);
- t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
- t.it_interval = ktime_to_timespec(ctx->tintv);
+ value = ktime_to_timespec64(timerfd_get_remaining(ctx));
+ interval = ktime_to_timespec64(ctx->tintv);
spin_unlock_irq(&ctx->wqh.lock);
seq_printf(m,
@@ -318,10 +318,10 @@ static void timerfd_show(struct seq_file *m, struct file *file)
ctx->clockid,
(unsigned long long)ctx->ticks,
ctx->settime_flags,
- (unsigned long long)t.it_value.tv_sec,
- (unsigned long long)t.it_value.tv_nsec,
- (unsigned long long)t.it_interval.tv_sec,
- (unsigned long long)t.it_interval.tv_nsec);
+ (unsigned long long)value.tv_sec,
+ (unsigned long long)value.tv_nsec,
+ (unsigned long long)interval.tv_sec,
+ (unsigned long long)interval.tv_nsec);
}
#else
#define timerfd_show NULL
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index e4b52783819d..0f5a480fe264 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2737,18 +2737,6 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u,
struct dentry *dent = file->f_path.dentry;
int val;
- /*
- * TODO: this is racy - the file-system might have already been
- * unmounted and we'd oops in this case. The plan is to fix it with
- * help of 'iterate_supers_type()' which we should have in v3.0: when
- * a debugfs opened, we rember FS's UUID in file->private_data. Then
- * whenever we access the FS via a debugfs file, we iterate all UBIFS
- * superblocks and fine the one with the same UUID, and take the
- * locking right.
- *
- * The other way to go suggested by Al Viro is to create a separate
- * 'ubifs-debug' file-system instead.
- */
if (file->f_path.dentry == d->dfs_dump_lprops) {
ubifs_dump_lprops(c);
return count;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 4fd9683b8245..388fe8f5dc51 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -503,7 +503,7 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
if (c->double_hash)
- dent->cookie = prandom_u32();
+ dent->cookie = (__force __le32) prandom_u32();
else
dent->cookie = 0;
}
@@ -899,7 +899,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
fname_name(&nm) = xent->name;
fname_len(&nm) = le16_to_cpu(xent->nlen);
- xino = ubifs_iget(c->vfs_sb, xent->inum);
+ xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
if (IS_ERR(xino)) {
err = PTR_ERR(xino);
ubifs_err(c, "dead directory entry '%s', error %d",
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 3b4b4114f208..54d6db61106f 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -631,12 +631,17 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ino_t inum;
int i, n, err, first = 1;
+ ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
+ if (!ino)
+ return -ENOMEM;
+
list_for_each_entry(snod, &sleb->nodes, list) {
if (snod->type != UBIFS_ORPH_NODE) {
ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
snod->type, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_free;
}
orph = snod->node;
@@ -663,20 +668,18 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
cmt_no, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_free;
}
dbg_rcvry("out of date LEB %d", sleb->lnum);
*outofdate = 1;
- return 0;
+ err = 0;
+ goto out_free;
}
if (first)
first = 0;
- ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
- if (!ino)
- return -ENOMEM;
-
n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
for (i = 0; i < n; i++) {
union ubifs_key key1, key2;
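
The reordering above hoists the kmalloc() out of the scan loop and
funnels every early return through a single out_free exit, the usual
kernel unwind idiom. The shape in isolation, as a hedged sketch
(process() and its arguments are invented for illustration):

    #include <stdlib.h>

    static int process(const int *items, int n)
    {
            int err = 0;
            /* Allocate scratch once, up front, not per iteration. */
            char *scratch = malloc(64);

            if (!scratch)
                    return -1;

            for (int i = 0; i < n; i++) {
                    if (items[i] < 0) {
                            err = -1;
                            goto out_free; /* every exit funnels here */
                    }
                    /* ... use scratch on items[i] ... */
            }

    out_free:
            free(scratch);
            return err;
    }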
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index a551eb3e9b89..2b7c04bf8983 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -184,7 +184,7 @@ static int create_default_filesystem(struct ubifs_info *c)
if (err)
goto out;
} else {
- sup->hash_algo = 0xffff;
+ sup->hash_algo = cpu_to_le16(0xffff);
}
sup->ch.node_type = UBIFS_SB_NODE;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 7d4547e5202d..5e1e8ec0589e 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2267,10 +2267,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
}
} else {
err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
- if (err) {
- kfree(c);
+ if (err)
goto out_deact;
- }
/* We do not support atime */
sb->s_flags |= SB_ACTIVE;
if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index a384a0f9ff32..234be1c4dc87 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -212,7 +212,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
/**
* layout_leb_in_gaps - layout index nodes using in-the-gaps method.
* @c: UBIFS file-system description object
- * @p: return LEB number here
+ * @p: return LEB number in @c->gap_lebs[p]
*
* This function lays out new index nodes for dirty znodes using in-the-gaps
* method of TNC commit.
@@ -221,7 +221,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
* This function returns the number of index nodes written into the gaps, or a
* negative error code on failure.
*/
-static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
+static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod;
@@ -236,7 +236,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
* filled, however we do not check there at present.
*/
return lnum; /* Error code */
- *p = lnum;
+ c->gap_lebs[p] = lnum;
dbg_gc("LEB %d", lnum);
/*
* Scan the index LEB. We use the generic scan for this even though
@@ -355,7 +355,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt)
*/
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
- int err, leb_needed_cnt, written, *p;
+ int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
dbg_gc("%d znodes to write", cnt);
@@ -364,9 +364,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
if (!c->gap_lebs)
return -ENOMEM;
- p = c->gap_lebs;
+ old_idx_lebs = c->lst.idx_lebs;
do {
- ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
+ ubifs_assert(c, p < c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
@@ -392,9 +392,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
leb_needed_cnt = get_leb_cnt(c, cnt);
dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
leb_needed_cnt, c->ileb_cnt);
+ /*
+ * Dynamically resize @c->gap_lebs to prevent an out-of-bounds
+ * access, because @c->lst.idx_lebs can be increased by
+ * @get_idx_gc_leb (called via layout_leb_in_gaps->
+ * ubifs_find_dirty_idx_leb) during the loop. Only enlarge
+ * @c->gap_lebs when needed.
+ */
+ if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
+ old_idx_lebs < c->lst.idx_lebs) {
+ old_idx_lebs = c->lst.idx_lebs;
+ gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
+ (old_idx_lebs + 1), GFP_NOFS);
+ if (!gap_lebs) {
+ kfree(c->gap_lebs);
+ c->gap_lebs = NULL;
+ return -ENOMEM;
+ }
+ c->gap_lebs = gap_lebs;
+ }
} while (leb_needed_cnt > c->ileb_cnt);
- *p = -1;
+ c->gap_lebs[p] = -1;
return 0;
}
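[Note: the tnc_commit.c rework above replaces the old walking pointer into @c->gap_lebs with an index and grows the array on demand, since @c->lst.idx_lebs can increase while the loop runs. One detail worth copying: krealloc() is assigned to a temporary first, so the old buffer can still be freed when enlargement fails. A stripped-down sketch of that pattern, with illustrative names:

	static int grow_gap_lebs_sketch(int **gap_lebs, int n)
	{
		int *tmp;

		tmp = krealloc(*gap_lebs, sizeof(int) * (n + 1), GFP_NOFS);
		if (!tmp) {
			kfree(*gap_lebs);	/* krealloc failure keeps the old
						 * buffer alive; release it here */
			*gap_lebs = NULL;
			return -ENOMEM;
		}
		*gap_lebs = tmp;
		return 0;
	}
]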
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f9fd18670e22..37df7c9eedb1 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1460,7 +1460,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
- new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
+ new_flags = (vma->vm_flags &
+ ~(VM_UFFD_MISSING|VM_UFFD_WP)) | vm_flags;
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
@@ -1834,13 +1835,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
features = uffdio_api.features;
- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
- memset(&uffdio_api, 0, sizeof(uffdio_api));
- if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
- goto out;
- ret = -EINVAL;
- goto out;
- }
+ ret = -EINVAL;
+ if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
+ goto err_out;
+ ret = -EPERM;
+ if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ goto err_out;
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
uffdio_api.ioctls = UFFD_API_IOCTLS;
@@ -1853,6 +1853,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
ret = 0;
out:
return ret;
+err_out:
+ memset(&uffdio_api, 0, sizeof(uffdio_api));
+ if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
+ ret = -EFAULT;
+ goto out;
}
static long userfaultfd_ioctl(struct file *file, unsigned cmd,
@@ -1923,7 +1928,7 @@ static const struct file_operations userfaultfd_fops = {
.poll = userfaultfd_poll,
.read = userfaultfd_read,
.unlocked_ioctl = userfaultfd_ioctl,
- .compat_ioctl = userfaultfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
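[Note: two behavioral points in the userfaultfd hunks above: the API handshake failure path is folded into one err_out label that zeroes the structure and copies it back to userland before exiting, and UFFD_FEATURE_EVENT_FORK is now refused with -EPERM for tasks lacking CAP_SYS_PTRACE. A condensed control-flow sketch, with the uapi plumbing elided:

	static int api_sketch(void __user *buf, struct uffdio_api *api)
	{
		int ret;

		ret = -EINVAL;
		if (api->api != UFFD_API || (api->features & ~UFFD_API_FEATURES))
			goto err_out;
		ret = -EPERM;
		if ((api->features & UFFD_FEATURE_EVENT_FORK) &&
		    !capable(CAP_SYS_PTRACE))
			goto err_out;
		/* ... report supported features/ioctls back to userland ... */
		ret = 0;
	out:
		return ret;
	err_out:
		/* Tell userland the handshake failed, then take the normal
		 * exit path. */
		memset(api, 0, sizeof(*api));
		if (copy_to_user(buf, api, sizeof(*api)))
			ret = -EFAULT;
		goto out;
	}
]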
diff --git a/fs/utimes.c b/fs/utimes.c
index 1ba3f7883870..c952b6b3d8a0 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -161,9 +161,9 @@ SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
* utimensat() instead.
*/
static long do_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes)
+ struct __kernel_old_timeval __user *utimes)
{
- struct timeval times[2];
+ struct __kernel_old_timeval times[2];
struct timespec64 tstimes[2];
if (utimes) {
@@ -190,13 +190,13 @@ static long do_futimesat(int dfd, const char __user *filename,
SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename,
- struct timeval __user *, utimes)
+ struct __kernel_old_timeval __user *, utimes)
{
return do_futimesat(dfd, filename, utimes);
}
SYSCALL_DEFINE2(utimes, char __user *, filename,
- struct timeval __user *, utimes)
+ struct __kernel_old_timeval __user *, utimes)
{
return do_futimesat(AT_FDCWD, filename, utimes);
}
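[Note: the utimes.c hunk is y2038 groundwork: the native syscalls now name the legacy two-longs layout explicitly as struct __kernel_old_timeval instead of the ambiguous struct timeval; the internal math still happens in timespec64. Roughly, the widening step looks like this (a paraphrase, not the exact kernel code):

	static void timeval_to_timespec64_sketch(
			const struct __kernel_old_timeval *tv,
			struct timespec64 *ts)
	{
		ts->tv_sec = tv->tv_sec;		/* seconds widen losslessly */
		ts->tv_nsec = tv->tv_usec * 1000;	/* usec -> nsec */
	}
]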
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 06b68b6115bc..aceca2f9a3db 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -27,7 +27,6 @@ xfs-y += $(addprefix libxfs/, \
xfs_bmap_btree.o \
xfs_btree.o \
xfs_da_btree.o \
- xfs_da_format.o \
xfs_defer.o \
xfs_dir2.o \
xfs_dir2_block.o \
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index da031b93e182..1da94237a8cf 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -32,7 +32,7 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
/*
- * __vmalloc() will allocate data pages and auxillary structures (e.g.
+ * __vmalloc() will allocate data pages and auxiliary structures (e.g.
* pagetables) with GFP_KERNEL, yet we may be under GFP_NOFS context here. Hence
* we need to tell memory reclaim that we are in such a context via
* PF_MEMALLOC_NOFS to prevent memory reclaim re-entering the filesystem here
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 8170d95cf930..6143117770e9 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -78,39 +78,9 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
* Zone interfaces
*/
-#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN
-#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT
-#define KM_ZONE_SPREAD SLAB_MEM_SPREAD
-#define KM_ZONE_ACCOUNT SLAB_ACCOUNT
-
#define kmem_zone kmem_cache
#define kmem_zone_t struct kmem_cache
-static inline kmem_zone_t *
-kmem_zone_init(int size, char *zone_name)
-{
- return kmem_cache_create(zone_name, size, 0, 0, NULL);
-}
-
-static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
- void (*construct)(void *))
-{
- return kmem_cache_create(zone_name, size, 0, flags, construct);
-}
-
-static inline void
-kmem_zone_free(kmem_zone_t *zone, void *ptr)
-{
- kmem_cache_free(zone, ptr);
-}
-
-static inline void
-kmem_zone_destroy(kmem_zone_t *zone)
-{
- kmem_cache_destroy(zone);
-}
-
extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
static inline void *
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 87a9747f1d36..fdfe6dc0d307 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -19,6 +19,8 @@
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
+#include "xfs_sb.h"
+#include "xfs_ag_resv.h"
/*
* Per-AG Block Reservations
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 533b04aaf6f6..c284e10af491 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -146,9 +146,13 @@ xfs_alloc_lookup_eq(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
+ int error;
+
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
- return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+ cur->bc_private.a.priv.abt.active = (*stat == 1);
+ return error;
}
/*
@@ -162,9 +166,13 @@ xfs_alloc_lookup_ge(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
+ int error;
+
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
- return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
+ cur->bc_private.a.priv.abt.active = (*stat == 1);
+ return error;
}
/*
@@ -178,9 +186,19 @@ xfs_alloc_lookup_le(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
+ int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
- return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+ cur->bc_private.a.priv.abt.active = (*stat == 1);
+ return error;
+}
+
+static inline bool
+xfs_alloc_cur_active(
+ struct xfs_btree_cur *cur)
+{
+ return cur && cur->bc_private.a.priv.abt.active;
}
/*
@@ -313,7 +331,7 @@ xfs_alloc_compute_diff(
xfs_extlen_t newlen1=0; /* length with newbno1 */
xfs_extlen_t newlen2=0; /* length with newbno2 */
xfs_agblock_t wantend; /* end of target extent */
- bool userdata = xfs_alloc_is_userdata(datatype);
+ bool userdata = datatype & XFS_ALLOC_USERDATA;
ASSERT(freelen >= wantlen);
freeend = freebno + freelen;
@@ -433,13 +451,17 @@ xfs_alloc_fixup_trees(
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp,
- i == 1 && nfbno1 == fbno && nflen1 == flen);
+ if (XFS_IS_CORRUPT(mp,
+ i != 1 ||
+ nfbno1 != fbno ||
+ nflen1 != flen))
+ return -EFSCORRUPTED;
#endif
} else {
if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
/*
* Look up the record in the by-block tree if necessary.
@@ -448,13 +470,17 @@ xfs_alloc_fixup_trees(
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp,
- i == 1 && nfbno1 == fbno && nflen1 == flen);
+ if (XFS_IS_CORRUPT(mp,
+ i != 1 ||
+ nfbno1 != fbno ||
+ nflen1 != flen))
+ return -EFSCORRUPTED;
#endif
} else {
if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
#ifdef DEBUG
@@ -465,8 +491,10 @@ xfs_alloc_fixup_trees(
bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
- XFS_WANT_CORRUPTED_RETURN(mp,
- bnoblock->bb_numrecs == cntblock->bb_numrecs);
+ if (XFS_IS_CORRUPT(mp,
+ bnoblock->bb_numrecs !=
+ cntblock->bb_numrecs))
+ return -EFSCORRUPTED;
}
#endif
@@ -496,25 +524,30 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_btree_delete(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
/*
* Add new by-size btree entry(s).
*/
if (nfbno1 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
+ if (XFS_IS_CORRUPT(mp, i != 0))
+ return -EFSCORRUPTED;
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
if (nfbno2 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
+ if (XFS_IS_CORRUPT(mp, i != 0))
+ return -EFSCORRUPTED;
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
/*
* Fix up the by-block btree entry(s).
@@ -525,7 +558,8 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
} else {
/*
* Update the by-block entry to start later|be shorter.
@@ -539,10 +573,12 @@ xfs_alloc_fixup_trees(
*/
if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
+ if (XFS_IS_CORRUPT(mp, i != 0))
+ return -EFSCORRUPTED;
if ((error = xfs_btree_insert(bno_cur, &i)))
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
}
return 0;
}
@@ -684,16 +720,298 @@ xfs_alloc_update_counters(
xfs_trans_agblocks_delta(tp, len);
if (unlikely(be32_to_cpu(agf->agf_freeblks) >
- be32_to_cpu(agf->agf_length)))
+ be32_to_cpu(agf->agf_length))) {
+ xfs_buf_corruption_error(agbp);
return -EFSCORRUPTED;
+ }
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
return 0;
}
/*
- * Allocation group level functions.
+ * Block allocation algorithm and data structures.
+ */
+struct xfs_alloc_cur {
+ struct xfs_btree_cur *cnt; /* btree cursors */
+ struct xfs_btree_cur *bnolt;
+ struct xfs_btree_cur *bnogt;
+ xfs_extlen_t cur_len;/* current search length */
+ xfs_agblock_t rec_bno;/* extent startblock */
+ xfs_extlen_t rec_len;/* extent length */
+ xfs_agblock_t bno; /* alloc bno */
+ xfs_extlen_t len; /* alloc len */
+ xfs_extlen_t diff; /* diff from search bno */
+ unsigned int busy_gen;/* busy state */
+ bool busy;
+};
+
+/*
+ * Set up cursors, etc. in the extent allocation cursor. This function can be
+ * called multiple times to reset an initialized structure without having to
+ * reallocate cursors.
+ */
+static int
+xfs_alloc_cur_setup(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur)
+{
+ int error;
+ int i;
+
+ ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);
+
+ acur->cur_len = args->maxlen;
+ acur->rec_bno = 0;
+ acur->rec_len = 0;
+ acur->bno = 0;
+ acur->len = 0;
+ acur->diff = -1;
+ acur->busy = false;
+ acur->busy_gen = 0;
+
+ /*
+ * Perform an initial cntbt lookup to check for availability of maxlen
+ * extents. If this fails, we'll return -ENOSPC to signal the caller to
+ * attempt a small allocation.
+ */
+ if (!acur->cnt)
+ acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_CNT);
+ error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
+ if (error)
+ return error;
+
+ /*
+ * Allocate the bnobt left and right search cursors.
+ */
+ if (!acur->bnolt)
+ acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_BNO);
+ if (!acur->bnogt)
+ acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
+ args->agbp, args->agno, XFS_BTNUM_BNO);
+ return i == 1 ? 0 : -ENOSPC;
+}
+
+static void
+xfs_alloc_cur_close(
+ struct xfs_alloc_cur *acur,
+ bool error)
+{
+ int cur_error = XFS_BTREE_NOERROR;
+
+ if (error)
+ cur_error = XFS_BTREE_ERROR;
+
+ if (acur->cnt)
+ xfs_btree_del_cursor(acur->cnt, cur_error);
+ if (acur->bnolt)
+ xfs_btree_del_cursor(acur->bnolt, cur_error);
+ if (acur->bnogt)
+ xfs_btree_del_cursor(acur->bnogt, cur_error);
+ acur->cnt = acur->bnolt = acur->bnogt = NULL;
+}
+
+/*
+ * Check an extent for allocation and track the best available candidate in the
+ * allocation structure. The cursor is deactivated if it has entered an out of
+ * range state based on allocation arguments. Optionally return the extent
+ * geometry and allocation status if requested by the caller.
+ */
+static int
+xfs_alloc_cur_check(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ struct xfs_btree_cur *cur,
+ int *new)
+{
+ int error, i;
+ xfs_agblock_t bno, bnoa, bnew;
+ xfs_extlen_t len, lena, diff = -1;
+ bool busy;
+ unsigned busy_gen = 0;
+ bool deactivate = false;
+ bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
+
+ *new = 0;
+
+ error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(args->mp, i != 1))
+ return -EFSCORRUPTED;
+
+ /*
+ * Check minlen and deactivate a cntbt cursor if out of acceptable size
+ * range (i.e., walking backwards looking for a minlen extent).
+ */
+ if (len < args->minlen) {
+ deactivate = !isbnobt;
+ goto out;
+ }
+
+ busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
+ &busy_gen);
+ acur->busy |= busy;
+ if (busy)
+ acur->busy_gen = busy_gen;
+ /* deactivate a bnobt cursor outside of locality range */
+ if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
+ deactivate = isbnobt;
+ goto out;
+ }
+ if (lena < args->minlen)
+ goto out;
+
+ args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
+ xfs_alloc_fix_len(args);
+ ASSERT(args->len >= args->minlen);
+ if (args->len < acur->len)
+ goto out;
+
+ /*
+ * We have an aligned record that satisfies minlen and beats or matches
+ * the candidate extent size. Compare locality for near allocation mode.
+ */
+ ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
+ diff = xfs_alloc_compute_diff(args->agbno, args->len,
+ args->alignment, args->datatype,
+ bnoa, lena, &bnew);
+ if (bnew == NULLAGBLOCK)
+ goto out;
+
+ /*
+ * Deactivate a bnobt cursor with worse locality than the current best.
+ */
+ if (diff > acur->diff) {
+ deactivate = isbnobt;
+ goto out;
+ }
+
+ ASSERT(args->len > acur->len ||
+ (args->len == acur->len && diff <= acur->diff));
+ acur->rec_bno = bno;
+ acur->rec_len = len;
+ acur->bno = bnew;
+ acur->len = args->len;
+ acur->diff = diff;
+ *new = 1;
+
+ /*
+ * We're done if we found a perfect allocation. This only deactivates
+ * the current cursor, but this is just an optimization to terminate a
+ * cntbt search that otherwise runs to the edge of the tree.
+ */
+ if (acur->diff == 0 && acur->len == args->maxlen)
+ deactivate = true;
+out:
+ if (deactivate)
+ cur->bc_private.a.priv.abt.active = false;
+ trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
+ *new);
+ return 0;
+}
+
+/*
+ * Complete an allocation of a candidate extent. Remove the extent from both
+ * trees and update the args structure.
*/
+STATIC int
+xfs_alloc_cur_finish(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur)
+{
+ int error;
+
+ ASSERT(acur->cnt && acur->bnolt);
+ ASSERT(acur->bno >= acur->rec_bno);
+ ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
+ ASSERT(acur->rec_bno + acur->rec_len <=
+ be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+
+ error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
+ acur->rec_len, acur->bno, acur->len, 0);
+ if (error)
+ return error;
+
+ args->agbno = acur->bno;
+ args->len = acur->len;
+ args->wasfromfl = 0;
+
+ trace_xfs_alloc_cur(args);
+ return 0;
+}
+
+/*
+ * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
+ * bno optimized lookup to search for extents with ideal size and locality.
+ */
+STATIC int
+xfs_alloc_cntbt_iter(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur)
+{
+ struct xfs_btree_cur *cur = acur->cnt;
+ xfs_agblock_t bno;
+ xfs_extlen_t len, cur_len;
+ int error;
+ int i;
+
+ if (!xfs_alloc_cur_active(cur))
+ return 0;
+
+ /* locality optimized lookup */
+ cur_len = acur->cur_len;
+ error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
+ if (error)
+ return error;
+ if (i == 0)
+ return 0;
+ error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ if (error)
+ return error;
+
+ /* check the current record and update search length from it */
+ error = xfs_alloc_cur_check(args, acur, cur, &i);
+ if (error)
+ return error;
+ ASSERT(len >= acur->cur_len);
+ acur->cur_len = len;
+
+ /*
+ * We looked up the first record >= [agbno, len] above. The agbno is a
+ * secondary key and so the current record may lie just before or after
+ * agbno. If it is past agbno, check the previous record too, so long as
+ * the length matches, as it may be closer. Don't check a smaller record
+ * because that could deactivate our cursor.
+ */
+ if (bno > args->agbno) {
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (!error && i) {
+ error = xfs_alloc_get_rec(cur, &bno, &len, &i);
+ if (!error && i && len == acur->cur_len)
+ error = xfs_alloc_cur_check(args, acur, cur,
+ &i);
+ }
+ if (error)
+ return error;
+ }
+
+ /*
+ * Advance the search key by one if we have no allocation candidate
+ * yet, or if the extent we just found is at least as large as the
+ * doubled key. Otherwise, double the search key to speed up the
+ * search. Efficiency is more important here than absolute best
+ * locality.
+ */
+ cur_len <<= 1;
+ if (!acur->len || acur->cur_len >= cur_len)
+ acur->cur_len++;
+ else
+ acur->cur_len = cur_len;
+
+ return error;
+}
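[Note: taken together, the helpers added above form a small allocation-cursor API: xfs_alloc_cur_setup() primes the cntbt and two bnobt cursors, the _check/_iter helpers walk candidates while accumulating the best extent in struct xfs_alloc_cur, xfs_alloc_cur_finish() commits the winner to both btrees, and xfs_alloc_cur_close() tears the cursors down. A schematic of the intended call order, inferred from these hunks rather than copied from the kernel:

	static int near_alloc_sketch(struct xfs_alloc_arg *args)
	{
		struct xfs_alloc_cur acur = {};
		int error, stat;

		error = xfs_alloc_cur_setup(args, &acur);	/* cursors + maxlen probe */
		if (error)
			goto out;			/* includes -ENOSPC: no maxlen extent */

		/* Walk bnobt/cntbt candidates; the best one accumulates in acur. */
		error = xfs_alloc_ag_vextent_locality(args, &acur, &stat);
		if (error || !stat)
			goto out;

		error = xfs_alloc_cur_finish(args, &acur);	/* fix up both btrees */
	out:
		xfs_alloc_cur_close(&acur, error != 0);		/* always drop cursors */
		return error;
	}
]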
/*
* Deal with the case where only small freespaces remain. Either return the
@@ -727,7 +1045,10 @@ xfs_alloc_ag_vextent_small(
error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
goto out;
}
@@ -744,13 +1065,13 @@ xfs_alloc_ag_vextent_small(
goto out;
xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
- xfs_alloc_allow_busy_reuse(args->datatype));
+ (args->datatype & XFS_ALLOC_NOBUSY));
- if (xfs_alloc_is_userdata(args->datatype)) {
+ if (args->datatype & XFS_ALLOC_USERDATA) {
struct xfs_buf *bp;
bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
- if (!bp) {
+ if (XFS_IS_CORRUPT(args->mp, !bp)) {
error = -EFSCORRUPTED;
goto error;
}
@@ -758,9 +1079,12 @@ xfs_alloc_ag_vextent_small(
}
*fbnop = args->agbno = fbno;
*flenp = args->len = 1;
- XFS_WANT_CORRUPTED_GOTO(args->mp,
- fbno < be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
- error);
+ if (XFS_IS_CORRUPT(args->mp,
+ fbno >= be32_to_cpu(
+ XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
args->wasfromfl = 1;
trace_xfs_alloc_small_freelist(args);
@@ -915,7 +1239,10 @@ xfs_alloc_ag_vextent_exact(
error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
ASSERT(fbno <= args->agbno);
/*
@@ -984,98 +1311,243 @@ error0:
}
/*
- * Search the btree in a given direction via the search cursor and compare
- * the records found against the good extent we've already found.
+ * Search a given number of btree records in a given direction. Check each
+ * record against the good extent we've already found.
*/
STATIC int
-xfs_alloc_find_best_extent(
- struct xfs_alloc_arg *args, /* allocation argument structure */
- struct xfs_btree_cur **gcur, /* good cursor */
- struct xfs_btree_cur **scur, /* searching cursor */
- xfs_agblock_t gdiff, /* difference for search comparison */
- xfs_agblock_t *sbno, /* extent found by search */
- xfs_extlen_t *slen, /* extent length */
- xfs_agblock_t *sbnoa, /* aligned extent found by search */
- xfs_extlen_t *slena, /* aligned extent length */
- int dir) /* 0 = search right, 1 = search left */
+xfs_alloc_walk_iter(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ struct xfs_btree_cur *cur,
+ bool increment,
+ bool find_one, /* quit on first candidate */
+ int count, /* rec count (-1 for infinite) */
+ int *stat)
{
- xfs_agblock_t new;
- xfs_agblock_t sdiff;
int error;
int i;
- unsigned busy_gen;
- /* The good extent is perfect, no need to search. */
- if (!gdiff)
- goto out_use_good;
+ *stat = 0;
/*
- * Look until we find a better one, run out of space or run off the end.
+ * Search so long as the cursor is active or we find a better extent.
+ * The cursor is deactivated if it extends beyond the range of the
+ * current allocation candidate.
*/
- do {
- error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+ while (xfs_alloc_cur_active(cur) && count) {
+ error = xfs_alloc_cur_check(args, acur, cur, &i);
if (error)
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- xfs_alloc_compute_aligned(args, *sbno, *slen,
- sbnoa, slena, &busy_gen);
+ return error;
+ if (i == 1) {
+ *stat = 1;
+ if (find_one)
+ break;
+ }
+ if (!xfs_alloc_cur_active(cur))
+ break;
+
+ if (increment)
+ error = xfs_btree_increment(cur, 0, &i);
+ else
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ return error;
+ if (i == 0)
+ cur->bc_private.a.priv.abt.active = false;
+
+ if (count > 0)
+ count--;
+ }
+
+ return 0;
+}
+
+/*
+ * Search the by-bno and by-size btrees in parallel in search of an extent with
+ * ideal locality based on the NEAR mode ->agbno locality hint.
+ */
+STATIC int
+xfs_alloc_ag_vextent_locality(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ int *stat)
+{
+ struct xfs_btree_cur *fbcur = NULL;
+ int error;
+ int i;
+ bool fbinc;
+
+ ASSERT(acur->len == 0);
+ ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
+
+ *stat = 0;
+
+ error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
+ if (error)
+ return error;
+ error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
+ if (error)
+ return error;
+ error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
+ if (error)
+ return error;
+
+ /*
+ * Search the bnobt and cntbt in parallel. Search the bnobt left and
+ * right and lookup the closest extent to the locality hint for each
+ * extent size key in the cntbt. The entire search terminates
+ * immediately on a bnobt hit because that means we've found best case
+ * locality. Otherwise the search continues until the cntbt cursor runs
+ * off the end of the tree. If no allocation candidate is found at this
+ * point, give up on locality, walk backwards from the end of the cntbt
+ * and take the first available extent.
+ *
+ * The parallel tree searches balance each other out to provide fairly
+ * consistent performance for various situations. The bnobt search can
+ * have pathological behavior in the worst case scenario of larger
+ * allocation requests and fragmented free space. On the other hand, the
+ * bnobt is able to satisfy most smaller allocation requests much more
+ * quickly than the cntbt. The cntbt search can sift through fragmented
+ * free space and sets of free extents for larger allocation requests
+ * more quickly than the bnobt. Since the locality hint is just a hint
+ * and we don't want to scan the entire bnobt for perfect locality, the
+ * cntbt search essentially bounds the bnobt search such that we can
+ * find good enough locality at reasonable performance in most cases.
+ */
+ while (xfs_alloc_cur_active(acur->bnolt) ||
+ xfs_alloc_cur_active(acur->bnogt) ||
+ xfs_alloc_cur_active(acur->cnt)) {
+
+ trace_xfs_alloc_cur_lookup(args);
/*
- * The good extent is closer than this one.
+ * Search the bnobt left and right. In the case of a hit, finish
+ * the search in the opposite direction and we're done.
*/
- if (!dir) {
- if (*sbnoa > args->max_agbno)
- goto out_use_good;
- if (*sbnoa >= args->agbno + gdiff)
- goto out_use_good;
- } else {
- if (*sbnoa < args->min_agbno)
- goto out_use_good;
- if (*sbnoa <= args->agbno - gdiff)
- goto out_use_good;
+ error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
+ true, 1, &i);
+ if (error)
+ return error;
+ if (i == 1) {
+ trace_xfs_alloc_cur_left(args);
+ fbcur = acur->bnogt;
+ fbinc = true;
+ break;
+ }
+ error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
+ 1, &i);
+ if (error)
+ return error;
+ if (i == 1) {
+ trace_xfs_alloc_cur_right(args);
+ fbcur = acur->bnolt;
+ fbinc = false;
+ break;
}
/*
- * Same distance, compare length and pick the best.
+ * Check the extent with best locality based on the current
+ * extent size search key and keep track of the best candidate.
*/
- if (*slena >= args->minlen) {
- args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
- xfs_alloc_fix_len(args);
-
- sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment,
- args->datatype, *sbnoa,
- *slena, &new);
+ error = xfs_alloc_cntbt_iter(args, acur);
+ if (error)
+ return error;
+ if (!xfs_alloc_cur_active(acur->cnt)) {
+ trace_xfs_alloc_cur_lookup_done(args);
+ break;
+ }
+ }
- /*
- * Choose closer size and invalidate other cursor.
- */
- if (sdiff < gdiff)
- goto out_use_search;
- goto out_use_good;
+ /*
+ * If we failed to find anything due to busy extents, return empty
+ * handed so the caller can flush and retry. If no busy extents were
+ * found, walk backwards from the end of the cntbt as a last resort.
+ */
+ if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
+ error = xfs_btree_decrement(acur->cnt, 0, &i);
+ if (error)
+ return error;
+ if (i) {
+ acur->cnt->bc_private.a.priv.abt.active = true;
+ fbcur = acur->cnt;
+ fbinc = false;
}
+ }
- if (!dir)
- error = xfs_btree_increment(*scur, 0, &i);
- else
- error = xfs_btree_decrement(*scur, 0, &i);
+ /*
+ * Search in the opposite direction for a better entry in the case of
+ * a bnobt hit or walk backwards from the end of the cntbt.
+ */
+ if (fbcur) {
+ error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
+ &i);
if (error)
- goto error0;
- } while (i);
+ return error;
+ }
-out_use_good:
- xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
- *scur = NULL;
- return 0;
+ if (acur->len)
+ *stat = 1;
-out_use_search:
- xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
- *gcur = NULL;
return 0;
+}
-error0:
- /* caller invalidates cursors */
- return error;
+/* Check the last block of the cnt btree for allocations. */
+static int
+xfs_alloc_ag_vextent_lastblock(
+ struct xfs_alloc_arg *args,
+ struct xfs_alloc_cur *acur,
+ xfs_agblock_t *bno,
+ xfs_extlen_t *len,
+ bool *allocated)
+{
+ int error;
+ int i;
+
+#ifdef DEBUG
+ /* Randomly don't execute the first algorithm. */
+ if (prandom_u32() & 1)
+ return 0;
+#endif
+
+ /*
+ * Start from the entry that lookup found, sequence through all larger
+ * free blocks. If we're actually pointing at a record smaller than
+ * maxlen, go to the start of this block, and skip all those smaller
+ * than minlen.
+ */
+ if (len || args->alignment > 1) {
+ acur->cnt->bc_ptrs[0] = 1;
+ do {
+ error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
+ if (error)
+ return error;
+ if (XFS_IS_CORRUPT(args->mp, i != 1))
+ return -EFSCORRUPTED;
+ if (*len >= args->minlen)
+ break;
+ error = xfs_btree_increment(acur->cnt, 0, &i);
+ if (error)
+ return error;
+ } while (i);
+ ASSERT(*len >= args->minlen);
+ if (!i)
+ return 0;
+ }
+
+ error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
+ if (error)
+ return error;
+
+ /*
+ * It didn't work. We COULD be in a case where there's a good record
+ * somewhere, so try again.
+ */
+ if (acur->len == 0)
+ return 0;
+
+ trace_xfs_alloc_near_first(args);
+ *allocated = true;
+ return 0;
}
/*
@@ -1084,41 +1556,17 @@ error0:
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
*/
-STATIC int /* error */
+STATIC int
xfs_alloc_ag_vextent_near(
- xfs_alloc_arg_t *args) /* allocation argument structure */
+ struct xfs_alloc_arg *args)
{
- xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
- xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
- xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
- xfs_agblock_t gtbno; /* start bno of right side entry */
- xfs_agblock_t gtbnoa; /* aligned ... */
- xfs_extlen_t gtdiff; /* difference to right side entry */
- xfs_extlen_t gtlen; /* length of right side entry */
- xfs_extlen_t gtlena; /* aligned ... */
- xfs_agblock_t gtnew; /* useful start bno of right side */
- int error; /* error code */
- int i; /* result code, temporary */
- int j; /* result code, temporary */
- xfs_agblock_t ltbno; /* start bno of left side entry */
- xfs_agblock_t ltbnoa; /* aligned ... */
- xfs_extlen_t ltdiff; /* difference to left side entry */
- xfs_extlen_t ltlen; /* length of left side entry */
- xfs_extlen_t ltlena; /* aligned ... */
- xfs_agblock_t ltnew; /* useful start bno of left side */
- xfs_extlen_t rlen; /* length of returned extent */
- bool busy;
- unsigned busy_gen;
-#ifdef DEBUG
- /*
- * Randomly don't execute the first algorithm.
- */
- int dofirst; /* set to do first algorithm */
-
- dofirst = prandom_u32() & 1;
-#endif
+ struct xfs_alloc_cur acur = {};
+ int error; /* error code */
+ int i; /* result code, temporary */
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
- /* handle unitialized agbno range so caller doesn't have to */
+ /* handle uninitialized agbno range so caller doesn't have to */
if (!args->min_agbno && !args->max_agbno)
args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
ASSERT(args->min_agbno <= args->max_agbno);
@@ -1130,40 +1578,27 @@ xfs_alloc_ag_vextent_near(
args->agbno = args->max_agbno;
restart:
- bno_cur_lt = NULL;
- bno_cur_gt = NULL;
- ltlen = 0;
- gtlena = 0;
- ltlena = 0;
- busy = false;
-
- /*
- * Get a cursor for the by-size btree.
- */
- cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_CNT);
+ len = 0;
/*
- * See if there are any free extents as big as maxlen.
+ * Set up cursors and see if there are any free extents as big as
+ * maxlen. If not, pick the last entry in the tree unless the tree is
+ * empty.
*/
- if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
- goto error0;
- /*
- * If none, then pick up the last entry in the tree unless the
- * tree is empty.
- */
- if (!i) {
- if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
- &ltlen, &i)))
- goto error0;
- if (i == 0 || ltlen == 0) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+ error = xfs_alloc_cur_setup(args, &acur);
+ if (error == -ENOSPC) {
+ error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
+ &len, &i);
+ if (error)
+ goto out;
+ if (i == 0 || len == 0) {
trace_xfs_alloc_near_noentry(args);
- return 0;
+ goto out;
}
ASSERT(i == 1);
+ } else if (error) {
+ goto out;
}
- args->wasfromfl = 0;
/*
* First algorithm.
@@ -1172,311 +1607,47 @@ restart:
* near the right edge of the tree. If it's in the last btree leaf
* block, then we just examine all the entries in that block
* that are big enough, and pick the best one.
- * This is written as a while loop so we can break out of it,
- * but we never loop back to the top.
*/
- while (xfs_btree_islastblock(cnt_cur, 0)) {
- xfs_extlen_t bdiff;
- int besti=0;
- xfs_extlen_t blen=0;
- xfs_agblock_t bnew=0;
-
-#ifdef DEBUG
- if (dofirst)
- break;
-#endif
- /*
- * Start from the entry that lookup found, sequence through
- * all larger free blocks. If we're actually pointing at a
- * record smaller than maxlen, go to the start of this block,
- * and skip all those smaller than minlen.
- */
- if (ltlen || args->alignment > 1) {
- cnt_cur->bc_ptrs[0] = 1;
- do {
- if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
- &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- if (ltlen >= args->minlen)
- break;
- if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
- goto error0;
- } while (i);
- ASSERT(ltlen >= args->minlen);
- if (!i)
- break;
- }
- i = cnt_cur->bc_ptrs[0];
- for (j = 1, blen = 0, bdiff = 0;
- !error && j && (blen < args->maxlen || bdiff > 0);
- error = xfs_btree_increment(cnt_cur, 0, &j)) {
- /*
- * For each entry, decide if it's better than
- * the previous best entry.
- */
- if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
- &ltbnoa, &ltlena, &busy_gen);
- if (ltlena < args->minlen)
- continue;
- if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
- continue;
- args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- ASSERT(args->len >= args->minlen);
- if (args->len < blen)
- continue;
- ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->datatype, ltbnoa,
- ltlena, &ltnew);
- if (ltnew != NULLAGBLOCK &&
- (args->len > blen || ltdiff < bdiff)) {
- bdiff = ltdiff;
- bnew = ltnew;
- blen = args->len;
- besti = cnt_cur->bc_ptrs[0];
- }
- }
- /*
- * It didn't work. We COULD be in a case where
- * there's a good record somewhere, so try again.
- */
- if (blen == 0)
- break;
- /*
- * Point at the best entry, and retrieve it again.
- */
- cnt_cur->bc_ptrs[0] = besti;
- if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
- args->len = blen;
-
- /*
- * We are allocating starting at bnew for blen blocks.
- */
- args->agbno = bnew;
- ASSERT(bnew >= ltbno);
- ASSERT(bnew + blen <= ltbno + ltlen);
- /*
- * Set up a cursor for the by-bno tree.
- */
- bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
- args->agbp, args->agno, XFS_BTNUM_BNO);
- /*
- * Fix up the btree entries.
- */
- if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
- ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
- goto error0;
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+ if (xfs_btree_islastblock(acur.cnt, 0)) {
+ bool allocated = false;
- trace_xfs_alloc_near_first(args);
- return 0;
- }
- /*
- * Second algorithm.
- * Search in the by-bno tree to the left and to the right
- * simultaneously, until in each case we find a space big enough,
- * or run into the edge of the tree. When we run into the edge,
- * we deallocate that cursor.
- * If both searches succeed, we compare the two spaces and pick
- * the better one.
- * With alignment, it's possible for both to fail; the upper
- * level algorithm that picks allocation groups for allocations
- * is not supposed to do this.
- */
- /*
- * Allocate and initialize the cursor for the leftward search.
- */
- bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
- args->agno, XFS_BTNUM_BNO);
- /*
- * Lookup <= bno to find the leftward search's starting point.
- */
- if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
- goto error0;
- if (!i) {
- /*
- * Didn't find anything; use this cursor for the rightward
- * search.
- */
- bno_cur_gt = bno_cur_lt;
- bno_cur_lt = NULL;
- }
- /*
- * Found something. Duplicate the cursor for the rightward search.
- */
- else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
- goto error0;
- /*
- * Increment the cursor, so we will point at the entry just right
- * of the leftward entry if any, or to the leftmost entry.
- */
- if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
- goto error0;
- if (!i) {
- /*
- * It failed, there are no rightward entries.
- */
- xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
+ error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
+ &allocated);
+ if (error)
+ goto out;
+ if (allocated)
+ goto alloc_finish;
}
- /*
- * Loop going left with the leftward cursor, right with the
- * rightward cursor, until either both directions give up or
- * we find an entry at least as big as minlen.
- */
- do {
- if (bno_cur_lt) {
- if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
- &ltbnoa, &ltlena, &busy_gen);
- if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
- break;
- if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
- goto error0;
- if (!i || ltbnoa < args->min_agbno) {
- xfs_btree_del_cursor(bno_cur_lt,
- XFS_BTREE_NOERROR);
- bno_cur_lt = NULL;
- }
- }
- if (bno_cur_gt) {
- if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
- goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
- busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
- &gtbnoa, &gtlena, &busy_gen);
- if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
- break;
- if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
- goto error0;
- if (!i || gtbnoa > args->max_agbno) {
- xfs_btree_del_cursor(bno_cur_gt,
- XFS_BTREE_NOERROR);
- bno_cur_gt = NULL;
- }
- }
- } while (bno_cur_lt || bno_cur_gt);
/*
- * Got both cursors still active, need to find better entry.
+ * Second algorithm. Combined cntbt and bnobt search to find ideal
+ * locality.
*/
- if (bno_cur_lt && bno_cur_gt) {
- if (ltlena >= args->minlen) {
- /*
- * Left side is good, look for a right side entry.
- */
- args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->datatype, ltbnoa,
- ltlena, &ltnew);
-
- error = xfs_alloc_find_best_extent(args,
- &bno_cur_lt, &bno_cur_gt,
- ltdiff, &gtbno, &gtlen,
- &gtbnoa, &gtlena,
- 0 /* search right */);
- } else {
- ASSERT(gtlena >= args->minlen);
-
- /*
- * Right side is good, look for a left side entry.
- */
- args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
- xfs_alloc_fix_len(args);
- gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
- args->alignment, args->datatype, gtbnoa,
- gtlena, &gtnew);
-
- error = xfs_alloc_find_best_extent(args,
- &bno_cur_gt, &bno_cur_lt,
- gtdiff, &ltbno, &ltlen,
- &ltbnoa, &ltlena,
- 1 /* search left */);
- }
-
- if (error)
- goto error0;
- }
+ error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
+ if (error)
+ goto out;
/*
* If we couldn't get anything, give up.
*/
- if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
-
- if (busy) {
+ if (!acur.len) {
+ if (acur.busy) {
trace_xfs_alloc_near_busy(args);
- xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
+ xfs_extent_busy_flush(args->mp, args->pag,
+ acur.busy_gen);
goto restart;
}
trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
- return 0;
+ goto out;
}
- /*
- * At this point we have selected a freespace entry, either to the
- * left or to the right. If it's on the right, copy all the
- * useful variables to the "left" set so we only have one
- * copy of this code.
- */
- if (bno_cur_gt) {
- bno_cur_lt = bno_cur_gt;
- bno_cur_gt = NULL;
- ltbno = gtbno;
- ltbnoa = gtbnoa;
- ltlen = gtlen;
- ltlena = gtlena;
- j = 1;
- } else
- j = 0;
-
- /*
- * Fix up the length and compute the useful address.
- */
- args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
- xfs_alloc_fix_len(args);
- rlen = args->len;
- (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
- args->datatype, ltbnoa, ltlena, &ltnew);
- ASSERT(ltnew >= ltbno);
- ASSERT(ltnew + rlen <= ltbnoa + ltlena);
- ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
- ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
- args->agbno = ltnew;
-
- if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
- ltnew, rlen, XFSA_FIXUP_BNO_OK)))
- goto error0;
-
- if (j)
- trace_xfs_alloc_near_greater(args);
- else
- trace_xfs_alloc_near_lesser(args);
-
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- return 0;
+alloc_finish:
+ /* fix up btrees on a successful allocation */
+ error = xfs_alloc_cur_finish(args, &acur);
- error0:
- trace_xfs_alloc_near_error(args);
- if (cnt_cur != NULL)
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
- if (bno_cur_lt != NULL)
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
- if (bno_cur_gt != NULL)
- xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
+out:
+ xfs_alloc_cur_close(&acur, error);
return error;
}
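[Note: most of the remaining churn in this file is a mechanical conversion of the old XFS_WANT_CORRUPTED_GOTO/_RETURN macros, which buried a jump inside a macro, into explicit XFS_IS_CORRUPT() tests on the inverted condition. The pattern, shown once so the following hunks read easily:

	/* Old style: the macro asserts the good condition and hides the jump. */
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

	/* New style: test the corrupt condition and handle the error openly. */
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}
]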
@@ -1545,7 +1716,10 @@ restart:
error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
busy = xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen, &busy_gen);
@@ -1579,8 +1753,13 @@ restart:
* This can't happen in the second case above.
*/
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
- XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
- (rlen <= flen && rbno + rlen <= fbno + flen), error0);
+ if (XFS_IS_CORRUPT(args->mp,
+ rlen != 0 &&
+ (rlen > flen ||
+ rbno + rlen > fbno + flen))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rlen < args->maxlen) {
xfs_agblock_t bestfbno;
xfs_extlen_t bestflen;
@@ -1599,15 +1778,22 @@ restart:
if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (flen < bestrlen)
break;
busy = xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen, &busy_gen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
- XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
- (rlen <= flen && rbno + rlen <= fbno + flen),
- error0);
+ if (XFS_IS_CORRUPT(args->mp,
+ rlen != 0 &&
+ (rlen > flen ||
+ rbno + rlen > fbno + flen))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rlen > bestrlen) {
bestrlen = rlen;
bestrbno = rbno;
@@ -1620,7 +1806,10 @@ restart:
if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(args->mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
rlen = bestrlen;
rbno = bestrbno;
flen = bestflen;
@@ -1643,7 +1832,10 @@ restart:
xfs_alloc_fix_len(args);
rlen = args->len;
- XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
+ if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Allocate and initialize a cursor for the by-block tree.
*/
@@ -1657,10 +1849,13 @@ restart:
cnt_cur = bno_cur = NULL;
args->len = rlen;
args->agbno = rbno;
- XFS_WANT_CORRUPTED_GOTO(args->mp,
- args->agbno + args->len <=
- be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
- error0);
+ if (XFS_IS_CORRUPT(args->mp,
+ args->agbno + args->len >
+ be32_to_cpu(
+ XFS_BUF_TO_AGF(args->agbp)->agf_length))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
trace_xfs_alloc_size_done(args);
return 0;
@@ -1732,7 +1927,10 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* It's not contiguous, though.
*/
@@ -1744,8 +1942,10 @@ xfs_free_ag_extent(
* space was invalid, it's (partly) already free.
* Very bad.
*/
- XFS_WANT_CORRUPTED_GOTO(mp,
- ltbno + ltlen <= bno, error0);
+ if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
}
/*
@@ -1760,7 +1960,10 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* It's not contiguous, though.
*/
@@ -1772,7 +1975,10 @@ xfs_free_ag_extent(
* space was invalid, it's (partly) already free.
* Very bad.
*/
- XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
+ if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
}
/*
@@ -1789,31 +1995,49 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Delete the old by-size entry on the right.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Delete the old by-block entry for the right block.
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Move the by-block cursor back to the left neighbor.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
#ifdef DEBUG
/*
* Check that this is the right record: delete didn't
@@ -1826,9 +2050,13 @@ xfs_free_ag_extent(
if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
&i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp,
- i == 1 && xxbno == ltbno && xxlen == ltlen,
- error0);
+ if (XFS_IS_CORRUPT(mp,
+ i != 1 ||
+ xxbno != ltbno ||
+ xxlen != ltlen)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
#endif
/*
@@ -1849,17 +2077,26 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Back up the by-block cursor to the left neighbor, and
* update its length.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
nbno = ltbno;
nlen = len + ltlen;
if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
@@ -1875,10 +2112,16 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Update the starting block and length of the right
* neighbor in the by-block tree.
@@ -1897,7 +2140,10 @@ xfs_free_ag_extent(
nlen = len;
if ((error = xfs_btree_insert(bno_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
bno_cur = NULL;
@@ -1906,10 +2152,16 @@ xfs_free_ag_extent(
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_insert(cnt_cur, &i)))
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
@@ -1989,7 +2241,8 @@ xfs_alloc_longest_free_extent(
* reservations and AGFL rules in place, we can return this extent.
*/
if (pag->pagf_longest > delta)
- return pag->pagf_longest - delta;
+ return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
+ pag->pagf_longest - delta);
/* Otherwise, let the caller try for 1 block if there's space. */
return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
@@ -2087,7 +2340,7 @@ xfs_free_agfl_block(
return error;
bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
- if (!bp)
+ if (XFS_IS_CORRUPT(tp->t_mountp, !bp))
return -EFSCORRUPTED;
xfs_trans_binval(tp, bp);
@@ -2253,7 +2506,7 @@ xfs_alloc_fix_freelist(
* somewhere else if we are not being asked to try harder at this
* point
*/
- if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
+ if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
(flags & XFS_ALLOC_FLAG_TRYLOCK)) {
ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
goto out_agbp_relse;
@@ -2956,13 +3209,6 @@ xfs_alloc_vextent(
args->len);
#endif
- /* Zero the extent if we were asked to do so */
- if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
- error = xfs_zero_extent(args->ip, args->fsbno, args->len);
- if (error)
- goto error0;
- }
-
}
xfs_perag_put(args->pag);
return 0;
@@ -3038,12 +3284,18 @@ __xfs_free_extent(
if (error)
return error;
- XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
+ if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
+ error = -EFSCORRUPTED;
+ goto err;
+ }
/* validate the extent size is legal now we have the agf locked */
- XFS_WANT_CORRUPTED_GOTO(mp,
- agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
- err);
+ if (XFS_IS_CORRUPT(mp,
+ agbno + len >
+ be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length))) {
+ error = -EFSCORRUPTED;
+ goto err;
+ }
error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
if (error)
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index d6ed5d2c07c2..7380fbe4a3ff 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -54,7 +54,6 @@ typedef struct xfs_alloc_arg {
struct xfs_mount *mp; /* file system mount point */
struct xfs_buf *agbp; /* buffer for a.g. freelist header */
struct xfs_perag *pag; /* per-ag struct for this agno */
- struct xfs_inode *ip; /* for userdata zeroing method */
xfs_fsblock_t fsbno; /* file system block number */
xfs_agnumber_t agno; /* allocation group number */
xfs_agblock_t agbno; /* allocation group-relative block # */
@@ -83,20 +82,7 @@ typedef struct xfs_alloc_arg {
*/
#define XFS_ALLOC_USERDATA (1 << 0)/* allocation is for user data*/
#define XFS_ALLOC_INITIAL_USER_DATA (1 << 1)/* special case start of file */
-#define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
-#define XFS_ALLOC_NOBUSY (1 << 3)/* Busy extents not allowed */
-
-static inline bool
-xfs_alloc_is_userdata(int datatype)
-{
- return (datatype & ~XFS_ALLOC_NOBUSY) != 0;
-}
-
-static inline bool
-xfs_alloc_allow_busy_reuse(int datatype)
-{
- return (datatype & XFS_ALLOC_NOBUSY) == 0;
-}
+#define XFS_ALLOC_NOBUSY (1 << 2)/* Busy extents not allowed */
/* freespace limit calculations */
#define XFS_ALLOC_AGFL_RESERVE 4
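[Note: with XFS_ALLOC_USERDATA_ZERO gone the datatype flags compact down, and the two trivial predicate helpers are dropped in favor of open-coded bit tests. Mind the polarity when reading the call sites: the removed xfs_alloc_allow_busy_reuse() was true when XFS_ALLOC_NOBUSY was clear. The dropped one-liners, restated for reference:

	/* xfs_alloc_is_userdata() narrows to the USERDATA bit. */
	static inline bool is_userdata(int datatype)
	{
		return (datatype & XFS_ALLOC_USERDATA) != 0;
	}

	/* xfs_alloc_allow_busy_reuse(): true only when NOBUSY is clear. */
	static inline bool allow_busy_reuse(int datatype)
	{
		return (datatype & XFS_ALLOC_NOBUSY) == 0;
	}
]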
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 2a94543857a1..279694d73e4e 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -507,6 +507,7 @@ xfs_allocbt_init_cursor(
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
+ cur->bc_private.a.priv.abt.active = false;
if (xfs_sb_version_hascrc(&mp->m_sb))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 510ca6974604..0d7fcc983b3d 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -589,7 +589,7 @@ xfs_attr_leaf_addname(
*/
dp = args->dp;
args->blkno = 0;
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -715,7 +715,7 @@ xfs_attr_leaf_addname(
* remove the "old" attr from that block (neat, huh!)
*/
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno,
- -1, &bp);
+ &bp);
if (error)
return error;
@@ -769,7 +769,7 @@ xfs_attr_leaf_removename(
*/
dp = args->dp;
args->blkno = 0;
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -813,7 +813,7 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
trace_xfs_attr_leaf_get(args);
args->blkno = 0;
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -1173,7 +1173,7 @@ xfs_attr_node_removename(
ASSERT(state->path.blk[0].bp);
state->path.blk[0].bp = NULL;
- error = xfs_attr3_leaf_read(args->trans, args->dp, 0, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp);
if (error)
goto out;
@@ -1266,10 +1266,9 @@ xfs_attr_refillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
- error = xfs_da3_node_read(state->args->trans,
- state->args->dp,
- blk->blkno, blk->disk_blkno,
- &blk->bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read_mapped(state->args->trans,
+ state->args->dp, blk->disk_blkno,
+ &blk->bp, XFS_ATTR_FORK);
if (error)
return error;
} else {
@@ -1285,10 +1284,9 @@ xfs_attr_refillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->disk_blkno) {
- error = xfs_da3_node_read(state->args->trans,
- state->args->dp,
- blk->blkno, blk->disk_blkno,
- &blk->bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read_mapped(state->args->trans,
+ state->args->dp, blk->disk_blkno,
+ &blk->bp, XFS_ATTR_FORK);
if (error)
return error;
} else {
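
Editorial note: the hunks above retire the old mappedbno == -1 convention in favour of two distinct entry points: xfs_da3_node_read() takes a logical dablk number and resolves the mapping itself, while xfs_da3_node_read_mapped() takes an already-known disk address. A condensed sketch of the two call shapes as used in this file:

    /* Sketch: logical-block read -- the mapping is resolved internally. */
    error = xfs_da3_node_read(tp, dp, blkno, &bp, XFS_ATTR_FORK);

    /* Sketch: mapped read for a cached daddr, as in xfs_attr_refillstate(). */
    error = xfs_da3_node_read_mapped(tp, dp, blk->disk_blkno, &bp,
                    XFS_ATTR_FORK);
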
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index f0089e862216..08d4b10ae2d5 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -233,6 +233,61 @@ xfs_attr3_leaf_hdr_to_disk(
}
static xfs_failaddr_t
+xfs_attr3_leaf_verify_entry(
+ struct xfs_mount *mp,
+ char *buf_end,
+ struct xfs_attr_leafblock *leaf,
+ struct xfs_attr3_icleaf_hdr *leafhdr,
+ struct xfs_attr_leaf_entry *ent,
+ int idx,
+ __u32 *last_hashval)
+{
+ struct xfs_attr_leaf_name_local *lentry;
+ struct xfs_attr_leaf_name_remote *rentry;
+ char *name_end;
+ unsigned int nameidx;
+ unsigned int namesize;
+ __u32 hashval;
+
+ /* hash order check */
+ hashval = be32_to_cpu(ent->hashval);
+ if (hashval < *last_hashval)
+ return __this_address;
+ *last_hashval = hashval;
+
+ nameidx = be16_to_cpu(ent->nameidx);
+ if (nameidx < leafhdr->firstused || nameidx >= mp->m_attr_geo->blksize)
+ return __this_address;
+
+ /*
+ * Check the name information. The namelen fields are u8 so we can't
+ * possibly exceed the maximum name length of 255 bytes.
+ */
+ if (ent->flags & XFS_ATTR_LOCAL) {
+ lentry = xfs_attr3_leaf_name_local(leaf, idx);
+ namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
+ be16_to_cpu(lentry->valuelen));
+ name_end = (char *)lentry + namesize;
+ if (lentry->namelen == 0)
+ return __this_address;
+ } else {
+ rentry = xfs_attr3_leaf_name_remote(leaf, idx);
+ namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
+ name_end = (char *)rentry + namesize;
+ if (rentry->namelen == 0)
+ return __this_address;
+ if (!(ent->flags & XFS_ATTR_INCOMPLETE) &&
+ rentry->valueblk == 0)
+ return __this_address;
+ }
+
+ if (name_end > buf_end)
+ return __this_address;
+
+ return NULL;
+}
+
+static xfs_failaddr_t
xfs_attr3_leaf_verify(
struct xfs_buf *bp)
{
@@ -240,7 +295,10 @@ xfs_attr3_leaf_verify(
struct xfs_mount *mp = bp->b_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *entries;
+ struct xfs_attr_leaf_entry *ent;
+ char *buf_end;
uint32_t end; /* must be 32bit - see below */
+ __u32 last_hashval = 0;
int i;
xfs_failaddr_t fa;
@@ -273,8 +331,13 @@ xfs_attr3_leaf_verify(
(char *)bp->b_addr + ichdr.firstused)
return __this_address;
- /* XXX: need to range check rest of attr header values */
- /* XXX: hash order check? */
+ buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
+ for (i = 0, ent = entries; i < ichdr.count; ent++, i++) {
+ fa = xfs_attr3_leaf_verify_entry(mp, buf_end, leaf, &ichdr,
+ ent, i, &last_hashval);
+ if (fa)
+ return fa;
+ }
/*
* Quickly check the freemap information. Attribute data has to be
@@ -367,13 +430,12 @@ xfs_attr3_leaf_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
- XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
+ err = xfs_da_read_buf(tp, dp, bno, 0, bpp, XFS_ATTR_FORK,
+ &xfs_attr3_leaf_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
return err;
@@ -453,13 +515,15 @@ xfs_attr_copy_value(
* special case for dev/uuid inodes, they have fixed size data forks.
*/
int
-xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
+xfs_attr_shortform_bytesfit(
+ struct xfs_inode *dp,
+ int bytes)
{
- int offset;
- int minforkoff; /* lower limit on valid forkoff locations */
- int maxforkoff; /* upper limit on valid forkoff locations */
- int dsize;
- xfs_mount_t *mp = dp->i_mount;
+ struct xfs_mount *mp = dp->i_mount;
+ int64_t dsize;
+ int minforkoff;
+ int maxforkoff;
+ int offset;
/* rounded down */
offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;
@@ -525,7 +589,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
* A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
- minforkoff = max(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
+ minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
minforkoff = roundup(minforkoff, 8) >> 3;
/* attr fork btree root can have at least this many key/ptr pairs */
@@ -764,7 +828,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args)
}
/*
- * Retreive the attribute value and length.
+ * Retrieve the attribute value and length.
*
* If ATTR_KERNOVAL is specified, only the length needs to be returned.
* Unlike a lookup, we only return an error if the attribute does not
@@ -924,7 +988,7 @@ xfs_attr_shortform_verify(
char *endp;
struct xfs_ifork *ifp;
int i;
- int size;
+ int64_t size;
ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
@@ -1080,7 +1144,6 @@ xfs_attr3_leaf_to_node(
struct xfs_attr_leafblock *leaf;
struct xfs_attr3_icleaf_hdr icleafhdr;
struct xfs_attr_leaf_entry *entries;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr icnodehdr;
struct xfs_da_intnode *node;
struct xfs_inode *dp = args->dp;
@@ -1095,11 +1158,11 @@ xfs_attr3_leaf_to_node(
error = xfs_da_grow_inode(args, &blkno);
if (error)
goto out;
- error = xfs_attr3_leaf_read(args->trans, dp, 0, -1, &bp1);
+ error = xfs_attr3_leaf_read(args->trans, dp, 0, &bp1);
if (error)
goto out;
- error = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp2, XFS_ATTR_FORK);
+ error = xfs_da_get_buf(args->trans, dp, blkno, &bp2, XFS_ATTR_FORK);
if (error)
goto out;
@@ -1120,18 +1183,17 @@ xfs_attr3_leaf_to_node(
if (error)
goto out;
node = bp1->b_addr;
- dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(mp, &icnodehdr, node);
leaf = bp2->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
/* both on-disk, don't endian-flip twice */
- btree[0].hashval = entries[icleafhdr.count - 1].hashval;
- btree[0].before = cpu_to_be32(blkno);
+ icnodehdr.btree[0].hashval = entries[icleafhdr.count - 1].hashval;
+ icnodehdr.btree[0].before = cpu_to_be32(blkno);
icnodehdr.count = 1;
- dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &icnodehdr);
xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
error = 0;
out:
@@ -1161,7 +1223,7 @@ xfs_attr3_leaf_create(
trace_xfs_attr_leaf_create(args);
- error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
+ error = xfs_da_get_buf(args->trans, args->dp, blkno, &bp,
XFS_ATTR_FORK);
if (error)
return error;
@@ -1447,7 +1509,9 @@ xfs_attr3_leaf_add_work(
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr->freemap[i].base == tmp) {
ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
+ ichdr->freemap[i].size -=
+ min_t(uint16_t, ichdr->freemap[i].size,
+ sizeof(xfs_attr_leaf_entry_t));
}
}
ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
@@ -1931,7 +1995,7 @@ xfs_attr3_leaf_toosmall(
if (blkno == 0)
continue;
error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
- blkno, -1, &bp);
+ blkno, &bp);
if (error)
return error;
@@ -2281,8 +2345,10 @@ xfs_attr3_leaf_lookup_int(
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- if (ichdr.count >= args->geo->blksize / 8)
+ if (ichdr.count >= args->geo->blksize / 8) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Binary search. (note: small blocks will skip this loop)
@@ -2298,10 +2364,14 @@ xfs_attr3_leaf_lookup_int(
else
break;
}
- if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
+ if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
- if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
+ }
+ if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Since we may have duplicate hashval's, find the first matching
@@ -2661,7 +2731,7 @@ xfs_attr3_leaf_clearflag(
/*
* Set up the operation.
*/
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -2728,7 +2798,7 @@ xfs_attr3_leaf_setflag(
/*
* Set up the operation.
*/
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
if (error)
return error;
@@ -2790,7 +2860,7 @@ xfs_attr3_leaf_flipflags(
/*
* Read the block containing the "old" attr
*/
- error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp1);
+ error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp1);
if (error)
return error;
@@ -2799,7 +2869,7 @@ xfs_attr3_leaf_flipflags(
*/
if (args->blkno2 != args->blkno) {
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
- -1, &bp2);
+ &bp2);
if (error)
return error;
} else {
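
Editorial note: the new xfs_attr3_leaf_verify_entry() added earlier in this file enforces, per entry, that hashvals never decrease and that each name record stays inside the buffer. Reduced to its essence, the hash check is a plain sorted-order scan; a standalone sketch with simplified types, not kernel code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: the monotone-hashval invariant the verifier enforces. */
    static bool hashvals_ordered(const uint32_t *hashval, int count)
    {
            int i;

            for (i = 1; i < count; i++)
                    if (hashval[i] < hashval[i - 1])
                            return false;   /* out of order => corrupt leaf */
            return true;
    }
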
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 7b74e18becff..f4a188e28b7b 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -17,6 +17,29 @@ struct xfs_inode;
struct xfs_trans;
/*
+ * Incore version of the attribute leaf header.
+ */
+struct xfs_attr3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t usedbytes;
+ /*
+ * Firstused is 32-bit here instead of 16-bit like the on-disk variant
+ * to support a maximum fsb size of 64k without overflow issues throughout
+ * the attr code. Instead, the overflow condition is handled on
+ * conversion to/from disk.
+ */
+ uint32_t firstused;
+ __u8 holes;
+ struct {
+ uint16_t base;
+ uint16_t size;
+ } freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
+
+/*
* Used to keep a list of "remote value" extents when unlinking an inode.
*/
typedef struct xfs_attr_inactive_list {
@@ -67,8 +90,8 @@ int xfs_attr3_leaf_add(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
int xfs_attr3_leaf_remove(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
-void xfs_attr3_leaf_list_int(struct xfs_buf *bp,
- struct xfs_attr_list_context *context);
+int xfs_attr3_leaf_list_int(struct xfs_buf *bp,
+ struct xfs_attr_list_context *context);
/*
* Routines used for shrinking the Btree.
@@ -85,8 +108,7 @@ int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp);
int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int *local);
int xfs_attr3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bpp);
+ xfs_dablk_t bno, struct xfs_buf **bpp);
void xfs_attr3_leaf_hdr_from_disk(struct xfs_da_geometry *geo,
struct xfs_attr3_icleaf_hdr *to,
struct xfs_attr_leafblock *from);
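
Editorial note: per the comment in the relocated struct, the incore firstused is 32-bit while the on-disk field stays 16-bit, with overflow handled at conversion time. One common encoding for that is a sentinel value; the sketch below is illustrative only, with a hypothetical sentinel name and scheme, not quoted from XFS:

    #include <stdint.h>

    #define FIRSTUSED_64K_SENTINEL  0   /* hypothetical: 65536 can't fit in 16 bits */

    static uint16_t firstused_to_disk(uint32_t firstused, uint32_t blksize)
    {
            if (blksize == 65536 && firstused == blksize)
                    return FIRSTUSED_64K_SENTINEL;
            return (uint16_t)firstused;
    }

    static uint32_t firstused_from_disk(uint16_t disk, uint32_t blksize)
    {
            if (blksize == 65536 && disk == FIRSTUSED_64K_SENTINEL)
                    return blksize;
            return disk;
    }
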
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 3e39b7d40f25..a6ef5df42669 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -19,6 +19,7 @@
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
+#include "xfs_attr_remote.h"
#include "xfs_trace.h"
#include "xfs_error.h"
diff --git a/fs/xfs/libxfs/xfs_bit.c b/fs/xfs/libxfs/xfs_bit.c
index 7071ff98fdbc..40ce5f3094d1 100644
--- a/fs/xfs/libxfs/xfs_bit.c
+++ b/fs/xfs/libxfs/xfs_bit.c
@@ -5,6 +5,7 @@
*/
#include "xfs.h"
#include "xfs_log_format.h"
+#include "xfs_bit.h"
/*
* XFS bit manipulation routines, used in non-realtime code.
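
Editorial note: this one-liner, like the xfs_attr_remote.c hunk just above, makes the file include its own header so the compiler cross-checks every definition against its declaration. A tiny standalone illustration of the mismatch this catches, with hypothetical names; the second definition is deliberately a compile error:

    /* widget.h */
    int widget_count(const unsigned int *map, unsigned int size);

    /* widget.c -- with the self-include, a drifted signature is a hard error: */
    #include "widget.h"
    long widget_count(const unsigned long *map, unsigned int size)
    {                                   /* error: conflicting types */
            return 0;
    }
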
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 02469d59c787..4acc6e37c31d 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -34,6 +34,7 @@
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
+#include "xfs_iomap.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -383,8 +384,10 @@ xfs_bmap_check_leaf_extents(
xfs_check_block(block, mp, 0, 0);
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(mp,
- xfs_verify_fsbno(mp, bno), error0);
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (bp_release) {
bp_release = 0;
xfs_trans_brelse(NULL, bp);
@@ -611,8 +614,8 @@ xfs_bmap_btree_to_extents(
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
cbno = be64_to_cpu(*pp);
#ifdef DEBUG
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
- xfs_btree_check_lptr(cur, cbno, 1));
+ if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
+ return -EFSCORRUPTED;
#endif
error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
&xfs_bmbt_buf_ops);
@@ -728,7 +731,7 @@ xfs_bmap_extents_to_btree(
ip->i_d.di_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno);
- if (!abp) {
+ if (XFS_IS_CORRUPT(mp, !abp)) {
error = -EFSCORRUPTED;
goto out_unreserve_dquot;
}
@@ -936,7 +939,10 @@ xfs_bmap_add_attrfork_btree(
if (error)
goto error0;
/* must be at least one entry */
- XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
+ if (XFS_IS_CORRUPT(mp, stat != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
goto error0;
if (stat == 0) {
@@ -1083,7 +1089,7 @@ xfs_bmap_add_attrfork(
goto trans_cancel;
if (XFS_IFORK_Q(ip))
goto trans_cancel;
- if (ip->i_d.di_anextents != 0) {
+ if (XFS_IS_CORRUPT(mp, ip->i_d.di_anextents != 0)) {
error = -EFSCORRUPTED;
goto trans_cancel;
}
@@ -1154,6 +1160,65 @@ trans_cancel:
* Internal and external extent tree search functions.
*/
+struct xfs_iread_state {
+ struct xfs_iext_cursor icur;
+ xfs_extnum_t loaded;
+};
+
+/* Stuff every bmbt record from this block into the incore extent map. */
+static int
+xfs_iread_bmbt_block(
+ struct xfs_btree_cur *cur,
+ int level,
+ void *priv)
+{
+ struct xfs_iread_state *ir = priv;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_inode *ip = cur->bc_private.b.ip;
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ struct xfs_bmbt_rec *frp;
+ xfs_extnum_t num_recs;
+ xfs_extnum_t j;
+ int whichfork = cur->bc_private.b.whichfork;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ /* Abort if we find more records than nextents. */
+ num_recs = xfs_btree_get_numrecs(block);
+ if (unlikely(ir->loaded + num_recs >
+ XFS_IFORK_NEXTENTS(ip, whichfork))) {
+ xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
+ (unsigned long long)ip->i_ino);
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
+ sizeof(*block), __this_address);
+ return -EFSCORRUPTED;
+ }
+
+ /* Copy records into the incore cache. */
+ frp = XFS_BMBT_REC_ADDR(mp, block, 1);
+ for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
+ struct xfs_bmbt_irec new;
+ xfs_failaddr_t fa;
+
+ xfs_bmbt_disk_get_all(frp, &new);
+ fa = xfs_bmap_validate_extent(ip, whichfork, &new);
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED,
+ "xfs_iread_extents(2)", frp,
+ sizeof(*frp), fa);
+ return -EFSCORRUPTED;
+ }
+ xfs_iext_insert(ip, &ir->icur, &new,
+ xfs_bmap_fork_to_state(whichfork));
+ trace_xfs_read_extent(ip, &ir->icur,
+ xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
+ xfs_iext_next(XFS_IFORK_PTR(ip, whichfork), &ir->icur);
+ }
+
+ return 0;
+}
+
/*
* Read in extents from a btree-format inode.
*/
@@ -1163,134 +1228,39 @@ xfs_iread_extents(
struct xfs_inode *ip,
int whichfork)
{
- struct xfs_mount *mp = ip->i_mount;
- int state = xfs_bmap_fork_to_state(whichfork);
+ struct xfs_iread_state ir;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
- struct xfs_btree_block *block = ifp->if_broot;
- struct xfs_iext_cursor icur;
- struct xfs_bmbt_irec new;
- xfs_fsblock_t bno;
- struct xfs_buf *bp;
- xfs_extnum_t i, j;
- int level;
- __be64 *pp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_btree_cur *cur;
int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
-
- /*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
- */
- level = be16_to_cpu(block->bb_level);
- if (unlikely(level == 0)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
-
- /*
- * Go down the tree until leaf level is reached, following the first
- * pointer (leftmost) at each level.
- */
- while (level-- > 0) {
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
- if (error)
- goto out;
- block = XFS_BUF_TO_BLOCK(bp);
- if (level == 0)
- break;
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- XFS_WANT_CORRUPTED_GOTO(mp,
- xfs_verify_fsbno(mp, bno), out_brelse);
- xfs_trans_brelse(tp, bp);
+ if (XFS_IS_CORRUPT(mp,
+ XFS_IFORK_FORMAT(ip, whichfork) !=
+ XFS_DINODE_FMT_BTREE)) {
+ error = -EFSCORRUPTED;
+ goto out;
}
- /*
- * Here with bp and block set to the leftmost leaf node in the tree.
- */
- i = 0;
- xfs_iext_first(ifp, &icur);
-
- /*
- * Loop over all leaf nodes. Copy information to the extent records.
- */
- for (;;) {
- xfs_bmbt_rec_t *frp;
- xfs_fsblock_t nextbno;
- xfs_extnum_t num_recs;
-
- num_recs = xfs_btree_get_numrecs(block);
- if (unlikely(i + num_recs > nextents)) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %Lu, (btree extents).",
- (unsigned long long) ip->i_ino);
- xfs_inode_verifier_error(ip, -EFSCORRUPTED,
- __func__, block, sizeof(*block),
- __this_address);
- error = -EFSCORRUPTED;
- goto out_brelse;
- }
- /*
- * Read-ahead the next leaf block, if any.
- */
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- if (nextbno != NULLFSBLOCK)
- xfs_btree_reada_bufl(mp, nextbno, 1,
- &xfs_bmbt_buf_ops);
- /*
- * Copy records into the extent records.
- */
- frp = XFS_BMBT_REC_ADDR(mp, block, 1);
- for (j = 0; j < num_recs; j++, frp++, i++) {
- xfs_failaddr_t fa;
-
- xfs_bmbt_disk_get_all(frp, &new);
- fa = xfs_bmap_validate_extent(ip, whichfork, &new);
- if (fa) {
- error = -EFSCORRUPTED;
- xfs_inode_verifier_error(ip, error,
- "xfs_iread_extents(2)",
- frp, sizeof(*frp), fa);
- goto out_brelse;
- }
- xfs_iext_insert(ip, &icur, &new, state);
- trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
- xfs_iext_next(ifp, &icur);
- }
- xfs_trans_brelse(tp, bp);
- bno = nextbno;
- /*
- * If we've reached the end, stop.
- */
- if (bno == NULLFSBLOCK)
- break;
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
- if (error)
- goto out;
- block = XFS_BUF_TO_BLOCK(bp);
- }
+ ir.loaded = 0;
+ xfs_iext_first(ifp, &ir.icur);
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
+ XFS_BTREE_VISIT_RECORDS, &ir);
+ xfs_btree_del_cursor(cur, error);
+ if (error)
+ goto out;
- if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
+ if (XFS_IS_CORRUPT(mp,
+ ir.loaded != XFS_IFORK_NEXTENTS(ip, whichfork))) {
error = -EFSCORRUPTED;
goto out;
}
- ASSERT(i == xfs_iext_count(ifp));
+ ASSERT(ir.loaded == xfs_iext_count(ifp));
ifp->if_flags |= XFS_IFEXTENTS;
return 0;
-
-out_brelse:
- xfs_trans_brelse(tp, bp);
out:
xfs_iext_destroy(ifp);
return error;
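
Editorial note: the rewrite above replaces roughly a hundred lines of hand-rolled tree walking with xfs_btree_visit_blocks() plus a per-block callback, passing the new XFS_BTREE_VISIT_RECORDS flag so only record-bearing (level 0) blocks are visited. As a hedged sketch of the same pattern applied to a different job -- counting leaf records -- assuming only the APIs visible in this diff:

    struct rec_count { unsigned long long nr; };

    /* Sketch: per-block callback in the style of xfs_iread_bmbt_block(). */
    static int
    count_bmbt_records(struct xfs_btree_cur *cur, int level, void *priv)
    {
            struct rec_count        *rc = priv;
            struct xfs_btree_block  *block;
            struct xfs_buf          *bp;

            block = xfs_btree_get_block(cur, level, &bp);
            rc->nr += xfs_btree_get_numrecs(block);
            return 0;
    }

    /*
     * cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
     * error = xfs_btree_visit_blocks(cur, count_bmbt_records,
     *              XFS_BTREE_VISIT_RECORDS, &rc);
     * xfs_btree_del_cursor(cur, error);
     */
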
@@ -1317,8 +1287,7 @@ xfs_bmap_first_unused(
xfs_fileoff_t lowest, max;
int error;
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
+ ASSERT(xfs_ifork_has_extents(ip, whichfork) ||
XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
@@ -1374,7 +1343,8 @@ xfs_bmap_last_before(
case XFS_DINODE_FMT_EXTENTS:
break;
default:
- return -EIO;
+ ASSERT(0);
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -1473,9 +1443,8 @@ xfs_bmap_last_offset(
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
return 0;
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- return -EIO;
+ if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ip, whichfork)))
+ return -EFSCORRUPTED;
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
if (error || is_empty)
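
Editorial note: nearly everything from here to the end of the file follows one mechanical conversion: the XFS_WANT_CORRUPTED_GOTO/RETURN macros, which hid an error assignment and a jump, become explicit XFS_IS_CORRUPT() tests. A sketch of the shape, drawn from the hunks below; note the condition's sense is inverted:

    /* Before: control flow hidden inside the macro. */
    XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);

    /* After: the corruption report and the error path are spelled out. */
    if (XFS_IS_CORRUPT(mp, i != 1)) {   /* reports corruption when true */
            error = -EFSCORRUPTED;
            goto done;
    }
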
@@ -1652,15 +1621,24 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_delete(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(bma->cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
@@ -1686,7 +1664,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
@@ -1716,7 +1697,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &PREV);
if (error)
goto done;
@@ -1741,11 +1725,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -1776,7 +1766,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &LEFT);
if (error)
goto done;
@@ -1797,11 +1790,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -1842,7 +1841,10 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(bma->cur, &RIGHT);
if (error)
goto done;
@@ -1874,11 +1876,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -1954,11 +1962,17 @@ xfs_bmap_add_extent_delay_real(
error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(bma->cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
@@ -2152,19 +2166,34 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &LEFT);
if (error)
goto done;
@@ -2190,13 +2219,22 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &LEFT);
if (error)
goto done;
@@ -2225,13 +2263,22 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_decrement(cur, 0, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2254,7 +2301,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2284,7 +2334,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2318,14 +2371,20 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
cur->bc_rec.b = *new;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -2352,7 +2411,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
@@ -2386,17 +2448,26 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &PREV);
if (error)
goto done;
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -2430,7 +2501,10 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/* new right extent - oldext */
error = xfs_bmbt_update(cur, &r[1]);
if (error)
@@ -2439,7 +2513,10 @@ xfs_bmap_add_extent_unwritten_real(
cur->bc_rec.b = PREV;
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Reset the cursor to the position of the new extent
* we are about to insert as we can't trust it after
@@ -2448,11 +2525,17 @@ xfs_bmap_add_extent_unwritten_real(
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/* new middle extent - newext */
if ((error = xfs_btree_insert(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
@@ -2735,15 +2818,24 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, &right, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
@@ -2769,7 +2861,10 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &left);
if (error)
goto done;
@@ -2796,7 +2891,10 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, &old, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_bmbt_update(cur, &right);
if (error)
goto done;
@@ -2819,11 +2917,17 @@ xfs_bmap_add_extent_hole_real(
error = xfs_bmbt_lookup_eq(cur, new, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
break;
}
@@ -3058,7 +3162,7 @@ xfs_bmap_adjacent(
mp = ap->ip->i_mount;
nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
rt = XFS_IS_REALTIME_INODE(ap->ip) &&
- xfs_alloc_is_userdata(ap->datatype);
+ (ap->datatype & XFS_ALLOC_USERDATA);
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
ap->tp->t_firstblock);
/*
@@ -3411,7 +3515,7 @@ xfs_bmap_btalloc(
if (ap->flags & XFS_BMAPI_COWFORK)
align = xfs_get_cowextsz_hint(ap->ip);
- else if (xfs_alloc_is_userdata(ap->datatype))
+ else if (ap->datatype & XFS_ALLOC_USERDATA)
align = xfs_get_extsz_hint(ap->ip);
if (align) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3426,7 +3530,7 @@ xfs_bmap_btalloc(
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
ap->tp->t_firstblock);
if (nullfb) {
- if (xfs_alloc_is_userdata(ap->datatype) &&
+ if ((ap->datatype & XFS_ALLOC_USERDATA) &&
xfs_inode_is_filestream(ap->ip)) {
ag = xfs_filestream_lookup_ag(ap->ip);
ag = (ag != NULLAGNUMBER) ? ag : 0;
@@ -3466,7 +3570,7 @@ xfs_bmap_btalloc(
* enough for the request. If one isn't found, then adjust
* the minimum allocation size to the largest space found.
*/
- if (xfs_alloc_is_userdata(ap->datatype) &&
+ if ((ap->datatype & XFS_ALLOC_USERDATA) &&
xfs_inode_is_filestream(ap->ip))
error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
else
@@ -3500,13 +3604,11 @@ xfs_bmap_btalloc(
args.mod = args.prod - args.mod;
}
/*
- * If we are not low on available data blocks, and the
- * underlying logical volume manager is a stripe, and
- * the file offset is zero then try to allocate data
- * blocks on stripe unit boundary.
- * NOTE: ap->aeof is only set if the allocation length
- * is >= the stripe unit and the allocation offset is
- * at the end of file.
+ * If we are not low on available data blocks, and the underlying
+ * logical volume manager is a stripe, and the file offset is zero then
+ * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
+ * is only set if the allocation length is >= the stripe unit and the
+ * allocation offset is at the end of file.
*/
if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
if (!ap->offset) {
@@ -3514,9 +3616,11 @@ xfs_bmap_btalloc(
atype = args.type;
isaligned = 1;
/*
- * Adjust for alignment
+ * Adjust minlen to try and preserve alignment if we
+ * can't guarantee an aligned maxlen extent.
*/
- if (blen > args.alignment && blen <= args.maxlen)
+ if (blen > args.alignment &&
+ blen <= args.maxlen + args.alignment)
args.minlen = blen - args.alignment;
args.minalignslop = 0;
} else {
@@ -3554,8 +3658,6 @@ xfs_bmap_btalloc(
args.wasdel = ap->wasdel;
args.resv = XFS_AG_RESV_NONE;
args.datatype = ap->datatype;
- if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
- args.ip = ap->ip;
error = xfs_alloc_vextent(&args);
if (error)
@@ -3640,20 +3742,6 @@ xfs_bmap_btalloc(
return 0;
}
-/*
- * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
- * It figures out where to ask the underlying allocator to put the new extent.
- */
-STATIC int
-xfs_bmap_alloc(
- struct xfs_bmalloca *ap) /* bmap alloc argument struct */
-{
- if (XFS_IS_REALTIME_INODE(ap->ip) &&
- xfs_alloc_is_userdata(ap->datatype))
- return xfs_bmap_rtalloc(ap);
- return xfs_bmap_btalloc(ap);
-}
-
/* Trim extent to fit a logical block range. */
void
xfs_trim_extent(
@@ -3815,11 +3903,8 @@ xfs_bmapi_read(
XFS_BMAPI_COWFORK)));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -4010,6 +4095,39 @@ out_unreserve_quota:
}
static int
+xfs_bmap_alloc_userdata(
+ struct xfs_bmalloca *bma)
+{
+ struct xfs_mount *mp = bma->ip->i_mount;
+ int whichfork = xfs_bmapi_whichfork(bma->flags);
+ int error;
+
+ /*
+ * Set the data type being allocated. For the data fork, the first data
+ * in the file is treated differently to all other allocations. For the
+ * attribute fork, we only need to ensure the allocated range is not on
+ * the busy list.
+ */
+ bma->datatype = XFS_ALLOC_NOBUSY;
+ if (whichfork == XFS_DATA_FORK) {
+ bma->datatype |= XFS_ALLOC_USERDATA;
+ if (bma->offset == 0)
+ bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+
+ if (mp->m_dalign && bma->length >= mp->m_dalign) {
+ error = xfs_bmap_isaeof(bma, whichfork);
+ if (error)
+ return error;
+ }
+
+ if (XFS_IS_REALTIME_INODE(bma->ip))
+ return xfs_bmap_rtalloc(bma);
+ }
+
+ return xfs_bmap_btalloc(bma);
+}
+
+static int
xfs_bmapi_allocate(
struct xfs_bmalloca *bma)
{
@@ -4028,7 +4146,8 @@ xfs_bmapi_allocate(
if (bma->wasdel) {
bma->length = (xfs_extlen_t)bma->got.br_blockcount;
bma->offset = bma->got.br_startoff;
- xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
+ if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
+ bma->prev.br_startoff = NULLFILEOFF;
} else {
bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
if (!bma->eof)
@@ -4036,43 +4155,24 @@ xfs_bmapi_allocate(
bma->got.br_startoff - bma->offset);
}
- /*
- * Set the data type being allocated. For the data fork, the first data
- * in the file is treated differently to all other allocations. For the
- * attribute fork, we only need to ensure the allocated range is not on
- * the busy list.
- */
- if (!(bma->flags & XFS_BMAPI_METADATA)) {
- bma->datatype = XFS_ALLOC_NOBUSY;
- if (whichfork == XFS_DATA_FORK) {
- if (bma->offset == 0)
- bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
- else
- bma->datatype |= XFS_ALLOC_USERDATA;
- }
- if (bma->flags & XFS_BMAPI_ZERO)
- bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
- }
+ if (bma->flags & XFS_BMAPI_CONTIG)
+ bma->minlen = bma->length;
+ else
+ bma->minlen = 1;
- bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
+ if (bma->flags & XFS_BMAPI_METADATA)
+ error = xfs_bmap_btalloc(bma);
+ else
+ error = xfs_bmap_alloc_userdata(bma);
+ if (error || bma->blkno == NULLFSBLOCK)
+ return error;
- /*
- * Only want to do the alignment at the eof if it is userdata and
- * allocation length is larger than a stripe unit.
- */
- if (mp->m_dalign && bma->length >= mp->m_dalign &&
- !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
- error = xfs_bmap_isaeof(bma, whichfork);
+ if (bma->flags & XFS_BMAPI_ZERO) {
+ error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
if (error)
return error;
}
- error = xfs_bmap_alloc(bma);
- if (error)
- return error;
-
- if (bma->blkno == NULLFSBLOCK)
- return 0;
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
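
Editorial note: two behavioural points fall out of the hunk above: datatype selection moves into xfs_bmap_alloc_userdata(), and XFS_BMAPI_ZERO is no longer threaded to the allocator as XFS_ALLOC_USERDATA_ZERO -- the caller zeroes the extent itself once the allocation succeeds. A condensed sketch of the resulting flow, using only the names in this diff:

    /* Sketch: allocation dispatch after this patch. */
    if (bma->flags & XFS_BMAPI_METADATA)
            error = xfs_bmap_btalloc(bma);
    else
            error = xfs_bmap_alloc_userdata(bma); /* picks rt vs btalloc */
    if (error || bma->blkno == NULLFSBLOCK)
            return error;

    /* Zeroing is an ordinary post-allocation step, not an allocator flag. */
    if (bma->flags & XFS_BMAPI_ZERO)
            error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
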
/*
@@ -4312,11 +4412,8 @@ xfs_bmapi_write(
ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -4456,16 +4553,21 @@ int
xfs_bmapi_convert_delalloc(
struct xfs_inode *ip,
int whichfork,
- xfs_fileoff_t offset_fsb,
- struct xfs_bmbt_irec *imap,
+ xfs_off_t offset,
+ struct iomap *iomap,
unsigned int *seq)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
struct xfs_bmalloca bma = { NULL };
+ u16 flags = 0;
struct xfs_trans *tp;
int error;
+ if (whichfork == XFS_COW_FORK)
+ flags |= IOMAP_F_SHARED;
+
/*
* Space for the extent and indirect blocks was reserved when the
* delalloc extent was created so there's no need to do so here.
@@ -4495,7 +4597,7 @@ xfs_bmapi_convert_delalloc(
* the extent. Just return the real extent at this offset.
*/
if (!isnullstartblock(bma.got.br_startblock)) {
- *imap = bma.got;
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
*seq = READ_ONCE(ifp->if_seq);
goto out_trans_cancel;
}
@@ -4505,7 +4607,6 @@ xfs_bmapi_convert_delalloc(
bma.wasdel = true;
bma.offset = bma.got.br_startoff;
bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
- bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
if (whichfork == XFS_COW_FORK)
bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
@@ -4528,7 +4629,7 @@ xfs_bmapi_convert_delalloc(
XFS_STATS_INC(mp, xs_xstrat_quick);
ASSERT(!isnullstartblock(bma.got.br_startblock));
- *imap = bma.got;
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
*seq = READ_ONCE(ifp->if_seq);
if (whichfork == XFS_COW_FORK)
@@ -4578,11 +4679,8 @@ xfs_bmapi_remap(
ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5013,7 +5111,10 @@ xfs_bmap_del_extent_real(
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
}
if (got.br_startoff == del->br_startoff)
@@ -5037,7 +5138,10 @@ xfs_bmap_del_extent_real(
}
if ((error = xfs_btree_delete(cur, &i)))
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case BMAP_LEFT_FILLING:
/*
@@ -5108,7 +5212,10 @@ xfs_bmap_del_extent_real(
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Update the btree record back
* to the original value.
@@ -5125,7 +5232,10 @@ xfs_bmap_del_extent_real(
error = -ENOSPC;
goto done;
}
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
} else
flags |= xfs_ilog_fext(whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork,
@@ -5192,7 +5302,7 @@ __xfs_bunmapi(
int isrt; /* freeing in rt area */
int logflags; /* transaction logging flags */
xfs_extlen_t mod; /* rt extent offset */
- struct xfs_mount *mp; /* mount structure */
+ struct xfs_mount *mp = ip->i_mount;
int tmp_logflags; /* partial logging flags */
int wasdel; /* was a delayed alloc extent */
int whichfork; /* data or attribute fork */
@@ -5209,14 +5319,8 @@ __xfs_bunmapi(
whichfork = xfs_bmapi_whichfork(flags);
ASSERT(whichfork != XFS_COW_FORK);
ifp = XFS_IFORK_PTR(ip, whichfork);
- if (unlikely(
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
- XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
- ip->i_mount);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)))
return -EFSCORRUPTED;
- }
- mp = ip->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -5610,18 +5714,21 @@ xfs_bmse_merge(
error = xfs_bmbt_lookup_eq(cur, got, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_btree_delete(cur, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
/* lookup and update size of the previous extent */
error = xfs_bmbt_lookup_eq(cur, left, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_bmbt_update(cur, &new);
if (error)
@@ -5669,7 +5776,8 @@ xfs_bmap_shift_update_extent(
error = xfs_bmbt_lookup_eq(cur, &prev, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
+ if (XFS_IS_CORRUPT(mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_bmbt_update(cur, got);
if (error)
@@ -5705,11 +5813,8 @@ xfs_bmap_collapse_extents(
int error = 0;
int logflags = 0;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5733,8 +5838,10 @@ xfs_bmap_collapse_extents(
*done = true;
goto del_cursor;
}
- XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
- del_cursor);
+ if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
new_startoff = got.br_startoff - offset_shift_fsb;
if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
@@ -5823,11 +5930,8 @@ xfs_bmap_insert_extents(
int error = 0;
int logflags = 0;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5860,11 +5964,14 @@ xfs_bmap_insert_extents(
goto del_cursor;
}
}
- XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
- del_cursor);
+ if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
- if (stop_fsb >= got.br_startoff + got.br_blockcount) {
- error = -EIO;
+ if (XFS_IS_CORRUPT(mp,
+ stop_fsb >= got.br_startoff + got.br_blockcount)) {
+ error = -EFSCORRUPTED;
goto del_cursor;
}
@@ -5929,12 +6036,8 @@ xfs_bmap_split_extent_at(
int logflags = 0;
int i = 0;
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5968,7 +6071,10 @@ xfs_bmap_split_extent_at(
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
}
got.br_blockcount = gotblkcnt;
@@ -5993,11 +6099,17 @@ xfs_bmap_split_extent_at(
error = xfs_bmbt_lookup_eq(cur, &new, &i);
if (error)
goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
error = xfs_btree_insert(cur, &i);
if (error)
goto del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto del_cursor;
+ }
}
/*
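
Editorial note: xfs_bmapi_convert_delalloc() now takes a byte offset and hands back a filled struct iomap instead of a raw xfs_bmbt_irec, so the writeback caller no longer performs its own bmbt-to-iomap translation. A hedged sketch of the caller-side simplification; the caller shape is assumed, not quoted from this series:

    /* Before: the caller converted the irec itself. */
    error = xfs_bmapi_convert_delalloc(ip, whichfork, offset_fsb, &imap, &seq);
    if (!error)
            xfs_bmbt_to_iomap(ip, iomap, &imap, flags);

    /* After: conversion happens inside; IOMAP_F_SHARED is set for the COW fork. */
    error = xfs_bmapi_convert_delalloc(ip, whichfork, offset, iomap, &seq);
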
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index e2798c6f3a5f..14d25e0b7d9c 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -228,8 +228,7 @@ int xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
struct xfs_bmbt_irec *got, struct xfs_iext_cursor *cur,
int eof);
int xfs_bmapi_convert_delalloc(struct xfs_inode *ip, int whichfork,
- xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap,
- unsigned int *seq);
+ xfs_off_t offset, struct iomap *iomap, unsigned int *seq);
int xfs_bmap_add_extent_unwritten_real(struct xfs_trans *tp,
struct xfs_inode *ip, int whichfork,
struct xfs_iext_cursor *icur, struct xfs_btree_cur **curp,
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 71de937f9e64..e2cc98931552 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -105,11 +105,10 @@ xfs_btree_check_lblock(
xfs_failaddr_t fa;
fa = __xfs_btree_check_lblock(cur, block, level, bp);
- if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
- XFS_ERRTAG_BTREE_CHECK_LBLOCK))) {
+ if (XFS_IS_CORRUPT(mp, fa != NULL) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK)) {
if (bp)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -169,11 +168,10 @@ xfs_btree_check_sblock(
xfs_failaddr_t fa;
fa = __xfs_btree_check_sblock(cur, block, level, bp);
- if (unlikely(XFS_TEST_ERROR(fa != NULL, mp,
- XFS_ERRTAG_BTREE_CHECK_SBLOCK))) {
+ if (XFS_IS_CORRUPT(mp, fa != NULL) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK)) {
if (bp)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -384,7 +382,7 @@ xfs_btree_del_cursor(
/*
* Free the cursor.
*/
- kmem_zone_free(xfs_btree_cur_zone, cur);
+ kmem_cache_free(xfs_btree_cur_zone, cur);
}
/*
@@ -717,25 +715,6 @@ xfs_btree_get_bufs(
}
/*
- * Check for the cursor referring to the last block at the given level.
- */
-int /* 1=is last block, 0=not last block */
-xfs_btree_islastblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level) /* level to check */
-{
- struct xfs_btree_block *block; /* generic btree block pointer */
- xfs_buf_t *bp; /* buffer containing block */
-
- block = xfs_btree_get_block(cur, level, &bp);
- xfs_btree_check_block(cur, block, level, bp);
- if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
- else
- return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
-}
-
-/*
* Change the cursor to point to the first record at the given level.
* Other levels are unaffected.
*/
@@ -1820,6 +1799,7 @@ xfs_btree_lookup_get_block(
out_bad:
*blkp = NULL;
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(cur->bc_tp, bp);
return -EFSCORRUPTED;
}
@@ -1867,7 +1847,7 @@ xfs_btree_lookup(
XFS_BTREE_STATS_INC(cur, lookup);
/* No such thing as a zero-level tree. */
- if (cur->bc_nlevels == 0)
+ if (XFS_IS_CORRUPT(cur->bc_mp, cur->bc_nlevels == 0))
return -EFSCORRUPTED;
block = NULL;
@@ -1987,7 +1967,8 @@ xfs_btree_lookup(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
*stat = 1;
return 0;
}
@@ -2442,7 +2423,10 @@ xfs_btree_lshift(
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_decrement(tcur, level, &i);
if (error)
@@ -2609,7 +2593,10 @@ xfs_btree_rshift(
if (error)
goto error0;
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(tcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_increment(tcur, level, &i);
if (error)
@@ -3463,7 +3450,10 @@ xfs_btree_insert(
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
level++;
/*
@@ -3867,15 +3857,24 @@ xfs_btree_delrec(
* Actually any entry but the first would suffice.
*/
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/* Grab a pointer to the block. */
right = xfs_btree_get_block(tcur, level, &rbp);
@@ -3919,12 +3918,18 @@ xfs_btree_delrec(
rrecs = xfs_btree_get_numrecs(right);
if (!xfs_btree_ptr_is_null(cur, &lptr)) {
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
}
@@ -3938,13 +3943,19 @@ xfs_btree_delrec(
* previous block.
*/
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_btree_decrement(tcur, level, &i);
if (error)
goto error0;
i = xfs_btree_firstrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/* Grab a pointer to the block. */
left = xfs_btree_get_block(tcur, level, &lbp);
@@ -4286,6 +4297,7 @@ int
xfs_btree_visit_blocks(
struct xfs_btree_cur *cur,
xfs_btree_visit_blocks_fn fn,
+ unsigned int flags,
void *data)
{
union xfs_btree_ptr lptr;
@@ -4311,6 +4323,11 @@ xfs_btree_visit_blocks(
/* save for the next iteration of the loop */
xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
+
+ if (!(flags & XFS_BTREE_VISIT_LEAVES))
+ continue;
+ } else if (!(flags & XFS_BTREE_VISIT_RECORDS)) {
+ continue;
}
/* for each buffer in the level */
@@ -4413,7 +4430,7 @@ xfs_btree_change_owner(
bbcoi.buffer_list = buffer_list;
return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
- &bbcoi);
+ XFS_BTREE_VISIT_ALL, &bbcoi);
}
/* Verify the v5 fields of a long-format btree block. */
@@ -4865,7 +4882,7 @@ xfs_btree_count_blocks(
{
*blocks = 0;
return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
- blocks);
+ XFS_BTREE_VISIT_ALL, blocks);
}
/* Compare two btree pointers. */
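
Editorial note: the kmem_zone_free() wrapper is dropped in favour of calling kmem_cache_free() directly, since an XFS "zone" has always been a plain slab cache underneath. A sketch of the resulting alloc/free pairing; the allocation-side spelling shown here is the one still in use at this point in the series:

    kmem_zone_t *xfs_btree_cur_zone;    /* really a struct kmem_cache * */

    /* Allocation keeps the zone spelling for now... */
    cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

    /* ...while frees pair with the slab API directly. */
    kmem_cache_free(xfs_btree_cur_zone, cur);
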
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index ced1e65d1483..fb9b2121c628 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -183,6 +183,9 @@ union xfs_btree_cur_private {
unsigned long nr_ops; /* # record updates */
int shape_changes; /* # of extent splits */
} refc;
+ struct {
+ bool active; /* allocation cursor state */
+ } abt;
};
/*
@@ -315,14 +318,6 @@ xfs_btree_get_bufs(
xfs_agblock_t agbno); /* allocation group block number */
/*
- * Check for the cursor referring to the last block at the given level.
- */
-int /* 1=is last block, 0=not last block */
-xfs_btree_islastblock(
- xfs_btree_cur_t *cur, /* btree cursor */
- int level); /* level to check */
-
-/*
* Compute first and last byte offsets for the fields given.
* Interprets the offsets table, which contains struct field offsets.
*/
@@ -482,8 +477,15 @@ int xfs_btree_query_all(struct xfs_btree_cur *cur, xfs_btree_query_range_fn fn,
typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
void *data);
+/* Visit record blocks. */
+#define XFS_BTREE_VISIT_RECORDS (1 << 0)
+/* Visit leaf blocks. */
+#define XFS_BTREE_VISIT_LEAVES (1 << 1)
+/* Visit all blocks. */
+#define XFS_BTREE_VISIT_ALL (XFS_BTREE_VISIT_RECORDS | \
+ XFS_BTREE_VISIT_LEAVES)
int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
- xfs_btree_visit_blocks_fn fn, void *data);
+ xfs_btree_visit_blocks_fn fn, unsigned int flags, void *data);
int xfs_btree_count_blocks(struct xfs_btree_cur *cur, xfs_extlen_t *blocks);
@@ -514,4 +516,21 @@ int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
union xfs_btree_irec *high, bool *exists);
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
+/* Does this cursor point to the last block in the given level? */
+static inline bool
+xfs_btree_islastblock(
+ xfs_btree_cur_t *cur,
+ int level)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ ASSERT(block && xfs_btree_check_block(cur, block, level, bp) == 0);
+
+ if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+ return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
+ return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
+}
+
#endif /* __XFS_BTREE_H__ */
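
Editorial note: xfs_btree_islastblock() moves out of xfs_btree.c into the header inline above, now returning bool and asserting (rather than propagating) the block-check result. A short usage sketch, assuming a rightward walk over a single level:

    /* Sketch: stop a same-level walk at the rightmost block. */
    for (;;) {
            /* ... process the current block ... */
            if (xfs_btree_islastblock(cur, level))
                    break;          /* no right sibling */
            error = xfs_btree_increment(cur, level, &stat);
            if (error || !stat)
                    break;
    }
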
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 4fd1223c1bd5..8c3eafe280ed 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -12,9 +12,9 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
-#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr_leaf.h"
@@ -107,7 +107,66 @@ xfs_da_state_free(xfs_da_state_t *state)
#ifdef DEBUG
memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
- kmem_zone_free(xfs_da_state_zone, state);
+ kmem_cache_free(xfs_da_state_zone, state);
+}
+
+static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
+{
+ if (whichfork == XFS_DATA_FORK)
+ return mp->m_dir_geo->fsbcount;
+ return mp->m_attr_geo->fsbcount;
+}
+
+void
+xfs_da3_node_hdr_from_disk(
+ struct xfs_mount *mp,
+ struct xfs_da3_icnode_hdr *to,
+ struct xfs_da_intnode *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
+
+ to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
+ to->back = be32_to_cpu(from3->hdr.info.hdr.back);
+ to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
+ to->count = be16_to_cpu(from3->hdr.__count);
+ to->level = be16_to_cpu(from3->hdr.__level);
+ to->btree = from3->__btree;
+ ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
+ } else {
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.__count);
+ to->level = be16_to_cpu(from->hdr.__level);
+ to->btree = from->__btree;
+ ASSERT(to->magic == XFS_DA_NODE_MAGIC);
+ }
+}
+
+void
+xfs_da3_node_hdr_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_da_intnode *to,
+ struct xfs_da3_icnode_hdr *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
+
+ ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
+ to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
+ to3->hdr.info.hdr.back = cpu_to_be32(from->back);
+ to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
+ to3->hdr.__count = cpu_to_be16(from->count);
+ to3->hdr.__level = cpu_to_be16(from->level);
+ } else {
+ ASSERT(from->magic == XFS_DA_NODE_MAGIC);
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.__count = cpu_to_be16(from->count);
+ to->hdr.__level = cpu_to_be16(from->level);
+ }
}
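[These two helpers replace the per-format node_hdr_from_disk/node_hdr_to_disk methods that used to hang off dp->d_ops; the superblock CRC feature bit selects the v2 or v3 layout, and the in-core header carries a pointer to the entry array. The caller pattern used throughout the rest of this patch is:]

	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_node_entry	*btree;

	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
	btree = nodehdr.btree;	/* entries behind the v2/v3 header */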
/*
@@ -145,12 +204,9 @@ xfs_da3_node_verify(
struct xfs_mount *mp = bp->b_mount;
struct xfs_da_intnode *hdr = bp->b_addr;
struct xfs_da3_icnode_hdr ichdr;
- const struct xfs_dir_ops *ops;
xfs_failaddr_t fa;
- ops = xfs_dir_get_ops(mp, NULL);
-
- ops->node_hdr_from_disk(&ichdr, hdr);
+ xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
if (fa)
@@ -275,46 +331,76 @@ const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.verify_struct = xfs_da3_node_verify_struct,
};
+static int
+xfs_da3_node_set_type(
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DA_NODE_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
+ return 0;
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
+ return 0;
+ case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
+ return 0;
+ default:
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
+ info, sizeof(*info));
+ xfs_trans_brelse(tp, bp);
+ return -EFSCORRUPTED;
+ }
+}
+
int
xfs_da3_node_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
+ struct xfs_buf **bpp,
+ int whichfork)
+{
+ int error;
+
+ error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
+ &xfs_da3_node_buf_ops);
+ if (error || !*bpp || !tp)
+ return error;
+ return xfs_da3_node_set_type(tp, *bpp);
+}
+
+int
+xfs_da3_node_read_mapped(
+ struct xfs_trans *tp,
+ struct xfs_inode *dp,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
- int which_fork)
+ int whichfork)
{
- int err;
+ struct xfs_mount *mp = dp->i_mount;
+ int error;
- err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
- which_fork, &xfs_da3_node_buf_ops);
- if (!err && tp && *bpp) {
- struct xfs_da_blkinfo *info = (*bpp)->b_addr;
- int type;
+ error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
+ XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
+ bpp, &xfs_da3_node_buf_ops);
+ if (error || !*bpp)
+ return error;
- switch (be16_to_cpu(info->magic)) {
- case XFS_DA_NODE_MAGIC:
- case XFS_DA3_NODE_MAGIC:
- type = XFS_BLFT_DA_NODE_BUF;
- break;
- case XFS_ATTR_LEAF_MAGIC:
- case XFS_ATTR3_LEAF_MAGIC:
- type = XFS_BLFT_ATTR_LEAF_BUF;
- break;
- case XFS_DIR2_LEAFN_MAGIC:
- case XFS_DIR3_LEAFN_MAGIC:
- type = XFS_BLFT_DIR_LEAFN_BUF;
- break;
- default:
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
- tp->t_mountp, info, sizeof(*info));
- xfs_trans_brelse(tp, *bpp);
- *bpp = NULL;
- return -EFSCORRUPTED;
- }
- xfs_trans_buf_set_type(tp, *bpp, type);
- }
- return err;
+ if (whichfork == XFS_ATTR_FORK)
+ xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
+ else
+ xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);
+
+ if (!tp)
+ return 0;
+ return xfs_da3_node_set_type(tp, *bpp);
}
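[xfs_da3_node_read() loses the overloaded mappedbno argument: logical (dablk) reads go through it, while callers that already hold a disk address use the new xfs_da3_node_read_mapped(). Both funnel into xfs_da3_node_set_type(), which tags the buffer log item type from the block magic. The two call shapes, in sketch form:]

	/* logical lookup, as in xfs_da3_root_join() further down: */
	error = xfs_da3_node_read(args->trans, dp, child, &bp,
			args->whichfork);

	/* physical lookup when the daddr is already known (sketch): */
	error = xfs_da3_node_read_mapped(tp, dp, mappedbno, &bp, whichfork);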
/*========================================================================
@@ -343,7 +429,7 @@ xfs_da3_node_create(
trace_xfs_da_node_create(args);
ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
- error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
+ error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
if (error)
return error;
bp->b_ops = &xfs_da3_node_buf_ops;
@@ -363,9 +449,9 @@ xfs_da3_node_create(
}
ichdr.level = level;
- dp->d_ops->node_hdr_to_disk(node, &ichdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
xfs_trans_log_buf(tp, bp,
- XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
*bpp = bp;
return 0;
@@ -504,6 +590,7 @@ xfs_da3_split(
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ xfs_buf_corruption_error(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
@@ -516,6 +603,7 @@ xfs_da3_split(
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ xfs_buf_corruption_error(oldblk->bp);
error = -EFSCORRUPTED;
goto out;
}
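[Both sibling checks in xfs_da3_split() now report the offending buffer before bailing out, via the xfs_buf_corruption_error() helper added elsewhere in this series, rather than returning a bare -EFSCORRUPTED. The idiom:]

	/* report the corrupt buffer, then fail the operation */
	if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
		xfs_buf_corruption_error(oldblk->bp);
		error = -EFSCORRUPTED;
		goto out;
	}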
@@ -568,7 +656,7 @@ xfs_da3_root_split(
dp = args->dp;
tp = args->trans;
- error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
+ error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
if (error)
return error;
node = bp->b_addr;
@@ -577,8 +665,8 @@ xfs_da3_root_split(
oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
struct xfs_da3_icnode_hdr icnodehdr;
- dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
- btree = dp->d_ops->node_tree_p(oldroot);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
+ btree = icnodehdr.btree;
size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
level = icnodehdr.level;
@@ -589,15 +677,14 @@ xfs_da3_root_split(
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
} else {
struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
leaf = (xfs_dir2_leaf_t *)oldroot;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
- size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
+ size = (int)((char *)&leafhdr.ents[leafhdr.count] -
+ (char *)leaf);
level = 0;
/*
@@ -637,14 +724,14 @@ xfs_da3_root_split(
return error;
node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
btree[0].hashval = cpu_to_be32(blk1->hashval);
btree[0].before = cpu_to_be32(blk1->blkno);
btree[1].hashval = cpu_to_be32(blk2->hashval);
btree[1].before = cpu_to_be32(blk2->blkno);
nodehdr.count = 2;
- dp->d_ops->node_hdr_to_disk(node, &nodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
#ifdef DEBUG
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
@@ -686,7 +773,7 @@ xfs_da3_node_split(
trace_xfs_da_node_split(state->args);
node = oldblk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
/*
* With V2 dirs the extra block is data or freespace.
@@ -733,7 +820,7 @@ xfs_da3_node_split(
* If we had double-split op below us, then add the extra block too.
*/
node = oldblk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
if (oldblk->index <= nodehdr.count) {
oldblk->index++;
xfs_da3_node_add(state, oldblk, addblk);
@@ -788,10 +875,10 @@ xfs_da3_node_rebalance(
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
- dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
/*
* Figure out how many entries need to move, and in which direction.
@@ -804,10 +891,10 @@ xfs_da3_node_rebalance(
tmpnode = node1;
node1 = node2;
node2 = tmpnode;
- dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
- dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
swap = 1;
}
@@ -869,14 +956,15 @@ xfs_da3_node_rebalance(
/*
* Log header of node 1 and all current bits of node 2.
*/
- dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
xfs_trans_log_buf(tp, blk1->bp,
- XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node1, &node1->hdr,
+ state->args->geo->node_hdr_size));
- dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
xfs_trans_log_buf(tp, blk2->bp,
XFS_DA_LOGRANGE(node2, &node2->hdr,
- dp->d_ops->node_hdr_size +
+ state->args->geo->node_hdr_size +
(sizeof(btree2[0]) * nodehdr2.count)));
/*
@@ -886,10 +974,10 @@ xfs_da3_node_rebalance(
if (swap) {
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
- dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
+ btree1 = nodehdr1.btree;
+ btree2 = nodehdr2.btree;
}
blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
@@ -921,8 +1009,8 @@ xfs_da3_node_add(
trace_xfs_da_node_add(state->args);
node = oldblk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
@@ -945,9 +1033,10 @@ xfs_da3_node_add(
tmp + sizeof(*btree)));
nodehdr.count += 1;
- dp->d_ops->node_hdr_to_disk(node, &nodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
- XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node, &node->hdr,
+ state->args->geo->node_hdr_size));
/*
* Copy the last hash value from the oldblk to propagate upwards.
@@ -1082,7 +1171,6 @@ xfs_da3_root_join(
xfs_dablk_t child;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr oldroothdr;
- struct xfs_da_node_entry *btree;
int error;
struct xfs_inode *dp = state->args->dp;
@@ -1092,7 +1180,7 @@ xfs_da3_root_join(
args = state->args;
oldroot = root_blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
ASSERT(oldroothdr.forw == 0);
ASSERT(oldroothdr.back == 0);
@@ -1106,11 +1194,9 @@ xfs_da3_root_join(
* Read in the (only) child block, then copy those bytes into
* the root block's buffer and free the original child block.
*/
- btree = dp->d_ops->node_tree_p(oldroot);
- child = be32_to_cpu(btree[0].before);
+ child = be32_to_cpu(oldroothdr.btree[0].before);
ASSERT(child != 0);
- error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
- args->whichfork);
+ error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
if (error)
return error;
xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
@@ -1172,7 +1258,7 @@ xfs_da3_node_toosmall(
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->b_addr;
node = (xfs_da_intnode_t *)info;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return 0; /* blk over 50%, don't try to join */
@@ -1224,13 +1310,13 @@ xfs_da3_node_toosmall(
blkno = nodehdr.back;
if (blkno == 0)
continue;
- error = xfs_da3_node_read(state->args->trans, dp,
- blkno, -1, &bp, state->args->whichfork);
+ error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
+ state->args->whichfork);
if (error)
return error;
node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&thdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
xfs_trans_brelse(state->args->trans, bp);
if (count - thdr.count >= 0)
@@ -1272,18 +1358,14 @@ xfs_da3_node_lasthash(
struct xfs_buf *bp,
int *count)
{
- struct xfs_da_intnode *node;
- struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
- node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
if (count)
*count = nodehdr.count;
if (!nodehdr.count)
return 0;
- btree = dp->d_ops->node_tree_p(node);
- return be32_to_cpu(btree[nodehdr.count - 1].hashval);
+ return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
}
/*
@@ -1328,8 +1410,8 @@ xfs_da3_fixhashpath(
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
break;
blk->hashval = lasthash;
@@ -1360,7 +1442,7 @@ xfs_da3_node_remove(
trace_xfs_da_node_remove(state->args);
node = drop_blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
ASSERT(drop_blk->index < nodehdr.count);
ASSERT(drop_blk->index >= 0);
@@ -1368,7 +1450,7 @@ xfs_da3_node_remove(
* Copy over the offending entry, or just zero it out.
*/
index = drop_blk->index;
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
if (index < nodehdr.count - 1) {
tmp = nodehdr.count - index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
@@ -1381,9 +1463,9 @@ xfs_da3_node_remove(
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
nodehdr.count -= 1;
- dp->d_ops->node_hdr_to_disk(node, &nodehdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
- XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
+ XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
/*
* Copy the last hash value from the block to propagate upwards.
@@ -1416,10 +1498,10 @@ xfs_da3_node_unbalance(
drop_node = drop_blk->bp->b_addr;
save_node = save_blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
- dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
- drop_btree = dp->d_ops->node_tree_p(drop_node);
- save_btree = dp->d_ops->node_tree_p(save_node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
+ drop_btree = drop_hdr.btree;
+ save_btree = save_hdr.btree;
tp = state->args->trans;
/*
@@ -1453,10 +1535,10 @@ xfs_da3_node_unbalance(
memcpy(&save_btree[sindex], &drop_btree[0], tmp);
save_hdr.count += drop_hdr.count;
- dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
+ xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
- dp->d_ops->node_hdr_size));
+ state->args->geo->node_hdr_size));
/*
* Save the last hashval in the remaining block for upward propagation.
@@ -1517,7 +1599,7 @@ xfs_da3_node_lookup_int(
*/
blk->blkno = blkno;
error = xfs_da3_node_read(args->trans, args->dp, blkno,
- -1, &blk->bp, args->whichfork);
+ &blk->bp, args->whichfork);
if (error) {
blk->blkno = 0;
state->path.active--;
@@ -1541,8 +1623,10 @@ xfs_da3_node_lookup_int(
break;
}
- if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
+ if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
+ }
blk->magic = XFS_DA_NODE_MAGIC;
@@ -1550,19 +1634,22 @@ xfs_da3_node_lookup_int(
* Search an intermediate node for a match.
*/
node = blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
/* Tree taller than we can handle; bail out! */
- if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
+ if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
+ }
/* Check the level from the root. */
if (blkno == args->geo->leafblk)
expected_level = nodehdr.level - 1;
- else if (expected_level != nodehdr.level)
+ else if (expected_level != nodehdr.level) {
+ xfs_buf_corruption_error(blk->bp);
return -EFSCORRUPTED;
- else
+ } else
expected_level--;
max = nodehdr.count;
@@ -1612,11 +1699,11 @@ xfs_da3_node_lookup_int(
}
/* We can't point back to the root. */
- if (blkno == args->geo->leafblk)
+ if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
return -EFSCORRUPTED;
}
- if (expected_level != 0)
+ if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
return -EFSCORRUPTED;
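[The remaining lookup-path checks switch to XFS_IS_CORRUPT(), which wraps the test, reports the mount plus file and line when it fires, and keeps the error return to a single line, replacing the old if (unlikely(...)) { XFS_ERROR_REPORT(...); } pairs:]

	/* the XFS_IS_CORRUPT() idiom used above and in the hunks below */
	if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
		return -EFSCORRUPTED;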
/*
@@ -1678,10 +1765,10 @@ xfs_da3_node_order(
node1 = node1_bp->b_addr;
node2 = node2_bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
- dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
- btree1 = dp->d_ops->node_tree_p(node1);
- btree2 = dp->d_ops->node_tree_p(node2);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
+ btree1 = node1hdr.btree;
+ btree2 = node2hdr.btree;
if (node1hdr.count > 0 && node2hdr.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
@@ -1746,7 +1833,7 @@ xfs_da3_blk_link(
if (old_info->back) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->back),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1767,7 +1854,7 @@ xfs_da3_blk_link(
if (old_info->forw) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->forw),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1826,7 +1913,7 @@ xfs_da3_blk_unlink(
if (drop_info->back) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->back),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1843,7 +1930,7 @@ xfs_da3_blk_unlink(
if (drop_info->forw) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->forw),
- -1, &bp, args->whichfork);
+ &bp, args->whichfork);
if (error)
return error;
ASSERT(bp != NULL);
@@ -1878,7 +1965,6 @@ xfs_da3_path_shift(
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
- struct xfs_da_intnode *node;
struct xfs_da_args *args;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
@@ -1901,17 +1987,16 @@ xfs_da3_path_shift(
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
- node = blk->bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
+ blk->bp->b_addr);
if (forward && (blk->index < nodehdr.count - 1)) {
blk->index++;
- blkno = be32_to_cpu(btree[blk->index].before);
+ blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
break;
} else if (!forward && (blk->index > 0)) {
blk->index--;
- blkno = be32_to_cpu(btree[blk->index].before);
+ blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
break;
}
}
@@ -1929,7 +2014,7 @@ xfs_da3_path_shift(
/*
* Read the next child block into a local buffer.
*/
- error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
+ error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
args->whichfork);
if (error)
return error;
@@ -1962,9 +2047,9 @@ xfs_da3_path_shift(
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
blk->magic = XFS_DA_NODE_MAGIC;
- node = (xfs_da_intnode_t *)info;
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = dp->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
+ bp->b_addr);
+ btree = nodehdr.btree;
blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
if (forward)
blk->index = 0;
@@ -2044,18 +2129,6 @@ xfs_da_compname(
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
-static xfs_dahash_t
-xfs_default_hashname(
- struct xfs_name *name)
-{
- return xfs_da_hashname(name->name, name->len);
-}
-
-const struct xfs_nameops xfs_default_nameops = {
- .hashname = xfs_default_hashname,
- .compname = xfs_da_compname
-};
-
int
xfs_da_grow_inode_int(
struct xfs_da_args *args,
@@ -2213,16 +2286,13 @@ xfs_da3_swap_lastblock(
error = xfs_bmap_last_before(tp, dp, &lastoff, w);
if (error)
return error;
- if (unlikely(lastoff == 0)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
- mp);
+ if (XFS_IS_CORRUPT(mp, lastoff == 0))
return -EFSCORRUPTED;
- }
/*
* Read the last block in the btree space.
*/
last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
- error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
+ error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
if (error)
return error;
/*
@@ -2240,16 +2310,17 @@ xfs_da3_swap_lastblock(
struct xfs_dir2_leaf_entry *ents;
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
- ents = dp->d_ops->leaf_ents_p(dead_leaf2);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
+ dead_leaf2);
+ ents = leafhdr.ents;
dead_level = 0;
dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
} else {
struct xfs_da3_icnode_hdr deadhdr;
dead_node = (xfs_da_intnode_t *)dead_info;
- dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
- btree = dp->d_ops->node_tree_p(dead_node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
+ btree = deadhdr.btree;
dead_level = deadhdr.level;
dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
}
@@ -2258,15 +2329,13 @@ xfs_da3_swap_lastblock(
* If the moved block has a left sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->back))) {
- error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
+ error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
- if (unlikely(
- be32_to_cpu(sib_info->forw) != last_blkno ||
- sib_info->magic != dead_info->magic)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp,
+ be32_to_cpu(sib_info->forw) != last_blkno ||
+ sib_info->magic != dead_info->magic)) {
error = -EFSCORRUPTED;
goto done;
}
@@ -2280,15 +2349,13 @@ xfs_da3_swap_lastblock(
* If the moved block has a right sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
- error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
+ error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
- if (unlikely(
- be32_to_cpu(sib_info->back) != last_blkno ||
- sib_info->magic != dead_info->magic)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp,
+ be32_to_cpu(sib_info->back) != last_blkno ||
+ sib_info->magic != dead_info->magic)) {
error = -EFSCORRUPTED;
goto done;
}
@@ -2304,27 +2371,24 @@ xfs_da3_swap_lastblock(
* Walk down the tree looking for the parent of the moved block.
*/
for (;;) {
- error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
+ error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
- dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
- if (level >= 0 && level != par_hdr.level + 1) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
- XFS_ERRLEVEL_LOW, mp);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
+ if (XFS_IS_CORRUPT(mp,
+ level >= 0 && level != par_hdr.level + 1)) {
error = -EFSCORRUPTED;
goto done;
}
level = par_hdr.level;
- btree = dp->d_ops->node_tree_p(par_node);
+ btree = par_hdr.btree;
for (entno = 0;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].hashval) < dead_hash;
entno++)
continue;
- if (entno == par_hdr.count) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
error = -EFSCORRUPTED;
goto done;
}
@@ -2349,24 +2413,20 @@ xfs_da3_swap_lastblock(
par_blkno = par_hdr.forw;
xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
- if (unlikely(par_blkno == 0)) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
- XFS_ERRLEVEL_LOW, mp);
+ if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
error = -EFSCORRUPTED;
goto done;
}
- error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
+ error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
- dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
- if (par_hdr.level != level) {
- XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
- XFS_ERRLEVEL_LOW, mp);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
+ if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
error = -EFSCORRUPTED;
goto done;
}
- btree = dp->d_ops->node_tree_p(par_node);
+ btree = par_hdr.btree;
entno = 0;
}
/*
@@ -2429,159 +2489,84 @@ xfs_da_shrink_inode(
return error;
}
-/*
- * See if the mapping(s) for this btree block are valid, i.e.
- * don't contain holes, are logically contiguous, and cover the whole range.
- */
-STATIC int
-xfs_da_map_covers_blocks(
- int nmap,
- xfs_bmbt_irec_t *mapp,
- xfs_dablk_t bno,
- int count)
-{
- int i;
- xfs_fileoff_t off;
-
- for (i = 0, off = bno; i < nmap; i++) {
- if (mapp[i].br_startblock == HOLESTARTBLOCK ||
- mapp[i].br_startblock == DELAYSTARTBLOCK) {
- return 0;
- }
- if (off != mapp[i].br_startoff) {
- return 0;
- }
- off += mapp[i].br_blockcount;
- }
- return off == bno + count;
-}
-
-/*
- * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
- *
- * For the single map case, it is assumed that the caller has provided a pointer
- * to a valid xfs_buf_map. For the multiple map case, this function will
- * allocate the xfs_buf_map to hold all the maps and replace the caller's single
- * map pointer with the allocated map.
- */
static int
-xfs_buf_map_from_irec(
- struct xfs_mount *mp,
+xfs_dabuf_map(
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ unsigned int flags,
+ int whichfork,
struct xfs_buf_map **mapp,
- int *nmaps,
- struct xfs_bmbt_irec *irecs,
- int nirecs)
+ int *nmaps)
{
- struct xfs_buf_map *map;
- int i;
-
- ASSERT(*nmaps == 1);
- ASSERT(nirecs >= 1);
+ struct xfs_mount *mp = dp->i_mount;
+ int nfsb = xfs_dabuf_nfsb(mp, whichfork);
+ struct xfs_bmbt_irec irec, *irecs = &irec;
+ struct xfs_buf_map *map = *mapp;
+ xfs_fileoff_t off = bno;
+ int error = 0, nirecs, i;
+
+ if (nfsb > 1)
+ irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);
+
+ nirecs = nfsb;
+ error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
+ xfs_bmapi_aflag(whichfork));
+ if (error)
+ goto out_free_irecs;
+ /*
+ * Use the caller-provided map for the single map case, else allocate a
+ * larger one that needs to be freed by the caller.
+ */
if (nirecs > 1) {
- map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
- KM_NOFS);
+ map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
if (!map)
- return -ENOMEM;
+ goto out_free_irecs;
*mapp = map;
}
- *nmaps = nirecs;
- map = *mapp;
- for (i = 0; i < *nmaps; i++) {
- ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
- irecs[i].br_startblock != HOLESTARTBLOCK);
+ for (i = 0; i < nirecs; i++) {
+ if (irecs[i].br_startblock == HOLESTARTBLOCK ||
+ irecs[i].br_startblock == DELAYSTARTBLOCK)
+ goto invalid_mapping;
+ if (off != irecs[i].br_startoff)
+ goto invalid_mapping;
+
map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
+ off += irecs[i].br_blockcount;
}
- return 0;
-}
-
-/*
- * Map the block we are given ready for reading. There are three possible return
- * values:
- * -1 - will be returned if we land in a hole and mappedbno == -2 so the
- * caller knows not to execute a subsequent read.
- * 0 - if we mapped the block successfully
- * >0 - positive error number if there was an error.
- */
-static int
-xfs_dabuf_map(
- struct xfs_inode *dp,
- xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
- int whichfork,
- struct xfs_buf_map **map,
- int *nmaps)
-{
- struct xfs_mount *mp = dp->i_mount;
- int nfsb;
- int error = 0;
- struct xfs_bmbt_irec irec;
- struct xfs_bmbt_irec *irecs = &irec;
- int nirecs;
- ASSERT(map && *map);
- ASSERT(*nmaps == 1);
+ if (off != bno + nfsb)
+ goto invalid_mapping;
- if (whichfork == XFS_DATA_FORK)
- nfsb = mp->m_dir_geo->fsbcount;
- else
- nfsb = mp->m_attr_geo->fsbcount;
-
- /*
- * Caller doesn't have a mapping. -2 means don't complain
- * if we land in a hole.
- */
- if (mappedbno == -1 || mappedbno == -2) {
- /*
- * Optimize the one-block case.
- */
- if (nfsb != 1)
- irecs = kmem_zalloc(sizeof(irec) * nfsb,
- KM_NOFS);
+ *nmaps = nirecs;
+out_free_irecs:
+ if (irecs != &irec)
+ kmem_free(irecs);
+ return error;
- nirecs = nfsb;
- error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
- &nirecs, xfs_bmapi_aflag(whichfork));
- if (error)
- goto out;
- } else {
- irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
- irecs->br_startoff = (xfs_fileoff_t)bno;
- irecs->br_blockcount = nfsb;
- irecs->br_state = 0;
- nirecs = 1;
- }
+invalid_mapping:
+ /* Caller ok with no mapping. */
+ if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
+ error = -EFSCORRUPTED;
+ if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ xfs_alert(mp, "%s: bno %u inode %llu",
+ __func__, bno, dp->i_ino);
- if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
- error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
- if (unlikely(error == -EFSCORRUPTED)) {
- if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- int i;
- xfs_alert(mp, "%s: bno %lld dir: inode %lld",
- __func__, (long long)bno,
- (long long)dp->i_ino);
- for (i = 0; i < *nmaps; i++) {
- xfs_alert(mp,
+ for (i = 0; i < nirecs; i++) {
+ xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
- i,
- (long long)irecs[i].br_startoff,
- (long long)irecs[i].br_startblock,
- (long long)irecs[i].br_blockcount,
- irecs[i].br_state);
- }
+ i, irecs[i].br_startoff,
+ irecs[i].br_startblock,
+ irecs[i].br_blockcount,
+ irecs[i].br_state);
}
- XFS_ERROR_REPORT("xfs_da_do_buf(1)",
- XFS_ERRLEVEL_LOW, mp);
}
- goto out;
+ } else {
+ *nmaps = 0;
}
- error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
-out:
- if (irecs != &irec)
- kmem_free(irecs);
- return error;
+ goto out_free_irecs;
}
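[The rewritten xfs_dabuf_map() folds the old xfs_da_map_covers_blocks() and xfs_buf_map_from_irec() helpers into one pass and retires the -1/-2 mappedbno magic: a hole now comes back as a 0 return with *nmaps == 0 when the caller passes XFS_DABUF_MAP_HOLE_OK, and as -EFSCORRUPTED otherwise. A sketch of the resulting caller shape (the flag shown is illustrative; xfs_da_get_buf() below passes 0):]

	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;

	error = xfs_dabuf_map(dp, bno, XFS_DABUF_MAP_HOLE_OK, whichfork,
			&mapp, &nmap);
	if (error || !nmap)
		goto out_free;	/* hard error, or a hole we tolerate */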
/*
@@ -2589,37 +2574,28 @@ out:
*/
int
xfs_da_get_buf(
- struct xfs_trans *trans,
+ struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int whichfork)
{
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *bp;
- struct xfs_buf_map map;
- struct xfs_buf_map *mapp;
- int nmap;
+ struct xfs_buf_map map, *mapp = &map;
+ int nmap = 1;
int error;
*bpp = NULL;
- mapp = &map;
- nmap = 1;
- error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
- &mapp, &nmap);
- if (error) {
- /* mapping a hole is not an error, but we don't continue */
- if (error == -1)
- error = 0;
+ error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
+ if (error || nmap == 0)
goto out_free;
- }
- bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
- mapp, nmap, 0);
+ bp = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0);
error = bp ? bp->b_error : -EIO;
if (error) {
if (bp)
- xfs_trans_brelse(trans, bp);
+ xfs_trans_brelse(tp, bp);
goto out_free;
}
@@ -2637,35 +2613,27 @@ out_free:
*/
int
xfs_da_read_buf(
- struct xfs_trans *trans,
+ struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
+ unsigned int flags,
struct xfs_buf **bpp,
int whichfork,
const struct xfs_buf_ops *ops)
{
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *bp;
- struct xfs_buf_map map;
- struct xfs_buf_map *mapp;
- int nmap;
+ struct xfs_buf_map map, *mapp = &map;
+ int nmap = 1;
int error;
*bpp = NULL;
- mapp = &map;
- nmap = 1;
- error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
- &mapp, &nmap);
- if (error) {
- /* mapping a hole is not an error, but we don't continue */
- if (error == -1)
- error = 0;
+ error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
+ if (error || !nmap)
goto out_free;
- }
- error = xfs_trans_read_buf_map(dp->i_mount, trans,
- dp->i_mount->m_ddev_targp,
- mapp, nmap, 0, &bp, ops);
+ error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
+ &bp, ops);
if (error)
goto out_free;
@@ -2688,7 +2656,7 @@ int
xfs_da_reada_buf(
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
+ unsigned int flags,
int whichfork,
const struct xfs_buf_ops *ops)
{
@@ -2699,16 +2667,10 @@ xfs_da_reada_buf(
mapp = &map;
nmap = 1;
- error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
- &mapp, &nmap);
- if (error) {
- /* mapping a hole is not an error, but we don't continue */
- if (error == -1)
- error = 0;
+ error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
+ if (error || !nmap)
goto out_free;
- }
- mappedbno = mapp[0].bm_bn;
xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
out_free:
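[Readahead follows the same convention: with mappedbno gone, xfs_da_reada_buf() simply skips the readahead when the mapping comes back empty. A hedged sketch of an opportunistic caller; whether a given caller passes the hole-tolerant flag is an assumption, not shown in this hunk:]

	/* sketch: readahead that quietly tolerates a hole */
	xfs_da_reada_buf(dp, bno, XFS_DABUF_MAP_HOLE_OK, XFS_DATA_FORK,
			&xfs_da3_node_buf_ops);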
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index ae0bbd20d9ca..e16610d1c14f 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -10,7 +10,6 @@
struct xfs_inode;
struct xfs_trans;
struct zone;
-struct xfs_dir_ops;
/*
* Directory/attribute geometry information. There will be one of these for each
@@ -18,15 +17,23 @@ struct xfs_dir_ops;
* structures will be attached to the xfs_mount.
*/
struct xfs_da_geometry {
- int blksize; /* da block size in bytes */
- int fsbcount; /* da block size in filesystem blocks */
+ unsigned int blksize; /* da block size in bytes */
+ unsigned int fsbcount; /* da block size in filesystem blocks */
uint8_t fsblog; /* log2 of _filesystem_ block size */
uint8_t blklog; /* log2 of da block size */
- uint node_ents; /* # of entries in a danode */
- int magicpct; /* 37% of block size in bytes */
+ unsigned int node_hdr_size; /* danode header size in bytes */
+ unsigned int node_ents; /* # of entries in a danode */
+ unsigned int magicpct; /* 37% of block size in bytes */
xfs_dablk_t datablk; /* blockno of dir data v2 */
+ unsigned int leaf_hdr_size; /* dir2 leaf header size */
+ unsigned int leaf_max_ents; /* # of entries in dir2 leaf */
xfs_dablk_t leafblk; /* blockno of leaf data v2 */
+ unsigned int free_hdr_size; /* dir2 free header size */
+ unsigned int free_max_bests; /* # of bests entries in dir2 free */
xfs_dablk_t freeblk; /* blockno of free data v2 */
+
+ xfs_dir2_data_aoff_t data_first_offset;
+ size_t data_entry_offset;
};
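[Header sizes and entry limits that used to be fetched through dp->d_ops methods are now plain fields in the per-mount geometry, computed once instead of on every access. A sketch of how the node header size might be initialised; the mount-side setup lives outside this hunk, so the exact code here is an assumption:]

	/* sketch (assumed mount-time setup, not shown in this patch hunk) */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		geo->node_hdr_size = sizeof(struct xfs_da3_node_hdr);
	else
		geo->node_hdr_size = sizeof(struct xfs_da_node_hdr);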
/*========================================================================
@@ -125,6 +132,25 @@ typedef struct xfs_da_state {
} xfs_da_state_t;
/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t level;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ struct xfs_da_node_entry *btree;
+};
+
+/*
* Utility macros to aid in logging changed structure fields.
*/
#define XFS_DA_LOGOFF(BASE, ADDR) ((char *)(ADDR) - (char *)(BASE))
@@ -132,16 +158,6 @@ typedef struct xfs_da_state {
(uint)(XFS_DA_LOGOFF(BASE, ADDR)), \
(uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1)
-/*
- * Name ops for directory and/or attr name operations
- */
-struct xfs_nameops {
- xfs_dahash_t (*hashname)(struct xfs_name *);
- enum xfs_dacmp (*compname)(struct xfs_da_args *,
- const unsigned char *, int);
-};
-
-
/*========================================================================
* Function prototypes.
*========================================================================*/
@@ -172,25 +188,28 @@ int xfs_da3_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
int xfs_da3_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
xfs_da_state_blk_t *new_blk);
int xfs_da3_node_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bpp, int which_fork);
+ xfs_dablk_t bno, struct xfs_buf **bpp, int whichfork);
+int xfs_da3_node_read_mapped(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_daddr_t mappedbno, struct xfs_buf **bpp,
+ int whichfork);
/*
* Utility routines.
*/
+
+#define XFS_DABUF_MAP_HOLE_OK (1 << 0)
+
int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno);
int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
int count);
int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bp, int whichfork);
+ xfs_dablk_t bno, struct xfs_buf **bp, int whichfork);
int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mappedbno,
- struct xfs_buf **bpp, int whichfork,
- const struct xfs_buf_ops *ops);
+ xfs_dablk_t bno, unsigned int flags, struct xfs_buf **bpp,
+ int whichfork, const struct xfs_buf_ops *ops);
int xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
- xfs_daddr_t mapped_bno, int whichfork,
- const struct xfs_buf_ops *ops);
+ unsigned int flags, int whichfork,
+ const struct xfs_buf_ops *ops);
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf);
@@ -202,7 +221,11 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
xfs_da_state_t *xfs_da_state_alloc(void);
void xfs_da_state_free(xfs_da_state_t *state);
+void xfs_da3_node_hdr_from_disk(struct xfs_mount *mp,
+ struct xfs_da3_icnode_hdr *to, struct xfs_da_intnode *from);
+void xfs_da3_node_hdr_to_disk(struct xfs_mount *mp,
+ struct xfs_da_intnode *to, struct xfs_da3_icnode_hdr *from);
+
extern struct kmem_zone *xfs_da_state_zone;
-extern const struct xfs_nameops xfs_default_nameops;
#endif /* __XFS_DA_BTREE_H__ */
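[With struct xfs_nameops and xfs_default_nameops gone (their removal is in the xfs_da_btree.c hunks above), the default hash is called directly; the indirection collapses to the one-liner it always wrapped. In sketch form, with the assignment target illustrative:]

	/* sketch: direct replacement for xfs_default_nameops.hashname() */
	args->hashval = xfs_da_hashname(name->name, name->len);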
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
deleted file mode 100644
index b1ae572496b6..000000000000
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ /dev/null
@@ -1,888 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * Copyright (c) 2013 Red Hat, Inc.
- * All Rights Reserved.
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_shared.h"
-#include "xfs_format.h"
-#include "xfs_log_format.h"
-#include "xfs_trans_resv.h"
-#include "xfs_mount.h"
-#include "xfs_inode.h"
-#include "xfs_dir2.h"
-
-/*
- * Shortform directory ops
- */
-static int
-xfs_dir2_sf_entsize(
- struct xfs_dir2_sf_hdr *hdr,
- int len)
-{
- int count = sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
-
- count += len; /* name */
- count += hdr->i8count ? XFS_INO64_SIZE : XFS_INO32_SIZE; /* ino # */
- return count;
-}
-
-static int
-xfs_dir3_sf_entsize(
- struct xfs_dir2_sf_hdr *hdr,
- int len)
-{
- return xfs_dir2_sf_entsize(hdr, len) + sizeof(uint8_t);
-}
-
-static struct xfs_dir2_sf_entry *
-xfs_dir2_sf_nextentry(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return (struct xfs_dir2_sf_entry *)
- ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen));
-}
-
-static struct xfs_dir2_sf_entry *
-xfs_dir3_sf_nextentry(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return (struct xfs_dir2_sf_entry *)
- ((char *)sfep + xfs_dir3_sf_entsize(hdr, sfep->namelen));
-}
-
-
-/*
- * For filetype enabled shortform directories, the file type field is stored at
- * the end of the name. Because it's only a single byte, endian conversion is
- * not necessary. For non-filetype enabled directories, the type is always
- * unknown and we never store the value.
- */
-static uint8_t
-xfs_dir2_sfe_get_ftype(
- struct xfs_dir2_sf_entry *sfep)
-{
- return XFS_DIR3_FT_UNKNOWN;
-}
-
-static void
-xfs_dir2_sfe_put_ftype(
- struct xfs_dir2_sf_entry *sfep,
- uint8_t ftype)
-{
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-}
-
-static uint8_t
-xfs_dir3_sfe_get_ftype(
- struct xfs_dir2_sf_entry *sfep)
-{
- uint8_t ftype;
-
- ftype = sfep->name[sfep->namelen];
- if (ftype >= XFS_DIR3_FT_MAX)
- return XFS_DIR3_FT_UNKNOWN;
- return ftype;
-}
-
-static void
-xfs_dir3_sfe_put_ftype(
- struct xfs_dir2_sf_entry *sfep,
- uint8_t ftype)
-{
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-
- sfep->name[sfep->namelen] = ftype;
-}
-
-/*
- * Inode numbers in short-form directories can come in two versions,
- * either 4 bytes or 8 bytes wide. These helpers deal with the
- * two forms transparently by looking at the headers i8count field.
- *
- * For 64-bit inode number the most significant byte must be zero.
- */
-static xfs_ino_t
-xfs_dir2_sf_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- uint8_t *from)
-{
- if (hdr->i8count)
- return get_unaligned_be64(from) & 0x00ffffffffffffffULL;
- else
- return get_unaligned_be32(from);
-}
-
-static void
-xfs_dir2_sf_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- uint8_t *to,
- xfs_ino_t ino)
-{
- ASSERT((ino & 0xff00000000000000ULL) == 0);
-
- if (hdr->i8count)
- put_unaligned_be64(ino, to);
- else
- put_unaligned_be32(ino, to);
-}
-
-static xfs_ino_t
-xfs_dir2_sf_get_parent_ino(
- struct xfs_dir2_sf_hdr *hdr)
-{
- return xfs_dir2_sf_get_ino(hdr, hdr->parent);
-}
-
-static void
-xfs_dir2_sf_put_parent_ino(
- struct xfs_dir2_sf_hdr *hdr,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, hdr->parent, ino);
-}
-
-/*
- * In short-form directory entries the inode numbers are stored at variable
- * offset behind the entry name. If the entry stores a filetype value, then it
- * sits between the name and the inode number. Hence the inode numbers may only
- * be accessed through the helpers below.
- */
-static xfs_ino_t
-xfs_dir2_sfe_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return xfs_dir2_sf_get_ino(hdr, &sfep->name[sfep->namelen]);
-}
-
-static void
-xfs_dir2_sfe_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, &sfep->name[sfep->namelen], ino);
-}
-
-static xfs_ino_t
-xfs_dir3_sfe_get_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep)
-{
- return xfs_dir2_sf_get_ino(hdr, &sfep->name[sfep->namelen + 1]);
-}
-
-static void
-xfs_dir3_sfe_put_ino(
- struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino)
-{
- xfs_dir2_sf_put_ino(hdr, &sfep->name[sfep->namelen + 1], ino);
-}
-
-
-/*
- * Directory data block operations
- */
-
-/*
- * For special situations, the dirent size ends up fixed because we always know
- * what the size of the entry is. That's true for the "." and "..", and
- * therefore we know that they are a fixed size and hence their offsets are
- * constant, as is the first entry.
- *
- * Hence, this calculation is written as a macro to be able to be calculated at
- * compile time and so certain offsets can be calculated directly in the
- * structure initialiser via the macro. There are two macros - one for dirents
- * with ftype and without so there are no unresolvable conditionals in the
- * calculations. We also use round_up() as XFS_DIR2_DATA_ALIGN is always a power
- * of 2 and the compiler doesn't reject it (unlike roundup()).
- */
-#define XFS_DIR2_DATA_ENTSIZE(n) \
- round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
- sizeof(xfs_dir2_data_off_t)), XFS_DIR2_DATA_ALIGN)
-
-#define XFS_DIR3_DATA_ENTSIZE(n) \
- round_up((offsetof(struct xfs_dir2_data_entry, name[0]) + (n) + \
- sizeof(xfs_dir2_data_off_t) + sizeof(uint8_t)), \
- XFS_DIR2_DATA_ALIGN)
-
-static int
-xfs_dir2_data_entsize(
- int n)
-{
- return XFS_DIR2_DATA_ENTSIZE(n);
-}
-
-static int
-xfs_dir3_data_entsize(
- int n)
-{
- return XFS_DIR3_DATA_ENTSIZE(n);
-}
-
-static uint8_t
-xfs_dir2_data_get_ftype(
- struct xfs_dir2_data_entry *dep)
-{
- return XFS_DIR3_FT_UNKNOWN;
-}
-
-static void
-xfs_dir2_data_put_ftype(
- struct xfs_dir2_data_entry *dep,
- uint8_t ftype)
-{
- ASSERT(ftype < XFS_DIR3_FT_MAX);
-}
-
-static uint8_t
-xfs_dir3_data_get_ftype(
- struct xfs_dir2_data_entry *dep)
-{
- uint8_t ftype = dep->name[dep->namelen];
-
- if (ftype >= XFS_DIR3_FT_MAX)
- return XFS_DIR3_FT_UNKNOWN;
- return ftype;
-}
-
-static void
-xfs_dir3_data_put_ftype(
- struct xfs_dir2_data_entry *dep,
- uint8_t type)
-{
- ASSERT(type < XFS_DIR3_FT_MAX);
- ASSERT(dep->namelen != 0);
-
- dep->name[dep->namelen] = type;
-}
-
-/*
- * Pointer to an entry's tag word.
- */
-static __be16 *
-xfs_dir2_data_entry_tag_p(
- struct xfs_dir2_data_entry *dep)
-{
- return (__be16 *)((char *)dep +
- xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
-}
-
-static __be16 *
-xfs_dir3_data_entry_tag_p(
- struct xfs_dir2_data_entry *dep)
-{
- return (__be16 *)((char *)dep +
- xfs_dir3_data_entsize(dep->namelen) - sizeof(__be16));
-}
-
-/*
- * location of . and .. in data space (always block 0)
- */
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_dot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_dotdot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_first_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1) +
- XFS_DIR2_DATA_ENTSIZE(2));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_ftype_data_dotdot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_ftype_data_first_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_dot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_dotdot_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_first_entry_p(
- struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2));
-}
-
-static struct xfs_dir2_data_free *
-xfs_dir2_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
-{
- return hdr->bestfree;
-}
-
-static struct xfs_dir2_data_free *
-xfs_dir3_data_bestfree_p(struct xfs_dir2_data_hdr *hdr)
-{
- return ((struct xfs_dir3_data_hdr *)hdr)->best_free;
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir2_data_entry_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
-}
-
-static struct xfs_dir2_data_unused *
-xfs_dir2_data_unused_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_unused *)
- ((char *)hdr + sizeof(struct xfs_dir2_data_hdr));
-}
-
-static struct xfs_dir2_data_entry *
-xfs_dir3_data_entry_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_entry *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
-}
-
-static struct xfs_dir2_data_unused *
-xfs_dir3_data_unused_p(struct xfs_dir2_data_hdr *hdr)
-{
- return (struct xfs_dir2_data_unused *)
- ((char *)hdr + sizeof(struct xfs_dir3_data_hdr));
-}
-
-
-/*
- * Directory Leaf block operations
- */
-static int
-xfs_dir2_max_leaf_ents(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir2_leaf_hdr)) /
- (uint)sizeof(struct xfs_dir2_leaf_entry);
-}
-
-static struct xfs_dir2_leaf_entry *
-xfs_dir2_leaf_ents_p(struct xfs_dir2_leaf *lp)
-{
- return lp->__ents;
-}
-
-static int
-xfs_dir3_max_leaf_ents(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir3_leaf_hdr)) /
- (uint)sizeof(struct xfs_dir2_leaf_entry);
-}
-
-static struct xfs_dir2_leaf_entry *
-xfs_dir3_leaf_ents_p(struct xfs_dir2_leaf *lp)
-{
- return ((struct xfs_dir3_leaf *)lp)->__ents;
-}
-
-static void
-xfs_dir2_leaf_hdr_from_disk(
- struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from)
-{
- to->forw = be32_to_cpu(from->hdr.info.forw);
- to->back = be32_to_cpu(from->hdr.info.back);
- to->magic = be16_to_cpu(from->hdr.info.magic);
- to->count = be16_to_cpu(from->hdr.count);
- to->stale = be16_to_cpu(from->hdr.stale);
-
- ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
- to->magic == XFS_DIR2_LEAFN_MAGIC);
-}
-
-static void
-xfs_dir2_leaf_hdr_to_disk(
- struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from)
-{
- ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
- from->magic == XFS_DIR2_LEAFN_MAGIC);
-
- to->hdr.info.forw = cpu_to_be32(from->forw);
- to->hdr.info.back = cpu_to_be32(from->back);
- to->hdr.info.magic = cpu_to_be16(from->magic);
- to->hdr.count = cpu_to_be16(from->count);
- to->hdr.stale = cpu_to_be16(from->stale);
-}
-
-static void
-xfs_dir3_leaf_hdr_from_disk(
- struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from)
-{
- struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from;
-
- to->forw = be32_to_cpu(hdr3->info.hdr.forw);
- to->back = be32_to_cpu(hdr3->info.hdr.back);
- to->magic = be16_to_cpu(hdr3->info.hdr.magic);
- to->count = be16_to_cpu(hdr3->count);
- to->stale = be16_to_cpu(hdr3->stale);
-
- ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
- to->magic == XFS_DIR3_LEAFN_MAGIC);
-}
-
-static void
-xfs_dir3_leaf_hdr_to_disk(
- struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from)
-{
- struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)to;
-
- ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
- from->magic == XFS_DIR3_LEAFN_MAGIC);
-
- hdr3->info.hdr.forw = cpu_to_be32(from->forw);
- hdr3->info.hdr.back = cpu_to_be32(from->back);
- hdr3->info.hdr.magic = cpu_to_be16(from->magic);
- hdr3->count = cpu_to_be16(from->count);
- hdr3->stale = cpu_to_be16(from->stale);
-}
-
-
-/*
- * Directory/Attribute Node block operations
- */
-static struct xfs_da_node_entry *
-xfs_da2_node_tree_p(struct xfs_da_intnode *dap)
-{
- return dap->__btree;
-}
-
-static struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
- return ((struct xfs_da3_intnode *)dap)->__btree;
-}
-
-static void
-xfs_da2_node_hdr_from_disk(
- struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from)
-{
- ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
- to->forw = be32_to_cpu(from->hdr.info.forw);
- to->back = be32_to_cpu(from->hdr.info.back);
- to->magic = be16_to_cpu(from->hdr.info.magic);
- to->count = be16_to_cpu(from->hdr.__count);
- to->level = be16_to_cpu(from->hdr.__level);
-}
-
-static void
-xfs_da2_node_hdr_to_disk(
- struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from)
-{
- ASSERT(from->magic == XFS_DA_NODE_MAGIC);
- to->hdr.info.forw = cpu_to_be32(from->forw);
- to->hdr.info.back = cpu_to_be32(from->back);
- to->hdr.info.magic = cpu_to_be16(from->magic);
- to->hdr.__count = cpu_to_be16(from->count);
- to->hdr.__level = cpu_to_be16(from->level);
-}
-
-static void
-xfs_da3_node_hdr_from_disk(
- struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from)
-{
- struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;
-
- ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
- to->forw = be32_to_cpu(hdr3->info.hdr.forw);
- to->back = be32_to_cpu(hdr3->info.hdr.back);
- to->magic = be16_to_cpu(hdr3->info.hdr.magic);
- to->count = be16_to_cpu(hdr3->__count);
- to->level = be16_to_cpu(hdr3->__level);
-}
-
-static void
-xfs_da3_node_hdr_to_disk(
- struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from)
-{
- struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;
-
- ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
- hdr3->info.hdr.forw = cpu_to_be32(from->forw);
- hdr3->info.hdr.back = cpu_to_be32(from->back);
- hdr3->info.hdr.magic = cpu_to_be16(from->magic);
- hdr3->__count = cpu_to_be16(from->count);
- hdr3->__level = cpu_to_be16(from->level);
-}
-
-
-/*
- * Directory free space block operations
- */
-static int
-xfs_dir2_free_max_bests(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir2_free_hdr)) /
- sizeof(xfs_dir2_data_off_t);
-}
-
-static __be16 *
-xfs_dir2_free_bests_p(struct xfs_dir2_free *free)
-{
- return (__be16 *)((char *)free + sizeof(struct xfs_dir2_free_hdr));
-}
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static xfs_dir2_db_t
-xfs_dir2_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
- (db / xfs_dir2_free_max_bests(geo));
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static int
-xfs_dir2_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return db % xfs_dir2_free_max_bests(geo);
-}
-
-static int
-xfs_dir3_free_max_bests(struct xfs_da_geometry *geo)
-{
- return (geo->blksize - sizeof(struct xfs_dir3_free_hdr)) /
- sizeof(xfs_dir2_data_off_t);
-}
-
-static __be16 *
-xfs_dir3_free_bests_p(struct xfs_dir2_free *free)
-{
- return (__be16 *)((char *)free + sizeof(struct xfs_dir3_free_hdr));
-}
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static xfs_dir2_db_t
-xfs_dir3_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
- (db / xfs_dir3_free_max_bests(geo));
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static int
-xfs_dir3_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
- return db % xfs_dir3_free_max_bests(geo);
-}
-
-static void
-xfs_dir2_free_hdr_from_disk(
- struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from)
-{
- to->magic = be32_to_cpu(from->hdr.magic);
- to->firstdb = be32_to_cpu(from->hdr.firstdb);
- to->nvalid = be32_to_cpu(from->hdr.nvalid);
- to->nused = be32_to_cpu(from->hdr.nused);
- ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
-}
-
-static void
-xfs_dir2_free_hdr_to_disk(
- struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from)
-{
- ASSERT(from->magic == XFS_DIR2_FREE_MAGIC);
-
- to->hdr.magic = cpu_to_be32(from->magic);
- to->hdr.firstdb = cpu_to_be32(from->firstdb);
- to->hdr.nvalid = cpu_to_be32(from->nvalid);
- to->hdr.nused = cpu_to_be32(from->nused);
-}
-
-static void
-xfs_dir3_free_hdr_from_disk(
- struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from)
-{
- struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from;
-
- to->magic = be32_to_cpu(hdr3->hdr.magic);
- to->firstdb = be32_to_cpu(hdr3->firstdb);
- to->nvalid = be32_to_cpu(hdr3->nvalid);
- to->nused = be32_to_cpu(hdr3->nused);
-
- ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
-}
-
-static void
-xfs_dir3_free_hdr_to_disk(
- struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from)
-{
- struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)to;
-
- ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
-
- hdr3->hdr.magic = cpu_to_be32(from->magic);
- hdr3->firstdb = cpu_to_be32(from->firstdb);
- hdr3->nvalid = cpu_to_be32(from->nvalid);
- hdr3->nused = cpu_to_be32(from->nused);
-}
-
-static const struct xfs_dir_ops xfs_dir2_ops = {
- .sf_entsize = xfs_dir2_sf_entsize,
- .sf_nextentry = xfs_dir2_sf_nextentry,
- .sf_get_ftype = xfs_dir2_sfe_get_ftype,
- .sf_put_ftype = xfs_dir2_sfe_put_ftype,
- .sf_get_ino = xfs_dir2_sfe_get_ino,
- .sf_put_ino = xfs_dir2_sfe_put_ino,
- .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
- .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
-
- .data_entsize = xfs_dir2_data_entsize,
- .data_get_ftype = xfs_dir2_data_get_ftype,
- .data_put_ftype = xfs_dir2_data_put_ftype,
- .data_entry_tag_p = xfs_dir2_data_entry_tag_p,
- .data_bestfree_p = xfs_dir2_data_bestfree_p,
-
- .data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
- .data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1),
- .data_first_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR2_DATA_ENTSIZE(1) +
- XFS_DIR2_DATA_ENTSIZE(2),
- .data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
-
- .data_dot_entry_p = xfs_dir2_data_dot_entry_p,
- .data_dotdot_entry_p = xfs_dir2_data_dotdot_entry_p,
- .data_first_entry_p = xfs_dir2_data_first_entry_p,
- .data_entry_p = xfs_dir2_data_entry_p,
- .data_unused_p = xfs_dir2_data_unused_p,
-
- .leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
- .leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
- .leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
- .leaf_max_ents = xfs_dir2_max_leaf_ents,
- .leaf_ents_p = xfs_dir2_leaf_ents_p,
-
- .node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
- .node_tree_p = xfs_da2_node_tree_p,
-
- .free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
- .free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
- .free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
- .free_max_bests = xfs_dir2_free_max_bests,
- .free_bests_p = xfs_dir2_free_bests_p,
- .db_to_fdb = xfs_dir2_db_to_fdb,
- .db_to_fdindex = xfs_dir2_db_to_fdindex,
-};
-
-static const struct xfs_dir_ops xfs_dir2_ftype_ops = {
- .sf_entsize = xfs_dir3_sf_entsize,
- .sf_nextentry = xfs_dir3_sf_nextentry,
- .sf_get_ftype = xfs_dir3_sfe_get_ftype,
- .sf_put_ftype = xfs_dir3_sfe_put_ftype,
- .sf_get_ino = xfs_dir3_sfe_get_ino,
- .sf_put_ino = xfs_dir3_sfe_put_ino,
- .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
- .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
-
- .data_entsize = xfs_dir3_data_entsize,
- .data_get_ftype = xfs_dir3_data_get_ftype,
- .data_put_ftype = xfs_dir3_data_put_ftype,
- .data_entry_tag_p = xfs_dir3_data_entry_tag_p,
- .data_bestfree_p = xfs_dir2_data_bestfree_p,
-
- .data_dot_offset = sizeof(struct xfs_dir2_data_hdr),
- .data_dotdot_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1),
- .data_first_offset = sizeof(struct xfs_dir2_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2),
- .data_entry_offset = sizeof(struct xfs_dir2_data_hdr),
-
- .data_dot_entry_p = xfs_dir2_data_dot_entry_p,
- .data_dotdot_entry_p = xfs_dir2_ftype_data_dotdot_entry_p,
- .data_first_entry_p = xfs_dir2_ftype_data_first_entry_p,
- .data_entry_p = xfs_dir2_data_entry_p,
- .data_unused_p = xfs_dir2_data_unused_p,
-
- .leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr),
- .leaf_hdr_to_disk = xfs_dir2_leaf_hdr_to_disk,
- .leaf_hdr_from_disk = xfs_dir2_leaf_hdr_from_disk,
- .leaf_max_ents = xfs_dir2_max_leaf_ents,
- .leaf_ents_p = xfs_dir2_leaf_ents_p,
-
- .node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
- .node_tree_p = xfs_da2_node_tree_p,
-
- .free_hdr_size = sizeof(struct xfs_dir2_free_hdr),
- .free_hdr_to_disk = xfs_dir2_free_hdr_to_disk,
- .free_hdr_from_disk = xfs_dir2_free_hdr_from_disk,
- .free_max_bests = xfs_dir2_free_max_bests,
- .free_bests_p = xfs_dir2_free_bests_p,
- .db_to_fdb = xfs_dir2_db_to_fdb,
- .db_to_fdindex = xfs_dir2_db_to_fdindex,
-};
-
-static const struct xfs_dir_ops xfs_dir3_ops = {
- .sf_entsize = xfs_dir3_sf_entsize,
- .sf_nextentry = xfs_dir3_sf_nextentry,
- .sf_get_ftype = xfs_dir3_sfe_get_ftype,
- .sf_put_ftype = xfs_dir3_sfe_put_ftype,
- .sf_get_ino = xfs_dir3_sfe_get_ino,
- .sf_put_ino = xfs_dir3_sfe_put_ino,
- .sf_get_parent_ino = xfs_dir2_sf_get_parent_ino,
- .sf_put_parent_ino = xfs_dir2_sf_put_parent_ino,
-
- .data_entsize = xfs_dir3_data_entsize,
- .data_get_ftype = xfs_dir3_data_get_ftype,
- .data_put_ftype = xfs_dir3_data_put_ftype,
- .data_entry_tag_p = xfs_dir3_data_entry_tag_p,
- .data_bestfree_p = xfs_dir3_data_bestfree_p,
-
- .data_dot_offset = sizeof(struct xfs_dir3_data_hdr),
- .data_dotdot_offset = sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1),
- .data_first_offset = sizeof(struct xfs_dir3_data_hdr) +
- XFS_DIR3_DATA_ENTSIZE(1) +
- XFS_DIR3_DATA_ENTSIZE(2),
- .data_entry_offset = sizeof(struct xfs_dir3_data_hdr),
-
- .data_dot_entry_p = xfs_dir3_data_dot_entry_p,
- .data_dotdot_entry_p = xfs_dir3_data_dotdot_entry_p,
- .data_first_entry_p = xfs_dir3_data_first_entry_p,
- .data_entry_p = xfs_dir3_data_entry_p,
- .data_unused_p = xfs_dir3_data_unused_p,
-
- .leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr),
- .leaf_hdr_to_disk = xfs_dir3_leaf_hdr_to_disk,
- .leaf_hdr_from_disk = xfs_dir3_leaf_hdr_from_disk,
- .leaf_max_ents = xfs_dir3_max_leaf_ents,
- .leaf_ents_p = xfs_dir3_leaf_ents_p,
-
- .node_hdr_size = sizeof(struct xfs_da3_node_hdr),
- .node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
- .node_tree_p = xfs_da3_node_tree_p,
-
- .free_hdr_size = sizeof(struct xfs_dir3_free_hdr),
- .free_hdr_to_disk = xfs_dir3_free_hdr_to_disk,
- .free_hdr_from_disk = xfs_dir3_free_hdr_from_disk,
- .free_max_bests = xfs_dir3_free_max_bests,
- .free_bests_p = xfs_dir3_free_bests_p,
- .db_to_fdb = xfs_dir3_db_to_fdb,
- .db_to_fdindex = xfs_dir3_db_to_fdindex,
-};
-
-static const struct xfs_dir_ops xfs_dir2_nondir_ops = {
- .node_hdr_size = sizeof(struct xfs_da_node_hdr),
- .node_hdr_to_disk = xfs_da2_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da2_node_hdr_from_disk,
- .node_tree_p = xfs_da2_node_tree_p,
-};
-
-static const struct xfs_dir_ops xfs_dir3_nondir_ops = {
- .node_hdr_size = sizeof(struct xfs_da3_node_hdr),
- .node_hdr_to_disk = xfs_da3_node_hdr_to_disk,
- .node_hdr_from_disk = xfs_da3_node_hdr_from_disk,
- .node_tree_p = xfs_da3_node_tree_p,
-};
-
-/*
- * Return the ops structure according to the current config. If we are passed
- * an inode, then that overrides the default config we use which is based on
- * feature bits.
- */
-const struct xfs_dir_ops *
-xfs_dir_get_ops(
- struct xfs_mount *mp,
- struct xfs_inode *dp)
-{
- if (dp)
- return dp->d_ops;
- if (mp->m_dir_inode_ops)
- return mp->m_dir_inode_ops;
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return &xfs_dir3_ops;
- if (xfs_sb_version_hasftype(&mp->m_sb))
- return &xfs_dir2_ftype_ops;
- return &xfs_dir2_ops;
-}
-
-const struct xfs_dir_ops *
-xfs_nondir_get_ops(
- struct xfs_mount *mp,
- struct xfs_inode *dp)
-{
- if (dp)
- return dp->d_ops;
- if (mp->m_nondir_inode_ops)
- return mp->m_nondir_inode_ops;
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return &xfs_dir3_nondir_ops;
- return &xfs_dir2_nondir_ops;
-}
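
The free-block mapping removed above is plain integer math, and a worked
example may help. A sketch, not part of the patch, assuming 4k directory
blocks and the v4 format, whose four-field free header is 16 bytes:

	/*
	 * slots per free block:
	 *	(4096 - 16) / sizeof(xfs_dir2_data_off_t) = 2040
	 * so for a hypothetical data block 5000:
	 *	fdb     = xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET)
	 *			+ 5000 / 2040;	-> the third free block
	 *	fdindex = 5000 % 2040;		-> slot 920 within it
	 */
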
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index ae654e06b2fb..3dee33043e09 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -94,19 +94,6 @@ struct xfs_da3_intnode {
};
/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t level;
-};
-
-/*
* Directory version 2.
*
* There are 4 possible formats:
@@ -434,14 +421,6 @@ struct xfs_dir3_leaf_hdr {
__be32 pad; /* 64 bit alignment */
};
-struct xfs_dir3_icleaf_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t stale;
-};
-
/*
* Leaf block entry.
*/
@@ -482,7 +461,7 @@ xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
}
/*
- * Free space block defintions for the node format.
+ * Free space block definitions for the node format.
*/
/*
@@ -521,19 +500,6 @@ struct xfs_dir3_free {
#define XFS_DIR3_FREE_CRC_OFF offsetof(struct xfs_dir3_free, hdr.hdr.crc)
/*
- * In core version of the free block header, abstracted away from on-disk format
- * differences. Use this in the code, and convert to/from the disk version using
- * xfs_dir3_free_hdr_from_disk/xfs_dir3_free_hdr_to_disk.
- */
-struct xfs_dir3_icfree_hdr {
- uint32_t magic;
- uint32_t firstdb;
- uint32_t nvalid;
- uint32_t nused;
-
-};
-
-/*
* Single block format.
*
* The single block format looks like the following drawing on disk:
@@ -710,29 +676,6 @@ struct xfs_attr3_leafblock {
};
/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
- uint32_t forw;
- uint32_t back;
- uint16_t magic;
- uint16_t count;
- uint16_t usedbytes;
- /*
- * firstused is 32-bit here instead of 16-bit like the on-disk variant
- * to support maximum fsb size of 64k without overflow issues throughout
- * the attr code. Instead, the overflow condition is handled on
- * conversion to/from disk.
- */
- uint32_t firstused;
- __u8 holes;
- struct {
- uint16_t base;
- uint16_t size;
- } freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
* Special value to represent fs block size in the leaf header firstused field.
* Only used when block size overflows the 2-bytes available on disk.
*/
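
The in-core headers deleted from this file move to private headers later in
this series and grow cached pointers into the on-disk block. Judging from
the leafhdr.ents uses in the hunks below, the relocated leaf variant looks
roughly like this (a sketch; field layout inferred, not quoted from the
patch):

	struct xfs_dir3_icleaf_hdr {
		uint32_t		forw;
		uint32_t		back;
		uint16_t		magic;
		uint16_t		count;
		uint16_t		stale;

		/*
		 * Cached pointer to the on-disk entry array, set up by
		 * xfs_dir2_leaf_hdr_from_disk(), so callers no longer
		 * need a separate leaf_ents_p() lookup.
		 */
		struct xfs_dir2_leaf_entry *ents;
	};
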
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 867c5dee0751..0aa87cbde49e 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -52,7 +52,7 @@ xfs_mode_to_ftype(
* ASCII case-insensitive (ie. A-Z) support for directories that was
* used in IRIX.
*/
-STATIC xfs_dahash_t
+xfs_dahash_t
xfs_ascii_ci_hashname(
struct xfs_name *name)
{
@@ -65,14 +65,14 @@ xfs_ascii_ci_hashname(
return hash;
}
-STATIC enum xfs_dacmp
+enum xfs_dacmp
xfs_ascii_ci_compname(
- struct xfs_da_args *args,
- const unsigned char *name,
- int len)
+ struct xfs_da_args *args,
+ const unsigned char *name,
+ int len)
{
- enum xfs_dacmp result;
- int i;
+ enum xfs_dacmp result;
+ int i;
if (args->namelen != len)
return XFS_CMP_DIFFERENT;
@@ -89,26 +89,16 @@ xfs_ascii_ci_compname(
return result;
}
-static const struct xfs_nameops xfs_ascii_ci_nameops = {
- .hashname = xfs_ascii_ci_hashname,
- .compname = xfs_ascii_ci_compname,
-};
-
int
xfs_da_mount(
struct xfs_mount *mp)
{
struct xfs_da_geometry *dageo;
- int nodehdr_size;
ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT);
ASSERT(xfs_dir2_dirblock_bytes(&mp->m_sb) <= XFS_MAX_BLOCKSIZE);
- mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
- mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
-
- nodehdr_size = mp->m_dir_inode_ops->node_hdr_size;
mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
KM_MAYFAIL);
mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
@@ -125,6 +115,27 @@ xfs_da_mount(
dageo->fsblog = mp->m_sb.sb_blocklog;
dageo->blksize = xfs_dir2_dirblock_bytes(&mp->m_sb);
dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog;
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ dageo->node_hdr_size = sizeof(struct xfs_da3_node_hdr);
+ dageo->leaf_hdr_size = sizeof(struct xfs_dir3_leaf_hdr);
+ dageo->free_hdr_size = sizeof(struct xfs_dir3_free_hdr);
+ dageo->data_entry_offset =
+ sizeof(struct xfs_dir3_data_hdr);
+ } else {
+ dageo->node_hdr_size = sizeof(struct xfs_da_node_hdr);
+ dageo->leaf_hdr_size = sizeof(struct xfs_dir2_leaf_hdr);
+ dageo->free_hdr_size = sizeof(struct xfs_dir2_free_hdr);
+ dageo->data_entry_offset =
+ sizeof(struct xfs_dir2_data_hdr);
+ }
+ dageo->leaf_max_ents = (dageo->blksize - dageo->leaf_hdr_size) /
+ sizeof(struct xfs_dir2_leaf_entry);
+ dageo->free_max_bests = (dageo->blksize - dageo->free_hdr_size) /
+ sizeof(xfs_dir2_data_off_t);
+
+ dageo->data_first_offset = dageo->data_entry_offset +
+ xfs_dir2_data_entsize(mp, 1) +
+ xfs_dir2_data_entsize(mp, 2);
/*
* Now we've set up the block conversion variables, we can calculate the
@@ -133,7 +144,7 @@ xfs_da_mount(
dageo->datablk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_DATA_OFFSET);
dageo->leafblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_LEAF_OFFSET);
dageo->freeblk = xfs_dir2_byte_to_da(dageo, XFS_DIR2_FREE_OFFSET);
- dageo->node_ents = (dageo->blksize - nodehdr_size) /
+ dageo->node_ents = (dageo->blksize - dageo->node_hdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
dageo->magicpct = (dageo->blksize * 37) / 100;
@@ -143,15 +154,10 @@ xfs_da_mount(
dageo->fsblog = mp->m_sb.sb_blocklog;
dageo->blksize = 1 << dageo->blklog;
dageo->fsbcount = 1;
- dageo->node_ents = (dageo->blksize - nodehdr_size) /
+ dageo->node_hdr_size = mp->m_dir_geo->node_hdr_size;
+ dageo->node_ents = (dageo->blksize - dageo->node_hdr_size) /
(uint)sizeof(xfs_da_node_entry_t);
dageo->magicpct = (dageo->blksize * 37) / 100;
-
- if (xfs_sb_version_hasasciici(&mp->m_sb))
- mp->m_dirnameops = &xfs_ascii_ci_nameops;
- else
- mp->m_dirnameops = &xfs_default_nameops;
-
return 0;
}
@@ -191,10 +197,10 @@ xfs_dir_ino_validate(
{
bool ino_ok = xfs_verify_dir_ino(mp, ino);
- if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE))) {
+ if (XFS_IS_CORRUPT(mp, !ino_ok) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DIR_INO_VALIDATE)) {
xfs_warn(mp, "Invalid inode number 0x%Lx",
(unsigned long long) ino);
- XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -262,7 +268,7 @@ xfs_dir_createname(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->inumber = inum;
args->dp = dp;
args->total = total;
@@ -358,7 +364,7 @@ xfs_dir_lookup(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->dp = dp;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -430,7 +436,7 @@ xfs_dir_removename(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->inumber = ino;
args->dp = dp;
args->total = total;
@@ -491,7 +497,7 @@ xfs_dir_replace(
args->name = name->name;
args->namelen = name->len;
args->filetype = name->type;
- args->hashval = dp->i_mount->m_dirnameops->hashname(name);
+ args->hashval = xfs_dir2_hashname(dp->i_mount, name);
args->inumber = inum;
args->dp = dp;
args->total = total;
@@ -600,7 +606,9 @@ xfs_dir2_isblock(
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
- if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+ if (XFS_IS_CORRUPT(args->dp->i_mount,
+ rval != 0 &&
+ args->dp->i_d.di_size != args->geo->blksize))
return -EFSCORRUPTED;
*vp = rval;
return 0;
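
xfs_dir2_hashname() and xfs_dir2_compname() themselves are defined outside
this diff; un-STATIC-ing xfs_ascii_ci_hashname()/xfs_ascii_ci_compname()
above suggests inline wrappers along these lines (a sketch, presumed to
live in xfs_dir2_priv.h):

	static inline xfs_dahash_t
	xfs_dir2_hashname(
		struct xfs_mount	*mp,
		struct xfs_name		*name)
	{
		if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb)))
			return xfs_ascii_ci_hashname(name);
		return xfs_da_hashname(name->name, name->len);
	}

	static inline enum xfs_dacmp
	xfs_dir2_compname(
		struct xfs_da_args	*args,
		const unsigned char	*name,
		int			len)
	{
		if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb)))
			return xfs_ascii_ci_compname(args, name, len);
		return xfs_da_compname(args, name, len);
	}
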
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index f54244779492..033777e282f2 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,8 @@ struct xfs_dir2_sf_entry;
struct xfs_dir2_data_hdr;
struct xfs_dir2_data_entry;
struct xfs_dir2_data_unused;
+struct xfs_dir3_icfree_hdr;
+struct xfs_dir3_icleaf_hdr;
extern struct xfs_name xfs_name_dotdot;
@@ -27,85 +29,6 @@ extern struct xfs_name xfs_name_dotdot;
extern unsigned char xfs_mode_to_ftype(int mode);
/*
- * directory operations vector for encode/decode routines
- */
-struct xfs_dir_ops {
- int (*sf_entsize)(struct xfs_dir2_sf_hdr *hdr, int len);
- struct xfs_dir2_sf_entry *
- (*sf_nextentry)(struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep);
- uint8_t (*sf_get_ftype)(struct xfs_dir2_sf_entry *sfep);
- void (*sf_put_ftype)(struct xfs_dir2_sf_entry *sfep,
- uint8_t ftype);
- xfs_ino_t (*sf_get_ino)(struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep);
- void (*sf_put_ino)(struct xfs_dir2_sf_hdr *hdr,
- struct xfs_dir2_sf_entry *sfep,
- xfs_ino_t ino);
- xfs_ino_t (*sf_get_parent_ino)(struct xfs_dir2_sf_hdr *hdr);
- void (*sf_put_parent_ino)(struct xfs_dir2_sf_hdr *hdr,
- xfs_ino_t ino);
-
- int (*data_entsize)(int len);
- uint8_t (*data_get_ftype)(struct xfs_dir2_data_entry *dep);
- void (*data_put_ftype)(struct xfs_dir2_data_entry *dep,
- uint8_t ftype);
- __be16 * (*data_entry_tag_p)(struct xfs_dir2_data_entry *dep);
- struct xfs_dir2_data_free *
- (*data_bestfree_p)(struct xfs_dir2_data_hdr *hdr);
-
- xfs_dir2_data_aoff_t data_dot_offset;
- xfs_dir2_data_aoff_t data_dotdot_offset;
- xfs_dir2_data_aoff_t data_first_offset;
- size_t data_entry_offset;
-
- struct xfs_dir2_data_entry *
- (*data_dot_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_entry *
- (*data_dotdot_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_entry *
- (*data_first_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_entry *
- (*data_entry_p)(struct xfs_dir2_data_hdr *hdr);
- struct xfs_dir2_data_unused *
- (*data_unused_p)(struct xfs_dir2_data_hdr *hdr);
-
- int leaf_hdr_size;
- void (*leaf_hdr_to_disk)(struct xfs_dir2_leaf *to,
- struct xfs_dir3_icleaf_hdr *from);
- void (*leaf_hdr_from_disk)(struct xfs_dir3_icleaf_hdr *to,
- struct xfs_dir2_leaf *from);
- int (*leaf_max_ents)(struct xfs_da_geometry *geo);
- struct xfs_dir2_leaf_entry *
- (*leaf_ents_p)(struct xfs_dir2_leaf *lp);
-
- int node_hdr_size;
- void (*node_hdr_to_disk)(struct xfs_da_intnode *to,
- struct xfs_da3_icnode_hdr *from);
- void (*node_hdr_from_disk)(struct xfs_da3_icnode_hdr *to,
- struct xfs_da_intnode *from);
- struct xfs_da_node_entry *
- (*node_tree_p)(struct xfs_da_intnode *dap);
-
- int free_hdr_size;
- void (*free_hdr_to_disk)(struct xfs_dir2_free *to,
- struct xfs_dir3_icfree_hdr *from);
- void (*free_hdr_from_disk)(struct xfs_dir3_icfree_hdr *to,
- struct xfs_dir2_free *from);
- int (*free_max_bests)(struct xfs_da_geometry *geo);
- __be16 * (*free_bests_p)(struct xfs_dir2_free *free);
- xfs_dir2_db_t (*db_to_fdb)(struct xfs_da_geometry *geo,
- xfs_dir2_db_t db);
- int (*db_to_fdindex)(struct xfs_da_geometry *geo,
- xfs_dir2_db_t db);
-};
-
-extern const struct xfs_dir_ops *
- xfs_dir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
-extern const struct xfs_dir_ops *
- xfs_nondir_get_ops(struct xfs_mount *mp, struct xfs_inode *dp);
-
-/*
* Generic directory interface routines
*/
extern void xfs_dir_startup(void);
@@ -124,6 +47,8 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_extlen_t tot);
+extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp,
+ xfs_ino_t inum);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_extlen_t tot);
@@ -143,10 +68,7 @@ extern int xfs_dir2_isleaf(struct xfs_da_args *args, int *r);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_buf *bp);
-extern void xfs_dir2_data_freescan_int(struct xfs_da_geometry *geo,
- const struct xfs_dir_ops *ops,
- struct xfs_dir2_data_hdr *hdr, int *loghead);
-extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
+extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
struct xfs_dir2_data_hdr *hdr, int *loghead);
extern void xfs_dir2_data_log_entry(struct xfs_da_args *args,
struct xfs_buf *bp, struct xfs_dir2_data_entry *dep);
@@ -324,7 +246,7 @@ xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
#define XFS_READDIR_BUFSIZE (32768)
unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp, uint8_t filetype);
-void *xfs_dir3_data_endp(struct xfs_da_geometry *geo,
+unsigned int xfs_dir3_data_end_offset(struct xfs_da_geometry *geo,
struct xfs_dir2_data_hdr *hdr);
bool xfs_dir2_namecheck(const void *name, size_t length);
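
With the ops vector gone from this header, format-dependent sizes come from
the xfs_da_geometry fields precomputed in xfs_da_mount(). As a rough sketch
of the arithmetic (assuming 4k directory blocks and the v5 format, where
the dir3 leaf and free headers are each 64 bytes):

	/*
	 * leaf_max_ents  = (4096 - 64) / sizeof(struct xfs_dir2_leaf_entry)
	 *                = (4096 - 64) / 8 = 504 leaf entries
	 * free_max_bests = (4096 - 64) / sizeof(xfs_dir2_data_off_t)
	 *                = (4096 - 64) / 2 = 2016 best-free slots
	 */
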
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 49e4bc39e7bb..d6ced59b9567 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -123,7 +123,7 @@ xfs_dir3_block_read(
struct xfs_mount *mp = dp->i_mount;
int err;
- err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, -1, bpp,
+ err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, 0, bpp,
XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
@@ -172,7 +172,7 @@ xfs_dir2_block_need_space(
struct xfs_dir2_data_unused *enddup = NULL;
*compact = 0;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
/*
* If there are stale entries we'll use one for the leaf.
@@ -311,7 +311,7 @@ xfs_dir2_block_compact(
* This needs to happen before the next call to use_free.
*/
if (needscan)
- xfs_dir2_data_freescan(args->dp, hdr, needlog);
+ xfs_dir2_data_freescan(args->dp->i_mount, hdr, needlog);
}
/*
@@ -355,7 +355,7 @@ xfs_dir2_block_addname(
if (error)
return error;
- len = dp->d_ops->data_entsize(args->namelen);
+ len = xfs_dir2_data_entsize(dp->i_mount, args->namelen);
/*
* Set up pointers to parts of the block.
@@ -458,7 +458,7 @@ xfs_dir2_block_addname(
* This needs to happen before the next call to use_free.
*/
if (needscan) {
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
needscan = 0;
}
/*
@@ -541,14 +541,14 @@ xfs_dir2_block_addname(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, args->namelen);
- dp->d_ops->data_put_ftype(dep, args->filetype);
- tagp = dp->d_ops->data_entry_tag_p(dep);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
+ tagp = xfs_dir2_data_entry_tag_p(dp->i_mount, dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Clean up the bestfree array and log the header, tail, and entry.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, bp);
xfs_dir2_block_log_tail(tp, bp);
@@ -633,7 +633,7 @@ xfs_dir2_block_lookup(
* Fill in inode number, CI name if appropriate, release the block.
*/
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = dp->d_ops->data_get_ftype(dep);
+ args->filetype = xfs_dir2_data_get_ftype(dp->i_mount, dep);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_trans_brelse(args->trans, bp);
return error;
@@ -660,13 +660,11 @@ xfs_dir2_block_lookup_int(
int high; /* binary search high index */
int low; /* binary search low index */
int mid; /* binary search current idx */
- xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
enum xfs_dacmp cmp; /* comparison result */
dp = args->dp;
tp = args->trans;
- mp = dp->i_mount;
error = xfs_dir3_block_read(tp, dp, &bp);
if (error)
@@ -718,7 +716,7 @@ xfs_dir2_block_lookup_int(
* and buffer. If it's the first case-insensitive match, store
* the index and buffer and continue looking for an exact match.
*/
- cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
*bpp = bp;
@@ -791,7 +789,8 @@ xfs_dir2_block_removename(
needlog = needscan = 0;
xfs_dir2_data_make_free(args, bp,
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
- dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen), &needlog,
+ &needscan);
/*
* Fix up the block tail.
*/
@@ -806,7 +805,7 @@ xfs_dir2_block_removename(
* Fix up bestfree, log the header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, bp);
xfs_dir3_data_check(dp, bp);
@@ -864,7 +863,7 @@ xfs_dir2_block_replace(
* Change the inode number to the new value.
*/
dep->inumber = cpu_to_be64(args->inumber);
- dp->d_ops->data_put_ftype(dep, args->filetype);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
xfs_dir2_data_log_entry(args, bp, dep);
xfs_dir3_data_check(dp, bp);
return 0;
@@ -914,7 +913,6 @@ xfs_dir2_leaf_to_block(
__be16 *tagp; /* end of entry (tag) */
int to; /* block/leaf to index */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_to_block(args);
@@ -923,8 +921,7 @@ xfs_dir2_leaf_to_block(
tp = args->trans;
mp = dp->i_mount;
leaf = lbp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAF1_MAGIC ||
@@ -938,7 +935,7 @@ xfs_dir2_leaf_to_block(
while (dp->i_d.di_size > args->geo->blksize) {
int hdrsz;
- hdrsz = dp->d_ops->data_entry_offset;
+ hdrsz = args->geo->data_entry_offset;
bestsp = xfs_dir2_leaf_bests_p(ltp);
if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
args->geo->blksize - hdrsz) {
@@ -953,7 +950,7 @@ xfs_dir2_leaf_to_block(
* Read the data block if we don't already have it, give up if it fails.
*/
if (!dbp) {
- error = xfs_dir3_data_read(tp, dp, args->geo->datablk, -1, &dbp);
+ error = xfs_dir3_data_read(tp, dp, args->geo->datablk, 0, &dbp);
if (error)
return error;
}
@@ -1004,9 +1001,10 @@ xfs_dir2_leaf_to_block(
*/
lep = xfs_dir2_block_leaf_p(btp);
for (from = to = 0; from < leafhdr.count; from++) {
- if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (leafhdr.ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
- lep[to++] = ents[from];
+ lep[to++] = leafhdr.ents[from];
}
ASSERT(to == be32_to_cpu(btp->count));
xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1);
@@ -1014,7 +1012,7 @@ xfs_dir2_leaf_to_block(
* Scan the bestfree if we need it and log the data block header.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
/*
@@ -1039,47 +1037,38 @@ xfs_dir2_leaf_to_block(
*/
int /* error */
xfs_dir2_sf_to_block(
- xfs_da_args_t *args) /* operation arguments */
+ struct xfs_da_args *args)
{
+ struct xfs_trans *tp = args->trans;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
+ struct xfs_da_geometry *geo = args->geo;
xfs_dir2_db_t blkno; /* dir-relative block # (0) */
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
- xfs_inode_t *dp; /* incore directory inode */
int dummy; /* trash */
xfs_dir2_data_unused_t *dup; /* unused entry pointer */
int endoffset; /* end of data objects */
int error; /* error return value */
int i; /* index */
- xfs_mount_t *mp; /* filesystem mount point */
int needlog; /* need to log block header */
int needscan; /* need to scan block freespc */
int newoffset; /* offset from current entry */
- int offset; /* target block offset */
+ unsigned int offset = geo->data_entry_offset;
xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */
xfs_dir2_sf_hdr_t *oldsfp; /* old shortform header */
xfs_dir2_sf_hdr_t *sfp; /* shortform header */
__be16 *tagp; /* end of data entry */
- xfs_trans_t *tp; /* transaction pointer */
struct xfs_name name;
- struct xfs_ifork *ifp;
trace_xfs_dir2_sf_to_block(args);
- dp = args->dp;
- tp = args->trans;
- mp = dp->i_mount;
- ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
ASSERT(ifp->if_flags & XFS_IFINLINE);
- /*
- * Bomb out if the shortform directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;
@@ -1123,7 +1112,7 @@ xfs_dir2_sf_to_block(
* The whole thing is initialized to free by the init routine.
* Say we're using the leaf and tail area.
*/
- dup = dp->d_ops->data_unused_p(hdr);
+ dup = bp->b_addr + offset;
needlog = needscan = 0;
error = xfs_dir2_data_use_free(args, bp, dup, args->geo->blksize - i,
i, &needlog, &needscan);
@@ -1146,35 +1135,37 @@ xfs_dir2_sf_to_block(
be16_to_cpu(dup->length), &needlog, &needscan);
if (error)
goto out_free;
+
/*
* Create entry for .
*/
- dep = dp->d_ops->data_dot_entry_p(hdr);
+ dep = bp->b_addr + offset;
dep->inumber = cpu_to_be64(dp->i_ino);
dep->namelen = 1;
dep->name[0] = '.';
- dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
- tagp = dp->d_ops->data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)hdr);
+ xfs_dir2_data_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
+ tagp = xfs_dir2_data_entry_tag_p(mp, dep);
+ *tagp = cpu_to_be16(offset);
xfs_dir2_data_log_entry(args, bp, dep);
blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
- blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
- (char *)dep - (char *)hdr));
+ blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(offset));
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
+
/*
* Create entry for ..
*/
- dep = dp->d_ops->data_dotdot_entry_p(hdr);
- dep->inumber = cpu_to_be64(dp->d_ops->sf_get_parent_ino(sfp));
+ dep = bp->b_addr + offset;
+ dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
dep->namelen = 2;
dep->name[0] = dep->name[1] = '.';
- dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
- tagp = dp->d_ops->data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)hdr);
+ xfs_dir2_data_put_ftype(mp, dep, XFS_DIR3_FT_DIR);
+ tagp = xfs_dir2_data_entry_tag_p(mp, dep);
+ *tagp = cpu_to_be16(offset);
xfs_dir2_data_log_entry(args, bp, dep);
blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
- blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
- (char *)dep - (char *)hdr));
- offset = dp->d_ops->data_first_offset;
+ blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(offset));
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
+
/*
* Loop over existing entries, stuff them in.
*/
@@ -1183,6 +1174,7 @@ xfs_dir2_sf_to_block(
sfep = NULL;
else
sfep = xfs_dir2_sf_firstentry(sfp);
+
/*
* Need to preserve the existing offset values in the sf directory.
* Insert holes (unused entries) where necessary.
@@ -1199,40 +1191,39 @@ xfs_dir2_sf_to_block(
* There should be a hole here, make one.
*/
if (offset < newoffset) {
- dup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
+ dup = bp->b_addr + offset;
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
dup->length = cpu_to_be16(newoffset - offset);
- *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
- ((char *)dup - (char *)hdr));
+ *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(offset);
xfs_dir2_data_log_unused(args, bp, dup);
xfs_dir2_data_freeinsert(hdr,
- dp->d_ops->data_bestfree_p(hdr),
- dup, &dummy);
+ xfs_dir2_data_bestfree_p(mp, hdr),
+ dup, &dummy);
offset += be16_to_cpu(dup->length);
continue;
}
/*
* Copy a real entry.
*/
- dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
- dep->inumber = cpu_to_be64(dp->d_ops->sf_get_ino(sfp, sfep));
+ dep = bp->b_addr + newoffset;
+ dep->inumber = cpu_to_be64(xfs_dir2_sf_get_ino(mp, sfp, sfep));
dep->namelen = sfep->namelen;
- dp->d_ops->data_put_ftype(dep, dp->d_ops->sf_get_ftype(sfep));
+ xfs_dir2_data_put_ftype(mp, dep,
+ xfs_dir2_sf_get_ftype(mp, sfep));
memcpy(dep->name, sfep->name, dep->namelen);
- tagp = dp->d_ops->data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)hdr);
+ tagp = xfs_dir2_data_entry_tag_p(mp, dep);
+ *tagp = cpu_to_be16(newoffset);
xfs_dir2_data_log_entry(args, bp, dep);
name.name = sfep->name;
name.len = sfep->namelen;
- blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
- hashname(&name));
- blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
- (char *)dep - (char *)hdr));
+ blp[2 + i].hashval = cpu_to_be32(xfs_dir2_hashname(mp, &name));
+ blp[2 + i].address =
+ cpu_to_be32(xfs_dir2_byte_to_dataptr(newoffset));
offset = (int)((char *)(tagp + 1) - (char *)hdr);
if (++i == sfp->count)
sfep = NULL;
else
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
/* Done with the temporary buffer */
kmem_free(sfp);
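
The recurring conversion in this file replaces pointer casts relative to
the header with byte offsets from the buffer, so one loop body copes with
both the v4 and v5 header sizes. Condensed from the hunks above (mp, geo
and bp stand in for the enclosing function's locals), the idiom is:

	unsigned int	offset = geo->data_entry_offset; /* skip v4/v5 header */
	unsigned int	end = xfs_dir3_data_end_offset(geo, bp->b_addr);

	while (offset < end) {
		struct xfs_dir2_data_unused	*dup = bp->b_addr + offset;
		struct xfs_dir2_data_entry	*dep = bp->b_addr + offset;

		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			/* unused space: its length field says how far to skip */
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/* live entry: advance by its format-dependent size */
		offset += xfs_dir2_data_entsize(mp, dep->namelen);
	}
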
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 2c79be4c3153..b9eba8213180 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -13,6 +13,7 @@
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
@@ -23,6 +24,71 @@ static xfs_failaddr_t xfs_dir2_data_freefind_verify(
struct xfs_dir2_data_unused *dup,
struct xfs_dir2_data_free **bf_ent);
+struct xfs_dir2_data_free *
+xfs_dir2_data_bestfree_p(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return ((struct xfs_dir3_data_hdr *)hdr)->best_free;
+ return hdr->bestfree;
+}
+
+/*
+ * Pointer to an entry's tag word.
+ */
+__be16 *
+xfs_dir2_data_entry_tag_p(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep)
+{
+ return (__be16 *)((char *)dep +
+ xfs_dir2_data_entsize(mp, dep->namelen) - sizeof(__be16));
+}
+
+uint8_t
+xfs_dir2_data_get_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep)
+{
+ if (xfs_sb_version_hasftype(&mp->m_sb)) {
+ uint8_t ftype = dep->name[dep->namelen];
+
+ if (likely(ftype < XFS_DIR3_FT_MAX))
+ return ftype;
+ }
+
+ return XFS_DIR3_FT_UNKNOWN;
+}
+
+void
+xfs_dir2_data_put_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep,
+ uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+ ASSERT(dep->namelen != 0);
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ dep->name[dep->namelen] = ftype;
+}
+
+/*
+ * The number of leaf entries is limited by the size of the block and the amount
+ * of space used by the data entries. We don't know how much space is used by
+ * the data entries yet, so just ensure that the count falls somewhere inside
+ * the block right now.
+ */
+static inline unsigned int
+xfs_dir2_data_max_leaf_entries(
+ struct xfs_da_geometry *geo)
+{
+ return (geo->blksize - sizeof(struct xfs_dir2_block_tail) -
+ geo->data_entry_offset) /
+ sizeof(struct xfs_dir2_leaf_entry);
+}
+
/*
* Check the consistency of the data block.
* The input can also be a block-format directory.
@@ -38,40 +104,27 @@ __xfs_dir3_data_check(
xfs_dir2_block_tail_t *btp=NULL; /* block tail */
int count; /* count of entries found */
xfs_dir2_data_hdr_t *hdr; /* data block header */
- xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_free_t *dfp; /* bestfree entry */
- xfs_dir2_data_unused_t *dup; /* unused entry */
- char *endp; /* end of useful data */
int freeseen; /* mask of bestfrees seen */
xfs_dahash_t hash; /* hash of current name */
int i; /* leaf index */
int lastfree; /* last entry was unused */
xfs_dir2_leaf_entry_t *lep=NULL; /* block leaf entries */
struct xfs_mount *mp = bp->b_mount;
- char *p; /* current data position */
int stale; /* count of stale leaves */
struct xfs_name name;
- const struct xfs_dir_ops *ops;
- struct xfs_da_geometry *geo;
-
- geo = mp->m_dir_geo;
+ unsigned int offset;
+ unsigned int end;
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
/*
- * We can be passed a null dp here from a verifier, so we need to go the
- * hard way to get them.
+ * If this isn't a directory, something is seriously wrong. Bail out.
*/
- ops = xfs_dir_get_ops(mp, dp);
-
- /*
- * If this isn't a directory, or we don't get handed the dir ops,
- * something is seriously wrong. Bail out.
- */
- if ((dp && !S_ISDIR(VFS_I(dp)->i_mode)) ||
- ops != xfs_dir_get_ops(mp, NULL))
+ if (dp && !S_ISDIR(VFS_I(dp)->i_mode))
return __this_address;
hdr = bp->b_addr;
- p = (char *)ops->data_entry_p(hdr);
+ offset = geo->data_entry_offset;
switch (hdr->magic) {
case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
@@ -79,15 +132,8 @@ __xfs_dir3_data_check(
btp = xfs_dir2_block_tail_p(geo, hdr);
lep = xfs_dir2_block_leaf_p(btp);
- /*
- * The number of leaf entries is limited by the size of the
- * block and the amount of space used by the data entries.
- * We don't know how much space is used by the data entries yet,
- * so just ensure that the count falls somewhere inside the
- * block right now.
- */
if (be32_to_cpu(btp->count) >=
- ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry))
+ xfs_dir2_data_max_leaf_entries(geo))
return __this_address;
break;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
@@ -96,14 +142,14 @@ __xfs_dir3_data_check(
default:
return __this_address;
}
- endp = xfs_dir3_data_endp(geo, hdr);
- if (!endp)
+ end = xfs_dir3_data_end_offset(geo, hdr);
+ if (!end)
return __this_address;
/*
* Account for zero bestfree entries.
*/
- bf = ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(mp, hdr);
count = lastfree = freeseen = 0;
if (!bf[0].length) {
if (bf[0].offset)
@@ -128,8 +174,10 @@ __xfs_dir3_data_check(
/*
* Loop over the data/unused entries.
*/
- while (p < endp) {
- dup = (xfs_dir2_data_unused_t *)p;
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
+
/*
* If it's unused, look for the space in the bestfree table.
* If we find it, account for that, else make sure it
@@ -140,10 +188,10 @@ __xfs_dir3_data_check(
if (lastfree != 0)
return __this_address;
- if (endp < p + be16_to_cpu(dup->length))
+ if (offset + be16_to_cpu(dup->length) > end)
return __this_address;
if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) !=
- (char *)dup - (char *)hdr)
+ offset)
return __this_address;
fa = xfs_dir2_data_freefind_verify(hdr, bf, dup, &dfp);
if (fa)
@@ -158,7 +206,7 @@ __xfs_dir3_data_check(
be16_to_cpu(bf[2].length))
return __this_address;
}
- p += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
lastfree = 1;
continue;
}
@@ -168,17 +216,15 @@ __xfs_dir3_data_check(
* in the leaf section of the block.
* The linear search is crude but this is DEBUG code.
*/
- dep = (xfs_dir2_data_entry_t *)p;
if (dep->namelen == 0)
return __this_address;
if (xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)))
return __this_address;
- if (endp < p + ops->data_entsize(dep->namelen))
+ if (offset + xfs_dir2_data_entsize(mp, dep->namelen) > end)
return __this_address;
- if (be16_to_cpu(*ops->data_entry_tag_p(dep)) !=
- (char *)dep - (char *)hdr)
+ if (be16_to_cpu(*xfs_dir2_data_entry_tag_p(mp, dep)) != offset)
return __this_address;
- if (ops->data_get_ftype(dep) >= XFS_DIR3_FT_MAX)
+ if (xfs_dir2_data_get_ftype(mp, dep) >= XFS_DIR3_FT_MAX)
return __this_address;
count++;
lastfree = 0;
@@ -189,7 +235,7 @@ __xfs_dir3_data_check(
((char *)dep - (char *)hdr));
name.name = dep->name;
name.len = dep->namelen;
- hash = mp->m_dirnameops->hashname(&name);
+ hash = xfs_dir2_hashname(mp, &name);
for (i = 0; i < be32_to_cpu(btp->count); i++) {
if (be32_to_cpu(lep[i].address) == addr &&
be32_to_cpu(lep[i].hashval) == hash)
@@ -198,7 +244,7 @@ __xfs_dir3_data_check(
if (i >= be32_to_cpu(btp->count))
return __this_address;
}
- p += ops->data_entsize(dep->namelen);
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
}
/*
* Need to have seen all the entries and all the bestfree slots.
@@ -354,13 +400,13 @@ xfs_dir3_data_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mapped_bno,
+ unsigned int flags,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp,
- XFS_DATA_FORK, &xfs_dir3_data_buf_ops);
+ err = xfs_da_read_buf(tp, dp, bno, flags, bpp, XFS_DATA_FORK,
+ &xfs_dir3_data_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
return err;
@@ -370,10 +416,10 @@ int
xfs_dir3_data_readahead(
struct xfs_inode *dp,
xfs_dablk_t bno,
- xfs_daddr_t mapped_bno)
+ unsigned int flags)
{
- return xfs_da_reada_buf(dp, bno, mapped_bno,
- XFS_DATA_FORK, &xfs_dir3_data_reada_buf_ops);
+ return xfs_da_reada_buf(dp, bno, flags, XFS_DATA_FORK,
+ &xfs_dir3_data_reada_buf_ops);
}
/*
@@ -561,17 +607,16 @@ xfs_dir2_data_freeremove(
* Given a data block, reconstruct its bestfree map.
*/
void
-xfs_dir2_data_freescan_int(
- struct xfs_da_geometry *geo,
- const struct xfs_dir_ops *ops,
- struct xfs_dir2_data_hdr *hdr,
- int *loghead)
+xfs_dir2_data_freescan(
+ struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr,
+ int *loghead)
{
- xfs_dir2_data_entry_t *dep; /* active data entry */
- xfs_dir2_data_unused_t *dup; /* unused data entry */
- struct xfs_dir2_data_free *bf;
- char *endp; /* end of block's data */
- char *p; /* current entry pointer */
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
+ struct xfs_dir2_data_free *bf = xfs_dir2_data_bestfree_p(mp, hdr);
+ void *addr = hdr;
+ unsigned int offset = geo->data_entry_offset;
+ unsigned int end;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
@@ -581,79 +626,60 @@ xfs_dir2_data_freescan_int(
/*
* Start by clearing the table.
*/
- bf = ops->data_bestfree_p(hdr);
memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT);
*loghead = 1;
- /*
- * Set up pointers.
- */
- p = (char *)ops->data_entry_p(hdr);
- endp = xfs_dir3_data_endp(geo, hdr);
- /*
- * Loop over the block's entries.
- */
- while (p < endp) {
- dup = (xfs_dir2_data_unused_t *)p;
+
+ end = xfs_dir3_data_end_offset(geo, addr);
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = addr + offset;
+ struct xfs_dir2_data_entry *dep = addr + offset;
+
/*
* If it's a free entry, insert it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ASSERT((char *)dup - (char *)hdr ==
+ ASSERT(offset ==
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
xfs_dir2_data_freeinsert(hdr, bf, dup, loghead);
- p += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
+ continue;
}
+
/*
* For active entries, check their tags and skip them.
*/
- else {
- dep = (xfs_dir2_data_entry_t *)p;
- ASSERT((char *)dep - (char *)hdr ==
- be16_to_cpu(*ops->data_entry_tag_p(dep)));
- p += ops->data_entsize(dep->namelen);
- }
+ ASSERT(offset ==
+ be16_to_cpu(*xfs_dir2_data_entry_tag_p(mp, dep)));
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
}
}
-void
-xfs_dir2_data_freescan(
- struct xfs_inode *dp,
- struct xfs_dir2_data_hdr *hdr,
- int *loghead)
-{
- return xfs_dir2_data_freescan_int(dp->i_mount->m_dir_geo, dp->d_ops,
- hdr, loghead);
-}
-
/*
* Initialize a data block at the given block number in the directory.
* Give back the buffer for the created block.
*/
int /* error */
xfs_dir3_data_init(
- xfs_da_args_t *args, /* directory operation args */
- xfs_dir2_db_t blkno, /* logical dir block number */
- struct xfs_buf **bpp) /* output block buffer */
+ struct xfs_da_args *args, /* directory operation args */
+ xfs_dir2_db_t blkno, /* logical dir block number */
+ struct xfs_buf **bpp) /* output block buffer */
{
- struct xfs_buf *bp; /* block buffer */
- xfs_dir2_data_hdr_t *hdr; /* data block header */
- xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_data_unused_t *dup; /* unused entry pointer */
- struct xfs_dir2_data_free *bf;
- int error; /* error return value */
- int i; /* bestfree index */
- xfs_mount_t *mp; /* filesystem mount point */
- xfs_trans_t *tp; /* transaction pointer */
- int t; /* temp */
-
- dp = args->dp;
- mp = dp->i_mount;
- tp = args->trans;
+ struct xfs_trans *tp = args->trans;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da_geometry *geo = args->geo;
+ struct xfs_buf *bp;
+ struct xfs_dir2_data_hdr *hdr;
+ struct xfs_dir2_data_unused *dup;
+ struct xfs_dir2_data_free *bf;
+ int error;
+ int i;
+
/*
* Get the buffer set up for the block.
*/
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(args->geo, blkno),
- -1, &bp, XFS_DATA_FORK);
+ &bp, XFS_DATA_FORK);
if (error)
return error;
bp->b_ops = &xfs_dir3_data_buf_ops;
@@ -675,8 +701,9 @@ xfs_dir3_data_init(
} else
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
- bf = dp->d_ops->data_bestfree_p(hdr);
- bf[0].offset = cpu_to_be16(dp->d_ops->data_entry_offset);
+ bf = xfs_dir2_data_bestfree_p(mp, hdr);
+ bf[0].offset = cpu_to_be16(geo->data_entry_offset);
+ bf[0].length = cpu_to_be16(geo->blksize - geo->data_entry_offset);
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
bf[i].length = 0;
bf[i].offset = 0;
@@ -685,13 +712,11 @@ xfs_dir3_data_init(
/*
* Set up an unused entry for the block's body.
*/
- dup = dp->d_ops->data_unused_p(hdr);
+ dup = bp->b_addr + geo->data_entry_offset;
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
-
- t = args->geo->blksize - (uint)dp->d_ops->data_entry_offset;
- bf[0].length = cpu_to_be16(t);
- dup->length = cpu_to_be16(t);
+ dup->length = bf[0].length;
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
+
/*
* Log it and return it.
*/
@@ -710,6 +735,7 @@ xfs_dir2_data_log_entry(
struct xfs_buf *bp,
xfs_dir2_data_entry_t *dep) /* data entry pointer */
{
+ struct xfs_mount *mp = bp->b_mount;
struct xfs_dir2_data_hdr *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
@@ -718,7 +744,7 @@ xfs_dir2_data_log_entry(
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
xfs_trans_log_buf(args->trans, bp, (uint)((char *)dep - (char *)hdr),
- (uint)((char *)(args->dp->d_ops->data_entry_tag_p(dep) + 1) -
+ (uint)((char *)(xfs_dir2_data_entry_tag_p(mp, dep) + 1) -
(char *)hdr - 1));
}
@@ -739,8 +765,7 @@ xfs_dir2_data_log_header(
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
#endif
- xfs_trans_log_buf(args->trans, bp, 0,
- args->dp->d_ops->data_entry_offset - 1);
+ xfs_trans_log_buf(args->trans, bp, 0, args->geo->data_entry_offset - 1);
}
/*
@@ -789,11 +814,11 @@ xfs_dir2_data_make_free(
{
xfs_dir2_data_hdr_t *hdr; /* data block pointer */
xfs_dir2_data_free_t *dfp; /* bestfree pointer */
- char *endptr; /* end of data area */
int needscan; /* need to regen bestfree */
xfs_dir2_data_unused_t *newdup; /* new unused entry */
xfs_dir2_data_unused_t *postdup; /* unused entry after us */
xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
+ unsigned int end;
struct xfs_dir2_data_free *bf;
hdr = bp->b_addr;
@@ -801,14 +826,14 @@ xfs_dir2_data_make_free(
/*
* Figure out where the end of the data area is.
*/
- endptr = xfs_dir3_data_endp(args->geo, hdr);
- ASSERT(endptr != NULL);
+ end = xfs_dir3_data_end_offset(args->geo, hdr);
+ ASSERT(end != 0);
/*
* If this isn't the start of the block, then back up to
* the previous entry and see if it's free.
*/
- if (offset > args->dp->d_ops->data_entry_offset) {
+ if (offset > args->geo->data_entry_offset) {
__be16 *tagp; /* tag just before us */
tagp = (__be16 *)((char *)hdr + offset) - 1;
@@ -821,7 +846,7 @@ xfs_dir2_data_make_free(
* If this isn't the end of the block, see if the entry after
* us is free.
*/
- if ((char *)hdr + offset + len < endptr) {
+ if (offset + len < end) {
postdup =
(xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
@@ -834,7 +859,7 @@ xfs_dir2_data_make_free(
* Previous and following entries are both free,
* merge everything into a single free entry.
*/
- bf = args->dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(args->dp->i_mount, hdr);
if (prevdup && postdup) {
xfs_dir2_data_free_t *dfp2; /* another bestfree pointer */
@@ -1025,7 +1050,7 @@ xfs_dir2_data_use_free(
* Look up the entry in the bestfree table.
*/
oldlen = be16_to_cpu(dup->length);
- bf = args->dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(args->dp->i_mount, hdr);
dfp = xfs_dir2_data_freefind(hdr, bf, dup);
ASSERT(dfp || oldlen <= be16_to_cpu(bf[2].length));
/*
@@ -1149,19 +1174,22 @@ corrupt:
}
/* Find the end of the entry data in a data/block format dir block. */
-void *
-xfs_dir3_data_endp(
+unsigned int
+xfs_dir3_data_end_offset(
struct xfs_da_geometry *geo,
struct xfs_dir2_data_hdr *hdr)
{
+ void *p;
+
switch (hdr->magic) {
case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC):
- return xfs_dir2_block_leaf_p(xfs_dir2_block_tail_p(geo, hdr));
+ p = xfs_dir2_block_leaf_p(xfs_dir2_block_tail_p(geo, hdr));
+ return p - (void *)hdr;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
- return (char *)hdr + geo->blksize;
+ return geo->blksize;
default:
- return NULL;
+ return 0;
}
}
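
xfs_dir2_data_entsize() is called throughout the new code but defined
outside this diff; folding the removed v2/v3 entsize variants down to a
single feature-bit check presumably gives something like this sketch:

	static inline unsigned int
	xfs_dir2_data_entsize(
		struct xfs_mount	*mp,
		unsigned int		namelen)
	{
		unsigned int		len;

		len = offsetof(struct xfs_dir2_data_entry, name[0]) + namelen +
				sizeof(xfs_dir2_data_off_t);	/* tag word */
		if (xfs_sb_version_hasftype(&mp->m_sb))
			len += sizeof(uint8_t);			/* file type byte */
		return round_up(len, XFS_DIR2_DATA_ALIGN);
	}
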
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index a53e4585a2f3..a131b520aac7 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -24,12 +24,73 @@
* Local function declarations.
*/
static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, struct xfs_buf **lbpp,
- int *indexp, struct xfs_buf **dbpp);
+ int *indexp, struct xfs_buf **dbpp,
+ struct xfs_dir3_icleaf_hdr *leafhdr);
static void xfs_dir3_leaf_log_bests(struct xfs_da_args *args,
struct xfs_buf *bp, int first, int last);
static void xfs_dir3_leaf_log_tail(struct xfs_da_args *args,
struct xfs_buf *bp);
+void
+xfs_dir2_leaf_hdr_from_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir3_icleaf_hdr *to,
+ struct xfs_dir2_leaf *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_leaf *from3 = (struct xfs_dir3_leaf *)from;
+
+ to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
+ to->back = be32_to_cpu(from3->hdr.info.hdr.back);
+ to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
+ to->count = be16_to_cpu(from3->hdr.count);
+ to->stale = be16_to_cpu(from3->hdr.stale);
+ to->ents = from3->__ents;
+
+ ASSERT(to->magic == XFS_DIR3_LEAF1_MAGIC ||
+ to->magic == XFS_DIR3_LEAFN_MAGIC);
+ } else {
+ to->forw = be32_to_cpu(from->hdr.info.forw);
+ to->back = be32_to_cpu(from->hdr.info.back);
+ to->magic = be16_to_cpu(from->hdr.info.magic);
+ to->count = be16_to_cpu(from->hdr.count);
+ to->stale = be16_to_cpu(from->hdr.stale);
+ to->ents = from->__ents;
+
+ ASSERT(to->magic == XFS_DIR2_LEAF1_MAGIC ||
+ to->magic == XFS_DIR2_LEAFN_MAGIC);
+ }
+}
+
+void
+xfs_dir2_leaf_hdr_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_leaf *to3 = (struct xfs_dir3_leaf *)to;
+
+ ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC ||
+ from->magic == XFS_DIR3_LEAFN_MAGIC);
+
+ to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
+ to3->hdr.info.hdr.back = cpu_to_be32(from->back);
+ to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
+ to3->hdr.count = cpu_to_be16(from->count);
+ to3->hdr.stale = cpu_to_be16(from->stale);
+ } else {
+ ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC ||
+ from->magic == XFS_DIR2_LEAFN_MAGIC);
+
+ to->hdr.info.forw = cpu_to_be32(from->forw);
+ to->hdr.info.back = cpu_to_be32(from->back);
+ to->hdr.info.magic = cpu_to_be16(from->magic);
+ to->hdr.count = cpu_to_be16(from->count);
+ to->hdr.stale = cpu_to_be16(from->stale);
+ }
+}
+
/*
* Check the internal consistency of a leaf1 block.
* Pop an assert if something is wrong.
@@ -43,7 +104,7 @@ xfs_dir3_leaf1_check(
struct xfs_dir2_leaf *leaf = bp->b_addr;
struct xfs_dir3_icleaf_hdr leafhdr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
if (leafhdr.magic == XFS_DIR3_LEAF1_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -52,7 +113,7 @@ xfs_dir3_leaf1_check(
} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
return __this_address;
- return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf);
}
static inline void
@@ -76,31 +137,15 @@ xfs_dir3_leaf_check(
xfs_failaddr_t
xfs_dir3_leaf_check_int(
- struct xfs_mount *mp,
- struct xfs_inode *dp,
- struct xfs_dir3_icleaf_hdr *hdr,
- struct xfs_dir2_leaf *leaf)
+ struct xfs_mount *mp,
+ struct xfs_dir3_icleaf_hdr *hdr,
+ struct xfs_dir2_leaf *leaf)
{
- struct xfs_dir2_leaf_entry *ents;
- xfs_dir2_leaf_tail_t *ltp;
- int stale;
- int i;
- const struct xfs_dir_ops *ops;
- struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_da_geometry *geo = mp->m_dir_geo;
-
- /*
- * we can be passed a null dp here from a verifier, so we need to go the
- * hard way to get them.
- */
- ops = xfs_dir_get_ops(mp, dp);
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
+ xfs_dir2_leaf_tail_t *ltp;
+ int stale;
+ int i;
- if (!hdr) {
- ops->leaf_hdr_from_disk(&leafhdr, leaf);
- hdr = &leafhdr;
- }
-
- ents = ops->leaf_ents_p(leaf);
ltp = xfs_dir2_leaf_tail_p(geo, leaf);
/*
@@ -108,23 +153,23 @@ xfs_dir3_leaf_check_int(
* Should factor in the size of the bests table as well.
* We can deduce a value for that from di_size.
*/
- if (hdr->count > ops->leaf_max_ents(geo))
+ if (hdr->count > geo->leaf_max_ents)
return __this_address;
/* Leaves and bests don't overlap in leaf format. */
if ((hdr->magic == XFS_DIR2_LEAF1_MAGIC ||
hdr->magic == XFS_DIR3_LEAF1_MAGIC) &&
- (char *)&ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
+ (char *)&hdr->ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
return __this_address;
/* Check hash value order, count stale entries. */
for (i = stale = 0; i < hdr->count; i++) {
if (i + 1 < hdr->count) {
- if (be32_to_cpu(ents[i].hashval) >
- be32_to_cpu(ents[i + 1].hashval))
+ if (be32_to_cpu(hdr->ents[i].hashval) >
+ be32_to_cpu(hdr->ents[i + 1].hashval))
return __this_address;
}
- if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (hdr->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (hdr->stale != stale)
@@ -139,17 +184,18 @@ xfs_dir3_leaf_check_int(
*/
static xfs_failaddr_t
xfs_dir3_leaf_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_mount;
- struct xfs_dir2_leaf *leaf = bp->b_addr;
- xfs_failaddr_t fa;
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_dir3_icleaf_hdr leafhdr;
+ xfs_failaddr_t fa;
fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
if (fa)
return fa;
- return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, bp->b_addr);
+ return xfs_dir3_leaf_check_int(mp, &leafhdr, bp->b_addr);
}
static void
@@ -216,13 +262,12 @@ xfs_dir3_leaf_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t fbno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, &xfs_dir3_leaf1_buf_ops);
+ err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
+ &xfs_dir3_leaf1_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
return err;
@@ -233,13 +278,12 @@ xfs_dir3_leafn_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t fbno,
- xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
int err;
- err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, &xfs_dir3_leafn_buf_ops);
+ err = xfs_da_read_buf(tp, dp, fbno, 0, bpp, XFS_DATA_FORK,
+ &xfs_dir3_leafn_buf_ops);
if (!err && tp && *bpp)
xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
return err;
@@ -311,7 +355,7 @@ xfs_dir3_leaf_get_buf(
bno < xfs_dir2_byte_to_db(args->geo, XFS_DIR2_FREE_OFFSET));
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(args->geo, bno),
- -1, &bp, XFS_DATA_FORK);
+ &bp, XFS_DATA_FORK);
if (error)
return error;
@@ -346,7 +390,6 @@ xfs_dir2_block_to_leaf(
int needscan; /* need to rescan bestfree */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir2_data_free *bf;
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_block_to_leaf(args);
@@ -375,24 +418,24 @@ xfs_dir2_block_to_leaf(
xfs_dir3_data_check(dp, dbp);
btp = xfs_dir2_block_tail_p(args->geo, hdr);
blp = xfs_dir2_block_leaf_p(btp);
- bf = dp->d_ops->data_bestfree_p(hdr);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
/*
* Set the counts in the leaf header.
*/
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
leafhdr.count = be32_to_cpu(btp->count);
leafhdr.stale = be32_to_cpu(btp->stale);
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
/*
* Could compact these but I think we always do the conversion
* after squeezing out stale entries.
*/
- memcpy(ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, lbp, 0, leafhdr.count - 1);
+ memcpy(leafhdr.ents, blp,
+ be32_to_cpu(btp->count) * sizeof(struct xfs_dir2_leaf_entry));
+ xfs_dir3_leaf_log_ents(args, &leafhdr, lbp, 0, leafhdr.count - 1);
needscan = 0;
needlog = 1;
/*
@@ -415,7 +458,7 @@ xfs_dir2_block_to_leaf(
hdr->magic = cpu_to_be32(XFS_DIR3_DATA_MAGIC);
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
/*
* Set up leaf tail and bests table.
*/
@@ -594,7 +637,7 @@ xfs_dir2_leaf_addname(
trace_xfs_dir2_leaf_addname(args);
- error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
+ error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, &lbp);
if (error)
return error;
@@ -607,10 +650,10 @@ xfs_dir2_leaf_addname(
index = xfs_dir2_leaf_search_hash(args, lbp);
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
+ ents = leafhdr.ents;
bestsp = xfs_dir2_leaf_bests_p(ltp);
- length = dp->d_ops->data_entsize(args->namelen);
+ length = xfs_dir2_data_entsize(dp->i_mount, args->namelen);
/*
* See if there are any entries with the same hash value
@@ -773,7 +816,7 @@ xfs_dir2_leaf_addname(
else
xfs_dir3_leaf_log_bests(args, lbp, use_block, use_block);
hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
bestsp[use_block] = bf[0].length;
grown = 1;
} else {
@@ -783,13 +826,13 @@ xfs_dir2_leaf_addname(
*/
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, use_block),
- -1, &dbp);
+ 0, &dbp);
if (error) {
xfs_trans_brelse(tp, lbp);
return error;
}
hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
grown = 0;
}
/*
@@ -815,14 +858,14 @@ xfs_dir2_leaf_addname(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
- dp->d_ops->data_put_ftype(dep, args->filetype);
- tagp = dp->d_ops->data_entry_tag_p(dep);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
+ tagp = xfs_dir2_data_entry_tag_p(dp->i_mount, dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Need to scan to fix up the bestfree table.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
/*
* Need to log the data block's header.
*/
@@ -852,9 +895,9 @@ xfs_dir2_leaf_addname(
/*
* Log the leaf fields and give up the buffers.
*/
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
- xfs_dir3_leaf_log_ents(args, lbp, lfloglow, lfloghigh);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, lbp, lfloglow, lfloghigh);
xfs_dir3_leaf_check(dp, lbp);
xfs_dir3_data_check(dp, dbp);
return 0;
@@ -874,7 +917,6 @@ xfs_dir3_leaf_compact(
xfs_dir2_leaf_t *leaf; /* leaf structure */
int loglow; /* first leaf entry to log */
int to; /* target leaf index */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_inode *dp = args->dp;
leaf = bp->b_addr;
@@ -884,9 +926,9 @@ xfs_dir3_leaf_compact(
/*
* Compress out the stale entries in place.
*/
- ents = dp->d_ops->leaf_ents_p(leaf);
for (from = to = 0, loglow = -1; from < leafhdr->count; from++) {
- if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (leafhdr->ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
/*
* Only actually copy the entries that are different.
@@ -894,7 +936,7 @@ xfs_dir3_leaf_compact(
if (from > to) {
if (loglow == -1)
loglow = to;
- ents[to] = ents[from];
+ leafhdr->ents[to] = leafhdr->ents[from];
}
to++;
}
@@ -905,10 +947,10 @@ xfs_dir3_leaf_compact(
leafhdr->count -= leafhdr->stale;
leafhdr->stale = 0;
- dp->d_ops->leaf_hdr_to_disk(leaf, leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, leafhdr);
xfs_dir3_leaf_log_header(args, bp);
if (loglow != -1)
- xfs_dir3_leaf_log_ents(args, bp, loglow, to - 1);
+ xfs_dir3_leaf_log_ents(args, leafhdr, bp, loglow, to - 1);
}
/*
@@ -1037,6 +1079,7 @@ xfs_dir3_leaf_log_bests(
void
xfs_dir3_leaf_log_ents(
struct xfs_da_args *args,
+ struct xfs_dir3_icleaf_hdr *hdr,
struct xfs_buf *bp,
int first,
int last)
@@ -1044,16 +1087,14 @@ xfs_dir3_leaf_log_ents(
xfs_dir2_leaf_entry_t *firstlep; /* pointer to first entry */
xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */
struct xfs_dir2_leaf *leaf = bp->b_addr;
- struct xfs_dir2_leaf_entry *ents;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAF1_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC));
- ents = args->dp->d_ops->leaf_ents_p(leaf);
- firstlep = &ents[first];
- lastlep = &ents[last];
+ firstlep = &hdr->ents[first];
+ lastlep = &hdr->ents[last];
xfs_trans_log_buf(args->trans, bp,
(uint)((char *)firstlep - (char *)leaf),
(uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1));
@@ -1076,7 +1117,7 @@ xfs_dir3_leaf_log_header(
xfs_trans_log_buf(args->trans, bp,
(uint)((char *)&leaf->hdr - (char *)leaf),
- args->dp->d_ops->leaf_hdr_size - 1);
+ args->geo->leaf_hdr_size - 1);
}
/*
@@ -1115,28 +1156,27 @@ xfs_dir2_leaf_lookup(
int error; /* error return code */
int index; /* found entry index */
struct xfs_buf *lbp; /* leaf buffer */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_lookup(args);
/*
* Look up name in the leaf block, returning both buffers and index.
*/
- if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+ error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp, &leafhdr);
+ if (error)
return error;
- }
+
tp = args->trans;
dp = args->dp;
xfs_dir3_leaf_check(dp, lbp);
- leaf = lbp->b_addr;
- ents = dp->d_ops->leaf_ents_p(leaf);
+
/*
* Get to the leaf entry and contained data entry address.
*/
- lep = &ents[index];
+ lep = &leafhdr.ents[index];
/*
* Point to the data entry.
@@ -1148,7 +1188,7 @@ xfs_dir2_leaf_lookup(
* Return the found inode number & CI name if appropriate
*/
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = dp->d_ops->data_get_ftype(dep);
+ args->filetype = xfs_dir2_data_get_ftype(dp->i_mount, dep);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_trans_brelse(tp, dbp);
xfs_trans_brelse(tp, lbp);
@@ -1166,7 +1206,8 @@ xfs_dir2_leaf_lookup_int(
xfs_da_args_t *args, /* operation arguments */
struct xfs_buf **lbpp, /* out: leaf buffer */
int *indexp, /* out: index in leaf block */
- struct xfs_buf **dbpp) /* out: data buffer */
+ struct xfs_buf **dbpp, /* out: data buffer */
+ struct xfs_dir3_icleaf_hdr *leafhdr)
{
xfs_dir2_db_t curdb = -1; /* current data block number */
struct xfs_buf *dbp = NULL; /* data buffer */
@@ -1182,22 +1223,19 @@ xfs_dir2_leaf_lookup_int(
xfs_trans_t *tp; /* transaction pointer */
xfs_dir2_db_t cidb = -1; /* case match data block no. */
enum xfs_dacmp cmp; /* name compare result */
- struct xfs_dir2_leaf_entry *ents;
- struct xfs_dir3_icleaf_hdr leafhdr;
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
+ error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, &lbp);
if (error)
return error;
*lbpp = lbp;
leaf = lbp->b_addr;
xfs_dir3_leaf_check(dp, lbp);
- ents = dp->d_ops->leaf_ents_p(leaf);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, leafhdr, leaf);
/*
* Look for the first leaf entry with our hash value.
@@ -1207,8 +1245,9 @@ xfs_dir2_leaf_lookup_int(
* Loop over all the entries with the right hash value
* looking to match the name.
*/
- for (lep = &ents[index];
- index < leafhdr.count && be32_to_cpu(lep->hashval) == args->hashval;
+ for (lep = &leafhdr->ents[index];
+ index < leafhdr->count &&
+ be32_to_cpu(lep->hashval) == args->hashval;
lep++, index++) {
/*
* Skip over stale leaf entries.
@@ -1229,7 +1268,7 @@ xfs_dir2_leaf_lookup_int(
xfs_trans_brelse(tp, dbp);
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, newdb),
- -1, &dbp);
+ 0, &dbp);
if (error) {
xfs_trans_brelse(tp, lbp);
return error;
@@ -1247,7 +1286,7 @@ xfs_dir2_leaf_lookup_int(
* and buffer. If it's the first case-insensitive match, store
* the index and buffer and continue looking for an exact match.
*/
- cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
*indexp = index;
@@ -1271,7 +1310,7 @@ xfs_dir2_leaf_lookup_int(
xfs_trans_brelse(tp, dbp);
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, cidb),
- -1, &dbp);
+ 0, &dbp);
if (error) {
xfs_trans_brelse(tp, lbp);
return error;
@@ -1297,6 +1336,7 @@ int /* error */
xfs_dir2_leaf_removename(
xfs_da_args_t *args) /* operation arguments */
{
+ struct xfs_da_geometry *geo = args->geo;
__be16 *bestsp; /* leaf block best freespace */
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
@@ -1314,7 +1354,6 @@ xfs_dir2_leaf_removename(
int needscan; /* need to rescan data frees */
xfs_dir2_data_off_t oldbest; /* old value of best free */
struct xfs_dir2_data_free *bf; /* bestfree table */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_removename(args);
@@ -1322,51 +1361,54 @@ xfs_dir2_leaf_removename(
/*
* Lookup the leaf entry, get the leaf and data blocks read in.
*/
- if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+ error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp, &leafhdr);
+ if (error)
return error;
- }
+
dp = args->dp;
leaf = lbp->b_addr;
hdr = dbp->b_addr;
xfs_dir3_data_check(dp, dbp);
- bf = dp->d_ops->data_bestfree_p(hdr);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
+
/*
* Point to the leaf entry, use that to point to the data entry.
*/
- lep = &ents[index];
- db = xfs_dir2_dataptr_to_db(args->geo, be32_to_cpu(lep->address));
+ lep = &leafhdr.ents[index];
+ db = xfs_dir2_dataptr_to_db(geo, be32_to_cpu(lep->address));
dep = (xfs_dir2_data_entry_t *)((char *)hdr +
- xfs_dir2_dataptr_to_off(args->geo, be32_to_cpu(lep->address)));
+ xfs_dir2_dataptr_to_off(geo, be32_to_cpu(lep->address)));
needscan = needlog = 0;
oldbest = be16_to_cpu(bf[0].length);
- ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
+ ltp = xfs_dir2_leaf_tail_p(geo, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
- if (be16_to_cpu(bestsp[db]) != oldbest)
+ if (be16_to_cpu(bestsp[db]) != oldbest) {
+ xfs_buf_corruption_error(lbp);
return -EFSCORRUPTED;
+ }
/*
* Mark the former data entry unused.
*/
xfs_dir2_data_make_free(args, dbp,
(xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
- dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen), &needlog,
+ &needscan);
/*
* We just mark the leaf entry stale by putting a null in it.
*/
leafhdr.stale++;
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
- xfs_dir3_leaf_log_ents(args, lbp, index, index);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, lbp, index, index);
/*
* Scan the freespace in the data block again if necessary,
* log the data block header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
/*
@@ -1382,8 +1424,8 @@ xfs_dir2_leaf_removename(
* If the data block is now empty then get rid of the data block.
*/
if (be16_to_cpu(bf[0].length) ==
- args->geo->blksize - dp->d_ops->data_entry_offset) {
- ASSERT(db != args->geo->datablk);
+ geo->blksize - geo->data_entry_offset) {
+ ASSERT(db != geo->datablk);
if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
/*
* Nope, can't get rid of it because it caused
@@ -1425,7 +1467,7 @@ xfs_dir2_leaf_removename(
/*
* If the data block was not the first one, drop it.
*/
- else if (db != args->geo->datablk)
+ else if (db != geo->datablk)
dbp = NULL;
xfs_dir3_leaf_check(dp, lbp);
@@ -1448,26 +1490,24 @@ xfs_dir2_leaf_replace(
int error; /* error return code */
int index; /* index of leaf entry */
struct xfs_buf *lbp; /* leaf buffer */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir3_icleaf_hdr leafhdr;
trace_xfs_dir2_leaf_replace(args);
/*
* Look up the entry.
*/
- if ((error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp))) {
+ error = xfs_dir2_leaf_lookup_int(args, &lbp, &index, &dbp, &leafhdr);
+ if (error)
return error;
- }
+
dp = args->dp;
- leaf = lbp->b_addr;
- ents = dp->d_ops->leaf_ents_p(leaf);
/*
* Point to the leaf entry, get data address from it.
*/
- lep = &ents[index];
+ lep = &leafhdr.ents[index];
/*
* Point to the data entry.
*/
@@ -1479,7 +1519,7 @@ xfs_dir2_leaf_replace(
* Put the new inode number in, log it.
*/
dep->inumber = cpu_to_be64(args->inumber);
- dp->d_ops->data_put_ftype(dep, args->filetype);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
tp = args->trans;
xfs_dir2_data_log_entry(args, dbp, dep);
xfs_dir3_leaf_check(dp, lbp);
@@ -1501,21 +1541,17 @@ xfs_dir2_leaf_search_hash(
xfs_dahash_t hashwant; /* hash value looking for */
int high; /* high leaf index */
int low; /* low leaf index */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
int mid = 0; /* current leaf index */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
- leaf = lbp->b_addr;
- ents = args->dp->d_ops->leaf_ents_p(leaf);
- args->dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(args->dp->i_mount, &leafhdr, lbp->b_addr);
/*
* Note, the table cannot be empty, so we have to go through the loop.
* Binary search the leaf entries looking for our hash value.
*/
- for (lep = ents, low = 0, high = leafhdr.count - 1,
+ for (lep = leafhdr.ents, low = 0, high = leafhdr.count - 1,
hashwant = args->hashval;
low <= high; ) {
mid = (low + high) >> 1;
@@ -1552,6 +1588,7 @@ xfs_dir2_leaf_trim_data(
struct xfs_buf *lbp, /* leaf buffer */
xfs_dir2_db_t db) /* data block number */
{
+ struct xfs_da_geometry *geo = args->geo;
__be16 *bestsp; /* leaf bests table */
struct xfs_buf *dbp; /* data block buffer */
xfs_inode_t *dp; /* incore directory inode */
@@ -1565,23 +1602,23 @@ xfs_dir2_leaf_trim_data(
/*
* Read the offending data block. We need its buffer.
*/
- error = xfs_dir3_data_read(tp, dp, xfs_dir2_db_to_da(args->geo, db),
- -1, &dbp);
+ error = xfs_dir3_data_read(tp, dp, xfs_dir2_db_to_da(geo, db), 0, &dbp);
if (error)
return error;
leaf = lbp->b_addr;
- ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
+ ltp = xfs_dir2_leaf_tail_p(geo, leaf);
#ifdef DEBUG
{
struct xfs_dir2_data_hdr *hdr = dbp->b_addr;
- struct xfs_dir2_data_free *bf = dp->d_ops->data_bestfree_p(hdr);
+ struct xfs_dir2_data_free *bf =
+ xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC));
ASSERT(be16_to_cpu(bf[0].length) ==
- args->geo->blksize - dp->d_ops->data_entry_offset);
+ geo->blksize - geo->data_entry_offset);
ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
}
#endif
@@ -1639,7 +1676,6 @@ xfs_dir2_node_to_leaf(
int error; /* error return code */
struct xfs_buf *fbp; /* buffer for freespace block */
xfs_fileoff_t fo; /* freespace file offset */
- xfs_dir2_free_t *free; /* freespace structure */
struct xfs_buf *lbp; /* buffer for leaf block */
xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */
xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -1697,7 +1733,7 @@ xfs_dir2_node_to_leaf(
return 0;
lbp = state->path.blk[0].bp;
leaf = lbp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
@@ -1708,8 +1744,7 @@ xfs_dir2_node_to_leaf(
error = xfs_dir2_free_read(tp, dp, args->geo->freeblk, &fbp);
if (error)
return error;
- free = fbp->b_addr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(mp, &freehdr, fbp->b_addr);
ASSERT(!freehdr.firstdb);
@@ -1743,10 +1778,10 @@ xfs_dir2_node_to_leaf(
/*
* Set up the leaf bests table.
*/
- memcpy(xfs_dir2_leaf_bests_p(ltp), dp->d_ops->free_bests_p(free),
+ memcpy(xfs_dir2_leaf_bests_p(ltp), freehdr.bests,
freehdr.nvalid * sizeof(xfs_dir2_data_off_t));
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(mp, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, lbp);
xfs_dir3_leaf_log_bests(args, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
xfs_dir3_leaf_log_tail(args, lbp);
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 705c4f562758..a0cc5e240306 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -34,6 +34,25 @@ static int xfs_dir2_leafn_remove(xfs_da_args_t *args, struct xfs_buf *bp,
int *rval);
/*
+ * Convert data space db to the corresponding free db.
+ */
+static xfs_dir2_db_t
+xfs_dir2_db_to_fdb(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
+{
+ return xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) +
+ (db / geo->free_max_bests);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static int
+xfs_dir2_db_to_fdindex(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
+{
+ return db % geo->free_max_bests;
+}
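/*
 * Worked example (numbers assumed, not taken from this patch): with
 * geo->free_max_bests == 64, data block 200 maps to free block
 * xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET) + 3 (200 / 64), at
 * bests index 8 (200 % 64).
 */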
+
+/*
* Check internal consistency of a leafn block.
*/
#ifdef DEBUG
@@ -45,7 +64,7 @@ xfs_dir3_leafn_check(
struct xfs_dir2_leaf *leaf = bp->b_addr;
struct xfs_dir3_icleaf_hdr leafhdr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
if (leafhdr.magic == XFS_DIR3_LEAFN_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
@@ -54,7 +73,7 @@ xfs_dir3_leafn_check(
} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
return __this_address;
- return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
+ return xfs_dir3_leaf_check_int(dp->i_mount, &leafhdr, leaf);
}
static inline void
@@ -160,10 +179,9 @@ xfs_dir3_free_header_check(
struct xfs_buf *bp)
{
struct xfs_mount *mp = dp->i_mount;
+ int maxbests = mp->m_dir_geo->free_max_bests;
unsigned int firstdb;
- int maxbests;
- maxbests = dp->d_ops->free_max_bests(mp->m_dir_geo);
firstdb = (xfs_dir2_da_to_db(mp->m_dir_geo, fbno) -
xfs_dir2_byte_to_db(mp->m_dir_geo, XFS_DIR2_FREE_OFFSET)) *
maxbests;
@@ -194,14 +212,14 @@ __xfs_dir3_free_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t fbno,
- xfs_daddr_t mappedbno,
+ unsigned int flags,
struct xfs_buf **bpp)
{
xfs_failaddr_t fa;
int err;
- err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
- XFS_DATA_FORK, &xfs_dir3_free_buf_ops);
+ err = xfs_da_read_buf(tp, dp, fbno, flags, bpp, XFS_DATA_FORK,
+ &xfs_dir3_free_buf_ops);
if (err || !*bpp)
return err;
@@ -220,6 +238,58 @@ __xfs_dir3_free_read(
return 0;
}
+void
+xfs_dir2_free_hdr_from_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir3_icfree_hdr *to,
+ struct xfs_dir2_free *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_free *from3 = (struct xfs_dir3_free *)from;
+
+ to->magic = be32_to_cpu(from3->hdr.hdr.magic);
+ to->firstdb = be32_to_cpu(from3->hdr.firstdb);
+ to->nvalid = be32_to_cpu(from3->hdr.nvalid);
+ to->nused = be32_to_cpu(from3->hdr.nused);
+ to->bests = from3->bests;
+
+ ASSERT(to->magic == XFS_DIR3_FREE_MAGIC);
+ } else {
+ to->magic = be32_to_cpu(from->hdr.magic);
+ to->firstdb = be32_to_cpu(from->hdr.firstdb);
+ to->nvalid = be32_to_cpu(from->hdr.nvalid);
+ to->nused = be32_to_cpu(from->hdr.nused);
+ to->bests = from->bests;
+
+ ASSERT(to->magic == XFS_DIR2_FREE_MAGIC);
+ }
+}
+
+static void
+xfs_dir2_free_hdr_to_disk(
+ struct xfs_mount *mp,
+ struct xfs_dir2_free *to,
+ struct xfs_dir3_icfree_hdr *from)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dir3_free *to3 = (struct xfs_dir3_free *)to;
+
+ ASSERT(from->magic == XFS_DIR3_FREE_MAGIC);
+
+ to3->hdr.hdr.magic = cpu_to_be32(from->magic);
+ to3->hdr.firstdb = cpu_to_be32(from->firstdb);
+ to3->hdr.nvalid = cpu_to_be32(from->nvalid);
+ to3->hdr.nused = cpu_to_be32(from->nused);
+ } else {
+ ASSERT(from->magic == XFS_DIR2_FREE_MAGIC);
+
+ to->hdr.magic = cpu_to_be32(from->magic);
+ to->hdr.firstdb = cpu_to_be32(from->firstdb);
+ to->hdr.nvalid = cpu_to_be32(from->nvalid);
+ to->hdr.nused = cpu_to_be32(from->nused);
+ }
+}
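/*
 * Minimal round-trip sketch (buffer setup assumed; this mirrors how
 * xfs_dir2_node_add_datablk below uses the pair): decode the on-disk
 * header once, update the incore copy, then encode and log it.
 *
 *	struct xfs_dir3_icfree_hdr hdr;
 *
 *	xfs_dir2_free_hdr_from_disk(mp, &hdr, fbp->b_addr);
 *	hdr.nused++;
 *	xfs_dir2_free_hdr_to_disk(mp, fbp->b_addr, &hdr);
 *	xfs_dir2_free_log_header(args, fbp);
 */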
+
int
xfs_dir2_free_read(
struct xfs_trans *tp,
@@ -227,7 +297,7 @@ xfs_dir2_free_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
- return __xfs_dir3_free_read(tp, dp, fbno, -1, bpp);
+ return __xfs_dir3_free_read(tp, dp, fbno, 0, bpp);
}
static int
@@ -237,7 +307,7 @@ xfs_dir2_free_try_read(
xfs_dablk_t fbno,
struct xfs_buf **bpp)
{
- return __xfs_dir3_free_read(tp, dp, fbno, -2, bpp);
+ return __xfs_dir3_free_read(tp, dp, fbno, XFS_DABUF_MAP_HOLE_OK, bpp);
}
static int
@@ -254,7 +324,7 @@ xfs_dir3_free_get_buf(
struct xfs_dir3_icfree_hdr hdr;
error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(args->geo, fbno),
- -1, &bp, XFS_DATA_FORK);
+ &bp, XFS_DATA_FORK);
if (error)
return error;
@@ -278,7 +348,7 @@ xfs_dir3_free_get_buf(
uuid_copy(&hdr3->hdr.uuid, &mp->m_sb.sb_meta_uuid);
} else
hdr.magic = XFS_DIR2_FREE_MAGIC;
- dp->d_ops->free_hdr_to_disk(bp->b_addr, &hdr);
+ xfs_dir2_free_hdr_to_disk(mp, bp->b_addr, &hdr);
*bpp = bp;
return 0;
}
@@ -289,21 +359,19 @@ xfs_dir3_free_get_buf(
STATIC void
xfs_dir2_free_log_bests(
struct xfs_da_args *args,
+ struct xfs_dir3_icfree_hdr *hdr,
struct xfs_buf *bp,
int first, /* first entry to log */
int last) /* last entry to log */
{
- xfs_dir2_free_t *free; /* freespace structure */
- __be16 *bests;
+ struct xfs_dir2_free *free = bp->b_addr;
- free = bp->b_addr;
- bests = args->dp->d_ops->free_bests_p(free);
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
xfs_trans_log_buf(args->trans, bp,
- (uint)((char *)&bests[first] - (char *)free),
- (uint)((char *)&bests[last] - (char *)free +
- sizeof(bests[0]) - 1));
+ (char *)&hdr->bests[first] - (char *)free,
+ (char *)&hdr->bests[last] - (char *)free +
+ sizeof(hdr->bests[0]) - 1);
}
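/*
 * Example of the byte range logged above (v5 geometry assumed, with
 * hdr->bests starting 64 bytes into the block): first = 3, last = 4
 * logs bytes 70 through 73 of the buffer, exactly the two __be16
 * slots that changed.
 */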
/*
@@ -322,7 +390,7 @@ xfs_dir2_free_log_header(
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
#endif
xfs_trans_log_buf(args->trans, bp, 0,
- args->dp->d_ops->free_hdr_size - 1);
+ args->geo->free_hdr_size - 1);
}
/*
@@ -339,14 +407,12 @@ xfs_dir2_leaf_to_node(
int error; /* error return value */
struct xfs_buf *fbp; /* freespace buffer */
xfs_dir2_db_t fdb; /* freespace block number */
- xfs_dir2_free_t *free; /* freespace structure */
__be16 *from; /* pointer to freespace entry */
int i; /* leaf freespace index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
int n; /* count of live freespc ents */
xfs_dir2_data_off_t off; /* freespace entry value */
- __be16 *to; /* pointer to freespace entry */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir3_icfree_hdr freehdr;
@@ -368,24 +434,25 @@ xfs_dir2_leaf_to_node(
if (error)
return error;
- free = fbp->b_addr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, fbp->b_addr);
leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(args->geo, leaf);
if (be32_to_cpu(ltp->bestcount) >
- (uint)dp->i_d.di_size / args->geo->blksize)
+ (uint)dp->i_d.di_size / args->geo->blksize) {
+ xfs_buf_corruption_error(lbp);
return -EFSCORRUPTED;
+ }
/*
* Copy freespace entries from the leaf block to the new block.
* Count active entries.
*/
from = xfs_dir2_leaf_bests_p(ltp);
- to = dp->d_ops->free_bests_p(free);
- for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) {
- if ((off = be16_to_cpu(*from)) != NULLDATAOFF)
+ for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++) {
+ off = be16_to_cpu(*from);
+ if (off != NULLDATAOFF)
n++;
- *to = cpu_to_be16(off);
+ freehdr.bests[i] = cpu_to_be16(off);
}
/*
@@ -394,8 +461,8 @@ xfs_dir2_leaf_to_node(
freehdr.nused = n;
freehdr.nvalid = be32_to_cpu(ltp->bestcount);
- dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
- xfs_dir2_free_log_bests(args, fbp, 0, freehdr.nvalid - 1);
+ xfs_dir2_free_hdr_to_disk(dp->i_mount, fbp->b_addr, &freehdr);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, 0, freehdr.nvalid - 1);
xfs_dir2_free_log_header(args, fbp);
/*
@@ -438,15 +505,17 @@ xfs_dir2_leafn_add(
trace_xfs_dir2_leafn_add(args, index);
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
+ ents = leafhdr.ents;
/*
* Quick check just to make sure we are not going to index
* into other people's memory.
*/
- if (index < 0)
+ if (index < 0) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* If there are already the maximum number of leaf entries in
@@ -455,7 +524,7 @@ xfs_dir2_leafn_add(
* a compact.
*/
- if (leafhdr.count == dp->d_ops->leaf_max_ents(args->geo)) {
+ if (leafhdr.count == args->geo->leaf_max_ents) {
if (!leafhdr.stale)
return -ENOSPC;
compact = leafhdr.stale > 1;
@@ -493,9 +562,9 @@ xfs_dir2_leafn_add(
lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(args->geo,
args->blkno, args->index));
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, bp);
- xfs_dir3_leaf_log_ents(args, bp, lfloglow, lfloghigh);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, bp, lfloglow, lfloghigh);
xfs_dir3_leaf_check(dp, bp);
return 0;
}
@@ -509,10 +578,9 @@ xfs_dir2_free_hdr_check(
{
struct xfs_dir3_icfree_hdr hdr;
- dp->d_ops->free_hdr_from_disk(&hdr, bp->b_addr);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &hdr, bp->b_addr);
- ASSERT((hdr.firstdb %
- dp->d_ops->free_max_bests(dp->i_mount->m_dir_geo)) == 0);
+ ASSERT((hdr.firstdb % dp->i_mount->m_dir_geo->free_max_bests) == 0);
ASSERT(hdr.firstdb <= db);
ASSERT(db < hdr.firstdb + hdr.nvalid);
}
@@ -530,11 +598,9 @@ xfs_dir2_leaf_lasthash(
struct xfs_buf *bp, /* leaf buffer */
int *count) /* count of entries in leaf */
{
- struct xfs_dir2_leaf *leaf = bp->b_addr;
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, bp->b_addr);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC ||
@@ -545,9 +611,7 @@ xfs_dir2_leaf_lasthash(
*count = leafhdr.count;
if (!leafhdr.count)
return 0;
-
- ents = dp->d_ops->leaf_ents_p(leaf);
- return be32_to_cpu(ents[leafhdr.count - 1].hashval);
+ return be32_to_cpu(leafhdr.ents[leafhdr.count - 1].hashval);
}
/*
@@ -576,15 +640,13 @@ xfs_dir2_leafn_lookup_for_addname(
xfs_dir2_db_t newdb; /* new data block number */
xfs_dir2_db_t newfdb; /* new free block number */
xfs_trans_t *tp; /* transaction pointer */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
xfs_dir3_leaf_check(dp, bp);
ASSERT(leafhdr.count > 0);
@@ -604,11 +666,11 @@ xfs_dir2_leafn_lookup_for_addname(
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC) ||
free->hdr.magic == cpu_to_be32(XFS_DIR3_FREE_MAGIC));
}
- length = dp->d_ops->data_entsize(args->namelen);
+ length = xfs_dir2_data_entsize(mp, args->namelen);
/*
* Loop over leaf entries with the right hash value.
*/
- for (lep = &ents[index];
+ for (lep = &leafhdr.ents[index];
index < leafhdr.count && be32_to_cpu(lep->hashval) == args->hashval;
lep++, index++) {
/*
@@ -630,14 +692,14 @@ xfs_dir2_leafn_lookup_for_addname(
* in hand, take a look at it.
*/
if (newdb != curdb) {
- __be16 *bests;
+ struct xfs_dir3_icfree_hdr freehdr;
curdb = newdb;
/*
* Convert the data block to the free block
* holding its freespace information.
*/
- newfdb = dp->d_ops->db_to_fdb(args->geo, newdb);
+ newfdb = xfs_dir2_db_to_fdb(args->geo, newdb);
/*
* If it's not the one we have in hand, read it in.
*/
@@ -661,20 +723,20 @@ xfs_dir2_leafn_lookup_for_addname(
/*
* Get the index for our entry.
*/
- fi = dp->d_ops->db_to_fdindex(args->geo, curdb);
+ fi = xfs_dir2_db_to_fdindex(args->geo, curdb);
/*
* If it has room, return it.
*/
- bests = dp->d_ops->free_bests_p(free);
- if (unlikely(bests[fi] == cpu_to_be16(NULLDATAOFF))) {
- XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
- XFS_ERRLEVEL_LOW, mp);
+ xfs_dir2_free_hdr_from_disk(mp, &freehdr, free);
+ if (XFS_IS_CORRUPT(mp,
+ freehdr.bests[fi] ==
+ cpu_to_be16(NULLDATAOFF))) {
if (curfdb != newfdb)
xfs_trans_brelse(tp, curbp);
return -EFSCORRUPTED;
}
curfdb = newfdb;
- if (be16_to_cpu(bests[fi]) >= length)
+ if (be16_to_cpu(freehdr.bests[fi]) >= length)
goto out;
}
}
@@ -728,19 +790,19 @@ xfs_dir2_leafn_lookup_for_entry(
xfs_dir2_db_t newdb; /* new data block number */
xfs_trans_t *tp; /* transaction pointer */
enum xfs_dacmp cmp; /* comparison result */
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir3_icleaf_hdr leafhdr;
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(mp, &leafhdr, leaf);
xfs_dir3_leaf_check(dp, bp);
- if (leafhdr.count <= 0)
+ if (leafhdr.count <= 0) {
+ xfs_buf_corruption_error(bp);
return -EFSCORRUPTED;
+ }
/*
* Look up the hash value in the leaf entries.
@@ -756,7 +818,7 @@ xfs_dir2_leafn_lookup_for_entry(
/*
* Loop over leaf entries with the right hash value.
*/
- for (lep = &ents[index];
+ for (lep = &leafhdr.ents[index];
index < leafhdr.count && be32_to_cpu(lep->hashval) == args->hashval;
lep++, index++) {
/*
@@ -795,7 +857,7 @@ xfs_dir2_leafn_lookup_for_entry(
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo,
newdb),
- -1, &curbp);
+ 0, &curbp);
if (error)
return error;
}
@@ -813,7 +875,7 @@ xfs_dir2_leafn_lookup_for_entry(
* EEXIST immediately. If it's the first case-insensitive
* match, store the block & inode number and continue looking.
*/
- cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ cmp = xfs_dir2_compname(args, dep->name, dep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
/* If there is a CI match block, drop it */
if (args->cmpresult != XFS_CMP_DIFFERENT &&
@@ -821,7 +883,7 @@ xfs_dir2_leafn_lookup_for_entry(
xfs_trans_brelse(tp, state->extrablk.bp);
args->cmpresult = cmp;
args->inumber = be64_to_cpu(dep->inumber);
- args->filetype = dp->d_ops->data_get_ftype(dep);
+ args->filetype = xfs_dir2_data_get_ftype(mp, dep);
*indexp = index;
state->extravalid = 1;
state->extrablk.bp = curbp;
@@ -911,7 +973,7 @@ xfs_dir3_leafn_moveents(
if (start_d < dhdr->count) {
memmove(&dents[start_d + count], &dents[start_d],
(dhdr->count - start_d) * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, bp_d, start_d + count,
+ xfs_dir3_leaf_log_ents(args, dhdr, bp_d, start_d + count,
count + dhdr->count - 1);
}
/*
@@ -933,7 +995,7 @@ xfs_dir3_leafn_moveents(
*/
memcpy(&dents[start_d], &sents[start_s],
count * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, bp_d, start_d, start_d + count - 1);
+ xfs_dir3_leaf_log_ents(args, dhdr, bp_d, start_d, start_d + count - 1);
/*
* If there are source entries after the ones we copied,
@@ -942,7 +1004,8 @@ xfs_dir3_leafn_moveents(
if (start_s + count < shdr->count) {
memmove(&sents[start_s], &sents[start_s + count],
count * sizeof(xfs_dir2_leaf_entry_t));
- xfs_dir3_leaf_log_ents(args, bp_s, start_s, start_s + count - 1);
+ xfs_dir3_leaf_log_ents(args, shdr, bp_s, start_s,
+ start_s + count - 1);
}
/*
@@ -971,10 +1034,10 @@ xfs_dir2_leafn_order(
struct xfs_dir3_icleaf_hdr hdr1;
struct xfs_dir3_icleaf_hdr hdr2;
- dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
- dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
- ents1 = dp->d_ops->leaf_ents_p(leaf1);
- ents2 = dp->d_ops->leaf_ents_p(leaf2);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr1, leaf1);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr2, leaf2);
+ ents1 = hdr1.ents;
+ ents2 = hdr2.ents;
if (hdr1.count > 0 && hdr2.count > 0 &&
(be32_to_cpu(ents2[0].hashval) < be32_to_cpu(ents1[0].hashval) ||
@@ -1024,10 +1087,10 @@ xfs_dir2_leafn_rebalance(
leaf1 = blk1->bp->b_addr;
leaf2 = blk2->bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&hdr1, leaf1);
- dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf2);
- ents1 = dp->d_ops->leaf_ents_p(leaf1);
- ents2 = dp->d_ops->leaf_ents_p(leaf2);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr1, leaf1);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr2, leaf2);
+ ents1 = hdr1.ents;
+ ents2 = hdr2.ents;
oldsum = hdr1.count + hdr2.count;
#if defined(DEBUG) || defined(XFS_WARN)
@@ -1073,8 +1136,8 @@ xfs_dir2_leafn_rebalance(
ASSERT(hdr1.stale + hdr2.stale == oldstale);
/* log the changes made when moving the entries */
- dp->d_ops->leaf_hdr_to_disk(leaf1, &hdr1);
- dp->d_ops->leaf_hdr_to_disk(leaf2, &hdr2);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf1, &hdr1);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf2, &hdr2);
xfs_dir3_leaf_log_header(args, blk1->bp);
xfs_dir3_leaf_log_header(args, blk2->bp);
@@ -1120,19 +1183,17 @@ xfs_dir3_data_block_free(
int longest)
{
int logfree = 0;
- __be16 *bests;
struct xfs_dir3_icfree_hdr freehdr;
struct xfs_inode *dp = args->dp;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
- bests = dp->d_ops->free_bests_p(free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, free);
if (hdr) {
/*
* Data block is not empty, just set the free entry to the new
* value.
*/
- bests[findex] = cpu_to_be16(longest);
- xfs_dir2_free_log_bests(args, fbp, findex, findex);
+ freehdr.bests[findex] = cpu_to_be16(longest);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, findex, findex);
return 0;
}
@@ -1148,18 +1209,18 @@ xfs_dir3_data_block_free(
int i; /* free entry index */
for (i = findex - 1; i >= 0; i--) {
- if (bests[i] != cpu_to_be16(NULLDATAOFF))
+ if (freehdr.bests[i] != cpu_to_be16(NULLDATAOFF))
break;
}
freehdr.nvalid = i + 1;
logfree = 0;
} else {
/* Not the last entry, just punch it out. */
- bests[findex] = cpu_to_be16(NULLDATAOFF);
+ freehdr.bests[findex] = cpu_to_be16(NULLDATAOFF);
logfree = 1;
}
- dp->d_ops->free_hdr_to_disk(free, &freehdr);
+ xfs_dir2_free_hdr_to_disk(dp->i_mount, free, &freehdr);
xfs_dir2_free_log_header(args, fbp);
/*
@@ -1184,7 +1245,7 @@ xfs_dir3_data_block_free(
/* Log the free entry that changed, unless we got rid of it. */
if (logfree)
- xfs_dir2_free_log_bests(args, fbp, findex, findex);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, findex, findex);
return 0;
}
@@ -1201,6 +1262,7 @@ xfs_dir2_leafn_remove(
xfs_da_state_blk_t *dblk, /* data block */
int *rval) /* resulting block needs join */
{
+ struct xfs_da_geometry *geo = args->geo;
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
struct xfs_buf *dbp; /* data block buffer */
@@ -1215,27 +1277,25 @@ xfs_dir2_leafn_remove(
xfs_trans_t *tp; /* transaction pointer */
struct xfs_dir2_data_free *bf; /* bestfree table */
struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
trace_xfs_dir2_leafn_remove(args, index);
dp = args->dp;
tp = args->trans;
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
/*
* Point to the entry we're removing.
*/
- lep = &ents[index];
+ lep = &leafhdr.ents[index];
/*
* Extract the data block and offset from the entry.
*/
- db = xfs_dir2_dataptr_to_db(args->geo, be32_to_cpu(lep->address));
+ db = xfs_dir2_dataptr_to_db(geo, be32_to_cpu(lep->address));
ASSERT(dblk->blkno == db);
- off = xfs_dir2_dataptr_to_off(args->geo, be32_to_cpu(lep->address));
+ off = xfs_dir2_dataptr_to_off(geo, be32_to_cpu(lep->address));
ASSERT(dblk->index == off);
/*
@@ -1243,11 +1303,11 @@ xfs_dir2_leafn_remove(
* Log the leaf block changes.
*/
leafhdr.stale++;
- dp->d_ops->leaf_hdr_to_disk(leaf, &leafhdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, leaf, &leafhdr);
xfs_dir3_leaf_log_header(args, bp);
lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
- xfs_dir3_leaf_log_ents(args, bp, index, index);
+ xfs_dir3_leaf_log_ents(args, &leafhdr, bp, index, index);
/*
* Make the data entry free. Keep track of the longest freespace
@@ -1256,17 +1316,18 @@ xfs_dir2_leafn_remove(
dbp = dblk->bp;
hdr = dbp->b_addr;
dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
longest = be16_to_cpu(bf[0].length);
needlog = needscan = 0;
xfs_dir2_data_make_free(args, dbp, off,
- dp->d_ops->data_entsize(dep->namelen), &needlog, &needscan);
+ xfs_dir2_data_entsize(dp->i_mount, dep->namelen), &needlog,
+ &needscan);
/*
* Rescan the data block freespaces for bestfree.
* Log the data block header if needed.
*/
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
xfs_dir3_data_check(dp, dbp);
@@ -1285,9 +1346,8 @@ xfs_dir2_leafn_remove(
* Convert the data block number to a free block,
* read in the free block.
*/
- fdb = dp->d_ops->db_to_fdb(args->geo, db);
- error = xfs_dir2_free_read(tp, dp,
- xfs_dir2_db_to_da(args->geo, fdb),
+ fdb = xfs_dir2_db_to_fdb(geo, db);
+ error = xfs_dir2_free_read(tp, dp, xfs_dir2_db_to_da(geo, fdb),
&fbp);
if (error)
return error;
@@ -1295,23 +1355,22 @@ xfs_dir2_leafn_remove(
#ifdef DEBUG
{
struct xfs_dir3_icfree_hdr freehdr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
- ASSERT(freehdr.firstdb == dp->d_ops->free_max_bests(args->geo) *
- (fdb - xfs_dir2_byte_to_db(args->geo,
- XFS_DIR2_FREE_OFFSET)));
+
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, free);
+ ASSERT(freehdr.firstdb == geo->free_max_bests *
+ (fdb - xfs_dir2_byte_to_db(geo, XFS_DIR2_FREE_OFFSET)));
}
#endif
/*
* Calculate which entry we need to fix.
*/
- findex = dp->d_ops->db_to_fdindex(args->geo, db);
+ findex = xfs_dir2_db_to_fdindex(geo, db);
longest = be16_to_cpu(bf[0].length);
/*
* If the data block is now empty we can get rid of it
* (usually).
*/
- if (longest == args->geo->blksize -
- dp->d_ops->data_entry_offset) {
+ if (longest == geo->blksize - geo->data_entry_offset) {
/*
* Try to punch out the data block.
*/
@@ -1343,9 +1402,9 @@ xfs_dir2_leafn_remove(
* Return indication of whether this leaf block is empty enough
* to justify trying to join it with a neighbor.
*/
- *rval = (dp->d_ops->leaf_hdr_size +
- (uint)sizeof(ents[0]) * (leafhdr.count - leafhdr.stale)) <
- args->geo->magicpct;
+ *rval = (geo->leaf_hdr_size +
+ (uint)sizeof(leafhdr.ents[0]) * (leafhdr.count - leafhdr.stale)) <
+ geo->magicpct;
return 0;
}
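/*
 * Sketch of the join heuristic above (geometry values assumed):
 * magicpct is initialized to 37% of the directory block size, so with
 * 4k blocks a leafn block whose header plus live 8-byte entries total
 * less than ~1515 bytes reports itself as a candidate for joining
 * with a sibling.
 */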
@@ -1444,12 +1503,12 @@ xfs_dir2_leafn_toosmall(
*/
blk = &state->path.blk[state->path.active - 1];
leaf = blk->bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
+ ents = leafhdr.ents;
xfs_dir3_leaf_check(dp, blk->bp);
count = leafhdr.count - leafhdr.stale;
- bytes = dp->d_ops->leaf_hdr_size + count * sizeof(ents[0]);
+ bytes = state->args->geo->leaf_hdr_size + count * sizeof(ents[0]);
if (bytes > (state->args->geo->blksize >> 1)) {
/*
* Blk over 50%, don't try to join.
@@ -1494,8 +1553,7 @@ xfs_dir2_leafn_toosmall(
/*
* Read the sibling leaf block.
*/
- error = xfs_dir3_leafn_read(state->args->trans, dp,
- blkno, -1, &bp);
+ error = xfs_dir3_leafn_read(state->args->trans, dp, blkno, &bp);
if (error)
return error;
@@ -1507,8 +1565,8 @@ xfs_dir2_leafn_toosmall(
(state->args->geo->blksize >> 2);
leaf = bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&hdr2, leaf);
- ents = dp->d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &hdr2, leaf);
+ ents = hdr2.ents;
count += hdr2.count - hdr2.stale;
bytes -= count * sizeof(ents[0]);
@@ -1570,10 +1628,10 @@ xfs_dir2_leafn_unbalance(
drop_leaf = drop_blk->bp->b_addr;
save_leaf = save_blk->bp->b_addr;
- dp->d_ops->leaf_hdr_from_disk(&savehdr, save_leaf);
- dp->d_ops->leaf_hdr_from_disk(&drophdr, drop_leaf);
- sents = dp->d_ops->leaf_ents_p(save_leaf);
- dents = dp->d_ops->leaf_ents_p(drop_leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &savehdr, save_leaf);
+ xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &drophdr, drop_leaf);
+ sents = savehdr.ents;
+ dents = drophdr.ents;
/*
* If there are any stale leaf entries, take this opportunity
@@ -1599,8 +1657,8 @@ xfs_dir2_leafn_unbalance(
save_blk->hashval = be32_to_cpu(sents[savehdr.count - 1].hashval);
/* log the changes made when moving the entries */
- dp->d_ops->leaf_hdr_to_disk(save_leaf, &savehdr);
- dp->d_ops->leaf_hdr_to_disk(drop_leaf, &drophdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, save_leaf, &savehdr);
+ xfs_dir2_leaf_hdr_to_disk(dp->i_mount, drop_leaf, &drophdr);
xfs_dir3_leaf_log_header(args, save_blk->bp);
xfs_dir3_leaf_log_header(args, drop_blk->bp);
@@ -1619,19 +1677,16 @@ xfs_dir2_node_add_datablk(
xfs_dir2_db_t *dbno,
struct xfs_buf **dbpp,
struct xfs_buf **fbpp,
+ struct xfs_dir3_icfree_hdr *hdr,
int *findex)
{
struct xfs_inode *dp = args->dp;
struct xfs_trans *tp = args->trans;
struct xfs_mount *mp = dp->i_mount;
- struct xfs_dir3_icfree_hdr freehdr;
struct xfs_dir2_data_free *bf;
- struct xfs_dir2_data_hdr *hdr;
- struct xfs_dir2_free *free = NULL;
xfs_dir2_db_t fbno;
struct xfs_buf *fbp;
struct xfs_buf *dbp;
- __be16 *bests = NULL;
int error;
/* Not allowed to allocate, return failure. */
@@ -1650,7 +1705,7 @@ xfs_dir2_node_add_datablk(
* Get the freespace block corresponding to the data block
* that was just allocated.
*/
- fbno = dp->d_ops->db_to_fdb(args->geo, *dbno);
+ fbno = xfs_dir2_db_to_fdb(args->geo, *dbno);
error = xfs_dir2_free_try_read(tp, dp,
xfs_dir2_db_to_da(args->geo, fbno), &fbp);
if (error)
@@ -1665,11 +1720,13 @@ xfs_dir2_node_add_datablk(
if (error)
return error;
- if (dp->d_ops->db_to_fdb(args->geo, *dbno) != fbno) {
+ if (XFS_IS_CORRUPT(mp,
+ xfs_dir2_db_to_fdb(args->geo, *dbno) !=
+ fbno)) {
xfs_alert(mp,
"%s: dir ino %llu needed freesp block %lld for data block %lld, got %lld",
__func__, (unsigned long long)dp->i_ino,
- (long long)dp->d_ops->db_to_fdb(args->geo, *dbno),
+ (long long)xfs_dir2_db_to_fdb(args->geo, *dbno),
(long long)*dbno, (long long)fbno);
if (fblk) {
xfs_alert(mp,
@@ -1679,7 +1736,6 @@ xfs_dir2_node_add_datablk(
} else {
xfs_alert(mp, " ... fblk is NULL");
}
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
@@ -1687,44 +1743,39 @@ xfs_dir2_node_add_datablk(
error = xfs_dir3_free_get_buf(args, fbno, &fbp);
if (error)
return error;
- free = fbp->b_addr;
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(mp, hdr, fbp->b_addr);
/* Remember the first slot as our empty slot. */
- freehdr.firstdb = (fbno - xfs_dir2_byte_to_db(args->geo,
+ hdr->firstdb = (fbno - xfs_dir2_byte_to_db(args->geo,
XFS_DIR2_FREE_OFFSET)) *
- dp->d_ops->free_max_bests(args->geo);
+ args->geo->free_max_bests;
} else {
- free = fbp->b_addr;
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(mp, hdr, fbp->b_addr);
}
/* Set the freespace block index from the data block number. */
- *findex = dp->d_ops->db_to_fdindex(args->geo, *dbno);
+ *findex = xfs_dir2_db_to_fdindex(args->geo, *dbno);
/* Extend the freespace table if the new data block is off the end. */
- if (*findex >= freehdr.nvalid) {
- ASSERT(*findex < dp->d_ops->free_max_bests(args->geo));
- freehdr.nvalid = *findex + 1;
- bests[*findex] = cpu_to_be16(NULLDATAOFF);
+ if (*findex >= hdr->nvalid) {
+ ASSERT(*findex < args->geo->free_max_bests);
+ hdr->nvalid = *findex + 1;
+ hdr->bests[*findex] = cpu_to_be16(NULLDATAOFF);
}
/*
* If this entry was for an empty data block (this should always be
* true) then update the header.
*/
- if (bests[*findex] == cpu_to_be16(NULLDATAOFF)) {
- freehdr.nused++;
- dp->d_ops->free_hdr_to_disk(fbp->b_addr, &freehdr);
+ if (hdr->bests[*findex] == cpu_to_be16(NULLDATAOFF)) {
+ hdr->nused++;
+ xfs_dir2_free_hdr_to_disk(mp, fbp->b_addr, hdr);
xfs_dir2_free_log_header(args, fbp);
}
/* Update the freespace value for the new block in the table. */
- hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
- bests[*findex] = bf[0].length;
+ bf = xfs_dir2_data_bestfree_p(mp, dbp->b_addr);
+ hdr->bests[*findex] = bf[0].length;
*dbpp = dbp;
*fbpp = fbp;
@@ -1737,11 +1788,10 @@ xfs_dir2_node_find_freeblk(
struct xfs_da_state_blk *fblk,
xfs_dir2_db_t *dbnop,
struct xfs_buf **fbpp,
+ struct xfs_dir3_icfree_hdr *hdr,
int *findexp,
int length)
{
- struct xfs_dir3_icfree_hdr freehdr;
- struct xfs_dir2_free *free = NULL;
struct xfs_inode *dp = args->dp;
struct xfs_trans *tp = args->trans;
struct xfs_buf *fbp = NULL;
@@ -1751,7 +1801,6 @@ xfs_dir2_node_find_freeblk(
xfs_dir2_db_t dbno = -1;
xfs_dir2_db_t fbno;
xfs_fileoff_t fo;
- __be16 *bests = NULL;
int findex = 0;
int error;
@@ -1762,17 +1811,14 @@ xfs_dir2_node_find_freeblk(
*/
if (fblk) {
fbp = fblk->bp;
- free = fbp->b_addr;
findex = fblk->index;
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, hdr, fbp->b_addr);
if (findex >= 0) {
/* caller already found the freespace for us. */
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
-
- ASSERT(findex < freehdr.nvalid);
- ASSERT(be16_to_cpu(bests[findex]) != NULLDATAOFF);
- ASSERT(be16_to_cpu(bests[findex]) >= length);
- dbno = freehdr.firstdb + findex;
+ ASSERT(findex < hdr->nvalid);
+ ASSERT(be16_to_cpu(hdr->bests[findex]) != NULLDATAOFF);
+ ASSERT(be16_to_cpu(hdr->bests[findex]) >= length);
+ dbno = hdr->firstdb + findex;
goto found_block;
}
@@ -1814,15 +1860,13 @@ xfs_dir2_node_find_freeblk(
if (!fbp)
continue;
- free = fbp->b_addr;
- bests = dp->d_ops->free_bests_p(free);
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, hdr, fbp->b_addr);
/* Scan the free entry array for a large enough free space. */
- for (findex = freehdr.nvalid - 1; findex >= 0; findex--) {
- if (be16_to_cpu(bests[findex]) != NULLDATAOFF &&
- be16_to_cpu(bests[findex]) >= length) {
- dbno = freehdr.firstdb + findex;
+ for (findex = hdr->nvalid - 1; findex >= 0; findex--) {
+ if (be16_to_cpu(hdr->bests[findex]) != NULLDATAOFF &&
+ be16_to_cpu(hdr->bests[findex]) >= length) {
+ dbno = hdr->firstdb + findex;
goto found_block;
}
}
@@ -1838,7 +1882,6 @@ found_block:
return 0;
}
-
/*
* Add the data entry for a node-format directory name addition.
* The leaf entry is added in xfs_dir2_leafn_add.
@@ -1853,9 +1896,9 @@ xfs_dir2_node_addname_int(
struct xfs_dir2_data_entry *dep; /* data entry pointer */
struct xfs_dir2_data_hdr *hdr; /* data block header */
struct xfs_dir2_data_free *bf;
- struct xfs_dir2_free *free = NULL; /* freespace block structure */
struct xfs_trans *tp = args->trans;
struct xfs_inode *dp = args->dp;
+ struct xfs_dir3_icfree_hdr freehdr;
struct xfs_buf *dbp; /* data block buffer */
struct xfs_buf *fbp; /* freespace buffer */
xfs_dir2_data_aoff_t aoff;
@@ -1867,11 +1910,10 @@ xfs_dir2_node_addname_int(
int needlog = 0; /* need to log data header */
int needscan = 0; /* need to rescan data frees */
__be16 *tagp; /* data entry tag pointer */
- __be16 *bests;
- length = dp->d_ops->data_entsize(args->namelen);
- error = xfs_dir2_node_find_freeblk(args, fblk, &dbno, &fbp, &findex,
- length);
+ length = xfs_dir2_data_entsize(dp->i_mount, args->namelen);
+ error = xfs_dir2_node_find_freeblk(args, fblk, &dbno, &fbp, &freehdr,
+ &findex, length);
if (error)
return error;
@@ -1893,19 +1935,19 @@ xfs_dir2_node_addname_int(
/* we're going to have to log the free block index later */
logfree = 1;
error = xfs_dir2_node_add_datablk(args, fblk, &dbno, &dbp, &fbp,
- &findex);
+ &freehdr, &findex);
} else {
/* Read the data block in. */
error = xfs_dir3_data_read(tp, dp,
xfs_dir2_db_to_da(args->geo, dbno),
- -1, &dbp);
+ 0, &dbp);
}
if (error)
return error;
/* set up for the data block now */
hdr = dbp->b_addr;
- bf = dp->d_ops->data_bestfree_p(hdr);
+ bf = xfs_dir2_data_bestfree_p(dp->i_mount, hdr);
ASSERT(be16_to_cpu(bf[0].length) >= length);
/* Point to the existing unused space. */
@@ -1926,28 +1968,26 @@ xfs_dir2_node_addname_int(
dep->inumber = cpu_to_be64(args->inumber);
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
- dp->d_ops->data_put_ftype(dep, args->filetype);
- tagp = dp->d_ops->data_entry_tag_p(dep);
+ xfs_dir2_data_put_ftype(dp->i_mount, dep, args->filetype);
+ tagp = xfs_dir2_data_entry_tag_p(dp->i_mount, dep);
*tagp = cpu_to_be16((char *)dep - (char *)hdr);
xfs_dir2_data_log_entry(args, dbp, dep);
/* Rescan the freespace and log the data block if needed. */
if (needscan)
- xfs_dir2_data_freescan(dp, hdr, &needlog);
+ xfs_dir2_data_freescan(dp->i_mount, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(args, dbp);
/* If the freespace block entry is now wrong, update it. */
- free = fbp->b_addr;
- bests = dp->d_ops->free_bests_p(free);
- if (bests[findex] != bf[0].length) {
- bests[findex] = bf[0].length;
+ if (freehdr.bests[findex] != bf[0].length) {
+ freehdr.bests[findex] = bf[0].length;
logfree = 1;
}
/* Log the freespace entry if needed. */
if (logfree)
- xfs_dir2_free_log_bests(args, fbp, findex, findex);
+ xfs_dir2_free_log_bests(args, &freehdr, fbp, findex, findex);
/* Return the data block and offset in args. */
args->blkno = (xfs_dablk_t)dbno;
@@ -2155,8 +2195,6 @@ xfs_dir2_node_replace(
int i; /* btree level */
xfs_ino_t inum; /* new inode number */
int ftype; /* new file type */
- xfs_dir2_leaf_t *leaf; /* leaf structure */
- xfs_dir2_leaf_entry_t *lep; /* leaf entry being changed */
int rval; /* internal return value */
xfs_da_state_t *state; /* btree cursor */
@@ -2188,16 +2226,17 @@ xfs_dir2_node_replace(
* and locked it. But paranoia is good.
*/
if (rval == -EEXIST) {
- struct xfs_dir2_leaf_entry *ents;
+ struct xfs_dir3_icleaf_hdr leafhdr;
+
/*
* Find the leaf entry.
*/
blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
- leaf = blk->bp->b_addr;
- ents = args->dp->d_ops->leaf_ents_p(leaf);
- lep = &ents[blk->index];
ASSERT(state->extravalid);
+
+ xfs_dir2_leaf_hdr_from_disk(state->mp, &leafhdr,
+ blk->bp->b_addr);
/*
* Point to the data entry.
*/
@@ -2207,13 +2246,13 @@ xfs_dir2_node_replace(
dep = (xfs_dir2_data_entry_t *)
((char *)hdr +
xfs_dir2_dataptr_to_off(args->geo,
- be32_to_cpu(lep->address)));
+ be32_to_cpu(leafhdr.ents[blk->index].address)));
ASSERT(inum != be64_to_cpu(dep->inumber));
/*
* Fill in the new inode number and log the entry.
*/
dep->inumber = cpu_to_be64(inum);
- args->dp->d_ops->data_put_ftype(dep, ftype);
+ xfs_dir2_data_put_ftype(state->mp, dep, ftype);
xfs_dir2_data_log_entry(args, state->extrablk.bp, dep);
rval = 0;
}
@@ -2270,7 +2309,7 @@ xfs_dir2_node_trim_free(
if (!bp)
return 0;
free = bp->b_addr;
- dp->d_ops->free_hdr_from_disk(&freehdr, free);
+ xfs_dir2_free_hdr_from_disk(dp->i_mount, &freehdr, free);
/*
* If there are used entries, there's nothing to do.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 59f9fb2241a5..c031c53d0f0d 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -8,7 +8,41 @@
struct dir_context;
+/*
+ * In-core version of the leaf and free block headers to abstract the
+ * differences in the v2 and v3 disk format of the headers.
+ */
+struct xfs_dir3_icleaf_hdr {
+ uint32_t forw;
+ uint32_t back;
+ uint16_t magic;
+ uint16_t count;
+ uint16_t stale;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ struct xfs_dir2_leaf_entry *ents;
+};
+
+struct xfs_dir3_icfree_hdr {
+ uint32_t magic;
+ uint32_t firstdb;
+ uint32_t nvalid;
+ uint32_t nused;
+
+ /*
+ * Pointer to the on-disk format entries, which are behind the
+ * variable size (v4 vs v5) header in the on-disk block.
+ */
+ __be16 *bests;
+};
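/*
 * Usage sketch (hypothetical helper, not part of this patch): the
 * incore headers replace the old dp->d_ops indirection, so callers
 * decode once and index the entries directly:
 *
 *	static xfs_dahash_t
 *	leaf_last_hashval(struct xfs_mount *mp, struct xfs_dir2_leaf *leaf)
 *	{
 *		struct xfs_dir3_icleaf_hdr hdr;
 *
 *		xfs_dir2_leaf_hdr_from_disk(mp, &hdr, leaf);
 *		if (!hdr.count)
 *			return 0;
 *		return be32_to_cpu(hdr.ents[hdr.count - 1].hashval);
 *	}
 */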
+
/* xfs_dir2.c */
+xfs_dahash_t xfs_ascii_ci_hashname(struct xfs_name *name);
+enum xfs_dacmp xfs_ascii_ci_compname(struct xfs_da_args *args,
+ const unsigned char *name, int len);
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
@@ -26,6 +60,15 @@ extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
struct xfs_buf *lbp, struct xfs_buf *dbp);
/* xfs_dir2_data.c */
+struct xfs_dir2_data_free *xfs_dir2_data_bestfree_p(struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr);
+__be16 *xfs_dir2_data_entry_tag_p(struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep);
+uint8_t xfs_dir2_data_get_ftype(struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep);
+void xfs_dir2_data_put_ftype(struct xfs_mount *mp,
+ struct xfs_dir2_data_entry *dep, uint8_t ftype);
+
#ifdef DEBUG
extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
@@ -34,10 +77,10 @@ extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
extern xfs_failaddr_t __xfs_dir3_data_check(struct xfs_inode *dp,
struct xfs_buf *bp);
-extern int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp);
-extern int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
- xfs_daddr_t mapped_bno);
+int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t bno, unsigned int flags, struct xfs_buf **bpp);
+int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
+ unsigned int flags);
extern struct xfs_dir2_data_free *
xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
@@ -47,10 +90,14 @@ extern int xfs_dir3_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
struct xfs_buf **bpp);
/* xfs_dir2_leaf.c */
-extern int xfs_dir3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
-extern int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
- xfs_dablk_t fbno, xfs_daddr_t mappedbno, struct xfs_buf **bpp);
+void xfs_dir2_leaf_hdr_from_disk(struct xfs_mount *mp,
+ struct xfs_dir3_icleaf_hdr *to, struct xfs_dir2_leaf *from);
+void xfs_dir2_leaf_hdr_to_disk(struct xfs_mount *mp, struct xfs_dir2_leaf *to,
+ struct xfs_dir3_icleaf_hdr *from);
+int xfs_dir3_leaf_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t fbno, struct xfs_buf **bpp);
+int xfs_dir3_leafn_read(struct xfs_trans *tp, struct xfs_inode *dp,
+ xfs_dablk_t fbno, struct xfs_buf **bpp);
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
struct xfs_buf *dbp);
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
@@ -62,7 +109,8 @@ extern void xfs_dir3_leaf_compact_x1(struct xfs_dir3_icleaf_hdr *leafhdr,
extern int xfs_dir3_leaf_get_buf(struct xfs_da_args *args, xfs_dir2_db_t bno,
struct xfs_buf **bpp, uint16_t magic);
extern void xfs_dir3_leaf_log_ents(struct xfs_da_args *args,
- struct xfs_buf *bp, int first, int last);
+ struct xfs_dir3_icleaf_hdr *hdr, struct xfs_buf *bp, int first,
+ int last);
extern void xfs_dir3_leaf_log_header(struct xfs_da_args *args,
struct xfs_buf *bp);
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
@@ -79,10 +127,11 @@ xfs_dir3_leaf_find_entry(struct xfs_dir3_icleaf_hdr *leafhdr,
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
extern xfs_failaddr_t xfs_dir3_leaf_check_int(struct xfs_mount *mp,
- struct xfs_inode *dp, struct xfs_dir3_icleaf_hdr *hdr,
- struct xfs_dir2_leaf *leaf);
+ struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
/* xfs_dir2_node.c */
+void xfs_dir2_free_hdr_from_disk(struct xfs_mount *mp,
+ struct xfs_dir3_icfree_hdr *to, struct xfs_dir2_free *from);
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_buf *lbp);
extern xfs_dahash_t xfs_dir2_leaf_lasthash(struct xfs_inode *dp,
@@ -108,6 +157,14 @@ extern int xfs_dir2_free_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t fbno, struct xfs_buf **bpp);
/* xfs_dir2_sf.c */
+xfs_ino_t xfs_dir2_sf_get_ino(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep);
+xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *hdr);
+void xfs_dir2_sf_put_parent_ino(struct xfs_dir2_sf_hdr *hdr, xfs_ino_t ino);
+uint8_t xfs_dir2_sf_get_ftype(struct xfs_mount *mp,
+ struct xfs_dir2_sf_entry *sfep);
+struct xfs_dir2_sf_entry *xfs_dir2_sf_nextentry(struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep);
extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
@@ -123,4 +180,39 @@ extern xfs_failaddr_t xfs_dir2_sf_verify(struct xfs_inode *ip);
extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
struct dir_context *ctx, size_t bufsize);
+static inline unsigned int
+xfs_dir2_data_entsize(
+ struct xfs_mount *mp,
+ unsigned int namelen)
+{
+ unsigned int len;
+
+ len = offsetof(struct xfs_dir2_data_entry, name[0]) + namelen +
+ sizeof(xfs_dir2_data_off_t) /* tag */;
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ len += sizeof(uint8_t);
+ return round_up(len, XFS_DIR2_DATA_ALIGN);
+}
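
To make the rounding concrete: assuming the usual layout, offsetof(struct
xfs_dir2_data_entry, name[0]) is 9 (an 8-byte inumber plus a 1-byte namelen),
the tag is 2 bytes and XFS_DIR2_DATA_ALIGN is 8 -- those constants come from
the headers, so treat this sketch as illustrative arithmetic, not a spec:

	/*
	 * namelen = 4,  no ftype:   9 + 4 + 2     = 15 -> round_up(15, 8) = 16
	 * namelen = 4,  with ftype: 9 + 4 + 2 + 1 = 16 -> round_up(16, 8) = 16
	 * namelen = 12, no ftype:   9 + 12 + 2    = 23 -> round_up(23, 8) = 24
	 *
	 * The one-byte ftype is thus frequently absorbed by the alignment
	 * padding and costs nothing for most name lengths.
	 */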
+
+static inline xfs_dahash_t
+xfs_dir2_hashname(
+ struct xfs_mount *mp,
+ struct xfs_name *name)
+{
+ if (unlikely(xfs_sb_version_hasasciici(&mp->m_sb)))
+ return xfs_ascii_ci_hashname(name);
+ return xfs_da_hashname(name->name, name->len);
+}
+
+static inline enum xfs_dacmp
+xfs_dir2_compname(
+ struct xfs_da_args *args,
+ const unsigned char *name,
+ int len)
+{
+ if (unlikely(xfs_sb_version_hasasciici(&args->dp->i_mount->m_sb)))
+ return xfs_ascii_ci_compname(args, name, len);
+ return xfs_da_compname(args, name, len);
+}
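
These two wrappers replace the old per-mount m_dirnameops indirection: when
the superblock has the ASCII case-insensitive feature bit, hashing and
comparison go through the ci variants, otherwise the plain da_hashname /
da_compname pair is used. A minimal, hypothetical caller (the helper name is
made up for illustration):

	/* Hypothetical: does this shortform entry match the name in args? */
	static bool
	example_sf_name_matches(
		struct xfs_da_args		*args,
		struct xfs_dir2_sf_entry	*sfep)
	{
		/* Both exact and case-insensitive hits count as a match. */
		return xfs_dir2_compname(args, sfep->name, sfep->namelen) !=
				XFS_CMP_DIFFERENT;
	}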
+
#endif /* __XFS_DIR2_PRIV_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 85f14fc2a8da..8b94d33d232f 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -37,6 +37,126 @@ static void xfs_dir2_sf_check(xfs_da_args_t *args);
static void xfs_dir2_sf_toino4(xfs_da_args_t *args);
static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
+static int
+xfs_dir2_sf_entsize(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ int len)
+{
+ int count = len;
+
+ count += sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
+ count += hdr->i8count ? XFS_INO64_SIZE : XFS_INO32_SIZE; /* ino # */
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ count += sizeof(uint8_t);
+ return count;
+}
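
A rough worked example, assuming sizeof(struct xfs_dir2_sf_entry) is 3 (a
namelen byte plus the 2-byte offset), XFS_INO32_SIZE is 4 and XFS_INO64_SIZE
is 8 -- again, the real values come from the headers:

	/*
	 * len = 5, 4-byte inodes, no ftype:   5 + 3 + 4     = 12 bytes
	 * len = 5, 8-byte inodes, with ftype: 5 + 3 + 8 + 1 = 17 bytes
	 *
	 * Unlike the data-block format there is no alignment padding here;
	 * shortform entries are packed to squeeze the most out of the
	 * inode literal area.
	 */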
+
+struct xfs_dir2_sf_entry *
+xfs_dir2_sf_nextentry(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return (void *)sfep + xfs_dir2_sf_entsize(mp, hdr, sfep->namelen);
+}
+
+/*
+ * In short-form directory entries the inode numbers are stored at a variable
+ * offset behind the entry name. If the entry stores a filetype value, then it
+ * sits between the name and the inode number. The actual inode numbers can
+ * come in two formats as well, either 4 bytes or 8 bytes wide.
+ */
+xfs_ino_t
+xfs_dir2_sf_get_ino(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ uint8_t *from = sfep->name + sfep->namelen;
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ from++;
+
+ if (!hdr->i8count)
+ return get_unaligned_be32(from);
+ return get_unaligned_be64(from) & XFS_MAXINUMBER;
+}
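
Spelled out as bytes, a shortform entry under these rules looks roughly like
this (a sketch of the layout implied by the accessors above, not an
authoritative on-disk specification):

	/*
	 * +---------+-----------+---------------+---------+----------------+
	 * | namelen | offset[2] | name[namelen] | (ftype) | ino, 4|8 BE    |
	 * +---------+-----------+---------------+---------+----------------+
	 *
	 * The ftype byte is present only on ftype-enabled filesystems, and
	 * the inode number is 8 bytes wide only when hdr->i8count is set,
	 * which is why the accessors compute the position at runtime and
	 * use get_unaligned_be32()/get_unaligned_be64() to read it.
	 */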
+
+static void
+xfs_dir2_sf_put_ino(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep,
+ xfs_ino_t ino)
+{
+ uint8_t *to = sfep->name + sfep->namelen;
+
+ ASSERT(ino <= XFS_MAXINUMBER);
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ to++;
+
+ if (hdr->i8count)
+ put_unaligned_be64(ino, to);
+ else
+ put_unaligned_be32(ino, to);
+}
+
+xfs_ino_t
+xfs_dir2_sf_get_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr)
+{
+ if (!hdr->i8count)
+ return get_unaligned_be32(hdr->parent);
+ return get_unaligned_be64(hdr->parent) & XFS_MAXINUMBER;
+}
+
+void
+xfs_dir2_sf_put_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_ino_t ino)
+{
+ ASSERT(ino <= XFS_MAXINUMBER);
+
+ if (hdr->i8count)
+ put_unaligned_be64(ino, hdr->parent);
+ else
+ put_unaligned_be32(ino, hdr->parent);
+}
+
+/*
+ * The file type field is stored at the end of the name for filetype-enabled
+ * shortform directories, or not at all otherwise.
+ */
+uint8_t
+xfs_dir2_sf_get_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ if (xfs_sb_version_hasftype(&mp->m_sb)) {
+ uint8_t ftype = sfep->name[sfep->namelen];
+
+ if (ftype < XFS_DIR3_FT_MAX)
+ return ftype;
+ }
+
+ return XFS_DIR3_FT_UNKNOWN;
+}
+
+static void
+xfs_dir2_sf_put_ftype(
+ struct xfs_mount *mp,
+ struct xfs_dir2_sf_entry *sfep,
+ uint8_t ftype)
+{
+ ASSERT(ftype < XFS_DIR3_FT_MAX);
+
+ if (xfs_sb_version_hasftype(&mp->m_sb))
+ sfep->name[sfep->namelen] = ftype;
+}
+
/*
* Given a block directory (dp/block), calculate its size as a shortform (sf)
* directory and a header for the sf directory, if it will fit in the
@@ -125,7 +245,7 @@ xfs_dir2_block_sfsize(
*/
sfhp->count = count;
sfhp->i8count = i8count;
- dp->d_ops->sf_put_parent_ino(sfhp, parent);
+ xfs_dir2_sf_put_parent_ino(sfhp, parent);
return size;
}
@@ -135,64 +255,48 @@ xfs_dir2_block_sfsize(
*/
int /* error */
xfs_dir2_block_to_sf(
- xfs_da_args_t *args, /* operation arguments */
+ struct xfs_da_args *args, /* operation arguments */
struct xfs_buf *bp,
int size, /* shortform directory size */
- xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
+ struct xfs_dir2_sf_hdr *sfhp) /* shortform directory hdr */
{
- xfs_dir2_data_hdr_t *hdr; /* block header */
- xfs_dir2_data_entry_t *dep; /* data entry pointer */
- xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_data_unused_t *dup; /* unused data pointer */
- char *endptr; /* end of data entries */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int error; /* error return value */
int logflags; /* inode logging flags */
- xfs_mount_t *mp; /* filesystem mount point */
- char *ptr; /* current data pointer */
- xfs_dir2_sf_entry_t *sfep; /* shortform entry */
- xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */
- xfs_dir2_sf_hdr_t *dst; /* temporary data buffer */
+ struct xfs_dir2_sf_entry *sfep; /* shortform entry */
+ struct xfs_dir2_sf_hdr *sfp; /* shortform directory header */
+ unsigned int offset = args->geo->data_entry_offset;
+ unsigned int end;
trace_xfs_dir2_block_to_sf(args);
- dp = args->dp;
- mp = dp->i_mount;
-
- /*
- * allocate a temporary destination buffer the size of the inode
- * to format the data into. Once we have formatted the data, we
- * can free the block and copy the formatted data into the inode literal
- * area.
- */
- dst = kmem_alloc(mp->m_sb.sb_inodesize, 0);
- hdr = bp->b_addr;
-
/*
- * Copy the header into the newly allocate local space.
+ * Allocate a temporary destination buffer the size of the inode to
+ * format the data into. Once we have formatted the data, we can free
+ * the block and copy the formatted data into the inode literal area.
*/
- sfp = (xfs_dir2_sf_hdr_t *)dst;
+ sfp = kmem_alloc(mp->m_sb.sb_inodesize, 0);
memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
/*
- * Set up to loop over the block's entries.
+ * Loop over the active and unused entries. Stop when we reach the
+ * leaf/tail portion of the block.
*/
- ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = xfs_dir3_data_endp(args->geo, hdr);
+ end = xfs_dir3_data_end_offset(args->geo, bp->b_addr);
sfep = xfs_dir2_sf_firstentry(sfp);
- /*
- * Loop over the active and unused entries.
- * Stop when we reach the leaf/tail portion of the block.
- */
- while (ptr < endptr) {
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
+
/*
* If it's unused, just skip over it.
*/
- dup = (xfs_dir2_data_unused_t *)ptr;
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ptr += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
+
/*
* Skip .
*/
@@ -204,24 +308,22 @@ xfs_dir2_block_to_sf(
else if (dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.')
ASSERT(be64_to_cpu(dep->inumber) ==
- dp->d_ops->sf_get_parent_ino(sfp));
+ xfs_dir2_sf_get_parent_ino(sfp));
/*
* Normal entry, copy it into shortform.
*/
else {
sfep->namelen = dep->namelen;
- xfs_dir2_sf_put_offset(sfep,
- (xfs_dir2_data_aoff_t)
- ((char *)dep - (char *)hdr));
+ xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, dep->name, dep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep,
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
be64_to_cpu(dep->inumber));
- dp->d_ops->sf_put_ftype(sfep,
- dp->d_ops->data_get_ftype(dep));
+ xfs_dir2_sf_put_ftype(mp, sfep,
+ xfs_dir2_data_get_ftype(mp, dep));
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
- ptr += dp->d_ops->data_entsize(dep->namelen);
+ offset += xfs_dir2_data_entsize(mp, dep->namelen);
}
ASSERT((char *)sfep - (char *)sfp == size);
@@ -240,7 +342,7 @@ xfs_dir2_block_to_sf(
* Convert the inode to local format and copy the data in.
*/
ASSERT(dp->i_df.if_bytes == 0);
- xfs_init_local_fork(dp, XFS_DATA_FORK, dst, size);
+ xfs_init_local_fork(dp, XFS_DATA_FORK, sfp, size);
dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
dp->i_d.di_size = size;
@@ -248,7 +350,7 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_check(args);
out:
xfs_trans_log_inode(args->trans, dp, logflags);
- kmem_free(dst);
+ kmem_free(sfp);
return error;
}
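
The loop above is representative of the broader rework in this series: the
old code walked raw pointers from data_entry_p() to xfs_dir3_data_endp(),
while the new code iterates a byte offset and derives entry pointers from
bp->b_addr on demand. Distilled (with args, bp and mp assumed in scope, as
in the function above):

	/* Sketch of the offset-based walk used throughout this series. */
	unsigned int offset = args->geo->data_entry_offset;
	unsigned int end = xfs_dir3_data_end_offset(args->geo, bp->b_addr);

	while (offset < end) {
		struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
		struct xfs_dir2_data_entry *dep = bp->b_addr + offset;

		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			/* Unused space: skip the whole free region. */
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/* ... process the live entry at dep ... */
		offset += xfs_dir2_data_entsize(mp, dep->namelen);
	}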
@@ -277,13 +379,7 @@ xfs_dir2_sf_addname(
ASSERT(xfs_dir2_sf_lookup(args) == -ENOENT);
dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Make sure the shortform value has some of its header.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -291,7 +387,7 @@ xfs_dir2_sf_addname(
/*
* Compute entry (and change in) size.
*/
- incr_isize = dp->d_ops->sf_entsize(sfp, args->namelen);
+ incr_isize = xfs_dir2_sf_entsize(dp->i_mount, sfp, args->namelen);
objchange = 0;
/*
@@ -364,18 +460,17 @@ xfs_dir2_sf_addname_easy(
xfs_dir2_data_aoff_t offset, /* offset to use for new ent */
int new_isize) /* new directory size */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int byteoff; /* byte offset in sf dir */
- xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
byteoff = (int)((char *)sfep - (char *)sfp);
/*
* Grow the in-inode space.
*/
- xfs_idata_realloc(dp, dp->d_ops->sf_entsize(sfp, args->namelen),
+ xfs_idata_realloc(dp, xfs_dir2_sf_entsize(mp, sfp, args->namelen),
XFS_DATA_FORK);
/*
* Need to set up again due to realloc of the inode data.
@@ -388,8 +483,8 @@ xfs_dir2_sf_addname_easy(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
- dp->d_ops->sf_put_ftype(sfep, args->filetype);
+ xfs_dir2_sf_put_ino(mp, sfp, sfep, args->inumber);
+ xfs_dir2_sf_put_ftype(mp, sfep, args->filetype);
/*
* Update the header and inode.
@@ -416,9 +511,10 @@ xfs_dir2_sf_addname_hard(
int objchange, /* changing inode number size */
int new_isize) /* new directory size */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int add_datasize; /* data size need for new ent */
char *buf; /* buffer for old */
- xfs_inode_t *dp; /* incore directory inode */
int eof; /* reached end of old dir */
int nbytes; /* temp for byte copies */
xfs_dir2_data_aoff_t new_offset; /* next offset value */
@@ -432,8 +528,6 @@ xfs_dir2_sf_addname_hard(
/*
* Copy the old directory to the stack buffer.
*/
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
old_isize = (int)dp->i_d.di_size;
buf = kmem_alloc(old_isize, 0);
@@ -444,13 +538,13 @@ xfs_dir2_sf_addname_hard(
* to insert the new entry.
* If it's going to end up at the end then oldsfep will point there.
*/
- for (offset = dp->d_ops->data_first_offset,
+ for (offset = args->geo->data_first_offset,
oldsfep = xfs_dir2_sf_firstentry(oldsfp),
- add_datasize = dp->d_ops->data_entsize(args->namelen),
+ add_datasize = xfs_dir2_data_entsize(mp, args->namelen),
eof = (char *)oldsfep == &buf[old_isize];
!eof;
- offset = new_offset + dp->d_ops->data_entsize(oldsfep->namelen),
- oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep),
+ offset = new_offset + xfs_dir2_data_entsize(mp, oldsfep->namelen),
+ oldsfep = xfs_dir2_sf_nextentry(mp, oldsfp, oldsfep),
eof = (char *)oldsfep == &buf[old_isize]) {
new_offset = xfs_dir2_sf_get_offset(oldsfep);
if (offset + add_datasize <= new_offset)
@@ -479,8 +573,8 @@ xfs_dir2_sf_addname_hard(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
- dp->d_ops->sf_put_ftype(sfep, args->filetype);
+ xfs_dir2_sf_put_ino(mp, sfp, sfep, args->inumber);
+ xfs_dir2_sf_put_ftype(mp, sfep, args->filetype);
sfp->count++;
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
sfp->i8count++;
@@ -488,7 +582,7 @@ xfs_dir2_sf_addname_hard(
* If there's more left to copy, do that.
*/
if (!eof) {
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
memcpy(sfep, oldsfep, old_isize - nbytes);
}
kmem_free(buf);
@@ -510,7 +604,8 @@ xfs_dir2_sf_addname_pick(
xfs_dir2_sf_entry_t **sfepp, /* out(1): new entry ptr */
xfs_dir2_data_aoff_t *offsetp) /* out(1): new offset */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int holefit; /* found hole it will fit in */
int i; /* entry number */
xfs_dir2_data_aoff_t offset; /* data block offset */
@@ -519,11 +614,9 @@ xfs_dir2_sf_addname_pick(
int size; /* entry's data size */
int used; /* data bytes used */
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- size = dp->d_ops->data_entsize(args->namelen);
- offset = dp->d_ops->data_first_offset;
+ size = xfs_dir2_data_entsize(mp, args->namelen);
+ offset = args->geo->data_first_offset;
sfep = xfs_dir2_sf_firstentry(sfp);
holefit = 0;
/*
@@ -535,8 +628,8 @@ xfs_dir2_sf_addname_pick(
if (!holefit)
holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
offset = xfs_dir2_sf_get_offset(sfep) +
- dp->d_ops->data_entsize(sfep->namelen);
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ xfs_dir2_data_entsize(mp, sfep->namelen);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
/*
* Calculate data bytes used excluding the new entry, if this
@@ -578,7 +671,8 @@ static void
xfs_dir2_sf_check(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int i; /* entry number */
int i8count; /* number of big inode#s */
xfs_ino_t ino; /* entry inode number */
@@ -586,23 +680,21 @@ xfs_dir2_sf_check(
xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
- dp = args->dp;
-
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- offset = dp->d_ops->data_first_offset;
- ino = dp->d_ops->sf_get_parent_ino(sfp);
+ offset = args->geo->data_first_offset;
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
- ino = dp->d_ops->sf_get_ino(sfp, sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
offset =
xfs_dir2_sf_get_offset(sfep) +
- dp->d_ops->data_entsize(sfep->namelen);
- ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX);
+ xfs_dir2_data_entsize(mp, sfep->namelen);
+ ASSERT(xfs_dir2_sf_get_ftype(mp, sfep) < XFS_DIR3_FT_MAX);
}
ASSERT(i8count == sfp->i8count);
ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
@@ -622,22 +714,16 @@ xfs_dir2_sf_verify(
struct xfs_dir2_sf_entry *sfep;
struct xfs_dir2_sf_entry *next_sfep;
char *endp;
- const struct xfs_dir_ops *dops;
struct xfs_ifork *ifp;
xfs_ino_t ino;
int i;
int i8count;
int offset;
- int size;
+ int64_t size;
int error;
uint8_t filetype;
ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
- /*
- * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
- * so we can only trust the mountpoint to have the right pointer.
- */
- dops = xfs_dir_get_ops(mp, NULL);
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
@@ -653,12 +739,12 @@ xfs_dir2_sf_verify(
endp = (char *)sfp + size;
/* Check .. entry */
- ino = dops->sf_get_parent_ino(sfp);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
return __this_address;
- offset = dops->data_first_offset;
+ offset = mp->m_dir_geo->data_first_offset;
/* Check all reported entries */
sfep = xfs_dir2_sf_firstentry(sfp);
@@ -680,7 +766,7 @@ xfs_dir2_sf_verify(
* within the data buffer. The next entry starts after the
* name component, so nextentry is an acceptable test.
*/
- next_sfep = dops->sf_nextentry(sfp, sfep);
+ next_sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
if (endp < (char *)next_sfep)
return __this_address;
@@ -689,19 +775,19 @@ xfs_dir2_sf_verify(
return __this_address;
/* Check the inode number. */
- ino = dops->sf_get_ino(sfp, sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
return __this_address;
/* Check the file type. */
- filetype = dops->sf_get_ftype(sfep);
+ filetype = xfs_dir2_sf_get_ftype(mp, sfep);
if (filetype >= XFS_DIR3_FT_MAX)
return __this_address;
offset = xfs_dir2_sf_get_offset(sfep) +
- dops->data_entsize(sfep->namelen);
+ xfs_dir2_data_entsize(mp, sfep->namelen);
sfep = next_sfep;
}
@@ -763,7 +849,7 @@ xfs_dir2_sf_create(
/*
* Now can put in the inode number, since i8count is set.
*/
- dp->d_ops->sf_put_parent_ino(sfp, pino);
+ xfs_dir2_sf_put_parent_ino(sfp, pino);
sfp->count = 0;
dp->i_d.di_size = size;
xfs_dir2_sf_check(args);
@@ -779,7 +865,8 @@ int /* error */
xfs_dir2_sf_lookup(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int i; /* entry index */
int error;
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
@@ -790,16 +877,9 @@ xfs_dir2_sf_lookup(
trace_xfs_dir2_sf_lookup(args);
xfs_dir2_sf_check(args);
- dp = args->dp;
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Bail out if the directory is way too short.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -818,7 +898,7 @@ xfs_dir2_sf_lookup(
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
- args->inumber = dp->d_ops->sf_get_parent_ino(sfp);
+ args->inumber = xfs_dir2_sf_get_parent_ino(sfp);
args->cmpresult = XFS_CMP_EXACT;
args->filetype = XFS_DIR3_FT_DIR;
return -EEXIST;
@@ -828,18 +908,17 @@ xfs_dir2_sf_lookup(
*/
ci_sfep = NULL;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
/*
* Compare name and if it's an exact match, return the inode
* number. If it's the first case-insensitive match, store the
* inode number and continue looking for an exact match.
*/
- cmp = dp->i_mount->m_dirnameops->compname(args, sfep->name,
- sfep->namelen);
+ cmp = xfs_dir2_compname(args, sfep->name, sfep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
- args->inumber = dp->d_ops->sf_get_ino(sfp, sfep);
- args->filetype = dp->d_ops->sf_get_ftype(sfep);
+ args->inumber = xfs_dir2_sf_get_ino(mp, sfp, sfep);
+ args->filetype = xfs_dir2_sf_get_ftype(mp, sfep);
if (cmp == XFS_CMP_EXACT)
return -EEXIST;
ci_sfep = sfep;
@@ -864,8 +943,9 @@ int /* error */
xfs_dir2_sf_removename(
xfs_da_args_t *args)
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int byteoff; /* offset of removed entry */
- xfs_inode_t *dp; /* incore directory inode */
int entsize; /* this entry's size */
int i; /* shortform entry index */
int newsize; /* new inode size */
@@ -875,17 +955,9 @@ xfs_dir2_sf_removename(
trace_xfs_dir2_sf_removename(args);
- dp = args->dp;
-
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
oldsize = (int)dp->i_d.di_size;
- /*
- * Bail out if the directory is way too short.
- */
- if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(oldsize >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == oldsize);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -895,10 +967,10 @@ xfs_dir2_sf_removename(
* Find the one we're deleting.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
- ASSERT(dp->d_ops->sf_get_ino(sfp, sfep) ==
+ ASSERT(xfs_dir2_sf_get_ino(mp, sfp, sfep) ==
args->inumber);
break;
}
@@ -912,7 +984,7 @@ xfs_dir2_sf_removename(
* Calculate sizes.
*/
byteoff = (int)((char *)sfep - (char *)sfp);
- entsize = dp->d_ops->sf_entsize(sfp, args->namelen);
+ entsize = xfs_dir2_sf_entsize(mp, sfp, args->namelen);
newsize = oldsize - entsize;
/*
* Copy the part if any after the removed entry, sliding it down.
@@ -945,13 +1017,35 @@ xfs_dir2_sf_removename(
}
/*
+ * Check whether the sf dir replace operation needs more blocks.
+ */
+bool
+xfs_dir2_sf_replace_needblock(
+ struct xfs_inode *dp,
+ xfs_ino_t inum)
+{
+ int newsize;
+ struct xfs_dir2_sf_hdr *sfp;
+
+ if (dp->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ return false;
+
+ sfp = (struct xfs_dir2_sf_hdr *)dp->i_df.if_u1.if_data;
+ newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
+
+ return inum > XFS_DIR2_MAX_SHORT_INUM &&
+ sfp->i8count == 0 && newsize > XFS_IFORK_DSIZE(dp);
+}
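
The growth estimate is the worst case in which every entry, plus the parent
pointer in the header, widens from a 4-byte to an 8-byte inode number;
XFS_INO64_DIFF is presumably XFS_INO64_SIZE - XFS_INO32_SIZE, i.e. 4 bytes.
Illustrative numbers:

	/*
	 * if_bytes = 80, count = 5:
	 *   newsize = 80 + (5 + 1) * 4 = 104
	 *
	 * The "+ 1" covers the parent inode stored in the header. Only if
	 * the new inumber really needs 8 bytes (inum > XFS_DIR2_MAX_SHORT_INUM
	 * while i8count is still 0) and newsize overflows the inode fork does
	 * the caller have to convert to block form before replacing.
	 */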
+
+/*
* Replace the inode number of an entry in a shortform directory.
*/
int /* error */
xfs_dir2_sf_replace(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_inode_t *dp; /* incore directory inode */
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
int i; /* entry index */
xfs_ino_t ino=0; /* entry old inode number */
int i8elevated; /* sf_toino8 set i8count=1 */
@@ -960,16 +1054,8 @@ xfs_dir2_sf_replace(
trace_xfs_dir2_sf_replace(args);
- dp = args->dp;
-
ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
- /*
- * Bail out if the shortform directory is way too small.
- */
- if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
- ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
- return -EIO;
- }
+ ASSERT(dp->i_d.di_size >= offsetof(struct xfs_dir2_sf_hdr, parent));
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
@@ -980,17 +1066,14 @@ xfs_dir2_sf_replace(
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
int error; /* error return value */
- int newsize; /* new inode size */
- newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
/*
* Won't fit as shortform, convert to block then do replace.
*/
- if (newsize > XFS_IFORK_DSIZE(dp)) {
+ if (xfs_dir2_sf_replace_needblock(dp, args->inumber)) {
error = xfs_dir2_sf_to_block(args);
- if (error) {
+ if (error)
return error;
- }
return xfs_dir2_block_replace(args);
}
/*
@@ -1008,22 +1091,23 @@ xfs_dir2_sf_replace(
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
- ino = dp->d_ops->sf_get_parent_ino(sfp);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
ASSERT(args->inumber != ino);
- dp->d_ops->sf_put_parent_ino(sfp, args->inumber);
+ xfs_dir2_sf_put_parent_ino(sfp, args->inumber);
}
/*
* Normal entry, look for the name.
*/
else {
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
- ino = dp->d_ops->sf_get_ino(sfp, sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
ASSERT(args->inumber != ino);
- dp->d_ops->sf_put_ino(sfp, sfep, args->inumber);
- dp->d_ops->sf_put_ftype(sfep, args->filetype);
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
+ args->inumber);
+ xfs_dir2_sf_put_ftype(mp, sfep, args->filetype);
break;
}
}
@@ -1076,8 +1160,9 @@ static void
xfs_dir2_sf_toino4(
xfs_da_args_t *args) /* operation arguments */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
char *buf; /* old dir's buffer */
- xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
@@ -1088,8 +1173,6 @@ xfs_dir2_sf_toino4(
trace_xfs_dir2_sf_toino4(args);
- dp = args->dp;
-
/*
* Copy the old directory to the buffer.
* Then nuke it from the inode, and add the new buffer to the inode.
@@ -1116,21 +1199,22 @@ xfs_dir2_sf_toino4(
*/
sfp->count = oldsfp->count;
sfp->i8count = 0;
- dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
+ xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
- oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep),
+ oldsfep = xfs_dir2_sf_nextentry(mp, oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
memcpy(sfep->offset, oldsfep->offset, sizeof(sfep->offset));
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep,
- dp->d_ops->sf_get_ino(oldsfp, oldsfep));
- dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
+ xfs_dir2_sf_get_ino(mp, oldsfp, oldsfep));
+ xfs_dir2_sf_put_ftype(mp, sfep,
+ xfs_dir2_sf_get_ftype(mp, oldsfep));
}
/*
* Clean up the inode.
@@ -1149,8 +1233,9 @@ static void
xfs_dir2_sf_toino8(
xfs_da_args_t *args) /* operation arguments */
{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
char *buf; /* old dir's buffer */
- xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
@@ -1161,8 +1246,6 @@ xfs_dir2_sf_toino8(
trace_xfs_dir2_sf_toino8(args);
- dp = args->dp;
-
/*
* Copy the old directory to the buffer.
* Then nuke it from the inode, and add the new buffer to the inode.
@@ -1189,21 +1272,22 @@ xfs_dir2_sf_toino8(
*/
sfp->count = oldsfp->count;
sfp->i8count = 1;
- dp->d_ops->sf_put_parent_ino(sfp, dp->d_ops->sf_get_parent_ino(oldsfp));
+ xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
i < sfp->count;
- i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
- oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
+ i++, sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep),
+ oldsfep = xfs_dir2_sf_nextentry(mp, oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
memcpy(sfep->offset, oldsfep->offset, sizeof(sfep->offset));
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- dp->d_ops->sf_put_ino(sfp, sfep,
- dp->d_ops->sf_get_ino(oldsfp, oldsfep));
- dp->d_ops->sf_put_ftype(sfep, dp->d_ops->sf_get_ftype(oldsfep));
+ xfs_dir2_sf_put_ino(mp, sfp, sfep,
+ xfs_dir2_sf_get_ino(mp, oldsfp, oldsfep));
+ xfs_dir2_sf_put_ftype(mp, sfep,
+ xfs_dir2_sf_get_ftype(mp, oldsfep));
}
/*
* Clean up the inode.
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index e8bd688a4073..bedc1e752b60 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -35,10 +35,10 @@ xfs_calc_dquots_per_chunk(
xfs_failaddr_t
xfs_dquot_verify(
- struct xfs_mount *mp,
- xfs_disk_dquot_t *ddq,
- xfs_dqid_t id,
- uint type) /* used only during quotacheck */
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id,
+ uint type) /* used only during quotacheck */
{
/*
* We can encounter an uninitialized dquot buffer for 2 reasons:
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index c968b60cee15..1b7dcbae051c 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -920,13 +920,13 @@ static inline uint xfs_dinode_size(int version)
* This enum is used in string mapping in xfs_trace.h; please keep the
* TRACE_DEFINE_ENUMs for it up to date.
*/
-typedef enum xfs_dinode_fmt {
+enum xfs_dinode_fmt {
XFS_DINODE_FMT_DEV, /* xfs_dev_t */
XFS_DINODE_FMT_LOCAL, /* bulk data */
XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
XFS_DINODE_FMT_UUID /* added long ago, but never used */
-} xfs_dinode_fmt_t;
+};
#define XFS_INODE_FORMAT_STR \
{ XFS_DINODE_FMT_DEV, "dev" }, \
@@ -1144,11 +1144,11 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
/*
* This is the main portion of the on-disk representation of quota
- * information for a user. This is the q_core of the xfs_dquot_t that
+ * information for a user. This is the q_core of the struct xfs_dquot that
* is kept in kernel memory. We pad this with some more expansion room
* to construct the on disk structure.
*/
-typedef struct xfs_disk_dquot {
+struct xfs_disk_dquot {
__be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */
__u8 d_version; /* dquot version */
__u8 d_flags; /* XFS_DQ_USER/PROJ/GROUP */
@@ -1171,15 +1171,15 @@ typedef struct xfs_disk_dquot {
__be32 d_rtbtimer; /* similar to above; for RT disk blocks */
__be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */
__be16 d_pad;
-} xfs_disk_dquot_t;
+};
/*
* This is what goes on disk. This is separated from the xfs_disk_dquot because
* carrying the unnecessary padding would be a waste of memory.
*/
typedef struct xfs_dqblk {
- xfs_disk_dquot_t dd_diskdq; /* portion that lives incore as well */
- char dd_fill[4]; /* filling for posterity */
+ struct xfs_disk_dquot dd_diskdq; /* portion living incore as well */
+ char dd_fill[4]; /* filling for posterity */
/*
* These two are only present on filesystems with the CRC bits set.
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index e9371a8e0e26..ef95ca07d084 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -324,7 +324,7 @@ typedef struct xfs_growfs_rt {
* Structures returned from ioctl XFS_IOC_FSBULKSTAT & XFS_IOC_FSBULKSTAT_SINGLE
*/
typedef struct xfs_bstime {
- time_t tv_sec; /* seconds */
+ __kernel_long_t tv_sec; /* seconds */
__s32 tv_nsec; /* and nanoseconds */
} xfs_bstime_t;
@@ -416,7 +416,7 @@ struct xfs_bulkstat {
/*
* Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was choosen
+ * and using two 16bit values to hold new 32bit projid was chosen
* to retain compatibility with "old" filesystems).
*/
static inline uint32_t
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 588d44613094..988cde7744e6 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -544,7 +544,10 @@ xfs_inobt_insert_sprec(
nrec->ir_free, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
goto out;
}
@@ -557,17 +560,23 @@ xfs_inobt_insert_sprec(
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
- XFS_WANT_CORRUPTED_GOTO(mp,
- rec.ir_startino == nrec->ir_startino,
- error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
+ if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
/*
* This should never fail. If we have coexisting records that
* cannot merge, something is seriously wrong.
*/
- XFS_WANT_CORRUPTED_GOTO(mp, __xfs_inobt_can_merge(nrec, &rec),
- error);
+ if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
rec.ir_holemask, nrec->ir_startino,
@@ -1057,7 +1066,8 @@ xfs_ialloc_next_rec(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
}
return 0;
@@ -1081,7 +1091,8 @@ xfs_ialloc_get_rec(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
}
return 0;
@@ -1161,12 +1172,18 @@ xfs_dialloc_ag_inobt(
error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_inobt_get_rec(cur, &rec, &j);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);
+ if (XFS_IS_CORRUPT(mp, j != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rec.ir_freecount > 0) {
/*
@@ -1321,19 +1338,28 @@ xfs_dialloc_ag_inobt(
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
for (;;) {
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
if (rec.ir_freecount > 0)
break;
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto error0;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
}
alloc_inode:
@@ -1393,7 +1419,8 @@ xfs_dialloc_ag_finobt_near(
error = xfs_inobt_get_rec(lcur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
/*
* See if we've landed in the parent inode record. The finobt
@@ -1416,10 +1443,16 @@ xfs_dialloc_ag_finobt_near(
error = xfs_inobt_get_rec(rcur, &rrec, &j);
if (error)
goto error_rcur;
- XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
+ if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
+ error = -EFSCORRUPTED;
+ goto error_rcur;
+ }
}
- XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
+ if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
+ error = -EFSCORRUPTED;
+ goto error_rcur;
+ }
if (i == 1 && j == 1) {
/*
* Both the left and right records are valid. Choose the closer
@@ -1472,7 +1505,8 @@ xfs_dialloc_ag_finobt_newino(
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
return 0;
}
}
@@ -1483,12 +1517,14 @@ xfs_dialloc_ag_finobt_newino(
error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_inobt_get_rec(cur, rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
return 0;
}
@@ -1510,20 +1546,24 @@ xfs_dialloc_ag_update_inobt(
error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
+ return -EFSCORRUPTED;
ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
XFS_INODES_PER_CHUNK) == 0);
rec.ir_free &= ~XFS_INOBT_MASK(offset);
rec.ir_freecount--;
- XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
- (rec.ir_freecount == frec->ir_freecount));
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ rec.ir_free != frec->ir_free ||
+ rec.ir_freecount != frec->ir_freecount))
+ return -EFSCORRUPTED;
return xfs_inobt_update(cur, &rec);
}
@@ -1933,14 +1973,20 @@ xfs_difree_inobt(
__func__, error);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error) {
xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
__func__, error);
goto error0;
}
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error0;
+ }
/*
* Get the offset in the inode chunk.
*/
@@ -2052,7 +2098,10 @@ xfs_difree_finobt(
* freed an inode in a previously fully allocated chunk. If not,
* something is out of sync.
*/
- XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
+ if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
ibtrec->ir_count,
@@ -2075,14 +2124,20 @@ xfs_difree_finobt(
error = xfs_inobt_get_rec(cur, &rec, &i);
if (error)
goto error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
rec.ir_free |= XFS_INOBT_MASK(offset);
rec.ir_freecount++;
- XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
- (rec.ir_freecount == ibtrec->ir_freecount),
- error);
+ if (XFS_IS_CORRUPT(mp,
+ rec.ir_free != ibtrec->ir_free ||
+ rec.ir_freecount != ibtrec->ir_freecount)) {
+ error = -EFSCORRUPTED;
+ goto error;
+ }
/*
* The content of inobt records should always match between the inobt
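
The change running mechanically through this file (and through xfs_refcount.c
and xfs_rmap.c below) swaps the old jumping macros for explicit control flow.
Schematically, based on the hunks above:

	/* Before: the macro hid both the error assignment and the goto. */
	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);

	/* After: test, error code and jump are all visible at the call site. */
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto error0;
	}

Note the sense of the condition flips: the old macro stated what must hold,
while XFS_IS_CORRUPT() states what indicates corruption.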
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index 7bc87408f1a0..52451809c478 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -596,7 +596,7 @@ xfs_iext_realloc_root(
struct xfs_ifork *ifp,
struct xfs_iext_cursor *cur)
{
- size_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
+ int64_t new_size = ifp->if_bytes + sizeof(struct xfs_iext_rec);
void *new;
/* account for the prev/next pointers */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 28ab3c5255e1..8afacfe4be0a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -213,13 +213,12 @@ xfs_inode_from_disk(
to->di_version = from->di_version;
if (to->di_version == 1) {
set_nlink(inode, be16_to_cpu(from->di_onlink));
- to->di_projid_lo = 0;
- to->di_projid_hi = 0;
+ to->di_projid = 0;
to->di_version = 2;
} else {
set_nlink(inode, be32_to_cpu(from->di_nlink));
- to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
- to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+ to->di_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
+ be16_to_cpu(from->di_projid_lo);
}
to->di_format = from->di_format;
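
Folding the two 16-bit halves into a single 32-bit di_projid is plain bit
arithmetic; the matching split back into hi/lo for the on-disk format appears
in xfs_inode_to_disk() below. With illustrative values:

	/*
	 * di_projid_hi = 0x0001, di_projid_lo = 0x0203:
	 *   di_projid    = 0x0001 << 16 | 0x0203   = 0x00010203
	 *
	 * and on the way back to disk:
	 *   di_projid_lo = 0x00010203 & 0xffff     = 0x0203
	 *   di_projid_hi = 0x00010203 >> 16        = 0x0001
	 */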
@@ -256,8 +255,8 @@ xfs_inode_from_disk(
if (to->di_version == 3) {
inode_set_iversion_queried(inode,
be64_to_cpu(from->di_changecount));
- to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
- to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
+ to->di_crtime.tv_sec = be32_to_cpu(from->di_crtime.t_sec);
+ to->di_crtime.tv_nsec = be32_to_cpu(from->di_crtime.t_nsec);
to->di_flags2 = be64_to_cpu(from->di_flags2);
to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
}
@@ -279,8 +278,8 @@ xfs_inode_to_disk(
to->di_format = from->di_format;
to->di_uid = cpu_to_be32(from->di_uid);
to->di_gid = cpu_to_be32(from->di_gid);
- to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
- to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+ to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
+ to->di_projid_hi = cpu_to_be16(from->di_projid >> 16);
memset(to->di_pad, 0, sizeof(to->di_pad));
to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
@@ -306,8 +305,8 @@ xfs_inode_to_disk(
if (from->di_version == 3) {
to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
- to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
- to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
+ to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.tv_sec);
+ to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.tv_nsec);
to->di_flags2 = cpu_to_be64(from->di_flags2);
to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to->di_ino = cpu_to_be64(ip->i_ino);
@@ -632,8 +631,6 @@ xfs_iread(
if ((iget_flags & XFS_IGET_CREATE) &&
xfs_sb_version_hascrc(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
- /* initialise the on-disk inode core */
- memset(&ip->i_d, 0, sizeof(ip->i_d));
VFS_I(ip)->i_generation = prandom_u32();
ip->i_d.di_version = 3;
return 0;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index ab0f84165317..fd94b1078722 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -21,8 +21,7 @@ struct xfs_icdinode {
uint16_t di_flushiter; /* incremented on flush */
uint32_t di_uid; /* owner's user id */
uint32_t di_gid; /* owner's group id */
- uint16_t di_projid_lo; /* lower part of owner's project id */
- uint16_t di_projid_hi; /* higher part of owner's project id */
+ uint32_t di_projid; /* owner's project id */
xfs_fsize_t di_size; /* number of bytes in file */
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
@@ -37,7 +36,7 @@ struct xfs_icdinode {
uint64_t di_flags2; /* more random flags */
uint32_t di_cowextsize; /* basic cow extent size for file */
- xfs_ictimestamp_t di_crtime; /* time created */
+ struct timespec64 di_crtime; /* time created */
};
/*
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index c643beeb5a24..ad2b9c313fd2 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -75,11 +75,15 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
break;
default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
return -EFSCORRUPTED;
}
if (error)
@@ -110,14 +114,16 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
break;
default:
+ xfs_inode_verifier_error(ip, error, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
break;
}
if (error) {
- kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
if (ip->i_cowfp)
- kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
ip->i_cowfp = NULL;
xfs_idestroy_fork(ip, XFS_DATA_FORK);
}
@@ -129,7 +135,7 @@ xfs_init_local_fork(
struct xfs_inode *ip,
int whichfork,
const void *data,
- int size)
+ int64_t size)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
int mem_size = size, real_size = 0;
@@ -467,11 +473,11 @@ xfs_iroot_realloc(
void
xfs_idata_realloc(
struct xfs_inode *ip,
- int byte_diff,
+ int64_t byte_diff,
int whichfork)
{
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
- int new_size = (int)ifp->if_bytes + byte_diff;
+ int64_t new_size = ifp->if_bytes + byte_diff;
ASSERT(new_size >= 0);
ASSERT(new_size <= XFS_IFORK_SIZE(ip, whichfork));
@@ -525,10 +531,10 @@ xfs_idestroy_fork(
}
if (whichfork == XFS_ATTR_FORK) {
- kmem_zone_free(xfs_ifork_zone, ip->i_afp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
ip->i_afp = NULL;
} else if (whichfork == XFS_COW_FORK) {
- kmem_zone_free(xfs_ifork_zone, ip->i_cowfp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
ip->i_cowfp = NULL;
}
}
@@ -552,7 +558,7 @@ xfs_iextents_copy(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec rec;
- int copied = 0;
+ int64_t copied = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
ASSERT(ifp->if_bytes > 0);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 00c62ce170d0..500333d0101e 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -13,16 +13,16 @@ struct xfs_dinode;
* File incore extent information, present for each of data & attr forks.
*/
struct xfs_ifork {
- int if_bytes; /* bytes in if_u1 */
- unsigned int if_seq; /* fork mod counter */
+ int64_t if_bytes; /* bytes in if_u1 */
struct xfs_btree_block *if_broot; /* file's incore btree root */
- short if_broot_bytes; /* bytes allocated for root */
- unsigned char if_flags; /* per-fork flags */
+ unsigned int if_seq; /* fork mod counter */
int if_height; /* height of the extent tree */
union {
void *if_root; /* extent tree root */
char *if_data; /* inline file data */
} if_u1;
+ short if_broot_bytes; /* bytes allocated for root */
+ unsigned char if_flags; /* per-fork flags */
};
/*
@@ -87,18 +87,24 @@ struct xfs_ifork {
#define XFS_IFORK_MAXEXT(ip, w) \
(XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
+#define xfs_ifork_has_extents(ip, w) \
+ (XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_EXTENTS || \
+ XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_BTREE)
+
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
void xfs_idestroy_fork(struct xfs_inode *, int);
-void xfs_idata_realloc(struct xfs_inode *, int, int);
+void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff,
+ int whichfork);
void xfs_iroot_realloc(struct xfs_inode *, int, int);
int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
int);
-void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
+void xfs_init_local_fork(struct xfs_inode *ip, int whichfork,
+ const void *data, int64_t size);
xfs_extnum_t xfs_iext_count(struct xfs_ifork *ifp);
void xfs_iext_insert(struct xfs_inode *, struct xfs_iext_cursor *cur,
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index e5f97c69b320..8ef31d71a9c7 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -432,9 +432,9 @@ static inline uint xfs_log_dinode_size(int version)
}
/*
- * Buffer Log Format defintions
+ * Buffer Log Format definitions
*
- * These are the physical dirty bitmap defintions for the log format structure.
+ * These are the physical dirty bitmap definitions for the log format structure.
*/
#define XFS_BLF_CHUNK 128
#define XFS_BLF_SHIFT 7
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index f3d18eaecebb..3bf671637a91 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -30,14 +30,14 @@ typedef struct xlog_recover_item {
xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
} xlog_recover_item_t;
-typedef struct xlog_recover {
+struct xlog_recover {
struct hlist_node r_list;
xlog_tid_t r_log_tid; /* log's transaction id */
xfs_trans_header_t r_theader; /* trans header for partial */
int r_state; /* not needed */
xfs_lsn_t r_lsn; /* xact lsn */
struct list_head r_itemq; /* q for items */
-} xlog_recover_t;
+};
#define ITEM_TYPE(i) (*(unsigned short *)(i)->ri_buf[0].i_addr)
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 9a7fadb1361c..d7d702ee4d1a 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -200,7 +200,10 @@ xfs_refcount_insert(
error = xfs_btree_insert(cur, i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
out_error:
if (error)
@@ -227,10 +230,16 @@ xfs_refcount_delete(
error = xfs_refcount_get_rec(cur, &irec, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
error = xfs_btree_delete(cur, i);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, *i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (error)
goto out_error;
error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
@@ -349,7 +358,10 @@ xfs_refcount_split_extent(
error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
return 0;
@@ -371,7 +383,10 @@ xfs_refcount_split_extent(
error = xfs_refcount_insert(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
return error;
out_error:
@@ -410,19 +425,27 @@ xfs_refcount_merge_center_extents(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (center->rc_refcount > 1) {
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the left extent. */
@@ -430,7 +453,10 @@ xfs_refcount_merge_center_extents(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
left->rc_blockcount = extlen;
error = xfs_refcount_update(cur, left);
@@ -469,14 +495,18 @@ xfs_refcount_merge_left_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the left extent. */
@@ -484,7 +514,10 @@ xfs_refcount_merge_left_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
left->rc_blockcount += cleft->rc_blockcount;
error = xfs_refcount_update(cur, left);
@@ -526,14 +559,18 @@ xfs_refcount_merge_right_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* Enlarge the right extent. */
@@ -541,7 +578,10 @@ xfs_refcount_merge_right_extent(
&found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
right->rc_startblock -= cright->rc_blockcount;
right->rc_blockcount += cright->rc_blockcount;
@@ -587,7 +627,10 @@ xfs_refcount_find_left_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (xfs_refc_next(&tmp) != agbno)
return 0;
@@ -605,8 +648,10 @@ xfs_refcount_find_left_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* if tmp starts at the end of our range, just use that */
if (tmp.rc_startblock == agbno)
@@ -671,7 +716,10 @@ xfs_refcount_find_right_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (tmp.rc_startblock != agbno + aglen)
return 0;
@@ -689,8 +737,10 @@ xfs_refcount_find_right_extents(
error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
- out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* if tmp ends at the end of our range, just use that */
if (xfs_refc_next(&tmp) == agbno + aglen)
@@ -913,8 +963,11 @@ xfs_refcount_adjust_extents(
&found_tmp);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_tmp == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ found_tmp != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
cur->bc_private.a.priv.refc.nr_ops++;
} else {
fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
@@ -955,8 +1008,10 @@ xfs_refcount_adjust_extents(
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
cur->bc_private.a.priv.refc.nr_ops++;
goto advloop;
} else {
@@ -1122,7 +1177,7 @@ xfs_refcount_finish_one(
XFS_ALLOC_FLAG_FREEING, &agbp);
if (error)
return error;
- if (!agbp)
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
return -EFSCORRUPTED;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno);
@@ -1272,7 +1327,10 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* If the extent ends before the start, look at the next one */
if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
@@ -1284,7 +1342,10 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* If the extent starts after the range we want, bail out */
@@ -1312,7 +1373,10 @@ xfs_refcount_find_shared(
error = xfs_refcount_get_rec(cur, &tmp, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (tmp.rc_startblock >= agbno + aglen ||
tmp.rc_startblock != *fbno + *flen)
break;
@@ -1413,8 +1477,11 @@ xfs_refcount_adjust_cow_extents(
switch (adj) {
case XFS_REFCOUNT_ADJUST_COW_ALLOC:
/* Adding a CoW reservation, there should be nothing here. */
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_startblock >= agbno + aglen, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ agbno + aglen > ext.rc_startblock)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
tmp.rc_startblock = agbno;
tmp.rc_blockcount = aglen;
@@ -1426,17 +1493,25 @@ xfs_refcount_adjust_cow_extents(
&found_tmp);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_tmp == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_tmp != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
break;
case XFS_REFCOUNT_ADJUST_COW_FREE:
/* Removing a CoW reservation, there should be one extent. */
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_startblock == agbno, out_error);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_blockcount == aglen, out_error);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- ext.rc_refcount == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_startblock != agbno)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount != aglen)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_refcount != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ext.rc_refcount = 0;
trace_xfs_refcount_modify_extent(cur->bc_mp,
@@ -1444,8 +1519,10 @@ xfs_refcount_adjust_cow_extents(
error = xfs_refcount_delete(cur, &found_rec);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
- found_rec == 1, out_error);
+ if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
break;
default:
ASSERT(0);
@@ -1584,14 +1661,15 @@ struct xfs_refcount_recovery {
/* Stuff an extent on the recovery list. */
STATIC int
xfs_refcount_recover_extent(
- struct xfs_btree_cur *cur,
+ struct xfs_btree_cur *cur,
union xfs_btree_rec *rec,
void *priv)
{
struct list_head *debris = priv;
struct xfs_refcount_recovery *rr;
- if (be32_to_cpu(rec->refc.rc_refcount) != 1)
+ if (XFS_IS_CORRUPT(cur->bc_mp,
+ be32_to_cpu(rec->refc.rc_refcount) != 1))
return -EFSCORRUPTED;
rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0);
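
All of the xfs_refcount.c hunks above apply one mechanical transformation: XFS_WANT_CORRUPTED_GOTO(), which hid an error assignment and a goto behind an asserted condition, is open-coded as an XFS_IS_CORRUPT() test on the complementary predicate plus an explicit -EFSCORRUPTED assignment. A minimal, self-contained sketch of the two styles follows; the macros here are simplified stand-ins for the real ones in fs/xfs/xfs_error.h, which additionally dump context and record the return address.

    #include <stdbool.h>
    #include <stdio.h>

    #define EFSCORRUPTED 117

    struct xfs_mount { const char *m_fsname; };

    static bool xfs_is_corrupt(struct xfs_mount *mp, bool bad, const char *expr)
    {
            if (bad)
                    fprintf(stderr, "XFS (%s): metadata check failed: %s\n",
                            mp->m_fsname, expr);
            return bad;
    }

    /* New style: the predicate names the corrupt condition directly. */
    #define XFS_IS_CORRUPT(mp, expr)  xfs_is_corrupt((mp), (expr), #expr)

    /* Old style: the macro hid both the error assignment and the goto. */
    #define XFS_WANT_CORRUPTED_GOTO(mp, expr, label)                \
            do {                                                    \
                    if (xfs_is_corrupt((mp), !(expr), #expr)) {     \
                            error = -EFSCORRUPTED;                  \
                            goto label;                             \
                    }                                               \
            } while (0)

    static int lookup_demo(struct xfs_mount *mp, int found)
    {
            int error = 0;

            /* Old form: asserts what *should* be true. */
            XFS_WANT_CORRUPTED_GOTO(mp, found == 1, out_error);

            /* New form: tests what would be *corrupt*; note the inversion. */
            if (XFS_IS_CORRUPT(mp, found != 1)) {
                    error = -EFSCORRUPTED;
                    goto out_error;
            }
    out_error:
            return error;
    }

    int main(void)
    {
            struct xfs_mount mp = { .m_fsname = "demo" };

            return lookup_demo(&mp, 1);
    }
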
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 38e9414878b3..ff9412f113c4 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -113,7 +113,10 @@ xfs_rmap_insert(
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 0, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
rcur->bc_rec.r.rm_startblock = agbno;
rcur->bc_rec.r.rm_blockcount = len;
@@ -123,7 +126,10 @@ xfs_rmap_insert(
error = xfs_btree_insert(rcur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
done:
if (error)
trace_xfs_rmap_insert_error(rcur->bc_mp,
@@ -149,12 +155,18 @@ xfs_rmap_delete(
error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_delete(rcur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+ if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
done:
if (error)
trace_xfs_rmap_delete_error(rcur->bc_mp,
@@ -406,24 +418,39 @@ xfs_rmap_free_check_owner(
return 0;
/* Make sure the unwritten flag matches. */
- XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
- (rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
+ if (XFS_IS_CORRUPT(mp,
+ (flags & XFS_RMAP_UNWRITTEN) !=
+ (rec->rm_flags & XFS_RMAP_UNWRITTEN))) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
/* Make sure the owner matches what we expect to find in the tree. */
- XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
+ if (XFS_IS_CORRUPT(mp, owner != rec->rm_owner)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
/* Check the offset, if necessary. */
if (XFS_RMAP_NON_INODE_OWNER(owner))
goto out;
if (flags & XFS_RMAP_BMBT_BLOCK) {
- XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
- out);
+ if (XFS_IS_CORRUPT(mp,
+ !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK))) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
} else {
- XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
- XFS_WANT_CORRUPTED_GOTO(mp,
- ltoff + rec->rm_blockcount >= offset + len,
- out);
+ if (XFS_IS_CORRUPT(mp, rec->rm_offset > offset)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+ if (XFS_IS_CORRUPT(mp,
+ offset + len > ltoff + rec->rm_blockcount)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
}
out:
@@ -482,12 +509,18 @@ xfs_rmap_unmap(
error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_rmap_get_rec(cur, &ltrec, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
@@ -502,8 +535,12 @@ xfs_rmap_unmap(
* be the case that the "left" extent goes all the way to EOFS.
*/
if (owner == XFS_RMAP_OWN_NULL) {
- XFS_WANT_CORRUPTED_GOTO(mp, bno >= ltrec.rm_startblock +
- ltrec.rm_blockcount, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ bno <
+ ltrec.rm_startblock + ltrec.rm_blockcount)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
goto out_done;
}
@@ -526,15 +563,22 @@ xfs_rmap_unmap(
error = xfs_rmap_get_rec(cur, &rtrec, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (rtrec.rm_startblock >= bno + len)
goto out_done;
}
/* Make sure the extent we found covers the entire freeing range. */
- XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
- ltrec.rm_startblock + ltrec.rm_blockcount >=
- bno + len, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ ltrec.rm_startblock > bno ||
+ ltrec.rm_startblock + ltrec.rm_blockcount <
+ bno + len)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Check owner information. */
error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, len, owner,
@@ -551,7 +595,10 @@ xfs_rmap_unmap(
error = xfs_btree_delete(cur, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
} else if (ltrec.rm_startblock == bno) {
/*
* overlap left hand side of extent: move the start, trim the
@@ -743,7 +790,10 @@ xfs_rmap_map(
error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, have_lt != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, ltrec.rm_startblock,
ltrec.rm_blockcount, ltrec.rm_owner,
@@ -753,9 +803,12 @@ xfs_rmap_map(
have_lt = 0;
}
- XFS_WANT_CORRUPTED_GOTO(mp,
- have_lt == 0 ||
- ltrec.rm_startblock + ltrec.rm_blockcount <= bno, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ have_lt != 0 &&
+ ltrec.rm_startblock + ltrec.rm_blockcount > bno)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/*
* Increment the cursor to see if we have a right-adjacent record to our
@@ -769,9 +822,14 @@ xfs_rmap_map(
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
- XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= gtrec.rm_startblock,
- out_error);
+ if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(mp, bno + len > gtrec.rm_startblock)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
@@ -821,7 +879,10 @@ xfs_rmap_map(
error = xfs_btree_delete(cur, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
/* point the cursor back to the left record and update */
@@ -865,7 +926,10 @@ xfs_rmap_map(
error = xfs_btree_insert(cur, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
}
trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
@@ -957,12 +1021,18 @@ xfs_rmap_convert(
error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_rmap_get_rec(cur, &PREV, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
cur->bc_private.a.agno, PREV.rm_startblock,
PREV.rm_blockcount, PREV.rm_owner,
@@ -995,10 +1065,16 @@ xfs_rmap_convert(
error = xfs_rmap_get_rec(cur, &LEFT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- XFS_WANT_CORRUPTED_GOTO(mp,
- LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
- done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
+ if (XFS_IS_CORRUPT(mp,
+ LEFT.rm_startblock + LEFT.rm_blockcount >
+ bno)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, LEFT.rm_startblock,
LEFT.rm_blockcount, LEFT.rm_owner,
@@ -1017,7 +1093,10 @@ xfs_rmap_convert(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
@@ -1026,9 +1105,14 @@ xfs_rmap_convert(
error = xfs_rmap_get_rec(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
- done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
+ if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
@@ -1055,7 +1139,10 @@ xfs_rmap_convert(
error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Switch out based on the FILLING and CONTIG state bits.
@@ -1071,7 +1158,10 @@ xfs_rmap_convert(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
@@ -1079,11 +1169,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
PREV.rm_startblock, PREV.rm_blockcount,
PREV.rm_owner, PREV.rm_offset,
@@ -1091,11 +1187,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW = LEFT;
NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
@@ -1115,11 +1217,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW = LEFT;
NEW.rm_blockcount += PREV.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
@@ -1135,7 +1243,10 @@ xfs_rmap_convert(
error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
RIGHT.rm_startblock, RIGHT.rm_blockcount,
RIGHT.rm_owner, RIGHT.rm_offset,
@@ -1143,11 +1254,17 @@ xfs_rmap_convert(
error = xfs_btree_delete(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
error = xfs_btree_decrement(cur, 0, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW = PREV;
NEW.rm_blockcount = len + RIGHT.rm_blockcount;
NEW.rm_flags = newext;
@@ -1214,7 +1331,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
@@ -1253,7 +1373,10 @@ xfs_rmap_convert(
oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_startblock = bno;
NEW.rm_owner = owner;
NEW.rm_offset = offset;
@@ -1265,7 +1388,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case 0:
@@ -1295,7 +1421,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/*
* Reset the cursor to the position of the new extent
* we are about to insert as we can't trust it after
@@ -1305,7 +1434,10 @@ xfs_rmap_convert(
oldext, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ if (XFS_IS_CORRUPT(mp, i != 0)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
/* new middle extent - newext */
cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
cur->bc_rec.r.rm_flags |= newext;
@@ -1314,7 +1446,10 @@ xfs_rmap_convert(
error = xfs_btree_insert(cur, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
break;
case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
@@ -1383,7 +1518,10 @@ xfs_rmap_convert_shared(
&PREV, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
ASSERT(PREV.rm_offset <= offset);
ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
@@ -1406,9 +1544,12 @@ xfs_rmap_convert_shared(
goto done;
if (i) {
state |= RMAP_LEFT_VALID;
- XFS_WANT_CORRUPTED_GOTO(mp,
- LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
- done);
+ if (XFS_IS_CORRUPT(mp,
+ LEFT.rm_startblock + LEFT.rm_blockcount >
+ bno)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
state |= RMAP_LEFT_CONTIG;
}
@@ -1423,9 +1564,14 @@ xfs_rmap_convert_shared(
error = xfs_rmap_get_rec(cur, &RIGHT, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
- XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
- done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
+ if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, RIGHT.rm_startblock,
RIGHT.rm_blockcount, RIGHT.rm_owner,
@@ -1472,7 +1618,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1495,7 +1644,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += PREV.rm_blockcount;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1518,7 +1670,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += RIGHT.rm_blockcount;
NEW.rm_flags = RIGHT.rm_flags;
error = xfs_rmap_update(cur, &NEW);
@@ -1538,7 +1693,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_flags = newext;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1570,7 +1728,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount += len;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1612,7 +1773,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount = offset - NEW.rm_offset;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1644,7 +1808,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount -= len;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1679,7 +1846,10 @@ xfs_rmap_convert_shared(
NEW.rm_offset, NEW.rm_flags, &i);
if (error)
goto done;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto done;
+ }
NEW.rm_blockcount = offset - NEW.rm_offset;
error = xfs_rmap_update(cur, &NEW);
if (error)
@@ -1765,25 +1935,44 @@ xfs_rmap_unmap_shared(
&ltrec, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ltoff = ltrec.rm_offset;
/* Make sure the extent we found covers the entire freeing range. */
- XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
- ltrec.rm_startblock + ltrec.rm_blockcount >=
- bno + len, out_error);
+ if (XFS_IS_CORRUPT(mp,
+ ltrec.rm_startblock > bno ||
+ ltrec.rm_startblock + ltrec.rm_blockcount <
+ bno + len)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Make sure the owner matches what we expect to find in the tree. */
- XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner, out_error);
+ if (XFS_IS_CORRUPT(mp, owner != ltrec.rm_owner)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Make sure the unwritten flag matches. */
- XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
- (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+ if (XFS_IS_CORRUPT(mp,
+ (flags & XFS_RMAP_UNWRITTEN) !=
+ (ltrec.rm_flags & XFS_RMAP_UNWRITTEN))) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
/* Check the offset. */
- XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_offset <= offset, out_error);
- XFS_WANT_CORRUPTED_GOTO(mp, offset <= ltoff + ltrec.rm_blockcount,
- out_error);
+ if (XFS_IS_CORRUPT(mp, ltrec.rm_offset > offset)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
+ if (XFS_IS_CORRUPT(mp, offset > ltoff + ltrec.rm_blockcount)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
/* Exact match, simply remove the record from rmap tree. */
@@ -1836,7 +2025,10 @@ xfs_rmap_unmap_shared(
ltrec.rm_offset, ltrec.rm_flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ltrec.rm_blockcount -= len;
error = xfs_rmap_update(cur, &ltrec);
if (error)
@@ -1862,7 +2054,10 @@ xfs_rmap_unmap_shared(
ltrec.rm_offset, ltrec.rm_flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
ltrec.rm_blockcount = bno - ltrec.rm_startblock;
error = xfs_rmap_update(cur, &ltrec);
if (error)
@@ -1938,7 +2133,10 @@ xfs_rmap_map_shared(
error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
cur->bc_private.a.agno, gtrec.rm_startblock,
gtrec.rm_blockcount, gtrec.rm_owner,
@@ -1987,7 +2185,10 @@ xfs_rmap_map_shared(
ltrec.rm_offset, ltrec.rm_flags, &i);
if (error)
goto out_error;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_error;
+ }
error = xfs_rmap_update(cur, &ltrec);
if (error)
@@ -2199,7 +2400,7 @@ xfs_rmap_finish_one(
error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
if (error)
return error;
- if (!agbp)
+ if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
return -EFSCORRUPTED;
rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
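
Because XFS_IS_CORRUPT() takes the corrupt condition rather than the expected one, every compound predicate in the rmap hunks above was inverted via De Morgan's laws during the conversion: the asserted "covers the entire freeing range" conjunction becomes a disjunction of negated comparisons. A small runnable check of that equivalence, using plain unsigned stand-ins for the rmap record fields:

    #include <assert.h>
    #include <stdbool.h>

    /*
     * "expected" is what XFS_WANT_CORRUPTED_GOTO asserted; "corrupt" is the
     * predicate handed to XFS_IS_CORRUPT. They must be exact complements.
     */
    static bool covers(unsigned start, unsigned count, unsigned bno, unsigned len)
    {
            bool expected = start <= bno && start + count >= bno + len;
            bool corrupt  = start >  bno || start + count <  bno + len;

            assert(expected == !corrupt);
            return expected;
    }

    int main(void)
    {
            assert( covers(10, 20, 12, 5));         /* [12,17) inside [10,30) */
            assert(!covers(10, 20, 12, 30));        /* runs past the extent end */
            return 0;
    }
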
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 8ea1efc97b41..f42c74cb8be5 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -15,7 +15,7 @@
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_rtalloc.h"
-
+#include "xfs_error.h"
/*
* Realtime allocator bitmap functions shared with userspace.
@@ -70,7 +70,7 @@ xfs_rtbuf_get(
if (error)
return error;
- if (nmap == 0 || !xfs_bmap_is_real_extent(&map))
+ if (XFS_IS_CORRUPT(mp, nmap == 0 || !xfs_bmap_is_real_extent(&map)))
return -EFSCORRUPTED;
ASSERT(map.br_startblock != NULLFSBLOCK);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index ac6cdca63e15..0ac69751fe85 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -10,6 +10,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
+#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index a9ad90926b87..2b8ccb5b975d 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -55,7 +55,7 @@ xfs_trans_ichgtime(
int flags)
{
struct inode *inode = VFS_I(ip);
- struct timespec64 tv;
+ struct timespec64 tv;
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -66,10 +66,8 @@ xfs_trans_ichgtime(
inode->i_mtime = tv;
if (flags & XFS_ICHGTIME_CHG)
inode->i_ctime = tv;
- if (flags & XFS_ICHGTIME_CREATE) {
- ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
- ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
- }
+ if (flags & XFS_ICHGTIME_CREATE)
+ ip->i_d.di_crtime = tv;
}
/*
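
The xfs_trans_ichgtime() hunk above compiles only because di_crtime is now a struct timespec64 rather than a pair of 32-bit fields, so the two lossy casts collapse into one struct assignment. A sketch of the before/after shapes; the old t_sec/t_nsec names come from the removed lines, while the surrounding struct layout is an assumption:

    struct timespec64 { long long tv_sec; long tv_nsec; };

    /* Assumed old shape: split 32-bit fields, filled with narrowing casts. */
    struct xfs_ictimestamp { int t_sec; int t_nsec; };

    struct icdinode_old { struct xfs_ictimestamp di_crtime; };
    struct icdinode_new { struct timespec64 di_crtime; };

    static void set_crtime_old(struct icdinode_old *d, struct timespec64 tv)
    {
            d->di_crtime.t_sec = (int)tv.tv_sec;    /* truncates past 2038 */
            d->di_crtime.t_nsec = (int)tv.tv_nsec;
    }

    static void set_crtime_new(struct icdinode_new *d, struct timespec64 tv)
    {
            d->di_crtime = tv;      /* plain struct assignment, no truncation */
    }
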
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index d12bbd526e7c..c55cd9a3dec9 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -718,7 +718,7 @@ xfs_calc_clear_agi_bucket_reservation(
/*
* Adjusting quota limits.
- * the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
+ * the disk quota buffer: sizeof(struct xfs_disk_dquot)
*/
STATIC uint
xfs_calc_qm_setqlim_reservation(void)
@@ -742,7 +742,7 @@ xfs_calc_qm_dqalloc_reservation(
/*
* Turning off quotas.
- * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ * the quota off logitems: sizeof(struct xfs_qoff_logitem) * 2
* the superblock for the quota flags: sector size
*/
STATIC uint
@@ -755,7 +755,7 @@ xfs_calc_qm_quotaoff_reservation(
/*
* End of turning off quotas.
- * the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
+ * the quota off logitems: sizeof(struct xfs_qoff_logitem) * 2
*/
STATIC uint
xfs_calc_qm_quotaoff_end_reservation(void)
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 300b3e91ca3a..397d94775440 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -21,7 +21,6 @@ typedef int32_t xfs_suminfo_t; /* type of bitmap summary info */
typedef uint32_t xfs_rtword_t; /* word type for bitmap manipulations */
typedef int64_t xfs_lsn_t; /* log sequence number */
-typedef int32_t xfs_tid_t; /* transaction identifier */
typedef uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
typedef uint32_t xfs_dahash_t; /* dir/attr hash value */
@@ -33,7 +32,6 @@ typedef uint64_t xfs_fileoff_t; /* block number in a file */
typedef uint64_t xfs_filblks_t; /* number of blocks in a file */
typedef int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */
-typedef int64_t xfs_sfiloff_t; /* signed block number in a file */
/*
* New verifiers will return the instruction address of the failing check.
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index 0edc7f8eb96e..d9f0dd444b80 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -398,15 +398,14 @@ out:
STATIC int
xchk_xattr_rec(
struct xchk_da_btree *ds,
- int level,
- void *rec)
+ int level)
{
struct xfs_mount *mp = ds->state->mp;
- struct xfs_attr_leaf_entry *ent = rec;
- struct xfs_da_state_blk *blk;
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
struct xfs_attr_leaf_name_local *lentry;
struct xfs_attr_leaf_name_remote *rentry;
struct xfs_buf *bp;
+ struct xfs_attr_leaf_entry *ent;
xfs_dahash_t calc_hash;
xfs_dahash_t hash;
int nameidx;
@@ -414,7 +413,9 @@ xchk_xattr_rec(
unsigned int badflags;
int error;
- blk = &ds->state->path.blk[level];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+
+ ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;
/* Check the whole block, if necessary. */
error = xchk_xattr_block(ds, level);
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 3d47d111be5a..18a684e18a69 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -294,5 +294,6 @@ xfs_bitmap_set_btblocks(
struct xfs_bitmap *bitmap,
struct xfs_btree_cur *cur)
{
- return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock, bitmap);
+ return xfs_btree_visit_blocks(cur, xfs_bitmap_collect_btblock,
+ XFS_BTREE_VISIT_ALL, bitmap);
}
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 003a772cd26c..2e50d146105d 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -14,8 +14,15 @@
static inline bool
xchk_should_terminate(
struct xfs_scrub *sc,
- int *error)
+ int *error)
{
+ /*
+ * If preemption is disabled, we need to yield to the scheduler every
+ * few seconds so that we don't run afoul of the soft lockup watchdog
+ * or RCU stall detector.
+ */
+ cond_resched();
+
if (fatal_signal_pending(current)) {
if (*error == 0)
*error = -EAGAIN;
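
With cond_resched() folded into xchk_should_terminate(), a scrub loop gets both watchdog relief and fatal-signal cancellation from a single call per iteration, which is exactly how the fscounters.c hunks below use it. A self-contained sketch of that calling convention; the types and the per-AG worker are stand-ins, only the helper's contract is taken from the patch:

    #include <stdbool.h>

    struct xfs_scrub { int dummy; };
    typedef unsigned int xfs_agnumber_t;

    static bool fatal_signal(void) { return false; } /* fatal_signal_pending() stand-in */

    static bool xchk_should_terminate(struct xfs_scrub *sc, int *error)
    {
            /* the real helper also calls cond_resched() here */
            if (fatal_signal()) {
                    if (*error == 0)
                            *error = -11;   /* -EAGAIN */
                    return true;
            }
            return false;
    }

    static int xchk_scan_one_ag(struct xfs_scrub *sc, xfs_agnumber_t agno)
    {
            return 0;       /* hypothetical per-AG work */
    }

    /* The loop shape the fscounters.c hunks switch to. */
    static int xchk_scan_all_ags(struct xfs_scrub *sc, xfs_agnumber_t agcount)
    {
            int error = 0;

            for (xfs_agnumber_t agno = 0; agno < agcount; agno++) {
                    if (xchk_should_terminate(sc, &error))
                            break;
                    error = xchk_scan_one_ag(sc, agno);
                    if (error)
                            break;
            }
            return error;
    }
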
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 77ff9f97bcda..97a15b6f2865 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -77,40 +77,18 @@ xchk_da_set_corrupt(
__return_address);
}
-/* Find an entry at a certain level in a da btree. */
-STATIC void *
-xchk_da_btree_entry(
- struct xchk_da_btree *ds,
- int level,
- int rec)
+static struct xfs_da_node_entry *
+xchk_da_btree_node_entry(
+ struct xchk_da_btree *ds,
+ int level)
{
- char *ents;
- struct xfs_da_state_blk *blk;
- void *baddr;
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
+ struct xfs_da3_icnode_hdr hdr;
- /* Dispatch the entry finding function. */
- blk = &ds->state->path.blk[level];
- baddr = blk->bp->b_addr;
- switch (blk->magic) {
- case XFS_ATTR_LEAF_MAGIC:
- case XFS_ATTR3_LEAF_MAGIC:
- ents = (char *)xfs_attr3_leaf_entryp(baddr);
- return ents + (rec * sizeof(struct xfs_attr_leaf_entry));
- case XFS_DIR2_LEAFN_MAGIC:
- case XFS_DIR3_LEAFN_MAGIC:
- ents = (char *)ds->dargs.dp->d_ops->leaf_ents_p(baddr);
- return ents + (rec * sizeof(struct xfs_dir2_leaf_entry));
- case XFS_DIR2_LEAF1_MAGIC:
- case XFS_DIR3_LEAF1_MAGIC:
- ents = (char *)ds->dargs.dp->d_ops->leaf_ents_p(baddr);
- return ents + (rec * sizeof(struct xfs_dir2_leaf_entry));
- case XFS_DA_NODE_MAGIC:
- case XFS_DA3_NODE_MAGIC:
- ents = (char *)ds->dargs.dp->d_ops->node_tree_p(baddr);
- return ents + (rec * sizeof(struct xfs_da_node_entry));
- }
+ ASSERT(blk->magic == XFS_DA_NODE_MAGIC);
- return NULL;
+ xfs_da3_node_hdr_from_disk(ds->sc->mp, &hdr, blk->bp->b_addr);
+ return hdr.btree + blk->index;
}
/* Scrub a da btree hash (key). */
@@ -120,7 +98,6 @@ xchk_da_btree_hash(
int level,
__be32 *hashp)
{
- struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *entry;
xfs_dahash_t hash;
xfs_dahash_t parent_hash;
@@ -135,8 +112,7 @@ xchk_da_btree_hash(
return 0;
/* Is this hash no larger than the parent hash? */
- blks = ds->state->path.blk;
- entry = xchk_da_btree_entry(ds, level - 1, blks[level - 1].index);
+ entry = xchk_da_btree_node_entry(ds, level - 1);
parent_hash = be32_to_cpu(entry->hashval);
if (parent_hash < hash)
xchk_da_set_corrupt(ds, level);
@@ -355,8 +331,8 @@ xchk_da_btree_block(
goto out_nobuf;
/* Read the buffer. */
- error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno, -2,
- &blk->bp, dargs->whichfork,
+ error = xfs_da_read_buf(dargs->trans, dargs->dp, blk->blkno,
+ XFS_DABUF_MAP_HOLE_OK, &blk->bp, dargs->whichfork,
&xchk_da_btree_buf_ops);
if (!xchk_da_process_error(ds, level, &error))
goto out_nobuf;
@@ -433,8 +409,8 @@ xchk_da_btree_block(
XFS_BLFT_DA_NODE_BUF);
blk->magic = XFS_DA_NODE_MAGIC;
node = blk->bp->b_addr;
- ip->d_ops->node_hdr_from_disk(&nodehdr, node);
- btree = ip->d_ops->node_tree_p(node);
+ xfs_da3_node_hdr_from_disk(ip->i_mount, &nodehdr, node);
+ btree = nodehdr.btree;
*pmaxrecs = nodehdr.count;
blk->hashval = be32_to_cpu(btree[*pmaxrecs - 1].hashval);
if (level == 0) {
@@ -479,14 +455,12 @@ xchk_da_btree(
struct xfs_mount *mp = sc->mp;
struct xfs_da_state_blk *blks;
struct xfs_da_node_entry *key;
- void *rec;
xfs_dablk_t blkno;
int level;
int error;
/* Skip short format data structures; no btree to scan. */
- if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ if (!xfs_ifork_has_extents(sc->ip, whichfork))
return 0;
/* Set up initial da state. */
@@ -538,9 +512,7 @@ xchk_da_btree(
}
/* Dispatch record scrubbing. */
- rec = xchk_da_btree_entry(&ds, level,
- blks[level].index);
- error = scrub_fn(&ds, level, rec);
+ error = scrub_fn(&ds, level);
if (error)
break;
if (xchk_should_terminate(sc, &error) ||
@@ -562,7 +534,7 @@ xchk_da_btree(
}
/* Hashes in order for scrub? */
- key = xchk_da_btree_entry(&ds, level, blks[level].index);
+ key = xchk_da_btree_node_entry(&ds, level);
error = xchk_da_btree_hash(&ds, level, &key->hashval);
if (error)
goto out;
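
The dabtree.c refactor above replaces one void*-returning finder that switched on blk->magic with typed helpers that assert the magic they expect and decode through xfs_da3_node_hdr_from_disk(). An illustrative reduction of that shape, with all names invented:

    #include <assert.h>

    enum { DEMO_NODE_MAGIC = 0xfebe };

    struct demo_node_entry { unsigned int hashval; };
    struct demo_blk {
            int                     magic;
            struct demo_node_entry  *entries;       /* decoded header, cf. hdr.btree */
            int                     index;
    };

    static struct demo_node_entry *demo_node_entry(struct demo_blk *blk)
    {
            /*
             * Each caller now picks the helper for the block type it holds,
             * so the old switch on blk->magic collapses into one assertion.
             */
            assert(blk->magic == DEMO_NODE_MAGIC);
            return blk->entries + blk->index;
    }
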
diff --git a/fs/xfs/scrub/dabtree.h b/fs/xfs/scrub/dabtree.h
index cb3f0003245b..1f3515c6d5a8 100644
--- a/fs/xfs/scrub/dabtree.h
+++ b/fs/xfs/scrub/dabtree.h
@@ -28,8 +28,7 @@ struct xchk_da_btree {
int tree_level;
};
-typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds,
- int level, void *rec);
+typedef int (*xchk_da_btree_rec_fn)(struct xchk_da_btree *ds, int level);
/* Check for da btree operation errors. */
bool xchk_da_process_error(struct xchk_da_btree *ds, int level, int *error);
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 1e2e11721eb9..266da4e4bde6 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -113,6 +113,9 @@ xchk_dir_actor(
offset = xfs_dir2_db_to_da(mp->m_dir_geo,
xfs_dir2_dataptr_to_db(mp->m_dir_geo, pos));
+ if (xchk_should_terminate(sdc->sc, &error))
+ return error;
+
/* Does this inode number make sense? */
if (!xfs_verify_dir_ino(mp, ino)) {
xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
@@ -179,15 +182,17 @@ out:
STATIC int
xchk_dir_rec(
struct xchk_da_btree *ds,
- int level,
- void *rec)
+ int level)
{
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
struct xfs_mount *mp = ds->state->mp;
- struct xfs_dir2_leaf_entry *ent = rec;
struct xfs_inode *dp = ds->dargs.dp;
+ struct xfs_da_geometry *geo = mp->m_dir_geo;
struct xfs_dir2_data_entry *dent;
struct xfs_buf *bp;
- char *p, *endp;
+ struct xfs_dir2_leaf_entry *ent;
+ unsigned int end;
+ unsigned int iter_off;
xfs_ino_t ino;
xfs_dablk_t rec_bno;
xfs_dir2_db_t db;
@@ -195,9 +200,16 @@ xchk_dir_rec(
xfs_dir2_dataptr_t ptr;
xfs_dahash_t calc_hash;
xfs_dahash_t hash;
+ struct xfs_dir3_icleaf_hdr hdr;
unsigned int tag;
int error;
+ ASSERT(blk->magic == XFS_DIR2_LEAF1_MAGIC ||
+ blk->magic == XFS_DIR2_LEAFN_MAGIC);
+
+ xfs_dir2_leaf_hdr_from_disk(mp, &hdr, blk->bp->b_addr);
+ ent = hdr.ents + blk->index;
+
/* Check the hash of the entry. */
error = xchk_da_btree_hash(ds, level, &ent->hashval);
if (error)
@@ -209,15 +221,16 @@ xchk_dir_rec(
return 0;
/* Find the directory entry's location. */
- db = xfs_dir2_dataptr_to_db(mp->m_dir_geo, ptr);
- off = xfs_dir2_dataptr_to_off(mp->m_dir_geo, ptr);
- rec_bno = xfs_dir2_db_to_da(mp->m_dir_geo, db);
+ db = xfs_dir2_dataptr_to_db(geo, ptr);
+ off = xfs_dir2_dataptr_to_off(geo, ptr);
+ rec_bno = xfs_dir2_db_to_da(geo, db);
- if (rec_bno >= mp->m_dir_geo->leafblk) {
+ if (rec_bno >= geo->leafblk) {
xchk_da_set_corrupt(ds, level);
goto out;
}
- error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno, -2, &bp);
+ error = xfs_dir3_data_read(ds->dargs.trans, dp, rec_bno,
+ XFS_DABUF_MAP_HOLE_OK, &bp);
if (!xchk_fblock_process_error(ds->sc, XFS_DATA_FORK, rec_bno,
&error))
goto out;
@@ -230,38 +243,37 @@ xchk_dir_rec(
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out_relse;
- dent = (struct xfs_dir2_data_entry *)(((char *)bp->b_addr) + off);
+ dent = bp->b_addr + off;
/* Make sure we got a real directory entry. */
- p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
- endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
- if (!endp) {
+ iter_off = geo->data_entry_offset;
+ end = xfs_dir3_data_end_offset(geo, bp->b_addr);
+ if (!end) {
xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out_relse;
}
- while (p < endp) {
- struct xfs_dir2_data_entry *dep;
- struct xfs_dir2_data_unused *dup;
+ for (;;) {
+ struct xfs_dir2_data_entry *dep = bp->b_addr + iter_off;
+ struct xfs_dir2_data_unused *dup = bp->b_addr + iter_off;
+
+ if (iter_off >= end) {
+ xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
- dup = (struct xfs_dir2_data_unused *)p;
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- p += be16_to_cpu(dup->length);
+ iter_off += be16_to_cpu(dup->length);
continue;
}
- dep = (struct xfs_dir2_data_entry *)p;
if (dep == dent)
break;
- p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
- }
- if (p >= endp) {
- xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
- goto out_relse;
+ iter_off += xfs_dir2_data_entsize(mp, dep->namelen);
}
/* Retrieve the entry, sanity check it, and compare hashes. */
ino = be64_to_cpu(dent->inumber);
hash = be32_to_cpu(ent->hashval);
- tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
+ tag = be16_to_cpup(xfs_dir2_data_entry_tag_p(mp, dent));
if (!xfs_verify_dir_ino(mp, ino) || tag != off)
xchk_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
if (dent->namelen == 0) {
@@ -319,19 +331,15 @@ xchk_directory_data_bestfree(
struct xfs_buf *bp;
struct xfs_dir2_data_free *bf;
struct xfs_mount *mp = sc->mp;
- const struct xfs_dir_ops *d_ops;
- char *ptr;
- char *endptr;
u16 tag;
unsigned int nr_bestfrees = 0;
unsigned int nr_frees = 0;
unsigned int smallest_bestfree;
int newlen;
- int offset;
+ unsigned int offset;
+ unsigned int end;
int error;
- d_ops = sc->ip->d_ops;
-
if (is_block) {
/* dir block format */
if (lblk != XFS_B_TO_FSBT(mp, XFS_DIR2_DATA_OFFSET))
@@ -339,7 +347,7 @@ xchk_directory_data_bestfree(
error = xfs_dir3_block_read(sc->tp, sc->ip, &bp);
} else {
/* dir data format */
- error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, -1, &bp);
+ error = xfs_dir3_data_read(sc->tp, sc->ip, lblk, 0, &bp);
}
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
@@ -351,7 +359,7 @@ xchk_directory_data_bestfree(
goto out_buf;
/* Do the bestfrees correspond to actual free space? */
- bf = d_ops->data_bestfree_p(bp->b_addr);
+ bf = xfs_dir2_data_bestfree_p(mp, bp->b_addr);
smallest_bestfree = UINT_MAX;
for (dfp = &bf[0]; dfp < &bf[XFS_DIR2_DATA_FD_COUNT]; dfp++) {
offset = be16_to_cpu(dfp->offset);
@@ -361,13 +369,13 @@ xchk_directory_data_bestfree(
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
- dup = (struct xfs_dir2_data_unused *)(bp->b_addr + offset);
+ dup = bp->b_addr + offset;
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
/* bestfree doesn't match the entry it points at? */
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG) ||
be16_to_cpu(dup->length) != be16_to_cpu(dfp->length) ||
- tag != ((char *)dup - (char *)bp->b_addr)) {
+ tag != offset) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -383,30 +391,30 @@ xchk_directory_data_bestfree(
}
/* Make sure the bestfrees are actually the best free spaces. */
- ptr = (char *)d_ops->data_entry_p(bp->b_addr);
- endptr = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
+ offset = mp->m_dir_geo->data_entry_offset;
+ end = xfs_dir3_data_end_offset(mp->m_dir_geo, bp->b_addr);
/* Iterate the entries, stopping when we hit or go past the end. */
- while (ptr < endptr) {
- dup = (struct xfs_dir2_data_unused *)ptr;
+ while (offset < end) {
+ dup = bp->b_addr + offset;
+
/* Skip real entries */
if (dup->freetag != cpu_to_be16(XFS_DIR2_DATA_FREE_TAG)) {
- struct xfs_dir2_data_entry *dep;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
- dep = (struct xfs_dir2_data_entry *)ptr;
- newlen = d_ops->data_entsize(dep->namelen);
+ newlen = xfs_dir2_data_entsize(mp, dep->namelen);
if (newlen <= 0) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
lblk);
goto out_buf;
}
- ptr += newlen;
+ offset += newlen;
continue;
}
/* Spot check this free entry */
tag = be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup));
- if (tag != ((char *)dup - (char *)bp->b_addr)) {
+ if (tag != offset) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
@@ -425,13 +433,13 @@ xchk_directory_data_bestfree(
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out_buf;
}
- ptr += newlen;
- if (ptr <= endptr)
+ offset += newlen;
+ if (offset <= end)
nr_frees++;
}
/* We're required to fill all the space. */
- if (ptr != endptr)
+ if (offset != end)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
/* Did we see at least as many free slots as there are bestfrees? */
@@ -458,7 +466,7 @@ xchk_directory_check_freesp(
{
struct xfs_dir2_data_free *dfp;
- dfp = sc->ip->d_ops->data_bestfree_p(dbp->b_addr);
+ dfp = xfs_dir2_data_bestfree_p(sc->mp, dbp->b_addr);
if (len != be16_to_cpu(dfp->length))
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
@@ -475,12 +483,10 @@ xchk_directory_leaf1_bestfree(
xfs_dablk_t lblk)
{
struct xfs_dir3_icleaf_hdr leafhdr;
- struct xfs_dir2_leaf_entry *ents;
struct xfs_dir2_leaf_tail *ltp;
struct xfs_dir2_leaf *leaf;
struct xfs_buf *dbp;
struct xfs_buf *bp;
- const struct xfs_dir_ops *d_ops = sc->ip->d_ops;
struct xfs_da_geometry *geo = sc->mp->m_dir_geo;
__be16 *bestp;
__u16 best;
@@ -492,14 +498,13 @@ xchk_directory_leaf1_bestfree(
int error;
/* Read the free space block. */
- error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
+ error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, &bp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
xchk_buffer_recheck(sc, bp);
leaf = bp->b_addr;
- d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
- ents = d_ops->leaf_ents_p(leaf);
+ xfs_dir2_leaf_hdr_from_disk(sc->ip->i_mount, &leafhdr, leaf);
ltp = xfs_dir2_leaf_tail_p(geo, leaf);
bestcount = be32_to_cpu(ltp->bestcount);
bestp = xfs_dir2_leaf_bests_p(ltp);
@@ -521,24 +526,25 @@ xchk_directory_leaf1_bestfree(
}
/* Is the leaf count even remotely sane? */
- if (leafhdr.count > d_ops->leaf_max_ents(geo)) {
+ if (leafhdr.count > geo->leaf_max_ents) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Leaves and bests don't overlap in leaf format. */
- if ((char *)&ents[leafhdr.count] > (char *)bestp) {
+ if ((char *)&leafhdr.ents[leafhdr.count] > (char *)bestp) {
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
goto out;
}
/* Check hash value order, count stale entries. */
for (i = 0; i < leafhdr.count; i++) {
- hash = be32_to_cpu(ents[i].hashval);
+ hash = be32_to_cpu(leafhdr.ents[i].hashval);
if (i > 0 && lasthash > hash)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, lblk);
lasthash = hash;
- if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ if (leafhdr.ents[i].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (leafhdr.stale != stale)
@@ -552,7 +558,7 @@ xchk_directory_leaf1_bestfree(
if (best == NULLDATAOFF)
continue;
error = xfs_dir3_data_read(sc->tp, sc->ip,
- i * args->geo->fsbcount, -1, &dbp);
+ i * args->geo->fsbcount, 0, &dbp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
@@ -575,7 +581,6 @@ xchk_directory_free_bestfree(
struct xfs_dir3_icfree_hdr freehdr;
struct xfs_buf *dbp;
struct xfs_buf *bp;
- __be16 *bestp;
__u16 best;
unsigned int stale = 0;
int i;
@@ -595,17 +600,16 @@ xchk_directory_free_bestfree(
}
/* Check all the entries. */
- sc->ip->d_ops->free_hdr_from_disk(&freehdr, bp->b_addr);
- bestp = sc->ip->d_ops->free_bests_p(bp->b_addr);
- for (i = 0; i < freehdr.nvalid; i++, bestp++) {
- best = be16_to_cpu(*bestp);
+ xfs_dir2_free_hdr_from_disk(sc->ip->i_mount, &freehdr, bp->b_addr);
+ for (i = 0; i < freehdr.nvalid; i++) {
+ best = be16_to_cpu(freehdr.bests[i]);
if (best == NULLDATAOFF) {
stale++;
continue;
}
error = xfs_dir3_data_read(sc->tp, sc->ip,
(freehdr.firstdb + i) * args->geo->fsbcount,
- -1, &dbp);
+ 0, &dbp);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, lblk,
&error))
break;
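
Throughout dir.c the scrubber now walks directory data blocks by unsigned byte offset into bp->b_addr instead of char-pointer arithmetic, so the out-of-bounds test happens before each record is touched rather than after the loop. A self-contained sketch of the offset-walk pattern over variable-length records; the record layout is invented:

    #include <stddef.h>
    #include <stdint.h>

    struct rec {
            uint16_t        len;    /* total record length, including header */
            uint8_t         data[];
    };

    /* Walk records in buf[0..end), validating the offset before each use. */
    static const struct rec *find_rec_at(const void *buf, unsigned int end,
                                         unsigned int target_off)
    {
            unsigned int off = 0;

            for (;;) {
                    const struct rec *r =
                            (const void *)((const char *)buf + off);

                    if (off >= end || end - off < sizeof(*r))
                            return NULL;    /* walked past the block: corrupt */
                    if (off == target_off)
                            return r;
                    if (r->len < sizeof(*r))
                            return NULL;    /* bad length would loop forever */
                    off += r->len;
            }
    }
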
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index 98f82d7c8b40..7251c66a82c9 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -104,7 +104,7 @@ next_loop_perag:
pag = NULL;
error = 0;
- if (fatal_signal_pending(current))
+ if (xchk_should_terminate(sc, &error))
break;
}
@@ -163,6 +163,7 @@ xchk_fscount_aggregate_agcounts(
uint64_t delayed;
xfs_agnumber_t agno;
int tries = 8;
+ int error = 0;
retry:
fsc->icount = 0;
@@ -196,10 +197,13 @@ retry:
xfs_perag_put(pag);
- if (fatal_signal_pending(current))
+ if (xchk_should_terminate(sc, &error))
break;
}
+ if (error)
+ return error;
+
/*
* The global incore space reservation is taken from the incore
* counters, so leave that out of the computation.
diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c
index b2f602811e9d..83d27cdf579b 100644
--- a/fs/xfs/scrub/health.c
+++ b/fs/xfs/scrub/health.c
@@ -11,6 +11,7 @@
#include "xfs_sb.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
+#include "scrub/health.h"
/*
* Scrub and In-Core Filesystem Health Assessments
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index c962bd534690..5705adc43a75 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -32,8 +32,10 @@ xchk_setup_parent(
struct xchk_parent_ctx {
struct dir_context dc;
+ struct xfs_scrub *sc;
xfs_ino_t ino;
xfs_nlink_t nlink;
+ bool cancelled;
};
/* Look for a single entry in a directory pointing to an inode. */
@@ -47,11 +49,21 @@ xchk_parent_actor(
unsigned type)
{
struct xchk_parent_ctx *spc;
+ int error = 0;
spc = container_of(dc, struct xchk_parent_ctx, dc);
if (spc->ino == ino)
spc->nlink++;
- return 0;
+
+ /*
+ * If we're facing a fatal signal, bail out. Store the cancellation
+ * status separately because the VFS readdir code squashes error codes
+ * into short directory reads.
+ */
+ if (xchk_should_terminate(spc->sc, &error))
+ spc->cancelled = true;
+
+ return error;
}
/* Count the number of dentries in the parent dir that point to this inode. */
@@ -62,10 +74,9 @@ xchk_parent_count_parent_dentries(
xfs_nlink_t *nlink)
{
struct xchk_parent_ctx spc = {
- .dc.actor = xchk_parent_actor,
- .dc.pos = 0,
- .ino = sc->ip->i_ino,
- .nlink = 0,
+ .dc.actor = xchk_parent_actor,
+ .ino = sc->ip->i_ino,
+ .sc = sc,
};
size_t bufsize;
loff_t oldpos;
@@ -80,7 +91,7 @@ xchk_parent_count_parent_dentries(
*/
lock_mode = xfs_ilock_data_map_shared(parent);
if (parent->i_d.di_nextents > 0)
- error = xfs_dir3_data_readahead(parent, 0, -1);
+ error = xfs_dir3_data_readahead(parent, 0, 0);
xfs_iunlock(parent, lock_mode);
if (error)
return error;
@@ -97,6 +108,10 @@ xchk_parent_count_parent_dentries(
error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
if (error)
goto out;
+ if (spc.cancelled) {
+ error = -EAGAIN;
+ goto out;
+ }
if (oldpos == spc.dc.pos)
break;
oldpos = spc.dc.pos;
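
The parent.c change above works around the fact that the VFS readdir machinery squashes the actor's return code into a short read: cancellation is recorded in the context struct and re-checked by the caller. A generic, self-contained sketch of that flag-in-context pattern; all names are hypothetical:

    #include <stdbool.h>

    struct walk_ctx {
            long    want;
            int     matches;
            bool    cancelled;
    };

    static bool cancel_requested(void) { return false; } /* signal-check stand-in */

    static int walk_actor(struct walk_ctx *wc, long item)
    {
            if (item == wc->want)
                    wc->matches++;
            if (cancel_requested())
                    wc->cancelled = true;   /* survives a squashed return code */
            return wc->cancelled ? -1 : 0;
    }

    static int walk_all(struct walk_ctx *wc, const long *items, int n)
    {
            for (int i = 0; i < n; i++)
                    if (walk_actor(wc, items[i]) != 0)
                            break;
            /* Trust the flag, not the (possibly squashed) iterator result. */
            return wc->cancelled ? -11 /* -EAGAIN */ : 0;
    }
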
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 0a33b4421c32..905a34558361 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -93,6 +93,10 @@ xchk_quota_item(
unsigned long long rcount;
xfs_ino_t fs_icount;
xfs_dqid_t id = be32_to_cpu(d->d_id);
+ int error = 0;
+
+ if (xchk_should_terminate(sc, &error))
+ return error;
/*
* Except for the root dquot, the actual dquot we got must either have
@@ -178,6 +182,9 @@ xchk_quota_item(
if (id != 0 && rhard != 0 && rcount > rhard)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return -EFSCORRUPTED;
+
return 0;
}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 15c8c5f3f688..f1775bb19313 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -16,6 +16,7 @@
#include "xfs_qm.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
+#include "xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 96d7071cfa46..91693fce34a8 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -12,8 +12,10 @@
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_trace.h"
-#include <linux/posix_acl_xattr.h>
+#include "xfs_error.h"
+#include "xfs_acl.h"
+#include <linux/posix_acl_xattr.h>
/*
* Locking scheme:
@@ -23,6 +25,7 @@
STATIC struct posix_acl *
xfs_acl_from_disk(
+ struct xfs_mount *mp,
const struct xfs_acl *aclp,
int len,
int max_entries)
@@ -32,11 +35,18 @@ xfs_acl_from_disk(
const struct xfs_acl_entry *ace;
unsigned int count, i;
- if (len < sizeof(*aclp))
+ if (len < sizeof(*aclp)) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
+ len);
return ERR_PTR(-EFSCORRUPTED);
+ }
+
count = be32_to_cpu(aclp->acl_cnt);
- if (count > max_entries || XFS_ACL_SIZE(count) != len)
+ if (count > max_entries || XFS_ACL_SIZE(count) != len) {
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, aclp,
+ len);
return ERR_PTR(-EFSCORRUPTED);
+ }
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
@@ -145,7 +155,7 @@ xfs_get_acl(struct inode *inode, int type)
if (error != -ENOATTR)
acl = ERR_PTR(error);
} else {
- acl = xfs_acl_from_disk(xfs_acl, len,
+ acl = xfs_acl_from_disk(ip->i_mount, xfs_acl, len,
XFS_ACL_MAX_ENTRIES(ip->i_mount));
kmem_free(xfs_acl);
}
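
xfs_acl_from_disk() now takes the mount so that a short or mis-sized ACL buffer is reported via XFS_CORRUPTION_ERROR() before -EFSCORRUPTED bubbles up, instead of failing silently. A hedged sketch of that report-then-fail pattern; the hexdump merely stands in for the real macro, which also records the error level, mount, and return address:

    #include <stdio.h>

    #define EFSCORRUPTED 117

    static void report_corruption(const char *func, const void *buf, int len)
    {
            const unsigned char *p = buf;
            int i;

            fprintf(stderr, "%s: corrupt metadata, %d bytes:", func, len);
            for (i = 0; i < len && i < 16; i++)
                    fprintf(stderr, " %02x", p[i]);
            fputc('\n', stderr);
    }

    static int validate_len(const void *buf, int len, int min_len)
    {
            if (len < min_len) {
                    report_corruption(__func__, buf, len);
                    return -EFSCORRUPTED;   /* the caller wraps this in ERR_PTR() */
            }
            return 0;
    }
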
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f16d5f196c6b..3a688eb5c5ae 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -18,108 +18,22 @@
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
-/*
- * structure owned by writepages passed to individual writepage calls
- */
struct xfs_writepage_ctx {
- struct xfs_bmbt_irec imap;
- int fork;
+ struct iomap_writepage_ctx ctx;
unsigned int data_seq;
unsigned int cow_seq;
- struct xfs_ioend *ioend;
};
-struct block_device *
-xfs_find_bdev_for_inode(
- struct inode *inode)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
-
- if (XFS_IS_REALTIME_INODE(ip))
- return mp->m_rtdev_targp->bt_bdev;
- else
- return mp->m_ddev_targp->bt_bdev;
-}
-
-struct dax_device *
-xfs_find_daxdev_for_inode(
- struct inode *inode)
+static inline struct xfs_writepage_ctx *
+XFS_WPC(struct iomap_writepage_ctx *ctx)
{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
-
- if (XFS_IS_REALTIME_INODE(ip))
- return mp->m_rtdev_targp->bt_daxdev;
- else
- return mp->m_ddev_targp->bt_daxdev;
-}
-
-static void
-xfs_finish_page_writeback(
- struct inode *inode,
- struct bio_vec *bvec,
- int error)
-{
- struct iomap_page *iop = to_iomap_page(bvec->bv_page);
-
- if (error) {
- SetPageError(bvec->bv_page);
- mapping_set_error(inode->i_mapping, -EIO);
- }
-
- ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
- ASSERT(!iop || atomic_read(&iop->write_count) > 0);
-
- if (!iop || atomic_dec_and_test(&iop->write_count))
- end_page_writeback(bvec->bv_page);
-}
-
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-STATIC void
-xfs_destroy_ioend(
- struct xfs_ioend *ioend,
- int error)
-{
- struct inode *inode = ioend->io_inode;
- struct bio *bio = &ioend->io_inline_bio;
- struct bio *last = ioend->io_bio, *next;
- u64 start = bio->bi_iter.bi_sector;
- bool quiet = bio_flagged(bio, BIO_QUIET);
-
- for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- /*
- * For the last bio, bi_private points to the ioend, so we
- * need to explicitly end the iteration here.
- */
- if (bio == last)
- next = NULL;
- else
- next = bio->bi_private;
-
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bvec, bio, iter_all)
- xfs_finish_page_writeback(inode, bvec, error);
- bio_put(bio);
- }
-
- if (unlikely(error && !quiet)) {
- xfs_err_ratelimited(XFS_I(inode)->i_mount,
- "writeback error on sector %llu", start);
- }
+ return container_of(ctx, struct xfs_writepage_ctx, ctx);
}
/*
* Fast and loose check if this write could update the on-disk inode size.
*/
-static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
+static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
return ioend->io_offset + ioend->io_size >
XFS_I(ioend->io_inode)->i_d.di_size;
@@ -127,7 +41,7 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
STATIC int
xfs_setfilesize_trans_alloc(
- struct xfs_ioend *ioend)
+ struct iomap_ioend *ioend)
{
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
struct xfs_trans *tp;
@@ -137,7 +51,7 @@ xfs_setfilesize_trans_alloc(
if (error)
return error;
- ioend->io_append_trans = tp;
+ ioend->io_private = tp;
/*
* We may pass freeze protection with a transaction. So tell lockdep
@@ -200,11 +114,11 @@ xfs_setfilesize(
STATIC int
xfs_setfilesize_ioend(
- struct xfs_ioend *ioend,
+ struct iomap_ioend *ioend,
int error)
{
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_trans *tp = ioend->io_append_trans;
+ struct xfs_trans *tp = ioend->io_private;
/*
* The transaction may have been allocated in the I/O submission thread,
@@ -228,9 +142,8 @@ xfs_setfilesize_ioend(
*/
STATIC void
xfs_end_ioend(
- struct xfs_ioend *ioend)
+ struct iomap_ioend *ioend)
{
- struct list_head ioend_list;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
@@ -257,7 +170,7 @@ xfs_end_ioend(
*/
error = blk_status_to_errno(ioend->io_bio->bi_status);
if (unlikely(error)) {
- if (ioend->io_fork == XFS_COW_FORK)
+ if (ioend->io_flags & IOMAP_F_SHARED)
xfs_reflink_cancel_cow_range(ip, offset, size, true);
goto done;
}
@@ -265,154 +178,86 @@ xfs_end_ioend(
/*
* Success: commit the COW or unwritten blocks if needed.
*/
- if (ioend->io_fork == XFS_COW_FORK)
+ if (ioend->io_flags & IOMAP_F_SHARED)
error = xfs_reflink_end_cow(ip, offset, size);
- else if (ioend->io_state == XFS_EXT_UNWRITTEN)
+ else if (ioend->io_type == IOMAP_UNWRITTEN)
error = xfs_iomap_write_unwritten(ip, offset, size, false);
else
- ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+ ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);
done:
- if (ioend->io_append_trans)
+ if (ioend->io_private)
error = xfs_setfilesize_ioend(ioend, error);
- list_replace_init(&ioend->io_list, &ioend_list);
- xfs_destroy_ioend(ioend, error);
-
- while (!list_empty(&ioend_list)) {
- ioend = list_first_entry(&ioend_list, struct xfs_ioend,
- io_list);
- list_del_init(&ioend->io_list);
- xfs_destroy_ioend(ioend, error);
- }
-
+ iomap_finish_ioends(ioend, error);
memalloc_nofs_restore(nofs_flag);
}
/*
- * We can merge two adjacent ioends if they have the same set of work to do.
- */
-static bool
-xfs_ioend_can_merge(
- struct xfs_ioend *ioend,
- struct xfs_ioend *next)
-{
- if (ioend->io_bio->bi_status != next->io_bio->bi_status)
- return false;
- if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
- return false;
- if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
- (next->io_state == XFS_EXT_UNWRITTEN))
- return false;
- if (ioend->io_offset + ioend->io_size != next->io_offset)
- return false;
- return true;
-}
-
-/*
* If the to be merged ioend has a preallocated transaction for file
* size updates we need to ensure the ioend it is merged into also
* has one. If it already has one we can simply cancel the transaction
* as it is guaranteed to be clean.
*/
static void
-xfs_ioend_merge_append_transactions(
- struct xfs_ioend *ioend,
- struct xfs_ioend *next)
+xfs_ioend_merge_private(
+ struct iomap_ioend *ioend,
+ struct iomap_ioend *next)
{
- if (!ioend->io_append_trans) {
- ioend->io_append_trans = next->io_append_trans;
- next->io_append_trans = NULL;
+ if (!ioend->io_private) {
+ ioend->io_private = next->io_private;
+ next->io_private = NULL;
} else {
xfs_setfilesize_ioend(next, -ECANCELED);
}
}
-/* Try to merge adjacent completions. */
-STATIC void
-xfs_ioend_try_merge(
- struct xfs_ioend *ioend,
- struct list_head *more_ioends)
-{
- struct xfs_ioend *next_ioend;
-
- while (!list_empty(more_ioends)) {
- next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
- io_list);
- if (!xfs_ioend_can_merge(ioend, next_ioend))
- break;
- list_move_tail(&next_ioend->io_list, &ioend->io_list);
- ioend->io_size += next_ioend->io_size;
- if (next_ioend->io_append_trans)
- xfs_ioend_merge_append_transactions(ioend, next_ioend);
- }
-}
-
-/* list_sort compare function for ioends */
-static int
-xfs_ioend_compare(
- void *priv,
- struct list_head *a,
- struct list_head *b)
-{
- struct xfs_ioend *ia;
- struct xfs_ioend *ib;
-
- ia = container_of(a, struct xfs_ioend, io_list);
- ib = container_of(b, struct xfs_ioend, io_list);
- if (ia->io_offset < ib->io_offset)
- return -1;
- else if (ia->io_offset > ib->io_offset)
- return 1;
- return 0;
-}
-
/* Finish all pending io completions. */
void
xfs_end_io(
struct work_struct *work)
{
- struct xfs_inode *ip;
- struct xfs_ioend *ioend;
- struct list_head completion_list;
+ struct xfs_inode *ip =
+ container_of(work, struct xfs_inode, i_ioend_work);
+ struct iomap_ioend *ioend;
+ struct list_head tmp;
unsigned long flags;
- ip = container_of(work, struct xfs_inode, i_ioend_work);
-
spin_lock_irqsave(&ip->i_ioend_lock, flags);
- list_replace_init(&ip->i_ioend_list, &completion_list);
+ list_replace_init(&ip->i_ioend_list, &tmp);
spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- list_sort(NULL, &completion_list, xfs_ioend_compare);
-
- while (!list_empty(&completion_list)) {
- ioend = list_first_entry(&completion_list, struct xfs_ioend,
- io_list);
+ iomap_sort_ioends(&tmp);
+ while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
+ io_list))) {
list_del_init(&ioend->io_list);
- xfs_ioend_try_merge(ioend, &completion_list);
+ iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
xfs_end_ioend(ioend);
}
}
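
xfs_end_io() above now delegates sorting and merging to the generic iomap helpers, passing xfs_ioend_merge_private() as the callback that reconciles the optional setfilesize transactions. A self-contained sketch of the merge shape; the adjacency test comes from the removed xfs_ioend_can_merge() (the real code also requires matching I/O status and type), everything else is a stand-in:

    #include <stddef.h>

    struct ioend {
            long long       offset;
            long long       size;
            void            *private;       /* optional setfilesize transaction */
    };

    /* Exactly one of two merged completions may keep the transaction. */
    static void merge_private(struct ioend *ioend, struct ioend *next)
    {
            if (!ioend->private) {
                    ioend->private = next->private;
                    next->private = NULL;
            } else {
                    /* stands in for xfs_setfilesize_ioend(next, -ECANCELED) */
                    next->private = NULL;
            }
    }

    static int try_merge(struct ioend *ioend, struct ioend *next)
    {
            if (ioend->offset + ioend->size != next->offset)
                    return 0;       /* not adjacent in the file */
            ioend->size += next->size;
            if (next->private)
                    merge_private(ioend, next);
            return 1;
    }
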
+static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
+{
+ return ioend->io_private ||
+ ioend->io_type == IOMAP_UNWRITTEN ||
+ (ioend->io_flags & IOMAP_F_SHARED);
+}
+
STATIC void
xfs_end_bio(
struct bio *bio)
{
- struct xfs_ioend *ioend = bio->bi_private;
+ struct iomap_ioend *ioend = bio->bi_private;
struct xfs_inode *ip = XFS_I(ioend->io_inode);
- struct xfs_mount *mp = ip->i_mount;
unsigned long flags;
- if (ioend->io_fork == XFS_COW_FORK ||
- ioend->io_state == XFS_EXT_UNWRITTEN ||
- ioend->io_append_trans != NULL) {
- spin_lock_irqsave(&ip->i_ioend_lock, flags);
- if (list_empty(&ip->i_ioend_list))
- WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
- &ip->i_ioend_work));
- list_add_tail(&ioend->io_list, &ip->i_ioend_list);
- spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
- } else
- xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
+ ASSERT(xfs_ioend_needs_workqueue(ioend));
+
+ spin_lock_irqsave(&ip->i_ioend_lock, flags);
+ if (list_empty(&ip->i_ioend_list))
+ WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
+ &ip->i_ioend_work));
+ list_add_tail(&ioend->io_list, &ip->i_ioend_list);
+ spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}
/*
@@ -421,19 +266,19 @@ xfs_end_bio(
*/
static bool
xfs_imap_valid(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb)
+ loff_t offset)
{
- if (offset_fsb < wpc->imap.br_startoff ||
- offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
+ if (offset < wpc->iomap.offset ||
+ offset >= wpc->iomap.offset + wpc->iomap.length)
return false;
/*
* If this is a COW mapping, it is sufficient to check that the mapping
* covers the offset. Be careful to check this first because the caller
* can revalidate a COW mapping without updating the data seqno.
*/
- if (wpc->fork == XFS_COW_FORK)
+ if (wpc->iomap.flags & IOMAP_F_SHARED)
return true;
/*
@@ -443,17 +288,17 @@ xfs_imap_valid(
* checked (and found nothing at this offset) could have added
* overlapping blocks.
*/
- if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
+ if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
return false;
if (xfs_inode_has_cow_data(ip) &&
- wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
+ XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
return false;
return true;
}
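/*
 * Editor's sketch of the sequence-counter idea behind xfs_imap_valid(),
 * reduced to standalone userspace C (the names are illustrative, not
 * the kernel API): a cached mapping is trusted only while it still
 * covers the offset and the fork's sequence number matches the value
 * sampled when the mapping was created.
 */
#include <stdbool.h>
#include <stdio.h>

struct cached_map {
	long long	offset;		/* start of the cached range */
	long long	length;		/* length of the cached range */
	unsigned int	seq;		/* fork seq sampled at lookup */
};

static bool map_valid(const struct cached_map *map, unsigned int cur_seq,
		long long offset)
{
	if (offset < map->offset || offset >= map->offset + map->length)
		return false;		/* offset outside the cached range */
	return map->seq == cur_seq;	/* stale if the fork has changed */
}

int main(void)
{
	struct cached_map map = { .offset = 0, .length = 4096, .seq = 7 };

	printf("%d\n", map_valid(&map, 7, 100));	/* 1: still valid */
	printf("%d\n", map_valid(&map, 8, 100));	/* 0: fork changed */
	return 0;
}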
/*
 * Pass in a delalloc extent and convert it to real extents, return the real
- * extent that maps offset_fsb in wpc->imap.
+ * extent that maps offset in wpc->iomap.
*
* The current page is held locked so nothing could have removed the block
* backing offset_fsb, although it could have moved from the COW to the data
@@ -461,32 +306,38 @@ xfs_imap_valid(
*/
static int
xfs_convert_blocks(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct xfs_inode *ip,
- xfs_fileoff_t offset_fsb)
+ int whichfork,
+ loff_t offset)
{
int error;
+ unsigned *seq;
+
+ if (whichfork == XFS_COW_FORK)
+ seq = &XFS_WPC(wpc)->cow_seq;
+ else
+ seq = &XFS_WPC(wpc)->data_seq;
/*
- * Attempt to allocate whatever delalloc extent currently backs
- * offset_fsb and put the result into wpc->imap. Allocate in a loop
- * because it may take several attempts to allocate real blocks for a
- * contiguous delalloc extent if free space is sufficiently fragmented.
+ * Attempt to allocate whatever delalloc extent currently backs offset
+ * and put the result into wpc->iomap. Allocate in a loop because it
+ * may take several attempts to allocate real blocks for a contiguous
+ * delalloc extent if free space is sufficiently fragmented.
*/
do {
- error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
- &wpc->imap, wpc->fork == XFS_COW_FORK ?
- &wpc->cow_seq : &wpc->data_seq);
+ error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
+ &wpc->iomap, seq);
if (error)
return error;
- } while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
+ } while (wpc->iomap.offset + wpc->iomap.length <= offset);
return 0;
}
-STATIC int
+static int
xfs_map_blocks(
- struct xfs_writepage_ctx *wpc,
+ struct iomap_writepage_ctx *wpc,
struct inode *inode,
loff_t offset)
{
@@ -496,6 +347,7 @@ xfs_map_blocks(
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count);
xfs_fileoff_t cow_fsb = NULLFILEOFF;
+ int whichfork = XFS_DATA_FORK;
struct xfs_bmbt_irec imap;
struct xfs_iext_cursor icur;
int retries = 0;
@@ -519,7 +371,7 @@ xfs_map_blocks(
* against concurrent updates and provides a memory barrier on the way
* out that ensures that we always see the current value.
*/
- if (xfs_imap_valid(wpc, ip, offset_fsb))
+ if (xfs_imap_valid(wpc, ip, offset))
return 0;
/*
@@ -541,10 +393,10 @@ retry:
xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
cow_fsb = imap.br_startoff;
if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
- wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
+ XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- wpc->fork = XFS_COW_FORK;
+ whichfork = XFS_COW_FORK;
goto allocate_blocks;
}
@@ -552,7 +404,7 @@ retry:
* No COW extent overlap. Revalidate now that we may have updated
* ->cow_seq. If the data mapping is still valid, we're done.
*/
- if (xfs_imap_valid(wpc, ip, offset_fsb)) {
+ if (xfs_imap_valid(wpc, ip, offset)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0;
}
@@ -564,11 +416,9 @@ retry:
*/
if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
imap.br_startoff = end_fsb; /* fake a hole past EOF */
- wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
+ XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- wpc->fork = XFS_DATA_FORK;
-
/* landed in a hole or beyond EOF? */
if (imap.br_startoff > offset_fsb) {
imap.br_blockcount = imap.br_startoff - offset_fsb;
@@ -592,11 +442,11 @@ retry:
isnullstartblock(imap.br_startblock))
goto allocate_blocks;
- wpc->imap = imap;
- trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
+ xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
+ trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
return 0;
allocate_blocks:
- error = xfs_convert_blocks(wpc, ip, offset_fsb);
+ error = xfs_convert_blocks(wpc, ip, whichfork, offset);
if (error) {
/*
* If we failed to find the extent in the COW fork we might have
@@ -605,7 +455,7 @@ allocate_blocks:
* the former case, but prevent additional retries to avoid
* looping forever for the latter case.
*/
- if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
+ if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
goto retry;
ASSERT(error != -EAGAIN);
return error;
@@ -616,34 +466,22 @@ allocate_blocks:
* original delalloc one. Trim the return extent to the next COW
* boundary again to force a re-lookup.
*/
- if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
- cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
- wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
+ if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+ loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
+
+ if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
+ wpc->iomap.length = cow_offset - wpc->iomap.offset;
+ }
- ASSERT(wpc->imap.br_startoff <= offset_fsb);
- ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
- trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
+ ASSERT(wpc->iomap.offset <= offset);
+ ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
+ trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
return 0;
}
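/*
 * Editor's sketch of the trimming arithmetic at the end of
 * xfs_map_blocks() above, pulled out as a self-contained helper with a
 * hypothetical name: if a COW extent begins inside the data-fork
 * mapping just returned, shorten the mapping so the writeback loop
 * re-looks-up the COW fork when it reaches that boundary.
 */
static long long trim_length_to_cow(long long map_offset,
		long long map_length, long long cow_offset)
{
	/* Assumes cow_offset > map_offset, which the earlier COW-fork
	 * branch in xfs_map_blocks() guarantees. */
	if (cow_offset < map_offset + map_length)
		return cow_offset - map_offset;	/* stop at the COW extent */
	return map_length;			/* no overlap, keep as-is */
}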
-/*
- * Submit the bio for an ioend. We are passed an ioend with a bio attached to
- * it, and we submit that bio. The ioend may be used for multiple bio
- * submissions, so we only want to allocate an append transaction for the ioend
- * once. In the case of multiple bio submission, each bio will take an IO
- * reference to the ioend to ensure that the ioend completion is only done once
- * all bios have been submitted and the ioend is really done.
- *
- * If @status is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we have marked pages for writeback
- * and unlocked them. In this situation, we need to fail the bio and ioend
- * rather than submit it to IO. This typically only happens on a filesystem
- * shutdown.
- */
-STATIC int
-xfs_submit_ioend(
- struct writeback_control *wbc,
- struct xfs_ioend *ioend,
+static int
+xfs_prepare_ioend(
+ struct iomap_ioend *ioend,
int status)
{
unsigned int nofs_flag;
@@ -656,157 +494,24 @@ xfs_submit_ioend(
nofs_flag = memalloc_nofs_save();
/* Convert CoW extents to regular */
- if (!status && ioend->io_fork == XFS_COW_FORK) {
+ if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
ioend->io_offset, ioend->io_size);
}
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
- (ioend->io_fork == XFS_COW_FORK ||
- ioend->io_state != XFS_EXT_UNWRITTEN) &&
+ ((ioend->io_flags & IOMAP_F_SHARED) ||
+ ioend->io_type != IOMAP_UNWRITTEN) &&
xfs_ioend_is_append(ioend) &&
- !ioend->io_append_trans)
+ !ioend->io_private)
status = xfs_setfilesize_trans_alloc(ioend);
memalloc_nofs_restore(nofs_flag);
- ioend->io_bio->bi_private = ioend;
- ioend->io_bio->bi_end_io = xfs_end_bio;
-
- /*
- * If we are failing the IO now, just mark the ioend with an
- * error and finish it. This will run IO completion immediately
- * as there is only one reference to the ioend at this point in
- * time.
- */
- if (status) {
- ioend->io_bio->bi_status = errno_to_blk_status(status);
- bio_endio(ioend->io_bio);
- return status;
- }
-
- submit_bio(ioend->io_bio);
- return 0;
-}
-
-static struct xfs_ioend *
-xfs_alloc_ioend(
- struct inode *inode,
- int fork,
- xfs_exntst_t state,
- xfs_off_t offset,
- struct block_device *bdev,
- sector_t sector,
- struct writeback_control *wbc)
-{
- struct xfs_ioend *ioend;
- struct bio *bio;
-
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = sector;
- bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
- bio->bi_write_hint = inode->i_write_hint;
- wbc_init_bio(wbc, bio);
-
- ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_fork = fork;
- ioend->io_state = state;
- ioend->io_inode = inode;
- ioend->io_size = 0;
- ioend->io_offset = offset;
- ioend->io_append_trans = NULL;
- ioend->io_bio = bio;
- return ioend;
-}
-
-/*
- * Allocate a new bio, and chain the old bio to the new one.
- *
- * Note that we have to perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in xfs_destroy_ioend().
- */
-static struct bio *
-xfs_chain_bio(
- struct bio *prev)
-{
- struct bio *new;
-
- new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
- bio_copy_dev(new, prev);/* also copies over blkcg information */
- new->bi_iter.bi_sector = bio_end_sector(prev);
- new->bi_opf = prev->bi_opf;
- new->bi_write_hint = prev->bi_write_hint;
-
- bio_chain(prev, new);
- bio_get(prev); /* for xfs_destroy_ioend */
- submit_bio(prev);
- return new;
-}
-
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first, otherwise finish off the current ioend and start another.
- */
-STATIC void
-xfs_add_to_ioend(
- struct inode *inode,
- xfs_off_t offset,
- struct page *page,
- struct iomap_page *iop,
- struct xfs_writepage_ctx *wpc,
- struct writeback_control *wbc,
- struct list_head *iolist)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- struct block_device *bdev = xfs_find_bdev_for_inode(inode);
- unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
- bool merged, same_page = false;
- sector_t sector;
-
- sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
- ((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
-
- if (!wpc->ioend ||
- wpc->fork != wpc->ioend->io_fork ||
- wpc->imap.br_state != wpc->ioend->io_state ||
- sector != bio_end_sector(wpc->ioend->io_bio) ||
- offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
- if (wpc->ioend)
- list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
- wpc->imap.br_state, offset, bdev, sector, wbc);
- }
-
- merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
- &same_page);
-
- if (iop && !same_page)
- atomic_inc(&iop->write_count);
-
- if (!merged) {
- if (bio_full(wpc->ioend->io_bio, len))
- wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
- bio_add_page(wpc->ioend->io_bio, page, len, poff);
- }
-
- wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
-}
-
-STATIC void
-xfs_vm_invalidatepage(
- struct page *page,
- unsigned int offset,
- unsigned int length)
-{
- trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
- iomap_invalidatepage(page, offset, length);
+ if (xfs_ioend_needs_workqueue(ioend))
+ ioend->io_bio->bi_end_io = xfs_end_bio;
+ return status;
}
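/*
 * Editor's note on the memalloc_nofs_save()/restore() pair above: these
 * are the stock kernel scoped-allocation helpers, shown here in
 * isolation. Every allocation inside the scope implicitly behaves as
 * GFP_NOFS, so the append transaction allocated for a size update
 * cannot recurse back into the filesystem under memory pressure.
 */
	unsigned int nofs_flag = memalloc_nofs_save();

	/* ... any allocation here is implicitly GFP_NOFS ... */

	memalloc_nofs_restore(nofs_flag);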
/*
@@ -820,8 +525,8 @@ xfs_vm_invalidatepage(
* transaction as there is no space left for block reservation (typically why we
* see a ENOSPC in writeback).
*/
-STATIC void
-xfs_aops_discard_page(
+static void
+xfs_discard_page(
struct page *page)
{
struct inode *inode = page->mapping->host;
@@ -843,246 +548,14 @@ xfs_aops_discard_page(
if (error && !XFS_FORCED_SHUTDOWN(mp))
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
- xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
-}
-
-/*
- * We implement an immediate ioend submission policy here to avoid needing to
- * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
- * instead.
- *
- * If a new ioend is created and cached, the old ioend is returned and queued
- * locally for submission once the entire page is processed or an error has been
- * detected. While ioends are submitted immediately after they are completed,
- * batching optimisations are provided by higher level block plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-static int
-xfs_writepage_map(
- struct xfs_writepage_ctx *wpc,
- struct writeback_control *wbc,
- struct inode *inode,
- struct page *page,
- uint64_t end_offset)
-{
- LIST_HEAD(submit_list);
- struct iomap_page *iop = to_iomap_page(page);
- unsigned len = i_blocksize(inode);
- struct xfs_ioend *ioend, *next;
- uint64_t file_offset; /* file offset of page */
- int error = 0, count = 0, i;
-
- ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
- ASSERT(!iop || atomic_read(&iop->write_count) == 0);
-
- /*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
- */
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
- if (iop && !test_bit(i, iop->uptodate))
- continue;
-
- error = xfs_map_blocks(wpc, inode, file_offset);
- if (error)
- break;
- if (wpc->imap.br_startblock == HOLESTARTBLOCK)
- continue;
- xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
- &submit_list);
- count++;
- }
-
- ASSERT(wpc->ioend || list_empty(&submit_list));
- ASSERT(PageLocked(page));
- ASSERT(!PageWriteback(page));
-
- /*
- * On error, we have to fail the ioend here because we may have set
- * pages under writeback, we have to make sure we run IO completion to
- * mark the error state of the IO appropriately, so we can't cancel the
- * ioend directly here. That means we have to mark this page as under
- * writeback if we included any blocks from it in the ioend chain so
- * that completion treats it correctly.
- *
-	 * If we didn't include the page in the ioend, then on error we can
- * simply discard and unlock it as there are no other users of the page
- * now. The caller will still need to trigger submission of outstanding
- * ioends on the writepage context so they are treated correctly on
- * error.
- */
- if (unlikely(error)) {
- if (!count) {
- xfs_aops_discard_page(page);
- ClearPageUptodate(page);
- unlock_page(page);
- goto done;
- }
-
- /*
- * If the page was not fully cleaned, we need to ensure that the
- * higher layers come back to it correctly. That means we need
- * to keep the page dirty, and for WB_SYNC_ALL writeback we need
- * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
- * so another attempt to write this page in this writeback sweep
- * will be made.
- */
- set_page_writeback_keepwrite(page);
- } else {
- clear_page_dirty_for_io(page);
- set_page_writeback(page);
- }
-
- unlock_page(page);
-
- /*
- * Preserve the original error if there was one, otherwise catch
- * submission errors here and propagate into subsequent ioend
- * submissions.
- */
- list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
- int error2;
-
- list_del_init(&ioend->io_list);
- error2 = xfs_submit_ioend(wbc, ioend, error);
- if (error2 && !error)
- error = error2;
- }
-
- /*
- * We can end up here with no error and nothing to write only if we race
- * with a partial page truncate on a sub-page block sized filesystem.
- */
- if (!count)
- end_page_writeback(page);
-done:
- mapping_set_error(page->mapping, error);
- return error;
+ iomap_invalidatepage(page, 0, PAGE_SIZE);
}
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
- * regular allocated space.
- */
-STATIC int
-xfs_do_writepage(
- struct page *page,
- struct writeback_control *wbc,
- void *data)
-{
- struct xfs_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- loff_t offset;
- uint64_t end_offset;
- pgoff_t end_index;
-
- trace_xfs_writepage(inode, page, 0, 0);
-
- /*
- * Refuse to write the page out if we are called from reclaim context.
- *
- * This avoids stack overflows when called from deeply used stacks in
- * random callers for direct reclaim or memcg reclaim. We explicitly
- * allow reclaim from kswapd as the stack usage there is relatively low.
- *
- * This should never happen except in the case of a VM regression so
- * warn about it.
- */
- if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
- PF_MEMALLOC))
- goto redirty;
-
- /*
- * Given that we do not allow direct reclaim to call us, we should
- * never be called while in a filesystem transaction.
- */
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
- goto redirty;
-
- /*
- * Is this page beyond the end of the file?
- *
-	 * If the page index is less than the end_index, adjust the
-	 * end_offset to the highest offset that this page should represent.
- * -----------------------------------------------------
- * | file mapping | <EOF> |
- * -----------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | |
- * ^--------------------------------^----------|--------
- * | desired writeback range | see else |
- * ---------------------------------^------------------|
- */
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
- else {
- /*
- * Check whether the page to write out is beyond or straddles
- * i_size or not.
- * -------------------------------------------------------
- * | file mapping | <EOF> |
- * -------------------------------------------------------
- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
- * ^--------------------------------^-----------|---------
- * | | Straddles |
- * ---------------------------------^-----------|--------|
- */
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
-
- /*
- * Skip the page if it is fully outside i_size, e.g. due to a
- * truncate operation that is in progress. We must redirty the
- * page so that reclaim stops reclaiming it. Otherwise
- * xfs_vm_releasepage() is called on it and gets confused.
- *
-		 * Note that end_index is an unsigned long; it would overflow
-		 * if the given offset is greater than 16TB on a 32-bit
-		 * system, so checking whether the page is fully outside
-		 * i_size via "if (page->index >= end_index + 1)" would fail,
-		 * as "end_index + 1" evaluates to 0. The page would then be
-		 * redirtied and written out repeatedly, resulting in an
-		 * infinite loop, and the user program performing this
-		 * operation would hang. Instead, we can verify this
-		 * situation by checking whether the page to write is totally
-		 * beyond i_size or whether its offset is just equal to the EOF.
- */
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
- goto redirty;
-
- /*
- * The page straddles i_size. It must be zeroed out on each
- * and every writepage invocation because it may be mmapped.
- * "A file is mapped in multiples of the page size. For a file
- * that is not a multiple of the page size, the remaining
- * memory is zeroed when mapped, and writes to that region are
- * not written out to the file."
- */
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
- }
-
- return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
-
-redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
- return 0;
-}
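/*
 * Editor's demonstration of the overflow the removed comment warns
 * about, as standalone userspace C: with a 32-bit page index, a file
 * whose size lands in the page just below 16TB makes end_index
 * 0xffffffff, so "end_index + 1" wraps to 0 and a naive
 * "page->index >= end_index + 1" test would never fire.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t end_index = 0xffffffffu;	/* i_size >> PAGE_SHIFT on 32-bit */

	printf("end_index + 1 = %u\n", end_index + 1u);	/* prints 0 */
	return 0;
}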
+static const struct iomap_writeback_ops xfs_writeback_ops = {
+ .map_blocks = xfs_map_blocks,
+ .prepare_ioend = xfs_prepare_ioend,
+ .discard_page = xfs_discard_page,
+};
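/*
 * Editor's sketch of how a filesystem plugs into the iomap writeback
 * loop after this conversion (assumes the v5.5-era iomap API; the
 * "myfs" names are hypothetical): embed struct iomap_writepage_ctx in a
 * private context carrying fs-specific state -- XFS keeps data_seq and
 * cow_seq there and reaches them through its XFS_WPC() wrapper -- and
 * delegate the page walk to iomap_writepages(). myfs_writeback_ops
 * would supply the three callbacks shown above.
 */
struct myfs_writepage_ctx {
	struct iomap_writepage_ctx	ctx;
	unsigned int			data_seq;
	unsigned int			cow_seq;
};

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct myfs_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}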
STATIC int
xfs_vm_writepage(
@@ -1090,12 +563,8 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = { };
- int ret;
- ret = xfs_do_writepage(page, wbc, &wpc);
- if (wpc.ioend)
- ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
- return ret;
+ return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}
STATIC int
@@ -1104,13 +573,9 @@ xfs_vm_writepages(
struct writeback_control *wbc)
{
struct xfs_writepage_ctx wpc = { };
- int ret;
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
- if (wpc.ioend)
- ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
- return ret;
+ return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
STATIC int
@@ -1118,18 +583,11 @@ xfs_dax_writepages(
struct address_space *mapping,
struct writeback_control *wbc)
{
- xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
- return dax_writeback_mapping_range(mapping,
- xfs_find_bdev_for_inode(mapping->host), wbc);
-}
+ struct xfs_inode *ip = XFS_I(mapping->host);
-STATIC int
-xfs_vm_releasepage(
- struct page *page,
- gfp_t gfp_mask)
-{
- trace_xfs_releasepage(page->mapping->host, page, 0, 0);
- return iomap_releasepage(page, gfp_mask);
+ xfs_iflags_clear(ip, XFS_ITRUNCATED);
+ return dax_writeback_mapping_range(mapping,
+ xfs_inode_buftarg(ip)->bt_bdev, wbc);
}
STATIC sector_t
@@ -1152,7 +610,7 @@ xfs_vm_bmap(
*/
if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
return 0;
- return iomap_bmap(mapping, block, &xfs_iomap_ops);
+ return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}
STATIC int
@@ -1160,8 +618,7 @@ xfs_vm_readpage(
struct file *unused,
struct page *page)
{
- trace_xfs_vm_readpage(page->mapping->host, 1);
- return iomap_readpage(page, &xfs_iomap_ops);
+ return iomap_readpage(page, &xfs_read_iomap_ops);
}
STATIC int
@@ -1171,8 +628,7 @@ xfs_vm_readpages(
struct list_head *pages,
unsigned nr_pages)
{
- trace_xfs_vm_readpages(mapping->host, nr_pages);
- return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
+ return iomap_readpages(mapping, pages, nr_pages, &xfs_read_iomap_ops);
}
static int
@@ -1181,8 +637,9 @@ xfs_iomap_swapfile_activate(
struct file *swap_file,
sector_t *span)
{
- sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
- return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
+ sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
+ return iomap_swapfile_activate(sis, swap_file, span,
+ &xfs_read_iomap_ops);
}
const struct address_space_operations xfs_address_space_operations = {
@@ -1191,8 +648,8 @@ const struct address_space_operations xfs_address_space_operations = {
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
.set_page_dirty = iomap_set_page_dirty,
- .releasepage = xfs_vm_releasepage,
- .invalidatepage = xfs_vm_invalidatepage,
+ .releasepage = iomap_releasepage,
+ .invalidatepage = iomap_invalidatepage,
.bmap = xfs_vm_bmap,
.direct_IO = noop_direct_IO,
.migratepage = iomap_migrate_page,
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 45a1ea240cbb..e0bd68419764 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -6,29 +6,9 @@
#ifndef __XFS_AOPS_H__
#define __XFS_AOPS_H__
-extern struct bio_set xfs_ioend_bioset;
-
-/*
- * Structure for buffered I/O completions.
- */
-struct xfs_ioend {
- struct list_head io_list; /* next ioend in chain */
- int io_fork; /* inode fork written back */
- xfs_exntst_t io_state; /* extent state */
- struct inode *io_inode; /* file being written to */
- size_t io_size; /* size of the extent */
- xfs_off_t io_offset; /* offset in the file */
- struct xfs_trans *io_append_trans;/* xact. for size update */
- struct bio *io_bio; /* bio being built */
- struct bio io_inline_bio; /* MUST BE LAST! */
-};
-
extern const struct address_space_operations xfs_address_space_operations;
extern const struct address_space_operations xfs_dax_aops;
int xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
-extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
-extern struct dax_device *xfs_find_daxdev_for_inode(struct inode *);
-
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index a640a285cc52..5ff49523d8ea 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -22,6 +22,7 @@
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"
+#include "xfs_error.h"
/*
* Look at all the extents for this logical region,
@@ -190,37 +191,35 @@ xfs_attr3_leaf_inactive(
*/
STATIC int
xfs_attr3_node_inactive(
- struct xfs_trans **trans,
- struct xfs_inode *dp,
- struct xfs_buf *bp,
- int level)
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp,
+ int level)
{
- xfs_da_blkinfo_t *info;
- xfs_da_intnode_t *node;
- xfs_dablk_t child_fsb;
- xfs_daddr_t parent_blkno, child_blkno;
- int error, i;
- struct xfs_buf *child_bp;
- struct xfs_da_node_entry *btree;
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_da_blkinfo *info;
+ xfs_dablk_t child_fsb;
+ xfs_daddr_t parent_blkno, child_blkno;
+ struct xfs_buf *child_bp;
struct xfs_da3_icnode_hdr ichdr;
+ int error, i;
/*
* Since this code is recursive (gasp!) we must protect ourselves.
*/
if (level > XFS_DA_NODE_MAXDEPTH) {
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
- return -EIO;
+ xfs_buf_corruption_error(bp);
+ return -EFSCORRUPTED;
}
- node = bp->b_addr;
- dp->d_ops->node_hdr_from_disk(&ichdr, node);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &ichdr, bp->b_addr);
parent_blkno = bp->b_bn;
if (!ichdr.count) {
xfs_trans_brelse(*trans, bp);
return 0;
}
- btree = dp->d_ops->node_tree_p(node);
- child_fsb = be32_to_cpu(btree[0].before);
+ child_fsb = be32_to_cpu(ichdr.btree[0].before);
xfs_trans_brelse(*trans, bp); /* no locks for later trans */
/*
@@ -235,7 +234,7 @@ xfs_attr3_node_inactive(
* traversal of the tree so we may deal with many blocks
* before we come back to this one.
*/
- error = xfs_da3_node_read(*trans, dp, child_fsb, -1, &child_bp,
+ error = xfs_da3_node_read(*trans, dp, child_fsb, &child_bp,
XFS_ATTR_FORK);
if (error)
return error;
@@ -258,8 +257,9 @@ xfs_attr3_node_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
break;
default:
- error = -EIO;
+ xfs_buf_corruption_error(child_bp);
xfs_trans_brelse(*trans, child_bp);
+ error = -EFSCORRUPTED;
break;
}
if (error)
@@ -268,10 +268,16 @@ xfs_attr3_node_inactive(
/*
* Remove the subsidiary block from the cache and from the log.
*/
- error = xfs_da_get_buf(*trans, dp, 0, child_blkno, &child_bp,
- XFS_ATTR_FORK);
- if (error)
+ child_bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp,
+ child_blkno,
+ XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
+ if (!child_bp)
+ return -EIO;
+		error = child_bp->b_error;
+ if (error) {
+ xfs_trans_brelse(*trans, child_bp);
return error;
+ }
xfs_trans_binval(*trans, child_bp);
/*
@@ -279,13 +285,15 @@ xfs_attr3_node_inactive(
* child block number.
*/
if (i + 1 < ichdr.count) {
- error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
- &bp, XFS_ATTR_FORK);
+ struct xfs_da3_icnode_hdr phdr;
+
+ error = xfs_da3_node_read_mapped(*trans, dp,
+ parent_blkno, &bp, XFS_ATTR_FORK);
if (error)
return error;
- node = bp->b_addr;
- btree = dp->d_ops->node_tree_p(node);
- child_fsb = be32_to_cpu(btree[i + 1].before);
+ xfs_da3_node_hdr_from_disk(dp->i_mount, &phdr,
+ bp->b_addr);
+ child_fsb = be32_to_cpu(phdr.btree[i + 1].before);
xfs_trans_brelse(*trans, bp);
}
/*
@@ -310,6 +318,7 @@ xfs_attr3_root_inactive(
struct xfs_trans **trans,
struct xfs_inode *dp)
{
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_da_blkinfo *info;
struct xfs_buf *bp;
xfs_daddr_t blkno;
@@ -321,7 +330,7 @@ xfs_attr3_root_inactive(
* the extents in reverse order the extent containing
* block 0 must still be there.
*/
- error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read(*trans, dp, 0, &bp, XFS_ATTR_FORK);
if (error)
return error;
blkno = bp->b_bn;
@@ -341,7 +350,8 @@ xfs_attr3_root_inactive(
error = xfs_attr3_leaf_inactive(trans, dp, bp);
break;
default:
- error = -EIO;
+ error = -EFSCORRUPTED;
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(*trans, bp);
break;
}
@@ -351,9 +361,15 @@ xfs_attr3_root_inactive(
/*
* Invalidate the incore copy of the root block.
*/
- error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
- if (error)
+ bp = xfs_trans_get_buf(*trans, mp->m_ddev_targp, blkno,
+ XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0);
+ if (!bp)
+ return -EIO;
+ error = bp->b_error;
+ if (error) {
+ xfs_trans_brelse(*trans, bp);
return error;
+ }
xfs_trans_binval(*trans, bp); /* remove from cache */
/*
* Commit the invalidate and start the next transaction.
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 00758fdc2fec..d37743bdf274 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -49,14 +49,16 @@ xfs_attr_shortform_compare(const void *a, const void *b)
* we can begin returning them to the user.
*/
static int
-xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+xfs_attr_shortform_list(
+ struct xfs_attr_list_context *context)
{
- attrlist_cursor_kern_t *cursor;
- xfs_attr_sf_sort_t *sbuf, *sbp;
- xfs_attr_shortform_t *sf;
- xfs_attr_sf_entry_t *sfe;
- xfs_inode_t *dp;
- int sbsize, nsbuf, count, i;
+ struct attrlist_cursor_kern *cursor;
+ struct xfs_attr_sf_sort *sbuf, *sbp;
+ struct xfs_attr_shortform *sf;
+ struct xfs_attr_sf_entry *sfe;
+ struct xfs_inode *dp;
+ int sbsize, nsbuf, count, i;
+ int error = 0;
ASSERT(context != NULL);
dp = context->dp;
@@ -84,6 +86,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
(XFS_ISRESET_CURSOR(cursor) &&
(dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+ !xfs_attr_namecheck(sfe->nameval,
+ sfe->namelen)))
+ return -EFSCORRUPTED;
context->put_listent(context,
sfe->flags,
sfe->nameval,
@@ -161,10 +167,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
break;
}
}
- if (i == nsbuf) {
- kmem_free(sbuf);
- return 0;
- }
+ if (i == nsbuf)
+ goto out;
/*
* Loop putting entries into the user buffer.
@@ -174,6 +178,12 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
cursor->hashval = sbp->hash;
cursor->offset = 0;
}
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+ !xfs_attr_namecheck(sbp->name,
+ sbp->namelen))) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
context->put_listent(context,
sbp->flags,
sbp->name,
@@ -183,9 +193,9 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
break;
cursor->offset++;
}
-
+out:
kmem_free(sbuf);
- return 0;
+ return error;
}
/*
@@ -213,7 +223,7 @@ xfs_attr_node_list_lookup(
ASSERT(*pbp == NULL);
cursor->blkno = 0;
for (;;) {
- error = xfs_da3_node_read(tp, dp, cursor->blkno, -1, &bp,
+ error = xfs_da3_node_read(tp, dp, cursor->blkno, &bp,
XFS_ATTR_FORK);
if (error)
return error;
@@ -229,7 +239,7 @@ xfs_attr_node_list_lookup(
goto out_corruptbuf;
}
- dp->d_ops->node_hdr_from_disk(&nodehdr, node);
+ xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);
/* Tree taller than we can handle; bail out! */
if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
@@ -243,7 +253,7 @@ xfs_attr_node_list_lookup(
else
expected_level--;
- btree = dp->d_ops->node_tree_p(node);
+ btree = nodehdr.btree;
for (i = 0; i < nodehdr.count; btree++, i++) {
if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
cursor->blkno = be32_to_cpu(btree->before);
@@ -258,7 +268,7 @@ xfs_attr_node_list_lookup(
return 0;
/* We can't point back to the root. */
- if (cursor->blkno == 0)
+ if (XFS_IS_CORRUPT(mp, cursor->blkno == 0))
return -EFSCORRUPTED;
}
@@ -269,6 +279,7 @@ xfs_attr_node_list_lookup(
return 0;
out_corruptbuf:
+ xfs_buf_corruption_error(bp);
xfs_trans_brelse(tp, bp);
return -EFSCORRUPTED;
}
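/*
 * Editor's sketch of the XFS_IS_CORRUPT() convention this series
 * adopts (rough shape only, not the kernel's exact definition; the
 * "myfs" names are hypothetical): test the suspicious condition, emit
 * a corruption report naming it when it fires, and hand back a boolean
 * so call sites stay one-liners.
 */
#define MYFS_IS_CORRUPT(mp, expr) \
	(unlikely(expr) ? (myfs_corruption_report((mp), #expr), true) : false)

/* Typical call site, mirroring the hunks above: */
	if (MYFS_IS_CORRUPT(mp, cursor->blkno == 0))
		return -EFSCORRUPTED;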
@@ -284,7 +295,7 @@ xfs_attr_node_list(
struct xfs_buf *bp;
struct xfs_inode *dp = context->dp;
struct xfs_mount *mp = dp->i_mount;
- int error;
+ int error = 0;
trace_xfs_attr_node_list(context);
@@ -298,8 +309,8 @@ xfs_attr_node_list(
*/
bp = NULL;
if (cursor->blkno > 0) {
- error = xfs_da3_node_read(context->tp, dp, cursor->blkno, -1,
- &bp, XFS_ATTR_FORK);
+ error = xfs_da3_node_read(context->tp, dp, cursor->blkno, &bp,
+ XFS_ATTR_FORK);
if ((error != 0) && (error != -EFSCORRUPTED))
return error;
if (bp) {
@@ -358,24 +369,27 @@ xfs_attr_node_list(
*/
for (;;) {
leaf = bp->b_addr;
- xfs_attr3_leaf_list_int(bp, context);
+ error = xfs_attr3_leaf_list_int(bp, context);
+ if (error)
+ break;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
if (context->seen_enough || leafhdr.forw == 0)
break;
cursor->blkno = leafhdr.forw;
xfs_trans_brelse(context->tp, bp);
- error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno, -1, &bp);
+ error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno,
+ &bp);
if (error)
return error;
}
xfs_trans_brelse(context->tp, bp);
- return 0;
+ return error;
}
/*
* Copy out attribute list entries for attr_list(), for leaf attribute lists.
*/
-void
+int
xfs_attr3_leaf_list_int(
struct xfs_buf *bp,
struct xfs_attr_list_context *context)
@@ -417,7 +431,7 @@ xfs_attr3_leaf_list_int(
}
if (i == ichdr.count) {
trace_xfs_attr_list_notfound(context);
- return;
+ return 0;
}
} else {
entry = &entries[0];
@@ -457,6 +471,9 @@ xfs_attr3_leaf_list_int(
valuelen = be32_to_cpu(name_rmt->valuelen);
}
+ if (XFS_IS_CORRUPT(context->dp->i_mount,
+ !xfs_attr_namecheck(name, namelen)))
+ return -EFSCORRUPTED;
context->put_listent(context, entry->flags,
name, namelen, valuelen);
if (context->seen_enough)
@@ -464,7 +481,7 @@ xfs_attr3_leaf_list_int(
cursor->offset++;
}
trace_xfs_attr_list_leaf_end(context);
- return;
+ return 0;
}
/*
@@ -479,13 +496,13 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
trace_xfs_attr_leaf_list(context);
context->cursor->blkno = 0;
- error = xfs_attr3_leaf_read(context->tp, context->dp, 0, -1, &bp);
+ error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp);
if (error)
return error;
- xfs_attr3_leaf_list_int(bp, context);
+ error = xfs_attr3_leaf_list_int(bp, context);
xfs_trans_brelse(context->tp, bp);
- return 0;
+ return error;
}
int
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 83d24e983d4c..ee6f4229cebc 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -21,7 +21,7 @@
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_bui_zone;
kmem_zone_t *xfs_bud_zone;
@@ -35,7 +35,7 @@ void
xfs_bui_item_free(
struct xfs_bui_log_item *buip)
{
- kmem_zone_free(xfs_bui_zone, buip);
+ kmem_cache_free(xfs_bui_zone, buip);
}
/*
@@ -201,7 +201,7 @@ xfs_bud_item_release(
struct xfs_bud_log_item *budp = BUD_ITEM(lip);
xfs_bui_release(budp->bud_buip);
- kmem_zone_free(xfs_bud_zone, budp);
+ kmem_cache_free(xfs_bud_zone, budp);
}
static const struct xfs_item_ops xfs_bud_item_ops = {
@@ -456,7 +456,7 @@ xfs_bui_recover(
if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -490,7 +490,7 @@ xfs_bui_recover(
*/
set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
- return -EIO;
+ return -EFSCORRUPTED;
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
@@ -525,6 +525,7 @@ xfs_bui_recover(
type = bui_type;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto err_inode;
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 4f443703065e..2efd78a9719e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -53,15 +53,16 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
*/
int
xfs_zero_extent(
- struct xfs_inode *ip,
- xfs_fsblock_t start_fsb,
- xfs_off_t count_fsb)
+ struct xfs_inode *ip,
+ xfs_fsblock_t start_fsb,
+ xfs_off_t count_fsb)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
- sector_t block = XFS_BB_TO_FSBT(mp, sector);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
+ sector_t block = XFS_BB_TO_FSBT(mp, sector);
- return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+ return blkdev_issue_zeroout(target->bt_bdev,
block << (mp->m_super->s_blocksize_bits - 9),
count_fsb << (mp->m_super->s_blocksize_bits - 9),
GFP_NOFS, 0);
@@ -164,13 +165,6 @@ xfs_bmap_rtalloc(
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
-
- /* Zero the extent if we were asked to do so */
- if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
- error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
- if (error)
- return error;
- }
} else {
ap->length = 0;
}
@@ -179,29 +173,6 @@ xfs_bmap_rtalloc(
#endif /* CONFIG_XFS_RT */
/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary. All offsets are considered outside
- * the end of file for an empty fork, so 1 is returned in *eof in that case.
- */
-int
-xfs_bmap_eof(
- struct xfs_inode *ip,
- xfs_fileoff_t endoff,
- int whichfork,
- int *eof)
-{
- struct xfs_bmbt_irec rec;
- int error;
-
- error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
- if (error || *eof)
- return error;
-
- *eof = endoff >= rec.br_startoff + rec.br_blockcount;
- return 0;
-}
-
-/*
* Extent tree block counting routines.
*/
@@ -229,106 +200,6 @@ xfs_bmap_count_leaves(
}
/*
- * Count leaf blocks given a range of extent records originally
- * in btree format.
- */
-STATIC void
-xfs_bmap_disk_count_leaves(
- struct xfs_mount *mp,
- struct xfs_btree_block *block,
- int numrecs,
- xfs_filblks_t *count)
-{
- int b;
- xfs_bmbt_rec_t *frp;
-
- for (b = 1; b <= numrecs; b++) {
- frp = XFS_BMBT_REC_ADDR(mp, block, b);
- *count += xfs_bmbt_disk_get_blockcount(frp);
- }
-}
-
-/*
- * Recursively walks each level of a btree
- * to count total fsblocks in use.
- */
-STATIC int
-xfs_bmap_count_tree(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_ifork *ifp,
- xfs_fsblock_t blockno,
- int levelin,
- xfs_extnum_t *nextents,
- xfs_filblks_t *count)
-{
- int error;
- struct xfs_buf *bp, *nbp;
- int level = levelin;
- __be64 *pp;
- xfs_fsblock_t bno = blockno;
- xfs_fsblock_t nextbno;
- struct xfs_btree_block *block, *nextblock;
- int numrecs;
-
- error = xfs_btree_read_bufl(mp, tp, bno, &bp, XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- block = XFS_BUF_TO_BLOCK(bp);
-
- if (--level) {
- /* Not at node above leaves, count this level of nodes */
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- while (nextbno != NULLFSBLOCK) {
- error = xfs_btree_read_bufl(mp, tp, nextbno, &nbp,
- XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- nextblock = XFS_BUF_TO_BLOCK(nbp);
- nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
- xfs_trans_brelse(tp, nbp);
- }
-
- /* Dive to the next level */
- pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
- bno = be64_to_cpu(*pp);
- error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
- count);
- if (error) {
- xfs_trans_brelse(tp, bp);
- XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- xfs_trans_brelse(tp, bp);
- } else {
- /* count all level 1 nodes and their leaves */
- for (;;) {
- nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
- numrecs = be16_to_cpu(block->bb_numrecs);
- (*nextents) += numrecs;
- xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
- xfs_trans_brelse(tp, bp);
- if (nextbno == NULLFSBLOCK)
- break;
- bno = nextbno;
- error = xfs_btree_read_bufl(mp, tp, bno, &bp,
- XFS_BMAP_BTREE_REF,
- &xfs_bmbt_buf_ops);
- if (error)
- return error;
- *count += 1;
- block = XFS_BUF_TO_BLOCK(bp);
- }
- }
- return 0;
-}
-
-/*
* Count fsblocks of the given fork. Delayed allocation extents are
* not counted towards the totals.
*/
@@ -340,26 +211,19 @@ xfs_bmap_count_blocks(
xfs_extnum_t *nextents,
xfs_filblks_t *count)
{
- struct xfs_mount *mp; /* file system mount structure */
- __be64 *pp; /* pointer to block address */
- struct xfs_btree_block *block; /* current btree block */
- struct xfs_ifork *ifp; /* fork structure */
- xfs_fsblock_t bno; /* block # of "block" */
- int level; /* btree level, for checking */
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_btree_cur *cur;
+ xfs_extlen_t btblocks = 0;
int error;
- bno = NULLFSBLOCK;
- mp = ip->i_mount;
*nextents = 0;
*count = 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+
if (!ifp)
return 0;
switch (XFS_IFORK_FORMAT(ip, whichfork)) {
- case XFS_DINODE_FMT_EXTENTS:
- *nextents = xfs_bmap_count_leaves(ifp, count);
- return 0;
case XFS_DINODE_FMT_BTREE:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
@@ -367,26 +231,23 @@ xfs_bmap_count_blocks(
return error;
}
+ cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
+ error = xfs_btree_count_blocks(cur, &btblocks);
+ xfs_btree_del_cursor(cur, error);
+ if (error)
+ return error;
+
/*
- * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
+ * xfs_btree_count_blocks includes the root block contained in
+ * the inode fork in @btblocks, so subtract one because we're
+ * only interested in allocated disk blocks.
*/
- block = ifp->if_broot;
- level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
- pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
- bno = be64_to_cpu(*pp);
- ASSERT(bno != NULLFSBLOCK);
- ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
- ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
-
- error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
- nextents, count);
- if (error) {
- XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
- XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
- }
- return 0;
+ *count += btblocks - 1;
+
+ /* fall through */
+ case XFS_DINODE_FMT_EXTENTS:
+ *nextents = xfs_bmap_count_leaves(ifp, count);
+ break;
}
return 0;
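/*
 * Editor's worked example of the accounting above, as standalone
 * userspace C with made-up numbers: a btree-format fork whose bmbt has
 * a root record in the inode plus 4 on-disk blocks, mapping extents
 * that cover 1000 data blocks.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long btblocks = 5;	/* root + 4, per the cursor walk */
	unsigned long long mapped = 1000;	/* blocks the extents point at */
	unsigned long long count = 0;

	count += btblocks - 1;	/* the root lives in the inode, not on disk */
	count += mapped;	/* what xfs_bmap_count_leaves() then adds */
	printf("%llu\n", count);	/* 1004 allocated disk blocks */
	return 0;
}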
@@ -964,8 +825,8 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
- allocatesize_fsb, alloc_type, resblks,
- imapp, &nimaps);
+ allocatesize_fsb, alloc_type, 0, imapp,
+ &nimaps);
if (error)
goto error0;
@@ -1039,6 +900,7 @@ out_trans_cancel:
goto out_unlock;
}
+/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
struct xfs_inode *ip,
@@ -1050,9 +912,6 @@ xfs_flush_unmap_range(
xfs_off_t rounding, start, end;
int error;
- /* wait for the completion of any pending DIOs */
- inode_dio_wait(inode);
-
rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
@@ -1084,10 +943,6 @@ xfs_free_file_space(
if (len <= 0) /* if nothing being freed */
return 0;
- error = xfs_flush_unmap_range(ip, offset, len);
- if (error)
- return error;
-
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
@@ -1113,7 +968,8 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
+ &xfs_buffered_write_iomap_ops);
if (error)
return error;
@@ -1131,43 +987,6 @@ xfs_free_file_space(
return error;
}
-/*
- * Preallocate and zero a range of a file. This mechanism has the allocation
- * semantics of fallocate and in addition converts data in the range to zeroes.
- */
-int
-xfs_zero_file_space(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t len)
-{
- struct xfs_mount *mp = ip->i_mount;
- uint blksize;
- int error;
-
- trace_xfs_zero_file_space(ip);
-
- blksize = 1 << mp->m_sb.sb_blocklog;
-
- /*
- * Punch a hole and prealloc the range. We use hole punch rather than
- * unwritten extent conversion for two reasons:
- *
- * 1.) Hole punch handles partial block zeroing for us.
- *
- * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
- * by virtue of the hole punch.
- */
- error = xfs_free_file_space(ip, offset, len);
- if (error || xfs_is_always_cow_inode(ip))
- return error;
-
- return xfs_alloc_file_space(ip, round_down(offset, blksize),
- round_up(offset + len, blksize) -
- round_down(offset, blksize),
- XFS_BMAPI_PREALLOC);
-}
-
static int
xfs_prepare_shift(
struct xfs_inode *ip,
@@ -1750,6 +1569,14 @@ xfs_swap_extents(
goto out_unlock;
}
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ goto out_unlock;
+
+ error = xfs_qm_dqattach(tip);
+ if (error)
+ goto out_unlock;
+
error = xfs_swap_extent_flush(ip);
if (error)
goto out_unlock;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 7a78229cf1a7..9f993168b55b 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -30,8 +30,6 @@ xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
}
#endif /* CONFIG_XFS_RT */
-int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
- int whichfork, int *eof);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
xfs_fileoff_t start_fsb, xfs_fileoff_t length);
@@ -59,8 +57,6 @@ int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len, int alloc_type);
int xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len);
-int xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
- xfs_off_t len);
int xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset,
xfs_off_t len);
int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 0abba171aa89..a0229c368e78 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -238,7 +238,7 @@ _xfs_buf_alloc(
*/
error = xfs_buf_get_maps(bp, nmaps);
if (error) {
- kmem_zone_free(xfs_buf_zone, bp);
+ kmem_cache_free(xfs_buf_zone, bp);
return NULL;
}
@@ -304,7 +304,7 @@ _xfs_buf_free_pages(
* The buffer must not be on any hash - use xfs_buf_rele instead for
* hashed and refcounted buffers
*/
-void
+static void
xfs_buf_free(
xfs_buf_t *bp)
{
@@ -328,7 +328,7 @@ xfs_buf_free(
kmem_free(bp->b_addr);
_xfs_buf_free_pages(bp);
xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
+ kmem_cache_free(xfs_buf_zone, bp);
}
/*
@@ -461,7 +461,7 @@ _xfs_buf_map_pages(
unsigned nofs_flag;
/*
- * vm_map_ram() will allocate auxillary structures (e.g.
+ * vm_map_ram() will allocate auxiliary structures (e.g.
* pagetables) with GFP_KERNEL, yet we are likely to be under
* GFP_NOFS context here. Hence we need to tell memory reclaim
* that we are in such a context via PF_MEMALLOC_NOFS to prevent
@@ -949,7 +949,7 @@ xfs_buf_get_uncached(
_xfs_buf_free_pages(bp);
fail_free_buf:
xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
+ kmem_cache_free(xfs_buf_zone, bp);
fail:
return NULL;
}
@@ -1261,8 +1261,7 @@ xfs_buf_ioapply_map(
int map,
int *buf_offset,
int *count,
- int op,
- int op_flags)
+ int op)
{
int page_index;
int total_nr_pages = bp->b_page_count;
@@ -1297,7 +1296,7 @@ next_chunk:
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
- bio_set_op_attrs(bio, op, op_flags);
+ bio->bi_opf = op;
for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
@@ -1342,7 +1341,6 @@ _xfs_buf_ioapply(
{
struct blk_plug plug;
int op;
- int op_flags = 0;
int offset;
int size;
int i;
@@ -1384,15 +1382,14 @@ _xfs_buf_ioapply(
dump_stack();
}
}
- } else if (bp->b_flags & XBF_READ_AHEAD) {
- op = REQ_OP_READ;
- op_flags = REQ_RAHEAD;
} else {
op = REQ_OP_READ;
+ if (bp->b_flags & XBF_READ_AHEAD)
+ op |= REQ_RAHEAD;
}
/* we only use the buffer cache for meta-data */
- op_flags |= REQ_META;
+ op |= REQ_META;
/*
* Walk all the vectors issuing IO on them. Set up the initial offset
@@ -1404,7 +1401,7 @@ _xfs_buf_ioapply(
size = BBTOB(bp->b_length);
blk_start_plug(&plug);
for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
+ xfs_buf_ioapply_map(bp, i, &offset, &size, op);
if (bp->b_error)
break;
if (size <= 0)
@@ -2063,8 +2060,9 @@ xfs_buf_delwri_pushbuf(
int __init
xfs_buf_init(void)
{
- xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
- KM_ZONE_HWALIGN, NULL);
+ xfs_buf_zone = kmem_cache_create("xfs_buf",
+ sizeof(struct xfs_buf), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
if (!xfs_buf_zone)
goto out;
@@ -2077,7 +2075,7 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
- kmem_zone_destroy(xfs_buf_zone);
+ kmem_cache_destroy(xfs_buf_zone);
}
void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
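/*
 * Editor's sketch of the plain slab API the kmem_zone_* conversions in
 * this file move to (stock kernel interfaces; the "myobj" names are
 * hypothetical): the old XFS wrappers were thin aliases for exactly
 * this pattern.
 */
struct myobj {
	int x;
};

static struct kmem_cache *myobj_cache;

static int __init myobj_init(void)
{
	myobj_cache = kmem_cache_create("myobj", sizeof(struct myobj), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return myobj_cache ? 0 : -ENOMEM;
}

static void myobj_exit(void)
{
	kmem_cache_destroy(myobj_cache);	/* pairs with create */
}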
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index f6ce17d8d848..56e081dd1d96 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -244,7 +244,6 @@ int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
void xfs_buf_hold(struct xfs_buf *bp);
/* Releasing Buffers */
-extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);
/* Locking and Unlocking Buffers */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d74fbd1e9d3e..3458a1264a3f 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -763,7 +763,7 @@ xfs_buf_item_init(
error = xfs_buf_item_get_format(bip, bp->b_map_count);
ASSERT(error == 0);
if (error) { /* to stop gcc throwing set-but-unused warnings */
- kmem_zone_free(xfs_buf_item_zone, bip);
+ kmem_cache_free(xfs_buf_item_zone, bip);
return error;
}
@@ -851,7 +851,7 @@ xfs_buf_item_log_segment(
* first_bit and last_bit.
*/
while ((bits_to_set - bits_set) >= NBWORD) {
- *wordp |= 0xffffffff;
+ *wordp = 0xffffffff;
bits_set += NBWORD;
wordp++;
}
@@ -939,7 +939,7 @@ xfs_buf_item_free(
{
xfs_buf_item_free_format(bip);
kmem_free(bip->bli_item.li_lv_shadow);
- kmem_zone_free(xfs_buf_item_zone, bip);
+ kmem_cache_free(xfs_buf_item_zone, bip);
}
/*
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 283df898dd9f..0d3b640cf1cc 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -17,6 +17,7 @@
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
+#include "xfs_error.h"
/*
* Directory file type support functions
@@ -47,6 +48,7 @@ xfs_dir2_sf_getdents(
{
int i; /* shortform entry number */
struct xfs_inode *dp = args->dp; /* incore directory inode */
+ struct xfs_mount *mp = dp->i_mount;
xfs_dir2_dataptr_t off; /* current entry's offset */
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
@@ -68,15 +70,15 @@ xfs_dir2_sf_getdents(
return 0;
/*
- * Precalculate offsets for . and .. as we will always need them.
- *
- * XXX(hch): the second argument is sometimes 0 and sometimes
- * geo->datablk
+ * Precalculate offsets for "." and ".." as we will always need them.
+ * This relies on the fact that directories always start with the
+ * entries for "." and "..".
*/
dot_offset = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
- dp->d_ops->data_dot_offset);
+ geo->data_entry_offset);
dotdot_offset = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
- dp->d_ops->data_dotdot_offset);
+ geo->data_entry_offset +
+ xfs_dir2_data_entsize(mp, sizeof(".") - 1));
/*
* Put . entry unless we're starting past it.
@@ -91,7 +93,7 @@ xfs_dir2_sf_getdents(
* Put .. entry unless we're starting past it.
*/
if (ctx->pos <= dotdot_offset) {
- ino = dp->d_ops->sf_get_parent_ino(sfp);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
ctx->pos = dotdot_offset & 0x7fffffff;
if (!dir_emit(ctx, "..", 2, ino, DT_DIR))
return 0;
@@ -108,17 +110,21 @@ xfs_dir2_sf_getdents(
xfs_dir2_sf_get_offset(sfep));
if (ctx->pos > off) {
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
continue;
}
- ino = dp->d_ops->sf_get_ino(sfp, sfep);
- filetype = dp->d_ops->sf_get_ftype(sfep);
+ ino = xfs_dir2_sf_get_ino(mp, sfp, sfep);
+ filetype = xfs_dir2_sf_get_ftype(mp, sfep);
ctx->pos = off & 0x7fffffff;
+ if (XFS_IS_CORRUPT(dp->i_mount,
+ !xfs_dir2_namecheck(sfep->name,
+ sfep->namelen)))
+ return -EFSCORRUPTED;
if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
- xfs_dir3_get_dtype(dp->i_mount, filetype)))
+ xfs_dir3_get_dtype(mp, filetype)))
return 0;
- sfep = dp->d_ops->sf_nextentry(sfp, sfep);
+ sfep = xfs_dir2_sf_nextentry(mp, sfp, sfep);
}
ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
@@ -135,17 +141,14 @@ xfs_dir2_block_getdents(
struct dir_context *ctx)
{
struct xfs_inode *dp = args->dp; /* incore directory inode */
- xfs_dir2_data_hdr_t *hdr; /* block header */
struct xfs_buf *bp; /* buffer for block */
- xfs_dir2_data_entry_t *dep; /* block data entry */
- xfs_dir2_data_unused_t *dup; /* block unused entry */
- char *endptr; /* end of the data entries */
int error; /* error return value */
- char *ptr; /* current data entry */
int wantoff; /* starting block offset */
xfs_off_t cook;
struct xfs_da_geometry *geo = args->geo;
int lock_mode;
+ unsigned int offset;
+ unsigned int end;
/*
* If the block number in the offset is out of range, we're done.
@@ -164,56 +167,55 @@ xfs_dir2_block_getdents(
* We'll skip entries before this.
*/
wantoff = xfs_dir2_dataptr_to_off(geo, ctx->pos);
- hdr = bp->b_addr;
xfs_dir3_data_check(dp, bp);
- /*
- * Set up values for the loop.
- */
- ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = xfs_dir3_data_endp(geo, hdr);
/*
* Loop over the data portion of the block.
* Each object is a real entry (dep) or an unused one (dup).
*/
- while (ptr < endptr) {
+ offset = geo->data_entry_offset;
+ end = xfs_dir3_data_end_offset(geo, bp->b_addr);
+ while (offset < end) {
+ struct xfs_dir2_data_unused *dup = bp->b_addr + offset;
+ struct xfs_dir2_data_entry *dep = bp->b_addr + offset;
uint8_t filetype;
- dup = (xfs_dir2_data_unused_t *)ptr;
/*
* Unused, skip it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ptr += be16_to_cpu(dup->length);
+ offset += be16_to_cpu(dup->length);
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
-
/*
* Bump pointer for the next iteration.
*/
- ptr += dp->d_ops->data_entsize(dep->namelen);
+ offset += xfs_dir2_data_entsize(dp->i_mount, dep->namelen);
+
/*
* The entry is before the desired starting point, skip it.
*/
- if ((char *)dep - (char *)hdr < wantoff)
+ if (offset < wantoff)
continue;
- cook = xfs_dir2_db_off_to_dataptr(geo, geo->datablk,
- (char *)dep - (char *)hdr);
+ cook = xfs_dir2_db_off_to_dataptr(geo, geo->datablk, offset);
ctx->pos = cook & 0x7fffffff;
- filetype = dp->d_ops->data_get_ftype(dep);
+ filetype = xfs_dir2_data_get_ftype(dp->i_mount, dep);
/*
* If it didn't fit, set the final offset to here & return.
*/
+ if (XFS_IS_CORRUPT(dp->i_mount,
+ !xfs_dir2_namecheck(dep->name,
+ dep->namelen))) {
+ error = -EFSCORRUPTED;
+ goto out_rele;
+ }
if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
be64_to_cpu(dep->inumber),
- xfs_dir3_get_dtype(dp->i_mount, filetype))) {
- xfs_trans_brelse(args->trans, bp);
- return 0;
- }
+ xfs_dir3_get_dtype(dp->i_mount, filetype)))
+ goto out_rele;
}
/*
@@ -222,8 +224,9 @@ xfs_dir2_block_getdents(
*/
ctx->pos = xfs_dir2_db_off_to_dataptr(geo, geo->datablk + 1, 0) &
0x7fffffff;
+out_rele:
xfs_trans_brelse(args->trans, bp);
- return 0;
+ return error;
}
/*
@@ -276,7 +279,7 @@ xfs_dir2_leaf_readbuf(
new_off = xfs_dir2_da_to_byte(geo, map.br_startoff);
if (new_off > *cur_off)
*cur_off = new_off;
- error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, -1, &bp);
+ error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, 0, &bp);
if (error)
goto out;
@@ -311,7 +314,8 @@ xfs_dir2_leaf_readbuf(
break;
}
if (next_ra > *ra_blk) {
- xfs_dir3_data_readahead(dp, next_ra, -2);
+ xfs_dir3_data_readahead(dp, next_ra,
+ XFS_DABUF_MAP_HOLE_OK);
*ra_blk = next_ra;
}
ra_want -= geo->fsbcount;
@@ -343,17 +347,17 @@ xfs_dir2_leaf_getdents(
size_t bufsize)
{
struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
struct xfs_buf *bp = NULL; /* data block buffer */
- xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_unused_t *dup; /* unused entry */
- char *ptr = NULL; /* pointer to current data */
struct xfs_da_geometry *geo = args->geo;
xfs_dablk_t rablk = 0; /* current readahead block */
xfs_dir2_off_t curoff; /* current overall offset */
int length; /* temporary length value */
int byteoff; /* offset in current block */
int lock_mode;
+ unsigned int offset = 0;
int error = 0; /* error return value */
/*
@@ -380,7 +384,7 @@ xfs_dir2_leaf_getdents(
* If we have no buffer, or we're off the end of the
* current buffer, need to get another one.
*/
- if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) {
+ if (!bp || offset >= geo->blksize) {
if (bp) {
xfs_trans_brelse(args->trans, bp);
bp = NULL;
@@ -393,36 +397,35 @@ xfs_dir2_leaf_getdents(
if (error || !bp)
break;
- hdr = bp->b_addr;
xfs_dir3_data_check(dp, bp);
/*
* Find our position in the block.
*/
- ptr = (char *)dp->d_ops->data_entry_p(hdr);
+ offset = geo->data_entry_offset;
byteoff = xfs_dir2_byte_to_off(geo, curoff);
/*
* Skip past the header.
*/
if (byteoff == 0)
- curoff += dp->d_ops->data_entry_offset;
+ curoff += geo->data_entry_offset;
/*
* Skip past entries until we reach our offset.
*/
else {
- while ((char *)ptr - (char *)hdr < byteoff) {
- dup = (xfs_dir2_data_unused_t *)ptr;
+ while (offset < byteoff) {
+ dup = bp->b_addr + offset;
if (be16_to_cpu(dup->freetag)
== XFS_DIR2_DATA_FREE_TAG) {
length = be16_to_cpu(dup->length);
- ptr += length;
+ offset += length;
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
- length =
- dp->d_ops->data_entsize(dep->namelen);
- ptr += length;
+ dep = bp->b_addr + offset;
+ length = xfs_dir2_data_entsize(mp,
+ dep->namelen);
+ offset += length;
}
/*
* Now set our real offset.
@@ -430,32 +433,38 @@ xfs_dir2_leaf_getdents(
curoff =
xfs_dir2_db_off_to_byte(geo,
xfs_dir2_byte_to_db(geo, curoff),
- (char *)ptr - (char *)hdr);
- if (ptr >= (char *)hdr + geo->blksize) {
+ offset);
+ if (offset >= geo->blksize)
continue;
- }
}
}
+
/*
- * We have a pointer to an entry.
- * Is it a live one?
+ * We have a pointer to an entry. Is it a live one?
*/
- dup = (xfs_dir2_data_unused_t *)ptr;
+ dup = bp->b_addr + offset;
+
/*
* No, it's unused, skip over it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
length = be16_to_cpu(dup->length);
- ptr += length;
+ offset += length;
curoff += length;
continue;
}
- dep = (xfs_dir2_data_entry_t *)ptr;
- length = dp->d_ops->data_entsize(dep->namelen);
- filetype = dp->d_ops->data_get_ftype(dep);
+ dep = bp->b_addr + offset;
+ length = xfs_dir2_data_entsize(mp, dep->namelen);
+ filetype = xfs_dir2_data_get_ftype(mp, dep);
ctx->pos = xfs_dir2_byte_to_dataptr(curoff) & 0x7fffffff;
+ if (XFS_IS_CORRUPT(dp->i_mount,
+ !xfs_dir2_namecheck(dep->name,
+ dep->namelen))) {
+ error = -EFSCORRUPTED;
+ break;
+ }
if (!dir_emit(ctx, (char *)dep->name, dep->namelen,
be64_to_cpu(dep->inumber),
xfs_dir3_get_dtype(dp->i_mount, filetype)))
@@ -464,7 +473,7 @@ xfs_dir2_leaf_getdents(
/*
* Advance to next entry in the block.
*/
- ptr += length;
+ offset += length;
curoff += length;
/* bufsize may have just been a guess; don't go negative */
bufsize = bufsize > length ? bufsize - length : 0;
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8ec7aab89044..cae613620175 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -13,6 +13,7 @@
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
+#include "xfs_discard.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
@@ -70,7 +71,10 @@ xfs_trim_extents(
error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
if (error)
goto out_del_cursor;
- XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_del_cursor);
+ if (XFS_IS_CORRUPT(mp, i != 1)) {
+ error = -EFSCORRUPTED;
+ goto out_del_cursor;
+ }
ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
/*
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index aeb95e7391c1..2bff21ca9d78 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -48,7 +48,7 @@ static struct lock_class_key xfs_dquot_project_class;
*/
void
xfs_qm_dqdestroy(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
ASSERT(list_empty(&dqp->q_lru));
@@ -56,7 +56,7 @@ xfs_qm_dqdestroy(
mutex_destroy(&dqp->q_qlock);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
- kmem_zone_free(xfs_qm_dqzone, dqp);
+ kmem_cache_free(xfs_qm_dqzone, dqp);
}
/*
@@ -113,8 +113,8 @@ xfs_qm_adjust_dqlimits(
*/
void
xfs_qm_adjust_dqtimers(
- xfs_mount_t *mp,
- xfs_disk_dquot_t *d)
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *d)
{
ASSERT(d->d_id);
@@ -305,8 +305,8 @@ xfs_dquot_disk_alloc(
/* Create the block mapping. */
xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
- XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
- XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
+ XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
+ &nmaps);
if (error)
return error;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -497,7 +497,7 @@ xfs_dquot_from_disk(
struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset;
/* copy everything from disk dquot to the incore dquot */
- memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
+ memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot));
/*
* Reservation counters are defined as reservation plus current usage
@@ -833,7 +833,7 @@ xfs_qm_id_for_quotatype(
case XFS_DQ_GROUP:
return ip->i_d.di_gid;
case XFS_DQ_PROJ:
- return xfs_get_projid(ip);
+ return ip->i_d.di_projid;
}
ASSERT(0);
return 0;
@@ -989,7 +989,7 @@ xfs_qm_dqput(
*/
void
xfs_qm_dqrele(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
if (!dqp)
return;
@@ -1018,8 +1018,8 @@ xfs_qm_dqflush_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip;
- xfs_dquot_t *dqp = qip->qli_dquot;
+ struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
+ struct xfs_dquot *dqp = qip->qli_dquot;
struct xfs_ail *ailp = lip->li_ailp;
/*
@@ -1126,11 +1126,11 @@ xfs_qm_dqflush(
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return -EIO;
+ return -EFSCORRUPTED;
}
/* This is the only portion of data that needs to persist */
- memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
+ memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot));
/*
* Clear the dirty field and remember the flush lsn for later use.
@@ -1188,8 +1188,8 @@ out_unlock:
*/
void
xfs_dqlock2(
- xfs_dquot_t *d1,
- xfs_dquot_t *d2)
+ struct xfs_dquot *d1,
+ struct xfs_dquot *d2)
{
if (d1 && d2) {
ASSERT(d1 != d2);
@@ -1211,20 +1211,22 @@ xfs_dqlock2(
int __init
xfs_qm_init(void)
{
- xfs_qm_dqzone =
- kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
+ xfs_qm_dqzone = kmem_cache_create("xfs_dquot",
+ sizeof(struct xfs_dquot),
+ 0, 0, NULL);
if (!xfs_qm_dqzone)
goto out;
- xfs_qm_dqtrxzone =
- kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
+ xfs_qm_dqtrxzone = kmem_cache_create("xfs_dqtrx",
+ sizeof(struct xfs_dquot_acct),
+ 0, 0, NULL);
if (!xfs_qm_dqtrxzone)
goto out_free_dqzone;
return 0;
out_free_dqzone:
- kmem_zone_destroy(xfs_qm_dqzone);
+ kmem_cache_destroy(xfs_qm_dqzone);
out:
return -ENOMEM;
}
@@ -1232,8 +1234,8 @@ out:
void
xfs_qm_exit(void)
{
- kmem_zone_destroy(xfs_qm_dqtrxzone);
- kmem_zone_destroy(xfs_qm_dqzone);
+ kmem_cache_destroy(xfs_qm_dqtrxzone);
+ kmem_cache_destroy(xfs_qm_dqzone);
}
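The kmem_zone_* wrappers being retired here map one-to-one onto the native
slab API: kmem_zone_init -> kmem_cache_create, kmem_zone_free ->
kmem_cache_free, kmem_zone_destroy -> kmem_cache_destroy. The create and
destroy sides are visible in the hunks above; a sketch of the alloc/free
pairing that goes with them (the GFP flags are illustrative, not taken from
this patch):

	struct xfs_dquot	*dqp;

	/* Allocate a zeroed object from the dquot cache. */
	dqp = kmem_cache_zalloc(xfs_qm_dqzone, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	/* ... initialize and use dqp ... */
	kmem_cache_free(xfs_qm_dqzone, dqp);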
/*
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index 4fe85709d55d..fe3e46df604b 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -30,33 +30,36 @@ enum {
/*
* The incore dquot structure
*/
-typedef struct xfs_dquot {
- uint dq_flags; /* various flags (XFS_DQ_*) */
- struct list_head q_lru; /* global free list of dquots */
- struct xfs_mount*q_mount; /* filesystem this relates to */
- uint q_nrefs; /* # active refs from inodes */
- xfs_daddr_t q_blkno; /* blkno of dquot buffer */
- int q_bufoffset; /* off of dq in buffer (# dquots) */
- xfs_fileoff_t q_fileoffset; /* offset in quotas file */
-
- xfs_disk_dquot_t q_core; /* actual usage & quotas */
- xfs_dq_logitem_t q_logitem; /* dquot log item */
- xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
- xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
- xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
- xfs_qcnt_t q_prealloc_lo_wmark;/* prealloc throttle wmark */
- xfs_qcnt_t q_prealloc_hi_wmark;/* prealloc disabled wmark */
- int64_t q_low_space[XFS_QLOWSP_MAX];
- struct mutex q_qlock; /* quota lock */
- struct completion q_flush; /* flush completion queue */
- atomic_t q_pincount; /* dquot pin count */
- wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
-} xfs_dquot_t;
+struct xfs_dquot {
+ uint dq_flags;
+ struct list_head q_lru;
+ struct xfs_mount *q_mount;
+ uint q_nrefs;
+ xfs_daddr_t q_blkno;
+ int q_bufoffset;
+ xfs_fileoff_t q_fileoffset;
+
+ struct xfs_disk_dquot q_core;
+ struct xfs_dq_logitem q_logitem;
+ /* total regular nblks used+reserved */
+ xfs_qcnt_t q_res_bcount;
+ /* total inos allocd+reserved */
+ xfs_qcnt_t q_res_icount;
+ /* total realtime blks used+reserved */
+ xfs_qcnt_t q_res_rtbcount;
+ xfs_qcnt_t q_prealloc_lo_wmark;
+ xfs_qcnt_t q_prealloc_hi_wmark;
+ int64_t q_low_space[XFS_QLOWSP_MAX];
+ struct mutex q_qlock;
+ struct completion q_flush;
+ atomic_t q_pincount;
+ struct wait_queue_head q_pinwait;
+};
/*
* Lock hierarchy for q_qlock:
* XFS_QLOCK_NORMAL is the implicit default,
- * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
+ * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
*/
enum {
XFS_QLOCK_NORMAL = 0,
@@ -64,21 +67,21 @@ enum {
};
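The XFS_QLOCK_NESTED subclass exists so lockdep can distinguish the second
q_qlock acquisition when two dquots are locked together. A sketch of how
xfs_dqlock2 consumes it, ordering by id so the lower-id dquot is always taken
first (the function body is not part of this hunk, so the exact field access
is an assumption):

	if (be32_to_cpu(d1->q_core.d_id) > be32_to_cpu(d2->q_core.d_id)) {
		mutex_lock(&d2->q_qlock);
		mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
	} else {
		mutex_lock(&d1->q_qlock);
		mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
	}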
/*
- * Manage the q_flush completion queue embedded in the dquot. This completion
+ * Manage the q_flush completion queue embedded in the dquot. This completion
* queue synchronizes processes attempting to flush the in-core dquot back to
* disk.
*/
-static inline void xfs_dqflock(xfs_dquot_t *dqp)
+static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
wait_for_completion(&dqp->q_flush);
}
-static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
+static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
return try_wait_for_completion(&dqp->q_flush);
}
-static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
complete(&dqp->q_flush);
}
@@ -112,7 +115,7 @@ static inline int xfs_this_quota_on(struct xfs_mount *mp, int type)
}
}
-static inline xfs_dquot_t *xfs_inode_dquot(struct xfs_inode *ip, int type)
+static inline struct xfs_dquot *xfs_inode_dquot(struct xfs_inode *ip, int type)
{
switch (type & XFS_DQ_ALLTYPES) {
case XFS_DQ_USER:
@@ -147,31 +150,30 @@ static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp)
#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP)
-extern void xfs_qm_dqdestroy(xfs_dquot_t *);
-extern int xfs_qm_dqflush(struct xfs_dquot *, struct xfs_buf **);
-extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
-extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
- xfs_disk_dquot_t *);
-extern void xfs_qm_adjust_dqlimits(struct xfs_mount *,
- struct xfs_dquot *);
-extern xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip,
- uint type);
-extern int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
+void xfs_qm_dqdestroy(struct xfs_dquot *dqp);
+int xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
+void xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
+void xfs_qm_adjust_dqtimers(struct xfs_mount *mp,
+ struct xfs_disk_dquot *d);
+void xfs_qm_adjust_dqlimits(struct xfs_mount *mp,
+ struct xfs_dquot *d);
+xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type);
+int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
uint type, bool can_alloc,
struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
- bool can_alloc,
- struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
+int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type,
+ bool can_alloc,
+ struct xfs_dquot **dqpp);
+int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
uint type, struct xfs_dquot **dqpp);
-extern int xfs_qm_dqget_uncached(struct xfs_mount *mp,
- xfs_dqid_t id, uint type,
- struct xfs_dquot **dqpp);
-extern void xfs_qm_dqput(xfs_dquot_t *);
+int xfs_qm_dqget_uncached(struct xfs_mount *mp,
+ xfs_dqid_t id, uint type,
+ struct xfs_dquot **dqpp);
+void xfs_qm_dqput(struct xfs_dquot *dqp);
-extern void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
+void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);
-extern void xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
+void xfs_dquot_set_prealloc_limits(struct xfs_dquot *);
static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
index 1aed34ccdabc..3bb19e556ade 100644
--- a/fs/xfs/xfs_dquot_item.h
+++ b/fs/xfs/xfs_dquot_item.h
@@ -11,25 +11,27 @@ struct xfs_trans;
struct xfs_mount;
struct xfs_qoff_logitem;
-typedef struct xfs_dq_logitem {
- struct xfs_log_item qli_item; /* common portion */
- struct xfs_dquot *qli_dquot; /* dquot ptr */
- xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
-} xfs_dq_logitem_t;
+struct xfs_dq_logitem {
+ struct xfs_log_item qli_item; /* common portion */
+ struct xfs_dquot *qli_dquot; /* dquot ptr */
+ xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
+};
-typedef struct xfs_qoff_logitem {
- struct xfs_log_item qql_item; /* common portion */
- struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
+struct xfs_qoff_logitem {
+ struct xfs_log_item qql_item; /* common portion */
+ struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
unsigned int qql_flags;
-} xfs_qoff_logitem_t;
+};
-extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *);
-extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
- struct xfs_qoff_logitem *, uint);
-extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
- struct xfs_qoff_logitem *, uint);
-extern void xfs_trans_log_quotaoff_item(struct xfs_trans *,
- struct xfs_qoff_logitem *);
+void xfs_qm_dquot_logitem_init(struct xfs_dquot *dqp);
+struct xfs_qoff_logitem *xfs_qm_qoff_logitem_init(struct xfs_mount *mp,
+ struct xfs_qoff_logitem *start,
+ uint flags);
+struct xfs_qoff_logitem *xfs_trans_get_qoff_item(struct xfs_trans *tp,
+ struct xfs_qoff_logitem *startqoff,
+ uint flags);
+void xfs_trans_log_quotaoff_item(struct xfs_trans *tp,
+ struct xfs_qoff_logitem *qlp);
#endif /* __XFS_DQUOT_ITEM_H__ */
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 849fd4476950..331765afc53e 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -257,7 +257,7 @@ xfs_errortag_test(
xfs_warn_ratelimited(mp,
"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
- expression, file, line, mp->m_fsname);
+ expression, file, line, mp->m_super->s_id);
return true;
}
@@ -329,19 +329,40 @@ xfs_corruption_error(
const char *tag,
int level,
struct xfs_mount *mp,
- void *buf,
+ const void *buf,
size_t bufsize,
const char *filename,
int linenum,
xfs_failaddr_t failaddr)
{
- if (level <= xfs_error_level)
+ if (buf && level <= xfs_error_level)
xfs_hex_dump(buf, bufsize);
xfs_error_report(tag, level, mp, filename, linenum, failaddr);
xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair");
}
/*
+ * Complain about the kinds of metadata corruption that we can't detect from a
+ * verifier, such as incorrect inter-block relationship data. Does not set
+ * bp->b_error.
+ */
+void
+xfs_buf_corruption_error(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_mount;
+
+ xfs_alert_tag(mp, XFS_PTAG_VERIFIER_ERROR,
+ "Metadata corruption detected at %pS, %s block 0x%llx",
+ __return_address, bp->b_ops->name, bp->b_bn);
+
+ xfs_alert(mp, "Unmount and run xfs_repair");
+
+ if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+ xfs_stack_trace();
+}
+
+/*
* Warnings specifically for verifier errors. Differentiate CRC vs. invalid
* values, and omit the stack trace unless the error level is tuned high.
*/
@@ -350,7 +371,7 @@ xfs_buf_verifier_error(
struct xfs_buf *bp,
int error,
const char *name,
- void *buf,
+ const void *buf,
size_t bufsz,
xfs_failaddr_t failaddr)
{
@@ -402,7 +423,7 @@ xfs_inode_verifier_error(
struct xfs_inode *ip,
int error,
const char *name,
- void *buf,
+ const void *buf,
size_t bufsz,
xfs_failaddr_t failaddr)
{
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 602aa7d62b66..31a5d321ba9a 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -12,16 +12,17 @@ extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
extern void xfs_corruption_error(const char *tag, int level,
- struct xfs_mount *mp, void *buf, size_t bufsize,
+ struct xfs_mount *mp, const void *buf, size_t bufsize,
const char *filename, int linenum,
xfs_failaddr_t failaddr);
+void xfs_buf_corruption_error(struct xfs_buf *bp);
extern void xfs_buf_verifier_error(struct xfs_buf *bp, int error,
- const char *name, void *buf, size_t bufsz,
+ const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
extern void xfs_verifier_error(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
- const char *name, void *buf, size_t bufsz,
+ const char *name, const void *buf, size_t bufsz,
xfs_failaddr_t failaddr);
#define XFS_ERROR_REPORT(e, lvl, mp) \
@@ -37,32 +38,6 @@ extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
/* Dump 128 bytes of any corrupt buffer */
#define XFS_CORRUPTION_DUMP_LEN (128)
-/*
- * Macros to set EFSCORRUPTED & return/branch.
- */
-#define XFS_WANT_CORRUPTED_GOTO(mp, x, l) \
- { \
- int fs_is_ok = (x); \
- ASSERT(fs_is_ok); \
- if (unlikely(!fs_is_ok)) { \
- XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \
- XFS_ERRLEVEL_LOW, mp); \
- error = -EFSCORRUPTED; \
- goto l; \
- } \
- }
-
-#define XFS_WANT_CORRUPTED_RETURN(mp, x) \
- { \
- int fs_is_ok = (x); \
- ASSERT(fs_is_ok); \
- if (unlikely(!fs_is_ok)) { \
- XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \
- XFS_ERRLEVEL_LOW, mp); \
- return -EFSCORRUPTED; \
- } \
- }
-
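With the macros gone, each caller open-codes the check so the error path is
visible at the call site, as the xfs_trim_extents hunk above shows. Note that
the sense of the condition flips: the old macros asserted the healthy
condition, while XFS_IS_CORRUPT tests the corrupt one. The replacement idiom
for each removed form, sketched:

	/* was XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_del_cursor); */
	if (XFS_IS_CORRUPT(mp, i != 1)) {
		error = -EFSCORRUPTED;
		goto out_del_cursor;
	}

	/* was XFS_WANT_CORRUPTED_RETURN(mp, ok); */
	if (XFS_IS_CORRUPT(mp, !ok))
		return -EFSCORRUPTED;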
#ifdef DEBUG
extern int xfs_errortag_init(struct xfs_mount *mp);
extern void xfs_errortag_del(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 2183d87be4cf..3991e59cfd18 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -367,7 +367,7 @@ restart:
* If this is a metadata allocation, try to reuse the busy
* extent instead of trimming the allocation.
*/
- if (!xfs_alloc_is_userdata(args->datatype) &&
+ if (!(args->datatype & XFS_ALLOC_USERDATA) &&
!(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
if (!xfs_extent_busy_update_extent(args->mp, args->pag,
busyp, fbno, flen,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index e44efc41a041..6ea847f6e298 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -21,7 +21,7 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_efi_zone;
kmem_zone_t *xfs_efd_zone;
@@ -39,7 +39,7 @@ xfs_efi_item_free(
if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
kmem_free(efip);
else
- kmem_zone_free(xfs_efi_zone, efip);
+ kmem_cache_free(xfs_efi_zone, efip);
}
/*
@@ -228,6 +228,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
}
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -243,7 +244,7 @@ xfs_efd_item_free(struct xfs_efd_log_item *efdp)
if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
kmem_free(efdp);
else
- kmem_zone_free(xfs_efd_zone, efdp);
+ kmem_cache_free(xfs_efd_zone, efdp);
}
/*
@@ -624,7 +625,7 @@ xfs_efi_recover(
*/
set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
xfs_efi_release(efip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1ffb179f35d2..c93250108952 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -188,7 +188,8 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
- ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
+ ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
+ is_sync_kiocb(iocb));
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
@@ -215,7 +216,7 @@ xfs_file_dax_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
- ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
+ ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
file_accessed(iocb->ki_filp);
@@ -351,7 +352,7 @@ restart:
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
- NULL, &xfs_iomap_ops);
+ NULL, &xfs_buffered_write_iomap_ops);
if (error)
return error;
} else
@@ -486,8 +487,7 @@ xfs_file_dio_aio_write(
int unaligned_io = 0;
int iolock;
size_t count = iov_iter_count(from);
- struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
/* DIO must be aligned to device logical sector size */
if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
@@ -547,15 +547,13 @@ xfs_file_dio_aio_write(
}
trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
- ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops);
-
/*
- * If unaligned, this is the only IO in-flight. If it has not yet
- * completed, wait on it before we release the iolock to prevent
- * subsequent overlapping IO.
+ * If unaligned, this is the only IO in-flight. Wait on it before we
+ * release the iolock to prevent subsequent overlapping IO.
*/
- if (ret == -EIOCBQUEUED && unaligned_io)
- inode_dio_wait(inode);
+ ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
+ &xfs_dio_write_ops,
+ is_sync_kiocb(iocb) || unaligned_io);
out:
xfs_iunlock(ip, iolock);
@@ -594,7 +592,7 @@ xfs_file_dax_write(
count = iov_iter_count(from);
trace_xfs_file_dax_write(ip, count, pos);
- ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
+ ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
error = xfs_setfilesize(ip, pos, ret);
@@ -641,7 +639,8 @@ write_retry:
current->backing_dev_info = inode_to_bdi(inode);
trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
- ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
+ ret = iomap_file_buffered_write(iocb, from,
+ &xfs_buffered_write_iomap_ops);
if (likely(ret >= 0))
iocb->ki_pos += ret;
@@ -818,6 +817,36 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
+ /*
+	 * Must wait for all AIO to complete before we continue, as AIO can
+	 * change the file size on completion without holding any locks we
+	 * currently hold. We must do this first because AIO can update both
+	 * the on-disk and in-memory inode sizes, and the operations that
+	 * follow require the in-memory size to be fully up-to-date.
+ */
+ inode_dio_wait(inode);
+
+ /*
+	 * Now that AIO and DIO have drained, we flush and (if necessary) invalidate
+ * the cached range over the first operation we are about to run.
+ *
+ * We care about zero and collapse here because they both run a hole
+ * punch over the range first. Because that can zero data, and the range
+ * of invalidation for the shift operations is much larger, we still do
+ * the required flush for collapse in xfs_prepare_shift().
+ *
+ * Insert has the same range requirements as collapse, and we extend the
+	 * file first, which can zero data. Hence insert has the same
+ * flush/invalidate requirements as collapse and so they are both
+ * handled at the right time by xfs_prepare_shift().
+ */
+ if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
+ FALLOC_FL_COLLAPSE_RANGE)) {
+ error = xfs_flush_unmap_range(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ }
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
@@ -881,16 +910,30 @@ xfs_file_fallocate(
}
if (mode & FALLOC_FL_ZERO_RANGE) {
- error = xfs_zero_file_space(ip, offset, len);
+ /*
+ * Punch a hole and prealloc the range. We use a hole
+ * punch rather than unwritten extent conversion for two
+ * reasons:
+ *
+ * 1.) Hole punch handles partial block zeroing for us.
+ * 2.) If prealloc returns ENOSPC, the file range is
+ * still zero-valued by virtue of the hole punch.
+ */
+ unsigned int blksize = i_blocksize(inode);
+
+ trace_xfs_zero_file_space(ip);
+
+ error = xfs_free_file_space(ip, offset, len);
+ if (error)
+ goto out_unlock;
+
+ len = round_up(offset + len, blksize) -
+ round_down(offset, blksize);
+ offset = round_down(offset, blksize);
} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
error = xfs_reflink_unshare(ip, offset, len);
if (error)
goto out_unlock;
-
- if (!xfs_is_always_cow_inode(ip)) {
- error = xfs_alloc_file_space(ip, offset, len,
- XFS_BMAPI_PREALLOC);
- }
} else {
/*
* If always_cow mode we can't use preallocations and
@@ -900,12 +943,14 @@ xfs_file_fallocate(
error = -EOPNOTSUPP;
goto out_unlock;
}
+ }
+ if (!xfs_is_always_cow_inode(ip)) {
error = xfs_alloc_file_space(ip, offset, len,
XFS_BMAPI_PREALLOC);
+ if (error)
+ goto out_unlock;
}
- if (error)
- goto out_unlock;
}
if (file->f_flags & O_DSYNC)
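A worked example of the FALLOC_FL_ZERO_RANGE rounding above, assuming
i_blocksize(inode) = 4096: for offset = 1000 and len = 5000, the hole punch
covers bytes [1000, 6000); the range is then widened to whole blocks, giving
offset = round_down(1000, 4096) = 0 and len = round_up(6000, 4096) - 0 = 8192,
so the preallocation covers blocks [0, 8192) and the partially zeroed edge
blocks stay allocated.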
@@ -1059,7 +1104,7 @@ xfs_dir_open(
*/
mode = xfs_ilock_data_map_shared(ip);
if (ip->i_d.di_nextents > 0)
- error = xfs_dir3_data_readahead(ip, 0, -1);
+ error = xfs_dir3_data_readahead(ip, 0, 0);
xfs_iunlock(ip, mode);
return error;
}
@@ -1156,12 +1201,16 @@ __xfs_filemap_fault(
if (IS_DAX(inode)) {
pfn_t pfn;
- ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
+ (write_fault && !vmf->cow_page) ?
+ &xfs_direct_write_iomap_ops :
+ &xfs_read_iomap_ops);
if (ret & VM_FAULT_NEEDDSYNC)
ret = dax_finish_sync_fault(vmf, pe_size, pfn);
} else {
if (write_fault)
- ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
+ ret = iomap_page_mkwrite(vmf,
+ &xfs_buffered_write_iomap_ops);
else
ret = filemap_fault(vmf);
}
@@ -1225,22 +1274,22 @@ static const struct vm_operations_struct xfs_file_vm_ops = {
STATIC int
xfs_file_mmap(
- struct file *filp,
- struct vm_area_struct *vma)
+ struct file *file,
+ struct vm_area_struct *vma)
{
- struct dax_device *dax_dev;
+ struct inode *inode = file_inode(file);
+ struct xfs_buftarg *target = xfs_inode_buftarg(XFS_I(inode));
- dax_dev = xfs_find_daxdev_for_inode(file_inode(filp));
/*
* We don't support synchronous mappings for non-DAX files and
	 * for DAX files if the underlying dax_device is not synchronous.
*/
- if (!daxdev_mapping_supported(vma, dax_dev))
+ if (!daxdev_mapping_supported(vma, target->bt_daxdev))
return -EOPNOTSUPP;
- file_accessed(filp);
+ file_accessed(file);
vma->vm_ops = &xfs_file_vm_ops;
- if (IS_DAX(file_inode(filp)))
+ if (IS_DAX(inode))
vma->vm_flags |= VM_HUGEPAGE;
return 0;
}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 574a7a8b4736..5f12b5d8527a 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -18,6 +18,7 @@
#include "xfs_trace.h"
#include "xfs_ag_resv.h"
#include "xfs_trans.h"
+#include "xfs_filestream.h"
struct xfs_fstrm_item {
struct xfs_mru_cache_elem mru;
@@ -374,7 +375,7 @@ xfs_filestream_new_ag(
startag = (item->ag + 1) % mp->m_sb.sb_agcount;
}
- if (xfs_alloc_is_userdata(ap->datatype))
+ if (ap->datatype & XFS_ALLOC_USERDATA)
flags |= XFS_PICK_USERDATA;
if (ap->tp->t_flags & XFS_TRANS_LOWMODE)
flags |= XFS_PICK_LOWSPACE;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index d082143feb5a..918456ca29e1 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -146,6 +146,7 @@ xfs_fsmap_owner_from_rmap(
dest->fmr_owner = XFS_FMR_OWN_FREE;
break;
default:
+ ASSERT(0);
return -EFSCORRUPTED;
}
return 0;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 944add5ff8e0..8dc2e5414276 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -44,7 +44,7 @@ xfs_inode_alloc(
if (!ip)
return NULL;
if (inode_init_always(mp->m_super, VFS_I(ip))) {
- kmem_zone_free(xfs_inode_zone, ip);
+ kmem_cache_free(xfs_inode_zone, ip);
return NULL;
}
@@ -104,7 +104,7 @@ xfs_inode_free_callback(
ip->i_itemp = NULL;
}
- kmem_zone_free(xfs_inode_zone, ip);
+ kmem_cache_free(xfs_inode_zone, ip);
}
static void
@@ -1419,7 +1419,7 @@ xfs_inode_match_id(
return 0;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- xfs_get_projid(ip) != eofb->eof_prid)
+ ip->i_d.di_projid != eofb->eof_prid)
return 0;
return 1;
@@ -1443,7 +1443,7 @@ xfs_inode_match_id_union(
return 1;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
- xfs_get_projid(ip) == eofb->eof_prid)
+ ip->i_d.di_projid == eofb->eof_prid)
return 1;
return 0;
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 3ebd1b7f49d8..490fee22b878 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -55,7 +55,7 @@ STATIC void
xfs_icreate_item_release(
struct xfs_log_item *lip)
{
- kmem_zone_free(xfs_icreate_zone, ICR_ITEM(lip));
+ kmem_cache_free(xfs_icreate_zone, ICR_ITEM(lip));
}
static const struct xfs_item_ops xfs_icreate_item_ops = {
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 18f4b262e61c..401da197f012 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -55,6 +55,12 @@ xfs_extlen_t
xfs_get_extsz_hint(
struct xfs_inode *ip)
{
+ /*
+ * No point in aligning allocations if we need to COW to actually
+ * write to them.
+ */
+ if (xfs_is_always_cow_inode(ip))
+ return 0;
if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
return ip->i_d.di_extsize;
if (XFS_IS_REALTIME_INODE(ip))
@@ -809,7 +815,7 @@ xfs_ialloc(
ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
inode->i_rdev = rdev;
- xfs_set_projid(ip, prid);
+ ip->i_d.di_projid = prid;
if (pip && XFS_INHERIT_GID(pip)) {
ip->i_d.di_gid = pip->i_d.di_gid;
@@ -845,8 +851,7 @@ xfs_ialloc(
inode_set_iversion(inode, 1);
ip->i_d.di_flags2 = 0;
ip->i_d.di_cowextsize = 0;
- ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
- ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
+ ip->i_d.di_crtime = tv;
}
@@ -1418,7 +1423,7 @@ xfs_link(
* the tree quota mechanism could be circumvented.
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
+ tdp->i_d.di_projid != sip->i_d.di_projid)) {
error = -EXDEV;
goto error_return;
}
@@ -2130,8 +2135,10 @@ xfs_iunlink_update_bucket(
* passed in because either we're adding or removing ourselves from the
* head of the list.
*/
- if (old_value == new_agino)
+ if (old_value == new_agino) {
+ xfs_buf_corruption_error(agibp);
return -EFSCORRUPTED;
+ }
agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
offset = offsetof(struct xfs_agi, agi_unlinked) +
@@ -2194,6 +2201,8 @@ xfs_iunlink_update_inode(
/* Make sure the old pointer isn't garbage. */
old_value = be32_to_cpu(dip->di_next_unlinked);
if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
goto out;
}
@@ -2205,8 +2214,11 @@ xfs_iunlink_update_inode(
*/
*old_next_agino = old_value;
if (old_value == next_agino) {
- if (next_agino != NULLAGINO)
+ if (next_agino != NULLAGINO) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
error = -EFSCORRUPTED;
+ }
goto out;
}
@@ -2257,8 +2269,10 @@ xfs_iunlink(
*/
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
if (next_agino == agino ||
- !xfs_verify_agino_or_null(mp, agno, next_agino))
+ !xfs_verify_agino_or_null(mp, agno, next_agino)) {
+ xfs_buf_corruption_error(agibp);
return -EFSCORRUPTED;
+ }
if (next_agino != NULLAGINO) {
struct xfs_perag *pag;
@@ -3196,6 +3210,7 @@ xfs_rename(
struct xfs_trans *tp;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
+ struct xfs_buf *agibp;
int num_inodes = __XFS_SORT_INODES;
bool new_parent = (src_dp != target_dp);
bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
@@ -3270,7 +3285,7 @@ xfs_rename(
* tree quota mechanism would be circumvented.
*/
if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
+ target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
error = -EXDEV;
goto out_trans_cancel;
}
@@ -3327,7 +3342,6 @@ xfs_rename(
goto out_trans_cancel;
xfs_bumplink(tp, wip);
- xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
VFS_I(wip)->i_state &= ~I_LINKABLE;
}
@@ -3361,6 +3375,22 @@ xfs_rename(
* In case there is already an entry with the same
* name at the destination directory, remove it first.
*/
+
+ /*
+ * Check whether the replace operation will need to allocate
+ * blocks. This happens when the shortform directory lacks
+ * space and we have to convert it to a block format directory.
+ * When more blocks are necessary, we must lock the AGI first
+ * to preserve locking order (AGI -> AGF).
+ */
+ if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) {
+ error = xfs_read_agi(mp, tp,
+ XFS_INO_TO_AGNO(mp, target_ip->i_ino),
+ &agibp);
+ if (error)
+ goto out_trans_cancel;
+ }
+
error = xfs_dir_replace(tp, target_dp, target_name,
src_ip->i_ino, spaceres);
if (error)
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 558173f95a03..492e53992fa9 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -37,9 +37,6 @@ typedef struct xfs_inode {
struct xfs_ifork *i_cowfp; /* copy on write extents */
struct xfs_ifork i_df; /* data fork */
- /* operations vectors */
- const struct xfs_dir_ops *d_ops; /* directory ops vector */
-
/* Transaction and locking information. */
struct xfs_inode_log_item *i_itemp; /* logging information */
mrlock_t i_lock; /* inode lock */
@@ -177,30 +174,11 @@ xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
return ret;
}
-/*
- * Project quota id helpers (previously projid was 16bit only
- * and using two 16bit values to hold new 32bit projid was chosen
- * to retain compatibility with "old" filesystems).
- */
-static inline prid_t
-xfs_get_projid(struct xfs_inode *ip)
-{
- return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo;
-}
-
-static inline void
-xfs_set_projid(struct xfs_inode *ip,
- prid_t projid)
-{
- ip->i_d.di_projid_hi = (uint16_t) (projid >> 16);
- ip->i_d.di_projid_lo = (uint16_t) (projid & 0xffff);
-}
-
static inline prid_t
xfs_get_initial_prid(struct xfs_inode *dp)
{
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
- return xfs_get_projid(dp);
+ return dp->i_d.di_projid;
return XFS_PROJID_DEFAULT;
}
@@ -220,6 +198,13 @@ static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
}
/*
+ * Return the buftarg used for data allocations on a given inode.
+ */
+#define xfs_inode_buftarg(ip) \
+ (XFS_IS_REALTIME_INODE(ip) ? \
+ (ip)->i_mount->m_rtdev_targp : (ip)->i_mount->m_ddev_targp)
+
+/*
* In-core inode flags.
*/
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index bb8f076805b9..8bd5d0de6321 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -17,6 +17,7 @@
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include "xfs_error.h"
#include <linux/iversion.h>
@@ -309,8 +310,8 @@ xfs_inode_to_log_dinode(
to->di_format = from->di_format;
to->di_uid = from->di_uid;
to->di_gid = from->di_gid;
- to->di_projid_lo = from->di_projid_lo;
- to->di_projid_hi = from->di_projid_hi;
+ to->di_projid_lo = from->di_projid & 0xffff;
+ to->di_projid_hi = from->di_projid >> 16;
memset(to->di_pad, 0, sizeof(to->di_pad));
memset(to->di_pad3, 0, sizeof(to->di_pad3));
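The log format keeps the legacy split fields, so logging splits the 32-bit
di_projid as above; any reader of the pair reassembles it with the inverse
expression, matching the xfs_get_projid helper removed from xfs_inode.h
earlier in this patch:

	projid = (prid_t)di_projid_hi << 16 | di_projid_lo;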
@@ -340,8 +341,8 @@ xfs_inode_to_log_dinode(
if (from->di_version == 3) {
to->di_changecount = inode_peek_iversion(inode);
- to->di_crtime.t_sec = from->di_crtime.t_sec;
- to->di_crtime.t_nsec = from->di_crtime.t_nsec;
+ to->di_crtime.t_sec = from->di_crtime.tv_sec;
+ to->di_crtime.t_nsec = from->di_crtime.tv_nsec;
to->di_flags2 = from->di_flags2;
to->di_cowextsize = from->di_cowextsize;
to->di_ino = ip->i_ino;
@@ -666,7 +667,7 @@ xfs_inode_item_destroy(
xfs_inode_t *ip)
{
kmem_free(ip->i_itemp->ili_item.li_lv_shadow);
- kmem_zone_free(xfs_ili_zone, ip->i_itemp);
+ kmem_cache_free(xfs_ili_zone, ip->i_itemp);
}
@@ -828,8 +829,10 @@ xfs_inode_item_format_convert(
{
struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
- if (buf->i_len != sizeof(*in_f32))
+ if (buf->i_len != sizeof(*in_f32)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
+ }
in_f->ilf_type = in_f32->ilf_type;
in_f->ilf_size = in_f32->ilf_size;
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d58f0d6a699e..7b35d62ede9f 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -33,6 +33,8 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_reflink.h"
+#include "xfs_ioctl.h"
#include <linux/mount.h>
#include <linux/namei.h>
@@ -290,82 +292,6 @@ xfs_readlink_by_handle(
return error;
}
-int
-xfs_set_dmattrs(
- xfs_inode_t *ip,
- uint evmask,
- uint16_t state)
-{
- xfs_mount_t *mp = ip->i_mount;
- xfs_trans_t *tp;
- int error;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- ip->i_d.di_dmevmask = evmask;
- ip->i_d.di_dmstate = state;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_trans_commit(tp);
-
- return error;
-}
-
-STATIC int
-xfs_fssetdm_by_handle(
- struct file *parfilp,
- void __user *arg)
-{
- int error;
- struct fsdmidata fsd;
- xfs_fsop_setdm_handlereq_t dmhreq;
- struct dentry *dentry;
-
- if (!capable(CAP_MKNOD))
- return -EPERM;
- if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
- return -EFAULT;
-
- error = mnt_want_write_file(parfilp);
- if (error)
- return error;
-
- dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
- if (IS_ERR(dentry)) {
- mnt_drop_write_file(parfilp);
- return PTR_ERR(dentry);
- }
-
- if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
- error = -EPERM;
- goto out;
- }
-
- if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
- error = -EFAULT;
- goto out;
- }
-
- error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
- fsd.fsd_dmstate);
-
- out:
- mnt_drop_write_file(parfilp);
- dput(dentry);
- return error;
-}
-
STATIC int
xfs_attrlist_by_handle(
struct file *parfilp,
@@ -588,13 +514,12 @@ xfs_attrmulti_by_handle(
int
xfs_ioc_space(
struct file *filp,
- unsigned int cmd,
xfs_flock64_t *bf)
{
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct iattr iattr;
- enum xfs_prealloc_flags flags = 0;
+ enum xfs_prealloc_flags flags = XFS_PREALLOC_CLEAR;
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
int error;
@@ -607,6 +532,9 @@ xfs_ioc_space(
if (!S_ISREG(inode->i_mode))
return -EINVAL;
+ if (xfs_is_always_cow_inode(ip))
+ return -EOPNOTSUPP;
+
if (filp->f_flags & O_DSYNC)
flags |= XFS_PREALLOC_SYNC;
if (filp->f_mode & FMODE_NOCMTIME)
@@ -620,6 +548,7 @@ xfs_ioc_space(
error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
if (error)
goto out_unlock;
+ inode_dio_wait(inode);
switch (bf->l_whence) {
case 0: /*SEEK_SET*/
@@ -635,73 +564,21 @@ xfs_ioc_space(
goto out_unlock;
}
- /*
- * length of <= 0 for resv/unresv/zero is invalid. length for
- * alloc/free is ignored completely and we have no idea what userspace
- * might have set it to, so set it to zero to allow range
- * checks to pass.
- */
- switch (cmd) {
- case XFS_IOC_ZERO_RANGE:
- case XFS_IOC_RESVSP:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_UNRESVSP64:
- if (bf->l_len <= 0) {
- error = -EINVAL;
- goto out_unlock;
- }
- break;
- default:
- bf->l_len = 0;
- break;
- }
-
- if (bf->l_start < 0 ||
- bf->l_start > inode->i_sb->s_maxbytes ||
- bf->l_start + bf->l_len < 0 ||
- bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
+ if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
error = -EINVAL;
goto out_unlock;
}
- switch (cmd) {
- case XFS_IOC_ZERO_RANGE:
- flags |= XFS_PREALLOC_SET;
- error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
- break;
- case XFS_IOC_RESVSP:
- case XFS_IOC_RESVSP64:
- flags |= XFS_PREALLOC_SET;
- error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
- XFS_BMAPI_PREALLOC);
- break;
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_UNRESVSP64:
- error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
- break;
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP:
- case XFS_IOC_FREESP64:
- flags |= XFS_PREALLOC_CLEAR;
- if (bf->l_start > XFS_ISIZE(ip)) {
- error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
- bf->l_start - XFS_ISIZE(ip), 0);
- if (error)
- goto out_unlock;
- }
-
- iattr.ia_valid = ATTR_SIZE;
- iattr.ia_size = bf->l_start;
-
- error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
- break;
- default:
- ASSERT(0);
- error = -EINVAL;
+ if (bf->l_start > XFS_ISIZE(ip)) {
+ error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+ bf->l_start - XFS_ISIZE(ip), 0);
+ if (error)
+ goto out_unlock;
}
+ iattr.ia_valid = ATTR_SIZE;
+ iattr.ia_size = bf->l_start;
+ error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
if (error)
goto out_unlock;
@@ -1116,7 +993,7 @@ xfs_fill_fsxattr(
fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
ip->i_mount->m_sb.sb_blocklog;
- fa->fsx_projid = xfs_get_projid(ip);
+ fa->fsx_projid = ip->i_d.di_projid;
if (attr) {
if (ip->i_afp) {
@@ -1311,10 +1188,9 @@ xfs_ioctl_setattr_dax_invalidate(
* have to check the device for dax support or flush pagecache.
*/
if (fa->fsx_xflags & FS_XFLAG_DAX) {
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
- return -EINVAL;
- if (!bdev_dax_supported(xfs_find_bdev_for_inode(VFS_I(ip)),
- sb->s_blocksize))
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+
+ if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
return -EINVAL;
}
@@ -1569,7 +1445,7 @@ xfs_ioctl_setattr(
}
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
- xfs_get_projid(ip) != fa->fsx_projid) {
+ ip->i_d.di_projid != fa->fsx_projid) {
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
@@ -1606,13 +1482,13 @@ xfs_ioctl_setattr(
VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
/* Change the ownerships and register project quota modifications */
- if (xfs_get_projid(ip) != fa->fsx_projid) {
+ if (ip->i_d.di_projid != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_pdquot, pdqp);
}
ASSERT(ip->i_d.di_version > 1);
- xfs_set_projid(ip, fa->fsx_projid);
+ ip->i_d.di_projid = fa->fsx_projid;
}
/*
@@ -2122,24 +1998,17 @@ xfs_file_ioctl(
return xfs_ioc_setlabel(filp, mp, arg);
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- case XFS_IOC_ZERO_RANGE: {
+ case XFS_IOC_FREESP64: {
xfs_flock64_t bf;
if (copy_from_user(&bf, arg, sizeof(bf)))
return -EFAULT;
- return xfs_ioc_space(filp, cmd, &bf);
+ return xfs_ioc_space(filp, &bf);
}
case XFS_IOC_DIOINFO: {
- struct dioattr da;
- xfs_buftarg_t *target =
- XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ struct dioattr da;
da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
@@ -2183,22 +2052,6 @@ xfs_file_ioctl(
case XFS_IOC_SETXFLAGS:
return xfs_ioc_setxflags(ip, filp, arg);
- case XFS_IOC_FSSETDM: {
- struct fsdmidata dmi;
-
- if (copy_from_user(&dmi, arg, sizeof(dmi)))
- return -EFAULT;
-
- error = mnt_want_write_file(filp);
- if (error)
- return error;
-
- error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
- dmi.fsd_dmstate);
- mnt_drop_write_file(filp);
- return error;
- }
-
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
case XFS_IOC_GETBMAPX:
@@ -2226,8 +2079,6 @@ xfs_file_ioctl(
return -EFAULT;
return xfs_open_by_handle(filp, &hreq);
}
- case XFS_IOC_FSSETDM_BY_HANDLE:
- return xfs_fssetdm_by_handle(filp, arg);
case XFS_IOC_READLINK_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index 654c0bb1bcf8..420bd95dc326 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -9,7 +9,6 @@
extern int
xfs_ioc_space(
struct file *filp,
- unsigned int cmd,
xfs_flock64_t *bf);
int
@@ -71,12 +70,6 @@ xfs_file_compat_ioctl(
unsigned int cmd,
unsigned long arg);
-extern int
-xfs_set_dmattrs(
- struct xfs_inode *ip,
- uint evmask,
- uint16_t state);
-
struct xfs_ibulk;
struct xfs_bstat;
struct xfs_inogrp;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 1e08bf79b478..c4c4f09113d3 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -500,44 +500,6 @@ xfs_compat_attrmulti_by_handle(
return error;
}
-STATIC int
-xfs_compat_fssetdm_by_handle(
- struct file *parfilp,
- void __user *arg)
-{
- int error;
- struct fsdmidata fsd;
- compat_xfs_fsop_setdm_handlereq_t dmhreq;
- struct dentry *dentry;
-
- if (!capable(CAP_MKNOD))
- return -EPERM;
- if (copy_from_user(&dmhreq, arg,
- sizeof(compat_xfs_fsop_setdm_handlereq_t)))
- return -EFAULT;
-
- dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
-
- if (IS_IMMUTABLE(d_inode(dentry)) || IS_APPEND(d_inode(dentry))) {
- error = -EPERM;
- goto out;
- }
-
- if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
- error = -EFAULT;
- goto out;
- }
-
- error = xfs_set_dmattrs(XFS_I(d_inode(dentry)), fsd.fsd_dmevmask,
- fsd.fsd_dmstate);
-
-out:
- dput(dentry);
- return error;
-}
-
long
xfs_file_compat_ioctl(
struct file *filp,
@@ -557,18 +519,13 @@ xfs_file_compat_ioctl(
case XFS_IOC_ALLOCSP_32:
case XFS_IOC_FREESP_32:
case XFS_IOC_ALLOCSP64_32:
- case XFS_IOC_FREESP64_32:
- case XFS_IOC_RESVSP_32:
- case XFS_IOC_UNRESVSP_32:
- case XFS_IOC_RESVSP64_32:
- case XFS_IOC_UNRESVSP64_32:
- case XFS_IOC_ZERO_RANGE_32: {
+ case XFS_IOC_FREESP64_32: {
struct xfs_flock64 bf;
if (xfs_compat_flock64_copyin(&bf, arg))
return -EFAULT;
cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
- return xfs_ioc_space(filp, cmd, &bf);
+ return xfs_ioc_space(filp, &bf);
}
case XFS_IOC_FSGEOMETRY_V1_32:
return xfs_compat_ioc_fsgeometry_v1(mp, arg);
@@ -651,8 +608,6 @@ xfs_file_compat_ioctl(
return xfs_compat_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
return xfs_compat_attrmulti_by_handle(filp, arg);
- case XFS_IOC_FSSETDM_BY_HANDLE_32:
- return xfs_compat_fssetdm_by_handle(filp, arg);
default:
/* try the native version */
return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 7985344d3aa6..8c7743cd490e 100644
--- a/fs/xfs/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
@@ -99,7 +99,7 @@ typedef struct compat_xfs_fsop_handlereq {
_IOWR('X', 108, struct compat_xfs_fsop_handlereq)
/* The bstat field in the swapext struct needs translation */
-typedef struct compat_xfs_swapext {
+struct compat_xfs_swapext {
int64_t sx_version; /* version */
int64_t sx_fdtarget; /* fd of target file */
int64_t sx_fdtmp; /* fd of tmp file */
@@ -107,7 +107,7 @@ typedef struct compat_xfs_swapext {
xfs_off_t sx_length; /* leng from offset */
char sx_pad[16]; /* pad space, unused */
struct compat_xfs_bstat sx_stat; /* stat of target b4 copy */
-} __compat_packed compat_xfs_swapext_t;
+} __compat_packed;
#define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext)
@@ -143,15 +143,6 @@ typedef struct compat_xfs_fsop_attrmulti_handlereq {
#define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \
_IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq)
-typedef struct compat_xfs_fsop_setdm_handlereq {
- struct compat_xfs_fsop_handlereq hreq; /* handle information */
- /* ptr to struct fsdmidata */
- compat_uptr_t data; /* DMAPI data */
-} compat_xfs_fsop_setdm_handlereq_t;
-
-#define XFS_IOC_FSSETDM_BY_HANDLE_32 \
- _IOW('X', 121, struct compat_xfs_fsop_setdm_handlereq)
-
#ifdef BROKEN_X86_ALIGNMENT
/* on ia32 l_start is on a 32-bit boundary */
typedef struct compat_xfs_flock64 {
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index f780e223b118..28e2d1f37267 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -29,8 +29,8 @@
#include "xfs_reflink.h"
-#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
- << mp->m_writeio_log)
+#define XFS_ALLOC_ALIGN(mp, off) \
+ (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
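XFS_ALLOC_ALIGN rounds an offset down to the mount's allocation-size
granularity by discarding the low m_allocsize_log bits; for example, with
m_allocsize_log = 16 (64k), XFS_ALLOC_ALIGN(mp, 0x12345) = 0x10000.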
static int
xfs_alert_fsblock_zero(
@@ -54,9 +54,10 @@ xfs_bmbt_to_iomap(
struct xfs_inode *ip,
struct iomap *iomap,
struct xfs_bmbt_irec *imap,
- bool shared)
+ u16 flags)
{
struct xfs_mount *mp = ip->i_mount;
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
return xfs_alert_fsblock_zero(ip, imap);
@@ -77,14 +78,13 @@ xfs_bmbt_to_iomap(
}
iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
- iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
- iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+ iomap->bdev = target->bt_bdev;
+ iomap->dax_dev = target->bt_daxdev;
+ iomap->flags = flags;
if (xfs_ipincount(ip) &&
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
iomap->flags |= IOMAP_F_DIRTY;
- if (shared)
- iomap->flags |= IOMAP_F_SHARED;
return 0;
}
@@ -95,18 +95,30 @@ xfs_hole_to_iomap(
xfs_fileoff_t offset_fsb,
xfs_fileoff_t end_fsb)
{
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
- iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
- iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
+ iomap->bdev = target->bt_bdev;
+ iomap->dax_dev = target->bt_daxdev;
+}
+
+static inline xfs_fileoff_t
+xfs_iomap_end_fsb(
+ struct xfs_mount *mp,
+ loff_t offset,
+ loff_t count)
+{
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+ return min(XFS_B_TO_FSB(mp, offset + count),
+ XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}
-xfs_extlen_t
+static xfs_extlen_t
xfs_eof_alignment(
- struct xfs_inode *ip,
- xfs_extlen_t extsize)
+ struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
xfs_extlen_t align = 0;
@@ -129,111 +141,80 @@ xfs_eof_alignment(
align = 0;
}
- /*
- * Always round up the allocation request to an extent boundary
- * (when file on a real-time subvolume or has di_extsize hint).
- */
- if (extsize) {
- if (align)
- align = roundup_64(align, extsize);
- else
- align = extsize;
- }
-
return align;
}
-STATIC int
+/*
+ * Check if last_fsb is outside the last extent, and if so grow it to the next
+ * stripe unit boundary.
+ */
+xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
struct xfs_inode *ip,
- xfs_extlen_t extsize,
- xfs_fileoff_t *last_fsb)
+ xfs_fileoff_t end_fsb)
{
- xfs_extlen_t align = xfs_eof_alignment(ip, extsize);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_extlen_t extsz = xfs_get_extsz_hint(ip);
+ xfs_extlen_t align = xfs_eof_alignment(ip);
+ struct xfs_bmbt_irec irec;
+ struct xfs_iext_cursor icur;
+
+ ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+
+ /*
+ * Always round up the allocation request to the extent hint boundary.
+ */
+ if (extsz) {
+ if (align)
+ align = roundup_64(align, extsz);
+ else
+ align = extsz;
+ }
if (align) {
- xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
- int eof, error;
+ xfs_fileoff_t aligned_end_fsb = roundup_64(end_fsb, align);
- error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
- if (error)
- return error;
- if (eof)
- *last_fsb = new_last_fsb;
+ xfs_iext_last(ifp, &icur);
+ if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
+ aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
+ return aligned_end_fsb;
}
- return 0;
+
+ return end_fsb;
}
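For example, with a stripe-unit alignment of 16 blocks and an extent size hint
of 24 blocks, align becomes roundup_64(16, 24) = 24, and end_fsb = 50 is
rounded up to 72. The aligned value is only used when it sits at or beyond the
end of the last in-core extent; otherwise the caller's end_fsb is returned
unchanged.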
int
xfs_iomap_write_direct(
- xfs_inode_t *ip,
- xfs_off_t offset,
- size_t count,
- xfs_bmbt_irec_t *imap,
- int nmaps)
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t count_fsb,
+ struct xfs_bmbt_irec *imap)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t last_fsb;
- xfs_filblks_t count_fsb, resaligned;
- xfs_extlen_t extsz;
- int nimaps;
- int quota_flag;
- int rt;
- xfs_trans_t *tp;
- uint qblocks, resblks, resrtextents;
- int error;
- int lockmode;
- int bmapi_flags = XFS_BMAPI_PREALLOC;
- uint tflags = 0;
-
- rt = XFS_IS_REALTIME_INODE(ip);
- extsz = xfs_get_extsz_hint(ip);
- lockmode = XFS_ILOCK_SHARED; /* locked by caller */
-
- ASSERT(xfs_isilocked(ip, lockmode));
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ xfs_filblks_t resaligned;
+ int nimaps;
+ int quota_flag;
+ uint qblocks, resblks;
+ unsigned int resrtextents = 0;
+ int error;
+ int bmapi_flags = XFS_BMAPI_PREALLOC;
+ uint tflags = 0;
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
- if ((offset + count) > XFS_ISIZE(ip)) {
- /*
- * Assert that the in-core extent list is present since this can
- * call xfs_iread_extents() and we only have the ilock shared.
- * This should be safe because the lock was held around a bmapi
- * call in the caller and we only need it to access the in-core
- * list.
- */
- ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
- XFS_IFEXTENTS);
- error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
- if (error)
- goto out_unlock;
- } else {
- if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
- last_fsb = min(last_fsb, (xfs_fileoff_t)
- imap->br_blockcount +
- imap->br_startoff);
- }
- count_fsb = last_fsb - offset_fsb;
ASSERT(count_fsb > 0);
- resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);
- if (unlikely(rt)) {
+ resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
+ xfs_get_extsz_hint(ip));
+ if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
resrtextents = qblocks = resaligned;
resrtextents /= mp->m_sb.sb_rextsize;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
quota_flag = XFS_QMOPT_RES_RTBLKS;
} else {
- resrtextents = 0;
resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
quota_flag = XFS_QMOPT_RES_REGBLKS;
}
- /*
- * Drop the shared lock acquired by the caller, attach the dquot if
- * necessary and move on to transaction setup.
- */
- xfs_iunlock(ip, lockmode);
error = xfs_qm_dqattach(ip);
if (error)
return error;
@@ -263,8 +244,7 @@ xfs_iomap_write_direct(
if (error)
return error;
- lockmode = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lockmode);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
if (error)
@@ -277,8 +257,8 @@ xfs_iomap_write_direct(
* caller gave to us.
*/
nimaps = 1;
- error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
- bmapi_flags, resblks, imap, &nimaps);
+ error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
+ imap, &nimaps);
if (error)
goto out_res_cancel;
@@ -301,7 +281,7 @@ xfs_iomap_write_direct(
error = xfs_alert_fsblock_zero(ip, imap);
out_unlock:
- xfs_iunlock(ip, lockmode);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_res_cancel:
@@ -410,19 +390,19 @@ xfs_iomap_prealloc_size(
if (offset + count <= XFS_ISIZE(ip))
return 0;
- if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
- (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
+ if (!(mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
+ (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)))
return 0;
/*
* If an explicit allocsize is set, the file is small, or we
* are writing behind a hole, then use the minimum prealloc:
*/
- if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
+ if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) ||
XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
!xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
prev.br_startoff + prev.br_blockcount < offset_fsb)
- return mp->m_writeio_blocks;
+ return mp->m_allocsize_blocks;
/*
* Determine the initial size of the preallocation. We are beyond the
@@ -515,219 +495,13 @@ xfs_iomap_prealloc_size(
while (alloc_blocks && alloc_blocks >= freesp)
alloc_blocks >>= 4;
check_writeio:
- if (alloc_blocks < mp->m_writeio_blocks)
- alloc_blocks = mp->m_writeio_blocks;
+ if (alloc_blocks < mp->m_allocsize_blocks)
+ alloc_blocks = mp->m_allocsize_blocks;
trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
- mp->m_writeio_blocks);
+ mp->m_allocsize_blocks);
return alloc_blocks;
}
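Each pass of the throttle loop above shrinks the preallocation by a factor of
16: with alloc_blocks = 65536 and freesp = 5000, a single shift yields 4096,
which is below freesp, so the loop stops; the result is then clamped back up
to at least m_allocsize_blocks.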
-static int
-xfs_file_iomap_begin_delay(
- struct inode *inode,
- loff_t offset,
- loff_t count,
- unsigned flags,
- struct iomap *iomap)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
- xfs_fileoff_t maxbytes_fsb =
- XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
- xfs_fileoff_t end_fsb;
- struct xfs_bmbt_irec imap, cmap;
- struct xfs_iext_cursor icur, ccur;
- xfs_fsblock_t prealloc_blocks = 0;
- bool eof = false, cow_eof = false, shared = false;
- int whichfork = XFS_DATA_FORK;
- int error = 0;
-
- ASSERT(!XFS_IS_REALTIME_INODE(ip));
- ASSERT(!xfs_get_extsz_hint(ip));
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- if (unlikely(XFS_TEST_ERROR(
- (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
- mp, XFS_ERRTAG_BMAPIFORMAT))) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
- error = -EFSCORRUPTED;
- goto out_unlock;
- }
-
- XFS_STATS_INC(mp, xs_blk_mapw);
-
- if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
- if (error)
- goto out_unlock;
- }
-
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
-
- /*
- * Search the data fork fork first to look up our source mapping. We
- * always need the data fork map, as we have to return it to the
- * iomap code so that the higher level write code can read data in to
- * perform read-modify-write cycles for unaligned writes.
- */
- eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
- if (eof)
- imap.br_startoff = end_fsb; /* fake hole until the end */
-
- /* We never need to allocate blocks for zeroing a hole. */
- if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
- xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
- goto out_unlock;
- }
-
- /*
- * Search the COW fork extent list even if we did not find a data fork
- * extent. This serves two purposes: first this implements the
- * speculative preallocation using cowextsize, so that we also unshare
- * block adjacent to shared blocks instead of just the shared blocks
- * themselves. Second the lookup in the extent list is generally faster
- * than going out to the shared extent tree.
- */
- if (xfs_is_cow_inode(ip)) {
- if (!ip->i_cowfp) {
- ASSERT(!xfs_is_reflink_inode(ip));
- xfs_ifork_init_cow(ip);
- }
- cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
- &ccur, &cmap);
- if (!cow_eof && cmap.br_startoff <= offset_fsb) {
- trace_xfs_reflink_cow_found(ip, &cmap);
- whichfork = XFS_COW_FORK;
- goto done;
- }
- }
-
- if (imap.br_startoff <= offset_fsb) {
- /*
- * For reflink files we may need a delalloc reservation when
- * overwriting shared extents. This includes zeroing of
- * existing extents that contain data.
- */
- if (!xfs_is_cow_inode(ip) ||
- ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
- trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
- &imap);
- goto done;
- }
-
- xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
-
- /* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_inode_need_cow(ip, &imap, &shared);
- if (error)
- goto out_unlock;
-
- /* Not shared? Just report the (potentially capped) extent. */
- if (!shared) {
- trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
- &imap);
- goto done;
- }
-
- /*
- * Fork all the shared blocks from our write offset until the
- * end of the extent.
- */
- whichfork = XFS_COW_FORK;
- end_fsb = imap.br_startoff + imap.br_blockcount;
- } else {
- /*
- * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
- * pages to keep the chunks of work done where somewhat
- * symmetric with the work writeback does. This is a completely
- * arbitrary number pulled out of thin air.
- *
- * Note that the values needs to be less than 32-bits wide until
- * the lower level functions are updated.
- */
- count = min_t(loff_t, count, 1024 * PAGE_SIZE);
- end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
-
- if (xfs_is_always_cow_inode(ip))
- whichfork = XFS_COW_FORK;
- }
-
- error = xfs_qm_dqattach_locked(ip, false);
- if (error)
- goto out_unlock;
-
- if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, whichfork, offset,
- count, &icur);
- if (prealloc_blocks) {
- xfs_extlen_t align;
- xfs_off_t end_offset;
- xfs_fileoff_t p_end_fsb;
-
- end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
- p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
- prealloc_blocks;
-
- align = xfs_eof_alignment(ip, 0);
- if (align)
- p_end_fsb = roundup_64(p_end_fsb, align);
-
- p_end_fsb = min(p_end_fsb, maxbytes_fsb);
- ASSERT(p_end_fsb > offset_fsb);
- prealloc_blocks = p_end_fsb - end_fsb;
- }
- }
-
-retry:
- error = xfs_bmapi_reserve_delalloc(ip, whichfork, offset_fsb,
- end_fsb - offset_fsb, prealloc_blocks,
- whichfork == XFS_DATA_FORK ? &imap : &cmap,
- whichfork == XFS_DATA_FORK ? &icur : &ccur,
- whichfork == XFS_DATA_FORK ? eof : cow_eof);
- switch (error) {
- case 0:
- break;
- case -ENOSPC:
- case -EDQUOT:
- /* retry without any preallocation */
- trace_xfs_delalloc_enospc(ip, offset, count);
- if (prealloc_blocks) {
- prealloc_blocks = 0;
- goto retry;
- }
- /*FALLTHRU*/
- default:
- goto out_unlock;
- }
-
- /*
- * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
- * them out if the write happens to fail.
- */
- iomap->flags |= IOMAP_F_NEW;
- trace_xfs_iomap_alloc(ip, offset, count, whichfork,
- whichfork == XFS_DATA_FORK ? &imap : &cmap);
-done:
- if (whichfork == XFS_COW_FORK) {
- if (imap.br_startoff > offset_fsb) {
- xfs_trim_extent(&cmap, offset_fsb,
- imap.br_startoff - offset_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
- goto out_unlock;
- }
- /* ensure we only report blocks we have a reservation for */
- xfs_trim_extent(&imap, cmap.br_startoff, cmap.br_blockcount);
- shared = true;
- }
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return error;
-}
-
int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
@@ -765,6 +539,11 @@ xfs_iomap_write_unwritten(
*/
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
+ /* Attach dquots so that bmbt splits are accounted correctly. */
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ return error;
+
do {
/*
* Set up a transaction to convert the range of extents
@@ -783,6 +562,11 @@ xfs_iomap_write_unwritten(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
+ error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto error_on_bmapi_transaction;
+
/*
* Modify the unwritten extent state of the buffer.
*/
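/*
 * Editor's sketch of the quota handling the two hunks above add to
 * xfs_iomap_write_unwritten(): dquots are attached once before the loop, and
 * each transaction then reserves its metadata blocks against quota. Every
 * helper here is an assumed stand-in, not a kernel API.
 */
struct inode_q;
struct txn;
extern int dquot_attach(struct inode_q *ip);		/* once, up front */
extern struct txn *txn_alloc(unsigned long resblks);
extern int txn_reserve_quota(struct txn *tp, struct inode_q *ip,
			     unsigned long resblks);
extern void txn_cancel(struct txn *tp);
extern int txn_commit(struct txn *tp);

static int convert_loop(struct inode_q *ip, unsigned long resblks, int passes)
{
	int error = dquot_attach(ip);

	if (error)
		return error;
	while (passes--) {
		struct txn *tp = txn_alloc(resblks);

		error = txn_reserve_quota(tp, ip, resblks);
		if (error) {
			txn_cancel(tp);	/* quota failure aborts this pass */
			return error;
		}
		error = txn_commit(tp);
		if (error)
			return error;
	}
	return 0;
}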
@@ -840,23 +624,42 @@ error_on_bmapi_transaction:
static inline bool
imap_needs_alloc(
struct inode *inode,
+ unsigned flags,
struct xfs_bmbt_irec *imap,
int nimaps)
{
- return !nimaps ||
- imap->br_startblock == HOLESTARTBLOCK ||
- imap->br_startblock == DELAYSTARTBLOCK ||
- (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
+ /* don't allocate blocks when just zeroing */
+ if (flags & IOMAP_ZERO)
+ return false;
+ if (!nimaps ||
+ imap->br_startblock == HOLESTARTBLOCK ||
+ imap->br_startblock == DELAYSTARTBLOCK)
+ return true;
+ /* we convert unwritten extents before copying the data for DAX */
+ if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
+ return true;
+ return false;
}
static inline bool
-needs_cow_for_zeroing(
+imap_needs_cow(
+ struct xfs_inode *ip,
+ unsigned int flags,
struct xfs_bmbt_irec *imap,
int nimaps)
{
- return nimaps &&
- imap->br_startblock != HOLESTARTBLOCK &&
- imap->br_state != XFS_EXT_UNWRITTEN;
+ if (!xfs_is_cow_inode(ip))
+ return false;
+
+ /* when zeroing we don't have to COW holes or unwritten extents */
+ if (flags & IOMAP_ZERO) {
+ if (!nimaps ||
+ imap->br_startblock == HOLESTARTBLOCK ||
+ imap->br_state == XFS_EXT_UNWRITTEN)
+ return false;
+ }
+
+ return true;
}
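/*
 * Editor's sketch (hypothetical names) of the predicate style
 * imap_needs_alloc()/imap_needs_cow() adopt above: check the operation flags
 * first, then the state of the mapping itself.
 */
#include <stdbool.h>

enum { OP_ZERO = 1u << 0 };

struct mapping {
	bool hole;	/* no blocks back this range */
	bool delalloc;	/* reserved but not yet allocated */
};

static bool needs_allocation(unsigned int flags, const struct mapping *m,
			     int nmaps)
{
	if (flags & OP_ZERO)
		return false;	/* zeroing never allocates over a hole */
	return !nmaps || m->hole || m->delalloc;
}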
static int
@@ -872,15 +675,8 @@ xfs_ilock_for_iomap(
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
*/
- if (xfs_is_cow_inode(ip) && is_write) {
- /*
- * FIXME: It could still overwrite on unshared extents and not
- * need allocation.
- */
- if (flags & IOMAP_NOWAIT)
- return -EAGAIN;
+ if (xfs_is_cow_inode(ip) && is_write)
mode = XFS_ILOCK_EXCL;
- }
/*

 * Extents not yet cached require exclusive access, don't block. This
@@ -917,111 +713,73 @@ relock:
}
static int
-xfs_file_iomap_begin(
+xfs_direct_write_iomap_begin(
struct inode *inode,
loff_t offset,
loff_t length,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- struct xfs_bmbt_irec imap;
- xfs_fileoff_t offset_fsb, end_fsb;
+ struct xfs_bmbt_irec imap, cmap;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
int nimaps = 1, error = 0;
bool shared = false;
+ u16 iomap_flags = 0;
unsigned lockmode;
+ ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
+
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
- !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
- /* Reserve delalloc blocks for regular writeback. */
- return xfs_file_iomap_begin_delay(inode, offset, length, flags,
- iomap);
- }
-
/*
- * Lock the inode in the manner required for the specified operation and
- * check for as many conditions that would result in blocking as
- * possible. This removes most of the non-blocking checks from the
- * mapping code below.
+ * Writes that span EOF might trigger an IO size update on completion,
+ * so consider them to be dirty for the purposes of O_DSYNC even if
+	 * no other metadata changes are pending or have been made here.
*/
+ if (offset + length > i_size_read(inode))
+ iomap_flags |= IOMAP_F_DIRTY;
+
error = xfs_ilock_for_iomap(ip, flags, &lockmode);
if (error)
return error;
- ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset > mp->m_super->s_maxbytes - length)
- length = mp->m_super->s_maxbytes - offset;
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- end_fsb = XFS_B_TO_FSB(mp, offset + length);
-
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, 0);
if (error)
goto out_unlock;
- if (flags & IOMAP_REPORT) {
- /* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
- if (error)
+ if (imap_needs_cow(ip, flags, &imap, nimaps)) {
+ error = -EAGAIN;
+ if (flags & IOMAP_NOWAIT)
goto out_unlock;
- }
-
- /* Non-modifying mapping requested, so we are done */
- if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
- goto out_found;
-
- /*
- * Break shared extents if necessary. Checks for non-blocking IO have
- * been done up front, so we don't need to do them here.
- */
- if (xfs_is_cow_inode(ip)) {
- struct xfs_bmbt_irec cmap;
- bool directio = (flags & IOMAP_DIRECT);
-
- /* if zeroing doesn't need COW allocation, then we are done. */
- if ((flags & IOMAP_ZERO) &&
- !needs_cow_for_zeroing(&imap, nimaps))
- goto out_found;
/* may drop and re-acquire the ilock */
- cmap = imap;
- error = xfs_reflink_allocate_cow(ip, &cmap, &shared, &lockmode,
- directio);
+ error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
+ &lockmode, flags & IOMAP_DIRECT);
if (error)
goto out_unlock;
-
- /*
- * For buffered writes we need to report the address of the
- * previous block (if there was any) so that the higher level
- * write code can perform read-modify-write operations; we
- * won't need the CoW fork mapping until writeback. For direct
- * I/O, which must be block aligned, we need to report the
- * newly allocated address. If the data fork has a hole, copy
- * the COW fork mapping to avoid allocating to the data fork.
- */
- if (directio || imap.br_startblock == HOLESTARTBLOCK)
- imap = cmap;
-
+ if (shared)
+ goto out_found_cow;
end_fsb = imap.br_startoff + imap.br_blockcount;
length = XFS_FSB_TO_B(mp, end_fsb) - offset;
}
- /* Don't need to allocate over holes when doing zeroing operations. */
- if (flags & IOMAP_ZERO)
- goto out_found;
+ if (imap_needs_alloc(inode, flags, &imap, nimaps))
+ goto allocate_blocks;
- if (!imap_needs_alloc(inode, &imap, nimaps))
- goto out_found;
+ xfs_iunlock(ip, lockmode);
+ trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
- /* If nowait is set bail since we are going to make allocations. */
- if (flags & IOMAP_NOWAIT) {
- error = -EAGAIN;
+allocate_blocks:
+ error = -EAGAIN;
+ if (flags & IOMAP_NOWAIT)
goto out_unlock;
- }
/*
* We cap the maximum length we map to a sane size to keep the chunks
@@ -1033,48 +791,273 @@ xfs_file_iomap_begin(
* lower level functions are updated.
*/
length = min_t(loff_t, length, 1024 * PAGE_SIZE);
+ end_fsb = xfs_iomap_end_fsb(mp, offset, length);
- /*
- * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
- * return.
- */
- if (lockmode == XFS_ILOCK_EXCL)
- xfs_ilock_demote(ip, lockmode);
- error = xfs_iomap_write_direct(ip, offset, length, &imap,
- nimaps);
+ if (offset + length > XFS_ISIZE(ip))
+ end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
+ else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
+ end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
+ xfs_iunlock(ip, lockmode);
+
+ error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
+ &imap);
if (error)
return error;
- iomap->flags |= IOMAP_F_NEW;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
-out_finish:
- return xfs_bmbt_to_iomap(ip, iomap, &imap, shared);
-
-out_found:
- ASSERT(nimaps);
+out_found_cow:
xfs_iunlock(ip, lockmode);
- trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
- goto out_finish;
+ length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
+ trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
+ if (imap.br_startblock != HOLESTARTBLOCK) {
+ error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+ if (error)
+ return error;
+ }
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
}
+const struct iomap_ops xfs_direct_write_iomap_ops = {
+ .iomap_begin = xfs_direct_write_iomap_begin,
+};
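/*
 * Editor's sketch of the split this patch makes: rather than one begin
 * callback that branches on buffered vs. direct vs. read, each I/O path
 * registers its own ops table. Types and names are hypothetical
 * simplifications of struct iomap_ops.
 */
struct io_ops {
	int (*begin)(long long offset, long long length, unsigned int flags);
	int (*end)(long long offset, long long length, long long written);
};

static int direct_begin(long long offset, long long length, unsigned int flags)
{
	(void)offset; (void)length; (void)flags;
	return 0;	/* direct-write-only setup; no buffered branches */
}

static const struct io_ops direct_write_ops = {
	.begin	= direct_begin,	/* no .end needed for the direct path */
};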
+
static int
-xfs_file_iomap_end_delalloc(
- struct xfs_inode *ip,
+xfs_buffered_write_iomap_begin(
+ struct inode *inode,
+ loff_t offset,
+ loff_t count,
+ unsigned flags,
+ struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, count);
+ struct xfs_bmbt_irec imap, cmap;
+ struct xfs_iext_cursor icur, ccur;
+ xfs_fsblock_t prealloc_blocks = 0;
+ bool eof = false, cow_eof = false, shared = false;
+ int allocfork = XFS_DATA_FORK;
+ int error = 0;
+
+ /* we can't use delayed allocations when using extent size hints */
+ if (xfs_get_extsz_hint(ip))
+ return xfs_direct_write_iomap_begin(inode, offset, count,
+ flags, iomap, srcmap);
+
+ ASSERT(!XFS_IS_REALTIME_INODE(ip));
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, XFS_DATA_FORK)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
+ error = -EFSCORRUPTED;
+ goto out_unlock;
+ }
+
+ XFS_STATS_INC(mp, xs_blk_mapw);
+
+ if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ goto out_unlock;
+ }
+
+ /*
+	 * Search the data fork first to look up our source mapping. We
+ * always need the data fork map, as we have to return it to the
+ * iomap code so that the higher level write code can read data in to
+ * perform read-modify-write cycles for unaligned writes.
+ */
+ eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
+ if (eof)
+ imap.br_startoff = end_fsb; /* fake hole until the end */
+
+ /* We never need to allocate blocks for zeroing a hole. */
+ if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
+ xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
+ goto out_unlock;
+ }
+
+ /*
+ * Search the COW fork extent list even if we did not find a data fork
+ * extent. This serves two purposes: first this implements the
+ * speculative preallocation using cowextsize, so that we also unshare
+	 * blocks adjacent to shared blocks instead of just the shared blocks
+ * themselves. Second the lookup in the extent list is generally faster
+ * than going out to the shared extent tree.
+ */
+ if (xfs_is_cow_inode(ip)) {
+ if (!ip->i_cowfp) {
+ ASSERT(!xfs_is_reflink_inode(ip));
+ xfs_ifork_init_cow(ip);
+ }
+ cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
+ &ccur, &cmap);
+ if (!cow_eof && cmap.br_startoff <= offset_fsb) {
+ trace_xfs_reflink_cow_found(ip, &cmap);
+ goto found_cow;
+ }
+ }
+
+ if (imap.br_startoff <= offset_fsb) {
+ /*
+ * For reflink files we may need a delalloc reservation when
+ * overwriting shared extents. This includes zeroing of
+ * existing extents that contain data.
+ */
+ if (!xfs_is_cow_inode(ip) ||
+ ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
+ trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
+ &imap);
+ goto found_imap;
+ }
+
+ xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
+
+ /* Trim the mapping to the nearest shared extent boundary. */
+ error = xfs_inode_need_cow(ip, &imap, &shared);
+ if (error)
+ goto out_unlock;
+
+ /* Not shared? Just report the (potentially capped) extent. */
+ if (!shared) {
+ trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
+ &imap);
+ goto found_imap;
+ }
+
+ /*
+ * Fork all the shared blocks from our write offset until the
+ * end of the extent.
+ */
+ allocfork = XFS_COW_FORK;
+ end_fsb = imap.br_startoff + imap.br_blockcount;
+ } else {
+ /*
+ * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
+		 * pages to keep the chunks of work done were somewhat
+ * symmetric with the work writeback does. This is a completely
+ * arbitrary number pulled out of thin air.
+ *
+		 * Note that the value needs to be less than 32 bits wide until
+ * the lower level functions are updated.
+ */
+ count = min_t(loff_t, count, 1024 * PAGE_SIZE);
+ end_fsb = xfs_iomap_end_fsb(mp, offset, count);
+
+ if (xfs_is_always_cow_inode(ip))
+ allocfork = XFS_COW_FORK;
+ }
+
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error)
+ goto out_unlock;
+
+ if (eof) {
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset,
+ count, &icur);
+ if (prealloc_blocks) {
+ xfs_extlen_t align;
+ xfs_off_t end_offset;
+ xfs_fileoff_t p_end_fsb;
+
+ end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
+ p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
+ prealloc_blocks;
+
+ align = xfs_eof_alignment(ip);
+ if (align)
+ p_end_fsb = roundup_64(p_end_fsb, align);
+
+ p_end_fsb = min(p_end_fsb,
+ XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
+ ASSERT(p_end_fsb > offset_fsb);
+ prealloc_blocks = p_end_fsb - end_fsb;
+ }
+ }
+
+retry:
+ error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
+ end_fsb - offset_fsb, prealloc_blocks,
+ allocfork == XFS_DATA_FORK ? &imap : &cmap,
+ allocfork == XFS_DATA_FORK ? &icur : &ccur,
+ allocfork == XFS_DATA_FORK ? eof : cow_eof);
+ switch (error) {
+ case 0:
+ break;
+ case -ENOSPC:
+ case -EDQUOT:
+ /* retry without any preallocation */
+ trace_xfs_delalloc_enospc(ip, offset, count);
+ if (prealloc_blocks) {
+ prealloc_blocks = 0;
+ goto retry;
+ }
+ /*FALLTHRU*/
+ default:
+ goto out_unlock;
+ }
+
+ if (allocfork == XFS_COW_FORK) {
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
+ goto found_cow;
+ }
+
+ /*
+ * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+ * them out if the write happens to fail.
+ */
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
+
+found_imap:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+
+found_cow:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (imap.br_startoff <= offset_fsb) {
+ error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+ if (error)
+ return error;
+ } else {
+ xfs_trim_extent(&cmap, offset_fsb,
+ imap.br_startoff - offset_fsb);
+ }
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+}
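/*
 * Editor's sketch of the retry pattern in the delalloc reservation above: if
 * the reservation fails for lack of space or quota, drop the speculative
 * preallocation and retry with just the blocks the write needs.
 * reserve_blocks() is an assumed helper, not a real kernel API.
 */
#include <errno.h>

extern int reserve_blocks(unsigned long nblocks);

static int reserve_with_fallback(unsigned long need, unsigned long prealloc)
{
	int error;

retry:
	error = reserve_blocks(need + prealloc);
	if ((error == -ENOSPC || error == -EDQUOT) && prealloc) {
		prealloc = 0;		/* retry without any preallocation */
		goto retry;
	}
	return error;
}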
+
+static int
+xfs_buffered_write_iomap_end(
+ struct inode *inode,
loff_t offset,
loff_t length,
ssize_t written,
+ unsigned flags,
struct iomap *iomap)
{
+ struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t start_fsb;
xfs_fileoff_t end_fsb;
int error = 0;
+ if (iomap->type != IOMAP_DELALLOC)
+ return 0;
+
/*
* Behave as if the write failed if drop writes is enabled. Set the NEW
* flag to force delalloc cleanup.
@@ -1119,24 +1102,51 @@ xfs_file_iomap_end_delalloc(
return 0;
}
+const struct iomap_ops xfs_buffered_write_iomap_ops = {
+ .iomap_begin = xfs_buffered_write_iomap_begin,
+ .iomap_end = xfs_buffered_write_iomap_end,
+};
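/*
 * Editor's sketch of what the buffered-write end callback above exists for:
 * after a short or failed write, any delalloc reservation beyond the bytes
 * actually written is released so it cannot leak. Hypothetical helper.
 */
extern void punch_range(long long start, long long len);

static int write_end(long long offset, long long length, long long written,
		     int is_delalloc)
{
	if (!is_delalloc)
		return 0;	/* only delalloc reservations need cleanup */
	if (written < length)	/* punch the tail the write never reached */
		punch_range(offset + written, length - written);
	return 0;
}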
+
static int
-xfs_file_iomap_end(
+xfs_read_iomap_begin(
struct inode *inode,
loff_t offset,
loff_t length,
- ssize_t written,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
- if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
- return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
- length, written, iomap);
- return 0;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec imap;
+ xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length);
+ int nimaps = 1, error = 0;
+ bool shared = false;
+ unsigned lockmode;
+
+ ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ error = xfs_ilock_for_iomap(ip, flags, &lockmode);
+ if (error)
+ return error;
+ error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+ &nimaps, 0);
+ if (!error && (flags & IOMAP_REPORT))
+ error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
+ xfs_iunlock(ip, lockmode);
+
+ if (error)
+ return error;
+ trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
}
-const struct iomap_ops xfs_iomap_ops = {
- .iomap_begin = xfs_file_iomap_begin,
- .iomap_end = xfs_file_iomap_end,
+const struct iomap_ops xfs_read_iomap_ops = {
+ .iomap_begin = xfs_read_iomap_begin,
};
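/*
 * Editor's sketch of the xfs_bmbt_to_iomap() signature change seen
 * throughout this patch: the single boolean "shared" argument becomes a u16
 * flags word, so callers can combine orthogonal attributes in one call.
 * Flag names here are hypothetical.
 */
#include <stdint.h>

#define MAP_F_NEW	(1u << 0)	/* freshly allocated */
#define MAP_F_DIRTY	(1u << 1)	/* needs O_DSYNC-style flushing */
#define MAP_F_SHARED	(1u << 2)	/* backed by a shared extent */

struct out_map {
	uint16_t flags;
};

static void fill_map(struct out_map *map, uint16_t flags)
{
	map->flags = flags;	/* e.g. MAP_F_SHARED | MAP_F_DIRTY */
}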
static int
@@ -1145,7 +1155,8 @@ xfs_seek_iomap_begin(
loff_t offset,
loff_t length,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -1178,8 +1189,7 @@ xfs_seek_iomap_begin(
/*
* Fake a hole until the end of the file.
*/
- data_fsb = min(XFS_B_TO_FSB(mp, offset + length),
- XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
+ data_fsb = xfs_iomap_end_fsb(mp, offset, length);
}
/*
@@ -1193,7 +1203,7 @@ xfs_seek_iomap_begin(
if (data_fsb < cow_fsb + cmap.br_blockcount)
end_fsb = min(end_fsb, data_fsb);
xfs_trim_extent(&cmap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, true);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
/*
* This is a COW extent, so we must probe the page cache
* because there could be dirty page cache being backed
@@ -1215,7 +1225,7 @@ xfs_seek_iomap_begin(
imap.br_state = XFS_EXT_NORM;
done:
xfs_trim_extent(&imap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
@@ -1231,7 +1241,8 @@ xfs_xattr_iomap_begin(
loff_t offset,
loff_t length,
unsigned flags,
- struct iomap *iomap)
+ struct iomap *iomap,
+ struct iomap *srcmap)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
@@ -1261,7 +1272,7 @@ out_unlock:
if (error)
return error;
ASSERT(nimaps);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}
const struct iomap_ops xfs_xattr_iomap_ops = {
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 5c2f6aa6d78f..7d3703556d0e 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -11,13 +11,14 @@
struct xfs_inode;
struct xfs_bmbt_irec;
-int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
- struct xfs_bmbt_irec *, int);
+int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t count_fsb, struct xfs_bmbt_irec *imap);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
+xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
+ xfs_fileoff_t end_fsb);
int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
- struct xfs_bmbt_irec *, bool shared);
-xfs_extlen_t xfs_eof_alignment(struct xfs_inode *ip, xfs_extlen_t extsize);
+ struct xfs_bmbt_irec *, u16);
static inline xfs_filblks_t
xfs_aligned_fsb_count(
@@ -39,7 +40,9 @@ xfs_aligned_fsb_count(
return count_fsb;
}
-extern const struct iomap_ops xfs_iomap_ops;
+extern const struct iomap_ops xfs_buffered_write_iomap_ops;
+extern const struct iomap_ops xfs_direct_write_iomap_ops;
+extern const struct iomap_ops xfs_read_iomap_ops;
extern const struct iomap_ops xfs_seek_iomap_ops;
extern const struct iomap_ops xfs_xattr_iomap_ops;
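/*
 * Editor's sketch of the unit change in the xfs_iomap_write_direct()
 * prototype above: the allocator now takes filesystem blocks, so callers
 * convert bytes exactly once at the boundary. Names are hypothetical;
 * compare XFS_B_TO_FSBT (truncate) and XFS_B_TO_FSB (round up).
 */
typedef long long fsblock_t;

static fsblock_t b_to_fsbt(long long bytes, unsigned int blocklog)
{
	return bytes >> blocklog;	/* truncate to the containing block */
}

static fsblock_t b_to_fsb(long long bytes, unsigned int blocklog)
{
	return (bytes + (1LL << blocklog) - 1) >> blocklog;	/* round up */
}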
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index fe285d123d69..8afe69ca188b 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -20,6 +20,7 @@
#include "xfs_symlink.h"
#include "xfs_dir2.h"
#include "xfs_iomap.h"
+#include "xfs_error.h"
#include <linux/xattr.h>
#include <linux/posix_acl.h>
@@ -470,20 +471,57 @@ xfs_vn_get_link_inline(
struct inode *inode,
struct delayed_call *done)
{
+ struct xfs_inode *ip = XFS_I(inode);
char *link;
- ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
+ ASSERT(ip->i_df.if_flags & XFS_IFINLINE);
/*
* The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
* if_data is junk.
*/
- link = XFS_I(inode)->i_df.if_u1.if_data;
- if (!link)
+ link = ip->i_df.if_u1.if_data;
+ if (XFS_IS_CORRUPT(ip->i_mount, !link))
return ERR_PTR(-EFSCORRUPTED);
return link;
}
+static uint32_t
+xfs_stat_blksize(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ /*
+ * If the file blocks are being allocated from a realtime volume, then
+ * always return the realtime extent size.
+ */
+ if (XFS_IS_REALTIME_INODE(ip))
+ return xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
+
+ /*
+ * Allow large block sizes to be reported to userspace programs if the
+ * "largeio" mount option is used.
+ *
+ * If compatibility mode is specified, simply return the basic unit of
+ * caching so that we don't get inefficient read/modify/write I/O from
+ * user apps. Otherwise....
+ *
+ * If the underlying volume is a stripe, then return the stripe width in
+	 * bytes as the recommended I/O size. If it is not a stripe and we've
+	 * set a default buffered I/O size, return that; otherwise return the
+	 * compat default.
+ */
+ if (mp->m_flags & XFS_MOUNT_LARGEIO) {
+ if (mp->m_swidth)
+ return mp->m_swidth << mp->m_sb.sb_blocklog;
+ if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
+ return 1U << mp->m_allocsize_log;
+ }
+
+ return PAGE_SIZE;
+}
+
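/*
 * Editor's sketch of the decision ladder factored into xfs_stat_blksize()
 * above: report the largest I/O size that is actually efficient for the
 * file, falling back to the page size. Field names are hypothetical.
 */
#include <stdbool.h>

static unsigned int stat_blksize(bool realtime, unsigned int rt_extsize,
				 bool largeio, unsigned int stripe_width,
				 unsigned int allocsize, unsigned int page_size)
{
	if (realtime)
		return rt_extsize;	/* realtime files: the RT extent size */
	if (largeio) {
		if (stripe_width)
			return stripe_width;	/* prefer the stripe width */
		if (allocsize)
			return allocsize;	/* else the configured size */
	}
	return page_size;		/* compat default */
}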
STATIC int
xfs_vn_getattr(
const struct path *path,
@@ -516,8 +554,7 @@ xfs_vn_getattr(
if (ip->i_d.di_version == 3) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
- stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
- stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+ stat->btime = ip->i_d.di_crtime;
}
}
@@ -543,16 +580,7 @@ xfs_vn_getattr(
stat->rdev = inode->i_rdev;
break;
default:
- if (XFS_IS_REALTIME_INODE(ip)) {
- /*
- * If the file blocks are being allocated from a
- * realtime volume, then return the inode's realtime
- * extent size or the realtime volume's extent size.
- */
- stat->blksize =
- xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
- } else
- stat->blksize = xfs_preferred_iosize(mp);
+ stat->blksize = xfs_stat_blksize(ip);
stat->rdev = 0;
break;
}
@@ -664,7 +692,7 @@ xfs_setattr_nonsize(
ASSERT(gdqp == NULL);
error = xfs_qm_vop_dqalloc(ip, xfs_kuid_to_uid(uid),
xfs_kgid_to_gid(gid),
- xfs_get_projid(ip),
+ ip->i_d.di_projid,
qflags, &udqp, &gdqp, NULL);
if (error)
return error;
@@ -883,10 +911,10 @@ xfs_setattr_size(
if (newsize > oldsize) {
trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
error = iomap_zero_range(inode, oldsize, newsize - oldsize,
- &did_zeroing, &xfs_iomap_ops);
+ &did_zeroing, &xfs_buffered_write_iomap_ops);
} else {
error = iomap_truncate_page(inode, newsize, &did_zeroing,
- &xfs_iomap_ops);
+ &xfs_buffered_write_iomap_ops);
}
if (error)
@@ -1114,7 +1142,7 @@ xfs_vn_fiemap(
&xfs_xattr_iomap_ops);
} else {
error = iomap_fiemap(inode, fieinfo, start, length,
- &xfs_iomap_ops);
+ &xfs_read_iomap_ops);
}
xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
@@ -1227,7 +1255,7 @@ xfs_inode_supports_dax(
return false;
/* Device has to support DAX too. */
- return xfs_find_daxdev_for_inode(VFS_I(ip)) != NULL;
+ return xfs_inode_buftarg(ip)->bt_daxdev != NULL;
}
STATIC void
@@ -1290,9 +1318,7 @@ xfs_setup_inode(
lockdep_set_class(&inode->i_rwsem,
&inode->i_sb->s_type->i_mutex_dir_key);
lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
- ip->d_ops = ip->i_mount->m_dir_inode_ops;
} else {
- ip->d_ops = ip->i_mount->m_nondir_inode_ops;
lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 884950adbd16..4b31c29b7e6b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -84,7 +84,7 @@ xfs_bulkstat_one_int(
/* xfs_iget returns the following without needing
* further change.
*/
- buf->bs_projectid = xfs_get_projid(ip);
+ buf->bs_projectid = ip->i_d.di_projid;
buf->bs_ino = ino;
buf->bs_uid = dic->di_uid;
buf->bs_gid = dic->di_gid;
@@ -97,8 +97,8 @@ xfs_bulkstat_one_int(
buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
buf->bs_ctime = inode->i_ctime.tv_sec;
buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
- buf->bs_btime = dic->di_crtime.t_sec;
- buf->bs_btime_nsec = dic->di_crtime.t_nsec;
+ buf->bs_btime = dic->di_crtime.tv_sec;
+ buf->bs_btime_nsec = dic->di_crtime.tv_nsec;
buf->bs_gen = inode->i_generation;
buf->bs_mode = inode->i_mode;
diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index aa375cf53021..233dcc8784db 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -298,7 +298,8 @@ xfs_iwalk_ag_start(
error = xfs_inobt_get_rec(*curpp, irec, has_more);
if (error)
return error;
- XFS_WANT_CORRUPTED_RETURN(mp, *has_more == 1);
+ if (XFS_IS_CORRUPT(mp, *has_more != 1))
+ return -EFSCORRUPTED;
/*
* If the LE lookup yielded an inobt record before the cursor position,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ca15105681ca..8738bb03f253 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -223,26 +223,32 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
char *data, unsigned int op);
#define ASSERT_ALWAYS(expr) \
- (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
#ifdef DEBUG
#define ASSERT(expr) \
- (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(NULL, #expr, __FILE__, __LINE__))
#else /* !DEBUG */
#ifdef XFS_WARN
#define ASSERT(expr) \
- (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : asswarn(NULL, #expr, __FILE__, __LINE__))
#else /* !DEBUG && !XFS_WARN */
-#define ASSERT(expr) ((void)0)
+#define ASSERT(expr) ((void)0)
#endif /* XFS_WARN */
#endif /* DEBUG */
+#define XFS_IS_CORRUPT(mp, expr) \
+ (unlikely(expr) ? xfs_corruption_error(#expr, XFS_ERRLEVEL_LOW, (mp), \
+ NULL, 0, __FILE__, __LINE__, \
+ __this_address), \
+ true : false)
+
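/*
 * Editor's sketch of how the new XFS_IS_CORRUPT() macro reads at call sites:
 * it reports the corruption as a side effect and evaluates to the truth of
 * the test, so the caller only supplies the error return. The reporting
 * helper here is a hypothetical stand-in.
 */
extern void report_corruption(void *ctx, const char *expr,
			      const char *file, int line);

#define IS_CORRUPT(ctx, expr)						\
	((expr) ? (report_corruption((ctx), #expr, __FILE__, __LINE__),\
		   1) : 0)

/* so a check-and-return collapses to:
 *	if (IS_CORRUPT(mp, nrecs == 0))
 *		return -EFSCORRUPTED;
 */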
#define STATIC static noinline
#ifdef CONFIG_XFS_RT
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 641d07f30a27..6a147c63a8a6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -57,10 +57,6 @@ xlog_state_get_iclog_space(
struct xlog_ticket *ticket,
int *continued_write,
int *logoffsetp);
-STATIC int
-xlog_state_release_iclog(
- struct xlog *log,
- struct xlog_in_core *iclog);
STATIC void
xlog_state_switch_iclogs(
struct xlog *log,
@@ -83,7 +79,10 @@ STATIC void
xlog_ungrant_log_space(
struct xlog *log,
struct xlog_ticket *ticket);
-
+STATIC void
+xlog_sync(
+ struct xlog *log,
+ struct xlog_in_core *iclog);
#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
@@ -552,16 +551,71 @@ xfs_log_done(
return lsn;
}
+static bool
+__xlog_state_release_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
+{
+ lockdep_assert_held(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
+ /* update tail before writing to iclog */
+ xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
+
+ iclog->ic_state = XLOG_STATE_SYNCING;
+ iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+ xlog_verify_tail_lsn(log, iclog, tail_lsn);
+ /* cycle incremented when incrementing curr_block */
+ return true;
+ }
+
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+ return false;
+}
+
+/*
+ * Flush iclog to disk if this is the last reference to the given iclog and
+ * it is in the WANT_SYNC state.
+ */
+static int
+xlog_state_release_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
+{
+ lockdep_assert_held(&log->l_icloglock);
+
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
+ return -EIO;
+
+ if (atomic_dec_and_test(&iclog->ic_refcnt) &&
+ __xlog_state_release_iclog(log, iclog)) {
+ spin_unlock(&log->l_icloglock);
+ xlog_sync(log, iclog);
+ spin_lock(&log->l_icloglock);
+ }
+
+ return 0;
+}
+
int
xfs_log_release_iclog(
- struct xfs_mount *mp,
+ struct xfs_mount *mp,
struct xlog_in_core *iclog)
{
- if (xlog_state_release_iclog(mp->m_log, iclog)) {
+ struct xlog *log = mp->m_log;
+ bool sync;
+
+ if (iclog->ic_state == XLOG_STATE_IOERROR) {
xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
return -EIO;
}
+ if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
+ sync = __xlog_state_release_iclog(log, iclog);
+ spin_unlock(&log->l_icloglock);
+ if (sync)
+ xlog_sync(log, iclog);
+ }
return 0;
}
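/*
 * Editor's sketch of the locking shape both release helpers above share: the
 * refcount drop and state transition happen under the lock, while the actual
 * I/O submission happens only after it is released. Hypothetical names.
 */
#include <stdbool.h>

struct obj;
extern void lock_obj(struct obj *o);
extern void unlock_obj(struct obj *o);
extern bool dec_ref_and_want_sync(struct obj *o);	/* under the lock */
extern void submit_io(struct obj *o);

static void release(struct obj *o)
{
	bool do_io;

	lock_obj(o);
	do_io = dec_ref_and_want_sync(o);
	unlock_obj(o);

	if (do_io)
		submit_io(o);	/* never issue the I/O with the lock held */
}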
@@ -866,10 +920,7 @@ out_err:
iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt);
xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
error = xlog_state_release_iclog(log, iclog);
-
- spin_lock(&log->l_icloglock);
switch (iclog->ic_state) {
default:
if (!XLOG_FORCED_SHUTDOWN(log)) {
@@ -924,8 +975,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
#ifdef DEBUG
first_iclog = iclog = log->l_iclog;
do {
- if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
- ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
+ if (iclog->ic_state != XLOG_STATE_IOERROR) {
+ ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
ASSERT(iclog->ic_offset == 0);
}
iclog = iclog->ic_next;
@@ -950,21 +1001,17 @@ xfs_log_unmount_write(xfs_mount_t *mp)
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt);
-
xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
error = xlog_state_release_iclog(log, iclog);
-
- spin_lock(&log->l_icloglock);
-
- if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE
- || iclog->ic_state == XLOG_STATE_DIRTY
- || iclog->ic_state == XLOG_STATE_IOERROR) ) {
-
- xlog_wait(&iclog->ic_force_wait,
- &log->l_icloglock);
- } else {
+ switch (iclog->ic_state) {
+ case XLOG_STATE_ACTIVE:
+ case XLOG_STATE_DIRTY:
+ case XLOG_STATE_IOERROR:
spin_unlock(&log->l_icloglock);
+ break;
+ default:
+ xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
+ break;
}
}
@@ -1254,7 +1301,7 @@ xlog_ioend_work(
* didn't succeed.
*/
aborted = true;
- } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ } else if (iclog->ic_state == XLOG_STATE_IOERROR) {
aborted = true;
}
@@ -1479,7 +1526,7 @@ xlog_alloc_log(
log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI, 0,
- mp->m_fsname);
+ mp->m_super->s_id);
if (!log->l_ioend_workqueue)
goto out_free_iclog;
@@ -1727,7 +1774,7 @@ xlog_write_iclog(
	 * across the log IO to achieve that.
*/
down(&iclog->ic_sema);
- if (unlikely(iclog->ic_state & XLOG_STATE_IOERROR)) {
+ if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
/*
* It would seem logical to return EIO here, but we rely on
	 * the log state machine to propagate I/O errors instead of
	 * doing it here. We kick off the state machine and unlock
	 * the buffer manually; the code needs to be kept in sync
* the buffer manually, the code needs to be kept in sync
* with the I/O completion path.
*/
- xlog_state_done_syncing(iclog, XFS_LI_ABORTED);
+ xlog_state_done_syncing(iclog, true);
up(&iclog->ic_sema);
return;
}
- iclog->ic_io_size = count;
-
bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
@@ -1751,9 +1796,9 @@ xlog_write_iclog(
if (need_flush)
iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
- xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, iclog->ic_io_size);
+ xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count);
if (is_vmalloc_addr(iclog->ic_data))
- flush_kernel_vmap_range(iclog->ic_data, iclog->ic_io_size);
+ flush_kernel_vmap_range(iclog->ic_data, count);
/*
* If this log buffer would straddle the end of the log we will have
@@ -1969,7 +2014,6 @@ xlog_dealloc_log(
/*
* Update counters atomically now that memcpy is done.
*/
-/* ARGSUSED */
static inline void
xlog_state_finish_copy(
struct xlog *log,
@@ -1977,16 +2021,11 @@ xlog_state_finish_copy(
int record_cnt,
int copy_bytes)
{
- spin_lock(&log->l_icloglock);
+ lockdep_assert_held(&log->l_icloglock);
be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
iclog->ic_offset += copy_bytes;
-
- spin_unlock(&log->l_icloglock);
-} /* xlog_state_finish_copy */
-
-
-
+}
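/*
 * Editor's sketch of the xlog_state_finish_copy() change above: the
 * lock/unlock pair moves out to the callers, and the function asserts that
 * the caller holds the lock instead, so several updates can be batched under
 * one acquisition. assert_locked() stands in for lockdep_assert_held().
 */
struct log_s { int offset; };
extern void assert_locked(struct log_s *log);

static void finish_copy(struct log_s *log, int copied)
{
	assert_locked(log);	/* caller must hold the log lock */
	log->offset += copied;
}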
/*
* print out info relating to regions written which consume
@@ -2263,15 +2302,18 @@ xlog_write_copy_finish(
int log_offset,
struct xlog_in_core **commit_iclog)
{
+ int error;
+
if (*partial_copy) {
/*
* This iclog has already been marked WANT_SYNC by
* xlog_state_get_iclog_space.
*/
+ spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
*record_cnt = 0;
*data_cnt = 0;
- return xlog_state_release_iclog(log, iclog);
+ goto release_iclog;
}
*partial_copy = 0;
@@ -2279,21 +2321,25 @@ xlog_write_copy_finish(
if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
/* no more space in this iclog - push it. */
+ spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
*record_cnt = 0;
*data_cnt = 0;
- spin_lock(&log->l_icloglock);
xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
-
if (!commit_iclog)
- return xlog_state_release_iclog(log, iclog);
+ goto release_iclog;
+ spin_unlock(&log->l_icloglock);
ASSERT(flags & XLOG_COMMIT_TRANS);
*commit_iclog = iclog;
}
return 0;
+
+release_iclog:
+ error = xlog_state_release_iclog(log, iclog);
+ spin_unlock(&log->l_icloglock);
+ return error;
}
/*
@@ -2355,7 +2401,7 @@ xlog_write(
int contwr = 0;
int record_cnt = 0;
int data_cnt = 0;
- int error;
+ int error = 0;
*start_lsn = 0;
@@ -2506,13 +2552,17 @@ next_lv:
ASSERT(len == 0);
+ spin_lock(&log->l_icloglock);
xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
- if (!commit_iclog)
- return xlog_state_release_iclog(log, iclog);
+ if (commit_iclog) {
+ ASSERT(flags & XLOG_COMMIT_TRANS);
+ *commit_iclog = iclog;
+ } else {
+ error = xlog_state_release_iclog(log, iclog);
+ }
+ spin_unlock(&log->l_icloglock);
- ASSERT(flags & XLOG_COMMIT_TRANS);
- *commit_iclog = iclog;
- return 0;
+ return error;
}
@@ -2548,7 +2598,7 @@ xlog_state_clean_iclog(
int changed = 0;
/* Prepare the completed iclog. */
- if (!(dirty_iclog->ic_state & XLOG_STATE_IOERROR))
+ if (dirty_iclog->ic_state != XLOG_STATE_IOERROR)
dirty_iclog->ic_state = XLOG_STATE_DIRTY;
/* Walk all the iclogs to update the ordered active state. */
@@ -2639,7 +2689,8 @@ xlog_get_lowest_lsn(
xfs_lsn_t lowest_lsn = 0, lsn;
do {
- if (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))
+ if (iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_DIRTY)
continue;
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
@@ -2699,61 +2750,48 @@ static bool
xlog_state_iodone_process_iclog(
struct xlog *log,
struct xlog_in_core *iclog,
- struct xlog_in_core *completed_iclog,
bool *ioerror)
{
xfs_lsn_t lowest_lsn;
xfs_lsn_t header_lsn;
- /* Skip all iclogs in the ACTIVE & DIRTY states */
- if (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))
+ switch (iclog->ic_state) {
+ case XLOG_STATE_ACTIVE:
+ case XLOG_STATE_DIRTY:
+ /*
+ * Skip all iclogs in the ACTIVE & DIRTY states:
+ */
return false;
-
- /*
- * Between marking a filesystem SHUTDOWN and stopping the log, we do
- * flush all iclogs to disk (if there wasn't a log I/O error). So, we do
- * want things to go smoothly in case of just a SHUTDOWN w/o a
- * LOG_IO_ERROR.
- */
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ case XLOG_STATE_IOERROR:
+ /*
+ * Between marking a filesystem SHUTDOWN and stopping the log,
+ * we do flush all iclogs to disk (if there wasn't a log I/O
+ * error). So, we do want things to go smoothly in case of just
+ * a SHUTDOWN w/o a LOG_IO_ERROR.
+ */
*ioerror = true;
return false;
- }
-
- /*
- * Can only perform callbacks in order. Since this iclog is not in the
- * DONE_SYNC/ DO_CALLBACK state, we skip the rest and just try to clean
- * up. If we set our iclog to DO_CALLBACK, we will not process it when
- * we retry since a previous iclog is in the CALLBACK and the state
- * cannot change since we are holding the l_icloglock.
- */
- if (!(iclog->ic_state &
- (XLOG_STATE_DONE_SYNC | XLOG_STATE_DO_CALLBACK))) {
- if (completed_iclog &&
- (completed_iclog->ic_state == XLOG_STATE_DONE_SYNC)) {
- completed_iclog->ic_state = XLOG_STATE_DO_CALLBACK;
- }
+ case XLOG_STATE_DONE_SYNC:
+ /*
+ * Now that we have an iclog that is in the DONE_SYNC state, do
+ * one more check here to see if we have chased our tail around.
+ * If this is not the lowest lsn iclog, then we will leave it
+ * for another completion to process.
+ */
+ header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
+ lowest_lsn = xlog_get_lowest_lsn(log);
+ if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
+ return false;
+ xlog_state_set_callback(log, iclog, header_lsn);
+ return false;
+ default:
+ /*
+ * Can only perform callbacks in order. Since this iclog is not
+ * in the DONE_SYNC state, we skip the rest and just try to
+ * clean up.
+ */
return true;
}
-
- /*
- * We now have an iclog that is in either the DO_CALLBACK or DONE_SYNC
- * states. The other states (WANT_SYNC, SYNCING, or CALLBACK were caught
- * by the above if and are going to clean (i.e. we aren't doing their
- * callbacks) see the above if.
- *
- * We will do one more check here to see if we have chased our tail
- * around. If this is not the lowest lsn iclog, then we will leave it
- * for another completion to process.
- */
- header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
- lowest_lsn = xlog_get_lowest_lsn(log);
- if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, header_lsn) < 0)
- return false;
-
- xlog_state_set_callback(log, iclog, header_lsn);
- return false;
-
}
/*
@@ -2770,6 +2808,8 @@ xlog_state_do_iclog_callbacks(
struct xlog *log,
struct xlog_in_core *iclog,
bool aborted)
+ __releases(&log->l_icloglock)
+ __acquires(&log->l_icloglock)
{
spin_unlock(&log->l_icloglock);
spin_lock(&iclog->ic_callback_lock);
@@ -2792,57 +2832,13 @@ xlog_state_do_iclog_callbacks(
spin_unlock(&iclog->ic_callback_lock);
}
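/*
 * Editor's sketch of the sparse annotations added to
 * xlog_state_do_iclog_callbacks(): a function that temporarily drops a lock
 * it was entered with declares that fact, so static analysis can verify its
 * callers. The empty macros make this compile outside the kernel.
 */
#define __releases(x)
#define __acquires(x)

struct logctx;
extern void lock_log(struct logctx *log);
extern void unlock_log(struct logctx *log);
extern void run_callbacks(struct logctx *log);

static void do_callbacks(struct logctx *log)
	__releases(log) __acquires(log)
{
	unlock_log(log);	/* callbacks may sleep; drop the lock */
	run_callbacks(log);
	lock_log(log);		/* restore the caller's locking state */
}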
-#ifdef DEBUG
-/*
- * Make one last gasp attempt to see if iclogs are being left in limbo. If the
- * above loop finds an iclog earlier than the current iclog and in one of the
- * syncing states, the current iclog is put into DO_CALLBACK and the callbacks
- * are deferred to the completion of the earlier iclog. Walk the iclogs in order
- * and make sure that no iclog is in DO_CALLBACK unless an earlier iclog is in
- * one of the syncing states.
- *
- * Note that SYNCING|IOERROR is a valid state so we cannot just check for
- * ic_state == SYNCING.
- */
-static void
-xlog_state_callback_check_state(
- struct xlog *log)
-{
- struct xlog_in_core *first_iclog = log->l_iclog;
- struct xlog_in_core *iclog = first_iclog;
-
- do {
- ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
- /*
- * Terminate the loop if iclogs are found in states
- * which will cause other threads to clean up iclogs.
- *
- * SYNCING - i/o completion will go through logs
- * DONE_SYNC - interrupt thread should be waiting for
- * l_icloglock
- * IOERROR - give up hope all ye who enter here
- */
- if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
- iclog->ic_state & XLOG_STATE_SYNCING ||
- iclog->ic_state == XLOG_STATE_DONE_SYNC ||
- iclog->ic_state == XLOG_STATE_IOERROR )
- break;
- iclog = iclog->ic_next;
- } while (first_iclog != iclog);
-}
-#else
-#define xlog_state_callback_check_state(l) ((void)0)
-#endif
-
STATIC void
xlog_state_do_callback(
struct xlog *log,
- bool aborted,
- struct xlog_in_core *ciclog)
+ bool aborted)
{
struct xlog_in_core *iclog;
struct xlog_in_core *first_iclog;
- bool did_callbacks = false;
bool cycled_icloglock;
bool ioerror;
int flushcnt = 0;
@@ -2866,11 +2862,11 @@ xlog_state_do_callback(
do {
if (xlog_state_iodone_process_iclog(log, iclog,
- ciclog, &ioerror))
+ &ioerror))
break;
- if (!(iclog->ic_state &
- (XLOG_STATE_CALLBACK | XLOG_STATE_IOERROR))) {
+ if (iclog->ic_state != XLOG_STATE_CALLBACK &&
+ iclog->ic_state != XLOG_STATE_IOERROR) {
iclog = iclog->ic_next;
continue;
}
@@ -2886,8 +2882,6 @@ xlog_state_do_callback(
iclog = iclog->ic_next;
} while (first_iclog != iclog);
- did_callbacks |= cycled_icloglock;
-
if (repeats > 5000) {
flushcnt += repeats;
repeats = 0;
@@ -2897,10 +2891,8 @@ xlog_state_do_callback(
}
} while (!ioerror && cycled_icloglock);
- if (did_callbacks)
- xlog_state_callback_check_state(log);
-
- if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
+ if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE ||
+ log->l_iclog->ic_state == XLOG_STATE_IOERROR)
wake_up_all(&log->l_flush_wait);
spin_unlock(&log->l_icloglock);
@@ -2929,8 +2921,6 @@ xlog_state_done_syncing(
spin_lock(&log->l_icloglock);
- ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
- iclog->ic_state == XLOG_STATE_IOERROR);
ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
/*
@@ -2939,8 +2929,10 @@ xlog_state_done_syncing(
* and none should ever be attempted to be written to disk
* again.
*/
- if (iclog->ic_state != XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_SYNCING)
iclog->ic_state = XLOG_STATE_DONE_SYNC;
+ else
+ ASSERT(iclog->ic_state == XLOG_STATE_IOERROR);
/*
* Someone could be sleeping prior to writing out the next
@@ -2949,7 +2941,7 @@ xlog_state_done_syncing(
*/
wake_up_all(&iclog->ic_write_wait);
spin_unlock(&log->l_icloglock);
- xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
+ xlog_state_do_callback(log, aborted); /* also cleans log */
} /* xlog_state_done_syncing */
@@ -2983,7 +2975,6 @@ xlog_state_get_iclog_space(
int log_offset;
xlog_rec_header_t *head;
xlog_in_core_t *iclog;
- int error;
restart:
spin_lock(&log->l_icloglock);
@@ -3032,24 +3023,22 @@ restart:
* can fit into remaining data section.
*/
if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
+ int error = 0;
+
xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
/*
- * If I'm the only one writing to this iclog, sync it to disk.
- * We need to do an atomic compare and decrement here to avoid
- * racing with concurrent atomic_dec_and_lock() calls in
+ * If we are the only one writing to this iclog, sync it to
+ * disk. We need to do an atomic compare and decrement here to
+ * avoid racing with concurrent atomic_dec_and_lock() calls in
* xlog_state_release_iclog() when there is more than one
* reference to the iclog.
*/
- if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
- /* we are the only one */
- spin_unlock(&log->l_icloglock);
+ if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
error = xlog_state_release_iclog(log, iclog);
- if (error)
- return error;
- } else {
- spin_unlock(&log->l_icloglock);
- }
+ spin_unlock(&log->l_icloglock);
+ if (error)
+ return error;
goto restart;
}
@@ -3161,60 +3150,6 @@ xlog_ungrant_log_space(
}
/*
- * Flush iclog to disk if this is the last reference to the given iclog and
- * the WANT_SYNC bit is set.
- *
- * When this function is entered, the iclog is not necessarily in the
- * WANT_SYNC state. It may be sitting around waiting to get filled.
- *
- *
- */
-STATIC int
-xlog_state_release_iclog(
- struct xlog *log,
- struct xlog_in_core *iclog)
-{
- int sync = 0; /* do we sync? */
-
- if (iclog->ic_state & XLOG_STATE_IOERROR)
- return -EIO;
-
- ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
- if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
- return 0;
-
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
- spin_unlock(&log->l_icloglock);
- return -EIO;
- }
- ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_WANT_SYNC);
-
- if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
- /* update tail before writing to iclog */
- xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
- sync++;
- iclog->ic_state = XLOG_STATE_SYNCING;
- iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
- xlog_verify_tail_lsn(log, iclog, tail_lsn);
- /* cycle incremented when incrementing curr_block */
- }
- spin_unlock(&log->l_icloglock);
-
- /*
- * We let the log lock go, so it's possible that we hit a log I/O
- * error or some other SHUTDOWN condition that marks the iclog
- * as XLOG_STATE_IOERROR before the bwrite. However, we know that
- * this iclog has consistent data, so we ignore IOERROR
- * flags after this point.
- */
- if (sync)
- xlog_sync(log, iclog);
- return 0;
-} /* xlog_state_release_iclog */
-
-
-/*
* This routine will mark the current iclog in the ring as WANT_SYNC
* and move the current iclog pointer to the next iclog in the ring.
* When this routine is called from xlog_state_get_iclog_space(), the
@@ -3307,7 +3242,7 @@ xfs_log_force(
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
if (iclog->ic_state == XLOG_STATE_DIRTY ||
@@ -3337,12 +3272,9 @@ xfs_log_force(
atomic_inc(&iclog->ic_refcnt);
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
xlog_state_switch_iclogs(log, iclog, 0);
- spin_unlock(&log->l_icloglock);
-
if (xlog_state_release_iclog(log, iclog))
- return -EIO;
+ goto out_error;
- spin_lock(&log->l_icloglock);
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn ||
iclog->ic_state == XLOG_STATE_DIRTY)
goto out_unlock;
@@ -3367,11 +3299,11 @@ xfs_log_force(
if (!(flags & XFS_LOG_SYNC))
goto out_unlock;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
XFS_STATS_INC(mp, xs_log_force_sleep);
xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
return 0;
@@ -3396,7 +3328,7 @@ __xfs_log_force_lsn(
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
@@ -3425,10 +3357,8 @@ __xfs_log_force_lsn(
* will go out then.
*/
if (!already_slept &&
- (iclog->ic_prev->ic_state &
- (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
- ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
-
+ (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
XFS_STATS_INC(mp, xs_log_force_sleep);
xlog_wait(&iclog->ic_prev->ic_write_wait,
@@ -3437,24 +3367,23 @@ __xfs_log_force_lsn(
}
atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
- spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))
- return -EIO;
+ goto out_error;
if (log_flushed)
*log_flushed = 1;
- spin_lock(&log->l_icloglock);
}
if (!(flags & XFS_LOG_SYNC) ||
- (iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY)))
+ (iclog->ic_state == XLOG_STATE_ACTIVE ||
+ iclog->ic_state == XLOG_STATE_DIRTY))
goto out_unlock;
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
goto out_error;
XFS_STATS_INC(mp, xs_log_force_sleep);
xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
- if (iclog->ic_state & XLOG_STATE_IOERROR)
+ if (iclog->ic_state == XLOG_STATE_IOERROR)
return -EIO;
return 0;
@@ -3517,8 +3446,8 @@ xlog_state_want_sync(
if (iclog->ic_state == XLOG_STATE_ACTIVE) {
xlog_state_switch_iclogs(log, iclog, 0);
} else {
- ASSERT(iclog->ic_state &
- (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
+ ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
+ iclog->ic_state == XLOG_STATE_IOERROR);
}
}
@@ -3539,7 +3468,7 @@ xfs_log_ticket_put(
{
ASSERT(atomic_read(&ticket->t_ref) > 0);
if (atomic_dec_and_test(&ticket->t_ref))
- kmem_zone_free(xfs_log_ticket_zone, ticket);
+ kmem_cache_free(xfs_log_ticket_zone, ticket);
}
xlog_ticket_t *
@@ -3895,7 +3824,7 @@ xlog_state_ioerror(
xlog_in_core_t *iclog, *ic;
iclog = log->l_iclog;
- if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
+ if (iclog->ic_state != XLOG_STATE_IOERROR) {
/*
* Mark all the incore logs IOERROR.
* From now on, no log flushes will result.
@@ -3955,7 +3884,7 @@ xfs_log_force_umount(
* Somebody could've already done the hard work for us.
* No need to get locks for this.
*/
- if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
+ if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) {
ASSERT(XLOG_FORCED_SHUTDOWN(log));
return 1;
}
@@ -4006,21 +3935,8 @@ xfs_log_force_umount(
spin_lock(&log->l_cilp->xc_push_lock);
wake_up_all(&log->l_cilp->xc_commit_wait);
spin_unlock(&log->l_cilp->xc_push_lock);
- xlog_state_do_callback(log, true, NULL);
-
-#ifdef XFSERRORDEBUG
- {
- xlog_in_core_t *iclog;
+ xlog_state_do_callback(log, true);
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- do {
- ASSERT(iclog->ic_callback == 0);
- iclog = iclog->ic_next;
- } while (iclog != log->l_iclog);
- spin_unlock(&log->l_icloglock);
- }
-#endif
/* return non-zero if log IOERROR transition had already happened */
return retval;
}
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index ef652abd112c..48435cf2aa16 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -179,7 +179,7 @@ xlog_cil_alloc_shadow_bufs(
/*
* We free and allocate here as a realloc would copy
- * unecessary data. We don't use kmem_zalloc() for the
+ * unnecessary data. We don't use kmem_zalloc() for the
* same reason - we don't need to zero the data area in
* the buffer, only the log vector header and the iovec
* storage.
@@ -682,7 +682,7 @@ xlog_cil_push(
}
- /* check for a previously pushed seqeunce */
+ /* check for a previously pushed sequence */
if (push_seq < cil->xc_ctx->sequence) {
spin_unlock(&cil->xc_push_lock);
goto out_skip;
@@ -847,7 +847,7 @@ restart:
goto out_abort;
spin_lock(&commit_iclog->ic_callback_lock);
- if (commit_iclog->ic_state & XLOG_STATE_IOERROR) {
+ if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
spin_unlock(&commit_iclog->ic_callback_lock);
goto out_abort;
}
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index b880c23cb6e4..b192c5a9f9fd 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -40,17 +40,15 @@ static inline uint xlog_get_client_id(__be32 i)
/*
* In core log state
*/
-#define XLOG_STATE_ACTIVE 0x0001 /* Current IC log being written to */
-#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
-#define XLOG_STATE_SYNCING 0x0004 /* This IC log is syncing */
-#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
-#define XLOG_STATE_DO_CALLBACK \
- 0x0010 /* Process callback functions */
-#define XLOG_STATE_CALLBACK 0x0020 /* Callback functions now */
-#define XLOG_STATE_DIRTY 0x0040 /* Dirty IC log, not ready for ACTIVE status*/
-#define XLOG_STATE_IOERROR 0x0080 /* IO error happened in sync'ing log */
-#define XLOG_STATE_ALL 0x7FFF /* All possible valid flags */
-#define XLOG_STATE_NOTUSED 0x8000 /* This IC log not being used */
+enum xlog_iclog_state {
+ XLOG_STATE_ACTIVE, /* Current IC log being written to */
+ XLOG_STATE_WANT_SYNC, /* Want to sync this iclog; no more writes */
+ XLOG_STATE_SYNCING, /* This IC log is syncing */
+ XLOG_STATE_DONE_SYNC, /* Done syncing to disk */
+ XLOG_STATE_CALLBACK, /* Callback functions now */
+ XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */
+ XLOG_STATE_IOERROR, /* IO error happened in sync'ing log */
+};
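/*
 * Editor's sketch of the representation change made by this hunk: mutually
 * exclusive states move from bit flags to an enum, so tests become equality
 * comparisons and impossible combinations (e.g. ACTIVE|DIRTY) can no longer
 * be expressed. Names are hypothetical.
 */
#include <stdbool.h>

enum obj_state {
	STATE_ACTIVE,
	STATE_SYNCING,
	STATE_DIRTY,
};

static bool is_syncing(enum obj_state s)
{
	return s == STATE_SYNCING;	/* was: state & STATE_SYNCING_BIT */
}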
/*
* Flags to log ticket
@@ -179,8 +177,6 @@ typedef struct xlog_ticket {
* - ic_next is the pointer to the next iclog in the ring.
* - ic_log is a pointer back to the global log structure.
* - ic_size is the full size of the log buffer, minus the cycle headers.
- * - ic_io_size is the size of the currently pending log buffer write, which
- * might be smaller than ic_size
* - ic_offset is the current number of bytes written to in this iclog.
* - ic_refcnt is bumped when someone is writing to the log.
* - ic_state is the state of the iclog.
@@ -205,9 +201,8 @@ typedef struct xlog_in_core {
struct xlog_in_core *ic_prev;
struct xlog *ic_log;
u32 ic_size;
- u32 ic_io_size;
u32 ic_offset;
- unsigned short ic_state;
+ enum xlog_iclog_state ic_state;
char *ic_datap; /* pointer to iclog data */
/* Callback structures need their own cacheline */
@@ -399,8 +394,6 @@ struct xlog {
/* The following field are used for debugging; need to hold icloglock */
#ifdef DEBUG
void *l_iclog_bak[XLOG_MAX_ICLOGS];
- /* log record crc error injection factor */
- uint32_t l_badcrc_factor;
#endif
/* log recovery lsn tracking (for buffer submission */
xfs_lsn_t l_recovery_lsn;
@@ -542,7 +535,11 @@ xlog_cil_force(struct xlog *log)
* by a spinlock. This matches the semantics of all the wait queues used in the
* log code.
*/
-static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+static inline void
+xlog_wait(
+ struct wait_queue_head *wq,
+ struct spinlock *lock)
+ __releases(lock)
{
DECLARE_WAITQUEUE(wait, current);
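The hunk above replaces the iclog state bit flags with a mutually exclusive enum, which is why call sites elsewhere in this series switch from bit tests (ic_state & XLOG_STATE_IOERROR) to equality (ic_state == XLOG_STATE_IOERROR). A minimal userspace sketch of the difference follows; the names are illustrative, not the kernel's:

    #include <stdio.h>

    /* Model of the new scheme: exactly one state at a time, no flag
     * combinations. */
    enum iclog_state {
    	STATE_ACTIVE,
    	STATE_WANT_SYNC,
    	STATE_SYNCING,
    	STATE_DONE_SYNC,
    	STATE_CALLBACK,
    	STATE_DIRTY,
    	STATE_IOERROR,
    };

    int main(void)
    {
    	enum iclog_state state = STATE_IOERROR;

    	/* The old bitmask style would have been
    	 * "state & XLOG_STATE_IOERROR", which also permitted impossible
    	 * multi-state values. Equality on an enum is unambiguous and
    	 * lets the compiler check switch coverage. */
    	if (state == STATE_IOERROR)
    		printf("iclog hit an I/O error\n");
    	return 0;
    }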
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index c1a514ffff55..99ec3fba4548 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -103,10 +103,9 @@ xlog_alloc_buffer(
* Pass log block 0 since we don't have an addr yet, buffer will be
* verified on read.
*/
- if (!xlog_verify_bno(log, 0, nbblks)) {
+ if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return NULL;
}
@@ -152,11 +151,10 @@ xlog_do_io(
{
int error;
- if (!xlog_verify_bno(log, blk_no, nbblks)) {
+ if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
xfs_warn(log->l_mp,
"Invalid log block/length (0x%llx, 0x%x) for buffer",
blk_no, nbblks);
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
return -EFSCORRUPTED;
}
@@ -244,19 +242,17 @@ xlog_header_check_recover(
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
* a dirty log created in IRIX.
*/
- if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
+ if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
xfs_warn(mp,
"dirty log written in incompatible format - can't recover");
xlog_header_check_dump(mp, head);
- XFS_ERROR_REPORT("xlog_header_check_recover(1)",
- XFS_ERRLEVEL_HIGH, mp);
return -EFSCORRUPTED;
- } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
+ }
+ if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
+ &head->h_fs_uuid))) {
xfs_warn(mp,
"dirty log entry has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
- XFS_ERROR_REPORT("xlog_header_check_recover(2)",
- XFS_ERRLEVEL_HIGH, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -279,11 +275,10 @@ xlog_header_check_mount(
* by IRIX and continue.
*/
xfs_warn(mp, "null uuid in log - IRIX style log");
- } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
+ } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
+ &head->h_fs_uuid))) {
xfs_warn(mp, "log has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head);
- XFS_ERROR_REPORT("xlog_header_check_mount",
- XFS_ERRLEVEL_HIGH, mp);
return -EFSCORRUPTED;
}
return 0;
@@ -471,7 +466,7 @@ xlog_find_verify_log_record(
xfs_warn(log->l_mp,
"Log inconsistent (didn't find previous header)");
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
goto out;
}
@@ -1347,10 +1342,11 @@ xlog_find_tail(
error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
&rhead_blk, &rhead, &wrapped);
if (error < 0)
- return error;
+ goto done;
if (!error) {
xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
- return -EIO;
+ error = -EFSCORRUPTED;
+ goto done;
}
*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
@@ -1699,11 +1695,10 @@ xlog_clear_stale_blocks(
* the distance from the beginning of the log to the
* tail.
*/
- if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
- XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ head_block < tail_block ||
+ head_block >= log->l_logBBsize))
return -EFSCORRUPTED;
- }
tail_distance = tail_block + (log->l_logBBsize - head_block);
} else {
/*
@@ -1711,11 +1706,10 @@ xlog_clear_stale_blocks(
* so the distance from the head to the tail is just
* the tail block minus the head block.
*/
- if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
- XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ head_block >= tail_block ||
+ head_cycle != tail_cycle + 1))
return -EFSCORRUPTED;
- }
tail_distance = tail_block - head_block;
}
@@ -2135,13 +2129,11 @@ xlog_recover_do_inode_buffer(
*/
logged_nextp = item->ri_buf[item_index].i_addr +
next_unlinked_offset - reg_buf_offset;
- if (unlikely(*logged_nextp == 0)) {
+ if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
xfs_alert(mp,
"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
"Trying to replay bad (0) inode di_next_unlinked field.",
item, bp);
- XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
- XFS_ERRLEVEL_LOW, mp);
return -EFSCORRUPTED;
}
@@ -2576,6 +2568,7 @@ xlog_recover_do_reg_buffer(
int bit;
int nbits;
xfs_failaddr_t fa;
+ const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot);
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
@@ -2618,7 +2611,7 @@ xlog_recover_do_reg_buffer(
"XFS: NULL dquot in %s.", __func__);
goto next;
}
- if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
+ if (item->ri_buf[i].i_len < size_disk_dquot) {
xfs_alert(mp,
"XFS: dquot too small (%d) in %s.",
item->ri_buf[i].i_len, __func__);
@@ -2969,22 +2962,18 @@ xlog_recover_inode_pass2(
* Make sure the place we're flushing out to really looks
* like an inode!
*/
- if (unlikely(!xfs_verify_magic16(bp, dip->di_magic))) {
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) {
xfs_alert(mp,
"%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
__func__, dip, bp, in_f->ilf_ino);
- XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
- XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_release;
}
ldip = item->ri_buf[1].i_addr;
- if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
+ if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) {
xfs_alert(mp,
"%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
__func__, item, in_f->ilf_ino);
- XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
- XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_release;
}
@@ -3166,7 +3155,7 @@ xlog_recover_inode_pass2(
default:
xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
goto out_release;
}
}
@@ -3247,12 +3236,12 @@ xlog_recover_dquot_pass2(
recddq = item->ri_buf[1].i_addr;
if (recddq == NULL) {
xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
- if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
+ if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) {
xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
item->ri_buf[1].i_len, __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -3279,7 +3268,7 @@ xlog_recover_dquot_pass2(
if (fa) {
xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
dq_f->qlf_id, fa);
- return -EIO;
+ return -EFSCORRUPTED;
}
ASSERT(dq_f->qlf_len == 1);
@@ -3537,6 +3526,7 @@ xfs_cui_copy_format(
memcpy(dst_cui_fmt, src_cui_fmt, len);
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -3601,8 +3591,10 @@ xlog_recover_cud_pass2(
struct xfs_ail *ailp = log->l_ailp;
cud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format))
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
cui_id = cud_formatp->cud_cui_id;
/*
@@ -3654,6 +3646,7 @@ xfs_bui_copy_format(
memcpy(dst_bui_fmt, src_bui_fmt, len);
return 0;
}
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
}
@@ -3677,8 +3670,10 @@ xlog_recover_bui_pass2(
bui_formatp = item->ri_buf[0].i_addr;
- if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
+ if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
buip = xfs_bui_init(mp);
error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
if (error) {
@@ -3720,8 +3715,10 @@ xlog_recover_bud_pass2(
struct xfs_ail *ailp = log->l_ailp;
bud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format))
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
bui_id = bud_formatp->bud_bui_id;
/*
@@ -4018,7 +4015,7 @@ xlog_recover_commit_pass1(
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -4066,7 +4063,7 @@ xlog_recover_commit_pass2(
xfs_warn(log->l_mp, "%s: invalid item type (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -4187,7 +4184,7 @@ xlog_recover_add_to_cont_trans(
ASSERT(len <= sizeof(struct xfs_trans_header));
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
- return -EIO;
+ return -EFSCORRUPTED;
}
xlog_recover_add_item(&trans->r_itemq);
@@ -4243,13 +4240,13 @@ xlog_recover_add_to_trans(
xfs_warn(log->l_mp, "%s: bad header magic number",
__func__);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -4285,7 +4282,7 @@ xlog_recover_add_to_trans(
in_f->ilf_size);
ASSERT(0);
kmem_free(ptr);
- return -EIO;
+ return -EFSCORRUPTED;
}
item->ri_total = in_f->ilf_size;
@@ -4293,7 +4290,16 @@ xlog_recover_add_to_trans(
kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
0);
}
- ASSERT(item->ri_total > item->ri_cnt);
+
+ if (item->ri_total <= item->ri_cnt) {
+ xfs_warn(log->l_mp,
+ "log item region count (%d) overflowed size (%d)",
+ item->ri_cnt, item->ri_total);
+ ASSERT(0);
+ kmem_free(ptr);
+ return -EFSCORRUPTED;
+ }
+
/* Description region is ri_buf[0] */
item->ri_buf[item->ri_cnt].i_addr = ptr;
item->ri_buf[item->ri_cnt].i_len = len;
@@ -4380,7 +4386,7 @@ xlog_recovery_process_trans(
default:
xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
ASSERT(0);
- error = -EIO;
+ error = -EFSCORRUPTED;
break;
}
if (error || freeit)
@@ -4460,7 +4466,7 @@ xlog_recover_process_ophdr(
xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
__func__, ohead->oh_clientid);
ASSERT(0);
- return -EIO;
+ return -EFSCORRUPTED;
}
/*
@@ -4470,7 +4476,7 @@ xlog_recover_process_ophdr(
if (dp + len > end) {
xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
WARN_ON(1);
- return -EIO;
+ return -EFSCORRUPTED;
}
trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
@@ -5172,8 +5178,10 @@ xlog_recover_process(
* If the filesystem is CRC enabled, this mismatch becomes a
* fatal log corruption failure.
*/
- if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
+ if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
return -EFSCORRUPTED;
+ }
}
xlog_unpack_data(rhead, dp, log);
@@ -5190,31 +5198,25 @@ xlog_valid_rec_header(
{
int hlen;
- if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
- XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
return -EFSCORRUPTED;
- }
- if (unlikely(
- (!rhead->h_version ||
- (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
+ if (XFS_IS_CORRUPT(log->l_mp,
+ (!rhead->h_version ||
+ (be32_to_cpu(rhead->h_version) &
+ (~XLOG_VERSION_OKBITS))))) {
xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
__func__, be32_to_cpu(rhead->h_version));
- return -EIO;
+ return -EFSCORRUPTED;
}
/* LR body must have data or it wouldn't have been written */
hlen = be32_to_cpu(rhead->h_len);
- if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
- XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > INT_MAX))
return -EFSCORRUPTED;
- }
- if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
- XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
- XFS_ERRLEVEL_LOW, log->l_mp);
+ if (XFS_IS_CORRUPT(log->l_mp,
+ blkno > log->l_logBBsize || blkno > INT_MAX))
return -EFSCORRUPTED;
- }
return 0;
}
@@ -5296,8 +5298,12 @@ xlog_do_recovery_pass(
"invalid iclog size (%d bytes), using lsunit (%d bytes)",
h_size, log->l_mp->m_logbsize);
h_size = log->l_mp->m_logbsize;
- } else
- return -EFSCORRUPTED;
+ } else {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW,
+ log->l_mp);
+ error = -EFSCORRUPTED;
+ goto bread_err1;
+ }
}
if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
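Most hunks in this file collapse the open-coded unlikely()/XFS_ERROR_REPORT()/return -EFSCORRUPTED sequence into a single XFS_IS_CORRUPT() check. The real macro lives in fs/xfs/xfs_error.h and is not part of this diff; the userspace sketch below only models its shape: report once at the call site, then yield the tested condition so the caller can branch on it.

    #include <stdio.h>

    /* Illustrative stand-in for XFS_IS_CORRUPT(): log the corruption
     * with its location, then evaluate to the condition itself. */
    #define IS_CORRUPT(fsname, cond) \
    	((cond) ? (fprintf(stderr, "%s: metadata corruption in %s, line %d\n", \
    			   (fsname), __func__, __LINE__), 1) : 0)

    static int check_header_len(const char *fsname, int hlen)
    {
    	if (IS_CORRUPT(fsname, hlen <= 0))
    		return -117;	/* stands in for -EFSCORRUPTED */
    	return 0;
    }

    int main(void)
    {
    	return check_header_len("demo-fs", 0) ? 1 : 0;
    }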
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 9804efe525a9..e0f9d3b6abe9 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -20,8 +20,8 @@ __xfs_printk(
const struct xfs_mount *mp,
struct va_format *vaf)
{
- if (mp && mp->m_fsname) {
- printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ if (mp && mp->m_super) {
+ printk("%sXFS (%s): %pV\n", level, mp->m_super->s_id, vaf);
return;
}
printk("%sXFS: %pV\n", level, vaf);
@@ -86,17 +86,25 @@ xfs_alert_tag(
}
void
-asswarn(char *expr, char *file, int line)
+asswarn(
+ struct xfs_mount *mp,
+ char *expr,
+ char *file,
+ int line)
{
- xfs_warn(NULL, "Assertion failed: %s, file: %s, line: %d",
+ xfs_warn(mp, "Assertion failed: %s, file: %s, line: %d",
expr, file, line);
WARN_ON(1);
}
void
-assfail(char *expr, char *file, int line)
+assfail(
+ struct xfs_mount *mp,
+ char *expr,
+ char *file,
+ int line)
{
- xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d",
+ xfs_emerg(mp, "Assertion failed: %s, file: %s, line: %d",
expr, file, line);
if (xfs_globals.bug_on_assert)
BUG();
@@ -105,7 +113,7 @@ assfail(char *expr, char *file, int line)
}
void
-xfs_hex_dump(void *p, int length)
+xfs_hex_dump(const void *p, int length)
{
print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
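assfail() and asswarn() now take the mount they fired on, so assertion messages carry the filesystem identifier instead of a bare NULL. A hedged sketch of threading that context through an ASSERT-style wrapper; the real XFS ASSERT macro is defined in headers outside this diff, and these names are made up:

    #include <stdio.h>

    struct mount { const char *id; };

    static void my_assfail(struct mount *mp, const char *expr,
    		       const char *file, int line)
    {
    	fprintf(stderr, "(%s): Assertion failed: %s, file: %s, line: %d\n",
    		mp ? mp->id : "?", expr, file, line);
    }

    /* Passing the context explicitly lets the message identify which
     * filesystem tripped the assert. */
    #define MY_ASSERT(mp, expr) \
    	((expr) ? (void)0 : my_assfail((mp), #expr, __FILE__, __LINE__))

    int main(void)
    {
    	struct mount m = { .id = "sda1" };
    	MY_ASSERT(&m, 1 + 1 == 3);	/* prints a warning, does not abort */
    	return 0;
    }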
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 34447dca97d1..0b05e10995a0 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -57,9 +57,9 @@ do { \
#define xfs_debug_ratelimited(dev, fmt, ...) \
xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
-extern void assfail(char *expr, char *f, int l);
-extern void asswarn(char *expr, char *f, int l);
+void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
+void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
-extern void xfs_hex_dump(void *p, int length);
+extern void xfs_hex_dump(const void *p, int length);
#endif /* __XFS_MESSAGE_H */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ba5b6f3b2b88..fca65109cf24 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -426,45 +426,6 @@ xfs_update_alignment(xfs_mount_t *mp)
}
/*
- * Set the default minimum read and write sizes unless
- * already specified in a mount option.
- * We use smaller I/O sizes when the file system
- * is being used for NFS service (wsync mount option).
- */
-STATIC void
-xfs_set_rw_sizes(xfs_mount_t *mp)
-{
- xfs_sb_t *sbp = &(mp->m_sb);
- int readio_log, writeio_log;
-
- if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
- if (mp->m_flags & XFS_MOUNT_WSYNC) {
- readio_log = XFS_WSYNC_READIO_LOG;
- writeio_log = XFS_WSYNC_WRITEIO_LOG;
- } else {
- readio_log = XFS_READIO_LOG_LARGE;
- writeio_log = XFS_WRITEIO_LOG_LARGE;
- }
- } else {
- readio_log = mp->m_readio_log;
- writeio_log = mp->m_writeio_log;
- }
-
- if (sbp->sb_blocklog > readio_log) {
- mp->m_readio_log = sbp->sb_blocklog;
- } else {
- mp->m_readio_log = readio_log;
- }
- mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
- if (sbp->sb_blocklog > writeio_log) {
- mp->m_writeio_log = sbp->sb_blocklog;
- } else {
- mp->m_writeio_log = writeio_log;
- }
- mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
-}
-
-/*
* precalculate the low space thresholds for dynamic speculative preallocation.
*/
void
@@ -706,7 +667,8 @@ xfs_mountfs(
/* enable fail_at_unmount as default */
mp->m_fail_unmount = true;
- error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
+ error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
+ NULL, mp->m_super->s_id);
if (error)
goto out;
@@ -728,9 +690,12 @@ xfs_mountfs(
goto out_remove_errortag;
/*
- * Set the minimum read and write sizes
+ * Update the preferred write size based on the information from the
+ * on-disk superblock.
*/
- xfs_set_rw_sizes(mp);
+ mp->m_allocsize_log =
+ max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
+ mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
/* set the low space thresholds for dynamic preallocation */
xfs_set_low_space_thresholds(mp);
@@ -796,9 +761,8 @@ xfs_mountfs(
goto out_free_dir;
}
- if (!sbp->sb_logblocks) {
+ if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
xfs_warn(mp, "no log defined");
- XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto out_free_perag;
}
@@ -836,12 +800,10 @@ xfs_mountfs(
ASSERT(rip != NULL);
- if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
+ if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
xfs_warn(mp, "corrupted root inode %llu: not a directory",
(unsigned long long)rip->i_ino);
xfs_iunlock(rip, XFS_ILOCK_EXCL);
- XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
- mp);
error = -EFSCORRUPTED;
goto out_rele_rip;
}
@@ -1277,7 +1239,7 @@ xfs_mod_fdblocks(
printk_once(KERN_WARNING
"Filesystem \"%s\": reserve blocks depleted! "
"Consider increasing reserve pool size.",
- mp->m_fsname);
+ mp->m_super->s_id);
fdblocks_enospc:
spin_unlock(&mp->m_sb_lock);
return -ENOSPC;
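xfs_set_rw_sizes() is replaced by two lines that clamp the preferred allocation size to at least one filesystem block and convert it to block units. Worked numbers, using the historical 64 KiB default from the defines removed in the xfs_mount.h hunks below: with sb_blocklog = 12 (4 KiB blocks) and m_allocsize_log = 16, m_allocsize_blocks = 1 << (16 - 12) = 16 blocks. A standalone sketch of the same arithmetic, with example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t sb_blocklog = 12;	/* 4 KiB filesystem blocks (example) */
    	uint32_t allocsize_log = 16;	/* 64 KiB preferred allocation */

    	/* Same clamp as the patch's max_t(): never below one fs block. */
    	if (allocsize_log < sb_blocklog)
    		allocsize_log = sb_blocklog;
    	uint32_t allocsize_blocks = 1U << (allocsize_log - sb_blocklog);

    	printf("allocsize = %u blocks (%u bytes)\n",
    	       allocsize_blocks, allocsize_blocks << sb_blocklog);
    	return 0;
    }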
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index fdb60e09a9c5..88ab09ed29e7 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -9,10 +9,8 @@
struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
-struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
-struct xfs_dir_ops;
struct xfs_da_geometry;
/* dynamic preallocation free space thresholds, 5% down to 1% */
@@ -59,7 +57,6 @@ struct xfs_error_cfg {
typedef struct xfs_mount {
struct super_block *m_super;
- xfs_tid_t m_tid; /* next unused tid for fs */
/*
* Bitsets of per-fs metadata that have been checked and/or are sick.
@@ -89,8 +86,6 @@ typedef struct xfs_mount {
struct percpu_counter m_delalloc_blks;
struct xfs_buf *m_sb_bp; /* buffer for superblock */
- char *m_fsname; /* filesystem name */
- int m_fsname_len; /* strlen of fs name */
char *m_rtname; /* realtime device name */
char *m_logname; /* external log device name */
int m_bsize; /* fs logical block size */
@@ -98,10 +93,8 @@ typedef struct xfs_mount {
xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
spinlock_t m_agirotor_lock;/* .. and lock protecting it */
xfs_agnumber_t m_maxagi; /* highest inode alloc group */
- uint m_readio_log; /* min read size log bytes */
- uint m_readio_blocks; /* min read size blocks */
- uint m_writeio_log; /* min write size log bytes */
- uint m_writeio_blocks; /* min write size blocks */
+ uint m_allocsize_log;/* min write size log bytes */
+ uint m_allocsize_blocks; /* min write size blocks */
struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */
struct xlog *m_log; /* log specific stuff */
@@ -159,10 +152,6 @@ typedef struct xfs_mount {
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
- const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */
- const struct xfs_dir_ops *m_dir_inode_ops; /* vector of dir inode ops */
- const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
- uint m_chsize; /* size of next field */
atomic_t m_active_trans; /* number trans frozen */
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct delayed_work m_reclaim_work; /* background inode reclaim */
@@ -229,7 +218,7 @@ typedef struct xfs_mount {
#define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */
#define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
-#define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */
+#define XFS_MOUNT_ALLOCSIZE (1ULL << 12) /* specified allocation size */
#define XFS_MOUNT_SMALL_INUMS (1ULL << 14) /* user wants 32bit inodes */
#define XFS_MOUNT_32BITINODES (1ULL << 15) /* inode32 allocator active */
#define XFS_MOUNT_NOUUID (1ULL << 16) /* ignore uuid during mount */
@@ -238,7 +227,7 @@ typedef struct xfs_mount {
* allocation */
#define XFS_MOUNT_RDONLY (1ULL << 20) /* read-only fs */
#define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */
-#define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred
+#define XFS_MOUNT_LARGEIO (1ULL << 22) /* report large preferred
* I/O size in stat() */
#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
allocator */
@@ -246,13 +235,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_DAX (1ULL << 62) /* TEST ONLY! */
-
-/*
- * Default minimum read and write sizes.
- */
-#define XFS_READIO_LOG_LARGE 16
-#define XFS_WRITEIO_LOG_LARGE 16
-
/*
* Max and min values for mount-option defined I/O
* preallocation sizes.
@@ -260,37 +242,6 @@ typedef struct xfs_mount {
#define XFS_MAX_IO_LOG 30 /* 1G */
#define XFS_MIN_IO_LOG PAGE_SHIFT
-/*
- * Synchronous read and write sizes. This should be
- * better for NFSv2 wsync filesystems.
- */
-#define XFS_WSYNC_READIO_LOG 15 /* 32k */
-#define XFS_WSYNC_WRITEIO_LOG 14 /* 16k */
-
-/*
- * Allow large block sizes to be reported to userspace programs if the
- * "largeio" mount option is used.
- *
- * If compatibility mode is specified, simply return the basic unit of caching
- * so that we don't get inefficient read/modify/write I/O from user apps.
- * Otherwise....
- *
- * If the underlying volume is a stripe, then return the stripe width in bytes
- * as the recommended I/O size. It is not a stripe and we've set a default
- * buffered I/O size, return that, otherwise return the compat default.
- */
-static inline unsigned long
-xfs_preferred_iosize(xfs_mount_t *mp)
-{
- if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)
- return PAGE_SIZE;
- return (mp->m_swidth ?
- (mp->m_swidth << mp->m_sb.sb_blocklog) :
- ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ?
- (1 << (int)max(mp->m_readio_log, mp->m_writeio_log)) :
- PAGE_SIZE));
-}
-
#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
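For reference, the deleted xfs_preferred_iosize() ladder restated with the nested ternaries flattened; the mount flags are reduced to plain ints here and the stripe width is taken already in bytes, so this is a readability aid under those assumptions, not the kernel function:

    #include <stdio.h>

    /* Old stat() I/O-size reporting rule, restated. */
    static unsigned long preferred_iosize(int compat, unsigned long swidth_bytes,
    				      int dflt_iosize, unsigned int io_log,
    				      unsigned long page_size)
    {
    	if (compat)		/* old default, cleared by "largeio" */
    		return page_size;
    	if (swidth_bytes)	/* striped volume: report stripe width */
    		return swidth_bytes;
    	if (dflt_iosize)	/* user set a buffered I/O size */
    		return 1UL << io_log;
    	return page_size;
    }

    int main(void)
    {
    	printf("%lu\n", preferred_iosize(0, 0, 1, 16, 4096));	/* 65536 */
    	return 0;
    }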
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index a339bd5fa260..bb3008d390aa 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -12,6 +12,7 @@
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_iomap.h"
+#include "xfs_pnfs.h"
/*
* Ensure that we do not have any outstanding pNFS layouts that can be used by
@@ -59,7 +60,7 @@ xfs_fs_get_uuid(
printk_once(KERN_NOTICE
"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
- mp->m_fsname);
+ mp->m_super->s_id);
if (*len < sizeof(uuid_t))
return -EINVAL;
@@ -142,43 +143,38 @@ xfs_fs_map_blocks(
lock_flags = xfs_ilock_data_map_shared(ip);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
&imap, &nimaps, bmapi_flags);
- xfs_iunlock(ip, lock_flags);
- if (error)
- goto out_unlock;
+ ASSERT(!nimaps || imap.br_startblock != DELAYSTARTBLOCK);
+
+ if (!error && write &&
+ (!nimaps || imap.br_startblock == HOLESTARTBLOCK)) {
+ if (offset + length > XFS_ISIZE(ip))
+ end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
+ else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
+ end_fsb = min(end_fsb, imap.br_startoff +
+ imap.br_blockcount);
+ xfs_iunlock(ip, lock_flags);
+
+ error = xfs_iomap_write_direct(ip, offset_fsb,
+ end_fsb - offset_fsb, &imap);
+ if (error)
+ goto out_unlock;
- if (write) {
- enum xfs_prealloc_flags flags = 0;
-
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-
- if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
- /*
- * xfs_iomap_write_direct() expects to take ownership of
- * the shared ilock.
- */
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_iomap_write_direct(ip, offset, length,
- &imap, nimaps);
- if (error)
- goto out_unlock;
-
- /*
- * Ensure the next transaction is committed
- * synchronously so that the blocks allocated and
- * handed out to the client are guaranteed to be
- * present even after a server crash.
- */
- flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
- }
-
- error = xfs_update_prealloc_flags(ip, flags);
+ /*
+ * Ensure the next transaction is committed synchronously so
+ * that the blocks allocated and handed out to the client are
+ * guaranteed to be present even after a server crash.
+ */
+ error = xfs_update_prealloc_flags(ip,
+ XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
if (error)
goto out_unlock;
+ } else {
+ xfs_iunlock(ip, lock_flags);
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, false);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
*device_generation = mp->m_generation;
return error;
out_unlock:
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index ecd8ce152ab1..0b0909657bad 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -22,6 +22,7 @@
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
+#include "xfs_error.h"
/*
* The global quota manager. There is only one of these for the entire
@@ -29,10 +30,10 @@
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
-STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
-STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
+STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
+STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
-STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
+STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
* We use the batch lookup interface to iterate over the dquots as it
@@ -243,14 +244,14 @@ xfs_qm_unmount_quotas(
STATIC int
xfs_qm_dqattach_one(
- xfs_inode_t *ip,
- xfs_dqid_t id,
- uint type,
- bool doalloc,
- xfs_dquot_t **IO_idqpp)
+ struct xfs_inode *ip,
+ xfs_dqid_t id,
+ uint type,
+ bool doalloc,
+ struct xfs_dquot **IO_idqpp)
{
- xfs_dquot_t *dqp;
- int error;
+ struct xfs_dquot *dqp;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
error = 0;
@@ -341,7 +342,7 @@ xfs_qm_dqattach_locked(
}
if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
- error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
+ error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
doalloc, &ip->i_pdquot);
if (error)
goto done;
@@ -539,12 +540,12 @@ xfs_qm_shrink_count(
STATIC void
xfs_qm_set_defquota(
- xfs_mount_t *mp,
- uint type,
- xfs_quotainfo_t *qinf)
+ struct xfs_mount *mp,
+ uint type,
+ struct xfs_quotainfo *qinf)
{
- xfs_dquot_t *dqp;
- struct xfs_def_quota *defq;
+ struct xfs_dquot *dqp;
+ struct xfs_def_quota *defq;
struct xfs_disk_dquot *ddqp;
int error;
@@ -642,7 +643,7 @@ xfs_qm_init_quotainfo(
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
- qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), 0);
+ qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
error = list_lru_init(&qinf->qi_lru);
if (error)
@@ -709,9 +710,9 @@ out_free_qinf:
*/
void
xfs_qm_destroy_quotainfo(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
- xfs_quotainfo_t *qi;
+ struct xfs_quotainfo *qi;
qi = mp->m_quotainfo;
ASSERT(qi != NULL);
@@ -754,11 +755,15 @@ xfs_qm_qino_alloc(
if ((flags & XFS_QMOPT_PQUOTA) &&
(mp->m_sb.sb_gquotino != NULLFSINO)) {
ino = mp->m_sb.sb_gquotino;
- ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
+ if (XFS_IS_CORRUPT(mp,
+ mp->m_sb.sb_pquotino != NULLFSINO))
+ return -EFSCORRUPTED;
} else if ((flags & XFS_QMOPT_GQUOTA) &&
(mp->m_sb.sb_pquotino != NULLFSINO)) {
ino = mp->m_sb.sb_pquotino;
- ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
+ if (XFS_IS_CORRUPT(mp,
+ mp->m_sb.sb_gquotino != NULLFSINO))
+ return -EFSCORRUPTED;
}
if (ino != NULLFSINO) {
error = xfs_iget(mp, NULL, ino, 0, 0, ip);
@@ -1559,7 +1564,7 @@ error_rele:
STATIC void
xfs_qm_destroy_quotainos(
- xfs_quotainfo_t *qi)
+ struct xfs_quotainfo *qi)
{
if (qi->qi_uquotaip) {
xfs_irele(qi->qi_uquotaip);
@@ -1693,7 +1698,7 @@ xfs_qm_vop_dqalloc(
}
}
if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
- if (xfs_get_projid(ip) != prid) {
+ if (ip->i_d.di_projid != prid) {
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ,
true, &pq);
@@ -1737,14 +1742,14 @@ error_rele:
* Actually transfer ownership, and do dquot modifications.
* These were already reserved.
*/
-xfs_dquot_t *
+struct xfs_dquot *
xfs_qm_vop_chown(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dquot_t **IO_olddq,
- xfs_dquot_t *newdq)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct xfs_dquot **IO_olddq,
+ struct xfs_dquot *newdq)
{
- xfs_dquot_t *prevdq;
+ struct xfs_dquot *prevdq;
uint bfield = XFS_IS_REALTIME_INODE(ip) ?
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
@@ -1827,7 +1832,7 @@ xfs_qm_vop_chown_reserve(
}
if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
- xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
+ ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
prjflags = XFS_QMOPT_ENOSPC;
pdq_delblks = pdqp;
if (delblks) {
@@ -1928,7 +1933,7 @@ xfs_qm_vop_create_dqattach(
}
if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
- ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
+ ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id));
ip->i_pdquot = xfs_qm_dqhold(pdqp);
xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index b41b75089548..7823af39008b 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -54,7 +54,7 @@ struct xfs_def_quota {
* Various quota information for individual filesystems.
* The mount structure keeps a pointer to this.
*/
-typedef struct xfs_quotainfo {
+struct xfs_quotainfo {
struct radix_tree_root qi_uquota_tree;
struct radix_tree_root qi_gquota_tree;
struct radix_tree_root qi_pquota_tree;
@@ -76,8 +76,8 @@ typedef struct xfs_quotainfo {
struct xfs_def_quota qi_usr_default;
struct xfs_def_quota qi_grp_default;
struct xfs_def_quota qi_prj_default;
- struct shrinker qi_shrinker;
-} xfs_quotainfo_t;
+ struct shrinker qi_shrinker;
+};
static inline struct radix_tree_root *
xfs_dquot_tree(
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 5d72e88598b4..fc2fa418919f 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -54,13 +54,13 @@ xfs_fill_statvfs_from_dquot(
*/
void
xfs_qm_statvfs(
- xfs_inode_t *ip,
+ struct xfs_inode *ip,
struct kstatfs *statp)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_dquot_t *dqp;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *dqp;
- if (!xfs_qm_dqget(mp, xfs_get_projid(ip), XFS_DQ_PROJ, false, &dqp)) {
+ if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQ_PROJ, false, &dqp)) {
xfs_fill_statvfs_from_dquot(statp, dqp);
xfs_qm_dqput(dqp);
}
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index da7ad0383037..1ea82764bf89 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -19,9 +19,72 @@
#include "xfs_qm.h"
#include "xfs_icache.h"
-STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
-STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
- uint);
+STATIC int
+xfs_qm_log_quotaoff(
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem **qoffstartp,
+ uint flags)
+{
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_qoff_logitem *qoffi;
+
+ *qoffstartp = NULL;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
+ if (error)
+ goto out;
+
+ qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
+ xfs_trans_log_quotaoff_item(tp, qoffi);
+
+ spin_lock(&mp->m_sb_lock);
+ mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
+ spin_unlock(&mp->m_sb_lock);
+
+ xfs_log_sb(tp);
+
+ /*
+ * We have to make sure that the transaction is secure on disk before we
+ * return and actually stop quota accounting. So, make it synchronous.
+ * We don't care about quotaoff's performance.
+ */
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp);
+ if (error)
+ goto out;
+
+ *qoffstartp = qoffi;
+out:
+ return error;
+}
+
+STATIC int
+xfs_qm_log_quotaoff_end(
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem *startqoff,
+ uint flags)
+{
+ struct xfs_trans *tp;
+ int error;
+ struct xfs_qoff_logitem *qoffi;
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ qoffi = xfs_trans_get_qoff_item(tp, startqoff,
+ flags & XFS_ALL_QUOTA_ACCT);
+ xfs_trans_log_quotaoff_item(tp, qoffi);
+
+ /*
+ * We have to make sure that the transaction is secure on disk before we
+ * return and actually stop quota accounting. So, make it synchronous.
+ * We don't care about quotaoff's performance.
+ */
+ xfs_trans_set_sync(tp);
+ return xfs_trans_commit(tp);
+}
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -40,7 +103,7 @@ xfs_qm_scall_quotaoff(
uint dqtype;
int error;
uint inactivate_flags;
- xfs_qoff_logitem_t *qoffstart;
+ struct xfs_qoff_logitem *qoffstart;
/*
* No file system can have quotas enabled on disk but not in core.
@@ -538,74 +601,6 @@ out_unlock:
return error;
}
-STATIC int
-xfs_qm_log_quotaoff_end(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t *startqoff,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
- if (error)
- return error;
-
- qoffi = xfs_trans_get_qoff_item(tp, startqoff,
- flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotoff's performance.
- */
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp);
-}
-
-
-STATIC int
-xfs_qm_log_quotaoff(
- xfs_mount_t *mp,
- xfs_qoff_logitem_t **qoffstartp,
- uint flags)
-{
- xfs_trans_t *tp;
- int error;
- xfs_qoff_logitem_t *qoffi;
-
- *qoffstartp = NULL;
-
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
- if (error)
- goto out;
-
- qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
- xfs_trans_log_quotaoff_item(tp, qoffi);
-
- spin_lock(&mp->m_sb_lock);
- mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
- spin_unlock(&mp->m_sb_lock);
-
- xfs_log_sb(tp);
-
- /*
- * We have to make sure that the transaction is secure on disk before we
- * return and actually stop quota accounting. So, make it synchronous.
- * We don't care about quotoff's performance.
- */
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp);
- if (error)
- goto out;
-
- *qoffstartp = qoffi;
-out:
- return error;
-}
-
/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
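The repeated comment in the quotaoff paths above encodes a durability rule: the quotaoff log item must be stable on disk before accounting actually stops, hence xfs_trans_set_sync() before the commit. A loose userspace analogue of that ordering, assuming only POSIX open/write/fsync:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("quotaoff.marker", O_CREAT | O_WRONLY | O_TRUNC, 0644);
    	if (fd < 0)
    		return 1;
    	const char msg[] = "quota accounting stopping\n";
    	/* Write the intent record... */
    	if (write(fd, msg, sizeof(msg) - 1) != (ssize_t)(sizeof(msg) - 1))
    		return 1;
    	/* ...and force it to stable storage before changing behaviour,
    	 * mirroring the synchronous transaction commit above. */
    	if (fsync(fd))
    		return 1;
    	close(fd);
    	/* only now is it safe to act as though quota is off */
    	return 0;
    }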
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index cd6c7210a373..c7de17deeae6 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -201,6 +201,9 @@ xfs_fs_rm_xquota(
if (XFS_IS_QUOTA_ON(mp))
return -EINVAL;
+ if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
+ return -EINVAL;
+
if (uflags & FS_USER_QUOTA)
flags |= XFS_DQ_USER;
if (uflags & FS_GROUP_QUOTA)
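The added check fails fast on any uflags bits outside the three known quota types rather than silently ignoring them. The same reject-unknown-bits idiom in isolation, with made-up flag values:

    #include <stdio.h>

    #define F_USER	(1u << 0)	/* illustrative values only */
    #define F_GROUP	(1u << 1)
    #define F_PROJ	(1u << 2)

    static int rm_quota(unsigned int uflags)
    {
    	/* Reject bits we do not understand up front, instead of
    	 * translating the known ones and dropping the rest. */
    	if (uflags & ~(F_USER | F_GROUP | F_PROJ))
    		return -22;	/* stands in for -EINVAL */
    	return 0;
    }

    int main(void)
    {
    	printf("%d %d\n", rm_quota(F_USER), rm_quota(1u << 5));
    	return 0;
    }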
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 2328268e6245..8eeed73928cd 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -17,7 +17,7 @@
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_cui_zone;
kmem_zone_t *xfs_cud_zone;
@@ -34,7 +34,7 @@ xfs_cui_item_free(
if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
kmem_free(cuip);
else
- kmem_zone_free(xfs_cui_zone, cuip);
+ kmem_cache_free(xfs_cui_zone, cuip);
}
/*
@@ -206,7 +206,7 @@ xfs_cud_item_release(
struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
xfs_cui_release(cudp->cud_cuip);
- kmem_zone_free(xfs_cud_zone, cudp);
+ kmem_cache_free(xfs_cud_zone, cudp);
}
static const struct xfs_item_ops xfs_cud_item_ops = {
@@ -497,7 +497,7 @@ xfs_cui_recover(
*/
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
xfs_cui_release(cuip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -536,6 +536,7 @@ xfs_cui_recover(
type = refc_type;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
error = -EFSCORRUPTED;
goto abort_error;
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 0f08153b4994..de451235c4ee 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -308,13 +308,13 @@ static int
xfs_find_trim_cow_extent(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
bool *shared,
bool *found)
{
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
struct xfs_iext_cursor icur;
- struct xfs_bmbt_irec got;
*found = false;
@@ -322,23 +322,22 @@ xfs_find_trim_cow_extent(
* If we don't find an overlapping extent, trim the range we need to
* allocate to fit the hole we found.
*/
- if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
- got.br_startoff = offset_fsb + count_fsb;
- if (got.br_startoff > offset_fsb) {
+ if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
+ cmap->br_startoff = offset_fsb + count_fsb;
+ if (cmap->br_startoff > offset_fsb) {
xfs_trim_extent(imap, imap->br_startoff,
- got.br_startoff - imap->br_startoff);
+ cmap->br_startoff - imap->br_startoff);
return xfs_inode_need_cow(ip, imap, shared);
}
*shared = true;
- if (isnullstartblock(got.br_startblock)) {
- xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+ if (isnullstartblock(cmap->br_startblock)) {
+ xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
return 0;
}
/* real extent found - no need to allocate */
- xfs_trim_extent(&got, offset_fsb, count_fsb);
- *imap = got;
+ xfs_trim_extent(cmap, offset_fsb, count_fsb);
*found = true;
return 0;
}
@@ -348,6 +347,7 @@ int
xfs_reflink_allocate_cow(
struct xfs_inode *ip,
struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap,
bool *shared,
uint *lockmode,
bool convert_now)
@@ -367,7 +367,7 @@ xfs_reflink_allocate_cow(
xfs_ifork_init_cow(ip);
}
- error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
if (error || !*shared)
return error;
if (found)
@@ -392,7 +392,7 @@ xfs_reflink_allocate_cow(
/*
* Check for an overlapping extent again now that we dropped the ilock.
*/
- error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
if (error || !*shared)
goto out_trans_cancel;
if (found) {
@@ -410,8 +410,8 @@ xfs_reflink_allocate_cow(
/* Allocate the entire reservation as unwritten blocks. */
nimaps = 1;
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
- XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
- resblks, imap, &nimaps);
+ XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
+ &nimaps);
if (error)
goto out_unreserve;
@@ -427,15 +427,15 @@ xfs_reflink_allocate_cow(
if (nimaps == 0)
return -ENOSPC;
convert:
- xfs_trim_extent(imap, offset_fsb, count_fsb);
+ xfs_trim_extent(cmap, offset_fsb, count_fsb);
/*
* COW fork extents are supposed to remain unwritten until we're ready
* to initiate a disk write. For direct I/O we are going to write the
* data and need the conversion, but for buffered writes we're done.
*/
- if (!convert_now || imap->br_state == XFS_EXT_NORM)
+ if (!convert_now || cmap->br_state == XFS_EXT_NORM)
return 0;
- trace_xfs_reflink_convert_cow(ip, imap);
+ trace_xfs_reflink_convert_cow(ip, cmap);
return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
out_unreserve:
@@ -1270,7 +1270,7 @@ xfs_reflink_zero_posteof(
trace_xfs_zero_eof(ip, isize, pos - isize);
return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
- &xfs_iomap_ops);
+ &xfs_buffered_write_iomap_ops);
}
/*
@@ -1381,85 +1381,6 @@ out_unlock:
return ret;
}
-/*
- * The user wants to preemptively CoW all shared blocks in this file,
- * which enables us to turn off the reflink flag. Iterate all
- * extents which are not prealloc/delalloc to see which ranges are
- * mentioned in the refcount tree, then read those blocks into the
- * pagecache, dirty them, fsync them back out, and then we can update
- * the inode flag. What happens if we run out of memory? :)
- */
-STATIC int
-xfs_reflink_dirty_extents(
- struct xfs_inode *ip,
- xfs_fileoff_t fbno,
- xfs_filblks_t end,
- xfs_off_t isize)
-{
- struct xfs_mount *mp = ip->i_mount;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- xfs_extlen_t aglen;
- xfs_agblock_t rbno;
- xfs_extlen_t rlen;
- xfs_off_t fpos;
- xfs_off_t flen;
- struct xfs_bmbt_irec map[2];
- int nmaps;
- int error = 0;
-
- while (end - fbno > 0) {
- nmaps = 1;
- /*
- * Look for extents in the file. Skip holes, delalloc, or
- * unwritten extents; they can't be reflinked.
- */
- error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0);
- if (error)
- goto out;
- if (nmaps == 0)
- break;
- if (!xfs_bmap_is_real_extent(&map[0]))
- goto next;
-
- map[1] = map[0];
- while (map[1].br_blockcount) {
- agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock);
- agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock);
- aglen = map[1].br_blockcount;
-
- error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
- aglen, &rbno, &rlen, true);
- if (error)
- goto out;
- if (rbno == NULLAGBLOCK)
- break;
-
- /* Dirty the pages */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- fpos = XFS_FSB_TO_B(mp, map[1].br_startoff +
- (rbno - agbno));
- flen = XFS_FSB_TO_B(mp, rlen);
- if (fpos + flen > isize)
- flen = isize - fpos;
- error = iomap_file_dirty(VFS_I(ip), fpos, flen,
- &xfs_iomap_ops);
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (error)
- goto out;
-
- map[1].br_blockcount -= (rbno - agbno + rlen);
- map[1].br_startoff += (rbno - agbno + rlen);
- map[1].br_startblock += (rbno - agbno + rlen);
- }
-
-next:
- fbno = map[0].br_startoff + map[0].br_blockcount;
- }
-out:
- return error;
-}
-
/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
@@ -1596,10 +1517,7 @@ xfs_reflink_unshare(
xfs_off_t offset,
xfs_off_t len)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t fbno;
- xfs_filblks_t end;
- xfs_off_t isize;
+ struct inode *inode = VFS_I(ip);
int error;
if (!xfs_is_reflink_inode(ip))
@@ -1607,20 +1525,13 @@ xfs_reflink_unshare(
trace_xfs_reflink_unshare(ip, offset, len);
- inode_dio_wait(VFS_I(ip));
+ inode_dio_wait(inode);
- /* Try to CoW the selected ranges */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- fbno = XFS_B_TO_FSBT(mp, offset);
- isize = i_size_read(VFS_I(ip));
- end = XFS_B_TO_FSB(mp, offset + len);
- error = xfs_reflink_dirty_extents(ip, fbno, end, isize);
+ error = iomap_file_unshare(inode, offset, len,
+ &xfs_buffered_write_iomap_ops);
if (error)
- goto out_unlock;
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- /* Wait for the IO to finish */
- error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ goto out;
+ error = filemap_write_and_wait(inode->i_mapping);
if (error)
goto out;
@@ -1628,11 +1539,8 @@ xfs_reflink_unshare(
error = xfs_reflink_try_clear_inode_flag(ip);
if (error)
goto out;
-
return 0;
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
return error;
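xfs_find_trim_cow_extent() and xfs_reflink_allocate_cow() now hand the COW mapping back through a separate cmap argument instead of overwriting the caller's imap. A schematic sketch of why the split matters, with a toy mapping type standing in for struct xfs_bmbt_irec:

    #include <stdio.h>

    struct bmap { long startoff, startblock, blockcount; };

    /* Previously the helper clobbered the caller's data mapping (imap)
     * with the COW mapping; returning the COW result in a separate cmap
     * leaves the original data extent available afterwards. */
    static void find_cow(const struct bmap *imap, struct bmap *cmap)
    {
    	cmap->startoff = imap->startoff;
    	cmap->startblock = -1;	/* -1: "needs allocation" (illustrative) */
    	cmap->blockcount = imap->blockcount;
    }

    int main(void)
    {
    	struct bmap imap = { 0, 100, 8 }, cmap;
    	find_cow(&imap, &cmap);
    	printf("data start %ld kept; cow start %ld\n",
    	       imap.startblock, cmap.startblock);
    	return 0;
    }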
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 28a43b7f581d..d18ad7f4fb64 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -25,8 +25,8 @@ extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
bool xfs_inode_need_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
bool *shared);
-extern int xfs_reflink_allocate_cow(struct xfs_inode *ip,
- struct xfs_bmbt_irec *imap, bool *shared, uint *lockmode,
+int xfs_reflink_allocate_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap,
+ struct xfs_bmbt_irec *cmap, bool *shared, uint *lockmode,
bool convert_now);
extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 8939e0ea09cd..4911b68f95dd 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -17,7 +17,7 @@
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
-
+#include "xfs_error.h"
kmem_zone_t *xfs_rui_zone;
kmem_zone_t *xfs_rud_zone;
@@ -34,7 +34,7 @@ xfs_rui_item_free(
if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
kmem_free(ruip);
else
- kmem_zone_free(xfs_rui_zone, ruip);
+ kmem_cache_free(xfs_rui_zone, ruip);
}
/*
@@ -171,8 +171,10 @@ xfs_rui_copy_format(
src_rui_fmt = buf->i_addr;
len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);
- if (buf->i_len != len)
+ if (buf->i_len != len) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
return -EFSCORRUPTED;
+ }
memcpy(dst_rui_fmt, src_rui_fmt, len);
return 0;
@@ -227,7 +229,7 @@ xfs_rud_item_release(
struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
xfs_rui_release(rudp->rud_ruip);
- kmem_zone_free(xfs_rud_zone, rudp);
+ kmem_cache_free(xfs_rud_zone, rudp);
}
static const struct xfs_item_ops xfs_rud_item_ops = {
@@ -539,7 +541,7 @@ xfs_rui_recover(
*/
set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
xfs_rui_release(ruip);
- return -EIO;
+ return -EFSCORRUPTED;
}
}
@@ -581,6 +583,7 @@ xfs_rui_recover(
type = XFS_RMAP_FREE;
break;
default:
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
error = -EFSCORRUPTED;
goto abort_error;
}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 4a48a8c75b4f..d42b5a2047e0 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -792,8 +792,7 @@ xfs_growfs_rt_alloc(
*/
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
- XFS_BMAPI_METADATA, resblks, &map,
- &nmap);
+ XFS_BMAPI_METADATA, 0, &map, &nmap);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8d1df9f8be07..d9ae27ddf253 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -37,10 +37,10 @@
#include "xfs_reflink.h"
#include <linux/magic.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
static const struct super_operations xfs_super_operations;
-struct bio_set xfs_ioend_bioset;
static struct kset *xfs_kset; /* top-level xfs sysfs dir */
#ifdef DEBUG
@@ -51,7 +51,7 @@ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
* Table driven mount option parser.
*/
enum {
- Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
+ Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
@@ -59,382 +59,67 @@ enum {
Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
- Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
+ Opt_discard, Opt_nodiscard, Opt_dax,
};
-static const match_table_t tokens = {
- {Opt_logbufs, "logbufs=%u"}, /* number of XFS log buffers */
- {Opt_logbsize, "logbsize=%s"}, /* size of XFS log buffers */
- {Opt_logdev, "logdev=%s"}, /* log device */
- {Opt_rtdev, "rtdev=%s"}, /* realtime I/O device */
- {Opt_biosize, "biosize=%u"}, /* log2 of preferred buffered io size */
- {Opt_wsync, "wsync"}, /* safe-mode nfs compatible mount */
- {Opt_noalign, "noalign"}, /* turn off stripe alignment */
- {Opt_swalloc, "swalloc"}, /* turn on stripe width allocation */
- {Opt_sunit, "sunit=%u"}, /* data volume stripe unit */
- {Opt_swidth, "swidth=%u"}, /* data volume stripe width */
- {Opt_nouuid, "nouuid"}, /* ignore filesystem UUID */
- {Opt_grpid, "grpid"}, /* group-ID from parent directory */
- {Opt_nogrpid, "nogrpid"}, /* group-ID from current process */
- {Opt_bsdgroups, "bsdgroups"}, /* group-ID from parent directory */
- {Opt_sysvgroups,"sysvgroups"}, /* group-ID from current process */
- {Opt_allocsize, "allocsize=%s"},/* preferred allocation size */
- {Opt_norecovery,"norecovery"}, /* don't run XFS recovery */
- {Opt_inode64, "inode64"}, /* inodes can be allocated anywhere */
- {Opt_inode32, "inode32"}, /* inode allocation limited to
- * XFS_MAXINUMBER_32 */
- {Opt_ikeep, "ikeep"}, /* do not free empty inode clusters */
- {Opt_noikeep, "noikeep"}, /* free empty inode clusters */
- {Opt_largeio, "largeio"}, /* report large I/O sizes in stat() */
- {Opt_nolargeio, "nolargeio"}, /* do not report large I/O sizes
- * in stat(). */
- {Opt_attr2, "attr2"}, /* do use attr2 attribute format */
- {Opt_noattr2, "noattr2"}, /* do not use attr2 attribute format */
- {Opt_filestreams,"filestreams"},/* use filestreams allocator */
- {Opt_quota, "quota"}, /* disk quotas (user) */
- {Opt_noquota, "noquota"}, /* no quotas */
- {Opt_usrquota, "usrquota"}, /* user quota enabled */
- {Opt_grpquota, "grpquota"}, /* group quota enabled */
- {Opt_prjquota, "prjquota"}, /* project quota enabled */
- {Opt_uquota, "uquota"}, /* user quota (IRIX variant) */
- {Opt_gquota, "gquota"}, /* group quota (IRIX variant) */
- {Opt_pquota, "pquota"}, /* project quota (IRIX variant) */
- {Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
- {Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
- {Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
- {Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
- {Opt_discard, "discard"}, /* Discard unused blocks */
- {Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
- {Opt_dax, "dax"}, /* Enable direct access to bdev pages */
- {Opt_err, NULL},
+static const struct fs_parameter_spec xfs_param_specs[] = {
+ fsparam_u32("logbufs", Opt_logbufs),
+ fsparam_string("logbsize", Opt_logbsize),
+ fsparam_string("logdev", Opt_logdev),
+ fsparam_string("rtdev", Opt_rtdev),
+ fsparam_flag("wsync", Opt_wsync),
+ fsparam_flag("noalign", Opt_noalign),
+ fsparam_flag("swalloc", Opt_swalloc),
+ fsparam_u32("sunit", Opt_sunit),
+ fsparam_u32("swidth", Opt_swidth),
+ fsparam_flag("nouuid", Opt_nouuid),
+ fsparam_flag("grpid", Opt_grpid),
+ fsparam_flag("nogrpid", Opt_nogrpid),
+ fsparam_flag("bsdgroups", Opt_bsdgroups),
+ fsparam_flag("sysvgroups", Opt_sysvgroups),
+ fsparam_string("allocsize", Opt_allocsize),
+ fsparam_flag("norecovery", Opt_norecovery),
+ fsparam_flag("inode64", Opt_inode64),
+ fsparam_flag("inode32", Opt_inode32),
+ fsparam_flag("ikeep", Opt_ikeep),
+ fsparam_flag("noikeep", Opt_noikeep),
+ fsparam_flag("largeio", Opt_largeio),
+ fsparam_flag("nolargeio", Opt_nolargeio),
+ fsparam_flag("attr2", Opt_attr2),
+ fsparam_flag("noattr2", Opt_noattr2),
+ fsparam_flag("filestreams", Opt_filestreams),
+ fsparam_flag("quota", Opt_quota),
+ fsparam_flag("noquota", Opt_noquota),
+ fsparam_flag("usrquota", Opt_usrquota),
+ fsparam_flag("grpquota", Opt_grpquota),
+ fsparam_flag("prjquota", Opt_prjquota),
+ fsparam_flag("uquota", Opt_uquota),
+ fsparam_flag("gquota", Opt_gquota),
+ fsparam_flag("pquota", Opt_pquota),
+ fsparam_flag("uqnoenforce", Opt_uqnoenforce),
+ fsparam_flag("gqnoenforce", Opt_gqnoenforce),
+ fsparam_flag("pqnoenforce", Opt_pqnoenforce),
+ fsparam_flag("qnoenforce", Opt_qnoenforce),
+ fsparam_flag("discard", Opt_discard),
+ fsparam_flag("nodiscard", Opt_nodiscard),
+ fsparam_flag("dax", Opt_dax),
+ {}
};
-
-STATIC int
-suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
-{
- int last, shift_left_factor = 0, _res;
- char *value;
- int ret = 0;
-
- value = match_strdup(s);
- if (!value)
- return -ENOMEM;
-
- last = strlen(value) - 1;
- if (value[last] == 'K' || value[last] == 'k') {
- shift_left_factor = 10;
- value[last] = '\0';
- }
- if (value[last] == 'M' || value[last] == 'm') {
- shift_left_factor = 20;
- value[last] = '\0';
- }
- if (value[last] == 'G' || value[last] == 'g') {
- shift_left_factor = 30;
- value[last] = '\0';
- }
-
- if (kstrtoint(value, base, &_res))
- ret = -EINVAL;
- kfree(value);
- *res = _res << shift_left_factor;
- return ret;
-}
-
-/*
- * This function fills in xfs_mount_t fields based on mount args.
- * Note: the superblock has _not_ yet been read in.
- *
- * Note that this function leaks the various device name allocations on
- * failure. The caller takes care of them.
- *
- * *sb is const because this is also used to test options on the remount
- * path, and we don't want this to have any side effects at remount time.
- * Today this function does not change *sb, but just to future-proof...
- */
-STATIC int
-xfs_parseargs(
- struct xfs_mount *mp,
- char *options)
-{
- const struct super_block *sb = mp->m_super;
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int dsunit = 0;
- int dswidth = 0;
- int iosize = 0;
- uint8_t iosizelog = 0;
-
- /*
- * set up the mount name first so all the errors will refer to the
- * correct device.
- */
- mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
- if (!mp->m_fsname)
- return -ENOMEM;
- mp->m_fsname_len = strlen(mp->m_fsname) + 1;
-
- /*
- * Copy binary VFS mount flags we are interested in.
- */
- if (sb_rdonly(sb))
- mp->m_flags |= XFS_MOUNT_RDONLY;
- if (sb->s_flags & SB_DIRSYNC)
- mp->m_flags |= XFS_MOUNT_DIRSYNC;
- if (sb->s_flags & SB_SYNCHRONOUS)
- mp->m_flags |= XFS_MOUNT_WSYNC;
-
- /*
- * Set some default flags that could be cleared by the mount option
- * parsing.
- */
- mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
-
- /*
- * These can be overridden by the mount option parsing.
- */
- mp->m_logbufs = -1;
- mp->m_logbsize = -1;
-
- if (!options)
- goto done;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_logbufs:
- if (match_int(args, &mp->m_logbufs))
- return -EINVAL;
- break;
- case Opt_logbsize:
- if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
- return -EINVAL;
- break;
- case Opt_logdev:
- kfree(mp->m_logname);
- mp->m_logname = match_strdup(args);
- if (!mp->m_logname)
- return -ENOMEM;
- break;
- case Opt_rtdev:
- kfree(mp->m_rtname);
- mp->m_rtname = match_strdup(args);
- if (!mp->m_rtname)
- return -ENOMEM;
- break;
- case Opt_allocsize:
- case Opt_biosize:
- if (suffix_kstrtoint(args, 10, &iosize))
- return -EINVAL;
- iosizelog = ffs(iosize) - 1;
- break;
- case Opt_grpid:
- case Opt_bsdgroups:
- mp->m_flags |= XFS_MOUNT_GRPID;
- break;
- case Opt_nogrpid:
- case Opt_sysvgroups:
- mp->m_flags &= ~XFS_MOUNT_GRPID;
- break;
- case Opt_wsync:
- mp->m_flags |= XFS_MOUNT_WSYNC;
- break;
- case Opt_norecovery:
- mp->m_flags |= XFS_MOUNT_NORECOVERY;
- break;
- case Opt_noalign:
- mp->m_flags |= XFS_MOUNT_NOALIGN;
- break;
- case Opt_swalloc:
- mp->m_flags |= XFS_MOUNT_SWALLOC;
- break;
- case Opt_sunit:
- if (match_int(args, &dsunit))
- return -EINVAL;
- break;
- case Opt_swidth:
- if (match_int(args, &dswidth))
- return -EINVAL;
- break;
- case Opt_inode32:
- mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
- break;
- case Opt_inode64:
- mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
- break;
- case Opt_nouuid:
- mp->m_flags |= XFS_MOUNT_NOUUID;
- break;
- case Opt_ikeep:
- mp->m_flags |= XFS_MOUNT_IKEEP;
- break;
- case Opt_noikeep:
- mp->m_flags &= ~XFS_MOUNT_IKEEP;
- break;
- case Opt_largeio:
- mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
- break;
- case Opt_nolargeio:
- mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
- break;
- case Opt_attr2:
- mp->m_flags |= XFS_MOUNT_ATTR2;
- break;
- case Opt_noattr2:
- mp->m_flags &= ~XFS_MOUNT_ATTR2;
- mp->m_flags |= XFS_MOUNT_NOATTR2;
- break;
- case Opt_filestreams:
- mp->m_flags |= XFS_MOUNT_FILESTREAMS;
- break;
- case Opt_noquota:
- mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
- mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
- mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
- break;
- case Opt_quota:
- case Opt_uquota:
- case Opt_usrquota:
- mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
- XFS_UQUOTA_ENFD);
- break;
- case Opt_qnoenforce:
- case Opt_uqnoenforce:
- mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
- mp->m_qflags &= ~XFS_UQUOTA_ENFD;
- break;
- case Opt_pquota:
- case Opt_prjquota:
- mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
- XFS_PQUOTA_ENFD);
- break;
- case Opt_pqnoenforce:
- mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
- mp->m_qflags &= ~XFS_PQUOTA_ENFD;
- break;
- case Opt_gquota:
- case Opt_grpquota:
- mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
- XFS_GQUOTA_ENFD);
- break;
- case Opt_gqnoenforce:
- mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
- mp->m_qflags &= ~XFS_GQUOTA_ENFD;
- break;
- case Opt_discard:
- mp->m_flags |= XFS_MOUNT_DISCARD;
- break;
- case Opt_nodiscard:
- mp->m_flags &= ~XFS_MOUNT_DISCARD;
- break;
-#ifdef CONFIG_FS_DAX
- case Opt_dax:
- mp->m_flags |= XFS_MOUNT_DAX;
- break;
-#endif
- default:
- xfs_warn(mp, "unknown mount option [%s].", p);
- return -EINVAL;
- }
- }
-
- /*
- * no recovery flag requires a read-only mount
- */
- if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
- !(mp->m_flags & XFS_MOUNT_RDONLY)) {
- xfs_warn(mp, "no-recovery mounts must be read-only.");
- return -EINVAL;
- }
-
- if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
- xfs_warn(mp,
- "sunit and swidth options incompatible with the noalign option");
- return -EINVAL;
- }
-
-#ifndef CONFIG_XFS_QUOTA
- if (XFS_IS_QUOTA_RUNNING(mp)) {
- xfs_warn(mp, "quota support not available in this kernel.");
- return -EINVAL;
- }
-#endif
-
- if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
- xfs_warn(mp, "sunit and swidth must be specified together");
- return -EINVAL;
- }
-
- if (dsunit && (dswidth % dsunit != 0)) {
- xfs_warn(mp,
- "stripe width (%d) must be a multiple of the stripe unit (%d)",
- dswidth, dsunit);
- return -EINVAL;
- }
-
-done:
- if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
- /*
- * At this point the superblock has not been read
- * in, therefore we do not know the block size.
- * Before the mount call ends we will convert
- * these to FSBs.
- */
- mp->m_dalign = dsunit;
- mp->m_swidth = dswidth;
- }
-
- if (mp->m_logbufs != -1 &&
- mp->m_logbufs != 0 &&
- (mp->m_logbufs < XLOG_MIN_ICLOGS ||
- mp->m_logbufs > XLOG_MAX_ICLOGS)) {
- xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
- mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
- return -EINVAL;
- }
- if (mp->m_logbsize != -1 &&
- mp->m_logbsize != 0 &&
- (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
- mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
- !is_power_of_2(mp->m_logbsize))) {
- xfs_warn(mp,
- "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
- mp->m_logbsize);
- return -EINVAL;
- }
-
- if (iosizelog) {
- if (iosizelog > XFS_MAX_IO_LOG ||
- iosizelog < XFS_MIN_IO_LOG) {
- xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
- iosizelog, XFS_MIN_IO_LOG,
- XFS_MAX_IO_LOG);
- return -EINVAL;
- }
-
- mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
- mp->m_readio_log = iosizelog;
- mp->m_writeio_log = iosizelog;
- }
-
- return 0;
-}
+static const struct fs_parameter_description xfs_fs_parameters = {
+ .name = "xfs",
+ .specs = xfs_param_specs,
+};
struct proc_xfs_info {
uint64_t flag;
char *str;
};
-STATIC void
-xfs_showargs(
- struct xfs_mount *mp,
- struct seq_file *m)
+static int
+xfs_fs_show_options(
+ struct seq_file *m,
+ struct dentry *root)
{
static struct proc_xfs_info xfs_info_set[] = {
/* the few simple ones we can get from the mount struct */
@@ -448,30 +133,24 @@ xfs_showargs(
{ XFS_MOUNT_FILESTREAMS, ",filestreams" },
{ XFS_MOUNT_GRPID, ",grpid" },
{ XFS_MOUNT_DISCARD, ",discard" },
- { XFS_MOUNT_SMALL_INUMS, ",inode32" },
+ { XFS_MOUNT_LARGEIO, ",largeio" },
{ XFS_MOUNT_DAX, ",dax" },
{ 0, NULL }
};
- static struct proc_xfs_info xfs_info_unset[] = {
- /* the few simple ones we can get from the mount struct */
- { XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
- { XFS_MOUNT_SMALL_INUMS, ",inode64" },
- { 0, NULL }
- };
+ struct xfs_mount *mp = XFS_M(root->d_sb);
struct proc_xfs_info *xfs_infop;
for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
if (mp->m_flags & xfs_infop->flag)
seq_puts(m, xfs_infop->str);
}
- for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
- if (!(mp->m_flags & xfs_infop->flag))
- seq_puts(m, xfs_infop->str);
- }
- if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
+ seq_printf(m, ",inode%d",
+ (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
+
+ if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
seq_printf(m, ",allocsize=%dk",
- (int)(1 << mp->m_writeio_log) >> 10);
+ (1 << mp->m_allocsize_log) >> 10);
if (mp->m_logbufs > 0)
seq_printf(m, ",logbufs=%d", mp->m_logbufs);
@@ -510,6 +189,8 @@ xfs_showargs(
if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
seq_puts(m, ",noquota");
+
+ return 0;
}
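
xfs_fs_show_options() is the hook the VFS invokes to render the filesystem-specific part of a mount entry, so the flag table above directly controls what appears in /proc/mounts and mount(8) output. As an illustrative example only (the exact set depends on kernel version and mkfs options), a line for an xfs mount might read:

/dev/sda1 /mnt xfs rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0
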
static uint64_t
@@ -808,33 +489,33 @@ xfs_init_mount_workqueues(
struct xfs_mount *mp)
{
mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
if (!mp->m_buf_workqueue)
goto out;
mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
if (!mp->m_unwritten_workqueue)
goto out_destroy_buf;
mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
- 0, mp->m_fsname);
+ 0, mp->m_super->s_id);
if (!mp->m_cil_workqueue)
goto out_destroy_unwritten;
mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
if (!mp->m_reclaim_workqueue)
goto out_destroy_cil;
mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
- WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
+ WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
if (!mp->m_eofblocks_workqueue)
goto out_destroy_reclaim;
mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
- mp->m_fsname);
+ mp->m_super->s_id);
if (!mp->m_sync_workqueue)
goto out_destroy_eofb;
@@ -1038,13 +719,13 @@ xfs_fs_drop_inode(
return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}
-STATIC void
-xfs_free_fsname(
+static void
+xfs_mount_free(
struct xfs_mount *mp)
{
- kfree(mp->m_fsname);
kfree(mp->m_rtname);
kfree(mp->m_logname);
+ kmem_free(mp);
}
STATIC int
@@ -1205,181 +886,6 @@ xfs_quiesce_attr(
xfs_log_quiesce(mp);
}
-STATIC int
-xfs_test_remount_options(
- struct super_block *sb,
- char *options)
-{
- int error = 0;
- struct xfs_mount *tmp_mp;
-
- tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
- if (!tmp_mp)
- return -ENOMEM;
-
- tmp_mp->m_super = sb;
- error = xfs_parseargs(tmp_mp, options);
- xfs_free_fsname(tmp_mp);
- kmem_free(tmp_mp);
-
- return error;
-}
-
-STATIC int
-xfs_fs_remount(
- struct super_block *sb,
- int *flags,
- char *options)
-{
- struct xfs_mount *mp = XFS_M(sb);
- xfs_sb_t *sbp = &mp->m_sb;
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int error;
-
- /* First, check for complete junk; i.e. invalid options */
- error = xfs_test_remount_options(sb, options);
- if (error)
- return error;
-
- sync_filesystem(sb);
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_inode64:
- mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
- mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
- break;
- case Opt_inode32:
- mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
- mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
- break;
- default:
- /*
- * Logically we would return an error here to prevent
- * users from believing they might have changed
- * mount options using remount which can't be changed.
- *
- * But unfortunately mount(8) adds all options from
- * mtab and fstab to the mount arguments in some cases
- * so we can't blindly reject options, but have to
- * check for each specified option if it actually
- * differs from the currently set option and only
- * reject it if that's the case.
- *
- * Until that is implemented we return success for
- * every remount request, and silently ignore all
- * options that we can't actually change.
- */
-#if 0
- xfs_info(mp,
- "mount option \"%s\" not supported for remount", p);
- return -EINVAL;
-#else
- break;
-#endif
- }
- }
-
- /* ro -> rw */
- if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
- if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
- xfs_warn(mp,
- "ro->rw transition prohibited on norecovery mount");
- return -EINVAL;
- }
-
- if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
- xfs_sb_has_ro_compat_feature(sbp,
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
- xfs_warn(mp,
-"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
- (sbp->sb_features_ro_compat &
- XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
- return -EINVAL;
- }
-
- mp->m_flags &= ~XFS_MOUNT_RDONLY;
-
- /*
- * If this is the first remount to writeable state we
- * might have some superblock changes to update.
- */
- if (mp->m_update_sb) {
- error = xfs_sync_sb(mp, false);
- if (error) {
- xfs_warn(mp, "failed to write sb changes");
- return error;
- }
- mp->m_update_sb = false;
- }
-
- /*
- * Fill out the reserve pool if it is empty. Use the stashed
- * value if it is non-zero, otherwise go with the default.
- */
- xfs_restore_resvblks(mp);
- xfs_log_work_queue(mp);
-
- /* Recover any CoW blocks that never got remapped. */
- error = xfs_reflink_recover_cow(mp);
- if (error) {
- xfs_err(mp,
- "Error %d recovering leftover CoW allocations.", error);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return error;
- }
- xfs_start_block_reaping(mp);
-
- /* Create the per-AG metadata reservation pool .*/
- error = xfs_fs_reserve_ag_blocks(mp);
- if (error && error != -ENOSPC)
- return error;
- }
-
- /* rw -> ro */
- if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
- /*
- * Cancel background eofb scanning so it cannot race with the
- * final log force+buftarg wait and deadlock the remount.
- */
- xfs_stop_block_reaping(mp);
-
- /* Get rid of any leftover CoW reservations... */
- error = xfs_icache_free_cowblocks(mp, NULL);
- if (error) {
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return error;
- }
-
- /* Free the per-AG metadata reservation pool. */
- error = xfs_fs_unreserve_ag_blocks(mp);
- if (error) {
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return error;
- }
-
- /*
- * Before we sync the metadata, we need to free up the reserve
- * block pool so that the used block count in the superblock on
- * disk is correct at the end of the remount. Stash the current
- * reserve pool size so that if we get remounted rw, we can
- * return it to the same size.
- */
- xfs_save_resvblks(mp);
-
- xfs_quiesce_attr(mp);
- mp->m_flags |= XFS_MOUNT_RDONLY;
- }
-
- return 0;
-}
-
/*
* Second stage of a freeze. The data is already frozen so we only
* need to take care of the metadata. Once that's done sync the superblock
@@ -1410,15 +916,6 @@ xfs_fs_unfreeze(
return 0;
}
-STATIC int
-xfs_fs_show_options(
- struct seq_file *m,
- struct dentry *root)
-{
- xfs_showargs(XFS_M(root->d_sb), m);
- return 0;
-}
-
/*
* This function fills in xfs_mount_t fields based on mount args.
* Note: the superblock _has_ now been read in.
@@ -1541,60 +1038,337 @@ xfs_destroy_percpu_counters(
percpu_counter_destroy(&mp->m_delalloc_blks);
}
-static struct xfs_mount *
-xfs_mount_alloc(
+static void
+xfs_fs_put_super(
struct super_block *sb)
{
- struct xfs_mount *mp;
+ struct xfs_mount *mp = XFS_M(sb);
- mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
- if (!mp)
- return NULL;
+ /* if ->fill_super failed, we have no mount to tear down */
+ if (!sb->s_fs_info)
+ return;
- mp->m_super = sb;
- spin_lock_init(&mp->m_sb_lock);
- spin_lock_init(&mp->m_agirotor_lock);
- INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
- spin_lock_init(&mp->m_perag_lock);
- mutex_init(&mp->m_growlock);
- atomic_set(&mp->m_active_trans, 0);
- INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
- INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
- INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
- mp->m_kobj.kobject.kset = xfs_kset;
- /*
- * We don't create the finobt per-ag space reservation until after log
- * recovery, so we must set this to true so that an ifree transaction
- * started during log recovery will not depend on space reservations
- * for finobt expansion.
- */
- mp->m_finobt_nores = true;
- return mp;
+ xfs_notice(mp, "Unmounting Filesystem");
+ xfs_filestream_unmount(mp);
+ xfs_unmountfs(mp);
+
+ xfs_freesb(mp);
+ free_percpu(mp->m_stats.xs_stats);
+ xfs_destroy_percpu_counters(mp);
+ xfs_destroy_mount_workqueues(mp);
+ xfs_close_devices(mp);
+
+ sb->s_fs_info = NULL;
+ xfs_mount_free(mp);
}
+static long
+xfs_fs_nr_cached_objects(
+ struct super_block *sb,
+ struct shrink_control *sc)
+{
+ /* Paranoia: catch incorrect calls during mount setup or teardown */
+ if (WARN_ON_ONCE(!sb->s_fs_info))
+ return 0;
+ return xfs_reclaim_inodes_count(XFS_M(sb));
+}
-STATIC int
-xfs_fs_fill_super(
+static long
+xfs_fs_free_cached_objects(
struct super_block *sb,
- void *data,
- int silent)
+ struct shrink_control *sc)
{
- struct inode *root;
- struct xfs_mount *mp = NULL;
- int flags = 0, error = -ENOMEM;
+ return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
+}
+static const struct super_operations xfs_super_operations = {
+ .alloc_inode = xfs_fs_alloc_inode,
+ .destroy_inode = xfs_fs_destroy_inode,
+ .dirty_inode = xfs_fs_dirty_inode,
+ .drop_inode = xfs_fs_drop_inode,
+ .put_super = xfs_fs_put_super,
+ .sync_fs = xfs_fs_sync_fs,
+ .freeze_fs = xfs_fs_freeze,
+ .unfreeze_fs = xfs_fs_unfreeze,
+ .statfs = xfs_fs_statfs,
+ .show_options = xfs_fs_show_options,
+ .nr_cached_objects = xfs_fs_nr_cached_objects,
+ .free_cached_objects = xfs_fs_free_cached_objects,
+};
+
+static int
+suffix_kstrtoint(
+ const char *s,
+ unsigned int base,
+ int *res)
+{
+ int last, shift_left_factor = 0, _res;
+ char *value;
+ int ret = 0;
+
+ value = kstrdup(s, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ last = strlen(value) - 1;
+ if (value[last] == 'K' || value[last] == 'k') {
+ shift_left_factor = 10;
+ value[last] = '\0';
+ }
+ if (value[last] == 'M' || value[last] == 'm') {
+ shift_left_factor = 20;
+ value[last] = '\0';
+ }
+ if (value[last] == 'G' || value[last] == 'g') {
+ shift_left_factor = 30;
+ value[last] = '\0';
+ }
+
+ if (kstrtoint(value, base, &_res))
+ ret = -EINVAL;
+ kfree(value);
+ *res = _res << shift_left_factor;
+ return ret;
+}
+
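
suffix_kstrtoint() lets size-style options such as logbsize accept k/m/g suffixes by stripping the suffix and left-shifting the parsed integer. Below is a minimal userspace sketch of the same idea, assuming nothing from the kernel tree; parse_size_suffix() is a hypothetical stand-in, not XFS code:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's suffix_kstrtoint(). */
static int parse_size_suffix(const char *s, int *res)
{
	char *value = strdup(s);
	int shift = 0;
	size_t last;

	if (!value || !*value) {
		free(value);
		return -1;
	}
	last = strlen(value) - 1;
	switch (tolower((unsigned char)value[last])) {
	case 'k': shift = 10; value[last] = '\0'; break;
	case 'm': shift = 20; value[last] = '\0'; break;
	case 'g': shift = 30; value[last] = '\0'; break;
	}
	*res = atoi(value) << shift;
	free(value);
	return 0;
}

int main(void)
{
	int v;

	if (parse_size_suffix("32k", &v) == 0)
		printf("%d\n", v);	/* prints 32768 */
	return 0;
}

So "32k" parses to 32768, which is exactly what logbsize=32k feeds into the validation further down.
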
+/*
+ * Set mount state from a mount option.
+ *
+ * NOTE: mp->m_super is NULL here!
+ */
+static int
+xfs_fc_parse_param(
+ struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct xfs_mount *mp = fc->s_fs_info;
+ struct fs_parse_result result;
+ int size = 0;
+ int opt;
+
+ opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_logbufs:
+ mp->m_logbufs = result.uint_32;
+ return 0;
+ case Opt_logbsize:
+ if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
+ return -EINVAL;
+ return 0;
+ case Opt_logdev:
+ kfree(mp->m_logname);
+ mp->m_logname = kstrdup(param->string, GFP_KERNEL);
+ if (!mp->m_logname)
+ return -ENOMEM;
+ return 0;
+ case Opt_rtdev:
+ kfree(mp->m_rtname);
+ mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
+ if (!mp->m_rtname)
+ return -ENOMEM;
+ return 0;
+ case Opt_allocsize:
+ if (suffix_kstrtoint(param->string, 10, &size))
+ return -EINVAL;
+ mp->m_allocsize_log = ffs(size) - 1;
+ mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
+ return 0;
+ case Opt_grpid:
+ case Opt_bsdgroups:
+ mp->m_flags |= XFS_MOUNT_GRPID;
+ return 0;
+ case Opt_nogrpid:
+ case Opt_sysvgroups:
+ mp->m_flags &= ~XFS_MOUNT_GRPID;
+ return 0;
+ case Opt_wsync:
+ mp->m_flags |= XFS_MOUNT_WSYNC;
+ return 0;
+ case Opt_norecovery:
+ mp->m_flags |= XFS_MOUNT_NORECOVERY;
+ return 0;
+ case Opt_noalign:
+ mp->m_flags |= XFS_MOUNT_NOALIGN;
+ return 0;
+ case Opt_swalloc:
+ mp->m_flags |= XFS_MOUNT_SWALLOC;
+ return 0;
+ case Opt_sunit:
+ mp->m_dalign = result.uint_32;
+ return 0;
+ case Opt_swidth:
+ mp->m_swidth = result.uint_32;
+ return 0;
+ case Opt_inode32:
+ mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+ return 0;
+ case Opt_inode64:
+ mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
+ return 0;
+ case Opt_nouuid:
+ mp->m_flags |= XFS_MOUNT_NOUUID;
+ return 0;
+ case Opt_ikeep:
+ mp->m_flags |= XFS_MOUNT_IKEEP;
+ return 0;
+ case Opt_noikeep:
+ mp->m_flags &= ~XFS_MOUNT_IKEEP;
+ return 0;
+ case Opt_largeio:
+ mp->m_flags |= XFS_MOUNT_LARGEIO;
+ return 0;
+ case Opt_nolargeio:
+ mp->m_flags &= ~XFS_MOUNT_LARGEIO;
+ return 0;
+ case Opt_attr2:
+ mp->m_flags |= XFS_MOUNT_ATTR2;
+ return 0;
+ case Opt_noattr2:
+ mp->m_flags &= ~XFS_MOUNT_ATTR2;
+ mp->m_flags |= XFS_MOUNT_NOATTR2;
+ return 0;
+ case Opt_filestreams:
+ mp->m_flags |= XFS_MOUNT_FILESTREAMS;
+ return 0;
+ case Opt_noquota:
+ mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
+ mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
+ mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
+ return 0;
+ case Opt_quota:
+ case Opt_uquota:
+ case Opt_usrquota:
+ mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
+ XFS_UQUOTA_ENFD);
+ return 0;
+ case Opt_qnoenforce:
+ case Opt_uqnoenforce:
+ mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
+ mp->m_qflags &= ~XFS_UQUOTA_ENFD;
+ return 0;
+ case Opt_pquota:
+ case Opt_prjquota:
+ mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
+ XFS_PQUOTA_ENFD);
+ return 0;
+ case Opt_pqnoenforce:
+ mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
+ mp->m_qflags &= ~XFS_PQUOTA_ENFD;
+ return 0;
+ case Opt_gquota:
+ case Opt_grpquota:
+ mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
+ XFS_GQUOTA_ENFD);
+ return 0;
+ case Opt_gqnoenforce:
+ mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
+ mp->m_qflags &= ~XFS_GQUOTA_ENFD;
+ return 0;
+ case Opt_discard:
+ mp->m_flags |= XFS_MOUNT_DISCARD;
+ return 0;
+ case Opt_nodiscard:
+ mp->m_flags &= ~XFS_MOUNT_DISCARD;
+ return 0;
+#ifdef CONFIG_FS_DAX
+ case Opt_dax:
+ mp->m_flags |= XFS_MOUNT_DAX;
+ return 0;
+#endif
+ default:
+ xfs_warn(mp, "unknown mount option [%s].", param->key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
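
fs_parse() looks the option key up in the spec table referenced by xfs_fs_parameters (xfs_param_specs, which is outside this hunk) and, for typed entries, pre-converts the value into result. As a hedged sketch of the shape such a table takes under the v5.4-era <linux/fs_parser.h> interface; the entries below are illustrative only, not the actual XFS table:

#include <linux/fs_parser.h>

enum { Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_wsync };

/* Illustrative only; the real table is xfs_param_specs. */
static const struct fs_parameter_spec example_specs[] = {
	fsparam_u32("logbufs",     Opt_logbufs),  /* fs_parse() fills result.uint_32 */
	fsparam_string("logbsize", Opt_logbsize), /* value stays in param->string */
	fsparam_string("logdev",   Opt_logdev),
	fsparam_flag("wsync",      Opt_wsync),    /* takes no argument */
	{}
};
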
+static int
+xfs_fc_validate_params(
+ struct xfs_mount *mp)
+{
/*
- * allocate mp and do all low-level struct initializations before we
- * attach it to the super
+ * no recovery flag requires a read-only mount
*/
- mp = xfs_mount_alloc(sb);
- if (!mp)
- goto out;
- sb->s_fs_info = mp;
+ if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
+ !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ xfs_warn(mp, "no-recovery mounts must be read-only.");
+ return -EINVAL;
+ }
+
+ if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
+ (mp->m_dalign || mp->m_swidth)) {
+ xfs_warn(mp,
+ "sunit and swidth options incompatible with the noalign option");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
+ xfs_warn(mp, "quota support not available in this kernel.");
+ return -EINVAL;
+ }
+
+ if ((mp->m_dalign && !mp->m_swidth) ||
+ (!mp->m_dalign && mp->m_swidth)) {
+ xfs_warn(mp, "sunit and swidth must be specified together");
+ return -EINVAL;
+ }
+
+ if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
+ xfs_warn(mp,
+ "stripe width (%d) must be a multiple of the stripe unit (%d)",
+ mp->m_swidth, mp->m_dalign);
+ return -EINVAL;
+ }
+
+ if (mp->m_logbufs != -1 &&
+ mp->m_logbufs != 0 &&
+ (mp->m_logbufs < XLOG_MIN_ICLOGS ||
+ mp->m_logbufs > XLOG_MAX_ICLOGS)) {
+ xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
+ mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
+ return -EINVAL;
+ }
+
+ if (mp->m_logbsize != -1 &&
+ mp->m_logbsize != 0 &&
+ (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
+ mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
+ !is_power_of_2(mp->m_logbsize))) {
+ xfs_warn(mp,
+ "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
+ mp->m_logbsize);
+ return -EINVAL;
+ }
+
+ if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
+ (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
+ mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
+ xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
+ mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
+ return -EINVAL;
+ }
- error = xfs_parseargs(mp, (char *)data);
+ return 0;
+}
+
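
Concretely: logbsize=32k passes (32768 is a power of two inside the 16k-256k range) while logbsize=48k is rejected, and sunit=64,swidth=256 passes while sunit=64,swidth=200 fails the multiple check. The is_power_of_2() test used above is the standard bit trick from include/linux/log2.h:

/* From include/linux/log2.h. */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}
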
+static int
+xfs_fc_fill_super(
+ struct super_block *sb,
+ struct fs_context *fc)
+{
+ struct xfs_mount *mp = sb->s_fs_info;
+ struct inode *root;
+ int flags = 0, error;
+
+ mp->m_super = sb;
+
+ error = xfs_fc_validate_params(mp);
if (error)
- goto out_free_fsname;
+ goto out_free_names;
sb_min_blocksize(sb, BBSIZE);
sb->s_xattr = xfs_xattr_handlers;
@@ -1616,12 +1390,12 @@ xfs_fs_fill_super(
msleep(xfs_globals.mount_delay * 1000);
}
- if (silent)
+ if (fc->sb_flags & SB_SILENT)
flags |= XFS_MFSI_QUIET;
error = xfs_open_devices(mp);
if (error)
- goto out_free_fsname;
+ goto out_free_names;
error = xfs_init_mount_workqueues(mp);
if (error)
@@ -1758,11 +1532,9 @@ xfs_fs_fill_super(
xfs_destroy_mount_workqueues(mp);
out_close_devices:
xfs_close_devices(mp);
- out_free_fsname:
+ out_free_names:
sb->s_fs_info = NULL;
- xfs_free_fsname(mp);
- kfree(mp);
- out:
+ xfs_mount_free(mp);
return error;
out_unmount:
@@ -1771,80 +1543,252 @@ xfs_fs_fill_super(
goto out_free_sb;
}
-STATIC void
-xfs_fs_put_super(
- struct super_block *sb)
+static int
+xfs_fc_get_tree(
+ struct fs_context *fc)
{
- struct xfs_mount *mp = XFS_M(sb);
+ return get_tree_bdev(fc, xfs_fc_fill_super);
+}
- /* if ->fill_super failed, we have no mount to tear down */
- if (!sb->s_fs_info)
- return;
+static int
+xfs_remount_rw(
+ struct xfs_mount *mp)
+{
+ struct xfs_sb *sbp = &mp->m_sb;
+ int error;
- xfs_notice(mp, "Unmounting Filesystem");
- xfs_filestream_unmount(mp);
- xfs_unmountfs(mp);
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
+ xfs_warn(mp,
+ "ro->rw transition prohibited on norecovery mount");
+ return -EINVAL;
+ }
- xfs_freesb(mp);
- free_percpu(mp->m_stats.xs_stats);
- xfs_destroy_percpu_counters(mp);
- xfs_destroy_mount_workqueues(mp);
- xfs_close_devices(mp);
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+ xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+ "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ return -EINVAL;
+ }
- sb->s_fs_info = NULL;
- xfs_free_fsname(mp);
- kfree(mp);
+ mp->m_flags &= ~XFS_MOUNT_RDONLY;
+
+ /*
+ * If this is the first remount to writeable state we might have some
+ * superblock changes to update.
+ */
+ if (mp->m_update_sb) {
+ error = xfs_sync_sb(mp, false);
+ if (error) {
+ xfs_warn(mp, "failed to write sb changes");
+ return error;
+ }
+ mp->m_update_sb = false;
+ }
+
+ /*
+ * Fill out the reserve pool if it is empty. Use the stashed value if
+ * it is non-zero, otherwise go with the default.
+ */
+ xfs_restore_resvblks(mp);
+ xfs_log_work_queue(mp);
+
+ /* Recover any CoW blocks that never got remapped. */
+ error = xfs_reflink_recover_cow(mp);
+ if (error) {
+ xfs_err(mp,
+ "Error %d recovering leftover CoW allocations.", error);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+ xfs_start_block_reaping(mp);
+
+ /* Create the per-AG metadata reservation pool. */
+ error = xfs_fs_reserve_ag_blocks(mp);
+ if (error && error != -ENOSPC)
+ return error;
+
+ return 0;
}
-STATIC struct dentry *
-xfs_fs_mount(
- struct file_system_type *fs_type,
- int flags,
- const char *dev_name,
- void *data)
+static int
+xfs_remount_ro(
+ struct xfs_mount *mp)
{
- return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
+ int error;
+
+ /*
+ * Cancel background eofb scanning so it cannot race with the final
+ * log force+buftarg wait and deadlock the remount.
+ */
+ xfs_stop_block_reaping(mp);
+
+ /* Get rid of any leftover CoW reservations... */
+ error = xfs_icache_free_cowblocks(mp, NULL);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+
+ /* Free the per-AG metadata reservation pool. */
+ error = xfs_fs_unreserve_ag_blocks(mp);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ return error;
+ }
+
+ /*
+ * Before we sync the metadata, we need to free up the reserve block
+ * pool so that the used block count in the superblock on disk is
+ * correct at the end of the remount. Stash the current reserve pool
+ * size so that if we get remounted rw, we can return it to the same
+ * size.
+ */
+ xfs_save_resvblks(mp);
+
+ xfs_quiesce_attr(mp);
+ mp->m_flags |= XFS_MOUNT_RDONLY;
+
+ return 0;
}
-static long
-xfs_fs_nr_cached_objects(
- struct super_block *sb,
- struct shrink_control *sc)
+/*
+ * Logically we would return an error here to prevent users from believing
+ * they might have changed mount options using remount which can't be changed.
+ *
+ * But unfortunately mount(8) adds all options from mtab and fstab to the mount
+ * arguments in some cases so we can't blindly reject options, but have to
+ * check for each specified option if it actually differs from the currently
+ * set option and only reject it if that's the case.
+ *
+ * Until that is implemented we return success for every remount request, and
+ * silently ignore all options that we can't actually change.
+ */
+static int
+xfs_fc_reconfigure(
+ struct fs_context *fc)
{
- /* Paranoia: catch incorrect calls during mount setup or teardown */
- if (WARN_ON_ONCE(!sb->s_fs_info))
- return 0;
- return xfs_reclaim_inodes_count(XFS_M(sb));
+ struct xfs_mount *mp = XFS_M(fc->root->d_sb);
+ struct xfs_mount *new_mp = fc->s_fs_info;
+ struct xfs_sb *sbp = &mp->m_sb;
+ int flags = fc->sb_flags;
+ int error;
+
+ error = xfs_fc_validate_params(new_mp);
+ if (error)
+ return error;
+
+ sync_filesystem(mp->m_super);
+
+ /* inode32 -> inode64 */
+ if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
+ !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
+ mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
+ mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
+ }
+
+ /* inode64 -> inode32 */
+ if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
+ (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
+ mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
+ mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
+ }
+
+ /* ro -> rw */
+ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
+ error = xfs_remount_rw(mp);
+ if (error)
+ return error;
+ }
+
+ /* rw -> ro */
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
+ error = xfs_remount_ro(mp);
+ if (error)
+ return error;
+ }
+
+ return 0;
}
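
A classic "mount -o remount,..." request now arrives here via ->reconfigure. The same transition can also be driven directly through the new mount API; a hedged userspace sketch, assuming the installed headers define the SYS_* numbers and the relevant constants in <linux/mount.h>, with /mnt a hypothetical xfs mount point and error handling elided:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	int fd = syscall(SYS_fspick, AT_FDCWD, "/mnt", FSPICK_NO_AUTOMOUNT);

	/* "ro" is a generic sb flag handled by the VFS before xfs sees it */
	syscall(SYS_fsconfig, fd, FSCONFIG_SET_FLAG, "ro", NULL, 0);
	syscall(SYS_fsconfig, fd, FSCONFIG_CMD_RECONFIGURE, NULL, NULL, 0);
	close(fd);
	return 0;
}
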
-static long
-xfs_fs_free_cached_objects(
- struct super_block *sb,
- struct shrink_control *sc)
+static void
+xfs_fc_free(
+ struct fs_context *fc)
{
- return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
+ struct xfs_mount *mp = fc->s_fs_info;
+
+ /*
+ * mp is stored in the fs_context when it is initialized.
+ * mp is transferred to the superblock on a successful mount,
+ * but if an error occurs before the transfer we have to free
+ * it here.
+ */
+ if (mp)
+ xfs_mount_free(mp);
}
-static const struct super_operations xfs_super_operations = {
- .alloc_inode = xfs_fs_alloc_inode,
- .destroy_inode = xfs_fs_destroy_inode,
- .dirty_inode = xfs_fs_dirty_inode,
- .drop_inode = xfs_fs_drop_inode,
- .put_super = xfs_fs_put_super,
- .sync_fs = xfs_fs_sync_fs,
- .freeze_fs = xfs_fs_freeze,
- .unfreeze_fs = xfs_fs_unfreeze,
- .statfs = xfs_fs_statfs,
- .remount_fs = xfs_fs_remount,
- .show_options = xfs_fs_show_options,
- .nr_cached_objects = xfs_fs_nr_cached_objects,
- .free_cached_objects = xfs_fs_free_cached_objects,
+static const struct fs_context_operations xfs_context_ops = {
+ .parse_param = xfs_fc_parse_param,
+ .get_tree = xfs_fc_get_tree,
+ .reconfigure = xfs_fc_reconfigure,
+ .free = xfs_fc_free,
};
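
In lifecycle order: ->parse_param runs once per option string, ->get_tree creates or finds the superblock and fills it, ->reconfigure services remount requests, and ->free tears down the context, releasing the xfs_mount if it was never handed over to a superblock.
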
+static int
+xfs_init_fs_context(
+ struct fs_context *fc)
+{
+ struct xfs_mount *mp;
+
+ mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
+ if (!mp)
+ return -ENOMEM;
+
+ spin_lock_init(&mp->m_sb_lock);
+ spin_lock_init(&mp->m_agirotor_lock);
+ INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
+ spin_lock_init(&mp->m_perag_lock);
+ mutex_init(&mp->m_growlock);
+ atomic_set(&mp->m_active_trans, 0);
+ INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+ INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
+ INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
+ mp->m_kobj.kobject.kset = xfs_kset;
+ /*
+ * We don't create the finobt per-ag space reservation until after log
+ * recovery, so we must set this to true so that an ifree transaction
+ * started during log recovery will not depend on space reservations
+ * for finobt expansion.
+ */
+ mp->m_finobt_nores = true;
+
+ /*
+ * These can be overridden by the mount option parsing.
+ */
+ mp->m_logbufs = -1;
+ mp->m_logbsize = -1;
+ mp->m_allocsize_log = 16; /* 64k */
+
+ /*
+ * Copy binary VFS mount flags we are interested in.
+ */
+ if (fc->sb_flags & SB_RDONLY)
+ mp->m_flags |= XFS_MOUNT_RDONLY;
+ if (fc->sb_flags & SB_DIRSYNC)
+ mp->m_flags |= XFS_MOUNT_DIRSYNC;
+ if (fc->sb_flags & SB_SYNCHRONOUS)
+ mp->m_flags |= XFS_MOUNT_WSYNC;
+
+ fc->s_fs_info = mp;
+ fc->ops = &xfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
- .mount = xfs_fs_mount,
+ .init_fs_context = xfs_init_fs_context,
+ .parameters = &xfs_fs_parameters,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
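
Wiring .init_fs_context and .parameters into file_system_type is what makes xfs reachable through fsopen(2)/fsconfig(2) in addition to classic mount(2). A hedged sketch of that path, with the device and mount point hypothetical, syscall numbers taken from the installed headers, and error handling elided:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	int fsfd, mfd;

	fsfd = syscall(SYS_fsopen, "xfs", FSOPEN_CLOEXEC);
	/* "source" is consumed by the VFS; fs options land in xfs_fc_parse_param() */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "logbufs", "8", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mfd = syscall(SYS_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
		MOVE_MOUNT_F_EMPTY_PATH);
	return 0;
}
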
@@ -1853,37 +1797,39 @@ MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
- if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct xfs_ioend, io_inline_bio),
- BIOSET_NEED_BVECS))
- goto out;
-
- xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
- "xfs_log_ticket");
+ xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
+ sizeof(struct xlog_ticket),
+ 0, 0, NULL);
if (!xfs_log_ticket_zone)
- goto out_free_ioend_bioset;
+ goto out;
- xfs_bmap_free_item_zone = kmem_zone_init(
- sizeof(struct xfs_extent_free_item),
- "xfs_bmap_free_item");
+ xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
+ sizeof(struct xfs_extent_free_item),
+ 0, 0, NULL);
if (!xfs_bmap_free_item_zone)
goto out_destroy_log_ticket_zone;
- xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
- "xfs_btree_cur");
+ xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
+ sizeof(struct xfs_btree_cur),
+ 0, 0, NULL);
if (!xfs_btree_cur_zone)
goto out_destroy_bmap_free_item_zone;
- xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
- "xfs_da_state");
+ xfs_da_state_zone = kmem_cache_create("xfs_da_state",
+ sizeof(struct xfs_da_state),
+ 0, 0, NULL);
if (!xfs_da_state_zone)
goto out_destroy_btree_cur_zone;
- xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
+ xfs_ifork_zone = kmem_cache_create("xfs_ifork",
+ sizeof(struct xfs_ifork),
+ 0, 0, NULL);
if (!xfs_ifork_zone)
goto out_destroy_da_state_zone;
- xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
+ xfs_trans_zone = kmem_cache_create("xf_trans",
+ sizeof(struct xfs_trans),
+ 0, 0, NULL);
if (!xfs_trans_zone)
goto out_destroy_ifork_zone;
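
The kmem_zone_init() wrappers give way to direct kmem_cache_create() calls; the slab API takes the cache name first, then object size, alignment (0 picks a sane default), slab flags, and an optional constructor:

/* Prototype from <linux/slab.h>. */
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
				     unsigned int align, slab_flags_t flags,
				     void (*ctor)(void *));
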
@@ -1893,111 +1839,121 @@ xfs_init_zones(void)
* size possible under XFS. This wastes a little bit of memory,
* but it is much faster.
*/
- xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
- "xfs_buf_item");
+ xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
+ sizeof(struct xfs_buf_log_item),
+ 0, 0, NULL);
if (!xfs_buf_item_zone)
goto out_destroy_trans_zone;
- xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
- ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
- sizeof(xfs_extent_t))), "xfs_efd_item");
+ xfs_efd_zone = kmem_cache_create("xfs_efd_item",
+ (sizeof(struct xfs_efd_log_item) +
+ (XFS_EFD_MAX_FAST_EXTENTS - 1) *
+ sizeof(struct xfs_extent)),
+ 0, 0, NULL);
if (!xfs_efd_zone)
goto out_destroy_buf_item_zone;
- xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
- ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
- sizeof(xfs_extent_t))), "xfs_efi_item");
+ xfs_efi_zone = kmem_cache_create("xfs_efi_item",
+ (sizeof(struct xfs_efi_log_item) +
+ (XFS_EFI_MAX_FAST_EXTENTS - 1) *
+ sizeof(struct xfs_extent)),
+ 0, 0, NULL);
if (!xfs_efi_zone)
goto out_destroy_efd_zone;
- xfs_inode_zone =
- kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
- KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
- KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
+ xfs_inode_zone = kmem_cache_create("xfs_inode",
+ sizeof(struct xfs_inode), 0,
+ (SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+ xfs_fs_inode_init_once);
if (!xfs_inode_zone)
goto out_destroy_efi_zone;
- xfs_ili_zone =
- kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
- KM_ZONE_SPREAD, NULL);
+ xfs_ili_zone = kmem_cache_create("xfs_ili",
+ sizeof(struct xfs_inode_log_item), 0,
+ SLAB_MEM_SPREAD, NULL);
if (!xfs_ili_zone)
goto out_destroy_inode_zone;
- xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
- "xfs_icr");
+
+ xfs_icreate_zone = kmem_cache_create("xfs_icr",
+ sizeof(struct xfs_icreate_item),
+ 0, 0, NULL);
if (!xfs_icreate_zone)
goto out_destroy_ili_zone;
- xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
- "xfs_rud_item");
+ xfs_rud_zone = kmem_cache_create("xfs_rud_item",
+ sizeof(struct xfs_rud_log_item),
+ 0, 0, NULL);
if (!xfs_rud_zone)
goto out_destroy_icreate_zone;
- xfs_rui_zone = kmem_zone_init(
+ xfs_rui_zone = kmem_cache_create("xfs_rui_item",
xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
- "xfs_rui_item");
+ 0, 0, NULL);
if (!xfs_rui_zone)
goto out_destroy_rud_zone;
- xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
- "xfs_cud_item");
+ xfs_cud_zone = kmem_cache_create("xfs_cud_item",
+ sizeof(struct xfs_cud_log_item),
+ 0, 0, NULL);
if (!xfs_cud_zone)
goto out_destroy_rui_zone;
- xfs_cui_zone = kmem_zone_init(
+ xfs_cui_zone = kmem_cache_create("xfs_cui_item",
xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
- "xfs_cui_item");
+ 0, 0, NULL);
if (!xfs_cui_zone)
goto out_destroy_cud_zone;
- xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
- "xfs_bud_item");
+ xfs_bud_zone = kmem_cache_create("xfs_bud_item",
+ sizeof(struct xfs_bud_log_item),
+ 0, 0, NULL);
if (!xfs_bud_zone)
goto out_destroy_cui_zone;
- xfs_bui_zone = kmem_zone_init(
+ xfs_bui_zone = kmem_cache_create("xfs_bui_item",
xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
- "xfs_bui_item");
+ 0, 0, NULL);
if (!xfs_bui_zone)
goto out_destroy_bud_zone;
return 0;
out_destroy_bud_zone:
- kmem_zone_destroy(xfs_bud_zone);
+ kmem_cache_destroy(xfs_bud_zone);
out_destroy_cui_zone:
- kmem_zone_destroy(xfs_cui_zone);
+ kmem_cache_destroy(xfs_cui_zone);
out_destroy_cud_zone:
- kmem_zone_destroy(xfs_cud_zone);
+ kmem_cache_destroy(xfs_cud_zone);
out_destroy_rui_zone:
- kmem_zone_destroy(xfs_rui_zone);
+ kmem_cache_destroy(xfs_rui_zone);
out_destroy_rud_zone:
- kmem_zone_destroy(xfs_rud_zone);
+ kmem_cache_destroy(xfs_rud_zone);
out_destroy_icreate_zone:
- kmem_zone_destroy(xfs_icreate_zone);
+ kmem_cache_destroy(xfs_icreate_zone);
out_destroy_ili_zone:
- kmem_zone_destroy(xfs_ili_zone);
+ kmem_cache_destroy(xfs_ili_zone);
out_destroy_inode_zone:
- kmem_zone_destroy(xfs_inode_zone);
+ kmem_cache_destroy(xfs_inode_zone);
out_destroy_efi_zone:
- kmem_zone_destroy(xfs_efi_zone);
+ kmem_cache_destroy(xfs_efi_zone);
out_destroy_efd_zone:
- kmem_zone_destroy(xfs_efd_zone);
+ kmem_cache_destroy(xfs_efd_zone);
out_destroy_buf_item_zone:
- kmem_zone_destroy(xfs_buf_item_zone);
+ kmem_cache_destroy(xfs_buf_item_zone);
out_destroy_trans_zone:
- kmem_zone_destroy(xfs_trans_zone);
+ kmem_cache_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
- kmem_zone_destroy(xfs_ifork_zone);
+ kmem_cache_destroy(xfs_ifork_zone);
out_destroy_da_state_zone:
- kmem_zone_destroy(xfs_da_state_zone);
+ kmem_cache_destroy(xfs_da_state_zone);
out_destroy_btree_cur_zone:
- kmem_zone_destroy(xfs_btree_cur_zone);
+ kmem_cache_destroy(xfs_btree_cur_zone);
out_destroy_bmap_free_item_zone:
- kmem_zone_destroy(xfs_bmap_free_item_zone);
+ kmem_cache_destroy(xfs_bmap_free_item_zone);
out_destroy_log_ticket_zone:
- kmem_zone_destroy(xfs_log_ticket_zone);
- out_free_ioend_bioset:
- bioset_exit(&xfs_ioend_bioset);
+ kmem_cache_destroy(xfs_log_ticket_zone);
out:
return -ENOMEM;
}
@@ -2010,25 +1966,24 @@ xfs_destroy_zones(void)
* destroy caches.
*/
rcu_barrier();
- kmem_zone_destroy(xfs_bui_zone);
- kmem_zone_destroy(xfs_bud_zone);
- kmem_zone_destroy(xfs_cui_zone);
- kmem_zone_destroy(xfs_cud_zone);
- kmem_zone_destroy(xfs_rui_zone);
- kmem_zone_destroy(xfs_rud_zone);
- kmem_zone_destroy(xfs_icreate_zone);
- kmem_zone_destroy(xfs_ili_zone);
- kmem_zone_destroy(xfs_inode_zone);
- kmem_zone_destroy(xfs_efi_zone);
- kmem_zone_destroy(xfs_efd_zone);
- kmem_zone_destroy(xfs_buf_item_zone);
- kmem_zone_destroy(xfs_trans_zone);
- kmem_zone_destroy(xfs_ifork_zone);
- kmem_zone_destroy(xfs_da_state_zone);
- kmem_zone_destroy(xfs_btree_cur_zone);
- kmem_zone_destroy(xfs_bmap_free_item_zone);
- kmem_zone_destroy(xfs_log_ticket_zone);
- bioset_exit(&xfs_ioend_bioset);
+ kmem_cache_destroy(xfs_bui_zone);
+ kmem_cache_destroy(xfs_bud_zone);
+ kmem_cache_destroy(xfs_cui_zone);
+ kmem_cache_destroy(xfs_cud_zone);
+ kmem_cache_destroy(xfs_rui_zone);
+ kmem_cache_destroy(xfs_rud_zone);
+ kmem_cache_destroy(xfs_icreate_zone);
+ kmem_cache_destroy(xfs_ili_zone);
+ kmem_cache_destroy(xfs_inode_zone);
+ kmem_cache_destroy(xfs_efi_zone);
+ kmem_cache_destroy(xfs_efd_zone);
+ kmem_cache_destroy(xfs_buf_item_zone);
+ kmem_cache_destroy(xfs_trans_zone);
+ kmem_cache_destroy(xfs_ifork_zone);
+ kmem_cache_destroy(xfs_da_state_zone);
+ kmem_cache_destroy(xfs_btree_cur_zone);
+ kmem_cache_destroy(xfs_bmap_free_item_zone);
+ kmem_cache_destroy(xfs_log_ticket_zone);
}
STATIC int __init
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 763e43d22dee..b552cf6d3379 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -11,9 +11,11 @@
#ifdef CONFIG_XFS_QUOTA
extern int xfs_qm_init(void);
extern void xfs_qm_exit(void);
+# define XFS_QUOTA_STRING "quota, "
#else
# define xfs_qm_init() (0)
# define xfs_qm_exit() do { } while (0)
+# define XFS_QUOTA_STRING
#endif
#ifdef CONFIG_XFS_POSIX_ACL
@@ -50,6 +52,12 @@ extern void xfs_qm_exit(void);
# define XFS_WARN_STRING
#endif
+#ifdef CONFIG_XFS_ASSERT_FATAL
+# define XFS_ASSERT_FATAL_STRING "fatal assert, "
+#else
+# define XFS_ASSERT_FATAL_STRING
+#endif
+
#ifdef DEBUG
# define XFS_DBG_STRING "debug"
#else
@@ -63,6 +71,8 @@ extern void xfs_qm_exit(void);
XFS_SCRUB_STRING \
XFS_REPAIR_STRING \
XFS_WARN_STRING \
+ XFS_QUOTA_STRING \
+ XFS_ASSERT_FATAL_STRING \
XFS_DBG_STRING /* DBG must be last */
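
These fragments are concatenated into XFS_BUILD_OPTIONS, which xfs prints once at module init; with quota and fatal asserts compiled in, the load-time banner would now read along the lines of "SGI XFS with ACLs, security attributes, quota, fatal assert, no debug enabled" (illustrative; the exact list depends on the kernel config).
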
struct xfs_inode;
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index ed66fd2de327..a25502bc2071 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -17,6 +17,7 @@
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_quota.h"
+#include "xfs_symlink.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_symlink.h b/fs/xfs/xfs_symlink.h
index 9743d8c9394b..b1fa091427e6 100644
--- a/fs/xfs/xfs_symlink.h
+++ b/fs/xfs/xfs_symlink.h
@@ -5,7 +5,7 @@
#ifndef __XFS_SYMLINK_H
#define __XFS_SYMLINK_H 1
-/* Kernel only symlink defintions */
+/* Kernel only symlink definitions */
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
const char *target_path, umode_t mode, struct xfs_inode **ipp);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index eaae275ed430..c13bb3655e48 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -725,7 +725,7 @@ TRACE_EVENT(xfs_iomap_prealloc_size,
__entry->writeio_blocks = writeio_blocks;
),
TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d "
- "m_writeio_blocks %u",
+ "m_allocsize_blocks %u",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino,
__entry->blocks, __entry->shift, __entry->writeio_blocks)
)
@@ -1158,71 +1158,6 @@ DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_dax_write);
-DECLARE_EVENT_CLASS(xfs_page_class,
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off,
- unsigned int len),
- TP_ARGS(inode, page, off, len),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(pgoff_t, pgoff)
- __field(loff_t, size)
- __field(unsigned long, offset)
- __field(unsigned int, length)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = XFS_I(inode)->i_ino;
- __entry->pgoff = page_offset(page);
- __entry->size = i_size_read(inode);
- __entry->offset = off;
- __entry->length = len;
- ),
- TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
- "length %x",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->pgoff,
- __entry->size,
- __entry->offset,
- __entry->length)
-)
-
-#define DEFINE_PAGE_EVENT(name) \
-DEFINE_EVENT(xfs_page_class, name, \
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \
- unsigned int len), \
- TP_ARGS(inode, page, off, len))
-DEFINE_PAGE_EVENT(xfs_writepage);
-DEFINE_PAGE_EVENT(xfs_releasepage);
-DEFINE_PAGE_EVENT(xfs_invalidatepage);
-
-DECLARE_EVENT_CLASS(xfs_readpage_class,
- TP_PROTO(struct inode *inode, int nr_pages),
- TP_ARGS(inode, nr_pages),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(int, nr_pages)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->nr_pages = nr_pages;
- ),
- TP_printk("dev %d:%d ino 0x%llx nr_pages %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __entry->nr_pages)
-)
-
-#define DEFINE_READPAGE_EVENT(name) \
-DEFINE_EVENT(xfs_readpage_class, name, \
- TP_PROTO(struct inode *inode, int nr_pages), \
- TP_ARGS(inode, nr_pages))
-DEFINE_READPAGE_EVENT(xfs_vm_readpage);
-DEFINE_READPAGE_EVENT(xfs_vm_readpages);
-
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
int whichfork, struct xfs_bmbt_irec *irec),
@@ -1642,8 +1577,11 @@ DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
-DEFINE_ALLOC_EVENT(xfs_alloc_near_greater);
-DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_right);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_left);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup);
+DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup_done);
DEFINE_ALLOC_EVENT(xfs_alloc_near_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry);
DEFINE_ALLOC_EVENT(xfs_alloc_near_busy);
@@ -1663,6 +1601,32 @@ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
+TRACE_EVENT(xfs_alloc_cur_check,
+ TP_PROTO(struct xfs_mount *mp, xfs_btnum_t btnum, xfs_agblock_t bno,
+ xfs_extlen_t len, xfs_extlen_t diff, bool new),
+ TP_ARGS(mp, btnum, bno, len, diff, new),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(xfs_agblock_t, bno)
+ __field(xfs_extlen_t, len)
+ __field(xfs_extlen_t, diff)
+ __field(bool, new)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->btnum = btnum;
+ __entry->bno = bno;
+ __entry->len = len;
+ __entry->diff = diff;
+ __entry->new = new;
+ ),
+ TP_printk("dev %d:%d btree %s bno 0x%x len 0x%x diff 0x%x new %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
+ __entry->bno, __entry->len, __entry->diff, __entry->new)
+)
+
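
Like every other xfs tracepoint, xfs_alloc_cur_check can be enabled at runtime by writing 1 to /sys/kernel/debug/tracing/events/xfs/xfs_alloc_cur_check/enable and then reading trace_pipe, which emits the TP_printk format above for each near-allocation candidate the cursor evaluates.
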
DECLARE_EVENT_CLASS(xfs_da_class,
TP_PROTO(struct xfs_da_args *args),
TP_ARGS(args),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f4795fdb7389..3b208f9a865c 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -71,7 +71,7 @@ xfs_trans_free(
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
- kmem_zone_free(xfs_trans_zone, tp);
+ kmem_cache_free(xfs_trans_zone, tp);
}
/*
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 6ccfd75d3c24..00cc5b8734be 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -427,15 +427,15 @@ xfsaild_push(
case XFS_ITEM_FLUSHING:
/*
- * The item or its backing buffer is already beeing
+ * The item or its backing buffer is already being
* flushed. The typical reason for that is that an
* inode buffer is locked because we already pushed the
* updates to it as part of inode clustering.
*
* We do not want to stop flushing just because lots
- * of items are already beeing flushed, but we need to
+ * of items are already being flushed, but we need to
* re-try the flushing relatively soon if most of the
- * AIL is beeing flushed.
+ * AIL is being flushed.
*/
XFS_STATS_INC(mp, xs_push_ail_flushing);
trace_xfs_ail_flushing(lip);
@@ -612,7 +612,7 @@ xfsaild(
* The push is run asynchronously in a workqueue, which means the caller needs
* to handle waiting on the async flush for space to become available.
* We don't want to interrupt any push that is in progress, hence we only queue
- * work if we set the pushing bit approriately.
+ * work if we set the pushing bit appropriately.
*
* We do this unlocked - we only need to know whether there is anything in the
* AIL at the time we are called. We don't need to access the contents of
@@ -836,7 +836,7 @@ xfs_trans_ail_init(
init_waitqueue_head(&ailp->ail_empty);
ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
- ailp->ail_mount->m_fsname);
+ ailp->ail_mount->m_super->s_id);
if (IS_ERR(ailp->ail_task))
goto out_free_ailp;
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 16457465833b..a6fe2d8dc40f 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -25,8 +25,8 @@ STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
*/
void
xfs_trans_dqjoin(
- xfs_trans_t *tp,
- xfs_dquot_t *dqp)
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(dqp->q_logitem.qli_dquot == dqp);
@@ -49,8 +49,8 @@ xfs_trans_dqjoin(
*/
void
xfs_trans_log_dquot(
- xfs_trans_t *tp,
- xfs_dquot_t *dqp)
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -486,12 +486,12 @@ xfs_trans_apply_dquot_deltas(
*/
void
xfs_trans_unreserve_and_mod_dquots(
- xfs_trans_t *tp)
+ struct xfs_trans *tp)
{
int i, j;
- xfs_dquot_t *dqp;
+ struct xfs_dquot *dqp;
struct xfs_dqtrx *qtrx, *qa;
- bool locked;
+ bool locked;
if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
@@ -571,21 +571,21 @@ xfs_quota_warn(
*/
STATIC int
xfs_trans_dqresv(
- xfs_trans_t *tp,
- xfs_mount_t *mp,
- xfs_dquot_t *dqp,
- int64_t nblks,
- long ninos,
- uint flags)
+ struct xfs_trans *tp,
+ struct xfs_mount *mp,
+ struct xfs_dquot *dqp,
+ int64_t nblks,
+ long ninos,
+ uint flags)
{
- xfs_qcnt_t hardlimit;
- xfs_qcnt_t softlimit;
- time_t timer;
- xfs_qwarncnt_t warns;
- xfs_qwarncnt_t warnlimit;
- xfs_qcnt_t total_count;
- xfs_qcnt_t *resbcountp;
- xfs_quotainfo_t *q = mp->m_quotainfo;
+ xfs_qcnt_t hardlimit;
+ xfs_qcnt_t softlimit;
+ time_t timer;
+ xfs_qwarncnt_t warns;
+ xfs_qwarncnt_t warnlimit;
+ xfs_qcnt_t total_count;
+ xfs_qcnt_t *resbcountp;
+ struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_def_quota *defq;
@@ -824,13 +824,13 @@ xfs_trans_reserve_quota_nblks(
/*
* This routine is called to allocate a quotaoff log item.
*/
-xfs_qoff_logitem_t *
+struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
- xfs_trans_t *tp,
- xfs_qoff_logitem_t *startqoff,
+ struct xfs_trans *tp,
+ struct xfs_qoff_logitem *startqoff,
uint flags)
{
- xfs_qoff_logitem_t *q;
+ struct xfs_qoff_logitem *q;
ASSERT(tp != NULL);
@@ -852,8 +852,8 @@ xfs_trans_get_qoff_item(
*/
void
xfs_trans_log_quotaoff_item(
- xfs_trans_t *tp,
- xfs_qoff_logitem_t *qlp)
+ struct xfs_trans *tp,
+ struct xfs_qoff_logitem *qlp)
{
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
@@ -872,6 +872,6 @@ xfs_trans_free_dqinfo(
{
if (!tp->t_dqinfo)
return;
- kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
+ kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
tp->t_dqinfo = NULL;
}
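
The churn in this file is the ongoing removal of xfs's legacy typedefs (xfs_trans_t, xfs_dquot_t, and friends) in favour of explicit struct types; behaviour is unchanged.
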
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index cb895b1df5e4..383f0203d103 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -11,6 +11,7 @@
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_attr.h"
+#include "xfs_acl.h"
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
diff --git a/include/Kbuild b/include/Kbuild
deleted file mode 100644
index 25edc43483e0..000000000000
--- a/include/Kbuild
+++ /dev/null
@@ -1,1187 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Add header-test-$(CONFIG_...) guard to headers that are only compiled
-# for particular architectures.
-#
-# Headers listed in header-test- are excluded from the test coverage.
-# Many headers are excluded for now because they fail to build. Please
-# consider to fix headers first before adding new ones to the blacklist.
-#
-# Sorted alphabetically.
-header-test- += acpi/acbuffer.h
-header-test- += acpi/acpi.h
-header-test- += acpi/acpi_bus.h
-header-test- += acpi/acpi_drivers.h
-header-test- += acpi/acpi_io.h
-header-test- += acpi/acpi_lpat.h
-header-test- += acpi/acpiosxf.h
-header-test- += acpi/acpixf.h
-header-test- += acpi/acrestyp.h
-header-test- += acpi/actbl.h
-header-test- += acpi/actbl1.h
-header-test- += acpi/actbl2.h
-header-test- += acpi/actbl3.h
-header-test- += acpi/actypes.h
-header-test- += acpi/battery.h
-header-test- += acpi/cppc_acpi.h
-header-test- += acpi/nfit.h
-header-test- += acpi/platform/acenv.h
-header-test- += acpi/platform/acenvex.h
-header-test- += acpi/platform/acintel.h
-header-test- += acpi/platform/aclinux.h
-header-test- += acpi/platform/aclinuxex.h
-header-test- += acpi/processor.h
-header-test-$(CONFIG_X86) += clocksource/hyperv_timer.h
-header-test- += clocksource/timer-sp804.h
-header-test- += crypto/cast_common.h
-header-test- += crypto/internal/cryptouser.h
-header-test- += crypto/pkcs7.h
-header-test- += crypto/poly1305.h
-header-test- += crypto/sha3.h
-header-test- += drm/ati_pcigart.h
-header-test- += drm/bridge/dw_hdmi.h
-header-test- += drm/bridge/dw_mipi_dsi.h
-header-test- += drm/drm_audio_component.h
-header-test- += drm/drm_auth.h
-header-test- += drm/drm_debugfs.h
-header-test- += drm/drm_debugfs_crc.h
-header-test- += drm/drm_displayid.h
-header-test- += drm/drm_encoder_slave.h
-header-test- += drm/drm_fb_cma_helper.h
-header-test- += drm/drm_fb_helper.h
-header-test- += drm/drm_fixed.h
-header-test- += drm/drm_format_helper.h
-header-test- += drm/drm_lease.h
-header-test- += drm/drm_legacy.h
-header-test- += drm/drm_panel.h
-header-test- += drm/drm_plane_helper.h
-header-test- += drm/drm_rect.h
-header-test- += drm/i915_component.h
-header-test- += drm/intel-gtt.h
-header-test- += drm/tinydrm/tinydrm-helpers.h
-header-test- += drm/ttm/ttm_debug.h
-header-test- += keys/asymmetric-parser.h
-header-test- += keys/asymmetric-subtype.h
-header-test- += keys/asymmetric-type.h
-header-test- += keys/big_key-type.h
-header-test- += keys/request_key_auth-type.h
-header-test- += kvm/arm_arch_timer.h
-header-test-$(CONFIG_ARM) += kvm/arm_hypercalls.h
-header-test-$(CONFIG_ARM64) += kvm/arm_hypercalls.h
-header-test- += kvm/arm_pmu.h
-header-test-$(CONFIG_ARM) += kvm/arm_psci.h
-header-test-$(CONFIG_ARM64) += kvm/arm_psci.h
-header-test- += kvm/arm_vgic.h
-header-test- += linux/8250_pci.h
-header-test- += linux/a.out.h
-header-test- += linux/adxl.h
-header-test- += linux/agpgart.h
-header-test- += linux/alcor_pci.h
-header-test- += linux/amba/clcd.h
-header-test- += linux/amba/pl080.h
-header-test- += linux/amd-iommu.h
-header-test-$(CONFIG_ARM) += linux/arm-cci.h
-header-test-$(CONFIG_ARM64) += linux/arm-cci.h
-header-test- += linux/arm_sdei.h
-header-test- += linux/asn1_decoder.h
-header-test- += linux/ata_platform.h
-header-test- += linux/ath9k_platform.h
-header-test- += linux/atm_tcp.h
-header-test- += linux/atomic-fallback.h
-header-test- += linux/avf/virtchnl.h
-header-test- += linux/bcm47xx_sprom.h
-header-test- += linux/bcma/bcma_driver_gmac_cmn.h
-header-test- += linux/bcma/bcma_driver_mips.h
-header-test- += linux/bcma/bcma_driver_pci.h
-header-test- += linux/bcma/bcma_driver_pcie2.h
-header-test- += linux/bit_spinlock.h
-header-test- += linux/blk-mq-rdma.h
-header-test- += linux/blk-mq.h
-header-test- += linux/blktrace_api.h
-header-test- += linux/blockgroup_lock.h
-header-test- += linux/bma150.h
-header-test- += linux/bpf_lirc.h
-header-test- += linux/bpf_types.h
-header-test- += linux/bsg-lib.h
-header-test- += linux/bsg.h
-header-test- += linux/btf.h
-header-test- += linux/btree-128.h
-header-test- += linux/btree-type.h
-header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h
-header-test- += linux/byteorder/generic.h
-header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h
-header-test- += linux/c2port.h
-header-test- += linux/can/dev/peak_canfd.h
-header-test- += linux/can/platform/cc770.h
-header-test- += linux/can/platform/sja1000.h
-header-test- += linux/ceph/ceph_features.h
-header-test- += linux/ceph/ceph_frag.h
-header-test- += linux/ceph/ceph_fs.h
-header-test- += linux/ceph/debugfs.h
-header-test- += linux/ceph/msgr.h
-header-test- += linux/ceph/rados.h
-header-test- += linux/cgroup_subsys.h
-header-test- += linux/clk/sunxi-ng.h
-header-test- += linux/clk/ti.h
-header-test- += linux/cn_proc.h
-header-test- += linux/coda_psdev.h
-header-test- += linux/compaction.h
-header-test- += linux/console_struct.h
-header-test- += linux/count_zeros.h
-header-test- += linux/cs5535.h
-header-test- += linux/cuda.h
-header-test- += linux/cyclades.h
-header-test- += linux/dcookies.h
-header-test- += linux/delayacct.h
-header-test- += linux/delayed_call.h
-header-test- += linux/device-mapper.h
-header-test- += linux/devpts_fs.h
-header-test- += linux/dio.h
-header-test- += linux/dirent.h
-header-test- += linux/dlm_plock.h
-header-test- += linux/dm-dirty-log.h
-header-test- += linux/dm-region-hash.h
-header-test- += linux/dma-debug.h
-header-test- += linux/dma/mmp-pdma.h
-header-test- += linux/dma/sprd-dma.h
-header-test- += linux/dns_resolver.h
-header-test- += linux/drbd_genl.h
-header-test- += linux/drbd_genl_api.h
-header-test- += linux/dw_apb_timer.h
-header-test- += linux/dynamic_debug.h
-header-test- += linux/dynamic_queue_limits.h
-header-test- += linux/ecryptfs.h
-header-test- += linux/edma.h
-header-test- += linux/eeprom_93cx6.h
-header-test- += linux/efs_vh.h
-header-test- += linux/elevator.h
-header-test- += linux/elfcore-compat.h
-header-test- += linux/error-injection.h
-header-test- += linux/errseq.h
-header-test- += linux/eventpoll.h
-header-test- += linux/ext2_fs.h
-header-test- += linux/f75375s.h
-header-test- += linux/falloc.h
-header-test- += linux/fault-inject.h
-header-test- += linux/fbcon.h
-header-test- += linux/firmware/intel/stratix10-svc-client.h
-header-test- += linux/firmware/meson/meson_sm.h
-header-test- += linux/firmware/trusted_foundations.h
-header-test- += linux/firmware/xlnx-zynqmp.h
-header-test- += linux/fixp-arith.h
-header-test- += linux/flat.h
-header-test- += linux/fs_types.h
-header-test- += linux/fs_uart_pd.h
-header-test- += linux/fsi-occ.h
-header-test- += linux/fsi-sbefifo.h
-header-test- += linux/fsl/bestcomm/ata.h
-header-test- += linux/fsl/bestcomm/bestcomm.h
-header-test- += linux/fsl/bestcomm/bestcomm_priv.h
-header-test- += linux/fsl/bestcomm/fec.h
-header-test- += linux/fsl/bestcomm/gen_bd.h
-header-test- += linux/fsl/bestcomm/sram.h
-header-test- += linux/fsl_hypervisor.h
-header-test- += linux/fsldma.h
-header-test- += linux/ftrace_irq.h
-header-test- += linux/gameport.h
-header-test- += linux/genl_magic_func.h
-header-test- += linux/genl_magic_struct.h
-header-test- += linux/gpio/aspeed.h
-header-test- += linux/gpio/gpio-reg.h
-header-test- += linux/hid-debug.h
-header-test- += linux/hiddev.h
-header-test- += linux/hippidevice.h
-header-test- += linux/hmm.h
-header-test- += linux/hp_sdc.h
-header-test- += linux/huge_mm.h
-header-test- += linux/hugetlb_cgroup.h
-header-test- += linux/hugetlb_inline.h
-header-test- += linux/hwmon-vid.h
-header-test- += linux/hyperv.h
-header-test- += linux/i2c-algo-pca.h
-header-test- += linux/i2c-algo-pcf.h
-header-test- += linux/i3c/ccc.h
-header-test- += linux/i3c/device.h
-header-test- += linux/i3c/master.h
-header-test- += linux/i8042.h
-header-test- += linux/ide.h
-header-test- += linux/idle_inject.h
-header-test- += linux/if_frad.h
-header-test- += linux/if_rmnet.h
-header-test- += linux/if_tap.h
-header-test- += linux/iio/accel/kxcjk_1013.h
-header-test- += linux/iio/adc/ad_sigma_delta.h
-header-test- += linux/iio/buffer-dma.h
-header-test- += linux/iio/buffer_impl.h
-header-test- += linux/iio/common/st_sensors.h
-header-test- += linux/iio/common/st_sensors_i2c.h
-header-test- += linux/iio/common/st_sensors_spi.h
-header-test- += linux/iio/dac/ad5421.h
-header-test- += linux/iio/dac/ad5504.h
-header-test- += linux/iio/dac/ad5791.h
-header-test- += linux/iio/dac/max517.h
-header-test- += linux/iio/dac/mcp4725.h
-header-test- += linux/iio/frequency/ad9523.h
-header-test- += linux/iio/frequency/adf4350.h
-header-test- += linux/iio/hw-consumer.h
-header-test- += linux/iio/imu/adis.h
-header-test- += linux/iio/sysfs.h
-header-test- += linux/iio/timer/stm32-timer-trigger.h
-header-test- += linux/iio/trigger.h
-header-test- += linux/iio/triggered_event.h
-header-test- += linux/imx-media.h
-header-test- += linux/inet_diag.h
-header-test- += linux/init_ohci1394_dma.h
-header-test- += linux/initrd.h
-header-test- += linux/input/adp5589.h
-header-test- += linux/input/bu21013.h
-header-test- += linux/input/cma3000.h
-header-test- += linux/input/kxtj9.h
-header-test- += linux/input/lm8333.h
-header-test- += linux/input/sparse-keymap.h
-header-test- += linux/input/touchscreen.h
-header-test- += linux/input/tps6507x-ts.h
-header-test-$(CONFIG_X86) += linux/intel-iommu.h
-header-test- += linux/intel-ish-client-if.h
-header-test- += linux/intel-pti.h
-header-test- += linux/intel-svm.h
-header-test- += linux/interconnect-provider.h
-header-test- += linux/ioc3.h
-header-test-$(CONFIG_BLOCK) += linux/iomap.h
-header-test- += linux/ipack.h
-header-test- += linux/irq_cpustat.h
-header-test- += linux/irq_poll.h
-header-test- += linux/irqchip/arm-gic-v3.h
-header-test- += linux/irqchip/arm-gic-v4.h
-header-test- += linux/irqchip/irq-madera.h
-header-test- += linux/irqchip/irq-sa11x0.h
-header-test- += linux/irqchip/mxs.h
-header-test- += linux/irqchip/versatile-fpga.h
-header-test- += linux/irqdesc.h
-header-test- += linux/irqflags.h
-header-test- += linux/iscsi_boot_sysfs.h
-header-test- += linux/isdn/capiutil.h
-header-test- += linux/isdn/hdlc.h
-header-test- += linux/isdn_ppp.h
-header-test- += linux/jbd2.h
-header-test- += linux/jump_label.h
-header-test- += linux/jump_label_ratelimit.h
-header-test- += linux/jz4740-adc.h
-header-test- += linux/kasan.h
-header-test- += linux/kcore.h
-header-test- += linux/kdev_t.h
-header-test- += linux/kernelcapi.h
-header-test- += linux/khugepaged.h
-header-test- += linux/kobj_map.h
-header-test- += linux/kobject_ns.h
-header-test- += linux/kvm_host.h
-header-test- += linux/kvm_irqfd.h
-header-test- += linux/kvm_para.h
-header-test- += linux/lantiq.h
-header-test- += linux/lapb.h
-header-test- += linux/latencytop.h
-header-test- += linux/led-lm3530.h
-header-test- += linux/leds-bd2802.h
-header-test- += linux/leds-lp3944.h
-header-test- += linux/leds-lp3952.h
-header-test- += linux/leds_pwm.h
-header-test- += linux/libata.h
-header-test- += linux/license.h
-header-test- += linux/lightnvm.h
-header-test- += linux/lis3lv02d.h
-header-test- += linux/list_bl.h
-header-test- += linux/list_lru.h
-header-test- += linux/list_nulls.h
-header-test- += linux/lockd/share.h
-header-test- += linux/lzo.h
-header-test- += linux/mailbox/zynqmp-ipi-message.h
-header-test- += linux/maple.h
-header-test- += linux/mbcache.h
-header-test- += linux/mbus.h
-header-test- += linux/mc146818rtc.h
-header-test- += linux/mc6821.h
-header-test- += linux/mdev.h
-header-test- += linux/mem_encrypt.h
-header-test- += linux/memfd.h
-header-test- += linux/mfd/88pm80x.h
-header-test- += linux/mfd/88pm860x.h
-header-test- += linux/mfd/abx500/ab8500-bm.h
-header-test- += linux/mfd/abx500/ab8500-gpadc.h
-header-test- += linux/mfd/adp5520.h
-header-test- += linux/mfd/arizona/pdata.h
-header-test- += linux/mfd/as3711.h
-header-test- += linux/mfd/as3722.h
-header-test- += linux/mfd/da903x.h
-header-test- += linux/mfd/da9055/pdata.h
-header-test- += linux/mfd/db8500-prcmu.h
-header-test- += linux/mfd/dbx500-prcmu.h
-header-test- += linux/mfd/dln2.h
-header-test- += linux/mfd/dm355evm_msp.h
-header-test- += linux/mfd/ds1wm.h
-header-test- += linux/mfd/ezx-pcap.h
-header-test- += linux/mfd/intel_msic.h
-header-test- += linux/mfd/janz.h
-header-test- += linux/mfd/kempld.h
-header-test- += linux/mfd/lm3533.h
-header-test- += linux/mfd/lp8788-isink.h
-header-test- += linux/mfd/lpc_ich.h
-header-test- += linux/mfd/max77693.h
-header-test- += linux/mfd/max8998-private.h
-header-test- += linux/mfd/menelaus.h
-header-test- += linux/mfd/mt6397/core.h
-header-test- += linux/mfd/palmas.h
-header-test- += linux/mfd/pcf50633/backlight.h
-header-test- += linux/mfd/rc5t583.h
-header-test- += linux/mfd/retu.h
-header-test- += linux/mfd/samsung/core.h
-header-test- += linux/mfd/si476x-platform.h
-header-test- += linux/mfd/si476x-reports.h
-header-test- += linux/mfd/sky81452.h
-header-test- += linux/mfd/smsc.h
-header-test- += linux/mfd/sta2x11-mfd.h
-header-test- += linux/mfd/stmfx.h
-header-test- += linux/mfd/tc3589x.h
-header-test- += linux/mfd/tc6387xb.h
-header-test- += linux/mfd/tc6393xb.h
-header-test- += linux/mfd/tps65090.h
-header-test- += linux/mfd/tps6586x.h
-header-test- += linux/mfd/tps65910.h
-header-test- += linux/mfd/tps80031.h
-header-test- += linux/mfd/ucb1x00.h
-header-test- += linux/mfd/viperboard.h
-header-test- += linux/mfd/wm831x/core.h
-header-test- += linux/mfd/wm831x/otp.h
-header-test- += linux/mfd/wm831x/pdata.h
-header-test- += linux/mfd/wm8994/core.h
-header-test- += linux/mfd/wm8994/pdata.h
-header-test- += linux/mlx4/doorbell.h
-header-test- += linux/mlx4/srq.h
-header-test- += linux/mlx5/doorbell.h
-header-test- += linux/mlx5/eq.h
-header-test- += linux/mlx5/fs_helpers.h
-header-test- += linux/mlx5/mlx5_ifc.h
-header-test- += linux/mlx5/mlx5_ifc_fpga.h
-header-test- += linux/mm-arch-hooks.h
-header-test- += linux/mm_inline.h
-header-test- += linux/mmu_context.h
-header-test- += linux/mpage.h
-header-test- += linux/mtd/bbm.h
-header-test- += linux/mtd/cfi.h
-header-test- += linux/mtd/doc2000.h
-header-test- += linux/mtd/flashchip.h
-header-test- += linux/mtd/ftl.h
-header-test- += linux/mtd/gen_probe.h
-header-test- += linux/mtd/jedec.h
-header-test- += linux/mtd/nand_bch.h
-header-test- += linux/mtd/nand_ecc.h
-header-test- += linux/mtd/ndfc.h
-header-test- += linux/mtd/onenand.h
-header-test- += linux/mtd/pismo.h
-header-test- += linux/mtd/plat-ram.h
-header-test- += linux/mtd/spi-nor.h
-header-test- += linux/mv643xx.h
-header-test- += linux/mv643xx_eth.h
-header-test- += linux/mvebu-pmsu.h
-header-test- += linux/mxm-wmi.h
-header-test- += linux/n_r3964.h
-header-test- += linux/ndctl.h
-header-test- += linux/nfs.h
-header-test- += linux/nfs_fs_i.h
-header-test- += linux/nfs_fs_sb.h
-header-test- += linux/nfs_page.h
-header-test- += linux/nfs_xdr.h
-header-test- += linux/nfsacl.h
-header-test- += linux/nl802154.h
-header-test- += linux/ns_common.h
-header-test- += linux/nsc_gpio.h
-header-test- += linux/ntb_transport.h
-header-test- += linux/nubus.h
-header-test- += linux/nvme-fc-driver.h
-header-test- += linux/nvme-fc.h
-header-test- += linux/nvme-rdma.h
-header-test- += linux/nvram.h
-header-test- += linux/objagg.h
-header-test- += linux/of_clk.h
-header-test- += linux/of_net.h
-header-test- += linux/of_pdt.h
-header-test- += linux/olpc-ec.h
-header-test- += linux/omap-dma.h
-header-test- += linux/omap-dmaengine.h
-header-test- += linux/omap-gpmc.h
-header-test- += linux/omap-iommu.h
-header-test- += linux/omap-mailbox.h
-header-test- += linux/once.h
-header-test- += linux/osq_lock.h
-header-test- += linux/overflow.h
-header-test- += linux/page-flags-layout.h
-header-test- += linux/page-isolation.h
-header-test- += linux/page_ext.h
-header-test- += linux/page_owner.h
-header-test- += linux/parport_pc.h
-header-test- += linux/parser.h
-header-test- += linux/pci-acpi.h
-header-test- += linux/pci-dma-compat.h
-header-test- += linux/pci_hotplug.h
-header-test- += linux/pda_power.h
-header-test- += linux/perf/arm_pmu.h
-header-test- += linux/perf_regs.h
-header-test- += linux/phy/omap_control_phy.h
-header-test- += linux/phy/tegra/xusb.h
-header-test- += linux/phy/ulpi_phy.h
-header-test- += linux/phy_fixed.h
-header-test- += linux/pipe_fs_i.h
-header-test- += linux/pktcdvd.h
-header-test- += linux/pl320-ipc.h
-header-test- += linux/pl353-smc.h
-header-test- += linux/platform_data/ad5449.h
-header-test- += linux/platform_data/ad5755.h
-header-test- += linux/platform_data/ad7266.h
-header-test- += linux/platform_data/ad7291.h
-header-test- += linux/platform_data/ad7298.h
-header-test- += linux/platform_data/ad7303.h
-header-test- += linux/platform_data/ad7791.h
-header-test- += linux/platform_data/ad7793.h
-header-test- += linux/platform_data/ad7887.h
-header-test- += linux/platform_data/adau17x1.h
-header-test- += linux/platform_data/adp8870.h
-header-test- += linux/platform_data/ads1015.h
-header-test- += linux/platform_data/ads7828.h
-header-test- += linux/platform_data/apds990x.h
-header-test- += linux/platform_data/arm-ux500-pm.h
-header-test- += linux/platform_data/asoc-s3c.h
-header-test- += linux/platform_data/at91_adc.h
-header-test- += linux/platform_data/ata-pxa.h
-header-test- += linux/platform_data/atmel.h
-header-test- += linux/platform_data/bh1770glc.h
-header-test- += linux/platform_data/brcmfmac.h
-header-test- += linux/platform_data/cros_ec_commands.h
-header-test- += linux/platform_data/clk-u300.h
-header-test- += linux/platform_data/cyttsp4.h
-header-test- += linux/platform_data/dma-coh901318.h
-header-test- += linux/platform_data/dma-imx-sdma.h
-header-test- += linux/platform_data/dma-mcf-edma.h
-header-test- += linux/platform_data/dma-s3c24xx.h
-header-test- += linux/platform_data/dmtimer-omap.h
-header-test- += linux/platform_data/dsa.h
-header-test- += linux/platform_data/edma.h
-header-test- += linux/platform_data/elm.h
-header-test- += linux/platform_data/emif_plat.h
-header-test- += linux/platform_data/fsa9480.h
-header-test- += linux/platform_data/g762.h
-header-test- += linux/platform_data/gpio-ath79.h
-header-test- += linux/platform_data/gpio-davinci.h
-header-test- += linux/platform_data/gpio-dwapb.h
-header-test- += linux/platform_data/gpio-htc-egpio.h
-header-test- += linux/platform_data/gpmc-omap.h
-header-test- += linux/platform_data/hsmmc-omap.h
-header-test- += linux/platform_data/hwmon-s3c.h
-header-test- += linux/platform_data/i2c-davinci.h
-header-test- += linux/platform_data/i2c-imx.h
-header-test- += linux/platform_data/i2c-mux-reg.h
-header-test- += linux/platform_data/i2c-ocores.h
-header-test- += linux/platform_data/i2c-xiic.h
-header-test- += linux/platform_data/intel-spi.h
-header-test- += linux/platform_data/invensense_mpu6050.h
-header-test- += linux/platform_data/irda-pxaficp.h
-header-test- += linux/platform_data/irda-sa11x0.h
-header-test- += linux/platform_data/itco_wdt.h
-header-test- += linux/platform_data/jz4740/jz4740_nand.h
-header-test- += linux/platform_data/keyboard-pxa930_rotary.h
-header-test- += linux/platform_data/keypad-omap.h
-header-test- += linux/platform_data/leds-lp55xx.h
-header-test- += linux/platform_data/leds-omap.h
-header-test- += linux/platform_data/lp855x.h
-header-test- += linux/platform_data/lp8727.h
-header-test- += linux/platform_data/max197.h
-header-test- += linux/platform_data/max3421-hcd.h
-header-test- += linux/platform_data/max732x.h
-header-test- += linux/platform_data/mcs.h
-header-test- += linux/platform_data/mdio-bcm-unimac.h
-header-test- += linux/platform_data/mdio-gpio.h
-header-test- += linux/platform_data/media/si4713.h
-header-test- += linux/platform_data/mlxreg.h
-header-test- += linux/platform_data/mmc-omap.h
-header-test- += linux/platform_data/mmc-sdhci-s3c.h
-header-test- += linux/platform_data/mmp_audio.h
-header-test- += linux/platform_data/mtd-orion_nand.h
-header-test- += linux/platform_data/mv88e6xxx.h
-header-test- += linux/platform_data/net-cw1200.h
-header-test- += linux/platform_data/omap-twl4030.h
-header-test- += linux/platform_data/omapdss.h
-header-test- += linux/platform_data/pcf857x.h
-header-test- += linux/platform_data/pixcir_i2c_ts.h
-header-test- += linux/platform_data/pwm_omap_dmtimer.h
-header-test- += linux/platform_data/pxa2xx_udc.h
-header-test- += linux/platform_data/pxa_sdhci.h
-header-test- += linux/platform_data/remoteproc-omap.h
-header-test- += linux/platform_data/sa11x0-serial.h
-header-test- += linux/platform_data/sc18is602.h
-header-test- += linux/platform_data/sdhci-pic32.h
-header-test- += linux/platform_data/serial-sccnxp.h
-header-test- += linux/platform_data/sht3x.h
-header-test- += linux/platform_data/shtc1.h
-header-test- += linux/platform_data/si5351.h
-header-test- += linux/platform_data/sky81452-backlight.h
-header-test- += linux/platform_data/spi-davinci.h
-header-test- += linux/platform_data/spi-ep93xx.h
-header-test- += linux/platform_data/spi-mt65xx.h
-header-test- += linux/platform_data/st_sensors_pdata.h
-header-test- += linux/platform_data/ti-sysc.h
-header-test- += linux/platform_data/timer-ixp4xx.h
-header-test- += linux/platform_data/touchscreen-s3c2410.h
-header-test- += linux/platform_data/tsc2007.h
-header-test- += linux/platform_data/tsl2772.h
-header-test- += linux/platform_data/uio_pruss.h
-header-test- += linux/platform_data/usb-davinci.h
-header-test- += linux/platform_data/usb-ehci-mxc.h
-header-test- += linux/platform_data/usb-ehci-orion.h
-header-test- += linux/platform_data/usb-mx2.h
-header-test- += linux/platform_data/usb-ohci-s3c2410.h
-header-test- += linux/platform_data/usb-omap.h
-header-test- += linux/platform_data/usb-s3c2410_udc.h
-header-test- += linux/platform_data/usb3503.h
-header-test- += linux/platform_data/ux500_wdt.h
-header-test- += linux/platform_data/video-clcd-versatile.h
-header-test- += linux/platform_data/video-imxfb.h
-header-test- += linux/platform_data/video-pxafb.h
-header-test- += linux/platform_data/video_s3c.h
-header-test- += linux/platform_data/voltage-omap.h
-header-test- += linux/platform_data/x86/apple.h
-header-test- += linux/platform_data/x86/clk-pmc-atom.h
-header-test- += linux/platform_data/x86/pmc_atom.h
-header-test- += linux/platform_data/xtalk-bridge.h
-header-test- += linux/pm2301_charger.h
-header-test- += linux/pm_wakeirq.h
-header-test- += linux/pm_wakeup.h
-header-test- += linux/pmbus.h
-header-test- += linux/pmu.h
-header-test- += linux/posix_acl.h
-header-test- += linux/posix_acl_xattr.h
-header-test- += linux/power/ab8500.h
-header-test- += linux/power/bq27xxx_battery.h
-header-test- += linux/power/generic-adc-battery.h
-header-test- += linux/power/jz4740-battery.h
-header-test- += linux/power/max17042_battery.h
-header-test- += linux/power/max8903_charger.h
-header-test- += linux/ppp-comp.h
-header-test- += linux/pps-gpio.h
-header-test- += linux/pr.h
-header-test- += linux/proc_ns.h
-header-test- += linux/processor.h
-header-test- += linux/psi.h
-header-test- += linux/psp-sev.h
-header-test- += linux/pstore.h
-header-test- += linux/ptr_ring.h
-header-test- += linux/ptrace.h
-header-test- += linux/qcom-geni-se.h
-header-test- += linux/qed/eth_common.h
-header-test- += linux/qed/fcoe_common.h
-header-test- += linux/qed/iscsi_common.h
-header-test- += linux/qed/iwarp_common.h
-header-test- += linux/qed/qed_eth_if.h
-header-test- += linux/qed/qed_fcoe_if.h
-header-test- += linux/qed/rdma_common.h
-header-test- += linux/qed/storage_common.h
-header-test- += linux/qed/tcp_common.h
-header-test- += linux/qnx6_fs.h
-header-test- += linux/quicklist.h
-header-test- += linux/ramfs.h
-header-test- += linux/range.h
-header-test- += linux/rcu_node_tree.h
-header-test- += linux/rculist_bl.h
-header-test- += linux/rculist_nulls.h
-header-test- += linux/rcutiny.h
-header-test- += linux/rcutree.h
-header-test- += linux/reboot-mode.h
-header-test- += linux/regulator/fixed.h
-header-test- += linux/regulator/gpio-regulator.h
-header-test- += linux/regulator/max8973-regulator.h
-header-test- += linux/regulator/of_regulator.h
-header-test- += linux/regulator/tps51632-regulator.h
-header-test- += linux/regulator/tps62360.h
-header-test- += linux/regulator/tps6507x.h
-header-test- += linux/regulator/userspace-consumer.h
-header-test- += linux/remoteproc/st_slim_rproc.h
-header-test- += linux/reset/socfpga.h
-header-test- += linux/reset/sunxi.h
-header-test- += linux/rtc/m48t59.h
-header-test- += linux/rtc/rtc-omap.h
-header-test- += linux/rtc/sirfsoc_rtciobrg.h
-header-test- += linux/rwlock.h
-header-test- += linux/rwlock_types.h
-header-test- += linux/scc.h
-header-test- += linux/sched/deadline.h
-header-test- += linux/sched/smt.h
-header-test- += linux/sched/sysctl.h
-header-test- += linux/sched_clock.h
-header-test- += linux/scpi_protocol.h
-header-test- += linux/scx200_gpio.h
-header-test- += linux/seccomp.h
-header-test- += linux/sed-opal.h
-header-test- += linux/seg6_iptunnel.h
-header-test- += linux/selection.h
-header-test- += linux/set_memory.h
-header-test- += linux/shrinker.h
-header-test- += linux/sirfsoc_dma.h
-header-test- += linux/skb_array.h
-header-test- += linux/slab_def.h
-header-test- += linux/slub_def.h
-header-test- += linux/sm501.h
-header-test- += linux/smc91x.h
-header-test- += linux/static_key.h
-header-test- += linux/soc/actions/owl-sps.h
-header-test- += linux/soc/amlogic/meson-canvas.h
-header-test- += linux/soc/brcmstb/brcmstb.h
-header-test- += linux/soc/ixp4xx/npe.h
-header-test- += linux/soc/mediatek/infracfg.h
-header-test- += linux/soc/qcom/smd-rpm.h
-header-test- += linux/soc/qcom/smem.h
-header-test- += linux/soc/qcom/smem_state.h
-header-test- += linux/soc/qcom/wcnss_ctrl.h
-header-test- += linux/soc/renesas/rcar-rst.h
-header-test- += linux/soc/samsung/exynos-pmu.h
-header-test- += linux/soc/sunxi/sunxi_sram.h
-header-test- += linux/soc/ti/ti-msgmgr.h
-header-test- += linux/soc/ti/ti_sci_inta_msi.h
-header-test- += linux/soc/ti/ti_sci_protocol.h
-header-test- += linux/soundwire/sdw.h
-header-test- += linux/soundwire/sdw_intel.h
-header-test- += linux/soundwire/sdw_type.h
-header-test- += linux/spi/ad7877.h
-header-test- += linux/spi/ads7846.h
-header-test- += linux/spi/at86rf230.h
-header-test- += linux/spi/ds1305.h
-header-test- += linux/spi/libertas_spi.h
-header-test- += linux/spi/lms283gf05.h
-header-test- += linux/spi/max7301.h
-header-test- += linux/spi/mcp23s08.h
-header-test- += linux/spi/rspi.h
-header-test- += linux/spi/s3c24xx.h
-header-test- += linux/spi/sh_msiof.h
-header-test- += linux/spi/spi-fsl-dspi.h
-header-test- += linux/spi/spi_bitbang.h
-header-test- += linux/spi/spi_gpio.h
-header-test- += linux/spi/xilinx_spi.h
-header-test- += linux/spinlock_api_smp.h
-header-test- += linux/spinlock_api_up.h
-header-test- += linux/spinlock_types.h
-header-test- += linux/splice.h
-header-test- += linux/sram.h
-header-test- += linux/srcutiny.h
-header-test- += linux/srcutree.h
-header-test- += linux/ssb/ssb_driver_chipcommon.h
-header-test- += linux/ssb/ssb_driver_extif.h
-header-test- += linux/ssb/ssb_driver_mips.h
-header-test- += linux/ssb/ssb_driver_pci.h
-header-test- += linux/ssbi.h
-header-test- += linux/stackdepot.h
-header-test- += linux/stmp3xxx_rtc_wdt.h
-header-test- += linux/string_helpers.h
-header-test- += linux/sungem_phy.h
-header-test- += linux/sunrpc/msg_prot.h
-header-test- += linux/sunrpc/rpc_pipe_fs.h
-header-test- += linux/sunrpc/xprtmultipath.h
-header-test- += linux/sunrpc/xprtsock.h
-header-test- += linux/sunxi-rsb.h
-header-test- += linux/svga.h
-header-test- += linux/sw842.h
-header-test- += linux/swapfile.h
-header-test- += linux/swapops.h
-header-test- += linux/swiotlb.h
-header-test- += linux/sysv_fs.h
-header-test- += linux/t10-pi.h
-header-test- += linux/task_io_accounting.h
-header-test- += linux/tick.h
-header-test- += linux/timb_dma.h
-header-test- += linux/timekeeping.h
-header-test- += linux/timekeeping32.h
-header-test- += linux/ts-nbus.h
-header-test- += linux/tsacct_kern.h
-header-test- += linux/tty_flip.h
-header-test- += linux/tty_ldisc.h
-header-test- += linux/ucb1400.h
-header-test- += linux/usb/association.h
-header-test- += linux/usb/cdc-wdm.h
-header-test- += linux/usb/cdc_ncm.h
-header-test- += linux/usb/ezusb.h
-header-test- += linux/usb/gadget_configfs.h
-header-test- += linux/usb/gpio_vbus.h
-header-test- += linux/usb/hcd.h
-header-test- += linux/usb/iowarrior.h
-header-test- += linux/usb/irda.h
-header-test- += linux/usb/isp116x.h
-header-test- += linux/usb/isp1362.h
-header-test- += linux/usb/musb.h
-header-test- += linux/usb/net2280.h
-header-test- += linux/usb/ohci_pdriver.h
-header-test- += linux/usb/otg-fsm.h
-header-test- += linux/usb/pd_ado.h
-header-test- += linux/usb/r8a66597.h
-header-test- += linux/usb/rndis_host.h
-header-test- += linux/usb/serial.h
-header-test- += linux/usb/sl811.h
-header-test- += linux/usb/storage.h
-header-test- += linux/usb/uas.h
-header-test- += linux/usb/usb338x.h
-header-test- += linux/usb/usbnet.h
-header-test- += linux/usb/wusb-wa.h
-header-test- += linux/usb/xhci-dbgp.h
-header-test- += linux/usb_usual.h
-header-test- += linux/user-return-notifier.h
-header-test- += linux/userfaultfd_k.h
-header-test- += linux/verification.h
-header-test- += linux/vgaarb.h
-header-test- += linux/via_core.h
-header-test- += linux/via_i2c.h
-header-test- += linux/virtio_byteorder.h
-header-test- += linux/virtio_ring.h
-header-test- += linux/visorbus.h
-header-test- += linux/vme.h
-header-test- += linux/vmstat.h
-header-test- += linux/vmw_vmci_api.h
-header-test- += linux/vmw_vmci_defs.h
-header-test- += linux/vringh.h
-header-test- += linux/vt_buffer.h
-header-test- += linux/zorro.h
-header-test- += linux/zpool.h
-header-test- += math-emu/double.h
-header-test- += math-emu/op-common.h
-header-test- += math-emu/quad.h
-header-test- += math-emu/single.h
-header-test- += math-emu/soft-fp.h
-header-test- += media/davinci/dm355_ccdc.h
-header-test- += media/davinci/dm644x_ccdc.h
-header-test- += media/davinci/isif.h
-header-test- += media/davinci/vpbe_osd.h
-header-test- += media/davinci/vpbe_types.h
-header-test- += media/davinci/vpif_types.h
-header-test- += media/demux.h
-header-test- += media/drv-intf/soc_mediabus.h
-header-test- += media/dvb_net.h
-header-test- += media/fwht-ctrls.h
-header-test- += media/i2c/ad9389b.h
-header-test- += media/i2c/adv7343.h
-header-test- += media/i2c/adv7511.h
-header-test- += media/i2c/adv7842.h
-header-test- += media/i2c/m5mols.h
-header-test- += media/i2c/mt9m032.h
-header-test- += media/i2c/mt9t112.h
-header-test- += media/i2c/mt9v032.h
-header-test- += media/i2c/ov2659.h
-header-test- += media/i2c/ov7670.h
-header-test- += media/i2c/rj54n1cb0c.h
-header-test- += media/i2c/saa6588.h
-header-test- += media/i2c/saa7115.h
-header-test- += media/i2c/sr030pc30.h
-header-test- += media/i2c/tc358743.h
-header-test- += media/i2c/tda1997x.h
-header-test- += media/i2c/ths7303.h
-header-test- += media/i2c/tvaudio.h
-header-test- += media/i2c/tvp514x.h
-header-test- += media/i2c/tvp7002.h
-header-test- += media/i2c/wm8775.h
-header-test- += media/imx.h
-header-test- += media/media-dev-allocator.h
-header-test- += media/mpeg2-ctrls.h
-header-test- += media/rcar-fcp.h
-header-test- += media/tuner-types.h
-header-test- += media/tveeprom.h
-header-test- += media/v4l2-flash-led-class.h
-header-test- += misc/altera.h
-header-test- += misc/cxl-base.h
-header-test- += misc/cxllib.h
-header-test- += net/9p/9p.h
-header-test- += net/9p/client.h
-header-test- += net/9p/transport.h
-header-test- += net/af_vsock.h
-header-test- += net/ax88796.h
-header-test- += net/bluetooth/hci.h
-header-test- += net/bluetooth/hci_core.h
-header-test- += net/bluetooth/hci_mon.h
-header-test- += net/bluetooth/hci_sock.h
-header-test- += net/bluetooth/l2cap.h
-header-test- += net/bluetooth/mgmt.h
-header-test- += net/bluetooth/rfcomm.h
-header-test- += net/bluetooth/sco.h
-header-test- += net/bond_options.h
-header-test- += net/caif/cfsrvl.h
-header-test- += net/codel_impl.h
-header-test- += net/codel_qdisc.h
-header-test- += net/compat.h
-header-test- += net/datalink.h
-header-test- += net/dcbevent.h
-header-test- += net/dcbnl.h
-header-test- += net/dn_dev.h
-header-test- += net/dn_fib.h
-header-test- += net/dn_neigh.h
-header-test- += net/dn_nsp.h
-header-test- += net/dn_route.h
-header-test- += net/erspan.h
-header-test- += net/esp.h
-header-test- += net/ethoc.h
-header-test- += net/firewire.h
-header-test- += net/flow_offload.h
-header-test- += net/fq.h
-header-test- += net/fq_impl.h
-header-test- += net/garp.h
-header-test- += net/gtp.h
-header-test- += net/gue.h
-header-test- += net/hwbm.h
-header-test- += net/ila.h
-header-test- += net/inet6_connection_sock.h
-header-test- += net/inet_common.h
-header-test- += net/inet_frag.h
-header-test- += net/ip6_route.h
-header-test- += net/ip_vs.h
-header-test- += net/ipcomp.h
-header-test- += net/ipconfig.h
-header-test- += net/iucv/af_iucv.h
-header-test- += net/iucv/iucv.h
-header-test- += net/lapb.h
-header-test- += net/llc_c_ac.h
-header-test- += net/llc_c_st.h
-header-test- += net/llc_s_ac.h
-header-test- += net/llc_s_ev.h
-header-test- += net/llc_s_st.h
-header-test- += net/mpls_iptunnel.h
-header-test- += net/mrp.h
-header-test- += net/ncsi.h
-header-test- += net/netevent.h
-header-test- += net/netns/can.h
-header-test- += net/netns/generic.h
-header-test- += net/netns/ieee802154_6lowpan.h
-header-test- += net/netns/ipv4.h
-header-test- += net/netns/ipv6.h
-header-test- += net/netns/mpls.h
-header-test- += net/netns/nftables.h
-header-test- += net/netns/sctp.h
-header-test- += net/netrom.h
-header-test- += net/p8022.h
-header-test- += net/phonet/pep.h
-header-test- += net/phonet/phonet.h
-header-test- += net/phonet/pn_dev.h
-header-test- += net/pptp.h
-header-test- += net/psample.h
-header-test- += net/psnap.h
-header-test- += net/regulatory.h
-header-test- += net/rose.h
-header-test- += net/sctp/auth.h
-header-test- += net/sctp/stream_interleave.h
-header-test- += net/sctp/stream_sched.h
-header-test- += net/sctp/tsnmap.h
-header-test- += net/sctp/ulpevent.h
-header-test- += net/sctp/ulpqueue.h
-header-test- += net/secure_seq.h
-header-test- += net/smc.h
-header-test- += net/stp.h
-header-test- += net/transp_v6.h
-header-test- += net/tun_proto.h
-header-test- += net/udplite.h
-header-test- += net/xdp.h
-header-test- += net/xdp_priv.h
-header-test- += pcmcia/cistpl.h
-header-test- += pcmcia/ds.h
-header-test- += rdma/tid_rdma_defs.h
-header-test- += scsi/fc/fc_encaps.h
-header-test- += scsi/fc/fc_fc2.h
-header-test- += scsi/fc/fc_fcoe.h
-header-test- += scsi/fc/fc_fip.h
-header-test- += scsi/fc_encode.h
-header-test- += scsi/fc_frame.h
-header-test- += scsi/iser.h
-header-test- += scsi/libfc.h
-header-test- += scsi/libfcoe.h
-header-test- += scsi/libsas.h
-header-test- += scsi/sas_ata.h
-header-test- += scsi/scsi_cmnd.h
-header-test- += scsi/scsi_dbg.h
-header-test- += scsi/scsi_device.h
-header-test- += scsi/scsi_dh.h
-header-test- += scsi/scsi_eh.h
-header-test- += scsi/scsi_host.h
-header-test- += scsi/scsi_ioctl.h
-header-test- += scsi/scsi_request.h
-header-test- += scsi/scsi_tcq.h
-header-test- += scsi/scsi_transport.h
-header-test- += scsi/scsi_transport_fc.h
-header-test- += scsi/scsi_transport_sas.h
-header-test- += scsi/scsi_transport_spi.h
-header-test- += scsi/scsi_transport_srp.h
-header-test- += scsi/scsicam.h
-header-test- += scsi/sg.h
-header-test- += soc/arc/aux.h
-header-test- += soc/arc/mcip.h
-header-test- += soc/arc/timers.h
-header-test- += soc/brcmstb/common.h
-header-test- += soc/fsl/bman.h
-header-test- += soc/fsl/qe/qe.h
-header-test- += soc/fsl/qe/qe_ic.h
-header-test- += soc/fsl/qe/qe_tdm.h
-header-test- += soc/fsl/qe/ucc.h
-header-test- += soc/fsl/qe/ucc_fast.h
-header-test- += soc/fsl/qe/ucc_slow.h
-header-test- += soc/fsl/qman.h
-header-test- += soc/nps/common.h
-header-test-$(CONFIG_ARC) += soc/nps/mtm.h
-header-test- += soc/qcom/cmd-db.h
-header-test- += soc/qcom/rpmh.h
-header-test- += soc/qcom/tcs.h
-header-test- += soc/tegra/ahb.h
-header-test- += soc/tegra/bpmp-abi.h
-header-test- += soc/tegra/common.h
-header-test- += soc/tegra/flowctrl.h
-header-test- += soc/tegra/fuse.h
-header-test- += soc/tegra/mc.h
-header-test- += sound/ac97/compat.h
-header-test- += sound/aci.h
-header-test- += sound/ad1843.h
-header-test- += sound/adau1373.h
-header-test- += sound/ak4113.h
-header-test- += sound/ak4114.h
-header-test- += sound/ak4117.h
-header-test- += sound/cs35l33.h
-header-test- += sound/cs35l34.h
-header-test- += sound/cs35l35.h
-header-test- += sound/cs35l36.h
-header-test- += sound/cs4271.h
-header-test- += sound/cs42l52.h
-header-test- += sound/cs8427.h
-header-test- += sound/da7218.h
-header-test- += sound/da7219-aad.h
-header-test- += sound/da7219.h
-header-test- += sound/da9055.h
-header-test- += sound/emu8000.h
-header-test- += sound/emux_synth.h
-header-test- += sound/hda_component.h
-header-test- += sound/hda_hwdep.h
-header-test- += sound/hda_i915.h
-header-test- += sound/hwdep.h
-header-test- += sound/i2c.h
-header-test- += sound/l3.h
-header-test- += sound/max98088.h
-header-test- += sound/max98095.h
-header-test- += sound/mixer_oss.h
-header-test- += sound/omap-hdmi-audio.h
-header-test- += sound/pcm_drm_eld.h
-header-test- += sound/pcm_iec958.h
-header-test- += sound/pcm_oss.h
-header-test- += sound/pxa2xx-lib.h
-header-test- += sound/rt286.h
-header-test- += sound/rt298.h
-header-test- += sound/rt5645.h
-header-test- += sound/rt5659.h
-header-test- += sound/rt5660.h
-header-test- += sound/rt5665.h
-header-test- += sound/rt5670.h
-header-test- += sound/s3c24xx_uda134x.h
-header-test- += sound/seq_device.h
-header-test- += sound/seq_kernel.h
-header-test- += sound/seq_midi_emul.h
-header-test- += sound/seq_oss.h
-header-test- += sound/soc-acpi-intel-match.h
-header-test- += sound/soc-dai.h
-header-test- += sound/soc-dapm.h
-header-test- += sound/soc-dpcm.h
-header-test- += sound/sof/control.h
-header-test- += sound/sof/dai-intel.h
-header-test- += sound/sof/dai.h
-header-test- += sound/sof/header.h
-header-test- += sound/sof/info.h
-header-test- += sound/sof/pm.h
-header-test- += sound/sof/stream.h
-header-test- += sound/sof/topology.h
-header-test- += sound/sof/trace.h
-header-test- += sound/sof/xtensa.h
-header-test- += sound/spear_spdif.h
-header-test- += sound/sta32x.h
-header-test- += sound/sta350.h
-header-test- += sound/tea6330t.h
-header-test- += sound/tlv320aic32x4.h
-header-test- += sound/tlv320dac33-plat.h
-header-test- += sound/uda134x.h
-header-test- += sound/wavefront.h
-header-test- += sound/wm8903.h
-header-test- += sound/wm8904.h
-header-test- += sound/wm8960.h
-header-test- += sound/wm8962.h
-header-test- += sound/wm8993.h
-header-test- += sound/wm8996.h
-header-test- += sound/wm9081.h
-header-test- += sound/wm9090.h
-header-test- += target/iscsi/iscsi_target_stat.h
-header-test- += trace/bpf_probe.h
-header-test- += trace/events/9p.h
-header-test- += trace/events/afs.h
-header-test- += trace/events/asoc.h
-header-test- += trace/events/bcache.h
-header-test- += trace/events/block.h
-header-test- += trace/events/cachefiles.h
-header-test- += trace/events/cgroup.h
-header-test- += trace/events/clk.h
-header-test- += trace/events/cma.h
-header-test- += trace/events/ext4.h
-header-test- += trace/events/f2fs.h
-header-test- += trace/events/fs_dax.h
-header-test- += trace/events/fscache.h
-header-test- += trace/events/fsi.h
-header-test- += trace/events/fsi_master_ast_cf.h
-header-test- += trace/events/fsi_master_gpio.h
-header-test- += trace/events/huge_memory.h
-header-test- += trace/events/ib_mad.h
-header-test- += trace/events/ib_umad.h
-header-test- += trace/events/io_uring.h
-header-test- += trace/events/iscsi.h
-header-test- += trace/events/jbd2.h
-header-test- += trace/events/kvm.h
-header-test- += trace/events/kyber.h
-header-test- += trace/events/libata.h
-header-test- += trace/events/mce.h
-header-test- += trace/events/mdio.h
-header-test- += trace/events/migrate.h
-header-test- += trace/events/mmflags.h
-header-test- += trace/events/nbd.h
-header-test- += trace/events/nilfs2.h
-header-test- += trace/events/pwc.h
-header-test- += trace/events/rdma.h
-header-test- += trace/events/rpcgss.h
-header-test- += trace/events/rpcrdma.h
-header-test- += trace/events/rxrpc.h
-header-test- += trace/events/scsi.h
-header-test- += trace/events/siox.h
-header-test- += trace/events/spi.h
-header-test- += trace/events/swiotlb.h
-header-test- += trace/events/syscalls.h
-header-test- += trace/events/target.h
-header-test- += trace/events/thermal_power_allocator.h
-header-test- += trace/events/timer.h
-header-test- += trace/events/wbt.h
-header-test- += trace/events/xen.h
-header-test- += trace/perf.h
-header-test- += trace/trace_events.h
-header-test- += uapi/drm/vmwgfx_drm.h
-header-test- += uapi/linux/a.out.h
-header-test- += uapi/linux/coda.h
-header-test- += uapi/linux/coda_psdev.h
-header-test- += uapi/linux/errqueue.h
-header-test- += uapi/linux/eventpoll.h
-header-test- += uapi/linux/hdlc/ioctl.h
-header-test- += uapi/linux/input.h
-header-test- += uapi/linux/kvm.h
-header-test- += uapi/linux/kvm_para.h
-header-test- += uapi/linux/lightnvm.h
-header-test- += uapi/linux/mic_common.h
-header-test- += uapi/linux/mman.h
-header-test- += uapi/linux/nilfs2_ondisk.h
-header-test- += uapi/linux/patchkey.h
-header-test- += uapi/linux/ptrace.h
-header-test- += uapi/linux/scc.h
-header-test- += uapi/linux/seg6_iptunnel.h
-header-test- += uapi/linux/smc_diag.h
-header-test- += uapi/linux/timex.h
-header-test- += uapi/linux/videodev2.h
-header-test- += uapi/scsi/scsi_bsg_fc.h
-header-test- += uapi/sound/asound.h
-header-test- += uapi/sound/sof/eq.h
-header-test- += uapi/sound/sof/fw.h
-header-test- += uapi/sound/sof/header.h
-header-test- += uapi/sound/sof/manifest.h
-header-test- += uapi/sound/sof/trace.h
-header-test- += uapi/xen/evtchn.h
-header-test- += uapi/xen/gntdev.h
-header-test- += uapi/xen/privcmd.h
-header-test- += vdso/vsyscall.h
-header-test- += video/broadsheetfb.h
-header-test- += video/cvisionppc.h
-header-test- += video/gbe.h
-header-test- += video/kyro.h
-header-test- += video/maxinefb.h
-header-test- += video/metronomefb.h
-header-test- += video/neomagic.h
-header-test- += video/of_display_timing.h
-header-test- += video/omapvrfb.h
-header-test- += video/s1d13xxxfb.h
-header-test- += video/sstfb.h
-header-test- += video/tgafb.h
-header-test- += video/udlfb.h
-header-test- += video/uvesafb.h
-header-test- += video/vga.h
-header-test- += video/w100fb.h
-header-test- += xen/acpi.h
-header-test- += xen/arm/hypercall.h
-header-test- += xen/arm/page-coherent.h
-header-test- += xen/arm/page.h
-header-test- += xen/balloon.h
-header-test- += xen/events.h
-header-test- += xen/features.h
-header-test- += xen/grant_table.h
-header-test- += xen/hvm.h
-header-test- += xen/interface/callback.h
-header-test- += xen/interface/event_channel.h
-header-test- += xen/interface/grant_table.h
-header-test- += xen/interface/hvm/dm_op.h
-header-test- += xen/interface/hvm/hvm_op.h
-header-test- += xen/interface/hvm/hvm_vcpu.h
-header-test- += xen/interface/hvm/params.h
-header-test- += xen/interface/hvm/start_info.h
-header-test- += xen/interface/io/9pfs.h
-header-test- += xen/interface/io/blkif.h
-header-test- += xen/interface/io/console.h
-header-test- += xen/interface/io/displif.h
-header-test- += xen/interface/io/fbif.h
-header-test- += xen/interface/io/kbdif.h
-header-test- += xen/interface/io/netif.h
-header-test- += xen/interface/io/pciif.h
-header-test- += xen/interface/io/protocols.h
-header-test- += xen/interface/io/pvcalls.h
-header-test- += xen/interface/io/ring.h
-header-test- += xen/interface/io/sndif.h
-header-test- += xen/interface/io/tpmif.h
-header-test- += xen/interface/io/vscsiif.h
-header-test- += xen/interface/io/xs_wire.h
-header-test- += xen/interface/memory.h
-header-test- += xen/interface/nmi.h
-header-test- += xen/interface/physdev.h
-header-test- += xen/interface/platform.h
-header-test- += xen/interface/sched.h
-header-test- += xen/interface/vcpu.h
-header-test- += xen/interface/version.h
-header-test- += xen/interface/xen-mca.h
-header-test- += xen/interface/xen.h
-header-test- += xen/interface/xenpmu.h
-header-test- += xen/mem-reservation.h
-header-test- += xen/page.h
-header-test- += xen/platform_pci.h
-header-test- += xen/swiotlb-xen.h
-header-test- += xen/xen-front-pgdir-shbuf.h
-header-test- += xen/xen-ops.h
-header-test- += xen/xen.h
-header-test- += xen/xenbus.h
-
-# Do not include directly
-header-test- += linux/compiler-clang.h
-header-test- += linux/compiler-gcc.h
-header-test- += linux/patchkey.h
-header-test- += linux/rwlock_api_smp.h
-header-test- += linux/spinlock_types_up.h
-header-test- += linux/spinlock_up.h
-header-test- += linux/wimax/debug.h
-header-test- += rdma/uverbs_named_ioctl.h
-
-# asm-generic/*.h is used by asm/*.h, and should not be included directly
-header-test- += asm-generic/% uapi/asm-generic/%
-
-# Timestamp files touched by Kconfig
-header-test- += config/%
-
-# Timestamp files touched by scripts/adjust_autoksyms.sh
-header-test- += ksym/%
-
-# You could compile-test these, but maybe not so useful...
-header-test- += dt-bindings/%
-
-# Do not test generated headers. Stale headers are often left over when you
-# traverse the git history without cleaning.
-header-test- += generated/%
-
-# The rest are compile-tested
-header-test-pattern-y += */*.h */*/*.h */*/*/*.h */*/*/*/*.h
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 175f7b40c585..0c23fd0548d1 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -78,9 +78,6 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
-struct acpi_device *
-acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
-
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
@@ -683,6 +680,11 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
+bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
+
static inline void acpi_dev_put(struct acpi_device *adev)
{
put_device(&adev->dev);
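The acpi_bus.h hunks above move acpi_dev_get_first_match_dev() next to the new
acpi_dev_hid_uid_match() helper inside the CONFIG_ACPI block. A minimal sketch of
how a driver might pair the two; the "ABCD0001" HID and "1" UID are hypothetical,
not from this patch:

#include <linux/acpi.h>

static int example_lookup(void)
{
        struct acpi_device *adev;

        /* hrv of -1 means "any hardware revision" */
        adev = acpi_dev_get_first_match_dev("ABCD0001", "1", -1);
        if (!adev)
                return -ENODEV;

        if (acpi_dev_hid_uid_match(adev, "ABCD0001", "1"))
                pr_info("found expected device\n");

        acpi_dev_put(adev);     /* drop the reference the lookup took */
        return 0;
}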
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e5e041413581..18790b9e16b5 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20190816
+#define ACPI_CA_VERSION 0x20191018
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -458,7 +458,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
u8 physical))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_load_table(struct acpi_table_header *table))
+ acpi_load_table(struct acpi_table_header *table,
+ u32 *table_idx))
+
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_unload_table(u32 table_index))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_unload_parent_table(acpi_handle object))
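acpi_load_table() now reports the index of the loaded table, and the new
acpi_unload_table() takes that index back. A sketch of the intended pairing,
assuming the caller already holds a table image in memory:

static acpi_status example_load_unload(struct acpi_table_header *table)
{
        u32 table_idx;
        acpi_status status;

        status = acpi_load_table(table, &table_idx);
        if (ACPI_FAILURE(status))
                return status;

        /* ... consume the table ... */

        return acpi_unload_table(table_idx);
}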
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 3a2b8535dec6..340da7784cc8 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -2,21 +2,9 @@
#ifndef ACPI_BUTTON_H
#define ACPI_BUTTON_H
-#include <linux/notifier.h>
-
#if IS_ENABLED(CONFIG_ACPI_BUTTON)
-extern int acpi_lid_notifier_register(struct notifier_block *nb);
-extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
extern int acpi_lid_open(void);
#else
-static inline int acpi_lid_notifier_register(struct notifier_block *nb)
-{
- return 0;
-}
-static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
-{
- return 0;
-}
static inline int acpi_lid_open(void)
{
return 1;
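With the lid notifier API removed, the remaining interface is polling
acpi_lid_open(), whose !CONFIG_ACPI_BUTTON stub always reports the lid as open.
A sketch of a caller; the example_ name is a placeholder:

#include <acpi/button.h>

static bool example_lid_closed(void)
{
        /* 1 means open; the stub above also returns 1 */
        return acpi_lid_open() == 0;
}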
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index e3667c9a33a5..c86cf7cb4bba 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -30,7 +30,6 @@
#undef pud_free_tlb
#define pud_free_tlb(tlb, x, addr) do { } while (0)
#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, addr) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index f6947da70d71..4c74b1c1d13b 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -51,7 +51,6 @@ static inline int p4d_present(p4d_t p4d)
#undef p4d_free_tlb
#define p4d_free_tlb(tlb, x, addr) do { } while (0)
#define p4d_free(mm, x) do { } while (0)
-#define __p4d_free_tlb(tlb, x, addr) do { } while (0)
#undef p4d_addr_end
#define p4d_addr_end(addr, end) (end)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index adff14fcb8e4..ddfee1bd9dc1 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,4 +4,5 @@
# (This file is not included when SRCARCH=um since UML borrows several
# asm headers from the host architecture.)
+mandatory-y += msi.h
mandatory-y += simd.h
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index fa577978fbbd..afddc5442e92 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
@@ -31,7 +32,6 @@
*/
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
- .globl __ksymtab_\name
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
__ksymtab_\name:
@@ -44,7 +44,6 @@ __kstrtab_\name:
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
-__kcrctab_\name:
#if defined(CONFIG_MODULE_REL_CRCS)
.long __crc_\name - .
#else
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d02806513670..325fc98cc9ff 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -922,39 +922,17 @@ static inline void *phys_to_virt(unsigned long address)
/**
* DOC: ioremap() and ioremap_*() variants
*
- * If you have an IOMMU your architecture is expected to have both ioremap()
- * and iounmap() implemented otherwise the asm-generic helpers will provide a
- * direct mapping.
+ * Architectures with an MMU are expected to provide ioremap() and iounmap()
+ * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
+ * a default no-op implementation that expects the physical addresses
+ * used for MMIO to already be marked as uncached and usable as kernel
+ * virtual addresses.
*
- * There are ioremap_*() call variants, if you have no IOMMU we naturally will
- * default to direct mapping for all of them, you can override these defaults.
- * If you have an IOMMU you are highly encouraged to provide your own
- * ioremap variant implementation as there currently is no safe architecture
- * agnostic default. To avoid possible improper behaviour default asm-generic
- * ioremap_*() variants all return NULL when an IOMMU is available. If you've
- * defined your own ioremap_*() variant you must then declare your own
- * ioremap_*() variant as defined to itself to avoid the default NULL return.
+ * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
+ * for specific drivers if the architecture chooses to implement them. If they
+ * are not implemented we fall back to plain ioremap.
*/
-
-#ifdef CONFIG_MMU
-
-#ifndef ioremap_uc
-#define ioremap_uc ioremap_uc
-static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
-{
- return NULL;
-}
-#endif
-
-#else /* !CONFIG_MMU */
-
-/*
- * Change "struct page" to physical address.
- *
- * This implementation is for the no-MMU case only... if you have an MMU
- * you'll need to provide your own definitions.
- */
-
+#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
@@ -965,42 +943,47 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-
static inline void iounmap(void __iomem *addr)
{
}
#endif
-#endif /* CONFIG_MMU */
-#ifndef ioremap_nocache
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
-#define ioremap_nocache ioremap_nocache
-static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
-{
- return ioremap(offset, size);
-}
-#endif
+#elif defined(CONFIG_GENERIC_IOREMAP)
+#include <asm/pgtable.h>
-#ifndef ioremap_uc
-#define ioremap_uc ioremap_uc
-static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void iounmap(volatile void __iomem *addr);
+
+static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
- return ioremap_nocache(offset, size);
+ /* _PAGE_IOREMAP needs to be supplied by the architecture */
+ return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
+#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
+
+#ifndef ioremap_nocache
+#define ioremap_nocache ioremap
#endif
#ifndef ioremap_wc
-#define ioremap_wc ioremap_wc
-static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
-{
- return ioremap_nocache(offset, size);
-}
+#define ioremap_wc ioremap
#endif
#ifndef ioremap_wt
-#define ioremap_wt ioremap_wt
-static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+#define ioremap_wt ioremap
+#endif
+
+/*
+ * ioremap_uc is special in that we do require an explicit architecture
+ * implementation. In general you should not use this function in a
+ * driver; use plain ioremap, which is uncached by default, instead.
+ * Similarly, architectures should not implement it unless they have a
+ * very good reason.
+ */
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
- return ioremap_nocache(offset, size);
+ return NULL;
}
#endif
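Under CONFIG_GENERIC_IOREMAP the architecture only implements ioremap_prot() and
supplies _PAGE_IOREMAP; the generic ioremap() above forwards to it. A sketch of
the driver-visible result; nothing here is architecture-specific:

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map_regs(struct resource *res)
{
        /* on GENERIC_IOREMAP architectures this expands to
         * ioremap_prot(res->start, size, _PAGE_IOREMAP) */
        return ioremap(res->start, resource_size(res));
}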
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 18d8e2d8210f..b3f1082cc435 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -166,10 +166,12 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
void hyperv_report_panic(struct pt_regs *regs, long err);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
+bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
void hv_setup_sched_clock(void *sched_clock);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
+static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */
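The new hv_is_hibernation_supported() follows the same pattern as
hv_is_hyperv_initialized(): a real implementation under CONFIG_HYPERV and a stub
returning false otherwise, so common code can call it without #ifdef guards. A
sketch with a hypothetical caller:

static int example_pm_setup(void)
{
        if (!hv_is_hibernation_supported())
                return 0;       /* compiles and short-circuits on !CONFIG_HYPERV */
        /* ... register hibernation callbacks ... */
        return 0;
}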
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index aebab905e6cd..ce2cbb3c380f 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -50,7 +50,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
*/
#define p4d_alloc_one(mm, address) NULL
#define p4d_free(mm, x) do { } while (0)
-#define __p4d_free_tlb(tlb, x, a) do { } while (0)
+#define p4d_free_tlb(tlb, x, a) do { } while (0)
#undef p4d_addr_end
#define p4d_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index b85b8271a73d..0d9b28cba16d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -60,7 +60,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
-#define __pmd_free_tlb(tlb, x, a) do { } while (0)
+#define pmd_free_tlb(tlb, x, a) do { } while (0)
#undef pmd_addr_end
#define pmd_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index c77a1d301155..d3776cb494c0 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -59,7 +59,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, a) do { } while (0)
+#define pud_free_tlb(tlb, x, a) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 818691846c90..798ea36a0549 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -558,8 +558,19 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
* Do the tests inline, but report and clear the bad entry in mm/memory.c.
*/
void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d) do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud)					do { } while (0)
+#endif
+
void pmd_clear_bad(pmd_t *);
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
@@ -903,6 +914,21 @@ static inline int pud_write(pud_t pud)
}
#endif /* pud_write */
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
@@ -912,6 +938,31 @@ static inline int pud_trans_huge(pud_t pud)
}
#endif
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ return 1;
+ if (unlikely(pud_bad(pudval))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+ return 0;
+#endif
+}
+
#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
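pud_trans_unstable() mirrors pmd_trans_unstable() one level up: a PUD that is
none, transparent-huge, or devmap (or bad, in which case it is cleared) must not
be dereferenced as a page-table pointer. A sketch of how a walker would use it;
example_walk_pud is a hypothetical caller, not part of this patch:

static int example_walk_pud(pud_t *pud, unsigned long addr, unsigned long end)
{
        if (pud_trans_unstable(pud))
                return 0;       /* skip: not a stable pointer to a PMD table */
        /* ... walk the PMD entries under this PUD ... */
        return 0;
}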
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 04c0644006fd..2b10036fefd0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -428,7 +428,7 @@ static inline void tlb_change_page_size(struct mmu_gather *tlb,
{
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
if (tlb->page_size && tlb->page_size != page_size) {
- if (!tlb->fullmm)
+ if (!tlb->fullmm && !tlb->need_flush_all)
tlb_flush_mmu(tlb);
}
@@ -584,7 +584,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
} while (0)
#endif
-#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
@@ -594,9 +593,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
-#endif
-#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
@@ -605,7 +602,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
-#endif
#endif /* CONFIG_MMU */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 63cedc3c0c77..e00f41aa8ec4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -141,14 +141,23 @@
* compiler option used. A given kernel image will only use one, AKA
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
+ *
+ * We also need to make ftrace_stub_graph point to ftrace_stub
+ * so that the same stub location may serve both protocols
+ * without confusing C verifiers.
*/
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
KEEP(*(__patchable_function_entries)) \
- __stop_mcount_loc = .;
+ __stop_mcount_loc = .; \
+ ftrace_stub_graph = ftrace_stub;
#else
-#define MCOUNT_REC()
+# ifdef CONFIG_FUNCTION_TRACER
+# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# else
+# define MCOUNT_REC()
+# endif
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 296aab724677..b1230e33d506 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -27,34 +27,36 @@
*/
enum amd_asic_type {
CHIP_TAHITI = 0,
- CHIP_PITCAIRN,
- CHIP_VERDE,
- CHIP_OLAND,
- CHIP_HAINAN,
- CHIP_BONAIRE,
- CHIP_KAVERI,
- CHIP_KABINI,
- CHIP_HAWAII,
- CHIP_MULLINS,
- CHIP_TOPAZ,
- CHIP_TONGA,
- CHIP_FIJI,
- CHIP_CARRIZO,
- CHIP_STONEY,
- CHIP_POLARIS10,
- CHIP_POLARIS11,
- CHIP_POLARIS12,
- CHIP_VEGAM,
- CHIP_VEGA10,
- CHIP_VEGA12,
- CHIP_VEGA20,
- CHIP_RAVEN,
- CHIP_ARCTURUS,
- CHIP_RENOIR,
- CHIP_NAVI10,
- CHIP_NAVI14,
- CHIP_NAVI12,
+ CHIP_PITCAIRN, /* 1 */
+ CHIP_VERDE, /* 2 */
+ CHIP_OLAND, /* 3 */
+ CHIP_HAINAN, /* 4 */
+ CHIP_BONAIRE, /* 5 */
+ CHIP_KAVERI, /* 6 */
+ CHIP_KABINI, /* 7 */
+ CHIP_HAWAII, /* 8 */
+ CHIP_MULLINS, /* 9 */
+ CHIP_TOPAZ, /* 10 */
+ CHIP_TONGA, /* 11 */
+ CHIP_FIJI, /* 12 */
+ CHIP_CARRIZO, /* 13 */
+ CHIP_STONEY, /* 14 */
+ CHIP_POLARIS10, /* 15 */
+ CHIP_POLARIS11, /* 16 */
+ CHIP_POLARIS12, /* 17 */
+ CHIP_VEGAM, /* 18 */
+ CHIP_VEGA10, /* 19 */
+ CHIP_VEGA12, /* 20 */
+ CHIP_VEGA20, /* 21 */
+ CHIP_RAVEN, /* 22 */
+ CHIP_ARCTURUS, /* 23 */
+ CHIP_RENOIR, /* 24 */
+ CHIP_NAVI10, /* 25 */
+ CHIP_NAVI14, /* 26 */
+ CHIP_NAVI12, /* 27 */
CHIP_LAST,
};
+extern const char *amdgpu_asic_name[];
+
#endif /*__AMD_ASIC_TYPE_H__ */
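The numeric comments make each enumerator's value visible because the newly
exported amdgpu_asic_name[] is indexed by enum amd_asic_type. A sketch of a
bounds-checked lookup (the helper name is illustrative):

static const char *example_asic_name(enum amd_asic_type type)
{
        if (type >= CHIP_LAST)
                return "unknown";
        return amdgpu_asic_name[type];
}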
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index cf528c289857..9d4d5cc47969 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -6,6 +6,8 @@
#ifndef __DW_HDMI__
#define __DW_HDMI__
+#include <sound/hdmi-codec.h>
+
struct drm_connector;
struct drm_display_mode;
struct drm_encoder;
@@ -126,6 +128,7 @@ struct dw_hdmi_plat_data {
const struct drm_display_mode *mode);
unsigned long input_bus_format;
unsigned long input_bus_encoding;
+ bool use_drm_infoframe;
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
@@ -154,8 +157,11 @@ void dw_hdmi_resume(struct dw_hdmi *hdmi);
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
+void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
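dw_hdmi_set_plugged_cb() lets an audio codec driver learn about hot-plug events.
The hdmi_codec_plugged_cb type comes from the newly included <sound/hdmi-codec.h>;
assuming its usual (struct device *, bool) signature, a registration could look
like this sketch:

static void example_plugged_cb(struct device *dev, bool plugged)
{
        dev_info(dev, "HDMI sink %s\n", plugged ? "plugged" : "unplugged");
}

static int example_codec_bind(struct dw_hdmi *hdmi, struct device *codec_dev)
{
        return dw_hdmi_set_plugged_cb(hdmi, example_plugged_cb, codec_dev);
}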
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
deleted file mode 100644
index 037b1f7a87a5..000000000000
--- a/include/drm/drmP.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Internal Header for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_P_H_
-#define _DRM_P_H_
-
-#include <linux/agp_backend.h>
-#include <linux/cdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/poll.h>
-#include <linux/ratelimit.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-#include <linux/dma-fence.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-
-#include <uapi/drm/drm.h>
-#include <uapi/drm/drm_mode.h>
-
-#include <drm/drm_agpsupport.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_mm.h>
-#include <drm/drm_os_linux.h>
-#include <drm/drm_sarea.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_print.h>
-#include <drm/drm_pci.h>
-#include <drm/drm_file.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_sysfs.h>
-#include <drm/drm_vblank.h>
-#include <drm/drm_irq.h>
-#include <drm/drm_device.h>
-
-struct module;
-
-struct device_node;
-struct videomode;
-struct dma_resv;
-struct dma_buf_attachment;
-
-struct pci_dev;
-struct pci_controller;
-
-/*
- * NOTE: drmP.h is obsolete - do NOT add anything to this file
- *
- * Do not include drmP.h in new files.
- * Work is ongoing to remove drmP.h includes from existing files
- */
-
-#endif
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 7616f6562fe4..c0a2286a81e9 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -42,7 +42,7 @@ struct drm_bridge_funcs {
* This callback is invoked whenever our bridge is being attached to a
* &drm_encoder.
*
- * The attach callback is optional.
+ * The @attach callback is optional.
*
* RETURNS:
*
@@ -56,7 +56,7 @@ struct drm_bridge_funcs {
* This callback is invoked whenever our bridge is being detached from a
* &drm_encoder.
*
- * The detach callback is optional.
+ * The @detach callback is optional.
*/
void (*detach)(struct drm_bridge *bridge);
@@ -76,7 +76,7 @@ struct drm_bridge_funcs {
* atomic helpers to validate modes supplied by userspace in
* drm_atomic_helper_check_modeset().
*
- * This function is optional.
+ * The @mode_valid callback is optional.
*
* NOTE:
*
@@ -108,7 +108,7 @@ struct drm_bridge_funcs {
* this function passes all other callbacks must succeed for this
* configuration.
*
- * The mode_fixup callback is optional.
+ * The @mode_fixup callback is optional.
*
* NOTE:
*
@@ -146,7 +146,7 @@ struct drm_bridge_funcs {
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * The disable callback is optional.
+ * The @disable callback is optional.
*/
void (*disable)(struct drm_bridge *bridge);
@@ -165,7 +165,7 @@ struct drm_bridge_funcs {
* signals) feeding it is no longer running when this callback is
* called.
*
- * The post_disable callback is optional.
+ * The @post_disable callback is optional.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -214,7 +214,7 @@ struct drm_bridge_funcs {
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
- * The pre_enable callback is optional.
+ * The @pre_enable callback is optional.
*/
void (*pre_enable)(struct drm_bridge *bridge);
@@ -234,7 +234,7 @@ struct drm_bridge_funcs {
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * The enable callback is optional.
+ * The @enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
@@ -283,7 +283,7 @@ struct drm_bridge_funcs {
* would be prudent to also provide an implementation of @enable if
* you are expecting driver calls into &drm_bridge_enable.
*
- * The enable callback is optional.
+ * The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
@@ -305,7 +305,7 @@ struct drm_bridge_funcs {
* would be prudent to also provide an implementation of @disable if
* you are expecting driver calls into &drm_bridge_disable.
*
- * The disable callback is optional.
+ * The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
@@ -330,7 +330,7 @@ struct drm_bridge_funcs {
* @post_disable if you are expecting driver calls into
* &drm_bridge_post_disable.
*
- * The post_disable callback is optional.
+ * The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
struct drm_atomic_state *state);
@@ -429,12 +429,15 @@ void drm_atomic_bridge_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
#ifdef CONFIG_DRM_PANEL_BRIDGE
-struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
- u32 connector_type);
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
+struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
+ u32 connector_type);
void drm_panel_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
- struct drm_panel *panel,
- u32 connector_type);
+ struct drm_panel *panel);
+struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type);
#endif
#endif
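The split above replaces the single typed entry point: plain drm_panel_bridge_add() now picks up the connector type from the panel itself (see the new drm_panel.connector_type field later in this series), while the _typed variants keep the explicit argument for legacy callers. A minimal sketch of a migrated call site, assuming a probed DSI panel and the usual ERR_PTR() return convention (names are illustrative, not from the patch):

	struct drm_bridge *bridge;

	/* before: bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI); */
	bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_DSI);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);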
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 681cb590f952..5f8c3389d46f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -281,6 +281,10 @@ enum drm_panel_orientation {
/* Additional Colorimetry extension added as part of CTA 861.G */
#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11
#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12
+/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
+#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13
+#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14
+#define DRM_MODE_COLORIMETRY_BT601_YCC 15
/**
* enum drm_bus_flags - bus_flags info for &drm_display_info
@@ -1288,12 +1292,12 @@ struct drm_connector {
/** @override_edid: has the EDID been overwritten through debugfs for testing? */
bool override_edid;
-#define DRM_CONNECTOR_MAX_ENCODER 3
/**
- * @encoder_ids: Valid encoders for this connector. Please only use
- * drm_connector_for_each_possible_encoder() to enumerate these.
+ * @possible_encoders: Bit mask of encoders that can drive this
+ * connector; drm_encoder_index() determines the index into the bitfield
+ * and the bits are set with drm_connector_attach_encoder().
*/
- uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ u32 possible_encoders;
/**
* @encoder: Currently bound encoder driving this connector, if any.
@@ -1523,7 +1527,8 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-int drm_mode_create_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
int drm_mode_create_content_type_property(struct drm_device *dev);
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
@@ -1608,13 +1613,9 @@ bool drm_connector_has_possible_encoder(struct drm_connector *connector,
* drm_connector_for_each_possible_encoder - iterate connector's possible encoders
* @connector: &struct drm_connector pointer
* @encoder: &struct drm_encoder pointer used as cursor
- * @__i: int iteration cursor, for macro-internal use
*/
-#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
- for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
- (connector)->encoder_ids[(__i)] != 0; (__i)++) \
- for_each_if((encoder) = \
- drm_encoder_find((connector)->dev, NULL, \
- (connector)->encoder_ids[(__i)])) \
+#define drm_connector_for_each_possible_encoder(connector, encoder) \
+ drm_for_each_encoder_mask(encoder, (connector)->dev, \
+ (connector)->possible_encoders)
#endif
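With encoder_ids[] replaced by a bit mask, the iteration macro loses its macro-internal cursor. A hedged sketch of an updated caller, using only what the hunk above declares:

	struct drm_encoder *encoder;

	/* the third "int __i" argument of the old macro is gone */
	drm_connector_for_each_possible_encoder(connector, encoder) {
		/* many best_encoder implementations return the first hit */
		return encoder;
	}

	return NULL;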
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 408b6f4e63c0..5e9b15a0e8c5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -41,7 +41,6 @@
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_property.h>
-#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_plane.h>
#include <drm/drm_blend.h>
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 8364502f92cf..51ecb5112ef8 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -23,9 +23,9 @@
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_
-#include <linux/types.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -42,6 +42,48 @@
* 1.2 formally includes both eDP and DPI definitions.
*/
+/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */
+#define DP_MSA_MISC_SYNC_CLOCK (1 << 0)
+#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8)
+#define DP_MSA_MISC_STEREO_NO_3D (0 << 9)
+#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9)
+#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9)
+/* bits per component for non-RAW */
+#define DP_MSA_MISC_6_BPC (0 << 5)
+#define DP_MSA_MISC_8_BPC (1 << 5)
+#define DP_MSA_MISC_10_BPC (2 << 5)
+#define DP_MSA_MISC_12_BPC (3 << 5)
+#define DP_MSA_MISC_16_BPC (4 << 5)
+/* bits per component for RAW */
+#define DP_MSA_MISC_RAW_6_BPC (1 << 5)
+#define DP_MSA_MISC_RAW_7_BPC (2 << 5)
+#define DP_MSA_MISC_RAW_8_BPC (3 << 5)
+#define DP_MSA_MISC_RAW_10_BPC (4 << 5)
+#define DP_MSA_MISC_RAW_12_BPC (5 << 5)
+#define DP_MSA_MISC_RAW_14_BPC (6 << 5)
+#define DP_MSA_MISC_RAW_16_BPC (7 << 5)
+/* pixel encoding/colorimetry format */
+#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \
+ ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1))
+#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1)
+#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1)
+#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1)
+#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0)
+#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1)
+#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14)
+
#define DP_AUX_MAX_PAYLOAD_BYTES 16
#define DP_AUX_I2C_WRITE 0x0
@@ -95,6 +137,7 @@
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+# define DP_CAP_ANSI_8B10B (1 << 0)
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
@@ -562,6 +605,14 @@
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
+# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
+# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
+# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c
+# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2
+# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30
+# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4
+# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0
+# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6
#define DP_TEST_REQUEST 0x218
# define DP_TEST_LINK_TRAINING (1 << 0)
@@ -966,6 +1017,36 @@
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+/* Link Training (LT)-tunable PHY Repeaters */
+#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
+#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
+#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
+#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */
+#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
+#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
+#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
+#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
+#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
+#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
+#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
+#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
+#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
+#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
+#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */
+#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+
+/* Repeater modes */
+#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
+#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
+
/* DP HDCP message start offsets in DPCD address space */
#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
@@ -1049,6 +1130,8 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane);
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
@@ -1144,6 +1227,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static inline bool
drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x12 &&
@@ -1208,6 +1298,19 @@ drm_dp_sink_supports_fec(const u8 fec_capable)
return fec_capable & DP_FEC_CAPABLE;
}
+static inline bool
+drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
+}
+
+static inline bool
+drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_EDP_CONFIGURATION_CAP] &
+ DP_ALTERNATE_SCRAMBLER_RESET_CAP;
+}
+
/*
* DisplayPort AUX channel
*/
@@ -1230,20 +1333,19 @@ struct drm_dp_aux_msg {
struct cec_adapter;
struct edid;
+struct drm_connector;
/**
* struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
* @lock: mutex protecting this struct
* @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
- * @name: name of the CEC adapter
- * @parent: parent device of the CEC adapter
+ * @connector: the connector this CEC adapter is associated with
* @unregister_work: unregister the CEC adapter
*/
struct drm_dp_aux_cec {
struct mutex lock;
struct cec_adapter *adap;
- const char *name;
- struct device *parent;
+ struct drm_connector *connector;
struct delayed_work unregister_work;
};
@@ -1353,22 +1455,6 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE]);
-/*
- * DisplayPort link
- */
-#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
-
-struct drm_dp_link {
- unsigned char revision;
- unsigned int rate;
- unsigned int num_lanes;
- unsigned long capabilities;
-};
-
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -1451,8 +1537,8 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
#ifdef CONFIG_DRM_DP_CEC
void drm_dp_cec_irq(struct drm_dp_aux *aux);
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
- struct device *parent);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
@@ -1461,9 +1547,9 @@ static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
{
}
-static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- const char *name,
- struct device *parent)
+static inline void
+drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
{
}
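Two call-site notes, sketched under the assumption of a cached DPCD receiver-cap array and an already-initialised connector (both variables hypothetical here). The CEC registration drops the name/parent pair in favour of the connector, and the new capability helpers read straight from the cached caps:

	/* before: drm_dp_cec_register_connector(aux, connector->name, dev); */
	drm_dp_cec_register_connector(aux, connector);

	if (drm_dp_fast_training_cap(dpcd) &&
	    drm_dp_channel_coding_supported(dpcd))
		fast_train = true;	/* hypothetical driver flag */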
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 2ba6253ea6d3..d5fc90b30487 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -26,6 +26,26 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stackdepot.h>
+#include <linux/timekeeping.h>
+
+enum drm_dp_mst_topology_ref_type {
+ DRM_DP_MST_TOPOLOGY_REF_GET,
+ DRM_DP_MST_TOPOLOGY_REF_PUT,
+};
+
+struct drm_dp_mst_topology_ref_history {
+ struct drm_dp_mst_topology_ref_entry {
+ enum drm_dp_mst_topology_ref_type type;
+ int count;
+ ktime_t ts_nsec;
+ depot_stack_handle_t backtrace;
+ } *entries;
+ int len;
+};
+#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+
struct drm_dp_mst_branch;
/**
@@ -45,21 +65,31 @@ struct drm_dp_vcpi {
/**
* struct drm_dp_mst_port - MST port
* @port_num: port number
- * @input: if this port is an input port.
- * @mcs: message capability status - DP 1.2 spec.
- * @ddps: DisplayPort Device Plug Status - DP 1.2
- * @pdt: Peer Device Type
- * @ldps: Legacy Device Plug Status
- * @dpcd_rev: DPCD revision of device on this port
- * @num_sdp_streams: Number of simultaneous streams
- * @num_sdp_stream_sinks: Number of stream sinks
- * @available_pbn: Available bandwidth for this port.
+ * @input: if this port is an input port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @mcs: message capability status - DP 1.2 spec. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @pdt: Peer Device Type. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ldps: Legacy Device Plug Status. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @dpcd_rev: DPCD revision of device on this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_streams: Number of simultaneous streams. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_stream_sinks: Number of stream sinks. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @available_pbn: Available bandwidth for this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
* @next: link to next port on this branch device
- * @mstb: branch device attach below this port
- * @aux: i2c aux transport to talk to device connected to this port.
+ * @aux: i2c aux transport to talk to device connected to this port, protected
+ * by &drm_dp_mst_topology_mgr.base.lock.
* @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port.
- * @connector: DRM connector this port is connected to.
+ * @connector: DRM connector this port is connected to. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
* @mgr: topology manager this port lives under.
*
* This structure represents an MST port endpoint on a device somewhere
@@ -79,6 +109,14 @@ struct drm_dp_mst_port {
*/
struct kref malloc_kref;
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
u8 port_num;
bool input;
bool mcs;
@@ -90,7 +128,17 @@ struct drm_dp_mst_port {
u8 num_sdp_stream_sinks;
uint16_t available_pbn;
struct list_head next;
- struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
+ /**
+ * @mstb: the branch device connected to this port, if there is one.
+ * This should be considered protected for reading by
+ * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from writing concurrently
+ * by &drm_dp_mst_topology_mgr.probe_lock.
+ */
+ struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */
struct drm_dp_mst_branch *parent;
@@ -116,7 +164,6 @@ struct drm_dp_mst_port {
* @lct: Link count total to talk to this branch device.
* @num_ports: number of ports on the branch.
* @msg_slots: one bit per transmitted msg slot.
- * @ports: linked list of ports on this branch.
* @port_parent: pointer to the port parent, NULL if toplevel.
* @mgr: topology manager for this branch device.
* @tx_slots: transmission slots for this device.
@@ -143,11 +190,35 @@ struct drm_dp_mst_branch {
*/
struct kref malloc_kref;
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
+ /**
+ * @destroy_next: linked-list entry used by
+ * drm_dp_delayed_destroy_work()
+ */
+ struct list_head destroy_next;
+
u8 rad[8];
u8 lct;
int num_ports;
int msg_slots;
+ /**
+ * @ports: the list of ports on this branch device. This should be
+ * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
+ * There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from updating the list
+ * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
+ */
struct list_head ports;
/* list of tx ops queue for this port */
@@ -287,7 +358,7 @@ struct drm_dp_remote_dpcd_write {
struct drm_dp_remote_i2c_read {
u8 num_transactions;
u8 port_number;
- struct {
+ struct drm_dp_remote_i2c_read_tx {
u8 i2c_dev_id;
u8 num_bytes;
u8 *bytes;
@@ -334,7 +405,7 @@ struct drm_dp_resource_status_notify {
struct drm_dp_query_payload_ack_reply {
u8 port_number;
- u8 allocated_pbn;
+ u16 allocated_pbn;
};
struct drm_dp_sideband_msg_req_body {
@@ -481,15 +552,11 @@ struct drm_dp_mst_topology_mgr {
int conn_base_id;
/**
- * @down_rep_recv: Message receiver state for down replies. This and
- * @up_req_recv are only ever access from the work item, which is
- * serialised.
+ * @down_rep_recv: Message receiver state for down replies.
*/
struct drm_dp_sideband_msg_rx down_rep_recv;
/**
- * @up_req_recv: Message receiver state for up requests. This and
- * @down_rep_recv are only ever access from the work item, which is
- * serialised.
+ * @up_req_recv: Message receiver state for up requests.
*/
struct drm_dp_sideband_msg_rx up_req_recv;
@@ -499,6 +566,13 @@ struct drm_dp_mst_topology_mgr {
struct mutex lock;
/**
+ * @probe_lock: Prevents @work and @up_req_work, the only writers of
+ * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
+ * while they update the topology.
+ */
+ struct mutex probe_lock;
+
+ /**
* @mst_state: If this manager is enabled for an MST capable port. False
* if no MST sink/branch devices is connected.
*/
@@ -575,18 +649,49 @@ struct drm_dp_mst_topology_mgr {
struct work_struct tx_work;
/**
- * @destroy_connector_list: List of to be destroyed connectors.
+ * @destroy_port_list: List of ports to be destroyed.
+ */
+ struct list_head destroy_port_list;
+ /**
+ * @destroy_branch_device_list: List of branch devices to be
+ * destroyed.
+ */
+ struct list_head destroy_branch_device_list;
+ /**
+ * @delayed_destroy_lock: Protects @destroy_port_list and
+ * @destroy_branch_device_list.
+ */
+ struct mutex delayed_destroy_lock;
+ /**
+ * @delayed_destroy_work: Work item to destroy MST port and branch
+ * devices, needed to avoid locking inversion.
+ */
+ struct work_struct delayed_destroy_work;
+
+ /**
+ * @up_req_list: List of pending up requests from the topology that
+ * need to be processed, in chronological order.
*/
- struct list_head destroy_connector_list;
+ struct list_head up_req_list;
/**
- * @destroy_connector_lock: Protects @connector_list.
+ * @up_req_lock: Protects @up_req_list.
*/
- struct mutex destroy_connector_lock;
+ struct mutex up_req_lock;
/**
- * @destroy_connector_work: Work item to destroy connectors. Needed to
- * avoid locking inversion.
+ * @up_req_work: Work item to process up requests received from the
+ * topology. Needed to avoid blocking hotplug handling and sideband
+ * transmissions.
*/
- struct work_struct destroy_connector_work;
+ struct work_struct up_req_work;
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history_lock: protects
+ * &drm_dp_mst_port.topology_ref_history and
+ * &drm_dp_mst_branch.topology_ref_history.
+ */
+ struct mutex topology_ref_history_lock;
+#endif
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
@@ -603,7 +708,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
@@ -642,7 +751,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
-drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync);
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size);
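Since drm_dp_mst_detect_port() now takes an acquire context and returns an int (connector status or a negative error), it slots naturally into a connector's ->detect_ctx hook. A hedged sketch, where my_connector and its mgr/port fields are illustrative stand-ins for a driver's private data:

static int my_mst_connector_detect_ctx(struct drm_connector *connector,
				       struct drm_modeset_acquire_ctx *ctx,
				       bool force)
{
	struct my_connector *c = to_my_connector(connector);	/* hypothetical */

	return drm_dp_mst_detect_port(connector, ctx, c->mst_mgr, c->mst_port);
}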
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 8976afe48c1c..cf13470810a5 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -778,8 +778,6 @@ struct drm_driver {
int dev_priv_size;
};
-extern unsigned int drm_debug;
-
int drm_dev_init(struct drm_device *dev,
struct drm_driver *driver,
struct device *parent);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b9719418c3d2..f0b03d401c27 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -368,6 +368,10 @@ drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -481,7 +485,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
int drm_add_override_edid_modes(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
bool drm_detect_hdmi_monitor(struct edid *edid);
bool drm_detect_monitor_audio(struct edid *edid);
enum hdmi_quantization_range
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 70cfca03d812..f06164f44efe 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -140,7 +140,7 @@ struct drm_encoder {
* @possible_crtcs: Bitmask of potential CRTC bindings, using
* drm_crtc_index() as the index into the bitfield. The driver must set
* the bits for all &drm_crtc objects this encoder can be connected to
- * before calling drm_encoder_init().
+ * before calling drm_dev_register().
*
* In reality almost every driver gets this wrong.
*
@@ -154,7 +154,7 @@ struct drm_encoder {
* using drm_encoder_index() as the index into the bitfield. The driver
* must set the bits for all &drm_encoder objects which can clone a
* &drm_crtc together with this encoder before calling
- * drm_encoder_init(). Drivers should set the bit representing the
+ * drm_dev_register(). Drivers should set the bit representing the
* encoder itself, too. Cloning bits should be set such that when two
* encoders can be used in a cloned configuration, they both should have
* each other's bits set.
@@ -198,7 +198,7 @@ static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
}
/**
- * drm_encoder_mask - find the mask of a registered ENCODER
+ * drm_encoder_mask - find the mask of a registered encoder
* @encoder: encoder to find mask for
*
* Given a registered encoder, return the mask bit of that encoder for an
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index c8a8ae2a678a..2338e9f94a03 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -235,7 +235,6 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
@@ -539,18 +538,16 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
/**
* drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
* @pdev: PCI device
- * @resource_id: index of PCI BAR configuring framebuffer memory
* @name: requesting driver name
*
* This function removes framebuffer devices (e.g. initialized by firmware)
- * using memory range configured for @pdev's BAR @resource_id.
+ * using the memory range configured for any of @pdev's memory BARs.
*
* The function assumes that PCI device with shadowed ROM drives a primary
* display and so kicks out vga16fb.
*/
static inline int
drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- int resource_id,
const char *name)
{
int ret = 0;
@@ -560,7 +557,7 @@ drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
* otherwise the vga fbdev driver falls over.
*/
#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, resource_id, name);
+ ret = remove_conflicting_pci_framebuffers(pdev, name);
#endif
if (ret == 0)
ret = vga_remove_vgacon(pdev);
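Callers simply drop the BAR index, since the helper now sweeps every memory BAR of the device. A hedged sketch of the migration (the driver name is illustrative):

	/* before: ..._remove_conflicting_pci_framebuffers(pdev, 0, "mydrmfb"); */
	ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mydrmfb");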
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 6aaba14f5972..97a48165642c 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -151,6 +151,21 @@ struct drm_gem_object_funcs {
void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
/**
+ * @mmap:
+ *
+ * Handle mmap() of the gem object, setup vma accordingly.
+ *
+ * This callback is optional.
+ *
+ * The callback is used by both drm_gem_mmap_obj() and
+ * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not
+ * used, the @mmap callback must set vma->vm_ops instead. The @mmap
+ * callback is always called with a 0 offset. The caller will remove
+ * the fake offset as necessary.
+ */
+ int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
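A hedged sketch of a driver-side implementation of the new @mmap callback, following the rules in the comment above (install vma->vm_ops yourself; the offset arrives already zeroed). my_gem_vm_ops is a hypothetical driver table:

static int my_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/* @vm_ops in the funcs table is ignored once @mmap is set */
	vma->vm_ops = &my_gem_vm_ops;
	return 0;
}

static const struct drm_gem_object_funcs my_gem_funcs = {
	.mmap = my_gem_mmap,
};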
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 7865e6b5d36c..e34a7b7f848a 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -101,32 +101,6 @@ struct drm_gem_shmem_object {
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
-/**
- * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for shmem based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_shmem_mmap, \
- }
-
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);
@@ -156,9 +130,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
-
-extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
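With DEFINE_DRM_GEM_SHMEM_FOPS() removed and drm_gem_shmem_mmap() re-typed to the GEM-object signature, drivers are expected to wire the helper through the per-object callbacks rather than file_operations. A hedged sketch (this assumes drm_gem_object_funcs also has a .free member, as in mainline):

static const struct drm_gem_object_funcs my_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.mmap = drm_gem_shmem_mmap,	/* new object-based entry point */
};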
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
new file mode 100644
index 000000000000..118cef76f84f
--- /dev/null
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef DRM_GEM_TTM_HELPER_H
+#define DRM_GEM_TTM_HELPER_H
+
+#include <linux/kernel.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_device.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+#define drm_gem_ttm_of_gem(gem_obj) \
+ container_of(gem_obj, struct ttm_buffer_object, base)
+
+void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *gem);
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma);
+
+#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index ac217d768456..e040541a105f 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -3,18 +3,26 @@
#ifndef DRM_GEM_VRAM_HELPER_H
#define DRM_GEM_VRAM_HELPER_H
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+
#include <linux/kernel.h> /* for container_of() */
struct drm_mode_create_dumb;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_simple_display_pipe;
struct drm_vram_mm_funcs;
struct filp;
struct vm_area_struct;
#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM
#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM
+#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN TTM_PL_FLAG_TOPDOWN
/*
* Buffer-object helpers
@@ -34,11 +42,26 @@ struct vm_area_struct;
* backed by VRAM. It can be used for simple framebuffer devices with
* dedicated memory. The buffer object can be evicted to system memory if
* video memory becomes scarce.
+ *
+ * GEM VRAM objects perform reference counting for pin and mapping
+ * operations. So a buffer object that has been pinned N times with
+ * drm_gem_vram_pin() must be unpinned N times with
+ * drm_gem_vram_unpin(). The same applies to pairs of
+ * drm_gem_vram_kmap() and drm_gem_vram_kunmap(), as well as pairs of
+ * drm_gem_vram_vmap() and drm_gem_vram_vunmap().
*/
struct drm_gem_vram_object {
struct ttm_buffer_object bo;
struct ttm_bo_kmap_obj kmap;
+ /**
+ * @kmap_use_count:
+ *
+ * Reference count on the virtual address.
+ * The address is unmapped when the count reaches zero.
+ */
+ unsigned int kmap_use_count;
+
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
@@ -83,6 +106,8 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem);
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
+void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo);
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
@@ -92,18 +117,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_mode_create_dumb *args);
/*
- * Helpers for struct ttm_bo_driver
- */
-
-void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *pl);
-
-int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp);
-
-extern const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs;
-
-/*
* Helpers for struct drm_driver
*/
@@ -114,6 +127,28 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state);
+
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state);
+
/**
* define DRM_GEM_VRAM_DRIVER - default callback functions for \
&struct drm_driver
@@ -122,8 +157,56 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
+ .debugfs_init = drm_vram_mm_debugfs_init, \
.dumb_create = drm_gem_vram_driver_dumb_create, \
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
.gem_prime_mmap = drm_gem_prime_mmap
+/*
+ * VRAM memory manager
+ */
+
+/**
+ * struct drm_vram_mm - An instance of VRAM MM
+ * @vram_base: Base address of the managed video memory
+ * @vram_size: Size of the managed video memory in bytes
+ * @bdev: The TTM BO device.
+ * @funcs: TTM BO functions
+ *
+ * The fields &struct drm_vram_mm.vram_base and
+ * &struct drm_vram_mm.vram_size are managed by VRAM MM, but are
+ * available for public read access. Use the field
+ * &struct drm_vram_mm.bdev to access the TTM BO device.
+ */
+struct drm_vram_mm {
+ uint64_t vram_base;
+ size_t vram_size;
+
+ struct ttm_bo_device bdev;
+};
+
+/**
+ * drm_vram_mm_of_bdev() - \
+ Returns the container of type &struct ttm_bo_device for field bdev.
+ * @bdev: the TTM BO device
+ *
+ * Returns:
+ * The containing instance of &struct drm_vram_mm
+ */
+static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
+ struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct drm_vram_mm, bdev);
+}
+
+int drm_vram_mm_debugfs_init(struct drm_minor *minor);
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size);
+void drm_vram_helper_release_mm(struct drm_device *dev);
+
#endif
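Two usage sketches for the reworked helper, hedged on the ERR_PTR() return convention and with dev/gbo as illustrative variables: the allocator loses its funcs argument, and vmap/vunmap pair up through the new kmap_use_count reference count:

	struct drm_vram_mm *vmm;
	void *vaddr;

	vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
	if (IS_ERR(vmm))
		return PTR_ERR(vmm);

	vaddr = drm_gem_vram_vmap(gbo);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	/* ... CPU access ... */
	drm_gem_vram_vunmap(gbo, vaddr);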
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 2c3bbb43c7d1..d7939c054259 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -168,8 +168,9 @@ struct drm_mm_node {
struct rb_node rb_hole_addr;
u64 __subtree_last;
u64 hole_size;
- bool allocated : 1;
- bool scanned_block : 1;
+ unsigned long flags;
+#define DRM_MM_NODE_ALLOCATED_BIT 0
+#define DRM_MM_NODE_SCANNED_BIT 1
#ifdef CONFIG_DRM_DEBUG_MM
depot_stack_handle_t stack;
#endif
@@ -253,7 +254,7 @@ struct drm_mm_scan {
*/
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
- return node->allocated;
+ return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
/**
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 6b18c8adfe9d..5a87f1bd7a3f 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -955,9 +955,8 @@ struct drm_connector_helper_funcs {
* @atomic_best_encoder.
*
* You can leave this function to NULL if the connector is only
- * attached to a single encoder and you are using the atomic helpers.
- * In this case, the core will call drm_atomic_helper_best_encoder()
- * for you.
+ * attached to a single encoder. In this case, the core will call
+ * drm_connector_get_single_encoder() for you.
*
* RETURNS:
*
@@ -977,7 +976,7 @@ struct drm_connector_helper_funcs {
*
* This function is used by drm_atomic_helper_check_modeset().
* If it is not implemented, the core will fallback to @best_encoder
- * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
+ * (or drm_connector_get_single_encoder() if @best_encoder is NULL).
*
* NOTE:
*
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 7b8841065b11..4fc9a43ac45a 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -114,6 +114,15 @@ static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
return ww_mutex_is_locked(&lock->mutex);
}
+/**
+ * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held()
+ * @lock: lock to check
+ */
+static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock)
+{
+ lockdep_assert_held(&lock->mutex.base);
+}
+
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx);
int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
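A hedged sketch of where the new assert earns its keep: a helper that relies on the caller already holding a modeset lock (here &drm_crtc.mutex, which is a drm_modeset_lock; the function name is illustrative):

static void my_validate_crtc(struct drm_crtc *crtc)
{
	drm_modeset_lock_assert_held(&crtc->mutex);
	/* ... touch state that crtc->mutex protects ... */
}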
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
deleted file mode 100644
index ee8d61b64f29..000000000000
--- a/include/drm/drm_os_linux.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
- * \file drm_os_linux.h
- * OS abstraction macros.
- */
-
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/sched/signal.h>
-#include <linux/delay.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-/** Current process ID */
-#define DRM_CURRENTPID task_pid_nr(current)
-#define DRM_UDELAY(d) udelay(d)
-/** Read a byte from a MMIO region */
-#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
-/** Read a word from a MMIO region */
-#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
-/** Read a dword from a MMIO region */
-#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
-/** Write a byte into a MMIO region */
-#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
-/** Write a word into a MMIO region */
-#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
-/** Write a dword into a MMIO region */
-#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
-
-/** Read a qword from a MMIO region - be careful using these unless you really understand them */
-#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset))
-/** Write a qword into a MMIO region */
-#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset))
-
-#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
-do { \
- DECLARE_WAITQUEUE(entry, current); \
- unsigned long end = jiffies + (timeout); \
- add_wait_queue(&(queue), &entry); \
- \
- for (;;) { \
- __set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (time_after_eq(jiffies, end)) { \
- ret = -EBUSY; \
- break; \
- } \
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
- if (signal_pending(current)) { \
- ret = -EINTR; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- remove_wait_queue(&(queue), &entry); \
-} while (0)
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 624bd15ecfab..ce8da64022b4 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -140,6 +140,15 @@ struct drm_panel {
const struct drm_panel_funcs *funcs;
/**
+ * @connector_type:
+ *
+ * Type of the panel as a DRM_MODE_CONNECTOR_* value. This is used to
+ * initialise the drm_connector corresponding to the panel with the
+ * correct connector type.
+ */
+ int connector_type;
+
+ /**
* @list:
*
* Panel entry in registry.
@@ -147,7 +156,9 @@ struct drm_panel {
struct list_head list;
};
-void drm_panel_init(struct drm_panel *panel);
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
int drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
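A hedged probe-time sketch of the extended initialiser, folding the old open-coded panel->dev/panel->funcs assignments into the one call; ctx and my_panel_funcs are illustrative driver names:

	drm_panel_init(&ctx->panel, dev, &my_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_add(&ctx->panel);
	if (ret < 0)
		return ret;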
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index cd5903ad33f7..3f396d94afe4 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -140,10 +140,11 @@ struct drm_plane_state {
* @zpos:
* Priority of the given plane on crtc (optional).
*
- * Note that multiple active planes on the same crtc can have an
- * identical zpos value. The rule to solving the conflict is to compare
- * the plane object IDs; the plane with a higher ID must be stacked on
- * top of a plane with a lower ID.
+ * User-space may set mutable zpos properties so that multiple active
+ * planes on the same CRTC have identical zpos values. This is a
+ * user-space bug, but drivers can solve the conflict by comparing the
+ * plane object IDs; the plane with a higher ID is stacked on top of a
+ * plane with a lower ID.
*
* See drm_plane_create_zpos_property() and
* drm_plane_create_zpos_immutable_property() for more details.
@@ -183,8 +184,26 @@ struct drm_plane_state {
*/
struct drm_property_blob *fb_damage_clips;
- /** @src: clipped source coordinates of the plane (in 16.16) */
- /** @dst: clipped destination coordinates of the plane */
+ /**
+ * @src:
+ *
+ * source coordinates of the plane (in 16.16).
+ *
+ * When using drm_atomic_helper_check_plane_state(),
+ * the coordinates are clipped, but the driver may choose
+ * to use unclipped coordinates instead when the hardware
+ * performs the clipping automatically.
+ */
+ /**
+ * @dst:
+ *
+ * clipped destination coordinates of the plane.
+ *
+ * When using drm_atomic_helper_check_plane_state(),
+ * the coordinates are clipped, but the driver may choose
+ * to use unclipped coordinates instead when the hardware
+ * performs the clipping automatically.
+ */
struct drm_rect src, dst;
/**
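A hedged sketch of the clipped path described above, assuming the driver goes through drm_atomic_helper_check_plane_state() and the DRM_PLANE_HELPER_NO_SCALING bounds from drm_plane_helper.h (both from mainline of this era, not introduced by this patch):

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;
	/* new_state->src and new_state->dst now hold clipped coordinates */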
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d89311b822d5..9af7422b44cf 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -61,8 +61,6 @@ struct drm_device;
struct drm_gem_object;
struct drm_file;
-struct device;
-
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a5d6f2f3e430..5b8049992c24 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -34,6 +34,8 @@
#include <drm/drm.h>
+extern unsigned int drm_debug;
+
/**
* DOC: print
*
@@ -83,11 +85,14 @@ void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
void drm_puts(struct drm_printer *p, const char *str);
void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
+void drm_print_bits(struct drm_printer *p, unsigned long value,
+ const char * const bits[], unsigned int nbits);
__printf(2, 0)
/**
@@ -227,6 +232,22 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
return p;
}
+/**
+ * drm_err_printer - construct a &drm_printer that outputs to pr_err()
+ * @prefix: debug output prefix
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_err_printer(const char *prefix)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_err,
+ .prefix = prefix
+ };
+ return p;
+}
+
/*
* The following categories are defined:
*
@@ -272,6 +293,11 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_LEASE 0x80
#define DRM_UT_DP 0x100
+static inline bool drm_debug_enabled(unsigned int category)
+{
+ return unlikely(drm_debug & category);
+}
+
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
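A hedged sketch combining the two additions above (the prefix string is illustrative): drm_debug_enabled() replaces open-coded tests of drm_debug, and drm_err_printer() gives a &drm_printer that lands in pr_err():

	struct drm_printer p = drm_err_printer("my-driver");	/* outputs via pr_err() */

	if (drm_debug_enabled(DRM_UT_KMS))	/* replaces "drm_debug & DRM_UT_KMS" */
		drm_printf(&p, "dumping KMS state\n");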
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 6195820aa5c5..cd0106135b6a 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -70,6 +70,23 @@ struct drm_rect {
(r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10
/**
+ * drm_rect_init - initialize the rectangle from x/y/w/h
+ * @r: rectangle
+ * @x: x coordinate
+ * @y: y coordinate
+ * @width: width
+ * @height: height
+ */
+static inline void drm_rect_init(struct drm_rect *r, int x, int y,
+ int width, int height)
+{
+ r->x1 = x;
+ r->y1 = y;
+ r->x2 = x + width;
+ r->y2 = y + height;
+}
+
+/**
* drm_rect_adjust_size - adjust the size of the rectangle
* @r: rectangle to be adjusted
* @dw: horizontal adjustment
@@ -107,6 +124,20 @@ static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
}
/**
+ * drm_rect_translate_to - translate the rectangle to an absolute position
+ * @r: rectangle to be translated
+ * @x: horizontal position
+ * @y: vertical position
+ *
+ * Move rectangle @r to @x in the horizontal direction,
+ * and to @y in the vertical direction.
+ */
+static inline void drm_rect_translate_to(struct drm_rect *r, int x, int y)
+{
+ drm_rect_translate(r, x - r->x1, y - r->y1);
+}
+
+/**
* drm_rect_downscale - downscale a rectangle
* @r: rectangle to be downscaled
* @horz: horizontal downscale factor
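A quick worked example of the two new helpers (values illustrative):

	struct drm_rect r;

	drm_rect_init(&r, 0, 0, 1920, 1080);	/* x1=0, y1=0, x2=1920, y2=1080 */
	drm_rect_translate_to(&r, 100, 50);	/* x1=100, y1=50; size unchanged */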
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 4d89cd0a60db..15afee9cf049 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -49,7 +49,7 @@ struct drm_simple_display_pipe_funcs {
*
* drm_mode_status Enum
*/
- enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
+ enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode);
/**
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 9fe4ba8bc622..c16c44052b3d 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -109,9 +109,20 @@ struct drm_vblank_crtc {
seqlock_t seqlock;
/**
- * @count: Current software vblank counter.
+ * @count:
+ *
+ * Current software vblank counter.
+ *
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+ * IMPORTANT: This guarantee requires barriers, therefore never access
+ * this field directly. Use drm_crtc_vblank_count() instead.
*/
- u64 count;
+ atomic64_t count;
/**
* @time: Vblank timestamp corresponding to @count.
*/
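Given the barrier requirement spelled out above, the field stays private to the vblank core; a hedged one-liner of the sanctioned access pattern:

	u64 seq = drm_crtc_vblank_count(crtc);	/* never read @count directly */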
diff --git a/include/drm/drm_vram_mm_helper.h b/include/drm/drm_vram_mm_helper.h
deleted file mode 100644
index 2aacfb1ccfae..000000000000
--- a/include/drm/drm_vram_mm_helper.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef DRM_VRAM_MM_HELPER_H
-#define DRM_VRAM_MM_HELPER_H
-
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/ttm/ttm_bo_driver.h>
-
-struct drm_device;
-
-/**
- * struct drm_vram_mm_funcs - Callback functions for &struct drm_vram_mm
- * @evict_flags: Provides an implementation for struct \
- &ttm_bo_driver.evict_flags
- * @verify_access: Provides an implementation for \
- struct &ttm_bo_driver.verify_access
- *
- * These callback function integrate VRAM MM with TTM buffer objects. New
- * functions can be added if necessary.
- */
-struct drm_vram_mm_funcs {
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
- int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp);
-};
-
-/**
- * struct drm_vram_mm - An instance of VRAM MM
- * @vram_base: Base address of the managed video memory
- * @vram_size: Size of the managed video memory in bytes
- * @bdev: The TTM BO device.
- * @funcs: TTM BO functions
- *
- * The fields &struct drm_vram_mm.vram_base and
- * &struct drm_vram_mm.vrm_size are managed by VRAM MM, but are
- * available for public read access. Use the field
- * &struct drm_vram_mm.bdev to access the TTM BO device.
- */
-struct drm_vram_mm {
- uint64_t vram_base;
- size_t vram_size;
-
- struct ttm_bo_device bdev;
-
- const struct drm_vram_mm_funcs *funcs;
-};
-
-/**
- * drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
- * @bdev: the TTM BO device
- *
- * Returns:
- * The containing instance of &struct drm_vram_mm
- */
-static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
-{
- return container_of(bdev, struct drm_vram_mm, bdev);
-}
-
-int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
- uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs);
-void drm_vram_mm_cleanup(struct drm_vram_mm *vmm);
-
-int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm);
-
-/*
- * Helpers for integration with struct drm_device
- */
-
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs);
-void drm_vram_helper_release_mm(struct drm_device *dev);
-
-/*
- * Helpers for &struct file_operations
- */
-
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma);
-
-/**
- * define DRM_VRAM_MM_FILE_OPERATIONS - default callback functions for \
- &struct file_operations
- *
- * Drivers that use VRAM MM can use this macro to initialize
- * &struct file_operations with default functions.
- */
-#define DRM_VRAM_MM_FILE_OPERATIONS \
- .llseek = no_llseek, \
- .read = drm_read, \
- .poll = drm_poll, \
- .unlocked_ioctl = drm_ioctl, \
- .compat_ioctl = drm_compat_ioctl, \
- .mmap = drm_vram_mm_file_operations_mmap, \
- .open = drm_open, \
- .release = drm_release \
-
-#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 57b4121c750a..684692a8ed76 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -26,6 +26,7 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
+#include <linux/completion.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
@@ -71,6 +72,7 @@ enum drm_sched_priority {
* @last_scheduled: points to the finished fence of the last scheduled job.
* @last_user: last group leader pushing a job into the entity.
* @stopped: Marks the entity as removed from rq and destined for termination.
+ * @entity_idle: Signals when the entity is not in use.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
@@ -94,6 +96,7 @@ struct drm_sched_entity {
struct dma_fence *last_scheduled;
struct task_struct *last_user;
bool stopped;
+ struct completion entity_idle;
};
/**
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 23274cf92712..6722005884db 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -100,22 +100,4 @@ extern struct resource intel_graphics_stolen_res;
#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
- PORT_G,
- PORT_H,
- PORT_I,
-
- I915_MAX_PORTS
-};
-
-#define port_name(p) ((p) + 'A')
-
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 8c344255146a..4d48de8890ca 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0+) */
/*
- * Copyright © 2017-2018 Intel Corporation
+ * Copyright © 2017-2019 Intel Corporation
*
* Authors:
* Ramalingam C <ramalingam.c@intel.com>
@@ -42,9 +42,44 @@ enum hdcp_wired_protocol {
HDCP_PROTOCOL_DP
};
+enum mei_fw_ddi {
+ MEI_DDI_INVALID_PORT = 0x0,
+
+ MEI_DDI_B = 1,
+ MEI_DDI_C,
+ MEI_DDI_D,
+ MEI_DDI_E,
+ MEI_DDI_F,
+ MEI_DDI_A = 7,
+ MEI_DDI_RANGE_END = MEI_DDI_A,
+};
+
+/**
+ * enum mei_fw_tc - ME Firmware defined index for transcoders
+ * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder
+ * @MEI_TRANSCODER_EDP: Index for EDP Transcoder
+ * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder
+ * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder
+ * @MEI_TRANSCODER_A: Index for Transcoder A
+ * @MEI_TRANSCODER_B: Index for Transcoder B
+ * @MEI_TRANSCODER_C: Index for Transcoder C
+ * @MEI_TRANSCODER_D: Index for Transcoder D
+ */
+enum mei_fw_tc {
+ MEI_INVALID_TRANSCODER = 0x00,
+ MEI_TRANSCODER_EDP,
+ MEI_TRANSCODER_DSI0,
+ MEI_TRANSCODER_DSI1,
+ MEI_TRANSCODER_A = 0x10,
+ MEI_TRANSCODER_B,
+ MEI_TRANSCODER_C,
+ MEI_TRANSCODER_D
+};
+
/**
* struct hdcp_port_data - intel specific HDCP port data
- * @port: port index as per I915
+ * @fw_ddi: ddi index as per ME FW
+ * @fw_tc: transcoder index as per ME FW
* @port_type: HDCP port type as per ME FW classification
* @protocol: HDCP adaptation as per ME FW
 * @k: Number of streams transmitted on a port. Only for DP MST is this != 1
@@ -56,7 +91,8 @@ enum hdcp_wired_protocol {
* streams
*/
struct hdcp_port_data {
- enum port port;
+ enum mei_fw_ddi fw_ddi;
+ enum mei_fw_tc fw_tc;
u8 port_type;
u8 protocol;
u16 k;
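[Editor's note: a hypothetical caller filling the reworked port data might look like the sketch below. The values are made up, and only HDCP_PROTOCOL_DP is shown because it appears in this hunk:]

	struct hdcp_port_data data = {
		.fw_ddi   = MEI_DDI_A,
		.fw_tc    = MEI_TRANSCODER_A,
		.protocol = HDCP_PROTOCOL_DP,
		.k        = 1,	/* != 1 only for DP MST */
	};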
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 43c4929a2171..65e399d280f7 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -147,7 +147,6 @@ struct ttm_tt;
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
- * @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
@@ -199,12 +198,6 @@ struct ttm_buffer_object {
bool evicted;
/**
- * Members protected by the bo::reserved lock only when written to.
- */
-
- atomic_t cpu_writers;
-
- /**
* Members protected by the bdev::lru_lock.
*/
@@ -368,30 +361,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
@@ -442,31 +411,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
/**
- * ttm_bo_synccpu_write_grab
- *
- * @bo: The buffer object:
- * @no_wait: Return immediately if buffer is busy.
- *
- * Synchronizes a buffer object for CPU RW access. This means
- * command submission that affects the buffer will return -EBUSY
- * until ttm_bo_synccpu_write_release is called.
- *
- * Returns
- * -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
-
-/**
- * ttm_bo_synccpu_write_release:
- *
- * @bo : The buffer object.
- *
- * Releases a synccpu lock.
- */
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_acc_size
*
* @bdev: Pointer to a ttm_bo_device struct.
@@ -710,16 +654,14 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
/**
- * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
*
* @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space. The address space will
- * have the same size as the bo, and start at offset 0.
+ * @bo: The bo backing the address space.
*
- * This function is intended to be called by the fbdev mmap method
- * if the fbdev address space is to be backed by a bo.
+ * Maps a buffer object.
*/
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
@@ -785,4 +727,18 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
{
return bo->base.dev != NULL;
}
+
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf);
+
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault);
+
+void ttm_bo_vm_open(struct vm_area_struct *vma);
+
+void ttm_bo_vm_close(struct vm_area_struct *vma);
#endif
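[Editor's note: the newly exported reserve/fault helpers let drivers compose their own fault handler. A hedged sketch of that composition, following the reserve-then-fault pattern the declarations suggest; error handling is abbreviated and the dma_resv_unlock pairing is assumed:]

	static vm_fault_t example_bo_vm_fault(struct vm_fault *vmf)
	{
		struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
		vm_fault_t ret;

		/* Take the reservation, honoring fault-retry semantics. */
		ret = ttm_bo_vm_reserve(bo, vmf);
		if (ret)
			return ret;

		/* Insert up to TTM_BO_VM_NUM_PREFAULT pages around the fault. */
		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);
		if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
			return ret;

		dma_resv_unlock(bo->base.resv);
		return ret;
	}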
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6f536caea368..cac7a8a0825a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -423,7 +423,6 @@ extern struct ttm_bo_global {
*/
struct kobject kobj;
- struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
spinlock_t lru_lock;
@@ -451,7 +450,7 @@ extern struct ttm_bo_global {
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
- * @vma_manager: Address space manager
+ * @vma_manager: Address space manager (pointer)
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @dev_mapping: A pointer to the struct address_space representing the
@@ -467,14 +466,13 @@ struct ttm_bo_device {
* Constant after bo device init / atomic.
*/
struct list_head device_list;
- struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
/*
* Protected by internal locks.
*/
- struct drm_vma_offset_manager vma_manager;
+ struct drm_vma_offset_manager *vma_manager;
/*
* Protected by the global:lru lock.
@@ -595,6 +593,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
* @mapping: The address space to use for this bo.
+ * @vma_manager: A pointer to a vma manager.
* @file_page_offset: Offset into the device address space that is available
* for buffer data. This ensures compatibility with other users of the
* address space.
@@ -606,6 +605,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32);
/**
@@ -629,9 +629,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
/**
* __ttm_bo_reserve:
*
@@ -725,15 +722,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
- int ret;
-
WARN_ON(!kref_read(&bo->kref));
- ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
-
- return ret;
+ return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
}
/**
@@ -760,9 +751,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
else
dma_resv_lock_slow(bo->base.resv, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
- else if (ret == -EINTR)
+ if (ret == -EINTR)
ret = -ERESTARTSYS;
return ret;
@@ -777,12 +766,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
- spin_lock(&bo->bdev->glob->lru_lock);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
dma_resv_unlock(bo->base.resv);
}
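[Editor's note: since the vma offset manager is now supplied by the caller, device init gains a parameter. A sketch of the updated call, assuming the driver embeds its own manager; the driver-side names (drv, ddev, example_bo_driver) are illustrative:]

	drm_vma_offset_manager_init(&drv->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	ret = ttm_bo_device_init(&drv->bdev, &example_bo_driver,
				 ddev->anon_inode->i_mapping,
				 &drv->vma_manager,
				 false /* need_dma32 */);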
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 7e46cc678e7e..5a19843bb80d 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -99,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru);
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
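[Editor's note: with the del_lru flag gone, reservation callers drop one argument. A minimal fragment under the new signature; the validate list is assumed to have been populated by the driver:]

	struct ww_acquire_ctx ticket;
	LIST_HEAD(validate_list);	/* holds ttm_validate_buffer entries */
	LIST_HEAD(dups);
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     true /* intr */, &dups);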
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 3ff48a0a2d7b..c78ea99c42cf 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -65,7 +65,6 @@
struct ttm_mem_zone;
extern struct ttm_mem_global {
struct kobject kobj;
- struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 4d9b019d253c..a6b6ef5f9bf4 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -74,7 +74,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
*/
int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
+#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
/**
* Initialize pool allocator.
*/
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index f43738607d77..9ff4f6e4558c 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -39,6 +39,8 @@
#define ASPEED_CLK_BCLK 33
#define ASPEED_CLK_MPLL 34
#define ASPEED_CLK_24M 35
+#define ASPEED_CLK_MAC1RCLK 36
+#define ASPEED_CLK_MAC2RCLK 37
#define ASPEED_RESET_XDMA 0
#define ASPEED_RESET_MCTP 1
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 38074a5f7296..62b9520a00fd 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -83,6 +83,10 @@
#define ASPEED_CLK_MAC12 64
#define ASPEED_CLK_MAC34 65
#define ASPEED_CLK_USBPHY_40M 66
+#define ASPEED_CLK_MAC1RCLK 67
+#define ASPEED_CLK_MAC2RCLK 68
+#define ASPEED_CLK_MAC3RCLK 69
+#define ASPEED_CLK_MAC4RCLK 70
/* Only list resets here that are not part of a gate */
#define ASPEED_RESET_ADC 55
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index 75901c636893..f561f5c5ef8f 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -80,5 +80,15 @@
#define AUD_CLKID_TDM_SCLK_PAD0 160
#define AUD_CLKID_TDM_SCLK_PAD1 161
#define AUD_CLKID_TDM_SCLK_PAD2 162
+#define AUD_CLKID_TOP 163
+#define AUD_CLKID_TORAM 164
+#define AUD_CLKID_EQDRC 165
+#define AUD_CLKID_RESAMPLE_B 166
+#define AUD_CLKID_TOVAD 167
+#define AUD_CLKID_LOCKER 168
+#define AUD_CLKID_SPDIFIN_LB 169
+#define AUD_CLKID_FRDDR_D 170
+#define AUD_CLKID_TODDR_D 171
+#define AUD_CLKID_LOOPBACK_B 172
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/bm1880-clock.h b/include/dt-bindings/clock/bm1880-clock.h
new file mode 100644
index 000000000000..b46732361b25
--- /dev/null
+++ b/include/dt-bindings/clock/bm1880-clock.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device Tree binding constants for Bitmain BM1880 SoC
+ *
+ * Copyright (c) 2019 Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BM1880_H
+#define __DT_BINDINGS_CLOCK_BM1880_H
+
+#define BM1880_CLK_OSC 0
+#define BM1880_CLK_MPLL 1
+#define BM1880_CLK_SPLL 2
+#define BM1880_CLK_FPLL 3
+#define BM1880_CLK_DDRPLL 4
+#define BM1880_CLK_A53 5
+#define BM1880_CLK_50M_A53 6
+#define BM1880_CLK_AHB_ROM 7
+#define BM1880_CLK_AXI_SRAM 8
+#define BM1880_CLK_DDR_AXI 9
+#define BM1880_CLK_EFUSE 10
+#define BM1880_CLK_APB_EFUSE 11
+#define BM1880_CLK_AXI5_EMMC 12
+#define BM1880_CLK_EMMC 13
+#define BM1880_CLK_100K_EMMC 14
+#define BM1880_CLK_AXI5_SD 15
+#define BM1880_CLK_SD 16
+#define BM1880_CLK_100K_SD 17
+#define BM1880_CLK_500M_ETH0 18
+#define BM1880_CLK_AXI4_ETH0 19
+#define BM1880_CLK_500M_ETH1 20
+#define BM1880_CLK_AXI4_ETH1 21
+#define BM1880_CLK_AXI1_GDMA 22
+#define BM1880_CLK_APB_GPIO 23
+#define BM1880_CLK_APB_GPIO_INTR 24
+#define BM1880_CLK_GPIO_DB 25
+#define BM1880_CLK_AXI1_MINER 26
+#define BM1880_CLK_AHB_SF 27
+#define BM1880_CLK_SDMA_AXI 28
+#define BM1880_CLK_SDMA_AUD 29
+#define BM1880_CLK_APB_I2C 30
+#define BM1880_CLK_APB_WDT 31
+#define BM1880_CLK_APB_JPEG 32
+#define BM1880_CLK_JPEG_AXI 33
+#define BM1880_CLK_AXI5_NF 34
+#define BM1880_CLK_APB_NF 35
+#define BM1880_CLK_NF 36
+#define BM1880_CLK_APB_PWM 37
+#define BM1880_CLK_DIV_0_RV 38
+#define BM1880_CLK_DIV_1_RV 39
+#define BM1880_CLK_MUX_RV 40
+#define BM1880_CLK_RV 41
+#define BM1880_CLK_APB_SPI 42
+#define BM1880_CLK_TPU_AXI 43
+#define BM1880_CLK_DIV_UART_500M 44
+#define BM1880_CLK_UART_500M 45
+#define BM1880_CLK_APB_UART 46
+#define BM1880_CLK_APB_I2S 47
+#define BM1880_CLK_AXI4_USB 48
+#define BM1880_CLK_APB_USB 49
+#define BM1880_CLK_125M_USB 50
+#define BM1880_CLK_33K_USB 51
+#define BM1880_CLK_DIV_12M_USB 52
+#define BM1880_CLK_12M_USB 53
+#define BM1880_CLK_APB_VIDEO 54
+#define BM1880_CLK_VIDEO_AXI 55
+#define BM1880_CLK_VPP_AXI 56
+#define BM1880_CLK_APB_VPP 57
+#define BM1880_CLK_DIV_0_AXI1 58
+#define BM1880_CLK_DIV_1_AXI1 59
+#define BM1880_CLK_AXI1 60
+#define BM1880_CLK_AXI2 61
+#define BM1880_CLK_AXI3 62
+#define BM1880_CLK_AXI4 63
+#define BM1880_CLK_AXI5 64
+#define BM1880_CLK_DIV_0_AXI6 65
+#define BM1880_CLK_DIV_1_AXI6 66
+#define BM1880_CLK_MUX_AXI6 67
+#define BM1880_CLK_AXI6 68
+#define BM1880_NR_CLKS 69
+
+#endif /* __DT_BINDINGS_CLOCK_BM1880_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
index 6f66f9005c81..38145bdcd975 100644
--- a/include/dt-bindings/clock/imx7ulp-clock.h
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -49,6 +49,7 @@
#define IMX7ULP_CLK_NIC1_DIV 36
#define IMX7ULP_CLK_NIC1_BUS_DIV 37
#define IMX7ULP_CLK_NIC1_EXT_DIV 38
+/* IMX7ULP_CLK_MIPI_PLL is unsupported and shouldn't be used in DT */
#define IMX7ULP_CLK_MIPI_PLL 39
#define IMX7ULP_CLK_SIRC 40
#define IMX7ULP_CLK_SOSC_BUS_CLK 41
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index 07e6c686f3ef..edeece2289f0 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -248,6 +248,23 @@
#define IMX8MM_CLK_SNVS_ROOT 228
#define IMX8MM_CLK_GIC 229
-#define IMX8MM_CLK_END 230
+#define IMX8MM_SYS_PLL1_40M_CG 230
+#define IMX8MM_SYS_PLL1_80M_CG 231
+#define IMX8MM_SYS_PLL1_100M_CG 232
+#define IMX8MM_SYS_PLL1_133M_CG 233
+#define IMX8MM_SYS_PLL1_160M_CG 234
+#define IMX8MM_SYS_PLL1_200M_CG 235
+#define IMX8MM_SYS_PLL1_266M_CG 236
+#define IMX8MM_SYS_PLL1_400M_CG 237
+#define IMX8MM_SYS_PLL2_50M_CG 238
+#define IMX8MM_SYS_PLL2_100M_CG 239
+#define IMX8MM_SYS_PLL2_125M_CG 240
+#define IMX8MM_SYS_PLL2_166M_CG 241
+#define IMX8MM_SYS_PLL2_200M_CG 242
+#define IMX8MM_SYS_PLL2_250M_CG 243
+#define IMX8MM_SYS_PLL2_333M_CG 244
+#define IMX8MM_SYS_PLL2_500M_CG 245
+
+#define IMX8MM_CLK_END 246
#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index d7b201652f4c..0f2b8423ce1d 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -211,6 +211,23 @@
#define IMX8MN_CLK_GPU_CORE_ROOT 193
#define IMX8MN_CLK_GIC 194
-#define IMX8MN_CLK_END 195
+#define IMX8MN_SYS_PLL1_40M_CG 195
+#define IMX8MN_SYS_PLL1_80M_CG 196
+#define IMX8MN_SYS_PLL1_100M_CG 197
+#define IMX8MN_SYS_PLL1_133M_CG 198
+#define IMX8MN_SYS_PLL1_160M_CG 199
+#define IMX8MN_SYS_PLL1_200M_CG 200
+#define IMX8MN_SYS_PLL1_266M_CG 201
+#define IMX8MN_SYS_PLL1_400M_CG 202
+#define IMX8MN_SYS_PLL2_50M_CG 203
+#define IMX8MN_SYS_PLL2_100M_CG 204
+#define IMX8MN_SYS_PLL2_125M_CG 205
+#define IMX8MN_SYS_PLL2_166M_CG 206
+#define IMX8MN_SYS_PLL2_200M_CG 207
+#define IMX8MN_SYS_PLL2_250M_CG 208
+#define IMX8MN_SYS_PLL2_333M_CG 209
+#define IMX8MN_SYS_PLL2_500M_CG 210
+
+#define IMX8MN_CLK_END 211
#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 65463673d25e..3bab9b21c8d7 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -403,5 +403,27 @@
#define IMX8MQ_CLK_SNVS_ROOT 264
#define IMX8MQ_CLK_GIC 265
-#define IMX8MQ_CLK_END 266
+#define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
+
+#define IMX8MQ_SYS1_PLL_40M_CG 267
+#define IMX8MQ_SYS1_PLL_80M_CG 268
+#define IMX8MQ_SYS1_PLL_100M_CG 269
+#define IMX8MQ_SYS1_PLL_133M_CG 270
+#define IMX8MQ_SYS1_PLL_160M_CG 271
+#define IMX8MQ_SYS1_PLL_200M_CG 272
+#define IMX8MQ_SYS1_PLL_266M_CG 273
+#define IMX8MQ_SYS1_PLL_400M_CG 274
+#define IMX8MQ_SYS1_PLL_800M_CG 275
+#define IMX8MQ_SYS2_PLL_50M_CG 276
+#define IMX8MQ_SYS2_PLL_100M_CG 277
+#define IMX8MQ_SYS2_PLL_125M_CG 278
+#define IMX8MQ_SYS2_PLL_166M_CG 279
+#define IMX8MQ_SYS2_PLL_200M_CG 280
+#define IMX8MQ_SYS2_PLL_250M_CG 281
+#define IMX8MQ_SYS2_PLL_333M_CG 282
+#define IMX8MQ_SYS2_PLL_500M_CG 283
+#define IMX8MQ_SYS2_PLL_1000M_CG 284
+
+#define IMX8MQ_CLK_END 285
+
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
index e5411938983c..ba672064ccb4 100644
--- a/include/dt-bindings/clock/omap5.h
+++ b/include/dt-bindings/clock/omap5.h
@@ -86,6 +86,10 @@
#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170)
#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178)
+/* iva clocks */
+#define OMAP5_IVA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_SL2IF_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 00101479f7c4..5b1416fcde6f 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -85,6 +85,8 @@
#define SCLK_EMMC_DIV50 83
#define SCLK_DDRCLK 84
#define SCLK_UART1_SRC 85
+#define SCLK_SDMMC_DIV 86
+#define SCLK_SDMMC_DIV50 87
/* dclk gates */
#define DCLK_VOPB 150
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index ab376262fcea..de1d8a1f5966 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -177,6 +177,11 @@
#define GCC_UFS_CLKREF_CLK 168
#define GCC_PCIE_CLKREF_CLK 169
#define GCC_RX1_USB2_CLKREF_CLK 170
+#define GCC_MSS_CFG_AHB_CLK 171
+#define GCC_BOOT_ROM_AHB_CLK 172
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 173
+#define GCC_MSS_SNOC_AXI_CLK 174
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 175
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
@@ -290,5 +295,6 @@
#define GCC_MSMPU_BCR 105
#define GCC_QUSB2PHY_PRIM_BCR 106
#define GCC_QUSB2PHY_SEC_BCR 107
+#define GCC_MSS_RESTART 108
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
new file mode 100644
index 000000000000..e8029b2e92d7
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H
+
+/* GCC clocks */
+#define GCC_GPLL0_MAIN_DIV_CDIV 0
+#define GPLL0 1
+#define GPLL0_OUT_EVEN 2
+#define GPLL1 3
+#define GPLL4 4
+#define GPLL6 5
+#define GPLL7 6
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 7
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 8
+#define GCC_BOOT_ROM_AHB_CLK 9
+#define GCC_CAMERA_AHB_CLK 10
+#define GCC_CAMERA_HF_AXI_CLK 11
+#define GCC_CAMERA_THROTTLE_HF_AXI_CLK 12
+#define GCC_CAMERA_XO_CLK 13
+#define GCC_CE1_AHB_CLK 14
+#define GCC_CE1_AXI_CLK 15
+#define GCC_CE1_CLK 16
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 17
+#define GCC_CPUSS_AHB_CLK 18
+#define GCC_CPUSS_AHB_CLK_SRC 19
+#define GCC_CPUSS_GNOC_CLK 20
+#define GCC_CPUSS_RBCPR_CLK 21
+#define GCC_DDRSS_GPU_AXI_CLK 22
+#define GCC_DISP_AHB_CLK 23
+#define GCC_DISP_GPLL0_CLK_SRC 24
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
+#define GCC_DISP_HF_AXI_CLK 26
+#define GCC_DISP_THROTTLE_HF_AXI_CLK 27
+#define GCC_DISP_XO_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP1_CLK_SRC 30
+#define GCC_GP2_CLK 31
+#define GCC_GP2_CLK_SRC 32
+#define GCC_GP3_CLK 33
+#define GCC_GP3_CLK_SRC 34
+#define GCC_GPU_CFG_AHB_CLK 35
+#define GCC_GPU_GPLL0_CLK_SRC 36
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 37
+#define GCC_GPU_MEMNOC_GFX_CLK 38
+#define GCC_GPU_SNOC_DVM_GFX_CLK 39
+#define GCC_NPU_AXI_CLK 40
+#define GCC_NPU_BWMON_AXI_CLK 41
+#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK 42
+#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK 43
+#define GCC_NPU_CFG_AHB_CLK 44
+#define GCC_NPU_DMA_CLK 45
+#define GCC_NPU_GPLL0_CLK_SRC 46
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 47
+#define GCC_PDM2_CLK 48
+#define GCC_PDM2_CLK_SRC 49
+#define GCC_PDM_AHB_CLK 50
+#define GCC_PDM_XO4_CLK 51
+#define GCC_PRNG_AHB_CLK 52
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 53
+#define GCC_QSPI_CORE_CLK 54
+#define GCC_QSPI_CORE_CLK_SRC 55
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 56
+#define GCC_QUPV3_WRAP0_CORE_CLK 57
+#define GCC_QUPV3_WRAP0_S0_CLK 58
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 59
+#define GCC_QUPV3_WRAP0_S1_CLK 60
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 61
+#define GCC_QUPV3_WRAP0_S2_CLK 62
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 63
+#define GCC_QUPV3_WRAP0_S3_CLK 64
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 65
+#define GCC_QUPV3_WRAP0_S4_CLK 66
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 67
+#define GCC_QUPV3_WRAP0_S5_CLK 68
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 69
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 70
+#define GCC_QUPV3_WRAP1_CORE_CLK 71
+#define GCC_QUPV3_WRAP1_S0_CLK 72
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 73
+#define GCC_QUPV3_WRAP1_S1_CLK 74
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 75
+#define GCC_QUPV3_WRAP1_S2_CLK 76
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_S3_CLK 78
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_S4_CLK 80
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S5_CLK 82
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 83
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 84
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 85
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 86
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 87
+#define GCC_SDCC1_AHB_CLK 88
+#define GCC_SDCC1_APPS_CLK 89
+#define GCC_SDCC1_APPS_CLK_SRC 90
+#define GCC_SDCC1_ICE_CORE_CLK 91
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 92
+#define GCC_SDCC2_AHB_CLK 93
+#define GCC_SDCC2_APPS_CLK 94
+#define GCC_SDCC2_APPS_CLK_SRC 95
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 96
+#define GCC_UFS_MEM_CLKREF_CLK 97
+#define GCC_UFS_PHY_AHB_CLK 98
+#define GCC_UFS_PHY_AXI_CLK 99
+#define GCC_UFS_PHY_AXI_CLK_SRC 100
+#define GCC_UFS_PHY_ICE_CORE_CLK 101
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 102
+#define GCC_UFS_PHY_PHY_AUX_CLK 103
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 104
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 105
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 106
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 107
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 108
+#define GCC_USB30_PRIM_MASTER_CLK 109
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 110
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 111
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 112
+#define GCC_USB30_PRIM_SLEEP_CLK 113
+#define GCC_USB3_PRIM_CLKREF_CLK 114
+#define GCC_USB3_PRIM_PHY_AUX_CLK 115
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 116
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 117
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 118
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 119
+#define GCC_VIDEO_AHB_CLK 120
+#define GCC_VIDEO_AXI_CLK 121
+#define GCC_VIDEO_GPLL0_DIV_CLK_SRC 122
+#define GCC_VIDEO_THROTTLE_AXI_CLK 123
+#define GCC_VIDEO_XO_CLK 124
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_PHY_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB3_DP_PHY_PRIM_BCR 4
+#define GCC_USB3_DP_PHY_SEC_BCR 5
+#define GCC_USB3_PHY_PRIM_BCR 6
+#define GCC_USB3_PHY_SEC_BCR 7
+#define GCC_USB3PHY_PHY_PRIM_BCR 8
+#define GCC_USB3PHY_PHY_SEC_BCR 9
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 10
+
+/* GCC GDSCRs */
+#define UFS_PHY_GDSC 0
+#define USB30_PRIM_GDSC 1
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 2
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h b/include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h
new file mode 100644
index 000000000000..c6f5290f0914
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_Q6SSTOP_QCS404_H
+#define _DT_BINDINGS_CLK_Q6SSTOP_QCS404_H
+
+#define LCC_AHBFABRIC_CBC_CLK 0
+#define LCC_Q6SS_AHBS_CBC_CLK 1
+#define LCC_Q6SS_TCM_SLAVE_CBC_CLK 2
+#define LCC_Q6SS_AHBM_CBC_CLK 3
+#define LCC_Q6SS_AXIM_CBC_CLK 4
+#define LCC_Q6SS_BCR_SLEEP_CLK 5
+#define TCSR_Q6SS_LCC_CBCR_CLK 6
+
+#define Q6SSTOP_BCR_RESET 1
+#endif
diff --git a/include/dt-bindings/clock/r8a774b1-cpg-mssr.h b/include/dt-bindings/clock/r8a774b1-cpg-mssr.h
new file mode 100644
index 000000000000..1355451b74b0
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774b1-cpg-mssr.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774B1_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774B1_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a774b1 CPG Core Clocks */
+#define R8A774B1_CLK_Z 0
+#define R8A774B1_CLK_ZG 1
+#define R8A774B1_CLK_ZTR 2
+#define R8A774B1_CLK_ZTRD2 3
+#define R8A774B1_CLK_ZT 4
+#define R8A774B1_CLK_ZX 5
+#define R8A774B1_CLK_S0D1 6
+#define R8A774B1_CLK_S0D2 7
+#define R8A774B1_CLK_S0D3 8
+#define R8A774B1_CLK_S0D4 9
+#define R8A774B1_CLK_S0D6 10
+#define R8A774B1_CLK_S0D8 11
+#define R8A774B1_CLK_S0D12 12
+#define R8A774B1_CLK_S1D2 13
+#define R8A774B1_CLK_S1D4 14
+#define R8A774B1_CLK_S2D1 15
+#define R8A774B1_CLK_S2D2 16
+#define R8A774B1_CLK_S2D4 17
+#define R8A774B1_CLK_S3D1 18
+#define R8A774B1_CLK_S3D2 19
+#define R8A774B1_CLK_S3D4 20
+#define R8A774B1_CLK_LB 21
+#define R8A774B1_CLK_CL 22
+#define R8A774B1_CLK_ZB3 23
+#define R8A774B1_CLK_ZB3D2 24
+#define R8A774B1_CLK_CR 25
+#define R8A774B1_CLK_DDR 26
+#define R8A774B1_CLK_SD0H 27
+#define R8A774B1_CLK_SD0 28
+#define R8A774B1_CLK_SD1H 29
+#define R8A774B1_CLK_SD1 30
+#define R8A774B1_CLK_SD2H 31
+#define R8A774B1_CLK_SD2 32
+#define R8A774B1_CLK_SD3H 33
+#define R8A774B1_CLK_SD3 34
+#define R8A774B1_CLK_RPC 35
+#define R8A774B1_CLK_RPCD2 36
+#define R8A774B1_CLK_MSO 37
+#define R8A774B1_CLK_HDMI 38
+#define R8A774B1_CLK_CSI0 39
+#define R8A774B1_CLK_CP 40
+#define R8A774B1_CLK_CPEX 41
+#define R8A774B1_CLK_R 42
+#define R8A774B1_CLK_OSC 43
+#define R8A774B1_CLK_CANFD 44
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774B1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a77961-cpg-mssr.h b/include/dt-bindings/clock/r8a77961-cpg-mssr.h
new file mode 100644
index 000000000000..7921d785546d
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77961-cpg-mssr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77961_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77961_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77961 CPG Core Clocks */
+#define R8A77961_CLK_Z 0
+#define R8A77961_CLK_Z2 1
+#define R8A77961_CLK_ZR 2
+#define R8A77961_CLK_ZG 3
+#define R8A77961_CLK_ZTR 4
+#define R8A77961_CLK_ZTRD2 5
+#define R8A77961_CLK_ZT 6
+#define R8A77961_CLK_ZX 7
+#define R8A77961_CLK_S0D1 8
+#define R8A77961_CLK_S0D2 9
+#define R8A77961_CLK_S0D3 10
+#define R8A77961_CLK_S0D4 11
+#define R8A77961_CLK_S0D6 12
+#define R8A77961_CLK_S0D8 13
+#define R8A77961_CLK_S0D12 14
+#define R8A77961_CLK_S1D1 15
+#define R8A77961_CLK_S1D2 16
+#define R8A77961_CLK_S1D4 17
+#define R8A77961_CLK_S2D1 18
+#define R8A77961_CLK_S2D2 19
+#define R8A77961_CLK_S2D4 20
+#define R8A77961_CLK_S3D1 21
+#define R8A77961_CLK_S3D2 22
+#define R8A77961_CLK_S3D4 23
+#define R8A77961_CLK_LB 24
+#define R8A77961_CLK_CL 25
+#define R8A77961_CLK_ZB3 26
+#define R8A77961_CLK_ZB3D2 27
+#define R8A77961_CLK_ZB3D4 28
+#define R8A77961_CLK_CR 29
+#define R8A77961_CLK_CRD2 30
+#define R8A77961_CLK_SD0H 31
+#define R8A77961_CLK_SD0 32
+#define R8A77961_CLK_SD1H 33
+#define R8A77961_CLK_SD1 34
+#define R8A77961_CLK_SD2H 35
+#define R8A77961_CLK_SD2 36
+#define R8A77961_CLK_SD3H 37
+#define R8A77961_CLK_SD3 38
+#define R8A77961_CLK_SSP2 39
+#define R8A77961_CLK_SSP1 40
+#define R8A77961_CLK_SSPRS 41
+#define R8A77961_CLK_RPC 42
+#define R8A77961_CLK_RPCD2 43
+#define R8A77961_CLK_MSO 44
+#define R8A77961_CLK_CANFD 45
+#define R8A77961_CLK_HDMI 46
+#define R8A77961_CLK_CSI0 47
+/* CLK_CSIREF was removed */
+#define R8A77961_CLK_CP 49
+#define R8A77961_CLK_CPEX 50
+#define R8A77961_CLK_R 51
+#define R8A77961_CLK_OSC 52
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77961_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index c5f7e9a70968..30d2d15373a2 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -143,7 +143,7 @@
#define CLK_AVS 110
#define CLK_HDMI 111
#define CLK_HDMI_DDC 112
-
+#define CLK_MBUS 113
#define CLK_GPU 114
/* New clocks imported in H5 */
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index 4331f1df6ebe..0c4f5be0a742 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -337,7 +337,8 @@
#define TEGRA124_CLK_CLK_OUT_3_MUX 308
/* 309 */
/* 310 */
-#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_SOR0_LVDS 311 /* deprecated */
+#define TEGRA124_CLK_SOR0_OUT 311
#define TEGRA124_CLK_XUSB_SS_DIV2 312
#define TEGRA124_CLK_PLL_M_UD 313
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6b77e721f6b1..44f60623f99b 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -308,8 +308,8 @@
#define TEGRA210_CLK_CLK_OUT_2 278
#define TEGRA210_CLK_CLK_OUT_3 279
#define TEGRA210_CLK_BLINK 280
-/* 281 */
-#define TEGRA210_CLK_SOR1_SRC 282
+#define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */
+#define TEGRA210_CLK_SOR0_OUT 281
#define TEGRA210_CLK_SOR1_OUT 282
/* 283 */
#define TEGRA210_CLK_XUSB_HOST_SRC 284
@@ -391,7 +391,7 @@
#define TEGRA210_CLK_CLK_OUT_3_MUX 358
#define TEGRA210_CLK_DSIA_MUX 359
#define TEGRA210_CLK_DSIB_MUX 360
-#define TEGRA210_CLK_SOR0_LVDS 361
+/* 361 */
#define TEGRA210_CLK_XUSB_SS_DIV2 362
#define TEGRA210_CLK_PLL_M_UD 363
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h
new file mode 100644
index 000000000000..bbaebaf7adb9
--- /dev/null
+++ b/include/dt-bindings/clock/x1000-cgu.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,x1000-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the x1000 programmer's manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__
+#define __DT_BINDINGS_CLOCK_X1000_CGU_H__
+
+#define X1000_CLK_EXCLK 0
+#define X1000_CLK_RTCLK 1
+#define X1000_CLK_APLL 2
+#define X1000_CLK_MPLL 3
+#define X1000_CLK_SCLKA 4
+#define X1000_CLK_CPUMUX 5
+#define X1000_CLK_CPU 6
+#define X1000_CLK_L2CACHE 7
+#define X1000_CLK_AHB0 8
+#define X1000_CLK_AHB2PMUX 9
+#define X1000_CLK_AHB2 10
+#define X1000_CLK_PCLK 11
+#define X1000_CLK_DDR 12
+#define X1000_CLK_MAC 13
+#define X1000_CLK_MSCMUX 14
+#define X1000_CLK_MSC0 15
+#define X1000_CLK_MSC1 16
+#define X1000_CLK_SSIPLL 17
+#define X1000_CLK_SSIMUX 18
+#define X1000_CLK_SFC 19
+#define X1000_CLK_I2C0 20
+#define X1000_CLK_I2C1 21
+#define X1000_CLK_I2C2 22
+#define X1000_CLK_UART0 23
+#define X1000_CLK_UART1 24
+#define X1000_CLK_UART2 25
+#define X1000_CLK_SSI 26
+#define X1000_CLK_PDMA 27
+
+#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/dma/x1000-dma.h b/include/dt-bindings/dma/x1000-dma.h
new file mode 100644
index 000000000000..401e1656e696
--- /dev/null
+++ b/include/dt-bindings/dma/x1000-dma.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1000 DMA bindings.
+ *
+ * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1000_DMA_H__
+#define __DT_BINDINGS_DMA_X1000_DMA_H__
+
+/*
+ * Request type numbers for the X1000 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1000_DMA_DMIC_RX 0x5
+#define X1000_DMA_I2S0_TX 0x6
+#define X1000_DMA_I2S0_RX 0x7
+#define X1000_DMA_AUTO 0x8
+#define X1000_DMA_UART2_TX 0x10
+#define X1000_DMA_UART2_RX 0x11
+#define X1000_DMA_UART1_TX 0x12
+#define X1000_DMA_UART1_RX 0x13
+#define X1000_DMA_UART0_TX 0x14
+#define X1000_DMA_UART0_RX 0x15
+#define X1000_DMA_SSI0_TX 0x16
+#define X1000_DMA_SSI0_RX 0x17
+#define X1000_DMA_MSC0_TX 0x1a
+#define X1000_DMA_MSC0_RX 0x1b
+#define X1000_DMA_MSC1_TX 0x1c
+#define X1000_DMA_MSC1_RX 0x1d
+#define X1000_DMA_PCM0_TX 0x20
+#define X1000_DMA_PCM0_RX 0x21
+#define X1000_DMA_SMB0_TX 0x24
+#define X1000_DMA_SMB0_RX 0x25
+#define X1000_DMA_SMB1_TX 0x26
+#define X1000_DMA_SMB1_RX 0x27
+#define X1000_DMA_SMB2_TX 0x28
+#define X1000_DMA_SMB2_RX 0x29
+
+#endif /* __DT_BINDINGS_DMA_X1000_DMA_H__ */
diff --git a/include/dt-bindings/gpio/meson-a1-gpio.h b/include/dt-bindings/gpio/meson-a1-gpio.h
new file mode 100644
index 000000000000..40e57a5ff1db
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-a1-gpio.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_A1_GPIO_H
+#define _DT_BINDINGS_MESON_A1_GPIO_H
+
+#define GPIOP_0 0
+#define GPIOP_1 1
+#define GPIOP_2 2
+#define GPIOP_3 3
+#define GPIOP_4 4
+#define GPIOP_5 5
+#define GPIOP_6 6
+#define GPIOP_7 7
+#define GPIOP_8 8
+#define GPIOP_9 9
+#define GPIOP_10 10
+#define GPIOP_11 11
+#define GPIOP_12 12
+#define GPIOB_0 13
+#define GPIOB_1 14
+#define GPIOB_2 15
+#define GPIOB_3 16
+#define GPIOB_4 17
+#define GPIOB_5 18
+#define GPIOB_6 19
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOF_0 37
+#define GPIOF_1 38
+#define GPIOF_2 39
+#define GPIOF_3 40
+#define GPIOF_4 41
+#define GPIOF_5 42
+#define GPIOF_6 43
+#define GPIOF_7 44
+#define GPIOF_8 45
+#define GPIOF_9 46
+#define GPIOF_10 47
+#define GPIOF_11 48
+#define GPIOF_12 49
+#define GPIOA_0 50
+#define GPIOA_1 51
+#define GPIOA_2 52
+#define GPIOA_3 53
+#define GPIOA_4 54
+#define GPIOA_5 55
+#define GPIOA_6 56
+#define GPIOA_7 57
+#define GPIOA_8 58
+#define GPIOA_9 59
+#define GPIOA_10 60
+#define GPIOA_11 61
+
+#endif /* _DT_BINDINGS_MESON_A1_GPIO_H */
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
index 82706b2706ac..42f871ab3272 100644
--- a/include/dt-bindings/iio/adc/ingenic,adc.h
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -6,5 +6,6 @@
/* ADC channel idx. */
#define INGENIC_ADC_AUX 0
#define INGENIC_ADC_BATTERY 1
+#define INGENIC_ADC_AUX2 2
#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8974.h b/include/dt-bindings/interconnect/qcom,msm8974.h
new file mode 100644
index 000000000000..e65ae27ffff2
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8974.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Qualcomm msm8974 interconnect IDs
+ *
+ * Copyright (c) 2019 Brian Masney <masneyb@onstation.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8974_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8974_H
+
+#define BIMC_MAS_AMPSS_M0 0
+#define BIMC_MAS_AMPSS_M1 1
+#define BIMC_MAS_MSS_PROC 2
+#define BIMC_TO_MNOC 3
+#define BIMC_TO_SNOC 4
+#define BIMC_SLV_EBI_CH0 5
+#define BIMC_SLV_AMPSS_L2 6
+
+#define CNOC_MAS_RPM_INST 0
+#define CNOC_MAS_RPM_DATA 1
+#define CNOC_MAS_RPM_SYS 2
+#define CNOC_MAS_DEHR 3
+#define CNOC_MAS_QDSS_DAP 4
+#define CNOC_MAS_SPDM 5
+#define CNOC_MAS_TIC 6
+#define CNOC_SLV_CLK_CTL 7
+#define CNOC_SLV_CNOC_MSS 8
+#define CNOC_SLV_SECURITY 9
+#define CNOC_SLV_TCSR 10
+#define CNOC_SLV_TLMM 11
+#define CNOC_SLV_CRYPTO_0_CFG 12
+#define CNOC_SLV_CRYPTO_1_CFG 13
+#define CNOC_SLV_IMEM_CFG 14
+#define CNOC_SLV_MESSAGE_RAM 15
+#define CNOC_SLV_BIMC_CFG 16
+#define CNOC_SLV_BOOT_ROM 17
+#define CNOC_SLV_PMIC_ARB 18
+#define CNOC_SLV_SPDM_WRAPPER 19
+#define CNOC_SLV_DEHR_CFG 20
+#define CNOC_SLV_MPM 21
+#define CNOC_SLV_QDSS_CFG 22
+#define CNOC_SLV_RBCPR_CFG 23
+#define CNOC_SLV_RBCPR_QDSS_APU_CFG 24
+#define CNOC_TO_SNOC 25
+#define CNOC_SLV_CNOC_ONOC_CFG 26
+#define CNOC_SLV_CNOC_MNOC_MMSS_CFG 27
+#define CNOC_SLV_CNOC_MNOC_CFG 28
+#define CNOC_SLV_PNOC_CFG 29
+#define CNOC_SLV_SNOC_MPU_CFG 30
+#define CNOC_SLV_SNOC_CFG 31
+#define CNOC_SLV_EBI1_DLL_CFG 32
+#define CNOC_SLV_PHY_APU_CFG 33
+#define CNOC_SLV_EBI1_PHY_CFG 34
+#define CNOC_SLV_RPM 35
+#define CNOC_SLV_SERVICE_CNOC 36
+
+#define MNOC_MAS_GRAPHICS_3D 0
+#define MNOC_MAS_JPEG 1
+#define MNOC_MAS_MDP_PORT0 2
+#define MNOC_MAS_VIDEO_P0 3
+#define MNOC_MAS_VIDEO_P1 4
+#define MNOC_MAS_VFE 5
+#define MNOC_TO_CNOC 6
+#define MNOC_TO_BIMC 7
+#define MNOC_SLV_CAMERA_CFG 8
+#define MNOC_SLV_DISPLAY_CFG 9
+#define MNOC_SLV_OCMEM_CFG 10
+#define MNOC_SLV_CPR_CFG 11
+#define MNOC_SLV_CPR_XPU_CFG 12
+#define MNOC_SLV_MISC_CFG 13
+#define MNOC_SLV_MISC_XPU_CFG 14
+#define MNOC_SLV_VENUS_CFG 15
+#define MNOC_SLV_GRAPHICS_3D_CFG 16
+#define MNOC_SLV_MMSS_CLK_CFG 17
+#define MNOC_SLV_MMSS_CLK_XPU_CFG 18
+#define MNOC_SLV_MNOC_MPU_CFG 19
+#define MNOC_SLV_ONOC_MPU_CFG 20
+#define MNOC_SLV_SERVICE_MNOC 21
+
+#define OCMEM_NOC_TO_OCMEM_VNOC 0
+#define OCMEM_MAS_JPEG_OCMEM 1
+#define OCMEM_MAS_MDP_OCMEM 2
+#define OCMEM_MAS_VIDEO_P0_OCMEM 3
+#define OCMEM_MAS_VIDEO_P1_OCMEM 4
+#define OCMEM_MAS_VFE_OCMEM 5
+#define OCMEM_MAS_CNOC_ONOC_CFG 6
+#define OCMEM_SLV_SERVICE_ONOC 7
+#define OCMEM_VNOC_TO_SNOC 8
+#define OCMEM_VNOC_TO_OCMEM_NOC 9
+#define OCMEM_VNOC_MAS_GFX3D 10
+#define OCMEM_SLV_OCMEM 11
+
+#define PNOC_MAS_PNOC_CFG 0
+#define PNOC_MAS_SDCC_1 1
+#define PNOC_MAS_SDCC_3 2
+#define PNOC_MAS_SDCC_4 3
+#define PNOC_MAS_SDCC_2 4
+#define PNOC_MAS_TSIF 5
+#define PNOC_MAS_BAM_DMA 6
+#define PNOC_MAS_BLSP_2 7
+#define PNOC_MAS_USB_HSIC 8
+#define PNOC_MAS_BLSP_1 9
+#define PNOC_MAS_USB_HS 10
+#define PNOC_TO_SNOC 11
+#define PNOC_SLV_SDCC_1 12
+#define PNOC_SLV_SDCC_3 13
+#define PNOC_SLV_SDCC_2 14
+#define PNOC_SLV_SDCC_4 15
+#define PNOC_SLV_TSIF 16
+#define PNOC_SLV_BAM_DMA 17
+#define PNOC_SLV_BLSP_2 18
+#define PNOC_SLV_USB_HSIC 19
+#define PNOC_SLV_BLSP_1 20
+#define PNOC_SLV_USB_HS 21
+#define PNOC_SLV_PDM 22
+#define PNOC_SLV_PERIPH_APU_CFG 23
+#define PNOC_SLV_PNOC_MPU_CFG 24
+#define PNOC_SLV_PRNG 25
+#define PNOC_SLV_SERVICE_PNOC 26
+
+#define SNOC_MAS_LPASS_AHB 0
+#define SNOC_MAS_QDSS_BAM 1
+#define SNOC_MAS_SNOC_CFG 2
+#define SNOC_TO_BIMC 3
+#define SNOC_TO_CNOC 4
+#define SNOC_TO_PNOC 5
+#define SNOC_TO_OCMEM_VNOC 6
+#define SNOC_MAS_CRYPTO_CORE0 7
+#define SNOC_MAS_CRYPTO_CORE1 8
+#define SNOC_MAS_LPASS_PROC 9
+#define SNOC_MAS_MSS 10
+#define SNOC_MAS_MSS_NAV 11
+#define SNOC_MAS_OCMEM_DMA 12
+#define SNOC_MAS_WCSS 13
+#define SNOC_MAS_QDSS_ETR 14
+#define SNOC_MAS_USB3 15
+#define SNOC_SLV_AMPSS 16
+#define SNOC_SLV_LPASS 17
+#define SNOC_SLV_USB3 18
+#define SNOC_SLV_WCSS 19
+#define SNOC_SLV_OCIMEM 20
+#define SNOC_SLV_SNOC_OCMEM 21
+#define SNOC_SLV_SERVICE_SNOC 22
+#define SNOC_SLV_QDSS_STM 23
+
+#endif
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 3831f91fb3ba..e8e117306b1b 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -27,8 +27,8 @@
#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5)
#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5)
-#define AT91_PINCTRL_SLEWRATE_DIS (0x0 << 9)
-#define AT91_PINCTRL_SLEWRATE_ENA (0x1 << 9)
+#define AT91_PINCTRL_SLEWRATE_ENA (0x0 << 9)
+#define AT91_PINCTRL_SLEWRATE_DIS (0x1 << 9)
#define AT91_PIOA 0
#define AT91_PIOB 1
diff --git a/include/dt-bindings/pmu/exynos_ppmu.h b/include/dt-bindings/pmu/exynos_ppmu.h
new file mode 100644
index 000000000000..8724abe130f3
--- /dev/null
+++ b/include/dt-bindings/pmu/exynos_ppmu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Samsung Exynos PPMU event types for counting in regs
+ *
+ * Copyright (c) 2019, Samsung Electronics
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+#define __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+
+#define PPMU_RO_BUSY_CYCLE_CNT 0x0
+#define PPMU_WO_BUSY_CYCLE_CNT 0x1
+#define PPMU_RW_BUSY_CYCLE_CNT 0x2
+#define PPMU_RO_REQUEST_CNT 0x3
+#define PPMU_WO_REQUEST_CNT 0x4
+#define PPMU_RO_DATA_CNT 0x5
+#define PPMU_WO_DATA_CNT 0x6
+#define PPMU_RO_LATENCY 0x12
+#define PPMU_WO_LATENCY 0x16
+#define PPMU_V2_RO_DATA_CNT 0x4
+#define PPMU_V2_WO_DATA_CNT 0x5
+#define PPMU_V2_EVT3_RW_DATA_CNT 0x22
+
+#endif
diff --git a/include/dt-bindings/power/r8a774b1-sysc.h b/include/dt-bindings/power/r8a774b1-sysc.h
new file mode 100644
index 000000000000..373736402f04
--- /dev/null
+++ b/include/dt-bindings/power/r8a774b1-sysc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A774B1_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A774B1_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A774B1_PD_CA57_CPU0 0
+#define R8A774B1_PD_CA57_CPU1 1
+#define R8A774B1_PD_A3VP 9
+#define R8A774B1_PD_CA57_SCU 12
+#define R8A774B1_PD_A3VC 14
+#define R8A774B1_PD_3DG_A 17
+#define R8A774B1_PD_3DG_B 18
+#define R8A774B1_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A774B1_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A774B1_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a77961-sysc.h b/include/dt-bindings/power/r8a77961-sysc.h
new file mode 100644
index 000000000000..7a3800996f7c
--- /dev/null
+++ b/include/dt-bindings/power/r8a77961-sysc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Glider bvba
+ */
+#ifndef __DT_BINDINGS_POWER_R8A77961_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77961_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77961_PD_CA57_CPU0 0
+#define R8A77961_PD_CA57_CPU1 1
+#define R8A77961_PD_CA53_CPU0 5
+#define R8A77961_PD_CA53_CPU1 6
+#define R8A77961_PD_CA53_CPU2 7
+#define R8A77961_PD_CA53_CPU3 8
+#define R8A77961_PD_CA57_SCU 12
+#define R8A77961_PD_CR7 13
+#define R8A77961_PD_A3VC 14
+#define R8A77961_PD_3DG_A 17
+#define R8A77961_PD_3DG_B 18
+#define R8A77961_PD_CA53_SCU 21
+#define R8A77961_PD_A3IR 24
+#define R8A77961_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A77961_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77961_SYSC_H__ */
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h
index 14b78dabed0e..f805129ca7af 100644
--- a/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h
@@ -35,4 +35,19 @@
#define AUD_RESET_TOHDMITX 24
#define AUD_RESET_CLKTREE 25
+/* SM1 added resets */
+#define AUD_RESET_RESAMPLE_B 26
+#define AUD_RESET_TOVAD 27
+#define AUD_RESET_LOCKER 28
+#define AUD_RESET_SPDIFIN_LB 29
+#define AUD_RESET_FRATV 30
+#define AUD_RESET_FRHDMIRX 31
+#define AUD_RESET_FRDDR_D 32
+#define AUD_RESET_TODDR_D 33
+#define AUD_RESET_LOOPBACK_B 34
+#define AUD_RESET_EARCTX 35
+#define AUD_RESET_EARCRX 36
+#define AUD_RESET_FRDDR_E 37
+#define AUD_RESET_TODDR_E 38
+
#endif
diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h
index 77545f14c379..250de0d6c734 100644
--- a/include/dt-bindings/sound/samsung-i2s.h
+++ b/include/dt-bindings/sound/samsung-i2s.h
@@ -2,8 +2,14 @@
#ifndef _DT_BINDINGS_SAMSUNG_I2S_H
#define _DT_BINDINGS_SAMSUNG_I2S_H
-#define CLK_I2S_CDCLK 0
-#define CLK_I2S_RCLK_SRC 1
-#define CLK_I2S_RCLK_PSR 2
+#define CLK_I2S_CDCLK 0 /* the CDCLK (CODECLKO) gate clock */
+
+#define CLK_I2S_RCLK_SRC 1 /* the RCLKSRC mux clock (corresponding to
+ * RCLKSRC bit in IISMOD register)
+ */
+
+#define CLK_I2S_RCLK_PSR 2 /* the RCLK prescaler divider clock
+ * (corresponding to the IISPSR register)
+ */
#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index c1a96fdf598b..fb8b07daa9d1 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -35,12 +35,18 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
extern int mark_hash_blacklisted(const char *hash);
extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
const char *type);
+extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
#else
static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
const char *type)
{
return 0;
}
+
+static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_IMA_BLACKLIST_KEYRING
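[Editor's note: a hedged usage sketch for the new helper. Judging from the stub's unconditional 0 return, a non-zero return is assumed to mean the hash is blacklisted; that semantic is an assumption, not confirmed by this hunk:]

	ret = is_binary_blacklisted(digest, digest_len);
	if (ret)	/* assumed: non-zero means the hash is on the blacklist */
		return ret;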
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 8b4e516bac00..0f37a7d5fa77 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -678,6 +678,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+struct acpi_device;
+
+static inline bool
+acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
+{
+ return false;
+}
+
static inline struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 514bffa11dbb..fa19e01f418a 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -46,6 +46,8 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+void pci_save_aer_state(struct pci_dev *dev);
+void pci_restore_aer_state(struct pci_dev *dev);
#else
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -63,6 +65,8 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline void pci_save_aer_state(struct pci_dev *dev) {}
+static inline void pci_restore_aer_state(struct pci_dev *dev) {}
#endif
void cper_print_aer(struct pci_dev *dev, int aer_severity,
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 42f2b5126094..3015ecbb90b1 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -57,6 +57,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
+int parse_acpi_topology(void);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index aee3dc9eb378..f9ceae57ca8d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -156,7 +156,8 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
-extern void audit_log_link_denied(const char *operation);
+extern void audit_log_path_denied(int type,
+ const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
@@ -217,7 +218,7 @@ static inline void audit_log_d_path(struct audit_buffer *ab,
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
-static inline void audit_log_link_denied(const char *string)
+static inline void audit_log_path_denied(int type, const char *operation)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 397bb9bc230b..6012e2592628 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -869,6 +869,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
+extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
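[Editor's note: a sketch of how an ioctl path might use the new SG header helpers, assuming they copy the header between kernel and user space (handling compat layouts); error handling abbreviated:]

	struct sg_io_hdr hdr;
	int ret;

	ret = get_sg_io_hdr(&hdr, argp);	/* copy header in from user space */
	if (ret)
		return ret;

	/* ... perform the SG_IO request using hdr ... */

	return put_sg_io_hdr(&hdr, argp);	/* copy updated header back out */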
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 62b1eb348858..8ed53d7524ea 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -5,6 +5,6 @@
#include <linux/types.h>
void *bsearch(const void *key, const void *base, size_t num, size_t size,
- int (*cmp)(const void *key, const void *elt));
+ cmp_func_t cmp);
#endif /* _LINUX_BSEARCH_H */
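[Editor's note: cmp_func_t is the generic comparison typedef, int (*)(const void *, const void *), so existing comparators keep working unchanged. A minimal sketch:]

	static int example_cmp_int(const void *key, const void *elt)
	{
		/* Negative/zero/positive, like any bsearch comparator. */
		return *(const int *)key - *(const int *)elt;
	}

	static bool example_contains(int key)
	{
		static const int sorted[] = { 1, 3, 5, 7 };	/* must be ascending */

		return bsearch(&key, sorted, ARRAY_SIZE(sorted),
			       sizeof(sorted[0]), example_cmp_int) != NULL;
	}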
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2fdfe8061363..caf4b9df16eb 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -818,6 +818,7 @@ unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
unsigned int index);
+int clk_hw_get_parent_index(struct clk_hw *hw);
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index b8aef62cc3f5..2b1b35240074 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -108,6 +108,19 @@ static inline void tegra_cpu_clock_resume(void)
tegra_cpu_car_ops->resume();
}
+#else
+static inline bool tegra_cpu_rail_off_ready(void)
+{
+ return false;
+}
+
+static inline void tegra_cpu_clock_suspend(void)
+{
+}
+
+static inline void tegra_cpu_clock_resume(void)
+{
+}
#endif
extern void tegra210_xusb_pll_hw_control_enable(void);
@@ -119,4 +132,15 @@ extern void tegra210_put_utmipll_in_iddq(void);
extern void tegra210_put_utmipll_out_iddq(void);
extern int tegra210_clk_handle_mbist_war(unsigned int id);
+struct clk;
+
+typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg);
+
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg);
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+
#endif /* __LINUX_CLK_TEGRA_H_ */
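[Editor's note: a sketch of a round-rate callback matching the new typedef; the clamp-based policy is illustrative only, a real EMC driver would round to a supported timing:]

	static long example_emc_round_rate(unsigned long rate,
					   unsigned long min_rate,
					   unsigned long max_rate,
					   void *arg)
	{
		/* Illustrative policy: clamp into the supported window. */
		return clamp(rate, min_rate, max_rate);
	}

	/* Registered once by the EMC driver: */
	tegra20_clk_set_emc_round_callback(example_emc_round_rate, NULL);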
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 1e8ef96555ce..c62f6fa6763d 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -153,7 +153,7 @@ struct clk_hw_omap {
u8 fixed_div;
struct clk_omap_reg enable_reg;
u8 enable_bit;
- u8 flags;
+ unsigned long flags;
struct clk_omap_reg clksel_reg;
struct dpll_data *dpll_data;
const char *clkdm_name;
@@ -298,6 +298,7 @@ struct ti_clk_features {
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
+bool ti_clk_is_in_standby(struct clk *clk);
int omap3_noncore_dpll_save_context(struct clk_hw *hw);
void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index c4c389c7e1b4..68f79d855c3d 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -116,14 +116,7 @@ typedef __compat_gid32_t compat_gid_t;
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerval {
- struct old_timeval32 it_interval;
- struct old_timeval32 it_value;
-};
-
-struct itimerval;
-int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *);
-int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *);
+struct old_itimerval32;
struct compat_tms {
compat_clock_t tms_utime;
@@ -666,10 +659,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
- struct compat_itimerval __user *it);
+ struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
- struct compat_itimerval __user *in,
- struct compat_itimerval __user *out);
+ struct old_itimerval32 __user *in,
+ struct old_itimerval32 __user *out);
/* kernel/kexec.c */
asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
@@ -935,10 +928,10 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
*/
static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
- struct timeval tv;
+ struct __kernel_old_timeval tv;
struct old_timeval32 ctv;
- tv = ns_to_timeval(nsec);
+ tv = ns_to_kernel_old_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a2b68823717b..44e552de419c 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -285,6 +285,8 @@ extern void coresight_disclaim_device(void __iomem *base);
extern void coresight_disclaim_device_unlocked(void __iomem *base);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
+
+extern bool coresight_loses_context_with_cpu(struct device *dev);
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -307,6 +309,10 @@ static inline int coresight_claim_device(void __iomem *base)
static inline void coresight_disclaim_device(void __iomem *base) {}
static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+static inline bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return false;
+}
#endif
extern int coresight_get_cpu(struct device *dev);
diff --git a/include/linux/counter.h b/include/linux/counter.h
index a061cdcdef7c..9dbd5df4cd34 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -290,53 +290,22 @@ struct counter_device_state {
const struct attribute_group **groups;
};
-/**
- * struct counter_signal_read_value - Opaque Signal read value
- * @buf: string representation of Signal read value
- * @len: length of string in @buf
- */
-struct counter_signal_read_value {
- char *buf;
- size_t len;
-};
-
-/**
- * struct counter_count_read_value - Opaque Count read value
- * @buf: string representation of Count read value
- * @len: length of string in @buf
- */
-struct counter_count_read_value {
- char *buf;
- size_t len;
-};
-
-/**
- * struct counter_count_write_value - Opaque Count write value
- * @buf: string representation of Count write value
- */
-struct counter_count_write_value {
- const char *buf;
+enum counter_signal_value {
+ COUNTER_SIGNAL_LOW = 0,
+ COUNTER_SIGNAL_HIGH
};
/**
* struct counter_ops - Callbacks from driver
* @signal_read: optional read callback for Signal attribute. The read
* value of the respective Signal should be passed back via
- * the val parameter. val points to an opaque type which
- * should be set only by calling the
- * counter_signal_read_value_set function from within the
- * signal_read callback.
+ * the val parameter.
* @count_read: optional read callback for Count attribute. The read
* value of the respective Count should be passed back via
- * the val parameter. val points to an opaque type which
- * should be set only by calling the
- * counter_count_read_value_set function from within the
- * count_read callback.
+ * the val parameter.
* @count_write: optional write callback for Count attribute. The write
* value for the respective Count is passed in via the val
- * parameter. val points to an opaque type which should be
- * accessed only by calling the
- * counter_count_write_value_get function.
+ * parameter.
* @function_get: function to get the current count function mode. Returns
* 0 on success and negative error code on error. The index
* of the respective Count's returned function mode should
@@ -346,7 +315,7 @@ struct counter_count_write_value {
* Count's functions_list array.
* @action_get: function to get the current action mode. Returns 0 on
* success and negative error code on error. The index of
- * the respective Signal's returned action mode should be
+ * the respective Synapse's returned action mode should be
* passed back via the action parameter.
* @action_set: function to set the action mode. action is the index of
* the requested action mode from the respective Synapse's
@@ -355,13 +324,11 @@ struct counter_count_write_value {
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
- struct counter_signal_read_value *val);
+ enum counter_signal_value *val);
int (*count_read)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val);
+ struct counter_count *count, unsigned long *val);
int (*count_write)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_write_value *val);
+ struct counter_count *count, unsigned long val);
int (*function_get)(struct counter_device *counter,
struct counter_count *count, size_t *function);
int (*function_set)(struct counter_device *counter,
@@ -477,29 +444,6 @@ struct counter_device {
void *priv;
};
-enum counter_signal_level {
- COUNTER_SIGNAL_LEVEL_LOW = 0,
- COUNTER_SIGNAL_LEVEL_HIGH
-};
-
-enum counter_signal_value_type {
- COUNTER_SIGNAL_LEVEL = 0
-};
-
-enum counter_count_value_type {
- COUNTER_COUNT_POSITION = 0,
-};
-
-void counter_signal_read_value_set(struct counter_signal_read_value *const val,
- const enum counter_signal_value_type type,
- void *const data);
-void counter_count_read_value_set(struct counter_count_read_value *const val,
- const enum counter_count_value_type type,
- void *const data);
-int counter_count_write_value_get(void *const data,
- const enum counter_count_value_type type,
- const struct counter_count_write_value *const val);
-
int counter_register(struct counter_device *const counter);
void counter_unregister(struct counter_device *const counter);
int devm_counter_register(struct device *dev,
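[Editor's sketch: the hunks above drop the opaque counter_*_read/write_value wrappers in favor of plain types (enum counter_signal_value, unsigned long). A hedged sketch of driver callbacks under the new signatures; the foo_* device and its register helpers are hypothetical:

static int foo_signal_read(struct counter_device *counter,
			   struct counter_signal *signal,
			   enum counter_signal_value *val)
{
	struct foo_priv *priv = counter->priv;

	/* hypothetical register read returning the line level */
	*val = foo_read_line(priv, signal->id) ? COUNTER_SIGNAL_HIGH
					       : COUNTER_SIGNAL_LOW;
	return 0;
}

static int foo_count_read(struct counter_device *counter,
			  struct counter_count *count, unsigned long *val)
{
	struct foo_priv *priv = counter->priv;

	*val = foo_read_position(priv, count->id);	/* hypothetical helper */
	return 0;
}
]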
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index bc6c879bd110..1ca2baf817ed 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -184,7 +184,12 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_us);
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
+
+static inline void play_idle(unsigned long duration_us)
+{
+ play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+}
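[Editor's sketch: play_idle() is now a thin wrapper that converts microseconds to nanoseconds and passes U64_MAX (no latency constraint) to play_idle_precise(). A minimal usage sketch, assuming NSEC_PER_USEC/NSEC_PER_MSEC from <linux/time64.h>:

play_idle(1000);				/* ~1 ms, no latency limit */
/* equivalent to: */
play_idle_precise(1000 * NSEC_PER_USEC, U64_MAX);
/* with an explicit 50 us exit-latency cap (illustrative values): */
play_idle_precise(1 * NSEC_PER_MSEC, 50 * NSEC_PER_USEC);
]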
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4b6b5bea8f79..2dbe46b7c213 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -29,10 +29,13 @@ struct cpuidle_driver;
* CPUIDLE DEVICE INTERFACE *
****************************/
+#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1)
+
struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
- unsigned long long time; /* in US */
+ u64 time_ns;
unsigned long long above; /* Number of times it's been too deep */
unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
@@ -45,6 +48,8 @@ struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
+ u64 exit_latency_ns;
+ u64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
@@ -80,14 +85,14 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
- unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1;
unsigned int cpu;
ktime_t next_hrtimer;
int last_state_idx;
- int last_residency;
+ u64 last_residency_ns;
u64 poll_limit_ns;
+ u64 forced_idle_latency_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
@@ -144,6 +149,8 @@ extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern struct cpuidle_driver *cpuidle_driver_ref(void);
extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -181,6 +188,8 @@ static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+ int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
@@ -204,18 +213,20 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
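[Editor's sketch: cpuidle_use_deepest_state() changes from a bool to a nanosecond latency limit. A hedged sketch of the new calling convention, assuming 0 keeps the old "disable" meaning:

cpuidle_use_deepest_state(500 * NSEC_PER_USEC);	/* deepest state within 500 us */
/* ... forced-idle section ... */
cpuidle_use_deepest_state(0);			/* restore normal governor selection */
]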
@@ -260,7 +271,7 @@ struct cpuidle_governor {
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern int cpuidle_governor_latency_req(unsigned int cpu);
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 58424eb3b329..bf9b6cafa4c2 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -54,6 +54,8 @@ static const struct file_operations __fops = { \
.llseek = no_llseek, \
}
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *dest);
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
@@ -97,28 +98,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
-struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent, unsigned long *value);
-struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value);
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
+void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
+void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value);
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent, bool *value);
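[Editor's sketch: with the void returns, callers no longer get (or need) a dentry back, so error handling collapses away. A before/after sketch with hypothetical names:

/* before: a dentry was returned but almost never useful */
struct dentry *d = debugfs_create_u8("level", 0644, dir, &level);
/* after: fire and forget */
debugfs_create_u8("level", 0644, dir, &level);
]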
@@ -203,7 +204,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
static inline struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
- struct vfsmount *(*f)(void *),
+ debugfs_automount_t f,
void *data)
{
return ERR_PTR(-ENODEV);
@@ -244,19 +245,11 @@ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentr
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent,
@@ -265,12 +258,8 @@ static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
static inline struct dentry *debugfs_create_ulong(const char *name,
umode_t mode,
@@ -280,46 +269,26 @@ static inline struct dentry *debugfs_create_ulong(const char *name,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
-static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent,
- size_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value)
+{ }
-static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent,
+ atomic_t *value)
+{ }
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
@@ -383,4 +352,25 @@ static inline ssize_t debugfs_write_file_bool(struct file *file,
#endif
+/**
+ * debugfs_create_xul - create a debugfs file that is used to read and write an
+ * unsigned long value, formatted in hexadecimal
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and write
+ * to.
+ */
+static inline void debugfs_create_xul(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ if (sizeof(*value) == sizeof(u32))
+ debugfs_create_x32(name, mode, parent, (u32 *)value);
+ else
+ debugfs_create_x64(name, mode, parent, (u64 *)value);
+}
+
#endif
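[Editor's sketch: usage of the new debugfs_create_xul() helper. The sizeof() dispatch is resolved at compile time, so a 32-bit build lands in debugfs_create_x32() and a 64-bit build in debugfs_create_x64(); names here are hypothetical:

static unsigned long foo_flags;

debugfs_create_xul("flags", 0644, foo_dir, &foo_flags);
]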
diff --git a/include/linux/device.h b/include/linux/device.h
index 297239a08bb7..e226030c1df3 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -80,6 +80,13 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* that generate uevents to add the environment variables.
* @probe: Called when a new device or driver is added to this bus, and calls
the specific driver's probe to initialize the matched device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will not be called until they do.
* @remove: Called when a device is removed from this bus.
* @shutdown: Called at shut-down time to quiesce the device.
*
@@ -123,6 +130,7 @@ struct bus_type {
int (*match)(struct device *dev, struct device_driver *drv);
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
+ void (*sync_state)(struct device *dev);
int (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
@@ -340,6 +348,13 @@ enum probe_type {
* @probe: Called to query the existence of a specific device,
* whether this driver can work with it, and bind the driver
* to a specific device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will not be called until they do.
* @remove: Called when the device is removed from the system to
* unbind a device from this driver.
* @shutdown: Called at shut-down time to quiesce the device.
@@ -379,6 +394,7 @@ struct device_driver {
const struct acpi_device_id *acpi_match_table;
int (*probe) (struct device *dev);
+ void (*sync_state)(struct device *dev);
int (*remove) (struct device *dev);
void (*shutdown) (struct device *dev);
int (*suspend) (struct device *dev, pm_message_t state);
@@ -946,6 +962,8 @@ extern void devm_free_pages(struct device *dev, unsigned long addr);
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res);
void __iomem *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
@@ -1080,6 +1098,7 @@ enum device_link_state {
* AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
+ * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@@ -1088,6 +1107,7 @@ enum device_link_state {
#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
+#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
/**
* struct device_link - Device link representation.
@@ -1135,11 +1155,18 @@ enum dl_dev_state {
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
+ * @needs_suppliers: Hook to global list of devices waiting for suppliers.
+ * @defer_sync: Hook to global list of devices that have deferred sync_state.
+ * @need_for_probe: If needs_suppliers is on a list, this indicates if the
+ * suppliers are needed for probe or not.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
+ struct list_head needs_suppliers;
+ struct list_head defer_sync;
+ bool need_for_probe;
enum dl_dev_state status;
};
@@ -1186,8 +1213,8 @@ struct dev_links_info {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* such descriptors.
- * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
- * limit than the device itself supports.
+ * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
+ * DMA limit than the device itself supports.
* @dma_pfn_offset: offset of the DMA memory range relative to RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
@@ -1215,6 +1242,9 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @state_synced: The hardware state of this device has been synced to match
+ * the software state of this device by calling the driver/bus
+ * sync_state() callback.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
*
@@ -1270,7 +1300,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations such descriptors. */
- u64 bus_dma_mask; /* upstream dma_mask constraint */
+ u64 bus_dma_limit; /* upstream dma constraint */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;
@@ -1311,6 +1341,7 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+ bool state_synced:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
@@ -1653,6 +1684,8 @@ struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
+void device_links_supplier_sync_state_pause(void);
+void device_links_supplier_sync_state_resume(void);
#ifndef dev_fmt
#define dev_fmt(fmt) fmt
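[Editor's sketch: a hedged reading of how the new pause/resume pair appears intended to be used, batching sync_state() evaluation while a firmware handler registers a group of related devices:

device_links_supplier_sync_state_pause();
/* ... register/add a batch of related devices ... */
device_links_supplier_sync_state_resume();	/* sync_state() re-evaluated once here */
]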
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 8557efe096dc..fa35b52e0002 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -12,26 +12,15 @@
#define DEVCG_DEV_ALL 4 /* this represents all devices */
#ifdef CONFIG_CGROUP_DEVICE
-extern int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access);
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
#else
-static inline int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{ return 0; }
#endif
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
-static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
-{
- int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
-
- if (rc)
- return -EPERM;
-
- return __devcgroup_check_permission(type, major, minor, access);
-}
-
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index ec212cb27fdc..af73f835c51c 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -43,6 +43,18 @@ struct dma_buf_ops {
bool cache_sgt_mapping;
/**
+ * @dynamic_mapping:
+ *
+ * If true the framework makes sure that the map/unmap_dma_buf
+ * callbacks are always called with the dma_resv object locked.
+ *
+ * If false the framework makes sure that the map/unmap_dma_buf
+ * callbacks are always called without the dma_resv object locked.
+ * Mutually exclusive with @cache_sgt_mapping.
+ */
+ bool dynamic_mapping;
+
+ /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -109,6 +121,9 @@ struct dma_buf_ops {
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
+ * This is always called with the dmabuf->resv object locked when
+ * the dynamic_mapping flag is true.
+ *
* Returns:
*
* A &sg_table scatter list of the backing storage of the DMA buffer,
@@ -267,14 +282,16 @@ struct dma_buf_ops {
* struct dma_buf - shared buffer object
* @size: size of the buffer
* @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached,
+ * protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap, and accesses to name
+ * vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging.
+ * @name: userspace-provided name; useful for accounting and debugging,
+ * protected by @resv.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@@ -323,10 +340,12 @@ struct dma_buf {
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
+ * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
+ * dma_resv lock held.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -343,6 +362,7 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
+ bool dynamic_mapping;
void *priv;
};
@@ -394,10 +414,40 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+ return dmabuf->ops->dynamic_mapping;
+}
+
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+ * the dma_resv lock held.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return attach->dynamic_mapping;
+}
+
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev);
+ struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping);
void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
+ struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -409,6 +459,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
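[Editor's sketch: a hedged importer-side view of the new dynamic-mapping contract. Whether the importer or the core takes dmabuf->resv depends on both sides' dynamic flags, so treat this as illustrative only:

struct dma_buf_attachment *attach;
struct sg_table *sgt;

attach = dma_buf_dynamic_attach(dmabuf, dev, true);	/* importer supports dynamic */
if (IS_ERR(attach))
	return PTR_ERR(attach);

if (dma_buf_attachment_is_dynamic(attach))
	dma_resv_lock(dmabuf->resv, NULL);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (dma_buf_attachment_is_dynamic(attach))
	dma_resv_unlock(dmabuf->resv);
]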
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index d03af3605460..24b8684aa21d 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -3,6 +3,7 @@
#define _LINUX_DMA_DIRECT_H 1
#include <linux/dma-mapping.h>
+#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
extern unsigned int zone_dma_bits;
@@ -23,15 +24,6 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <=
- min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
-}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
@@ -59,6 +51,21 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return __sme_clr(__dma_to_phys(dev, daddr));
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+ bool is_ram)
+{
+ dma_addr_t end = addr + size - 1;
+
+ if (!dev->dma_mask)
+ return false;
+
+ if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+ return false;
+
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
+}
+
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
@@ -69,7 +76,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
+ gfp_t gfp, unsigned long attrs);
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+bool dma_direct_can_mmap(struct device *dev);
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
#endif /* _LINUX_DMA_DIRECT_H */
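[Editor's sketch: the reworked dma_capable() gains an is_ram argument; on 32-bit configurations it additionally rejects bus addresses that fall below the first RAM page (min_low_pfn), hence the new memblock.h include. A hedged caller sketch:

dma_addr_t dma_addr = phys_to_dma(dev, paddr);

if (!dma_capable(dev, dma_addr, size, true))	/* true: mapping real RAM */
	return DMA_MAPPING_ERROR;		/* device cannot reach this range */
]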
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a1c4fca475a..330ad58fbf4d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -15,11 +15,8 @@
/**
* List of possible attributes associated with a DMA mapping. The semantics
* of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
*/
-#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+
/*
* DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
* may be weakly ordered, that is that reads and writes may pass each other.
@@ -162,7 +159,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);
@@ -172,7 +169,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
+static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle)
{
return NULL;
@@ -583,6 +580,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
debug_dma_map_single(dev, ptr, size);
return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
size, dir, attrs);
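[Editor's sketch: the new WARN codifies that dma_map_single() only works on linear-map memory, since virt_to_page() is meaningless for vmalloc addresses. A do/don't sketch with hypothetical buffers:

void *buf = kmalloc(len, GFP_KERNEL);	/* linear map: fine to dma_map_single() */
void *vbuf = vmalloc(len);		/* would now WARN and yield DMA_MAPPING_ERROR */

dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
	goto err;
]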
@@ -693,7 +694,7 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
*/
static inline bool dma_addressing_limited(struct device *dev)
{
- return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+ return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
dma_get_required_mask(dev);
}
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index dd3de6d88fc0..ca9b5770caee 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -41,8 +41,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr);
#ifdef CONFIG_MMU
/*
@@ -75,29 +73,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
#else
-static inline void arch_sync_dma_for_device(struct device *dev,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
#else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
#else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index ab82df64682a..d09c6f6f6da5 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -118,6 +118,9 @@ enum sprd_dma_int_type {
* struct sprd_dma_linklist - DMA link-list address structure
* @virt_addr: link-list virtual address to configure link-list node
* @phy_addr: link-list physical address to link DMA transfer
+ * @wrap_addr: the wrap address for link-list mode, which means once the
+ * transfer address reaches the wrap address, the next transfer address
+ * will jump to the address specified by the wrap_to register.
*
* The Spreadtrum DMA controller supports the link-list mode, that means slaves
* can supply several groups configurations (each configuration represents one
@@ -181,6 +184,7 @@ enum sprd_dma_int_type {
struct sprd_dma_linklist {
unsigned long virt_addr;
phys_addr_t phy_addr;
+ phys_addr_t wrap_addr;
};
#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index a7cf3599d9a1..f64ca27dc210 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -129,6 +129,7 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
+extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
@@ -137,6 +138,7 @@ extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 8de8c4f15163..927f8a8b7a1d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -113,6 +113,8 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
extern u64 dmi_memdev_size(u16 handle);
+extern u8 dmi_memdev_type(u16 handle);
+extern u16 dmi_memdev_handle(int slot);
#else
@@ -142,6 +144,8 @@ static inline bool dmi_match(enum dmi_field f, const char *str)
static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; }
+static inline u8 dmi_memdev_type(u16 handle) { return 0x0; }
+static inline u16 dmi_memdev_handle(int slot) { return 0xffff; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 028efa7a9f3b..99dfea595c8c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -112,6 +112,7 @@ typedef struct {
#define EFI_MEMORY_MORE_RELIABLE \
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
+#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -1044,7 +1045,6 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
-extern void efi_find_mirror(void);
#else
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1202,6 +1202,7 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_DBG 8 /* Print additional debug info at runtime */
#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
#ifdef CONFIG_EFI
/*
@@ -1212,6 +1213,14 @@ static inline bool efi_enabled(int feature)
return test_bit(feature, &efi.flags) != 0;
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+bool __pure __efi_soft_reserve_enabled(void);
+
+static inline bool __pure efi_soft_reserve_enabled(void)
+{
+ return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE)
+ && __efi_soft_reserve_enabled();
+}
#else
static inline bool efi_enabled(int feature)
{
@@ -1225,6 +1234,11 @@ efi_capsule_pending(int *reset_type)
{
return false;
}
+
+static inline bool efi_soft_reserve_enabled(void)
+{
+ return false;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
diff --git a/include/linux/export.h b/include/linux/export.h
index 941d075f03d6..aee5c86ae350 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 674d59f4d6ce..8bf3d79f3e82 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -20,7 +20,10 @@ struct space_resv {
};
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
+#define FS_IOC_UNRESVSP _IOW('X', 41, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
+#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
FALLOC_FL_PUNCH_HOLE | \
@@ -29,4 +32,27 @@ struct space_resv {
FALLOC_FL_INSERT_RANGE | \
FALLOC_FL_UNSHARE_RANGE)
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined(CONFIG_X86_64)
+struct space_resv_32 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start __attribute__((packed));
+ /* len == 0 means until end of file */
+ __s64 l_len __attribute__((packed));
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
+#define FS_IOC_UNRESVSP_32 _IOW ('X', 41, struct space_resv_32)
+#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
+#define FS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct space_resv_32)
+#define FS_IOC_ZERO_RANGE_32 _IOW ('X', 57, struct space_resv_32)
+
+int compat_ioctl_preallocate(struct file *, int, struct space_resv_32 __user *);
+
+#endif
+
#endif /* _FALLOC_H_ */
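[Editor's note: the packed attributes are what preserve the ia32 layout; without them, x86-64 would align l_start to 8 bytes. Hedged compile-time checks illustrating the intended 44-byte layout (2+2 for type/whence, two packed 8-byte fields at offsets 4 and 12, then 4+4 and 16 bytes of padding):

BUILD_BUG_ON(offsetof(struct space_resv_32, l_start) != 4);
BUILD_BUG_ON(sizeof(struct space_resv_32) != 44);
]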
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 756706b666a1..41e0069eca0a 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -607,7 +607,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
extern void unlink_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
+extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const char *name);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index b6c4302a39e0..59bc6e2af693 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -41,6 +41,12 @@
*
* SVC_STATUS_RSU_OK:
* Secure firmware accepts the request of remote status update (RSU).
+ *
+ * SVC_STATUS_RSU_ERROR:
+ * Error encountered during remote system update.
+ *
+ * SVC_STATUS_RSU_NO_SUPPORT:
+ * Secure firmware doesn't support RSU retry or notify feature.
*/
#define SVC_STATUS_RECONFIG_REQUEST_OK 0
#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
@@ -50,6 +56,8 @@
#define SVC_STATUS_RECONFIG_ERROR 5
#define SVC_STATUS_RSU_OK 6
#define SVC_STATUS_RSU_ERROR 7
+#define SVC_STATUS_RSU_NO_SUPPORT 8
+
/**
* Flag bit for COMMAND_RECONFIG
*
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 778abbbc7d94..df366f1a4cb4 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -91,7 +91,8 @@ enum pm_ret_status {
};
enum pm_ioctl_id {
- IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
@@ -250,6 +251,16 @@ enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
};
+enum pm_node_id {
+ NODE_SD_0 = 39,
+ NODE_SD_1,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT = 0,
+ PM_TAPDELAY_OUTPUT,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ae6c5c37f3ae..98e0349adb52 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1727,6 +1727,13 @@ int vfs_mkobj(struct dentry *, umode_t,
extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#else
+#define compat_ptr_ioctl NULL
+#endif
+
/*
* VFS file helper functions.
*/
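[Editor's sketch: compat_ptr_ioctl() is meant as a drop-in .compat_ioctl for drivers whose native handler already treats the argument as a user pointer; the #define to NULL keeps initializers valid without CONFIG_COMPAT. A minimal sketch with a hypothetical handler:

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,		/* hypothetical native handler */
	.compat_ioctl	= compat_ptr_ioctl,	/* NULL when !CONFIG_COMPAT */
};
]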
@@ -2547,7 +2554,7 @@ extern int finish_no_open(struct file *file, struct dentry *dentry);
/* fs/ioctl.c */
-extern int ioctl_preallocate(struct file *filp, void __user *argp);
+extern int ioctl_preallocate(struct file *filp, int mode, void __user *argp);
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
@@ -3149,7 +3156,6 @@ enum {
};
void dio_end_io(struct bio *bio);
-void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
@@ -3194,6 +3200,11 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+/*
+ * Warn about a page cache invalidation failure during a direct I/O write.
+ */
+void dio_warn_stale_pagecache(struct file *filp);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9141f2263286..7247d35c3d16 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -51,6 +51,7 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+struct ftrace_direct_func;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
@@ -142,24 +143,30 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
* RCU - Set when the ops can only be called when RCU is watching.
* TRACE_ARRAY - The ops->private points to a trace_array descriptor.
+ * PERMANENT - Set when the ops is permanent and should not be affected by
+ * ftrace_enabled.
+ * DIRECT - Used by the direct ftrace_ops helper for direct functions
+ * (internal ftrace only, should not be used by others)
*/
enum {
- FTRACE_OPS_FL_ENABLED = 1 << 0,
- FTRACE_OPS_FL_DYNAMIC = 1 << 1,
- FTRACE_OPS_FL_SAVE_REGS = 1 << 2,
- FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3,
- FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4,
- FTRACE_OPS_FL_STUB = 1 << 5,
- FTRACE_OPS_FL_INITIALIZED = 1 << 6,
- FTRACE_OPS_FL_DELETED = 1 << 7,
- FTRACE_OPS_FL_ADDING = 1 << 8,
- FTRACE_OPS_FL_REMOVING = 1 << 9,
- FTRACE_OPS_FL_MODIFYING = 1 << 10,
- FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11,
- FTRACE_OPS_FL_IPMODIFY = 1 << 12,
- FTRACE_OPS_FL_PID = 1 << 13,
- FTRACE_OPS_FL_RCU = 1 << 14,
- FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15,
+ FTRACE_OPS_FL_ENABLED = BIT(0),
+ FTRACE_OPS_FL_DYNAMIC = BIT(1),
+ FTRACE_OPS_FL_SAVE_REGS = BIT(2),
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
+ FTRACE_OPS_FL_RECURSION_SAFE = BIT(4),
+ FTRACE_OPS_FL_STUB = BIT(5),
+ FTRACE_OPS_FL_INITIALIZED = BIT(6),
+ FTRACE_OPS_FL_DELETED = BIT(7),
+ FTRACE_OPS_FL_ADDING = BIT(8),
+ FTRACE_OPS_FL_REMOVING = BIT(9),
+ FTRACE_OPS_FL_MODIFYING = BIT(10),
+ FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
+ FTRACE_OPS_FL_IPMODIFY = BIT(12),
+ FTRACE_OPS_FL_PID = BIT(13),
+ FTRACE_OPS_FL_RCU = BIT(14),
+ FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
+ FTRACE_OPS_FL_PERMANENT = BIT(16),
+ FTRACE_OPS_FL_DIRECT = BIT(17),
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -239,6 +246,70 @@ static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */
+struct ftrace_func_entry {
+ struct hlist_node hlist;
+ unsigned long ip;
+ unsigned long direct; /* for direct lookup only */
+};
+
+struct dyn_ftrace;
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+extern int ftrace_direct_func_count;
+int register_ftrace_direct(unsigned long ip, unsigned long addr);
+int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
+int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
+struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
+int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+ struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long new_addr);
+#else
+# define ftrace_direct_func_count 0
+static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+ return -ENOTSUPP;
+}
+static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+ return -ENOTSUPP;
+}
+static inline int modify_ftrace_direct(unsigned long ip,
+ unsigned long old_addr, unsigned long new_addr)
+{
+ return -ENOTSUPP;
+}
+static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+{
+ return NULL;
+}
+static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+ struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long new_addr)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * This must be implemented by the architecture.
+ * It is the way the ftrace direct_ops helper, when called
+ * via ftrace (because there are other callbacks besides the
+ * direct call), can inform the architecture's trampoline that this
+ * routine has a direct caller, and what the caller is.
+ *
+ * For example, in x86, it returns the direct caller
+ * callback function via the regs->orig_ax parameter.
+ * Then in the ftrace trampoline, if this is set, it makes
+ * the return from the trampoline jump to the direct caller
+ * instead of going back to the function it just traced.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+ unsigned long addr) { }
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
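[Editor's sketch: a hedged example of registering a direct call. The trampoline must be arch-specific assembly that preserves the calling convention, elided here:

extern void my_tramp(void);	/* hypothetical asm trampoline */

static int __init direct_init(void)
{
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit direct_exit(void)
{
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}
]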
+
#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
@@ -291,8 +362,6 @@ static inline void stack_tracer_enable(void) { }
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);
-struct dyn_ftrace;
-
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
FTRACE_BUG_INIT,
@@ -330,6 +399,7 @@ bool is_ftrace_trampoline(unsigned long addr);
* REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
+ * DIRECT - there is a direct function to call
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
@@ -345,10 +415,12 @@ enum {
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
FTRACE_FL_DISABLED = (1UL << 25),
+ FTRACE_FL_DIRECT = (1UL << 24),
+ FTRACE_FL_DIRECT_EN = (1UL << 23),
};
-#define FTRACE_REF_MAX_SHIFT 25
-#define FTRACE_FL_BITS 7
+#define FTRACE_REF_MAX_SHIFT 23
+#define FTRACE_FL_BITS 9
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
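[Editor's note on the bit accounting in this hunk: DIRECT and DIRECT_EN consume two more of rec->flags' 32 bits, growing the flag field from 7 to 9 bits, so the embedded reference counter shrinks from 32 - 7 = 25 to 32 - 9 = 23 bits and FTRACE_REF_MAX drops from 2^25 - 1 to 2^23 - 1 (8388607) concurrent ops per record.]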
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index ababd6bc82f3..8feeb94b8acc 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -17,6 +17,7 @@ struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+ struct device *dev;
};
/**
@@ -49,13 +50,15 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_is_available: Return true if the device is available.
* @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
- * @property_read_integer_array: Read an array of integer properties. Return
- * zero on success, a negative error code
- * otherwise.
+ * @property_read_int_array: Read an array of integer properties. Return zero on
+ * success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
* on success, a negative error code otherwise.
+ * @get_name: Return the name of an fwnode.
+ * @get_name_prefix: Get a prefix for a node (for printing purposes).
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
@@ -65,6 +68,44 @@ struct fwnode_reference_args {
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
+ * @add_links: Called after the device corresponding to the fwnode is added
+ * using device_add(). The function is expected to create device
+ * links to all the suppliers of the device that are available at
+ * the time this function is called. The function must NOT stop
+ * at the first failed device link if other unlinked supplier
+ * devices are present in the system. This is necessary for the
+ * driver/bus sync_state() callbacks to work correctly.
+ *
+ * For example, say Device-C depends on suppliers Device-S1 and
+ * Device-S2 and the dependency is listed in that order in the
+ * firmware. Say, S1 gets populated from the firmware after
+ * late_initcall_sync(). Say S2 is populated and probed way
+ * before that in device_initcall(). When C is populated, if this
+ * add_links() function doesn't continue past the failed link to
+ * S1 and also link C to S2, then S2 will get a
+ * sync_state() callback before C is probed. This is because from
+ * the perspective of S2, C was never a consumer when its
+ * sync_state() evaluation is done. To avoid this, the add_links()
+ * function has to go through all available suppliers of the
+ * device (that corresponds to this fwnode) and link to them
+ * before returning.
+ *
+ * If some suppliers are not yet available (indicated by an error
+ * return value), this function will be called again when other
+ * devices are added to allow creating device links to any newly
+ * available suppliers.
+ *
+ * Return 0 if device links have been successfully created to all
+ * the known suppliers of this device or if the supplier
+ * information is not known.
+ *
+ * Return -ENODEV if the suppliers needed for probing this device
+ * have not been registered yet (because device links can only be
+ * created to devices registered with the driver core).
+ *
+ * Return -EAGAIN if some of the suppliers of this device have not
+ * been registered yet, but none of those suppliers are necessary
+ * for probing the device.
*/
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
@@ -82,6 +123,8 @@ struct fwnode_operations {
(*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
const char *propname, const char **val,
size_t nval);
+ const char *(*get_name)(const struct fwnode_handle *fwnode);
+ const char *(*get_name_prefix)(const struct fwnode_handle *fwnode);
struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*get_next_child_node)(const struct fwnode_handle *fwnode,
@@ -102,6 +145,8 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+ int (*add_links)(const struct fwnode_handle *fwnode,
+ struct device *dev);
};
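[Editor's sketch: a heavily hedged illustration of the contract the @add_links documentation above describes; the supplier iterator and link flags here are hypothetical stand-ins, not this patch's actual firmware-specific implementation:

static int foo_add_links(const struct fwnode_handle *fwnode, struct device *dev)
{
	struct fwnode_handle *sup;
	int ret = 0;

	/* hypothetical iterator over the suppliers listed in firmware */
	for_each_supplier_fwnode(fwnode, sup) {
		struct device *sup_dev = get_dev_from_fwnode(sup);

		if (!sup_dev) {
			ret = -EAGAIN;	/* not registered yet; keep going */
			continue;
		}
		/* do NOT stop on failure; later suppliers must still be linked */
		if (!device_link_add(dev, sup_dev, DL_FLAG_SYNC_STATE_ONLY))
			ret = -EAGAIN;
		put_device(sup_dev);
	}
	return ret;
}
]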
#define fwnode_has_op(fwnode, op) \
@@ -123,5 +168,6 @@ struct fwnode_operations {
if (fwnode_has_op(fwnode, op)) \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__); \
} while (false)
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 61f2f6ff9467..e5b817cb86e7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -612,6 +612,8 @@ static inline bool pm_suspended_storage(void)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
+extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
#endif
void free_contig_range(unsigned long pfn, unsigned int nr_pages);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 5dd9c982e2cb..e2480ef94c55 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -22,6 +22,9 @@ enum gpio_lookup_flags;
struct gpio_chip;
+#define GPIO_LINE_DIRECTION_IN 1
+#define GPIO_LINE_DIRECTION_OUT 0
+
/**
* struct gpio_irq_chip - GPIO interrupt controller
*/
@@ -286,6 +289,9 @@ struct gpio_irq_chip {
* state (such as pullup/pulldown configuration).
* @init_valid_mask: optional routine to initialize @valid_mask, to be used if
* not all GPIOs are valid.
+ * @add_pin_ranges: optional routine to initialize pin ranges, to be used when
+ * the driver requires a special mapping of the pins that provide GPIO
+ * functionality. It is called after adding the GPIO chip and before adding
+ * the IRQ chip.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -376,6 +382,8 @@ struct gpio_chip {
unsigned long *valid_mask,
unsigned int ngpios);
+ int (*add_pin_ranges)(struct gpio_chip *chip);
+
int base;
u16 ngpio;
const char *const *names;
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 3fec513b9c00..ddf9f7144c43 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -62,37 +62,12 @@
#include <linux/kconfig.h>
#include <asm/pgtable.h>
-#ifdef CONFIG_HMM_MIRROR
-
#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>
-
-/*
- * struct hmm - HMM per mm struct
- *
- * @mm: mm struct this HMM struct is bound to
- * @lock: lock protecting ranges list
- * @ranges: list of range being snapshotted
- * @mirrors: list of mirrors for this mm
- * @mmu_notifier: mmu notifier to track updates to CPU page table
- * @mirrors_sem: read/write semaphore protecting the mirrors list
- * @wq: wait queue for user waiting on a range invalidation
- * @notifiers: count of active mmu notifiers
- */
-struct hmm {
- struct mmu_notifier mmu_notifier;
- spinlock_t ranges_lock;
- struct list_head ranges;
- struct list_head mirrors;
- struct rw_semaphore mirrors_sem;
- wait_queue_head_t wq;
- long notifiers;
-};
-
/*
* hmm_pfn_flag_e - HMM flag enums
*
@@ -145,6 +120,8 @@ enum hmm_pfn_value_e {
/*
* struct hmm_range - track invalidation lock on virtual address range
*
+ * @notifier: a mmu_interval_notifier that includes the start/end
+ * @notifier_seq: result of mmu_interval_read_begin()
* @hmm: the core HMM structure this range is active against
* @vma: the vm area struct for the range
* @list: all range lock are on a list
@@ -159,8 +136,8 @@ enum hmm_pfn_value_e {
* @valid: pfns array did not change since it has been fill by an HMM function
*/
struct hmm_range {
- struct hmm *hmm;
- struct list_head list;
+ struct mmu_interval_notifier *notifier;
+ unsigned long notifier_seq;
unsigned long start;
unsigned long end;
uint64_t *pfns;
@@ -169,33 +146,9 @@ struct hmm_range {
uint64_t default_flags;
uint64_t pfn_flags_mask;
uint8_t pfn_shift;
- bool valid;
};
/*
- * hmm_range_wait_until_valid() - wait for range to be valid
- * @range: range affected by invalidation to wait on
- * @timeout: time out for wait in ms (ie abort wait after that period of time)
- * Return: true if the range is valid, false otherwise.
- */
-static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
- unsigned long timeout)
-{
- return wait_event_timeout(range->hmm->wq, range->valid,
- msecs_to_jiffies(timeout)) != 0;
-}
-
-/*
- * hmm_range_valid() - test if a range is valid or not
- * @range: range
- * Return: true if the range is valid, false otherwise.
- */
-static inline bool hmm_range_valid(struct hmm_range *range)
-{
- return range->valid;
-}
-
-/*
* hmm_device_entry_to_page() - return struct page pointed to by a device entry
* @range: range use to decode device entry value
* @entry: device entry value to get corresponding struct page from
@@ -265,120 +218,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
}
/*
- * Mirroring: how to synchronize device page table with CPU page table.
- *
- * A device driver that is participating in HMM mirroring must always
- * synchronize with CPU page table updates. For this, device drivers can either
- * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
- * drivers can decide to register one mirror per device per process, or just
- * one mirror per process for a group of devices. The pattern is:
- *
- * int device_bind_address_space(..., struct mm_struct *mm, ...)
- * {
- * struct device_address_space *das;
- *
- * // Device driver specific initialization, and allocation of das
- * // which contains an hmm_mirror struct as one of its fields.
- * ...
- *
- * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
- * if (ret) {
- * // Cleanup on error
- * return ret;
- * }
- *
- * // Other device driver specific initialization
- * ...
- * }
- *
- * Once an hmm_mirror is registered for an address space, the device driver
- * will get callbacks through sync_cpu_device_pagetables() operation (see
- * hmm_mirror_ops struct).
- *
- * Device driver must not free the struct containing the hmm_mirror struct
- * before calling hmm_mirror_unregister(). The expected usage is to do that when
- * the device driver is unbinding from an address space.
- *
- *
- * void device_unbind_address_space(struct device_address_space *das)
- * {
- * // Device driver specific cleanup
- * ...
- *
- * hmm_mirror_unregister(&das->mirror);
- *
- * // Other device driver specific cleanup, and now das can be freed
- * ...
- * }
- */
-
-struct hmm_mirror;
-
-/*
- * struct hmm_mirror_ops - HMM mirror device operations callback
- *
- * @update: callback to update range on a device
- */
-struct hmm_mirror_ops {
- /* release() - release hmm_mirror
- *
- * @mirror: pointer to struct hmm_mirror
- *
- * This is called when the mm_struct is being released. The callback
- * must ensure that all access to any pages obtained from this mirror
- * is halted before the callback returns. All future access should
- * fault.
- */
- void (*release)(struct hmm_mirror *mirror);
-
- /* sync_cpu_device_pagetables() - synchronize page tables
- *
- * @mirror: pointer to struct hmm_mirror
- * @update: update information (see struct mmu_notifier_range)
- * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
- * and callback needs to block, 0 otherwise.
- *
- * This callback ultimately originates from mmu_notifiers when the CPU
- * page table is updated. The device driver must update its page table
- * in response to this callback. The update argument tells what action
- * to perform.
- *
- * The device driver must not return from this callback until the device
- * page tables are completely updated (TLBs flushed, etc); this is a
- * synchronous call.
- */
- int (*sync_cpu_device_pagetables)(
- struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update);
-};
-
-/*
- * struct hmm_mirror - mirror struct for a device driver
- *
- * @hmm: pointer to struct hmm (which is unique per mm_struct)
- * @ops: device driver callback for HMM mirror operations
- * @list: for list of mirrors of a given mm
- *
- * Each address space (mm_struct) being mirrored by a device must register one
- * instance of an hmm_mirror struct with HMM. HMM will track the list of all
- * mirrors for each mm_struct.
- */
-struct hmm_mirror {
- struct hmm *hmm;
- const struct hmm_mirror_ops *ops;
- struct list_head list;
-};
-
-int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
-void hmm_mirror_unregister(struct hmm_mirror *mirror);
-
-/*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
- */
-int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
-void hmm_range_unregister(struct hmm_range *range);
-
-/*
* Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
*/
#define HMM_FAULT_ALLOW_RETRY (1 << 0)
@@ -386,16 +225,17 @@ void hmm_range_unregister(struct hmm_range *range);
/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT (1 << 1)
+#ifdef CONFIG_HMM_MIRROR
+/*
+ * Please see Documentation/vm/hmm.rst for how to use the range API.
+ */
long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-
-long hmm_range_dma_map(struct hmm_range *range,
- struct device *device,
- dma_addr_t *daddrs,
- unsigned int flags);
-long hmm_range_dma_unmap(struct hmm_range *range,
- struct device *device,
- dma_addr_t *daddrs,
- bool dirty);
+#else
+static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
@@ -406,6 +246,4 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-
#endif /* LINUX_HMM_H */
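A hedged sketch of the retry loop the reworked range API implies (per Documentation/vm/hmm.rst): the caller owns a mmu_interval_notifier, samples its sequence into the range, faults, and revalidates under its own lock before committing; the my_device_* lock calls are hypothetical.

static int my_fault_range(struct mmu_interval_notifier *sub,
			  struct hmm_range *range, struct mm_struct *mm)
{
	long ret;

	range->notifier = sub;
again:
	range->notifier_seq = mmu_interval_read_begin(sub);
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range, 0);
	up_read(&mm->mmap_sem);
	if (ret == -EBUSY)
		goto again;
	if (ret < 0)
		return ret;

	my_device_lock();	/* hypothetical driver lock */
	if (mmu_interval_read_retry(sub, range->notifier_seq)) {
		my_device_unlock();
		goto again;
	}
	/* ... program device page tables from range->pfns ... */
	my_device_unlock();
	return 0;
}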
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index e6eea45e1154..6f8d772591ba 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -18,6 +18,7 @@ enum host1x_class {
};
struct host1x_client;
+struct iommu_group;
/**
* struct host1x_client_ops - host1x client operations
@@ -34,6 +35,7 @@ struct host1x_client_ops {
* @list: list node for the host1x client
* @parent: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
+ * @group: IOMMU group that this client is a member of
* @ops: host1x client operations
* @class: host1x class represented by this client
* @channel: host1x channel associated with this client
@@ -44,6 +46,7 @@ struct host1x_client {
struct list_head list;
struct device *parent;
struct device *dev;
+ struct iommu_group *group;
const struct host1x_client_ops *ops;
@@ -64,8 +67,9 @@ struct sg_table;
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys);
+ void (*unpin)(struct device *dev, struct sg_table *sgt);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
@@ -92,15 +96,17 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
+static inline struct sg_table *host1x_bo_pin(struct device *dev,
+ struct host1x_bo *bo,
+ dma_addr_t *phys)
{
- return bo->ops->pin(bo, sgt);
+ return bo->ops->pin(dev, bo, phys);
}
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
+ struct sg_table *sgt)
{
- bo->ops->unpin(bo, sgt);
+ bo->ops->unpin(dev, sgt);
}
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
@@ -158,7 +164,7 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
struct host1x_channel;
struct host1x_job;
-struct host1x_channel *host1x_channel_request(struct device *dev);
+struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -167,6 +173,9 @@ int host1x_job_submit(struct host1x_job *job);
* host1x job
*/
+#define HOST1X_RELOC_READ (1 << 0)
+#define HOST1X_RELOC_WRITE (1 << 1)
+
struct host1x_reloc {
struct {
struct host1x_bo *bo;
@@ -177,6 +186,7 @@ struct host1x_reloc {
unsigned long offset;
} target;
unsigned long shift;
+ unsigned long flags;
};
struct host1x_job {
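A hedged sketch against the reworked pin/unpin signatures above: the pinning device is now explicit, an sg_table comes back, and the DMA address is returned through *phys (the ERR_PTR error convention is assumed):

static int my_map_bo(struct device *dev, struct host1x_bo *bo)
{
	struct sg_table *sgt;
	dma_addr_t phys;

	sgt = host1x_bo_pin(dev, bo, &phys);
	if (IS_ERR(sgt))	/* assumed error convention */
		return PTR_ERR(sgt);

	/* ... DMA using phys / sgt ... */

	host1x_bo_unpin(dev, bo, sgt);
	return 0;
}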
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1b9a51a1bccb..1f98b52118f0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -456,12 +456,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used lockless, but it gives only a current snapshot.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
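A minimal usage sketch; because the check is lockless, the result is only a snapshot and must not be used to derive stronger guarantees:

static void report_timer(struct hrtimer *timer)
{
	/* Snapshot only: the timer may be (de)queued right afterwards. */
	if (hrtimer_is_queued(timer))
		pr_debug("hrtimer %p is queued\n", timer);
}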
/*
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 93d5cf0bc716..0b84e13e88e2 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -216,7 +216,6 @@ static inline int is_swap_pmd(pmd_t pmd)
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
@@ -225,7 +224,6 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pud_trans_huge(*pud) || pud_devmap(*pud))
return __pud_trans_huge_lock(pud, vma);
else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 53fc34f930d0..31d4920994b9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -105,8 +105,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -164,38 +163,130 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
-#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
-#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+static inline long follow_hugetlb_page(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct page **pages,
+ struct vm_area_struct **vmas, unsigned long *position,
+ unsigned long *nr_pages, long i, unsigned int flags,
+ int *nonblocking)
+{
+ BUG();
+ return 0;
+}
+
+static inline struct page *follow_huge_addr(struct mm_struct *mm,
+ unsigned long address, int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int copy_hugetlb_page_range(struct mm_struct *dst,
+ struct mm_struct *src, struct vm_area_struct *vma)
+{
+ BUG();
+ return 0;
+}
+
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
-#define hugetlb_report_node_meminfo(n, buf) 0
+
+static inline int hugetlb_report_node_meminfo(int nid, char *buf)
+{
+ return 0;
+}
+
static inline void hugetlb_show_meminfo(void)
{
}
-#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
-#define follow_huge_pmd(mm, addr, pmd, flags) NULL
-#define follow_huge_pud(mm, addr, pud, flags) NULL
-#define follow_huge_pgd(mm, addr, pgd, flags) NULL
-#define prepare_hugepage_range(file, addr, len) (-EINVAL)
-#define pmd_huge(x) 0
-#define pud_huge(x) 0
-#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address, sz) 0
+
+static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd, int flags,
+ int pdshift)
+{
+ return NULL;
+}
+
+static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmd, int flags)
+{
+ return NULL;
+}
+
+static inline struct page *follow_huge_pud(struct mm_struct *mm,
+ unsigned long address, pud_t *pud, int flags)
+{
+ return NULL;
+}
+
+static inline struct page *follow_huge_pgd(struct mm_struct *mm,
+ unsigned long address, pgd_t *pgd, int flags)
+{
+ return NULL;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ return -EINVAL;
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ BUG();
+}
+
+static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ struct page **pagep)
+{
+ BUG();
+ return 0;
+}
+
+static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ return NULL;
+}
static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
return false;
}
-#define putback_active_hugepage(p) do {} while (0)
-#define move_hugetlb_state(old, new, reason) do {} while (0)
-static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot)
+static inline void putback_active_hugepage(struct page *page)
+{
+}
+
+static inline void move_hugetlb_state(struct page *oldpage,
+ struct page *newpage, int reason)
+{
+}
+
+static inline unsigned long hugetlb_change_protection(
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, pgprot_t newprot)
{
return 0;
}
@@ -213,9 +304,10 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
{
BUG();
}
+
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- unsigned int flags)
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
{
BUG();
return 0;
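One payoff of turning these macro stubs into static inlines is that callers keep full argument type checking even in !CONFIG_HUGETLB_PAGE builds; a hedged sketch of a caller that now compiles identically either way (myfs_* is hypothetical):

static bool myfs_addr_has_huge_pte(struct mm_struct *mm, unsigned long addr)
{
	/* With hugetlb disabled, the inline stub simply returns NULL. */
	return huge_pte_offset(mm, addr, PMD_SIZE) != NULL;
}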
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b4a017093b69..26f3aeeae1ca 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -182,19 +182,21 @@ static inline u32 hv_get_avail_to_write_percent(
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
* 4 . 0 (Windows 10)
+ * 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
+ * 5 . 1 (Windows 10 RS4)
+ * 5 . 2 (Windows Server 2019, RS5)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
-
-#define VERSION_INVAL -1
-
-#define VERSION_CURRENT VERSION_WIN10_V5
+#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
+#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -932,6 +934,21 @@ struct vmbus_channel {
* full outbound ring buffer.
*/
u64 out_full_first;
+
+ /* enabling/disabling fuzz testing on the channel (default is false) */
+ bool fuzz_testing_state;
+
+ /*
+ * The interrupt delay defers the guest's emptying of the ring buffer
+ * for a specific amount of time. The delay is in microseconds, ranging
+ * from 1 to a maximum of 1000; the default is 0 (no delay).
+ * The message delay defers guest reads on a per-message basis, also in
+ * microseconds from 1 to 1000, with the default being 0 (no delay).
+ */
+ u32 fuzz_testing_interrupt_delay;
+ u32 fuzz_testing_message_delay;
+
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -1180,6 +1197,10 @@ struct hv_device {
struct vmbus_channel *channel;
struct kset *channels_kset;
+
+ /* placeholder to keep track of the debugfs dir for the hv device */
+ struct dentry *debug_dir;
+
};
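As a worked example of the encoding above, each VERSION_* macro packs major.minor as (major << 16) | minor:

/*
 * VERSION_WIN10_V4_1 == (4 << 16) | 1 == 0x00040001
 * VERSION_WIN10_V5_2 == (5 << 16) | 2 == 0x00050002
 */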
diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h
deleted file mode 100644
index a897e2b507b6..000000000000
--- a/include/linux/i2c-pxa.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_I2C_ALGO_PXA_H
-#define _LINUX_I2C_ALGO_PXA_H
-
-typedef enum i2c_slave_event_e {
- I2C_SLAVE_EVENT_START_READ,
- I2C_SLAVE_EVENT_START_WRITE,
- I2C_SLAVE_EVENT_STOP
-} i2c_slave_event_t;
-
-struct i2c_slave_client {
- void *data;
- void (*event)(void *ptr, i2c_slave_event_t event);
- int (*read) (void *ptr);
- void (*write)(void *ptr, unsigned int val);
-};
-
-#endif /* _LINUX_I2C_ALGO_PXA_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 1361637c369d..d2f786706657 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -452,10 +452,16 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
* a default probing method is used.
*/
extern struct i2c_client *
+i2c_new_scanned_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr));
+
+extern struct i2c_client *
i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *adap, unsigned short addr));
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr));
/* Common custom probe functions */
extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
@@ -575,6 +581,10 @@ struct i2c_lock_operations {
* @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns
* @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification
* @sda_hold_ns: time IP core additionally needs to hold SDA in ns
+ * @digital_filter_width_ns: width in ns of spikes on i2c lines that the IP core
+ * digital filter can filter out
+ * @analog_filter_cutoff_freq_hz: threshold frequency for the low pass IP core
+ * analog filter
*/
struct i2c_timings {
u32 bus_freq_hz;
@@ -583,6 +593,8 @@ struct i2c_timings {
u32 scl_int_delay_ns;
u32 sda_fall_ns;
u32 sda_hold_ns;
+ u32 digital_filter_width_ns;
+ u32 analog_filter_cutoff_freq_hz;
};
/**
@@ -844,9 +856,6 @@ extern void i2c_del_driver(struct i2c_driver *driver);
#define i2c_add_driver(driver) \
i2c_register_driver(THIS_MODULE, driver)
-extern struct i2c_client *i2c_use_client(struct i2c_client *client);
-extern void i2c_release_client(struct i2c_client *client);
-
/* call the i2c_client->command() of all attached clients with
* the given arguments */
extern void i2c_clients_command(struct i2c_adapter *adap,
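A hedged sketch of probing with the new helper; unlike i2c_new_probed_device(), which returns NULL on failure, i2c_new_scanned_device() is expected to report errors via ERR_PTR — treat that convention, the address list, and the board info here as assumptions:

static struct i2c_client *my_attach_sensor(struct i2c_adapter *adap)
{
	static const unsigned short addr_list[] = { 0x48, 0x49, I2C_CLIENT_END };
	struct i2c_board_info info = { I2C_BOARD_INFO("my_sensor", 0) };
	struct i2c_client *client;

	client = i2c_new_scanned_device(adap, &info, addr_list, NULL);
	if (IS_ERR(client))	/* assumed ERR_PTR convention */
		return NULL;
	return client;
}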
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7716fa0c9fce..8a4e25a7080c 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -119,6 +119,8 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ unsigned int mode, unsigned int channel);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
const struct ad_sd_calib_data *cd, unsigned int n);
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 8e132cf819e4..862ce0019eba 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -510,6 +510,7 @@ struct iio_buffer_setup_ops {
* attributes
* @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
+ * @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
* @clock_id: [INTERN] timestamping clock posix identifier
* @info_exist_lock: [INTERN] lock to prevent use during removal
@@ -553,6 +554,7 @@ struct iio_dev {
struct list_head channel_attr_list;
struct attribute_group chan_attr_group;
const char *name;
+ const char *label;
const struct iio_info *info;
clockid_t clock_id;
struct mutex info_exist_lock;
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 4c53815bb729..92aae14593bf 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -129,7 +129,8 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
int ret;
ret = adis_read_reg(adis, reg, &tmp, 2);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
@@ -147,7 +148,8 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
int ret;
ret = adis_read_reg(adis, reg, &tmp, 4);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
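The added check matters to callers: on a failed read, the output now stays untouched instead of being clobbered with an uninitialized temporary. A short hedged sketch (the register address is illustrative):

static int my_read_temp(struct adis *adis, u16 *out)
{
	u16 val = 0;
	int ret;

	ret = adis_read_reg_16(adis, 0x0e, &val);
	if (ret)
		return ret;	/* val (and *out) left alone on failure */

	*out = val;
	return 0;
}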
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 1c37f17f7203..6d904754d858 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -29,7 +29,8 @@ extern void ima_kexec_cmdline(const void *buf, int size);
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390)
+#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) \
+ || defined(CONFIG_PPC_SECURE_BOOT)
extern bool arch_ima_get_secureboot(void);
extern const char * const *arch_get_ima_policy(void);
#else
diff --git a/include/linux/input.h b/include/linux/input.h
index 94f277cd806a..56f2fd32e609 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -383,6 +383,7 @@ int input_setup_polling(struct input_dev *dev,
void input_set_poll_interval(struct input_dev *dev, unsigned int interval);
void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval);
void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval);
+int input_get_poll_interval(struct input_dev *dev);
int __must_check input_register_handler(struct input_handler *);
void input_unregister_handler(struct input_handler *);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 89fc59dab57d..c5fe60ec6b84 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -140,6 +140,19 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * If NULL, the default primary handler is installed
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
@@ -520,8 +533,7 @@ enum
IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
- numbering. Sigh! */
+ HRTIMER_SOFTIRQ,
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ec7a13405f10..ee21eedafe98 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -102,7 +102,7 @@ struct io_pgtable_cfg {
struct {
u64 ttbr[2];
u64 tcr;
- u64 mair[2];
+ u64 mair;
} arm_lpae_s1_cfg;
struct {
diff --git a/include/linux/io.h b/include/linux/io.h
index accac822336a..a59834bc0a11 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -64,6 +64,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
new file mode 100644
index 000000000000..6f000d7a0ddc
--- /dev/null
+++ b/include/linux/ioasid.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_IOASID_H
+#define __LINUX_IOASID_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define INVALID_IOASID ((ioasid_t)-1)
+typedef unsigned int ioasid_t;
+typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
+typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
+
+struct ioasid_set {
+ int dummy;
+};
+
+/**
+ * struct ioasid_allocator_ops - IOASID allocator helper functions and data
+ *
+ * @alloc: helper function to allocate IOASID
+ * @free: helper function to free IOASID
+ * @list: for tracking ops that share helper functions but not data
+ * @pdata: data belonging to the allocator, provided when calling alloc()
+ */
+struct ioasid_allocator_ops {
+ ioasid_alloc_fn_t alloc;
+ ioasid_free_fn_t free;
+ struct list_head list;
+ void *pdata;
+};
+
+#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
+
+#if IS_ENABLED(CONFIG_IOASID)
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private);
+void ioasid_free(ioasid_t ioasid);
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *));
+int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
+int ioasid_set_data(ioasid_t ioasid, void *data);
+
+#else /* !CONFIG_IOASID */
+static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
+ ioasid_t max, void *private)
+{
+ return INVALID_IOASID;
+}
+
+static inline void ioasid_free(ioasid_t ioasid)
+{
+}
+
+static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ return NULL;
+}
+
+static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
+{
+ return -ENOTSUPP;
+}
+
+static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
+{
+}
+
+static inline int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_IOASID */
+#endif /* __LINUX_IOASID_H */
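A hedged usage sketch built only from the declarations above: declare a set, allocate an IOASID from a range with private data attached, look it up again, and free it.

static DECLARE_IOASID_SET(my_set);

static int my_alloc_pasid(void *priv)
{
	ioasid_t pasid;

	pasid = ioasid_alloc(&my_set, 1, 1024, priv);
	if (pasid == INVALID_IOASID)
		return -ENOSPC;

	/* A NULL getter just returns the private data, if any. */
	WARN_ON(ioasid_find(&my_set, pasid, NULL) != priv);

	ioasid_free(pasid);
	return 0;
}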
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7aa5d6117936..8b09463dae0d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
@@ -12,6 +13,7 @@
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
@@ -21,28 +23,45 @@ struct vm_fault;
/*
* Types of block ranges for iomap mappings:
*/
-#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
-#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
-#define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */
-#define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */
-#define IOMAP_INLINE 0x05 /* data inline in the inode */
+#define IOMAP_HOLE 0 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 1 /* delayed allocation blocks */
+#define IOMAP_MAPPED 2 /* blocks allocated at @addr */
+#define IOMAP_UNWRITTEN 3 /* blocks allocated at @addr in unwritten state */
+#define IOMAP_INLINE 4 /* data inline in the inode */
/*
- * Flags for all iomap mappings:
+ * Flags reported by the file system from iomap_begin:
+ *
+ * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
+ * zeroing for areas that no data is copied to.
*
* IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
* written data and requires fdatasync to commit them to persistent storage.
+ * This needs to take into account metadata changes that *may* be made at IO
+ * completion, such as file size updates from direct IO.
+ *
+ * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
+ * unshared as part of a write.
+ *
+ * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
+ * mappings.
+ *
+ * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
+ * buffer heads for this mapping.
*/
-#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
-#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
-#define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */
-#define IOMAP_F_SIZE_CHANGED 0x08 /* file size has changed */
+#define IOMAP_F_NEW 0x01
+#define IOMAP_F_DIRTY 0x02
+#define IOMAP_F_SHARED 0x04
+#define IOMAP_F_MERGED 0x08
+#define IOMAP_F_BUFFER_HEAD 0x10
/*
- * Flags that only need to be reported for IOMAP_REPORT requests:
+ * Flags set by the core iomap code during operations:
+ *
+ * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
+ * has changed as the result of this write operation.
*/
-#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x20 /* block shared with another file */
+#define IOMAP_F_SIZE_CHANGED 0x100
/*
* Flags from 0x1000 up are for file system specific usage:
@@ -110,7 +129,8 @@ struct iomap_ops {
* The actual length is returned in iomap->length.
*/
int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, struct iomap *iomap);
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap);
/*
* Commit and/or unreserve space previous allocated using iomap_begin.
@@ -126,29 +146,12 @@ struct iomap_ops {
* Main iomap iterator function.
*/
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap);
+ void *data, struct iomap *iomap, struct iomap *srcmap);
loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, const struct iomap_ops *ops, void *data,
iomap_actor_t actor);
-/*
- * Structure allocate for each page when block size < PAGE_SIZE to track
- * sub-page uptodate status and I/O completions.
- */
-struct iomap_page {
- atomic_t read_count;
- atomic_t write_count;
- DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
-};
-
-static inline struct iomap_page *to_iomap_page(struct page *page)
-{
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
- return NULL;
-}
-
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
@@ -166,7 +169,7 @@ int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
#else
#define iomap_migrate_page NULL
#endif
-int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops);
@@ -184,6 +187,63 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
/*
+ * Structure for writeback I/O completions.
+ */
+struct iomap_ioend {
+ struct list_head io_list; /* next ioend in chain */
+ u16 io_type;
+ u16 io_flags; /* IOMAP_F_* */
+ struct inode *io_inode; /* file being written to */
+ size_t io_size; /* size of the extent */
+ loff_t io_offset; /* offset in the file */
+ void *io_private; /* file system private data */
+ struct bio *io_bio; /* bio being built */
+ struct bio io_inline_bio; /* MUST BE LAST! */
+};
+
+struct iomap_writeback_ops {
+ /*
+ * Required, maps the blocks so that writeback can be performed on
+ * the range starting at offset.
+ */
+ int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
+ loff_t offset);
+
+ /*
+ * Optional, allows the file system to perform actions just before
+ * submitting the bio and/or override the bio end_io handler for complex
+ * operations like copy on write extent manipulation or unwritten extent
+ * conversions.
+ */
+ int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+
+ /*
+ * Optional, allows the file system to discard state on a page where
+ * we failed to submit any I/O.
+ */
+ void (*discard_page)(struct page *page);
+};
+
+struct iomap_writepage_ctx {
+ struct iomap iomap;
+ struct iomap_ioend *ioend;
+ const struct iomap_writeback_ops *ops;
+};
+
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends,
+ void (*merge_private)(struct iomap_ioend *ioend,
+ struct iomap_ioend *next));
+void iomap_sort_ioends(struct list_head *ioend_list);
+int iomap_writepage(struct page *page, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
+int iomap_writepages(struct address_space *mapping,
+ struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
+
+/*
* Flags for direct I/O ->end_io:
*/
#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
@@ -195,7 +255,8 @@ struct iomap_dio_ops {
};
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, const struct iomap_dio_ops *dops);
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ bool wait_for_completion);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
#ifdef CONFIG_SWAP
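A hedged sketch of a file system wiring up the new writeback machinery (modeled loosely on how XFS consumes it; all myfs_* names are hypothetical): supply map_blocks through iomap_writeback_ops and hand a per-call iomap_writepage_ctx to iomap_writepages().

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
			   struct inode *inode, loff_t offset)
{
	/* Fill wpc->iomap with the extent covering @offset. */
	return 0;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}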
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 29bac5345563..f2223cbb5fd5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -31,11 +32,11 @@
*/
#define IOMMU_PRIV (1 << 5)
/*
- * Non-coherent masters on few Qualcomm SoCs can use this page protection flag
- * to set correct cacheability attributes to use an outer level of cache -
- * last level cache, aka system cache.
+ * Non-coherent masters can use this page protection flag to set cacheable
+ * memory attributes for only a transparent outer level of cache, also known as
+ * the last-level or system cache.
*/
-#define IOMMU_QCOM_SYS_CACHE (1 << 6)
+#define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops;
struct iommu_group;
@@ -244,7 +245,10 @@ struct iommu_iotlb_gather {
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
+ * @cache_invalidate: invalidate translation caches
* @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @sva_bind_gpasid: bind guest pasid and mm
+ * @sva_unbind_gpasid: unbind guest pasid and mm
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -256,7 +260,7 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
@@ -306,6 +310,12 @@ struct iommu_ops {
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
+ int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+ int (*sva_bind_gpasid)(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+
+ int (*sva_unbind_gpasid)(struct device *dev, int pasid);
unsigned long pgsize_bitmap;
};
@@ -417,10 +427,19 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern int iommu_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
@@ -428,6 +447,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
+extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -662,6 +684,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline int iommu_map_atomic(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
@@ -682,6 +711,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return 0;
}
+static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return 0;
+}
+
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}
@@ -1005,6 +1041,25 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
return IOMMU_PASID_INVALID;
}
+static inline int
+iommu_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ return -ENODEV;
+}
+static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, int pasid)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_DEBUGFS
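A hedged sketch of the split the new *_atomic variants enable: the plain calls may sleep for allocations, while the atomic variants are intended to be safe under spinlocks; the prot flags and size are illustrative.

static int my_map_one(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, bool in_atomic)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (in_atomic)	/* e.g. called while holding a spinlock */
		return iommu_map_atomic(domain, iova, paddr, SZ_4K, prot);
	return iommu_map(domain, iova, paddr, SZ_4K, prot);
}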
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 7bddddfc76d6..a9b9170b5dd2 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -134,6 +134,7 @@ enum {
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
+ IORES_DESC_SOFT_RESERVED = 8,
};
/*
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fb301cf29148..7853eb9301f2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -610,6 +610,12 @@ extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
+extern int irq_chip_set_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool val);
+extern int irq_chip_get_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index b11fcdfd0770..02da997ad12c 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -22,7 +22,7 @@
#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
- unsigned long flags;
+ atomic_t flags;
struct llist_node llnode;
void (*func)(struct irq_work *);
};
@@ -30,11 +30,15 @@ struct irq_work {
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- work->flags = 0;
+ atomic_set(&work->flags, 0);
work->func = func;
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \
+ .flags = ATOMIC_INIT(0), \
+ .func = (_f) \
+}
+
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
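With flags now an atomic_t, static initialization needs ATOMIC_INIT, which is exactly what the updated DEFINE_IRQ_WORK provides. A minimal usage sketch (names are hypothetical):

static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("deferred work ran from a safe context\n");
}

static DEFINE_IRQ_WORK(my_work, my_irq_work_fn);

static void my_nmi_path(void)
{
	irq_work_queue(&my_work);	/* safe from NMI/hard-IRQ context */
}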
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index a0bde9e12efa..de991d6633a5 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -334,10 +334,10 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
-#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HCC_SHIFT 24
#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h
deleted file mode 100644
index 146558853ad4..000000000000
--- a/include/linux/irqchip/ingenic.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_IRQCHIP_INGENIC_H__
-#define __LINUX_IRQCHIP_INGENIC_H__
-
-#include <linux/irq.h>
-
-extern void ingenic_intc_irq_suspend(struct irq_data *data);
-extern void ingenic_intc_irq_resume(struct irq_data *data);
-
-#endif
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 583e7abd07f9..3c340dbc5a1f 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -83,6 +83,7 @@ enum irq_domain_bus_token {
DOMAIN_BUS_IPI,
DOMAIN_BUS_FSL_MC_MSI,
DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
};
/**
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 564793c24d12..29dce6ff6bae 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -313,7 +313,6 @@ enum jbd_state_bits {
BH_Revoked, /* Has been revoked from the log */
BH_RevokeValid, /* Revoked flag is valid */
BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
BH_Shadow, /* IO on shadow buffer is running */
BH_Verified, /* Metadata block has been verified ok */
@@ -342,26 +341,6 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
return bh->b_private;
}
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_lock(BH_JournalHead, &bh->b_state);
@@ -477,7 +456,9 @@ struct jbd2_revoke_table_s;
* @h_transaction: Which compound transaction is this update a part of?
* @h_journal: Which journal handle belongs to - used iff h_reserved set.
* @h_rsv_handle: Handle reserved for finishing the logical operation.
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+ * @h_total_credits: Number of remaining buffers we are allowed to add to
+ * journal. These are dirty buffers and revoke descriptor blocks.
+ * @h_revoke_credits: Number of remaining revoke records available for handle
* @h_ref: Reference count on this handle.
* @h_err: Field for caller's use to track errors through large fs operations.
* @h_sync: Flag for sync-on-close.
@@ -487,7 +468,8 @@ struct jbd2_revoke_table_s;
* @h_type: For handle statistics.
* @h_line_no: For handle statistics.
* @h_start_jiffies: Handle Start time.
- * @h_requested_credits: Holds @h_buffer_credits after handle is started.
+ * @h_requested_credits: Holds @h_total_credits after handle is started.
+ * @h_revoke_credits_requested: Holds @h_revoke_credits after handle is started.
* @saved_alloc_context: Saved context while transaction is open.
**/
@@ -504,7 +486,9 @@ struct jbd2_journal_handle
};
handle_t *h_rsv_handle;
- int h_buffer_credits;
+ int h_total_credits;
+ int h_revoke_credits;
+ int h_revoke_credits_requested;
int h_ref;
int h_err;
@@ -556,9 +540,9 @@ struct transaction_chp_stats_s {
* ->jbd_lock_bh_journal_head() (This is "innermost")
*
* j_state_lock
- * ->jbd_lock_bh_state()
+ * ->b_state_lock
*
- * jbd_lock_bh_state()
+ * b_state_lock
* ->j_list_lock
*
* j_state_lock
@@ -681,12 +665,25 @@ struct transaction_s
atomic_t t_updates;
/*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [none]
+ * Number of blocks reserved for this transaction in the journal.
+ * This is including all credits reserved when starting transaction
+ * handles as well as all journal descriptor blocks needed for this
+ * transaction. [none]
*/
atomic_t t_outstanding_credits;
/*
+ * Number of revoke records for this transaction added by already
+ * stopped handles. [none]
+ */
+ atomic_t t_outstanding_revokes;
+
+ /*
+ * How many handles used this transaction? [none]
+ */
+ atomic_t t_handle_count;
+
+ /*
* Forward and backward links for the circular list of all transactions
* awaiting checkpoint. [j_list_lock]
*/
@@ -704,11 +701,6 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [none]
- */
- atomic_t t_handle_count;
-
- /*
* This transaction is being forced and some process is
* waiting for it to finish.
*/
@@ -1025,6 +1017,13 @@ struct journal_s
int j_max_transaction_buffers;
/**
+ * @j_revoke_records_per_block:
+ *
+ * Number of revoke records that fit in one descriptor block.
+ */
+ int j_revoke_records_per_block;
+
+ /**
* @j_commit_interval:
*
* What is the maximum transaction lifetime before we begin a commit?
@@ -1257,7 +1256,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
/* Filing buffers */
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_free_buffer(struct journal_head *bh);
@@ -1358,14 +1357,16 @@ static inline handle_t *journal_current_handle(void)
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no);
+ int revoke_records, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no);
extern int jbd2_journal_restart(handle_t *, int nblocks);
-extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int jbd2__journal_restart(handle_t *, int nblocks,
+ int revoke_records, gfp_t gfp_mask);
extern int jbd2_journal_start_reserved(handle_t *handle,
unsigned int type, unsigned int line_no);
extern void jbd2_journal_free_reserved(handle_t *handle);
-extern int jbd2_journal_extend (handle_t *, int nblocks);
+extern int jbd2_journal_extend(handle_t *handle, int nblocks,
+ int revoke_records);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
@@ -1561,37 +1562,18 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
}
/*
- * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
- * transaction control blocks.
- */
-#define JBD2_CONTROL_BLOCKS_SHIFT 5
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd2_space_needed(journal_t *journal)
-{
- int nblocks = journal->j_max_transaction_buffers;
- return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
-}
-
-/*
* Return number of free blocks in the log. Must be called under j_state_lock.
*/
static inline unsigned long jbd2_log_space_left(journal_t *journal)
{
/* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
+ long free = journal->j_free - 32;
if (journal->j_committing_transaction) {
- unsigned long committing = atomic_read(&journal->
- j_committing_transaction->t_outstanding_credits);
-
- /* Transaction + control blocks */
- free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+ free -= atomic_read(&journal->
+ j_committing_transaction->t_outstanding_credits);
}
- return free;
+ return max_t(long, free, 0);
}
/*
@@ -1645,6 +1627,20 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
return tid;
}
+static inline int jbd2_handle_buffer_credits(handle_t *handle)
+{
+ journal_t *journal;
+
+ if (!handle->h_reserved)
+ journal = handle->h_transaction->t_journal;
+ else
+ journal = handle->h_journal;
+
+ return handle->h_total_credits -
+ DIV_ROUND_UP(handle->h_revoke_credits_requested,
+ journal->j_revoke_records_per_block);
+}
+
#ifdef __KERNEL__
#define buffer_trace_init(bh) do {} while (0)
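A hedged caller's-eye sketch of the new credit accounting: handles are started with both buffer and revoke credits, remaining buffer credits are read via jbd2_handle_buffer_credits() rather than the old h_buffer_credits field, and jbd2_journal_extend() now asks for extra revoke records too (all counts are illustrative):

static int my_journalled_op(journal_t *journal)
{
	handle_t *handle;
	int err = 0;

	handle = jbd2__journal_start(journal, 8, 0, 2, GFP_NOFS, 0, 0);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	if (jbd2_handle_buffer_credits(handle) < 8)
		err = jbd2_journal_extend(handle, 4, 1);

	/* ... dirty buffers, then jbd2_journal_stop(handle) ... */
	return err;
}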
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 9fb870524314..75bc56109031 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -11,6 +11,8 @@
#ifndef JOURNAL_HEAD_H_INCLUDED
#define JOURNAL_HEAD_H_INCLUDED
+#include <linux/spinlock.h>
+
typedef unsigned int tid_t; /* Unique transaction ID */
typedef struct transaction_s transaction_t; /* Compound transaction type */
@@ -24,13 +26,18 @@ struct journal_head {
struct buffer_head *b_bh;
/*
+ * Protect the buffer head state
+ */
+ spinlock_t b_state_lock;
+
+ /*
* Reference count - see description in journal.c
* [jbd_lock_bh_journal_head()]
*/
int b_jcount;
/*
- * Journalling list for this buffer [jbd_lock_bh_state()]
+ * Journalling list for this buffer [b_state_lock]
* NOTE: We *cannot* combine this with b_modified into a bitfield
* as gcc would then (which the C standard allows but which is
* very unuseful) make 64-bit accesses to the bitfield and clobber
@@ -41,20 +48,20 @@ struct journal_head {
/*
* This flag signals the buffer has been modified by
* the currently running transaction
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
unsigned b_modified;
/*
* Copy of the buffer data frozen for writing to the log.
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
char *b_frozen_data;
/*
* Pointer to a saved copy of the buffer containing no uncommitted
* deallocation references, so that allocations can avoid overwriting
- * uncommitted deletes. [jbd_lock_bh_state()]
+ * uncommitted deletes. [b_state_lock]
*/
char *b_committed_data;
@@ -63,7 +70,7 @@ struct journal_head {
* metadata: either the running transaction or the committing
* transaction (if there is one). Only applies to buffers on a
* transaction's data or metadata journaling list.
- * [j_list_lock] [jbd_lock_bh_state()]
+ * [j_list_lock] [b_state_lock]
* Either of these locks is enough for reading, both are needed for
* changes.
*/
@@ -73,13 +80,13 @@ struct journal_head {
* Pointer to the running compound transaction which is currently
* modifying the buffer's metadata, if there was already a transaction
* committing it when the new transaction touched it.
- * [t_list_lock] [jbd_lock_bh_state()]
+ * [t_list_lock] [b_state_lock]
*/
transaction_t *b_next_transaction;
/*
* Doubly-linked list of buffers on a transaction's data, metadata or
- * forget queue. [t_list_lock] [jbd_lock_bh_state()]
+ * forget queue. [t_list_lock] [b_state_lock]
*/
struct journal_head *b_tnext, *b_tprev;
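With BH_State gone, the bracketed annotations above now name the per-journal_head spinlock; a short sketch of the replacement locking pattern:

static void my_inspect_jh(struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);

	spin_lock(&jh->b_state_lock);	/* was jbd_lock_bh_state(bh) */
	/* ... look at jh->b_transaction, b_frozen_data, ... */
	spin_unlock(&jh->b_state_lock);
}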
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index cc8a03cc9674..4f404c565db1 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -70,8 +70,18 @@ struct kasan_cache {
int free_meta_offset;
};
+/*
+ * These functions provide a special case to support backing module
+ * allocations with real shadow memory. With KASAN vmalloc, the special
+ * case is unnecessary, as the work is handled in the generic case.
+ */
+#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
+#else
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
@@ -194,4 +204,25 @@ static inline void *kasan_reset_tag(const void *addr)
#endif /* CONFIG_KASAN_SW_TAGS */
+#ifdef CONFIG_KASAN_VMALLOC
+int kasan_populate_vmalloc(unsigned long requested_size,
+ struct vm_struct *area);
+void kasan_poison_vmalloc(void *start, unsigned long size);
+void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end);
+#else
+static inline int kasan_populate_vmalloc(unsigned long requested_size,
+ struct vm_struct *area)
+{
+ return 0;
+}
+
+static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) {}
+#endif
+
#endif /* LINUX_KASAN_H */
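
A hedged sketch of how an arch module_alloc() typically pairs with kasan_module_alloc(); under CONFIG_KASAN_VMALLOC the call compiles to the inline stub above and the vmalloc path provides shadow instead. The allocation flags and address range are arch-specific and illustrative:

	void *module_alloc(unsigned long size)
	{
		void *p = __vmalloc_node_range(size, MODULE_ALIGN,
					       MODULES_VADDR, MODULES_END,
					       GFP_KERNEL, PAGE_KERNEL_EXEC,
					       0, NUMA_NO_NODE,
					       __builtin_return_address(0));

		/* no-op with CONFIG_KASAN_VMALLOC, real shadow otherwise */
		if (p && kasan_module_alloc(p, size)) {
			vfree(p);
			return NULL;
		}
		return p;
	}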
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 1e824963af17..21a3358a1731 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -94,12 +94,15 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_classdev_flash_register_ext(struct device *parent,
- struct led_classdev_flash *fled_cdev,
- struct led_init_data *init_data);
+int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
-#define led_classdev_flash_register(parent, fled_cdev) \
- led_classdev_flash_register_ext(parent, fled_cdev, NULL)
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
@@ -108,7 +111,21 @@ extern int led_classdev_flash_register_ext(struct device *parent,
*
 * Unregisters an object previously registered via led_classdev_flash_register()
*/
-extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
+
+static inline int devm_led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_strobe - setup flash strobe
@@ -156,8 +173,8 @@ static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
- u32 brightness);
+int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
/**
* led_update_flash_brightness - update flash LED brightness
@@ -168,7 +185,7 @@ extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_timeout - set flash LED timeout
@@ -179,8 +196,7 @@ extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
- u32 timeout);
+int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout);
/**
* led_get_flash_fault - get the flash LED fault
@@ -191,7 +207,6 @@ extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
- u32 *fault);
+int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault);
#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
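
A hedged usage sketch for the devm_ variants introduced above; the driver, device name, and allocation are made up, and only the registration helpers come from this patch:

	static int foo_flash_probe(struct platform_device *pdev)
	{
		struct led_classdev_flash *fled;

		fled = devm_kzalloc(&pdev->dev, sizeof(*fled), GFP_KERNEL);
		if (!fled)
			return -ENOMEM;

		fled->led_cdev.name = "foo:flash";
		/* managed: torn down automatically when the driver detaches */
		return devm_led_classdev_flash_register(&pdev->dev, fled);
	}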
diff --git a/include/linux/leds.h b/include/linux/leds.h
index efb309dba914..242258f7d837 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -161,7 +161,7 @@ struct led_classdev {
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_classdev_register_ext(struct device *parent,
+int led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data);
@@ -181,7 +181,7 @@ static inline int led_classdev_register(struct device *parent,
return led_classdev_register_ext(parent, led_cdev, NULL);
}
-extern int devm_led_classdev_register_ext(struct device *parent,
+int devm_led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data);
@@ -190,11 +190,11 @@ static inline int devm_led_classdev_register(struct device *parent,
{
return devm_led_classdev_register_ext(parent, led_cdev, NULL);
}
-extern void led_classdev_unregister(struct led_classdev *led_cdev);
-extern void devm_led_classdev_unregister(struct device *parent,
- struct led_classdev *led_cdev);
-extern void led_classdev_suspend(struct led_classdev *led_cdev);
-extern void led_classdev_resume(struct led_classdev *led_cdev);
+void led_classdev_unregister(struct led_classdev *led_cdev);
+void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
+void led_classdev_suspend(struct led_classdev *led_cdev);
+void led_classdev_resume(struct led_classdev *led_cdev);
/**
* led_blink_set - set blinking with software fallback
@@ -211,9 +211,8 @@ extern void led_classdev_resume(struct led_classdev *led_cdev);
* led_cdev->brightness_set() will not stop the blinking,
* use led_classdev_brightness_set() instead.
*/
-extern void led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off);
+void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off);
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -228,10 +227,9 @@ extern void led_blink_set(struct led_classdev *led_cdev,
 * If invert is set, the led blinks for delay_off first, then for
 * delay_on and leaves the led on after the on-off cycle.
*/
-extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
+void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off,
+ int invert);
/**
* led_set_brightness - set LED brightness
* @led_cdev: the LED to set
@@ -241,8 +239,8 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-extern void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
@@ -255,8 +253,8 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev,
+ enum led_brightness value);
/**
* led_update_brightness - update LED brightness
@@ -267,7 +265,7 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_brightness(struct led_classdev *led_cdev);
+int led_update_brightness(struct led_classdev *led_cdev);
/**
* led_get_default_pattern - return default pattern
@@ -279,8 +277,7 @@ extern int led_update_brightness(struct led_classdev *led_cdev);
* Return: Allocated array of integers with default pattern from device tree
* or NULL. Caller is responsible for kfree().
*/
-extern u32 *led_get_default_pattern(struct led_classdev *led_cdev,
- unsigned int *size);
+u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size);
/**
* led_sysfs_disable - disable LED sysfs interface
@@ -288,7 +285,7 @@ extern u32 *led_get_default_pattern(struct led_classdev *led_cdev,
*
* Disable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_disable(struct led_classdev *led_cdev);
+void led_sysfs_disable(struct led_classdev *led_cdev);
/**
* led_sysfs_enable - enable LED sysfs interface
@@ -296,7 +293,7 @@ extern void led_sysfs_disable(struct led_classdev *led_cdev);
*
* Enable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_enable(struct led_classdev *led_cdev);
+void led_sysfs_enable(struct led_classdev *led_cdev);
/**
* led_compose_name - compose LED class device name
@@ -310,8 +307,8 @@ extern void led_sysfs_enable(struct led_classdev *led_cdev);
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_compose_name(struct device *dev, struct led_init_data *init_data,
- char *led_classdev_name);
+int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ char *led_classdev_name);
/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
@@ -360,33 +357,25 @@ struct led_trigger {
#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-
/* Registration functions for complex triggers */
-extern int led_trigger_register(struct led_trigger *trigger);
-extern void led_trigger_unregister(struct led_trigger *trigger);
-extern int devm_led_trigger_register(struct device *dev,
+int led_trigger_register(struct led_trigger *trigger);
+void led_trigger_unregister(struct led_trigger *trigger);
+int devm_led_trigger_register(struct device *dev,
struct led_trigger *trigger);
-extern void led_trigger_register_simple(const char *name,
+void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
-extern void led_trigger_unregister_simple(struct led_trigger *trigger);
-extern void led_trigger_event(struct led_trigger *trigger,
- enum led_brightness event);
-extern void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off);
-extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
-extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern int led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
-extern void led_trigger_remove(struct led_classdev *led_cdev);
+void led_trigger_unregister_simple(struct led_trigger *trigger);
+void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
+ unsigned long *delay_off);
+void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
+void led_trigger_set_default(struct led_classdev *led_cdev);
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
+void led_trigger_remove(struct led_classdev *led_cdev);
static inline void led_set_trigger_data(struct led_classdev *led_cdev,
void *trigger_data)
@@ -414,8 +403,7 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
* This is meant to be used on triggers with statically
* allocated name.
*/
-extern void led_trigger_rename_static(const char *name,
- struct led_trigger *trig);
+void led_trigger_rename_static(const char *name, struct led_trigger *trig);
#define module_led_trigger(__led_trigger) \
module_driver(__led_trigger, led_trigger_register, \
@@ -457,20 +445,20 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
-extern void ledtrig_disk_activity(bool write);
+void ledtrig_disk_activity(bool write);
#else
static inline void ledtrig_disk_activity(bool write) {}
#endif
#ifdef CONFIG_LEDS_TRIGGER_MTD
-extern void ledtrig_mtd_activity(void);
+void ledtrig_mtd_activity(void);
#else
static inline void ledtrig_mtd_activity(void) {}
#endif
#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
-extern void ledtrig_flash_ctrl(bool on);
-extern void ledtrig_torch_ctrl(bool on);
+void ledtrig_flash_ctrl(bool on);
+void ledtrig_torch_ctrl(bool on);
#else
static inline void ledtrig_flash_ctrl(bool on) {}
static inline void ledtrig_torch_ctrl(bool on) {}
@@ -550,7 +538,7 @@ enum cpu_led_event {
CPU_LED_HALTED, /* Machine shutdown */
};
#ifdef CONFIG_LEDS_TRIGGER_CPU
-extern void ledtrig_cpu(enum cpu_led_event evt);
+void ledtrig_cpu(enum cpu_led_event evt);
#else
static inline void ledtrig_cpu(enum cpu_led_event evt)
{
@@ -559,7 +547,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
-extern void led_classdev_notify_brightness_hw_changed(
+void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
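
As a usage sketch, the de-externed trigger API above is typically consumed as below; the trigger name and activate body are illustrative:

	static int foo_trig_activate(struct led_classdev *led_cdev)
	{
		/* per-LED trigger state would be attached here */
		led_set_trigger_data(led_cdev, NULL);
		return 0;
	}

	static struct led_trigger foo_led_trigger = {
		.name		= "foo",
		.activate	= foo_trig_activate,
	};
	module_led_trigger(foo_led_trigger);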
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index edb0f0c30904..cea8574a29b1 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -2,11 +2,14 @@
#ifndef LIBFDT_ENV_H
#define LIBFDT_ENV_H
-#include <linux/kernel.h> /* For INT_MAX */
+#include <linux/limits.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index b6eddf912568..9df091bd30ba 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -65,13 +65,6 @@ enum {
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
-extern struct attribute_group nvdimm_bus_attribute_group;
-extern struct attribute_group nvdimm_attribute_group;
-extern struct attribute_group nd_device_attribute_group;
-extern struct attribute_group nd_numa_attribute_group;
-extern struct attribute_group nd_region_attribute_group;
-extern struct attribute_group nd_mapping_attribute_group;
-
struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
diff --git a/include/linux/license.h b/include/linux/license.h
index decdbf43cb5c..7cce390f120b 100644
--- a/include/linux/license.h
+++ b/include/linux/license.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LICENSE_H
#define __LICENSE_H
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f491690d54c6..b38bbefabfab 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -358,6 +358,9 @@ static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE);
}
+void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ae703ea3ef48..a7a0a1a5c8d5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -58,7 +58,6 @@ enum mem_cgroup_protection {
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- int priority;
unsigned int generation;
};
@@ -81,7 +80,6 @@ struct mem_cgroup_id {
enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_THRESH,
MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
@@ -112,7 +110,7 @@ struct memcg_shrinker_map {
};
/*
- * per-zone information in memory controller.
+ * per-node information in memory controller.
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
@@ -126,7 +124,7 @@ struct mem_cgroup_per_node {
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
- struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+ struct mem_cgroup_reclaim_iter iter;
struct memcg_shrinker_map __rcu *shrinker_map;
@@ -134,9 +132,6 @@ struct mem_cgroup_per_node {
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
- bool congested; /* memcg has many dirty pages */
- /* backed by a congested BDI */
-
struct mem_cgroup *memcg; /* Back pointer, we cannot */
/* use container_of */
};
@@ -313,13 +308,6 @@ struct mem_cgroup {
struct list_head kmem_caches;
#endif
- int last_scanned_node;
-#if MAX_NUMNODES > 1
- nodemask_t scan_nodes;
- atomic_t numainfo_events;
- atomic_t numainfo_updating;
-#endif
-
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
@@ -394,25 +382,27 @@ mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
}
/**
- * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
- * @node: node of the wanted lruvec
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
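+ * @pgdat: pglist_data of the wanted lruvec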
*
- * Returns the lru list vector holding pages for a given @node or a given
- * @memcg and @zone. This can be the node lruvec, if the memory controller
- * is disabled.
+ * Returns the lru list vector holding pages for a given @memcg &
+ * @node combination. This can be the node lruvec, if the memory
+ * controller is disabled.
*/
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
struct mem_cgroup_per_node *mz;
struct lruvec *lruvec;
if (mem_cgroup_disabled()) {
- lruvec = node_lruvec(pgdat);
+ lruvec = &pgdat->__lruvec;
goto out;
}
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
lruvec = &mz->lruvec;
out:
@@ -728,7 +718,7 @@ static inline void __mod_lruvec_page_state(struct page *page,
return;
}
- lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+ lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
__mod_lruvec_state(lruvec, idx, val);
}
@@ -899,16 +889,21 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
- return node_lruvec(pgdat);
+ return &pgdat->__lruvec;
}
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
struct pglist_data *pgdat)
{
- return &pgdat->lruvec;
+ return &pgdat->__lruvec;
+}
+
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+{
+ return NULL;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index f46ea71b4ffd..3a08ecdfca11 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -102,13 +102,10 @@ extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
+extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
-extern void __online_page_set_limits(struct page *page);
-extern void __online_page_increment_counters(struct page *page);
-extern void __online_page_free(struct page *page);
-
extern int try_online_node(int nid);
extern int arch_add_memory(int nid, u64 start, u64 size,
@@ -229,9 +226,6 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
-
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \
@@ -339,6 +333,9 @@ static inline int remove_memory(int nid, u64 start, u64 size)
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void set_zone_contiguous(struct zone *zone);
+extern void clear_zone_contiguous(struct zone *zone);
+
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
new file mode 100644
index 000000000000..e11595256cac
--- /dev/null
+++ b/include/linux/memregion.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MEMREGION_H_
+#define _MEMREGION_H_
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct memregion_info {
+ int target_node;
+};
+
+#ifdef CONFIG_MEMREGION
+int memregion_alloc(gfp_t gfp);
+void memregion_free(int id);
+#else
+static inline int memregion_alloc(gfp_t gfp)
+{
+ return -ENOMEM;
+}
+static inline void memregion_free(int id)
+{
+}
+#endif
+#endif /* _MEMREGION_H_ */
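
A short, hedged usage sketch for the new ID allocator; the caller is hypothetical, and note that the fallback stub fails with -ENOMEM when CONFIG_MEMREGION is not set:

	static int foo_create_region(void)
	{
		int id = memregion_alloc(GFP_KERNEL);

		if (id < 0)
			return id;
		/* ... use id, e.g. to name a "regionN" device ... */
		memregion_free(id);
		return 0;
	}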
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
deleted file mode 100644
index 836c944abe2e..000000000000
--- a/include/linux/mfd/abx500/ab8500-gpadc.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 ST-Ericsson SA
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: M'boumba Cedric Madianga <cedric.madianga@stericsson.com>
- */
-
-#ifndef _AB8500_GPADC_H
-#define _AB8500_GPADC_H
-
-/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2
- * and ADCHwSel[4:0] in GPADCCtrl3 ) */
-#define BAT_CTRL 0x01
-#define BTEMP_BALL 0x02
-#define MAIN_CHARGER_V 0x03
-#define ACC_DETECT1 0x04
-#define ACC_DETECT2 0x05
-#define ADC_AUX1 0x06
-#define ADC_AUX2 0x07
-#define MAIN_BAT_V 0x08
-#define VBUS_V 0x09
-#define MAIN_CHARGER_C 0x0A
-#define USB_CHARGER_C 0x0B
-#define BK_BAT_V 0x0C
-#define DIE_TEMP 0x0D
-#define USB_ID 0x0E
-#define XTAL_TEMP 0x12
-#define VBAT_TRUE_MEAS 0x13
-#define BAT_CTRL_AND_IBAT 0x1C
-#define VBAT_MEAS_AND_IBAT 0x1D
-#define VBAT_TRUE_MEAS_AND_IBAT 0x1E
-#define BAT_TEMP_AND_IBAT 0x1F
-
-/* Virtual channel used only for ibat convertion to ampere
- * Battery current conversion (ibat) cannot be requested as a single conversion
- * but it is always in combination with other input requests
- */
-#define IBAT_VIRTUAL_CHANNEL 0xFF
-
-#define SAMPLE_1 1
-#define SAMPLE_4 4
-#define SAMPLE_8 8
-#define SAMPLE_16 16
-#define RISING_EDGE 0
-#define FALLING_EDGE 1
-
-/* Arbitrary ADC conversion type constants */
-#define ADC_SW 0
-#define ADC_HW 1
-
-struct ab8500_gpadc;
-
-struct ab8500_gpadc *ab8500_gpadc_get(char *name);
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
-{
- return ab8500_gpadc_sw_hw_convert(gpadc, channel,
- SAMPLE_16, 0, 0, ADC_SW);
-}
-
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat);
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
- u8 channel, int ad_value);
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h);
-
-#endif /* _AB8500_GPADC_H */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index bb1a2530ae27..49e24d1de8d4 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1186,13 +1186,6 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
-#define ARIZONA_FRF_COEFF_1 0x1700
-#define ARIZONA_FRF_COEFF_2 0x1701
-#define ARIZONA_FRF_COEFF_3 0x1702
-#define ARIZONA_FRF_COEFF_4 0x1703
-#define ARIZONA_V2_DAC_COMP_1 0x1704
-#define ARIZONA_V2_DAC_COMP_2 0x1705
-
/*
* Field Definitions.
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index b43fc5773ad7..d01d1299e49d 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -12,6 +12,35 @@
#include <linux/platform_device.h>
+#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
+
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\
+ { \
+ .name = (_name), \
+ .resources = (_res), \
+ .num_resources = MFD_RES_SIZE((_res)), \
+ .platform_data = (_pdata), \
+ .pdata_size = (_pdsize), \
+ .of_compatible = (_compat), \
+ .acpi_match = (_match), \
+ .id = (_id), \
+ }
+
+#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \
+	MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL)
+
+#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
+	MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match)
+
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+	MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL)
+
+#define MFD_CELL_RES(_name, _res) \
+	MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL)
+
+#define MFD_CELL_NAME(_name) \
+	MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL)
+
struct irq_domain;
struct property_entry;
@@ -30,8 +59,6 @@ struct mfd_cell {
const char *name;
int id;
- /* refcounting for multiple drivers to use a single cell */
- atomic_t *usage_count;
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
@@ -87,24 +114,6 @@ extern int mfd_cell_enable(struct platform_device *pdev);
extern int mfd_cell_disable(struct platform_device *pdev);
/*
- * "Clone" multiple platform devices for a single cell. This is to be used
- * for devices that have multiple users of a cell. For example, if an mfd
- * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver,
- * and a platform driver, the following bit of code would be use after first
- * calling mfd_add_devices():
- *
- * const char *fclones[] = { "foo-gpio", "foo-mtd" };
- * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones));
- *
- * Each driver (MTD, GPIO, and platform driver) would then register
- * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively.
- * The cell's .enable/.disable hooks should be used to deal with hardware
- * resource contention.
- */
-extern int mfd_clone_cell(const char *cell, const char **clones,
- size_t n_clones);
-
-/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the mfd_cell that created it.
*/
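
A hedged example of how the new cell macros compact an mfd_cell table; the device names, compatible string, and IRQ number are made up:

	static const struct resource foo_adc_resources[] = {
		DEFINE_RES_IRQ(7),
	};

	static const struct mfd_cell foo_cells[] = {
		OF_MFD_CELL("foo-adc", foo_adc_resources, NULL, 0, 0,
			    "vendor,foo-adc"),
		MFD_CELL_NAME("foo-pwrkey"),
	};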
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 813710aa2cfd..1fc75d2b4a38 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -489,7 +489,7 @@ struct prcmu_auto_pm_config {
#ifdef CONFIG_MFD_DB8500_PRCMU
-void db8500_prcmu_early_init(u32 phy_base, u32 size);
+void db8500_prcmu_early_init(void);
int prcmu_set_rc_a2p(enum romcode_write);
enum romcode_read prcmu_get_rc_p2a(void);
enum ap_pwrst prcmu_get_xp70_current_state(void);
@@ -546,7 +546,7 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
#else /* !CONFIG_MFD_DB8500_PRCMU */
-static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void db8500_prcmu_early_init(void) {}
static inline int prcmu_set_rc_a2p(enum romcode_write code)
{
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 238401a50d0b..e2571040c7e8 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -190,6 +190,7 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
#define PRCMU_FW_PROJECT_U9540 32
@@ -211,9 +212,9 @@ struct prcmu_fw_version {
#if defined(CONFIG_UX500_SOC_DB8500)
-static inline void prcmu_early_init(u32 phy_base, u32 size)
+static inline void prcmu_early_init(void)
{
- return db8500_prcmu_early_init(phy_base, size);
+ return db8500_prcmu_early_init();
}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
@@ -401,7 +402,7 @@ static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
}
#else
-static inline void prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void prcmu_early_init(void) {}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll)
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index 7ffa696cce7c..ad2c138105d4 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -8,6 +8,7 @@
#ifndef MADERA_CORE_H
#define MADERA_CORE_H
+#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/madera/pdata.h>
@@ -29,6 +30,13 @@ enum madera_type {
CS42L92 = 9,
};
+enum {
+ MADERA_MCLK1,
+ MADERA_MCLK2,
+ MADERA_MCLK3,
+ MADERA_NUM_MCLK
+};
+
#define MADERA_MAX_CORE_SUPPLIES 2
#define MADERA_MAX_GPIOS 40
@@ -155,6 +163,7 @@ struct snd_soc_dapm_context;
* @irq_dev: the irqchip child driver device
* @irq_data: pointer to irqchip data for the child irqchip driver
* @irq: host irq number from SPI or I2C configuration
+ * @mclk: Structure holding clock supplies
* @out_clamp: indicates output clamp state for each analogue output
* @out_shorted: indicates short circuit state for each analogue output
* @hp_ena: bitflags of enable state for the headphone outputs
@@ -184,6 +193,8 @@ struct madera {
struct regmap_irq_chip_data *irq_data;
int irq;
+ struct clk_bulk_data mclk[MADERA_NUM_MCLK];
+
unsigned int num_micbias;
unsigned int num_childbias[MADERA_MAX_MICBIAS];
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 12ba157cb83f..f552ef5b1100 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -329,7 +329,6 @@ struct max77620_chip {
struct regmap *rmap;
int chip_irq;
- int irq_base;
/* chip id */
enum max77620_chip_id chip_id;
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
new file mode 100644
index 000000000000..f84b9163c0ee
--- /dev/null
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2019 MediaTek Inc.
+ *
+ * Author: Tianping.Fang <tianping.fang@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef _LINUX_MFD_MT6397_RTC_H_
+#define _LINUX_MFD_MT6397_RTC_H_
+
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define RTC_BBPU 0x0000
+#define RTC_BBPU_CBUSY BIT(6)
+#define RTC_BBPU_KEY (0x43 << 8)
+
+#define RTC_WRTGR 0x003c
+
+#define RTC_IRQ_STA 0x0002
+#define RTC_IRQ_STA_AL BIT(0)
+#define RTC_IRQ_STA_LP BIT(3)
+
+#define RTC_IRQ_EN 0x0004
+#define RTC_IRQ_EN_AL BIT(0)
+#define RTC_IRQ_EN_ONESHOT BIT(2)
+#define RTC_IRQ_EN_LP BIT(3)
+#define RTC_IRQ_EN_ONESHOT_AL (RTC_IRQ_EN_ONESHOT | RTC_IRQ_EN_AL)
+
+#define RTC_AL_MASK 0x0008
+#define RTC_AL_MASK_DOW BIT(4)
+
+#define RTC_TC_SEC 0x000a
+/* Min, Hour, Dom... register offset to RTC_TC_SEC */
+#define RTC_OFFSET_SEC 0
+#define RTC_OFFSET_MIN 1
+#define RTC_OFFSET_HOUR 2
+#define RTC_OFFSET_DOM 3
+#define RTC_OFFSET_DOW 4
+#define RTC_OFFSET_MTH 5
+#define RTC_OFFSET_YEAR 6
+#define RTC_OFFSET_COUNT 7
+
+#define RTC_AL_SEC 0x0018
+
+#define RTC_PDN2 0x002e
+#define RTC_PDN2_PWRON_ALARM BIT(4)
+
+#define RTC_MIN_YEAR 1968
+#define RTC_BASE_YEAR 1900
+#define RTC_NUM_YEARS 128
+#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
+
+#define MTK_RTC_POLL_DELAY_US 10
+#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+struct mt6397_rtc {
+ struct device *dev;
+ struct rtc_device *rtc_dev;
+
+ /* Protect register access from multiple tasks */
+ struct mutex lock;
+ struct regmap *regmap;
+ int irq;
+ u32 addr_base;
+};
+
+#endif /* _LINUX_MFD_MT6397_RTC_H_ */
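
A hedged sketch of the write-trigger/poll pattern the RTC_WRTGR and RTC_BBPU_CBUSY definitions above imply, using the stock regmap_read_poll_timeout() helper; the wrapper function is illustrative:

	static int foo_rtc_write_trigger(struct mt6397_rtc *rtc)
	{
		u32 data;
		int ret;

		/* commit the shadowed time registers to the RTC */
		ret = regmap_write(rtc->regmap, rtc->addr_base + RTC_WRTGR, 1);
		if (ret < 0)
			return ret;

		/* wait for the hardware copy-busy bit to clear */
		return regmap_read_poll_timeout(rtc->regmap,
						rtc->addr_base + RTC_BBPU, data,
						!(data & RTC_BBPU_CBUSY),
						MTK_RTC_POLL_DELAY_US,
						MTK_RTC_POLL_TIMEOUT);
	}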
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 7cfd2b0504df..a59bf323f713 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -610,7 +610,7 @@ enum {
RK808_ID = 0x0000,
RK809_ID = 0x8090,
RK817_ID = 0x8170,
- RK818_ID = 0x8181,
+ RK818_ID = 0x8180,
};
struct rk808 {
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 44aff52a5002..089e8942223a 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -181,14 +181,18 @@ static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) {
}
static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) {
- val = cpu_to_le16(val);
- return twl_i2c_write(mod_no, (u8*) &val, reg, 2);
+ __le16 value;
+
+ value = cpu_to_le16(val);
+ return twl_i2c_write(mod_no, (u8 *) &value, reg, 2);
}
static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) {
int ret;
- ret = twl_i2c_read(mod_no, (u8*) val, reg, 2);
- *val = le16_to_cpu(*val);
+ __le16 value;
+
+ ret = twl_i2c_read(mod_no, (u8 *) &value, reg, 2);
+ *val = le16_to_cpu(value);
return ret;
}
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 3247a3dc7934..b06b75776a32 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -57,6 +57,7 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define RFKILL_MINOR 242
#define MISC_DYNAMIC_MINOR 255
struct device;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 1884513aac90..27200dea0297 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -556,8 +556,6 @@ struct mlx5_priv {
struct dentry *cmdif_debugfs;
 /* end: qp stuff */
- struct xarray mkey_table;
-
 /* start: alloc stuff */
 /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
@@ -942,8 +940,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a2adf95b3f9c..8b0ef04b6d15 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -564,21 +564,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)
@@ -1643,19 +1628,27 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
return (unsigned long)val;
}
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
+
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
- atomic_long_add(value, &mm->rss_stat.count[member]);
+ long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+
+ mm_trace_rss_stat(mm, member, count);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
- atomic_long_inc(&mm->rss_stat.count[member]);
+ long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+
+ mm_trace_rss_stat(mm, member, count);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
- atomic_long_dec(&mm->rss_stat.count[member]);
+ long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+
+ mm_trace_rss_stat(mm, member, count);
}
/* Optimized variant when page is already known not to be PageAnon */
@@ -2214,9 +2207,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
-extern void zone_pcp_update(struct zone *zone);
-extern void zone_pcp_reset(struct zone *zone);
-
/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
@@ -2632,7 +2622,6 @@ typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
-
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
@@ -2781,7 +2770,7 @@ extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
-extern int soft_offline_page(struct page *page, int flags);
+extern int soft_offline_page(unsigned long pfn, int flags);
/*
@@ -2873,5 +2862,17 @@ static inline int pages_identical(struct page *page1, struct page *page2)
return !memcmp_pages(page1, page2);
}
+#ifdef CONFIG_MAPPING_DIRTY_HELPERS
+unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr,
+ pgoff_t bitmap_pgoff,
+ unsigned long *bitmap,
+ pgoff_t *start,
+ pgoff_t *end);
+
+unsigned long wp_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
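
Every RSS counter update now funnels through mm_trace_rss_stat(); a hedged sketch of the out-of-line definition this series pairs with it, assuming a trace event declared elsewhere in the series:

	void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
	{
		trace_rss_stat(mm, member, count);
	}

Keeping the inline helpers down to one extra call leaves the fast path cheap while still exposing the member and the post-update count to tracing.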
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 9b6336ad3266..cf3780a6ccc4 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -226,7 +226,7 @@ struct mmc_queue_req;
* MMC Physical partitions
*/
struct mmc_part {
- unsigned int size; /* partition size (in bytes) */
+ u64 size; /* partition size (in bytes) */
unsigned int part_cfg; /* partition type */
char name[MAX_MMC_PART_NAME_LEN];
bool force_ro; /* to make boot parts RO by default */
@@ -291,6 +291,7 @@ struct mmc_card {
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
unsigned int sdio_funcs; /* number of SDIO functions */
+ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
struct sdio_cccr cccr; /* common card info */
struct sdio_cis cis; /* common tuple info */
struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index d1a5d5df02f5..08b25c02b5a1 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -71,6 +71,8 @@
#define SDIO_VENDOR_ID_TI 0x0097
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#define SDIO_VENDOR_ID_TI_WL1251 0x104c
+#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#define SDIO_VENDOR_ID_STE 0x0020
#define SDIO_DEVICE_ID_STE_CW1200 0x2280
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 1bd8e6a09a3c..9e6caa8ecd19 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -6,9 +6,12 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
+#include <linux/interval_tree.h>
+struct mmu_notifier_mm;
struct mmu_notifier;
-struct mmu_notifier_ops;
+struct mmu_notifier_range;
+struct mmu_interval_notifier;
/**
* enum mmu_notifier_event - reason for the mmu notifier callback
@@ -31,6 +34,9 @@ struct mmu_notifier_ops;
 * access flags). The user should soft-dirty the page in the end callback to make
 * sure that anyone relying on soft dirtiness catches pages that might be written
 * through non-CPU mappings.
+ *
+ * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
+ * that the mm refcount is zero and the range is no longer accessible.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -38,38 +44,11 @@ enum mmu_notifier_event {
MMU_NOTIFY_PROTECTION_VMA,
MMU_NOTIFY_PROTECTION_PAGE,
MMU_NOTIFY_SOFT_DIRTY,
-};
-
-#ifdef CONFIG_MMU_NOTIFIER
-
-#ifdef CONFIG_LOCKDEP
-extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
-#endif
-
-/*
- * The mmu notifier_mm structure is allocated and installed in
- * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
- * critical section and it's released only when mm_count reaches zero
- * in mmdrop().
- */
-struct mmu_notifier_mm {
- /* all mmu notifiers registerd in this mm are queued in this list */
- struct hlist_head list;
- /* to serialize the list modifications and hlist_unhashed */
- spinlock_t lock;
+ MMU_NOTIFY_RELEASE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
-struct mmu_notifier_range {
- struct vm_area_struct *vma;
- struct mm_struct *mm;
- unsigned long start;
- unsigned long end;
- unsigned flags;
- enum mmu_notifier_event event;
-};
-
struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
@@ -249,6 +228,41 @@ struct mmu_notifier {
unsigned int users;
};
+/**
+ * struct mmu_interval_notifier_ops
+ * @invalidate: Upon return the caller must stop using any SPTEs within this
+ * range. This function can sleep. Return false only if sleeping
+ * was required but mmu_notifier_range_blockable(range) is false.
+ */
+struct mmu_interval_notifier_ops {
+ bool (*invalidate)(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+};
+
+struct mmu_interval_notifier {
+ struct interval_tree_node interval_tree;
+ const struct mmu_interval_notifier_ops *ops;
+ struct mm_struct *mm;
+ struct hlist_node deferred_item;
+ unsigned long invalidate_seq;
+};
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
+struct mmu_notifier_range {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ unsigned flags;
+ enum mmu_notifier_event event;
+};
+
static inline int mm_has_notifiers(struct mm_struct *mm)
{
return unlikely(mm->mmu_notifier_mm);
@@ -275,6 +289,81 @@ extern int __mmu_notifier_register(struct mmu_notifier *mn,
struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
struct mm_struct *mm);
+
+unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
+int mmu_interval_notifier_insert_locked(
+ struct mmu_interval_notifier *mni, struct mm_struct *mm,
+ unsigned long start, unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni);
+
+/**
+ * mmu_interval_set_seq - Save the invalidation sequence
+ * @mni: The mmu_interval_notifier passed to invalidate
+ * @cur_seq: The cur_seq passed to the invalidate() callback
+ *
+ * This must be called unconditionally from the invalidate callback of a
+ * struct mmu_interval_notifier_ops under the same lock that is used to call
+ * mmu_interval_read_retry(). It updates the sequence number for later use by
+ * mmu_interval_read_retry(). The provided cur_seq will always be odd.
+ *
+ * If the caller does not call mmu_interval_read_begin() or
+ * mmu_interval_read_retry() then this call is not required.
+ */
+static inline void mmu_interval_set_seq(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq)
+{
+ WRITE_ONCE(mni->invalidate_seq, cur_seq);
+}
+
+/**
+ * mmu_interval_read_retry - End a read side critical section against a VA range
+ * @mni: The range
+ * @seq: The return of the paired mmu_interval_read_begin()
+ *
+ * This MUST be called under a user provided lock that is also held
+ * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
+ *
+ * Each call should be paired with a single mmu_interval_read_begin() and
+ * should be used to conclude the read side.
+ *
+ * Returns true if an invalidation collided with this critical section, and
+ * the caller should retry.
+ */
+static inline bool mmu_interval_read_retry(struct mmu_interval_notifier *mni,
+ unsigned long seq)
+{
+ return mni->invalidate_seq != seq;
+}
+
+/**
+ * mmu_interval_check_retry - Test if a collision has occurred
+ * @mni: The range
+ * @seq: The return of the matching mmu_interval_read_begin()
+ *
+ * This can be used in the critical section between mmu_interval_read_begin()
+ * and mmu_interval_read_retry(). A return of true indicates an invalidation
+ * has collided with this critical region and a future
+ * mmu_interval_read_retry() will return true.
+ *
+ * False is not reliable and only suggests a collision may not have
+ * occurred. It can be called many times and does not have to hold the user
+ * provided lock.
+ *
+ * This call can be used as part of loops and other expensive operations to
+ * expedite a retry.
+ */
+static inline bool mmu_interval_check_retry(struct mmu_interval_notifier *mni,
+ unsigned long seq)
+{
+ /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
+ return READ_ONCE(mni->invalidate_seq) != seq;
+}
+
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
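
Taken together, the helpers above implement a sequence-count-style retry protocol; a hedged sketch of both sides, where the driver struct, its mutex, and the SPTE handling are illustrative:

	struct foo {
		struct mmu_interval_notifier notifier;
		struct mutex lock;
	};

	static bool foo_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
	{
		struct foo *f = container_of(mni, struct foo, notifier);

		if (!mmu_notifier_range_blockable(range))
			return false;

		mutex_lock(&f->lock);
		mmu_interval_set_seq(mni, cur_seq);
		/* ... invalidate SPTEs covering range->start..range->end ... */
		mutex_unlock(&f->lock);
		return true;
	}

	/* reader side: rebuild and retry until no invalidation collides */
	static void foo_update_sptes(struct foo *f)
	{
		unsigned long seq;

	again:
		seq = mmu_interval_read_begin(&f->notifier);
		/* ... collect pages and stage new SPTEs (may sleep) ... */
		mutex_lock(&f->lock);
		if (mmu_interval_read_retry(&f->notifier, seq)) {
			mutex_unlock(&f->lock);
			goto again;
		}
		/* ... commit the staged SPTEs ... */
		mutex_unlock(&f->lock);
	}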
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b0a36d1580b6..89d8ff06c9ce 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -273,12 +273,12 @@ enum lru_list {
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
-static inline int is_file_lru(enum lru_list lru)
+static inline bool is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
-static inline int is_active_lru(enum lru_list lru)
+static inline bool is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
@@ -296,6 +296,12 @@ struct zone_reclaim_stat {
unsigned long recent_scanned[2];
};
+enum lruvec_flags {
+ LRUVEC_CONGESTED, /* lruvec has many dirty pages
+ * backed by a congested BDI
+ */
+};
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
@@ -303,12 +309,14 @@ struct lruvec {
atomic_long_t inactive_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults;
+ /* Various lruvec state flags (enum lruvec_flags) */
+ unsigned long flags;
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
};
-/* Isolate unmapped file */
+/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
@@ -572,9 +580,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
- PGDAT_CONGESTED, /* pgdat has many dirty pages backed by
- * a congested BDI
- */
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
@@ -777,7 +782,13 @@ typedef struct pglist_data {
#endif
/* Fields commonly accessed by the page reclaim scanner */
- struct lruvec lruvec;
+
+ /*
+ * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
+ *
+ * Use mem_cgroup_lruvec() to look up lruvecs.
+ */
+ struct lruvec __lruvec;
unsigned long flags;
@@ -800,11 +811,6 @@ typedef struct pglist_data {
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
-static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
-{
- return &pgdat->lruvec;
-}
-
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
@@ -842,7 +848,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#ifdef CONFIG_MEMCG
return lruvec->pgdat;
#else
- return container_of(lruvec, struct pglist_data, lruvec);
+ return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}
@@ -1079,7 +1085,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
* @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->zones being iterated
+ * @z - The current pointer within zonelist->_zonerefs being iterated
* @zlist - The zonelist being iterated
* @highidx - The zone index of the highest zone to return
* @nodemask - Nodemask allowed by the allocator
diff --git a/include/linux/module.h b/include/linux/module.h
index 6d20895e7739..bd165ba68617 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -1,11 +1,14 @@
-#ifndef _LINUX_MODULE_H
-#define _LINUX_MODULE_H
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Dynamic loading of modules into the kernel.
*
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Rewritten again by Rusty Russell, 2002
*/
+
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+
#include <linux/list.h>
#include <linux/stat.h>
#include <linux/compiler.h>
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 5229c18025e9..ca92aea8a6bd 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -91,7 +91,7 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
new file mode 100644
index 000000000000..67d03156f2c2
--- /dev/null
+++ b/include/linux/mtio.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MTIO_COMPAT_H
+#define _LINUX_MTIO_COMPAT_H
+
+#include <linux/compat.h>
+#include <uapi/linux/mtio.h>
+#include <linux/uaccess.h>
+
+/*
+ * helper functions for implementing compat ioctls on the four tape
+ * drivers: we define the 32-bit layout of each incompatible structure,
+ * plus a wrapper function to copy it to user space in either format.
+ */
+
+struct mtget32 {
+ s32 mt_type;
+ s32 mt_resid;
+ s32 mt_dsreg;
+ s32 mt_gstat;
+ s32 mt_erreg;
+ s32 mt_fileno;
+ s32 mt_blkno;
+};
+#define MTIOCGET32 _IOR('m', 2, struct mtget32)
+
+struct mtpos32 {
+ s32 mt_blkno;
+};
+#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
+
+static inline int put_user_mtget(void __user *u, struct mtget *k)
+{
+ struct mtget32 k32 = {
+ .mt_type = k->mt_type,
+ .mt_resid = k->mt_resid,
+ .mt_dsreg = k->mt_dsreg,
+ .mt_gstat = k->mt_gstat,
+ .mt_erreg = k->mt_erreg,
+ .mt_fileno = k->mt_fileno,
+ .mt_blkno = k->mt_blkno,
+ };
+ int ret;
+
+ if (in_compat_syscall())
+ ret = copy_to_user(u, &k32, sizeof(k32));
+ else
+ ret = copy_to_user(u, k, sizeof(*k));
+
+ return ret ? -EFAULT : 0;
+}
+
+static inline int put_user_mtpos(void __user *u, struct mtpos *k)
+{
+ if (in_compat_syscall())
+ return put_user(k->mt_blkno, (u32 __user *)u);
+ else
+ return put_user(k->mt_blkno, (long __user *)u);
+}
+
+#endif
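
A hedged sketch of the intended call site: one ioctl handler serving both native and compat callers; the handler and the status fill are illustrative:

	static long foo_tape_ioctl(struct file *file, unsigned int cmd,
				   void __user *argp)
	{
		struct mtget status = { };

		switch (cmd) {
		case MTIOCGET:
		case MTIOCGET32:
			/* ... fill status from drive state ... */
			/* emits the struct mtget32 layout for compat tasks */
			return put_user_mtget(argp, &status);
		}
		return -ENOTTY;
	}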
diff --git a/include/linux/nd.h b/include/linux/nd.h
index f778f962d1b6..55c735997805 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -147,7 +147,7 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
/**
* nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
- * @ndns: device to read
+ * @ndns: device to write
* @offset: namespace-relative starting offset
* @buf: buffer to drain
* @size: transfer length
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9e6fb8524d91..cf0923579af4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1327,6 +1327,10 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_get_vf_guid)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 02dc4aa992b2..d3776be48c53 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -121,7 +121,7 @@ static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
static inline int nvmem_cell_write(struct nvmem_cell *cell,
- const char *buf, size_t len)
+ void *buf, size_t len)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 30e40fb6936b..eac7ab109df4 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -12,6 +12,7 @@ struct of_pci_range_parser {
const __be32 *end;
int np;
int pna;
+ bool dma;
};
struct of_pci_range {
@@ -33,10 +34,6 @@ extern u64 of_translate_dma_address(struct device_node *dev,
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern struct device_node *of_find_matching_node_by_address(
- struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address);
extern void __iomem *of_iomap(struct device_node *device, int index);
void __iomem *of_io_request_and_map(struct device_node *device,
int index, const char *name);
@@ -55,8 +52,6 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
-extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -71,14 +66,6 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline struct device_node *of_find_matching_node_by_address(
- struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address)
-{
- return NULL;
-}
-
static inline const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags)
{
@@ -104,12 +91,6 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
-static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size)
-{
- return -ENODEV;
-}
-
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 21a89c4880fa..29658c0ee71f 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -2,11 +2,10 @@
#ifndef __OF_PCI_H
#define __OF_PCI_H
-#include <linux/pci.h>
-#include <linux/msi.h>
+#include <linux/types.h>
+#include <linux/errno.h>
struct pci_dev;
-struct of_phandle_args;
struct device_node;
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 1099c2fee20f..6861df759fad 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -30,7 +30,7 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
-#define SKIP_HWPOISON 0x1
+#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
@@ -58,7 +58,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Test all pages in [start_pfn, end_pfn) are isolated or not.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- bool skip_hwpoisoned_pages);
+ int isol_flags);
struct page *alloc_migrate_target(struct page *page, unsigned long private);
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index bddd9759bab9..6ec82e92c87f 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -24,6 +24,9 @@ struct mm_walk;
* "do page table walk over the current vma", returning
* a negative value means "abort current page table walk
* right now" and returning 1 means "skip the current vma"
+ * @pre_vma: if set, called before starting walk on a non-null vma.
+ * @post_vma: if set, called after a walk on a non-null vma, provided
+ * that @pre_vma and the vma walk succeeded.
*/
struct mm_walk_ops {
int (*pud_entry)(pud_t *pud, unsigned long addr,
@@ -39,6 +42,9 @@ struct mm_walk_ops {
struct mm_walk *walk);
int (*test_walk)(unsigned long addr, unsigned long next,
struct mm_walk *walk);
+ int (*pre_vma)(unsigned long start, unsigned long end,
+ struct mm_walk *walk);
+ void (*post_vma)(struct mm_walk *walk);
};
/**
@@ -62,5 +68,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private);
+int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+ pgoff_t nr, const struct mm_walk_ops *ops,
+ void *private);
#endif /* _LINUX_PAGEWALK_H */
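
A hedged sketch of driving the new walk_page_mapping() with a minimal ops table; the callback body and names are illustrative:

	static int foo_pte_entry(pte_t *pte, unsigned long addr,
				 unsigned long next, struct mm_walk *walk)
	{
		/* called for each pte with the page table lock held */
		return 0;
	}

	static const struct mm_walk_ops foo_walk_ops = {
		.pte_entry = foo_pte_entry,
	};

	/* walk every pte mapping pages [first_index, first_index + nr) */
	err = walk_page_mapping(mapping, first_index, nr, &foo_walk_ops, NULL);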
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 397607a0c0eb..13932ce8b37b 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
+#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 1ebb88e7c184..5d62e78946a3 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -4,74 +4,39 @@
#include <linux/pci.h>
-#ifdef CONFIG_PCI_PRI
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else /* CONFIG_PCI_ATS */
+static inline int pci_enable_ats(struct pci_dev *d, int ps)
+{ return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d)
+{ return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev)
+{ return 0; }
+#endif /* CONFIG_PCI_ATS */
+#ifdef CONFIG_PCI_PRI
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
void pci_disable_pri(struct pci_dev *pdev);
-void pci_restore_pri_state(struct pci_dev *pdev);
int pci_reset_pri(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PRI */
-
-static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
-{
- return -ENODEV;
-}
-
-static inline void pci_disable_pri(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pri_state(struct pci_dev *pdev)
-{
-}
-
-static inline int pci_reset_pri(struct pci_dev *pdev)
-{
- return -ENODEV;
-}
-
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
-
int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
-void pci_restore_pasid_state(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
-int pci_prg_resp_pasid_required(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PASID */
-
-static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
-{
- return -EINVAL;
-}
-
-static inline void pci_disable_pasid(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pasid_state(struct pci_dev *pdev)
-{
-}
-
+#else /* CONFIG_PCI_PASID */
static inline int pci_pasid_features(struct pci_dev *pdev)
-{
- return -EINVAL;
-}
-
+{ return -EINVAL; }
static inline int pci_max_pasids(struct pci_dev *pdev)
-{
- return -EINVAL;
-}
-
-static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
- return 0;
-}
+{ return -EINVAL; }
#endif /* CONFIG_PCI_PASID */
-
-#endif /* LINUX_PCI_ATS_H*/
+#endif /* LINUX_PCI_ATS_H */
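
The rewritten pci-ats.h above gathers the ATS, PRI and PASID prototypes in one
place. A minimal sketch of an IOMMU-driver-style caller, assuming PAGE_SHIFT
as the smallest translation unit and no PASID feature flags:

static int enable_device_translation(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;

	ret = pci_enable_pasid(pdev, 0);
	if (ret)
		pci_disable_ats(pdev);	/* unwind on failure */

	return ret;
}
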
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index f641badc2c61..56f1846b9d39 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -117,7 +117,7 @@ struct pci_epc_features {
unsigned int msix_capable : 1;
u8 reserved_bar;
u8 bar_fixed_64bit;
- u64 bar_fixed_size[BAR_5 + 1];
+ u64 bar_fixed_size[PCI_STD_NUM_BARS];
size_t align;
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1a6cf19eac2d..c393dff2d66f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -82,7 +82,7 @@ enum pci_mmap_state {
enum {
/* #0-5: standard PCI resources */
PCI_STD_RESOURCES,
- PCI_STD_RESOURCE_END = 5,
+ PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
/* #6: expansion ROM resource */
PCI_ROM_RESOURCE,
@@ -284,7 +284,6 @@ struct irq_affinity;
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
-struct pci_ats;
struct pci_p2pdma;
/* The pci_dev structure describes PCI devices */
@@ -452,12 +451,14 @@ struct pci_dev {
};
u16 ats_cap; /* ATS Capability offset */
u8 ats_stu; /* ATS Smallest Translation Unit */
- atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */
#endif
#ifdef CONFIG_PCI_PRI
+ u16 pri_cap; /* PRI Capability offset */
u32 pri_reqs_alloc; /* Number of PRI requests allocated */
+ unsigned int pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
+ u16 pasid_cap; /* PASID Capability offset */
u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
@@ -805,8 +806,6 @@ struct module;
* The remove function always gets called from process
* context, so it can sleep.
* @suspend: Put device into low power state.
- * @suspend_late: Put device into low power state.
- * @resume_early: Wake device from low power state.
* @resume: Wake device from low power state.
* (Please see Documentation/power/pci.rst for descriptions
* of PCI Power Management and the related functions.)
@@ -829,8 +828,6 @@ struct pci_driver {
int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
- int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
- int (*resume_early)(struct pci_dev *dev);
int (*resume)(struct pci_dev *dev); /* Device woken up */
void (*shutdown)(struct pci_dev *dev);
int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
@@ -1232,7 +1229,7 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
u16 cap, unsigned int size);
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
@@ -1454,7 +1451,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
-int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1497,11 +1493,6 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
{
return cpu_possible_mask;
}
-
-static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
- return first_online_node;
-}
#endif
/**
@@ -1544,9 +1535,13 @@ extern bool pcie_ports_native;
#define pcie_ports_native false
#endif
-#define PCIE_LINK_STATE_L0S 1
-#define PCIE_LINK_STATE_L1 2
-#define PCIE_LINK_STATE_CLKPM 4
+#define PCIE_LINK_STATE_L0S BIT(0)
+#define PCIE_LINK_STATE_L1 BIT(1)
+#define PCIE_LINK_STATE_CLKPM BIT(2)
+#define PCIE_LINK_STATE_L1_1 BIT(3)
+#define PCIE_LINK_STATE_L1_2 BIT(4)
+#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
+#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
#ifdef CONFIG_PCIEASPM
int pci_disable_link_state(struct pci_dev *pdev, int state);
@@ -1777,19 +1772,6 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
NULL);
}
-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-int pci_ats_page_aligned(struct pci_dev *dev);
-#else
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
-#endif
-
/* Include architecture-dependent settings and functions */
#include <asm/pci.h>
@@ -2279,6 +2261,7 @@ struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
+ struct list_head *ib_resources,
struct resource **bus_range);
/* Arch may override this (weak) */
@@ -2287,9 +2270,11 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
-static inline int pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct resource **bus_range)
+static inline int
+pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *resources,
+ struct list_head *ib_resources,
+ struct resource **bus_range)
{
return -EINVAL;
}
@@ -2311,9 +2296,11 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
+bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_EEH
@@ -2401,4 +2388,12 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#define pci_info_ratelimited(pdev, fmt, arg...) \
dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
+#define pci_WARN(pdev, condition, fmt, arg...) \
+ WARN(condition, "%s %s: " fmt, \
+ dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
+#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
+ WARN_ONCE(condition, "%s %s: " fmt, \
+ dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
#endif /* LINUX_PCI_H */
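
Two of the pci.h additions above are easy to show in isolation: the ASPM
link-state flags are now BIT() values with L1 sub-states, and pci_WARN_ONCE()
prefixes a WARN with the driver and device name. A minimal sketch; the quirk,
its condition and its message are illustrative only:

static void quirk_disable_l1ss(struct pci_dev *pdev)
{
	/* Keep the device out of the new L1.1/L1.2 sub-states. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_1 |
				     PCIE_LINK_STATE_L1_2);

	pci_WARN_ONCE(pdev, pdev->current_state == PCI_D3cold,
		      "unexpected power state during quirk\n");
}
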
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 21a572469a4e..2302d133af6f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3006,6 +3006,7 @@
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
#define PCI_VENDOR_ID_SCALEMP 0x8686
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 7aef0abc194a..390031e816dc 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -186,14 +186,14 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
atomic_long_add(nr, &ref->count);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
@@ -223,7 +223,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
unsigned long __percpu *percpu_count;
bool ret;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
@@ -232,7 +232,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
ret = atomic_long_inc_not_zero(&ref->count);
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
return ret;
}
@@ -257,7 +257,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
unsigned long __percpu *percpu_count;
bool ret = false;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
@@ -266,7 +266,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
ret = atomic_long_inc_not_zero(&ref->count);
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
return ret;
}
@@ -285,14 +285,14 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
ref->release(ref);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
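
The percpu-refcount hunks above switch the helpers from sched-RCU to plain RCU
internally; the calling pattern is unchanged. A minimal sketch of a typical
tryget/put pairing (do_io and the error code are illustrative):

static int do_io(struct percpu_ref *ref)
{
	if (!percpu_ref_tryget_live(ref))
		return -ENODEV;	/* ref has begun dying */

	/* ... issue the operation under the reference ... */

	percpu_ref_put(ref);
	return 0;
}
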
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 15032f145063..56d3a100006a 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -38,7 +38,8 @@ enum phy_mode {
PHY_MODE_PCIE,
PHY_MODE_ETHERNET,
PHY_MODE_MIPI_DPHY,
- PHY_MODE_SATA
+ PHY_MODE_SATA,
+ PHY_MODE_LVDS,
};
/**
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index ee59562c8354..1235865e7e2c 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -18,5 +18,7 @@ int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int port, bool idle);
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
-
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val);
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 5c626fdc10db..44f2245debda 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -30,9 +30,10 @@ struct pipe_buffer {
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
* @wait: reader/writer wait point in case of empty/full pipe
- * @nrbufs: the number of non-empty pipe buffers in this pipe
- * @buffers: total number of buffers (should be a power of 2)
- * @curbuf: the current pipe buffer entry
+ * @head: The point of buffer production
+ * @tail: The point of buffer consumption
+ * @max_usage: The maximum number of slots that may be used in the ring
+ * @ring_size: total number of buffers (should be a power of 2)
* @tmp_page: cached released page
* @readers: number of current readers of this pipe
* @writers: number of current writers of this pipe
@@ -48,7 +49,10 @@ struct pipe_buffer {
struct pipe_inode_info {
struct mutex mutex;
wait_queue_head_t wait;
- unsigned int nrbufs, curbuf, buffers;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int max_usage;
+ unsigned int ring_size;
unsigned int readers;
unsigned int writers;
unsigned int files;
@@ -105,6 +109,58 @@ struct pipe_buf_operations {
};
/**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline bool pipe_empty(unsigned int head, unsigned int tail)
+{
+ return head == tail;
+}
+
+/**
+ * pipe_occupancy - Return number of slots used in the pipe
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
+{
+ return head - tail;
+}
+
+/**
+ * pipe_full - Return true if the pipe is full
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ * @limit: The maximum number of slots available.
+ */
+static inline bool pipe_full(unsigned int head, unsigned int tail,
+ unsigned int limit)
+{
+ return pipe_occupancy(head, tail) >= limit;
+}
+
+/**
+ * pipe_space_for_user - Return number of slots available to userspace
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ * @pipe: The pipe info structure
+ */
+static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
+ struct pipe_inode_info *pipe)
+{
+ unsigned int p_occupancy, p_space;
+
+ p_occupancy = pipe_occupancy(head, tail);
+ if (p_occupancy >= pipe->max_usage)
+ return 0;
+ p_space = pipe->ring_size - p_occupancy;
+ if (p_space > pipe->max_usage)
+ p_space = pipe->max_usage;
+ return p_space;
+}
+
+/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
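
The pipe rework above replaces the nrbufs/curbuf/buffers counters with a
head/tail ring. A minimal consumer sketch using the new accessors, assuming
ring_size is a power of two (as the kerneldoc above says it should be) and
leaving the actual buffer handling illustrative:

static void drain_pipe(struct pipe_inode_info *pipe)
{
	unsigned int head = pipe->head;
	unsigned int tail = pipe->tail;
	unsigned int mask = pipe->ring_size - 1;

	while (!pipe_empty(head, tail)) {
		struct pipe_buffer *buf = &pipe->bufs[tail & mask];

		/* ... consume *buf ... */
		tail++;
	}
	pipe->tail = tail;
}
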
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 98415686cbfa..69210881ebac 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -556,6 +556,9 @@ enum host_event_code {
/* Keyboard recovery combo with hardware reinitialization */
EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30,
+ /* WoV */
+ EC_HOST_EVENT_WOV = 31,
+
/*
* The high bit of the event mask is not used as a host event code. If
* it reads back as set, then the entire event mask should be
@@ -1277,8 +1280,6 @@ enum ec_feature_code {
* MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
*/
EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37,
- /* EC supports audio codec. */
- EC_FEATURE_AUDIO_CODEC = 38,
/* The MCU is a System Companion Processor (SCP). */
EC_FEATURE_SCP = 39,
/* The MCU is an Integrated Sensor Hub */
@@ -4468,92 +4469,246 @@ enum mkbp_cec_event {
/*****************************************************************************/
-/* Commands for I2S recording on audio codec. */
+/* Commands for audio codec. */
+#define EC_CMD_EC_CODEC 0x00BC
-#define EC_CMD_CODEC_I2S 0x00BC
-#define EC_WOV_I2S_SAMPLE_RATE 48000
+enum ec_codec_subcmd {
+ EC_CODEC_GET_CAPABILITIES = 0x0,
+ EC_CODEC_GET_SHM_ADDR = 0x1,
+ EC_CODEC_SET_SHM_ADDR = 0x2,
+ EC_CODEC_SUBCMD_COUNT,
+};
-enum ec_codec_i2s_subcmd {
- EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
- EC_CODEC_SET_GAIN = 0x1,
- EC_CODEC_GET_GAIN = 0x2,
- EC_CODEC_I2S_ENABLE = 0x3,
- EC_CODEC_I2S_SET_CONFIG = 0x4,
- EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
- EC_CODEC_I2S_SET_BCLK = 0x6,
- EC_CODEC_I2S_SUBCMD_COUNT = 0x7,
+enum ec_codec_cap {
+ EC_CODEC_CAP_WOV_AUDIO_SHM = 0,
+ EC_CODEC_CAP_WOV_LANG_SHM = 1,
+ EC_CODEC_CAP_LAST = 32,
};
-enum ec_sample_depth_value {
- EC_CODEC_SAMPLE_DEPTH_16 = 0,
- EC_CODEC_SAMPLE_DEPTH_24 = 1,
+enum ec_codec_shm_id {
+ EC_CODEC_SHM_ID_WOV_AUDIO = 0x0,
+ EC_CODEC_SHM_ID_WOV_LANG = 0x1,
+ EC_CODEC_SHM_ID_LAST,
};
-enum ec_i2s_config {
- EC_DAI_FMT_I2S = 0,
- EC_DAI_FMT_RIGHT_J = 1,
- EC_DAI_FMT_LEFT_J = 2,
- EC_DAI_FMT_PCM_A = 3,
- EC_DAI_FMT_PCM_B = 4,
- EC_DAI_FMT_PCM_TDM = 5,
+enum ec_codec_shm_type {
+ EC_CODEC_SHM_TYPE_EC_RAM = 0x0,
+ EC_CODEC_SHM_TYPE_SYSTEM_RAM = 0x1,
};
-/*
- * For subcommand EC_CODEC_GET_GAIN.
- */
-struct __ec_align1 ec_codec_i2s_gain {
+struct __ec_align1 ec_param_ec_codec_get_shm_addr {
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_set_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec {
+ uint8_t cmd; /* enum ec_codec_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_get_shm_addr
+ get_shm_addr_param;
+ struct ec_param_ec_codec_set_shm_addr
+ set_shm_addr_param;
+ };
+};
+
+struct __ec_align4 ec_response_ec_codec_get_capabilities {
+ uint32_t capabilities;
+};
+
+struct __ec_align4 ec_response_ec_codec_get_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t type;
+ uint8_t reserved[3];
+};
+
+/*****************************************************************************/
+
+/* Commands for DMIC on audio codec. */
+#define EC_CMD_EC_CODEC_DMIC 0x00BD
+
+enum ec_codec_dmic_subcmd {
+ EC_CODEC_DMIC_GET_MAX_GAIN = 0x0,
+ EC_CODEC_DMIC_SET_GAIN_IDX = 0x1,
+ EC_CODEC_DMIC_GET_GAIN_IDX = 0x2,
+ EC_CODEC_DMIC_SUBCMD_COUNT,
+};
+
+enum ec_codec_dmic_channel {
+ EC_CODEC_DMIC_CHANNEL_0 = 0x0,
+ EC_CODEC_DMIC_CHANNEL_1 = 0x1,
+ EC_CODEC_DMIC_CHANNEL_2 = 0x2,
+ EC_CODEC_DMIC_CHANNEL_3 = 0x3,
+ EC_CODEC_DMIC_CHANNEL_4 = 0x4,
+ EC_CODEC_DMIC_CHANNEL_5 = 0x5,
+ EC_CODEC_DMIC_CHANNEL_6 = 0x6,
+ EC_CODEC_DMIC_CHANNEL_7 = 0x7,
+ EC_CODEC_DMIC_CHANNEL_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_set_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t gain;
+ uint8_t reserved[2];
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_get_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_dmic {
+ uint8_t cmd; /* enum ec_codec_dmic_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_dmic_set_gain_idx
+ set_gain_idx_param;
+ struct ec_param_ec_codec_dmic_get_gain_idx
+ get_gain_idx_param;
+ };
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_max_gain {
+ uint8_t max_gain;
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_gain_idx {
+ uint8_t gain;
+};
+
+/*****************************************************************************/
+
+/* Commands for I2S RX on audio codec. */
+
+#define EC_CMD_EC_CODEC_I2S_RX 0x00BE
+
+enum ec_codec_i2s_rx_subcmd {
+ EC_CODEC_I2S_RX_ENABLE = 0x0,
+ EC_CODEC_I2S_RX_DISABLE = 0x1,
+ EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
+ EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
+ EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_SUBCMD_COUNT,
+};
+
+enum ec_codec_i2s_rx_sample_depth {
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_16 = 0x0,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_24 = 0x1,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_COUNT,
+};
+
+enum ec_codec_i2s_rx_daifmt {
+ EC_CODEC_I2S_RX_DAIFMT_I2S = 0x0,
+ EC_CODEC_I2S_RX_DAIFMT_RIGHT_J = 0x1,
+ EC_CODEC_I2S_RX_DAIFMT_LEFT_J = 0x2,
+ EC_CODEC_I2S_RX_DAIFMT_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_sample_depth {
+ uint8_t depth;
+ uint8_t reserved[3];
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_gain {
uint8_t left;
uint8_t right;
+ uint8_t reserved[2];
};
-struct __ec_todo_unpacked ec_param_codec_i2s_tdm {
- int16_t ch0_delay; /* 0 to 496 */
- int16_t ch1_delay; /* -1 to 496 */
- uint8_t adjacent_to_ch0;
- uint8_t adjacent_to_ch1;
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_daifmt {
+ uint8_t daifmt;
+ uint8_t reserved[3];
};
-struct __ec_todo_packed ec_param_codec_i2s {
- /* enum ec_codec_i2s_subcmd */
- uint8_t cmd;
+struct __ec_align4 ec_param_ec_codec_i2s_rx_set_bclk {
+ uint32_t bclk;
+};
+
+struct __ec_align4 ec_param_ec_codec_i2s_rx {
+ uint8_t cmd; /* enum ec_codec_i2s_rx_subcmd */
+ uint8_t reserved[3];
+
union {
- /*
- * EC_CODEC_SET_SAMPLE_DEPTH
- * Value should be one of ec_sample_depth_value.
- */
- uint8_t depth;
+ struct ec_param_ec_codec_i2s_rx_set_sample_depth
+ set_sample_depth_param;
+ struct ec_param_ec_codec_i2s_rx_set_daifmt
+ set_daifmt_param;
+ struct ec_param_ec_codec_i2s_rx_set_bclk
+ set_bclk_param;
+ };
+};
- /*
- * EC_CODEC_SET_GAIN
- * Value should be 0~43 for both channels.
- */
- struct ec_codec_i2s_gain gain;
+/*****************************************************************************/
+/* Commands for WoV on audio codec. */
+
+#define EC_CMD_EC_CODEC_WOV 0x00BF
+
+enum ec_codec_wov_subcmd {
+ EC_CODEC_WOV_SET_LANG = 0x0,
+ EC_CODEC_WOV_SET_LANG_SHM = 0x1,
+ EC_CODEC_WOV_GET_LANG = 0x2,
+ EC_CODEC_WOV_ENABLE = 0x3,
+ EC_CODEC_WOV_DISABLE = 0x4,
+ EC_CODEC_WOV_READ_AUDIO = 0x5,
+ EC_CODEC_WOV_READ_AUDIO_SHM = 0x6,
+ EC_CODEC_WOV_SUBCMD_COUNT,
+};
- /*
- * EC_CODEC_I2S_ENABLE
- * 1 to enable, 0 to disable.
- */
- uint8_t i2s_enable;
+/*
+ * @hash is SHA256 of the whole language model.
+ * @total_len indicates the length of the whole language model.
+ * @offset is the cursor from the beginning of the model.
+ * @buf is the packet buffer.
+ * @len denotes how many bytes are in @buf.
+ */
+struct __ec_align4 ec_param_ec_codec_wov_set_lang {
+ uint8_t hash[32];
+ uint32_t total_len;
+ uint32_t offset;
+ uint8_t buf[128];
+ uint32_t len;
+};
- /*
- * EC_CODEC_I2S_SET_CONFIG
- * Value should be one of ec_i2s_config.
- */
- uint8_t i2s_config;
+struct __ec_align4 ec_param_ec_codec_wov_set_lang_shm {
+ uint8_t hash[32];
+ uint32_t total_len;
+};
- /*
- * EC_CODEC_I2S_SET_TDM_CONFIG
- * Value should be one of ec_i2s_config.
- */
- struct ec_param_codec_i2s_tdm tdm_param;
+struct __ec_align4 ec_param_ec_codec_wov {
+ uint8_t cmd; /* enum ec_codec_wov_subcmd */
+ uint8_t reserved[3];
- /*
- * EC_CODEC_I2S_SET_BCLK
- */
- uint32_t bclk;
+ union {
+ struct ec_param_ec_codec_wov_set_lang
+ set_lang_param;
+ struct ec_param_ec_codec_wov_set_lang_shm
+ set_lang_shm_param;
};
};
+struct __ec_align4 ec_response_ec_codec_wov_get_lang {
+ uint8_t hash[32];
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio {
+ uint8_t buf[128];
+ uint32_t len;
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio_shm {
+ uint32_t offset;
+ uint32_t len;
+};
/*****************************************************************************/
/* System commands */
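
The audio-codec rework above splits the old EC_CMD_CODEC_I2S into per-block
commands, each taking a tagged union of subcommand parameters. A minimal
sketch of building one request; the surrounding transport plumbing (the usual
cros_ec message send) is assumed and omitted:

struct ec_param_ec_codec p = {
	.cmd = EC_CODEC_GET_SHM_ADDR,
	.get_shm_addr_param = {
		.shm_id = EC_CODEC_SHM_ID_WOV_AUDIO,
	},
};

/*
 * On success the EC replies with struct ec_response_ec_codec_get_shm_addr,
 * whose type field says whether phys_addr lives in EC RAM or system RAM.
 */
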
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index eab7036cda09..30098a551523 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -12,6 +12,7 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/mfd/cros_ec.h>
#include <linux/platform_data/cros_ec_commands.h>
#define CROS_EC_DEV_NAME "cros_ec"
@@ -115,12 +116,16 @@ struct cros_ec_command {
* code.
* @pkt_xfer: Send packet to EC and get response.
* @lock: One transaction at a time.
- * @mkbp_event_supported: True if this EC supports the MKBP event protocol.
+ * @mkbp_event_supported: 0 if MKBP is not supported. Otherwise its value is
+ * the maximum supported version of the MKBP host event
+ * command + 1.
* @host_sleep_v1: True if this EC supports the sleep v1 command.
* @event_notifier: Interrupt event notifier for transport devices.
* @event_data: Raw payload transferred with the MKBP event.
* @event_size: Size in bytes of the event data.
* @host_event_wake_mask: Mask of host events that cause wake from suspend.
+ * @last_event_time: Exact time, taken from the hard irq, at which we were
+ *                   notified of a new event.
* @ec: The platform_device used by the mfd driver to interface with the
* main EC.
* @pd: The platform_device used by the mfd driver to interface with the
@@ -153,7 +158,7 @@ struct cros_ec_device {
int (*pkt_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
struct mutex lock;
- bool mkbp_event_supported;
+ u8 mkbp_event_supported;
bool host_sleep_v1;
struct blocking_notifier_head event_notifier;
@@ -161,6 +166,7 @@ struct cros_ec_device {
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
+ ktime_t last_event_time;
/* The platform devices used by the mfd driver */
struct platform_device *ec;
@@ -168,14 +174,6 @@ struct cros_ec_device {
};
/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
- * @sensor_num: Id of the sensor, as reported by the EC.
- */
-struct cros_ec_sensor_platform {
- u8 sensor_num;
-};
-
-/**
* struct cros_ec_platform - ChromeOS EC platform information.
* @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
* used in /dev/ and sysfs.
@@ -187,133 +185,51 @@ struct cros_ec_platform {
u16 cmd_offset;
};
-/**
- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
- * @ec_dev: Device to suspend.
- *
- * This can be called by drivers to handle a suspend event.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_suspend(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
- * @ec_dev: Device to resume.
- *
- * This can be called by drivers to handle a resume event.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_resume(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
- * @ec_dev: Device to register.
- * @msg: Message to write.
- *
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_check_result() - Check ec_msg->result.
- * @ec_dev: EC device.
- * @msg: Message to check.
- *
- * This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * Call this to send a command to the ChromeOS EC. This should be used
- * instead of calling the EC's cmd_xfer() callback directly.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * This function is identical to cros_ec_cmd_xfer, except it returns success
- * status only if both the command was transmitted successfully and the EC
- * replied with success status. It's not necessary to check msg->result when
- * using this function.
- *
- * Return: The number of bytes transferred on success or negative error code.
- */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
- * @ec_dev: Device to register.
- *
- * Before calling this, allocate a pointer to a new device and then fill
- * in all the fields up to the --private-- marker.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_register(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_unregister() - Remove a ChromeOS EC.
- * @ec_dev: Device to unregister.
- *
- * Call this to deregister a ChromeOS EC, then clean up any private data.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_unregister(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_query_all() - Query the protocol version supported by the
- * ChromeOS EC.
- * @ec_dev: Device to register.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_query_all(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
- * @ec_dev: Device to fetch event from.
- * @wake_event: Pointer to a bool set to true upon return if the event might be
- * treated as a wake event. Ignored if null.
- *
- * Return: negative error code on errors; 0 for no data; or else number of
- * bytes received (i.e., an event was retrieved successfully). Event types are
- * written out to @ec_dev->event_data.event_type on success.
- */
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events);
+
+u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
+
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
+
+bool cros_ec_handle_event(struct cros_ec_device *ec_dev);
/**
- * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
- * @ec_dev: Device to fetch event from.
+ * cros_ec_get_time_ns() - Return time in ns.
*
- * When MKBP is supported, when the EC raises an interrupt, we collect the
- * events raised and call the functions in the ec notifier. This function
- * is a helper to know which events are raised.
+ * This is the function used to record the time for last_event_time in struct
+ * cros_ec_device during the hard irq.
*
- * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
+ * Return: the time since boot, in ktime_t format.
*/
-u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
+static inline ktime_t cros_ec_get_time_ns(void)
+{
+ return ktime_get_boottime_ns();
+}
#endif /* __LINUX_CROS_EC_PROTO_H */
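
The cros_ec_proto.h changes above thread two new pieces of state through the
event path: last_event_time, stamped in the hard irq via cros_ec_get_time_ns(),
and a has_more_events flag so the threaded handler can drain the EC queue. A
minimal sketch of that flow using <linux/interrupt.h> handlers; the new
cros_ec_handle_event() presumably wraps something like the loop below, and the
handler names are illustrative:

static irqreturn_t ec_hard_irq(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;

	ec_dev->last_event_time = cros_ec_get_time_ns();
	return IRQ_WAKE_THREAD;
}

static irqreturn_t ec_threaded_irq(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;
	bool wake_event, has_more_events;

	do {
		if (cros_ec_get_next_event(ec_dev, &wake_event,
					   &has_more_events) > 0)
			blocking_notifier_call_chain(&ec_dev->event_notifier,
						     0, ec_dev);
	} while (has_more_events);

	return IRQ_HANDLED;
}
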
diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h
new file mode 100644
index 000000000000..bef7ffc7fce1
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_sensorhub.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Chrome OS EC MEMS Sensor Hub driver.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+#define __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+
+#include <linux/platform_data/cros_ec_commands.h>
+
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
+/**
+ * struct cros_ec_sensorhub - Sensor Hub device data.
+ *
+ * @ec: Embedded Controller where the hub is located.
+ */
+struct cros_ec_sensorhub {
+ struct cros_ec_dev *ec;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H */
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 34179d600360..1a8b5b1946fe 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -9,9 +9,6 @@ struct device;
struct gpio_backlight_platform_data {
struct device *fbdev;
- int gpio;
- int def_value;
- const char *name;
};
#endif
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index e79d238ff18f..7124a5f4bf06 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -67,9 +67,6 @@ struct omap_hsmmc_platform_data {
/* string specifying a particular variant of hardware */
char *version;
- /* if we have special card, init it using this callback */
- void (*init_card)(struct mmc_card *card);
-
const char *name;
u32 ocr_mask;
};
diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h
index cb290092599c..6a9b28399b39 100644
--- a/include/linux/platform_data/i2c-pxa.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -55,11 +55,7 @@
*/
#define I2C_ISR_INIT 0x7FF /* status register init */
-struct i2c_slave_client;
-
struct i2c_pxa_platform_data {
- unsigned int slave_addr;
- struct i2c_slave_client *slave;
unsigned int class;
unsigned int use_pio :1;
unsigned int fast_mode :1;
diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
deleted file mode 100644
index 4ab3cd6f1cc2..000000000000
--- a/include/linux/platform_data/pixcir_i2c_ts.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PIXCIR_I2C_TS_H
-#define _PIXCIR_I2C_TS_H
-
-/*
- * Register map
- */
-#define PIXCIR_REG_POWER_MODE 51
-#define PIXCIR_REG_INT_MODE 52
-
-/*
- * Power modes:
- * active: max scan speed
- * idle: lower scan speed with automatic transition to active on touch
- * halt: datasheet says sleep but this is more like halt as the chip
- * clocks are cut and it can only be brought out of this mode
- * using the RESET pin.
- */
-enum pixcir_power_mode {
- PIXCIR_POWER_ACTIVE,
- PIXCIR_POWER_IDLE,
- PIXCIR_POWER_HALT,
-};
-
-#define PIXCIR_POWER_MODE_MASK 0x03
-#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * diff coordinates: interrupt is asserted when coordinates change
- * level on touch: interrupt level asserted during touch
- * pulse on touch: interrupt pulse asserted druing touch
- *
- */
-enum pixcir_int_mode {
- PIXCIR_INT_PERIODICAL,
- PIXCIR_INT_DIFF_COORD,
- PIXCIR_INT_LEVEL_TOUCH,
- PIXCIR_INT_PULSE_TOUCH,
-};
-
-#define PIXCIR_INT_MODE_MASK 0x03
-#define PIXCIR_INT_ENABLE (1UL << 3)
-#define PIXCIR_INT_POL_HIGH (1UL << 2)
-
-/**
- * struct pixcir_irc_chip_data - chip related data
- * @max_fingers: Max number of fingers reported simultaneously by h/w
- * @has_hw_ids: Hardware supports finger tracking IDs
- *
- */
-struct pixcir_i2c_chip_data {
- u8 max_fingers;
- bool has_hw_ids;
-};
-
-struct pixcir_ts_platform_data {
- int x_max;
- int y_max;
- struct pixcir_i2c_chip_data chip;
-};
-
-#endif
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 30929c22227d..e40b28ca892e 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -18,12 +18,14 @@
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
+ * @wakeup_source: enable/disable device as wakeup generator.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
bool spi_3wire;
bool pullups;
+ bool wakeup_source;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h
index ad03b586a095..afede15a95bf 100644
--- a/include/linux/platform_data/wilco-ec.h
+++ b/include/linux/platform_data/wilco-ec.h
@@ -29,6 +29,7 @@
* @data_size: Size of the data buffer used for EC communication.
* @debugfs_pdev: The child platform_device used by the debugfs sub-driver.
* @rtc_pdev: The child platform_device used by the RTC sub-driver.
+ * @charger_pdev: Child platform_device used by the charger config sub-driver.
* @telem_pdev: The child platform_device used by the telemetry sub-driver.
*/
struct wilco_ec_device {
@@ -41,6 +42,7 @@ struct wilco_ec_device {
size_t data_size;
struct platform_device *debugfs_pdev;
struct platform_device *rtc_pdev;
+ struct platform_device *charger_pdev;
struct platform_device *telem_pdev;
};
@@ -120,6 +122,19 @@ struct wilco_ec_message {
*/
int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg);
+/**
+ * wilco_keyboard_leds_init() - Set up the keyboard backlight LEDs.
+ * @ec: EC device to query.
+ *
+ * After this call, the keyboard backlight will be exposed through an LED
+ * device at /sys/class/leds.
+ *
+ * This may sleep because it uses wilco_ec_mailbox().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec);
+
/*
* A Property is typically a data item that is stored to NVRAM
* by the EC. Each of these data items has an index associated
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index f2688404d1cd..276a03c24691 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -57,6 +57,12 @@ platform_find_device_by_driver(struct device *start,
extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
+extern void __iomem *
+devm_platform_ioremap_resource_wc(struct platform_device *pdev,
+ unsigned int index);
+extern void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name);
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
@@ -294,58 +300,6 @@ void platform_unregister_drivers(struct platform_driver * const *drivers,
#define platform_register_drivers(drivers, count) \
__platform_register_drivers(drivers, count, THIS_MODULE)
-/* early platform driver interface */
-struct early_platform_driver {
- const char *class_str;
- struct platform_driver *pdrv;
- struct list_head list;
- int requested_id;
- char *buffer;
- int bufsize;
-};
-
-#define EARLY_PLATFORM_ID_UNSET -2
-#define EARLY_PLATFORM_ID_ERROR -3
-
-extern int early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf);
-extern void early_platform_add_devices(struct platform_device **devs, int num);
-
-static inline int is_early_platform_device(struct platform_device *pdev)
-{
- return !pdev->dev.driver;
-}
-
-extern void early_platform_driver_register_all(char *class_str);
-extern int early_platform_driver_probe(char *class_str,
- int nr_probe, int user_only);
-extern void early_platform_cleanup(void);
-
-#define early_platform_init(class_string, platdrv) \
- early_platform_init_buffer(class_string, platdrv, NULL, 0)
-
-#ifndef MODULE
-#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
-static __initdata struct early_platform_driver early_driver = { \
- .class_str = class_string, \
- .buffer = buf, \
- .bufsize = bufsiz, \
- .pdrv = platdrv, \
- .requested_id = EARLY_PLATFORM_ID_UNSET, \
-}; \
-static int __init early_platform_driver_setup_func(char *buffer) \
-{ \
- return early_platform_driver_register(&early_driver, buffer); \
-} \
-early_param(class_string, early_platform_driver_setup_func)
-#else /* MODULE */
-#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
-static inline char *early_platform_driver_setup_func(void) \
-{ \
- return bufsiz ? buf : NULL; \
-}
-#endif /* MODULE */
-
#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
extern int platform_pm_resume(struct device *dev);
@@ -380,4 +334,16 @@ extern int platform_dma_configure(struct device *dev);
#define USE_PLATFORM_PM_SLEEP_OPS
#endif
+#ifndef CONFIG_SUPERH
+/*
+ * REVISIT: This stub is needed for all non-SuperH users of early platform
+ * drivers. It should go away once we introduce the new platform_device-based
+ * early driver framework.
+ */
+static inline int is_sh_early_platform_device(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_SUPERH */
+
#endif /* _PLATFORM_DEVICE_H_ */
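
The platform_device.h hunk above adds write-combining and by-name variants of
devm_platform_ioremap_resource(). A minimal probe sketch using the by-name
form (the "ctrl" resource name and probe body are illustrative):

static int my_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the block through base ... */
	return 0;
}
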
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 4c441be03079..e057d1fa2469 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -637,6 +637,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* struct dev_pm_domain - power management domain representation.
*
* @ops: Power management operations associated with this domain.
+ * @start: Called when a user needs to start the device via the domain.
* @detach: Called when removing a device from the domain.
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
@@ -648,6 +649,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ int (*start)(struct device *dev);
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index baf02ff91a31..5a31c711b896 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -366,6 +366,7 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
struct device *dev_pm_domain_attach_by_name(struct device *dev,
const char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
+int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
@@ -383,6 +384,10 @@ static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline int dev_pm_domain_start(struct device *dev)
+{
+ return 0;
+}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
#endif
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index b8197ab014f2..747861816f4f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -22,6 +22,7 @@ struct opp_table;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADJUST_VOLTAGE,
};
/**
@@ -113,6 +114,10 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq,
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max);
+
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
@@ -242,6 +247,14 @@ static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
}
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ return 0;
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
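
The OPP hunk above adds dev_pm_opp_adjust_voltage() plus a matching
OPP_EVENT_ADJUST_VOLTAGE notification. A minimal sketch, assuming an existing
800 MHz OPP and illustrative microvolt values (target, then min and max):

static int trim_opp_voltage(struct device *dev)
{
	return dev_pm_opp_adjust_voltage(dev, 800000000,
					 900000, 880000, 920000);
}
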
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index d0b37e937037..971c9264179e 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -293,6 +293,9 @@ struct omap_sr_data {
struct voltagedomain *voltdm;
};
+
+extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR];
+
#ifdef CONFIG_POWER_AVS_OMAP
/* Smartreflex module enable/disable interface */
diff --git a/include/linux/property.h b/include/linux/property.h
index 9b3d4ca3a73a..48335288c2a9 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -22,7 +22,6 @@ enum dev_prop_type {
DEV_PROP_U32,
DEV_PROP_U64,
DEV_PROP_STRING,
- DEV_PROP_MAX,
};
enum dev_dma_attr {
@@ -80,9 +79,14 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *name,
unsigned int index);
+const char *fwnode_get_name(const struct fwnode_handle *fwnode);
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(
struct fwnode_handle *fwnode);
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
+ unsigned int depth);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(
@@ -234,13 +238,7 @@ struct property_entry {
bool is_array;
enum dev_prop_type type;
union {
- union {
- const u8 *u8_data;
- const u16 *u16_data;
- const u32 *u32_data;
- const u64 *u64_data;
- const char * const *str;
- } pointer;
+ const void *pointer;
union {
u8 u8_data;
u16 u16_data;
@@ -252,62 +250,63 @@ struct property_entry {
};
/*
- * Note: the below four initializers for the anonymous union are carefully
+ * Note: the below initializers for the anonymous union are carefully
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
-#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _Type_, _val_) \
+#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \
+ sizeof(((struct property_entry *)NULL)->value._elem_)
+
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
(struct property_entry) { \
.name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
+ .length = (_len_) * __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
.is_array = true, \
.type = DEV_PROP_##_Type_, \
- { .pointer = { ._type_##_data = _val_ } }, \
+ { .pointer = _val_ }, \
}
-#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, U8, _val_)
-#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, U16, _val_)
-#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, U32, _val_)
-#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, U64, _val_)
-
-#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
- .is_array = true, \
- .type = DEV_PROP_STRING, \
- { .pointer = { .str = _val_ } }, \
-}
-
-#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _Type_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(_type_), \
- .type = DEV_PROP_##_Type_, \
- { .value = { ._type_##_data = _val_ } }, \
+#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
+#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u16_data, U16, _val_, _len_)
+#define PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u32_data, U32, _val_, _len_)
+#define PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
+#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+
+#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+
+#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ .type = DEV_PROP_##_Type_, \
+ { .value = { ._elem_ = _val_ } }, \
}
-#define PROPERTY_ENTRY_U8(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u8, U8, _val_)
-#define PROPERTY_ENTRY_U16(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u16, U16, _val_)
-#define PROPERTY_ENTRY_U32(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u32, U32, _val_)
-#define PROPERTY_ENTRY_U64(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u64, U64, _val_)
-
-#define PROPERTY_ENTRY_STRING(_name_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(const char *), \
- .type = DEV_PROP_STRING, \
- { .value = { .str = _val_ } }, \
-}
+#define PROPERTY_ENTRY_U8(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u8_data, U8, _val_)
+#define PROPERTY_ENTRY_U16(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u16_data, U16, _val_)
+#define PROPERTY_ENTRY_U32(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u32_data, U32, _val_)
+#define PROPERTY_ENTRY_U64(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u64_data, U64, _val_)
+#define PROPERTY_ENTRY_STRING(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_)
#define PROPERTY_ENTRY_BOOL(_name_) \
(struct property_entry) { \
@@ -418,7 +417,8 @@ struct software_node {
};
bool is_software_node(const struct fwnode_handle *fwnode);
-const struct software_node *to_software_node(struct fwnode_handle *fwnode);
+const struct software_node *
+to_software_node(const struct fwnode_handle *fwnode);
struct fwnode_handle *software_node_fwnode(const struct software_node *node);
const struct software_node *
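
The property.h rework above folds the typed pointer union into a single
const void * and derives element sizes from the value union, adding _LEN
variants for data that is not a plain array. A minimal sketch of the macros
in use (names and values are illustrative):

static const u32 chan_map[] = { 0, 1, 2, 3 };

static const struct property_entry my_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_U32_ARRAY("channel-map", chan_map),
	PROPERTY_ENTRY_U32_ARRAY_LEN("channel-map-short", chan_map, 2),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }	/* sentinel */
};
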
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 2d5eff506e13..ffd72b3b14ee 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -58,6 +58,7 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
#else
@@ -97,6 +98,7 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; }
static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
#endif
diff --git a/include/linux/quota.h b/include/linux/quota.h
index f32dd270b8e3..27aab84fcbaa 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -263,7 +263,7 @@ enum {
};
struct dqstats {
- int stat[_DQST_DQSTAT_LAST];
+ unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 185d94829701..9cf0cd3dc88c 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -54,6 +54,16 @@ static inline struct dquot *dqgrab(struct dquot *dquot)
atomic_inc(&dquot->dq_count);
return dquot;
}
+
+static inline bool dquot_is_busy(struct dquot *dquot)
+{
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return true;
+ if (atomic_read(&dquot->dq_count) > 1)
+ return true;
+ return false;
+}
+
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
@@ -87,7 +97,9 @@ int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
-int dquot_enable(struct inode *inode, int type, int format_id,
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ unsigned int flags);
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
unsigned int flags);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
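
The quotaops.h hunk above adds dquot_is_busy() (dirty, or referenced by more
than the caller) and splits dquot_enable() into sb- and inode-based loaders.
A minimal sketch of the new predicate, assuming the caller already holds
whatever locking the dquot lifecycle requires:

static bool can_reclaim_dquot(struct dquot *dquot)
{
	/* Dirty, or still referenced by someone else: leave it alone. */
	return !dquot_is_busy(dquot);
}
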
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
index 06da59b23b79..ff0339df56af 100644
--- a/include/linux/resource_ext.h
+++ b/include/linux/resource_ext.h
@@ -66,4 +66,16 @@ resource_list_destroy_entry(struct resource_entry *entry)
#define resource_list_for_each_entry_safe(entry, tmp, list) \
list_for_each_entry_safe((entry), (tmp), (list), node)
+static inline struct resource_entry *
+resource_list_first_type(struct list_head *list, unsigned long type)
+{
+ struct resource_entry *entry;
+
+ resource_list_for_each_entry(entry, list) {
+ if (resource_type(entry->res) == type)
+ return entry;
+ }
+ return NULL;
+}
+
#endif /* _LINUX_RESOURCE_EXT_H */
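
The resource_ext.h hunk above adds a small search helper. A minimal sketch:
pick the first memory window out of a host bridge's resource list (the
wrapper name is illustrative):

static struct resource *first_mem_window(struct list_head *resources)
{
	struct resource_entry *entry;

	entry = resource_list_first_type(resources, IORESOURCE_MEM);
	return entry ? entry->res : NULL;
}
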
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index df666cf29ef1..4e9d3c71addb 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -159,11 +159,16 @@ struct rtc_device {
};
#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
+#define rtc_lock(d) mutex_lock(&d->ops_lock)
+#define rtc_unlock(d) mutex_unlock(&d->ops_lock)
+
/* useful timestamps */
+#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2199 7258118399LL /* 2199-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */
extern struct rtc_device *devm_rtc_device_register(struct device *dev,
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 43aec568ba7c..67ee9d20cc5a 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -42,14 +42,11 @@
struct ds1685_priv {
struct rtc_device *dev;
void __iomem *regs;
+ void __iomem *data;
u32 regstep;
- resource_size_t baseaddr;
- size_t size;
int irq_num;
bool bcd_mode;
bool no_irq;
- bool uie_unsupported;
- bool alloc_io_resources;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
@@ -74,12 +71,13 @@ struct ds1685_rtc_platform_data {
const bool bcd_mode;
const bool no_irq;
const bool uie_unsupported;
- const bool alloc_io_resources;
- u8 (*plat_read)(struct ds1685_priv *, int);
- void (*plat_write)(struct ds1685_priv *, int, u8);
void (*plat_prepare_poweroff)(void);
void (*plat_wake_alarm)(void);
void (*plat_post_ram_clear)(void);
+ enum {
+ ds1685_reg_direct,
+ ds1685_reg_indirect
+ } access_type;
};
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index f87da30a58b1..65b8142a7fed 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1262,6 +1262,7 @@ struct rtsx_pcr {
#define PID_5250 0x5250
#define PID_525A 0x525A
#define PID_5260 0x5260
+#define PID_5261 0x5261
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 07e68d9f5dc4..0cd97d9dd021 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -862,7 +862,7 @@ struct task_struct {
u64 start_time;
/* Boot based time in nsecs: */
- u64 real_start_time;
+ u64 start_boottime;
/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
unsigned long min_flt;
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 84868d37b35d..03583b6d1416 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -33,10 +33,10 @@ struct seccomp {
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
-static inline int secure_computing(const struct seccomp_data *sd)
+static inline int secure_computing(void)
{
if (unlikely(test_thread_flag(TIF_SECCOMP)))
- return __secure_computing(sd);
+ return __secure_computing(NULL);
return 0;
}
#else
@@ -59,7 +59,7 @@ struct seccomp { };
struct seccomp_filter { };
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-static inline int secure_computing(struct seccomp_data *sd) { return 0; }
+static inline int secure_computing(void) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
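
With the argument dropped, architectures let the seccomp core fetch the syscall state itself instead of passing a possibly stale seccomp_data. A simplified sketch of the syscall-entry pattern this enables (modeled on, but not copied from, real arch entry code):

#include <linux/seccomp.h>

/* Simplified syscall-entry hook. */
static long example_syscall_trace_enter(void)
{
	/* seccomp may have skipped or killed the syscall */
	if (secure_computing() == -1)
		return -1;
	return 0;
}
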
diff --git a/include/linux/security.h b/include/linux/security.h
index 06ff66834501..3e8d4bacd59d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -117,12 +117,14 @@ enum lockdown_reason {
LOCKDOWN_MODULE_PARAMETERS,
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
+ LOCKDOWN_XMON_WR,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
LOCKDOWN_BPF_READ,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
+ LOCKDOWN_XMON_RW,
LOCKDOWN_CONFIDENTIALITY_MAX,
};
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index aa5deb041c25..fb0205d87d3c 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -125,6 +125,9 @@ extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len);
extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
+extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
extern int
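
seq_buf_hex_dump() mirrors print_hex_dump() but targets a seq_buf, which the trace_seq counterpart further down builds on. A small sketch, assuming the usual DUMP_PREFIX_* constants from printk.h:

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void example_dump(struct seq_buf *s, const void *buf, size_t len)
{
	/* 16 bytes per row, 1-byte groups, with an ASCII column */
	seq_buf_hex_dump(s, "raw: ", DUMP_PREFIX_OFFSET, 16, 1,
			 buf, len, true);
}
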
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index eceb3607864b..7af5bec7d3b0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3658,9 +3658,12 @@ static inline void skb_get_new_timestamp(const struct sk_buff *skb,
}
static inline void skb_get_timestampns(const struct sk_buff *skb,
- struct timespec *stamp)
+ struct __kernel_old_timespec *stamp)
{
- *stamp = ktime_to_timespec(skb->tstamp);
+ struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+ stamp->tv_sec = ts.tv_sec;
+ stamp->tv_nsec = ts.tv_nsec;
}
static inline void skb_get_new_timestampns(const struct sk_buff *skb,
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 6cb077b646a5..ef7031f8a304 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -14,6 +14,7 @@
#include <net/strparser.h>
#define MAX_MSG_FRAGS MAX_SKB_FRAGS
+#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1)
enum __sk_action {
__SK_DROP = 0,
@@ -29,13 +30,15 @@ struct sk_msg_sg {
u32 size;
u32 copybreak;
unsigned long copy;
- /* The extra element is used for chaining the front and sections when
- * the list becomes partitioned (e.g. end < start). The crypto APIs
- * require the chaining.
+ /* The extra two elements:
+ * 1) used for chaining the front and sections when the list becomes
+ * partitioned (e.g. end < start). The crypto APIs require the
+ * chaining;
+ * 2) to chain tailer SG entries after the message.
*/
- struct scatterlist data[MAX_MSG_FRAGS + 1];
+ struct scatterlist data[MAX_MSG_FRAGS + 2];
};
-static_assert(BITS_PER_LONG >= MAX_MSG_FRAGS);
+static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
@@ -142,13 +145,13 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
- return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
+ return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}
#define sk_msg_iter_var_prev(var) \
do { \
if (var == 0) \
- var = MAX_MSG_FRAGS - 1; \
+ var = NR_MSG_FRAG_IDS - 1; \
else \
var--; \
} while (0)
@@ -156,7 +159,7 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
#define sk_msg_iter_var_next(var) \
do { \
var++; \
- if (var == MAX_MSG_FRAGS) \
+ if (var == NR_MSG_FRAG_IDS) \
var = 0; \
} while (0)
@@ -173,9 +176,9 @@ static inline void sk_msg_clear_meta(struct sk_msg *msg)
static inline void sk_msg_init(struct sk_msg *msg)
{
- BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
+ BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
memset(msg, 0, sizeof(*msg));
- sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
+ sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}
static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
@@ -196,14 +199,11 @@ static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
static inline bool sk_msg_full(const struct sk_msg *msg)
{
- return (msg->sg.end == msg->sg.start) && msg->sg.size;
+ return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}
static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
- if (sk_msg_full(msg))
- return MAX_MSG_FRAGS;
-
return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}
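
The switch from MAX_MSG_FRAGS to NR_MSG_FRAG_IDS turns the iterator into a classic N+1-slot ring: one ID is always unused, so end == start unambiguously means empty and a distance of MAX_MSG_FRAGS means full, which is what lets sk_msg_full() and sk_msg_elem_used() lose their special cases. A standalone sketch of the same arithmetic (plain userspace C, with an illustrative frag count):

#include <assert.h>

#define MAX_MSG_FRAGS	16	/* illustrative; really MAX_SKB_FRAGS */
#define NR_MSG_FRAG_IDS	(MAX_MSG_FRAGS + 1)

static unsigned int iter_dist(unsigned int start, unsigned int end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

int main(void)
{
	/* Empty ring: end == start, distance 0. */
	assert(iter_dist(3, 3) == 0);
	/* Full ring: all MAX_MSG_FRAGS slots used, one ID left spare. */
	assert(iter_dist(3, (3 + MAX_MSG_FRAGS) % NR_MSG_FRAG_IDS) ==
	       MAX_MSG_FRAGS);
	return 0;
}
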
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4d2a2fa55ed5..877a95c6a2d2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -561,26 +561,6 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-/*
- * Determine size used for the nth kmalloc cache.
- * return size or 0 if a kmalloc cache for that
- * size does not exist
- */
-static __always_inline unsigned int kmalloc_size(unsigned int n)
-{
-#ifndef CONFIG_SLOB
- if (n > 2)
- return 1U << n;
-
- if (n == 1 && KMALLOC_MIN_SIZE <= 32)
- return 96;
-
- if (n == 2 && KMALLOC_MIN_SIZE <= 64)
- return 192;
-#endif
- return 0;
-}
-
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h
new file mode 100644
index 000000000000..9e1ece58e55b
--- /dev/null
+++ b/include/linux/soc/qcom/irq.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __QCOM_IRQ_H
+#define __QCOM_IRQ_H
+
+#include <linux/irqdomain.h>
+
+#define GPIO_NO_WAKE_IRQ ~0U
+
+/**
+ * QCOM specific IRQ domain flags that distinguish the handling of wakeup
+ * capable interrupts by different interrupt controllers.
+ *
+ * IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP: Line must be masked at TLMM and the
+ * interrupt configuration is done at PDC
+ * IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP: Interrupt configuration is handled at TLMM
+ */
+#define IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP (IRQ_DOMAIN_FLAG_NONCORE << 0)
+#define IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP (IRQ_DOMAIN_FLAG_NONCORE << 1)
+
+/**
+ * irq_domain_qcom_handle_wakeup: Return whether the domain handles interrupt
+ * configuration
+ * @d: irq domain
+ *
+ * This QCOM specific irq domain call returns whether the interrupt controller
+ * requires the interrupt to be masked at the child interrupt controller.
+ */
+static inline bool irq_domain_qcom_handle_wakeup(const struct irq_domain *d)
+{
+ return (d->flags & IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP);
+}
+
+#endif
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 09c32a21555b..4bde63021c09 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -399,6 +399,9 @@ extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_connect_file(struct file *file,
+ struct sockaddr __user *uservaddr, int addrlen,
+ int file_flags);
extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
diff --git a/include/linux/sort.h b/include/linux/sort.h
index 61b96d0ebc44..b5898725fe9d 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -5,12 +5,12 @@
#include <linux/types.h>
void sort_r(void *base, size_t num, size_t size,
- int (*cmp)(const void *, const void *, const void *),
- void (*swap)(void *, void *, int),
+ cmp_r_func_t cmp_func,
+ swap_func_t swap_func,
const void *priv);
void sort(void *base, size_t num, size_t size,
- int (*cmp)(const void *, const void *),
- void (*swap)(void *, void *, int));
+ cmp_func_t cmp_func,
+ swap_func_t swap_func);
#endif
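
Behavior is unchanged; the prototypes just move to the named function-pointer types that this series adds to linux/types.h (see the types.h hunk below). A minimal caller sketch:

#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *a, const void *b)
{
	const u32 *x = a, *y = b;

	if (*x < *y)
		return -1;
	return *x > *y;
}

static void example_sort(u32 *vals, size_t n)
{
	/* A NULL swap_func lets sort() use its built-in swap. */
	sort(vals, n, sizeof(*vals), cmp_u32, NULL);
}
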
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ea787201c3ac..28745b9ba279 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -40,9 +40,6 @@ struct sdw_slave;
#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
-#define SDW_DAI_ID_RANGE_START 100
-#define SDW_DAI_ID_RANGE_END 200
-
enum {
SDW_PORT_DIRN_SINK = 0,
SDW_PORT_DIRN_SOURCE,
@@ -406,6 +403,8 @@ int sdw_slave_read_prop(struct sdw_slave *slave);
* SDW Slave Structures and APIs
*/
+#define SDW_IGNORED_UNIQUE_ID 0xFF
+
/**
* struct sdw_slave_id - Slave ID
* @mfg_id: MIPI Manufacturer ID
@@ -421,7 +420,7 @@ struct sdw_slave_id {
__u16 mfg_id;
__u16 part_id;
__u8 class_id;
- __u8 unique_id:4;
+ __u8 unique_id;
__u8 sdw_version:4;
};
diff --git a/include/linux/string.h b/include/linux/string.h
index b6ccdc2c7f02..02894e417565 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -216,6 +216,8 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
+int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);
+
/**
* strstarts - does @str start with @prefix?
* @str: string to examine
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 063c0c1e112b..1e99f7ac1d7e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,7 +307,7 @@ struct vma_swap_readahead {
};
/* linux/mm/workingset.c */
-void *workingset_eviction(struct page *page);
+void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
index 48ceea867dd6..d9b3cf0f410c 100644
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -15,6 +15,7 @@ struct soc_device_attribute {
const char *serial_number;
const char *soc_id;
const void *data;
+ const struct attribute_group *custom_attr_group;
};
/**
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f7c561c4dcdd..d0391cc2dae9 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -51,7 +51,7 @@ struct statx;
struct __sysctl_args;
struct sysinfo;
struct timespec;
-struct timeval;
+struct __kernel_old_timeval;
struct __kernel_timex;
struct timezone;
struct tms;
@@ -732,9 +732,9 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
/* kernel/time.c */
-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
-asmlinkage long sys_settimeofday(struct timeval __user *tv,
+asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
@@ -1076,15 +1076,15 @@ asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
asmlinkage long sys_alarm(unsigned int seconds);
asmlinkage long sys_getpgrp(void);
asmlinkage long sys_pause(void);
-asmlinkage long sys_time(time_t __user *tloc);
+asmlinkage long sys_time(__kernel_old_time_t __user *tloc);
asmlinkage long sys_time32(old_time32_t __user *tloc);
#ifdef __ARCH_WANT_SYS_UTIME
asmlinkage long sys_utime(char __user *filename,
struct utimbuf __user *times);
asmlinkage long sys_utimes(char __user *filename,
- struct timeval __user *utimes);
+ struct __kernel_old_timeval __user *utimes);
asmlinkage long sys_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes);
+ struct __kernel_old_timeval __user *utimes);
#endif
asmlinkage long sys_futimesat_time32(unsigned int dfd,
const char __user *filename,
@@ -1098,7 +1098,7 @@ asmlinkage long sys_getdents(unsigned int fd,
struct linux_dirent __user *dirent,
unsigned int count);
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timeval __user *tvp);
+ fd_set __user *exp, struct __kernel_old_timeval __user *tvp);
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
int timeout);
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
@@ -1116,7 +1116,7 @@ asmlinkage long sys_sysfs(int option,
asmlinkage long sys_fork(void);
/* obsolete: kernel/time/time.c */
-asmlinkage long sys_stime(time_t __user *tptr);
+asmlinkage long sys_stime(__kernel_old_time_t __user *tptr);
asmlinkage long sys_stime32(old_time32_t __user *tptr);
/* obsolete: kernel/signal.c */
diff --git a/include/linux/time.h b/include/linux/time.h
index 27d83fd2ae61..0760a4f5a15c 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -35,10 +35,11 @@ extern time64_t mktime64(const unsigned int year, const unsigned int mon,
extern u32 (*arch_gettimeoffset)(void);
#endif
-struct itimerval;
-extern int do_setitimer(int which, struct itimerval *value,
- struct itimerval *ovalue);
-extern int do_getitimer(int which, struct itimerval *value);
+#ifdef CONFIG_POSIX_TIMERS
+extern void clear_itimer(void);
+#else
+static inline void clear_itimer(void) {}
+#endif
extern long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags);
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 0a1f302a1753..cad4c3186002 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -12,7 +12,7 @@
#include <linux/time64.h>
#include <linux/timex.h>
-#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+#define TIME_T_MAX (__kernel_old_time_t)((1UL << ((sizeof(__kernel_old_time_t) << 3) - 1)) - 1)
typedef s32 old_time32_t;
diff --git a/include/linux/trace.h b/include/linux/trace.h
index b95ffb2188ab..7fd86d3c691f 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -24,6 +24,14 @@ struct trace_export {
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
+struct trace_array;
+
+void trace_printk_init_buffers(void);
+int trace_array_printk(struct trace_array *tr, unsigned long ip,
+ const char *fmt, ...);
+void trace_array_put(struct trace_array *tr);
+struct trace_array *trace_array_get_by_name(const char *name);
+int trace_array_destroy(struct trace_array *tr);
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_H */
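
These exports let a module drive its own trace instance end to end. A hedged lifecycle sketch, following the pattern of the in-tree sample module (the instance name is arbitrary):

#include <linux/kernel.h>
#include <linux/trace.h>

static int example_trace_instance(void)
{
	struct trace_array *tr;

	trace_printk_init_buffers();

	tr = trace_array_get_by_name("example");  /* created if missing */
	if (!tr)
		return -ENOMEM;

	trace_array_printk(tr, _THIS_IP_, "hello from the instance\n");

	trace_array_put(tr);			/* drop our reference */
	return trace_array_destroy(tr);		/* remove the instance */
}
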
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 30a8cdcfd4a4..4c6e15605766 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -45,6 +45,11 @@ const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
size_t el_size);
+const char *
+trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+
struct trace_iterator;
struct trace_event;
@@ -550,7 +555,8 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
-
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
+ const char *event, bool enable);
/*
* The double __builtin_constant_p is because gcc will give us an error
* if we try to allocate the static variable to fmt if it is not a
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6609b39a7232..6c30508fca19 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -92,6 +92,10 @@ extern int trace_seq_path(struct trace_seq *s, const struct path *path);
extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits);
+extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+
#else /* CONFIG_TRACING */
static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
diff --git a/include/linux/types.h b/include/linux/types.h
index 05030f608be3..eb870ad42919 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -67,7 +67,7 @@ typedef __kernel_ptrdiff_t ptrdiff_t;
#ifndef _TIME_T
#define _TIME_T
-typedef __kernel_time_t time_t;
+typedef __kernel_old_time_t time_t;
#endif
#ifndef _CLOCK_T
@@ -225,5 +225,10 @@ struct callback_head {
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+typedef void (*swap_func_t)(void *a, void *b, int size);
+
+typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
+typedef int (*cmp_func_t)(const void *a, const void *b);
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index ab5f523bc0df..9576fd8158d7 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -45,8 +45,8 @@ struct iov_iter {
union {
unsigned long nr_segs;
struct {
- int idx;
- int start_idx;
+ unsigned int head;
+ unsigned int start_head;
};
};
};
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index 2d77f97df72d..efac3af83d6b 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -51,6 +51,9 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node);
void usb_role_switch_put(struct usb_role_switch *sw);
struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode);
+
+struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc);
void usb_role_switch_unregister(struct usb_role_switch *sw);
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index f516955a0cf4..e7979c01c351 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -46,45 +46,6 @@ enum tcpm_transmit_type {
TCPC_TX_BIST_MODE_2 = 7
};
-/**
- * struct tcpc_config - Port configuration
- * @src_pdo: PDO parameters sent to port partner as response to
- * PD_CTRL_GET_SOURCE_CAP message
- * @nr_src_pdo: Number of entries in @src_pdo
- * @snk_pdo: PDO parameters sent to partner as response to
- * PD_CTRL_GET_SINK_CAP message
- * @nr_snk_pdo: Number of entries in @snk_pdo
- * @operating_snk_mw:
- * Required operating sink power in mW
- * @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or
- * TYPEC_PORT_DRP)
- * @default_role:
- * Default port role (TYPEC_SINK or TYPEC_SOURCE).
- * Set to TYPEC_NO_PREFERRED_ROLE if no default role.
- * @try_role_hw:True if try.{Src,Snk} is implemented in hardware
- * @alt_modes: List of supported alternate modes
- */
-struct tcpc_config {
- const u32 *src_pdo;
- unsigned int nr_src_pdo;
-
- const u32 *snk_pdo;
- unsigned int nr_snk_pdo;
-
- const u32 *snk_vdo;
- unsigned int nr_snk_vdo;
-
- unsigned int operating_snk_mw;
-
- enum typec_port_type type;
- enum typec_port_data data;
- enum typec_role default_role;
- bool try_role_hw; /* try.{src,snk} implemented in hardware */
- bool self_powered; /* port belongs to a self powered device */
-
- const struct typec_altmode_desc *alt_modes;
-};
-
/* Mux state attributes */
#define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */
#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
@@ -92,7 +53,6 @@ struct tcpc_config {
/**
* struct tcpc_dev - Port configuration and callback functions
- * @config: Pointer to port configuration
* @fwnode: Pointer to port fwnode
* @get_vbus: Called to read current VBUS state
* @get_current_limit:
@@ -121,7 +81,6 @@ struct tcpc_config {
* @mux: Pointer to multiplexer data
*/
struct tcpc_dev {
- const struct tcpc_config *config;
struct fwnode_handle *fwnode;
int (*init)(struct tcpc_dev *dev);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 7df4ecabc78a..0f52723a11bd 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -168,6 +168,23 @@ struct typec_partner_desc {
struct usb_pd_identity *identity;
};
+/**
+ * struct typec_operations - USB Type-C Port Operations
+ * @try_role: Set data role preference for DRP port
+ * @dr_set: Set Data Role
+ * @pr_set: Set Power Role
+ * @vconn_set: Source VCONN
+ * @port_type_set: Set port type
+ */
+struct typec_operations {
+ int (*try_role)(struct typec_port *port, int role);
+ int (*dr_set)(struct typec_port *port, enum typec_data_role role);
+ int (*pr_set)(struct typec_port *port, enum typec_role role);
+ int (*vconn_set)(struct typec_port *port, enum typec_role role);
+ int (*port_type_set)(struct typec_port *port,
+ enum typec_port_type type);
+};
+
/*
* struct typec_capability - USB Type-C Port Capabilities
* @type: Supported power role of the port
@@ -179,11 +196,8 @@ struct typec_partner_desc {
* @sw: Cable plug orientation switch
* @mux: Multiplexer switch for Alternate/Accessory Modes
* @fwnode: Optional fwnode of the port
- * @try_role: Set data role preference for DRP port
- * @dr_set: Set Data Role
- * @pr_set: Set Power Role
- * @vconn_set: Set VCONN Role
- * @port_type_set: Set port type
+ * @driver_data: Private pointer for driver specific info
+ * @ops: Port operations vector
*
* Static capabilities of a single USB Type-C port.
*/
@@ -195,21 +209,10 @@ struct typec_capability {
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
- struct typec_switch *sw;
- struct typec_mux *mux;
struct fwnode_handle *fwnode;
+ void *driver_data;
- int (*try_role)(const struct typec_capability *,
- int role);
-
- int (*dr_set)(const struct typec_capability *,
- enum typec_data_role);
- int (*pr_set)(const struct typec_capability *,
- enum typec_role);
- int (*vconn_set)(const struct typec_capability *,
- enum typec_role);
- int (*port_type_set)(const struct typec_capability *,
- enum typec_port_type);
+ const struct typec_operations *ops;
};
/* Specific to try_role(). Indicates the user wants to clear the preference. */
@@ -241,6 +244,8 @@ int typec_set_orientation(struct typec_port *port,
enum typec_orientation typec_get_orientation(struct typec_port *port);
int typec_set_mode(struct typec_port *port, int mode);
+void *typec_get_drvdata(struct typec_port *port);
+
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
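
With the callbacks moved into a separate const vector, a port driver registers its ops once and recovers private state via typec_get_drvdata() instead of container_of() games on typec_capability. A hedged sketch (struct my_port and the register helper are illustrative):

#include <linux/err.h>
#include <linux/usb/typec.h>

struct my_port {
	struct typec_port *port;
	/* ... hardware state ... */
};

static int my_dr_set(struct typec_port *port, enum typec_data_role role)
{
	struct my_port *mp = typec_get_drvdata(port);

	/* program the hardware for the new data role via mp */
	(void)mp;
	return 0;
}

static const struct typec_operations my_ops = {
	.dr_set = my_dr_set,
};

static int my_register(struct device *dev, struct my_port *mp,
		       struct typec_capability *cap)
{
	cap->driver_data = mp;
	cap->ops = &my_ops;

	mp->port = typec_register_port(dev, cap);
	return PTR_ERR_OR_ZERO(mp->port);
}
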
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b4c58a191eb1..a4b241102771 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -22,6 +22,18 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+
+/*
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
+ *
+ * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
+ * shadow memory has been mapped. It's used to handle allocation errors so that
+ * we don't try to poison shadow on free if it was never allocated.
+ *
+ * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
+ * determine which allocations need the module shadow freed.
+ */
+
/*
* Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
* vfree_atomic().
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 7da0c7588e04..cebf3464bc03 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -262,6 +262,7 @@ struct w1_family_ops {
* @family_entry: family linked list
* @fid: 8 bit family identifier
* @fops: operations for this family
+ * @of_match_table: open firmware match table
* @refcnt: reference counter
*/
struct w1_family {
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3eb7cae8206c..3283c8d02137 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -201,9 +201,10 @@ void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
unsigned int mode, void *key, wait_queue_entry_t *bookmark);
-void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
-void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -214,7 +215,7 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)
/*
* Wakeup macros to be used to report events to the targets.
@@ -228,7 +229,9 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+#define wake_up_interruptible_sync_poll_locked(x, m) \
+ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define ___wait_cond_timeout(condition) \
({ \
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index f161f8a493ac..985afea1ee36 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -93,8 +93,10 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
* cec_notifier_cec_adap_unregister - decrease refcount and delete when the
* refcount reaches 0.
* @n: notifier. If NULL, then this function does nothing.
+ * @adap: the cec adapter that registered this notifier.
*/
-void cec_notifier_cec_adap_unregister(struct cec_notifier *n);
+void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap);
/**
* cec_notifier_set_phys_addr - set a new physical address.
@@ -160,7 +162,8 @@ cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
return (struct cec_notifier *)0xdeadfeed;
}
-static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n)
+static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap)
{
}
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index 604e79cb6cbf..88c8b016eb09 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -29,8 +29,11 @@
* an error if negative. If NULL or -ENOTTY is returned,
* then this is not supported.
*
- * These operations are used by the cec pin framework to manipulate
- * the CEC pin.
+ * @received: optional. High-level CEC message callback. Allows the driver
+ * to process CEC messages.
+ *
+ * These operations (except for the @received op) are used by the
+ * cec pin framework to manipulate the CEC pin.
*/
struct cec_pin_ops {
bool (*read)(struct cec_adapter *adap);
@@ -42,6 +45,9 @@ struct cec_pin_ops {
void (*status)(struct cec_adapter *adap, struct seq_file *file);
int (*read_hpd)(struct cec_adapter *adap);
int (*read_5v)(struct cec_adapter *adap);
+
+ /* High-level CEC message callback */
+ int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index 4d59387bc61b..0a4f69cc9dd4 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -18,9 +18,6 @@
#include <linux/cec-funcs.h>
#include <media/rc-core.h>
-/* CEC_ADAP_G_CONNECTOR_INFO is available */
-#define CEC_CAP_CONNECTOR_INFO (1 << 8)
-
#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \
CEC_CAP_PASSTHROUGH | CEC_CAP_RC)
@@ -147,34 +144,6 @@ struct cec_adap_ops {
*/
#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1)
-/**
- * struct cec_drm_connector_info - tells which drm connector is
- * associated with the CEC adapter.
- * @card_no: drm card number
- * @connector_id: drm connector ID
- */
-struct cec_drm_connector_info {
- __u32 card_no;
- __u32 connector_id;
-};
-
-#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0
-#define CEC_CONNECTOR_TYPE_DRM 1
-
-/**
- * struct cec_connector_info - tells if and which connector is
- * associated with the CEC adapter.
- * @type: connector type (if any)
- * @drm: drm connector info
- */
-struct cec_connector_info {
- __u32 type;
- union {
- struct cec_drm_connector_info drm;
- __u32 raw[16];
- };
-};
-
struct cec_adapter {
struct module *owner;
char name[32];
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 7ce4e8332421..1409230ad3a4 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -389,6 +389,7 @@
#define USB_PID_MYGICA_T230 0xc688
#define USB_PID_MYGICA_T230C 0xc689
#define USB_PID_MYGICA_T230C2 0xc68a
+#define USB_PID_MYGICA_T230C_LITE 0xc699
#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
new file mode 100644
index 000000000000..1009cf0891cc
--- /dev/null
+++ b/include/media/hevc-ctrls.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the HEVC state controls for use with stateless HEVC
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _HEVC_CTRLS_H_
+#define _HEVC_CTRLS_H_
+
+#include <linux/videodev2.h>
+
+/* The pixel format isn't stable at the moment and will likely be renamed. */
+#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+
+#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008)
+#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010)
+#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015)
+#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
+#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
+#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
+
+enum v4l2_mpeg_video_hevc_decode_mode {
+ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+};
+
+enum v4l2_mpeg_video_hevc_start_code {
+ V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+};
+
+#define V4L2_HEVC_SLICE_TYPE_B 0
+#define V4L2_HEVC_SLICE_TYPE_P 1
+#define V4L2_HEVC_SLICE_TYPE_I 2
+
+#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
+#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
+#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
+#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
+#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
+#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
+#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
+#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
+
+/* The controls are not stable at the moment and will likely be reworked. */
+struct v4l2_ctrl_hevc_sps {
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Sequence parameter set */
+ __u16 pic_width_in_luma_samples;
+ __u16 pic_height_in_luma_samples;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 sps_max_dec_pic_buffering_minus1;
+ __u8 sps_max_num_reorder_pics;
+ __u8 sps_max_latency_increase_plus1;
+ __u8 log2_min_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_luma_coding_block_size;
+ __u8 log2_min_luma_transform_block_size_minus2;
+ __u8 log2_diff_max_min_luma_transform_block_size;
+ __u8 max_transform_hierarchy_depth_inter;
+ __u8 max_transform_hierarchy_depth_intra;
+ __u8 pcm_sample_bit_depth_luma_minus1;
+ __u8 pcm_sample_bit_depth_chroma_minus1;
+ __u8 log2_min_pcm_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_pcm_luma_coding_block_size;
+ __u8 num_short_term_ref_pic_sets;
+ __u8 num_long_term_ref_pics_sps;
+ __u8 chroma_format_idc;
+
+ __u8 padding;
+
+ __u64 flags;
+};
+
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
+#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
+#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
+#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
+#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
+#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
+#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
+#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
+#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
+#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
+#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
+#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
+#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
+#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
+#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+
+struct v4l2_ctrl_hevc_pps {
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
+ __u8 num_extra_slice_header_bits;
+ __s8 init_qp_minus26;
+ __u8 diff_cu_qp_delta_depth;
+ __s8 pps_cb_qp_offset;
+ __s8 pps_cr_qp_offset;
+ __u8 num_tile_columns_minus1;
+ __u8 num_tile_rows_minus1;
+ __u8 column_width_minus1[20];
+ __u8 row_height_minus1[22];
+ __s8 pps_beta_offset_div2;
+ __s8 pps_tc_offset_div2;
+ __u8 log2_parallel_merge_level_minus2;
+
+ __u8 padding[4];
+ __u64 flags;
+};
+
+#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01
+#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02
+#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03
+
+#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
+
+struct v4l2_hevc_dpb_entry {
+ __u64 timestamp;
+ __u8 rps;
+ __u8 field_pic;
+ __u16 pic_order_cnt[2];
+ __u8 padding[2];
+};
+
+struct v4l2_hevc_pred_weight_table {
+ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __u8 padding[6];
+
+ __u8 luma_log2_weight_denom;
+ __s8 delta_chroma_log2_weight_denom;
+};
+
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+
+struct v4l2_ctrl_hevc_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
+ __u8 nal_unit_type;
+ __u8 nuh_temporal_id_plus1;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __u16 slice_pic_order_cnt;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ __u8 collocated_ref_idx;
+ __u8 five_minus_max_num_merge_cand;
+ __s8 slice_qp_delta;
+ __s8 slice_cb_qp_offset;
+ __s8 slice_cr_qp_offset;
+ __s8 slice_act_y_qp_offset;
+ __s8 slice_act_cb_qp_offset;
+ __s8 slice_act_cr_qp_offset;
+ __s8 slice_beta_offset_div2;
+ __s8 slice_tc_offset_div2;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
+ __u8 pic_struct;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 num_active_dpb_entries;
+ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+
+ __u8 num_rps_poc_st_curr_before;
+ __u8 num_rps_poc_st_curr_after;
+ __u8 num_rps_poc_lt_curr;
+
+ __u8 padding;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
+ struct v4l2_hevc_pred_weight_table pred_weight_table;
+
+ __u64 flags;
+};
+
+#endif
diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h
index d6ccc859bfcd..80f8251d87a3 100644
--- a/include/media/i2c/smiapp.h
+++ b/include/media/i2c/smiapp.h
@@ -49,7 +49,6 @@ struct smiapp_hwconfig {
unsigned short i2c_addr_dfl; /* Default i2c addr */
unsigned short i2c_addr_alt; /* Alternate i2c addr */
- uint32_t nvm_size; /* bytes */
uint32_t ext_clk; /* sensor external clk */
unsigned int lanes; /* Number of CSI-2 lanes */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index afd2ab31bdf2..f99575a0d29c 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -159,21 +159,22 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100"
#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
#define RC_MAP_ATI_X10 "rc-ati-x10"
+#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d"
#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus"
#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt"
#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a"
#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6"
#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks"
-#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERTV_303 "rc-avertv-303"
#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700"
-#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
+#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1"
#define RC_MAP_BEHOLD "rc-behold"
+#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old"
#define RC_MAP_CEC "rc-cec"
-#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
#define RC_MAP_CINERGY "rc-cinergy"
+#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
#define RC_MAP_D680_DMB "rc-d680-dmb"
#define RC_MAP_DELOCK_61959 "rc-delock-61959"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
@@ -181,17 +182,17 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin"
#define RC_MAP_DIGITTRADE "rc-digittrade"
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
-#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DTT200U "rc-dtt200u"
#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_DVICO_MCE "rc-dvico-mce"
#define RC_MAP_DVICO_PORTABLE "rc-dvico-portable"
#define RC_MAP_EMPTY "rc-empty"
#define RC_MAP_EM_TERRATEC "rc-em-terratec"
+#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53"
-#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube"
#define RC_MAP_EZTV "rc-eztv"
#define RC_MAP_FLYDVB "rc-flydvb"
@@ -201,6 +202,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_GEEKBOX "rc-geekbox"
#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce"
#define RC_MAP_GOTVIEW7135 "rc-gotview7135"
+#define RC_MAP_HAUPPAUGE "rc-hauppauge"
#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge"
#define RC_MAP_HISI_POPLAR "rc-hisi-poplar"
#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo"
@@ -223,8 +225,8 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x"
#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
-#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
+#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_NEBULA "rc-nebula"
#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs"
#define RC_MAP_NORWOOD "rc-norwood"
@@ -234,21 +236,21 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color"
#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey"
#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd"
-#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
#define RC_MAP_PIXELVIEW "rc-pixelview"
-#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t"
+#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t"
#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12"
+#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel"
#define RC_MAP_PROTEUS_2309 "rc-proteus-2309"
#define RC_MAP_PURPLETV "rc-purpletv"
#define RC_MAP_PV951 "rc-pv951"
-#define RC_MAP_HAUPPAUGE "rc-hauppauge"
#define RC_MAP_RC5_TV "rc-rc5-tv"
#define RC_MAP_RC6_MCE "rc-rc6-mce"
#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
#define RC_MAP_REDDO "rc-reddo"
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
+#define RC_MAP_SU3000 "rc-su3000"
#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini"
#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max"
@@ -268,6 +270,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_TT_1500 "rc-tt-1500"
#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci"
#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
+#define RC_MAP_VEGA_S9X "rc-vega-s9x"
#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
@@ -275,9 +278,8 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2"
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
-#define RC_MAP_SU3000 "rc-su3000"
-#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_X96MAX "rc-x96max"
+#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index c070d8ae11e5..d8c29e089000 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -457,8 +457,24 @@ int v4l2_s_parm_cap(struct video_device *vdev,
/* Pixel format and FourCC helpers */
/**
+ * enum v4l2_pixel_encoding - specifies the pixel encoding value
+ *
+ * @V4L2_PIXEL_ENC_UNKNOWN: Pixel encoding is unknown/un-initialized
+ * @V4L2_PIXEL_ENC_YUV: Pixel encoding is YUV
+ * @V4L2_PIXEL_ENC_RGB: Pixel encoding is RGB
+ * @V4L2_PIXEL_ENC_BAYER: Pixel encoding is Bayer
+ */
+enum v4l2_pixel_encoding {
+ V4L2_PIXEL_ENC_UNKNOWN = 0,
+ V4L2_PIXEL_ENC_YUV = 1,
+ V4L2_PIXEL_ENC_RGB = 2,
+ V4L2_PIXEL_ENC_BAYER = 3,
+};
+
+/**
* struct v4l2_format_info - information about a V4L2 format
* @format: 4CC format identifier (V4L2_PIX_FMT_*)
+ * @pixel_enc: Pixel encoding (see enum v4l2_pixel_encoding above)
* @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
* @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
* @bpp: Array of per-plane bytes per pixel
@@ -469,6 +485,7 @@ int v4l2_s_parm_cap(struct video_device *vdev,
*/
struct v4l2_format_info {
u32 format;
+ u8 pixel_enc;
u8 mem_planes;
u8 comp_planes;
u8 bpp[4];
@@ -478,8 +495,22 @@ struct v4l2_format_info {
u8 block_h[4];
};
-const struct v4l2_format_info *v4l2_format_info(u32 format);
+static inline bool v4l2_is_format_rgb(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_RGB;
+}
+static inline bool v4l2_is_format_yuv(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_YUV;
+}
+
+static inline bool v4l2_is_format_bayer(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_BAYER;
+}
+
+const struct v4l2_format_info *v4l2_format_info(u32 format);
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
const struct v4l2_frmsize_stepwise *frmsize);
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
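
Because the helpers fold in the NULL check, v4l2_format_info() can be fed into them directly. A short sketch of a mem2mem driver deciding whether a colorspace conversion is needed (the function is illustrative):

#include <media/v4l2-common.h>

static bool example_needs_csc(u32 cap_fourcc, u32 out_fourcc)
{
	const struct v4l2_format_info *cap = v4l2_format_info(cap_fourcc);
	const struct v4l2_format_info *out = v4l2_format_info(out_fourcc);

	/* CSC is needed when one side is RGB and the other is YUV. */
	return (v4l2_is_format_rgb(cap) && v4l2_is_format_yuv(out)) ||
	       (v4l2_is_format_yuv(cap) && v4l2_is_format_rgb(out));
}
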
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 570ff4b0205a..7db9e719a583 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -21,6 +21,7 @@
#include <media/fwht-ctrls.h>
#include <media/h264-ctrls.h>
#include <media/vp8-ctrls.h>
+#include <media/hevc-ctrls.h>
/* forward references */
struct file;
@@ -50,7 +51,12 @@ struct poll_table_struct;
* @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params.
* @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params.
* @p_vp8_frame_header: Pointer to a VP8 frame header structure.
+ * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure.
+ * @p_hevc_pps: Pointer to an HEVC picture parameter set structure.
+ * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure.
+ * @p_area: Pointer to an area.
* @p: Pointer to a compound value.
+ * @p_const: Pointer to a constant compound value.
*/
union v4l2_ctrl_ptr {
s32 *p_s32;
@@ -68,10 +74,27 @@ union v4l2_ctrl_ptr {
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *p_area;
void *p;
+ const void *p_const;
};
/**
+ * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a
+ * void pointer
+ * @ptr: The void pointer
+ */
+static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr)
+{
+ union v4l2_ctrl_ptr p = { .p = ptr };
+
+ return p;
+}
+
+/**
* struct v4l2_ctrl_ops - The control operations that the driver has to provide.
*
* @g_volatile_ctrl: Get a new value for this control. Generally only relevant
@@ -200,6 +223,9 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
+ * @p_def: The control's default value represented via a union which
+ * provides a standard way of accessing control types
+ * through a pointer (for compound controls only).
* @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
@@ -254,6 +280,7 @@ struct v4l2_ctrl {
s32 val;
} cur;
+ union v4l2_ctrl_ptr p_def;
union v4l2_ctrl_ptr p_new;
union v4l2_ctrl_ptr p_cur;
};
@@ -357,6 +384,7 @@ struct v4l2_ctrl_handler {
* @max: The control's maximum value.
* @step: The control's step value for non-menu controls.
* @def: The control's default value.
+ * @p_def: The control's default value for compound controls.
* @dims: The size of each dimension.
* @elem_size: The size in bytes of the control.
* @flags: The control's flags.
@@ -385,6 +413,7 @@ struct v4l2_ctrl_config {
s64 max;
u64 step;
s64 def;
+ union v4l2_ctrl_ptr p_def;
u32 dims[V4L2_CTRL_MAX_DIMS];
u32 elem_size;
u32 flags;
@@ -647,6 +676,24 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
const char * const *qmenu);
/**
+ * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2
+ * compound control.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @p_def: The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but with support for compound controls, thanks
+ * to the @p_def field.
+ *
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id,
+ const union v4l2_ctrl_ptr p_def);
+
+/**
* v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
*
* @hdl: The control handler.
@@ -1065,6 +1112,46 @@ static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
return rval;
}
+/**
+ * __v4l2_ctrl_s_ctrl_area() - Unlocked variant of v4l2_ctrl_s_ctrl_area().
+ *
+ * @ctrl: The control.
+ * @area: The new area.
+ *
+ * This sets the control's new area safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for area type controls only.
+ */
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area);
+
+/**
+ * v4l2_ctrl_s_ctrl_area() - Helper function to set a control's area value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @area: The new area.
+ *
+ * This sets the control's new area safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for area type controls only.
+ */
+static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_s_ctrl_area(ctrl, area);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
+
/* Internal helper functions that deal with control events. */
extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
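
Putting the pieces together, a stateless decoder can now give a compound control a default via p_def, wrapped with v4l2_ctrl_ptr_create(). A hedged sketch using the new HEVC SPS control (the default values are arbitrary, and the const cast follows the pattern used by in-tree callers):

#include <media/v4l2-ctrls.h>

static const struct v4l2_ctrl_hevc_sps default_sps = {
	.chroma_format_idc = 1,		/* 4:2:0, illustrative default */
};

static int example_add_sps_ctrl(struct v4l2_ctrl_handler *hdl,
				const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_new_std_compound(hdl, ops, V4L2_CID_MPEG_VIDEO_HEVC_SPS,
				   v4l2_ctrl_ptr_create((void *)&default_sps));
	return hdl->error;
}
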
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index e0b8f2602670..5f36e0d2ede6 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -72,7 +72,7 @@ static inline void v4l2_device_get(struct v4l2_device *v4l2_dev)
}
/**
- * v4l2_device_put - putss a V4L2 device reference
+ * v4l2_device_put - puts a V4L2 device reference
*
* @v4l2_dev: pointer to struct &v4l2_device
*
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0b9c3a287061..1d85e24791e4 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -21,7 +21,8 @@
* callback.
* The job does NOT have to end before this callback returns
* (and it will be the usual case). When the job finishes,
- * v4l2_m2m_job_finish() has to be called.
+ * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
+ * has to be called.
* @job_ready: optional. Should return 0 if the driver does not have a job
* fully prepared to run yet (i.e. it will not be able to finish a
* transaction without sleeping). If not provided, it will be
@@ -33,7 +34,8 @@
* stop the device safely; e.g. in the next interrupt handler),
* even if the transaction would not have been finished by then.
* After the driver performs the necessary steps, it has to call
- * v4l2_m2m_job_finish() (as if the transaction ended normally).
+ * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
+ * if the transaction ended normally.
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
*/
@@ -73,6 +75,11 @@ struct v4l2_m2m_queue_ctx {
* struct v4l2_m2m_ctx - Memory to memory context structure
*
* @q_lock: struct &mutex lock
+ * @new_frame: valid in the device_run callback: if true, then this
+ * starts a new frame; if false, then this is a new slice
+ * for an existing frame. This is always true unless
+ * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
+ * indicates slicing support.
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -89,6 +96,8 @@ struct v4l2_m2m_ctx {
/* optional cap/out vb2 queues lock */
struct mutex *q_lock;
+ bool new_frame;
+
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
@@ -173,6 +182,33 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
+/**
+ * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
+ * state and inform the framework that a job has been finished and have it
+ * clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
+ *
+ * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
+ * function instead of job_finish() to take held buffers into account. It is
+ * optional for other drivers.
+ *
+ * This function removes the source buffer from the ready list and returns
+ * it with the given state. The same is done for the destination buffer, unless
+ * it is marked 'held'. In that case the buffer is kept on the ready list.
+ *
+ * After that the job is finished (see job_finish()).
+ *
+ * This allows for multiple output buffers to be used to fill in a single
+ * capture buffer. This is typically used by stateless decoders where
+ * multiple slices (e.g. H.264) contribute to a single decoded frame.
+ */
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state);
+
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
@@ -672,6 +708,10 @@ int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc);
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
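
Typical use is in the completion path of a stateless decoder: one call returns the source buffer, returns or holds the capture buffer, and finishes the job. A hedged sketch of such a handler:

#include <media/v4l2-mem2mem.h>

/* Illustrative completion handler for a stateless decoder. */
static void example_decode_done(struct v4l2_m2m_dev *m2m_dev,
				struct v4l2_m2m_ctx *m2m_ctx, bool ok)
{
	v4l2_m2m_buf_done_and_job_finish(m2m_dev, m2m_ctx,
					 ok ? VB2_BUF_STATE_DONE
					    : VB2_BUF_STATE_ERROR);
}
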
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 640aabe69450..a2b2208b02da 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -505,6 +505,8 @@ struct vb2_buf_ops {
* @buf_ops: callbacks to deliver buffer information.
* between user-space and kernel-space.
* @drv_priv: driver private data.
+ * @subsystem_flags: Flags specific to the subsystem (V4L2/DVB/etc.). Not used
+ * by the vb2 core.
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
* structure type. for example, ``sizeof(struct vb2_v4l2_buffer)``
@@ -571,6 +573,7 @@ struct vb2_queue {
const struct vb2_buf_ops *buf_ops;
void *drv_priv;
+ u32 subsystem_flags;
unsigned int buf_struct_size;
u32 timestamp_flags;
gfp_t gfp_flags;
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 8a10889dc2fd..59bf33a12648 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -33,6 +33,7 @@
* @timecode: frame timecode.
* @sequence: sequence count of this frame.
* @request_fd: the request_fd associated with this buffer
+ * @is_held: if true, then this capture buffer was held
* @planes: plane information (userptr/fd, length, bytesused, data_offset).
*
* Should contain enough information to be able to cover all the fields
@@ -46,9 +47,13 @@ struct vb2_v4l2_buffer {
struct v4l2_timecode timecode;
__u32 sequence;
__s32 request_fd;
+ bool is_held;
struct vb2_plane planes[VB2_MAX_PLANES];
};
+/* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */
+#define VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 0)
+
/*
* to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer *
*/
diff --git a/include/net/ip.h b/include/net/ip.h
index cebf3e10def1..02d68e346f67 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -339,7 +339,7 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
-static inline bool inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
if (!net->ipv4.sysctl_local_reserved_ports)
return false;
@@ -351,20 +351,20 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}
-static inline int inet_prot_sock(struct net *net)
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
- return net->ipv4.sysctl_ip_prot_sock;
+ return port < net->ipv4.sysctl_ip_prot_sock;
}
#else
-static inline bool inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
return false;
}
-static inline int inet_prot_sock(struct net *net)
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
- return PROT_SOCK;
+ return port < PROT_SOCK;
}
#endif
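The rename turns a raw threshold getter into a self-describing predicate. A hedged sketch of a bind-time caller, modeled on (but not copied from) the inet bind path:

/* Sketch: refuse a privileged-port bind without CAP_NET_BIND_SERVICE. */
if (snum && inet_port_requires_bind_service(net, snum) &&
    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
	return -EACCES;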
diff --git a/include/net/tls.h b/include/net/tls.h
index 6ed91e82edd0..df630f5fc723 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -100,7 +100,6 @@ struct tls_rec {
struct list_head list;
int tx_ready;
int tx_flags;
- int inplace_crypto;
struct sk_msg msg_plaintext;
struct sk_msg msg_encrypted;
@@ -377,7 +376,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
int flags);
-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
+void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 49f4f75499b3..b01a8a8d4de9 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -1,38 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(IB_CM_H)
+#ifndef IB_CM_H
#define IB_CM_H
#include <rdma/ib_mad.h>
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index eea946fcc819..4e62650e2127 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -815,46 +815,6 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf, u32 timeout_ms);
/**
- * ib_redirect_mad_qp - Registers a QP for MAD services.
- * @qp: Reference to a QP that requires MAD services.
- * @rmpp_version: If set, indicates that the client will send
- * and receive MADs that contain the RMPP header for the given version.
- * If set to 0, indicates that RMPP is not used by this client.
- * @send_handler: The completion callback routine invoked after a send
- * request has completed.
- * @recv_handler: The completion callback routine invoked for a received
- * MAD.
- * @context: User specified context associated with the registration.
- *
- * Use of this call allows clients to use MAD services, such as RMPP,
- * on user-owned QPs. After calling this routine, users may send
- * MADs on the specified QP by calling ib_mad_post_send.
- */
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context);
-
-/**
- * ib_process_mad_wc - Processes a work completion associated with a
- * MAD sent or received on a redirected QP.
- * @mad_agent: Specifies the registered MAD service using the redirected QP.
- * @wc: References a work completion associated with a sent or received
- * MAD segment.
- *
- * This routine is used to complete or continue processing on a MAD request.
- * If the work completion is associated with a send operation, calling
- * this routine is required to continue an RMPP transfer or to wait for a
- * corresponding response, if it is a request. If the work completion is
- * associated with a receive operation, calling this routine is required to
- * process an inbound or outbound RMPP transfer, or to match a response MAD
- * with its corresponding request.
- */
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc);
-
-/**
* ib_create_send_mad - Allocate and initialize a data buffer and work request
* for sending a MAD.
* @mad_agent: Specifies the registered MAD service to associate with the MAD.
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a91b2af64ec4..753f54e17e0a 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -70,7 +70,7 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync);
+ size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
@@ -85,7 +85,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
unsigned long addr, size_t size,
- int access, int dmasync)
+ int access)
{
return ERR_PTR(-EINVAL);
}
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 253df1a1fa54..81429acc8257 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -35,11 +35,11 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
-#include <linux/interval_tree.h>
struct ib_umem_odp {
struct ib_umem umem;
- struct ib_ucontext_per_mm *per_mm;
+ struct mmu_interval_notifier notifier;
+ struct pid *tgid;
/*
* An array of the pages included in the on-demand paging umem.
@@ -62,13 +62,8 @@ struct ib_umem_odp {
struct mutex umem_mutex;
void *private; /* for the HW driver to use. */
- int notifiers_seq;
- int notifiers_count;
int npages;
- /* Tree tracking */
- struct interval_tree_node interval_tree;
-
/*
* An implicit odp umem cannot be DMA mapped, has 0 length, and serves
* only as an anchor for the driver to hold onto the per_mm. FIXME:
@@ -77,10 +72,7 @@ struct ib_umem_odp {
*/
bool is_implicit_odp;
- struct completion notifier_completion;
- int dying;
unsigned int page_shift;
- struct work_struct work;
};
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
@@ -91,13 +83,13 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
- return umem_odp->interval_tree.start;
+ return umem_odp->notifier.interval_tree.start;
}
/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
- return umem_odp->interval_tree.last + 1;
+ return umem_odp->notifier.interval_tree.last + 1;
}
static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
@@ -121,21 +113,15 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-struct ib_ucontext_per_mm {
- struct mmu_notifier mn;
- struct pid *tgid;
-
- struct rb_root_cached umem_tree;
- /* Protects umem_tree */
- struct rw_semaphore umem_rwsem;
-};
-
-struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access);
+struct ib_umem_odp *
+ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
int access);
-struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
- unsigned long addr, size_t size);
+struct ib_umem_odp *
+ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
+ size_t size,
+ const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
@@ -145,55 +131,11 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
-typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
- void *cookie);
-/*
- * Call the callback on each ib_umem in the range. Returns the logical or of
- * the return values of the functions called.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
- u64 start, u64 end,
- umem_call_back cb,
- bool blockable, void *cookie);
-
-/*
- * Find first region intersecting with address range.
- * Return NULL if not found
- */
-static inline struct ib_umem_odp *
-rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
-{
- struct interval_tree_node *node;
-
- node = interval_tree_iter_first(root, addr, addr + length - 1);
- if (!node)
- return NULL;
- return container_of(node, struct ib_umem_odp, interval_tree);
-
-}
-
-static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
- unsigned long mmu_seq)
-{
- /*
- * This code is strongly based on the KVM code from
- * mmu_notifier_retry. Should be called with
- * the relevant locks taken (umem_odp->umem_mutex
- * and the ucontext umem_mutex semaphore locked for read).
- */
-
- if (unlikely(umem_odp->notifiers_count))
- return 1;
- if (umem_odp->notifiers_seq != mmu_seq)
- return 1;
- return 0;
-}
-
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
- unsigned long addr,
- size_t size, int access)
+static inline struct ib_umem_odp *
+ib_umem_odp_get(struct ib_udata *udata, unsigned long addr, size_t size,
+ int access, const struct mmu_interval_notifier_ops *ops)
{
return ERR_PTR(-EINVAL);
}
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e7e733add99f..cacb48faf670 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -445,6 +445,8 @@ struct ib_device_attr {
struct ib_tm_caps tm_caps;
struct ib_cq_caps cq_caps;
u64 max_dm_size;
+ /* Maximum number of SGL entries per RDMA READ for optimal performance */
+ u32 max_sgl_rd;
};
enum ib_mtu {
@@ -1471,6 +1473,7 @@ struct ib_ucontext {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct rdma_restrack_entry res;
+ struct xarray mmap_xa;
};
struct ib_uobject {
@@ -2120,7 +2123,7 @@ struct ib_flow_action {
atomic_t usecnt;
};
-struct ib_mad_hdr;
+struct ib_mad;
struct ib_grh;
enum ib_process_mad_flags {
@@ -2218,6 +2221,11 @@ struct rdma_netdev_alloc_params {
struct net_device *netdev, void *param);
};
+struct ib_odp_counters {
+ atomic64_t faults;
+ atomic64_t invalidations;
+};
+
struct ib_counters {
struct ib_device *device;
struct ib_uobject *uobject;
@@ -2251,6 +2259,21 @@ struct iw_cm_conn_param;
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
+struct rdma_user_mmap_entry {
+ struct kref ref;
+ struct ib_ucontext *ucontext;
+ unsigned long start_pgoff;
+ size_t npages;
+ bool driver_removed;
+};
+
+/* Return the offset (in bytes) the user should pass to libc's mmap() */
+static inline u64
+rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
+{
+ return (u64)entry->start_pgoff << PAGE_SHIFT;
+}
+
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations, providers will
@@ -2278,9 +2301,8 @@ struct ib_device_ops {
int (*process_mad)(struct ib_device *device, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
@@ -2363,6 +2385,13 @@ struct ib_device_ops {
struct ib_udata *udata);
void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
+ /**
+ * This will be called once the refcount of an entry in mmap_xa reaches
+ * zero. The type of the memory that was mapped may differ between
+ * entries and is opaque to the rdma_user_mmap interface, so the
+ * corresponding cleanup must be implemented by the driver in mmap_free.
+ */
+ void (*mmap_free)(struct rdma_user_mmap_entry *entry);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
@@ -2422,8 +2451,6 @@ struct ib_device_ops {
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
- void (*invalidate_range)(struct ib_umem_odp *umem_odp,
- unsigned long start, unsigned long end);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -2448,6 +2475,9 @@ struct ib_device_ops {
struct ifla_vf_info *ivf);
int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
+ int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
@@ -2563,6 +2593,13 @@ struct ib_device_ops {
*/
int (*counter_update_stats)(struct rdma_counter *counter);
+ /**
+ * Allows rdma drivers to add their own restrack attributes
+ * dumped via 'rdma stat' iproute2 command.
+ */
+ int (*fill_stat_entry)(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
@@ -2789,18 +2826,21 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void ib_set_device_ops(struct ib_device *device,
const struct ib_device_ops *ops);
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot);
-#else
-static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size,
- pgprot_t prot)
-{
- return -EINVAL;
-}
-#endif
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry);
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length);
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff);
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma);
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
+
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
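Taken together, these declarations suggest a driver flow like the sketch below; the names my_entry and my_expose_db_page are hypothetical and error handling is trimmed:

struct my_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 db_phys;			/* page to hand to userspace */
};

static int my_expose_db_page(struct ib_ucontext *uctx, u64 *mmap_offset)
{
	struct my_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
	int ret;

	if (!e)
		return -ENOMEM;
	ret = rdma_user_mmap_entry_insert(uctx, &e->rdma_entry, PAGE_SIZE);
	if (ret) {
		kfree(e);
		return ret;
	}
	/* Userspace passes this value as the mmap() offset; the driver's
	 * mmap_free() runs once the entry's refcount drops to zero. */
	*mmap_offset = rdma_user_mmap_get_offset(&e->rdma_entry);
	return 0;
}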
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
@@ -3303,6 +3343,9 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
@@ -4043,9 +4086,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
*/
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
- struct device_dma_parameters *p = dev->dma_device->dma_parms;
-
- return p ? p->max_segment_size : UINT_MAX;
+ return dma_get_max_seg_size(dev->dma_device);
}
/**
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 83df1ec6664e..7682d1bcf789 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -156,6 +156,11 @@ int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value);
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name,
u64 value);
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str);
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value);
+
struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
enum rdma_restrack_type type,
u32 id);
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index b71b5c4f418c..533f56733ba8 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -627,6 +627,7 @@ struct iscsi_reject {
#define ISCSI_REASON_BOOKMARK_INVALID 9
#define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
#define ISCSI_REASON_NEGOTIATION_RESET 11
+#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
/* Max. number of Key=Value pairs in a text message */
#define MAX_KEY_VALUE_PAIRS 8192
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 91bd749a02f7..a2849bb9cd19 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -63,6 +63,7 @@ struct scsi_pointer {
/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
+#define SCMD_STATE_INFLIGHT 1
struct scsi_cmnd {
struct scsi_request req;
@@ -190,12 +191,12 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
return cmd->sdb.length;
}
-static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
+static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
cmd->req.resid_len = resid;
}
-static inline int scsi_get_resid(struct scsi_cmnd *cmd)
+static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
return cmd->req.resid_len;
}
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 202f4d6a4342..3ed836db5306 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -140,8 +140,10 @@ struct scsi_device {
const char * rev; /* ... "nullnullnullnull" before scan */
#define SCSI_VPD_PG_LEN 255
+ struct scsi_vpd __rcu *vpd_pg0;
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
+ struct scsi_vpd __rcu *vpd_pg89;
unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target; /* used only for single_lun */
@@ -199,7 +201,8 @@ struct scsi_device {
unsigned broken_fua:1; /* Don't set FUA bit */
unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
-
+ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
+ * creation time */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 31e0d6ca1eba..f577647bf5f2 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -23,19 +23,6 @@ struct scsi_host_cmd_pool;
struct scsi_transport_template;
-/*
- * The various choices mean:
- * NONE: Self evident. Host adapter is not capable of scatter-gather.
- * ALL: Means that the host adapter module can do scatter-gather,
- * and that there is no limit to the size of the table to which
- * we scatter/gather data. The value we set here is the maximum
- * single element sglist. To use chained sglists, the adapter
- * has to set a value beyond ALL (and correctly use the chain
- * handling API.
- * Anything else: Indicates the maximum number of chains that can be
- * used in one scatter-gather request.
- */
-#define SG_NONE 0
#define SG_ALL SG_CHUNK_SIZE
#define MODE_UNKNOWN 0x00
@@ -345,7 +332,7 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme. It is set to the maximum number
- * of simultaneous commands a given host adapter will accept.
+ * of simultaneous commands a single hw queue in the HBA will accept.
*/
int can_queue;
@@ -486,6 +473,9 @@ struct scsi_host_template {
*/
unsigned int cmd_size;
struct scsi_host_cmd_pool *cmd_pool;
+
+ /* Delay for runtime autosuspend */
+ int rpm_autosuspend_delay;
};
/*
@@ -551,7 +541,6 @@ struct Scsi_Host {
/* Area to keep a shared tag map */
struct blk_mq_tag_set tag_set;
- atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
unsigned int host_failed; /* commands that failed.
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index e1108a5f4f17..64cbbbe74a36 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -406,13 +406,6 @@ struct ocelot_ops {
int (*reset)(struct ocelot *ocelot);
};
-struct ocelot_skb {
- struct list_head head;
- struct sk_buff *skb;
- u8 id;
-};
-
-
struct ocelot_port {
struct ocelot *ocelot;
@@ -425,7 +418,7 @@ struct ocelot_port {
u16 vid;
u8 ptp_cmd;
- struct list_head skbs;
+ struct sk_buff_head tx_skbs;
u8 ts_id;
};
diff --git a/include/sound/core.h b/include/sound/core.h
index ee238f100f73..af3dce956c17 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -117,6 +117,7 @@ struct snd_card {
struct device card_dev; /* cardX object for sysfs */
const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
+ int sync_irq; /* assigned irq, used for PCM sync */
wait_queue_head_t remove_sleep;
#ifdef CONFIG_PM
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c679f6116580..b65220685920 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -83,6 +83,11 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
const struct snd_dmaengine_dai_dma_data *dma_data,
struct dma_slave_config *config);
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
/*
* Try to request the DMA channel using compat_request_channel or
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 9a0393cf024c..ac18f428eda6 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -254,6 +254,7 @@ struct hda_codec {
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
+ unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
#ifdef CONFIG_PM
unsigned long power_on_acct;
diff --git a/include/sound/intel-dsp-config.h b/include/sound/intel-dsp-config.h
new file mode 100644
index 000000000000..c36622bee3f8
--- /dev/null
+++ b/include/sound/intel-dsp-config.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel-dsp-config.h - Intel DSP config
+ *
+ * Copyright (c) 2019 Jaroslav Kysela <perex@perex.cz>
+ */
+
+#ifndef __INTEL_DSP_CONFIG_H__
+#define __INTEL_DSP_CONFIG_H__
+
+struct pci_dev;
+
+enum {
+ SND_INTEL_DSP_DRIVER_ANY = 0,
+ SND_INTEL_DSP_DRIVER_LEGACY,
+ SND_INTEL_DSP_DRIVER_SST,
+ SND_INTEL_DSP_DRIVER_SOF,
+ SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_SOF
+};
+
+#if IS_ENABLED(CONFIG_SND_INTEL_DSP_CONFIG)
+
+int snd_intel_dsp_driver_probe(struct pci_dev *pci);
+
+#else
+
+static inline int snd_intel_dsp_driver_probe(struct pci_dev *pci)
+{
+ return SND_INTEL_DSP_DRIVER_ANY;
+}
+
+#endif
+
+#endif
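A usage sketch for a legacy PCI audio driver's probe path; the function name and surrounding driver are assumptions, not code from this patch:

static int my_hda_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	int ret = snd_intel_dsp_driver_probe(pci);

	/* Back off if the config logic prefers SST or SOF for this device. */
	if (ret != SND_INTEL_DSP_DRIVER_ANY &&
	    ret != SND_INTEL_DSP_DRIVER_LEGACY)
		return -ENODEV;
	/* ... continue legacy probing ... */
	return 0;
}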
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 240622d89c0b..3b47832b1c1f 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -21,7 +21,6 @@ struct snd_dma_device {
struct device *dev; /* generic device */
};
-#define snd_dma_pci_data(pci) (&(pci)->dev)
#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
@@ -44,6 +43,7 @@ struct snd_dma_device {
#else
#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
#endif
+#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index bbe6eb1ff5d2..8a89fa6fdd5e 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -59,6 +59,7 @@ struct snd_pcm_ops {
int (*hw_free)(struct snd_pcm_substream *substream);
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
+ int (*sync_stop)(struct snd_pcm_substream *substream);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
int (*get_time_info)(struct snd_pcm_substream *substream,
struct timespec *system_ts, struct timespec *audio_ts,
@@ -395,6 +396,7 @@ struct snd_pcm_runtime {
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
struct fasync_struct *fasync;
+ bool stop_operating; /* sync_stop will be called */
/* -- private section -- */
void *private_data;
@@ -414,6 +416,7 @@ struct snd_pcm_runtime {
size_t dma_bytes; /* size of DMA area */
struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+ unsigned int buffer_changed:1; /* buffer allocation changed; set only in managed mode */
/* -- audio timestamp config -- */
struct snd_pcm_audio_tstamp_config audio_tstamp_config;
@@ -475,6 +478,7 @@ struct snd_pcm_substream {
#endif /* CONFIG_SND_VERBOSE_PROCFS */
/* misc flags */
unsigned int hw_opened: 1;
+ unsigned int managed_buffer_alloc:1;
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -1186,6 +1190,12 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
+void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max);
+void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max);
+
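A minimal sketch of adopting managed buffers in a driver's PCM constructor, assuming a PCI device reachable as chip->pci (sizes are illustrative):

/* Sketch: pre-allocate 64 KiB, allow hw_params to grow it to 4 MiB;
 * the PCM core then allocates and frees buffers on the driver's behalf. */
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
			       &chip->pci->dev, 64 * 1024, 4 * 1024 * 1024);

With managed buffers in place, explicit snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages() calls in hw_params/hw_free can usually be dropped.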
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
size_t size, gfp_t gfp_flags);
int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
@@ -1236,14 +1246,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
*/
#define snd_pcm_substream_sgbuf(substream) \
snd_pcm_get_dma_buf(substream)->private_data
-
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-#else /* !SND_DMA_SGBUF */
-/*
- * fake using a continuous buffer
- */
-#define snd_pcm_sgbuf_ops_page NULL
#endif /* SND_DMA_SGBUF */
/**
@@ -1336,8 +1338,6 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
(IEC958_AES1_CON_PCM_CODER<<8)|\
(IEC958_AES3_CON_FS_48000<<24))
-#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
-
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 6758fc12fa84..0feaf16e6ac0 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -10,6 +10,7 @@ struct snd_pcm_substream;
struct snd_pcm_hw_params;
struct snd_soc_pcm_runtime;
struct snd_pcm;
+struct snd_soc_component;
extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
@@ -23,8 +24,29 @@ extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
-extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
-extern const struct snd_pcm_ops pxa2xx_pcm_ops;
+extern void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
+extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+extern int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd);
+extern snd_pcm_uframes_t
+pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
/* AC97 */
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index bf2ee75aabb1..bc2c31734df1 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -31,6 +31,7 @@ struct rt5682_platform_data {
enum rt5682_dmic1_data_pin dmic1_data_pin;
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
+ unsigned int btndet_delay;
};
#endif
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 31f76b6abf71..bbdd1542d6f1 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -8,6 +8,7 @@
#ifndef __SIMPLE_CARD_UTILS_H
#define __SIMPLE_CARD_UTILS_H
+#include <linux/clk.h>
#include <sound/soc.h>
#define asoc_simple_init_hp(card, sjack, prefix) \
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 6c9929abd90b..20c0bee3b959 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -24,9 +24,12 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 35b38e41e5b2..c4c997bd0379 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -60,12 +60,14 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @acpi_ipc_irq_index: used for BYT-CR detection
* @platform: string used for HDaudio codec support
* @codec_mask: used for HDAudio support
+ * @common_hdmi_codec_drv: use common HDAudio HDMI codec driver
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
const char *platform;
u32 codec_mask;
u32 dmic_num;
+ bool common_hdmi_codec_drv;
};
/**
@@ -75,6 +77,7 @@ struct snd_soc_acpi_mach_params {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @link_mask: describes required board layout, e.g. for SoundWire.
* @drv_name: machine driver name
* @fw_filename: firmware file name. Used when SOF is not enabled.
* @board: board name
@@ -90,6 +93,7 @@ struct snd_soc_acpi_mach_params {
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
const u8 id[ACPI_ID_LEN];
+ const u32 link_mask;
const char *drv_name;
const char *fw_filename;
const char *board;
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 5d80b2eef525..506f72a6b2c2 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -3,10 +3,6 @@
* soc-component.h
*
* Copyright (c) 2019 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SOC_COMPONENT_H
#define __SOC_COMPONENT_H
@@ -51,8 +47,10 @@ struct snd_soc_component_driver {
unsigned int reg, unsigned int val);
/* pcm creation and destruction */
- int (*pcm_new)(struct snd_soc_pcm_runtime *rtd);
- void (*pcm_free)(struct snd_pcm *pcm);
+ int (*pcm_construct)(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+ void (*pcm_destruct)(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
/* component wide operations */
int (*set_sysclk)(struct snd_soc_component *component,
@@ -74,7 +72,42 @@ struct snd_soc_component_driver {
int (*set_bias_level)(struct snd_soc_component *component,
enum snd_soc_bias_level level);
- const struct snd_pcm_ops *ops;
+ int (*open)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*close)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*ioctl)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg);
+ int (*hw_params)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+ int (*hw_free)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*prepare)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*trigger)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd);
+ int (*sync_stop)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ snd_pcm_uframes_t (*pointer)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*get_time_info)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, struct timespec *system_ts,
+ struct timespec *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
+ int (*copy_user)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void __user *buf,
+ unsigned long bytes);
+ struct page *(*page)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned long offset);
+ int (*mmap)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
+
const struct snd_compr_ops *compr_ops;
/* probe ordering - for components with runtime dependencies */
@@ -374,6 +407,7 @@ int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component,
int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
void __user *buf, unsigned long bytes);
@@ -381,7 +415,7 @@ struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
-int snd_soc_pcm_component_new(struct snd_pcm *pcm);
-void snd_soc_pcm_component_free(struct snd_pcm *pcm);
+int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd);
+void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd);
#endif /* __SOC_COMPONENT_H */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index e55aeb00ce2d..b654ebfc8766 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -103,15 +103,15 @@ struct snd_soc_dpcm_runtime {
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
-#define for_each_dpcm_fe(be, stream, dpcm) \
- list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
-
-#define for_each_dpcm_be(fe, stream, dpcm) \
- list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \
- list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-#define for_each_dpcm_be_rollback(fe, stream, dpcm) \
- list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_fe(be, stream, _dpcm) \
+ list_for_each_entry(_dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
+
+#define for_each_dpcm_be(fe, stream, _dpcm) \
+ list_for_each_entry(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_safe(fe, stream, _dpcm, __dpcm) \
+ list_for_each_entry_safe(_dpcm, __dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
+ list_for_each_entry_continue_reverse(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
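Only the macro argument names change (to avoid shadowing a caller's dpcm variable); call sites keep the same shape, e.g. this sketch walking a frontend's backends:

struct snd_soc_dpcm *dpcm;

for_each_dpcm_be(fe, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
	struct snd_soc_pcm_runtime *be = dpcm->be;
	/* inspect or update each backend here */
}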
/* can this BE stop and free */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f264c6509f00..c28a1ed5e8df 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -299,6 +299,12 @@
.put = snd_soc_bytes_put, .private_value = \
((unsigned long)&(struct soc_bytes) \
{.base = xbase, .num_regs = xregs }) }
+#define SND_SOC_BYTES_E(xname, xbase, xregs, xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_bytes_info, .get = xhandler_get, \
+ .put = xhandler_put, .private_value = \
+ ((unsigned long)&(struct soc_bytes) \
+ {.base = xbase, .num_regs = xregs }) }
#define SND_SOC_BYTES_MASK(xname, xbase, xregs, xmask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -739,10 +745,12 @@ struct snd_soc_rtdcom_list {
struct snd_soc_component*
snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
const char *driver_name);
-#define for_each_rtdcom(rtd, rtdcom) \
- list_for_each_entry(rtdcom, &(rtd)->component_list, list)
-#define for_each_rtdcom_safe(rtd, rtdcom1, rtdcom2) \
- list_for_each_entry_safe(rtdcom1, rtdcom2, &(rtd)->component_list, list)
+#define for_each_rtd_components(rtd, rtdcom, _component) \
+ for (rtdcom = list_first_entry(&(rtd)->component_list, \
+ typeof(*rtdcom), list); \
+ (&rtdcom->list != &(rtd)->component_list) && \
+ (_component = rtdcom->component); \
+ rtdcom = list_next_entry(rtdcom, list))
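A sketch of the new iterator, which yields the component directly instead of requiring a lookup inside the loop body:

struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;

for_each_rtd_components(rtd, rtdcom, component)
	dev_dbg(rtd->dev, "component: %s\n", component->name);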
struct snd_soc_dai_link_component {
const char *name;
@@ -845,7 +853,9 @@ struct snd_soc_dai_link {
unsigned int ignore:1;
struct list_head list; /* DAI link list of the soc card */
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
+#endif
};
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
@@ -978,6 +988,7 @@ struct snd_soc_card {
const char *name;
const char *long_name;
const char *driver_name;
+ const char *components;
char dmi_longname[80];
char topology_shortname[32];
@@ -1148,7 +1159,6 @@ struct snd_soc_pcm_runtime {
struct list_head component_list; /* list of connected components */
/* bit field */
- unsigned int dev_registered:1;
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
};
@@ -1168,7 +1178,9 @@ struct soc_mixer_control {
unsigned int sign_bit;
unsigned int invert:1;
unsigned int autodisable:1;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
+#endif
};
struct soc_bytes {
@@ -1179,8 +1191,9 @@ struct soc_bytes {
struct soc_bytes_ext {
int max;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
-
+#endif
/* used for TLV byte control */
int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes,
unsigned int size);
@@ -1204,7 +1217,9 @@ struct soc_enum {
const char * const *texts;
const unsigned int *values;
unsigned int autodisable:1;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
+#endif
};
/* device driver data */
@@ -1325,8 +1340,10 @@ struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
int id, const char *name,
const char *stream_name);
-int snd_soc_register_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv);
+struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming);
+void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
const struct snd_soc_dai_link_component *dlc);
@@ -1391,6 +1408,11 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
mutex_unlock(&dapm->card->dapm_mutex);
}
+/* bypass */
+int snd_soc_pcm_lib_ioctl(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg);
+
#include <sound/soc-component.h>
#endif
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 4640566b54fe..479101736ee0 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -61,6 +61,9 @@ struct sof_dev_desc {
/* list of machines using this configuration */
struct snd_soc_acpi_mach *machines;
+ /* alternate list of machines using this configuration */
+ struct snd_soc_acpi_mach *alt_machines;
+
/* Platform resource indexes in BAR / ACPI resources. */
/* Must set to -1 if not used - add new items to end */
int resindex_lpe_base;
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
new file mode 100644
index 000000000000..e02fb0b0fae1
--- /dev/null
+++ b/include/sound/sof/dai-imx.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright 2019 NXP
+ *
+ * Author: Daniel Baluta <daniel.baluta@nxp.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_IMX_H__
+#define __INCLUDE_SOUND_SOF_DAI_IMX_H__
+
+#include <sound/sof/header.h>
+
+/* ESAI Configuration Request - SOF_IPC_DAI_ESAI_CONFIG */
+struct sof_ipc_dai_esai_params {
+ struct sof_ipc_hdr hdr;
+
+ /* MCLK */
+ uint16_t reserved1;
+ uint16_t mclk_id;
+ uint32_t mclk_direction;
+
+ uint32_t mclk_rate; /* MCLK frequency in Hz */
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t bclk_rate; /* BCLK frequency in Hz */
+
+ /* TDM */
+ uint32_t tdm_slots;
+ uint32_t rx_slots;
+ uint32_t tx_slots;
+ uint16_t tdm_slot_width;
+ uint16_t reserved2; /* alignment */
+} __packed;
+
+#endif
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 0f1235022146..c229565767e5 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -11,6 +11,7 @@
#include <sound/sof/header.h>
#include <sound/sof/dai-intel.h>
+#include <sound/sof/dai-imx.h>
/*
* DAI Configuration.
@@ -73,6 +74,7 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_dmic_params dmic;
struct sof_ipc_dai_hda_params hda;
struct sof_ipc_dai_alh_params alh;
+ struct sof_ipc_dai_esai_params esai;
};
} __packed;
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 10f00c08dbb7..bf3edd9c08b4 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -9,6 +9,7 @@
#ifndef __INCLUDE_SOUND_SOF_HEADER_H__
#define __INCLUDE_SOUND_SOF_HEADER_H__
+#include <linux/types.h>
#include <uapi/sound/sof/abi.h>
/** \addtogroup sof_uapi uAPI
@@ -74,6 +75,7 @@
#define SOF_IPC_PM_CLK_GET SOF_CMD_TYPE(0x005)
#define SOF_IPC_PM_CLK_REQ SOF_CMD_TYPE(0x006)
#define SOF_IPC_PM_CORE_ENABLE SOF_CMD_TYPE(0x007)
+#define SOF_IPC_PM_GATE SOF_CMD_TYPE(0x008)
/* component runtime config - multiple different types */
#define SOF_IPC_COMP_SET_VALUE SOF_CMD_TYPE(0x001)
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 003879401d63..3cf2e0f39d94 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -45,4 +45,12 @@ struct sof_ipc_pm_core_config {
uint32_t enable_mask;
} __packed;
+struct sof_ipc_pm_gate {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t flags; /* platform specific */
+
+ /* reserved for future use */
+ uint32_t reserved[5];
+} __packed;
+
#endif
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 0b71b381b952..7facefb541b3 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -83,10 +83,10 @@ struct sof_ipc_stream_params {
uint16_t sample_valid_bytes;
uint16_t sample_container_bytes;
- /* for notifying host period has completed - 0 means no period IRQ */
uint32_t host_period_bytes;
+ uint16_t no_stream_position; /**< 1 means don't send stream position */
- uint32_t reserved[2];
+ uint16_t reserved[3];
uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
} __packed;
diff --git a/include/sound/timer.h b/include/sound/timer.h
index 199c36295a0d..a53e37bcd746 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -118,8 +118,10 @@ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer);
int snd_timer_global_free(struct snd_timer *timer);
int snd_timer_global_register(struct snd_timer *timer);
-int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id);
-int snd_timer_close(struct snd_timer_instance *timeri);
+struct snd_timer_instance *snd_timer_instance_new(const char *owner);
+void snd_timer_instance_free(struct snd_timer_instance *timeri);
+int snd_timer_open(struct snd_timer_instance *timeri, struct snd_timer_id *tid, unsigned int slave_id);
+void snd_timer_close(struct snd_timer_instance *timeri);
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri);
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks);
int snd_timer_stop(struct snd_timer_instance *timeri);
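Splitting allocation out of snd_timer_open() implies a lifecycle like the sketch below, assuming tid and slave_id are prepared by the caller and with error handling abbreviated:

struct snd_timer_instance *ti;
int err;

ti = snd_timer_instance_new("my-owner");
if (!ti)
	return -ENOMEM;
err = snd_timer_open(ti, &tid, slave_id);
if (err < 0) {
	snd_timer_instance_free(ti);
	return err;
}
/* ... start/stop the timer via snd_timer_start()/snd_timer_stop() ... */
snd_timer_close(ti);
snd_timer_instance_free(ti);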
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 14074405f501..88ac1870510e 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -120,7 +120,7 @@
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7c9716fe731e..1728e883b7b2 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -876,7 +876,6 @@ struct se_portal_group {
/* Spinlock for adding/removing sessions */
spinlock_t session_lock;
struct mutex tpg_lun_mutex;
- struct list_head se_tpg_node;
/* linked list for initiator ACL list */
struct list_head acl_node_list;
struct hlist_head tpg_lun_hlist;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d68e9e536814..182c9fe9c0e9 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -1746,15 +1746,16 @@ TRACE_EVENT(ext4_load_inode,
TRACE_EVENT(ext4_journal_start,
TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
- unsigned long IP),
+ int revoke_creds, unsigned long IP),
- TP_ARGS(sb, blocks, rsv_blocks, IP),
+ TP_ARGS(sb, blocks, rsv_blocks, revoke_creds, IP),
TP_STRUCT__entry(
__field( dev_t, dev )
__field(unsigned long, ip )
__field( int, blocks )
__field( int, rsv_blocks )
+ __field( int, revoke_creds )
),
TP_fast_assign(
@@ -1762,11 +1763,13 @@ TRACE_EVENT(ext4_journal_start,
__entry->ip = IP;
__entry->blocks = blocks;
__entry->rsv_blocks = rsv_blocks;
+ __entry->revoke_creds = revoke_creds;
),
- TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pS",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
+ TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d, "
+ "caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->blocks, __entry->rsv_blocks, __entry->revoke_creds,
+ (void *)__entry->ip)
);
TRACE_EVENT(ext4_journal_start_reserved,
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index 92e5e89e52ed..9832cb8e0eb0 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -26,7 +26,7 @@ TRACE_EVENT(fsi_master_read,
__entry->addr = addr;
__entry->size = size;
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd]",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu]",
__entry->master_idx,
__entry->link,
__entry->id,
@@ -56,7 +56,7 @@ TRACE_EVENT(fsi_master_write,
__entry->data = 0;
memcpy(&__entry->data, data, size);
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd] <= {%*ph}",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu] <= {%*ph}",
__entry->master_idx,
__entry->link,
__entry->id,
@@ -93,7 +93,7 @@ TRACE_EVENT(fsi_master_rw_result,
if (__entry->write || !__entry->ret)
memcpy(&__entry->data, data, size);
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd] %s {%*ph} ret %d",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu] %s {%*ph} ret %d",
__entry->master_idx,
__entry->link,
__entry->id,
diff --git a/include/trace/events/fsi_master_aspeed.h b/include/trace/events/fsi_master_aspeed.h
new file mode 100644
index 000000000000..a355ceacc33f
--- /dev/null
+++ b/include/trace/events/fsi_master_aspeed.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_aspeed
+
+#if !defined(_TRACE_FSI_MASTER_ASPEED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_ASPEED_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_aspeed_opb_read,
+ TP_PROTO(uint32_t addr, size_t size, uint32_t result, uint32_t status, uint32_t irq_status),
+ TP_ARGS(addr, size, result, status, irq_status),
+ TP_STRUCT__entry(
+ __field(uint32_t, addr)
+ __field(size_t, size)
+ __field(uint32_t, result)
+ __field(uint32_t, status)
+ __field(uint32_t, irq_status)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->result = result;
+ __entry->status = status;
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("addr %08x size %zu: result %08x sts: %08x irq_sts: %08x",
+ __entry->addr, __entry->size, __entry->result,
+ __entry->status, __entry->irq_status
+ )
+);
+
+TRACE_EVENT(fsi_master_aspeed_opb_write,
+ TP_PROTO(uint32_t addr, uint32_t val, size_t size, uint32_t status, uint32_t irq_status),
+ TP_ARGS(addr, val, size, status, irq_status),
+ TP_STRUCT__entry(
+ __field(uint32_t, addr)
+ __field(uint32_t, val)
+ __field(size_t, size)
+ __field(uint32_t, status)
+ __field(uint32_t, irq_status)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->val = val;
+ __entry->size = size;
+ __entry->status = status;
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("addr %08x val %08x size %zu status: %08x irq_sts: %08x",
+ __entry->addr, __entry->val, __entry->size,
+ __entry->status, __entry->irq_status
+ )
+ );
+
+TRACE_EVENT(fsi_master_aspeed_opb_error,
+ TP_PROTO(uint32_t mresp0, uint32_t mstap0, uint32_t mesrb0),
+ TP_ARGS(mresp0, mstap0, mesrb0),
+ TP_STRUCT__entry(
+ __field(uint32_t, mresp0)
+ __field(uint32_t, mstap0)
+ __field(uint32_t, mesrb0)
+ ),
+ TP_fast_assign(
+ __entry->mresp0 = mresp0;
+ __entry->mstap0 = mstap0;
+ __entry->mesrb0 = mesrb0;
+ ),
+ TP_printk("mresp0 %08x mstap0 %08x mesrb0 %08x",
+ __entry->mresp0, __entry->mstap0, __entry->mesrb0
+ )
+ );
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 72a4d0174b02..b352d66b5d51 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -163,35 +163,35 @@ TRACE_EVENT(io_uring_queue_async_work,
);
/**
- * io_uring_defer_list - called before the io_uring work added into defer_list
+ * io_uring_defer - called when an io_uring request is deferred
*
* @ctx: pointer to a ring context structure
* @req: pointer to a deferred request
- * @shadow: whether request is shadow or not
+ * @user_data: user data associated with the request
*
* Allows tracking of deferred requests, to get insight into which requests
* are not started immediately.
*/
TRACE_EVENT(io_uring_defer,
- TP_PROTO(void *ctx, void *req, bool shadow),
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data),
- TP_ARGS(ctx, req, shadow),
+ TP_ARGS(ctx, req, user_data),
TP_STRUCT__entry (
__field( void *, ctx )
__field( void *, req )
- __field( bool, shadow )
+ __field( unsigned long long, data )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->shadow = shadow;
+ __entry->data = user_data;
),
- TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req,
- __entry->shadow ? ", shadow": "")
+ TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
+ __entry->req, __entry->data)
);
/**
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 2310b259329f..d16a32867f3a 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -133,7 +133,7 @@ TRACE_EVENT(jbd2_submit_inode_data,
(unsigned long) __entry->ino)
);
-TRACE_EVENT(jbd2_handle_start,
+DECLARE_EVENT_CLASS(jbd2_handle_start_class,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int requested_blocks),
@@ -161,6 +161,20 @@ TRACE_EVENT(jbd2_handle_start,
__entry->type, __entry->line_no, __entry->requested_blocks)
);
+DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, requested_blocks)
+);
+
+DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, requested_blocks)
+);
+
TRACE_EVENT(jbd2_handle_extend,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int buffer_credits,
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 69e8bb8963db..ad7e642bd497 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -316,6 +316,53 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
+/*
+ * Required for uniquely and securely identifying the mm in the rss_stat tracepoint.
+ */
+#ifndef __PTR_TO_HASHVAL
+static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
+{
+ int ret;
+ unsigned long hashval;
+
+ ret = ptr_to_hashval(ptr, &hashval);
+ if (ret)
+ return 0;
+
+ /* The hashed value is only 32-bit */
+ return (unsigned int)hashval;
+}
+#define __PTR_TO_HASHVAL
+#endif
+
+TRACE_EVENT(rss_stat,
+
+ TP_PROTO(struct mm_struct *mm,
+ int member,
+ long count),
+
+ TP_ARGS(mm, member, count),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, mm_id)
+ __field(unsigned int, curr)
+ __field(int, member)
+ __field(long, size)
+ ),
+
+ TP_fast_assign(
+ __entry->mm_id = mm_ptr_to_hash(mm);
+ __entry->curr = !!(current->mm == mm);
+ __entry->member = member;
+ __entry->size = (count << PAGE_SHIFT);
+ ),
+
+ TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
+ __entry->mm_id,
+ __entry->curr,
+ __entry->member,
+ __entry->size)
+ );
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 295517f109d7..19abb6c3eb73 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -303,7 +303,7 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
*/
TRACE_EVENT(itimer_state,
- TP_PROTO(int which, const struct itimerval *const value,
+ TP_PROTO(int which, const struct itimerspec64 *const value,
unsigned long long expires),
TP_ARGS(which, value, expires),
@@ -312,24 +312,24 @@ TRACE_EVENT(itimer_state,
__field( int, which )
__field( unsigned long long, expires )
__field( long, value_sec )
- __field( long, value_usec )
+ __field( long, value_nsec )
__field( long, interval_sec )
- __field( long, interval_usec )
+ __field( long, interval_nsec )
),
TP_fast_assign(
__entry->which = which;
__entry->expires = expires;
__entry->value_sec = value->it_value.tv_sec;
- __entry->value_usec = value->it_value.tv_usec;
+ __entry->value_nsec = value->it_value.tv_nsec;
__entry->interval_sec = value->it_interval.tv_sec;
- __entry->interval_usec = value->it_interval.tv_usec;
+ __entry->interval_nsec = value->it_interval.tv_nsec;
),
- TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
+ TP_printk("which=%d expires=%llu it_value=%ld.%06ld it_interval=%ld.%06ld",
__entry->which, __entry->expires,
- __entry->value_sec, __entry->value_usec,
- __entry->interval_sec, __entry->interval_usec)
+ __entry->value_sec, __entry->value_nsec / NSEC_PER_USEC,
+ __entry->interval_sec, __entry->interval_nsec / NSEC_PER_USEC)
);
/**
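The event now records nanoseconds internally while keeping the historical microsecond display; a quick sketch of the printed conversion (hypothetical 1.5 s value):

#include <stdio.h>

#define NSEC_PER_USEC 1000L

int main(void)
{
	long value_sec = 1;
	long value_nsec = 500000000L;	/* 1.5 s, hypothetical */

	/* mirrors the TP_printk above: %06ld of nsec / NSEC_PER_USEC */
	printf("it_value=%ld.%06ld\n", value_sec, value_nsec / NSEC_PER_USEC);
	return 0;			/* prints it_value=1.500000 */
}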
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 4ecdfe2e3580..7089760d4c7a 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -340,6 +340,12 @@ TRACE_MAKE_SYSTEM_STR();
trace_print_array_seq(p, array, count, el_size); \
})
+#undef __print_hex_dump
+#define __print_hex_dump(prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii) \
+ trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
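With the hook above, an event's print format can hand a captured buffer to the hex dumper; an illustrative event using it (hypothetical, not from this patch):

/* illustrative only — dump a captured buffer from the print format */
TRACE_EVENT(foo_hexdump,
	TP_PROTO(const void *buf, unsigned int len),
	TP_ARGS(buf, len),
	TP_STRUCT__entry(
		__dynamic_array(unsigned char, data, len)
		__field(unsigned int, len)
	),
	TP_fast_assign(
		memcpy(__get_dynamic_array(data), buf, len);
		__entry->len = len;
	),
	TP_printk("%s",
		  __print_hex_dump("", DUMP_PREFIX_OFFSET, 16, 1,
				   __get_dynamic_array(data), __entry->len,
				   false))
);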
diff --git a/include/uapi/asm-generic/msgbuf.h b/include/uapi/asm-generic/msgbuf.h
index 9fe4881557cb..af95aa89012e 100644
--- a/include/uapi/asm-generic/msgbuf.h
+++ b/include/uapi/asm-generic/msgbuf.h
@@ -13,9 +13,9 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first three padding words.
- * On big-endian systems, the padding is in the wrong place.
+ * 64 bit architectures use a 64-bit long time field here, while
+ * 32 bit architectures have a pair of unsigned long values.
+ * On big-endian systems, the lower half is in the wrong place.
*
* Pad space is left for:
* - 2 miscellaneous 32-bit values
@@ -24,9 +24,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#if __BITS_PER_LONG == 64
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
#else
unsigned long msg_stime; /* last msgsnd time */
unsigned long msg_stime_high;
diff --git a/include/uapi/asm-generic/posix_types.h b/include/uapi/asm-generic/posix_types.h
index f0733a26ebfc..2f9c80595ba7 100644
--- a/include/uapi/asm-generic/posix_types.h
+++ b/include/uapi/asm-generic/posix_types.h
@@ -86,6 +86,7 @@ typedef struct {
*/
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
+typedef __kernel_long_t __kernel_old_time_t;
typedef __kernel_long_t __kernel_time_t;
typedef long long __kernel_time64_t;
typedef __kernel_long_t __kernel_clock_t;
diff --git a/include/uapi/asm-generic/sembuf.h b/include/uapi/asm-generic/sembuf.h
index 0bae010f1b64..137606018c6a 100644
--- a/include/uapi/asm-generic/sembuf.h
+++ b/include/uapi/asm-generic/sembuf.h
@@ -13,9 +13,8 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures use a 64-bit __kernel_time_t here, while
+ * 64 bit architectures use a 64-bit long time field here, while
* 32 bit architectures have a pair of unsigned long values.
- * so they do not need the first two padding words.
*
* On big-endian systems, the padding is in the wrong place for
* historic reasons, so user space has to reconstruct a time_t
@@ -29,8 +28,8 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#if __BITS_PER_LONG == 64
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
#else
unsigned long sem_otime; /* last semop time */
unsigned long sem_otime_high;
diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h
index e504422fc501..2bab955e0fed 100644
--- a/include/uapi/asm-generic/shmbuf.h
+++ b/include/uapi/asm-generic/shmbuf.h
@@ -13,9 +13,9 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first two padding words.
- * On big-endian systems, the padding is in the wrong place.
+ * 64 bit architectures use a 64-bit long time field here, while
+ * 32 bit architectures have a pair of unsigned long values.
+ * On big-endian systems, the lower half is in the wrong place.
*
*
* Pad space is left for:
@@ -26,9 +26,9 @@ struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
#if __BITS_PER_LONG == 64
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
#else
unsigned long shm_atime; /* last attach time */
unsigned long shm_atime_high;
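For the 32-bit layout, user space reassembles a 64-bit timestamp from the low/high pair; a small sketch of that arithmetic (hypothetical values, field names following shmid64_ds above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values read from a 32-bit shmid64_ds */
	unsigned long shm_atime = 0x9eb7a180UL;	/* low 32 bits */
	unsigned long shm_atime_high = 0x1UL;	/* high 32 bits */
	int64_t atime = ((int64_t)shm_atime_high << 32) | shm_atime;

	printf("shm_atime (64-bit) = %lld\n", (long long)atime);
	return 0;
}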
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4fe35d600ab8..bbdad866e3fe 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -500,6 +500,8 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
+/* Use RW MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_RW (5 << 5)
struct drm_amdgpu_gem_va {
/** GEM object handle */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 8a5b2f8f8eb9..868bf7996c0f 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,11 +778,12 @@ struct drm_syncobj_array {
__u32 pad;
};
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
__u64 handles;
__u64 points;
__u32 count_handles;
- __u32 pad;
+ __u32 flags;
};
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3feeaa3f987a..8caaaf7ff91b 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -69,7 +69,7 @@ extern "C" {
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
-#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
/* Reserve 0 for the invalid format specifier */
#define DRM_FORMAT_INVALID 0
@@ -648,7 +648,21 @@ extern "C" {
* Further information on the use of AFBC modifiers can be found in
* Documentation/gpu/afbc.rst
*/
-#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor-specific
+ * modifiers) denote the category for modifiers. Currently we have only two
+ * categories of modifiers, i.e. AFBC and MISC. We can have a maximum of sixteen
+ * different categories.
+ */
+#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
+ fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00
+#define DRM_FORMAT_MOD_ARM_TYPE_MISC 0x01
+
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFBC, __afbc_mode)
/*
* AFBC superblock size
@@ -743,6 +757,16 @@ extern "C" {
#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
/*
+ * Arm 16x16 Block U-Interleaved modifier
+ *
+ * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
+ * into 16x16 pixel blocks. Blocks are stored linearly in order, but pixels
+ * in the block are reordered.
+ */
+#define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
+
+/*
* Allwinner tiled modifier
*
* This tiling mode is implemented by the VPU found on all Allwinner platforms,
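For reference, a small standalone sketch of the resulting bit layout (the macros are restated locally for a runnable demo; DRM_FORMAT_MOD_VENDOR_ARM is 0x08 in this header, and the type lands in bits 55:52 of the vendor payload):

#include <stdint.h>
#include <stdio.h>

/* local re-statement of the macros above, for illustration only */
#define VENDOR_ARM 0x08ULL
#define FOURCC_MOD_CODE(val)	((VENDOR_ARM << 56) | ((val) & 0x00ffffffffffffffULL))
#define MOD_ARM_CODE(type, val) \
	FOURCC_MOD_CODE(((uint64_t)(type) << 52) | ((val) & 0x000fffffffffffffULL))

int main(void)
{
	/* type 0x01 (MISC), value 1 => the 16x16 block U-interleaved modifier */
	uint64_t mod = MOD_ARM_CODE(0x01, 1ULL);

	printf("modifier = 0x%016llx\n", (unsigned long long)mod);
	return 0;	/* prints 0x0810000000000001 */
}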
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 3e59b8382dd8..45c6582b3df3 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -68,7 +68,7 @@ struct drm_exynos_gem_info {
/**
* A structure for user connection request of virtual display.
*
- * @connection: indicate whether doing connetion or not by user.
+ * @connection: indicate whether doing connection or not by user.
* @extensions: if this value is 1 then the vidi driver would need additional
* 128bytes edid data.
* @edid: the edid data pointer from user side.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 469dc512cca3..5400d7e057f1 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -611,6 +611,13 @@ typedef struct drm_i915_irq_wait {
* See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
*/
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/*
+ * Revision of the i915-perf uAPI. The value returned helps determine what
+ * i915-perf features are available. See drm_i915_perf_property_id.
+ */
+#define I915_PARAM_PERF_REVISION 54
+
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -1565,6 +1572,21 @@ struct drm_i915_gem_context_param {
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
+
+/*
+ * I915_CONTEXT_PARAM_PERSISTENCE:
+ *
+ * Allow the context and active rendering to survive the process until
+ * completion. Persistence allows fire-and-forget clients to queue up a
+ * bunch of work, hand the output over to a display server and then quit.
+ * If the context is marked as not persistent, upon closing (either via
+ * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
+ * or process termination), the context and any outstanding requests will be
+ * cancelled (and exported fences for cancelled requests marked as -EIO).
+ *
+ * By default, new contexts allow persistence.
+ */
+#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -1844,23 +1866,31 @@ enum drm_i915_perf_property_id {
* Open the stream for a specific context handle (as used with
* execbuffer2). A stream opened for a specific context this way
* won't typically require root privileges.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_CTX_HANDLE = 1,
/**
* A value of 1 requests the inclusion of raw OA unit reports as
* part of stream samples.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_SAMPLE_OA,
/**
* The value specifies which set of OA unit metrics should be
* configured, defining the contents of any OA unit reports.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_METRICS_SET,
/**
* The value specifies the size and layout of OA unit reports.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_FORMAT,
@@ -1870,9 +1900,22 @@ enum drm_i915_perf_property_id {
* from this exponent as follows:
*
* 80ns * 2^(period_exponent + 1)
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_EXPONENT,
+ /**
+ * Specifying this property is only valid when specifying a context to
+ * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
+ * will hold preemption of the particular context we want to gather
+ * performance data about. The execbuf2 submissions must include a
+ * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
+ *
+ * This property is available in perf revision 3.
+ */
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@@ -1901,6 +1944,8 @@ struct drm_i915_perf_open_param {
* to close and re-open a stream with the same configuration.
*
* It's undefined whether any pending data for the stream will be lost.
+ *
+ * This ioctl is available in perf revision 1.
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
@@ -1908,10 +1953,25 @@ struct drm_i915_perf_open_param {
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
+ *
+ * This ioctl is available in perf revision 1.
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
/**
+ * Change metrics_set captured by a stream.
+ *
+ * If the stream is bound to a specific context, the configuration change
+ * will be performed inline with that context such that it takes effect before
+ * the next execbuf submission.
+ *
+ * Returns the previously bound metrics set id, or a negative error code.
+ *
+ * This ioctl is available in perf revision 2.
+ */
+#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
+
+/**
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -1984,6 +2044,7 @@ struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
/* Must be kept compact -- no holes and well documented */
/*
@@ -1995,9 +2056,18 @@ struct drm_i915_query_item {
__s32 length;
/*
- * Unused for now. Must be cleared to zero.
+ * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following :
+ * - DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
*/
__u32 flags;
+#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
/*
* Data will be written at the location pointed by data_ptr when the
@@ -2033,8 +2103,10 @@ struct drm_i915_query {
* (data[X / 8] >> (X % 8)) & 1
*
* - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. The availability of subslice Y in slice
- * X can be queried with the following formula :
+ * whether a subslice is available. Gen12 has dual-subslices, which are
+ * similar to two gen11 subslices. For gen12, this array represents dual-
+ * subslices. The availability of subslice Y in slice X can be queried
+ * with the following formula :
*
* (data[subslice_offset +
* X * subslice_stride +
@@ -2123,6 +2195,56 @@ struct drm_i915_query_engine_info {
struct drm_i915_engine_info engines[];
};
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+ */
+struct drm_i915_query_perf_config {
+ union {
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
+ * this field to the number of configurations available.
+ */
+ __u64 n_configs;
+
+ /*
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ */
+ __u64 config;
+
+ /*
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ *
+ * String formatted like "%08x-%04x-%04x-%04x-%012x"
+ */
+ char uuid[36];
+ };
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
+ * write an array of __u64 of configuration identifiers.
+ *
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
+ * write a struct drm_i915_perf_oa_config. If the following fields of
+ * drm_i915_perf_oa_config are not set to 0, i915 will write into
+ * the associated pointers the values submitted when the
+ * configuration was created :
+ *
+ * - n_mux_regs
+ * - n_boolean_regs
+ * - n_flex_regs
+ */
+ __u8 data[];
+};
+
#if defined(__cplusplus)
}
#endif
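A hedged userspace sketch of how these definitions combine — query once with a zero-sized buffer to learn the length, then again to fetch the config list (assumes an open i915 DRM fd; the include path depends on the installed libdrm/kernel headers):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_perf_config *list_perf_configs(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_PERF_CONFIG,
		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_perf_config *cfg;

	/* pass 1: kernel reports the required buffer size in item.length */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL;
	cfg = calloc(1, item.length);
	if (!cfg)
		return NULL;
	/* pass 2: kernel fills n_configs plus __u64 config ids in data[] */
	item.data_ptr = (uintptr_t)cfg;
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
		free(cfg);
		return NULL;
	}
	return cfg;
}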
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 1fccffef9e27..5a142fad473c 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -38,20 +38,20 @@ struct drm_omap_param {
__u64 value; /* in (set_param), out (get_param) */
};
-#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
-#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
-#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+/* Scanout buffer, consumable by DSS */
+#define OMAP_BO_SCANOUT 0x00000001
-/* cache modes */
-#define OMAP_BO_CACHED 0x00000000 /* default */
-#define OMAP_BO_WC 0x00000002 /* write-combine */
-#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+/* Buffer CPU caching mode: cached, write-combining or uncached. */
+#define OMAP_BO_CACHED 0x00000000
+#define OMAP_BO_WC 0x00000002
+#define OMAP_BO_UNCACHED 0x00000004
+#define OMAP_BO_CACHE_MASK 0x00000006
-/* tiled modes */
+/* Use TILER for the buffer. The TILER container unit can be 8, 16 or 32 bits. */
#define OMAP_BO_TILED_8 0x00000100
#define OMAP_BO_TILED_16 0x00000200
#define OMAP_BO_TILED_32 0x00000300
-#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+#define OMAP_BO_TILED_MASK 0x00000f00
union omap_gem_size {
__u32 bytes; /* (for non-tiled formats) */
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 58fbe48c91e9..1ce746e228d9 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -48,6 +48,8 @@ extern "C" {
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
+#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
* engine.
@@ -61,7 +63,7 @@ extern "C" {
* flushed by the time the render done IRQ happens, which is the
* trigger for out_sync. Any dirtying of cachelines by the job (only
* possible using TMU writes) must be flushed by the caller using the
- * CL's cache flush commands.
+ * DRM_V3D_SUBMIT_CL_FLUSH_CACHE flag.
*/
struct drm_v3d_submit_cl {
/* Pointer to the binner command list.
@@ -124,8 +126,7 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
- /* Pad, must be zero-filled. */
- __u32 pad;
+ __u32 flags;
};
/**
@@ -193,6 +194,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
DRM_V3D_PARAM_SUPPORTS_TFU,
DRM_V3D_PARAM_SUPPORTS_CSD,
+ DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
};
struct drm_v3d_get_param {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 399f58317cff..02cab33f2f25 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -891,11 +891,13 @@ struct drm_vmw_shader_arg {
* surface.
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
* given.
+ * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
*/
enum drm_vmw_surface_flags {
drm_vmw_surface_flag_shareable = (1 << 0),
drm_vmw_surface_flag_scanout = (1 << 1),
- drm_vmw_surface_flag_create_buffer = (1 << 2)
+ drm_vmw_surface_flag_create_buffer = (1 << 2),
+ drm_vmw_surface_flag_coherent = (1 << 3),
};
/**
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c89c6495983d..3ad935527177 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -143,6 +143,7 @@
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
#define AUDIT_ANOM_LINK 1702 /* Suspicious use of file links */
+#define AUDIT_ANOM_CREAT 1703 /* Suspicious file creation */
#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 8997d5068c08..37590027b604 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -923,7 +923,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_DECK_STATUS : 0;
}
static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
@@ -1027,7 +1028,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_TUNER_DEVICE_STATUS : 0;
}
static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
@@ -1302,17 +1304,17 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
if (!ui_cmd->has_opt_arg)
return;
switch (ui_cmd->ui_cmd) {
- case 0x56:
- case 0x57:
- case 0x60:
- case 0x68:
- case 0x69:
- case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
/* The optional operand is one byte for all these ui commands */
msg->len++;
msg->msg[3] = ui_cmd->play_mode;
break;
- case 0x67:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
@@ -1331,17 +1333,17 @@ static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
if (msg->len == 3)
return;
switch (ui_cmd->ui_cmd) {
- case 0x56:
- case 0x57:
- case 0x60:
- case 0x68:
- case 0x69:
- case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
/* The optional operand is one byte for all these ui commands */
ui_cmd->play_mode = msg->msg[3];
ui_cmd->has_opt_arg = 1;
break;
- case 0x67:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
if (msg->len < 7)
break;
ui_cmd->has_opt_arg = 1;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 5704fa0292b5..7d1a06c52469 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -317,6 +317,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_NEEDS_HPD (1 << 6)
/* Hardware can monitor CEC pin transitions */
#define CEC_CAP_MONITOR_PIN (1 << 7)
+/* CEC_ADAP_G_CONNECTOR_INFO is available */
+#define CEC_CAP_CONNECTOR_INFO (1 << 8)
/**
* struct cec_caps - CEC capabilities structure.
@@ -375,6 +377,34 @@ struct cec_log_addrs {
/* CDC-Only device: supports only CDC messages */
#define CEC_LOG_ADDRS_FL_CDC_ONLY (1 << 2)
+/**
+ * struct cec_drm_connector_info - tells which drm connector is
+ * associated with the CEC adapter.
+ * @card_no: drm card number
+ * @connector_id: drm connector ID
+ */
+struct cec_drm_connector_info {
+ __u32 card_no;
+ __u32 connector_id;
+};
+
+#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0
+#define CEC_CONNECTOR_TYPE_DRM 1
+
+/**
+ * struct cec_connector_info - tells if and which connector is
+ * associated with the CEC adapter.
+ * @type: connector type (if any)
+ * @drm: drm connector info
+ */
+struct cec_connector_info {
+ __u32 type;
+ union {
+ struct cec_drm_connector_info drm;
+ __u32 raw[16];
+ };
+};
+
/* Events */
/* Event that occurs when the adapter state changes */
@@ -398,10 +428,17 @@ struct cec_log_addrs {
* struct cec_event_state_change - used when the CEC adapter changes state.
* @phys_addr: the current physical address
* @log_addr_mask: the current logical address mask
+ * @have_conn_info: if non-zero, then HDMI connector information is available.
+ * This field is only valid if CEC_CAP_CONNECTOR_INFO is set. If that
+ * capability is set and @have_conn_info is zero, then that indicates
+ * that the HDMI connector device is not instantiated, either because
+ * the HDMI driver is still configuring the device or because the HDMI
+ * device was unbound.
*/
struct cec_event_state_change {
__u16 phys_addr;
__u16 log_addr_mask;
+ __u16 have_conn_info;
};
/**
@@ -476,6 +513,9 @@ struct cec_event {
#define CEC_G_MODE _IOR('a', 8, __u32)
#define CEC_S_MODE _IOW('a', 9, __u32)
+/* Get the connector info */
+#define CEC_ADAP_G_CONNECTOR_INFO _IOR('a', 10, struct cec_connector_info)
+
/*
* The remainder of this header defines all CEC messages and operands.
* The format matters since the cec-ctl utility parses it to generate
@@ -768,8 +808,8 @@ struct cec_event {
#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93
#define CEC_MSG_TUNER_DEVICE_STATUS 0x07
/* Recording Flag Operand (rec_flag) */
-#define CEC_OP_REC_FLAG_USED 0
-#define CEC_OP_REC_FLAG_NOT_USED 1
+#define CEC_OP_REC_FLAG_NOT_USED 0
+#define CEC_OP_REC_FLAG_USED 1
/* Tuner Display Info Operand (tuner_display_info) */
#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0
#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1
@@ -820,6 +860,95 @@ struct cec_event {
#define CEC_OP_MENU_STATE_DEACTIVATED 0x01
#define CEC_MSG_USER_CONTROL_PRESSED 0x44
+/* UI Command Operand (ui_cmd) */
+#define CEC_OP_UI_CMD_SELECT 0x00
+#define CEC_OP_UI_CMD_UP 0x01
+#define CEC_OP_UI_CMD_DOWN 0x02
+#define CEC_OP_UI_CMD_LEFT 0x03
+#define CEC_OP_UI_CMD_RIGHT 0x04
+#define CEC_OP_UI_CMD_RIGHT_UP 0x05
+#define CEC_OP_UI_CMD_RIGHT_DOWN 0x06
+#define CEC_OP_UI_CMD_LEFT_UP 0x07
+#define CEC_OP_UI_CMD_LEFT_DOWN 0x08
+#define CEC_OP_UI_CMD_DEVICE_ROOT_MENU 0x09
+#define CEC_OP_UI_CMD_DEVICE_SETUP_MENU 0x0a
+#define CEC_OP_UI_CMD_CONTENTS_MENU 0x0b
+#define CEC_OP_UI_CMD_FAVORITE_MENU 0x0c
+#define CEC_OP_UI_CMD_BACK 0x0d
+#define CEC_OP_UI_CMD_MEDIA_TOP_MENU 0x10
+#define CEC_OP_UI_CMD_MEDIA_CONTEXT_SENSITIVE_MENU 0x11
+#define CEC_OP_UI_CMD_NUMBER_ENTRY_MODE 0x1d
+#define CEC_OP_UI_CMD_NUMBER_11 0x1e
+#define CEC_OP_UI_CMD_NUMBER_12 0x1f
+#define CEC_OP_UI_CMD_NUMBER_0_OR_NUMBER_10 0x20
+#define CEC_OP_UI_CMD_NUMBER_1 0x21
+#define CEC_OP_UI_CMD_NUMBER_2 0x22
+#define CEC_OP_UI_CMD_NUMBER_3 0x23
+#define CEC_OP_UI_CMD_NUMBER_4 0x24
+#define CEC_OP_UI_CMD_NUMBER_5 0x25
+#define CEC_OP_UI_CMD_NUMBER_6 0x26
+#define CEC_OP_UI_CMD_NUMBER_7 0x27
+#define CEC_OP_UI_CMD_NUMBER_8 0x28
+#define CEC_OP_UI_CMD_NUMBER_9 0x29
+#define CEC_OP_UI_CMD_DOT 0x2a
+#define CEC_OP_UI_CMD_ENTER 0x2b
+#define CEC_OP_UI_CMD_CLEAR 0x2c
+#define CEC_OP_UI_CMD_NEXT_FAVORITE 0x2f
+#define CEC_OP_UI_CMD_CHANNEL_UP 0x30
+#define CEC_OP_UI_CMD_CHANNEL_DOWN 0x31
+#define CEC_OP_UI_CMD_PREVIOUS_CHANNEL 0x32
+#define CEC_OP_UI_CMD_SOUND_SELECT 0x33
+#define CEC_OP_UI_CMD_INPUT_SELECT 0x34
+#define CEC_OP_UI_CMD_DISPLAY_INFORMATION 0x35
+#define CEC_OP_UI_CMD_HELP 0x36
+#define CEC_OP_UI_CMD_PAGE_UP 0x37
+#define CEC_OP_UI_CMD_PAGE_DOWN 0x38
+#define CEC_OP_UI_CMD_POWER 0x40
+#define CEC_OP_UI_CMD_VOLUME_UP 0x41
+#define CEC_OP_UI_CMD_VOLUME_DOWN 0x42
+#define CEC_OP_UI_CMD_MUTE 0x43
+#define CEC_OP_UI_CMD_PLAY 0x44
+#define CEC_OP_UI_CMD_STOP 0x45
+#define CEC_OP_UI_CMD_PAUSE 0x46
+#define CEC_OP_UI_CMD_RECORD 0x47
+#define CEC_OP_UI_CMD_REWIND 0x48
+#define CEC_OP_UI_CMD_FAST_FORWARD 0x49
+#define CEC_OP_UI_CMD_EJECT 0x4a
+#define CEC_OP_UI_CMD_SKIP_FORWARD 0x4b
+#define CEC_OP_UI_CMD_SKIP_BACKWARD 0x4c
+#define CEC_OP_UI_CMD_STOP_RECORD 0x4d
+#define CEC_OP_UI_CMD_PAUSE_RECORD 0x4e
+#define CEC_OP_UI_CMD_ANGLE 0x50
+#define CEC_OP_UI_CMD_SUB_PICTURE 0x51
+#define CEC_OP_UI_CMD_VIDEO_ON_DEMAND 0x52
+#define CEC_OP_UI_CMD_ELECTRONIC_PROGRAM_GUIDE 0x53
+#define CEC_OP_UI_CMD_TIMER_PROGRAMMING 0x54
+#define CEC_OP_UI_CMD_INITIAL_CONFIGURATION 0x55
+#define CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE 0x56
+#define CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION 0x57
+#define CEC_OP_UI_CMD_AUDIO_DESCRIPTION 0x58
+#define CEC_OP_UI_CMD_INTERNET 0x59
+#define CEC_OP_UI_CMD_3D_MODE 0x5a
+#define CEC_OP_UI_CMD_PLAY_FUNCTION 0x60
+#define CEC_OP_UI_CMD_PAUSE_PLAY_FUNCTION 0x61
+#define CEC_OP_UI_CMD_RECORD_FUNCTION 0x62
+#define CEC_OP_UI_CMD_PAUSE_RECORD_FUNCTION 0x63
+#define CEC_OP_UI_CMD_STOP_FUNCTION 0x64
+#define CEC_OP_UI_CMD_MUTE_FUNCTION 0x65
+#define CEC_OP_UI_CMD_RESTORE_VOLUME_FUNCTION 0x66
+#define CEC_OP_UI_CMD_TUNE_FUNCTION 0x67
+#define CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION 0x68
+#define CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION 0x69
+#define CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION 0x6a
+#define CEC_OP_UI_CMD_POWER_TOGGLE_FUNCTION 0x6b
+#define CEC_OP_UI_CMD_POWER_OFF_FUNCTION 0x6c
+#define CEC_OP_UI_CMD_POWER_ON_FUNCTION 0x6d
+#define CEC_OP_UI_CMD_F1_BLUE 0x71
+#define CEC_OP_UI_CMD_F2_RED 0x72
+#define CEC_OP_UI_CMD_F3_GREEN 0x73
+#define CEC_OP_UI_CMD_F4_YELLOW 0x74
+#define CEC_OP_UI_CMD_F5 0x75
+#define CEC_OP_UI_CMD_DATA 0x76
/* UI Broadcast Type Operand (ui_bcast_type) */
#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00
#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01
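A short hedged sketch of querying the new connector info from a CEC device node (assumes /dev/cec0 and a kernel exposing CEC_CAP_CONNECTOR_INFO):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_caps caps;
	struct cec_connector_info info;
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0 || ioctl(fd, CEC_ADAP_G_CAPS, &caps))
		return 1;
	if (!(caps.capabilities & CEC_CAP_CONNECTOR_INFO))
		return 0;	/* this adapter has no connector info */
	if (!ioctl(fd, CEC_ADAP_G_CONNECTOR_INFO, &info) &&
	    info.type == CEC_CONNECTOR_TYPE_DRM)
		printf("drm card %u connector %u\n",
		       info.drm.card_no, info.drm.connector_id);
	return 0;
}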
diff --git a/include/uapi/linux/chio.h b/include/uapi/linux/chio.h
index 689fc93fafda..e1cad4c319ee 100644
--- a/include/uapi/linux/chio.h
+++ b/include/uapi/linux/chio.h
@@ -3,6 +3,9 @@
* ioctl interface for the scsi media changer driver
*/
+#ifndef _UAPI_LINUX_CHIO_H
+#define _UAPI_LINUX_CHIO_H
+
/* changer element types */
#define CHET_MT 0 /* media transport element (robot) */
#define CHET_ST 1 /* storage element (media slots) */
@@ -160,10 +163,4 @@ struct changer_set_voltag {
#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag)
#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params)
-/* ---------------------------------------------------------------------- */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+#endif /* _UAPI_LINUX_CHIO_H */
diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h
index 8279bc3d60ca..fc0add2194a9 100644
--- a/include/uapi/linux/cyclades.h
+++ b/include/uapi/linux/cyclades.h
@@ -83,9 +83,9 @@ struct cyclades_monitor {
* open)
*/
struct cyclades_idle_stats {
- __kernel_time_t in_use; /* Time device has been in use (secs) */
- __kernel_time_t recv_idle; /* Time since last char received (secs) */
- __kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */
+ __kernel_old_time_t in_use; /* Time device has been in use (secs) */
+ __kernel_old_time_t recv_idle; /* Time since last char received (secs) */
+ __kernel_old_time_t xmit_idle; /* Time since last char transmitted (secs) */
unsigned long recv_bytes; /* Bytes received */
unsigned long xmit_bytes; /* Bytes transmitted */
unsigned long overruns; /* Input overruns */
diff --git a/include/uapi/linux/elfcore.h b/include/uapi/linux/elfcore.h
index 0b2c9e16e345..baf03562306d 100644
--- a/include/uapi/linux/elfcore.h
+++ b/include/uapi/linux/elfcore.h
@@ -53,10 +53,10 @@ struct elf_prstatus
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct timeval pr_utime; /* User time */
- struct timeval pr_stime; /* System time */
- struct timeval pr_cutime; /* Cumulative user time */
- struct timeval pr_cstime; /* Cumulative system time */
+ struct __kernel_old_timeval pr_utime; /* User time */
+ struct __kernel_old_timeval pr_stime; /* System time */
+ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
+ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
#if 0
long pr_instr; /* Current instruction */
#endif
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index 28491dac074b..0cca19670fd2 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -37,9 +37,16 @@ struct sock_extended_err {
* The timestamping interfaces SO_TIMESTAMPING, MSG_TSTAMP_*
* communicate network timestamps by passing this struct in a cmsg with
* recvmsg(). See Documentation/networking/timestamping.txt for details.
+ * User space sees a timespec definition that matches either
+ * __kernel_timespec or __kernel_old_timespec; in the kernel we
+ * require two structure definitions to provide both.
*/
struct scm_timestamping {
+#ifdef __KERNEL__
+ struct __kernel_old_timespec ts[3];
+#else
struct timespec ts[3];
+#endif
};
struct scm_timestamping64 {
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 4ebfe0ac6c5b..799cf823d493 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -33,6 +33,9 @@ struct gpiochip_info {
#define GPIOLINE_FLAG_ACTIVE_LOW (1UL << 2)
#define GPIOLINE_FLAG_OPEN_DRAIN (1UL << 3)
#define GPIOLINE_FLAG_OPEN_SOURCE (1UL << 4)
+#define GPIOLINE_FLAG_BIAS_PULL_UP (1UL << 5)
+#define GPIOLINE_FLAG_BIAS_PULL_DOWN (1UL << 6)
+#define GPIOLINE_FLAG_BIAS_DISABLE (1UL << 7)
/**
* struct gpioline_info - Information about a certain GPIO line
@@ -62,6 +65,9 @@ struct gpioline_info {
#define GPIOHANDLE_REQUEST_ACTIVE_LOW (1UL << 2)
#define GPIOHANDLE_REQUEST_OPEN_DRAIN (1UL << 3)
#define GPIOHANDLE_REQUEST_OPEN_SOURCE (1UL << 4)
+#define GPIOHANDLE_REQUEST_BIAS_PULL_UP (1UL << 5)
+#define GPIOHANDLE_REQUEST_BIAS_PULL_DOWN (1UL << 6)
+#define GPIOHANDLE_REQUEST_BIAS_DISABLE (1UL << 7)
/**
* struct gpiohandle_request - Information about a GPIO handle request
@@ -95,6 +101,24 @@ struct gpiohandle_request {
};
/**
+ * struct gpiohandle_config - Configuration for a GPIO handle request
+ * @flags: updated flags for the requested GPIO lines, such as
+ * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc, OR:ed
+ * together
+ * @default_values: if the GPIOHANDLE_REQUEST_OUTPUT is set in flags,
+ * this specifies the default output value, should be 0 (low) or
+ * 1 (high), anything else than 0 or 1 will be interpreted as 1 (high)
+ * @padding: reserved for future use and should be zero filled
+ */
+struct gpiohandle_config {
+ __u32 flags;
+ __u8 default_values[GPIOHANDLES_MAX];
+ __u32 padding[4]; /* padding for future use */
+};
+
+#define GPIOHANDLE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0a, struct gpiohandle_config)
+
+/**
* struct gpiohandle_data - Information of values on a GPIO handle
* @values: when getting the state of lines this contains the current
* state of a line, when setting the state of lines these should contain
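A hedged userspace sketch of the new ioctl: request a line as input, then re-configure it with a pull-up bias (assumes /dev/gpiochip0 and a hypothetical line offset; error handling kept minimal):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiohandle_request req;
	struct gpiohandle_config config;
	int fd = open("/dev/gpiochip0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = 4;			/* hypothetical line */
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_INPUT;
	if (ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0)
		return 1;

	memset(&config, 0, sizeof(config));	/* padding must stay zeroed */
	config.flags = GPIOHANDLE_REQUEST_INPUT | GPIOHANDLE_REQUEST_BIAS_PULL_UP;
	return ioctl(req.fd, GPIOHANDLE_SET_CONFIG_IOCTL, &config) < 0;
}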
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 85387c76c24f..00aebeaae090 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -650,6 +650,81 @@
#define KEY_DATA 0x277
#define KEY_ONSCREEN_KEYBOARD 0x278
+/*
+ * Some keyboards have keys which do not have a defined meaning, these keys
+ * are intended to be programmed / bound to macros by the user. For most
+ * keyboards with these macro-keys the key-sequence to inject, or action to
+ * take, is all handled by software on the host side. So from the kernel's
+ * point of view these are just normal keys.
+ *
+ * The KEY_MACRO# codes below are intended for such keys, which may be labeled
+ * e.g. G1-G18, or S1 - S30. The KEY_MACRO# codes MUST NOT be used for keys
+ * where the marking on the key does indicate a defined meaning / purpose.
+ *
+ * The KEY_MACRO# codes MUST also NOT be used as fallback for when no existing
+ * KEY_FOO define matches the marking / purpose. In this case a new KEY_FOO
+ * define MUST be added.
+ */
+#define KEY_MACRO1 0x290
+#define KEY_MACRO2 0x291
+#define KEY_MACRO3 0x292
+#define KEY_MACRO4 0x293
+#define KEY_MACRO5 0x294
+#define KEY_MACRO6 0x295
+#define KEY_MACRO7 0x296
+#define KEY_MACRO8 0x297
+#define KEY_MACRO9 0x298
+#define KEY_MACRO10 0x299
+#define KEY_MACRO11 0x29a
+#define KEY_MACRO12 0x29b
+#define KEY_MACRO13 0x29c
+#define KEY_MACRO14 0x29d
+#define KEY_MACRO15 0x29e
+#define KEY_MACRO16 0x29f
+#define KEY_MACRO17 0x2a0
+#define KEY_MACRO18 0x2a1
+#define KEY_MACRO19 0x2a2
+#define KEY_MACRO20 0x2a3
+#define KEY_MACRO21 0x2a4
+#define KEY_MACRO22 0x2a5
+#define KEY_MACRO23 0x2a6
+#define KEY_MACRO24 0x2a7
+#define KEY_MACRO25 0x2a8
+#define KEY_MACRO26 0x2a9
+#define KEY_MACRO27 0x2aa
+#define KEY_MACRO28 0x2ab
+#define KEY_MACRO29 0x2ac
+#define KEY_MACRO30 0x2ad
+
+/*
+ * Some keyboards with the macro-keys described above have some extra keys
+ * for controlling the host-side software responsible for the macro handling:
+ * -A macro recording start/stop key. Note that not all keyboards which emit
+ * KEY_MACRO_RECORD_START will also emit KEY_MACRO_RECORD_STOP; if
+ * KEY_MACRO_RECORD_STOP is not advertised, then KEY_MACRO_RECORD_START
+ * should be interpreted as a recording start/stop toggle;
+ * -Keys for switching between different macro (pre)sets, either a key for
+ * cycling through the configured presets or keys to directly select a preset.
+ */
+#define KEY_MACRO_RECORD_START 0x2b0
+#define KEY_MACRO_RECORD_STOP 0x2b1
+#define KEY_MACRO_PRESET_CYCLE 0x2b2
+#define KEY_MACRO_PRESET1 0x2b3
+#define KEY_MACRO_PRESET2 0x2b4
+#define KEY_MACRO_PRESET3 0x2b5
+
+/*
+ * Some keyboards have a built-in LCD panel whose contents are controlled
+ * by the host. Often these have a number of keys directly below the LCD
+ * intended for controlling a menu shown on the LCD. These keys often don't
+ * have any labeling, so we just name them KEY_KBD_LCD_MENU#.
+ */
+#define KEY_KBD_LCD_MENU1 0x2b8
+#define KEY_KBD_LCD_MENU2 0x2b9
+#define KEY_KBD_LCD_MENU3 0x2ba
+#define KEY_KBD_LCD_MENU4 0x2bb
+#define KEY_KBD_LCD_MENU5 0x2bc
+
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 2a1569211d87..4637ed1d9949 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -73,6 +73,7 @@ struct io_uring_sqe {
#define IORING_OP_ACCEPT 13
#define IORING_OP_ASYNC_CANCEL 14
#define IORING_OP_LINK_TIMEOUT 15
+#define IORING_OP_CONNECT 16
/*
* sqe->fsync_flags
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index fc00c5d4741b..4ad3496e5c43 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -152,4 +152,173 @@ struct iommu_page_response {
__u32 code;
};
+/* defines the granularity of the invalidation */
+enum iommu_inv_granularity {
+ IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
+ IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
+ IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
+ IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
+};
+
+/**
+ * struct iommu_inv_addr_info - Address Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the address-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If ARCHID bit is set, @archid is populated and the invalidation relates
+ * to cache entries tagged with this architecture specific ID and matching
+ * the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - If neither PASID or ARCHID is set, global addr invalidation applies.
+ * - The LEAF flag indicates whether only the leaf PTE caching needs to be
+ * invalidated and other paging structure caches can be preserved.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ * @addr: first stage/level input address
+ * @granule_size: page/block size of the mapping in bytes
+ * @nb_granules: number of contiguous granules to be invalidated
+ */
+struct iommu_inv_addr_info {
+#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
+#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+ __u64 addr;
+ __u64 granule_size;
+ __u64 nb_granules;
+};
+
+/**
+ * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the PASID-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If the ARCHID bit is set, the @archid is populated and the invalidation
+ * relates to cache entries tagged with this architecture specific ID and
+ * matching the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - At least one of PASID or ARCHID must be set.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ */
+struct iommu_inv_pasid_info {
+#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+};
+
+/**
+ * struct iommu_cache_invalidate_info - First level/stage invalidation
+ * information
+ * @version: API version of this structure
+ * @cache: bitfield that allows to select which caches to invalidate
+ * @granularity: defines the lowest granularity used for the invalidation:
+ * domain > PASID > addr
+ * @padding: reserved for future use (should be zero)
+ * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
+ * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
+ *
+ * Not all the combinations of cache/granularity are valid:
+ *
+ * +--------------+---------------+---------------+---------------+
+ * | type / | DEV_IOTLB | IOTLB | PASID |
+ * | granularity | | | cache |
+ * +==============+===============+===============+===============+
+ * | DOMAIN | N/A | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | PASID | Y | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | ADDR | Y | Y | N/A |
+ * +--------------+---------------+---------------+---------------+
+ *
+ * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
+ * @version and @cache.
+ *
+ * If multiple cache types are invalidated simultaneously, they all
+ * must support the used granularity.
+ */
+struct iommu_cache_invalidate_info {
+#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
+ __u32 version;
+/* IOMMU paging structure cache */
+#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
+#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
+#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
+#define IOMMU_CACHE_INV_TYPE_NR (3)
+ __u8 cache;
+ __u8 granularity;
+ __u8 padding[2];
+ union {
+ struct iommu_inv_pasid_info pasid_info;
+ struct iommu_inv_addr_info addr_info;
+ };
+};
+
+/**
+ * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
+ * SVA binding.
+ *
+ * @flags: VT-d PASID table entry attributes
+ * @pat: Page attribute table data to compute effective memory type
+ * @emt: Extended memory type
+ *
+ * Only guest vIOMMU selectable and effective options are passed down to
+ * the host IOMMU.
+ */
+struct iommu_gpasid_bind_data_vtd {
+#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
+#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
+#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
+#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
+#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
+#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
+ __u64 flags;
+ __u32 pat;
+ __u32 emt;
+};
+
+/**
+ * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
+ * @version: Version of this data structure
+ * @format: PASID table entry format
+ * @flags: Additional information on guest bind request
+ * @gpgd: Guest page directory base of the guest mm to bind
+ * @hpasid: Process address space ID used for the guest mm in host IOMMU
+ * @gpasid: Process address space ID used for the guest mm in guest IOMMU
+ * @addr_width: Guest virtual address width
+ * @padding: Reserved for future use (should be zero)
+ * @vtd: Intel VT-d specific data
+ *
+ * Guest to host PASID mapping can be an identity or non-identity, where guest
+ * has its own PASID space. For a non-identity mapping, guest to host PASID
+ * lookup is needed when the VM programs a guest PASID into an assigned device.
+ * The VMM may trap such PASID programming and then request the host IOMMU
+ * driver to convert the guest PASID to a host PASID based on this bind data.
+ */
+struct iommu_gpasid_bind_data {
+#define IOMMU_GPASID_BIND_VERSION_1 1
+ __u32 version;
+#define IOMMU_PASID_FORMAT_INTEL_VTD 1
+ __u32 format;
+#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
+ __u64 flags;
+ __u64 gpgd;
+ __u64 hpasid;
+ __u64 gpasid;
+ __u32 addr_width;
+ __u8 padding[12];
+ /* Vendor specific data */
+ union {
+ struct iommu_gpasid_bind_data_vtd vtd;
+ };
+};
+
#endif /* _UAPI_IOMMU_H */
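Illustratively, a caller (e.g. a VMM relaying a guest invalidation) would fill the structure like this for a PASID-selective IOTLB flush — a hedged sketch; the channel that delivers it to the IOMMU driver is outside this header:

#include <string.h>
#include <linux/iommu.h>

static void fill_pasid_inv(struct iommu_cache_invalidate_info *inv, __u64 pasid)
{
	memset(inv, 0, sizeof(*inv));		/* padding must be zero */
	inv->version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
	inv->cache = IOMMU_CACHE_INV_TYPE_IOTLB;
	inv->granularity = IOMMU_INV_GRANU_PASID;
	inv->pasid_info.flags = IOMMU_INV_PASID_FLAGS_PASID;
	inv->pasid_info.pasid = pasid;
}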
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 903cc2d2750b..3ac436376d79 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -94,5 +94,6 @@
#define ZSMALLOC_MAGIC 0x58295829
#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */
#define Z3FOLD_MAGIC 0x33
+#define PPC_CMM_MAGIC 0xc7571590
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h
index e4a0d9a9a9e8..01ee8d54c1c8 100644
--- a/include/uapi/linux/msg.h
+++ b/include/uapi/linux/msg.h
@@ -19,9 +19,9 @@ struct msqid_ds {
struct ipc_perm msg_perm;
struct msg *msg_first; /* first message on queue,unused */
struct msg *msg_last; /* last message in queue,unused */
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ __kernel_old_time_t msg_stime; /* last msgsnd time */
+ __kernel_old_time_t msg_rtime; /* last msgrcv time */
+ __kernel_old_time_t msg_ctime; /* last change time */
unsigned long msg_lcbytes; /* Reuse junk fields for 32 bit */
unsigned long msg_lqbytes; /* ditto */
unsigned short msg_cbytes; /* current number of bytes on queue */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 29d6e93fd15e..acb7d2bdb419 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -34,6 +34,7 @@
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_STD_HEADER_SIZEOF 64
+#define PCI_STD_NUM_BARS 6 /* Number of standard BARs */
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
@@ -673,6 +674,8 @@
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
+#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 88b5f9990320..7bd2a5a75348 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -104,6 +104,8 @@ struct pppol2tp_ioc_stats {
#define PPPIOCGDEBUG _IOR('t', 65, int) /* Read debug level */
#define PPPIOCSDEBUG _IOW('t', 64, int) /* Set debug level */
#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */
+#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32) /* 32-bit times */
+#define PPPIOCGIDLE64 _IOR('t', 63, struct ppp_idle64) /* 64-bit times */
#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */
#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */
#define PPPIOCDETACH _IOW('t', 60, int) /* obsolete, do not use */
diff --git a/include/uapi/linux/ppp_defs.h b/include/uapi/linux/ppp_defs.h
index fff51b91b409..20286bd90ab5 100644
--- a/include/uapi/linux/ppp_defs.h
+++ b/include/uapi/linux/ppp_defs.h
@@ -142,10 +142,24 @@ struct ppp_comp_stats {
/*
* The following structure records the time in seconds since
* the last NP packet was sent or received.
+ *
+ * Linux implements both 32-bit and 64-bit time_t versions
+ * for compatibility with user space that defines ppp_idle
+ * based on the libc time_t.
*/
struct ppp_idle {
- __kernel_time_t xmit_idle; /* time since last NP packet sent */
- __kernel_time_t recv_idle; /* time since last NP packet received */
+ __kernel_old_time_t xmit_idle; /* time since last NP packet sent */
+ __kernel_old_time_t recv_idle; /* time since last NP packet received */
+};
+
+struct ppp_idle32 {
+ __s32 xmit_idle; /* time since last NP packet sent */
+ __s32 recv_idle; /* time since last NP packet received */
+};
+
+struct ppp_idle64 {
+ __s64 xmit_idle; /* time since last NP packet sent */
+ __s64 recv_idle; /* time since last NP packet received */
};
#endif /* _UAPI_PPP_DEFS_H_ */
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index cc00fd079631..74ef57b38f9f 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -22,8 +22,8 @@
#define RUSAGE_THREAD 1 /* only the calling thread */
struct rusage {
- struct timeval ru_utime; /* user time used */
- struct timeval ru_stime; /* system time used */
+ struct __kernel_old_timeval ru_utime; /* user time used */
+ struct __kernel_old_timeval ru_stime; /* system time used */
__kernel_long_t ru_maxrss; /* maximum resident set size */
__kernel_long_t ru_ixrss; /* integral shared memory size */
__kernel_long_t ru_idrss; /* integral unshared data size */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 90734aa5aa36..be84d87f1f46 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -76,6 +76,35 @@ struct seccomp_notif {
struct seccomp_data data;
};
+/*
+ * Valid flags for struct seccomp_notif_resp
+ *
+ * Note, the SECCOMP_USER_NOTIF_FLAG_CONTINUE flag must be used with caution!
+ * If set by the process supervising the syscalls of another process the
+ * syscall will continue. This is problematic because of an inherent TOCTOU.
+ * An attacker can exploit the time while the supervised process is waiting on
+ * a response from the supervising process to rewrite syscall arguments which
+ * are passed as pointers to the intercepted syscall.
+ * It should be absolutely clear that this means that the seccomp notifier
+ * _cannot_ be used to implement a security policy! It should only ever be used
+ * in scenarios where a more privileged process supervises the syscalls of a
+ * lesser privileged process to get around kernel-enforced security
+ * restrictions when the privileged process deems this safe. In other words,
+ * in order to continue a syscall the supervising process should be sure that
+ * another security mechanism or the kernel itself will sufficiently block
+ * syscalls if arguments are rewritten to something unsafe.
+ *
+ * Similar precautions should be applied when stacking SECCOMP_RET_USER_NOTIF
+ * or SECCOMP_RET_TRACE. For SECCOMP_RET_USER_NOTIF filters acting on the
+ * same syscall, the most recently added filter takes precedence. This means
+ * that the new SECCOMP_RET_USER_NOTIF filter can override any
+ * SECCOMP_IOCTL_NOTIF_SEND from earlier filters, essentially allowing all
+ * such filtered syscalls to be executed by sending the response
+ * SECCOMP_USER_NOTIF_FLAG_CONTINUE. Note that SECCOMP_RET_TRACE can equally
+ * be overridden by SECCOMP_USER_NOTIF_FLAG_CONTINUE.
+ */
+#define SECCOMP_USER_NOTIF_FLAG_CONTINUE (1UL << 0)
+
struct seccomp_notif_resp {
__u64 id;
__s64 val;
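A hedged sketch of a supervisor answering a notification with the new flag, heeding the TOCTOU caveat above (assumes a notification fd obtained via SECCOMP_FILTER_FLAG_NEW_LISTENER and an id from SECCOMP_IOCTL_NOTIF_RECV):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

static int continue_syscall(int notify_fd, __u64 id)
{
	struct seccomp_notif_resp resp;

	memset(&resp, 0, sizeof(resp));
	resp.id = id;
	resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
	/* val and error stay 0: the kernel executes the real syscall */
	return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}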
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index 39a1876f039e..75aa3b273cd9 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -24,8 +24,8 @@
/* Obsolete, used only for backwards compatibility and libc5 compiles */
struct semid_ds {
struct ipc_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* create/last semctl() time */
+ __kernel_old_time_t sem_otime; /* last semop time */
+ __kernel_old_time_t sem_ctime; /* create/last semctl() time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct sem_queue *sem_pending; /* pending operations to be processed */
struct sem_queue **sem_pending_last; /* last pending operation */
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index e7fe550b6038..8ec3dd742ea4 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -290,7 +290,7 @@
/* Sunix UART */
#define PORT_SUNIX 121
-/* Freescale Linflex UART */
+/* Freescale LINFlexD UART */
#define PORT_LINFLEXUART 122
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index 6507ad0afc81..8d1f17a4e08e 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -28,9 +28,9 @@
struct shmid_ds {
struct ipc_perm shm_perm; /* operation perms */
int shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ __kernel_old_time_t shm_atime; /* last attach time */
+ __kernel_old_time_t shm_dtime; /* last detach time */
+ __kernel_old_time_t shm_ctime; /* last change time */
__kernel_ipc_pid_t shm_cpid; /* pid of creator */
__kernel_ipc_pid_t shm_lpid; /* pid of last operator */
unsigned short shm_nattch; /* no. of current attaches */
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 958932effc5e..a655aa28dc6e 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -8,13 +8,13 @@
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
struct timespec {
- __kernel_time_t tv_sec; /* seconds */
- long tv_nsec; /* nanoseconds */
+ __kernel_old_time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
};
#endif
struct timeval {
- __kernel_time_t tv_sec; /* seconds */
+ __kernel_old_time_t tv_sec; /* seconds */
__kernel_suseconds_t tv_usec; /* microseconds */
};
diff --git a/include/uapi/linux/time_types.h b/include/uapi/linux/time_types.h
index 27bfc8fc6904..074e391d73a1 100644
--- a/include/uapi/linux/time_types.h
+++ b/include/uapi/linux/time_types.h
@@ -28,6 +28,11 @@ struct __kernel_old_timeval {
};
#endif
+struct __kernel_old_timespec {
+ __kernel_old_time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
struct __kernel_sock_timeval {
__s64 tv_sec;
__s64 tv_usec;
diff --git a/include/uapi/linux/utime.h b/include/uapi/linux/utime.h
index fd9aa26b6860..bc8f13e81d6e 100644
--- a/include/uapi/linux/utime.h
+++ b/include/uapi/linux/utime.h
@@ -5,8 +5,8 @@
#include <linux/types.h>
struct utimbuf {
- __kernel_time_t actime;
- __kernel_time_t modtime;
+ __kernel_old_time_t actime;
+ __kernel_old_time_t modtime;
};
#endif
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a2669b79b294..5a7bedee2b0e 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1034,6 +1034,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
+#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
/* Image processing controls */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 530638dffd93..04481c717fee 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -422,6 +422,11 @@ struct v4l2_fract {
__u32 denominator;
};
+struct v4l2_area {
+ __u32 width;
+ __u32 height;
+};
+
/**
* struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
*
@@ -755,6 +760,7 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
+#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -915,11 +921,12 @@ struct v4l2_requestbuffers {
};
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
-#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
-#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
-#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
-#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
-#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
+#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
+#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
+#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
+#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -1041,6 +1048,8 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
+/* Don't return the capture buffer until OUTPUT timestamp changes */
+#define V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF 0x00000200
/* Buffer is prepared for queuing */
#define V4L2_BUF_FLAG_PREPARED 0x00000400
/* Cache handling flags */
@@ -1675,6 +1684,7 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
+ struct v4l2_area __user *p_area;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1720,6 +1730,7 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
+ V4L2_CTRL_TYPE_AREA = 0x0106,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1975,6 +1986,7 @@ struct v4l2_encoder_cmd {
#define V4L2_DEC_CMD_STOP (1)
#define V4L2_DEC_CMD_PAUSE (2)
#define V4L2_DEC_CMD_RESUME (3)
+#define V4L2_DEC_CMD_FLUSH (4)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
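
A userspace sketch of the compound-area control plumbing added above: reading V4L2_CID_UNIT_CELL_SIZE through the new p_area union member via VIDIOC_G_EXT_CTRLS. The device node is a placeholder, and a sensor exposing this control is assumed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_area area;
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* placeholder node */

	if (fd < 0)
		return 1;

	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));
	ctrl.id = V4L2_CID_UNIT_CELL_SIZE;
	ctrl.size = sizeof(area);
	ctrl.p_area = &area;			/* new union member */
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
		printf("unit cell: %ux%u\n", area.width, area.height);
	close(fd);
	return 0;
}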
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index fb792e882cef..07de2b7aac85 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -10,6 +10,8 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
struct fastrpc_invoke_args {
__u64 ptr;
@@ -38,4 +40,17 @@ struct fastrpc_alloc_dma_buf {
__u64 size; /* size */
};
+struct fastrpc_req_mmap {
+ __s32 fd;
+ __u32 flags; /* flags for dsp to map with */
+ __u64 vaddrin; /* optional virtual address */
+ __u64 size; /* size */
+ __u64 vaddrout; /* dsp virtual address */
+};
+
+struct fastrpc_req_munmap {
+ __u64 vaddrout; /* address to unmap */
+ __u64 size; /* size */
+};
+
#endif /* __QCOM_FASTRPC_H__ */
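
A hedged sketch of driving the two new fastrpc ioctls from userspace; the dma-buf fd and the zero flags value are assumptions for illustration:

#include <string.h>
#include <sys/ioctl.h>
#include <misc/fastrpc.h>

/* Map a buffer into the DSP address space, then tear it down again. */
static int example_dsp_map(int dev_fd, int buf_fd, __u64 size)
{
	struct fastrpc_req_mmap map;
	struct fastrpc_req_munmap unmap;

	memset(&map, 0, sizeof(map));
	map.fd = buf_fd;	/* buffer to map (assumed to be a dma-buf) */
	map.flags = 0;		/* DSP mapping flags; 0 as a placeholder */
	map.vaddrin = 0;	/* let the driver choose the address */
	map.size = size;
	if (ioctl(dev_fd, FASTRPC_IOCTL_MMAP, &map))
		return -1;

	/* map.vaddrout now holds the DSP-side virtual address. */
	memset(&unmap, 0, sizeof(unmap));
	unmap.vaddrout = map.vaddrout;
	unmap.size = size;
	return ioctl(dev_fd, FASTRPC_IOCTL_MUNMAP, &unmap);
}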
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 39c4ea51a719..4faa2c9767e5 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -88,13 +88,19 @@ enum hl_device_status {
* internal engine.
* HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
* require an open context.
- * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
- * over the last period specified by the user.
- * The period can be between 100ms to 1s, in
- * resolution of 100ms. The return value is a
- * percentage of the utilization rate.
+ * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
+ * over the last period specified by the user.
+ * The period can be between 100ms and 1s, in
+ * resolution of 100ms. The return value is a
+ * percentage of the utilization rate.
* HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
* event occurred since the driver was loaded.
+ * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
+ * of the device in MHz. The maximum clock rate is
+ * configurable via a sysfs parameter.
+ * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
+ * operations performed on the device since the last
+ * time the driver was loaded.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -103,8 +109,11 @@ enum hl_device_status {
#define HL_INFO_DEVICE_STATUS 4
#define HL_INFO_DEVICE_UTILIZATION 6
#define HL_INFO_HW_EVENTS_AGGREGATE 7
+#define HL_INFO_CLK_RATE 8
+#define HL_INFO_RESET_COUNT 9
#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_CARD_NAME_MAX_LEN 16
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@@ -123,6 +132,7 @@ struct hl_info_hw_ip_info {
__u8 dram_enabled;
__u8 pad[2];
__u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
+ __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};
struct hl_info_dram_usage {
@@ -149,6 +159,16 @@ struct hl_info_device_utilization {
__u32 pad;
};
+struct hl_info_clk_rate {
+ __u32 cur_clk_rate_mhz;
+ __u32 max_clk_rate_mhz;
+};
+
+struct hl_info_reset_count {
+ __u32 hard_reset_cnt;
+ __u32 soft_reset_cnt;
+};
+
struct hl_info_args {
/* Location of relevant struct in userspace */
__u64 return_pointer;
@@ -181,13 +201,15 @@ struct hl_info_args {
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY 1
+#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+
struct hl_cb_in {
/* Handle of CB or 0 if we want to create one */
__u64 cb_handle;
/* HL_CB_OP_* */
__u32 op;
- /* Size of CB. Maximum size is 2MB. The minimum size that will be
- * allocated, regardless of this parameter's value, is PAGE_SIZE
+ /* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
+ * will be allocated, regardless of this parameter's value, is PAGE_SIZE
*/
__u32 cb_size;
/* Context ID - Currently not in use */
@@ -233,6 +255,8 @@ struct hl_cs_chunk {
#define HL_CS_STATUS_SUCCESS 0
+#define HL_MAX_JOBS_PER_CS 512
+
struct hl_cs_in {
/* this holds address of array of hl_cs_chunk for restore phase */
__u64 chunks_restore;
@@ -242,9 +266,13 @@ struct hl_cs_in {
* Currently not in use
*/
__u64 chunks_store;
- /* Number of chunks in restore phase array */
+ /* Number of chunks in restore phase array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
__u32 num_chunks_restore;
- /* Number of chunks in execution array */
+ /* Number of chunks in execution array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
__u32 num_chunks_execute;
/* Number of chunks in restore phase array - Currently not in use */
__u32 num_chunks_store;
@@ -589,7 +617,7 @@ struct hl_debug_args {
*
* The user can call this IOCTL with a handle it received from the CS IOCTL
* to wait until the handle's CS has finished executing. The user will wait
- * inside the kernel until the CS has finished or until the user-requeusted
+ * inside the kernel until the CS has finished or until the user-requested
* timeout has expired.
*
* The return value of the IOCTL is a standard Linux error code. The possible
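
A hedged userspace sketch of the new HL_INFO_CLK_RATE query; it assumes the usual INFO entry point (HL_IOCTL_INFO) and a device fd obtained elsewhere:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int example_query_clk(int fd)
{
	struct hl_info_clk_rate clk;
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_CLK_RATE;
	args.return_pointer = (__u64)(uintptr_t)&clk;	/* filled by kernel */
	args.return_size = sizeof(clk);

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return -1;
	printf("clock: %u MHz (max %u MHz)\n",
	       clk.cur_clk_rate_mhz, clk.max_clk_rate_mhz);
	return 0;
}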
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
deleted file mode 100644
index 85aed672f43e..000000000000
--- a/include/uapi/rdma/cxgb3-abi.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef CXGB3_ABI_USER_H
-#define CXGB3_ABI_USER_H
-
-#include <linux/types.h>
-
-#define IWCH_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __aligned_u64
- * instead.
- */
-struct iwch_create_cq_req {
- __aligned_u64 user_rptr_addr;
-};
-
-struct iwch_create_cq_resp_v0 {
- __aligned_u64 key;
- __u32 cqid;
- __u32 size_log2;
-};
-
-struct iwch_create_cq_resp {
- __aligned_u64 key;
- __u32 cqid;
- __u32 size_log2;
- __u32 memsize;
- __u32 reserved;
-};
-
-struct iwch_create_qp_resp {
- __aligned_u64 key;
- __aligned_u64 db_key;
- __u32 qpid;
- __u32 size_log2;
- __u32 sq_size_log2;
- __u32 rq_size_log2;
-};
-
-struct iwch_reg_user_mr_resp {
- __u32 pbl_addr;
-};
-
-struct iwch_alloc_pd_resp {
- __u32 pdid;
-};
-
-#endif /* CXGB3_ABI_USER_H */
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 9599a2a62be8..53b6e2036a9b 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -90,12 +90,18 @@ struct efa_ibv_create_ah_resp {
__u8 reserved_30[2];
};
+enum {
+ EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
+};
+
struct efa_ibv_ex_query_device_resp {
__u32 comp_mask;
__u32 max_sq_wr;
__u32 max_rq_wr;
__u16 max_sq_sge;
__u16 max_rq_sge;
+ __u32 max_rdma_size;
+ __u32 device_caps;
};
#endif /* EFA_ABI_USER_H */
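
A small sketch of consuming the new EFA capability bit; in practice the response struct is filled by the kernel inside the rdma-core efa provider's extended query-device path, elided here:

#include <stdbool.h>
#include <rdma/efa-abi.h>

static bool efa_supports_rdma_read(const struct efa_ibv_ex_query_device_resp *resp)
{
	/* device_caps is a new field; older kernels do not return it, which
	 * a provider must detect via the returned response length. */
	return resp->device_caps & EFA_QUERY_DEVICE_CAPS_RDMA_READ;
}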
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 72c7fc75f960..9019b2d906ea 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -173,4 +173,26 @@ struct ib_uverbs_query_port_resp_ex {
__u8 reserved[6];
};
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+ RDMA_DRIVER_EFA,
+ RDMA_DRIVER_SIW,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index d0da070cf0ab..20d88307f75f 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -198,6 +198,7 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
MLX5_IB_ATTR_CREATE_FLOW_TAG,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
};
enum mlx5_ib_destoy_flow_attrs {
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
deleted file mode 100644
index f80495baa969..000000000000
--- a/include/uapi/rdma/nes-abi.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_ABI_USER_H
-#define NES_ABI_USER_H
-
-#include <linux/types.h>
-
-#define NES_ABI_USERSPACE_VER 2
-#define NES_ABI_KERNEL_VER 2
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct nes_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct nes_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
- __u8 kernel_ver;
- __u8 reserved[2];
-};
-
-struct nes_alloc_pd_resp {
- __u32 pd_id;
- __u32 mmap_db_index;
-};
-
-struct nes_create_cq_req {
- __aligned_u64 user_cq_buffer;
- __u32 mcrqf;
- __u8 reserved[4];
-};
-
-struct nes_create_qp_req {
- __aligned_u64 user_wqe_buffers;
- __aligned_u64 user_qp_buffer;
-};
-
-enum iwnes_memreg_type {
- IWNES_MEMREG_TYPE_MEM = 0x0000,
- IWNES_MEMREG_TYPE_QP = 0x0001,
- IWNES_MEMREG_TYPE_CQ = 0x0002,
- IWNES_MEMREG_TYPE_MW = 0x0003,
- IWNES_MEMREG_TYPE_FMR = 0x0004,
- IWNES_MEMREG_TYPE_FMEM = 0x0005,
-};
-
-struct nes_mem_reg_req {
- __u32 reg_type; /* indicates if id is memory, QP or CQ */
- __u32 reserved;
-};
-
-struct nes_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct nes_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 mmap_sq_db_index;
- __u32 mmap_rq_db_index;
- __u32 nes_drv_opt;
-};
-
-#endif /* NES_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 7a10b3a325fa..c022ee26089b 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -38,6 +38,15 @@
#define QEDR_ABI_VERSION (8)
/* user kernel communication data structures. */
+enum qedr_alloc_ucontext_flags {
+ QEDR_ALLOC_UCTX_RESERVED = 1 << 0,
+ QEDR_ALLOC_UCTX_DB_REC = 1 << 1
+};
+
+struct qedr_alloc_ucontext_req {
+ __u32 context_flags;
+ __u32 reserved;
+};
struct qedr_alloc_ucontext_resp {
__aligned_u64 db_pa;
@@ -74,6 +83,7 @@ struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
__u16 reserved;
+ __aligned_u64 db_rec_addr;
};
struct qedr_create_qp_ureq {
@@ -109,6 +119,13 @@ struct qedr_create_qp_uresp {
__u32 rq_db2_offset;
__u32 reserved;
+
+ /* address of SQ doorbell recovery user entry */
+ __aligned_u64 sq_db_rec_addr;
+
+ /* address of RQ doorbell recovery user entry */
+ __aligned_u64 rq_db_rec_addr;
+
};
struct qedr_create_srq_ureq {
@@ -128,4 +145,12 @@ struct qedr_create_srq_uresp {
__u32 reserved1;
};
+/* Doorbell recovery entry, allocated and populated by userspace doorbelling
+ * entities and mapped to the kernel. The kernel uses this to register doorbell
+ * information with the doorbell drop recovery mechanism.
+ */
+struct qedr_user_db_rec {
+ __aligned_u64 db_data; /* doorbell data */
+};
+
#endif /* __QEDR_USER_H__ */
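
A sketch of how a userspace provider might opt in to the new doorbell recovery support when allocating its ucontext; the surrounding uverbs command plumbing is elided:

#include <string.h>
#include <rdma/qedr-abi.h>

static void example_fill_ucontext_req(struct qedr_alloc_ucontext_req *req)
{
	memset(req, 0, sizeof(*req));
	req->context_flags = QEDR_ALLOC_UCTX_DB_REC;	/* request db recovery */
}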
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index b8bb285f6b2a..7b1ec806f8f9 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -84,26 +84,4 @@ struct ib_uverbs_ioctl_hdr {
struct ib_uverbs_attr attrs[0];
};
-enum rdma_driver_id {
- RDMA_DRIVER_UNKNOWN,
- RDMA_DRIVER_MLX5,
- RDMA_DRIVER_MLX4,
- RDMA_DRIVER_CXGB3,
- RDMA_DRIVER_CXGB4,
- RDMA_DRIVER_MTHCA,
- RDMA_DRIVER_BNXT_RE,
- RDMA_DRIVER_OCRDMA,
- RDMA_DRIVER_NES,
- RDMA_DRIVER_I40IW,
- RDMA_DRIVER_VMW_PVRDMA,
- RDMA_DRIVER_QEDR,
- RDMA_DRIVER_HNS,
- RDMA_DRIVER_USNIC,
- RDMA_DRIVER_RXE,
- RDMA_DRIVER_HFI1,
- RDMA_DRIVER_QIB,
- RDMA_DRIVER_EFA,
- RDMA_DRIVER_SIW,
-};
-
#endif
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 6e73f0274e41..f8b638c73371 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -179,6 +179,11 @@ struct pvrdma_create_qp {
__aligned_u64 qp_addr;
};
+struct pvrdma_create_qp_resp {
+ __u32 qpn;
+ __u32 qp_handle;
+};
+
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
__aligned_u64 swap_val;
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 3d4d6de66a17..9c96fb0e4d90 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -317,12 +317,22 @@ struct snd_enc_generic {
__s32 reserved[15]; /* Can be used for SND_AUDIOCODEC_BESPOKE */
} __attribute__((packed, aligned(4)));
+struct snd_dec_flac {
+ __u16 sample_size;
+ __u16 min_blk_size;
+ __u16 max_blk_size;
+ __u16 min_frame_size;
+ __u16 max_frame_size;
+ __u16 reserved;
+} __attribute__((packed, aligned(4)));
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
struct snd_enc_real real;
struct snd_enc_flac flac;
struct snd_enc_generic generic;
+ struct snd_dec_flac flac_d;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
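
A sketch of filling the new FLAC decoder options for a compressed-offload stream; the block and frame sizes would normally come from the file's STREAMINFO header, so the numbers below are placeholders:

#include <string.h>
#include <sound/compress_params.h>

static void example_fill_flac(struct snd_codec *codec)
{
	memset(codec, 0, sizeof(*codec));
	codec->id = SND_AUDIOCODEC_FLAC;
	codec->options.flac_d.sample_size = 16;		/* bits per sample */
	codec->options.flac_d.min_blk_size = 16;
	codec->options.flac_d.max_blk_size = 4096;
	codec->options.flac_d.min_frame_size = 0;	/* 0 = unknown */
	codec->options.flac_d.max_frame_size = 0;	/* 0 = unknown */
}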
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index a0fe0d4c4b66..ebfdc20ca081 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,7 +26,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 10
+#define SOF_ABI_MINOR 11
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 8f996857fb24..76883e6fb750 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -111,7 +111,14 @@
/* TODO: Add SAI tokens */
/* ESAI */
-#define SOF_TKN_IMX_ESAI_FIRST_TOKEN 1100
-/* TODO: Add ESAI tokens */
+#define SOF_TKN_IMX_ESAI_MCLK_ID 1100
+
+/* Stream */
+#define SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3 1200
+#define SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3 1201
+
+/* Led control for mute switches */
+#define SOF_TKN_MUTE_LED_USE 1300
+#define SOF_TKN_MUTE_LED_DIRECTION 1301
#endif
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index d71380f6ed0b..ffc0d3902b71 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,10 +4,10 @@
#include <linux/swiotlb.h>
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
diff --git a/init/Kconfig b/init/Kconfig
index 67a602ee17f1..d7163fcf233b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -104,29 +104,9 @@ config COMPILE_TEST
here. If you are a user/distributor, say N here to exclude useless
drivers to be distributed.
-config HEADER_TEST
- bool "Compile test headers that should be standalone compilable"
- help
- Compile test headers listed in header-test-y target to ensure they are
- self-contained, i.e. compilable as standalone units.
-
- If you are a developer or tester and want to ensure the requested
- headers are self-contained, say Y here. Otherwise, choose N.
-
-config KERNEL_HEADER_TEST
- bool "Compile test kernel headers"
- depends on HEADER_TEST
- help
- Headers in include/ are used to build external moduls.
- Compile test them to ensure they are self-contained, i.e.
- compilable as standalone units.
-
- If you are a developer or tester and want to ensure the headers
- in include/ are self-contained, say Y here. Otherwise, choose N.
-
config UAPI_HEADER_TEST
bool "Compile test UAPI headers"
- depends on HEADER_TEST && HEADERS_INSTALL && CC_CAN_LINK
+ depends on HEADERS_INSTALL && CC_CAN_LINK
help
Compile test headers exported to user-space to ensure they are
self-contained, i.e. compilable as standalone units.
@@ -1376,23 +1356,6 @@ config SYSFS_SYSCALL
If unsure say Y here.
-config SYSCTL_SYSCALL
- bool "Sysctl syscall support" if EXPERT
- depends on PROC_SYSCTL
- default n
- select SYSCTL
- ---help---
- sys_sysctl uses binary paths that have been found challenging
- to properly maintain and use. The interface in /proc/sys
- using paths with ascii names is now the primary path to this
- information.
-
- Almost nothing using the binary sysctl interface so if you are
- trying to save some space it is probably safe to disable this,
- making your kernel marginally smaller.
-
- If unsure say N here.
-
config FHANDLE
bool "open by fhandle syscalls" if EXPERT
select EXPORTFS
diff --git a/ipc/syscall.c b/ipc/syscall.c
index 581bdff4e7c5..dfb0e988d542 100644
--- a/ipc/syscall.c
+++ b/ipc/syscall.c
@@ -30,7 +30,7 @@ int ksys_ipc(unsigned int call, int first, unsigned long second,
return ksys_semtimedop(first, (struct sembuf __user *)ptr,
second, NULL);
case SEMTIMEDOP:
- if (IS_ENABLED(CONFIG_64BIT) || !IS_ENABLED(CONFIG_64BIT_TIME))
+ if (IS_ENABLED(CONFIG_64BIT))
return ksys_semtimedop(first, ptr, second,
(const struct __kernel_timespec __user *)fifth);
else if (IS_ENABLED(CONFIG_COMPAT_32BIT_TIME))
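
The hunk above leans on the IS_ENABLED() idiom: both branches are parsed and type-checked, and the disabled one is discarded as dead code, which is why dropping CONFIG_64BIT_TIME is a one-line change. A generic sketch of the pattern:

#include <linux/kconfig.h>

static int example_pick_path(void)
{
	if (IS_ENABLED(CONFIG_64BIT))
		return 64;	/* native path, folded in at compile time */
	return 32;		/* compat path */
}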
diff --git a/kernel/Makefile b/kernel/Makefile
index f0902a7bd1b3..f2cc0d118a0b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -130,7 +130,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
- cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@
+ cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@
$(obj)/kheaders_data.tar.xz: FORCE
$(call cmd,genikh)
diff --git a/kernel/audit.c b/kernel/audit.c
index da8dc0db5bd3..8e09f0f55b4b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -830,7 +830,7 @@ static int kauditd_thread(void *dummy)
rc = kauditd_send_queue(sk, portid,
&audit_hold_queue, UNICAST_RETRIES,
NULL, kauditd_rehold_skb);
- if (ac && rc < 0) {
+ if (rc < 0) {
sk = NULL;
auditd_reset(ac);
goto main_queue;
@@ -840,7 +840,7 @@ static int kauditd_thread(void *dummy)
rc = kauditd_send_queue(sk, portid,
&audit_retry_queue, UNICAST_RETRIES,
NULL, kauditd_hold_skb);
- if (ac && rc < 0) {
+ if (rc < 0) {
sk = NULL;
auditd_reset(ac);
goto main_queue;
@@ -2155,18 +2155,19 @@ void audit_log_task_info(struct audit_buffer *ab)
EXPORT_SYMBOL(audit_log_task_info);
/**
- * audit_log_link_denied - report a link restriction denial
- * @operation: specific link operation
+ * audit_log_path_denied - report a path restriction denial
+ * @type: audit message type (AUDIT_ANOM_LINK, AUDIT_ANOM_CREAT, etc)
+ * @operation: specific operation name
*/
-void audit_log_link_denied(const char *operation)
+void audit_log_path_denied(int type, const char *operation)
{
struct audit_buffer *ab;
if (!audit_enabled || audit_dummy_context())
return;
- /* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */
- ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_ANOM_LINK);
+ /* Generate log with subject, operation, outcome. */
+ ab = audit_log_start(audit_context(), GFP_KERNEL, type);
if (!ab)
return;
audit_log_format(ab, "op=%s", operation);
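
A sketch of a caller of the generalized helper; the hardlink restriction check in fs/namei.c is the kind of site meant here, though the exact call site shown is illustrative:

#include <linux/audit.h>
#include <linux/errno.h>

static int example_deny_hardlink(void)
{
	/* Same record as the old audit_log_link_denied("linkat"), now with
	 * the message type passed explicitly. */
	audit_log_path_denied(AUDIT_ANOM_LINK, "linkat");
	return -EPERM;
}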
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index caca752ee5e6..3f958b90d914 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -289,7 +289,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
if (irqs_disabled()) {
work = this_cpu_ptr(&up_read_work);
- if (work->irq_work.flags & IRQ_WORK_BUSY)
+ if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
/* cannot queue more up_read, fallback */
irq_work_busy = true;
}
diff --git a/kernel/compat.c b/kernel/compat.c
index a2bc1d6ceb57..95005f849c68 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -90,30 +90,6 @@ int compat_put_timespec(const struct timespec *ts, void __user *uts)
}
EXPORT_SYMBOL_GPL(compat_put_timespec);
-int get_compat_itimerval(struct itimerval *o, const struct compat_itimerval __user *i)
-{
- struct compat_itimerval v32;
-
- if (copy_from_user(&v32, i, sizeof(struct compat_itimerval)))
- return -EFAULT;
- o->it_interval.tv_sec = v32.it_interval.tv_sec;
- o->it_interval.tv_usec = v32.it_interval.tv_usec;
- o->it_value.tv_sec = v32.it_value.tv_sec;
- o->it_value.tv_usec = v32.it_value.tv_usec;
- return 0;
-}
-
-int put_compat_itimerval(struct compat_itimerval __user *o, const struct itimerval *i)
-{
- struct compat_itimerval v32;
-
- v32.it_interval.tv_sec = i->it_interval.tv_sec;
- v32.it_interval.tv_usec = i->it_interval.tv_usec;
- v32.it_value.tv_sec = i->it_value.tv_sec;
- v32.it_value.tv_usec = i->it_value.tv_usec;
- return copy_to_user(o, &v32, sizeof(struct compat_itimerval)) ? -EFAULT : 0;
-}
-
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index f76d6f77dd5e..2b7c9b67931d 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -441,6 +441,37 @@ setundefined:
return 0;
}
+#ifdef CONFIG_KGDB_KDB
+void kdb_dump_stack_on_cpu(int cpu)
+{
+ if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
+ dump_stack();
+ return;
+ }
+
+ if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
+ kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
+ cpu);
+ return;
+ }
+
+ /*
+ * In general, architectures don't support dumping the stack of a
+ * "running" process that's not the current one. From the point of
+ * view of the Linux kernel, processes that are looping in the kgdb
+ * slave loop are still "running". There's also no API (that actually
+ * works across all architectures) that can do a stack crawl based
+ * on registers passed as a parameter.
+ *
+ * Solve this conundrum by asking slave CPUs to do the backtrace
+ * themselves.
+ */
+ kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
+ while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
+ cpu_relax();
+}
+#endif
+
/*
* Return true if there is a valid kgdb I/O module. Also if no
* debugger is attached a message can be printed to the console about
@@ -580,6 +611,9 @@ cpu_loop:
atomic_xchg(&kgdb_active, cpu);
break;
}
+ } else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
+ dump_stack();
+ kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
if (!raw_spin_is_locked(&dbg_slave_lock))
goto return_normal;
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index b4a7c326d546..cd22b5f68831 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -33,7 +33,7 @@ struct kgdb_state {
#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
-#define DCPU_SSTEP 0x8 /* CPU is single stepping */
+#define DCPU_WANT_BT 0x8 /* Slave cpu should backtrace then clear flag */
struct debuggerinfo_struct {
void *debuggerinfo;
@@ -76,6 +76,7 @@ extern int kdb_stub(struct kgdb_state *ks);
extern int kdb_parse(const char *cmdstr);
extern int kdb_common_init_state(struct kgdb_state *ks);
extern int kdb_common_deinit_state(void);
+extern void kdb_dump_stack_on_cpu(int cpu);
#else /* ! CONFIG_KGDB_KDB */
static inline int kdb_stub(struct kgdb_state *ks)
{
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 7e2379aa0a1e..4af48ac53625 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -22,20 +22,15 @@
static void kdb_show_stack(struct task_struct *p, void *addr)
{
int old_lvl = console_loglevel;
+
console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
kdb_trap_printk++;
- kdb_set_current_task(p);
- if (addr) {
- show_stack((struct task_struct *)p, addr);
- } else if (kdb_current_regs) {
-#ifdef CONFIG_X86
- show_stack(p, &kdb_current_regs->sp);
-#else
- show_stack(p, NULL);
-#endif
- } else {
- show_stack(p, NULL);
- }
+
+ if (!addr && kdb_task_has_cpu(p))
+ kdb_dump_stack_on_cpu(kdb_process_cpu(p));
+ else
+ show_stack(p, addr);
+
console_loglevel = old_lvl;
kdb_trap_printk--;
}
@@ -78,12 +73,12 @@ static void kdb_show_stack(struct task_struct *p, void *addr)
*/
static int
-kdb_bt1(struct task_struct *p, unsigned long mask,
- int argcount, int btaprompt)
+kdb_bt1(struct task_struct *p, unsigned long mask, bool btaprompt)
{
- char buffer[2];
- if (kdb_getarea(buffer[0], (unsigned long)p) ||
- kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
+ char ch;
+
+ if (kdb_getarea(ch, (unsigned long)p) ||
+ kdb_getarea(ch, (unsigned long)(p+1)-1))
return KDB_BADADDR;
if (!kdb_task_state(p, mask))
return 0;
@@ -91,22 +86,47 @@ kdb_bt1(struct task_struct *p, unsigned long mask,
kdb_ps1(p);
kdb_show_stack(p, NULL);
if (btaprompt) {
- kdb_getstr(buffer, sizeof(buffer),
- "Enter <q> to end, <cr> to continue:");
- if (buffer[0] == 'q') {
- kdb_printf("\n");
+ kdb_printf("Enter <q> to end, <cr> or <space> to continue:");
+ do {
+ ch = kdb_getchar();
+ } while (!strchr("\r\n q", ch));
+ kdb_printf("\n");
+
+ /* reset the pager */
+ kdb_nextline = 1;
+
+ if (ch == 'q')
return 1;
- }
}
touch_nmi_watchdog();
return 0;
}
+static void
+kdb_bt_cpu(unsigned long cpu)
+{
+ struct task_struct *kdb_tsk;
+
+ if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
+ kdb_printf("WARNING: no process for cpu %ld\n", cpu);
+ return;
+ }
+
+ /* If a CPU failed to round up we could be here */
+ kdb_tsk = KDB_TSK(cpu);
+ if (!kdb_tsk) {
+ kdb_printf("WARNING: no task for cpu %ld\n", cpu);
+ return;
+ }
+
+ kdb_set_current_task(kdb_tsk);
+ kdb_bt1(kdb_tsk, ~0UL, false);
+}
+
int
kdb_bt(int argc, const char **argv)
{
int diag;
- int argcount = 5;
int btaprompt = 1;
int nextarg;
unsigned long addr;
@@ -125,7 +145,7 @@ kdb_bt(int argc, const char **argv)
/* Run the active tasks first */
for_each_online_cpu(cpu) {
p = kdb_curr_task(cpu);
- if (kdb_bt1(p, mask, argcount, btaprompt))
+ if (kdb_bt1(p, mask, btaprompt))
return 0;
}
/* Now the inactive tasks */
@@ -134,7 +154,7 @@ kdb_bt(int argc, const char **argv)
return 0;
if (task_curr(p))
continue;
- if (kdb_bt1(p, mask, argcount, btaprompt))
+ if (kdb_bt1(p, mask, btaprompt))
return 0;
} kdb_while_each_thread(g, p);
} else if (strcmp(argv[0], "btp") == 0) {
@@ -148,7 +168,7 @@ kdb_bt(int argc, const char **argv)
p = find_task_by_pid_ns(pid, &init_pid_ns);
if (p) {
kdb_set_current_task(p);
- return kdb_bt1(p, ~0UL, argcount, 0);
+ return kdb_bt1(p, ~0UL, false);
}
kdb_printf("No process with pid == %ld found\n", pid);
return 0;
@@ -159,11 +179,10 @@ kdb_bt(int argc, const char **argv)
if (diag)
return diag;
kdb_set_current_task((struct task_struct *)addr);
- return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
+ return kdb_bt1((struct task_struct *)addr, ~0UL, false);
} else if (strcmp(argv[0], "btc") == 0) {
unsigned long cpu = ~0;
struct task_struct *save_current_task = kdb_current_task;
- char buf[80];
if (argc > 1)
return KDB_ARGCOUNT;
if (argc == 1) {
@@ -171,35 +190,22 @@ kdb_bt(int argc, const char **argv)
if (diag)
return diag;
}
- /* Recursive use of kdb_parse, do not use argv after
- * this point */
- argv = NULL;
if (cpu != ~0) {
- if (cpu >= num_possible_cpus() || !cpu_online(cpu)) {
- kdb_printf("no process for cpu %ld\n", cpu);
- return 0;
- }
- sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
- kdb_parse(buf);
- return 0;
- }
- kdb_printf("btc: cpu status: ");
- kdb_parse("cpu\n");
- for_each_online_cpu(cpu) {
- void *kdb_tsk = KDB_TSK(cpu);
-
- /* If a CPU failed to round up we could be here */
- if (!kdb_tsk) {
- kdb_printf("WARNING: no task for cpu %ld\n",
- cpu);
- continue;
+ kdb_bt_cpu(cpu);
+ } else {
+ /*
+ * Recursive use of kdb_parse, do not use argv after
+ * this point.
+ */
+ argv = NULL;
+ kdb_printf("btc: cpu status: ");
+ kdb_parse("cpu\n");
+ for_each_online_cpu(cpu) {
+ kdb_bt_cpu(cpu);
+ touch_nmi_watchdog();
}
-
- sprintf(buf, "btt 0x%px\n", kdb_tsk);
- kdb_parse(buf);
- touch_nmi_watchdog();
+ kdb_set_current_task(save_current_task);
}
- kdb_set_current_task(save_current_task);
return 0;
} else {
if (argc) {
@@ -211,7 +217,7 @@ kdb_bt(int argc, const char **argv)
kdb_show_stack(kdb_current_task, (void *)addr);
return 0;
} else {
- return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
+ return kdb_bt1(kdb_current_task, ~0UL, false);
}
}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 3a5184eb6977..8bcdded5d61f 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -49,14 +49,88 @@ static int kgdb_transition_check(char *buffer)
return 0;
}
-static int kdb_read_get_key(char *buffer, size_t bufsize)
+/**
+ * kdb_handle_escape() - validity check on an accumulated escape sequence.
+ * @buf: Accumulated escape characters to be examined. Note that buf
+ * is not a string, it is an array of characters and need not be
+ * nil terminated.
+ * @sz: Number of accumulated escape characters.
+ *
+ * Return: -1 if the escape sequence is unwanted, 0 if it is incomplete,
+ * otherwise it returns a mapped key value to pass to the upper layers.
+ */
+static int kdb_handle_escape(char *buf, size_t sz)
+{
+ char *lastkey = buf + sz - 1;
+
+ switch (sz) {
+ case 1:
+ if (*lastkey == '\e')
+ return 0;
+ break;
+
+ case 2: /* \e<something> */
+ if (*lastkey == '[')
+ return 0;
+ break;
+
+ case 3:
+ switch (*lastkey) {
+ case 'A': /* \e[A, up arrow */
+ return 16;
+ case 'B': /* \e[B, down arrow */
+ return 14;
+ case 'C': /* \e[C, right arrow */
+ return 6;
+ case 'D': /* \e[D, left arrow */
+ return 2;
+ case '1': /* \e[<1,3,4>], may be home, del, end */
+ case '3':
+ case '4':
+ return 0;
+ }
+ break;
+
+ case 4:
+ if (*lastkey == '~') {
+ switch (buf[2]) {
+ case '1': /* \e[1~, home */
+ return 1;
+ case '3': /* \e[3~, del */
+ return 4;
+ case '4': /* \e[4~, end */
+ return 5;
+ }
+ }
+ break;
+ }
+
+ return -1;
+}
+
+/**
+ * kdb_getchar() - Read a single character from a kdb console (or consoles).
+ *
+ * Other than polling the various consoles that are currently enabled,
+ * most of the work done in this function is dealing with escape sequences.
+ *
+ * An escape key could be the start of a vt100 control sequence such as \e[D
+ * (left arrow) or it could be a character in its own right. The standard
+ * method for detecting the difference is to wait for 2 seconds to see if there
+ * are any other characters. kdb is complicated by the lack of a timer service
+ * (interrupts are off) and by multiple input sources. Escape sequence processing
+ * has to be done as states in the polling loop.
+ *
+ * Return: The key pressed or a control code derived from an escape sequence.
+ */
+char kdb_getchar(void)
{
#define ESCAPE_UDELAY 1000
#define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */
- char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */
- char *ped = escape_data;
+ char buf[4]; /* longest vt100 escape sequence is 4 bytes */
+ char *pbuf = buf;
int escape_delay = 0;
- get_char_func *f, *f_escape = NULL;
+ get_char_func *f, *f_prev = NULL;
int key;
for (f = &kdb_poll_funcs[0]; ; ++f) {
@@ -65,109 +139,37 @@ static int kdb_read_get_key(char *buffer, size_t bufsize)
touch_nmi_watchdog();
f = &kdb_poll_funcs[0];
}
- if (escape_delay == 2) {
- *ped = '\0';
- ped = escape_data;
- --escape_delay;
- }
- if (escape_delay == 1) {
- key = *ped++;
- if (!*ped)
- --escape_delay;
- break;
- }
+
key = (*f)();
if (key == -1) {
if (escape_delay) {
udelay(ESCAPE_UDELAY);
- --escape_delay;
+ if (--escape_delay == 0)
+ return '\e';
}
continue;
}
- if (bufsize <= 2) {
- if (key == '\r')
- key = '\n';
- *buffer++ = key;
- *buffer = '\0';
- return -1;
- }
- if (escape_delay == 0 && key == '\e') {
+
+ /*
+ * When the first character is received (or we get a change of
+ * input source) we set ourselves up to handle an escape
+ * sequence (just in case).
+ */
+ if (f_prev != f) {
+ f_prev = f;
+ pbuf = buf;
escape_delay = ESCAPE_DELAY;
- ped = escape_data;
- f_escape = f;
- }
- if (escape_delay) {
- *ped++ = key;
- if (f_escape != f) {
- escape_delay = 2;
- continue;
- }
- if (ped - escape_data == 1) {
- /* \e */
- continue;
- } else if (ped - escape_data == 2) {
- /* \e<something> */
- if (key != '[')
- escape_delay = 2;
- continue;
- } else if (ped - escape_data == 3) {
- /* \e[<something> */
- int mapkey = 0;
- switch (key) {
- case 'A': /* \e[A, up arrow */
- mapkey = 16;
- break;
- case 'B': /* \e[B, down arrow */
- mapkey = 14;
- break;
- case 'C': /* \e[C, right arrow */
- mapkey = 6;
- break;
- case 'D': /* \e[D, left arrow */
- mapkey = 2;
- break;
- case '1': /* dropthrough */
- case '3': /* dropthrough */
- /* \e[<1,3,4>], may be home, del, end */
- case '4':
- mapkey = -1;
- break;
- }
- if (mapkey != -1) {
- if (mapkey > 0) {
- escape_data[0] = mapkey;
- escape_data[1] = '\0';
- }
- escape_delay = 2;
- }
- continue;
- } else if (ped - escape_data == 4) {
- /* \e[<1,3,4><something> */
- int mapkey = 0;
- if (key == '~') {
- switch (escape_data[2]) {
- case '1': /* \e[1~, home */
- mapkey = 1;
- break;
- case '3': /* \e[3~, del */
- mapkey = 4;
- break;
- case '4': /* \e[4~, end */
- mapkey = 5;
- break;
- }
- }
- if (mapkey > 0) {
- escape_data[0] = mapkey;
- escape_data[1] = '\0';
- }
- escape_delay = 2;
- continue;
- }
}
- break; /* A key to process */
+
+ *pbuf++ = key;
+ key = kdb_handle_escape(buf, pbuf - buf);
+ if (key < 0) /* no escape sequence; return best character */
+ return buf[pbuf - buf == 2 ? 1 : 0];
+ if (key > 0)
+ return key;
}
- return key;
+
+ unreachable();
}
/*
@@ -188,17 +190,7 @@ static int kdb_read_get_key(char *buffer, size_t bufsize)
* function. It is not reentrant - it relies on the fact
* that while kdb is running on only one "master debug" cpu.
* Remarks:
- *
- * The buffer size must be >= 2. A buffer size of 2 means that the caller only
- * wants a single key.
- *
- * An escape key could be the start of a vt100 control sequence such as \e[D
- * (left arrow) or it could be a character in its own right. The standard
- * method for detecting the difference is to wait for 2 seconds to see if there
- * are any other characters. kdb is complicated by the lack of a timer service
- * (interrupts are off), by multiple input sources and by the need to sometimes
- * return after just one key. Escape sequence processing has to be done as
- * states in the polling loop.
+ * The buffer size must be >= 2.
*/
static char *kdb_read(char *buffer, size_t bufsize)
@@ -233,9 +225,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
*cp = '\0';
kdb_printf("%s", buffer);
poll_again:
- key = kdb_read_get_key(buffer, bufsize);
- if (key == -1)
- return buffer;
+ key = kdb_getchar();
if (key != 9)
tab = 0;
switch (key) {
@@ -746,7 +736,7 @@ kdb_printit:
/* check for having reached the LINES number of printed lines */
if (kdb_nextline >= linecount) {
- char buf1[16] = "";
+ char ch;
/* Watch out for recursion here. Any routine that calls
* kdb_printf will come back through here. And kdb_read
@@ -781,39 +771,38 @@ kdb_printit:
if (logging)
printk("%s", moreprompt);
- kdb_read(buf1, 2); /* '2' indicates to return
- * immediately after getting one key. */
+ ch = kdb_getchar();
kdb_nextline = 1; /* Really set output line 1 */
/* empty and reset the buffer: */
kdb_buffer[0] = '\0';
next_avail = kdb_buffer;
size_avail = sizeof(kdb_buffer);
- if ((buf1[0] == 'q') || (buf1[0] == 'Q')) {
+ if ((ch == 'q') || (ch == 'Q')) {
/* user hit q or Q */
KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */
KDB_STATE_CLEAR(PAGER);
/* end of command output; back to normal mode */
kdb_grepping_flag = 0;
kdb_printf("\n");
- } else if (buf1[0] == ' ') {
+ } else if (ch == ' ') {
kdb_printf("\r");
suspend_grep = 1; /* for this recursion */
- } else if (buf1[0] == '\n') {
+ } else if (ch == '\n' || ch == '\r') {
kdb_nextline = linecount - 1;
kdb_printf("\r");
suspend_grep = 1; /* for this recursion */
- } else if (buf1[0] == '/' && !kdb_grepping_flag) {
+ } else if (ch == '/' && !kdb_grepping_flag) {
kdb_printf("\r");
kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
kdbgetenv("SEARCHPROMPT") ?: "search> ");
*strchrnul(kdb_grep_string, '\n') = '\0';
kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
suspend_grep = 1; /* for this recursion */
- } else if (buf1[0] && buf1[0] != '\n') {
- /* user hit something other than enter */
+ } else if (ch) {
+ /* user hit something unexpected */
suspend_grep = 1; /* for this recursion */
- if (buf1[0] != '/')
+ if (ch != '/')
kdb_printf(
"\nOnly 'q', 'Q' or '/' are processed at "
"more prompt, input ignored\n");
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 2118d8258b7c..55d052061ef9 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -210,6 +210,7 @@ extern void kdb_ps1(const struct task_struct *p);
extern void kdb_print_nameval(const char *name, unsigned long val);
extern void kdb_send_sig(struct task_struct *p, int sig);
extern void kdb_meminfo_proc_show(void);
+extern char kdb_getchar(void);
extern char *kdb_getstr(char *, size_t, const char *);
extern void kdb_gdb_state_pass(char *buf);
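
With kdb_getchar() exported through kdb_private.h, single-key prompts no longer need a 2-byte kdb_read() buffer. A minimal sketch of a pager-style prompt built on it, mirroring the kdb_bt1() loop above:

#include <linux/kdb.h>
#include <linux/string.h>
#include "kdb_private.h"

static int example_prompt_quit(void)
{
	char ch;

	kdb_printf("Enter <q> to end, <cr> to continue:");
	do {
		ch = kdb_getchar();
	} while (!strchr("\r\n q", ch));
	kdb_printf("\n");
	return ch == 'q';	/* non-zero means the user asked to stop */
}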
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 73c5c2b8e824..4c103a24e380 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -51,9 +51,6 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
config ARCH_HAS_DMA_PREP_COHERENT
bool
-config ARCH_HAS_DMA_COHERENT_TO_PFN
- bool
-
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool
@@ -68,9 +65,18 @@ config SWIOTLB
bool
select NEED_DMA_MAP_STATE
+#
+# Should be selected if we can mmap non-coherent mappings to userspace.
+# The only thing that is really required is a way to set an uncached bit
+# in the pagetables
+#
+config DMA_NONCOHERENT_MMAP
+ bool
+
config DMA_REMAP
depends on MMU
select GENERIC_ALLOCATOR
+ select DMA_NONCOHERENT_MMAP
bool
config DMA_DIRECT_REMAP
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 545e3869b0e3..551b0eb7028a 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -123,8 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
return ret;
}
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
- ssize_t size, dma_addr_t *dma_handle)
+static void *__dma_alloc_from_coherent(struct device *dev,
+ struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
{
int order = get_order(size);
unsigned long flags;
@@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
/*
* Memory was found in the coherent area.
*/
- *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
ret = mem->virt_base + (pageno << PAGE_SHIFT);
spin_unlock_irqrestore(&mem->spinlock, flags);
memset(ret, 0, size);
@@ -175,17 +176,18 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
if (!mem)
return 0;
- *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ *ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
return 1;
}
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle)
{
if (!dma_coherent_default_memory)
return NULL;
- return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
- dma_handle);
+ return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+ dma_handle);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 69cfb4345388..daa4e6eefdde 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -42,10 +42,11 @@ struct cma *dma_contiguous_default_area;
* Users, who want to set the size of global CMA area for their system
* should use cma= kernel parameter.
*/
-static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
-static phys_addr_t size_cmdline = -1;
-static phys_addr_t base_cmdline;
-static phys_addr_t limit_cmdline;
+static const phys_addr_t size_bytes __initconst =
+ (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline __initdata = -1;
+static phys_addr_t base_cmdline __initdata;
+static phys_addr_t limit_cmdline __initdata;
static int __init early_cma(char *p)
{
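
The __initconst/__initdata annotations above move these command-line staging variables into the init sections, which the kernel frees once boot completes, so they cost no runtime memory. A generic sketch of the pattern:

#include <linux/init.h>

static unsigned long example_size __initdata;	/* freed after boot */

static int __init example_setup(char *p)
{
	example_size = 1;	/* a real handler would parse p here */
	return 0;
}
early_param("example", example_setup);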
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index a26170469543..2031ed1ad7fa 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -27,7 +27,7 @@
#include <asm/sections.h>
-#define HASH_SIZE 1024ULL
+#define HASH_SIZE 16384ULL
#define HASH_FN_SHIFT 13
#define HASH_FN_MASK (HASH_SIZE - 1)
@@ -54,40 +54,40 @@ enum map_err_types {
* struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
* @list: node on pre-allocated free_entries list
* @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
- * @type: single, page, sg, coherent
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
* @size: length of the mapping
+ * @type: single, page, sg, coherent
* @direction: enum dma_data_direction
* @sg_call_ents: 'nents' from dma_map_sg
* @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
* @map_err_type: track whether dma_mapping_error() was checked
* @stacktrace: support backtraces when a violation is detected
*/
struct dma_debug_entry {
struct list_head list;
struct device *dev;
- int type;
- unsigned long pfn;
- size_t offset;
u64 dev_addr;
u64 size;
+ int type;
int direction;
int sg_call_ents;
int sg_mapped_ents;
+ unsigned long pfn;
+ size_t offset;
enum map_err_types map_err_type;
#ifdef CONFIG_STACKTRACE
unsigned int stack_len;
unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
-};
+} ____cacheline_aligned_in_smp;
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
struct hash_bucket {
struct list_head list;
spinlock_t lock;
-} ____cacheline_aligned_in_smp;
+};
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
@@ -255,12 +255,10 @@ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
* Give up exclusive access to the hash bucket
*/
static void put_hash_bucket(struct hash_bucket *bucket,
- unsigned long *flags)
+ unsigned long flags)
__releases(&bucket->lock)
{
- unsigned long __flags = *flags;
-
- spin_unlock_irqrestore(&bucket->lock, __flags);
+ spin_unlock_irqrestore(&bucket->lock, flags);
}
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
@@ -359,7 +357,7 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
/*
* Nothing found, go back a hash bucket
*/
- put_hash_bucket(*bucket, flags);
+ put_hash_bucket(*bucket, *flags);
range += (1 << HASH_FN_SHIFT);
index.dev_addr -= (1 << HASH_FN_SHIFT);
*bucket = get_hash_bucket(&index, flags);
@@ -420,6 +418,7 @@ void debug_dma_dump_mappings(struct device *dev)
}
spin_unlock_irqrestore(&bucket->lock, flags);
+ cond_resched();
}
}
@@ -608,7 +607,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
bucket = get_hash_bucket(entry, &flags);
hash_bucket_add(bucket, entry);
- put_hash_bucket(bucket, &flags);
+ put_hash_bucket(bucket, flags);
rc = active_cacheline_insert(entry);
if (rc == -ENOMEM) {
@@ -1001,7 +1000,7 @@ static void check_unmap(struct dma_debug_entry *ref)
if (!entry) {
/* must drop lock before calling dma_mapping_error */
- put_hash_bucket(bucket, &flags);
+ put_hash_bucket(bucket, flags);
if (dma_mapping_error(ref->dev, ref->dev_addr)) {
err_printk(ref->dev, NULL,
@@ -1083,7 +1082,7 @@ static void check_unmap(struct dma_debug_entry *ref)
hash_bucket_del(entry);
dma_entry_free(entry);
- put_hash_bucket(bucket, &flags);
+ put_hash_bucket(bucket, flags);
}
static void check_for_stack(struct device *dev,
@@ -1203,7 +1202,7 @@ static void check_sync(struct device *dev,
}
out:
- put_hash_bucket(bucket, &flags);
+ put_hash_bucket(bucket, flags);
}
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
@@ -1318,7 +1317,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
}
}
- put_hash_bucket(bucket, &flags);
+ put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
@@ -1391,7 +1390,7 @@ static int get_nr_mapped_entries(struct device *dev,
if (entry)
mapped_ents = entry->sg_mapped_ents;
- put_hash_bucket(bucket, &flags);
+ put_hash_bucket(bucket, flags);
return mapped_ents;
}
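
The put_hash_bucket() change works because the flags word saved by spin_lock_irqsave() is only ever read back by the matching unlock, never updated in between, so passing it by value suffices and drops a pointer dereference. A generic sketch of the idiom:

#include <linux/spinlock.h>

static void example_locked_op(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* saves IRQ state in flags */
	/* ... critical section ... */
	spin_unlock_irqrestore(lock, flags);	/* restores the saved value */
}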
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 0b67c04e531b..6af7ae83c4ad 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -12,6 +12,7 @@
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
+#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>
@@ -26,10 +27,10 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
if (!dev->dma_mask) {
dev_err_once(dev, "DMA map on device without dma_mask\n");
- } else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
+ } else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_limit) {
dev_err_once(dev,
- "overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
+ "overflow %pad+%zu of DMA mask %llx bus limit %llx\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
}
WARN_ON_ONCE(1);
}
@@ -42,6 +43,12 @@ static inline dma_addr_t phys_to_dma_direct(struct device *dev,
return phys_to_dma(dev, phys);
}
+static inline struct page *dma_direct_to_page(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
+}
+
u64 dma_direct_get_required_mask(struct device *dev)
{
u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
@@ -50,15 +57,14 @@ u64 dma_direct_get_required_mask(struct device *dev)
}
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
- u64 *phys_mask)
+ u64 *phys_limit)
{
- if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
- dma_mask = dev->bus_dma_mask;
+ u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
if (force_dma_unencrypted(dev))
- *phys_mask = __dma_to_phys(dev, dma_mask);
+ *phys_limit = __dma_to_phys(dev, dma_limit);
else
- *phys_mask = dma_to_phys(dev, dma_mask);
+ *phys_limit = dma_to_phys(dev, dma_limit);
/*
* Optimistically try the zone that the physical address mask falls
@@ -68,9 +74,9 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
* Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
* zones.
*/
- if (*phys_mask <= DMA_BIT_MASK(zone_dma_bits))
+ if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
return GFP_DMA;
- if (*phys_mask <= DMA_BIT_MASK(32))
+ if (*phys_limit <= DMA_BIT_MASK(32))
return GFP_DMA32;
return 0;
}
@@ -78,16 +84,16 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
return phys_to_dma_direct(dev, phys) + size - 1 <=
- min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
+ min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+ gfp_t gfp, unsigned long attrs)
{
size_t alloc_size = PAGE_ALIGN(size);
int node = dev_to_node(dev);
struct page *page = NULL;
- u64 phys_mask;
+ u64 phys_limit;
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
@@ -95,7 +101,7 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
/* we always manually zero the memory once we are done: */
gfp &= ~__GFP_ZERO;
gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
- &phys_mask);
+ &phys_limit);
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, alloc_size);
@@ -109,7 +115,7 @@ again:
page = NULL;
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- phys_mask < DMA_BIT_MASK(64) &&
+ phys_limit < DMA_BIT_MASK(64) &&
!(gfp & (GFP_DMA32 | GFP_DMA))) {
gfp |= GFP_DMA32;
goto again;
@@ -130,7 +136,16 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
struct page *page;
void *ret;
- page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ dma_alloc_need_uncached(dev, attrs) &&
+ !gfpflags_allow_blocking(gfp)) {
+ ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ if (!ret)
+ return NULL;
+ goto done;
+ }
+
+ page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
if (!page)
return NULL;
@@ -139,9 +154,28 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
/* remove any dirty cache lines on the kernel alias */
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
/* return the page pointer as the opaque cookie */
- return page;
+ ret = page;
+ goto done;
+ }
+
+ if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ dma_alloc_need_uncached(dev, attrs)) ||
+ (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+ /* remove any dirty cache lines on the kernel alias */
+ arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+
+ /* create a coherent mapping */
+ ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+ dma_pgprot(dev, PAGE_KERNEL, attrs),
+ __builtin_return_address(0));
+ if (!ret) {
+ dma_free_contiguous(dev, page, size);
+ return ret;
+ }
+
+ memset(ret, 0, size);
+ goto done;
}
if (PageHighMem(page)) {
@@ -152,17 +186,14 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
* so log an error and fail.
*/
dev_info(dev, "Rejecting highmem page from CMA.\n");
- __dma_direct_free_pages(dev, size, page);
+ dma_free_contiguous(dev, page, size);
return NULL;
}
ret = page_address(page);
- if (force_dma_unencrypted(dev)) {
+ if (force_dma_unencrypted(dev))
set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
- *dma_handle = __phys_to_dma(dev, page_to_phys(page));
- } else {
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- }
+
memset(ret, 0, size);
if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
@@ -170,15 +201,14 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
arch_dma_prep_coherent(page, size);
ret = uncached_kernel_address(ret);
}
-
+done:
+ if (force_dma_unencrypted(dev))
+ *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+ else
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
return ret;
}
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
-{
- dma_free_contiguous(dev, page, size);
-}
-
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
@@ -187,23 +217,28 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
- __dma_direct_free_pages(dev, size, cpu_addr);
+ dma_free_contiguous(dev, cpu_addr, size);
return;
}
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+ return;
+
if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
- if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
- dma_alloc_need_uncached(dev, attrs))
- cpu_addr = cached_kernel_address(cpu_addr);
- __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+ vunmap(cpu_addr);
+
+ dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}
void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_alloc_need_uncached(dev, attrs))
return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
@@ -213,6 +248,7 @@ void dma_direct_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+ !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_alloc_need_uncached(dev, attrs))
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
else
@@ -230,7 +266,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_device(dev, paddr, size, dir);
+ arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);
@@ -248,7 +284,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
dir, SYNC_FOR_DEVICE);
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_device(dev, paddr, sg->length,
+ arch_sync_dma_for_device(paddr, sg->length,
dir);
}
}
@@ -264,8 +300,8 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
phys_addr_t paddr = dma_to_phys(dev, addr);
if (!dev_is_dma_coherent(dev)) {
- arch_sync_dma_for_cpu(dev, paddr, size, dir);
- arch_sync_dma_for_cpu_all(dev);
+ arch_sync_dma_for_cpu(paddr, size, dir);
+ arch_sync_dma_for_cpu_all();
}
if (unlikely(is_swiotlb_buffer(paddr)))
@@ -283,7 +319,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+ arch_sync_dma_for_cpu(paddr, sg->length, dir);
if (unlikely(is_swiotlb_buffer(paddr)))
swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
@@ -291,7 +327,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
}
if (!dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu_all(dev);
+ arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
@@ -325,7 +361,7 @@ static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
size_t size)
{
return swiotlb_force != SWIOTLB_FORCE &&
- dma_capable(dev, dma_addr, size);
+ dma_capable(dev, dma_addr, size, true);
}
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
@@ -342,7 +378,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
}
if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(dev, phys, size, dir);
+ arch_sync_dma_for_device(phys, size, dir);
return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);
@@ -374,7 +410,7 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
{
dma_addr_t dma_addr = paddr;
- if (unlikely(!dma_direct_possible(dev, dma_addr, size))) {
+ if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
report_addr(dev, dma_addr, size);
return DMA_MAPPING_ERROR;
}
@@ -383,6 +419,59 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
}
EXPORT_SYMBOL(dma_direct_map_resource);
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ struct page *page = dma_direct_to_page(dev, dma_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return ret;
+}
+
+#ifdef CONFIG_MMU
+bool dma_direct_can_mmap(struct device *dev)
+{
+ return dev_is_dma_coherent(dev) ||
+ IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
+}
+
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long user_count = vma_pages(vma);
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
+ int ret = -ENXIO;
+
+ vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
+ return -ENXIO;
+ return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ user_count << PAGE_SHIFT, vma->vm_page_prot);
+}
+#else /* CONFIG_MMU */
+bool dma_direct_can_mmap(struct device *dev)
+{
+ return false;
+}
+
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ return -ENXIO;
+}
+#endif /* CONFIG_MMU */
+
/*
* Because 32-bit DMA masks are so common we expect every architecture to be
* able to satisfy them - either by not supporting more physical memory, or by
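
Taken together, the dma-direct changes above funnel every successful allocation through the single done: label, so *dma_handle is computed exactly once for both the encrypted and force-unencrypted cases, and the new dma_direct_mmap() rejects out-of-range windows with an overflow-safe pair of comparisons. A minimal userspace sketch of that bounds check follows; mmap_window_ok() is a hypothetical name, not kernel API:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Model of the vm_pgoff check in dma_direct_mmap() above: reject the
	 * mapping when the window [pgoff, pgoff + user_count) does not fit in
	 * the count pages backing the allocation. Writing it as
	 * (pgoff >= count || user_count > count - pgoff) avoids the wrap-around
	 * that a naive pgoff + user_count > count could hit.
	 */
	static bool mmap_window_ok(unsigned long pgoff,
				   unsigned long user_count,
				   unsigned long count)
	{
		return pgoff < count && user_count <= count - pgoff;
	}

	int main(void)
	{
		/* 4-page buffer: pages [1,3) fit, pages [3,5) do not */
		printf("%d\n", mmap_window_ok(1, 2, 4));	/* 1 */
		printf("%d\n", mmap_window_ok(3, 2, 4));	/* 0 */
		/* a huge pgoff must not wrap around and pass */
		printf("%d\n", mmap_window_ok(~0UL, 1, 4));	/* 0 */
		return 0;
	}
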
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index d9334f31a5af..12ff766ec1fa 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -112,24 +112,9 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- struct page *page;
+ struct page *page = virt_to_page(cpu_addr);
int ret;
- if (!dev_is_dma_coherent(dev)) {
- unsigned long pfn;
-
- if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
- return -ENXIO;
-
- /* If the PFN is not valid, we do not have a struct page */
- pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
- if (!pfn_valid(pfn))
- return -ENXIO;
- page = pfn_to_page(pfn);
- } else {
- page = virt_to_page(cpu_addr);
- }
-
ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
if (!ret)
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
@@ -154,7 +139,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
const struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_is_direct(ops))
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+ return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
size, attrs);
if (!ops->get_sgtable)
return -ENXIO;
@@ -192,7 +177,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
int ret = -ENXIO;
vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -203,19 +187,8 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
if (off >= count || user_count > count - off)
return -ENXIO;
- if (!dev_is_dma_coherent(dev)) {
- if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
- return -ENXIO;
-
- /* If the PFN is not valid, we do not have a struct page */
- pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
- if (!pfn_valid(pfn))
- return -ENXIO;
- } else {
- pfn = page_to_pfn(virt_to_page(cpu_addr));
- }
-
- return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ return remap_pfn_range(vma, vma->vm_start,
+ page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
return -ENXIO;
@@ -233,12 +206,8 @@ bool dma_can_mmap(struct device *dev)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
- if (dma_is_direct(ops)) {
- return IS_ENABLED(CONFIG_MMU) &&
- (dev_is_dma_coherent(dev) ||
- IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN));
- }
-
+ if (dma_is_direct(ops))
+ return dma_direct_can_mmap(dev);
return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);
@@ -263,7 +232,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
const struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_is_direct(ops))
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
+ return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
attrs);
if (!ops->mmap)
return -ENXIO;
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index c00b9258fa6a..d47bd40fc0f5 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -210,59 +210,4 @@ bool dma_free_from_pool(void *start, size_t size)
gen_pool_free(atomic_pool, (unsigned long)start, size);
return true;
}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, unsigned long attrs)
-{
- struct page *page = NULL;
- void *ret;
-
- size = PAGE_ALIGN(size);
-
- if (!gfpflags_allow_blocking(flags)) {
- ret = dma_alloc_from_pool(size, &page, flags);
- if (!ret)
- return NULL;
- goto done;
- }
-
- page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
- if (!page)
- return NULL;
-
- /* remove any dirty cache lines on the kernel alias */
- arch_dma_prep_coherent(page, size);
-
- /* create a coherent mapping */
- ret = dma_common_contiguous_remap(page, size,
- dma_pgprot(dev, PAGE_KERNEL, attrs),
- __builtin_return_address(0));
- if (!ret) {
- __dma_direct_free_pages(dev, size, page);
- return ret;
- }
-
- memset(ret, 0, size);
-done:
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
- phys_addr_t phys = dma_to_phys(dev, dma_handle);
- struct page *page = pfn_to_page(__phys_to_pfn(phys));
-
- vunmap(vaddr);
- __dma_direct_free_pages(dev, size, page);
- }
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return __phys_to_pfn(dma_to_phys(dev, dma_addr));
-}
#endif /* CONFIG_DMA_DIRECT_REMAP */
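
The arch_dma_alloc()/arch_dma_free() fallbacks deleted here are not lost: dma_direct_alloc_pages() above now takes the atomic-pool fast path itself when the caller cannot block. A minimal userspace model of that pattern, where pool_alloc() merely stands in for dma_alloc_from_pool() and is illustrative, not the kernel API:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Callers that must not sleep are served from a preallocated pool;
	 * blocking callers go to the general allocator instead.
	 */
	#define POOL_SLOTS 8
	#define SLOT_SIZE  4096

	static unsigned char pool[POOL_SLOTS][SLOT_SIZE];
	static bool pool_used[POOL_SLOTS];

	static void *pool_alloc(void)
	{
		for (int i = 0; i < POOL_SLOTS; i++) {
			if (!pool_used[i]) {
				pool_used[i] = true;
				return pool[i];
			}
		}
		return NULL;
	}

	static void *alloc_buf(bool may_block)
	{
		if (!may_block)
			return pool_alloc();	/* never sleeps */
		return malloc(SLOT_SIZE);	/* analogue of the page allocator */
	}

	int main(void)
	{
		printf("atomic alloc:   %p\n", alloc_buf(false));
		printf("blocking alloc: %p\n", alloc_buf(true));
		return 0;
	}
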
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 673a2cdb2656..9280d6f8271e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
/* Ensure that the address returned is DMA'ble */
*dma_addr = __phys_to_dma(dev, *phys);
- if (unlikely(!dma_capable(dev, *dma_addr, size))) {
+ if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return false;
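
dma_capable() grew a third argument in this series: swiotlb passes true because a bounce buffer is system RAM, while dma_direct_map_resource() above passes false so a P2P/MMIO resource is not subjected to RAM-only checks. A simplified sketch of the shape of such a predicate, assuming a single device mask and modeling the RAM-only restriction as a minimum-address floor (the real helper also honors bus limits):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t dma_addr_t;

	/* is_ram gates the checks that only make sense for system memory;
	 * MMIO resources merely need to fall under the device mask.
	 */
	static bool dma_capable_model(dma_addr_t addr, size_t size,
				      dma_addr_t mask, dma_addr_t ram_floor,
				      bool is_ram)
	{
		dma_addr_t end = addr + size - 1;

		if (end < addr)			/* wrap-around */
			return false;
		if (is_ram && addr < ram_floor)
			return false;
		return end <= mask;
	}

	int main(void)
	{
		dma_addr_t mask = (1ULL << 32) - 1, floor = 0x1000;

		/* low MMIO register block: fine as a resource, rejected as RAM */
		printf("%d %d\n",
		       dma_capable_model(0x100, 0x100, mask, floor, false),
		       dma_capable_model(0x100, 0x100, mask, floor, true));
		return 0;
	}
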
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c74761004ee5..ece7e13f6e4a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1457,7 +1457,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
/* Try to map as high as possible, this is only a hint. */
area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
PAGE_SIZE, 0, 0);
- if (area->vaddr & ~PAGE_MASK) {
+ if (IS_ERR_VALUE(area->vaddr)) {
ret = area->vaddr;
goto fail;
}
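
get_unmapped_area() returns either a page-aligned address or a negative errno cast to unsigned long. The old `& ~PAGE_MASK` test only caught failures because errno values happen to be unaligned; IS_ERR_VALUE() states the intent directly. A standalone sketch using the kernel's convention that errors occupy the top MAX_ERRNO values of the address space:

	#include <stdio.h>

	#define MAX_ERRNO 4095
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		unsigned long ok  = 0x7f0000000000UL;	/* plausible mapping address */
		unsigned long err = (unsigned long)-12;	/* -ENOMEM from the allocator */

		printf("%d %d\n", IS_ERR_VALUE(ok), IS_ERR_VALUE(err));	/* 0 1 */
		return 0;
	}
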
diff --git a/kernel/exit.c b/kernel/exit.c
index 0bac4b60d5f3..bcbd59888e67 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1409,7 +1409,7 @@ static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
__wake_up_sync_key(&parent->signal->wait_chldexit,
- TASK_INTERRUPTIBLE, 1, p);
+ TASK_INTERRUPTIBLE, p);
}
static long do_wait(struct wait_opts *wo)
diff --git a/kernel/fork.c b/kernel/fork.c
index 00b64f41c2b4..2508a4f238a3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -40,7 +40,6 @@
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
-#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
@@ -94,6 +93,7 @@
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
+#include <linux/kasan.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -224,6 +224,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;
+ /* Clear the KASAN shadow of the stack. */
+ kasan_unpoison_shadow(s->addr, THREAD_SIZE);
+
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
@@ -2182,7 +2185,7 @@ static __latent_entropy struct task_struct *copy_process(
*/
p->start_time = ktime_get_ns();
- p->real_start_time = ktime_get_boottime_ns();
+ p->start_boottime = ktime_get_boottime_ns();
/*
* Make it visible to the rest of the system, but dont wake it up yet.
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 5a0fc0b0403a..e13ca842eb7e 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# This script generates an archive consisting of kernel headers
@@ -21,30 +21,38 @@ arch/$SRCARCH/include/
# Uncomment it for debugging.
# if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter;
# else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi
-# find $src_file_list -name "*.h" | xargs ls -l > /tmp/src-ls-$iter
-# find $obj_file_list -name "*.h" | xargs ls -l > /tmp/obj-ls-$iter
+# find $all_dirs -name "*.h" | xargs ls -l > /tmp/ls-$iter
+
+all_dirs=
+if [ "$building_out_of_srctree" ]; then
+ for d in $dir_list; do
+ all_dirs="$all_dirs $srctree/$d"
+ done
+fi
+all_dirs="$all_dirs $dir_list"
# include/generated/compile.h is ignored because it is touched even when none
-# of the source files changed. This causes pointless regeneration, so let us
-# ignore them for md5 calculation.
-pushd $srctree > /dev/null
-src_files_md5="$(find $dir_list -name "*.h" |
- grep -v "include/generated/compile.h" |
- grep -v "include/generated/autoconf.h" |
- xargs ls -l | md5sum | cut -d ' ' -f1)"
-popd > /dev/null
-obj_files_md5="$(find $dir_list -name "*.h" |
- grep -v "include/generated/compile.h" |
- grep -v "include/generated/autoconf.h" |
+# of the source files changed.
+#
+# When Kconfig regenerates include/generated/autoconf.h, its timestamp is
+# updated, but the contents might be still the same. When any CONFIG option is
+# changed, Kconfig touches the corresponding timestamp file include/config/*.h.
+# Hence, the md5sum detects the configuration change anyway. We do not need to
+# check include/generated/autoconf.h explicitly.
+#
+# Ignore them for md5 calculation to avoid pointless regeneration.
+headers_md5="$(find $all_dirs -name "*.h" |
+ grep -v "include/generated/compile.h" |
+ grep -v "include/generated/autoconf.h" |
xargs ls -l | md5sum | cut -d ' ' -f1)"
+
# Any changes to this script will also cause a rebuild of the archive.
this_file_md5="$(ls -l $sfile | md5sum | cut -d ' ' -f1)"
if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi
if [ -f kernel/kheaders.md5 ] &&
- [ "$(cat kernel/kheaders.md5|head -1)" == "$src_files_md5" ] &&
- [ "$(cat kernel/kheaders.md5|head -2|tail -1)" == "$obj_files_md5" ] &&
- [ "$(cat kernel/kheaders.md5|head -3|tail -1)" == "$this_file_md5" ] &&
- [ "$(cat kernel/kheaders.md5|tail -1)" == "$tarfile_md5" ]; then
+ [ "$(head -n 1 kernel/kheaders.md5)" = "$headers_md5" ] &&
+ [ "$(head -n 2 kernel/kheaders.md5 | tail -n 1)" = "$this_file_md5" ] &&
+ [ "$(tail -n 1 kernel/kheaders.md5)" = "$tarfile_md5" ]; then
exit
fi
@@ -55,14 +63,17 @@ fi
rm -rf $cpio_dir
mkdir $cpio_dir
-pushd $srctree > /dev/null
-for f in $dir_list;
- do find "$f" -name "*.h";
-done | cpio --quiet -pd $cpio_dir
-popd > /dev/null
+if [ "$building_out_of_srctree" ]; then
+ (
+ cd $srctree
+ for f in $dir_list
+ do find "$f" -name "*.h";
+ done | cpio --quiet -pd $cpio_dir
+ )
+fi
-# The second CPIO can complain if files already exist which can
-# happen with out of tree builds. Just silence CPIO for now.
+# The second CPIO can complain if files already exist, which can happen with
+# out-of-tree builds having stale headers in srctree. Just silence CPIO for now.
for f in $dir_list;
do find "$f" -name "*.h";
done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
@@ -79,8 +90,7 @@ find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
--owner=0 --group=0 --numeric-owner --no-recursion \
-Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
-echo "$src_files_md5" > kernel/kheaders.md5
-echo "$obj_files_md5" >> kernel/kheaders.md5
+echo $headers_md5 > kernel/kheaders.md5
echo "$this_file_md5" >> kernel/kheaders.md5
echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b76703b2c0af..b3fa2d87d2f3 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1298,6 +1298,50 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
/**
+ * irq_chip_set_parent_state - set the state of a parent interrupt.
+ *
+ * @data: Pointer to interrupt specific data
+ * @which: State to be restored (one of IRQCHIP_STATE_*)
+ * @val: Value corresponding to @which
+ *
+ * Conditional success: returns 0 if the underlying irqchip does not implement it.
+ */
+int irq_chip_set_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool val)
+{
+ data = data->parent_data;
+
+ if (!data || !data->chip->irq_set_irqchip_state)
+ return 0;
+
+ return data->chip->irq_set_irqchip_state(data, which, val);
+}
+EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
+
+/**
+ * irq_chip_get_parent_state - get the state of a parent interrupt.
+ *
+ * @data: Pointer to interrupt specific data
+ * @which: one of IRQCHIP_STATE_* the caller wants to know
+ * @state: a pointer to a boolean where the state is to be stored
+ *
+ * Conditional success: returns 0 if the underlying irqchip does not implement it.
+ */
+int irq_chip_get_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state)
+{
+ data = data->parent_data;
+
+ if (!data || !data->chip->irq_get_irqchip_state)
+ return 0;
+
+ return data->chip->irq_get_irqchip_state(data, which, state);
+}
+EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
+
+/**
* irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
* NULL)
* @data: Pointer to interrupt specific data
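
Both new helpers hop one level up the irq_data hierarchy and treat a missing parent or missing callback as success, so drivers can call them unconditionally. A userspace model of that delegation pattern; the struct and function names here are hypothetical:

	#include <stdio.h>

	struct chip_m {
		int (*set_state)(int which, int val);
	};

	struct irq_node {
		struct irq_node *parent;
		struct chip_m *chip;
	};

	/* Model of irq_chip_set_parent_state(): delegate upward, and report
	 * conditional success (0) when there is nothing to delegate to.
	 */
	static int set_parent_state(struct irq_node *n, int which, int val)
	{
		n = n->parent;
		if (!n || !n->chip->set_state)
			return 0;
		return n->chip->set_state(which, val);
	}

	static int root_set_state(int which, int val)
	{
		printf("root: state %d <- %d\n", which, val);
		return 0;
	}

	int main(void)
	{
		struct chip_m root_chip = { .set_state = root_set_state };
		struct irq_node root  = { .parent = NULL,  .chip = &root_chip };
		struct irq_node child = { .parent = &root, .chip = &root_chip };

		set_parent_state(&child, 1, 1);	/* delegates to root */
		set_parent_state(&root, 1, 1);	/* no parent: returns 0 */
		return 0;
	}
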
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9be995fc3c5a..5b8fdd659e54 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -750,7 +750,7 @@ void irq_free_descs(unsigned int from, unsigned int cnt)
EXPORT_SYMBOL_GPL(irq_free_descs);
/**
- * irq_alloc_descs - allocate and initialize a range of irq descriptors
+ * __irq_alloc_descs - allocate and initialize a range of irq descriptors
* @irq: Allocate for specific irq number if irq >= 0
* @from: Start the search from this irq number
* @cnt: Number of consecutive irqs to allocate.
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index d42acaf81886..828cc30774bc 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -29,24 +29,16 @@ static DEFINE_PER_CPU(struct llist_head, lazy_list);
*/
static bool irq_work_claim(struct irq_work *work)
{
- unsigned long flags, oflags, nflags;
+ int oflags;
+ oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);
/*
- * Start with our best wish as a premise but only trust any
- * flag value after cmpxchg() result.
+ * If the work is already pending, no need to raise the IPI.
+ * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
+ * everything we did before is visible.
*/
- flags = work->flags & ~IRQ_WORK_PENDING;
- for (;;) {
- nflags = flags | IRQ_WORK_CLAIMED;
- oflags = cmpxchg(&work->flags, flags, nflags);
- if (oflags == flags)
- break;
- if (oflags & IRQ_WORK_PENDING)
- return false;
- flags = oflags;
- cpu_relax();
- }
-
+ if (oflags & IRQ_WORK_PENDING)
+ return false;
return true;
}
@@ -61,7 +53,7 @@ void __weak arch_irq_work_raise(void)
static void __irq_work_queue_local(struct irq_work *work)
{
/* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
+ if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
tick_nohz_tick_stopped())
arch_irq_work_raise();
@@ -143,7 +135,6 @@ static void irq_work_run_list(struct llist_head *list)
{
struct irq_work *work, *tmp;
struct llist_node *llnode;
- unsigned long flags;
BUG_ON(!irqs_disabled());
@@ -152,6 +143,7 @@ static void irq_work_run_list(struct llist_head *list)
llnode = llist_del_all(list);
llist_for_each_entry_safe(work, tmp, llnode, llnode) {
+ int flags;
/*
* Clear the PENDING bit, after this point the @work
* can be re-used.
@@ -159,15 +151,15 @@ static void irq_work_run_list(struct llist_head *list)
* to claim that work don't rely on us to handle their data
* while we are in the middle of the func.
*/
- flags = work->flags & ~IRQ_WORK_PENDING;
- xchg(&work->flags, flags);
+ flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
work->func(work);
/*
* Clear the BUSY bit and return to the free state if
* no-one else claimed it meanwhile.
*/
- (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+ flags &= ~IRQ_WORK_PENDING;
+ (void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
}
}
@@ -199,7 +191,7 @@ void irq_work_sync(struct irq_work *work)
{
lockdep_assert_irqs_enabled();
- while (work->flags & IRQ_WORK_BUSY)
+ while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
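
The rewrite above collapses the cmpxchg loop into single atomic read-modify-write operations: claiming is one atomic_fetch_or() of IRQ_WORK_CLAIMED (PENDING | BUSY), running clears PENDING with atomic_fetch_andnot() so the work can be re-claimed while its callback executes, and irq_work_sync() just spins on BUSY. A runnable C11 model of the protocol (a userspace sketch, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define PENDING 1
	#define BUSY    2
	#define CLAIMED (PENDING | BUSY)

	static atomic_int flags;

	static bool claim(void)			/* irq_work_claim() */
	{
		int old = atomic_fetch_or(&flags, CLAIMED);
		return !(old & PENDING);	/* already pending: no new IPI */
	}

	static void run(void (*func)(void))	/* body of irq_work_run_list() */
	{
		/* C11 has no fetch_andnot; fetch_and with the inverted mask */
		int f = atomic_fetch_and(&flags, ~PENDING);
		func();
		f &= ~PENDING;
		/* drop BUSY unless someone re-claimed the work meanwhile */
		atomic_compare_exchange_strong(&flags, &f, f & ~BUSY);
	}

	static void hello(void) { puts("work ran"); }

	int main(void)
	{
		if (claim())
			run(hello);
		printf("claim after completion: %d\n", claim());	/* 1 */
		return 0;
	}
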
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 79f252af7dee..a2df93948665 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -1304,7 +1304,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
if (kernel_map) {
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
- phdr->p_vaddr = (Elf64_Addr)_text;
+ phdr->p_vaddr = (unsigned long) _text;
phdr->p_filesz = phdr->p_memsz = _end - _text;
phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
ehdr->e_phnum++;
@@ -1321,7 +1321,7 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
phdr->p_offset = mstart;
phdr->p_paddr = mstart;
- phdr->p_vaddr = (unsigned long long) __va(mstart);
+ phdr->p_vaddr = (unsigned long) __va(mstart);
phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
phdr->p_align = 0;
ehdr->e_phnum++;
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index bd43537702bd..b552cf2d85f8 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -196,7 +196,8 @@ static int klp_patch_func(struct klp_func *func)
ops->fops.func = klp_ftrace_handler;
ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
FTRACE_OPS_FL_DYNAMIC |
- FTRACE_OPS_FL_IPMODIFY;
+ FTRACE_OPS_FL_IPMODIFY |
+ FTRACE_OPS_FL_PERMANENT;
list_add(&ops->node, &klp_ops);
diff --git a/kernel/module.c b/kernel/module.c
index acf7962936c4..052a40212b8e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3728,7 +3728,6 @@ static int complete_formation(struct module *mod, struct load_info *info)
module_enable_ro(mod, false);
module_enable_nx(mod);
- module_enable_x(mod);
/* Mark state as coming so strong_try_module_get() ignores us,
* but kallsyms etc. can see us. */
@@ -3751,6 +3750,11 @@ static int prepare_coming_module(struct module *mod)
if (err)
return err;
+ /* Make module executable after ftrace is enabled */
+ mutex_lock(&module_mutex);
+ module_enable_x(mod);
+ mutex_unlock(&module_mutex);
+
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_COMING, mod);
return 0;
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 44bee462ff57..7cdc64dc2373 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -179,7 +179,7 @@ extern void swsusp_close(fmode_t);
extern int swsusp_unmark(void);
#endif
-struct timeval;
+struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 83105874f255..26b9168321e7 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -734,8 +734,15 @@ zone_found:
* We have found the zone. Now walk the radix tree to find the leaf node
* for our PFN.
*/
+
+ /*
+	 * If the zone we wish to scan is the current zone and the
+ * pfn falls into the current node then we do not need to walk
+ * the tree.
+ */
node = bm->cur.node;
- if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+ if (zone == bm->cur.zone &&
+ ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
goto node_found;
node = zone->rtree;
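
The bitmap walker caches the last radix-tree leaf it visited, but before this fix the hit test keyed only on the zone-relative block index; pfns in two different zones can share that index, so the cache could hand back the wrong leaf. A small model of why the added zone comparison is needed (names with an _m suffix are hypothetical):

	#include <stdio.h>

	struct zone_m { unsigned long start_pfn; };

	/* A cache hit needs both the zone and the block index to match,
	 * because node_pfn is relative to its zone's start_pfn.
	 */
	struct cache_m {
		struct zone_m *zone;
		unsigned long node_pfn;
	};

	#define BM_BLOCK_MASK 0x3ffUL	/* 1024 pfns per leaf, as an example */

	static int cache_hit(struct cache_m *c, struct zone_m *z, unsigned long pfn)
	{
		return z == c->zone &&
		       ((pfn - z->start_pfn) & ~BM_BLOCK_MASK) == c->node_pfn;
	}

	int main(void)
	{
		struct zone_m a = { .start_pfn = 0 }, b = { .start_pfn = 1 << 20 };
		struct cache_m c = { .zone = &a, .node_pfn = 0 };

		/* pfn 5 in zone a and pfn (1<<20)+5 in zone b share the same
		 * relative block index; only the zone check tells them apart. */
		printf("%d %d\n", cache_hit(&c, &a, 5),
				  cache_hit(&c, &b, (1 << 20) + 5));
		return 0;
	}
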
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c8be5a0f5259..1ef6f75d92f1 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2961,7 +2961,7 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
.func = wake_up_klogd_work_func,
- .flags = IRQ_WORK_LAZY,
+ .flags = ATOMIC_INIT(IRQ_WORK_LAZY),
};
void wake_up_klogd(void)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 428cd05c0b5d..ffa959e91227 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -104,7 +104,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* update no idle residency and return.
*/
if (current_clr_polling_and_test()) {
- dev->last_residency = 0;
+ dev->last_residency_ns = 0;
local_irq_enable();
return -EBUSY;
}
@@ -165,7 +165,9 @@ static void cpuidle_idle_call(void)
* until a proper wakeup interrupt happens.
*/
- if (idle_should_enter_s2idle() || dev->use_deepest_state) {
+ if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
+ u64 max_latency_ns;
+
if (idle_should_enter_s2idle()) {
rcu_idle_enter();
@@ -176,12 +178,16 @@ static void cpuidle_idle_call(void)
}
rcu_idle_exit();
+
+ max_latency_ns = U64_MAX;
+ } else {
+ max_latency_ns = dev->forced_idle_latency_limit_ns;
}
tick_nohz_idle_stop_tick();
rcu_idle_enter();
- next_state = cpuidle_find_deepest_state(drv, dev);
+ next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
} else {
bool stop_tick = true;
@@ -311,7 +317,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void play_idle(unsigned long duration_us)
+void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
struct idle_timer it;
@@ -323,29 +329,29 @@ void play_idle(unsigned long duration_us)
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
- WARN_ON_ONCE(!duration_us);
+ WARN_ON_ONCE(!duration_ns);
rcu_sleep_check();
preempt_disable();
current->flags |= PF_IDLE;
- cpuidle_use_deepest_state(true);
+ cpuidle_use_deepest_state(latency_ns);
it.done = 0;
hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
it.timer.function = idle_inject_timer_fn;
- hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+ hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
HRTIMER_MODE_REL_PINNED);
while (!READ_ONCE(it.done))
do_idle();
- cpuidle_use_deepest_state(false);
+ cpuidle_use_deepest_state(0);
current->flags &= ~PF_IDLE;
preempt_fold_need_resched();
preempt_enable();
}
-EXPORT_SYMBOL_GPL(play_idle);
+EXPORT_SYMBOL_GPL(play_idle_precise);
void cpu_startup_entry(enum cpuhp_state state)
{
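
play_idle() becomes play_idle_precise(duration_ns, latency_ns), with the latency limit also replacing the old use_deepest_state bool. Presumably a header-side compatibility wrapper keeps microsecond-based callers working; that change is not in this hunk, so the sketch below is an assumption about its shape, with a stub in place of the kernel function:

	#include <stdio.h>

	#define NSEC_PER_USEC 1000ULL
	#define U64_MAX (~0ULL)

	/* stub standing in for the kernel function patched above */
	static void play_idle_precise(unsigned long long duration_ns,
				      unsigned long long latency_ns)
	{
		printf("idle for %lluns, latency limit %llu\n",
		       duration_ns, latency_ns);
	}

	/* hypothetical wrapper: old callers keep passing microseconds and
	 * place no constraint on exit latency (U64_MAX = "no limit") */
	static void play_idle(unsigned long duration_us)
	{
		play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
	}

	int main(void) { play_idle(500); return 0; }
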
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index c1e566a114ca..ba059fbfc53a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -169,7 +169,6 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
* __wake_up_sync_key - wake up threads blocked on a waitqueue.
* @wq_head: the waitqueue
* @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: opaque value to be passed to wakeup targets
*
 * The sync wakeup differs in that the waker knows that it will schedule
@@ -183,26 +182,44 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
* accessing the task state.
*/
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
- int nr_exclusive, void *key)
+ void *key)
{
- int wake_flags = 1; /* XXX WF_SYNC */
-
if (unlikely(!wq_head))
return;
- if (unlikely(nr_exclusive != 1))
- wake_flags = 0;
-
- __wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
+ __wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+/**
+ * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
+ * @wq_head: the waitqueue
+ * @mode: which threads
+ * @key: opaque value to be passed to wakeup targets
+ *
+ * The sync wakeup differs in that the waker knows that it will schedule
+ * away soon, so while the target thread will be woken up, it will not
+ * be migrated to another CPU - ie. the two threads are 'synchronized'
+ * with each other. This can prevent needless bouncing between CPUs.
+ *
+ * On UP it can prevent extra preemption.
+ *
+ * If this function wakes up a task, it executes a full memory barrier before
+ * accessing the task state.
+ */
+void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
+ unsigned int mode, void *key)
+{
+ __wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
+}
+EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
+
/*
* __wake_up_sync - see __wake_up_sync_key()
*/
-void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
- __wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
+ __wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index dba52a7db5e8..12d2227e5786 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -75,6 +75,7 @@ struct seccomp_knotif {
/* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
int error;
long val;
+ u32 flags;
/* Signals when this has entered SECCOMP_NOTIFY_REPLIED */
struct completion ready;
@@ -732,11 +733,12 @@ static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
return filter->notif->next_id++;
}
-static void seccomp_do_user_notification(int this_syscall,
- struct seccomp_filter *match,
- const struct seccomp_data *sd)
+static int seccomp_do_user_notification(int this_syscall,
+ struct seccomp_filter *match,
+ const struct seccomp_data *sd)
{
int err;
+ u32 flags = 0;
long ret = 0;
struct seccomp_knotif n = {};
@@ -764,6 +766,7 @@ static void seccomp_do_user_notification(int this_syscall,
if (err == 0) {
ret = n.val;
err = n.error;
+ flags = n.flags;
}
/*
@@ -780,8 +783,14 @@ static void seccomp_do_user_notification(int this_syscall,
list_del(&n.list);
out:
mutex_unlock(&match->notify_lock);
+
+ /* Userspace requests to continue the syscall. */
+ if (flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE)
+ return 0;
+
syscall_set_return_value(current, task_pt_regs(current),
err, ret);
+ return -1;
}
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
@@ -867,8 +876,10 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
return 0;
case SECCOMP_RET_USER_NOTIF:
- seccomp_do_user_notification(this_syscall, match, sd);
- goto skip;
+ if (seccomp_do_user_notification(this_syscall, match, sd))
+ goto skip;
+
+ return 0;
case SECCOMP_RET_LOG:
seccomp_log(this_syscall, 0, action, true);
@@ -1087,7 +1098,11 @@ static long seccomp_notify_send(struct seccomp_filter *filter,
if (copy_from_user(&resp, buf, sizeof(resp)))
return -EFAULT;
- if (resp.flags)
+ if (resp.flags & ~SECCOMP_USER_NOTIF_FLAG_CONTINUE)
+ return -EINVAL;
+
+ if ((resp.flags & SECCOMP_USER_NOTIF_FLAG_CONTINUE) &&
+ (resp.error || resp.val))
return -EINVAL;
ret = mutex_lock_interruptible(&filter->notify_lock);
@@ -1116,6 +1131,7 @@ static long seccomp_notify_send(struct seccomp_filter *filter,
knotif->state = SECCOMP_NOTIFY_REPLIED;
knotif->error = resp.error;
knotif->val = resp.val;
+ knotif->flags = resp.flags;
complete(&knotif->ready);
out:
mutex_unlock(&filter->notify_lock);
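
With SECCOMP_USER_NOTIF_FLAG_CONTINUE a supervisor can tell the kernel to execute the intercepted syscall natively instead of substituting a return value; as the checks above enforce, error and val must then be zero. A hedged sketch of the reply path, assuming notif_fd is a listener fd obtained with SECCOMP_FILTER_FLAG_NEW_LISTENER and id came from a prior SECCOMP_IOCTL_NOTIF_RECV (not shown):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/seccomp.h>

	/* Reply telling the kernel to let the trapped syscall proceed. */
	static int continue_syscall(int notif_fd, uint64_t id)
	{
		struct seccomp_notif_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.id = id;
		resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
		/* resp.error and resp.val must stay 0, per the check above */

		return ioctl(notif_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
	}
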
diff --git a/kernel/sys.c b/kernel/sys.c
index a611d1d58c7d..d3aef31e24dc 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1763,8 +1763,8 @@ void getrusage(struct task_struct *p, int who, struct rusage *r)
unlock_task_sighand(p, &flags);
out:
- r->ru_utime = ns_to_timeval(utime);
- r->ru_stime = ns_to_timeval(stime);
+ r->ru_utime = ns_to_kernel_old_timeval(utime);
+ r->ru_stime = ns_to_kernel_old_timeval(stime);
if (who != RUSAGE_CHILDREN) {
struct mm_struct *mm = get_task_mm(p);
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 34b76895b81e..3b69a560a7ac 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -410,6 +410,29 @@ COND_SYSCALL(send);
COND_SYSCALL(bdflush);
COND_SYSCALL(uselib);
+/* optional: time32 */
+COND_SYSCALL(time32);
+COND_SYSCALL(stime32);
+COND_SYSCALL(utime32);
+COND_SYSCALL(adjtimex_time32);
+COND_SYSCALL(sched_rr_get_interval_time32);
+COND_SYSCALL(nanosleep_time32);
+COND_SYSCALL(rt_sigtimedwait_time32);
+COND_SYSCALL_COMPAT(rt_sigtimedwait_time32);
+COND_SYSCALL(timer_settime32);
+COND_SYSCALL(timer_gettime32);
+COND_SYSCALL(clock_settime32);
+COND_SYSCALL(clock_gettime32);
+COND_SYSCALL(clock_getres_time32);
+COND_SYSCALL(clock_nanosleep_time32);
+COND_SYSCALL(utimes_time32);
+COND_SYSCALL(futimesat_time32);
+COND_SYSCALL(pselect6_time32);
+COND_SYSCALL_COMPAT(pselect6_time32);
+COND_SYSCALL(ppoll_time32);
+COND_SYSCALL_COMPAT(ppoll_time32);
+COND_SYSCALL(utimensat_time32);
+COND_SYSCALL(clock_adjtime32);
/*
* The syscalls below are not found in include/uapi/asm-generic/unistd.h
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b6f2f35d0bcf..70665934d53e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1466,7 +1466,7 @@ static struct ctl_table vm_table[] = {
.procname = "drop_caches",
.data = &sysctl_drop_caches,
.maxlen = sizeof(int),
- .mode = 0644,
+ .mode = 0200,
.proc_handler = drop_caches_sysctl_handler,
.extra1 = SYSCTL_ONE,
.extra2 = &four,
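
Changing drop_caches from 0644 to 0200 makes the node write-only; reading it only ever returned the last value written, which is not meaningful state. Userspace keeps writing 1 (page cache), 2 (slab), or 3 (both). A minimal C snippet for the write side; the helper name is hypothetical:

	#include <fcntl.h>
	#include <unistd.h>

	/* drop_caches is now write-only: open O_WRONLY and write the mode. */
	static int drop_caches(const char *mode)	/* "1", "2", or "3" */
	{
		int fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, mode, 1);
		close(fd);
		return n == 1 ? 0 : -1;
	}
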
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 73c132095a7b..7d550cc76a3b 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -18,1317 +18,12 @@
#include <linux/slab.h>
#include <linux/compat.h>
-#ifdef CONFIG_SYSCTL_SYSCALL
-
-struct bin_table;
-typedef ssize_t bin_convert_t(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen);
-
-static bin_convert_t bin_dir;
-static bin_convert_t bin_string;
-static bin_convert_t bin_intvec;
-static bin_convert_t bin_ulongvec;
-static bin_convert_t bin_uuid;
-static bin_convert_t bin_dn_node_address;
-
-#define CTL_DIR bin_dir
-#define CTL_STR bin_string
-#define CTL_INT bin_intvec
-#define CTL_ULONG bin_ulongvec
-#define CTL_UUID bin_uuid
-#define CTL_DNADR bin_dn_node_address
-
-#define BUFSZ 256
-
-struct bin_table {
- bin_convert_t *convert;
- int ctl_name;
- const char *procname;
- const struct bin_table *child;
-};
-
-static const struct bin_table bin_random_table[] = {
- { CTL_INT, RANDOM_POOLSIZE, "poolsize" },
- { CTL_INT, RANDOM_ENTROPY_COUNT, "entropy_avail" },
- { CTL_INT, RANDOM_READ_THRESH, "read_wakeup_threshold" },
- { CTL_INT, RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
- { CTL_UUID, RANDOM_BOOT_ID, "boot_id" },
- { CTL_UUID, RANDOM_UUID, "uuid" },
- {}
-};
-
-static const struct bin_table bin_pty_table[] = {
- { CTL_INT, PTY_MAX, "max" },
- { CTL_INT, PTY_NR, "nr" },
- {}
-};
-
-static const struct bin_table bin_kern_table[] = {
- { CTL_STR, KERN_OSTYPE, "ostype" },
- { CTL_STR, KERN_OSRELEASE, "osrelease" },
- /* KERN_OSREV not used */
- { CTL_STR, KERN_VERSION, "version" },
- /* KERN_SECUREMASK not used */
- /* KERN_PROF not used */
- { CTL_STR, KERN_NODENAME, "hostname" },
- { CTL_STR, KERN_DOMAINNAME, "domainname" },
-
- { CTL_INT, KERN_PANIC, "panic" },
- { CTL_INT, KERN_REALROOTDEV, "real-root-dev" },
-
- { CTL_STR, KERN_SPARC_REBOOT, "reboot-cmd" },
- { CTL_INT, KERN_CTLALTDEL, "ctrl-alt-del" },
- { CTL_INT, KERN_PRINTK, "printk" },
-
- /* KERN_NAMETRANS not used */
- /* KERN_PPC_HTABRECLAIM not used */
- /* KERN_PPC_ZEROPAGED not used */
- { CTL_INT, KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
-
- { CTL_STR, KERN_MODPROBE, "modprobe" },
- { CTL_INT, KERN_SG_BIG_BUFF, "sg-big-buff" },
- { CTL_INT, KERN_ACCT, "acct" },
- /* KERN_PPC_L2CR "l2cr" no longer used */
-
- /* KERN_RTSIGNR not used */
- /* KERN_RTSIGMAX not used */
-
- { CTL_ULONG, KERN_SHMMAX, "shmmax" },
- { CTL_INT, KERN_MSGMAX, "msgmax" },
- { CTL_INT, KERN_MSGMNB, "msgmnb" },
- /* KERN_MSGPOOL not used*/
- { CTL_INT, KERN_SYSRQ, "sysrq" },
- { CTL_INT, KERN_MAX_THREADS, "threads-max" },
- { CTL_DIR, KERN_RANDOM, "random", bin_random_table },
- { CTL_ULONG, KERN_SHMALL, "shmall" },
- { CTL_INT, KERN_MSGMNI, "msgmni" },
- { CTL_INT, KERN_SEM, "sem" },
- { CTL_INT, KERN_SPARC_STOP_A, "stop-a" },
- { CTL_INT, KERN_SHMMNI, "shmmni" },
-
- { CTL_INT, KERN_OVERFLOWUID, "overflowuid" },
- { CTL_INT, KERN_OVERFLOWGID, "overflowgid" },
-
- { CTL_STR, KERN_HOTPLUG, "hotplug", },
- { CTL_INT, KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
-
- { CTL_INT, KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
- { CTL_INT, KERN_CORE_USES_PID, "core_uses_pid" },
- /* KERN_TAINTED "tainted" no longer used */
- { CTL_INT, KERN_CADPID, "cad_pid" },
- { CTL_INT, KERN_PIDMAX, "pid_max" },
- { CTL_STR, KERN_CORE_PATTERN, "core_pattern" },
- { CTL_INT, KERN_PANIC_ON_OOPS, "panic_on_oops" },
- { CTL_INT, KERN_HPPA_PWRSW, "soft-power" },
- { CTL_INT, KERN_HPPA_UNALIGNED, "unaligned-trap" },
-
- { CTL_INT, KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
- { CTL_INT, KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
-
- { CTL_DIR, KERN_PTY, "pty", bin_pty_table },
- { CTL_INT, KERN_NGROUPS_MAX, "ngroups_max" },
- { CTL_INT, KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
- /* KERN_HZ_TIMER "hz_timer" no longer used */
- { CTL_INT, KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
- { CTL_INT, KERN_BOOTLOADER_TYPE, "bootloader_type" },
- { CTL_INT, KERN_RANDOMIZE, "randomize_va_space" },
-
- { CTL_INT, KERN_SPIN_RETRY, "spin_retry" },
- /* KERN_ACPI_VIDEO_FLAGS "acpi_video_flags" no longer used */
- { CTL_INT, KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
- { CTL_INT, KERN_COMPAT_LOG, "compat-log" },
- { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
- { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
- { CTL_INT, KERN_PANIC_ON_WARN, "panic_on_warn" },
- { CTL_ULONG, KERN_PANIC_PRINT, "panic_print" },
- {}
-};
-
-static const struct bin_table bin_vm_table[] = {
- { CTL_INT, VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
- { CTL_INT, VM_PAGE_CLUSTER, "page-cluster" },
- { CTL_INT, VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
- { CTL_INT, VM_DIRTY_RATIO, "dirty_ratio" },
- /* VM_DIRTY_WB_CS "dirty_writeback_centisecs" no longer used */
- /* VM_DIRTY_EXPIRE_CS "dirty_expire_centisecs" no longer used */
- /* VM_NR_PDFLUSH_THREADS "nr_pdflush_threads" no longer used */
- { CTL_INT, VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
- /* VM_PAGEBUF unused */
- /* VM_HUGETLB_PAGES "nr_hugepages" no longer used */
- { CTL_INT, VM_SWAPPINESS, "swappiness" },
- { CTL_INT, VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
- { CTL_INT, VM_MIN_FREE_KBYTES, "min_free_kbytes" },
- { CTL_INT, VM_MAX_MAP_COUNT, "max_map_count" },
- { CTL_INT, VM_LAPTOP_MODE, "laptop_mode" },
- { CTL_INT, VM_BLOCK_DUMP, "block_dump" },
- { CTL_INT, VM_HUGETLB_GROUP, "hugetlb_shm_group" },
- { CTL_INT, VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
- { CTL_INT, VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
- /* VM_SWAP_TOKEN_TIMEOUT unused */
- { CTL_INT, VM_DROP_PAGECACHE, "drop_caches" },
- { CTL_INT, VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
- { CTL_INT, VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
- { CTL_INT, VM_MIN_UNMAPPED, "min_unmapped_ratio" },
- { CTL_INT, VM_PANIC_ON_OOM, "panic_on_oom" },
- { CTL_INT, VM_VDSO_ENABLED, "vdso_enabled" },
- { CTL_INT, VM_MIN_SLAB, "min_slab_ratio" },
-
- {}
-};
-
-static const struct bin_table bin_net_core_table[] = {
- { CTL_INT, NET_CORE_WMEM_MAX, "wmem_max" },
- { CTL_INT, NET_CORE_RMEM_MAX, "rmem_max" },
- { CTL_INT, NET_CORE_WMEM_DEFAULT, "wmem_default" },
- { CTL_INT, NET_CORE_RMEM_DEFAULT, "rmem_default" },
- /* NET_CORE_DESTROY_DELAY unused */
- { CTL_INT, NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
- /* NET_CORE_FASTROUTE unused */
- { CTL_INT, NET_CORE_MSG_COST, "message_cost" },
- { CTL_INT, NET_CORE_MSG_BURST, "message_burst" },
- { CTL_INT, NET_CORE_OPTMEM_MAX, "optmem_max" },
- /* NET_CORE_HOT_LIST_LENGTH unused */
- /* NET_CORE_DIVERT_VERSION unused */
- /* NET_CORE_NO_CONG_THRESH unused */
- /* NET_CORE_NO_CONG unused */
- /* NET_CORE_LO_CONG unused */
- /* NET_CORE_MOD_CONG unused */
- { CTL_INT, NET_CORE_DEV_WEIGHT, "dev_weight" },
- { CTL_INT, NET_CORE_SOMAXCONN, "somaxconn" },
- { CTL_INT, NET_CORE_BUDGET, "netdev_budget" },
- { CTL_INT, NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
- { CTL_INT, NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
- { CTL_INT, NET_CORE_WARNINGS, "warnings" },
- {},
-};
-
-static const struct bin_table bin_net_unix_table[] = {
- /* NET_UNIX_DESTROY_DELAY unused */
- /* NET_UNIX_DELETE_DELAY unused */
- { CTL_INT, NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
- {}
-};
-
-static const struct bin_table bin_net_ipv4_route_table[] = {
- { CTL_INT, NET_IPV4_ROUTE_FLUSH, "flush" },
- /* NET_IPV4_ROUTE_MIN_DELAY "min_delay" no longer used */
- /* NET_IPV4_ROUTE_MAX_DELAY "max_delay" no longer used */
- { CTL_INT, NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
- { CTL_INT, NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
- { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
- { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
- { CTL_INT, NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
- /* NET_IPV4_ROUTE_GC_INTERVAL "gc_interval" no longer used */
- { CTL_INT, NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
- { CTL_INT, NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
- { CTL_INT, NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
- { CTL_INT, NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
- { CTL_INT, NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
- { CTL_INT, NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
- { CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
- { CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
- { CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
- {}
-};
-
-static const struct bin_table bin_net_ipv4_conf_vars_table[] = {
- { CTL_INT, NET_IPV4_CONF_FORWARDING, "forwarding" },
- { CTL_INT, NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
-
- { CTL_INT, NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
- { CTL_INT, NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
- { CTL_INT, NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
- { CTL_INT, NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
- { CTL_INT, NET_IPV4_CONF_RP_FILTER, "rp_filter" },
- { CTL_INT, NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
- { CTL_INT, NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
- { CTL_INT, NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
- { CTL_INT, NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
- { CTL_INT, NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
- { CTL_INT, NET_IPV4_CONF_TAG, "tag" },
- { CTL_INT, NET_IPV4_CONF_ARPFILTER, "arp_filter" },
- { CTL_INT, NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
- { CTL_INT, NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
- { CTL_INT, NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
- { CTL_INT, NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
-
- { CTL_INT, NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
- { CTL_INT, NET_IPV4_CONF_NOPOLICY, "disable_policy" },
- { CTL_INT, NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
- { CTL_INT, NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
- {}
-};
-
-static const struct bin_table bin_net_ipv4_conf_table[] = {
- { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv4_conf_vars_table },
- { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv4_conf_vars_table },
- { CTL_DIR, 0, NULL, bin_net_ipv4_conf_vars_table },
- {}
-};
-
-static const struct bin_table bin_net_neigh_vars_table[] = {
- { CTL_INT, NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
- { CTL_INT, NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
- { CTL_INT, NET_NEIGH_APP_SOLICIT, "app_solicit" },
- /* NET_NEIGH_RETRANS_TIME "retrans_time" no longer used */
- { CTL_INT, NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
- { CTL_INT, NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
- { CTL_INT, NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
- { CTL_INT, NET_NEIGH_UNRES_QLEN, "unres_qlen" },
- { CTL_INT, NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
- /* NET_NEIGH_ANYCAST_DELAY "anycast_delay" no longer used */
- /* NET_NEIGH_PROXY_DELAY "proxy_delay" no longer used */
- /* NET_NEIGH_LOCKTIME "locktime" no longer used */
- { CTL_INT, NET_NEIGH_GC_INTERVAL, "gc_interval" },
- { CTL_INT, NET_NEIGH_GC_THRESH1, "gc_thresh1" },
- { CTL_INT, NET_NEIGH_GC_THRESH2, "gc_thresh2" },
- { CTL_INT, NET_NEIGH_GC_THRESH3, "gc_thresh3" },
- { CTL_INT, NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
- { CTL_INT, NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
- {}
-};
-
-static const struct bin_table bin_net_neigh_table[] = {
- { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_neigh_vars_table },
- { CTL_DIR, 0, NULL, bin_net_neigh_vars_table },
- {}
-};
-
-static const struct bin_table bin_net_ipv4_netfilter_table[] = {
- { CTL_INT, NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
-
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "ip_conntrack_tcp_timeout_syn_sent" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "ip_conntrack_tcp_timeout_syn_recv" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "ip_conntrack_tcp_timeout_established" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "ip_conntrack_tcp_timeout_fin_wait" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "ip_conntrack_tcp_timeout_close_wait" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "ip_conntrack_tcp_timeout_last_ack" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "ip_conntrack_tcp_timeout_time_wait" no longer used */
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "ip_conntrack_tcp_timeout_close" no longer used */
-
- /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT "ip_conntrack_udp_timeout" no longer used */
- /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM "ip_conntrack_udp_timeout_stream" no longer used */
- /* NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT "ip_conntrack_icmp_timeout" no longer used */
- /* NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT "ip_conntrack_generic_timeout" no longer used */
-
- { CTL_INT, NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
- { CTL_INT, NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
- /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "ip_conntrack_tcp_timeout_max_retrans" no longer used */
- { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
- { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
- { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
-
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "ip_conntrack_sctp_timeout_closed" no longer used */
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "ip_conntrack_sctp_timeout_cookie_wait" no longer used */
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "ip_conntrack_sctp_timeout_cookie_echoed" no longer used */
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "ip_conntrack_sctp_timeout_established" no longer used */
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "ip_conntrack_sctp_timeout_shutdown_sent" no longer used */
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "ip_conntrack_sctp_timeout_shutdown_recd" no longer used */
- /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "ip_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
-
- { CTL_INT, NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
- { CTL_INT, NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
- {}
-};
-
-static const struct bin_table bin_net_ipv4_table[] = {
- {CTL_INT, NET_IPV4_FORWARD, "ip_forward" },
-
- { CTL_DIR, NET_IPV4_CONF, "conf", bin_net_ipv4_conf_table },
- { CTL_DIR, NET_IPV4_NEIGH, "neigh", bin_net_neigh_table },
- { CTL_DIR, NET_IPV4_ROUTE, "route", bin_net_ipv4_route_table },
- /* NET_IPV4_FIB_HASH unused */
- { CTL_DIR, NET_IPV4_NETFILTER, "netfilter", bin_net_ipv4_netfilter_table },
-
- { CTL_INT, NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
- { CTL_INT, NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
- { CTL_INT, NET_IPV4_TCP_SACK, "tcp_sack" },
- { CTL_INT, NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
- { CTL_INT, NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
- /* NET_IPV4_AUTOCONFIG unused */
- { CTL_INT, NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
- { CTL_INT, NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
- { CTL_INT, NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
- { CTL_INT, NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
- { CTL_INT, NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
- { CTL_INT, NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
- { CTL_INT, NET_IPV4_DYNADDR, "ip_dynaddr" },
- { CTL_INT, NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
- { CTL_INT, NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
- { CTL_INT, NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
- { CTL_INT, NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
- { CTL_INT, NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
- { CTL_INT, NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
- { CTL_INT, NET_TCP_SYNCOOKIES, "tcp_syncookies" },
- { CTL_INT, NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
- { CTL_INT, NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
- { CTL_INT, NET_TCP_STDURG, "tcp_stdurg" },
- { CTL_INT, NET_TCP_RFC1337, "tcp_rfc1337" },
- { CTL_INT, NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
- { CTL_INT, NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
- { CTL_INT, NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
- { CTL_INT, NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
- { CTL_INT, NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
- { CTL_INT, NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
- { CTL_INT, NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
- { CTL_INT, NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
- { CTL_INT, NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
- { CTL_INT, NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
- { CTL_INT, NET_TCP_FACK, "tcp_fack" },
- { CTL_INT, NET_TCP_REORDERING, "tcp_reordering" },
- { CTL_INT, NET_TCP_ECN, "tcp_ecn" },
- { CTL_INT, NET_TCP_DSACK, "tcp_dsack" },
- { CTL_INT, NET_TCP_MEM, "tcp_mem" },
- { CTL_INT, NET_TCP_WMEM, "tcp_wmem" },
- { CTL_INT, NET_TCP_RMEM, "tcp_rmem" },
- { CTL_INT, NET_TCP_APP_WIN, "tcp_app_win" },
- { CTL_INT, NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
- { CTL_INT, NET_TCP_TW_REUSE, "tcp_tw_reuse" },
- { CTL_INT, NET_TCP_FRTO, "tcp_frto" },
- { CTL_INT, NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
- { CTL_INT, NET_TCP_LOW_LATENCY, "tcp_low_latency" },
- { CTL_INT, NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
- { CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
- { CTL_INT, NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
- { CTL_STR, NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
- { CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
- { CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" },
- { CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
- { CTL_INT, NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
- { CTL_INT, NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
- { CTL_INT, NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
- { CTL_INT, NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
- { CTL_INT, NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
- /* NET_TCP_AVAIL_CONG_CONTROL "tcp_available_congestion_control" no longer used */
- { CTL_STR, NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
- { CTL_INT, NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
-
- { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
- { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
- { CTL_INT, NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
- { CTL_INT, NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
- { CTL_INT, NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
- { CTL_INT, NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
-
- { CTL_INT, NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
- { CTL_INT, NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
- { CTL_INT, NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
-
- { CTL_INT, NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
- /* NET_IPV4_IPFRAG_MAX_DIST "ipfrag_max_dist" no longer used */
-
- { CTL_INT, 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
-
- /* NET_TCP_DEFAULT_WIN_SCALE unused */
- /* NET_TCP_BIC_BETA unused */
- /* NET_IPV4_TCP_MAX_KA_PROBES unused */
- /* NET_IPV4_IP_MASQ_DEBUG unused */
- /* NET_TCP_SYN_TAILDROP unused */
- /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
- /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
- /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
- /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
- /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
- /* NET_IPV4_ALWAYS_DEFRAG unused */
- {}
-};
-
-static const struct bin_table bin_net_ipx_table[] = {
- { CTL_INT, NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
- /* NET_IPX_FORWARDING unused */
- {}
-};
-
-static const struct bin_table bin_net_atalk_table[] = {
- { CTL_INT, NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
- { CTL_INT, NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
- { CTL_INT, NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
- { CTL_INT, NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
- {},
-};
-
-static const struct bin_table bin_net_netrom_table[] = {
- { CTL_INT, NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
- { CTL_INT, NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
- { CTL_INT, NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
- { CTL_INT, NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
- { CTL_INT, NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
- { CTL_INT, NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
- { CTL_INT, NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
- { CTL_INT, NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
- { CTL_INT, NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
- { CTL_INT, NET_NETROM_ROUTING_CONTROL, "routing_control" },
- { CTL_INT, NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
- { CTL_INT, NET_NETROM_RESET, "reset" },
- {}
-};
-
-static const struct bin_table bin_net_ax25_param_table[] = {
- { CTL_INT, NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
- { CTL_INT, NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
- { CTL_INT, NET_AX25_BACKOFF_TYPE, "backoff_type" },
- { CTL_INT, NET_AX25_CONNECT_MODE, "connect_mode" },
- { CTL_INT, NET_AX25_STANDARD_WINDOW, "standard_window_size" },
- { CTL_INT, NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
- { CTL_INT, NET_AX25_T1_TIMEOUT, "t1_timeout" },
- { CTL_INT, NET_AX25_T2_TIMEOUT, "t2_timeout" },
- { CTL_INT, NET_AX25_T3_TIMEOUT, "t3_timeout" },
- { CTL_INT, NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
- { CTL_INT, NET_AX25_N2, "maximum_retry_count" },
- { CTL_INT, NET_AX25_PACLEN, "maximum_packet_length" },
- { CTL_INT, NET_AX25_PROTOCOL, "protocol" },
- { CTL_INT, NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
- {}
-};
-
-static const struct bin_table bin_net_ax25_table[] = {
- { CTL_DIR, 0, NULL, bin_net_ax25_param_table },
- {}
-};
-
-static const struct bin_table bin_net_rose_table[] = {
- { CTL_INT, NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
- { CTL_INT, NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
- { CTL_INT, NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
- { CTL_INT, NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
- { CTL_INT, NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
- { CTL_INT, NET_ROSE_ROUTING_CONTROL, "routing_control" },
- { CTL_INT, NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
- { CTL_INT, NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
- { CTL_INT, NET_ROSE_WINDOW_SIZE, "window_size" },
- { CTL_INT, NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
- {}
-};
-
-static const struct bin_table bin_net_ipv6_conf_var_table[] = {
- { CTL_INT, NET_IPV6_FORWARDING, "forwarding" },
- { CTL_INT, NET_IPV6_HOP_LIMIT, "hop_limit" },
- { CTL_INT, NET_IPV6_MTU, "mtu" },
- { CTL_INT, NET_IPV6_ACCEPT_RA, "accept_ra" },
- { CTL_INT, NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
- { CTL_INT, NET_IPV6_AUTOCONF, "autoconf" },
- { CTL_INT, NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
- { CTL_INT, NET_IPV6_RTR_SOLICITS, "router_solicitations" },
- { CTL_INT, NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
- { CTL_INT, NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
- { CTL_INT, NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
- { CTL_INT, NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
- { CTL_INT, NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
- { CTL_INT, NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
- { CTL_INT, NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
- { CTL_INT, NET_IPV6_MAX_ADDRESSES, "max_addresses" },
- { CTL_INT, NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
- { CTL_INT, NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
- { CTL_INT, NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
- { CTL_INT, NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
- { CTL_INT, NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
- { CTL_INT, NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
- { CTL_INT, NET_IPV6_PROXY_NDP, "proxy_ndp" },
- { CTL_INT, NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
- { CTL_INT, NET_IPV6_ACCEPT_RA_FROM_LOCAL, "accept_ra_from_local" },
- {}
-};
-
-static const struct bin_table bin_net_ipv6_conf_table[] = {
- { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv6_conf_var_table },
- { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv6_conf_var_table },
- { CTL_DIR, 0, NULL, bin_net_ipv6_conf_var_table },
- {}
-};
-
-static const struct bin_table bin_net_ipv6_route_table[] = {
- /* NET_IPV6_ROUTE_FLUSH "flush" no longer used */
- { CTL_INT, NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
- { CTL_INT, NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
- { CTL_INT, NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
- { CTL_INT, NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
- { CTL_INT, NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
- { CTL_INT, NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
- { CTL_INT, NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
- { CTL_INT, NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
- { CTL_INT, NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
- {}
-};
-
-static const struct bin_table bin_net_ipv6_icmp_table[] = {
- { CTL_INT, NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
- {}
-};
-
-static const struct bin_table bin_net_ipv6_table[] = {
- { CTL_DIR, NET_IPV6_CONF, "conf", bin_net_ipv6_conf_table },
- { CTL_DIR, NET_IPV6_NEIGH, "neigh", bin_net_neigh_table },
- { CTL_DIR, NET_IPV6_ROUTE, "route", bin_net_ipv6_route_table },
- { CTL_DIR, NET_IPV6_ICMP, "icmp", bin_net_ipv6_icmp_table },
- { CTL_INT, NET_IPV6_BINDV6ONLY, "bindv6only" },
- { CTL_INT, NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
- { CTL_INT, NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
- { CTL_INT, NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
- { CTL_INT, NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
- { CTL_INT, NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
- { CTL_INT, 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
- {}
-};
-
-static const struct bin_table bin_net_x25_table[] = {
- { CTL_INT, NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
- { CTL_INT, NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
- { CTL_INT, NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
- { CTL_INT, NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
- { CTL_INT, NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
- { CTL_INT, NET_X25_FORWARD, "x25_forward" },
- {}
-};
-
-static const struct bin_table bin_net_tr_table[] = {
- { CTL_INT, NET_TR_RIF_TIMEOUT, "rif_timeout" },
- {}
-};
-
-
-static const struct bin_table bin_net_decnet_conf_vars[] = {
- { CTL_INT, NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
- { CTL_INT, NET_DECNET_CONF_DEV_PRIORITY, "priority" },
- { CTL_INT, NET_DECNET_CONF_DEV_T2, "t2" },
- { CTL_INT, NET_DECNET_CONF_DEV_T3, "t3" },
- {}
-};
-
-static const struct bin_table bin_net_decnet_conf[] = {
- { CTL_DIR, NET_DECNET_CONF_ETHER, "ethernet", bin_net_decnet_conf_vars },
- { CTL_DIR, NET_DECNET_CONF_GRE, "ipgre", bin_net_decnet_conf_vars },
- { CTL_DIR, NET_DECNET_CONF_X25, "x25", bin_net_decnet_conf_vars },
- { CTL_DIR, NET_DECNET_CONF_PPP, "ppp", bin_net_decnet_conf_vars },
- { CTL_DIR, NET_DECNET_CONF_DDCMP, "ddcmp", bin_net_decnet_conf_vars },
- { CTL_DIR, NET_DECNET_CONF_LOOPBACK, "loopback", bin_net_decnet_conf_vars },
- { CTL_DIR, 0, NULL, bin_net_decnet_conf_vars },
- {}
-};
-
-static const struct bin_table bin_net_decnet_table[] = {
- { CTL_DIR, NET_DECNET_CONF, "conf", bin_net_decnet_conf },
- { CTL_DNADR, NET_DECNET_NODE_ADDRESS, "node_address" },
- { CTL_STR, NET_DECNET_NODE_NAME, "node_name" },
- { CTL_STR, NET_DECNET_DEFAULT_DEVICE, "default_device" },
- { CTL_INT, NET_DECNET_TIME_WAIT, "time_wait" },
- { CTL_INT, NET_DECNET_DN_COUNT, "dn_count" },
- { CTL_INT, NET_DECNET_DI_COUNT, "di_count" },
- { CTL_INT, NET_DECNET_DR_COUNT, "dr_count" },
- { CTL_INT, NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
- { CTL_INT, NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
- { CTL_INT, NET_DECNET_MEM, "decnet_mem" },
- { CTL_INT, NET_DECNET_RMEM, "decnet_rmem" },
- { CTL_INT, NET_DECNET_WMEM, "decnet_wmem" },
- { CTL_INT, NET_DECNET_DEBUG_LEVEL, "debug" },
- {}
-};
-
-static const struct bin_table bin_net_sctp_table[] = {
- { CTL_INT, NET_SCTP_RTO_INITIAL, "rto_initial" },
- { CTL_INT, NET_SCTP_RTO_MIN, "rto_min" },
- { CTL_INT, NET_SCTP_RTO_MAX, "rto_max" },
- { CTL_INT, NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
- { CTL_INT, NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
- { CTL_INT, NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
- { CTL_INT, NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
- { CTL_INT, NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
- { CTL_INT, NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
- { CTL_INT, NET_SCTP_HB_INTERVAL, "hb_interval" },
- { CTL_INT, NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
- { CTL_INT, NET_SCTP_MAX_BURST, "max_burst" },
- { CTL_INT, NET_SCTP_ADDIP_ENABLE, "addip_enable" },
- { CTL_INT, NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
- { CTL_INT, NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
- { CTL_INT, NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
- { CTL_INT, NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
- {}
-};
-
-static const struct bin_table bin_net_llc_llc2_timeout_table[] = {
- { CTL_INT, NET_LLC2_ACK_TIMEOUT, "ack" },
- { CTL_INT, NET_LLC2_P_TIMEOUT, "p" },
- { CTL_INT, NET_LLC2_REJ_TIMEOUT, "rej" },
- { CTL_INT, NET_LLC2_BUSY_TIMEOUT, "busy" },
- {}
-};
-
-static const struct bin_table bin_net_llc_station_table[] = {
- { CTL_INT, NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
- {}
-};
-
-static const struct bin_table bin_net_llc_llc2_table[] = {
- { CTL_DIR, NET_LLC2, "timeout", bin_net_llc_llc2_timeout_table },
- {}
-};
-
-static const struct bin_table bin_net_llc_table[] = {
- { CTL_DIR, NET_LLC2, "llc2", bin_net_llc_llc2_table },
- { CTL_DIR, NET_LLC_STATION, "station", bin_net_llc_station_table },
- {}
-};
-
-static const struct bin_table bin_net_netfilter_table[] = {
- { CTL_INT, NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "nf_conntrack_tcp_timeout_syn_sent" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "nf_conntrack_tcp_timeout_syn_recv" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "nf_conntrack_tcp_timeout_established" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "nf_conntrack_tcp_timeout_fin_wait" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "nf_conntrack_tcp_timeout_close_wait" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "nf_conntrack_tcp_timeout_last_ack" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "nf_conntrack_tcp_timeout_time_wait" no longer used */
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "nf_conntrack_tcp_timeout_close" no longer used */
- /* NET_NF_CONNTRACK_UDP_TIMEOUT "nf_conntrack_udp_timeout" no longer used */
- /* NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM "nf_conntrack_udp_timeout_stream" no longer used */
- /* NET_NF_CONNTRACK_ICMP_TIMEOUT "nf_conntrack_icmp_timeout" no longer used */
- /* NET_NF_CONNTRACK_GENERIC_TIMEOUT "nf_conntrack_generic_timeout" no longer used */
- { CTL_INT, NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
- { CTL_INT, NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
- /* NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "nf_conntrack_tcp_timeout_max_retrans" no longer used */
- { CTL_INT, NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
- { CTL_INT, NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
- { CTL_INT, NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "nf_conntrack_sctp_timeout_closed" no longer used */
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "nf_conntrack_sctp_timeout_cookie_wait" no longer used */
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "nf_conntrack_sctp_timeout_cookie_echoed" no longer used */
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "nf_conntrack_sctp_timeout_established" no longer used */
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "nf_conntrack_sctp_timeout_shutdown_sent" no longer used */
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "nf_conntrack_sctp_timeout_shutdown_recd" no longer used */
- /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "nf_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
- { CTL_INT, NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
- /* NET_NF_CONNTRACK_ICMPV6_TIMEOUT "nf_conntrack_icmpv6_timeout" no longer used */
- /* NET_NF_CONNTRACK_FRAG6_TIMEOUT "nf_conntrack_frag6_timeout" no longer used */
- { CTL_INT, NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
- { CTL_INT, NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
- { CTL_INT, NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
-
- {}
-};
-
-static const struct bin_table bin_net_table[] = {
- { CTL_DIR, NET_CORE, "core", bin_net_core_table },
- /* NET_ETHER not used */
- /* NET_802 not used */
- { CTL_DIR, NET_UNIX, "unix", bin_net_unix_table },
- { CTL_DIR, NET_IPV4, "ipv4", bin_net_ipv4_table },
- { CTL_DIR, NET_IPX, "ipx", bin_net_ipx_table },
- { CTL_DIR, NET_ATALK, "appletalk", bin_net_atalk_table },
- { CTL_DIR, NET_NETROM, "netrom", bin_net_netrom_table },
- { CTL_DIR, NET_AX25, "ax25", bin_net_ax25_table },
- /* NET_BRIDGE "bridge" no longer used */
- { CTL_DIR, NET_ROSE, "rose", bin_net_rose_table },
- { CTL_DIR, NET_IPV6, "ipv6", bin_net_ipv6_table },
- { CTL_DIR, NET_X25, "x25", bin_net_x25_table },
- { CTL_DIR, NET_TR, "token-ring", bin_net_tr_table },
- { CTL_DIR, NET_DECNET, "decnet", bin_net_decnet_table },
- /* NET_ECONET not used */
- { CTL_DIR, NET_SCTP, "sctp", bin_net_sctp_table },
- { CTL_DIR, NET_LLC, "llc", bin_net_llc_table },
- { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table },
- /* NET_DCCP "dccp" no longer used */
- /* NET_IRDA "irda" no longer used */
- { CTL_INT, 2089, "nf_conntrack_max" },
- {}
-};
-
-static const struct bin_table bin_fs_quota_table[] = {
- { CTL_INT, FS_DQ_LOOKUPS, "lookups" },
- { CTL_INT, FS_DQ_DROPS, "drops" },
- { CTL_INT, FS_DQ_READS, "reads" },
- { CTL_INT, FS_DQ_WRITES, "writes" },
- { CTL_INT, FS_DQ_CACHE_HITS, "cache_hits" },
- { CTL_INT, FS_DQ_ALLOCATED, "allocated_dquots" },
- { CTL_INT, FS_DQ_FREE, "free_dquots" },
- { CTL_INT, FS_DQ_SYNCS, "syncs" },
- { CTL_INT, FS_DQ_WARNINGS, "warnings" },
- {}
-};
-
-static const struct bin_table bin_fs_xfs_table[] = {
- { CTL_INT, XFS_SGID_INHERIT, "irix_sgid_inherit" },
- { CTL_INT, XFS_SYMLINK_MODE, "irix_symlink_mode" },
- { CTL_INT, XFS_PANIC_MASK, "panic_mask" },
-
- { CTL_INT, XFS_ERRLEVEL, "error_level" },
- { CTL_INT, XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
- { CTL_INT, XFS_INHERIT_SYNC, "inherit_sync" },
- { CTL_INT, XFS_INHERIT_NODUMP, "inherit_nodump" },
- { CTL_INT, XFS_INHERIT_NOATIME, "inherit_noatime" },
- { CTL_INT, XFS_BUF_TIMER, "xfsbufd_centisecs" },
- { CTL_INT, XFS_BUF_AGE, "age_buffer_centisecs" },
- { CTL_INT, XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
- { CTL_INT, XFS_ROTORSTEP, "rotorstep" },
- { CTL_INT, XFS_INHERIT_NODFRG, "inherit_nodefrag" },
- { CTL_INT, XFS_FILESTREAM_TIMER, "filestream_centisecs" },
- { CTL_INT, XFS_STATS_CLEAR, "stats_clear" },
- {}
-};
-
-static const struct bin_table bin_fs_ocfs2_nm_table[] = {
- { CTL_STR, 1, "hb_ctl_path" },
- {}
-};
-
-static const struct bin_table bin_fs_ocfs2_table[] = {
- { CTL_DIR, 1, "nm", bin_fs_ocfs2_nm_table },
- {}
-};
-
-static const struct bin_table bin_inotify_table[] = {
- { CTL_INT, INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
- { CTL_INT, INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
- { CTL_INT, INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
- {}
-};
-
-static const struct bin_table bin_fs_table[] = {
- { CTL_INT, FS_NRINODE, "inode-nr" },
- { CTL_INT, FS_STATINODE, "inode-state" },
- /* FS_MAXINODE unused */
- /* FS_NRDQUOT unused */
- /* FS_MAXDQUOT unused */
- /* FS_NRFILE "file-nr" no longer used */
- { CTL_INT, FS_MAXFILE, "file-max" },
- { CTL_INT, FS_DENTRY, "dentry-state" },
- /* FS_NRSUPER unused */
- /* FS_MAXUPSER unused */
- { CTL_INT, FS_OVERFLOWUID, "overflowuid" },
- { CTL_INT, FS_OVERFLOWGID, "overflowgid" },
- { CTL_INT, FS_LEASES, "leases-enable" },
- { CTL_INT, FS_DIR_NOTIFY, "dir-notify-enable" },
- { CTL_INT, FS_LEASE_TIME, "lease-break-time" },
- { CTL_DIR, FS_DQSTATS, "quota", bin_fs_quota_table },
- { CTL_DIR, FS_XFS, "xfs", bin_fs_xfs_table },
- { CTL_ULONG, FS_AIO_NR, "aio-nr" },
- { CTL_ULONG, FS_AIO_MAX_NR, "aio-max-nr" },
- { CTL_DIR, FS_INOTIFY, "inotify", bin_inotify_table },
- { CTL_DIR, FS_OCFS2, "ocfs2", bin_fs_ocfs2_table },
- { CTL_INT, KERN_SETUID_DUMPABLE, "suid_dumpable" },
- {}
-};
-
-static const struct bin_table bin_ipmi_table[] = {
- { CTL_INT, DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
- {}
-};
-
-static const struct bin_table bin_mac_hid_files[] = {
- /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
- /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
- { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
- { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
- { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
- /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
- {}
-};
-
-static const struct bin_table bin_raid_table[] = {
- { CTL_INT, DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
- { CTL_INT, DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
- {}
-};
-
-static const struct bin_table bin_scsi_table[] = {
- { CTL_INT, DEV_SCSI_LOGGING_LEVEL, "logging_level" },
- {}
-};
-
-static const struct bin_table bin_dev_table[] = {
- /* DEV_CDROM "cdrom" no longer used */
- /* DEV_HWMON unused */
- /* DEV_PARPORT "parport" no longer used */
- { CTL_DIR, DEV_RAID, "raid", bin_raid_table },
- { CTL_DIR, DEV_MAC_HID, "mac_hid", bin_mac_hid_files },
- { CTL_DIR, DEV_SCSI, "scsi", bin_scsi_table },
- { CTL_DIR, DEV_IPMI, "ipmi", bin_ipmi_table },
- {}
-};
-
-static const struct bin_table bin_bus_isa_table[] = {
- { CTL_INT, BUS_ISA_MEM_BASE, "membase" },
- { CTL_INT, BUS_ISA_PORT_BASE, "portbase" },
- { CTL_INT, BUS_ISA_PORT_SHIFT, "portshift" },
- {}
-};
-
-static const struct bin_table bin_bus_table[] = {
- { CTL_DIR, CTL_BUS_ISA, "isa", bin_bus_isa_table },
- {}
-};
-
-
-static const struct bin_table bin_s390dbf_table[] = {
- { CTL_INT, 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
- { CTL_INT, 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
- {}
-};
-
-static const struct bin_table bin_sunrpc_table[] = {
- /* CTL_RPCDEBUG "rpc_debug" no longer used */
- /* CTL_NFSDEBUG "nfs_debug" no longer used */
- /* CTL_NFSDDEBUG "nfsd_debug" no longer used */
- /* CTL_NLMDEBUG "nlm_debug" no longer used */
-
- { CTL_INT, CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
- { CTL_INT, CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
- { CTL_INT, CTL_MIN_RESVPORT, "min_resvport" },
- { CTL_INT, CTL_MAX_RESVPORT, "max_resvport" },
- {}
-};
-
-static const struct bin_table bin_pm_table[] = {
- /* frv specific */
- /* 1 == CTL_PM_SUSPEND "suspend" no longer used" */
- { CTL_INT, 2 /* CTL_PM_CMODE */, "cmode" },
- { CTL_INT, 3 /* CTL_PM_P0 */, "p0" },
- { CTL_INT, 4 /* CTL_PM_CM */, "cm" },
- {}
-};
-
-static const struct bin_table bin_root_table[] = {
- { CTL_DIR, CTL_KERN, "kernel", bin_kern_table },
- { CTL_DIR, CTL_VM, "vm", bin_vm_table },
- { CTL_DIR, CTL_NET, "net", bin_net_table },
- /* CTL_PROC not used */
- { CTL_DIR, CTL_FS, "fs", bin_fs_table },
- /* CTL_DEBUG "debug" no longer used */
- { CTL_DIR, CTL_DEV, "dev", bin_dev_table },
- { CTL_DIR, CTL_BUS, "bus", bin_bus_table },
- { CTL_DIR, CTL_ABI, "abi" },
- /* CTL_CPU not used */
- /* CTL_ARLAN "arlan" no longer used */
- { CTL_DIR, CTL_S390DBF, "s390dbf", bin_s390dbf_table },
- { CTL_DIR, CTL_SUNRPC, "sunrpc", bin_sunrpc_table },
- { CTL_DIR, CTL_PM, "pm", bin_pm_table },
- {}
-};
-
-static ssize_t bin_dir(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- return -ENOTDIR;
-}
-
-
-static ssize_t bin_string(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- ssize_t result, copied = 0;
-
- if (oldval && oldlen) {
- char __user *lastp;
- loff_t pos = 0;
- int ch;
-
- result = vfs_read(file, oldval, oldlen, &pos);
- if (result < 0)
- goto out;
-
- copied = result;
- lastp = oldval + copied - 1;
-
- result = -EFAULT;
- if (get_user(ch, lastp))
- goto out;
-
- /* Trim off the trailing newline */
- if (ch == '\n') {
- result = -EFAULT;
- if (put_user('\0', lastp))
- goto out;
- copied -= 1;
- }
- }
-
- if (newval && newlen) {
- loff_t pos = 0;
-
- result = vfs_write(file, newval, newlen, &pos);
- if (result < 0)
- goto out;
- }
-
- result = copied;
-out:
- return result;
-}
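
bin_string() above bridges two conventions: /proc/sys text files end in a newline, while the old binary sysctl interface returned bare strings, so the shim strips the final '\n' before handing the buffer back. A minimal userspace analog of that trim (hypothetical helper, standard C only):

	#include <stdio.h>

	static long read_sysctl_string(const char *path, char *buf, size_t len)
	{
		FILE *f = fopen(path, "r");
		size_t n;

		if (!f)
			return -1;
		n = fread(buf, 1, len - 1, f);
		fclose(f);
		buf[n] = '\0';
		if (n > 0 && buf[n - 1] == '\n')	/* mirror the kernel's trim */
			buf[--n] = '\0';
		return (long)n;
	}
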
-
-static ssize_t bin_intvec(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- ssize_t copied = 0;
- char *buffer;
- ssize_t result;
-
- result = -ENOMEM;
- buffer = kmalloc(BUFSZ, GFP_KERNEL);
- if (!buffer)
- goto out;
-
- if (oldval && oldlen) {
- unsigned __user *vec = oldval;
- size_t length = oldlen / sizeof(*vec);
- char *str, *end;
- int i;
- loff_t pos = 0;
-
- result = kernel_read(file, buffer, BUFSZ - 1, &pos);
- if (result < 0)
- goto out_kfree;
-
- str = buffer;
- end = str + result;
- *end++ = '\0';
- for (i = 0; i < length; i++) {
- unsigned long value;
-
- value = simple_strtoul(str, &str, 10);
- while (isspace(*str))
- str++;
-
- result = -EFAULT;
- if (put_user(value, vec + i))
- goto out_kfree;
-
- copied += sizeof(*vec);
- if (!isdigit(*str))
- break;
- }
- }
-
- if (newval && newlen) {
- unsigned __user *vec = newval;
- size_t length = newlen / sizeof(*vec);
- char *str, *end;
- int i;
- loff_t pos = 0;
-
- str = buffer;
- end = str + BUFSZ;
- for (i = 0; i < length; i++) {
- unsigned long value;
-
- result = -EFAULT;
- if (get_user(value, vec + i))
- goto out_kfree;
-
- str += scnprintf(str, end - str, "%lu\t", value);
- }
-
- result = kernel_write(file, buffer, str - buffer, &pos);
- if (result < 0)
- goto out_kfree;
- }
- result = copied;
-out_kfree:
- kfree(buffer);
-out:
- return result;
-}
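
The proc text form of an integer vector is a run of tab-separated decimals (written back with "%lu\t" above), so reads must parse that text into a raw array. A standalone sketch of the same parse loop, using standard C instead of the kernel helpers:

	#include <ctype.h>
	#include <stdlib.h>

	/*
	 * Pull decimal values out of a tab-separated buffer, stopping
	 * after the first element not followed by another digit, just
	 * like the loop above. Returns the number of elements stored.
	 */
	static size_t parse_intvec(const char *str, unsigned int *vec, size_t length)
	{
		size_t i;

		for (i = 0; i < length; i++) {
			char *end;

			vec[i] = (unsigned int)strtoul(str, &end, 10);
			while (isspace((unsigned char)*end))
				end++;
			if (!isdigit((unsigned char)*end))
				return i + 1;	/* this element copied, then stop */
			str = end;
		}
		return i;
	}
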
-
-static ssize_t bin_ulongvec(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- ssize_t copied = 0;
- char *buffer;
- ssize_t result;
-
- result = -ENOMEM;
- buffer = kmalloc(BUFSZ, GFP_KERNEL);
- if (!buffer)
- goto out;
-
- if (oldval && oldlen) {
- unsigned long __user *vec = oldval;
- size_t length = oldlen / sizeof(*vec);
- char *str, *end;
- int i;
- loff_t pos = 0;
-
- result = kernel_read(file, buffer, BUFSZ - 1, &pos);
- if (result < 0)
- goto out_kfree;
-
- str = buffer;
- end = str + result;
- *end++ = '\0';
- for (i = 0; i < length; i++) {
- unsigned long value;
-
- value = simple_strtoul(str, &str, 10);
- while (isspace(*str))
- str++;
-
- result = -EFAULT;
- if (put_user(value, vec + i))
- goto out_kfree;
-
- copied += sizeof(*vec);
- if (!isdigit(*str))
- break;
- }
- }
-
- if (newval && newlen) {
- unsigned long __user *vec = newval;
- size_t length = newlen / sizeof(*vec);
- char *str, *end;
- int i;
- loff_t pos = 0;
-
- str = buffer;
- end = str + BUFSZ;
- for (i = 0; i < length; i++) {
- unsigned long value;
-
- result = -EFAULT;
- if (get_user(value, vec + i))
- goto out_kfree;
-
- str += scnprintf(str, end - str, "%lu\t", value);
- }
-
- result = kernel_write(file, buffer, str - buffer, &pos);
- if (result < 0)
- goto out_kfree;
- }
- result = copied;
-out_kfree:
- kfree(buffer);
-out:
- return result;
-}
-
-static ssize_t bin_uuid(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- ssize_t result, copied = 0;
-
- /* Only supports reads */
- if (oldval && oldlen) {
- char buf[UUID_STRING_LEN + 1];
- uuid_t uuid;
- loff_t pos = 0;
-
- result = kernel_read(file, buf, sizeof(buf) - 1, &pos);
- if (result < 0)
- goto out;
-
- buf[result] = '\0';
-
- result = -EIO;
- if (uuid_parse(buf, &uuid))
- goto out;
-
- if (oldlen > 16)
- oldlen = 16;
-
- result = -EFAULT;
- if (copy_to_user(oldval, &uuid, oldlen))
- goto out;
-
- copied = oldlen;
- }
- result = copied;
-out:
- return result;
-}
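
Here the proc file carries the canonical 36-character UUID string while binary callers expect the 16 raw bytes, so the shim parses the text and copies at most 16 bytes out. A userspace analog, assuming libuuid from util-linux is available (the boot_id path below is just the usual example of such a file):

	#include <stdio.h>
	#include <uuid/uuid.h>

	static int read_boot_id(unsigned char out[16])
	{
		char buf[37] = "";	/* 36-char UUID string + NUL */
		FILE *f = fopen("/proc/sys/kernel/random/boot_id", "r");

		if (!f)
			return -1;
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return -1;
		}
		fclose(f);
		return uuid_parse(buf, out) ? -1 : 0;	/* 0 on success */
	}
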
-
-static ssize_t bin_dn_node_address(struct file *file,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- ssize_t result, copied = 0;
-
- if (oldval && oldlen) {
- char buf[15], *nodep;
- unsigned long area, node;
- __le16 dnaddr;
- loff_t pos = 0;
-
- result = kernel_read(file, buf, sizeof(buf) - 1, &pos);
- if (result < 0)
- goto out;
-
- buf[result] = '\0';
-
- /* Convert the decnet address to binary */
- result = -EIO;
- nodep = strchr(buf, '.');
- if (!nodep)
- goto out;
- ++nodep;
-
- area = simple_strtoul(buf, NULL, 10);
- node = simple_strtoul(nodep, NULL, 10);
-
- result = -EIO;
- if ((area > 63)||(node > 1023))
- goto out;
-
- dnaddr = cpu_to_le16((area << 10) | node);
-
- result = -EFAULT;
- if (put_user(dnaddr, (__le16 __user *)oldval))
- goto out;
-
- copied = sizeof(dnaddr);
- }
-
- if (newval && newlen) {
- __le16 dnaddr;
- char buf[15];
- int len;
- loff_t pos = 0;
-
- result = -EINVAL;
- if (newlen != sizeof(dnaddr))
- goto out;
-
- result = -EFAULT;
- if (get_user(dnaddr, (__le16 __user *)newval))
- goto out;
-
- len = scnprintf(buf, sizeof(buf), "%hu.%hu",
- le16_to_cpu(dnaddr) >> 10,
- le16_to_cpu(dnaddr) & 0x3ff);
-
- result = kernel_write(file, buf, len, &pos);
- if (result < 0)
- goto out;
- }
-
- result = copied;
-out:
- return result;
-}
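
The DECnet "area.node" notation packs into 16 bits as (area << 10) | node, little-endian on the wire: 6 bits of area (<= 63) and 10 bits of node (<= 1023), exactly the bounds checked above. For example, "1.2" packs to 0x0402. A minimal pack/unpack sketch:

	#include <stdint.h>

	static uint16_t dn_pack(unsigned int area, unsigned int node)
	{
		/* valid only for area <= 63, node <= 1023 */
		return (uint16_t)((area << 10) | node);
	}

	static void dn_unpack(uint16_t addr, unsigned int *area, unsigned int *node)
	{
		*area = addr >> 10;
		*node = addr & 0x3ff;
	}
	/* dn_pack(1, 2) == 0x0402; dn_unpack(0x0402, ...) yields 1.2 */
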
-
-static const struct bin_table *get_sysctl(const int *name, int nlen, char *path)
-{
- const struct bin_table *table = &bin_root_table[0];
- int ctl_name;
-
- /* The binary sysctl tables have a small maximum depth so
- * there is no danger of overflowing our path as it is PATH_MAX
- * bytes long.
- */
- memcpy(path, "sys/", 4);
- path += 4;
-
-repeat:
- if (!nlen)
- return ERR_PTR(-ENOTDIR);
- ctl_name = *name;
- name++;
- nlen--;
- for ( ; table->convert; table++) {
- int len = 0;
-
- /*
- * For a wild card entry map from ifindex to network
- * device name.
- */
- if (!table->ctl_name) {
-#ifdef CONFIG_NET
- struct net *net = current->nsproxy->net_ns;
- struct net_device *dev;
- dev = dev_get_by_index(net, ctl_name);
- if (dev) {
- len = strlen(dev->name);
- memcpy(path, dev->name, len);
- dev_put(dev);
- }
-#endif
- /* Use the well known sysctl number to proc name mapping */
- } else if (ctl_name == table->ctl_name) {
- len = strlen(table->procname);
- memcpy(path, table->procname, len);
- }
- if (len) {
- path += len;
- if (table->child) {
- *path++ = '/';
- table = table->child;
- goto repeat;
- }
- *path = '\0';
- return table;
- }
- }
- return ERR_PTR(-ENOTDIR);
-}
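
get_sysctl() above resolves a binary name vector one integer per directory level, appending each matching procname to the path; an entry with ctl_name 0 is the per-device wildcard that maps an interface index to its name. So {CTL_NET, NET_IPV6, NET_IPV6_CONF, NET_PROTO_CONF_ALL, NET_IPV6_FORWARDING} resolves to "sys/net/ipv6/conf/all/forwarding", while an ifindex in the fourth slot would go through the wildcard row to, e.g., "sys/net/ipv6/conf/eth0/forwarding". A simplified sketch of the walk, reusing this file's bin_table layout and omitting the wildcard handling (illustrative only):

	static const struct bin_table *walk(const struct bin_table *table,
					    const int *name, int nlen, char *path)
	{
		while (nlen--) {
			int ctl = *name++;

			for (; table->convert; table++)
				if (table->ctl_name == ctl)
					break;
			if (!table->convert)
				return NULL;		/* no match: -ENOTDIR */
			path += sprintf(path, "%s", table->procname);
			if (!table->child) {
				*path = '\0';
				return table;		/* leaf: converter found */
			}
			*path++ = '/';
			table = table->child;
		}
		return NULL;				/* name ran out inside a directory */
	}
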
-
-static char *sysctl_getname(const int *name, int nlen, const struct bin_table **tablep)
-{
- char *tmp, *result;
-
- result = ERR_PTR(-ENOMEM);
- tmp = __getname();
- if (tmp) {
- const struct bin_table *table = get_sysctl(name, nlen, tmp);
- result = tmp;
- *tablep = table;
- if (IS_ERR(table)) {
- __putname(tmp);
- result = ERR_CAST(table);
- }
- }
- return result;
-}
-
-static ssize_t binary_sysctl(const int *name, int nlen,
- void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
-{
- const struct bin_table *table = NULL;
- struct vfsmount *mnt;
- struct file *file;
- ssize_t result;
- char *pathname;
- int flags;
-
- pathname = sysctl_getname(name, nlen, &table);
- result = PTR_ERR(pathname);
- if (IS_ERR(pathname))
- goto out;
-
- /* How should the sysctl be accessed? */
- if (oldval && oldlen && newval && newlen) {
- flags = O_RDWR;
- } else if (newval && newlen) {
- flags = O_WRONLY;
- } else if (oldval && oldlen) {
- flags = O_RDONLY;
- } else {
- result = 0;
- goto out_putname;
- }
-
- mnt = task_active_pid_ns(current)->proc_mnt;
- file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
- result = PTR_ERR(file);
- if (IS_ERR(file))
- goto out_putname;
-
- result = table->convert(file, oldval, oldlen, newval, newlen);
-
- fput(file);
-out_putname:
- __putname(pathname);
-out:
- return result;
-}
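
binary_sysctl() is the whole shim in miniature: translate the name vector to a path under proc, pick O_RDONLY/O_WRONLY/O_RDWR from which buffers the caller passed, open the file, and let the entry's converter do the text/binary translation. For context, this is roughly what a caller of the long-deprecated sysctl(2) looked like from userspace (a hedged sketch: assumes a pre-5.5 kernel built with CONFIG_SYSCTL_SYSCALL and an architecture that defines SYS__sysctl, since glibc dropped its wrapper):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/sysctl.h>

	int main(void)
	{
		int name[] = { CTL_KERN, KERN_OSTYPE };
		char buf[64] = "";
		size_t len = sizeof(buf);
		struct __sysctl_args args = {
			.name = name, .nlen = 2,
			.oldval = buf, .oldlenp = &len,
		};

		if (syscall(SYS__sysctl, &args) < 0)
			return 1;
		printf("%.*s\n", (int)len, buf);	/* e.g. "Linux" */
		return 0;
	}
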
-
-
-#else /* CONFIG_SYSCTL_SYSCALL */
-
static ssize_t binary_sysctl(const int *name, int nlen,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
return -ENOSYS;
}
-#endif /* CONFIG_SYSCTL_SYSCALL */
-
-
static void deprecated_sysctl_warning(const int *name, int nlen)
{
int i;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 65605530ee34..8de90ea31280 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -966,7 +966,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,
base->cpu_base->active_bases |= 1 << base->index;
- timer->state = HRTIMER_STATE_ENQUEUED;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
@@ -988,7 +989,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;
- timer->state = newstate;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->state, newstate);
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -1013,8 +1015,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
- if (hrtimer_is_queued(timer)) {
- u8 state = timer->state;
+ u8 state = timer->state;
+
+ if (state & HRTIMER_STATE_ENQUEUED) {
int reprogram;
/*
@@ -1940,7 +1943,7 @@ out:
return ret;
}
-#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
+#ifdef CONFIG_64BIT
SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
struct __kernel_timespec __user *, rmtp)
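
The hrtimer hunks above convert the plain stores to timer->state into WRITE_ONCE() so the compiler can neither tear nor refetch them against the lockless read in hrtimer_is_queued(); remove_hrtimer() is reworked to sample the state once and test HRTIMER_STATE_ENQUEUED on that snapshot. The underlying pattern, as a kernel-style sketch (names here are illustrative):

	static u8 timer_state;			/* stands in for timer->state */

	static void writer_update(u8 newstate)	/* called with the base lock held */
	{
		/* publish: pairs with the READ_ONCE() below */
		WRITE_ONCE(timer_state, newstate);
	}

	static bool lockless_is_queued(void)	/* may run without the lock */
	{
		return READ_ONCE(timer_state) & HRTIMER_STATE_ENQUEUED;
	}
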
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 77f1e5635cc1..9e59c9ea92aa 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -26,7 +26,7 @@
* Returns the delta between the expiry time and now, which can be
* less than zero or 1usec for a pending expired timer
*/
-static struct timeval itimer_get_remtime(struct hrtimer *timer)
+static struct timespec64 itimer_get_remtime(struct hrtimer *timer)
{
ktime_t rem = __hrtimer_get_remaining(timer, true);
@@ -41,11 +41,11 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
} else
rem = 0;
- return ktime_to_timeval(rem);
+ return ktime_to_timespec64(rem);
}
static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
- struct itimerval *const value)
+ struct itimerspec64 *const value)
{
u64 val, interval;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
@@ -69,11 +69,11 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
spin_unlock_irq(&tsk->sighand->siglock);
- value->it_value = ns_to_timeval(val);
- value->it_interval = ns_to_timeval(interval);
+ value->it_value = ns_to_timespec64(val);
+ value->it_interval = ns_to_timespec64(interval);
}
-int do_getitimer(int which, struct itimerval *value)
+static int do_getitimer(int which, struct itimerspec64 *value)
{
struct task_struct *tsk = current;
@@ -82,7 +82,7 @@ int do_getitimer(int which, struct itimerval *value)
spin_lock_irq(&tsk->sighand->siglock);
value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
value->it_interval =
- ktime_to_timeval(tsk->signal->it_real_incr);
+ ktime_to_timespec64(tsk->signal->it_real_incr);
spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
@@ -97,34 +97,59 @@ int do_getitimer(int which, struct itimerval *value)
return 0;
}
+static int put_itimerval(struct itimerval __user *o,
+ const struct itimerspec64 *i)
+{
+ struct itimerval v;
+
+ v.it_interval.tv_sec = i->it_interval.tv_sec;
+ v.it_interval.tv_usec = i->it_interval.tv_nsec / NSEC_PER_USEC;
+ v.it_value.tv_sec = i->it_value.tv_sec;
+ v.it_value.tv_usec = i->it_value.tv_nsec / NSEC_PER_USEC;
+ return copy_to_user(o, &v, sizeof(struct itimerval)) ? -EFAULT : 0;
+}
+
+
SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
{
- int error = -EFAULT;
- struct itimerval get_buffer;
+ struct itimerspec64 get_buffer;
+ int error = do_getitimer(which, &get_buffer);
- if (value) {
- error = do_getitimer(which, &get_buffer);
- if (!error &&
- copy_to_user(value, &get_buffer, sizeof(get_buffer)))
- error = -EFAULT;
- }
+ if (!error && put_itimerval(value, &get_buffer))
+ error = -EFAULT;
return error;
}
-#ifdef CONFIG_COMPAT
+#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
+struct old_itimerval32 {
+ struct old_timeval32 it_interval;
+ struct old_timeval32 it_value;
+};
+
+static int put_old_itimerval32(struct old_itimerval32 __user *o,
+ const struct itimerspec64 *i)
+{
+ struct old_itimerval32 v32;
+
+ v32.it_interval.tv_sec = i->it_interval.tv_sec;
+ v32.it_interval.tv_usec = i->it_interval.tv_nsec / NSEC_PER_USEC;
+ v32.it_value.tv_sec = i->it_value.tv_sec;
+ v32.it_value.tv_usec = i->it_value.tv_nsec / NSEC_PER_USEC;
+ return copy_to_user(o, &v32, sizeof(struct old_itimerval32)) ? -EFAULT : 0;
+}
+
COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
- struct compat_itimerval __user *, it)
+ struct old_itimerval32 __user *, value)
{
- struct itimerval kit;
- int error = do_getitimer(which, &kit);
+ struct itimerspec64 get_buffer;
+ int error = do_getitimer(which, &get_buffer);
- if (!error && put_compat_itimerval(it, &kit))
+ if (!error && put_old_itimerval32(value, &get_buffer))
error = -EFAULT;
return error;
}
#endif
-
/*
* The timer is automagically restarted, when interval != 0
*/
@@ -141,8 +166,8 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
}
static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
- const struct itimerval *const value,
- struct itimerval *const ovalue)
+ const struct itimerspec64 *const value,
+ struct itimerspec64 *const ovalue)
{
u64 oval, nval, ointerval, ninterval;
struct cpu_itimer *it = &tsk->signal->it[clock_id];
@@ -151,8 +176,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
* Use the to_ktime conversion because that clamps the maximum
* value to KTIME_MAX and avoid multiplication overflows.
*/
- nval = ktime_to_ns(timeval_to_ktime(value->it_value));
- ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval));
+ nval = timespec64_to_ns(&value->it_value);
+ ninterval = timespec64_to_ns(&value->it_interval);
spin_lock_irq(&tsk->sighand->siglock);
@@ -171,8 +196,8 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
spin_unlock_irq(&tsk->sighand->siglock);
if (ovalue) {
- ovalue->it_value = ns_to_timeval(oval);
- ovalue->it_interval = ns_to_timeval(ointerval);
+ ovalue->it_value = ns_to_timespec64(oval);
+ ovalue->it_interval = ns_to_timespec64(ointerval);
}
}
@@ -182,19 +207,13 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
#define timeval_valid(t) \
(((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))
-int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
+static int do_setitimer(int which, struct itimerspec64 *value,
+ struct itimerspec64 *ovalue)
{
struct task_struct *tsk = current;
struct hrtimer *timer;
ktime_t expires;
- /*
- * Validate the timevals in value.
- */
- if (!timeval_valid(&value->it_value) ||
- !timeval_valid(&value->it_interval))
- return -EINVAL;
-
switch (which) {
case ITIMER_REAL:
again:
@@ -203,7 +222,7 @@ again:
if (ovalue) {
ovalue->it_value = itimer_get_remtime(timer);
ovalue->it_interval
- = ktime_to_timeval(tsk->signal->it_real_incr);
+ = ktime_to_timespec64(tsk->signal->it_real_incr);
}
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
@@ -211,10 +230,10 @@ again:
hrtimer_cancel_wait_running(timer);
goto again;
}
- expires = timeval_to_ktime(value->it_value);
+ expires = timespec64_to_ktime(value->it_value);
if (expires != 0) {
tsk->signal->it_real_incr =
- timeval_to_ktime(value->it_interval);
+ timespec64_to_ktime(value->it_interval);
hrtimer_start(timer, expires, HRTIMER_MODE_REL);
} else
tsk->signal->it_real_incr = 0;
@@ -234,6 +253,17 @@ again:
return 0;
}
+#ifdef CONFIG_SECURITY_SELINUX
+void clear_itimer(void)
+{
+ struct itimerspec64 v = {};
+ int i;
+
+ for (i = 0; i < 3; i++)
+ do_setitimer(i, &v, NULL);
+}
+#endif
+
#ifdef __ARCH_WANT_SYS_ALARM
/**
@@ -250,15 +280,15 @@ again:
*/
static unsigned int alarm_setitimer(unsigned int seconds)
{
- struct itimerval it_new, it_old;
+ struct itimerspec64 it_new, it_old;
#if BITS_PER_LONG < 64
if (seconds > INT_MAX)
seconds = INT_MAX;
#endif
it_new.it_value.tv_sec = seconds;
- it_new.it_value.tv_usec = 0;
- it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+ it_new.it_value.tv_nsec = 0;
+ it_new.it_interval.tv_sec = it_new.it_interval.tv_nsec = 0;
do_setitimer(ITIMER_REAL, &it_new, &it_old);
@@ -266,8 +296,8 @@ static unsigned int alarm_setitimer(unsigned int seconds)
* We can't return 0 if we have an alarm pending ... And we'd
* better return too much than too little anyway
*/
- if ((!it_old.it_value.tv_sec && it_old.it_value.tv_usec) ||
- it_old.it_value.tv_usec >= 500000)
+ if ((!it_old.it_value.tv_sec && it_old.it_value.tv_nsec) ||
+ it_old.it_value.tv_nsec >= (NSEC_PER_SEC / 2))
it_old.it_value.tv_sec++;
return it_old.it_value.tv_sec;
@@ -284,15 +314,35 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
#endif
+static int get_itimerval(struct itimerspec64 *o, const struct itimerval __user *i)
+{
+ struct itimerval v;
+
+ if (copy_from_user(&v, i, sizeof(struct itimerval)))
+ return -EFAULT;
+
+ /* Validate the timevals in value. */
+ if (!timeval_valid(&v.it_value) ||
+ !timeval_valid(&v.it_interval))
+ return -EINVAL;
+
+ o->it_interval.tv_sec = v.it_interval.tv_sec;
+ o->it_interval.tv_nsec = v.it_interval.tv_usec * NSEC_PER_USEC;
+ o->it_value.tv_sec = v.it_value.tv_sec;
+ o->it_value.tv_nsec = v.it_value.tv_usec * NSEC_PER_USEC;
+ return 0;
+}
+
SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
struct itimerval __user *, ovalue)
{
- struct itimerval set_buffer, get_buffer;
+ struct itimerspec64 set_buffer, get_buffer;
int error;
if (value) {
- if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
- return -EFAULT;
+ error = get_itimerval(&set_buffer, value);
+ if (error)
+ return error;
} else {
memset(&set_buffer, 0, sizeof(set_buffer));
printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
@@ -304,30 +354,53 @@ SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
if (error || !ovalue)
return error;
- if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
+ if (put_itimerval(ovalue, &get_buffer))
+ return -EFAULT;
+ return 0;
+}
+
+#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
+static int get_old_itimerval32(struct itimerspec64 *o, const struct old_itimerval32 __user *i)
+{
+ struct old_itimerval32 v32;
+
+ if (copy_from_user(&v32, i, sizeof(struct old_itimerval32)))
return -EFAULT;
+
+ /* Validate the timevals in value. */
+ if (!timeval_valid(&v32.it_value) ||
+ !timeval_valid(&v32.it_interval))
+ return -EINVAL;
+
+ o->it_interval.tv_sec = v32.it_interval.tv_sec;
+ o->it_interval.tv_nsec = v32.it_interval.tv_usec * NSEC_PER_USEC;
+ o->it_value.tv_sec = v32.it_value.tv_sec;
+ o->it_value.tv_nsec = v32.it_value.tv_usec * NSEC_PER_USEC;
return 0;
}
-#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
- struct compat_itimerval __user *, in,
- struct compat_itimerval __user *, out)
+ struct old_itimerval32 __user *, value,
+ struct old_itimerval32 __user *, ovalue)
{
- struct itimerval kin, kout;
+ struct itimerspec64 set_buffer, get_buffer;
int error;
- if (in) {
- if (get_compat_itimerval(&kin, in))
- return -EFAULT;
+ if (value) {
+ error = get_old_itimerval32(&set_buffer, value);
+ if (error)
+ return error;
} else {
- memset(&kin, 0, sizeof(kin));
+ memset(&set_buffer, 0, sizeof(set_buffer));
+ printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
+ " Misfeature support will be removed\n",
+ current->comm);
}
- error = do_setitimer(which, &kin, out ? &kout : NULL);
- if (error || !out)
+ error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
+ if (error || !ovalue)
return error;
- if (put_compat_itimerval(out, &kout))
+ if (put_old_itimerval32(ovalue, &get_buffer))
return -EFAULT;
return 0;
}
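
The net effect of the itimer.c changes above: all internal state is now struct itimerspec64 in nanoseconds, and the classic microsecond-based layouts (itimerval and the 32-bit old_itimerval32) are converted only at the syscall boundary, multiplying by NSEC_PER_USEC on the way in and dividing on the way out. A plain-C sketch of that round trip (illustrative types, not the kernel structs):

	#include <stdint.h>

	#define NSEC_PER_USEC	1000L

	struct tv_usec { int64_t sec, usec; };	/* user-visible layout */
	struct tv_nsec { int64_t sec, nsec; };	/* internal layout */

	static struct tv_nsec usec_to_nsec(struct tv_usec v)
	{
		return (struct tv_nsec){ v.sec, v.usec * NSEC_PER_USEC };
	}

	static struct tv_usec nsec_to_usec(struct tv_nsec v)
	{
		return (struct tv_usec){ v.sec, v.nsec / NSEC_PER_USEC };
	}
	/* usec_to_nsec((struct tv_usec){1, 500000}).nsec == 500000000, i.e. 1.5s */
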
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 5c54ca632d08..704ccd9451b0 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -59,9 +59,9 @@ EXPORT_SYMBOL(sys_tz);
* why not move it into the appropriate arch directory (for those
* architectures that need it).
*/
-SYSCALL_DEFINE1(time, time_t __user *, tloc)
+SYSCALL_DEFINE1(time, __kernel_old_time_t __user *, tloc)
{
- time_t i = (time_t)ktime_get_real_seconds();
+ __kernel_old_time_t i = (__kernel_old_time_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -78,7 +78,7 @@ SYSCALL_DEFINE1(time, time_t __user *, tloc)
* architectures that need it).
*/
-SYSCALL_DEFINE1(stime, time_t __user *, tptr)
+SYSCALL_DEFINE1(stime, __kernel_old_time_t __user *, tptr)
{
struct timespec64 tv;
int err;
@@ -137,7 +137,7 @@ SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr)
#endif /* __ARCH_WANT_SYS_TIME32 */
#endif
-SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
+SYSCALL_DEFINE2(gettimeofday, struct __kernel_old_timeval __user *, tv,
struct timezone __user *, tz)
{
if (likely(tv != NULL)) {
@@ -179,7 +179,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
return error;
if (tz) {
- /* Verify we're witin the +-15 hrs range */
+ /* Verify we're within the +-15 hrs range */
if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
return -EINVAL;
@@ -196,22 +196,21 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
return 0;
}
-SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
+SYSCALL_DEFINE2(settimeofday, struct __kernel_old_timeval __user *, tv,
struct timezone __user *, tz)
{
struct timespec64 new_ts;
- struct timeval user_tv;
struct timezone new_tz;
if (tv) {
- if (copy_from_user(&user_tv, tv, sizeof(*tv)))
+ if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
+ get_user(new_ts.tv_nsec, &tv->tv_usec))
return -EFAULT;
- if (!timeval_valid(&user_tv))
+ if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
return -EINVAL;
- new_ts.tv_sec = user_tv.tv_sec;
- new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
+ new_ts.tv_nsec *= NSEC_PER_USEC;
}
if (tz) {
if (copy_from_user(&new_tz, tz, sizeof(*tz)))
@@ -245,18 +244,17 @@ COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
struct timespec64 new_ts;
- struct timeval user_tv;
struct timezone new_tz;
if (tv) {
- if (compat_get_timeval(&user_tv, tv))
+ if (get_user(new_ts.tv_sec, &tv->tv_sec) ||
+ get_user(new_ts.tv_nsec, &tv->tv_usec))
return -EFAULT;
- if (!timeval_valid(&user_tv))
+ if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0)
return -EINVAL;
- new_ts.tv_sec = user_tv.tv_sec;
- new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
+ new_ts.tv_nsec *= NSEC_PER_USEC;
}
if (tz) {
if (copy_from_user(&new_tz, tz, sizeof(*tz)))
@@ -267,7 +265,7 @@ COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
}
#endif
-#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
+#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p)
{
struct __kernel_timex txc; /* Local copy of parameter */
@@ -550,18 +548,21 @@ EXPORT_SYMBOL(set_normalized_timespec64);
*/
struct timespec64 ns_to_timespec64(const s64 nsec)
{
- struct timespec64 ts;
+ struct timespec64 ts = { 0, 0 };
s32 rem;
- if (!nsec)
- return (struct timespec64) {0, 0};
-
- ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
- if (unlikely(rem < 0)) {
- ts.tv_sec--;
- rem += NSEC_PER_SEC;
+ if (likely(nsec > 0)) {
+ ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
+ ts.tv_nsec = rem;
+ } else if (nsec < 0) {
+ /*
+ * With negative times, tv_sec points to the earlier
+ * second, and tv_nsec counts the nanoseconds since
+ * then, so tv_nsec is always a positive number.
+ */
+ ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1;
+ ts.tv_nsec = NSEC_PER_SEC - rem - 1;
}
- ts.tv_nsec = rem;
return ts;
}
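
The rewritten ns_to_timespec64() normalizes negative inputs so tv_nsec always lands in [0, NSEC_PER_SEC), with tv_sec naming the earlier second. Working the new branch for nsec = -1:

	ts.tv_sec  = -div_u64_rem(-(-1) - 1, NSEC_PER_SEC, &rem) - 1
	           = -div_u64_rem(0, NSEC_PER_SEC, &rem) - 1 = -1    (rem = 0)
	ts.tv_nsec = NSEC_PER_SEC - rem - 1 = 999999999

so one nanosecond before the epoch is represented as { -1, 999999999 }, matching the comment added above.
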
@@ -880,10 +881,11 @@ int get_timespec64(struct timespec64 *ts,
ts->tv_sec = kts.tv_sec;
- /* Zero out the padding for 32 bit systems or in compat mode */
- if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall())
+ /* Zero out the padding in compat mode */
+ if (in_compat_syscall())
kts.tv_nsec &= 0xFFFFFFFFUL;
+ /* In 32-bit mode, this drops the padding */
ts->tv_nsec = kts.tv_nsec;
return 0;
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e08527f50d2a..cdf5afa87f65 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -33,6 +33,9 @@ config HAVE_DYNAMIC_FTRACE
config HAVE_DYNAMIC_FTRACE_WITH_REGS
bool
+config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ bool
+
config HAVE_FTRACE_MCOUNT_RECORD
bool
help
@@ -76,7 +79,7 @@ config FTRACE_NMI_ENTER
config EVENT_TRACING
select CONTEXT_SWITCH_TRACER
- select GLOB
+ select GLOB
bool
config CONTEXT_SWITCH_TRACER
@@ -106,7 +109,6 @@ config PREEMPTIRQ_TRACEPOINTS
config TRACING
bool
- select DEBUG_FS
select RING_BUFFER
select STACKTRACE if STACKTRACE_SUPPORT
select TRACEPOINTS
@@ -308,7 +310,7 @@ config TRACER_SNAPSHOT
cat snapshot
config TRACER_SNAPSHOT_PER_CPU_SWAP
- bool "Allow snapshot to swap per CPU"
+ bool "Allow snapshot to swap per CPU"
depends on TRACER_SNAPSHOT
select RING_BUFFER_ALLOW_SWAP
help
@@ -557,6 +559,11 @@ config DYNAMIC_FTRACE_WITH_REGS
depends on DYNAMIC_FTRACE
depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ def_bool y
+ depends on DYNAMIC_FTRACE
+ depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
config FUNCTION_PROFILER
bool "Kernel function profiler"
depends on FUNCTION_TRACER
@@ -675,7 +682,7 @@ config MMIOTRACE_TEST
Say N, unless you absolutely know what you are doing.
config TRACEPOINT_BENCHMARK
- bool "Add tracepoint that benchmarks tracepoints"
+ bool "Add tracepoint that benchmarks tracepoints"
help
This option creates the tracepoint "benchmark:benchmark_event".
When the tracepoint is enabled, it kicks off a kernel thread that
@@ -724,7 +731,7 @@ config RING_BUFFER_STARTUP_TEST
bool "Ring buffer startup self test"
depends on RING_BUFFER
help
- Run a simple self test on the ring buffer on boot up. Late in the
+ Run a simple self test on the ring buffer on boot up. Late in the
kernel boot sequence, the test will start, kicking off
a thread per cpu. Each thread will write various size events
into the ring buffer. Another thread is created to send IPIs
@@ -752,9 +759,9 @@ config PREEMPTIRQ_DELAY_TEST
configurable delay. The module busy waits for the duration of the
critical section.
- For example, the following invocation forces a one-time irq-disabled
- critical section for 500us:
- modprobe preemptirq_delay_test test_mode=irq delay=500000
+ For example, the following invocation generates a burst of three
+ irq-disabled critical sections for 500us:
+ modprobe preemptirq_delay_test test_mode=irq delay=500 burst_size=3
If unsure, say N
@@ -763,7 +770,7 @@ config TRACE_EVAL_MAP_FILE
depends on TRACING
help
The "print fmt" of the trace events will show the enum/sizeof names
- instead of their values. This can cause problems for user space tools
+ instead of their values. This can cause problems for user space tools
that use this string to parse the raw data as user space does not know
how to convert the string to its value.
@@ -784,7 +791,7 @@ config TRACE_EVAL_MAP_FILE
they are needed for the "eval_map" file. Enabling this option will
increase the memory footprint of the running kernel.
- If unsure, say N
+ If unsure, say N.
config GCOV_PROFILE_FTRACE
bool "Enable GCOV profiling on ftrace subsystem"
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ffc91d4935ac..e5ef4ae9edb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -739,7 +739,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig)
return -EINVAL;
work = this_cpu_ptr(&send_signal_work);
- if (work->irq_work.flags & IRQ_WORK_BUSY)
+ if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
return -EBUSY;
/* Add the current task, which is the target of sending signal,
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 7950a0356042..67e0c462b059 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -332,9 +332,14 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
return 0;
}
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+extern void ftrace_stub_graph(struct ftrace_graph_ret *);
+
/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
- (trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
@@ -614,7 +619,7 @@ void unregister_ftrace_graph(struct fgraph_ops *gops)
goto out;
ftrace_graph_active--;
- ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_return = ftrace_stub_graph;
ftrace_graph_entry = ftrace_graph_entry_stub;
__ftrace_graph_entry = ftrace_graph_entry_stub;
ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
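
Calling ftrace_stub through a cast to trace_func_graph_ret_t was a call via an incompatible function-pointer type — undefined behavior in C that also defeats CFI-style checkers — so the fix introduces ftrace_stub_graph, a correctly typed symbol that the linker script points at the same code. The idea in miniature (a C alias attribute is used here purely as illustration; the kernel does this in the linker script):

	struct ftrace_graph_ret;

	void ftrace_stub(void)
	{
	}

	/* Same code, correctly typed second symbol (compilers may warn). */
	void ftrace_stub_graph(struct ftrace_graph_ret *trace)
		__attribute__((alias("ftrace_stub")));
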
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5259d4dea675..74439ab5c2b6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -326,6 +326,8 @@ int __register_ftrace_function(struct ftrace_ops *ops)
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
+ if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
+ return -EBUSY;
if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
@@ -463,10 +465,10 @@ static void *function_stat_start(struct tracer_stat *trace)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
-static int function_stat_cmp(void *p1, void *p2)
+static int function_stat_cmp(const void *p1, const void *p2)
{
- struct ftrace_profile *a = p1;
- struct ftrace_profile *b = p2;
+ const struct ftrace_profile *a = p1;
+ const struct ftrace_profile *b = p2;
if (a->time < b->time)
return -1;
@@ -477,10 +479,10 @@ static int function_stat_cmp(void *p1, void *p2)
}
#else
/* not function graph compares against hits */
-static int function_stat_cmp(void *p1, void *p2)
+static int function_stat_cmp(const void *p1, const void *p2)
{
- struct ftrace_profile *a = p1;
- struct ftrace_profile *b = p2;
+ const struct ftrace_profile *a = p1;
+ const struct ftrace_profile *b = p2;
if (a->counter < b->counter)
return -1;
@@ -1018,11 +1020,6 @@ static bool update_all_ops;
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif
-struct ftrace_func_entry {
- struct hlist_node hlist;
- unsigned long ip;
-};
-
struct ftrace_func_probe {
struct ftrace_probe_ops *probe_ops;
struct ftrace_ops ops;
@@ -1370,24 +1367,16 @@ ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
struct ftrace_hash *new_hash);
-static struct ftrace_hash *
-__ftrace_hash_move(struct ftrace_hash *src)
+static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
struct ftrace_func_entry *entry;
- struct hlist_node *tn;
- struct hlist_head *hhd;
struct ftrace_hash *new_hash;
- int size = src->count;
+ struct hlist_head *hhd;
+ struct hlist_node *tn;
int bits = 0;
int i;
/*
- * If the new source is empty, just return the empty_hash.
- */
- if (ftrace_hash_empty(src))
- return EMPTY_HASH;
-
- /*
* Make the hash size about 1/2 the # found
*/
for (size /= 2; size; size >>= 1)
@@ -1411,10 +1400,23 @@ __ftrace_hash_move(struct ftrace_hash *src)
__add_hash_entry(new_hash, entry);
}
}
-
return new_hash;
}
+static struct ftrace_hash *
+__ftrace_hash_move(struct ftrace_hash *src)
+{
+ int size = src->count;
+
+ /*
+ * If the new source is empty, just return the empty_hash.
+ */
+ if (ftrace_hash_empty(src))
+ return EMPTY_HASH;
+
+ return dup_hash(src, size);
+}
+
static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct ftrace_hash **dst, struct ftrace_hash *src)
@@ -1534,6 +1536,26 @@ static int ftrace_cmp_recs(const void *a, const void *b)
return 0;
}
+static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
+{
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec = NULL;
+ struct dyn_ftrace key;
+
+ key.ip = start;
+ key.flags = end; /* overload flags, as it is unsigned long */
+
+ for (pg = ftrace_pages_start; pg; pg = pg->next) {
+ if (end < pg->records[0].ip ||
+ start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
+ continue;
+ rec = bsearch(&key, pg->records, pg->index,
+ sizeof(struct dyn_ftrace),
+ ftrace_cmp_recs);
+ }
+ return rec;
+}
+
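
lookup_rec() overloads key.flags to carry the end of the address range, so ftrace_cmp_recs() can report "equal" for any record whose ip falls inside [start, end] rather than only on an exact hit — a range lookup built on plain bsearch(). The trick in isolation, as a standalone sketch:

	#include <stdio.h>
	#include <stdlib.h>

	struct range_key { unsigned long start, end; };

	/* bsearch() passes the key as the first argument */
	static int cmp_in_range(const void *a, const void *b)
	{
		const struct range_key *key = a;
		const unsigned long *ip = b;

		if (key->end < *ip)
			return -1;
		if (key->start > *ip)
			return 1;
		return 0;	/* *ip lies inside [start, end] */
	}

	int main(void)
	{
		unsigned long ips[] = { 0x100, 0x200, 0x300 };	/* sorted */
		struct range_key key = { 0x1f0, 0x210 };
		unsigned long *hit = bsearch(&key, ips, 3, sizeof(*ips),
					     cmp_in_range);

		printf("%#lx\n", hit ? *hit : 0UL);	/* prints 0x200 */
		return 0;
	}
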
/**
* ftrace_location_range - return the first address of a traced location
* if it touches the given ip range
@@ -1548,23 +1570,11 @@ static int ftrace_cmp_recs(const void *a, const void *b)
*/
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
- struct ftrace_page *pg;
struct dyn_ftrace *rec;
- struct dyn_ftrace key;
-
- key.ip = start;
- key.flags = end; /* overload flags, as it is unsigned long */
- for (pg = ftrace_pages_start; pg; pg = pg->next) {
- if (end < pg->records[0].ip ||
- start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
- continue;
- rec = bsearch(&key, pg->records, pg->index,
- sizeof(struct dyn_ftrace),
- ftrace_cmp_recs);
- if (rec)
- return rec->ip;
- }
+ rec = lookup_rec(start, end);
+ if (rec)
+ return rec->ip;
return 0;
}
@@ -1715,6 +1725,9 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
return false;
+ if (ops->flags & FTRACE_OPS_FL_DIRECT)
+ rec->flags |= FTRACE_FL_DIRECT;
+
/*
* If there's only a single callback registered to a
* function, and the ops has a trampoline registered
@@ -1743,6 +1756,15 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
rec->flags--;
/*
+ * Only the internal direct_ops should have the
+ * DIRECT flag set. Thus, if it is removing a
+ * function, then that function should no longer
+ * be direct.
+ */
+ if (ops->flags & FTRACE_OPS_FL_DIRECT)
+ rec->flags &= ~FTRACE_FL_DIRECT;
+
+ /*
* If the rec had REGS enabled and the ops that is
* being removed had REGS set, then see if there is
* still any ops for this record that wants regs.
@@ -2077,15 +2099,34 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
* If enabling and the REGS flag does not match the REGS_EN, or
* the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
* this record. Set flags to fail the compare against ENABLED.
+ * Same for direct calls.
*/
if (flag) {
- if (!(rec->flags & FTRACE_FL_REGS) !=
+ if (!(rec->flags & FTRACE_FL_REGS) !=
!(rec->flags & FTRACE_FL_REGS_EN))
flag |= FTRACE_FL_REGS;
- if (!(rec->flags & FTRACE_FL_TRAMP) !=
+ if (!(rec->flags & FTRACE_FL_TRAMP) !=
!(rec->flags & FTRACE_FL_TRAMP_EN))
flag |= FTRACE_FL_TRAMP;
+
+ /*
+ * Direct calls are special, as count matters.
+ * We must test the record for direct, if the
+ * DIRECT and DIRECT_EN do not match, but only
+ * if the count is 1. That's because, if the
+ * count is something other than one, we do not
+ * want the direct enabled (it will be done via the
+ * direct helper). But if DIRECT_EN is set, and
+ * the count is not one, we need to clear it.
+ */
+ if (ftrace_rec_count(rec) == 1) {
+ if (!(rec->flags & FTRACE_FL_DIRECT) !=
+ !(rec->flags & FTRACE_FL_DIRECT_EN))
+ flag |= FTRACE_FL_DIRECT;
+ } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
+ flag |= FTRACE_FL_DIRECT;
+ }
}
/* If the state of this record hasn't changed, then do nothing */
@@ -2110,6 +2151,25 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
else
rec->flags &= ~FTRACE_FL_TRAMP_EN;
}
+ if (flag & FTRACE_FL_DIRECT) {
+ /*
+ * If there's only one user (direct_ops helper)
+ * then we can call the direct function
+ * directly (no ftrace trampoline).
+ */
+ if (ftrace_rec_count(rec) == 1) {
+ if (rec->flags & FTRACE_FL_DIRECT)
+ rec->flags |= FTRACE_FL_DIRECT_EN;
+ else
+ rec->flags &= ~FTRACE_FL_DIRECT_EN;
+ } else {
+ /*
+ * Can only call directly if there's
+ * only one callback to the function.
+ */
+ rec->flags &= ~FTRACE_FL_DIRECT_EN;
+ }
+ }
}
/*
@@ -2139,7 +2199,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
* and REGS states. The _EN flags must be disabled though.
*/
rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
- FTRACE_FL_REGS_EN);
+ FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
}
ftrace_bug_type = FTRACE_BUG_NOP;
@@ -2294,6 +2354,52 @@ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
return NULL;
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/* Protected by rcu_tasks for reading, and direct_mutex for writing */
+static struct ftrace_hash *direct_functions = EMPTY_HASH;
+static DEFINE_MUTEX(direct_mutex);
+int ftrace_direct_func_count;
+
+/*
+ * Search the direct_functions hash to see if the given instruction pointer
+ * has a direct caller attached to it.
+ */
+static unsigned long find_rec_direct(unsigned long ip)
+{
+ struct ftrace_func_entry *entry;
+
+ entry = __ftrace_lookup_ip(direct_functions, ip);
+ if (!entry)
+ return 0;
+
+ return entry->direct;
+}
+
+static void call_direct_funcs(unsigned long ip, unsigned long pip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ unsigned long addr;
+
+ addr = find_rec_direct(ip);
+ if (!addr)
+ return;
+
+ arch_ftrace_set_direct_caller(regs, addr);
+}
+
+struct ftrace_ops direct_ops = {
+ .func = call_direct_funcs,
+ .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
+ | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
+ | FTRACE_OPS_FL_PERMANENT,
+};
+#else
+static inline unsigned long find_rec_direct(unsigned long ip)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
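
direct_ops is the sharing fallback: a function with exactly one attached caller can jump straight to the custom trampoline, but once anything else traces it, the ordinary ftrace trampoline runs and call_direct_funcs() redirects the call via arch_ftrace_set_direct_caller(). Registration goes through register_ftrace_direct(), added further below; a hedged module-side sketch, modeled on the kernel's samples/ftrace/ftrace-direct.c (my_tramp and my_direct_func are the sample's names, and my_tramp must be real architecture assembly that saves and restores the traced function's arguments):

	#include <linux/ftrace.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/sched.h>

	void my_direct_func(struct task_struct *p)
	{
		trace_printk("waking up %s-%d\n", p->comm, p->pid);
	}

	extern void my_tramp(void);	/* arch-specific asm, not shown */

	static int __init ftrace_direct_init(void)
	{
		/* attach my_tramp directly to wake_up_process()'s fentry nop */
		return register_ftrace_direct((unsigned long)wake_up_process,
					      (unsigned long)my_tramp);
	}
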
/**
* ftrace_get_addr_new - Get the call address to set to
* @rec: The ftrace record descriptor
@@ -2307,6 +2413,15 @@ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
struct ftrace_ops *ops;
+ unsigned long addr;
+
+ if ((rec->flags & FTRACE_FL_DIRECT) &&
+ (ftrace_rec_count(rec) == 1)) {
+ addr = find_rec_direct(rec->ip);
+ if (addr)
+ return addr;
+ WARN_ON_ONCE(1);
+ }
/* Trampolines take precedence over regs */
if (rec->flags & FTRACE_FL_TRAMP) {
@@ -2339,6 +2454,15 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
struct ftrace_ops *ops;
+ unsigned long addr;
+
+ /* Direct calls take precedence over trampolines */
+ if (rec->flags & FTRACE_FL_DIRECT_EN) {
+ addr = find_rec_direct(rec->ip);
+ if (addr)
+ return addr;
+ WARN_ON_ONCE(1);
+ }
/* Trampolines take precedence over regs */
if (rec->flags & FTRACE_FL_TRAMP_EN) {
@@ -2861,6 +2985,8 @@ static void ftrace_shutdown_sysctl(void)
static u64 ftrace_update_time;
unsigned long ftrace_update_tot_cnt;
+unsigned long ftrace_number_of_pages;
+unsigned long ftrace_number_of_groups;
static inline int ops_traces_mod(struct ftrace_ops *ops)
{
@@ -2985,6 +3111,9 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
goto again;
}
+ ftrace_number_of_pages += 1 << order;
+ ftrace_number_of_groups++;
+
cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
pg->size = cnt;
@@ -3040,6 +3169,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
start_pg = pg->next;
kfree(pg);
pg = start_pg;
+ ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
}
pr_info("ftrace: FAILED to allocate memory for functions\n");
return NULL;
@@ -3450,10 +3581,11 @@ static int t_show(struct seq_file *m, void *v)
if (iter->flags & FTRACE_ITER_ENABLED) {
struct ftrace_ops *ops;
- seq_printf(m, " (%ld)%s%s",
+ seq_printf(m, " (%ld)%s%s%s",
ftrace_rec_count(rec),
rec->flags & FTRACE_FL_REGS ? " R" : " ",
- rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
+ rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ",
+ rec->flags & FTRACE_FL_DIRECT ? " D" : " ");
if (rec->flags & FTRACE_FL_TRAMP_EN) {
ops = ftrace_find_tramp_ops_any(rec);
if (ops) {
@@ -3469,6 +3601,13 @@ static int t_show(struct seq_file *m, void *v)
} else {
add_trampoline_func(m, NULL, rec);
}
+ if (rec->flags & FTRACE_FL_DIRECT) {
+ unsigned long direct;
+
+ direct = find_rec_direct(rec->ip);
+ if (direct)
+ seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
+ }
}
seq_putc(m, '\n');
@@ -4800,6 +4939,366 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+
+struct ftrace_direct_func {
+ struct list_head next;
+ unsigned long addr;
+ int count;
+};
+
+static LIST_HEAD(ftrace_direct_funcs);
+
+/**
+ * ftrace_find_direct_func - check if an address is a registered direct caller
+ * @addr: The address of a registered direct caller
+ *
+ * This searches to see if an ftrace direct caller has been registered
+ * at a specific address, and if so, it returns a descriptor for it.
+ *
+ * This can be used by architecture code to see if an address is
+ * a direct caller (trampoline) attached to a fentry/mcount location.
+ * This is useful for the function_graph tracer, as it may need to
+ * do adjustments if it traced a location that also has a direct
+ * trampoline attached to it.
+ */
+struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+{
+ struct ftrace_direct_func *entry;
+ bool found = false;
+
+ /* May be called by fgraph trampoline (protected by rcu tasks) */
+ list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
+ if (entry->addr == addr) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ return entry;
+
+ return NULL;
+}
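For illustration, a minimal sketch of how architecture or function_graph code might use this lookup; the wrapper below and its caller are hypothetical:

	/* Hypothetical helper: is this return address a direct trampoline? */
	static bool addr_is_direct_trampoline(unsigned long addr)
	{
		return ftrace_find_direct_func(addr) != NULL;
	}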
+
+/**
+ * register_ftrace_direct - Call a custom trampoline directly
+ * @ip: The address of the nop at the beginning of a function
+ * @addr: The address of the trampoline to call at @ip
+ *
+ * This is used to connect a direct call from the nop location (@ip)
+ * at the start of ftrace traced functions. The location that it calls
+ * (@addr) must be able to handle a direct call, save the parameters of
+ * the function being traced, and restore them (or inject new ones if
+ * needed) before returning.
+ *
+ * Returns:
+ * 0 on success
+ * -EBUSY - Another direct function is already attached (there can be only one)
+ * -ENODEV - @ip does not point to an ftrace nop location (or is not supported)
+ * -ENOMEM - There was an allocation failure.
+ */
+int register_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+ struct ftrace_direct_func *direct;
+ struct ftrace_func_entry *entry;
+ struct ftrace_hash *free_hash = NULL;
+ struct dyn_ftrace *rec;
+ int ret = -EBUSY;
+
+ mutex_lock(&direct_mutex);
+
+ /* See if there's a direct function at @ip already */
+ if (find_rec_direct(ip))
+ goto out_unlock;
+
+ ret = -ENODEV;
+ rec = lookup_rec(ip, ip);
+ if (!rec)
+ goto out_unlock;
+
+ /*
+ * The record claims it has a direct call attached, but we did
+ * not find one earlier. Warn and bail out.
+ */
+ if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
+ goto out_unlock;
+
+ /* Make sure the ip points to the exact record */
+ if (ip != rec->ip) {
+ ip = rec->ip;
+ /* Need to check this ip for a direct. */
+ if (find_rec_direct(ip))
+ goto out_unlock;
+ }
+
+ ret = -ENOMEM;
+ if (ftrace_hash_empty(direct_functions) ||
+ direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
+ struct ftrace_hash *new_hash;
+ int size = ftrace_hash_empty(direct_functions) ? 0 :
+ direct_functions->count + 1;
+
+ if (size < 32)
+ size = 32;
+
+ new_hash = dup_hash(direct_functions, size);
+ if (!new_hash)
+ goto out_unlock;
+
+ free_hash = direct_functions;
+ direct_functions = new_hash;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto out_unlock;
+
+ direct = ftrace_find_direct_func(addr);
+ if (!direct) {
+ direct = kmalloc(sizeof(*direct), GFP_KERNEL);
+ if (!direct) {
+ kfree(entry);
+ goto out_unlock;
+ }
+ direct->addr = addr;
+ direct->count = 0;
+ list_add_rcu(&direct->next, &ftrace_direct_funcs);
+ ftrace_direct_func_count++;
+ }
+
+ entry->ip = ip;
+ entry->direct = addr;
+ __add_hash_entry(direct_functions, entry);
+
+ ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
+ if (ret)
+ remove_hash_entry(direct_functions, entry);
+
+ if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
+ ret = register_ftrace_function(&direct_ops);
+ if (ret)
+ ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
+ }
+
+ if (ret) {
+ kfree(entry);
+ if (!direct->count) {
+ list_del_rcu(&direct->next);
+ synchronize_rcu_tasks();
+ kfree(direct);
+ if (free_hash)
+ free_ftrace_hash(free_hash);
+ free_hash = NULL;
+ ftrace_direct_func_count--;
+ }
+ } else {
+ direct->count++;
+ }
+ out_unlock:
+ mutex_unlock(&direct_mutex);
+
+ if (free_hash) {
+ synchronize_rcu_tasks();
+ free_ftrace_hash(free_hash);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(register_ftrace_direct);
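As a hedged usage sketch (not part of this patch), a module could attach and detach a direct trampoline roughly as follows; my_func is assumed to be an ftrace-traced kernel function and my_tramp an architecture-specific assembly trampoline that saves and restores the traced function's registers:

	static int __init my_direct_init(void)
	{
		/* Make the fentry nop of my_func call my_tramp directly. */
		return register_ftrace_direct((unsigned long)my_func,
					      (unsigned long)my_tramp);
	}

	static void __exit my_direct_exit(void)
	{
		unregister_ftrace_direct((unsigned long)my_func,
					 (unsigned long)my_tramp);
	}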
+
+static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
+ struct dyn_ftrace **recp)
+{
+ struct ftrace_func_entry *entry;
+ struct dyn_ftrace *rec;
+
+ rec = lookup_rec(*ip, *ip);
+ if (!rec)
+ return NULL;
+
+ entry = __ftrace_lookup_ip(direct_functions, rec->ip);
+ if (!entry) {
+ WARN_ON(rec->flags & FTRACE_FL_DIRECT);
+ return NULL;
+ }
+
+ WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
+
+ /* Passed in ip just needs to be on the call site */
+ *ip = rec->ip;
+
+ if (recp)
+ *recp = rec;
+
+ return entry;
+}
+
+int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+ struct ftrace_direct_func *direct;
+ struct ftrace_func_entry *entry;
+ int ret = -ENODEV;
+
+ mutex_lock(&direct_mutex);
+
+ entry = find_direct_entry(&ip, NULL);
+ if (!entry)
+ goto out_unlock;
+
+ if (direct_functions->count == 1)
+ unregister_ftrace_function(&direct_ops);
+
+ ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
+
+ WARN_ON(ret);
+
+ remove_hash_entry(direct_functions, entry);
+
+ direct = ftrace_find_direct_func(addr);
+ if (!WARN_ON(!direct)) {
+ /* This is the good path (see the ! before WARN) */
+ direct->count--;
+ WARN_ON(direct->count < 0);
+ if (!direct->count) {
+ list_del_rcu(&direct->next);
+ synchronize_rcu_tasks();
+ kfree(direct);
+ ftrace_direct_func_count--;
+ }
+ }
+ out_unlock:
+ mutex_unlock(&direct_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
+
+static struct ftrace_ops stub_ops = {
+ .func = ftrace_stub,
+};
+
+/**
+ * ftrace_modify_direct_caller - modify ftrace nop directly
+ * @entry: The ftrace hash entry of the direct helper for @rec
+ * @rec: The record representing the function site to patch
+ * @old_addr: The location that the site at @rec->ip currently calls
+ * @new_addr: The location that the site at @rec->ip should call
+ *
+ * An architecture may override this function to optimize the
+ * changing of the direct callback on an ftrace nop location.
+ * This is called with the ftrace_lock mutex held, and no other
+ * ftrace callbacks are on the associated record (@rec). Thus,
+ * it is safe to modify the ftrace record, where it should be
+ * currently calling @old_addr directly, to call @new_addr.
+ *
+ * An override should verify that the code at @rec->ip is
+ * currently calling @old_addr, and it must also update
+ * entry->direct to @new_addr.
+ */
+int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+ struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long new_addr)
+{
+ unsigned long ip = rec->ip;
+ int ret;
+
+ /*
+ * The ftrace_lock was used to determine if the record had
+ * more than one registered user; if it did, we needed to
+ * prevent that from changing in order to do the quick switch.
+ * If it did not (only a direct caller was attached), this
+ * function is called instead. This path can tolerate other
+ * callers attaching to the record while it works, and since
+ * it uses standard ftrace calls that take the ftrace_lock
+ * mutex, we need to release that lock here.
+ */
+ mutex_unlock(&ftrace_lock);
+
+ /*
+ * By setting a stub function at the same address, we force
+ * the code to call the iterator and the direct_ops helper.
+ * This means that @ip does not call the direct call, and
+ * we can simply modify it.
+ */
+ ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
+ if (ret)
+ goto out_lock;
+
+ ret = register_ftrace_function(&stub_ops);
+ if (ret) {
+ ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
+ goto out_lock;
+ }
+
+ entry->direct = new_addr;
+
+ /*
+ * By removing the stub, we put back the direct call, which
+ * now calls @new_addr.
+ */
+ unregister_ftrace_function(&stub_ops);
+ ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
+
+ out_lock:
+ mutex_lock(&ftrace_lock);
+
+ return ret;
+}
+
+/**
+ * modify_ftrace_direct - Modify an existing direct call to call something else
+ * @ip: The instruction pointer to modify
+ * @old_addr: The address that the current @ip calls directly
+ * @new_addr: The address that the @ip should call
+ *
+ * This modifies an ftrace direct caller at an instruction pointer without
+ * having to disable it first. The direct call will switch over to the
+ * @new_addr without missing anything.
+ *
+ * Returns: zero on success. Non-zero on error, which includes:
+ * -ENODEV : the @ip given has no direct caller attached
+ * -EINVAL : the @old_addr does not match the current direct caller
+ */
+int modify_ftrace_direct(unsigned long ip,
+ unsigned long old_addr, unsigned long new_addr)
+{
+ struct ftrace_func_entry *entry;
+ struct dyn_ftrace *rec;
+ int ret = -ENODEV;
+
+ mutex_lock(&direct_mutex);
+
+ mutex_lock(&ftrace_lock);
+ entry = find_direct_entry(&ip, &rec);
+ if (!entry)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ if (entry->direct != old_addr)
+ goto out_unlock;
+
+ /*
+ * If there's no other ftrace callback on the rec->ip location,
+ * then it can be changed directly by the architecture.
+ * If there is another caller, then we just need to change the
+ * direct caller helper to point to @new_addr.
+ */
+ if (ftrace_rec_count(rec) == 1) {
+ ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
+ } else {
+ entry->direct = new_addr;
+ ret = 0;
+ }
+
+ out_unlock:
+ mutex_unlock(&ftrace_lock);
+ mutex_unlock(&direct_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(modify_ftrace_direct);
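A sketch of the intended call pattern, assuming my_func already has my_tramp1 attached via register_ftrace_direct() and my_tramp2 is another suitable trampoline:

	/* Retarget the live direct call; the tracee never sees a gap. */
	ret = modify_ftrace_direct((unsigned long)my_func,
				   (unsigned long)my_tramp1,
				   (unsigned long)my_tramp2);
	if (ret)
		pr_warn("direct caller switch failed: %d\n", ret);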
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
/**
* ftrace_set_filter_ip - set a function to filter on in ftrace by address
* @ops - the ops to set the filter with
@@ -5818,6 +6317,8 @@ void ftrace_release_mod(struct module *mod)
free_pages((unsigned long)pg->records, order);
tmp_page = pg->next;
kfree(pg);
+ ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
}
}
@@ -6159,6 +6660,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
*last_pg = pg->next;
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
free_pages((unsigned long)pg->records, order);
+ ftrace_number_of_pages -= 1 << order;
+ ftrace_number_of_groups--;
kfree(pg);
pg = container_of(last_pg, struct ftrace_page, next);
if (!(*last_pg))
@@ -6214,6 +6717,9 @@ void __init ftrace_init(void)
__start_mcount_loc,
__stop_mcount_loc);
+ pr_info("ftrace: allocated %ld pages with %ld groups\n",
+ ftrace_number_of_pages, ftrace_number_of_groups);
+
set_ftrace_early_filters();
return;
@@ -6754,6 +7260,18 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
+static bool is_permanent_ops_registered(void)
+{
+ struct ftrace_ops *op;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->flags & FTRACE_OPS_FL_PERMANENT)
+ return true;
+ } while_for_each_ftrace_op(op);
+
+ return false;
+}
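For context, the flag this helper scans for is set by the owner of an ops; a hypothetical example, where my_callback is a placeholder:

	/* An ops like this keeps ftrace_enabled from being cleared. */
	static struct ftrace_ops my_permanent_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_PERMANENT,
	};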
+
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -6771,8 +7289,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
goto out;
- last_ftrace_enabled = !!ftrace_enabled;
-
if (ftrace_enabled) {
/* we are starting ftrace again */
@@ -6783,12 +7299,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
ftrace_startup_sysctl();
} else {
+ if (is_permanent_ops_registered()) {
+ ftrace_enabled = true;
+ ret = -EBUSY;
+ goto out;
+ }
+
/* stopping ftrace calls (just send to ftrace_stub) */
ftrace_trace_function = ftrace_stub;
ftrace_shutdown_sysctl();
}
+ last_ftrace_enabled = !!ftrace_enabled;
out:
mutex_unlock(&ftrace_lock);
return ret;
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index d8765c952fab..31c0fad4cb9e 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -10,18 +10,25 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/string.h>
+#include <linux/sysfs.h>
static ulong delay = 100;
-static char test_mode[10] = "irq";
+static char test_mode[12] = "irq";
+static uint burst_size = 1;
-module_param_named(delay, delay, ulong, S_IRUGO);
-module_param_string(test_mode, test_mode, 10, S_IRUGO);
-MODULE_PARM_DESC(delay, "Period in microseconds (100 uS default)");
-MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt or irq (default irq)");
+module_param_named(delay, delay, ulong, 0444);
+module_param_string(test_mode, test_mode, 12, 0444);
+module_param_named(burst_size, burst_size, uint, 0444);
+MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
+MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
+MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
+
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
static void busy_wait(ulong time)
{
@@ -34,37 +41,136 @@ static void busy_wait(ulong time)
} while ((end - start) < (time * 1000));
}
-static int preemptirq_delay_run(void *data)
+static __always_inline void irqoff_test(void)
{
unsigned long flags;
+ local_irq_save(flags);
+ busy_wait(delay);
+ local_irq_restore(flags);
+}
- if (!strcmp(test_mode, "irq")) {
- local_irq_save(flags);
- busy_wait(delay);
- local_irq_restore(flags);
- } else if (!strcmp(test_mode, "preempt")) {
- preempt_disable();
- busy_wait(delay);
- preempt_enable();
+static __always_inline void preemptoff_test(void)
+{
+ preempt_disable();
+ busy_wait(delay);
+ preempt_enable();
+}
+
+static void execute_preemptirqtest(int idx)
+{
+ if (!strcmp(test_mode, "irq"))
+ irqoff_test();
+ else if (!strcmp(test_mode, "preempt"))
+ preemptoff_test();
+ else if (!strcmp(test_mode, "alternate")) {
+ if (idx % 2 == 0)
+ irqoff_test();
+ else
+ preemptoff_test();
}
+}
+
+#define DECLARE_TESTFN(POSTFIX) \
+ static void preemptirqtest_##POSTFIX(int idx) \
+ { \
+ execute_preemptirqtest(idx); \
+ } \
+/*
+ * We create 10 different functions, so that we can get 10 different
+ * backtraces.
+ */
+DECLARE_TESTFN(0)
+DECLARE_TESTFN(1)
+DECLARE_TESTFN(2)
+DECLARE_TESTFN(3)
+DECLARE_TESTFN(4)
+DECLARE_TESTFN(5)
+DECLARE_TESTFN(6)
+DECLARE_TESTFN(7)
+DECLARE_TESTFN(8)
+DECLARE_TESTFN(9)
+
+static void (*testfuncs[])(int) = {
+ preemptirqtest_0,
+ preemptirqtest_1,
+ preemptirqtest_2,
+ preemptirqtest_3,
+ preemptirqtest_4,
+ preemptirqtest_5,
+ preemptirqtest_6,
+ preemptirqtest_7,
+ preemptirqtest_8,
+ preemptirqtest_9,
+};
+
+#define NR_TEST_FUNCS ARRAY_SIZE(testfuncs)
+
+static int preemptirq_delay_run(void *data)
+{
+ int i;
+ int s = MIN(burst_size, NR_TEST_FUNCS);
+
+ for (i = 0; i < s; i++)
+ (testfuncs[i])(i);
return 0;
}
-static int __init preemptirq_delay_init(void)
+static struct task_struct *preemptirq_start_test(void)
{
char task_name[50];
- struct task_struct *test_task;
snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
+ return kthread_run(preemptirq_delay_run, NULL, task_name);
+}
+
+
+static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ preemptirq_start_test();
+ return count;
+}
+
+static struct kobj_attribute trigger_attribute =
+ __ATTR(trigger, 0200, NULL, trigger_store);
+
+static struct attribute *attrs[] = {
+ &trigger_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *preemptirq_delay_kobj;
+
+static int __init preemptirq_delay_init(void)
+{
+ struct task_struct *test_task;
+ int retval;
+
+ test_task = preemptirq_start_test();
+ retval = PTR_ERR_OR_ZERO(test_task);
+ if (retval != 0)
+ return retval;
+
+ preemptirq_delay_kobj = kobject_create_and_add("preemptirq_delay_test",
+ kernel_kobj);
+ if (!preemptirq_delay_kobj)
+ return -ENOMEM;
+
+ retval = sysfs_create_group(preemptirq_delay_kobj, &attr_group);
+ if (retval)
+ kobject_put(preemptirq_delay_kobj);
- test_task = kthread_run(preemptirq_delay_run, NULL, task_name);
- return PTR_ERR_OR_ZERO(test_task);
+ return retval;
}
static void __exit preemptirq_delay_exit(void)
{
- return;
+ kobject_put(preemptirq_delay_kobj);
}
module_init(preemptirq_delay_init)
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 09b0b49f346e..32149e46551c 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -269,10 +269,10 @@ static void ring_buffer_producer(void)
#ifndef CONFIG_PREEMPTION
/*
- * If we are a non preempt kernel, the 10 second run will
+ * If we are a non preempt kernel, the 10 seconds run will
* stop everything while it runs. Instead, we will call
* cond_resched and also add any time that was lost by a
- * rescedule.
+ * reschedule.
*
* Do a cond resched at the same frequency we would wake up
* the reader.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6a0ee9178365..02a23a6e5e00 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -45,6 +45,9 @@
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
+#include <linux/fsnotify.h>
+#include <linux/irq_work.h>
+#include <linux/workqueue.h>
#include "trace.h"
#include "trace_output.h"
@@ -298,12 +301,24 @@ static void __trace_array_put(struct trace_array *this_tr)
this_tr->ref--;
}
+/**
+ * trace_array_put - Decrement the reference counter for this trace array.
+ *
+ * NOTE: Use this when we no longer need the trace array returned by
+ * trace_array_get_by_name(). This ensures the trace array can later
+ * be destroyed.
+ *
+ */
void trace_array_put(struct trace_array *this_tr)
{
+ if (!this_tr)
+ return;
+
mutex_lock(&trace_types_lock);
__trace_array_put(this_tr);
mutex_unlock(&trace_types_lock);
}
+EXPORT_SYMBOL_GPL(trace_array_put);
int tracing_check_open_get_tr(struct trace_array *tr)
{
@@ -1497,6 +1512,74 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
}
unsigned long __read_mostly tracing_thresh;
+static const struct file_operations tracing_max_lat_fops;
+
+#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
+ defined(CONFIG_FSNOTIFY)
+
+static struct workqueue_struct *fsnotify_wq;
+
+static void latency_fsnotify_workfn(struct work_struct *work)
+{
+ struct trace_array *tr = container_of(work, struct trace_array,
+ fsnotify_work);
+ fsnotify(tr->d_max_latency->d_inode, FS_MODIFY,
+ tr->d_max_latency->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
+{
+ struct trace_array *tr = container_of(iwork, struct trace_array,
+ fsnotify_irqwork);
+ queue_work(fsnotify_wq, &tr->fsnotify_work);
+}
+
+static void trace_create_maxlat_file(struct trace_array *tr,
+ struct dentry *d_tracer)
+{
+ INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
+ init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
+ tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
+ d_tracer, &tr->max_latency,
+ &tracing_max_lat_fops);
+}
+
+__init static int latency_fsnotify_init(void)
+{
+ fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
+ WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!fsnotify_wq) {
+ pr_err("Unable to allocate tr_max_lat_wq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+late_initcall_sync(latency_fsnotify_init);
+
+void latency_fsnotify(struct trace_array *tr)
+{
+ if (!fsnotify_wq)
+ return;
+ /*
+ * We cannot call queue_work(&tr->fsnotify_work) from here because it's
+ * possible that we are called from __schedule() or do_idle(), which
+ * could cause a deadlock.
+ */
+ irq_work_queue(&tr->fsnotify_irqwork);
+}
+
+/*
+ * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
+ * defined(CONFIG_FSNOTIFY)
+ */
+#else
+
+#define trace_create_maxlat_file(tr, d_tracer) \
+ trace_create_file("tracing_max_latency", 0644, d_tracer, \
+ &tr->max_latency, &tracing_max_lat_fops)
+
+#endif
#ifdef CONFIG_TRACER_MAX_TRACE
/*
@@ -1536,6 +1619,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
/* record this tasks comm */
tracing_record_cmdline(tsk);
+ latency_fsnotify(tr);
}
/**
@@ -3225,6 +3309,9 @@ int trace_array_printk(struct trace_array *tr,
if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
return 0;
+ if (!tr)
+ return -ENOENT;
+
va_start(ap, fmt);
ret = trace_array_vprintk(tr, ip, fmt, ap);
va_end(ap);
@@ -3654,6 +3741,8 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
"desktop",
#elif defined(CONFIG_PREEMPT)
"preempt",
+#elif defined(CONFIG_PREEMPT_RT)
+ "preempt_rt",
#else
"unknown",
#endif
@@ -4609,7 +4698,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (mask == TRACE_ITER_RECORD_TGID) {
if (!tgid_map)
- tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
+ tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
sizeof(*tgid_map),
GFP_KERNEL);
if (!tgid_map) {
@@ -7583,14 +7672,23 @@ static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- unsigned long *p = filp->private_data;
- char buf[64]; /* Not too big for a shallow stack */
+ ssize_t ret;
+ char *buf;
int r;
- r = scnprintf(buf, 63, "%ld", *p);
- buf[r++] = '\n';
+ /* 256 should be plenty to hold the amount needed */
+ buf = kmalloc(256, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
+ ftrace_update_tot_cnt,
+ ftrace_number_of_pages,
+ ftrace_number_of_groups);
+
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+ kfree(buf);
+ return ret;
}
static const struct file_operations tracing_dyn_info_fops = {
@@ -8351,24 +8449,15 @@ static void update_tracer_options(struct trace_array *tr)
mutex_unlock(&trace_types_lock);
}
-struct trace_array *trace_array_create(const char *name)
+static struct trace_array *trace_array_create(const char *name)
{
struct trace_array *tr;
int ret;
- mutex_lock(&event_mutex);
- mutex_lock(&trace_types_lock);
-
- ret = -EEXIST;
- list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- if (tr->name && strcmp(tr->name, name) == 0)
- goto out_unlock;
- }
-
ret = -ENOMEM;
tr = kzalloc(sizeof(*tr), GFP_KERNEL);
if (!tr)
- goto out_unlock;
+ return ERR_PTR(ret);
tr->name = kstrdup(name, GFP_KERNEL);
if (!tr->name)
@@ -8413,8 +8502,8 @@ struct trace_array *trace_array_create(const char *name)
list_add(&tr->list, &ftrace_trace_arrays);
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
+ tr->ref++;
+
return tr;
@@ -8424,24 +8513,77 @@ struct trace_array *trace_array_create(const char *name)
kfree(tr->name);
kfree(tr);
- out_unlock:
- mutex_unlock(&trace_types_lock);
- mutex_unlock(&event_mutex);
-
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(trace_array_create);
static int instance_mkdir(const char *name)
{
- return PTR_ERR_OR_ZERO(trace_array_create(name));
+ struct trace_array *tr;
+ int ret;
+
+ mutex_lock(&event_mutex);
+ mutex_lock(&trace_types_lock);
+
+ ret = -EEXIST;
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr->name && strcmp(tr->name, name) == 0)
+ goto out_unlock;
+ }
+
+ tr = trace_array_create(name);
+
+ ret = PTR_ERR_OR_ZERO(tr);
+
+out_unlock:
+ mutex_unlock(&trace_types_lock);
+ mutex_unlock(&event_mutex);
+ return ret;
+}
+
+/**
+ * trace_array_get_by_name - Create/Lookup a trace array, given its name.
+ * @name: The name of the trace array to be looked up/created.
+ *
+ * Returns a pointer to the trace array with the given name, or NULL
+ * if it cannot be created.
+ *
+ * NOTE: This function increments the reference counter associated with the
+ * trace array returned. This makes sure it cannot be freed while in use.
+ * Use trace_array_put() once the trace array is no longer needed.
+ *
+ */
+struct trace_array *trace_array_get_by_name(const char *name)
+{
+ struct trace_array *tr;
+
+ mutex_lock(&event_mutex);
+ mutex_lock(&trace_types_lock);
+
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr->name && strcmp(tr->name, name) == 0)
+ goto out_unlock;
+ }
+
+ tr = trace_array_create(name);
+
+ if (IS_ERR(tr))
+ tr = NULL;
+out_unlock:
+ if (tr)
+ tr->ref++;
+
+ mutex_unlock(&trace_types_lock);
+ mutex_unlock(&event_mutex);
+ return tr;
}
+EXPORT_SYMBOL_GPL(trace_array_get_by_name);
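A minimal usage sketch under the new reference-counting rules; the instance name is arbitrary:

	struct trace_array *tr;

	tr = trace_array_get_by_name("my_instance");	/* takes a reference */
	if (!tr)
		return -ENOMEM;

	trace_array_printk(tr, _THIS_IP_, "hello from my driver\n");

	trace_array_put(tr);	/* drop it so the instance can be removed */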
static int __remove_instance(struct trace_array *tr)
{
int i;
- if (tr->ref || (tr->current_trace && tr->current_trace->ref))
+ /* Reference counter for a newly created trace array = 1. */
+ if (tr->ref > 1 || (tr->current_trace && tr->current_trace->ref))
return -EBUSY;
list_del(&tr->list);
@@ -8473,17 +8615,26 @@ static int __remove_instance(struct trace_array *tr)
return 0;
}
-int trace_array_destroy(struct trace_array *tr)
+int trace_array_destroy(struct trace_array *this_tr)
{
+ struct trace_array *tr;
int ret;
- if (!tr)
+ if (!this_tr)
return -EINVAL;
mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
- ret = __remove_instance(tr);
+ ret = -ENODEV;
+
+ /* Make sure the trace array exists before destroying it. */
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (tr == this_tr) {
+ ret = __remove_instance(tr);
+ break;
+ }
+ }
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
@@ -8585,8 +8736,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
create_trace_options_dir(tr);
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
- trace_create_file("tracing_max_latency", 0644, d_tracer,
- &tr->max_latency, &tracing_max_lat_fops);
+ trace_create_maxlat_file(tr, d_tracer);
#endif
if (ftrace_create_function_files(tr, d_tracer))
@@ -8782,7 +8932,7 @@ static __init int tracer_init_tracefs(void)
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
- &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
+ NULL, &tracing_dyn_info_fops);
#endif
create_trace_instances(d_tracer);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d685c61085c0..ca7fccafbcbb 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -11,11 +11,14 @@
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
+#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
+#include <linux/irq_work.h>
+#include <linux/workqueue.h>
#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h> /* For NR_SYSCALLS */
@@ -264,6 +267,11 @@ struct trace_array {
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
unsigned long max_latency;
+#ifdef CONFIG_FSNOTIFY
+ struct dentry *d_max_latency;
+ struct work_struct fsnotify_work;
+ struct irq_work fsnotify_irqwork;
+#endif
#endif
struct trace_pid_list __rcu *filtered_pids;
/*
@@ -337,7 +345,6 @@ extern struct list_head ftrace_trace_arrays;
extern struct mutex trace_types_lock;
extern int trace_array_get(struct trace_array *tr);
-extern void trace_array_put(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
@@ -786,6 +793,17 @@ void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
+#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
+ defined(CONFIG_FSNOTIFY)
+
+void latency_fsnotify(struct trace_array *tr);
+
+#else
+
+static inline void latency_fsnotify(struct trace_array *tr) { }
+
+#endif
+
#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc);
@@ -804,6 +822,8 @@ extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
+extern unsigned long ftrace_number_of_pages;
+extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
@@ -853,8 +873,6 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args);
-int trace_array_printk(struct trace_array *tr,
- unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
@@ -1870,7 +1888,6 @@ extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];
void trace_printk_control(bool enabled);
-void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 3ea65cdff30d..88e158d27965 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -244,7 +244,7 @@ static int annotated_branch_stat_headers(struct seq_file *m)
return 0;
}
-static inline long get_incorrect_percent(struct ftrace_branch_data *p)
+static inline long get_incorrect_percent(const struct ftrace_branch_data *p)
{
long percent;
@@ -332,10 +332,10 @@ annotated_branch_stat_next(void *v, int idx)
return p;
}
-static int annotated_branch_stat_cmp(void *p1, void *p2)
+static int annotated_branch_stat_cmp(const void *p1, const void *p2)
{
- struct ftrace_branch_data *a = p1;
- struct ftrace_branch_data *b = p2;
+ const struct ftrace_branch_data *a = p1;
+ const struct ftrace_branch_data *b = p2;
long percent_a, percent_b;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index fba87d10f0c1..6b3a69e9aa6a 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -793,6 +793,8 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
char *event = NULL, *sub = NULL, *match;
int ret;
+ if (!tr)
+ return -ENOENT;
/*
* The buf format can be <subsystem>:<event-name>
* *:<event-name> means any event by that name.
@@ -825,7 +827,6 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
return ret;
}
-EXPORT_SYMBOL_GPL(ftrace_set_clr_event);
/**
* trace_set_clr_event - enable or disable an event
@@ -850,6 +851,32 @@ int trace_set_clr_event(const char *system, const char *event, int set)
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
+/**
+ * trace_array_set_clr_event - enable or disable an event for a trace array.
+ * @tr: concerned trace array.
+ * @system: system name to match (NULL for any system)
+ * @event: event name to match (NULL for all events, within system)
+ * @enable: true to enable, false to disable
+ *
+ * This is a way for other parts of the kernel to enable or disable
+ * event recording.
+ *
+ * Returns 0 on success, -EINVAL if the parameters do not match any
+ * registered events.
+ */
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
+ const char *event, bool enable)
+{
+ int set;
+
+ if (!tr)
+ return -ENOENT;
+
+ set = (enable == true) ? 1 : 0;
+ return __ftrace_set_clr_event(tr, NULL, system, event, set);
+}
+EXPORT_SYMBOL_GPL(trace_array_set_clr_event);
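Continuing the sketch from trace_array_get_by_name() above; the system and event names are only examples:

	/* Enable sched:sched_switch recording in the instance @tr. */
	ret = trace_array_set_clr_event(tr, "sched", "sched_switch", true);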
+
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 7482a1466ebf..f49d1a36d3ae 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -23,7 +23,7 @@
#include "trace_dynevent.h"
#define SYNTH_SYSTEM "synthetic"
-#define SYNTH_FIELDS_MAX 16
+#define SYNTH_FIELDS_MAX 32
#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 45630a76ed3a..2e6d2e9741cc 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -171,7 +171,7 @@ ftrace_define_fields_##name(struct trace_event_call *event_call) \
#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
regfn) \
\
-struct trace_event_class __refdata event_class_ftrace_##call = { \
+static struct trace_event_class __refdata event_class_ftrace_##call = { \
.system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
@@ -187,7 +187,7 @@ struct trace_event_call __used event_##call = { \
.print_fmt = print, \
.flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
}; \
-struct trace_event_call __used \
+static struct trace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
#undef FTRACE_ENTRY
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 862f4b0139fc..6638d63f0921 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * trace_hwlatdetect.c - A simple Hardware Latency detector.
+ * trace_hwlat.c - A simple Hardware Latency detector.
*
* Use this tracer to detect large system latencies induced by the behavior of
* certain underlying system hardware or firmware, independent of Linux itself.
@@ -237,6 +237,7 @@ static int get_sample(void)
/* If we exceed the threshold value, we have found a hardware latency */
if (sample > thresh || outer_sample > thresh) {
struct hwlat_sample s;
+ u64 latency;
ret = 1;
@@ -253,11 +254,13 @@ static int get_sample(void)
s.nmi_count = nmi_count;
trace_hwlat_sample(&s);
+ latency = max(sample, outer_sample);
+
/* Keep a running maximum ever recorded hardware latency */
- if (sample > tr->max_latency)
- tr->max_latency = sample;
- if (outer_sample > tr->max_latency)
- tr->max_latency = outer_sample;
+ if (latency > tr->max_latency) {
+ tr->max_latency = latency;
+ latency_fsnotify(tr);
+ }
}
out:
@@ -276,7 +279,7 @@ static void move_to_next_cpu(void)
return;
/*
* If for some reason the user modifies the CPU affinity
- * of this thread, than stop migrating for the duration
+ * of this thread, then stop migrating for the duration
* of the current test.
*/
if (!cpumask_equal(current_mask, current->cpus_ptr))
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1552a95c743b..7f890262c8a3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -435,11 +435,10 @@ static int disable_trace_kprobe(struct trace_event_call *call,
#if defined(CONFIG_KPROBES_ON_FTRACE) && \
!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
-static bool within_notrace_func(struct trace_kprobe *tk)
+static bool __within_notrace_func(unsigned long addr)
{
- unsigned long offset, size, addr;
+ unsigned long offset, size;
- addr = trace_kprobe_address(tk);
if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
return false;
@@ -452,6 +451,28 @@ static bool within_notrace_func(struct trace_kprobe *tk)
*/
return !ftrace_location_range(addr, addr + size - 1);
}
+
+static bool within_notrace_func(struct trace_kprobe *tk)
+{
+ unsigned long addr = trace_kprobe_address(tk);
+ char symname[KSYM_NAME_LEN], *p;
+
+ if (!__within_notrace_func(addr))
+ return false;
+
+ /* Check if the address is on a suffixed-symbol */
+ if (!lookup_symbol_name(addr, symname)) {
+ p = strchr(symname, '.');
+ if (!p)
+ return true;
+ *p = '\0';
+ addr = (unsigned long)kprobe_lookup_name(symname, 0);
+ if (addr)
+ return __within_notrace_func(addr);
+ }
+
+ return true;
+}
#else
#define within_notrace_func(tk) (false)
#endif
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d54ce252b05a..d9b4b7c22db4 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -274,6 +274,21 @@ trace_print_array_seq(struct trace_seq *p, const void *buf, int count,
}
EXPORT_SYMBOL(trace_print_array_seq);
+const char *
+trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_putc(p, '\n');
+ trace_seq_hex_dump(p, prefix_str, prefix_type,
+ rowsize, groupsize, buf, len, ascii);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+EXPORT_SYMBOL(trace_print_hex_dump_seq);
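A sketch of how an event's output callback might use the new helper; my_buf and my_len stand in for real event fields:

	/* 16 bytes per row, 1-byte groups, with an ASCII column. */
	trace_print_hex_dump_seq(&iter->seq, "raw: ", DUMP_PREFIX_OFFSET,
				 16, 1, my_buf, my_len, true);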
+
int trace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *trace_event)
{
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 6b1c562ffdaf..344e4c1aa09c 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -376,3 +376,33 @@ int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
return seq_buf_to_user(&s->seq, ubuf, cnt);
}
EXPORT_SYMBOL_GPL(trace_seq_to_user);
+
+int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ unsigned int save_len = s->seq.len;
+
+ if (s->full)
+ return 0;
+
+ __trace_seq_init(s);
+
+ if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+ s->full = 1;
+ return 0;
+ }
+
+ seq_buf_hex_dump(&(s->seq), prefix_str,
+ prefix_type, rowsize, groupsize,
+ buf, len, ascii);
+
+ if (unlikely(seq_buf_has_overflowed(&s->seq))) {
+ s->seq.len = save_len;
+ s->full = 1;
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(trace_seq_hex_dump);
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index 9ab0a1a7ad5e..874f1274cf99 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -72,9 +72,7 @@ static void destroy_session(struct stat_session *session)
kfree(session);
}
-typedef int (*cmp_stat_t)(void *, void *);
-
-static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
+static int insert_stat(struct rb_root *root, void *stat, cmp_func_t cmp)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct stat_node *data;
@@ -112,7 +110,7 @@ static int insert_stat(struct rb_root *root, void *stat, cmp_stat_t cmp)
* This one will force an insertion as right-most node
* in the rbtree.
*/
-static int dummy_cmp(void *p1, void *p2)
+static int dummy_cmp(const void *p1, const void *p2)
{
return -1;
}
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index 8786d17caf49..31d7dc5bf1db 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -16,7 +16,7 @@ struct tracer_stat {
void *(*stat_start)(struct tracer_stat *trace);
void *(*stat_next)(void *prev, int idx);
/* Compare two entries for stats sorting */
- int (*stat_cmp)(void *p1, void *p2);
+ cmp_func_t stat_cmp;
/* Print a stat entry */
int (*stat_show)(struct seq_file *s, void *p);
/* Release an entry */
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index fa8fbff736d6..16fa218556fa 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -7,6 +7,7 @@
#include <linux/module.h> /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
+#include <linux/xarray.h>
#include <asm/syscall.h>
#include "trace_output.h"
@@ -30,6 +31,7 @@ syscall_get_enter_fields(struct trace_event_call *call)
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
+static DEFINE_XARRAY(syscalls_metadata_sparse);
static struct syscall_metadata **syscalls_metadata;
#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
@@ -101,6 +103,9 @@ find_syscall_meta(unsigned long syscall)
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
+ if (IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR))
+ return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);
+
if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
return NULL;
@@ -536,12 +541,16 @@ void __init init_ftrace_syscalls(void)
struct syscall_metadata *meta;
unsigned long addr;
int i;
-
- syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
- GFP_KERNEL);
- if (!syscalls_metadata) {
- WARN_ON(1);
- return;
+ void *ret;
+
+ if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
+ syscalls_metadata = kcalloc(NR_syscalls,
+ sizeof(*syscalls_metadata),
+ GFP_KERNEL);
+ if (!syscalls_metadata) {
+ WARN_ON(1);
+ return;
+ }
}
for (i = 0; i < NR_syscalls; i++) {
@@ -551,7 +560,16 @@ void __init init_ftrace_syscalls(void)
continue;
meta->syscall_nr = i;
- syscalls_metadata[i] = meta;
+
+ if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
+ syscalls_metadata[i] = meta;
+ } else {
+ ret = xa_store(&syscalls_metadata_sparse, i, meta,
+ GFP_KERNEL);
+ WARN(xa_is_err(ret),
+ "Syscall memory allocation failed\n");
+ }
+
}
}
diff --git a/lib/Kconfig b/lib/Kconfig
index 3321d04dfa5a..6d7c5877c9f1 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -605,6 +605,9 @@ config ARCH_NO_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config MEMREGION
+ bool
+
# use memcpy to implement user copies for nommu architectures
config UACCESS_MEMCPY
bool
@@ -637,6 +640,9 @@ config STRING_SELFTEST
endmenu
+config GENERIC_IOREMAP
+ bool
+
config GENERIC_LIB_ASHLDI3
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ecde997db751..2f6fb96405af 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -308,17 +308,6 @@ config HEADERS_INSTALL
user-space program samples. It is also needed by some features such
as uapi header sanity checks.
-config HEADERS_CHECK
- bool "Run sanity checks on uapi headers when building 'all'"
- depends on HEADERS_INSTALL
- help
- This option will run basic sanity checks on uapi headers when
- building the 'all' target, for example, ensure that they do not
- attempt to include files which were not exported, etc.
-
- If you're making modifications to header files which are
- relevant for userspace, say 'Y'.
-
config OPTIMIZE_INLINING
def_bool y
help
@@ -2167,4 +2156,11 @@ config IO_STRICT_DEVMEM
source "arch/$(SRCARCH)/Kconfig.debug"
+config HYPERV_TESTING
+ bool "Microsoft Hyper-V driver testing"
+ default n
+ depends on HYPERV && DEBUG_FS
+ help
+ Select this option to enable Hyper-V vmbus testing.
+
endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 6c9682ce0254..81f5464ea9e1 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -6,6 +6,9 @@ config HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
bool
+config HAVE_ARCH_KASAN_VMALLOC
+ bool
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -142,6 +145,19 @@ config KASAN_SW_TAGS_IDENTIFY
(use-after-free or out-of-bounds) at the cost of increased
memory consumption.
+config KASAN_VMALLOC
+ bool "Back mappings in vmalloc space with real shadow memory"
+ depends on KASAN && HAVE_ARCH_KASAN_VMALLOC
+ help
+ By default, the shadow region for vmalloc space is the read-only
+ zero page. This means that KASAN cannot detect errors involving
+ vmalloc space.
+
+ Enabling this option will hook into vmap/vmalloc and back those
+ mappings with real shadow memory allocated on demand. This allows
+ KASAN to detect more kinds of errors (and to support vmapped
+ stacks), at the cost of higher memory usage.
+
config TEST_KASAN
tristate "Module for testing KASAN for bug detection"
depends on m && KASAN
diff --git a/lib/Makefile b/lib/Makefile
index b7f0ea999d48..c2f0e2a4e4e8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -214,6 +214,7 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_SG_POOL) += sg_pool.o
+obj-$(CONFIG_MEMREGION) += memregion.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
obj-$(CONFIG_IRQ_POLL) += irq_poll.o
diff --git a/lib/bsearch.c b/lib/bsearch.c
index 8baa83968162..8b3aae5ae77a 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -29,7 +29,7 @@
* the same comparison function for both sort() and bsearch().
*/
void *bsearch(const void *key, const void *base, size_t num, size_t size,
- int (*cmp)(const void *key, const void *elt))
+ cmp_func_t cmp)
{
const char *pivot;
int result;
diff --git a/lib/devres.c b/lib/devres.c
index 6a0e9bd6524a..f56070cf970b 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -9,6 +9,7 @@
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
DEVM_IOREMAP_NC,
+ DEVM_IOREMAP_UC,
DEVM_IOREMAP_WC,
};
@@ -39,6 +40,9 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
case DEVM_IOREMAP_NC:
addr = ioremap_nocache(offset, size);
break;
+ case DEVM_IOREMAP_UC:
+ addr = ioremap_uc(offset, size);
+ break;
case DEVM_IOREMAP_WC:
addr = ioremap_wc(offset, size);
break;
@@ -69,6 +73,21 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
EXPORT_SYMBOL(devm_ioremap);
/**
+ * devm_ioremap_uc - Managed ioremap_uc()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
+}
+EXPORT_SYMBOL_GPL(devm_ioremap_uc);
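A hedged probe-time sketch of the new managed variant:

	static int my_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		/* Strongly uncached mapping; unmapped automatically on detach. */
		base = devm_ioremap_uc(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENOMEM;
		return 0;
	}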
+
+/**
* devm_ioremap_nocache - Managed ioremap_nocache()
* @dev: Generic device to remap IO address for
* @offset: Resource address to map
@@ -114,25 +133,9 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
}
EXPORT_SYMBOL(devm_iounmap);
-/**
- * devm_ioremap_resource() - check, request region, and ioremap resource
- * @dev: generic device to handle the resource for
- * @res: resource to be handled
- *
- * Checks that a resource is a valid memory region, requests the memory
- * region and ioremaps it. All operations are managed and will be undone
- * on driver detach.
- *
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_ioremap_resource(&pdev->dev, res);
- * if (IS_ERR(base))
- * return PTR_ERR(base);
- */
-void __iomem *devm_ioremap_resource(struct device *dev,
- const struct resource *res)
+static void __iomem *
+__devm_ioremap_resource(struct device *dev, const struct resource *res,
+ enum devm_ioremap_type type)
{
resource_size_t size;
void __iomem *dest_ptr;
@@ -151,7 +154,7 @@ void __iomem *devm_ioremap_resource(struct device *dev,
return IOMEM_ERR_PTR(-EBUSY);
}
- dest_ptr = devm_ioremap(dev, res->start, size);
+ dest_ptr = __devm_ioremap(dev, res->start, size, type);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
@@ -160,8 +163,46 @@ void __iomem *devm_ioremap_resource(struct device *dev,
return dest_ptr;
}
+
+/**
+ * devm_ioremap_resource() - check, request region, and ioremap resource
+ * @dev: generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure. Usage example:
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_ioremap_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
+{
+ return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
+}
EXPORT_SYMBOL(devm_ioremap_resource);
+/**
+ * devm_ioremap_resource_wc() - write-combined variant of
+ * devm_ioremap_resource()
+ * @dev: generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res)
+{
+ return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
+}
+
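Usage mirrors devm_ioremap_resource(), documented above; a sketch:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource_wc(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);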
/*
* devm_of_iomap - Requests a resource and maps the memory mapped IO
* for a given device_node managed by a given device
@@ -262,7 +303,7 @@ EXPORT_SYMBOL(devm_ioport_unmap);
/*
* PCI iomap devres
*/
-#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
struct pcim_iomap_devres {
void __iomem *table[PCIM_IOMAP_MAX];
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9fc31292cfa1..24d20ca7e91b 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -472,7 +472,7 @@ void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
- * gen_pool_free - free allocated special memory back to the pool
+ * gen_pool_free_owner - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 0a2ffadc6d71..3f0e18543de8 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -231,3 +231,42 @@ int ioremap_page_range(unsigned long addr,
return err;
}
+
+#ifdef CONFIG_GENERIC_IOREMAP
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+{
+ unsigned long offset, vaddr;
+ phys_addr_t last_addr;
+ struct vm_struct *area;
+
+ /* Disallow wrap-around or zero size */
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ /* Page-align mappings */
+ offset = addr & (~PAGE_MASK);
+ addr -= offset;
+ size = PAGE_ALIGN(size + offset);
+
+ area = get_vm_area_caller(size, VM_IOREMAP,
+ __builtin_return_address(0));
+ if (!area)
+ return NULL;
+ vaddr = (unsigned long)area->addr;
+
+ if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+ free_vm_area(area);
+ return NULL;
+ }
+
+ return (void __iomem *)(vaddr + offset);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void iounmap(volatile void __iomem *addr)
+{
+ vunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+#endif /* CONFIG_GENERIC_IOREMAP */
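For context, an architecture selecting CONFIG_GENERIC_IOREMAP would typically supply only thin wrappers over ioremap_prot(); a hypothetical sketch, with the protection value being arch-specific:

	/* Hypothetical arch wrapper built on the generic ioremap_prot(). */
	#define ioremap(addr, size) \
		ioremap_prot((addr), (size), \
			     pgprot_val(pgprot_noncached(PAGE_KERNEL)))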
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 639d5e7014c1..fb29c02c6a3c 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -325,28 +325,33 @@ done:
static bool sanity(const struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
- int idx = i->idx;
- int next = pipe->curbuf + pipe->nrbufs;
+ unsigned int p_head = pipe->head;
+ unsigned int p_tail = pipe->tail;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
+ unsigned int i_head = i->head;
+ unsigned int idx;
+
if (i->iov_offset) {
struct pipe_buffer *p;
- if (unlikely(!pipe->nrbufs))
+ if (unlikely(p_occupancy == 0))
goto Bad; // pipe must be non-empty
- if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
+ if (unlikely(i_head != p_head - 1))
goto Bad; // must be at the last buffer...
- p = &pipe->bufs[idx];
+ p = &pipe->bufs[i_head & p_mask];
if (unlikely(p->offset + p->len != i->iov_offset))
goto Bad; // ... at the end of segment
} else {
- if (idx != (next & (pipe->buffers - 1)))
+ if (i_head != p_head)
goto Bad; // must be right after the last buffer
}
return true;
Bad:
- printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
- printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
- pipe->curbuf, pipe->nrbufs, pipe->buffers);
- for (idx = 0; idx < pipe->buffers; idx++)
+ printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
+ printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
+ p_head, p_tail, pipe->ring_size);
+ for (idx = 0; idx < pipe->ring_size; idx++)
printk(KERN_ERR "[%p %p %d %d]\n",
pipe->bufs[idx].ops,
pipe->bufs[idx].page,
@@ -359,18 +364,15 @@ Bad:
#define sanity(i) true
#endif
-static inline int next_idx(int idx, struct pipe_inode_info *pipe)
-{
- return (idx + 1) & (pipe->buffers - 1);
-}
-
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
+ unsigned int p_tail = pipe->tail;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head = i->head;
size_t off;
- int idx;
if (unlikely(bytes > i->count))
bytes = i->count;
@@ -382,8 +384,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
return 0;
off = i->iov_offset;
- idx = i->idx;
- buf = &pipe->bufs[idx];
+ buf = &pipe->bufs[i_head & p_mask];
if (off) {
if (offset == off && buf->page == page) {
/* merge with the last one */
@@ -391,18 +392,21 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
i->iov_offset += bytes;
goto out;
}
- idx = next_idx(idx, pipe);
- buf = &pipe->bufs[idx];
+ i_head++;
+ buf = &pipe->bufs[i_head & p_mask];
}
- if (idx == pipe->curbuf && pipe->nrbufs)
+ if (pipe_full(i_head, p_tail, pipe->max_usage))
return 0;
- pipe->nrbufs++;
+
buf->ops = &page_cache_pipe_buf_ops;
- get_page(buf->page = page);
+ get_page(page);
+ buf->page = page;
buf->offset = offset;
buf->len = bytes;
+
+ pipe->head = i_head + 1;
i->iov_offset = offset + bytes;
- i->idx = idx;
+ i->head = i_head;
out:
i->count -= bytes;
return bytes;
@@ -480,24 +484,30 @@ static inline bool allocated(struct pipe_buffer *buf)
return buf->ops == &default_pipe_buf_ops;
}
-static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
+static inline void data_start(const struct iov_iter *i,
+ unsigned int *iter_headp, size_t *offp)
{
+ unsigned int p_mask = i->pipe->ring_size - 1;
+ unsigned int iter_head = i->head;
size_t off = i->iov_offset;
- int idx = i->idx;
- if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
- idx = next_idx(idx, i->pipe);
+
+ if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
+ off == PAGE_SIZE)) {
+ iter_head++;
off = 0;
}
- *idxp = idx;
+ *iter_headp = iter_head;
*offp = off;
}
static size_t push_pipe(struct iov_iter *i, size_t size,
- int *idxp, size_t *offp)
+ int *iter_headp, size_t *offp)
{
struct pipe_inode_info *pipe = i->pipe;
+ unsigned int p_tail = pipe->tail;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int iter_head;
size_t off;
- int idx;
ssize_t left;
if (unlikely(size > i->count))
@@ -506,33 +516,34 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
return 0;
left = size;
- data_start(i, &idx, &off);
- *idxp = idx;
+ data_start(i, &iter_head, &off);
+ *iter_headp = iter_head;
*offp = off;
if (off) {
left -= PAGE_SIZE - off;
if (left <= 0) {
- pipe->bufs[idx].len += size;
+ pipe->bufs[iter_head & p_mask].len += size;
return size;
}
- pipe->bufs[idx].len = PAGE_SIZE;
- idx = next_idx(idx, pipe);
+ pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
+ iter_head++;
}
- while (idx != pipe->curbuf || !pipe->nrbufs) {
+ while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
+ struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
struct page *page = alloc_page(GFP_USER);
if (!page)
break;
- pipe->nrbufs++;
- pipe->bufs[idx].ops = &default_pipe_buf_ops;
- pipe->bufs[idx].page = page;
- pipe->bufs[idx].offset = 0;
- if (left <= PAGE_SIZE) {
- pipe->bufs[idx].len = left;
+
+ buf->ops = &default_pipe_buf_ops;
+ buf->page = page;
+ buf->offset = 0;
+ buf->len = min_t(ssize_t, left, PAGE_SIZE);
+ left -= buf->len;
+ iter_head++;
+ pipe->head = iter_head;
+
+ if (left == 0)
return size;
- }
- pipe->bufs[idx].len = PAGE_SIZE;
- left -= PAGE_SIZE;
- idx = next_idx(idx, pipe);
}
return size - left;
}
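An aside on the ring arithmetic these conversions rely on: pipe->head and pipe->tail are free-running counters, so occupancy, fullness, and slot indexing all reduce to subtraction and masking (a sketch, assuming ring_size is a power of two):

	unsigned int occupancy = head - tail;	/* pipe_occupancy() */
	bool empty = head == tail;		/* pipe_empty()     */
	bool full  = occupancy >= max_usage;	/* pipe_full()      */
	struct pipe_buffer *buf = &bufs[head & (ring_size - 1)];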
@@ -541,23 +552,26 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head;
size_t n, off;
- int idx;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &idx, &off);
+ bytes = n = push_pipe(i, bytes, &i_head, &off);
if (unlikely(!n))
return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
- i->idx = idx;
+ memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
+ i->head = i_head;
i->iov_offset = off + chunk;
n -= chunk;
addr += chunk;
- }
+ off = 0;
+ i_head++;
+ } while (n);
i->count -= bytes;
return bytes;
}
@@ -573,28 +587,31 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
__wsum *csum, struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head;
size_t n, r;
size_t off = 0;
__wsum sum = *csum;
- int idx;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &idx, &r);
+ bytes = n = push_pipe(i, bytes, &i_head, &r);
if (unlikely(!n))
return 0;
- for ( ; n; idx = next_idx(idx, pipe), r = 0) {
+ do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
- char *p = kmap_atomic(pipe->bufs[idx].page);
+ char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
kunmap_atomic(p);
- i->idx = idx;
+ i->head = i_head;
i->iov_offset = r + chunk;
n -= chunk;
off += chunk;
addr += chunk;
- }
+ r = 0;
+ i_head++;
+ } while (n);
i->count -= bytes;
*csum = sum;
return bytes;
@@ -645,29 +662,32 @@ static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head;
size_t n, off, xfer = 0;
- int idx;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &idx, &off);
+ bytes = n = push_pipe(i, bytes, &i_head, &off);
if (unlikely(!n))
return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
unsigned long rem;
- rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
- chunk);
- i->idx = idx;
+ rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
+ off, addr, chunk);
+ i->head = i_head;
i->iov_offset = off + chunk - rem;
xfer += chunk - rem;
if (rem)
break;
n -= chunk;
addr += chunk;
- }
+ off = 0;
+ i_head++;
+ } while (n);
i->count -= xfer;
return xfer;
}
@@ -925,23 +945,26 @@ EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head;
size_t n, off;
- int idx;
if (!sanity(i))
return 0;
- bytes = n = push_pipe(i, bytes, &idx, &off);
+ bytes = n = push_pipe(i, bytes, &i_head, &off);
if (unlikely(!n))
return 0;
- for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ do {
size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
- memzero_page(pipe->bufs[idx].page, off, chunk);
- i->idx = idx;
+ memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
+ i->head = i_head;
i->iov_offset = off + chunk;
n -= chunk;
- }
+ off = 0;
+ i_head++;
+ } while (n);
i->count -= bytes;
return bytes;
}
@@ -987,20 +1010,26 @@ EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
static inline void pipe_truncate(struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
- if (pipe->nrbufs) {
+ unsigned int p_tail = pipe->tail;
+ unsigned int p_head = pipe->head;
+ unsigned int p_mask = pipe->ring_size - 1;
+
+ if (!pipe_empty(p_head, p_tail)) {
+ struct pipe_buffer *buf;
+ unsigned int i_head = i->head;
size_t off = i->iov_offset;
- int idx = i->idx;
- int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+
if (off) {
- pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
- idx = next_idx(idx, pipe);
- nrbufs++;
+ buf = &pipe->bufs[i_head & p_mask];
+ buf->len = off - buf->offset;
+ i_head++;
}
- while (pipe->nrbufs > nrbufs) {
- pipe_buf_release(pipe, &pipe->bufs[idx]);
- idx = next_idx(idx, pipe);
- pipe->nrbufs--;
+ while (p_head != i_head) {
+ p_head--;
+ pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
}
+
+ pipe->head = p_head;
}
}
@@ -1011,18 +1040,20 @@ static void pipe_advance(struct iov_iter *i, size_t size)
size = i->count;
if (size) {
struct pipe_buffer *buf;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head = i->head;
size_t off = i->iov_offset, left = size;
- int idx = i->idx;
+
if (off) /* make it relative to the beginning of buffer */
- left += off - pipe->bufs[idx].offset;
+ left += off - pipe->bufs[i_head & p_mask].offset;
while (1) {
- buf = &pipe->bufs[idx];
+ buf = &pipe->bufs[i_head & p_mask];
if (left <= buf->len)
break;
left -= buf->len;
- idx = next_idx(idx, pipe);
+ i_head++;
}
- i->idx = idx;
+ i->head = i_head;
i->iov_offset = buf->offset + left;
}
i->count -= size;
@@ -1053,25 +1084,27 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
i->count += unroll;
if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
- int idx = i->idx;
+ unsigned int p_mask = pipe->ring_size - 1;
+ unsigned int i_head = i->head;
size_t off = i->iov_offset;
while (1) {
- size_t n = off - pipe->bufs[idx].offset;
+ struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
+ size_t n = off - b->offset;
if (unroll < n) {
off -= unroll;
break;
}
unroll -= n;
- if (!unroll && idx == i->start_idx) {
+ if (!unroll && i_head == i->start_head) {
off = 0;
break;
}
- if (!idx--)
- idx = pipe->buffers - 1;
- off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+ i_head--;
+ b = &pipe->bufs[i_head & p_mask];
+ off = b->offset + b->len;
}
i->iov_offset = off;
- i->idx = idx;
+ i->head = i_head;
pipe_truncate(i);
return;
}
@@ -1159,13 +1192,13 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
size_t count)
{
BUG_ON(direction != READ);
- WARN_ON(pipe->nrbufs == pipe->buffers);
+ WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
i->type = ITER_PIPE | READ;
i->pipe = pipe;
- i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ i->head = pipe->head;
i->iov_offset = 0;
i->count = count;
- i->start_idx = i->idx;
+ i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);
@@ -1189,11 +1222,12 @@ EXPORT_SYMBOL(iov_iter_discard);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
+ unsigned int p_mask = i->pipe->ring_size - 1;
unsigned long res = 0;
size_t size = i->count;
if (unlikely(iov_iter_is_pipe(i))) {
- if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
+ if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
return size | i->iov_offset;
return size;
}
@@ -1231,19 +1265,20 @@ EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
size_t maxsize,
struct page **pages,
- int idx,
+ int iter_head,
size_t *start)
{
struct pipe_inode_info *pipe = i->pipe;
- ssize_t n = push_pipe(i, maxsize, &idx, start);
+ unsigned int p_mask = pipe->ring_size - 1;
+ ssize_t n = push_pipe(i, maxsize, &iter_head, start);
if (!n)
return -EFAULT;
maxsize = n;
n += *start;
while (n > 0) {
- get_page(*pages++ = pipe->bufs[idx].page);
- idx = next_idx(idx, pipe);
+ get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
+ iter_head++;
n -= PAGE_SIZE;
}
@@ -1254,9 +1289,8 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
- unsigned npages;
+ unsigned int iter_head, npages;
size_t capacity;
- int idx;
if (!maxsize)
return 0;
@@ -1264,12 +1298,12 @@ static ssize_t pipe_get_pages(struct iov_iter *i,
if (!sanity(i))
return -EFAULT;
- data_start(i, &idx, start);
- /* some of this one + all after this one */
- npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
- capacity = min(npages,maxpages) * PAGE_SIZE - *start;
+ data_start(i, &iter_head, start);
+ /* Amount of free space: some of this one + all after this one */
+ npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
+ capacity = min(npages, maxpages) * PAGE_SIZE - *start;
- return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
+ return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}
ssize_t iov_iter_get_pages(struct iov_iter *i,
@@ -1323,9 +1357,8 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
+ unsigned int iter_head, npages;
ssize_t n;
- int idx;
- int npages;
if (!maxsize)
return 0;
@@ -1333,9 +1366,9 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
if (!sanity(i))
return -EFAULT;
- data_start(i, &idx, start);
- /* some of this one + all after this one */
- npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
+ data_start(i, &iter_head, start);
+ /* Amount of free space: some of this one + all after this one */
+ npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
n = npages * PAGE_SIZE - *start;
if (maxsize > n)
maxsize = n;
@@ -1344,7 +1377,7 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
p = get_pages_array(npages);
if (!p)
return -ENOMEM;
- n = __pipe_get_pages(i, maxsize, p, idx, start);
+ n = __pipe_get_pages(i, maxsize, p, iter_head, start);
if (n > 0)
*pages = p;
else
@@ -1560,15 +1593,15 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
if (unlikely(iov_iter_is_pipe(i))) {
struct pipe_inode_info *pipe = i->pipe;
+ unsigned int iter_head;
size_t off;
- int idx;
if (!sanity(i))
return 0;
- data_start(i, &idx, &off);
+ data_start(i, &iter_head, &off);
/* some of this one + all after this one */
- npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
+ npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
if (npages >= maxpages)
return maxpages;
} else iterate_all_kinds(i, size, v, ({
@@ -1678,6 +1711,7 @@ ssize_t compat_import_iovec(int type,
*iov = p == *iov ? NULL : p;
return n;
}
+EXPORT_SYMBOL(compat_import_iovec);
#endif
int import_single_range(int rw, void __user *buf, size_t len,
diff --git a/lib/memregion.c b/lib/memregion.c
new file mode 100644
index 000000000000..77c85b5251da
--- /dev/null
+++ b/lib/memregion.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* identifiers for device / performance-differentiated memory regions */
+#include <linux/idr.h>
+#include <linux/types.h>
+
+static DEFINE_IDA(memregion_ids);
+
+int memregion_alloc(gfp_t gfp)
+{
+ return ida_alloc(&memregion_ids, gfp);
+}
+EXPORT_SYMBOL(memregion_alloc);
+
+void memregion_free(int id)
+{
+ ida_free(&memregion_ids, id);
+}
+EXPORT_SYMBOL(memregion_free);
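The new lib/memregion.c is a thin wrapper around the IDA allocator that hands out stable small-integer ids for memory regions. A hedged usage sketch, assuming the matching <linux/memregion.h> header added elsewhere in this series; example_probe() is hypothetical:

	#include <linux/memregion.h>

	static int example_probe(void)
	{
		/* ida_alloc() underneath: returns -ENOMEM or -ENOSPC on failure */
		int id = memregion_alloc(GFP_KERNEL);

		if (id < 0)
			return id;
		/* ... use id, e.g. to name a "regionN" device ... */
		memregion_free(id);
		return 0;
	}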
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index bd807f545a9d..4e865d42ab03 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -328,3 +328,65 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
s->readpos += cnt;
return cnt;
}
+
+/**
+ * seq_buf_hex_dump - print formatted hex dump into the sequence buffer
+ * @s: seq_buf descriptor
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * This function is an analogue of print_hex_dump() and thus has a similar interface.
+ *
+ * linebuf size is maximal length for one line.
+ * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for
+ * separating space
+ * 2 - spaces separating hex dump and ascii representation
+ * 32 - ascii representation
+ * 1 - terminating '\0'
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ int ret;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ ret = seq_buf_printf(s, "%s%p: %s\n",
+ prefix_str, ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ ret = seq_buf_printf(s, "%s%.8x: %s\n",
+ prefix_str, i, linebuf);
+ break;
+ default:
+ ret = seq_buf_printf(s, "%s%s\n", prefix_str, linebuf);
+ break;
+ }
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
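A hedged usage sketch of the new helper (example_dump() and its buffer sizes are hypothetical): dump a blob into a seq_buf, 16 bytes per row, with offset prefixes and an ASCII column:

	#include <linux/printk.h>	/* DUMP_PREFIX_OFFSET */
	#include <linux/seq_buf.h>

	static void example_dump(const void *data, size_t len)
	{
		static unsigned char out[512];
		struct seq_buf s;

		seq_buf_init(&s, out, sizeof(out));
		if (seq_buf_hex_dump(&s, "raw: ", DUMP_PREFIX_OFFSET,
				     16, 1, data, len, true))
			pr_warn("hex dump overflowed the seq_buf\n");
	}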
diff --git a/lib/sort.c b/lib/sort.c
index d54cf97e9548..3ad454411997 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -117,8 +117,6 @@ static void swap_bytes(void *a, void *b, size_t n)
} while (n);
}
-typedef void (*swap_func_t)(void *a, void *b, int size);
-
/*
* The values are arbitrary as long as they can't be confused with
* a pointer, but small integers make for the smallest compare
@@ -144,12 +142,9 @@ static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
swap_func(a, b, (int)size);
}
-typedef int (*cmp_func_t)(const void *, const void *);
-typedef int (*cmp_r_func_t)(const void *, const void *, const void *);
#define _CMP_WRAPPER ((cmp_r_func_t)0L)
-static int do_cmp(const void *a, const void *b,
- cmp_r_func_t cmp, const void *priv)
+static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
if (cmp == _CMP_WRAPPER)
return ((cmp_func_t)(priv))(a, b);
@@ -202,8 +197,8 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
* it less suitable for kernel use.
*/
void sort_r(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *, const void *),
- void (*swap_func)(void *, void *, int size),
+ cmp_r_func_t cmp_func,
+ swap_func_t swap_func,
const void *priv)
{
/* pre-scale counters for performance */
@@ -269,8 +264,8 @@ void sort_r(void *base, size_t num, size_t size,
EXPORT_SYMBOL(sort_r);
void sort(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *),
- void (*swap_func)(void *, void *, int size))
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
{
return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
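With the local typedefs deleted above, cmp_func_t, cmp_r_func_t and swap_func_t presumably now come from a shared header so that callers and lib/sort.c agree on the types. A minimal sketch of sort_r(), threading a direction flag through the priv cookie; cmp_dir() and sort_desc() are hypothetical:

	#include <linux/sort.h>

	static int cmp_dir(const void *a, const void *b, const void *priv)
	{
		int x = *(const int *)a, y = *(const int *)b;
		int d = (x > y) - (x < y);	/* overflow-safe three-way compare */

		return *(const bool *)priv ? -d : d;
	}

	static void sort_desc(int *v, size_t n)
	{
		bool descending = true;

		/* A NULL swap_func lets sort_r() pick its built-in swap. */
		sort_r(v, n, sizeof(*v), cmp_dir, NULL, &descending);
	}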
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 49cc4d570a40..328d33beae36 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/vmalloc.h>
#include <asm/page.h>
@@ -748,6 +749,30 @@ static noinline void __init kmalloc_double_kzfree(void)
kzfree(ptr);
}
+#ifdef CONFIG_KASAN_VMALLOC
+static noinline void __init vmalloc_oob(void)
+{
+ void *area;
+
+ pr_info("vmalloc out-of-bounds\n");
+
+ /*
+ * We have to be careful not to hit the guard page.
+ * The MMU will catch that and crash us.
+ */
+ area = vmalloc(3000);
+ if (!area) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ ((volatile char *)area)[3100];
+ vfree(area);
+}
+#else
+static void __init vmalloc_oob(void) {}
+#endif
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -793,6 +818,7 @@ static int __init kmalloc_tests_init(void)
kasan_strings();
kasan_bitops();
kmalloc_double_kzfree();
+ vmalloc_oob();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 030daeb4fe21..2d9f520d2f27 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -22,6 +22,8 @@
#include <linux/gfp.h>
#include <linux/mm.h>
+#include <linux/property.h>
+
#include "../tools/testing/selftests/kselftest_module.h"
#define BUF_SIZE 256
@@ -593,6 +595,35 @@ flags(void)
kfree(cmp_buffer);
}
+static void __init fwnode_pointer(void)
+{
+ const struct software_node softnodes[] = {
+ { .name = "first", },
+ { .name = "second", .parent = &softnodes[0], },
+ { .name = "third", .parent = &softnodes[1], },
+ { NULL /* Guardian */ }
+ };
+ const char * const full_name = "first/second/third";
+ const char * const full_name_second = "first/second";
+ const char * const second_name = "second";
+ const char * const third_name = "third";
+ int rval;
+
+ rval = software_node_register_nodes(softnodes);
+ if (rval) {
+ pr_warn("cannot register softnodes; rval %d\n", rval);
+ return;
+ }
+
+ test(full_name_second, "%pfw", software_node_fwnode(&softnodes[1]));
+ test(full_name, "%pfw", software_node_fwnode(&softnodes[2]));
+ test(full_name, "%pfwf", software_node_fwnode(&softnodes[2]));
+ test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
+ test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
+
+ software_node_unregister_nodes(softnodes);
+}
+
static void __init
errptr(void)
{
@@ -636,6 +667,7 @@ test_pointer(void)
netdev_features();
flags();
errptr();
+ fwnode_pointer();
}
static void __init selftest(void)
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 45f57fd2db64..9ecfd3b547ba 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -164,10 +164,10 @@ __cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
}
#ifdef VDSO_HAS_TIME
-static __maybe_unused time_t __cvdso_time(time_t *time)
+static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
const struct vdso_data *vd = __arch_get_vdso_data();
- time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+ __kernel_old_time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
if (time)
*time = t;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b54d252b398e..7c488a1ce318 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -39,6 +39,7 @@
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
+#include <linux/property.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
@@ -760,11 +761,38 @@ static int __init initialize_ptr_random(void)
early_initcall(initialize_ptr_random);
/* Maps a pointer to a 32 bit unique identifier. */
+static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ unsigned long hashval;
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key))
+ return -EAGAIN;
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits; this makes it explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ *hashval_out = hashval;
+ return 0;
+}
+
+int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ return __ptr_to_hashval(ptr, hashval_out);
+}
+
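Splitting the hash computation into __ptr_to_hashval() lets the new ptr_to_hashval() export the same 32-bit id that a hashed %p would print. A hedged sketch of a hypothetical caller:

	unsigned long hash;

	if (!ptr_to_hashval(obj, &hash))
		pr_info("object id: %08lx\n", hash);
	else
		/* -EAGAIN until the siphash key has been seeded at boot */
		pr_info("object id not available yet\n");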
static char *ptr_to_id(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
+ int ret;
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
@@ -772,22 +800,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
return pointer_string(buf, end, (const void *)hashval, spec);
}
- if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ ret = __ptr_to_hashval(ptr, &hashval);
+ if (ret) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
return error_string(buf, end, str, spec);
}
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
- /*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
- */
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
return pointer_string(buf, end, (const void *)hashval, spec);
}
@@ -938,7 +957,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
#ifdef CONFIG_KALLSYMS
if (*fmt == 'B')
sprint_backtrace(sym, value);
- else if (*fmt != 'f' && *fmt != 's')
+ else if (*fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
@@ -1892,32 +1911,25 @@ char *flags_string(char *buf, char *end, void *flags_ptr,
return format_flags(buf, end, flags, names);
}
-static const char *device_node_name_for_depth(const struct device_node *np, int depth)
-{
- for ( ; np && depth; depth--)
- np = np->parent;
-
- return kbasename(np->full_name);
-}
-
static noinline_for_stack
-char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
+char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+ char *end)
{
int depth;
- const struct device_node *parent = np->parent;
-
- /* special case for root node */
- if (!parent)
- return string_nocheck(buf, end, "/", default_str_spec);
- for (depth = 0; parent->parent; depth++)
- parent = parent->parent;
+ /* Loop starting from the root node to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+ struct fwnode_handle *__fwnode =
+ fwnode_get_nth_parent(fwnode, depth);
- for ( ; depth >= 0; depth--) {
- buf = string_nocheck(buf, end, "/", default_str_spec);
- buf = string(buf, end, device_node_name_for_depth(np, depth),
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
+ default_str_spec);
+
+ fwnode_handle_put(__fwnode);
}
+
return buf;
}
@@ -1941,6 +1953,9 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
+ if (fmt[0] != 'F')
+ return error_string(buf, end, "(%pO?)", spec);
+
if (!IS_ENABLED(CONFIG_OF))
return error_string(buf, end, "(%pOF?)", spec);
@@ -1962,10 +1977,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
switch (*fmt) {
case 'f': /* full_name */
- buf = device_node_gen_full_name(dn, buf, end);
+ buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
+ end);
break;
case 'n': /* name */
- p = kbasename(of_node_full_name(dn));
+ p = fwnode_get_name(of_fwnode_handle(dn));
precision = str_spec.precision;
str_spec.precision = strchrnul(p, '@') - p;
buf = string(buf, end, p, str_spec);
@@ -1975,7 +1991,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
break;
case 'P': /* path-spec */
- p = kbasename(of_node_full_name(dn));
+ p = fwnode_get_name(of_fwnode_handle(dn));
if (!p[1])
p = "/";
buf = string(buf, end, p, str_spec);
@@ -2013,15 +2029,34 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-static char *kobject_string(char *buf, char *end, void *ptr,
- struct printf_spec spec, const char *fmt)
+static noinline_for_stack
+char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
+ struct printf_spec spec, const char *fmt)
{
- switch (fmt[1]) {
- case 'F':
- return device_node_string(buf, end, ptr, spec, fmt + 1);
+ struct printf_spec str_spec = spec;
+ char *buf_start = buf;
+
+ str_spec.field_width = -1;
+
+ if (*fmt != 'w')
+ return error_string(buf, end, "(%pf?)", spec);
+
+ if (check_pointer(&buf, end, fwnode, spec))
+ return buf;
+
+ fmt++;
+
+ switch (*fmt) {
+ case 'P': /* name */
+ buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
+ break;
+ case 'f': /* full_name */
+ default:
+ buf = fwnode_full_name_string(fwnode, buf, end);
+ break;
}
- return error_string(buf, end, "(%pO?)", spec);
+ return widen_string(buf, buf - buf_start, end, spec);
}
/*
@@ -2036,9 +2071,9 @@ static char *kobject_string(char *buf, char *end, void *ptr,
*
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
- * - 'F' Same as 'S'
- * - 'f' Same as 's'
- * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
+ * - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
+ * %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
@@ -2128,6 +2163,10 @@ static char *kobject_string(char *buf, char *end, void *ptr,
* F device node flags
* c major compatible string
* C full compatible string
+ * - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
+ * Without an option prints the full name of the node
+ * f full name
+ * P node name, including a possible unit address
* - 'x' For printing the address. Equivalent to "%lx".
*
* ** When making changes please also update:
@@ -2141,8 +2180,6 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
switch (*fmt) {
- case 'F':
- case 'f':
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
@@ -2204,7 +2241,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'G':
return flags_string(buf, end, ptr, spec, fmt);
case 'O':
- return kobject_string(buf, end, ptr, spec, fmt);
+ return device_node_string(buf, end, ptr, spec, fmt + 1);
+ case 'f':
+ return fwnode_string(buf, end, ptr, spec, fmt + 1);
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':
@@ -2844,8 +2883,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
/* Dereference of functions is still OK */
case 'S':
case 's':
- case 'F':
- case 'f':
case 'x':
case 'K':
case 'e':
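Taken together, the vsprintf changes retire %pF/%pf as symbol specifiers and hand %pf over to firmware nodes. A hedged sketch of the new usage, where fwnode is a hypothetical struct fwnode_handle pointer:

	pr_info("probing %pfw\n", fwnode);	/* full path, e.g. first/second/third */
	pr_info("node name %pfwP\n", fwnode);	/* last component only, e.g. third */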
diff --git a/mm/Kconfig b/mm/Kconfig
index a5dae9a7eb51..ab80933be65f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
For systems that have holes in their physical address
spaces and for features like NUMA and memory hotplug,
- choose "Sparse Memory"
+ choose "Sparse Memory".
If unsure, choose this option (Flat Memory) over any other.
@@ -122,9 +122,9 @@ config SPARSEMEM_VMEMMAP
depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
default y
help
- SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
- pfn_to_page and page_to_pfn operations. This is the most
- efficient option when sufficient kernel resources are available.
+ SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
+ pfn_to_page and page_to_pfn operations. This is the most
+ efficient option when sufficient kernel resources are available.
config HAVE_MEMBLOCK_NODE_MAP
bool
@@ -160,9 +160,9 @@ config MEMORY_HOTPLUG_SPARSE
depends on SPARSEMEM && MEMORY_HOTPLUG
config MEMORY_HOTPLUG_DEFAULT_ONLINE
- bool "Online the newly added memory blocks by default"
- depends on MEMORY_HOTPLUG
- help
+ bool "Online the newly added memory blocks by default"
+ depends on MEMORY_HOTPLUG
+ help
This option sets the default policy setting for memory hotplug
onlining policy (/sys/devices/system/memory/auto_online_blocks) which
determines what happens to newly added memory regions. Policy setting
@@ -227,14 +227,14 @@ config COMPACTION
select MIGRATION
depends on MMU
help
- Compaction is the only memory management component to form
- high order (larger physically contiguous) memory blocks
- reliably. The page allocator relies on compaction heavily and
- the lack of the feature can lead to unexpected OOM killer
- invocations for high order memory requests. You shouldn't
- disable this option unless there really is a strong reason for
- it and then we would be really interested to hear about that at
- linux-mm@kvack.org.
+ Compaction is the only memory management component to form
+ high order (larger physically contiguous) memory blocks
+ reliably. The page allocator relies on compaction heavily and
+ the lack of the feature can lead to unexpected OOM killer
+ invocations for high order memory requests. You shouldn't
+ disable this option unless there really is a strong reason for
+ it and then we would be really interested to hear about that at
+ linux-mm@kvack.org.
#
# support for page migration
@@ -258,7 +258,7 @@ config ARCH_ENABLE_THP_MIGRATION
bool
config CONTIG_ALLOC
- def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
+ def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
config PHYS_ADDR_T_64BIT
def_bool 64BIT
@@ -284,6 +284,7 @@ config VIRT_TO_BUS
config MMU_NOTIFIER
bool
select SRCU
+ select INTERVAL_TREE
config KSM
bool "Enable KSM for page merging"
@@ -301,10 +302,10 @@ config KSM
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
config DEFAULT_MMAP_MIN_ADDR
- int "Low address space to protect from user allocation"
+ int "Low address space to protect from user allocation"
depends on MMU
- default 4096
- help
+ default 4096
+ help
This is the portion of low virtual memory which should be protected
from userspace allocation. Keeping a user from writing to low pages
can help reduce the impact of kernel NULL pointer bugs.
@@ -407,7 +408,7 @@ choice
endchoice
config ARCH_WANTS_THP_SWAP
- def_bool n
+ def_bool n
config THP_SWAP
def_bool y
@@ -674,7 +675,6 @@ config DEV_PAGEMAP_OPS
config HMM_MIRROR
bool
depends on MMU
- depends on MMU_NOTIFIER
config DEVICE_PRIVATE
bool "Unaddressable device memory (GPU memory, ...)"
@@ -736,4 +736,7 @@ config ARCH_HAS_PTE_SPECIAL
config ARCH_HAS_HUGEPD
bool
+config MAPPING_DIRTY_HELPERS
+ bool
+
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index d996846697ef..1937cc251883 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -107,3 +107,4 @@ obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
+obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
diff --git a/mm/cma.c b/mm/cma.c
index 7fe0b8356775..be55d1988c67 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -95,13 +95,11 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
static int __init cma_activate_area(struct cma *cma)
{
- int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
unsigned i = cma->count >> pageblock_order;
struct zone *zone;
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
+ cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
if (!cma->bitmap) {
cma->count = 0;
return -ENOMEM;
@@ -139,7 +137,7 @@ static int __init cma_activate_area(struct cma *cma)
not_in_zone:
pr_err("CMA area %s could not be activated\n", cma->name);
- kfree(cma->bitmap);
+ bitmap_free(cma->bitmap);
cma->count = 0;
return -EINVAL;
}
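The cma.c hunk swaps an open-coded BITS_TO_LONGS()/kzalloc() pair for the bitmap API, which takes a count of bits rather than bytes. The pattern, as an illustrative sketch:

	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;
	/* ... set_bit()/clear_bit()/test_bit() on bits 0..nbits-1 ... */
	bitmap_free(map);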
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index a7dd9e8e10d5..4e6cbe2f586e 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -29,7 +29,7 @@ static int cma_debugfs_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");
static int cma_used_get(void *data, u64 *val)
{
@@ -44,7 +44,7 @@ static int cma_used_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
static int cma_maxchunk_get(void *data, u64 *val)
{
@@ -66,7 +66,7 @@ static int cma_maxchunk_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
@@ -126,7 +126,7 @@ static int cma_free_write(void *data, u64 val)
return cma_free_mem(cma, pages);
}
-DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
static int cma_alloc_mem(struct cma *cma, int count)
{
@@ -158,7 +158,7 @@ static int cma_alloc_write(void *data, u64 val)
return cma_alloc_mem(cma, pages);
}
-DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
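The conversions above pair DEFINE_DEBUGFS_ATTRIBUTE() with debugfs_create_file_unsafe(), which skips the per-operation file-lifetime proxy that plain debugfs_create_file() adds around DEFINE_SIMPLE_ATTRIBUTE() fops. A hedged sketch with hypothetical names:

	#include <linux/debugfs.h>

	static u64 example_value;

	static int example_get(void *data, u64 *val)
	{
		*val = *(u64 *)data;	/* report the stored counter */
		return 0;
	}
	DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, NULL, "%llu\n");

	/* at init time, under a suitable parent dentry: */
	debugfs_create_file_unsafe("example", 0444, parent_dentry,
				   &example_value, &example_fops);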
diff --git a/mm/filemap.c b/mm/filemap.c
index 85b7d087eb45..bf6aa30be58d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2329,27 +2329,6 @@ EXPORT_SYMBOL(generic_file_read_iter);
#ifdef CONFIG_MMU
#define MMAP_LOTSAMISS (100)
-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
- struct file *fpin)
-{
- int flags = vmf->flags;
-
- if (fpin)
- return fpin;
-
- /*
- * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
- * anything, so we only pin the file and drop the mmap_sem if only
- * FAULT_FLAG_ALLOW_RETRY is set.
- */
- if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
- FAULT_FLAG_ALLOW_RETRY) {
- fpin = get_file(vmf->vma->vm_file);
- up_read(&vmf->vma->vm_mm->mmap_sem);
- }
- return fpin;
-}
-
/*
* lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
* @vmf - the vm_fault for this fault.
@@ -3161,6 +3140,27 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
}
EXPORT_SYMBOL(pagecache_write_end);
+/*
+ * Warn about a page cache invalidation failure during a direct I/O write.
+ */
+void dio_warn_stale_pagecache(struct file *filp)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
+ char pathname[128];
+ struct inode *inode = file_inode(filp);
+ char *path;
+
+ errseq_set(&inode->i_mapping->wb_err, -EIO);
+ if (__ratelimit(&_rs)) {
+ path = file_path(filp, pathname, sizeof(pathname));
+ if (IS_ERR(path))
+ path = "(unknown)";
+ pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
+ pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
+ current->comm);
+ }
+}
+
ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
@@ -3218,11 +3218,15 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
* Most of the time we do not need this since dio_complete() will do
* the invalidation for us. However there are some file systems that
* do not end up with dio_complete() being called, so let's not break
- * them by removing it completely
+ * them by removing it completely.
+ *
+ * A noticeable example is blkdev_direct_IO().
+ *
+ * Skip invalidation for async writes or if mapping has no pages.
*/
- if (mapping->nrpages)
- invalidate_inode_pages2_range(mapping,
- pos >> PAGE_SHIFT, end);
+ if (written > 0 && mapping->nrpages &&
+ invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
+ dio_warn_stale_pagecache(file);
if (written > 0) {
pos += written;
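dio_warn_stale_pagecache() above deliberately limits itself to one burst of messages per day (86400 * HZ). The underlying ratelimit pattern, as a minimal sketch:

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(ex_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);

	/* __ratelimit() returns nonzero when this call may proceed. */
	if (__ratelimit(&ex_rs))
		pr_crit("rare, serious condition detected\n");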
diff --git a/mm/gup.c b/mm/gup.c
index 8f236a335ae9..7646bf993b25 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -734,11 +734,17 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* Or NULL if the caller does not require them.
* @nonblocking: whether waiting for disk IO or mmap_sem contention
*
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno. Each page returned must be released
- * with a put_page() call when it is finished with. vmas will only
- * remain valid while mmap_sem is held.
+ * Returns either number of pages pinned (which may be less than the
+ * number requested), or an error. Details about the return value:
+ *
+ * -- If nr_pages is 0, returns 0.
+ * -- If nr_pages is >0, but no pages were pinned, returns -errno.
+ * -- If nr_pages is >0, and some pages were pinned, returns the number of
+ * pages pinned. Again, this may be less than nr_pages.
+ *
+ * The caller is responsible for releasing returned @pages, via put_page().
+ *
+ * @vmas are valid only as long as mmap_sem is held.
*
* Must be called with mmap_sem held. It may be released. See below.
*
@@ -1107,11 +1113,17 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
* subsequently whether VM_FAULT_RETRY functionality can be
* utilised. Lock must initially be held.
*
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno. Each page returned must be released
- * with a put_page() call when it is finished with. vmas will only
- * remain valid while mmap_sem is held.
+ * Returns either number of pages pinned (which may be less than the
+ * number requested), or an error. Details about the return value:
+ *
+ * -- If nr_pages is 0, returns 0.
+ * -- If nr_pages is >0, but no pages were pinned, returns -errno.
+ * -- If nr_pages is >0, and some pages were pinned, returns the number of
+ * pages pinned. Again, this may be less than nr_pages.
+ *
+ * The caller is responsible for releasing returned @pages, via put_page().
+ *
+ * @vmas are valid only as long as mmap_sem is held.
*
* Must be called with mmap_sem held for read or write.
*
@@ -1443,6 +1455,7 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
bool drain_allow = true;
bool migrate_allow = true;
LIST_HEAD(cma_page_list);
+ long ret = nr_pages;
check_again:
for (i = 0; i < nr_pages;) {
@@ -1504,17 +1517,18 @@ check_again:
* again migrating any new CMA pages which we failed to isolate
* earlier.
*/
- nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages,
pages, vmas, NULL,
gup_flags);
- if ((nr_pages > 0) && migrate_allow) {
+ if ((ret > 0) && migrate_allow) {
+ nr_pages = ret;
drain_allow = true;
goto check_again;
}
}
- return nr_pages;
+ return ret;
}
#else
static long check_and_migrate_cma_pages(struct task_struct *tsk,
diff --git a/mm/hmm.c b/mm/hmm.c
index 902f5fa6bf93..d379cb6496ae 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -26,193 +26,6 @@
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>
-static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
-{
- struct hmm *hmm;
-
- hmm = kzalloc(sizeof(*hmm), GFP_KERNEL);
- if (!hmm)
- return ERR_PTR(-ENOMEM);
-
- init_waitqueue_head(&hmm->wq);
- INIT_LIST_HEAD(&hmm->mirrors);
- init_rwsem(&hmm->mirrors_sem);
- INIT_LIST_HEAD(&hmm->ranges);
- spin_lock_init(&hmm->ranges_lock);
- hmm->notifiers = 0;
- return &hmm->mmu_notifier;
-}
-
-static void hmm_free_notifier(struct mmu_notifier *mn)
-{
- struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
-
- WARN_ON(!list_empty(&hmm->ranges));
- WARN_ON(!list_empty(&hmm->mirrors));
- kfree(hmm);
-}
-
-static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
-{
- struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
- struct hmm_mirror *mirror;
-
- /*
- * Since hmm_range_register() holds the mmget() lock hmm_release() is
- * prevented as long as a range exists.
- */
- WARN_ON(!list_empty_careful(&hmm->ranges));
-
- down_read(&hmm->mirrors_sem);
- list_for_each_entry(mirror, &hmm->mirrors, list) {
- /*
- * Note: The driver is not allowed to trigger
- * hmm_mirror_unregister() from this thread.
- */
- if (mirror->ops->release)
- mirror->ops->release(mirror);
- }
- up_read(&hmm->mirrors_sem);
-}
-
-static void notifiers_decrement(struct hmm *hmm)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hmm->ranges_lock, flags);
- hmm->notifiers--;
- if (!hmm->notifiers) {
- struct hmm_range *range;
-
- list_for_each_entry(range, &hmm->ranges, list) {
- if (range->valid)
- continue;
- range->valid = true;
- }
- wake_up_all(&hmm->wq);
- }
- spin_unlock_irqrestore(&hmm->ranges_lock, flags);
-}
-
-static int hmm_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *nrange)
-{
- struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
- struct hmm_mirror *mirror;
- struct hmm_range *range;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hmm->ranges_lock, flags);
- hmm->notifiers++;
- list_for_each_entry(range, &hmm->ranges, list) {
- if (nrange->end < range->start || nrange->start >= range->end)
- continue;
-
- range->valid = false;
- }
- spin_unlock_irqrestore(&hmm->ranges_lock, flags);
-
- if (mmu_notifier_range_blockable(nrange))
- down_read(&hmm->mirrors_sem);
- else if (!down_read_trylock(&hmm->mirrors_sem)) {
- ret = -EAGAIN;
- goto out;
- }
-
- list_for_each_entry(mirror, &hmm->mirrors, list) {
- int rc;
-
- rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
- if (rc) {
- if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
- rc != -EAGAIN))
- continue;
- ret = -EAGAIN;
- break;
- }
- }
- up_read(&hmm->mirrors_sem);
-
-out:
- if (ret)
- notifiers_decrement(hmm);
- return ret;
-}
-
-static void hmm_invalidate_range_end(struct mmu_notifier *mn,
- const struct mmu_notifier_range *nrange)
-{
- struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
-
- notifiers_decrement(hmm);
-}
-
-static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
- .release = hmm_release,
- .invalidate_range_start = hmm_invalidate_range_start,
- .invalidate_range_end = hmm_invalidate_range_end,
- .alloc_notifier = hmm_alloc_notifier,
- .free_notifier = hmm_free_notifier,
-};
-
-/*
- * hmm_mirror_register() - register a mirror against an mm
- *
- * @mirror: new mirror struct to register
- * @mm: mm to register against
- * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
- *
- * To start mirroring a process address space, the device driver must register
- * an HMM mirror struct.
- *
- * The caller cannot unregister the hmm_mirror while any ranges are
- * registered.
- *
- * Callers using this function must put a call to mmu_notifier_synchronize()
- * in their module exit functions.
- */
-int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
-{
- struct mmu_notifier *mn;
-
- lockdep_assert_held_write(&mm->mmap_sem);
-
- /* Sanity check */
- if (!mm || !mirror || !mirror->ops)
- return -EINVAL;
-
- mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
- if (IS_ERR(mn))
- return PTR_ERR(mn);
- mirror->hmm = container_of(mn, struct hmm, mmu_notifier);
-
- down_write(&mirror->hmm->mirrors_sem);
- list_add(&mirror->list, &mirror->hmm->mirrors);
- up_write(&mirror->hmm->mirrors_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(hmm_mirror_register);
-
-/*
- * hmm_mirror_unregister() - unregister a mirror
- *
- * @mirror: mirror struct to unregister
- *
- * Stop mirroring a process address space, and cleanup.
- */
-void hmm_mirror_unregister(struct hmm_mirror *mirror)
-{
- struct hmm *hmm = mirror->hmm;
-
- down_write(&hmm->mirrors_sem);
- list_del(&mirror->list);
- up_write(&hmm->mirrors_sem);
- mmu_notifier_put(&hmm->mmu_notifier);
-}
-EXPORT_SYMBOL(hmm_mirror_unregister);
-
struct hmm_vma_walk {
struct hmm_range *range;
struct dev_pagemap *pgmap;
@@ -252,18 +65,15 @@ err:
return -EFAULT;
}
-static int hmm_pfns_bad(unsigned long addr,
- unsigned long end,
- struct mm_walk *walk)
+static int hmm_pfns_fill(unsigned long addr, unsigned long end,
+ struct hmm_range *range, enum hmm_pfn_value_e value)
{
- struct hmm_vma_walk *hmm_vma_walk = walk->private;
- struct hmm_range *range = hmm_vma_walk->range;
uint64_t *pfns = range->pfns;
unsigned long i;
i = (addr - range->start) >> PAGE_SHIFT;
for (; addr < end; addr += PAGE_SIZE, i++)
- pfns[i] = range->values[HMM_PFN_ERROR];
+ pfns[i] = range->values[value];
return 0;
}
@@ -532,8 +342,14 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
if (unlikely(!hmm_vma_walk->pgmap))
return -EBUSY;
} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
- *pfn = range->values[HMM_PFN_SPECIAL];
- return -EFAULT;
+ if (!is_zero_pfn(pte_pfn(pte))) {
+ *pfn = range->values[HMM_PFN_SPECIAL];
+ return -EFAULT;
+ }
+ /*
+ * Since each architecture defines a struct page for the zero
+ * page, just fall through and treat it like a normal page.
+ */
}
*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
@@ -584,7 +400,7 @@ again:
}
return 0;
} else if (!pmd_present(pmd))
- return hmm_pfns_bad(start, end, walk);
+ return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
/*
@@ -612,7 +428,7 @@ again:
* recover.
*/
if (pmd_bad(pmd))
- return hmm_pfns_bad(start, end, walk);
+ return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
ptep = pte_offset_map(pmdp, addr);
i = (addr - range->start) >> PAGE_SHIFT;
@@ -770,93 +586,55 @@ unlock:
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
-static void hmm_pfns_clear(struct hmm_range *range,
- uint64_t *pfns,
- unsigned long addr,
- unsigned long end)
-{
- for (; addr < end; addr += PAGE_SIZE, pfns++)
- *pfns = range->values[HMM_PFN_NONE];
-}
-
-/*
- * hmm_range_register() - start tracking change to CPU page table over a range
- * @range: range
- * @mm: the mm struct for the range of virtual address
- *
- * Return: 0 on success, -EFAULT if the address space is no longer valid
- *
- * Track updates to the CPU page table see include/linux/hmm.h
- */
-int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
+static int hmm_vma_walk_test(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
{
- struct hmm *hmm = mirror->hmm;
- unsigned long flags;
-
- range->valid = false;
- range->hmm = NULL;
-
- if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
- return -EINVAL;
- if (range->start >= range->end)
- return -EINVAL;
+ struct hmm_vma_walk *hmm_vma_walk = walk->private;
+ struct hmm_range *range = hmm_vma_walk->range;
+ struct vm_area_struct *vma = walk->vma;
- /* Prevent hmm_release() from running while the range is valid */
- if (!mmget_not_zero(hmm->mmu_notifier.mm))
+ /*
+ * Skip vma ranges that don't have struct page backing them or
+ * map I/O devices directly.
+ */
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
return -EFAULT;
- /* Initialize range to track CPU page table updates. */
- spin_lock_irqsave(&hmm->ranges_lock, flags);
-
- range->hmm = hmm;
- list_add(&range->list, &hmm->ranges);
-
/*
- * If there are any concurrent notifiers we have to wait for them for
- * the range to be valid (see hmm_range_wait_until_valid()).
+ * If the vma does not allow read access, then assume that it does not
+ * allow write access either. HMM does not support architectures
+ * that allow write without read.
*/
- if (!hmm->notifiers)
- range->valid = true;
- spin_unlock_irqrestore(&hmm->ranges_lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(hmm_range_register);
+ if (!(vma->vm_flags & VM_READ)) {
+ bool fault, write_fault;
-/*
- * hmm_range_unregister() - stop tracking change to CPU page table over a range
- * @range: range
- *
- * Range struct is used to track updates to the CPU page table after a call to
- * hmm_range_register(). See include/linux/hmm.h for how to use it.
- */
-void hmm_range_unregister(struct hmm_range *range)
-{
- struct hmm *hmm = range->hmm;
- unsigned long flags;
+ /*
+ * Check to see if a fault is requested for any page in the
+ * range.
+ */
+ hmm_range_need_fault(hmm_vma_walk, range->pfns +
+ ((start - range->start) >> PAGE_SHIFT),
+ (end - start) >> PAGE_SHIFT,
+ 0, &fault, &write_fault);
+ if (fault || write_fault)
+ return -EFAULT;
- spin_lock_irqsave(&hmm->ranges_lock, flags);
- list_del_init(&range->list);
- spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+ hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+ hmm_vma_walk->last = end;
- /* Drop reference taken by hmm_range_register() */
- mmput(hmm->mmu_notifier.mm);
+ /* Skip this vma and continue processing the next vma. */
+ return 1;
+ }
- /*
- * The range is now invalid and the ref on the hmm is dropped, so
- * poison the pointer. Leave other fields in place, for the caller's
- * use.
- */
- range->valid = false;
- memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
+ return 0;
}
-EXPORT_SYMBOL(hmm_range_unregister);
static const struct mm_walk_ops hmm_walk_ops = {
.pud_entry = hmm_vma_walk_pud,
.pmd_entry = hmm_vma_walk_pmd,
.pte_hole = hmm_vma_walk_hole,
.hugetlb_entry = hmm_vma_walk_hugetlb_entry,
+ .test_walk = hmm_vma_walk_test,
};
/**
@@ -889,210 +667,27 @@ static const struct mm_walk_ops hmm_walk_ops = {
*/
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
- const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
- unsigned long start = range->start, end;
- struct hmm_vma_walk hmm_vma_walk;
- struct hmm *hmm = range->hmm;
- struct vm_area_struct *vma;
+ struct hmm_vma_walk hmm_vma_walk = {
+ .range = range,
+ .last = range->start,
+ .flags = flags,
+ };
+ struct mm_struct *mm = range->notifier->mm;
int ret;
- lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
+ lockdep_assert_held(&mm->mmap_sem);
do {
/* If range is no longer valid force retry. */
- if (!range->valid)
+ if (mmu_interval_check_retry(range->notifier,
+ range->notifier_seq))
return -EBUSY;
+ ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
+ &hmm_walk_ops, &hmm_vma_walk);
+ } while (ret == -EBUSY);
- vma = find_vma(hmm->mmu_notifier.mm, start);
- if (vma == NULL || (vma->vm_flags & device_vma))
- return -EFAULT;
-
- if (!(vma->vm_flags & VM_READ)) {
- /*
- * If vma do not allow read access, then assume that it
- * does not allow write access, either. HMM does not
- * support architecture that allow write without read.
- */
- hmm_pfns_clear(range, range->pfns,
- range->start, range->end);
- return -EPERM;
- }
-
- hmm_vma_walk.pgmap = NULL;
- hmm_vma_walk.last = start;
- hmm_vma_walk.flags = flags;
- hmm_vma_walk.range = range;
- end = min(range->end, vma->vm_end);
-
- walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,
- &hmm_vma_walk);
-
- do {
- ret = walk_page_range(vma->vm_mm, start, end,
- &hmm_walk_ops, &hmm_vma_walk);
- start = hmm_vma_walk.last;
-
- /* Keep trying while the range is valid. */
- } while (ret == -EBUSY && range->valid);
-
- if (ret) {
- unsigned long i;
-
- i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
- hmm_pfns_clear(range, &range->pfns[i],
- hmm_vma_walk.last, range->end);
- return ret;
- }
- start = end;
-
- } while (start < range->end);
-
+ if (ret)
+ return ret;
return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
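With hmm_range_register()/hmm_range_unregister() gone, validity tracking moves into the caller via mmu_interval_notifier sequence counts: hmm_range_fault() returns -EBUSY once mmu_interval_check_retry() observes an invalidation, and the driver restarts with a fresh sequence. A hedged driver-side sketch (mni, range and the final recheck are hypothetical and heavily trimmed):

	long ret;

	range.notifier = &mni;	/* registered via mmu_interval_notifier_insert() */
	do {
		range.notifier_seq = mmu_interval_read_begin(&mni);
		down_read(&mm->mmap_sem);
		ret = hmm_range_fault(&range, 0);
		up_read(&mm->mmap_sem);
	} while (ret == -EBUSY);

	if (ret < 0)
		return ret;
	/*
	 * Before consuming range.pfns, take the driver lock and recheck
	 * mmu_interval_read_retry(&mni, range.notifier_seq).
	 */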
-
-/**
- * hmm_range_dma_map - hmm_range_fault() and dma map page all in one.
- * @range: range being faulted
- * @device: device to map page to
- * @daddrs: array of dma addresses for the mapped pages
- * @flags: HMM_FAULT_*
- *
- * Return: the number of pages mapped on success (including zero), or any
- * status return from hmm_range_fault() otherwise.
- */
-long hmm_range_dma_map(struct hmm_range *range, struct device *device,
- dma_addr_t *daddrs, unsigned int flags)
-{
- unsigned long i, npages, mapped;
- long ret;
-
- ret = hmm_range_fault(range, flags);
- if (ret <= 0)
- return ret ? ret : -EBUSY;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0, mapped = 0; i < npages; ++i) {
- enum dma_data_direction dir = DMA_TO_DEVICE;
- struct page *page;
-
- /*
- * FIXME need to update DMA API to provide invalid DMA address
- * value instead of a function to test dma address value. This
- * would remove lot of dumb code duplicated accross many arch.
- *
- * For now setting it to 0 here is good enough as the pfns[]
- * value is what is use to check what is valid and what isn't.
- */
- daddrs[i] = 0;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- /* Check if range is being invalidated */
- if (!range->valid) {
- ret = -EBUSY;
- goto unmap;
- }
-
- /* If it is read and write than map bi-directional. */
- if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
- dir = DMA_BIDIRECTIONAL;
-
- daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(device, daddrs[i])) {
- ret = -EFAULT;
- goto unmap;
- }
-
- mapped++;
- }
-
- return mapped;
-
-unmap:
- for (npages = i, i = 0; (i < npages) && mapped; ++i) {
- enum dma_data_direction dir = DMA_TO_DEVICE;
- struct page *page;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- if (dma_mapping_error(device, daddrs[i]))
- continue;
-
- /* If it is read and write than map bi-directional. */
- if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
- dir = DMA_BIDIRECTIONAL;
-
- dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
- mapped--;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(hmm_range_dma_map);
-
-/**
- * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
- * @range: range being unmapped
- * @device: device against which dma map was done
- * @daddrs: dma address of mapped pages
- * @dirty: dirty page if it had the write flag set
- * Return: number of page unmapped on success, -EINVAL otherwise
- *
- * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
- * to the sync_cpu_device_pagetables() callback so that it is safe here to
- * call set_page_dirty(). Caller must also take appropriate locks to avoid
- * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
- */
-long hmm_range_dma_unmap(struct hmm_range *range,
- struct device *device,
- dma_addr_t *daddrs,
- bool dirty)
-{
- unsigned long i, npages;
- long cpages = 0;
-
- /* Sanity check. */
- if (range->end <= range->start)
- return -EINVAL;
- if (!daddrs)
- return -EINVAL;
- if (!range->pfns)
- return -EINVAL;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- enum dma_data_direction dir = DMA_TO_DEVICE;
- struct page *page;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- /* If it is read and write than map bi-directional. */
- if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
- dir = DMA_BIDIRECTIONAL;
-
- /*
- * See comments in function description on why it is
- * safe here to call set_page_dirty()
- */
- if (dirty)
- set_page_dirty(page);
- }
-
- /* Unmap and clear pfns/dma address */
- dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
- range->pfns[i] = range->values[HMM_PFN_NONE];
- /* FIXME see comments in hmm_vma_dma_map() */
- daddrs[i] = 0;
- cpages++;
- }
-
- return cpages;
-}
-EXPORT_SYMBOL(hmm_range_dma_unmap);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 13cc93785006..41a0fbddc96b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3003,7 +3003,7 @@ next:
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
+DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
"%llu\n");
static int __init split_huge_pages_debugfs(void)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b45a95363a84..ac65bb5e38ac 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -244,16 +244,66 @@ struct file_region {
long to;
};
+/* Must be called with resv->lock held. Calling this with count_only == true
+ * will count the number of pages to be added but will not modify the linked
+ * list.
+ */
+static long add_reservation_in_range(struct resv_map *resv, long f, long t,
+ bool count_only)
+{
+ long chg = 0;
+ struct list_head *head = &resv->regions;
+ struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;
+
+ /* Locate the region we are before or in. */
+ list_for_each_entry(rg, head, link)
+ if (f <= rg->to)
+ break;
+
+ /* Round our left edge to the current segment if it encloses us. */
+ if (f > rg->from)
+ f = rg->from;
+
+ chg = t - f;
+
+ /* Check for and consume any regions we now overlap with. */
+ nrg = rg;
+ list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+ if (&rg->link == head)
+ break;
+ if (rg->from > t)
+ break;
+
+ /* We overlap with this area; if it extends further than
+ * us then we must extend ourselves. Account for its
+ * existing reservation.
+ */
+ if (rg->to > t) {
+ chg += rg->to - t;
+ t = rg->to;
+ }
+ chg -= rg->to - rg->from;
+
+ if (!count_only && rg != nrg) {
+ list_del(&rg->link);
+ kfree(rg);
+ }
+ }
+
+ if (!count_only) {
+ nrg->from = f;
+ nrg->to = t;
+ }
+
+ return chg;
+}
+
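Both callers now share add_reservation_in_range(): region_chg() runs it with count_only == true to price the range, and region_add() runs it with count_only == false to commit. The two-phase shape, as an illustrative sketch:

	long chg = region_chg(resv, f, t);	/* count + stock the region cache */

	if (chg < 0)
		return chg;			/* -ENOMEM */
	/* ... charge 'chg' pages against the subpool/quota ... */
	region_add(resv, f, t);			/* commit; may consume a cached entry */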
/*
* Add the huge page range represented by [f, t) to the reserve
- * map. In the normal case, existing regions will be expanded
- * to accommodate the specified range. Sufficient regions should
- * exist for expansion due to the previous call to region_chg
- * with the same range. However, it is possible that region_del
- * could have been called after region_chg and modifed the map
- * in such a way that no region exists to be expanded. In this
- * case, pull a region descriptor from the cache associated with
- * the map and use that for the new range.
+ * map. Existing regions will be expanded to accommodate the specified
+ * range, or a region will be taken from the cache. Sufficient regions
+ * must exist in the cache due to the previous call to region_chg with
+ * the same range.
*
* Return the number of new huge pages added to the map. This
* number is greater than or equal to zero.
@@ -261,7 +311,7 @@ struct file_region {
static long region_add(struct resv_map *resv, long f, long t)
{
struct list_head *head = &resv->regions;
- struct file_region *rg, *nrg, *trg;
+ struct file_region *rg, *nrg;
long add = 0;
spin_lock(&resv->lock);
@@ -272,9 +322,8 @@ static long region_add(struct resv_map *resv, long f, long t)
/*
* If no region exists which can be expanded to include the
- * specified range, the list must have been modified by an
- * interleving call to region_del(). Pull a region descriptor
- * from the cache and use it for this range.
+ * specified range, pull a region descriptor from the cache
+ * and use it for this range.
*/
if (&rg->link == head || t < rg->from) {
VM_BUG_ON(resv->region_cache_count <= 0);
@@ -292,38 +341,7 @@ static long region_add(struct resv_map *resv, long f, long t)
goto out_locked;
}
- /* Round our left edge to the current segment if it encloses us. */
- if (f > rg->from)
- f = rg->from;
-
- /* Check for and consume any regions we now overlap with. */
- nrg = rg;
- list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
- if (&rg->link == head)
- break;
- if (rg->from > t)
- break;
-
- /* If this area reaches higher then extend our area to
- * include it completely. If this is not the first area
- * which we intend to reuse, free it. */
- if (rg->to > t)
- t = rg->to;
- if (rg != nrg) {
- /* Decrement return value by the deleted range.
- * Another range will span this area so that by
- * end of routine add will be >= zero
- */
- add -= (rg->to - rg->from);
- list_del(&rg->link);
- kfree(rg);
- }
- }
-
- add += (nrg->from - f); /* Added to beginning of region */
- nrg->from = f;
- add += t - nrg->to; /* Added to end of region */
- nrg->to = t;
+ add = add_reservation_in_range(resv, f, t, false);
out_locked:
resv->adds_in_progress--;
@@ -339,15 +357,9 @@ out_locked:
* call to region_add that will actually modify the reserve
* map to add the specified range [f, t). region_chg does
* not change the number of huge pages represented by the
- * map. However, if the existing regions in the map can not
- * be expanded to represent the new range, a new file_region
- * structure is added to the map as a placeholder. This is
- * so that the subsequent region_add call will have all the
- * regions it needs and will not fail.
- *
- * Upon entry, region_chg will also examine the cache of region descriptors
- * associated with the map. If there are not enough descriptors cached, one
- * will be allocated for the in progress add operation.
+ * map. A new file_region structure is added to the cache
+ * as a placeholder, so that the subsequent region_add
+ * call will have all the regions it needs and will not fail.
*
* Returns the number of huge pages that need to be added to the existing
* reservation map for the range [f, t). This number is greater or equal to
@@ -356,11 +368,8 @@ out_locked:
*/
static long region_chg(struct resv_map *resv, long f, long t)
{
- struct list_head *head = &resv->regions;
- struct file_region *rg, *nrg = NULL;
long chg = 0;
-retry:
spin_lock(&resv->lock);
retry_locked:
resv->adds_in_progress++;
@@ -378,10 +387,8 @@ retry_locked:
spin_unlock(&resv->lock);
trg = kmalloc(sizeof(*trg), GFP_KERNEL);
- if (!trg) {
- kfree(nrg);
+ if (!trg)
return -ENOMEM;
- }
spin_lock(&resv->lock);
list_add(&trg->link, &resv->region_cache);
@@ -389,61 +396,8 @@ retry_locked:
goto retry_locked;
}
- /* Locate the region we are before or in. */
- list_for_each_entry(rg, head, link)
- if (f <= rg->to)
- break;
+ chg = add_reservation_in_range(resv, f, t, true);
- /* If we are below the current region then a new region is required.
- * Subtle, allocate a new region at the position but make it zero
- * size such that we can guarantee to record the reservation. */
- if (&rg->link == head || t < rg->from) {
- if (!nrg) {
- resv->adds_in_progress--;
- spin_unlock(&resv->lock);
- nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
- if (!nrg)
- return -ENOMEM;
-
- nrg->from = f;
- nrg->to = f;
- INIT_LIST_HEAD(&nrg->link);
- goto retry;
- }
-
- list_add(&nrg->link, rg->link.prev);
- chg = t - f;
- goto out_nrg;
- }
-
- /* Round our left edge to the current segment if it encloses us. */
- if (f > rg->from)
- f = rg->from;
- chg = t - f;
-
- /* Check for and consume any regions we now overlap with. */
- list_for_each_entry(rg, rg->link.prev, link) {
- if (&rg->link == head)
- break;
- if (rg->from > t)
- goto out;
-
- /* We overlap with this area, if it extends further than
- * us then we must extend ourselves. Account for its
- * existing reservation. */
- if (rg->to > t) {
- chg += rg->to - t;
- t = rg->to;
- }
- chg -= rg->to - rg->from;
- }
-
-out:
- spin_unlock(&resv->lock);
- /* We already know we raced and no longer need the new region */
- kfree(nrg);
- return chg;
-out_nrg:
spin_unlock(&resv->lock);
return chg;
}
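
The two hunks above collapse the duplicated merge logic of region_add() and region_chg() into add_reservation_in_range(). A minimal userspace sketch of the counting path (count_only == true), with the sorted reservation list modeled as an array — the helper name and the sample map are illustrative, not kernel code:

    #include <stdio.h>

    struct region { long from, to; };

    /* Pages newly covered when reserving [f, t) against a sorted list of
     * non-overlapping regions; mirrors the count_only == true behaviour. */
    static long chg_for_range(const struct region *rg, int n, long f, long t)
    {
        long chg = t - f;

        for (int i = 0; i < n; i++) {
            if (rg[i].to < f || rg[i].from > t)
                continue;                   /* disjoint region */
            if (rg[i].from < f) {           /* region encloses our left edge */
                chg += f - rg[i].from;
                f = rg[i].from;
            }
            if (rg[i].to > t) {             /* region extends past our right edge */
                chg += rg[i].to - t;
                t = rg[i].to;
            }
            chg -= rg[i].to - rg[i].from;   /* this part was already reserved */
        }
        return chg;
    }

    int main(void)
    {
        struct region map[] = { { 2, 4 }, { 8, 10 } };

        /* [0, 12) overlaps both regions: 12 - (4 - 2) - (10 - 8) = 8 new pages */
        printf("%ld\n", chg_for_range(map, 2, 0, 12));
        return 0;
    }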
@@ -1069,85 +1023,12 @@ static void free_gigantic_page(struct page *page, unsigned int order)
}
#ifdef CONFIG_CONTIG_ALLOC
-static int __alloc_gigantic_page(unsigned long start_pfn,
- unsigned long nr_pages, gfp_t gfp_mask)
-{
- unsigned long end_pfn = start_pfn + nr_pages;
- return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
- gfp_mask);
-}
-
-static bool pfn_range_valid_gigantic(struct zone *z,
- unsigned long start_pfn, unsigned long nr_pages)
-{
- unsigned long i, end_pfn = start_pfn + nr_pages;
- struct page *page;
-
- for (i = start_pfn; i < end_pfn; i++) {
- page = pfn_to_online_page(i);
- if (!page)
- return false;
-
- if (page_zone(page) != z)
- return false;
-
- if (PageReserved(page))
- return false;
-
- if (page_count(page) > 0)
- return false;
-
- if (PageHuge(page))
- return false;
- }
-
- return true;
-}
-
-static bool zone_spans_last_pfn(const struct zone *zone,
- unsigned long start_pfn, unsigned long nr_pages)
-{
- unsigned long last_pfn = start_pfn + nr_pages - 1;
- return zone_spans_pfn(zone, last_pfn);
-}
-
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
- unsigned int order = huge_page_order(h);
- unsigned long nr_pages = 1 << order;
- unsigned long ret, pfn, flags;
- struct zonelist *zonelist;
- struct zone *zone;
- struct zoneref *z;
+ unsigned long nr_pages = 1UL << huge_page_order(h);
- zonelist = node_zonelist(nid, gfp_mask);
- for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
- spin_lock_irqsave(&zone->lock, flags);
-
- pfn = ALIGN(zone->zone_start_pfn, nr_pages);
- while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
- if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
- /*
- * We release the zone lock here because
- * alloc_contig_range() will also lock the zone
- * at some point. If there's an allocation
- * spinning on this lock, it may win the race
- * and cause alloc_contig_range() to fail...
- */
- spin_unlock_irqrestore(&zone->lock, flags);
- ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
- if (!ret)
- return pfn_to_page(pfn);
- spin_lock_irqsave(&zone->lock, flags);
- }
- pfn += nr_pages;
- }
-
- spin_unlock_irqrestore(&zone->lock, flags);
- }
-
- return NULL;
+ return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
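
alloc_gigantic_page() now delegates to the generic alloc_contig_pages(), which performs the same kind of search the deleted code did by hand. A rough userspace sketch of that search — walking naturally aligned pfn windows and taking the first acceptable one; the zone bounds and the validity predicate are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static bool pfn_range_valid(unsigned long pfn, unsigned long nr)
    {
        (void)nr;
        return pfn >= 0x400;    /* pretend lower pfns are reserved */
    }

    int main(void)
    {
        unsigned long zone_start = 0x123, zone_end = 0x1000, nr_pages = 0x100;
        unsigned long pfn;

        for (pfn = ALIGN(zone_start, nr_pages);
             pfn + nr_pages <= zone_end; pfn += nr_pages) {
            if (pfn_range_valid(pfn, nr_pages)) {
                printf("gigantic page at pfn %#lx\n", pfn);
                return 0;
            }
        }
        printf("no suitable range\n");
        return 0;
    }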
@@ -3915,7 +3796,7 @@ retry:
* handling userfault. Reacquire after handling
* fault to make calling code simpler.
*/
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -4042,8 +3923,7 @@ backout_unlocked:
}
#ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address)
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
unsigned long key[2];
u32 hash;
@@ -4051,7 +3931,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
key[0] = (unsigned long) mapping;
key[1] = idx;
- hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
+ hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
return hash & (num_fault_mutexes - 1);
}
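
With the hstate and address arguments dropped, the fault mutex hash keys purely off the (mapping, idx) pair. A userspace sketch of the scheme, with splitmix64 standing in for the kernel's jhash2 and an assumed table of 64 mutexes (num_fault_mutexes must stay a power of two for the final mask to work):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_FAULT_MUTEXES 64u    /* assumed; must be a power of two */

    static uint64_t mix(uint64_t x)  /* splitmix64 finalizer as a stand-in */
    {
        x += 0x9e3779b97f4a7c15ull;
        x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9ull;
        x = (x ^ (x >> 27)) * 0x94d049bb133111ebull;
        return x ^ (x >> 31);
    }

    static uint32_t fault_mutex_hash(const void *mapping, uint64_t idx)
    {
        uint64_t key = mix((uint64_t)(uintptr_t)mapping ^ mix(idx));

        return (uint32_t)key & (NUM_FAULT_MUTEXES - 1);
    }

    int main(void)
    {
        int dummy;

        printf("hash = %u\n", fault_mutex_hash(&dummy, 42));
        return 0;
    }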
@@ -4060,8 +3940,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 * For uniprocessor systems we always use a single mutex, so just
* return 0 and avoid the hashing overhead.
*/
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address)
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
return 0;
}
@@ -4105,7 +3984,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
@@ -4459,6 +4338,21 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
break;
}
}
+
+ /*
+ * If subpage information not requested, update counters
+ * and skip the same_page loop below.
+ */
+ if (!pages && !vmas && !pfn_offset &&
+ (vaddr + huge_page_size(h) < vma->vm_end) &&
+ (remainder >= pages_per_huge_page(h))) {
+ vaddr += huge_page_size(h);
+ remainder -= pages_per_huge_page(h);
+ i += pages_per_huge_page(h);
+ spin_unlock(ptl);
+ continue;
+ }
+
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
@@ -4842,7 +4736,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
if (!vma_shareable(vma, addr))
return (pte_t *)pmd_alloc(mm, pud, addr);
- i_mmap_lock_write(mapping);
+ i_mmap_lock_read(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
@@ -4872,7 +4766,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
- i_mmap_unlock_write(mapping);
+ i_mmap_unlock_read(mapping);
return pte;
}
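
huge_pmd_share() only reads the interval tree in this path, so taking i_mmap_rwsem for read lets concurrent faults on the same file scan for a shareable PMD in parallel. A toy pthread analogue of the read-side pattern; the shared array stands in for the interval tree (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t i_mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int shared_tree[4] = { 1, 2, 3, 4 };

    static void *fault_path(void *arg)
    {
        long sum = 0;

        /* Read side: many faulting threads may scan simultaneously. */
        pthread_rwlock_rdlock(&i_mmap_lock);
        for (int i = 0; i < 4; i++)
            sum += shared_tree[i];
        pthread_rwlock_unlock(&i_mmap_lock);

        printf("thread %ld scanned, sum=%ld\n", (long)arg, sum);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, fault_path, (void *)i);
        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }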
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 5b7430bd83a6..e488876b168a 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -67,8 +67,8 @@ static int hwpoison_unpoison(void *data, u64 val)
return unpoison_memory(val);
}
-DEFINE_SIMPLE_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n");
-DEFINE_SIMPLE_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison, "%lli\n");
+DEFINE_DEBUGFS_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n");
+DEFINE_DEBUGFS_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison, "%lli\n");
static void pfn_inject_exit(void)
{
diff --git a/mm/internal.h b/mm/internal.h
index 0d5f720c75ab..3cf20ab3ca01 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -165,6 +165,9 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags);
extern int user_min_free_kbytes;
+extern void zone_pcp_update(struct zone *zone);
+extern void zone_pcp_reset(struct zone *zone);
+
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/*
@@ -290,7 +293,8 @@ static inline bool is_data_mapping(vm_flags_t flags)
/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node *rb_parent);
+ struct vm_area_struct *prev);
+void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
@@ -362,6 +366,27 @@ vma_address(struct page *page, struct vm_area_struct *vma)
return max(start, vma->vm_start);
}
+static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+ struct file *fpin)
+{
+ int flags = vmf->flags;
+
+ if (fpin)
+ return fpin;
+
+ /*
+ * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+ * anything, so we only pin the file and drop the mmap_sem if only
+ * FAULT_FLAG_ALLOW_RETRY is set.
+ */
+ if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+ FAULT_FLAG_ALLOW_RETRY) {
+ fpin = get_file(vmf->vma->vm_file);
+ up_read(&vmf->vma->vm_mm->mmap_sem);
+ }
+ return fpin;
+}
+
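
maybe_unlock_mmap_for_io() drops mmap_sem only when the caller allows retries and has not asked for NOWAIT semantics. A userspace sketch of just the flag test; the flag values are assumptions for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define FAULT_FLAG_ALLOW_RETRY  0x04    /* assumed values */
    #define FAULT_FLAG_RETRY_NOWAIT 0x08

    static bool may_drop_mmap_sem(int flags)
    {
        return (flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
                FAULT_FLAG_ALLOW_RETRY;
    }

    int main(void)
    {
        printf("%d\n", may_drop_mmap_sem(FAULT_FLAG_ALLOW_RETRY));      /* 1 */
        printf("%d\n", may_drop_mmap_sem(FAULT_FLAG_ALLOW_RETRY |
                                         FAULT_FLAG_RETRY_NOWAIT));     /* 0 */
        return 0;
    }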
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6814d6d6a023..df3371d5c572 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -36,6 +36,8 @@
#include <linux/bug.h>
#include <linux/uaccess.h>
+#include <asm/tlbflush.h>
+
#include "kasan.h"
#include "../slab.h"
@@ -590,6 +592,7 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
/* The object will be poisoned by page_alloc. */
}
+#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size)
{
void *ret;
@@ -625,6 +628,7 @@ void kasan_free_shadow(const struct vm_struct *vm)
if (vm->flags & VM_KASAN)
vfree(kasan_mem_to_shadow(vm->addr));
}
+#endif
extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
@@ -744,3 +748,232 @@ static int __init kasan_memhotplug_init(void)
core_initcall(kasan_memhotplug_init);
#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
+ void *unused)
+{
+ unsigned long page;
+ pte_t pte;
+
+ if (likely(!pte_none(*ptep)))
+ return 0;
+
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
+ pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+
+ spin_lock(&init_mm.page_table_lock);
+ if (likely(pte_none(*ptep))) {
+ set_pte_at(&init_mm, addr, ptep, pte);
+ page = 0;
+ }
+ spin_unlock(&init_mm.page_table_lock);
+ if (page)
+ free_page(page);
+ return 0;
+}
+
+int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
+{
+ unsigned long shadow_start, shadow_end;
+ int ret;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr);
+ shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
+ shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr +
+ area->size);
+ shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+
+ ret = apply_to_page_range(&init_mm, shadow_start,
+ shadow_end - shadow_start,
+ kasan_populate_vmalloc_pte, NULL);
+ if (ret)
+ return ret;
+
+ flush_cache_vmap(shadow_start, shadow_end);
+
+ kasan_unpoison_shadow(area->addr, requested_size);
+
+ area->flags |= VM_KASAN;
+
+ /*
+ * We need to be careful about inter-cpu effects here. Consider:
+ *
+ * CPU#0 CPU#1
+ * WRITE_ONCE(p, vmalloc(100)); while (x = READ_ONCE(p)) ;
+ * p[99] = 1;
+ *
+ * With compiler instrumentation, that ends up looking like this:
+ *
+ * CPU#0 CPU#1
+ * // vmalloc() allocates memory
+ * // let a = area->addr
+ * // we reach kasan_populate_vmalloc
+ * // and call kasan_unpoison_shadow:
+ * STORE shadow(a), unpoison_val
+ * ...
+ * STORE shadow(a+99), unpoison_val x = LOAD p
+ * // rest of vmalloc process <data dependency>
+ * STORE p, a LOAD shadow(x+99)
+ *
+	 * If there is no barrier between the end of unpoisoning the shadow
+ * and the store of the result to p, the stores could be committed
+ * in a different order by CPU#0, and CPU#1 could erroneously observe
+ * poison in the shadow.
+ *
+ * We need some sort of barrier between the stores.
+ *
+ * In the vmalloc() case, this is provided by a smp_wmb() in
+ * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
+ * get_vm_area() and friends, the caller gets shadow allocated but
+ * doesn't have any pages mapped into the virtual address space that
+ * has been reserved. Mapping those pages in will involve taking and
+ * releasing a page-table lock, which will provide the barrier.
+ */
+
+ return 0;
+}
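
The ordering requirement described in the comment above can be modeled in userspace with C11 release/acquire in place of the kernel's smp_wmb() and dependent load — a sketch of the pairing, not the kernel mechanism:

    #include <stdatomic.h>
    #include <stdio.h>

    static unsigned char shadow[100];
    static _Atomic(unsigned char *) p;

    static void producer(void)
    {
        for (int i = 0; i < 100; i++)
            shadow[i] = 0;                          /* "unpoison" the shadow */
        unsigned char *buf = shadow;                /* "vmalloc() result" */
        atomic_store_explicit(&p, buf, memory_order_release);
    }

    static void consumer(void)
    {
        unsigned char *buf;

        while (!(buf = atomic_load_explicit(&p, memory_order_acquire)))
            ;
        printf("shadow[99] = %d\n", buf[99]);       /* guaranteed 0 */
    }

    int main(void)
    {
        producer();    /* single-threaded demo; run these on two threads
                          to actually exercise the ordering */
        consumer();
        return 0;
    }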
+
+/*
+ * Poison the shadow for a vmalloc region. Called as part of the
+ * freeing process at the time the region is freed.
+ */
+void kasan_poison_vmalloc(void *start, unsigned long size)
+{
+ size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
+}
+
+static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
+ void *unused)
+{
+ unsigned long page;
+
+ page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);
+
+ spin_lock(&init_mm.page_table_lock);
+
+ if (likely(!pte_none(*ptep))) {
+ pte_clear(&init_mm, addr, ptep);
+ free_page(page);
+ }
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+/*
+ * Release the backing for the vmalloc region [start, end), which
+ * lies within the free region [free_region_start, free_region_end).
+ *
+ * This can be run lazily, long after the region was freed. It runs
+ * under free_vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
+ * infrastructure.
+ *
+ * How does this work?
+ * -------------------
+ *
+ * We have a region that is page aligned, labelled as A.
+ * That might not map onto the shadow in a way that is page-aligned:
+ *
+ * start end
+ * v v
+ * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
+ * -------- -------- -------- -------- --------
+ * | | | | |
+ * | | | /-------/ |
+ * \-------\|/------/ |/---------------/
+ * ||| ||
+ * |??AAAAAA|AAAAAAAA|AA??????| < shadow
+ * (1) (2) (3)
+ *
+ * First we align the start upwards and the end downwards, so that the
+ * shadow of the region aligns with shadow page boundaries. In the
+ * example, this gives us the shadow page (2). This is the shadow entirely
+ * covered by this allocation.
+ *
+ * Then we have the tricky bits. We want to know if we can free the
+ * partially covered shadow pages - (1) and (3) in the example. For this,
+ * we are given the start and end of the free region that contains this
+ * allocation. Extending our previous example, we could have:
+ *
+ * free_region_start free_region_end
+ * | start end |
+ * v v v v
+ * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
+ * -------- -------- -------- -------- --------
+ * | | | | |
+ * | | | /-------/ |
+ * \-------\|/------/ |/---------------/
+ * ||| ||
+ * |FFAAAAAA|AAAAAAAA|AAF?????| < shadow
+ * (1) (2) (3)
+ *
+ * Once again, we align the start of the free region up, and the end of
+ * the free region down so that the shadow is page aligned. So we can free
+ * page (1) - we know no allocation currently uses anything in that page,
+ * because all of it is in the vmalloc free region. But we cannot free
+ * page (3), because we can't be sure that the rest of it is unused.
+ *
+ * We only consider pages that contain part of the original region for
+ * freeing: we don't try to free other pages from the free region or we'd
+ * end up trying to free huge chunks of virtual address space.
+ *
+ * Concurrency
+ * -----------
+ *
+ * How do we know that we're not freeing a page that is simultaneously
+ * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
+ *
+ * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
+ * at the same time. While we run under free_vmap_area_lock, the population
+ * code does not.
+ *
+ * free_vmap_area_lock instead operates to ensure that the larger range
+ * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
+ * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
+ * no space identified as free will become used while we are running. This
+ * means that so long as we are careful with alignment and only free shadow
+ * pages entirely covered by the free region, we will not run in to any
+ * trouble - any simultaneous allocations will be for disjoint regions.
+ */
+void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end)
+{
+ void *shadow_start, *shadow_end;
+ unsigned long region_start, region_end;
+
+ region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+
+ free_region_start = ALIGN(free_region_start,
+ PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+
+ if (start != region_start &&
+ free_region_start < region_start)
+ region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+
+ free_region_end = ALIGN_DOWN(free_region_end,
+ PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+
+ if (end != region_end &&
+ free_region_end > region_end)
+ region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+
+ shadow_start = kasan_mem_to_shadow((void *)region_start);
+ shadow_end = kasan_mem_to_shadow((void *)region_end);
+
+ if (shadow_end > shadow_start) {
+ apply_to_page_range(&init_mm, (unsigned long)shadow_start,
+ (unsigned long)(shadow_end - shadow_start),
+ kasan_depopulate_vmalloc_pte, NULL);
+ flush_tlb_kernel_range((unsigned long)shadow_start,
+ (unsigned long)shadow_end);
+ }
+}
+#endif
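
The alignment dance in kasan_release_vmalloc() is subtle enough to deserve a worked example. A userspace model of just the arithmetic, assuming PAGE_SIZE 4096 and KASAN_SHADOW_SCALE_SIZE 8 (so one shadow page covers 32KiB of vmalloc space); the addresses in main() are invented:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul
    #define SCALE 8ul                       /* KASAN_SHADOW_SCALE_SIZE */
    #define COVER (PAGE_SIZE * SCALE)       /* vmalloc bytes per shadow page */

    #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
        unsigned long start = 0x12000, end = 0x2a000;          /* freed region */
        unsigned long free_start = 0x8000, free_end = 0x30000; /* enclosing free area */
        unsigned long region_start = ALIGN_UP(start, COVER);
        unsigned long region_end = ALIGN_DOWN(end, COVER);

        /* Grow into a partially covered shadow page only when the whole
         * span it covers lies inside the free region. */
        if (start != region_start && ALIGN_UP(free_start, COVER) < region_start)
            region_start -= COVER;
        if (end != region_end && ALIGN_DOWN(free_end, COVER) > region_end)
            region_end += COVER;

        printf("release shadow backing [%#lx, %#lx)\n", region_start, region_end);
        return 0;
    }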
diff --git a/mm/kasan/generic_report.c b/mm/kasan/generic_report.c
index 36c645939bc9..2d97efd4954f 100644
--- a/mm/kasan/generic_report.c
+++ b/mm/kasan/generic_report.c
@@ -86,6 +86,9 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
break;
+ case KASAN_VMALLOC_INVALID:
+ bug_type = "vmalloc-out-of-bounds";
+ break;
}
return bug_type;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 35cff6bbb716..3a083274628e 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -25,6 +25,7 @@
#endif
#define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
+#define KASAN_VMALLOC_INVALID 0xF9 /* unallocated space in vmapped page */
/*
* Stack redzone shadow values
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a8a57bebb5fa..b679908743cb 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1602,6 +1602,24 @@ static void collapse_file(struct mm_struct *mm,
result = SCAN_FAIL;
goto xa_unlocked;
}
+ } else if (PageDirty(page)) {
+ /*
+				 * khugepaged only works on a read-only fd,
+				 * so this page is dirty because it hasn't
+				 * been flushed since the first write. There
+ * won't be new dirty pages.
+ *
+ * Trigger async flush here and hope the
+ * writeback is done when khugepaged
+ * revisits this page.
+ *
+ * This is a one-off situation. We are not
+				 * forcing writeback in a loop.
+ */
+ xas_unlock_irq(&xas);
+ filemap_flush(mapping);
+ result = SCAN_FAIL;
+ goto xa_unlocked;
} else if (trylock_page(page)) {
get_page(page);
xas_unlock_irq(&xas);
diff --git a/mm/madvise.c b/mm/madvise.c
index 94c343b4c968..bcdb6a042787 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -864,13 +864,13 @@ static int madvise_inject_error(int behavior,
{
struct page *page;
struct zone *zone;
- unsigned int order;
+ unsigned long size;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- for (; start < end; start += PAGE_SIZE << order) {
+ for (; start < end; start += size) {
unsigned long pfn;
int ret;
@@ -882,9 +882,9 @@ static int madvise_inject_error(int behavior,
/*
* When soft offlining hugepages, after migrating the page
* we dissolve it, therefore in the second loop "page" will
- * no longer be a compound page, and order will be 0.
+ * no longer be a compound page.
*/
- order = compound_order(compound_head(page));
+ size = page_size(compound_head(page));
if (PageHWPoison(page)) {
put_page(page);
@@ -895,7 +895,7 @@ static int madvise_inject_error(int behavior,
pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
pfn, start);
- ret = soft_offline_page(page, MF_COUNT_INCREASED);
+ ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
if (ret)
return ret;
continue;
@@ -1059,9 +1059,9 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
if (!madvise_behavior_valid(behavior))
return error;
- if (start & ~PAGE_MASK)
+ if (!PAGE_ALIGNED(start))
return error;
- len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+ len = PAGE_ALIGN(len_in);
/* Check to see whether len was rounded up from small -ve to zero */
if (len_in && !len)
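
PAGE_ALIGNED() and PAGE_ALIGN() express the same mask arithmetic the old open-coded tests did. A tiny userspace demonstration, assuming a 4KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul
    #define PAGE_MASK (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)
    #define PAGE_ALIGNED(x) (((x) & (PAGE_SIZE - 1)) == 0)

    int main(void)
    {
        printf("PAGE_ALIGNED(0x2000) = %d\n", PAGE_ALIGNED(0x2000ul)); /* 1 */
        printf("PAGE_ALIGNED(0x2001) = %d\n", PAGE_ALIGNED(0x2001ul)); /* 0 */
        printf("PAGE_ALIGN(0x2001)   = %#lx\n", PAGE_ALIGN(0x2001ul)); /* 0x3000 */
        return 0;
    }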
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
new file mode 100644
index 000000000000..71070dda9643
--- /dev/null
+++ b/mm/mapping_dirty_helpers.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pagewalk.h>
+#include <linux/hugetlb.h>
+#include <linux/bitops.h>
+#include <linux/mmu_notifier.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+/**
+ * struct wp_walk - Private struct for pagetable walk callbacks
+ * @range: Range for mmu notifiers
+ * @tlbflush_start: Address of first modified pte
+ * @tlbflush_end: Address of last modified pte + 1
+ * @total: Total number of modified ptes
+ */
+struct wp_walk {
+ struct mmu_notifier_range range;
+ unsigned long tlbflush_start;
+ unsigned long tlbflush_end;
+ unsigned long total;
+};
+
+/**
+ * wp_pte - Write-protect a pte
+ * @pte: Pointer to the pte
+ * @addr: The virtual page address
+ * @walk: pagetable walk callback argument
+ *
+ * The function write-protects a pte and records the range in
+ * virtual address space of touched ptes for efficient range TLB flushes.
+ */
+static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct wp_walk *wpwalk = walk->private;
+ pte_t ptent = *pte;
+
+ if (pte_write(ptent)) {
+ pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);
+
+ ptent = pte_wrprotect(old_pte);
+ ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
+ wpwalk->total++;
+ wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
+ wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
+ addr + PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+/**
+ * struct clean_walk - Private struct for the clean_record_pte function.
+ * @base: struct wp_walk we derive from
+ * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
+ * @bitmap: Bitmap with one bit for each page offset in the address_space range
+ * covered.
+ * @start: Address_space page offset of first modified pte relative
+ * to @bitmap_pgoff
+ * @end: Address_space page offset of last modified pte relative
+ * to @bitmap_pgoff
+ */
+struct clean_walk {
+ struct wp_walk base;
+ pgoff_t bitmap_pgoff;
+ unsigned long *bitmap;
+ pgoff_t start;
+ pgoff_t end;
+};
+
+#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)
+
+/**
+ * clean_record_pte - Clean a pte and record its address space offset in a
+ * bitmap
+ * @pte: Pointer to the pte
+ * @addr: The virtual page address
+ * @walk: pagetable walk callback argument
+ *
+ * The function cleans a pte and records the range in
+ * virtual address space of touched ptes for efficient TLB flushes.
+ * It also records dirty ptes in a bitmap representing page offsets
+ * in the address_space, as well as the first and last of the bits
+ * touched.
+ */
+static int clean_record_pte(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct wp_walk *wpwalk = walk->private;
+ struct clean_walk *cwalk = to_clean_walk(wpwalk);
+ pte_t ptent = *pte;
+
+ if (pte_dirty(ptent)) {
+ pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
+ walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
+ pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);
+
+ ptent = pte_mkclean(old_pte);
+ ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
+
+ wpwalk->total++;
+ wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
+ wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
+ addr + PAGE_SIZE);
+
+ __set_bit(pgoff, cwalk->bitmap);
+ cwalk->start = min(cwalk->start, pgoff);
+ cwalk->end = max(cwalk->end, pgoff + 1);
+ }
+
+ return 0;
+}
+
+/* wp_clean_pmd_entry - The pagewalk pmd callback. */
+static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ /* Dirty-tracking should be handled on the pte level */
+ pmd_t pmdval = pmd_read_atomic(pmd);
+
+ if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
+ WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
+
+ return 0;
+}
+
+/* wp_clean_pud_entry - The pagewalk pud callback. */
+static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ /* Dirty-tracking should be handled on the pte level */
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_trans_huge(pudval) || pud_devmap(pudval))
+ WARN_ON(pud_write(pudval) || pud_dirty(pudval));
+
+ return 0;
+}
+
+/*
+ * wp_clean_pre_vma - The pagewalk pre_vma callback.
+ *
+ * The pre_vma callback performs the cache flush, stages the tlb flush
+ * and calls the necessary mmu notifiers.
+ */
+static int wp_clean_pre_vma(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct wp_walk *wpwalk = walk->private;
+
+ wpwalk->tlbflush_start = end;
+ wpwalk->tlbflush_end = start;
+
+ mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
+ walk->vma, walk->mm, start, end);
+ mmu_notifier_invalidate_range_start(&wpwalk->range);
+ flush_cache_range(walk->vma, start, end);
+
+ /*
+ * We're not using tlb_gather_mmu() since typically
+ * only a small subrange of PTEs are affected, whereas
+ * tlb_gather_mmu() records the full range.
+ */
+ inc_tlb_flush_pending(walk->mm);
+
+ return 0;
+}
+
+/*
+ * wp_clean_post_vma - The pagewalk post_vma callback.
+ *
+ * The post_vma callback performs the tlb flush and calls necessary mmu
+ * notifiers.
+ */
+static void wp_clean_post_vma(struct mm_walk *walk)
+{
+ struct wp_walk *wpwalk = walk->private;
+
+ if (mm_tlb_flush_nested(walk->mm))
+ flush_tlb_range(walk->vma, wpwalk->range.start,
+ wpwalk->range.end);
+ else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
+ flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
+ wpwalk->tlbflush_end);
+
+ mmu_notifier_invalidate_range_end(&wpwalk->range);
+ dec_tlb_flush_pending(walk->mm);
+}
+
+/*
+ * wp_clean_test_walk - The pagewalk test_walk callback.
+ *
+ * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
+ */
+static int wp_clean_test_walk(unsigned long start, unsigned long end,
+ struct mm_walk *walk)
+{
+ unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);
+
+ /* Skip non-applicable VMAs */
+ if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
+ (VM_SHARED | VM_MAYWRITE))
+ return 1;
+
+ return 0;
+}
+
+static const struct mm_walk_ops clean_walk_ops = {
+ .pte_entry = clean_record_pte,
+ .pmd_entry = wp_clean_pmd_entry,
+ .pud_entry = wp_clean_pud_entry,
+ .test_walk = wp_clean_test_walk,
+ .pre_vma = wp_clean_pre_vma,
+ .post_vma = wp_clean_post_vma
+};
+
+static const struct mm_walk_ops wp_walk_ops = {
+ .pte_entry = wp_pte,
+ .pmd_entry = wp_clean_pmd_entry,
+ .pud_entry = wp_clean_pud_entry,
+ .test_walk = wp_clean_test_walk,
+ .pre_vma = wp_clean_pre_vma,
+ .post_vma = wp_clean_post_vma
+};
+
+/**
+ * wp_shared_mapping_range - Write-protect all ptes in an address space range
+ * @mapping: The address_space we want to write protect
+ * @first_index: The first page offset in the range
+ * @nr: Number of incremental page offsets to cover
+ *
+ * Note: This function currently skips transhuge page-table entries, since
+ * it's intended for dirty-tracking on the PTE level. It will warn on
+ * encountering transhuge write-enabled entries, though, and can easily be
+ * extended to handle them as well.
+ *
+ * Return: The number of ptes actually write-protected. Note that
+ * already write-protected ptes are not counted.
+ */
+unsigned long wp_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr)
+{
+ struct wp_walk wpwalk = { .total = 0 };
+
+ i_mmap_lock_read(mapping);
+ WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
+ &wpwalk));
+ i_mmap_unlock_read(mapping);
+
+ return wpwalk.total;
+}
+EXPORT_SYMBOL_GPL(wp_shared_mapping_range);
+
+/**
+ * clean_record_shared_mapping_range - Clean and record all ptes in an
+ * address space range
+ * @mapping: The address_space we want to clean
+ * @first_index: The first page offset in the range
+ * @nr: Number of incremental page offsets to cover
+ * @bitmap_pgoff: The page offset of the first bit in @bitmap
+ * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
+ * cover the whole range @first_index..@first_index + @nr.
+ * @start: Pointer to the page offset of the first set bit in @bitmap;
+ * modified as new bits are set by the function.
+ * @end: Pointer to one past the page offset of the last set bit in @bitmap;
+ * @*start >= @*end means none were set on entry. Modified as new bits are
+ * set by the function.
+ *
+ * Note: When this function returns there is no guarantee that a CPU has
+ * not already dirtied new ptes. However it will not clean any ptes not
+ * reported in the bitmap. The guarantees are as follows:
+ * a) All ptes dirty when the function starts executing will end up recorded
+ * in the bitmap.
+ * b) All ptes dirtied after that will either remain dirty, be recorded in the
+ * bitmap or both.
+ *
+ * If a caller needs to make sure all dirty ptes are picked up and none
+ * additional are added, it first needs to write-protect the address-space
+ * range and make sure new writers are blocked in page_mkwrite() or
+ * pfn_mkwrite(). Then, after a TLB flush following the write-protection,
+ * pick up all dirty bits.
+ *
+ * Note: This function currently skips transhuge page-table entries, since
+ * it's intended for dirty-tracking on the PTE level. It will warn on
+ * encountering transhuge dirty entries, though, and can easily be extended
+ * to handle them as well.
+ *
+ * Return: The number of dirty ptes actually cleaned.
+ */
+unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr,
+ pgoff_t bitmap_pgoff,
+ unsigned long *bitmap,
+ pgoff_t *start,
+ pgoff_t *end)
+{
+ bool none_set = (*start >= *end);
+ struct clean_walk cwalk = {
+ .base = { .total = 0 },
+ .bitmap_pgoff = bitmap_pgoff,
+ .bitmap = bitmap,
+ .start = none_set ? nr : *start,
+ .end = none_set ? 0 : *end,
+ };
+
+ i_mmap_lock_read(mapping);
+ WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
+ &cwalk.base));
+ i_mmap_unlock_read(mapping);
+
+ *start = cwalk.start;
+ *end = cwalk.end;
+
+ return cwalk.base.total;
+}
+EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
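
A userspace sketch of clean_record_pte()'s bookkeeping — clear the dirty bit, record the offset in the bitmap, and widen the [start, end) span — with the ptes modeled as a byte array and the bitmap as a single word:

    #include <stdio.h>

    #define NPAGES 16

    int main(void)
    {
        unsigned char dirty[NPAGES] = { 0 };
        unsigned long bitmap = 0;               /* one bit per page offset */
        unsigned long start = NPAGES, end = 0;  /* "none set" encoding */

        dirty[3] = dirty[7] = dirty[8] = 1;

        for (unsigned long pgoff = 0; pgoff < NPAGES; pgoff++) {
            if (!dirty[pgoff])
                continue;
            dirty[pgoff] = 0;                   /* pte_mkclean() analogue */
            bitmap |= 1ul << pgoff;             /* __set_bit() analogue */
            if (pgoff < start)
                start = pgoff;
            if (pgoff + 1 > end)
                end = pgoff + 1;
        }
        /* expect bitmap=0x188 range=[3, 9) */
        printf("bitmap=%#lx range=[%lu, %lu)\n", bitmap, start, end);
        return 0;
    }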
diff --git a/mm/memblock.c b/mm/memblock.c
index c4b16cae2bc9..4bc2c7d8bf42 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -57,42 +57,38 @@
* at build time. The region arrays for the "memory" and "reserved"
* types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
* "physmap" type to %INIT_PHYSMEM_REGIONS.
- * The :c:func:`memblock_allow_resize` enables automatic resizing of
- * the region arrays during addition of new regions. This feature
- * should be used with care so that memory allocated for the region
- * array will not overlap with areas that should be reserved, for
- * example initrd.
+ * The memblock_allow_resize() enables automatic resizing of the region
+ * arrays during addition of new regions. This feature should be used
+ * with care so that memory allocated for the region array will not
+ * overlap with areas that should be reserved, for example initrd.
*
* The early architecture setup should tell memblock what the physical
- * memory layout is by using :c:func:`memblock_add` or
- * :c:func:`memblock_add_node` functions. The first function does not
- * assign the region to a NUMA node and it is appropriate for UMA
- * systems. Yet, it is possible to use it on NUMA systems as well and
- * assign the region to a NUMA node later in the setup process using
- * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
- * performs such an assignment directly.
+ * memory layout is by using memblock_add() or memblock_add_node()
+ * functions. The first function does not assign the region to a NUMA
+ * node and it is appropriate for UMA systems. Yet, it is possible to
+ * use it on NUMA systems as well and assign the region to a NUMA node
+ * later in the setup process using memblock_set_node(). The
+ * memblock_add_node() performs such an assignment directly.
*
* Once memblock is setup the memory can be allocated using one of the
* API variants:
*
- * * :c:func:`memblock_phys_alloc*` - these functions return the
- * **physical** address of the allocated memory
- * * :c:func:`memblock_alloc*` - these functions return the **virtual**
- * address of the allocated memory.
+ * * memblock_phys_alloc*() - these functions return the **physical**
+ * address of the allocated memory
+ * * memblock_alloc*() - these functions return the **virtual** address
+ * of the allocated memory.
*
 * Note that both API variants use implicit assumptions about allowed
* memory ranges and the fallback methods. Consult the documentation
- * of :c:func:`memblock_alloc_internal` and
- * :c:func:`memblock_alloc_range_nid` functions for more elaboarte
- * description.
+ * of memblock_alloc_internal() and memblock_alloc_range_nid()
+ * functions for more elaborate description.
*
- * As the system boot progresses, the architecture specific
- * :c:func:`mem_init` function frees all the memory to the buddy page
- * allocator.
+ * As the system boot progresses, the architecture specific mem_init()
+ * function frees all the memory to the buddy page allocator.
*
- * Unless an architecure enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
+ * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
* memblock data structures will be discarded after the system
- * initialization compltes.
+ * initialization completes.
*/
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -1323,12 +1319,13 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
* @start: the lower bound of the memory region to allocate (phys address)
* @end: the upper bound of the memory region to allocate (phys address)
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @exact_nid: controls whether the allocation may fall back to other nodes
*
* The allocation is performed from memory region limited by
- * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
+ * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
*
- * If the specified node can not hold the requested memory the
- * allocation falls back to any node in the system
+ * If the specified node can not hold the requested memory and @exact_nid
+ * is false, the allocation falls back to any node in the system.
*
* For systems with memory mirroring, the allocation is attempted first
* from the regions with mirroring enabled and then retried from any
@@ -1342,7 +1339,8 @@ __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
*/
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
- phys_addr_t end, int nid)
+ phys_addr_t end, int nid,
+ bool exact_nid)
{
enum memblock_flags flags = choose_memblock_flags();
phys_addr_t found;
@@ -1362,7 +1360,7 @@ again:
if (found && !memblock_reserve(found, size))
goto done;
- if (nid != NUMA_NO_NODE) {
+ if (nid != NUMA_NO_NODE && !exact_nid) {
found = memblock_find_in_range_node(size, align, start,
end, NUMA_NO_NODE,
flags);
@@ -1410,7 +1408,8 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
phys_addr_t start,
phys_addr_t end)
{
- return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
+ return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
+ false);
}
/**
@@ -1429,7 +1428,7 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
return memblock_alloc_range_nid(size, align, 0,
- MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
/**
@@ -1439,6 +1438,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
* @min_addr: the lower bound of the memory region to allocate (phys address)
* @max_addr: the upper bound of the memory region to allocate (phys address)
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @exact_nid: controls whether the allocation may fall back to other nodes
*
* Allocates memory block using memblock_alloc_range_nid() and
* converts the returned physical address to virtual.
@@ -1454,7 +1454,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
static void * __init memblock_alloc_internal(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
- int nid)
+ int nid, bool exact_nid)
{
phys_addr_t alloc;
@@ -1469,11 +1469,13 @@ static void * __init memblock_alloc_internal(
if (max_addr > memblock.current_limit)
max_addr = memblock.current_limit;
- alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
+ alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
+ exact_nid);
/* retry allocation without lower limit */
if (!alloc && min_addr)
- alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
+ alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
+ exact_nid);
if (!alloc)
return NULL;
@@ -1482,6 +1484,43 @@ static void * __init memblock_alloc_internal(
}
/**
+ * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
+ * without zeroing memory
+ * @size: size of memory block to be allocated in bytes
+ * @align: alignment of the region and block's size
+ * @min_addr: the lower bound of the memory region from where the allocation
+ * is preferred (phys address)
+ * @max_addr: the upper bound of the memory region from where the allocation
+ * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
+ * allocate only from memory limited by memblock.current_limit value
+ * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ *
+ * Public function, provides additional debug information (including caller
+ * info), if enabled. Does not zero allocated memory.
+ *
+ * Return:
+ * Virtual address of allocated memory block on success, NULL on failure.
+ */
+void * __init memblock_alloc_exact_nid_raw(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid)
+{
+ void *ptr;
+
+ memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
+ __func__, (u64)size, (u64)align, nid, &min_addr,
+ &max_addr, (void *)_RET_IP_);
+
+ ptr = memblock_alloc_internal(size, align,
+ min_addr, max_addr, nid, true);
+ if (ptr && size > 0)
+ page_init_poison(ptr, size);
+
+ return ptr;
+}
+
+/**
* memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
* memory and without panicking
* @size: size of memory block to be allocated in bytes
@@ -1512,7 +1551,7 @@ void * __init memblock_alloc_try_nid_raw(
&max_addr, (void *)_RET_IP_);
ptr = memblock_alloc_internal(size, align,
- min_addr, max_addr, nid);
+ min_addr, max_addr, nid, false);
if (ptr && size > 0)
page_init_poison(ptr, size);
@@ -1547,7 +1586,7 @@ void * __init memblock_alloc_try_nid(
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
ptr = memblock_alloc_internal(size, align,
- min_addr, max_addr, nid);
+ min_addr, max_addr, nid, false);
if (ptr)
memset(ptr, 0, size);
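
The new exact_nid flag gates the retry-on-any-node step. A userspace sketch of the control flow; find_on_node() and the single-node fallback are stand-ins for memblock's range search:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUMA_NO_NODE (-1)

    static long find_on_node(int nid)       /* stand-in for the range search */
    {
        return nid == 1 ? 0x1000 : 0;       /* pretend only node 1 has memory */
    }

    static long alloc_range_nid(int nid, bool exact_nid)
    {
        long found = find_on_node(nid);

        if (found)
            return found;
        if (nid != NUMA_NO_NODE && !exact_nid)  /* the new gate */
            return find_on_node(1);             /* fall back to another node */
        return 0;
    }

    int main(void)
    {
        printf("exact: %#lx\n", alloc_range_nid(0, true));   /* fails: 0 */
        printf("loose: %#lx\n", alloc_range_nid(0, false));  /* falls back */
        return 0;
    }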
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 01f3f8b665e9..bc01423277c5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -108,7 +108,6 @@ static const char *const mem_cgroup_lru_names[] = {
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
-#define NUMAINFO_EVENTS_TARGET 1024
/*
* Cgroups above their limits are maintained in a RB-Tree, independent of
@@ -778,7 +777,7 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
if (!memcg || memcg == root_mem_cgroup) {
__mod_node_page_state(pgdat, idx, val);
} else {
- lruvec = mem_cgroup_lruvec(pgdat, memcg);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
__mod_lruvec_state(lruvec, idx, val);
}
rcu_read_unlock();
@@ -877,9 +876,6 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
case MEM_CGROUP_TARGET_SOFTLIMIT:
next = val + SOFTLIMIT_EVENTS_TARGET;
break;
- case MEM_CGROUP_TARGET_NUMAINFO:
- next = val + NUMAINFO_EVENTS_TARGET;
- break;
default:
break;
}
@@ -899,21 +895,12 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
if (unlikely(mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_THRESH))) {
bool do_softlimit;
- bool do_numainfo __maybe_unused;
do_softlimit = mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
-#if MAX_NUMNODES > 1
- do_numainfo = mem_cgroup_event_ratelimit(memcg,
- MEM_CGROUP_TARGET_NUMAINFO);
-#endif
mem_cgroup_threshold(memcg);
if (unlikely(do_softlimit))
mem_cgroup_update_tree(memcg, page);
-#if MAX_NUMNODES > 1
- if (unlikely(do_numainfo))
- atomic_inc(&memcg->numainfo_events);
-#endif
}
}
@@ -1052,7 +1039,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup_per_node *mz;
mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
- iter = &mz->iter[reclaim->priority];
+ iter = &mz->iter;
if (prev && reclaim->generation != iter->generation)
goto out_unlock;
@@ -1152,15 +1139,11 @@ static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
struct mem_cgroup_reclaim_iter *iter;
struct mem_cgroup_per_node *mz;
int nid;
- int i;
for_each_node(nid) {
mz = mem_cgroup_nodeinfo(from, nid);
- for (i = 0; i <= DEF_PRIORITY; i++) {
- iter = &mz->iter[i];
- cmpxchg(&iter->position,
- dead_memcg, NULL);
- }
+ iter = &mz->iter;
+ cmpxchg(&iter->position, dead_memcg, NULL);
}
}
@@ -1238,7 +1221,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgd
struct lruvec *lruvec;
if (mem_cgroup_disabled()) {
- lruvec = &pgdat->lruvec;
+ lruvec = &pgdat->__lruvec;
goto out;
}
@@ -1595,104 +1578,6 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
return ret;
}
-#if MAX_NUMNODES > 1
-
-/**
- * test_mem_cgroup_node_reclaimable
- * @memcg: the target memcg
- * @nid: the node ID to be checked.
- * @noswap : specify true here if the user wants flle only information.
- *
- * This function returns whether the specified memcg contains any
- * reclaimable pages on a node. Returns true if there are any reclaimable
- * pages in the node.
- */
-static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
- int nid, bool noswap)
-{
- struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
-
- if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
- lruvec_page_state(lruvec, NR_ACTIVE_FILE))
- return true;
- if (noswap || !total_swap_pages)
- return false;
- if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
- lruvec_page_state(lruvec, NR_ACTIVE_ANON))
- return true;
- return false;
-
-}
-
-/*
- * Always updating the nodemask is not very good - even if we have an empty
- * list or the wrong list here, we can start from some node and traverse all
- * nodes based on the zonelist. So update the list loosely once per 10 secs.
- *
- */
-static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
-{
- int nid;
- /*
- * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
- * pagein/pageout changes since the last update.
- */
- if (!atomic_read(&memcg->numainfo_events))
- return;
- if (atomic_inc_return(&memcg->numainfo_updating) > 1)
- return;
-
- /* make a nodemask where this memcg uses memory from */
- memcg->scan_nodes = node_states[N_MEMORY];
-
- for_each_node_mask(nid, node_states[N_MEMORY]) {
-
- if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
- node_clear(nid, memcg->scan_nodes);
- }
-
- atomic_set(&memcg->numainfo_events, 0);
- atomic_set(&memcg->numainfo_updating, 0);
-}
-
-/*
- * Selecting a node where we start reclaim from. Because what we need is just
- * reducing usage counter, start from anywhere is O,K. Considering
- * memory reclaim from current node, there are pros. and cons.
- *
- * Freeing memory from current node means freeing memory from a node which
- * we'll use or we've used. So, it may make LRU bad. And if several threads
- * hit limits, it will see a contention on a node. But freeing from remote
- * node means more costs for memory reclaim because of memory latency.
- *
- * Now, we use round-robin. Better algorithm is welcomed.
- */
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
-{
- int node;
-
- mem_cgroup_may_update_nodemask(memcg);
- node = memcg->last_scanned_node;
-
- node = next_node_in(node, memcg->scan_nodes);
- /*
- * mem_cgroup_may_update_nodemask might have seen no reclaimmable pages
- * last time it really checked all the LRUs due to rate limiting.
- * Fallback to the current node in that case for simplicity.
- */
- if (unlikely(node == MAX_NUMNODES))
- node = numa_node_id();
-
- memcg->last_scanned_node = node;
- return node;
-}
-#else
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
-{
- return 0;
-}
-#endif
-
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
pg_data_t *pgdat,
gfp_t gfp_mask,
@@ -1705,7 +1590,6 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
unsigned long nr_scanned;
struct mem_cgroup_reclaim_cookie reclaim = {
.pgdat = pgdat,
- .priority = 0,
};
excess = soft_limit_excess(root_memcg);
@@ -3750,7 +3634,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
{
- struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
unsigned long nr = 0;
enum lru_list lru;
@@ -5078,7 +4962,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
goto fail;
INIT_WORK(&memcg->high_work, high_work_func);
- memcg->last_scanned_node = MAX_NUMNODES;
INIT_LIST_HEAD(&memcg->oom_notify);
mutex_init(&memcg->thresholds_lock);
spin_lock_init(&memcg->move_lock);
@@ -5455,8 +5338,8 @@ static int mem_cgroup_move_account(struct page *page,
anon = PageAnon(page);
pgdat = page_pgdat(page);
- from_vec = mem_cgroup_lruvec(pgdat, from);
- to_vec = mem_cgroup_lruvec(pgdat, to);
+ from_vec = mem_cgroup_lruvec(from, pgdat);
+ to_vec = mem_cgroup_lruvec(to, pgdat);
spin_lock_irqsave(&from->move_lock, flags);
@@ -6096,7 +5979,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
- unsigned long nr_pages;
+ unsigned int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ bool drained = false;
unsigned long high;
int err;
@@ -6107,12 +5991,29 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
memcg->high = high;
- nr_pages = page_counter_read(&memcg->memory);
- if (nr_pages > high)
- try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
- GFP_KERNEL, true);
+ for (;;) {
+ unsigned long nr_pages = page_counter_read(&memcg->memory);
+ unsigned long reclaimed;
+
+ if (nr_pages <= high)
+ break;
+
+ if (signal_pending(current))
+ break;
+
+ if (!drained) {
+ drain_all_stock(memcg);
+ drained = true;
+ continue;
+ }
+
+ reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
+ GFP_KERNEL, true);
+
+ if (!reclaimed && !nr_retries--)
+ break;
+ }
- memcg_wb_domain_size_changed(memcg);
return nbytes;
}
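
The new memory.high write path drains per-CPU stock once and then reclaims in a bounded retry loop instead of making a single best-effort pass. A userspace model of the loop shape (the signal_pending() check is omitted and reclaim progress is simulated):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long usage = 1000, high = 400;
        int nr_retries = 5;                     /* MEM_CGROUP_RECLAIM_RETRIES */
        bool drained = false;

        while (usage > high) {
            if (!drained) {
                usage -= 50;                    /* drain_all_stock() stand-in */
                drained = true;
                continue;
            }
            unsigned long reclaimed = (usage - high) / 2;  /* partial progress */

            usage -= reclaimed;
            if (!reclaimed && !nr_retries--)    /* give up after the budget */
                break;
        }
        printf("usage=%lu high=%lu\n", usage, high);
        return 0;
    }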
@@ -6144,10 +6045,8 @@ static ssize_t memory_max_write(struct kernfs_open_file *of,
if (nr_pages <= max)
break;
- if (signal_pending(current)) {
- err = -EINTR;
+ if (signal_pending(current))
break;
- }
if (!drained) {
drain_all_stock(memcg);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 3151c87dff73..41c634f45d45 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -303,30 +303,24 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
/*
* Schedule a process for later kill.
* Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
- * TBD would GFP_NOIO be enough?
*/
static void add_to_kill(struct task_struct *tsk, struct page *p,
struct vm_area_struct *vma,
- struct list_head *to_kill,
- struct to_kill **tkc)
+ struct list_head *to_kill)
{
struct to_kill *tk;
- if (*tkc) {
- tk = *tkc;
- *tkc = NULL;
- } else {
- tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
- if (!tk) {
- pr_err("Memory failure: Out of memory while machine check handling\n");
- return;
- }
+ tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
+ if (!tk) {
+ pr_err("Memory failure: Out of memory while machine check handling\n");
+ return;
}
+
tk->addr = page_address_in_vma(p, vma);
if (is_zone_device_page(p))
tk->size_shift = dev_pagemap_mapping_shift(p, vma);
else
- tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
+ tk->size_shift = page_shift(compound_head(p));
/*
* Send SIGKILL if "tk->addr == -EFAULT". Also, as
@@ -345,6 +339,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
kfree(tk);
return;
}
+
get_task_struct(tsk);
tk->tsk = tsk;
list_add_tail(&tk->nd, to_kill);
@@ -436,7 +431,7 @@ static struct task_struct *task_early_kill(struct task_struct *tsk,
* Collect processes when the error hit an anonymous page.
*/
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
- struct to_kill **tkc, int force_early)
+ int force_early)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -461,7 +456,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
if (!page_mapped_in_vma(page, vma))
continue;
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, vma, to_kill, tkc);
+ add_to_kill(t, page, vma, to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -472,7 +467,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
* Collect processes when the error hit a file mapped page.
*/
static void collect_procs_file(struct page *page, struct list_head *to_kill,
- struct to_kill **tkc, int force_early)
+ int force_early)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -496,7 +491,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
* to be informed of all such data corruptions.
*/
if (vma->vm_mm == t->mm)
- add_to_kill(t, page, vma, to_kill, tkc);
+ add_to_kill(t, page, vma, to_kill);
}
}
read_unlock(&tasklist_lock);
@@ -505,26 +500,17 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
/*
* Collect the processes who have the corrupted page mapped to kill.
- * This is done in two steps for locking reasons.
- * First preallocate one tokill structure outside the spin locks,
- * so that we can kill at least one process reasonably reliable.
*/
static void collect_procs(struct page *page, struct list_head *tokill,
int force_early)
{
- struct to_kill *tk;
-
if (!page->mapping)
return;
- tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
- if (!tk)
- return;
if (PageAnon(page))
- collect_procs_anon(page, tokill, &tk, force_early);
+ collect_procs_anon(page, tokill, force_early);
else
- collect_procs_file(page, tokill, &tk, force_early);
- kfree(tk);
+ collect_procs_file(page, tokill, force_early);
}
static const char *action_name[] = {
@@ -1490,7 +1476,7 @@ static void memory_failure_work_func(struct work_struct *work)
if (!gotten)
break;
if (entry.flags & MF_SOFT_OFFLINE)
- soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
+ soft_offline_page(entry.pfn, entry.flags);
else
memory_failure(entry.pfn, entry.flags);
}
@@ -1871,7 +1857,7 @@ static int soft_offline_free_page(struct page *page)
/**
* soft_offline_page - Soft offline a page.
- * @page: page to offline
+ * @pfn: pfn to soft-offline
* @flags: flags. Same as memory_failure().
*
* Returns 0 on success, otherwise negated errno.
@@ -1891,18 +1877,17 @@ static int soft_offline_free_page(struct page *page)
* This is not a 100% solution for all memory, but tries to be
* ``good enough'' for the majority of memory.
*/
-int soft_offline_page(struct page *page, int flags)
+int soft_offline_page(unsigned long pfn, int flags)
{
int ret;
- unsigned long pfn = page_to_pfn(page);
+ struct page *page;
- if (is_zone_device_page(page)) {
- pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
- pfn);
- if (flags & MF_COUNT_INCREASED)
- put_page(page);
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+ page = pfn_to_online_page(pfn);
+ if (!page)
return -EIO;
- }
if (PageHWPoison(page)) {
pr_info("soft offline: %#lx page already poisoned\n", pfn);
diff --git a/mm/memory.c b/mm/memory.c
index b6a5d6a08438..513c3ecc76ee 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -72,6 +72,8 @@
#include <linux/oom.h>
#include <linux/numa.h>
+#include <trace/events/kmem.h>
+
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@@ -152,6 +154,10 @@ static int __init init_zero_pfn(void)
}
core_initcall(init_zero_pfn);
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
+{
+ trace_rss_stat(mm, member, count);
+}
#if defined(SPLIT_RSS_COUNTING)
@@ -2289,10 +2295,11 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
*
* The function expects the page to be locked and unlocks it.
*/
-static void fault_dirty_shared_page(struct vm_area_struct *vma,
- struct page *page)
+static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping;
+ struct page *page = vmf->page;
bool dirtied;
bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
@@ -2307,16 +2314,30 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
mapping = page_rmapping(page);
unlock_page(page);
+ if (!page_mkwrite)
+ file_update_time(vma->vm_file);
+
+ /*
+ * Throttle page dirtying rate down to writeback speed.
+ *
+ * mapping may be NULL here because some device drivers do not
+ * set page.mapping but still dirty their pages
+ *
+ * Drop the mmap_sem before waiting on IO, if we can. The file
+ * is pinning the mapping, as per above.
+ */
if ((dirtied || page_mkwrite) && mapping) {
- /*
- * Some device drivers do not set page.mapping
- * but still dirty their pages
- */
+ struct file *fpin;
+
+ fpin = maybe_unlock_mmap_for_io(vmf, NULL);
balance_dirty_pages_ratelimited(mapping);
+ if (fpin) {
+ fput(fpin);
+ return VM_FAULT_RETRY;
+ }
}
- if (!page_mkwrite)
- file_update_time(vma->vm_file);
+ return 0;
}
/*
@@ -2571,6 +2592,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
__releases(vmf->ptl)
{
struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = VM_FAULT_WRITE;
get_page(vmf->page);
@@ -2594,10 +2616,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
wp_page_reuse(vmf);
lock_page(vmf->page);
}
- fault_dirty_shared_page(vma, vmf->page);
+ ret |= fault_dirty_shared_page(vmf);
put_page(vmf->page);
- return VM_FAULT_WRITE;
+ return ret;
}
/*
@@ -3083,7 +3105,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
/*
* The memory barrier inside __SetPageUptodate makes sure that
- * preceeding stores to the page contents become visible before
+ * preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
@@ -3641,7 +3663,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
return ret;
}
- fault_dirty_shared_page(vma, vmf->page);
+ ret |= fault_dirty_shared_page(vmf);
return ret;
}
@@ -3988,6 +4010,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
vmf.pud = pud_alloc(mm, p4d, address);
if (!vmf.pud)
return VM_FAULT_OOM;
+retry_pud:
if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
@@ -4014,6 +4037,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
vmf.pmd = pmd_alloc(mm, vmf.pud, address);
if (!vmf.pmd)
return VM_FAULT_OOM;
+
+ /* Huge pud page fault raced with pmd_alloc? */
+ if (pud_trans_unstable(vmf.pud))
+ goto retry_pud;
+
if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f307bd82d750..55ac23ef11c1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -49,8 +49,6 @@
* and restore_online_page_callback() for generic callback restore.
*/
-static void generic_online_page(struct page *page, unsigned int order);
-
static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);
@@ -278,6 +276,22 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
return 0;
}
+static int check_hotplug_memory_addressable(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
+
+ if (max_addr >> MAX_PHYSMEM_BITS) {
+ const u64 max_allowed = (1ull << MAX_PHYSMEM_BITS) - 1;
+ WARN(1,
+ "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n",
+ (u64)PFN_PHYS(pfn), max_addr, max_allowed);
+ return -E2BIG;
+ }
+
+ return 0;
+}
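/*
 * Editor's sketch, not part of the merge: a worked example of the check
 * above. With PAGE_SHIFT == 12 and MAX_PHYSMEM_BITS == 46 the highest
 * addressable byte is (1ULL << 46) - 1, i.e. only pfns below 1UL << 34
 * are acceptable:
 *
 *   check_hotplug_memory_addressable((1UL << 34) - (1UL << 15), 1UL << 15)
 *     max_addr == (1ULL << 46) - 1, max_addr >> 46 == 0 -> returns 0
 *
 *   check_hotplug_memory_addressable(1UL << 34, 1UL << 15)
 *     max_addr == (1ULL << 46) + (1ULL << 27) - 1, max_addr >> 46 != 0
 *     -> WARNs and returns -E2BIG
 */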
+
/*
* Reasonably generic function for adding memory. It is
* expected that archs that support memory hotplug will
@@ -291,6 +305,10 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
unsigned long nr, start_sec, end_sec;
struct vmem_altmap *altmap = restrictions->altmap;
+ err = check_hotplug_memory_addressable(pfn, nr_pages);
+ if (err)
+ return err;
+
if (altmap) {
/*
* Validate altmap is within bounds of the total request
@@ -580,24 +598,7 @@ int restore_online_page_callback(online_page_callback_t callback)
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
-void __online_page_set_limits(struct page *page)
-{
-}
-EXPORT_SYMBOL_GPL(__online_page_set_limits);
-
-void __online_page_increment_counters(struct page *page)
-{
- adjust_managed_page_count(page, 1);
-}
-EXPORT_SYMBOL_GPL(__online_page_increment_counters);
-
-void __online_page_free(struct page *page)
-{
- __free_reserved_page(page);
-}
-EXPORT_SYMBOL_GPL(__online_page_free);
-
-static void generic_online_page(struct page *page, unsigned int order)
+void generic_online_page(struct page *page, unsigned int order)
{
kernel_map_pages(page, 1 << order, 1);
__free_pages_core(page, order);
@@ -607,6 +608,7 @@ static void generic_online_page(struct page *page, unsigned int order)
totalhigh_pages_add(1UL << order);
#endif
}
+EXPORT_SYMBOL_GPL(generic_online_page);
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
@@ -1180,7 +1182,8 @@ static bool is_pageblock_removable_nolock(unsigned long pfn)
if (!zone_spans_pfn(zone, pfn))
return false;
- return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
+ return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE,
+ MEMORY_OFFLINE);
}
/* Checks if this range of memory is likely to be hot-removable. */
@@ -1377,9 +1380,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
return ret;
}
-/*
- * remove from free_area[] and mark all as Reserved.
- */
+/* Mark all sections offline and remove all free pages from the buddy. */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
void *data)
@@ -1397,7 +1398,8 @@ static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
void *data)
{
- return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
+ return test_pages_isolated(start_pfn, start_pfn + nr_pages,
+ MEMORY_OFFLINE);
}
static int __init cmdline_parse_movable_node(char *p)
@@ -1478,10 +1480,19 @@ static void node_states_clear_node(int node, struct memory_notify *arg)
node_clear_state(node, N_MEMORY);
}
+static int count_system_ram_pages_cb(unsigned long start_pfn,
+ unsigned long nr_pages, void *data)
+{
+ unsigned long *nr_system_ram_pages = data;
+
+ *nr_system_ram_pages += nr_pages;
+ return 0;
+}
+
static int __ref __offline_pages(unsigned long start_pfn,
unsigned long end_pfn)
{
- unsigned long pfn, nr_pages;
+ unsigned long pfn, nr_pages = 0;
unsigned long offlined_pages = 0;
int ret, node, nr_isolate_pageblock;
unsigned long flags;
@@ -1492,6 +1503,22 @@ static int __ref __offline_pages(unsigned long start_pfn,
mem_hotplug_begin();
+ /*
+ * Don't allow to offline memory blocks that contain holes.
+ * Consequently, memory blocks with holes can never get onlined
+ * via the hotplug path - online_pages() - as hotplugged memory has
+ * no holes. This way, we e.g., don't have to worry about marking
+ * memory holes PG_reserved, don't need pfn_valid() checks, and can
+ * avoid using walk_system_ram_range() later.
+ */
+ walk_system_ram_range(start_pfn, end_pfn - start_pfn, &nr_pages,
+ count_system_ram_pages_cb);
+ if (nr_pages != end_pfn - start_pfn) {
+ ret = -EINVAL;
+ reason = "memory holes";
+ goto failed_removal;
+ }
+
/* This makes hotplug much easier...and readable.
we assume this for now. */
if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
@@ -1503,12 +1530,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
zone = page_zone(pfn_to_page(valid_start));
node = zone_to_nid(zone);
- nr_pages = end_pfn - start_pfn;
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE,
- SKIP_HWPOISON | REPORT_FAILURE);
+ MEMORY_OFFLINE | REPORT_FAILURE);
if (ret < 0) {
reason = "failure to isolate range";
goto failed_removal;
@@ -1750,13 +1776,13 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
- memblock_free(start, size);
- memblock_remove(start, size);
/* remove memory block devices before removing memory */
remove_memory_block_devices(start, size);
arch_remove_memory(nid, start, size, NULL);
+ memblock_free(start, size);
+ memblock_remove(start, size);
__release_memory_resource(start, size);
try_offline_node(nid);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e08c94170ae4..067cf7d3daf5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -410,7 +410,9 @@ struct queue_pages {
struct list_head *pagelist;
unsigned long flags;
nodemask_t *nmask;
- struct vm_area_struct *prev;
+ unsigned long start;
+ unsigned long end;
+ struct vm_area_struct *first;
};
/*
@@ -618,6 +620,22 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
+ /* range check first */
+ VM_BUG_ON((vma->vm_start > start) || (vma->vm_end < end));
+
+ if (!qp->first) {
+ qp->first = vma;
+ if (!(flags & MPOL_MF_DISCONTIG_OK) &&
+ (qp->start < vma->vm_start))
+ /* hole at head side of range */
+ return -EFAULT;
+ }
+ if (!(flags & MPOL_MF_DISCONTIG_OK) &&
+ ((vma->vm_end < qp->end) &&
+ (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
+ /* hole at middle or tail of range */
+ return -EFAULT;
+
/*
* Need check MPOL_MF_STRICT to return -EIO if possible
* regardless of vma_migratable
@@ -628,17 +646,6 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
if (endvma > end)
endvma = end;
- if (vma->vm_start > start)
- start = vma->vm_start;
-
- if (!(flags & MPOL_MF_DISCONTIG_OK)) {
- if (!vma->vm_next && vma->vm_end < end)
- return -EFAULT;
- if (qp->prev && qp->prev->vm_end < vma->vm_start)
- return -EFAULT;
- }
-
- qp->prev = vma;
if (flags & MPOL_MF_LAZY) {
/* Similar to task_numa_work, skip inaccessible VMAs */
@@ -681,14 +688,23 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
nodemask_t *nodes, unsigned long flags,
struct list_head *pagelist)
{
+ int err;
struct queue_pages qp = {
.pagelist = pagelist,
.flags = flags,
.nmask = nodes,
- .prev = NULL,
+ .start = start,
+ .end = end,
+ .first = NULL,
};
- return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
+ err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
+
+ if (!qp.first)
+ /* whole range in hole */
+ err = -EFAULT;
+
+ return err;
}
/*
@@ -740,8 +756,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long vmend;
vma = find_vma(mm, start);
- if (!vma || vma->vm_start > start)
- return -EFAULT;
+ VM_BUG_ON(!vma);
prev = vma->vm_prev;
if (start > vma->vm_start)
diff --git a/mm/migrate.c b/mm/migrate.c
index 4fe45d1428c8..eae1565285e3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1168,15 +1168,11 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
enum migrate_reason reason)
{
int rc = MIGRATEPAGE_SUCCESS;
- struct page *newpage;
+ struct page *newpage = NULL;
if (!thp_migration_supported() && PageTransHuge(page))
return -ENOMEM;
- newpage = get_new_page(page, private);
- if (!newpage)
- return -ENOMEM;
-
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
ClearPageActive(page);
@@ -1187,13 +1183,13 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
__ClearPageIsolated(page);
unlock_page(page);
}
- if (put_new_page)
- put_new_page(newpage, private);
- else
- put_page(newpage);
goto out;
}
+ newpage = get_new_page(page, private);
+ if (!newpage)
+ return -ENOMEM;
+
rc = __unmap_and_move(page, newpage, force, mode);
if (rc == MIGRATEPAGE_SUCCESS)
set_page_owner_migrate_reason(newpage, reason);
@@ -1863,7 +1859,7 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
if (!zone_watermark_ok(zone, 0,
high_wmark_pages(zone) +
nr_migrate_pages,
- 0, 0))
+ ZONE_MOVABLE, 0))
continue;
return true;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index a7d8c84d19b7..9c648524e4dc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -641,7 +641,7 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
- __vma_link_list(mm, vma, prev, rb_parent);
+ __vma_link_list(mm, vma, prev);
__vma_link_rb(mm, vma, rb_link, rb_parent);
}
@@ -684,37 +684,14 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
static __always_inline void __vma_unlink_common(struct mm_struct *mm,
struct vm_area_struct *vma,
- struct vm_area_struct *prev,
- bool has_prev,
struct vm_area_struct *ignore)
{
- struct vm_area_struct *next;
-
vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
- next = vma->vm_next;
- if (has_prev)
- prev->vm_next = next;
- else {
- prev = vma->vm_prev;
- if (prev)
- prev->vm_next = next;
- else
- mm->mmap = next;
- }
- if (next)
- next->vm_prev = prev;
-
+ __vma_unlink_list(mm, vma);
/* Kill the cache */
vmacache_invalidate(mm);
}
-static inline void __vma_unlink_prev(struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct vm_area_struct *prev)
-{
- __vma_unlink_common(mm, vma, prev, true, vma);
-}
-
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
@@ -769,8 +746,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
remove_next = 1 + (end > next->vm_end);
VM_WARN_ON(remove_next == 2 &&
end != next->vm_next->vm_end);
- VM_WARN_ON(remove_next == 1 &&
- end != next->vm_end);
/* trim end to next, for case 6 first pass */
end = next->vm_end;
}
@@ -889,7 +864,7 @@ again:
* us to remove next before dropping the locks.
*/
if (remove_next != 3)
- __vma_unlink_prev(mm, next, vma);
+ __vma_unlink_common(mm, next, next);
else
/*
* vma is not before next if they've been
@@ -900,7 +875,7 @@ again:
* "next" (which is stored in post-swap()
* "vma").
*/
- __vma_unlink_common(mm, next, NULL, false, vma);
+ __vma_unlink_common(mm, next, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
} else if (insert) {
@@ -1116,15 +1091,18 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
* the area passed down from mprotect_fixup, never extending beyond one
* vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
*
- * AAAA AAAA AAAA AAAA
- * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX
- * cannot merge might become might become might become
- * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or
- * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or
- * mremap move: PPPPXXXXXXXX 8
- * AAAA
- * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
- * might become case 1 below case 2 below case 3 below
+ * AAAA AAAA AAAA
+ * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN
+ * cannot merge might become might become
+ * PPNNNNNNNNNN PPPPPPPPPPNN
+ * mmap, brk or case 4 below case 5 below
+ * mremap move:
+ * AAAA AAAA
+ * PPPP NNNN PPPPNNNNXXXX
+ * might become might become
+ * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
+ * PPPPPPPPNNNN 2 or PPPPPPPPXXXX 7 or
+ * PPPPNNNNNNNN 3 PPPPXXXXXXXX 8
*
* It is important for case 8 that the vma NNNN overlapping the
* region AAAA is never going to extended over XXXX. Instead XXXX must
@@ -1442,7 +1420,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
* that it represents a valid section of the address space.
*/
addr = get_unmapped_area(file, addr, len, pgoff, flags);
- if (offset_in_page(addr))
+ if (IS_ERR_VALUE(addr))
return addr;
if (flags & MAP_FIXED_NOREPLACE) {
@@ -3006,15 +2984,16 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
+ unsigned long mapped_addr;
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
- if (offset_in_page(error))
- return error;
+ mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (IS_ERR_VALUE(mapped_addr))
+ return mapped_addr;
error = mlock_future_check(mm, mm->def_flags, len);
if (error)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9a889e456168..f76ea05b1cb0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
+#include <linux/interval_tree.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
@@ -28,6 +29,254 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
#endif
/*
+ * The mmu_notifier_mm structure is allocated and installed in
+ * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * critical section and it's released only when mm_count reaches zero
+ * in mmdrop().
+ */
+struct mmu_notifier_mm {
+ /* all mmu notifiers registered in this mm are queued in this list */
+ struct hlist_head list;
+ bool has_itree;
+ /* to serialize the list modifications and hlist_unhashed */
+ spinlock_t lock;
+ unsigned long invalidate_seq;
+ unsigned long active_invalidate_ranges;
+ struct rb_root_cached itree;
+ wait_queue_head_t wq;
+ struct hlist_head deferred_list;
+};
+
+/*
+ * This is a collision-retry read-side/write-side 'lock', a lot like a
+ * seqcount, however this allows multiple write-sides to hold it at
+ * once. Conceptually the write side is protecting the values of the PTEs in
+ * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
+ * writer exists.
+ *
+ * Note that the core mm creates nested invalidate_range_start()/end() regions
+ * within the same thread, and runs invalidate_range_start()/end() in parallel
+ * on multiple CPUs. This is designed to not reduce concurrency or block
+ * progress on the mm side.
+ *
+ * As a secondary function, holding the full write side also serves to prevent
+ * writers for the itree, this is an optimization to avoid extra locking
+ * during invalidate_range_start/end notifiers.
+ *
+ * The write side has two states, fully excluded:
+ * - mmn_mm->active_invalidate_ranges != 0
+ * - mmn_mm->invalidate_seq & 1 == True (odd)
+ * - some range on the mm_struct is being invalidated
+ * - the itree is not allowed to change
+ *
+ * And partially excluded:
+ * - mmn_mm->active_invalidate_ranges != 0
+ * - mmn_mm->invalidate_seq & 1 == False (even)
+ * - some range on the mm_struct is being invalidated
+ * - the itree is allowed to change
+ *
+ * Operations on mmu_notifier_mm->invalidate_seq (under spinlock):
+ * seq |= 1 # Begin writing
+ * seq++ # Release the writing state
+ * seq & 1 # True if a writer exists
+ *
+ * The latter state avoids some expensive work on inv_end in the common case of
+ * no mni monitoring the VA.
+ */
+static bool mn_itree_is_invalidating(struct mmu_notifier_mm *mmn_mm)
+{
+ lockdep_assert_held(&mmn_mm->lock);
+ return mmn_mm->invalidate_seq & 1;
+}
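/*
 * Editor's sketch, not part of the merge: the invalidate_seq arithmetic
 * described above, played out from the idle value 2 set at registration:
 *
 *   insert (idle):          mmn_mm->invalidate_seq == 2, mni->invalidate_seq = 1
 *   read_begin():           returns seq == 1
 *   invalidate start:       mmn_mm->invalidate_seq |= 1   -> 3 (odd, writing)
 *     ->invalidate() runs:  mmu_interval_set_seq(mni, 3)
 *   read_retry(mni, 1):     3 != 1 -> true, the reader must redo its work
 *   invalidate end:         mmn_mm->invalidate_seq++      -> 4 (even, idle)
 *
 * A read_begin() issued while the invalidation was running would have
 * sampled 3 == 3 and slept on mmn_mm->wq until the end bumped the
 * sequence to 4.
 */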
+
+static struct mmu_interval_notifier *
+mn_itree_inv_start_range(struct mmu_notifier_mm *mmn_mm,
+ const struct mmu_notifier_range *range,
+ unsigned long *seq)
+{
+ struct interval_tree_node *node;
+ struct mmu_interval_notifier *res = NULL;
+
+ spin_lock(&mmn_mm->lock);
+ mmn_mm->active_invalidate_ranges++;
+ node = interval_tree_iter_first(&mmn_mm->itree, range->start,
+ range->end - 1);
+ if (node) {
+ mmn_mm->invalidate_seq |= 1;
+ res = container_of(node, struct mmu_interval_notifier,
+ interval_tree);
+ }
+
+ *seq = mmn_mm->invalidate_seq;
+ spin_unlock(&mmn_mm->lock);
+ return res;
+}
+
+static struct mmu_interval_notifier *
+mn_itree_inv_next(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range)
+{
+ struct interval_tree_node *node;
+
+ node = interval_tree_iter_next(&mni->interval_tree, range->start,
+ range->end - 1);
+ if (!node)
+ return NULL;
+ return container_of(node, struct mmu_interval_notifier, interval_tree);
+}
+
+static void mn_itree_inv_end(struct mmu_notifier_mm *mmn_mm)
+{
+ struct mmu_interval_notifier *mni;
+ struct hlist_node *next;
+
+ spin_lock(&mmn_mm->lock);
+ if (--mmn_mm->active_invalidate_ranges ||
+ !mn_itree_is_invalidating(mmn_mm)) {
+ spin_unlock(&mmn_mm->lock);
+ return;
+ }
+
+ /* Make invalidate_seq even */
+ mmn_mm->invalidate_seq++;
+
+ /*
+ * The inv_end incorporates a deferred mechanism like rtnl_unlock().
+ * Adds and removes are queued until the final inv_end happens then
+ * they are progressed. This arrangement for tree updates is used to
+ * avoid using a blocking lock during invalidate_range_start.
+ */
+ hlist_for_each_entry_safe(mni, next, &mmn_mm->deferred_list,
+ deferred_item) {
+ if (RB_EMPTY_NODE(&mni->interval_tree.rb))
+ interval_tree_insert(&mni->interval_tree,
+ &mmn_mm->itree);
+ else
+ interval_tree_remove(&mni->interval_tree,
+ &mmn_mm->itree);
+ hlist_del(&mni->deferred_item);
+ }
+ spin_unlock(&mmn_mm->lock);
+
+ wake_up_all(&mmn_mm->wq);
+}
+
+/**
+ * mmu_interval_read_begin - Begin a read side critical section against a VA
+ * range
+ * @mni: The interval notifier to use
+ *
+ * mmu_interval_read_begin()/mmu_interval_read_retry() implement a
+ * collision-retry scheme similar to seqcount for the VA range under mni. If
+ * the mm invokes invalidation during the critical section then
+ * mmu_interval_read_retry() will return true.
+ *
+ * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs
+ * require a blocking context. The critical region formed by this can sleep,
+ * and the required 'user_lock' can also be a sleeping lock.
+ *
+ * The caller is required to provide a 'user_lock' to serialize both teardown
+ * and setup.
+ *
+ * The return value should be passed to mmu_interval_read_retry().
+ */
+unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *mni)
+{
+ struct mmu_notifier_mm *mmn_mm = mni->mm->mmu_notifier_mm;
+ unsigned long seq;
+ bool is_invalidating;
+
+ /*
+ * If the mni has a different seq value under the user_lock than we
+ * started with then it has collided.
+ *
+ * If the mni currently has the same seq value as the mmn_mm seq, then
+ * it is currently between invalidate_start/end and is colliding.
+ *
+ * The locking looks broadly like this:
+ * mn_tree_invalidate_start(): mmu_interval_read_begin():
+ * spin_lock
+ * seq = READ_ONCE(mni->invalidate_seq);
+ * seq == mmn_mm->invalidate_seq
+ * spin_unlock
+ * spin_lock
+ * seq = ++mmn_mm->invalidate_seq
+ * spin_unlock
+ * op->invalidate_range():
+ * user_lock
+ * mmu_interval_set_seq()
+ * mni->invalidate_seq = seq
+ * user_unlock
+ *
+ * [Required: mmu_interval_read_retry() == true]
+ *
+ * mn_itree_inv_end():
+ * spin_lock
+ * seq = ++mmn_mm->invalidate_seq
+ * spin_unlock
+ *
+ * user_lock
+ * mmu_interval_read_retry():
+ * mni->invalidate_seq != seq
+ * user_unlock
+ *
+ * Barriers are not needed here as any races here are closed by an
+ * eventual mmu_interval_read_retry(), which provides a barrier via the
+ * user_lock.
+ */
+ spin_lock(&mmn_mm->lock);
+ /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
+ seq = READ_ONCE(mni->invalidate_seq);
+ is_invalidating = seq == mmn_mm->invalidate_seq;
+ spin_unlock(&mmn_mm->lock);
+
+ /*
+ * mni->invalidate_seq must always be set to an odd value via
+ * mmu_interval_set_seq() using the provided cur_seq from
+ * mn_itree_inv_start_range(). This ensures that if seq does wrap we
+ * will always clear the below sleep in some reasonable time as
+ * mmn_mm->invalidate_seq is even in the idle state.
+ */
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+ if (is_invalidating)
+ wait_event(mmn_mm->wq,
+ READ_ONCE(mmn_mm->invalidate_seq) != seq);
+
+ /*
+ * Notice that mmu_interval_read_retry() can already be true at this
+ * point, avoiding loops here allows the caller to provide a global
+ * time bound.
+ */
+
+ return seq;
+}
+EXPORT_SYMBOL_GPL(mmu_interval_read_begin);
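/*
 * Editor's sketch, not part of the merge: the retry loop the kernel-doc
 * above describes, for a hypothetical driver whose ctx->lock is the
 * 'user_lock' serializing SPTE programming against the ->invalidate()
 * callback. driver_ctx, driver_fetch_ptes() and driver_program_sptes()
 * are made-up names.
 */
static void driver_update_sptes(struct driver_ctx *ctx)
{
	unsigned long seq;

	for (;;) {
		seq = mmu_interval_read_begin(&ctx->mni);
		driver_fetch_ptes(ctx);		/* may sleep / fault */
		mutex_lock(&ctx->lock);
		if (!mmu_interval_read_retry(&ctx->mni, seq)) {
			driver_program_sptes(ctx);	/* SPTEs now valid */
			mutex_unlock(&ctx->lock);
			return;
		}
		/* Collided with an invalidation; redo the work. */
		mutex_unlock(&ctx->lock);
	}
}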
+
+static void mn_itree_release(struct mmu_notifier_mm *mmn_mm,
+ struct mm_struct *mm)
+{
+ struct mmu_notifier_range range = {
+ .flags = MMU_NOTIFIER_RANGE_BLOCKABLE,
+ .event = MMU_NOTIFY_RELEASE,
+ .mm = mm,
+ .start = 0,
+ .end = ULONG_MAX,
+ };
+ struct mmu_interval_notifier *mni;
+ unsigned long cur_seq;
+ bool ret;
+
+ for (mni = mn_itree_inv_start_range(mmn_mm, &range, &cur_seq); mni;
+ mni = mn_itree_inv_next(mni, &range)) {
+ ret = mni->ops->invalidate(mni, &range, cur_seq);
+ WARN_ON(!ret);
+ }
+
+ mn_itree_inv_end(mmn_mm);
+}
+
+/*
* This function can't run concurrently against mmu_notifier_register
* because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
* runs with mm_users == 0. Other tasks may still invoke mmu notifiers
@@ -39,7 +288,8 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
* can't go away from under us as exit_mmap holds an mm_count pin
* itself.
*/
-void __mmu_notifier_release(struct mm_struct *mm)
+static void mn_hlist_release(struct mmu_notifier_mm *mmn_mm,
+ struct mm_struct *mm)
{
struct mmu_notifier *mn;
int id;
@@ -49,7 +299,7 @@ void __mmu_notifier_release(struct mm_struct *mm)
* ->release returns.
*/
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
+ hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist)
/*
* If ->release runs before mmu_notifier_unregister it must be
* handled, as it's the only way for the driver to flush all
@@ -59,10 +309,9 @@ void __mmu_notifier_release(struct mm_struct *mm)
if (mn->ops->release)
mn->ops->release(mn, mm);
- spin_lock(&mm->mmu_notifier_mm->lock);
- while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
- mn = hlist_entry(mm->mmu_notifier_mm->list.first,
- struct mmu_notifier,
+ spin_lock(&mmn_mm->lock);
+ while (unlikely(!hlist_empty(&mmn_mm->list))) {
+ mn = hlist_entry(mmn_mm->list.first, struct mmu_notifier,
hlist);
/*
* We arrived before mmu_notifier_unregister so
@@ -72,7 +321,7 @@ void __mmu_notifier_release(struct mm_struct *mm)
*/
hlist_del_init_rcu(&mn->hlist);
}
- spin_unlock(&mm->mmu_notifier_mm->lock);
+ spin_unlock(&mmn_mm->lock);
srcu_read_unlock(&srcu, id);
/*
@@ -87,6 +336,17 @@ void __mmu_notifier_release(struct mm_struct *mm)
synchronize_srcu(&srcu);
}
+void __mmu_notifier_release(struct mm_struct *mm)
+{
+ struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
+
+ if (mmn_mm->has_itree)
+ mn_itree_release(mmn_mm, mm);
+
+ if (!hlist_empty(&mmn_mm->list))
+ mn_hlist_release(mmn_mm, mm);
+}
+
/*
* If no young bitflag is supported by the hardware, ->clear_flush_young can
* unmap the address and return 1 or 0 depending if the mapping previously
@@ -159,14 +419,43 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
srcu_read_unlock(&srcu, id);
}
-int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
+static int mn_itree_invalidate(struct mmu_notifier_mm *mmn_mm,
+ const struct mmu_notifier_range *range)
+{
+ struct mmu_interval_notifier *mni;
+ unsigned long cur_seq;
+
+ for (mni = mn_itree_inv_start_range(mmn_mm, range, &cur_seq); mni;
+ mni = mn_itree_inv_next(mni, range)) {
+ bool ret;
+
+ ret = mni->ops->invalidate(mni, range, cur_seq);
+ if (!ret) {
+ if (WARN_ON(mmu_notifier_range_blockable(range)))
+ continue;
+ goto out_would_block;
+ }
+ }
+ return 0;
+
+out_would_block:
+ /*
+ * On -EAGAIN the non-blocking caller is not allowed to call
+ * invalidate_range_end()
+ */
+ mn_itree_inv_end(mmn_mm);
+ return -EAGAIN;
+}
+
+static int mn_hlist_invalidate_range_start(struct mmu_notifier_mm *mmn_mm,
+ struct mmu_notifier_range *range)
{
struct mmu_notifier *mn;
int ret = 0;
int id;
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
if (mn->ops->invalidate_range_start) {
int _ret;
@@ -190,15 +479,30 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
return ret;
}
-void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
- bool only_end)
+int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
+{
+ struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;
+ int ret;
+
+ if (mmn_mm->has_itree) {
+ ret = mn_itree_invalidate(mmn_mm, range);
+ if (ret)
+ return ret;
+ }
+ if (!hlist_empty(&mmn_mm->list))
+ return mn_hlist_invalidate_range_start(mmn_mm, range);
+ return 0;
+}
+
+static void mn_hlist_invalidate_end(struct mmu_notifier_mm *mmn_mm,
+ struct mmu_notifier_range *range,
+ bool only_end)
{
struct mmu_notifier *mn;
int id;
- lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
id = srcu_read_lock(&srcu);
- hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
+ hlist_for_each_entry_rcu(mn, &mmn_mm->list, hlist) {
/*
* Call invalidate_range here too to avoid the need for the
* subsystem of having to register an invalidate_range_end
@@ -225,6 +529,19 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
}
}
srcu_read_unlock(&srcu, id);
+}
+
+void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
+ bool only_end)
+{
+ struct mmu_notifier_mm *mmn_mm = range->mm->mmu_notifier_mm;
+
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ if (mmn_mm->has_itree)
+ mn_itree_inv_end(mmn_mm);
+
+ if (!hlist_empty(&mmn_mm->list))
+ mn_hlist_invalidate_end(mmn_mm, range, only_end);
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
@@ -243,8 +560,9 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
}
/*
- * Same as mmu_notifier_register but here the caller must hold the
- * mmap_sem in write mode.
+ * Same as mmu_notifier_register but here the caller must hold the mmap_sem in
+ * write mode. A NULL mn signals the notifier is being registered for itree
+ * mode.
*/
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
@@ -261,9 +579,6 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
fs_reclaim_release(GFP_KERNEL);
}
- mn->mm = mm;
- mn->users = 1;
-
if (!mm->mmu_notifier_mm) {
/*
* kmalloc cannot be called under mm_take_all_locks(), but we
@@ -271,21 +586,22 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
* the write side of the mmap_sem.
*/
mmu_notifier_mm =
- kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ kzalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
if (!mmu_notifier_mm)
return -ENOMEM;
INIT_HLIST_HEAD(&mmu_notifier_mm->list);
spin_lock_init(&mmu_notifier_mm->lock);
+ mmu_notifier_mm->invalidate_seq = 2;
+ mmu_notifier_mm->itree = RB_ROOT_CACHED;
+ init_waitqueue_head(&mmu_notifier_mm->wq);
+ INIT_HLIST_HEAD(&mmu_notifier_mm->deferred_list);
}
ret = mm_take_all_locks(mm);
if (unlikely(ret))
goto out_clean;
- /* Pairs with the mmdrop in mmu_notifier_unregister_* */
- mmgrab(mm);
-
/*
* Serialize the update against mmu_notifier_unregister. A
* side note: mmu_notifier_release can't run concurrently with
@@ -293,13 +609,28 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
* current->mm or explicitly with get_task_mm() or similar).
* We can't race against any other mmu notifier method either
* thanks to mm_take_all_locks().
+ *
+ * release semantics on the initialization of the mmu_notifier_mm's
+ * contents are provided for unlocked readers. acquire can only be
+ * used while holding the mmgrab or mmget, and is safe because once
+ * created the mmu_notifier_mm is not freed until the mm is
+ * destroyed. As above, users holding the mmap_sem or one of the
+ * mm_take_all_locks() do not need to use acquire semantics.
*/
if (mmu_notifier_mm)
- mm->mmu_notifier_mm = mmu_notifier_mm;
+ smp_store_release(&mm->mmu_notifier_mm, mmu_notifier_mm);
- spin_lock(&mm->mmu_notifier_mm->lock);
- hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
- spin_unlock(&mm->mmu_notifier_mm->lock);
+ if (mn) {
+ /* Pairs with the mmdrop in mmu_notifier_unregister_* */
+ mmgrab(mm);
+ mn->mm = mm;
+ mn->users = 1;
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+ } else
+ mm->mmu_notifier_mm->has_itree = true;
mm_drop_all_locks(mm);
BUG_ON(atomic_read(&mm->mm_users) <= 0);
@@ -516,6 +847,180 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(mmu_notifier_put);
+static int __mmu_interval_notifier_insert(
+ struct mmu_interval_notifier *mni, struct mm_struct *mm,
+ struct mmu_notifier_mm *mmn_mm, unsigned long start,
+ unsigned long length, const struct mmu_interval_notifier_ops *ops)
+{
+ mni->mm = mm;
+ mni->ops = ops;
+ RB_CLEAR_NODE(&mni->interval_tree.rb);
+ mni->interval_tree.start = start;
+ /*
+ * Note that the representation of the intervals in the interval tree
+ * considers the ending point as contained in the interval.
+ */
+ if (length == 0 ||
+ check_add_overflow(start, length - 1, &mni->interval_tree.last))
+ return -EOVERFLOW;
+
+ /* Must call with a mmget() held */
+ if (WARN_ON(atomic_read(&mm->mm_count) <= 0))
+ return -EINVAL;
+
+ /* pairs with mmdrop in mmu_interval_notifier_remove() */
+ mmgrab(mm);
+
+ /*
+ * If some invalidate_range_start/end region is going on in parallel
+ * we don't know what VA ranges are affected, so we must assume this
+ * new range is included.
+ *
+ * If the itree is invalidating then we are not allowed to change
+ * it. Retrying until invalidation is done is tricky due to the
+ * possibility for live lock, instead defer the add to
+ * mn_itree_inv_end() so this algorithm is deterministic.
+ *
+ * In all cases the value for the mni->invalidate_seq should be
+ * odd, see mmu_interval_read_begin()
+ */
+ spin_lock(&mmn_mm->lock);
+ if (mmn_mm->active_invalidate_ranges) {
+ if (mn_itree_is_invalidating(mmn_mm))
+ hlist_add_head(&mni->deferred_item,
+ &mmn_mm->deferred_list);
+ else {
+ mmn_mm->invalidate_seq |= 1;
+ interval_tree_insert(&mni->interval_tree,
+ &mmn_mm->itree);
+ }
+ mni->invalidate_seq = mmn_mm->invalidate_seq;
+ } else {
+ WARN_ON(mn_itree_is_invalidating(mmn_mm));
+ /*
+ * The starting seq for a mni not under invalidation should be
+ * odd, not equal to the current invalidate_seq and
+ * invalidate_seq should not 'wrap' to the new seq any time
+ * soon.
+ */
+ mni->invalidate_seq = mmn_mm->invalidate_seq - 1;
+ interval_tree_insert(&mni->interval_tree, &mmn_mm->itree);
+ }
+ spin_unlock(&mmn_mm->lock);
+ return 0;
+}
+
+/**
+ * mmu_interval_notifier_insert - Insert an interval notifier
+ * @mni: Interval notifier to register
+ * @start: Starting virtual address to monitor
+ * @length: Length of the range to monitor
+ * @mm: mm_struct to attach to
+ *
+ * This function subscribes the interval notifier for notifications from the
+ * mm. Upon return the ops related to mmu_interval_notifier will be called
+ * whenever an event that intersects with the given range occurs.
+ *
+ * Upon return the interval notifier may not be present in the interval tree yet.
+ * The caller must use the normal interval notifier read flow via
+ * mmu_interval_read_begin() to establish SPTEs for this range.
+ */
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *mni,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long length,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct mmu_notifier_mm *mmn_mm;
+ int ret;
+
+ might_lock(&mm->mmap_sem);
+
+ mmn_mm = smp_load_acquire(&mm->mmu_notifier_mm);
+ if (!mmn_mm || !mmn_mm->has_itree) {
+ ret = mmu_notifier_register(NULL, mm);
+ if (ret)
+ return ret;
+ mmn_mm = mm->mmu_notifier_mm;
+ }
+ return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length,
+ ops);
+}
+EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert);
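/*
 * Editor's sketch, not part of the merge: registering and removing an
 * interval notifier around the function above. The ->invalidate()
 * callback honours non-blockable ranges with a trylock and must update
 * the notifier's sequence under the same lock its readers use;
 * driver_ctx and driver_zap_sptes() are made-up names.
 */
static bool driver_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct driver_ctx *ctx = container_of(mni, struct driver_ctx, mni);

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&ctx->lock);
	else if (!mutex_trylock(&ctx->lock))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	driver_zap_sptes(ctx, range->start, range->end);
	mutex_unlock(&ctx->lock);
	return true;
}

static const struct mmu_interval_notifier_ops driver_mni_ops = {
	.invalidate = driver_invalidate,
};

/* Setup, with a reference on 'mm' held via mmget():
 *	err = mmu_interval_notifier_insert(&ctx->mni, mm, start, length,
 *					   &driver_mni_ops);
 * Teardown, outside any ops callback:
 *	mmu_interval_notifier_remove(&ctx->mni);
 */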
+
+int mmu_interval_notifier_insert_locked(
+ struct mmu_interval_notifier *mni, struct mm_struct *mm,
+ unsigned long start, unsigned long length,
+ const struct mmu_interval_notifier_ops *ops)
+{
+ struct mmu_notifier_mm *mmn_mm;
+ int ret;
+
+ lockdep_assert_held_write(&mm->mmap_sem);
+
+ mmn_mm = mm->mmu_notifier_mm;
+ if (!mmn_mm || !mmn_mm->has_itree) {
+ ret = __mmu_notifier_register(NULL, mm);
+ if (ret)
+ return ret;
+ mmn_mm = mm->mmu_notifier_mm;
+ }
+ return __mmu_interval_notifier_insert(mni, mm, mmn_mm, start, length,
+ ops);
+}
+EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
+
+/**
+ * mmu_interval_notifier_remove - Remove a interval notifier
+ * @mni: Interval notifier to unregister
+ *
+ * This function must be paired with mmu_interval_notifier_insert(). It cannot
+ * be called from any ops callback.
+ *
+ * Once this returns ops callbacks are no longer running on other CPUs and
+ * will not be called in future.
+ */
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *mni)
+{
+ struct mm_struct *mm = mni->mm;
+ struct mmu_notifier_mm *mmn_mm = mm->mmu_notifier_mm;
+ unsigned long seq = 0;
+
+ might_sleep();
+
+ spin_lock(&mmn_mm->lock);
+ if (mn_itree_is_invalidating(mmn_mm)) {
+ /*
+ * remove is being called after insert put this on the
+ * deferred list, but before the deferred list was processed.
+ */
+ if (RB_EMPTY_NODE(&mni->interval_tree.rb)) {
+ hlist_del(&mni->deferred_item);
+ } else {
+ hlist_add_head(&mni->deferred_item,
+ &mmn_mm->deferred_list);
+ seq = mmn_mm->invalidate_seq;
+ }
+ } else {
+ WARN_ON(RB_EMPTY_NODE(&mni->interval_tree.rb));
+ interval_tree_remove(&mni->interval_tree, &mmn_mm->itree);
+ }
+ spin_unlock(&mmn_mm->lock);
+
+ /*
+ * The possible sleep on progress in the invalidation requires the
+ * caller not hold any locks held by invalidation callbacks.
+ */
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+ if (seq)
+ wait_event(mmn_mm->wq,
+ READ_ONCE(mmn_mm->invalidate_seq) != seq);
+
+ /* pairs with mmgrab in mmu_interval_notifier_insert() */
+ mmdrop(mm);
+}
+EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove);
+
/**
* mmu_notifier_synchronize - Ensure all mmu_notifiers are freed
*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7967825f6d33..7a8e84f86831 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -80,6 +80,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (prot_numa) {
struct page *page;
+ /* Avoid TLB flush if possible */
+ if (pte_protnone(oldpte))
+ continue;
+
page = vm_normal_page(vma, addr, oldpte);
if (!page || PageKsm(page))
continue;
@@ -97,10 +101,6 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (page_is_file_cache(page) && PageDirty(page))
continue;
- /* Avoid TLB flush if possible */
- if (pte_protnone(oldpte))
- continue;
-
/*
* Don't mess with PTEs if page is already on the node
* a single-threaded process is running on.
diff --git a/mm/mremap.c b/mm/mremap.c
index 1fc8a29fbe3f..122938dcec15 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -558,7 +558,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
- if (offset_in_page(ret))
+ if (IS_ERR_VALUE(ret))
goto out1;
ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
@@ -706,7 +706,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
vma->vm_pgoff +
((addr - vma->vm_start) >> PAGE_SHIFT),
map_flags);
- if (offset_in_page(new_addr)) {
+ if (IS_ERR_VALUE(new_addr)) {
ret = new_addr;
goto out;
}
diff --git a/mm/nommu.c b/mm/nommu.c
index 7de592058ab4..bd2b4e5ef144 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -648,7 +648,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
if (rb_prev)
prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
- __vma_link_list(mm, vma, prev, parent);
+ __vma_link_list(mm, vma, prev);
}
/*
@@ -684,13 +684,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
/* remove from the MM's tree and list */
rb_erase(&vma->vm_rb, &mm->mm_rb);
- if (vma->vm_prev)
- vma->vm_prev->vm_next = vma->vm_next;
- else
- mm->mmap = vma->vm_next;
-
- if (vma->vm_next)
- vma->vm_next->vm_prev = vma->vm_prev;
+ __vma_unlink_list(mm, vma);
}
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f391c0c4ed1d..4785a8a2040e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5354,6 +5354,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
" min:%lukB"
" low:%lukB"
" high:%lukB"
+ " reserved_highatomic:%luKB"
" active_anon:%lukB"
" inactive_anon:%lukB"
" active_file:%lukB"
@@ -5375,6 +5376,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(min_wmark_pages(zone)),
K(low_wmark_pages(zone)),
K(high_wmark_pages(zone)),
+ K(zone->nr_reserved_highatomic),
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
@@ -6711,7 +6713,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
pgdat_page_ext_init(pgdat);
spin_lock_init(&pgdat->lru_lock);
- lruvec_init(node_lruvec(pgdat));
+ lruvec_init(&pgdat->__lruvec);
}
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
@@ -7988,6 +7990,15 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
return 0;
}
+static void __zone_pcp_update(struct zone *zone)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ pageset_set_high_and_batch(zone,
+ per_cpu_ptr(zone->pageset, cpu));
+}
+
/*
* percpu_pagelist_fraction - changes the pcp->high for each zone on each
* cpu. It is the fraction of total pages in each zone that a hot per cpu
@@ -8019,13 +8030,8 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
goto out;
- for_each_populated_zone(zone) {
- unsigned int cpu;
-
- for_each_possible_cpu(cpu)
- pageset_set_high_and_batch(zone,
- per_cpu_ptr(zone->pageset, cpu));
- }
+ for_each_populated_zone(zone)
+ __zone_pcp_update(zone);
out:
mutex_unlock(&pcp_batch_high_lock);
return ret;
@@ -8261,7 +8267,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* The HWPoisoned page may be not in buddy system, and
* page_count() is not 0.
*/
- if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
+ if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
continue;
if (__PageMovable(page))
@@ -8477,7 +8483,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
}
/* Make sure the range is really isolated. */
- if (test_pages_isolated(outer_start, end, false)) {
+ if (test_pages_isolated(outer_start, end, 0)) {
pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
@@ -8502,6 +8508,107 @@ done:
pfn_max_align_up(end), migratetype);
return ret;
}
+
+static int __alloc_contig_pages(unsigned long start_pfn,
+ unsigned long nr_pages, gfp_t gfp_mask)
+{
+ unsigned long end_pfn = start_pfn + nr_pages;
+
+ return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
+ gfp_mask);
+}
+
+static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long i, end_pfn = start_pfn + nr_pages;
+ struct page *page;
+
+ for (i = start_pfn; i < end_pfn; i++) {
+ page = pfn_to_online_page(i);
+ if (!page)
+ return false;
+
+ if (page_zone(page) != z)
+ return false;
+
+ if (PageReserved(page))
+ return false;
+
+ if (page_count(page) > 0)
+ return false;
+
+ if (PageHuge(page))
+ return false;
+ }
+ return true;
+}
+
+static bool zone_spans_last_pfn(const struct zone *zone,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ unsigned long last_pfn = start_pfn + nr_pages - 1;
+
+ return zone_spans_pfn(zone, last_pfn);
+}
+
+/**
+ * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
+ * @nr_pages: Number of contiguous pages to allocate
+ * @gfp_mask: GFP mask to limit search and used during compaction
+ * @nid: Target node
+ * @nodemask: Mask for other possible nodes
+ *
+ * This routine is a wrapper around alloc_contig_range(). It scans over zones
+ * on an applicable zonelist to find a contiguous pfn range which can then be
+ * tried for allocation with alloc_contig_range(). This routine is intended
+ * for allocation requests which can not be fulfilled with the buddy allocator.
+ *
+ * The allocated memory is always aligned to a page boundary. If nr_pages is a
+ * power of two then the alignment is guaranteed to be to the given nr_pages
+ * (e.g. 1GB request would be aligned to 1GB).
+ *
+ * Allocated pages can be freed with free_contig_range() or by manually calling
+ * __free_page() on each allocated page.
+ *
+ * Return: pointer to contiguous pages on success, or NULL if not successful.
+ */
+struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask)
+{
+ unsigned long ret, pfn, flags;
+ struct zonelist *zonelist;
+ struct zone *zone;
+ struct zoneref *z;
+
+ zonelist = node_zonelist(nid, gfp_mask);
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_zone(gfp_mask), nodemask) {
+ spin_lock_irqsave(&zone->lock, flags);
+
+ pfn = ALIGN(zone->zone_start_pfn, nr_pages);
+ while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
+ if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
+ /*
+ * We release the zone lock here because
+ * alloc_contig_range() will also lock the zone
+ * at some point. If there's an allocation
+ * spinning on this lock, it may win the race
+ * and cause alloc_contig_range() to fail...
+ */
+ spin_unlock_irqrestore(&zone->lock, flags);
+ ret = __alloc_contig_pages(pfn, nr_pages,
+ gfp_mask);
+ if (!ret)
+ return pfn_to_page(pfn);
+ spin_lock_irqsave(&zone->lock, flags);
+ }
+ pfn += nr_pages;
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+ return NULL;
+}
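+
+/*
+ * Editor's sketch, not part of the merge: grabbing a physically
+ * contiguous, naturally aligned 1 GiB chunk (assuming 4 KiB pages) with
+ * the helper above, and handing it back later. Requires
+ * CONFIG_CONTIG_ALLOC; grab_1g_chunk() is a made-up name.
+ *
+ * static struct page *grab_1g_chunk(int nid)
+ * {
+ *	unsigned long nr_pages = SZ_1G / PAGE_SIZE;
+ *
+ *	return alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_NOWARN,
+ *				  nid, NULL);
+ * }
+ *
+ * ... later: free_contig_range(page_to_pfn(page), SZ_1G / PAGE_SIZE);
+ */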
#endif /* CONFIG_CONTIG_ALLOC */
void free_contig_range(unsigned long pfn, unsigned int nr_pages)
@@ -8523,11 +8630,8 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
*/
void __meminit zone_pcp_update(struct zone *zone)
{
- unsigned cpu;
mutex_lock(&pcp_batch_high_lock);
- for_each_possible_cpu(cpu)
- pageset_set_high_and_batch(zone,
- per_cpu_ptr(zone->pageset, cpu));
+ __zone_pcp_update(zone);
mutex_unlock(&pcp_batch_high_lock);
}
@@ -8560,7 +8664,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *page;
struct zone *zone;
- unsigned int order, i;
+ unsigned int order;
unsigned long pfn;
unsigned long flags;
unsigned long offlined_pages = 0;
@@ -8588,7 +8692,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
*/
if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
pfn++;
- SetPageReserved(page);
offlined_pages++;
continue;
}
@@ -8602,8 +8705,6 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
pfn, 1 << order, end_pfn);
#endif
del_page_from_free_area(page, &zone->free_area[order]);
- for (i = 0; i < (1 << order); i++)
- SetPageReserved((page+i));
pfn += (1 << order);
}
spin_unlock_irqrestore(&zone->lock, flags);
diff --git a/mm/page_io.c b/mm/page_io.c
index 60a66a58b9bf..3a198deb8bb1 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -22,6 +22,7 @@
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
+#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <asm/pgtable.h>
@@ -354,10 +355,19 @@ int swap_readpage(struct page *page, bool synchronous)
struct swap_info_struct *sis = page_swap_info(page);
blk_qc_t qc;
struct gendisk *disk;
+ unsigned long pflags;
VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageUptodate(page), page);
+
+ /*
+ * Count submission time as memory stall. When the device is congested,
+ * or the submitting cgroup IO-throttled, submission can be a
+ * significant part of overall IO time.
+ */
+ psi_memstall_enter(&pflags);
+
if (frontswap_load(page) == 0) {
SetPageUptodate(page);
unlock_page(page);
@@ -371,7 +381,7 @@ int swap_readpage(struct page *page, bool synchronous)
ret = mapping->a_ops->readpage(swap_file, page);
if (!ret)
count_vm_event(PSWPIN);
- return ret;
+ goto out;
}
ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
@@ -382,7 +392,7 @@ int swap_readpage(struct page *page, bool synchronous)
}
count_vm_event(PSWPIN);
- return 0;
+ goto out;
}
ret = 0;
@@ -418,6 +428,7 @@ int swap_readpage(struct page *page, bool synchronous)
bio_put(bio);
out:
+ psi_memstall_leave(&pflags);
return ret;
}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 89c19c0feadb..04ee1663cdbe 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -168,7 +168,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* @migratetype: Migrate type to set in error recovery.
* @flags: The following flags are allowed (they can be combined in
* a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
+ * MEMORY_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages
* REPORT_FAILURE - report details about the failure to
* isolate the range
*
@@ -257,7 +258,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
*/
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
- bool skip_hwpoisoned_pages)
+ int flags)
{
struct page *page;
@@ -274,7 +275,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
* simple way to verify that as VM_BUG_ON(), though.
*/
pfn += 1 << page_order(page);
- else if (skip_hwpoisoned_pages && PageHWPoison(page))
+ else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
/* A HWPoisoned page cannot be also PageBuddy */
pfn++;
else
@@ -286,7 +287,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- bool skip_hwpoisoned_pages)
+ int isol_flags)
{
unsigned long pfn, flags;
struct page *page;
@@ -308,8 +309,7 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
/* Check all pages are free or marked as ISOLATED */
zone = page_zone(page);
spin_lock_irqsave(&zone->lock, flags);
- pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
- skip_hwpoisoned_pages);
+ pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
spin_unlock_irqrestore(&zone->lock, flags);
trace_test_pages_isolated(start_pfn, end_pfn, pfn);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index d48c2a986ea3..ea0b9e606ad1 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -10,8 +10,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte_t *pte;
int err = 0;
const struct mm_walk_ops *ops = walk->ops;
+ spinlock_t *ptl;
- pte = pte_offset_map(pmd, addr);
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
for (;;) {
err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
if (err)
@@ -22,7 +23,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte++;
}
- pte_unmap(pte);
+ pte_unmap_unlock(pte, ptl);
return err;
}
@@ -253,13 +254,23 @@ static int __walk_page_range(unsigned long start, unsigned long end,
{
int err = 0;
struct vm_area_struct *vma = walk->vma;
+ const struct mm_walk_ops *ops = walk->ops;
+
+ if (vma && ops->pre_vma) {
+ err = ops->pre_vma(start, end, walk);
+ if (err)
+ return err;
+ }
if (vma && is_vm_hugetlb_page(vma)) {
- if (walk->ops->hugetlb_entry)
+ if (ops->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
err = walk_pgd_range(start, end, walk);
+ if (vma && ops->post_vma)
+ ops->post_vma(walk);
+
return err;
}
@@ -290,6 +301,11 @@ static int __walk_page_range(unsigned long start, unsigned long end,
* its vm_flags. walk_page_test() and @ops->test_walk() are used for this
* purpose.
*
+ * If operations need to be staged before and committed after a vma is walked,
+ * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
+ * since it is intended to handle commit-type operations, can't return any
+ * errors.
+ *
* struct mm_walk keeps current values of some common data like vma and pmd,
* which are useful for the access from callbacks. If you want to pass some
* caller-specific data to callbacks, @private should be helpful.
@@ -376,3 +392,80 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
return err;
return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
+
+/**
+ * walk_page_mapping - walk all memory areas mapped into a struct address_space.
+ * @mapping: Pointer to the struct address_space
+ * @first_index: First page offset in the address_space
+ * @nr: Number of incremental page offsets to cover
+ * @ops: operation to call during the walk
+ * @private: private data for callbacks' usage
+ *
+ * This function walks all memory areas mapped into a struct address_space.
+ * The walk is limited to only the given page-size index range, but if
+ * the index boundaries cross a huge page-table entry, that entry will be
+ * included.
+ *
+ * Also see walk_page_range() for additional information.
+ *
+ * Locking:
+ * This function can't require that the struct mm_struct::mmap_sem is held,
+ * since @mapping may be mapped by multiple processes. Instead
+ * @mapping->i_mmap_rwsem must be held. This might have implications in the
+ * callbacks, and it's up to the caller to ensure that the
+ * struct mm_struct::mmap_sem is not needed.
+ *
+ * Also this means that a caller can't rely on the struct
+ * vm_area_struct::vm_flags to be constant across a call,
+ * except for immutable flags. Callers requiring this shouldn't use
+ * this function.
+ *
+ * Return: 0 on success, negative error code on failure, positive number on
+ * caller defined premature termination.
+ */
+int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+ pgoff_t nr, const struct mm_walk_ops *ops,
+ void *private)
+{
+ struct mm_walk walk = {
+ .ops = ops,
+ .private = private,
+ };
+ struct vm_area_struct *vma;
+ pgoff_t vba, vea, cba, cea;
+ unsigned long start_addr, end_addr;
+ int err = 0;
+
+ lockdep_assert_held(&mapping->i_mmap_rwsem);
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
+ first_index + nr - 1) {
+ /* Clip to the vma */
+ vba = vma->vm_pgoff;
+ vea = vba + vma_pages(vma);
+ cba = first_index;
+ cba = max(cba, vba);
+ cea = first_index + nr;
+ cea = min(cea, vea);
+
+ start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
+ end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
+ if (start_addr >= end_addr)
+ continue;
+
+ walk.vma = vma;
+ walk.mm = vma->vm_mm;
+
+ err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
+ if (err > 0) {
+ err = 0;
+ break;
+ } else if (err < 0)
+ break;
+
+ err = __walk_page_range(start_addr, end_addr, &walk);
+ if (err)
+ break;
+ }
+
+ return err;
+}
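/*
 * Editor's sketch, not part of the merge: visiting every PTE that maps
 * the first 16 pages of a file, bracketing each VMA with the new
 * pre_vma()/post_vma() hooks. The example_* names are made up.
 */
static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	/* The PTL for this PTE is held by walk_pte_range(). */
	return 0;
}

static int example_pre_vma(unsigned long start, unsigned long end,
			   struct mm_walk *walk)
{
	return 0;	/* nonzero aborts the walk for this range */
}

static void example_post_vma(struct mm_walk *walk)
{
	/* commit-type cleanup; cannot fail */
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry	= example_pte_entry,
	.pre_vma	= example_pre_vma,
	.post_vma	= example_post_vma,
};

/* With mapping->i_mmap_rwsem held:
 *	err = walk_page_mapping(mapping, 0, 16, &example_walk_ops, NULL);
 */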
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 532c29276fce..3d7c01e76efc 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -24,18 +24,27 @@ void pgd_clear_bad(pgd_t *pgd)
pgd_clear(pgd);
}
+#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
p4d_ERROR(*p4d);
p4d_clear(p4d);
}
+#endif
+#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
pud_ERROR(*pud);
pud_clear(pud);
}
+#endif
+/*
+ * Note that the pmd variant below can't be stub'ed out just as for p4d/pud
+ * above. pmd folding is special and typically pmd_* macros refer to upper
+ * level even when folded
+ */
void pmd_clear_bad(pmd_t *pmd)
{
pmd_ERROR(*pmd);
diff --git a/mm/rmap.c b/mm/rmap.c
index 0c7b2a9400d4..b3e381919835 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -251,18 +251,37 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
*
- * If dst->anon_vma is NULL this function tries to find and reuse existing
- * anon_vma which has no vmas and only one child anon_vma. This prevents
- * degradation of anon_vma hierarchy to endless linear chain in case of
- * constantly forking task. On the other hand, an anon_vma with more than one
- * child isn't reused even if there was no alive vma, thus rmap walker has a
- * good chance of avoiding scanning the whole hierarchy when it searches where
- * page is mapped.
+ * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and
+ * anon_vma_fork(). The first three want an exact copy of src, while the last
+ * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
+ * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
+ * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
+ *
+ * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
+ * and reuse existing anon_vma which has no vmas and only one child anon_vma.
+ * This prevents degradation of anon_vma hierarchy to endless linear chain in
+ * case of constantly forking task. On the other hand, an anon_vma with more
+ * than one child isn't reused even if there was no alive vma, thus rmap
+ * walker has a good chance of avoiding scanning the whole hierarchy when it
+ * searches where page is mapped.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
+ struct vm_area_struct *prev = dst->vm_prev, *pprev = src->vm_prev;
+
+ /*
+ * If the parent shares an anon_vma with its vm_prev, keep the sharing in
+ * the child.
+ *
+ * 1. Parent has vm_prev, which implies we have vm_prev.
+ * 2. Parent and its vm_prev have the same anon_vma.
+ */
+ if (!dst->anon_vma && src->anon_vma &&
+ pprev && pprev->anon_vma == src->anon_vma)
+ dst->anon_vma = prev->anon_vma;
+
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
@@ -287,8 +306,8 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
* will always reuse it. Root anon_vma is never reused:
* it has self-parent reference and at least one child.
*/
- if (!dst->anon_vma && anon_vma != src->anon_vma &&
- anon_vma->degree < 2)
+ if (!dst->anon_vma && src->anon_vma &&
+ anon_vma != src->anon_vma && anon_vma->degree < 2)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
@@ -458,9 +477,10 @@ void __init anon_vma_init(void)
* chain and verify that the page in question is indeed mapped in it
* [ something equivalent to page_mapped_in_vma() ].
*
- * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
- * that the anon_vma pointer from page->mapping is valid if there is a
- * mapcount, we can dereference the anon_vma after observing those.
+ * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
+ * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
+ * if there is a mapcount, we can dereference the anon_vma after observing
+ * those.
*/
struct anon_vma *page_get_anon_vma(struct page *page)
{
@@ -1055,7 +1075,6 @@ static void __page_set_anon_rmap(struct page *page,
static void __page_check_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
-#ifdef CONFIG_DEBUG_VM
/*
* The page's anon-rmap details (mapping and index) are guaranteed to
* be set up correctly at this point.
@@ -1068,9 +1087,9 @@ static void __page_check_anon_rmap(struct page *page,
* are initially only visible via the pagetables, and the pte is locked
* over the call to page_add_new_anon_rmap.
*/
- BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
- BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
-#endif
+ VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
+ VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
+ page);
}
/**
@@ -1273,12 +1292,20 @@ static void page_remove_anon_compound_rmap(struct page *page)
if (TestClearPageDoubleMap(page)) {
/*
* Subpages can be mapped with PTEs too. Check how many of
- * themi are still mapped.
+ * them are still mapped.
*/
for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
if (atomic_add_negative(-1, &page[i]._mapcount))
nr++;
}
+
+ /*
+ * Queue the page for deferred split if at least one small
+ * page of the compound page is unmapped, but at least one
+ * small page is still mapped.
+ */
+ if (nr && nr < HPAGE_PMD_NR)
+ deferred_split_huge_page(page);
} else {
nr = HPAGE_PMD_NR;
}
@@ -1286,10 +1313,8 @@ static void page_remove_anon_compound_rmap(struct page *page)
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
- if (nr) {
+ if (nr)
__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
- deferred_split_huge_page(page);
- }
}
/**
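
The DoubleMap branch above decides whether to queue a deferred split by
counting the subpages that became unmapped. A standalone sketch of that
accounting, assuming a plain array of biased mapcounts in place of the
kernel's page[i]._mapcount:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SUBPAGES 512	/* HPAGE_PMD_NR on x86-64 with 4K pages */

/* mapcount[i] mirrors page[i]._mapcount: biased by -1, so -1 == unmapped. */
static bool drop_pmd_mapping(atomic_int mapcount[])
{
	int nr = 0;

	for (int i = 0; i < SUBPAGES; i++)
		if (atomic_fetch_sub(&mapcount[i], 1) - 1 < 0)
			nr++;	/* this subpage is now unmapped */

	/* Split only a *partially* mapped compound page: at least one
	 * subpage unmapped, at least one still mapped. */
	return nr && nr < SUBPAGES;
}

int main(void)
{
	static atomic_int mc[SUBPAGES];

	for (int i = 0; i < SUBPAGES; i++)
		atomic_init(&mc[i], i < 3 ? 1 : 0);	/* 3 subpages also PTE-mapped */

	printf("defer split: %d\n", drop_pmd_mapping(mc));
	return 0;
}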
diff --git a/mm/shmem.c b/mm/shmem.c
index 220be9fa2c41..165fa6332993 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1369,7 +1369,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (list_empty(&info->swaplist))
list_add(&info->swaplist, &shmem_swaplist);
- if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+ if (add_to_swap_cache(page, swap,
+ __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN) == 0) {
spin_lock_irq(&info->lock);
shmem_recalc_inode(inode);
info->swapped++;
@@ -2022,16 +2023,14 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
shmem_falloc->waitq &&
vmf->pgoff >= shmem_falloc->start &&
vmf->pgoff < shmem_falloc->next) {
+ struct file *fpin;
wait_queue_head_t *shmem_falloc_waitq;
DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
ret = VM_FAULT_NOPAGE;
- if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* It's polite to up mmap_sem if we can */
- up_read(&vma->vm_mm->mmap_sem);
+ fpin = maybe_unlock_mmap_for_io(vmf, NULL);
+ if (fpin)
ret = VM_FAULT_RETRY;
- }
shmem_falloc_waitq = shmem_falloc->waitq;
prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
@@ -2049,6 +2048,9 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
spin_lock(&inode->i_lock);
finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
spin_unlock(&inode->i_lock);
+
+ if (fpin)
+ fput(fpin);
return ret;
}
spin_unlock(&inode->i_lock);
@@ -2213,11 +2215,14 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return -EPERM;
/*
- * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
- * read-only mapping, take care to not allow mprotect to revert
- * protections.
+ * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+ * MAP_SHARED and read-only, take care to not allow mprotect to
+ * revert protections on such mappings. Do this only for shared
+	 * mappings. For private mappings, we don't need to mask
+	 * VM_MAYWRITE as we still want them to be COW-writable.
*/
- vma->vm_flags &= ~(VM_MAYWRITE);
+ if (vma->vm_flags & VM_SHARED)
+ vma->vm_flags &= ~(VM_MAYWRITE);
}
file_accessed(file);
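
With the shmem_mmap() change applied, the effect is visible from userspace:
on a future-write-sealed memfd, a shared read-only mapping must stay
read-only, while a private mapping remains COW-writable. A minimal demo
(Linux 5.1+, error handling omitted):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010
#endif

int main(void)
{
	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);

	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);

	/* Shared mapping: VM_MAYWRITE is cleared, so upgrading to
	 * PROT_WRITE must fail. */
	char *shared = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	printf("mprotect shared:  %d (expect -1)\n",
	       mprotect(shared, 4096, PROT_READ | PROT_WRITE));

	/* Private mapping: VM_MAYWRITE is kept, still COW-writable. */
	char *priv = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	printf("mprotect private: %d (expect 0)\n",
	       mprotect(priv, 4096, PROT_READ | PROT_WRITE));
	return 0;
}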
@@ -2742,7 +2747,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
}
shmem_falloc.waitq = &shmem_falloc_waitq;
- shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
spin_lock(&inode->i_lock);
inode->i_private = &shmem_falloc;
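
The (u64) cast matters because right-shifting a negative loff_t
sign-extends and yields a bogus huge page index, whereas shifting the
unsigned representation gives the intended logical shift. A standalone
illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	int64_t unmap_start = -4096;	/* an offset gone negative */

	printf("signed >> : %lld\n", (long long)(unmap_start >> PAGE_SHIFT));
	printf("(u64)  >> : %llu\n",
	       (unsigned long long)((uint64_t)unmap_start >> PAGE_SHIFT));
	return 0;	/* prints -1 vs 4503599627370495 */
}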
@@ -3928,7 +3933,7 @@ out2:
static ssize_t shmem_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- int values[] = {
+ static const int values[] = {
SHMEM_HUGE_ALWAYS,
SHMEM_HUGE_WITHIN_SIZE,
SHMEM_HUGE_ADVISE,
diff --git a/mm/slab.c b/mm/slab.c
index 66e5d8032bae..f1e1840af533 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1247,9 +1247,10 @@ void __init kmem_cache_init(void)
* structures first. Without this, further allocations will bug.
*/
kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
- kmalloc_info[INDEX_NODE].name,
- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
- 0, kmalloc_size(INDEX_NODE));
+ kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
+ kmalloc_info[INDEX_NODE].size,
+ ARCH_KMALLOC_FLAGS, 0,
+ kmalloc_info[INDEX_NODE].size);
slab_state = PARTIAL_NODE;
setup_kmalloc_cache_index_table();
diff --git a/mm/slab.h b/mm/slab.h
index b2b01694dc43..7e94700aa78c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -139,7 +139,7 @@ extern struct kmem_cache *kmem_cache;
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
- const char *name;
+ const char *name[NR_KMALLOC_TYPES];
unsigned int size;
} kmalloc_info[];
@@ -369,7 +369,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
if (ret)
goto out;
- lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+ lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
/* transer try_charge() page references to kmem_cache */
@@ -393,7 +393,7 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
rcu_read_lock();
memcg = READ_ONCE(s->memcg_params.memcg);
if (likely(!mem_cgroup_is_root(memcg))) {
- lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+ lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
memcg_kmem_uncharge_memcg(page, order, memcg);
} else {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f9fb27b4c843..8afa188f6e20 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1139,26 +1139,56 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
return kmalloc_caches[kmalloc_type(flags)][index];
}
+#ifdef CONFIG_ZONE_DMA
+#define INIT_KMALLOC_INFO(__size, __short_size) \
+{ \
+ .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
+ .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
+ .name[KMALLOC_DMA] = "dma-kmalloc-" #__short_size, \
+ .size = __size, \
+}
+#else
+#define INIT_KMALLOC_INFO(__size, __short_size) \
+{ \
+ .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
+ .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
+ .size = __size, \
+}
+#endif
+
/*
* kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
* kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
* kmalloc-67108864.
*/
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
- {NULL, 0}, {"kmalloc-96", 96},
- {"kmalloc-192", 192}, {"kmalloc-8", 8},
- {"kmalloc-16", 16}, {"kmalloc-32", 32},
- {"kmalloc-64", 64}, {"kmalloc-128", 128},
- {"kmalloc-256", 256}, {"kmalloc-512", 512},
- {"kmalloc-1k", 1024}, {"kmalloc-2k", 2048},
- {"kmalloc-4k", 4096}, {"kmalloc-8k", 8192},
- {"kmalloc-16k", 16384}, {"kmalloc-32k", 32768},
- {"kmalloc-64k", 65536}, {"kmalloc-128k", 131072},
- {"kmalloc-256k", 262144}, {"kmalloc-512k", 524288},
- {"kmalloc-1M", 1048576}, {"kmalloc-2M", 2097152},
- {"kmalloc-4M", 4194304}, {"kmalloc-8M", 8388608},
- {"kmalloc-16M", 16777216}, {"kmalloc-32M", 33554432},
- {"kmalloc-64M", 67108864}
+ INIT_KMALLOC_INFO(0, 0),
+ INIT_KMALLOC_INFO(96, 96),
+ INIT_KMALLOC_INFO(192, 192),
+ INIT_KMALLOC_INFO(8, 8),
+ INIT_KMALLOC_INFO(16, 16),
+ INIT_KMALLOC_INFO(32, 32),
+ INIT_KMALLOC_INFO(64, 64),
+ INIT_KMALLOC_INFO(128, 128),
+ INIT_KMALLOC_INFO(256, 256),
+ INIT_KMALLOC_INFO(512, 512),
+ INIT_KMALLOC_INFO(1024, 1k),
+ INIT_KMALLOC_INFO(2048, 2k),
+ INIT_KMALLOC_INFO(4096, 4k),
+ INIT_KMALLOC_INFO(8192, 8k),
+ INIT_KMALLOC_INFO(16384, 16k),
+ INIT_KMALLOC_INFO(32768, 32k),
+ INIT_KMALLOC_INFO(65536, 64k),
+ INIT_KMALLOC_INFO(131072, 128k),
+ INIT_KMALLOC_INFO(262144, 256k),
+ INIT_KMALLOC_INFO(524288, 512k),
+ INIT_KMALLOC_INFO(1048576, 1M),
+ INIT_KMALLOC_INFO(2097152, 2M),
+ INIT_KMALLOC_INFO(4194304, 4M),
+ INIT_KMALLOC_INFO(8388608, 8M),
+ INIT_KMALLOC_INFO(16777216, 16M),
+ INIT_KMALLOC_INFO(33554432, 32M),
+ INIT_KMALLOC_INFO(67108864, 64M)
};
/*
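
INIT_KMALLOC_INFO builds every cache name at compile time by stringizing
the short size with the preprocessor's # operator and filling a per-type
name array with designated initializers. A self-contained sketch of the
same technique (simplified two-type enum, not the kernel's):

#include <stdio.h>

enum cache_type { T_NORMAL, T_RECLAIM, NR_TYPES };

struct cache_info {
	const char *name[NR_TYPES];
	unsigned int size;
};

#define INIT_CACHE_INFO(__size, __short_size)			\
{								\
	.name[T_NORMAL]  = "kmalloc-" #__short_size,		\
	.name[T_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	.size = __size,						\
}

static const struct cache_info info[] = {
	INIT_CACHE_INFO(1024, 1k),
	INIT_CACHE_INFO(1048576, 1M),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(info) / sizeof(info[0]); i++)
		printf("%u -> %s / %s\n", info[i].size,
		       info[i].name[T_NORMAL], info[i].name[T_RECLAIM]);
	return 0;
}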
@@ -1208,36 +1238,14 @@ void __init setup_kmalloc_cache_index_table(void)
}
}
-static const char *
-kmalloc_cache_name(const char *prefix, unsigned int size)
-{
-
- static const char units[3] = "\0kM";
- int idx = 0;
-
- while (size >= 1024 && (size % 1024 == 0)) {
- size /= 1024;
- idx++;
- }
-
- return kasprintf(GFP_NOWAIT, "%s-%u%c", prefix, size, units[idx]);
-}
-
static void __init
-new_kmalloc_cache(int idx, int type, slab_flags_t flags)
+new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
- const char *name;
-
- if (type == KMALLOC_RECLAIM) {
+ if (type == KMALLOC_RECLAIM)
flags |= SLAB_RECLAIM_ACCOUNT;
- name = kmalloc_cache_name("kmalloc-rcl",
- kmalloc_info[idx].size);
- BUG_ON(!name);
- } else {
- name = kmalloc_info[idx].name;
- }
- kmalloc_caches[type][idx] = create_kmalloc_cache(name,
+ kmalloc_caches[type][idx] = create_kmalloc_cache(
+ kmalloc_info[idx].name[type],
kmalloc_info[idx].size, flags, 0,
kmalloc_info[idx].size);
}
@@ -1249,7 +1257,8 @@ new_kmalloc_cache(int idx, int type, slab_flags_t flags)
*/
void __init create_kmalloc_caches(slab_flags_t flags)
{
- int i, type;
+ int i;
+ enum kmalloc_cache_type type;
for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
@@ -1278,12 +1287,10 @@ void __init create_kmalloc_caches(slab_flags_t flags)
struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
if (s) {
- unsigned int size = kmalloc_size(i);
- const char *n = kmalloc_cache_name("dma-kmalloc", size);
-
- BUG_ON(!n);
kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
- n, size, SLAB_CACHE_DMA | flags, 0, 0);
+ kmalloc_info[i].name[KMALLOC_DMA],
+ kmalloc_info[i].size,
+ SLAB_CACHE_DMA | flags, 0, 0);
}
}
#endif
diff --git a/mm/slub.c b/mm/slub.c
index e72e802fc569..d11389710b12 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -93,9 +93,7 @@
* minimal so we rely on the page allocators per cpu caches for
* fast frees and allocs.
*
- * Overloading of page flags that are otherwise used for LRU management.
- *
- * PageActive The slab is frozen and exempt from list processing.
+ * page->frozen The slab is frozen and exempt from list processing.
* This means that the slab is dedicated to a purpose
* such as satisfying allocations for a specific
* processor. Objects may be freed in the slab while
@@ -111,7 +109,7 @@
* free objects in addition to the regular freelist
* that requires the slab lock.
*
- * PageError Slab requires special handling due to debug
+ * SLAB_DEBUG_FLAGS Slab requires special handling due to debug
* options set. This moves slab handling out of
* the fast path and disables lockless freelists.
*/
@@ -736,6 +734,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
{
u8 *fault;
u8 *end;
+ u8 *addr = page_address(page);
metadata_access_enable();
fault = memchr_inv(start, value, bytes);
@@ -748,8 +747,9 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
end--;
slab_bug(s, "%s overwritten", what);
- pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
- fault, end - 1, fault[0], value);
+ pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+ fault, end - 1, fault - addr,
+ fault[0], value);
print_trailer(s, page, object);
restore_bytes(s, what, value, fault, end);
@@ -844,7 +844,8 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
while (end > fault && end[-1] == POISON_INUSE)
end--;
- slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
+ slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
+ fault, end - 1, fault - start);
print_section(KERN_ERR, "Padding ", pad, remainder);
restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
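
Both reports above print the fault offset with %tu, the printf length
modifier for ptrdiff_t, so the position of the corruption inside the slab
is visible at a glance. A quick userspace analogue:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[64];

	memset(buf, 0x5a, sizeof(buf));
	buf[17] = 0x42;				/* simulated overwrite */

	unsigned char *fault = memchr(buf, 0x42, sizeof(buf));
	if (fault)
		printf("%p @offset=%tu first byte 0x%x\n",
		       (void *)fault, fault - buf, fault[0]);
	return 0;
}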
@@ -4383,31 +4384,26 @@ static int count_total(struct page *page)
#endif
#ifdef CONFIG_SLUB_DEBUG
-static int validate_slab(struct kmem_cache *s, struct page *page,
+static void validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
void *p;
void *addr = page_address(page);
- if (!check_slab(s, page) ||
- !on_freelist(s, page, NULL))
- return 0;
+ if (!check_slab(s, page) || !on_freelist(s, page, NULL))
+ return;
/* Now we know that a valid freelist exists */
bitmap_zero(map, page->objects);
get_map(s, page, map);
for_each_object(p, s, addr, page->objects) {
- if (test_bit(slab_index(p, s, addr), map))
- if (!check_object(s, page, p, SLUB_RED_INACTIVE))
- return 0;
- }
+ u8 val = test_bit(slab_index(p, s, addr), map) ?
+ SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
- for_each_object(p, s, addr, page->objects)
- if (!test_bit(slab_index(p, s, addr), map))
- if (!check_object(s, page, p, SLUB_RED_ACTIVE))
- return 0;
- return 1;
+ if (!check_object(s, page, p, val))
+ break;
+ }
}
static void validate_slab_slab(struct kmem_cache *s, struct page *page,
diff --git a/mm/sparse.c b/mm/sparse.c
index f6891c1992b1..b20ab7cdac86 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -458,8 +458,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
if (map)
return map;
- map = memblock_alloc_try_nid(size,
- PAGE_SIZE, addr,
+ map = memblock_alloc_try_nid_raw(size, size, addr,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
if (!map)
panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
@@ -482,10 +481,13 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
{
phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? */
- sparsemap_buf =
- memblock_alloc_try_nid_raw(size, PAGE_SIZE,
- addr,
- MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+ /*
+	 * The pre-allocated buffer is mainly used by __populate_section_memmap
+	 * and we want it properly aligned to the section size - this is
+	 * especially the case for VMEMMAP, which maps memmap to PMDs.
+ */
+ sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
+ addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
sparsemap_buf_end = sparsemap_buf + size;
}
@@ -647,7 +649,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static struct page *populate_section_memmap(unsigned long pfn,
+static struct page * __meminit populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
return __populate_section_memmap(pfn, nr_pages, nid, altmap);
@@ -669,7 +671,7 @@ static void free_map_bootmem(struct page *memmap)
vmemmap_free(start, end, NULL);
}
#else
-struct page *populate_section_memmap(unsigned long pfn,
+struct page * __meminit populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
struct page *page, *ret;
diff --git a/mm/swap.c b/mm/swap.c
index 38c3fa4308e2..5341ae93861f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -373,9 +373,16 @@ static void __lru_cache_activate_page(struct page *page)
void mark_page_accessed(struct page *page)
{
page = compound_head(page);
- if (!PageActive(page) && !PageUnevictable(page) &&
- PageReferenced(page)) {
+ if (!PageReferenced(page)) {
+ SetPageReferenced(page);
+ } else if (PageUnevictable(page)) {
+ /*
+		 * Unevictable pages are on the "LRU_UNEVICTABLE" list, but
+		 * this list is never rotated or maintained, so marking an
+		 * unevictable page accessed has no effect.
+ */
+ } else if (!PageActive(page)) {
/*
* If the page is on the LRU, queue it for activation via
* activate_page_pvecs. Otherwise, assume the page is on a
@@ -389,8 +396,6 @@ void mark_page_accessed(struct page *page)
ClearPageReferenced(page);
if (page_is_file_cache(page))
workingset_activation(page);
- } else if (!PageReferenced(page)) {
- SetPageReferenced(page);
}
if (page_is_idle(page))
clear_page_idle(page);
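
The rewrite makes the aging order explicit: a first touch marks the page
referenced, a second touch activates it, and unevictable pages are left
alone. A tiny state-machine sketch with plain bools standing in for the
kernel's page flags:

#include <stdbool.h>
#include <stdio.h>

struct page_state {
	bool referenced;
	bool active;
	bool unevictable;
};

static void mark_accessed(struct page_state *p)
{
	if (!p->referenced) {
		p->referenced = true;		/* first touch */
	} else if (p->unevictable) {
		/* LRU_UNEVICTABLE is never rotated: nothing to do */
	} else if (!p->active) {
		p->active = true;		/* second touch activates */
		p->referenced = false;
	}
}

int main(void)
{
	struct page_state p = { 0 };

	mark_accessed(&p);	/* -> referenced */
	mark_accessed(&p);	/* -> active, referenced cleared */
	printf("active=%d referenced=%d\n", p.active, p.referenced);
	return 0;
}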
@@ -708,9 +713,10 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
*/
void lru_add_drain_all(void)
{
+ static seqcount_t seqcount = SEQCNT_ZERO(seqcount);
static DEFINE_MUTEX(lock);
static struct cpumask has_work;
- int cpu;
+ int cpu, seq;
/*
* Make sure nobody triggers this path before mm_percpu_wq is fully
@@ -719,7 +725,19 @@ void lru_add_drain_all(void)
if (WARN_ON(!mm_percpu_wq))
return;
+ seq = raw_read_seqcount_latch(&seqcount);
+
mutex_lock(&lock);
+
+ /*
+	 * Piggyback on a drain that started and finished while we waited for
+	 * the lock: all pages pending when we entered have been drained.
+ */
+ if (__read_seqcount_retry(&seqcount, seq))
+ goto done;
+
+ raw_write_seqcount_latch(&seqcount);
+
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
@@ -740,6 +758,7 @@ void lru_add_drain_all(void)
for_each_cpu(cpu, &has_work)
flush_work(&per_cpu(lru_add_drain_work, cpu));
+done:
mutex_unlock(&lock);
}
#else
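
The pattern above can be reduced to a generation counter: if a full drain
started and finished while we slept on the mutex, our pending pages are
already gone and the expensive walk can be skipped. A sketch using a plain
atomic counter in place of the kernel's latch seqcount:

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_uint drain_gen;

static void drain_all(void (*drain)(void))
{
	unsigned int gen = atomic_load(&drain_gen);

	pthread_mutex_lock(&drain_lock);

	/* A holder who advanced the generation after we sampled it has
	 * already drained everything pending at the time we entered. */
	if (atomic_load(&drain_gen) != gen)
		goto out;

	atomic_fetch_add(&drain_gen, 1);
	drain();			/* the expensive full drain */
out:
	pthread_mutex_unlock(&drain_lock);
}

static void noop(void) { }

int main(void)
{
	drain_all(noop);
	return 0;
}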
diff --git a/mm/swapfile.c b/mm/swapfile.c
index dab43523afdd..bb3261d45b6a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2887,6 +2887,13 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
error = set_blocksize(p->bdev, PAGE_SIZE);
if (error < 0)
return error;
+ /*
+		 * Zoned block devices contain zones that must be written
+		 * sequentially.  Hence zoned block devices are not
+ * suitable for swapping. Disallow them here.
+ */
+ if (blk_queue_is_zoned(p->bdev->bd_queue))
+ return -EINVAL;
p->flags |= SWP_BLKDEV;
} else if (S_ISREG(inode->i_mode)) {
p->bdev = inode->i_sb->s_bdev;
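
From userspace the same property is visible in sysfs: the queue/zoned
attribute reads "none" for regular devices and "host-aware" or
"host-managed" for zoned ones. A sketch that checks it before attempting
swapon ("sda" is just an example device):

#include <stdio.h>
#include <string.h>

static int is_zoned(const char *disk)
{
	char path[256], buf[32] = "";
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/zoned", disk);
	f = fopen(path, "r");
	if (!f)
		return 0;	/* attribute absent on old kernels */
	if (fgets(buf, sizeof(buf), f))
		buf[strcspn(buf, "\n")] = '\0';
	fclose(f);
	return strcmp(buf, "none") != 0;
}

int main(void)
{
	printf("sda zoned: %d\n", is_zoned("sda"));
	return 0;
}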
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index c7ae74ce5ff3..1b0d7abad1d4 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -18,6 +18,36 @@
#include <asm/tlbflush.h>
#include "internal.h"
+static __always_inline
+struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
+ unsigned long dst_start,
+ unsigned long len)
+{
+ /*
+ * Make sure that the dst range is both valid and fully within a
+ * single existing vma.
+ */
+ struct vm_area_struct *dst_vma;
+
+ dst_vma = find_vma(dst_mm, dst_start);
+ if (!dst_vma)
+ return NULL;
+
+ if (dst_start < dst_vma->vm_start ||
+ dst_start + len > dst_vma->vm_end)
+ return NULL;
+
+ /*
+ * Check the vma is registered in uffd, this is required to
+ * enforce the VM_MAYWRITE check done at uffd registration
+ * time.
+ */
+ if (!dst_vma->vm_userfaultfd_ctx.ctx)
+ return NULL;
+
+ return dst_vma;
+}
+
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
@@ -60,7 +90,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
/*
* The memory barrier inside __SetPageUptodate makes sure that
- * preceeding stores to the page contents become visible before
+ * preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
@@ -184,7 +214,6 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
unsigned long src_addr, dst_addr;
long copied;
struct page *page;
- struct hstate *h;
unsigned long vma_hpagesize;
pgoff_t idx;
u32 hash;
@@ -221,20 +250,9 @@ retry:
*/
if (!dst_vma) {
err = -ENOENT;
- dst_vma = find_vma(dst_mm, dst_start);
+ dst_vma = find_dst_vma(dst_mm, dst_start, len);
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
goto out_unlock;
- /*
- * Check the vma is registered in uffd, this is
- * required to enforce the VM_MAYWRITE check done at
- * uffd registration time.
- */
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
- goto out_unlock;
-
- if (dst_start < dst_vma->vm_start ||
- dst_start + len > dst_vma->vm_end)
- goto out_unlock;
err = -EINVAL;
if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
@@ -243,10 +261,6 @@ retry:
vm_shared = dst_vma->vm_flags & VM_SHARED;
}
- if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
- (len - copied) & (vma_hpagesize - 1)))
- goto out_unlock;
-
/*
* If not shared, ensure the dst_vma has a anon_vma.
*/
@@ -256,24 +270,21 @@ retry:
goto out_unlock;
}
- h = hstate_vma(dst_vma);
-
while (src_addr < src_start + len) {
pte_t dst_pteval;
BUG_ON(dst_addr >= dst_start + len);
- VM_BUG_ON(dst_addr & ~huge_page_mask(h));
/*
* Serialize via hugetlb_fault_mutex
*/
idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = -ENOMEM;
- dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
+ dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
if (!dst_pte) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
goto out_unlock;
@@ -300,7 +311,8 @@ retry:
err = copy_huge_page_from_user(page,
(const void __user *)src_addr,
- pages_per_huge_page(h), true);
+ vma_hpagesize / PAGE_SIZE,
+ true);
if (unlikely(err)) {
err = -EFAULT;
goto out;
@@ -475,20 +487,9 @@ retry:
* both valid and fully within a single existing vma.
*/
err = -ENOENT;
- dst_vma = find_vma(dst_mm, dst_start);
+ dst_vma = find_dst_vma(dst_mm, dst_start, len);
if (!dst_vma)
goto out_unlock;
- /*
- * Check the vma is registered in uffd, this is required to
- * enforce the VM_MAYWRITE check done at uffd registration
- * time.
- */
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
- goto out_unlock;
-
- if (dst_start < dst_vma->vm_start ||
- dst_start + len > dst_vma->vm_end)
- goto out_unlock;
err = -EINVAL;
/*
diff --git a/mm/util.c b/mm/util.c
index 3ad6db9a722e..988d11e6c17c 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -271,7 +271,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
EXPORT_SYMBOL(memdup_user_nul);
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node *rb_parent)
+ struct vm_area_struct *prev)
{
struct vm_area_struct *next;
@@ -280,18 +280,28 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
next = prev->vm_next;
prev->vm_next = vma;
} else {
+ next = mm->mmap;
mm->mmap = vma;
- if (rb_parent)
- next = rb_entry(rb_parent,
- struct vm_area_struct, vm_rb);
- else
- next = NULL;
}
vma->vm_next = next;
if (next)
next->vm_prev = vma;
}
+void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ struct vm_area_struct *prev, *next;
+
+ next = vma->vm_next;
+ prev = vma->vm_prev;
+ if (prev)
+ prev->vm_next = next;
+ else
+ mm->mmap = next;
+ if (next)
+ next->vm_prev = prev;
+}
+
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4a7d7459c4f9..4d3b3d60d893 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -331,6 +331,7 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
static DEFINE_SPINLOCK(vmap_area_lock);
+static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
@@ -682,7 +683,7 @@ insert_vmap_area_augment(struct vmap_area *va,
* free area is inserted. If VA has been merged, it is
* freed.
*/
-static __always_inline void
+static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
struct rb_root *root, struct list_head *head)
{
@@ -749,7 +750,10 @@ merge_or_add_vmap_area(struct vmap_area *va,
/* Free vmap_area object. */
kmem_cache_free(vmap_area_cachep, va);
- return;
+
+ /* Point to the new merged area. */
+ va = sibling;
+ merged = true;
}
}
@@ -758,6 +762,8 @@ insert:
link_va(va, root, parent, link, head);
augment_tree_propagate_from(va);
}
+
+ return va;
}
static __always_inline bool
@@ -968,6 +974,19 @@ adjust_va_to_fit_type(struct vmap_area *va,
* There are a few exceptions though, as an example it is
* a first allocation (early boot up) when we have "one"
* big free space that has to be split.
+ *
+	 * We can also hit this path for regular "vmap"
+	 * allocations, if the current CPU was not preloaded
+	 * (see the comment in alloc_vmap_area() for why). In
+	 * that case GFP_NOWAIT is used instead to get an extra
+	 * object for the split. That is rare and most of the
+	 * time does not occur.
+	 *
+	 * If the allocation fails, an "overflow" path is
+	 * triggered to purge lazily freed areas and free some
+	 * memory, and then the "retry" path repeats the attempt
+	 * one more time. See alloc_vmap_area() for the
+	 * details.
*/
lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
if (!lva)
@@ -1063,9 +1082,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
return ERR_PTR(-EBUSY);
might_sleep();
+ gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
- va = kmem_cache_alloc_node(vmap_area_cachep,
- gfp_mask & GFP_RECLAIM_MASK, node);
+ va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
if (unlikely(!va))
return ERR_PTR(-ENOMEM);
@@ -1073,49 +1092,55 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
* Only scan the relevant parts containing pointers to other objects
* to avoid false negatives.
*/
- kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
retry:
/*
- * Preload this CPU with one extra vmap_area object to ensure
- * that we have it available when fit type of free area is
- * NE_FIT_TYPE.
+	 * Preload this CPU with one extra vmap_area object. It is used
+	 * when the fit type of the free area is NE_FIT_TYPE. Note that
+	 * this does not guarantee that the allocation occurs on a
+	 * preloaded CPU; it only minimizes the cases where it does not.
+	 * That can happen because of CPU migration: there is a race
+	 * until the spinlock below is taken.
*
* The preload is done in non-atomic context, thus it allows us
* to use more permissive allocation masks to be more stable under
- * low memory condition and high memory pressure.
+	 * low memory conditions and high memory pressure. In the rare
+	 * case where we were not preloaded, GFP_NOWAIT is used.
*
- * Even if it fails we do not really care about that. Just proceed
- * as it is. "overflow" path will refill the cache we allocate from.
+	 * "pva" is set to NULL here because of the "retry" path.
*/
- preempt_disable();
- if (!__this_cpu_read(ne_fit_preload_node)) {
- preempt_enable();
- pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
- preempt_disable();
-
- if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
- if (pva)
- kmem_cache_free(vmap_area_cachep, pva);
- }
- }
+ pva = NULL;
- spin_lock(&vmap_area_lock);
- preempt_enable();
+ if (!this_cpu_read(ne_fit_preload_node))
+ /*
+		 * Even if this fails we do not really care; just
+		 * proceed as is. If needed, the "overflow" path
+		 * will refill the cache we allocate from.
+ */
+ pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
+
+ spin_lock(&free_vmap_area_lock);
+
+ if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
+ kmem_cache_free(vmap_area_cachep, pva);
/*
* If an allocation fails, the "vend" address is
* returned. Therefore trigger the overflow path.
*/
addr = __alloc_vmap_area(size, align, vstart, vend);
+ spin_unlock(&free_vmap_area_lock);
+
if (unlikely(addr == vend))
goto overflow;
va->va_start = addr;
va->va_end = addr + size;
va->vm = NULL;
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+ spin_lock(&vmap_area_lock);
+ insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
spin_unlock(&vmap_area_lock);
BUG_ON(!IS_ALIGNED(va->va_start, align));
@@ -1125,7 +1150,6 @@ retry:
return va;
overflow:
- spin_unlock(&vmap_area_lock);
if (!purged) {
purge_vmap_area_lazy();
purged = 1;
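
The overflow handling keeps the shape described in the comments: one failed
fit purges the lazily freed ranges, then a single retry. A toy sketch of
that control flow (the two helpers are stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

static bool space_available;	/* toy stand-in for the free tree */

static bool try_fit(void)
{
	return space_available;
}

static void purge_lazy_ranges(void)
{
	space_available = true;	/* lazily freed VA returned to the tree */
}

static bool alloc_range(void)
{
	bool purged = false;

retry:
	if (try_fit())
		return true;
	if (!purged) {
		purge_lazy_ranges();
		purged = true;
		goto retry;
	}
	return false;
}

int main(void)
{
	printf("allocated: %d\n", alloc_range());
	return 0;
}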
@@ -1161,28 +1185,24 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
-static void __free_vmap_area(struct vmap_area *va)
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
{
/*
* Remove from the busy tree/list.
*/
+ spin_lock(&vmap_area_lock);
unlink_va(va, &vmap_area_root);
+ spin_unlock(&vmap_area_lock);
/*
- * Merge VA with its neighbors, otherwise just add it.
+ * Insert/Merge it back to the free tree/list.
*/
- merge_or_add_vmap_area(va,
- &free_vmap_area_root, &free_vmap_area_list);
-}
-
-/*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
- spin_lock(&vmap_area_lock);
- __free_vmap_area(va);
- spin_unlock(&vmap_area_lock);
+ spin_lock(&free_vmap_area_lock);
+ merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
+ spin_unlock(&free_vmap_area_lock);
}
/*
@@ -1275,24 +1295,30 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
flush_tlb_kernel_range(start, end);
resched_threshold = lazy_max_pages() << 1;
- spin_lock(&vmap_area_lock);
+ spin_lock(&free_vmap_area_lock);
llist_for_each_entry_safe(va, n_va, valist, purge_list) {
unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+ unsigned long orig_start = va->va_start;
+ unsigned long orig_end = va->va_end;
/*
* Finally insert or merge lazily-freed area. It is
* detached and there is no need to "unlink" it from
* anything.
*/
- merge_or_add_vmap_area(va,
- &free_vmap_area_root, &free_vmap_area_list);
+ va = merge_or_add_vmap_area(va, &free_vmap_area_root,
+ &free_vmap_area_list);
+
+ if (is_vmalloc_or_module_addr((void *)orig_start))
+ kasan_release_vmalloc(orig_start, orig_end,
+ va->va_start, va->va_end);
atomic_long_sub(nr, &vmap_lazy_nr);
if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
- cond_resched_lock(&vmap_area_lock);
+ cond_resched_lock(&free_vmap_area_lock);
}
- spin_unlock(&vmap_area_lock);
+ spin_unlock(&free_vmap_area_lock);
return true;
}
@@ -2014,15 +2040,21 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
}
EXPORT_SYMBOL_GPL(map_vm_area);
-static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, const void *caller)
+static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
+ struct vmap_area *va, unsigned long flags, const void *caller)
{
- spin_lock(&vmap_area_lock);
vm->flags = flags;
vm->addr = (void *)va->va_start;
vm->size = va->va_end - va->va_start;
vm->caller = caller;
va->vm = vm;
+}
+
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+ unsigned long flags, const void *caller)
+{
+ spin_lock(&vmap_area_lock);
+ setup_vmalloc_vm_locked(vm, va, flags, caller);
spin_unlock(&vmap_area_lock);
}
@@ -2068,6 +2100,22 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
setup_vmalloc_vm(area, va, flags, caller);
+ /*
+ * For KASAN, if we are in vmalloc space, we need to cover the shadow
+ * area with real memory. If we come here through VM_ALLOC, this is
+ * done by a higher level function that has access to the true size,
+ * which might not be a full page.
+ *
+ * We assume module space comes via VM_ALLOC path.
+ */
+ if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
+ if (kasan_populate_vmalloc(area->size, area)) {
+ unmap_vmap_area(va);
+ kfree(area);
+ return NULL;
+ }
+ }
+
return area;
}
@@ -2245,6 +2293,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
+ if (area->flags & VM_KASAN)
+ kasan_poison_vmalloc(area->addr, area->size);
+
vm_remove_mappings(area, deallocate_pages);
if (deallocate_pages) {
@@ -2440,7 +2491,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
goto fail;
}
area->pages[i] = page;
- if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
+ if (gfpflags_allow_blocking(gfp_mask))
cond_resched();
}
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
@@ -2497,6 +2548,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!addr)
return NULL;
+ if (is_vmalloc_or_module_addr(area->addr)) {
+ if (kasan_populate_vmalloc(real_size, area))
+ return NULL;
+ }
+
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
* flag. It means that vm_struct is not fully initialized.
@@ -3282,7 +3338,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
goto err_free;
}
retry:
- spin_lock(&vmap_area_lock);
+ spin_lock(&free_vmap_area_lock);
/* start scanning - we scan from the top, begin with the last area */
area = term_area = last_area;
@@ -3364,29 +3420,44 @@ retry:
va = vas[area];
va->va_start = start;
va->va_end = start + size;
-
- insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
}
- spin_unlock(&vmap_area_lock);
+ spin_unlock(&free_vmap_area_lock);
/* insert all vm's */
- for (area = 0; area < nr_vms; area++)
- setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+ spin_lock(&vmap_area_lock);
+ for (area = 0; area < nr_vms; area++) {
+ insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
+
+ setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
pcpu_get_vm_areas);
+ }
+ spin_unlock(&vmap_area_lock);
+
+ /* populate the shadow space outside of the lock */
+ for (area = 0; area < nr_vms; area++) {
+ /* assume success here */
+ kasan_populate_vmalloc(sizes[area], vms[area]);
+ }
kfree(vas);
return vms;
recovery:
- /* Remove previously inserted areas. */
+ /*
+	 * Remove previously allocated areas. There is no
+	 * need to remove these areas from the busy tree,
+	 * because they are only inserted on the final step,
+	 * and only when pcpu_get_vm_areas() succeeds.
+ */
while (area--) {
- __free_vmap_area(vas[area]);
+ merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+ &free_vmap_area_list);
vas[area] = NULL;
}
overflow:
- spin_unlock(&vmap_area_lock);
+ spin_unlock(&free_vmap_area_lock);
if (!purged) {
purge_vmap_area_lazy();
purged = true;
@@ -3437,9 +3508,12 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
+ __acquires(&vmap_purge_lock)
__acquires(&vmap_area_lock)
{
+ mutex_lock(&vmap_purge_lock);
spin_lock(&vmap_area_lock);
+
return seq_list_start(&vmap_area_list, *pos);
}
@@ -3449,8 +3523,10 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
}
static void s_stop(struct seq_file *m, void *p)
+ __releases(&vmap_purge_lock)
__releases(&vmap_area_lock)
{
+ mutex_unlock(&vmap_purge_lock);
spin_unlock(&vmap_area_lock);
}
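
__acquires()/__releases() are sparse annotations: they tell the checker
which lock a function enters or exits with, so imbalanced paths get
flagged. A minimal sketch; outside a sparse run (__CHECKER__) the macros
expand to nothing, so this builds with a normal C compiler:

#include <pthread.h>

#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static pthread_mutex_t walk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with walk_lock held. */
static void walk_start(void) __acquires(&walk_lock)
{
	pthread_mutex_lock(&walk_lock);
}

/* Must be called with walk_lock held; releases it. */
static void walk_stop(void) __releases(&walk_lock)
{
	pthread_mutex_unlock(&walk_lock);
}

int main(void)
{
	walk_start();
	walk_stop();
	return 0;
}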
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ee4eecc7e1c2..74e8edce83ca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -79,6 +79,13 @@ struct scan_control {
*/
struct mem_cgroup *target_mem_cgroup;
+ /* Can active pages be deactivated as part of reclaim? */
+#define DEACTIVATE_ANON 1
+#define DEACTIVATE_FILE 2
+ unsigned int may_deactivate:2;
+ unsigned int force_deactivate:1;
+ unsigned int skipped_deactivate:1;
+
/* Writepage batching in laptop mode; RECLAIM_WRITE */
unsigned int may_writepage:1;
@@ -101,6 +108,12 @@ struct scan_control {
/* One of the zones is ready for compaction */
unsigned int compaction_ready:1;
+ /* There is easily reclaimable cold cache in the current node */
+ unsigned int cache_trim_mode:1;
+
+ /* The file pages on the current node are dangerously low */
+ unsigned int file_is_tiny:1;
+
/* Allocation order */
s8 order;
@@ -239,13 +252,13 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
up_write(&shrinker_rwsem);
}
-static bool global_reclaim(struct scan_control *sc)
+static bool cgroup_reclaim(struct scan_control *sc)
{
- return !sc->target_mem_cgroup;
+ return sc->target_mem_cgroup;
}
/**
- * sane_reclaim - is the usual dirty throttling mechanism operational?
+ * writeback_throttling_sane - is the usual dirty throttling mechanism available?
* @sc: scan_control in question
*
* The normal page dirty throttling mechanism in balance_dirty_pages() is
@@ -257,11 +270,9 @@ static bool global_reclaim(struct scan_control *sc)
* This function tests whether the vmscan currently in progress can assume
* that the normal dirty throttling mechanism is operational.
*/
-static bool sane_reclaim(struct scan_control *sc)
+static bool writeback_throttling_sane(struct scan_control *sc)
{
- struct mem_cgroup *memcg = sc->target_mem_cgroup;
-
- if (!memcg)
+ if (!cgroup_reclaim(sc))
return true;
#ifdef CONFIG_CGROUP_WRITEBACK
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
@@ -269,29 +280,6 @@ static bool sane_reclaim(struct scan_control *sc)
#endif
return false;
}
-
-static void set_memcg_congestion(pg_data_t *pgdat,
- struct mem_cgroup *memcg,
- bool congested)
-{
- struct mem_cgroup_per_node *mn;
-
- if (!memcg)
- return;
-
- mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
- WRITE_ONCE(mn->congested, congested);
-}
-
-static bool memcg_congested(pg_data_t *pgdat,
- struct mem_cgroup *memcg)
-{
- struct mem_cgroup_per_node *mn;
-
- mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
- return READ_ONCE(mn->congested);
-
-}
#else
static int prealloc_memcg_shrinker(struct shrinker *shrinker)
{
@@ -302,27 +290,15 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}
-static bool global_reclaim(struct scan_control *sc)
+static bool cgroup_reclaim(struct scan_control *sc)
{
- return true;
+ return false;
}
-static bool sane_reclaim(struct scan_control *sc)
+static bool writeback_throttling_sane(struct scan_control *sc)
{
return true;
}
-
-static inline void set_memcg_congestion(struct pglist_data *pgdat,
- struct mem_cgroup *memcg, bool congested)
-{
-}
-
-static inline bool memcg_congested(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
-{
- return false;
-
-}
#endif
/*
@@ -351,32 +327,21 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
*/
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{
- unsigned long lru_size = 0;
+ unsigned long size = 0;
int zid;
- if (!mem_cgroup_disabled()) {
- for (zid = 0; zid < MAX_NR_ZONES; zid++)
- lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
- } else
- lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
-
- for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
+ for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
- unsigned long size;
if (!managed_zone(zone))
continue;
if (!mem_cgroup_disabled())
- size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+ size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
else
- size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
- NR_ZONE_LRU_BASE + lru);
- lru_size -= min(size, lru_size);
+ size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
}
-
- return lru_size;
-
+ return size;
}
/*
@@ -775,7 +740,7 @@ static inline int is_page_cache_freeable(struct page *page)
return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
}
-static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
+static int may_write_to_inode(struct inode *inode)
{
if (current->flags & PF_SWAPWRITE)
return 1;
@@ -823,8 +788,7 @@ typedef enum {
* pageout is called by shrink_page_list() for each dirty page.
* Calls ->writepage().
*/
-static pageout_t pageout(struct page *page, struct address_space *mapping,
- struct scan_control *sc)
+static pageout_t pageout(struct page *page, struct address_space *mapping)
{
/*
* If the page is dirty, only perform writeback if that write
@@ -860,7 +824,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
}
if (mapping->a_ops->writepage == NULL)
return PAGE_ACTIVATE;
- if (!may_write_to_inode(mapping->host, sc))
+ if (!may_write_to_inode(mapping->host))
return PAGE_KEEP;
if (clear_page_dirty_for_io(page)) {
@@ -899,7 +863,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
* gets returned with a refcount of 0.
*/
static int __remove_mapping(struct address_space *mapping, struct page *page,
- bool reclaimed)
+ bool reclaimed, struct mem_cgroup *target_memcg)
{
unsigned long flags;
int refcount;
@@ -971,7 +935,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
*/
if (reclaimed && page_is_file_cache(page) &&
!mapping_exiting(mapping) && !dax_mapping(mapping))
- shadow = workingset_eviction(page);
+ shadow = workingset_eviction(page, target_memcg);
__delete_from_page_cache(page, shadow);
xa_unlock_irqrestore(&mapping->i_pages, flags);
@@ -994,7 +958,7 @@ cannot_free:
*/
int remove_mapping(struct address_space *mapping, struct page *page)
{
- if (__remove_mapping(mapping, page, false)) {
+ if (__remove_mapping(mapping, page, false, NULL)) {
/*
* Unfreezing the refcount with 1 rather than 2 effectively
* drops the pagecache ref for us without requiring another
@@ -1239,7 +1203,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto activate_locked;
/* Case 2 above */
- } else if (sane_reclaim(sc) ||
+ } else if (writeback_throttling_sane(sc) ||
!PageReclaim(page) || !may_enter_fs) {
/*
* This is slightly racy - end_page_writeback()
@@ -1394,7 +1358,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* starts and then write it out here.
*/
try_to_unmap_flush_dirty();
- switch (pageout(page, mapping, sc)) {
+ switch (pageout(page, mapping)) {
case PAGE_KEEP:
goto keep_locked;
case PAGE_ACTIVATE:
@@ -1472,7 +1436,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
count_vm_event(PGLAZYFREED);
count_memcg_page_event(page, PGLAZYFREED);
- } else if (!mapping || !__remove_mapping(mapping, page, true))
+ } else if (!mapping || !__remove_mapping(mapping, page, true,
+ sc->target_mem_cgroup))
goto keep_locked;
unlock_page(page);
@@ -1820,7 +1785,7 @@ int isolate_lru_page(struct page *page)
/*
* A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get resheduled. When there are massive number of tasks doing page
+ * then get rescheduled. When there is a massive number of tasks doing page
* allocation, such sleeping direct reclaimers may keep piling up on each CPU,
* the LRU list will go small and be scanned faster than necessary, leading to
* unnecessary swapping, thrashing and OOM.
@@ -1833,7 +1798,7 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
if (current_is_kswapd())
return 0;
- if (!sane_reclaim(sc))
+ if (!writeback_throttling_sane(sc))
return 0;
if (file) {
@@ -1983,7 +1948,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
reclaim_stat->recent_scanned[file] += nr_taken;
item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
- if (global_reclaim(sc))
+ if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_scanned);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
spin_unlock_irq(&pgdat->lru_lock);
@@ -1997,7 +1962,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
spin_lock_irq(&pgdat->lru_lock);
item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
- if (global_reclaim(sc))
+ if (!cgroup_reclaim(sc))
__count_vm_events(item, nr_reclaimed);
__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
@@ -2199,6 +2164,20 @@ unsigned long reclaim_pages(struct list_head *page_list)
return nr_reclaimed;
}
+static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
+ struct lruvec *lruvec, struct scan_control *sc)
+{
+ if (is_active_lru(lru)) {
+ if (sc->may_deactivate & (1 << is_file_lru(lru)))
+ shrink_active_list(nr_to_scan, lruvec, sc, lru);
+ else
+ sc->skipped_deactivate = 1;
+ return 0;
+ }
+
+ return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
+}
+
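
The gate above indexes a two-bit mask by LRU type: bit 0 (DEACTIVATE_ANON)
for anon lists, bit 1 (DEACTIVATE_FILE) for file lists. A standalone sketch
of the indexing with a simplified enum mirroring the kernel's lru_list
order:

#include <stdbool.h>
#include <stdio.h>

#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2

enum lru { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
	   LRU_INACTIVE_FILE, LRU_ACTIVE_FILE };

static bool is_file_lru(enum lru lru)
{
	return lru >= LRU_INACTIVE_FILE;
}

/* is_file_lru() is 0 or 1, selecting the anon or file bit. */
static bool may_shrink_active(unsigned int may_deactivate, enum lru lru)
{
	return may_deactivate & (1 << is_file_lru(lru));
}

int main(void)
{
	unsigned int may = DEACTIVATE_FILE;	/* only file aging allowed */

	printf("active file: %d, active anon: %d\n",
	       may_shrink_active(may, LRU_ACTIVE_FILE),
	       may_shrink_active(may, LRU_ACTIVE_ANON));
	return 0;
}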
/*
* The inactive anon list should be small enough that the VM never has
* to do too much work.
@@ -2227,64 +2206,25 @@ unsigned long reclaim_pages(struct list_head *page_list)
* 1TB 101 10GB
* 10TB 320 32GB
*/
-static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
- struct scan_control *sc, bool trace)
+static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
{
- enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
- struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- enum lru_list inactive_lru = file * LRU_FILE;
+ enum lru_list active_lru = inactive_lru + LRU_ACTIVE;
unsigned long inactive, active;
unsigned long inactive_ratio;
- unsigned long refaults;
unsigned long gb;
- /*
- * If we don't have swap space, anonymous page deactivation
- * is pointless.
- */
- if (!file && !total_swap_pages)
- return false;
-
- inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
- active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
-
- /*
- * When refaults are being observed, it means a new workingset
- * is being established. Disable active list protection to get
- * rid of the stale workingset quickly.
- */
- refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
- if (file && lruvec->refaults != refaults) {
- inactive_ratio = 0;
- } else {
- gb = (inactive + active) >> (30 - PAGE_SHIFT);
- if (gb)
- inactive_ratio = int_sqrt(10 * gb);
- else
- inactive_ratio = 1;
- }
+ inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru);
+ active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
- if (trace)
- trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
- lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
- lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
- inactive_ratio, file);
+ gb = (inactive + active) >> (30 - PAGE_SHIFT);
+ if (gb)
+ inactive_ratio = int_sqrt(10 * gb);
+ else
+ inactive_ratio = 1;
return inactive * inactive_ratio < active;
}
-static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
- struct lruvec *lruvec, struct scan_control *sc)
-{
- if (is_active_lru(lru)) {
- if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
- shrink_active_list(nr_to_scan, lruvec, sc, lru);
- return 0;
- }
-
- return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
-}
-
enum scan_balance {
SCAN_EQUAL,
SCAN_FRACT,
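
The ratio computed by inactive_is_low() reproduces the table in the comment
above it: for gb > 0 it is int_sqrt(10 * gb). A quick standalone check,
with libm's sqrt standing in for the kernel's int_sqrt:

#include <math.h>
#include <stdio.h>

static unsigned long inactive_ratio(unsigned long total_gb)
{
	return total_gb ? (unsigned long)sqrt(10.0 * total_gb) : 1;
}

int main(void)
{
	unsigned long gbs[] = { 1, 10, 100, 1024, 10240 };

	for (unsigned int i = 0; i < sizeof(gbs) / sizeof(gbs[0]); i++)
		printf("%5lu GB -> ratio %lu\n", gbs[i], inactive_ratio(gbs[i]));
	return 0;	/* 3, 10, 31, 101, 320 */
}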
@@ -2301,10 +2241,10 @@ enum scan_balance {
* nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
* nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
*/
-static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
- struct scan_control *sc, unsigned long *nr,
- unsigned long *lru_pages)
+static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ unsigned long *nr)
{
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
int swappiness = mem_cgroup_swappiness(memcg);
struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
u64 fraction[2];
@@ -2329,7 +2269,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
* using the memory controller's swap limit feature would be
* too expensive.
*/
- if (!global_reclaim(sc) && !swappiness) {
+ if (cgroup_reclaim(sc) && !swappiness) {
scan_balance = SCAN_FILE;
goto out;
}
@@ -2345,58 +2285,18 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
}
/*
- * Prevent the reclaimer from falling into the cache trap: as
- * cache pages start out inactive, every cache fault will tip
- * the scan balance towards the file LRU. And as the file LRU
- * shrinks, so does the window for rotation from references.
- * This means we have a runaway feedback loop where a tiny
- * thrashing file LRU becomes infinitely more attractive than
- * anon pages. Try to detect this based on file LRU size.
+ * If the system is almost out of file pages, force-scan anon.
*/
- if (global_reclaim(sc)) {
- unsigned long pgdatfile;
- unsigned long pgdatfree;
- int z;
- unsigned long total_high_wmark = 0;
-
- pgdatfree = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
- pgdatfile = node_page_state(pgdat, NR_ACTIVE_FILE) +
- node_page_state(pgdat, NR_INACTIVE_FILE);
-
- for (z = 0; z < MAX_NR_ZONES; z++) {
- struct zone *zone = &pgdat->node_zones[z];
- if (!managed_zone(zone))
- continue;
-
- total_high_wmark += high_wmark_pages(zone);
- }
-
- if (unlikely(pgdatfile + pgdatfree <= total_high_wmark)) {
- /*
- * Force SCAN_ANON if there are enough inactive
- * anonymous pages on the LRU in eligible zones.
- * Otherwise, the small LRU gets thrashed.
- */
- if (!inactive_list_is_low(lruvec, false, sc, false) &&
- lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, sc->reclaim_idx)
- >> sc->priority) {
- scan_balance = SCAN_ANON;
- goto out;
- }
- }
+ if (sc->file_is_tiny) {
+ scan_balance = SCAN_ANON;
+ goto out;
}
/*
- * If there is enough inactive page cache, i.e. if the size of the
- * inactive list is greater than that of the active list *and* the
- * inactive list actually has some pages to scan on this priority, we
- * do not reclaim anything from the anonymous working set right now.
- * Without the second condition we could end up never scanning an
- * lruvec even if it has plenty of old anonymous pages unless the
- * system is under heavy pressure.
+ * If there is enough inactive page cache, we do not reclaim
+	 * anything from the anonymous working set right now.
*/
- if (!inactive_list_is_low(lruvec, true, sc, false) &&
- lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
+ if (sc->cache_trim_mode) {
scan_balance = SCAN_FILE;
goto out;
}
@@ -2454,7 +2354,6 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
fraction[1] = fp;
denominator = ap + fp + 1;
out:
- *lru_pages = 0;
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long lruvec_size;
@@ -2549,18 +2448,12 @@ out:
BUG();
}
- *lru_pages += lruvec_size;
nr[lru] = scan;
}
}
-/*
- * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
- */
-static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
- struct scan_control *sc, unsigned long *lru_pages)
+static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
- struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
unsigned long nr[NR_LRU_LISTS];
unsigned long targets[NR_LRU_LISTS];
unsigned long nr_to_scan;
@@ -2570,7 +2463,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
struct blk_plug plug;
bool scan_adjusted;
- get_scan_count(lruvec, memcg, sc, nr, lru_pages);
+ get_scan_count(lruvec, sc, nr);
/* Record the original scan target for proportional adjustments later */
memcpy(targets, nr, sizeof(nr));
@@ -2586,7 +2479,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
* abort proportional reclaim if either the file or anon lru has already
* dropped to zero at the first pass.
*/
- scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
+ scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&
sc->priority == DEF_PRIORITY);
blk_start_plug(&plug);
@@ -2668,7 +2561,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
- if (inactive_list_is_low(lruvec, false, sc, true))
+ if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
}
@@ -2744,156 +2637,234 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
return inactive_lru_pages > pages_for_compaction;
}
-static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
+static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
{
- return test_bit(PGDAT_CONGESTED, &pgdat->flags) ||
- (memcg && memcg_congested(pgdat, memcg));
+ struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
+ struct mem_cgroup *memcg;
+
+ memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
+ do {
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ unsigned long reclaimed;
+ unsigned long scanned;
+
+ switch (mem_cgroup_protected(target_memcg, memcg)) {
+ case MEMCG_PROT_MIN:
+ /*
+ * Hard protection.
+ * If there is no reclaimable memory, OOM.
+ */
+ continue;
+ case MEMCG_PROT_LOW:
+ /*
+ * Soft protection.
+ * Respect the protection only as long as
+ * there is an unprotected supply
+ * of reclaimable memory from other cgroups.
+ */
+ if (!sc->memcg_low_reclaim) {
+ sc->memcg_low_skipped = 1;
+ continue;
+ }
+ memcg_memory_event(memcg, MEMCG_LOW);
+ break;
+ case MEMCG_PROT_NONE:
+ /*
+ * All protection thresholds breached. We may
+ * still choose to vary the scan pressure
+ * applied based on by how much the cgroup in
+ * question has exceeded its protection
+ * thresholds (see get_scan_count).
+ */
+ break;
+ }
+
+ reclaimed = sc->nr_reclaimed;
+ scanned = sc->nr_scanned;
+
+ shrink_lruvec(lruvec, sc);
+
+ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
+ sc->priority);
+
+ /* Record the group's reclaim efficiency */
+ vmpressure(sc->gfp_mask, memcg, false,
+ sc->nr_scanned - scanned,
+ sc->nr_reclaimed - reclaimed);
+
+ } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
}
static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long nr_reclaimed, nr_scanned;
+ struct lruvec *target_lruvec;
bool reclaimable = false;
+ unsigned long file;
- do {
- struct mem_cgroup *root = sc->target_mem_cgroup;
- unsigned long node_lru_pages = 0;
- struct mem_cgroup *memcg;
+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
- memset(&sc->nr, 0, sizeof(sc->nr));
+again:
+ memset(&sc->nr, 0, sizeof(sc->nr));
- nr_reclaimed = sc->nr_reclaimed;
- nr_scanned = sc->nr_scanned;
+ nr_reclaimed = sc->nr_reclaimed;
+ nr_scanned = sc->nr_scanned;
- memcg = mem_cgroup_iter(root, NULL, NULL);
- do {
- unsigned long lru_pages;
- unsigned long reclaimed;
- unsigned long scanned;
+ /*
+ * Target desirable inactive:active list ratios for the anon
+ * and file LRU lists.
+ */
+ if (!sc->force_deactivate) {
+ unsigned long refaults;
- switch (mem_cgroup_protected(root, memcg)) {
- case MEMCG_PROT_MIN:
- /*
- * Hard protection.
- * If there is no reclaimable memory, OOM.
- */
- continue;
- case MEMCG_PROT_LOW:
- /*
- * Soft protection.
- * Respect the protection only as long as
- * there is an unprotected supply
- * of reclaimable memory from other cgroups.
- */
- if (!sc->memcg_low_reclaim) {
- sc->memcg_low_skipped = 1;
- continue;
- }
- memcg_memory_event(memcg, MEMCG_LOW);
- break;
- case MEMCG_PROT_NONE:
- /*
- * All protection thresholds breached. We may
- * still choose to vary the scan pressure
- * applied based on by how much the cgroup in
- * question has exceeded its protection
- * thresholds (see get_scan_count).
- */
- break;
- }
+ if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+ sc->may_deactivate |= DEACTIVATE_ANON;
+ else
+ sc->may_deactivate &= ~DEACTIVATE_ANON;
- reclaimed = sc->nr_reclaimed;
- scanned = sc->nr_scanned;
- shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
- node_lru_pages += lru_pages;
+ /*
+ * When refaults are being observed, it means a new
+ * workingset is being established. Deactivate to get
+ * rid of any stale active pages quickly.
+ */
+ refaults = lruvec_page_state(target_lruvec,
+ WORKINGSET_ACTIVATE);
+ if (refaults != target_lruvec->refaults ||
+ inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+ sc->may_deactivate |= DEACTIVATE_FILE;
+ else
+ sc->may_deactivate &= ~DEACTIVATE_FILE;
+ } else
+ sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
- shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
- sc->priority);
+ /*
+ * If we have plenty of inactive file pages that aren't
+ * thrashing, try to reclaim those first before touching
+ * anonymous pages.
+ */
+ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+ sc->cache_trim_mode = 1;
+ else
+ sc->cache_trim_mode = 0;
+
+ /*
+ * Prevent the reclaimer from falling into the cache trap: as
+ * cache pages start out inactive, every cache fault will tip
+ * the scan balance towards the file LRU. And as the file LRU
+ * shrinks, so does the window for rotation from references.
+ * This means we have a runaway feedback loop where a tiny
+ * thrashing file LRU becomes infinitely more attractive than
+ * anon pages. Try to detect this based on file LRU size.
+ */
+ if (!cgroup_reclaim(sc)) {
+ unsigned long total_high_wmark = 0;
+ unsigned long free, anon;
+ int z;
- /* Record the group's reclaim efficiency */
- vmpressure(sc->gfp_mask, memcg, false,
- sc->nr_scanned - scanned,
- sc->nr_reclaimed - reclaimed);
+ free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+ file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+ node_page_state(pgdat, NR_INACTIVE_FILE);
- } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
+ for (z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = &pgdat->node_zones[z];
+ if (!managed_zone(zone))
+ continue;
- if (reclaim_state) {
- sc->nr_reclaimed += reclaim_state->reclaimed_slab;
- reclaim_state->reclaimed_slab = 0;
+ total_high_wmark += high_wmark_pages(zone);
}
- /* Record the subtree's reclaim efficiency */
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
+ /*
+ * Consider anon: if that's low too, this isn't a
+ * runaway file reclaim problem, but rather just
+ * extreme pressure. Reclaim as per usual then.
+ */
+ anon = node_page_state(pgdat, NR_INACTIVE_ANON);
- if (sc->nr_reclaimed - nr_reclaimed)
- reclaimable = true;
+ sc->file_is_tiny =
+ file + free <= total_high_wmark &&
+ !(sc->may_deactivate & DEACTIVATE_ANON) &&
+ anon >> sc->priority;
+ }
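
A minimal sketch of the file_is_tiny test composed above, with hypothetical page counts; the real check also depends on how total_high_wmark was summed over the node's managed zones:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the sc->file_is_tiny expression above. */
static bool file_is_tiny(unsigned long file, unsigned long free,
                         unsigned long total_high_wmark,
                         bool may_deactivate_anon,
                         unsigned long anon, int priority)
{
    return file + free <= total_high_wmark &&
           !may_deactivate_anon &&
           (anon >> priority);  /* enough anon left to be worth scanning */
}

int main(void)
{
    /* tiny file LRU, plenty of anon, at priority 12 */
    printf("%d\n", file_is_tiny(100, 200, 1000, false, 1UL << 20, 12));
    return 0;
}
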
- if (current_is_kswapd()) {
- /*
- * If reclaim is isolating dirty pages under writeback,
- * it implies that the long-lived page allocation rate
- * is exceeding the page laundering rate. Either the
- * global limits are not being effective at throttling
- * processes due to the page distribution throughout
- * zones or there is heavy usage of a slow backing
- * device. The only option is to throttle from reclaim
- * context which is not ideal as there is no guarantee
- * the dirtying process is throttled in the same way
- * balance_dirty_pages() manages.
- *
- * Once a node is flagged PGDAT_WRITEBACK, kswapd will
- * count the number of pages under pages flagged for
- * immediate reclaim and stall if any are encountered
- * in the nr_immediate check below.
- */
- if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
- set_bit(PGDAT_WRITEBACK, &pgdat->flags);
+ shrink_node_memcgs(pgdat, sc);
- /*
- * Tag a node as congested if all the dirty pages
- * scanned were backed by a congested BDI and
- * wait_iff_congested will stall.
- */
- if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
- set_bit(PGDAT_CONGESTED, &pgdat->flags);
+ if (reclaim_state) {
+ sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+ reclaim_state->reclaimed_slab = 0;
+ }
- /* Allow kswapd to start writing pages during reclaim.*/
- if (sc->nr.unqueued_dirty == sc->nr.file_taken)
- set_bit(PGDAT_DIRTY, &pgdat->flags);
+ /* Record the subtree's reclaim efficiency */
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
- /*
- * If kswapd scans pages marked marked for immediate
- * reclaim and under writeback (nr_immediate), it
- * implies that pages are cycling through the LRU
- * faster than they are written so also forcibly stall.
- */
- if (sc->nr.immediate)
- congestion_wait(BLK_RW_ASYNC, HZ/10);
- }
+ if (sc->nr_reclaimed - nr_reclaimed)
+ reclaimable = true;
+ if (current_is_kswapd()) {
/*
- * Legacy memcg will stall in page writeback so avoid forcibly
- * stalling in wait_iff_congested().
+ * If reclaim is isolating dirty pages under writeback,
+ * it implies that the long-lived page allocation rate
+ * is exceeding the page laundering rate. Either the
+ * global limits are not being effective at throttling
+ * processes due to the page distribution throughout
+ * zones or there is heavy usage of a slow backing
+ * device. The only option is to throttle from reclaim
+ * context which is not ideal as there is no guarantee
+ * the dirtying process is throttled in the same way
+ * balance_dirty_pages() manages.
+ *
+ * Once a node is flagged PGDAT_WRITEBACK, kswapd will
+ * count the number of pages under writeback that are flagged for
+ * immediate reclaim and stall if any are encountered
+ * in the nr_immediate check below.
*/
- if (!global_reclaim(sc) && sane_reclaim(sc) &&
- sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
- set_memcg_congestion(pgdat, root, true);
+ if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
+ set_bit(PGDAT_WRITEBACK, &pgdat->flags);
+
+ /* Allow kswapd to start writing pages during reclaim. */
+ if (sc->nr.unqueued_dirty == sc->nr.file_taken)
+ set_bit(PGDAT_DIRTY, &pgdat->flags);
/*
- * Stall direct reclaim for IO completions if underlying BDIs
- * and node is congested. Allow kswapd to continue until it
- * starts encountering unqueued dirty pages or cycling through
- * the LRU too quickly.
+ * If kswapd scans pages marked for immediate
+ * reclaim and under writeback (nr_immediate), it
+ * implies that pages are cycling through the LRU
+ * faster than they are written so also forcibly stall.
*/
- if (!sc->hibernation_mode && !current_is_kswapd() &&
- current_may_throttle() && pgdat_memcg_congested(pgdat, root))
- wait_iff_congested(BLK_RW_ASYNC, HZ/10);
+ if (sc->nr.immediate)
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+ }
- } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
- sc));
+ /*
+ * Tag a node/memcg as congested if all the dirty pages
+ * scanned were backed by a congested BDI and
+ * wait_iff_congested will stall.
+ *
+ * Legacy memcg will stall in page writeback so avoid forcibly
+ * stalling in wait_iff_congested().
+ */
+ if ((current_is_kswapd() ||
+ (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
+ sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
+ set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
+
+ /*
+ * Stall direct reclaim for IO completions if the underlying
+ * BDIs and the node are congested. Allow kswapd to continue until it
+ * starts encountering unqueued dirty pages or cycling through
+ * the LRU too quickly.
+ */
+ if (!current_is_kswapd() && current_may_throttle() &&
+ !sc->hibernation_mode &&
+ test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
+ wait_iff_congested(BLK_RW_ASYNC, HZ/10);
+
+ if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
+ sc))
+ goto again;
/*
* Kswapd gives up on balancing particular nodes after too
@@ -2973,7 +2944,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
* Take care that memory controller reclaiming has only a small
* influence on the global LRU.
*/
- if (global_reclaim(sc)) {
+ if (!cgroup_reclaim(sc)) {
if (!cpuset_zone_allowed(zone,
GFP_KERNEL | __GFP_HARDWALL))
continue;
@@ -3032,19 +3003,14 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
sc->gfp_mask = orig_mask;
}
-static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
+static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
{
- struct mem_cgroup *memcg;
-
- memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
- do {
- unsigned long refaults;
- struct lruvec *lruvec;
+ struct lruvec *target_lruvec;
+ unsigned long refaults;
- lruvec = mem_cgroup_lruvec(pgdat, memcg);
- refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
- lruvec->refaults = refaults;
- } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
+ target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+ refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
+ target_lruvec->refaults = refaults;
}
/*
@@ -3073,7 +3039,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
retry:
delayacct_freepages_start();
- if (global_reclaim(sc))
+ if (!cgroup_reclaim(sc))
__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
do {
@@ -3102,8 +3068,16 @@ retry:
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
+
snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
- set_memcg_congestion(last_pgdat, sc->target_mem_cgroup, false);
+
+ if (cgroup_reclaim(sc)) {
+ struct lruvec *lruvec;
+
+ lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
+ zone->zone_pgdat);
+ clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
+ }
}
delayacct_freepages_end();
@@ -3115,9 +3089,27 @@ retry:
if (sc->compaction_ready)
return 1;
+ /*
+ * We make inactive:active ratio decisions based on the node's
+ * composition of memory, but a restrictive reclaim_idx or a
+ * memory.low cgroup setting can exempt large amounts of
+ * memory from reclaim. Neither of which are very common, so
+ * instead of doing costly eligibility calculations of the
+ * entire cgroup subtree up front, we assume the estimates are
+ * good, and retry with forcible deactivation if that fails.
+ */
+ if (sc->skipped_deactivate) {
+ sc->priority = initial_priority;
+ sc->force_deactivate = 1;
+ sc->skipped_deactivate = 0;
+ goto retry;
+ }
+
/* Untapped cgroup reserves? Don't OOM, retry. */
if (sc->memcg_low_skipped) {
sc->priority = initial_priority;
+ sc->force_deactivate = 0;
+ sc->skipped_deactivate = 0;
sc->memcg_low_reclaim = 1;
sc->memcg_low_skipped = 0;
goto retry;
@@ -3309,6 +3301,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
pg_data_t *pgdat,
unsigned long *nr_scanned)
{
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
struct scan_control sc = {
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.target_mem_cgroup = memcg,
@@ -3317,7 +3310,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
};
- unsigned long lru_pages;
WARN_ON_ONCE(!current->reclaim_state);
@@ -3334,7 +3326,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
* will pick up pages from other mem cgroup's as well. We hack
* the priority and make it zero.
*/
- shrink_node_memcg(pgdat, memcg, &sc, &lru_pages);
+ shrink_lruvec(lruvec, &sc);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -3348,10 +3340,8 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
gfp_t gfp_mask,
bool may_swap)
{
- struct zonelist *zonelist;
unsigned long nr_reclaimed;
unsigned long pflags;
- int nid;
unsigned int noreclaim_flag;
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
@@ -3364,16 +3354,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_unmap = 1,
.may_swap = may_swap,
};
-
- set_task_reclaim_state(current, &sc.reclaim_state);
/*
- * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
- * take care of from where we get pages. So the node where we start the
- * scan does not need to be the current node.
+ * Traverse the ZONELIST_FALLBACK zonelist of the current node to put
+ * equal pressure on all the nodes. This is based on the assumption that
+ * the reclaim does not bail out early.
*/
- nid = mem_cgroup_select_victim_node(memcg);
+ struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
- zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ set_task_reclaim_state(current, &sc.reclaim_state);
trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
@@ -3396,18 +3384,20 @@ static void age_active_anon(struct pglist_data *pgdat,
struct scan_control *sc)
{
struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
if (!total_swap_pages)
return;
+ lruvec = mem_cgroup_lruvec(NULL, pgdat);
+ if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON))
+ return;
+
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
- struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
-
- if (inactive_list_is_low(lruvec, false, sc, true))
- shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
- sc, LRU_ACTIVE_ANON);
-
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+ sc, LRU_ACTIVE_ANON);
memcg = mem_cgroup_iter(NULL, memcg, NULL);
} while (memcg);
}
@@ -3475,7 +3465,9 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
/* Clear pgdat state for congested, dirty or under writeback. */
static void clear_pgdat_congested(pg_data_t *pgdat)
{
- clear_bit(PGDAT_CONGESTED, &pgdat->flags);
+ struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
+
+ clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
clear_bit(PGDAT_DIRTY, &pgdat->flags);
clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}
diff --git a/mm/workingset.c b/mm/workingset.c
index c963831d354f..474186b76ced 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -213,28 +213,53 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
*workingsetp = workingset;
}
+static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
+{
+ /*
+ * Reclaiming a cgroup means reclaiming all its children in a
+ * round-robin fashion. That means that each cgroup has an LRU
+ * order that is composed of the LRU orders of its child
+ * cgroups; and every page has an LRU position not just in the
+ * cgroup that owns it, but in all of that group's ancestors.
+ *
+ * So when the physical inactive list of a leaf cgroup ages,
+ * the virtual inactive lists of all its parents, including
+ * the root cgroup's, age as well.
+ */
+ do {
+ struct lruvec *lruvec;
+
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ atomic_long_inc(&lruvec->inactive_age);
+ } while (memcg && (memcg = parent_mem_cgroup(memcg)));
+}
+
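
A toy model of the parent walk above, assuming a plain parent pointer in place of parent_mem_cgroup() and skipping the NULL-memcg case the kernel loop also tolerates:

#include <stdio.h>

/* Illustrative only: bumping a leaf's clock ages every ancestor's
 * virtual inactive list, up to and including the root. */
struct node {
    const char *name;
    long inactive_age;
    struct node *parent;
};

static void advance_inactive_age(struct node *n)
{
    do {
        n->inactive_age++;
    } while ((n = n->parent));
}

int main(void)
{
    struct node root = { "root", 0, NULL };
    struct node mid  = { "mid",  0, &root };
    struct node leaf = { "leaf", 0, &mid };

    advance_inactive_age(&leaf);
    printf("%ld %ld %ld\n", leaf.inactive_age,
           mid.inactive_age, root.inactive_age);  /* 1 1 1 */
    return 0;
}
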
/**
* workingset_eviction - note the eviction of a page from memory
+ * @target_memcg: the cgroup that is causing the reclaim
* @page: the page being evicted
*
* Returns a shadow entry to be stored in @page->mapping->i_pages in place
* of the evicted @page so that a later refault can be detected.
*/
-void *workingset_eviction(struct page *page)
+void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
struct pglist_data *pgdat = page_pgdat(page);
- struct mem_cgroup *memcg = page_memcg(page);
- int memcgid = mem_cgroup_id(memcg);
unsigned long eviction;
struct lruvec *lruvec;
+ int memcgid;
/* Page is fully exclusive and pins page->mem_cgroup */
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
- lruvec = mem_cgroup_lruvec(pgdat, memcg);
- eviction = atomic_long_inc_return(&lruvec->inactive_age);
+ advance_inactive_age(page_memcg(page), pgdat);
+
+ lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+ /* XXX: target_memcg can be NULL, go through lruvec */
+ memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
+ eviction = atomic_long_read(&lruvec->inactive_age);
return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
@@ -244,10 +269,13 @@ void *workingset_eviction(struct page *page)
* @shadow: shadow entry of the evicted page
*
* Calculates and evaluates the refault distance of the previously
- * evicted page in the context of the node it was allocated in.
+ * evicted page in the context of the node and the memcg whose memory
+ * pressure caused the eviction.
*/
void workingset_refault(struct page *page, void *shadow)
{
+ struct mem_cgroup *eviction_memcg;
+ struct lruvec *eviction_lruvec;
unsigned long refault_distance;
struct pglist_data *pgdat;
unsigned long active_file;
@@ -277,12 +305,12 @@ void workingset_refault(struct page *page, void *shadow)
* would be better if the root_mem_cgroup existed in all
* configurations instead.
*/
- memcg = mem_cgroup_from_id(memcgid);
- if (!mem_cgroup_disabled() && !memcg)
+ eviction_memcg = mem_cgroup_from_id(memcgid);
+ if (!mem_cgroup_disabled() && !eviction_memcg)
goto out;
- lruvec = mem_cgroup_lruvec(pgdat, memcg);
- refault = atomic_long_read(&lruvec->inactive_age);
- active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
+ eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
+ refault = atomic_long_read(&eviction_lruvec->inactive_age);
+ active_file = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
/*
* Calculate the refault distance
@@ -302,6 +330,17 @@ void workingset_refault(struct page *page, void *shadow)
*/
refault_distance = (refault - eviction) & EVICTION_MASK;
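
A worked example of the wrap-safe subtraction above, with an assumed 8-bit eviction counter (the kernel derives EVICTION_MASK from the shadow entry's spare bits):

#include <stdio.h>

#define EVICTION_MASK 0xffUL  /* hypothetical width for illustration */

int main(void)
{
    unsigned long eviction = 250;  /* clock value at eviction time */
    unsigned long refault = 10;    /* clock has since wrapped past 255 */
    unsigned long distance = (refault - eviction) & EVICTION_MASK;

    printf("%lu\n", distance);  /* 16: the clock ticked 16 times in between */
    return 0;
}
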
+ /*
+ * The activation decision for this page is made at the level
+ * where the eviction occurred, as that is where the LRU order
+ * during page reclaim is being determined.
+ *
+ * However, the cgroup that will own the page is the one that
+ * is actually experiencing the refault event.
+ */
+ memcg = page_memcg(page);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+
inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
/*
@@ -313,7 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
goto out;
SetPageActive(page);
- atomic_long_inc(&lruvec->inactive_age);
+ advance_inactive_age(memcg, pgdat);
inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
/* Page was active prior to eviction */
@@ -332,7 +371,6 @@ out:
void workingset_activation(struct page *page)
{
struct mem_cgroup *memcg;
- struct lruvec *lruvec;
rcu_read_lock();
/*
@@ -345,8 +383,7 @@ void workingset_activation(struct page *page)
memcg = page_memcg_rcu(page);
if (!mem_cgroup_disabled() && !memcg)
goto out;
- lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
- atomic_long_inc(&lruvec->inactive_age);
+ advance_inactive_age(memcg, page_pgdat(page));
out:
rcu_read_unlock();
}
@@ -426,7 +463,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
struct lruvec *lruvec;
int i;
- lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
+ lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
pages += lruvec_page_state_local(lruvec,
NR_LRU_BASE + i);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 6d3d3f698ebb..43754d8ebce8 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/rwlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
@@ -90,6 +91,7 @@ struct z3fold_buddy_slots {
*/
unsigned long slot[BUDDY_MASK + 1];
unsigned long pool; /* back link + flags */
+ rwlock_t lock;
};
#define HANDLE_FLAG_MASK (0x03)
@@ -124,6 +126,7 @@ struct z3fold_header {
unsigned short start_middle;
unsigned short first_num:2;
unsigned short mapped_count:2;
+ unsigned short foreign_handles:2;
};
/**
@@ -178,6 +181,19 @@ enum z3fold_page_flags {
PAGE_CLAIMED, /* by either reclaim or free */
};
+/*
+ * handle flags, go under HANDLE_FLAG_MASK
+ */
+enum z3fold_handle_flags {
+ HANDLES_ORPHANED = 0,
+};
+
+/*
+ * Forward declarations
+ */
+static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
+static void compact_page_work(struct work_struct *w);
+
/*****************
* Helpers
*****************/
@@ -191,8 +207,6 @@ static int size_to_chunks(size_t size)
#define for_each_unbuddied_list(_iter, _begin) \
for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
-static void compact_page_work(struct work_struct *w);
-
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
gfp_t gfp)
{
@@ -204,6 +218,7 @@ static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
if (slots) {
memset(slots->slot, 0, sizeof(slots->slot));
slots->pool = (unsigned long)pool;
+ rwlock_init(&slots->lock);
}
return slots;
@@ -219,25 +234,110 @@ static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
+/* Lock a z3fold page */
+static inline void z3fold_page_lock(struct z3fold_header *zhdr)
+{
+ spin_lock(&zhdr->page_lock);
+}
+
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+ return spin_trylock(&zhdr->page_lock);
+}
+
+/* Unlock a z3fold page */
+static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
+{
+ spin_unlock(&zhdr->page_lock);
+}
+
+static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
+ bool lock)
+{
+ struct z3fold_buddy_slots *slots;
+ struct z3fold_header *zhdr;
+ int locked = 0;
+
+ if (!(handle & (1 << PAGE_HEADLESS))) {
+ slots = handle_to_slots(handle);
+ do {
+ unsigned long addr;
+
+ read_lock(&slots->lock);
+ addr = *(unsigned long *)handle;
+ zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
+ if (lock)
+ locked = z3fold_page_trylock(zhdr);
+ read_unlock(&slots->lock);
+ if (locked)
+ break;
+ cpu_relax();
+ } while (lock);
+ } else {
+ zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
+ }
+
+ return zhdr;
+}
+
+/* Returns the z3fold page where a given handle is stored */
+static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
+{
+ return __get_z3fold_header(h, false);
+}
+
+/* return locked z3fold page if it's not headless */
+static inline struct z3fold_header *get_z3fold_header(unsigned long h)
+{
+ return __get_z3fold_header(h, true);
+}
+
+static inline void put_z3fold_header(struct z3fold_header *zhdr)
+{
+ struct page *page = virt_to_page(zhdr);
+
+ if (!test_bit(PAGE_HEADLESS, &page->private))
+ z3fold_page_unlock(zhdr);
+}
+
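
The retry loop in __get_z3fold_header() takes the slots rwlock only long enough to dereference the handle, and merely trylocks the page so a concurrent relocation cannot deadlock against it. A pthreads reduction of that ordering, purely illustrative:

#include <pthread.h>
#include <sched.h>

struct obj {
    pthread_mutex_t lock;
};

struct slot {
    pthread_rwlock_t lock;
    struct obj *target;  /* may be repointed under slot->lock */
};

/* Peek at the target under the rwlock, trylock it, then drop the
 * rwlock before deciding whether to spin again. */
static struct obj *get_locked(struct slot *s)
{
    struct obj *o;
    int locked;

    do {
        pthread_rwlock_rdlock(&s->lock);
        o = s->target;
        locked = (pthread_mutex_trylock(&o->lock) == 0);
        pthread_rwlock_unlock(&s->lock);
        if (!locked)
            sched_yield();  /* cpu_relax() stand-in */
    } while (!locked);

    return o;
}

int main(void)
{
    struct obj o = { PTHREAD_MUTEX_INITIALIZER };
    struct slot s = { PTHREAD_RWLOCK_INITIALIZER, &o };

    pthread_mutex_unlock(&get_locked(&s)->lock);
    return 0;
}
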
static inline void free_handle(unsigned long handle)
{
struct z3fold_buddy_slots *slots;
+ struct z3fold_header *zhdr;
int i;
bool is_free;
if (handle & (1 << PAGE_HEADLESS))
return;
- WARN_ON(*(unsigned long *)handle == 0);
- *(unsigned long *)handle = 0;
+ if (WARN_ON(*(unsigned long *)handle == 0))
+ return;
+
+ zhdr = handle_to_z3fold_header(handle);
slots = handle_to_slots(handle);
+ write_lock(&slots->lock);
+ *(unsigned long *)handle = 0;
+ write_unlock(&slots->lock);
+ if (zhdr->slots == slots)
+ return; /* simple case, nothing else to do */
+
+ /* we are freeing a foreign handle if we are here */
+ zhdr->foreign_handles--;
is_free = true;
+ read_lock(&slots->lock);
+ if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
+ read_unlock(&slots->lock);
+ return;
+ }
for (i = 0; i <= BUDDY_MASK; i++) {
if (slots->slot[i]) {
is_free = false;
break;
}
}
+ read_unlock(&slots->lock);
if (is_free) {
struct z3fold_pool *pool = slots_to_pool(slots);
@@ -322,6 +422,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
zhdr->first_num = 0;
zhdr->start_middle = 0;
zhdr->cpu = -1;
+ zhdr->foreign_handles = 0;
zhdr->slots = slots;
zhdr->pool = pool;
INIT_LIST_HEAD(&zhdr->buddy);
@@ -341,24 +442,6 @@ static void free_z3fold_page(struct page *page, bool headless)
__free_page(page);
}
-/* Lock a z3fold page */
-static inline void z3fold_page_lock(struct z3fold_header *zhdr)
-{
- spin_lock(&zhdr->page_lock);
-}
-
-/* Try to lock a z3fold page */
-static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
-{
- return spin_trylock(&zhdr->page_lock);
-}
-
-/* Unlock a z3fold page */
-static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
-{
- spin_unlock(&zhdr->page_lock);
-}
-
/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
@@ -389,7 +472,9 @@ static unsigned long __encode_handle(struct z3fold_header *zhdr,
if (bud == LAST)
h |= (zhdr->last_chunks << BUDDY_SHIFT);
+ write_lock(&slots->lock);
slots->slot[idx] = h;
+ write_unlock(&slots->lock);
return (unsigned long)&slots->slot[idx];
}
@@ -398,22 +483,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
return __encode_handle(zhdr, zhdr->slots, bud);
}
-/* Returns the z3fold page where a given handle is stored */
-static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
-{
- unsigned long addr = h;
-
- if (!(addr & (1 << PAGE_HEADLESS)))
- addr = *(unsigned long *)h;
-
- return (struct z3fold_header *)(addr & PAGE_MASK);
-}
-
/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
- unsigned long addr = *(unsigned long *)handle;
+ struct z3fold_buddy_slots *slots = handle_to_slots(handle);
+ unsigned long addr;
+ read_lock(&slots->lock);
+ addr = *(unsigned long *)handle;
+ read_unlock(&slots->lock);
return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
@@ -425,10 +503,13 @@ static unsigned short handle_to_chunks(unsigned long handle)
static enum buddy handle_to_buddy(unsigned long handle)
{
struct z3fold_header *zhdr;
+ struct z3fold_buddy_slots *slots = handle_to_slots(handle);
unsigned long addr;
+ read_lock(&slots->lock);
WARN_ON(handle & (1 << PAGE_HEADLESS));
addr = *(unsigned long *)handle;
+ read_unlock(&slots->lock);
zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
return (addr - zhdr->first_num) & BUDDY_MASK;
}
@@ -442,6 +523,8 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
struct page *page = virt_to_page(zhdr);
struct z3fold_pool *pool = zhdr_to_pool(zhdr);
+ bool is_free = true;
+ int i;
WARN_ON(!list_empty(&zhdr->buddy));
set_bit(PAGE_STALE, &page->private);
@@ -450,8 +533,25 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
if (!list_empty(&page->lru))
list_del_init(&page->lru);
spin_unlock(&pool->lock);
+
+ /* If there are no foreign handles, free the handles array */
+ read_lock(&zhdr->slots->lock);
+ for (i = 0; i <= BUDDY_MASK; i++) {
+ if (zhdr->slots->slot[i]) {
+ is_free = false;
+ break;
+ }
+ }
+ if (!is_free)
+ set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
+ read_unlock(&zhdr->slots->lock);
+
+ if (is_free)
+ kmem_cache_free(pool->c_handle, zhdr->slots);
+
if (locked)
z3fold_page_unlock(zhdr);
+
spin_lock(&pool->stale_lock);
list_add(&zhdr->buddy, &pool->stale);
queue_work(pool->release_wq, &pool->work);
@@ -479,6 +579,7 @@ static void release_z3fold_page_locked_list(struct kref *ref)
struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
refcount);
struct z3fold_pool *pool = zhdr_to_pool(zhdr);
+
spin_lock(&pool->lock);
list_del_init(&zhdr->buddy);
spin_unlock(&pool->lock);
@@ -559,6 +660,119 @@ static inline void *mchunk_memmove(struct z3fold_header *zhdr,
zhdr->middle_chunks << CHUNK_SHIFT);
}
+static inline bool buddy_single(struct z3fold_header *zhdr)
+{
+ return !((zhdr->first_chunks && zhdr->middle_chunks) ||
+ (zhdr->first_chunks && zhdr->last_chunks) ||
+ (zhdr->middle_chunks && zhdr->last_chunks));
+}
+
+static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
+{
+ struct z3fold_pool *pool = zhdr_to_pool(zhdr);
+ void *p = zhdr;
+ unsigned long old_handle = 0;
+ size_t sz = 0;
+ struct z3fold_header *new_zhdr = NULL;
+ int first_idx = __idx(zhdr, FIRST);
+ int middle_idx = __idx(zhdr, MIDDLE);
+ int last_idx = __idx(zhdr, LAST);
+ unsigned short *moved_chunks = NULL;
+
+ /*
+ * No need to protect slots here -- all the slots are "local" and
+ * the page lock is already taken
+ */
+ if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
+ p += ZHDR_SIZE_ALIGNED;
+ sz = zhdr->first_chunks << CHUNK_SHIFT;
+ old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
+ moved_chunks = &zhdr->first_chunks;
+ } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
+ p += zhdr->start_middle << CHUNK_SHIFT;
+ sz = zhdr->middle_chunks << CHUNK_SHIFT;
+ old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
+ moved_chunks = &zhdr->middle_chunks;
+ } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
+ p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
+ sz = zhdr->last_chunks << CHUNK_SHIFT;
+ old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
+ moved_chunks = &zhdr->last_chunks;
+ }
+
+ if (sz > 0) {
+ enum buddy new_bud = HEADLESS;
+ short chunks = size_to_chunks(sz);
+ void *q;
+
+ new_zhdr = __z3fold_alloc(pool, sz, false);
+ if (!new_zhdr)
+ return NULL;
+
+ if (WARN_ON(new_zhdr == zhdr))
+ goto out_fail;
+
+ if (new_zhdr->first_chunks == 0) {
+ if (new_zhdr->middle_chunks != 0 &&
+ chunks >= new_zhdr->start_middle) {
+ new_bud = LAST;
+ } else {
+ new_bud = FIRST;
+ }
+ } else if (new_zhdr->last_chunks == 0) {
+ new_bud = LAST;
+ } else if (new_zhdr->middle_chunks == 0) {
+ new_bud = MIDDLE;
+ }
+ q = new_zhdr;
+ switch (new_bud) {
+ case FIRST:
+ new_zhdr->first_chunks = chunks;
+ q += ZHDR_SIZE_ALIGNED;
+ break;
+ case MIDDLE:
+ new_zhdr->middle_chunks = chunks;
+ new_zhdr->start_middle =
+ new_zhdr->first_chunks + ZHDR_CHUNKS;
+ q += new_zhdr->start_middle << CHUNK_SHIFT;
+ break;
+ case LAST:
+ new_zhdr->last_chunks = chunks;
+ q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
+ break;
+ default:
+ goto out_fail;
+ }
+ new_zhdr->foreign_handles++;
+ memcpy(q, p, sz);
+ write_lock(&zhdr->slots->lock);
+ *(unsigned long *)old_handle = (unsigned long)new_zhdr +
+ __idx(new_zhdr, new_bud);
+ if (new_bud == LAST)
+ *(unsigned long *)old_handle |=
+ (new_zhdr->last_chunks << BUDDY_SHIFT);
+ write_unlock(&zhdr->slots->lock);
+ add_to_unbuddied(pool, new_zhdr);
+ z3fold_page_unlock(new_zhdr);
+
+ *moved_chunks = 0;
+ }
+
+ return new_zhdr;
+
+out_fail:
+ if (new_zhdr) {
+ if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
+ atomic64_dec(&pool->pages_nr);
+ else {
+ add_to_unbuddied(pool, new_zhdr);
+ z3fold_page_unlock(new_zhdr);
+ }
+ }
+ return NULL;
+
+}
+
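
compact_single_buddy() relocates the payload and then rewrites the old slot in place, so existing handles keep resolving. The indirection, reduced to a toy:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char old_page[64] = "payload", new_page[64];
    char *slot = old_page;    /* the slot holds the object's address */
    char **handle = &slot;    /* users hold a pointer to the slot */

    memcpy(new_page, old_page, sizeof(new_page));
    *handle = new_page;       /* relocate: rewrite the slot only */

    printf("%s\n", *handle);  /* existing handles still resolve */
    return 0;
}
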
#define BIG_CHUNK_GAP 3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
@@ -638,6 +852,15 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
return;
}
+ if (!zhdr->foreign_handles && buddy_single(zhdr) &&
+ zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
+ if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
+ atomic64_dec(&pool->pages_nr);
+ else
+ z3fold_page_unlock(zhdr);
+ return;
+ }
+
z3fold_compact_page(zhdr);
add_to_unbuddied(pool, zhdr);
z3fold_page_unlock(zhdr);
@@ -690,7 +913,8 @@ lookup:
spin_unlock(&pool->lock);
page = virt_to_page(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private)) {
+ if (test_bit(NEEDS_COMPACTING, &page->private) ||
+ test_bit(PAGE_CLAIMED, &page->private)) {
z3fold_page_unlock(zhdr);
zhdr = NULL;
put_cpu_ptr(pool->unbuddied);
@@ -734,7 +958,8 @@ lookup:
spin_unlock(&pool->lock);
page = virt_to_page(zhdr);
- if (test_bit(NEEDS_COMPACTING, &page->private)) {
+ if (test_bit(NEEDS_COMPACTING, &page->private) ||
+ test_bit(PAGE_CLAIMED, &page->private)) {
z3fold_page_unlock(zhdr);
zhdr = NULL;
if (can_sleep)
@@ -1000,7 +1225,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
enum buddy bud;
bool page_claimed;
- zhdr = handle_to_z3fold_header(handle);
+ zhdr = get_z3fold_header(handle);
page = virt_to_page(zhdr);
page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
@@ -1014,6 +1239,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
spin_lock(&pool->lock);
list_del(&page->lru);
spin_unlock(&pool->lock);
+ put_z3fold_header(zhdr);
free_z3fold_page(page, true);
atomic64_dec(&pool->pages_nr);
}
@@ -1021,7 +1247,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
}
/* Non-headless case */
- z3fold_page_lock(zhdr);
bud = handle_to_buddy(handle);
switch (bud) {
@@ -1037,11 +1262,13 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
default:
pr_err("%s: unknown bud %d\n", __func__, bud);
WARN_ON(1);
- z3fold_page_unlock(zhdr);
+ put_z3fold_header(zhdr);
+ clear_bit(PAGE_CLAIMED, &page->private);
return;
}
- free_handle(handle);
+ if (!page_claimed)
+ free_handle(handle);
if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
atomic64_dec(&pool->pages_nr);
return;
@@ -1053,7 +1280,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
}
if (unlikely(PageIsolated(page)) ||
test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
- z3fold_page_unlock(zhdr);
+ put_z3fold_header(zhdr);
clear_bit(PAGE_CLAIMED, &page->private);
return;
}
@@ -1063,14 +1290,14 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
spin_unlock(&pool->lock);
zhdr->cpu = -1;
kref_get(&zhdr->refcount);
- do_compact_page(zhdr, true);
clear_bit(PAGE_CLAIMED, &page->private);
+ do_compact_page(zhdr, true);
return;
}
kref_get(&zhdr->refcount);
- queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
clear_bit(PAGE_CLAIMED, &page->private);
- z3fold_page_unlock(zhdr);
+ queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
+ put_z3fold_header(zhdr);
}
/**
@@ -1111,11 +1338,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
*/
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
- int i, ret = 0;
+ int i, ret = -1;
struct z3fold_header *zhdr = NULL;
struct page *page = NULL;
struct list_head *pos;
- struct z3fold_buddy_slots slots;
unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
spin_lock(&pool->lock);
@@ -1153,6 +1379,12 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
zhdr = NULL;
continue; /* can't evict at this point */
}
+ if (zhdr->foreign_handles) {
+ clear_bit(PAGE_CLAIMED, &page->private);
+ z3fold_page_unlock(zhdr);
+ zhdr = NULL;
+ continue; /* can't evict such a page */
+ }
kref_get(&zhdr->refcount);
list_del_init(&zhdr->buddy);
zhdr->cpu = -1;
@@ -1176,39 +1408,38 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
last_handle = 0;
middle_handle = 0;
if (zhdr->first_chunks)
- first_handle = __encode_handle(zhdr, &slots,
- FIRST);
+ first_handle = encode_handle(zhdr, FIRST);
if (zhdr->middle_chunks)
- middle_handle = __encode_handle(zhdr, &slots,
- MIDDLE);
+ middle_handle = encode_handle(zhdr, MIDDLE);
if (zhdr->last_chunks)
- last_handle = __encode_handle(zhdr, &slots,
- LAST);
+ last_handle = encode_handle(zhdr, LAST);
/*
* it's safe to unlock here because we hold a
* reference to this page
*/
z3fold_page_unlock(zhdr);
} else {
- first_handle = __encode_handle(zhdr, &slots, HEADLESS);
+ first_handle = encode_handle(zhdr, HEADLESS);
last_handle = middle_handle = 0;
}
-
/* Issue the eviction callback(s) */
if (middle_handle) {
ret = pool->ops->evict(pool, middle_handle);
if (ret)
goto next;
+ free_handle(middle_handle);
}
if (first_handle) {
ret = pool->ops->evict(pool, first_handle);
if (ret)
goto next;
+ free_handle(first_handle);
}
if (last_handle) {
ret = pool->ops->evict(pool, last_handle);
if (ret)
goto next;
+ free_handle(last_handle);
}
next:
if (test_bit(PAGE_HEADLESS, &page->private)) {
@@ -1264,14 +1495,13 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
void *addr;
enum buddy buddy;
- zhdr = handle_to_z3fold_header(handle);
+ zhdr = get_z3fold_header(handle);
addr = zhdr;
page = virt_to_page(zhdr);
if (test_bit(PAGE_HEADLESS, &page->private))
goto out;
- z3fold_page_lock(zhdr);
buddy = handle_to_buddy(handle);
switch (buddy) {
case FIRST:
@@ -1293,8 +1523,8 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
if (addr)
zhdr->mapped_count++;
- z3fold_page_unlock(zhdr);
out:
+ put_z3fold_header(zhdr);
return addr;
}
@@ -1309,18 +1539,17 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
struct page *page;
enum buddy buddy;
- zhdr = handle_to_z3fold_header(handle);
+ zhdr = get_z3fold_header(handle);
page = virt_to_page(zhdr);
if (test_bit(PAGE_HEADLESS, &page->private))
return;
- z3fold_page_lock(zhdr);
buddy = handle_to_buddy(handle);
if (buddy == MIDDLE)
clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
zhdr->mapped_count--;
- z3fold_page_unlock(zhdr);
+ put_z3fold_header(zhdr);
}
/**
@@ -1352,19 +1581,21 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
test_bit(PAGE_STALE, &page->private))
goto out;
+ if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
+ goto out;
+
pool = zhdr_to_pool(zhdr);
+ spin_lock(&pool->lock);
+ if (!list_empty(&zhdr->buddy))
+ list_del_init(&zhdr->buddy);
+ if (!list_empty(&page->lru))
+ list_del_init(&page->lru);
+ spin_unlock(&pool->lock);
+
+ kref_get(&zhdr->refcount);
+ z3fold_page_unlock(zhdr);
+ return true;
- if (zhdr->mapped_count == 0) {
- kref_get(&zhdr->refcount);
- if (!list_empty(&zhdr->buddy))
- list_del_init(&zhdr->buddy);
- spin_lock(&pool->lock);
- if (!list_empty(&page->lru))
- list_del(&page->lru);
- spin_unlock(&pool->lock);
- z3fold_page_unlock(zhdr);
- return true;
- }
out:
z3fold_page_unlock(zhdr);
return false;
@@ -1387,7 +1618,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
if (!z3fold_page_trylock(zhdr)) {
return -EAGAIN;
}
- if (zhdr->mapped_count != 0) {
+ if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
z3fold_page_unlock(zhdr);
return -EBUSY;
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index d32077b28433..5d0ed28c0d3a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -23,7 +23,7 @@
*/
/* Bluetooth HCI sockets. */
-
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
@@ -1054,6 +1054,22 @@ done:
return err;
}
+#ifdef CONFIG_COMPAT
+static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case HCIDEVUP:
+ case HCIDEVDOWN:
+ case HCIDEVRESET:
+ case HCIDEVRESTAT:
+ return hci_sock_ioctl(sock, cmd, arg);
+ }
+
+ return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
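
The compat handler above passes scalar-argument commands straight through and widens the 32-bit user pointer for everything else. A hypothetical userspace model of that dispatch shape; widen() stands in for compat_ptr(), and DEVUP/DEVDOWN are made-up command numbers:

#include <stdint.h>
#include <stdio.h>

#define DEVUP   1  /* hypothetical scalar-argument commands */
#define DEVDOWN 2

static uintptr_t widen(uint32_t uptr)
{
    return (uintptr_t)uptr;  /* zero-extend the 32-bit pointer */
}

static long do_ioctl(unsigned int cmd, unsigned long arg)
{
    printf("cmd=%u arg=%lu\n", cmd, arg);
    return 0;
}

static long compat_ioctl(unsigned int cmd, uint32_t arg)
{
    switch (cmd) {
    case DEVUP:
    case DEVDOWN:
        return do_ioctl(cmd, arg);         /* scalar: pass as-is */
    }
    return do_ioctl(cmd, widen(arg));      /* pointer: widen first */
}

int main(void)
{
    compat_ioctl(DEVUP, 7);
    compat_ioctl(99, 0xdeadbeef);
    return 0;
}
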
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
@@ -1974,6 +1990,9 @@ static const struct proto_ops hci_sock_ops = {
.sendmsg = hci_sock_sendmsg,
.recvmsg = hci_sock_recvmsg,
.ioctl = hci_sock_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hci_sock_compat_ioctl,
+#endif
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 90bb53aa4bee..b4eaf21360ef 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -24,7 +24,7 @@
/*
* RFCOMM sockets.
*/
-
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/sched/signal.h>
@@ -909,6 +909,13 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
return err;
}
+#ifdef CONFIG_COMPAT
+static int rfcomm_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return rfcomm_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
@@ -1042,7 +1049,10 @@ static const struct proto_ops rfcomm_sock_ops = {
.gettstamp = sock_gettstamp,
.poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
- .mmap = sock_no_mmap
+ .mmap = sock_no_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = rfcomm_sock_compat_ioctl,
+#endif
};
static const struct net_proto_family rfcomm_sock_family_ops = {
diff --git a/net/compat.c b/net/compat.c
index 0f7ded26059e..47d99c784947 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -232,7 +232,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
(type == SO_TIMESTAMPNS_OLD || type == SO_TIMESTAMPING_OLD)) {
int count = type == SO_TIMESTAMPNS_OLD ? 1 : 3;
int i;
- struct timespec *ts = (struct timespec *)data;
+ struct __kernel_old_timespec *ts = data;
for (i = 0; i < count; i++) {
cts[i].tv_sec = ts[i].tv_sec;
cts[i].tv_nsec = ts[i].tv_nsec;
diff --git a/net/core/filter.c b/net/core/filter.c
index b0ed048585ba..f1e703eed3d2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2299,7 +2299,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
WARN_ON_ONCE(last_sge == first_sge);
shift = last_sge > first_sge ?
last_sge - first_sge - 1 :
- MAX_SKB_FRAGS - first_sge + last_sge - 1;
+ NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
if (!shift)
goto out;
@@ -2308,8 +2308,8 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
do {
u32 move_from;
- if (i + shift >= MAX_MSG_FRAGS)
- move_from = i + shift - MAX_MSG_FRAGS;
+ if (i + shift >= NR_MSG_FRAG_IDS)
+ move_from = i + shift - NR_MSG_FRAG_IDS;
else
move_from = i + shift;
if (move_from == msg->sg.end)
@@ -2323,7 +2323,7 @@ BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
} while (1);
msg->sg.end = msg->sg.end - shift > msg->sg.end ?
- msg->sg.end - shift + MAX_MSG_FRAGS :
+ msg->sg.end - shift + NR_MSG_FRAG_IDS :
msg->sg.end - shift;
out:
msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
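
The shift computed above counts the entries strictly between first_sge and last_sge on the scatterlist ring; the second branch handles wraparound. A worked check with an assumed ring of 16 ids:

#include <stdio.h>

#define NR_IDS 16  /* hypothetical ring size */

static unsigned int between(unsigned int first, unsigned int last)
{
    return last > first ? last - first - 1
                        : NR_IDS - first + last - 1;
}

int main(void)
{
    printf("%u\n", between(3, 9));   /* 5: ids 4..8 */
    printf("%u\n", between(14, 2));  /* 3: ids 15, 0, 1 */
    return 0;
}
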
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9f7aa448bd11..0b5ccb7c9bc8 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1218,6 +1218,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
struct ifla_vf_mac vf_mac;
struct ifla_vf_broadcast vf_broadcast;
struct ifla_vf_info ivi;
+ struct ifla_vf_guid node_guid;
+ struct ifla_vf_guid port_guid;
memset(&ivi, 0, sizeof(ivi));
@@ -1284,6 +1286,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put(skb, IFLA_VF_TRUST,
sizeof(vf_trust), &vf_trust))
goto nla_put_vf_failure;
+
+ memset(&node_guid, 0, sizeof(node_guid));
+ memset(&port_guid, 0, sizeof(port_guid));
+ if (dev->netdev_ops->ndo_get_vf_guid &&
+ !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
+ &port_guid)) {
+ if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
+ &node_guid) ||
+ nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
+ &port_guid))
+ goto nla_put_vf_failure;
+ }
vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
if (!vfvlanlist)
goto nla_put_vf_failure;
diff --git a/net/core/scm.c b/net/core/scm.c
index 31a38239c92f..dc6fed1f221c 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -268,8 +268,10 @@ void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_inter
struct scm_timestamping tss;
int i;
- for (i = 0; i < ARRAY_SIZE(tss.ts); i++)
- tss.ts[i] = timespec64_to_timespec(tss_internal->ts[i]);
+ for (i = 0; i < ARRAY_SIZE(tss.ts); i++) {
+ tss.ts[i].tv_sec = tss_internal->ts[i].tv_sec;
+ tss.ts[i].tv_nsec = tss_internal->ts[i].tv_nsec;
+ }
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPING_OLD, sizeof(tss), &tss);
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index a469d2124f3f..ded2d5227678 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -421,7 +421,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
copied = skb->len;
msg->sg.start = 0;
msg->sg.size = copied;
- msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge;
+ msg->sg.end = num_sge;
msg->skb = skb;
sk_psock_queue_msg(psock, msg);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 53de8e00990e..2fe295432c24 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -495,7 +495,7 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
snum = ntohs(addr->sin_port);
err = -EACCES;
- if (snum && snum < inet_prot_sock(net) &&
+ if (snum && inet_port_requires_bind_service(net, snum) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
goto out;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9b48aec29aca..8a39ee794891 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1864,29 +1864,33 @@ static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
if (sock_flag(sk, SOCK_RCVTSTAMP)) {
if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
if (new_tstamp) {
- struct __kernel_timespec kts = {tss->ts[0].tv_sec, tss->ts[0].tv_nsec};
-
+ struct __kernel_timespec kts = {
+ .tv_sec = tss->ts[0].tv_sec,
+ .tv_nsec = tss->ts[0].tv_nsec,
+ };
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
sizeof(kts), &kts);
} else {
- struct timespec ts_old = timespec64_to_timespec(tss->ts[0]);
-
+ struct __kernel_old_timespec ts_old = {
+ .tv_sec = tss->ts[0].tv_sec,
+ .tv_nsec = tss->ts[0].tv_nsec,
+ };
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
sizeof(ts_old), &ts_old);
}
} else {
if (new_tstamp) {
- struct __kernel_sock_timeval stv;
-
- stv.tv_sec = tss->ts[0].tv_sec;
- stv.tv_usec = tss->ts[0].tv_nsec / 1000;
+ struct __kernel_sock_timeval stv = {
+ .tv_sec = tss->ts[0].tv_sec,
+ .tv_usec = tss->ts[0].tv_nsec / 1000,
+ };
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
sizeof(stv), &stv);
} else {
- struct __kernel_old_timeval tv;
-
- tv.tv_sec = tss->ts[0].tv_sec;
- tv.tv_usec = tss->ts[0].tv_nsec / 1000;
+ struct __kernel_old_timeval tv = {
+ .tv_sec = tss->ts[0].tv_sec,
+ .tv_usec = tss->ts[0].tv_nsec / 1000,
+ };
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
sizeof(tv), &tv);
}
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 8a56e09cfb0e..e38705165ac9 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg, int *copied, int flags)
{
- bool cork = false, enospc = msg->sg.start == msg->sg.end;
+ bool cork = false, enospc = sk_msg_full(msg);
struct sock *sk_redir;
u32 tosend, delta = 0;
int ret;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index ef37e0574f54..60e2ff91a5b3 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -292,7 +292,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
return -EINVAL;
snum = ntohs(addr->sin6_port);
- if (snum && snum < inet_prot_sock(net) &&
+ if (snum && inet_port_requires_bind_service(net, snum) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 0185e6e5e5d1..b3c9001d1f43 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -951,12 +951,7 @@ STA_OPS(he_capa);
sta->debugfs_dir, sta, &sta_ ##name## _ops);
#define DEBUGFS_ADD_COUNTER(name, field) \
- if (sizeof(sta->field) == sizeof(u32)) \
- debugfs_create_u32(#name, 0400, sta->debugfs_dir, \
- (u32 *) &sta->field); \
- else \
- debugfs_create_u64(#name, 0400, sta->debugfs_dir, \
- (u64 *) &sta->field);
+ debugfs_create_ulong(#name, 0400, sta->debugfs_dir, &sta->field);
void ieee80211_sta_debugfs_add(struct sta_info *sta)
{
@@ -1001,14 +996,8 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
DEBUGFS_ADD(airtime);
- if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
- debugfs_create_x32("driver_buffered_tids", 0400,
- sta->debugfs_dir,
- (u32 *)&sta->driver_buffered_tids);
- else
- debugfs_create_x64("driver_buffered_tids", 0400,
- sta->debugfs_dir,
- (u64 *)&sta->driver_buffered_tids);
+ debugfs_create_xul("driver_buffered_tids", 0400, sta->debugfs_dir,
+ &sta->driver_buffered_tids);
drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 3be7398901e0..8d14a1acbc37 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -423,7 +423,7 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
if (!svc && protocol == IPPROTO_TCP &&
atomic_read(&ipvs->ftpsvc_counter) &&
- (vport == FTPDATA || ntohs(vport) >= inet_prot_sock(ipvs->net))) {
+ (vport == FTPDATA || !inet_port_requires_bind_service(ipvs->net, ntohs(vport)))) {
/*
* Check if ftp service entry exists, the packet
* might belong to FTP data connections.
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 93d4991ddc1f..1047e8043084 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -723,9 +723,13 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
{
size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
- /* OVS_FLOW_ATTR_UFID */
+ /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
+ * see ovs_nla_put_identifier()
+ */
if (sfid && ovs_identifier_is_ufid(sfid))
len += nla_total_size(sfid->ufid_len);
+ else
+ len += nla_total_size(ovs_key_attr_size());
/* OVS_FLOW_ATTR_KEY */
if (!sfid || should_fill_key(sfid, ufid_flags))
@@ -901,7 +905,10 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
info->snd_portid, info->snd_seq, 0,
cmd, ufid_flags);
- BUG_ON(retval < 0);
+ if (WARN_ON_ONCE(retval < 0)) {
+ kfree_skb(skb);
+ skb = ERR_PTR(retval);
+ }
return skb;
}
@@ -1365,7 +1372,10 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
OVS_FLOW_CMD_DEL,
ufid_flags);
rcu_read_unlock();
- BUG_ON(err < 0);
+ if (WARN_ON_ONCE(err < 0)) {
+ kfree_skb(reply);
+ goto out_free;
+ }
ovs_notify(&dp_flow_genl_family, reply, info);
} else {
@@ -1373,6 +1383,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
}
}
+out_free:
ovs_flow_free(flow, true);
return 0;
unlock:
diff --git a/net/psample/psample.c b/net/psample/psample.c
index a6ceb0533b5b..6f2fbc6b9eb2 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -229,7 +229,7 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
- NLA_ALIGNTO;
- nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
+ nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
if (unlikely(!nl_skb))
return;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f9b08a6d8dbe..461d75274fb3 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1311,15 +1311,17 @@ static const struct file_operations rfkill_fops = {
.release = rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
.unlocked_ioctl = rfkill_fop_ioctl,
- .compat_ioctl = rfkill_fop_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
#endif
.llseek = no_llseek,
};
+#define RFKILL_NAME "rfkill"
+
static struct miscdevice rfkill_miscdev = {
- .name = "rfkill",
.fops = &rfkill_fops,
- .minor = MISC_DYNAMIC_MINOR,
+ .name = RFKILL_NAME,
+ .minor = RFKILL_MINOR,
};
static int __init rfkill_init(void)
@@ -1371,3 +1373,6 @@ static void __exit rfkill_exit(void)
class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);
+
+MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
+MODULE_ALIAS("devname:" RFKILL_NAME);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 0d578333e967..278c0b2dc523 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -245,7 +245,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(&sch->running, d, sch->cpu_bstats,
+ &sch->bstats) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 46980b8d66c5..0d0113a24962 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -557,8 +557,8 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &sch->bstats) < 0 ||
+ if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d,
+ sch->cpu_bstats, &sch->bstats) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
}
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index b2b7fdb06fc6..1330ad224931 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -339,7 +339,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl_q->bstats) < 0 ||
+ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 0f8fedb8809a..18b884cfdfe8 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -356,7 +356,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
cl_q = q->queues[cl - 1];
if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
- d, NULL, &cl_q->bstats) < 0 ||
+ d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
qdisc_qstats_copy(d, cl_q) < 0)
return -1;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 83e4ca1fabda..0b485952a71c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -384,7 +384,7 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
}
}
- if (snum && snum < inet_prot_sock(net) &&
+ if (snum && inet_port_requires_bind_service(net, snum) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
@@ -1061,7 +1061,7 @@ static int sctp_connect_new_asoc(struct sctp_endpoint *ep,
if (sctp_autobind(sk))
return -EAGAIN;
} else {
- if (ep->base.bind_addr.port < inet_prot_sock(net) &&
+ if (inet_port_requires_bind_service(net, ep->base.bind_addr.port) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
}
@@ -8267,6 +8267,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
struct sctp_sock *sp = sctp_sk(sk);
bool reuse = (sk->sk_reuse || sp->reuse);
struct sctp_bind_hashbucket *head; /* hash list */
+ struct net *net = sock_net(sk);
kuid_t uid = sock_i_uid(sk);
struct sctp_bind_bucket *pp;
unsigned short snum;
@@ -8282,7 +8283,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
/* Search for an available port. */
int low, high, remaining, index;
unsigned int rover;
- struct net *net = sock_net(sk);
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
@@ -8294,12 +8294,12 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
rover = low;
if (inet_is_local_reserved_port(net, rover))
continue;
- index = sctp_phashfn(sock_net(sk), rover);
+ index = sctp_phashfn(net, rover);
head = &sctp_port_hashtable[index];
spin_lock(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
- net_eq(sock_net(sk), pp->net))
+ net_eq(net, pp->net))
goto next;
break;
next:
@@ -8323,10 +8323,10 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* to the port number (snum) - we detect that with the
* port iterator, pp being NULL.
*/
- head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
+ head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
spin_lock(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
- if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
+ if ((pp->port == snum) && net_eq(pp->net, net))
goto pp_found;
}
}
@@ -8382,7 +8382,7 @@ pp_found:
pp_not_found:
/* If there was a hash table miss, create a new port. */
ret = 1;
- if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
+ if (!pp && !(pp = sctp_bucket_create(head, net, snum)))
goto fail_unlock;
/* In either case (hit or miss), make sure fastreuse is 1 only
diff --git a/net/socket.c b/net/socket.c
index 17bc1eee198a..ea28cbb9e2e7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -100,6 +100,7 @@
#include <linux/if_tun.h>
#include <linux/ipv6_route.h>
#include <linux/route.h>
+#include <linux/termios.h>
#include <linux/sockios.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>
@@ -794,7 +795,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
sizeof(ts), &ts);
} else {
- struct timespec ts;
+ struct __kernel_old_timespec ts;
skb_get_timestampns(skb, &ts);
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
@@ -1825,32 +1826,46 @@ SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
* include the -EINPROGRESS status for such sockets.
*/
-int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
+int __sys_connect_file(struct file *file, struct sockaddr __user *uservaddr,
+ int addrlen, int file_flags)
{
struct socket *sock;
struct sockaddr_storage address;
- int err, fput_needed;
+ int err;
- sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ sock = sock_from_file(file, &err);
if (!sock)
goto out;
err = move_addr_to_kernel(uservaddr, addrlen, &address);
if (err < 0)
- goto out_put;
+ goto out;
err =
security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
if (err)
- goto out_put;
+ goto out;
err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
- sock->file->f_flags);
-out_put:
- fput_light(sock->file, fput_needed);
+ sock->file->f_flags | file_flags);
out:
return err;
}
+int __sys_connect(int fd, struct sockaddr __user *uservaddr, int addrlen)
+{
+ int ret = -EBADF;
+ struct fd f;
+
+ f = fdget(fd);
+ if (f.file) {
+ ret = __sys_connect_file(f.file, uservaddr, addrlen, 0);
+ if (f.flags)
+ fput(f.file);
+ }
+
+ return ret;
+}
+
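
The refactor above splits fd lookup from the connect work so a caller that already holds a struct file can reuse __sys_connect_file(). A toy illustration of the fdget()/fput() shape, where the flag records whether the lookup took its own reference:

#include <stdio.h>

struct file { int refs; };

struct fd { struct file *file; int flags; };

/* Only pin the file when the table is shared, as fdget() does. */
static struct fd toy_fdget(struct file *f, int shared)
{
    struct fd r = { f, shared };
    if (shared)
        f->refs++;
    return r;
}

static int connect_file(struct file *f)
{
    return f ? 0 : -1;  /* the actual work, file already resolved */
}

int main(void)
{
    struct file f = { .refs = 1 };
    struct fd d = toy_fdget(&f, 1);
    int ret = connect_file(d.file);

    if (d.flags)
        f.refs--;       /* fput() stand-in */
    printf("%d %d\n", ret, f.refs);
    return 0;
}
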
SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
int, addrlen)
{
@@ -2250,15 +2265,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
return err < 0 ? err : 0;
}
-static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
- struct msghdr *msg_sys, unsigned int flags,
- struct used_address *used_address,
- unsigned int allowed_msghdr_flags)
+static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
+ unsigned int flags, struct used_address *used_address,
+ unsigned int allowed_msghdr_flags)
{
- struct compat_msghdr __user *msg_compat =
- (struct compat_msghdr __user *)msg;
- struct sockaddr_storage address;
- struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
unsigned char ctl[sizeof(struct cmsghdr) + 20]
__aligned(sizeof(__kernel_size_t));
/* 20 is size of ipv6_pktinfo */
@@ -2266,19 +2276,10 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
int ctl_len;
ssize_t err;
- msg_sys->msg_name = &address;
-
- if (MSG_CMSG_COMPAT & flags)
- err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov);
- else
- err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov);
- if (err < 0)
- return err;
-
err = -ENOBUFS;
if (msg_sys->msg_controllen > INT_MAX)
- goto out_freeiov;
+ goto out;
flags |= (msg_sys->msg_flags & allowed_msghdr_flags);
ctl_len = msg_sys->msg_controllen;
if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
@@ -2286,7 +2287,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
sizeof(ctl));
if (err)
- goto out_freeiov;
+ goto out;
ctl_buf = msg_sys->msg_control;
ctl_len = msg_sys->msg_controllen;
} else if (ctl_len) {
@@ -2295,7 +2296,7 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
if (ctl_len > sizeof(ctl)) {
ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
if (ctl_buf == NULL)
- goto out_freeiov;
+ goto out;
}
err = -EFAULT;
/*
@@ -2341,7 +2342,47 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
out_freectl:
if (ctl_buf != ctl)
sock_kfree_s(sock->sk, ctl_buf, ctl_len);
-out_freeiov:
+out:
+ return err;
+}
+
+static int sendmsg_copy_msghdr(struct msghdr *msg,
+ struct user_msghdr __user *umsg, unsigned flags,
+ struct iovec **iov)
+{
+ int err;
+
+ if (flags & MSG_CMSG_COMPAT) {
+ struct compat_msghdr __user *msg_compat;
+
+ msg_compat = (struct compat_msghdr __user *) umsg;
+ err = get_compat_msghdr(msg, msg_compat, NULL, iov);
+ } else {
+ err = copy_msghdr_from_user(msg, umsg, NULL, iov);
+ }
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned int flags,
+ struct used_address *used_address,
+ unsigned int allowed_msghdr_flags)
+{
+ struct sockaddr_storage address;
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ ssize_t err;
+
+ msg_sys->msg_name = &address;
+
+ err = sendmsg_copy_msghdr(msg_sys, msg, flags, &iov);
+ if (err < 0)
+ return err;
+
+ err = ____sys_sendmsg(sock, msg_sys, flags, used_address,
+ allowed_msghdr_flags);
kfree(iov);
return err;
}
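___sys_sendmsg() is thereby split into sendmsg_copy_msghdr(), which moves and validates the user header, and ____sys_sendmsg(), which performs the send on an already-kernel msghdr. The generic shape of this refactor, sketched with hypothetical names:

/*
 * Generic shape of the split: one helper copies from userspace, the
 * other does the work, so callers that already hold a kernel copy can
 * start at the second step. All names here are illustrative.
 */
static int foo_copy_from_user(struct foo *kfoo, struct foo __user *ufoo);
static int __do_foo(struct socket *sock, struct foo *kfoo);

static int do_foo(struct socket *sock, struct foo __user *ufoo)
{
	struct foo kfoo;
	int err;

	err = foo_copy_from_user(&kfoo, ufoo);
	if (err)
		return err;
	return __do_foo(sock, &kfoo);	/* in-kernel callers enter here */
}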
@@ -2349,12 +2390,27 @@ out_freeiov:
/*
* BSD sendmsg interface
*/
-long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *msg,
+long __sys_sendmsg_sock(struct socket *sock, struct user_msghdr __user *umsg,
unsigned int flags)
{
- struct msghdr msg_sys;
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ struct sockaddr_storage address;
+ struct msghdr msg = { .msg_name = &address };
+ ssize_t err;
+
+ err = sendmsg_copy_msghdr(&msg, umsg, flags, &iov);
+ if (err)
+ return err;
+ /* disallow ancillary data requests from this path */
+ if (msg.msg_control || msg.msg_controllen) {
+ err = -EINVAL;
+ goto out;
+ }
- return ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL, 0);
+ err = ____sys_sendmsg(sock, &msg, flags, NULL, 0);
+out:
+ kfree(iov);
+ return err;
}
long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
@@ -2460,33 +2516,41 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
return __sys_sendmmsg(fd, mmsg, vlen, flags, true);
}
-static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
- struct msghdr *msg_sys, unsigned int flags, int nosec)
+static int recvmsg_copy_msghdr(struct msghdr *msg,
+ struct user_msghdr __user *umsg, unsigned flags,
+ struct sockaddr __user **uaddr,
+ struct iovec **iov)
{
- struct compat_msghdr __user *msg_compat =
- (struct compat_msghdr __user *)msg;
- struct iovec iovstack[UIO_FASTIOV];
- struct iovec *iov = iovstack;
- unsigned long cmsg_ptr;
- int len;
ssize_t err;
- /* kernel mode address */
- struct sockaddr_storage addr;
-
- /* user mode address pointers */
- struct sockaddr __user *uaddr;
- int __user *uaddr_len = COMPAT_NAMELEN(msg);
+ if (MSG_CMSG_COMPAT & flags) {
+ struct compat_msghdr __user *msg_compat;
- msg_sys->msg_name = &addr;
-
- if (MSG_CMSG_COMPAT & flags)
- err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov);
- else
- err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov);
+ msg_compat = (struct compat_msghdr __user *) umsg;
+ err = get_compat_msghdr(msg, msg_compat, uaddr, iov);
+ } else {
+ err = copy_msghdr_from_user(msg, umsg, uaddr, iov);
+ }
if (err < 0)
return err;
+ return 0;
+}
+
+static int ____sys_recvmsg(struct socket *sock, struct msghdr *msg_sys,
+ struct user_msghdr __user *msg,
+ struct sockaddr __user *uaddr,
+ unsigned int flags, int nosec)
+{
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *) msg;
+ int __user *uaddr_len = COMPAT_NAMELEN(msg);
+ struct sockaddr_storage addr;
+ unsigned long cmsg_ptr;
+ int len;
+ ssize_t err;
+
+ msg_sys->msg_name = &addr;
cmsg_ptr = (unsigned long)msg_sys->msg_control;
msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
@@ -2497,7 +2561,7 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
flags |= MSG_DONTWAIT;
err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, flags);
if (err < 0)
- goto out_freeiov;
+ goto out;
len = err;
if (uaddr != NULL) {
@@ -2505,12 +2569,12 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
msg_sys->msg_namelen, uaddr,
uaddr_len);
if (err < 0)
- goto out_freeiov;
+ goto out;
}
err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT),
COMPAT_FLAGS(msg));
if (err)
- goto out_freeiov;
+ goto out;
if (MSG_CMSG_COMPAT & flags)
err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
&msg_compat->msg_controllen);
@@ -2518,10 +2582,25 @@ static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
&msg->msg_controllen);
if (err)
- goto out_freeiov;
+ goto out;
err = len;
+out:
+ return err;
+}
+
+static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned int flags, int nosec)
+{
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ /* user mode address pointers */
+ struct sockaddr __user *uaddr;
+ ssize_t err;
-out_freeiov:
+ err = recvmsg_copy_msghdr(msg_sys, msg, flags, &uaddr, &iov);
+ if (err < 0)
+ return err;
+
+ err = ____sys_recvmsg(sock, msg_sys, msg, uaddr, flags, nosec);
kfree(iov);
return err;
}
@@ -2530,12 +2609,28 @@ out_freeiov:
* BSD recvmsg interface
*/
-long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *msg,
+long __sys_recvmsg_sock(struct socket *sock, struct user_msghdr __user *umsg,
unsigned int flags)
{
- struct msghdr msg_sys;
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ struct sockaddr_storage address;
+ struct msghdr msg = { .msg_name = &address };
+ struct sockaddr __user *uaddr;
+ ssize_t err;
- return ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+ err = recvmsg_copy_msghdr(&msg, umsg, flags, &uaddr, &iov);
+ if (err)
+ return err;
+ /* disallow ancillary data requests from this path */
+ if (msg.msg_control || msg.msg_controllen) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = ____sys_recvmsg(sock, &msg, umsg, uaddr, flags, 0);
+out:
+ kfree(iov);
+ return err;
}
long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags,
@@ -2851,7 +2946,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
a[2], true);
break;
case SYS_RECVMMSG:
- if (IS_ENABLED(CONFIG_64BIT) || !IS_ENABLED(CONFIG_64BIT_TIME))
+ if (IS_ENABLED(CONFIG_64BIT))
err = __sys_recvmmsg(a0, (struct mmsghdr __user *)a1,
a[2], a[3],
(struct __kernel_timespec __user *)a[4],
@@ -3470,6 +3565,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCSARP:
case SIOCGARP:
case SIOCDARP:
+ case SIOCOUTQ:
+ case SIOCOUTQNSD:
case SIOCATMARK:
return sock_do_ioctl(net, sock, cmd, arg);
}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 17a529739f8d..0254bb7e418b 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -570,7 +570,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
if (len <= 0)
return -EINVAL;
- len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(name, len))
return -EINVAL;
@@ -842,7 +842,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
if (len <= 0)
return -EINVAL;
- len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(name, len))
return -EINVAL;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a1c8d722ca20..41688da233ab 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -532,7 +532,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
- long timeout = CONN_TIMEOUT_DEFAULT;
+ long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
u32 dnode = tsk_peer_node(tsk);
struct sk_buff *skb;
@@ -540,12 +540,10 @@ static void __tipc_shutdown(struct socket *sock, int error)
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
- /* Push out unsent messages or remove if pending SYN */
- skb = skb_peek(&sk->sk_write_queue);
- if (skb && !msg_is_syn(buf_msg(skb)))
- tipc_sk_push_backlog(tsk);
- else
- __skb_queue_purge(&sk->sk_write_queue);
+ /* Push out delayed messages if in Nagle mode */
+ tipc_sk_push_backlog(tsk);
+ /* Remove pending SYN */
+ __skb_queue_purge(&sk->sk_write_queue);
/* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer).
@@ -1248,9 +1246,14 @@ static void tipc_sk_push_backlog(struct tipc_sock *tsk)
struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
struct net *net = sock_net(&tsk->sk);
u32 dnode = tsk_peer_node(tsk);
+ struct sk_buff *skb = skb_peek(txq);
int rc;
- if (skb_queue_empty(txq) || tsk->cong_link_cnt)
+ if (!skb || tsk->cong_link_cnt)
+ return;
+
+ /* Do not send SYN again after congestion */
+ if (msg_is_syn(buf_msg(skb)))
return;
tsk->snt_unacked += tsk->snd_backlog;
@@ -1447,8 +1450,10 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
- if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
+ if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
+ __skb_queue_purge(&pkts);
return -ENOMEM;
+ }
trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
@@ -2757,6 +2762,7 @@ static void tipc_sk_timeout(struct timer_list *t)
if (sock_owned_by_user(sk)) {
sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
bh_unlock_sock(sk);
+ sock_put(sk);
return;
}
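The tipc_sk_timeout() hunk fixes a socket reference leak: sk_reset_timer() takes a fresh reference for the re-armed timer, so the early-return path must still drop the reference the current expiry holds. A sketch of the rule, assuming the usual sk_timer field (illustrative handler, not the TIPC code):

static void sample_sk_timeout(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* retry later; sk_reset_timer() takes its own reference */
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		bh_unlock_sock(sk);
		sock_put(sk);	/* the drop this patch adds */
		return;
	}
	/* ... normal expiry work ... */
	bh_unlock_sock(sk);
	sock_put(sk);
}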
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index bdca31ffe6da..b3da6c5ab999 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -209,24 +209,15 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
return tls_push_sg(sk, ctx, sg, offset, flags);
}
-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
struct scatterlist *sg;
- sg = ctx->partially_sent_record;
- if (!sg)
- return false;
-
- while (1) {
+ for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
put_page(sg_page(sg));
sk_mem_uncharge(sk, sg->length);
-
- if (sg_is_last(sg))
- break;
- sg++;
}
ctx->partially_sent_record = NULL;
- return true;
}
static void tls_write_space(struct sock *sk)
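tls_free_partial_record() becomes a plain sg_next() walk and returns void; its caller now tests ctx->partially_sent_record itself. A minimal sketch of the scatterlist-release idiom, with a hypothetical function name:

static void release_sg_pages(struct sock *sk, struct scatterlist *sgl)
{
	struct scatterlist *sg;

	/* sg_next() returns NULL past the last entry */
	for (sg = sgl; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
}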
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index da9f9ce51e7b..2b2d0bae14a9 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -710,8 +710,7 @@ static int tls_push_record(struct sock *sk, int flags,
}
i = msg_pl->sg.start;
- sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
- &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
+ sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
i = msg_en->sg.end;
sk_msg_iter_var_prev(i);
@@ -771,8 +770,14 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
policy = !(flags & MSG_SENDPAGE_NOPOLICY);
psock = sk_psock_get(sk);
- if (!psock || !policy)
- return tls_push_record(sk, flags, record_type);
+ if (!psock || !policy) {
+ err = tls_push_record(sk, flags, record_type);
+ if (err) {
+ *copied -= sk_msg_free(sk, msg);
+ tls_free_open_rec(sk);
+ }
+ return err;
+ }
more_data:
enospc = sk_msg_full(msg);
if (psock->eval == __SK_NONE) {
@@ -970,8 +975,6 @@ alloc_encrypted:
if (ret)
goto fallback_to_reg_send;
- rec->inplace_crypto = 0;
-
num_zc++;
copied += try_to_copy;
@@ -984,7 +987,7 @@ alloc_encrypted:
num_async++;
else if (ret == -ENOMEM)
goto wait_for_memory;
- else if (ret == -ENOSPC)
+ else if (ctx->open_rec && ret == -ENOSPC)
goto rollback_iter;
else if (ret != -EAGAIN)
goto send_end;
@@ -1053,11 +1056,12 @@ wait_for_memory:
ret = sk_stream_wait_memory(sk, &timeo);
if (ret) {
trim_sgl:
- tls_trim_both_msgs(sk, orig_size);
+ if (ctx->open_rec)
+ tls_trim_both_msgs(sk, orig_size);
goto send_end;
}
- if (msg_en->sg.size < required_size)
+ if (ctx->open_rec && msg_en->sg.size < required_size)
goto alloc_encrypted;
}
@@ -1169,7 +1173,6 @@ alloc_payload:
tls_ctx->pending_open_record_frags = true;
if (full_record || eor || sk_msg_full(msg_pl)) {
- rec->inplace_crypto = 0;
ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
record_type, &copied, flags);
if (ret) {
@@ -1190,11 +1193,13 @@ wait_for_sndbuf:
wait_for_memory:
ret = sk_stream_wait_memory(sk, &timeo);
if (ret) {
- tls_trim_both_msgs(sk, msg_pl->sg.size);
+ if (ctx->open_rec)
+ tls_trim_both_msgs(sk, msg_pl->sg.size);
goto sendpage_end;
}
- goto alloc_payload;
+ if (ctx->open_rec)
+ goto alloc_payload;
}
if (num_async) {
@@ -2084,7 +2089,8 @@ void tls_sw_release_resources_tx(struct sock *sk)
/* Free up un-sent records in tx_list. First, free
* the partially sent record if any at head of tx_list.
*/
- if (tls_free_partial_record(sk, tls_ctx)) {
+ if (tls_ctx->partially_sent_record) {
+ tls_free_partial_record(sk, tls_ctx);
rec = list_first_entry(&ctx->tx_list,
struct tls_rec, list);
list_del(&rec->list);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 193cba2d777b..7cfdce10de36 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -644,6 +644,9 @@ static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
@@ -685,6 +688,9 @@ static const struct proto_ops unix_stream_ops = {
.getname = unix_getname,
.poll = unix_poll,
.ioctl = unix_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = unix_compat_ioctl,
+#endif
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
@@ -708,6 +714,9 @@ static const struct proto_ops unix_dgram_ops = {
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = unix_compat_ioctl,
+#endif
.listen = sock_no_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
@@ -730,6 +739,9 @@ static const struct proto_ops unix_seqpacket_ops = {
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = unix_compat_ioctl,
+#endif
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
@@ -2580,6 +2592,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return err;
}
+#ifdef CONFIG_COMPAT
+static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
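The new unix_compat_ioctl() simply widens the 32-bit pointer with compat_ptr() and forwards to the native handler, which is what lets a 32-bit task issue ioctls such as SIOCOUTQ on an AF_UNIX socket. A hypothetical userspace sketch of the call it enables:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

int print_outq(int fd)
{
	int pending;

	if (ioctl(fd, SIOCOUTQ, &pending) < 0)	/* bytes not yet sent */
		return -1;
	printf("unsent bytes: %d\n", pending);
	return 0;
}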
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 3c7d07a99fc5..b3bdae74c243 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -922,6 +922,24 @@ static int hvs_remove(struct hv_device *hdev)
return 0;
}
+/* hv_sock connections cannot persist across hibernation, and all the hv_sock
+ * channels are forced to be rescinded before hibernation: see
+ * vmbus_bus_suspend(). Here the dummy hvs_suspend() and hvs_resume()
+ * are only needed because hibernation requires that every vmbus device's
+ * driver should have a .suspend and .resume callback: see vmbus_suspend().
+ */
+static int hvs_suspend(struct hv_device *hv_dev)
+{
+ /* Dummy */
+ return 0;
+}
+
+static int hvs_resume(struct hv_device *dev)
+{
+ /* Dummy */
+ return 0;
+}
+
/* This isn't really used. See vmbus_match() and vmbus_probe() */
static const struct hv_vmbus_device_id id_table[] = {
{},
@@ -933,6 +951,8 @@ static struct hv_driver hvs_drv = {
.id_table = id_table,
.probe = hvs_probe,
.remove = hvs_remove,
+ .suspend = hvs_suspend,
+ .resume = hvs_resume,
};
static int __init hvs_init(void)
diff --git a/samples/Kconfig b/samples/Kconfig
index c8dacb4dda80..9d236c346de5 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -19,6 +19,21 @@ config SAMPLE_TRACE_PRINTK
This builds a module that calls trace_printk() and can be used to
test various trace_printk() calls from a module.
+config SAMPLE_FTRACE_DIRECT
+ tristate "Build register_ftrace_direct() example"
+ depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
+ depends on X86_64 # uses x86_64 inline asm
+ help
+ This builds an ftrace direct function example
+ that hooks into wake_up_process and prints its parameters.
+
+config SAMPLE_TRACE_ARRAY
+ tristate "Build sample module for kernel access to Ftrace instancess"
+ depends on EVENT_TRACING && m
+ help
+ This builds a module that demonstrates the use of various APIs to
+ access Ftrace instances from within the kernel.
+
config SAMPLE_KOBJECT
tristate "Build kobject examples"
help
@@ -169,4 +184,11 @@ config SAMPLE_VFS
as mount API and statx(). Note that this is restricted to the x86
arch whilst it accesses system calls that aren't yet in all arches.
+config SAMPLE_INTEL_MEI
+ bool "Build example program working with intel mei driver"
+ depends on INTEL_MEI
+ help
+ Build a sample program that works with the mei device.
+
+
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 7d6e4ca28d69..5ce50ef0f2b2 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -17,6 +17,9 @@ obj-$(CONFIG_SAMPLE_RPMSG_CLIENT) += rpmsg/
subdir-$(CONFIG_SAMPLE_SECCOMP) += seccomp
obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace_events/
obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/
+obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/
obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
obj-y += vfio-mdev/
subdir-$(CONFIG_SAMPLE_VFS) += vfs
+obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
diff --git a/samples/ftrace/Makefile b/samples/ftrace/Makefile
new file mode 100644
index 000000000000..4ce896e10b2e
--- /dev/null
+++ b/samples/ftrace/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
+
+CFLAGS_sample-trace-array.o := -I$(src)
+obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
new file mode 100644
index 000000000000..e04229d21475
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-modify.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+
+void my_direct_func1(void)
+{
+ trace_printk("my direct func1\n");
+}
+
+void my_direct_func2(void)
+{
+ trace_printk("my direct func2\n");
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+static unsigned long my_ip = (unsigned long)schedule;
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" my_tramp1:"
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" call my_direct_func1\n"
+" leave\n"
+" ret\n"
+" my_tramp2:"
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" call my_direct_func2\n"
+" leave\n"
+" ret\n"
+" .popsection\n"
+);
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+ (unsigned long)my_tramp1,
+ (unsigned long)my_tramp2,
+};
+
+static int simple_thread(void *arg)
+{
+ static int t;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(2 * HZ);
+
+ if (ret)
+ continue;
+ t ^= 1;
+ ret = modify_ftrace_direct(my_ip, my_tramp, tramps[t]);
+ if (!ret)
+ my_tramp = tramps[t];
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_init(void)
+{
+ int ret;
+
+ ret = register_ftrace_direct(my_ip, my_tramp);
+ if (!ret)
+ simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+ return ret;
+}
+
+static void __exit ftrace_direct_exit(void)
+{
+ kthread_stop(simple_tsk);
+ unregister_ftrace_direct(my_ip, my_tramp);
+}
+
+module_init(ftrace_direct_init);
+module_exit(ftrace_direct_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
new file mode 100644
index 000000000000..27efa5f6ff52
--- /dev/null
+++ b/samples/ftrace/ftrace-direct-too.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+#include <linux/mm.h> /* for handle_mm_fault() */
+#include <linux/ftrace.h>
+
+void my_direct_func(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags)
+{
+ trace_printk("handle mm fault vma=%p address=%lx flags=%x\n",
+ vma, address, flags);
+}
+
+extern void my_tramp(void *);
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" my_tramp:"
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" pushq %rsi\n"
+" pushq %rdx\n"
+" call my_direct_func\n"
+" popq %rdx\n"
+" popq %rsi\n"
+" popq %rdi\n"
+" leave\n"
+" ret\n"
+" .popsection\n"
+);
+
+
+static int __init ftrace_direct_init(void)
+{
+ return register_ftrace_direct((unsigned long)handle_mm_fault,
+ (unsigned long)my_tramp);
+}
+
+static void __exit ftrace_direct_exit(void)
+{
+ unregister_ftrace_direct((unsigned long)handle_mm_fault,
+ (unsigned long)my_tramp);
+}
+
+module_init(ftrace_direct_init);
+module_exit(ftrace_direct_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Another example use case of using register_ftrace_direct()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
new file mode 100644
index 000000000000..a2e3063bd306
--- /dev/null
+++ b/samples/ftrace/ftrace-direct.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+#include <linux/sched.h> /* for wake_up_process() */
+#include <linux/ftrace.h>
+
+void my_direct_func(struct task_struct *p)
+{
+ trace_printk("waking up %s-%d\n", p->comm, p->pid);
+}
+
+extern void my_tramp(void *);
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" my_tramp:"
+" pushq %rbp\n"
+" movq %rsp, %rbp\n"
+" pushq %rdi\n"
+" call my_direct_func\n"
+" popq %rdi\n"
+" leave\n"
+" ret\n"
+" .popsection\n"
+);
+
+
+static int __init ftrace_direct_init(void)
+{
+ return register_ftrace_direct((unsigned long)wake_up_process,
+ (unsigned long)my_tramp);
+}
+
+static void __exit ftrace_direct_exit(void)
+{
+ unregister_ftrace_direct((unsigned long)wake_up_process,
+ (unsigned long)my_tramp);
+}
+
+module_init(ftrace_direct_init);
+module_exit(ftrace_direct_exit);
+
+MODULE_AUTHOR("Steven Rostedt");
+MODULE_DESCRIPTION("Example use case of using register_ftrace_direct()");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c
new file mode 100644
index 000000000000..d523450d73eb
--- /dev/null
+++ b/samples/ftrace/sample-trace-array.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/trace.h>
+#include <linux/trace_events.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+
+/*
+ * Any file that uses tracepoints must include the header.
+ * But exactly one file must include the header with
+ * CREATE_TRACE_POINTS defined first; that generates the C code
+ * which creates the handles for the tracepoints.
+ */
+#define CREATE_TRACE_POINTS
+#include "sample-trace-array.h"
+
+struct trace_array *tr;
+static void mytimer_handler(struct timer_list *unused);
+static struct task_struct *simple_tsk;
+
+/*
+ * mytimer: Timer set up to disable tracing for the event "sample_event". This
+ * timer exists only so the sample module can demonstrate access to
+ * Ftrace instances from within the kernel.
+ */
+static DEFINE_TIMER(mytimer, mytimer_handler);
+
+static void mytimer_handler(struct timer_list *unused)
+{
+ /*
+ * Disable tracing for event "sample_event".
+ */
+ trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
+ false);
+}
+
+static void simple_thread_func(int count)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+
+ /*
+ * Print the count value using trace_array_printk() - the
+ * trace_printk() equivalent for the instance buffers.
+ */
+ trace_array_printk(tr, _THIS_IP_, "trace_array_printk: count=%d\n",
+ count);
+ /*
+ * Tracepoint for event "sample_event". This will print the
+ * current value of count and current jiffies.
+ */
+ trace_sample_event(count, jiffies);
+}
+
+static int simple_thread(void *arg)
+{
+ int count = 0;
+ unsigned long delay = msecs_to_jiffies(5000);
+
+ /*
+ * Enable tracing for "sample_event".
+ */
+ trace_array_set_clr_event(tr, "sample-subsystem", "sample_event", true);
+
+ /*
+ * Add the timer - mytimer. It will disable tracing after
+ * "delay" jiffies (five seconds here).
+ */
+ add_timer(&mytimer);
+ mod_timer(&mytimer, jiffies+delay);
+
+ while (!kthread_should_stop())
+ simple_thread_func(count++);
+
+ del_timer(&mytimer);
+
+ /*
+ * trace_array_put() decrements the reference counter associated with
+ * the trace array - "tr". We are done using the trace array, hence
+ * decrement the reference counter so that it can be destroyed using
+ * trace_array_destroy().
+ */
+ trace_array_put(tr);
+
+ return 0;
+}
+
+static int __init sample_trace_array_init(void)
+{
+ /*
+ * Return a pointer to the trace array with name "sample-instance" if it
+ * exists, else create a new trace array.
+ *
+ * NOTE: This function increments the reference counter
+ * associated with the trace array - "tr".
+ */
+ tr = trace_array_get_by_name("sample-instance");
+
+ if (!tr)
+ return -1;
+ /*
+ * Allocate the per-cpu trace_printk buffers if they haven't already
+ * been allocated.
+ */
+ trace_printk_init_buffers();
+
+ simple_tsk = kthread_run(simple_thread, NULL, "sample-instance");
+ if (IS_ERR(simple_tsk))
+ return -1;
+ return 0;
+}
+
+static void __exit sample_trace_array_exit(void)
+{
+ kthread_stop(simple_tsk);
+
+ /*
+ * We are unloading our module and no longer require the trace array.
+ * Remove/destroy "tr" using trace_array_destroy()
+ */
+ trace_array_destroy(tr);
+}
+
+module_init(sample_trace_array_init);
+module_exit(sample_trace_array_exit);
+
+MODULE_AUTHOR("Divya Indi");
+MODULE_DESCRIPTION("Sample module for kernel access to Ftrace instances");
+MODULE_LICENSE("GPL");
diff --git a/samples/ftrace/sample-trace-array.h b/samples/ftrace/sample-trace-array.h
new file mode 100644
index 000000000000..6f8962428158
--- /dev/null
+++ b/samples/ftrace/sample-trace-array.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * If TRACE_SYSTEM is defined, that will be the directory created
+ * in the ftrace directory under /sys/kernel/tracing/events/<system>
+ *
+ * The define_trace.h below will also look for a file name of
+ * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here.
+ * In this case, it would look for sample-subsystem.h
+ *
+ * If the header name differs from the system name
+ * (as in this case), then you can override the header name that
+ * define_trace.h will look up by defining TRACE_INCLUDE_FILE
+ *
+ * This file is called sample-trace-array.h but we want the system
+ * to be called "sample-subsystem". Therefore we must define the name of this
+ * file:
+ *
+ * #define TRACE_INCLUDE_FILE sample-trace-array
+ *
+ * As we do in the bottom of this file.
+ *
+ * Notice that TRACE_SYSTEM should be defined outside of #if
+ * protection, just like TRACE_INCLUDE_FILE.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sample-subsystem
+
+/*
+ * TRACE_SYSTEM is expected to be a valid C identifier (alphanumeric
+ * characters and underscores, not starting with a digit). If for some
+ * reason it is not, you need to add the following lines:
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR sample_subsystem
+
+/*
+ * But the above is only needed if TRACE_SYSTEM is not a valid
+ * identifier. By default, TRACE_SYSTEM_VAR will be equal to
+ * TRACE_SYSTEM. As TRACE_SYSTEM_VAR must be a valid identifier, if
+ * TRACE_SYSTEM is not, then TRACE_SYSTEM_VAR must be defined with
+ * only alphanumeric characters and underscores.
+ *
+ * The TRACE_SYSTEM_VAR is only used internally and not visible to
+ * user space.
+ */
+
+/*
+ * Notice that this file is not protected like a normal header.
+ * We also must allow for rereading of this file. The
+ *
+ * || defined(TRACE_HEADER_MULTI_READ)
+ *
+ * serves this purpose.
+ */
+#if !defined(_SAMPLE_TRACE_ARRAY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SAMPLE_TRACE_ARRAY_H
+
+#include <linux/tracepoint.h>
+TRACE_EVENT(sample_event,
+
+ TP_PROTO(int count, unsigned long time),
+
+ TP_ARGS(count, time),
+
+ TP_STRUCT__entry(
+ __field(int, count)
+ __field(unsigned long, time)
+ ),
+
+ TP_fast_assign(
+ __entry->count = count;
+ __entry->time = time;
+ ),
+
+ TP_printk("count value=%d at jiffies=%lu", __entry->count,
+ __entry->time)
+ );
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE sample-trace-array
+#include <trace/define_trace.h>
diff --git a/samples/mei/Makefile b/samples/mei/Makefile
index c7e52e9e92ca..27f37efdadb4 100644
--- a/samples/mei/Makefile
+++ b/samples/mei/Makefile
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-CC := $(CROSS_COMPILE)gcc
-CFLAGS := -I../../usr/include
+# Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
-PROGS := mei-amt-version
+hostprogs-y := mei-amt-version
-all: $(PROGS)
+HOSTCFLAGS_mei-amt-version.o += -I$(objtree)/usr/include
-clean:
- rm -fr $(PROGS)
+always := $(hostprogs-y)
+
+all: mei-amt-version
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 17f8cef88fa8..4aa1806c59c2 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -4,7 +4,6 @@
bin2c
conmakehash
kallsyms
-pnmtologo
unifdef
recordmcount
sortextable
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 10ba926ae292..bc5f25763c1b 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -210,17 +210,20 @@ endif
# (needed for the shell)
make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
-# Find any prerequisites that is newer than target or that does not exist.
+# Find any prerequisites that are newer than target or that do not exist.
+# (This is not true for now; $? should contain any non-existent prerequisites,
+# but it does not work as expected when .SECONDARY is present. This seems to
+# be a bug in GNU Make.)
# PHONY targets skipped in both cases.
-any-prereq = $(filter-out $(PHONY),$?)$(filter-out $(PHONY) $(wildcard $^),$^)
+newer-prereqs = $(filter-out $(PHONY),$?)
# Execute command if command has changed or prerequisite(s) are updated.
-if_changed = $(if $(any-prereq)$(cmd-check), \
+if_changed = $(if $(newer-prereqs)$(cmd-check), \
$(cmd); \
printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
# Execute the command and also postprocess generated .d dependencies file.
-if_changed_dep = $(if $(any-prereq)$(cmd-check),$(cmd_and_fixdep),@:)
+if_changed_dep = $(if $(newer-prereqs)$(cmd-check),$(cmd_and_fixdep),@:)
cmd_and_fixdep = \
$(cmd); \
@@ -230,7 +233,7 @@ cmd_and_fixdep = \
# Usage: $(call if_changed_rule,foo)
# Will check if $(cmd_foo) or any of the prerequisites changed,
# and if so will execute $(rule_foo).
-if_changed_rule = $(if $(any-prereq)$(cmd-check),$(rule_$(1)),@:)
+if_changed_rule = $(if $(newer-prereqs)$(cmd-check),$(rule_$(1)),@:)
###
# why - tell why a target got built
@@ -255,7 +258,7 @@ ifeq ($(KBUILD_VERBOSE),2)
why = \
$(if $(filter $@, $(PHONY)),- due to target is PHONY, \
$(if $(wildcard $@), \
- $(if $(any-prereq),- due to: $(any-prereq), \
+ $(if $(newer-prereqs),- due to: $(newer-prereqs), \
$(if $(cmd-check), \
$(if $(cmd_$@),- due to command line change, \
$(if $(filter $@, $(targets)), \
diff --git a/scripts/Makefile b/scripts/Makefile
index 3e86b300f5a1..00c47901cb06 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -4,7 +4,6 @@
# the kernel for the build process.
# ---------------------------------------------------------------------------
# kallsyms: Find all symbols in vmlinux
-# pnmttologo: Convert pnm files to logo files
# conmakehash: Create chartable
# conmakehash: Create arrays for initializing the kernel console tables
@@ -12,7 +11,6 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-$(CONFIG_BUILD_BIN2C) += bin2c
hostprogs-$(CONFIG_KALLSYMS) += kallsyms
-hostprogs-$(CONFIG_LOGO) += pnmtologo
hostprogs-$(CONFIG_VT) += conmakehash
hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount
hostprogs-$(CONFIG_BUILDTIME_EXTABLE_SORT) += sortextable
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a9e47953ca53..b734ac8a654e 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -283,15 +283,6 @@ quiet_cmd_cc_lst_c = MKLST $@
$(obj)/%.lst: $(src)/%.c FORCE
$(call if_changed_dep,cc_lst_c)
-# header test (header-test-y, header-test-m target)
-# ---------------------------------------------------------------------------
-
-quiet_cmd_cc_s_h = CC $@
- cmd_cc_s_h = $(CC) $(c_flags) -S -o $@ -x c /dev/null -include $<
-
-$(obj)/%.h.s: $(src)/%.h FORCE
- $(call if_changed_dep,cc_s_h)
-
# Compile assembler sources (.S)
# ---------------------------------------------------------------------------
@@ -469,17 +460,20 @@ targets += $(call intermediate_targets, .asn1.o, .asn1.c .asn1.h) \
ifdef single-build
+KBUILD_SINGLE_TARGETS := $(filter $(obj)/%, $(KBUILD_SINGLE_TARGETS))
+
curdir-single := $(sort $(foreach x, $(KBUILD_SINGLE_TARGETS), \
$(if $(filter $(x) $(basename $(x)).o, $(targets)), $(x))))
# Handle single targets without any rule: show "Nothing to be done for ..." or
# "No rule to make target ..." depending on whether the target exists.
unknown-single := $(filter-out $(addsuffix /%, $(subdir-ym)), \
- $(filter $(obj)/%, \
- $(filter-out $(curdir-single), \
- $(KBUILD_SINGLE_TARGETS))))
+ $(filter-out $(curdir-single), $(KBUILD_SINGLE_TARGETS)))
+
+single-subdirs := $(foreach d, $(subdir-ym), \
+ $(if $(filter $(d)/%, $(KBUILD_SINGLE_TARGETS)), $(d)))
-__build: $(curdir-single) $(subdir-ym)
+__build: $(curdir-single) $(single-subdirs)
ifneq ($(unknown-single),)
$(Q)$(MAKE) -f /dev/null $(unknown-single)
endif
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index 1b405a7ed14f..708fbd08a2c5 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -56,9 +56,6 @@ new-dirs := $(filter-out $(existing-dirs), $(wanted-dirs))
$(if $(new-dirs), $(shell mkdir -p $(new-dirs)))
# Rules
-
-ifndef HDRCHECK
-
quiet_cmd_install = HDRINST $@
cmd_install = $(CONFIG_SHELL) $(srctree)/scripts/headers_install.sh $< $@
@@ -81,21 +78,6 @@ existing-headers := $(filter $(old-headers), $(all-headers))
-include $(foreach f,$(existing-headers),$(dir $(f)).$(notdir $(f)).cmd)
-else
-
-quiet_cmd_check = HDRCHK $<
- cmd_check = $(PERL) $(srctree)/scripts/headers_check.pl $(dst) $(SRCARCH) $<; touch $@
-
-check-files := $(addsuffix .chk, $(all-headers))
-
-$(check-files): $(dst)/%.chk : $(dst)/% $(srctree)/scripts/headers_check.pl
- $(call cmd,check)
-
-__headers: $(check-files)
- @:
-
-endif
-
PHONY += FORCE
FORCE:
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 179d55af5852..3fa32f83b2d7 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -65,20 +65,6 @@ extra-y += $(patsubst %.dtb,%.dt.yaml, $(dtb-y))
extra-$(CONFIG_OF_ALL_DTBS) += $(patsubst %.dtb,%.dt.yaml, $(dtb-))
endif
-# Test self-contained headers
-
-# Wildcard searches in $(srctree)/$(src)/, but not in $(objtree)/$(obj)/.
-# Stale generated headers are often left over, so pattern matching should
-# be avoided. Please notice $(srctree)/$(src)/ and $(objtree)/$(obj) point
-# to the same location for in-tree building. So, header-test-pattern-y should
-# be used with care.
-header-test-y += $(filter-out $(header-test-), \
- $(patsubst $(srctree)/$(src)/%, %, \
- $(wildcard $(addprefix $(srctree)/$(src)/, \
- $(header-test-pattern-y)))))
-
-extra-$(CONFIG_HEADER_TEST) += $(addsuffix .s, $(header-test-y) $(header-test-m))
-
# Add subdir path
extra-y := $(addprefix $(obj)/,$(extra-y))
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 952fff485546..69897d5d3a70 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -50,12 +50,10 @@ MODPOST = scripts/mod/modpost \
$(if $(CONFIG_MODVERSIONS),-m) \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
$(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
- $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
$(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
- $(if $(KBUILD_MODPOST_WARN),-w) \
- $(if $(filter nsdeps,$(MAKECMDGOALS)),-d)
+ $(if $(KBUILD_MODPOST_WARN),-w)
ifdef MODPOST_VMLINUX
@@ -67,10 +65,14 @@ __modpost:
else
-# When building external modules load the Kbuild file to retrieve EXTRA_SYMBOLS info
-ifneq ($(KBUILD_EXTMOD),)
+MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - \
+ $(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS))
-# set src + obj - they may be used when building the .mod.c file
+ifeq ($(KBUILD_EXTMOD),)
+MODPOST += $(wildcard vmlinux)
+else
+
+# set src + obj - they may be used in the module's Makefile
obj := $(KBUILD_EXTMOD)
src := $(obj)
@@ -79,8 +81,6 @@ include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
$(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
endif
-MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux)
-
# find all modules listed in modules.order
modules := $(sort $(shell cat $(MODORDER)))
@@ -96,8 +96,6 @@ ifneq ($(KBUILD_MODPOST_NOFINAL),1)
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal
endif
-nsdeps: __modpost
-
endif
.PHONY: $(PHONY)
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 56eadcc48d46..02135d2671a6 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -103,7 +103,7 @@ snap-pkg:
# tarball targets
# ---------------------------------------------------------------------------
-tar-pkgs := tar-pkg targz-pkg tarbz2-pkg tarxz-pkg
+tar-pkgs := dir-pkg tar-pkg targz-pkg tarbz2-pkg tarxz-pkg
PHONY += $(tar-pkgs)
$(tar-pkgs):
$(MAKE) -f $(srctree)/Makefile
@@ -146,7 +146,9 @@ help:
@echo ' binrpm-pkg - Build only the binary kernel RPM package'
@echo ' deb-pkg - Build both source and binary deb kernel packages'
@echo ' bindeb-pkg - Build only the binary kernel deb package'
- @echo ' snap-pkg - Build only the binary kernel snap package (will connect to external hosts)'
+ @echo ' snap-pkg - Build only the binary kernel snap package'
+ @echo ' (will connect to external hosts)'
+ @echo ' dir-pkg - Build the kernel as a plain directory structure'
@echo ' tar-pkg - Build the kernel as an uncompressed tarball'
@echo ' targz-pkg - Build the kernel as a gzip compressed tarball'
@echo ' tarbz2-pkg - Build the kernel as a bzip2 compressed tarball'
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4b40445938dc..592911a2f06c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2826,6 +2826,14 @@ sub process {
"added, moved or deleted file(s), does MAINTAINERS need updating?\n" . $herecurr);
}
+# Check for adding new DT bindings not in schema format
+ if (!$in_commit_log &&
+ ($line =~ /^new file mode\s*\d+\s*$/) &&
+ ($realfile =~ m@^Documentation/devicetree/bindings/.*\.txt$@)) {
+ WARN("DT_SCHEMA_BINDING_PATCH",
+ "DT bindings should be in DT schema format. See: Documentation/devicetree/writing-schema.rst\n");
+ }
+
# Check for wrappage within a valid hunk of the file
if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
ERROR("CORRUPTED_PATCH",
@@ -6015,14 +6023,18 @@ sub process {
for (my $count = $linenr; $count <= $lc; $count++) {
my $specifier;
my $extension;
+ my $qualifier;
my $bad_specifier = "";
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
- while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
+ while ($fmt =~ /(\%[\*\d\.]*p(\w)(\w*))/g) {
$specifier = $1;
$extension = $2;
- if ($extension !~ /[SsBKRraEehMmIiUDdgVCbGNOxt]/) {
+ $qualifier = $3;
+ if ($extension !~ /[SsBKRraEehMmIiUDdgVCbGNOxtf]/ ||
+ ($extension eq "f" &&
+ defined $qualifier && $qualifier !~ /^w/)) {
$bad_specifier = $specifier;
last;
}
@@ -6039,7 +6051,6 @@ sub process {
my $ext_type = "Invalid";
my $use = "";
if ($bad_specifier =~ /p[Ff]/) {
- $ext_type = "Deprecated";
$use = " - use %pS instead";
$use =~ s/pS/ps/ if ($bad_specifier =~ /pf/);
}
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index 82160808765c..b5a5b1c548c9 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -11,7 +11,7 @@ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
# Source files need to get at the userspace version of libfdt_env.h to compile
HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt
-ifeq ($(wildcard /usr/include/yaml.h),)
+ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),)
ifneq ($(CHECK_DTBS),)
$(error dtc needs libyaml for DT schema validation support. \
Install the necessary libyaml development package.)
@@ -19,7 +19,7 @@ endif
HOST_EXTRACFLAGS += -DNO_YAML
else
dtc-objs += yamltree.o
-HOSTLDLIBS_dtc := -lyaml
+HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs)
endif
# Generated files need one more search path to include headers in source tree
diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
index 00fd4738a587..541c432e7d19 100755
--- a/scripts/dtc/dtx_diff
+++ b/scripts/dtc/dtx_diff
@@ -20,6 +20,8 @@ Usage:
--annotate synonym for -T
+ --color synonym for -c (requires diff with --color support)
+ -c enable colored output
-f print full dts in diff (--unified=99999)
-h synonym for --help
-help synonym for --help
@@ -177,6 +179,7 @@ compile_to_dts() {
annotate=""
cmd_diff=0
diff_flags="-u"
+diff_color=""
dtx_file_1=""
dtx_file_2=""
dtc_sort="-s"
@@ -188,6 +191,13 @@ while [ $# -gt 0 ] ; do
case $1 in
+ -c | --color )
+ if diff --color /dev/null /dev/null 2>/dev/null ; then
+ diff_color="--color=always"
+ fi
+ shift
+ ;;
+
-f )
diff_flags="--unified=999999"
shift
@@ -343,7 +353,7 @@ DTC="\
if (( ${cmd_diff} )) ; then
- diff ${diff_flags} --label "${dtx_file_1}" --label "${dtx_file_2}" \
+ diff ${diff_flags} ${diff_color} --label "${dtx_file_1}" --label "${dtx_file_2}" \
<(compile_to_dts "${dtx_file_1}" "${dtx_path_1_dtc_include}") \
<(compile_to_dts "${dtx_file_2}" "${dtx_path_2_dtc_include}")
diff --git a/scripts/jobserver-exec b/scripts/jobserver-exec
new file mode 100755
index 000000000000..0fdb31a790a8
--- /dev/null
+++ b/scripts/jobserver-exec
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0+
+#
+# This determines how many parallel tasks "make" is expecting, as it is
+# not exposed via any special variable; it reserves them all, runs a subprocess
+# with the PARALLELISM environment variable set, and releases the jobs back again.
+#
+# https://www.gnu.org/software/make/manual/html_node/POSIX-Jobserver.html#POSIX-Jobserver
+from __future__ import print_function
+import os, sys, errno
+import subprocess
+
+# Extract and prepare jobserver file descriptors from the environment.
+claim = 0
+jobs = b""
+try:
+ # Fetch the make environment options.
+ flags = os.environ['MAKEFLAGS']
+
+ # Look for "--jobserver=R,W"
+ # Note that GNU Make has used --jobserver-fds and --jobserver-auth
+ # so this handles all of them.
+ opts = [x for x in flags.split(" ") if x.startswith("--jobserver")]
+
+ # Parse out R,W file descriptor numbers and set them nonblocking.
+ fds = opts[0].split("=", 1)[1]
+ reader, writer = [int(x) for x in fds.split(",", 1)]
+ # Open a private copy of reader to avoid setting nonblocking
+ # on an unsuspecting process with the same reader fd.
+ reader = os.open("/proc/self/fd/%d" % (reader),
+ os.O_RDONLY | os.O_NONBLOCK)
+
+ # Read out as many jobserver slots as possible.
+ while True:
+ try:
+ slot = os.read(reader, 8)
+ jobs += slot
+ except (OSError, IOError) as e:
+ if e.errno == errno.EWOULDBLOCK:
+ # Stop at the end of the jobserver queue.
+ break
+ # If something went wrong, give back the jobs.
+ if len(jobs):
+ os.write(writer, jobs)
+ raise e
+ # Add a bump for our caller's reservation, since we're just going
+ # to sit here blocked on our child.
+ claim = len(jobs) + 1
+except (KeyError, IndexError, ValueError, OSError, IOError) as e:
+ # Any missing environment strings or bad fds should result in just
+ # not being parallel.
+ pass
+
+# We can only claim parallelism if there was a jobserver (i.e. a top-level
+# "-jN" argument) and there were no other failures. Otherwise leave out the
+# environment variable and let the child figure out what is best.
+if claim > 0:
+ os.environ['PARALLELISM'] = '%d' % (claim)
+
+rc = subprocess.call(sys.argv[1:])
+
+# Return all the reserved slots.
+if len(jobs):
+ os.write(writer, jobs)
+
+sys.exit(rc)
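For comparison, the same jobserver handshake sketched in C (illustrative only; MAKEFLAGS parsing is omitted, and unlike the script above this sets O_NONBLOCK directly on the inherited descriptor rather than on a private reopen):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Claim as many jobserver slots as possible; returns slots owned
 * (including the implicit one), or -1 after returning the tokens. */
static int claim_slots(int reader, int writer, char *tokens, int max)
{
	ssize_t r = 0;
	int n = 0;

	fcntl(reader, F_SETFL, O_NONBLOCK);	/* stop at queue end */
	while (n < max && (r = read(reader, tokens + n, 1)) == 1)
		n++;
	if (r < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
		write(writer, tokens, n);	/* give the slots back */
		return -1;
	}
	return n + 1;
}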
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index ae6504d07fd6..fb55f262f42d 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -18,15 +18,14 @@
*
*/
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <limits.h>
-#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#endif
#define KSYM_NAME_LEN 128
@@ -58,9 +57,9 @@ static struct addr_range percpu_range = {
static struct sym_entry *table;
static unsigned int table_size, table_cnt;
-static int all_symbols = 0;
-static int absolute_percpu = 0;
-static int base_relative = 0;
+static int all_symbols;
+static int absolute_percpu;
+static int base_relative;
static int token_profit[0x10000];
@@ -76,18 +75,88 @@ static void usage(void)
exit(1);
}
-/*
- * This ignores the intensely annoying "mapping symbols" found
- * in ARM ELF files: $a, $t and $d.
- */
-static int is_arm_mapping_symbol(const char *str)
+static char *sym_name(const struct sym_entry *s)
+{
+ return (char *)s->sym + 1;
+}
+
+static bool is_ignored_symbol(const char *name, char type)
{
- return str[0] == '$' && strchr("axtd", str[1])
- && (str[2] == '\0' || str[2] == '.');
+ static const char * const ignored_symbols[] = {
+ /*
+ * Symbols which vary between passes. Passes 1 and 2 must have
+ * identical symbol lists. The kallsyms_* symbols below are
+ * only added after pass 1, they would be included in pass 2
+ * when --all-symbols is specified so exclude them to get a
+ * stable symbol list.
+ */
+ "kallsyms_addresses",
+ "kallsyms_offsets",
+ "kallsyms_relative_base",
+ "kallsyms_num_syms",
+ "kallsyms_names",
+ "kallsyms_markers",
+ "kallsyms_token_table",
+ "kallsyms_token_index",
+ /* Exclude linker generated symbols which vary between passes */
+ "_SDA_BASE_", /* ppc */
+ "_SDA2_BASE_", /* ppc */
+ NULL
+ };
+
+ static const char * const ignored_prefixes[] = {
+ "$", /* local symbols for ARM, MIPS, etc. */
+ ".LASANPC", /* s390 kasan local symbols */
+ "__crc_", /* modversions */
+ "__efistub_", /* arm64 EFI stub namespace */
+ NULL
+ };
+
+ static const char * const ignored_suffixes[] = {
+ "_from_arm", /* arm */
+ "_from_thumb", /* arm */
+ "_veneer", /* arm */
+ NULL
+ };
+
+ const char * const *p;
+
+ /* Exclude symbols which vary between passes. */
+ for (p = ignored_symbols; *p; p++)
+ if (!strcmp(name, *p))
+ return true;
+
+ for (p = ignored_prefixes; *p; p++)
+ if (!strncmp(name, *p, strlen(*p)))
+ return true;
+
+ for (p = ignored_suffixes; *p; p++) {
+ int l = strlen(name) - strlen(*p);
+
+ if (l >= 0 && !strcmp(name + l, *p))
+ return true;
+ }
+
+ if (type == 'U' || type == 'u')
+ return true;
+ /* exclude debugging symbols */
+ if (type == 'N' || type == 'n')
+ return true;
+
+ if (toupper(type) == 'A') {
+ /* Keep these useful absolute symbols */
+ if (strcmp(name, "__kernel_syscall_via_break") &&
+ strcmp(name, "__kernel_syscall_via_epc") &&
+ strcmp(name, "__kernel_sigtramp") &&
+ strcmp(name, "__gp"))
+ return true;
+ }
+
+ return false;
}
-static int check_symbol_range(const char *sym, unsigned long long addr,
- struct addr_range *ranges, int entries)
+static void check_symbol_range(const char *sym, unsigned long long addr,
+ struct addr_range *ranges, int entries)
{
size_t i;
struct addr_range *ar;
@@ -97,14 +166,12 @@ static int check_symbol_range(const char *sym, unsigned long long addr,
if (strcmp(sym, ar->start_sym) == 0) {
ar->start = addr;
- return 0;
+ return;
} else if (strcmp(sym, ar->end_sym) == 0) {
ar->end = addr;
- return 0;
+ return;
}
}
-
- return 1;
}
static int read_symbol(FILE *in, struct sym_entry *s)
@@ -125,34 +192,15 @@ static int read_symbol(FILE *in, struct sym_entry *s)
return -1;
}
+ if (is_ignored_symbol(sym, stype))
+ return -1;
+
/* Ignore most absolute/undefined (?) symbols. */
if (strcmp(sym, "_text") == 0)
_text = s->addr;
- else if (check_symbol_range(sym, s->addr, text_ranges,
- ARRAY_SIZE(text_ranges)) == 0)
- /* nothing to do */;
- else if (toupper(stype) == 'A')
- {
- /* Keep these useful absolute symbols */
- if (strcmp(sym, "__kernel_syscall_via_break") &&
- strcmp(sym, "__kernel_syscall_via_epc") &&
- strcmp(sym, "__kernel_sigtramp") &&
- strcmp(sym, "__gp"))
- return -1;
- }
- else if (toupper(stype) == 'U' ||
- is_arm_mapping_symbol(sym))
- return -1;
- /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */
- else if (sym[0] == '$')
- return -1;
- /* exclude debugging symbols */
- else if (stype == 'N' || stype == 'n')
- return -1;
- /* exclude s390 kasan local symbols */
- else if (!strncmp(sym, ".LASANPC", 8))
- return -1;
+ check_symbol_range(sym, s->addr, text_ranges, ARRAY_SIZE(text_ranges));
+ check_symbol_range(sym, s->addr, &percpu_range, 1);
/* include the type field in the symbol name, so that it gets
* compressed together */
@@ -163,22 +211,19 @@ static int read_symbol(FILE *in, struct sym_entry *s)
"unable to allocate required amount of memory\n");
exit(EXIT_FAILURE);
}
- strcpy((char *)s->sym + 1, sym);
+ strcpy(sym_name(s), sym);
s->sym[0] = stype;
s->percpu_absolute = 0;
- /* Record if we've found __per_cpu_start/end. */
- check_symbol_range(sym, s->addr, &percpu_range, 1);
-
return 0;
}
-static int symbol_in_range(struct sym_entry *s, struct addr_range *ranges,
- int entries)
+static int symbol_in_range(const struct sym_entry *s,
+ const struct addr_range *ranges, int entries)
{
size_t i;
- struct addr_range *ar;
+ const struct addr_range *ar;
for (i = 0; i < entries; ++i) {
ar = &ranges[i];
@@ -190,41 +235,9 @@ static int symbol_in_range(struct sym_entry *s, struct addr_range *ranges,
return 0;
}
-static int symbol_valid(struct sym_entry *s)
+static int symbol_valid(const struct sym_entry *s)
{
- /* Symbols which vary between passes. Passes 1 and 2 must have
- * identical symbol lists. The kallsyms_* symbols below are only added
- * after pass 1, they would be included in pass 2 when --all-symbols is
- * specified so exclude them to get a stable symbol list.
- */
- static char *special_symbols[] = {
- "kallsyms_addresses",
- "kallsyms_offsets",
- "kallsyms_relative_base",
- "kallsyms_num_syms",
- "kallsyms_names",
- "kallsyms_markers",
- "kallsyms_token_table",
- "kallsyms_token_index",
-
- /* Exclude linker generated symbols which vary between passes */
- "_SDA_BASE_", /* ppc */
- "_SDA2_BASE_", /* ppc */
- NULL };
-
- static char *special_prefixes[] = {
- "__crc_", /* modversions */
- "__efistub_", /* arm64 EFI stub namespace */
- NULL };
-
- static char *special_suffixes[] = {
- "_veneer", /* arm */
- "_from_arm", /* arm */
- "_from_thumb", /* arm */
- NULL };
-
- int i;
- char *sym_name = (char *)s->sym + 1;
+ const char *name = sym_name(s);
/* if --all-symbols is not specified, then symbols outside the text
* and inittext sections are discarded */
@@ -239,35 +252,37 @@ static int symbol_valid(struct sym_entry *s)
* rules.
*/
if ((s->addr == text_range_text->end &&
- strcmp(sym_name,
- text_range_text->end_sym)) ||
+ strcmp(name, text_range_text->end_sym)) ||
(s->addr == text_range_inittext->end &&
- strcmp(sym_name,
- text_range_inittext->end_sym)))
+ strcmp(name, text_range_inittext->end_sym)))
return 0;
}
- /* Exclude symbols which vary between passes. */
- for (i = 0; special_symbols[i]; i++)
- if (strcmp(sym_name, special_symbols[i]) == 0)
- return 0;
+ return 1;
+}
- for (i = 0; special_prefixes[i]; i++) {
- int l = strlen(special_prefixes[i]);
+/* remove all the invalid symbols from the table */
+static void shrink_table(void)
+{
+ unsigned int i, pos;
- if (l <= strlen(sym_name) &&
- strncmp(sym_name, special_prefixes[i], l) == 0)
- return 0;
+ pos = 0;
+ for (i = 0; i < table_cnt; i++) {
+ if (symbol_valid(&table[i])) {
+ if (pos != i)
+ table[pos] = table[i];
+ pos++;
+ } else {
+ free(table[i].sym);
+ }
}
+ table_cnt = pos;
- for (i = 0; special_suffixes[i]; i++) {
- int l = strlen(sym_name) - strlen(special_suffixes[i]);
-
- if (l >= 0 && strcmp(sym_name + l, special_suffixes[i]) == 0)
- return 0;
+ /* When no valid symbol is left, exit with an error */
+ if (!table_cnt) {
+ fprintf(stderr, "No valid symbol.\n");
+ exit(1);
}
-
- return 1;
}
static void read_map(FILE *in)
@@ -288,7 +303,7 @@ static void read_map(FILE *in)
}
}
-static void output_label(char *label)
+static void output_label(const char *label)
{
printf(".globl %s\n", label);
printf("\tALGN\n");
@@ -297,7 +312,7 @@ static void output_label(char *label)
/* uncompress a compressed symbol. When this function is called, the best table
* might still be compressed itself, so the function needs to be recursive */
-static int expand_symbol(unsigned char *data, int len, char *result)
+static int expand_symbol(const unsigned char *data, int len, char *result)
{
int c, rlen, total=0;
@@ -322,7 +337,7 @@ static int expand_symbol(unsigned char *data, int len, char *result)
return total;
}
-static int symbol_absolute(struct sym_entry *s)
+static int symbol_absolute(const struct sym_entry *s)
{
return s->percpu_absolute;
}
@@ -460,7 +475,7 @@ static void write_src(void)
/* table lookup compression functions */
/* count all the possible tokens in a symbol */
-static void learn_symbol(unsigned char *symbol, int len)
+static void learn_symbol(const unsigned char *symbol, int len)
{
int i;
@@ -469,7 +484,7 @@ static void learn_symbol(unsigned char *symbol, int len)
}
/* decrease the count for all the possible tokens in a symbol */
-static void forget_symbol(unsigned char *symbol, int len)
+static void forget_symbol(const unsigned char *symbol, int len)
{
int i;
@@ -477,24 +492,17 @@ static void forget_symbol(unsigned char *symbol, int len)
token_profit[ symbol[i] + (symbol[i + 1] << 8) ]--;
}
-/* remove all the invalid symbols from the table and do the initial token count */
+/* do the initial token count */
static void build_initial_tok_table(void)
{
- unsigned int i, pos;
+ unsigned int i;
- pos = 0;
- for (i = 0; i < table_cnt; i++) {
- if ( symbol_valid(&table[i]) ) {
- if (pos != i)
- table[pos] = table[i];
- learn_symbol(table[pos].sym, table[pos].len);
- pos++;
- }
- }
- table_cnt = pos;
+ for (i = 0; i < table_cnt; i++)
+ learn_symbol(table[i].sym, table[i].len);
}
-static void *find_token(unsigned char *str, int len, unsigned char *token)
+static unsigned char *find_token(unsigned char *str, int len,
+ const unsigned char *token)
{
int i;
@@ -507,7 +515,7 @@ static void *find_token(unsigned char *str, int len, unsigned char *token)
/* replace a given token in all the valid symbols. Use the sampled symbols
* to update the counts */
-static void compress_symbols(unsigned char *str, int idx)
+static void compress_symbols(const unsigned char *str, int idx)
{
unsigned int i, len, size;
unsigned char *p1, *p2;
@@ -614,19 +622,13 @@ static void optimize_token_table(void)
insert_real_symbols_in_table();
- /* When valid symbol is not registered, exit to error */
- if (!table_cnt) {
- fprintf(stderr, "No valid symbol.\n");
- exit(1);
- }
-
optimize_result();
}
/* guess whether this is a "linker script provided" symbol */
static int may_be_linker_script_provide_symbol(const struct sym_entry *se)
{
- const char *symbol = (char *)se->sym + 1;
+ const char *symbol = sym_name(se);
int len = se->len - 1;
if (len < 8)
@@ -658,16 +660,6 @@ static int may_be_linker_script_provide_symbol(const struct sym_entry *se)
return 0;
}
-static int prefix_underscores_count(const char *str)
-{
- const char *tail = str;
-
- while (*tail == '_')
- tail++;
-
- return tail - str;
-}
-
static int compare_symbols(const void *a, const void *b)
{
const struct sym_entry *sa;
@@ -696,8 +688,8 @@ static int compare_symbols(const void *a, const void *b)
return wa - wb;
/* sort by the number of prefix underscores */
- wa = prefix_underscores_count((const char *)sa->sym + 1);
- wb = prefix_underscores_count((const char *)sb->sym + 1);
+ wa = strspn(sym_name(sa), "_");
+ wb = strspn(sym_name(sb), "_");
if (wa != wb)
return wa - wb;
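
strspn() returns the length of the initial segment consisting only of characters from the given set, so it replaces the removed prefix_underscores_count() helper exactly. A quick self-contained check:

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		assert(strspn("__per_cpu_start", "_") == 2);	/* two leading '_' */
		assert(strspn("main", "_") == 0);		/* no prefix */
		return 0;
	}
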
@@ -731,11 +723,15 @@ static void record_relative_base(void)
{
unsigned int i;
- relative_base = -1ULL;
for (i = 0; i < table_cnt; i++)
- if (!symbol_absolute(&table[i]) &&
- table[i].addr < relative_base)
+ if (!symbol_absolute(&table[i])) {
+ /*
+ * The table is sorted by address.
+ * Take the first non-absolute symbol value.
+ */
relative_base = table[i].addr;
+ return;
+ }
}
int main(int argc, char **argv)
@@ -756,11 +752,12 @@ int main(int argc, char **argv)
usage();
read_map(stdin);
+ shrink_table();
if (absolute_percpu)
make_percpus_absolute();
+ sort_symbols();
if (base_relative)
record_relative_base();
- sort_symbols();
optimize_token_table();
write_src();
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ef2f2336c469..2f1a59fa5169 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -66,7 +66,9 @@ localyesconfig localmodconfig: $(obj)/conf
# syncconfig has become an internal implementation detail and is now
# deprecated for external use
simple-targets := oldconfig allnoconfig allyesconfig allmodconfig \
- alldefconfig randconfig listnewconfig olddefconfig syncconfig
+ alldefconfig randconfig listnewconfig olddefconfig syncconfig \
+ helpnewconfig
+
PHONY += $(simple-targets)
$(simple-targets): $(obj)/conf
@@ -134,17 +136,19 @@ help:
@echo ' alldefconfig - New config with all symbols set to default'
@echo ' randconfig - New config with random answer to all options'
@echo ' listnewconfig - List new options'
+ @echo ' helpnewconfig - List new options and help text'
@echo ' olddefconfig - Same as oldconfig but sets new symbols to their'
@echo ' default value without prompting'
@echo ' kvmconfig - Enable additional options for kvm guest kernel support'
- @echo ' xenconfig - Enable additional options for xen dom0 and guest kernel support'
+ @echo ' xenconfig - Enable additional options for xen dom0 and guest kernel'
+ @echo ' support'
@echo ' tinyconfig - Configure the tiniest possible kernel'
@echo ' testconfig - Run Kconfig unit tests (requires python3 and pytest)'
# ===========================================================================
# object files used by all kconfig flavours
common-objs := confdata.o expr.o lexer.lex.o parser.tab.o preprocess.o \
- symbol.o
+ symbol.o util.o
$(obj)/lexer.lex.o: $(obj)/parser.tab.h
HOSTCFLAGS_lexer.lex.o := -I $(srctree)/$(src)
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 40e16e871ae2..1f89bf1558ce 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -32,6 +32,7 @@ enum input_mode {
defconfig,
savedefconfig,
listnewconfig,
+ helpnewconfig,
olddefconfig,
};
static enum input_mode input_mode = oldaskconfig;
@@ -434,6 +435,11 @@ static void check_conf(struct menu *menu)
printf("%s%s=%s\n", CONFIG_, sym->name, str);
}
}
+ } else if (input_mode == helpnewconfig) {
+ printf("-----\n");
+ print_help(menu);
+ printf("-----\n");
} else {
if (!conf_cnt++)
printf("*\n* Restart config...\n*\n");
@@ -459,6 +465,7 @@ static struct option long_opts[] = {
{"alldefconfig", no_argument, NULL, alldefconfig},
{"randconfig", no_argument, NULL, randconfig},
{"listnewconfig", no_argument, NULL, listnewconfig},
+ {"helpnewconfig", no_argument, NULL, helpnewconfig},
{"olddefconfig", no_argument, NULL, olddefconfig},
{NULL, 0, NULL, 0}
};
@@ -469,6 +476,7 @@ static void conf_usage(const char *progname)
printf("Usage: %s [-s] [option] <kconfig-file>\n", progname);
printf("[option] is _one_ of the following:\n");
printf(" --listnewconfig List new options\n");
+ printf(" --helpnewconfig List new options and help text\n");
printf(" --oldaskconfig Start a new configuration using a line-oriented program\n");
printf(" --oldconfig Update a configuration using a provided .config as base\n");
printf(" --syncconfig Similar to oldconfig but generates configuration in\n"
@@ -543,6 +551,7 @@ int main(int ac, char **av)
case allmodconfig:
case alldefconfig:
case listnewconfig:
+ case helpnewconfig:
case olddefconfig:
break;
case '?':
@@ -576,6 +585,7 @@ int main(int ac, char **av)
case oldaskconfig:
case oldconfig:
case listnewconfig:
+ case helpnewconfig:
case olddefconfig:
conf_read(NULL);
break;
@@ -657,6 +667,7 @@ int main(int ac, char **av)
/* fall through */
case oldconfig:
case listnewconfig:
+ case helpnewconfig:
case syncconfig:
/* Update until a loop caused no more changes */
do {
@@ -675,7 +686,7 @@ int main(int ac, char **av)
defconfig_file);
return 1;
}
- } else if (input_mode != listnewconfig) {
+ } else if (input_mode != listnewconfig && input_mode != helpnewconfig) {
if (!no_conf_write && conf_write(NULL)) {
fprintf(stderr, "\n*** Error during writing of the configuration.\n\n");
exit(1);
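
The helpnewconfig plumbing above shows the pattern for adding a conf mode: one enum value, one long_opts row that reuses the enum value as the option's val, and a case label in each mode switch. A minimal getopt_long sketch of that dispatch style, with hypothetical option names rather than the real conf.c ones:

	#include <getopt.h>
	#include <stdio.h>

	enum mode { mode_none, mode_list, mode_help };	/* hypothetical modes */

	static const struct option long_opts[] = {
		{"list", no_argument, NULL, mode_list},
		{"help", no_argument, NULL, mode_help},
		{NULL, 0, NULL, 0}
	};

	int main(int argc, char **argv)
	{
		enum mode m = mode_none;
		int opt;

		while ((opt = getopt_long(argc, argv, "", long_opts, NULL)) != -1) {
			switch (opt) {
			case mode_list:
			case mode_help:
				m = opt;	/* the val doubles as the mode */
				break;
			default:
				return 1;	/* unknown option */
			}
		}
		printf("mode=%d\n", m);
		return 0;
	}
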
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index c812872d7f9d..aa68ec95620d 100755
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -44,4 +44,7 @@ echo >&2 "* Unable to find the ncurses package."
echo >&2 "* Install ncurses (ncurses-devel or libncurses-dev"
echo >&2 "* depending on your distribution)."
echo >&2 "*"
+echo >&2 "* You may also need to install pkg-config to find the"
+echo >&2 "* ncurses installed in a non-default location."
+echo >&2 "*"
exit 1
diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh
index 001559ef0a60..c212255070c0 100755
--- a/scripts/kconfig/nconf-cfg.sh
+++ b/scripts/kconfig/nconf-cfg.sh
@@ -44,4 +44,7 @@ echo >&2 "* Unable to find the ncurses package."
echo >&2 "* Install ncurses (ncurses-devel or libncurses-dev"
echo >&2 "* depending on your distribution)."
echo >&2 "*"
+echo >&2 "* You may also need to install pkg-config to find the"
+echo >&2 "* ncurses installed in a non-default location."
+echo >&2 "*"
exit 1
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index 60936c76865b..b3eff9613cf8 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -727,5 +727,4 @@ void zconfdump(FILE *out)
}
}
-#include "util.c"
#include "menu.c"
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 81dc91760b23..f2d73f04e71d 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1062,7 +1062,7 @@ sub dump_struct($$) {
my $x = shift;
my $file = shift;
- if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) {
+ if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|____cacheline_aligned_in_smp|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) {
my $decl_type = $1;
$declaration_name = $2;
my $members = $3;
@@ -1073,10 +1073,11 @@ sub dump_struct($$) {
# strip comments:
$members =~ s/\/\*.*?\*\///gos;
# strip attributes
- $members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)//gi;
- $members =~ s/\s*__aligned\s*\([^;]*\)//gos;
- $members =~ s/\s*__packed\s*//gos;
- $members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos;
+ $members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)/ /gi;
+ $members =~ s/\s*__aligned\s*\([^;]*\)/ /gos;
+ $members =~ s/\s*__packed\s*/ /gos;
+ $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos;
+ $members =~ s/\s*____cacheline_aligned_in_smp/ /gos;
# replace DECLARE_BITMAP
$members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
# replace DECLARE_HASHTABLE
@@ -1449,6 +1450,10 @@ sub push_parameter($$$$) {
# handles unnamed variable parameters
$param = "...";
}
+ elsif ($param =~ /\w\.\.\.$/) {
+ # for named variable parameters of the form `x...`, remove the dots
+ $param =~ s/\.\.\.$//;
+ }
if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") {
$parameterdescs{$param} = "variable arguments";
}
@@ -1936,6 +1941,18 @@ sub process_name($$) {
sub process_body($$) {
my $file = shift;
+ # Until all named variable macro parameters are
+ # documented using the bare name (`x`) rather than with
+ # dots (`x...`), strip the dots:
+ if ($section =~ /\w\.\.\.$/) {
+ $section =~ s/\.\.\.$//;
+
+ if ($verbose) {
+ print STDERR "${file}:$.: warning: Variable macro arguments should be documented without dots\n";
+ ++$warnings;
+ }
+ }
+
if (/$doc_sect/i) { # case insensitive for supported section names
$newsection = $1;
$newcontents = $2;
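
Substituting a space instead of the empty string keeps neighbouring tokens from fusing when several attributes are stripped back to back, and the new alternation lets kernel-doc parse structs annotated with ____cacheline_aligned_in_smp. A hypothetical documented struct that the updated regexes should now handle (illustrative, not from the kernel tree):

	/**
	 * struct rx_queue - receive queue (hypothetical example)
	 * @head: consumer index
	 * @tail: producer index, kept on its own cache line
	 */
	struct rx_queue {
		unsigned int head;
		unsigned int tail ____cacheline_aligned_in_smp;
	};
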
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index d2a30a7b3f07..6e892c93d104 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -38,8 +38,6 @@ static int sec_mismatch_count = 0;
static int sec_mismatch_fatal = 0;
/* ignore missing files */
static int ignore_missing_files;
-/* write namespace dependencies */
-static int write_namespace_deps;
enum export {
export_plain, export_unused, export_gpl,
@@ -171,7 +169,6 @@ struct symbol {
unsigned int vmlinux:1; /* 1 if symbol is defined in vmlinux */
unsigned int kernel:1; /* 1 if symbol is from kernel
* (only for external modules) **/
- unsigned int preloaded:1; /* 1 if symbol from Module.symvers, or crc */
unsigned int is_static:1; /* 1 if symbol is not global */
enum export export; /* Type of export */
char name[0];
@@ -214,13 +211,11 @@ static struct symbol *new_symbol(const char *name, struct module *module,
enum export export)
{
unsigned int hash;
- struct symbol *new;
hash = tdb_hash(name) % SYMBOL_HASH_SIZE;
- new = symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]);
- new->module = module;
- new->export = export;
- return new;
+ symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]);
+
+ return symbolhash[hash];
}
static struct symbol *find_symbol(const char *name)
@@ -241,10 +236,8 @@ static struct symbol *find_symbol(const char *name)
static bool contains_namespace(struct namespace_list *list,
const char *namespace)
{
- struct namespace_list *ns_entry;
-
- for (ns_entry = list; ns_entry != NULL; ns_entry = ns_entry->next)
- if (strcmp(ns_entry->namespace, namespace) == 0)
+ for (; list; list = list->next)
+ if (!strcmp(list->namespace, namespace))
return true;
return false;
@@ -312,6 +305,18 @@ static const char *sec_name(struct elf_info *elf, int secindex)
return sech_name(elf, &elf->sechdrs[secindex]);
}
+static void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym)
+{
+ Elf_Shdr *sechdr = &info->sechdrs[sym->st_shndx];
+ unsigned long offset;
+
+ offset = sym->st_value;
+ if (info->hdr->e_type != ET_REL)
+ offset -= sechdr->sh_addr;
+
+ return (void *)info->hdr + sechdr->sh_offset + offset;
+}
+
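
sym_get_data() turns a symbol value into a pointer inside the mapped ELF file: in ET_REL objects st_value is already a section-relative offset, while in executables it is a virtual address from which the section's sh_addr must be subtracted first. A stand-alone restatement of the same arithmetic (function and parameter names are illustrative, not modpost's):

	#include <elf.h>

	static void *sym_file_ptr(void *file, const Elf64_Ehdr *ehdr,
				  const Elf64_Shdr *shdr, const Elf64_Sym *sym)
	{
		unsigned long offset = sym->st_value;

		if (ehdr->e_type != ET_REL)	/* executable: value is a vaddr */
			offset -= shdr->sh_addr;

		return (char *)file + shdr->sh_offset + offset;
	}
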
#define strstarts(str, prefix) (strncmp(str, prefix, strlen(prefix)) == 0)
static enum export export_from_secname(struct elf_info *elf, unsigned int sec)
@@ -348,10 +353,10 @@ static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
return export_unknown;
}
-static const char *namespace_from_kstrtabns(struct elf_info *info,
- Elf_Sym *kstrtabns)
+static const char *namespace_from_kstrtabns(const struct elf_info *info,
+ const Elf_Sym *sym)
{
- char *value = info->ksymtab_strings + kstrtabns->st_value;
+ const char *value = sym_get_data(info, sym);
return value[0] ? value : NULL;
}
@@ -385,33 +390,32 @@ static struct symbol *sym_add_exported(const char *name, struct module *mod,
if (!s) {
s = new_symbol(name, mod, export);
- } else {
- if (!s->preloaded) {
- warn("%s: '%s' exported twice. Previous export was in %s%s\n",
- mod->name, name, s->module->name,
- is_vmlinux(s->module->name) ? "" : ".ko");
- } else {
- /* In case Module.symvers was out of date */
- s->module = mod;
- }
+ } else if (!external_module || is_vmlinux(s->module->name) ||
+ s->module == mod) {
+ warn("%s: '%s' exported twice. Previous export was in %s%s\n",
+ mod->name, name, s->module->name,
+ is_vmlinux(s->module->name) ? "" : ".ko");
+ return s;
}
- s->preloaded = 0;
+
+ s->module = mod;
s->vmlinux = is_vmlinux(mod->name);
s->kernel = 0;
s->export = export;
return s;
}
-static void sym_update_crc(const char *name, struct module *mod,
- unsigned int crc, enum export export)
+static void sym_set_crc(const char *name, unsigned int crc)
{
struct symbol *s = find_symbol(name);
- if (!s) {
- s = new_symbol(name, mod, export);
- /* Don't complain when we find it later. */
- s->preloaded = 1;
- }
+ /*
+ * Ignore stand-alone __crc_*, which might be auto-generated symbols
+ * such as __*_veneer in ARM ELF.
+ */
+ if (!s)
+ return;
+
s->crc = crc;
s->crc_valid = 1;
}
@@ -593,10 +597,6 @@ static int parse_elf(struct elf_info *info, const char *filename)
info->export_unused_gpl_sec = i;
else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
info->export_gpl_future_sec = i;
- else if (strcmp(secname, "__ksymtab_strings") == 0)
- info->ksymtab_strings = (void *)hdr +
- sechdrs[i].sh_offset -
- sechdrs[i].sh_addr;
if (sechdrs[i].sh_type == SHT_SYMTAB) {
unsigned int sh_link_idx;
@@ -679,12 +679,34 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
return 0;
}
-static void handle_modversions(struct module *mod, struct elf_info *info,
- Elf_Sym *sym, const char *symname)
+static void handle_modversion(const struct module *mod,
+ const struct elf_info *info,
+ const Elf_Sym *sym, const char *symname)
{
unsigned int crc;
+
+ if (sym->st_shndx == SHN_UNDEF) {
+ warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n",
+ symname, mod->name, is_vmlinux(mod->name) ? "":".ko");
+ return;
+ }
+
+ if (sym->st_shndx == SHN_ABS) {
+ crc = sym->st_value;
+ } else {
+ unsigned int *crcp;
+
+ /* symbol points to the CRC in the ELF object */
+ crcp = sym_get_data(info, sym);
+ crc = TO_NATIVE(*crcp);
+ }
+ sym_set_crc(symname, crc);
+}
+
+static void handle_symbol(struct module *mod, struct elf_info *info,
+ const Elf_Sym *sym, const char *symname)
+{
enum export export;
- bool is_crc = false;
const char *name;
if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
@@ -693,24 +715,6 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
else
export = export_from_sec(info, get_secindex(info, sym));
- /* CRC'd symbol */
- if (strstarts(symname, "__crc_")) {
- is_crc = true;
- crc = (unsigned int) sym->st_value;
- if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
- unsigned int *crcp;
-
- /* symbol points to the CRC in the ELF object */
- crcp = (void *)info->hdr + sym->st_value +
- info->sechdrs[sym->st_shndx].sh_offset -
- (info->hdr->e_type != ET_REL ?
- info->sechdrs[sym->st_shndx].sh_addr : 0);
- crc = TO_NATIVE(*crcp);
- }
- sym_update_crc(symname + strlen("__crc_"), mod, crc,
- export);
- }
-
switch (sym->st_shndx) {
case SHN_COMMON:
if (strstarts(symname, "__gnu_lto_")) {
@@ -745,11 +749,6 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
}
#endif
- if (is_crc) {
- const char *e = is_vmlinux(mod->name) ?"":".ko";
- warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n",
- symname + strlen("__crc_"), mod->name, e);
- }
mod->unres = alloc_symbol(symname,
ELF_ST_BIND(sym->st_info) == STB_WEAK,
mod->unres);
@@ -2050,18 +2049,22 @@ static void read_symbols(const char *modname)
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = remove_dot(info.strtab + sym->st_name);
- handle_modversions(mod, &info, sym, symname);
+ handle_symbol(mod, &info, sym, symname);
handle_moddevtable(mod, &info, sym, symname);
}
- /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = remove_dot(info.strtab + sym->st_name);
+ /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
if (strstarts(symname, "__kstrtabns_"))
sym_update_namespace(symname + strlen("__kstrtabns_"),
namespace_from_kstrtabns(&info,
sym));
+
+ if (strstarts(symname, "__crc_"))
+ handle_modversion(mod, &info, sym,
+ symname + strlen("__crc_"));
}
// check for static EXPORT_SYMBOL_* functions && global vars
@@ -2217,15 +2220,11 @@ static int check_exports(struct module *mod)
else
basename = mod->name;
- if (exp->namespace) {
- add_namespace(&mod->required_namespaces,
- exp->namespace);
-
- if (!write_namespace_deps &&
- !module_imports_namespace(mod, exp->namespace)) {
- warn("module %s uses symbol %s from namespace %s, but does not import it.\n",
- basename, exp->name, exp->namespace);
- }
+ if (exp->namespace &&
+ !module_imports_namespace(mod, exp->namespace)) {
+ warn("module %s uses symbol %s from namespace %s, but does not import it.\n",
+ basename, exp->name, exp->namespace);
+ add_namespace(&mod->missing_namespaces, exp->namespace);
}
if (!mod->gpl_compatible)
@@ -2477,9 +2476,8 @@ static void read_dump(const char *fname, unsigned int kernel)
}
s = sym_add_exported(symname, mod, export_no(export));
s->kernel = kernel;
- s->preloaded = 1;
s->is_static = 0;
- sym_update_crc(symname, mod, crc, export_no(export));
+ sym_set_crc(symname, crc);
sym_update_namespace(symname, namespace);
}
release_file(file, size);
@@ -2527,29 +2525,27 @@ static void write_dump(const char *fname)
free(buf.p);
}
-static void write_namespace_deps_files(void)
+static void write_namespace_deps_files(const char *fname)
{
struct module *mod;
struct namespace_list *ns;
struct buffer ns_deps_buf = {};
for (mod = modules; mod; mod = mod->next) {
- char fname[PATH_MAX];
- if (mod->skip)
+ if (mod->skip || !mod->missing_namespaces)
continue;
- ns_deps_buf.pos = 0;
-
- for (ns = mod->required_namespaces; ns; ns = ns->next)
- buf_printf(&ns_deps_buf, "%s\n", ns->namespace);
+ buf_printf(&ns_deps_buf, "%s.ko:", mod->name);
- if (ns_deps_buf.pos == 0)
- continue;
+ for (ns = mod->missing_namespaces; ns; ns = ns->next)
+ buf_printf(&ns_deps_buf, " %s", ns->namespace);
- sprintf(fname, "%s.ns_deps", mod->name);
- write_if_changed(&ns_deps_buf, fname);
+ buf_printf(&ns_deps_buf, "\n");
}
+
+ write_if_changed(&ns_deps_buf, fname);
+ free(ns_deps_buf.p);
}
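
With this rework the missing-namespace information goes into a single file (the new -d argument) instead of one .ns_deps file per module, one line per offending module. Judging from the buf_printf() calls above, the emitted format should look like the following (module paths and namespace names are hypothetical):

	drivers/foo/bar.ko: NS_A NS_B
	drivers/usb/storage/usb-storage.ko: USB_STORAGE

scripts/nsdeps, changed further down in this patch, reads these lines back: the colon-suffixed module name becomes the first argument of generate_deps and the namespaces become the rest.
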
struct ext_sym_list {
@@ -2561,7 +2557,8 @@ int main(int argc, char **argv)
{
struct module *mod;
struct buffer buf = { };
- char *kernel_read = NULL, *module_read = NULL;
+ char *kernel_read = NULL;
+ char *missing_namespace_deps = NULL;
char *dump_write = NULL, *files_source = NULL;
int opt;
int err;
@@ -2569,13 +2566,10 @@ int main(int argc, char **argv)
struct ext_sym_list *extsym_iter;
struct ext_sym_list *extsym_start = NULL;
- while ((opt = getopt(argc, argv, "i:I:e:mnsT:o:awEd")) != -1) {
+ while ((opt = getopt(argc, argv, "i:e:mnsT:o:awEd:")) != -1) {
switch (opt) {
case 'i':
kernel_read = optarg;
- break;
- case 'I':
- module_read = optarg;
external_module = 1;
break;
case 'e':
@@ -2611,7 +2605,7 @@ int main(int argc, char **argv)
sec_mismatch_fatal = 1;
break;
case 'd':
- write_namespace_deps = 1;
+ missing_namespace_deps = optarg;
break;
default:
exit(1);
@@ -2620,8 +2614,6 @@ int main(int argc, char **argv)
if (kernel_read)
read_dump(kernel_read, 1);
- if (module_read)
- read_dump(module_read, 0);
while (extsym_start) {
read_dump(extsym_start->file, 0);
extsym_iter = extsym_start->next;
@@ -2647,8 +2639,6 @@ int main(int argc, char **argv)
err |= check_modname_len(mod);
err |= check_exports(mod);
- if (write_namespace_deps)
- continue;
add_header(&buf, mod);
add_intree_flag(&buf, !external_module);
@@ -2663,10 +2653,8 @@ int main(int argc, char **argv)
write_if_changed(&buf, fname);
}
- if (write_namespace_deps) {
- write_namespace_deps_files();
- return 0;
- }
+ if (missing_namespace_deps)
+ write_namespace_deps_files(missing_namespace_deps);
if (dump_write)
write_dump(dump_write);
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index ad271bc6c313..64a82d2d85f6 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -126,8 +126,8 @@ struct module {
struct buffer dev_table_buf;
char srcversion[25];
int is_dot_o;
- // Required namespace dependencies
- struct namespace_list *required_namespaces;
+ // Missing namespace dependencies
+ struct namespace_list *missing_namespaces;
// Actual imported namespaces
struct namespace_list *imported_namespaces;
};
@@ -143,7 +143,6 @@ struct elf_info {
Elf_Section export_gpl_sec;
Elf_Section export_unused_gpl_sec;
Elf_Section export_gpl_future_sec;
- char *ksymtab_strings;
char *strtab;
char *modinfo;
unsigned int modinfo_len;
diff --git a/scripts/nsdeps b/scripts/nsdeps
index 04cea0921673..03a8e7cbe6c7 100644
--- a/scripts/nsdeps
+++ b/scripts/nsdeps
@@ -21,21 +21,26 @@ if [ "$SPATCH_VERSION_NUM" -lt "$SPATCH_REQ_VERSION_NUM" ] ; then
exit 1
fi
+if [ "$KBUILD_EXTMOD" ]; then
+ src_prefix=
+else
+ src_prefix=$srctree/
+fi
+
generate_deps_for_ns() {
$SPATCH --very-quiet --in-place --sp-file \
$srctree/scripts/coccinelle/misc/add_namespace.cocci -D ns=$1 $2
}
generate_deps() {
- local mod_name=`basename $@ .ko`
- local mod_file=`echo $@ | sed -e 's/\.ko/\.mod/'`
- local ns_deps_file=`echo $@ | sed -e 's/\.ko/\.ns_deps/'`
- if [ ! -f "$ns_deps_file" ]; then return; fi
- local mod_source_files="`cat $mod_file | sed -n 1p \
+ local mod=${1%.ko:}
+ shift
+ local namespaces="$*"
+ local mod_source_files="`cat $mod.mod | sed -n 1p \
| sed -e 's/\.o/\.c/g' \
- | sed "s|[^ ]* *|${srctree}/&|g"`"
- for ns in `cat $ns_deps_file`; do
- echo "Adding namespace $ns to module $mod_name (if needed)."
+ | sed "s|[^ ]* *|${src_prefix}&|g"`"
+ for ns in $namespaces; do
+ echo "Adding namespace $ns to module $mod.ko."
generate_deps_for_ns $ns "$mod_source_files"
# sort the imports
for source_file in $mod_source_files; do
@@ -52,7 +57,7 @@ generate_deps() {
done
}
-for f in `cat $objtree/modules.order`; do
- generate_deps $f
-done
-
+while read line
+do
+ generate_deps $line
+done < $MODULES_NSDEPS
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 2f66c81e4021..77c7caefede1 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0
#
-# buildtar 0.0.4
+# buildtar 0.0.5
#
# (C) 2004-2006 by Jan-Benedict Glaw <jbglaw@lug-owl.de>
#
@@ -24,7 +24,7 @@ tarball="${objtree}/linux-${KERNELRELEASE}-${ARCH}.tar"
# Figure out how to compress, if requested at all
#
case "${1}" in
- tar-pkg)
+ dir-pkg|tar-pkg)
opts=
;;
targz-pkg)
@@ -125,6 +125,10 @@ case "${ARCH}" in
;;
esac
+if [ "${1}" = dir-pkg ]; then
+ echo "Kernel tree successfully created in $tmpdir"
+ exit 0
+fi
#
# Create the tarball
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index a2998b118ef9..20f2efd57b11 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -45,11 +45,11 @@ scm_version()
# Check for git and a git repo.
if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
- head=`git rev-parse --verify --short HEAD 2>/dev/null`; then
+ head=$(git rev-parse --verify --short HEAD 2>/dev/null); then
# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
# it, because this version is defined in the top level Makefile.
- if [ -z "`git describe --exact-match 2>/dev/null`" ]; then
+ if [ -z "$(git describe --exact-match 2>/dev/null)" ]; then
# If only the short version is requested, don't bother
# running further git commands
@@ -59,7 +59,7 @@ scm_version()
fi
# If we are past a tagged commit (like
# "v2.6.30-rc5-302-g72357d5"), we pretty print it.
- if atag="`git describe 2>/dev/null`"; then
+ if atag="$(git describe 2>/dev/null)"; then
echo "$atag" | awk -F- '{printf("-%05d-%s", $(NF-1),$(NF))}'
# If we don't have a tag at all we print -g{commitish}.
@@ -70,7 +70,7 @@ scm_version()
# Is this git on svn?
if git config --get svn-remote.svn.url >/dev/null; then
- printf -- '-svn%s' "`git svn find-rev $head`"
+ printf -- '-svn%s' "$(git svn find-rev $head)"
fi
# Check for uncommitted changes.
@@ -91,15 +91,15 @@ scm_version()
fi
# Check for mercurial and a mercurial repo.
- if test -d .hg && hgid=`hg id 2>/dev/null`; then
+ if test -d .hg && hgid=$(hg id 2>/dev/null); then
# Do we have a tagged version? If so, latesttagdistance == 1
- if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
- id=`hg log -r . --template '{latesttag}'`
+ if [ "$(hg log -r . --template '{latesttagdistance}')" = "1" ]; then
+ id=$(hg log -r . --template '{latesttag}')
printf '%s%s' -hg "$id"
else
- tag=`printf '%s' "$hgid" | cut -d' ' -f2`
+ tag=$(printf '%s' "$hgid" | cut -d' ' -f2)
if [ -z "$tag" -o "$tag" = tip ]; then
- id=`printf '%s' "$hgid" | sed 's/[+ ].*//'`
+ id=$(printf '%s' "$hgid" | sed 's/[+ ].*//')
printf '%s%s' -hg "$id"
fi
fi
@@ -115,8 +115,8 @@ scm_version()
fi
# Check for svn and a svn repo.
- if rev=`LANG= LC_ALL= LC_MESSAGES=C svn info 2>/dev/null | grep '^Last Changed Rev'`; then
- rev=`echo $rev | awk '{print $NF}'`
+ if rev=$(LANG= LC_ALL= LC_MESSAGES=C svn info 2>/dev/null | grep '^Last Changed Rev'); then
+ rev=$(echo $rev | awk '{print $NF}')
printf -- '-svn%s' "$rev"
# All done with svn
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index de75b9feaaed..672b5931bc8d 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -87,6 +87,7 @@ algorith||algorithm
algorithmical||algorithmically
algoritm||algorithm
algoritms||algorithms
+algorithmn||algorithm
algorrithm||algorithm
algorritm||algorithm
aligment||alignment
@@ -109,6 +110,7 @@ alredy||already
altough||although
alue||value
ambigious||ambiguous
+ambigous||ambiguous
amoung||among
amout||amount
amplifer||amplifier
@@ -179,6 +181,7 @@ attepmpt||attempt
attnetion||attention
attruibutes||attributes
authentification||authentication
+authenicated||authenticated
automaticaly||automatically
automaticly||automatically
automatize||automate
@@ -286,6 +289,7 @@ claread||cleared
clared||cleared
closeing||closing
clustred||clustered
+cnfiguration||configuration
coexistance||coexistence
colescing||coalescing
collapsable||collapsible
@@ -325,9 +329,11 @@ comression||compression
comunication||communication
conbination||combination
conditionaly||conditionally
+conditon||condition
conected||connected
conector||connector
connecetd||connected
+configration||configuration
configuartion||configuration
configuation||configuration
configued||configured
@@ -347,6 +353,7 @@ containts||contains
contaisn||contains
contant||contact
contence||contents
+contiguos||contiguous
continious||continuous
continous||continuous
continously||continuously
@@ -380,6 +387,7 @@ cylic||cyclic
dafault||default
deafult||default
deamon||daemon
+debouce||debounce
decompres||decompress
decsribed||described
decription||description
@@ -448,6 +456,7 @@ diffrent||different
differenciate||differentiate
diffrentiate||differentiate
difinition||definition
+digial||digital
dimention||dimension
dimesions||dimensions
dispalying||displaying
@@ -489,6 +498,7 @@ droput||dropout
druing||during
dynmaic||dynamic
eanable||enable
+eanble||enable
easilly||easily
ecspecially||especially
edditable||editable
@@ -502,6 +512,7 @@ elementry||elementary
eletronic||electronic
embeded||embedded
enabledi||enabled
+enbale||enable
enble||enable
enchanced||enhanced
encorporating||incorporating
@@ -536,6 +547,7 @@ excellant||excellent
execeeded||exceeded
execeeds||exceeds
exeed||exceed
+exeuction||execution
existance||existence
existant||existent
exixt||exist
@@ -601,10 +613,12 @@ frambuffer||framebuffer
framming||framing
framwork||framework
frequncy||frequency
+frequancy||frequency
frome||from
fucntion||function
fuction||function
fuctions||functions
+fullill||fulfill
funcation||function
funcion||function
functionallity||functionality
@@ -642,6 +656,7 @@ happend||happened
harware||hardware
heirarchically||hierarchically
helpfull||helpful
+hexdecimal||hexadecimal
hybernate||hibernate
hierachy||hierarchy
hierarchie||hierarchy
@@ -709,12 +724,14 @@ initalize||initialize
initation||initiation
initators||initiators
initialiazation||initialization
+initializationg||initialization
initializiation||initialization
initialze||initialize
initialzed||initialized
initialzing||initializing
initilization||initialization
initilize||initialize
+initliaze||initialize
inofficial||unofficial
inrerface||interface
insititute||institute
@@ -779,6 +796,7 @@ itertation||iteration
itslef||itself
jave||java
jeffies||jiffies
+jumpimng||jumping
juse||just
jus||just
kown||known
@@ -839,6 +857,7 @@ messags||messages
messgaes||messages
messsage||message
messsages||messages
+metdata||metadata
micropone||microphone
microprocesspr||microprocessor
migrateable||migratable
@@ -857,6 +876,7 @@ mismactch||mismatch
missign||missing
missmanaged||mismanaged
missmatch||mismatch
+misssing||missing
miximum||maximum
mmnemonic||mnemonic
mnay||many
@@ -912,6 +932,7 @@ occured||occurred
occuring||occurring
offser||offset
offet||offset
+offlaod||offload
offloded||offloaded
offseting||offsetting
omited||omitted
@@ -993,6 +1014,7 @@ poiter||pointer
posible||possible
positon||position
possibilites||possibilities
+potocol||protocol
powerfull||powerful
pramater||parameter
preamle||preamble
@@ -1061,11 +1083,13 @@ psychadelic||psychedelic
pwoer||power
queing||queuing
quering||querying
+queus||queues
randomally||randomly
raoming||roaming
reasearcher||researcher
reasearchers||researchers
reasearch||research
+receieve||receive
recepient||recipient
recevied||received
receving||receiving
@@ -1166,6 +1190,7 @@ scaleing||scaling
scaned||scanned
scaning||scanning
scarch||search
+schdule||schedule
seach||search
searchs||searches
secquence||sequence
@@ -1308,6 +1333,7 @@ taskelt||tasklet
teh||the
temorary||temporary
temproarily||temporarily
+temperture||temperature
thead||thread
therfore||therefore
thier||their
@@ -1354,6 +1380,7 @@ uknown||unknown
usupported||unsupported
uncommited||uncommitted
unconditionaly||unconditionally
+undeflow||underflow
underun||underrun
unecessary||unnecessary
unexecpted||unexpected
@@ -1414,6 +1441,7 @@ varible||variable
varient||variant
vaule||value
verbse||verbose
+veify||verify
verisons||versions
verison||version
verson||version
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index 3b638c0e1a4f..470ccfe678aa 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -124,11 +124,13 @@ sub add_package($$)
sub check_missing_file($$$)
{
- my $file = shift;
+ my $files = shift;
my $package = shift;
my $is_optional = shift;
- return if(-e $file);
+ for (@$files) {
+ return if(-e $_);
+ }
add_package($package, $is_optional);
}
@@ -343,10 +345,11 @@ sub give_debian_hints()
);
if ($pdf) {
- check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+ check_missing_file(["/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"],
"fonts-dejavu", 2);
- check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ "/usr/share/fonts/opentype/noto/NotoSerifCJK-Regular.ttc"],
"fonts-noto-cjk", 2);
}
@@ -413,7 +416,7 @@ sub give_redhat_hints()
}
if ($pdf) {
- check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ check_missing_file(["/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc"],
"google-noto-sans-cjk-ttc-fonts", 2);
}
@@ -498,7 +501,7 @@ sub give_mageia_hints()
$map{"latexmk"} = "texlive-collection-basic";
if ($pdf) {
- check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ check_missing_file(["/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc"],
"google-noto-sans-cjk-ttc-fonts", 2);
}
@@ -517,6 +520,7 @@ sub give_arch_linux_hints()
"dot" => "graphviz",
"convert" => "imagemagick",
"xelatex" => "texlive-bin",
+ "latexmk" => "texlive-core",
"rsvg-convert" => "extra/librsvg",
);
@@ -528,7 +532,7 @@ sub give_arch_linux_hints()
check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
if ($pdf) {
- check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc"],
"noto-fonts-cjk", 2);
}
@@ -549,11 +553,11 @@ sub give_gentoo_hints()
"rsvg-convert" => "gnome-base/librsvg",
);
- check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf",
+ check_missing_file(["/usr/share/fonts/dejavu/DejaVuSans.ttf"],
"media-fonts/dejavu", 2) if ($pdf);
if ($pdf) {
- check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
+ check_missing_file(["/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf"],
"media-fonts/noto-cjk", 2);
}
@@ -645,6 +649,12 @@ sub check_distros()
# Common dependencies
#
+sub deactivate_help()
+{
+ printf "\tIf you want to exit the virtualenv, you can use:\n";
+ printf "\tdeactivate\n";
+}
+
sub check_needs()
{
# Check for needed programs/tools
@@ -686,6 +696,7 @@ sub check_needs()
if ($need_sphinx && scalar @activates > 0 && $activates[0] ge $min_activate) {
printf "\nNeed to activate a compatible Sphinx version on virtualenv with:\n";
printf "\t. $activates[0]\n";
+ deactivate_help();
exit (1);
} else {
my $rec_activate = "$virtenv_dir/bin/activate";
@@ -697,6 +708,7 @@ sub check_needs()
printf "\t$virtualenv $virtenv_dir\n";
printf "\t. $rec_activate\n";
printf "\tpip install -r $requirement_file\n";
+ deactivate_help();
$need++ if (!$rec_sphinx_upgrade);
}
diff --git a/scripts/ver_linux b/scripts/ver_linux
index 810e608baa24..85005d6b7f10 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -32,6 +32,8 @@ BEGIN {
printversion("PPP", version("pppd --version"))
printversion("Isdn4k-utils", version("isdnctrl"))
printversion("Nfs-utils", version("showmount --version"))
+ printversion("Bison", version("bison --version"))
+ printversion("Flex", version("flex --version"))
while (getline <"/proc/self/maps" > 0) {
if (/libc.*\.so$/) {
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index d8b1a360a636..a422a349f926 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -6,6 +6,8 @@ config SECURITY_APPARMOR
select SECURITY_PATH
select SECURITYFS
select SECURITY_NETWORK
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
default n
help
This enables the AppArmor security module.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 45d13b6462aa..09996f2552ee 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/poll.h>
+#include <linux/zlib.h>
#include <uapi/linux/major.h>
#include <uapi/linux/magic.h>
@@ -65,6 +66,35 @@
* support fns
*/
+struct rawdata_f_data {
+ struct aa_loaddata *loaddata;
+};
+
+#define RAWDATA_F_DATA_BUF(p) ((char *)((p) + 1))
+
+static void rawdata_f_data_free(struct rawdata_f_data *private)
+{
+ if (!private)
+ return;
+
+ aa_put_loaddata(private->loaddata);
+ kvfree(private);
+}
+
+static struct rawdata_f_data *rawdata_f_data_alloc(size_t size)
+{
+ struct rawdata_f_data *ret;
+
+ if (size > SIZE_MAX - sizeof(*ret))
+ return ERR_PTR(-EINVAL);
+
+ ret = kvzalloc(sizeof(*ret) + size, GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+
/**
* aa_mangle_name - mangle a profile name to std profile layout form
* @name: profile name to mangle (NOT NULL)
@@ -1280,36 +1310,117 @@ static int seq_rawdata_hash_show(struct seq_file *seq, void *v)
return 0;
}
+static int seq_rawdata_compressed_size_show(struct seq_file *seq, void *v)
+{
+ struct aa_loaddata *data = seq->private;
+
+ seq_printf(seq, "%zu\n", data->compressed_size);
+
+ return 0;
+}
+
SEQ_RAWDATA_FOPS(abi);
SEQ_RAWDATA_FOPS(revision);
SEQ_RAWDATA_FOPS(hash);
+SEQ_RAWDATA_FOPS(compressed_size);
+
+static int deflate_decompress(char *src, size_t slen, char *dst, size_t dlen)
+{
+ int error;
+ struct z_stream_s strm;
+
+ if (aa_g_rawdata_compression_level == 0) {
+ if (dlen < slen)
+ return -EINVAL;
+ memcpy(dst, src, slen);
+ return 0;
+ }
+
+ memset(&strm, 0, sizeof(strm));
+
+ strm.workspace = kvzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
+
+ strm.next_in = src;
+ strm.avail_in = slen;
+
+ error = zlib_inflateInit(&strm);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_inflate_init;
+ }
+
+ strm.next_out = dst;
+ strm.avail_out = dlen;
+
+ error = zlib_inflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END)
+ error = -EINVAL;
+ else
+ error = 0;
+
+ zlib_inflateEnd(&strm);
+fail_inflate_init:
+ kvfree(strm.workspace);
+ return error;
+}
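
deflate_decompress() above is the read-side mirror of the compression done at policy load time, which is not visible in this hunk. A sketch of what that compress side plausibly looks like with the in-kernel zlib API, assuming it uses the same workspace/init/finish/end sequence; deflate_compress_sketch() and its dlen in/out convention are illustrative, not the actual apparmor function:

	static int deflate_compress_sketch(const char *src, size_t slen,
					   char *dst, size_t *dlen)
	{
		struct z_stream_s strm;
		int error;

		memset(&strm, 0, sizeof(strm));
		strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
								     MAX_MEM_LEVEL),
					  GFP_KERNEL);
		if (!strm.workspace)
			return -ENOMEM;

		error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
		if (error != Z_OK) {
			error = -ENOMEM;
			goto out;
		}

		strm.next_in = (const u8 *)src;
		strm.avail_in = slen;
		strm.next_out = (u8 *)dst;
		strm.avail_out = *dlen;

		error = zlib_deflate(&strm, Z_FINISH);
		zlib_deflateEnd(&strm);
		if (error != Z_STREAM_END) {
			error = -EINVAL;
			goto out;
		}
		*dlen = strm.total_out;	/* becomes loaddata->compressed_size */
		error = 0;
	out:
		kvfree(strm.workspace);
		return error;
	}
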
static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
loff_t *ppos)
{
- struct aa_loaddata *rawdata = file->private_data;
+ struct rawdata_f_data *private = file->private_data;
- return simple_read_from_buffer(buf, size, ppos, rawdata->data,
- rawdata->size);
+ return simple_read_from_buffer(buf, size, ppos,
+ RAWDATA_F_DATA_BUF(private),
+ private->loaddata->size);
}
static int rawdata_release(struct inode *inode, struct file *file)
{
- aa_put_loaddata(file->private_data);
+ rawdata_f_data_free(file->private_data);
return 0;
}
static int rawdata_open(struct inode *inode, struct file *file)
{
+ int error;
+ struct aa_loaddata *loaddata;
+ struct rawdata_f_data *private;
+
if (!policy_view_capable(NULL))
return -EACCES;
- file->private_data = __aa_get_loaddata(inode->i_private);
- if (!file->private_data)
+
+ loaddata = __aa_get_loaddata(inode->i_private);
+ if (!loaddata)
/* lost race: this entry is being reaped */
return -ENOENT;
+ private = rawdata_f_data_alloc(loaddata->size);
+ if (IS_ERR(private)) {
+ error = PTR_ERR(private);
+ goto fail_private_alloc;
+ }
+
+ private->loaddata = loaddata;
+
+ error = deflate_decompress(loaddata->data, loaddata->compressed_size,
+ RAWDATA_F_DATA_BUF(private),
+ loaddata->size);
+ if (error)
+ goto fail_decompress;
+
+ file->private_data = private;
return 0;
+
+fail_decompress:
+ rawdata_f_data_free(private);
+ return error;
+
+fail_private_alloc:
+ aa_put_loaddata(loaddata);
+ return error;
}
static const struct file_operations rawdata_fops = {
@@ -1388,6 +1499,13 @@ int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
rawdata->dents[AAFS_LOADDATA_HASH] = dent;
}
+ dent = aafs_create_file("compressed_size", S_IFREG | 0444, dir,
+ rawdata,
+ &seq_rawdata_compressed_size_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_COMPRESSED_SIZE] = dent;
+
dent = aafs_create_file("raw_data", S_IFREG | 0444,
dir, rawdata, &rawdata_fops);
if (IS_ERR(dent))
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 9e0492795267..9be7ccb8379e 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -520,7 +520,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
label = &new_profile->label;
continue;
}
- label = aa_label_parse(&profile->label, *name, GFP_ATOMIC,
+ label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
true, false);
if (IS_ERR(label))
label = NULL;
@@ -600,7 +600,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
/* base the stack on post domain transition */
struct aa_label *base = new;
- new = aa_label_parse(base, stack, GFP_ATOMIC, true, false);
+ new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
if (IS_ERR(new))
new = NULL;
aa_put_label(base);
@@ -685,20 +685,9 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
struct aa_profile *new_profile = NULL;
- char *n = kstrdup(name, GFP_ATOMIC);
-
- if (n) {
- /* name is ptr into buffer */
- long pos = name - buffer;
- /* break per cpu buffer hold */
- put_buffers(buffer);
- new_profile = aa_new_null_profile(profile, false, n,
- GFP_KERNEL);
- get_buffers(buffer);
- name = buffer + pos;
- strcpy((char *)name, n);
- kfree(n);
- }
+
+ new_profile = aa_new_null_profile(profile, false, name,
+ GFP_KERNEL);
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
@@ -719,7 +708,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment variables"
" for %s profile=", name);
- aa_label_printk(new, GFP_ATOMIC);
+ aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
@@ -795,7 +784,7 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment "
"variables for %s label=", xname);
- aa_label_printk(onexec, GFP_ATOMIC);
+ aa_label_printk(onexec, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
@@ -829,7 +818,7 @@ static struct aa_label *handle_onexec(struct aa_label *label,
bprm, buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
- new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_get_newest_label(onexec),
profile_transition(profile, bprm, buffer,
cond, unsafe));
@@ -841,9 +830,9 @@ static struct aa_label *handle_onexec(struct aa_label *label,
buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
- new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_label_merge(&profile->label, onexec,
- GFP_ATOMIC),
+ GFP_KERNEL),
profile_transition(profile, bprm, buffer,
cond, unsafe));
}
@@ -903,13 +892,18 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
ctx->nnp = aa_get_label(label);
/* buffer freed below, name is pointer into buffer */
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto done;
+ }
+
/* Test for onexec first as onexec override other x transitions. */
if (ctx->onexec)
new = handle_onexec(label, ctx->onexec, ctx->token,
bprm, buffer, &cond, &unsafe);
else
- new = fn_label_build(label, profile, GFP_ATOMIC,
+ new = fn_label_build(label, profile, GFP_KERNEL,
profile_transition(profile, bprm, buffer,
&cond, &unsafe));
@@ -953,7 +947,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (DEBUG_ON) {
dbg_printk("scrubbing environment variables for %s "
"label=", bprm->filename);
- aa_label_printk(new, GFP_ATOMIC);
+ aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->secureexec = 1;
@@ -964,7 +958,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (DEBUG_ON) {
dbg_printk("apparmor: clearing unsafe personality "
"bits. %s label=", bprm->filename);
- aa_label_printk(new, GFP_ATOMIC);
+ aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->per_clear |= PER_CLEAR_ON_SETID;
@@ -975,7 +969,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
done:
aa_put_label(label);
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 4c1b05eb130c..fe2ebe5e865e 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -76,7 +76,7 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
if (aad(sa)->peer) {
audit_log_format(ab, " target=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
- FLAG_VIEW_SUBNS, GFP_ATOMIC);
+ FLAG_VIEW_SUBNS, GFP_KERNEL);
} else if (aad(sa)->fs.target) {
audit_log_format(ab, " target=");
audit_log_untrustedstring(ab, aad(sa)->fs.target);
@@ -332,12 +332,14 @@ int aa_path_perm(const char *op, struct aa_label *label,
flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
0);
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
error = fn_for_each_confined(label, profile,
profile_path_perm(op, profile, path, buffer, request,
cond, flags, &perms));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -475,12 +477,18 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
int error;
/* buffer freed below, lname is pointer in buffer */
- get_buffers(buffer, buffer2);
+ buffer = aa_get_buffer(false);
+ buffer2 = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !buffer2)
+ goto out;
+
error = fn_for_each_confined(label, profile,
profile_path_link(profile, &link, buffer, &target,
buffer2, &cond));
- put_buffers(buffer, buffer2);
-
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(buffer2);
return error;
}
@@ -507,7 +515,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
static int __file_path_perm(const char *op, struct aa_label *label,
struct aa_label *flabel, struct file *file,
- u32 request, u32 denied)
+ u32 request, u32 denied, bool in_atomic)
{
struct aa_profile *profile;
struct aa_perms perms = {};
@@ -524,7 +532,9 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return 0;
flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
- get_buffers(buffer);
+ buffer = aa_get_buffer(in_atomic);
+ if (!buffer)
+ return -ENOMEM;
/* check every profile in task label not in current cache */
error = fn_for_each_not_in_set(flabel, label, profile,
@@ -553,7 +563,7 @@ static int __file_path_perm(const char *op, struct aa_label *label,
if (!error)
update_file_ctx(file_ctx(file), label, request);
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -590,11 +600,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
* @label: label being enforced (NOT NULL)
* @file: file to revalidate access permissions on (NOT NULL)
* @request: requested permissions
+ * @in_atomic: whether allocations need to be done in atomic context
*
* Returns: %0 if access allowed else error
*/
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
- u32 request)
+ u32 request, bool in_atomic)
{
struct aa_file_ctx *fctx;
struct aa_label *flabel;
@@ -607,7 +618,8 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
fctx = file_ctx(file);
rcu_read_lock();
- flabel = rcu_dereference(fctx->label);
+ flabel = aa_get_newest_label(rcu_dereference(fctx->label));
+ rcu_read_unlock();
AA_BUG(!flabel);
/* revalidate access, if task is unconfined, or the cached cred
@@ -626,14 +638,13 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
error = __file_path_perm(op, label, flabel, file, request,
- denied);
+ denied, in_atomic);
else if (S_ISSOCK(file_inode(file)->i_mode))
error = __file_sock_perm(op, label, flabel, file, request,
denied);
done:
- rcu_read_unlock();
-
+ aa_put_label(flabel);
return error;
}
@@ -655,7 +666,8 @@ static void revalidate_tty(struct aa_label *label)
struct tty_file_private, list);
file = file_priv->file;
- if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE))
+ if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
+ IN_ATOMIC))
drop_tty = 1;
}
spin_unlock(&tty->files_lock);
@@ -669,7 +681,8 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
{
struct aa_label *label = (struct aa_label *)p;
- if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file)))
+ if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
+ IN_ATOMIC))
return fd + 1;
return 0;
}
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 6b7e6e13176e..1fbabdb565a8 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -36,6 +36,7 @@ extern enum audit_mode aa_g_audit;
extern bool aa_g_audit_header;
extern bool aa_g_debug;
extern bool aa_g_hash_policy;
+extern int aa_g_rawdata_compression_level;
extern bool aa_g_lock_policy;
extern bool aa_g_logsyscall;
extern bool aa_g_paranoid_load;
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index a852be89a7dc..aff26fc71407 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -197,7 +197,7 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry);
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
- u32 request);
+ u32 request, bool in_atomic);
void aa_inherit_files(const struct cred *cred, struct files_struct *files);
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 6b0af638a18d..e23f4aadc1ff 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -134,7 +134,7 @@ unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
void aa_dfa_free_kref(struct kref *kref);
-#define WB_HISTORY_SIZE 8
+#define WB_HISTORY_SIZE 24
struct match_workbuf {
unsigned int count;
unsigned int pos;
@@ -147,7 +147,6 @@ struct match_workbuf N = { \
.count = 0, \
.pos = 0, \
.len = 0, \
- .size = WB_HISTORY_SIZE, \
}
unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 35a8295e8f3a..44a7945fbe3c 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -11,7 +11,6 @@
#ifndef __AA_PATH_H
#define __AA_PATH_H
-
enum path_flags {
PATH_IS_DIR = 0x1, /* path is a directory */
PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */
@@ -26,51 +25,8 @@ int aa_path_name(const struct path *path, int flags, char *buffer,
const char **name, const char **info,
const char *disconnected);
-#define MAX_PATH_BUFFERS 2
-
-/* Per cpu buffers used during mediation */
-/* preallocated buffers to use during path lookups */
-struct aa_buffers {
- char *buf[MAX_PATH_BUFFERS];
-};
-
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-
-DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
-
-#define ASSIGN(FN, A, X, N) ((X) = FN(A, N))
-#define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/
-#define EVAL2(FN, A, X, Y...) \
- do { ASSIGN(FN, A, X, 1); EVAL1(FN, A, Y); } while (0)
-#define EVAL(FN, A, X...) CONCATENATE(EVAL, COUNT_ARGS(X))(FN, A, X)
-
-#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
-
-#ifdef CONFIG_DEBUG_PREEMPT
-#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
-#else
-#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
-#endif
-
-#define __get_buffer(C, N) ({ \
- AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
- (C)->buf[(N)]; })
-
-#define __get_buffers(C, X...) EVAL(__get_buffer, C, X)
-
-#define __put_buffers(X, Y...) ((void)&(X))
-
-#define get_buffers(X...) \
-do { \
- struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \
- __get_buffers(__cpu_var, X); \
-} while (0)
-
-#define put_buffers(X, Y...) \
-do { \
- __put_buffers(X, Y); \
- put_cpu_ptr(&aa_buffers); \
-} while (0)
+#define IN_ATOMIC true
+char *aa_get_buffer(bool in_atomic);
+void aa_put_buffer(char *buf);
#endif /* __AA_PATH_H */
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
index 46aefae918f5..e0e1ca7ebc38 100644
--- a/security/apparmor/include/policy_unpack.h
+++ b/security/apparmor/include/policy_unpack.h
@@ -41,6 +41,7 @@ enum {
AAFS_LOADDATA_REVISION,
AAFS_LOADDATA_HASH,
AAFS_LOADDATA_DATA,
+ AAFS_LOADDATA_COMPRESSED_SIZE,
AAFS_LOADDATA_DIR, /* must be last actual entry */
AAFS_LOADDATA_NDENTS /* count of entries */
};
@@ -61,11 +62,16 @@ struct aa_loaddata {
struct dentry *dents[AAFS_LOADDATA_NDENTS];
struct aa_ns *ns;
char *name;
- size_t size;
+ size_t size; /* the original size of the payload */
+ size_t compressed_size; /* the compressed size of the payload */
long revision; /* the ns policy revision this caused */
int abi;
unsigned char *hash;
+ /* Pointer to payload. If @compressed_size > 0, then this is the
+ * compressed version of the payload, else it is the uncompressed
+ * version (with the size indicated by @size).
+ */
char *data;
};
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 59f1cc2557a7..470693239e64 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -1458,11 +1458,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
/* helper macro for snprint routines */
#define update_for_len(total, len, size, str) \
do { \
+ size_t ulen = len; \
+ \
AA_BUG(len < 0); \
- total += len; \
- len = min(len, size); \
- size -= len; \
- str += len; \
+ total += ulen; \
+ ulen = min(ulen, size); \
+ size -= ulen; \
+ str += ulen; \
} while (0)
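
The reworked macro does the truncation clamping on an unsigned copy while leaving len itself signed, so AA_BUG(len < 0) stays meaningful for callers that now declare len as ssize_t (see the aa_label_snxprint() hunk below). The surrounding pattern is the standard snprintf accumulation loop; a self-contained user-space sketch:

	#include <stdio.h>

	/* append two fields into a fixed buffer, tracking the would-be total */
	int main(void)
	{
		char buf[8], *str = buf;
		size_t size = sizeof(buf), total = 0;
		const char *fields[] = { "alpha", "beta" };

		for (int i = 0; i < 2; i++) {
			int len = snprintf(str, size, "%s,", fields[i]);
			size_t ulen;

			if (len < 0)
				return 1;
			ulen = len;		/* checked non-negative first */
			total += ulen;
			ulen = ulen < size ? ulen : size;  /* clamp on truncation */
			size -= ulen;
			str += ulen;
		}
		/* prints: would need 12 bytes, have 8 */
		printf("would need %zu bytes, have %zu\n", total + 1, sizeof(buf));
		return 0;
	}
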
/**
@@ -1597,7 +1599,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
struct aa_ns *prev_ns = NULL;
struct label_it i;
int count = 0, total = 0;
- size_t len;
+ ssize_t len;
AA_BUG(!str && size != 0);
AA_BUG(!label);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ec3a928af829..b621ad74f54a 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -21,6 +21,7 @@
#include <linux/user_namespace.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/zlib.h>
#include <net/sock.h>
#include <uapi/linux/mount.h>
@@ -43,8 +44,17 @@
/* Flag indicating whether initialization completed */
int apparmor_initialized;
-DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
+union aa_buffer {
+ struct list_head list;
+ char buffer[1];
+};
+
+#define RESERVE_COUNT 2
+static int reserve_count = RESERVE_COUNT;
+static int buffer_count;
+static LIST_HEAD(aa_global_buffers);
+static DEFINE_SPINLOCK(aa_buffers_lock);
/*
* LSM hook functions
@@ -442,7 +452,8 @@ static void apparmor_file_free_security(struct file *file)
aa_put_label(rcu_access_pointer(ctx->label));
}
-static int common_file_perm(const char *op, struct file *file, u32 mask)
+static int common_file_perm(const char *op, struct file *file, u32 mask,
+ bool in_atomic)
{
struct aa_label *label;
int error = 0;
@@ -452,7 +463,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
return -EACCES;
label = __begin_current_label_crit_section();
- error = aa_file_perm(op, label, file, mask);
+ error = aa_file_perm(op, label, file, mask, in_atomic);
__end_current_label_crit_section(label);
return error;
@@ -460,12 +471,13 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
static int apparmor_file_receive(struct file *file)
{
- return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file));
+ return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
+ false);
}
static int apparmor_file_permission(struct file *file, int mask)
{
- return common_file_perm(OP_FPERM, file, mask);
+ return common_file_perm(OP_FPERM, file, mask, false);
}
static int apparmor_file_lock(struct file *file, unsigned int cmd)
@@ -475,11 +487,11 @@ static int apparmor_file_lock(struct file *file, unsigned int cmd)
if (cmd == F_WRLCK)
mask |= MAY_WRITE;
- return common_file_perm(OP_FLOCK, file, mask);
+ return common_file_perm(OP_FLOCK, file, mask, false);
}
static int common_mmap(const char *op, struct file *file, unsigned long prot,
- unsigned long flags)
+ unsigned long flags, bool in_atomic)
{
int mask = 0;
@@ -497,20 +509,21 @@ static int common_mmap(const char *op, struct file *file, unsigned long prot,
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
- return common_file_perm(op, file, mask);
+ return common_file_perm(op, file, mask, in_atomic);
}
static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
- return common_mmap(OP_FMMAP, file, prot, flags);
+	return common_mmap(OP_FMMAP, file, prot, flags, true /* in_atomic */);
}
static int apparmor_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot)
{
return common_mmap(OP_FMPROT, vma->vm_file, prot,
- !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
+ !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
+ false);
}
static int apparmor_sb_mount(const char *dev_name, const struct path *path,
@@ -1262,6 +1275,16 @@ static const struct kernel_param_ops param_ops_aauint = {
.get = param_get_aauint
};
+static int param_set_aacompressionlevel(const char *val,
+ const struct kernel_param *kp);
+static int param_get_aacompressionlevel(char *buffer,
+ const struct kernel_param *kp);
+#define param_check_aacompressionlevel param_check_int
+static const struct kernel_param_ops param_ops_aacompressionlevel = {
+ .set = param_set_aacompressionlevel,
+ .get = param_get_aacompressionlevel
+};
+
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
@@ -1292,6 +1315,11 @@ bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
+/* policy loaddata compression level */
+int aa_g_rawdata_compression_level = Z_DEFAULT_COMPRESSION;
+module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
+ aacompressionlevel, 0400);
+
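The new parameter is root-read-only at runtime (0400) and, per the setter further down, rejected once AppArmor has initialized, so in practice it is chosen on the kernel command line. An illustrative boot-time setting (the value is clamped to zlib's 0..9 range by the setter):

	apparmor.rawdata_compression_level=9
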
/* Debug mode */
bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
@@ -1402,6 +1430,7 @@ static int param_set_aauint(const char *val, const struct kernel_param *kp)
return -EPERM;
error = param_set_uint(val, kp);
+ aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
return error;
@@ -1456,6 +1485,37 @@ static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
return param_get_bool(buffer, &kp_local);
}
+static int param_set_aacompressionlevel(const char *val,
+ const struct kernel_param *kp)
+{
+ int error;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized)
+ return -EPERM;
+
+ error = param_set_int(val, kp);
+
+ aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
+ Z_NO_COMPRESSION,
+ Z_BEST_COMPRESSION);
+	pr_info("AppArmor: policy rawdata compression level set to %d\n",
+ aa_g_rawdata_compression_level);
+
+ return error;
+}
+
+static int param_get_aacompressionlevel(char *buffer,
+ const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !policy_view_capable(NULL))
+ return -EPERM;
+ return param_get_int(buffer, kp);
+}
+
static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
@@ -1514,6 +1574,61 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
return 0;
}
+char *aa_get_buffer(bool in_atomic)
+{
+ union aa_buffer *aa_buf;
+ bool try_again = true;
+ gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
+retry:
+ spin_lock(&aa_buffers_lock);
+ if (buffer_count > reserve_count ||
+ (in_atomic && !list_empty(&aa_global_buffers))) {
+ aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+ list);
+ list_del(&aa_buf->list);
+ buffer_count--;
+ spin_unlock(&aa_buffers_lock);
+ return &aa_buf->buffer[0];
+ }
+ if (in_atomic) {
+ /*
+ * out of reserve buffers and in atomic context so increase
+ * how many buffers to keep in reserve
+ */
+ reserve_count++;
+ flags = GFP_ATOMIC;
+ }
+ spin_unlock(&aa_buffers_lock);
+
+ if (!in_atomic)
+ might_sleep();
+ aa_buf = kmalloc(aa_g_path_max, flags);
+ if (!aa_buf) {
+ if (try_again) {
+ try_again = false;
+ goto retry;
+ }
+ pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
+ return NULL;
+ }
+ return &aa_buf->buffer[0];
+}
+
+void aa_put_buffer(char *buf)
+{
+ union aa_buffer *aa_buf;
+
+ if (!buf)
+ return;
+ aa_buf = container_of(buf, union aa_buffer, buffer[0]);
+
+ spin_lock(&aa_buffers_lock);
+ list_add(&aa_buf->list, &aa_global_buffers);
+ buffer_count++;
+ spin_unlock(&aa_buffers_lock);
+}
+
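The pool replaces the old per-CPU buffers with a plain get/put discipline. A minimal caller sketch, assuming only the aa_get_buffer()/aa_put_buffer() API above (the function name is hypothetical):

	static int example_check(void)
	{
		char *buffer;
		int error = 0;

		buffer = aa_get_buffer(false);	/* false: may sleep and retry */
		if (!buffer)
			return -ENOMEM;

		/* ... fill "buffer" with up to aa_g_path_max bytes ... */

		aa_put_buffer(buffer);		/* return the buffer to the pool */
		return error;
	}
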
/*
* AppArmor init functions
*/
@@ -1525,7 +1640,7 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
*/
static int __init set_init_ctx(void)
{
- struct cred *cred = (struct cred *)current->real_cred;
+ struct cred *cred = (__force struct cred *)current->real_cred;
set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
@@ -1534,38 +1649,48 @@ static int __init set_init_ctx(void)
static void destroy_buffers(void)
{
- u32 i, j;
+ union aa_buffer *aa_buf;
- for_each_possible_cpu(i) {
- for_each_cpu_buffer(j) {
- kfree(per_cpu(aa_buffers, i).buf[j]);
- per_cpu(aa_buffers, i).buf[j] = NULL;
- }
+ spin_lock(&aa_buffers_lock);
+ while (!list_empty(&aa_global_buffers)) {
+ aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+ list);
+ list_del(&aa_buf->list);
+ spin_unlock(&aa_buffers_lock);
+ kfree(aa_buf);
+ spin_lock(&aa_buffers_lock);
}
+ spin_unlock(&aa_buffers_lock);
}
static int __init alloc_buffers(void)
{
- u32 i, j;
-
- for_each_possible_cpu(i) {
- for_each_cpu_buffer(j) {
- char *buffer;
-
- if (cpu_to_node(i) > num_online_nodes())
- /* fallback to kmalloc for offline nodes */
- buffer = kmalloc(aa_g_path_max, GFP_KERNEL);
- else
- buffer = kmalloc_node(aa_g_path_max, GFP_KERNEL,
- cpu_to_node(i));
- if (!buffer) {
- destroy_buffers();
- return -ENOMEM;
- }
- per_cpu(aa_buffers, i).buf[j] = buffer;
+ union aa_buffer *aa_buf;
+ int i, num;
+
+ /*
+	 * A function may require two buffers at once. Usually the buffers are
+	 * used for a short period of time and are shared. On a UP kernel two
+	 * buffers should be enough; with more CPUs it is possible that more
+	 * buffers will be used simultaneously. The preallocated pool may grow.
+	 * This preallocation also has the side effect that AppArmor will be
+	 * disabled early at boot if aa_g_path_max is extremely high.
+ */
+ if (num_online_cpus() > 1)
+ num = 4 + RESERVE_COUNT;
+ else
+ num = 2 + RESERVE_COUNT;
+
+ for (i = 0; i < num; i++) {
+ aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!aa_buf) {
+ destroy_buffers();
+ return -ENOMEM;
}
+ aa_put_buffer(&aa_buf->buffer[0]);
}
-
return 0;
}
@@ -1730,7 +1855,7 @@ static int __init apparmor_init(void)
error = alloc_buffers();
if (error) {
AA_ERROR("Unable to allocate work buffers\n");
- goto buffers_out;
+ goto alloc_out;
}
error = set_init_ctx();
@@ -1755,7 +1880,6 @@ static int __init apparmor_init(void)
buffers_out:
destroy_buffers();
-
alloc_out:
aa_destroy_aafs();
aa_teardown_dfa_engine();
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 6ccd3734a841..525ce22dc0e9 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -616,8 +616,8 @@ unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
#define inc_wb_pos(wb) \
do { \
- wb->pos = (wb->pos + 1) & (wb->size - 1); \
- wb->len = (wb->len + 1) & (wb->size - 1); \
+ wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
+ wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
} while (0)
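The switch from wb->size to the WB_HISTORY_SIZE constant relies on the history length being a power of two, so masking with (size - 1) is equivalent to a modulo. A small stand-alone illustration:

	#include <assert.h>

	int main(void)
	{
		const unsigned int size = 8;	/* power of two, like WB_HISTORY_SIZE */
		unsigned int pos = 7;

		/* wraps 7 -> 0: (7 + 1) & 7 == 0 == (7 + 1) % 8 */
		assert(((pos + 1) & (size - 1)) == (pos + 1) % size);
		return 0;
	}
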
/* For DFAs that don't support extended tagging of states */
@@ -636,7 +636,7 @@ static bool is_loop(struct match_workbuf *wb, unsigned int state,
return true;
}
if (pos == 0)
- pos = wb->size;
+ pos = WB_HISTORY_SIZE;
pos--;
}
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 17081c8dbefa..4ed6688f9d40 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -408,11 +408,13 @@ int aa_remount(struct aa_label *label, const struct path *path,
binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, data, binary));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -437,11 +439,18 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
- get_buffers(buffer, old_buffer);
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+	if (!buffer || !old_buffer)
+ goto out;
+
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, flags, NULL, false));
- put_buffers(buffer, old_buffer);
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
@@ -461,11 +470,13 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE);
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, NULL, false));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -488,11 +499,17 @@ int aa_move_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
- get_buffers(buffer, old_buffer);
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !old_buffer)
+ goto out;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, MS_MOVE, NULL, false));
- put_buffers(buffer, old_buffer);
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
@@ -533,8 +550,17 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
}
}
- get_buffers(buffer, dev_buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
if (dev_path) {
+ dev_buffer = aa_get_buffer(false);
+ if (!dev_buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, dev_path, dev_buffer,
type, flags, data, binary));
@@ -543,7 +569,10 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
match_mnt_path_str(profile, path, buffer, dev_name,
type, flags, data, binary, NULL));
}
- put_buffers(buffer, dev_buffer);
+
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(dev_buffer);
if (dev_path)
path_put(dev_path);
@@ -591,10 +620,13 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
AA_BUG(!label);
AA_BUG(!mnt);
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
+
error = fn_for_each_confined(label, profile,
profile_umount(profile, &path, buffer));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -667,8 +699,12 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
AA_BUG(!old_path);
AA_BUG(!new_path);
- get_buffers(old_buffer, new_buffer);
- target = fn_label_build(label, profile, GFP_ATOMIC,
+ old_buffer = aa_get_buffer(false);
+ new_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!old_buffer || !new_buffer)
+ goto out;
+ target = fn_label_build(label, profile, GFP_KERNEL,
build_pivotroot(profile, new_path, new_buffer,
old_path, old_buffer));
if (!target) {
@@ -686,7 +722,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
/* already audited error */
error = PTR_ERR(target);
out:
- put_buffers(old_buffer, new_buffer);
+ aa_put_buffer(old_buffer);
+ aa_put_buffer(new_buffer);
return error;
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index ade333074c8e..03104830c913 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -582,7 +582,7 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
{
if (profile) {
if (profile->label.flags & FLAG_IMMUTIBLE) {
- *info = "cannot replace immutible profile";
+ *info = "cannot replace immutable profile";
return -EPERM;
} else if (noreplace) {
*info = "profile already exists";
@@ -856,7 +856,7 @@ static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
u32 mask, struct aa_loaddata *udata)
{
- const char *ns_name, *info = NULL;
+ const char *ns_name = NULL, *info = NULL;
struct aa_ns *ns = NULL;
struct aa_load_ent *ent, *tmp;
struct aa_loaddata *rawdata_ent;
@@ -1043,6 +1043,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
out:
aa_put_ns(ns);
aa_put_loaddata(udata);
+ kfree(ns_name);
if (error)
return error;
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 8cfc9493eefc..80364310fb1e 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -16,6 +16,7 @@
#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
+#include <linux/zlib.h>
#include "include/apparmor.h"
#include "include/audit.h"
@@ -139,9 +140,11 @@ bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
if (l->size != r->size)
return false;
+ if (l->compressed_size != r->compressed_size)
+ return false;
if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
return false;
- return memcmp(l->data, r->data, r->size) == 0;
+ return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}
/*
@@ -968,11 +971,14 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
e, error);
return error;
}
- if (*ns && strcmp(*ns, name))
+ if (*ns && strcmp(*ns, name)) {
audit_iface(NULL, NULL, NULL, "invalid ns change", e,
error);
- else if (!*ns)
- *ns = name;
+ } else if (!*ns) {
+ *ns = kstrdup(name, GFP_KERNEL);
+ if (!*ns)
+ return -ENOMEM;
+ }
}
return 0;
@@ -1039,6 +1045,105 @@ struct aa_load_ent *aa_load_ent_alloc(void)
return ent;
}
+static int deflate_compress(const char *src, size_t slen, char **dst,
+ size_t *dlen)
+{
+ int error;
+ struct z_stream_s strm;
+ void *stgbuf, *dstbuf;
+ size_t stglen = deflateBound(slen);
+
+ memset(&strm, 0, sizeof(strm));
+
+ if (stglen < slen)
+ return -EFBIG;
+
+ strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
+ MAX_MEM_LEVEL),
+ GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
+
+ error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_deflate_init;
+ }
+
+ stgbuf = kvzalloc(stglen, GFP_KERNEL);
+ if (!stgbuf) {
+ error = -ENOMEM;
+ goto fail_stg_alloc;
+ }
+
+ strm.next_in = src;
+ strm.avail_in = slen;
+ strm.next_out = stgbuf;
+ strm.avail_out = stglen;
+
+ error = zlib_deflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END) {
+ error = -EINVAL;
+ goto fail_deflate;
+ }
+ error = 0;
+
+ if (is_vmalloc_addr(stgbuf)) {
+ dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
+ if (dstbuf) {
+ memcpy(dstbuf, stgbuf, strm.total_out);
+ kvfree(stgbuf);
+ }
+ } else
+ /*
+ * If the staging buffer was kmalloc'd, then using krealloc is
+ * probably going to be faster. The destination buffer will
+ * always be smaller, so it's just shrunk, avoiding a memcpy
+ */
+ dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
+
+ if (!dstbuf) {
+ error = -ENOMEM;
+ goto fail_deflate;
+ }
+
+ *dst = dstbuf;
+ *dlen = strm.total_out;
+
+fail_stg_alloc:
+ zlib_deflateEnd(&strm);
+fail_deflate_init:
+ kvfree(strm.workspace);
+ return error;
+
+fail_deflate:
+ kvfree(stgbuf);
+ goto fail_stg_alloc;
+}
+
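The matching inflate path is not part of this hunk; what follows is a hedged sketch of what a decompressor over the same kernel zlib API could look like (the function name and the caller-supplied destination buffer are assumptions, not this patch's code):

	static int deflate_decompress(const char *src, size_t slen,
				      char *dst, size_t dlen)
	{
		int error;
		struct z_stream_s strm;

		memset(&strm, 0, sizeof(strm));
		/* the workspace must be in place before zlib_inflateInit() */
		strm.workspace = kvzalloc(zlib_inflate_workspacesize(),
					  GFP_KERNEL);
		if (!strm.workspace)
			return -ENOMEM;

		error = zlib_inflateInit(&strm);
		if (error != Z_OK) {
			error = -ENOMEM;
			goto out;
		}

		strm.next_in = src;
		strm.avail_in = slen;
		strm.next_out = dst;
		strm.avail_out = dlen;

		/* a single-shot inflate must end the stream to succeed */
		error = zlib_inflate(&strm, Z_FINISH) == Z_STREAM_END ?
			0 : -EINVAL;
		zlib_inflateEnd(&strm);
	out:
		kvfree(strm.workspace);
		return error;
	}
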
+static int compress_loaddata(struct aa_loaddata *data)
+{
+	AA_BUG(data->compressed_size > 0);
+
+	/*
+	 * Shortcut the no-compression case; otherwise we would increase the
+	 * amount of storage required by a small amount
+	 */
+ if (aa_g_rawdata_compression_level != 0) {
+ void *udata = data->data;
+ int error = deflate_compress(udata, data->size, &data->data,
+ &data->compressed_size);
+ if (error)
+ return error;
+
+ kvfree(udata);
+ } else
+ data->compressed_size = data->size;
+
+ return 0;
+}
+
/**
* aa_unpack - unpack packed binary profile(s) data loaded from user space
* @udata: user data copied to kmem (NOT NULL)
@@ -1107,6 +1212,9 @@ int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
goto fail;
}
}
+ error = compress_loaddata(udata);
+ if (error)
+ goto fail;
return 0;
fail_profile:
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 725674f3276d..7d0f8f7431ff 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -801,8 +801,8 @@ struct cgroup_subsys devices_cgrp_subsys = {
*
 * returns 0 on success, -EPERM in case the operation is not permitted
*/
-int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{
struct dev_cgroup *dev_cgroup;
bool rc;
@@ -824,3 +824,14 @@ int __devcgroup_check_permission(short type, u32 major, u32 minor,
return 0;
}
+
+int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
+{
+ int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
+
+ if (rc)
+ return -EPERM;
+
+ return __devcgroup_check_permission(type, major, minor, access);
+}
+EXPORT_SYMBOL(devcgroup_check_permission);
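The exported wrapper now folds the BPF device-cgroup program check into one call. A hedged caller sketch (a hypothetical character-device open path, assuming the DEVCG_* constants from the device-cgroup header):

	static int example_open(struct inode *inode, struct file *file)
	{
		int rc = devcgroup_check_permission(DEVCG_DEV_CHAR,
						    imajor(inode),
						    iminor(inode),
						    DEVCG_ACC_READ |
						    DEVCG_ACC_WRITE);
		if (rc)
			return rc;	/* -EPERM when either check denies */

		/* ... proceed with the open ... */
		return 0;
	}
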
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index 0bae6adb63a9..71f0177e8716 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -72,6 +72,15 @@ config LOAD_IPL_KEYS
depends on S390
def_bool y
+config LOAD_PPC_KEYS
+ bool "Enable loading of platform and blacklisted keys for POWER"
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on PPC_SECURE_BOOT
+ default y
+ help
+ Enable loading of keys to the .platform keyring and blacklisted
+ hashes to the .blacklist keyring for powerpc based platforms.
+
config INTEGRITY_AUDIT
bool "Enables integrity auditing support "
depends on AUDIT
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
index 35e6ca773734..7ee39d66cf16 100644
--- a/security/integrity/Makefile
+++ b/security/integrity/Makefile
@@ -11,8 +11,11 @@ integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o
integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
- platform_certs/load_uefi.o
+ platform_certs/load_uefi.o \
+ platform_certs/keyring_handler.o
integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
-
+integrity-$(CONFIG_LOAD_PPC_KEYS) += platform_certs/efi_parser.o \
+ platform_certs/load_powerpc.o \
+ platform_certs/keyring_handler.o
obj-$(CONFIG_IMA) += ima/
obj-$(CONFIG_EVM) += evm/
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 3689081aaf38..df4ca482fb53 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -217,6 +217,9 @@ void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
struct evm_ima_xattr_data *xattr_value,
int xattr_len, const struct modsig *modsig, int pcr,
struct ima_template_desc *template_desc);
+void process_buffer_measurement(const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr);
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
int ima_alloc_init_template(struct ima_event_data *event_data,
@@ -253,6 +256,8 @@ int ima_policy_show(struct seq_file *m, void *v);
#define IMA_APPRAISE_KEXEC 0x40
#ifdef CONFIG_IMA_APPRAISE
+int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr);
int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
@@ -268,6 +273,12 @@ int ima_read_xattr(struct dentry *dentry,
struct evm_ima_xattr_data **xattr_value);
#else
+static inline int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ return 0;
+}
+
static inline int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file,
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 136ae4e0ee92..300c8d2943c5 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -12,6 +12,7 @@
#include <linux/magic.h>
#include <linux/ima.h>
#include <linux/evm.h>
+#include <keys/system_keyring.h>
#include "ima.h"
@@ -304,6 +305,38 @@ static int modsig_verify(enum ima_hooks func, const struct modsig *modsig,
}
/*
+ * ima_check_blacklist - determine if the binary is blacklisted.
+ *
+ * Add the hash of the blacklisted binary to the measurement list, based
+ * on policy.
+ *
+ * Returns -EPERM if the hash is blacklisted.
+ */
+int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ enum hash_algo hash_algo;
+ const u8 *digest = NULL;
+ u32 digestsize = 0;
+ int rc = 0;
+
+ if (!(iint->flags & IMA_CHECK_BLACKLIST))
+ return 0;
+
+ if (iint->flags & IMA_MODSIG_ALLOWED && modsig) {
+ ima_get_modsig_digest(modsig, &hash_algo, &digest, &digestsize);
+
+ rc = is_binary_blacklisted(digest, digestsize);
+ if ((rc == -EPERM) && (iint->flags & IMA_MEASURE))
+ process_buffer_measurement(digest, digestsize,
+ "blacklisted-hash", NONE,
+ pcr);
+ }
+
+ return rc;
+}
+
+/*
* ima_appraise_measurement - appraise file measurement
*
* Call evm_verifyxattr() to verify the integrity of 'security.ima'.
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 60027c643ecd..d7e987baf127 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -335,10 +335,14 @@ static int process_measurement(struct file *file, const struct cred *cred,
xattr_value, xattr_len, modsig, pcr,
template_desc);
if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
- inode_lock(inode);
- rc = ima_appraise_measurement(func, iint, file, pathname,
- xattr_value, xattr_len, modsig);
- inode_unlock(inode);
+ rc = ima_check_blacklist(iint, modsig, pcr);
+ if (rc != -EPERM) {
+ inode_lock(inode);
+ rc = ima_appraise_measurement(func, iint, file,
+ pathname, xattr_value,
+ xattr_len, modsig);
+ inode_unlock(inode);
+ }
if (!rc)
rc = mmap_violation_check(func, file, &pathbuf,
&pathname, filename);
@@ -626,14 +630,14 @@ int ima_load_data(enum kernel_load_data_id id)
* @buf: pointer to the buffer that needs to be added to the log.
* @size: size of buffer(in bytes).
* @eventname: event name to be used for the buffer entry.
- * @cred: a pointer to a credentials structure for user validation.
- * @secid: the secid of the task to be validated.
+ * @func: IMA hook
+ * @pcr: the PCR to extend with the measurement
*
* Based on policy, the buffer is measured into the ima log.
*/
-static void process_buffer_measurement(const void *buf, int size,
- const char *eventname,
- const struct cred *cred, u32 secid)
+void process_buffer_measurement(const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr)
{
int ret = 0;
struct ima_template_entry *entry = NULL;
@@ -642,19 +646,45 @@ static void process_buffer_measurement(const void *buf, int size,
.filename = eventname,
.buf = buf,
.buf_len = size};
- struct ima_template_desc *template_desc = NULL;
+ struct ima_template_desc *template = NULL;
struct {
struct ima_digest_data hdr;
char digest[IMA_MAX_DIGEST_SIZE];
} hash = {};
int violation = 0;
- int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
int action = 0;
+ u32 secid;
- action = ima_get_action(NULL, cred, secid, 0, KEXEC_CMDLINE, &pcr,
- &template_desc);
- if (!(action & IMA_MEASURE))
- return;
+ /*
+	 * Both LSM hooks and auxiliary buffer measurements are
+	 * based on policy. To avoid code duplication, differentiate
+	 * between the LSM hooks and auxiliary buffer measurements,
+ * retrieving the policy rule information only for the LSM hook
+ * buffer measurements.
+ */
+ if (func) {
+ security_task_getsecid(current, &secid);
+ action = ima_get_action(NULL, current_cred(), secid, 0, func,
+ &pcr, &template);
+ if (!(action & IMA_MEASURE))
+ return;
+ }
+
+ if (!pcr)
+ pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+
+ if (!template) {
+ template = lookup_template_desc("ima-buf");
+ ret = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (ret < 0) {
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), ret);
+ return;
+ }
+ }
iint.ima_hash = &hash.hdr;
iint.ima_hash->algo = ima_hash_algo;
@@ -664,7 +694,7 @@ static void process_buffer_measurement(const void *buf, int size,
if (ret < 0)
goto out;
- ret = ima_alloc_init_template(&event_data, &entry, template_desc);
+ ret = ima_alloc_init_template(&event_data, &entry, template);
if (ret < 0)
goto out;
@@ -686,13 +716,9 @@ out:
*/
void ima_kexec_cmdline(const void *buf, int size)
{
- u32 secid;
-
- if (buf && size != 0) {
- security_task_getsecid(current, &secid);
+ if (buf && size != 0)
process_buffer_measurement(buf, size, "kexec-cmdline",
- current_cred(), secid);
- }
+ KEXEC_CMDLINE, 0);
}
static int __init init_ima(void)
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 5380aca2b351..f19a895ad7cd 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -765,8 +765,8 @@ enum {
Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq,
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
- Opt_appraise_type, Opt_permit_directio,
- Opt_pcr, Opt_template, Opt_err
+ Opt_appraise_type, Opt_appraise_flag,
+ Opt_permit_directio, Opt_pcr, Opt_template, Opt_err
};
static const match_table_t policy_tokens = {
@@ -798,6 +798,7 @@ static const match_table_t policy_tokens = {
{Opt_euid_lt, "euid<%s"},
{Opt_fowner_lt, "fowner<%s"},
{Opt_appraise_type, "appraise_type=%s"},
+ {Opt_appraise_flag, "appraise_flag=%s"},
{Opt_permit_directio, "permit_directio"},
{Opt_pcr, "pcr=%s"},
{Opt_template, "template=%s"},
@@ -1172,6 +1173,11 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
else
result = -EINVAL;
break;
+ case Opt_appraise_flag:
+ ima_log_string(ab, "appraise_flag", args[0].from);
+ if (strstr(args[0].from, "blacklist"))
+ entry->flags |= IMA_CHECK_BLACKLIST;
+ break;
case Opt_permit_directio:
entry->flags |= IMA_PERMIT_DIRECTIO;
break;
@@ -1500,6 +1506,8 @@ int ima_policy_show(struct seq_file *m, void *v)
else
seq_puts(m, "appraise_type=imasig ");
}
+ if (entry->flags & IMA_CHECK_BLACKLIST)
+ seq_puts(m, "appraise_flag=check_blacklist ");
if (entry->flags & IMA_PERMIT_DIRECTIO)
seq_puts(m, "permit_directio ");
rcu_read_unlock();
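Together with the seq_file output above, the parser gives policy authors a new rule modifier. An illustrative rule (hedged; the authoritative grammar lives in Documentation/ABI/testing/ima_policy) that appraises kexec kernel images with appended signatures allowed and enables the blacklist check:

	appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig
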
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index d9323d31a3a8..73fc286834d7 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -32,6 +32,7 @@
#define EVM_IMMUTABLE_DIGSIG 0x08000000
#define IMA_FAIL_UNVERIFIABLE_SIGS 0x10000000
#define IMA_MODSIG_ALLOWED 0x20000000
+#define IMA_CHECK_BLACKLIST 0x40000000
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_HASH | IMA_APPRAISE_SUBMASK)
diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
new file mode 100644
index 000000000000..c5ba695c10e3
--- /dev/null
+++ b/security/integrity/platform_certs/keyring_handler.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include "../integrity.h"
+
+static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
+static efi_guid_t efi_cert_x509_sha256_guid __initdata =
+ EFI_CERT_X509_SHA256_GUID;
+static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
+
+/*
+ * Blacklist a hash.
+ */
+static __init void uefi_blacklist_hash(const char *source, const void *data,
+ size_t len, const char *type,
+ size_t type_len)
+{
+ char *hash, *p;
+
+ hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
+ if (!hash)
+ return;
+ p = memcpy(hash, type, type_len);
+ p += type_len;
+ bin2hex(p, data, len);
+ p += len * 2;
+ *p = 0;
+
+ mark_hash_blacklisted(hash);
+ kfree(hash);
+}
+
+/*
+ * Blacklist an X509 TBS hash.
+ */
+static __init void uefi_blacklist_x509_tbs(const char *source,
+ const void *data, size_t len)
+{
+ uefi_blacklist_hash(source, data, len, "tbs:", 4);
+}
+
+/*
+ * Blacklist the hash of an executable.
+ */
+static __init void uefi_blacklist_binary(const char *source,
+ const void *data, size_t len)
+{
+ uefi_blacklist_hash(source, data, len, "bin:", 4);
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the UEFI db and MokListRT tables.
+ */
+__init efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return add_to_platform_keyring;
+ return 0;
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the UEFI dbx and MokListXRT tables.
+ */
+__init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
+ return uefi_blacklist_x509_tbs;
+ if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
+ return uefi_blacklist_binary;
+ return 0;
+}
diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
new file mode 100644
index 000000000000..2462bfa08fe3
--- /dev/null
+++ b/security/integrity/platform_certs/keyring_handler.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef PLATFORM_CERTS_INTERNAL_H
+#define PLATFORM_CERTS_INTERNAL_H
+
+#include <linux/efi.h>
+
+void blacklist_hash(const char *source, const void *data,
+ size_t len, const char *type,
+ size_t type_len);
+
+/*
+ * Blacklist an X509 TBS hash.
+ */
+void blacklist_x509_tbs(const char *source, const void *data, size_t len);
+
+/*
+ * Blacklist the hash of an executable.
+ */
+void blacklist_binary(const char *source, const void *data, size_t len);
+
+/*
+ * Return the handler for particular signature list types found in the db.
+ */
+efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
+
+/*
+ * Return the handler for particular signature list types found in the dbx.
+ */
+efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
+
+#endif
diff --git a/security/integrity/platform_certs/load_powerpc.c b/security/integrity/platform_certs/load_powerpc.c
new file mode 100644
index 000000000000..a2900cb85357
--- /dev/null
+++ b/security/integrity/platform_certs/load_powerpc.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * - loads keys and hashes stored and controlled by the firmware.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <asm/secure_boot.h>
+#include <asm/secvar.h>
+#include "keyring_handler.h"
+
+/*
+ * Get a certificate list blob from the named secure variable.
+ */
+static __init void *get_cert_list(u8 *key, unsigned long keylen, uint64_t *size)
+{
+ int rc;
+ void *db;
+
+ rc = secvar_ops->get(key, keylen, NULL, size);
+ if (rc) {
+ pr_err("Couldn't get size: %d\n", rc);
+ return NULL;
+ }
+
+ db = kmalloc(*size, GFP_KERNEL);
+ if (!db)
+ return NULL;
+
+ rc = secvar_ops->get(key, keylen, db, size);
+ if (rc) {
+ kfree(db);
+ pr_err("Error reading %s var: %d\n", key, rc);
+ return NULL;
+ }
+
+ return db;
+}
+
+/*
+ * Load the certs contained in the keys databases into the platform trusted
+ * keyring and the blacklisted X.509 cert SHA256 hashes into the blacklist
+ * keyring.
+ */
+static int __init load_powerpc_certs(void)
+{
+ void *db = NULL, *dbx = NULL;
+ uint64_t dbsize = 0, dbxsize = 0;
+ int rc = 0;
+ struct device_node *node;
+
+ if (!secvar_ops)
+ return -ENODEV;
+
+ /* The following only applies for the edk2-compat backend. */
+ node = of_find_compatible_node(NULL, NULL, "ibm,edk2-compat-v1");
+ if (!node)
+ return -ENODEV;
+
+ /*
+	 * Get db and dbx. They might not exist, so it isn't an error if we
+ * can't get them.
+ */
+ db = get_cert_list("db", 3, &dbsize);
+ if (!db) {
+ pr_err("Couldn't get db list from firmware\n");
+ } else {
+ rc = parse_efi_signature_list("powerpc:db", db, dbsize,
+ get_handler_for_db);
+ if (rc)
+ pr_err("Couldn't parse db signatures: %d\n", rc);
+ kfree(db);
+ }
+
+ dbx = get_cert_list("dbx", 4, &dbxsize);
+ if (!dbx) {
+ pr_info("Couldn't get dbx list from firmware\n");
+ } else {
+ rc = parse_efi_signature_list("powerpc:dbx", dbx, dbxsize,
+ get_handler_for_dbx);
+ if (rc)
+ pr_err("Couldn't parse dbx signatures: %d\n", rc);
+ kfree(dbx);
+ }
+
+ of_node_put(node);
+
+ return rc;
+}
+late_initcall(load_powerpc_certs);
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index 81b19c52832b..111898aad56e 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -9,11 +9,7 @@
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include "../integrity.h"
-
-static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
-static efi_guid_t efi_cert_x509_sha256_guid __initdata =
- EFI_CERT_X509_SHA256_GUID;
-static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
+#include "keyring_handler.h"
/*
* Look to see if a UEFI variable called MokIgnoreDB exists and return true if
@@ -68,72 +64,6 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
}
/*
- * Blacklist a hash.
- */
-static __init void uefi_blacklist_hash(const char *source, const void *data,
- size_t len, const char *type,
- size_t type_len)
-{
- char *hash, *p;
-
- hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
- if (!hash)
- return;
- p = memcpy(hash, type, type_len);
- p += type_len;
- bin2hex(p, data, len);
- p += len * 2;
- *p = 0;
-
- mark_hash_blacklisted(hash);
- kfree(hash);
-}
-
-/*
- * Blacklist an X509 TBS hash.
- */
-static __init void uefi_blacklist_x509_tbs(const char *source,
- const void *data, size_t len)
-{
- uefi_blacklist_hash(source, data, len, "tbs:", 4);
-}
-
-/*
- * Blacklist the hash of an executable.
- */
-static __init void uefi_blacklist_binary(const char *source,
- const void *data, size_t len)
-{
- uefi_blacklist_hash(source, data, len, "bin:", 4);
-}
-
-/*
- * Return the appropriate handler for particular signature list types found in
- * the UEFI db and MokListRT tables.
- */
-static __init efi_element_handler_t get_handler_for_db(const efi_guid_t *
- sig_type)
-{
- if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
- return add_to_platform_keyring;
- return 0;
-}
-
-/*
- * Return the appropriate handler for particular signature list types found in
- * the UEFI dbx and MokListXRT tables.
- */
-static __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *
- sig_type)
-{
- if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
- return uefi_blacklist_x509_tbs;
- if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
- return uefi_blacklist_binary;
- return 0;
-}
-
-/*
* Load the certs contained in the UEFI databases into the platform trusted
* keyring and the UEFI blacklisted X.509 cert SHA256 hashes into the blacklist
* keyring.
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
index 40b790536def..b2f87015d6e9 100644
--- a/security/lockdown/lockdown.c
+++ b/security/lockdown/lockdown.c
@@ -32,12 +32,14 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
[LOCKDOWN_DEBUGFS] = "debugfs access",
+ [LOCKDOWN_XMON_WR] = "xmon write access",
[LOCKDOWN_INTEGRITY_MAX] = "integrity",
[LOCKDOWN_KCORE] = "/proc/kcore access",
[LOCKDOWN_KPROBES] = "use of kprobes",
[LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
[LOCKDOWN_PERF] = "unsafe use of perf",
[LOCKDOWN_TRACEFS] = "use of tracefs",
+ [LOCKDOWN_XMON_RW] = "xmon read and write access",
[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
};
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 28eb05490d59..116b4d644f68 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2549,9 +2549,8 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
{
const struct task_security_struct *tsec = selinux_cred(current_cred());
- struct itimerval itimer;
u32 osid, sid;
- int rc, i;
+ int rc;
osid = tsec->osid;
sid = tsec->sid;
@@ -2569,11 +2568,8 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
rc = avc_has_perm(&selinux_state,
osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
if (rc) {
- if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
- memset(&itimer, 0, sizeof itimer);
- for (i = 0; i < 3; i++)
- do_setitimer(i, &itimer, NULL);
- }
+ clear_itimer();
+
spin_lock_irq(&current->sighand->siglock);
if (!fatal_signal_pending(current)) {
flush_sigqueue(&current->pending);
@@ -3144,6 +3140,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
}
+ if (!selinux_state.initialized)
+ return (inode_owner_or_capable(inode) ? 0 : -EPERM);
+
sbsec = inode->i_sb->s_security;
if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
@@ -3227,6 +3226,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
+ if (!selinux_state.initialized) {
+ /* If we haven't even been initialized, then we can't validate
+ * against a policy, so leave the label as invalid. It may
+ * resolve to a valid label on the next revalidation try if
+ * we've since initialized.
+ */
+ return;
+ }
+
rc = security_context_to_sid_force(&selinux_state, value, size,
&newsid);
if (rc) {
@@ -4623,8 +4631,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
inet_get_local_port_range(sock_net(sk), &low, &high);
- if (snum < max(inet_prot_sock(sock_net(sk)), low) ||
- snum > high) {
+ if (inet_port_requires_bind_service(sock_net(sk), snum) ||
+ snum < low || snum > high) {
err = sel_netport_sid(sk->sk_protocol,
snum, &sid);
if (err)
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 111121281c47..ae840634e3c7 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -40,10 +40,11 @@
#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
#define POLICYDB_VERSION_XPERMS_IOCTL 30
#define POLICYDB_VERSION_INFINIBAND 31
+#define POLICYDB_VERSION_GLBLUB 32
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_INFINIBAND
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_GLBLUB
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index e6c7643c3fc0..ee94fa469c29 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -548,10 +548,6 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
if (*ppos != 0)
goto out;
- length = -EFBIG;
- if (count > 64 * 1024 * 1024)
- goto out;
-
length = -ENOMEM;
data = vmalloc(count);
if (!data)
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 2260c44a568c..513e67f48878 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -95,6 +95,38 @@ out:
return rc;
}
+
+static inline int mls_context_glblub(struct context *dst,
+ struct context *c1, struct context *c2)
+{
+ struct mls_range *dr = &dst->range, *r1 = &c1->range, *r2 = &c2->range;
+ int rc = 0;
+
+ if (r1->level[1].sens < r2->level[0].sens ||
+ r2->level[1].sens < r1->level[0].sens)
+ /* These ranges have no common sensitivities */
+ return -EINVAL;
+
+ /* Take the greatest of the low */
+ dr->level[0].sens = max(r1->level[0].sens, r2->level[0].sens);
+
+ /* Take the least of the high */
+ dr->level[1].sens = min(r1->level[1].sens, r2->level[1].sens);
+
+ rc = ebitmap_and(&dr->level[0].cat,
+ &r1->level[0].cat, &r2->level[0].cat);
+ if (rc)
+ goto out;
+
+ rc = ebitmap_and(&dr->level[1].cat,
+ &r1->level[1].cat, &r2->level[1].cat);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
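A worked example of the glblub computation: for the ranges s1:c0,c1 - s3:c0,c1,c2 and s2:c1,c2 - s4:c1,c2,c3, the result takes max(s1, s2) = s2 for the low sensitivity, min(s3, s4) = s3 for the high, and intersects the category sets ({c0,c1} ∩ {c1,c2} = {c1} low, {c0,c1,c2} ∩ {c1,c2,c3} = {c1,c2} high), giving s2:c1 - s3:c1,c2.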
+
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 09929fc5ab47..c8c3663111e2 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -77,6 +77,24 @@ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
return 0;
}
+int ebitmap_and(struct ebitmap *dst, struct ebitmap *e1, struct ebitmap *e2)
+{
+ struct ebitmap_node *n;
+ int bit, rc;
+
+ ebitmap_init(dst);
+
+ ebitmap_for_each_positive_bit(e1, n, bit) {
+ if (ebitmap_get_bit(e2, bit)) {
+ rc = ebitmap_set_bit(dst, bit, 1);
+ if (rc < 0)
+ return rc;
+ }
+ }
+ return 0;
+}
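A minimal usage sketch of the new helper (hypothetical caller; error handling for ebitmap_set_bit() elided for brevity):

	struct ebitmap a, b, result;

	ebitmap_init(&a);
	ebitmap_init(&b);
	ebitmap_set_bit(&a, 1, 1);	/* a = {1, 2} */
	ebitmap_set_bit(&a, 2, 1);
	ebitmap_set_bit(&b, 2, 1);	/* b = {2, 3} */
	ebitmap_set_bit(&b, 3, 1);

	if (!ebitmap_and(&result, &a, &b))
		/* only bit 2 is now set in result */;

	ebitmap_destroy(&a);
	ebitmap_destroy(&b);
	ebitmap_destroy(&result);
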
+
#ifdef CONFIG_NETLABEL
/**
* ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 6aa7cf6a2197..9a23b81b8832 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -124,6 +124,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
+int ebitmap_and(struct ebitmap *dst, struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 5e05f5b902d7..ec5e3d1da9ac 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -529,6 +529,9 @@ int mls_compute_sid(struct policydb *p,
return mls_context_cpy_high(newcontext, tcontext);
case DEFAULT_TARGET_LOW_HIGH:
return mls_context_cpy(newcontext, tcontext);
+ case DEFAULT_GLBLUB:
+ return mls_context_glblub(newcontext,
+ scontext, tcontext);
}
/* Fallthrough */
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 1260f5fb766e..e20624a68f5d 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -160,6 +160,11 @@ static struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_GLBLUB,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static struct policydb_compat_info *policydb_lookup_compat(int version)
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 162d0e79b85b..bc56b14e2216 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -69,6 +69,7 @@ struct class_datum {
#define DEFAULT_TARGET_LOW 4
#define DEFAULT_TARGET_HIGH 5
#define DEFAULT_TARGET_LOW_HIGH 6
+#define DEFAULT_GLBLUB 7
char default_range;
};
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index abeb09c30633..ecea41ce919b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -28,7 +28,6 @@
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/pipe_fs_i.h>
#include <net/cipso_ipv4.h>
#include <net/ip.h>
#include <net/ipv6.h>
diff --git a/sound/aoa/soundbus/i2sbus/pcm.c b/sound/aoa/soundbus/i2sbus/pcm.c
index 7f0754dd3d7d..a94e4023fadf 100644
--- a/sound/aoa/soundbus/i2sbus/pcm.c
+++ b/sound/aoa/soundbus/i2sbus/pcm.c
@@ -1028,7 +1028,7 @@ i2sbus_attach_codec(struct soundbus_dev *dev, struct snd_card *card,
/* well, we really should support scatter/gather DMA */
snd_pcm_lib_preallocate_pages_for_all(
dev->pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(macio_get_pci_dev(i2sdev->macio)),
+ &macio_get_pci_dev(i2sdev->macio)->dev,
64 * 1024, 64 * 1024);
return 0;
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 54500bd098f9..a86c95d89824 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -175,7 +175,15 @@ void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
}
EXPORT_SYMBOL(pxa2xx_pcm_free_dma_buffers);
-int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
+{
+ pxa2xx_pcm_free_dma_buffers(pcm);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_free);
+
+int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -203,18 +211,64 @@ int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
EXPORT_SYMBOL(pxa2xx_soc_pcm_new);
-const struct snd_pcm_ops pxa2xx_pcm_ops = {
- .open = pxa2xx_pcm_open,
- .close = pxa2xx_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pxa2xx_pcm_hw_params,
- .hw_free = pxa2xx_pcm_hw_free,
- .prepare = pxa2xx_pcm_prepare,
- .trigger = pxa2xx_pcm_trigger,
- .pointer = pxa2xx_pcm_pointer,
- .mmap = pxa2xx_pcm_mmap,
-};
-EXPORT_SYMBOL(pxa2xx_pcm_ops);
+int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_open(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_open);
+
+int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_close(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_close);
+
+int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ return pxa2xx_pcm_hw_params(substream, params);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_hw_params);
+
+int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_hw_free(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_hw_free);
+
+int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_prepare(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_prepare);
+
+int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ return pxa2xx_pcm_trigger(substream, cmd);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_trigger);
+
+snd_pcm_uframes_t
+pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return pxa2xx_pcm_pointer(substream);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_pointer);
+
+int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ return pxa2xx_pcm_mmap(substream, vma);
+}
+EXPORT_SYMBOL(pxa2xx_soc_pcm_mmap);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx sound library");
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 4ee79ad6ae22..4044c42d8595 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -72,11 +72,11 @@ config SND_PCM_OSS
config SND_PCM_OSS_PLUGINS
bool "OSS PCM (digital audio) API - Include plugin system"
depends on SND_PCM_OSS
- default y
+ default y
help
- If you disable this option, the ALSA's OSS PCM API will not
- support conversion of channels, formats and rates. It will
- behave like most of new OSS/Free drivers in 2.4/2.6 kernels.
+	  If you disable this option, ALSA's OSS PCM API will not
+	  support conversion of channels, formats and rates. It will
+	  behave like most of the new OSS/Free drivers in 2.4/2.6 kernels.
config SND_PCM_TIMER
bool "PCM timer interface" if EXPERT
@@ -128,13 +128,13 @@ config SND_SUPPORT_OLD_API
or older).
config SND_PROC_FS
- bool "Sound Proc FS Support" if EXPERT
- depends on PROC_FS
- default y
- help
- Say 'N' to disable Sound proc FS, which may reduce code size about
- 9KB on x86_64 platform.
- If unsure say Y.
+ bool "Sound Proc FS Support" if EXPERT
+ depends on PROC_FS
+ default y
+ help
+	  Say 'N' to disable the sound proc FS, which may reduce the code
+	  size by about 9KB on the x86_64 platform.
+	  If unsure, say Y.
config SND_VERBOSE_PROCFS
bool "Verbose procfs contents"
@@ -142,8 +142,8 @@ config SND_VERBOSE_PROCFS
default y
help
Say Y here to include code for verbose procfs contents (provides
- useful information to developers when a problem occurs). On the
- other side, it makes the ALSA subsystem larger.
+ useful information to developers when a problem occurs). On the
+	  other hand, it makes the ALSA subsystem larger.
config SND_VERBOSE_PRINTK
bool "Verbose printk"
@@ -164,7 +164,7 @@ config SND_DEBUG_VERBOSE
depends on SND_DEBUG
help
Say Y here to enable extra-verbose debugging messages.
-
+
Let me repeat: it enables EXTRA-VERBOSE DEBUGGING messages.
So, say Y only if you are ready to be annoyed.
diff --git a/sound/core/init.c b/sound/core/init.c
index db99b7fad6ad..faa9f03c01ca 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -215,6 +215,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
init_waitqueue_head(&card->power_sleep);
#endif
init_waitqueue_head(&card->remove_sleep);
+ card->sync_irq = -1;
device_initialize(&card->card_dev);
card->card_dev.parent = parent;
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 6850d13aa98c..a83553fbedf0 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
@@ -99,6 +100,14 @@ static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
*
*/
+static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
+ gfp_t default_gfp)
+{
+ if (!dev)
+ return default_gfp;
+ else
+ return (__force gfp_t)(unsigned long)dev;
+}
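The helper recovers a gfp mask that callers smuggle in through the device pointer for the CONTINUOUS (and now VMALLOC) buffer types. A hedged caller sketch using the long-standing snd_dma_continuous_data() encoding macro from the memalloc header:

	struct snd_dma_buffer dmab;
	int err;

	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_VMALLOC,
				  snd_dma_continuous_data(GFP_KERNEL),
				  64 * 1024, &dmab);
	if (!err)
		snd_dma_free_pages(&dmab);
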
/**
* snd_dma_alloc_pages - allocate the buffer area according to the given type
@@ -116,20 +125,25 @@ static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
+ gfp_t gfp;
+
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
- if (WARN_ON(!device))
- return -EINVAL;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
- dmab->area = alloc_pages_exact(size,
- (__force gfp_t)(unsigned long)device);
+ gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
+ dmab->area = alloc_pages_exact(size, gfp);
+ dmab->addr = 0;
+ break;
+ case SNDRV_DMA_TYPE_VMALLOC:
+ gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
+ dmab->area = __vmalloc(size, gfp, PAGE_KERNEL);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
@@ -215,6 +229,9 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
case SNDRV_DMA_TYPE_CONTINUOUS:
free_pages_exact(dmab->area, dmab->bytes);
break;
+ case SNDRV_DMA_TYPE_VMALLOC:
+ vfree(dmab->area);
+ break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
case SNDRV_DMA_TYPE_DEV_IRAM:
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index f57c610d7523..13db77771f0f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2717,6 +2717,10 @@ static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long
static long snd_pcm_oss_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ /*
+	 * Everything is compatible except SNDCTL_DSP_MAPINBUF/SNDCTL_DSP_MAPOUTBUF,
+ * which are not implemented for the native case either
+ */
return snd_pcm_oss_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#else
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index 89a05926ac73..5749a8a49784 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -369,4 +369,87 @@ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
+/**
+ * snd_dmaengine_pcm_refine_runtime_hwparams - Refine runtime hw params
+ * @substream: PCM substream
+ * @dma_data: DAI DMA data
+ * @hw: PCM hw params
+ * @chan: DMA channel to use for data transfers
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function will query DMA capability, then refine the pcm hardware
+ * parameters.
+ */
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan)
+{
+ struct dma_slave_caps dma_caps;
+ u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ snd_pcm_format_t i;
+ int ret = 0;
+
+ if (!hw || !chan || !dma_data)
+ return -EINVAL;
+
+ ret = dma_get_slave_caps(chan, &dma_caps);
+ if (ret == 0) {
+ if (dma_caps.cmd_pause && dma_caps.cmd_resume)
+ hw->info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+ if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
+ hw->info |= SNDRV_PCM_INFO_BATCH;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ addr_widths = dma_caps.dst_addr_widths;
+ else
+ addr_widths = dma_caps.src_addr_widths;
+ }
+
+ /*
+ * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
+ * hw.formats set to 0, meaning no restrictions are in place.
+ * In this case it's the responsibility of the DAI driver to
+ * provide the supported format information.
+ */
+ if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
+ /*
+ * Prepare formats mask for valid/allowed sample types. If the
+ * dma does not have support for the given physical word size,
+ * it needs to be masked out so user space can not use the
+ * format which produces corrupted audio.
+ * In case the dma driver does not implement the slave_caps the
+ * default assumption is that it supports 1, 2 and 4 bytes
+ * widths.
+ */
+ for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+ int bits = snd_pcm_format_physical_width(i);
+
+ /*
+ * Enable only samples with DMA supported physical
+ * widths
+ */
+ switch (bits) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ case 64:
+ if (addr_widths & (1 << (bits / 8)))
+ hw->formats |= pcm_format_to_bits(i);
+ break;
+ default:
+ /* Unsupported types */
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_refine_runtime_hwparams);
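A hedged sketch of how a component driver's open callback might consume the new helper (the function name and the baseline hardware description are illustrative, not taken from this patch):

	static int example_open(struct snd_soc_component *component,
				struct snd_pcm_substream *substream)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_dmaengine_dai_dma_data *dma_data =
			snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
		struct snd_pcm_hardware hw = {
			.info = SNDRV_PCM_INFO_INTERLEAVED |
				SNDRV_PCM_INFO_MMAP,
			.buffer_bytes_max = 64 * 1024,
			.period_bytes_min = 128,
			.period_bytes_max = 32 * 1024,
			.periods_min = 2,
			.periods_max = 255,
		};
		int ret;

		/* let the DMA engine's capabilities trim the hw template */
		ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
				dma_data, &hw,
				snd_dmaengine_pcm_get_chan(substream));
		if (ret)
			return ret;

		return snd_soc_set_runtime_hwparams(substream, &hw);
	}
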
+
MODULE_LICENSE("GPL");
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index 1161ab2d6a5b..384efd002984 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -67,4 +67,11 @@ static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
void __snd_pcm_xrun(struct snd_pcm_substream *substream);
void snd_pcm_group_init(struct snd_pcm_group *group);
+#ifdef CONFIG_SND_DMA_SGBUF
+struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
+ unsigned long offset);
+#endif
+
+#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
+
#endif /* __SOUND_CORE_PCM_LOCAL_H */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 7600dcdf5fd4..d4702cc1d376 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -15,6 +15,7 @@
#include <sound/pcm.h>
#include <sound/info.h>
#include <sound/initval.h>
+#include "pcm_local.h"
static int preallocate_dma = 1;
module_param(preallocate_dma, int, 0444);
@@ -193,9 +194,15 @@ static inline void preallocate_info_init(struct snd_pcm_substream *substream)
/*
* pre-allocate the buffer and create a proc file for the substream
*/
-static void snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
- size_t size, size_t max)
+static void preallocate_pages(struct snd_pcm_substream *substream,
+ int type, struct device *data,
+ size_t size, size_t max, bool managed)
{
+ if (snd_BUG_ON(substream->dma_buffer.dev.type))
+ return;
+
+ substream->dma_buffer.dev.type = type;
+ substream->dma_buffer.dev.dev = data;
if (size > 0 && preallocate_dma && substream->number < maximum_substreams)
preallocate_pcm_pages(substream, size);
@@ -203,9 +210,25 @@ static void snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
if (substream->dma_buffer.bytes > 0)
substream->buffer_bytes_max = substream->dma_buffer.bytes;
substream->dma_max = max;
- preallocate_info_init(substream);
+ if (max > 0)
+ preallocate_info_init(substream);
+ if (managed)
+ substream->managed_buffer_alloc = 1;
}
+static void preallocate_pages_for_all(struct snd_pcm *pcm, int type,
+ void *data, size_t size, size_t max,
+ bool managed)
+{
+ struct snd_pcm_substream *substream;
+ int stream;
+
+ for (stream = 0; stream < 2; stream++)
+ for (substream = pcm->streams[stream].substream; substream;
+ substream = substream->next)
+ preallocate_pages(substream, type, data, size, max,
+ managed);
+}
/**
* snd_pcm_lib_preallocate_pages - pre-allocation for the given DMA type
@@ -221,9 +244,7 @@ void snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
int type, struct device *data,
size_t size, size_t max)
{
- substream->dma_buffer.dev.type = type;
- substream->dma_buffer.dev.dev = data;
- snd_pcm_lib_preallocate_pages1(substream, size, max);
+ preallocate_pages(substream, type, data, size, max, false);
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages);
@@ -242,17 +263,57 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int type, void *data,
size_t size, size_t max)
{
- struct snd_pcm_substream *substream;
- int stream;
-
- for (stream = 0; stream < 2; stream++)
- for (substream = pcm->streams[stream].substream; substream; substream = substream->next)
- snd_pcm_lib_preallocate_pages(substream, type, data, size, max);
+ preallocate_pages_for_all(pcm, type, data, size, max, false);
}
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
-#ifdef CONFIG_SND_DMA_SGBUF
/**
+ * snd_pcm_set_managed_buffer - set up buffer management for a substream
+ * @substream: the pcm substream instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ * @max: the max. allowed pre-allocation size
+ *
+ * Do pre-allocation for the given DMA buffer type, and set the managed
+ * buffer allocation mode for the given substream.
+ * In this mode, the PCM core allocates a buffer automatically before the
+ * PCM hw_params ops call and releases it after the PCM hw_free ops call,
+ * so the driver doesn't need to invoke allocation and release explicitly
+ * in its callbacks.
+ * When a buffer is actually allocated before the PCM hw_params call, the
+ * runtime buffer_changed flag is set so that drivers can adapt their h/w
+ * parameters accordingly.
+ */
+void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max)
+{
+ preallocate_pages(substream, type, data, size, max, true);
+}
+EXPORT_SYMBOL(snd_pcm_set_managed_buffer);
+
+/**
+ * snd_pcm_set_managed_buffer_all - set up buffer management for all substreams
+ * @pcm: the pcm instance
+ * @type: DMA type (SNDRV_DMA_TYPE_*)
+ * @data: DMA type dependent data
+ * @size: the requested pre-allocation size in bytes
+ * @max: the max. allowed pre-allocation size
+ *
+ * Do pre-allocation for all substreams of the given pcm for the specified
+ * DMA type and size, and set the managed_buffer_alloc flag on each substream.
+ */
+void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max)
+{
+ preallocate_pages_for_all(pcm, type, data, size, max, true);
+}
+EXPORT_SYMBOL(snd_pcm_set_managed_buffer_all);
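A sketch of the driver-side simplification this enables (illustrative; my_chip, my_pcm_new and my_chip_setup_dma are hypothetical): the constructor requests a managed buffer once, and hw_params/hw_free drop their explicit snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages() calls:

	static int my_pcm_new(struct my_chip *chip, struct snd_pcm *pcm)
	{
		/* pre-allocate 64 kB, allow resizing up to 128 kB; the PCM
		 * core now allocates in hw_params and frees in hw_free
		 */
		snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
					       chip->dev, 64 * 1024, 128 * 1024);
		return 0;
	}

	static int my_pcm_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params)
	{
		/* buffer is already allocated here; just program the hardware */
		return my_chip_setup_dma(substream, params);
	}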
+
+#ifdef CONFIG_SND_DMA_SGBUF
+/*
* snd_pcm_sgbuf_ops_page - get the page struct at the given offset
* @substream: the pcm substream instance
* @offset: the buffer offset
@@ -270,7 +331,6 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigne
return NULL;
return sgbuf->page_table[idx];
}
-EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
#endif /* CONFIG_SND_DMA_SGBUF */
/**
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 91c6ad58729f..1fe581167b7b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -13,6 +13,7 @@
#include <linux/pm_qos.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
@@ -177,6 +178,16 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+/* Run PCM ioctl ops */
+static int snd_pcm_ops_ioctl(struct snd_pcm_substream *substream,
+ unsigned cmd, void *arg)
+{
+ if (substream->ops->ioctl)
+ return substream->ops->ioctl(substream, cmd, arg);
+ else
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
struct snd_pcm *pcm = substream->pcm;
@@ -222,7 +233,8 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
return false;
if (substream->ops->mmap ||
- substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV)
+ (substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV &&
+ substream->dma_buffer.dev.type != SNDRV_DMA_TYPE_DEV_UC))
return true;
return dma_can_mmap(substream->dma_buffer.dev.dev);
@@ -446,8 +458,9 @@ static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
if (snd_mask_single(m) && snd_interval_single(i)) {
- err = substream->ops->ioctl(substream,
- SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
+ err = snd_pcm_ops_ioctl(substream,
+ SNDRV_PCM_IOCTL1_FIFO_SIZE,
+ params);
if (err < 0)
return err;
}
@@ -555,6 +568,17 @@ static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
#endif
}
+static void snd_pcm_sync_stop(struct snd_pcm_substream *substream)
+{
+ if (substream->runtime->stop_operating) {
+ substream->runtime->stop_operating = false;
+ if (substream->ops->sync_stop)
+ substream->ops->sync_stop(substream);
+ else if (substream->pcm->card->sync_irq > 0)
+ synchronize_irq(substream->pcm->card->sync_irq);
+ }
+}
+
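For most drivers the cheapest way to honor this new synchronization point is to publish their interrupt line, so that the synchronize_irq() fallback above applies; a sketch (chip->irq and my_interrupt are hypothetical):

	err = request_irq(chip->irq, my_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, chip);
	if (err)
		return err;
	/* snd_pcm_sync_stop() will synchronize_irq() against this line */
	chip->card->sync_irq = chip->irq;

Drivers that defer work beyond the hard IRQ handler should provide a sync_stop PCM op instead.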
/**
* snd_pcm_hw_param_choose - choose a configuration defined by @params
* @pcm: PCM instance
@@ -647,6 +671,8 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
if (atomic_read(&substream->mmap_count))
return -EBADFD;
+ snd_pcm_sync_stop(substream);
+
params->rmask = ~0U;
err = snd_pcm_hw_refine(substream, params);
if (err < 0)
@@ -660,6 +686,14 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
goto _error;
+ if (substream->managed_buffer_alloc) {
+ err = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(params));
+ if (err < 0)
+ goto _error;
+ runtime->buffer_changed = err > 0;
+ }
+
if (substream->ops->hw_params != NULL) {
err = substream->ops->hw_params(substream, params);
if (err < 0)
@@ -721,6 +755,8 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
if (substream->ops->hw_free != NULL)
substream->ops->hw_free(substream);
+ if (substream->managed_buffer_alloc)
+ snd_pcm_lib_free_pages(substream);
return err;
}
@@ -765,8 +801,11 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
snd_pcm_stream_unlock_irq(substream);
if (atomic_read(&substream->mmap_count))
return -EBADFD;
+ snd_pcm_sync_stop(substream);
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
+ if (substream->managed_buffer_alloc)
+ snd_pcm_lib_free_pages(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
pm_qos_remove_request(&substream->latency_pm_qos_req);
return result;
@@ -957,7 +996,7 @@ static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
return -EINVAL;
memset(info, 0, sizeof(*info));
info->channel = channel;
- return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
+ return snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}
static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
@@ -1288,6 +1327,7 @@ static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
runtime->status->state = state;
snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MSTOP);
}
+ runtime->stop_operating = true;
wake_up(&runtime->sleep);
wake_up(&runtime->tsleep);
}
@@ -1564,6 +1604,7 @@ static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
snd_pcm_trigger_tstamp(substream);
runtime->status->state = runtime->status->suspended_state;
snd_pcm_timer_notify(substream, SNDRV_TIMER_EVENT_MRESUME);
+ snd_pcm_sync_stop(substream);
}
static const struct action_ops snd_pcm_action_resume = {
@@ -1633,7 +1674,7 @@ static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
+ int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);
if (err < 0)
return err;
runtime->hw_ptr_base = 0;
@@ -1684,6 +1725,7 @@ static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
{
int err;
+ snd_pcm_sync_stop(substream);
err = substream->ops->prepare(substream);
if (err < 0)
return err;
@@ -3334,7 +3376,18 @@ static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
void *vaddr = substream->runtime->dma_area + ofs;
- return virt_to_page(vaddr);
+
+ switch (substream->dma_buffer.dev.type) {
+#ifdef CONFIG_SND_DMA_SGBUF
+ case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
+ return snd_pcm_sgbuf_ops_page(substream, ofs);
+#endif /* CONFIG_SND_DMA_SGBUF */
+ case SNDRV_DMA_TYPE_VMALLOC:
+ return vmalloc_to_page(vaddr);
+ default:
+ return virt_to_page(vaddr);
+ }
}
/*
@@ -3403,7 +3456,8 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
- substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+ (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
+ substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
return dma_mmap_coherent(substream->dma_buffer.dev.dev,
area,
substream->runtime->dma_area,
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index 161f3170bd7e..63dc7bdb622d 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -272,7 +272,13 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
return -EINVAL;
if (tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tmr->alsa_id.dev_sclass = SNDRV_TIMER_SCLASS_SEQUENCER;
- err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue);
+ t = snd_timer_instance_new(str);
+ if (!t)
+ return -ENOMEM;
+ t->callback = snd_seq_timer_interrupt;
+ t->callback_data = q;
+ t->flags |= SNDRV_TIMER_IFLG_AUTO;
+ err = snd_timer_open(t, &tmr->alsa_id, q->queue);
if (err < 0 && tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_SLAVE) {
if (tmr->alsa_id.dev_class != SNDRV_TIMER_CLASS_GLOBAL ||
tmr->alsa_id.device != SNDRV_TIMER_GLOBAL_SYSTEM) {
@@ -282,16 +288,14 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
tid.dev_sclass = SNDRV_TIMER_SCLASS_SEQUENCER;
tid.card = -1;
tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
- err = snd_timer_open(&t, str, &tid, q->queue);
+ err = snd_timer_open(t, &tid, q->queue);
}
}
if (err < 0) {
pr_err("ALSA: seq fatal error: cannot create timer (%i)\n", err);
+ snd_timer_instance_free(t);
return err;
}
- t->callback = snd_seq_timer_interrupt;
- t->callback_data = q;
- t->flags |= SNDRV_TIMER_IFLG_AUTO;
spin_lock_irq(&tmr->lock);
tmr->timeri = t;
spin_unlock_irq(&tmr->lock);
@@ -310,8 +314,10 @@ int snd_seq_timer_close(struct snd_seq_queue *q)
t = tmr->timeri;
tmr->timeri = NULL;
spin_unlock_irq(&tmr->lock);
- if (t)
+ if (t) {
snd_timer_close(t);
+ snd_timer_instance_free(t);
+ }
return 0;
}
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 59ae21b0bb93..24fed5c78273 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -74,6 +74,9 @@ static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
+#define MAX_SLAVE_INSTANCES 1000
+static int num_slaves;
+
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
@@ -85,12 +88,11 @@ static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_l
/*
* create a timer instance with the given owner string.
- * when timer is not NULL, increments the module counter
*/
-static struct snd_timer_instance *snd_timer_instance_new(char *owner,
- struct snd_timer *timer)
+struct snd_timer_instance *snd_timer_instance_new(const char *owner)
{
struct snd_timer_instance *timeri;
+
timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
if (timeri == NULL)
return NULL;
@@ -105,15 +107,20 @@ static struct snd_timer_instance *snd_timer_instance_new(char *owner,
INIT_LIST_HEAD(&timeri->slave_list_head);
INIT_LIST_HEAD(&timeri->slave_active_head);
- timeri->timer = timer;
- if (timer && !try_module_get(timer->module)) {
+ return timeri;
+}
+EXPORT_SYMBOL(snd_timer_instance_new);
+
+void snd_timer_instance_free(struct snd_timer_instance *timeri)
+{
+ if (timeri) {
+ if (timeri->private_free)
+ timeri->private_free(timeri);
kfree(timeri->owner);
kfree(timeri);
- return NULL;
}
-
- return timeri;
}
+EXPORT_SYMBOL(snd_timer_instance_free);
/*
* find a timer instance from the given timer id
@@ -160,6 +167,28 @@ static void snd_timer_request(struct snd_timer_id *tid)
#endif
+/* move the slave if it belongs to the master; return 1 if match */
+static int check_matching_master_slave(struct snd_timer_instance *master,
+ struct snd_timer_instance *slave)
+{
+ if (slave->slave_class != master->slave_class ||
+ slave->slave_id != master->slave_id)
+ return 0;
+ if (master->timer->num_instances >= master->timer->max_instances)
+ return -EBUSY;
+ list_move_tail(&slave->open_list, &master->slave_list_head);
+ master->timer->num_instances++;
+ spin_lock_irq(&slave_active_lock);
+ spin_lock(&master->timer->lock);
+ slave->master = master;
+ slave->timer = master->timer;
+ if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
+ list_add_tail(&slave->active_list, &master->slave_active_head);
+ spin_unlock(&master->timer->lock);
+ spin_unlock_irq(&slave_active_lock);
+ return 1;
+}
+
/*
* look for a master instance matching with the slave id of the given slave.
* when found, relink the open_link of the slave.
@@ -170,27 +199,18 @@ static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
+ int err = 0;
/* FIXME: it's really dumb to look up all entries.. */
list_for_each_entry(timer, &snd_timer_list, device_list) {
list_for_each_entry(master, &timer->open_list_head, open_list) {
- if (slave->slave_class == master->slave_class &&
- slave->slave_id == master->slave_id) {
- if (master->timer->num_instances >=
- master->timer->max_instances)
- return -EBUSY;
- list_move_tail(&slave->open_list,
- &master->slave_list_head);
- master->timer->num_instances++;
- spin_lock_irq(&slave_active_lock);
- slave->master = master;
- slave->timer = master->timer;
- spin_unlock_irq(&slave_active_lock);
- return 0;
- }
+ err = check_matching_master_slave(master, slave);
+ if (err != 0) /* match found or error */
+ goto out;
}
}
- return 0;
+ out:
+ return err < 0 ? err : 0;
}
/*
@@ -202,43 +222,29 @@ static int snd_timer_check_slave(struct snd_timer_instance *slave)
static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
+ int err = 0;
/* check all pending slaves */
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
- if (slave->slave_class == master->slave_class &&
- slave->slave_id == master->slave_id) {
- if (master->timer->num_instances >=
- master->timer->max_instances)
- return -EBUSY;
- list_move_tail(&slave->open_list, &master->slave_list_head);
- master->timer->num_instances++;
- spin_lock_irq(&slave_active_lock);
- spin_lock(&master->timer->lock);
- slave->master = master;
- slave->timer = master->timer;
- if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
- list_add_tail(&slave->active_list,
- &master->slave_active_head);
- spin_unlock(&master->timer->lock);
- spin_unlock_irq(&slave_active_lock);
- }
+ err = check_matching_master_slave(master, slave);
+ if (err < 0)
+ break;
}
- return 0;
+ return err < 0 ? err : 0;
}
-static int snd_timer_close_locked(struct snd_timer_instance *timeri,
- struct device **card_devp_to_put);
+static void snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put);
/*
* open a timer instance
* when opening a master, the slave id must be here given.
*/
-int snd_timer_open(struct snd_timer_instance **ti,
- char *owner, struct snd_timer_id *tid,
+int snd_timer_open(struct snd_timer_instance *timeri,
+ struct snd_timer_id *tid,
unsigned int slave_id)
{
struct snd_timer *timer;
- struct snd_timer_instance *timeri = NULL;
struct device *card_dev_to_put = NULL;
int err;
@@ -252,21 +258,17 @@ int snd_timer_open(struct snd_timer_instance **ti,
err = -EINVAL;
goto unlock;
}
- timeri = snd_timer_instance_new(owner, NULL);
- if (!timeri) {
- err = -ENOMEM;
+ if (num_slaves >= MAX_SLAVE_INSTANCES) {
+ err = -EBUSY;
goto unlock;
}
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
+ num_slaves++;
err = snd_timer_check_slave(timeri);
- if (err < 0) {
- snd_timer_close_locked(timeri, &card_dev_to_put);
- timeri = NULL;
- }
- goto unlock;
+ goto list_added;
}
/* open a master instance */
@@ -296,45 +298,40 @@ int snd_timer_open(struct snd_timer_instance **ti,
err = -EBUSY;
goto unlock;
}
- timeri = snd_timer_instance_new(owner, timer);
- if (!timeri) {
- err = -ENOMEM;
+ if (!try_module_get(timer->module)) {
+ err = -EBUSY;
goto unlock;
}
/* take a card refcount for safe disconnection */
- if (timer->card)
+ if (timer->card) {
get_device(&timer->card->card_dev);
- timeri->slave_class = tid->dev_sclass;
- timeri->slave_id = slave_id;
+ card_dev_to_put = &timer->card->card_dev;
+ }
if (list_empty(&timer->open_list_head) && timer->hw.open) {
err = timer->hw.open(timer);
if (err) {
- kfree(timeri->owner);
- kfree(timeri);
- timeri = NULL;
-
- if (timer->card)
- card_dev_to_put = &timer->card->card_dev;
module_put(timer->module);
goto unlock;
}
}
+ timeri->timer = timer;
+ timeri->slave_class = tid->dev_sclass;
+ timeri->slave_id = slave_id;
+
list_add_tail(&timeri->open_list, &timer->open_list_head);
timer->num_instances++;
err = snd_timer_check_master(timeri);
- if (err < 0) {
+list_added:
+ if (err < 0)
snd_timer_close_locked(timeri, &card_dev_to_put);
- timeri = NULL;
- }
unlock:
mutex_unlock(&register_mutex);
/* put_device() is called after unlock for avoiding deadlock */
- if (card_dev_to_put)
+ if (err < 0 && card_dev_to_put)
put_device(card_dev_to_put);
- *ti = timeri;
return err;
}
EXPORT_SYMBOL(snd_timer_open);
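After this refactoring the caller owns the instance, so the lifecycle mirrors the converted callers elsewhere in this patch; a condensed sketch (my_callback, my_data, tid and slave_id stand for caller-specific values):

	struct snd_timer_instance *ti;
	int err;

	ti = snd_timer_instance_new("my-owner");
	if (!ti)
		return -ENOMEM;
	ti->callback = my_callback;
	ti->callback_data = my_data;
	err = snd_timer_open(ti, &tid, slave_id);
	if (err < 0) {
		snd_timer_instance_free(ti);	/* open failed, drop it */
		return err;
	}
	/* ... use the timer ... */
	snd_timer_close(ti);
	snd_timer_instance_free(ti);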
@@ -343,8 +340,8 @@ EXPORT_SYMBOL(snd_timer_open);
* close a timer instance
* call this with register_mutex down.
*/
-static int snd_timer_close_locked(struct snd_timer_instance *timeri,
- struct device **card_devp_to_put)
+static void snd_timer_close_locked(struct snd_timer_instance *timeri,
+ struct device **card_devp_to_put)
{
struct snd_timer *timer = timeri->timer;
struct snd_timer_instance *slave, *tmp;
@@ -355,7 +352,11 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
spin_unlock_irq(&timer->lock);
}
- list_del(&timeri->open_list);
+ if (!list_empty(&timeri->open_list)) {
+ list_del_init(&timeri->open_list);
+ if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+ num_slaves--;
+ }
/* force to stop the timer */
snd_timer_stop(timeri);
@@ -374,6 +375,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
/* remove slave links */
spin_lock_irq(&slave_active_lock);
spin_lock(&timer->lock);
+ timeri->timer = NULL;
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
@@ -391,11 +393,6 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
timer = NULL;
}
- if (timeri->private_free)
- timeri->private_free(timeri);
- kfree(timeri->owner);
- kfree(timeri);
-
if (timer) {
if (list_empty(&timer->open_list_head) && timer->hw.close)
timer->hw.close(timer);
@@ -404,28 +401,24 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri,
*card_devp_to_put = &timer->card->card_dev;
module_put(timer->module);
}
-
- return 0;
}
/*
* close a timer instance
*/
-int snd_timer_close(struct snd_timer_instance *timeri)
+void snd_timer_close(struct snd_timer_instance *timeri)
{
struct device *card_dev_to_put = NULL;
- int err;
if (snd_BUG_ON(!timeri))
- return -ENXIO;
+ return;
mutex_lock(&register_mutex);
- err = snd_timer_close_locked(timeri, &card_dev_to_put);
+ snd_timer_close_locked(timeri, &card_dev_to_put);
mutex_unlock(&register_mutex);
/* put_device() is called after unlock for avoiding deadlock */
if (card_dev_to_put)
put_device(card_dev_to_put);
- return err;
}
EXPORT_SYMBOL(snd_timer_close);
@@ -1474,8 +1467,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
tu = file->private_data;
file->private_data = NULL;
mutex_lock(&tu->ioctl_lock);
- if (tu->timeri)
+ if (tu->timeri) {
snd_timer_close(tu->timeri);
+ snd_timer_instance_free(tu->timeri);
+ }
mutex_unlock(&tu->ioctl_lock);
kfree(tu->queue);
kfree(tu->tqueue);
@@ -1716,6 +1711,7 @@ static int snd_timer_user_tselect(struct file *file,
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
+ snd_timer_instance_free(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
@@ -1725,9 +1721,11 @@ static int snd_timer_user_tselect(struct file *file,
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
- err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
- if (err < 0)
+ tu->timeri = snd_timer_instance_new(str);
+ if (!tu->timeri) {
+ err = -ENOMEM;
goto __err;
+ }
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
@@ -1736,6 +1734,12 @@ static int snd_timer_user_tselect(struct file *file,
tu->timeri->callback_data = (void *)tu;
tu->timeri->disconnect = snd_timer_user_disconnect;
+ err = snd_timer_open(tu->timeri, &tselect.id, current->pid);
+ if (err < 0) {
+ snd_timer_instance_free(tu->timeri);
+ tu->timeri = NULL;
+ }
+
__err:
return err;
}
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index 09932cc98e9d..577c8e03ec4d 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config SND_MPU401_UART
- tristate
- select SND_RAWMIDI
+ tristate
+ select SND_RAWMIDI
config SND_OPL3_LIB
tristate
@@ -90,16 +90,17 @@ config SND_DUMMY
will be called snd-dummy.
config SND_ALOOP
- tristate "Generic loopback driver (PCM)"
- select SND_PCM
- help
- Say 'Y' or 'M' to include support for the PCM loopback device.
+ tristate "Generic loopback driver (PCM)"
+ select SND_PCM
+ select SND_TIMER
+ help
+ Say 'Y' or 'M' to include support for the PCM loopback device.
This module returns played samples back to the user space using
the standard ALSA PCM device. The devices are routed 0->1 and
- 1->0, where first number is the playback PCM device and second
+ 1->0, where first number is the playback PCM device and second
number is the capture device. Module creates two PCM devices and
configured number of substreams (see the pcm_substreams module
- parameter).
+ parameter).
The loopback device allows time synchronization with an external
timing source using the time shift universal control (+-20%
@@ -142,12 +143,12 @@ config SND_MTS64
select SND_RAWMIDI
help
The ESI Miditerminal 4140 is a 4 In 4 Out MIDI Interface with
- additional SMPTE Timecode capabilities for the parallel port.
+ additional SMPTE Timecode capabilities for the parallel port.
Say 'Y' to include support for this device.
To compile this driver as a module, choose 'M' here: the module
- will be called snd-mts64.
+ will be called snd-mts64.
config SND_SERIAL_U16550
tristate "UART16550 serial MIDI driver"
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 9ccdad89c288..0ebfbe70db00 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -28,6 +28,7 @@
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/initval.h>
+#include <sound/timer.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("A loopback soundcard");
@@ -41,6 +42,7 @@ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};
static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8};
static int pcm_notify[SNDRV_CARDS];
+static char *timer_source[SNDRV_CARDS];
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for loopback soundcard.");
@@ -52,11 +54,48 @@ module_param_array(pcm_substreams, int, NULL, 0444);
MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-8) for loopback driver.");
module_param_array(pcm_notify, int, NULL, 0444);
MODULE_PARM_DESC(pcm_notify, "Break capture when PCM format/rate/channels changes.");
+module_param_array(timer_source, charp, NULL, 0444);
+MODULE_PARM_DESC(timer_source, "Sound card name or number and device/subdevice number of timer to be used. Empty string for jiffies timer [default].");
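For example, a hypothetical invocation could be modprobe snd-aloop timer_source=hw:1,0,0 to slave the loopback to the PCM timer of card 1, device 0, subdevice 0; leaving the parameter empty (the default) keeps the jiffies timer.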
#define NO_PITCH 100000
+#define CABLE_VALID_PLAYBACK BIT(SNDRV_PCM_STREAM_PLAYBACK)
+#define CABLE_VALID_CAPTURE BIT(SNDRV_PCM_STREAM_CAPTURE)
+#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK | CABLE_VALID_CAPTURE)
+
+struct loopback_cable;
struct loopback_pcm;
+struct loopback_ops {
+ /* optional
+ * call in loopback->cable_lock
+ */
+ int (*open)(struct loopback_pcm *dpcm);
+ /* required
+ * call in cable->lock
+ */
+ int (*start)(struct loopback_pcm *dpcm);
+ /* required
+ * call in cable->lock
+ */
+ int (*stop)(struct loopback_pcm *dpcm);
+ /* optional */
+ int (*stop_sync)(struct loopback_pcm *dpcm);
+ /* optional */
+ int (*close_substream)(struct loopback_pcm *dpcm);
+ /* optional
+ * call in loopback->cable_lock
+ */
+ int (*close_cable)(struct loopback_pcm *dpcm);
+ /* optional
+ * call in cable->lock
+ */
+ unsigned int (*pos_update)(struct loopback_cable *cable);
+ /* optional */
+ void (*dpcm_info)(struct loopback_pcm *dpcm,
+ struct snd_info_buffer *buffer);
+};
+
struct loopback_cable {
spinlock_t lock;
struct loopback_pcm *streams[2];
@@ -65,6 +104,15 @@ struct loopback_cable {
unsigned int valid;
unsigned int running;
unsigned int pause;
+ /* timer specific */
+ struct loopback_ops *ops;
+ /* If sound timer is used */
+ struct {
+ int stream;
+ struct snd_timer_id id;
+ struct tasklet_struct event_tasklet;
+ struct snd_timer_instance *instance;
+ } snd_timer;
};
struct loopback_setup {
@@ -85,6 +133,7 @@ struct loopback {
struct loopback_cable *cables[MAX_PCM_SUBSTREAMS][2];
struct snd_pcm *pcm[2];
struct loopback_setup setup[MAX_PCM_SUBSTREAMS][2];
+ const char *timer_source;
};
struct loopback_pcm {
@@ -102,10 +151,13 @@ struct loopback_pcm {
/* flags */
unsigned int period_update_pending :1;
/* timer stuff */
- unsigned int irq_pos; /* fractional IRQ position */
- unsigned int period_size_frac;
+ unsigned int irq_pos; /* fractional IRQ position in jiffies
+ * ticks
+ */
+ unsigned int period_size_frac; /* period size in jiffies ticks */
unsigned int last_drift;
unsigned long last_jiffies;
+ /* If jiffies timer is used */
struct timer_list timer;
};
@@ -153,7 +205,7 @@ static inline unsigned int get_rate_shift(struct loopback_pcm *dpcm)
}
/* call in cable->lock */
-static void loopback_timer_start(struct loopback_pcm *dpcm)
+static int loopback_jiffies_timer_start(struct loopback_pcm *dpcm)
{
unsigned long tick;
unsigned int rate_shift = get_rate_shift(dpcm);
@@ -169,23 +221,102 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
tick = dpcm->period_size_frac - dpcm->irq_pos;
tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
mod_timer(&dpcm->timer, jiffies + tick);
+
+ return 0;
}
/* call in cable->lock */
-static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
+static int loopback_snd_timer_start(struct loopback_pcm *dpcm)
+{
+ struct loopback_cable *cable = dpcm->cable;
+ int err;
+
+	/* The loopback device has to use the same period as the timer card.
+	 * Therefore wake up for each snd_pcm_period_elapsed() call of the
+	 * timer card.
+	 */
+ err = snd_timer_start(cable->snd_timer.instance, 1);
+ if (err < 0) {
+		/* Do not report an error if the timer is already running;
+		 * this happens e.g. when the opposite substream of the same
+		 * cable started it first.
+		 */
+ if (err == -EBUSY)
+ return 0;
+
+ pcm_err(dpcm->substream->pcm,
+ "snd_timer_start(%d,%d,%d) failed with %d",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ err);
+ }
+
+ return err;
+}
+
+/* call in cable->lock */
+static inline int loopback_jiffies_timer_stop(struct loopback_pcm *dpcm)
{
del_timer(&dpcm->timer);
dpcm->timer.expires = 0;
+
+ return 0;
}
-static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
+/* call in cable->lock */
+static int loopback_snd_timer_stop(struct loopback_pcm *dpcm)
+{
+ struct loopback_cable *cable = dpcm->cable;
+ int err;
+
+ /* only stop if both devices (playback and capture) are not running */
+ if (cable->running ^ cable->pause)
+ return 0;
+
+ err = snd_timer_stop(cable->snd_timer.instance);
+ if (err < 0) {
+ pcm_err(dpcm->substream->pcm,
+ "snd_timer_stop(%d,%d,%d) failed with %d",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ err);
+ }
+
+ return err;
+}
+
+static inline int loopback_jiffies_timer_stop_sync(struct loopback_pcm *dpcm)
{
del_timer_sync(&dpcm->timer);
+
+ return 0;
}
-#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
-#define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE)
-#define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
+/* call in loopback->cable_lock */
+static int loopback_snd_timer_close_cable(struct loopback_pcm *dpcm)
+{
+ struct loopback_cable *cable = dpcm->cable;
+
+ /* snd_timer was not opened */
+ if (!cable->snd_timer.instance)
+ return 0;
+
+	/* This is only called from free_cable() after the other stream has
+	 * already been closed. The other stream cannot be reopened as long
+	 * as loopback->cable_lock is held, so there is no need to take
+	 * cable->lock.
+	 */
+ snd_timer_close(cable->snd_timer.instance);
+
+	/* wait until the drain tasklet has finished, if it was scheduled */
+ tasklet_kill(&cable->snd_timer.event_tasklet);
+
+ snd_timer_instance_free(cable->snd_timer.instance);
+ memset(&cable->snd_timer, 0, sizeof(cable->snd_timer));
+
+ return 0;
+}
static int loopback_check_format(struct loopback_cable *cable, int stream)
{
@@ -249,7 +380,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_pcm_runtime *runtime = substream->runtime;
struct loopback_pcm *dpcm = runtime->private_data;
struct loopback_cable *cable = dpcm->cable;
- int err, stream = 1 << substream->stream;
+ int err = 0, stream = 1 << substream->stream;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -262,7 +393,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
cable->running |= stream;
cable->pause &= ~stream;
- loopback_timer_start(dpcm);
+ err = cable->ops->start(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -271,7 +402,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
cable->running &= ~stream;
cable->pause &= ~stream;
- loopback_timer_stop(dpcm);
+ err = cable->ops->stop(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -280,7 +411,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_SUSPEND:
spin_lock(&cable->lock);
cable->pause |= stream;
- loopback_timer_stop(dpcm);
+ err = cable->ops->stop(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -290,7 +421,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
spin_lock(&cable->lock);
dpcm->last_jiffies = jiffies;
cable->pause &= ~stream;
- loopback_timer_start(dpcm);
+ err = cable->ops->start(dpcm);
spin_unlock(&cable->lock);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
loopback_active_notify(dpcm);
@@ -298,7 +429,7 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
default:
return -EINVAL;
}
- return 0;
+ return err;
}
static void params_change(struct snd_pcm_substream *substream)
@@ -312,6 +443,13 @@ static void params_change(struct snd_pcm_substream *substream)
cable->hw.rate_max = runtime->rate;
cable->hw.channels_min = runtime->channels;
cable->hw.channels_max = runtime->channels;
+
+ if (cable->snd_timer.instance) {
+ cable->hw.period_bytes_min =
+ frames_to_bytes(runtime, runtime->period_size);
+ cable->hw.period_bytes_max = cable->hw.period_bytes_min;
+ }
+
}
static int loopback_prepare(struct snd_pcm_substream *substream)
@@ -319,9 +457,13 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct loopback_pcm *dpcm = runtime->private_data;
struct loopback_cable *cable = dpcm->cable;
- int bps, salign;
+ int err, bps, salign;
- loopback_timer_stop_sync(dpcm);
+ if (cable->ops->stop_sync) {
+ err = cable->ops->stop_sync(dpcm);
+ if (err < 0)
+ return err;
+ }
salign = (snd_pcm_format_physical_width(runtime->format) *
runtime->channels) / 8;
@@ -457,7 +599,8 @@ static inline void bytepos_finish(struct loopback_pcm *dpcm,
}
/* call in cable->lock */
-static unsigned int loopback_pos_update(struct loopback_cable *cable)
+static unsigned int loopback_jiffies_timer_pos_update
+ (struct loopback_cable *cable)
{
struct loopback_pcm *dpcm_play =
cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
@@ -510,14 +653,15 @@ static unsigned int loopback_pos_update(struct loopback_cable *cable)
return running;
}
-static void loopback_timer_function(struct timer_list *t)
+static void loopback_jiffies_timer_function(struct timer_list *t)
{
struct loopback_pcm *dpcm = from_timer(dpcm, t, timer);
unsigned long flags;
spin_lock_irqsave(&dpcm->cable->lock, flags);
- if (loopback_pos_update(dpcm->cable) & (1 << dpcm->substream->stream)) {
- loopback_timer_start(dpcm);
+ if (loopback_jiffies_timer_pos_update(dpcm->cable) &
+ (1 << dpcm->substream->stream)) {
+ loopback_jiffies_timer_start(dpcm);
if (dpcm->period_update_pending) {
dpcm->period_update_pending = 0;
spin_unlock_irqrestore(&dpcm->cable->lock, flags);
@@ -529,6 +673,193 @@ static void loopback_timer_function(struct timer_list *t)
spin_unlock_irqrestore(&dpcm->cable->lock, flags);
}
+/* call in cable->lock */
+static int loopback_snd_timer_check_resolution(struct snd_pcm_runtime *runtime,
+ unsigned long resolution)
+{
+ if (resolution != runtime->timer_resolution) {
+ struct loopback_pcm *dpcm = runtime->private_data;
+ struct loopback_cable *cable = dpcm->cable;
+		/* Worst-case estimation of possible values for resolution:
+		 * resolution <= (512 * 1024) frames / 8 kHz in nsec
+		 * resolution <= 65,536,000,000 nsec
+		 *
+		 * period_size <= 65,536,000,000 nsec / 1000 nsec/usec * 192 kHz +
+		 *  500,000
+		 * period_size <= 12,582,912,000,000  < 64 bit
+		 *  / 1,000,000 usec/sec
+		 */
+ snd_pcm_uframes_t period_size_usec =
+ resolution / 1000 * runtime->rate;
+ /* round to nearest sample rate */
+ snd_pcm_uframes_t period_size =
+ (period_size_usec + 500 * 1000) / (1000 * 1000);
+
+ pcm_err(dpcm->substream->pcm,
+ "Period size (%lu frames) of loopback device is not corresponding to timer resolution (%lu nsec = %lu frames) of card timer %d,%d,%d. Use period size of %lu frames for loopback device.",
+ runtime->period_size, resolution, period_size,
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ period_size);
+ return -EINVAL;
+ }
+ return 0;
+}
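A worked example of the suggestion computed above, under assumed values (rate = 48000, timer resolution equivalent to 512 frames):

	/* resolution = 10,666,667 nsec (512 frames at 48 kHz):
	 *   period_size_usec = 10,666,667 / 1000 * 48000 = 511,968,000
	 *   period_size      = (511,968,000 + 500,000) / 1,000,000 = 512
	 * so the error message asks the user for a 512-frame period.
	 */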
+
+static void loopback_snd_timer_period_elapsed(struct loopback_cable *cable,
+ int event,
+ unsigned long resolution)
+{
+ struct loopback_pcm *dpcm_play, *dpcm_capt;
+ struct snd_pcm_substream *substream_play, *substream_capt;
+ struct snd_pcm_runtime *valid_runtime;
+ unsigned int running, elapsed_bytes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cable->lock, flags);
+ running = cable->running ^ cable->pause;
+ /* no need to do anything if no stream is running */
+ if (!running) {
+ spin_unlock_irqrestore(&cable->lock, flags);
+ return;
+ }
+
+ dpcm_play = cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
+ dpcm_capt = cable->streams[SNDRV_PCM_STREAM_CAPTURE];
+ substream_play = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
+ dpcm_play->substream : NULL;
+ substream_capt = (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) ?
+ dpcm_capt->substream : NULL;
+
+ if (event == SNDRV_TIMER_EVENT_MSTOP) {
+ if (!dpcm_play ||
+ dpcm_play->substream->runtime->status->state !=
+ SNDRV_PCM_STATE_DRAINING) {
+ spin_unlock_irqrestore(&cable->lock, flags);
+ return;
+ }
+ }
+
+ valid_runtime = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
+ dpcm_play->substream->runtime :
+ dpcm_capt->substream->runtime;
+
+ /* resolution is only valid for SNDRV_TIMER_EVENT_TICK events */
+ if (event == SNDRV_TIMER_EVENT_TICK) {
+ /* The hardware rules guarantee that playback and capture period
+ * are the same. Therefore only one device has to be checked
+ * here.
+ */
+ if (loopback_snd_timer_check_resolution(valid_runtime,
+ resolution) < 0) {
+ spin_unlock_irqrestore(&cable->lock, flags);
+ if (substream_play)
+ snd_pcm_stop_xrun(substream_play);
+ if (substream_capt)
+ snd_pcm_stop_xrun(substream_capt);
+ return;
+ }
+ }
+
+ elapsed_bytes = frames_to_bytes(valid_runtime,
+ valid_runtime->period_size);
+ /* The same timer interrupt is used for playback and capture device */
+ if ((running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) &&
+ (running & (1 << SNDRV_PCM_STREAM_CAPTURE))) {
+ copy_play_buf(dpcm_play, dpcm_capt, elapsed_bytes);
+ bytepos_finish(dpcm_play, elapsed_bytes);
+ bytepos_finish(dpcm_capt, elapsed_bytes);
+ } else if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
+ bytepos_finish(dpcm_play, elapsed_bytes);
+ } else if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
+ clear_capture_buf(dpcm_capt, elapsed_bytes);
+ bytepos_finish(dpcm_capt, elapsed_bytes);
+ }
+ spin_unlock_irqrestore(&cable->lock, flags);
+
+ if (substream_play)
+ snd_pcm_period_elapsed(substream_play);
+ if (substream_capt)
+ snd_pcm_period_elapsed(substream_capt);
+}
+
+static void loopback_snd_timer_function(struct snd_timer_instance *timeri,
+ unsigned long resolution,
+ unsigned long ticks)
+{
+ struct loopback_cable *cable = timeri->callback_data;
+
+ loopback_snd_timer_period_elapsed(cable, SNDRV_TIMER_EVENT_TICK,
+ resolution);
+}
+
+static void loopback_snd_timer_tasklet(unsigned long arg)
+{
+ struct snd_timer_instance *timeri = (struct snd_timer_instance *)arg;
+ struct loopback_cable *cable = timeri->callback_data;
+
+ loopback_snd_timer_period_elapsed(cable, SNDRV_TIMER_EVENT_MSTOP, 0);
+}
+
+static void loopback_snd_timer_event(struct snd_timer_instance *timeri,
+ int event,
+ struct timespec *tstamp,
+ unsigned long resolution)
+{
+	/* Do not take cable->lock here because timer->lock is already held.
+	 * Other functions take cable->lock first and then timer->lock, e.g.
+	 *	loopback_trigger()
+	 *	  spin_lock(&cable->lock)
+	 *	  loopback_snd_timer_start()
+	 *	    snd_timer_start()
+	 *	      spin_lock(&timer->lock)
+	 * Taking the locks in the opposite order here could therefore
+	 * result in a deadlock.
+	 */
+
+ if (event == SNDRV_TIMER_EVENT_MSTOP) {
+ struct loopback_cable *cable = timeri->callback_data;
+
+		/* The timer's sound card was stopped, so there will be no
+		 * further timer callbacks. Because of this, forward the
+		 * audio data from here if in the draining state. While
+		 * still in the running state, streaming is aborted by the
+		 * usual timeout instead; it should not be aborted here,
+		 * because the timer's sound card may merely be recovering
+		 * and the timer may come back soon.
+		 * The tasklet scheduled here runs loopback_snd_timer_tasklet().
+		 */
+ tasklet_schedule(&cable->snd_timer.event_tasklet);
+ }
+}
+
+static void loopback_jiffies_timer_dpcm_info(struct loopback_pcm *dpcm,
+ struct snd_info_buffer *buffer)
+{
+ snd_iprintf(buffer, " update_pending:\t%u\n",
+ dpcm->period_update_pending);
+ snd_iprintf(buffer, " irq_pos:\t\t%u\n", dpcm->irq_pos);
+ snd_iprintf(buffer, " period_frac:\t%u\n", dpcm->period_size_frac);
+ snd_iprintf(buffer, " last_jiffies:\t%lu (%lu)\n",
+ dpcm->last_jiffies, jiffies);
+ snd_iprintf(buffer, " timer_expires:\t%lu\n", dpcm->timer.expires);
+}
+
+static void loopback_snd_timer_dpcm_info(struct loopback_pcm *dpcm,
+ struct snd_info_buffer *buffer)
+{
+ struct loopback_cable *cable = dpcm->cable;
+
+ snd_iprintf(buffer, " sound timer:\thw:%d,%d,%d\n",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice);
+ snd_iprintf(buffer, " timer open:\t\t%s\n",
+ (cable->snd_timer.stream == SNDRV_PCM_STREAM_CAPTURE) ?
+ "capture" : "playback");
+}
+
static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -536,7 +867,8 @@ static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t pos;
spin_lock(&dpcm->cable->lock);
- loopback_pos_update(dpcm->cable);
+ if (dpcm->cable->ops->pos_update)
+ dpcm->cable->ops->pos_update(dpcm->cable);
pos = dpcm->buf_pos;
spin_unlock(&dpcm->cable->lock);
return bytes_to_frames(runtime, pos);
@@ -576,8 +908,7 @@ static void loopback_runtime_free(struct snd_pcm_runtime *runtime)
static int loopback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}
static int loopback_hw_free(struct snd_pcm_substream *substream)
@@ -589,7 +920,7 @@ static int loopback_hw_free(struct snd_pcm_substream *substream)
mutex_lock(&dpcm->loopback->cable_lock);
cable->valid &= ~(1 << substream->stream);
mutex_unlock(&dpcm->loopback->cable_lock);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static unsigned int get_cable_index(struct snd_pcm_substream *substream)
@@ -647,6 +978,23 @@ static int rule_channels(struct snd_pcm_hw_params *params,
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
+static int rule_period_bytes(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct loopback_pcm *dpcm = rule->private;
+ struct loopback_cable *cable = dpcm->cable;
+ struct snd_interval t;
+
+ mutex_lock(&dpcm->loopback->cable_lock);
+ t.min = cable->hw.period_bytes_min;
+ t.max = cable->hw.period_bytes_max;
+ mutex_unlock(&dpcm->loopback->cable_lock);
+ t.openmin = 0;
+ t.openmax = 0;
+ t.integer = 0;
+ return snd_interval_refine(hw_param_interval(params, rule->var), &t);
+}
+
static void free_cable(struct snd_pcm_substream *substream)
{
struct loopback *loopback = substream->private_data;
@@ -662,12 +1010,183 @@ static void free_cable(struct snd_pcm_substream *substream)
cable->streams[substream->stream] = NULL;
spin_unlock_irq(&cable->lock);
} else {
+ struct loopback_pcm *dpcm = substream->runtime->private_data;
+
+ if (cable->ops && cable->ops->close_cable && dpcm)
+ cable->ops->close_cable(dpcm);
/* free the cable */
loopback->cables[substream->number][dev] = NULL;
kfree(cable);
}
}
+static int loopback_jiffies_timer_open(struct loopback_pcm *dpcm)
+{
+ timer_setup(&dpcm->timer, loopback_jiffies_timer_function, 0);
+
+ return 0;
+}
+
+static struct loopback_ops loopback_jiffies_timer_ops = {
+ .open = loopback_jiffies_timer_open,
+ .start = loopback_jiffies_timer_start,
+ .stop = loopback_jiffies_timer_stop,
+ .stop_sync = loopback_jiffies_timer_stop_sync,
+ .close_substream = loopback_jiffies_timer_stop_sync,
+ .pos_update = loopback_jiffies_timer_pos_update,
+ .dpcm_info = loopback_jiffies_timer_dpcm_info,
+};
+
+static int loopback_parse_timer_id(const char *str,
+ struct snd_timer_id *tid)
+{
+ /* [<pref>:](<card name>|<card idx>)[{.,}<dev idx>[{.,}<subdev idx>]] */
+ const char * const sep_dev = ".,";
+ const char * const sep_pref = ":";
+ const char *name = str;
+ char *sep, save = '\0';
+ int card_idx = 0, dev = 0, subdev = 0;
+ int err;
+
+ sep = strpbrk(str, sep_pref);
+ if (sep)
+ name = sep + 1;
+ sep = strpbrk(name, sep_dev);
+ if (sep) {
+ save = *sep;
+ *sep = '\0';
+ }
+ err = kstrtoint(name, 0, &card_idx);
+ if (err == -EINVAL) {
+ /* Must be the name, not number */
+ for (card_idx = 0; card_idx < snd_ecards_limit; card_idx++) {
+ struct snd_card *card = snd_card_ref(card_idx);
+
+ if (card) {
+ if (!strcmp(card->id, name))
+ err = 0;
+ snd_card_unref(card);
+ }
+ if (!err)
+ break;
+ }
+ }
+ if (sep) {
+ *sep = save;
+ if (!err) {
+ char *sep2, save2 = '\0';
+
+ sep2 = strpbrk(sep + 1, sep_dev);
+ if (sep2) {
+ save2 = *sep2;
+ *sep2 = '\0';
+ }
+ err = kstrtoint(sep + 1, 0, &dev);
+ if (sep2) {
+ *sep2 = save2;
+ if (!err)
+ err = kstrtoint(sep2 + 1, 0, &subdev);
+ }
+ }
+ }
+ if (!err && tid) {
+ tid->card = card_idx;
+ tid->device = dev;
+ tid->subdevice = subdev;
+ }
+ return err;
+}
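A few inputs the parser above accepts (illustrative; the named form assumes a card with that id exists):

	/* "1"          -> card 1, device 0, subdevice 0
	 * "hw:1,0,0"   -> "hw" prefix skipped; card 1, device 0, subdevice 0
	 * "Loopback.2" -> card whose id is "Loopback", device 2
	 */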
+
+/* call in loopback->cable_lock */
+static int loopback_snd_timer_open(struct loopback_pcm *dpcm)
+{
+ int err = 0;
+ struct snd_timer_id tid = {
+ .dev_class = SNDRV_TIMER_CLASS_PCM,
+ .dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION,
+ };
+ struct snd_timer_instance *timeri;
+ struct loopback_cable *cable = dpcm->cable;
+
+ /* check if timer was already opened. It is only opened once
+ * per playback and capture subdevice (aka cable).
+ */
+ if (cable->snd_timer.instance)
+ goto exit;
+
+ err = loopback_parse_timer_id(dpcm->loopback->timer_source, &tid);
+ if (err < 0) {
+ pcm_err(dpcm->substream->pcm,
+ "Parsing timer source \'%s\' failed with %d",
+ dpcm->loopback->timer_source, err);
+ goto exit;
+ }
+
+ cable->snd_timer.stream = dpcm->substream->stream;
+ cable->snd_timer.id = tid;
+
+ timeri = snd_timer_instance_new(dpcm->loopback->card->id);
+ if (!timeri) {
+ err = -ENOMEM;
+ goto exit;
+ }
+	/* The callback has to be called from another tasklet. If
+	 * SNDRV_TIMER_IFLG_FAST were specified, it would be called from the
+	 * snd_pcm_period_elapsed() call of the selected sound card.
+	 * snd_pcm_period_elapsed() holds snd_pcm_stream_lock_irqsave().
+	 * Since our callback loopback_snd_timer_function() also calls
+	 * snd_pcm_period_elapsed(), which takes snd_pcm_stream_lock_irqsave(),
+	 * this would end up in a deadlock.
+	 */
+ timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
+ timeri->callback = loopback_snd_timer_function;
+ timeri->callback_data = (void *)cable;
+ timeri->ccallback = loopback_snd_timer_event;
+
+ /* initialise a tasklet used for draining */
+ tasklet_init(&cable->snd_timer.event_tasklet,
+ loopback_snd_timer_tasklet, (unsigned long)timeri);
+
+	/* The mutex loopback->cable_lock is kept locked, so snd_timer_open()
+	 * cannot be called a second time by the other device of the same
+	 * cable. The following race therefore cannot happen:
+ * [proc1] Call loopback_timer_open() ->
+ * Unlock cable->lock for snd_timer_close/open() call
+ * [proc2] Call loopback_timer_open() -> snd_timer_open(),
+ * snd_timer_start()
+ * [proc1] Call snd_timer_open() and overwrite running timer
+ * instance
+ */
+ err = snd_timer_open(timeri, &cable->snd_timer.id, current->pid);
+ if (err < 0) {
+ pcm_err(dpcm->substream->pcm,
+ "snd_timer_open (%d,%d,%d) failed with %d",
+ cable->snd_timer.id.card,
+ cable->snd_timer.id.device,
+ cable->snd_timer.id.subdevice,
+ err);
+ snd_timer_instance_free(timeri);
+ goto exit;
+ }
+
+ cable->snd_timer.instance = timeri;
+
+exit:
+ return err;
+}
+
+/* stop_sync() is not required for the sound timer because it does not need
+ * to be restarted in loopback_prepare() on XRUN recovery.
+ */
+static struct loopback_ops loopback_snd_timer_ops = {
+ .open = loopback_snd_timer_open,
+ .start = loopback_snd_timer_start,
+ .stop = loopback_snd_timer_stop,
+ .close_cable = loopback_snd_timer_close_cable,
+ .dpcm_info = loopback_snd_timer_dpcm_info,
+};
+
static int loopback_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -685,7 +1204,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
}
dpcm->loopback = loopback;
dpcm->substream = substream;
- timer_setup(&dpcm->timer, loopback_timer_function, 0);
cable = loopback->cables[substream->number][dev];
if (!cable) {
@@ -696,9 +1214,20 @@ static int loopback_open(struct snd_pcm_substream *substream)
}
spin_lock_init(&cable->lock);
cable->hw = loopback_pcm_hardware;
+ if (loopback->timer_source)
+ cable->ops = &loopback_snd_timer_ops;
+ else
+ cable->ops = &loopback_jiffies_timer_ops;
loopback->cables[substream->number][dev] = cable;
}
dpcm->cable = cable;
+ runtime->private_data = dpcm;
+
+ if (cable->ops->open) {
+ err = cable->ops->open(dpcm);
+ if (err < 0)
+ goto unlock;
+ }
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
@@ -724,7 +1253,22 @@ static int loopback_open(struct snd_pcm_substream *substream)
if (err < 0)
goto unlock;
- runtime->private_data = dpcm;
+	/* When a sound timer is used, the period time of both devices of the
+	 * same loop has to be the same. This rule only takes effect if a
+	 * sound timer was chosen.
+	 */
+ if (cable->snd_timer.instance) {
+ err = snd_pcm_hw_rule_add(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ rule_period_bytes, dpcm,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, -1);
+ if (err < 0)
+ goto unlock;
+ }
+
+	/* loopback_runtime_free() must not be called if kfree(dpcm) was
+	 * already called here; otherwise it would end up as a double free.
+	 */
runtime->private_free = loopback_runtime_free;
if (get_notify(dpcm))
runtime->hw = loopback_pcm_hardware;
@@ -748,12 +1292,14 @@ static int loopback_close(struct snd_pcm_substream *substream)
{
struct loopback *loopback = substream->private_data;
struct loopback_pcm *dpcm = substream->runtime->private_data;
+ int err = 0;
- loopback_timer_stop_sync(dpcm);
+ if (dpcm->cable->ops->close_substream)
+ err = dpcm->cable->ops->close_substream(dpcm);
mutex_lock(&loopback->cable_lock);
free_cable(substream);
mutex_unlock(&loopback->cable_lock);
- return 0;
+ return err;
}
static const struct snd_pcm_ops loopback_pcm_ops = {
@@ -765,7 +1311,6 @@ static const struct snd_pcm_ops loopback_pcm_ops = {
.prepare = loopback_prepare,
.trigger = loopback_trigger,
.pointer = loopback_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int loopback_pcm_new(struct loopback *loopback,
@@ -780,6 +1325,8 @@ static int loopback_pcm_new(struct loopback *loopback,
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &loopback_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &loopback_pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
pcm->private_data = loopback;
pcm->info_flags = 0;
@@ -1076,13 +1623,8 @@ static void print_dpcm_info(struct snd_info_buffer *buffer,
snd_iprintf(buffer, " bytes_per_sec:\t%u\n", dpcm->pcm_bps);
snd_iprintf(buffer, " sample_align:\t%u\n", dpcm->pcm_salign);
snd_iprintf(buffer, " rate_shift:\t\t%u\n", dpcm->pcm_rate_shift);
- snd_iprintf(buffer, " update_pending:\t%u\n",
- dpcm->period_update_pending);
- snd_iprintf(buffer, " irq_pos:\t\t%u\n", dpcm->irq_pos);
- snd_iprintf(buffer, " period_frac:\t%u\n", dpcm->period_size_frac);
- snd_iprintf(buffer, " last_jiffies:\t%lu (%lu)\n",
- dpcm->last_jiffies, jiffies);
- snd_iprintf(buffer, " timer_expires:\t%lu\n", dpcm->timer.expires);
+ if (dpcm->cable->ops->dpcm_info)
+ dpcm->cable->ops->dpcm_info(dpcm, buffer);
}
static void print_substream_info(struct snd_info_buffer *buffer,
@@ -1118,7 +1660,7 @@ static void print_cable_info(struct snd_info_entry *entry,
mutex_unlock(&loopback->cable_lock);
}
-static int loopback_proc_new(struct loopback *loopback, int cidx)
+static int loopback_cable_proc_new(struct loopback *loopback, int cidx)
{
char name[32];
@@ -1127,6 +1669,48 @@ static int loopback_proc_new(struct loopback *loopback, int cidx)
print_cable_info);
}
+static void loopback_set_timer_source(struct loopback *loopback,
+ const char *value)
+{
+ if (loopback->timer_source) {
+ devm_kfree(loopback->card->dev, loopback->timer_source);
+ loopback->timer_source = NULL;
+ }
+ if (value && *value)
+ loopback->timer_source = devm_kstrdup(loopback->card->dev,
+ value, GFP_KERNEL);
+}
+
+static void print_timer_source_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct loopback *loopback = entry->private_data;
+
+ mutex_lock(&loopback->cable_lock);
+ snd_iprintf(buffer, "%s\n",
+ loopback->timer_source ? loopback->timer_source : "");
+ mutex_unlock(&loopback->cable_lock);
+}
+
+static void change_timer_source_info(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct loopback *loopback = entry->private_data;
+ char line[64];
+
+ mutex_lock(&loopback->cable_lock);
+ if (!snd_info_get_line(buffer, line, sizeof(line)))
+ loopback_set_timer_source(loopback, strim(line));
+ mutex_unlock(&loopback->cable_lock);
+}
+
+static int loopback_timer_source_proc_new(struct loopback *loopback)
+{
+ return snd_card_rw_proc_new(loopback->card, "timer_source", loopback,
+ print_timer_source_info,
+ change_timer_source_info);
+}
+
static int loopback_probe(struct platform_device *devptr)
{
struct snd_card *card;
@@ -1146,6 +1730,8 @@ static int loopback_probe(struct platform_device *devptr)
pcm_substreams[dev] = MAX_PCM_SUBSTREAMS;
loopback->card = card;
+ loopback_set_timer_source(loopback, timer_source[dev]);
+
mutex_init(&loopback->cable_lock);
err = loopback_pcm_new(loopback, 0, pcm_substreams[dev]);
@@ -1157,8 +1743,9 @@ static int loopback_probe(struct platform_device *devptr)
err = loopback_mixer_new(loopback, pcm_notify[dev] ? 1 : 0);
if (err < 0)
goto __nodev;
- loopback_proc_new(loopback, 0);
- loopback_proc_new(loopback, 1);
+ loopback_cable_proc_new(loopback, 0);
+ loopback_cable_proc_new(loopback, 1);
+ loopback_timer_source_proc_new(loopback);
strcpy(card->driver, "Loopback");
strcpy(card->shortname, "Loopback");
sprintf(card->longname, "Loopback %i", dev + 1);
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index aee7c04d49e5..022a0db692e0 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -702,7 +702,7 @@ static int snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
if (!fake_buffer) {
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
0, 64*1024);
}
return 0;
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index a3c1c064d1b5..70a6d1832698 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1242,7 +1242,7 @@ snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device)
ml403_ac97cr->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64 * 1024,
128 * 1024);
return 0;
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index 8f0f05bbc081..f91316bf01cb 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -352,8 +352,8 @@ int snd_pcsp_new_pcm(struct snd_pcsp *chip)
snd_pcm_lib_preallocate_pages_for_all(chip->pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data
- (GFP_KERNEL), PCSP_BUFFER_SIZE,
+ NULL,
+ PCSP_BUFFER_SIZE,
PCSP_BUFFER_SIZE);
return 0;
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index 4705c50fbf4f..f17e0a76c73c 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -778,8 +778,7 @@ static snd_pcm_uframes_t vx_pcm_playback_pointer(struct snd_pcm_substream *subs)
static int vx_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_32_buffer
- (subs, params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}
/*
@@ -787,7 +786,7 @@ static int vx_pcm_hw_params(struct snd_pcm_substream *subs,
*/
static int vx_pcm_hw_free(struct snd_pcm_substream *subs)
{
- return snd_pcm_lib_free_vmalloc_buffer(subs);
+ return snd_pcm_lib_free_pages(subs);
}
/*
@@ -867,7 +866,6 @@ static const struct snd_pcm_ops vx_pcm_playback_ops = {
.prepare = vx_pcm_prepare,
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_playback_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -1088,7 +1086,6 @@ static const struct snd_pcm_ops vx_pcm_capture_ops = {
.prepare = vx_pcm_prepare,
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_capture_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -1233,6 +1230,9 @@ int snd_vx_pcm_new(struct vx_core *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &vx_pcm_playback_ops);
if (ins)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &vx_pcm_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ snd_dma_continuous_data(GFP_KERNEL | GFP_DMA32),
+ 0, 0);
pcm->private_data = chip;
pcm->private_free = snd_vx_pcm_free;
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index b0a904cdb932..995c2cefc222 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -77,7 +77,7 @@ config SND_BEBOB
tristate "BridgeCo DM1000/DM1100/DM1500 with BeBoB firmware"
select SND_FIREWIRE_LIB
select SND_HWDEP
- help
+ help
Say Y here to include support for FireWire devices based
on BridgeCo DM1000/DM1100/DM1500 with BeBoB firmware:
* Edirol FA-66/FA-101
@@ -111,8 +111,8 @@ config SND_BEBOB
* M-Audio FireWire 1814/ProjectMix IO
* Digidesign Mbox 2 Pro
- To compile this driver as a module, choose M here: the module
- will be called snd-bebob.
+ To compile this driver as a module, choose M here: the module
+ will be called snd-bebob.
config SND_FIREWIRE_DIGI00X
tristate "Digidesign Digi 002/003 family support"
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index e50e28f77e74..37d38efb4c87 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
@@ -52,10 +53,6 @@
#define CIP_FMT_AM 0x10
#define AMDTP_FDF_NO_DATA 0xff
-/* TODO: make these configurable */
-#define INTERRUPT_INTERVAL 16
-#define QUEUE_LENGTH 48
-
// For iso header, tstamp and 2 CIP header.
#define IR_CTX_HEADER_SIZE_CIP 16
// For iso header and tstamp.
@@ -180,6 +177,8 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
struct snd_pcm_hardware *hw = &runtime->hw;
+ unsigned int ctx_header_size;
+ unsigned int maximum_usec_per_period;
int err;
hw->info = SNDRV_PCM_INFO_BATCH |
@@ -200,19 +199,36 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
hw->period_bytes_max = hw->period_bytes_min * 2048;
hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
- /*
- * Currently firewire-lib processes 16 packets in one software
- * interrupt callback. This equals to 2msec but actually the
- * interval of the interrupts has a jitter.
- * Additionally, even if adding a constraint to fit period size to
- * 2msec, actual calculated frames per period doesn't equal to 2msec,
- * depending on sampling rate.
- * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec.
- * Here let us use 5msec for safe period interrupt.
- */
+	// The Linux driver for the 1394 OHCI controller voluntarily flushes
+	// an isoc context when the total size of accumulated context headers
+	// reaches PAGE_SIZE. This kicks the tasklet for the isoc context and
+	// brings the callback in the middle of scheduled interrupts.
+	// Although AMDTP streams in the same domain use the same events per
+	// IRQ, use the largest size of context header between IT/IR contexts.
+	// Here, the size of context header in the IR context is used for
+	// both contexts.
+ if (!(s->flags & CIP_NO_HEADER))
+ ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
+ else
+ ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
+ maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
+ CYCLES_PER_SECOND / ctx_header_size;
+
+	// In IEC 61883-6, one isoc packet can transfer events up to the value
+	// of the syt interval. This comes from the interval of isoc cycle. As
+	// the 1394 OHCI controller can generate a hardware IRQ per isoc
+	// packet, the interval is 125 usec.
+	// However, there are two modes of transmission in IEC 61883-6;
+	// blocking and non-blocking. In blocking mode, the sequence of isoc
+	// packets includes 'empty' or 'NODATA' packets which carry no event.
+	// In non-blocking mode, the number of events per packet is variable
+	// up to the syt interval.
+	// Due to the above protocol design, the minimum number of PCM frames
+	// per interrupt should be double the syt interval, thus 250 usec.
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- 5000, UINT_MAX);
+ 250, maximum_usec_per_period);
if (err < 0)
goto end;
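/*
 * A minimal standalone sketch of the period-time bounds computed above,
 * assuming PAGE_SIZE == 4096 bytes and the 1394 rate of 8000 isoc cycles
 * per second (both assumptions for illustration; PAGE_SIZE is
 * arch-dependent in the kernel).
 */
#include <stdio.h>

#define USEC_PER_SEC			1000000UL
#define PAGE_SIZE_BYTES			4096UL
#define CYCLES_PER_SECOND		8000UL
#define IR_CTX_HEADER_SIZE_CIP		16UL
#define IR_CTX_HEADER_SIZE_NO_CIP	8UL

int main(void)
{
	unsigned long max_cip = USEC_PER_SEC * PAGE_SIZE_BYTES /
				CYCLES_PER_SECOND / IR_CTX_HEADER_SIZE_CIP;
	unsigned long max_no_cip = USEC_PER_SEC * PAGE_SIZE_BYTES /
				   CYCLES_PER_SECOND / IR_CTX_HEADER_SIZE_NO_CIP;

	/* Prints 32000 usec with CIP headers and 64000 usec without. */
	printf("period time: min 250 usec, max %lu/%lu usec\n",
	       max_cip, max_no_cip);
	return 0;
}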
@@ -436,11 +452,12 @@ static void pcm_period_tasklet(unsigned long data)
snd_pcm_period_elapsed(pcm);
}
-static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
+static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
+ bool sched_irq)
{
int err;
- params->interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
+ params->interrupt = sched_irq;
params->tag = s->tag;
params->sy = 0;
@@ -451,18 +468,18 @@ static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
goto end;
}
- if (++s->packet_index >= QUEUE_LENGTH)
+ if (++s->packet_index >= s->queue_size)
s->packet_index = 0;
end:
return err;
}
static inline int queue_out_packet(struct amdtp_stream *s,
- struct fw_iso_packet *params)
+ struct fw_iso_packet *params, bool sched_irq)
{
params->skip =
!!(params->header_length == 0 && params->payload_length == 0);
- return queue_packet(s, params);
+ return queue_packet(s, params, sched_irq);
}
static inline int queue_in_packet(struct amdtp_stream *s,
@@ -472,7 +489,7 @@ static inline int queue_in_packet(struct amdtp_stream *s,
params->header_length = s->ctx_data.tx.ctx_header_size;
params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
params->skip = false;
- return queue_packet(s, params);
+ return queue_packet(s, params, false);
}
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
@@ -669,13 +686,14 @@ static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
}
// Align to actual cycle count for the packet which is going to be scheduled.
-// This module queued the same number of isochronous cycle as QUEUE_LENGTH to
-// skip isochronous cycle, therefore it's OK to just increment the cycle by
-// QUEUE_LENGTH for scheduled cycle.
-static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp)
+// This module queues the same number of isochronous cycles as the size of the
+// queue to skip isochronous cycles, therefore it's OK to just increment the
+// cycle by the size of the queue for the scheduled cycle.
+static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
+ unsigned int queue_size)
{
u32 cycle = compute_cycle_count(ctx_header_tstamp);
- return increment_cycle_count(cycle, QUEUE_LENGTH);
+ return increment_cycle_count(cycle, queue_size);
}
static int generate_device_pkt_descs(struct amdtp_stream *s,
@@ -689,7 +707,7 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
unsigned int cycle;
unsigned int payload_length;
unsigned int data_blocks;
@@ -730,9 +748,9 @@ static void generate_ideal_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
- desc->cycle = compute_it_cycle(*ctx_header);
+ desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
desc->syt = calculate_syt(s, desc->cycle);
desc->data_blocks = calculate_data_blocks(s, desc->syt);
@@ -773,22 +791,40 @@ static void process_ctx_payloads(struct amdtp_stream *s,
update_pcm_pointers(s, pcm, pcm_frames);
}
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data);
+
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header,
void *private_data)
{
struct amdtp_stream *s = private_data;
const __be32 *ctx_header = header;
- unsigned int packets = header_length / sizeof(*ctx_header);
+ unsigned int events_per_period = s->ctx_data.rx.events_per_period;
+ unsigned int event_count = s->ctx_data.rx.event_count;
+ unsigned int packets;
+ bool is_irq_target;
int i;
if (s->packet_index < 0)
return;
+	// Calculate the number of packets in the buffer and check XRUN.
+ packets = header_length / sizeof(*ctx_header);
+
generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);
process_ctx_payloads(s, s->pkt_descs, packets);
+ is_irq_target =
+ !!(context->callback.sc == amdtp_stream_master_callback ||
+ context->callback.sc == amdtp_stream_master_first_callback);
+
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = s->pkt_descs + i;
unsigned int syt;
@@ -796,6 +832,7 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
struct fw_iso_packet params;
__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
} template = { {0}, {0} };
+ bool sched_irq = false;
if (s->ctx_data.rx.syt_override < 0)
syt = desc->syt;
@@ -806,13 +843,21 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
desc->data_blocks, desc->data_block_counter,
syt, i);
- if (queue_out_packet(s, &template.params) < 0) {
+ if (is_irq_target) {
+ event_count += desc->data_blocks;
+ if (event_count >= events_per_period) {
+ event_count -= events_per_period;
+ sched_irq = true;
+ }
+ }
+
+ if (queue_out_packet(s, &template.params, sched_irq) < 0) {
cancel_stream(s);
return;
}
}
- fw_iso_context_queue_flush(s->context);
+ s->ctx_data.rx.event_count = event_count;
}
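/*
 * A minimal standalone model of the IRQ scheduling above: an interrupt
 * is requested on the packet whose data blocks carry the accumulated
 * event count across events_per_period, and the remainder carries over
 * to the next period. The period and packet sizes are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int events_per_period = 16;
	unsigned int event_count = 0;
	unsigned int data_blocks[] = { 6, 5, 6, 5, 6 };	/* per packet */

	for (int i = 0; i < 5; ++i) {
		bool sched_irq = false;

		event_count += data_blocks[i];
		if (event_count >= events_per_period) {
			event_count -= events_per_period;
			sched_irq = true;	/* IRQ lands on packet 2 here */
		}
		printf("packet %d: irq=%d carry=%u\n", i, sched_irq, event_count);
	}
	return 0;
}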
static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
@@ -820,15 +865,15 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
void *private_data)
{
struct amdtp_stream *s = private_data;
- unsigned int packets;
__be32 *ctx_header = header;
+ unsigned int packets;
int i;
int err;
if (s->packet_index < 0)
return;
- // The number of packets in buffer.
+	// Calculate the number of packets in the buffer and check XRUN.
packets = header_length / s->ctx_data.tx.ctx_header_size;
err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
@@ -849,11 +894,40 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
return;
}
}
+}
+
+static void amdtp_stream_master_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_domain *d = private_data;
+ struct amdtp_stream *irq_target = d->irq_target;
+ struct amdtp_stream *s;
+
+ out_stream_callback(context, tstamp, header_length, header, irq_target);
+ if (amdtp_streaming_error(irq_target))
+ goto error;
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (s != irq_target && amdtp_stream_running(s)) {
+ fw_iso_context_flush_completions(s->context);
+ if (amdtp_streaming_error(s))
+ goto error;
+ }
+ }
- fw_iso_context_queue_flush(s->context);
+ return;
+error:
+ if (amdtp_stream_running(irq_target))
+ cancel_stream(irq_target);
+
+ list_for_each_entry(s, &d->streams, list) {
+ if (amdtp_stream_running(s))
+ cancel_stream(s);
+ }
}
-/* this is executed one time */
+// This is executed one time.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length,
void *header, void *private_data)
@@ -874,7 +948,7 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc = in_stream_callback;
} else {
- cycle = compute_it_cycle(*ctx_header);
+ cycle = compute_it_cycle(*ctx_header, s->queue_size);
context->callback.sc = out_stream_callback;
}
@@ -884,17 +958,42 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc(context, tstamp, header_length, header, s);
}
+static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
+ u32 tstamp, size_t header_length,
+ void *header, void *private_data)
+{
+ struct amdtp_domain *d = private_data;
+ struct amdtp_stream *s = d->irq_target;
+ const __be32 *ctx_header = header;
+
+ s->callbacked = true;
+ wake_up(&s->callback_wait);
+
+ s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);
+
+ context->callback.sc = amdtp_stream_master_callback;
+
+ context->callback.sc(context, tstamp, header_length, header, d);
+}
+
/**
* amdtp_stream_start - start transferring packets
* @s: the AMDTP stream to start
* @channel: the isochronous channel on the bus
* @speed: firewire speed code
+ * @d: the AMDTP domain to which the AMDTP stream belongs
+ * @is_irq_target: whether isoc context for the AMDTP stream is used to generate
+ * hardware IRQ.
+ * @start_cycle: the isochronous cycle to start the context. Start immediately
+ * if a negative value is given.
*
* The stream cannot be started until it has been configured with
* amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
* device can be started.
*/
-static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
+static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
+ struct amdtp_domain *d, bool is_irq_target,
+ int start_cycle)
{
static const struct {
unsigned int data_block;
@@ -908,10 +1007,15 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
[CIP_SFC_88200] = { 0, 67 },
[CIP_SFC_176400] = { 0, 67 },
};
+ unsigned int events_per_buffer = d->events_per_buffer;
+ unsigned int events_per_period = d->events_per_period;
+ unsigned int idle_irq_interval;
unsigned int ctx_header_size;
unsigned int max_ctx_payload_size;
enum dma_data_direction dir;
int type, tag, err;
+ fw_iso_callback_t ctx_cb;
+ void *ctx_data;
mutex_lock(&s->mutex);
@@ -922,6 +1026,12 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
}
if (s->direction == AMDTP_IN_STREAM) {
+ // NOTE: IT context should be used for constant IRQ.
+ if (is_irq_target) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
s->data_block_counter = UINT_MAX;
} else {
entry = &initial_state[s->sfc];
@@ -953,14 +1063,37 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
- err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
+	// This is the case that AMDTP streams in the domain run just for a
+	// MIDI substream. Use the number of events equivalent to 10 msec as
+	// the interval of hardware IRQ.
+ if (events_per_period == 0)
+ events_per_period = amdtp_rate_table[s->sfc] / 100;
+ if (events_per_buffer == 0)
+ events_per_buffer = events_per_period * 3;
+
+ idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
+ amdtp_rate_table[s->sfc]);
+ s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
+ amdtp_rate_table[s->sfc]);
+
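/*
 * A standalone sketch of the sizing above, assuming 8000 isoc cycles
 * per second. 480 events at 48000 Hz matches the 10 msec MIDI-only
 * default (rate / 100); the default buffer is three periods.
 */
#include <stdio.h>

#define CYCLES_PER_SECOND	8000U
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rate = 48000;	/* amdtp_rate_table[sfc] */
	unsigned int events_per_period = rate / 100;
	unsigned int events_per_buffer = events_per_period * 3;

	unsigned int idle_irq_interval =
		DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period, rate);
	unsigned int queue_size =
		DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer, rate);

	/* Prints 80 cycles between idle IRQs and a ring of 240 packets. */
	printf("idle_irq_interval=%u cycles, queue_size=%u packets\n",
	       idle_irq_interval, queue_size);
	return 0;
}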
+ err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
+ if (is_irq_target) {
+ s->ctx_data.rx.events_per_period = events_per_period;
+ s->ctx_data.rx.event_count = 0;
+ ctx_cb = amdtp_stream_master_first_callback;
+ ctx_data = d;
+ } else {
+ ctx_cb = amdtp_stream_first_callback;
+ ctx_data = s;
+ }
+
s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
type, channel, speed, ctx_header_size,
- amdtp_stream_first_callback, s);
+ ctx_cb, ctx_data);
if (IS_ERR(s->context)) {
err = PTR_ERR(s->context);
if (err == -EBUSY)
@@ -981,7 +1114,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
else
s->tag = TAG_CIP;
- s->pkt_descs = kcalloc(INTERRUPT_INTERVAL, sizeof(*s->pkt_descs),
+ s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
GFP_KERNEL);
if (!s->pkt_descs) {
err = -ENOMEM;
@@ -991,12 +1124,21 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
s->packet_index = 0;
do {
struct fw_iso_packet params;
+
if (s->direction == AMDTP_IN_STREAM) {
err = queue_in_packet(s, &params);
} else {
+ bool sched_irq = false;
+
params.header_length = 0;
params.payload_length = 0;
- err = queue_out_packet(s, &params);
+
+ if (is_irq_target) {
+ sched_irq = !((s->packet_index + 1) %
+ idle_irq_interval);
+ }
+
+ err = queue_out_packet(s, &params, sched_irq);
}
if (err < 0)
goto err_pkt_descs;
@@ -1008,7 +1150,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
tag |= FW_ISO_CONTEXT_MATCH_TAG0;
s->callbacked = false;
- err = fw_iso_context_start(s->context, -1, 0, tag);
+ err = fw_iso_context_start(s->context, start_cycle, 0, tag);
if (err < 0)
goto err_pkt_descs;
@@ -1029,54 +1171,69 @@ err_unlock:
}
/**
- * amdtp_stream_pcm_pointer - get the PCM buffer position
+ * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
+ * @d: the AMDTP domain.
* @s: the AMDTP stream that transports the PCM data
*
* Returns the current buffer position, in frames.
*/
-unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
+unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ struct amdtp_stream *s)
{
- /*
- * This function is called in software IRQ context of period_tasklet or
- * process context.
- *
- * When the software IRQ context was scheduled by software IRQ context
- * of IR/IT contexts, queued packets were already handled. Therefore,
- * no need to flush the queue in buffer anymore.
- *
- * When the process context reach here, some packets will be already
- * queued in the buffer. These packets should be handled immediately
- * to keep better granularity of PCM pointer.
- *
- * Later, the process context will sometimes schedules software IRQ
- * context of the period_tasklet. Then, no need to flush the queue by
- * the same reason as described for IR/IT contexts.
- */
- if (!in_interrupt() && amdtp_stream_running(s))
- fw_iso_context_flush_completions(s->context);
+ struct amdtp_stream *irq_target = d->irq_target;
+
+ if (irq_target && amdtp_stream_running(irq_target)) {
+ // This function is called in software IRQ context of
+ // period_tasklet or process context.
+ //
+		// When the software IRQ context is scheduled by the software
+		// IRQ context of IT contexts, queued packets are already
+		// handled. Therefore, there is no need to flush the queue in
+		// the buffer anymore.
+		//
+		// When the process context reaches here, some packets will
+		// already be queued in the buffer. These packets should be
+		// handled immediately to keep better granularity of the PCM
+		// pointer.
+		//
+		// Later, the process context sometimes schedules the software
+		// IRQ context of the period_tasklet. Then, there is no need to
+		// flush the queue for the same reason as described above.
+ if (!in_interrupt()) {
+			// Queued packets should be processed without kernel
+			// preemption to keep latency low against the bus cycle.
+ preempt_disable();
+ fw_iso_context_flush_completions(irq_target->context);
+ preempt_enable();
+ }
+ }
return READ_ONCE(s->pcm_buffer_pointer);
}
-EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
/**
- * amdtp_stream_pcm_ack - acknowledge queued PCM frames
+ * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
+ * @d: the AMDTP domain.
* @s: the AMDTP stream that transfers the PCM frames
*
* Returns zero always.
*/
-int amdtp_stream_pcm_ack(struct amdtp_stream *s)
+int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
- /*
- * Process isochronous packets for recent isochronous cycle to handle
- * queued PCM frames.
- */
- if (amdtp_stream_running(s))
- fw_iso_context_flush_completions(s->context);
+ struct amdtp_stream *irq_target = d->irq_target;
+
+	// Process isochronous packets for the recent isochronous cycle to
+	// handle queued PCM frames.
+	if (irq_target && amdtp_stream_running(irq_target)) {
+		// Queued packets should be processed without kernel
+		// preemption to keep latency low against the bus cycle.
+ preempt_disable();
+ fw_iso_context_flush_completions(irq_target->context);
+ preempt_enable();
+ }
return 0;
}
-EXPORT_SYMBOL(amdtp_stream_pcm_ack);
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
/**
* amdtp_stream_update - update the stream after a bus reset
@@ -1143,6 +1300,8 @@ int amdtp_domain_init(struct amdtp_domain *d)
{
INIT_LIST_HEAD(&d->streams);
+ d->events_per_period = 0;
+
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
@@ -1184,26 +1343,105 @@ int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
+static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
+{
+ int generation;
+ int rcode;
+ __be32 reg;
+ u32 data;
+
+	// This is a request to the local 1394 OHCI controller and is
+	// expected to complete without any event waiting.
+ generation = fw_card->generation;
+ smp_rmb(); // node_id vs. generation.
+ rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
+ fw_card->node_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_CYCLE_TIME,
+ &reg, sizeof(reg));
+ if (rcode != RCODE_COMPLETE)
+ return -EIO;
+
+ data = be32_to_cpu(reg);
+ *cur_cycle = data >> 12;
+
+ return 0;
+}
+
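/*
 * A standalone sketch of the CSR_CYCLE_TIME extraction above. Per IEEE
 * 1394, the 32-bit register packs second (bits 31-25), cycle (bits
 * 24-12) and cycle_offset (bits 11-0); "data >> 12" keeps the
 * concatenated second/cycle fields. The register value is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data = ((uint32_t)9 << 25) | ((uint32_t)837 << 12) | 1536;
	uint32_t sec = data >> 25;		/* 7-bit second field: 9 */
	uint32_t cycle = (data >> 12) & 0x1fff;	/* 13-bit cycle field: 837 */
	uint32_t cur_cycle = data >> 12;	/* as in the helper above */

	printf("sec=%u cycle=%u cur_cycle=%u\n", sec, cycle, cur_cycle);
	return 0;
}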
/**
* amdtp_domain_start - start sending packets for isoc context in the domain.
* @d: the AMDTP domain.
+ * @ir_delay_cycle: the cycle delay to start all IR contexts.
*/
-int amdtp_domain_start(struct amdtp_domain *d)
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
struct amdtp_stream *s;
- int err = 0;
+ int cycle;
+ int err;
+ // Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) {
- err = amdtp_stream_start(s, s->channel, s->speed);
- if (err < 0)
+ if (s->direction == AMDTP_OUT_STREAM)
break;
}
+ if (!s)
+ return -ENXIO;
+ d->irq_target = s;
- if (err < 0) {
- list_for_each_entry(s, &d->streams, list)
- amdtp_stream_stop(s);
+ if (ir_delay_cycle > 0) {
+ struct fw_card *fw_card = fw_parent_device(s->unit)->card;
+
+ err = get_current_cycle_time(fw_card, &cycle);
+ if (err < 0)
+ return err;
+
+		// No need to care about overflow in the cycle field because
+		// it is wide enough.
+ cycle += ir_delay_cycle;
+
+ // Round up to sec field.
+ if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
+ unsigned int sec;
+
+ // The sec field can overflow.
+ sec = (cycle & 0xffffe000) >> 13;
+ cycle = (++sec << 13) |
+ ((cycle & 0x00001fff) / CYCLES_PER_SECOND);
+ }
+
+		// In the OHCI 1394 specification, the lower 2 bits are
+		// available for the sec field.
+ cycle &= 0x00007fff;
+ } else {
+ cycle = -1;
+ }
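/*
 * A standalone sketch of the cycleMatch layout handled above: bits 13
 * and up carry the second count, bits 0-12 the cycle, and only the low
 * 2 bits of the second are usable by fw_iso_context_start() (hence the
 * 0x7fff mask). For clarity this sketch normalizes with plain modulo
 * arithmetic rather than the rounding above.
 */
#include <stdio.h>

#define CYCLES_PER_SECOND 8000U

int main(void)
{
	unsigned int cur = (5U << 13) | 7900U;	/* sec=5, cycle=7900 (example) */
	unsigned int delay = 3200U;		/* ir_delay_cycle */
	unsigned int total = (cur & 0x1fff) + delay;
	unsigned int sec = (cur >> 13) + total / CYCLES_PER_SECOND;
	unsigned int cyc = total % CYCLES_PER_SECOND;
	unsigned int match = ((sec << 13) | cyc) & 0x7fff;

	/* Prints sec=6 cycle=3100 cycle_match=0x4c1c. */
	printf("sec=%u cycle=%u cycle_match=0x%04x\n", sec, cyc, match);
	return 0;
}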
+
+ list_for_each_entry(s, &d->streams, list) {
+ int cycle_match;
+
+ if (s->direction == AMDTP_IN_STREAM) {
+ cycle_match = cycle;
+ } else {
+ // IT context starts immediately.
+ cycle_match = -1;
+ }
+
+ if (s != d->irq_target) {
+ err = amdtp_stream_start(s, s->channel, s->speed, d,
+ false, cycle_match);
+ if (err < 0)
+ goto error;
+ }
}
+ s = d->irq_target;
+ err = amdtp_stream_start(s, s->channel, s->speed, d, true, -1);
+ if (err < 0)
+ goto error;
+
+ return 0;
+error:
+ list_for_each_entry(s, &d->streams, list)
+ amdtp_stream_stop(s);
return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
@@ -1216,10 +1454,17 @@ void amdtp_domain_stop(struct amdtp_domain *d)
{
struct amdtp_stream *s, *next;
+ if (d->irq_target)
+ amdtp_stream_stop(d->irq_target);
+
list_for_each_entry_safe(s, next, &d->streams, list) {
list_del(&s->list);
- amdtp_stream_stop(s);
+ if (s != d->irq_target)
+ amdtp_stream_stop(s);
}
+
+ d->events_per_period = 0;
+ d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index bbbca964b9b4..f2d44e2dc3c8 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -117,6 +117,7 @@ struct amdtp_stream {
/* For packet processing. */
struct fw_iso_context *context;
struct iso_packets_buffer buffer;
+ unsigned int queue_size;
int packet_index;
struct pkt_desc *pkt_descs;
int tag;
@@ -142,6 +143,10 @@ struct amdtp_stream {
// To generate CIP header.
unsigned int fdf;
int syt_override;
+
+ // To generate constant hardware IRQ.
+ unsigned int event_count;
+ unsigned int events_per_period;
} rx;
} ctx_data;
@@ -194,8 +199,6 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime);
void amdtp_stream_pcm_prepare(struct amdtp_stream *s);
-unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s);
-int amdtp_stream_pcm_ack(struct amdtp_stream *s);
void amdtp_stream_pcm_abort(struct amdtp_stream *s);
extern const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT];
@@ -272,6 +275,11 @@ static inline bool amdtp_stream_wait_callback(struct amdtp_stream *s,
struct amdtp_domain {
struct list_head streams;
+
+ unsigned int events_per_period;
+ unsigned int events_per_buffer;
+
+ struct amdtp_stream *irq_target;
};
int amdtp_domain_init(struct amdtp_domain *d);
@@ -280,7 +288,21 @@ void amdtp_domain_destroy(struct amdtp_domain *d);
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
int channel, int speed);
-int amdtp_domain_start(struct amdtp_domain *d);
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle);
void amdtp_domain_stop(struct amdtp_domain *d);
+static inline int amdtp_domain_set_events_per_period(struct amdtp_domain *d,
+ unsigned int events_per_period,
+ unsigned int events_per_buffer)
+{
+ d->events_per_period = events_per_period;
+ d->events_per_buffer = events_per_buffer;
+
+ return 0;
+}
+
+unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
+ struct amdtp_stream *s);
+int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s);
+
#endif
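/*
 * A trimmed standalone model of the domain-level period negotiation in
 * this header: the first substream's hw_params() stores frames per
 * period/buffer in the domain, and later pcm_open() calls on other
 * substreams re-impose them as fixed constraints. The struct here is a
 * stand-in, not the kernel's struct amdtp_domain.
 */
#include <stdio.h>

struct domain {
	unsigned int events_per_period;
	unsigned int events_per_buffer;
};

static int set_events_per_period(struct domain *d, unsigned int period,
				 unsigned int buffer)
{
	d->events_per_period = period;
	d->events_per_buffer = buffer;
	return 0;
}

int main(void)
{
	struct domain d = { 0, 0 };

	/* First substream's hw_params() reserves the domain geometry. */
	set_events_per_period(&d, 480, 1440);

	/* A second substream opening now sees fixed period/buffer sizes. */
	if (d.events_per_period > 0)
		printf("constrain period=%u buffer=%u frames\n",
		       d.events_per_period, d.events_per_buffer);
	return 0;
}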
diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h
index 356d6ba60959..d1ad9a8451bc 100644
--- a/sound/firewire/bebob/bebob.h
+++ b/sound/firewire/bebob/bebob.h
@@ -217,7 +217,9 @@ int snd_bebob_stream_get_clock_src(struct snd_bebob *bebob,
enum snd_bebob_clock_type *src);
int snd_bebob_stream_discover(struct snd_bebob *bebob);
int snd_bebob_stream_init_duplex(struct snd_bebob *bebob);
-int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate);
+int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_bebob_stream_start_duplex(struct snd_bebob *bebob);
void snd_bebob_stream_stop_duplex(struct snd_bebob *bebob);
void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob);
diff --git a/sound/firewire/bebob/bebob_midi.c b/sound/firewire/bebob/bebob_midi.c
index 4d8805fa8a00..6f597d03e7c1 100644
--- a/sound/firewire/bebob/bebob_midi.c
+++ b/sound/firewire/bebob/bebob_midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
return err;
mutex_lock(&bebob->mutex);
- err = snd_bebob_stream_reserve_duplex(bebob, 0);
+ err = snd_bebob_stream_reserve_duplex(bebob, 0, 0, 0);
if (err >= 0) {
++bebob->substreams_counter;
err = snd_bebob_stream_start_duplex(bebob);
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index 0fb9eed46837..d4edd06d32cf 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -129,18 +129,17 @@ end:
return err;
}
-static int
-pcm_open(struct snd_pcm_substream *substream)
+static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_bebob *bebob = substream->private_data;
const struct snd_bebob_rate_spec *spec = bebob->spec->rate;
- unsigned int sampling_rate;
+ struct amdtp_domain *d = &bebob->domain;
enum snd_bebob_clock_type src;
int err;
err = snd_bebob_stream_lock_try(bebob);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(bebob, substream);
if (err < 0)
@@ -150,15 +149,20 @@ pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- /*
- * When source of clock is internal or any PCM stream are running,
- * the available sampling rate is limited at current sampling rate.
- */
+ mutex_lock(&bebob->mutex);
+
+	// When the source of clock is not internal or any stream is reserved
+	// for transmission of PCM frames, the available sampling rate is
+	// limited to the current one.
if (src == SND_BEBOB_CLOCK_TYPE_EXTERNAL ||
- amdtp_stream_pcm_running(&bebob->tx_stream) ||
- amdtp_stream_pcm_running(&bebob->rx_stream)) {
+ (bebob->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int sampling_rate;
+
err = spec->get(bebob, &sampling_rate);
if (err < 0) {
+ mutex_unlock(&bebob->mutex);
dev_err(&bebob->unit->device,
"fail to get sampling rate: %d\n", err);
goto err_locked;
@@ -166,11 +170,31 @@ pcm_open(struct snd_pcm_substream *substream)
substream->runtime->hw.rate_min = sampling_rate;
substream->runtime->hw.rate_max = sampling_rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&bebob->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&bebob->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&bebob->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_bebob_stream_lock_release(bebob);
return err;
@@ -190,16 +214,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_bebob *bebob = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&bebob->mutex);
- err = snd_bebob_stream_reserve_duplex(bebob, rate);
+ err = snd_bebob_stream_reserve_duplex(bebob, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++bebob->substreams_counter;
mutex_unlock(&bebob->mutex);
@@ -221,7 +247,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&bebob->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int
@@ -286,31 +312,33 @@ pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static snd_pcm_uframes_t
-pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
+static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_bebob *bebob = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&bebob->tx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&bebob->domain,
+ &bebob->tx_stream);
}
-static snd_pcm_uframes_t
-pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
+static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_bebob *bebob = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&bebob->rx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&bebob->domain,
+ &bebob->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_bebob *bebob = substream->private_data;
- return amdtp_stream_pcm_ack(&bebob->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&bebob->domain, &bebob->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_bebob *bebob = substream->private_data;
- return amdtp_stream_pcm_ack(&bebob->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&bebob->domain, &bebob->rx_stream);
}
int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
@@ -325,7 +353,6 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -337,7 +364,6 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -351,6 +377,8 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
"%s PCM", bebob->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
end:
return err;
}
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 6c1497d9f52b..bbae04793c50 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -7,7 +7,7 @@
#include "./bebob.h"
-#define CALLBACK_TIMEOUT 2000
+#define CALLBACK_TIMEOUT 2500
#define FW_ISO_RESOURCE_DELAY 1000
/*
@@ -398,36 +398,19 @@ check_connection_used_by_others(struct snd_bebob *bebob, struct amdtp_stream *s)
return err;
}
-static int make_both_connections(struct snd_bebob *bebob)
-{
- int err = 0;
-
- err = cmp_connection_establish(&bebob->out_conn);
- if (err < 0)
- return err;
-
- err = cmp_connection_establish(&bebob->in_conn);
- if (err < 0) {
- cmp_connection_break(&bebob->out_conn);
- return err;
- }
-
- return 0;
-}
-
-static void
-break_both_connections(struct snd_bebob *bebob)
+static void break_both_connections(struct snd_bebob *bebob)
{
cmp_connection_break(&bebob->in_conn);
cmp_connection_break(&bebob->out_conn);
- /* These models seems to be in transition state for a longer time. */
- if (bebob->maudio_special_quirk != NULL)
- msleep(200);
+	// These models seem to stay in a transition state for a longer time.
+	// When accessed in this state, any transaction is corrupted. In the
+	// worst case, the device is going to reboot.
+ if (bebob->version < 2)
+ msleep(600);
}
-static int
-start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
+static int start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
{
struct cmp_connection *conn;
int err = 0;
@@ -437,18 +420,19 @@ start_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
else
conn = &bebob->out_conn;
- /* channel mapping */
+ // channel mapping.
if (bebob->maudio_special_quirk == NULL) {
err = map_data_channels(bebob, stream);
if (err < 0)
- goto end;
+ return err;
}
- // start amdtp stream.
- err = amdtp_domain_add_stream(&bebob->domain, stream,
- conn->resources.channel, conn->speed);
-end:
- return err;
+ err = cmp_connection_establish(conn);
+ if (err < 0)
+ return err;
+
+ return amdtp_domain_add_stream(&bebob->domain, stream,
+ conn->resources.channel, conn->speed);
}
static int init_stream(struct snd_bebob *bebob, struct amdtp_stream *stream)
@@ -553,7 +537,9 @@ static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
return cmp_connection_reserve(conn, amdtp_stream_get_max_payload(stream));
}
-int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate)
+int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -606,6 +592,14 @@ int snd_bebob_stream_reserve_duplex(struct snd_bebob *bebob, unsigned int rate)
cmp_connection_release(&bebob->out_conn);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&bebob->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ cmp_connection_release(&bebob->out_conn);
+ cmp_connection_release(&bebob->in_conn);
+ return err;
+ }
}
return 0;
@@ -627,7 +621,10 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
}
if (!amdtp_stream_running(&bebob->rx_stream)) {
+ enum snd_bebob_clock_type src;
+ struct amdtp_stream *master, *slave;
unsigned int curr_rate;
+ unsigned int ir_delay_cycle;
if (bebob->maudio_special_quirk) {
err = bebob->spec->rate->get(bebob, &curr_rate);
@@ -635,19 +632,40 @@ int snd_bebob_stream_start_duplex(struct snd_bebob *bebob)
return err;
}
- err = make_both_connections(bebob);
+ err = snd_bebob_stream_get_clock_src(bebob, &src);
if (err < 0)
return err;
- err = start_stream(bebob, &bebob->rx_stream);
+ if (src != SND_BEBOB_CLOCK_TYPE_SYT) {
+ master = &bebob->tx_stream;
+ slave = &bebob->rx_stream;
+ } else {
+ master = &bebob->rx_stream;
+ slave = &bebob->tx_stream;
+ }
+
+ err = start_stream(bebob, master);
if (err < 0)
goto error;
- err = start_stream(bebob, &bebob->tx_stream);
+ err = start_stream(bebob, slave);
if (err < 0)
goto error;
- err = amdtp_domain_start(&bebob->domain);
+		// The device mostly postpones the start of transmission for
+		// 1 sec after it first receives packets. To be safe, the IR
+		// context starts 0.4 sec (=3200 cycles) later for version 1
+		// or 2 firmware, and 2.0 sec (=16000 cycles) later for
+		// version 3 firmware. This is within 2.5 sec
+		// (=CALLBACK_TIMEOUT).
+		// Furthermore, some devices transfer isoc packets with a
+		// discontinuous counter at the beginning of packet streaming.
+		// The delay has the effect of avoiding detection of this
+		// discontinuity.
+ if (bebob->version < 2)
+ ir_delay_cycle = 3200;
+ else
+ ir_delay_cycle = 16000;
+ err = amdtp_domain_start(&bebob->domain, ir_delay_cycle);
if (err < 0)
goto error;
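/*
 * A quick standalone check of the delay figures above, assuming the
 * 1394 rate of 8000 isoc cycles per second: 3200 cycles is 400 msec
 * and 16000 cycles is 2000 msec, both under the 2500 msec timeout.
 */
#include <stdio.h>

#define CYCLES_PER_SECOND	8000U
#define CALLBACK_TIMEOUT_MS	2500U

int main(void)
{
	unsigned int delays[] = { 3200U, 16000U };

	for (int i = 0; i < 2; ++i) {
		unsigned int msec = delays[i] * 1000U / CYCLES_PER_SECOND;

		printf("%u cycles = %u msec (timeout %u msec)\n",
		       delays[i], msec, CALLBACK_TIMEOUT_MS);
	}
	return 0;
}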
diff --git a/sound/firewire/dice/dice-midi.c b/sound/firewire/dice/dice-midi.c
index c9e19bddfc09..4c2998034313 100644
--- a/sound/firewire/dice/dice-midi.c
+++ b/sound/firewire/dice/dice-midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
mutex_lock(&dice->mutex);
- err = snd_dice_stream_reserve_duplex(dice, 0);
+ err = snd_dice_stream_reserve_duplex(dice, 0, 0, 0);
if (err >= 0) {
++dice->substreams_counter;
err = snd_dice_stream_start_duplex(dice);
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index 94a4dccfc381..be79d659eedf 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -164,13 +164,14 @@ static int init_hw_info(struct snd_dice *dice,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_dice *dice = substream->private_data;
+ struct amdtp_domain *d = &dice->domain;
unsigned int source;
bool internal;
int err;
err = snd_dice_stream_lock_try(dice);
if (err < 0)
- goto end;
+ return err;
err = init_hw_info(dice, substream);
if (err < 0)
@@ -195,27 +196,56 @@ static int pcm_open(struct snd_pcm_substream *substream)
break;
}
- /*
- * When source of clock is not internal or any PCM streams are running,
- * available sampling rate is limited at current sampling rate.
- */
+ mutex_lock(&dice->mutex);
+
+	// When the source of clock is not internal or any stream is reserved
+	// for transmission of PCM frames, the available sampling rate is
+	// limited to the current one.
if (!internal ||
- amdtp_stream_pcm_running(&dice->tx_stream[0]) ||
- amdtp_stream_pcm_running(&dice->tx_stream[1]) ||
- amdtp_stream_pcm_running(&dice->rx_stream[0]) ||
- amdtp_stream_pcm_running(&dice->rx_stream[1])) {
+ (dice->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
unsigned int rate;
err = snd_dice_transaction_get_rate(dice, &rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
goto err_locked;
+ }
+
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ if (frames_per_period > 0) {
+ // For double_pcm_frame quirk.
+ if (rate > 96000) {
+ frames_per_period *= 2;
+ frames_per_buffer *= 2;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&dice->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&dice->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_dice_stream_lock_release(dice);
return err;
@@ -236,16 +266,23 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_dice *dice = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int events_per_period = params_period_size(hw_params);
+ unsigned int events_per_buffer = params_buffer_size(hw_params);
mutex_lock(&dice->mutex);
- err = snd_dice_stream_reserve_duplex(dice, rate);
+ // For double_pcm_frame quirk.
+ if (rate > 96000) {
+ events_per_period /= 2;
+ events_per_buffer /= 2;
+ }
+ err = snd_dice_stream_reserve_duplex(dice, rate,
+ events_per_period, events_per_buffer);
if (err >= 0)
++dice->substreams_counter;
mutex_unlock(&dice->mutex);
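/*
 * A standalone sketch of the double_pcm_frame quirk handling above: at
 * rates over 96 kHz the unit carries two PCM frames per data-block
 * event, so the PCM period/buffer sizes are halved before being stored
 * as events, and doubled again when re-imposed in pcm_open(). The
 * numbers are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rate = 176400;
	unsigned int frames_per_period = 512;	/* params_period_size() */
	unsigned int events = frames_per_period;

	if (rate > 96000)
		events /= 2;	/* value stored in the domain */

	unsigned int frames_reimposed = (rate > 96000) ? events * 2 : events;

	/* Prints 512 frames -> 256 events -> 512 frames. */
	printf("%u frames -> %u events -> %u frames\n",
	       frames_per_period, events, frames_reimposed);
	return 0;
}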
@@ -267,7 +304,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&dice->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int capture_prepare(struct snd_pcm_substream *substream)
@@ -341,14 +378,14 @@ static snd_pcm_uframes_t capture_pointer(struct snd_pcm_substream *substream)
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device];
- return amdtp_stream_pcm_pointer(stream);
+ return amdtp_domain_stream_pcm_pointer(&dice->domain, stream);
}
static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device];
- return amdtp_stream_pcm_pointer(stream);
+ return amdtp_domain_stream_pcm_pointer(&dice->domain, stream);
}
static int capture_ack(struct snd_pcm_substream *substream)
@@ -356,7 +393,7 @@ static int capture_ack(struct snd_pcm_substream *substream)
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device];
- return amdtp_stream_pcm_ack(stream);
+ return amdtp_domain_stream_pcm_ack(&dice->domain, stream);
}
static int playback_ack(struct snd_pcm_substream *substream)
@@ -364,7 +401,7 @@ static int playback_ack(struct snd_pcm_substream *substream)
struct snd_dice *dice = substream->private_data;
struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device];
- return amdtp_stream_pcm_ack(stream);
+ return amdtp_domain_stream_pcm_ack(&dice->domain, stream);
}
int snd_dice_create_pcm(struct snd_dice *dice)
@@ -379,7 +416,6 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.trigger = capture_trigger,
.pointer = capture_pointer,
.ack = capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -391,7 +427,6 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.trigger = playback_trigger,
.pointer = playback_pointer,
.ack = playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
unsigned int capture, playback;
@@ -421,6 +456,10 @@ int snd_dice_create_pcm(struct snd_dice *dice)
if (playback > 0)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&playback_ops);
+
+ snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
}
return 0;
diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
index f6a8627ae5a2..6a3d60913e10 100644
--- a/sound/firewire/dice/dice-stream.c
+++ b/sound/firewire/dice/dice-stream.c
@@ -278,7 +278,9 @@ static void finish_session(struct snd_dice *dice, struct reg_params *tx_params,
snd_dice_transaction_clear_enable(dice);
}
-int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate)
+int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate,
+ unsigned int events_per_period,
+ unsigned int events_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -324,6 +326,11 @@ int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate)
&rx_params);
if (err < 0)
goto error;
+
+ err = amdtp_domain_set_events_per_period(&dice->domain,
+ events_per_period, events_per_buffer);
+ if (err < 0)
+ goto error;
}
return 0;
@@ -455,7 +462,7 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice)
goto error;
}
- err = amdtp_domain_start(&dice->domain);
+ err = amdtp_domain_start(&dice->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/dice/dice.h b/sound/firewire/dice/dice.h
index fa6d74303f54..16366773e22e 100644
--- a/sound/firewire/dice/dice.h
+++ b/sound/firewire/dice/dice.h
@@ -210,7 +210,9 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice);
void snd_dice_stream_stop_duplex(struct snd_dice *dice);
int snd_dice_stream_init_duplex(struct snd_dice *dice);
void snd_dice_stream_destroy_duplex(struct snd_dice *dice);
-int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate);
+int snd_dice_stream_reserve_duplex(struct snd_dice *dice, unsigned int rate,
+ unsigned int events_per_period,
+ unsigned int events_per_buffer);
void snd_dice_stream_update_duplex(struct snd_dice *dice);
int snd_dice_stream_detect_current_formats(struct snd_dice *dice);
diff --git a/sound/firewire/digi00x/digi00x-midi.c b/sound/firewire/digi00x/digi00x-midi.c
index 2b57ece89101..68eb8c39afa6 100644
--- a/sound/firewire/digi00x/digi00x-midi.c
+++ b/sound/firewire/digi00x/digi00x-midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
return err;
mutex_lock(&dg00x->mutex);
- err = snd_dg00x_stream_reserve_duplex(dg00x, 0);
+ err = snd_dg00x_stream_reserve_duplex(dg00x, 0, 0, 0);
if (err >= 0) {
++dg00x->substreams_counter;
err = snd_dg00x_stream_start_duplex(dg00x);
diff --git a/sound/firewire/digi00x/digi00x-pcm.c b/sound/firewire/digi00x/digi00x-pcm.c
index 18e561b26625..57cbce4fd836 100644
--- a/sound/firewire/digi00x/digi00x-pcm.c
+++ b/sound/firewire/digi00x/digi00x-pcm.c
@@ -100,14 +100,14 @@ static int pcm_init_hw_params(struct snd_dg00x *dg00x,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_dg00x *dg00x = substream->private_data;
+ struct amdtp_domain *d = &dg00x->domain;
enum snd_dg00x_clock clock;
bool detect;
- unsigned int rate;
int err;
err = snd_dg00x_stream_lock_try(dg00x);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(dg00x, substream);
if (err < 0)
@@ -127,19 +127,49 @@ static int pcm_open(struct snd_pcm_substream *substream)
}
}
+ mutex_lock(&dg00x->mutex);
+
+	// When the source of clock is not internal or any stream is reserved
+	// for transmission of PCM frames, the available sampling rate is
+	// limited to the current one.
if ((clock != SND_DG00X_CLOCK_INTERNAL) ||
- amdtp_stream_pcm_running(&dg00x->rx_stream) ||
- amdtp_stream_pcm_running(&dg00x->tx_stream)) {
+ (dg00x->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int rate;
+
err = snd_dg00x_stream_get_external_rate(dg00x, &rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&dg00x->mutex);
goto err_locked;
+ }
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&dg00x->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&dg00x->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&dg00x->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_dg00x_stream_lock_release(dg00x);
return err;
@@ -160,16 +190,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_dg00x *dg00x = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&dg00x->mutex);
- err = snd_dg00x_stream_reserve_duplex(dg00x, rate);
+ err = snd_dg00x_stream_reserve_duplex(dg00x, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++dg00x->substreams_counter;
mutex_unlock(&dg00x->mutex);
@@ -191,7 +223,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&dg00x->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -268,28 +300,28 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_dg00x *dg00x = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&dg00x->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&dg00x->domain, &dg00x->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_dg00x *dg00x = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&dg00x->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&dg00x->domain, &dg00x->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_dg00x *dg00x = substream->private_data;
- return amdtp_stream_pcm_ack(&dg00x->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&dg00x->domain, &dg00x->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_dg00x *dg00x = substream->private_data;
- return amdtp_stream_pcm_ack(&dg00x->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&dg00x->domain, &dg00x->rx_stream);
}
int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
@@ -304,7 +336,6 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -316,7 +347,6 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -330,6 +360,8 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
"%s PCM", dg00x->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/digi00x/digi00x-stream.c b/sound/firewire/digi00x/digi00x-stream.c
index d6a92460060f..405d6903bfbc 100644
--- a/sound/firewire/digi00x/digi00x-stream.c
+++ b/sound/firewire/digi00x/digi00x-stream.c
@@ -283,7 +283,9 @@ void snd_dg00x_stream_destroy_duplex(struct snd_dg00x *dg00x)
destroy_stream(dg00x, &dg00x->tx_stream);
}
-int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate)
+int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -315,6 +317,14 @@ int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate)
fw_iso_resources_free(&dg00x->rx_resources);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&dg00x->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&dg00x->rx_resources);
+ fw_iso_resources_free(&dg00x->tx_resources);
+ return err;
+ }
}
return 0;
@@ -365,7 +375,7 @@ int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x)
if (err < 0)
goto error;
- err = amdtp_domain_start(&dg00x->domain);
+ err = amdtp_domain_start(&dg00x->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h
index 8041c65f2736..129de8edd5ea 100644
--- a/sound/firewire/digi00x/digi00x.h
+++ b/sound/firewire/digi00x/digi00x.h
@@ -141,7 +141,9 @@ int snd_dg00x_stream_get_clock(struct snd_dg00x *dg00x,
int snd_dg00x_stream_check_external_clock(struct snd_dg00x *dg00x,
bool *detect);
int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x);
-int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate);
+int snd_dg00x_stream_reserve_duplex(struct snd_dg00x *dg00x, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_dg00x_stream_start_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_stop_duplex(struct snd_dg00x *dg00x);
void snd_dg00x_stream_update_duplex(struct snd_dg00x *dg00x);
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index 9eab3ad283ce..4e3bd9a2bec0 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -139,6 +139,7 @@ static int pcm_init_hw_params(struct snd_ff *ff,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_ff *ff = substream->private_data;
+ struct amdtp_domain *d = &ff->domain;
unsigned int rate;
enum snd_ff_clock_src src;
int i, err;
@@ -155,16 +156,21 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto release_lock;
+ mutex_lock(&ff->mutex);
+
+	// When the source of clock is not internal or any stream is reserved
+	// for transmission of PCM frames, the available sampling rate is
+	// limited to the current one.
if (src != SND_FF_CLOCK_SRC_INTERNAL) {
for (i = 0; i < CIP_SFC_COUNT; ++i) {
if (amdtp_rate_table[i] == rate)
break;
}
- /*
- * The unit is configured at sampling frequency which packet
- * streaming engine can't support.
- */
+
+ // The unit is configured at sampling frequency which packet
+ // streaming engine can't support.
if (i >= CIP_SFC_COUNT) {
+ mutex_unlock(&ff->mutex);
err = -EIO;
goto release_lock;
}
@@ -172,14 +178,34 @@ static int pcm_open(struct snd_pcm_substream *substream)
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
} else {
- if (amdtp_stream_pcm_running(&ff->rx_stream) ||
- amdtp_stream_pcm_running(&ff->tx_stream)) {
+ if (ff->substreams_counter > 0) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+
rate = amdtp_rate_table[ff->rx_stream.sfc];
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&ff->mutex);
+ goto release_lock;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&ff->mutex);
+ goto release_lock;
+ }
}
}
+ mutex_unlock(&ff->mutex);
+
snd_pcm_set_sync(substream);
return 0;
@@ -204,16 +230,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_ff *ff = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&ff->mutex);
- err = snd_ff_stream_reserve_duplex(ff, rate);
+ err = snd_ff_stream_reserve_duplex(ff, rate, frames_per_period,
+ frames_per_buffer);
if (err >= 0)
++ff->substreams_counter;
mutex_unlock(&ff->mutex);
@@ -235,7 +263,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&ff->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -312,28 +340,28 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_ff *ff = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&ff->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&ff->domain, &ff->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_ff *ff = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&ff->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&ff->domain, &ff->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_ff *ff = substream->private_data;
- return amdtp_stream_pcm_ack(&ff->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&ff->domain, &ff->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_ff *ff = substream->private_data;
- return amdtp_stream_pcm_ack(&ff->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&ff->domain, &ff->rx_stream);
}
int snd_ff_create_pcm_devices(struct snd_ff *ff)
@@ -348,7 +376,6 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops pcm_playback_ops = {
.open = pcm_open,
@@ -360,7 +387,6 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -374,6 +400,8 @@ int snd_ff_create_pcm_devices(struct snd_ff *ff)
"%s PCM", ff->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
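
A pattern shared by the pcm_open() conversions in this series: once another substream has reserved the AMDTP domain, the newly opened substream must not renegotiate the period or buffer geometry, so both are pinned by calling snd_pcm_hw_constraint_minmax() with min == max. A minimal sketch of that pattern, with frames_per_period/frames_per_buffer standing in for the values cached in the domain:

// Sketch only: pin PERIOD_SIZE and BUFFER_SIZE to the geometry already
// in use by the running domain. Callers unwind their own locking on
// failure, as the hunks above do.
#include <sound/pcm.h>

static int pin_period_and_buffer(struct snd_pcm_runtime *runtime,
				 unsigned int frames_per_period,
				 unsigned int frames_per_buffer)
{
	int err;

	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					   frames_per_period,
					   frames_per_period);
	if (err < 0)
		return err;

	return snd_pcm_hw_constraint_minmax(runtime,
					    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
					    frames_per_buffer,
					    frames_per_buffer);
}
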
diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
index e8e6f9fd6433..63b79c4a5405 100644
--- a/sound/firewire/fireface/ff-stream.c
+++ b/sound/firewire/fireface/ff-stream.c
@@ -106,7 +106,9 @@ void snd_ff_stream_destroy_duplex(struct snd_ff *ff)
destroy_stream(ff, &ff->tx_stream);
}
-int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate)
+int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
enum snd_ff_clock_src src;
@@ -150,6 +152,14 @@ int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate)
err = ff->spec->protocol->allocate_resources(ff, rate);
if (err < 0)
return err;
+
+ err = amdtp_domain_set_events_per_period(&ff->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&ff->tx_resources);
+ fw_iso_resources_free(&ff->rx_resources);
+ return err;
+ }
}
return 0;
@@ -174,6 +184,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
*/
if (!amdtp_stream_running(&ff->rx_stream)) {
int spd = fw_parent_device(ff->unit)->max_speed;
+ unsigned int ir_delay_cycle;
err = ff->spec->protocol->begin_session(ff, rate);
if (err < 0)
@@ -189,7 +200,14 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (err < 0)
goto error;
- err = amdtp_domain_start(&ff->domain);
+ // The device mostly postpones the start of transmission for several
+ // cycles after first receiving packets.
+ if (ff->spec->protocol == &snd_ff_protocol_ff800)
+ ir_delay_cycle = 800; // = 100 msec
+ else
+ ir_delay_cycle = 16; // = 2 msec
+
+ err = amdtp_domain_start(&ff->domain, ir_delay_cycle);
if (err < 0)
goto error;
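
The ir_delay_cycle values are straightforward to check: IEEE 1394 runs 8000 isochronous cycles per second (one cycle every 125 usec), so 800 cycles come to 100 msec for the Fireface 800 and 16 cycles to 2 msec for the other models. A small sketch of the conversion; CYCLES_PER_SECOND is a local name here, not a kernel constant:

// Sketch: millisecond delay -> 1394 isochronous cycles.
#define CYCLES_PER_SECOND	8000	/* 125 usec per cycle */

static unsigned int msec_to_iso_cycles(unsigned int msec)
{
	/* 100 msec -> 800 cycles, 2 msec -> 16 cycles */
	return msec * CYCLES_PER_SECOND / 1000;
}
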
diff --git a/sound/firewire/fireface/ff.h b/sound/firewire/fireface/ff.h
index b4c22ca6079e..dc7a20f75983 100644
--- a/sound/firewire/fireface/ff.h
+++ b/sound/firewire/fireface/ff.h
@@ -139,7 +139,9 @@ int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
enum snd_ff_stream_mode *mode);
int snd_ff_stream_init_duplex(struct snd_ff *ff);
void snd_ff_stream_destroy_duplex(struct snd_ff *ff);
-int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate);
+int snd_ff_stream_reserve_duplex(struct snd_ff *ff, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate);
void snd_ff_stream_stop_duplex(struct snd_ff *ff);
void snd_ff_stream_update_duplex(struct snd_ff *ff);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4cda297f8438..dda797209a27 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -207,7 +207,9 @@ int snd_efw_command_get_sampling_rate(struct snd_efw *efw, unsigned int *rate);
int snd_efw_command_set_sampling_rate(struct snd_efw *efw, unsigned int rate);
int snd_efw_stream_init_duplex(struct snd_efw *efw);
-int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate);
+int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_efw_stream_start_duplex(struct snd_efw *efw);
void snd_efw_stream_stop_duplex(struct snd_efw *efw);
void snd_efw_stream_update_duplex(struct snd_efw *efw);
diff --git a/sound/firewire/fireworks/fireworks_midi.c b/sound/firewire/fireworks/fireworks_midi.c
index a9f4a9630d15..84621e356848 100644
--- a/sound/firewire/fireworks/fireworks_midi.c
+++ b/sound/firewire/fireworks/fireworks_midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
goto end;
mutex_lock(&efw->mutex);
- err = snd_efw_stream_reserve_duplex(efw, 0);
+ err = snd_efw_stream_reserve_duplex(efw, 0, 0, 0);
if (err >= 0) {
++efw->substreams_counter;
err = snd_efw_stream_start_duplex(efw);
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index a7025dccc754..e69896d748df 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -173,13 +173,13 @@ end:
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_efw *efw = substream->private_data;
- unsigned int sampling_rate;
+ struct amdtp_domain *d = &efw->domain;
enum snd_efw_clock_source clock_source;
int err;
err = snd_efw_stream_lock_try(efw);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(efw, substream);
if (err < 0)
@@ -189,23 +189,49 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- /*
- * When source of clock is not internal or any PCM streams are running,
- * available sampling rate is limited at current sampling rate.
- */
+ mutex_lock(&efw->mutex);
+
+ // When the clock source is not internal or any stream is reserved for
+ // transmission of PCM frames, the available sampling rate is limited
+ // to the current one.
if ((clock_source != SND_EFW_CLOCK_SOURCE_INTERNAL) ||
- amdtp_stream_pcm_running(&efw->tx_stream) ||
- amdtp_stream_pcm_running(&efw->rx_stream)) {
+ (efw->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int sampling_rate;
+
err = snd_efw_command_get_sampling_rate(efw, &sampling_rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&efw->mutex);
goto err_locked;
+ }
substream->runtime->hw.rate_min = sampling_rate;
substream->runtime->hw.rate_max = sampling_rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&efw->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&efw->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&efw->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_efw_stream_lock_release(efw);
return err;
@@ -224,16 +250,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_efw *efw = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&efw->mutex);
- err = snd_efw_stream_reserve_duplex(efw, rate);
+ err = snd_efw_stream_reserve_duplex(efw, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++efw->substreams_counter;
mutex_unlock(&efw->mutex);
@@ -255,7 +283,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&efw->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -319,26 +347,28 @@ static int pcm_playback_trigger(struct snd_pcm_substream *substream, int cmd)
static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_efw *efw = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&efw->tx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&efw->domain, &efw->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_efw *efw = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&efw->rx_stream);
+
+ return amdtp_domain_stream_pcm_pointer(&efw->domain, &efw->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_efw *efw = substream->private_data;
- return amdtp_stream_pcm_ack(&efw->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&efw->domain, &efw->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_efw *efw = substream->private_data;
- return amdtp_stream_pcm_ack(&efw->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&efw->domain, &efw->rx_stream);
}
int snd_efw_create_pcm_devices(struct snd_efw *efw)
@@ -353,7 +383,6 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -365,7 +394,6 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -378,6 +406,8 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
snprintf(pcm->name, sizeof(pcm->name), "%s PCM", efw->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
end:
return err;
}
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index f2de304d2f26..2206af0fef42 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -181,7 +181,9 @@ static int keep_resources(struct snd_efw *efw, struct amdtp_stream *stream,
return cmp_connection_reserve(conn, amdtp_stream_get_max_payload(stream));
}
-int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate)
+int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -228,6 +230,14 @@ int snd_efw_stream_reserve_duplex(struct snd_efw *efw, unsigned int rate)
cmp_connection_release(&efw->in_conn);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&efw->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ cmp_connection_release(&efw->in_conn);
+ cmp_connection_release(&efw->out_conn);
+ return err;
+ }
}
return 0;
@@ -262,7 +272,7 @@ int snd_efw_stream_start_duplex(struct snd_efw *efw)
if (err < 0)
goto error;
- err = amdtp_domain_start(&efw->domain);
+ err = amdtp_domain_start(&efw->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index a16beda7c530..d9f1b962bfef 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -288,8 +288,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
struct isight *isight = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
@@ -337,7 +336,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int isight_start_streaming(struct isight *isight)
@@ -453,7 +452,6 @@ static int isight_create_pcm(struct isight *isight)
.prepare = isight_prepare,
.trigger = isight_trigger,
.pointer = isight_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -465,6 +463,8 @@ static int isight_create_pcm(struct isight *isight)
strcpy(pcm->name, "iSight");
isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
isight->pcm->ops = &ops;
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
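
The isight conversion shows the whole buffer-management migration in one place: the per-substream vmalloc helpers (snd_pcm_lib_alloc_vmalloc_buffer(), snd_pcm_lib_free_vmalloc_buffer(), the .page op) give way to preallocation with SNDRV_DMA_TYPE_VMALLOC declared once at PCM creation. A sketch of the resulting shape for a hypothetical driver:

// Sketch of the allocation pattern the series converges on. With
// SNDRV_DMA_TYPE_VMALLOC preallocation declared at PCM creation time,
// the .page callback becomes unnecessary and hw_params/hw_free shrink
// to the generic page helpers.
#include <sound/pcm.h>
#include <sound/pcm_params.h>

static int example_create_pcm(struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
					      NULL, 0, 0);
	return 0;
}

static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
}

static int example_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}
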
diff --git a/sound/firewire/motu/motu-midi.c b/sound/firewire/motu/motu-midi.c
index 46a0035df31e..2365f7dfde26 100644
--- a/sound/firewire/motu/motu-midi.c
+++ b/sound/firewire/motu/motu-midi.c
@@ -17,7 +17,7 @@ static int midi_open(struct snd_rawmidi_substream *substream)
mutex_lock(&motu->mutex);
- err = snd_motu_stream_reserve_duplex(motu, 0);
+ err = snd_motu_stream_reserve_duplex(motu, 0, 0, 0);
if (err >= 0) {
++motu->substreams_counter;
err = snd_motu_stream_start_duplex(motu);
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index aa2e584da6fe..349b4d09e84f 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -134,8 +134,8 @@ static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
const struct snd_motu_protocol *const protocol = motu->spec->protocol;
+ struct amdtp_domain *d = &motu->domain;
enum snd_motu_clock_source src;
- unsigned int rate;
int err;
err = snd_motu_stream_lock_try(motu);
@@ -152,28 +152,51 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- /*
- * When source of clock is not internal or any PCM streams are running,
- * available sampling rate is limited at current sampling rate.
- */
err = protocol->get_clock_source(motu, &src);
if (err < 0)
goto err_locked;
- if (src != SND_MOTU_CLOCK_SOURCE_INTERNAL ||
- amdtp_stream_pcm_running(&motu->tx_stream) ||
- amdtp_stream_pcm_running(&motu->rx_stream)) {
+
+ // When the clock source is neither internal nor SPH, or any stream is
+ // reserved for transmission of PCM frames, the available sampling rate
+ // is limited to the current one.
+ if ((src != SND_MOTU_CLOCK_SOURCE_INTERNAL &&
+ src != SND_MOTU_CLOCK_SOURCE_SPH) ||
+ (motu->substreams_counter > 0 && d->events_per_period > 0)) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int rate;
+
err = protocol->get_clock_rate(motu, &rate);
if (err < 0)
goto err_locked;
+
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&motu->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&motu->mutex);
+ goto err_locked;
+ }
+ }
}
snd_pcm_set_sync(substream);
mutex_unlock(&motu->mutex);
- return err;
+ return 0;
err_locked:
mutex_unlock(&motu->mutex);
snd_motu_stream_lock_release(motu);
@@ -195,16 +218,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_motu *motu = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&motu->mutex);
- err = snd_motu_stream_reserve_duplex(motu, rate);
+ err = snd_motu_stream_reserve_duplex(motu, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++motu->substreams_counter;
mutex_unlock(&motu->mutex);
@@ -226,7 +251,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&motu->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int capture_prepare(struct snd_pcm_substream *substream)
@@ -295,27 +320,27 @@ static snd_pcm_uframes_t capture_pointer(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_pointer(&motu->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&motu->domain, &motu->tx_stream);
}
static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_pointer(&motu->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&motu->domain, &motu->rx_stream);
}
static int capture_ack(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_ack(&motu->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&motu->domain, &motu->tx_stream);
}
static int playback_ack(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- return amdtp_stream_pcm_ack(&motu->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&motu->domain, &motu->rx_stream);
}
int snd_motu_create_pcm_devices(struct snd_motu *motu)
@@ -330,7 +355,6 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.trigger = capture_trigger,
.pointer = capture_pointer,
.ack = capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -342,7 +366,6 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.trigger = playback_trigger,
.pointer = playback_pointer,
.ack = playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -355,6 +378,8 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c
index ea46fb4c1b5a..187f6abd878c 100644
--- a/sound/firewire/motu/motu-proc.c
+++ b/sound/firewire/motu/motu-proc.c
@@ -16,9 +16,11 @@ static const char *const clock_names[] = {
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT] = "S/PDIF on optical interface",
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A] = "S/PDIF on optical interface A",
[SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B] = "S/PDIF on optical interface B",
- [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PCIF on coaxial interface",
+ [SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX] = "S/PDIF on coaxial interface",
[SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR] = "AESEBU on XLR interface",
[SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC] = "Word clock on BNC interface",
+ [SND_MOTU_CLOCK_SOURCE_SPH] = "Source packet header",
+ [SND_MOTU_CLOCK_SOURCE_UNKNOWN] = "Unknown",
};
static void proc_read_clock(struct snd_info_entry *entry,
diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
index 9e2f16eebe0a..619b6ae73f62 100644
--- a/sound/firewire/motu/motu-protocol-v2.c
+++ b/sound/firewire/motu/motu-protocol-v2.c
@@ -12,10 +12,8 @@
#define V2_CLOCK_RATE_SHIFT 3
#define V2_CLOCK_SRC_MASK 0x00000007
#define V2_CLOCK_SRC_SHIFT 0
-#define V2_CLOCK_TRAVELER_FETCH_DISABLE 0x04000000
-#define V2_CLOCK_TRAVELER_FETCH_ENABLE 0x03000000
-#define V2_CLOCK_8PRE_FETCH_DISABLE 0x02000000
-#define V2_CLOCK_8PRE_FETCH_ENABLE 0x00000000
+#define V2_CLOCK_FETCH_ENABLE 0x02000000
+#define V2_CLOCK_MODEL_SPECIFIC 0x04000000
#define V2_IN_OUT_CONF_OFFSET 0x0c04
#define V2_OPT_OUT_IFACE_MASK 0x00000c00
@@ -26,10 +24,20 @@
#define V2_OPT_IFACE_MODE_ADAT 1
#define V2_OPT_IFACE_MODE_SPDIF 2
+static int get_clock_rate(u32 data, unsigned int *rate)
+{
+ unsigned int index = (data & V2_CLOCK_RATE_MASK) >> V2_CLOCK_RATE_SHIFT;
+ if (index >= ARRAY_SIZE(snd_motu_clock_rates))
+ return -EIO;
+
+ *rate = snd_motu_clock_rates[index];
+
+ return 0;
+}
+
static int v2_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
{
__be32 reg;
- unsigned int index;
int err;
err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
@@ -37,13 +45,7 @@ static int v2_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
if (err < 0)
return err;
- index = (be32_to_cpu(reg) & V2_CLOCK_RATE_MASK) >> V2_CLOCK_RATE_SHIFT;
- if (index >= ARRAY_SIZE(snd_motu_clock_rates))
- return -EIO;
-
- *rate = snd_motu_clock_rates[index];
-
- return 0;
+ return get_clock_rate(be32_to_cpu(reg), rate);
}
static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
@@ -69,51 +71,44 @@ static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
data &= ~V2_CLOCK_RATE_MASK;
data |= i << V2_CLOCK_RATE_SHIFT;
- if (motu->spec == &snd_motu_spec_traveler) {
- data &= ~V2_CLOCK_TRAVELER_FETCH_ENABLE;
- data |= V2_CLOCK_TRAVELER_FETCH_DISABLE;
- }
-
reg = cpu_to_be32(data);
return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET, &reg,
sizeof(reg));
}
-static int v2_get_clock_source(struct snd_motu *motu,
- enum snd_motu_clock_source *src)
+static int get_clock_source(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
{
- __be32 reg;
- unsigned int index;
- int err;
-
- err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
- sizeof(reg));
- if (err < 0)
- return err;
-
- index = be32_to_cpu(reg) & V2_CLOCK_SRC_MASK;
+ unsigned int index = data & V2_CLOCK_SRC_MASK;
if (index > 5)
return -EIO;
- /* To check the configuration of optical interface. */
- err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET, &reg,
- sizeof(reg));
- if (err < 0)
- return err;
-
switch (index) {
case 0:
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
break;
case 1:
+ {
+ __be32 reg;
+
+ // To check the configuration of the optical interface.
+ int err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET,
+ &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+
if (be32_to_cpu(reg) & 0x00000200)
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT;
else
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT;
break;
+ }
case 2:
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
break;
+ case 3:
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
+ break;
case 4:
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
break;
@@ -127,44 +122,65 @@ static int v2_get_clock_source(struct snd_motu *motu,
return 0;
}
+static int v2_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src)
+{
+ __be32 reg;
+ int err;
+
+ err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+
+ return get_clock_source(motu, be32_to_cpu(reg), src);
+}
+
static int v2_switch_fetching_mode(struct snd_motu *motu, bool enable)
{
+ enum snd_motu_clock_source src;
__be32 reg;
u32 data;
int err = 0;
- if (motu->spec == &snd_motu_spec_traveler ||
- motu->spec == &snd_motu_spec_8pre) {
- err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET,
- &reg, sizeof(reg));
+ // 828mkII implements Altera ACEX 1K EP1K30. Nothing to do.
+ if (motu->spec == &snd_motu_spec_828mk2)
+ return 0;
+
+ err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
+
+ err = get_clock_source(motu, data, &src);
+ if (err < 0)
+ return err;
+
+ data &= ~(V2_CLOCK_FETCH_ENABLE | V2_CLOCK_MODEL_SPECIFIC);
+ if (enable)
+ data |= V2_CLOCK_FETCH_ENABLE;
+
+ if (motu->spec->flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4) {
+ // Expected for Traveler and 896HD, which implement Altera
+ // Cyclone EP1C3.
+ data |= V2_CLOCK_MODEL_SPECIFIC;
+ } else {
+ // For UltraLite and 8pre, which implement Xilinx Spartan
+ // XC3S200.
+ unsigned int rate;
+
+ err = get_clock_rate(data, &rate);
if (err < 0)
return err;
- data = be32_to_cpu(reg);
-
- if (motu->spec == &snd_motu_spec_traveler) {
- data &= ~(V2_CLOCK_TRAVELER_FETCH_DISABLE |
- V2_CLOCK_TRAVELER_FETCH_ENABLE);
-
- if (enable)
- data |= V2_CLOCK_TRAVELER_FETCH_ENABLE;
- else
- data |= V2_CLOCK_TRAVELER_FETCH_DISABLE;
- } else if (motu->spec == &snd_motu_spec_8pre) {
- data &= ~(V2_CLOCK_8PRE_FETCH_DISABLE |
- V2_CLOCK_8PRE_FETCH_ENABLE);
-
- if (enable)
- data |= V2_CLOCK_8PRE_FETCH_DISABLE;
- else
- data |= V2_CLOCK_8PRE_FETCH_ENABLE;
- }
- reg = cpu_to_be32(data);
- err = snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET,
- &reg, sizeof(reg));
+ if (src == SND_MOTU_CLOCK_SOURCE_SPH && rate > 48000)
+ data |= V2_CLOCK_MODEL_SPECIFIC;
}
- return err;
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
}
static void calculate_fixed_part(struct snd_motu_packet_format *formats,
@@ -191,7 +207,7 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[1] += 2;
}
} else {
- if (flags & SND_MOTU_SPEC_RX_SEPARETED_MAIN) {
+ if (flags & SND_MOTU_SPEC_RX_SEPARATED_MAIN) {
pcm_chunks[0] += 2;
pcm_chunks[1] += 2;
}
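
The refactoring of motu-protocol-v2.c separates register decoding from bus transactions so that v2_switch_fetching_mode() can reuse get_clock_rate()/get_clock_source() on a value it has already read. A sketch of the decoding side; the rate mask value is an assumption (bits 5:3, consistent with V2_CLOCK_RATE_SHIFT above), and both fields are taken from a register value already converted to CPU byte order:

// Sketch: decode the V2 clock status register. EX_CLOCK_RATE_MASK is
// an assumed value derived from the shift of 3 shown above; the source
// mask matches the definition in the hunk.
#define EX_CLOCK_RATE_MASK	0x00000038	/* assumed */
#define EX_CLOCK_RATE_SHIFT	3
#define EX_CLOCK_SRC_MASK	0x00000007

static void decode_clock_status(u32 data, unsigned int *rate_index,
				unsigned int *src_index)
{
	*rate_index = (data & EX_CLOCK_RATE_MASK) >> EX_CLOCK_RATE_SHIFT;
	*src_index = data & EX_CLOCK_SRC_MASK;
}
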
diff --git a/sound/firewire/motu/motu-protocol-v3.c b/sound/firewire/motu/motu-protocol-v3.c
index 5eafa506e8a9..d1545e2b5caa 100644
--- a/sound/firewire/motu/motu-protocol-v3.c
+++ b/sound/firewire/motu/motu-protocol-v3.c
@@ -104,6 +104,8 @@ static int v3_get_clock_source(struct snd_motu *motu,
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
} else if (val == 0x01) {
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
+ } else if (val == 0x02) {
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
} else if (val == 0x10) {
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
} else if (val == 0x18 || val == 0x19) {
@@ -187,7 +189,7 @@ static void calculate_fixed_part(struct snd_motu_packet_format *formats,
pcm_chunks[1] += 2;
}
} else {
- if (flags & SND_MOTU_SPEC_RX_SEPARETED_MAIN) {
+ if (flags & SND_MOTU_SPEC_RX_SEPARATED_MAIN) {
pcm_chunks[0] += 2;
pcm_chunks[1] += 2;
}
diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
index 813e38e6a86e..a17ddceb1bec 100644
--- a/sound/firewire/motu/motu-stream.c
+++ b/sound/firewire/motu/motu-stream.c
@@ -133,7 +133,9 @@ int snd_motu_stream_cache_packet_formats(struct snd_motu *motu)
return 0;
}
-int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate)
+int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -171,6 +173,14 @@ int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate)
fw_iso_resources_free(&motu->tx_resources);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&motu->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&motu->tx_resources);
+ fw_iso_resources_free(&motu->rx_resources);
+ return err;
+ }
}
return 0;
@@ -250,7 +260,7 @@ int snd_motu_stream_start_duplex(struct snd_motu *motu)
if (err < 0)
goto stop_streams;
- err = amdtp_domain_start(&motu->domain);
+ err = amdtp_domain_start(&motu->domain, 0);
if (err < 0)
goto stop_streams;
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 72908b4de77c..f2080d720aa9 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -172,13 +172,13 @@ static void motu_bus_update(struct fw_unit *unit)
snd_motu_transaction_reregister(motu);
}
-static const struct snd_motu_spec motu_828mk2 = {
+const struct snd_motu_spec snd_motu_spec_828mk2 = {
.name = "828mk2",
.protocol = &snd_motu_protocol_v2,
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN |
SND_MOTU_SPEC_HAS_OPT_IFACE_A |
SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_2ND_Q,
@@ -187,7 +187,7 @@ static const struct snd_motu_spec motu_828mk2 = {
.analog_out_ports = 8,
};
-const struct snd_motu_spec snd_motu_spec_traveler = {
+static const struct snd_motu_spec motu_traveler = {
.name = "Traveler",
.protocol = &snd_motu_protocol_v2,
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
@@ -202,7 +202,20 @@ const struct snd_motu_spec snd_motu_spec_traveler = {
.analog_out_ports = 8,
};
-const struct snd_motu_spec snd_motu_spec_8pre = {
+static const struct snd_motu_spec motu_ultralite = {
+ .name = "UltraLite",
+ .protocol = &snd_motu_protocol_v2,
+ .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
+ SND_MOTU_SPEC_TX_MICINST_CHUNK | // padding.
+ SND_MOTU_SPEC_TX_RETURN_CHUNK |
+ SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN,
+ .analog_in_ports = 8,
+ .analog_out_ports = 8,
+};
+
+static const struct snd_motu_spec motu_8pre = {
.name = "8pre",
.protocol = &snd_motu_protocol_v2,
// In tx, use coax chunks for mix-return 1/2. In rx, use coax chunks for
@@ -224,7 +237,7 @@ static const struct snd_motu_spec motu_828mk3 = {
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
SND_MOTU_SPEC_TX_REVERB_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN |
SND_MOTU_SPEC_HAS_OPT_IFACE_A |
SND_MOTU_SPEC_HAS_OPT_IFACE_B |
SND_MOTU_SPEC_RX_MIDI_3RD_Q |
@@ -240,7 +253,7 @@ static const struct snd_motu_spec motu_audio_express = {
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN |
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN |
SND_MOTU_SPEC_RX_MIDI_2ND_Q |
SND_MOTU_SPEC_TX_MIDI_3RD_Q,
.analog_in_ports = 2,
@@ -253,7 +266,7 @@ static const struct snd_motu_spec motu_4pre = {
.flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
SND_MOTU_SPEC_TX_MICINST_CHUNK |
SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARETED_MAIN,
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN,
.analog_in_ports = 2,
.analog_out_ports = 2,
};
@@ -270,9 +283,10 @@ static const struct snd_motu_spec motu_4pre = {
}
static const struct ieee1394_device_id motu_id_table[] = {
- SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
- SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
- SND_MOTU_DEV_ENTRY(0x00000f, &snd_motu_spec_8pre),
+ SND_MOTU_DEV_ENTRY(0x000003, &snd_motu_spec_828mk2),
+ SND_MOTU_DEV_ENTRY(0x000009, &motu_traveler),
+ SND_MOTU_DEV_ENTRY(0x00000d, &motu_ultralite),
+ SND_MOTU_DEV_ENTRY(0x00000f, &motu_8pre),
SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
diff --git a/sound/firewire/motu/motu.h b/sound/firewire/motu/motu.h
index 350ee2c16f4a..6efbde405a0d 100644
--- a/sound/firewire/motu/motu.h
+++ b/sound/firewire/motu/motu.h
@@ -86,7 +86,7 @@ enum snd_motu_spec_flags {
SND_MOTU_SPEC_RX_MIDI_3RD_Q = 0x0200,
SND_MOTU_SPEC_TX_MIDI_2ND_Q = 0x0400,
SND_MOTU_SPEC_TX_MIDI_3RD_Q = 0x0800,
- SND_MOTU_SPEC_RX_SEPARETED_MAIN = 0x1000,
+ SND_MOTU_SPEC_RX_SEPARATED_MAIN = 0x1000,
};
#define SND_MOTU_CLOCK_RATE_COUNT 6
@@ -104,6 +104,7 @@ enum snd_motu_clock_source {
SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX,
SND_MOTU_CLOCK_SOURCE_AESEBU_ON_XLR,
SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC,
+ SND_MOTU_CLOCK_SOURCE_SPH,
SND_MOTU_CLOCK_SOURCE_UNKNOWN,
};
@@ -129,8 +130,7 @@ struct snd_motu_spec {
extern const struct snd_motu_protocol snd_motu_protocol_v2;
extern const struct snd_motu_protocol snd_motu_protocol_v3;
-extern const struct snd_motu_spec snd_motu_spec_traveler;
-extern const struct snd_motu_spec snd_motu_spec_8pre;
+extern const struct snd_motu_spec snd_motu_spec_828mk2;
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
@@ -154,7 +154,9 @@ void snd_motu_transaction_unregister(struct snd_motu *motu);
int snd_motu_stream_init_duplex(struct snd_motu *motu);
void snd_motu_stream_destroy_duplex(struct snd_motu *motu);
int snd_motu_stream_cache_packet_formats(struct snd_motu *motu);
-int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate);
+int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_motu_stream_start_duplex(struct snd_motu *motu);
void snd_motu_stream_stop_duplex(struct snd_motu *motu);
int snd_motu_stream_lock_try(struct snd_motu *motu);
diff --git a/sound/firewire/oxfw/oxfw-midi.c b/sound/firewire/oxfw/oxfw-midi.c
index 9bdec08cb8ea..775cba3f1f02 100644
--- a/sound/firewire/oxfw/oxfw-midi.c
+++ b/sound/firewire/oxfw/oxfw-midi.c
@@ -18,7 +18,7 @@ static int midi_capture_open(struct snd_rawmidi_substream *substream)
mutex_lock(&oxfw->mutex);
- err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 0, 0);
+ err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 0, 0, 0, 0);
if (err >= 0) {
++oxfw->substreams_count;
err = snd_oxfw_stream_start_duplex(oxfw);
@@ -45,7 +45,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream)
mutex_lock(&oxfw->mutex);
- err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream, 0, 0);
+ err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream, 0, 0, 0, 0);
if (err >= 0) {
++oxfw->substreams_count;
err = snd_oxfw_stream_start_duplex(oxfw);
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 7c6d1c277d4d..9124603edabe 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -170,30 +170,56 @@ end:
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_oxfw *oxfw = substream->private_data;
+ struct amdtp_domain *d = &oxfw->domain;
int err;
err = snd_oxfw_stream_lock_try(oxfw);
if (err < 0)
- goto end;
+ return err;
err = init_hw_params(oxfw, substream);
if (err < 0)
goto err_locked;
- /*
- * When any PCM streams are already running, the available sampling
- * rate is limited at current value.
- */
- if (amdtp_stream_pcm_running(&oxfw->tx_stream) ||
- amdtp_stream_pcm_running(&oxfw->rx_stream)) {
+ mutex_lock(&oxfw->mutex);
+
+ // When any stream is reserved for transmission of PCM frames, the
+ // available sampling rate is limited to the current one.
+ if (oxfw->substreams_count > 0 && d->events_per_period > 0) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+
err = limit_to_current_params(substream);
- if (err < 0)
- goto end;
+ if (err < 0) {
+ mutex_unlock(&oxfw->mutex);
+ goto err_locked;
+ }
+
+ if (frames_per_period > 0) {
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&oxfw->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&oxfw->mutex);
+ goto err_locked;
+ }
+ }
}
+ mutex_unlock(&oxfw->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_oxfw_stream_lock_release(oxfw);
return err;
@@ -213,18 +239,20 @@ static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_oxfw *oxfw = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
unsigned int channels = params_channels(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&oxfw->mutex);
err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream,
- rate, channels);
+ rate, channels, frames_per_period,
+ frames_per_buffer);
if (err >= 0)
++oxfw->substreams_count;
mutex_unlock(&oxfw->mutex);
@@ -238,18 +266,20 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_oxfw *oxfw = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
unsigned int channels = params_channels(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&oxfw->mutex);
err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
- rate, channels);
+ rate, channels, frames_per_period,
+ frames_per_buffer);
if (err >= 0)
++oxfw->substreams_count;
mutex_unlock(&oxfw->mutex);
@@ -271,7 +301,7 @@ static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&oxfw->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_playback_hw_free(struct snd_pcm_substream *substream)
{
@@ -286,7 +316,7 @@ static int pcm_playback_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&oxfw->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -361,27 +391,27 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstm)
{
struct snd_oxfw *oxfw = sbstm->private_data;
- return amdtp_stream_pcm_pointer(&oxfw->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&oxfw->domain, &oxfw->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstm)
{
struct snd_oxfw *oxfw = sbstm->private_data;
- return amdtp_stream_pcm_pointer(&oxfw->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&oxfw->domain, &oxfw->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_oxfw *oxfw = substream->private_data;
- return amdtp_stream_pcm_ack(&oxfw->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&oxfw->domain, &oxfw->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_oxfw *oxfw = substream->private_data;
- return amdtp_stream_pcm_ack(&oxfw->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&oxfw->domain, &oxfw->rx_stream);
}
int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
@@ -396,7 +426,6 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -408,7 +437,6 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
unsigned int cap = 0;
@@ -426,6 +454,8 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
if (cap > 0)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 3c9a796b6526..501a80094bf7 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -244,7 +244,9 @@ static int keep_resources(struct snd_oxfw *oxfw, struct amdtp_stream *stream)
int snd_oxfw_stream_reserve_duplex(struct snd_oxfw *oxfw,
struct amdtp_stream *stream,
- unsigned int rate, unsigned int pcm_channels)
+ unsigned int rate, unsigned int pcm_channels,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
struct snd_oxfw_stream_formation formation;
enum avc_general_plug_dir dir;
@@ -305,6 +307,15 @@ int snd_oxfw_stream_reserve_duplex(struct snd_oxfw *oxfw,
return err;
}
}
+
+ err = amdtp_domain_set_events_per_period(&oxfw->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ cmp_connection_release(&oxfw->in_conn);
+ if (oxfw->has_output)
+ cmp_connection_release(&oxfw->out_conn);
+ return err;
+ }
}
return 0;
@@ -344,7 +355,7 @@ int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw)
}
}
- err = amdtp_domain_start(&oxfw->domain);
+ err = amdtp_domain_start(&oxfw->domain, 0);
if (err < 0)
goto error;
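
Every reserve_duplex() conversion in this series follows the same unwind discipline: reserve connections or iso resources first, then apply amdtp_domain_set_events_per_period(), and release exactly what was taken, in reverse order, if the domain call fails. A generic sketch; acquire_a/acquire_b/release_a/release_b are hypothetical stand-ins for the driver-specific pairs such as cmp_connection_reserve()/cmp_connection_release() or fw_iso_resources_free():

// Sketch of the unwind ordering used in the reserve_duplex() hunks.
static int acquire_a(void) { return 0; }	/* hypothetical */
static int acquire_b(void) { return 0; }	/* hypothetical */
static void release_a(void) { }			/* hypothetical */
static void release_b(void) { }			/* hypothetical */

static int reserve_with_domain(struct amdtp_domain *d,
			       unsigned int frames_per_period,
			       unsigned int frames_per_buffer)
{
	int err;

	err = acquire_a();
	if (err < 0)
		return err;

	err = acquire_b();
	if (err < 0)
		goto err_a;

	err = amdtp_domain_set_events_per_period(d, frames_per_period,
						 frames_per_buffer);
	if (err < 0)
		goto err_b;

	return 0;
err_b:
	release_b();
err_a:
	release_a();
	return err;
}
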
diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h
index c9627b8c5d6e..c30e537087b0 100644
--- a/sound/firewire/oxfw/oxfw.h
+++ b/sound/firewire/oxfw/oxfw.h
@@ -103,7 +103,9 @@ int avc_general_inquiry_sig_fmt(struct fw_unit *unit, unsigned int rate,
int snd_oxfw_stream_init_duplex(struct snd_oxfw *oxfw);
int snd_oxfw_stream_reserve_duplex(struct snd_oxfw *oxfw,
struct amdtp_stream *stream,
- unsigned int rate, unsigned int pcm_channels);
+ unsigned int rate, unsigned int pcm_channels,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_oxfw_stream_start_duplex(struct snd_oxfw *oxfw);
void snd_oxfw_stream_stop_duplex(struct snd_oxfw *oxfw);
void snd_oxfw_stream_destroy_duplex(struct snd_oxfw *oxfw);
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index 2377732caa52..8e9b444c8bff 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -43,13 +43,13 @@ static int pcm_init_hw_params(struct snd_tscm *tscm,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_tscm *tscm = substream->private_data;
+ struct amdtp_domain *d = &tscm->domain;
enum snd_tscm_clock clock;
- unsigned int rate;
int err;
err = snd_tscm_stream_lock_try(tscm);
if (err < 0)
- goto end;
+ return err;
err = pcm_init_hw_params(tscm, substream);
if (err < 0)
@@ -59,19 +59,46 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- if (clock != SND_TSCM_CLOCK_INTERNAL ||
- amdtp_stream_pcm_running(&tscm->rx_stream) ||
- amdtp_stream_pcm_running(&tscm->tx_stream)) {
+ mutex_lock(&tscm->mutex);
+
+ // When the clock source is not internal or any stream is reserved for
+ // transmission of PCM frames, the available sampling rate is limited
+ // to the current one.
+ if (clock != SND_TSCM_CLOCK_INTERNAL || tscm->substreams_counter > 0) {
+ unsigned int frames_per_period = d->events_per_period;
+ unsigned int frames_per_buffer = d->events_per_buffer;
+ unsigned int rate;
+
err = snd_tscm_stream_get_rate(tscm, &rate);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&tscm->mutex);
goto err_locked;
+ }
substream->runtime->hw.rate_min = rate;
substream->runtime->hw.rate_max = rate;
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ frames_per_period, frames_per_period);
+ if (err < 0) {
+ mutex_unlock(&tscm->mutex);
+ goto err_locked;
+ }
+
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ frames_per_buffer, frames_per_buffer);
+ if (err < 0) {
+ mutex_unlock(&tscm->mutex);
+ goto err_locked;
+ }
}
+ mutex_unlock(&tscm->mutex);
+
snd_pcm_set_sync(substream);
-end:
- return err;
+
+ return 0;
err_locked:
snd_tscm_stream_lock_release(tscm);
return err;
@@ -92,16 +119,18 @@ static int pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_tscm *tscm = substream->private_data;
int err;
- err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
unsigned int rate = params_rate(hw_params);
+ unsigned int frames_per_period = params_period_size(hw_params);
+ unsigned int frames_per_buffer = params_buffer_size(hw_params);
mutex_lock(&tscm->mutex);
- err = snd_tscm_stream_reserve_duplex(tscm, rate);
+ err = snd_tscm_stream_reserve_duplex(tscm, rate,
+ frames_per_period, frames_per_buffer);
if (err >= 0)
++tscm->substreams_counter;
mutex_unlock(&tscm->mutex);
@@ -123,7 +152,7 @@ static int pcm_hw_free(struct snd_pcm_substream *substream)
mutex_unlock(&tscm->mutex);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int pcm_capture_prepare(struct snd_pcm_substream *substream)
@@ -200,28 +229,28 @@ static snd_pcm_uframes_t pcm_capture_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_tscm *tscm = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&tscm->tx_stream);
+ return amdtp_domain_stream_pcm_pointer(&tscm->domain, &tscm->tx_stream);
}
static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
{
struct snd_tscm *tscm = sbstrm->private_data;
- return amdtp_stream_pcm_pointer(&tscm->rx_stream);
+ return amdtp_domain_stream_pcm_pointer(&tscm->domain, &tscm->rx_stream);
}
static int pcm_capture_ack(struct snd_pcm_substream *substream)
{
struct snd_tscm *tscm = substream->private_data;
- return amdtp_stream_pcm_ack(&tscm->tx_stream);
+ return amdtp_domain_stream_pcm_ack(&tscm->domain, &tscm->tx_stream);
}
static int pcm_playback_ack(struct snd_pcm_substream *substream)
{
struct snd_tscm *tscm = substream->private_data;
- return amdtp_stream_pcm_ack(&tscm->rx_stream);
+ return amdtp_domain_stream_pcm_ack(&tscm->domain, &tscm->rx_stream);
}
int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
@@ -236,7 +265,6 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
.ack = pcm_capture_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
.open = pcm_open,
@@ -248,7 +276,6 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
.ack = pcm_playback_ack,
- .page = snd_pcm_lib_get_vmalloc_page,
};
struct snd_pcm *pcm;
int err;
@@ -262,6 +289,8 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
"%s PCM", tscm->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index adf69a520b80..eb07e1decf9b 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -383,7 +383,9 @@ void snd_tscm_stream_destroy_duplex(struct snd_tscm *tscm)
destroy_stream(tscm, &tscm->tx_stream);
}
-int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate)
+int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer)
{
unsigned int curr_rate;
int err;
@@ -413,6 +415,14 @@ int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate)
fw_iso_resources_free(&tscm->tx_resources);
return err;
}
+
+ err = amdtp_domain_set_events_per_period(&tscm->domain,
+ frames_per_period, frames_per_buffer);
+ if (err < 0) {
+ fw_iso_resources_free(&tscm->tx_resources);
+ fw_iso_resources_free(&tscm->rx_resources);
+ return err;
+ }
}
return 0;
@@ -463,7 +473,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
if (err < 0)
goto error;
- err = amdtp_domain_start(&tscm->domain);
+ err = amdtp_domain_start(&tscm->domain, 0);
if (err < 0)
return err;
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 15bd335fa07f..78b7a08986a1 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -168,7 +168,9 @@ int snd_tscm_stream_get_clock(struct snd_tscm *tscm,
int snd_tscm_stream_init_duplex(struct snd_tscm *tscm);
void snd_tscm_stream_update_duplex(struct snd_tscm *tscm);
void snd_tscm_stream_destroy_duplex(struct snd_tscm *tscm);
-int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate);
+int snd_tscm_stream_reserve_duplex(struct snd_tscm *tscm, unsigned int rate,
+ unsigned int frames_per_period,
+ unsigned int frames_per_buffer);
int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate);
void snd_tscm_stream_stop_duplex(struct snd_tscm *tscm);
diff --git a/sound/hda/Kconfig b/sound/hda/Kconfig
index 3d33fc1757ba..b0c88fe040ee 100644
--- a/sound/hda/Kconfig
+++ b/sound/hda/Kconfig
@@ -34,6 +34,12 @@ config SND_HDA_PREALLOC_SIZE
via a proc file (/proc/asound/card*/pcm*/sub*/prealloc), too.
config SND_INTEL_NHLT
- tristate
+ bool
# this config should be selected only for Intel ACPI platforms.
- # A fallback is provided so that the code compiles in all cases.
\ No newline at end of file
+ # A fallback is provided so that the code compiles in all cases.
+
+config SND_INTEL_DSP_CONFIG
+ tristate
+ select SND_INTEL_NHLT if ACPI
+ # this config should be selected only for Intel DSP platforms.
+ # A fallback is provided so that the code compiles in all cases.
diff --git a/sound/hda/Makefile b/sound/hda/Makefile
index 8560f6ef1b19..601e617918b8 100644
--- a/sound/hda/Makefile
+++ b/sound/hda/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_SND_HDA_CORE) += snd-hda-core.o
#extended hda
obj-$(CONFIG_SND_HDA_EXT_CORE) += ext/
-snd-intel-nhlt-objs := intel-nhlt.o
-obj-$(CONFIG_SND_INTEL_NHLT) += snd-intel-nhlt.o
+snd-intel-dspcfg-objs := intel-dsp-config.o
+snd-intel-dspcfg-$(CONFIG_SND_INTEL_NHLT) += intel-nhlt.o
+obj-$(CONFIG_SND_INTEL_DSP_CONFIG) += snd-intel-dspcfg.o
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 286361ecd640..906b1e20bae0 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -363,6 +363,7 @@ static const struct regmap_config hda_regmap_cfg = {
.reg_write = hda_reg_write,
.use_single_read = true,
.use_single_write = true,
+ .disable_locking = true,
};
/**
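
The hdac_regmap.c change opts the HDA regmap out of regmap's internal per-map locking. Disabling it is safe only when the caller serializes all register access itself, which the HD-audio core is presumed to do here. A sketch of such an opt-out for a hypothetical device:

// Sketch: a regmap_config that disables regmap's own locking. Only
// valid when, as assumed for the HDA core, callers provide their own
// serialization around every read and write.
#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.name = "example",
	.reg_bits = 32,
	.val_bits = 32,
	.use_single_read = true,
	.use_single_write = true,
	.disable_locking = true,
};
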
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
new file mode 100644
index 000000000000..be1df80ed013
--- /dev/null
+++ b/sound/hda/intel-dsp-config.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Jaroslav Kysela <perex@perex.cz>
+
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include <sound/intel-dsp-config.h>
+#include <sound/intel-nhlt.h>
+
+static int dsp_driver;
+
+module_param(dsp_driver, int, 0444);
+MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
+
+#define FLAG_SST BIT(0)
+#define FLAG_SOF BIT(1)
+#define FLAG_SOF_ONLY_IF_DMIC BIT(16)
+
+struct config_entry {
+ u32 flags;
+ u16 device;
+ const struct dmi_system_id *dmi_table;
+};
+
+/*
+ * configuration table
+ * - the order of similar PCI ID entries is important!
+ * - the first successful match will win
+ */
+static const struct config_entry config_table[] = {
+/* Merrifield */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x119a,
+ },
+#endif
+/* Broxton-T */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x1a98,
+ },
+#endif
+/*
+ * Apollolake (Broxton-P)
+ * the legacy HDaudio driver is used except on Up Squared (SOF) and
+ * Chromebooks (SST)
+ */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x5a98,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Up Squared",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ DMI_MATCH(DMI_BOARD_NAME, "UP-APL01"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL)
+ {
+ .flags = FLAG_SST,
+ .device = 0x5a98,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+/*
+ * Skylake and Kabylake use legacy HDaudio driver except for Google
+ * Chromebooks (SST)
+ */
+
+/* Sunrise Point-LP */
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL)
+ {
+ .flags = FLAG_SST,
+ .device = 0x9d70,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+/* Kabylake-LP */
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL)
+ {
+ .flags = FLAG_SST,
+ .device = 0x9d71,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+
+/*
+ * Geminilake uses legacy HDaudio driver except for Google
+ * Chromebooks
+ */
+/* Geminilake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x3198,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+#endif
+
+/*
+ * CoffeeLake, CannonLake, CometLake, IceLake, TigerLake use legacy
+ * HDaudio driver except for Google Chromebooks and when DMICs are
+ * present. Two cases are required since Coreboot does not expose NHLT
+ * tables.
+ *
+ * When the Chromebook quirk is not present, it's based on information
+ * that no such device exists. When the quirk is present, it could be
+ * either based on product information or a placeholder.
+ */
+
+/* Cannonlake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x9dc8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x9dc8,
+ },
+#endif
+
+/* Coffeelake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0xa348,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0xa348,
+ },
+#endif
+
+/* Cometlake-LP */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x02c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x02c8,
+ },
+#endif
+/* Cometlake-H */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x06c8,
+ },
+#endif
+
+/* Icelake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x34c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x34c8,
+ },
+#endif
+
+/* Tigerlake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0xa0c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .ident = "Google Chromebooks",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ }
+ },
+ {}
+ }
+ },
+
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0xa0c8,
+ },
+#endif
+
+/* Elkhart Lake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .device = 0x4b55,
+ },
+#endif
+
+};
+
+static const struct config_entry *snd_intel_dsp_find_config
+ (struct pci_dev *pci, const struct config_entry *table, u32 len)
+{
+ u16 device;
+
+ device = pci->device;
+ for (; len > 0; len--, table++) {
+ if (table->device != device)
+ continue;
+ if (table->dmi_table && !dmi_check_system(table->dmi_table))
+ continue;
+ return table;
+ }
+ return NULL;
+}
+
+static int snd_intel_dsp_check_dmic(struct pci_dev *pci)
+{
+ struct nhlt_acpi_table *nhlt;
+ int ret = 0;
+
+ nhlt = intel_nhlt_init(&pci->dev);
+ if (nhlt) {
+ if (intel_nhlt_get_dmic_geo(&pci->dev, nhlt))
+ ret = 1;
+ intel_nhlt_free(nhlt);
+ }
+ return ret;
+}
+
+int snd_intel_dsp_driver_probe(struct pci_dev *pci)
+{
+ const struct config_entry *cfg;
+
+ /* Intel vendor only */
+ if (pci->vendor != 0x8086)
+ return SND_INTEL_DSP_DRIVER_ANY;
+
+ if (dsp_driver > 0 && dsp_driver <= SND_INTEL_DSP_DRIVER_LAST)
+ return dsp_driver;
+
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, use legacy driver
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: use DSP or legacy mode
+ */
+ if (pci->class == 0x040300)
+ return SND_INTEL_DSP_DRIVER_LEGACY;
+ if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDA legacy driver\n", pci->class);
+ return SND_INTEL_DSP_DRIVER_LEGACY;
+ }
+
+ dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
+
+ /* find the configuration for the specific device */
+ cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table));
+ if (!cfg)
+ return SND_INTEL_DSP_DRIVER_ANY;
+
+ if (cfg->flags & FLAG_SOF) {
+ if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC) {
+ if (snd_intel_dsp_check_dmic(pci)) {
+ dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+ } else {
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+ }
+
+ if (cfg->flags & FLAG_SST)
+ return SND_INTEL_DSP_DRIVER_SST;
+
+ return SND_INTEL_DSP_DRIVER_LEGACY;
+}
+EXPORT_SYMBOL_GPL(snd_intel_dsp_driver_probe);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel DSP config driver");
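
A minimal sketch of how a controller driver is expected to consume this new
helper; the wrapper name below is a hypothetical placeholder, and only
snd_intel_dsp_driver_probe() and the SND_INTEL_DSP_DRIVER_* constants come
from this patch (the real consumer is azx_probe() in hda_intel.c, further
down in this diff):

    #include <linux/pci.h>
    #include <sound/intel-dsp-config.h>

    /* hypothetical legacy-style probe gate (a sketch, not part of the patch) */
    static int example_gate_probe(struct pci_dev *pci)
    {
            int ret = snd_intel_dsp_driver_probe(pci);

            /* another DSP driver (SOF/SST) should own this device: back off */
            if (ret != SND_INTEL_DSP_DRIVER_ANY &&
                ret != SND_INTEL_DSP_DRIVER_LEGACY)
                    return -ENODEV;

            return 0;       /* proceed with legacy HDA controller setup */
    }
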
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index daede96f28ee..097ff6c10099 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -102,6 +102,3 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
return dmic_geo;
}
EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Intel NHLT driver");
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index b690ed937cbe..6ffa48dd5983 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -2,22 +2,22 @@
# ALSA ISA drivers
config SND_WSS_LIB
- tristate
- select SND_PCM
+ tristate
+ select SND_PCM
select SND_TIMER
config SND_SB_COMMON
- tristate
+ tristate
config SND_SB8_DSP
- tristate
- select SND_PCM
- select SND_SB_COMMON
+ tristate
+ select SND_PCM
+ select SND_SB_COMMON
config SND_SB16_DSP
- tristate
- select SND_PCM
- select SND_SB_COMMON
+ tristate
+ select SND_PCM
+ select SND_SB_COMMON
menuconfig SND_ISA
bool "ISA sound devices"
diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c
index 78dd213589b4..fa3c39cff5f8 100644
--- a/sound/isa/cs423x/cs4236.c
+++ b/sound/isa/cs423x/cs4236.c
@@ -278,7 +278,8 @@ static int snd_cs423x_pnp_init_mpu(int dev, struct pnp_dev *pdev)
} else {
mpu_port[dev] = pnp_port_start(pdev, 0);
if (mpu_irq[dev] >= 0 &&
- pnp_irq_valid(pdev, 0) && pnp_irq(pdev, 0) >= 0) {
+ pnp_irq_valid(pdev, 0) &&
+ pnp_irq(pdev, 0) != (resource_size_t)-1) {
mpu_irq[dev] = pnp_irq(pdev, 0);
} else {
mpu_irq[dev] = -1; /* disable interrupt */
diff --git a/sound/mips/Kconfig b/sound/mips/Kconfig
index 8a33402fd415..b497b803c834 100644
--- a/sound/mips/Kconfig
+++ b/sound/mips/Kconfig
@@ -14,15 +14,15 @@ config SND_SGI_O2
tristate "SGI O2 Audio"
depends on SGI_IP32
select SND_PCM
- help
- Sound support for the SGI O2 Workstation.
+ help
+ Sound support for the SGI O2 Workstation.
config SND_SGI_HAL2
- tristate "SGI HAL2 Audio"
- depends on SGI_HAS_HAL2
+ tristate "SGI HAL2 Audio"
+ depends on SGI_HAS_HAL2
select SND_PCM
- help
- Sound support for the SGI Indy and Indigo2 Workstation.
+ help
+ Sound support for the SGI Indy and Indigo2 Workstation.
endif # SND_MIPS
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 6676bcbd769f..c9e060939708 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -741,8 +741,7 @@ static int hal2_pcm_create(struct snd_hal2 *hal2)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&hal2_capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- 0, 1024 * 1024);
+ NULL, 0, 1024 * 1024);
return 0;
}
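
The hal2 hunk above reflects the reworked memalloc convention: the old call
smuggled GFP flags through a pseudo device pointer built by
snd_dma_continuous_data(), while the new core accepts a NULL device for
SNDRV_DMA_TYPE_CONTINUOUS and defaults to GFP_KERNEL. A before/after sketch
using the hal2 sizes (the NULL-device semantics are an assumption based on
this series, not spelled out in the hunk itself):

    /* before: GFP flags encoded in a fake struct-device pointer */
    snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
                                          snd_dma_continuous_data(GFP_KERNEL),
                                          0, 1024 * 1024);

    /* after: NULL device selects the default GFP_KERNEL allocation */
    snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
                                          NULL, 0, 1024 * 1024);
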
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index fadc1194b136..9d20ce6118a0 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -582,14 +582,13 @@ static int snd_sgio2audio_pcm_close(struct snd_pcm_substream *substream)
static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
/* hw_free callback */
static int snd_sgio2audio_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/* prepare callback */
@@ -670,7 +669,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
@@ -682,7 +680,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
@@ -694,7 +691,6 @@ static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
/*
@@ -720,6 +716,8 @@ static int snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip)
&snd_sgio2audio_playback1_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_sgio2audio_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
/* create second pcm device with one output and no input */
err = snd_pcm_new(chip->card, "SGI O2 Audio", 1, 1, 0, &pcm);
@@ -732,6 +730,8 @@ static int snd_sgio2audio_new_pcm(struct snd_sgio2audio *chip)
/* set operators */
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&snd_sgio2audio_playback2_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
return 0;
}
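
The sgio2audio conversion above shows the pattern used throughout this
series for vmalloc-backed PCMs: once a SNDRV_DMA_TYPE_VMALLOC preallocation
is registered on the PCM, the generic page helpers manage the buffer and the
PCM core can resolve pages itself, so the per-ops .page callback goes away.
A condensed sketch of the resulting callback pair (function names other than
the snd_pcm_lib_* helpers are illustrative):

    static int example_hw_params(struct snd_pcm_substream *ss,
                                 struct snd_pcm_hw_params *hw_params)
    {
            /* vmalloc-backed buffer, sized from hw_params */
            return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
    }

    static int example_hw_free(struct snd_pcm_substream *ss)
    {
            return snd_pcm_lib_free_pages(ss);
    }
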
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index fc9bcd47d6a4..f802ea331e24 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -384,6 +384,7 @@ static const struct file_operations mixer_fops =
.owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mixer_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = mixer_open,
.release = mixer_release,
};
@@ -1167,6 +1168,7 @@ static const struct file_operations sq_fops =
.write = sq_write,
.poll = sq_poll,
.unlocked_ioctl = sq_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = sq_open,
.release = sq_release,
};
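
compat_ptr_ioctl() is the generic helper for drivers whose ioctl arguments
are all plain pointers (or unused): it applies compat_ptr() to the argument
and forwards to ->unlocked_ioctl, so no hand-written 32-bit path is needed.
A sketch with hypothetical names:

    static long example_ioctl(struct file *file, unsigned int cmd,
                              unsigned long arg)
    {
            return -ENOTTY;         /* stub native handler */
    }

    static const struct file_operations example_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = example_ioctl,        /* native handler */
            .compat_ioctl   = compat_ptr_ioctl,     /* convert ptr, reuse it */
    };
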
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 7630f808d087..93bc9bef7641 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -217,7 +217,7 @@ config SND_CMIPCI
will be called snd-cmipci.
config SND_OXYGEN_LIB
- tristate
+ tristate
config SND_OXYGEN
tristate "C-Media 8786, 8787, 8788 (Oxygen)"
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 4b2451287e2c..5b6452df8bbd 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -633,9 +633,9 @@ snd_ad1889_pcm_init(struct snd_ad1889 *chip, int device)
chip->csubs = NULL;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- BUFFER_BYTES_MAX / 2,
- BUFFER_BYTES_MAX);
+ &chip->pci->dev,
+ BUFFER_BYTES_MAX / 2,
+ BUFFER_BYTES_MAX);
return 0;
}
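
The ad1889 hunk above is the first of many identical conversions in this
patch: snd_dma_pci_data() was essentially a thin wrapper around the PCI
device's embedded struct device (roughly "#define snd_dma_pci_data(pci)
(&(pci)->dev)" -- a recollection, not quoted from this diff), so call sites
now pass &pci->dev directly and the macro can be retired. The general shape,
with the sizes from this hunk:

    snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                                          &chip->pci->dev,
                                          BUFFER_BYTES_MAX / 2,
                                          BUFFER_BYTES_MAX);
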
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 6e28e381c21a..ae29df085ae1 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1672,7 +1672,7 @@ static int snd_ali_pcm(struct snd_ali *codec, int device,
desc->capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(codec->pci),
+ &codec->pci->dev,
64*1024, 128*1024);
pcm->info_flags = 0;
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index 530799c8d3ce..cfbb8cacaaac 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -592,7 +592,8 @@ static int snd_als300_new_pcm(struct snd_als300 *chip)
/* pre-allocation of buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index b06c3dbb525d..d6f5487afe52 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -693,7 +693,8 @@ static int snd_als4000_pcm(struct snd_sb *chip, int device)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_als4000_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_als4000_capture_ops);
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ &chip->pci->dev,
64*1024, 64*1024);
chip->pcm = pcm;
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 2a21a3d99719..147005fdd3ea 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1325,8 +1325,8 @@ static int snd_card_asihpi_pcm_new(struct snd_card_asihpi *asihpi, int device)
/*? do we want to emulate MMAP for non-BBM cards?
Jack doesn't work with ALSA's MMAP emulation - WHY NOT? */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(asihpi->pci),
- 64*1024, BUFFER_BYTES_MAX);
+ &asihpi->pci->dev,
+ 64*1024, BUFFER_BYTES_MAX);
return 0;
}
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index c953bd73a48c..1e1ededf8eb2 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -353,7 +353,7 @@ static int atiixp_build_dma_packets(struct atiixp *chip, struct atiixp_dma *dma,
if (dma->desc_buf.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
ATI_DESC_LIST_SIZE,
&dma->desc_buf) < 0)
return -ENOMEM;
@@ -1284,7 +1284,7 @@ static int snd_atiixp_pcm_new(struct atiixp *chip)
chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1317,7 +1317,7 @@ static int snd_atiixp_pcm_new(struct atiixp *chip)
chip->pcmdevs[ATI_PCMDEV_DIGITAL] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
/* pre-select AC97 SPDIF slots 10/11 */
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 95d209f96581..6f088c1949f3 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -321,7 +321,7 @@ static int atiixp_build_dma_packets(struct atiixp_modem *chip,
return -ENOMEM;
if (dma->desc_buf.area == NULL) {
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
ATI_DESC_LIST_SIZE, &dma->desc_buf) < 0)
return -ENOMEM;
dma->period_bytes = dma->periods = 0; /* clear */
@@ -995,7 +995,7 @@ static int snd_atiixp_pcm_new(struct atiixp_modem *chip)
chip->pcmdevs[ATI_PCMDEV_ANALOG] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
return 0;
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 39ea9ef00f47..a2dcf43beedf 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -436,7 +436,6 @@ static const struct snd_pcm_ops snd_vortex_playback_ops = {
.prepare = snd_vortex_pcm_prepare,
.trigger = snd_vortex_pcm_trigger,
.pointer = snd_vortex_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/*
@@ -638,7 +637,7 @@ static int snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
/* pre-allocation of Scatter-Gather buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci_dev),
+ &chip->pci_dev->dev,
0x10000, 0x10000);
switch (VORTEX_PCM_TYPE(pcm)) {
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index e413414181df..1cbfae856a2a 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -613,7 +613,7 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* Preallocate continuous pages. */
snd_pcm_lib_preallocate_pages_for_all(pcm_playback_ana,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 64 * 1024);
err = snd_pcm_new(chip->card, "Audiowerk2 digital playback", 1, 1, 0,
@@ -645,7 +645,7 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* Preallocate continuous pages. */
snd_pcm_lib_preallocate_pages_for_all(pcm_playback_num,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 64 * 1024);
err = snd_pcm_new(chip->card, "Audiowerk2 capture", 2, 0, 1,
@@ -678,7 +678,7 @@ static int snd_aw2_new_pcm(struct aw2 *chip)
/* Preallocate continuous pages. */
snd_pcm_lib_preallocate_pages_for_all(pcm_capture,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 64 * 1024);
/* Create control */
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index f92c9cbb955a..f475370faaaa 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2135,8 +2135,8 @@ snd_azf3328_pcm(struct snd_azf3328 *chip)
chip->pcm[AZF_CODEC_CAPTURE] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
err = snd_pcm_new(chip->card, "AZF3328 I2S OUT", AZF_PCMDEV_I2S_OUT,
1, 0, &pcm);
@@ -2151,8 +2151,8 @@ snd_azf3328_pcm(struct snd_azf3328 *chip)
chip->pcm[AZF_CODEC_I2S_OUT] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
- 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 66a5a24e7558..6bf5ac3600c5 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -217,7 +217,7 @@ static int snd_bt87x_create_risc(struct snd_bt87x *chip, struct snd_pcm_substrea
__le32 *risc;
if (chip->dma_risc.area == NULL) {
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(MAX_RISC_SIZE), &chip->dma_risc) < 0)
return -ENOMEM;
}
@@ -545,7 +545,6 @@ static const struct snd_pcm_ops snd_bt87x_pcm_ops = {
.prepare = snd_bt87x_prepare,
.trigger = snd_bt87x_trigger,
.pointer = snd_bt87x_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static int snd_bt87x_capture_volume_info(struct snd_kcontrol *kcontrol,
@@ -701,7 +700,7 @@ static int snd_bt87x_pcm(struct snd_bt87x *chip, int device, char *name)
strcpy(pcm->name, name);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_bt87x_pcm_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
128 * 1024,
ALIGN(255 * 4092, 1024));
return 0;
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 478412e0aa3c..abc2440dc2d9 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1389,7 +1389,7 @@ static int snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
}
@@ -1397,7 +1397,7 @@ static int snd_ca0106_pcm(struct snd_ca0106 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
}
@@ -1692,7 +1692,7 @@ static int snd_ca0106_create(int dev, struct snd_card *card,
chip->irq = pci->irq;
/* This stores the periods table. */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
1024, &chip->buffer) < 0) {
snd_ca0106_free(chip);
return -ENOMEM;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index df720881eb99..dd9d62e2b633 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1902,7 +1902,7 @@ static int snd_cmipci_pcm_new(struct cmipci *cm, int device)
cm->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cm->pci), 64*1024, 128*1024);
+ &cm->pci->dev, 64*1024, 128*1024);
return 0;
}
@@ -1924,7 +1924,7 @@ static int snd_cmipci_pcm2_new(struct cmipci *cm, int device)
cm->pcm2 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cm->pci), 64*1024, 128*1024);
+ &cm->pci->dev, 64*1024, 128*1024);
return 0;
}
@@ -1947,7 +1947,7 @@ static int snd_cmipci_pcm_spdif_new(struct cmipci *cm, int device)
cm->pcm_spdif = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cm->pci), 64*1024, 128*1024);
+ &cm->pci->dev, 64*1024, 128*1024);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_alt_chmaps, cm->max_channels, 0,
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 04c712647853..058c1414b777 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -975,7 +975,8 @@ static int snd_cs4281_pcm(struct cs4281 *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 512*1024);
+ &chip->pci->dev,
+ 64*1024, 512*1024);
return 0;
}
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 5b888b795f7e..102a62965ac1 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1494,7 +1494,7 @@ static int _cs46xx_playback_open_channel (struct snd_pcm_substream *substream,in
cpcm = kzalloc(sizeof(*cpcm), GFP_KERNEL);
if (cpcm == NULL)
return -ENOMEM;
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_SIZE, &cpcm->hw_buf) < 0) {
kfree(cpcm);
return -ENOMEM;
@@ -1582,7 +1582,7 @@ static int snd_cs46xx_capture_open(struct snd_pcm_substream *substream)
{
struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_SIZE, &chip->capt.hw_buf) < 0)
return -ENOMEM;
chip->capt.substream = substream;
@@ -1784,7 +1784,8 @@ int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1809,7 +1810,8 @@ int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device)
chip->pcm_rear = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1832,7 +1834,8 @@ int snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device)
chip->pcm_center_lfe = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1855,7 +1858,8 @@ int snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device)
chip->pcm_iec958 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index 04822bf2f987..4642e5384e83 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -117,7 +117,7 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
if (dma->desc_buf.area == NULL) {
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cs5535au->pci),
+ &cs5535au->pci->dev,
CS5535AUDIO_DESC_LIST_SIZE+1,
&dma->desc_buf) < 0)
return -ENOMEM;
@@ -432,8 +432,8 @@ int snd_cs5535audio_pcm(struct cs5535audio *cs5535au)
strcpy(pcm->name, "CS5535 Audio");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(cs5535au->pci),
- 64*1024, 128*1024);
+ &cs5535au->pci->dev,
+ 64*1024, 128*1024);
cs5535au->pcm = pcm;
return 0;
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index 89923399e646..7ae5b238703c 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -379,7 +379,6 @@ static const struct snd_pcm_ops ct_pcm_playback_ops = {
.prepare = ct_pcm_playback_prepare,
.trigger = ct_pcm_playback_trigger,
.pointer = ct_pcm_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* PCM operators for capture */
@@ -392,7 +391,6 @@ static const struct snd_pcm_ops ct_pcm_capture_ops = {
.prepare = ct_pcm_capture_prepare,
.trigger = ct_pcm_capture_trigger,
.pointer = ct_pcm_capture_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_chmap_elem surround_map[] = {
@@ -452,7 +450,8 @@ int ct_alsa_pcm_create(struct ct_atc *atc,
SNDRV_PCM_STREAM_CAPTURE, &ct_pcm_capture_ops);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(atc->pci), 128*1024, 128*1024);
+ &atc->pci->dev,
+ 128*1024, 128*1024);
chs = 2;
switch (device) {
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 2e80b17a7104..bde28aa9e139 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -183,7 +183,7 @@ int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
/* Allocate page table pages */
for (i = 0; i < CT_PTP_NUM; i++) {
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pci),
+ &pci->dev,
PAGE_SIZE, &vm->ptp[i]);
if (err < 0)
break;
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index ca9125726be2..1465813bf7c6 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -324,7 +324,7 @@ static int pcm_open(struct snd_pcm_substream *substream,
/* Finally allocate a page for the scatter-gather list */
if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
PAGE_SIZE, &pipe->sgpage)) < 0) {
dev_err(chip->card->dev, "s-g list allocation failed\n");
return err;
@@ -824,7 +824,6 @@ static const struct snd_pcm_ops analog_playback_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops analog_capture_ops = {
.open = pcm_analog_in_open,
@@ -835,7 +834,6 @@ static const struct snd_pcm_ops analog_capture_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
#ifdef ECHOCARD_HAS_DIGITAL_IO
#ifndef ECHOCARD_HAS_VMIXER
@@ -848,7 +846,6 @@ static const struct snd_pcm_ops digital_playback_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
#endif /* !ECHOCARD_HAS_VMIXER */
static const struct snd_pcm_ops digital_capture_ops = {
@@ -860,7 +857,6 @@ static const struct snd_pcm_ops digital_capture_ops = {
.prepare = pcm_prepare,
.trigger = pcm_trigger,
.pointer = pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
#endif /* ECHOCARD_HAS_DIGITAL_IO */
@@ -869,7 +865,7 @@ static const struct snd_pcm_ops digital_capture_ops = {
/* Preallocate memory only for the first substream because it's the most
* used one
*/
-static int snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
+static void snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
{
struct snd_pcm_substream *ss;
int stream;
@@ -880,8 +876,6 @@ static int snd_echo_preallocate_pages(struct snd_pcm *pcm, struct device *dev)
dev,
ss->number ? 0 : 128<<10,
256<<10);
-
- return 0;
}
@@ -908,8 +902,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &analog_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &analog_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital inputs, no outputs */
@@ -920,8 +913,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
chip->digital_pcm = pcm;
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &digital_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#endif /* ECHOCARD_HAS_DIGITAL_IO */
#else /* ECHOCARD_HAS_VMIXER */
@@ -941,8 +933,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &analog_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &analog_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#ifdef ECHOCARD_HAS_DIGITAL_IO
/* PCM#1 Digital i/o */
@@ -955,8 +946,7 @@ static int snd_echo_new_pcm(struct echoaudio *chip)
strcpy(pcm->name, chip->card->shortname);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &digital_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &digital_capture_ops);
- if ((err = snd_echo_preallocate_pages(pcm, snd_dma_pci_data(chip->pci))) < 0)
- return err;
+ snd_echo_preallocate_pages(pcm, &chip->pci->dev);
#endif /* ECHOCARD_HAS_DIGITAL_IO */
#endif /* ECHOCARD_HAS_VMIXER */
@@ -1958,7 +1948,7 @@ static int snd_echo_create(struct snd_card *card,
/* Create the DSP comm page - this is the area of memory used for most
of the communication with the DSP, which accesses it via bus mastering */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
sizeof(struct comm_page),
&chip->commpage_dma_buf) < 0) {
dev_err(chip->card->dev, "cannot allocate the comm page\n");
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index f208b6e217fd..29b7720d7961 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -124,8 +124,9 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
goto error;
/* This stores the periods table. */
if (emu->card_capabilities->ca0151_chip) { /* P16V */
- if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- 1024, &emu->p16v_buffer)) < 0)
+ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ 1024, &emu->p16v_buffer);
+ if (err < 0)
goto error;
}
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index 9cf81832259c..241b4a0631ab 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -877,7 +877,7 @@ static int snd_emu10k1x_pcm(struct emu10k1x *emu, int device)
emu->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
32*1024, 32*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2,
@@ -936,8 +936,8 @@ static int snd_emu10k1x_create(struct snd_card *card,
}
chip->irq = pci->irq;
- if(snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- 4 * 1024, &chip->dma_buffer) < 0) {
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
+ 4 * 1024, &chip->dma_buffer) < 0) {
snd_emu10k1x_free(chip);
return -ENOMEM;
}
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index e053f0d58bdd..a31adecfe608 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2464,7 +2464,7 @@ int snd_emu10k1_fx8010_tram_setup(struct snd_emu10k1 *emu, u32 size)
}
if (size > 0) {
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &emu->pci->dev,
size * 2, &emu->fx8010.etram_pages) < 0)
return -ENOMEM;
memset(emu->fx8010.etram_pages.area, 0, size * 2);
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 6530a55fb878..9a8cf3c7dd67 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1366,7 +1366,6 @@ static const struct snd_pcm_ops snd_emu10k1_playback_ops = {
.prepare = snd_emu10k1_playback_prepare,
.trigger = snd_emu10k1_playback_trigger,
.pointer = snd_emu10k1_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops snd_emu10k1_capture_ops = {
@@ -1390,7 +1389,6 @@ static const struct snd_pcm_ops snd_emu10k1_efx_playback_ops = {
.prepare = snd_emu10k1_efx_playback_prepare,
.trigger = snd_emu10k1_efx_playback_trigger,
.pointer = snd_emu10k1_efx_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
@@ -1414,12 +1412,12 @@ int snd_emu10k1_pcm(struct snd_emu10k1 *emu, int device)
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
for (substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
@@ -1445,7 +1443,7 @@ int snd_emu10k1_pcm_multi(struct snd_emu10k1 *emu, int device)
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
@@ -1480,7 +1478,7 @@ int snd_emu10k1_pcm_mic(struct snd_emu10k1 *emu, int device)
emu->pcm_mic = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
@@ -1855,7 +1853,7 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
return err;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
64*1024, 64*1024);
return 0;
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 135e26544275..94b8d5b08225 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -387,7 +387,7 @@ int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size,
}
return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci), size, dmab);
+ &emu->pci->dev, size, dmab);
}
/*
@@ -477,7 +477,7 @@ static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
int page;
dmab.dev.type = SNDRV_DMA_TYPE_DEV;
- dmab.dev.dev = snd_dma_pci_data(emu->pci);
+ dmab.dev.dev = &emu->pci->dev;
for (page = first_page; page <= last_page; page++) {
if (emu->page_ptr_table[page] == NULL)
diff --git a/sound/pci/emu10k1/p16v.c b/sound/pci/emu10k1/p16v.c
index eeaed555185c..ab8876855989 100644
--- a/sound/pci/emu10k1/p16v.c
+++ b/sound/pci/emu10k1/p16v.c
@@ -643,7 +643,7 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
(65536 - 64) * 8,
(65536 - 64) * 8);
/*
@@ -656,7 +656,7 @@ int snd_p16v_pcm(struct snd_emu10k1 *emu, int device)
substream;
substream = substream->next) {
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(emu->pci),
+ &emu->pci->dev,
65536 - 64, 65536 - 64);
/*
dev_dbg(emu->card->dev,
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index b767df8181b5..0499dc863202 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1275,7 +1275,8 @@ static int snd_ensoniq_pcm(struct ensoniq *ensoniq, int device)
ensoniq->pcm1 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ensoniq->pci), 64*1024, 128*1024);
+ &ensoniq->pci->dev,
+ 64*1024, 128*1024);
#ifdef CHIP1370
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1307,7 +1308,8 @@ static int snd_ensoniq_pcm2(struct ensoniq *ensoniq, int device)
ensoniq->pcm2 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ensoniq->pci), 64*1024, 128*1024);
+ &ensoniq->pci->dev,
+ 64*1024, 128*1024);
#ifdef CHIP1370
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -2095,7 +2097,7 @@ static int snd_ensoniq_create(struct snd_card *card,
}
ensoniq->irq = pci->irq;
#ifdef CHIP1370
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
16, &ensoniq->dma_bug) < 0) {
dev_err(card->dev, "unable to allocate space for phantom area - dma_bug\n");
snd_ensoniq_free(ensoniq);
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index ecf77c8c9e59..c571c5d380ca 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1032,7 +1032,8 @@ static int snd_es1938_new_pcm(struct es1938 *chip, int device)
strcpy(pcm->name, "ESS Solo-1");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
chip->pcm = pcm;
return 0;
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 974142535a25..7017ca9dea4a 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -1422,10 +1422,8 @@ snd_es1968_init_dmabuf(struct es1968 *chip)
int err;
struct esm_memory *chunk;
- chip->dma.dev.type = SNDRV_DMA_TYPE_DEV;
- chip->dma.dev.dev = snd_dma_pci_data(chip->pci);
err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
chip->total_bufsize, &chip->dma);
if (err < 0 || ! chip->dma.area) {
dev_err(chip->card->dev,
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 3ef7d507eb9b..a7f8109acced 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -721,7 +721,7 @@ static int snd_fm801_pcm(struct fm801 *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(pdev),
+ &pdev->dev,
chip->multichannel ? 128*1024 : 64*1024, 128*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index dae47a45b2b8..bd48335d09d7 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -12,7 +12,7 @@ config SND_HDA_INTEL
tristate "HD Audio PCI"
depends on SND_PCI
select SND_HDA
- select SND_INTEL_NHLT if ACPI
+ select SND_INTEL_DSP_CONFIG
help
Say Y here to include support for Intel "High Definition
Audio" (Azalia) and its compatible devices.
@@ -23,15 +23,6 @@ config SND_HDA_INTEL
To compile this driver as a module, choose M here: the module
will be called snd-hda-intel.
-config SND_HDA_INTEL_DETECT_DMIC
- bool "DMIC detection and probe abort"
- depends on SND_HDA_INTEL
- help
- Say Y to detect digital microphones on SKL+ devices. DMICs
- cannot be handled by the HDaudio legacy driver and are
- currently only supported by the SOF driver.
- If unsure say N.
-
config SND_HDA_TEGRA
tristate "NVIDIA Tegra HD Audio"
depends on ARCH_TEGRA
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 8272b50b8349..6a8564566375 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -43,6 +43,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
{
struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+ /* ignore unsol events during shutdown */
+ if (codec->bus->shutdown)
+ return;
+
if (codec->patch_ops.unsol_event)
codec->patch_ops.unsol_event(codec, ev);
}
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 6387c7e90918..2f3b7a35f2d9 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -701,7 +701,6 @@ static const struct snd_pcm_ops azx_pcm_ops = {
.pointer = azx_pcm_pointer,
.get_time_info = azx_get_time_info,
.mmap = azx_pcm_mmap,
- .page = snd_pcm_sgbuf_ops_page,
};
static void azx_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c52419376c74..e76a0bb6d3cf 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -46,7 +46,7 @@
#include <sound/initval.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
-#include <sound/intel-nhlt.h>
+#include <sound/intel-dsp-config.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/firmware.h>
@@ -124,7 +124,7 @@ static char *patch[SNDRV_CARDS];
static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
CONFIG_SND_HDA_INPUT_BEEP_MODE};
#endif
-static bool dmic_detect = IS_ENABLED(CONFIG_SND_HDA_INTEL_DETECT_DMIC);
+static bool dsp_driver = 1;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -159,8 +159,9 @@ module_param_array(beep_mode, bool, NULL, 0444);
MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
"(0=off, 1=on) (default=1).");
#endif
-module_param(dmic_detect, bool, 0444);
-MODULE_PARM_DESC(dmic_detect, "DMIC detect on SKL+ platforms");
+module_param(dsp_driver, bool, 0444);
+MODULE_PARM_DESC(dsp_driver, "Allow DSP driver selection (bypass this driver) "
+ "(0=off, 1=on) (default=1)");
#ifdef CONFIG_PM
static int param_set_xint(const char *val, const struct kernel_param *kp);
@@ -368,8 +369,6 @@ enum {
((pci)->device == 0x160c))
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
@@ -1280,11 +1279,17 @@ static void init_vga_switcheroo(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct pci_dev *p = get_bound_vga(chip->pci);
+ struct pci_dev *parent;
if (p) {
dev_info(chip->card->dev,
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
- chip->bus.keep_power = 1; /* cleared in either gpu_bound op or codec probe */
+
+ /* cleared in either the gpu_bound op or codec probe, or when the
+ * GPU's upstream port exposes _PR3 (i.e. the bound GPU is a dGPU).
+ */
+ parent = pci_upstream_bridge(p);
+ chip->bus.keep_power = parent ? !pci_pr3_present(parent) : 1;
chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
pci_dev_put(p);
}
@@ -1382,8 +1387,11 @@ static int azx_free(struct azx *chip)
static int azx_dev_disconnect(struct snd_device *device)
{
struct azx *chip = device->device_data;
+ struct hdac_bus *bus = azx_bus(chip);
chip->bus.shutdown = 1;
+ cancel_work_sync(&bus->unsol_work);
+
return 0;
}
@@ -1753,10 +1761,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
if (!azx_snoop(chip))
azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_UC;
- /* Workaround for a communication error on CFL (bko#199007) and CNL */
- if (IS_CFL(pci) || IS_CNL(pci))
- azx_bus(chip)->polling_mode = 1;
-
if (chip->driver_type == AZX_DRIVER_NVIDIA) {
dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
chip->bus.needs_damn_long_delay = 1;
@@ -2020,25 +2024,6 @@ static const struct hda_controller_ops pci_hda_ops = {
.position_check = azx_position_check,
};
-static int azx_check_dmic(struct pci_dev *pci, struct azx *chip)
-{
- struct nhlt_acpi_table *nhlt;
- int ret = 0;
-
- if (chip->driver_type == AZX_DRIVER_SKL &&
- pci->class != 0x040300) {
- nhlt = intel_nhlt_init(&pci->dev);
- if (nhlt) {
- if (intel_nhlt_get_dmic_geo(&pci->dev, nhlt)) {
- ret = -ENODEV;
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, aborting probe\n");
- }
- intel_nhlt_free(nhlt);
- }
- }
- return ret;
-}
-
static int azx_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
@@ -2056,6 +2041,16 @@ static int azx_probe(struct pci_dev *pci,
return -ENOENT;
}
+ /*
+ * stop the probe if another Intel DSP driver should be activated
+ */
+ if (dsp_driver) {
+ err = snd_intel_dsp_driver_probe(pci);
+ if (err != SND_INTEL_DSP_DRIVER_ANY &&
+ err != SND_INTEL_DSP_DRIVER_LEGACY)
+ return -ENODEV;
+ }
+
err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
0, &card);
if (err < 0) {
@@ -2069,17 +2064,6 @@ static int azx_probe(struct pci_dev *pci,
card->private_data = chip;
hda = container_of(chip, struct hda_intel, chip);
- /*
- * stop probe if digital microphones detected on Skylake+ platform
- * with the DSP enabled. This is an opt-in behavior defined at build
- * time or at run-time with a module parameter
- */
- if (dmic_detect) {
- err = azx_check_dmic(pci, chip);
- if (err < 0)
- goto out_free;
- }
-
pci_set_drvdata(pci, card);
err = register_vga_switcheroo(chip);
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index 1fb7b06457ae..bf0255cb0515 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -43,7 +43,7 @@ bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
EXPORT_SYMBOL_GPL(is_jack_detectable);
/* execute pin sense measurement */
-static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid)
+static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
u32 pincap;
u32 val;
@@ -55,19 +55,20 @@ static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid)
AC_VERB_SET_PIN_SENSE, 0);
}
val = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_PIN_SENSE, 0);
+ AC_VERB_GET_PIN_SENSE, dev_id);
if (codec->inv_jack_detect)
val ^= AC_PINSENSE_PRESENCE;
return val;
}
/**
- * snd_hda_jack_tbl_get - query the jack-table entry for the given NID
+ * snd_hda_jack_tbl_get_mst - query the jack-table entry for the given NID
* @codec: the HDA codec
* @nid: pin NID to refer to
+ * @dev_id: pin device entry id
*/
struct hda_jack_tbl *
-snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
+snd_hda_jack_tbl_get_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
struct hda_jack_tbl *jack = codec->jacktbl.list;
int i;
@@ -75,19 +76,21 @@ snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
if (!nid || !jack)
return NULL;
for (i = 0; i < codec->jacktbl.used; i++, jack++)
- if (jack->nid == nid)
+ if (jack->nid == nid && jack->dev_id == dev_id)
return jack;
return NULL;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get);
+EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_mst);
/**
* snd_hda_jack_tbl_get_from_tag - query the jack-table entry for the given tag
* @codec: the HDA codec
* @tag: tag value to refer to
+ * @dev_id: pin device entry id
*/
struct hda_jack_tbl *
-snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag)
+snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec,
+ unsigned char tag, int dev_id)
{
struct hda_jack_tbl *jack = codec->jacktbl.list;
int i;
@@ -95,29 +98,62 @@ snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag)
if (!tag || !jack)
return NULL;
for (i = 0; i < codec->jacktbl.used; i++, jack++)
- if (jack->tag == tag)
+ if (jack->tag == tag && jack->dev_id == dev_id)
return jack;
return NULL;
}
EXPORT_SYMBOL_GPL(snd_hda_jack_tbl_get_from_tag);
+static struct hda_jack_tbl *
+any_jack_tbl_get_from_nid(struct hda_codec *codec, hda_nid_t nid)
+{
+ struct hda_jack_tbl *jack = codec->jacktbl.list;
+ int i;
+
+ if (!nid || !jack)
+ return NULL;
+ for (i = 0; i < codec->jacktbl.used; i++, jack++)
+ if (jack->nid == nid)
+ return jack;
+ return NULL;
+}
+
/**
* snd_hda_jack_tbl_new - create a jack-table entry for the given NID
* @codec: the HDA codec
* @nid: pin NID to assign
*/
static struct hda_jack_tbl *
-snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid)
+snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ struct hda_jack_tbl *jack =
+ snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
+ struct hda_jack_tbl *existing_nid_jack =
+ any_jack_tbl_get_from_nid(codec, nid);
+
+ WARN_ON(dev_id != 0 && !codec->dp_mst);
+
if (jack)
return jack;
jack = snd_array_new(&codec->jacktbl);
if (!jack)
return NULL;
jack->nid = nid;
+ jack->dev_id = dev_id;
jack->jack_dirty = 1;
- jack->tag = codec->jacktbl.used;
+ if (existing_nid_jack) {
+ jack->tag = existing_nid_jack->tag;
+
+ /*
+ * Copy jack_detect from existing_nid_jack to avoid
+ * snd_hda_jack_detect_enable_callback_mst() making multiple
+ * SET_UNSOLICITED_ENABLE calls on the same pin.
+ */
+ jack->jack_detect = existing_nid_jack->jack_detect;
+ } else {
+ jack->tag = codec->jacktbl.used;
+ }
+
return jack;
}
@@ -153,10 +189,12 @@ static void jack_detect_update(struct hda_codec *codec,
if (jack->phantom_jack)
jack->pin_sense = AC_PINSENSE_PRESENCE;
else
- jack->pin_sense = read_pin_sense(codec, jack->nid);
+ jack->pin_sense = read_pin_sense(codec, jack->nid,
+ jack->dev_id);
/* A gating jack indicates the jack is invalid if gating is unplugged */
- if (jack->gating_jack && !snd_hda_jack_detect(codec, jack->gating_jack))
+ if (jack->gating_jack &&
+ !snd_hda_jack_detect_mst(codec, jack->gating_jack, jack->dev_id))
jack->pin_sense &= ~AC_PINSENSE_PRESENCE;
jack->jack_dirty = 0;
@@ -164,7 +202,8 @@ static void jack_detect_update(struct hda_codec *codec,
/* If a jack is gated by this one update it. */
if (jack->gated_jack) {
struct hda_jack_tbl *gated =
- snd_hda_jack_tbl_get(codec, jack->gated_jack);
+ snd_hda_jack_tbl_get_mst(codec, jack->gated_jack,
+ jack->dev_id);
if (gated) {
gated->jack_dirty = 1;
jack_detect_update(codec, gated);
@@ -191,63 +230,69 @@ void snd_hda_jack_set_dirty_all(struct hda_codec *codec)
EXPORT_SYMBOL_GPL(snd_hda_jack_set_dirty_all);
/**
- * snd_hda_pin_sense - execute pin sense measurement
+ * snd_hda_jack_pin_sense - execute pin sense measurement
* @codec: the CODEC to sense
* @nid: the pin NID to sense
*
* Execute necessary pin sense measurement and return its Presence Detect,
* Impedance, ELD Valid etc. status bits.
*/
-u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
+u32 snd_hda_jack_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ struct hda_jack_tbl *jack =
+ snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
if (jack) {
jack_detect_update(codec, jack);
return jack->pin_sense;
}
- return read_pin_sense(codec, nid);
+ return read_pin_sense(codec, nid, dev_id);
}
-EXPORT_SYMBOL_GPL(snd_hda_pin_sense);
+EXPORT_SYMBOL_GPL(snd_hda_jack_pin_sense);
/**
- * snd_hda_jack_detect_state - query pin Presence Detect status
+ * snd_hda_jack_detect_state_mst - query pin Presence Detect status
* @codec: the CODEC to sense
* @nid: the pin NID to sense
+ * @dev_id: pin device entry id
*
* Query and return the pin's Presence Detect status, as either
* HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM.
*/
-int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid)
+int snd_hda_jack_detect_state_mst(struct hda_codec *codec,
+ hda_nid_t nid, int dev_id)
{
- struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ struct hda_jack_tbl *jack =
+ snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
if (jack && jack->phantom_jack)
return HDA_JACK_PHANTOM;
- else if (snd_hda_pin_sense(codec, nid) & AC_PINSENSE_PRESENCE)
+ else if (snd_hda_jack_pin_sense(codec, nid, dev_id) &
+ AC_PINSENSE_PRESENCE)
return HDA_JACK_PRESENT;
else
return HDA_JACK_NOT_PRESENT;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state);
+EXPORT_SYMBOL_GPL(snd_hda_jack_detect_state_mst);
/**
- * snd_hda_jack_detect_enable - enable the jack-detection
+ * snd_hda_jack_detect_enable_callback_mst - enable the jack-detection
* @codec: the HDA codec
* @nid: pin NID to enable
* @func: callback function to register
+ * @dev_id: pin device entry id
*
* In the case of error, the return value will be a pointer embedded with
* errno. Check and handle the return value appropriately with standard
* macros such as @IS_ERR() and @PTR_ERR().
*/
struct hda_jack_callback *
-snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
- hda_jack_callback_fn func)
+snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, hda_jack_callback_fn func)
{
struct hda_jack_tbl *jack;
struct hda_jack_callback *callback = NULL;
int err;
- jack = snd_hda_jack_tbl_new(codec, nid);
+ jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
if (!jack)
return ERR_PTR(-ENOMEM);
if (func) {
@@ -256,6 +301,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
return ERR_PTR(-ENOMEM);
callback->func = func;
callback->nid = jack->nid;
+ callback->dev_id = jack->dev_id;
callback->next = jack->callback;
jack->callback = callback;
}
@@ -272,19 +318,24 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
return ERR_PTR(err);
return callback;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback);
+EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable_callback_mst);
/**
* snd_hda_jack_detect_enable - Enable the jack detection on the given pin
* @codec: the HDA codec
* @nid: pin NID to enable jack detection
+ * @dev_id: pin device entry id
*
* Enable the jack detection with the default callback. Returns zero if
* successful or a negative error code.
*/
-int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid)
+int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id)
{
- return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback(codec, nid, NULL));
+ return PTR_ERR_OR_ZERO(snd_hda_jack_detect_enable_callback_mst(codec,
+ nid,
+ dev_id,
+ NULL));
}
EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable);
@@ -299,8 +350,11 @@ EXPORT_SYMBOL_GPL(snd_hda_jack_detect_enable);
int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid)
{
- struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid);
- struct hda_jack_tbl *gating = snd_hda_jack_tbl_new(codec, gating_nid);
+ struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid, 0);
+ struct hda_jack_tbl *gating =
+ snd_hda_jack_tbl_new(codec, gating_nid, 0);
+
+ WARN_ON(codec->dp_mst);
if (!gated || !gating)
return -EINVAL;
@@ -376,9 +430,10 @@ static void hda_free_jack_priv(struct snd_jack *jack)
}
/**
- * snd_hda_jack_add_kctl - Add a kctl for the given pin
+ * snd_hda_jack_add_kctl_mst - Add a kctl for the given pin
* @codec: the HDA codec
* @nid: pin NID to assign
+ * @dev_id: pin device entry id
* @name: string name for the jack
* @phantom_jack: flag to deal as a phantom jack
* @type: jack type bits to be reported, 0 for guessing from pincfg
@@ -387,15 +442,15 @@ static void hda_free_jack_priv(struct snd_jack *jack)
* This assigns a jack-detection kctl to the given pin. The kcontrol
* will have the given name and index.
*/
-int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, bool phantom_jack,
- int type, const struct hda_jack_keymap *keymap)
+int snd_hda_jack_add_kctl_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap)
{
struct hda_jack_tbl *jack;
const struct hda_jack_keymap *map;
int err, state, buttons;
- jack = snd_hda_jack_tbl_new(codec, nid);
+ jack = snd_hda_jack_tbl_new(codec, nid, dev_id);
if (!jack)
return 0;
if (jack->jack)
@@ -425,12 +480,12 @@ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
snd_jack_set_key(jack->jack, map->type, map->key);
}
- state = snd_hda_jack_detect(codec, nid);
+ state = snd_hda_jack_detect_mst(codec, nid, dev_id);
snd_jack_report(jack->jack, state ? jack->type : 0);
return 0;
}
-EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctl);
+EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctl_mst);
static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
const struct auto_pin_cfg *cfg,
@@ -441,6 +496,8 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
int err;
bool phantom_jack;
+ WARN_ON(codec->dp_mst);
+
if (!nid)
return 0;
def_conf = snd_hda_codec_get_pincfg(codec, nid);
@@ -462,7 +519,7 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
return err;
if (!phantom_jack)
- return snd_hda_jack_detect_enable(codec, nid);
+ return snd_hda_jack_detect_enable(codec, nid, 0);
return 0;
}
@@ -540,7 +597,8 @@ static void call_jack_callback(struct hda_codec *codec, unsigned int res,
}
if (jack->gated_jack) {
struct hda_jack_tbl *gated =
- snd_hda_jack_tbl_get(codec, jack->gated_jack);
+ snd_hda_jack_tbl_get_mst(codec, jack->gated_jack,
+ jack->dev_id);
if (gated) {
for (cb = gated->callback; cb; cb = cb->next) {
cb->jack = gated;
@@ -561,7 +619,14 @@ void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
struct hda_jack_tbl *event;
int tag = (res & AC_UNSOL_RES_TAG) >> AC_UNSOL_RES_TAG_SHIFT;
- event = snd_hda_jack_tbl_get_from_tag(codec, tag);
+ if (codec->dp_mst) {
+ int dev_entry =
+ (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+
+ event = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
+ } else {
+ event = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
+ }
if (!event)
return;
event->jack_dirty = 1;
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 22fe7ee43e82..727b6d3ba454 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -19,6 +19,7 @@ typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callba
struct hda_jack_callback {
hda_nid_t nid;
+ int dev_id;
hda_jack_callback_fn func;
unsigned int private_data; /* arbitrary data */
unsigned int unsol_res; /* unsolicited event bits */
@@ -28,6 +29,7 @@ struct hda_jack_callback {
struct hda_jack_tbl {
hda_nid_t nid;
+ int dev_id;
unsigned char tag; /* unsol event tag */
struct hda_jack_callback *callback;
/* jack-detection stuff */
@@ -49,46 +51,129 @@ struct hda_jack_keymap {
};
struct hda_jack_tbl *
-snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid);
+snd_hda_jack_tbl_get_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id);
+
+/**
+ * snd_hda_jack_tbl_get - query the jack-table entry for the given NID
+ * @codec: the HDA codec
+ * @nid: pin NID to refer to
+ */
+static inline struct hda_jack_tbl *
+snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_jack_tbl_get_mst(codec, nid, 0);
+}
+
struct hda_jack_tbl *
-snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag);
+snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec,
+ unsigned char tag, int dev_id);
void snd_hda_jack_tbl_clear(struct hda_codec *codec);
void snd_hda_jack_set_dirty_all(struct hda_codec *codec);
-int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id);
+
struct hda_jack_callback *
+snd_hda_jack_detect_enable_callback_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, hda_jack_callback_fn cb);
+
+/**
+ * snd_hda_jack_detect_enable_callback - enable the jack-detection
+ * @codec: the HDA codec
+ * @nid: pin NID to enable
+ * @cb: callback function to register
+ *
+ * In the case of error, the return value will be a pointer embedded with
+ * errno. Check and handle the return value appropriately with standard
+ * macros such as @IS_ERR() and @PTR_ERR().
+ */
+static inline struct hda_jack_callback *
snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
- hda_jack_callback_fn cb);
+ hda_jack_callback_fn cb)
+{
+ return snd_hda_jack_detect_enable_callback_mst(codec, nid, 0, cb);
+}
int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid);
-u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid);
+u32 snd_hda_jack_pin_sense(struct hda_codec *codec, hda_nid_t nid, int dev_id);
/* the jack state returned from snd_hda_jack_detect_state() */
enum {
HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT, HDA_JACK_PHANTOM,
};
-int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_jack_detect_state_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id);
+
+/**
+ * snd_hda_jack_detect_state - query pin Presence Detect status
+ * @codec: the CODEC to sense
+ * @nid: the pin NID to sense
+ *
+ * Query and return the pin's Presence Detect status, as either
+ * HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM.
+ */
+static inline int
+snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_jack_detect_state_mst(codec, nid, 0);
+}
+
+/**
+ * snd_hda_jack_detect_mst - Detect the jack
+ * @codec: the HDA codec
+ * @nid: pin NID to check jack detection
+ * @dev_id: pin device entry id
+ */
+static inline bool
+snd_hda_jack_detect_mst(struct hda_codec *codec, hda_nid_t nid, int dev_id)
+{
+ return snd_hda_jack_detect_state_mst(codec, nid, dev_id) !=
+ HDA_JACK_NOT_PRESENT;
+}
/**
* snd_hda_jack_detect - Detect the jack
* @codec: the HDA codec
* @nid: pin NID to check jack detection
*/
-static inline bool snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
+static inline bool
+snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
{
- return snd_hda_jack_detect_state(codec, nid) != HDA_JACK_NOT_PRESENT;
+ return snd_hda_jack_detect_mst(codec, nid, 0);
}
bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, bool phantom_jack,
- int type, const struct hda_jack_keymap *keymap);
+int snd_hda_jack_add_kctl_mst(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap);
+
+/**
+ * snd_hda_jack_add_kctl - Add a kctl for the given pin
+ * @codec: the HDA codec
+ * @nid: pin NID to assign
+ * @name: string name for the jack
+ * @phantom_jack: flag to treat the jack as a phantom jack
+ * @type: jack type bits to be reported, 0 for guessing from pincfg
+ * @keymap: optional jack / key mapping
+ *
+ * This assigns a jack-detection kctl to the given pin. The kcontrol
+ * will have the given name and index.
+ */
+static inline int
+snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
+ const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap)
+{
+ return snd_hda_jack_add_kctl_mst(codec, nid, 0,
+ name, phantom_jack, type, keymap);
+}
+
int snd_hda_jack_add_kctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg);
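
The header conversion above follows one pattern: each legacy entry point
becomes a static inline wrapper around a new *_mst variant that takes an
explicit device entry, with dev_id 0 reproducing the old single-stream
behaviour. A hedged caller-side sketch (codec, pin_nid and dev_id are
placeholders, not from this series):

	/* legacy single-stream call sites compile unchanged */
	struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, pin_nid);

	/* DP-MST aware code addresses one device entry explicitly */
	struct hda_jack_tbl *mst_jack =
		snd_hda_jack_tbl_get_mst(codec, pin_nid, dev_id);
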
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 968d3caab6ac..90aa0f400a57 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -910,6 +910,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 78bd2e3722c7..bffde594e204 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -37,25 +37,6 @@ static bool static_hdmi_pcm;
module_param(static_hdmi_pcm, bool, 0644);
MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
-#define is_haswell(codec) ((codec)->core.vendor_id == 0x80862807)
-#define is_broadwell(codec) ((codec)->core.vendor_id == 0x80862808)
-#define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
-#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
-#define is_kabylake(codec) ((codec)->core.vendor_id == 0x8086280b)
-#define is_geminilake(codec) (((codec)->core.vendor_id == 0x8086280d) || \
- ((codec)->core.vendor_id == 0x80862800))
-#define is_cannonlake(codec) ((codec)->core.vendor_id == 0x8086280c)
-#define is_icelake(codec) ((codec)->core.vendor_id == 0x8086280f)
-#define is_tigerlake(codec) ((codec)->core.vendor_id == 0x80862812)
-#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
- || is_skylake(codec) || is_broxton(codec) \
- || is_kabylake(codec) || is_geminilake(codec) \
- || is_cannonlake(codec) || is_icelake(codec) \
- || is_tigerlake(codec))
-#define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
-#define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
-#define is_valleyview_plus(codec) (is_valleyview(codec) || is_cherryview(codec))
-
struct hdmi_spec_per_cvt {
hda_nid_t cvt_nid;
int assigned;
@@ -99,16 +80,19 @@ struct hdmi_spec_per_pin {
/* operations used by generic code that can be overridden by patches */
struct hdmi_ops {
int (*pin_get_eld)(struct hda_codec *codec, hda_nid_t pin_nid,
- unsigned char *buf, int *eld_size);
+ int dev_id, unsigned char *buf, int *eld_size);
void (*pin_setup_infoframe)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int dev_id,
int ca, int active_channels, int conn_type);
/* enable/disable HBR (HD passthrough) */
- int (*pin_hbr_setup)(struct hda_codec *codec, hda_nid_t pin_nid, bool hbr);
+ int (*pin_hbr_setup)(struct hda_codec *codec, hda_nid_t pin_nid,
+ int dev_id, bool hbr);
int (*setup_stream)(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format);
+ hda_nid_t pin_nid, int dev_id, u32 stream_tag,
+ int format);
void (*pin_cvt_fixup)(struct hda_codec *codec,
struct hdmi_spec_per_pin *per_pin,
@@ -162,6 +146,7 @@ struct hdmi_spec {
bool dyn_pin_out;
bool dyn_pcm_assign;
+ bool intel_hsw_fixup; /* apply Intel platform-specific fixups */
/*
* Non-generic VIA/NVIDIA specific
*/
@@ -654,8 +639,16 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
return true;
}
+static int hdmi_pin_get_eld(struct hda_codec *codec, hda_nid_t nid,
+ int dev_id, unsigned char *buf, int *eld_size)
+{
+ snd_hda_set_dev_select(codec, nid, dev_id);
+
+ return snd_hdmi_get_eld(codec, nid, buf, eld_size);
+}
+
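This helper shows the recurring rule of the patch: on a DP-MST capable pin,
snd_hda_set_dev_select() must route the codec to the right device entry before
any verb that touches per-entry state. A minimal sketch of the same idiom for
a raw pin-sense read (illustrative only; the series adds
snd_hda_jack_pin_sense() for real use):

	static unsigned int read_pin_sense_mst(struct hda_codec *codec,
					       hda_nid_t pin_nid, int dev_id)
	{
		/* route the following verb to one device entry of the pin */
		snd_hda_set_dev_select(codec, pin_nid, dev_id);
		return snd_hda_codec_read(codec, pin_nid, 0,
					  AC_VERB_GET_PIN_SENSE, 0);
	}
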
static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
- hda_nid_t pin_nid,
+ hda_nid_t pin_nid, int dev_id,
int ca, int active_channels,
int conn_type)
{
@@ -685,6 +678,8 @@ static void hdmi_pin_setup_infoframe(struct hda_codec *codec,
return;
}
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
+
/*
* sizeof(ai) is used instead of sizeof(*hdmi_ai) or
* sizeof(*dp_ai) to avoid partial match/update problems when
@@ -710,6 +705,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
struct hdmi_spec *spec = codec->spec;
struct hdac_chmap *chmap = &spec->chmap;
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
int channels = per_pin->channels;
int active_channels;
struct hdmi_eld *eld;
@@ -718,6 +714,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
if (!channels)
return;
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
+
/* some HW (e.g. HSW+) needs reprogramming the amp at each time */
if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
snd_hda_codec_write(codec, pin_nid, 0,
@@ -743,8 +741,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
pin_nid, non_pcm, ca, channels,
per_pin->chmap, per_pin->chmap_set);
- spec->ops.pin_setup_infoframe(codec, pin_nid, ca, active_channels,
- eld->info.conn_type);
+ spec->ops.pin_setup_infoframe(codec, pin_nid, dev_id,
+ ca, active_channels, eld->info.conn_type);
per_pin->non_pcm = non_pcm;
}
@@ -776,34 +774,32 @@ static void jack_callback(struct hda_codec *codec,
if (codec_has_acomp(codec))
return;
- /* hda_jack don't support DP MST */
- check_presence_and_report(codec, jack->nid, 0);
+ check_presence_and_report(codec, jack->nid, jack->dev_id);
}
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
{
int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
struct hda_jack_tbl *jack;
- int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
- /*
- * assume DP MST uses dyn_pcm_assign and acomp and
- * never comes here
- * if DP MST supports unsol event, below code need
- * consider dev_entry
- */
- jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
+ if (codec->dp_mst) {
+ int dev_entry =
+ (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
+ } else {
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
+ }
if (!jack)
return;
jack->jack_dirty = 1;
codec_dbg(codec,
"HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n",
- codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
+ codec->addr, jack->nid, jack->dev_id, !!(res & AC_UNSOL_RES_IA),
!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
- /* hda_jack don't support DP MST */
- check_presence_and_report(codec, jack->nid, 0);
+ check_presence_and_report(codec, jack->nid, jack->dev_id);
}
static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -833,11 +829,21 @@ static void hdmi_unsol_event(struct hda_codec *codec, unsigned int res)
{
int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
int subtag = (res & AC_UNSOL_RES_SUBTAG) >> AC_UNSOL_RES_SUBTAG_SHIFT;
+ struct hda_jack_tbl *jack;
if (codec_has_acomp(codec))
return;
- if (!snd_hda_jack_tbl_get_from_tag(codec, tag)) {
+ if (codec->dp_mst) {
+ int dev_entry =
+ (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
+
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, dev_entry);
+ } else {
+ jack = snd_hda_jack_tbl_get_from_tag(codec, tag, 0);
+ }
+
+ if (!jack) {
codec_dbg(codec, "Unexpected HDMI event tag 0x%x\n", tag);
return;
}
@@ -878,11 +884,12 @@ static void haswell_verify_D0(struct hda_codec *codec,
((format & AC_FMT_TYPE_NON_PCM) && (format & AC_FMT_CHAN_MASK) == 7)
static int hdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
- bool hbr)
+ int dev_id, bool hbr)
{
int pinctl, new_pinctl;
if (snd_hda_query_pin_caps(codec, pin_nid) & AC_PINCAP_HBR) {
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
pinctl = snd_hda_codec_read(codec, pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
@@ -912,20 +919,22 @@ static int hdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
}
static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+ hda_nid_t pin_nid, int dev_id,
+ u32 stream_tag, int format)
{
struct hdmi_spec *spec = codec->spec;
unsigned int param;
int err;
- err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
+ err = spec->ops.pin_hbr_setup(codec, pin_nid, dev_id,
+ is_hbr_format(format));
if (err) {
codec_dbg(codec, "hdmi_setup_stream: HBR is not supported\n");
return err;
}
- if (is_haswell_plus(codec)) {
+ if (spec->intel_hsw_fixup) {
/*
* on recent platforms IEC Coding Type is required for HBR
@@ -1292,6 +1301,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
if (!(get_wcaps(codec, pin_nid) & AC_WCAP_CONN_LIST)) {
codec_warn(codec,
@@ -1300,24 +1310,43 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
return -EINVAL;
}
+ snd_hda_set_dev_select(codec, pin_nid, dev_id);
+
/* all the device entries on the same pin have the same conn list */
- per_pin->num_mux_nids = snd_hda_get_connections(codec, pin_nid,
- per_pin->mux_nids,
- HDA_MAX_CONNECTIONS);
+ per_pin->num_mux_nids =
+ snd_hda_get_raw_connections(codec, pin_nid, per_pin->mux_nids,
+ HDA_MAX_CONNECTIONS);
return 0;
}
static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
- struct hdmi_spec_per_pin *per_pin)
+ struct hdmi_spec_per_pin *per_pin)
{
int i;
- /* try the prefer PCM */
- if (!test_bit(per_pin->pin_nid_idx, &spec->pcm_bitmap))
+	/*
+	 * generic_hdmi_build_pcms() allocates (num_nids + dev_num - 1)
+	 * PCMs in total.
+	 *
+	 * The per_pin with pin_nid_idx=n and dev_id=m prefers to get pcm-n
+	 * when m==0. This guarantees that dynamic pcm assignments are
+	 * compatible with the legacy static per_pin-pcm assignment that
+	 * existed in the days before DP-MST.
+	 *
+	 * A per_pin with m!=0 prefers to get pcm=(num_nids + (m - 1)).
+ */
+ if (per_pin->dev_id == 0 &&
+ !test_bit(per_pin->pin_nid_idx, &spec->pcm_bitmap))
return per_pin->pin_nid_idx;
- /* have a second try; check the "reserved area" over num_pins */
+ if (per_pin->dev_id != 0 &&
+ !(test_bit(spec->num_nids + (per_pin->dev_id - 1),
+ &spec->pcm_bitmap))) {
+ return spec->num_nids + (per_pin->dev_id - 1);
+ }
+
+ /* have a second try; check the area over num_nids */
for (i = spec->num_nids; i < spec->pcm_used; i++) {
if (!test_bit(i, &spec->pcm_bitmap))
return i;
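
The slot preference is easier to see with concrete numbers. A standalone toy
model; the flat bitmap and names are illustrative, not the driver's types, and
claiming the bit after a hit is omitted:

	#include <stdio.h>

	static int find_pcm_slot(unsigned long bitmap, int num_nids,
				 int pcm_used, int pin_idx, int dev_id)
	{
		int i;

		if (dev_id == 0 && !(bitmap & (1UL << pin_idx)))
			return pin_idx;			/* legacy slot */
		if (dev_id != 0 && !(bitmap & (1UL << (num_nids + dev_id - 1))))
			return num_nids + dev_id - 1;	/* MST slot */
		for (i = num_nids; i < pcm_used; i++)	/* fallback scan */
			if (!(bitmap & (1UL << i)))
				return i;
		return -1;
	}

	int main(void)
	{
		/* 3 pins, 2 extra device entries => slots 0..4 */
		printf("%d\n", find_pcm_slot(0, 3, 5, 1, 0));	/* pcm-1 */
		printf("%d\n", find_pcm_slot(0, 3, 5, 1, 2));	/* 3+2-1 = 4 */
		return 0;
	}
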
@@ -1511,6 +1540,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
struct hdmi_spec *spec = codec->spec;
struct hdmi_eld *eld = &spec->temp_eld;
hda_nid_t pin_nid = per_pin->pin_nid;
+ int dev_id = per_pin->dev_id;
/*
* Always execute a GetPinSense verb here, even when called from
* hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
@@ -1523,7 +1553,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
bool ret;
bool do_repoll = false;
- present = snd_hda_pin_sense(codec, pin_nid);
+ present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id);
mutex_lock(&per_pin->lock);
eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
@@ -1537,8 +1567,8 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
codec->addr, pin_nid, eld->monitor_present, eld->eld_valid);
if (eld->eld_valid) {
- if (spec->ops.pin_get_eld(codec, pin_nid, eld->eld_buffer,
- &eld->eld_size) < 0)
+ if (spec->ops.pin_get_eld(codec, pin_nid, dev_id,
+ eld->eld_buffer, &eld->eld_size) < 0)
eld->eld_valid = false;
else {
if (snd_hdmi_parse_eld(codec, &eld->info, eld->eld_buffer,
@@ -1556,7 +1586,7 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin,
ret = !repoll || !eld->monitor_present || eld->eld_valid;
- jack = snd_hda_jack_tbl_get(codec, pin_nid);
+ jack = snd_hda_jack_tbl_get_mst(codec, pin_nid, per_pin->dev_id);
if (jack) {
jack->block_report = !ret;
jack->pin_sense = (eld->monitor_present && eld->eld_valid) ?
@@ -1587,7 +1617,8 @@ static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec,
* DP MST will use dyn_pcm_assign,
* so DP MST will never come here
*/
- jack_tbl = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id);
if (jack_tbl)
jack = jack_tbl->jack;
}
@@ -1668,7 +1699,8 @@ static void hdmi_repoll_eld(struct work_struct *work)
struct hdmi_spec *spec = codec->spec;
struct hda_jack_tbl *jack;
- jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ jack = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id);
if (jack)
jack->jack_dirty = 1;
@@ -1709,7 +1741,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
* To simplify the implementation, malloc all
* the virtual pins in the initialization statically
*/
- if (is_haswell_plus(codec)) {
+ if (spec->intel_hsw_fixup) {
/*
* On Intel platforms, device entries number is
* changed dynamically. If there is a DP MST
@@ -1758,7 +1790,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
per_pin->dev_id = i;
per_pin->non_pcm = false;
snd_hda_set_dev_select(codec, pin_nid, i);
- if (is_haswell_plus(codec))
+ if (spec->intel_hsw_fixup)
intel_haswell_fixup_connect_list(codec, pin_nid);
err = hdmi_read_pin_conn(codec, pin_idx);
if (err < 0)
@@ -1873,7 +1905,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct hdmi_spec *spec = codec->spec;
int pin_idx;
struct hdmi_spec_per_pin *per_pin;
- hda_nid_t pin_nid;
struct snd_pcm_runtime *runtime = substream->runtime;
bool non_pcm;
int pinctl, stripe;
@@ -1897,7 +1928,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
goto unlock;
}
per_pin = get_pin(spec, pin_idx);
- pin_nid = per_pin->pin_nid;
/* Verify pin:cvt selections to avoid silent audio after S3.
* After S3, the audio driver restores pin:cvt selections
@@ -1912,8 +1942,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
/* Todo: add DP1.2 MST audio support later */
if (codec_has_acomp(codec))
- snd_hdac_sync_audio_rate(&codec->core, pin_nid, per_pin->dev_id,
- runtime->rate);
+ snd_hdac_sync_audio_rate(&codec->core, per_pin->pin_nid,
+ per_pin->dev_id, runtime->rate);
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
mutex_lock(&per_pin->lock);
@@ -1931,16 +1961,18 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
mutex_unlock(&per_pin->lock);
if (spec->dyn_pin_out) {
- pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+ snd_hda_set_dev_select(codec, per_pin->pin_nid,
+ per_pin->dev_id);
+ pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
- snd_hda_codec_write(codec, pin_nid, 0,
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL,
pinctl | PIN_OUT);
}
/* snd_hda_set_dev_select() has been called before */
- err = spec->ops.setup_stream(codec, cvt_nid, pin_nid,
- stream_tag, format);
+ err = spec->ops.setup_stream(codec, cvt_nid, per_pin->pin_nid,
+ per_pin->dev_id, stream_tag, format);
unlock:
mutex_unlock(&spec->pcm_lock);
return err;
@@ -1992,6 +2024,8 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
per_pin = get_pin(spec, pin_idx);
if (spec->dyn_pin_out) {
+ snd_hda_set_dev_select(codec, per_pin->pin_nid,
+ per_pin->dev_id);
pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
snd_hda_codec_write(codec, per_pin->pin_nid, 0,
@@ -2075,15 +2109,24 @@ static bool is_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
static int generic_hdmi_build_pcms(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
- int idx;
+ int idx, pcm_num;
/*
* for non-mst mode, pcm number is the same as before
- * for DP MST mode, pcm number is (nid number + dev_num - 1)
- * dev_num is the device entry number in a pin
- *
+	 * for DP MST mode without extra PCMs, pcm number is the same
+	 * as for non-mst mode
+	 * for DP MST mode with extra PCMs, pcm number is
+	 * (nid number + dev_num - 1), where dev_num is the number of
+	 * device entries in a pin
*/
- for (idx = 0; idx < spec->num_nids + spec->dev_num - 1; idx++) {
+
+ if (codec->mst_no_extra_pcms)
+ pcm_num = spec->num_nids;
+ else
+ pcm_num = spec->num_nids + spec->dev_num - 1;
+
+ codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
+
+ for (idx = 0; idx < pcm_num; idx++) {
struct hda_pcm *info;
struct hda_pcm_stream *pstr;
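
A trivial standalone check of the pcm_num selection above, with made-up
values: num_nids=3 and dev_num=3 give 5 PCMs unless mst_no_extra_pcms caps
the count at the pin count:

	#include <stdio.h>

	int main(void)
	{
		int num_nids = 3, dev_num = 3, mst_no_extra_pcms;

		for (mst_no_extra_pcms = 0; mst_no_extra_pcms <= 1;
		     mst_no_extra_pcms++) {
			int pcm_num = mst_no_extra_pcms ?
				num_nids : num_nids + dev_num - 1;
			printf("mst_no_extra_pcms=%d -> pcm_num=%d\n",
			       mst_no_extra_pcms, pcm_num);	/* 5, then 3 */
		}
		return 0;
	}
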
@@ -2160,11 +2203,13 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pcm_idx)
if (phantom_jack)
strncat(hdmi_str, " Phantom",
sizeof(hdmi_str) - strlen(hdmi_str) - 1);
- ret = snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str,
- phantom_jack, 0, NULL);
+ ret = snd_hda_jack_add_kctl_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id, hdmi_str, phantom_jack,
+ 0, NULL);
if (ret < 0)
return ret;
- jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+ jack = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid,
+ per_pin->dev_id);
if (jack == NULL)
return 0;
/* assign jack->jack to pcm_rec[].jack to
@@ -2273,10 +2318,11 @@ static int generic_hdmi_init(struct hda_codec *codec)
if (codec_has_acomp(codec))
continue;
if (spec->use_jack_detect)
- snd_hda_jack_detect_enable(codec, pin_nid);
+ snd_hda_jack_detect_enable(codec, pin_nid, dev_id);
else
- snd_hda_jack_detect_enable_callback(codec, pin_nid,
- jack_callback);
+ snd_hda_jack_detect_enable_callback_mst(codec, pin_nid,
+ dev_id,
+ jack_callback);
}
mutex_unlock(&spec->bind_lock);
return 0;
@@ -2315,8 +2361,8 @@ static void generic_hdmi_free(struct hda_codec *codec)
snd_hdac_acomp_exit(&codec->bus->core);
} else if (codec_has_acomp(codec)) {
snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
- codec->relaxed_resume = 0;
}
+ codec->relaxed_resume = 0;
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2366,7 +2412,7 @@ static const struct hda_codec_ops generic_hdmi_patch_ops = {
};
static const struct hdmi_ops generic_standard_hdmi_ops = {
- .pin_get_eld = snd_hdmi_get_eld,
+ .pin_get_eld = hdmi_pin_get_eld,
.pin_setup_infoframe = hdmi_pin_setup_infoframe,
.pin_hbr_setup = hdmi_pin_hbr_setup,
.setup_stream = hdmi_setup_stream,
@@ -2426,11 +2472,11 @@ static int patch_generic_hdmi(struct hda_codec *codec)
/* turn on / off the unsol event jack detection dynamically */
static void reprogram_jack_detect(struct hda_codec *codec, hda_nid_t nid,
- bool use_acomp)
+ int dev_id, bool use_acomp)
{
struct hda_jack_tbl *tbl;
- tbl = snd_hda_jack_tbl_get(codec, nid);
+ tbl = snd_hda_jack_tbl_get_mst(codec, nid, dev_id);
if (tbl) {
/* clear unsol even if component notifier is used, or re-enable
* if notifier is cleared
@@ -2443,7 +2489,7 @@ static void reprogram_jack_detect(struct hda_codec *codec, hda_nid_t nid,
* at need (i.e. only when notifier is cleared)
*/
if (!use_acomp)
- snd_hda_jack_detect_enable(codec, nid);
+ snd_hda_jack_detect_enable(codec, nid, dev_id);
}
}
@@ -2463,6 +2509,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
for (i = 0; i < spec->num_pins; i++)
reprogram_jack_detect(spec->codec,
get_pin(spec, i)->pin_nid,
+ get_pin(spec, i)->dev_id,
use_acomp);
}
mutex_unlock(&spec->bind_lock);
@@ -2563,7 +2610,8 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
hda_nid_t conns[4];
int nconns;
- nconns = snd_hda_get_connections(codec, nid, conns, ARRAY_SIZE(conns));
+ nconns = snd_hda_get_raw_connections(codec, nid, conns,
+ ARRAY_SIZE(conns));
if (nconns == spec->num_cvts &&
!memcmp(conns, spec->cvt_nids, spec->num_cvts * sizeof(hda_nid_t)))
return;
@@ -2664,7 +2712,7 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
base_nid = intel_base_nid(codec);
if (WARN_ON(pin_nid < base_nid || pin_nid >= base_nid + 3))
return -1;
- return pin_nid - base_nid + 1; /* intel port is 1-based */
+ return pin_nid - base_nid + 1;
}
/*
@@ -2673,10 +2721,9 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
*/
for (i = 0; i < spec->port_num; i++) {
if (pin_nid == spec->port_map[i])
- return i + 1;
+ return i;
}
- /* return -1 if pin number exceeds our expectation */
codec_info(codec, "Can't find the HDMI/DP port for pin %d\n", pin_nid);
return -1;
}
@@ -2689,13 +2736,12 @@ static int intel_port2pin(struct hda_codec *codec, int port)
/* we assume only from port-B to port-D */
if (port < 1 || port > 3)
return 0;
- /* intel port is 1-based */
return port + intel_base_nid(codec) - 1;
}
- if (port < 1 || port > spec->port_num)
+ if (port < 0 || port >= spec->port_num)
return 0;
- return spec->port_map[port - 1];
+ return spec->port_map[port];
}
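
With the switch to 0-based port numbering, the map array becomes a direct
two-way table. A standalone sketch using the new ICL map from this patch; the
dummy 0x0 entry keeps index equal to port:

	#include <stdio.h>

	/* index = port (0-based), value = pin NID; see patch_i915_icl_hdmi() */
	static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};

	static int pin2port(int pin_nid)
	{
		unsigned int i;

		for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
			if (pin_nid == map[i])
				return i;
		return -1;			/* unknown pin */
	}

	int main(void)
	{
		printf("pin 0x6 -> port %d\n", pin2port(0x6));	/* 2 */
		printf("port 3 -> pin 0x%x\n", map[3]);		/* 0x8 */
		return 0;
	}
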
static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
@@ -2741,10 +2787,12 @@ static void register_i915_notifier(struct hda_codec *codec)
/* setup_stream ops override for HSW+ */
static int i915_hsw_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+ hda_nid_t pin_nid, int dev_id, u32 stream_tag,
+ int format)
{
haswell_verify_D0(codec, cvt_nid, pin_nid);
- return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
+ stream_tag, format);
}
/* pin_cvt_fixup ops override for HSW+ and VLV+ */
@@ -2816,6 +2864,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
spec->vendor_nid = vendor_nid;
spec->port_map = port_map;
spec->port_num = port_num;
+ spec->intel_hsw_fixup = true;
intel_haswell_enable_all_pins(codec, true);
intel_haswell_fixup_enable_dp12(codec);
@@ -2846,9 +2895,9 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
{
/*
	 * pin to port mapping table where the value indicates the pin number and
-	 * the index indicate the port number with 1 base.
+	 * the index indicates the port number.
*/
- static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb};
+ static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
}
@@ -2857,14 +2906,13 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
{
/*
	 * pin to port mapping table where the value indicates the pin number and
-	 * the index indicate the port number with 1 base.
+	 * the index indicates the port number.
*/
static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
}
-
/* Intel Baytrail and Braswell; with eld notifier */
static int patch_i915_byt_hdmi(struct hda_codec *codec)
{
@@ -2970,7 +3018,7 @@ static int simple_playback_init(struct hda_codec *codec)
if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP)
snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE,
AMP_OUT_UNMUTE);
- snd_hda_jack_detect_enable(codec, pin);
+ snd_hda_jack_detect_enable(codec, pin, per_pin->dev_id);
return 0;
}
@@ -3479,11 +3527,22 @@ static int patch_nvhdmi(struct hda_codec *codec)
struct hdmi_spec *spec;
int err;
- err = patch_generic_hdmi(codec);
- if (err)
+ err = alloc_generic_hdmi(codec);
+ if (err < 0)
return err;
+ codec->dp_mst = true;
spec = codec->spec;
+ spec->dyn_pcm_assign = true;
+
+ err = hdmi_parse_codec(codec);
+ if (err < 0) {
+ generic_spec_free(codec);
+ return err;
+ }
+
+ generic_hdmi_init_per_pins(codec);
+
spec->dyn_pin_out = true;
spec->chmap.ops.chmap_cea_alloc_validate_get_type =
@@ -3497,6 +3556,27 @@ static int patch_nvhdmi(struct hda_codec *codec)
return 0;
}
+static int patch_nvhdmi_legacy(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec;
+ int err;
+
+ err = patch_generic_hdmi(codec);
+ if (err)
+ return err;
+
+ spec = codec->spec;
+ spec->dyn_pin_out = true;
+
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
+
+ codec->link_down_at_suspend = 1;
+
+ return 0;
+}
+
/*
* The HDA codec on NVIDIA Tegra contains two scratch registers that are
* accessed using vendor-defined verbs. These registers can be used for
@@ -3710,16 +3790,19 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
#define ATI_HBR_ENABLE 0x10
static int atihdmi_pin_get_eld(struct hda_codec *codec, hda_nid_t nid,
- unsigned char *buf, int *eld_size)
+ int dev_id, unsigned char *buf, int *eld_size)
{
+ WARN_ON(dev_id != 0);
/* call hda_eld.c ATI/AMD-specific function */
return snd_hdmi_get_eld_ati(codec, nid, buf, eld_size,
is_amdhdmi_rev3_or_later(codec));
}
-static void atihdmi_pin_setup_infoframe(struct hda_codec *codec, hda_nid_t pin_nid, int ca,
+static void atihdmi_pin_setup_infoframe(struct hda_codec *codec,
+ hda_nid_t pin_nid, int dev_id, int ca,
int active_channels, int conn_type)
{
+ WARN_ON(dev_id != 0);
snd_hda_codec_write(codec, pin_nid, 0, ATI_VERB_SET_CHANNEL_ALLOCATION, ca);
}
@@ -3910,10 +3993,12 @@ static void atihdmi_paired_cea_alloc_to_tlv_chmap(struct hdac_chmap *hchmap,
}
static int atihdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
- bool hbr)
+ int dev_id, bool hbr)
{
int hbr_ctl, hbr_ctl_new;
+ WARN_ON(dev_id != 0);
+
hbr_ctl = snd_hda_codec_read(codec, pin_nid, 0, ATI_VERB_GET_HBR_CONTROL, 0);
if (hbr_ctl >= 0 && (hbr_ctl & ATI_HBR_CAPABLE)) {
if (hbr)
@@ -3939,9 +4024,9 @@ static int atihdmi_pin_hbr_setup(struct hda_codec *codec, hda_nid_t pin_nid,
}
static int atihdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
- hda_nid_t pin_nid, u32 stream_tag, int format)
+ hda_nid_t pin_nid, int dev_id,
+ u32 stream_tag, int format)
{
-
if (is_amdhdmi_rev3_or_later(codec)) {
int ramp_rate = 180; /* default as per AMD spec */
/* disable ramp-up/down for non-pcm as per AMD spec */
@@ -3951,7 +4036,8 @@ static int atihdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
snd_hda_codec_write(codec, cvt_nid, 0, ATI_VERB_SET_RAMP_RATE, ramp_rate);
}
- return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, dev_id,
+ stream_tag, format);
}
@@ -4081,25 +4167,25 @@ HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x),
HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x),
-HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de000d, "GPU 0d HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0010, "GPU 10 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0011, "GPU 11 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0012, "GPU 12 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0013, "GPU 13 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0014, "GPU 14 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0015, "GPU 15 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0016, "GPU 16 HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de000d, "GPU 0d HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0010, "GPU 10 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0011, "GPU 11 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0012, "GPU 12 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0013, "GPU 13 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0014, "GPU 14 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0015, "GPU 15 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0016, "GPU 16 HDMI/DP", patch_nvhdmi_legacy),
/* 17 is known to be absent */
-HDA_CODEC_ENTRY(0x10de0018, "GPU 18 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de0019, "GPU 19 HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de001a, "GPU 1a HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de001b, "GPU 1b HDMI/DP", patch_nvhdmi),
-HDA_CODEC_ENTRY(0x10de001c, "GPU 1c HDMI/DP", patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0018, "GPU 18 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de0019, "GPU 19 HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de001a, "GPU 1a HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de001b, "GPU 1b HDMI/DP", patch_nvhdmi_legacy),
+HDA_CODEC_ENTRY(0x10de001c, "GPU 1c HDMI/DP", patch_nvhdmi_legacy),
HDA_CODEC_ENTRY(0x10de0020, "Tegra30 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0022, "Tegra114 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0028, "Tegra124 HDMI", patch_tegra_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 80f66ba85f87..d2bf70a1d2fd 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5892,6 +5892,7 @@ enum {
ALC299_FIXUP_PREDATOR_SPK,
ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
+ ALC294_FIXUP_ASUS_INTSPK_GPIO,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6982,6 +6983,13 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
},
+ [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
+ .type = HDA_FIXUP_FUNC,
+ /* The GPIO must be pulled to initialize the AMP */
+ .v.func = alc_fixup_gpio4,
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7141,7 +7149,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
@@ -7248,6 +7256,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
#if 0
@@ -7512,20 +7521,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x19, 0x02a11020},
{0x1a, 0x02a11030},
{0x21, 0x0221101f}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x14, 0x90170110},
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x14, 0x90170150},
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x40000000},
- {0x14, 0x90170110},
- {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -7608,38 +7603,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x1b, 0x01011020},
{0x21, 0x02211010}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60130},
- {0x14, 0x90170110},
- {0x1b, 0x01011020},
- {0x21, 0x0221101f}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60160},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60170},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60180},
- {0x14, 0x90170120},
- {0x21, 0x02211030}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0xb7a60130},
- {0x14, 0x90170110},
- {0x21, 0x02211020}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60130},
- {0x14, 0x90170110},
- {0x14, 0x01011020},
- {0x21, 0x0221101f}),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC256_STANDARD_PINS),
- SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x14, 0x90170110},
- {0x1b, 0x01011020},
- {0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC,
{0x14, 0x90170110},
{0x1b, 0x90a70130},
@@ -7852,6 +7815,12 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
{0x19, 0x40000000},
{0x1b, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x19, 0x40000000},
+ {0x1a, 0x40000000}),
{}
};
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 4b0dea7f7669..deadba40131c 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -884,7 +884,8 @@ static int snd_ice1712_pcm(struct snd_ice1712 *ice, int device)
ice->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci), 64*1024, 64*1024);
+ &ice->pci->dev,
+ 64*1024, 64*1024);
dev_warn(ice->card->dev,
"Consumer PCM code does not work well at the moment --jk\n");
@@ -909,7 +910,8 @@ static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device)
ice->pcm_ds = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci), 64*1024, 128*1024);
+ &ice->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
@@ -1253,7 +1255,8 @@ static int snd_ice1712_pcm_profi(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, "ICE1712 multi");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci), 256*1024, 256*1024);
+ &ice->pci->dev,
+ 256*1024, 256*1024);
ice->pcm_pro = pcm;
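
The snd_dma_pci_data() conversions in this and the following drivers are
mechanical: as far as I recall, include/sound/memalloc.h defined the macro as
(&(pci)->dev), so passing the struct device pointer directly is behaviourally
identical while letting the helpers serve non-PCI buses. A before/after
fragment (kernel context assumed; "chip" is a placeholder):

	/* before: PCI-only macro hides the device pointer */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(chip->pci),
					      64*1024, 64*1024);

	/* after: the plain struct device *, same allocation */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      &chip->pci->dev,
					      64*1024, 64*1024);

For SNDRV_DMA_TYPE_CONTINUOUS allocations (see the rme32.c hunks further
down) the device argument becomes NULL, the GFP_KERNEL cookie having likewise
been absorbed into the core allocator.
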
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index e62c11816683..c80a16ee6e76 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -1143,7 +1143,7 @@ static int snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, "ICE1724");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci),
+ &ice->pci->dev,
256*1024, 256*1024);
ice->pcm_pro = pcm;
@@ -1341,7 +1341,7 @@ static int snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci),
+ &ice->pci->dev,
256*1024, 256*1024);
ice->pcm = pcm;
@@ -1455,7 +1455,7 @@ static int snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
strcpy(pcm->name, "ICE1724 Surround PCM");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(ice->pci),
+ &ice->pci->dev,
256*1024, 256*1024);
ice->pcm_ds = pcm;
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 6ff94d8ad86e..12374ba08ca2 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1488,7 +1488,7 @@ static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
chip->pcm[device] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, intel8x0_dma_type(chip),
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
rec->prealloc_size, rec->prealloc_max_size);
if (rec->playback_ops &&
@@ -3047,7 +3047,7 @@ static int snd_intel8x0_create(struct snd_card *card,
/* allocate buffer descriptor lists */
/* the start of each lists must be aligned to 8 bytes */
- if (snd_dma_alloc_pages(intel8x0_dma_type(chip), snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(intel8x0_dma_type(chip), &pci->dev,
chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2,
&chip->bdbars) < 0) {
snd_intel8x0_free(chip);
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 2f960fb092df..a9add5fedfcb 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -734,7 +734,7 @@ static int snd_intel8x0m_pcm1(struct intel8x0m *chip, int device,
chip->pcm[device] = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
rec->prealloc_size,
rec->prealloc_max_size);
@@ -1176,7 +1176,7 @@ static int snd_intel8x0m_create(struct snd_card *card,
/* allocate buffer descriptor lists */
/* the start of each lists must be aligned to 8 bytes */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2,
&chip->bdbars) < 0) {
snd_intel8x0m_free(chip);
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 0d81eac0a478..2b8204a13c69 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2275,7 +2275,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
korg1212->idRegPtr,
stateName[korg1212->cardState]);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
sizeof(struct KorgSharedBuffer), &korg1212->dma_shared) < 0) {
snd_printk(KERN_ERR "korg1212: can not allocate shared buffer memory (%zd bytes)\n", sizeof(struct KorgSharedBuffer));
snd_korg1212_free(korg1212);
@@ -2290,7 +2290,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
korg1212->DataBufsSize = sizeof(struct KorgAudioBuffer) * kNumBuffers;
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
korg1212->DataBufsSize, &korg1212->dma_play) < 0) {
snd_printk(KERN_ERR "korg1212: can not allocate play data buffer memory (%d bytes)\n", korg1212->DataBufsSize);
snd_korg1212_free(korg1212);
@@ -2302,7 +2302,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
K1212_DEBUG_PRINTK("K1212_DEBUG: Play Data Area = 0x%p (0x%08x), %d bytes\n",
korg1212->playDataBufsPtr, korg1212->PlayDataPhy, korg1212->DataBufsSize);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
korg1212->DataBufsSize, &korg1212->dma_rec) < 0) {
snd_printk(KERN_ERR "korg1212: can not allocate record data buffer memory (%d bytes)\n", korg1212->DataBufsSize);
snd_korg1212_free(korg1212);
@@ -2337,7 +2337,7 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
return err;
}
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
dsp_code->size, &korg1212->dma_dsp) < 0) {
snd_printk(KERN_ERR "korg1212: cannot allocate dsp code memory (%zd bytes)\n", dsp_code->size);
snd_korg1212_free(korg1212);
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 5cda3488ceab..21ac9d003e8e 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -350,7 +350,7 @@ static int setup_corb_rirb(struct lola *chip)
unsigned long end_time;
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
PAGE_SIZE, &chip->rb);
if (err < 0)
return err;
diff --git a/sound/pci/lola/lola_pcm.c b/sound/pci/lola/lola_pcm.c
index 151f7cf5ce0e..856bcca60128 100644
--- a/sound/pci/lola/lola_pcm.c
+++ b/sound/pci/lola/lola_pcm.c
@@ -582,7 +582,6 @@ static const struct snd_pcm_ops lola_pcm_ops = {
.prepare = lola_pcm_prepare,
.trigger = lola_pcm_trigger,
.pointer = lola_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
int lola_create_pcm(struct lola *chip)
@@ -592,7 +591,7 @@ int lola_create_pcm(struct lola *chip)
for (i = 0; i < 2; i++) {
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
PAGE_SIZE, &chip->pcm[i].bdl);
if (err < 0)
return err;
@@ -612,7 +611,7 @@ int lola_create_pcm(struct lola *chip)
}
/* buffer pre-allocation */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
1024 * 64, 32 * 1024 * 1024);
return 0;
}
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index fe10714380f2..d0f63fa54121 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -846,7 +846,7 @@ static int lx_pcm_create(struct lx6464es *chip)
strcpy(pcm->name, card_name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
size, size);
chip->pcm = pcm;
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 19fa73df0846..cc8594d76c70 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -1861,7 +1861,8 @@ snd_m3_pcm(struct snd_m3 * chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 64*1024);
+ &chip->pci->dev,
+ 64*1024, 64*1024);
return 0;
}
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index e5279ce54ee1..674d37ec96b3 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -948,7 +948,8 @@ static void preallocate_buffers(struct snd_mixart *chip, struct snd_pcm *pcm)
}
#endif
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->mgr->pci), 32*1024, 32*1024);
+ &chip->mgr->pci->dev,
+ 32*1024, 32*1024);
}
/*
@@ -1360,7 +1361,7 @@ static int snd_mixart_probe(struct pci_dev *pci,
/* create array of streaminfo */
size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
sizeof(struct mixart_flowinfo)) );
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
size, &mgr->flowinfo) < 0) {
snd_mixart_free(mgr);
return -ENOMEM;
@@ -1371,7 +1372,7 @@ static int snd_mixart_probe(struct pci_dev *pci,
/* create array of bufferinfo */
size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
sizeof(struct mixart_bufferinfo)) );
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
size, &mgr->bufferinfo) < 0) {
snd_mixart_free(mgr);
return -ENOMEM;
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index e6aa16646fd4..203c8fe48a01 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -713,13 +713,13 @@ int oxygen_pcm_init(struct oxygen *chip)
if (outs)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES_MULTICH,
BUFFER_BYTES_MAX_MULTICH);
if (ins)
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
@@ -739,7 +739,7 @@ int oxygen_pcm_init(struct oxygen *chip)
pcm->private_data = chip;
strcpy(pcm->name, "Digital");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
@@ -769,7 +769,7 @@ int oxygen_pcm_init(struct oxygen *chip)
pcm->private_data = chip;
strcpy(pcm->name, outs ? "Front Panel" : "Analog 2");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
@@ -787,7 +787,7 @@ int oxygen_pcm_init(struct oxygen *chip)
pcm->private_data = chip;
strcpy(pcm->name, "Analog 3");
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index e493962d8455..4af34d6d92df 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1171,7 +1171,7 @@ int pcxhr_create_pcm(struct snd_pcxhr *chip)
strcpy(pcm->name, name);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->mgr->pci),
+ &chip->mgr->pci->dev,
32*1024, 32*1024);
chip->pcm = pcm;
return 0;
@@ -1644,7 +1644,7 @@ static int pcxhr_probe(struct pci_dev *pci,
/* create hostport purgebuffer */
size = PAGE_ALIGN(sizeof(struct pcxhr_hostport));
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
size, &mgr->hostport) < 0) {
pcxhr_free(mgr);
return -ENOMEM;
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 58771ae0ed63..abcea86045ec 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1550,7 +1550,7 @@ snd_riptide_hw_params(struct snd_pcm_substream *substream,
if (sgdlist->area)
snd_dma_free_pages(sgdlist);
if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
sizeof(struct sgd) * (DESC_MAX_MASK + 1),
sgdlist)) < 0) {
snd_printk(KERN_ERR "Riptide: failed to alloc %d dma bytes\n",
@@ -1661,7 +1661,6 @@ static const struct snd_pcm_ops snd_riptide_playback_ops = {
.hw_params = snd_riptide_hw_params,
.hw_free = snd_riptide_hw_free,
.prepare = snd_riptide_prepare,
- .page = snd_pcm_sgbuf_ops_page,
.trigger = snd_riptide_trigger,
.pointer = snd_riptide_pointer,
};
@@ -1672,7 +1671,6 @@ static const struct snd_pcm_ops snd_riptide_capture_ops = {
.hw_params = snd_riptide_hw_params,
.hw_free = snd_riptide_hw_free,
.prepare = snd_riptide_prepare,
- .page = snd_pcm_sgbuf_ops_page,
.trigger = snd_riptide_trigger,
.pointer = snd_riptide_pointer,
};
@@ -1695,7 +1693,7 @@ static int snd_riptide_pcm(struct snd_riptide *chip, int device)
strcpy(pcm->name, "RIPTIDE");
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64 * 1024, 128 * 1024);
return 0;
}
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 40cc6ca88f7b..58a4b8df25d4 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1375,7 +1375,7 @@ static int snd_rme32_create(struct rme32 *rme32)
snd_pcm_set_ops(rme32->spdif_pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_rme32_capture_spdif_fd_ops);
snd_pcm_lib_preallocate_pages_for_all(rme32->spdif_pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
0, RME32_MID_BUFFER_SIZE);
rme32->spdif_pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
} else {
@@ -1407,7 +1407,7 @@ static int snd_rme32_create(struct rme32 *rme32)
snd_pcm_set_ops(rme32->adat_pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_rme32_capture_adat_fd_ops);
snd_pcm_lib_preallocate_pages_for_all(rme32->adat_pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
0, RME32_MID_BUFFER_SIZE);
rme32->adat_pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX;
} else {
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 5cbdc9be9c7e..cd20af465d8e 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -569,12 +569,7 @@ static char channel_map_H9632_qs[HDSP_MAX_CHANNELS] = {
static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab, size_t size)
{
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- dmab->dev.dev = snd_dma_pci_data(pci);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- size, dmab) < 0)
- return -ENOMEM;
- return 0;
+ return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev, size, dmab);
}
static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 81a6f4b2bd3c..75c06a7cc779 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6368,7 +6368,6 @@ static const struct snd_pcm_ops snd_hdspm_ops = {
.prepare = snd_hdspm_prepare,
.trigger = snd_hdspm_trigger,
.pointer = snd_hdspm_hw_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static int snd_hdspm_create_hwdep(struct snd_card *card,
@@ -6407,7 +6406,7 @@ static int snd_hdspm_preallocate_memory(struct hdspm *hdspm)
wanted = HDSPM_DMA_AREA_BYTES;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(hdspm->pci),
+ &hdspm->pci->dev,
wanted, wanted);
dev_dbg(hdspm->card->dev, " Preallocated %zd Bytes\n", wanted);
return 0;
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 4c851f8dcaf8..ef5c2f8e17c7 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -279,12 +279,7 @@ static char channel_map_9636_ds[26] = {
static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab, size_t size)
{
- dmab->dev.type = SNDRV_DMA_TYPE_DEV;
- dmab->dev.dev = snd_dma_pci_data(pci);
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
- size, dmab) < 0)
- return -ENOMEM;
- return 0;
+ return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev, size, dmab);
}
static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index b0b5e74e776c..ef7dd290ae05 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -905,7 +905,8 @@ static int sis_pcm_create(struct sis7019 *sis)
* world if this fails.
*/
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(sis->pci), 64*1024, 128*1024);
+ &sis->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 13103f5c309b..31cbc811ad37 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -884,7 +884,8 @@ static int snd_sonicvibes_pcm(struct sonicvibes *sonic, int device)
sonic->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(sonic->pci), 64*1024, 128*1024);
+ &sonic->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 1a6f6202fd16..07022c0dad40 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -2076,7 +2076,6 @@ static const struct snd_pcm_ops snd_trident_nx_playback_ops = {
.prepare = snd_trident_playback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops snd_trident_capture_ops = {
@@ -2121,7 +2120,6 @@ static const struct snd_pcm_ops snd_trident_nx_foldback_ops = {
.prepare = snd_trident_foldback_prepare,
.trigger = snd_trident_trigger,
.pointer = snd_trident_playback_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
static const struct snd_pcm_ops snd_trident_spdif_ops = {
@@ -2186,14 +2184,16 @@ int snd_trident_pcm(struct snd_trident *trident, int device)
struct snd_pcm_substream *substream;
for (substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; substream; substream = substream->next)
snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(trident->pci),
+ &trident->pci->dev,
64*1024, 128*1024);
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
- SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
+ SNDRV_DMA_TYPE_DEV,
+ &trident->pci->dev,
64*1024, 128*1024);
} else {
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
+ &trident->pci->dev,
+ 64*1024, 128*1024);
}
return 0;
@@ -2243,10 +2243,12 @@ int snd_trident_foldback_pcm(struct snd_trident *trident, int device)
if (trident->tlb.entries)
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(trident->pci), 0, 128*1024);
+ &trident->pci->dev,
+ 0, 128*1024);
else
snd_pcm_lib_preallocate_pages_for_all(foldback, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
+ &trident->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
@@ -2280,7 +2282,9 @@ int snd_trident_spdif_pcm(struct snd_trident *trident, int device)
strcpy(spdif->name, "Trident 4DWave IEC958");
trident->spdif = spdif;
- snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci), 64*1024, 128*1024);
+ snd_pcm_lib_preallocate_pages_for_all(spdif, SNDRV_DMA_TYPE_DEV,
+ &trident->pci->dev,
+ 64*1024, 128*1024);
return 0;
}
@@ -3338,7 +3342,7 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
/* TLB array must be aligned to 16kB !!! so we allocate
32kB region and correct offset when necessary */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &trident->pci->dev,
2 * SNDRV_TRIDENT_MAX_PAGES * 4, &trident->tlb.buffer) < 0) {
dev_err(trident->card->dev, "unable to allocate TLB buffer\n");
return -ENOMEM;
@@ -3353,7 +3357,7 @@ static int snd_trident_tlb_alloc(struct snd_trident *trident)
return -ENOMEM;
/* allocate and setup silent page and initialise TLB entries */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(trident->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &trident->pci->dev,
SNDRV_TRIDENT_PAGE_SIZE, &trident->tlb.silent_page) < 0) {
dev_err(trident->card->dev, "unable to allocate silent page\n");
return -ENOMEM;
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 38601d0dfb73..30c817b6b635 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -419,7 +419,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
/* the start of each lists must be aligned to 8 bytes,
* but the kernel pages are much bigger, so we don't care
*/
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
&dev->table) < 0)
return -ENOMEM;
@@ -1363,7 +1363,6 @@ static const struct snd_pcm_ops snd_via686_playback_ops = {
.prepare = snd_via686_playback_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via686 capture callbacks */
@@ -1376,7 +1375,6 @@ static const struct snd_pcm_ops snd_via686_capture_ops = {
.prepare = snd_via686_capture_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via823x DSX playback callbacks */
@@ -1389,7 +1387,6 @@ static const struct snd_pcm_ops snd_via8233_playback_ops = {
.prepare = snd_via8233_playback_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via8233_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via823x multi-channel playback callbacks */
@@ -1402,7 +1399,6 @@ static const struct snd_pcm_ops snd_via8233_multi_ops = {
.prepare = snd_via8233_multi_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via8233_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via823x capture callbacks */
@@ -1415,7 +1411,6 @@ static const struct snd_pcm_ops snd_via8233_capture_ops = {
.prepare = snd_via8233_capture_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via8233_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
@@ -1459,7 +1454,7 @@ static int snd_via8233_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->capture_devno, VIA_REG_CAPTURE_8233_STATUS, 6, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1483,7 +1478,7 @@ static int snd_via8233_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->capture_devno + 1, VIA_REG_CAPTURE_8233_STATUS + 0x10, 7, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1526,7 +1521,7 @@ static int snd_via8233a_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->capture_devno, VIA_REG_CAPTURE_8233_STATUS, 6, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
@@ -1552,7 +1547,7 @@ static int snd_via8233a_pcm_new(struct via82xx *chip)
init_viadev(chip, chip->playback_devno, 0x30, 3, 0);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
return 0;
}
@@ -1582,7 +1577,7 @@ static int snd_via686_pcm_new(struct via82xx *chip)
init_viadev(chip, 1, VIA_REG_CAPTURE_STATUS, 0, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, VIA_MAX_BUFSIZE);
return 0;
}
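
Dropping .page = snd_pcm_sgbuf_ops_page is the other half of the pattern: once the scatter-gather buffer is set up through the standard preallocation helper, the PCM core can resolve pages from the substream's DMA buffer on its own, making the callback redundant. A sketch of the resulting ops table, with hypothetical handler names:

static const struct snd_pcm_ops example_playback_ops = {
	.open      = example_playback_open,
	.close     = example_playback_close,
	.ioctl     = snd_pcm_lib_ioctl,
	.hw_params = example_hw_params,
	.hw_free   = example_hw_free,
	.prepare   = example_prepare,
	.trigger   = example_trigger,
	.pointer   = example_pointer,
	/* no .page: the core derives pages from the SG buffer itself */
};
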
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index bfb5e1b89d5f..0edb9ea6e8a6 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -272,7 +272,7 @@ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substre
/* the start of each list must be aligned to 8 bytes,
* but the kernel pages are much bigger, so we don't care
*/
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
&dev->table) < 0)
return -ENOMEM;
@@ -801,7 +801,6 @@ static const struct snd_pcm_ops snd_via686_playback_ops = {
.prepare = snd_via82xx_pcm_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
/* via686 capture callbacks */
@@ -814,7 +813,6 @@ static const struct snd_pcm_ops snd_via686_capture_ops = {
.prepare = snd_via82xx_pcm_prepare,
.trigger = snd_via82xx_pcm_trigger,
.pointer = snd_via686_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
@@ -852,7 +850,7 @@ static int snd_via686_pcm_new(struct via82xx_modem *chip)
init_viadev(chip, 1, VIA_REG_MI_STATUS, 1);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(chip->pci),
+ &chip->pci->dev,
64*1024, 128*1024);
return 0;
}
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 90400ebb64af..125c11ed5064 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -587,7 +587,7 @@ static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int
static int snd_ymfpci_ac3_init(struct snd_ymfpci *chip)
{
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
4096, &chip->ac3_tmp_base) < 0)
return -ENOMEM;
@@ -1149,7 +1149,8 @@ int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device)
chip->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
snd_pcm_std_chmaps, 2, 0, NULL);
@@ -1184,7 +1185,8 @@ int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device)
chip->pcm2 = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1217,7 +1219,8 @@ int snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device)
chip->pcm_spdif = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return 0;
}
@@ -1258,7 +1261,8 @@ int snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device)
chip->pcm_4ch = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- snd_dma_pci_data(chip->pci), 64*1024, 256*1024);
+ &chip->pci->dev,
+ 64*1024, 256*1024);
return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
surround_map, 2, 0, NULL);
@@ -2108,7 +2112,7 @@ static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
chip->work_size;
/* work_ptr must be aligned to 256 bytes, but it's already
covered by the kernel page allocation mechanism */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
+ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &chip->pci->dev,
size, &chip->work_ptr) < 0)
return -ENOMEM;
ptr = chip->work_ptr.area;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index c21fec60cd98..067b1c3a3e02 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -89,8 +89,7 @@ static int pdacf_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
static int pdacf_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_32_buffer
- (subs, params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}
/*
@@ -98,7 +97,7 @@ static int pdacf_pcm_hw_params(struct snd_pcm_substream *subs,
*/
static int pdacf_pcm_hw_free(struct snd_pcm_substream *subs)
{
- return snd_pcm_lib_free_vmalloc_buffer(subs);
+ return snd_pcm_lib_free_pages(subs);
}
/*
@@ -262,7 +261,6 @@ static const struct snd_pcm_ops pdacf_pcm_capture_ops = {
.prepare = pdacf_pcm_prepare,
.trigger = pdacf_pcm_trigger,
.pointer = pdacf_pcm_capture_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -279,6 +277,9 @@ int snd_pdacf_pcm_new(struct snd_pdacf *chip)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pdacf_pcm_capture_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ snd_dma_continuous_data(GFP_KERNEL | GFP_DMA32),
+ 0, 0);
pcm->private_data = chip;
pcm->info_flags = 0;
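
The pdaudiocf change retires the one-off vmalloc helpers in favor of the standard buffer API: preallocating with SNDRV_DMA_TYPE_VMALLOC (the GFP_KERNEL | GFP_DMA32 flags carried through snd_dma_continuous_data()) lets hw_params/hw_free fall back to the generic snd_pcm_lib_malloc_pages()/snd_pcm_lib_free_pages(), and the .page callback goes away because the core knows how to map vmalloc memory. A sketch under the same assumptions, for a device that needs 32-bit addressable buffers:

static int example_hw_params(struct snd_pcm_substream *subs,
			     struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(subs, params_buffer_bytes(hw_params));
}

static void example_preallocate(struct snd_pcm *pcm)
{
	/* sizes 0/0: nothing allocated up front, only the type is recorded */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
			snd_dma_continuous_data(GFP_KERNEL | GFP_DMA32),
			0, 0);
}
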
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 52e9cfb4f819..bf1fb0d8a930 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -443,7 +443,7 @@ static int __init snd_aicapcmchip(struct snd_card_aica
/* Allocate the DMA buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
AICA_BUFFER_SIZE,
AICA_BUFFER_SIZE);
return 0;
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index ed877a138965..f9e36abc98ac 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -268,7 +268,7 @@ static int snd_sh_dac_pcm(struct snd_sh_dac *chip, int device)
/* buffer size=48K */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
48 * 1024,
48 * 1024);
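
For SNDRV_DMA_TYPE_CONTINUOUS the device argument only ever carried GFP flags, encoded via snd_dma_continuous_data(); a NULL device now stands for the default GFP_KERNEL allocation, so callers that never used other flags simply pass NULL. A sketch:

static void example_prealloc_continuous(struct snd_pcm *pcm)
{
	/* NULL device == default GFP_KERNEL continuous pages */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
					      NULL, 48 * 1024, 48 * 1024);
}
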
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 52225b4b6382..4b9a27e25206 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -759,14 +759,12 @@ static irqreturn_t dma_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
-static int acp_dma_open(struct snd_pcm_substream *substream)
+static int acp_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u16 bank;
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct audio_drv_data *intr_data = dev_get_drvdata(component->dev);
struct audio_substream_data *adata =
kzalloc(sizeof(struct audio_substream_data), GFP_KERNEL);
@@ -834,7 +832,8 @@ static int acp_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int acp_dma_hw_params(struct snd_pcm_substream *substream,
+static int acp_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
int status;
@@ -843,8 +842,6 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime;
struct audio_substream_data *rtd;
struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
struct snd_soc_card *card = prtd->card;
struct acp_platform_info *pinfo = snd_soc_card_get_drvdata(card);
@@ -995,7 +992,8 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
return status;
}
-static int acp_dma_hw_free(struct snd_pcm_substream *substream)
+static int acp_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
@@ -1011,7 +1009,8 @@ static u64 acp_get_byte_count(struct audio_substream_data *rtd)
return byte_count.bytescount;
}
-static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t acp_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u32 buffersize;
u32 pos = 0;
@@ -1053,13 +1052,15 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, pos);
}
-static int acp_dma_mmap(struct snd_pcm_substream *substream,
+static int acp_dma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
return snd_pcm_lib_default_mmap(substream, vma);
}
-static int acp_dma_prepare(struct snd_pcm_substream *substream)
+static int acp_dma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
@@ -1086,7 +1087,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int acp_dma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret;
@@ -1132,10 +1134,9 @@ static int acp_dma_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int acp_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
- DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
struct device *parent = component->dev->parent;
@@ -1158,14 +1159,12 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int acp_dma_close(struct snd_pcm_substream *substream)
+static int acp_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u16 bank;
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -1216,22 +1215,18 @@ static int acp_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops acp_dma_ops = {
- .open = acp_dma_open,
- .close = acp_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = acp_dma_hw_params,
- .hw_free = acp_dma_hw_free,
- .trigger = acp_dma_trigger,
- .pointer = acp_dma_pointer,
- .mmap = acp_dma_mmap,
- .prepare = acp_dma_prepare,
-};
-
static const struct snd_soc_component_driver acp_asoc_platform = {
- .name = DRV_NAME,
- .ops = &acp_dma_ops,
- .pcm_new = acp_dma_new,
+ .name = DRV_NAME,
+ .open = acp_dma_open,
+ .close = acp_dma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = acp_dma_hw_params,
+ .hw_free = acp_dma_hw_free,
+ .trigger = acp_dma_trigger,
+ .pointer = acp_dma_pointer,
+ .mmap = acp_dma_mmap,
+ .prepare = acp_dma_prepare,
+ .pcm_construct = acp_dma_new,
};
static int acp_audio_probe(struct platform_device *pdev)
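
This is the first of the ASoC conversions in the series: the snd_pcm_ops table that used to hang off snd_soc_component_driver.ops is flattened into the component driver itself, every callback gains the struct snd_soc_component * as its first argument, and pcm_new becomes pcm_construct. Because the component is now passed in, the snd_soc_rtdcom_lookup(prtd, DRV_NAME) dance disappears from each handler. A minimal sketch of the new shape (all example_* names are placeholders):

static int example_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	/* drvdata comes straight from the component, no rtdcom lookup;
	 * example_data is the driver's private struct */
	struct example_data *priv = dev_get_drvdata(component->dev);

	return priv ? 0 : -ENODEV;
}

static int example_pcm_new(struct snd_soc_component *component,
			   struct snd_soc_pcm_runtime *rtd)
{
	snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
					      component->dev->parent,
					      64 * 1024, 128 * 1024);
	return 0;
}

static const struct snd_soc_component_driver example_component = {
	.name		= "example",
	.open		= example_open,
	.ioctl		= snd_soc_pcm_lib_ioctl,
	.pcm_construct	= example_pcm_new,
};
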
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index bc4dfafdfcd1..60709e3ba99d 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -275,16 +275,12 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
rtd->acp3x_base + mmACP_EXTERNAL_INTR_CNTL);
}
-static int acp3x_dma_open(struct snd_pcm_substream *substream)
+static int acp3x_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int ret = 0;
-
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct i2s_dev_data *adata = dev_get_drvdata(component->dev);
-
struct i2s_stream_instance *i2s_data = kzalloc(sizeof(struct i2s_stream_instance),
GFP_KERNEL);
if (!i2s_data)
@@ -334,7 +330,8 @@ static u64 acp_get_byte_count(struct i2s_stream_instance *rtd, int direction)
return byte_count;
}
-static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+static int acp3x_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
int status;
@@ -362,7 +359,8 @@ static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
return status;
}
-static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u32 pos = 0;
u32 buffersize = 0;
@@ -379,33 +377,32 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, pos);
}
-static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int acp3x_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
- DRV_NAME);
struct device *parent = component->dev->parent;
snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
parent, MIN_BUFFER, MAX_BUFFER);
return 0;
}
-static int acp3x_dma_hw_free(struct snd_pcm_substream *substream)
+static int acp3x_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int acp3x_dma_mmap(struct snd_pcm_substream *substream,
+static int acp3x_dma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
return snd_pcm_lib_default_mmap(substream, vma);
}
-static int acp3x_dma_close(struct snd_pcm_substream *substream)
+static int acp3x_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
struct i2s_stream_instance *rtd = substream->runtime->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct i2s_dev_data *adata = dev_get_drvdata(component->dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -422,17 +419,6 @@ static int acp3x_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops acp3x_dma_ops = {
- .open = acp3x_dma_open,
- .close = acp3x_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = acp3x_dma_hw_params,
- .hw_free = acp3x_dma_hw_free,
- .pointer = acp3x_dma_pointer,
- .mmap = acp3x_dma_mmap,
-};
-
-
static int acp3x_dai_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
@@ -610,9 +596,15 @@ static struct snd_soc_dai_driver acp3x_i2s_dai_driver = {
};
static const struct snd_soc_component_driver acp3x_i2s_component = {
- .name = DRV_NAME,
- .ops = &acp3x_dma_ops,
- .pcm_new = acp3x_dma_new,
+ .name = DRV_NAME,
+ .open = acp3x_dma_open,
+ .close = acp3x_dma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = acp3x_dma_hw_params,
+ .hw_free = acp3x_dma_hw_free,
+ .pointer = acp3x_dma_pointer,
+ .mmap = acp3x_dma_mmap,
+ .pcm_construct = acp3x_dma_new,
};
static int acp3x_audio_probe(struct platform_device *pdev)
@@ -631,7 +623,7 @@ static int acp3x_audio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n");
- return -ENODEV;
+ return -ENODEV;
}
adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
diff --git a/sound/soc/atmel/atmel-pcm-pdc.c b/sound/soc/atmel/atmel-pcm-pdc.c
index ed095af866db..18a2fd02fffe 100644
--- a/sound/soc/atmel/atmel-pcm-pdc.c
+++ b/sound/soc/atmel/atmel-pcm-pdc.c
@@ -56,15 +56,17 @@ static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
return 0;
}
-static int atmel_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int atmel_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
return remap_pfn_range(vma, vma->vm_start,
substream->dma_buffer.addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int atmel_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -93,7 +95,8 @@ static int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-static void atmel_pcm_free(struct snd_pcm *pcm)
+static void atmel_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -196,8 +199,9 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
/*--------------------------------------------------------------------------*\
* PCM operations
\*--------------------------------------------------------------------------*/
-static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int atmel_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd = runtime->private_data;
@@ -225,7 +229,8 @@ static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int atmel_pcm_hw_free(struct snd_pcm_substream *substream)
+static int atmel_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
@@ -239,7 +244,8 @@ static int atmel_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int atmel_pcm_prepare(struct snd_pcm_substream *substream)
+static int atmel_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
@@ -251,8 +257,8 @@ static int atmel_pcm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int atmel_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int atmel_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *rtd = substream->runtime;
struct atmel_runtime_data *prtd = rtd->private_data;
@@ -317,8 +323,8 @@ static int atmel_pcm_trigger(struct snd_pcm_substream *substream,
return ret;
}
-static snd_pcm_uframes_t atmel_pcm_pointer(
- struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t atmel_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd = runtime->private_data;
@@ -335,7 +341,8 @@ static snd_pcm_uframes_t atmel_pcm_pointer(
return x;
}
-static int atmel_pcm_open(struct snd_pcm_substream *substream)
+static int atmel_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd;
@@ -360,7 +367,8 @@ static int atmel_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int atmel_pcm_close(struct snd_pcm_substream *substream)
+static int atmel_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
@@ -368,22 +376,18 @@ static int atmel_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops atmel_pcm_ops = {
+static const struct snd_soc_component_driver atmel_soc_platform = {
.open = atmel_pcm_open,
.close = atmel_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = atmel_pcm_hw_params,
.hw_free = atmel_pcm_hw_free,
.prepare = atmel_pcm_prepare,
.trigger = atmel_pcm_trigger,
.pointer = atmel_pcm_pointer,
.mmap = atmel_pcm_mmap,
-};
-
-static struct snd_soc_component_driver atmel_soc_platform = {
- .ops = &atmel_pcm_ops,
- .pcm_new = atmel_pcm_new,
- .pcm_free = atmel_pcm_free,
+ .pcm_construct = atmel_pcm_new,
+ .pcm_destruct = atmel_pcm_free,
};
int atmel_pcm_pdc_platform_register(struct device *dev)
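
The atmel conversion shows the destructor side of the same migration: pcm_free becomes pcm_destruct and, like pcm_construct, now receives the component. A sketch of the pair, with placeholder names:

static int example_pcm_new(struct snd_soc_component *component,
			   struct snd_soc_pcm_runtime *rtd)
{
	/* per-stream buffer setup goes here */
	return 0;
}

static void example_pcm_free(struct snd_soc_component *component,
			     struct snd_pcm *pcm)
{
	snd_pcm_lib_preallocate_free_for_all(pcm);
}

static const struct snd_soc_component_driver example_platform = {
	.pcm_construct	= example_pcm_new,
	.pcm_destruct	= example_pcm_free,
};
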
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index d56092a5ee11..4553108ec92a 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -182,15 +182,15 @@ out:
return 0;
}
-static inline struct au1xpsc_audio_dmadata *to_dmadata(struct snd_pcm_substream *ss)
+static inline struct au1xpsc_audio_dmadata *to_dmadata(struct snd_pcm_substream *ss,
+ struct snd_soc_component *component)
{
- struct snd_soc_pcm_runtime *rtd = ss->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct au1xpsc_audio_dmadata *pcd = snd_soc_component_get_drvdata(component);
return &pcd[ss->stream];
}
-static int au1xpsc_pcm_hw_params(struct snd_pcm_substream *substream,
+static int au1xpsc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -202,7 +202,7 @@ static int au1xpsc_pcm_hw_params(struct snd_pcm_substream *substream,
goto out;
stype = substream->stream;
- pcd = to_dmadata(substream);
+ pcd = to_dmadata(substream, component);
DBG("runtime->dma_area = 0x%08lx dma_addr_t = 0x%08lx dma_size = %zu "
"runtime->min_align %lu\n",
@@ -232,15 +232,17 @@ out:
return ret;
}
-static int au1xpsc_pcm_hw_free(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
-static int au1xpsc_pcm_prepare(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream);
+ struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream, component);
au1xxx_dbdma_reset(pcd->ddma_chan);
@@ -255,9 +257,10 @@ static int au1xpsc_pcm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int au1xpsc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int au1xpsc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
- u32 c = to_dmadata(substream)->ddma_chan;
+ u32 c = to_dmadata(substream, component)->ddma_chan;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -275,14 +278,17 @@ static int au1xpsc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t
-au1xpsc_pcm_pointer(struct snd_pcm_substream *substream)
+au1xpsc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- return bytes_to_frames(substream->runtime, to_dmadata(substream)->pos);
+ return bytes_to_frames(substream->runtime,
+ to_dmadata(substream, component)->pos);
}
-static int au1xpsc_pcm_open(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream);
+ struct au1xpsc_audio_dmadata *pcd = to_dmadata(substream, component);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int stype = substream->stream, *dmaids;
@@ -296,24 +302,15 @@ static int au1xpsc_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int au1xpsc_pcm_close(struct snd_pcm_substream *substream)
+static int au1xpsc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- au1x_pcm_dbdma_free(to_dmadata(substream));
+ au1x_pcm_dbdma_free(to_dmadata(substream, component));
return 0;
}
-static const struct snd_pcm_ops au1xpsc_pcm_ops = {
- .open = au1xpsc_pcm_open,
- .close = au1xpsc_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = au1xpsc_pcm_hw_params,
- .hw_free = au1xpsc_pcm_hw_free,
- .prepare = au1xpsc_pcm_prepare,
- .trigger = au1xpsc_pcm_trigger,
- .pointer = au1xpsc_pcm_pointer,
-};
-
-static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int au1xpsc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -327,8 +324,15 @@ static int au1xpsc_pcm_new(struct snd_soc_pcm_runtime *rtd)
/* au1xpsc audio platform */
static struct snd_soc_component_driver au1xpsc_soc_component = {
.name = DRV_NAME,
- .ops = &au1xpsc_pcm_ops,
- .pcm_new = au1xpsc_pcm_new,
+ .open = au1xpsc_pcm_open,
+ .close = au1xpsc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = au1xpsc_pcm_hw_params,
+ .hw_free = au1xpsc_pcm_hw_free,
+ .prepare = au1xpsc_pcm_prepare,
+ .trigger = au1xpsc_pcm_trigger,
+ .pointer = au1xpsc_pcm_pointer,
+ .pcm_construct = au1xpsc_pcm_new,
};
static int au1xpsc_pcm_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 1e98cc4f9e27..054dfda89d3e 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -174,22 +174,23 @@ static const struct snd_pcm_hardware alchemy_pcm_hardware = {
.fifo_size = 16,
};
-static inline struct alchemy_pcm_ctx *ss_to_ctx(struct snd_pcm_substream *ss)
+static inline struct alchemy_pcm_ctx *ss_to_ctx(struct snd_pcm_substream *ss,
+ struct snd_soc_component *component)
{
- struct snd_soc_pcm_runtime *rtd = ss->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
return snd_soc_component_get_drvdata(component);
}
-static inline struct audio_stream *ss_to_as(struct snd_pcm_substream *ss)
+static inline struct audio_stream *ss_to_as(struct snd_pcm_substream *ss,
+ struct snd_soc_component *component)
{
- struct alchemy_pcm_ctx *ctx = ss_to_ctx(ss);
+ struct alchemy_pcm_ctx *ctx = ss_to_ctx(ss, component);
return &(ctx->stream[ss->stream]);
}
-static int alchemy_pcm_open(struct snd_pcm_substream *substream)
+static int alchemy_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream);
+ struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream, component);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
int *dmaids, s = substream->stream;
char *name;
@@ -213,9 +214,10 @@ static int alchemy_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int alchemy_pcm_close(struct snd_pcm_substream *substream)
+static int alchemy_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream);
+ struct alchemy_pcm_ctx *ctx = ss_to_ctx(substream, component);
int stype = substream->stream;
ctx->stream[stype].substream = NULL;
@@ -224,10 +226,11 @@ static int alchemy_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int alchemy_pcm_hw_params(struct snd_pcm_substream *substream,
+static int alchemy_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct audio_stream *stream = ss_to_as(substream);
+ struct audio_stream *stream = ss_to_as(substream, component);
int err;
err = snd_pcm_lib_malloc_pages(substream,
@@ -243,16 +246,18 @@ static int alchemy_pcm_hw_params(struct snd_pcm_substream *substream,
return err;
}
-static int alchemy_pcm_hw_free(struct snd_pcm_substream *substream)
+static int alchemy_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct audio_stream *stream = ss_to_as(substream);
+ struct audio_stream *stream = ss_to_as(substream, component);
au1000_release_dma_link(stream);
return snd_pcm_lib_free_pages(substream);
}
-static int alchemy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int alchemy_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
- struct audio_stream *stream = ss_to_as(substream);
+ struct audio_stream *stream = ss_to_as(substream, component);
int err = 0;
switch (cmd) {
@@ -269,9 +274,10 @@ static int alchemy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return err;
}
-static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_pcm_substream *ss)
+static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
- struct audio_stream *stream = ss_to_as(ss);
+ struct audio_stream *stream = ss_to_as(ss, component);
long location;
location = get_dma_residue(stream->dma);
@@ -281,30 +287,27 @@ static snd_pcm_uframes_t alchemy_pcm_pointer(struct snd_pcm_substream *ss)
return bytes_to_frames(ss->runtime, location);
}
-static const struct snd_pcm_ops alchemy_pcm_ops = {
- .open = alchemy_pcm_open,
- .close = alchemy_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = alchemy_pcm_hw_params,
- .hw_free = alchemy_pcm_hw_free,
- .trigger = alchemy_pcm_trigger,
- .pointer = alchemy_pcm_pointer,
-};
-
-static int alchemy_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int alchemy_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), 65536, (4096 * 1024) - 1);
+ NULL, 65536, (4096 * 1024) - 1);
return 0;
}
static struct snd_soc_component_driver alchemy_pcm_soc_component = {
.name = DRV_NAME,
- .ops = &alchemy_pcm_ops,
- .pcm_new = alchemy_pcm_new,
+ .open = alchemy_pcm_open,
+ .close = alchemy_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = alchemy_pcm_hw_params,
+ .hw_free = alchemy_pcm_hw_free,
+ .trigger = alchemy_pcm_trigger,
+ .pointer = alchemy_pcm_pointer,
+ .pcm_construct = alchemy_pcm_new,
};
static int alchemy_pcm_drvprobe(struct platform_device *pdev)
diff --git a/sound/soc/bcm/cygnus-pcm.c b/sound/soc/bcm/cygnus-pcm.c
index 8966b02844dc..c65408085c1d 100644
--- a/sound/soc/bcm/cygnus-pcm.c
+++ b/sound/soc/bcm/cygnus-pcm.c
@@ -376,7 +376,8 @@ static void disable_intr(struct snd_pcm_substream *substream)
}
-static int cygnus_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int cygnus_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
@@ -577,7 +578,8 @@ static irqreturn_t cygnus_dma_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int cygnus_pcm_open(struct snd_pcm_substream *substream)
+static int cygnus_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -613,7 +615,8 @@ static int cygnus_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int cygnus_pcm_close(struct snd_pcm_substream *substream)
+static int cygnus_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct cygnus_aio_port *aio;
@@ -633,8 +636,9 @@ static int cygnus_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int cygnus_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int cygnus_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -649,7 +653,8 @@ static int cygnus_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int cygnus_pcm_hw_free(struct snd_pcm_substream *substream)
+static int cygnus_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct cygnus_aio_port *aio;
@@ -661,7 +666,8 @@ static int cygnus_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int cygnus_pcm_prepare(struct snd_pcm_substream *substream)
+static int cygnus_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -694,7 +700,8 @@ static int cygnus_pcm_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t cygnus_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t cygnus_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct cygnus_aio_port *aio;
unsigned int res = 0, cur = 0, base = 0;
@@ -750,19 +757,8 @@ static int cygnus_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-
-static const struct snd_pcm_ops cygnus_pcm_ops = {
- .open = cygnus_pcm_open,
- .close = cygnus_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = cygnus_pcm_hw_params,
- .hw_free = cygnus_pcm_hw_free,
- .prepare = cygnus_pcm_prepare,
- .trigger = cygnus_pcm_trigger,
- .pointer = cygnus_pcm_pointer,
-};
-
-static void cygnus_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void cygnus_dma_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -788,7 +784,8 @@ static void cygnus_dma_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int cygnus_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -810,7 +807,7 @@ static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
ret = cygnus_pcm_preallocate_dma_buffer(pcm,
SNDRV_PCM_STREAM_CAPTURE);
if (ret) {
- cygnus_dma_free_dma_buffers(pcm);
+ cygnus_dma_free_dma_buffers(component, pcm);
return ret;
}
}
@@ -819,9 +816,16 @@ static int cygnus_dma_new(struct snd_soc_pcm_runtime *rtd)
}
static struct snd_soc_component_driver cygnus_soc_platform = {
- .ops = &cygnus_pcm_ops,
- .pcm_new = cygnus_dma_new,
- .pcm_free = cygnus_dma_free_dma_buffers,
+ .open = cygnus_pcm_open,
+ .close = cygnus_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = cygnus_pcm_hw_params,
+ .hw_free = cygnus_pcm_hw_free,
+ .prepare = cygnus_pcm_prepare,
+ .trigger = cygnus_pcm_trigger,
+ .pointer = cygnus_pcm_pointer,
+ .pcm_construct = cygnus_dma_new,
+ .pcm_destruct = cygnus_dma_free_dma_buffers,
};
int cygnus_soc_platform_register(struct device *dev,
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 2333efac758a..8039a8febefa 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -33,13 +33,13 @@ config SND_EP93XX_SOC_AC97
select SND_SOC_AC97_BUS
config SND_EP93XX_SOC_SNAPPERCL15
- tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
- depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
- select SND_EP93XX_SOC_I2S
- select SND_SOC_TLV320AIC23_I2C
- help
- Say Y or M here if you want to add support for I2S audio on the
- Bluewater Systems Snapper CL15 module.
+ tristate "SoC Audio support for Bluewater Systems Snapper CL15 module"
+ depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C
+ select SND_EP93XX_SOC_I2S
+ select SND_SOC_TLV320AIC23_I2C
+ help
+ Say Y or M here if you want to add support for I2S audio on the
+ Bluewater Systems Snapper CL15 module.
config SND_EP93XX_SOC_SIMONE
tristate "SoC Audio support for Simplemachines Sim.One board"
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 229cc89f8c5a..4abf37b5083f 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -34,6 +34,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ADAU1977_I2C if I2C
select SND_SOC_ADAU1701 if I2C
select SND_SOC_ADAU7002
+ select SND_SOC_ADAU7118_I2C if I2C
+ select SND_SOC_ADAU7118_HW
select SND_SOC_ADS117X
select SND_SOC_AK4104 if SPI_MASTER
select SND_SOC_AK4118 if I2C
@@ -179,6 +181,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
select SND_SOC_STI_SAS
select SND_SOC_TAS2552 if I2C
+ select SND_SOC_TAS2562 if I2C
+ select SND_SOC_TAS2770 if I2C
select SND_SOC_TAS5086 if I2C
select SND_SOC_TAS571X if I2C
select SND_SOC_TAS5720 if I2C
@@ -257,16 +261,16 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM9705 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
select SND_SOC_WM9712 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
select SND_SOC_WM9713 if (SND_SOC_AC97_BUS || SND_SOC_AC97_BUS_NEW)
- help
- Normally ASoC codec drivers are only built if a machine driver which
- uses them is also built since they are only usable with a machine
- driver. Selecting this option will allow these drivers to be built
- without an explicit machine driver for test and development purposes.
+ help
+ Normally ASoC codec drivers are only built if a machine driver which
+ uses them is also built since they are only usable with a machine
+ driver. Selecting this option will allow these drivers to be built
+ without an explicit machine driver for test and development purposes.
Support for the bus types used to access the codecs to be built must
be selected separately.
- If unsure select "N".
+ If unsure select "N".
config SND_SOC_88PM860X
tristate
@@ -395,6 +399,33 @@ config SND_SOC_ADAU1977_I2C
config SND_SOC_ADAU7002
tristate "Analog Devices ADAU7002 Stereo PDM-to-I2S/TDM Converter"
+config SND_SOC_ADAU7118
+ tristate
+
+config SND_SOC_ADAU7118_HW
+ tristate "Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM Converter - HW Mode"
+ select SND_SOC_ADAU7118
+ help
+ Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
+ Converter. Here the device works standalone, which means that there is
+ no bus to communicate with it. Stereo mode is not supported in this
+ mode.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-soc-adau7118-hw.
+
+config SND_SOC_ADAU7118_I2C
+ tristate "Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM Converter - I2C"
+ depends on I2C
+ select SND_SOC_ADAU7118
+ select REGMAP_I2C
+ help
+ Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
+ Converter over I2C. This gives full control over the device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-soc-adau7118-i2c.
+
config SND_SOC_ADAV80X
tristate
@@ -478,6 +509,8 @@ config SND_SOC_CQ0093VC
config SND_SOC_CROS_EC_CODEC
tristate "codec driver for ChromeOS EC"
depends on CROS_EC
+ select CRYPTO
+ select CRYPTO_SHA256
help
If you say yes here you will get support for the
ChromeOS Embedded Controller's Audio Codec.
@@ -570,8 +603,8 @@ config SND_SOC_CS42XX8_I2C
# Cirrus Logic CS43130 HiFi DAC
config SND_SOC_CS43130
- tristate "Cirrus Logic CS43130 CODEC"
- depends on I2C
+ tristate "Cirrus Logic CS43130 CODEC"
+ depends on I2C
config SND_SOC_CS4341
tristate "Cirrus Logic CS4341 CODEC"
@@ -643,19 +676,20 @@ config SND_SOC_L3
tristate
config SND_SOC_DA7210
- tristate
+ tristate
config SND_SOC_DA7213
- tristate
+ tristate "Dialog DA7213 CODEC"
+ depends on I2C
config SND_SOC_DA7218
tristate
config SND_SOC_DA7219
- tristate
+ tristate
config SND_SOC_DA732X
- tristate
+ tristate
config SND_SOC_DA9055
tristate
@@ -717,7 +751,7 @@ config SND_SOC_INNO_RK3036
select REGMAP_MMIO
config SND_SOC_ISABELLE
- tristate
+ tristate
config SND_SOC_LM49453
tristate
@@ -988,7 +1022,7 @@ config SND_SOC_RT5640
tristate
config SND_SOC_RT5645
- tristate
+ tristate
config SND_SOC_RT5651
tristate
@@ -1104,6 +1138,14 @@ config SND_SOC_TAS2552
tristate "Texas Instruments TAS2552 Mono Audio amplifier"
depends on I2C
+config SND_SOC_TAS2562
+ tristate "Texas Instruments TAS2562 Mono Audio amplifier"
+ depends on I2C
+
+config SND_SOC_TAS2770
+ tristate "Texas Instruments TAS2770 speaker amplifier"
+ depends on I2C
+
config SND_SOC_TAS5086
tristate "Texas Instruments TAS5086 speaker amplifier"
depends on I2C
@@ -1220,7 +1262,7 @@ config SND_SOC_UDA134X
tristate
config SND_SOC_UDA1380
- tristate
+ tristate
depends on I2C
config SND_SOC_WCD9335
@@ -1348,7 +1390,7 @@ config SND_SOC_WM8904
depends on I2C
config SND_SOC_WM8940
- tristate
+ tristate
config SND_SOC_WM8955
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index c498373dcc5f..ddfd07071925 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -22,6 +22,9 @@ snd-soc-adau1977-objs := adau1977.o
snd-soc-adau1977-spi-objs := adau1977-spi.o
snd-soc-adau1977-i2c-objs := adau1977-i2c.o
snd-soc-adau7002-objs := adau7002.o
+snd-soc-adau7118-objs := adau7118.o
+snd-soc-adau7118-i2c-objs := adau7118-i2c.o
+snd-soc-adau7118-hw-objs := adau7118-hw.o
snd-soc-adav80x-objs := adav80x.o
snd-soc-adav801-objs := adav801.o
snd-soc-adav803-objs := adav803.o
@@ -196,6 +199,7 @@ snd-soc-tas571x-objs := tas571x.o
snd-soc-tas5720-objs := tas5720.o
snd-soc-tas6424-objs := tas6424.o
snd-soc-tda7419-objs := tda7419.o
+snd-soc-tas2770-objs := tas2770.o
snd-soc-tfa9879-objs := tfa9879.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -280,6 +284,7 @@ snd-soc-max98504-objs := max98504.o
snd-soc-simple-amplifier-objs := simple-amplifier.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
snd-soc-tas2552-objs := tas2552.o
+snd-soc-tas2562-objs := tas2562.o
obj-$(CONFIG_SND_SOC_88PM860X) += snd-soc-88pm860x.o
obj-$(CONFIG_SND_SOC_AB8500_CODEC) += snd-soc-ab8500-codec.o
@@ -304,6 +309,9 @@ obj-$(CONFIG_SND_SOC_ADAU1977) += snd-soc-adau1977.o
obj-$(CONFIG_SND_SOC_ADAU1977_SPI) += snd-soc-adau1977-spi.o
obj-$(CONFIG_SND_SOC_ADAU1977_I2C) += snd-soc-adau1977-i2c.o
obj-$(CONFIG_SND_SOC_ADAU7002) += snd-soc-adau7002.o
+obj-$(CONFIG_SND_SOC_ADAU7118) += snd-soc-adau7118.o
+obj-$(CONFIG_SND_SOC_ADAU7118_I2C) += snd-soc-adau7118-i2c.o
+obj-$(CONFIG_SND_SOC_ADAU7118_HW) += snd-soc-adau7118-hw.o
obj-$(CONFIG_SND_SOC_ADAV80X) += snd-soc-adav80x.o
obj-$(CONFIG_SND_SOC_ADAV801) += snd-soc-adav801.o
obj-$(CONFIG_SND_SOC_ADAV803) += snd-soc-adav803.o
@@ -474,11 +482,13 @@ obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o
obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o
obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
+obj-$(CONFIG_SND_SOC_TAS2562) += snd-soc-tas2562.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
obj-$(CONFIG_SND_SOC_TAS6424) += snd-soc-tas6424.o
obj-$(CONFIG_SND_SOC_TDA7419) += snd-soc-tda7419.o
+obj-$(CONFIG_SND_SOC_TAS2770) += snd-soc-tas2770.o
obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index 977f5a63be3f..5ca9b744b7d8 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -28,6 +28,10 @@
#define ADAU1761_REC_MIXER_RIGHT1 0x400d
#define ADAU1761_LEFT_DIFF_INPUT_VOL 0x400e
#define ADAU1761_RIGHT_DIFF_INPUT_VOL 0x400f
+#define ADAU1761_ALC_CTRL0 0x4011
+#define ADAU1761_ALC_CTRL1 0x4012
+#define ADAU1761_ALC_CTRL2 0x4013
+#define ADAU1761_ALC_CTRL3 0x4014
#define ADAU1761_PLAY_LR_MIXER_LEFT 0x4020
#define ADAU1761_PLAY_MIXER_LEFT0 0x401c
#define ADAU1761_PLAY_MIXER_LEFT1 0x401d
@@ -71,6 +75,10 @@ static const struct reg_default adau1761_reg_defaults[] = {
{ ADAU1761_REC_MIXER_RIGHT0, 0x00 },
{ ADAU1761_REC_MIXER_RIGHT1, 0x00 },
{ ADAU1761_LEFT_DIFF_INPUT_VOL, 0x00 },
+ { ADAU1761_ALC_CTRL0, 0x00 },
+ { ADAU1761_ALC_CTRL1, 0x00 },
+ { ADAU1761_ALC_CTRL2, 0x00 },
+ { ADAU1761_ALC_CTRL3, 0x00 },
{ ADAU1761_RIGHT_DIFF_INPUT_VOL, 0x00 },
{ ADAU1761_PLAY_LR_MIXER_LEFT, 0x00 },
{ ADAU1761_PLAY_MIXER_LEFT0, 0x00 },
@@ -121,6 +129,10 @@ static const DECLARE_TLV_DB_SCALE(adau1761_sidetone_tlv, -1800, 300, 1);
static const DECLARE_TLV_DB_SCALE(adau1761_boost_tlv, -600, 600, 1);
static const DECLARE_TLV_DB_SCALE(adau1761_pga_boost_tlv, -2000, 2000, 1);
+static const DECLARE_TLV_DB_SCALE(adau1761_alc_max_gain_tlv, -1200, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adau1761_alc_target_tlv, -2850, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adau1761_alc_ng_threshold_tlv, -7650, 150, 0);
+
static const unsigned int adau1761_bias_select_values[] = {
0, 2, 3,
};
@@ -147,6 +159,103 @@ static SOC_VALUE_ENUM_SINGLE_DECL(adau1761_capture_bias_enum,
ADAU17X1_REC_POWER_MGMT, 1, 0x3, adau1761_bias_select_text,
adau1761_bias_select_values);
+static const unsigned int adau1761_pga_slew_time_values[] = {
+ 3, 0, 1, 2,
+};
+
+static const char * const adau1761_pga_slew_time_text[] = {
+ "Off",
+ "24 ms",
+ "48 ms",
+ "96 ms",
+};
+
+static const char * const adau1761_alc_function_text[] = {
+ "Off",
+ "Right",
+ "Left",
+ "Stereo",
+ "DSP control",
+};
+
+static const char * const adau1761_alc_hold_time_text[] = {
+ "2.67 ms",
+ "5.34 ms",
+ "10.68 ms",
+ "21.36 ms",
+ "42.72 ms",
+ "85.44 ms",
+ "170.88 ms",
+ "341.76 ms",
+ "683.52 ms",
+ "1367 ms",
+ "2734.1 ms",
+ "5468.2 ms",
+ "10936 ms",
+ "21873 ms",
+ "43745 ms",
+ "87491 ms",
+};
+
+static const char * const adau1761_alc_attack_time_text[] = {
+ "6 ms",
+ "12 ms",
+ "24 ms",
+ "48 ms",
+ "96 ms",
+ "192 ms",
+ "384 ms",
+ "768 ms",
+ "1540 ms",
+ "3070 ms",
+ "6140 ms",
+ "12290 ms",
+ "24580 ms",
+ "49150 ms",
+ "98300 ms",
+ "196610 ms",
+};
+
+static const char * const adau1761_alc_decay_time_text[] = {
+ "24 ms",
+ "48 ms",
+ "96 ms",
+ "192 ms",
+ "384 ms",
+ "768 ms",
+ "15400 ms",
+ "30700 ms",
+ "61400 ms",
+ "12290 ms",
+ "24580 ms",
+ "49150 ms",
+ "98300 ms",
+ "196610 ms",
+ "393220 ms",
+ "786430 ms",
+};
+
+static const char * const adau1761_alc_ng_type_text[] = {
+ "Hold",
+ "Mute",
+ "Fade",
+ "Fade + Mute",
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(adau1761_pga_slew_time_enum,
+ ADAU1761_ALC_CTRL0, 6, 0x3, adau1761_pga_slew_time_text,
+ adau1761_pga_slew_time_values);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_function_enum,
+ ADAU1761_ALC_CTRL0, 0, adau1761_alc_function_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_hold_time_enum,
+ ADAU1761_ALC_CTRL1, 4, adau1761_alc_hold_time_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_attack_time_enum,
+ ADAU1761_ALC_CTRL2, 4, adau1761_alc_attack_time_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_decay_time_enum,
+ ADAU1761_ALC_CTRL2, 0, adau1761_alc_decay_time_text);
+static SOC_ENUM_SINGLE_DECL(adau1761_alc_ng_type_enum,
+ ADAU1761_ALC_CTRL3, 6, adau1761_alc_ng_type_text);
+
static const struct snd_kcontrol_new adau1761_jack_detect_controls[] = {
SOC_SINGLE("Speaker Auto-mute Switch", ADAU1761_DIGMIC_JACKDETECT,
4, 1, 0),
@@ -161,6 +270,22 @@ static const struct snd_kcontrol_new adau1761_differential_mode_controls[] = {
SOC_DOUBLE_R_TLV("PGA Boost Capture Volume", ADAU1761_REC_MIXER_LEFT1,
ADAU1761_REC_MIXER_RIGHT1, 3, 2, 0, adau1761_pga_boost_tlv),
+
+ SOC_ENUM("PGA Capture Slew Time", adau1761_pga_slew_time_enum),
+
+ SOC_SINGLE_TLV("ALC Capture Max Gain Volume", ADAU1761_ALC_CTRL0,
+ 3, 7, 0, adau1761_alc_max_gain_tlv),
+ SOC_ENUM("ALC Capture Function", adau1761_alc_function_enum),
+ SOC_ENUM("ALC Capture Hold Time", adau1761_alc_hold_time_enum),
+ SOC_SINGLE_TLV("ALC Capture Target Volume", ADAU1761_ALC_CTRL1,
+ 0, 15, 0, adau1761_alc_target_tlv),
+ SOC_ENUM("ALC Capture Attack Time", adau1761_alc_decay_time_enum),
+ SOC_ENUM("ALC Capture Decay Time", adau1761_alc_attack_time_enum),
+ SOC_ENUM("ALC Capture Noise Gate Type", adau1761_alc_ng_type_enum),
+ SOC_SINGLE("ALC Capture Noise Gate Switch",
+ ADAU1761_ALC_CTRL3, 5, 1, 0),
+ SOC_SINGLE_TLV("ALC Capture Noise Gate Threshold Volume",
+ ADAU1761_ALC_CTRL3, 0, 31, 0, adau1761_alc_ng_threshold_tlv),
};
static const struct snd_kcontrol_new adau1761_single_mode_controls[] = {
@@ -632,6 +757,10 @@ static bool adau1761_readable_register(struct device *dev, unsigned int reg)
case ADAU1761_DEJITTER:
case ADAU1761_CLK_ENABLE0:
case ADAU1761_CLK_ENABLE1:
+ case ADAU1761_ALC_CTRL0:
+ case ADAU1761_ALC_CTRL1:
+ case ADAU1761_ALC_CTRL2:
+ case ADAU1761_ALC_CTRL3:
return true;
default:
break;
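
The new ALC controls are ordinary register-backed kcontrols; the dB mapping user space sees comes from the TLV macros. DECLARE_TLV_DB_SCALE takes (name, minimum in 0.01 dB units, step in 0.01 dB units, mute-on-minimum flag), so adau1761_alc_max_gain_tlv above spans -12 dB to +30 dB in 6 dB steps across its 3-bit field (raw 0..7). A sketch with a hypothetical register to make the decoding concrete:

#define EXAMPLE_REG	0x4011	/* placeholder address */

static const DECLARE_TLV_DB_SCALE(example_max_gain_tlv, -1200, 600, 0);
/* raw 0 -> -12 dB, raw 1 -> -6 dB, ..., raw 7 -> +30 dB */

static const struct snd_kcontrol_new example_controls[] = {
	/* 3-bit field at bits [5:3]: shift 3, max 7, no invert */
	SOC_SINGLE_TLV("Example Max Gain Volume", EXAMPLE_REG,
		       3, 7, 0, example_max_gain_tlv),
};
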
diff --git a/sound/soc/codecs/adau7118-hw.c b/sound/soc/codecs/adau7118-hw.c
new file mode 100644
index 000000000000..45a5d2dcc0f2
--- /dev/null
+++ b/sound/soc/codecs/adau7118-hw.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Analog Devices ADAU7118 8 channel PDM-to-I2S/TDM Converter Standalone Hw
+// driver
+//
+// Copyright 2019 Analog Devices Inc.
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "adau7118.h"
+
+static int adau7118_probe_hw(struct platform_device *pdev)
+{
+ return adau7118_probe(&pdev->dev, NULL, true);
+}
+
+static const struct of_device_id adau7118_of_match[] = {
+ { .compatible = "adi,adau7118" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adau7118_of_match);
+
+static const struct platform_device_id adau7118_id[] = {
+ { .name = "adau7118" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, adau7118_id);
+
+static struct platform_driver adau7118_driver_hw = {
+ .driver = {
+ .name = "adau7118",
+ .of_match_table = adau7118_of_match,
+ },
+ .probe = adau7118_probe_hw,
+ .id_table = adau7118_id,
+};
+module_platform_driver(adau7118_driver_hw);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADAU7118 8 channel PDM-to-I2S/TDM Converter driver for standalone hw mode");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau7118-i2c.c b/sound/soc/codecs/adau7118-i2c.c
new file mode 100644
index 000000000000..a8211362fe82
--- /dev/null
+++ b/sound/soc/codecs/adau7118-i2c.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Analog Devices ADAU7118 8 channel PDM-to-I2S/TDM Converter driver over I2C
+//
+// Copyright 2019 Analog Devices Inc.
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "adau7118.h"
+
+static const struct reg_default adau7118_reg_defaults[] = {
+ { ADAU7118_REG_VENDOR_ID, 0x41 },
+ { ADAU7118_REG_DEVICE_ID1, 0x71 },
+ { ADAU7118_REG_DEVICE_ID2, 0x18 },
+ { ADAU7118_REG_REVISION_ID, 0x00 },
+ { ADAU7118_REG_ENABLES, 0x3F },
+ { ADAU7118_REG_DEC_RATIO_CLK_MAP, 0xC0 },
+ { ADAU7118_REG_HPF_CONTROL, 0xD0 },
+ { ADAU7118_REG_SPT_CTRL1, 0x41 },
+ { ADAU7118_REG_SPT_CTRL2, 0x00 },
+ { ADAU7118_REG_SPT_CX(0), 0x01 },
+ { ADAU7118_REG_SPT_CX(1), 0x11 },
+ { ADAU7118_REG_SPT_CX(2), 0x21 },
+ { ADAU7118_REG_SPT_CX(3), 0x31 },
+ { ADAU7118_REG_SPT_CX(4), 0x41 },
+ { ADAU7118_REG_SPT_CX(5), 0x51 },
+ { ADAU7118_REG_SPT_CX(6), 0x61 },
+ { ADAU7118_REG_SPT_CX(7), 0x71 },
+ { ADAU7118_REG_DRIVE_STRENGTH, 0x2a },
+ { ADAU7118_REG_RESET, 0x00 },
+};
+
+static const struct regmap_config adau7118_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_defaults = adau7118_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adau7118_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = ADAU7118_REG_RESET,
+};
+
+static int adau7118_probe_i2c(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *map;
+
+ map = devm_regmap_init_i2c(i2c, &adau7118_regmap_config);
+ if (IS_ERR(map)) {
+ dev_err(&i2c->dev, "Failed to init regmap %ld\n", PTR_ERR(map));
+ return PTR_ERR(map);
+ }
+
+ return adau7118_probe(&i2c->dev, map, false);
+}
+
+static const struct of_device_id adau7118_of_match[] = {
+ { .compatible = "adi,adau7118" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adau7118_of_match);
+
+static const struct i2c_device_id adau7118_id[] = {
+ {"adau7118", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, adau7118_id);
+
+static struct i2c_driver adau7118_driver = {
+ .driver = {
+ .name = "adau7118",
+ .of_match_table = adau7118_of_match,
+ },
+ .probe = adau7118_probe_i2c,
+ .id_table = adau7118_id,
+};
+module_i2c_driver(adau7118_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADAU7118 8 channel PDM-to-I2S/TDM Converter driver over I2C");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c
new file mode 100644
index 000000000000..841229dcbca1
--- /dev/null
+++ b/sound/soc/codecs/adau7118.c
@@ -0,0 +1,586 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Analog Devices ADAU7118 8 channel PDM-to-I2S/TDM Converter driver
+//
+// Copyright 2019 Analog Devices Inc.
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "adau7118.h"
+
+#define ADAU7118_DEC_RATIO_MASK GENMASK(1, 0)
+#define ADAU7118_DEC_RATIO(x) FIELD_PREP(ADAU7118_DEC_RATIO_MASK, x)
+#define ADAU7118_CLK_MAP_MASK GENMASK(7, 4)
+#define ADAU7118_SLOT_WIDTH_MASK GENMASK(5, 4)
+#define ADAU7118_SLOT_WIDTH(x) FIELD_PREP(ADAU7118_SLOT_WIDTH_MASK, x)
+#define ADAU7118_TRISTATE_MASK BIT(6)
+#define ADAU7118_TRISTATE(x) FIELD_PREP(ADAU7118_TRISTATE_MASK, x)
+#define ADAU7118_DATA_FMT_MASK GENMASK(3, 1)
+#define ADAU7118_DATA_FMT(x) FIELD_PREP(ADAU7118_DATA_FMT_MASK, x)
+#define ADAU7118_SAI_MODE_MASK BIT(0)
+#define ADAU7118_SAI_MODE(x) FIELD_PREP(ADAU7118_SAI_MODE_MASK, x)
+#define ADAU7118_LRCLK_BCLK_POL_MASK GENMASK(1, 0)
+#define ADAU7118_LRCLK_BCLK_POL(x) \
+ FIELD_PREP(ADAU7118_LRCLK_BCLK_POL_MASK, x)
+#define ADAU7118_SPT_SLOT_MASK GENMASK(7, 4)
+#define ADAU7118_SPT_SLOT(x) FIELD_PREP(ADAU7118_SPT_SLOT_MASK, x)
+#define ADAU7118_FULL_SOFT_R_MASK BIT(1)
+#define ADAU7118_FULL_SOFT_R(x) FIELD_PREP(ADAU7118_FULL_SOFT_R_MASK, x)
+
+struct adau7118_data {
+ struct regmap *map;
+ struct device *dev;
+ struct regulator *iovdd;
+ struct regulator *dvdd;
+ u32 slot_width;
+ u32 slots;
+ bool hw_mode;
+ bool right_j;
+};
+
+/* Input Enable */
+static const struct snd_kcontrol_new adau7118_dapm_pdm_control[4] = {
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 0, 1, 0),
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 1, 1, 0),
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 2, 1, 0),
+ SOC_DAPM_SINGLE("Capture Switch", ADAU7118_REG_ENABLES, 3, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget adau7118_widgets_sw[] = {
+ /* Input Enable Switches */
+ SND_SOC_DAPM_SWITCH("PDM0", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[0]),
+ SND_SOC_DAPM_SWITCH("PDM1", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[1]),
+ SND_SOC_DAPM_SWITCH("PDM2", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[2]),
+ SND_SOC_DAPM_SWITCH("PDM3", SND_SOC_NOPM, 0, 0,
+ &adau7118_dapm_pdm_control[3]),
+
+ /* PDM Clocks */
+ SND_SOC_DAPM_SUPPLY("PDM_CLK0", ADAU7118_REG_ENABLES, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PDM_CLK1", ADAU7118_REG_ENABLES, 5, 0, NULL, 0),
+
+ /* Output channels */
+ SND_SOC_DAPM_AIF_OUT("AIF1TX1", "Capture", 0, ADAU7118_REG_SPT_CX(0),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX2", "Capture", 0, ADAU7118_REG_SPT_CX(1),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX3", "Capture", 0, ADAU7118_REG_SPT_CX(2),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX4", "Capture", 0, ADAU7118_REG_SPT_CX(3),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX5", "Capture", 0, ADAU7118_REG_SPT_CX(4),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX6", "Capture", 0, ADAU7118_REG_SPT_CX(5),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX7", "Capture", 0, ADAU7118_REG_SPT_CX(6),
+ 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX8", "Capture", 0, ADAU7118_REG_SPT_CX(7),
+ 0, 0),
+};
+
+static const struct snd_soc_dapm_route adau7118_routes_sw[] = {
+ { "PDM0", "Capture Switch", "PDM_DAT0" },
+ { "PDM1", "Capture Switch", "PDM_DAT1" },
+ { "PDM2", "Capture Switch", "PDM_DAT2" },
+ { "PDM3", "Capture Switch", "PDM_DAT3" },
+ { "AIF1TX1", NULL, "PDM0" },
+ { "AIF1TX2", NULL, "PDM0" },
+ { "AIF1TX3", NULL, "PDM1" },
+ { "AIF1TX4", NULL, "PDM1" },
+ { "AIF1TX5", NULL, "PDM2" },
+ { "AIF1TX6", NULL, "PDM2" },
+ { "AIF1TX7", NULL, "PDM3" },
+ { "AIF1TX8", NULL, "PDM3" },
+ { "Capture", NULL, "PDM_CLK0" },
+ { "Capture", NULL, "PDM_CLK1" },
+};
+
+static const struct snd_soc_dapm_widget adau7118_widgets_hw[] = {
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route adau7118_routes_hw[] = {
+ { "AIF1TX", NULL, "PDM_DAT0" },
+ { "AIF1TX", NULL, "PDM_DAT1" },
+ { "AIF1TX", NULL, "PDM_DAT2" },
+ { "AIF1TX", NULL, "PDM_DAT3" },
+};
+
+static const struct snd_soc_dapm_widget adau7118_widgets[] = {
+ SND_SOC_DAPM_INPUT("PDM_DAT0"),
+ SND_SOC_DAPM_INPUT("PDM_DAT1"),
+ SND_SOC_DAPM_INPUT("PDM_DAT2"),
+ SND_SOC_DAPM_INPUT("PDM_DAT3"),
+};
+
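+/*
+ * For example, a machine driver could reverse the default slot order
+ * with snd_soc_dai_set_channel_map(dai, 8,
+ * (unsigned int []){ 7, 6, 5, 4, 3, 2, 1, 0 }, 0, NULL).
+ */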
+static int adau7118_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int chan, ret;
+
+	dev_dbg(st->dev, "Set channel map, %d\n", tx_num);
+
+ for (chan = 0; chan < tx_num; chan++) {
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CX(chan),
+ ADAU7118_SPT_SLOT_MASK,
+ ADAU7118_SPT_SLOT(tx_slot[chan]));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adau7118_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret = 0;
+ u32 regval;
+
+ dev_dbg(st->dev, "Set format, fmt:%d\n", fmt);
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_DATA_FMT_MASK,
+ ADAU7118_DATA_FMT(0));
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_DATA_FMT_MASK,
+ ADAU7118_DATA_FMT(1));
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ st->right_j = true;
+ break;
+ default:
+		dev_err(st->dev, "Invalid format %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ regval = ADAU7118_LRCLK_BCLK_POL(0);
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ regval = ADAU7118_LRCLK_BCLK_POL(2);
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ regval = ADAU7118_LRCLK_BCLK_POL(1);
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ regval = ADAU7118_LRCLK_BCLK_POL(3);
+ break;
+ default:
+		dev_err(st->dev, "Invalid inversion mask %d\n",
+ fmt & SND_SOC_DAIFMT_INV_MASK);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL2,
+ ADAU7118_LRCLK_BCLK_POL_MASK,
+ regval);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int adau7118_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret;
+
+ dev_dbg(st->dev, "Set tristate, %d\n", tristate);
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_TRISTATE_MASK,
+ ADAU7118_TRISTATE(tristate));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int adau7118_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots,
+ int slot_width)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ int ret = 0;
+ u32 regval;
+
+ dev_dbg(st->dev, "Set tdm, slots:%d width:%d\n", slots, slot_width);
+
+ switch (slot_width) {
+ case 32:
+ regval = ADAU7118_SLOT_WIDTH(0);
+ break;
+ case 24:
+ regval = ADAU7118_SLOT_WIDTH(2);
+ break;
+ case 16:
+ regval = ADAU7118_SLOT_WIDTH(1);
+ break;
+ default:
+ dev_err(st->dev, "Invalid slot width:%d\n", slot_width);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_SLOT_WIDTH_MASK, regval);
+ if (ret < 0)
+ return ret;
+
+ st->slot_width = slot_width;
+ st->slots = slots;
+
+ return 0;
+}
+
+static int adau7118_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct adau7118_data *st =
+ snd_soc_component_get_drvdata(dai->component);
+ u32 data_width = params_width(params), slots_width;
+ int ret;
+ u32 regval;
+
+ if (!st->slots) {
+ /* set stereo mode */
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_SAI_MODE_MASK,
+ ADAU7118_SAI_MODE(0));
+ if (ret < 0)
+ return ret;
+
+ slots_width = 32;
+ } else {
+ slots_width = st->slot_width;
+ }
+
+ if (data_width > slots_width) {
+		dev_err(st->dev, "Invalid data_width:%d, slots_width:%d\n",
+ data_width, slots_width);
+ return -EINVAL;
+ }
+
+ if (st->right_j) {
+ switch (slots_width - data_width) {
+ case 8:
+			/* delay bclk by 8 */
+ regval = ADAU7118_DATA_FMT(2);
+ break;
+ case 12:
+			/* delay bclk by 12 */
+ regval = ADAU7118_DATA_FMT(3);
+ break;
+ case 16:
+			/* delay bclk by 16 */
+ regval = ADAU7118_DATA_FMT(4);
+ break;
+ default:
+ dev_err(st->dev,
+ "Cannot set right_j setting, slot_w:%d, data_w:%d\n",
+ slots_width, data_width);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(dai->component,
+ ADAU7118_REG_SPT_CTRL1,
+ ADAU7118_DATA_FMT_MASK,
+ regval);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adau7118_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct adau7118_data *st = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ dev_dbg(st->dev, "Set bias level %d\n", level);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_component_get_bias_level(component) ==
+ SND_SOC_BIAS_OFF) {
+ /* power on */
+ ret = regulator_enable(st->iovdd);
+ if (ret)
+ return ret;
+
+			/* there are no timing constraints before enabling dvdd */
+ ret = regulator_enable(st->dvdd);
+ if (ret) {
+ regulator_disable(st->iovdd);
+ return ret;
+ }
+
+ if (st->hw_mode)
+ return 0;
+
+ regcache_cache_only(st->map, false);
+ /* sync cache */
+ ret = snd_soc_component_cache_sync(component);
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+ /* power off */
+ ret = regulator_disable(st->dvdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(st->iovdd);
+ if (ret)
+ return ret;
+
+ if (st->hw_mode)
+ return 0;
+
+ /* cache only */
+ regcache_mark_dirty(st->map);
+ regcache_cache_only(st->map, true);
+
+ break;
+ }
+
+ return ret;
+}
+
+static int adau7118_component_probe(struct snd_soc_component *component)
+{
+ struct adau7118_data *st = snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ int ret = 0;
+
+ if (st->hw_mode) {
+ ret = snd_soc_dapm_new_controls(dapm, adau7118_widgets_hw,
+ ARRAY_SIZE(adau7118_widgets_hw));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(dapm, adau7118_routes_hw,
+ ARRAY_SIZE(adau7118_routes_hw));
+ } else {
+ snd_soc_component_init_regmap(component, st->map);
+ ret = snd_soc_dapm_new_controls(dapm, adau7118_widgets_sw,
+ ARRAY_SIZE(adau7118_widgets_sw));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(dapm, adau7118_routes_sw,
+ ARRAY_SIZE(adau7118_routes_sw));
+ }
+
+ return ret;
+}
+
+static const struct snd_soc_dai_ops adau7118_ops = {
+ .hw_params = adau7118_hw_params,
+ .set_channel_map = adau7118_set_channel_map,
+ .set_fmt = adau7118_set_fmt,
+ .set_tdm_slot = adau7118_set_tdm_slot,
+ .set_tristate = adau7118_set_tristate,
+};
+
+static struct snd_soc_dai_driver adau7118_dai = {
+ .name = "adau7118-hifi-capture",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S20_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 4000,
+ .rate_max = 192000,
+ .sig_bits = 24,
+ },
+};
+
+static const struct snd_soc_component_driver adau7118_component_driver = {
+ .probe = adau7118_component_probe,
+ .set_bias_level = adau7118_set_bias_level,
+ .dapm_widgets = adau7118_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adau7118_widgets),
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static void adau7118_regulator_disable(void *data)
+{
+ struct adau7118_data *st = data;
+ int ret;
+ /*
+	 * If we fail to disable DVDD, don't bother trying IOVDD, since
+	 * we must not be left in a state where DVDD is enabled while
+	 * IOVDD is disabled.
+	 */
+ ret = regulator_disable(st->dvdd);
+ if (ret)
+ return;
+
+ regulator_disable(st->iovdd);
+}
+
+static int adau7118_regulator_setup(struct adau7118_data *st)
+{
+ st->iovdd = devm_regulator_get(st->dev, "iovdd");
+ if (IS_ERR(st->iovdd)) {
+ dev_err(st->dev, "Could not get iovdd: %ld\n",
+ PTR_ERR(st->iovdd));
+ return PTR_ERR(st->iovdd);
+ }
+
+ st->dvdd = devm_regulator_get(st->dev, "dvdd");
+ if (IS_ERR(st->dvdd)) {
+ dev_err(st->dev, "Could not get dvdd: %ld\n",
+ PTR_ERR(st->dvdd));
+ return PTR_ERR(st->dvdd);
+ }
+	/* assume the device powers up in reset, so start cache-only */
+ if (!st->hw_mode) {
+ regcache_mark_dirty(st->map);
+ regcache_cache_only(st->map, true);
+ }
+
+ return devm_add_action_or_reset(st->dev, adau7118_regulator_disable,
+ st);
+}
+
+static int adau7118_parse_dt(const struct adau7118_data *st)
+{
+ int ret;
+ u32 dec_ratio = 0;
+ /* 4 inputs */
+ u32 clk_map[4], regval;
+
+ if (st->hw_mode)
+ return 0;
+
+ ret = device_property_read_u32(st->dev, "adi,decimation-ratio",
+ &dec_ratio);
+ if (!ret) {
+ switch (dec_ratio) {
+ case 64:
+ regval = ADAU7118_DEC_RATIO(0);
+ break;
+ case 32:
+ regval = ADAU7118_DEC_RATIO(1);
+ break;
+ case 16:
+ regval = ADAU7118_DEC_RATIO(2);
+ break;
+ default:
+			dev_err(st->dev, "Invalid dec ratio: %u\n", dec_ratio);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->map,
+ ADAU7118_REG_DEC_RATIO_CLK_MAP,
+ ADAU7118_DEC_RATIO_MASK, regval);
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_u32_array(st->dev, "adi,pdm-clk-map",
+ clk_map, ARRAY_SIZE(clk_map));
+ if (!ret) {
+ int pdm;
+ u32 _clk_map = 0;
+
+ for (pdm = 0; pdm < ARRAY_SIZE(clk_map); pdm++)
+ _clk_map |= (clk_map[pdm] << (pdm + 4));
+
+ ret = regmap_update_bits(st->map,
+ ADAU7118_REG_DEC_RATIO_CLK_MAP,
+ ADAU7118_CLK_MAP_MASK, _clk_map);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
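+
+/*
+ * A hypothetical firmware node consumed by the parsing above (values
+ * are only an example, not a recommendation):
+ *
+ *	adi,decimation-ratio = <64>;
+ *	adi,pdm-clk-map = <0 0 1 1>;
+ *
+ * selects a 64x decimation ratio and clocks PDM inputs 0/1 from
+ * PDM_CLK0 and inputs 2/3 from PDM_CLK1.
+ */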
+
+int adau7118_probe(struct device *dev, struct regmap *map, bool hw_mode)
+{
+ struct adau7118_data *st;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->dev = dev;
+ st->hw_mode = hw_mode;
+ dev_set_drvdata(dev, st);
+
+ if (!hw_mode) {
+ st->map = map;
+ adau7118_dai.ops = &adau7118_ops;
+ /*
+		 * Perform a full soft reset. This sets all registers back
+		 * to their default values.
+ */
+ ret = regmap_update_bits(map, ADAU7118_REG_RESET,
+ ADAU7118_FULL_SOFT_R_MASK,
+ ADAU7118_FULL_SOFT_R(1));
+ if (ret)
+ return ret;
+ }
+
+	ret = adau7118_parse_dt(st);
+ if (ret)
+ return ret;
+
+ ret = adau7118_regulator_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_snd_soc_register_component(dev,
+ &adau7118_component_driver,
+ &adau7118_dai, 1);
+}
+EXPORT_SYMBOL_GPL(adau7118_probe);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADAU7118 8 channel PDM-to-I2S/TDM Converter driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/adau7118.h b/sound/soc/codecs/adau7118.h
new file mode 100644
index 000000000000..c65679a4dff1
--- /dev/null
+++ b/sound/soc/codecs/adau7118.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ADAU7118_H
+#define _LINUX_ADAU7118_H
+
+struct regmap;
+struct device;
+
+/* register map */
+#define ADAU7118_REG_VENDOR_ID 0x00
+#define ADAU7118_REG_DEVICE_ID1 0x01
+#define ADAU7118_REG_DEVICE_ID2 0x02
+#define ADAU7118_REG_REVISION_ID 0x03
+#define ADAU7118_REG_ENABLES 0x04
+#define ADAU7118_REG_DEC_RATIO_CLK_MAP 0x05
+#define ADAU7118_REG_HPF_CONTROL 0x06
+#define ADAU7118_REG_SPT_CTRL1 0x07
+#define ADAU7118_REG_SPT_CTRL2 0x08
+#define ADAU7118_REG_SPT_CX(num) (0x09 + (num))
+#define ADAU7118_REG_DRIVE_STRENGTH 0x11
+#define ADAU7118_REG_RESET 0x12
+
+int adau7118_probe(struct device *dev, struct regmap *map, bool hw_mode);
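+
+/*
+ * Sketch of how a bus glue driver might call this entry point; the
+ * regmap config name below is an assumption, not part of this header:
+ *
+ *	static int adau7118_i2c_probe(struct i2c_client *i2c,
+ *				      const struct i2c_device_id *id)
+ *	{
+ *		struct regmap *map;
+ *
+ *		map = devm_regmap_init_i2c(i2c, &adau7118_regmap_config);
+ *		if (IS_ERR(map))
+ *			return PTR_ERR(map);
+ *
+ *		return adau7118_probe(&i2c->dev, map, false);
+ *	}
+ */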
+
+#endif
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
index 3c1bd24a1057..7b17f39a6a10 100644
--- a/sound/soc/codecs/cros_ec_codec.c
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -1,15 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for ChromeOS Embedded Controller codec.
+ * Copyright 2019 Google, Inc.
+ *
+ * ChromeOS Embedded Controller codec driver.
*
* This driver uses the cros-ec interface to communicate with the ChromeOS
* EC for audio function.
*/
+#include <crypto/hash.h>
+#include <crypto/sha.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
@@ -18,92 +26,279 @@
#include <sound/soc.h>
#include <sound/tlv.h>
-#define DRV_NAME "cros-ec-codec"
-
-/**
- * struct cros_ec_codec_data - ChromeOS EC codec driver data.
- * @dev: Device structure used in sysfs.
- * @ec_device: cros_ec_device structure to talk to the physical device.
- * @component: Pointer to the component.
- * @max_dmic_gain: Maximum gain in dB supported by EC codec.
- */
-struct cros_ec_codec_data {
+struct cros_ec_codec_priv {
struct device *dev;
struct cros_ec_device *ec_device;
- struct snd_soc_component *component;
- unsigned int max_dmic_gain;
+
+ /* common */
+ uint32_t ec_capabilities;
+
+ uint64_t ec_shm_addr;
+ uint32_t ec_shm_len;
+
+ uint64_t ap_shm_phys_addr;
+ uint32_t ap_shm_len;
+ uint64_t ap_shm_addr;
+ uint64_t ap_shm_last_alloc;
+
+ /* DMIC */
+ atomic_t dmic_probed;
+
+ /* WoV */
+ bool wov_enabled;
+ uint8_t *wov_audio_shm_p;
+ uint32_t wov_audio_shm_len;
+ uint8_t wov_audio_shm_type;
+ uint8_t *wov_lang_shm_p;
+ uint32_t wov_lang_shm_len;
+ uint8_t wov_lang_shm_type;
+
+ struct mutex wov_dma_lock;
+ uint8_t wov_buf[64000];
+ uint32_t wov_rp, wov_wp;
+ size_t wov_dma_offset;
+ bool wov_burst_read;
+ struct snd_pcm_substream *wov_substream;
+ struct delayed_work wov_copy_work;
+ struct notifier_block wov_notifier;
};
-static const DECLARE_TLV_DB_SCALE(ec_mic_gain_tlv, 0, 100, 0);
+static int ec_codec_capable(struct cros_ec_codec_priv *priv, uint8_t cap)
+{
+ return priv->ec_capabilities & BIT(cap);
+}
-static int ec_command_get_gain(struct snd_soc_component *component,
- struct ec_param_codec_i2s *param,
- struct ec_codec_i2s_gain *resp)
+static int send_ec_host_command(struct cros_ec_device *ec_dev, uint32_t cmd,
+ uint8_t *out, size_t outsize,
+ uint8_t *in, size_t insize)
{
- struct cros_ec_codec_data *codec_data =
- snd_soc_component_get_drvdata(component);
- struct cros_ec_device *ec_device = codec_data->ec_device;
- u8 buffer[sizeof(struct cros_ec_command) +
- max(sizeof(struct ec_param_codec_i2s),
- sizeof(struct ec_codec_i2s_gain))];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
int ret;
+ struct cros_ec_command *msg;
+
+ msg = kmalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
msg->version = 0;
- msg->command = EC_CMD_CODEC_I2S;
- msg->outsize = sizeof(struct ec_param_codec_i2s);
- msg->insize = sizeof(struct ec_codec_i2s_gain);
+ msg->command = cmd;
+ msg->outsize = outsize;
+ msg->insize = insize;
- memcpy(msg->data, param, msg->outsize);
+ if (outsize)
+ memcpy(msg->data, out, outsize);
- ret = cros_ec_cmd_xfer_status(ec_device, msg);
- if (ret > 0)
- memcpy(resp, msg->data, msg->insize);
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ if (insize)
+ memcpy(in, msg->data, insize);
+ ret = 0;
+error:
+ kfree(msg);
return ret;
}
-/*
- * Wrapper for EC command without response.
- */
-static int ec_command_no_resp(struct snd_soc_component *component,
- struct ec_param_codec_i2s *param)
+static int calculate_sha256(struct cros_ec_codec_priv *priv,
+ uint8_t *buf, uint32_t size, uint8_t *digest)
{
- struct cros_ec_codec_data *codec_data =
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash("sha256", CRYPTO_ALG_TYPE_SHASH, 0);
+ if (IS_ERR(tfm)) {
+ dev_err(priv->dev, "can't alloc shash\n");
+ return PTR_ERR(tfm);
+ }
+
+ {
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ desc->tfm = tfm;
+
+ crypto_shash_digest(desc, buf, size, digest);
+ shash_desc_zero(desc);
+ }
+
+ crypto_free_shash(tfm);
+
+#ifdef DEBUG
+ {
+ char digest_str[65];
+
+ bin2hex(digest_str, digest, 32);
+ digest_str[64] = 0;
+ dev_dbg(priv->dev, "hash=%s\n", digest_str);
+ }
+#endif
+
+ return 0;
+}
+
+static int dmic_get_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv =
snd_soc_component_get_drvdata(component);
- struct cros_ec_device *ec_device = codec_data->ec_device;
- u8 buffer[sizeof(struct cros_ec_command) +
- sizeof(struct ec_param_codec_i2s)];
- struct cros_ec_command *msg = (struct cros_ec_command *)&buffer;
+ struct ec_param_ec_codec_dmic p;
+ struct ec_response_ec_codec_dmic_get_gain_idx r;
+ int ret;
- msg->version = 0;
- msg->command = EC_CMD_CODEC_I2S;
- msg->outsize = sizeof(struct ec_param_codec_i2s);
- msg->insize = 0;
+ p.cmd = EC_CODEC_DMIC_GET_GAIN_IDX;
+ p.get_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_0;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret < 0)
+ return ret;
+ ucontrol->value.integer.value[0] = r.gain;
- memcpy(msg->data, param, msg->outsize);
+ p.cmd = EC_CODEC_DMIC_GET_GAIN_IDX;
+ p.get_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_1;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret < 0)
+ return ret;
+ ucontrol->value.integer.value[1] = r.gain;
- return cros_ec_cmd_xfer_status(ec_device, msg);
+ return 0;
}
-static int set_i2s_config(struct snd_soc_component *component,
- enum ec_i2s_config i2s_config)
+static int dmic_put_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- struct ec_param_codec_i2s param;
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct soc_mixer_control *control =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int max_dmic_gain = control->max;
+ int left = ucontrol->value.integer.value[0];
+ int right = ucontrol->value.integer.value[1];
+ struct ec_param_ec_codec_dmic p;
+ int ret;
+
+ if (left > max_dmic_gain || right > max_dmic_gain)
+ return -EINVAL;
- dev_dbg(component->dev, "%s set I2S format to %u\n", __func__,
- i2s_config);
+ dev_dbg(component->dev, "set mic gain to %u, %u\n", left, right);
- param.cmd = EC_CODEC_I2S_SET_CONFIG;
- param.i2s_config = i2s_config;
+ p.cmd = EC_CODEC_DMIC_SET_GAIN_IDX;
+ p.set_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_0;
+ p.set_gain_idx_param.gain = left;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ p.cmd = EC_CODEC_DMIC_SET_GAIN_IDX;
+ p.set_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_1;
+ p.set_gain_idx_param.gain = right;
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static const DECLARE_TLV_DB_SCALE(dmic_gain_tlv, 0, 100, 0);
+
+enum {
+ DMIC_CTL_GAIN = 0,
+};
+
+static struct snd_kcontrol_new dmic_controls[] = {
+ [DMIC_CTL_GAIN] =
+ SOC_DOUBLE_EXT_TLV("EC Mic Gain", SND_SOC_NOPM, SND_SOC_NOPM,
+ 0, 0, 0, dmic_get_gain, dmic_put_gain,
+ dmic_gain_tlv),
+};
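+
+/*
+ * Note that max is left at 0 above; dmic_probe() patches both max and
+ * platform_max with the value reported by EC_CODEC_DMIC_GET_MAX_GAIN
+ * before the control is registered.
+ */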
+
+static int dmic_probe(struct snd_soc_component *component)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct device *dev = priv->dev;
+ struct soc_mixer_control *control;
+ struct ec_param_ec_codec_dmic p;
+ struct ec_response_ec_codec_dmic_get_max_gain r;
+ int ret;
+
+ if (!atomic_add_unless(&priv->dmic_probed, 1, 1))
+ return 0;
+
+ p.cmd = EC_CODEC_DMIC_GET_MAX_GAIN;
+
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_DMIC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret < 0) {
+ dev_warn(dev, "get_max_gain() unsupported\n");
+ return 0;
+ }
+
+ dev_dbg(dev, "max gain = %d\n", r.max_gain);
+
+ control = (struct soc_mixer_control *)
+ dmic_controls[DMIC_CTL_GAIN].private_value;
+ control->max = r.max_gain;
+ control->platform_max = r.max_gain;
- return ec_command_no_resp(component, &param);
+ return snd_soc_add_component_controls(component,
+ &dmic_controls[DMIC_CTL_GAIN], 1);
}
-static int cros_ec_i2s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+static int i2s_rx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
- enum ec_i2s_config i2s_config;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_i2s_rx p;
+ enum ec_codec_i2s_rx_sample_depth depth;
+ int ret;
+
+ if (params_rate(params) != 48000)
+ return -EINVAL;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ depth = EC_CODEC_I2S_RX_SAMPLE_DEPTH_16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ depth = EC_CODEC_I2S_RX_SAMPLE_DEPTH_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(component->dev, "set depth to %u\n", depth);
+
+ p.cmd = EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH;
+ p.set_sample_depth_param.depth = depth;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(component->dev, "set bclk to %u\n",
+ snd_soc_params_to_bclk(params));
+
+ p.cmd = EC_CODEC_I2S_RX_SET_BCLK;
+ p.set_bclk_param.bclk = snd_soc_params_to_bclk(params);
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static int i2s_rx_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_i2s_rx p;
+ enum ec_codec_i2s_rx_daifmt daifmt;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
@@ -121,300 +316,727 @@ static int cros_ec_i2s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- i2s_config = EC_DAI_FMT_I2S;
+ daifmt = EC_CODEC_I2S_RX_DAIFMT_I2S;
break;
-
case SND_SOC_DAIFMT_RIGHT_J:
- i2s_config = EC_DAI_FMT_RIGHT_J;
+ daifmt = EC_CODEC_I2S_RX_DAIFMT_RIGHT_J;
break;
-
case SND_SOC_DAIFMT_LEFT_J:
- i2s_config = EC_DAI_FMT_LEFT_J;
+ daifmt = EC_CODEC_I2S_RX_DAIFMT_LEFT_J;
break;
+ default:
+ return -EINVAL;
+ }
- case SND_SOC_DAIFMT_DSP_A:
- i2s_config = EC_DAI_FMT_PCM_A;
- break;
+ dev_dbg(component->dev, "set format to %u\n", daifmt);
- case SND_SOC_DAIFMT_DSP_B:
- i2s_config = EC_DAI_FMT_PCM_B;
- break;
+ p.cmd = EC_CODEC_I2S_RX_SET_DAIFMT;
+ p.set_daifmt_param.daifmt = daifmt;
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static const struct snd_soc_dai_ops i2s_rx_dai_ops = {
+ .hw_params = i2s_rx_hw_params,
+ .set_fmt = i2s_rx_set_fmt,
+};
+static int i2s_rx_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_i2s_rx p;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ dev_dbg(component->dev, "enable I2S RX\n");
+ p.cmd = EC_CODEC_I2S_RX_ENABLE;
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ dev_dbg(component->dev, "disable I2S RX\n");
+ p.cmd = EC_CODEC_I2S_RX_DISABLE;
+ break;
default:
- return -EINVAL;
+ return 0;
}
- return set_i2s_config(component, i2s_config);
+ return send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_I2S_RX,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+}
+
+static struct snd_soc_dapm_widget i2s_rx_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("DMIC"),
+ SND_SOC_DAPM_SUPPLY("I2S RX Enable", SND_SOC_NOPM, 0, 0, i2s_rx_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_AIF_OUT("I2S RX", "I2S Capture", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static struct snd_soc_dapm_route i2s_rx_dapm_routes[] = {
+ {"I2S RX", NULL, "DMIC"},
+ {"I2S RX", NULL, "I2S RX Enable"},
+};
+
+static struct snd_soc_dai_driver i2s_rx_dai_driver = {
+ .name = "EC Codec I2S RX",
+ .capture = {
+ .stream_name = "I2S Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &i2s_rx_dai_ops,
+};
+
+static int i2s_rx_probe(struct snd_soc_component *component)
+{
+ return dmic_probe(component);
}
-static int set_i2s_sample_depth(struct snd_soc_component *component,
- enum ec_sample_depth_value depth)
+static const struct snd_soc_component_driver i2s_rx_component_driver = {
+ .probe = i2s_rx_probe,
+ .dapm_widgets = i2s_rx_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(i2s_rx_dapm_widgets),
+ .dapm_routes = i2s_rx_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(i2s_rx_dapm_routes),
+};
+
+static void *wov_map_shm(struct cros_ec_codec_priv *priv,
+ uint8_t shm_id, uint32_t *len, uint8_t *type)
{
- struct ec_param_codec_i2s param;
+ struct ec_param_ec_codec p;
+ struct ec_response_ec_codec_get_shm_addr r;
+ uint32_t req, offset;
+
+ p.cmd = EC_CODEC_GET_SHM_ADDR;
+ p.get_shm_addr_param.shm_id = shm_id;
+ if (send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r)) < 0) {
+ dev_err(priv->dev, "failed to EC_CODEC_GET_SHM_ADDR\n");
+ return NULL;
+ }
- dev_dbg(component->dev, "%s set depth to %u\n", __func__, depth);
+ dev_dbg(priv->dev, "phys_addr=%#llx, len=%#x\n", r.phys_addr, r.len);
+
+ *len = r.len;
+ *type = r.type;
+
+ switch (r.type) {
+ case EC_CODEC_SHM_TYPE_EC_RAM:
+ return (void __force *)devm_ioremap_wc(priv->dev,
+ r.phys_addr + priv->ec_shm_addr, r.len);
+ case EC_CODEC_SHM_TYPE_SYSTEM_RAM:
+ if (r.phys_addr) {
+			dev_err(priv->dev, "unexpected phys_addr for system RAM type\n");
+ return NULL;
+ }
+
+ req = round_up(r.len, PAGE_SIZE);
+ dev_dbg(priv->dev, "round up from %u to %u\n", r.len, req);
+
+ if (priv->ap_shm_last_alloc + req >
+ priv->ap_shm_phys_addr + priv->ap_shm_len) {
+ dev_err(priv->dev, "insufficient space for AP SHM\n");
+ return NULL;
+ }
+
+ dev_dbg(priv->dev, "alloc AP SHM addr=%#llx, len=%#x\n",
+ priv->ap_shm_last_alloc, req);
+
+ p.cmd = EC_CODEC_SET_SHM_ADDR;
+ p.set_shm_addr_param.phys_addr = priv->ap_shm_last_alloc;
+ p.set_shm_addr_param.len = req;
+ p.set_shm_addr_param.shm_id = shm_id;
+ if (send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC,
+ (uint8_t *)&p, sizeof(p),
+ NULL, 0) < 0) {
+ dev_err(priv->dev, "failed to EC_CODEC_SET_SHM_ADDR\n");
+ return NULL;
+ }
+
+ /*
+		 * Note: the EC codec only asks for `r.len' bytes, but we
+		 * allocate `req' bytes, rounded up to PAGE_SIZE.
+ */
+ offset = priv->ap_shm_last_alloc - priv->ap_shm_phys_addr;
+ priv->ap_shm_last_alloc += req;
+
+ return (void *)(uintptr_t)(priv->ap_shm_addr + offset);
+ default:
+ return NULL;
+ }
+}
- param.cmd = EC_CODEC_SET_SAMPLE_DEPTH;
- param.depth = depth;
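+
+/*
+ * The WoV ring buffer uses the classic one-slot-open convention:
+ * empty when wp == rp, full when (wp + 1) % N == rp, so at most
+ * sizeof(wov_buf) - 1 bytes are stored at any time.
+ */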
+static bool wov_queue_full(struct cros_ec_codec_priv *priv)
+{
+ return ((priv->wov_wp + 1) % sizeof(priv->wov_buf)) == priv->wov_rp;
+}
- return ec_command_no_resp(component, &param);
+static size_t wov_queue_size(struct cros_ec_codec_priv *priv)
+{
+ if (priv->wov_wp >= priv->wov_rp)
+ return priv->wov_wp - priv->wov_rp;
+ else
+ return sizeof(priv->wov_buf) - priv->wov_rp + priv->wov_wp;
}
-static int set_i2s_bclk(struct snd_soc_component *component, uint32_t bclk)
+static void wov_queue_dequeue(struct cros_ec_codec_priv *priv, size_t len)
{
- struct ec_param_codec_i2s param;
+ struct snd_pcm_runtime *runtime = priv->wov_substream->runtime;
+ size_t req;
+
+ while (len) {
+ req = min(len, runtime->dma_bytes - priv->wov_dma_offset);
+ if (priv->wov_wp >= priv->wov_rp)
+ req = min(req, (size_t)priv->wov_wp - priv->wov_rp);
+ else
+ req = min(req, sizeof(priv->wov_buf) - priv->wov_rp);
- dev_dbg(component->dev, "%s set i2s bclk to %u\n", __func__, bclk);
+ memcpy(runtime->dma_area + priv->wov_dma_offset,
+ priv->wov_buf + priv->wov_rp, req);
- param.cmd = EC_CODEC_I2S_SET_BCLK;
- param.bclk = bclk;
+ priv->wov_dma_offset += req;
+ if (priv->wov_dma_offset == runtime->dma_bytes)
+ priv->wov_dma_offset = 0;
- return ec_command_no_resp(component, &param);
+ priv->wov_rp += req;
+ if (priv->wov_rp == sizeof(priv->wov_buf))
+ priv->wov_rp = 0;
+
+ len -= req;
+ }
+
+ snd_pcm_period_elapsed(priv->wov_substream);
}
-static int cros_ec_i2s_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
+static void wov_queue_try_dequeue(struct cros_ec_codec_priv *priv)
{
- struct snd_soc_component *component = dai->component;
- unsigned int rate, bclk;
- int ret;
+ size_t period_bytes = snd_pcm_lib_period_bytes(priv->wov_substream);
- rate = params_rate(params);
- if (rate != 48000)
- return -EINVAL;
+ while (period_bytes && wov_queue_size(priv) >= period_bytes) {
+ wov_queue_dequeue(priv, period_bytes);
+ period_bytes = snd_pcm_lib_period_bytes(priv->wov_substream);
+ }
+}
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- ret = set_i2s_sample_depth(component, EC_CODEC_SAMPLE_DEPTH_16);
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- ret = set_i2s_sample_depth(component, EC_CODEC_SAMPLE_DEPTH_24);
- break;
- default:
- return -EINVAL;
+static void wov_queue_enqueue(struct cros_ec_codec_priv *priv,
+ uint8_t *addr, size_t len, bool iomem)
+{
+ size_t req;
+
+ while (len) {
+ if (wov_queue_full(priv)) {
+ wov_queue_try_dequeue(priv);
+
+ if (wov_queue_full(priv)) {
+ dev_err(priv->dev, "overrun detected\n");
+ return;
+ }
+ }
+
+ if (priv->wov_wp >= priv->wov_rp)
+ req = sizeof(priv->wov_buf) - priv->wov_wp;
+ else
+			/* Note: one byte is left unused to tell full from empty */
+ req = priv->wov_rp - priv->wov_wp - 1;
+ req = min(req, len);
+
+ if (iomem)
+ memcpy_fromio(priv->wov_buf + priv->wov_wp,
+ (void __force __iomem *)addr, req);
+ else
+ memcpy(priv->wov_buf + priv->wov_wp, addr, req);
+
+ priv->wov_wp += req;
+ if (priv->wov_wp == sizeof(priv->wov_buf))
+ priv->wov_wp = 0;
+
+ addr += req;
+ len -= req;
}
- if (ret < 0)
+
+ wov_queue_try_dequeue(priv);
+}
+
+static int wov_read_audio_shm(struct cros_ec_codec_priv *priv)
+{
+ struct ec_param_ec_codec_wov p;
+ struct ec_response_ec_codec_wov_read_audio_shm r;
+ int ret;
+
+ p.cmd = EC_CODEC_WOV_READ_AUDIO_SHM;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret) {
+ dev_err(priv->dev, "failed to EC_CODEC_WOV_READ_AUDIO_SHM\n");
return ret;
+ }
- bclk = snd_soc_params_to_bclk(params);
- return set_i2s_bclk(component, bclk);
+ if (!r.len)
+ dev_dbg(priv->dev, "no data, sleep\n");
+ else
+ wov_queue_enqueue(priv, priv->wov_audio_shm_p + r.offset, r.len,
+ priv->wov_audio_shm_type == EC_CODEC_SHM_TYPE_EC_RAM);
+ return -EAGAIN;
}
-static const struct snd_soc_dai_ops cros_ec_i2s_dai_ops = {
- .hw_params = cros_ec_i2s_hw_params,
- .set_fmt = cros_ec_i2s_set_dai_fmt,
-};
+static int wov_read_audio(struct cros_ec_codec_priv *priv)
+{
+ struct ec_param_ec_codec_wov p;
+ struct ec_response_ec_codec_wov_read_audio r;
+ int remain = priv->wov_burst_read ? 16000 : 320;
+ int ret;
-static struct snd_soc_dai_driver cros_ec_dai[] = {
- {
- .name = "cros_ec_codec I2S",
- .id = 0,
- .capture = {
- .stream_name = "I2S Capture",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
- },
- .ops = &cros_ec_i2s_dai_ops,
+ while (remain >= 0) {
+ p.cmd = EC_CODEC_WOV_READ_AUDIO;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret) {
+ dev_err(priv->dev,
+ "failed to EC_CODEC_WOV_READ_AUDIO\n");
+ return ret;
+ }
+
+ if (!r.len) {
+ dev_dbg(priv->dev, "no data, sleep\n");
+ priv->wov_burst_read = false;
+ break;
+ }
+
+ wov_queue_enqueue(priv, r.buf, r.len, false);
+ remain -= r.len;
}
-};
-static int get_ec_mic_gain(struct snd_soc_component *component,
- u8 *left, u8 *right)
+ return -EAGAIN;
+}
+
+static void wov_copy_work(struct work_struct *w)
{
- struct ec_param_codec_i2s param;
- struct ec_codec_i2s_gain resp;
+ struct cros_ec_codec_priv *priv =
+ container_of(w, struct cros_ec_codec_priv, wov_copy_work.work);
int ret;
- param.cmd = EC_CODEC_GET_GAIN;
+ mutex_lock(&priv->wov_dma_lock);
+ if (!priv->wov_substream) {
+ dev_warn(priv->dev, "no pcm substream\n");
+ goto leave;
+ }
- ret = ec_command_get_gain(component, &param, &resp);
- if (ret < 0)
- return ret;
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_AUDIO_SHM))
+ ret = wov_read_audio_shm(priv);
+ else
+ ret = wov_read_audio(priv);
+
+ if (ret == -EAGAIN)
+ schedule_delayed_work(&priv->wov_copy_work,
+ msecs_to_jiffies(10));
+ else if (ret)
+ dev_err(priv->dev, "failed to read audio data\n");
+leave:
+ mutex_unlock(&priv->wov_dma_lock);
+}
- *left = resp.left;
- *right = resp.right;
+static int wov_enable_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv = snd_soc_component_get_drvdata(c);
+ ucontrol->value.integer.value[0] = priv->wov_enabled;
return 0;
}
-static int mic_gain_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int wov_enable_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- u8 left, right;
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct cros_ec_codec_priv *priv = snd_soc_component_get_drvdata(c);
+ int enabled = ucontrol->value.integer.value[0];
+ struct ec_param_ec_codec_wov p;
int ret;
- ret = get_ec_mic_gain(component, &left, &right);
- if (ret)
- return ret;
-
- ucontrol->value.integer.value[0] = left;
- ucontrol->value.integer.value[1] = right;
+ if (priv->wov_enabled != enabled) {
+ if (enabled)
+ p.cmd = EC_CODEC_WOV_ENABLE;
+ else
+ p.cmd = EC_CODEC_WOV_DISABLE;
+
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to %s wov\n",
+ enabled ? "enable" : "disable");
+ return ret;
+ }
+
+ priv->wov_enabled = enabled;
+ }
return 0;
}
-static int set_ec_mic_gain(struct snd_soc_component *component,
- u8 left, u8 right)
+static int wov_set_lang_shm(struct cros_ec_codec_priv *priv,
+ uint8_t *buf, size_t size, uint8_t *digest)
{
- struct ec_param_codec_i2s param;
+ struct ec_param_ec_codec_wov p;
+ struct ec_param_ec_codec_wov_set_lang_shm *pp = &p.set_lang_shm_param;
+ int ret;
- dev_dbg(component->dev, "%s set mic gain to %u, %u\n",
- __func__, left, right);
+ if (size > priv->wov_lang_shm_len) {
+		dev_err(priv->dev, "not enough SHM space: %u\n",
+ priv->wov_lang_shm_len);
+ return -EIO;
+ }
- param.cmd = EC_CODEC_SET_GAIN;
- param.gain.left = left;
- param.gain.right = right;
+ switch (priv->wov_lang_shm_type) {
+ case EC_CODEC_SHM_TYPE_EC_RAM:
+ memcpy_toio((void __force __iomem *)priv->wov_lang_shm_p,
+ buf, size);
+ memset_io((void __force __iomem *)priv->wov_lang_shm_p + size,
+ 0, priv->wov_lang_shm_len - size);
+ break;
+ case EC_CODEC_SHM_TYPE_SYSTEM_RAM:
+ memcpy(priv->wov_lang_shm_p, buf, size);
+ memset(priv->wov_lang_shm_p + size, 0,
+ priv->wov_lang_shm_len - size);
- return ec_command_no_resp(component, &param);
+		/* ensure the writes reach memory before the host command */
+ wmb();
+ break;
+ }
+
+ p.cmd = EC_CODEC_WOV_SET_LANG_SHM;
+ memcpy(pp->hash, digest, SHA256_DIGEST_SIZE);
+ pp->total_len = size;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to EC_CODEC_WOV_SET_LANG_SHM\n");
+ return ret;
+ }
+
+ return 0;
}
-static int mic_gain_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int wov_set_lang(struct cros_ec_codec_priv *priv,
+ uint8_t *buf, size_t size, uint8_t *digest)
{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- struct cros_ec_codec_data *codec_data =
- snd_soc_component_get_drvdata(component);
- int left = ucontrol->value.integer.value[0];
- int right = ucontrol->value.integer.value[1];
- unsigned int max_dmic_gain = codec_data->max_dmic_gain;
+ struct ec_param_ec_codec_wov p;
+ struct ec_param_ec_codec_wov_set_lang *pp = &p.set_lang_param;
+ size_t i, req;
+ int ret;
- if (left > max_dmic_gain || right > max_dmic_gain)
- return -EINVAL;
+ for (i = 0; i < size; i += req) {
+ req = min(size - i, ARRAY_SIZE(pp->buf));
+
+ p.cmd = EC_CODEC_WOV_SET_LANG;
+ memcpy(pp->hash, digest, SHA256_DIGEST_SIZE);
+ pp->total_len = size;
+ pp->offset = i;
+ memcpy(pp->buf, buf + i, req);
+ pp->len = req;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p), NULL, 0);
+ if (ret) {
+ dev_err(priv->dev, "failed to EC_CODEC_WOV_SET_LANG\n");
+ return ret;
+ }
+ }
- return set_ec_mic_gain(component, (u8)left, (u8)right);
+ return 0;
}
-static struct snd_kcontrol_new mic_gain_control =
- SOC_DOUBLE_EXT_TLV("EC Mic Gain", SND_SOC_NOPM, SND_SOC_NOPM, 0, 0, 0,
- mic_gain_get, mic_gain_put, ec_mic_gain_tlv);
-
-static int enable_i2s(struct snd_soc_component *component, int enable)
+static int wov_hotword_model_put(struct snd_kcontrol *kcontrol,
+ const unsigned int __user *bytes,
+ unsigned int size)
{
- struct ec_param_codec_i2s param;
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ struct ec_param_ec_codec_wov p;
+ struct ec_response_ec_codec_wov_get_lang r;
+ uint8_t digest[SHA256_DIGEST_SIZE];
+ uint8_t *buf;
+ int ret;
+
+	/* Skip the 8-byte TLV header (type and length words). */
+ bytes += 2;
+ size -= 8;
+
+ dev_dbg(priv->dev, "%s: size=%d\n", __func__, size);
+
+ buf = memdup_user(bytes, size);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = calculate_sha256(priv, buf, size, digest);
+ if (ret)
+ goto leave;
+
+ p.cmd = EC_CODEC_WOV_GET_LANG;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC_WOV,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret)
+ goto leave;
- dev_dbg(component->dev, "%s set i2s to %u\n", __func__, enable);
+ if (memcmp(digest, r.hash, SHA256_DIGEST_SIZE) == 0) {
+		dev_dbg(priv->dev, "not updated\n");
+ goto leave;
+ }
- param.cmd = EC_CODEC_I2S_ENABLE;
- param.i2s_enable = enable;
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_LANG_SHM))
+ ret = wov_set_lang_shm(priv, buf, size, digest);
+ else
+ ret = wov_set_lang(priv, buf, size, digest);
- return ec_command_no_resp(component, &param);
+leave:
+ kfree(buf);
+ return ret;
}
-static int cros_ec_i2s_enable_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+static struct snd_kcontrol_new wov_controls[] = {
+ SOC_SINGLE_BOOL_EXT("Wake-on-Voice Switch", 0,
+ wov_enable_get, wov_enable_put),
+ SND_SOC_BYTES_TLV("Hotword Model", 0x11000, NULL,
+ wov_hotword_model_put),
+};
+
+static struct snd_soc_dai_driver wov_dai_driver = {
+ .name = "Wake on Voice",
+ .capture = {
+ .stream_name = "WoV Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+static int wov_host_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend, void *notify)
{
- struct snd_soc_component *component =
- snd_soc_dapm_to_component(w->dapm);
+ struct cros_ec_codec_priv *priv =
+ container_of(nb, struct cros_ec_codec_priv, wov_notifier);
+ u32 host_event;
+
+ dev_dbg(priv->dev, "%s\n", __func__);
+
+ host_event = cros_ec_get_host_event(priv->ec_device);
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_WOV)) {
+ schedule_delayed_work(&priv->wov_copy_work, 0);
+ return NOTIFY_OK;
+ } else {
+ return NOTIFY_DONE;
+ }
+}
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- dev_dbg(component->dev,
- "%s got SND_SOC_DAPM_PRE_PMU event\n", __func__);
- return enable_i2s(component, 1);
+static int wov_probe(struct snd_soc_component *component)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
+ int ret;
- case SND_SOC_DAPM_PRE_PMD:
- dev_dbg(component->dev,
- "%s got SND_SOC_DAPM_PRE_PMD event\n", __func__);
- return enable_i2s(component, 0);
+ mutex_init(&priv->wov_dma_lock);
+ INIT_DELAYED_WORK(&priv->wov_copy_work, wov_copy_work);
+
+ priv->wov_notifier.notifier_call = wov_host_event;
+ ret = blocking_notifier_chain_register(
+ &priv->ec_device->event_notifier, &priv->wov_notifier);
+ if (ret)
+ return ret;
+
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_LANG_SHM)) {
+ priv->wov_lang_shm_p = wov_map_shm(priv,
+ EC_CODEC_SHM_ID_WOV_LANG,
+ &priv->wov_lang_shm_len,
+ &priv->wov_lang_shm_type);
+ if (!priv->wov_lang_shm_p)
+ return -EFAULT;
}
- return 0;
+ if (ec_codec_capable(priv, EC_CODEC_CAP_WOV_AUDIO_SHM)) {
+ priv->wov_audio_shm_p = wov_map_shm(priv,
+ EC_CODEC_SHM_ID_WOV_AUDIO,
+ &priv->wov_audio_shm_len,
+ &priv->wov_audio_shm_type);
+ if (!priv->wov_audio_shm_p)
+ return -EFAULT;
+ }
+
+ return dmic_probe(component);
}
-/*
- * The goal of this DAPM route is to turn on/off I2S using EC
- * host command when capture stream is started/stopped.
- */
-static const struct snd_soc_dapm_widget cros_ec_codec_dapm_widgets[] = {
- SND_SOC_DAPM_INPUT("DMIC"),
+static void wov_remove(struct snd_soc_component *component)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
- /*
- * Control EC to enable/disable I2S.
- */
- SND_SOC_DAPM_SUPPLY("I2S Enable", SND_SOC_NOPM,
- 0, 0, cros_ec_i2s_enable_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ blocking_notifier_chain_unregister(
+ &priv->ec_device->event_notifier, &priv->wov_notifier);
+}
- SND_SOC_DAPM_AIF_OUT("I2STX", "I2S Capture", 0, SND_SOC_NOPM, 0, 0),
-};
+static int wov_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ static const struct snd_pcm_hardware hw_param = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_16000,
+ .channels_min = 1,
+ .channels_max = 1,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = 0x20000 / 8,
+ .periods_min = 8,
+ .periods_max = 8,
+ .buffer_bytes_max = 0x20000,
+ };
+
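+	/*
+	 * Illustrative arithmetic: at the maximum buffer size, the
+	 * 0x20000-byte (128 KiB) buffer splits into exactly 8 periods
+	 * of 0x20000 / 8 = 16 KiB each.
+	 */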
+ return snd_soc_set_runtime_hwparams(substream, &hw_param);
+}
-static const struct snd_soc_dapm_route cros_ec_codec_dapm_routes[] = {
- { "I2STX", NULL, "DMIC" },
- { "I2STX", NULL, "I2S Enable" },
-};
+static int wov_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
-/*
- * Read maximum gain from device property and set it to mixer control.
- */
-static int cros_ec_set_gain_range(struct device *dev)
+ mutex_lock(&priv->wov_dma_lock);
+ priv->wov_substream = substream;
+ priv->wov_rp = priv->wov_wp = 0;
+ priv->wov_dma_offset = 0;
+ priv->wov_burst_read = true;
+ mutex_unlock(&priv->wov_dma_lock);
+
+ return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+}
+
+static int wov_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct soc_mixer_control *control;
- struct cros_ec_codec_data *codec_data = dev_get_drvdata(dev);
- int rc;
+ struct cros_ec_codec_priv *priv =
+ snd_soc_component_get_drvdata(component);
- rc = device_property_read_u32(dev, "max-dmic-gain",
- &codec_data->max_dmic_gain);
- if (rc)
- return rc;
+ mutex_lock(&priv->wov_dma_lock);
+ wov_queue_dequeue(priv, wov_queue_size(priv));
+ priv->wov_substream = NULL;
+ mutex_unlock(&priv->wov_dma_lock);
- control = (struct soc_mixer_control *)
- mic_gain_control.private_value;
- control->max = codec_data->max_dmic_gain;
- control->platform_max = codec_data->max_dmic_gain;
+ cancel_delayed_work_sync(&priv->wov_copy_work);
- return 0;
+ return snd_pcm_lib_free_pages(substream);
}
-static int cros_ec_codec_probe(struct snd_soc_component *component)
+static snd_pcm_uframes_t wov_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- int rc;
-
- struct cros_ec_codec_data *codec_data =
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct cros_ec_codec_priv *priv =
snd_soc_component_get_drvdata(component);
- rc = cros_ec_set_gain_range(codec_data->dev);
- if (rc)
- return rc;
+ return bytes_to_frames(runtime, priv->wov_dma_offset);
+}
- return snd_soc_add_component_controls(component, &mic_gain_control, 1);
+static int wov_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ return 0;
}
-static const struct snd_soc_component_driver cros_ec_component_driver = {
- .probe = cros_ec_codec_probe,
- .dapm_widgets = cros_ec_codec_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(cros_ec_codec_dapm_widgets),
- .dapm_routes = cros_ec_codec_dapm_routes,
- .num_dapm_routes = ARRAY_SIZE(cros_ec_codec_dapm_routes),
+static const struct snd_soc_component_driver wov_component_driver = {
+ .probe = wov_probe,
+ .remove = wov_remove,
+ .controls = wov_controls,
+ .num_controls = ARRAY_SIZE(wov_controls),
+ .open = wov_pcm_open,
+ .hw_params = wov_pcm_hw_params,
+ .hw_free = wov_pcm_hw_free,
+ .pointer = wov_pcm_pointer,
+ .pcm_construct = wov_pcm_new,
};
-/*
- * Platform device and platform driver fro cros-ec-codec.
- */
-static int cros_ec_codec_platform_probe(struct platform_device *pd)
+static int cros_ec_codec_platform_probe(struct platform_device *pdev)
{
- struct device *dev = &pd->dev;
- struct cros_ec_device *ec_device = dev_get_drvdata(pd->dev.parent);
- struct cros_ec_codec_data *codec_data;
+ struct device *dev = &pdev->dev;
+ struct cros_ec_device *ec_device = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_codec_priv *priv;
+ struct ec_param_ec_codec p;
+ struct ec_response_ec_codec_get_capabilities r;
+ int ret;
+#ifdef CONFIG_OF
+ struct device_node *node;
+ struct resource res;
+ u64 ec_shm_size;
+ const __be32 *regaddr_p;
+#endif
- codec_data = devm_kzalloc(dev, sizeof(struct cros_ec_codec_data),
- GFP_KERNEL);
- if (!codec_data)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- codec_data->dev = dev;
- codec_data->ec_device = ec_device;
+#ifdef CONFIG_OF
+ regaddr_p = of_get_address(dev->of_node, 0, &ec_shm_size, NULL);
+ if (regaddr_p) {
+ priv->ec_shm_addr = of_read_number(regaddr_p, 2);
+ priv->ec_shm_len = ec_shm_size;
- platform_set_drvdata(pd, codec_data);
+ dev_dbg(dev, "ec_shm_addr=%#llx len=%#x\n",
+ priv->ec_shm_addr, priv->ec_shm_len);
+ }
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &res);
+ if (!ret) {
+ priv->ap_shm_phys_addr = res.start;
+ priv->ap_shm_len = resource_size(&res);
+ priv->ap_shm_addr =
+ (uint64_t)(uintptr_t)devm_ioremap_wc(
+ dev, priv->ap_shm_phys_addr,
+ priv->ap_shm_len);
+ priv->ap_shm_last_alloc = priv->ap_shm_phys_addr;
+
+ dev_dbg(dev, "ap_shm_phys_addr=%#llx len=%#x\n",
+ priv->ap_shm_phys_addr, priv->ap_shm_len);
+ }
+ }
+#endif
+
+ priv->dev = dev;
+ priv->ec_device = ec_device;
+ atomic_set(&priv->dmic_probed, 0);
+
+ p.cmd = EC_CODEC_GET_CAPABILITIES;
+ ret = send_ec_host_command(priv->ec_device, EC_CMD_EC_CODEC,
+ (uint8_t *)&p, sizeof(p),
+ (uint8_t *)&r, sizeof(r));
+ if (ret) {
+ dev_err(dev, "failed to EC_CODEC_GET_CAPABILITIES\n");
+ return ret;
+ }
+ priv->ec_capabilities = r.capabilities;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_snd_soc_register_component(dev, &i2s_rx_component_driver,
+ &i2s_rx_dai_driver, 1);
+ if (ret)
+ return ret;
- return devm_snd_soc_register_component(dev, &cros_ec_component_driver,
- cros_ec_dai, ARRAY_SIZE(cros_ec_dai));
+ return devm_snd_soc_register_component(dev, &wov_component_driver,
+ &wov_dai_driver, 1);
}
#ifdef CONFIG_OF
@@ -427,7 +1049,7 @@ MODULE_DEVICE_TABLE(of, cros_ec_codec_of_match);
static struct platform_driver cros_ec_codec_platform_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = "cros-ec-codec",
.of_match_table = of_match_ptr(cros_ec_codec_of_match),
},
.probe = cros_ec_codec_platform_probe,
@@ -438,4 +1060,4 @@ module_platform_driver(cros_ec_codec_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChromeOS EC codec driver");
MODULE_AUTHOR("Cheng-Yi Chiang <cychiang@chromium.org>");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:cros-ec-codec");
diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c
index 1c1ba7bea4d8..2ad00ed21bec 100644
--- a/sound/soc/codecs/cx2072x.c
+++ b/sound/soc/codecs/cx2072x.c
@@ -1507,7 +1507,7 @@ static int cx2072x_probe(struct snd_soc_component *codec)
regmap_multi_reg_write(cx2072x->regmap, cx2072x_reg_init,
ARRAY_SIZE(cx2072x_reg_init));
- /* configre PortC as input device */
+ /* configure PortC as input device */
regmap_update_bits(cx2072x->regmap, CX2072X_PORTC_PIN_CTRL,
0x20, 0x20);
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index 4570f662fb48..6803d39e09a5 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -14,13 +14,11 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/hdaudio_ext.h>
+#include <sound/hda_i915.h>
#include <sound/hda_codec.h>
#include <sound/hda_register.h>
-#include "hdac_hda.h"
-#define HDAC_ANALOG_DAI_ID 0
-#define HDAC_DIGITAL_DAI_ID 1
-#define HDAC_ALT_ANALOG_DAI_ID 2
+#include "hdac_hda.h"
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_U8 | \
@@ -32,6 +30,11 @@
SNDRV_PCM_FMTBIT_U32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+#define STUB_HDMI_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
static int hdac_hda_dai_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai);
static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
@@ -121,7 +124,46 @@ static struct snd_soc_dai_driver hdac_hda_dais[] = {
.formats = STUB_FORMATS,
.sig_bits = 24,
},
-}
+},
+{
+ .id = HDAC_HDMI_0_DAI_ID,
+ .name = "intel-hdmi-hifi1",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "hifi1",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rates = STUB_HDMI_RATES,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_HDMI_1_DAI_ID,
+ .name = "intel-hdmi-hifi2",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "hifi2",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rates = STUB_HDMI_RATES,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_HDMI_2_DAI_ID,
+ .name = "intel-hdmi-hifi3",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "hifi3",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rates = STUB_HDMI_RATES,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
};
@@ -135,10 +177,11 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
hda_pvt = snd_soc_component_get_drvdata(component);
pcm = &hda_pvt->pcm[dai->id];
+
if (tx_mask)
- pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+ pcm->stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
else
- pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
+ pcm->stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
return 0;
}
@@ -278,6 +321,12 @@ static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
struct hda_pcm *cpcm;
const char *pcm_name;
+ /*
+	 * Map the DAI ID to the closest matching PCM name, using the
+	 * naming scheme of hda-codec snd_hda_gen_build_pcms() and, for
+	 * HDMI, of hda_codec patch_hdmi.c.
+ */
+
switch (dai->id) {
case HDAC_ANALOG_DAI_ID:
pcm_name = "Analog";
@@ -288,13 +337,22 @@ static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
case HDAC_ALT_ANALOG_DAI_ID:
pcm_name = "Alt Analog";
break;
+ case HDAC_HDMI_0_DAI_ID:
+ pcm_name = "HDMI 0";
+ break;
+ case HDAC_HDMI_1_DAI_ID:
+ pcm_name = "HDMI 1";
+ break;
+ case HDAC_HDMI_2_DAI_ID:
+ pcm_name = "HDMI 2";
+ break;
default:
dev_err(&hcodec->core.dev, "invalid dai id %d\n", dai->id);
return NULL;
}
list_for_each_entry(cpcm, &hcodec->pcm_list_head, list) {
- if (strpbrk(cpcm->name, pcm_name))
+ if (strstr(cpcm->name, pcm_name))
return cpcm;
}
@@ -302,6 +360,18 @@ static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
return NULL;
}
+static bool is_hdmi_codec(struct hda_codec *hcodec)
+{
+ struct hda_pcm *cpcm;
+
+ list_for_each_entry(cpcm, &hcodec->pcm_list_head, list) {
+ if (cpcm->pcm_type == HDA_PCM_TYPE_HDMI)
+ return true;
+ }
+
+ return false;
+}
+
static int hdac_hda_codec_probe(struct snd_soc_component *component)
{
struct hdac_hda_priv *hda_pvt =
@@ -322,6 +392,15 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+ /*
+ * Ensure any HDA display is powered at codec probe.
+ * After snd_hda_codec_device_new(), display power is
+ * managed by runtime PM.
+ */
+ if (hda_pvt->need_display_power)
+ snd_hdac_display_power(hdev->bus,
+ HDA_CODEC_IDX_CONTROLLER, true);
+
ret = snd_hda_codec_device_new(hcodec->bus, component->card->snd_card,
hdev->addr, hcodec);
if (ret < 0) {
@@ -366,20 +445,31 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
dev_dbg(&hdev->dev, "no patch file found\n");
}
+ /* configure codec for 1:1 PCM:DAI mapping */
+ hcodec->mst_no_extra_pcms = 1;
+
ret = snd_hda_codec_parse_pcms(hcodec);
if (ret < 0) {
dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
goto error;
}
- ret = snd_hda_codec_build_controls(hcodec);
- if (ret < 0) {
- dev_err(&hdev->dev, "unable to create controls %d\n", ret);
- goto error;
+ /* HDMI controls need to be created in machine drivers */
+ if (!is_hdmi_codec(hcodec)) {
+ ret = snd_hda_codec_build_controls(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to create controls %d\n",
+ ret);
+ goto error;
+ }
}
hcodec->core.lazy_cache = true;
+ if (hda_pvt->need_display_power)
+ snd_hdac_display_power(hdev->bus,
+ HDA_CODEC_IDX_CONTROLLER, false);
+
/*
* hdac_device core already sets the state to active and calls
* get_noresume. So enable runtime and set the device to suspend.
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index 6b1bd4f428e7..e145cec085b8 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -6,6 +6,16 @@
#ifndef __HDAC_HDA_H__
#define __HDAC_HDA_H__
+enum {
+ HDAC_ANALOG_DAI_ID = 0,
+ HDAC_DIGITAL_DAI_ID,
+ HDAC_ALT_ANALOG_DAI_ID,
+ HDAC_HDMI_0_DAI_ID,
+ HDAC_HDMI_1_DAI_ID,
+ HDAC_HDMI_2_DAI_ID,
+ HDAC_LAST_DAI_ID = HDAC_HDMI_2_DAI_ID,
+};
+
struct hdac_hda_pcm {
int stream_tag[2];
unsigned int format_val[2];
@@ -13,7 +23,8 @@ struct hdac_hda_pcm {
struct hdac_hda_priv {
struct hda_codec codec;
- struct hdac_hda_pcm pcm[2];
+ struct hdac_hda_pcm pcm[HDAC_LAST_DAI_ID];
+ bool need_display_power;
};
#define hdac_to_hda_priv(_hdac) \
diff --git a/sound/soc/codecs/madera.h b/sound/soc/codecs/madera.h
index 1f3e8e230cf2..6d8938a3fb64 100644
--- a/sound/soc/codecs/madera.h
+++ b/sound/soc/codecs/madera.h
@@ -27,6 +27,7 @@
#define MADERA_FLL_SRC_NONE -1
#define MADERA_FLL_SRC_MCLK1 0
#define MADERA_FLL_SRC_MCLK2 1
+#define MADERA_FLL_SRC_MCLK3 2
#define MADERA_FLL_SRC_SLIMCLK 3
#define MADERA_FLL_SRC_FLL1 4
#define MADERA_FLL_SRC_FLL2 5
@@ -51,6 +52,7 @@
#define MADERA_CLK_SRC_MCLK1 0x0
#define MADERA_CLK_SRC_MCLK2 0x1
+#define MADERA_CLK_SRC_MCLK3 0x2
#define MADERA_CLK_SRC_FLL1 0x4
#define MADERA_CLK_SRC_FLL2 0x5
#define MADERA_CLK_SRC_FLL3 0x6
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index e3d311fb510e..f53235be77d9 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -228,6 +228,10 @@
#define CDC_A_RX_EAR_CTL (0xf19E)
#define RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK BIT(0)
#define RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE BIT(0)
+#define RX_EAR_CTL_PA_EAR_PA_EN_MASK BIT(6)
+#define RX_EAR_CTL_PA_EAR_PA_EN_ENABLE BIT(6)
+#define RX_EAR_CTL_PA_SEL_MASK BIT(7)
+#define RX_EAR_CTL_PA_SEL BIT(7)
#define CDC_A_SPKR_DAC_CTL (0xf1B0)
#define SPKR_DAC_CTL_DAC_RESET_MASK BIT(4)
@@ -312,6 +316,7 @@ static const char *const hph_text[] = { "ZERO", "Switch", };
static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
ARRAY_SIZE(hph_text), hph_text);
+static const struct snd_kcontrol_new ear_mux = SOC_DAPM_ENUM("EAR_S", hph_enum);
static const struct snd_kcontrol_new hphl_mux = SOC_DAPM_ENUM("HPHL", hph_enum);
static const struct snd_kcontrol_new hphr_mux = SOC_DAPM_ENUM("HPHR", hph_enum);
@@ -685,6 +690,34 @@ static int pm8916_wcd_analog_enable_spk_pa(struct snd_soc_dapm_widget *w,
return 0;
}
+static int pm8916_wcd_analog_enable_ear_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_SEL_MASK, RX_EAR_CTL_PA_SEL);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_EAR_PA_EN_MASK,
+ RX_EAR_CTL_PA_EAR_PA_EN_ENABLE);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_EAR_PA_EN_MASK, 0);
+ /* Delay to reduce the ear turn-off pop */
+ usleep_range(7000, 7100);
+ snd_soc_component_update_bits(component, CDC_A_RX_EAR_CTL,
+ RX_EAR_CTL_PA_SEL_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
static const struct reg_default wcd_reg_defaults_2_0[] = {
{CDC_A_RX_COM_OCP_CTL, 0xD1},
{CDC_A_RX_COM_OCP_COUNT, 0xFF},
@@ -801,12 +834,20 @@ static const struct snd_soc_dapm_route pm8916_wcd_analog_audio_map[] = {
{"PDM_TX", NULL, "A_MCLK2"},
{"A_MCLK2", NULL, "A_MCLK"},
+ /* Earpiece (RX MIX1) */
+ {"EAR", NULL, "EAR_S"},
+ {"EAR_S", "Switch", "EAR PA"},
+ {"EAR PA", NULL, "RX_BIAS"},
+ {"EAR PA", NULL, "HPHL DAC"},
+ {"EAR PA", NULL, "HPHR DAC"},
+ {"EAR PA", NULL, "EAR CP"},
+
/* Headset (RX MIX1 and RX MIX2) */
{"HEADPHONE", NULL, "HPHL PA"},
{"HEADPHONE", NULL, "HPHR PA"},
- {"HPHL PA", NULL, "EAR_HPHL_CLK"},
- {"HPHR PA", NULL, "EAR_HPHR_CLK"},
+ {"HPHL DAC", NULL, "EAR_HPHL_CLK"},
+ {"HPHR DAC", NULL, "EAR_HPHR_CLK"},
{"CP", NULL, "NCP_CLK"},
@@ -847,11 +888,20 @@ static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_INPUT("AMIC3"),
SND_SOC_DAPM_INPUT("AMIC2"),
+ SND_SOC_DAPM_OUTPUT("EAR"),
SND_SOC_DAPM_OUTPUT("HEADPHONE"),
/* RX stuff */
SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+ SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ pm8916_wcd_analog_enable_ear_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("EAR_S", SND_SOC_NOPM, 0, 0, &ear_mux),
+ SND_SOC_DAPM_SUPPLY("EAR CP", CDC_A_NCP_EN, 4, 0, NULL, 0),
+
SND_SOC_DAPM_PGA("HPHL PA", CDC_A_RX_HPH_CNP_EN, 5, 0, NULL, 0),
SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, &hphl_mux),
SND_SOC_DAPM_MIXER("HPHL DAC", CDC_A_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
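The EAR PA handler above stages its power sequence across DAPM events: select the PA before power-up, enable it after, and on power-down disable first, wait out the pop, then deselect. A minimal sketch of that event-callback shape, with hypothetical register and bit names (MY_PA_REG, MY_PA_EN, MY_PA_SEL standing in for the real msm8916 definitions):

```c
#include <linux/bits.h>
#include <linux/delay.h>
#include <sound/soc.h>

/* Hypothetical sketch of a DAPM PGA event callback; the register
 * and bit names below are stand-ins, not real codec registers.
 */
#define MY_PA_REG	0x100
#define MY_PA_EN	BIT(6)
#define MY_PA_SEL	BIT(7)

static int my_pa_event(struct snd_soc_dapm_widget *w,
		       struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:	/* route first */
		snd_soc_component_update_bits(c, MY_PA_REG, MY_PA_SEL,
					      MY_PA_SEL);
		break;
	case SND_SOC_DAPM_POST_PMU:	/* then enable the PA */
		snd_soc_component_update_bits(c, MY_PA_REG, MY_PA_EN,
					      MY_PA_EN);
		break;
	case SND_SOC_DAPM_POST_PMD:	/* disable, wait, un-route */
		snd_soc_component_update_bits(c, MY_PA_REG, MY_PA_EN, 0);
		usleep_range(7000, 7100);	/* pop suppression */
		snd_soc_component_update_bits(c, MY_PA_REG, MY_PA_SEL, 0);
		break;
	}
	return 0;
}
```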
diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c
index bb737fd678cc..1b830ea4f6ed 100644
--- a/sound/soc/codecs/mt6358.c
+++ b/sound/soc/codecs/mt6358.c
@@ -93,6 +93,8 @@ struct mt6358_priv {
int mtkaif_protocol;
struct regulator *avdd_reg;
+
+ int wov_enabled;
};
int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt,
@@ -464,6 +466,106 @@ static int mt6358_put_volsw(struct snd_kcontrol *kcontrol,
return ret;
}
+static void mt6358_restore_pga(struct mt6358_priv *priv);
+
+static int mt6358_enable_wov_phase2(struct mt6358_priv *priv)
+{
+ /* analog */
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW14, 0xffff, 0xa2b5);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xffff, 0x0800);
+ mt6358_restore_pga(priv);
+
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW13, 0xffff, 0x9929);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xffff, 0x0025);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON8,
+ 0xffff, 0x0005);
+
+ /* digital */
+ regmap_update_bits(priv->regmap, MT6358_AUD_TOP_CKPDN_CON0,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3, 0xffff, 0x0120);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG0, 0xffff, 0xffff);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG1, 0xffff, 0x0200);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG2, 0xffff, 0x2424);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG3, 0xffff, 0xdbac);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG4, 0xffff, 0x029e);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG5, 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_POSDIV_CFG0,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_HPF_CFG0,
+ 0xffff, 0x0451);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_TOP, 0xffff, 0x68d1);
+
+ return 0;
+}
+
+static int mt6358_disable_wov_phase2(struct mt6358_priv *priv)
+{
+ /* digital */
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_TOP, 0xffff, 0xc000);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_HPF_CFG0,
+ 0xffff, 0x0450);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_POSDIV_CFG0,
+ 0xffff, 0x0c00);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG5, 0xffff, 0x0100);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG4, 0xffff, 0x006c);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG3, 0xffff, 0xa879);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG2, 0xffff, 0x2323);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG1, 0xffff, 0x0400);
+ regmap_update_bits(priv->regmap, MT6358_AFE_VOW_CFG0, 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_GPIO_MODE3, 0xffff, 0x02d8);
+ regmap_update_bits(priv->regmap, MT6358_AUD_TOP_CKPDN_CON0,
+ 0xffff, 0x0000);
+
+ /* analog */
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON8,
+ 0xffff, 0x0004);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON9,
+ 0xffff, 0x0000);
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW13, 0xffff, 0x9829);
+ regmap_update_bits(priv->regmap, MT6358_AUDENC_ANA_CON1,
+ 0xffff, 0x0000);
+ mt6358_restore_pga(priv);
+ regmap_update_bits(priv->regmap, MT6358_DCXO_CW14, 0xffff, 0xa2b5);
+ regmap_update_bits(priv->regmap, MT6358_AUDDEC_ANA_CON13,
+ 0xffff, 0x0010);
+
+ return 0;
+}
+
+static int mt6358_get_wov(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(c);
+
+ ucontrol->value.integer.value[0] = priv->wov_enabled;
+ return 0;
+}
+
+static int mt6358_put_wov(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+ struct mt6358_priv *priv = snd_soc_component_get_drvdata(c);
+ int enabled = ucontrol->value.integer.value[0];
+
+ if (priv->wov_enabled != enabled) {
+ if (enabled)
+ mt6358_enable_wov_phase2(priv);
+ else
+ mt6358_disable_wov_phase2(priv);
+
+ priv->wov_enabled = enabled;
+ }
+
+ return 0;
+}
+
static const DECLARE_TLV_DB_SCALE(playback_tlv, -1000, 100, 0);
static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 600, 0);
@@ -483,6 +585,9 @@ static const struct snd_kcontrol_new mt6358_snd_controls[] = {
MT6358_AUDENC_ANA_CON0, MT6358_AUDENC_ANA_CON1,
8, 4, 0,
snd_soc_get_volsw, mt6358_put_volsw, pga_tlv),
+
+ SOC_SINGLE_BOOL_EXT("Wake-on-Voice Phase2 Switch", 0,
+ mt6358_get_wov, mt6358_put_wov),
};
/* MUX */
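The Wake-on-Voice switch above is a boolean kcontrol whose get/put callbacks toggle driver state and a register sequence rather than a single register field. A minimal sketch of that pattern with hypothetical names (my_priv, my_get, my_put); idiomatically, put returns 1 when the value changed, though the driver above returns 0:

```c
#include <sound/soc.h>

/* Hypothetical sketch: a boolean kcontrol backed by driver state
 * instead of a register bit.
 */
struct my_priv { int wov_enabled; };

static int my_get(struct snd_kcontrol *kc, struct snd_ctl_elem_value *uc)
{
	struct snd_soc_component *c = snd_soc_kcontrol_component(kc);
	struct my_priv *priv = snd_soc_component_get_drvdata(c);

	uc->value.integer.value[0] = priv->wov_enabled;
	return 0;
}

static int my_put(struct snd_kcontrol *kc, struct snd_ctl_elem_value *uc)
{
	struct snd_soc_component *c = snd_soc_kcontrol_component(kc);
	struct my_priv *priv = snd_soc_component_get_drvdata(c);
	int enabled = uc->value.integer.value[0];

	if (priv->wov_enabled != enabled) {
		/* program the enable/disable register sequence here */
		priv->wov_enabled = enabled;
		return 1;	/* value changed */
	}
	return 0;
}

static const struct snd_kcontrol_new my_controls[] = {
	SOC_SINGLE_BOOL_EXT("Wake-on-Voice Phase2 Switch", 0,
			    my_get, my_put),
};
```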
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 88b75695fbf7..9711fab296eb 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -9,7 +9,9 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -59,9 +61,11 @@ struct pcm3168a_priv {
struct regulator_bulk_data supplies[PCM3168A_NUM_SUPPLIES];
struct regmap *regmap;
struct clk *scki;
+ struct gpio_desc *gpio_rst;
unsigned long sysclk;
struct pcm3168a_io_params io_params[2];
+ struct snd_soc_dai_driver dai_drv[2];
};
static const char *const pcm3168a_roll_off[] = { "Sharp", "Slow" };
@@ -314,6 +318,34 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
return 0;
}
+static void pcm3168a_update_fixup_pcm_stream(struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(component);
+ u64 formats = SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE;
+ unsigned int channel_max = dai->id == PCM3168A_DAI_DAC ? 8 : 6;
+
+ if (pcm3168a->io_params[dai->id].fmt == PCM3168A_FMT_RIGHT_J) {
+ /* S16_LE is only supported in RIGHT_J mode */
+ formats |= SNDRV_PCM_FMTBIT_S16_LE;
+
+ /*
+ * If multi DIN/DOUT is not selected, RIGHT_J can only support
+ * two channels (no TDM support)
+ */
+ if (pcm3168a->io_params[dai->id].tdm_slots != 2)
+ channel_max = 2;
+ }
+
+ if (dai->id == PCM3168A_DAI_DAC) {
+ dai->driver->playback.channels_max = channel_max;
+ dai->driver->playback.formats = formats;
+ } else {
+ dai->driver->capture.channels_max = channel_max;
+ dai->driver->capture.formats = formats;
+ }
+}
+
static int pcm3168a_set_dai_fmt(struct snd_soc_dai *dai, unsigned int format)
{
struct snd_soc_component *component = dai->component;
@@ -376,6 +408,8 @@ static int pcm3168a_set_dai_fmt(struct snd_soc_dai *dai, unsigned int format)
regmap_update_bits(pcm3168a->regmap, reg, mask, fmt << shift);
+ pcm3168a_update_fixup_pcm_stream(dai);
+
return 0;
}
@@ -409,6 +443,8 @@ static int pcm3168a_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
else
io_params->tdm_mask = rx_mask;
+ pcm3168a_update_fixup_pcm_stream(dai);
+
return 0;
}
@@ -530,63 +566,7 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int pcm3168a_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
- struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(component);
- unsigned int sample_min;
- unsigned int channel_max;
- unsigned int channel_maxs[] = {
- 8, /* DAC */
- 6 /* ADC */
- };
-
- /*
- * Available Data Bits
- *
- * RIGHT_J : 24 / 16
- * LEFT_J : 24
- * I2S : 24
- *
- * TDM available
- *
- * I2S
- * LEFT_J
- */
- switch (pcm3168a->io_params[dai->id].fmt) {
- case PCM3168A_FMT_RIGHT_J:
- sample_min = 16;
- channel_max = 2;
- break;
- case PCM3168A_FMT_LEFT_J:
- case PCM3168A_FMT_I2S:
- case PCM3168A_FMT_DSP_A:
- case PCM3168A_FMT_DSP_B:
- sample_min = 24;
- channel_max = channel_maxs[dai->id];
- break;
- default:
- sample_min = 24;
- channel_max = 2;
- }
-
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
- sample_min, 32);
-
- /* Allow all channels in multi DIN/DOUT mode */
- if (pcm3168a->io_params[dai->id].tdm_slots == 2)
- channel_max = channel_maxs[dai->id];
-
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- 2, channel_max);
-
- return 0;
-}
static const struct snd_soc_dai_ops pcm3168a_dai_ops = {
- .startup = pcm3168a_startup,
.set_fmt = pcm3168a_set_dai_fmt,
.set_sysclk = pcm3168a_set_dai_sysclk,
.hw_params = pcm3168a_hw_params,
@@ -666,6 +646,7 @@ static bool pcm3168a_readable_register(struct device *dev, unsigned int reg)
static bool pcm3168a_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case PCM3168A_RST_SMODE:
case PCM3168A_DAC_ZERO:
case PCM3168A_ADC_OV:
return true;
@@ -725,6 +706,25 @@ int pcm3168a_probe(struct device *dev, struct regmap *regmap)
dev_set_drvdata(dev, pcm3168a);
+ /*
+ * Request the reset GPIO line (connected to the RST pin) as
+ * non-exclusive, since the same reset line may be connected to
+ * multiple pcm3168a codecs.
+ *
+ * RST is active low and we want the GPIO line high initially, so
+ * request the initial level as LOW, which in practice means
+ * DEASSERTED: the deasserted level of a GPIO_ACTIVE_LOW line is HIGH.
+ */
+ pcm3168a->gpio_rst = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(pcm3168a->gpio_rst)) {
+ ret = PTR_ERR(pcm3168a->gpio_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to acquire RST gpio: %d\n", ret);
+
+ return ret;
+ }
+
pcm3168a->scki = devm_clk_get(dev, "scki");
if (IS_ERR(pcm3168a->scki)) {
ret = PTR_ERR(pcm3168a->scki);
@@ -766,18 +766,28 @@ int pcm3168a_probe(struct device *dev, struct regmap *regmap)
goto err_regulator;
}
- ret = pcm3168a_reset(pcm3168a);
- if (ret) {
- dev_err(dev, "Failed to reset device: %d\n", ret);
- goto err_regulator;
+ if (pcm3168a->gpio_rst) {
+ /*
+ * The device is taken out of reset via the GPIO line; wait
+ * 3846 SCKI clock cycles for the internal reset de-assertion.
+ */
+ msleep(DIV_ROUND_UP(3846 * 1000, pcm3168a->sysclk));
+ } else {
+ ret = pcm3168a_reset(pcm3168a);
+ if (ret) {
+ dev_err(dev, "Failed to reset device: %d\n", ret);
+ goto err_regulator;
+ }
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
pm_runtime_idle(dev);
- ret = devm_snd_soc_register_component(dev, &pcm3168a_driver, pcm3168a_dais,
- ARRAY_SIZE(pcm3168a_dais));
+ memcpy(pcm3168a->dai_drv, pcm3168a_dais, sizeof(pcm3168a->dai_drv));
+ ret = devm_snd_soc_register_component(dev, &pcm3168a_driver,
+ pcm3168a->dai_drv,
+ ARRAY_SIZE(pcm3168a->dai_drv));
if (ret) {
dev_err(dev, "failed to register component: %d\n", ret);
goto err_regulator;
@@ -806,6 +816,15 @@ static void pcm3168a_disable(struct device *dev)
void pcm3168a_remove(struct device *dev)
{
+ struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
+
+ /*
+ * RST is active low and we want the GPIO line low when the driver
+ * is removed, so set the level to 1, which in practice means
+ * ASSERTED: the asserted level of a GPIO_ACTIVE_LOW line is LOW.
+ */
+ gpiod_set_value_cansleep(pcm3168a->gpio_rst, 1);
pm_runtime_disable(dev);
#ifndef CONFIG_PM
pcm3168a_disable(dev);
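The reset handling above hinges on two details: the GPIO is requested non-exclusively because several codecs may share one reset line, and after release the part needs 3846 SCKI cycles before it is usable. A minimal sketch of the acquisition and delay math, assuming sysclk holds the SCKI rate in Hz and my_take_out_of_reset is a hypothetical helper:

```c
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>

/* Sketch: shared, optional active-low reset line plus the post-reset
 * settle time of 3846 SCKI cycles, rounded up to whole milliseconds.
 */
static int my_take_out_of_reset(struct device *dev, unsigned long sysclk)
{
	struct gpio_desc *rst;

	rst = devm_gpiod_get_optional(dev, "reset",
				      GPIOD_OUT_LOW |
				      GPIOD_FLAGS_BIT_NONEXCLUSIVE);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	if (rst)	/* line present: wait 3846 SCKI cycles, in ms */
		msleep(DIV_ROUND_UP(3846 * 1000, sysclk));

	return 0;
}
```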
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index be1e276e3631..2552073e54ce 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -61,7 +61,6 @@ static const struct reg_sequence init_list[] = {
{ RT1011_DAC_SET_1, 0xe702 },
{ RT1011_DAC_SET_3, 0x2004 },
};
-#define RT1011_INIT_REG_LEN ARRAY_SIZE(init_list)
static const struct reg_default rt1011_reg[] = {
{0x0000, 0x0000},
@@ -684,7 +683,8 @@ static int rt1011_reg_init(struct snd_soc_component *component)
{
struct rt1011_priv *rt1011 = snd_soc_component_get_drvdata(component);
- regmap_multi_reg_write(rt1011->regmap, init_list, RT1011_INIT_REG_LEN);
+ regmap_multi_reg_write(rt1011->regmap,
+ init_list, ARRAY_SIZE(init_list));
return 0;
}
@@ -989,7 +989,7 @@ static SOC_ENUM_SINGLE_DECL(rt1011_din_source_enum, RT1011_CROSS_BQ_SET_1, 5,
static const char * const rt1011_tdm_data_out_select[] = {
"TDM_O_LR", "BQ1", "DVOL", "BQ10", "ALC", "DMIX", "ADC_SRC_LR",
- "ADC_O_LR", "ADC_MONO", "RSPK_BPF_LR", "DMIX_ADD", "ENVELOPE_FS",
+ "ADC_O_LR", "ADC_MONO", "RSPK_BPF_LR", "DMIX_ADD", "ENVELOPE_FS",
"SEP_O_GAIN", "ALC_BK_GAIN", "STP_V_C", "DMIX_ABST"
};
@@ -1002,7 +1002,7 @@ static SOC_ENUM_SINGLE_DECL(rt1011_tdm2_l_dac1_enum, RT1011_TDM2_SET_4, 12,
rt1011_tdm_l_ch_data_select);
static SOC_ENUM_SINGLE_DECL(rt1011_tdm1_adc1_dat_enum,
- RT1011_ADCDAT_OUT_SOURCE, 0, rt1011_tdm_data_out_select);
+ RT1011_ADCDAT_OUT_SOURCE, 0, rt1011_tdm_data_out_select);
static SOC_ENUM_SINGLE_DECL(rt1011_tdm1_adc1_loc_enum, RT1011_TDM1_SET_2, 0,
rt1011_tdm_l_ch_data_select);
@@ -1024,9 +1024,9 @@ static const char * const rt1011_tdm_adc_swap_select[] = {
"L/R", "R/L", "L/L", "R/R"
};
-static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc1_1_enum, RT1011_TDM1_SET_3, 6,
+static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc1_1_enum, RT1011_TDM1_SET_3, 6,
rt1011_tdm_adc_swap_select);
-static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc2_1_enum, RT1011_TDM1_SET_3, 4,
+static SOC_ENUM_SINGLE_DECL(rt1011_tdm_adc2_1_enum, RT1011_TDM1_SET_3, 4,
rt1011_tdm_adc_swap_select);
static void rt1011_reset(struct regmap *regmap)
@@ -1092,9 +1092,9 @@ static bool rt1011_validate_bq_drc_coeff(unsigned short reg)
{
if ((reg == RT1011_DAC_SET_1) |
(reg >= RT1011_ADC_SET && reg <= RT1011_ADC_SET_1) |
- (reg == RT1011_ADC_SET_4) | (reg == RT1011_ADC_SET_5) |
+ (reg == RT1011_ADC_SET_4) | (reg == RT1011_ADC_SET_5) |
(reg == RT1011_MIXER_1) |
- (reg == RT1011_A_TIMING_1) | (reg >= RT1011_POWER_7 &&
+ (reg == RT1011_A_TIMING_1) | (reg >= RT1011_POWER_7 &&
reg <= RT1011_POWER_8) |
(reg == RT1011_CLASS_D_POS) | (reg == RT1011_ANALOG_CTRL) |
(reg >= RT1011_SPK_TEMP_PROTECT_0 &&
@@ -1163,9 +1163,6 @@ static int rt1011_bq_drc_coeff_put(struct snd_kcontrol *kcontrol,
(struct rt1011_bq_drc_params *)ucontrol->value.integer.value;
unsigned int i, mode_idx = 0;
- if (!component->card->instantiated)
- return 0;
-
if (strstr(ucontrol->id.name, "AdvanceMode Initial Set"))
mode_idx = RT1011_ADVMODE_INITIAL_SET;
else if (strstr(ucontrol->id.name, "AdvanceMode SEP BQ Coeff"))
@@ -1236,9 +1233,6 @@ static int rt1011_r0_cali_put(struct snd_kcontrol *kcontrol,
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct rt1011_priv *rt1011 = snd_soc_component_get_drvdata(component);
- if (!component->card->instantiated)
- return 0;
-
rt1011->cali_done = 0;
if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF &&
ucontrol->value.integer.value[0])
@@ -1284,9 +1278,6 @@ static int rt1011_r0_load_mode_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0] == rt1011->r0_reg)
return 0;
- if (!component->card->instantiated)
- return 0;
-
if (ucontrol->value.integer.value[0] == 0)
return -EINVAL;
@@ -1298,7 +1289,7 @@ static int rt1011_r0_load_mode_put(struct snd_kcontrol *kcontrol,
r0_integer = format / rt1011->r0_reg / 128;
r0_factor = ((format / rt1011->r0_reg * 100) / 128)
- (r0_integer * 100);
- dev_info(dev, "New r0 resistance about %d.%02d ohm, reg=0x%X\n",
+ dev_info(dev, "New r0 resistance about %d.%02d ohm, reg=0x%X\n",
r0_integer, r0_factor, rt1011->r0_reg);
if (rt1011->r0_reg)
@@ -1640,6 +1631,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
default:
ret = -EINVAL;
+ goto _set_fmt_err_;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -1650,6 +1642,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
default:
ret = -EINVAL;
+ goto _set_fmt_err_;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -1666,6 +1659,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
default:
ret = -EINVAL;
+ goto _set_fmt_err_;
}
switch (dai->id) {
@@ -1683,6 +1677,7 @@ static int rt1011_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
ret = -EINVAL;
}
+_set_fmt_err_:
snd_soc_dapm_mutex_unlock(dapm);
return ret;
}
@@ -1778,7 +1773,8 @@ static int rt1011_set_component_pll(struct snd_soc_component *component,
ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
if (ret < 0) {
- dev_err(component->dev, "Unsupport input clock %d\n", freq_in);
+ dev_err(component->dev, "Unsupported input clock %d\n",
+ freq_in);
return ret;
}
@@ -1805,8 +1801,8 @@ static int rt1011_set_tdm_slot(struct snd_soc_dai *dai,
struct snd_soc_component *component = dai->component;
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(component);
- unsigned int val = 0, tdm_en = 0;
- int ret = 0;
+ unsigned int val = 0, tdm_en = 0, rx_slotnum, tx_slotnum;
+ int ret = 0, first_bit, last_bit;
snd_soc_dapm_mutex_lock(dapm);
if (rx_mask || tx_mask)
@@ -1829,6 +1825,7 @@ static int rt1011_set_tdm_slot(struct snd_soc_dai *dai,
break;
default:
ret = -EINVAL;
+ goto _set_tdm_err_;
}
switch (slot_width) {
@@ -1848,22 +1845,153 @@ static int rt1011_set_tdm_slot(struct snd_soc_dai *dai,
break;
default:
ret = -EINVAL;
+ goto _set_tdm_err_;
+ }
+
+ /* Rx slot configuration */
+ rx_slotnum = hweight_long(rx_mask);
+ first_bit = find_next_bit((unsigned long *)&rx_mask, 32, 0);
+ if (rx_slotnum > 1 || rx_slotnum == 0) {
+ ret = -EINVAL;
+ dev_dbg(component->dev, "too many rx slots or zero slot\n");
+ goto _set_tdm_err_;
+ }
+
+ switch (first_bit) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ snd_soc_component_update_bits(component,
+ RT1011_CROSS_BQ_SET_1, RT1011_MONO_LR_SEL_MASK,
+ RT1011_MONO_L_CHANNEL);
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_4,
+ RT1011_TDM_I2S_TX_L_DAC1_1_MASK |
+ RT1011_TDM_I2S_TX_R_DAC1_1_MASK,
+ (first_bit << RT1011_TDM_I2S_TX_L_DAC1_1_SFT) |
+ ((first_bit+1) << RT1011_TDM_I2S_TX_R_DAC1_1_SFT));
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ snd_soc_component_update_bits(component,
+ RT1011_CROSS_BQ_SET_1, RT1011_MONO_LR_SEL_MASK,
+ RT1011_MONO_R_CHANNEL);
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_4,
+ RT1011_TDM_I2S_TX_L_DAC1_1_MASK |
+ RT1011_TDM_I2S_TX_R_DAC1_1_MASK,
+ ((first_bit-1) << RT1011_TDM_I2S_TX_L_DAC1_1_SFT) |
+ (first_bit << RT1011_TDM_I2S_TX_R_DAC1_1_SFT));
+ break;
+ default:
+ ret = -EINVAL;
+ goto _set_tdm_err_;
+ }
+
+ /* Tx slot configuration */
+ tx_slotnum = hweight_long(tx_mask);
+ first_bit = find_next_bit((unsigned long *)&tx_mask, 32, 0);
+ last_bit = find_last_bit((unsigned long *)&tx_mask, 32);
+ if (tx_slotnum > 2 || (last_bit-first_bit) > 1) {
+ ret = -EINVAL;
+ dev_dbg(component->dev, "too many tx slots or tx slot location error\n");
+ goto _set_tdm_err_;
+ }
+
+ if (tx_slotnum == 1) {
+ snd_soc_component_update_bits(component, RT1011_TDM1_SET_2,
+ RT1011_TDM_I2S_DOCK_ADCDAT_LEN_1_MASK |
+ RT1011_TDM_ADCDAT1_DATA_LOCATION, first_bit);
+ switch (first_bit) {
+ case 1:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC1_1_MASK,
+ RT1011_TDM_I2S_RX_ADC1_1_LL);
+ break;
+ case 3:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC2_1_MASK,
+ RT1011_TDM_I2S_RX_ADC2_1_LL);
+ break;
+ case 5:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC3_1_MASK,
+ RT1011_TDM_I2S_RX_ADC3_1_LL);
+ break;
+ case 7:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC4_1_MASK,
+ RT1011_TDM_I2S_RX_ADC4_1_LL);
+ break;
+ case 0:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC1_1_MASK, 0);
+ break;
+ case 2:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC2_1_MASK, 0);
+ break;
+ case 4:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC3_1_MASK, 0);
+ break;
+ case 6:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_3,
+ RT1011_TDM_I2S_RX_ADC4_1_MASK, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_dbg(component->dev,
+ "tx slot location error\n");
+ goto _set_tdm_err_;
+ }
+ } else if (tx_slotnum == 2) {
+ switch (first_bit) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ snd_soc_component_update_bits(component,
+ RT1011_TDM1_SET_2,
+ RT1011_TDM_I2S_DOCK_ADCDAT_LEN_1_MASK |
+ RT1011_TDM_ADCDAT1_DATA_LOCATION,
+ RT1011_TDM_I2S_DOCK_ADCDAT_2CH | first_bit);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_dbg(component->dev,
+ "tx slot location should be paired and start from slot0/2/4/6\n");
+ goto _set_tdm_err_;
+ }
}
snd_soc_component_update_bits(component, RT1011_TDM1_SET_1,
RT1011_I2S_CH_TX_MASK | RT1011_I2S_CH_RX_MASK |
- RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
+ RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
snd_soc_component_update_bits(component, RT1011_TDM2_SET_1,
RT1011_I2S_CH_TX_MASK | RT1011_I2S_CH_RX_MASK |
- RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
+ RT1011_I2S_CH_TX_LEN_MASK | RT1011_I2S_CH_RX_LEN_MASK, val);
snd_soc_component_update_bits(component, RT1011_TDM1_SET_2,
- RT1011_TDM_I2S_DOCK_EN_1_MASK, tdm_en);
+ RT1011_TDM_I2S_DOCK_EN_1_MASK, tdm_en);
snd_soc_component_update_bits(component, RT1011_TDM2_SET_2,
- RT1011_TDM_I2S_DOCK_EN_2_MASK, tdm_en);
- snd_soc_component_update_bits(component, RT1011_TDM_TOTAL_SET,
- RT1011_ADCDAT1_PIN_CONFIG | RT1011_ADCDAT2_PIN_CONFIG,
- RT1011_ADCDAT1_OUTPUT | RT1011_ADCDAT2_OUTPUT);
+ RT1011_TDM_I2S_DOCK_EN_2_MASK, tdm_en);
+ if (tx_slotnum)
+ snd_soc_component_update_bits(component, RT1011_TDM_TOTAL_SET,
+ RT1011_ADCDAT1_PIN_CONFIG | RT1011_ADCDAT2_PIN_CONFIG,
+ RT1011_ADCDAT1_OUTPUT | RT1011_ADCDAT2_OUTPUT);
+_set_tdm_err_:
snd_soc_dapm_mutex_unlock(dapm);
return ret;
}
@@ -1982,7 +2110,7 @@ static const struct snd_soc_component_driver soc_component_dev_rt1011 = {
.remove = rt1011_remove,
.suspend = rt1011_suspend,
.resume = rt1011_resume,
- .set_bias_level = rt1011_set_bias_level,
+ .set_bias_level = rt1011_set_bias_level,
.controls = rt1011_snd_controls,
.num_controls = ARRAY_SIZE(rt1011_snd_controls),
.dapm_widgets = rt1011_dapm_widgets,
@@ -1991,9 +2119,9 @@ static const struct snd_soc_component_driver soc_component_dev_rt1011 = {
.num_dapm_routes = ARRAY_SIZE(rt1011_dapm_routes),
.set_sysclk = rt1011_set_component_sysclk,
.set_pll = rt1011_set_component_pll,
- .use_pmdown_time = 1,
- .endianness = 1,
- .non_legacy_dai_naming = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
};
static const struct regmap_config rt1011_regmap = {
@@ -2095,17 +2223,17 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
dc_offset = value << 16;
regmap_read(rt1011->regmap, RT1011_EFUSE_ADC_OFFSET_15_0, &value);
dc_offset |= (value & 0xffff);
- dev_info(dev, "ADC offset=0x%x\n", dc_offset);
+ dev_info(dev, "ADC offset=0x%x\n", dc_offset);
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G0_20_16, &value);
dc_offset = value << 16;
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G0_15_0, &value);
dc_offset |= (value & 0xffff);
- dev_info(dev, "Gain0 offset=0x%x\n", dc_offset);
+ dev_info(dev, "Gain0 offset=0x%x\n", dc_offset);
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G1_20_16, &value);
dc_offset = value << 16;
regmap_read(rt1011->regmap, RT1011_EFUSE_DAC_OFFSET_G1_15_0, &value);
dc_offset |= (value & 0xffff);
- dev_info(dev, "Gain1 offset=0x%x\n", dc_offset);
+ dev_info(dev, "Gain1 offset=0x%x\n", dc_offset);
if (cali_flag) {
@@ -2125,7 +2253,7 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
while (count < chk_cnt) {
msleep(100);
regmap_read(rt1011->regmap,
- RT1011_INIT_RECIPROCAL_SYN_24_16, &value);
+ RT1011_INIT_RECIPROCAL_SYN_24_16, &value);
r0[count%3] = value << 16;
regmap_read(rt1011->regmap,
RT1011_INIT_RECIPROCAL_SYN_15_0, &value);
@@ -2140,7 +2268,7 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
break;
}
if (count > chk_cnt) {
- dev_err(dev, "Calibrate R0 Failure\n");
+ dev_err(dev, "Calibrate R0 Failure\n");
ret = -EAGAIN;
} else {
format = 2147483648U; /* 2^24 * 128 */
@@ -2149,7 +2277,7 @@ static int rt1011_calibrate(struct rt1011_priv *rt1011, unsigned char cali_flag)
- (r0_integer * 100);
rt1011->r0_reg = r0[0];
rt1011->cali_done = 1;
- dev_info(dev, "r0 resistance about %d.%02d ohm, reg=0x%X\n",
+ dev_info(dev, "r0 resistance about %d.%02d ohm, reg=0x%X\n",
r0_integer, r0_factor, r0[0]);
}
}
@@ -2196,8 +2324,12 @@ static void rt1011_calibration_work(struct work_struct *work)
struct rt1011_priv *rt1011 =
container_of(work, struct rt1011_priv, cali_work);
struct snd_soc_component *component = rt1011->component;
+ unsigned int r0_integer, r0_factor, format;
- rt1011_calibrate(rt1011, 1);
+ if (rt1011->r0_calib)
+ rt1011_calibrate(rt1011, 0);
+ else
+ rt1011_calibrate(rt1011, 1);
/*
* This flag should reset after booting.
@@ -2208,6 +2340,40 @@ static void rt1011_calibration_work(struct work_struct *work)
/* initial */
rt1011_reg_init(component);
+
+ /* Apply temperature and calibration data from device property */
+ if (rt1011->temperature_calib <= 0xff &&
+ rt1011->temperature_calib > 0) {
+ snd_soc_component_update_bits(component,
+ RT1011_STP_INITIAL_RESISTANCE_TEMP, 0x3ff,
+ (rt1011->temperature_calib << 2));
+ }
+
+ if (rt1011->r0_calib) {
+ rt1011->r0_reg = rt1011->r0_calib;
+
+ format = 2147483648U; /* 2^24 * 128 */
+ r0_integer = format / rt1011->r0_reg / 128;
+ r0_factor = ((format / rt1011->r0_reg * 100) / 128)
+ - (r0_integer * 100);
+ dev_info(component->dev, "DP r0 resistance about %d.%02d ohm, reg=0x%X\n",
+ r0_integer, r0_factor, rt1011->r0_reg);
+
+ rt1011_r0_load(rt1011);
+ }
+}
+
+static int rt1011_parse_dp(struct rt1011_priv *rt1011, struct device *dev)
+{
+ device_property_read_u32(dev, "realtek,temperature_calib",
+ &rt1011->temperature_calib);
+ device_property_read_u32(dev, "realtek,r0_calib",
+ &rt1011->r0_calib);
+
+ dev_dbg(dev, "%s: r0_calib: 0x%x, temperature_calib: 0x%x",
+ __func__, rt1011->r0_calib, rt1011->temperature_calib);
+
+ return 0;
}
static int rt1011_i2c_probe(struct i2c_client *i2c,
@@ -2219,11 +2385,13 @@ static int rt1011_i2c_probe(struct i2c_client *i2c,
rt1011 = devm_kzalloc(&i2c->dev, sizeof(struct rt1011_priv),
GFP_KERNEL);
- if (rt1011 == NULL)
+ if (!rt1011)
return -ENOMEM;
i2c_set_clientdata(i2c, rt1011);
+ rt1011_parse_dp(rt1011, &i2c->dev);
+
rt1011->regmap = devm_regmap_init_i2c(i2c, &rt1011_regmap);
if (IS_ERR(rt1011->regmap)) {
ret = PTR_ERR(rt1011->regmap);
@@ -2254,7 +2422,6 @@ static void rt1011_i2c_shutdown(struct i2c_client *client)
rt1011_reset(rt1011->regmap);
}
-
static struct i2c_driver rt1011_i2c_driver = {
.driver = {
.name = "rt1011",
diff --git a/sound/soc/codecs/rt1011.h b/sound/soc/codecs/rt1011.h
index 2d65983f3d0f..68fadc15fa8c 100644
--- a/sound/soc/codecs/rt1011.h
+++ b/sound/soc/codecs/rt1011.h
@@ -460,6 +460,23 @@
#define RT1011_TDM_I2S_DOCK_EN_1_MASK (0x1 << 3)
#define RT1011_TDM_I2S_DOCK_EN_1_SFT 3
#define RT1011_TDM_I2S_DOCK_EN_1 (0x1 << 3)
+#define RT1011_TDM_ADCDAT1_DATA_LOCATION (0x7 << 0)
+
+/* TDM1 Setting-3 (0x0118) */
+#define RT1011_TDM_I2S_RX_ADC1_1_MASK (0x3 << 6)
+#define RT1011_TDM_I2S_RX_ADC2_1_MASK (0x3 << 4)
+#define RT1011_TDM_I2S_RX_ADC3_1_MASK (0x3 << 2)
+#define RT1011_TDM_I2S_RX_ADC4_1_MASK (0x3 << 0)
+#define RT1011_TDM_I2S_RX_ADC1_1_LL (0x2 << 6)
+#define RT1011_TDM_I2S_RX_ADC2_1_LL (0x2 << 4)
+#define RT1011_TDM_I2S_RX_ADC3_1_LL (0x2 << 2)
+#define RT1011_TDM_I2S_RX_ADC4_1_LL (0x2 << 0)
+
+/* TDM1 Setting-4 (0x011a) */
+#define RT1011_TDM_I2S_TX_L_DAC1_1_MASK (0x7 << 12)
+#define RT1011_TDM_I2S_TX_R_DAC1_1_MASK (0x7 << 8)
+#define RT1011_TDM_I2S_TX_L_DAC1_1_SFT 12
+#define RT1011_TDM_I2S_TX_R_DAC1_1_SFT 8
/* TDM2 Setting-2 (0x0120) */
#define RT1011_TDM_I2S_DOCK_ADCDAT_LEN_2_MASK (0x7 << 13)
@@ -585,6 +602,12 @@
#define RT1011_STP_T0_EN_BIT 6
#define RT1011_STP_T0_EN (0x1 << 6)
+/* Cross Biquad Setting-1 (0x0702) */
+#define RT1011_MONO_LR_SEL_MASK (0x3 << 5)
+#define RT1011_MONO_L_CHANNEL (0x0 << 5)
+#define RT1011_MONO_R_CHANNEL (0x1 << 5)
+#define RT1011_MONO_LR_MIX_CHANNEL (0x2 << 5)
+
/* ClassD Internal Setting-1 (0x1300) */
#define RT1011_DRIVER_READY_SPK (0x1 << 12)
#define RT1011_DRIVER_READY_SPK_BIT 12
@@ -667,6 +690,7 @@ struct rt1011_priv {
int bq_drc_set;
unsigned int r0_reg, cali_done;
+ unsigned int r0_calib, temperature_calib;
int recv_spk_mode;
};
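The TDM slot validation above leans on the bitops helpers: hweight_long() counts the requested slots while find_next_bit()/find_last_bit() locate them, so the driver can reject masks with the wrong population or spread. A minimal sketch of that validation, assuming a 32-bit mask and exactly one required rx slot:

```c
#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch: require exactly one rx slot and return its index,
 * mirroring the mask checks used in the TDM configuration above.
 */
static int my_parse_rx_slot(unsigned int rx_mask)
{
	unsigned long mask = rx_mask;

	if (hweight_long(mask) != 1)	/* zero or too many slots */
		return -EINVAL;

	return find_next_bit(&mask, 32, 0);	/* the only set bit */
}
```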
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 892ea406a69b..f1b7b947ecbd 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -201,26 +201,25 @@ static irqreturn_t rt5514_spi_irq(int irq, void *data)
}
/* PCM for streaming audio from the DSP buffer */
-static int rt5514_spi_pcm_open(struct snd_pcm_substream *substream)
+static int rt5514_spi_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_soc_set_runtime_hwparams(substream, &rt5514_spi_pcm_hardware);
return 0;
}
-static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int rt5514_spi_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct rt5514_dsp *rt5514_dsp =
snd_soc_component_get_drvdata(component);
int ret;
u8 buf[8];
mutex_lock(&rt5514_dsp->dma_lock);
- ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
rt5514_dsp->substream = substream;
rt5514_dsp->dma_offset = 0;
@@ -234,10 +233,9 @@ static int rt5514_spi_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static int rt5514_spi_hw_free(struct snd_pcm_substream *substream)
+static int rt5514_spi_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct rt5514_dsp *rt5514_dsp =
snd_soc_component_get_drvdata(component);
@@ -247,28 +245,20 @@ static int rt5514_spi_hw_free(struct snd_pcm_substream *substream)
cancel_delayed_work_sync(&rt5514_dsp->copy_work);
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static snd_pcm_uframes_t rt5514_spi_pcm_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct rt5514_dsp *rt5514_dsp =
snd_soc_component_get_drvdata(component);
return bytes_to_frames(runtime, rt5514_dsp->dma_offset);
}
-static const struct snd_pcm_ops rt5514_spi_pcm_ops = {
- .open = rt5514_spi_pcm_open,
- .hw_params = rt5514_spi_hw_params,
- .hw_free = rt5514_spi_hw_free,
- .pointer = rt5514_spi_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
-};
static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
{
@@ -301,10 +291,22 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
return 0;
}
+static int rt5514_spi_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ return 0;
+}
+
static const struct snd_soc_component_driver rt5514_spi_component = {
- .name = DRV_NAME,
- .probe = rt5514_spi_pcm_probe,
- .ops = &rt5514_spi_pcm_ops,
+ .name = DRV_NAME,
+ .probe = rt5514_spi_pcm_probe,
+ .open = rt5514_spi_pcm_open,
+ .hw_params = rt5514_spi_hw_params,
+ .hw_free = rt5514_spi_hw_free,
+ .pointer = rt5514_spi_pcm_pointer,
+ .pcm_construct = rt5514_spi_pcm_new,
};
/**
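The rt5514 change above tracks the ASoC move from a separate struct snd_pcm_ops to PCM callbacks embedded in the component driver, each now taking the component as its first argument. A minimal sketch of the new shape, with hypothetical callback names (my_open, my_pcm_new, my_component):

```c
#include <sound/pcm.h>
#include <sound/soc.h>

/* Hypothetical sketch of component-level PCM callbacks: the ops that
 * used to live in struct snd_pcm_ops now sit directly in the
 * component driver and receive the component explicitly.
 */
static int my_open(struct snd_soc_component *component,
		   struct snd_pcm_substream *substream)
{
	return 0;	/* set runtime hw constraints here */
}

static int my_pcm_new(struct snd_soc_component *component,
		      struct snd_soc_pcm_runtime *rtd)
{
	/* vmalloc-backed buffers, allocated later in hw_params */
	snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
					      SNDRV_DMA_TYPE_VMALLOC,
					      NULL, 0, 0);
	return 0;
}

static const struct snd_soc_component_driver my_component = {
	.name		= "my-dsp",
	.open		= my_open,
	.pcm_construct	= my_pcm_new,
};
```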
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 1c06b3b9218c..92d67010aeed 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3270,6 +3270,9 @@ static void rt5645_jack_detect_work(struct work_struct *work)
snd_soc_jack_report(rt5645->mic_jack,
report, SND_JACK_MICROPHONE);
return;
+ case 4:
+ val = snd_soc_component_read32(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020;
+ break;
default: /* read rt5645 jd1_1 status */
val = snd_soc_component_read32(rt5645->component, RT5645_INT_IRQ_ST) & 0x1000;
break;
@@ -3603,7 +3606,7 @@ static const struct rt5645_platform_data intel_braswell_platform_data = {
static const struct rt5645_platform_data buddy_platform_data = {
.dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
- .jd_mode = 3,
+ .jd_mode = 4,
.level_trigger_irq = true,
};
@@ -3636,6 +3639,12 @@ static const struct rt5645_platform_data lattepanda_board_platform_data = {
.inv_jd1_1 = true
};
+static const struct rt5645_platform_data kahlee_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
+ .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
+ .jd_mode = 3,
+};
+
static const struct dmi_system_id dmi_platform_data[] = {
{
.ident = "Chrome Buddy",
@@ -3742,6 +3751,13 @@ static const struct dmi_system_id dmi_platform_data[] = {
},
.driver_data = (void *)&lattepanda_board_platform_data,
},
+ {
+ .ident = "Chrome Kahlee",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Kahlee"),
+ },
+ .driver_data = (void *)&kahlee_platform_data,
+ },
{ }
};
@@ -3999,6 +4015,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
RT5645_JD1_MODE_1);
break;
case 3:
+ case 4:
regmap_update_bits(rt5645->regmap, RT5645_A_JD_CTRL1,
RT5645_JD1_MODE_MASK,
RT5645_JD1_MODE_2);
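The Kahlee entry above follows the usual DMI quirk pattern: match on a product name and hang board-specific platform data off driver_data. A minimal sketch of the pattern, with hypothetical table and data names:

```c
#include <linux/dmi.h>

/* Hypothetical sketch of DMI-keyed platform data selection. */
struct my_pdata { int jd_mode; };

static const struct my_pdata my_board_pdata = { .jd_mode = 3 };

static const struct dmi_system_id my_dmi_table[] = {
	{
		.ident = "My Board",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "MyBoard"),
		},
		.driver_data = (void *)&my_board_pdata,
	},
	{ }
};

static const struct my_pdata *my_get_pdata(void)
{
	const struct dmi_system_id *id = dmi_first_match(my_dmi_table);

	return id ? id->driver_data : NULL;
}
```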
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 2943692f66ed..e6c1ec6c426e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -3644,7 +3644,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_1,
RT5663_LDO1_DVO_MASK | RT5663_AMP_HP_MASK,
RT5663_LDO1_DVO_0_9V | RT5663_AMP_HP_3X);
- break;
+ break;
case CODEC_VER_0:
regmap_update_bits(rt5663->regmap, RT5663_DIG_MISC,
RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
@@ -3663,7 +3663,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5663->regmap, RT5663_TDM_2,
RT5663_DATA_SWAP_ADCDAT1_MASK,
RT5663_DATA_SWAP_ADCDAT1_LL);
- break;
+ break;
default:
dev_err(&i2c->dev, "%s:Unknown codec type\n", __func__);
}
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index d681488f5312..7810b1d7de32 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -24,6 +24,9 @@
#include <linux/firmware.h>
#include <linux/acpi.h>
+#include <sound/soc.h>
+
+#include "rt5677.h"
#include "rt5677-spi.h"
#define DRV_NAME "rt5677spi"
@@ -45,9 +48,367 @@
#define RT5677_SPI_WRITE_16 0x1
#define RT5677_SPI_READ_16 0x0
+#define RT5677_BUF_BYTES_TOTAL 0x20000
+#define RT5677_MIC_BUF_ADDR 0x60030000
+#define RT5677_MODEL_ADDR 0x5FFC9800
+#define RT5677_MIC_BUF_BYTES ((u32)(RT5677_BUF_BYTES_TOTAL - \
+ sizeof(u32)))
+#define RT5677_MIC_BUF_FIRST_READ_SIZE 0x10000
+
static struct spi_device *g_spi;
static DEFINE_MUTEX(spi_mutex);
+struct rt5677_dsp {
+ struct device *dev;
+ struct delayed_work copy_work;
+ struct mutex dma_lock;
+ struct snd_pcm_substream *substream;
+ size_t dma_offset; /* zero-based offset into runtime->dma_area */
+ size_t avail_bytes; /* number of new bytes since last period */
+ u32 mic_read_offset; /* zero-based offset into DSP's mic buffer */
+ bool new_hotword; /* a new hotword is fired */
+};
+
+static const struct snd_pcm_hardware rt5677_spi_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = RT5677_BUF_BYTES_TOTAL / 8,
+ .periods_min = 8,
+ .periods_max = 8,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = RT5677_BUF_BYTES_TOTAL,
+};
+
+static struct snd_soc_dai_driver rt5677_spi_dai = {
+ /* The DAI name "rt5677-dsp-cpu-dai" is not used. The actual DAI name
+ * registered with ASoC is the name of the device "spi-RT5677AA:00",
+ * because we only have one DAI. See snd_soc_register_dais().
+ */
+ .name = "rt5677-dsp-cpu-dai",
+ .id = 0,
+ .capture = {
+ .stream_name = "DSP Capture",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+};
+
+/* PCM for streaming audio from the DSP buffer */
+static int rt5677_spi_pcm_open(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &rt5677_spi_pcm_hardware);
+ return 0;
+}
+
+static int rt5677_spi_pcm_close(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *codec_component =
+ snd_soc_rtdcom_lookup(rtd, "rt5677");
+ struct rt5677_priv *rt5677 =
+ snd_soc_component_get_drvdata(codec_component);
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ cancel_delayed_work_sync(&rt5677_dsp->copy_work);
+ rt5677->set_dsp_vad(codec_component, false);
+ return 0;
+}
+
+static int rt5677_spi_hw_params(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ mutex_lock(&rt5677_dsp->dma_lock);
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+ rt5677_dsp->substream = substream;
+ mutex_unlock(&rt5677_dsp->dma_lock);
+
+ return ret;
+}
+
+static int rt5677_spi_hw_free(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ mutex_lock(&rt5677_dsp->dma_lock);
+ rt5677_dsp->substream = NULL;
+ mutex_unlock(&rt5677_dsp->dma_lock);
+
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int rt5677_spi_prepare(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *rt5677_component =
+ snd_soc_rtdcom_lookup(rtd, "rt5677");
+ struct rt5677_priv *rt5677 =
+ snd_soc_component_get_drvdata(rt5677_component);
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ rt5677->set_dsp_vad(rt5677_component, true);
+ rt5677_dsp->dma_offset = 0;
+ rt5677_dsp->avail_bytes = 0;
+ return 0;
+}
+
+static snd_pcm_uframes_t rt5677_spi_pcm_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct rt5677_dsp *rt5677_dsp =
+ snd_soc_component_get_drvdata(component);
+
+ return bytes_to_frames(runtime, rt5677_dsp->dma_offset);
+}
+
+static int rt5677_spi_mic_write_offset(u32 *mic_write_offset)
+{
+ int ret;
+ /* Grab the first 4 bytes, which hold the write pointer on the
+ * DSP, and check that it points somewhere inside the buffer.
+ */
+ ret = rt5677_spi_read(RT5677_MIC_BUF_ADDR, mic_write_offset,
+ sizeof(u32));
+ if (ret)
+ return ret;
+ /* Adjust the offset so that it's zero-based */
+ *mic_write_offset = *mic_write_offset - sizeof(u32);
+ return *mic_write_offset < RT5677_MIC_BUF_BYTES ? 0 : -EFAULT;
+}
+
+/*
+ * Copy one contiguous block of audio samples from the DSP mic buffer to the
+ * dma_area of the pcm runtime. The receiving buffer may wrap around.
+ * @begin: start offset of the block to copy, in bytes.
+ * @end: offset of the first byte after the block to copy, must be greater
+ * than or equal to begin.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+static int rt5677_spi_copy_block(struct rt5677_dsp *rt5677_dsp,
+ u32 begin, u32 end)
+{
+ struct snd_pcm_runtime *runtime = rt5677_dsp->substream->runtime;
+ size_t bytes_per_frame = frames_to_bytes(runtime, 1);
+ size_t first_chunk_len, second_chunk_len;
+ int ret;
+
+ if (begin > end || runtime->dma_bytes < 2 * bytes_per_frame) {
+ dev_err(rt5677_dsp->dev,
+ "Invalid copy from (%u, %u), dma_area size %zu\n",
+ begin, end, runtime->dma_bytes);
+ return -EINVAL;
+ }
+
+ /* The block to copy is empty */
+ if (begin == end)
+ return 0;
+
+ /* If the incoming chunk is too big for the receiving buffer, only the
+ * last "receiving buffer size - one frame" bytes are copied.
+ */
+ if (end - begin > runtime->dma_bytes - bytes_per_frame)
+ begin = end - (runtime->dma_bytes - bytes_per_frame);
+
+ /* May need to split to two chunks, calculate the size of each */
+ first_chunk_len = end - begin;
+ second_chunk_len = 0;
+ if (rt5677_dsp->dma_offset + first_chunk_len > runtime->dma_bytes) {
+ /* Receiving buffer wrapped around */
+ second_chunk_len = first_chunk_len;
+ first_chunk_len = runtime->dma_bytes - rt5677_dsp->dma_offset;
+ second_chunk_len -= first_chunk_len;
+ }
+
+ /* Copy first chunk */
+ ret = rt5677_spi_read(RT5677_MIC_BUF_ADDR + sizeof(u32) + begin,
+ runtime->dma_area + rt5677_dsp->dma_offset,
+ first_chunk_len);
+ if (ret)
+ return ret;
+ rt5677_dsp->dma_offset += first_chunk_len;
+ if (rt5677_dsp->dma_offset == runtime->dma_bytes)
+ rt5677_dsp->dma_offset = 0;
+
+ /* Copy second chunk */
+ if (second_chunk_len) {
+ ret = rt5677_spi_read(RT5677_MIC_BUF_ADDR + sizeof(u32) +
+ begin + first_chunk_len, runtime->dma_area,
+ second_chunk_len);
+ if (!ret)
+ rt5677_dsp->dma_offset = second_chunk_len;
+ }
+ return ret;
+}
+
+/*
+ * Copy a given amount of audio samples from the DSP mic buffer starting at
+ * mic_read_offset, to the dma_area of the pcm runtime. The source buffer may
+ * wrap around. mic_read_offset is updated after successful copy.
+ * @amount: amount of samples to copy, in bytes.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+static int rt5677_spi_copy(struct rt5677_dsp *rt5677_dsp, u32 amount)
+{
+ int ret = 0;
+ u32 target;
+
+ if (amount == 0)
+ return ret;
+
+ target = rt5677_dsp->mic_read_offset + amount;
+ /* Copy the first chunk in DSP's mic buffer */
+ ret |= rt5677_spi_copy_block(rt5677_dsp, rt5677_dsp->mic_read_offset,
+ min(target, RT5677_MIC_BUF_BYTES));
+
+ if (target >= RT5677_MIC_BUF_BYTES) {
+ /* Wrap around, copy the second chunk */
+ target -= RT5677_MIC_BUF_BYTES;
+ ret |= rt5677_spi_copy_block(rt5677_dsp, 0, target);
+ }
+
+ if (!ret)
+ rt5677_dsp->mic_read_offset = target;
+ return ret;
+}
+
+/*
+ * A delayed work that streams audio samples from the DSP mic buffer to the
+ * dma_area of the pcm runtime via SPI.
+ */
+static void rt5677_spi_copy_work(struct work_struct *work)
+{
+ struct rt5677_dsp *rt5677_dsp =
+ container_of(work, struct rt5677_dsp, copy_work.work);
+ struct snd_pcm_runtime *runtime;
+ u32 mic_write_offset;
+ size_t new_bytes, copy_bytes, period_bytes;
+ unsigned int delay;
+ int ret = 0;
+
+ /* Ensure runtime->dma_area buffer does not go away while copying. */
+ mutex_lock(&rt5677_dsp->dma_lock);
+ if (!rt5677_dsp->substream) {
+ dev_err(rt5677_dsp->dev, "No pcm substream\n");
+ goto done;
+ }
+
+ runtime = rt5677_dsp->substream->runtime;
+
+ if (rt5677_spi_mic_write_offset(&mic_write_offset)) {
+ dev_err(rt5677_dsp->dev, "No mic_write_offset\n");
+ goto done;
+ }
+
+ /* If this is the first request for streaming data after a hotword
+ * fired, start reading from the 2 seconds of audio preceding the
+ * current mic_write_offset.
+ */
+ if (rt5677_dsp->new_hotword) {
+ rt5677_dsp->new_hotword = false;
+ /* See if buffer wraparound happens */
+ if (mic_write_offset < RT5677_MIC_BUF_FIRST_READ_SIZE)
+ rt5677_dsp->mic_read_offset = RT5677_MIC_BUF_BYTES -
+ (RT5677_MIC_BUF_FIRST_READ_SIZE -
+ mic_write_offset);
+ else
+ rt5677_dsp->mic_read_offset = mic_write_offset -
+ RT5677_MIC_BUF_FIRST_READ_SIZE;
+ }
+
+ /* Calculate the amount of new samples in bytes */
+ if (rt5677_dsp->mic_read_offset <= mic_write_offset)
+ new_bytes = mic_write_offset - rt5677_dsp->mic_read_offset;
+ else
+ new_bytes = RT5677_MIC_BUF_BYTES + mic_write_offset
+ - rt5677_dsp->mic_read_offset;
+
+ /* Copy all new samples from DSP mic buffer, one period at a time */
+ period_bytes = snd_pcm_lib_period_bytes(rt5677_dsp->substream);
+ while (new_bytes) {
+ copy_bytes = min(new_bytes, period_bytes
+ - rt5677_dsp->avail_bytes);
+ ret = rt5677_spi_copy(rt5677_dsp, copy_bytes);
+ if (ret) {
+ dev_err(rt5677_dsp->dev, "Copy failed %d\n", ret);
+ goto done;
+ }
+ rt5677_dsp->avail_bytes += copy_bytes;
+ if (rt5677_dsp->avail_bytes >= period_bytes) {
+ snd_pcm_period_elapsed(rt5677_dsp->substream);
+ rt5677_dsp->avail_bytes = 0;
+ }
+ new_bytes -= copy_bytes;
+ }
+
+ delay = bytes_to_frames(runtime, period_bytes) / (runtime->rate / 1000);
+ schedule_delayed_work(&rt5677_dsp->copy_work, msecs_to_jiffies(delay));
+done:
+ mutex_unlock(&rt5677_dsp->dma_lock);
+}
+
+static int rt5677_spi_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ return 0;
+}
+
+static int rt5677_spi_pcm_probe(struct snd_soc_component *component)
+{
+ struct rt5677_dsp *rt5677_dsp;
+
+ rt5677_dsp = devm_kzalloc(component->dev, sizeof(*rt5677_dsp),
+ GFP_KERNEL);
+ if (!rt5677_dsp)
+ return -ENOMEM;
+ rt5677_dsp->dev = &g_spi->dev;
+ mutex_init(&rt5677_dsp->dma_lock);
+ INIT_DELAYED_WORK(&rt5677_dsp->copy_work, rt5677_spi_copy_work);
+
+ snd_soc_component_set_drvdata(component, rt5677_dsp);
+ return 0;
+}
+
+static const struct snd_soc_component_driver rt5677_spi_dai_component = {
+ .name = DRV_NAME,
+ .probe = rt5677_spi_pcm_probe,
+ .open = rt5677_spi_pcm_open,
+ .close = rt5677_spi_pcm_close,
+ .hw_params = rt5677_spi_hw_params,
+ .hw_free = rt5677_spi_hw_free,
+ .prepare = rt5677_spi_prepare,
+ .pointer = rt5677_spi_pcm_pointer,
+ .pcm_construct = rt5677_spi_pcm_new,
+};
+
/* Select a suitable transfer command for the next transfer to ensure
* the transfer address is always naturally aligned while minimizing
* the total number of transfers required.
@@ -218,9 +579,45 @@ int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
}
EXPORT_SYMBOL_GPL(rt5677_spi_write_firmware);
+void rt5677_spi_hotword_detected(void)
+{
+ struct rt5677_dsp *rt5677_dsp;
+
+ if (!g_spi)
+ return;
+
+ rt5677_dsp = dev_get_drvdata(&g_spi->dev);
+ if (!rt5677_dsp) {
+ dev_err(&g_spi->dev, "Can't get rt5677_dsp\n");
+ return;
+ }
+
+ mutex_lock(&rt5677_dsp->dma_lock);
+ dev_info(rt5677_dsp->dev, "Hotword detected\n");
+ rt5677_dsp->new_hotword = true;
+ mutex_unlock(&rt5677_dsp->dma_lock);
+
+ schedule_delayed_work(&rt5677_dsp->copy_work, 0);
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_hotword_detected);
+
static int rt5677_spi_probe(struct spi_device *spi)
{
+ int ret;
+
g_spi = spi;
+
+ ret = snd_soc_register_component(&spi->dev, &rt5677_spi_dai_component,
+ &rt5677_spi_dai, 1);
+ if (ret < 0)
+ dev_err(&spi->dev, "Failed to register component.\n");
+
+ return ret;
+}
+
+static int rt5677_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_component(&spi->dev);
return 0;
}
@@ -236,6 +633,7 @@ static struct spi_driver rt5677_spi_driver = {
.acpi_match_table = ACPI_PTR(rt5677_spi_acpi_id),
},
.probe = rt5677_spi_probe,
+ .remove = rt5677_spi_remove,
};
module_spi_driver(rt5677_spi_driver);
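Most of the streaming logic above is circular-buffer arithmetic: the DSP's write offset and the driver's read offset live in a fixed ring, and every copy may have to split into two chunks at the wrap point. A standalone sketch of that split, independent of the SPI transport:

```c
#include <stddef.h>

/* Sketch: split a copy of `len` bytes starting at `pos` in a ring of
 * `size` bytes into at most two contiguous chunks. Pure arithmetic;
 * the caller performs the actual reads.
 */
struct chunk { size_t off, len; };

static int ring_split(size_t pos, size_t len, size_t size,
		      struct chunk out[2])
{
	if (pos >= size || len > size)
		return -1;		/* invalid request */

	out[0].off = pos;
	out[0].len = (len <= size - pos) ? len : size - pos;
	out[1].off = 0;
	out[1].len = len - out[0].len;	/* wrapped remainder, may be 0 */
	return (out[1].len > 0) ? 2 : 1;
}
```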
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
index 6ba3369dc235..3af36ec928e9 100644
--- a/sound/soc/codecs/rt5677-spi.h
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -12,5 +12,6 @@
int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
+void rt5677_spi_hotword_detected(void);
#endif /* __RT5677_SPI_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 315a3d39bc09..e9a051a50ab2 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -38,6 +38,10 @@
#define RT5677_DEVICE_ID 0x6327
+/* Register controlling boot vector */
+#define RT5677_DSP_BOOT_VECTOR 0x1801f090
+#define RT5677_MODEL_ADDR 0x5FFC9800
+
#define RT5677_PR_RANGE_BASE (0xff + 1)
#define RT5677_PR_SPACING 0x100
@@ -298,6 +302,7 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
case RT5677_I2C_MASTER_CTRL7:
case RT5677_I2C_MASTER_CTRL8:
case RT5677_HAP_GENE_CTRL2:
+ case RT5677_PWR_ANLG2: /* Modified by DSP firmware */
case RT5677_PWR_DSP_ST:
case RT5677_PRIV_DATA:
case RT5677_ASRC_22:
@@ -308,6 +313,8 @@ static bool rt5677_volatile_register(struct device *dev, unsigned int reg)
case RT5677_IRQ_CTRL1:
case RT5677_IRQ_CTRL2:
case RT5677_GPIO_ST:
+ case RT5677_GPIO_CTRL1: /* Modified by DSP firmware */
+ case RT5677_GPIO_CTRL2: /* Modified by DSP firmware */
case RT5677_DSP_INB1_SRC_CTRL4:
case RT5677_DSP_INB2_SRC_CTRL4:
case RT5677_DSP_INB3_SRC_CTRL4:
@@ -686,10 +693,8 @@ static int rt5677_dsp_mode_i2c_read(
return ret;
}
-static void rt5677_set_dsp_mode(struct snd_soc_component *component, bool on)
+static void rt5677_set_dsp_mode(struct rt5677_priv *rt5677, bool on)
{
- struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
-
if (on) {
regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1,
RT5677_PWR_DSP, RT5677_PWR_DSP);
@@ -701,86 +706,259 @@ static void rt5677_set_dsp_mode(struct snd_soc_component *component, bool on)
}
}
+static unsigned int rt5677_set_vad_source(struct rt5677_priv *rt5677)
+{
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(rt5677->component);
+ /* Force DAPM to sync before we enable the DSP,
+ * to prevent write corruption.
+ */
+ snd_soc_dapm_sync(dapm);
+
+ /* DMIC1 power = enabled
+ * DMIC CLK = 256 * fs / 12
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_DMIC_CTRL1,
+ RT5677_DMIC_CLK_MASK, 5 << RT5677_DMIC_CLK_SFT);
+
+ /* I2S pre divide 2 = /6 (clk_sys2) */
+ regmap_update_bits(rt5677->regmap, RT5677_CLK_TREE_CTRL1,
+ RT5677_I2S_PD2_MASK, RT5677_I2S_PD2_6);
+
+ /* DSP Clock = MCLK1 (bypassed PLL2) */
+ regmap_write(rt5677->regmap, RT5677_GLB_CLK2,
+ RT5677_DSP_CLK_SRC_BYPASS);
+
+ /* SAD Threshold1 */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL2, 0x013f);
+ /* SAD Threshold2 */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL3, 0x0ae5);
+ /* SAD Sample Rate Converter = Up 6 (8K to 48K)
+ * SAD Output Sample Rate = Same as I2S
+ * SAD Threshold3
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_VAD_CTRL4,
+ RT5677_VAD_OUT_SRC_RATE_MASK | RT5677_VAD_OUT_SRC_MASK |
+ RT5677_VAD_LV_DIFF_MASK, 0x7f << RT5677_VAD_LV_DIFF_SFT);
+ /* Minimum frame level within a pre-determined duration = 32 frames
+ * Bypass ADPCM Encoder/Decoder = Bypass ADPCM
+ * Automatic Push Data to SAD Buffer Once SAD Flag is triggered = enable
+ * SAD Buffer Over-Writing = enable
+ * SAD Buffer Pop Mode Control = disable
+ * SAD Buffer Push Mode Control = enable
+ * SAD Detector Control = enable
+ * SAD Function Control = enable
+ * SAD Function Reset = normal
+ */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL1,
+ RT5677_VAD_FUNC_RESET | RT5677_VAD_FUNC_ENABLE |
+ RT5677_VAD_DET_ENABLE | RT5677_VAD_BUF_PUSH |
+ RT5677_VAD_BUF_OW | RT5677_VAD_FG2ENC |
+ RT5677_VAD_ADPCM_BYPASS | 1 << RT5677_VAD_MIN_DUR_SFT);
+
+ /* VAD/SAD is not routed to the IRQ output (i.e. MX-BE[14] = 0), but it
+ * is routed to DSP_IRQ_0, so DSP firmware may use it to sleep and save
+ * power. See ALC5677 datasheet section 9.17 "GPIO, Interrupt and Jack
+ * Detection" for more info.
+ */
+
+ /* Private register, no doc */
+ regmap_update_bits(rt5677->regmap, RT5677_PR_BASE + RT5677_BIAS_CUR4,
+ 0x0f00, 0x0100);
+
+ /* LDO2 output = 1.2V
+ * LDO1 output = 1.2V (LDO_IN = 1.8V)
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
+ RT5677_LDO1_SEL_MASK | RT5677_LDO2_SEL_MASK,
+ 5 << RT5677_LDO1_SEL_SFT | 5 << RT5677_LDO2_SEL_SFT);
+
+ /* Codec core power = power on
+ * LDO1 power = power on
+ */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
+ RT5677_PWR_CORE | RT5677_PWR_LDO1,
+ RT5677_PWR_CORE | RT5677_PWR_LDO1);
+
+ /* Isolation for DCVDD4 = normal (set during probe)
+ * Isolation for DCVDD2 = normal (set during probe)
+ * Isolation for DSP = normal
+ * Isolation for Band 0~7 = disable
+ * Isolation for InBound 4~10 and OutBound 4~10 = disable
+ */
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP2,
+ RT5677_PWR_CORE_ISO | RT5677_PWR_DSP_ISO |
+ RT5677_PWR_SR7_ISO | RT5677_PWR_SR6_ISO |
+ RT5677_PWR_SR5_ISO | RT5677_PWR_SR4_ISO |
+ RT5677_PWR_SR3_ISO | RT5677_PWR_SR2_ISO |
+ RT5677_PWR_SR1_ISO | RT5677_PWR_SR0_ISO |
+ RT5677_PWR_MLT_ISO);
+
+ /* System Band 0~7 = power on
+ * InBound 4~10 and OutBound 4~10 = power on
+ * DSP = power on
+ * DSP CPU = stop (will be set to "run" after firmware loaded)
+ */
+ regmap_write(rt5677->regmap, RT5677_PWR_DSP1,
+ RT5677_PWR_SR7 | RT5677_PWR_SR6 |
+ RT5677_PWR_SR5 | RT5677_PWR_SR4 |
+ RT5677_PWR_SR3 | RT5677_PWR_SR2 |
+ RT5677_PWR_SR1 | RT5677_PWR_SR0 |
+ RT5677_PWR_MLT | RT5677_PWR_DSP |
+ RT5677_PWR_DSP_CPU);
+
+ return 0;
+}
+
+static int rt5677_parse_and_load_dsp(struct rt5677_priv *rt5677, const u8 *buf,
+ unsigned int len)
+{
+ struct snd_soc_component *component = rt5677->component;
+ Elf32_Ehdr *elf_hdr;
+ Elf32_Phdr *pr_hdr;
+ Elf32_Half i;
+ int ret = 0;
+
+ if (!buf || (len < sizeof(Elf32_Ehdr)))
+ return -ENOMEM;
+
+ elf_hdr = (Elf32_Ehdr *)buf;
+#ifndef EM_XTENSA
+#define EM_XTENSA 94
+#endif
+ if (strncmp(elf_hdr->e_ident, ELFMAG, sizeof(ELFMAG) - 1))
+ dev_err(component->dev, "Wrong ELF header prefix\n");
+ if (elf_hdr->e_ehsize != sizeof(Elf32_Ehdr))
+ dev_err(component->dev, "Wrong Elf header size\n");
+ if (elf_hdr->e_machine != EM_XTENSA)
+ dev_err(component->dev, "Wrong DSP code file\n");
+
+ if (len < elf_hdr->e_phoff)
+ return -ENOMEM;
+ pr_hdr = (Elf32_Phdr *)(buf + elf_hdr->e_phoff);
+ for (i = 0; i < elf_hdr->e_phnum; i++) {
+ /* TODO: handle p_memsz != p_filesz */
+ if (pr_hdr->p_paddr && pr_hdr->p_filesz) {
+ dev_info(component->dev, "Load 0x%x bytes to 0x%x\n",
+ pr_hdr->p_filesz, pr_hdr->p_paddr);
+
+ ret = rt5677_spi_write(pr_hdr->p_paddr,
+ buf + pr_hdr->p_offset,
+ pr_hdr->p_filesz);
+ if (ret)
+ dev_err(component->dev, "Load firmware failed %d\n",
+ ret);
+ }
+ pr_hdr++;
+ }
+ return ret;
+}
+
+static int rt5677_load_dsp_from_file(struct rt5677_priv *rt5677)
+{
+ const struct firmware *fwp;
+ struct device *dev = rt5677->component->dev;
+ int ret = 0;
+
+ /* Load dsp firmware from rt5677_elf_vad file */
+ ret = request_firmware(&fwp, "rt5677_elf_vad", dev);
+ if (ret) {
+ dev_err(dev, "Request rt5677_elf_vad failed %d\n", ret);
+ return ret;
+ }
+	dev_info(dev, "Requested rt5677_elf_vad (%zu bytes)\n", fwp->size);
+
+ ret = rt5677_parse_and_load_dsp(rt5677, fwp->data, fwp->size);
+ release_firmware(fwp);
+ return ret;
+}
+
static int rt5677_set_dsp_vad(struct snd_soc_component *component, bool on)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
- static bool activity;
- int ret;
+ rt5677->dsp_vad_en_request = on;
+ rt5677->dsp_vad_en = on;
if (!IS_ENABLED(CONFIG_SND_SOC_RT5677_SPI))
return -ENXIO;
- if (on && !activity) {
+ schedule_delayed_work(&rt5677->dsp_work, 0);
+ return 0;
+}
+
+static void rt5677_dsp_work(struct work_struct *work)
+{
+ struct rt5677_priv *rt5677 =
+ container_of(work, struct rt5677_priv, dsp_work.work);
+ static bool activity;
+ bool enable = rt5677->dsp_vad_en;
+ int i, val;
+
+ dev_info(rt5677->component->dev, "DSP VAD: enable=%d, activity=%d\n",
+ enable, activity);
+
+ if (enable && !activity) {
activity = true;
- regcache_cache_only(rt5677->regmap, false);
- regcache_cache_bypass(rt5677->regmap, true);
+ /* Before a hotword is detected, GPIO1 pin is configured as IRQ
+ * output so that jack detect works. When a hotword is detected,
+ * the DSP firmware configures the GPIO1 pin as GPIO1 and
+ * drives a 1. rt5677_irq() is called after a rising edge on
+ * the GPIO1 pin, due to either jack detect event or hotword
+ * event, or both. All possible events are checked and handled
+ * in rt5677_irq() where GPIO1 pin is configured back to IRQ
+ * output if a hotword is detected.
+ */
- regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x1, 0x1);
- regmap_update_bits(rt5677->regmap,
- RT5677_PR_BASE + RT5677_BIAS_CUR4, 0x0f00, 0x0f00);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
- RT5677_LDO1_SEL_MASK, 0x0);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG2,
- RT5677_PWR_LDO1, RT5677_PWR_LDO1);
- switch (rt5677->type) {
- case RT5677:
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK1,
- RT5677_MCLK_SRC_MASK, RT5677_MCLK2_SRC);
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
- RT5677_PLL2_PR_SRC_MASK |
- RT5677_DSP_CLK_SRC_MASK,
- RT5677_PLL2_PR_SRC_MCLK2 |
- RT5677_DSP_CLK_SRC_BYPASS);
- break;
- case RT5676:
- regmap_update_bits(rt5677->regmap, RT5677_GLB_CLK2,
- RT5677_DSP_CLK_SRC_MASK,
- RT5677_DSP_CLK_SRC_BYPASS);
- break;
- default:
- break;
+ rt5677_set_vad_source(rt5677);
+ rt5677_set_dsp_mode(rt5677, true);
+
+#define RT5677_BOOT_RETRY 20
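+		/* Poll until all DSP power status bits (0x3ff) read back as
+		 * set; 20 polls of 500us bound the wait to roughly 10ms.
+		 */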
+ for (i = 0; i < RT5677_BOOT_RETRY; i++) {
+ regmap_read(rt5677->regmap, RT5677_PWR_DSP_ST, &val);
+ if (val == 0x3ff)
+ break;
+ udelay(500);
}
- regmap_write(rt5677->regmap, RT5677_PWR_DSP2, 0x07ff);
- regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x07fd);
- rt5677_set_dsp_mode(component, true);
-
- ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
- component->dev);
- if (ret == 0) {
- rt5677_spi_write_firmware(0x50000000, rt5677->fw1);
- release_firmware(rt5677->fw1);
+ if (i == RT5677_BOOT_RETRY && val != 0x3ff) {
+			dev_err(rt5677->component->dev, "DSP Boot Timed Out!\n");
+ return;
}
- ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
- component->dev);
- if (ret == 0) {
- rt5677_spi_write_firmware(0x60000000, rt5677->fw2);
- release_firmware(rt5677->fw2);
- }
+ /* Boot the firmware from IRAM instead of SRAM0. */
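+		/* Writing the vector with bit 4 toggled in between presumably
+		 * latches the new value; the exact handshake follows the
+		 * vendor init sequence and is not publicly documented.
+		 */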
+ rt5677_dsp_mode_i2c_write_addr(rt5677, RT5677_DSP_BOOT_VECTOR,
+ 0x0009, 0x0003);
+ rt5677_dsp_mode_i2c_write_addr(rt5677, RT5677_DSP_BOOT_VECTOR,
+ 0x0019, 0x0003);
+ rt5677_dsp_mode_i2c_write_addr(rt5677, RT5677_DSP_BOOT_VECTOR,
+ 0x0009, 0x0003);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x1, 0x0);
+ rt5677_load_dsp_from_file(rt5677);
- regcache_cache_bypass(rt5677->regmap, false);
- regcache_cache_only(rt5677->regmap, true);
- } else if (!on && activity) {
+ /* Set DSP CPU to Run */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1,
+ RT5677_PWR_DSP_CPU, 0x0);
+ } else if (!enable && activity) {
activity = false;
- regcache_cache_only(rt5677->regmap, false);
- regcache_cache_bypass(rt5677->regmap, true);
+ /* Don't turn off the DSP while handling irqs */
+ mutex_lock(&rt5677->irq_lock);
+ /* Set DSP CPU to Stop */
+ regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1,
+ RT5677_PWR_DSP_CPU, RT5677_PWR_DSP_CPU);
- regmap_update_bits(rt5677->regmap, RT5677_PWR_DSP1, 0x1, 0x1);
- rt5677_set_dsp_mode(component, false);
- regmap_write(rt5677->regmap, RT5677_PWR_DSP1, 0x0001);
+ rt5677_set_dsp_mode(rt5677, false);
- regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
+ /* Disable and clear VAD interrupt */
+ regmap_write(rt5677->regmap, RT5677_VAD_CTRL1, 0x2184);
- regcache_cache_bypass(rt5677->regmap, false);
- regcache_mark_dirty(rt5677->regmap);
- regcache_sync(rt5677->regmap);
- }
+ /* Set GPIO1 pin back to be IRQ output for jack detect */
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL1,
+ RT5677_GPIO1_PIN_MASK, RT5677_GPIO1_PIN_IRQ);
- return 0;
+ mutex_unlock(&rt5677->irq_lock);
+ }
}
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
@@ -805,7 +983,7 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
- ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
+ ucontrol->value.integer.value[0] = rt5677->dsp_vad_en_request;
return 0;
}
@@ -814,12 +992,8 @@ static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
-
- rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
- rt5677_set_dsp_vad(component, rt5677->dsp_vad_en);
+ rt5677_set_dsp_vad(component, !!ucontrol->value.integer.value[0]);
return 0;
}
@@ -3010,6 +3184,7 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("AIF4TX", "AIF4 Capture", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("SLBRX", "SLIMBus Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_OUT("SLBTX", "SLIMBus Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DSPTX", "DSP Buffer", 0, SND_SOC_NOPM, 0, 0),
/* Sidetone Mux */
SND_SOC_DAPM_MUX("Sidetone Mux", SND_SOC_NOPM, 0, 0,
@@ -3544,11 +3719,24 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = {
{ "SLBTX", NULL, "SLB ADC3 Mux" },
{ "SLBTX", NULL, "SLB ADC4 Mux" },
+ { "DSPTX", NULL, "IB01 Bypass Mux" },
+
{ "IB01 Mux", "IF1 DAC 01", "IF1 DAC01" },
{ "IB01 Mux", "IF2 DAC 01", "IF2 DAC01" },
{ "IB01 Mux", "SLB DAC 01", "SLB DAC01" },
{ "IB01 Mux", "STO1 ADC MIX", "Stereo1 ADC MIX" },
- { "IB01 Mux", "VAD ADC/DAC1 FS", "DAC1 FS" },
+ /* The IB01 Mux controls the source for InBound0 and InBound1.
+ * When the mux option "VAD ADC/DAC1 FS" is selected, "VAD ADC" goes to
+ * InBound0 and "DAC1 FS" goes to InBound1. "VAD ADC" is used for
+	 * hotword detection. "DAC1 FS" is currently unused.
+ *
+ * Creating a common widget node for "VAD ADC" + "DAC1 FS" and
+ * connecting the common widget to IB01 Mux causes the issue where
+ * there is an active path going from system playback -> "DAC1 FS" ->
+ * IB01 Mux -> DSP Buffer -> hotword stream. This wrong path confuses
+ * DAPM. Therefore "DAC1 FS" is ignored for now.
+ */
+ { "IB01 Mux", "VAD ADC/DAC1 FS", "VAD ADC Mux" },
{ "IB01 Bypass Mux", "Bypass", "IB01 Mux" },
{ "IB01 Bypass Mux", "Pass SRC", "IB01 Mux" },
@@ -4457,14 +4645,15 @@ static int rt5677_set_bias_level(struct snd_soc_component *component,
enum snd_soc_bias_level level)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ enum snd_soc_bias_level prev_bias =
+ snd_soc_component_get_bias_level(component);
switch (level) {
case SND_SOC_BIAS_ON:
break;
case SND_SOC_BIAS_PREPARE:
- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_STANDBY) {
- rt5677_set_dsp_vad(component, false);
+ if (prev_bias == SND_SOC_BIAS_STANDBY) {
regmap_update_bits(rt5677->regmap, RT5677_PWR_ANLG1,
RT5677_LDO1_SEL_MASK | RT5677_LDO2_SEL_MASK,
@@ -4488,9 +4677,25 @@ static int rt5677_set_bias_level(struct snd_soc_component *component,
break;
case SND_SOC_BIAS_STANDBY:
+ if (prev_bias == SND_SOC_BIAS_OFF &&
+ rt5677->dsp_vad_en_request) {
+ /* Re-enable the DSP if it was turned off at suspend */
+ rt5677->dsp_vad_en = true;
+ /* The delay is to wait for MCLK */
+ schedule_delayed_work(&rt5677->dsp_work,
+ msecs_to_jiffies(1000));
+ }
break;
case SND_SOC_BIAS_OFF:
+ flush_delayed_work(&rt5677->dsp_work);
+ if (rt5677->is_dsp_mode) {
+ /* Turn off the DSP before suspend */
+ rt5677->dsp_vad_en = false;
+ schedule_delayed_work(&rt5677->dsp_work, 0);
+ flush_delayed_work(&rt5677->dsp_work);
+ }
+
regmap_update_bits(rt5677->regmap, RT5677_DIG_MISC, 0x1, 0x0);
regmap_write(rt5677->regmap, RT5677_PWR_DIG1, 0x0000);
regmap_write(rt5677->regmap, RT5677_PWR_ANLG1,
@@ -4740,6 +4945,8 @@ static void rt5677_remove(struct snd_soc_component *component)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ cancel_delayed_work_sync(&rt5677->dsp_work);
+
regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
gpiod_set_value_cansleep(rt5677->reset_pin, 1);
@@ -4750,6 +4957,11 @@ static int rt5677_suspend(struct snd_soc_component *component)
{
struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ if (rt5677->irq) {
+ cancel_delayed_work_sync(&rt5677->resume_irq_check);
+ disable_irq(rt5677->irq);
+ }
+
if (!rt5677->dsp_vad_en) {
regcache_cache_only(rt5677->regmap, true);
regcache_mark_dirty(rt5677->regmap);
@@ -4778,6 +4990,11 @@ static int rt5677_resume(struct snd_soc_component *component)
regcache_sync(rt5677->regmap);
}
+ if (rt5677->irq) {
+ enable_irq(rt5677->irq);
+ schedule_delayed_work(&rt5677->resume_irq_check, 0);
+ }
+
return 0;
}
#else
@@ -4842,6 +5059,11 @@ static const struct snd_soc_dai_ops rt5677_aif_dai_ops = {
.set_tdm_slot = rt5677_set_tdm_slot,
};
+static const struct snd_soc_dai_ops rt5677_dsp_dai_ops = {
+ .set_sysclk = rt5677_set_dai_sysclk,
+ .set_pll = rt5677_set_dai_pll,
+};
+
static struct snd_soc_dai_driver rt5677_dai[] = {
{
.name = "rt5677-aif1",
@@ -4938,6 +5160,18 @@ static struct snd_soc_dai_driver rt5677_dai[] = {
},
.ops = &rt5677_aif_dai_ops,
},
+ {
+ .name = "rt5677-dspbuffer",
+ .id = RT5677_DSPBUFF,
+ .capture = {
+ .stream_name = "DSP Buffer",
+ .channels_min = 1,
+ .channels_max = 1,
+ .rates = SNDRV_PCM_RATE_16000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &rt5677_dsp_dai_ops,
+ },
};
static const struct snd_soc_component_driver soc_component_dev_rt5677 = {
@@ -5073,6 +5307,28 @@ static const struct rt5677_irq_desc rt5677_irq_descs[] = {
},
};
+static bool rt5677_check_hotword(struct rt5677_priv *rt5677)
+{
+ int reg_gpio;
+
+ if (!rt5677->is_dsp_mode)
+ return false;
+
+ if (regmap_read(rt5677->regmap, RT5677_GPIO_CTRL1, &reg_gpio))
+ return false;
+
+ /* Firmware sets GPIO1 pin to be GPIO1 after hotword is detected */
+ if ((reg_gpio & RT5677_GPIO1_PIN_MASK) == RT5677_GPIO1_PIN_IRQ)
+ return false;
+
+ /* Set GPIO1 pin back to be IRQ output for jack detect */
+ regmap_update_bits(rt5677->regmap, RT5677_GPIO_CTRL1,
+ RT5677_GPIO1_PIN_MASK, RT5677_GPIO1_PIN_IRQ);
+
+ rt5677_spi_hotword_detected();
+ return true;
+}
+
static irqreturn_t rt5677_irq(int unused, void *data)
{
struct rt5677_priv *rt5677 = data;
@@ -5118,7 +5374,13 @@ static irqreturn_t rt5677_irq(int unused, void *data)
reg_irq ^= rt5677_irq_descs[i].polarity_mask;
}
}
- if (!irq_fired)
+
+ /* Exit the loop only when we know for sure that GPIO1 pin
+ * was low at some point since irq_lock was acquired. Any event
+ * after that point creates a rising edge that triggers another
+ * call to rt5677_irq().
+ */
+ if (!irq_fired && !rt5677_check_hotword(rt5677))
goto exit;
ret = regmap_write(rt5677->regmap, RT5677_IRQ_CTRL1, reg_irq);
@@ -5129,6 +5391,7 @@ static irqreturn_t rt5677_irq(int unused, void *data)
}
}
exit:
+ WARN_ON_ONCE(loop == 20);
mutex_unlock(&rt5677->irq_lock);
if (irq_fired)
return IRQ_HANDLED;
@@ -5136,6 +5399,39 @@ exit:
return IRQ_NONE;
}
+static void rt5677_resume_irq_check(struct work_struct *work)
+{
+ int i, virq;
+ struct rt5677_priv *rt5677 =
+ container_of(work, struct rt5677_priv, resume_irq_check.work);
+
+ /* This is needed to check and clear the interrupt status register
+ * at resume. If the headset is plugged/unplugged when the device is
+ * fully suspended, there won't be a rising edge at resume to trigger
+ * the interrupt. Without this, we miss the next unplug/plug event.
+ */
+ rt5677_irq(0, rt5677);
+
+ /* Call all enabled jack detect irq handlers again. This is needed in
+ * addition to the above check for a corner case caused by jack gpio
+ * debounce. After codec irq is disabled at suspend, the delayed work
+ * scheduled by soc-jack may run and read wrong jack gpio values, since
+ * the regmap is in cache only mode. At resume, there is no irq because
+	 * rt5677_irq has already run and cleared the irq status at suspend.
+	 * Without this explicit check, unplugging the headset right after
+	 * suspend starts leaves it reported as plugged in after resume.
+ */
+ mutex_lock(&rt5677->irq_lock);
+ for (i = 0; i < RT5677_IRQ_NUM; i++) {
+ if (rt5677->irq_en & rt5677_irq_descs[i].enable_mask) {
+ virq = irq_find_mapping(rt5677->domain, i);
+ if (virq)
+ handle_nested_irq(virq);
+ }
+ }
+ mutex_unlock(&rt5677->irq_lock);
+}
+
static void rt5677_irq_bus_lock(struct irq_data *data)
{
struct rt5677_priv *rt5677 = irq_data_get_irq_chip_data(data);
@@ -5211,6 +5507,7 @@ static int rt5677_init_irq(struct i2c_client *i2c)
}
mutex_init(&rt5677->irq_lock);
+ INIT_DELAYED_WORK(&rt5677->resume_irq_check, rt5677_resume_irq_check);
/*
* Select RC as the debounce clock so that GPIO works even when
@@ -5256,6 +5553,8 @@ static int rt5677_init_irq(struct i2c_client *i2c)
if (ret)
dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
+ rt5677->irq = i2c->irq;
+
return ret;
}
@@ -5271,6 +5570,8 @@ static int rt5677_i2c_probe(struct i2c_client *i2c)
return -ENOMEM;
rt5677->dev = &i2c->dev;
+ rt5677->set_dsp_vad = rt5677_set_dsp_vad;
+ INIT_DELAYED_WORK(&rt5677->dsp_work, rt5677_dsp_work);
i2c_set_clientdata(i2c, rt5677);
if (i2c->dev.of_node) {
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 213f4b8ca269..944ae02aafc2 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -1336,6 +1336,8 @@
#define RT5677_PLL_M_SFT 12
#define RT5677_PLL_M_BP (0x1 << 11)
#define RT5677_PLL_M_BP_SFT 11
+#define RT5677_PLL_UPDATE_PLL1 (0x1 << 1)
+#define RT5677_PLL_UPDATE_PLL1_SFT 1
/* Global Clock Control 1 (0x80) */
#define RT5677_SCLK_SRC_MASK (0x3 << 14)
@@ -1730,6 +1732,7 @@ enum {
RT5677_AIF4,
RT5677_AIF5,
RT5677_AIFS,
+ RT5677_DSPBUFF,
};
enum {
@@ -1845,14 +1848,20 @@ struct rt5677_priv {
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio_chip;
#endif
- bool dsp_vad_en;
+ bool dsp_vad_en_request; /* DSP VAD enable/disable request */
+ bool dsp_vad_en; /* dsp_work parameter */
bool is_dsp_mode;
bool is_vref_slow;
+ struct delayed_work dsp_work;
/* Interrupt handling */
struct irq_domain *domain;
struct mutex irq_lock;
unsigned int irq_en;
+ struct delayed_work resume_irq_check;
+ int irq;
+
+ int (*set_dsp_vad)(struct snd_soc_component *component, bool on);
};
int rt5677_sel_asrc_clk_src(struct snd_soc_component *component,
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index c50b75ce82e0..b1713fffa3eb 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -44,6 +44,7 @@ static const struct rt5682_platform_data i2s_default_platform_data = {
.dmic1_data_pin = RT5682_DMIC1_DATA_GPIO2,
.dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO3,
.jd_src = RT5682_JD1,
+ .btndet_delay = 16,
};
struct rt5682_priv {
@@ -1002,6 +1003,7 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ cancel_delayed_work_sync(&rt5682->jack_detect_work);
return 0;
}
@@ -1026,6 +1028,18 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
RT5682_JD1_EN_MASK | RT5682_JD1_POL_MASK,
RT5682_JD1_EN | RT5682_JD1_POL_NOR);
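+		/* Write the same button-detect debounce delay to the high and
+		 * low bytes of each 4BTN_IL_CMD register (assumed to be the
+		 * press and release debounce fields).
+		 */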
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_4,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_5,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_6,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
+ regmap_update_bits(rt5682->regmap, RT5682_4BTN_IL_CMD_7,
+ 0x7f7f, (rt5682->pdata.btndet_delay << 8 |
+ rt5682->pdata.btndet_delay));
mod_delayed_work(system_power_efficient_wq,
&rt5682->jack_detect_work, msecs_to_jiffies(250));
break;
@@ -1450,28 +1464,6 @@ static const struct snd_kcontrol_new hpor_switch =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
RT5682_R_MUTE_SFT, 1, 1);
-static int rt5682_charge_pump_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_to_component(w->dapm);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_component_update_bits(component,
- RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
- break;
- case SND_SOC_DAPM_POST_PMD:
- snd_soc_component_update_bits(component,
- RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_LV);
- break;
- default:
- return 0;
- }
-
- return 0;
-}
-
static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1755,8 +1747,7 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("HP Amp R", RT5682_PWR_ANLG_1,
RT5682_PWR_HA_R_BIT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("Charge Pump", 1, RT5682_DEPOP_1,
- RT5682_PUMP_EN_SFT, 0, rt5682_charge_pump_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ RT5682_PUMP_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("Capless", 2, RT5682_DEPOP_1,
RT5682_CAPLESS_EN_SFT, 0, NULL, 0),
@@ -2467,6 +2458,8 @@ static int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
&rt5682->pdata.dmic1_clk_pin);
device_property_read_u32(dev, "realtek,jd-src",
&rt5682->pdata.jd_src);
+ device_property_read_u32(dev, "realtek,btndet-delay",
+ &rt5682->pdata.btndet_delay);
rt5682->pdata.ldo1_en = of_get_named_gpio(dev->of_node,
"realtek,ldo1-en-gpios", 0);
@@ -2654,6 +2647,8 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
+ regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
INIT_DELAYED_WORK(&rt5682->jack_detect_work,
rt5682_jack_detect_handler);
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
new file mode 100644
index 000000000000..729acd874c48
--- /dev/null
+++ b/sound/soc/codecs/tas2562.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for the Texas Instruments TAS2562 CODEC
+// Copyright (C) 2019 Texas Instruments Inc.
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas2562.h"
+
+#define TAS2562_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
+
+struct tas2562_data {
+ struct snd_soc_component *component;
+ struct gpio_desc *sdz_gpio;
+ struct regmap *regmap;
+ struct device *dev;
+ struct i2c_client *client;
+ int v_sense_slot;
+ int i_sense_slot;
+};
+
+static int tas2562_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct tas2562_data *tas2562 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_ACTIVE);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_MUTE);
+ break;
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_update_bits(component,
+ TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_SHUTDOWN);
+ break;
+
+ default:
+ dev_err(tas2562->dev,
+ "wrong power level setting %d\n", level);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tas2562_set_samplerate(struct tas2562_data *tas2562, int samplerate)
+{
+ int samp_rate;
+ int ramp_rate;
+
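+	/* Each SAMPRATE field value covers one 48kHz-family rate and its
+	 * 44.1kHz sibling; the RAMPRATE bit selects the 44.1kHz family.
+	 */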
+ switch (samplerate) {
+ case 7350:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ;
+ break;
+ case 8000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ;
+ break;
+ case 14700:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ;
+ break;
+ case 16000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ;
+ break;
+ case 22050:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ;
+ break;
+ case 24000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ;
+ break;
+ case 29400:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ;
+ break;
+ case 32000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ;
+ break;
+ case 44100:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ;
+ break;
+ case 48000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ;
+ break;
+ case 88200:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ;
+ break;
+ case 96000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ;
+ break;
+ case 176400:
+ ramp_rate = TAS2562_TDM_CFG0_RAMPRATE_44_1;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ;
+ break;
+ case 192000:
+ ramp_rate = 0;
+ samp_rate = TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ;
+ break;
+ default:
+		dev_err(tas2562->dev, "%s: unsupported sample rate %d\n",
+			__func__, samplerate);
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(tas2562->component, TAS2562_TDM_CFG0,
+ TAS2562_TDM_CFG0_RAMPRATE_MASK, ramp_rate);
+ snd_soc_component_update_bits(tas2562->component, TAS2562_TDM_CFG0,
+ TAS2562_TDM_CFG0_SAMPRATE_MASK, samp_rate);
+
+ return 0;
+}
+
+static int tas2562_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ switch (slot_width) {
+ case 16:
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXLEN_MASK,
+ TAS2562_TDM_CFG2_RXLEN_16B);
+ break;
+ case 24:
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXLEN_MASK,
+ TAS2562_TDM_CFG2_RXLEN_24B);
+ break;
+ case 32:
+ ret = snd_soc_component_update_bits(component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXLEN_MASK,
+ TAS2562_TDM_CFG2_RXLEN_32B);
+ break;
+
+ case 0:
+ /* Do not change slot width */
+ break;
+ default:
+ dev_err(tas2562->dev, "slot width not supported");
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_set_bitwidth(struct tas2562_data *tas2562, int bitwidth)
+{
+ int ret;
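+	/* The V-sense TDM slot follows the I-sense slot; the gap matches the
+	 * sample size in bytes (2 slots for 16-bit data, 4 for 24/32-bit).
+	 */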
+
+ switch (bitwidth) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXWLEN_MASK,
+ TAS2562_TDM_CFG2_RXWLEN_16B);
+ tas2562->v_sense_slot = tas2562->i_sense_slot + 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXWLEN_MASK,
+ TAS2562_TDM_CFG2_RXWLEN_24B);
+ tas2562->v_sense_slot = tas2562->i_sense_slot + 4;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG2,
+ TAS2562_TDM_CFG2_RXWLEN_MASK,
+ TAS2562_TDM_CFG2_RXWLEN_32B);
+ tas2562->v_sense_slot = tas2562->i_sense_slot + 4;
+ break;
+
+ default:
+		dev_err(tas2562->dev, "Unsupported PCM format\n");
+		return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG5,
+ TAS2562_TDM_CFG5_VSNS_EN | TAS2562_TDM_CFG5_VSNS_SLOT_MASK,
+ TAS2562_TDM_CFG5_VSNS_EN | tas2562->v_sense_slot);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_component_update_bits(tas2562->component,
+ TAS2562_TDM_CFG6,
+ TAS2562_TDM_CFG6_ISNS_EN | TAS2562_TDM_CFG6_ISNS_SLOT_MASK,
+ TAS2562_TDM_CFG6_ISNS_EN | tas2562->i_sense_slot);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret;
+
+ ret = tas2562_set_bitwidth(tas2562, params_format(params));
+ if (ret) {
+ dev_err(tas2562->dev, "set bitwidth failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = tas2562_set_samplerate(tas2562, params_rate(params));
+ if (ret)
+		dev_err(tas2562->dev, "set sample rate failed, %d\n", ret);
+
+ return ret;
+}
+
+static int tas2562_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+ int ret;
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ asi_cfg_1 = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ asi_cfg_1 |= TAS2562_TDM_CFG1_RX_FALLING;
+ break;
+ default:
+		dev_err(tas2562->dev, "Unsupported DAI clock inversion\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2562_TDM_CFG1,
+ TAS2562_TDM_CFG1_RX_EDGE_MASK,
+ asi_cfg_1);
+ if (ret < 0) {
+ dev_err(tas2562->dev, "Failed to set RX edge\n");
+ return ret;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_DSP_A:
+	case SND_SOC_DAIFMT_DSP_B:
+		tdm_rx_start_slot = BIT(1);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		tdm_rx_start_slot = 0;
+		break;
+	default:
+		dev_err(tas2562->dev, "Unsupported DAI format, fmt=0x%x\n",
+			fmt);
+		return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2562_TDM_CFG1,
+ TAS2562_TDM_CFG1_RX_OFFSET_MASK,
+ tdm_rx_start_slot);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2562_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+
+ return snd_soc_component_update_bits(component, TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK,
+ mute ? TAS2562_MUTE : 0);
+}
+
+static int tas2562_codec_probe(struct snd_soc_component *component)
+{
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+ int ret;
+
+ tas2562->component = component;
+
+ if (tas2562->sdz_gpio)
+ gpiod_set_value_cansleep(tas2562->sdz_gpio, 1);
+
+ ret = snd_soc_component_update_bits(component, TAS2562_PWR_CTRL,
+ TAS2562_MODE_MASK, TAS2562_MUTE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tas2562_suspend(struct snd_soc_component *component)
+{
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ regcache_cache_only(tas2562->regmap, true);
+ regcache_mark_dirty(tas2562->regmap);
+
+ if (tas2562->sdz_gpio)
+ gpiod_set_value_cansleep(tas2562->sdz_gpio, 0);
+
+ return 0;
+}
+
+static int tas2562_resume(struct snd_soc_component *component)
+{
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ if (tas2562->sdz_gpio)
+ gpiod_set_value_cansleep(tas2562->sdz_gpio, 1);
+
+ regcache_cache_only(tas2562->regmap, false);
+
+ return regcache_sync(tas2562->regmap);
+}
+#else
+#define tas2562_suspend NULL
+#define tas2562_resume NULL
+#endif
+
+static const char * const tas2562_ASI1_src[] = {
+ "I2C offset", "Left", "Right", "LeftRightDiv2",
+};
+
+static SOC_ENUM_SINGLE_DECL(tas2562_ASI1_src_enum, TAS2562_TDM_CFG2, 4,
+ tas2562_ASI1_src);
+
+static const struct snd_kcontrol_new tas2562_asi1_mux =
+ SOC_DAPM_ENUM("ASI1 Source", tas2562_ASI1_src_enum);
+
+static int tas2562_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct tas2562_data *tas2562 = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ dev_info(tas2562->dev, "SND_SOC_DAPM_POST_PMU\n");
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ dev_info(tas2562->dev, "SND_SOC_DAPM_PRE_PMD\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static DECLARE_TLV_DB_SCALE(tas2562_dac_tlv, 850, 50, 0);
+
+static const struct snd_kcontrol_new isense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2562_PWR_CTRL, TAS2562_ISENSE_POWER_EN,
+ 1, 1);
+
+static const struct snd_kcontrol_new vsense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2562_PWR_CTRL, TAS2562_VSENSE_POWER_EN,
+ 1, 1);
+
+static const struct snd_kcontrol_new tas2562_snd_controls[] = {
+ SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 0, 0x1c, 0,
+ tas2562_dac_tlv),
+};
+
+static const struct snd_soc_dapm_widget tas2562_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0, &tas2562_asi1_mux),
+ SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2562_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SWITCH("ISENSE", TAS2562_PWR_CTRL, 3, 1, &isense_switch),
+ SND_SOC_DAPM_SWITCH("VSENSE", TAS2562_PWR_CTRL, 2, 1, &vsense_switch),
+ SND_SOC_DAPM_SIGGEN("VMON"),
+ SND_SOC_DAPM_SIGGEN("IMON"),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+};
+
+static const struct snd_soc_dapm_route tas2562_audio_map[] = {
+ {"ASI1 Sel", "I2C offset", "ASI1"},
+ {"ASI1 Sel", "Left", "ASI1"},
+ {"ASI1 Sel", "Right", "ASI1"},
+ {"ASI1 Sel", "LeftRightDiv2", "ASI1"},
+ { "DAC", NULL, "DAC IN" },
+ { "OUT", NULL, "DAC" },
+ {"ISENSE", "Switch", "IMON"},
+ {"VSENSE", "Switch", "VMON"},
+};
+
+static const struct snd_soc_component_driver soc_component_dev_tas2562 = {
+ .probe = tas2562_codec_probe,
+ .suspend = tas2562_suspend,
+ .resume = tas2562_resume,
+ .set_bias_level = tas2562_set_bias_level,
+ .controls = tas2562_snd_controls,
+ .num_controls = ARRAY_SIZE(tas2562_snd_controls),
+ .dapm_widgets = tas2562_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2562_dapm_widgets),
+ .dapm_routes = tas2562_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2562_audio_map),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct snd_soc_dai_ops tas2562_speaker_dai_ops = {
+ .hw_params = tas2562_hw_params,
+ .set_fmt = tas2562_set_dai_fmt,
+ .set_tdm_slot = tas2562_set_dai_tdm_slot,
+ .digital_mute = tas2562_mute,
+};
+
+static struct snd_soc_dai_driver tas2562_dai[] = {
+ {
+ .name = "tas2562-amplifier",
+ .id = 0,
+ .playback = {
+ .stream_name = "ASI1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = TAS2562_FORMATS,
+ },
+ .ops = &tas2562_speaker_dai_ops,
+ },
+};
+
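+/* The register map is paged: 128 registers per page, selected through
+ * TAS2562_PAGE_CTRL. regmap presents pages 0-5 as one flat range.
+ */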
+static const struct regmap_range_cfg tas2562_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 5 * 128,
+ .selector_reg = TAS2562_PAGE_CTRL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 128,
+ },
+};
+
+static const struct reg_default tas2562_reg_defaults[] = {
+ { TAS2562_PAGE_CTRL, 0x00 },
+ { TAS2562_SW_RESET, 0x00 },
+ { TAS2562_PWR_CTRL, 0x0e },
+ { TAS2562_PB_CFG1, 0x20 },
+ { TAS2562_TDM_CFG0, 0x09 },
+ { TAS2562_TDM_CFG1, 0x02 },
+};
+
+static const struct regmap_config tas2562_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 5 * 128,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = tas2562_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas2562_reg_defaults),
+ .ranges = tas2562_ranges,
+ .num_ranges = ARRAY_SIZE(tas2562_ranges),
+};
+
+static int tas2562_parse_dt(struct tas2562_data *tas2562)
+{
+ struct device *dev = tas2562->dev;
+ int ret = 0;
+
+ tas2562->sdz_gpio = devm_gpiod_get_optional(dev, "shut-down-gpio",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tas2562->sdz_gpio)) {
+ if (PTR_ERR(tas2562->sdz_gpio) == -EPROBE_DEFER) {
+ tas2562->sdz_gpio = NULL;
+ return -EPROBE_DEFER;
+ }
+ }
+
+ ret = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ &tas2562->i_sense_slot);
+ if (ret)
+ dev_err(dev, "Looking up %s property failed %d\n",
+ "ti,imon-slot-no", ret);
+
+ return ret;
+}
+
+static int tas2562_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tas2562_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->dev = &client->dev;
+
+	ret = tas2562_parse_dt(data);
+	if (ret < 0)
+		return ret;
+
+ data->regmap = devm_regmap_init_i2c(client, &tas2562_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ ret = PTR_ERR(data->regmap);
+ dev_err(dev, "failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&client->dev, data);
+
+ return devm_snd_soc_register_component(dev, &soc_component_dev_tas2562,
+ tas2562_dai,
+ ARRAY_SIZE(tas2562_dai));
+
+}
+
+static const struct i2c_device_id tas2562_id[] = {
+ { "tas2562", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas2562_id);
+
+static const struct of_device_id tas2562_of_match[] = {
+ { .compatible = "ti,tas2562", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tas2562_of_match);
+
+static struct i2c_driver tas2562_i2c_driver = {
+ .driver = {
+ .name = "tas2562",
+ .of_match_table = of_match_ptr(tas2562_of_match),
+ },
+ .probe = tas2562_probe,
+ .id_table = tas2562_id,
+};
+
+module_i2c_driver(tas2562_i2c_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_DESCRIPTION("TAS2562 Audio amplifier driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
new file mode 100644
index 000000000000..62e659ab786d
--- /dev/null
+++ b/sound/soc/codecs/tas2562.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tas2562.h - ALSA SoC Texas Instruments TAS2562 Mono Audio Amplifier
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ */
+
+#ifndef __TAS2562_H__
+#define __TAS2562_H__
+
+#define TAS2562_PAGE_CTRL 0x00
+
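+/* Flat register address used by regmap: page * 128 + in-page offset */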
+#define TAS2562_REG(page, reg)	(((page) * 128) + (reg))
+
+#define TAS2562_SW_RESET TAS2562_REG(0, 0x01)
+#define TAS2562_PWR_CTRL TAS2562_REG(0, 0x02)
+#define TAS2562_PB_CFG1 TAS2562_REG(0, 0x03)
+#define TAS2562_MISC_CFG1 TAS2562_REG(0, 0x04)
+#define TAS2562_MISC_CFG2 TAS2562_REG(0, 0x05)
+
+#define TAS2562_TDM_CFG0 TAS2562_REG(0, 0x06)
+#define TAS2562_TDM_CFG1 TAS2562_REG(0, 0x07)
+#define TAS2562_TDM_CFG2 TAS2562_REG(0, 0x08)
+#define TAS2562_TDM_CFG3 TAS2562_REG(0, 0x09)
+#define TAS2562_TDM_CFG4 TAS2562_REG(0, 0x0a)
+#define TAS2562_TDM_CFG5 TAS2562_REG(0, 0x0b)
+#define TAS2562_TDM_CFG6 TAS2562_REG(0, 0x0c)
+#define TAS2562_TDM_CFG7 TAS2562_REG(0, 0x0d)
+#define TAS2562_TDM_CFG8 TAS2562_REG(0, 0x0e)
+#define TAS2562_TDM_CFG9 TAS2562_REG(0, 0x0f)
+#define TAS2562_TDM_CFG10 TAS2562_REG(0, 0x10)
+#define TAS2562_TDM_DET TAS2562_REG(0, 0x11)
+#define TAS2562_REV_ID TAS2562_REG(0, 0x7d)
+
+/* Page 2 */
+#define TAS2562_DVC_CFG1 TAS2562_REG(2, 0x01)
+#define TAS2562_DVC_CFG2 TAS2562_REG(2, 0x02)
+
+#define TAS2562_RESET BIT(0)
+
+#define TAS2562_MODE_MASK 0x3
+#define TAS2562_ACTIVE 0x0
+#define TAS2562_MUTE 0x1
+#define TAS2562_SHUTDOWN 0x2
+
+#define TAS2562_TDM_CFG1_RX_EDGE_MASK BIT(0)
+#define TAS2562_TDM_CFG1_RX_FALLING 1
+#define TAS2562_TDM_CFG1_RX_OFFSET_MASK GENMASK(4, 0)
+
+#define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
+#define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
+#define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
+#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
+#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
+#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
+#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
+#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
+#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
+#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
+
+#define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
+
+#define TAS2562_TDM_CFG2_RXLEN_MASK GENMASK(1, 0)
+#define TAS2562_TDM_CFG2_RXLEN_16B 0x0
+#define TAS2562_TDM_CFG2_RXLEN_24B BIT(0)
+#define TAS2562_TDM_CFG2_RXLEN_32B BIT(1)
+
+#define TAS2562_TDM_CFG2_RXWLEN_MASK GENMASK(3, 2)
+#define TAS2562_TDM_CFG2_RXWLEN_16B 0x0
+#define TAS2562_TDM_CFG2_RXWLEN_20B BIT(2)
+#define TAS2562_TDM_CFG2_RXWLEN_24B BIT(3)
+#define TAS2562_TDM_CFG2_RXWLEN_32B (BIT(2) | BIT(3))
+
+#define TAS2562_VSENSE_POWER_EN BIT(2)
+#define TAS2562_ISENSE_POWER_EN BIT(3)
+
+#define TAS2562_TDM_CFG5_VSNS_EN BIT(6)
+#define TAS2562_TDM_CFG5_VSNS_SLOT_MASK GENMASK(5, 0)
+
+#define TAS2562_TDM_CFG6_ISNS_EN BIT(6)
+#define TAS2562_TDM_CFG6_ISNS_SLOT_MASK GENMASK(5, 0)
+
+#endif /* __TAS2562_H__ */
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
new file mode 100644
index 000000000000..54c8135fe43c
--- /dev/null
+++ b/sound/soc/codecs/tas2770.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// ALSA SoC Texas Instruments TAS2770 20-W Digital Input Mono Class-D
+// Audio Amplifier with Speaker I/V Sense
+//
+// Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Tracy Yi <tracy-yi@ti.com>
+// Frank Shi <shifu0704@thundersoft.com>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "tas2770.h"
+
+#define TAS2770_MDELAY 0xFFFFFFFE
+
+static void tas2770_reset(struct tas2770_priv *tas2770)
+{
+ if (tas2770->reset_gpio) {
+ gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
+ }
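+	/* Follow up with a software reset so the device is also reset when
+	 * no reset GPIO is wired up.
+	 */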
+ snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
+ TAS2770_RST);
+}
+
+static int tas2770_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+ break;
+
+ default:
+ dev_err(tas2770->dev,
+ "wrong power level setting %d\n", level);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tas2770_codec_suspend(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2770_codec_resume(struct snd_soc_component *component)
+{
+ int ret;
+
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#else
+#define tas2770_codec_suspend NULL
+#define tas2770_codec_resume NULL
+#endif
+
+static const char * const tas2770_ASI1_src[] = {
+ "I2C offset", "Left", "Right", "LeftRightDiv2",
+};
+
+static SOC_ENUM_SINGLE_DECL(
+ tas2770_ASI1_src_enum, TAS2770_TDM_CFG_REG2,
+ 4, tas2770_ASI1_src);
+
+static const struct snd_kcontrol_new tas2770_asi1_mux =
+ SOC_DAPM_ENUM("ASI1 Source", tas2770_ASI1_src_enum);
+
+static int tas2770_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_MUTE);
+ if (ret)
+ goto end;
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_SHUTDOWN);
+ if (ret)
+ goto end;
+ break;
+ default:
+		dev_err(tas2770->dev, "Unsupported event\n");
+ return -EINVAL;
+ }
+
+end:
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
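+/* PWR_CTRL bits 2 and 3 power the sense paths down when set (both are set
+ * in the shutdown default 0x0e), hence the inverted switch definitions.
+ */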
+static const struct snd_kcontrol_new isense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 3, 1, 1);
+static const struct snd_kcontrol_new vsense_switch =
+ SOC_DAPM_SINGLE("Switch", TAS2770_PWR_CTRL, 2, 1, 1);
+
+static const struct snd_soc_dapm_widget tas2770_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("ASI1", "ASI1 Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_MUX("ASI1 Sel", SND_SOC_NOPM, 0, 0,
+ &tas2770_asi1_mux),
+ SND_SOC_DAPM_SWITCH("ISENSE", TAS2770_PWR_CTRL, 3, 1,
+ &isense_switch),
+ SND_SOC_DAPM_SWITCH("VSENSE", TAS2770_PWR_CTRL, 2, 1,
+ &vsense_switch),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2770_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_OUTPUT("OUT"),
+ SND_SOC_DAPM_SIGGEN("VMON"),
+ SND_SOC_DAPM_SIGGEN("IMON")
+};
+
+static const struct snd_soc_dapm_route tas2770_audio_map[] = {
+ {"ASI1 Sel", "I2C offset", "ASI1"},
+ {"ASI1 Sel", "Left", "ASI1"},
+ {"ASI1 Sel", "Right", "ASI1"},
+ {"ASI1 Sel", "LeftRightDiv2", "ASI1"},
+ {"DAC", NULL, "ASI1 Sel"},
+ {"OUT", NULL, "DAC"},
+ {"ISENSE", "Switch", "IMON"},
+ {"VSENSE", "Switch", "VMON"},
+};
+
+static int tas2770_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+ int ret;
+
+ if (mute)
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_MUTE);
+ else
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+ TAS2770_PWR_CTRL_ACTIVE);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2770_set_bitwidth(struct tas2770_priv *tas2770, int bitwidth)
+{
+ int ret;
+ struct snd_soc_component *component = tas2770->component;
+
+ switch (bitwidth) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXW_MASK,
+ TAS2770_TDM_CFG_REG2_RXW_16BITS);
+ tas2770->v_sense_slot = tas2770->i_sense_slot + 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXW_MASK,
+ TAS2770_TDM_CFG_REG2_RXW_24BITS);
+ tas2770->v_sense_slot = tas2770->i_sense_slot + 4;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXW_MASK,
+ TAS2770_TDM_CFG_REG2_RXW_32BITS);
+ tas2770->v_sense_slot = tas2770->i_sense_slot + 4;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ tas2770->channel_size = bitwidth;
+
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG5,
+ TAS2770_TDM_CFG_REG5_VSNS_MASK |
+ TAS2770_TDM_CFG_REG5_50_MASK,
+ TAS2770_TDM_CFG_REG5_VSNS_ENABLE |
+ tas2770->v_sense_slot);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG6,
+ TAS2770_TDM_CFG_REG6_ISNS_MASK |
+ TAS2770_TDM_CFG_REG6_50_MASK,
+ TAS2770_TDM_CFG_REG6_ISNS_ENABLE |
+ tas2770->i_sense_slot);
+
+end:
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
+{
+ int ret;
+ struct snd_soc_component *component = tas2770->component;
+
+ switch (samplerate) {
+ case 48000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+ if (ret)
+ goto end;
+ break;
+ case 44100:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_44_1_48KHZ);
+ if (ret)
+ goto end;
+ break;
+ case 96000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_88_2_96KHZ);
+ break;
+ case 88200:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_88_2_96KHZ);
+ break;
+	case 192000:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_48KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
+ if (ret)
+ goto end;
+ break;
+	case 176400:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_SMP_MASK,
+ TAS2770_TDM_CFG_REG0_SMP_44_1KHZ);
+ if (ret)
+ goto end;
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG0,
+ TAS2770_TDM_CFG_REG0_31_MASK,
+ TAS2770_TDM_CFG_REG0_31_176_4_192KHZ);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+end:
+ if (ret < 0)
+ return ret;
+
+ tas2770->sampling_rate = samplerate;
+ return 0;
+}
+
+static int tas2770_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+ int ret;
+
+ ret = tas2770_set_bitwidth(tas2770, params_format(params));
+ if (ret < 0)
+ goto end;
+
+ ret = tas2770_set_samplerate(tas2770, params_rate(params));
+
+end:
+ return ret;
+}
+
+static int tas2770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0;
+ int ret;
+ struct snd_soc_component *component = dai->component;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+		dev_err(tas2770->dev, "Only slave mode (CBS_CFS) is supported\n");
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_RSING;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ asi_cfg_1 |= TAS2770_TDM_CFG_REG1_RX_FALING;
+ break;
+ default:
+		dev_err(tas2770->dev, "Unsupported DAI clock inversion\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG1,
+ TAS2770_TDM_CFG_REG1_RX_MASK,
+ asi_cfg_1);
+ if (ret < 0)
+ return ret;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ tdm_rx_start_slot = 1;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ tdm_rx_start_slot = 0;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ tdm_rx_start_slot = 1;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ tdm_rx_start_slot = 0;
+ break;
+ default:
+ dev_err(tas2770->dev,
+			"Unsupported DAI format, fmt=0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG1,
+ TAS2770_TDM_CFG_REG1_MASK,
+ (tdm_rx_start_slot << TAS2770_TDM_CFG_REG1_51_SHIFT));
+ if (ret < 0)
+ return ret;
+
+ tas2770->asi_format = fmt;
+
+ return 0;
+}
+
+static int tas2770_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask,
+ unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+ int left_slot, right_slot;
+ int ret;
+
+ if (tx_mask == 0 || rx_mask != 0)
+ return -EINVAL;
+
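+	/* Pick the left/right capture slots from the lowest set bits of
+	 * tx_mask; with a single set bit both channels share that slot.
+	 */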
+ if (slots == 1) {
+ if (tx_mask != 1)
+ return -EINVAL;
+ left_slot = 0;
+ right_slot = 0;
+ } else {
+ left_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << left_slot);
+ if (tx_mask == 0) {
+ right_slot = left_slot;
+ } else {
+ right_slot = __ffs(tx_mask);
+ tx_mask &= ~(1 << right_slot);
+ }
+ }
+
+ if (tx_mask != 0 || left_slot >= slots || right_slot >= slots)
+ return -EINVAL;
+
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG3,
+ TAS2770_TDM_CFG_REG3_30_MASK,
+ (left_slot << TAS2770_TDM_CFG_REG3_30_SHIFT));
+ if (ret < 0)
+ return ret;
+ ret = snd_soc_component_update_bits(component, TAS2770_TDM_CFG_REG3,
+ TAS2770_TDM_CFG_REG3_RXS_MASK,
+ (right_slot << TAS2770_TDM_CFG_REG3_RXS_SHIFT));
+ if (ret < 0)
+ return ret;
+
+ switch (slot_width) {
+ case 16:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXS_MASK,
+ TAS2770_TDM_CFG_REG2_RXS_16BITS);
+ break;
+
+ case 24:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXS_MASK,
+ TAS2770_TDM_CFG_REG2_RXS_24BITS);
+ break;
+
+ case 32:
+ ret = snd_soc_component_update_bits(component,
+ TAS2770_TDM_CFG_REG2,
+ TAS2770_TDM_CFG_REG2_RXS_MASK,
+ TAS2770_TDM_CFG_REG2_RXS_32BITS);
+ break;
+
+ case 0:
+ /* Do not change slot width */
+ ret = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ tas2770->slot_width = slot_width;
+ return 0;
+}
+
+static const struct snd_soc_dai_ops tas2770_dai_ops = {
+ .digital_mute = tas2770_mute,
+ .hw_params = tas2770_hw_params,
+ .set_fmt = tas2770_set_fmt,
+ .set_tdm_slot = tas2770_set_dai_tdm_slot,
+};
+
+#define TAS2770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TAS2770_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_192000\
+ )
+
+static struct snd_soc_dai_driver tas2770_dai_driver[] = {
+ {
+ .name = "tas2770 ASI1",
+ .id = 0,
+ .playback = {
+ .stream_name = "ASI1 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TAS2770_RATES,
+ .formats = TAS2770_FORMATS,
+ },
+ .capture = {
+ .stream_name = "ASI1 Capture",
+ .channels_min = 0,
+ .channels_max = 2,
+ .rates = TAS2770_RATES,
+ .formats = TAS2770_FORMATS,
+ },
+ .ops = &tas2770_dai_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static int tas2770_codec_probe(struct snd_soc_component *component)
+{
+ struct tas2770_priv *tas2770 =
+ snd_soc_component_get_drvdata(component);
+
+ tas2770->component = component;
+
+ return 0;
+}
+
+static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0);
+
+static const struct snd_kcontrol_new tas2770_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2,
+ 0, TAS2770_PLAY_CFG_REG2_VMAX, 1,
+ tas2770_playback_volume),
+ SOC_SINGLE_TLV("Amp Gain Volume", TAS2770_PLAY_CFG_REG0,
+ 0, 0x14, 0,
+ tas2770_digital_tlv),
+};
+
+static const struct snd_soc_component_driver soc_component_driver_tas2770 = {
+ .probe = tas2770_codec_probe,
+ .suspend = tas2770_codec_suspend,
+ .resume = tas2770_codec_resume,
+ .set_bias_level = tas2770_set_bias_level,
+ .controls = tas2770_snd_controls,
+ .num_controls = ARRAY_SIZE(tas2770_snd_controls),
+ .dapm_widgets = tas2770_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas2770_dapm_widgets),
+ .dapm_routes = tas2770_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas2770_audio_map),
+ .idle_bias_on = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static int tas2770_register_codec(struct tas2770_priv *tas2770)
+{
+ return devm_snd_soc_register_component(tas2770->dev,
+ &soc_component_driver_tas2770,
+ tas2770_dai_driver, ARRAY_SIZE(tas2770_dai_driver));
+}
+
+static const struct reg_default tas2770_reg_defaults[] = {
+ { TAS2770_PAGE, 0x00 },
+ { TAS2770_SW_RST, 0x00 },
+ { TAS2770_PWR_CTRL, 0x0e },
+ { TAS2770_PLAY_CFG_REG0, 0x10 },
+ { TAS2770_PLAY_CFG_REG1, 0x01 },
+ { TAS2770_PLAY_CFG_REG2, 0x00 },
+ { TAS2770_MSC_CFG_REG0, 0x07 },
+ { TAS2770_TDM_CFG_REG1, 0x02 },
+ { TAS2770_TDM_CFG_REG2, 0x0a },
+ { TAS2770_TDM_CFG_REG3, 0x10 },
+ { TAS2770_INT_MASK_REG0, 0xfc },
+ { TAS2770_INT_MASK_REG1, 0xb1 },
+ { TAS2770_INT_CFG, 0x05 },
+ { TAS2770_MISC_IRQ, 0x81 },
+ { TAS2770_CLK_CGF, 0x0c },
+};
+
+static bool tas2770_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS2770_PAGE: /* regmap implementation requires this */
+ case TAS2770_SW_RST: /* always clears after write */
+ case TAS2770_BO_PRV_REG0:/* has a self clearing bit */
+ case TAS2770_LVE_INT_REG0:
+ case TAS2770_LVE_INT_REG1:
+ case TAS2770_LAT_INT_REG0:/* Sticky interrupt flags */
+ case TAS2770_LAT_INT_REG1:/* Sticky interrupt flags */
+ case TAS2770_VBAT_MSB:
+ case TAS2770_VBAT_LSB:
+ case TAS2770_TEMP_MSB:
+ case TAS2770_TEMP_LSB:
+ return true;
+ }
+ return false;
+}
+
+static bool tas2770_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS2770_LVE_INT_REG0:
+ case TAS2770_LVE_INT_REG1:
+ case TAS2770_LAT_INT_REG0:
+ case TAS2770_LAT_INT_REG1:
+ case TAS2770_VBAT_MSB:
+ case TAS2770_VBAT_LSB:
+ case TAS2770_TEMP_MSB:
+ case TAS2770_TEMP_LSB:
+ case TAS2770_TDM_CLK_DETC:
+ case TAS2770_REV_AND_GPID:
+ return false;
+ }
+ return true;
+}
+
+static const struct regmap_range_cfg tas2770_regmap_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 1 * 128,
+ .selector_reg = TAS2770_PAGE,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 128,
+ },
+};
+
+static const struct regmap_config tas2770_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = tas2770_writeable,
+ .volatile_reg = tas2770_volatile,
+ .reg_defaults = tas2770_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas2770_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .ranges = tas2770_regmap_ranges,
+ .num_ranges = ARRAY_SIZE(tas2770_regmap_ranges),
+ .max_register = 1 * 128,
+};
+
+static int tas2770_parse_dt(struct device *dev, struct tas2770_priv *tas2770)
+{
+ int rc = 0;
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,asi-format",
+ &tas2770->asi_format);
+ if (rc) {
+ dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+ "ti,asi-format", rc);
+ goto end;
+ }
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,imon-slot-no",
+ &tas2770->i_sense_slot);
+ if (rc) {
+ dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+ "ti,imon-slot-no", rc);
+ goto end;
+ }
+
+ rc = fwnode_property_read_u32(dev->fwnode, "ti,vmon-slot-no",
+ &tas2770->v_sense_slot);
+ if (rc) {
+ dev_err(tas2770->dev, "Looking up %s property failed %d\n",
+ "ti,vmon-slot-no", rc);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
+static int tas2770_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tas2770_priv *tas2770;
+ int result;
+
+ tas2770 = devm_kzalloc(&client->dev,
+ sizeof(struct tas2770_priv), GFP_KERNEL);
+ if (!tas2770)
+ return -ENOMEM;
+ tas2770->dev = &client->dev;
+
+ i2c_set_clientdata(client, tas2770);
+ dev_set_drvdata(&client->dev, tas2770);
+ tas2770->power_state = TAS2770_POWER_SHUTDOWN;
+
+ tas2770->regmap = devm_regmap_init_i2c(client, &tas2770_i2c_regmap);
+ if (IS_ERR(tas2770->regmap)) {
+ result = PTR_ERR(tas2770->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ result);
+ goto end;
+ }
+
+ if (client->dev.of_node) {
+ result = tas2770_parse_dt(&client->dev, tas2770);
+ if (result) {
+ dev_err(tas2770->dev, "%s: Failed to parse devicetree\n",
+ __func__);
+ goto end;
+ }
+ }
+
+ tas2770->reset_gpio = devm_gpiod_get_optional(tas2770->dev,
+ "reset-gpio",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tas2770->reset_gpio)) {
+ if (PTR_ERR(tas2770->reset_gpio) == -EPROBE_DEFER) {
+ tas2770->reset_gpio = NULL;
+ return -EPROBE_DEFER;
+ }
+ }
+
+ tas2770->channel_size = 0;
+ tas2770->slot_width = 0;
+
+ tas2770_reset(tas2770);
+
+ result = tas2770_register_codec(tas2770);
+ if (result)
+ dev_err(tas2770->dev, "Register codec failed.\n");
+
+end:
+ return result;
+}
+
+static int tas2770_i2c_remove(struct i2c_client *client)
+{
+ pm_runtime_disable(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id tas2770_i2c_id[] = {
+ { "tas2770", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas2770_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id tas2770_of_match[] = {
+ { .compatible = "ti,tas2770" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tas2770_of_match);
+#endif
+
+static struct i2c_driver tas2770_i2c_driver = {
+ .driver = {
+ .name = "tas2770",
+ .of_match_table = of_match_ptr(tas2770_of_match),
+ },
+ .probe = tas2770_i2c_probe,
+ .remove = tas2770_i2c_remove,
+ .id_table = tas2770_i2c_id,
+};
+
+module_i2c_driver(tas2770_i2c_driver);
+
+MODULE_AUTHOR("Shi Fu <shifu0704@thundersoft.com>");
+MODULE_DESCRIPTION("TAS2770 I2C Smart Amplifier driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tas2770.h b/sound/soc/codecs/tas2770.h
new file mode 100644
index 000000000000..cbb858369fe6
--- /dev/null
+++ b/sound/soc/codecs/tas2770.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * ALSA SoC TAS2770 codec driver
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ */
+#ifndef __TAS2770__
+#define __TAS2770__
+
+/* Book Control Register (available in page0 of each book) */
+#define TAS2770_BOOKCTL_PAGE 0
+#define TAS2770_BOOKCTL_REG 127
+#define TAS2770_REG(page, reg) ((page * 128) + reg)
+ /* Page */
+#define TAS2770_PAGE TAS2770_REG(0X0, 0x00)
+#define TAS2770_PAGE_PAGE_MASK 255
+ /* Software Reset */
+#define TAS2770_SW_RST TAS2770_REG(0X0, 0x01)
+#define TAS2770_RST BIT(0)
+ /* Power Control */
+#define TAS2770_PWR_CTRL TAS2770_REG(0X0, 0x02)
+#define TAS2770_PWR_CTRL_MASK 0x3
+#define TAS2770_PWR_CTRL_ACTIVE 0x0
+#define TAS2770_PWR_CTRL_MUTE BIT(0)
+#define TAS2770_PWR_CTRL_SHUTDOWN 0x2
+ /* Playback Configuration Reg0 */
+#define TAS2770_PLAY_CFG_REG0 TAS2770_REG(0X0, 0x03)
+ /* Playback Configuration Reg1 */
+#define TAS2770_PLAY_CFG_REG1 TAS2770_REG(0X0, 0x04)
+ /* Playback Configuration Reg2 */
+#define TAS2770_PLAY_CFG_REG2 TAS2770_REG(0X0, 0x05)
+#define TAS2770_PLAY_CFG_REG2_VMAX 0xc9
+ /* Misc Configuration Reg0 */
+#define TAS2770_MSC_CFG_REG0 TAS2770_REG(0X0, 0x07)
+ /* TDM Configuration Reg0 */
+#define TAS2770_TDM_CFG_REG0 TAS2770_REG(0X0, 0x0A)
+#define TAS2770_TDM_CFG_REG0_SMP_MASK BIT(5)
+#define TAS2770_TDM_CFG_REG0_SMP_48KHZ 0x0
+#define TAS2770_TDM_CFG_REG0_SMP_44_1KHZ BIT(5)
+#define TAS2770_TDM_CFG_REG0_31_MASK 0xe
+#define TAS2770_TDM_CFG_REG0_31_44_1_48KHZ 0x6
+#define TAS2770_TDM_CFG_REG0_31_88_2_96KHZ 0x8
+#define TAS2770_TDM_CFG_REG0_31_176_4_192KHZ 0xa
+ /* TDM Configuration Reg1 */
+#define TAS2770_TDM_CFG_REG1 TAS2770_REG(0X0, 0x0B)
+#define TAS2770_TDM_CFG_REG1_MASK 0x3e
+#define TAS2770_TDM_CFG_REG1_51_SHIFT 1
+#define TAS2770_TDM_CFG_REG1_RX_MASK BIT(0)
+#define TAS2770_TDM_CFG_REG1_RX_RSING 0x0
+#define TAS2770_TDM_CFG_REG1_RX_FALING BIT(0)
+ /* TDM Configuration Reg2 */
+#define TAS2770_TDM_CFG_REG2 TAS2770_REG(0X0, 0x0C)
+#define TAS2770_TDM_CFG_REG2_RXW_MASK 0xc
+#define TAS2770_TDM_CFG_REG2_RXW_16BITS 0x0
+#define TAS2770_TDM_CFG_REG2_RXW_24BITS 0x8
+#define TAS2770_TDM_CFG_REG2_RXW_32BITS 0xc
+#define TAS2770_TDM_CFG_REG2_RXS_MASK 0x3
+#define TAS2770_TDM_CFG_REG2_RXS_16BITS 0x0
+#define TAS2770_TDM_CFG_REG2_RXS_24BITS BIT(0)
+#define TAS2770_TDM_CFG_REG2_RXS_32BITS 0x2
+ /* TDM Configuration Reg3 */
+#define TAS2770_TDM_CFG_REG3 TAS2770_REG(0X0, 0x0D)
+#define TAS2770_TDM_CFG_REG3_RXS_MASK 0xf0
+#define TAS2770_TDM_CFG_REG3_RXS_SHIFT 0x4
+#define TAS2770_TDM_CFG_REG3_30_MASK 0xf
+#define TAS2770_TDM_CFG_REG3_30_SHIFT 0
+ /* TDM Configuration Reg5 */
+#define TAS2770_TDM_CFG_REG5 TAS2770_REG(0X0, 0x0F)
+#define TAS2770_TDM_CFG_REG5_VSNS_MASK BIT(6)
+#define TAS2770_TDM_CFG_REG5_VSNS_ENABLE BIT(6)
+#define TAS2770_TDM_CFG_REG5_50_MASK 0x3f
+ /* TDM Configuration Reg6 */
+#define TAS2770_TDM_CFG_REG6 TAS2770_REG(0X0, 0x10)
+#define TAS2770_TDM_CFG_REG6_ISNS_MASK BIT(6)
+#define TAS2770_TDM_CFG_REG6_ISNS_ENABLE BIT(6)
+#define TAS2770_TDM_CFG_REG6_50_MASK 0x3f
+ /* Brown Out Prevention Reg0 */
+#define TAS2770_BO_PRV_REG0 TAS2770_REG(0X0, 0x1B)
+ /* Interrupt MASK Reg0 */
+#define TAS2770_INT_MASK_REG0 TAS2770_REG(0X0, 0x20)
+#define TAS2770_INT_REG0_DEFAULT 0xfc
+#define TAS2770_INT_MASK_REG0_DISABLE 0xff
+ /* Interrupt MASK Reg1 */
+#define TAS2770_INT_MASK_REG1 TAS2770_REG(0X0, 0x21)
+#define TAS2770_INT_REG1_DEFAULT 0xb1
+#define TAS2770_INT_MASK_REG1_DISABLE 0xff
+ /* Live-Interrupt Reg0 */
+#define TAS2770_LVE_INT_REG0 TAS2770_REG(0X0, 0x22)
+ /* Live-Interrupt Reg1 */
+#define TAS2770_LVE_INT_REG1 TAS2770_REG(0X0, 0x23)
+ /* Latched-Interrupt Reg0 */
+#define TAS2770_LAT_INT_REG0 TAS2770_REG(0X0, 0x24)
+#define TAS2770_LAT_INT_REG0_OCE_FLG BIT(1)
+#define TAS2770_LAT_INT_REG0_OTE_FLG BIT(0)
+ /* Latched-Interrupt Reg1 */
+#define TAS2770_LAT_INT_REG1 TAS2770_REG(0X0, 0x25)
+#define TAS2770_LAT_INT_REG1_VBA_TOV BIT(3)
+#define TAS2770_LAT_INT_REG1_VBA_TUV BIT(2)
+#define TAS2770_LAT_INT_REG1_BOUT_FLG BIT(1)
+ /* VBAT MSB */
+#define TAS2770_VBAT_MSB TAS2770_REG(0X0, 0x27)
+ /* VBAT LSB */
+#define TAS2770_VBAT_LSB TAS2770_REG(0X0, 0x28)
+ /* TEMP MSB */
+#define TAS2770_TEMP_MSB TAS2770_REG(0X0, 0x29)
+ /* TEMP LSB */
+#define TAS2770_TEMP_LSB TAS2770_REG(0X0, 0x2A)
+ /* Interrupt Configuration */
+#define TAS2770_INT_CFG TAS2770_REG(0X0, 0x30)
+ /* Misc IRQ */
+#define TAS2770_MISC_IRQ TAS2770_REG(0X0, 0x32)
+ /* Clock Configuration */
+#define TAS2770_CLK_CGF TAS2770_REG(0X0, 0x3C)
+ /* TDM Clock detection monitor */
+#define TAS2770_TDM_CLK_DETC TAS2770_REG(0X0, 0x77)
+ /* Revision and PG ID */
+#define TAS2770_REV_AND_GPID TAS2770_REG(0X0, 0x7D)
+
+#define TAS2770_POWER_ACTIVE 0
+#define TAS2770_POWER_MUTE 1
+#define TAS2770_POWER_SHUTDOWN 2
+#define ERROR_OVER_CURRENT 0x0000001
+#define ERROR_DIE_OVERTEMP 0x0000002
+#define ERROR_OVER_VOLTAGE 0x0000004
+#define ERROR_UNDER_VOLTAGE 0x0000008
+#define ERROR_BROWNOUT 0x0000010
+#define ERROR_CLASSD_PWR 0x0000020
+
+struct tas2770_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct snd_soc_component *component;
+ int power_state;
+ int asi_format;
+ struct gpio_desc *reset_gpio;
+ int sampling_rate;
+ int channel_size;
+ int slot_width;
+ int v_sense_slot;
+ int i_sense_slot;
+};
+
+#endif /* __TAS2770__ */
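A hedged usage sketch of the power-control field defined above (the component pointer is assumed to come from the caller): the two PWR_CTRL bits select active, mute, or shutdown, and are normally driven through a masked update:

    #include <sound/soc.h>
    #include "tas2770.h"

    /* Sketch: move the amplifier to the mute state using the masks above */
    static void tas2770_example_mute(struct snd_soc_component *component)
    {
            snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
                                          TAS2770_PWR_CTRL_MASK,
                                          TAS2770_PWR_CTRL_MUTE);
    }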
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index df627a08def9..f6f19fdc72f5 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -171,6 +171,7 @@ struct aic31xx_priv {
int rate_div_line;
bool master_dapm_route_applied;
int irq;
+ u8 ocmv; /* output common-mode voltage */
};
struct aic31xx_rate_divs {
@@ -1312,6 +1313,11 @@ static int aic31xx_codec_probe(struct snd_soc_component *component)
if (ret)
return ret;
+ /* set output common-mode voltage */
+ snd_soc_component_update_bits(component, AIC31XX_HPDRIVER,
+ AIC31XX_HPD_OCMV_MASK,
+ aic31xx->ocmv << AIC31XX_HPD_OCMV_SHIFT);
+
return 0;
}
@@ -1501,6 +1507,43 @@ exit:
return IRQ_NONE;
}
+static void aic31xx_configure_ocmv(struct aic31xx_priv *priv)
+{
+ struct device *dev = priv->dev;
+ int dvdd, avdd;
+ u32 value;
+
+ if (dev->fwnode &&
+ !fwnode_property_read_u32(dev->fwnode, "ai31xx-ocmv", &value)) {
+ /* OCMV setting is forced by DT */
+ if (value <= 3) {
+ priv->ocmv = value;
+ return;
+ }
+ }
+
+ avdd = regulator_get_voltage(priv->supplies[3].consumer);
+ dvdd = regulator_get_voltage(priv->supplies[5].consumer);
+
+ if (avdd > 3600000 || dvdd > 1950000) {
+ dev_warn(dev,
+ "Too high supply voltage(s) AVDD: %d, DVDD: %d\n",
+ avdd, dvdd);
+ } else if (avdd == 3600000 && dvdd == 1950000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_8V;
+ } else if (avdd >= 3300000 && dvdd >= 1800000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_65V;
+ } else if (avdd >= 3000000 && dvdd >= 1650000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_5V;
+ } else if (avdd >= 2700000 && dvdd >= 1525000) {
+ priv->ocmv = AIC31XX_HPD_OCMV_1_35V;
+ } else {
+ dev_warn(dev,
+ "Invalid supply voltage(s) AVDD: %d, DVDD: %d\n",
+ avdd, dvdd);
+ }
+}
+
static int aic31xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1570,6 +1613,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ aic31xx_configure_ocmv(aic31xx);
+
if (aic31xx->irq > 0) {
regmap_update_bits(aic31xx->regmap, AIC31XX_GPIO1,
AIC31XX_GPIO1_FUNC_MASK,
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index cb024955c978..83a8c7604cc3 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -232,6 +232,14 @@ struct aic31xx_pdata {
#define AIC31XX_HSD_HP 0x01
#define AIC31XX_HSD_HS 0x03
+/* AIC31XX_HPDRIVER */
+#define AIC31XX_HPD_OCMV_MASK GENMASK(4, 3)
+#define AIC31XX_HPD_OCMV_SHIFT 3
+#define AIC31XX_HPD_OCMV_1_35V 0x0
+#define AIC31XX_HPD_OCMV_1_5V 0x1
+#define AIC31XX_HPD_OCMV_1_65V 0x2
+#define AIC31XX_HPD_OCMV_1_8V 0x3
+
/* AIC31XX_MICBIAS */
#define AIC31XX_MICBIAS_MASK GENMASK(1, 0)
#define AIC31XX_MICBIAS_SHIFT 0
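A worked example of the selection ladder in aic31xx_configure_ocmv(), with assumed supply voltages: AVDD = 3.3 V and DVDD = 1.8 V fall into the third branch, so the output common-mode voltage becomes 1.65 V, encoded as field value 0x2 in HPDRIVER bits 4:3:

    #include <sound/soc.h>
    #include "tlv320aic31xx.h"

    /* Sketch: program the 1.65 V OCMV value (0x2 << 3 = 0x10) */
    static void ocmv_example(struct snd_soc_component *component)
    {
            snd_soc_component_update_bits(component, AIC31XX_HPDRIVER,
                                          AIC31XX_HPD_OCMV_MASK,
                                          AIC31XX_HPD_OCMV_1_65V <<
                                          AIC31XX_HPD_OCMV_SHIFT);
    }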
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 68165de1c8de..b4e9a6c73f90 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -573,6 +573,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
struct clk *pll;
pll = devm_clk_get(component->dev, "pll");
+ if (IS_ERR(pll))
+ return PTR_ERR(pll);
+
mclk = clk_get_parent(pll);
return clk_set_rate(mclk, freq);
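The one-line fix above guards against devm_clk_get() failing; a minimal sketch of the enforced pattern (function name hypothetical), since clk getters return an ERR_PTR() rather than NULL on failure:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Sketch: never touch a clk handle before checking it with IS_ERR() */
    static int example_get_mclk_rate(struct device *dev, unsigned long *rate)
    {
            struct clk *pll = devm_clk_get(dev, "pll");

            if (IS_ERR(pll))
                    return PTR_ERR(pll);

            *rate = clk_get_rate(clk_get_parent(pll));
            return 0;
    }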
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index f318403133e9..f11ffa28683b 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -2837,11 +2837,11 @@ static int wcd9335_codec_enable_dec(struct snd_soc_dapm_widget *w,
TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x10);
snd_soc_component_update_bits(comp, dec_cfg_reg, 0x08, 0x00);
- if (hpf_coff_freq != CF_MIN_3DB_150HZ) {
- snd_soc_component_update_bits(comp, dec_cfg_reg,
- TX_HPF_CUT_OFF_FREQ_MASK,
- hpf_coff_freq << 5);
- }
+ if (hpf_coff_freq != CF_MIN_3DB_150HZ) {
+ snd_soc_component_update_bits(comp, dec_cfg_reg,
+ TX_HPF_CUT_OFF_FREQ_MASK,
+ hpf_coff_freq << 5);
+ }
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x00);
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index cf64e109c658..7b087d94141b 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -2410,6 +2410,8 @@ static int wm2200_i2c_probe(struct i2c_client *i2c,
err_pm_runtime:
pm_runtime_disable(&i2c->dev);
+ if (i2c->irq)
+ free_irq(i2c->irq, wm2200);
err_reset:
if (wm2200->pdata.reset)
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
@@ -2426,12 +2428,15 @@ static int wm2200_i2c_remove(struct i2c_client *i2c)
{
struct wm2200_priv *wm2200 = i2c_get_clientdata(i2c);
+ pm_runtime_disable(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm2200);
if (wm2200->pdata.reset)
gpio_set_value_cansleep(wm2200->pdata.reset, 0);
if (wm2200->pdata.ldo_ena)
gpio_set_value_cansleep(wm2200->pdata.ldo_ena, 0);
+ regulator_bulk_disable(ARRAY_SIZE(wm2200->core_supplies),
+ wm2200->core_supplies);
return 0;
}
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 4af0e519e623..91cc63c5a51f 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -2617,6 +2617,7 @@ static int wm5100_i2c_probe(struct i2c_client *i2c,
return ret;
err_reset:
+ pm_runtime_disable(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm5100);
wm5100_free_gpio(i2c);
@@ -2640,6 +2641,7 @@ static int wm5100_i2c_remove(struct i2c_client *i2c)
{
struct wm5100_priv *wm5100 = i2c_get_clientdata(i2c);
+ pm_runtime_disable(&i2c->dev);
if (i2c->irq)
free_irq(i2c->irq, wm5100);
wm5100_free_gpio(i2c);
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index bcb3c9d5abf0..7d7ea15d73e0 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1410,34 +1410,6 @@ static int wm8904_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-
-static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
- unsigned int freq, int dir)
-{
- struct snd_soc_component *component = dai->component;
- struct wm8904_priv *priv = snd_soc_component_get_drvdata(component);
-
- switch (clk_id) {
- case WM8904_CLK_MCLK:
- priv->sysclk_src = clk_id;
- priv->mclk_rate = freq;
- break;
-
- case WM8904_CLK_FLL:
- priv->sysclk_src = clk_id;
- break;
-
- default:
- return -EINVAL;
- }
-
- dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
-
- wm8904_configure_clocking(component);
-
- return 0;
-}
-
static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_component *component = dai->component;
@@ -1824,6 +1796,50 @@ out:
return 0;
}
+static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct wm8904_priv *priv = snd_soc_component_get_drvdata(component);
+ unsigned long mclk_freq;
+ int ret;
+
+ switch (clk_id) {
+ case WM8904_CLK_AUTO:
+ mclk_freq = clk_get_rate(priv->mclk);
+ /* enable FLL if a different sysclk is desired */
+ if (mclk_freq != freq) {
+ priv->sysclk_src = WM8904_CLK_FLL;
+ ret = wm8904_set_fll(dai, WM8904_FLL_MCLK,
+ WM8904_FLL_MCLK,
+ mclk_freq, freq);
+ if (ret)
+ return ret;
+ break;
+ }
+ clk_id = WM8904_CLK_MCLK;
+ /* fallthrough */
+
+ case WM8904_CLK_MCLK:
+ priv->sysclk_src = clk_id;
+ priv->mclk_rate = freq;
+ break;
+
+ case WM8904_CLK_FLL:
+ priv->sysclk_src = clk_id;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
+
+ wm8904_configure_clocking(component);
+
+ return 0;
+}
+
static int wm8904_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
struct snd_soc_component *component = codec_dai->component;
@@ -1917,6 +1933,7 @@ static int wm8904_set_bias_level(struct snd_soc_component *component,
snd_soc_component_update_bits(component, WM8904_BIAS_CONTROL_0,
WM8904_BIAS_ENA, 0);
+ snd_soc_component_write(component, WM8904_SW_RESET_AND_ID, 0);
regcache_cache_only(wm8904->regmap, true);
regcache_mark_dirty(wm8904->regmap);
diff --git a/sound/soc/codecs/wm8904.h b/sound/soc/codecs/wm8904.h
index c1bca52f9927..de6340446b1f 100644
--- a/sound/soc/codecs/wm8904.h
+++ b/sound/soc/codecs/wm8904.h
@@ -10,6 +10,7 @@
#ifndef _WM8904_H
#define _WM8904_H
+#define WM8904_CLK_AUTO 0
#define WM8904_CLK_MCLK 1
#define WM8904_CLK_FLL 2
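A minimal machine-driver sketch of the new WM8904_CLK_AUTO source (the 256*fs multiplier and include path are assumed): the codec compares the current MCLK rate against the requested sysclk and transparently starts the FLL when they differ:

    #include <sound/pcm_params.h>
    #include <sound/soc.h>
    #include "wm8904.h"

    static int example_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params)
    {
            struct snd_soc_pcm_runtime *rtd = substream->private_data;

            /* let the codec pick MCLK directly or bring it in via the FLL */
            return snd_soc_dai_set_sysclk(rtd->codec_dai, WM8904_CLK_AUTO,
                                          256 * params_rate(params),
                                          SND_SOC_CLOCK_IN);
    }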
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 18535b326680..ca42445b649d 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -25,6 +25,8 @@
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/gpio.h>
+#include <asm/unaligned.h>
+
#include "wm8994.h"
#define WM_FW_BLOCK_INFO 0xff
@@ -58,18 +60,15 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
- memcpy(&data32, fw->data, sizeof(data32));
- data32 = be32_to_cpu(data32);
+ data32 = get_unaligned_be32(fw->data);
dev_err(component->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
}
- memcpy(&data32, fw->data + 4, sizeof(data32));
- len = be32_to_cpu(data32);
+ len = get_unaligned_be32(fw->data + 4);
+ data32 = get_unaligned_be32(fw->data + 8);
- memcpy(&data32, fw->data + 8, sizeof(data32));
- data32 = be32_to_cpu(data32);
if ((data32 >> 24) & 0xff) {
dev_err(component->dev, "%s: unsupported firmware version %d\n",
name, (data32 >> 24) & 0xff);
@@ -87,9 +86,8 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
}
if (check) {
- memcpy(&data64, fw->data + 24, sizeof(u64));
- dev_info(component->dev, "%s timestamp %llx\n",
- name, be64_to_cpu(data64));
+ data64 = get_unaligned_be64(fw->data + 24);
+ dev_info(component->dev, "%s timestamp %llx\n", name, data64);
} else {
snd_soc_component_write(component, 0x102, 0x2);
snd_soc_component_write(component, 0x900, 0x2);
@@ -104,8 +102,7 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
goto err;
}
- memcpy(&data32, data + 4, sizeof(data32));
- block_len = be32_to_cpu(data32);
+ block_len = get_unaligned_be32(data + 4);
if (block_len + 8 > len) {
dev_err(component->dev, "%zd byte block longer than file\n",
block_len);
@@ -116,8 +113,7 @@ static int wm8958_dsp2_fw(struct snd_soc_component *component, const char *name,
goto err;
}
- memcpy(&data32, data, sizeof(data32));
- data32 = be32_to_cpu(data32);
+ data32 = get_unaligned_be32(data);
switch ((data32 >> 24) & 0xff) {
case WM_FW_BLOCK_INFO:
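The wm8958-dsp2 conversions above are behavior-preserving; as a sketch, the two forms below read the same big-endian 32-bit value from a possibly unaligned firmware buffer:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/unaligned.h>

    static u32 read_be32_old(const u8 *p)   /* pattern being removed */
    {
            __be32 raw;

            memcpy(&raw, p, sizeof(raw));
            return be32_to_cpu(raw);
    }

    static u32 read_be32_new(const u8 *p)   /* pattern being introduced */
    {
            return get_unaligned_be32(p);
    }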
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index d5fb7f5dd551..15ce64a48a87 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -167,12 +167,12 @@ static int configure_aif_clock(struct snd_soc_component *component, int aif)
switch (wm8994->sysclk[aif]) {
case WM8994_SYSCLK_MCLK1:
- rate = wm8994->mclk[0];
+ rate = wm8994->mclk_rate[0];
break;
case WM8994_SYSCLK_MCLK2:
reg1 |= 0x8;
- rate = wm8994->mclk[1];
+ rate = wm8994->mclk_rate[1];
break;
case WM8994_SYSCLK_FLL1:
@@ -1038,6 +1038,45 @@ static bool wm8994_check_class_w_digital(struct snd_soc_component *component)
return true;
}
+static int aif_mclk_set(struct snd_soc_component *component, int aif, bool enable)
+{
+ struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
+ unsigned int offset, val, clk_idx;
+ int ret;
+
+ if (aif)
+ offset = 4;
+ else
+ offset = 0;
+
+ val = snd_soc_component_read32(component, WM8994_AIF1_CLOCKING_1 + offset);
+ val &= WM8994_AIF1CLK_SRC_MASK;
+
+ switch (val) {
+ case 0:
+ clk_idx = WM8994_MCLK1;
+ break;
+ case 1:
+ clk_idx = WM8994_MCLK2;
+ break;
+ default:
+ return 0;
+ }
+
+ if (enable) {
+ ret = clk_prepare_enable(wm8994->mclk[clk_idx].clk);
+ if (ret < 0) {
+ dev_err(component->dev, "Failed to enable MCLK%d\n",
+ clk_idx);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(wm8994->mclk[clk_idx].clk);
+ }
+
+ return 0;
+}
+
static int aif1clk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1045,7 +1084,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
struct wm8994 *control = wm8994->wm8994;
int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
- int i;
+ int ret, i;
int dac;
int adc;
int val;
@@ -1061,6 +1100,10 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ ret = aif_mclk_set(component, 0, true);
+ if (ret < 0)
+ return ret;
+
/* Don't enable timeslot 2 if not in use */
if (wm8994->channels[0] <= 2)
mask &= ~(WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
@@ -1133,6 +1176,12 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
break;
}
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ aif_mclk_set(component, 0, false);
+ break;
+ }
+
return 0;
}
@@ -1140,13 +1189,17 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
- int i;
+ int ret, i;
int dac;
int adc;
int val;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ ret = aif_mclk_set(component, 1, true);
+ if (ret < 0)
+ return ret;
+
val = snd_soc_component_read32(component, WM8994_AIF2_CONTROL_1);
if ((val & WM8994_AIF2ADCL_SRC) &&
(val & WM8994_AIF2ADCR_SRC))
@@ -1218,6 +1271,12 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
break;
}
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ aif_mclk_set(component, 1, false);
+ break;
+ }
+
return 0;
}
@@ -1623,10 +1682,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
- SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -2141,6 +2200,7 @@ static int _wm8994_set_fll(struct snd_soc_component *component, int id, int src,
u16 reg, clk1, aif_reg, aif_src;
unsigned long timeout;
bool was_enabled;
+ struct clk *mclk;
switch (id) {
case WM8994_FLL1:
@@ -2216,6 +2276,27 @@ static int _wm8994_set_fll(struct snd_soc_component *component, int id, int src,
snd_soc_component_update_bits(component, WM8994_FLL1_CONTROL_1 + reg_offset,
WM8994_FLL1_ENA, 0);
+ /* Disable MCLK if needed before we possibly change to a new clock parent */
+ if (was_enabled) {
+ reg = snd_soc_component_read32(component, WM8994_FLL1_CONTROL_5
+ + reg_offset);
+ reg = ((reg & WM8994_FLL1_REFCLK_SRC_MASK)
+ >> WM8994_FLL1_REFCLK_SRC_SHIFT) + 1;
+
+ switch (reg) {
+ case WM8994_FLL_SRC_MCLK1:
+ mclk = wm8994->mclk[WM8994_MCLK1].clk;
+ break;
+ case WM8994_FLL_SRC_MCLK2:
+ mclk = wm8994->mclk[WM8994_MCLK2].clk;
+ break;
+ default:
+ mclk = NULL;
+ }
+
+ clk_disable_unprepare(mclk);
+ }
+
if (wm8994->fll_byp && src == WM8994_FLL_SRC_BCLK &&
freq_in == freq_out && freq_out) {
dev_dbg(component->dev, "Bypassing FLL%d\n", id + 1);
@@ -2260,10 +2341,29 @@ static int _wm8994_set_fll(struct snd_soc_component *component, int id, int src,
/* Clear any pending completion from a previous failure */
try_wait_for_completion(&wm8994->fll_locked[id]);
+ switch (src) {
+ case WM8994_FLL_SRC_MCLK1:
+ mclk = wm8994->mclk[WM8994_MCLK1].clk;
+ break;
+ case WM8994_FLL_SRC_MCLK2:
+ mclk = wm8994->mclk[WM8994_MCLK2].clk;
+ break;
+ default:
+ mclk = NULL;
+ }
+
/* Enable (with fractional mode if required) */
if (freq_out) {
+ ret = clk_prepare_enable(mclk);
+ if (ret < 0) {
+ dev_err(component->dev, "Failed to enable MCLK for FLL%d\n",
+ id + 1);
+ return ret;
+ }
+
/* Enable VMID if we need it */
if (!was_enabled) {
active_reference(component);
switch (control->type) {
@@ -2372,12 +2472,29 @@ static int wm8994_set_fll(struct snd_soc_dai *dai, int id, int src,
return _wm8994_set_fll(dai->component, id, src, freq_in, freq_out);
}
+static int wm8994_set_mclk_rate(struct wm8994_priv *wm8994, unsigned int id,
+ unsigned int *freq)
+{
+ int ret;
+
+ if (!wm8994->mclk[id].clk || *freq == wm8994->mclk_rate[id])
+ return 0;
+
+ ret = clk_set_rate(wm8994->mclk[id].clk, *freq);
+ if (ret < 0)
+ return ret;
+
+ *freq = clk_get_rate(wm8994->mclk[id].clk);
+
+ return 0;
+}
+
static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct snd_soc_component *component = dai->component;
struct wm8994_priv *wm8994 = snd_soc_component_get_drvdata(component);
- int i;
+ int ret, i;
switch (dai->id) {
case 1:
@@ -2392,7 +2509,12 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
switch (clk_id) {
case WM8994_SYSCLK_MCLK1:
wm8994->sysclk[dai->id - 1] = WM8994_SYSCLK_MCLK1;
- wm8994->mclk[0] = freq;
+
+ ret = wm8994_set_mclk_rate(wm8994, dai->id - 1, &freq);
+ if (ret < 0)
+ return ret;
+
+ wm8994->mclk_rate[0] = freq;
dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n",
dai->id, freq);
break;
@@ -2400,7 +2522,12 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
case WM8994_SYSCLK_MCLK2:
/* TODO: Set GPIO AF */
wm8994->sysclk[dai->id - 1] = WM8994_SYSCLK_MCLK2;
- wm8994->mclk[1] = freq;
+
+ ret = wm8994_set_mclk_rate(wm8994, dai->id - 1, &freq);
+ if (ret < 0)
+ return ret;
+
+ wm8994->mclk_rate[1] = freq;
dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n",
dai->id, freq);
break;
@@ -4456,6 +4583,7 @@ static const struct snd_soc_component_driver soc_component_dev_wm8994 = {
static int wm8994_probe(struct platform_device *pdev)
{
struct wm8994_priv *wm8994;
+ int ret;
wm8994 = devm_kzalloc(&pdev->dev, sizeof(struct wm8994_priv),
GFP_KERNEL);
@@ -4467,6 +4595,16 @@ static int wm8994_probe(struct platform_device *pdev)
wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
+ wm8994->mclk[WM8994_MCLK1].id = "MCLK1";
+ wm8994->mclk[WM8994_MCLK2].id = "MCLK2";
+
+ ret = devm_clk_bulk_get_optional(pdev->dev.parent, ARRAY_SIZE(wm8994->mclk),
+ wm8994->mclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
pm_runtime_enable(&pdev->dev);
pm_runtime_idle(&pdev->dev);
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 1d6f2abe1c11..41c4b126114d 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -6,6 +6,7 @@
#ifndef _WM8994_H
#define _WM8994_H
+#include <linux/clk.h>
#include <sound/soc.h>
#include <linux/firmware.h>
#include <linux/completion.h>
@@ -14,6 +15,12 @@
#include "wm_hubs.h"
+enum {
+ WM8994_MCLK1,
+ WM8994_MCLK2,
+ WM8994_NUM_MCLK
+};
+
/* Sources for AIF1/2 SYSCLK - use with set_dai_sysclk() */
#define WM8994_SYSCLK_MCLK1 1
#define WM8994_SYSCLK_MCLK2 2
@@ -73,9 +80,10 @@ struct wm8994;
struct wm8994_priv {
struct wm_hubs_data hubs;
struct wm8994 *wm8994;
+ struct clk_bulk_data mclk[WM8994_NUM_MCLK];
int sysclk[2];
int sysclk_rate[2];
- int mclk[2];
+ int mclk_rate[2];
int aifclk[2];
int aifdiv[2];
int channels[2];
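A sketch of the clk_bulk pattern the wm8994 changes rely on (error handling trimmed; the device pointer and include are assumed): the ids are filled in before the optional bulk get, and a missing optional clock leaves .clk == NULL, which the clk API treats as a no-op:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include "wm8994.h"

    static int example_get_mclks(struct device *dev,
                                 struct clk_bulk_data *mclk /* [WM8994_NUM_MCLK] */)
    {
            int ret;

            mclk[WM8994_MCLK1].id = "MCLK1";
            mclk[WM8994_MCLK2].id = "MCLK2";

            ret = devm_clk_bulk_get_optional(dev, WM8994_NUM_MCLK, mclk);
            if (ret < 0)
                    return ret;

            /* NULL clk handles from missing optional clocks are accepted */
            return clk_prepare_enable(mclk[WM8994_MCLK1].clk);
    }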
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 9b8bb7bbe945..2a9b610f6d43 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -599,6 +599,9 @@ struct wm_coeff_ctl_ops {
struct wm_coeff_ctl {
const char *name;
const char *fw_name;
+ /* Subname is needed to match the control with the firmware */
+ const char *subname;
+ unsigned int subname_len;
struct wm_adsp_alg_region alg_region;
struct wm_coeff_ctl_ops ops;
struct wm_adsp *dsp;
@@ -1399,6 +1402,7 @@ static void wm_adsp_free_ctl_blk(struct wm_coeff_ctl *ctl)
{
kfree(ctl->cache);
kfree(ctl->name);
+ kfree(ctl->subname);
kfree(ctl);
}
@@ -1472,6 +1476,15 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
ret = -ENOMEM;
goto err_ctl;
}
+ if (subname) {
+ ctl->subname_len = subname_len;
+ ctl->subname = kmemdup(subname,
+ strlen(subname) + 1, GFP_KERNEL);
+ if (!ctl->subname) {
+ ret = -ENOMEM;
+ goto err_ctl_name;
+ }
+ }
ctl->enabled = 1;
ctl->set = 0;
ctl->ops.xget = wm_coeff_get;
@@ -1485,7 +1498,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
if (!ctl->cache) {
ret = -ENOMEM;
- goto err_ctl_name;
+ goto err_ctl_subname;
}
list_add(&ctl->list, &dsp->ctl_list);
@@ -1508,6 +1521,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
err_ctl_cache:
kfree(ctl->cache);
+err_ctl_subname:
+ kfree(ctl->subname);
err_ctl_name:
kfree(ctl->name);
err_ctl:
@@ -1995,6 +2010,70 @@ out:
return ret;
}
+/*
+ * Find the wm_coeff_ctl whose subname matches the given name;
+ * return NULL if no match is found.
+ */
+static struct wm_coeff_ctl *wm_adsp_get_ctl(struct wm_adsp *dsp,
+ const char *name, int type,
+ unsigned int alg)
+{
+ struct wm_coeff_ctl *pos, *rslt = NULL;
+
+ list_for_each_entry(pos, &dsp->ctl_list, list) {
+ if (!pos->subname)
+ continue;
+ if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ pos->alg_region.alg == alg &&
+ pos->alg_region.type == type) {
+ rslt = pos;
+ break;
+ }
+ }
+
+ return rslt;
+}
+
+int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len)
+{
+ struct wm_coeff_ctl *ctl;
+ struct snd_kcontrol *kcontrol;
+ int ret;
+
+ ctl = wm_adsp_get_ctl(dsp, name, type, alg);
+ if (!ctl)
+ return -EINVAL;
+
+ if (len > ctl->len)
+ return -EINVAL;
+
+ ret = wm_coeff_write_control(ctl, buf, len);
+
+ kcontrol = snd_soc_card_get_kcontrol(dsp->component->card, ctl->name);
+ snd_ctl_notify(dsp->component->card->snd_card,
+ SNDRV_CTL_EVENT_MASK_VALUE, &kcontrol->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm_adsp_write_ctl);
+
+int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len)
+{
+ struct wm_coeff_ctl *ctl;
+
+ ctl = wm_adsp_get_ctl(dsp, name, type, alg);
+ if (!ctl)
+ return -EINVAL;
+
+ if (len > ctl->len)
+ return -EINVAL;
+
+ return wm_coeff_read_control(ctl, buf, len);
+}
+EXPORT_SYMBOL_GPL(wm_adsp_read_ctl);
+
static void wm_adsp_ctl_fixup_base(struct wm_adsp *dsp,
const struct wm_adsp_alg_region *alg_region)
{
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index aa634ef6c9f5..4c481cf20275 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -201,5 +201,9 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp);
int wm_adsp_compr_copy(struct snd_compr_stream *stream,
char __user *buf, size_t count);
+int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len);
+int wm_adsp_read_ctl(struct wm_adsp *dsp, const char *name, int type,
+ unsigned int alg, void *buf, size_t len);
#endif
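A hypothetical round-trip through the two new exports (the control name, region type, and algorithm ID below are made up for illustration; real callers match them against the loaded firmware's coefficient controls):

    #include "wm_adsp.h"

    static int example_ctl_roundtrip(struct wm_adsp *dsp)
    {
            u32 val = 1;
            int ret;

            ret = wm_adsp_write_ctl(dsp, "EXAMPLE_CTL", WMFW_ADSP2_YM,
                                    0x190001, &val, sizeof(val));
            if (ret < 0)
                    return ret;

            /* read back what the firmware currently holds */
            return wm_adsp_read_ctl(dsp, "EXAMPLE_CTL", WMFW_ADSP2_YM,
                                    0x190001, &val, sizeof(val));
    }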
diff --git a/sound/soc/dwc/dwc-pcm.c b/sound/soc/dwc/dwc-pcm.c
index a9ae91c4597f..4771eb5fbe2a 100644
--- a/sound/soc/dwc/dwc-pcm.c
+++ b/sound/soc/dwc/dwc-pcm.c
@@ -135,7 +135,8 @@ void dw_pcm_pop_rx(struct dw_i2s_dev *dev)
dw_pcm_transfer(dev, false);
}
-static int dw_pcm_open(struct snd_pcm_substream *substream)
+static int dw_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -148,14 +149,16 @@ static int dw_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int dw_pcm_close(struct snd_pcm_substream *substream)
+static int dw_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
synchronize_rcu();
return 0;
}
-static int dw_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int dw_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dw_i2s_dev *dev = runtime->private_data;
@@ -192,12 +195,14 @@ static int dw_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int dw_pcm_hw_free(struct snd_pcm_substream *substream)
+static int dw_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dw_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dw_i2s_dev *dev = runtime->private_data;
@@ -231,7 +236,8 @@ static int dw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t dw_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t dw_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dw_i2s_dev *dev = runtime->private_data;
@@ -245,35 +251,33 @@ static snd_pcm_uframes_t dw_pcm_pointer(struct snd_pcm_substream *substream)
return pos < runtime->buffer_size ? pos : 0;
}
-static int dw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int dw_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
size_t size = dw_pcm_hardware.buffer_bytes_max;
snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL), size, size);
+ NULL, size, size);
return 0;
}
-static void dw_pcm_free(struct snd_pcm *pcm)
+static void dw_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
-static const struct snd_pcm_ops dw_pcm_ops = {
- .open = dw_pcm_open,
- .close = dw_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = dw_pcm_hw_params,
- .hw_free = dw_pcm_hw_free,
- .trigger = dw_pcm_trigger,
- .pointer = dw_pcm_pointer,
-};
-
static const struct snd_soc_component_driver dw_pcm_component = {
- .pcm_new = dw_pcm_new,
- .pcm_free = dw_pcm_free,
- .ops = &dw_pcm_ops,
+ .open = dw_pcm_open,
+ .close = dw_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = dw_pcm_hw_params,
+ .hw_free = dw_pcm_hw_free,
+ .trigger = dw_pcm_trigger,
+ .pointer = dw_pcm_pointer,
+ .pcm_construct = dw_pcm_new,
+ .pcm_destruct = dw_pcm_free,
};
int dw_pcm_register(struct platform_device *pdev)
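The dwc-pcm conversion above follows the ASoC core change that folds struct snd_pcm_ops into the component driver; a minimal sketch of the new callback shape (names hypothetical) — each PCM callback now receives the component itself as its first argument:

    #include <sound/soc.h>

    static int example_open(struct snd_soc_component *component,
                            struct snd_pcm_substream *substream)
    {
            return 0;       /* per-stream setup, now with the component handle */
    }

    static const struct snd_soc_component_driver example_component = {
            .name   = "example",
            .open   = example_open,
            .ioctl  = snd_soc_pcm_lib_ioctl,
    };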
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index aa99c008a925..65e8cd4be930 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -25,6 +25,16 @@ config SND_SOC_FSL_SAI
This option is only useful for out-of-tree drivers since
in-tree drivers select it automatically.
+config SND_SOC_FSL_MQS
+ tristate "Medium Quality Sound (MQS) module support"
+ depends on SND_SOC_FSL_SAI
+ select REGMAP_MMIO
+ help
+ Say Y if you want to add Medium Quality Sound (MQS)
+ support for Freescale CPUs.
+ This option is only useful for out-of-tree drivers since
+ in-tree drivers select it automatically.
+
config SND_SOC_FSL_AUDMIX
tristate "Audio Mixer (AUDMIX) module support"
select REGMAP_MMIO
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index c0dd04422fe9..8cde88c72d93 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -23,6 +23,7 @@ snd-soc-fsl-esai-objs := fsl_esai.o
snd-soc-fsl-micfil-objs := fsl_micfil.o
snd-soc-fsl-utils-objs := fsl_utils.o
snd-soc-fsl-dma-objs := fsl_dma.o
+snd-soc-fsl-mqs-objs := fsl_mqs.o
obj-$(CONFIG_SND_SOC_FSL_AUDMIX) += snd-soc-fsl-audmix.o
obj-$(CONFIG_SND_SOC_FSL_ASOC_CARD) += snd-soc-fsl-asoc-card.o
@@ -33,6 +34,7 @@ obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
obj-$(CONFIG_SND_SOC_FSL_MICFIL) += snd-soc-fsl-micfil.o
obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
+obj-$(CONFIG_SND_SOC_FSL_MQS) += snd-soc-fsl-mqs.o
obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
# MPC5200 Platform Support
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index cfa40ef6b1ca..a3cfceea7d2f 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -115,7 +115,7 @@ static void fsl_asrc_sel_proc(int inrate, int outrate,
* within range [ANCA, ANCA+ANCB-1], depends on the channels of pair A
* while pair A and pair C are comparatively independent.
*/
-static int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
+int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
{
enum asrc_pair_index index = ASRC_INVALID_PAIR;
struct fsl_asrc *asrc_priv = pair->asrc_priv;
@@ -158,7 +158,7 @@ static int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
*
* It clears the resource from asrc_priv and releases the occupied channels.
*/
-static void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
+void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
{
struct fsl_asrc *asrc_priv = pair->asrc_priv;
enum asrc_pair_index index = pair->index;
@@ -259,14 +259,24 @@ static int fsl_asrc_set_ideal_ratio(struct fsl_asrc_pair *pair,
* It configures those ASRC registers according to a configuration instance
* of struct asrc_config which includes in/output sample rate, width, channel
* and clock settings.
+ *
+ * Note:
+ * The ideal ratio configuration can work with a flexible clock rate setting.
+ * Using IDEAL_RATIO_RATE gives a faster conversion speed but can overload
+ * the ASRC. For regular audio playback, the clock rate should not be slower
+ * than a clock rate aligned with the output sample rate; for a use case
+ * requiring faster conversion, set use_ideal_rate to get the faster speed.
*/
-static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
+static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
{
struct asrc_config *config = pair->config;
struct fsl_asrc *asrc_priv = pair->asrc_priv;
enum asrc_pair_index index = pair->index;
+ enum asrc_word_width input_word_width;
+ enum asrc_word_width output_word_width;
u32 inrate, outrate, indiv, outdiv;
- u32 clk_index[2], div[2];
+ u32 clk_index[2], div[2], rem[2];
+ u64 clk_rate;
int in, out, channels;
int pre_proc, post_proc;
struct clk *clk;
@@ -283,9 +293,32 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
return -EINVAL;
}
- /* Validate output width */
- if (config->output_word_width == ASRC_WIDTH_8_BIT) {
- pair_err("does not support 8bit width output\n");
+ switch (snd_pcm_format_width(config->input_format)) {
+ case 8:
+ input_word_width = ASRC_WIDTH_8_BIT;
+ break;
+ case 16:
+ input_word_width = ASRC_WIDTH_16_BIT;
+ break;
+ case 24:
+ input_word_width = ASRC_WIDTH_24_BIT;
+ break;
+ default:
+ pair_err("does not support this input format, %d\n",
+ config->input_format);
+ return -EINVAL;
+ }
+
+ switch (snd_pcm_format_width(config->output_format)) {
+ case 16:
+ output_word_width = ASRC_WIDTH_16_BIT;
+ break;
+ case 24:
+ output_word_width = ASRC_WIDTH_24_BIT;
+ break;
+ default:
+ pair_err("does not support this output format, %d\n",
+ config->output_format);
return -EINVAL;
}
@@ -326,27 +359,42 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
/* We only have output clock for ideal ratio mode */
clk = asrc_priv->asrck_clk[clk_index[ideal ? OUT : IN]];
- div[IN] = clk_get_rate(clk) / inrate;
- if (div[IN] == 0) {
+ clk_rate = clk_get_rate(clk);
+ rem[IN] = do_div(clk_rate, inrate);
+ div[IN] = (u32)clk_rate;
+
+ /*
+ * The divider range is [1, 1024], defined by the hardware. For non-
+ * ideal ratio configuration, clock rate has to be strictly aligned
+ * with the sample rate. For ideal ratio configuration, clock rates
+ * only result in different conversion speeds. So the remainder does not
+ * matter, as long as we keep the divider within its valid range.
+ */
+ if (div[IN] == 0 || (!ideal && (div[IN] > 1024 || rem[IN] != 0))) {
pair_err("failed to support input sample rate %dHz by asrck_%x\n",
inrate, clk_index[ideal ? OUT : IN]);
return -EINVAL;
}
- clk = asrc_priv->asrck_clk[clk_index[OUT]];
+ div[IN] = min_t(u32, 1024, div[IN]);
- /* Use fixed output rate for Ideal Ratio mode (INCLK_NONE) */
- if (ideal)
- div[OUT] = clk_get_rate(clk) / IDEAL_RATIO_RATE;
+ clk = asrc_priv->asrck_clk[clk_index[OUT]];
+ clk_rate = clk_get_rate(clk);
+ if (ideal && use_ideal_rate)
+ rem[OUT] = do_div(clk_rate, IDEAL_RATIO_RATE);
else
- div[OUT] = clk_get_rate(clk) / outrate;
+ rem[OUT] = do_div(clk_rate, outrate);
+ div[OUT] = clk_rate;
- if (div[OUT] == 0) {
+ /* Output divider has the same limitation as the input one */
+ if (div[OUT] == 0 || (!ideal && (div[OUT] > 1024 || rem[OUT] != 0))) {
pair_err("failed to support output sample rate %dHz by asrck_%x\n",
outrate, clk_index[OUT]);
return -EINVAL;
}
+ div[OUT] = min_t(u32, 1024, div[OUT]);
+
/* Set the channel number */
channels = config->channel_num;
@@ -383,8 +431,8 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair)
/* Implement word_width configurations */
regmap_update_bits(asrc_priv->regmap, REG_ASRMCR1(index),
ASRMCR1i_OW16_MASK | ASRMCR1i_IWD_MASK,
- ASRMCR1i_OW16(config->output_word_width) |
- ASRMCR1i_IWD(config->input_word_width));
+ ASRMCR1i_OW16(output_word_width) |
+ ASRMCR1i_IWD(input_word_width));
/* Enable BUFFER STALL */
regmap_update_bits(asrc_priv->regmap, REG_ASRMCR(index),
@@ -497,13 +545,13 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
- int width = params_width(params);
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
struct asrc_config config;
- int word_width, ret;
+ snd_pcm_format_t format;
+ int ret;
ret = fsl_asrc_request_pair(channels, pair);
if (ret) {
@@ -513,15 +561,10 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
pair->config = &config;
- if (width == 16)
- width = ASRC_WIDTH_16_BIT;
- else
- width = ASRC_WIDTH_24_BIT;
-
if (asrc_priv->asrc_width == 16)
- word_width = ASRC_WIDTH_16_BIT;
+ format = SNDRV_PCM_FORMAT_S16_LE;
else
- word_width = ASRC_WIDTH_24_BIT;
+ format = SNDRV_PCM_FORMAT_S24_LE;
config.pair = pair->index;
config.channel_num = channels;
@@ -529,18 +572,18 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
config.outclk = OUTCLK_ASRCK1_CLK;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- config.input_word_width = width;
- config.output_word_width = word_width;
+ config.input_format = params_format(params);
+ config.output_format = format;
config.input_sample_rate = rate;
config.output_sample_rate = asrc_priv->asrc_rate;
} else {
- config.input_word_width = word_width;
- config.output_word_width = width;
+ config.input_format = format;
+ config.output_format = params_format(params);
config.input_sample_rate = asrc_priv->asrc_rate;
config.output_sample_rate = rate;
}
- ret = fsl_asrc_config_pair(pair);
+ ret = fsl_asrc_config_pair(pair, false);
if (ret) {
dev_err(dai->dev, "fail to config asrc pair\n");
return ret;
@@ -604,7 +647,7 @@ static int fsl_asrc_dai_probe(struct snd_soc_dai *dai)
#define FSL_ASRC_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S20_3LE)
+ SNDRV_PCM_FMTBIT_S24_3LE)
static struct snd_soc_dai_driver fsl_asrc_dai = {
.probe = fsl_asrc_dai_probe,
@@ -615,7 +658,8 @@ static struct snd_soc_dai_driver fsl_asrc_dai = {
.rate_min = 5512,
.rate_max = 192000,
.rates = SNDRV_PCM_RATE_KNOT,
- .formats = FSL_ASRC_FORMATS,
+ .formats = FSL_ASRC_FORMATS |
+ SNDRV_PCM_FMTBIT_S8,
},
.capture = {
.stream_name = "ASRC-Capture",
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index c60075112570..2b57e8c53728 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -342,8 +342,8 @@ struct asrc_config {
unsigned int dma_buffer_size;
unsigned int input_sample_rate;
unsigned int output_sample_rate;
- enum asrc_word_width input_word_width;
- enum asrc_word_width output_word_width;
+ snd_pcm_format_t input_format;
+ snd_pcm_format_t output_format;
enum asrc_inclk inclk;
enum asrc_outclk outclk;
};
@@ -462,4 +462,7 @@ struct fsl_asrc {
#define DRV_NAME "fsl-asrc-dai"
extern struct snd_soc_component_driver fsl_asrc_component;
struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir);
+int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair);
+void fsl_asrc_release_pair(struct fsl_asrc_pair *pair);
+
#endif /* _FSL_ASRC_H */
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 01052a0808b0..d6146de9acd2 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -16,13 +16,11 @@
#define FSL_ASRC_DMABUF_SIZE (256 * 1024)
-static const struct snd_pcm_hardware snd_imx_hardware = {
+static struct snd_pcm_hardware snd_imx_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME,
+ SNDRV_PCM_INFO_MMAP_VALID,
.buffer_bytes_max = FSL_ASRC_DMABUF_SIZE,
.period_bytes_min = 128,
.period_bytes_max = 65535, /* Limited by SDMA engine */
@@ -54,13 +52,12 @@ static void fsl_asrc_dma_complete(void *arg)
snd_pcm_period_elapsed(substream);
}
-static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream,
+ struct snd_soc_component *component)
{
u8 dir = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? OUT : IN;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
unsigned long flags = DMA_CTRL_ACK;
@@ -97,7 +94,8 @@ static int fsl_asrc_dma_prepare_and_submit(struct snd_pcm_substream *substream)
return 0;
}
-static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int fsl_asrc_dma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -107,7 +105,7 @@ static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ret = fsl_asrc_dma_prepare_and_submit(substream);
+ ret = fsl_asrc_dma_prepare_and_submit(substream, component);
if (ret)
return ret;
dma_async_issue_pending(pair->dma_chan[IN]);
@@ -126,7 +124,8 @@ static int fsl_asrc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
+static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
@@ -134,7 +133,6 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct snd_dmaengine_dai_dma_data *dma_params_fe = NULL;
struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
struct fsl_asrc *asrc_priv = pair->asrc_priv;
@@ -249,7 +247,8 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int fsl_asrc_dma_hw_free(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -268,14 +267,27 @@ static int fsl_asrc_dma_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int fsl_asrc_dma_startup(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_startup(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_dmaengine_dai_dma_data *dma_data;
struct device *dev = component->dev;
struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
struct fsl_asrc_pair *pair;
+ struct dma_chan *tmp_chan = NULL;
+ u8 dir = tx ? OUT : IN;
+ bool release_pair = true;
+ int ret = 0;
+
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(dev, "failed to set pcm hw params periods\n");
+ return ret;
+ }
pair = kzalloc(sizeof(struct fsl_asrc_pair), GFP_KERNEL);
if (!pair)
@@ -285,14 +297,54 @@ static int fsl_asrc_dma_startup(struct snd_pcm_substream *substream)
runtime->private_data = pair;
- snd_pcm_hw_constraint_integer(substream->runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
+ /*
+ * Request a dummy pair, which will be released later.
+ * The request-pair function needs the channel number as input; for
+ * this dummy pair we just request one channel temporarily.
+ */
+ ret = fsl_asrc_request_pair(1, pair);
+ if (ret < 0) {
+ dev_err(dev, "failed to request asrc pair\n");
+ goto req_pair_err;
+ }
+
+ /* Request a dummy dma channel, which will be released later. */
+ tmp_chan = fsl_asrc_get_dma_channel(pair, dir);
+ if (!tmp_chan) {
+ dev_err(dev, "failed to get dma channel\n");
+ ret = -EINVAL;
+ goto dma_chan_err;
+ }
+
+ dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ /* Refine the snd_imx_hardware according to caps of DMA. */
+ ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
+ dma_data,
+ &snd_imx_hardware,
+ tmp_chan);
+ if (ret < 0) {
+ dev_err(dev, "failed to refine runtime hwparams\n");
+ goto out;
+ }
+
+ release_pair = false;
snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
- return 0;
+out:
+ dma_release_channel(tmp_chan);
+
+dma_chan_err:
+ fsl_asrc_release_pair(pair);
+
+req_pair_err:
+ if (release_pair)
+ kfree(pair);
+
+ return ret;
}
-static int fsl_asrc_dma_shutdown(struct snd_pcm_substream *substream)
+static int fsl_asrc_dma_shutdown(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -311,7 +363,9 @@ static int fsl_asrc_dma_shutdown(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t
+fsl_asrc_dma_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
@@ -319,17 +373,8 @@ static snd_pcm_uframes_t fsl_asrc_dma_pcm_pointer(struct snd_pcm_substream *subs
return bytes_to_frames(substream->runtime, pair->pos);
}
-static const struct snd_pcm_ops fsl_asrc_dma_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fsl_asrc_dma_hw_params,
- .hw_free = fsl_asrc_dma_hw_free,
- .trigger = fsl_asrc_dma_trigger,
- .open = fsl_asrc_dma_startup,
- .close = fsl_asrc_dma_shutdown,
- .pointer = fsl_asrc_dma_pcm_pointer,
-};
-
-static int fsl_asrc_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int fsl_asrc_dma_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm_substream *substream;
@@ -364,7 +409,8 @@ err:
return ret;
}
-static void fsl_asrc_dma_pcm_free(struct snd_pcm *pcm)
+static void fsl_asrc_dma_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -382,8 +428,14 @@ static void fsl_asrc_dma_pcm_free(struct snd_pcm *pcm)
struct snd_soc_component_driver fsl_asrc_component = {
.name = DRV_NAME,
- .ops = &fsl_asrc_dma_pcm_ops,
- .pcm_new = fsl_asrc_dma_pcm_new,
- .pcm_free = fsl_asrc_dma_pcm_free,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = fsl_asrc_dma_hw_params,
+ .hw_free = fsl_asrc_dma_hw_free,
+ .trigger = fsl_asrc_dma_trigger,
+ .open = fsl_asrc_dma_startup,
+ .close = fsl_asrc_dma_shutdown,
+ .pointer = fsl_asrc_dma_pcm_pointer,
+ .pcm_construct = fsl_asrc_dma_pcm_new,
+ .pcm_destruct = fsl_asrc_dma_pcm_free,
};
EXPORT_SYMBOL_GPL(fsl_asrc_component);
diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c
index c7e4e9757dce..a1db1bce330f 100644
--- a/sound/soc/fsl/fsl_audmix.c
+++ b/sound/soc/fsl/fsl_audmix.c
@@ -286,6 +286,7 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct fsl_audmix *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned long lock_flags;
/* Capture stream shall not be handled */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
@@ -295,12 +296,16 @@ static int fsl_audmix_dai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ spin_lock_irqsave(&priv->lock, lock_flags);
priv->tdms |= BIT(dai->driver->id);
+ spin_unlock_irqrestore(&priv->lock, lock_flags);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ spin_lock_irqsave(&priv->lock, lock_flags);
priv->tdms &= ~BIT(dai->driver->id);
+ spin_unlock_irqrestore(&priv->lock, lock_flags);
break;
default:
return -EINVAL;
@@ -491,6 +496,7 @@ static int fsl_audmix_probe(struct platform_device *pdev)
return PTR_ERR(priv->ipg_clk);
}
+ spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, priv);
pm_runtime_enable(dev);
diff --git a/sound/soc/fsl/fsl_audmix.h b/sound/soc/fsl/fsl_audmix.h
index 7812ffec45c5..479f05695d53 100644
--- a/sound/soc/fsl/fsl_audmix.h
+++ b/sound/soc/fsl/fsl_audmix.h
@@ -96,6 +96,7 @@ struct fsl_audmix {
struct platform_device *pdev;
struct regmap *regmap;
struct clk *ipg_clk;
+ spinlock_t lock; /* Protect tdms */
u8 tdms;
};
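A sketch of the locking the audmix changes introduce (field names taken from the struct above; the include path is assumed): trigger() may run concurrently for several DAIs, so the read-modify-write of the tdms bitmask must be serialized:

    #include <linux/bits.h>
    #include <linux/spinlock.h>
    #include "fsl_audmix.h"

    static void example_set_tdm(struct fsl_audmix *priv, int id, bool on)
    {
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);
            if (on)
                    priv->tdms |= BIT(id);
            else
                    priv->tdms &= ~BIT(id);
            spin_unlock_irqrestore(&priv->lock, flags);
    }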
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index e22508301412..2868c4f97cb2 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -201,8 +201,7 @@ static irqreturn_t fsl_dma_isr(int irq, void *dev_id)
struct fsl_dma_private *dma_private = dev_id;
struct snd_pcm_substream *substream = dma_private->substream;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
- struct device *dev = component->dev;
+ struct device *dev = rtd->dev;
struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
irqreturn_t ret = IRQ_NONE;
u32 sr, sr2 = 0;
@@ -280,7 +279,8 @@ static irqreturn_t fsl_dma_isr(int irq, void *dev_id)
* Regardless of where the memory is actually allocated, since the device can
* technically DMA to any 36-bit address, we do need to set the DMA mask to 36.
*/
-static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int fsl_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -380,11 +380,10 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
* buffer, which is what ALSA expects. We're just dividing it into
* contiguous parts, and creating a link descriptor for each one.
*/
-static int fsl_dma_open(struct snd_pcm_substream *substream)
+static int fsl_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct dma_object *dma =
container_of(component->driver, struct dma_object, dai);
@@ -533,13 +532,12 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
* and 8 bytes at a time). So we do not support packed 24-bit samples.
* 24-bit data must be padded to 32 bits.
*/
-static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int fsl_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
/* Number of bits per sample */
@@ -698,12 +696,11 @@ static int fsl_dma_hw_params(struct snd_pcm_substream *substream,
* The base address of the buffer is stored in the source_addr field of the
* first link descriptor.
*/
-static snd_pcm_uframes_t fsl_dma_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t fsl_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct ccsr_dma_channel __iomem *dma_channel = dma_private->dma_channel;
dma_addr_t position;
@@ -763,7 +760,8 @@ static snd_pcm_uframes_t fsl_dma_pointer(struct snd_pcm_substream *substream)
*
* This function can be called multiple times.
*/
-static int fsl_dma_hw_free(struct snd_pcm_substream *substream)
+static int fsl_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
@@ -796,12 +794,11 @@ static int fsl_dma_hw_free(struct snd_pcm_substream *substream)
/**
* fsl_dma_close: close the stream.
*/
-static int fsl_dma_close(struct snd_pcm_substream *substream)
+static int fsl_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct dma_object *dma =
container_of(component->driver, struct dma_object, dai);
@@ -824,7 +821,8 @@ static int fsl_dma_close(struct snd_pcm_substream *substream)
/*
* Remove this PCM driver.
*/
-static void fsl_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void fsl_dma_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
unsigned int i;
@@ -872,15 +870,6 @@ static struct device_node *find_ssi_node(struct device_node *dma_channel_np)
return NULL;
}
-static const struct snd_pcm_ops fsl_dma_ops = {
- .open = fsl_dma_open,
- .close = fsl_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fsl_dma_hw_params,
- .hw_free = fsl_dma_hw_free,
- .pointer = fsl_dma_pointer,
-};
-
static int fsl_soc_dma_probe(struct platform_device *pdev)
{
struct dma_object *dma;
@@ -912,9 +901,14 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
}
dma->dai.name = DRV_NAME;
- dma->dai.ops = &fsl_dma_ops;
- dma->dai.pcm_new = fsl_dma_new;
- dma->dai.pcm_free = fsl_dma_free_dma_buffers;
+ dma->dai.open = fsl_dma_open;
+ dma->dai.close = fsl_dma_close;
+ dma->dai.ioctl = snd_soc_pcm_lib_ioctl;
+ dma->dai.hw_params = fsl_dma_hw_params;
+ dma->dai.hw_free = fsl_dma_hw_free;
+ dma->dai.pointer = fsl_dma_pointer;
+ dma->dai.pcm_construct = fsl_dma_new;
+ dma->dai.pcm_destruct = fsl_dma_free_dma_buffers;
/* Store the SSI-specific information that we need */
dma->ssi_stx_phys = res.start + REG_SSI_STX0;
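The pattern in this hunk recurs throughout the series: the per-driver struct snd_pcm_ops, registered through the driver's .ops pointer, is replaced by callbacks on struct snd_soc_component_driver itself, each of which now receives the component as its first argument, so handlers no longer need the snd_soc_rtdcom_lookup() indirection. A minimal sketch of the new-style shape; the foo_* names are illustrative, not part of this patch:

    static int foo_open(struct snd_soc_component *component,
                        struct snd_pcm_substream *substream)
    {
        struct device *dev = component->dev; /* no rtdcom lookup needed */

        dev_dbg(dev, "stream opened\n");
        return 0;
    }

    static int foo_pcm_new(struct snd_soc_component *component,
                           struct snd_soc_pcm_runtime *rtd)
    {
        return 0; /* preallocate DMA buffers here */
    }

    static const struct snd_soc_component_driver foo_component = {
        .name          = "foo",
        .open          = foo_open,
        .ioctl         = snd_soc_pcm_lib_ioctl, /* was snd_pcm_lib_ioctl */
        .pcm_construct = foo_pcm_new,           /* was .pcm_new */
    };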
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index a78e4ab478df..c7a49d03463a 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -33,6 +33,7 @@
* @fsysclk: system clock source to derive HCK, SCK and FS
* @spbaclk: SPBA clock (optional, depending on SoC design)
* @task: tasklet to handle the reset operation
+ * @lock: spin lock between hw_reset() and trigger()
* @fifo_depth: depth of tx/rx FIFO
* @slot_width: width of each DAI slot
* @slots: number of slots
@@ -56,6 +57,7 @@ struct fsl_esai {
struct clk *fsysclk;
struct clk *spbaclk;
struct tasklet_struct task;
+ spinlock_t lock; /* Protect hw_reset and trigger */
u32 fifo_depth;
u32 slot_width;
u32 slots;
@@ -676,8 +678,10 @@ static void fsl_esai_hw_reset(unsigned long arg)
{
struct fsl_esai *esai_priv = (struct fsl_esai *)arg;
bool tx = true, rx = false, enabled[2];
+ unsigned long lock_flags;
u32 tfcr, rfcr;
+ spin_lock_irqsave(&esai_priv->lock, lock_flags);
/* Save the registers */
regmap_read(esai_priv->regmap, REG_ESAI_TFCR, &tfcr);
regmap_read(esai_priv->regmap, REG_ESAI_RFCR, &rfcr);
@@ -715,6 +719,8 @@ static void fsl_esai_hw_reset(unsigned long arg)
fsl_esai_trigger_start(esai_priv, tx);
if (enabled[rx])
fsl_esai_trigger_start(esai_priv, rx);
+
+ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
}
static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -722,6 +728,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
{
struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai);
bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned long lock_flags;
esai_priv->channels[tx] = substream->runtime->channels;
@@ -729,12 +736,16 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ spin_lock_irqsave(&esai_priv->lock, lock_flags);
fsl_esai_trigger_start(esai_priv, tx);
+ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ spin_lock_irqsave(&esai_priv->lock, lock_flags);
fsl_esai_trigger_stop(esai_priv, tx);
+ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
break;
default:
return -EINVAL;
@@ -1002,6 +1013,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, esai_priv);
+ spin_lock_init(&esai_priv->lock);
ret = fsl_esai_hw_init(esai_priv);
if (ret)
return ret;
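The ESAI hunk above closes a race between the tasklet-driven hw_reset() and trigger(): both paths now run under the same spinlock, taken with the irqsave variant since trigger() runs in atomic context. Reduced to a sketch with hypothetical names:

    struct foo_priv {
        spinlock_t lock; /* protects start/stop vs. async reset */
    };

    static void foo_hw_reset(unsigned long arg) /* tasklet context */
    {
        struct foo_priv *priv = (struct foo_priv *)arg;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        /* save registers, reset the block, restore, restart streams */
        spin_unlock_irqrestore(&priv->lock, flags);
    }

    static int foo_trigger(struct foo_priv *priv, bool start)
    {
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        /* start or stop atomically with respect to foo_hw_reset() */
        spin_unlock_irqrestore(&priv->lock, flags);
        return 0;
    }

    /* and in probe(): spin_lock_init(&priv->lock); */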
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
new file mode 100644
index 000000000000..0c813a45bba7
--- /dev/null
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// ALSA SoC IMX MQS driver
+//
+// Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
+// Copyright 2019 NXP
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+
+#define REG_MQS_CTRL 0x00
+
+#define MQS_EN_MASK (0x1 << 28)
+#define MQS_EN_SHIFT (28)
+#define MQS_SW_RST_MASK (0x1 << 24)
+#define MQS_SW_RST_SHIFT (24)
+#define MQS_OVERSAMPLE_MASK (0x1 << 20)
+#define MQS_OVERSAMPLE_SHIFT (20)
+#define MQS_CLK_DIV_MASK (0xFF << 0)
+#define MQS_CLK_DIV_SHIFT (0)
+
+/* codec private data */
+struct fsl_mqs {
+ struct regmap *regmap;
+ struct clk *mclk;
+ struct clk *ipg;
+
+ unsigned int reg_iomuxc_gpr2;
+ unsigned int reg_mqs_ctrl;
+ bool use_gpr;
+};
+
+#define FSL_MQS_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+#define FSL_MQS_FORMATS SNDRV_PCM_FMTBIT_S16_LE
+
+static int fsl_mqs_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
+ unsigned long mclk_rate;
+ int div, res;
+ int lrclk;
+
+ mclk_rate = clk_get_rate(mqs_priv->mclk);
+ lrclk = params_rate(params);
+
+	/*
+	 * mclk_rate / (oversample(32,64) * FS * 2 * divider) = repeat_rate;
+	 * if the repeat_rate is 8, MQS can achieve better quality.
+	 * The oversample rate is fixed at 32 currently.
+	 */
+ div = mclk_rate / (32 * lrclk * 2 * 8);
+ res = mclk_rate % (32 * lrclk * 2 * 8);
+
+ if (res == 0 && div > 0 && div <= 256) {
+ if (mqs_priv->use_gpr) {
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_CLK_DIV_MASK,
+ (div - 1) << IMX6SX_GPR2_MQS_CLK_DIV_SHIFT);
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_OVERSAMPLE_MASK, 0);
+ } else {
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_CLK_DIV_MASK,
+ (div - 1) << MQS_CLK_DIV_SHIFT);
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_OVERSAMPLE_MASK, 0);
+ }
+ } else {
+ dev_err(component->dev, "can't get proper divider\n");
+ }
+
+ return 0;
+}
+
+static int fsl_mqs_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	/* Only LEFT_J format and slave mode are supported. */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_LEFT_J:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fsl_mqs_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
+
+ if (mqs_priv->use_gpr)
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_EN_MASK,
+ 1 << IMX6SX_GPR2_MQS_EN_SHIFT);
+ else
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_EN_MASK,
+ 1 << MQS_EN_SHIFT);
+ return 0;
+}
+
+static void fsl_mqs_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct fsl_mqs *mqs_priv = snd_soc_component_get_drvdata(component);
+
+ if (mqs_priv->use_gpr)
+ regmap_update_bits(mqs_priv->regmap, IOMUXC_GPR2,
+ IMX6SX_GPR2_MQS_EN_MASK, 0);
+ else
+ regmap_update_bits(mqs_priv->regmap, REG_MQS_CTRL,
+ MQS_EN_MASK, 0);
+}
+
+static const struct snd_soc_component_driver soc_codec_fsl_mqs = {
+ .idle_bias_on = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct snd_soc_dai_ops fsl_mqs_dai_ops = {
+ .startup = fsl_mqs_startup,
+ .shutdown = fsl_mqs_shutdown,
+ .hw_params = fsl_mqs_hw_params,
+ .set_fmt = fsl_mqs_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver fsl_mqs_dai = {
+ .name = "fsl-mqs-dai",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = FSL_MQS_RATES,
+ .formats = FSL_MQS_FORMATS,
+ },
+ .ops = &fsl_mqs_dai_ops,
+};
+
+static const struct regmap_config fsl_mqs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = REG_MQS_CTRL,
+ .cache_type = REGCACHE_NONE,
+};
+
+static int fsl_mqs_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *gpr_np = NULL;
+ struct fsl_mqs *mqs_priv;
+ void __iomem *regs;
+ int ret;
+
+ mqs_priv = devm_kzalloc(&pdev->dev, sizeof(*mqs_priv), GFP_KERNEL);
+ if (!mqs_priv)
+ return -ENOMEM;
+
+	/* On i.MX6SX the MQS control register is in the GPR domain,
+	 * but on i.MX8QM/i.MX8QXP the control register is moved
+	 * to its own register domain.
+	 */
+ if (of_device_is_compatible(np, "fsl,imx8qm-mqs"))
+ mqs_priv->use_gpr = false;
+ else
+ mqs_priv->use_gpr = true;
+
+ if (mqs_priv->use_gpr) {
+ gpr_np = of_parse_phandle(np, "gpr", 0);
+ if (!gpr_np) {
+ dev_err(&pdev->dev, "failed to get gpr node by phandle\n");
+ return -EINVAL;
+ }
+
+ mqs_priv->regmap = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(mqs_priv->regmap)) {
+ dev_err(&pdev->dev, "failed to get gpr regmap\n");
+ ret = PTR_ERR(mqs_priv->regmap);
+ goto err_free_gpr_np;
+ }
+ } else {
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mqs_priv->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
+ "core",
+ regs,
+ &fsl_mqs_regmap_config);
+ if (IS_ERR(mqs_priv->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(mqs_priv->regmap));
+ return PTR_ERR(mqs_priv->regmap);
+ }
+
+ mqs_priv->ipg = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(mqs_priv->ipg)) {
+ dev_err(&pdev->dev, "failed to get the clock: %ld\n",
+ PTR_ERR(mqs_priv->ipg));
+ return PTR_ERR(mqs_priv->ipg);
+ }
+ }
+
+ mqs_priv->mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mqs_priv->mclk)) {
+ dev_err(&pdev->dev, "failed to get the clock: %ld\n",
+ PTR_ERR(mqs_priv->mclk));
+ ret = PTR_ERR(mqs_priv->mclk);
+ goto err_free_gpr_np;
+ }
+
+ dev_set_drvdata(&pdev->dev, mqs_priv);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_fsl_mqs,
+ &fsl_mqs_dai, 1);
+ if (ret)
+ goto err_free_gpr_np;
+ return 0;
+
+err_free_gpr_np:
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
+static int fsl_mqs_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int fsl_mqs_runtime_resume(struct device *dev)
+{
+ struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+
+ if (mqs_priv->ipg)
+ clk_prepare_enable(mqs_priv->ipg);
+
+ if (mqs_priv->mclk)
+ clk_prepare_enable(mqs_priv->mclk);
+
+ if (mqs_priv->use_gpr)
+ regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
+ mqs_priv->reg_iomuxc_gpr2);
+ else
+ regmap_write(mqs_priv->regmap, REG_MQS_CTRL,
+ mqs_priv->reg_mqs_ctrl);
+ return 0;
+}
+
+static int fsl_mqs_runtime_suspend(struct device *dev)
+{
+ struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+
+ if (mqs_priv->use_gpr)
+ regmap_read(mqs_priv->regmap, IOMUXC_GPR2,
+ &mqs_priv->reg_iomuxc_gpr2);
+ else
+ regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
+ &mqs_priv->reg_mqs_ctrl);
+
+ if (mqs_priv->mclk)
+ clk_disable_unprepare(mqs_priv->mclk);
+
+ if (mqs_priv->ipg)
+ clk_disable_unprepare(mqs_priv->ipg);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fsl_mqs_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_mqs_runtime_suspend,
+ fsl_mqs_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static const struct of_device_id fsl_mqs_dt_ids[] = {
+ { .compatible = "fsl,imx8qm-mqs", },
+ { .compatible = "fsl,imx6sx-mqs", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_mqs_dt_ids);
+
+static struct platform_driver fsl_mqs_driver = {
+ .probe = fsl_mqs_probe,
+ .remove = fsl_mqs_remove,
+ .driver = {
+ .name = "fsl-mqs",
+ .of_match_table = fsl_mqs_dt_ids,
+ .pm = &fsl_mqs_pm_ops,
+ },
+};
+
+module_platform_driver(fsl_mqs_driver);
+
+MODULE_AUTHOR("Shengjiu Wang <Shengjiu.Wang@nxp.com>");
+MODULE_DESCRIPTION("MQS codec driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:fsl-mqs");
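To make the divider formula in fsl_mqs_hw_params() concrete, here is a worked case; the numbers below are illustrative, not taken from the patch. With the oversample rate fixed at 32 and a target repeat_rate of 8, a 24.576 MHz mclk and a 48 kHz LRCLK divide evenly:

    unsigned long mclk_rate = 24576000;         /* example mclk */
    int lrclk = 48000;                          /* params_rate() */
    int div = mclk_rate / (32 * lrclk * 2 * 8); /* = 1 */
    int res = mclk_rate % (32 * lrclk * 2 * 8); /* = 0, so div is valid */
    /* (div - 1) = 0 is then written through MQS_CLK_DIV_MASK/SHIFT */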
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index c49aea4fba56..08131d147983 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -69,8 +69,9 @@ static struct fiq_handler fh = {
.name = DRV_NAME,
};
-static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int snd_imx_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -85,7 +86,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
+static int snd_imx_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -104,7 +106,8 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
static int imx_pcm_fiq;
-static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int snd_imx_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -141,7 +144,9 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t
+snd_imx_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -165,7 +170,8 @@ static const struct snd_pcm_hardware snd_imx_hardware = {
.fifo_size = 0,
};
-static int snd_imx_open(struct snd_pcm_substream *substream)
+static int snd_imx_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd;
@@ -194,7 +200,8 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_imx_close(struct snd_pcm_substream *substream)
+static int snd_imx_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct imx_pcm_runtime_data *iprtd = runtime->private_data;
@@ -206,8 +213,9 @@ static int snd_imx_close(struct snd_pcm_substream *substream)
return 0;
}
-static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int snd_imx_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
@@ -222,17 +230,6 @@ static int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
return ret;
}
-static const struct snd_pcm_ops imx_pcm_ops = {
- .open = snd_imx_open,
- .close = snd_imx_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_imx_pcm_hw_params,
- .prepare = snd_imx_pcm_prepare,
- .trigger = snd_imx_pcm_trigger,
- .pointer = snd_imx_pcm_pointer,
- .mmap = snd_imx_pcm_mmap,
-};
-
static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
struct snd_pcm_substream *substream = pcm->streams[stream].substream;
@@ -279,7 +276,8 @@ static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
static int ssi_irq;
-static int imx_pcm_fiq_new(struct snd_soc_pcm_runtime *rtd)
+static int snd_imx_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
struct snd_pcm_substream *substream;
@@ -329,7 +327,8 @@ static void imx_pcm_free(struct snd_pcm *pcm)
}
}
-static void imx_pcm_fiq_free(struct snd_pcm *pcm)
+static void snd_imx_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
mxc_set_irq_fiq(ssi_irq, 0);
release_fiq(&fh);
@@ -337,9 +336,16 @@ static void imx_pcm_fiq_free(struct snd_pcm *pcm)
}
static const struct snd_soc_component_driver imx_soc_component_fiq = {
- .ops = &imx_pcm_ops,
- .pcm_new = imx_pcm_fiq_new,
- .pcm_free = imx_pcm_fiq_free,
+ .open = snd_imx_open,
+ .close = snd_imx_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = snd_imx_pcm_hw_params,
+ .prepare = snd_imx_pcm_prepare,
+ .trigger = snd_imx_pcm_trigger,
+ .pointer = snd_imx_pcm_pointer,
+ .mmap = snd_imx_pcm_mmap,
+ .pcm_construct = snd_imx_pcm_new,
+ .pcm_destruct = snd_imx_pcm_free,
};
int imx_pcm_fiq_init(struct platform_device *pdev,
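The imx-pcm-fiq.c conversion follows the same old-to-new mapping used across this series; in sketch form:

    /* old snd_pcm_ops / driver field  ->  new snd_soc_component_driver field
     * .open, .close, .hw_params, .prepare,
     * .trigger, .pointer, .mmap       ->  same names, plus component argument
     * .ioctl = snd_pcm_lib_ioctl      ->  .ioctl = snd_soc_pcm_lib_ioctl
     * .pcm_new                        ->  .pcm_construct
     * .pcm_free                       ->  .pcm_destruct
     */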
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index ccf9301889fe..5237ac96b756 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -98,7 +98,8 @@ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream)
return IRQ_HANDLED;
}
-static int psc_dma_hw_free(struct snd_pcm_substream *substream)
+static int psc_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
@@ -110,7 +111,8 @@ static int psc_dma_hw_free(struct snd_pcm_substream *substream)
* This function is called by ALSA to start, stop, pause, and resume the DMA
* transfer of data.
*/
-static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int psc_dma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -210,7 +212,8 @@ static const struct snd_pcm_hardware psc_dma_hardware = {
.fifo_size = 512,
};
-static int psc_dma_open(struct snd_pcm_substream *substream)
+static int psc_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -238,7 +241,8 @@ static int psc_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int psc_dma_close(struct snd_pcm_substream *substream)
+static int psc_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -263,7 +267,8 @@ static int psc_dma_close(struct snd_pcm_substream *substream)
}
static snd_pcm_uframes_t
-psc_dma_pointer(struct snd_pcm_substream *substream)
+psc_dma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -280,29 +285,19 @@ psc_dma_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, count);
}
-static int
-psc_dma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int psc_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
return 0;
}
-static const struct snd_pcm_ops psc_dma_ops = {
- .open = psc_dma_open,
- .close = psc_dma_close,
- .hw_free = psc_dma_hw_free,
- .ioctl = snd_pcm_lib_ioctl,
- .pointer = psc_dma_pointer,
- .trigger = psc_dma_trigger,
- .hw_params = psc_dma_hw_params,
-};
-
-static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int psc_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
size_t size = psc_dma_hardware.buffer_bytes_max;
@@ -341,10 +336,10 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
return -ENOMEM;
}
-static void psc_dma_free(struct snd_pcm *pcm)
+static void psc_dma_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_soc_pcm_runtime *rtd = pcm->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_pcm_substream *substream;
int stream;
@@ -362,9 +357,15 @@ static void psc_dma_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver mpc5200_audio_dma_component = {
.name = DRV_NAME,
- .ops = &psc_dma_ops,
- .pcm_new = &psc_dma_new,
- .pcm_free = &psc_dma_free,
+ .open = psc_dma_open,
+ .close = psc_dma_close,
+ .hw_free = psc_dma_hw_free,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = psc_dma_pointer,
+ .trigger = psc_dma_trigger,
+ .hw_params = psc_dma_hw_params,
+ .pcm_construct = psc_dma_new,
+ .pcm_destruct = psc_dma_free,
};
int mpc5200_audio_dma_create(struct platform_device *op)
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 6007e6305735..9ad35d9940fe 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -232,7 +232,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (li->cpu) {
int is_single_links = 0;
- /* BE is dummy */
+ /* Codec is dummy */
codecs->of_node = NULL;
codecs->dai_name = "snd-soc-dummy-dai";
codecs->name = "snd-soc-dummy";
@@ -263,7 +263,7 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
} else {
struct snd_soc_codec_conf *cconf;
- /* FE is dummy */
+ /* CPU is dummy */
cpus->of_node = NULL;
cpus->dai_name = "snd-soc-dummy-dai";
cpus->name = "snd-soc-dummy";
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fc9c753db8dd..10b82bf043d1 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -149,7 +149,7 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
if (li->cpu) {
int is_single_links = 0;
- /* BE is dummy */
+ /* Codec is dummy */
codecs->of_node = NULL;
codecs->dai_name = "snd-soc-dummy-dai";
codecs->name = "snd-soc-dummy";
@@ -179,7 +179,7 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
} else {
struct snd_soc_codec_conf *cconf;
- /* FE is dummy */
+ /* CPU is dummy */
cpus->of_node = NULL;
cpus->dai_name = "snd-soc-dummy-dai";
cpus->name = "snd-soc-dummy";
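The two comment fixes above reflect how DPCM dai links are wired: a front-end link carries the PCM stream into the DSP, so its codec end is a dummy; a back-end link carries the DSP output to the physical codec, so its CPU end is a dummy. Condensed from the code above into a sketch:

    if (li->cpu) {            /* front-end link */
        /* Codec is dummy */
        codecs->dai_name = "snd-soc-dummy-dai";
        codecs->name = "snd-soc-dummy";
    } else {                  /* back-end link */
        /* CPU is dummy */
        cpus->dai_name = "snd-soc-dummy-dai";
        cpus->name = "snd-soc-dummy";
    }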
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 01c99750212a..c8de0bb5bed9 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -59,10 +59,13 @@ config SND_SOC_INTEL_HASWELL
	  If you have an Intel Haswell or Broadwell platform connected to
an I2S codec, then enable this option by saying Y or m. This is
typically used for Chromebooks. This is a recommended option.
+ This option is mutually exclusive with the SOF support on
+ Broadwell. If you want to enable SOF on Broadwell, you need to
+ deselect this option first.
config SND_SOC_INTEL_BAYTRAIL
tristate "Baytrail (legacy) Platforms"
- depends on DMADEVICES && ACPI && SND_SST_ATOM_HIFI2_PLATFORM=n
+ depends on DMADEVICES && ACPI && SND_SST_ATOM_HIFI2_PLATFORM=n && SND_SOC_SOF_BAYTRAIL=n
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_SST_ACPI
select SND_SOC_INTEL_SST_FIRMWARE
@@ -101,6 +104,9 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
	  If you have an Intel Baytrail or Cherrytrail platform with an I2S
codec, then enable this option by saying Y or m. This is a
recommended option
+ This option is mutually exclusive with the SOF support on
+ Baytrail/Cherrytrail. If you want to enable SOF on
+ Baytrail/Cherrytrail, you need to deselect this option first.
config SND_SOC_INTEL_SKYLAKE
tristate "All Skylake/SST Platforms"
@@ -113,7 +119,7 @@ config SND_SOC_INTEL_SKYLAKE
select SND_SOC_INTEL_CNL
select SND_SOC_INTEL_CFL
help
- This is a backwards-compatible option to select all devices
+ This is a backwards-compatible option to select all devices
supported by the Intel SST/Skylake driver. This option is no
longer recommended and will be deprecated when the SOF
driver is introduced. Distributions should explicitly
@@ -203,9 +209,12 @@ config SND_SOC_INTEL_SKYLAKE_SSP_CLK
config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
bool "HDAudio codec support"
help
- If you have a Intel Skylake/Broxton/ApolloLake/KabyLake/
- GeminiLake or CannonLake platform with an HDaudio codec
- then enable this option by saying Y
+	  This option broke audio on Linus' Skylake laptop in December 2018,
+	  and the race conditions during probe have not been fixed since.
+	  This option is DEPRECATED; all HDaudio codec support needs
+	  to be handled by the SOF driver.
+	  Distributions should not enable this option, and there are no known
+	  users of this capability.
config SND_SOC_INTEL_SKYLAKE_COMMON
tristate
@@ -215,7 +224,7 @@ config SND_SOC_INTEL_SKYLAKE_COMMON
select SND_SOC_INTEL_SST
select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
select SND_SOC_ACPI_INTEL_MATCH
- select SND_INTEL_NHLT if ACPI
+ select SND_INTEL_DSP_CONFIG
help
	  If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
GeminiLake or CannonLake platform with the DSP enabled in the BIOS
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 8cc3cc363eb0..47e3d1943d7e 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -586,7 +586,8 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
},
};
-static int sst_platform_open(struct snd_pcm_substream *substream)
+static int sst_soc_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
@@ -598,15 +599,15 @@ static int sst_platform_open(struct snd_pcm_substream *substream)
return 0;
}
-static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int sst_soc_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret_val = 0, str_id;
struct sst_runtime_stream *stream;
int status;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- dev_dbg(rtd->dev, "sst_platform_pcm_trigger called\n");
+ dev_dbg(rtd->dev, "%s called\n", __func__);
if (substream->pcm->internal)
return 0;
stream = substream->runtime->private_data;
@@ -646,8 +647,8 @@ static int sst_platform_pcm_trigger(struct snd_pcm_substream *substream,
}
-static snd_pcm_uframes_t sst_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sst_soc_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct sst_runtime_stream *stream;
int ret_val, status;
@@ -668,14 +669,8 @@ static snd_pcm_uframes_t sst_platform_pcm_pointer
return str_info->buffer_ptr;
}
-static const struct snd_pcm_ops sst_platform_ops = {
- .open = sst_platform_open,
- .ioctl = snd_pcm_lib_ioctl,
- .trigger = sst_platform_pcm_trigger,
- .pointer = sst_platform_pcm_pointer,
-};
-
-static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sst_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
@@ -709,9 +704,12 @@ static const struct snd_soc_component_driver sst_soc_platform_drv = {
.name = DRV_NAME,
.probe = sst_soc_probe,
.remove = sst_soc_remove,
- .ops = &sst_platform_ops,
+ .open = sst_soc_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .trigger = sst_soc_trigger,
+ .pointer = sst_soc_pointer,
.compr_ops = &sst_platform_compr_ops,
- .pcm_new = sst_pcm_new,
+ .pcm_construct = sst_soc_pcm_new,
};
static int sst_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
index 54f2ee3010ee..1d780fcc448c 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-pcm.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
@@ -58,11 +58,11 @@ struct sst_byt_priv_data {
};
/* this may get called several times by oss emulation */
-static int sst_byt_pcm_hw_params(struct snd_pcm_substream *substream,
+static int sst_byt_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -121,7 +121,8 @@ static int sst_byt_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int sst_byt_pcm_hw_free(struct snd_pcm_substream *substream)
+static int sst_byt_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -164,10 +165,10 @@ static void sst_byt_pcm_work(struct work_struct *work)
sst_byt_pcm_restore_stream_context(pcm_data->substream);
}
-static int sst_byt_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int sst_byt_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -228,11 +229,11 @@ static u32 byt_notify_pointer(struct sst_byt_stream *stream, void *data)
return pos;
}
-static snd_pcm_uframes_t sst_byt_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sst_byt_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
@@ -241,10 +242,10 @@ static snd_pcm_uframes_t sst_byt_pcm_pointer(struct snd_pcm_substream *substream
return bytes_to_frames(runtime, pcm_data->hw_ptr);
}
-static int sst_byt_pcm_open(struct snd_pcm_substream *substream)
+static int sst_byt_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -269,10 +270,10 @@ static int sst_byt_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int sst_byt_pcm_close(struct snd_pcm_substream *substream)
+static int sst_byt_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_byt_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct sst_byt_pcm_data *pcm_data = &pdata->pcm[substream->stream];
struct sst_byt *byt = pdata->byt;
@@ -294,7 +295,8 @@ out:
return ret;
}
-static int sst_byt_pcm_mmap(struct snd_pcm_substream *substream,
+static int sst_byt_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -303,22 +305,11 @@ static int sst_byt_pcm_mmap(struct snd_pcm_substream *substream,
return snd_pcm_lib_default_mmap(substream, vma);
}
-static const struct snd_pcm_ops sst_byt_pcm_ops = {
- .open = sst_byt_pcm_open,
- .close = sst_byt_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = sst_byt_pcm_hw_params,
- .hw_free = sst_byt_pcm_hw_free,
- .trigger = sst_byt_pcm_trigger,
- .pointer = sst_byt_pcm_pointer,
- .mmap = sst_byt_pcm_mmap,
-};
-
-static int sst_byt_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sst_byt_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
size_t size;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_pdata *pdata = dev_get_platdata(component->dev);
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
@@ -380,8 +371,15 @@ static int sst_byt_pcm_probe(struct snd_soc_component *component)
static const struct snd_soc_component_driver byt_dai_component = {
.name = DRV_NAME,
.probe = sst_byt_pcm_probe,
- .ops = &sst_byt_pcm_ops,
- .pcm_new = sst_byt_pcm_new,
+ .open = sst_byt_pcm_open,
+ .close = sst_byt_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = sst_byt_pcm_hw_params,
+ .hw_free = sst_byt_pcm_hw_free,
+ .trigger = sst_byt_pcm_trigger,
+ .pointer = sst_byt_pcm_pointer,
+ .mmap = sst_byt_pcm_mmap,
+ .pcm_construct = sst_byt_pcm_new,
};
#ifdef CONFIG_PM
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 5c27f7ab4a5f..ef20316e83d1 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -3,13 +3,13 @@ menuconfig SND_SOC_INTEL_MACH
bool "Intel Machine drivers"
depends on SND_SOC_INTEL_SST_TOPLEVEL || SND_SOC_SOF_INTEL_TOPLEVEL
help
- Intel ASoC Machine Drivers. If you have a Intel machine that
- has an audio controller with a DSP and I2S or DMIC port, then
- enable this option by saying Y
+	  Intel ASoC Machine Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and I2S or DMIC port, then
+ enable this option by saying Y
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Intel ASoC machine drivers.
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel ASoC machine drivers.
if SND_SOC_INTEL_MACH
@@ -114,11 +114,11 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
depends on X86_INTEL_LPSS || COMPILE_TEST
select SND_SOC_ACPI
select SND_SOC_RT5670
- help
- This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
- platforms with RT5672 audio codec.
- Say Y or m if you have such a device. This is a recommended option.
- If unsure select "N".
+ help
+ This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
+ platforms with RT5672 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
tristate "Cherrytrail & Braswell with RT5645/5650 codec"
@@ -263,14 +263,17 @@ config SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
+config SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ tristate
+ select SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
+
if SND_SOC_INTEL_APL
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
- select SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
- select SND_HDA_DSP_LOADER
+ select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
help
This adds support for ASoC machine driver for Broxton-P platforms
with DA7219 + MAX98357A I2S audio codec.
@@ -284,7 +287,6 @@ config SND_SOC_INTEL_BXT_RT298_MACH
select SND_SOC_RT298
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
- select SND_HDA_DSP_LOADER
help
This adds support for ASoC machine driver for Broxton platforms
with RT286 I2S audio codec.
@@ -311,20 +313,21 @@ config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
If unsure select "N".
config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
- tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+ tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
- depends on SPI
- select SND_SOC_RT5663
- select SND_SOC_RT5514
- select SND_SOC_RT5514_SPI
- select SND_SOC_MAX98927
- select SND_SOC_HDAC_HDMI
- help
- This adds support for ASoC Onboard Codec I2S machine driver. This will
- create an alsa sound card for RT5663 + RT5514 + MAX98927.
- Say Y or m if you have such a device. This is a recommended option.
- If unsure select "N".
+ depends on SPI
+ select SND_SOC_RT5663
+ select SND_SOC_RT5514
+ select SND_SOC_RT5514_SPI
+ select SND_SOC_MAX98927
+ select SND_SOC_HDAC_HDMI
+ select SND_SOC_INTEL_SKYLAKE_SSP_CLK
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+	  create an ALSA sound card for RT5663 + RT5514 + MAX98927.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
tristate "KBL with DA7219 and MAX98357A in I2S Mode"
@@ -364,7 +367,18 @@ config SND_SOC_INTEL_KBL_RT5660_MACH
endif ## SND_SOC_INTEL_KBL
-if SND_SOC_INTEL_GLK || (SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK)
+if SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK
+
+config SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH
+ tristate "GLK with DA7219 and MAX98357A in I2S Mode"
+ depends on I2C && ACPI
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ help
+ This adds support for ASoC machine driver for Geminilake platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
tristate "GLK with RT5682 and MAX98357A in I2S Mode"
@@ -374,14 +388,13 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
select SND_SOC_MAX98357A
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
- select SND_HDA_DSP_LOADER
help
This adds support for ASoC machine driver for Geminilake platforms
with RT5682 + MAX98357A I2S audio codec.
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_INTEL_GLK || (SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK)
+endif ## SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK
if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
@@ -393,16 +406,16 @@ config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
help
This adds support for ASoC machine driver for Intel platforms
SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
- Say Y or m if you have such a device. This is a recommended option.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
-if SND_SOC_SOF_HDA_COMMON || SND_SOC_SOF_BAYTRAIL
+if SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
config SND_SOC_INTEL_SOF_RT5682_MACH
tristate "SOF with rt5682 codec in I2S Mode"
depends on I2C && ACPI
- depends on (SND_SOC_SOF_HDA_COMMON && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
+ depends on (SND_SOC_SOF_HDA_LINK && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
(SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
select SND_SOC_RT5682
select SND_SOC_DMIC
@@ -412,7 +425,7 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
with rt5682 codec.
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_SOF_HDA_COMMON || SND_SOC_SOF_BAYTRAIL
+endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
@@ -420,7 +433,26 @@ config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
depends on I2C && ACPI
depends on MFD_INTEL_LPSS || COMPILE_TEST
- select SND_SOC_INTEL_DA7219_MAX98357A_GENERIC
+ select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
+ help
+ This adds support for ASoC machine driver for Cometlake platforms
+ with DA7219 + MAX98357A I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
+ tristate "CML with RT1011 and RT5682 in I2S Mode"
+ depends on I2C && ACPI
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_RT1011
+ select SND_SOC_RT5682
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC machine driver for SOF platform with
+ RT1011 + RT5682 I2S codec.
+ Say Y if you have such a device.
+ If unsure select "N".
endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 6445f90ea542..ba1aa89db09d 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -4,9 +4,9 @@ snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
snd-soc-sst-bdw-rt5677-mach-objs := bdw-rt5677.o
snd-soc-sst-broadwell-objs := broadwell.o
-snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o
-snd-soc-sst-bxt-rt298-objs := bxt_rt298.o
-snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o
+snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o hda_dsp_common.o
+snd-soc-sst-bxt-rt298-objs := bxt_rt298.o hda_dsp_common.o
+snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o hda_dsp_common.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
@@ -17,14 +17,15 @@ snd-soc-sst-byt-cht-cx2072x-objs := bytcht_cx2072x.o
snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
-snd-soc-sof_rt5682-objs := sof_rt5682.o
+snd-soc-sof_rt5682-objs := sof_rt5682.o hda_dsp_common.o
+snd-soc-cml_rt1011_rt5682-objs := cml_rt1011_rt5682.o hda_dsp_common.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-kbl_rt5660-objs := kbl_rt5660.o
snd-soc-skl_rt286-objs := skl_rt286.o
-snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
+snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o hda_dsp_common.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
@@ -32,7 +33,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH) += snd-soc-sof_rt5682.o
obj-$(CONFIG_SND_SOC_INTEL_HASWELL_MACH) += snd-soc-sst-haswell.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_RT5640_MACH) += snd-soc-sst-byt-rt5640-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
-obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH) += snd-soc-sst-bxt-da7219_max98357a.o
+obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
obj-$(CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH) += snd-soc-sst-glk-rt5682_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH) += snd-soc-sst-byt-cht-cx2072x.
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH) += snd-soc-sst-byt-cht-da7213.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH) += snd-soc-sst-byt-cht-es8316.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) += snd-soc-sst-byt-cht-nocodec.o
+obj-$(CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH) += snd-soc-cml_rt1011_rt5682.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH) += snd-soc-kbl_da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH) += snd-soc-kbl_da7219_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 4a4d3353e26d..2af8e5a62da8 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -74,6 +74,11 @@ static const struct snd_soc_dapm_route bdw_rt5677_map[] = {
/* CODEC BE connections */
{"SSP0 CODEC IN", NULL, "AIF1 Capture"},
{"AIF1 Playback", NULL, "SSP0 CODEC OUT"},
+ {"DSP Capture", NULL, "DSP Buffer"},
+
+ /* DSP Clock Connections */
+ { "DSP Buffer", NULL, "SSP0 CODEC IN" },
+ { "SSP0 CODEC IN", NULL, "DSPTX" },
};
static const struct snd_kcontrol_new bdw_rt5677_controls[] = {
@@ -165,10 +170,37 @@ static int bdw_rt5677_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+static int bdw_rt5677_dsp_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_PLL1, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT5677_PLL1_S_MCLK,
+ 24000000, 24576000);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set codec pll configuration\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_ops bdw_rt5677_ops = {
.hw_params = bdw_rt5677_hw_params,
};
+static const struct snd_soc_ops bdw_rt5677_dsp_ops = {
+ .hw_params = bdw_rt5677_dsp_hw_params,
+};
+
#if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
static int bdw_rt5677_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
@@ -208,6 +240,11 @@ static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
rt5677_sel_asrc_clk_src(component, RT5677_DA_STEREO_FILTER |
RT5677_AD_STEREO1_FILTER | RT5677_I2S1_SOURCE,
RT5677_CLK_SEL_I2S1_ASRC);
+ /* Enable codec ASRC function for Mono ADC L.
+ * The ASRC clock source is clk_sys2_asrc.
+ */
+ rt5677_sel_asrc_clk_src(component, RT5677_AD_MONO_L_FILTER,
+ RT5677_CLK_SEL_SYS2);
/* Request rt5677 GPIO for headphone amp control */
bdw_rt5677->gpio_hp_en = devm_gpiod_get(component->dev, "headphone-enable",
@@ -258,6 +295,12 @@ SND_SOC_DAILINK_DEF(platform,
SND_SOC_DAILINK_DEF(be,
DAILINK_COMP_ARRAY(COMP_CODEC("i2c-RT5677CE:00", "rt5677-aif1")));
+/* Wake on voice interface */
+SND_SOC_DAILINK_DEFS(dsp,
+ DAILINK_COMP_ARRAY(COMP_CPU("spi-RT5677AA:00")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-RT5677CE:00", "rt5677-dspbuffer")),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("spi-RT5677AA:00")));
+
static struct snd_soc_dai_link bdw_rt5677_dais[] = {
/* Front End DAI links */
{
@@ -276,6 +319,14 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = {
SND_SOC_DAILINK_REG(fe, dummy, platform),
},
+ /* Non-DPCM links */
+ {
+ .name = "Codec DSP",
+ .stream_name = "Wake on Voice",
+ .ops = &bdw_rt5677_dsp_ops,
+ SND_SOC_DAILINK_REG(dsp),
+ },
+
/* Back End DAI links */
{
/* SSP0 - Codec */
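The wake-on-voice link added above clocks the codec from PLL1; the arithmetic is that the fixed 24 MHz MCLK is multiplied up to 512 * 48 kHz = 24.576 MHz, which bdw_rt5677_dsp_hw_params() then selects as sysclk. An illustrative restatement of those two calls:

    ret = snd_soc_dai_set_pll(codec_dai, 0, RT5677_PLL1_S_MCLK,
                              24000000, 24576000); /* 24 MHz in, 512*Fs out */
    ret = snd_soc_dai_set_sysclk(codec_dai, RT5677_SCLK_S_PLL1,
                                 24576000, SND_SOC_CLOCK_IN);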
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index ac1dea5f9d11..5873abb46441 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -21,6 +21,7 @@
#include "../../codecs/da7219.h"
#include "../../codecs/da7219-aad.h"
#include "../common/soc-intel-quirks.h"
+#include "hda_dsp_common.h"
#define BXT_DIALOG_CODEC_DAI "da7219-hifi"
#define BXT_MAXIM_CODEC_DAI "HiFi"
@@ -38,6 +39,7 @@ struct bxt_hdmi_pcm {
struct bxt_card_private {
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
enum {
@@ -615,6 +617,13 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
snd_soc_dapm_add_routes(&card->dapm, broxton_map,
ARRAY_SIZE(broxton_map));
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -720,6 +729,8 @@ static int broxton_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
return devm_snd_soc_register_card(&pdev->dev, &broxton_audio_card);
}
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index adf416a49b48..eabf9d8468ae 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -18,6 +18,7 @@
#include <sound/pcm_params.h>
#include "../../codecs/hdac_hdmi.h"
#include "../../codecs/rt298.h"
+#include "hda_dsp_common.h"
/* Headset jack detection DAPM pins */
static struct snd_soc_jack broxton_headset;
@@ -31,6 +32,7 @@ struct bxt_hdmi_pcm {
struct bxt_rt286_private {
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
enum {
@@ -527,6 +529,13 @@ static int bxt_card_late_probe(struct snd_soc_card *card)
int err, i = 0;
char jack_name[NAME_SIZE];
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct bxt_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -626,6 +635,8 @@ static int broxton_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
return devm_snd_soc_register_card(&pdev->dev, card);
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 9c1aa4ec9cba..dd2b5ad08659 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -405,10 +405,12 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
},
- .driver_data = (void *)(BYT_RT5640_IN1_MAP |
- BYT_RT5640_MCLK_EN |
- BYT_RT5640_SSP0_AIF1),
-
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
},
{
.matches = {
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 8879c3be29d5..c68a5b85a4a0 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -48,6 +48,7 @@ struct cht_mc_private {
#define CHT_RT5645_SSP2_AIF2 BIT(16) /* default is using AIF1 */
#define CHT_RT5645_SSP0_AIF1 BIT(17)
#define CHT_RT5645_SSP0_AIF2 BIT(18)
+#define CHT_RT5645_PMC_PLT_CLK_0 BIT(19)
static unsigned long cht_rt5645_quirk = 0;
@@ -59,6 +60,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk SSP0_AIF1 enabled");
if (cht_rt5645_quirk & CHT_RT5645_SSP0_AIF2)
dev_info(dev, "quirk SSP0_AIF2 enabled");
+ if (cht_rt5645_quirk & CHT_RT5645_PMC_PLT_CLK_0)
+ dev_info(dev, "quirk PMC_PLT_CLK_0 enabled");
}
static int platform_clock_control(struct snd_soc_dapm_widget *w,
@@ -226,16 +229,22 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-/* uncomment when we have a real quirk
static int cht_rt5645_quirk_cb(const struct dmi_system_id *id)
{
cht_rt5645_quirk = (unsigned long)id->driver_data;
return 1;
}
-*/
static const struct dmi_system_id cht_rt5645_quirk_table[] = {
{
+ /* Strago family Chromebooks */
+ .callback = cht_rt5645_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
+ },
+ .driver_data = (void *)CHT_RT5645_PMC_PLT_CLK_0,
+ },
+ {
},
};
@@ -526,6 +535,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
int dai_index = 0;
int ret_val = 0;
int i;
+ const char *mclk_name;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -662,11 +672,15 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
if (ret_val)
return ret_val;
- drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (cht_rt5645_quirk & CHT_RT5645_PMC_PLT_CLK_0)
+ mclk_name = "pmc_plt_clk_0";
+ else
+ mclk_name = "pmc_plt_clk_3";
+
+ drv->mclk = devm_clk_get(&pdev->dev, mclk_name);
if (IS_ERR(drv->mclk)) {
- dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(drv->mclk));
+ dev_err(&pdev->dev, "Failed to get MCLK from %s: %ld\n",
+ mclk_name, PTR_ERR(drv->mclk));
return PTR_ERR(drv->mclk);
}
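The Strago quirk above switches the machine driver from pmc_plt_clk_3 to pmc_plt_clk_0 through a DMI match; the probe path fires the callback via dmi_check_system(). Reduced to a sketch with hypothetical foo_* names:

    static unsigned long foo_quirk;

    static int foo_quirk_cb(const struct dmi_system_id *id)
    {
        foo_quirk = (unsigned long)id->driver_data;
        return 1;
    }

    /* in probe(), after dmi_check_system(foo_quirk_table): */
    mclk_name = (foo_quirk & FOO_PMC_PLT_CLK_0) ? "pmc_plt_clk_0"
                                                : "pmc_plt_clk_3";
    drv->mclk = devm_clk_get(&pdev->dev, mclk_name);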
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
new file mode 100644
index 000000000000..a22f97234201
--- /dev/null
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2019 Intel Corporation.
+
+/*
+ * Intel Cometlake I2S Machine driver for RT1011 + RT5682 codec
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/rt5682.h>
+#include <sound/soc-acpi.h>
+#include "../../codecs/rt1011.h"
+#include "../../codecs/rt5682.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "hda_dsp_common.h"
+
+/* The platform clock outputs a 24 MHz clock to the codec as I2S MCLK */
+#define CML_PLAT_CLK 24000000
+#define CML_RT1011_CODEC_DAI "rt1011-aif"
+#define CML_RT5682_CODEC_DAI "rt5682-aif1"
+#define NAME_SIZE 32
+
+static struct snd_soc_jack hdmi_jack[3];
+
+struct hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct card_private {
+ char codec_name[SND_ACPI_I2C_ID_LEN];
+ struct snd_soc_jack headset;
+ struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
+};
+
+static const struct snd_kcontrol_new cml_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("TL Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("TR Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("WL Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("WR Ext Spk"),
+};
+
+static const struct snd_soc_dapm_widget cml_rt1011_rt5682_widgets[] = {
+ SND_SOC_DAPM_SPK("TL Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("TR Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("WL Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("WR Ext Spk", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+};
+
+static const struct snd_soc_dapm_route cml_rt1011_rt5682_map[] = {
+	/* speaker */
+ {"TL Ext Spk", NULL, "TL SPO"},
+ {"TR Ext Spk", NULL, "TR SPO"},
+ {"WL Ext Spk", NULL, "WL SPO"},
+ {"WR Ext Spk", NULL, "WR SPO"},
+
+ /* HP jack connectors - unknown if we have jack detection */
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ /* other jacks */
+ { "IN1P", NULL, "Headset Mic" },
+
+ /* DMIC */
+ {"DMic", NULL, "SoC DMIC"},
+};
+
+static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ int ret;
+
+	/* need to enable ASRC function for the 24 MHz MCLK rate */
+ rt5682_sel_asrc_clk_src(component, RT5682_DA_STEREO1_FILTER |
+ RT5682_AD_STEREO1_FILTER,
+ RT5682_CLK_SEL_I2S1_ASRC);
+
+ /*
+	 * Headset buttons map to the Google reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(rtd->card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ SND_JACK_BTN_3,
+ &ctx->headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->headset;
+
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ ret = snd_soc_component_set_jack(component, jack, NULL);
+ if (ret)
+ dev_err(rtd->dev, "Headset Jack call-back failed: %d\n", ret);
+
+ return ret;
+}
+
+static int cml_rt5682_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int clk_id, clk_freq, pll_out, ret;
+
+ clk_id = RT5682_PLL1_S_MCLK;
+ clk_freq = CML_PLAT_CLK;
+
+ pll_out = params_rate(params) * 512;
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0, clk_id, clk_freq, pll_out);
+ if (ret < 0)
+ dev_warn(rtd->dev, "snd_soc_dai_set_pll err = %d\n", ret);
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
+ pll_out, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_warn(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ /*
+ * slot_width should be equal to or larger than the data length,
+ * so set them to be the same
+ */
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2,
+ params_width(params));
+ if (ret < 0)
+ dev_warn(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+}
+
+static int cml_rt1011_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_card *card = rtd->card;
+ int srate, i, ret = 0;
+
+ srate = params_rate(params);
+
+ for (i = 0; i < rtd->num_codecs; i++) {
+ codec_dai = rtd->codec_dais[i];
+
+ /* BCLK at 100 * Fs to drive 24-bit data */
+ ret = snd_soc_dai_set_pll(codec_dai, 0, RT1011_PLL1_S_BCLK,
+ 100 * srate, 256 * srate);
+ if (ret < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT1011_FS_SYS_PRE_S_PLL1,
+ 256 * srate, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return ret;
+ }
+
+ /*
+ * Codec TDM is configured for 24-bit capture/playback.
+ * 2-channel playback is done over 4 codecs - 2 woofers and
+ * 2 tweeters. The left woofer and tweeter play the left
+ * playback data, and likewise for the right, so 2 codecs
+ * (one tweeter/woofer pair) share the same Rx slot. The
+ * feedback is captured for each codec individually, so all
+ * 4 codecs use 1 Tx slot each for feedback.
+ */
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:00")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x4, 0x1, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:02")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x1, 0x1, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ /* TDM Rx slot 2 is used for the right woofer & tweeter pair */
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:01")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x8, 0x2, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:03")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x2, 0x2, 4, 24);
+ if (ret < 0)
+ break;
+ }
+ }
+ if (ret < 0)
+ dev_err(rtd->dev,
+ "set codec TDM slot for %s failed with error %d\n",
+ codec_dai->component->name, ret);
+ return ret;
+}
+
+static struct snd_soc_ops cml_rt5682_ops = {
+ .hw_params = cml_rt5682_hw_params,
+};
+
+static const struct snd_soc_ops cml_rt1011_ops = {
+ .hw_params = cml_rt1011_hw_params,
+};
+
+static int sof_card_late_probe(struct snd_soc_card *card)
+{
+ struct card_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = NULL;
+ char jack_name[NAME_SIZE];
+ struct hdmi_pcm *pcm;
+ int ret, i = 0;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ ret = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &hdmi_jack[i],
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &hdmi_jack[i]);
+ if (ret < 0)
+ return ret;
+
+ i++;
+ }
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+static int hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = dai->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+/* Cometlake digital audio interface glue - connects codec <--> CPU */
+
+SND_SOC_DAILINK_DEF(ssp0_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("SSP0 Pin")));
+SND_SOC_DAILINK_DEF(ssp0_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-10EC5682:00",
+ CML_RT5682_CODEC_DAI)));
+
+SND_SOC_DAILINK_DEF(ssp1_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("SSP1 Pin")));
+SND_SOC_DAILINK_DEF(ssp1_codec,
+ DAILINK_COMP_ARRAY(
+ /* WL */ COMP_CODEC("i2c-10EC1011:00", CML_RT1011_CODEC_DAI),
+ /* WR */ COMP_CODEC("i2c-10EC1011:01", CML_RT1011_CODEC_DAI),
+ /* TL */ COMP_CODEC("i2c-10EC1011:02", CML_RT1011_CODEC_DAI),
+ /* TR */ COMP_CODEC("i2c-10EC1011:03", CML_RT1011_CODEC_DAI)));
+
+SND_SOC_DAILINK_DEF(dmic_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC01 Pin")));
+
+SND_SOC_DAILINK_DEF(dmic16k_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC16k Pin")));
+
+SND_SOC_DAILINK_DEF(dmic_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
+
+SND_SOC_DAILINK_DEF(idisp1_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp1 Pin")));
+SND_SOC_DAILINK_DEF(idisp1_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi1")));
+
+SND_SOC_DAILINK_DEF(idisp2_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp2 Pin")));
+SND_SOC_DAILINK_DEF(idisp2_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi2")));
+
+SND_SOC_DAILINK_DEF(idisp3_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp3 Pin")));
+SND_SOC_DAILINK_DEF(idisp3_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi3")));
+
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("0000:00:1f.3")));
+
+static struct snd_soc_dai_link cml_rt1011_rt5682_dailink[] = {
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .init = cml_rt5682_codec_init,
+ .ignore_pmdown_time = 1,
+ .ops = &cml_rt5682_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(ssp0_pin, ssp0_codec, platform),
+ },
+ {
+ .name = "dmic01",
+ .id = 1,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic_pin, dmic_codec, platform),
+ },
+ {
+ .name = "dmic16k",
+ .id = 2,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic16k_pin, dmic_codec, platform),
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp1_pin, idisp1_codec, platform),
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp2_pin, idisp2_codec, platform),
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp3_pin, idisp3_codec, platform),
+ },
+ {
+ /*
+ * SSP1 - Codec : added to the end of the list so that the
+ * common topologies can be reused for the other endpoints,
+ * changing only SSP1's codec
+ */
+ .name = "SSP1-Codec",
+ .id = 6,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1, /* Capture stream provides Feedback */
+ .no_pcm = 1,
+ .ops = &cml_rt1011_ops,
+ SND_SOC_DAILINK_REG(ssp1_pin, ssp1_codec, platform),
+ },
+};
+
+static struct snd_soc_codec_conf rt1011_conf[] = {
+ {
+ .dev_name = "i2c-10EC1011:00",
+ .name_prefix = "WL",
+ },
+ {
+ .dev_name = "i2c-10EC1011:01",
+ .name_prefix = "WR",
+ },
+ {
+ .dev_name = "i2c-10EC1011:02",
+ .name_prefix = "TL",
+ },
+ {
+ .dev_name = "i2c-10EC1011:03",
+ .name_prefix = "TR",
+ },
+};
+
+/* Cometlake audio machine driver for RT1011 and RT5682 */
+static struct snd_soc_card snd_soc_card_cml = {
+ .name = "cml_rt1011_rt5682",
+ .dai_link = cml_rt1011_rt5682_dailink,
+ .num_links = ARRAY_SIZE(cml_rt1011_rt5682_dailink),
+ .codec_conf = rt1011_conf,
+ .num_configs = ARRAY_SIZE(rt1011_conf),
+ .dapm_widgets = cml_rt1011_rt5682_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cml_rt1011_rt5682_widgets),
+ .dapm_routes = cml_rt1011_rt5682_map,
+ .num_dapm_routes = ARRAY_SIZE(cml_rt1011_rt5682_map),
+ .controls = cml_controls,
+ .num_controls = ARRAY_SIZE(cml_controls),
+ .fully_routed = true,
+ .late_probe = sof_card_late_probe,
+};
+
+static int snd_cml_rt1011_probe(struct platform_device *pdev)
+{
+ struct card_private *ctx;
+ struct snd_soc_acpi_mach *mach;
+ const char *platform_name;
+ int ret;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+ mach = (&pdev->dev)->platform_data;
+ snd_soc_card_cml.dev = &pdev->dev;
+ platform_name = mach->mach_params.platform;
+
+ /* set platform name for each dailink */
+ ret = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cml,
+ platform_name);
+ if (ret)
+ return ret;
+
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
+ snd_soc_card_set_drvdata(&snd_soc_card_cml, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cml);
+}
+
+static struct platform_driver snd_cml_rt1011_rt5682_driver = {
+ .probe = snd_cml_rt1011_probe,
+ .driver = {
+ .name = "cml_rt1011_rt5682",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+module_platform_driver(snd_cml_rt1011_rt5682_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("Cometlake Audio Machine driver - RT1011 and RT5682 in I2S mode");
+MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
+MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
+MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cml_rt1011_rt5682");
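
Aside: the per-codec TDM masks set in cml_rt1011_hw_params() can be
hard to follow inline. A sketch of the same mapping as a lookup table
(not part of the patch; the struct and table names are made up here):

struct cml_tdm_slot {
	const char *name;	/* ACPI I2C device name */
	unsigned int tx_mask;	/* feedback capture slot, unique per amp */
	unsigned int rx_mask;	/* playback slot, shared per channel */
};

static const struct cml_tdm_slot cml_tdm_map[] = {
	{ "i2c-10EC1011:00", 0x4, 0x1 },	/* WL */
	{ "i2c-10EC1011:01", 0x8, 0x2 },	/* WR */
	{ "i2c-10EC1011:02", 0x1, 0x1 },	/* TL */
	{ "i2c-10EC1011:03", 0x2, 0x2 },	/* TR */
};

Each woofer/tweeter pair shares an Rx mask (0x1 left, 0x2 right) while
all four amps keep distinct Tx masks for individual feedback capture,
matching the snd_soc_dai_set_tdm_slot(codec_dai, tx, rx, 4, 24) calls
above.
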
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index bd2d371f2acd..b36264d1d1cd 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -19,6 +19,7 @@
#include <sound/soc-acpi.h>
#include "../../codecs/rt5682.h"
#include "../../codecs/hdac_hdmi.h"
+#include "hda_dsp_common.h"
/* The platform clock outputs 19.2Mhz clock to codec as I2S MCLK */
#define GLK_PLAT_CLK_FREQ 19200000
@@ -41,6 +42,7 @@ struct glk_hdmi_pcm {
struct glk_card_private {
struct snd_soc_jack geminilake_headset;
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
enum {
@@ -545,6 +547,13 @@ static int glk_card_late_probe(struct snd_soc_card *card)
int err = 0;
int i = 0;
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct glk_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -612,6 +621,8 @@ static int geminilake_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
return devm_snd_soc_register_card(&pdev->dev, card);
}
diff --git a/sound/soc/intel/boards/hda_dsp_common.c b/sound/soc/intel/boards/hda_dsp_common.c
new file mode 100644
index 000000000000..ed36b68d6705
--- /dev/null
+++ b/sound/soc/intel/boards/hda_dsp_common.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright(c) 2019 Intel Corporation. All rights reserved.
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include "../../codecs/hdac_hda.h"
+
+#include "hda_dsp_common.h"
+
+/*
+ * Search the card topology and return the PCM device number
+ * matching the Nth HDMI device (zero-based index).
+ */
+struct snd_pcm *hda_dsp_hdmi_pcm_handle(struct snd_soc_card *card,
+ int hdmi_idx)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_pcm *spcm;
+ int i = 0;
+
+ for_each_card_rtds(card, rtd) {
+ spcm = rtd->pcm ?
+ rtd->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].pcm : NULL;
+ if (spcm && strstr(spcm->id, "HDMI")) {
+ if (i == hdmi_idx)
+ return rtd->pcm;
+ ++i;
+ }
+ }
+
+ return NULL;
+}
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+/*
+ * Search the card topology and register the HDMI PCM-related
+ * controls with the codec driver.
+ */
+int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+ struct snd_soc_component *comp)
+{
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_codec *hcodec;
+ struct snd_pcm *spcm;
+ struct hda_pcm *hpcm;
+ int err = 0, i = 0;
+
+ if (!comp)
+ return -EINVAL;
+
+ hda_pvt = snd_soc_component_get_drvdata(comp);
+ hcodec = &hda_pvt->codec;
+
+ list_for_each_entry(hpcm, &hcodec->pcm_list_head, list) {
+ spcm = hda_dsp_hdmi_pcm_handle(card, i);
+ if (spcm) {
+ hpcm->pcm = spcm;
+ hpcm->device = spcm->device;
+ dev_dbg(card->dev,
+ "%s: mapping HDMI converter %d to PCM %d (%p)\n",
+ __func__, i, hpcm->device, spcm);
+ } else {
+ hpcm->pcm = NULL;
+ hpcm->device = SNDRV_PCM_INVALID_DEVICE;
+ dev_warn(card->dev,
+ "%s: no PCM in topology for HDMI converter %d\n\n",
+ __func__, i);
+ }
+ i++;
+ }
+ snd_hdac_display_power(hcodec->core.bus,
+ HDA_CODEC_IDX_CONTROLLER, true);
+ err = snd_hda_codec_build_controls(hcodec);
+ if (err < 0)
+ dev_err(card->dev, "unable to create controls %d\n", err);
+ snd_hdac_display_power(hcodec->core.bus,
+ HDA_CODEC_IDX_CONTROLLER, false);
+
+ return err;
+}
+
+#endif
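
Usage sketch (condensed from the machine drivers touched in this
series): when the platform code requests the common HDA HDMI codec
driver, a card's late_probe hands control creation to the new helper
instead of doing per-PCM hdac_hdmi jack setup. The names below mirror
the cml driver above; this is a schematic, not new driver code.

static int example_late_probe(struct snd_soc_card *card)
{
	struct card_private *ctx = snd_soc_card_get_drvdata(card);
	struct hdmi_pcm *pcm;

	pcm = list_first_entry(&ctx->hdmi_pcm_list, struct hdmi_pcm, head);

	if (ctx->common_hdmi_codec_drv)
		return hda_dsp_hdmi_build_controls(card,
						   pcm->codec_dai->component);

	/* ... otherwise fall back to hdac_hdmi jack setup per PCM ... */
	return 0;
}
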
diff --git a/sound/soc/intel/boards/hda_dsp_common.h b/sound/soc/intel/boards/hda_dsp_common.h
new file mode 100644
index 000000000000..431f7f09dccb
--- /dev/null
+++ b/sound/soc/intel/boards/hda_dsp_common.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2019 Intel Corporation.
+ */
+
+/*
+ * This file defines helper functions used by multiple
+ * Intel HDA based machine drivers.
+ */
+
+#ifndef __HDA_DSP_COMMON_H
+#define __HDA_DSP_COMMON_H
+
+#include <sound/hda_codec.h>
+#include <sound/hda_i915.h>
+#include "../../codecs/hdac_hda.h"
+
+struct snd_pcm *hda_dsp_hdmi_pcm_handle(struct snd_soc_card *card,
+ int hdmi_idx);
+
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+ struct snd_soc_component *comp);
+#else
+static inline int hda_dsp_hdmi_build_controls(struct snd_soc_card *card,
+ struct snd_soc_component *comp)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __HDA_DSP_COMMON_H */
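
The header stubs the helper out when CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC
is not built, so machine drivers can call it unconditionally. The same
pattern in schematic form (the FOO names are placeholders):

#if IS_ENABLED(CONFIG_FOO)
int foo_build_controls(struct foo_card *card);
#else
static inline int foo_build_controls(struct foo_card *card)
{
	return -EINVAL;	/* feature compiled out */
}
#endif
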
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 74dda8784f1a..3e5f6bead229 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -22,6 +22,9 @@
#include "../../codecs/rt5514.h"
#include "../../codecs/rt5663.h"
#include "../../codecs/hdac_hdmi.h"
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#define KBL_REALTEK_CODEC_DAI "rt5663-aif"
#define KBL_REALTEK_DMIC_CODEC_DAI "rt5514-aif1"
@@ -50,6 +53,8 @@ struct kbl_codec_private {
struct snd_soc_jack kabylake_headset;
struct list_head hdmi_pcm_list;
struct snd_soc_jack kabylake_hdmi[2];
+ struct clk *mclk;
+ struct clk *sclk;
};
enum {
@@ -71,6 +76,61 @@ static const struct snd_kcontrol_new kabylake_controls[] = {
SOC_DAPM_PIN_SWITCH("DMIC"),
};
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct kbl_codec_private *priv = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ /*
+ * MCLK/SCLK need to be on early for successful synchronization
+ * of the codec's internal clock, and are turned off during
+ * POST_PMD after the stream is stopped.
+ */
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Enable MCLK */
+ ret = clk_set_rate(priv->mclk, 24000000);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't set rate for mclk, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't enable mclk, err: %d\n", ret);
+ return ret;
+ }
+
+ /* Enable SCLK */
+ ret = clk_set_rate(priv->sclk, 3072000);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't set rate for sclk, err: %d\n",
+ ret);
+ clk_disable_unprepare(priv->mclk);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->sclk);
+ if (ret < 0) {
+ dev_err(card->dev, "Can't enable sclk, err: %d\n", ret);
+ clk_disable_unprepare(priv->mclk);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ clk_disable_unprepare(priv->mclk);
+ clk_disable_unprepare(priv->sclk);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget kabylake_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
@@ -79,11 +139,15 @@ static const struct snd_soc_dapm_widget kabylake_widgets[] = {
SND_SOC_DAPM_MIC("DMIC", NULL),
SND_SOC_DAPM_SPK("HDMI1", NULL),
SND_SOC_DAPM_SPK("HDMI2", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route kabylake_map[] = {
/* Headphones */
+ { "Headphone Jack", NULL, "Platform Clock" },
{ "Headphone Jack", NULL, "HPOL" },
{ "Headphone Jack", NULL, "HPOR" },
@@ -92,6 +156,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = {
{ "Right Spk", NULL, "Right BE_OUT" },
/* other jacks */
+ { "Headset Mic", NULL, "Platform Clock" },
{ "IN1P", NULL, "Headset Mic" },
{ "IN1N", NULL, "Headset Mic" },
@@ -400,6 +465,9 @@ static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
dmic_constraints);
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
return snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
}
@@ -588,6 +656,55 @@ static struct snd_soc_dai_link kabylake_dais[] = {
},
};
+static int kabylake_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
+{
+ struct snd_soc_component *component = dapm->component;
+ struct kbl_codec_private *priv = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ if (!component || strcmp(component->name, RT5514_DEV_NAME))
+ return 0;
+
+ if (IS_ERR(priv->mclk))
+ return 0;
+
+ /*
+ * The mclk must be controlled directly in the set_bias_level
+ * function for the rt5514 codec, or recording could break.
+ */
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_ON) {
+ dev_dbg(card->dev, "Disable mclk");
+ clk_disable_unprepare(priv->mclk);
+ } else {
+ dev_dbg(card->dev, "Enable mclk");
+ ret = clk_set_rate(priv->mclk, 24000000);
+ if (ret) {
+ dev_err(card->dev, "Can't set rate for mclk, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret) {
+ dev_err(card->dev, "Can't enable mclk, err: %d\n",
+ ret);
+
+ /* mclk is already enabled in FW */
+ ret = 0;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static int kabylake_card_late_probe(struct snd_soc_card *card)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
@@ -623,10 +740,11 @@ static int kabylake_card_late_probe(struct snd_soc_card *card)
* kabylake audio machine driver for MAX98927 + RT5514 + RT5663
*/
static struct snd_soc_card kabylake_audio_card = {
- .name = "kbl_r5514_5663_max",
+ .name = "kbl-r5514-5663-max",
.owner = THIS_MODULE,
.dai_link = kabylake_dais,
.num_links = ARRAY_SIZE(kabylake_dais),
+ .set_bias_level = kabylake_set_bias_level,
.controls = kabylake_controls,
.num_controls = ARRAY_SIZE(kabylake_controls),
.dapm_widgets = kabylake_widgets,
@@ -643,6 +761,7 @@ static int kabylake_audio_probe(struct platform_device *pdev)
{
struct kbl_codec_private *ctx;
struct snd_soc_acpi_mach *mach;
+ int ret = 0;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -658,6 +777,34 @@ static int kabylake_audio_probe(struct platform_device *pdev)
dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
+ ctx->mclk = devm_clk_get(&pdev->dev, "ssp1_mclk");
+ if (IS_ERR(ctx->mclk)) {
+ ret = PTR_ERR(ctx->mclk);
+ if (ret == -ENOENT) {
+ dev_info(&pdev->dev,
+ "Failed to get ssp1_mclk, defer probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ dev_err(&pdev->dev, "Failed to get ssp1_mclk with err:%d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->sclk = devm_clk_get(&pdev->dev, "ssp1_sclk");
+ if (IS_ERR(ctx->sclk)) {
+ ret = PTR_ERR(ctx->sclk);
+ if (ret == -ENOENT) {
+ dev_info(&pdev->dev,
+ "Failed to get ssp1_sclk, defer probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ dev_err(&pdev->dev, "Failed to get ssp1_sclk with err:%d\n",
+ ret);
+ return ret;
+ }
+
return devm_snd_soc_register_card(&pdev->dev, &kabylake_audio_card);
}
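
The two clock lookups above share the same shape; a small helper
(hypothetical, not in the patch) makes the -ENOENT -> -EPROBE_DEFER
translation explicit:

static struct clk *kbl_get_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -ENOENT) {
			dev_info(dev, "Failed to get %s, defer probe\n",
				 name);
			return ERR_PTR(-EPROBE_DEFER);
		}
		dev_err(dev, "Failed to get %s with err:%ld\n",
			name, PTR_ERR(clk));
	}
	return clk;
}

The probe would then assign ctx->mclk = kbl_get_clk(&pdev->dev,
"ssp1_mclk") and return PTR_ERR() on error, exactly as the open-coded
version above does.
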
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.c b/sound/soc/intel/boards/skl_hda_dsp_common.c
index 58409b6e476e..eb419e1ec42b 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.c
@@ -14,6 +14,9 @@
#include "../../codecs/hdac_hdmi.h"
#include "skl_hda_dsp_common.h"
+#include <sound/hda_codec.h>
+#include "../../codecs/hdac_hda.h"
+
#define NAME_SIZE 32
int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device)
@@ -136,6 +139,9 @@ int skl_hda_hdmi_jack_init(struct snd_soc_card *card)
char jack_name[NAME_SIZE];
int err;
+ if (ctx->common_hdmi_codec_drv)
+ return skl_hda_hdmi_build_controls(card);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
index daa582e513b2..d6150670ca05 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.h
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -8,12 +8,15 @@
* platforms with HDA Codecs.
*/
-#ifndef __SOUND_SOC_HDA_DSP_COMMON_H
-#define __SOUND_SOC_HDA_DSP_COMMON_H
+#ifndef __SKL_HDA_DSP_COMMON_H
+#define __SKL_HDA_DSP_COMMON_H
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/jack.h>
+#include <sound/hda_codec.h>
+#include "../../codecs/hdac_hda.h"
+#include "hda_dsp_common.h"
#define HDA_DSP_MAX_BE_DAI_LINKS 7
@@ -29,10 +32,30 @@ struct skl_hda_private {
int pcm_count;
int dai_index;
const char *platform_name;
+ bool common_hdmi_codec_drv;
};
extern struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS];
int skl_hda_hdmi_jack_init(struct snd_soc_card *card);
int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device);
+/*
+ * Search the card topology and register the HDMI PCM-related
+ * controls with the codec driver.
+ */
+static inline int skl_hda_hdmi_build_controls(struct snd_soc_card *card)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component;
+ struct skl_hda_hdmi_pcm *pcm;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct skl_hda_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+ if (!component)
+ return -EINVAL;
+
+ return hda_dsp_hdmi_build_controls(card, component);
+}
+
-#endif /* __SOUND_SOC_HDA_DSP_COMMON_H */
+#endif /* __SKL_HDA_DSP_COMMON_H */
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 1778acdc367c..4e45901e3a2f 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -90,7 +90,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
}
static struct snd_soc_card hda_soc_card = {
- .name = "skl_hda_card",
+ .name = "hda-dsp",
.owner = THIS_MODULE,
.dai_link = skl_hda_be_dai_links,
.dapm_widgets = skl_hda_widgets,
@@ -178,6 +178,7 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
ctx->pcm_count = hda_soc_card.num_links;
ctx->dai_index = 1; /* hdmi codec dai name starts from index 1 */
ctx->platform_name = mach->mach_params.platform;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
hda_soc_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&hda_soc_card, ctx);
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 4f6e58c3954a..751b8ea6ae1f 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -21,6 +21,7 @@
#include "../../codecs/rt5682.h"
#include "../../codecs/hdac_hdmi.h"
#include "../common/soc-intel-quirks.h"
+#include "hda_dsp_common.h"
#define NAME_SIZE 32
@@ -53,6 +54,7 @@ struct sof_card_private {
struct clk *mclk;
struct snd_soc_jack sof_headset;
struct list_head hdmi_pcm_list;
+ bool common_hdmi_codec_drv;
};
static int sof_rt5682_quirk_cb(const struct dmi_system_id *id)
@@ -274,6 +276,13 @@ static int sof_card_late_probe(struct snd_soc_card *card)
if (is_legacy_cpu)
return 0;
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm,
+ head);
+ component = pcm->codec_dai->component;
+
+ if (ctx->common_hdmi_codec_drv)
+ return hda_dsp_hdmi_build_controls(card, component);
+
list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
component = pcm->codec_dai->component;
snprintf(jack_name, sizeof(jack_name),
@@ -370,7 +379,7 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd)
/* sof audio machine driver for rt5682 codec */
static struct snd_soc_card sof_audio_card_rt5682 = {
- .name = "sof_rt5682",
+ .name = "rt5682", /* the sof- prefix is added by the core */
.owner = THIS_MODULE,
.controls = sof_controls,
.num_controls = ARRAY_SIZE(sof_controls),
@@ -651,6 +660,8 @@ static int sof_audio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
+
snd_soc_card_set_drvdata(&sof_audio_card_rt5682, ctx);
return devm_snd_soc_register_card(&pdev->dev,
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 18d9630ae9a2..bd352878f89a 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -7,8 +7,10 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
soc-acpi-intel-hsw-bdw-match.o \
soc-acpi-intel-skl-match.o soc-acpi-intel-kbl-match.o \
soc-acpi-intel-bxt-match.o soc-acpi-intel-glk-match.o \
- soc-acpi-intel-cnl-match.o soc-acpi-intel-icl-match.o \
+ soc-acpi-intel-cnl-match.o soc-acpi-intel-cfl-match.o \
+ soc-acpi-intel-cml-match.o soc-acpi-intel-icl-match.o \
soc-acpi-intel-tgl-match.o soc-acpi-intel-ehl-match.o \
+ soc-acpi-intel-jsl-match.o \
soc-acpi-intel-hda-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-cfl-match.c b/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
new file mode 100644
index 000000000000..d6fd2026d0b8
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-cfl-match.c - tables and support for CFL ACPI enumeration.
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_machines[] = {
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cfl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
new file mode 100644
index 000000000000..5d08ae066738
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-cml-match.c - tables and support for CML ACPI enumeration.
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+static struct snd_soc_acpi_codecs cml_codecs = {
+ .num_codecs = 1,
+ .codecs = {"10EC5682"}
+};
+
+static struct snd_soc_acpi_codecs cml_spk_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
+ {
+ .id = "DLGS7219",
+ .drv_name = "cml_da7219_max98357a",
+ .quirk_data = &cml_spk_codecs,
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg",
+ },
+ {
+ .id = "MX98357A",
+ .drv_name = "sof_rt5682",
+ .quirk_data = &cml_codecs,
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
+ },
+ {
+ .id = "10EC1011",
+ .drv_name = "cml_rt1011_rt5682",
+ .quirk_data = &cml_codecs,
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg",
+ },
+ {
+ .id = "10EC5682",
+ .drv_name = "sof_rt5682",
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-rt5682.tplg",
+ },
+
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
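
For reference, the anatomy of one entry as used above (values copied
from the table, not new code): the ACPI HID that enumerated the device
selects the machine driver to bind and the SOF firmware/topology pair
to load, while quirk_data records a companion codec expected on the
same board.

	{
		.id = "10EC1011",		/* ACPI HID of the amp */
		.drv_name = "cml_rt1011_rt5682",/* machine driver name */
		.quirk_data = &cml_codecs,	/* companion RT5682 */
		.sof_fw_filename = "sof-cml.ri",
		.sof_tplg_filename = "sof-cml-rt1011-rt5682.tplg",
	},

The drv_name must match the platform_driver .name registered by the
machine driver (see snd_cml_rt1011_rt5682_driver earlier in this
patch).
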
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index 985aa366c9e8..27588841c8b0 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -14,16 +14,6 @@ static struct skl_machine_pdata cnl_pdata = {
.use_tplg_pcm = true,
};
-static struct snd_soc_acpi_codecs cml_codecs = {
- .num_codecs = 1,
- .codecs = {"10EC5682"}
-};
-
-static struct snd_soc_acpi_codecs cml_spk_codecs = {
- .num_codecs = 1,
- .codecs = {"MX98357A"}
-};
-
struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
{
.id = "INT34C2",
@@ -33,27 +23,6 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[] = {
.sof_fw_filename = "sof-cnl.ri",
.sof_tplg_filename = "sof-cnl-rt274.tplg",
},
- {
- .id = "DLGS7219",
- .drv_name = "cml_da7219_max98357a",
- .quirk_data = &cml_spk_codecs,
- .sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-da7219-max98357a.tplg",
- },
- {
- .id = "MX98357A",
- .drv_name = "sof_rt5682",
- .quirk_data = &cml_codecs,
- .sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-rt5682-max98357a.tplg",
- },
- {
- .id = "10EC5682",
- .drv_name = "sof_rt5682",
- .sof_fw_filename = "sof-cnl.ri",
- .sof_tplg_filename = "sof-cml-rt5682.tplg",
- },
-
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cnl_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
new file mode 100644
index 000000000000..1c68a04f0c6e
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-jsl-match.c - tables and support for JSL ACPI enumeration.
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[] = {
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_jsl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index 7f4f6b755760..a3a5bba2fbd9 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -458,12 +458,12 @@ static int create_adsp_page_table(struct snd_pcm_substream *substream,
}
/* this may get called several times by oss emulation */
-static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int hsw_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -656,16 +656,17 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int hsw_pcm_hw_free(struct snd_pcm_substream *substream)
+static int hsw_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
-static int hsw_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int hsw_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw_stream *sst_stream;
@@ -770,11 +771,11 @@ static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data)
return pos;
}
-static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -795,10 +796,10 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
return offset;
}
-static int hsw_pcm_open(struct snd_pcm_substream *substream)
+static int hsw_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -828,10 +829,10 @@ static int hsw_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int hsw_pcm_close(struct snd_pcm_substream *substream)
+static int hsw_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct hsw_priv_data *pdata = snd_soc_component_get_drvdata(component);
struct hsw_pcm_data *pcm_data;
struct sst_hsw *hsw = pdata->hsw;
@@ -862,17 +863,6 @@ out:
return ret;
}
-static const struct snd_pcm_ops hsw_pcm_ops = {
- .open = hsw_pcm_open,
- .close = hsw_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = hsw_pcm_hw_params,
- .hw_free = hsw_pcm_hw_free,
- .trigger = hsw_pcm_trigger,
- .pointer = hsw_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
static int hsw_pcm_create_modules(struct hsw_priv_data *pdata)
{
struct sst_hsw *hsw = pdata->hsw;
@@ -930,10 +920,10 @@ static void hsw_pcm_free_modules(struct hsw_priv_data *pdata)
}
}
-static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int hsw_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_pdata *pdata = dev_get_platdata(component->dev);
struct hsw_priv_data *priv_data = dev_get_drvdata(component->dev);
struct device *dev = pdata->dma_dev;
@@ -1121,8 +1111,14 @@ static const struct snd_soc_component_driver hsw_dai_component = {
.name = DRV_NAME,
.probe = hsw_pcm_probe,
.remove = hsw_pcm_remove,
- .ops = &hsw_pcm_ops,
- .pcm_new = hsw_pcm_new,
+ .open = hsw_pcm_open,
+ .close = hsw_pcm_close,
+ .hw_params = hsw_pcm_hw_params,
+ .hw_free = hsw_pcm_hw_free,
+ .trigger = hsw_pcm_trigger,
+ .pointer = hsw_pcm_pointer,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pcm_construct = hsw_pcm_new,
.controls = hsw_volume_controls,
.num_controls = ARRAY_SIZE(hsw_volume_controls),
.dapm_widgets = widgets,
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 7f287424af9b..8b9abb79a69e 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1081,7 +1081,8 @@ int skl_dai_load(struct snd_soc_component *cmp, int index,
return 0;
}
-static int skl_platform_open(struct snd_pcm_substream *substream)
+static int skl_platform_soc_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai_link *dai_link = rtd->dai_link;
@@ -1167,8 +1168,9 @@ static int skl_coupled_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int skl_platform_soc_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ int cmd)
{
struct hdac_bus *bus = get_bus_ctx(substream);
@@ -1178,8 +1180,9 @@ static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static snd_pcm_uframes_t skl_platform_pcm_pointer
- (struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t skl_platform_soc_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
struct hdac_bus *bus = get_bus_ctx(substream);
@@ -1225,6 +1228,13 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
return bytes_to_frames(substream->runtime, pos);
}
+static int skl_platform_soc_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+{
+ return snd_pcm_lib_default_mmap(substream, area);
+}
+
static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
u64 nsec)
{
@@ -1245,7 +1255,9 @@ static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
}
-static int skl_get_time_info(struct snd_pcm_substream *substream,
+static int skl_platform_soc_get_time_info(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct timespec *system_ts, struct timespec *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
@@ -1277,24 +1289,16 @@ static int skl_get_time_info(struct snd_pcm_substream *substream,
return 0;
}
-static const struct snd_pcm_ops skl_platform_ops = {
- .open = skl_platform_open,
- .ioctl = snd_pcm_lib_ioctl,
- .trigger = skl_platform_pcm_trigger,
- .pointer = skl_platform_pcm_pointer,
- .get_time_info = skl_get_time_info,
- .mmap = snd_pcm_lib_default_mmap,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
-static void skl_pcm_free(struct snd_pcm *pcm)
+static void skl_platform_soc_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
-static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int skl_platform_soc_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *dai = rtd->cpu_dai;
struct hdac_bus *bus = dev_get_drvdata(dai->dev);
@@ -1310,7 +1314,7 @@ static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
size = MAX_PREALLOC_SIZE;
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV_SG,
- snd_dma_pci_data(skl->pci),
+ &skl->pci->dev,
size, MAX_PREALLOC_SIZE);
}
@@ -1458,7 +1462,7 @@ static int skl_platform_soc_probe(struct snd_soc_component *component)
return 0;
}
-static void skl_pcm_remove(struct snd_soc_component *component)
+static void skl_platform_soc_remove(struct snd_soc_component *component)
{
struct hdac_bus *bus = dev_get_drvdata(component->dev);
struct skl_dev *skl = bus_to_skl(bus);
@@ -1471,10 +1475,15 @@ static void skl_pcm_remove(struct snd_soc_component *component)
static const struct snd_soc_component_driver skl_component = {
.name = "pcm",
.probe = skl_platform_soc_probe,
- .remove = skl_pcm_remove,
- .ops = &skl_platform_ops,
- .pcm_new = skl_pcm_new,
- .pcm_free = skl_pcm_free,
+ .remove = skl_platform_soc_remove,
+ .open = skl_platform_soc_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .trigger = skl_platform_soc_trigger,
+ .pointer = skl_platform_soc_pointer,
+ .get_time_info = skl_platform_soc_get_time_info,
+ .mmap = skl_platform_soc_mmap,
+ .pcm_construct = skl_platform_soc_new,
+ .pcm_destruct = skl_platform_soc_free,
.module_get_upon_open = 1, /* increment refcount when a pcm is opened */
};
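
The skylake and haswell changes above follow the same mechanical
transform applied across this series: the per-component struct
snd_pcm_ops table is removed and each PCM callback moves into struct
snd_soc_component_driver, gaining the component as its first argument.
A schematic sketch with placeholder foo_* names (foo_priv and foo_start
are hypothetical stand-ins for a driver's own state and start path):

static int foo_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd);
static void foo_pcm_free(struct snd_soc_component *component,
			 struct snd_pcm *pcm);

static int foo_open(struct snd_soc_component *component,
		    struct snd_pcm_substream *substream)
{
	/* component arrives directly; no snd_soc_rtdcom_lookup() */
	struct foo_priv *priv = snd_soc_component_get_drvdata(component);

	return foo_start(priv, substream);
}

static const struct snd_soc_component_driver foo_component = {
	.name		= "foo",
	.open		= foo_open,
	.ioctl		= snd_soc_pcm_lib_ioctl,
	.pcm_construct	= foo_pcm_new,		/* was .pcm_new */
	.pcm_destruct	= foo_pcm_free,		/* was .pcm_free */
};
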
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 141dbbf975ac..58ba3e9469ba 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -27,6 +27,7 @@
#include <sound/hda_i915.h>
#include <sound/hda_codec.h>
#include <sound/intel-nhlt.h>
+#include <sound/intel-dsp-config.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
@@ -987,22 +988,10 @@ static int skl_probe(struct pci_dev *pci,
switch (skl_pci_binding) {
case SND_SKL_PCI_BIND_AUTO:
- /*
- * detect DSP by checking class/subclass/prog-id information
- * class=04 subclass 03 prog-if 00: no DSP, use legacy driver
- * class=04 subclass 01 prog-if 00: DSP is present
- * (and may be required e.g. for DMIC or SSP support)
- * class=04 subclass 03 prog-if 80: use DSP or legacy mode
- */
- if (pci->class == 0x040300) {
- dev_info(&pci->dev, "The DSP is not enabled on this platform, aborting probe\n");
+ err = snd_intel_dsp_driver_probe(pci);
+ if (err != SND_INTEL_DSP_DRIVER_ANY &&
+ err != SND_INTEL_DSP_DRIVER_SST)
return -ENODEV;
- }
- if (pci->class != 0x040100 && pci->class != 0x040380) {
- dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, aborting probe\n", pci->class);
- return -ENODEV;
- }
- dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
break;
case SND_SKL_PCI_BIND_LEGACY:
dev_info(&pci->dev, "Module parameter forced binding with HDaudio legacy, aborting probe\n");
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 13408de34055..38d48d101783 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -497,15 +497,13 @@ static int jz4740_i2s_dev_probe(struct platform_device *pdev)
struct jz4740_i2s *i2s;
struct resource *mem;
int ret;
- const struct of_device_id *match;
i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
- match = of_match_device(jz4740_of_matches, &pdev->dev);
- if (match)
- i2s->version = (enum jz47xx_i2s_version)match->data;
+ i2s->version =
+ (enum jz47xx_i2s_version)of_device_get_match_data(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2s->base = devm_ioremap_resource(&pdev->dev, mem);
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 6f69f314f2c2..e28fb3449f1d 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -98,7 +98,8 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
}
}
-static int kirkwood_dma_open(struct snd_pcm_substream *substream)
+static int kirkwood_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int err;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -132,7 +133,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
"kirkwood-i2s", priv);
if (err)
- return -EBUSY;
+ return err;
/*
* Enable Error interrupts. We're only ack'ing them but
@@ -160,7 +161,8 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int kirkwood_dma_close(struct snd_pcm_substream *substream)
+static int kirkwood_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
@@ -180,8 +182,9 @@ static int kirkwood_dma_close(struct snd_pcm_substream *substream)
return 0;
}
-static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int kirkwood_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -191,13 +194,15 @@ static int kirkwood_dma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream)
+static int kirkwood_dma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
-static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
+static int kirkwood_dma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
@@ -222,8 +227,9 @@ static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
- *substream)
+static snd_pcm_uframes_t kirkwood_dma_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct kirkwood_dma_data *priv = kirkwood_priv(substream);
snd_pcm_uframes_t count;
@@ -238,16 +244,6 @@ static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
return count;
}
-static const struct snd_pcm_ops kirkwood_dma_ops = {
- .open = kirkwood_dma_open,
- .close = kirkwood_dma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = kirkwood_dma_hw_params,
- .hw_free = kirkwood_dma_hw_free,
- .prepare = kirkwood_dma_prepare,
- .pointer = kirkwood_dma_pointer,
-};
-
static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
int stream)
{
@@ -267,7 +263,8 @@ static int kirkwood_dma_preallocate_dma_buffer(struct snd_pcm *pcm,
return 0;
}
-static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
+static int kirkwood_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -294,7 +291,8 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void kirkwood_dma_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -316,7 +314,13 @@ static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
const struct snd_soc_component_driver kirkwood_soc_component = {
.name = DRV_NAME,
- .ops = &kirkwood_dma_ops,
- .pcm_new = kirkwood_dma_new,
- .pcm_free = kirkwood_dma_free_dma_buffers,
+ .open = kirkwood_dma_open,
+ .close = kirkwood_dma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = kirkwood_dma_hw_params,
+ .hw_free = kirkwood_dma_hw_free,
+ .prepare = kirkwood_dma_prepare,
+ .pointer = kirkwood_dma_pointer,
+ .pcm_construct = kirkwood_dma_new,
+ .pcm_destruct = kirkwood_dma_free_dma_buffers,
};
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 111e44b64b38..a656d2014127 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -125,6 +125,7 @@ config SND_SOC_MT8183_MT6358_TS3A227E_MAX98357A
select SND_SOC_MAX98357A
select SND_SOC_BT_SCO
select SND_SOC_TS3A227E
+ select SND_SOC_CROS_EC_CODEC if CROS_EC
help
This adds ASoC driver for Mediatek MT8183 boards
with the MT6358 TS3A227E MAX98357A audio codec.
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
index 3ce527ce30ce..b6624d8d084b 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -77,11 +77,10 @@ int mtk_afe_add_sub_dai_control(struct snd_soc_component *component)
}
EXPORT_SYMBOL_GPL(mtk_afe_add_sub_dai_control);
-static snd_pcm_uframes_t mtk_afe_pcm_pointer
- (struct snd_pcm_substream *substream)
+snd_pcm_uframes_t mtk_afe_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
const struct mtk_base_memif_data *memif_data = memif->data;
@@ -111,18 +110,13 @@ static snd_pcm_uframes_t mtk_afe_pcm_pointer
POINTER_RETURN_FRAMES:
return bytes_to_frames(substream->runtime, pcm_ptr_bytes);
}
+EXPORT_SYMBOL_GPL(mtk_afe_pcm_pointer);
-const struct snd_pcm_ops mtk_afe_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .pointer = mtk_afe_pcm_pointer,
-};
-EXPORT_SYMBOL_GPL(mtk_afe_pcm_ops);
-
-int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
+int mtk_afe_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
size_t size;
struct snd_pcm *pcm = rtd->pcm;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
size = afe->mtk_afe_hardware->buffer_bytes_max;
@@ -132,17 +126,19 @@ int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
EXPORT_SYMBOL_GPL(mtk_afe_pcm_new);
-void mtk_afe_pcm_free(struct snd_pcm *pcm)
+void mtk_afe_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
EXPORT_SYMBOL_GPL(mtk_afe_pcm_free);
const struct snd_soc_component_driver mtk_afe_pcm_platform = {
- .name = AFE_PCM_NAME,
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
+ .name = AFE_PCM_NAME,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pcm_destruct = mtk_afe_pcm_free,
};
EXPORT_SYMBOL_GPL(mtk_afe_pcm_platform);
diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.h b/sound/soc/mediatek/common/mtk-afe-platform-driver.h
index 88df6797732f..e550d11568c3 100644
--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.h
+++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.h
@@ -10,7 +10,6 @@
#define _MTK_AFE_PLATFORM_DRIVER_H_
#define AFE_PCM_NAME "mtk-afe-pcm"
-extern const struct snd_pcm_ops mtk_afe_pcm_ops;
extern const struct snd_soc_component_driver mtk_afe_pcm_platform;
struct mtk_base_afe;
@@ -18,9 +17,12 @@ struct snd_pcm;
struct snd_soc_component;
struct snd_soc_pcm_runtime;
-
-int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd);
-void mtk_afe_pcm_free(struct snd_pcm *pcm);
+snd_pcm_uframes_t mtk_afe_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+int mtk_afe_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+void mtk_afe_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
int mtk_afe_combine_sub_dai(struct mtk_base_afe *afe);
int mtk_afe_add_sub_dai_control(struct snd_soc_component *component);
diff --git a/sound/soc/mediatek/common/mtk-btcvsd.c b/sound/soc/mediatek/common/mtk-btcvsd.c
index d00608c73c6e..2b490ae2e642 100644
--- a/sound/soc/mediatek/common/mtk-btcvsd.c
+++ b/sound/soc/mediatek/common/mtk-btcvsd.c
@@ -875,11 +875,9 @@ static const struct snd_pcm_hardware mtk_btcvsd_hardware = {
.fifo_size = 0,
};
-static int mtk_pcm_btcvsd_open(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
int ret;
@@ -899,11 +897,9 @@ static int mtk_pcm_btcvsd_open(struct snd_pcm_substream *substream)
return ret;
}
-static int mtk_pcm_btcvsd_close(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
@@ -914,12 +910,10 @@ static int mtk_pcm_btcvsd_close(struct snd_pcm_substream *substream)
return 0;
}
-static int mtk_pcm_btcvsd_hw_params(struct snd_pcm_substream *substream,
+static int mtk_pcm_btcvsd_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -934,11 +928,9 @@ static int mtk_pcm_btcvsd_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int mtk_pcm_btcvsd_hw_free(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -947,11 +939,9 @@ static int mtk_pcm_btcvsd_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int mtk_pcm_btcvsd_prepare(struct snd_pcm_substream *substream)
+static int mtk_pcm_btcvsd_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
@@ -961,11 +951,9 @@ static int mtk_pcm_btcvsd_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int mtk_pcm_btcvsd_trigger(struct snd_pcm_substream *substream, int cmd)
+static int mtk_pcm_btcvsd_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream = get_bt_stream(bt, substream);
int stream = substream->stream;
@@ -993,12 +981,10 @@ static int mtk_pcm_btcvsd_trigger(struct snd_pcm_substream *substream, int cmd)
}
}
-static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer
- (struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer(
+ struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
struct mtk_btcvsd_snd_stream *bt_stream;
snd_pcm_uframes_t frame = 0;
@@ -1044,13 +1030,11 @@ static snd_pcm_uframes_t mtk_pcm_btcvsd_pointer
return frame;
}
-static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
+static int mtk_pcm_btcvsd_copy(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int channel, unsigned long pos,
void __user *buf, unsigned long count)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, BTCVSD_SND_NAME);
struct mtk_btcvsd_snd *bt = snd_soc_component_get_drvdata(component);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -1061,18 +1045,6 @@ static int mtk_pcm_btcvsd_copy(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops mtk_btcvsd_ops = {
- .open = mtk_pcm_btcvsd_open,
- .close = mtk_pcm_btcvsd_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = mtk_pcm_btcvsd_hw_params,
- .hw_free = mtk_pcm_btcvsd_hw_free,
- .prepare = mtk_pcm_btcvsd_prepare,
- .trigger = mtk_pcm_btcvsd_trigger,
- .pointer = mtk_pcm_btcvsd_pointer,
- .copy_user = mtk_pcm_btcvsd_copy,
-};
-
/* kcontrol */
static const char *const btsco_band_str[] = {"NB", "WB"};
@@ -1295,9 +1267,17 @@ static int mtk_btcvsd_snd_component_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver mtk_btcvsd_snd_platform = {
- .name = BTCVSD_SND_NAME,
- .ops = &mtk_btcvsd_ops,
- .probe = mtk_btcvsd_snd_component_probe,
+ .name = BTCVSD_SND_NAME,
+ .probe = mtk_btcvsd_snd_component_probe,
+ .open = mtk_pcm_btcvsd_open,
+ .close = mtk_pcm_btcvsd_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = mtk_pcm_btcvsd_hw_params,
+ .hw_free = mtk_pcm_btcvsd_hw_free,
+ .prepare = mtk_pcm_btcvsd_prepare,
+ .trigger = mtk_pcm_btcvsd_trigger,
+ .pointer = mtk_pcm_btcvsd_pointer,
+ .copy_user = mtk_pcm_btcvsd_copy,
};
static int mtk_btcvsd_snd_probe(struct platform_device *pdev)
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index e52c032d53aa..033c07fb599c 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -710,11 +710,12 @@ static int mt6797_afe_component_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver mt6797_afe_component = {
- .name = AFE_PCM_NAME,
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
- .probe = mt6797_afe_component_probe,
+ .name = AFE_PCM_NAME,
+ .probe = mt6797_afe_component_probe,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pcm_destruct = mtk_afe_pcm_free,
};
static int mt6797_dai_memif_register(struct mtk_base_afe *afe)
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
index 4a31106d3471..76af09d8f1af 100644
--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include "mt8183-afe-common.h"
#include "mt8183-afe-clk.h"
@@ -1047,11 +1048,12 @@ static int mt8183_afe_component_probe(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver mt8183_afe_component = {
- .name = AFE_PCM_NAME,
- .ops = &mtk_afe_pcm_ops,
- .pcm_new = mtk_afe_pcm_new,
- .pcm_free = mtk_afe_pcm_free,
- .probe = mt8183_afe_component_probe,
+ .name = AFE_PCM_NAME,
+ .probe = mt8183_afe_component_probe,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .pointer = mtk_afe_pcm_pointer,
+ .pcm_construct = mtk_afe_pcm_new,
+ .pcm_destruct = mtk_afe_pcm_free,
};
static int mt8183_dai_memif_register(struct mtk_base_afe *afe)
@@ -1089,6 +1091,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
struct mtk_base_afe *afe;
struct mt8183_afe_private *afe_priv;
struct device *dev;
+ struct reset_control *rstc;
int i, irq_id, ret;
afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
@@ -1126,6 +1129,19 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
return ret;
}
+ rstc = devm_reset_control_get(dev, "audiosys");
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ dev_err(dev, "could not get audiosys reset:%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_reset(rstc);
+ if (ret) {
+ dev_err(dev, "failed to trigger audio reset:%d\n", ret);
+ return ret;
+ }
+
/* enable clock for regcache get default value from hw */
afe_priv->pm_runtime_bypass_reg_ctl = true;
pm_runtime_get_sync(&pdev->dev);
diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
index bb9cdc0d6552..0555f7d73d05 100644
--- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
@@ -19,11 +19,12 @@ enum PINCTRL_PIN_STATE {
PIN_STATE_DEFAULT = 0,
PIN_TDM_OUT_ON,
PIN_TDM_OUT_OFF,
+ PIN_WOV,
PIN_STATE_MAX
};
static const char * const mt8183_pin_str[PIN_STATE_MAX] = {
- "default", "aud_tdm_out_on", "aud_tdm_out_off",
+ "default", "aud_tdm_out_on", "aud_tdm_out_off", "wov",
};
struct mt8183_mt6358_ts3a227_max98357_priv {
@@ -142,6 +143,11 @@ SND_SOC_DAILINK_DEFS(playback_hdmi,
DAILINK_COMP_ARRAY(COMP_DUMMY()),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
+SND_SOC_DAILINK_DEFS(wake_on_voice,
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
/* BE */
SND_SOC_DAILINK_DEFS(primary_codec,
DAILINK_COMP_ARRAY(COMP_CPU("ADDA")),
@@ -229,6 +235,41 @@ static struct snd_soc_ops mt8183_mt6358_tdm_ops = {
.shutdown = mt8183_mt6358_tdm_shutdown,
};
+static int
+mt8183_mt6358_ts3a227_max98357_wov_startup(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct mt8183_mt6358_ts3a227_max98357_priv *priv =
+ snd_soc_card_get_drvdata(card);
+
+ return pinctrl_select_state(priv->pinctrl,
+ priv->pin_states[PIN_WOV]);
+}
+
+static void
+mt8183_mt6358_ts3a227_max98357_wov_shutdown(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct mt8183_mt6358_ts3a227_max98357_priv *priv =
+ snd_soc_card_get_drvdata(card);
+ int ret;
+
+ ret = pinctrl_select_state(priv->pinctrl,
+ priv->pin_states[PIN_STATE_DEFAULT]);
+ if (ret)
+ dev_err(card->dev, "%s failed to select state %d\n",
+ __func__, ret);
+}
+
+static const struct snd_soc_ops mt8183_mt6358_ts3a227_max98357_wov_ops = {
+ .startup = mt8183_mt6358_ts3a227_max98357_wov_startup,
+ .shutdown = mt8183_mt6358_ts3a227_max98357_wov_shutdown,
+};
+
static struct snd_soc_dai_link
mt8183_mt6358_ts3a227_max98357_dai_links[] = {
/* FE */
@@ -306,6 +347,15 @@ mt8183_mt6358_ts3a227_max98357_dai_links[] = {
.dpcm_playback = 1,
SND_SOC_DAILINK_REG(playback_hdmi),
},
+ {
+ .name = "Wake on Voice",
+ .stream_name = "Wake on Voice",
+ .ignore_suspend = 1,
+ .ignore = 1,
+ SND_SOC_DAILINK_REG(wake_on_voice),
+ .ops = &mt8183_mt6358_ts3a227_max98357_wov_ops,
+ },
+
/* BE */
{
.name = "Primary Codec",
@@ -429,7 +479,7 @@ static int
mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8183_mt6358_ts3a227_max98357_card;
- struct device_node *platform_node;
+ struct device_node *platform_node, *ec_codec;
struct snd_soc_dai_link *dai_link;
struct mt8183_mt6358_ts3a227_max98357_priv *priv;
int ret;
@@ -444,10 +494,24 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
+ ec_codec = of_parse_phandle(pdev->dev.of_node, "mediatek,ec-codec", 0);
+
for_each_card_prelinks(card, i, dai_link) {
if (dai_link->platforms->name)
continue;
- dai_link->platforms->of_node = platform_node;
+
+ if (ec_codec && strcmp(dai_link->name, "Wake on Voice") == 0) {
+ dai_link->cpus[0].name = NULL;
+ dai_link->cpus[0].of_node = ec_codec;
+ dai_link->cpus[0].dai_name = NULL;
+ dai_link->codecs[0].name = NULL;
+ dai_link->codecs[0].of_node = ec_codec;
+ dai_link->codecs[0].dai_name = "Wake on Voice";
+ dai_link->platforms[0].of_node = ec_codec;
+ dai_link->ignore = 0;
+ } else {
+ dai_link->platforms->of_node = platform_node;
+ }
}
mt8183_mt6358_ts3a227_max98357_headset_dev.dlc.of_node =
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 5a3749938900..d6f3eefb8f09 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -70,7 +70,8 @@ static void __dma_enable(struct axg_fifo *fifo, bool enable)
enable ? CTRL0_DMA_EN : 0);
}
-static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+int axg_fifo_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss, int cmd)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
@@ -91,8 +92,10 @@ static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
return 0;
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_trigger);
-static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
+snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
struct snd_pcm_runtime *runtime = ss->runtime;
@@ -102,9 +105,11 @@ static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_pointer);
-static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
- struct snd_pcm_hw_params *params)
+int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = ss->runtime;
struct axg_fifo *fifo = axg_fifo_data(ss);
@@ -132,15 +137,17 @@ static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
return 0;
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_params);
-static int g12a_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
- struct snd_pcm_hw_params *params)
+int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
struct snd_pcm_runtime *runtime = ss->runtime;
int ret;
- ret = axg_fifo_pcm_hw_params(ss, params);
+ ret = axg_fifo_pcm_hw_params(component, ss, params);
if (ret)
return ret;
@@ -149,8 +156,10 @@ static int g12a_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
return 0;
}
+EXPORT_SYMBOL_GPL(g12a_fifo_pcm_hw_params);
-static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
+int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
@@ -160,6 +169,7 @@ static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
return snd_pcm_lib_free_pages(ss);
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free);
static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
@@ -194,7 +204,8 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
return IRQ_RETVAL(status);
}
-static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
+int axg_fifo_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
struct device *dev = axg_fifo_dev(ss);
@@ -250,8 +261,10 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
return ret;
}
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);
-static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
+int axg_fifo_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct axg_fifo *fifo = axg_fifo_data(ss);
int ret;
@@ -267,28 +280,7 @@ static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
return ret;
}
-
-const struct snd_pcm_ops axg_fifo_pcm_ops = {
- .open = axg_fifo_pcm_open,
- .close = axg_fifo_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = axg_fifo_pcm_hw_params,
- .hw_free = axg_fifo_pcm_hw_free,
- .pointer = axg_fifo_pcm_pointer,
- .trigger = axg_fifo_pcm_trigger,
-};
-EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);
-
-const struct snd_pcm_ops g12a_fifo_pcm_ops = {
- .open = axg_fifo_pcm_open,
- .close = axg_fifo_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = g12a_fifo_pcm_hw_params,
- .hw_free = axg_fifo_pcm_hw_free,
- .pointer = axg_fifo_pcm_pointer,
- .trigger = axg_fifo_pcm_trigger,
-};
-EXPORT_SYMBOL_GPL(g12a_fifo_pcm_ops);
+EXPORT_SYMBOL_GPL(axg_fifo_pcm_close);
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
index bb1e2ce50256..cf928d43b558 100644
--- a/sound/soc/meson/axg-fifo.h
+++ b/sound/soc/meson/axg-fifo.h
@@ -15,7 +15,7 @@ struct reset_control;
struct snd_soc_component_driver;
struct snd_soc_dai;
struct snd_soc_dai_driver;
-struct snd_pcm_ops;
+
struct snd_soc_pcm_runtime;
#define AXG_FIFO_CH_MAX 128
@@ -75,8 +75,22 @@ struct axg_fifo_match_data {
struct snd_soc_dai_driver *dai_drv;
};
-extern const struct snd_pcm_ops axg_fifo_pcm_ops;
-extern const struct snd_pcm_ops g12a_fifo_pcm_ops;
+int axg_fifo_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+int axg_fifo_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+int axg_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params);
+int g12a_fifo_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *params);
+int axg_fifo_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss);
+int axg_fifo_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss, int cmd);
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type);
int axg_fifo_probe(struct platform_device *pdev);
diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c
index 6ab111c31b28..665d75d49d7b 100644
--- a/sound/soc/meson/axg-frddr.c
+++ b/sound/soc/meson/axg-frddr.c
@@ -149,7 +149,13 @@ static const struct snd_soc_component_driver axg_frddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(axg_frddr_dapm_widgets),
.dapm_routes = axg_frddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_frddr_dapm_routes),
- .ops = &axg_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = axg_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data axg_frddr_match_data = {
@@ -267,7 +273,13 @@ static const struct snd_soc_component_driver g12a_frddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(g12a_frddr_dapm_widgets),
.dapm_routes = g12a_frddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_frddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data g12a_frddr_match_data = {
@@ -331,7 +343,13 @@ static const struct snd_soc_component_driver sm1_frddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(sm1_frddr_dapm_widgets),
.dapm_routes = g12a_frddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(g12a_frddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data sm1_frddr_match_data = {
diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
index c8ea2145f576..7fef0b961496 100644
--- a/sound/soc/meson/axg-toddr.c
+++ b/sound/soc/meson/axg-toddr.c
@@ -181,7 +181,13 @@ static const struct snd_soc_component_driver axg_toddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(axg_toddr_dapm_widgets),
.dapm_routes = axg_toddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_toddr_dapm_routes),
- .ops = &axg_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = axg_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data axg_toddr_match_data = {
@@ -214,7 +220,13 @@ static const struct snd_soc_component_driver g12a_toddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(axg_toddr_dapm_widgets),
.dapm_routes = axg_toddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(axg_toddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data g12a_toddr_match_data = {
@@ -278,7 +290,13 @@ static const struct snd_soc_component_driver sm1_toddr_component_drv = {
.num_dapm_widgets = ARRAY_SIZE(sm1_toddr_dapm_widgets),
.dapm_routes = sm1_toddr_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(sm1_toddr_dapm_routes),
- .ops = &g12a_fifo_pcm_ops
+ .open = axg_fifo_pcm_open,
+ .close = axg_fifo_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = g12a_fifo_pcm_hw_params,
+ .hw_free = axg_fifo_pcm_hw_free,
+ .pointer = axg_fifo_pcm_pointer,
+ .trigger = axg_fifo_pcm_trigger,
};
static const struct axg_fifo_match_data sm1_toddr_match_data = {
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 213d4dab0346..295cfffa4646 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -190,14 +190,14 @@ config SND_PXA2XX_SOC_MAGICIAN
HTC Magician.
config SND_PXA2XX_SOC_MIOA701
- tristate "SoC Audio support for MIO A701"
- depends on SND_PXA2XX_SOC && MACH_MIOA701
+ tristate "SoC Audio support for MIO A701"
+ depends on SND_PXA2XX_SOC && MACH_MIOA701
depends on AC97_BUS=n
- select SND_PXA2XX_SOC_AC97
- select SND_SOC_WM9713
- help
- Say Y if you want to add support for SoC audio on the
- MIO A701.
+ select SND_PXA2XX_SOC_AC97
+ select SND_SOC_WM9713
+ help
+ Say Y if you want to add support for SoC audio on the
+ MIO A701.
config SND_PXA2XX_SOC_IMOTE2
tristate "SoC Audio support for IMote 2"
@@ -205,7 +205,7 @@ config SND_PXA2XX_SOC_IMOTE2
select SND_PXA2XX_SOC_I2S
select SND_SOC_WM8940
help
- Say Y if you want to add support for SoC audio on the
+ Say Y if you want to add support for SoC audio on the
IMote 2.
config SND_MMP_SOC_BROWNSTONE
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 7096b5263e25..54a4c9213e83 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -55,8 +55,9 @@ static struct snd_pcm_hardware mmp_pcm_hardware[] = {
},
};
-static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int mmp_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
struct dma_slave_config slave_config;
@@ -77,6 +78,18 @@ static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int mmp_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ return snd_dmaengine_pcm_trigger(substream, cmd);
+}
+
+static snd_pcm_uframes_t mmp_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_pointer(substream);
+}
+
static bool filter(struct dma_chan *chan, void *param)
{
struct mmp_dma_data *dma_data = param;
@@ -94,10 +107,10 @@ static bool filter(struct dma_chan *chan, void *param)
return found;
}
-static int mmp_pcm_open(struct snd_pcm_substream *substream)
+static int mmp_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct platform_device *pdev = to_platform_device(component->dev);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct mmp_dma_data dma_data;
@@ -117,8 +130,15 @@ static int mmp_pcm_open(struct snd_pcm_substream *substream)
&dma_data);
}
-static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int mmp_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_close_release_chan(substream);
+}
+
+static int mmp_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long off = vma->vm_pgoff;
@@ -129,17 +149,8 @@ static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-static const struct snd_pcm_ops mmp_pcm_ops = {
- .open = mmp_pcm_open,
- .close = snd_dmaengine_pcm_close_release_chan,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = mmp_pcm_hw_params,
- .trigger = snd_dmaengine_pcm_trigger,
- .pointer = snd_dmaengine_pcm_pointer,
- .mmap = mmp_pcm_mmap,
-};
-
-static void mmp_pcm_free_dma_buffers(struct snd_pcm *pcm)
+static void mmp_pcm_free_dma_buffers(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -188,7 +199,8 @@ static int mmp_pcm_preallocate_dma_buffer(struct snd_pcm_substream *substream,
return 0;
}
-static int mmp_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int mmp_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm_substream *substream;
struct snd_pcm *pcm = rtd->pcm;
@@ -205,15 +217,21 @@ static int mmp_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
err:
- mmp_pcm_free_dma_buffers(pcm);
+ mmp_pcm_free_dma_buffers(component, pcm);
return ret;
}
static const struct snd_soc_component_driver mmp_soc_component = {
.name = DRV_NAME,
- .ops = &mmp_pcm_ops,
- .pcm_new = mmp_pcm_new,
- .pcm_free = mmp_pcm_free_dma_buffers,
+ .open = mmp_pcm_open,
+ .close = mmp_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = mmp_pcm_hw_params,
+ .trigger = mmp_pcm_trigger,
+ .pointer = mmp_pcm_pointer,
+ .mmap = mmp_pcm_mmap,
+ .pcm_construct = mmp_pcm_new,
+ .pcm_destruct = mmp_pcm_free_dma_buffers,
};
static int mmp_pcm_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 48d5c2252b10..59ef04d0467a 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -56,7 +56,7 @@ static void poodle_ext_control(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
}
- /* set the enpoints to their new connetion states */
+ /* set the endpoints to their new connection states */
if (poodle_spk_func == POODLE_SPK_ON)
snd_soc_dapm_enable_pin(dapm, "Ext Spk");
else
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 6c5201431f6e..76fdce54f007 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -869,9 +869,17 @@ static struct snd_soc_dai_driver pxa_ssp_dai = {
static const struct snd_soc_component_driver pxa_ssp_component = {
.name = "pxa-ssp",
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
#ifdef CONFIG_OF
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index bf28187315db..31e81a6f616f 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -204,9 +204,17 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
static const struct snd_soc_component_driver pxa_ac97_component = {
.name = "pxa-ac97",
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
#ifdef CONFIG_OF
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 9f7fb7335ac0..e77d707efde7 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -360,9 +360,17 @@ static struct snd_soc_dai_driver pxa_i2s_dai = {
static const struct snd_soc_component_driver pxa_i2s_component = {
.name = "pxa-i2s",
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 74b56fa0870f..07b3455a6f23 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -18,9 +18,17 @@
#include <sound/dmaengine_pcm.h>
static const struct snd_soc_component_driver pxa2xx_soc_platform = {
- .ops = &pxa2xx_pcm_ops,
- .pcm_new = pxa2xx_soc_pcm_new,
- .pcm_free = pxa2xx_pcm_free_dma_buffers,
+ .pcm_construct = pxa2xx_soc_pcm_new,
+ .pcm_destruct = pxa2xx_soc_pcm_free,
+ .open = pxa2xx_soc_pcm_open,
+ .close = pxa2xx_soc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = pxa2xx_soc_pcm_hw_params,
+ .hw_free = pxa2xx_soc_pcm_hw_free,
+ .prepare = pxa2xx_soc_pcm_prepare,
+ .trigger = pxa2xx_soc_pcm_trigger,
+ .pointer = pxa2xx_soc_pcm_pointer,
+ .mmap = pxa2xx_soc_pcm_mmap,
};
static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 60086858e920..6530d2462a9e 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -3,8 +3,8 @@ config SND_SOC_QCOM
tristate "ASoC support for QCOM platforms"
depends on ARCH_QCOM || COMPILE_TEST
help
- Say Y or M if you want to add support to use audio devices
- in Qualcomm Technologies SOC-based platforms.
+	  Say Y or M if you want to add support for audio devices
+ in Qualcomm Technologies SOC-based platforms.
config SND_SOC_LPASS_CPU
tristate
@@ -30,17 +30,17 @@ config SND_SOC_STORM
select SND_SOC_LPASS_IPQ806X
select SND_SOC_MAX98357A
help
- Say Y or M if you want add support for SoC audio on the
- Qualcomm Technologies IPQ806X-based Storm board.
+	  Say Y or M if you want to add support for SoC audio on the
+ Qualcomm Technologies IPQ806X-based Storm board.
config SND_SOC_APQ8016_SBC
tristate "SoC Audio support for APQ8016 SBC platforms"
depends on SND_SOC_QCOM
select SND_SOC_LPASS_APQ8016
help
- Support for Qualcomm Technologies LPASS audio block in
- APQ8016 SOC-based systems.
- Say Y if you want to use audio devices on MI2S.
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8016 SOC-based systems.
+ Say Y if you want to use audio devices on MI2S.
config SND_SOC_QCOM_COMMON
tristate
@@ -93,9 +93,9 @@ config SND_SOC_MSM8996
select SND_SOC_QDSP6
select SND_SOC_QCOM_COMMON
help
- Support for Qualcomm Technologies LPASS audio block in
- APQ8096 SoC-based systems.
- Say Y if you want to use audio device on this SoCs
+ Support for Qualcomm Technologies LPASS audio block in
+ APQ8096 SoC-based systems.
+	  Say Y if you want to use audio devices on these SoCs.
config SND_SOC_SDM845
tristate "SoC Machine driver for SDM845 boards"
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 4c745baa39f7..2e8892316423 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -50,12 +50,12 @@ static const struct snd_pcm_hardware lpass_platform_pcm_hardware = {
.fifo_size = 0,
};
-static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct lpass_variant *v = drvdata->variant;
int ret, dma_ch, dir = substream->stream;
@@ -105,11 +105,10 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
return 0;
}
-static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct lpass_variant *v = drvdata->variant;
struct lpass_pcm_data *data;
@@ -122,11 +121,11 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
return 0;
}
-static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -216,10 +215,10 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -236,11 +235,11 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
return ret;
}
-static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
+static int lpass_platform_pcmops_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -288,11 +287,11 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
- int cmd)
+static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ int cmd)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -363,10 +362,10 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
}
static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
struct snd_pcm_runtime *rt = substream->runtime;
struct lpass_pcm_data *pcm_data = rt->private_data;
@@ -395,8 +394,9 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
return bytes_to_frames(substream->runtime, curr_addr - base_addr);
}
-static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int lpass_platform_pcmops_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -405,18 +405,6 @@ static int lpass_platform_pcmops_mmap(struct snd_pcm_substream *substream,
runtime->dma_bytes);
}
-static const struct snd_pcm_ops lpass_platform_pcm_ops = {
- .open = lpass_platform_pcmops_open,
- .close = lpass_platform_pcmops_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = lpass_platform_pcmops_hw_params,
- .hw_free = lpass_platform_pcmops_hw_free,
- .prepare = lpass_platform_pcmops_prepare,
- .trigger = lpass_platform_pcmops_trigger,
- .pointer = lpass_platform_pcmops_pointer,
- .mmap = lpass_platform_pcmops_mmap,
-};
-
static irqreturn_t lpass_dma_interrupt_handler(
struct snd_pcm_substream *substream,
struct lpass_data *drvdata,
@@ -499,11 +487,11 @@ static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
+static int lpass_platform_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *soc_runtime)
{
struct snd_pcm *pcm = soc_runtime->pcm;
struct snd_pcm_substream *psubstream, *csubstream;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(soc_runtime, DRV_NAME);
int ret = -EINVAL;
size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
@@ -535,7 +523,8 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
return 0;
}
-static void lpass_platform_pcm_free(struct snd_pcm *pcm)
+static void lpass_platform_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -552,9 +541,18 @@ static void lpass_platform_pcm_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver lpass_component_driver = {
.name = DRV_NAME,
- .pcm_new = lpass_platform_pcm_new,
- .pcm_free = lpass_platform_pcm_free,
- .ops = &lpass_platform_pcm_ops,
+ .open = lpass_platform_pcmops_open,
+ .close = lpass_platform_pcmops_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = lpass_platform_pcmops_hw_params,
+ .hw_free = lpass_platform_pcmops_hw_free,
+ .prepare = lpass_platform_pcmops_prepare,
+ .trigger = lpass_platform_pcmops_trigger,
+ .pointer = lpass_platform_pcmops_pointer,
+ .mmap = lpass_platform_pcmops_mmap,
+ .pcm_construct = lpass_platform_pcm_new,
+ .pcm_destruct = lpass_platform_pcm_free,
+
};
int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 548eb4fa2da6..8150c10f081e 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -206,16 +206,16 @@ static void event_handler(uint32_t opcode, uint32_t token,
}
}
-static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
+static int q6asm_dai_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct q6asm_dai_rtd *prtd = runtime->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
struct q6asm_dai_data *pdata;
int ret, i;
- pdata = snd_soc_component_get_drvdata(c);
+ pdata = snd_soc_component_get_drvdata(component);
if (!pdata)
return -EINVAL;
@@ -294,7 +294,8 @@ static int q6asm_dai_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+static int q6asm_dai_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -322,21 +323,21 @@ static int q6asm_dai_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static int q6asm_dai_open(struct snd_pcm_substream *substream)
+static int q6asm_dai_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
struct snd_soc_dai *cpu_dai = soc_prtd->cpu_dai;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
struct q6asm_dai_rtd *prtd;
struct q6asm_dai_data *pdata;
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
int ret = 0;
int stream_id;
stream_id = cpu_dai->driver->id;
- pdata = snd_soc_component_get_drvdata(c);
+ pdata = snd_soc_component_get_drvdata(component);
if (!pdata) {
pr_err("Drv data not found ..\n");
return -EINVAL;
@@ -414,7 +415,8 @@ static int q6asm_dai_open(struct snd_pcm_substream *substream)
return 0;
}
-static int q6asm_dai_close(struct snd_pcm_substream *substream)
+static int q6asm_dai_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
@@ -435,7 +437,8 @@ static int q6asm_dai_close(struct snd_pcm_substream *substream)
return 0;
}
-static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -447,22 +450,21 @@ static snd_pcm_uframes_t q6asm_dai_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
}
-static int q6asm_dai_mmap(struct snd_pcm_substream *substream,
- struct vm_area_struct *vma)
+static int q6asm_dai_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
{
-
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(soc_prtd, DRV_NAME);
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
return dma_mmap_coherent(dev, vma,
runtime->dma_area, runtime->dma_addr,
runtime->dma_bytes);
}
-static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int q6asm_dai_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
@@ -482,17 +484,6 @@ static int q6asm_dai_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops q6asm_dai_ops = {
- .open = q6asm_dai_open,
- .hw_params = q6asm_dai_hw_params,
- .close = q6asm_dai_close,
- .ioctl = snd_pcm_lib_ioctl,
- .prepare = q6asm_dai_prepare,
- .trigger = q6asm_dai_trigger,
- .pointer = q6asm_dai_pointer,
- .mmap = q6asm_dai_mmap,
-};
-
static void compress_event_handler(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv)
{
@@ -635,8 +626,14 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int dir = stream->direction;
struct q6asm_dai_data *pdata;
+ struct q6asm_flac_cfg flac_cfg;
struct device *dev = c->dev;
int ret;
+ union snd_codec_options *codec_options;
+ struct snd_dec_flac *flac;
+
+ codec_options = &(prtd->codec_param.codec.options);
+
memcpy(&prtd->codec_param, params, sizeof(*params));
@@ -673,6 +670,32 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
return ret;
}
+ switch (params->codec.id) {
+ case SND_AUDIOCODEC_FLAC:
+
+ memset(&flac_cfg, 0x0, sizeof(struct q6asm_flac_cfg));
+ flac = &codec_options->flac_d;
+
+ flac_cfg.ch_cfg = params->codec.ch_in;
+ flac_cfg.sample_rate = params->codec.sample_rate;
+ flac_cfg.stream_info_present = 1;
+ flac_cfg.sample_size = flac->sample_size;
+ flac_cfg.min_blk_size = flac->min_blk_size;
+ flac_cfg.max_blk_size = flac->max_blk_size;
+ flac_cfg.max_frame_size = flac->max_frame_size;
+ flac_cfg.min_frame_size = flac->min_frame_size;
+
+ ret = q6asm_stream_media_format_block_flac(prtd->audio_client,
+ &flac_cfg);
+ if (ret < 0) {
+ dev_err(dev, "FLAC CMD Format block failed:%d\n", ret);
+ return -EIO;
+ }
+ break;
+ default:
+ break;
+ }
+
ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys,
(prtd->pcm_size / prtd->periods),
prtd->periods);
@@ -768,8 +791,9 @@ static int q6asm_dai_compr_get_caps(struct snd_compr_stream *stream,
caps->max_fragment_size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE;
caps->min_fragments = COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
caps->max_fragments = COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
- caps->num_codecs = 1;
+ caps->num_codecs = 2;
caps->codecs[0] = SND_AUDIOCODEC_MP3;
+ caps->codecs[1] = SND_AUDIOCODEC_FLAC;
return 0;
}
@@ -800,15 +824,15 @@ static struct snd_compr_ops q6asm_dai_compr_ops = {
.ack = q6asm_dai_compr_ack,
};
-static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int q6asm_dai_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm_substream *psubstream, *csubstream;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_pcm *pcm = rtd->pcm;
struct device *dev;
int size, ret;
- dev = c->dev;
+ dev = component->dev;
size = q6asm_dai_hardware_playback.buffer_bytes_max;
psubstream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (psubstream) {
@@ -835,7 +859,8 @@ static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
+static void q6asm_dai_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -852,9 +877,16 @@ static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver q6asm_fe_dai_component = {
.name = DRV_NAME,
- .ops = &q6asm_dai_ops,
- .pcm_new = q6asm_dai_pcm_new,
- .pcm_free = q6asm_dai_pcm_free,
+ .open = q6asm_dai_open,
+ .hw_params = q6asm_dai_hw_params,
+ .close = q6asm_dai_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .prepare = q6asm_dai_prepare,
+ .trigger = q6asm_dai_trigger,
+ .pointer = q6asm_dai_pointer,
+ .mmap = q6asm_dai_mmap,
+ .pcm_construct = q6asm_dai_pcm_new,
+ .pcm_destruct = q6asm_dai_pcm_free,
.compr_ops = &q6asm_dai_compr_ops,
};
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index e8141a33a55e..36e0eab13a98 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -38,6 +38,7 @@
#define ASM_SESSION_CMD_RUN_V2 0x00010DAA
#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
#define ASM_MEDIA_FMT_MP3 0x00010BE9
+#define ASM_MEDIA_FMT_FLAC 0x00010C16
#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
#define ASM_DATA_CMD_READ_V2 0x00010DAC
#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
@@ -89,6 +90,20 @@ struct asm_multi_channel_pcm_fmt_blk_v2 {
u8 channel_mapping[PCM_MAX_NUM_CHANNEL];
} __packed;
+struct asm_flac_fmt_blk_v2 {
+ struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
+ u16 is_stream_info_present;
+ u16 num_channels;
+ u16 min_blk_size;
+ u16 max_blk_size;
+ u16 md5_sum[8];
+ u32 sample_rate;
+ u32 min_frame_size;
+ u32 max_frame_size;
+ u16 sample_size;
+ u16 reserved;
+} __packed;
+
struct asm_stream_cmd_set_encdec_param {
u32 param_id;
u32 param_size;
@@ -876,6 +891,9 @@ int q6asm_open_write(struct audio_client *ac, uint32_t format,
case FORMAT_LINEAR_PCM:
open->dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
+ case SND_AUDIOCODEC_FLAC:
+ open->dec_fmt_id = ASM_MEDIA_FMT_FLAC;
+ break;
default:
dev_err(ac->dev, "Invalid format 0x%x\n", format);
rc = -EINVAL;
@@ -1021,6 +1039,42 @@ err:
}
EXPORT_SYMBOL_GPL(q6asm_media_format_block_multi_ch_pcm);
+
+int q6asm_stream_media_format_block_flac(struct audio_client *ac,
+ struct q6asm_flac_cfg *cfg)
+{
+ struct asm_flac_fmt_blk_v2 *fmt;
+ struct apr_pkt *pkt;
+ void *p;
+ int rc, pkt_size;
+
+ pkt_size = APR_HDR_SIZE + sizeof(*fmt);
+ p = kzalloc(pkt_size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ pkt = p;
+ fmt = p + APR_HDR_SIZE;
+
+ q6asm_add_hdr(ac, &pkt->hdr, pkt_size, true, ac->stream_id);
+
+ pkt->hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2;
+ fmt->fmt_blk.fmt_blk_size = sizeof(*fmt) - sizeof(fmt->fmt_blk);
+ fmt->is_stream_info_present = cfg->stream_info_present;
+ fmt->num_channels = cfg->ch_cfg;
+ fmt->min_blk_size = cfg->min_blk_size;
+ fmt->max_blk_size = cfg->max_blk_size;
+ fmt->sample_rate = cfg->sample_rate;
+ fmt->min_frame_size = cfg->min_frame_size;
+ fmt->max_frame_size = cfg->max_frame_size;
+ fmt->sample_size = cfg->sample_size;
+
+ rc = q6asm_ac_send_cmd_sync(ac, pkt);
+ kfree(pkt);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(q6asm_stream_media_format_block_flac);
/**
* q6asm_enc_cfg_blk_pcm_format_support() - setup pcm configuration for capture
*
@@ -1075,6 +1129,7 @@ err:
}
EXPORT_SYMBOL_GPL(q6asm_enc_cfg_blk_pcm_format_support);
+
/**
* q6asm_read() - read data of period size from audio client
*
diff --git a/sound/soc/qcom/qdsp6/q6asm.h b/sound/soc/qcom/qdsp6/q6asm.h
index 9f5fb573e4a0..6764f55f7078 100644
--- a/sound/soc/qcom/qdsp6/q6asm.h
+++ b/sound/soc/qcom/qdsp6/q6asm.h
@@ -32,6 +32,19 @@ enum {
#define NO_TIMESTAMP 0xFF00
#define FORMAT_LINEAR_PCM 0x0000
+struct q6asm_flac_cfg {
+ u32 sample_rate;
+ u32 ext_sample_rate;
+ u32 min_frame_size;
+ u32 max_frame_size;
+ u16 stream_info_present;
+ u16 min_blk_size;
+ u16 max_blk_size;
+ u16 ch_cfg;
+ u16 sample_size;
+ u16 md5_sum;
+};
+
typedef void (*q6asm_cb) (uint32_t opcode, uint32_t token,
void *payload, void *priv);
struct audio_client;
@@ -54,6 +67,8 @@ int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
uint32_t rate, uint32_t channels,
u8 channel_map[PCM_MAX_NUM_CHANNEL],
uint16_t bits_per_sample);
+int q6asm_stream_media_format_block_flac(struct audio_client *ac,
+ struct q6asm_flac_cfg *cfg);
int q6asm_run(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
uint32_t lsw_ts);
int q6asm_run_nowait(struct audio_client *ac, uint32_t flags, uint32_t msw_ts,
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index ddcd9978cf57..20724102e85a 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -939,12 +939,12 @@ static const struct snd_soc_dapm_route intercon[] = {
};
-static int routing_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int routing_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
- struct msm_routing_data *data = dev_get_drvdata(c->dev);
+ struct msm_routing_data *data = dev_get_drvdata(component->dev);
unsigned int be_id = rtd->cpu_dai->id;
struct session_data *session;
int path_type;
@@ -980,10 +980,6 @@ static int routing_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static struct snd_pcm_ops q6pcm_routing_ops = {
- .hw_params = routing_hw_params,
-};
-
static int msm_routing_probe(struct snd_soc_component *c)
{
int i;
@@ -997,9 +993,9 @@ static int msm_routing_probe(struct snd_soc_component *c)
}
static const struct snd_soc_component_driver msm_soc_routing_component = {
- .ops = &q6pcm_routing_ops,
.probe = msm_routing_probe,
.name = DRV_NAME,
+ .hw_params = routing_hw_params,
.dapm_widgets = msm_qdsp6_widgets,
.num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
.dapm_routes = intercon,
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index b43657e6e655..d610b553ea3b 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -40,9 +40,10 @@ config SND_SOC_ROCKCHIP_MAX98090
select SND_SOC_ROCKCHIP_I2S
select SND_SOC_MAX98090
select SND_SOC_TS3A227E
+ select SND_SOC_HDMI_CODEC
help
Say Y or M here if you want to add support for SoC audio on Rockchip
- boards using the MAX98090 codec, such as Veyron.
+ boards using the MAX98090 codec and HDMI codec, such as Veyron.
config SND_SOC_ROCKCHIP_RT5645
tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index e80b09143b63..60930fa85aa4 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -6,11 +6,13 @@
*/
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <sound/core.h>
+#include <sound/hdmi-codec.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -36,28 +38,73 @@ static struct snd_soc_jack_pin headset_jack_pins[] = {
};
-static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Headphone", NULL),
- SND_SOC_DAPM_MIC("Headset Mic", NULL),
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_SPK("Speaker", NULL),
+#define RK_MAX98090_WIDGETS \
+ SND_SOC_DAPM_HP("Headphone", NULL), \
+ SND_SOC_DAPM_MIC("Headset Mic", NULL), \
+ SND_SOC_DAPM_MIC("Int Mic", NULL), \
+ SND_SOC_DAPM_SPK("Speaker", NULL)
+
+#define RK_HDMI_WIDGETS \
+ SND_SOC_DAPM_LINE("HDMI", NULL)
+
+static const struct snd_soc_dapm_widget rk_max98090_dapm_widgets[] = {
+ RK_MAX98090_WIDGETS,
+};
+
+static const struct snd_soc_dapm_widget rk_hdmi_dapm_widgets[] = {
+ RK_HDMI_WIDGETS,
+};
+
+static const struct snd_soc_dapm_widget rk_max98090_hdmi_dapm_widgets[] = {
+ RK_MAX98090_WIDGETS,
+ RK_HDMI_WIDGETS,
+};
+
+#define RK_MAX98090_AUDIO_MAP \
+ {"IN34", NULL, "Headset Mic"}, \
+ {"Headset Mic", NULL, "MICBIAS"}, \
+ {"DMICL", NULL, "Int Mic"}, \
+ {"Headphone", NULL, "HPL"}, \
+ {"Headphone", NULL, "HPR"}, \
+ {"Speaker", NULL, "SPKL"}, \
+ {"Speaker", NULL, "SPKR"}
+
+#define RK_HDMI_AUDIO_MAP \
+ {"HDMI", NULL, "TX"}
+
+static const struct snd_soc_dapm_route rk_max98090_audio_map[] = {
+ RK_MAX98090_AUDIO_MAP,
+};
+
+static const struct snd_soc_dapm_route rk_hdmi_audio_map[] = {
+ RK_HDMI_AUDIO_MAP,
+};
+
+static const struct snd_soc_dapm_route rk_max98090_hdmi_audio_map[] = {
+ RK_MAX98090_AUDIO_MAP,
+ RK_HDMI_AUDIO_MAP,
+};
+
+#define RK_MAX98090_CONTROLS \
+ SOC_DAPM_PIN_SWITCH("Headphone"), \
+ SOC_DAPM_PIN_SWITCH("Headset Mic"), \
+ SOC_DAPM_PIN_SWITCH("Int Mic"), \
+ SOC_DAPM_PIN_SWITCH("Speaker")
+
+#define RK_HDMI_CONTROLS \
+ SOC_DAPM_PIN_SWITCH("HDMI")
+
+static const struct snd_kcontrol_new rk_max98090_controls[] = {
+ RK_MAX98090_CONTROLS,
};
-static const struct snd_soc_dapm_route rk_audio_map[] = {
- {"IN34", NULL, "Headset Mic"},
- {"Headset Mic", NULL, "MICBIAS"},
- {"DMICL", NULL, "Int Mic"},
- {"Headphone", NULL, "HPL"},
- {"Headphone", NULL, "HPR"},
- {"Speaker", NULL, "SPKL"},
- {"Speaker", NULL, "SPKR"},
+static const struct snd_kcontrol_new rk_hdmi_controls[] = {
+ RK_HDMI_CONTROLS,
};
-static const struct snd_kcontrol_new rk_mc_controls[] = {
- SOC_DAPM_PIN_SWITCH("Headphone"),
- SOC_DAPM_PIN_SWITCH("Headset Mic"),
- SOC_DAPM_PIN_SWITCH("Int Mic"),
- SOC_DAPM_PIN_SWITCH("Speaker"),
+static const struct snd_kcontrol_new rk_max98090_hdmi_controls[] = {
+ RK_MAX98090_CONTROLS,
+ RK_HDMI_CONTROLS,
};
static int rk_jack_event(struct notifier_block *nb, unsigned long event,
@@ -125,15 +172,20 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
SND_SOC_CLOCK_OUT);
- if (ret < 0) {
- dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+ if (ret) {
+ dev_err(cpu_dai->dev, "Can't set cpu dai clock %d\n", ret);
return ret;
}
ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
SND_SOC_CLOCK_IN);
- if (ret < 0) {
- dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+
+ /* HDMI codec dai does not need to set sysclk. */
+ if (!strcmp(rtd->dai_link->name, "HDMI"))
+ return 0;
+
+ if (ret) {
+ dev_err(codec_dai->dev, "Can't set codec dai clock %d\n", ret);
return ret;
}
@@ -155,20 +207,88 @@ static const struct snd_soc_ops rk_aif1_ops = {
.startup = rk_aif1_startup,
};
-SND_SOC_DAILINK_DEFS(hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "HiFi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link rk_dailink = {
- .name = "max98090",
- .stream_name = "Audio",
- .init = rk_init,
- .ops = &rk_aif1_ops,
- /* set max98090 as slave */
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS,
- SND_SOC_DAILINK_REG(hifi),
+SND_SOC_DAILINK_DEFS(analog,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "HiFi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+SND_SOC_DAILINK_DEFS(hdmi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "i2s-hifi")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+enum {
+ DAILINK_MAX98090,
+ DAILINK_HDMI,
+};
+
+static struct snd_soc_jack rk_hdmi_jack;
+
+static int rk_hdmi_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_card *card = runtime->card;
+ struct snd_soc_component *component = runtime->codec_dai->component;
+ int ret;
+
+ /* enable jack detection */
+ ret = snd_soc_card_jack_new(card, "HDMI Jack", SND_JACK_LINEOUT,
+ &rk_hdmi_jack, NULL, 0);
+ if (ret) {
+		dev_err(card->dev, "Can't create HDMI Jack %d\n", ret);
+ return ret;
+ }
+
+ return hdmi_codec_set_jack_detect(component, &rk_hdmi_jack);
+}
+
+/* max98090 dai_link */
+static struct snd_soc_dai_link rk_max98090_dailinks[] = {
+ {
+ .name = "max98090",
+ .stream_name = "Analog",
+ .init = rk_init,
+ .ops = &rk_aif1_ops,
+ /* set max98090 as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(analog),
+ },
+};
+
+/* HDMI codec dai_link */
+static struct snd_soc_dai_link rk_hdmi_dailinks[] = {
+ {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .init = rk_hdmi_init,
+ .ops = &rk_aif1_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(hdmi),
+ }
+};
+
+/* max98090 and HDMI codec dai_link */
+static struct snd_soc_dai_link rk_max98090_hdmi_dailinks[] = {
+ [DAILINK_MAX98090] = {
+ .name = "max98090",
+ .stream_name = "Analog",
+ .init = rk_init,
+ .ops = &rk_aif1_ops,
+ /* set max98090 as slave */
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(analog),
+ },
+ [DAILINK_HDMI] = {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .init = rk_hdmi_init,
+ .ops = &rk_aif1_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ SND_SOC_DAILINK_REG(hdmi),
+ }
};
static int rk_98090_headset_init(struct snd_soc_component *component);
@@ -178,19 +298,47 @@ static struct snd_soc_aux_dev rk_98090_headset_dev = {
.init = rk_98090_headset_init,
};
-static struct snd_soc_card snd_soc_card_rk = {
+static struct snd_soc_card rockchip_max98090_card = {
.name = "ROCKCHIP-I2S",
.owner = THIS_MODULE,
- .dai_link = &rk_dailink,
- .num_links = 1,
+ .dai_link = rk_max98090_dailinks,
+ .num_links = ARRAY_SIZE(rk_max98090_dailinks),
+ .aux_dev = &rk_98090_headset_dev,
+ .num_aux_devs = 1,
+ .dapm_widgets = rk_max98090_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk_max98090_dapm_widgets),
+ .dapm_routes = rk_max98090_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rk_max98090_audio_map),
+ .controls = rk_max98090_controls,
+ .num_controls = ARRAY_SIZE(rk_max98090_controls),
+};
+
+static struct snd_soc_card rockchip_hdmi_card = {
+ .name = "ROCKCHIP-HDMI",
+ .owner = THIS_MODULE,
+ .dai_link = rk_hdmi_dailinks,
+ .num_links = ARRAY_SIZE(rk_hdmi_dailinks),
+ .dapm_widgets = rk_hdmi_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk_hdmi_dapm_widgets),
+ .dapm_routes = rk_hdmi_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rk_hdmi_audio_map),
+ .controls = rk_hdmi_controls,
+ .num_controls = ARRAY_SIZE(rk_hdmi_controls),
+};
+
+static struct snd_soc_card rockchip_max98090_hdmi_card = {
+ .name = "ROCKCHIP-MAX98090-HDMI",
+ .owner = THIS_MODULE,
+ .dai_link = rk_max98090_hdmi_dailinks,
+ .num_links = ARRAY_SIZE(rk_max98090_hdmi_dailinks),
.aux_dev = &rk_98090_headset_dev,
.num_aux_devs = 1,
- .dapm_widgets = rk_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
- .dapm_routes = rk_audio_map,
- .num_dapm_routes = ARRAY_SIZE(rk_audio_map),
- .controls = rk_mc_controls,
- .num_controls = ARRAY_SIZE(rk_mc_controls),
+ .dapm_widgets = rk_max98090_hdmi_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rk_max98090_hdmi_dapm_widgets),
+ .dapm_routes = rk_max98090_hdmi_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(rk_max98090_hdmi_audio_map),
+ .controls = rk_max98090_hdmi_controls,
+ .num_controls = ARRAY_SIZE(rk_max98090_hdmi_controls),
};
static int rk_98090_headset_init(struct snd_soc_component *component)
@@ -198,7 +346,7 @@ static int rk_98090_headset_init(struct snd_soc_component *component)
int ret;
/* Enable Headset and 4 Buttons Jack detection */
- ret = snd_soc_card_jack_new(&snd_soc_card_rk, "Headset Jack",
+ ret = snd_soc_card_jack_new(component->card, "Headset Jack",
SND_JACK_HEADSET |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3,
@@ -213,41 +361,75 @@ static int rk_98090_headset_init(struct snd_soc_component *component)
return ret;
}
+static int rk_parse_headset_from_of(struct device *dev, struct device_node *np)
+{
+ rk_98090_headset_dev.dlc.of_node = of_parse_phandle(
+ np, "rockchip,headset-codec", 0);
+ if (!rk_98090_headset_dev.dlc.of_node) {
+ dev_err(dev,
+ "Property 'rockchip,headset-codec' missing/invalid\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int snd_rk_mc_probe(struct platform_device *pdev)
{
int ret = 0;
- struct snd_soc_card *card = &snd_soc_card_rk;
+ struct snd_soc_card *card;
+ struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *np_cpu;
+ struct device_node *np_audio, *np_hdmi;
- /* register the soc card */
- card->dev = &pdev->dev;
+ /* Parse DTS for I2S controller. */
+ np_cpu = of_parse_phandle(np, "rockchip,i2s-controller", 0);
- rk_dailink.codecs->of_node = of_parse_phandle(np,
- "rockchip,audio-codec", 0);
- if (!rk_dailink.codecs->of_node) {
+ if (!np_cpu) {
dev_err(&pdev->dev,
- "Property 'rockchip,audio-codec' missing or invalid\n");
+ "Property 'rockchip,i2s-controller missing or invalid\n");
return -EINVAL;
}
- rk_dailink.cpus->of_node = of_parse_phandle(np,
- "rockchip,i2s-controller", 0);
- if (!rk_dailink.cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'rockchip,i2s-controller' missing or invalid\n");
+ /*
+ * Find the card to use based on the presence of the audio codec
+ * and/or HDMI codec in the device properties. Set their of_node
+ * accordingly.
+ */
+ np_audio = of_parse_phandle(np, "rockchip,audio-codec", 0);
+ np_hdmi = of_parse_phandle(np, "rockchip,hdmi-codec", 0);
+ if (np_audio && np_hdmi) {
+ card = &rockchip_max98090_hdmi_card;
+ card->dai_link[DAILINK_MAX98090].codecs->of_node = np_audio;
+ card->dai_link[DAILINK_HDMI].codecs->of_node = np_hdmi;
+ card->dai_link[DAILINK_MAX98090].cpus->of_node = np_cpu;
+ card->dai_link[DAILINK_MAX98090].platforms->of_node = np_cpu;
+ card->dai_link[DAILINK_HDMI].cpus->of_node = np_cpu;
+ card->dai_link[DAILINK_HDMI].platforms->of_node = np_cpu;
+ } else if (np_audio) {
+ card = &rockchip_max98090_card;
+ card->dai_link[0].codecs->of_node = np_audio;
+ card->dai_link[0].cpus->of_node = np_cpu;
+ card->dai_link[0].platforms->of_node = np_cpu;
+ } else if (np_hdmi) {
+ card = &rockchip_hdmi_card;
+ card->dai_link[0].codecs->of_node = np_hdmi;
+ card->dai_link[0].cpus->of_node = np_cpu;
+ card->dai_link[0].platforms->of_node = np_cpu;
+ } else {
+ dev_err(dev, "At least one of codecs should be specified\n");
return -EINVAL;
}
- rk_dailink.platforms->of_node = rk_dailink.cpus->of_node;
+ card->dev = dev;
- rk_98090_headset_dev.dlc.of_node = of_parse_phandle(np,
- "rockchip,headset-codec", 0);
- if (!rk_98090_headset_dev.dlc.of_node) {
- dev_err(&pdev->dev,
- "Property 'rockchip,headset-codec' missing/invalid\n");
- return -EINVAL;
+ /* Parse headset detection codec. */
+ if (np_audio) {
+ ret = rk_parse_headset_from_of(dev, np);
+ if (ret)
+ return ret;
}
+ /* Parse card name. */
ret = snd_soc_of_parse_card_name(card, "rockchip,model");
if (ret) {
dev_err(&pdev->dev,
@@ -255,6 +437,7 @@ static int snd_rk_mc_probe(struct platform_device *pdev)
return ret;
}
+ /* register the soc card */
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev,
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 638983123d8f..1a0b163ca47b 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -194,11 +194,13 @@ config SND_SOC_ODROID
help
Say Y here to enable audio support for the Odroid XU3/XU4.
-config SND_SOC_ARNDALE_RT5631_ALC5631
- tristate "Audio support for RT5631(ALC5631) on Arndale Board"
- depends on I2C
- select SND_SAMSUNG_I2S
- select SND_SOC_RT5631
+config SND_SOC_ARNDALE
+ tristate "Audio support for Arndale Board"
+ depends on I2C
+ select SND_SAMSUNG_I2S
+ select SND_SOC_RT5631
+ select MFD_WM8994
+ select SND_SOC_WM8994
config SND_SOC_SAMSUNG_TM2_WM5110
tristate "SoC I2S Audio support for WM5110 on TM2 board"
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index c3b76035f69c..8f5dfe20b9f1 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -39,7 +39,7 @@ snd-soc-lowland-objs := lowland.o
snd-soc-littlemill-objs := littlemill.o
snd-soc-bells-objs := bells.o
snd-soc-odroid-objs := odroid.o
-snd-soc-arndale-rt5631-objs := arndale_rt5631.o
+snd-soc-arndale-objs := arndale.o
snd-soc-tm2-wm5110-objs := tm2_wm5110.o
obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
@@ -62,5 +62,5 @@ obj-$(CONFIG_SND_SOC_LOWLAND) += snd-soc-lowland.o
obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
obj-$(CONFIG_SND_SOC_ODROID) += snd-soc-odroid.o
-obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
+obj-$(CONFIG_SND_SOC_ARNDALE) += snd-soc-arndale.o
obj-$(CONFIG_SND_SOC_SAMSUNG_TM2_WM5110) += snd-soc-tm2-wm5110.o
diff --git a/sound/soc/samsung/arndale.c b/sound/soc/samsung/arndale.c
new file mode 100644
index 000000000000..d64602950cbd
--- /dev/null
+++ b/sound/soc/samsung/arndale.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2014, Insignal Co., Ltd.
+//
+// Author: Claude <claude@insignal.co.kr>
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "../codecs/wm8994.h"
+#include "i2s.h"
+
+static int arndale_rt5631_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int rfs, ret;
+ unsigned long rclk;
+
+ rfs = 256;
+
+ rclk = params_rate(params) * rfs;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
+ 0, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
+ 0, SND_SOC_CLOCK_OUT);
+
+ if (ret < 0)
+ return ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk, SND_SOC_CLOCK_OUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct snd_soc_ops arndale_rt5631_ops = {
+ .hw_params = arndale_rt5631_hw_params,
+};
+
+static int arndale_wm1811_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ unsigned int rfs, rclk;
+
+ /* Ensure AIF1CLK is >= 3 MHz for optimal performance */
+ if (params_width(params) == 24)
+ rfs = 384;
+ else if (params_rate(params) == 8000 || params_rate(params) == 11025)
+ rfs = 512;
+ else
+ rfs = 256;
+
+ rclk = params_rate(params) * rfs;
+
+ /*
+ * We add 1 to the frequency value to ensure proper EPLL setting
+ * for each audio sampling rate (see epll_24mhz_tbl in drivers/clk/
+ * samsung/clk-exynos5250.c for the list of available EPLL rates).
+ * The CODEC uses the clk API and the value will be rounded, hence
+ * the MCLK1 clock's frequency will still be an exact multiple of
+ * the sample rate.
+ */
+ return snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_MCLK1,
+ rclk + 1, SND_SOC_CLOCK_IN);
+}
+
+static struct snd_soc_ops arndale_wm1811_ops = {
+ .hw_params = arndale_wm1811_hw_params,
+};
+
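A worked example of the "rclk + 1" trick above, with illustrative numbers
(the EPLL table itself is not reproduced here):

	/* Hypothetical 44.1 kHz / 16-bit playback: */
	unsigned int rate = 44100;
	unsigned int rfs  = 256;		/* neither 24-bit nor 8/11.025 kHz */
	unsigned int rclk = rate * rfs;		/* 11289600 Hz */

	/*
	 * Requesting 11289601 Hz rather than 11289600 Hz lets the clk
	 * framework round to the nearest supported EPLL rate, which is
	 * expected to be the exact 11289600 Hz multiple of the rate.
	 */
	snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_MCLK1,
			       rclk + 1, SND_SOC_CLOCK_IN);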
+SND_SOC_DAILINK_DEFS(rt5631_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5631-aif1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link arndale_rt5631_dai[] = {
+ {
+ .name = "RT5631 HiFi",
+ .stream_name = "Primary",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &arndale_rt5631_ops,
+ SND_SOC_DAILINK_REG(rt5631_hifi),
+ },
+};
+
+SND_SOC_DAILINK_DEFS(wm1811_hifi,
+ DAILINK_COMP_ARRAY(COMP_EMPTY()),
+ DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8994-aif1")),
+ DAILINK_COMP_ARRAY(COMP_EMPTY()));
+
+static struct snd_soc_dai_link arndale_wm1811_dai[] = {
+ {
+ .name = "WM1811 HiFi",
+ .stream_name = "Primary",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .ops = &arndale_wm1811_ops,
+ SND_SOC_DAILINK_REG(wm1811_hifi),
+ },
+};
+
+static struct snd_soc_card arndale_rt5631 = {
+ .name = "Arndale RT5631",
+ .owner = THIS_MODULE,
+ .dai_link = arndale_rt5631_dai,
+ .num_links = ARRAY_SIZE(arndale_rt5631_dai),
+};
+
+static struct snd_soc_card arndale_wm1811 = {
+ .name = "Arndale WM1811",
+ .owner = THIS_MODULE,
+ .dai_link = arndale_wm1811_dai,
+ .num_links = ARRAY_SIZE(arndale_wm1811_dai),
+};
+
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+ struct snd_soc_dai_link *dai_link;
+ int i;
+
+ for_each_card_prelinks(card, i, dai_link) {
+ of_node_put(dai_link->cpus->of_node);
+ of_node_put(dai_link->codecs->of_node);
+ }
+}
+
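for_each_card_prelinks() iterates the card's static dai_link array; its
definition is roughly the following sketch (simplified from
include/sound/soc.h):

	#define for_each_card_prelinks(card, i, link)			\
		for ((i) = 0;						\
		     ((i) < (card)->num_links) &&			\
			     ((link) = &(card)->dai_link[i]);		\
		     (i)++)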
+static int arndale_audio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct snd_soc_card *card;
+ struct snd_soc_dai_link *dai_link;
+ int ret;
+
+ card = (struct snd_soc_card *)of_device_get_match_data(&pdev->dev);
+ card->dev = &pdev->dev;
+ dai_link = card->dai_link;
+
+ dai_link->cpus->of_node = of_parse_phandle(np, "samsung,audio-cpu", 0);
+ if (!dai_link->cpus->of_node) {
+ dev_err(&pdev->dev,
+ "Property 'samsung,audio-cpu' missing or invalid\n");
+ return -EINVAL;
+ }
+
+ if (!dai_link->platforms->name)
+ dai_link->platforms->of_node = dai_link->cpus->of_node;
+
+ dai_link->codecs->of_node = of_parse_phandle(np, "samsung,audio-codec", 0);
+ if (!dai_link->codecs->of_node) {
+ dev_err(&pdev->dev,
+ "Property 'samsung,audio-codec' missing or invalid\n");
+ ret = -EINVAL;
+ goto err_put_of_nodes;
+ }
+
+ ret = devm_snd_soc_register_card(card->dev, card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+ goto err_put_of_nodes;
+ }
+ return 0;
+
+err_put_of_nodes:
+ arndale_put_of_nodes(card);
+ return ret;
+}
+
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ arndale_put_of_nodes(card);
+ return 0;
+}
+
+static const struct of_device_id arndale_audio_of_match[] = {
+ { .compatible = "samsung,arndale-rt5631", .data = &arndale_rt5631 },
+ { .compatible = "samsung,arndale-alc5631", .data = &arndale_rt5631 },
+ { .compatible = "samsung,arndale-wm1811", .data = &arndale_wm1811 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, arndale_audio_of_match);
+
+static struct platform_driver arndale_audio_driver = {
+ .driver = {
+ .name = "arndale-audio",
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = arndale_audio_of_match,
+ },
+ .probe = arndale_audio_probe,
+ .remove = arndale_audio_remove,
+};
+
+module_platform_driver(arndale_audio_driver);
+
+MODULE_AUTHOR("Claude <claude@insignal.co.kr>");
+MODULE_DESCRIPTION("ALSA SoC Driver for Arndale Board");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
deleted file mode 100644
index fd8c6642fb0d..000000000000
--- a/sound/soc/samsung/arndale_rt5631.c
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// Copyright (c) 2014, Insignal Co., Ltd.
-//
-// Author: Claude <claude@insginal.co.kr>
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-
-#include "i2s.h"
-
-static int arndale_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- int rfs, ret;
- unsigned long rclk;
-
- rfs = 256;
-
- rclk = params_rate(params) * rfs;
-
- ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
- 0, SND_SOC_CLOCK_OUT);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
- 0, SND_SOC_CLOCK_OUT);
-
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, rclk, SND_SOC_CLOCK_OUT);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_ops arndale_ops = {
- .hw_params = arndale_hw_params,
-};
-
-SND_SOC_DAILINK_DEFS(rt5631_hifi,
- DAILINK_COMP_ARRAY(COMP_EMPTY()),
- DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "rt5631-hifi")),
- DAILINK_COMP_ARRAY(COMP_EMPTY()));
-
-static struct snd_soc_dai_link arndale_rt5631_dai[] = {
- {
- .name = "RT5631 HiFi",
- .stream_name = "Primary",
- .dai_fmt = SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBS_CFS,
- .ops = &arndale_ops,
- SND_SOC_DAILINK_REG(rt5631_hifi),
- },
-};
-
-static struct snd_soc_card arndale_rt5631 = {
- .name = "Arndale RT5631",
- .owner = THIS_MODULE,
- .dai_link = arndale_rt5631_dai,
- .num_links = ARRAY_SIZE(arndale_rt5631_dai),
-};
-
-static void arndale_put_of_nodes(struct snd_soc_card *card)
-{
- struct snd_soc_dai_link *dai_link;
- int i;
-
- for_each_card_prelinks(card, i, dai_link) {
- of_node_put(dai_link->cpus->of_node);
- of_node_put(dai_link->codecs->of_node);
- }
-}
-
-static int arndale_audio_probe(struct platform_device *pdev)
-{
- int n, ret;
- struct device_node *np = pdev->dev.of_node;
- struct snd_soc_card *card = &arndale_rt5631;
-
- card->dev = &pdev->dev;
-
- for (n = 0; np && n < ARRAY_SIZE(arndale_rt5631_dai); n++) {
- if (!arndale_rt5631_dai[n].cpus->dai_name) {
- arndale_rt5631_dai[n].cpus->of_node = of_parse_phandle(np,
- "samsung,audio-cpu", n);
-
- if (!arndale_rt5631_dai[n].cpus->of_node) {
- dev_err(&pdev->dev,
- "Property 'samsung,audio-cpu' missing or invalid\n");
- return -EINVAL;
- }
- }
- if (!arndale_rt5631_dai[n].platforms->name)
- arndale_rt5631_dai[n].platforms->of_node =
- arndale_rt5631_dai[n].cpus->of_node;
-
- arndale_rt5631_dai[n].codecs->name = NULL;
- arndale_rt5631_dai[n].codecs->of_node = of_parse_phandle(np,
- "samsung,audio-codec", n);
- if (!arndale_rt5631_dai[0].codecs->of_node) {
- dev_err(&pdev->dev,
- "Property 'samsung,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err_put_of_nodes;
- }
- }
-
- ret = devm_snd_soc_register_card(card->dev, card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
- goto err_put_of_nodes;
- }
- return 0;
-
-err_put_of_nodes:
- arndale_put_of_nodes(card);
- return ret;
-}
-
-static int arndale_audio_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- arndale_put_of_nodes(card);
- return 0;
-}
-
-static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
- { .compatible = "samsung,arndale-rt5631", },
- { .compatible = "samsung,arndale-alc5631", },
- {},
-};
-MODULE_DEVICE_TABLE(of, samsung_arndale_rt5631_of_match);
-
-static struct platform_driver arndale_audio_driver = {
- .driver = {
- .name = "arndale-audio",
- .pm = &snd_soc_pm_ops,
- .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
- },
- .probe = arndale_audio_probe,
- .remove = arndale_audio_remove,
-};
-
-module_platform_driver(arndale_audio_driver);
-
-MODULE_AUTHOR("Claude <claude@insignal.co.kr>");
-MODULE_DESCRIPTION("ALSA SoC Driver for Arndale Board");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c
index 65497cd477a5..294dce111b05 100644
--- a/sound/soc/samsung/idma.c
+++ b/sound/soc/samsung/idma.c
@@ -137,8 +137,9 @@ static void idma_done(void *id, int bytes_xfer)
snd_pcm_period_elapsed(substream);
}
-static int idma_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int idma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd = substream->runtime->private_data;
@@ -163,14 +164,16 @@ static int idma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int idma_hw_free(struct snd_pcm_substream *substream)
+static int idma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
-static int idma_prepare(struct snd_pcm_substream *substream)
+static int idma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct idma_ctrl *prtd = substream->runtime->private_data;
@@ -183,7 +186,8 @@ static int idma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int idma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct idma_ctrl *prtd = substream->runtime->private_data;
int ret = 0;
@@ -216,7 +220,8 @@ static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t
- idma_pointer(struct snd_pcm_substream *substream)
+idma_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd = runtime->private_data;
@@ -233,7 +238,8 @@ static snd_pcm_uframes_t
return bytes_to_frames(substream->runtime, res);
}
-static int idma_mmap(struct snd_pcm_substream *substream,
+static int idma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -278,7 +284,8 @@ static irqreturn_t iis_irq(int irqno, void *dev_id)
return IRQ_HANDLED;
}
-static int idma_open(struct snd_pcm_substream *substream)
+static int idma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd;
@@ -304,7 +311,8 @@ static int idma_open(struct snd_pcm_substream *substream)
return 0;
}
-static int idma_close(struct snd_pcm_substream *substream)
+static int idma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct idma_ctrl *prtd = runtime->private_data;
@@ -319,19 +327,8 @@ static int idma_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops idma_ops = {
- .open = idma_open,
- .close = idma_close,
- .ioctl = snd_pcm_lib_ioctl,
- .trigger = idma_trigger,
- .pointer = idma_pointer,
- .mmap = idma_mmap,
- .hw_params = idma_hw_params,
- .hw_free = idma_hw_free,
- .prepare = idma_prepare,
-};
-
-static void idma_free(struct snd_pcm *pcm)
+static void idma_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
struct snd_dma_buffer *buf;
@@ -367,7 +364,8 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
return 0;
}
-static int idma_new(struct snd_soc_pcm_runtime *rtd)
+static int idma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -394,9 +392,17 @@ void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
EXPORT_SYMBOL_GPL(idma_reg_addr_init);
static const struct snd_soc_component_driver asoc_idma_platform = {
- .ops = &idma_ops,
- .pcm_new = idma_new,
- .pcm_free = idma_free,
+ .open = idma_open,
+ .close = idma_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .trigger = idma_trigger,
+ .pointer = idma_pointer,
+ .mmap = idma_mmap,
+ .hw_params = idma_hw_params,
+ .hw_free = idma_hw_free,
+ .prepare = idma_prepare,
+ .pcm_construct = idma_new,
+ .pcm_destruct = idma_free,
};
static int asoc_idma_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 5aee11c94f2a..2b0eca02a8b9 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -115,7 +115,8 @@ static void camelot_rxdma(void *data)
snd_pcm_period_elapsed(cam->rx_ss);
}
-static int camelot_pcm_open(struct snd_pcm_substream *substream)
+static int camelot_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
@@ -148,7 +149,8 @@ static int camelot_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int camelot_pcm_close(struct snd_pcm_substream *substream)
+static int camelot_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
@@ -168,7 +170,8 @@ static int camelot_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static int camelot_hw_params(struct snd_pcm_substream *substream,
+static int camelot_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -191,12 +194,14 @@ static int camelot_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int camelot_hw_free(struct snd_pcm_substream *substream)
+static int camelot_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int camelot_prepare(struct snd_pcm_substream *substream)
+static int camelot_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -244,7 +249,8 @@ static inline void dmabrg_rec_dma_stop(struct camelot_pcm *cam)
BRGREG(BRGACR) = acr | ACR_RDS;
}
-static int camelot_trigger(struct snd_pcm_substream *substream, int cmd)
+static int camelot_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
@@ -270,7 +276,8 @@ static int camelot_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static snd_pcm_uframes_t camelot_pos(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t camelot_pos(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -292,18 +299,8 @@ static snd_pcm_uframes_t camelot_pos(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, pos);
}
-static const struct snd_pcm_ops camelot_pcm_ops = {
- .open = camelot_pcm_open,
- .close = camelot_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = camelot_hw_params,
- .hw_free = camelot_hw_free,
- .prepare = camelot_prepare,
- .trigger = camelot_trigger,
- .pointer = camelot_pos,
-};
-
-static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int camelot_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
@@ -312,15 +309,22 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
*/
snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
DMABRG_PREALLOC_BUFFER, DMABRG_PREALLOC_BUFFER_MAX);
return 0;
}
static const struct snd_soc_component_driver sh7760_soc_component = {
- .ops = &camelot_pcm_ops,
- .pcm_new = camelot_pcm_new,
+ .open = camelot_pcm_open,
+ .close = camelot_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = camelot_hw_params,
+ .hw_free = camelot_hw_free,
+ .prepare = camelot_prepare,
+ .trigger = camelot_trigger,
+ .pointer = camelot_pos,
+ .pcm_construct = camelot_pcm_new,
};
static int sh7760_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 3447dbdba1f1..e384fdc8d60e 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1718,7 +1718,8 @@ static const struct snd_pcm_hardware fsi_pcm_hardware = {
.fifo_size = 256,
};
-static int fsi_pcm_open(struct snd_pcm_substream *substream)
+static int fsi_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret = 0;
@@ -1731,19 +1732,22 @@ static int fsi_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int fsi_hw_params(struct snd_pcm_substream *substream,
+static int fsi_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
}
-static int fsi_hw_free(struct snd_pcm_substream *substream)
+static int fsi_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t fsi_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct fsi_priv *fsi = fsi_get_priv(substream);
struct fsi_stream *io = fsi_stream_get(fsi, substream);
@@ -1751,14 +1755,6 @@ static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
return fsi_sample2frame(fsi, io->buff_sample_pos);
}
-static const struct snd_pcm_ops fsi_pcm_ops = {
- .open = fsi_pcm_open,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = fsi_hw_params,
- .hw_free = fsi_hw_free,
- .pointer = fsi_pointer,
-};
-
/*
* snd_soc_component
*/
@@ -1766,7 +1762,8 @@ static const struct snd_pcm_ops fsi_pcm_ops = {
#define PREALLOC_BUFFER (32 * 1024)
#define PREALLOC_BUFFER_MAX (32 * 1024)
-static int fsi_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int fsi_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
snd_pcm_lib_preallocate_pages_for_all(
rtd->pcm,
@@ -1817,8 +1814,12 @@ static struct snd_soc_dai_driver fsi_soc_dai[] = {
static const struct snd_soc_component_driver fsi_soc_component = {
.name = "fsi",
- .ops = &fsi_pcm_ops,
- .pcm_new = fsi_pcm_new,
+ .open = fsi_pcm_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = fsi_hw_params,
+ .hw_free = fsi_hw_free,
+ .pointer = fsi_pointer,
+ .pcm_construct = fsi_pcm_new,
};
/*
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index e9596c2096cd..399dc6e9bde5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -302,7 +302,7 @@ int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
int rsnd_channel_normalization(int chan)
{
- if ((chan > 8) || (chan < 0))
+ if (WARN_ON((chan > 8) || (chan < 0)))
return 0;
/* TDM Extend Mode needs 8ch */
@@ -376,6 +376,17 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
*/
u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
{
+ static const u32 dalign_values[8][2] = {
+ {0x76543210, 0x67452301},
+ {0x00000032, 0x00000023},
+ {0x00007654, 0x00006745},
+ {0x00000076, 0x00000067},
+ {0xfedcba98, 0xefcdab89},
+ {0x000000ba, 0x000000ab},
+ {0x0000fedc, 0x0000efcd},
+ {0x000000fe, 0x000000ef},
+ };
+ int id = 0, inv;
struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
struct rsnd_mod *target;
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -411,13 +422,18 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
target = cmd ? cmd : ssiu;
}
+ if (mod == ssiu)
+ id = rsnd_mod_id_sub(mod);
+
/* Non target mod or non 16bit needs normal DALIGN */
if ((snd_pcm_format_width(runtime->format) != 16) ||
(mod != target))
- return 0x76543210;
+ inv = 0;
/* Target mod needs inverted DALIGN when 16bit */
else
- return 0x67452301;
+ inv = 1;
+
+ return dalign_values[id][inv];
}
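A worked example of the lookup above, assuming rsnd_mod_id_sub() returns
the SSIU sub-channel index:

	/*
	 * 16-bit stream where SSIU0 is the DALIGN target:
	 *	id  = rsnd_mod_id_sub(mod) = 0
	 *	inv = 1 (16-bit on the target mod)
	 *	=> dalign_values[0][1] = 0x67452301 (byte-swapped order)
	 *
	 * Non-target mod, or a non-16-bit stream:
	 *	=> dalign_values[0][0] = 0x76543210 (identity order)
	 */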
u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
@@ -1076,7 +1092,10 @@ static void rsnd_parse_tdm_split_mode(struct rsnd_priv *priv,
j++;
}
+ of_node_put(node);
}
+
+ of_node_put(ssiu_np);
}
static void rsnd_parse_connect_simple(struct rsnd_priv *priv,
@@ -1094,11 +1113,13 @@ static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
struct device_node *endpoint)
{
struct device *dev = rsnd_priv_to_dev(priv);
- struct device_node *remote_node = of_graph_get_remote_port_parent(endpoint);
+ struct device_node *remote_node;
if (!rsnd_io_to_mod_ssi(io))
return;
+ remote_node = of_graph_get_remote_port_parent(endpoint);
+
/* HDMI0 */
if (strstr(remote_node->full_name, "hdmi@fead0000")) {
rsnd_flags_set(io, RSND_STREAM_HDMI0);
@@ -1112,6 +1133,8 @@ static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
}
rsnd_parse_tdm_split_mode(priv, io, endpoint);
+
+ of_node_put(remote_node);
}
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1374,8 +1397,9 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
/*
* pcm ops
*/
-static int rsnd_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
+static int rsnd_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
{
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
@@ -1422,7 +1446,8 @@ static int rsnd_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
}
-static int rsnd_hw_free(struct snd_pcm_substream *substream)
+static int rsnd_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
@@ -1436,7 +1461,8 @@ static int rsnd_hw_free(struct snd_pcm_substream *substream)
return snd_pcm_lib_free_pages(substream);
}
-static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t rsnd_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
@@ -1448,13 +1474,6 @@ static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
return pointer;
}
-static const struct snd_pcm_ops rsnd_pcm_ops = {
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = rsnd_hw_params,
- .hw_free = rsnd_hw_free,
- .pointer = rsnd_pointer,
-};
-
/*
* snd_kcontrol
*/
@@ -1648,8 +1667,11 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
* snd_soc_component
*/
static const struct snd_soc_component_driver rsnd_soc_component = {
- .ops = &rsnd_pcm_ops,
.name = "rsnd",
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = rsnd_hw_params,
+ .hw_free = rsnd_hw_free,
+ .pointer = rsnd_pointer,
};
static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 28f65eba2bb4..95aa26d62e4f 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -165,14 +165,40 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
struct device *dev = rsnd_priv_to_dev(priv);
struct dma_async_tx_descriptor *desc;
struct dma_slave_config cfg = {};
+ enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
int is_play = rsnd_io_is_play(io);
int ret;
+ /*
+ * In the case of monaural data written or read through Audio-DMAC,
+ * the data is always in Left-Justified format, so both the src and
+ * dst DMA bus widths need to be set equal to the physical data width.
+ */
+ if (rsnd_runtime_channel_original(io) == 1) {
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ int bits = snd_pcm_format_physical_width(runtime->format);
+
+ switch (bits) {
+ case 8:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 16:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 32:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ dev_err(dev, "invalid format width %d\n", bits);
+ return -EINVAL;
+ }
+ }
+
cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
cfg.src_addr = dma->src_addr;
cfg.dst_addr = dma->dst_addr;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_addr_width = buswidth;
+ cfg.dst_addr_width = buswidth;
dev_dbg(dev, "%s %pad -> %pad\n",
rsnd_mod_name(mod),
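The resulting mapping for mono streams, summarizing the switch above
(physical widths per the ALSA format definitions):

	/*
	 * Mono stream, physical sample width -> DMA bus width:
	 *	S8		->  8 bits -> DMA_SLAVE_BUSWIDTH_1_BYTE
	 *	S16_LE		-> 16 bits -> DMA_SLAVE_BUSWIDTH_2_BYTES
	 *	S24_LE, S32_LE	-> 32 bits -> DMA_SLAVE_BUSWIDTH_4_BYTES
	 *				      (S24_LE uses a 32-bit container)
	 * Streams with more than one channel keep the 4-byte default.
	 */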
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 78c3145b4109..a5e21e554da2 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -281,7 +281,8 @@ static int siu_pcm_stmread_stop(struct siu_port *port_info)
return 0;
}
-static int siu_pcm_hw_params(struct snd_pcm_substream *ss,
+static int siu_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss,
struct snd_pcm_hw_params *hw_params)
{
struct siu_info *info = siu_i2s_data;
@@ -297,7 +298,8 @@ static int siu_pcm_hw_params(struct snd_pcm_substream *ss,
return ret;
}
-static int siu_pcm_hw_free(struct snd_pcm_substream *ss)
+static int siu_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
@@ -324,11 +326,10 @@ static bool filter(struct dma_chan *chan, void *slave)
return true;
}
-static int siu_pcm_open(struct snd_pcm_substream *ss)
+static int siu_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
/* Playback / Capture */
- struct snd_soc_pcm_runtime *rtd = ss->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct siu_platform *pdata = component->dev->platform_data;
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
@@ -367,7 +368,8 @@ static int siu_pcm_open(struct snd_pcm_substream *ss)
return 0;
}
-static int siu_pcm_close(struct snd_pcm_substream *ss)
+static int siu_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
@@ -389,7 +391,8 @@ static int siu_pcm_close(struct snd_pcm_substream *ss)
return 0;
}
-static int siu_pcm_prepare(struct snd_pcm_substream *ss)
+static int siu_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct siu_info *info = siu_i2s_data;
struct siu_port *port_info = siu_port_info(ss);
@@ -435,7 +438,8 @@ static int siu_pcm_prepare(struct snd_pcm_substream *ss)
return 0;
}
-static int siu_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+static int siu_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss, int cmd)
{
struct siu_info *info = siu_i2s_data;
struct device *dev = ss->pcm->card->dev;
@@ -477,7 +481,9 @@ static int siu_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
* So far only resolution of one period is supported, subject to extending the
* dmangine API
*/
-static snd_pcm_uframes_t siu_pcm_pointer_dma(struct snd_pcm_substream *ss)
+static snd_pcm_uframes_t
+siu_pcm_pointer_dma(struct snd_soc_component *component,
+ struct snd_pcm_substream *ss)
{
struct device *dev = ss->pcm->card->dev;
struct siu_info *info = siu_i2s_data;
@@ -512,7 +518,8 @@ static snd_pcm_uframes_t siu_pcm_pointer_dma(struct snd_pcm_substream *ss)
return bytes_to_frames(ss->runtime, ptr);
}
-static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int siu_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
/* card->dev == socdev->dev, see snd_soc_new_pcms() */
struct snd_card *card = rtd->card->snd_card;
@@ -558,7 +565,8 @@ static int siu_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void siu_pcm_free(struct snd_pcm *pcm)
+static void siu_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct platform_device *pdev = to_platform_device(pcm->card->dev);
struct siu_port *port_info = siu_ports[pdev->id];
@@ -571,21 +579,17 @@ static void siu_pcm_free(struct snd_pcm *pcm)
dev_dbg(pcm->card->dev, "%s\n", __func__);
}
-static const struct snd_pcm_ops siu_pcm_ops = {
+const struct snd_soc_component_driver siu_component = {
+ .name = DRV_NAME,
.open = siu_pcm_open,
.close = siu_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = siu_pcm_hw_params,
.hw_free = siu_pcm_hw_free,
.prepare = siu_pcm_prepare,
.trigger = siu_pcm_trigger,
.pointer = siu_pcm_pointer_dma,
-};
-
-struct snd_soc_component_driver siu_component = {
- .name = DRV_NAME,
- .ops = &siu_pcm_ops,
- .pcm_new = siu_pcm_new,
- .pcm_free = siu_pcm_free,
+ .pcm_construct = siu_pcm_new,
+ .pcm_destruct = siu_pcm_free,
};
EXPORT_SYMBOL_GPL(siu_component);
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index 79ffc2820ba9..9054558ce386 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -314,30 +314,24 @@ void snd_soc_component_module_put(struct snd_soc_component *component,
int snd_soc_component_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->open)
- return component->driver->ops->open(substream);
-
+ if (component->driver->open)
+ return component->driver->open(component, substream);
return 0;
}
int snd_soc_component_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->close)
- return component->driver->ops->close(substream);
-
+ if (component->driver->close)
+ return component->driver->close(component, substream);
return 0;
}
int snd_soc_component_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->prepare)
- return component->driver->ops->prepare(substream);
-
+ if (component->driver->prepare)
+ return component->driver->prepare(component, substream);
return 0;
}
@@ -345,20 +339,17 @@ int snd_soc_component_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- if (component->driver->ops &&
- component->driver->ops->hw_params)
- return component->driver->ops->hw_params(substream, params);
-
+ if (component->driver->hw_params)
+ return component->driver->hw_params(component,
+ substream, params);
return 0;
}
int snd_soc_component_hw_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- if (component->driver->ops &&
- component->driver->ops->hw_free)
- return component->driver->ops->hw_free(substream);
-
+ if (component->driver->hw_free)
+ return component->driver->hw_free(component, substream);
return 0;
}
@@ -366,10 +357,8 @@ int snd_soc_component_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
int cmd)
{
- if (component->driver->ops &&
- component->driver->ops->trigger)
- return component->driver->ops->trigger(substream, cmd);
-
+ if (component->driver->trigger)
+ return component->driver->trigger(component, substream, cmd);
return 0;
}
@@ -431,14 +420,10 @@ int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream)
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME: use 1st pointer */
- if (component->driver->ops &&
- component->driver->ops->pointer)
- return component->driver->ops->pointer(substream);
- }
+ /* FIXME: use 1st pointer */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->pointer)
+ return component->driver->pointer(component, substream);
return 0;
}
@@ -450,17 +435,32 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
+ /* FIXME: use 1st ioctl */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->ioctl)
+ return component->driver->ioctl(component, substream,
+ cmd, arg);
+
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+
+int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ int ret;
- /* FIXME: use 1st ioctl */
- if (component->driver->ops &&
- component->driver->ops->ioctl)
- return component->driver->ops->ioctl(substream,
- cmd, arg);
+ for_each_rtd_components(rtd, rtdcom, component) {
+ if (component->driver->sync_stop) {
+ ret = component->driver->sync_stop(component,
+ substream);
+ if (ret < 0)
+ return ret;
+ }
}
- return snd_pcm_lib_ioctl(substream, cmd, arg);
+ return 0;
}
int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
@@ -471,15 +471,11 @@ int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME. it returns 1st copy now */
- if (component->driver->ops &&
- component->driver->ops->copy_user)
- return component->driver->ops->copy_user(
- substream, channel, pos, buf, bytes);
- }
+ /* FIXME. it returns 1st copy now */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->copy_user)
+ return component->driver->copy_user(
+ component, substream, channel, pos, buf, bytes);
return -EINVAL;
}
@@ -492,13 +488,11 @@ struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct page *page;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME. it returns 1st page now */
- if (component->driver->ops &&
- component->driver->ops->page) {
- page = component->driver->ops->page(substream, offset);
+ /* FIXME. it returns 1st page now */
+ for_each_rtd_components(rtd, rtdcom, component) {
+ if (component->driver->page) {
+ page = component->driver->page(component,
+ substream, offset);
if (page)
return page;
}
@@ -514,30 +508,24 @@ int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- /* FIXME. it returns 1st mmap now */
- if (component->driver->ops &&
- component->driver->ops->mmap)
- return component->driver->ops->mmap(substream, vma);
- }
+ /* FIXME. it returns 1st mmap now */
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->mmap)
+ return component->driver->mmap(component,
+ substream, vma);
return -EINVAL;
}
-int snd_soc_pcm_component_new(struct snd_pcm *pcm)
+int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (component->driver->pcm_new) {
- ret = component->driver->pcm_new(rtd);
+ for_each_rtd_components(rtd, rtdcom, component) {
+ if (component->driver->pcm_construct) {
+ ret = component->driver->pcm_construct(component, rtd);
if (ret < 0)
return ret;
}
@@ -546,16 +534,12 @@ int snd_soc_pcm_component_new(struct snd_pcm *pcm)
return 0;
}
-void snd_soc_pcm_component_free(struct snd_pcm *pcm)
+void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
- if (component->driver->pcm_free)
- component->driver->pcm_free(pcm);
- }
+ for_each_rtd_components(rtd, rtdcom, component)
+ if (component->driver->pcm_destruct)
+ component->driver->pcm_destruct(component, rtd->pcm);
}
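Taken together, these conversions all follow one pattern: the per-substream
snd_pcm_ops table goes away and each callback moves into
snd_soc_component_driver, taking the component as its first argument. A
minimal, hypothetical platform driver after the conversion (all names
illustrative):

	static int foo_open(struct snd_soc_component *component,
			    struct snd_pcm_substream *substream)
	{
		/* component is passed in directly; no snd_soc_rtdcom_lookup() */
		return 0;
	}

	static int foo_pcm_new(struct snd_soc_component *component,
			       struct snd_soc_pcm_runtime *rtd)
	{
		return 0;
	}

	static const struct snd_soc_component_driver foo_component = {
		.name		= "foo",
		.open		= foo_open,
		.ioctl		= snd_soc_pcm_lib_ioctl,	/* generic glue */
		.pcm_construct	= foo_pcm_new,
	};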
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 9e54d8ae6d2c..61f230324164 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -28,9 +28,7 @@ static int soc_compr_components_open(struct snd_compr_stream *cstream,
struct snd_soc_rtdcom_list *rtdcom;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->open)
continue;
@@ -57,9 +55,7 @@ static int soc_compr_components_free(struct snd_compr_stream *cstream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component == last)
break;
@@ -353,9 +349,7 @@ static int soc_compr_components_trigger(struct snd_compr_stream *cstream,
struct snd_soc_rtdcom_list *rtdcom;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->trigger)
continue;
@@ -458,9 +452,7 @@ static int soc_compr_components_set_params(struct snd_compr_stream *cstream,
struct snd_soc_rtdcom_list *rtdcom;
int ret;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->set_params)
continue;
@@ -601,9 +593,7 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
goto err;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_params)
continue;
@@ -627,9 +617,7 @@ static int soc_compr_get_caps(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_caps)
continue;
@@ -652,9 +640,7 @@ static int soc_compr_get_codec_caps(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_codec_caps)
continue;
@@ -684,9 +670,7 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
goto err;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->ack)
continue;
@@ -715,9 +699,7 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
if (cpu_dai->driver->cops && cpu_dai->driver->cops->pointer)
cpu_dai->driver->cops->pointer(cstream, tstamp, cpu_dai);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->pointer)
continue;
@@ -740,9 +722,7 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->copy)
continue;
@@ -770,9 +750,7 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
return ret;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->set_metadata)
continue;
@@ -801,9 +779,7 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
return ret;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->get_metadata)
continue;
@@ -932,9 +908,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->compr_ops ||
!component->driver->compr_ops->copy)
continue;
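The for_each_rtd_components() helper used throughout this conversion might
be defined along these lines (a sketch; see include/sound/soc.h for the
real macro):

	#define for_each_rtd_components(rtd, rtdcom, _component)	\
		for ((rtdcom) = list_first_entry(&(rtd)->component_list,\
						 typeof(*(rtdcom)), list); \
		     (&(rtdcom)->list != &(rtd)->component_list) &&	\
			     ((_component) = (rtdcom)->component);	\
		     (rtdcom) = list_next_entry((rtdcom), list))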
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 88978a3036c4..062653ab03a3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -125,6 +125,9 @@ static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+ if (!rtd)
+ return 0;
+
if (attr == &dev_attr_pmdown_time.attr)
return attr->mode; /* always visible */
return rtd->num_codecs ? attr->mode : 0; /* enabled only with codec */
@@ -274,43 +277,58 @@ static inline void snd_soc_debugfs_exit(void)
#endif
+/*
+ * This is glue code between snd_pcm_lib_ioctl() and
+ * snd_soc_component_driver :: ioctl
+ */
+int snd_soc_pcm_lib_ioctl(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg)
+{
+ return snd_pcm_lib_ioctl(substream, cmd, arg);
+}
+EXPORT_SYMBOL_GPL(snd_soc_pcm_lib_ioctl);
+
static int snd_soc_rtdcom_add(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_component *component)
{
struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *comp;
- for_each_rtdcom(rtd, rtdcom) {
+ for_each_rtd_components(rtd, rtdcom, comp) {
/* already connected */
- if (rtdcom->component == component)
+ if (comp == component)
return 0;
}
- rtdcom = kmalloc(sizeof(*rtdcom), GFP_KERNEL);
+ /*
+ * The rtdcom created here will be freed when rtd->dev is freed.
+ * see
+ * soc_free_pcm_runtime() :: device_unregister(rtd->dev)
+ */
+ rtdcom = devm_kzalloc(rtd->dev, sizeof(*rtdcom), GFP_KERNEL);
if (!rtdcom)
return -ENOMEM;
rtdcom->component = component;
INIT_LIST_HEAD(&rtdcom->list);
+ /*
+ * When rtd is freed, the rtdcom created here is freed
+ * along with it, so there is no need to call
+ * list_del(&rtdcom->list) on cleanup.
+ */
list_add_tail(&rtdcom->list, &rtd->component_list);
return 0;
}
-static void snd_soc_rtdcom_del_all(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_rtdcom_list *rtdcom1, *rtdcom2;
-
- for_each_rtdcom_safe(rtd, rtdcom1, rtdcom2)
- kfree(rtdcom1);
-
- INIT_LIST_HEAD(&rtd->component_list);
-}
-
struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
const char *driver_name)
{
struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
if (!driver_name)
return NULL;
@@ -323,8 +341,8 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
* But, if many components which have same driver name are connected
* to 1 rtd, this function will return 1st found component.
*/
- for_each_rtdcom(rtd, rtdcom) {
- const char *component_name = rtdcom->component->driver->name;
+ for_each_rtd_components(rtd, rtdcom, component) {
+ const char *component_name = component->driver->name;
if (!component_name)
continue;
@@ -338,6 +356,39 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
}
EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
+static struct snd_soc_component
+*snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
+{
+ struct snd_soc_component *component;
+ struct snd_soc_component *found_component;
+
+ found_component = NULL;
+ for_each_component(component) {
+ if ((dev == component->dev) &&
+ (!driver_name ||
+ (driver_name == component->driver->name) ||
+ (strcmp(component->driver->name, driver_name) == 0))) {
+ found_component = component;
+ break;
+ }
+ }
+
+ return found_component;
+}
+
+struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
+ const char *driver_name)
+{
+ struct snd_soc_component *component;
+
+ mutex_lock(&client_mutex);
+ component = snd_soc_lookup_component_nolocked(dev, driver_name);
+ mutex_unlock(&client_mutex);
+
+ return component;
+}
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component);
+
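Typical usage of the new lookup helper might look like this (hypothetical
caller):

	struct snd_soc_component *component;

	/* find any component bound to this device, whatever its driver */
	component = snd_soc_lookup_component(dev, NULL);
	if (!component)
		return -EPROBE_DEFER;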
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream)
{
@@ -355,58 +406,104 @@ EXPORT_SYMBOL_GPL(snd_soc_get_dai_substream);
static const struct snd_soc_ops null_snd_soc_ops;
+static void soc_release_rtd_dev(struct device *dev)
+{
+ /* "dev" means "rtd->dev" */
+ kfree(dev);
+}
+
+static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
+{
+ if (!rtd)
+ return;
+
+ list_del(&rtd->list);
+
+ flush_delayed_work(&rtd->delayed_work);
+ snd_soc_pcm_component_free(rtd);
+
+ /*
+ * we don't need to call kfree() on rtd->dev here;
+ * see
+ * soc_release_rtd_dev()
+ *
+ * A NULL check on rtd->dev is not needed either, because
+ * it is allocated *before* rtd.
+ * see
+ * soc_new_pcm_runtime()
+ */
+ device_unregister(rtd->dev);
+}
+
static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
struct snd_soc_card *card, struct snd_soc_dai_link *dai_link)
{
struct snd_soc_pcm_runtime *rtd;
+ struct device *dev;
+ int ret;
- rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime), GFP_KERNEL);
- if (!rtd)
+ /*
+ * for rtd->dev
+ */
+ dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev)
return NULL;
- INIT_LIST_HEAD(&rtd->component_list);
- rtd->card = card;
- rtd->dai_link = dai_link;
- if (!rtd->dai_link->ops)
- rtd->dai_link->ops = &null_snd_soc_ops;
+ dev->parent = card->dev;
+ dev->release = soc_release_rtd_dev;
+ dev->groups = soc_dev_attr_groups;
- rtd->codec_dais = kcalloc(dai_link->num_codecs,
- sizeof(struct snd_soc_dai *),
- GFP_KERNEL);
- if (!rtd->codec_dais) {
- kfree(rtd);
+ dev_set_name(dev, "%s", dai_link->name);
+
+ ret = device_register(dev);
+ if (ret < 0) {
+ put_device(dev); /* soc_release_rtd_dev */
return NULL;
}
- return rtd;
-}
+ /*
+ * for rtd
+ */
+ rtd = devm_kzalloc(dev, sizeof(*rtd), GFP_KERNEL);
+ if (!rtd)
+ goto free_rtd;
-static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
-{
- kfree(rtd->codec_dais);
- snd_soc_rtdcom_del_all(rtd);
- kfree(rtd);
-}
+ rtd->dev = dev;
+ dev_set_drvdata(dev, rtd);
+
+ /*
+ * for rtd->codec_dais
+ */
+ rtd->codec_dais = devm_kcalloc(dev, dai_link->num_codecs,
+ sizeof(struct snd_soc_dai *),
+ GFP_KERNEL);
+ if (!rtd->codec_dais)
+ goto free_rtd;
+
+ /*
+ * rtd remaining settings
+ */
+ INIT_LIST_HEAD(&rtd->component_list);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
+ INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
+
+ rtd->card = card;
+ rtd->dai_link = dai_link;
+ if (!rtd->dai_link->ops)
+ rtd->dai_link->ops = &null_snd_soc_ops;
-static void soc_add_pcm_runtime(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd)
-{
/* see for_each_card_rtds */
list_add_tail(&rtd->list, &card->rtd_list);
rtd->num = card->num_rtd;
card->num_rtd++;
-}
-static void soc_remove_pcm_runtimes(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd, *_rtd;
-
- for_each_card_rtds_safe(card, rtd, _rtd) {
- list_del(&rtd->list);
- soc_free_pcm_runtime(rtd);
- }
+ return rtd;
- card->num_rtd = 0;
+free_rtd:
+ soc_free_pcm_runtime(rtd);
+ return NULL;
}
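The resulting object lifetime after this refactoring, as a sketch:

	/*
	 * soc_new_pcm_runtime():
	 *	dev		= kzalloc(); device_register(dev);
	 *	rtd		= devm_kzalloc(dev, ...);
	 *	rtd->codec_dais	= devm_kcalloc(dev, ...);
	 *	rtdcom entries	= devm_kzalloc(rtd->dev, ...); (snd_soc_rtdcom_add)
	 *
	 * soc_free_pcm_runtime():
	 *	device_unregister(rtd->dev)
	 *	  -> devres teardown frees rtd, codec_dais and the rtdcoms
	 *	  -> soc_release_rtd_dev() finally kfree()s the device itself
	 */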
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -859,37 +956,168 @@ struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai_link);
-static bool soc_is_dai_link_bound(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
+static int soc_dai_link_sanity_check(struct snd_soc_card *card,
+ struct snd_soc_dai_link *link)
{
- struct snd_soc_pcm_runtime *rtd;
+ int i;
+ struct snd_soc_dai_link_component *codec, *platform;
- for_each_card_rtds(card, rtd) {
- if (rtd->dai_link == dai_link)
- return true;
+ for_each_link_codecs(link, i, codec) {
+ /*
+ * Codec must be specified by exactly one of name or OF node,
+ * not both and not neither.
+ */
+ if (!!codec->name == !!codec->of_node) {
+ dev_err(card->dev, "ASoC: Neither/both codec name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /* Codec DAI name must be specified */
+ if (!codec->dai_name) {
+ dev_err(card->dev, "ASoC: codec_dai_name not set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Defer card registration if codec component is not added to
+ * component list.
+ */
+ if (!soc_find_component(codec))
+ return -EPROBE_DEFER;
}
- return false;
+ for_each_link_platforms(link, i, platform) {
+ /*
+ * Platform may be specified by either name or OF node. It can
+ * also be left unspecified, in which case no components will be
+ * inserted into the rtdcom list.
+ */
+ if (!!platform->name == !!platform->of_node) {
+ dev_err(card->dev,
+ "ASoC: Neither/both platform name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Defer card registration if platform component is not added to
+ * component list.
+ */
+ if (!soc_find_component(platform))
+ return -EPROBE_DEFER;
+ }
+
+ /* FIXME */
+ if (link->num_cpus > 1) {
+ dev_err(card->dev,
+ "ASoC: multi cpu is not yet supported %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * CPU device may be specified by either name or OF node, but
+ * can be left unspecified, and will be matched based on DAI
+ * name alone.
+ */
+ if (link->cpus->name && link->cpus->of_node) {
+ dev_err(card->dev,
+ "ASoC: Neither/both cpu name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ /*
+ * Defer card registration if cpu dai component is not added to
+ * component list.
+ */
+ if ((link->cpus->of_node || link->cpus->name) &&
+ !soc_find_component(link->cpus))
+ return -EPROBE_DEFER;
+
+ /*
+ * At least one of CPU DAI name or CPU device name/node must be
+ * specified
+ */
+ if (!link->cpus->dai_name &&
+ !(link->cpus->name || link->cpus->of_node)) {
+ dev_err(card->dev,
+ "ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
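The `!!codec->name == !!codec->of_node` test above is a compact exactly-one-of check: normalizing each pointer to 0 or 1 makes equality mean "both set" or "both unset", the two invalid cases. A minimal userspace demonstration of the idiom (names are illustrative):

#include <stdio.h>

/* Returns 1 when exactly one of the two pointers is set. */
static int exactly_one(const void *a, const void *b)
{
	/* !!p maps a pointer to 0 or 1, so inequality means
	 * exactly one of the two is non-NULL. */
	return !!a != !!b;
}

int main(void)
{
	printf("name only:   %d\n", exactly_one("wm8962", NULL));   /* 1 */
	printf("node only:   %d\n", exactly_one(NULL, "node"));     /* 1 */
	printf("both set:    %d\n", exactly_one("wm8962", "node")); /* 0 */
	printf("neither:     %d\n", exactly_one(NULL, NULL));       /* 0 */
	return 0;
}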
+
+/**
+ * snd_soc_remove_dai_link - Remove a DAI link from the list
+ * @card: The ASoC card that owns the link
+ * @dai_link: The DAI link to remove
+ *
+ * This function removes a DAI link from the ASoC card's link list.
+ *
+ * For DAI links previously added by topology, topology should
+ * remove them by using the dobj embedded in the link.
+ */
+void snd_soc_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ lockdep_assert_held(&client_mutex);
+
+ /*
+ * Notify the machine driver for extra destruction
+ */
+ if (card->remove_dai_link)
+ card->remove_dai_link(card, dai_link);
+
+ list_del(&dai_link->list);
+
+ rtd = snd_soc_get_pcm_runtime(card, dai_link->name);
+ if (rtd)
+ soc_free_pcm_runtime(rtd);
}
+EXPORT_SYMBOL_GPL(snd_soc_remove_dai_link);
-static int soc_bind_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
+/**
+ * snd_soc_add_dai_link - Add a DAI link dynamically
+ * @card: The ASoC card to which the DAI link is added
+ * @dai_link: The new DAI link to add
+ *
+ * This function adds a DAI link to the ASoC card's link list.
+ *
+ * Note: Topology can use this API to add DAI links when probing the
+ * topology component, and machine drivers can still define static
+ * DAI links in the dai_link array.
+ */
+int snd_soc_add_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai_link_component *codec, *platform;
struct snd_soc_component *component;
- int i;
+ int i, ret;
+
+ lockdep_assert_held(&client_mutex);
+
+ /*
+ * Notify the machine driver for extra initialization
+ */
+ if (card->add_dai_link)
+ card->add_dai_link(card, dai_link);
if (dai_link->ignore)
return 0;
dev_dbg(card->dev, "ASoC: binding %s\n", dai_link->name);
- if (soc_is_dai_link_bound(card, dai_link)) {
- dev_dbg(card->dev, "ASoC: dai link %s already bound\n",
- dai_link->name);
- return 0;
- }
+ ret = soc_dai_link_sanity_check(card, dai_link);
+ if (ret < 0)
+ return ret;
rtd = soc_new_pcm_runtime(card, dai_link);
if (!rtd)
@@ -930,13 +1158,16 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
}
}
- soc_add_pcm_runtime(card, rtd);
+ /* see for_each_card_links */
+ list_add_tail(&dai_link->list, &card->dai_link_list);
+
return 0;
_err_defer:
soc_free_pcm_runtime(rtd);
return -EPROBE_DEFER;
}
+EXPORT_SYMBOL_GPL(snd_soc_add_dai_link);
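With the sanity check folded in, snd_soc_add_dai_link() can now return -EPROBE_DEFER when a named component has not registered yet; the caller is expected to retry the whole bind later rather than treat it as a hard failure. A toy model of that retry contract, with the errno value and names purely illustrative:

#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-internal errno; not in userspace headers */

static int component_ready;

/* Stand-in for a bind that depends on another driver having probed. */
static int bind_link(const char *name)
{
	if (!component_ready)
		return -EPROBE_DEFER;	/* not fatal: try again later */
	printf("bound %s\n", name);
	return 0;
}

int main(void)
{
	int ret = bind_link("hifi");

	if (ret == -EPROBE_DEFER) {
		/* the real driver core re-runs probe when a new
		 * component registers; simulate that here */
		component_ready = 1;
		ret = bind_link("hifi");
	}
	return ret ? 1 : 0;
}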
static void soc_set_of_name_prefix(struct snd_soc_component *component)
{
@@ -973,8 +1204,16 @@ static void soc_set_name_prefix(struct snd_soc_card *card,
soc_set_of_name_prefix(component);
}
-static void soc_cleanup_component(struct snd_soc_component *component)
+static void soc_remove_component(struct snd_soc_component *component,
+ int probed)
{
+
+ if (!component->card)
+ return;
+
+ if (probed)
+ snd_soc_component_remove(component);
+
/* For framework level robustness */
snd_soc_component_set_jack(component, NULL, NULL);
@@ -985,22 +1224,13 @@ static void soc_cleanup_component(struct snd_soc_component *component)
snd_soc_component_module_put_when_remove(component);
}
-static void soc_remove_component(struct snd_soc_component *component)
-{
- if (!component->card)
- return;
-
- snd_soc_component_remove(component);
-
- soc_cleanup_component(component);
-}
-
static int soc_probe_component(struct snd_soc_card *card,
struct snd_soc_component *component)
{
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(component);
struct snd_soc_dai *dai;
+ int probed = 0;
int ret;
if (!strcmp(component->name, "snd-soc-dummy"))
@@ -1056,6 +1286,7 @@ static int soc_probe_component(struct snd_soc_card *card,
dapm->bias_level != SND_SOC_BIAS_OFF,
"codec %s can not start from non-off bias with idle_bias_off==1\n",
component->name);
+ probed = 1;
/* machine specific init */
if (component->init) {
@@ -1084,7 +1315,7 @@ static int soc_probe_component(struct snd_soc_card *card,
err_probe:
if (ret < 0)
- soc_cleanup_component(component);
+ soc_remove_component(component, probed);
return ret;
}
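The new `probed` flag lets one cleanup helper serve both the error path (probe never succeeded, so the driver's remove callback must be skipped) and normal removal. The pattern in isolation, as a userspace sketch with illustrative names:

#include <stdio.h>

struct comp { const char *name; };

static void comp_remove(struct comp *c)
{
	printf("%s: driver remove()\n", c->name);
}

static void comp_cleanup(struct comp *c, int probed)
{
	/* Only call the driver's remove() if probe() ever succeeded;
	 * the framework-level cleanup runs unconditionally. */
	if (probed)
		comp_remove(c);
	printf("%s: framework cleanup\n", c->name);
}

static int comp_probe(struct comp *c, int fail_stage)
{
	int probed = 0;

	if (fail_stage == 1)
		goto err;	/* failed before the driver probe ran */
	probed = 1;		/* driver probe succeeded */
	if (fail_stage == 2)
		goto err;	/* machine init etc. failed afterwards */
	return 0;
err:
	comp_cleanup(c, probed);
	return -1;
}

int main(void)
{
	struct comp c = { "demo" };

	comp_probe(&c, 1);	/* early failure: no remove() call */
	comp_probe(&c, 2);	/* late failure: remove() then cleanup */
	comp_probe(&c, 0);	/* success path */
	return 0;
}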
@@ -1126,7 +1357,6 @@ static int soc_probe_dai(struct snd_soc_dai *dai, int order)
return 0;
}
-static void soc_rtd_free(struct snd_soc_pcm_runtime *rtd); /* remove me */
static void soc_remove_link_dais(struct snd_soc_card *card)
{
int i;
@@ -1136,10 +1366,6 @@ static void soc_remove_link_dais(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
-
- /* finalize rtd device */
- soc_rtd_free(rtd);
-
/* remove the CODEC DAI */
for_each_rtd_codec_dai(rtd, i, codec_dai)
soc_remove_dai(codec_dai, order);
@@ -1187,13 +1413,11 @@ static void soc_remove_link_components(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component->driver->remove_order != order)
continue;
- soc_remove_component(component);
+ soc_remove_component(component, 1);
}
}
}
@@ -1208,9 +1432,7 @@ static int soc_probe_link_components(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component->driver->probe_order != order)
continue;
@@ -1224,119 +1446,6 @@ static int soc_probe_link_components(struct snd_soc_card *card)
return 0;
}
-static void soc_remove_dai_links(struct snd_soc_card *card)
-{
- struct snd_soc_dai_link *link, *_link;
-
- soc_remove_link_dais(card);
-
- soc_remove_link_components(card);
-
- for_each_card_links_safe(card, link, _link) {
- if (link->dobj.type == SND_SOC_DOBJ_DAI_LINK)
- dev_warn(card->dev, "Topology forgot to remove link %s?\n",
- link->name);
-
- list_del(&link->list);
- }
-}
-
-static int soc_init_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *link)
-{
- int i;
- struct snd_soc_dai_link_component *codec, *platform;
-
- for_each_link_codecs(link, i, codec) {
- /*
- * Codec must be specified by 1 of name or OF node,
- * not both or neither.
- */
- if (!!codec->name == !!codec->of_node) {
- dev_err(card->dev, "ASoC: Neither/both codec name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /* Codec DAI name must be specified */
- if (!codec->dai_name) {
- dev_err(card->dev, "ASoC: codec_dai_name not set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * Defer card registration if codec component is not added to
- * component list.
- */
- if (!soc_find_component(codec))
- return -EPROBE_DEFER;
- }
-
- for_each_link_platforms(link, i, platform) {
- /*
- * Platform may be specified by either name or OF node, but it
- * can be left unspecified, then no components will be inserted
- * in the rtdcom list
- */
- if (!!platform->name == !!platform->of_node) {
- dev_err(card->dev,
- "ASoC: Neither/both platform name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * Defer card registration if platform component is not added to
- * component list.
- */
- if (!soc_find_component(platform))
- return -EPROBE_DEFER;
- }
-
- /* FIXME */
- if (link->num_cpus > 1) {
- dev_err(card->dev,
- "ASoC: multi cpu is not yet supported %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * CPU device may be specified by either name or OF node, but
- * can be left unspecified, and will be matched based on DAI
- * name alone..
- */
- if (link->cpus->name && link->cpus->of_node) {
- dev_err(card->dev,
- "ASoC: Neither/both cpu name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- /*
- * Defer card registartion if cpu dai component is not added to
- * component list.
- */
- if ((link->cpus->of_node || link->cpus->name) &&
- !soc_find_component(link->cpus))
- return -EPROBE_DEFER;
-
- /*
- * At least one of CPU DAI name or CPU device name/node must be
- * specified
- */
- if (!link->cpus->dai_name &&
- !(link->cpus->name || link->cpus->of_node)) {
- dev_err(card->dev,
- "ASoC: Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
- link->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
void snd_soc_disconnect_sync(struct device *dev)
{
struct snd_soc_component *component =
@@ -1349,117 +1458,6 @@ void snd_soc_disconnect_sync(struct device *dev)
}
EXPORT_SYMBOL_GPL(snd_soc_disconnect_sync);
-/**
- * snd_soc_add_dai_link - Add a DAI link dynamically
- * @card: The ASoC card to which the DAI link is added
- * @dai_link: The new DAI link to add
- *
- * This function adds a DAI link to the ASoC card's link list.
- *
- * Note: Topology can use this API to add DAI links when probing the
- * topology component. And machine drivers can still define static
- * DAI links in dai_link array.
- */
-int snd_soc_add_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
-{
- if (dai_link->dobj.type
- && dai_link->dobj.type != SND_SOC_DOBJ_DAI_LINK) {
- dev_err(card->dev, "Invalid dai link type %d\n",
- dai_link->dobj.type);
- return -EINVAL;
- }
-
- lockdep_assert_held(&client_mutex);
- /*
- * Notify the machine driver for extra initialization
- * on the link created by topology.
- */
- if (dai_link->dobj.type && card->add_dai_link)
- card->add_dai_link(card, dai_link);
-
- /* see for_each_card_links */
- list_add_tail(&dai_link->list, &card->dai_link_list);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_add_dai_link);
-
-/**
- * snd_soc_remove_dai_link - Remove a DAI link from the list
- * @card: The ASoC card that owns the link
- * @dai_link: The DAI link to remove
- *
- * This function removes a DAI link from the ASoC card's link list.
- *
- * For DAI links previously added by topology, topology should
- * remove them by using the dobj embedded in the link.
- */
-void snd_soc_remove_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link)
-{
- if (dai_link->dobj.type
- && dai_link->dobj.type != SND_SOC_DOBJ_DAI_LINK) {
- dev_err(card->dev, "Invalid dai link type %d\n",
- dai_link->dobj.type);
- return;
- }
-
- lockdep_assert_held(&client_mutex);
- /*
- * Notify the machine driver for extra destruction
- * on the link created by topology.
- */
- if (dai_link->dobj.type && card->remove_dai_link)
- card->remove_dai_link(card, dai_link);
-
- list_del(&dai_link->list);
-}
-EXPORT_SYMBOL_GPL(snd_soc_remove_dai_link);
-
-static void soc_rtd_free(struct snd_soc_pcm_runtime *rtd)
-{
- if (rtd->dev_registered) {
- /* we don't need to call kfree() for rtd->dev */
- device_unregister(rtd->dev);
- rtd->dev_registered = 0;
- }
-}
-
-static void soc_rtd_release(struct device *dev)
-{
- kfree(dev);
-}
-
-static int soc_rtd_init(struct snd_soc_pcm_runtime *rtd, const char *name)
-{
- int ret = 0;
-
- /* register the rtd device */
- rtd->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!rtd->dev)
- return -ENOMEM;
- rtd->dev->parent = rtd->card->dev;
- rtd->dev->release = soc_rtd_release;
- rtd->dev->groups = soc_dev_attr_groups;
- dev_set_name(rtd->dev, "%s", name);
- dev_set_drvdata(rtd->dev, rtd);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].be_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].fe_clients);
- INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
- ret = device_register(rtd->dev);
- if (ret < 0) {
- /* calling put_device() here to free the rtd->dev */
- put_device(rtd->dev);
- dev_err(rtd->card->dev,
- "ASoC: failed to register runtime device: %d\n", ret);
- return ret;
- }
- rtd->dev_registered = 1;
- return 0;
-}
-
static int soc_link_dai_pcm_new(struct snd_soc_dai **dais, int num_dais,
struct snd_soc_pcm_runtime *rtd)
{
@@ -1509,10 +1507,6 @@ static int soc_link_init(struct snd_soc_card *card,
return ret;
}
- ret = soc_rtd_init(rtd, dai_link->name);
- if (ret)
- return ret;
-
/* add DPCM sysfs entries */
soc_dpcm_debugfs_add(rtd);
@@ -1523,9 +1517,7 @@ static int soc_link_init(struct snd_soc_card *card,
* topology based drivers can use the DAI link id field to set PCM
* device number and then use rtd + a base offset of the BEs.
*/
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (!component->driver->use_dai_pcm_id)
continue;
@@ -1590,21 +1582,18 @@ static int soc_bind_aux_dev(struct snd_soc_card *card)
static int soc_probe_aux_devices(struct snd_soc_card *card)
{
- struct snd_soc_component *comp;
+ struct snd_soc_component *component;
int order;
int ret;
for_each_comp_order(order) {
- for_each_card_auxs(card, comp) {
- if (comp->driver->probe_order == order) {
- ret = soc_probe_component(card, comp);
- if (ret < 0) {
- dev_err(card->dev,
- "ASoC: failed to probe aux component %s %d\n",
- comp->name, ret);
- return ret;
- }
- }
+ for_each_card_auxs(card, component) {
+ if (component->driver->probe_order != order)
+ continue;
+
+ ret = soc_probe_component(card, component);
+ if (ret < 0)
+ return ret;
}
}
@@ -1619,7 +1608,7 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
for_each_comp_order(order) {
for_each_card_auxs_safe(card, comp, _comp) {
if (comp->driver->remove_order == order)
- soc_remove_component(comp);
+ soc_remove_component(comp, 1);
}
}
}
@@ -1729,6 +1718,23 @@ static int is_dmi_valid(const char *field)
return 1;
}
+/*
+ * Append a string to card->dmi_longname with character cleanups.
+ */
+static void append_dmi_string(struct snd_soc_card *card, const char *str)
+{
+ char *dst = card->dmi_longname;
+ size_t dst_len = sizeof(card->dmi_longname);
+ size_t len;
+
+ len = strlen(dst);
+ snprintf(dst + len, dst_len - len, "-%s", str);
+
+ len++; /* skip the separator "-" */
+ if (len < dst_len)
+ cleanup_dmi_name(dst + len);
+}
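append_dmi_string() relies on snprintf() guaranteeing NUL-termination within the given size, and the `len < dst_len` guard keeps the follow-up character cleanup from running past a fully truncated append. A standalone sketch of the same bounded-append idiom; the buffer size and helper names are illustrative, and cleanup_name() only approximates what cleanup_dmi_name() does:

#include <stdio.h>
#include <string.h>
#include <ctype.h>

#define LONGNAME_LEN 32

/* Replace non-alphanumerics, in the spirit of cleanup_dmi_name(). */
static void cleanup_name(char *s)
{
	for (; *s; s++)
		if (!isalnum((unsigned char)*s))
			*s = '_';
}

static void append_string(char *dst, size_t dst_len, const char *str)
{
	size_t len = strlen(dst);

	/* snprintf never writes past dst_len and always terminates */
	snprintf(dst + len, dst_len - len, "-%s", str);

	len++;			/* skip the separator "-" */
	if (len < dst_len)	/* guard against a fully truncated append */
		cleanup_name(dst + len);
}

int main(void)
{
	char longname[LONGNAME_LEN] = "LENOVO";

	append_string(longname, sizeof(longname), "ThinkPad X1?");
	printf("%s\n", longname);	/* LENOVO-ThinkPad_X1_ */
	return 0;
}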
+
/**
* snd_soc_set_dmi_name() - Register DMI names to card
* @card: The card to register DMI names
@@ -1763,61 +1769,37 @@ static int is_dmi_valid(const char *field)
int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
{
const char *vendor, *product, *product_version, *board;
- size_t longname_buf_size = sizeof(card->snd_card->longname);
- size_t len;
if (card->long_name)
return 0; /* long name already set by driver or from DMI */
- /* make up dmi long name as: vendor.product.version.board */
+ /* make up dmi long name as: vendor-product-version-board */
vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
if (!vendor || !is_dmi_valid(vendor)) {
dev_warn(card->dev, "ASoC: no DMI vendor name!\n");
return 0;
}
- snprintf(card->dmi_longname, sizeof(card->snd_card->longname),
- "%s", vendor);
+ snprintf(card->dmi_longname, sizeof(card->dmi_longname), "%s", vendor);
cleanup_dmi_name(card->dmi_longname);
product = dmi_get_system_info(DMI_PRODUCT_NAME);
if (product && is_dmi_valid(product)) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", product);
-
- len++; /* skip the separator "-" */
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
+ append_dmi_string(card, product);
/*
* some vendors like Lenovo may only put a self-explanatory
* name in the product version field
*/
product_version = dmi_get_system_info(DMI_PRODUCT_VERSION);
- if (product_version && is_dmi_valid(product_version)) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", product_version);
-
- len++;
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
- }
+ if (product_version && is_dmi_valid(product_version))
+ append_dmi_string(card, product_version);
}
board = dmi_get_system_info(DMI_BOARD_NAME);
if (board && is_dmi_valid(board)) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", board);
-
- len++;
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
+ if (!product || strcasecmp(board, product))
+ append_dmi_string(card, board);
} else if (!product) {
/* fall back to using legacy name */
dev_warn(card->dev, "ASoC: no DMI board/product name!\n");
@@ -1825,16 +1807,8 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
}
/* Add flavour to dmi long name */
- if (flavour) {
- len = strlen(card->dmi_longname);
- snprintf(card->dmi_longname + len,
- longname_buf_size - len,
- "-%s", flavour);
-
- len++;
- if (len < longname_buf_size)
- cleanup_dmi_name(card->dmi_longname + len);
- }
+ if (flavour)
+ append_dmi_string(card, flavour);
/* set the card long name */
card->long_name = card->dmi_longname;
@@ -1853,7 +1827,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
for_each_component(component) {
- /* does this component override FEs ? */
+ /* does this component override BEs? */
if (!component->driver->ignore_machine)
continue;
@@ -1874,7 +1848,7 @@ match:
continue;
}
- dev_info(card->dev, "info: override FE DAI link %s\n",
+ dev_info(card->dev, "info: override BE DAI link %s\n",
card->dai_link[i].name);
/* override platform component */
@@ -1918,17 +1892,58 @@ match:
}
}
-static void soc_cleanup_card_resources(struct snd_soc_card *card)
+#define soc_setup_card_name(name, name1, name2, norm) \
+ __soc_setup_card_name(name, sizeof(name), name1, name2, norm)
+static void __soc_setup_card_name(char *name, int len,
+ const char *name1, const char *name2,
+ int normalization)
{
- /* free the ALSA card at first; this syncs with pending operations */
- if (card->snd_card) {
- snd_card_free(card->snd_card);
- card->snd_card = NULL;
+ int i;
+
+ snprintf(name, len, "%s", name1 ? name1 : name2);
+
+ if (!normalization)
+ return;
+
+ /*
+ * Name normalization
+ *
+ * The driver name is somewhat special, as it's used as a key for
+ * searches in user-space.
+ *
+ * e.g.
+ * "abcd??efg" -> "abcd__efg"
+ */
+ for (i = 0; i < len; i++) {
+ switch (name[i]) {
+ case '_':
+ case '-':
+ case '\0':
+ break;
+ default:
+ if (!isalnum(name[i]))
+ name[i] = '_';
+ break;
+ }
}
+}
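__soc_setup_card_name() centralizes what used to be open-coded in the card probe: pick the first non-NULL of two candidate names, then optionally normalize the driver name so user-space can use it as a stable lookup key. A userspace rendering of the normalization rule (the function name is illustrative; the kernel loop also walks bytes past the terminator, which is harmless and skipped here):

#include <stdio.h>
#include <ctype.h>

/* Keep [a-zA-Z0-9], '_' and '-'; map everything else to '_'. */
static void normalize_name(char *name, int len)
{
	int i;

	for (i = 0; i < len && name[i]; i++) {
		switch (name[i]) {
		case '_':
		case '-':
			break;
		default:
			if (!isalnum((unsigned char)name[i]))
				name[i] = '_';
			break;
		}
	}
}

int main(void)
{
	char driver[16] = "abcd??efg";

	normalize_name(driver, sizeof(driver));
	printf("%s\n", driver);		/* abcd__efg */
	return 0;
}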
+
+static void soc_cleanup_card_resources(struct snd_soc_card *card,
+ int card_probed)
+{
+ struct snd_soc_dai_link *link, *_link;
+
+ if (card->snd_card)
+ snd_card_disconnect_sync(card->snd_card);
+
+ snd_soc_dapm_shutdown(card);
/* remove and free each DAI */
- soc_remove_dai_links(card);
- soc_remove_pcm_runtimes(card);
+ soc_remove_link_dais(card);
+ soc_remove_link_components(card);
+
+ for_each_card_links_safe(card, link, _link)
+ snd_soc_remove_dai_link(card, link);
/* remove auxiliary devices */
soc_remove_aux_devices(card);
@@ -1938,26 +1953,39 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card)
soc_cleanup_card_debugfs(card);
/* remove the card */
- if (card->remove)
+ if (card_probed && card->remove)
card->remove(card);
+
+ if (card->snd_card) {
+ snd_card_free(card->snd_card);
+ card->snd_card = NULL;
+ }
+}
+
+static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+{
+ if (card->instantiated) {
+ int card_probed = 1;
+
+ card->instantiated = false;
+ snd_soc_flush_all_delayed_work(card);
+
+ soc_cleanup_card_resources(card, card_probed);
+ if (!unregister)
+ list_add(&card->list, &unbind_card_list);
+ } else {
+ if (unregister)
+ list_del(&card->list);
+ }
}
-static int snd_soc_instantiate_card(struct snd_soc_card *card)
+static int snd_soc_bind_card(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai_link *dai_link;
- int ret, i;
+ int ret, i, card_probed = 0;
mutex_lock(&client_mutex);
- for_each_card_prelinks(card, i, dai_link) {
- ret = soc_init_dai_link(card, dai_link);
- if (ret) {
- dev_err(card->dev, "ASoC: failed to init link %s: %d\n",
- dai_link->name, ret);
- mutex_unlock(&client_mutex);
- return ret;
- }
- }
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
snd_soc_dapm_init(&card->dapm, card, NULL);
@@ -1965,19 +1993,13 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* check whether any platform is ignoring machine FE and using topology */
soc_check_tplg_fes(card);
- /* bind DAIs */
- for_each_card_prelinks(card, i, dai_link) {
- ret = soc_bind_dai_link(card, dai_link);
- if (ret != 0)
- goto probe_end;
- }
-
/* bind aux_devs too */
ret = soc_bind_aux_dev(card);
if (ret < 0)
goto probe_end;
/* add predefined DAI links to the list */
+ card->num_rtd = 0;
for_each_card_prelinks(card, i, dai_link) {
ret = snd_soc_add_dai_link(card, dai_link);
if (ret < 0)
@@ -2013,6 +2035,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
ret = card->probe(card);
if (ret < 0)
goto probe_end;
+ card_probed = 1;
}
/* probe all components used by DAI links on this card */
@@ -2025,23 +2048,10 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* probe auxiliary components */
ret = soc_probe_aux_devices(card);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(card->dev,
+ "ASoC: failed to probe aux component %d\n", ret);
goto probe_end;
-
- /*
- * Find new DAI links added during probing components and bind them.
- * Components with topology may bring new DAIs and DAI links.
- */
- for_each_card_links(card, dai_link) {
- if (soc_is_dai_link_bound(card, dai_link))
- continue;
-
- ret = soc_init_dai_link(card, dai_link);
- if (ret)
- goto probe_end;
- ret = soc_bind_dai_link(card, dai_link);
- if (ret)
- goto probe_end;
}
/* probe all DAI links on this card */
@@ -2076,22 +2086,23 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
/* try to set some sane longname if DMI is available */
snd_soc_set_dmi_name(card, NULL);
- snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
- "%s", card->name);
- snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
- "%s", card->long_name ? card->long_name : card->name);
- snprintf(card->snd_card->driver, sizeof(card->snd_card->driver),
- "%s", card->driver_name ? card->driver_name : card->name);
- for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) {
- switch (card->snd_card->driver[i]) {
- case '_':
- case '-':
- case '\0':
- break;
- default:
- if (!isalnum(card->snd_card->driver[i]))
- card->snd_card->driver[i] = '_';
- break;
+ soc_setup_card_name(card->snd_card->shortname,
+ card->name, NULL, 0);
+ soc_setup_card_name(card->snd_card->longname,
+ card->long_name, card->name, 0);
+ soc_setup_card_name(card->snd_card->driver,
+ card->driver_name, card->name, 1);
+
+ if (card->components) {
+ /*
+ * The current implementation of snd_component_add() accepts
+ * multiple components in the string separated by spaces, but
+ * the string collision (identical string) check might not
+ * work correctly.
+ */
+ ret = snd_component_add(card->snd_card, card->components);
+ if (ret < 0) {
+ dev_err(card->dev, "ASoC: %s snd_component_add() failed: %d\n",
+ card->name, ret);
+ goto probe_end;
}
}
@@ -2103,6 +2114,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
goto probe_end;
}
}
+ card_probed = 1;
snd_soc_dapm_new_widgets(card);
@@ -2117,9 +2129,22 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
+ /* deactivate pins to sleep state */
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *dai;
+
+ for_each_rtd_codec_dai(rtd, i, dai) {
+ if (!dai->active)
+ pinctrl_pm_select_sleep_state(dai->dev);
+ }
+
+ if (!rtd->cpu_dai->active)
+ pinctrl_pm_select_sleep_state(rtd->cpu_dai->dev);
+ }
+
probe_end:
if (ret < 0)
- soc_cleanup_card_resources(card);
+ soc_cleanup_card_resources(card, card_probed);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
@@ -2349,33 +2374,6 @@ int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
}
EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);
-static int snd_soc_bind_card(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
- int ret;
-
- ret = snd_soc_instantiate_card(card);
- if (ret != 0)
- return ret;
-
- /* deactivate pins to sleep state */
- for_each_card_rtds(card, rtd) {
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai;
- int j;
-
- for_each_rtd_codec_dai(rtd, j, codec_dai) {
- if (!codec_dai->active)
- pinctrl_pm_select_sleep_state(codec_dai->dev);
- }
-
- if (!cpu_dai->active)
- pinctrl_pm_select_sleep_state(cpu_dai->dev);
- }
-
- return ret;
-}
-
/**
* snd_soc_register_card - Register a card with the ASoC core
*
@@ -2400,7 +2398,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->dapm_dirty);
INIT_LIST_HEAD(&card->dobj_list);
- card->num_rtd = 0;
card->instantiated = 0;
mutex_init(&card->mutex);
mutex_init(&card->dapm_mutex);
@@ -2411,25 +2408,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);
-static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
-{
- if (card->instantiated) {
- card->instantiated = false;
- snd_soc_dapm_shutdown(card);
- snd_soc_flush_all_delayed_work(card);
-
- /* remove all components used by DAI links on this card */
- soc_remove_link_components(card);
-
- soc_cleanup_card_resources(card);
- if (!unregister)
- list_add(&card->list, &unbind_card_list);
- } else {
- if (unregister)
- list_del(&card->list);
- }
-}
-
/**
* snd_soc_unregister_card - Unregister a card with the ASoC core
*
@@ -2488,7 +2466,7 @@ static char *fmt_single_name(struct device *dev, int *id)
*id = 0;
}
- return kstrdup(name, GFP_KERNEL);
+ return devm_kstrdup(dev, name, GFP_KERNEL);
}
/*
@@ -2505,38 +2483,38 @@ static inline char *fmt_multiple_name(struct device *dev,
return NULL;
}
- return kstrdup(dai_drv->name, GFP_KERNEL);
+ return devm_kstrdup(dev, dai_drv->name, GFP_KERNEL);
}
-/**
- * snd_soc_unregister_dai - Unregister DAIs from the ASoC core
- *
- * @component: The component for which the DAIs should be unregistered
- */
-static void snd_soc_unregister_dais(struct snd_soc_component *component)
+void snd_soc_unregister_dai(struct snd_soc_dai *dai)
{
- struct snd_soc_dai *dai, *_dai;
-
- for_each_component_dais_safe(component, dai, _dai) {
- dev_dbg(component->dev, "ASoC: Unregistered DAI '%s'\n",
- dai->name);
- list_del(&dai->list);
- kfree(dai->name);
- kfree(dai);
- }
+ dev_dbg(dai->dev, "ASoC: Unregistered DAI '%s'\n", dai->name);
+ list_del(&dai->list);
}
+EXPORT_SYMBOL_GPL(snd_soc_unregister_dai);
-/* Create a DAI and add it to the component's DAI list */
-static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv,
- bool legacy_dai_naming)
+/**
+ * snd_soc_register_dai - Register a DAI dynamically & create its widgets
+ *
+ * @component: The component the DAIs are registered for
+ * @dai_drv: DAI driver to use for the DAI
+ *
+ * Topology can use this API to register DAIs when probing a component.
+ * These DAIs' widgets will be freed in the card cleanup and the
+ * DAIs will be freed in the component cleanup.
+ */
+struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming)
{
struct device *dev = component->dev;
struct snd_soc_dai *dai;
dev_dbg(dev, "ASoC: dynamically register DAI %s\n", dev_name(dev));
- dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
+ lockdep_assert_held(&client_mutex);
+
+ dai = devm_kzalloc(dev, sizeof(*dai), GFP_KERNEL);
if (dai == NULL)
return NULL;
@@ -2558,10 +2536,8 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
else
dai->id = component->num_dai;
}
- if (dai->name == NULL) {
- kfree(dai);
+ if (!dai->name)
return NULL;
- }
dai->component = component;
dai->dev = dev;
@@ -2578,6 +2554,19 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
}
/**
+ * snd_soc_unregister_dai - Unregister DAIs from the ASoC core
+ *
+ * @component: The component for which the DAIs should be unregistered
+ */
+static void snd_soc_unregister_dais(struct snd_soc_component *component)
+{
+ struct snd_soc_dai *dai, *_dai;
+
+ for_each_component_dais_safe(component, dai, _dai)
+ snd_soc_unregister_dai(dai);
+}
+
+/**
* snd_soc_register_dais - Register a DAI with the ASoC core
*
* @component: The component the DAIs are registered for
@@ -2588,16 +2577,12 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
size_t count)
{
- struct device *dev = component->dev;
struct snd_soc_dai *dai;
unsigned int i;
int ret;
- dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count);
-
for (i = 0; i < count; i++) {
-
- dai = soc_add_dai(component, dai_drv + i, count == 1 &&
+ dai = snd_soc_register_dai(component, dai_drv + i, count == 1 &&
!component->driver->non_legacy_dai_naming);
if (dai == NULL) {
ret = -ENOMEM;
@@ -2613,49 +2598,6 @@ err:
return ret;
}
-/**
- * snd_soc_register_dai - Register a DAI dynamically & create its widgets
- *
- * @component: The component the DAIs are registered for
- * @dai_drv: DAI driver to use for the DAI
- *
- * Topology can use this API to register DAIs when probing a component.
- * These DAIs's widgets will be freed in the card cleanup and the DAIs
- * will be freed in the component cleanup.
- */
-int snd_soc_register_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv)
-{
- struct snd_soc_dapm_context *dapm =
- snd_soc_component_get_dapm(component);
- struct snd_soc_dai *dai;
- int ret;
-
- if (dai_drv->dobj.type != SND_SOC_DOBJ_PCM) {
- dev_err(component->dev, "Invalid dai type %d\n",
- dai_drv->dobj.type);
- return -EINVAL;
- }
-
- lockdep_assert_held(&client_mutex);
- dai = soc_add_dai(component, dai_drv, false);
- if (!dai)
- return -ENOMEM;
-
- /*
- * Create the DAI widgets here. After adding DAIs, topology may
- * also add routes that need these widgets as source or sink.
- */
- ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
- if (ret != 0) {
- dev_err(component->dev,
- "Failed to create DAI widgets %d\n", ret);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_register_dai);
-
static int snd_soc_component_initialize(struct snd_soc_component *component,
const struct snd_soc_component_driver *driver, struct device *dev)
{
@@ -2726,40 +2668,6 @@ EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap);
#endif
-static void snd_soc_component_add(struct snd_soc_component *component)
-{
- mutex_lock(&client_mutex);
-
- if (!component->driver->write && !component->driver->read) {
- if (!component->regmap)
- component->regmap = dev_get_regmap(component->dev,
- NULL);
- if (component->regmap)
- snd_soc_component_setup_regmap(component);
- }
-
- /* see for_each_component */
- list_add(&component->list, &component_list);
-
- mutex_unlock(&client_mutex);
-}
-
-static void snd_soc_component_cleanup(struct snd_soc_component *component)
-{
- snd_soc_unregister_dais(component);
- kfree(component->name);
-}
-
-static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
-{
- struct snd_soc_card *card = component->card;
-
- if (card)
- snd_soc_unbind_card(card, false);
-
- list_del(&component->list);
-}
-
#define ENDIANNESS_MAP(name) \
(SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE)
static u64 endianness_format_map[] = {
@@ -2804,6 +2712,18 @@ static void snd_soc_try_rebind_card(void)
list_del(&card->list);
}
+static void snd_soc_del_component_unlocked(struct snd_soc_component *component)
+{
+ struct snd_soc_card *card = component->card;
+
+ snd_soc_unregister_dais(component);
+
+ if (card)
+ snd_soc_unbind_card(card, false);
+
+ list_del(&component->list);
+}
+
int snd_soc_add_component(struct device *dev,
struct snd_soc_component *component,
const struct snd_soc_component_driver *component_driver,
@@ -2813,6 +2733,8 @@ int snd_soc_add_component(struct device *dev,
int ret;
int i;
+ mutex_lock(&client_mutex);
+
ret = snd_soc_component_initialize(component, component_driver, dev);
if (ret)
goto err_free;
@@ -2830,14 +2752,26 @@ int snd_soc_add_component(struct device *dev,
goto err_cleanup;
}
- snd_soc_component_add(component);
- snd_soc_try_rebind_card();
+ if (!component->driver->write && !component->driver->read) {
+ if (!component->regmap)
+ component->regmap = dev_get_regmap(component->dev,
+ NULL);
+ if (component->regmap)
+ snd_soc_component_setup_regmap(component);
+ }
- return 0;
+ /* see for_each_component */
+ list_add(&component->list, &component_list);
err_cleanup:
- snd_soc_component_cleanup(component);
+ if (ret < 0)
+ snd_soc_del_component_unlocked(component);
err_free:
+ mutex_unlock(&client_mutex);
+
+ if (ret == 0)
+ snd_soc_try_rebind_card();
+
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_add_component);
@@ -2864,62 +2798,21 @@ EXPORT_SYMBOL_GPL(snd_soc_register_component);
*
* @dev: The device to unregister
*/
-static int __snd_soc_unregister_component(struct device *dev)
-{
- struct snd_soc_component *component;
- int found = 0;
-
- mutex_lock(&client_mutex);
- for_each_component(component) {
- if (dev != component->dev)
- continue;
-
- snd_soc_tplg_component_remove(component,
- SND_SOC_TPLG_INDEX_ALL);
- snd_soc_component_del_unlocked(component);
- found = 1;
- break;
- }
- mutex_unlock(&client_mutex);
-
- if (found)
- snd_soc_component_cleanup(component);
-
- return found;
-}
-
void snd_soc_unregister_component(struct device *dev)
{
- while (__snd_soc_unregister_component(dev))
- ;
-}
-EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
-
-struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
- const char *driver_name)
-{
struct snd_soc_component *component;
- struct snd_soc_component *ret;
- ret = NULL;
mutex_lock(&client_mutex);
- for_each_component(component) {
- if (dev != component->dev)
- continue;
-
- if (driver_name &&
- (driver_name != component->driver->name) &&
- (strcmp(component->driver->name, driver_name) != 0))
- continue;
+ while (1) {
+ component = snd_soc_lookup_component_nolocked(dev, NULL);
+ if (!component)
+ break;
- ret = component;
- break;
+ snd_soc_del_component_unlocked(component);
}
mutex_unlock(&client_mutex);
-
- return ret;
}
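The rewritten snd_soc_unregister_component() drains every component registered against the device in a single critical section, instead of the old lock/unlock-per-iteration loop. The drain idiom on a plain linked list, as a userspace sketch with illustrative types and names:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int dev;		/* stand-in for component->dev */
	struct node *next;
};

static struct node *head;

static struct node *lookup(int dev)
{
	struct node *n;

	for (n = head; n; n = n->next)
		if (n->dev == dev)
			return n;
	return NULL;
}

static void del(struct node *victim)
{
	struct node **pp;

	for (pp = &head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			free(victim);
			return;
		}
	}
}

static void unregister_all(int dev)
{
	struct node *n;

	/* a lock would be taken once here... */
	while (1) {
		n = lookup(dev);	/* find the next match... */
		if (!n)
			break;
		del(n);			/* ...drop it, then rescan */
	}
	/* ...and released once here */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {	/* two nodes on dev 1, one on dev 2 */
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->dev = (i == 2) ? 2 : 1;
		n->next = head;
		head = n;
	}
	unregister_all(1);
	printf("remaining head dev: %d\n", head->dev);	/* 2 */
	return 0;
}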
-EXPORT_SYMBOL_GPL(snd_soc_lookup_component);
+EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
/* Retrieve a card's name from device tree */
int snd_soc_of_parse_card_name(struct snd_soc_card *card,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 5552c66ca642..a428ff393ea2 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -75,12 +75,10 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
-static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
int (*prepare_slave_config)(struct snd_pcm_substream *substream,
@@ -109,21 +107,16 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
}
-static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substream)
+static int
+dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
struct dma_chan *chan = pcm->chan[substream->stream];
struct snd_dmaengine_dai_dma_data *dma_data;
- struct dma_slave_caps dma_caps;
struct snd_pcm_hardware hw;
- u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- snd_pcm_format_t i;
int ret;
if (pcm->config && pcm->config->pcm_hardware)
@@ -145,82 +138,53 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
hw.info |= SNDRV_PCM_INFO_BATCH;
- ret = dma_get_slave_caps(chan, &dma_caps);
- if (ret == 0) {
- if (dma_caps.cmd_pause && dma_caps.cmd_resume)
- hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
- if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
- hw.info |= SNDRV_PCM_INFO_BATCH;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- addr_widths = dma_caps.dst_addr_widths;
- else
- addr_widths = dma_caps.src_addr_widths;
- }
-
- /*
- * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
- * hw.formats set to 0, meaning no restrictions are in place.
- * In this case it's the responsibility of the DAI driver to
- * provide the supported format information.
- */
- if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
- /*
- * Prepare formats mask for valid/allowed sample types. If the
- * dma does not have support for the given physical word size,
- * it needs to be masked out so user space can not use the
- * format which produces corrupted audio.
- * In case the dma driver does not implement the slave_caps the
- * default assumption is that it supports 1, 2 and 4 bytes
- * widths.
- */
- for (i = SNDRV_PCM_FORMAT_FIRST; i <= SNDRV_PCM_FORMAT_LAST; i++) {
- int bits = snd_pcm_format_physical_width(i);
-
- /*
- * Enable only samples with DMA supported physical
- * widths
- */
- switch (bits) {
- case 8:
- case 16:
- case 24:
- case 32:
- case 64:
- if (addr_widths & (1 << (bits / 8)))
- hw.formats |= pcm_format_to_bits(i);
- break;
- default:
- /* Unsupported types */
- break;
- }
- }
+ ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream,
+ dma_data,
+ &hw,
+ chan);
+ if (ret)
+ return ret;
return snd_soc_set_runtime_hwparams(substream, &hw);
}
-static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
+static int dmaengine_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct dma_chan *chan = pcm->chan[substream->stream];
int ret;
- ret = dmaengine_pcm_set_runtime_hwparams(substream);
+ ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
if (ret)
return ret;
return snd_dmaengine_pcm_open(substream, chan);
}
+static int dmaengine_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_dmaengine_pcm_close(substream);
+}
+
+static int dmaengine_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int dmaengine_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
+{
+ return snd_dmaengine_pcm_trigger(substream, cmd);
+}
+
static struct dma_chan *dmaengine_pcm_compat_request_channel(
+ struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_substream *substream)
{
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
struct snd_dmaengine_dai_dma_data *dma_data;
dma_filter_fn fn = NULL;
@@ -258,10 +222,9 @@ static bool dmaengine_pcm_can_report_residue(struct device *dev,
return true;
}
-static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int dmaengine_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
const struct snd_dmaengine_pcm_config *config = pcm->config;
struct device *dev = component->dev;
@@ -288,8 +251,8 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
config->chan_names[i]);
if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
- pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
- substream);
+ pcm->chan[i] = dmaengine_pcm_compat_request_channel(
+ component, rtd, substream);
}
if (!pcm->chan[i]) {
@@ -318,11 +281,9 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
static snd_pcm_uframes_t dmaengine_pcm_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
@@ -331,13 +292,11 @@ static snd_pcm_uframes_t dmaengine_pcm_pointer(
return snd_dmaengine_pcm_pointer(substream);
}
-static int dmaengine_copy_user(struct snd_pcm_substream *substream,
+static int dmaengine_copy_user(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
void __user *buf, unsigned long bytes)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, SND_DMAENGINE_PCM_DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
int (*process)(struct snd_pcm_substream *substream,
@@ -365,39 +324,31 @@ static int dmaengine_copy_user(struct snd_pcm_substream *substream,
return 0;
}
-static const struct snd_pcm_ops dmaengine_pcm_ops = {
+static const struct snd_soc_component_driver dmaengine_pcm_component = {
+ .name = SND_DMAENGINE_PCM_DRV_NAME,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
.open = dmaengine_pcm_open,
- .close = snd_dmaengine_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .close = dmaengine_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = dmaengine_pcm_hw_params,
- .hw_free = snd_pcm_lib_free_pages,
- .trigger = snd_dmaengine_pcm_trigger,
+ .hw_free = dmaengine_pcm_hw_free,
+ .trigger = dmaengine_pcm_trigger,
.pointer = dmaengine_pcm_pointer,
+ .pcm_construct = dmaengine_pcm_new,
};
-static const struct snd_pcm_ops dmaengine_pcm_process_ops = {
+static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
+ .name = SND_DMAENGINE_PCM_DRV_NAME,
+ .probe_order = SND_SOC_COMP_ORDER_LATE,
.open = dmaengine_pcm_open,
- .close = snd_dmaengine_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .close = dmaengine_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = dmaengine_pcm_hw_params,
- .hw_free = snd_pcm_lib_free_pages,
- .trigger = snd_dmaengine_pcm_trigger,
+ .hw_free = dmaengine_pcm_hw_free,
+ .trigger = dmaengine_pcm_trigger,
.pointer = dmaengine_pcm_pointer,
.copy_user = dmaengine_copy_user,
-};
-
-static const struct snd_soc_component_driver dmaengine_pcm_component = {
- .name = SND_DMAENGINE_PCM_DRV_NAME,
- .probe_order = SND_SOC_COMP_ORDER_LATE,
- .ops = &dmaengine_pcm_ops,
- .pcm_new = dmaengine_pcm_new,
-};
-
-static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
- .name = SND_DMAENGINE_PCM_DRV_NAME,
- .probe_order = SND_SOC_COMP_ORDER_LATE,
- .ops = &dmaengine_pcm_process_ops,
- .pcm_new = dmaengine_pcm_new,
+ .pcm_construct = dmaengine_pcm_new,
};
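The dmaengine PCM driver is converted here from the old snd_pcm_ops table, where each callback had to recover its component via snd_soc_rtdcom_lookup(), to component-driver callbacks that receive the component as an explicit first argument. The shape of that refactor, reduced to plain C with all names illustrative:

#include <stdio.h>

struct component { const char *name; };

/* New style: the core threads the owning component through every
 * callback, so no per-call lookup by driver name is needed. */
struct ops {
	int (*open)(struct component *c, int stream);
	int (*close)(struct component *c, int stream);
};

static int my_open(struct component *c, int stream)
{
	printf("%s: open stream %d\n", c->name, stream);
	return 0;
}

static int my_close(struct component *c, int stream)
{
	printf("%s: close stream %d\n", c->name, stream);
	return 0;
}

static const struct ops my_ops = {
	.open	= my_open,
	.close	= my_close,
};

int main(void)
{
	struct component c = { "dmaengine-pcm" };

	/* the core owns the component pointer and passes it down */
	my_ops.open(&c, 0);
	my_ops.close(&c, 0);
	return 0;
}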
static const char * const dmaengine_pcm_dma_channel_names[] = {
@@ -436,7 +387,7 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
name = dmaengine_pcm_dma_channel_names[i];
if (config && config->chan_names[i])
name = config->chan_names[i];
- chan = dma_request_slave_channel_reason(dev, name);
+ chan = dma_request_chan(dev, name);
if (IS_ERR(chan)) {
if (PTR_ERR(chan) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index a71d2340eb05..b5748dcd490f 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -82,10 +82,9 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
unsigned int sync = 0;
int enable;
- trace_snd_soc_jack_report(jack, mask, status);
-
if (!jack)
return;
+ trace_snd_soc_jack_report(jack, mask, status);
dapm = &jack->card->dapm;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index f4dc3d445aae..652657dc6809 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -592,23 +592,16 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
int snd_soc_limit_volume(struct snd_soc_card *card,
const char *name, int max)
{
- struct snd_card *snd_card = card->snd_card;
struct snd_kcontrol *kctl;
struct soc_mixer_control *mc;
- int found = 0;
int ret = -EINVAL;
/* Sanity check for name and max */
if (unlikely(!name || max <= 0))
return -EINVAL;
- list_for_each_entry(kctl, &snd_card->controls, list) {
- if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
- found = 1;
- break;
- }
- }
- if (found) {
+ kctl = snd_soc_card_get_kcontrol(card, name);
+ if (kctl) {
mc = (struct soc_mixer_control *)kctl->private_value;
if (max <= mc->max) {
mc->platform_max = max;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index b600d3eaaf5c..76b7ee637e86 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -118,11 +118,8 @@ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd)
if (!rtd->pmdown_time || rtd->dai_link->ignore_pmdown_time)
return true;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component)
ignore &= !component->driver->use_pmdown_time;
- }
return ignore;
}
@@ -435,8 +432,7 @@ static int soc_pcm_components_open(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
int ret = 0;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
+ for_each_rtd_components(rtd, rtdcom, component) {
*last = component;
ret = snd_soc_component_module_get_when_open(component);
@@ -467,9 +463,7 @@ static int soc_pcm_components_close(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
int ret = 0;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component == last)
break;
@@ -500,9 +494,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
for_each_rtd_codec_dai(rtd, i, codec_dai)
pinctrl_pm_select_default_state(codec_dai->dev);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
pm_runtime_get_sync(component->dev);
}
@@ -625,9 +617,7 @@ component_err:
out:
mutex_unlock(&rtd->card->pcm_mutex);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
pm_runtime_mark_last_busy(component->dev);
pm_runtime_put_autosuspend(component->dev);
}
@@ -740,9 +730,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
mutex_unlock(&rtd->card->pcm_mutex);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
pm_runtime_mark_last_busy(component->dev);
pm_runtime_put_autosuspend(component->dev);
}
@@ -782,9 +770,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
ret = snd_soc_component_prepare(component, substream);
if (ret < 0) {
dev_err(component->dev,
@@ -849,9 +835,7 @@ static int soc_pcm_components_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
int ret = 0;
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
if (component == last)
break;
@@ -877,6 +861,11 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
int i, ret = 0;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
+
+ ret = soc_pcm_params_symmetry(substream, params);
+ if (ret)
+ goto out;
+
if (rtd->dai_link->ops->hw_params) {
ret = rtd->dai_link->ops->hw_params(substream, params);
if (ret < 0) {
@@ -945,9 +934,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
snd_soc_dapm_update_dai(substream, params, cpu_dai);
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
-
+ for_each_rtd_components(rtd, rtdcom, component) {
ret = snd_soc_component_hw_params(component, substream, params);
if (ret < 0) {
dev_err(component->dev,
@@ -958,9 +945,6 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
component = NULL;
- ret = soc_pcm_params_symmetry(substream, params);
- if (ret)
- goto component_err;
out:
mutex_unlock(&rtd->card->pcm_mutex);
return ret;
@@ -1047,7 +1031,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
@@ -1056,16 +1040,42 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_soc_dai *codec_dai;
int i, ret;
+ if (rtd->dai_link->ops->trigger) {
+ ret = rtd->dai_link->ops->trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ for_each_rtd_components(rtd, rtdcom, component) {
+ ret = snd_soc_component_trigger(component, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
+ if (ret < 0)
+ return ret;
+
for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_trigger(codec_dai, substream, cmd);
if (ret < 0)
return ret;
}
- for_each_rtdcom(rtd, rtdcom) {
- component = rtdcom->component;
+ return 0;
+}
- ret = snd_soc_component_trigger(component, substream, cmd);
+static int soc_pcm_trigger_stop(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_component *component;
+ struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
+ int i, ret;
+
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ ret = snd_soc_dai_trigger(codec_dai, substream, cmd);
if (ret < 0)
return ret;
}
@@ -1074,6 +1084,12 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
if (ret < 0)
return ret;
+ for_each_rtd_components(rtd, rtdcom, component) {
+ ret = snd_soc_component_trigger(component, substream, cmd);
+ if (ret < 0)
+ return ret;
+ }
+
if (rtd->dai_link->ops->trigger) {
ret = rtd->dai_link->ops->trigger(substream, cmd);
if (ret < 0)
@@ -1083,6 +1099,28 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
+static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = soc_pcm_trigger_start(substream, cmd);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = soc_pcm_trigger_stop(substream, cmd);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
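Splitting the trigger into start and stop halves encodes a symmetry rule: link ops, components, the CPU DAI and the codec DAIs are started in one order and stopped in exactly the reverse order, so whatever was brought up last is torn down first. A compressed illustration of that ordering (stage names are illustrative):

#include <stdio.h>

static const char *stages[] = {
	"link ops", "components", "cpu dai", "codec dais"
};
#define NSTAGES 4

static void trigger_start(void)
{
	int i;

	for (i = 0; i < NSTAGES; i++)		/* forward on start */
		printf("start: %s\n", stages[i]);
}

static void trigger_stop(void)
{
	int i;

	for (i = NSTAGES - 1; i >= 0; i--)	/* reverse on stop */
		printf("stop:  %s\n", stages[i]);
}

int main(void)
{
	trigger_start();
	trigger_stop();
	return 0;
}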
+
static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
int cmd)
{
@@ -1146,7 +1184,9 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
{
struct snd_soc_dpcm *dpcm;
unsigned long flags;
+#ifdef CONFIG_DEBUG_FS
char *name;
+#endif
/* only add new dpcms */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1385,6 +1425,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dapm_widget *widget;
struct snd_soc_dai *dai;
int prune = 0;
+ int do_prune;
/* Destroy any old FE <--> BE connections */
for_each_dpcm_be(fe, stream, dpcm) {
@@ -1398,13 +1439,16 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
continue;
/* is there a valid CODEC DAI widget for this BE */
+ do_prune = 1;
for_each_rtd_codec_dai(dpcm->be, i, dai) {
widget = dai_get_widget(dai, stream);
/* prune the BE if it's no longer in our active list */
if (widget && widget_in_list(list, widget))
- continue;
+ do_prune = 0;
}
+ if (!do_prune)
+ continue;
dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n",
stream ? "capture" : "playback",
@@ -2289,42 +2333,81 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
}
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
+static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
+ int cmd, bool fe_first)
+{
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
+ int ret;
+
+ /* call trigger on the frontend before the backend. */
+ if (fe_first) {
+ dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
+
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ return ret;
+ }
+
+ /* call trigger on the frontend after the backend. */
+ ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
+ fe->dai_link->name, cmd);
+
+ ret = soc_pcm_trigger(substream, cmd);
+
+ return ret;
+}
+
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
- int stream = substream->stream, ret;
+ int stream = substream->stream;
+ int ret = 0;
enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
switch (trigger) {
case SND_SOC_DPCM_TRIGGER_PRE:
- /* call trigger on the frontend before the backend. */
-
- dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_POST:
- /* call trigger on the frontend after the backend. */
-
- ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
}
-
- dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
- fe->dai_link->name, cmd);
-
- ret = soc_pcm_trigger(substream, cmd);
break;
case SND_SOC_DPCM_TRIGGER_BESPOKE:
/* bespoke trigger() - handles both FE and BEs */
@@ -2333,10 +2416,6 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
fe->dai_link->name, cmd);
ret = soc_pcm_bespoke_trigger(substream, cmd);
- if (ret < 0) {
- dev_err(fe->dev,"ASoC: trigger FE failed %d\n", ret);
- goto out;
- }
break;
default:
dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
@@ -2345,6 +2424,12 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
goto out;
}
+ if (ret < 0) {
+ dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
+ cmd, ret);
+ goto out;
+ }
+
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -2809,21 +2894,13 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
return ret;
}
-static void soc_pcm_private_free(struct snd_pcm *pcm)
-{
- struct snd_soc_pcm_runtime *rtd = pcm->private_data;
-
- /* need to sync the delayed work before releasing resources */
- flush_delayed_work(&rtd->delayed_work);
- snd_soc_pcm_component_free(pcm);
-}
-
/* create a new pcm */
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_rtdcom_list *rtdcom;
+ struct snd_soc_component *component;
struct snd_pcm *pcm;
char new_name[64];
int ret = 0, playback = 0, capture = 0;
@@ -2923,7 +3000,6 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
rtd->ops.hw_free = dpcm_fe_dai_hw_free;
rtd->ops.close = dpcm_fe_dai_close;
rtd->ops.pointer = soc_pcm_pointer;
- rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
} else {
rtd->ops.open = soc_pcm_open;
rtd->ops.hw_params = soc_pcm_hw_params;
@@ -2932,20 +3008,20 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
rtd->ops.hw_free = soc_pcm_hw_free;
rtd->ops.close = soc_pcm_close;
rtd->ops.pointer = soc_pcm_pointer;
- rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
}
- for_each_rtdcom(rtd, rtdcom) {
- const struct snd_pcm_ops *ops = rtdcom->component->driver->ops;
-
- if (!ops)
- continue;
+ for_each_rtd_components(rtd, rtdcom, component) {
+ const struct snd_soc_component_driver *drv = component->driver;
- if (ops->copy_user)
+ if (drv->ioctl)
+ rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
+ if (drv->sync_stop)
+ rtd->ops.sync_stop = snd_soc_pcm_component_sync_stop;
+ if (drv->copy_user)
rtd->ops.copy_user = snd_soc_pcm_component_copy_user;
- if (ops->page)
+ if (drv->page)
rtd->ops.page = snd_soc_pcm_component_page;
- if (ops->mmap)
+ if (drv->mmap)
rtd->ops.mmap = snd_soc_pcm_component_mmap;
}
@@ -2955,13 +3031,12 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
if (capture)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
- ret = snd_soc_pcm_component_new(pcm);
+ ret = snd_soc_pcm_component_new(rtd);
if (ret < 0) {
dev_err(rtd->dev, "ASoC: pcm constructor failed: %d\n", ret);
return ret;
}
- pcm->private_free = soc_pcm_private_free;
pcm->no_device_suspend = true;
out:
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
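Note on the trigger rework above: the two near-identical PRE/POST sequences collapse into dpcm_dai_trigger_fe_be(), and the whole change reduces to one ordering rule. A condensed sketch of that rule (illustrative only, not code from the patch; "start" commands are START/RESUME/PAUSE_RELEASE, "stop" commands are STOP/SUSPEND/PAUSE_PUSH):

	/* Sketch: the fe_first value passed to dpcm_dai_trigger_fe_be(). */
	static bool fe_goes_first(enum snd_soc_dpcm_trigger order, bool start_cmd)
	{
		if (order == SND_SOC_DPCM_TRIGGER_PRE)
			return start_cmd;	/* FE first on start, last on stop */
		return !start_cmd;		/* TRIGGER_POST is the mirror image */
	}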
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 0fd032914a31..81d2af000a5c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1800,6 +1800,9 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
struct snd_soc_dai_driver *dai_drv;
struct snd_soc_pcm_stream *stream;
struct snd_soc_tplg_stream_caps *caps;
+ struct snd_soc_dai *dai;
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(tplg->comp);
int ret;
dai_drv = kzalloc(sizeof(struct snd_soc_dai_driver), GFP_KERNEL);
@@ -1842,7 +1845,19 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
/* register the DAI to the component */
- return snd_soc_register_dai(tplg->comp, dai_drv);
+ dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
+ if (!dai)
+ return -ENOMEM;
+
+ /* Create the DAI widgets here */
+ ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
+ if (ret != 0) {
+ dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
+ snd_soc_unregister_dai(dai);
+ return ret;
+ }
+
+ return ret;
}
static void set_link_flags(struct snd_soc_dai_link *link,
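One detail of the soc-topology.c hunk worth spelling out: snd_soc_register_dai() now returns the DAI rather than an error code, so the caller must unregister it again if DAPM widget creation fails. The unwind shape, as a minimal sketch (same calls as the patch, stripped of context):

	dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
	if (!dai)
		return -ENOMEM;

	ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
	if (ret != 0) {
		snd_soc_unregister_dai(dai);	/* undo the registration */
		return ret;
	}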
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 54dcece52b0c..2fd4562f5e63 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -63,7 +63,8 @@ static const struct snd_pcm_hardware dummy_dma_hardware = {
.periods_max = 128,
};
-static int dummy_dma_open(struct snd_pcm_substream *substream)
+static int dummy_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -74,13 +75,9 @@ static int dummy_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops snd_dummy_dma_ops = {
- .open = dummy_dma_open,
- .ioctl = snd_pcm_lib_ioctl,
-};
-
static const struct snd_soc_component_driver dummy_platform = {
- .ops = &snd_dummy_dma_ops,
+ .open = dummy_dma_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
};
static const struct snd_soc_component_driver dummy_codec = {
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index bb8036ae567e..71a0fc075a63 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -14,8 +14,6 @@ config SND_SOC_SOF_PCI
depends on PCI
select SND_SOC_SOF
select SND_SOC_ACPI if ACPI
- select SND_SOC_SOF_OPTIONS
- select SND_SOC_SOF_INTEL_PCI if SND_SOC_SOF_INTEL_TOPLEVEL
help
This adds support for PCI enumeration. This option is
required to enable Intel Skylake+ devices
@@ -27,8 +25,6 @@ config SND_SOC_SOF_ACPI
depends on ACPI || COMPILE_TEST
select SND_SOC_SOF
select SND_SOC_ACPI if ACPI
- select SND_SOC_SOF_OPTIONS
- select SND_SOC_SOF_INTEL_ACPI if SND_SOC_SOF_INTEL_TOPLEVEL
select IOSF_MBI if X86 && PCI
help
This adds support for ACPI enumeration. This option is required
@@ -40,19 +36,23 @@ config SND_SOC_SOF_OF
tristate "SOF OF enumeration support"
depends on OF || COMPILE_TEST
select SND_SOC_SOF
- select SND_SOC_SOF_OPTIONS
help
This adds support for Device Tree enumeration. This option is
required to enable i.MX8 devices.
Say Y if you need this option. If unsure select "N".
-config SND_SOC_SOF_OPTIONS
- tristate
+config SND_SOC_SOF_DEVELOPER_SUPPORT
+ bool "SOF developer options support"
+ depends on EXPERT
help
- This option is not user-selectable but automagically handled by
- 'select' statements at a higher level
+ This option unlocks SOF developer options for debug/performance/
+ code hardening.
+ Distributions should not select this option; only SOF development
+ teams should select it.
+ Say Y if you are involved in SOF development and need this option.
+ If not, select "N".
-if SND_SOC_SOF_OPTIONS
+if SND_SOC_SOF_DEVELOPER_SUPPORT
config SND_SOC_SOF_NOCODEC
tristate
@@ -64,6 +64,11 @@ config SND_SOC_SOF_NOCODEC_SUPPORT
option if no known codec is detected. This is typically only
enabled for developers or devices where the sound card is
controlled externally
+ This option is mutually exclusive with the Intel HDaudio support;
+ selecting it may have negative impacts and prevent e.g. microphone
+ functionality from being enabled on Intel CoffeeLake and later
+ platforms.
+ Distributions should not select this option!
Say Y if you need this nocodec fallback option
If unsure select "N".
@@ -142,6 +147,14 @@ config SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE
Say Y if you want to enable caching the memory windows.
If unsure, select "N".
+config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE
+ bool "SOF enable firmware trace"
+ help
+ The firmware trace can be enabled either at build-time with
+ this option, or dynamically by setting flags in the SOF core
+ module parameter (similar to dynamic debug).
+ If unsure, select "N".
+
config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
bool "SOF enable IPC flood test"
help
@@ -150,9 +163,17 @@ config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
Say Y if you want to enable IPC flood test.
If unsure, select "N".
+config SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT
+ bool "SOF retain DSP context on any FW exceptions"
+ help
+ This option keeps the DSP in D0 state so that firmware debug
+ information can be retained and dumped to userspace.
+ Say Y if you want to retain DSP context for FW exceptions.
+ If unsure, select "N".
+
endif ## SND_SOC_SOF_DEBUG
-endif ## SND_SOC_SOF_OPTIONS
+endif ## SND_SOC_SOF_DEVELOPER_SUPPORT
config SND_SOC_SOF
tristate
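The net effect of replacing SND_SOC_SOF_OPTIONS with the EXPERT-gated SND_SOC_SOF_DEVELOPER_SUPPORT bool is that every option inside the if/endif block evaluates to "n" on ordinary builds, and the C code compiles the corresponding paths away via IS_ENABLED(). A sketch (enable_fw_trace() is a hypothetical helper, used here only for illustration):

	/* IS_ENABLED(CONFIG_FOO) is constant 0 when the developer gate is
	 * off, so the compiler discards the whole branch. */
	if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE))
		enable_fw_trace();	/* hypothetical helper */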
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
index 2b8711eda362..7baf7f1507c3 100644
--- a/sound/soc/sof/control.c
+++ b/sound/soc/sof/control.c
@@ -11,8 +11,39 @@
/* Mixer Controls */
#include <linux/pm_runtime.h>
+#include <linux/leds.h>
#include "sof-priv.h"
+static void update_mute_led(struct snd_sof_control *scontrol,
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ unsigned int temp = 0;
+ unsigned int mask;
+ int i;
+
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+
+ for (i = 0; i < scontrol->num_channels; i++) {
+ if (ucontrol->value.integer.value[i]) {
+ temp |= mask;
+ break;
+ }
+ }
+
+ if (temp == scontrol->led_ctl.led_value)
+ return;
+
+ scontrol->led_ctl.led_value = temp;
+
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+ if (!scontrol->led_ctl.direction)
+ ledtrig_audio_set(LED_AUDIO_MUTE, temp ? LED_OFF : LED_ON);
+ else
+ ledtrig_audio_set(LED_AUDIO_MICMUTE, temp ? LED_OFF : LED_ON);
+#endif
+}
+
static inline u32 mixer_to_ipc(unsigned int value, u32 *volume_map, int size)
{
if (value >= size)
@@ -118,6 +149,9 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
cdata->chanv[i].value = value;
}
+ if (scontrol->led_ctl.use_led)
+ update_mute_led(scontrol, kcontrol, ucontrol);
+
/* notify DSP of mixer updates */
if (pm_runtime_active(sdev->dev))
snd_sof_ipc_set_get_comp_data(sdev->ipc, scontrol,
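The mute-LED plumbing above reduces the control's channels to a single on/off state and only touches the LED trigger when that state changes. Condensed into a sketch (illustrative, not the patch code):

	/* Any non-zero channel means "unmuted"; the audio LED trigger is
	 * driven with the inverted state (LED on == muted). */
	bool unmuted = false;
	int i;

	for (i = 0; i < scontrol->num_channels; i++)
		unmuted |= !!ucontrol->value.integer.value[i];

	if (!scontrol->led_ctl.direction)	/* playback: mute LED */
		ledtrig_audio_set(LED_AUDIO_MUTE, unmuted ? LED_OFF : LED_ON);
	else					/* capture: mic-mute LED */
		ledtrig_audio_set(LED_AUDIO_MICMUTE, unmuted ? LED_OFF : LED_ON);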
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 81f28f7ff1a0..805918d3bcc0 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -16,6 +16,11 @@
#include "sof-priv.h"
#include "ops.h"
+/* see SOF_DBG_ flags */
+int sof_core_debug;
+module_param_named(sof_debug, sof_core_debug, int, 0444);
+MODULE_PARM_DESC(sof_debug, "SOF core debug options (0x0 all off)");
+
/* SOF defaults if not provided by the platform in ms */
#define TIMEOUT_DEFAULT_IPC_MS 500
#define TIMEOUT_DEFAULT_BOOT_MS 2000
@@ -127,6 +132,19 @@ struct snd_sof_dai *snd_sof_find_dai(struct snd_sof_dev *sdev,
return NULL;
}
+bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_pcm *spcm;
+
+ list_for_each_entry(spcm, &sdev->pcm_list, list) {
+ if (spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].suspend_ignored ||
+ spcm->stream[SNDRV_PCM_STREAM_CAPTURE].suspend_ignored)
+ return true;
+ }
+
+ return false;
+}
+
/*
* FW Panic/fault handling.
*/
@@ -350,12 +368,20 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
goto fw_run_err;
}
- /* init DMA trace */
- ret = snd_sof_init_trace(sdev);
- if (ret < 0) {
- /* non fatal */
- dev_warn(sdev->dev,
- "warning: failed to initialize trace %d\n", ret);
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE) ||
+ (sof_core_debug & SOF_DBG_ENABLE_TRACE)) {
+ sdev->dtrace_is_supported = true;
+
+ /* init DMA trace */
+ ret = snd_sof_init_trace(sdev);
+ if (ret < 0) {
+ /* non fatal */
+ dev_warn(sdev->dev,
+ "warning: failed to initialize trace %d\n",
+ ret);
+ }
+ } else {
+ dev_dbg(sdev->dev, "SOF firmware trace disabled\n");
}
/* hereafter all FW boot flows are for PM reasons */
@@ -445,6 +471,9 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
/* initialize sof device */
sdev->dev = dev;
+ /* initialize default D0 sub-state */
+ sdev->d0_substate = SOF_DSP_D0I0;
+
sdev->pdata = plat_data;
sdev->first_boot = true;
dev_set_drvdata(dev, sdev);
@@ -453,7 +482,8 @@ int snd_sof_device_probe(struct device *dev, struct snd_sof_pdata *plat_data)
if (!sof_ops(sdev) || !sof_ops(sdev)->probe || !sof_ops(sdev)->run ||
!sof_ops(sdev)->block_read || !sof_ops(sdev)->block_write ||
!sof_ops(sdev)->send_msg || !sof_ops(sdev)->load_firmware ||
- !sof_ops(sdev)->ipc_msg_data || !sof_ops(sdev)->ipc_pcm_params)
+ !sof_ops(sdev)->ipc_msg_data || !sof_ops(sdev)->ipc_pcm_params ||
+ !sof_ops(sdev)->fw_ready)
return -EINVAL;
INIT_LIST_HEAD(&sdev->pcm_list);
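The new sof_debug module parameter is the run-time half of the debug switches introduced in the Kconfig hunk above; each feature test ORs the build-time IS_ENABLED() check with a bit in the parameter. A usage sketch (the SOF_DBG_* bit values live in sof-priv.h, which is not part of this patch):

	/* Run-time enablement, e.g. booting with snd_sof.sof_debug=<flags>;
	 * the parameter is 0444, so it cannot be changed after load. */
	if (sof_core_debug & SOF_DBG_ENABLE_TRACE)
		sdev->dtrace_is_supported = true;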
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index 5529e8eeca46..d2b3b99d3a20 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -463,3 +463,19 @@ void snd_sof_free_debug(struct snd_sof_dev *sdev)
debugfs_remove_recursive(sdev->debugfs_root);
}
EXPORT_SYMBOL_GPL(snd_sof_free_debug);
+
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev)
+{
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_RETAIN_DSP_CONTEXT) ||
+ (sof_core_debug & SOF_DBG_RETAIN_CTX)) {
+ /* should we prevent DSP entering D3? */
+ dev_info(sdev->dev, "info: preventing DSP entering D3 state to preserve context\n");
+ pm_runtime_get_noresume(sdev->dev);
+ }
+
+ /* dump vital information to the logs */
+ snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
+ snd_sof_ipc_dump(sdev);
+ snd_sof_trace_notify_for_error(sdev);
+}
+EXPORT_SYMBOL(snd_sof_handle_fw_exception);
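snd_sof_handle_fw_exception() centralizes what used to be open-coded dump sequences. For context, the IPC timeout path in sound/soc/sof/ipc.c (further down in this patch) now reduces to a single call:

	if (ret == 0) {
		dev_err(sdev->dev, "error: ipc timed out for 0x%x size %d\n",
			hdr->cmd, hdr->size);
		snd_sof_handle_fw_exception(ipc->sdev);
		ret = -ETIMEDOUT;
	}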
diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
index 5acae75f5750..bae4f7bf5f75 100644
--- a/sound/soc/sof/imx/Kconfig
+++ b/sound/soc/sof/imx/Kconfig
@@ -5,19 +5,23 @@ config SND_SOC_SOF_IMX_TOPLEVEL
depends on ARM64 || COMPILE_TEST
depends on SND_SOC_SOF_OF
help
- This adds support for Sound Open Firmware for NXP i.MX platforms.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for NXP i.MX platforms.
+ Say Y if you have such a device.
+ If unsure select "N".
if SND_SOC_SOF_IMX_TOPLEVEL
-config SND_SOC_SOF_IMX8
- tristate "SOF support for i.MX8"
+config SND_SOC_SOF_IMX8_SUPPORT
+ bool "SOF support for i.MX8"
depends on IMX_SCU
depends on IMX_DSP
help
- This adds support for Sound Open Firmware for NXP i.MX8 platforms
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for NXP i.MX8 platforms
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8
+ def_tristate SND_SOC_SOF_OF
+ depends on SND_SOC_SOF_IMX8_SUPPORT
endif ## SND_SOC_SOF_IMX_TOPLEVEL
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 2a22b18e5ec0..cfefcfd92798 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -388,6 +388,13 @@ struct snd_sof_dsp_ops sof_imx8_ops = {
/* DAI drivers */
.drv = imx8_dai,
.num_drv = 1, /* we have only 1 ESAI interface on i.MX8 */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP
};
EXPORT_SYMBOL(sof_imx8_ops);
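The new .hw_info field is consumed by sof_pcm_open() in the sound/soc/sof/pcm.c hunk near the end of this patch: instead of hard-coding the ALSA info flags, the core now copies the platform's mask, which is how i.MX8 advertises NO_PERIOD_WAKEUP here while the Baytrail/Broadwell ops below add SNDRV_PCM_INFO_BATCH instead. From that hunk:

	/* set runtime config */
	runtime->hw.info = ops->hw_info;	/* platform-specific */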
diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
index d62f51d33be1..cc09bb606f7d 100644
--- a/sound/soc/sof/intel/Kconfig
+++ b/sound/soc/sof/intel/Kconfig
@@ -10,7 +10,7 @@ config SND_SOC_SOF_INTEL_TOPLEVEL
if SND_SOC_SOF_INTEL_TOPLEVEL
config SND_SOC_SOF_INTEL_ACPI
- tristate
+ def_tristate SND_SOC_SOF_ACPI
select SND_SOC_SOF_BAYTRAIL if SND_SOC_SOF_BAYTRAIL_SUPPORT
select SND_SOC_SOF_BROADWELL if SND_SOC_SOF_BROADWELL_SUPPORT
help
@@ -18,7 +18,7 @@ config SND_SOC_SOF_INTEL_ACPI
'select' statements at a higher level
config SND_SOC_SOF_INTEL_PCI
- tristate
+ def_tristate SND_SOC_SOF_PCI
select SND_SOC_SOF_MERRIFIELD if SND_SOC_SOF_MERRIFIELD_SUPPORT
select SND_SOC_SOF_APOLLOLAKE if SND_SOC_SOF_APOLLOLAKE_SUPPORT
select SND_SOC_SOF_GEMINILAKE if SND_SOC_SOF_GEMINILAKE_SUPPORT
@@ -29,6 +29,7 @@ config SND_SOC_SOF_INTEL_PCI
select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT
select SND_SOC_SOF_TIGERLAKE if SND_SOC_SOF_TIGERLAKE_SUPPORT
select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
+ select SND_SOC_SOF_JASPERLAKE if SND_SOC_SOF_JASPERLAKE_SUPPORT
help
This option is not user-selectable but automagically handled by
'select' statements at a higher level
@@ -36,7 +37,7 @@ config SND_SOC_SOF_INTEL_PCI
config SND_SOC_SOF_INTEL_HIFI_EP_IPC
tristate
help
- This option is not user-selectable but automagically handled by
+ This option is not user-selectable but automagically handled by
'select' statements at a higher level
config SND_SOC_SOF_INTEL_ATOM_HIFI_EP
@@ -61,10 +62,18 @@ if SND_SOC_SOF_INTEL_ACPI
config SND_SOC_SOF_BAYTRAIL_SUPPORT
bool "SOF support for Baytrail, Braswell and Cherrytrail"
+ depends on SND_SST_ATOM_HIFI2_PLATFORM_ACPI=n
help
This adds support for Sound Open Firmware for Intel(R) platforms
using the Baytrail, Braswell or Cherrytrail processors.
- Say Y if you have such a device.
+ This option is mutually exclusive with the Atom/SST and Baytrail
+ legacy drivers. If you want to enable SOF on Baytrail/Cherrytrail,
+ you need to deselect those options first.
+ SOF does not support Baytrail-CR for now, so this option is not
+ recommended for distros. At some point all legacy drivers will be
+ deprecated but not before all userspace firmware/topology/UCM files
+ are made available to downstream distros.
+ Say Y if you want to enable SOF on Baytrail/Cherrytrail.
If unsure select "N".
config SND_SOC_SOF_BAYTRAIL
@@ -76,10 +85,18 @@ config SND_SOC_SOF_BAYTRAIL
config SND_SOC_SOF_BROADWELL_SUPPORT
bool "SOF support for Broadwell"
+ depends on SND_SOC_INTEL_HASWELL=n
help
This adds support for Sound Open Firmware for Intel(R) platforms
using the Broadwell processors.
- Say Y if you have such a device.
+ This option is mutually exclusive with the Haswell/Broadwell legacy
+ driver. If you want to enable SOF on Broadwell you need to deselect
+ the legacy driver first.
+ SOF does not fully support Broadwell yet, so this option is not
+ recommended for distros. At some point all legacy drivers will be
+ deprecated but not before all userspace firmware/topology/UCM files
+ are made available to downstream distros.
+ Say Y if you want to enable SOF on Broadwell.
If unsure select "N".
config SND_SOC_SOF_BROADWELL
@@ -217,31 +234,46 @@ config SND_SOC_SOF_COMETLAKE_H_SUPPORT
config SND_SOC_SOF_TIGERLAKE_SUPPORT
bool "SOF support for Tigerlake"
help
- This adds support for Sound Open Firmware for Intel(R) platforms
- using the Tigerlake processors.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Tigerlake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
config SND_SOC_SOF_TIGERLAKE
tristate
select SND_SOC_SOF_HDA_COMMON
help
- This option is not user-selectable but automagically handled by
+ This option is not user-selectable but automagically handled by
'select' statements at a higher level
config SND_SOC_SOF_ELKHARTLAKE_SUPPORT
bool "SOF support for ElkhartLake"
help
- This adds support for Sound Open Firmware for Intel(R) platforms
- using the ElkhartLake processors.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the ElkhartLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
config SND_SOC_SOF_ELKHARTLAKE
tristate
select SND_SOC_SOF_HDA_COMMON
help
- This option is not user-selectable but automagically handled by
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_JASPERLAKE_SUPPORT
+ bool "SOF support for JasperLake"
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the JasperLake processors.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_JASPERLAKE
+ tristate
+ select SND_SOC_SOF_HDA_COMMON
+ help
+ This option is not user-selectable but automagically handled by
'select' statements at a higher level
config SND_SOC_SOF_HDA_COMMON
@@ -283,6 +315,16 @@ config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
Say Y if you want to enable DMI Link L1
If unsure, select "N".
+config SND_SOC_SOF_HDA_COMMON_HDMI_CODEC
+ bool "SOF common HDA HDMI codec driver"
+ depends on SND_SOC_SOF_HDA_LINK
+ depends on SND_HDA_CODEC_HDMI
+ help
+ This adds support for HDMI audio by using the common HDA
+ HDMI/DisplayPort codec driver.
+ Say Y if you want to use the common codec driver with SOF.
+ If unsure select "Y".
+
endif ## SND_SOC_SOF_HDA_COMMON
config SND_SOC_SOF_HDA_LINK_BASELINE
@@ -296,7 +338,7 @@ config SND_SOC_SOF_HDA
tristate
select SND_HDA_EXT_CORE if SND_SOC_SOF_HDA_LINK
select SND_SOC_HDAC_HDA if SND_SOC_SOF_HDA_AUDIO_CODEC
- select SND_INTEL_NHLT if ACPI
+ select SND_INTEL_DSP_CONFIG
help
This option is not user-selectable but automagically handled by
'select' statements at a higher level
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
index 8dc7a5558da4..7daa8eb456c8 100644
--- a/sound/soc/sof/intel/apl.c
+++ b/sound/soc/sof/intel/apl.c
@@ -97,6 +97,14 @@ const struct snd_sof_dsp_ops sof_apl_ops = {
.runtime_resume = hda_dsp_runtime_resume,
.runtime_idle = hda_dsp_runtime_idle,
.set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};
EXPORT_SYMBOL(sof_apl_ops);
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index 80e2826fb447..141dad554764 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -247,7 +247,7 @@ static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[BDW_STACK_DUMP_SIZE];
- u32 status, panic;
+ u32 status, panic, imrx, imrd;
/* now try generic SOF status messages */
status = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IPCD);
@@ -256,6 +256,26 @@ static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
BDW_STACK_DUMP_SIZE);
snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
BDW_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
}
/*
@@ -571,7 +591,14 @@ const struct snd_sof_dsp_ops sof_bdw_ops = {
/* DAI drivers */
.drv = bdw_dai,
- .num_drv = ARRAY_SIZE(bdw_dai)
+ .num_drv = ARRAY_SIZE(bdw_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_bdw_ops);
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index a1e514f71739..2abf80b3eb52 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -145,7 +145,7 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[BYT_STACK_DUMP_SIZE];
- u32 status, panic;
+ u32 status, panic, imrd, imrx;
/* now try generic SOF status messages */
status = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IPCD);
@@ -154,6 +154,27 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
BYT_STACK_DUMP_SIZE);
snd_sof_get_status(sdev, status, panic, &xoops, &panic_info, stack,
BYT_STACK_DUMP_SIZE);
+
+ /* provide some context for firmware debug */
+ imrx = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRX);
+ imrd = snd_sof_dsp_read(sdev, BYT_DSP_BAR, SHIM_IMRD);
+ dev_err(sdev->dev,
+ "error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
+ (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
+ (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ dev_err(sdev->dev,
+ "error: mask host: pending %s complete %s raw 0x%8.8x\n",
+ (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
+ (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ dev_err(sdev->dev,
+ "error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
+ (status & SHIM_IPCD_BUSY) ? "yes" : "no",
+ (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ dev_err(sdev->dev,
+ "error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
+ (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
+ (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+
}
/*
@@ -511,6 +532,13 @@ const struct snd_sof_dsp_ops sof_tng_ops = {
/* DAI drivers */
.drv = byt_dai,
.num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_tng_ops);
@@ -672,6 +700,13 @@ const struct snd_sof_dsp_ops sof_byt_ops = {
/* DAI drivers */
.drv = byt_dai,
.num_drv = 3, /* we have only 3 SSPs on byt */
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_byt_ops);
@@ -732,6 +767,13 @@ const struct snd_sof_dsp_ops sof_cht_ops = {
.drv = byt_dai,
/* all 6 SSPs may be available for cherrytrail */
.num_drv = ARRAY_SIZE(byt_dai),
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_BATCH,
};
EXPORT_SYMBOL(sof_cht_ops);
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index 4ddd73762d81..0e1e265f3f3b 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -17,6 +17,7 @@
#include "../ops.h"
#include "hda.h"
+#include "hda-ipc.h"
static const struct snd_sof_debugfs_map cnl_dsp_debugfs[] = {
{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
@@ -150,14 +151,45 @@ static void cnl_ipc_dsp_done(struct snd_sof_dev *sdev)
CNL_DSP_REG_HIPCCTL_DONE);
}
+static bool cnl_compact_ipc_compress(struct snd_sof_ipc_msg *msg,
+ u32 *dr, u32 *dd)
+{
+ struct sof_ipc_pm_gate *pm_gate;
+
+ if (msg->header == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
+ pm_gate = msg->msg_data;
+
+ /* send the compact message via the primary register */
+ *dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;
+
+ /* send payload via the extended data register */
+ *dd = pm_gate->flags;
+
+ return true;
+ }
+
+ return false;
+}
+
static int cnl_ipc_send_msg(struct snd_sof_dev *sdev,
struct snd_sof_ipc_msg *msg)
{
- /* send the message */
- sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
- msg->msg_size);
- snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
- CNL_DSP_REG_HIPCIDR_BUSY);
+ u32 dr = 0;
+ u32 dd = 0;
+
+ if (cnl_compact_ipc_compress(msg, &dr, &dd)) {
+ /* send the message via IPC registers */
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDD,
+ dd);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY | dr);
+ } else {
+ /* send the message via mailbox */
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ snd_sof_dsp_write(sdev, HDA_DSP_BAR, CNL_DSP_REG_HIPCIDR,
+ CNL_DSP_REG_HIPCIDR_BUSY);
+ }
return 0;
}
@@ -255,6 +287,14 @@ const struct snd_sof_dsp_ops sof_cnl_ops = {
.runtime_resume = hda_dsp_runtime_resume,
.runtime_idle = hda_dsp_runtime_idle,
.set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
+ .set_power_state = hda_dsp_set_power_state,
+
+ /* ALSA HW info flags */
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};
EXPORT_SYMBOL(sof_cnl_ops);
@@ -327,3 +367,20 @@ const struct sof_intel_dsp_desc ehl_chip_info = {
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
};
EXPORT_SYMBOL(ehl_chip_info);
+
+const struct sof_intel_dsp_desc jsl_chip_info = {
+ /* Jasperlake */
+ .cores_num = 2,
+ .init_core_mask = 1,
+ .cores_mask = HDA_DSP_CORE_MASK(0) |
+ HDA_DSP_CORE_MASK(1),
+ .ipc_req = CNL_DSP_REG_HIPCIDR,
+ .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+ .ipc_ack = CNL_DSP_REG_HIPCIDA,
+ .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+ .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+ .rom_init_timeout = 300,
+ .ssp_count = ICL_SSP_COUNT,
+ .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL(jsl_chip_info);
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 3ca6795a89ba..827f84a0722e 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -84,6 +84,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
struct hdac_hda_priv *hda_priv;
+ struct snd_soc_acpi_mach_params *mach_params = NULL;
+ struct snd_sof_pdata *pdata = sdev->pdata;
#endif
struct hda_bus *hbus = sof_to_hbus(sdev);
struct hdac_device *hdev;
@@ -113,8 +115,19 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
if (ret < 0)
return ret;
- /* use legacy bus only for HDA codecs, idisp uses ext bus */
- if ((resp & 0xFFFF0000) != IDISP_VID_INTEL) {
+ if (pdata->machine)
+ mach_params = (struct snd_soc_acpi_mach_params *)
+ &pdata->machine->mach_params;
+
+ if ((resp & 0xFFFF0000) == IDISP_VID_INTEL)
+ hda_priv->need_display_power = true;
+
+ /*
+ * if common HDMI codec driver is not used, codec load
+ * is skipped here and hdac_hdmi is used instead
+ */
+ if ((mach_params && mach_params->common_hdmi_codec_drv) ||
+ (resp & 0xFFFF0000) != IDISP_VID_INTEL) {
hdev->type = HDA_DEV_LEGACY;
hda_codec_load_module(&hda_priv->codec);
}
@@ -155,7 +168,8 @@ int hda_codec_probe_bus(struct snd_sof_dev *sdev)
}
EXPORT_SYMBOL(hda_codec_probe_bus);
-#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
void hda_codec_i915_get(struct snd_sof_dev *sdev)
{
@@ -204,6 +218,6 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
}
EXPORT_SYMBOL(hda_codec_i915_exit);
-#endif /* CONFIG_SND_SOC_HDAC_HDMI */
+#endif
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index fb55a3c5afd0..4a4d318f97ff 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -19,6 +19,7 @@
#include <sound/hda_register.h>
#include "../ops.h"
#include "hda.h"
+#include "hda-ipc.h"
/*
* DSP Core control.
@@ -42,6 +43,12 @@ int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
((adspcs & reset) == reset),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
/* has core entered reset ? */
adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
@@ -77,6 +84,13 @@ int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_RESET_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
+
/* has core left reset ? */
adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
HDA_DSP_REG_ADSPCS);
@@ -151,8 +165,12 @@ int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
(adspcs & cpa) == cpa,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_RESET_TIMEOUT_US);
- if (ret < 0)
- dev_err(sdev->dev, "error: timeout on core powerup\n");
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+ return ret;
+ }
/* did core power up ? */
adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
@@ -171,17 +189,24 @@ int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
u32 adspcs;
+ int ret;
/* update bits */
snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
HDA_DSP_REG_ADSPCS,
HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);
- return snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
HDA_DSP_REG_ADSPCS, adspcs,
!(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
+ __func__);
+
+ return ret;
}
bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
@@ -282,6 +307,80 @@ void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}
+static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ int retry = HDA_DSP_REG_POLL_RETRY_COUNT;
+
+ while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
+ if (!retry--)
+ return -ETIMEDOUT;
+ usleep_range(10, 15);
+ }
+
+ return 0;
+}
+
+static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
+{
+ struct sof_ipc_pm_gate pm_gate;
+ struct sof_ipc_reply reply;
+
+ memset(&pm_gate, 0, sizeof(pm_gate));
+
+ /* configure pm_gate ipc message */
+ pm_gate.hdr.size = sizeof(pm_gate);
+ pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
+ pm_gate.flags = flags;
+
+ /* send pm_gate ipc to dsp */
+ return sof_ipc_tx_message(sdev->ipc, pm_gate.hdr.cmd, &pm_gate,
+ sizeof(pm_gate), &reply, sizeof(reply));
+}
+
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate)
+{
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ u32 flags;
+ int ret;
+ u8 value;
+
+ /* Write to D0I3C after Command-In-Progress bit is cleared */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
+ return ret;
+ }
+
+ /* Update D0I3C register */
+ value = d0_substate == SOF_DSP_D0I3 ? SOF_HDA_VS_D0I3C_I3 : 0;
+ snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);
+
+ /* Wait for cmd in progress to be cleared before exiting the function */
+ ret = hda_dsp_wait_d0i3c_done(sdev);
+ if (ret < 0) {
+ dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
+ return ret;
+ }
+
+ dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
+ snd_hdac_chip_readb(bus, VS_D0I3C));
+
+ if (d0_substate == SOF_DSP_D0I0)
+ flags = HDA_PM_PPG; /* prevent power gating in D0 */
+ else
+ flags = HDA_PM_NO_DMA_TRACE; /* disable DMA trace in D0I3 */
+
+ /* sending pm_gate IPC */
+ ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
+ if (ret < 0)
+ dev_err(sdev->dev,
+ "error: PM_GATE ipc error %d\n", ret);
+
+ return ret;
+}
+
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
@@ -379,6 +478,22 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
int hda_dsp_resume(struct snd_sof_dev *sdev)
{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
+
+ if (sdev->s0_suspend) {
+ /* restore L1SEN bit */
+ if (hda->l1_support_changed)
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN, 0);
+
+ /* restore and disable the system wakeup */
+ pci_restore_state(pci);
+ disable_irq_wake(pci->irq);
+ return 0;
+ }
+
/* init hda controller. DSP cores will be powered up during fw boot */
return hda_resume(sdev, false);
}
@@ -410,9 +525,25 @@ int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
int hda_dsp_suspend(struct snd_sof_dev *sdev)
{
+ struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct hdac_bus *bus = sof_to_bus(sdev);
+ struct pci_dev *pci = to_pci_dev(sdev->dev);
int ret;
+ if (sdev->s0_suspend) {
+ /* enable L1SEN to make sure the system can enter S0Ix */
+ hda->l1_support_changed =
+ snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+ HDA_VS_INTEL_EM2,
+ HDA_VS_INTEL_EM2_L1SEN,
+ HDA_VS_INTEL_EM2_L1SEN);
+
+ /* enable the system waking up via IPC IRQ */
+ enable_irq_wake(pci->irq);
+ pci_save_state(pci);
+ return 0;
+ }
+
/* stop hda controller and power dsp off */
ret = hda_suspend(sdev, false);
if (ret < 0) {
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index 6aae6f18b3dc..0fd2153c1769 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -83,10 +83,12 @@ void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
}
hdr = msg->msg_data;
- if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE)) {
+ if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
+ hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
/*
* memory windows are powered off before sending IPC reply,
- * so we can't read the mailbox for CTX_SAVE reply.
+ * so we can't read the mailbox for CTX_SAVE and PM_GATE
+ * replies.
*/
reply.error = 0;
reply.hdr.cmd = SOF_IPC_GLB_REPLY;
diff --git a/sound/soc/sof/intel/hda-ipc.h b/sound/soc/sof/intel/hda-ipc.h
new file mode 100644
index 000000000000..aef0ceac9803
--- /dev/null
+++ b/sound/soc/sof/intel/hda-ipc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ *
+ * Author: Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+#ifndef __SOF_INTEL_HDA_IPC_H
+#define __SOF_INTEL_HDA_IPC_H
+
+/*
+ * Primary register, mapped to
+ * - DIPCTDR (HIPCIDR) in sideband IPC (cAVS 1.8+)
+ * - DIPCT in cAVS 1.5 IPC
+ *
+ * Secondary register, mapped to:
+ * - DIPCTDD (HIPCIDD) in sideband IPC (cAVS 1.8+)
+ * - DIPCTE in cAVS 1.5 IPC
+ */
+
+/* Common bits in primary register */
+
+/* Reserved for doorbell */
+#define HDA_IPC_RSVD_31 BIT(31)
+/* Target, 0 - normal message, 1 - compact message(cAVS compatible) */
+#define HDA_IPC_MSG_COMPACT BIT(30)
+/* Direction, 0 - request, 1 - response */
+#define HDA_IPC_RSP BIT(29)
+
+#define HDA_IPC_TYPE_SHIFT 24
+#define HDA_IPC_TYPE_MASK GENMASK(28, 24)
+#define HDA_IPC_TYPE(x) ((x) << HDA_IPC_TYPE_SHIFT)
+
+#define HDA_IPC_PM_GATE HDA_IPC_TYPE(0x8U)
+
+/* Command specific payload bits in secondary register */
+
+/* Disable DMA tracing (0 - keep tracing, 1 - disable DMA trace) */
+#define HDA_PM_NO_DMA_TRACE BIT(4)
+/* Prevent clock gating (0 - cg allowed, 1 - DSP clock always on) */
+#define HDA_PM_PCG BIT(3)
+/* Prevent power gating (0 - deep power state transitions allowed) */
+#define HDA_PM_PPG BIT(2)
+/* Indicates whether streaming is active */
+#define HDA_PM_PG_STREAMING BIT(1)
+#define HDA_PM_PG_RSVD BIT(0)
+
+#endif
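Putting the definitions above together: the compact PM_GATE request built by cnl_ipc_send_msg() (see the cnl.c hunk earlier) packs the message type into the primary register and the payload into the secondary one. A worked sketch using only macros from this header:

	/* Primary register: compact flag plus type 0x8 in bits 28:24;
	 * the send path additionally ORs in CNL_DSP_REG_HIPCIDR_BUSY. */
	u32 dr = HDA_IPC_MSG_COMPACT | HDA_IPC_PM_GATE;

	/* Secondary register: the pm_gate flags payload, e.g. "prevent
	 * power gating" when staying in D0I0 (see hda-dsp.c above). */
	u32 dd = HDA_PM_PPG;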
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index 65c2af3fcaab..b1783360fe10 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -126,7 +126,8 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, const void *fwdata,
HDA_DSP_INIT_TIMEOUT_US);
if (ret < 0) {
- dev_err(sdev->dev, "error: waiting for HIPCIE done\n");
+ dev_err(sdev->dev, "error: %s: timeout for HIPCIE done\n",
+ __func__);
goto err;
}
@@ -152,6 +153,10 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, const void *fwdata,
if (!ret)
return 0;
+ dev_err(sdev->dev,
+ "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
+ __func__);
+
err:
hda_dsp_dump(sdev, SOF_DBG_REGS | SOF_DBG_PCI | SOF_DBG_MBOX);
hda_dsp_core_reset_power_down(sdev, chip->cores_mask);
@@ -253,10 +258,22 @@ static int cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *stream)
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_BASEFW_TIMEOUT_US);
+ /*
+ * even in case of errors we still need to stop the DMAs,
+ * but we return the initial error should the DMA stop also fail
+ */
+
+ if (status < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout HDA_DSP_SRAM_REG_ROM_STATUS read\n",
+ __func__);
+ }
+
ret = cl_trigger(sdev, stream, SNDRV_PCM_TRIGGER_STOP);
if (ret < 0) {
dev_err(sdev->dev, "error: DMA trigger stop failed\n");
- return ret;
+ if (!status)
+ status = ret;
}
return status;
@@ -341,13 +358,15 @@ cleanup:
/*
* Perform codeloader stream cleanup.
* This should be done even if firmware loading fails.
+ * If the cleanup also fails, we return the initial error
*/
ret1 = cl_cleanup(sdev, &sdev->dmab, stream);
if (ret1 < 0) {
dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
/* set return value to indicate cleanup failure */
- ret = ret1;
+ if (!ret)
+ ret = ret1;
}
/*
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 9b730f183529..575f5f5877d8 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -89,6 +89,7 @@ int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
struct hdac_ext_stream *stream = stream_to_hdac_ext_stream(hstream);
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct snd_dma_buffer *dmab;
+ struct sof_ipc_fw_version *v = &sdev->fw_ready.version;
int ret;
u32 size, rate, bits;
@@ -116,9 +117,17 @@ int hda_dsp_pcm_hw_params(struct snd_sof_dev *sdev,
/* disable SPIB, to enable buffer wrap for stream */
hda_dsp_stream_spib_config(sdev, stream, HDA_DSP_SPIB_DISABLE, 0);
- /* set host_period_bytes to 0 if no IPC position */
- if (hda && hda->no_ipc_position)
- ipc_params->host_period_bytes = 0;
+ /* update no_stream_position flag for ipc params */
+ if (hda && hda->no_ipc_position) {
+ /* For older ABIs set host_period_bytes to zero to inform
+ * FW we don't want position updates. Newer versions use
+ * no_stream_position for this purpose.
+ */
+ if (v->abi_version < SOF_ABI_VER(3, 10, 0))
+ ipc_params->host_period_bytes = 0;
+ else
+ ipc_params->no_stream_position = 1;
+ }
ipc_params->stream_tag = hstream->stream_tag;
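The ABI gate above is the standard SOF pattern of comparing packed version integers; SOF_ABI_VER() is defined in include/sound/sof/abi.h and is not part of this patch, so the sketch below only assumes it yields monotonically comparable values:

	/* Position-update negotiation by ABI version. */
	if (hda && hda->no_ipc_position) {
		if (v->abi_version < SOF_ABI_VER(3, 10, 0))
			ipc_params->host_period_bytes = 0;  /* legacy: 0 disables position IPC */
		else
			ipc_params->no_stream_position = 1; /* 3.10+: explicit flag */
	}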
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index 0c11fceb28a7..29ab43281670 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -275,8 +275,12 @@ int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: cmd %d: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd);
return ret;
+ }
hstream->running = true;
break;
@@ -294,8 +298,12 @@ int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: cmd %d: timeout on STREAM_SD_OFFSET read\n",
+ __func__, cmd);
return ret;
+ }
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset +
SOF_HDA_ADSP_REG_CL_SD_STS,
@@ -356,8 +364,12 @@ int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on STREAM_SD_OFFSET read1\n",
+ __func__);
return ret;
+ }
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
@@ -418,8 +430,12 @@ int hda_dsp_stream_hw_params(struct snd_sof_dev *sdev,
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_STREAM_RUN_TIMEOUT);
- if (ret)
+ if (ret < 0) {
+ dev_err(sdev->dev,
+ "error: %s: timeout on STREAM_SD_OFFSET read2\n",
+ __func__);
return ret;
+ }
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SD_STS,
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 06e84679087b..91bd88fddac7 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -32,9 +32,6 @@
/* platform specific devices */
#include "shim.h"
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
-
#define EXCEPT_MAX_HDR_SIZE 0x400
/*
@@ -56,6 +53,11 @@ MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
static int hda_dmic_num = -1;
module_param_named(dmic_num, hda_dmic_num, int, 0444);
MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
+
+static bool hda_codec_use_common_hdmi =
+ IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_COMMON_HDMI_CODEC);
+module_param_named(use_common_hdmi, hda_codec_use_common_hdmi, bool, 0444);
+MODULE_PARM_DESC(use_common_hdmi, "SOF HDA use common HDMI codec driver");
#endif
static const struct hda_dsp_msg_code hda_dsp_rom_msg[] = {
@@ -262,12 +264,9 @@ static int hda_init(struct snd_sof_dev *sdev)
/* HDA bus init */
sof_hda_bus_init(bus, &pci->dev);
- /* Workaround for a communication error on CFL (bko#199007) and CNL */
- if (IS_CFL(pci) || IS_CNL(pci))
- bus->polling_mode = 1;
-
bus->use_posbuf = 1;
bus->bdl_pos_adj = 0;
+ bus->sync_write = 1;
mutex_init(&hbus->prepare_mutex);
hbus->pci = pci;
@@ -416,9 +415,16 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
pdata->tplg_filename =
hda_mach->sof_tplg_filename;
- /* firmware: pick the first in machine list */
+ /*
+ * firmware: pick the first in machine list,
+ * or use nocodec firmware name if list is empty
+ */
mach = pdata->desc->machines;
- pdata->fw_filename = mach->sof_fw_filename;
+ if (mach->id[0])
+ pdata->fw_filename = mach->sof_fw_filename;
+ else
+ pdata->fw_filename =
+ pdata->desc->nocodec_fw_filename;
dev_info(bus->dev, "using HDA machine driver %s now\n",
hda_mach->drv_name);
@@ -465,6 +471,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
&pdata->machine->mach_params;
mach_params->codec_mask = bus->codec_mask;
mach_params->platform = dev_name(sdev->dev);
+ mach_params->common_hdmi_codec_drv = hda_codec_use_common_hdmi;
}
/* create codec instances */
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 23e430d3e056..18d7e72bf9b7 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -64,6 +64,13 @@
#define SOF_HDA_PPCTL_PIE BIT(31)
#define SOF_HDA_PPCTL_GPROCEN BIT(30)
/* Vendor Specific Registers */
+#define SOF_HDA_VS_D0I3C 0x104A
+
+/* D0I3C Register fields */
+#define SOF_HDA_VS_D0I3C_CIP BIT(0) /* Command-In-Progress */
+#define SOF_HDA_VS_D0I3C_I3 BIT(2) /* D0i3 enable bit */
+
/* DPIB entry size: 8 Bytes = 2 DWords */
#define SOF_HDA_DPIB_ENTRY_SIZE 0x8
@@ -207,6 +214,7 @@
#define HDA_DSP_CTRL_RESET_TIMEOUT 100
#define HDA_DSP_WAIT_TIMEOUT 500 /* 500 msec */
#define HDA_DSP_REG_POLL_INTERVAL_US 500 /* 0.5 msec */
+#define HDA_DSP_REG_POLL_RETRY_COUNT 50
#define HDA_DSP_ADSPIC_IPC 1
#define HDA_DSP_ADSPIS_IPC 1
@@ -304,6 +312,7 @@
#define CNL_DSP_REG_HIPCTDD (CNL_DSP_IPC_BASE + 0x08)
#define CNL_DSP_REG_HIPCIDR (CNL_DSP_IPC_BASE + 0x10)
#define CNL_DSP_REG_HIPCIDA (CNL_DSP_IPC_BASE + 0x14)
+#define CNL_DSP_REG_HIPCIDD (CNL_DSP_IPC_BASE + 0x18)
#define CNL_DSP_REG_HIPCCTL (CNL_DSP_IPC_BASE + 0x28)
/* HIPCI */
@@ -399,6 +408,9 @@ struct sof_intel_hda_dev {
int irq;
+ /* PM related */
+ bool l1_support_changed;/* during suspend, is L1SEN changed or not */
+
/* DMIC device */
struct platform_device *dmic_dev;
};
@@ -455,6 +467,9 @@ int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev);
void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev);
+int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate);
+
int hda_dsp_suspend(struct snd_sof_dev *sdev);
int hda_dsp_resume(struct snd_sof_dev *sdev);
int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
@@ -565,7 +580,9 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev);
#endif /* CONFIG_SND_SOC_SOF_HDA */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA) && \
+ (IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI) || \
+ IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
void hda_codec_i915_get(struct snd_sof_dev *sdev);
void hda_codec_i915_put(struct snd_sof_dev *sdev);
@@ -579,7 +596,7 @@ static inline void hda_codec_i915_put(struct snd_sof_dev *sdev) { }
static inline int hda_codec_i915_init(struct snd_sof_dev *sdev) { return 0; }
static inline int hda_codec_i915_exit(struct snd_sof_dev *sdev) { return 0; }
-#endif /* CONFIG_SND_SOC_SOF_HDA && CONFIG_SND_SOC_HDAC_HDMI */
+#endif
/*
* Trace Control.
@@ -596,7 +613,6 @@ extern struct snd_soc_dai_driver skl_dai[];
*/
extern const struct snd_sof_dsp_ops sof_apl_ops;
extern const struct snd_sof_dsp_ops sof_cnl_ops;
-extern const struct snd_sof_dsp_ops sof_skl_ops;
extern const struct sof_intel_dsp_desc apl_chip_info;
extern const struct sof_intel_dsp_desc cnl_chip_info;
@@ -604,5 +620,6 @@ extern const struct sof_intel_dsp_desc skl_chip_info;
extern const struct sof_intel_dsp_desc icl_chip_info;
extern const struct sof_intel_dsp_desc tgl_chip_info;
extern const struct sof_intel_dsp_desc ehl_chip_info;
+extern const struct sof_intel_dsp_desc jsl_chip_info;
#endif
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index 086eeeab8679..5994e1073364 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -210,9 +210,7 @@ static int tx_wait_done(struct snd_sof_ipc *ipc, struct snd_sof_ipc_msg *msg,
if (ret == 0) {
dev_err(sdev->dev, "error: ipc timed out for 0x%x size %d\n",
hdr->cmd, hdr->size);
- snd_sof_dsp_dbg_dump(ipc->sdev, SOF_DBG_REGS | SOF_DBG_MBOX);
- snd_sof_ipc_dump(ipc->sdev);
- snd_sof_trace_notify_for_error(ipc->sdev);
+ snd_sof_handle_fw_exception(ipc->sdev);
ret = -ETIMEDOUT;
} else {
/* copy the data returned from DSP */
@@ -796,12 +794,6 @@ struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev)
struct snd_sof_ipc *ipc;
struct snd_sof_ipc_msg *msg;
- /* check if mandatory ops required for ipc are defined */
- if (!sof_ops(sdev)->fw_ready) {
- dev_err(sdev->dev, "error: ipc mandatory ops not defined\n");
- return NULL;
- }
-
ipc = devm_kzalloc(sdev->dev, sizeof(*ipc), GFP_KERNEL);
if (!ipc)
return NULL;
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index 824d36fe59fd..93512dcbaacd 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -193,6 +193,16 @@ static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
return 0;
}
+static inline int snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
+ enum sof_d0_substate substate)
+{
+ if (sof_ops(sdev)->set_power_state)
+ return sof_ops(sdev)->set_power_state(sdev, substate);
+
+ /* D0 substate is not supported */
+ return -ENOTSUPP;
+}
+
/* debug */
static inline void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, u32 flags)
{
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 2b876d497447..549238a98b2a 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -19,12 +19,11 @@
#define DRV_NAME "sof-audio-component"
/* Create DMA buffer page table for DSP */
-static int create_page_table(struct snd_pcm_substream *substream,
+static int create_page_table(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
unsigned char *dma_area, size_t size)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
@@ -95,13 +94,12 @@ void snd_sof_pcm_period_elapsed(struct snd_pcm_substream *substream)
EXPORT_SYMBOL(snd_sof_pcm_period_elapsed);
/* this may get called several times by oss emulation */
-static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
+static int sof_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct sof_ipc_pcm_params pcm;
@@ -135,7 +133,7 @@ static int sof_pcm_hw_params(struct snd_pcm_substream *substream,
* ret == 0 means the buffer is not changed
* so no need to regenerate the page table
*/
- ret = create_page_table(substream, runtime->dma_area,
+ ret = create_page_table(component, substream, runtime->dma_area,
runtime->dma_bytes);
if (ret < 0)
return ret;
@@ -237,11 +235,10 @@ static int sof_pcm_dsp_pcm_free(struct snd_pcm_substream *substream,
return ret;
}
-static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
+static int sof_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
int ret, err = 0;
@@ -276,11 +273,10 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
return err;
}
-static int sof_pcm_prepare(struct snd_pcm_substream *substream)
+static int sof_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
int ret;
@@ -300,7 +296,8 @@ static int sof_pcm_prepare(struct snd_pcm_substream *substream)
substream->stream);
/* set hw_params */
- ret = sof_pcm_hw_params(substream, &spcm->params[substream->stream]);
+ ret = sof_pcm_hw_params(component,
+ substream, &spcm->params[substream->stream]);
if (ret < 0) {
dev_err(sdev->dev, "error: set pcm hw_params after resume\n");
return ret;
@@ -313,11 +310,10 @@ static int sof_pcm_prepare(struct snd_pcm_substream *substream)
* FE dai link trigger actions are always executed in non-atomic context because
* they involve IPC's.
*/
-static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int sof_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct sof_ipc_stream stream;
@@ -350,8 +346,18 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
break;
case SNDRV_PCM_TRIGGER_RESUME:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * this case is triggered when INFO_RESUME is
+ * supported; there is no need to resume streams that
+ * remained enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
+
/* set up hw_params */
- ret = sof_pcm_prepare(substream);
+ ret = sof_pcm_prepare(component, substream);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to set up hw_params upon resume\n");
@@ -360,9 +366,30 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
/* fallthrough */
case SNDRV_PCM_TRIGGER_START:
+ if (spcm->stream[substream->stream].suspend_ignored) {
+ /*
+ * This case is triggered when INFO_RESUME is not
+ * supported; there is no need to re-start streams
+ * that remained enabled in D0ix.
+ */
+ spcm->stream[substream->stream].suspend_ignored = false;
+ return 0;
+ }
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_START;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
+ if (sdev->s0_suspend &&
+ spcm->stream[substream->stream].d0i3_compatible) {
+ /*
+ * trap the event: do not send trigger stop, so that
+ * the FW pipelines are not stopped, and set the flag
+ * so the upcoming DAPM PM events are ignored.
+ */
+ spcm->stream[substream->stream].suspend_ignored = true;
+ return 0;
+ }
+ /* fallthrough */
case SNDRV_PCM_TRIGGER_STOP:
stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
ipc_first = true;
@@ -395,11 +422,10 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t sof_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sof_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
snd_pcm_uframes_t host, dai;
@@ -428,13 +454,13 @@ static snd_pcm_uframes_t sof_pcm_pointer(struct snd_pcm_substream *substream)
return host;
}
-static int sof_pcm_open(struct snd_pcm_substream *substream)
+static int sof_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
+ const struct snd_sof_dsp_ops *ops = sof_ops(sdev);
struct snd_sof_pcm *spcm;
struct snd_soc_tplg_stream_caps *caps;
int ret;
@@ -464,11 +490,8 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
le32_to_cpu(caps->period_size_min));
/* set runtime config */
- runtime->hw.info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
+ runtime->hw.info = ops->hw_info; /* platform-specific */
+
runtime->hw.formats = le64_to_cpu(caps->formats);
runtime->hw.period_bytes_min = le32_to_cpu(caps->period_size_min);
runtime->hw.period_bytes_max = le32_to_cpu(caps->period_size_max);
@@ -505,11 +528,10 @@ static int sof_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int sof_pcm_close(struct snd_pcm_substream *substream)
+static int sof_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
int err;
@@ -538,27 +560,14 @@ static int sof_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops sof_pcm_ops = {
- .open = sof_pcm_open,
- .close = sof_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = sof_pcm_hw_params,
- .prepare = sof_pcm_prepare,
- .hw_free = sof_pcm_hw_free,
- .trigger = sof_pcm_trigger,
- .pointer = sof_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
/*
* Pre-allocate playback/capture audio buffer pages.
* no need to explicitly release memory preallocated by sof_pcm_new in pcm_free
* snd_pcm_lib_preallocate_free_for_all() is called by the core.
*/
-static int sof_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sof_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
struct snd_sof_pcm *spcm;
struct snd_pcm *pcm = rtd->pcm;
@@ -691,6 +700,14 @@ static int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
case SOF_DAI_INTEL_ALH:
/* do nothing for ALH dai_link */
break;
+ case SOF_DAI_IMX_ESAI:
+ channels->min = dai->dai_config->esai.tdm_slots;
+ channels->max = dai->dai_config->esai.tdm_slots;
+
+ dev_dbg(sdev->dev,
+ "channels_min: %d channels_max: %d\n",
+ channels->min, channels->max);
+ break;
default:
dev_err(sdev->dev, "error: invalid DAI type %d\n",
dai->dai_config->type);
@@ -752,11 +769,19 @@ void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
pd->name = "sof-audio-component";
pd->probe = sof_pcm_probe;
pd->remove = sof_pcm_remove;
- pd->ops = &sof_pcm_ops;
+ pd->open = sof_pcm_open;
+ pd->close = sof_pcm_close;
+ pd->ioctl = snd_soc_pcm_lib_ioctl;
+ pd->hw_params = sof_pcm_hw_params;
+ pd->prepare = sof_pcm_prepare;
+ pd->hw_free = sof_pcm_hw_free;
+ pd->trigger = sof_pcm_trigger;
+ pd->pointer = sof_pcm_pointer;
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
pd->compr_ops = &sof_compressed_ops;
#endif
- pd->pcm_new = sof_pcm_new;
+ pd->pcm_construct = sof_pcm_new;
pd->ignore_machine = drv_name;
pd->be_hw_params_fixup = sof_pcm_dai_link_fixup;
pd->be_pcm_base = SOF_BE_PCM_BASE;
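The hunks above follow the ASoC move away from struct snd_pcm_ops: the PCM callbacks now live directly on struct snd_soc_component_driver and receive the component as their first argument, which is what lets every handler drop the snd_soc_rtdcom_lookup(rtd, DRV_NAME) boilerplate. A minimal sketch of the resulting pattern, with my_open and my_component as hypothetical names:

#include <sound/soc.h>

/* component-level PCM open: the component arrives as an argument
 * instead of being looked up from the runtime by driver name */
static int my_open(struct snd_soc_component *component,
		   struct snd_pcm_substream *substream)
{
	void *priv = snd_soc_component_get_drvdata(component);

	return priv ? 0 : -ENODEV;
}

static const struct snd_soc_component_driver my_component = {
	.name	= "my-component",
	.open	= my_open,
	.ioctl	= snd_soc_pcm_lib_ioctl,
	/* .close, .hw_params, .trigger, .pointer, .pcm_construct ... */
};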
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index e23beaeefe00..0fd5567237a8 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -197,7 +197,7 @@ static int sof_restore_pipelines(struct snd_sof_dev *sdev)
return ret;
}
-static int sof_send_pm_ipc(struct snd_sof_dev *sdev, int cmd)
+static int sof_send_pm_ctx_ipc(struct snd_sof_dev *sdev, int cmd)
{
struct sof_ipc_pm_ctx pm_ctx;
struct sof_ipc_reply reply;
@@ -320,12 +320,15 @@ static int sof_resume(struct device *dev, bool runtime_resume)
}
/* notify DSP of system resume */
- ret = sof_send_pm_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
+ ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_RESTORE);
if (ret < 0)
dev_err(sdev->dev,
"error: ctx_restore ipc error during resume %d\n",
ret);
+ /* initialize default D0 sub-state */
+ sdev->d0_substate = SOF_DSP_D0I0;
+
return ret;
}
@@ -358,7 +361,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
sof_cache_debugfs(sdev);
#endif
/* notify DSP of upcoming power down */
- ret = sof_send_pm_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+ ret = sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
if (ret == -EBUSY || ret == -EAGAIN) {
/*
* runtime PM has logic to handle -EBUSY/-EAGAIN so
@@ -408,14 +411,135 @@ int snd_sof_runtime_resume(struct device *dev)
}
EXPORT_SYMBOL(snd_sof_runtime_resume);
+int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate)
+{
+ int ret;
+
+ if (sdev->d0_substate == d0_substate)
+ return 0;
+
+ /* do platform specific set_state */
+ ret = snd_sof_dsp_set_power_state(sdev, d0_substate);
+ if (ret < 0)
+ return ret;
+
+ /* update dsp D0 sub-state */
+ sdev->d0_substate = d0_substate;
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_set_d0_substate);
+
+/*
+ * Audio DSP states may transition as below:
+ *
+ * D0I3 compatible stream
+ * Runtime +---------------------+ opened only, timeout
+ * suspend | +--------------------+
+ * +------------+ D0(active) | |
+ * | | <---------------+ |
+ * | +--------> | | |
+ * | |Runtime +--^--+---------^--+--+ The last | |
+ * | |resume | | | | opened D0I3 | |
+ * | | | | | | compatible | |
+ * | | resume| | | | stream closed | |
+ * | | from | | D3 | | | |
+ * | | D3 | |suspend | | d0i3 | |
+ * | | | | | |suspend | |
+ * | | | | | | | |
+ * | | | | | | | |
+ * +-v---+-----------+--v-------+ | | +------+----v----+
+ * | | | +-----------> |
+ * | D3 (suspended) | | | D0I3 +-----+
+ * | | +--------------+ | |
+ * | | resume from | | |
+ * +-------------------^--------+ d0i3 suspend +----------------+ |
+ * | |
+ * | D3 suspend |
+ * +------------------------------------------------+
+ *
+ * d0i3_suspend = s0_suspend && D0I3 stream opened,
+ * D3 suspend = !d0i3_suspend.
+ */
+
int snd_sof_resume(struct device *dev)
{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (snd_sof_dsp_d0i3_on_suspend(sdev)) {
+ /* resume from D0I3 */
+ dev_dbg(sdev->dev, "DSP will exit from D0i3...\n");
+ ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I0);
+ if (ret == -ENOTSUPP) {
+ /* fallback to resume from D3 */
+ dev_dbg(sdev->dev, "D0i3 not supported, fall back to resume from D3...\n");
+ goto d3_resume;
+ } else if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to exit from D0I3 %d\n",
+ ret);
+ return ret;
+ }
+
+ /* platform-specific resume from D0i3 */
+ return snd_sof_dsp_resume(sdev);
+ }
+
+d3_resume:
+ /* resume from D3 */
return sof_resume(dev, false);
}
EXPORT_SYMBOL(snd_sof_resume);
int snd_sof_suspend(struct device *dev)
{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ if (snd_sof_dsp_d0i3_on_suspend(sdev)) {
+ /* suspend to D0i3 */
+ dev_dbg(sdev->dev, "DSP is trying to enter D0i3...\n");
+ ret = snd_sof_set_d0_substate(sdev, SOF_DSP_D0I3);
+ if (ret == -ENOTSUPP) {
+ /* fallback to D3 suspend */
+ dev_dbg(sdev->dev, "D0i3 not supported, fall back to D3...\n");
+ goto d3_suspend;
+ } else if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to enter D0I3, %d\n",
+ ret);
+ return ret;
+ }
+
+ /* platform-specific suspend to D0i3 */
+ return snd_sof_dsp_suspend(sdev);
+ }
+
+d3_suspend:
+ /* suspend to D3 */
return sof_suspend(dev, false);
}
EXPORT_SYMBOL(snd_sof_suspend);
+
+int snd_sof_prepare(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+#if defined(CONFIG_ACPI)
+ sdev->s0_suspend = acpi_target_system_state() == ACPI_STATE_S0;
+#else
+ /* will suspend to S3 by default */
+ sdev->s0_suspend = false;
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL(snd_sof_prepare);
+
+void snd_sof_complete(struct device *dev)
+{
+ struct snd_sof_dev *sdev = dev_get_drvdata(dev);
+
+ sdev->s0_suspend = false;
+}
+EXPORT_SYMBOL(snd_sof_complete);
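snd_sof_set_d0_substate() delegates the actual transition to an optional platform op (set_power_state, declared in the sof-priv.h hunk later in this diff) and only updates sdev->d0_substate on success. A hedged sketch of what such a platform handler could look like; the handler body is purely illustrative:

static int my_set_power_state(struct snd_sof_dev *sdev,
			      enum sof_d0_substate d0_substate)
{
	switch (d0_substate) {
	case SOF_DSP_D0I0:
		/* illustrative: return to full-power D0 behaviour */
		return 0;
	case SOF_DSP_D0I3:
		/* illustrative: permit the low-power D0 substate */
		return 0;
	default:
		return -EINVAL;
	}
}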
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
index ea7b8b895412..df318f50dd0b 100644
--- a/sound/soc/sof/sof-acpi-dev.c
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -29,6 +29,12 @@ static char *tplg_path;
module_param(tplg_path, charp, 0444);
MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+static int sof_acpi_debug;
+module_param_named(sof_acpi_debug, sof_acpi_debug, int, 0444);
+MODULE_PARM_DESC(sof_acpi_debug, "SOF ACPI debug options (0x0 all off)");
+
+#define SOF_ACPI_DISABLE_PM_RUNTIME BIT(0)
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HASWELL)
static const struct sof_dev_desc sof_acpi_haswell_desc = {
.machines = snd_soc_acpi_intel_haswell_machines,
@@ -121,6 +127,9 @@ static const struct dev_pm_ops sof_acpi_pm = {
static void sof_acpi_probe_complete(struct device *dev)
{
+ if (sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME)
+ return;
+
/* allow runtime_pm */
pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
@@ -221,7 +230,8 @@ static int sof_acpi_probe(struct platform_device *pdev)
static int sof_acpi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ if (!(sof_acpi_debug & SOF_ACPI_DISABLE_PM_RUNTIME))
+ pm_runtime_disable(&pdev->dev);
/* call sof helper for DSP hardware remove */
snd_sof_device_remove(&pdev->dev);
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index d66412a77873..bbeffd932de7 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <sound/intel-dsp-config.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/sof.h>
@@ -29,6 +30,12 @@ static char *tplg_path;
module_param(tplg_path, charp, 0444);
MODULE_PARM_DESC(tplg_path, "alternate path for SOF topology.");
+static int sof_pci_debug;
+module_param_named(sof_pci_debug, sof_pci_debug, int, 0444);
+MODULE_PARM_DESC(sof_pci_debug, "SOF PCI debug options (0x0 all off)");
+
+#define SOF_PCI_DISABLE_PM_RUNTIME BIT(0)
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
static const struct sof_dev_desc bxt_desc = {
.machines = snd_soc_acpi_intel_bxt_machines,
@@ -113,7 +120,7 @@ static const struct sof_dev_desc cnl_desc = {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE)
static const struct sof_dev_desc cfl_desc = {
- .machines = snd_soc_acpi_intel_cnl_machines,
+ .machines = snd_soc_acpi_intel_cfl_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
@@ -122,7 +129,7 @@ static const struct sof_dev_desc cfl_desc = {
.chip_info = &cnl_chip_info,
.default_fw_path = "intel/sof",
.default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-cnl.ri",
+ .nocodec_fw_filename = "sof-cfl.ri",
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.arch_ops = &sof_xtensa_arch_ops
@@ -133,7 +140,7 @@ static const struct sof_dev_desc cfl_desc = {
IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
static const struct sof_dev_desc cml_desc = {
- .machines = snd_soc_acpi_intel_cnl_machines,
+ .machines = snd_soc_acpi_intel_cml_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
@@ -142,7 +149,7 @@ static const struct sof_dev_desc cml_desc = {
.chip_info = &cnl_chip_info,
.default_fw_path = "intel/sof",
.default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-cnl.ri",
+ .nocodec_fw_filename = "sof-cml.ri",
.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
.ops = &sof_cnl_ops,
.arch_ops = &sof_xtensa_arch_ops
@@ -167,42 +174,6 @@ static const struct sof_dev_desc icl_desc = {
};
#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_SKYLAKE)
-static const struct sof_dev_desc skl_desc = {
- .machines = snd_soc_acpi_intel_skl_machines,
- .resindex_lpe_base = 0,
- .resindex_pcicfg_base = -1,
- .resindex_imr_base = -1,
- .irqindex_host_ipc = -1,
- .resindex_dma_base = -1,
- .chip_info = &skl_chip_info,
- .default_fw_path = "intel/sof",
- .default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-skl.ri",
- .nocodec_tplg_filename = "sof-skl-nocodec.tplg",
- .ops = &sof_skl_ops,
- .arch_ops = &sof_xtensa_arch_ops
-};
-#endif
-
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_KABYLAKE)
-static const struct sof_dev_desc kbl_desc = {
- .machines = snd_soc_acpi_intel_kbl_machines,
- .resindex_lpe_base = 0,
- .resindex_pcicfg_base = -1,
- .resindex_imr_base = -1,
- .irqindex_host_ipc = -1,
- .resindex_dma_base = -1,
- .chip_info = &skl_chip_info,
- .default_fw_path = "intel/sof",
- .default_tplg_path = "intel/sof-tplg",
- .nocodec_fw_filename = "sof-kbl.ri",
- .nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
- .ops = &sof_skl_ops,
- .arch_ops = &sof_xtensa_arch_ops
-};
-#endif
-
#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
static const struct sof_dev_desc tgl_desc = {
.machines = snd_soc_acpi_intel_tgl_machines,
@@ -239,7 +210,27 @@ static const struct sof_dev_desc ehl_desc = {
};
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+static const struct sof_dev_desc jsl_desc = {
+ .machines = snd_soc_acpi_intel_jsl_machines,
+ .resindex_lpe_base = 0,
+ .resindex_pcicfg_base = -1,
+ .resindex_imr_base = -1,
+ .irqindex_host_ipc = -1,
+ .resindex_dma_base = -1,
+ .chip_info = &jsl_chip_info,
+ .default_fw_path = "intel/sof",
+ .default_tplg_path = "intel/sof-tplg",
+ .nocodec_fw_filename = "sof-jsl.ri",
+ .nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ .ops = &sof_cnl_ops,
+ .arch_ops = &sof_xtensa_arch_ops
+};
+#endif
+
static const struct dev_pm_ops sof_pci_pm = {
+ .prepare = snd_sof_prepare,
+ .complete = snd_sof_complete,
SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
snd_sof_runtime_idle)
@@ -249,6 +240,9 @@ static void sof_pci_probe_complete(struct device *dev)
{
dev_dbg(dev, "Completing SOF PCI probe");
+ if (sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME)
+ return;
+
/* allow runtime_pm */
pm_runtime_set_autosuspend_delay(dev, SND_SOF_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
@@ -277,6 +271,11 @@ static int sof_pci_probe(struct pci_dev *pci,
const struct snd_sof_dsp_ops *ops;
int ret;
+ ret = snd_intel_dsp_driver_probe(pci);
+ if (ret != SND_INTEL_DSP_DRIVER_ANY &&
+ ret != SND_INTEL_DSP_DRIVER_SOF)
+ return -ENODEV;
+
dev_dbg(&pci->dev, "PCI DSP detected");
/* get ops for platform */
@@ -370,7 +369,8 @@ static void sof_pci_remove(struct pci_dev *pci)
snd_sof_device_remove(&pci->dev);
/* follow recommendation in pci-driver.c to increment usage counter */
- pm_runtime_get_noresume(&pci->dev);
+ if (!(sof_pci_debug & SOF_PCI_DISABLE_PM_RUNTIME))
+ pm_runtime_get_noresume(&pci->dev);
/* release pci regions and disable device */
pci_release_regions(pci);
@@ -401,18 +401,14 @@ static const struct pci_device_id sof_pci_ids[] = {
{ PCI_DEVICE(0x8086, 0xa348),
.driver_data = (unsigned long)&cfl_desc},
#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_KABYLAKE)
- { PCI_DEVICE(0x8086, 0x9d71),
- .driver_data = (unsigned long)&kbl_desc},
-#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_SKYLAKE)
- { PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = (unsigned long)&skl_desc},
-#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
{ PCI_DEVICE(0x8086, 0x34C8),
.driver_data = (unsigned long)&icl_desc},
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+ { PCI_DEVICE(0x8086, 0x38c8),
+ .driver_data = (unsigned long)&jsl_desc},
+#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
{ PCI_DEVICE(0x8086, 0x02c8),
.driver_data = (unsigned long)&cml_desc},
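The snd_intel_dsp_driver_probe() call added to sof_pci_probe() is part of the intel-dsp-config arbitration scheme: several Intel audio drivers can be built for the same PCI IDs, and each one asks the arbiter at probe time, bailing out with -ENODEV unless it was selected (or no preference exists). A sketch of the same check as a hypothetical non-SOF driver would make it, assuming the legacy-HDA selector constant:

#include <linux/pci.h>
#include <sound/intel-dsp-config.h>

static int my_legacy_probe(struct pci_dev *pci,
			   const struct pci_device_id *pci_id)
{
	int ret;

	ret = snd_intel_dsp_driver_probe(pci);
	if (ret != SND_INTEL_DSP_DRIVER_ANY &&
	    ret != SND_INTEL_DSP_DRIVER_LEGACY)
		return -ENODEV;	/* another driver owns this device */

	/* ... normal probe continues ... */
	return 0;
}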
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index 730f3259dd02..c7c2c70ee4d0 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -15,6 +15,7 @@
#include <sound/hdaudio.h>
#include <sound/soc.h>
+#include <sound/control.h>
#include <sound/sof.h>
#include <sound/sof/stream.h> /* needs to be included before control.h */
@@ -28,10 +29,15 @@
#include <uapi/sound/sof/fw.h>
/* debug flags */
-#define SOF_DBG_REGS BIT(1)
-#define SOF_DBG_MBOX BIT(2)
-#define SOF_DBG_TEXT BIT(3)
-#define SOF_DBG_PCI BIT(4)
+#define SOF_DBG_ENABLE_TRACE BIT(0)
+#define SOF_DBG_REGS BIT(1)
+#define SOF_DBG_MBOX BIT(2)
+#define SOF_DBG_TEXT BIT(3)
+#define SOF_DBG_PCI BIT(4)
+#define SOF_DBG_RETAIN_CTX BIT(5) /* prevent DSP D3 on FW exception */
+
+/* global debug state set by SOF_DBG_ flags */
+extern int sof_core_debug;
/* max BARs mmaped devices can use */
#define SND_SOF_BARS 8
@@ -62,6 +68,12 @@
#define DMA_CHAN_INVALID 0xFFFFFFFF
+/* DSP D0ix sub-state */
+enum sof_d0_substate {
+ SOF_DSP_D0I0 = 0, /* DSP default D0 substate */
+ SOF_DSP_D0I3, /* DSP D0i3 (low power) substate */
+};
+
struct snd_sof_dev;
struct snd_sof_ipc_msg;
struct snd_sof_ipc;
@@ -128,7 +140,7 @@ struct snd_sof_dsp_ops {
* FW ready checks for ABI compatibility and creates
* memory windows at first boot
*/
- int (*fw_ready)(struct snd_sof_dev *sdev, u32 msg_id); /* optional */
+ int (*fw_ready)(struct snd_sof_dev *sdev, u32 msg_id); /* mandatory */
/* connect pcm substream to a host stream */
int (*pcm_open)(struct snd_sof_dev *sdev,
@@ -177,6 +189,8 @@ struct snd_sof_dsp_ops {
int (*runtime_resume)(struct snd_sof_dev *sof_dev); /* optional */
int (*runtime_idle)(struct snd_sof_dev *sof_dev); /* optional */
int (*set_hw_params_upon_resume)(struct snd_sof_dev *sdev); /* optional */
+ int (*set_power_state)(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate); /* optional */
/* DSP clocking */
int (*set_clk)(struct snd_sof_dev *sof_dev, u32 freq); /* optional */
@@ -205,6 +219,9 @@ struct snd_sof_dsp_ops {
/* DAI ops */
struct snd_soc_dai_driver *drv;
int num_drv;
+
+ /* ALSA HW info flags; stored in snd_pcm_runtime.hw.info */
+ u32 hw_info;
};
/* DSP architecture specific callbacks for oops and stack dumps */
@@ -293,6 +310,12 @@ struct snd_sof_pcm_stream {
struct sof_ipc_stream_posn posn;
struct snd_pcm_substream *substream;
struct work_struct period_elapsed_work;
+ bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ /*
+ * flag to indicate whether the DSP pipelines should be
+ * kept active while the stream is suspended
+ */
+ bool suspend_ignored;
};
/* ALSA SOF PCM device */
@@ -305,6 +328,12 @@ struct snd_sof_pcm {
bool prepared[2]; /* PCM_PARAMS set successfully */
};
+struct snd_sof_led_control {
+ unsigned int use_led;
+ unsigned int direction;
+ unsigned int led_value;
+};
+
/* ALSA SOF Kcontrol device */
struct snd_sof_control {
struct snd_sof_dev *sdev;
@@ -319,6 +348,8 @@ struct snd_sof_control {
u32 *volume_table; /* volume table computed from tlv data*/
struct list_head list; /* list in sdev control list */
+
+ struct snd_sof_led_control led_ctl;
};
/* ASoC SOF DAPM widget */
@@ -370,6 +401,11 @@ struct snd_sof_dev {
*/
struct snd_soc_component_driver plat_drv;
+ /* power states related */
+ enum sof_d0_substate d0_substate;
+ /* flag to track if the intended power target of suspend is S0ix */
+ bool s0_suspend;
+
/* DSP firmware boot */
wait_queue_head_t boot_wait;
u32 boot_complete;
@@ -434,6 +470,7 @@ struct snd_sof_dev {
int dma_trace_pages;
wait_queue_head_t trace_sleep;
u32 host_offset;
+ u32 dtrace_is_supported; /* set with Kconfig or module parameter */
u32 dtrace_is_enabled;
u32 dtrace_error;
u32 dtrace_draining;
@@ -455,6 +492,10 @@ int snd_sof_runtime_resume(struct device *dev);
int snd_sof_runtime_idle(struct device *dev);
int snd_sof_resume(struct device *dev);
int snd_sof_suspend(struct device *dev);
+int snd_sof_prepare(struct device *dev);
+void snd_sof_complete(struct device *dev);
+int snd_sof_set_d0_substate(struct snd_sof_dev *sdev,
+ enum sof_d0_substate d0_substate);
void snd_sof_new_platform_drv(struct snd_sof_dev *sdev);
@@ -512,6 +553,8 @@ struct snd_sof_pcm *snd_sof_find_spcm_dai(struct snd_sof_dev *sdev,
return NULL;
}
+bool snd_sof_dsp_d0i3_on_suspend(struct snd_sof_dev *sdev);
+
struct snd_sof_pcm *snd_sof_find_spcm_name(struct snd_sof_dev *sdev,
const char *name);
struct snd_sof_pcm *snd_sof_find_spcm_comp(struct snd_sof_dev *sdev,
@@ -575,6 +618,7 @@ void snd_sof_get_status(struct snd_sof_dev *sdev, u32 panic_code,
struct sof_ipc_panic_info *panic_info,
void *stack, size_t stack_words);
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev);
+void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev);
/*
* Platform specific ops.
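The new hw_info member in snd_sof_dsp_ops replaces the flag set that sof_pcm_open() used to hard-code: each platform now declares its ALSA runtime info flags once, and sof_pcm_open() copies them into runtime->hw.info. A sketch using exactly the flags the old code set (a given platform may legitimately choose a different combination):

static const struct snd_sof_dsp_ops my_dsp_ops = {
	/* ... mandatory ops elided ... */
	.hw_info = SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_PAUSE |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};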
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 4452594c2e17..d82ab981e840 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -135,7 +135,9 @@ static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
struct snd_sof_widget *swidget = w->dobj.private;
+ int stream = SNDRV_PCM_STREAM_CAPTURE;
struct snd_sof_dev *sdev;
+ struct snd_sof_pcm *spcm;
int ret = 0;
if (!swidget)
@@ -146,11 +148,24 @@ static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
dev_dbg(sdev->dev, "received event %d for widget %s\n",
event, w->name);
+ /* get runtime PCM params using widget's stream name */
+ spcm = snd_sof_find_spcm_name(sdev, swidget->widget->sname);
+ if (!spcm) {
+ dev_err(sdev->dev, "error: cannot find PCM for %s\n",
+ swidget->widget->name);
+ return -EINVAL;
+ }
+
/* process events */
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(sdev->dev, "PRE_PMU event ignored, KWD pipeline is already RUNNING\n");
+ return 0;
+ }
+
/* set pcm params */
- ret = ipc_pcm_params(swidget, SOF_IPC_STREAM_CAPTURE);
+ ret = ipc_pcm_params(swidget, stream);
if (ret < 0) {
dev_err(sdev->dev,
"error: failed to set pcm params for widget %s\n",
@@ -166,6 +181,11 @@ static int sof_keyword_dapm_event(struct snd_soc_dapm_widget *w,
swidget->widget->name);
break;
case SND_SOC_DAPM_POST_PMD:
+ if (spcm->stream[stream].suspend_ignored) {
+ dev_dbg(sdev->dev, "POST_PMD even ignored, KWD pipeline will remain RUNNING\n");
+ return 0;
+ }
+
/* stop trigger */
ret = ipc_trigger(swidget, SOF_IPC_STREAM_TRIG_STOP);
if (ret < 0)
@@ -433,164 +453,6 @@ static enum sof_comp_type find_process_comp_type(enum sof_ipc_process_type type)
}
/*
- * Standard Kcontrols.
- */
-
-static int sof_control_load_volume(struct snd_soc_component *scomp,
- struct snd_sof_control *scontrol,
- struct snd_kcontrol_new *kc,
- struct snd_soc_tplg_ctl_hdr *hdr)
-{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct snd_soc_tplg_mixer_control *mc =
- container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
- struct sof_ipc_ctrl_data *cdata;
- int tlv[TLV_ITEMS];
- unsigned int i;
- int ret;
-
- /* validate topology data */
- if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN)
- return -EINVAL;
-
- /* init the volume get/put data */
- scontrol->size = struct_size(scontrol->control_data, chanv,
- le32_to_cpu(mc->num_channels));
- scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
- if (!scontrol->control_data)
- return -ENOMEM;
-
- scontrol->comp_id = sdev->next_comp_id;
- scontrol->min_volume_step = le32_to_cpu(mc->min);
- scontrol->max_volume_step = le32_to_cpu(mc->max);
- scontrol->num_channels = le32_to_cpu(mc->num_channels);
-
- /* set cmd for mixer control */
- if (le32_to_cpu(mc->max) == 1) {
- scontrol->cmd = SOF_CTRL_CMD_SWITCH;
- goto out;
- }
-
- scontrol->cmd = SOF_CTRL_CMD_VOLUME;
-
- /* extract tlv data */
- if (get_tlv_data(kc->tlv.p, tlv) < 0) {
- dev_err(sdev->dev, "error: invalid TLV data\n");
- return -EINVAL;
- }
-
- /* set up volume table */
- ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
- if (ret < 0) {
- dev_err(sdev->dev, "error: setting up volume table\n");
- return ret;
- }
-
- /* set default volume values to 0dB in control */
- cdata = scontrol->control_data;
- for (i = 0; i < scontrol->num_channels; i++) {
- cdata->chanv[i].channel = i;
- cdata->chanv[i].value = VOL_ZERO_DB;
- }
-
-out:
- dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
- scontrol->comp_id, scontrol->num_channels);
-
- return 0;
-}
-
-static int sof_control_load_enum(struct snd_soc_component *scomp,
- struct snd_sof_control *scontrol,
- struct snd_kcontrol_new *kc,
- struct snd_soc_tplg_ctl_hdr *hdr)
-{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct snd_soc_tplg_enum_control *ec =
- container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
-
- /* validate topology data */
- if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
- return -EINVAL;
-
- /* init the enum get/put data */
- scontrol->size = struct_size(scontrol->control_data, chanv,
- le32_to_cpu(ec->num_channels));
- scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
- if (!scontrol->control_data)
- return -ENOMEM;
-
- scontrol->comp_id = sdev->next_comp_id;
- scontrol->num_channels = le32_to_cpu(ec->num_channels);
-
- scontrol->cmd = SOF_CTRL_CMD_ENUM;
-
- dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
- scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
-
- return 0;
-}
-
-static int sof_control_load_bytes(struct snd_soc_component *scomp,
- struct snd_sof_control *scontrol,
- struct snd_kcontrol_new *kc,
- struct snd_soc_tplg_ctl_hdr *hdr)
-{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct sof_ipc_ctrl_data *cdata;
- struct snd_soc_tplg_bytes_control *control =
- container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
- struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
- int max_size = sbe->max;
-
- /* init the get/put bytes data */
- scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
- le32_to_cpu(control->priv.size);
-
- if (scontrol->size > max_size) {
- dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
- scontrol->size, max_size);
- return -EINVAL;
- }
-
- scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
- cdata = scontrol->control_data;
- if (!scontrol->control_data)
- return -ENOMEM;
-
- scontrol->comp_id = sdev->next_comp_id;
- scontrol->cmd = SOF_CTRL_CMD_BINARY;
-
- dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
- scontrol->comp_id, scontrol->num_channels);
-
- if (le32_to_cpu(control->priv.size) > 0) {
- memcpy(cdata->data, control->priv.data,
- le32_to_cpu(control->priv.size));
-
- if (cdata->data->magic != SOF_ABI_MAGIC) {
- dev_err(sdev->dev, "error: Wrong ABI magic 0x%08x.\n",
- cdata->data->magic);
- return -EINVAL;
- }
- if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION,
- cdata->data->abi)) {
- dev_err(sdev->dev,
- "error: Incompatible ABI version 0x%08x.\n",
- cdata->data->abi);
- return -EINVAL;
- }
- if (cdata->data->size + sizeof(const struct sof_abi_hdr) !=
- le32_to_cpu(control->priv.size)) {
- dev_err(sdev->dev,
- "error: Conflict in bytes vs. priv size.\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/*
* Topology Token Parsing.
* New tokens should be added to headers and parsing tables below.
*/
@@ -725,6 +587,16 @@ static const struct sof_topology_token pcm_tokens[] = {
offsetof(struct sof_ipc_comp_host, dmac_config), 0},
};
+/* PCM */
+static const struct sof_topology_token stream_tokens[] = {
+ {SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3,
+ SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible), 0},
+ {SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3,
+ SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[1].d0i3_compatible), 0},
+};
+
/* Generic components */
static const struct sof_topology_token comp_tokens[] = {
{SOF_TKN_COMP_PERIOD_SINK_COUNT,
@@ -799,6 +671,13 @@ static const struct sof_topology_token dmic_tokens[] = {
};
+/* ESAI */
+static const struct sof_topology_token esai_tokens[] = {
+ {SOF_TKN_IMX_ESAI_MCLK_ID,
+ SND_SOC_TPLG_TUPLE_TYPE_SHORT, get_token_u16,
+ offsetof(struct sof_ipc_dai_esai_params, mclk_id), 0},
+};
+
/*
* DMIC PDM Tokens
* SOF_TKN_INTEL_DMIC_PDM_CTRL_ID should be the first token
@@ -840,6 +719,14 @@ static const struct sof_topology_token dmic_pdm_tokens[] = {
static const struct sof_topology_token hda_tokens[] = {
};
+/* LEDs */
+static const struct sof_topology_token led_tokens[] = {
+ {SOF_TKN_MUTE_LED_USE, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct snd_sof_led_control, use_led), 0},
+ {SOF_TKN_MUTE_LED_DIRECTION, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ get_token_u32, offsetof(struct snd_sof_led_control, direction), 0},
+};
+
static void sof_parse_uuid_tokens(struct snd_soc_component *scomp,
void *object,
const struct sof_topology_token *tokens,
@@ -1040,6 +927,200 @@ static void sof_dbg_comp_config(struct snd_soc_component *scomp,
config->frame_fmt);
}
+/*
+ * Standard Kcontrols.
+ */
+
+static int sof_control_load_volume(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_mixer_control *mc =
+ container_of(hdr, struct snd_soc_tplg_mixer_control, hdr);
+ struct sof_ipc_ctrl_data *cdata;
+ int tlv[TLV_ITEMS];
+ unsigned int i;
+ int ret = 0;
+
+ /* validate topology data */
+ if (le32_to_cpu(mc->num_channels) > SND_SOC_TPLG_MAX_CHAN) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* init the volume get/put data */
+ scontrol->size = struct_size(scontrol->control_data, chanv,
+ le32_to_cpu(mc->num_channels));
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->min_volume_step = le32_to_cpu(mc->min);
+ scontrol->max_volume_step = le32_to_cpu(mc->max);
+ scontrol->num_channels = le32_to_cpu(mc->num_channels);
+
+ /* set cmd for mixer control */
+ if (le32_to_cpu(mc->max) == 1) {
+ scontrol->cmd = SOF_CTRL_CMD_SWITCH;
+ goto skip;
+ }
+
+ scontrol->cmd = SOF_CTRL_CMD_VOLUME;
+
+ /* extract tlv data */
+ if (get_tlv_data(kc->tlv.p, tlv) < 0) {
+ dev_err(sdev->dev, "error: invalid TLV data\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* set up volume table */
+ ret = set_up_volume_table(scontrol, tlv, le32_to_cpu(mc->max) + 1);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: setting up volume table\n");
+ goto out_free;
+ }
+
+ /* set default volume values to 0dB in control */
+ cdata = scontrol->control_data;
+ for (i = 0; i < scontrol->num_channels; i++) {
+ cdata->chanv[i].channel = i;
+ cdata->chanv[i].value = VOL_ZERO_DB;
+ }
+
+skip:
+ /* set up possible led control from mixer private data */
+ ret = sof_parse_tokens(scomp, &scontrol->led_ctl, led_tokens,
+ ARRAY_SIZE(led_tokens), mc->priv.array,
+ le32_to_cpu(mc->priv.size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse led tokens failed %d\n",
+ le32_to_cpu(mc->priv.size));
+ goto out_free_table;
+ }
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ return ret;
+
+out_free_table:
+ if (le32_to_cpu(mc->max) > 1)
+ kfree(scontrol->volume_table);
+out_free:
+ kfree(scontrol->control_data);
+out:
+ return ret;
+}
+
+static int sof_control_load_enum(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_enum_control *ec =
+ container_of(hdr, struct snd_soc_tplg_enum_control, hdr);
+
+ /* validate topology data */
+ if (le32_to_cpu(ec->num_channels) > SND_SOC_TPLG_MAX_CHAN)
+ return -EINVAL;
+
+ /* init the enum get/put data */
+ scontrol->size = struct_size(scontrol->control_data, chanv,
+ le32_to_cpu(ec->num_channels));
+ scontrol->control_data = kzalloc(scontrol->size, GFP_KERNEL);
+ if (!scontrol->control_data)
+ return -ENOMEM;
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->num_channels = le32_to_cpu(ec->num_channels);
+
+ scontrol->cmd = SOF_CTRL_CMD_ENUM;
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d comp_id %d\n",
+ scontrol->comp_id, scontrol->num_channels, scontrol->comp_id);
+
+ return 0;
+}
+
+static int sof_control_load_bytes(struct snd_soc_component *scomp,
+ struct snd_sof_control *scontrol,
+ struct snd_kcontrol_new *kc,
+ struct snd_soc_tplg_ctl_hdr *hdr)
+{
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct sof_ipc_ctrl_data *cdata;
+ struct snd_soc_tplg_bytes_control *control =
+ container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+ struct soc_bytes_ext *sbe = (struct soc_bytes_ext *)kc->private_value;
+ int max_size = sbe->max;
+ int ret = 0;
+
+ /* init the get/put bytes data */
+ scontrol->size = sizeof(struct sof_ipc_ctrl_data) +
+ le32_to_cpu(control->priv.size);
+
+ if (scontrol->size > max_size) {
+ dev_err(sdev->dev, "err: bytes data size %d exceeds max %d.\n",
+ scontrol->size, max_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ scontrol->control_data = kzalloc(max_size, GFP_KERNEL);
+ cdata = scontrol->control_data;
+ if (!scontrol->control_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ scontrol->comp_id = sdev->next_comp_id;
+ scontrol->cmd = SOF_CTRL_CMD_BINARY;
+
+ dev_dbg(sdev->dev, "tplg: load kcontrol index %d chans %d\n",
+ scontrol->comp_id, scontrol->num_channels);
+
+ if (le32_to_cpu(control->priv.size) > 0) {
+ memcpy(cdata->data, control->priv.data,
+ le32_to_cpu(control->priv.size));
+
+ if (cdata->data->magic != SOF_ABI_MAGIC) {
+ dev_err(sdev->dev, "error: Wrong ABI magic 0x%08x.\n",
+ cdata->data->magic);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VERSION,
+ cdata->data->abi)) {
+ dev_err(sdev->dev,
+ "error: Incompatible ABI version 0x%08x.\n",
+ cdata->data->abi);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (cdata->data->size + sizeof(const struct sof_abi_hdr) !=
+ le32_to_cpu(control->priv.size)) {
+ dev_err(sdev->dev,
+ "error: Conflict in bytes vs. priv size.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ return ret;
+
+out_free:
+ kfree(scontrol->control_data);
+out:
+ return ret;
+}
+
/* external kcontrol init - used for any driver specific init */
static int sof_control_load(struct snd_soc_component *scomp, int index,
struct snd_kcontrol_new *kc,
@@ -1095,6 +1176,11 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
return 0;
}
+ if (ret < 0) {
+ kfree(scontrol);
+ return ret;
+ }
+
dobj->private = scontrol;
list_add(&scontrol->list, &sdev->kcontrol_list);
return ret;
@@ -1581,7 +1667,7 @@ static int sof_widget_load_pga(struct snd_soc_component *scomp, int index,
if (!volume)
return -ENOMEM;
- if (le32_to_cpu(tw->num_kcontrols) != 1) {
+ if (!le32_to_cpu(tw->num_kcontrols)) {
dev_err(sdev->dev, "error: invalid kcontrol count %d for volume\n",
tw->num_kcontrols);
ret = -EINVAL;
@@ -1618,7 +1704,8 @@ static int sof_widget_load_pga(struct snd_soc_component *scomp, int index,
swidget->private = volume;
list_for_each_entry(scontrol, &sdev->kcontrol_list, list) {
- if (scontrol->comp_id == swidget->comp_id) {
+ if (scontrol->comp_id == swidget->comp_id &&
+ scontrol->volume_table) {
min_step = scontrol->min_volume_step;
max_step = scontrol->max_volume_step;
volume->min_value = scontrol->volume_table[min_step];
@@ -2272,6 +2359,7 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_stream_caps *caps;
+ struct snd_soc_tplg_private *private = &pcm->priv;
struct snd_sof_pcm *spcm;
int stream = SNDRV_PCM_STREAM_PLAYBACK;
int ret = 0;
@@ -2288,17 +2376,28 @@ static int sof_dai_load(struct snd_soc_component *scomp, int index,
spcm->stream[SNDRV_PCM_STREAM_PLAYBACK].comp_id = COMP_ID_UNASSIGNED;
spcm->stream[SNDRV_PCM_STREAM_CAPTURE].comp_id = COMP_ID_UNASSIGNED;
- if (pcm) {
- spcm->pcm = *pcm;
- dev_dbg(sdev->dev, "tplg: load pcm %s\n", pcm->dai_name);
- }
+ spcm->pcm = *pcm;
+ dev_dbg(sdev->dev, "tplg: load pcm %s\n", pcm->dai_name);
+
dai_drv->dobj.private = spcm;
list_add(&spcm->list, &sdev->pcm_list);
+ ret = sof_parse_tokens(scomp, spcm, stream_tokens,
+ ARRAY_SIZE(stream_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret) {
+ dev_err(sdev->dev, "error: parse stream tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
/* do we need to allocate playback PCM DMA pages */
if (!spcm->pcm.playback)
goto capture;
+ dev_vdbg(sdev->dev, "tplg: pcm %s stream tokens: playback d0i3:%d\n",
+ spcm->pcm.pcm_name, spcm->stream[0].d0i3_compatible);
+
caps = &spcm->pcm.caps[stream];
/* allocate playback page table buffer */
@@ -2326,6 +2425,9 @@ capture:
if (!spcm->pcm.capture)
return ret;
+ dev_vdbg(sdev->dev, "tplg: pcm %s stream tokens: capture d0i3:%d\n",
+ spcm->pcm.pcm_name, spcm->stream[1].d0i3_compatible);
+
caps = &spcm->pcm.caps[stream];
/* allocate capture page table buffer */
@@ -2536,8 +2638,66 @@ static int sof_link_esai_load(struct snd_soc_component *scomp, int index,
struct snd_soc_tplg_hw_config *hw_config,
struct sof_ipc_dai_config *config)
{
- /*TODO: Add implementation */
- return 0;
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
+ struct snd_soc_tplg_private *private = &cfg->priv;
+ struct sof_ipc_reply reply;
+ u32 size = sizeof(*config);
+ int ret;
+
+ /* handle master/slave and inverted clocks */
+ sof_dai_set_format(hw_config, config);
+
+ /* init IPC */
+ memset(&config->esai, 0, sizeof(struct sof_ipc_dai_esai_params));
+ config->hdr.size = size;
+
+ ret = sof_parse_tokens(scomp, &config->esai, esai_tokens,
+ ARRAY_SIZE(esai_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(sdev->dev, "error: parse esai tokens failed %d\n",
+ le32_to_cpu(private->size));
+ return ret;
+ }
+
+ config->esai.mclk_rate = le32_to_cpu(hw_config->mclk_rate);
+ config->esai.bclk_rate = le32_to_cpu(hw_config->bclk_rate);
+ config->esai.fsync_rate = le32_to_cpu(hw_config->fsync_rate);
+ config->esai.mclk_direction = hw_config->mclk_direction;
+ config->esai.tdm_slots = le32_to_cpu(hw_config->tdm_slots);
+ config->esai.tdm_slot_width = le32_to_cpu(hw_config->tdm_slot_width);
+ config->esai.rx_slots = le32_to_cpu(hw_config->rx_slots);
+ config->esai.tx_slots = le32_to_cpu(hw_config->tx_slots);
+
+ dev_info(sdev->dev,
+ "tplg: config ESAI%d fmt 0x%x mclk %d width %d slots %d mclk id %d\n",
+ config->dai_index, config->format,
+ config->esai.mclk_rate, config->esai.tdm_slot_width,
+ config->esai.tdm_slots, config->esai.mclk_id);
+
+ if (config->esai.tdm_slots < 1 || config->esai.tdm_slots > 8) {
+ dev_err(sdev->dev, "error: invalid channel count for ESAI%d\n",
+ config->dai_index);
+ return -EINVAL;
+ }
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config, size, &reply,
+ sizeof(reply));
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DAI config for ESAI%d\n",
+ config->dai_index);
+ return ret;
+ }
+
+ /* set config for all DAI's with name matching the link name */
+ ret = sof_set_dai_config(sdev, size, link, config);
+ if (ret < 0)
+ dev_err(sdev->dev, "error: failed to save DAI config for ESAI%d\n",
+ config->dai_index);
+
+ return ret;
}
static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
@@ -2828,6 +2988,10 @@ static int sof_link_load(struct snd_soc_component *scomp, int index,
if (!link->no_pcm) {
link->nonatomic = true;
+ /* set trigger order */
+ link->trigger[0] = SND_SOC_DPCM_TRIGGER_POST;
+ link->trigger[1] = SND_SOC_DPCM_TRIGGER_POST;
+
/* nothing more to do for FE dai links */
return 0;
}
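The new stream_tokens, esai_tokens and led_tokens tables all use the same offsetof() convention: sof_parse_tokens() matches tuples from the topology's private data against a table and stores each decoded value at the entry's offset inside the destination object. A simplified sketch of that store step (the real parser additionally dispatches on tuple type and element count):

/* simplified: write one decoded u16 token into the target object,
 * e.g. offset = offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible) */
static void store_u16_token(void *object, u32 offset, u16 value)
{
	u16 *dst = (u16 *)((u8 *)object + offset);

	*dst = value;
}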
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
index 4c3cff031fd6..b0e4556c8536 100644
--- a/sound/soc/sof/trace.c
+++ b/sound/soc/sof/trace.c
@@ -162,6 +162,9 @@ int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
struct sof_ipc_reply ipc_reply;
int ret;
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
return -EINVAL;
@@ -222,6 +225,9 @@ int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
int ret;
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
/* set false before start initialization */
sdev->dtrace_is_enabled = false;
@@ -277,6 +283,9 @@ EXPORT_SYMBOL(snd_sof_init_trace);
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
struct sof_ipc_dma_trace_posn *posn)
{
+ if (!sdev->dtrace_is_supported)
+ return 0;
+
if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
sdev->host_offset = posn->host_offset;
wake_up(&sdev->trace_sleep);
@@ -293,6 +302,9 @@ int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
+ if (!sdev->dtrace_is_supported)
+ return;
+
if (sdev->dtrace_is_enabled) {
dev_err(sdev->dev, "error: waking up any trace sleepers\n");
sdev->dtrace_error = true;
@@ -305,7 +317,7 @@ void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
int ret;
- if (!sdev->dtrace_is_enabled)
+ if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
return;
ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
@@ -326,6 +338,9 @@ EXPORT_SYMBOL(snd_sof_release_trace);
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
+ if (!sdev->dtrace_is_supported)
+ return;
+
snd_sof_release_trace(sdev);
snd_dma_free_pages(&sdev->dmatb);
diff --git a/sound/soc/sprd/sprd-pcm-dma.c b/sound/soc/sprd/sprd-pcm-dma.c
index d38ebbbbf169..da4b8f5f192b 100644
--- a/sound/soc/sprd/sprd-pcm-dma.c
+++ b/sound/soc/sprd/sprd-pcm-dma.c
@@ -46,12 +46,10 @@ static const struct snd_pcm_hardware sprd_pcm_hardware = {
.buffer_bytes_max = 64 * 1024,
};
-static int sprd_pcm_open(struct snd_pcm_substream *substream)
+static int sprd_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_pcm_dma_private *dma_private;
int hw_chan = SPRD_PCM_CHANNEL_MAX;
@@ -111,13 +109,11 @@ error:
return ret;
}
-static int sprd_pcm_close(struct snd_pcm_substream *substream)
+static int sprd_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
int size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
int i;
@@ -157,14 +153,12 @@ static void sprd_pcm_release_dma_channel(struct snd_pcm_substream *substream)
}
}
-static int sprd_pcm_request_dma_channel(struct snd_pcm_substream *substream,
+static int sprd_pcm_request_dma_channel(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int channels)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_pcm_dma_params *dma_params = dma_private->params;
int i;
@@ -190,14 +184,13 @@ static int sprd_pcm_request_dma_channel(struct snd_pcm_substream *substream,
return 0;
}
-static int sprd_pcm_hw_params(struct snd_pcm_substream *substream,
+static int sprd_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sprd_pcm_dma_params *dma_params;
size_t totsize = params_buffer_bytes(params);
size_t period = params_period_bytes(params);
@@ -218,7 +211,8 @@ static int sprd_pcm_hw_params(struct snd_pcm_substream *substream,
if (!dma_private->params) {
dma_private->params = dma_params;
- ret = sprd_pcm_request_dma_channel(substream, channels);
+ ret = sprd_pcm_request_dma_channel(component,
+ substream, channels);
if (ret)
return ret;
}
@@ -313,7 +307,8 @@ sg_err:
return ret;
}
-static int sprd_pcm_hw_free(struct snd_pcm_substream *substream)
+static int sprd_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
sprd_pcm_release_dma_channel(substream);
@@ -321,13 +316,11 @@ static int sprd_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int sprd_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int sprd_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct sprd_pcm_dma_private *dma_private =
substream->runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int ret = 0, i;
switch (cmd) {
@@ -387,13 +380,11 @@ static int sprd_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int pointer[SPRD_PCM_CHANNEL_MAX];
int bytes_of_pointer = 0, sel_max = 0, i;
snd_pcm_uframes_t x;
@@ -444,7 +435,8 @@ static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_pcm_substream *substream)
return x;
}
-static int sprd_pcm_mmap(struct snd_pcm_substream *substream,
+static int sprd_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -456,18 +448,8 @@ static int sprd_pcm_mmap(struct snd_pcm_substream *substream,
vma->vm_page_prot);
}
-static struct snd_pcm_ops sprd_pcm_ops = {
- .open = sprd_pcm_open,
- .close = sprd_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = sprd_pcm_hw_params,
- .hw_free = sprd_pcm_hw_free,
- .trigger = sprd_pcm_trigger,
- .pointer = sprd_pcm_pointer,
- .mmap = sprd_pcm_mmap,
-};
-
-static int sprd_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int sprd_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
@@ -506,7 +488,8 @@ static int sprd_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void sprd_pcm_free(struct snd_pcm *pcm)
+static void sprd_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
int i;
@@ -523,10 +506,17 @@ static void sprd_pcm_free(struct snd_pcm *pcm)
static const struct snd_soc_component_driver sprd_soc_component = {
.name = DRV_NAME,
- .ops = &sprd_pcm_ops,
+ .open = sprd_pcm_open,
+ .close = sprd_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = sprd_pcm_hw_params,
+ .hw_free = sprd_pcm_hw_free,
+ .trigger = sprd_pcm_trigger,
+ .pointer = sprd_pcm_pointer,
+ .mmap = sprd_pcm_mmap,
+ .pcm_construct = sprd_pcm_new,
+ .pcm_destruct = sprd_pcm_free,
.compr_ops = &sprd_platform_compr_ops,
- .pcm_new = sprd_pcm_new,
- .pcm_free = sprd_pcm_free,
};
static int sprd_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
index 3c9a9deec9af..81c407da15c5 100644
--- a/sound/soc/stm/stm32_adfsdm.c
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -210,7 +210,8 @@ static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
return 0;
}
-static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int stm32_adfsdm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
@@ -230,7 +231,8 @@ static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
return -EINVAL;
}
-static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
+static int stm32_adfsdm_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
@@ -243,7 +245,8 @@ static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
return ret;
}
-static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
+static int stm32_adfsdm_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct stm32_adfsdm_priv *priv =
@@ -256,6 +259,7 @@ static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
}
static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -265,7 +269,8 @@ static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
return bytes_to_frames(substream->runtime, priv->pos);
}
-static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
+static int stm32_adfsdm_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -282,23 +287,16 @@ static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
params_period_size(params));
}
-static int stm32_adfsdm_pcm_hw_free(struct snd_pcm_substream *substream)
+static int stm32_adfsdm_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_lib_free_pages(substream);
return 0;
}
-static struct snd_pcm_ops stm32_adfsdm_pcm_ops = {
- .open = stm32_adfsdm_pcm_open,
- .close = stm32_adfsdm_pcm_close,
- .hw_params = stm32_adfsdm_pcm_hw_params,
- .hw_free = stm32_adfsdm_pcm_hw_free,
- .trigger = stm32_adfsdm_trigger,
- .pointer = stm32_adfsdm_pcm_pointer,
-};
-
-static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int stm32_adfsdm_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
struct stm32_adfsdm_priv *priv =
@@ -310,7 +308,8 @@ static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
+static void stm32_adfsdm_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
struct snd_pcm_substream *substream;
@@ -320,9 +319,14 @@ static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
}
static struct snd_soc_component_driver stm32_adfsdm_soc_platform = {
- .ops = &stm32_adfsdm_pcm_ops,
- .pcm_new = stm32_adfsdm_pcm_new,
- .pcm_free = stm32_adfsdm_pcm_free,
+ .open = stm32_adfsdm_pcm_open,
+ .close = stm32_adfsdm_pcm_close,
+ .hw_params = stm32_adfsdm_pcm_hw_params,
+ .hw_free = stm32_adfsdm_pcm_hw_free,
+ .trigger = stm32_adfsdm_trigger,
+ .pointer = stm32_adfsdm_pcm_pointer,
+ .pcm_construct = stm32_adfsdm_pcm_new,
+ .pcm_destruct = stm32_adfsdm_pcm_free,
};
static const struct of_device_id stm32_adfsdm_of_match[] = {
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index ef4273361d0d..e20267504b16 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -100,7 +100,7 @@ static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
dev_err(&sai->pdev->dev, "%pOFn%s already set as sync provider\n",
sai->pdev->dev.of_node,
prev_synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
- stm32_sai_pclk_disable(&sai->pdev->dev);
+ stm32_sai_pclk_disable(&sai->pdev->dev);
return -EINVAL;
}
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index cd4b235fce57..3fd28ee01675 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -351,6 +351,8 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
SPDIFRX_CR_CUMSK | SPDIFRX_CR_PTMSK | SPDIFRX_CR_RXSTEO;
cr_mask = cr;
+ cr |= SPDIFRX_CR_NBTRSET(SPDIFRX_NBTR_63);
+ cr_mask |= SPDIFRX_CR_NBTR_MASK;
cr |= SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_SYNC);
cr_mask |= SPDIFRX_CR_SPDIFEN_MASK;
ret = regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
@@ -666,7 +668,7 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
struct snd_pcm_substream *substream = spdifrx->substream;
struct platform_device *pdev = spdifrx->pdev;
unsigned int cr, mask, sr, imr;
- unsigned int flags;
+ unsigned int flags, sync_state;
int err = 0, err_xrun = 0;
regmap_read(spdifrx->regmap, STM32_SPDIFRX_SR, &sr);
@@ -726,11 +728,23 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
}
if (err) {
- /* SPDIFRX in STATE_STOP. Disable SPDIFRX to clear errors */
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_CR, &cr);
+ sync_state = FIELD_GET(SPDIFRX_CR_SPDIFEN_MASK, cr) ==
+ SPDIFRX_SPDIFEN_SYNC;
+
+ /* SPDIFRX is in STATE_STOP. Disable SPDIFRX to clear errors */
cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_DISABLE);
regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
SPDIFRX_CR_SPDIFEN_MASK, cr);
+ /* If SPDIFRX was in STATE_SYNC, retry synchro */
+ if (sync_state) {
+ cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_SYNC);
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_SPDIFEN_MASK, cr);
+ return IRQ_HANDLED;
+ }
+
if (substream)
snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index ee448d5e07a6..34f3e0be3058 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1442,7 +1442,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
if (!aux_dev.dlc.of_node) {
dev_err(dev, "Can't find analog controls for codec.\n");
return ERR_PTR(-EINVAL);
- };
+ }
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
@@ -1480,7 +1480,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
if (!aux_dev.dlc.of_node) {
dev_err(dev, "Can't find analog controls for codec.\n");
return ERR_PTR(-EINVAL);
- };
+ }
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
@@ -1518,7 +1518,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
if (!aux_dev.dlc.of_node) {
dev_err(dev, "Can't find analog controls for codec.\n");
return ERR_PTR(-EINVAL);
- };
+ }
card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
if (!card->dai_link)
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index e6d548fa980b..dbed3c5408e7 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -127,7 +127,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
struct device *dev = dai->dev;
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int mask, val, reg;
- int ret, sample_size, srate, i2sclock, bitcnt;
+ int ret, sample_size, srate, i2sclock, bitcnt, audio_bits;
struct tegra30_ahub_cif_conf cif_conf;
if (params_channels(params) != 2)
@@ -137,8 +137,19 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
val = TEGRA30_I2S_CTRL_BIT_SIZE_16;
+ audio_bits = TEGRA30_AUDIOCIF_BITS_16;
sample_size = 16;
break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val = TEGRA30_I2S_CTRL_BIT_SIZE_24;
+ audio_bits = TEGRA30_AUDIOCIF_BITS_24;
+ sample_size = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val = TEGRA30_I2S_CTRL_BIT_SIZE_32;
+ audio_bits = TEGRA30_AUDIOCIF_BITS_32;
+ sample_size = 32;
+ break;
default:
return -EINVAL;
}
@@ -170,8 +181,8 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
cif_conf.threshold = 0;
cif_conf.audio_channels = 2;
cif_conf.client_channels = 2;
- cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
- cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+ cif_conf.audio_bits = audio_bits;
+ cif_conf.client_bits = audio_bits;
cif_conf.expand = 0;
cif_conf.stereo_conv = 0;
cif_conf.replicate = 0;
@@ -220,9 +231,9 @@ static void tegra30_i2s_start_capture(struct tegra30_i2s *i2s)
static void tegra30_i2s_stop_capture(struct tegra30_i2s *i2s)
{
- tegra30_ahub_disable_rx_fifo(i2s->capture_fifo_cif);
regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
TEGRA30_I2S_CTRL_XFER_EN_RX, 0);
+ tegra30_ahub_disable_rx_fifo(i2s->capture_fifo_cif);
}
static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -254,6 +265,34 @@ static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
+static int tegra30_i2s_set_tdm(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ unsigned int mask, val;
+
+ dev_dbg(dai->dev, "%s: txmask=0x%08x rxmask=0x%08x slots=%d width=%d\n",
+ __func__, tx_mask, rx_mask, slots, slot_width);
+
+ mask = TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_MASK |
+ TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_MASK |
+ TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_MASK;
+
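+ /* The TOTAL_SLOTS field is programmed as slots - 1; the tx/rx
+  * masks carry one enable bit per slot.
+  */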
+ val = (tx_mask << TEGRA30_I2S_SLOT_CTRL_TX_SLOT_ENABLES_SHIFT) |
+ (rx_mask << TEGRA30_I2S_SLOT_CTRL_RX_SLOT_ENABLES_SHIFT) |
+ ((slots - 1) << TEGRA30_I2S_SLOT_CTRL_TOTAL_SLOTS_SHIFT);
+
+ pm_runtime_get_sync(dai->dev);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_SLOT_CTRL, mask, val);
+ /* set the fsync width to the minimum of one clock width */
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CH_CTRL,
+ TEGRA30_I2S_CH_CTRL_FSYNC_WIDTH_MASK, 0x0);
+ pm_runtime_put(dai->dev);
+
+ return 0;
+}
+
static int tegra30_i2s_probe(struct snd_soc_dai *dai)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
@@ -268,6 +307,7 @@ static const struct snd_soc_dai_ops tegra30_i2s_dai_ops = {
.set_fmt = tegra30_i2s_set_fmt,
.hw_params = tegra30_i2s_hw_params,
.trigger = tegra30_i2s_trigger,
+ .set_tdm_slot = tegra30_i2s_set_tdm,
};
static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
@@ -277,14 +317,18 @@ static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &tegra30_i2s_dai_ops,
.symmetric_rates = 1,
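A machine driver reaches the new .set_tdm_slot callback through the generic
ASoC helper. A minimal sketch, assuming a two-slot, 32-bit TDM layout (the
function name and the slot layout are illustrative, not part of this patch):

	#include <sound/soc.h>

	/* Hypothetical machine-driver hw_params step: two 32-bit slots,
	 * both enabled for transmit and receive (mask 0x3). */
	static int example_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;

		return snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 32);
	}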
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 7aa3c32e4a49..8e5371801d88 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -1867,7 +1867,7 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
return PCM_EDMA;
tmp = mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data;
- chan = dma_request_slave_channel_reason(mcasp->dev, tmp);
+ chan = dma_request_chan(mcasp->dev, tmp);
if (IS_ERR(chan)) {
if (PTR_ERR(chan) != -EPROBE_DEFER)
dev_err(mcasp->dev,
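dma_request_slave_channel_reason() was by this point only a compatibility
alias, roughly:

	/* from include/linux/dmaengine.h, before its removal */
	#define dma_request_slave_channel_reason(dev, name) \
		dma_request_chan(dev, name)

so the hunk is a mechanical rename with unchanged ERR_PTR semantics.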
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 66044559f70f..33c78d33e5a1 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -47,12 +47,12 @@ static const struct snd_pcm_hardware txx9aclc_pcm_hardware = {
.buffer_bytes_max = 32 * 1024,
};
-static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
+static int txx9aclc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct txx9aclc_dmadata *dmadata = runtime->private_data;
int ret;
@@ -76,12 +76,14 @@ static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct txx9aclc_dmadata *dmadata = runtime->private_data;
@@ -203,7 +205,8 @@ static void txx9aclc_dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&dmadata->dma_lock, flags);
}
-static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int txx9aclc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
struct txx9aclc_plat_drvdata *drvdata = txx9aclc_drvdata;
@@ -236,14 +239,16 @@ static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t
-txx9aclc_pcm_pointer(struct snd_pcm_substream *substream)
+txx9aclc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
return bytes_to_frames(substream->runtime, dmadata->pos);
}
-static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct txx9aclc_soc_device *dev = &txx9aclc_soc_device;
struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream];
@@ -261,7 +266,8 @@ static int txx9aclc_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
+static int txx9aclc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct txx9aclc_dmadata *dmadata = substream->runtime->private_data;
struct dma_chan *chan = dmadata->dma_chan;
@@ -271,23 +277,12 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops txx9aclc_pcm_ops = {
- .open = txx9aclc_pcm_open,
- .close = txx9aclc_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = txx9aclc_pcm_hw_params,
- .hw_free = txx9aclc_pcm_hw_free,
- .prepare = txx9aclc_pcm_prepare,
- .trigger = txx9aclc_pcm_trigger,
- .pointer = txx9aclc_pcm_pointer,
-};
-
-static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int txx9aclc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct platform_device *pdev = to_platform_device(component->dev);
struct txx9aclc_soc_device *dev;
struct resource *r;
@@ -409,8 +404,15 @@ static const struct snd_soc_component_driver txx9aclc_soc_component = {
.name = DRV_NAME,
.probe = txx9aclc_pcm_probe,
.remove = txx9aclc_pcm_remove,
- .ops = &txx9aclc_pcm_ops,
- .pcm_new = txx9aclc_pcm_new,
+ .open = txx9aclc_pcm_open,
+ .close = txx9aclc_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = txx9aclc_pcm_hw_params,
+ .hw_free = txx9aclc_pcm_hw_free,
+ .prepare = txx9aclc_pcm_prepare,
+ .trigger = txx9aclc_pcm_trigger,
+ .pointer = txx9aclc_pcm_pointer,
+ .pcm_construct = txx9aclc_pcm_new,
};
static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
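The same conversion repeats in the uniphier, xlnx and xtfpga hunks below:
from 5.5 on, the ASoC core takes the PCM callbacks directly in struct
snd_soc_component_driver, each callback gains the owning component as its
first argument, and .pcm_construct/.pcm_destruct replace .pcm_new/.pcm_free.
A minimal sketch of the target shape (the "foo" names are illustrative):

	#include <sound/soc.h>

	/* New-style callback: the component arrives as the first argument,
	 * so snd_soc_rtdcom_lookup() is no longer needed. */
	static int foo_pcm_open(struct snd_soc_component *component,
				struct snd_pcm_substream *substream)
	{
		return 0;
	}

	static int foo_pcm_new(struct snd_soc_component *component,
			       struct snd_soc_pcm_runtime *rtd)
	{
		return 0;
	}

	static const struct snd_soc_component_driver foo_component = {
		.name		= "foo",
		.open		= foo_pcm_open,
		.ioctl		= snd_soc_pcm_lib_ioctl,
		.pcm_construct	= foo_pcm_new,	/* replaces .pcm_new */
	};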
diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c
index e8446cc4e8f8..700d936ed94e 100644
--- a/sound/soc/uniphier/aio-dma.c
+++ b/sound/soc/uniphier/aio-dma.c
@@ -93,7 +93,8 @@ static irqreturn_t aiodma_irq(int irq, void *p)
return ret;
}
-static int uniphier_aiodma_open(struct snd_pcm_substream *substream)
+static int uniphier_aiodma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -103,7 +104,8 @@ static int uniphier_aiodma_open(struct snd_pcm_substream *substream)
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
}
-static int uniphier_aiodma_hw_params(struct snd_pcm_substream *substream,
+static int uniphier_aiodma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
@@ -112,7 +114,8 @@ static int uniphier_aiodma_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int uniphier_aiodma_hw_free(struct snd_pcm_substream *substream)
+static int uniphier_aiodma_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
snd_pcm_set_runtime_buffer(substream, NULL);
substream->runtime->dma_bytes = 0;
@@ -120,7 +123,8 @@ static int uniphier_aiodma_hw_free(struct snd_pcm_substream *substream)
return 0;
}
-static int uniphier_aiodma_prepare(struct snd_pcm_substream *substream)
+static int uniphier_aiodma_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
@@ -146,7 +150,8 @@ static int uniphier_aiodma_prepare(struct snd_pcm_substream *substream)
return 0;
}
-static int uniphier_aiodma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int uniphier_aiodma_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
@@ -181,6 +186,7 @@ static int uniphier_aiodma_trigger(struct snd_pcm_substream *substream, int cmd)
}
static snd_pcm_uframes_t uniphier_aiodma_pointer(
+ struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -204,7 +210,8 @@ static snd_pcm_uframes_t uniphier_aiodma_pointer(
return pos;
}
-static int uniphier_aiodma_mmap(struct snd_pcm_substream *substream,
+static int uniphier_aiodma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -214,18 +221,8 @@ static int uniphier_aiodma_mmap(struct snd_pcm_substream *substream,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-static const struct snd_pcm_ops uniphier_aiodma_ops = {
- .open = uniphier_aiodma_open,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = uniphier_aiodma_hw_params,
- .hw_free = uniphier_aiodma_hw_free,
- .prepare = uniphier_aiodma_prepare,
- .trigger = uniphier_aiodma_trigger,
- .pointer = uniphier_aiodma_pointer,
- .mmap = uniphier_aiodma_mmap,
-};
-
-static int uniphier_aiodma_new(struct snd_soc_pcm_runtime *rtd)
+static int uniphier_aiodma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct device *dev = rtd->card->snd_card->dev;
struct snd_pcm *pcm = rtd->pcm;
@@ -242,16 +239,24 @@ static int uniphier_aiodma_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static void uniphier_aiodma_free(struct snd_pcm *pcm)
+static void uniphier_aiodma_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm)
{
snd_pcm_lib_preallocate_free_for_all(pcm);
}
static const struct snd_soc_component_driver uniphier_soc_platform = {
- .pcm_new = uniphier_aiodma_new,
- .pcm_free = uniphier_aiodma_free,
- .ops = &uniphier_aiodma_ops,
- .compr_ops = &uniphier_aio_compr_ops,
+ .open = uniphier_aiodma_open,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = uniphier_aiodma_hw_params,
+ .hw_free = uniphier_aiodma_hw_free,
+ .prepare = uniphier_aiodma_prepare,
+ .trigger = uniphier_aiodma_trigger,
+ .pointer = uniphier_aiodma_pointer,
+ .mmap = uniphier_aiodma_mmap,
+ .pcm_construct = uniphier_aiodma_new,
+ .pcm_destruct = uniphier_aiodma_free,
+ .compr_ops = &uniphier_aio_compr_ops,
};
static const struct regmap_config aiodma_regmap_config = {
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index a90e0d7f0b73..394d8b2a4a16 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -533,7 +533,6 @@ static void disable_msp_tx(struct ux500_msp *msp)
static int disable_msp(struct ux500_msp *msp, unsigned int dir)
{
u32 reg_val_GCR;
- int status = 0;
unsigned int disable_tx, disable_rx;
reg_val_GCR = readl(msp->registers + MSP_GCR);
@@ -566,7 +565,7 @@ static int disable_msp(struct ux500_msp *msp, unsigned int dir)
else if (disable_rx)
disable_msp_rx(msp);
- return status;
+ return 0;
}
int ux500_msp_i2s_trigger(struct ux500_msp *msp, int cmd, int direction)
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 69973179ef15..1d3586b68db7 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -9,15 +9,15 @@ config SND_SOC_XILINX_I2S
encapsulates PCM in AES format and sends AES data.
config SND_SOC_XILINX_AUDIO_FORMATTER
- tristate "Audio support for the the Xilinx audio formatter"
- help
- Select this option to enable Xilinx audio formatter
- support. This provides DMA platform device support for
- audio functionality.
+ tristate "Audio support for the the Xilinx audio formatter"
+ help
+ Select this option to enable Xilinx audio formatter
+ support. This provides DMA platform device support for
+ audio functionality.
config SND_SOC_XILINX_SPDIF
- tristate "Audio support for the the Xilinx SPDIF"
- help
- Select this option to enable Xilinx SPDIF Audio.
- This provides playback and capture of SPDIF audio in
- AES format.
+ tristate "Audio support for the the Xilinx SPDIF"
+ help
+ Select this option to enable Xilinx SPDIF Audio.
+ This provides playback and capture of SPDIF audio in
+ AES format.
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 48970efe7838..296c4caf96a0 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -313,16 +313,14 @@ static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
-static int xlnx_formatter_pcm_open(struct snd_pcm_substream *substream)
+static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int err;
u32 val, data_format_mode;
u32 ch_count_mask, ch_count_shift, data_xfer_mode, data_xfer_shift;
struct xlnx_pcm_stream_param *stream_data;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -387,14 +385,12 @@ static int xlnx_formatter_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int xlnx_formatter_pcm_close(struct snd_pcm_substream *substream)
+static int xlnx_formatter_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
int ret;
struct xlnx_pcm_stream_param *stream_data =
substream->runtime->private_data;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
ret = xlnx_formatter_pcm_reset(stream_data->mmio);
if (ret) {
@@ -409,7 +405,8 @@ err_reset:
}
static snd_pcm_uframes_t
-xlnx_formatter_pcm_pointer(struct snd_pcm_substream *substream)
+xlnx_formatter_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
u32 pos;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -423,16 +420,14 @@ xlnx_formatter_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, pos);
}
-static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
+static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
u32 low, high, active_ch, val, bytes_per_ch, bits_per_sample;
u32 aes_reg1_val, aes_reg2_val;
int status;
u64 size;
- struct snd_soc_pcm_runtime *prtd = substream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
- DRV_NAME);
struct snd_pcm_runtime *runtime = substream->runtime;
struct xlnx_pcm_stream_param *stream_data = runtime->private_data;
@@ -500,12 +495,14 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int xlnx_formatter_pcm_hw_free(struct snd_pcm_substream *substream)
+static int xlnx_formatter_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
return snd_pcm_lib_free_pages(substream);
}
-static int xlnx_formatter_pcm_trigger(struct snd_pcm_substream *substream,
+static int xlnx_formatter_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
int cmd)
{
u32 val;
@@ -532,10 +529,9 @@ static int xlnx_formatter_pcm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static int xlnx_formatter_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int xlnx_formatter_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
- DRV_NAME);
snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV, component->dev,
xlnx_pcm_hardware.buffer_bytes_max,
@@ -543,20 +539,16 @@ static int xlnx_formatter_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static const struct snd_pcm_ops xlnx_formatter_pcm_ops = {
- .open = xlnx_formatter_pcm_open,
- .close = xlnx_formatter_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = xlnx_formatter_pcm_hw_params,
- .hw_free = xlnx_formatter_pcm_hw_free,
- .trigger = xlnx_formatter_pcm_trigger,
- .pointer = xlnx_formatter_pcm_pointer,
-};
-
static const struct snd_soc_component_driver xlnx_asoc_component = {
- .name = DRV_NAME,
- .ops = &xlnx_formatter_pcm_ops,
- .pcm_new = xlnx_formatter_pcm_new,
+ .name = DRV_NAME,
+ .open = xlnx_formatter_pcm_open,
+ .close = xlnx_formatter_pcm_close,
+ .ioctl = snd_soc_pcm_lib_ioctl,
+ .hw_params = xlnx_formatter_pcm_hw_params,
+ .hw_free = xlnx_formatter_pcm_hw_free,
+ .trigger = xlnx_formatter_pcm_trigger,
+ .pointer = xlnx_formatter_pcm_pointer,
+ .pcm_construct = xlnx_formatter_pcm_new,
};
static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
@@ -564,7 +556,6 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
int ret;
u32 val;
struct xlnx_pcm_drv_data *aud_drv_data;
- struct resource *res;
struct device *dev = &pdev->dev;
aud_drv_data = devm_kzalloc(dev, sizeof(*aud_drv_data), GFP_KERNEL);
@@ -584,13 +575,7 @@ static int xlnx_formatter_pcm_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "audio formatter node:addr to resource failed\n");
- ret = -ENXIO;
- goto clk_err;
- }
- aud_drv_data->mmio = devm_ioremap_resource(dev, res);
+ aud_drv_data->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(aud_drv_data->mmio)) {
dev_err(dev, "audio formatter ioremap failed\n");
ret = PTR_ERR(aud_drv_data->mmio);
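devm_platform_ioremap_resource() folds the two-step lookup deleted above
into a single call; the equivalence is roughly:

	struct resource *res;
	void __iomem *base;

	/* before: fetch the MEM resource, then map it */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* after: one helper does both and returns the mapping or an ERR_PTR */
	base = devm_platform_ioremap_resource(pdev, 0);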
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index efd374f114a0..e08f4fee932a 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -365,7 +365,8 @@ static const struct snd_pcm_hardware xtfpga_pcm_hardware = {
.fifo_size = 16,
};
-static int xtfpga_pcm_open(struct snd_pcm_substream *substream)
+static int xtfpga_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -378,13 +379,15 @@ static int xtfpga_pcm_open(struct snd_pcm_substream *substream)
return 0;
}
-static int xtfpga_pcm_close(struct snd_pcm_substream *substream)
+static int xtfpga_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
synchronize_rcu();
return 0;
}
-static int xtfpga_pcm_hw_params(struct snd_pcm_substream *substream,
+static int xtfpga_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
int ret;
@@ -424,7 +427,8 @@ static int xtfpga_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+static int xtfpga_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd)
{
int ret = 0;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -452,7 +456,8 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct xtfpga_i2s *i2s = runtime->private_data;
@@ -461,7 +466,8 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
return pos < runtime->buffer_size ? pos : 0;
}
-static int xtfpga_pcm_new(struct snd_soc_pcm_runtime *rtd)
+static int xtfpga_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
size_t size = xtfpga_pcm_hardware.buffer_bytes_max;
@@ -471,19 +477,15 @@ static int xtfpga_pcm_new(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static const struct snd_pcm_ops xtfpga_pcm_ops = {
+static const struct snd_soc_component_driver xtfpga_i2s_component = {
+ .name = DRV_NAME,
.open = xtfpga_pcm_open,
.close = xtfpga_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
+ .ioctl = snd_soc_pcm_lib_ioctl,
.hw_params = xtfpga_pcm_hw_params,
.trigger = xtfpga_pcm_trigger,
.pointer = xtfpga_pcm_pointer,
-};
-
-static const struct snd_soc_component_driver xtfpga_i2s_component = {
- .name = DRV_NAME,
- .pcm_new = xtfpga_pcm_new,
- .ops = &xtfpga_pcm_ops,
+ .pcm_construct = xtfpga_pcm_new,
};
static const struct snd_soc_dai_ops xtfpga_i2s_dai_ops = {
diff --git a/sound/soc/zte/Kconfig b/sound/soc/zte/Kconfig
index a7842e4b791c..a23d4f13ca19 100644
--- a/sound/soc/zte/Kconfig
+++ b/sound/soc/zte/Kconfig
@@ -18,9 +18,9 @@ config ZX_I2S
ZTE ZX I2S interface
config ZX_TDM
- tristate "ZTE ZX TDM Driver Support"
- depends on COMMON_CLK
- select SND_SOC_GENERIC_DMAENGINE_PCM
- help
- Say Y or M if you want to add support for codecs attached to the
- ZTE ZX TDM interface
+ tristate "ZTE ZX TDM Driver Support"
+ depends on COMMON_CLK
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Say Y or M if you want to add support for codecs attached to the
+ ZTE ZX TDM interface
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 441222c8e223..d4b8ccc61dc2 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -777,7 +777,7 @@ static int snd_amd7930_pcm(struct snd_amd7930 *amd)
amd->pcm = pcm;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 64*1024);
return 0;
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 6e065d44060e..4911103421ff 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2249,7 +2249,7 @@ static int snd_dbri_pcm(struct snd_card *card)
strcpy(pcm->name, card->shortname);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64 * 1024, 64 * 1024);
return 0;
}
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 88ac1c4ee163..cdc5dd7fbe16 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -449,13 +449,13 @@ static int usb6fire_pcm_close(struct snd_pcm_substream *alsa_sub)
static int usb6fire_pcm_hw_params(struct snd_pcm_substream *alsa_sub,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(alsa_sub,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(alsa_sub,
+ params_buffer_bytes(hw_params));
}
static int usb6fire_pcm_hw_free(struct snd_pcm_substream *alsa_sub)
{
- return snd_pcm_lib_free_vmalloc_buffer(alsa_sub);
+ return snd_pcm_lib_free_pages(alsa_sub);
}
static int usb6fire_pcm_prepare(struct snd_pcm_substream *alsa_sub)
@@ -560,7 +560,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = usb6fire_pcm_prepare,
.trigger = usb6fire_pcm_trigger,
.pointer = usb6fire_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
@@ -659,14 +658,9 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
strcpy(pcm->name, "DMX 6Fire USB");
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
- if (ret) {
- usb6fire_pcm_buffers_destroy(rt);
- kfree(rt);
- dev_err(&chip->dev->dev,
- "error preallocating pcm buffers.\n");
- return ret;
- }
rt->instance = pcm;
chip->pcm = rt;
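This hunk, like the caiaq, hiface, ua101, line6, sparc and usx2y hunks that
follow, relies on the 5.5 allocator behaviour where the device argument may
be NULL for the CONTINUOUS and VMALLOC buffer types. The two recurring call
shapes are:

	/* vmalloc-backed buffers: no device, no up-front allocation */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
					      NULL, 0, 0);

	/* kernel-memory (continuous) buffers, 64 KiB default and maximum */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
					      NULL, 64 * 1024, 64 * 1024);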
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index e2c53a0841da..059242f15d75 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -107,24 +107,24 @@ config SND_USB_US122L
will be called snd-usb-us122l.
config SND_USB_6FIRE
- tristate "TerraTec DMX 6Fire USB"
- select FW_LOADER
- select BITREVERSE
- select SND_RAWMIDI
- select SND_PCM
- select SND_VMASTER
- help
- Say Y here to include support for TerraTec 6fire DMX USB interface.
-
- You will need firmware files in order to be able to use the device
- after it has been coldstarted. An install script for the firmware
- and further help can be found at
- http://sixfireusb.sourceforge.net
+ tristate "TerraTec DMX 6Fire USB"
+ select FW_LOADER
+ select BITREVERSE
+ select SND_RAWMIDI
+ select SND_PCM
+ select SND_VMASTER
+ help
+	  Say Y here to include support for the TerraTec 6fire DMX USB interface.
+
+ You will need firmware files in order to be able to use the device
+ after it has been coldstarted. An install script for the firmware
+ and further help can be found at
+ http://sixfireusb.sourceforge.net
config SND_USB_HIFACE
- tristate "M2Tech hiFace USB-SPDIF driver"
- select SND_PCM
- help
+ tristate "M2Tech hiFace USB-SPDIF driver"
+ select SND_PCM
+ help
Select this option to include support for M2Tech hiFace USB-SPDIF
interface.
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 444bb637ce13..970eb0865ba3 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -170,15 +170,14 @@ static int snd_usb_caiaq_substream_close(struct snd_pcm_substream *substream)
static int snd_usb_caiaq_pcm_hw_params(struct snd_pcm_substream *sub,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(sub,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(sub, params_buffer_bytes(hw_params));
}
static int snd_usb_caiaq_pcm_hw_free(struct snd_pcm_substream *sub)
{
struct snd_usb_caiaqdev *cdev = snd_pcm_substream_chip(sub);
deactivate_substream(cdev, sub);
- return snd_pcm_lib_free_vmalloc_buffer(sub);
+ return snd_pcm_lib_free_pages(sub);
}
/* this should probably go upstream */
@@ -334,7 +333,6 @@ static const struct snd_pcm_ops snd_usb_caiaq_ops = {
.prepare = snd_usb_caiaq_pcm_prepare,
.trigger = snd_usb_caiaq_pcm_trigger,
.pointer = snd_usb_caiaq_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static void check_for_elapsed_periods(struct snd_usb_caiaqdev *cdev,
@@ -843,6 +841,8 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
&snd_usb_caiaq_ops);
snd_pcm_set_ops(cdev->pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_usb_caiaq_ops);
+ snd_pcm_lib_preallocate_pages_for_all(cdev->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
cdev->data_cb_info =
kmalloc_array(N_URBS, sizeof(struct snd_usb_caiaq_cb_info),
diff --git a/sound/usb/card.c b/sound/usb/card.c
index db91dc76cc91..9f743ebae615 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -74,6 +74,7 @@ static bool autoclock = true;
static char *quirk_alias[SNDRV_CARDS];
bool snd_usb_use_vmalloc = true;
+bool snd_usb_skip_validation;
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
@@ -96,6 +97,8 @@ module_param_array(quirk_alias, charp, NULL, 0444);
MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
module_param_named(use_vmalloc, snd_usb_use_vmalloc, bool, 0444);
MODULE_PARM_DESC(use_vmalloc, "Use vmalloc for PCM intermediate buffers (default: yes).");
+module_param_named(skip_validation, snd_usb_skip_validation, bool, 0444);
+MODULE_PARM_DESC(skip_validation, "Skip unit descriptor validation (default: no).");
/*
* we keep the snd_usb_audio_t instances by ourselves for merging
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 6b8c14f9b5d4..018b1ecb5404 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -165,21 +165,21 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
if (!cs_desc)
- return 0;
+ return false;
bmControls = le32_to_cpu(cs_desc->bmControls);
} else { /* UAC_VERSION_1/2 */
struct uac_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source(chip->ctrl_intf, source_id);
if (!cs_desc)
- return 0;
+ return false;
bmControls = cs_desc->bmControls;
}
/* If a clock source can't tell us whether it's valid, we assume it is */
if (!uac_v2v3_control_is_readable(bmControls,
UAC2_CS_CONTROL_CLOCK_VALID))
- return 1;
+ return true;
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
@@ -191,10 +191,10 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
dev_warn(&dev->dev,
"%s(): cannot get clock validity for id %d\n",
__func__, source_id);
- return 0;
+ return false;
}
- return !!data;
+ return data ? true : false;
}
static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index c406497c5919..e0de71917274 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -418,13 +418,13 @@ static int hiface_pcm_close(struct snd_pcm_substream *alsa_sub)
static int hiface_pcm_hw_params(struct snd_pcm_substream *alsa_sub,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_lib_alloc_vmalloc_buffer(alsa_sub,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(alsa_sub,
+ params_buffer_bytes(hw_params));
}
static int hiface_pcm_hw_free(struct snd_pcm_substream *alsa_sub)
{
- return snd_pcm_lib_free_vmalloc_buffer(alsa_sub);
+ return snd_pcm_lib_free_pages(alsa_sub);
}
static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
@@ -518,7 +518,6 @@ static const struct snd_pcm_ops pcm_ops = {
.prepare = hiface_pcm_prepare,
.trigger = hiface_pcm_trigger,
.pointer = hiface_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static int hiface_pcm_init_urb(struct pcm_urb *urb,
@@ -614,6 +613,8 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
strlcpy(pcm->name, "USB-SPDIF Audio", sizeof(pcm->name));
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
rt->instance = pcm;
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index f70211e6b174..9c437c716cfd 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -502,9 +502,7 @@ static int snd_line6_new_pcm(struct usb_line6 *line6, struct snd_pcm **pcm_ret)
/* pre-allocation of buffers */
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data
- (GFP_KERNEL), 64 * 1024,
- 128 * 1024);
+ NULL, 64 * 1024, 128 * 1024);
return 0;
}
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 307b72d5fffa..566a4a31528a 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -733,8 +733,8 @@ static int capture_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
return err;
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
}
static int playback_pcm_hw_params(struct snd_pcm_substream *substream,
@@ -751,13 +751,13 @@ static int playback_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
return err;
- return snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
}
static int ua101_pcm_hw_free(struct snd_pcm_substream *substream)
{
- return snd_pcm_lib_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_pages(substream);
}
static int capture_pcm_prepare(struct snd_pcm_substream *substream)
@@ -889,7 +889,6 @@ static const struct snd_pcm_ops capture_pcm_ops = {
.prepare = capture_pcm_prepare,
.trigger = capture_pcm_trigger,
.pointer = capture_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_pcm_ops = {
@@ -901,7 +900,6 @@ static const struct snd_pcm_ops playback_pcm_ops = {
.prepare = playback_pcm_prepare,
.trigger = playback_pcm_trigger,
.pointer = playback_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct uac_format_type_i_discrete_descriptor *
@@ -1296,6 +1294,8 @@ static int ua101_probe(struct usb_interface *interface,
strcpy(ua->pcm->name, name);
snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops);
snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops);
+ snd_pcm_lib_preallocate_pages_for_all(ua->pcm, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
err = snd_usbmidi_create(card, ua->intf[INTF_MIDI],
&ua->midi_list, &midi_quirk);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 45eee5cc312e..6cd4ff09c5ee 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2930,6 +2930,9 @@ static int snd_usb_mixer_controls_badd(struct usb_mixer_interface *mixer,
continue;
iface = usb_ifnum_to_if(dev, intf);
+ if (!iface)
+ continue;
+
num = iface->num_altsetting;
if (num < 2)
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 83715fd8dfd6..9d10cbf1b5ed 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -142,6 +142,7 @@ enum {
SCARLETT_OUTPUTS,
SCARLETT_SWITCH_IMPEDANCE,
SCARLETT_SWITCH_PAD,
+ SCARLETT_SWITCH_GAIN,
};
enum {
@@ -192,6 +193,15 @@ static const struct scarlett_mixer_elem_enum_info opt_pad = {
}
};
+static const struct scarlett_mixer_elem_enum_info opt_gain = {
+ .start = 0,
+ .len = 2,
+ .offsets = {},
+ .names = (char const * const []){
+ "Lo", "Hi"
+ }
+};
+
static const struct scarlett_mixer_elem_enum_info opt_impedance = {
.start = 0,
.len = 2,
@@ -652,8 +662,8 @@ static struct scarlett_device_info s6i6_info = {
{ .num = 1, .type = SCARLETT_SWITCH_PAD, .name = NULL},
{ .num = 2, .type = SCARLETT_SWITCH_IMPEDANCE, .name = NULL},
{ .num = 2, .type = SCARLETT_SWITCH_PAD, .name = NULL},
- { .num = 3, .type = SCARLETT_SWITCH_PAD, .name = NULL},
- { .num = 4, .type = SCARLETT_SWITCH_PAD, .name = NULL},
+ { .num = 3, .type = SCARLETT_SWITCH_GAIN, .name = NULL},
+ { .num = 4, .type = SCARLETT_SWITCH_GAIN, .name = NULL},
},
.matrix_mux_init = {
@@ -883,6 +893,15 @@ static int scarlett_controls_create_generic(struct usb_mixer_interface *mixer,
if (err < 0)
return err;
break;
+ case SCARLETT_SWITCH_GAIN:
+ sprintf(mx, "Input %d Gain Switch", ctl->num);
+ err = add_new_ctl(mixer, &usb_scarlett_ctl_enum,
+ scarlett_ctl_enum_resume, 0x01,
+ 0x08, ctl->num, USB_MIXER_S16, 1, mx,
+ &opt_gain, &elem);
+ if (err < 0)
+ return err;
+ break;
}
}
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 7d460b1f1735..94b903d95afa 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -261,34 +261,34 @@ static const struct scarlett2_device_info s6i6_gen2_info = {
},
.ports = {
- {
+ [SCARLETT2_PORT_TYPE_NONE] = {
.id = 0x000,
.num = { 1, 0, 8, 8, 8 },
.src_descr = "Off",
.src_num_offset = 0,
},
- {
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
.id = 0x080,
.num = { 4, 4, 4, 4, 4 },
.src_descr = "Analogue %d",
.src_num_offset = 1,
.dst_descr = "Analogue Output %02d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
.id = 0x180,
.num = { 2, 2, 2, 2, 2 },
.src_descr = "S/PDIF %d",
.src_num_offset = 1,
.dst_descr = "S/PDIF Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_MIX] = {
.id = 0x300,
.num = { 10, 18, 18, 18, 18 },
.src_descr = "Mix %c",
.src_num_offset = 65,
.dst_descr = "Mixer Input %02d Capture"
},
- {
+ [SCARLETT2_PORT_TYPE_PCM] = {
.id = 0x600,
.num = { 6, 6, 6, 6, 6 },
.src_descr = "PCM %d",
@@ -317,44 +317,44 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
},
.ports = {
- {
+ [SCARLETT2_PORT_TYPE_NONE] = {
.id = 0x000,
.num = { 1, 0, 8, 8, 4 },
.src_descr = "Off",
.src_num_offset = 0,
},
- {
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
.id = 0x080,
.num = { 8, 6, 6, 6, 6 },
.src_descr = "Analogue %d",
.src_num_offset = 1,
.dst_descr = "Analogue Output %02d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
+ .id = 0x180,
/* S/PDIF outputs aren't available at 192KHz
* but are included in the USB mux I/O
* assignment message anyway
*/
- .id = 0x180,
.num = { 2, 2, 2, 2, 2 },
.src_descr = "S/PDIF %d",
.src_num_offset = 1,
.dst_descr = "S/PDIF Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_ADAT] = {
.id = 0x200,
.num = { 8, 0, 0, 0, 0 },
.src_descr = "ADAT %d",
.src_num_offset = 1,
},
- {
+ [SCARLETT2_PORT_TYPE_MIX] = {
.id = 0x300,
.num = { 10, 18, 18, 18, 18 },
.src_descr = "Mix %c",
.src_num_offset = 65,
.dst_descr = "Mixer Input %02d Capture"
},
- {
+ [SCARLETT2_PORT_TYPE_PCM] = {
.id = 0x600,
.num = { 20, 18, 18, 14, 10 },
.src_descr = "PCM %d",
@@ -387,20 +387,20 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
},
.ports = {
- {
+ [SCARLETT2_PORT_TYPE_NONE] = {
.id = 0x000,
.num = { 1, 0, 8, 8, 6 },
.src_descr = "Off",
.src_num_offset = 0,
},
- {
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = {
.id = 0x080,
.num = { 8, 10, 10, 10, 10 },
.src_descr = "Analogue %d",
.src_num_offset = 1,
.dst_descr = "Analogue Output %02d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_SPDIF] = {
/* S/PDIF outputs aren't available at 192KHz
* but are included in the USB mux I/O
* assignment message anyway
@@ -411,21 +411,21 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
.src_num_offset = 1,
.dst_descr = "S/PDIF Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_ADAT] = {
.id = 0x200,
.num = { 8, 8, 8, 4, 0 },
.src_descr = "ADAT %d",
.src_num_offset = 1,
.dst_descr = "ADAT Output %d Playback"
},
- {
+ [SCARLETT2_PORT_TYPE_MIX] = {
.id = 0x300,
.num = { 10, 18, 18, 18, 18 },
.src_descr = "Mix %c",
.src_num_offset = 65,
.dst_descr = "Mixer Input %02d Capture"
},
- {
+ [SCARLETT2_PORT_TYPE_PCM] = {
.id = 0x600,
.num = { 20, 18, 18, 14, 10 },
.src_descr = "PCM %d",
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index ff5ab24f3bd1..9c8930bb00c8 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -785,12 +785,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
if (ret)
return ret;
- if (snd_usb_use_vmalloc)
- ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
- else
- ret = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
if (ret < 0)
goto stop_pipeline;
@@ -857,10 +853,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
snd_usb_unlock_shutdown(subs->stream->chip);
}
- if (snd_usb_use_vmalloc)
- return snd_pcm_lib_free_vmalloc_buffer(substream);
- else
- return snd_pcm_lib_free_pages(substream);
+ return snd_pcm_lib_free_pages(substream);
}
/*
@@ -1781,7 +1774,6 @@ static const struct snd_pcm_ops snd_usb_playback_ops = {
.prepare = snd_usb_pcm_prepare,
.trigger = snd_usb_substream_playback_trigger,
.pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops snd_usb_capture_ops = {
@@ -1793,43 +1785,14 @@ static const struct snd_pcm_ops snd_usb_capture_ops = {
.prepare = snd_usb_pcm_prepare,
.trigger = snd_usb_substream_capture_trigger,
.pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
-};
-
-static const struct snd_pcm_ops snd_usb_playback_dev_ops = {
- .open = snd_usb_pcm_open,
- .close = snd_usb_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usb_hw_params,
- .hw_free = snd_usb_hw_free,
- .prepare = snd_usb_pcm_prepare,
- .trigger = snd_usb_substream_playback_trigger,
- .pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
-};
-
-static const struct snd_pcm_ops snd_usb_capture_dev_ops = {
- .open = snd_usb_pcm_open,
- .close = snd_usb_pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_usb_hw_params,
- .hw_free = snd_usb_hw_free,
- .prepare = snd_usb_pcm_prepare,
- .trigger = snd_usb_substream_capture_trigger,
- .pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_sgbuf_ops_page,
};
void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream)
{
const struct snd_pcm_ops *ops;
- if (snd_usb_use_vmalloc)
- ops = stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ ops = stream == SNDRV_PCM_STREAM_PLAYBACK ?
&snd_usb_playback_ops : &snd_usb_capture_ops;
- else
- ops = stream == SNDRV_PCM_STREAM_PLAYBACK ?
- &snd_usb_playback_dev_ops : &snd_usb_capture_dev_ops;
snd_pcm_set_ops(pcm, stream, ops);
}
@@ -1839,7 +1802,10 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs)
struct snd_pcm_substream *s = pcm->streams[subs->direction].substream;
struct device *dev = subs->dev->bus->controller;
- if (!snd_usb_use_vmalloc)
+ if (snd_usb_use_vmalloc)
+ snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_VMALLOC,
+ NULL, 0, 0);
+ else
snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG,
dev, 64*1024, 512*1024);
}
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index feb30f9c1716..ff3cbf653de8 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -120,5 +120,6 @@ int snd_usb_lock_shutdown(struct snd_usb_audio *chip);
void snd_usb_unlock_shutdown(struct snd_usb_audio *chip);
extern bool snd_usb_use_vmalloc;
+extern bool snd_usb_skip_validation;
#endif /* __USBAUDIO_H */
diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
index 89fa287678fc..25a0939f410a 100644
--- a/sound/usb/usx2y/usbusx2yaudio.c
+++ b/sound/usb/usx2y/usbusx2yaudio.c
@@ -970,13 +970,13 @@ static int usX2Y_audio_stream_new(struct snd_card *card, int playback_endpoint,
if (playback_endpoint) {
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
}
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
usX2Y(card)->pcm_devs++;
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index ac8960b6b299..997493e839ee 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -728,11 +728,11 @@ int usX2Y_hwdep_pcm_new(struct snd_card *card)
sprintf(pcm->name, NAME_ALLCAPS" hwdep Audio");
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
snd_pcm_lib_preallocate_pages(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
+ NULL,
64*1024, 128*1024);
return 0;
diff --git a/sound/usb/validate.c b/sound/usb/validate.c
index 389e8657434a..36ae78c3da3d 100644
--- a/sound/usb/validate.c
+++ b/sound/usb/validate.c
@@ -322,11 +322,28 @@ static bool validate_desc(unsigned char *hdr, int protocol,
bool snd_usb_validate_audio_desc(void *p, int protocol)
{
- return validate_desc(p, protocol, audio_validators);
+ unsigned char *c = p;
+ bool valid;
+
+ valid = validate_desc(p, protocol, audio_validators);
+ if (!valid && snd_usb_skip_validation) {
+ print_hex_dump(KERN_ERR, "USB-audio: buggy audio desc: ",
+ DUMP_PREFIX_NONE, 16, 1, c, c[0], true);
+ valid = true;
+ }
+ return valid;
}
bool snd_usb_validate_midi_desc(void *p)
{
- return validate_desc(p, UAC_VERSION_1, midi_validators);
+ unsigned char *c = p;
+ bool valid;
+
+ valid = validate_desc(p, UAC_VERSION_1, midi_validators);
+ if (!valid && snd_usb_skip_validation) {
+ print_hex_dump(KERN_ERR, "USB-audio: buggy midi desc: ",
+ DUMP_PREFIX_NONE, 16, 1, c, c[0], true);
+ valid = true;
+ }
+ return valid;
}
-
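This fallback pairs with the skip_validation module parameter added in
sound/usb/card.c above: loading snd-usb-audio with skip_validation=1 trades
the descriptor sanity check for an error-level hex dump of the offending
descriptor, which is then accepted anyway.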
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 5fd4e32247a6..cd389d21219a 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1708,10 +1708,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
/* get resources */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Could not get irq resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
res_mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_mmio) {
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index 0a0e9112f284..8908c58bd6cd 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -695,16 +695,28 @@ AVXcode: 2
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-# Skip 0x50-0x57
+50: vpdpbusd Vx,Hx,Wx (66),(ev)
+51: vpdpbusds Vx,Hx,Wx (66),(ev)
+52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+54: vpopcntb/w Vx,Wx (66),(ev)
+55: vpopcntd/q Vx,Wx (66),(ev)
58: vpbroadcastd Vx,Wx (66),(v)
59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-# Skip 0x5c-0x63
+# Skip 0x5c-0x61
+62: vpexpandb/w Vx,Wx (66),(ev)
+63: vpcompressb/w Wx,Vx (66),(ev)
64: vpblendmd/q Vx,Hx,Wx (66),(ev)
65: vblendmps/d Vx,Hx,Wx (66),(ev)
66: vpblendmb/w Vx,Hx,Wx (66),(ev)
-# Skip 0x67-0x74
+68: vp2intersectd/q Kx,Hx,Wx (F2),(ev)
+# Skip 0x69-0x6f
+70: vpshldvw Vx,Hx,Wx (66),(ev)
+71: vpshldvd/q Vx,Hx,Wx (66),(ev)
+72: vcvtne2ps2bf16 Vx,Hx,Wx (F2),(ev) | vcvtneps2bf16 Vx,Wx (F3),(ev) | vpshrdvw Vx,Hx,Wx (66),(ev)
+73: vpshrdvd/q Vx,Hx,Wx (66),(ev)
75: vpermi2b/w Vx,Hx,Wx (66),(ev)
76: vpermi2d/q Vx,Hx,Wx (66),(ev)
77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
@@ -727,6 +739,7 @@ AVXcode: 2
8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
8d: vpermb/w Vx,Hx,Wx (66),(ev)
8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+8f: vpshufbitqmb Kx,Hx,Wx (66),(ev)
# 0x0f 0x38 0x90-0xbf (FMA)
90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
@@ -738,8 +751,8 @@ AVXcode: 2
97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
-9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v) | v4fmaddps Vdqq,Hdqq,Wdq (F2),(ev)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1) | v4fmaddss Vdq,Hdq,Wdq (F2),(ev)
9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
@@ -752,8 +765,8 @@ a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
-ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v) | v4fnmaddps Vdqq,Hdqq,Wdq (F2),(ev)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1) | v4fnmaddss Vdq,Hdq,Wdq (F2),(ev)
ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
@@ -780,11 +793,12 @@ ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+cf: vgf2p8mulb Vx,Wx (66)
db: VAESIMC Vdq,Wdq (66),(v1)
-dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
-dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
-de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
-df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+dc: vaesenc Vx,Hx,Wx (66)
+dd: vaesenclast Vx,Hx,Wx (66)
+de: vaesdec Vx,Hx,Wx (66)
+df: vaesdeclast Vx,Hx,Wx (66)
f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
f2: ANDN Gy,By,Ey (v)
@@ -848,7 +862,7 @@ AVXcode: 3
41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+44: vpclmulqdq Vx,Hx,Wx,Ib (66)
46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
@@ -865,7 +879,13 @@ AVXcode: 3
63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
66: vfpclassps/d Vk,Wx,Ib (66),(ev)
67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+70: vpshldw Vx,Hx,Wx,Ib (66),(ev)
+71: vpshldd/q Vx,Hx,Wx,Ib (66),(ev)
+72: vpshrdw Vx,Hx,Wx,Ib (66),(ev)
+73: vpshrdd/q Vx,Hx,Wx,Ib (66),(ev)
cc: sha1rnds4 Vdq,Wdq,Ib
+ce: vgf2p8affineqb Vx,Wx,Ib (66)
+cf: vgf2p8affineinvqb Vx,Wx,Ib (66)
df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
f0: RORX Gy,Ey,Ib (F2),(v)
EndTable
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 8a19753cc26a..574c2e0b9d20 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -96,7 +96,8 @@ FEATURE_TESTS_EXTRA := \
cxx \
llvm \
llvm-version \
- clang
+ clang \
+ libbpf
FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 8499385365c0..f30a89046aa3 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -53,6 +53,7 @@ FILES= \
test-zlib.bin \
test-lzma.bin \
test-bpf.bin \
+ test-libbpf.bin \
test-get_cpuid.bin \
test-sdt.bin \
test-cxx.bin \
@@ -270,6 +271,9 @@ $(OUTPUT)test-get_cpuid.bin:
$(OUTPUT)test-bpf.bin:
$(BUILD)
+$(OUTPUT)test-libbpf.bin:
+ $(BUILD) -lbpf
+
$(OUTPUT)test-sdt.bin:
$(BUILD)
diff --git a/tools/build/feature/test-libbpf.c b/tools/build/feature/test-libbpf.c
new file mode 100644
index 000000000000..a508756cf4cc
--- /dev/null
+++ b/tools/build/feature/test-libbpf.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+ return bpf_object__open("test") ? 0 : -1;
+}
diff --git a/tools/hv/vmbus_testing b/tools/hv/vmbus_testing
new file mode 100755
index 000000000000..e7212903dd1d
--- /dev/null
+++ b/tools/hv/vmbus_testing
@@ -0,0 +1,376 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Program to allow users to fuzz test Hyper-V drivers
+# by interfacing with Hyper-V debugfs attributes.
+# Current test methods available:
+# 1. delay testing
+#
+# Current file/directory structure of hyper-V debugfs:
+# /sys/kernel/debug/hyperv/UUID
+# /sys/kernel/debug/hyperv/UUID/<test-state filename>
+# /sys/kernel/debug/hyperv/UUID/<test-method sub-directory>
+#
+# author: Branden Bonaby <brandonbonaby94@gmail.com>
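+#
+# Example invocations (values and the device path are illustrative; the
+# argparse usage string further down is authoritative):
+#   vmbus_testing view_all
+#   vmbus_testing delay -E -t 100 100
+#   vmbus_testing delay -e -t 500 500 -p /sys/kernel/debug/hyperv/<UUID>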
+
+import os
+import cmd
+import argparse
+import glob
+from argparse import RawDescriptionHelpFormatter
+from argparse import RawTextHelpFormatter
+from enum import Enum
+
+# Do not change these unless you change the debugfs attributes
+# in /drivers/hv/debugfs.c. All fuzz-testing
+# attributes start with "fuzz_test".
+
+# debugfs path for hyperv must exist before proceeding
+debugfs_hyperv_path = "/sys/kernel/debug/hyperv"
+if not os.path.isdir(debugfs_hyperv_path):
+ print("{} doesn't exist/check permissions".format(debugfs_hyperv_path))
+ exit(-1)
+
+class dev_state(Enum):
+ off = 0
+ on = 1
+
+# File names, that correspond to the files created in
+# /drivers/hv/debugfs.c
+class f_names(Enum):
+ state_f = "fuzz_test_state"
+ buff_f = "fuzz_test_buffer_interrupt_delay"
+ mess_f = "fuzz_test_message_delay"
+
+# Both single_actions and all_actions are used
+# for error checking and to allow for some subparser
+# names to be abbreviated. Do not abbreviate the
+# test method names, as it will become less intuitive
+# as to what the user can do. If you do decide to
+# abbreviate the test method name, make sure the main
+# function reflects this change.
+
+all_actions = [
+ "disable_all",
+ "D",
+ "enable_all",
+ "view_all",
+ "V"
+]
+
+single_actions = [
+ "disable_single",
+ "d",
+ "enable_single",
+ "view_single",
+ "v"
+]
+
+def main():
+
+ file_map = recursive_file_lookup(debugfs_hyperv_path, dict())
+ args = parse_args()
+ if (not args.action):
+ print ("Error, no options selected...exiting")
+ exit(-1)
+ arg_set = { k for (k,v) in vars(args).items() if v and k != "action" }
+ arg_set.add(args.action)
+ path = args.path if "path" in arg_set else None
+ if (path and path[-1] == "/"):
+ path = path[:-1]
+ validate_args_path(path, arg_set, file_map)
+ if (path and "enable_single" in arg_set):
+ state_path = locate_state(path, file_map)
+ set_test_state(state_path, dev_state.on.value, args.quiet)
+
+ # Use subparsers as the key for different actions
+ if ("delay" in arg_set):
+ validate_delay_values(args.delay_time)
+ if (args.enable_all):
+ set_delay_all_devices(file_map, args.delay_time,
+ args.quiet)
+ else:
+ set_delay_values(path, file_map, args.delay_time,
+ args.quiet)
+ elif ("disable_all" in arg_set or "D" in arg_set):
+ disable_all_testing(file_map)
+ elif ("disable_single" in arg_set or "d" in arg_set):
+ disable_testing_single_device(path, file_map)
+ elif ("view_all" in arg_set or "V" in arg_set):
+ get_all_devices_test_status(file_map)
+ elif ("view_single" in arg_set or "v" in arg_set):
+ get_device_test_values(path, file_map)
+
+# Get the state location
+def locate_state(device, file_map):
+ return file_map[device][f_names.state_f.value]
+
+# Validate delay values to make sure they are acceptable to
+# enable delays on a device
+def validate_delay_values(delay):
+
+ if (delay[0] == -1 and delay[1] == -1):
+ print("\nError, At least 1 value must be greater than 0")
+ exit(-1)
+ for i in delay:
+ if (i < -1 or i == 0 or i > 1000):
+ print("\nError, Values must be equal to -1 "
+ "or be > 0 and <= 1000")
+ exit(-1)
+
+# Validate argument path
+def validate_args_path(path, arg_set, file_map):
+
+ if (not path and any(element in arg_set for element in single_actions)):
+ print("Error, path (-p) REQUIRED for the specified option. "
+ "Use (-h) to check usage.")
+ exit(-1)
+ elif (path and any(item in arg_set for item in all_actions)):
+ print("Error, path (-p) NOT REQUIRED for the specified option. "
+ "Use (-h) to check usage." )
+ exit(-1)
+ elif (path not in file_map and any(item in arg_set
+ for item in single_actions)):
+ print("Error, path '{}' not a valid vmbus device".format(path))
+ exit(-1)
+
+# display Testing status of single device
+def get_device_test_values(path, file_map):
+
+ for name in file_map[path]:
+ file_location = file_map[path][name]
+ print( name + " = " + str(read_test_files(file_location)))
+
+# Create a map of the vmbus devices and their associated files
+# [key=device, value = [key = filename, value = file path]]
+def recursive_file_lookup(path, file_map):
+
+ for f_path in glob.iglob(path + '**/*'):
+ if (os.path.isfile(f_path)):
+ if (f_path.rsplit("/",2)[0] == debugfs_hyperv_path):
+ directory = f_path.rsplit("/",1)[0]
+ else:
+ directory = f_path.rsplit("/",2)[0]
+ f_name = f_path.split("/")[-1]
+ if (file_map.get(directory)):
+ file_map[directory].update({f_name:f_path})
+ else:
+ file_map[directory] = {f_name:f_path}
+ elif (os.path.isdir(f_path)):
+ recursive_file_lookup(f_path,file_map)
+ return file_map
+
+# display Testing state of devices
+def get_all_devices_test_status(file_map):
+
+ for device in file_map:
if (get_test_state(locate_state(device, file_map)) == 1):
+ print("Testing = ON for: {}"
+ .format(device.split("/")[5]))
+ else:
+ print("Testing = OFF for: {}"
+ .format(device.split("/")[5]))
+
+# read the vmbus device files, path must be absolute path before calling
+def read_test_files(path):
+ try:
+ with open(path,"r") as f:
+ file_value = f.readline().strip()
+ return int(file_value)
+
+ except IOError as e:
+ errno, strerror = e.args
+ print("I/O error({0}): {1} on file {2}"
+ .format(errno, strerror, path))
+ exit(-1)
+ except ValueError:
+ print ("Element to int conversion error in: \n{}".format(path))
+ exit(-1)
+
+# writing to vmbus device files, path must be absolute path before calling
+def write_test_files(path, value):
+
+ try:
+ with open(path,"w") as f:
+ f.write("{}".format(value))
+ except IOError as e:
+ errno, strerror = e.args
+ print("I/O error({0}): {1} on file {2}"
+ .format(errno, strerror, path))
+ exit(-1)
+
+# set testing state of device
+def set_test_state(state_path, state_value, quiet):
+
+ write_test_files(state_path, state_value)
+ if (get_test_state(state_path) == 1):
+ if (not quiet):
+ print("Testing = ON for device: {}"
+ .format(state_path.split("/")[5]))
+ else:
+ if (not quiet):
+ print("Testing = OFF for device: {}"
+ .format(state_path.split("/")[5]))
+
+# get testing state of device
+def get_test_state(state_path):
+ #state == 1 - test = ON
+ #state == 0 - test = OFF
+ return read_test_files(state_path)
+
+# Write a delay of 1 - 1000 microseconds into a single device using the
+# fuzz_test_buffer_interrupt_delay and fuzz_test_message_delay
+# debugfs attributes
+def set_delay_values(device, file_map, delay_length, quiet):
+
+ try:
+ interrupt = file_map[device][f_names.buff_f.value]
+ message = file_map[device][f_names.mess_f.value]
+
+ # delay[0]- buffer interrupt delay, delay[1]- message delay
+ if (delay_length[0] >= 0 and delay_length[0] <= 1000):
+ write_test_files(interrupt, delay_length[0])
+ if (delay_length[1] >= 0 and delay_length[1] <= 1000):
+ write_test_files(message, delay_length[1])
+ if (not quiet):
+ print("Buffer delay testing = {} for: {}"
+ .format(read_test_files(interrupt),
+ interrupt.split("/")[5]))
+ print("Message delay testing = {} for: {}"
+ .format(read_test_files(message),
+ message.split("/")[5]))
+ except IOError as e:
+ errno, strerror = e.args
+ print("I/O error({0}): {1} on files {2}{3}"
+ .format(errno, strerror, interrupt, message))
+ exit(-1)
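+# For illustration only -- a sketch of a direct call, assuming a
+# populated file_map and a hypothetical device path:
+#
+#   set_delay_values("/sys/kernel/debug/hyperv/<device-guid>",
+#                    file_map, [100, 200], False)
+#
+# would write a 100 microsecond buffer interrupt delay and a
+# 200 microsecond message delay to that device's debugfs attributes.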
+
+# enabling delay testing on all devices
+def set_delay_all_devices(file_map, delay, quiet):
+
+ for device in file_map:
+ set_test_state(locate_state(device, file_map),
+ dev_state.on.value,
+ quiet)
+ set_delay_values(device, file_map, delay, quiet)
+
+# disable all testing on a SINGLE device.
+def disable_testing_single_device(device, file_map):
+
+ for name in file_map[device]:
+ file_location = file_map[device][name]
+ write_test_files(file_location, dev_state.off.value)
+ print("ALL testing now OFF for {}".format(device.split("/")[-1]))
+
+# disable all testing on ALL devices
+def disable_all_testing(file_map):
+
+ for device in file_map:
+ disable_testing_single_device(device, file_map)
+
+def parse_args():
+ parser = argparse.ArgumentParser(prog = "vmbus_testing", usage = "\n"
+ "%(prog)s [delay] [-h] [-e|-E] -t [-p]\n"
+ "%(prog)s [view_all | V] [-h]\n"
+ "%(prog)s [disable_all | D] [-h]\n"
+ "%(prog)s [disable_single | d] [-h|-p]\n"
+ "%(prog)s [view_single | v] [-h|-p]\n"
+ "%(prog)s --version\n",
+ description = "\nUse lsvmbus to get vmbus device type "
+ "information.\n" "\nThe debugfs root path is "
+ "/sys/kernel/debug/hyperv",
+ formatter_class = RawDescriptionHelpFormatter)
+ subparsers = parser.add_subparsers(dest = "action")
+ parser.add_argument("--version", action = "version",
+ version = '%(prog)s 0.1.0')
+ parser.add_argument("-q", "--quiet", action = "store_true",
+ help = "silence non-essential test messages."
+ " This will only work when enabling testing"
+ " on a device.")
+ # Use the path parser to hold the --path attribute so it can
+ # be shared between subparsers. Also do the same for the state
+ # parser, as all testing methods will use --enable_all and
+ # --enable_single.
+ path_parser = argparse.ArgumentParser(add_help=False)
+ path_parser.add_argument("-p", "--path", metavar = "",
+ help = "Debugfs path to a vmbus device. The path "
+ "must be the absolute path to the device.")
+ state_parser = argparse.ArgumentParser(add_help=False)
+ state_group = state_parser.add_mutually_exclusive_group(required = True)
+ state_group.add_argument("-E", "--enable_all", action = "store_const",
+ const = "enable_all",
+ help = "Enable the specified test type "
+ "on ALL vmbus devices.")
+ state_group.add_argument("-e", "--enable_single",
+ action = "store_const",
+ const = "enable_single",
+ help = "Enable the specified test type on a "
+ "SINGLE vmbus device.")
+ parser_delay = subparsers.add_parser("delay",
+ parents = [state_parser, path_parser],
+ help = "Delay the ring buffer interrupt or the "
+ "ring buffer message reads in microseconds.",
+ prog = "vmbus_testing",
+ usage = "%(prog)s [-h]\n"
+ "%(prog)s -E -t [value] [value]\n"
+ "%(prog)s -e -t [value] [value] -p",
+ description = "Delay the ring buffer interrupt for "
+ "vmbus devices, or delay the ring buffer message "
+ "reads for vmbus devices (both in microseconds). This "
+ "is only on the host to guest channel.")
+ parser_delay.add_argument("-t", "--delay_time", metavar = "", nargs = 2,
+ type = check_range, default = [0, 0], required = True,
+ help = "Set [buffer] & [message] delay time. "
+ "Value constraints: -1 == value "
+ "or 0 < value <= 1000.\n"
+ "Use -1 to keep the previous value for that delay "
+ "type, or a value > 0 <= 1000 to change the delay "
+ "time.")
+ parser_dis_all = subparsers.add_parser("disable_all",
+ aliases = ['D'], prog = "vmbus_testing",
+ usage = "%(prog)s [disable_all | D] -h\n"
+ "%(prog)s [disable_all | D]\n",
+ help = "Disable ALL testing on ALL vmbus devices.",
+ description = "Disable ALL testing on ALL vmbus "
+ "devices.")
+ parser_dis_single = subparsers.add_parser("disable_single",
+ aliases = ['d'],
+ parents = [path_parser], prog = "vmbus_testing",
+ usage = "%(prog)s [disable_single | d] -h\n"
+ "%(prog)s [disable_single | d] -p\n",
+ help = "Disable ALL testing on a SINGLE vmbus device.",
+ description = "Disable ALL testing on a SINGLE vmbus "
+ "device.")
+ parser_view_all = subparsers.add_parser("view_all", aliases = ['V'],
+ help = "View the test state for ALL vmbus devices.",
+ prog = "vmbus_testing",
+ usage = "%(prog)s [view_all | V] -h\n"
+ "%(prog)s [view_all | V]\n",
+ description = "This shows the test state for ALL the "
+ "vmbus devices.")
+ parser_view_single = subparsers.add_parser("view_single",
+ aliases = ['v'], parents = [path_parser],
+ help = "View the test values for a SINGLE vmbus "
+ "device.",
+ description = "This shows the test values for a SINGLE "
+ "vmbus device.", prog = "vmbus_testing",
+ usage = "%(prog)s [view_single | v] -h\n"
+ "%(prog)s [view_single | v] -p")
+
+ return parser.parse_args()
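+#
+# Example invocations implied by the usage strings above (the device
+# path below is hypothetical):
+#
+#   vmbus_testing delay -E -t 100 200
+#   vmbus_testing delay -e -t -1 500 -p /sys/kernel/debug/hyperv/<device-guid>
+#   vmbus_testing view_all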
+
+# Range-check integer input values for the parser
+def check_range(arg1):
+
+ try:
+ val = int(arg1)
+ except ValueError as err:
+ raise argparse.ArgumentTypeError(str(err))
+ if val < -1 or val > 1000:
+ message = ("\n\nvalue must be -1 or 0 < value <= 1000. "
+ "Value program received: {}\n").format(val)
+ raise argparse.ArgumentTypeError(message)
+ return val
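+#
+# e.g. check_range("500") returns 500, while check_range("2000")
+# raises an argparse.ArgumentTypeError.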
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/iio/Build b/tools/iio/Build
index f74cbda64710..8d0f3af3723f 100644
--- a/tools/iio/Build
+++ b/tools/iio/Build
@@ -1,3 +1,4 @@
+iio_utils-y += iio_utils.o
lsiio-y += lsiio.o iio_utils.o
iio_event_monitor-y += iio_event_monitor.o iio_utils.o
iio_generic_buffer-y += iio_generic_buffer.o iio_utils.o
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index e22378dba244..3de763d9ab70 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -32,20 +32,24 @@ $(OUTPUT)include/linux/iio: ../../include/uapi/linux/iio
prepare: $(OUTPUT)include/linux/iio
+IIO_UTILS_IN := $(OUTPUT)iio_utils-in.o
+$(IIO_UTILS_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=iio_utils
+
LSIIO_IN := $(OUTPUT)lsiio-in.o
-$(LSIIO_IN): prepare FORCE
+$(LSIIO_IN): prepare FORCE $(OUTPUT)iio_utils-in.o
$(Q)$(MAKE) $(build)=lsiio
$(OUTPUT)lsiio: $(LSIIO_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
IIO_EVENT_MONITOR_IN := $(OUTPUT)iio_event_monitor-in.o
-$(IIO_EVENT_MONITOR_IN): prepare FORCE
+$(IIO_EVENT_MONITOR_IN): prepare FORCE $(OUTPUT)iio_utils-in.o
$(Q)$(MAKE) $(build)=iio_event_monitor
$(OUTPUT)iio_event_monitor: $(IIO_EVENT_MONITOR_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
IIO_GENERIC_BUFFER_IN := $(OUTPUT)iio_generic_buffer-in.o
-$(IIO_GENERIC_BUFFER_IN): prepare FORCE
+$(IIO_GENERIC_BUFFER_IN): prepare FORCE $(OUTPUT)iio_utils-in.o
$(Q)$(MAKE) $(build)=iio_generic_buffer
$(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index cb1e51fcc84e..32b7c6f9043d 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -129,6 +129,7 @@ static int run_test(struct pci_test *test)
}
fflush(stdout);
+ close(fd);
return (ret < 0) ? ret : 1 - ret; /* return 0 if test succeeded */
}
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 1783427da9b0..c90f4146e5a2 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -483,6 +483,16 @@ ifndef NO_LIBELF
ifeq ($(feature-bpf), 1)
CFLAGS += -DHAVE_LIBBPF_SUPPORT
$(call detected,CONFIG_LIBBPF)
+
+ # Detect libbpf even without LIBBPF_DYNAMIC, so "make VF=1" shows the libbpf detection status
+ $(call feature_check,libbpf)
+ ifdef LIBBPF_DYNAMIC
+ ifeq ($(feature-libbpf), 1)
+ EXTLIBS += -lbpf
+ else
+ dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+ endif
+ endif
endif
ifndef NO_DWARF
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 1cd294468a1f..eae5d5e95952 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -116,6 +116,8 @@ include ../scripts/utilities.mak
#
# Define TCMALLOC to enable tcmalloc heap profiling.
#
+# Define LIBBPF_DYNAMIC to enable libbpf dynamic linking.
+#
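+# (Typically passed on the make command line, e.g. "make LIBBPF_DYNAMIC=1".)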
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -360,7 +362,9 @@ export PERL_PATH
PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD) $(LIBPERF)
ifndef NO_LIBBPF
- PERFLIBS += $(LIBBPF)
+ ifndef LIBBPF_DYNAMIC
+ PERFLIBS += $(LIBBPF)
+ endif
endif
# We choose to avoid "if .. else if .. else .. endif endif"
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index 2c35e532bc9a..ccfa87055c4a 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -3,7 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
@@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_ARM_SP];
- map = map_groups__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index a6a407fa1b8b..46147a483049 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -3,7 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
@@ -26,7 +26,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_ARM64_SP];
- map = map_groups__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 5c178e4a1995..8efd9ed9e9db 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -3,7 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
@@ -27,7 +27,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_POWERPC_R1];
- map = map_groups__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index 2a6662e42f89..0e136630659e 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -38,7 +38,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
return -1;
target.addr = map__objdump_2mem(map, ops->target.addr);
- if (map_groups__find_ams(ms->mg, &target) == 0 &&
+ if (maps__find_ams(ms->maps, &target) == 0 &&
map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 6ad0a1cedb13..ef43be9b6ec2 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -3,7 +3,7 @@
#include "perf_regs.h"
#include "thread.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
@@ -27,7 +27,7 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_X86_SP];
- map = map_groups__find(thread->mg, (u64)sp);
+ map = maps__find(thread->maps, (u64)sp);
if (!map) {
pr_debug("failed to get stack map\n");
free(buf);
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
index 58f8f2a095c4..e6461abc9e7b 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-32.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -667,6 +667,86 @@
"62 f2 55 0f 4f f4 \tvrsqrt14ss %xmm4,%xmm5,%xmm6{%k7}",},
{{0x62, 0xf2, 0xd5, 0x0f, 0x4f, 0xf4, }, 6, 0, "", "",
"62 f2 d5 0f 4f f4 \tvrsqrt14sd %xmm4,%xmm5,%xmm6{%k7}",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x50, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 50 d9 \tvpdpbusd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x50, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 50 d9 \tvpdpbusd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x50, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 50 d9 \tvpdpbusd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x50, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 50 9c c8 78 56 34 12 \tvpdpbusd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x51, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 51 d9 \tvpdpbusds %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x51, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 51 d9 \tvpdpbusds %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x51, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 51 d9 \tvpdpbusds %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x51, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 51 9c c8 78 56 34 12 \tvpdpbusds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6e, 0x08, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6e 08 52 d9 \tvdpbf16ps %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6e, 0x28, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6e 28 52 d9 \tvdpbf16ps %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6e, 0x48, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6e 48 52 d9 \tvdpbf16ps %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6e, 0x48, 0x52, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6e 48 52 9c c8 78 56 34 12 \tvdpbf16ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 52 d9 \tvpdpwssd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 52 d9 \tvpdpwssd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 52 d9 \tvpdpwssd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x52, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 52 9c c8 78 56 34 12 \tvpdpwssd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x52, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 52 20 \tvp4dpwssd (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x52, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 52 a4 c8 78 56 34 12 \tvp4dpwssd 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x53, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 53 d9 \tvpdpwssds %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x53, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 53 d9 \tvpdpwssds %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x53, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 53 d9 \tvpdpwssds %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x53, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 53 9c c8 78 56 34 12 \tvpdpwssds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x53, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 53 20 \tvp4dpwssds (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x53, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 53 a4 c8 78 56 34 12 \tvp4dpwssds 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 08 54 d1 \tvpopcntb %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 28 54 d1 \tvpopcntb %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 48 54 d1 \tvpopcntb %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x54, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 54 94 c8 78 56 34 12 \tvpopcntb 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 08 54 d1 \tvpopcntw %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 28 54 d1 \tvpopcntw %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 48 54 d1 \tvpopcntw %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x54, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 54 94 c8 78 56 34 12 \tvpopcntw 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 08 55 d1 \tvpopcntd %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 28 55 d1 \tvpopcntd %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 48 55 d1 \tvpopcntd %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x55, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 55 94 c8 78 56 34 12 \tvpopcntd 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 08 55 d1 \tvpopcntq %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 28 55 d1 \tvpopcntq %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 48 55 d1 \tvpopcntq %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x55, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 55 94 c8 78 56 34 12 \tvpopcntq 0x12345678(%eax,%ecx,8),%zmm2",},
{{0xc4, 0xe2, 0x79, 0x59, 0xf4, }, 5, 0, "", "",
"c4 e2 79 59 f4 \tvpbroadcastq %xmm4,%xmm6",},
{{0x62, 0xf2, 0x7d, 0x48, 0x59, 0xf7, }, 6, 0, "", "",
@@ -681,6 +761,38 @@
"62 f2 7d 48 5b 31 \tvbroadcasti32x8 (%ecx),%zmm6",},
{{0x62, 0xf2, 0xfd, 0x48, 0x5b, 0x31, }, 6, 0, "", "",
"62 f2 fd 48 5b 31 \tvbroadcasti64x4 (%ecx),%zmm6",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 08 62 d1 \tvpexpandb %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 28 62 d1 \tvpexpandb %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 48 62 d1 \tvpexpandb %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x62, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 62 94 c8 78 56 34 12 \tvpexpandb 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 08 62 d1 \tvpexpandw %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 28 62 d1 \tvpexpandw %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 48 62 d1 \tvpexpandw %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x62, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 62 94 c8 78 56 34 12 \tvpexpandw 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 7d 08 63 ca \tvpcompressb %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 7d 28 63 ca \tvpcompressb %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 7d 48 63 ca \tvpcompressb %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x63, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 63 94 c8 78 56 34 12 \tvpcompressb %zmm2,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 fd 08 63 ca \tvpcompressw %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 fd 28 63 ca \tvpcompressw %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 fd 48 63 ca \tvpcompressw %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x63, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 63 94 c8 78 56 34 12 \tvpcompressw %zmm2,0x12345678(%eax,%ecx,8)",},
{{0x62, 0xf2, 0x55, 0x48, 0x64, 0xf4, }, 6, 0, "", "",
"62 f2 55 48 64 f4 \tvpblendmd %zmm4,%zmm5,%zmm6",},
{{0x62, 0xf2, 0xd5, 0x48, 0x64, 0xf4, }, 6, 0, "", "",
@@ -693,6 +805,86 @@
"62 f2 55 48 66 f4 \tvpblendmb %zmm4,%zmm5,%zmm6",},
{{0x62, 0xf2, 0xd5, 0x48, 0x66, 0xf4, }, 6, 0, "", "",
"62 f2 d5 48 66 f4 \tvpblendmw %zmm4,%zmm5,%zmm6",},
+{{0x62, 0xf2, 0x6f, 0x08, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 08 68 d9 \tvp2intersectd %xmm1,%xmm2,%k3",},
+{{0x62, 0xf2, 0x6f, 0x28, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 28 68 d9 \tvp2intersectd %ymm1,%ymm2,%k3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 48 68 d9 \tvp2intersectd %zmm1,%zmm2,%k3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x68, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6f 48 68 9c c8 78 56 34 12 \tvp2intersectd 0x12345678(%eax,%ecx,8),%zmm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x08, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 ef 08 68 d9 \tvp2intersectq %xmm1,%xmm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x28, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 ef 28 68 d9 \tvp2intersectq %ymm1,%ymm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x48, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 ef 48 68 d9 \tvp2intersectq %zmm1,%zmm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x48, 0x68, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ef 48 68 9c c8 78 56 34 12 \tvp2intersectq 0x12345678(%eax,%ecx,8),%zmm2,%k3",},
+{{0x62, 0xf2, 0xed, 0x08, 0x70, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 70 d9 \tvpshldvw %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x70, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 70 d9 \tvpshldvw %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x70, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 70 d9 \tvpshldvw %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x70, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 70 9c c8 78 56 34 12 \tvpshldvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 71 d9 \tvpshldvd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 71 d9 \tvpshldvd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 71 d9 \tvpshldvd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x71, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 71 9c c8 78 56 34 12 \tvpshldvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 71 d9 \tvpshldvq %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 71 d9 \tvpshldvq %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 71 d9 \tvpshldvq %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x71, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 71 9c c8 78 56 34 12 \tvpshldvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6f, 0x08, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 08 72 d9 \tvcvtne2ps2bf16 %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6f, 0x28, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 28 72 d9 \tvcvtne2ps2bf16 %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 48 72 d9 \tvcvtne2ps2bf16 %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x72, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6f 48 72 9c c8 78 56 34 12 \tvcvtne2ps2bf16 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7e, 0x08, 0x72, 0xd1, }, 6, 0, "", "",
+"62 f2 7e 08 72 d1 \tvcvtneps2bf16 %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7e, 0x28, 0x72, 0xd1, }, 6, 0, "", "",
+"62 f2 7e 28 72 d1 \tvcvtneps2bf16 %ymm1,%xmm2",},
+{{0x62, 0xf2, 0x7e, 0x48, 0x72, 0xd1, }, 6, 0, "", "",
+"62 f2 7e 48 72 d1 \tvcvtneps2bf16 %zmm1,%ymm2",},
+{{0x62, 0xf2, 0x7e, 0x48, 0x72, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7e 48 72 94 c8 78 56 34 12 \tvcvtneps2bf16 0x12345678(%eax,%ecx,8),%ymm2",},
+{{0x62, 0xf2, 0xed, 0x08, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 72 d9 \tvpshrdvw %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 72 d9 \tvpshrdvw %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 72 d9 \tvpshrdvw %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x72, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 72 9c c8 78 56 34 12 \tvpshrdvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 73 d9 \tvpshrdvd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 73 d9 \tvpshrdvd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 73 d9 \tvpshrdvd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x73, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 73 9c c8 78 56 34 12 \tvpshrdvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 73 d9 \tvpshrdvq %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 73 d9 \tvpshrdvq %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 73 d9 \tvpshrdvq %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x73, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 73 9c c8 78 56 34 12 \tvpshrdvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
{{0x62, 0xf2, 0x55, 0x48, 0x75, 0xf4, }, 6, 0, "", "",
"62 f2 55 48 75 f4 \tvpermi2b %zmm4,%zmm5,%zmm6",},
{{0x62, 0xf2, 0xd5, 0x48, 0x75, 0xf4, }, 6, 0, "", "",
@@ -745,6 +937,14 @@
"62 f2 55 48 8d f4 \tvpermb %zmm4,%zmm5,%zmm6",},
{{0x62, 0xf2, 0xd5, 0x48, 0x8d, 0xf4, }, 6, 0, "", "",
"62 f2 d5 48 8d f4 \tvpermw %zmm4,%zmm5,%zmm6",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x8f, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 8f d9 \tvpshufbitqmb %xmm1,%xmm2,%k3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x8f, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 8f d9 \tvpshufbitqmb %ymm1,%ymm2,%k3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x8f, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 8f d9 \tvpshufbitqmb %zmm1,%zmm2,%k3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x8f, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 8f 9c c8 78 56 34 12 \tvpshufbitqmb 0x12345678(%eax,%ecx,8),%zmm2,%k3",},
{{0xc4, 0xe2, 0x69, 0x90, 0x4c, 0x7d, 0x02, }, 7, 0, "", "",
"c4 e2 69 90 4c 7d 02 \tvpgatherdd %xmm2,0x2(%ebp,%xmm7,2),%xmm1",},
{{0xc4, 0xe2, 0xe9, 0x90, 0x4c, 0x7d, 0x04, }, 7, 0, "", "",
@@ -761,6 +961,38 @@
"62 f2 7d 49 91 b4 fd 7b 00 00 00 \tvpgatherqd 0x7b(%ebp,%zmm7,8),%ymm6{%k1}",},
{{0x62, 0xf2, 0xfd, 0x49, 0x91, 0xb4, 0xfd, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
"62 f2 fd 49 91 b4 fd 7b 00 00 00 \tvpgatherqq 0x7b(%ebp,%zmm7,8),%zmm6{%k1}",},
+{{0xc4, 0xe2, 0x69, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 9a d9 \tvfmsub132ps %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d 9a d9 \tvfmsub132ps %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x9a, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 9a d9 \tvfmsub132ps %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x9a, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 9a 9c c8 78 56 34 12 \tvfmsub132ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0xe9, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 9a d9 \tvfmsub132pd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xed, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 ed 9a d9 \tvfmsub132pd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x9a, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 9a d9 \tvfmsub132pd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x9a, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 9a 9c c8 78 56 34 12 \tvfmsub132pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x9a, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 9a 20 \tv4fmaddps (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x9a, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 9a a4 c8 78 56 34 12 \tv4fmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0xc4, 0xe2, 0x69, 0x9b, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 9b d9 \tvfmsub132ss %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0x9b, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 69 9b 9c c8 78 56 34 12 \tvfmsub132ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0x9b, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 9b d9 \tvfmsub132sd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0x9b, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 e9 9b 9c c8 78 56 34 12 \tvfmsub132sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x7f, 0x08, 0x9b, 0x20, }, 6, 0, "", "",
+"62 f2 7f 08 9b 20 \tv4fmaddss (%eax),%xmm0,%xmm4",},
+{{0x62, 0xf2, 0x7f, 0x08, 0x9b, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 08 9b a4 c8 78 56 34 12 \tv4fmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4",},
{{0x62, 0xf2, 0x7d, 0x49, 0xa0, 0xb4, 0xfd, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
"62 f2 7d 49 a0 b4 fd 7b 00 00 00 \tvpscatterdd %zmm6,0x7b(%ebp,%zmm7,8){%k1}",},
{{0x62, 0xf2, 0xfd, 0x49, 0xa0, 0xb4, 0xfd, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
@@ -777,6 +1009,38 @@
"62 f2 7d 49 a3 b4 fd 7b 00 00 00 \tvscatterqps %ymm6,0x7b(%ebp,%zmm7,8){%k1}",},
{{0x62, 0xf2, 0xfd, 0x49, 0xa3, 0xb4, 0xfd, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
"62 f2 fd 49 a3 b4 fd 7b 00 00 00 \tvscatterqpd %zmm6,0x7b(%ebp,%zmm7,8){%k1}",},
+{{0xc4, 0xe2, 0x69, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 aa d9 \tvfmsub213ps %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d aa d9 \tvfmsub213ps %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xaa, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 aa d9 \tvfmsub213ps %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xaa, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 aa 9c c8 78 56 34 12 \tvfmsub213ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0xe9, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 aa d9 \tvfmsub213pd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xed, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 ed aa d9 \tvfmsub213pd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0xaa, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 aa d9 \tvfmsub213pd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0xaa, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 aa 9c c8 78 56 34 12 \tvfmsub213pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0xaa, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 aa 20 \tv4fnmaddps (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0xaa, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 aa a4 c8 78 56 34 12 \tv4fnmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0xc4, 0xe2, 0x69, 0xab, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 ab d9 \tvfmsub213ss %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xab, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 69 ab 9c c8 78 56 34 12 \tvfmsub213ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0xab, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 ab d9 \tvfmsub213sd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0xab, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 e9 ab 9c c8 78 56 34 12 \tvfmsub213sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x7f, 0x08, 0xab, 0x20, }, 6, 0, "", "",
+"62 f2 7f 08 ab 20 \tv4fnmaddss (%eax),%xmm0,%xmm4",},
+{{0x62, 0xf2, 0x7f, 0x08, 0xab, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 08 ab a4 c8 78 56 34 12 \tv4fnmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4",},
{{0x62, 0xf2, 0xd5, 0x48, 0xb4, 0xf4, }, 6, 0, "", "",
"62 f2 d5 48 b4 f4 \tvpmadd52luq %zmm4,%zmm5,%zmm6",},
{{0x62, 0xf2, 0xd5, 0x48, 0xb5, 0xf4, }, 6, 0, "", "",
@@ -805,6 +1069,50 @@
"62 f2 4d 0f cd fd \tvrsqrt28ss %xmm5,%xmm6,%xmm7{%k7}",},
{{0x62, 0xf2, 0xcd, 0x0f, 0xcd, 0xfd, }, 6, 0, "", "",
"62 f2 cd 0f cd fd \tvrsqrt28sd %xmm5,%xmm6,%xmm7{%k7}",},
+{{0x66, 0x0f, 0x38, 0xcf, 0xd9, }, 5, 0, "", "",
+"66 0f 38 cf d9 \tgf2p8mulb %xmm1,%xmm3",},
+{{0x66, 0x0f, 0x38, 0xcf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 0f 38 cf 9c c8 78 56 34 12 \tgf2p8mulb 0x12345678(%eax,%ecx,8),%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xcf, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 cf d9 \tvgf2p8mulb %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xcf, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d cf d9 \tvgf2p8mulb %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xcf, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 cf d9 \tvgf2p8mulb %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xcf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 cf 9c c8 78 56 34 12 \tvgf2p8mulb 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xdc, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 dc d9 \tvaesenc %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xdc, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d dc d9 \tvaesenc %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdc, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 dc d9 \tvaesenc %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdc, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 dc 9c c8 78 56 34 12 \tvaesenc 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xdd, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 dd d9 \tvaesenclast %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xdd, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d dd d9 \tvaesenclast %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdd, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 dd d9 \tvaesenclast %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdd, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 dd 9c c8 78 56 34 12 \tvaesenclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xde, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 de d9 \tvaesdec %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xde, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d de d9 \tvaesdec %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xde, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 de d9 \tvaesdec %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xde, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 de 9c c8 78 56 34 12 \tvaesdec 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xdf, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 df d9 \tvaesdeclast %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xdf, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d df d9 \tvaesdeclast %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdf, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 df d9 \tvaesdeclast %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 df 9c c8 78 56 34 12 \tvaesdeclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
{{0x62, 0xf3, 0x4d, 0x48, 0x03, 0xfd, 0x12, }, 7, 0, "", "",
"62 f3 4d 48 03 fd 12 \tvalignd $0x12,%zmm5,%zmm6,%zmm7",},
{{0x62, 0xf3, 0xcd, 0x48, 0x03, 0xfd, 0x12, }, 7, 0, "", "",
@@ -905,6 +1213,12 @@
"62 f3 4d 48 43 fd 12 \tvshufi32x4 $0x12,%zmm5,%zmm6,%zmm7",},
{{0x62, 0xf3, 0xcd, 0x48, 0x43, 0xfd, 0x12, }, 7, 0, "", "",
"62 f3 cd 48 43 fd 12 \tvshufi64x2 $0x12,%zmm5,%zmm6,%zmm7",},
+{{0xc4, 0xe3, 0x69, 0x44, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 69 44 d9 12 \tvpclmulqdq $0x12,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0x6d, 0x44, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 6d 44 d9 12 \tvpclmulqdq $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0x6d, 0x48, 0x44, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 48 44 d9 12 \tvpclmulqdq $0x12,%zmm1,%zmm2,%zmm3",},
{{0x62, 0xf3, 0x4d, 0x48, 0x50, 0xfd, 0x12, }, 7, 0, "", "",
"62 f3 4d 48 50 fd 12 \tvrangeps $0x12,%zmm5,%zmm6,%zmm7",},
{{0x62, 0xf3, 0xcd, 0x48, 0x50, 0xfd, 0x12, }, 7, 0, "", "",
@@ -937,6 +1251,58 @@
"62 f3 7d 08 67 ef 12 \tvfpclassss $0x12,%xmm7,%k5",},
{{0x62, 0xf3, 0xfd, 0x08, 0x67, 0xef, 0x12, }, 7, 0, "", "",
"62 f3 fd 08 67 ef 12 \tvfpclasssd $0x12,%xmm7,%k5",},
+{{0x62, 0xf3, 0xed, 0x08, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 70 d9 12 \tvpshldw $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 70 d9 12 \tvpshldw $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 70 d9 12 \tvpshldw $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf3, 0x6d, 0x08, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 08 71 d9 12 \tvpshldd $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0x6d, 0x28, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 28 71 d9 12 \tvpshldd $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0x6d, 0x48, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 48 71 d9 12 \tvpshldd $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf3, 0xed, 0x08, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 71 d9 12 \tvpshldq $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 71 d9 12 \tvpshldq $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 71 d9 12 \tvpshldq $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf3, 0xed, 0x08, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 72 d9 12 \tvpshrdw $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 72 d9 12 \tvpshrdw $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 72 d9 12 \tvpshrdw $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf3, 0x6d, 0x08, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 08 73 d9 12 \tvpshrdd $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0x6d, 0x28, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 28 73 d9 12 \tvpshrdd $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0x6d, 0x48, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 48 73 d9 12 \tvpshrdd $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf3, 0xed, 0x08, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 73 d9 12 \tvpshrdq $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 73 d9 12 \tvpshrdq $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 73 d9 12 \tvpshrdq $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x66, 0x0f, 0x3a, 0xce, 0xd9, 0x12, }, 6, 0, "", "",
+"66 0f 3a ce d9 12 \tgf2p8affineqb $0x12,%xmm1,%xmm3",},
+{{0xc4, 0xe3, 0xe9, 0xce, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 e9 ce d9 12 \tvgf2p8affineqb $0x12,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0xed, 0xce, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 ed ce d9 12 \tvgf2p8affineqb $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0xce, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 ce d9 12 \tvgf2p8affineqb $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x66, 0x0f, 0x3a, 0xcf, 0xd9, 0x12, }, 6, 0, "", "",
+"66 0f 3a cf d9 12 \tgf2p8affineinvqb $0x12,%xmm1,%xmm3",},
+{{0xc4, 0xe3, 0xe9, 0xcf, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 e9 cf d9 12 \tvgf2p8affineinvqb $0x12,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0xed, 0xcf, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 ed cf d9 12 \tvgf2p8affineinvqb $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0xcf, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 cf d9 12 \tvgf2p8affineinvqb $0x12,%zmm1,%zmm2,%zmm3",},
{{0x62, 0xf1, 0x4d, 0x48, 0x72, 0xc5, 0x12, }, 7, 0, "", "",
"62 f1 4d 48 72 c5 12 \tvprord $0x12,%zmm5,%zmm6",},
{{0x62, 0xf1, 0xcd, 0x48, 0x72, 0xc5, 0x12, }, 7, 0, "", "",
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
index 656f8aed31de..567ecccfad7c 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-64.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -587,6 +587,112 @@
"62 02 35 07 4f d0 \tvrsqrt14ss %xmm24,%xmm25,%xmm26{%k7}",},
{{0x62, 0x02, 0xb5, 0x07, 0x4f, 0xd0, }, 6, 0, "", "",
"62 02 b5 07 4f d0 \tvrsqrt14sd %xmm24,%xmm25,%xmm26{%k7}",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x50, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 50 d9 \tvpdpbusd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x50, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 50 d9 \tvpdpbusd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x50, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 50 d9 \tvpdpbusd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x50, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 50 9c c8 78 56 34 12 \tvpdpbusd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x50, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 50 9c c8 78 56 34 12 \tvpdpbusd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x51, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 51 d9 \tvpdpbusds %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x51, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 51 d9 \tvpdpbusds %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x51, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 51 d9 \tvpdpbusds %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x51, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 51 9c c8 78 56 34 12 \tvpdpbusds 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x51, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 51 9c c8 78 56 34 12 \tvpdpbusds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6e, 0x08, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6e 08 52 d9 \tvdpbf16ps %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6e, 0x28, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6e 28 52 d9 \tvdpbf16ps %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6e, 0x48, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6e 48 52 d9 \tvdpbf16ps %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6e, 0x48, 0x52, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6e 48 52 9c c8 78 56 34 12 \tvdpbf16ps 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6e, 0x48, 0x52, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6e 48 52 9c c8 78 56 34 12 \tvdpbf16ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 52 d9 \tvpdpwssd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 52 d9 \tvpdpwssd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x52, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 52 d9 \tvpdpwssd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x52, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 52 9c c8 78 56 34 12 \tvpdpwssd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x52, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 52 9c c8 78 56 34 12 \tvpdpwssd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x52, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 52 20 \tvp4dpwssd (%rax),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0x52, 0x20, }, 7, 0, "", "",
+"67 62 f2 7f 48 52 20 \tvp4dpwssd (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x52, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 52 a4 c8 78 56 34 12 \tvp4dpwssd 0x12345678(%rax,%rcx,8),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0x52, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7f 48 52 a4 c8 78 56 34 12 \tvp4dpwssd 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x53, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 53 d9 \tvpdpwssds %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x53, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 53 d9 \tvpdpwssds %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x53, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 53 d9 \tvpdpwssds %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x53, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 53 9c c8 78 56 34 12 \tvpdpwssds 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x53, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 53 9c c8 78 56 34 12 \tvpdpwssds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x53, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 53 20 \tvp4dpwssds (%rax),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0x53, 0x20, }, 7, 0, "", "",
+"67 62 f2 7f 48 53 20 \tvp4dpwssds (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x53, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 53 a4 c8 78 56 34 12 \tvp4dpwssds 0x12345678(%rax,%rcx,8),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0x53, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7f 48 53 a4 c8 78 56 34 12 \tvp4dpwssds 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 08 54 d1 \tvpopcntb %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 28 54 d1 \tvpopcntb %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 48 54 d1 \tvpopcntb %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x54, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 54 94 c8 78 56 34 12 \tvpopcntb 0x12345678(%rax,%rcx,8),%zmm2",},
+{{0x67, 0x62, 0xf2, 0x7d, 0x48, 0x54, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7d 48 54 94 c8 78 56 34 12 \tvpopcntb 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 08 54 d1 \tvpopcntw %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 28 54 d1 \tvpopcntw %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x54, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 48 54 d1 \tvpopcntw %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x54, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 54 94 c8 78 56 34 12 \tvpopcntw 0x12345678(%rax,%rcx,8),%zmm2",},
+{{0x67, 0x62, 0xf2, 0xfd, 0x48, 0x54, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 fd 48 54 94 c8 78 56 34 12 \tvpopcntw 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 08 55 d1 \tvpopcntd %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 28 55 d1 \tvpopcntd %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 48 55 d1 \tvpopcntd %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x55, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 55 94 c8 78 56 34 12 \tvpopcntd 0x12345678(%rax,%rcx,8),%zmm2",},
+{{0x67, 0x62, 0xf2, 0x7d, 0x48, 0x55, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7d 48 55 94 c8 78 56 34 12 \tvpopcntd 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 08 55 d1 \tvpopcntq %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 28 55 d1 \tvpopcntq %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x55, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 48 55 d1 \tvpopcntq %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x55, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 55 94 c8 78 56 34 12 \tvpopcntq 0x12345678(%rax,%rcx,8),%zmm2",},
+{{0x67, 0x62, 0xf2, 0xfd, 0x48, 0x55, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 fd 48 55 94 c8 78 56 34 12 \tvpopcntq 0x12345678(%eax,%ecx,8),%zmm2",},
{{0xc4, 0xe2, 0x79, 0x59, 0xf4, }, 5, 0, "", "",
"c4 e2 79 59 f4 \tvpbroadcastq %xmm4,%xmm6",},
{{0x62, 0x02, 0x7d, 0x48, 0x59, 0xd3, }, 6, 0, "", "",
@@ -601,6 +707,46 @@
"62 62 7d 48 5b 21 \tvbroadcasti32x8 (%rcx),%zmm28",},
{{0x62, 0x62, 0xfd, 0x48, 0x5b, 0x11, }, 6, 0, "", "",
"62 62 fd 48 5b 11 \tvbroadcasti64x4 (%rcx),%zmm26",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 08 62 d1 \tvpexpandb %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 28 62 d1 \tvpexpandb %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 7d 48 62 d1 \tvpexpandb %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x62, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 62 94 c8 78 56 34 12 \tvpexpandb 0x12345678(%rax,%rcx,8),%zmm2",},
+{{0x67, 0x62, 0xf2, 0x7d, 0x48, 0x62, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7d 48 62 94 c8 78 56 34 12 \tvpexpandb 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 08 62 d1 \tvpexpandw %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 28 62 d1 \tvpexpandw %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x62, 0xd1, }, 6, 0, "", "",
+"62 f2 fd 48 62 d1 \tvpexpandw %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x62, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 62 94 c8 78 56 34 12 \tvpexpandw 0x12345678(%rax,%rcx,8),%zmm2",},
+{{0x67, 0x62, 0xf2, 0xfd, 0x48, 0x62, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 fd 48 62 94 c8 78 56 34 12 \tvpexpandw 0x12345678(%eax,%ecx,8),%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x08, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 7d 08 63 ca \tvpcompressb %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7d, 0x28, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 7d 28 63 ca \tvpcompressb %ymm1,%ymm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 7d 48 63 ca \tvpcompressb %zmm1,%zmm2",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x63, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 63 94 c8 78 56 34 12 \tvpcompressb %zmm2,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0x62, 0xf2, 0x7d, 0x48, 0x63, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7d 48 63 94 c8 78 56 34 12 \tvpcompressb %zmm2,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf2, 0xfd, 0x08, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 fd 08 63 ca \tvpcompressw %xmm1,%xmm2",},
+{{0x62, 0xf2, 0xfd, 0x28, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 fd 28 63 ca \tvpcompressw %ymm1,%ymm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x63, 0xca, }, 6, 0, "", "",
+"62 f2 fd 48 63 ca \tvpcompressw %zmm1,%zmm2",},
+{{0x62, 0xf2, 0xfd, 0x48, 0x63, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 fd 48 63 94 c8 78 56 34 12 \tvpcompressw %zmm2,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0x62, 0xf2, 0xfd, 0x48, 0x63, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 fd 48 63 94 c8 78 56 34 12 \tvpcompressw %zmm2,0x12345678(%eax,%ecx,8)",},
{{0x62, 0x02, 0x25, 0x40, 0x64, 0xe2, }, 6, 0, "", "",
"62 02 25 40 64 e2 \tvpblendmd %zmm26,%zmm27,%zmm28",},
{{0x62, 0x02, 0xa5, 0x40, 0x64, 0xe2, }, 6, 0, "", "",
@@ -613,6 +759,106 @@
"62 02 25 40 66 e2 \tvpblendmb %zmm26,%zmm27,%zmm28",},
{{0x62, 0x02, 0xa5, 0x40, 0x66, 0xe2, }, 6, 0, "", "",
"62 02 a5 40 66 e2 \tvpblendmw %zmm26,%zmm27,%zmm28",},
+{{0x62, 0xf2, 0x6f, 0x08, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 08 68 d9 \tvp2intersectd %xmm1,%xmm2,%k3",},
+{{0x62, 0xf2, 0x6f, 0x28, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 28 68 d9 \tvp2intersectd %ymm1,%ymm2,%k3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 48 68 d9 \tvp2intersectd %zmm1,%zmm2,%k3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x68, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6f 48 68 9c c8 78 56 34 12 \tvp2intersectd 0x12345678(%rax,%rcx,8),%zmm2,%k3",},
+{{0x67, 0x62, 0xf2, 0x6f, 0x48, 0x68, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6f 48 68 9c c8 78 56 34 12 \tvp2intersectd 0x12345678(%eax,%ecx,8),%zmm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x08, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 ef 08 68 d9 \tvp2intersectq %xmm1,%xmm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x28, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 ef 28 68 d9 \tvp2intersectq %ymm1,%ymm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x48, 0x68, 0xd9, }, 6, 0, "", "",
+"62 f2 ef 48 68 d9 \tvp2intersectq %zmm1,%zmm2,%k3",},
+{{0x62, 0xf2, 0xef, 0x48, 0x68, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ef 48 68 9c c8 78 56 34 12 \tvp2intersectq 0x12345678(%rax,%rcx,8),%zmm2,%k3",},
+{{0x67, 0x62, 0xf2, 0xef, 0x48, 0x68, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ef 48 68 9c c8 78 56 34 12 \tvp2intersectq 0x12345678(%eax,%ecx,8),%zmm2,%k3",},
+{{0x62, 0xf2, 0xed, 0x08, 0x70, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 70 d9 \tvpshldvw %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x70, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 70 d9 \tvpshldvw %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x70, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 70 d9 \tvpshldvw %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x70, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 70 9c c8 78 56 34 12 \tvpshldvw 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0xed, 0x48, 0x70, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ed 48 70 9c c8 78 56 34 12 \tvpshldvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 71 d9 \tvpshldvd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 71 d9 \tvpshldvd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 71 d9 \tvpshldvd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x71, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 71 9c c8 78 56 34 12 \tvpshldvd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x71, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 71 9c c8 78 56 34 12 \tvpshldvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 71 d9 \tvpshldvq %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 71 d9 \tvpshldvq %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x71, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 71 d9 \tvpshldvq %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x71, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 71 9c c8 78 56 34 12 \tvpshldvq 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0xed, 0x48, 0x71, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ed 48 71 9c c8 78 56 34 12 \tvpshldvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6f, 0x08, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 08 72 d9 \tvcvtne2ps2bf16 %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6f, 0x28, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 28 72 d9 \tvcvtne2ps2bf16 %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 6f 48 72 d9 \tvcvtne2ps2bf16 %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6f, 0x48, 0x72, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6f 48 72 9c c8 78 56 34 12 \tvcvtne2ps2bf16 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6f, 0x48, 0x72, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6f 48 72 9c c8 78 56 34 12 \tvcvtne2ps2bf16 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7e, 0x08, 0x72, 0xd1, }, 6, 0, "", "",
+"62 f2 7e 08 72 d1 \tvcvtneps2bf16 %xmm1,%xmm2",},
+{{0x62, 0xf2, 0x7e, 0x28, 0x72, 0xd1, }, 6, 0, "", "",
+"62 f2 7e 28 72 d1 \tvcvtneps2bf16 %ymm1,%xmm2",},
+{{0x62, 0xf2, 0x7e, 0x48, 0x72, 0xd1, }, 6, 0, "", "",
+"62 f2 7e 48 72 d1 \tvcvtneps2bf16 %zmm1,%ymm2",},
+{{0x62, 0xf2, 0x7e, 0x48, 0x72, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7e 48 72 94 c8 78 56 34 12 \tvcvtneps2bf16 0x12345678(%rax,%rcx,8),%ymm2",},
+{{0x67, 0x62, 0xf2, 0x7e, 0x48, 0x72, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7e 48 72 94 c8 78 56 34 12 \tvcvtneps2bf16 0x12345678(%eax,%ecx,8),%ymm2",},
+{{0x62, 0xf2, 0xed, 0x08, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 72 d9 \tvpshrdvw %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 72 d9 \tvpshrdvw %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x72, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 72 d9 \tvpshrdvw %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x72, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 72 9c c8 78 56 34 12 \tvpshrdvw 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0xed, 0x48, 0x72, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ed 48 72 9c c8 78 56 34 12 \tvpshrdvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 73 d9 \tvpshrdvd %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 73 d9 \tvpshrdvd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 73 d9 \tvpshrdvd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x73, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 73 9c c8 78 56 34 12 \tvpshrdvd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x73, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 73 9c c8 78 56 34 12 \tvpshrdvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x08, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 08 73 d9 \tvpshrdvq %xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf2, 0xed, 0x28, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 28 73 d9 \tvpshrdvq %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x73, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 73 d9 \tvpshrdvq %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x73, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 73 9c c8 78 56 34 12 \tvpshrdvq 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0xed, 0x48, 0x73, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ed 48 73 9c c8 78 56 34 12 \tvpshrdvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
{{0x62, 0x02, 0x35, 0x40, 0x75, 0xd0, }, 6, 0, "", "",
"62 02 35 40 75 d0 \tvpermi2b %zmm24,%zmm25,%zmm26",},
{{0x62, 0x02, 0xa5, 0x40, 0x75, 0xe2, }, 6, 0, "", "",
@@ -667,6 +913,16 @@
"62 02 25 40 8d e2 \tvpermb %zmm26,%zmm27,%zmm28",},
{{0x62, 0x02, 0xa5, 0x40, 0x8d, 0xe2, }, 6, 0, "", "",
"62 02 a5 40 8d e2 \tvpermw %zmm26,%zmm27,%zmm28",},
+{{0x62, 0xf2, 0x6d, 0x08, 0x8f, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 08 8f d9 \tvpshufbitqmb %xmm1,%xmm2,%k3",},
+{{0x62, 0xf2, 0x6d, 0x28, 0x8f, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 28 8f d9 \tvpshufbitqmb %ymm1,%ymm2,%k3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x8f, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 8f d9 \tvpshufbitqmb %zmm1,%zmm2,%k3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x8f, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 8f 9c c8 78 56 34 12 \tvpshufbitqmb 0x12345678(%rax,%rcx,8),%zmm2,%k3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x8f, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 8f 9c c8 78 56 34 12 \tvpshufbitqmb 0x12345678(%eax,%ecx,8),%zmm2,%k3",},
{{0xc4, 0xe2, 0x69, 0x90, 0x4c, 0x7d, 0x02, }, 7, 0, "", "",
"c4 e2 69 90 4c 7d 02 \tvpgatherdd %xmm2,0x2(%rbp,%xmm7,2),%xmm1",},
{{0xc4, 0xe2, 0xe9, 0x90, 0x4c, 0x7d, 0x04, }, 7, 0, "", "",
@@ -683,6 +939,54 @@
"62 22 7d 41 91 94 dd 7b 00 00 00 \tvpgatherqd 0x7b(%rbp,%zmm27,8),%ymm26{%k1}",},
{{0x62, 0x22, 0xfd, 0x41, 0x91, 0x94, 0xdd, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
"62 22 fd 41 91 94 dd 7b 00 00 00 \tvpgatherqq 0x7b(%rbp,%zmm27,8),%zmm26{%k1}",},
+{{0xc4, 0xe2, 0x69, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 9a d9 \tvfmsub132ps %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d 9a d9 \tvfmsub132ps %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x9a, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 9a d9 \tvfmsub132ps %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0x9a, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 9a 9c c8 78 56 34 12 \tvfmsub132ps 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0x9a, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 9a 9c c8 78 56 34 12 \tvfmsub132ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0xe9, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 9a d9 \tvfmsub132pd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xed, 0x9a, 0xd9, }, 5, 0, "", "",
+"c4 e2 ed 9a d9 \tvfmsub132pd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x9a, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 9a d9 \tvfmsub132pd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0x9a, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 9a 9c c8 78 56 34 12 \tvfmsub132pd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0xed, 0x48, 0x9a, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ed 48 9a 9c c8 78 56 34 12 \tvfmsub132pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x9a, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 9a 20 \tv4fmaddps (%rax),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0x9a, 0x20, }, 7, 0, "", "",
+"67 62 f2 7f 48 9a 20 \tv4fmaddps (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0x9a, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 9a a4 c8 78 56 34 12 \tv4fmaddps 0x12345678(%rax,%rcx,8),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0x9a, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7f 48 9a a4 c8 78 56 34 12 \tv4fmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0xc4, 0xe2, 0x69, 0x9b, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 9b d9 \tvfmsub132ss %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0x9b, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 69 9b 9c c8 78 56 34 12 \tvfmsub132ss 0x12345678(%rax,%rcx,8),%xmm2,%xmm3",},
+{{0x67, 0xc4, 0xe2, 0x69, 0x9b, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 69 9b 9c c8 78 56 34 12 \tvfmsub132ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0x9b, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 9b d9 \tvfmsub132sd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0x9b, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 e9 9b 9c c8 78 56 34 12 \tvfmsub132sd 0x12345678(%rax,%rcx,8),%xmm2,%xmm3",},
+{{0x67, 0xc4, 0xe2, 0xe9, 0x9b, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 e9 9b 9c c8 78 56 34 12 \tvfmsub132sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x7f, 0x08, 0x9b, 0x20, }, 6, 0, "", "",
+"62 f2 7f 08 9b 20 \tv4fmaddss (%rax),%xmm0,%xmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x08, 0x9b, 0x20, }, 7, 0, "", "",
+"67 62 f2 7f 08 9b 20 \tv4fmaddss (%eax),%xmm0,%xmm4",},
+{{0x62, 0xf2, 0x7f, 0x08, 0x9b, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 08 9b a4 c8 78 56 34 12 \tv4fmaddss 0x12345678(%rax,%rcx,8),%xmm0,%xmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x08, 0x9b, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7f 08 9b a4 c8 78 56 34 12 \tv4fmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4",},
{{0x62, 0x22, 0x7d, 0x41, 0xa0, 0xa4, 0xed, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
"62 22 7d 41 a0 a4 ed 7b 00 00 00 \tvpscatterdd %zmm28,0x7b(%rbp,%zmm29,8){%k1}",},
{{0x62, 0x22, 0xfd, 0x41, 0xa0, 0x94, 0xdd, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
@@ -699,6 +1003,54 @@
"62 b2 7d 41 a3 b4 ed 7b 00 00 00 \tvscatterqps %ymm6,0x7b(%rbp,%zmm29,8){%k1}",},
{{0x62, 0x22, 0xfd, 0x41, 0xa3, 0xa4, 0xed, 0x7b, 0x00, 0x00, 0x00, }, 11, 0, "", "",
"62 22 fd 41 a3 a4 ed 7b 00 00 00 \tvscatterqpd %zmm28,0x7b(%rbp,%zmm29,8){%k1}",},
+{{0xc4, 0xe2, 0x69, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 aa d9 \tvfmsub213ps %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d aa d9 \tvfmsub213ps %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xaa, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 aa d9 \tvfmsub213ps %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xaa, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 aa 9c c8 78 56 34 12 \tvfmsub213ps 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0xaa, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 aa 9c c8 78 56 34 12 \tvfmsub213ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0xe9, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 aa d9 \tvfmsub213pd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xed, 0xaa, 0xd9, }, 5, 0, "", "",
+"c4 e2 ed aa d9 \tvfmsub213pd %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0xaa, 0xd9, }, 6, 0, "", "",
+"62 f2 ed 48 aa d9 \tvfmsub213pd %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0xed, 0x48, 0xaa, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 ed 48 aa 9c c8 78 56 34 12 \tvfmsub213pd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0xed, 0x48, 0xaa, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 ed 48 aa 9c c8 78 56 34 12 \tvfmsub213pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x7f, 0x48, 0xaa, 0x20, }, 6, 0, "", "",
+"62 f2 7f 48 aa 20 \tv4fnmaddps (%rax),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0xaa, 0x20, }, 7, 0, "", "",
+"67 62 f2 7f 48 aa 20 \tv4fnmaddps (%eax),%zmm0,%zmm4",},
+{{0x62, 0xf2, 0x7f, 0x48, 0xaa, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 48 aa a4 c8 78 56 34 12 \tv4fnmaddps 0x12345678(%rax,%rcx,8),%zmm0,%zmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x48, 0xaa, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7f 48 aa a4 c8 78 56 34 12 \tv4fnmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4",},
+{{0xc4, 0xe2, 0x69, 0xab, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 ab d9 \tvfmsub213ss %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xab, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 69 ab 9c c8 78 56 34 12 \tvfmsub213ss 0x12345678(%rax,%rcx,8),%xmm2,%xmm3",},
+{{0x67, 0xc4, 0xe2, 0x69, 0xab, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 69 ab 9c c8 78 56 34 12 \tvfmsub213ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0xab, 0xd9, }, 5, 0, "", "",
+"c4 e2 e9 ab d9 \tvfmsub213sd %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0xe9, 0xab, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 e9 ab 9c c8 78 56 34 12 \tvfmsub213sd 0x12345678(%rax,%rcx,8),%xmm2,%xmm3",},
+{{0x67, 0xc4, 0xe2, 0xe9, 0xab, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 e9 ab 9c c8 78 56 34 12 \tvfmsub213sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3",},
+{{0x62, 0xf2, 0x7f, 0x08, 0xab, 0x20, }, 6, 0, "", "",
+"62 f2 7f 08 ab 20 \tv4fnmaddss (%rax),%xmm0,%xmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x08, 0xab, 0x20, }, 7, 0, "", "",
+"67 62 f2 7f 08 ab 20 \tv4fnmaddss (%eax),%xmm0,%xmm4",},
+{{0x62, 0xf2, 0x7f, 0x08, 0xab, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7f 08 ab a4 c8 78 56 34 12 \tv4fnmaddss 0x12345678(%rax,%rcx,8),%xmm0,%xmm4",},
+{{0x67, 0x62, 0xf2, 0x7f, 0x08, 0xab, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7f 08 ab a4 c8 78 56 34 12 \tv4fnmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4",},
{{0x62, 0x02, 0xa5, 0x40, 0xb4, 0xe2, }, 6, 0, "", "",
"62 02 a5 40 b4 e2 \tvpmadd52luq %zmm26,%zmm27,%zmm28",},
{{0x62, 0x02, 0xa5, 0x40, 0xb5, 0xe2, }, 6, 0, "", "",
@@ -727,6 +1079,62 @@
"62 02 15 07 cd f4 \tvrsqrt28ss %xmm28,%xmm29,%xmm30{%k7}",},
{{0x62, 0x02, 0xad, 0x07, 0xcd, 0xd9, }, 6, 0, "", "",
"62 02 ad 07 cd d9 \tvrsqrt28sd %xmm25,%xmm26,%xmm27{%k7}",},
+{{0x66, 0x0f, 0x38, 0xcf, 0xd9, }, 5, 0, "", "",
+"66 0f 38 cf d9 \tgf2p8mulb %xmm1,%xmm3",},
+{{0x66, 0x0f, 0x38, 0xcf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"66 0f 38 cf 9c c8 78 56 34 12 \tgf2p8mulb 0x12345678(%rax,%rcx,8),%xmm3",},
+{{0x67, 0x66, 0x0f, 0x38, 0xcf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 66 0f 38 cf 9c c8 78 56 34 12 \tgf2p8mulb 0x12345678(%eax,%ecx,8),%xmm3",},
+{{0xc4, 0xe2, 0x69, 0xcf, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 cf d9 \tvgf2p8mulb %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xcf, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d cf d9 \tvgf2p8mulb %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xcf, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 cf d9 \tvgf2p8mulb %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xcf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 cf 9c c8 78 56 34 12 \tvgf2p8mulb 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0xcf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 cf 9c c8 78 56 34 12 \tvgf2p8mulb 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xdc, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 dc d9 \tvaesenc %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xdc, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d dc d9 \tvaesenc %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdc, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 dc d9 \tvaesenc %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdc, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 dc 9c c8 78 56 34 12 \tvaesenc 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0xdc, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 dc 9c c8 78 56 34 12 \tvaesenc 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xdd, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 dd d9 \tvaesenclast %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xdd, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d dd d9 \tvaesenclast %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdd, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 dd d9 \tvaesenclast %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdd, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 dd 9c c8 78 56 34 12 \tvaesenclast 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0xdd, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 dd 9c c8 78 56 34 12 \tvaesenclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xde, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 de d9 \tvaesdec %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xde, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d de d9 \tvaesdec %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xde, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 de d9 \tvaesdec %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xde, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 de 9c c8 78 56 34 12 \tvaesdec 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0xde, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 de 9c c8 78 56 34 12 \tvaesdec 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
+{{0xc4, 0xe2, 0x69, 0xdf, 0xd9, }, 5, 0, "", "",
+"c4 e2 69 df d9 \tvaesdeclast %xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe2, 0x6d, 0xdf, 0xd9, }, 5, 0, "", "",
+"c4 e2 6d df d9 \tvaesdeclast %ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdf, 0xd9, }, 6, 0, "", "",
+"62 f2 6d 48 df d9 \tvaesdeclast %zmm1,%zmm2,%zmm3",},
+{{0x62, 0xf2, 0x6d, 0x48, 0xdf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 6d 48 df 9c c8 78 56 34 12 \tvaesdeclast 0x12345678(%rax,%rcx,8),%zmm2,%zmm3",},
+{{0x67, 0x62, 0xf2, 0x6d, 0x48, 0xdf, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 6d 48 df 9c c8 78 56 34 12 \tvaesdeclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3",},
{{0x62, 0x03, 0x15, 0x40, 0x03, 0xf4, 0x12, }, 7, 0, "", "",
"62 03 15 40 03 f4 12 \tvalignd $0x12,%zmm28,%zmm29,%zmm30",},
{{0x62, 0x03, 0xad, 0x40, 0x03, 0xd9, 0x12, }, 7, 0, "", "",
@@ -827,6 +1235,14 @@
"62 03 2d 40 43 d9 12 \tvshufi32x4 $0x12,%zmm25,%zmm26,%zmm27",},
{{0x62, 0x03, 0x95, 0x40, 0x43, 0xf4, 0x12, }, 7, 0, "", "",
"62 03 95 40 43 f4 12 \tvshufi64x2 $0x12,%zmm28,%zmm29,%zmm30",},
+{{0xc4, 0xe3, 0x69, 0x44, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 69 44 d9 12 \tvpclmulqdq $0x12,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0x6d, 0x44, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 6d 44 d9 12 \tvpclmulqdq $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0x6d, 0x48, 0x44, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 48 44 d9 12 \tvpclmulqdq $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0x2d, 0x40, 0x44, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 2d 40 44 d9 12 \tvpclmulqdq $0x12,%zmm25,%zmm26,%zmm27",},
{{0x62, 0x03, 0x2d, 0x40, 0x50, 0xd9, 0x12, }, 7, 0, "", "",
"62 03 2d 40 50 d9 12 \tvrangeps $0x12,%zmm25,%zmm26,%zmm27",},
{{0x62, 0x03, 0x95, 0x40, 0x50, 0xf4, 0x12, }, 7, 0, "", "",
@@ -859,6 +1275,74 @@
"62 93 7d 08 67 eb 12 \tvfpclassss $0x12,%xmm27,%k5",},
{{0x62, 0x93, 0xfd, 0x08, 0x67, 0xee, 0x12, }, 7, 0, "", "",
"62 93 fd 08 67 ee 12 \tvfpclasssd $0x12,%xmm30,%k5",},
+{{0x62, 0xf3, 0xed, 0x08, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 70 d9 12 \tvpshldw $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 70 d9 12 \tvpshldw $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 70 d9 12 \tvpshldw $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0xad, 0x40, 0x70, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 ad 40 70 d9 12 \tvpshldw $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x62, 0xf3, 0x6d, 0x08, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 08 71 d9 12 \tvpshldd $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0x6d, 0x28, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 28 71 d9 12 \tvpshldd $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0x6d, 0x48, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 48 71 d9 12 \tvpshldd $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0x2d, 0x40, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 2d 40 71 d9 12 \tvpshldd $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x62, 0xf3, 0xed, 0x08, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 71 d9 12 \tvpshldq $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 71 d9 12 \tvpshldq $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 71 d9 12 \tvpshldq $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0xad, 0x40, 0x71, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 ad 40 71 d9 12 \tvpshldq $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x62, 0xf3, 0xed, 0x08, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 72 d9 12 \tvpshrdw $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 72 d9 12 \tvpshrdw $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 72 d9 12 \tvpshrdw $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0xad, 0x40, 0x72, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 ad 40 72 d9 12 \tvpshrdw $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x62, 0xf3, 0x6d, 0x08, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 08 73 d9 12 \tvpshrdd $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0x6d, 0x28, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 28 73 d9 12 \tvpshrdd $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0x6d, 0x48, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 6d 48 73 d9 12 \tvpshrdd $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0x2d, 0x40, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 2d 40 73 d9 12 \tvpshrdd $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x62, 0xf3, 0xed, 0x08, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 08 73 d9 12 \tvpshrdq $0x12,%xmm1,%xmm2,%xmm3",},
+{{0x62, 0xf3, 0xed, 0x28, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 28 73 d9 12 \tvpshrdq $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 73 d9 12 \tvpshrdq $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0xad, 0x40, 0x73, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 ad 40 73 d9 12 \tvpshrdq $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x66, 0x0f, 0x3a, 0xce, 0xd9, 0x12, }, 6, 0, "", "",
+"66 0f 3a ce d9 12 \tgf2p8affineqb $0x12,%xmm1,%xmm3",},
+{{0xc4, 0xe3, 0xe9, 0xce, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 e9 ce d9 12 \tvgf2p8affineqb $0x12,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0xed, 0xce, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 ed ce d9 12 \tvgf2p8affineqb $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0xce, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 ce d9 12 \tvgf2p8affineqb $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0xad, 0x40, 0xce, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 ad 40 ce d9 12 \tvgf2p8affineqb $0x12,%zmm25,%zmm26,%zmm27",},
+{{0x66, 0x0f, 0x3a, 0xcf, 0xd9, 0x12, }, 6, 0, "", "",
+"66 0f 3a cf d9 12 \tgf2p8affineinvqb $0x12,%xmm1,%xmm3",},
+{{0xc4, 0xe3, 0xe9, 0xcf, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 e9 cf d9 12 \tvgf2p8affineinvqb $0x12,%xmm1,%xmm2,%xmm3",},
+{{0xc4, 0xe3, 0xed, 0xcf, 0xd9, 0x12, }, 6, 0, "", "",
+"c4 e3 ed cf d9 12 \tvgf2p8affineinvqb $0x12,%ymm1,%ymm2,%ymm3",},
+{{0x62, 0xf3, 0xed, 0x48, 0xcf, 0xd9, 0x12, }, 7, 0, "", "",
+"62 f3 ed 48 cf d9 12 \tvgf2p8affineinvqb $0x12,%zmm1,%zmm2,%zmm3",},
+{{0x62, 0x03, 0xad, 0x40, 0xcf, 0xd9, 0x12, }, 7, 0, "", "",
+"62 03 ad 40 cf d9 12 \tvgf2p8affineinvqb $0x12,%zmm25,%zmm26,%zmm27",},
{{0x62, 0x91, 0x2d, 0x40, 0x72, 0xc1, 0x12, }, 7, 0, "", "",
"62 91 2d 40 72 c1 12 \tvprord $0x12,%zmm25,%zmm26",},
{{0x62, 0x91, 0xad, 0x40, 0x72, 0xc1, 0x12, }, 7, 0, "", "",
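
Each entry in the generated tables above pairs a raw x86 encoding with the objdump output the perf instruction-decoder test must reproduce; the variants with a leading 0x67 byte add an address-size prefix to exercise 32-bit addressing forms of the same opcode. A minimal sketch of the entry layout these initializers appear to populate — the struct and field names are modelled on insn-x86.c, which this diff does not show, so treat them as assumptions:

struct test_data {
	unsigned char data[16];		/* raw instruction bytes (MAX_INSN_SIZE) */
	int expected_length;		/* length the decoder must compute */
	int expected_rel;		/* expected relative offset, branches only */
	const char *expected_op_str;	/* expected operand text, when checked */
	const char *expected_branch_str; /* expected branch text, when checked */
	const char *asm_rep;		/* "hex bytes \t mnemonic operands" */
};

Read against this layout, an entry such as {{0x62, 0xf2, 0xed, 0x08, 0x71, 0xd9, }, 6, 0, "", "", "... vpshldvq %xmm1,%xmm2,%xmm3"} says those six bytes must decode to a 6-byte instruction whose reference disassembly is the final string.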
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
index dd85a3afd9ce..ddbf07c50bb8 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-src.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -510,6 +510,82 @@ int main(void)
asm volatile("vrsqrt14ss %xmm24,%xmm25,%xmm26{%k7}");
asm volatile("vrsqrt14sd %xmm24,%xmm25,%xmm26{%k7}");
+ /* AVX-512: Op code 0f 38 50 */
+
+ asm volatile("vpdpbusd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpbusd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpbusd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpbusd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpdpbusd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 51 */
+
+ asm volatile("vpdpbusds %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpbusds %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpbusds %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpbusds 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpdpbusds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 52 */
+
+ asm volatile("vdpbf16ps %xmm1, %xmm2, %xmm3");
+ asm volatile("vdpbf16ps %ymm1, %ymm2, %ymm3");
+ asm volatile("vdpbf16ps %zmm1, %zmm2, %zmm3");
+ asm volatile("vdpbf16ps 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vdpbf16ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vpdpwssd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpwssd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpwssd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpwssd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpdpwssd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vp4dpwssd (%rax), %zmm0, %zmm4");
+ asm volatile("vp4dpwssd (%eax), %zmm0, %zmm4");
+ asm volatile("vp4dpwssd 0x12345678(%rax,%rcx,8),%zmm0,%zmm4");
+ asm volatile("vp4dpwssd 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 53 */
+
+ asm volatile("vpdpwssds %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpwssds %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpwssds %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpwssds 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpdpwssds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vp4dpwssds (%rax), %zmm0, %zmm4");
+ asm volatile("vp4dpwssds (%eax), %zmm0, %zmm4");
+ asm volatile("vp4dpwssds 0x12345678(%rax,%rcx,8),%zmm0,%zmm4");
+ asm volatile("vp4dpwssds 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 54 */
+
+ asm volatile("vpopcntb %xmm1, %xmm2");
+ asm volatile("vpopcntb %ymm1, %ymm2");
+ asm volatile("vpopcntb %zmm1, %zmm2");
+ asm volatile("vpopcntb 0x12345678(%rax,%rcx,8),%zmm2");
+ asm volatile("vpopcntb 0x12345678(%eax,%ecx,8),%zmm2");
+
+ asm volatile("vpopcntw %xmm1, %xmm2");
+ asm volatile("vpopcntw %ymm1, %ymm2");
+ asm volatile("vpopcntw %zmm1, %zmm2");
+ asm volatile("vpopcntw 0x12345678(%rax,%rcx,8),%zmm2");
+ asm volatile("vpopcntw 0x12345678(%eax,%ecx,8),%zmm2");
+
+ /* AVX-512: Op code 0f 38 55 */
+
+ asm volatile("vpopcntd %xmm1, %xmm2");
+ asm volatile("vpopcntd %ymm1, %ymm2");
+ asm volatile("vpopcntd %zmm1, %zmm2");
+ asm volatile("vpopcntd 0x12345678(%rax,%rcx,8),%zmm2");
+ asm volatile("vpopcntd 0x12345678(%eax,%ecx,8),%zmm2");
+
+ asm volatile("vpopcntq %xmm1, %xmm2");
+ asm volatile("vpopcntq %ymm1, %ymm2");
+ asm volatile("vpopcntq %zmm1, %zmm2");
+ asm volatile("vpopcntq 0x12345678(%rax,%rcx,8),%zmm2");
+ asm volatile("vpopcntq 0x12345678(%eax,%ecx,8),%zmm2");
+
/* AVX-512: Op code 0f 38 59 */
asm volatile("vpbroadcastq %xmm4,%xmm6");
@@ -526,6 +602,34 @@ int main(void)
asm volatile("vbroadcasti32x8 (%rcx),%zmm28");
asm volatile("vbroadcasti64x4 (%rcx),%zmm26");
+ /* AVX-512: Op code 0f 38 62 */
+
+ asm volatile("vpexpandb %xmm1, %xmm2");
+ asm volatile("vpexpandb %ymm1, %ymm2");
+ asm volatile("vpexpandb %zmm1, %zmm2");
+ asm volatile("vpexpandb 0x12345678(%rax,%rcx,8),%zmm2");
+ asm volatile("vpexpandb 0x12345678(%eax,%ecx,8),%zmm2");
+
+ asm volatile("vpexpandw %xmm1, %xmm2");
+ asm volatile("vpexpandw %ymm1, %ymm2");
+ asm volatile("vpexpandw %zmm1, %zmm2");
+ asm volatile("vpexpandw 0x12345678(%rax,%rcx,8),%zmm2");
+ asm volatile("vpexpandw 0x12345678(%eax,%ecx,8),%zmm2");
+
+ /* AVX-512: Op code 0f 38 63 */
+
+ asm volatile("vpcompressb %xmm1, %xmm2");
+ asm volatile("vpcompressb %ymm1, %ymm2");
+ asm volatile("vpcompressb %zmm1, %zmm2");
+ asm volatile("vpcompressb %zmm2,0x12345678(%rax,%rcx,8)");
+ asm volatile("vpcompressb %zmm2,0x12345678(%eax,%ecx,8)");
+
+ asm volatile("vpcompressw %xmm1, %xmm2");
+ asm volatile("vpcompressw %ymm1, %ymm2");
+ asm volatile("vpcompressw %zmm1, %zmm2");
+ asm volatile("vpcompressw %zmm2,0x12345678(%rax,%rcx,8)");
+ asm volatile("vpcompressw %zmm2,0x12345678(%eax,%ecx,8)");
+
/* AVX-512: Op code 0f 38 64 */
asm volatile("vpblendmd %zmm26,%zmm27,%zmm28");
@@ -541,6 +645,76 @@ int main(void)
asm volatile("vpblendmb %zmm26,%zmm27,%zmm28");
asm volatile("vpblendmw %zmm26,%zmm27,%zmm28");
+ /* AVX-512: Op code 0f 38 68 */
+
+ asm volatile("vp2intersectd %xmm1, %xmm2, %k3");
+ asm volatile("vp2intersectd %ymm1, %ymm2, %k3");
+ asm volatile("vp2intersectd %zmm1, %zmm2, %k3");
+ asm volatile("vp2intersectd 0x12345678(%rax,%rcx,8),%zmm2,%k3");
+ asm volatile("vp2intersectd 0x12345678(%eax,%ecx,8),%zmm2,%k3");
+
+ asm volatile("vp2intersectq %xmm1, %xmm2, %k3");
+ asm volatile("vp2intersectq %ymm1, %ymm2, %k3");
+ asm volatile("vp2intersectq %zmm1, %zmm2, %k3");
+ asm volatile("vp2intersectq 0x12345678(%rax,%rcx,8),%zmm2,%k3");
+ asm volatile("vp2intersectq 0x12345678(%eax,%ecx,8),%zmm2,%k3");
+
+ /* AVX-512: Op code 0f 38 70 */
+
+ asm volatile("vpshldvw %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshldvw %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshldvw %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshldvw 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpshldvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 71 */
+
+ asm volatile("vpshldvd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshldvd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshldvd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshldvd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpshldvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vpshldvq %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshldvq %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshldvq %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshldvq 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpshldvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 72 */
+
+ asm volatile("vcvtne2ps2bf16 %xmm1, %xmm2, %xmm3");
+ asm volatile("vcvtne2ps2bf16 %ymm1, %ymm2, %ymm3");
+ asm volatile("vcvtne2ps2bf16 %zmm1, %zmm2, %zmm3");
+ asm volatile("vcvtne2ps2bf16 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vcvtne2ps2bf16 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vcvtneps2bf16 %xmm1, %xmm2");
+ asm volatile("vcvtneps2bf16 %ymm1, %xmm2");
+ asm volatile("vcvtneps2bf16 %zmm1, %ymm2");
+ asm volatile("vcvtneps2bf16 0x12345678(%rax,%rcx,8),%ymm2");
+ asm volatile("vcvtneps2bf16 0x12345678(%eax,%ecx,8),%ymm2");
+
+ asm volatile("vpshrdvw %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshrdvw %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshrdvw %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshrdvw 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpshrdvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 73 */
+
+ asm volatile("vpshrdvd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshrdvd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshrdvd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshrdvd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpshrdvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vpshrdvq %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshrdvq %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshrdvq %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshrdvq 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vpshrdvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
/* AVX-512: Op code 0f 38 75 */
asm volatile("vpermi2b %zmm24,%zmm25,%zmm26");
@@ -613,6 +787,14 @@ int main(void)
asm volatile("vpermb %zmm26,%zmm27,%zmm28");
asm volatile("vpermw %zmm26,%zmm27,%zmm28");
+ /* AVX-512: Op code 0f 38 8f */
+
+ asm volatile("vpshufbitqmb %xmm1, %xmm2, %k3");
+ asm volatile("vpshufbitqmb %ymm1, %ymm2, %k3");
+ asm volatile("vpshufbitqmb %zmm1, %zmm2, %k3");
+ asm volatile("vpshufbitqmb 0x12345678(%rax,%rcx,8),%zmm2,%k3");
+ asm volatile("vpshufbitqmb 0x12345678(%eax,%ecx,8),%zmm2,%k3");
+
/* AVX-512: Op code 0f 38 90 */
asm volatile("vpgatherdd %xmm2,0x02(%rbp,%xmm7,2),%xmm1");
@@ -627,6 +809,40 @@ int main(void)
asm volatile("vpgatherqd 0x7b(%rbp,%zmm27,8),%ymm26{%k1}");
asm volatile("vpgatherqq 0x7b(%rbp,%zmm27,8),%zmm26{%k1}");
+ /* AVX-512: Op code 0f 38 9a */
+
+ asm volatile("vfmsub132ps %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132ps %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub132ps %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub132ps 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vfmsub132ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vfmsub132pd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132pd %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub132pd %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub132pd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vfmsub132pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("v4fmaddps (%rax), %zmm0, %zmm4");
+ asm volatile("v4fmaddps (%eax), %zmm0, %zmm4");
+ asm volatile("v4fmaddps 0x12345678(%rax,%rcx,8),%zmm0,%zmm4");
+ asm volatile("v4fmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 9b */
+
+ asm volatile("vfmsub132ss %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132ss 0x12345678(%rax,%rcx,8),%xmm2,%xmm3");
+ asm volatile("vfmsub132ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("vfmsub132sd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132sd 0x12345678(%rax,%rcx,8),%xmm2,%xmm3");
+ asm volatile("vfmsub132sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("v4fmaddss (%rax), %xmm0, %xmm4");
+ asm volatile("v4fmaddss (%eax), %xmm0, %xmm4");
+ asm volatile("v4fmaddss 0x12345678(%rax,%rcx,8),%xmm0,%xmm4");
+ asm volatile("v4fmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4");
+
/* AVX-512: Op code 0f 38 a0 */
asm volatile("vpscatterdd %zmm28,0x7b(%rbp,%zmm29,8){%k1}");
@@ -647,6 +863,40 @@ int main(void)
asm volatile("vscatterqps %ymm6,0x7b(%rbp,%zmm29,8){%k1}");
asm volatile("vscatterqpd %zmm28,0x7b(%rbp,%zmm29,8){%k1}");
+ /* AVX-512: Op code 0f 38 aa */
+
+ asm volatile("vfmsub213ps %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213ps %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub213ps %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub213ps 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vfmsub213ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vfmsub213pd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213pd %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub213pd %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub213pd 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vfmsub213pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("v4fnmaddps (%rax), %zmm0, %zmm4");
+ asm volatile("v4fnmaddps (%eax), %zmm0, %zmm4");
+ asm volatile("v4fnmaddps 0x12345678(%rax,%rcx,8),%zmm0,%zmm4");
+ asm volatile("v4fnmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 ab */
+
+ asm volatile("vfmsub213ss %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213ss 0x12345678(%rax,%rcx,8),%xmm2,%xmm3");
+ asm volatile("vfmsub213ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("vfmsub213sd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213sd 0x12345678(%rax,%rcx,8),%xmm2,%xmm3");
+ asm volatile("vfmsub213sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("v4fnmaddss (%rax), %xmm0, %xmm4");
+ asm volatile("v4fnmaddss (%eax), %xmm0, %xmm4");
+ asm volatile("v4fnmaddss 0x12345678(%rax,%rcx,8),%xmm0,%xmm4");
+ asm volatile("v4fnmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4");
+
/* AVX-512: Op code 0f 38 b4 */
asm volatile("vpmadd52luq %zmm26,%zmm27,%zmm28");
@@ -685,6 +935,50 @@ int main(void)
asm volatile("vrsqrt28ss %xmm28,%xmm29,%xmm30{%k7}");
asm volatile("vrsqrt28sd %xmm25,%xmm26,%xmm27{%k7}");
+ /* AVX-512: Op code 0f 38 cf */
+
+ asm volatile("gf2p8mulb %xmm1, %xmm3");
+ asm volatile("gf2p8mulb 0x12345678(%rax,%rcx,8),%xmm3");
+ asm volatile("gf2p8mulb 0x12345678(%eax,%ecx,8),%xmm3");
+
+ asm volatile("vgf2p8mulb %xmm1, %xmm2, %xmm3");
+ asm volatile("vgf2p8mulb %ymm1, %ymm2, %ymm3");
+ asm volatile("vgf2p8mulb %zmm1, %zmm2, %zmm3");
+ asm volatile("vgf2p8mulb 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vgf2p8mulb 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 dc */
+
+ asm volatile("vaesenc %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesenc %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesenc %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesenc 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vaesenc 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 dd */
+
+ asm volatile("vaesenclast %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesenclast %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesenclast %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesenclast 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vaesenclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 de */
+
+ asm volatile("vaesdec %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesdec %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesdec %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesdec 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vaesdec 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 df */
+
+ asm volatile("vaesdeclast %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesdeclast %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesdeclast %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesdeclast 0x12345678(%rax,%rcx,8),%zmm2,%zmm3");
+ asm volatile("vaesdeclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
/* AVX-512: Op code 0f 3a 03 */
asm volatile("valignd $0x12,%zmm28,%zmm29,%zmm30");
@@ -804,6 +1098,13 @@ int main(void)
asm volatile("vshufi32x4 $0x12,%zmm25,%zmm26,%zmm27");
asm volatile("vshufi64x2 $0x12,%zmm28,%zmm29,%zmm30");
+ /* AVX-512: Op code 0f 3a 44 */
+
+ asm volatile("vpclmulqdq $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpclmulqdq $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpclmulqdq $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpclmulqdq $0x12,%zmm25,%zmm26,%zmm27");
+
/* AVX-512: Op code 0f 3a 50 */
asm volatile("vrangeps $0x12,%zmm25,%zmm26,%zmm27");
@@ -844,6 +1145,62 @@ int main(void)
asm volatile("vfpclassss $0x12,%xmm27,%k5");
asm volatile("vfpclasssd $0x12,%xmm30,%k5");
+ /* AVX-512: Op code 0f 3a 70 */
+
+ asm volatile("vpshldw $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshldw $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshldw $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpshldw $0x12,%zmm25,%zmm26,%zmm27");
+
+ /* AVX-512: Op code 0f 3a 71 */
+
+ asm volatile("vpshldd $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshldd $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshldd $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpshldd $0x12,%zmm25,%zmm26,%zmm27");
+
+ asm volatile("vpshldq $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshldq $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshldq $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpshldq $0x12,%zmm25,%zmm26,%zmm27");
+
+ /* AVX-512: Op code 0f 3a 72 */
+
+ asm volatile("vpshrdw $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshrdw $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshrdw $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpshrdw $0x12,%zmm25,%zmm26,%zmm27");
+
+ /* AVX-512: Op code 0f 3a 73 */
+
+ asm volatile("vpshrdd $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshrdd $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshrdd $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpshrdd $0x12,%zmm25,%zmm26,%zmm27");
+
+ asm volatile("vpshrdq $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshrdq $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshrdq $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vpshrdq $0x12,%zmm25,%zmm26,%zmm27");
+
+ /* AVX-512: Op code 0f 3a ce */
+
+ asm volatile("gf2p8affineqb $0x12,%xmm1,%xmm3");
+
+ asm volatile("vgf2p8affineqb $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vgf2p8affineqb $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vgf2p8affineqb $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vgf2p8affineqb $0x12,%zmm25,%zmm26,%zmm27");
+
+ /* AVX-512: Op code 0f 3a cf */
+
+ asm volatile("gf2p8affineinvqb $0x12,%xmm1,%xmm3");
+
+ asm volatile("vgf2p8affineinvqb $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vgf2p8affineinvqb $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vgf2p8affineinvqb $0x12,%zmm1,%zmm2,%zmm3");
+ asm volatile("vgf2p8affineinvqb $0x12,%zmm25,%zmm26,%zmm27");
+
/* AVX-512: Op code 0f 72 (Grp13) */
asm volatile("vprord $0x12,%zmm25,%zmm26");
@@ -1946,6 +2303,69 @@ int main(void)
asm volatile("vrsqrt14ss %xmm4,%xmm5,%xmm6{%k7}");
asm volatile("vrsqrt14sd %xmm4,%xmm5,%xmm6{%k7}");
+ /* AVX-512: Op code 0f 38 50 */
+
+ asm volatile("vpdpbusd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpbusd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpbusd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpbusd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 51 */
+
+ asm volatile("vpdpbusds %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpbusds %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpbusds %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpbusds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 52 */
+
+ asm volatile("vdpbf16ps %xmm1, %xmm2, %xmm3");
+ asm volatile("vdpbf16ps %ymm1, %ymm2, %ymm3");
+ asm volatile("vdpbf16ps %zmm1, %zmm2, %zmm3");
+ asm volatile("vdpbf16ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vpdpwssd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpwssd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpwssd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpwssd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vp4dpwssd (%eax), %zmm0, %zmm4");
+ asm volatile("vp4dpwssd 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 53 */
+
+ asm volatile("vpdpwssds %xmm1, %xmm2, %xmm3");
+ asm volatile("vpdpwssds %ymm1, %ymm2, %ymm3");
+ asm volatile("vpdpwssds %zmm1, %zmm2, %zmm3");
+ asm volatile("vpdpwssds 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vp4dpwssds (%eax), %zmm0, %zmm4");
+ asm volatile("vp4dpwssds 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 54 */
+
+ asm volatile("vpopcntb %xmm1, %xmm2");
+ asm volatile("vpopcntb %ymm1, %ymm2");
+ asm volatile("vpopcntb %zmm1, %zmm2");
+ asm volatile("vpopcntb 0x12345678(%eax,%ecx,8),%zmm2");
+
+ asm volatile("vpopcntw %xmm1, %xmm2");
+ asm volatile("vpopcntw %ymm1, %ymm2");
+ asm volatile("vpopcntw %zmm1, %zmm2");
+ asm volatile("vpopcntw 0x12345678(%eax,%ecx,8),%zmm2");
+
+ /* AVX-512: Op code 0f 38 55 */
+
+ asm volatile("vpopcntd %xmm1, %xmm2");
+ asm volatile("vpopcntd %ymm1, %ymm2");
+ asm volatile("vpopcntd %zmm1, %zmm2");
+ asm volatile("vpopcntd 0x12345678(%eax,%ecx,8),%zmm2");
+
+ asm volatile("vpopcntq %xmm1, %xmm2");
+ asm volatile("vpopcntq %ymm1, %ymm2");
+ asm volatile("vpopcntq %zmm1, %zmm2");
+ asm volatile("vpopcntq 0x12345678(%eax,%ecx,8),%zmm2");
+
/* AVX-512: Op code 0f 38 59 */
asm volatile("vpbroadcastq %xmm4,%xmm6");
@@ -1962,6 +2382,30 @@ int main(void)
asm volatile("vbroadcasti32x8 (%ecx),%zmm6");
asm volatile("vbroadcasti64x4 (%ecx),%zmm6");
+ /* AVX-512: Op code 0f 38 62 */
+
+ asm volatile("vpexpandb %xmm1, %xmm2");
+ asm volatile("vpexpandb %ymm1, %ymm2");
+ asm volatile("vpexpandb %zmm1, %zmm2");
+ asm volatile("vpexpandb 0x12345678(%eax,%ecx,8),%zmm2");
+
+ asm volatile("vpexpandw %xmm1, %xmm2");
+ asm volatile("vpexpandw %ymm1, %ymm2");
+ asm volatile("vpexpandw %zmm1, %zmm2");
+ asm volatile("vpexpandw 0x12345678(%eax,%ecx,8),%zmm2");
+
+ /* AVX-512: Op code 0f 38 63 */
+
+ asm volatile("vpcompressb %xmm1, %xmm2");
+ asm volatile("vpcompressb %ymm1, %ymm2");
+ asm volatile("vpcompressb %zmm1, %zmm2");
+ asm volatile("vpcompressb %zmm2,0x12345678(%eax,%ecx,8)");
+
+ asm volatile("vpcompressw %xmm1, %xmm2");
+ asm volatile("vpcompressw %ymm1, %ymm2");
+ asm volatile("vpcompressw %zmm1, %zmm2");
+ asm volatile("vpcompressw %zmm2,0x12345678(%eax,%ecx,8)");
+
/* AVX-512: Op code 0f 38 64 */
asm volatile("vpblendmd %zmm4,%zmm5,%zmm6");
@@ -1977,6 +2421,66 @@ int main(void)
asm volatile("vpblendmb %zmm4,%zmm5,%zmm6");
asm volatile("vpblendmw %zmm4,%zmm5,%zmm6");
+ /* AVX-512: Op code 0f 38 68 */
+
+ asm volatile("vp2intersectd %xmm1, %xmm2, %k3");
+ asm volatile("vp2intersectd %ymm1, %ymm2, %k3");
+ asm volatile("vp2intersectd %zmm1, %zmm2, %k3");
+ asm volatile("vp2intersectd 0x12345678(%eax,%ecx,8),%zmm2,%k3");
+
+ asm volatile("vp2intersectq %xmm1, %xmm2, %k3");
+ asm volatile("vp2intersectq %ymm1, %ymm2, %k3");
+ asm volatile("vp2intersectq %zmm1, %zmm2, %k3");
+ asm volatile("vp2intersectq 0x12345678(%eax,%ecx,8),%zmm2,%k3");
+
+ /* AVX-512: Op code 0f 38 70 */
+
+ asm volatile("vpshldvw %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshldvw %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshldvw %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshldvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 71 */
+
+ asm volatile("vpshldvd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshldvd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshldvd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshldvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vpshldvq %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshldvq %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshldvq %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshldvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 72 */
+
+ asm volatile("vcvtne2ps2bf16 %xmm1, %xmm2, %xmm3");
+ asm volatile("vcvtne2ps2bf16 %ymm1, %ymm2, %ymm3");
+ asm volatile("vcvtne2ps2bf16 %zmm1, %zmm2, %zmm3");
+ asm volatile("vcvtne2ps2bf16 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vcvtneps2bf16 %xmm1, %xmm2");
+ asm volatile("vcvtneps2bf16 %ymm1, %xmm2");
+ asm volatile("vcvtneps2bf16 %zmm1, %ymm2");
+ asm volatile("vcvtneps2bf16 0x12345678(%eax,%ecx,8),%ymm2");
+
+ asm volatile("vpshrdvw %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshrdvw %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshrdvw %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshrdvw 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 73 */
+
+ asm volatile("vpshrdvd %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshrdvd %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshrdvd %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshrdvd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vpshrdvq %xmm1, %xmm2, %xmm3");
+ asm volatile("vpshrdvq %ymm1, %ymm2, %ymm3");
+ asm volatile("vpshrdvq %zmm1, %zmm2, %zmm3");
+ asm volatile("vpshrdvq 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
/* AVX-512: Op code 0f 38 75 */
asm volatile("vpermi2b %zmm4,%zmm5,%zmm6");
@@ -2048,6 +2552,13 @@ int main(void)
asm volatile("vpermb %zmm4,%zmm5,%zmm6");
asm volatile("vpermw %zmm4,%zmm5,%zmm6");
+ /* AVX-512: Op code 0f 38 8f */
+
+ asm volatile("vpshufbitqmb %xmm1, %xmm2, %k3");
+ asm volatile("vpshufbitqmb %ymm1, %ymm2, %k3");
+ asm volatile("vpshufbitqmb %zmm1, %zmm2, %k3");
+ asm volatile("vpshufbitqmb 0x12345678(%eax,%ecx,8),%zmm2,%k3");
+
/* AVX-512: Op code 0f 38 90 */
asm volatile("vpgatherdd %xmm2,0x02(%ebp,%xmm7,2),%xmm1");
@@ -2062,6 +2573,32 @@ int main(void)
asm volatile("vpgatherqd 0x7b(%ebp,%zmm7,8),%ymm6{%k1}");
asm volatile("vpgatherqq 0x7b(%ebp,%zmm7,8),%zmm6{%k1}");
+ /* AVX-512: Op code 0f 38 9a */
+
+ asm volatile("vfmsub132ps %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132ps %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub132ps %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub132ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vfmsub132pd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132pd %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub132pd %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub132pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("v4fmaddps (%eax), %zmm0, %zmm4");
+ asm volatile("v4fmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 9b */
+
+ asm volatile("vfmsub132ss %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("vfmsub132sd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub132sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("v4fmaddss (%eax), %xmm0, %xmm4");
+ asm volatile("v4fmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4");
+
/* AVX-512: Op code 0f 38 a0 */
asm volatile("vpscatterdd %zmm6,0x7b(%ebp,%zmm7,8){%k1}");
@@ -2082,6 +2619,32 @@ int main(void)
asm volatile("vscatterqps %ymm6,0x7b(%ebp,%zmm7,8){%k1}");
asm volatile("vscatterqpd %zmm6,0x7b(%ebp,%zmm7,8){%k1}");
+ /* AVX-512: Op code 0f 38 aa */
+
+ asm volatile("vfmsub213ps %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213ps %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub213ps %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub213ps 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("vfmsub213pd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213pd %ymm1, %ymm2, %ymm3");
+ asm volatile("vfmsub213pd %zmm1, %zmm2, %zmm3");
+ asm volatile("vfmsub213pd 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ asm volatile("v4fnmaddps (%eax), %zmm0, %zmm4");
+ asm volatile("v4fnmaddps 0x12345678(%eax,%ecx,8),%zmm0,%zmm4");
+
+ /* AVX-512: Op code 0f 38 ab */
+
+ asm volatile("vfmsub213ss %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("vfmsub213sd %xmm1, %xmm2, %xmm3");
+ asm volatile("vfmsub213sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm3");
+
+ asm volatile("v4fnmaddss (%eax), %xmm0, %xmm4");
+ asm volatile("v4fnmaddss 0x12345678(%eax,%ecx,8),%xmm0,%xmm4");
+
/* AVX-512: Op code 0f 38 b4 */
asm volatile("vpmadd52luq %zmm4,%zmm5,%zmm6");
@@ -2120,6 +2683,44 @@ int main(void)
asm volatile("vrsqrt28ss %xmm5,%xmm6,%xmm7{%k7}");
asm volatile("vrsqrt28sd %xmm5,%xmm6,%xmm7{%k7}");
+ /* AVX-512: Op code 0f 38 cf */
+
+ asm volatile("gf2p8mulb %xmm1, %xmm3");
+ asm volatile("gf2p8mulb 0x12345678(%eax,%ecx,8),%xmm3");
+
+ asm volatile("vgf2p8mulb %xmm1, %xmm2, %xmm3");
+ asm volatile("vgf2p8mulb %ymm1, %ymm2, %ymm3");
+ asm volatile("vgf2p8mulb %zmm1, %zmm2, %zmm3");
+ asm volatile("vgf2p8mulb 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 dc */
+
+ asm volatile("vaesenc %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesenc %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesenc %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesenc 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 dd */
+
+ asm volatile("vaesenclast %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesenclast %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesenclast %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesenclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 de */
+
+ asm volatile("vaesdec %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesdec %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesdec %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesdec 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 38 df */
+
+ asm volatile("vaesdeclast %xmm1, %xmm2, %xmm3");
+ asm volatile("vaesdeclast %ymm1, %ymm2, %ymm3");
+ asm volatile("vaesdeclast %zmm1, %zmm2, %zmm3");
+ asm volatile("vaesdeclast 0x12345678(%eax,%ecx,8),%zmm2,%zmm3");
+
/* AVX-512: Op code 0f 3a 03 */
asm volatile("valignd $0x12,%zmm5,%zmm6,%zmm7");
@@ -2239,6 +2840,12 @@ int main(void)
asm volatile("vshufi32x4 $0x12,%zmm5,%zmm6,%zmm7");
asm volatile("vshufi64x2 $0x12,%zmm5,%zmm6,%zmm7");
+ /* AVX-512: Op code 0f 3a 44 */
+
+ asm volatile("vpclmulqdq $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpclmulqdq $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpclmulqdq $0x12,%zmm1,%zmm2,%zmm3");
+
/* AVX-512: Op code 0f 3a 50 */
asm volatile("vrangeps $0x12,%zmm5,%zmm6,%zmm7");
@@ -2279,6 +2886,54 @@ int main(void)
asm volatile("vfpclassss $0x12,%xmm7,%k5");
asm volatile("vfpclasssd $0x12,%xmm7,%k5");
+ /* AVX-512: Op code 0f 3a 70 */
+
+ asm volatile("vpshldw $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshldw $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshldw $0x12,%zmm1,%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 3a 71 */
+
+ asm volatile("vpshldd $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshldd $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshldd $0x12,%zmm1,%zmm2,%zmm3");
+
+ asm volatile("vpshldq $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshldq $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshldq $0x12,%zmm1,%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 3a 72 */
+
+ asm volatile("vpshrdw $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshrdw $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshrdw $0x12,%zmm1,%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 3a 73 */
+
+ asm volatile("vpshrdd $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshrdd $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshrdd $0x12,%zmm1,%zmm2,%zmm3");
+
+ asm volatile("vpshrdq $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vpshrdq $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vpshrdq $0x12,%zmm1,%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 3a ce */
+
+ asm volatile("gf2p8affineqb $0x12,%xmm1,%xmm3");
+
+ asm volatile("vgf2p8affineqb $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vgf2p8affineqb $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vgf2p8affineqb $0x12,%zmm1,%zmm2,%zmm3");
+
+ /* AVX-512: Op code 0f 3a cf */
+
+ asm volatile("gf2p8affineinvqb $0x12,%xmm1,%xmm3");
+
+ asm volatile("vgf2p8affineinvqb $0x12,%xmm1,%xmm2,%xmm3");
+ asm volatile("vgf2p8affineinvqb $0x12,%ymm1,%ymm2,%ymm3");
+ asm volatile("vgf2p8affineinvqb $0x12,%zmm1,%zmm2,%zmm3");
+
/* AVX-512: Op code 0f 72 (Grp13) */
asm volatile("vprord $0x12,%zmm5,%zmm6");
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index d1044df7c0d7..ac45015cc6ba 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -18,8 +18,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
{
int rc = 0;
struct map *pos;
- struct map_groups *kmaps = &machine->kmaps;
- struct maps *maps = &kmaps->maps;
+ struct maps *kmaps = &machine->kmaps;
union perf_event *event = zalloc(sizeof(event->mmap) +
machine->id_hdr_size);
@@ -29,7 +28,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
return -1;
}
- maps__for_each_entry(maps, pos) {
+ maps__for_each_entry(kmaps, pos) {
struct kmap *kmap;
size_t size;
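
This hunk belongs to the series-wide consolidation that folds struct map_groups into struct maps: machine->kmaps is now a struct maps itself, so the intermediate wrapper dereference disappears and the iterator takes it directly. The before/after shape, using only names visible in the hunk:

/* before: a wrapper object plus the maps it contains */
struct map_groups *kmaps = &machine->kmaps;
struct maps *maps = &kmaps->maps;
maps__for_each_entry(maps, pos) { /* ... */ }

/* after: the wrapper is gone, iterate the maps directly */
struct maps *kmaps = &machine->kmaps;
maps__for_each_entry(kmaps, pos) { /* ... */ }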
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 376dbf10ad64..f8b6ae557d8b 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -547,8 +547,8 @@ static int64_t block_cycles_diff_cmp(struct hist_entry *left,
if (!pairs_left && !pairs_right)
return 0;
- l = labs(left->diff.cycles);
- r = labs(right->diff.cycles);
+ l = llabs(left->diff.cycles);
+ r = llabs(right->diff.cycles);
return r - l;
}
@@ -646,7 +646,7 @@ static void compute_cycles_diff(struct hist_entry *he,
if (i >= he->block_info->num || i >= NUM_SPARKS)
break;
- val = labs(pair->block_info->cycles_spark[i] -
+ val = llabs(pair->block_info->cycles_spark[i] -
he->block_info->cycles_spark[i]);
update_spark_value(pair->diff.svals, NUM_SPARKS,
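
The labs() to llabs() switch above matters on 32-bit builds: the cycles fields are 64-bit, and with an ILP32 long, labs() silently truncates its argument to 32 bits before taking the absolute value, while llabs() operates on the full long long. A standalone illustration of the difference:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long long v = -0x123456789LL;	/* does not fit in a 32-bit long */

	/* On an ILP32 target labs(v) would act on a truncated value;
	 * llabs(v) always sees all 64 bits. */
	printf("llabs: %lld\n", llabs(v));
	return 0;
}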
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index ab0f6e516b03..830d563de889 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -780,11 +780,6 @@ static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
return printed;
}
-static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
-{
- return maps__fprintf_task(&mg->maps, indent, fp);
-}
-
static void task__print_level(struct task *task, FILE *fp, int level)
{
struct thread *thread = task->thread;
@@ -795,7 +790,7 @@ static void task__print_level(struct task *task, FILE *fp, int level)
fprintf(fp, "%s\n", thread__comm_str(thread));
- map_groups__fprintf_task(thread->mg, comm_indent, fp);
+ maps__fprintf_task(thread->maps, comm_indent, fp);
if (!list_empty(&task->children)) {
list_for_each_entry(child, &task->children, list)
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index f86c5cce5b2c..e2406b291c1c 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -448,7 +448,7 @@ static int perf_evsel__check_attr(struct evsel *evsel,
"selected. Hence, no address to lookup the source line number.\n");
return -EINVAL;
}
- if (PRINT_FIELD(BRSTACKINSN) &&
+ if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
!(perf_evlist__combined_branch_type(session->evlist) &
PERF_SAMPLE_BRANCH_ANY)) {
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
@@ -932,6 +932,48 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
return len;
}
+static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srccode_state *state)
+{
+ char *srcfile;
+ int ret = 0;
+ unsigned line;
+ int len;
+ char *srccode;
+
+ if (!map || !map->dso)
+ return 0;
+ srcfile = get_srcline_split(map->dso,
+ map__rip_2objdump(map, addr),
+ &line);
+ if (!srcfile)
+ return 0;
+
+ /* Avoid redundant printing */
+ if (state &&
+ state->srcfile &&
+ !strcmp(state->srcfile, srcfile) &&
+ state->line == line) {
+ free(srcfile);
+ return 0;
+ }
+
+ srccode = find_sourceline(srcfile, line, &len);
+ if (!srccode)
+ goto out_free_line;
+
+ ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
+
+ if (state) {
+ state->srcfile = srcfile;
+ state->line = line;
+ }
+ return ret;
+
+out_free_line:
+ free(srcfile);
+ return ret;
+}
+
static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
{
struct addr_location al;
@@ -1084,7 +1126,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
insn++;
}
}
- if (off != (unsigned)len)
+ if (off != end - start)
printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
}
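
Two independent fixes land in builtin-script.c. map__fprintf_srccode() caches the last file and line it printed so consecutive samples on the same source line are not re-emitted; the state it reads and writes is the srcfile/line pair visible above. A sketch of that state, with the caveat that the authoritative definition lives in tools/perf/util/srccode.h:

struct srccode_state {
	char *srcfile;	/* last printed file; ownership passes to the state */
	unsigned line;	/* last printed line within that file */
};

The separate brstackinsn hunk changes the mismatch warning to compare the disassembled offset against the block's actual size (end - start) instead of the byte count grab_bb() happened to return.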
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index e72accefd669..a3c595fba943 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -27,7 +27,7 @@ perf-y += wp.o
perf-y += task-exit.o
perf-y += sw-clock.o
perf-y += mmap-thread-lookup.o
-perf-y += thread-mg-share.o
+perf-y += thread-maps-share.o
perf-y += switch-tracking.o
perf-y += keep-tracking.o
perf-y += code-reading.o
@@ -52,7 +52,7 @@ perf-y += perf-hooks.o
perf-y += clang.o
perf-y += unit_number__scnprintf.o
perf-y += mem2node.o
-perf-y += map_groups.o
+perf-y += maps.o
perf-y += time-utils-test.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 8b286e9b7549..7115aa32a51e 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -166,8 +166,8 @@ static struct test generic_tests[] = {
.func = test__mmap_thread_lookup,
},
{
- .desc = "Share thread mg",
- .func = test__thread_mg_share,
+ .desc = "Share thread maps",
+ .func = test__thread_maps_share,
},
{
.desc = "Sort output of hist entries",
@@ -297,8 +297,8 @@ static struct test generic_tests[] = {
.func = test__time_utils,
},
{
- .desc = "map_groups__merge_in",
- .func = test__map_groups__merge_in,
+ .desc = "maps__merge_in",
+ .func = test__maps__merge_in,
},
{
.func = NULL,
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 1f017e1b2a55..6fe221d31f07 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -276,7 +276,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
len = al.map->end - addr;
/* Read the object code using perf */
- ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
+ ret_len = dso__data_read_offset(al.map->dso, thread->maps->machine,
al.addr, buf1, len);
if (ret_len != len) {
pr_debug("dso__data_read_offset failed\n");
diff --git a/tools/perf/tests/map_groups.c b/tools/perf/tests/maps.c
index 6b9f1cdcbe5b..edcbc70ff9d6 100644
--- a/tools/perf/tests/map_groups.c
+++ b/tools/perf/tests/maps.c
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include "tests.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "dso.h"
#include "debug.h"
@@ -13,12 +13,12 @@ struct map_def {
u64 end;
};
-static int check_maps(struct map_def *merged, unsigned int size, struct map_groups *mg)
+static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)
{
struct map *map;
unsigned int i = 0;
- map_groups__for_each_entry(mg, map) {
+ maps__for_each_entry(maps, map) {
if (i > 0)
TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
@@ -33,9 +33,9 @@ static int check_maps(struct map_def *merged, unsigned int size, struct map_grou
return TEST_OK;
}
-int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __maybe_unused)
+int test__maps__merge_in(struct test *t __maybe_unused, int subtest __maybe_unused)
{
- struct map_groups mg;
+ struct maps maps;
unsigned int i;
struct map_def bpf_progs[] = {
{ "bpf_prog_1", 200, 300 },
@@ -64,7 +64,7 @@ int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __mayb
struct map *map_kcore1, *map_kcore2, *map_kcore3;
int ret;
- map_groups__init(&mg, NULL);
+ maps__init(&maps, NULL);
for (i = 0; i < ARRAY_SIZE(bpf_progs); i++) {
struct map *map;
@@ -74,7 +74,7 @@ int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __mayb
map->start = bpf_progs[i].start;
map->end = bpf_progs[i].end;
- map_groups__insert(&mg, map);
+ maps__insert(&maps, map);
map__put(map);
}
@@ -99,22 +99,22 @@ int test__map_groups__merge_in(struct test *t __maybe_unused, int subtest __mayb
map_kcore3->start = 880;
map_kcore3->end = 1100;
- ret = map_groups__merge_in(&mg, map_kcore1);
+ ret = maps__merge_in(&maps, map_kcore1);
TEST_ASSERT_VAL("failed to merge map", !ret);
- ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg);
+ ret = check_maps(merged12, ARRAY_SIZE(merged12), &maps);
TEST_ASSERT_VAL("merge check failed", !ret);
- ret = map_groups__merge_in(&mg, map_kcore2);
+ ret = maps__merge_in(&maps, map_kcore2);
TEST_ASSERT_VAL("failed to merge map", !ret);
- ret = check_maps(merged12, ARRAY_SIZE(merged12), &mg);
+ ret = check_maps(merged12, ARRAY_SIZE(merged12), &maps);
TEST_ASSERT_VAL("merge check failed", !ret);
- ret = map_groups__merge_in(&mg, map_kcore3);
+ ret = maps__merge_in(&maps, map_kcore3);
TEST_ASSERT_VAL("failed to merge map", !ret);
- ret = check_maps(merged3, ARRAY_SIZE(merged3), &mg);
+ ret = check_maps(merged3, ARRAY_SIZE(merged3), &maps);
TEST_ASSERT_VAL("merge check failed", !ret);
return TEST_OK;
}
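
[Illustrative sketch, not part of the patch: the renamed API keeps the old map_groups lifecycle one-for-one. The constants mirror the test above; error handling is elided and the function name is made up.]

static void maps_lifecycle_sketch(void)
{
	struct maps maps;
	struct map *map;

	maps__init(&maps, NULL);		/* NULL: no backing machine */
	map = dso__new_map("bpf_prog_1");	/* a map with a synthetic dso */
	if (map) {
		map->start = 200;
		map->end = 300;
		maps__insert(&maps, map);	/* maps takes its own reference */
		map__put(map);			/* drop ours */
	}
	maps__exit(&maps);			/* purge remaining entries */
}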
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 9837b6e93023..25aea387e2bf 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -73,7 +73,7 @@ int test__dwarf_unwind(struct test *test, int subtest);
int test__expr(struct test *test, int subtest);
int test__hists_filter(struct test *test, int subtest);
int test__mmap_thread_lookup(struct test *test, int subtest);
-int test__thread_mg_share(struct test *test, int subtest);
+int test__thread_maps_share(struct test *test, int subtest);
int test__hists_output(struct test *test, int subtest);
int test__hists_cumulate(struct test *test, int subtest);
int test__switch_tracking(struct test *test, int subtest);
@@ -107,7 +107,7 @@ const char *test__clang_subtest_get_desc(int subtest);
int test__clang_subtest_get_nr(void);
int test__unit_number__scnprint(struct test *test, int subtest);
int test__mem2node(struct test *t, int subtest);
-int test__map_groups__merge_in(struct test *t, int subtest);
+int test__maps__merge_in(struct test *t, int subtest);
int test__time_utils(struct test *t, int subtest);
bool test__bp_signal_is_supported(void);
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-maps-share.c
index cbac71716dec..9371484973f2 100644
--- a/tools/perf/tests/thread-mg-share.c
+++ b/tools/perf/tests/thread-maps-share.c
@@ -4,7 +4,7 @@
#include "thread.h"
#include "debug.h"
-int test__thread_mg_share(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__thread_maps_share(struct test *test __maybe_unused, int subtest __maybe_unused)
{
struct machines machines;
struct machine *machine;
@@ -12,16 +12,16 @@ int test__thread_mg_share(struct test *test __maybe_unused, int subtest __maybe_
/* thread group */
struct thread *leader;
struct thread *t1, *t2, *t3;
- struct map_groups *mg;
+ struct maps *maps;
/* other process */
struct thread *other, *other_leader;
- struct map_groups *other_mg;
+ struct maps *other_maps;
/*
* This test creates two process abstractions (struct thread)
* with several threads and checks they properly share and
- * maintain map groups info (struct map_groups).
+ * maintain maps info (struct maps).
*
* thread group (pid: 0, tids: 0, 1, 2, 3)
* other group (pid: 4, tids: 4, 5)
@@ -42,17 +42,17 @@ int test__thread_mg_share(struct test *test __maybe_unused, int subtest __maybe_
TEST_ASSERT_VAL("failed to create threads",
leader && t1 && t2 && t3 && other);
- mg = leader->mg;
- TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 4);
+ maps = leader->maps;
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 4);
- /* test the map groups pointer is shared */
- TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
- TEST_ASSERT_VAL("map groups don't match", mg == t2->mg);
- TEST_ASSERT_VAL("map groups don't match", mg == t3->mg);
+ /* test the maps pointer is shared */
+ TEST_ASSERT_VAL("maps don't match", maps == t1->maps);
+ TEST_ASSERT_VAL("maps don't match", maps == t2->maps);
+ TEST_ASSERT_VAL("maps don't match", maps == t3->maps);
/*
* Verify the other leader was created by the previous call.
- * It should have shared map groups with no change in
+ * It should have shared maps with no change in
* refcnt.
*/
other_leader = machine__find_thread(machine, 4, 4);
@@ -70,26 +70,26 @@ int test__thread_mg_share(struct test *test __maybe_unused, int subtest __maybe_
machine__remove_thread(machine, other);
machine__remove_thread(machine, other_leader);
- other_mg = other->mg;
- TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_mg->refcnt), 2);
+ other_maps = other->maps;
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_maps->refcnt), 2);
- TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
+ TEST_ASSERT_VAL("maps don't match", other_maps == other_leader->maps);
/* release thread group */
thread__put(leader);
- TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 3);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 3);
thread__put(t1);
- TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 2);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 2);
thread__put(t2);
- TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&mg->refcnt), 1);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 1);
thread__put(t3);
/* release other group */
thread__put(other_leader);
- TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_mg->refcnt), 1);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_maps->refcnt), 1);
thread__put(other);
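
[Illustrative sketch, not part of the patch: what the refcnt assertions above rely on — all threads of one group point at a single struct maps, and maps__get()/maps__put() only move the refcount.]

	struct maps *maps = maps__get(leader->maps);	/* refcnt++ */

	/* maps == t1->maps == t2->maps: one shared object, never a copy */
	maps__put(maps);				/* refcnt--; freed at zero */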
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index ff649078da9a..193b7c91b4e2 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -190,10 +190,9 @@ next_pair:
* so use the short name, less descriptive but the same ("[kernel]" in
* both cases).
*/
- pair = map_groups__find_by_name(&kallsyms.kmaps,
- (map->dso->kernel ?
- map->dso->short_name :
- map->dso->name));
+ pair = maps__find_by_name(&kallsyms.kmaps, (map->dso->kernel ?
+ map->dso->short_name :
+ map->dso->name));
if (pair) {
pair->priv = 1;
} else {
@@ -213,7 +212,7 @@ next_pair:
mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
- pair = map_groups__find(&kallsyms.kmaps, mem_start);
+ pair = maps__find(&kallsyms.kmaps, mem_start);
if (pair == NULL || pair->priv)
continue;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 992705c78bd0..badbddbb30f8 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -430,7 +430,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
return true;
}
- target_ms.mg = ms->mg;
+ target_ms.maps = ms->maps;
target_ms.map = ms->map;
target_ms.sym = dl->ops.target.sym;
pthread_mutex_unlock(&notes->lock);
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 132056c7d5b7..2ab2af4d4849 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -8,7 +8,7 @@
#include "../../util/event.h"
#include "../../util/hist.h"
#include "../../util/map.h"
-#include "../../util/map_groups.h"
+#include "../../util/maps.h"
#include "../../util/symbol.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
@@ -885,7 +885,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
}
if (h->ms.map == NULL && verbose > 1) {
- map_groups__fprintf(h->thread->mg, fp);
+ maps__fprintf(h->thread->maps, fp);
fprintf(fp, "%.10s end\n", graph_dotted_line);
}
}
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index b8e05a147b2b..07da6c790b63 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -49,6 +49,7 @@ perf-y += header.o
perf-y += callchain.o
perf-y += values.o
perf-y += debug.o
+perf-y += fncache.o
perf-y += machine.o
perf-y += map.o
perf-y += pstack.o
@@ -76,6 +77,7 @@ perf-y += sort.o
perf-y += hist.o
perf-y += util.o
perf-y += cpumap.o
+perf-y += affinity.o
perf-y += cputopo.o
perf-y += cgroup.o
perf-y += target.o
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
new file mode 100644
index 000000000000..a5e31f826828
--- /dev/null
+++ b/tools/perf/util/affinity.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Manage affinity to optimize IPIs inside the kernel perf API. */
+#define _GNU_SOURCE 1
+#include <sched.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/zalloc.h>
+#include "perf.h"
+#include "cpumap.h"
+#include "affinity.h"
+
+static int get_cpu_set_size(void)
+{
+ int sz = cpu__max_cpu() + 8 - 1;
+ /*
+ * sched_getaffinity doesn't like masks smaller than the kernel's.
+ * Hopefully that's big enough.
+ */
+ if (sz < 4096)
+ sz = 4096;
+ return sz / 8;
+}
+
+int affinity__setup(struct affinity *a)
+{
+ int cpu_set_size = get_cpu_set_size();
+
+ a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
+ if (!a->orig_cpus)
+ return -1;
+ sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
+ a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
+ if (!a->sched_cpus) {
+ zfree(&a->orig_cpus);
+ return -1;
+ }
+ bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size);
+ a->changed = false;
+ return 0;
+}
+
+/*
+ * perf_event_open does an IPI internally to the target CPU.
+ * It is more efficient to change perf's affinity to the target
+ * CPU and then set up all events on that CPU, so we amortize
+ * CPU communication.
+ */
+void affinity__set(struct affinity *a, int cpu)
+{
+ int cpu_set_size = get_cpu_set_size();
+
+ if (cpu == -1)
+ return;
+ a->changed = true;
+ set_bit(cpu, a->sched_cpus);
+ /*
+ * We ignore errors because affinity is just an optimization.
+ * This could happen for example with isolated CPUs or cpusets.
+ * In this case the IPIs inside the kernel's perf API still work.
+ */
+ sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
+ clear_bit(cpu, a->sched_cpus);
+}
+
+void affinity__cleanup(struct affinity *a)
+{
+ int cpu_set_size = get_cpu_set_size();
+
+ if (a->changed)
+ sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
+ zfree(&a->sched_cpus);
+ zfree(&a->orig_cpus);
+}
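
[Illustrative sketch, not part of the patch: the calling pattern these helpers are built for — pin perf to each target CPU before opening events there so the kernel-side IPIs stay local. 'ncpus' and the loop body are placeholders.]

	struct affinity affinity;
	int cpu;

	if (affinity__setup(&affinity) < 0)
		return -1;
	for (cpu = 0; cpu < ncpus; cpu++) {
		affinity__set(&affinity, cpu);	/* best effort, errors ignored */
		/* ... perf_event_open() calls targeting 'cpu' ... */
	}
	affinity__cleanup(&affinity);		/* restore the original mask */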
diff --git a/tools/perf/util/affinity.h b/tools/perf/util/affinity.h
new file mode 100644
index 000000000000..0ad6a18ef20c
--- /dev/null
+++ b/tools/perf/util/affinity.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef PERF_AFFINITY_H
+#define PERF_AFFINITY_H 1
+
+#include <stdbool.h>
+
+struct affinity {
+ unsigned long *orig_cpus;
+ unsigned long *sched_cpus;
+ bool changed;
+};
+
+void affinity__cleanup(struct affinity *a);
+void affinity__set(struct affinity *a, int cpu);
+int affinity__setup(struct affinity *a);
+
+#endif // PERF_AFFINITY_H
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 5ea9a4534848..f5e77ed237e8 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -23,7 +23,7 @@
#include "dso.h"
#include "env.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "symbol.h"
#include "srcline.h"
#include "units.h"
@@ -271,7 +271,7 @@ static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_s
find_target:
target.addr = map__objdump_2mem(map, ops->target.addr);
- if (map_groups__find_ams(ms->mg, &target) == 0 &&
+ if (maps__find_ams(ms->maps, &target) == 0 &&
map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
@@ -391,7 +391,7 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
* Actual navigation will come next, with further understanding of how
* the symbol searching and disassembly should be done.
*/
- if (map_groups__find_ams(ms->mg, &target) == 0 &&
+ if (maps__find_ams(ms->maps, &target) == 0 &&
map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
@@ -1545,7 +1545,7 @@ static int symbol__parse_objdump_line(struct symbol *sym,
.ms = { .map = map, },
};
- if (!map_groups__find_ams(args->ms.mg, &target) &&
+ if (!maps__find_ams(args->ms.maps, &target) &&
target.ms.sym->start == target.al_addr)
dl->ops.target.sym = target.ms.sym;
}
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index f7ed5d122e22..a3207d900339 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -52,9 +52,7 @@ static int machine__process_bpf_event_load(struct machine *machine,
for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
u64 addr = addrs[i];
- struct map *map;
-
- map = map_groups__find(&machine->kmaps, addr);
+ struct map *map = maps__find(&machine->kmaps, addr);
if (map) {
map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 5cefce33b66b..818aa4efd386 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1106,7 +1106,7 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
bool hide_unresolved)
{
- al->mg = node->ms.mg;
+ al->maps = node->ms.maps;
al->map = node->ms.map;
al->sym = node->ms.sym;
al->srcline = node->srcline;
@@ -1119,8 +1119,8 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
goto out;
}
- if (al->mg == &al->mg->machine->kmaps) {
- if (machine__is_host(al->mg->machine)) {
+ if (al->maps == &al->maps->machine->kmaps) {
+ if (machine__is_host(al->maps->machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
} else {
@@ -1128,7 +1128,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
al->level = 'g';
}
} else {
- if (machine__is_host(al->mg->machine)) {
+ if (machine__is_host(al->maps->machine)) {
al->cpumode = PERF_RECORD_MISC_USER;
al->level = '.';
} else if (perf_guest) {
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index f5f855fff412..5471045ebf5c 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -2569,7 +2569,7 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
if (err)
goto err_delete_thread;
- if (thread__init_map_groups(etm->unknown_thread, etm->machine)) {
+ if (thread__init_maps(etm->unknown_thread, etm->machine)) {
err = -ENOMEM;
goto err_delete_thread;
}
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index d029faf9fc9f..db7447154622 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -181,7 +181,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
if (al->map) {
struct dso *dso = al->map->dso;
- err = db_export__dso(dbe, dso, al->mg->machine);
+ err = db_export__dso(dbe, dso, al->maps->machine);
if (err)
return err;
*dso_db_id = dso->db_id;
@@ -251,7 +251,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
*/
al.sym = node->ms.sym;
al.map = node->ms.map;
- al.mg = thread->mg;
+ al.maps = thread->maps;
al.addr = node->ip;
if (al.map && !al.sym)
@@ -360,13 +360,13 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
if (err)
return err;
- err = db_export__machine(dbe, al->mg->machine);
+ err = db_export__machine(dbe, al->maps->machine);
if (err)
return err;
- main_thread = thread__main_thread(al->mg->machine, thread);
+ main_thread = thread__main_thread(al->maps->machine, thread);
- err = db_export__threads(dbe, thread, main_thread, al->mg->machine, &comm);
+ err = db_export__threads(dbe, thread, main_thread, al->maps->machine, &comm);
if (err)
goto out_put;
@@ -380,7 +380,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
goto out_put;
if (dbe->cpr) {
- struct call_path *cp = call_path_from_sample(dbe, al->mg->machine,
+ struct call_path *cp = call_path_from_sample(dbe, al->maps->machine,
thread, sample,
evsel);
if (cp) {
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 0141b26bae47..c5447ff516a2 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -457,11 +457,11 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
- struct map_groups *mg = thread->mg;
- struct machine *machine = mg->machine;
+ struct maps *maps = thread->maps;
+ struct machine *machine = maps->machine;
bool load_map = false;
- al->mg = mg;
+ al->maps = maps;
al->thread = thread;
al->addr = addr;
al->cpumode = cpumode;
@@ -474,13 +474,13 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
- al->mg = mg = &machine->kmaps;
+ al->maps = maps = &machine->kmaps;
load_map = true;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
- al->mg = mg = &machine->kmaps;
+ al->maps = maps = &machine->kmaps;
load_map = true;
} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
al->level = 'u';
@@ -500,7 +500,7 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return NULL;
}
- al->map = map_groups__find(mg, al->addr);
+ al->map = maps__find(maps, al->addr);
if (al->map != NULL) {
/*
* Kernel maps might be changed when loading symbols so loading
@@ -523,7 +523,7 @@ struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al)
{
struct map *map = thread__find_map(thread, cpumode, addr, al);
- struct machine *machine = thread->mg->machine;
+ struct machine *machine = thread->maps->machine;
u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
if (map || addr_cpumode == cpumode)
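
[Illustrative sketch, not part of the patch: resolving an address through the renamed thread->maps — the cpumode argument decides between the thread's own maps and machine->kmaps. 'thread' and 'addr' are placeholders.]

	struct addr_location al;
	struct map *map = thread__find_map(thread, PERF_RECORD_MISC_USER, addr, &al);

	if (map != NULL) {
		/* al.maps, al.map and al.level ('k', '.', 'g' or 'u') are set */
	}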
diff --git a/tools/perf/util/fncache.c b/tools/perf/util/fncache.c
new file mode 100644
index 000000000000..6225cbc52310
--- /dev/null
+++ b/tools/perf/util/fncache.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Manage a cache of file names' existence */
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <linux/list.h>
+#include "fncache.h"
+
+struct fncache {
+ struct hlist_node nd;
+ bool res;
+ char name[];
+};
+
+#define FNHSIZE 61
+
+static struct hlist_head fncache_hash[FNHSIZE];
+
+unsigned shash(const unsigned char *s)
+{
+ unsigned h = 0;
+ while (*s)
+ h = 65599 * h + *s++;
+ return h ^ (h >> 16);
+}
+
+static bool lookup_fncache(const char *name, bool *res)
+{
+ int h = shash((const unsigned char *)name) % FNHSIZE;
+ struct fncache *n;
+
+ hlist_for_each_entry(n, &fncache_hash[h], nd) {
+ if (!strcmp(n->name, name)) {
+ *res = n->res;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void update_fncache(const char *name, bool res)
+{
+ struct fncache *n = malloc(sizeof(struct fncache) + strlen(name) + 1);
+ int h = shash((const unsigned char *)name) % FNHSIZE;
+
+ if (!n)
+ return;
+ strcpy(n->name, name);
+ n->res = res;
+ hlist_add_head(&n->nd, &fncache_hash[h]);
+}
+
+/* No LRU, only use when bounded in some other way. */
+bool file_available(const char *name)
+{
+ bool res;
+
+ if (lookup_fncache(name, &res))
+ return res;
+ res = access(name, R_OK) == 0;
+ update_fncache(name, res);
+ return res;
+}
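
[Illustrative sketch, not part of the patch: the whole fncache API is one predicate — the first lookup of a path does access(2), repeat lookups hit the in-memory hash, and negative results are cached too.]

	if (file_available("/sys/bus/event_source/devices/cpu/format")) {
		/* parse the directory; the path is checked at most once */
	}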
diff --git a/tools/perf/util/fncache.h b/tools/perf/util/fncache.h
new file mode 100644
index 000000000000..fe020beaefb1
--- /dev/null
+++ b/tools/perf/util/fncache.h
@@ -0,0 +1,7 @@
+#ifndef _FCACHE_H
+#define _FCACHE_H 1
+
+unsigned shash(const unsigned char *s);
+bool file_available(const char *name);
+
+#endif
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 0a8d72ae93ca..ca5a8f4d007e 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -692,7 +692,7 @@ __hists__add_entry(struct hists *hists,
.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
},
.ms = {
- .mg = al->mg,
+ .maps = al->maps,
.map = al->map,
.sym = al->sym,
},
@@ -760,7 +760,7 @@ struct hist_entry *hists__add_entry_block(struct hists *hists,
.block_info = block_info,
.hists = hists,
.ms = {
- .mg = al->mg,
+ .maps = al->maps,
.map = al->map,
.sym = al->sym,
},
@@ -895,7 +895,7 @@ iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
if (iter->curr >= iter->total)
return 0;
- al->mg = bi[i].to.ms.mg;
+ al->maps = bi[i].to.ms.maps;
al->map = bi[i].to.ms.map;
al->sym = bi[i].to.ms.sym;
al->addr = bi[i].to.addr;
@@ -1072,7 +1072,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.comm = thread__comm(al->thread),
.ip = al->addr,
.ms = {
- .mg = al->mg,
+ .maps = al->maps,
.map = al->map,
.sym = al->sym,
},
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 409afc611be9..33cf8928cf05 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -3296,7 +3296,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
err = thread__set_comm(pt->unknown_thread, "unknown", 0);
if (err)
goto err_delete_thread;
- if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
+ if (thread__init_maps(pt->unknown_thread, pt->machine)) {
err = -ENOMEM;
goto err_delete_thread;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index e2a312c649f0..416d174d223c 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -86,7 +86,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
int err = -ENOMEM;
memset(machine, 0, sizeof(*machine));
- map_groups__init(&machine->kmaps, machine);
+ maps__init(&machine->kmaps, machine);
RB_CLEAR_NODE(&machine->rb_node);
dsos__init(&machine->dsos);
@@ -217,7 +217,7 @@ void machine__exit(struct machine *machine)
return;
machine__destroy_kernel_maps(machine);
- map_groups__exit(&machine->kmaps);
+ maps__exit(&machine->kmaps);
dsos__exit(&machine->dsos);
machine__exit_vdso(machine);
zfree(&machine->root_dir);
@@ -412,28 +412,28 @@ static void machine__update_thread_pid(struct machine *machine,
if (!leader)
goto out_err;
- if (!leader->mg)
- leader->mg = map_groups__new(machine);
+ if (!leader->maps)
+ leader->maps = maps__new(machine);
- if (!leader->mg)
+ if (!leader->maps)
goto out_err;
- if (th->mg == leader->mg)
+ if (th->maps == leader->maps)
return;
- if (th->mg) {
+ if (th->maps) {
/*
* Maps are created from MMAP events which provide the pid and
* tid. Consequently there should never be any maps on a thread
* with an unknown pid. Just print an error if there are.
*/
- if (!map_groups__empty(th->mg))
+ if (!maps__empty(th->maps))
pr_err("Discarding thread maps for %d:%d\n",
th->pid_, th->tid);
- map_groups__put(th->mg);
+ maps__put(th->maps);
}
- th->mg = map_groups__get(leader->mg);
+ th->maps = maps__get(leader->maps);
out_put:
thread__put(leader);
return;
@@ -536,14 +536,13 @@ static struct thread *____machine__findnew_thread(struct machine *machine,
rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
/*
- * We have to initialize map_groups separately
- * after rb tree is updated.
+ * We have to initialize maps separately after rb tree is updated.
*
* The reason is that we call machine__findnew_thread
- * within thread__init_map_groups to find the thread
+ * within thread__init_maps to find the thread
* leader and that would screw up the rb tree.
*/
- if (thread__init_map_groups(th, machine)) {
+ if (thread__init_maps(th, machine)) {
rb_erase_cached(&th->rb_node, &threads->entries);
RB_CLEAR_NODE(&th->rb_node);
thread__put(th);
@@ -724,9 +723,8 @@ static int machine__process_ksymbol_register(struct machine *machine,
struct perf_sample *sample __maybe_unused)
{
struct symbol *sym;
- struct map *map;
+ struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
- map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
if (!map) {
map = dso__new_map(event->ksymbol.name);
if (!map)
@@ -734,7 +732,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len;
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
}
sym = symbol__new(map->map_ip(map, map->start),
@@ -752,9 +750,9 @@ static int machine__process_ksymbol_unregister(struct machine *machine,
{
struct map *map;
- map = map_groups__find(&machine->kmaps, event->ksymbol.addr);
+ map = maps__find(&machine->kmaps, event->ksymbol.addr);
if (map)
- map_groups__remove(&machine->kmaps, map);
+ maps__remove(&machine->kmaps, map);
return 0;
}
@@ -790,9 +788,9 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
if (map == NULL)
goto out;
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
- /* Put the map here because map_groups__insert alread got it */
+ /* Put the map here because maps__insert already got it */
map__put(map);
out:
/* put the dso here, corresponding to machine__findnew_module_dso */
@@ -977,7 +975,7 @@ int machine__create_extra_kernel_map(struct machine *machine,
kmap->kmaps = &machine->kmaps;
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
kmap->name, map->start, map->end);
@@ -1022,8 +1020,7 @@ static u64 find_entry_trampoline(struct dso *dso)
int machine__map_x86_64_entry_trampolines(struct machine *machine,
struct dso *kernel)
{
- struct map_groups *kmaps = &machine->kmaps;
- struct maps *maps = &kmaps->maps;
+ struct maps *kmaps = &machine->kmaps;
int nr_cpus_avail, cpu;
bool found = false;
struct map *map;
@@ -1033,14 +1030,14 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine,
* In the vmlinux case, pgoff is a virtual address which must now be
* mapped to a vmlinux offset.
*/
- maps__for_each_entry(maps, map) {
+ maps__for_each_entry(kmaps, map) {
struct kmap *kmap = __map__kmap(map);
struct map *dest_map;
if (!kmap || !is_entry_trampoline(kmap->name))
continue;
- dest_map = map_groups__find(kmaps, map->pgoff);
+ dest_map = maps__find(kmaps, map->pgoff);
if (dest_map != map)
map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
found = true;
@@ -1102,7 +1099,7 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
return -1;
kmap->kmaps = &machine->kmaps;
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
return 0;
}
@@ -1116,7 +1113,7 @@ void machine__destroy_kernel_maps(struct machine *machine)
return;
kmap = map__kmap(map);
- map_groups__remove(&machine->kmaps, map);
+ maps__remove(&machine->kmaps, map);
if (kmap && kmap->ref_reloc_sym) {
zfree((char **)&kmap->ref_reloc_sym->name);
zfree(&kmap->ref_reloc_sym);
@@ -1211,7 +1208,7 @@ int machine__load_kallsyms(struct machine *machine, const char *filename)
* kernel, with modules between them, fixup the end of all
* sections.
*/
- map_groups__fixup_end(&machine->kmaps);
+ maps__fixup_end(&machine->kmaps);
}
return ret;
@@ -1262,11 +1259,10 @@ static bool is_kmod_dso(struct dso *dso)
dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
-static int map_groups__set_module_path(struct map_groups *mg, const char *path,
- struct kmod_path *m)
+static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
char *long_name;
- struct map *map = map_groups__find_by_name(mg, m->name);
+ struct map *map = maps__find_by_name(maps, m->name);
if (map == NULL)
return 0;
@@ -1290,8 +1286,7 @@ static int map_groups__set_module_path(struct map_groups *mg, const char *path,
return 0;
}
-static int map_groups__set_modules_path_dir(struct map_groups *mg,
- const char *dir_name, int depth)
+static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
@@ -1323,8 +1318,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
continue;
}
- ret = map_groups__set_modules_path_dir(mg, path,
- depth + 1);
+ ret = maps__set_modules_path_dir(maps, path, depth + 1);
if (ret < 0)
goto out;
} else {
@@ -1335,7 +1329,7 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
goto out;
if (m.kmod)
- ret = map_groups__set_module_path(mg, path, &m);
+ ret = maps__set_module_path(maps, path, &m);
zfree(&m.name);
@@ -1362,7 +1356,7 @@ static int machine__set_modules_path(struct machine *machine)
machine->root_dir, version);
free(version);
- return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+ return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
u64 *size __maybe_unused,
@@ -1435,11 +1429,11 @@ static void machine__update_kernel_mmap(struct machine *machine,
struct map *map = machine__kernel_map(machine);
map__get(map);
- map_groups__remove(&machine->kmaps, map);
+ maps__remove(&machine->kmaps, map);
machine__set_kernel_mmap(machine, start, end);
- map_groups__insert(&machine->kmaps, map);
+ maps__insert(&machine->kmaps, map);
map__put(map);
}
@@ -1940,7 +1934,7 @@ static void ip__resolve_ams(struct thread *thread,
ams->addr = ip;
ams->al_addr = al.addr;
- ams->ms.mg = al.mg;
+ ams->ms.maps = al.maps;
ams->ms.sym = al.sym;
ams->ms.map = al.map;
ams->phys_addr = 0;
@@ -1958,7 +1952,7 @@ static void ip__resolve_data(struct thread *thread,
ams->addr = addr;
ams->al_addr = al.addr;
- ams->ms.mg = al.mg;
+ ams->ms.maps = al.maps;
ams->ms.sym = al.sym;
ams->ms.map = al.map;
ams->phys_addr = phys_addr;
@@ -2075,7 +2069,7 @@ static int add_callchain_ip(struct thread *thread,
iter_cycles = iter->cycles;
}
- ms.mg = al.mg;
+ ms.maps = al.maps;
ms.map = al.map;
ms.sym = al.sym;
srcline = callchain_srcline(&ms, al.addr);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 499be204830d..be0a930eca89 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -4,7 +4,7 @@
#include <sys/types.h>
#include <linux/rbtree.h>
-#include "map_groups.h"
+#include "maps.h"
#include "dsos.h"
#include "rwsem.h"
@@ -51,7 +51,7 @@ struct machine {
struct vdso_info *vdso_info;
struct perf_env *env;
struct dsos dsos;
- struct map_groups kmaps;
+ struct maps kmaps;
struct map *vmlinux_map;
u64 kernel_start;
pid_t *current_tid;
@@ -83,7 +83,7 @@ struct map *machine__kernel_map(struct machine *machine)
static inline
struct maps *machine__kernel_maps(struct machine *machine)
{
- return &machine->kmaps.maps;
+ return &machine->kmaps;
}
int machine__get_kernel_start(struct machine *machine);
@@ -212,7 +212,7 @@ static inline
struct symbol *machine__find_kernel_symbol(struct machine *machine, u64 addr,
struct map **mapp)
{
- return map_groups__find_symbol(&machine->kmaps, addr, mapp);
+ return maps__find_symbol(&machine->kmaps, addr, mapp);
}
static inline
@@ -220,7 +220,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
const char *name,
struct map **mapp)
{
- return map_groups__find_symbol_by_name(&machine->kmaps, name, mapp);
+ return maps__find_symbol_by_name(&machine->kmaps, name, mapp);
}
int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
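
[Illustrative sketch, not part of the patch: the wrappers above keep kernel symbol lookups going through the renamed kmaps, so callers never touch struct maps directly. 'machine' and 'addr' are placeholders.]

	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, addr, &map);

	if (sym != NULL)
		pr_debug("%#" PRIx64 " is in %s\n", addr, sym->name);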
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 744bfbaf35cf..fdd5bddb3075 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -433,51 +433,6 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
return ret;
}
-int map__fprintf_srccode(struct map *map, u64 addr,
- FILE *fp,
- struct srccode_state *state)
-{
- char *srcfile;
- int ret = 0;
- unsigned line;
- int len;
- char *srccode;
-
- if (!map || !map->dso)
- return 0;
- srcfile = get_srcline_split(map->dso,
- map__rip_2objdump(map, addr),
- &line);
- if (!srcfile)
- return 0;
-
- /* Avoid redundant printing */
- if (state &&
- state->srcfile &&
- !strcmp(state->srcfile, srcfile) &&
- state->line == line) {
- free(srcfile);
- return 0;
- }
-
- srccode = find_sourceline(srcfile, line, &len);
- if (!srccode)
- goto out_free_line;
-
- ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
-
- if (state) {
- state->srcfile = srcfile;
- state->line = line;
- }
- return ret;
-
-out_free_line:
- free(srcfile);
- return ret;
-}
-
-
void srccode_state_free(struct srccode_state *state)
{
zfree(&state->srcfile);
@@ -557,73 +512,71 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
return ip + map->reloc;
}
-static void maps__init(struct maps *maps)
+void maps__init(struct maps *maps, struct machine *machine)
{
maps->entries = RB_ROOT;
init_rwsem(&maps->lock);
+ maps->machine = machine;
+ maps->last_search_by_name = NULL;
+ maps->nr_maps = 0;
+ maps->maps_by_name = NULL;
+ refcount_set(&maps->refcnt, 1);
}
-void map_groups__init(struct map_groups *mg, struct machine *machine)
-{
- maps__init(&mg->maps);
- mg->machine = machine;
- mg->last_search_by_name = NULL;
- mg->nr_maps = 0;
- mg->maps_by_name = NULL;
- refcount_set(&mg->refcnt, 1);
-}
-
-static void __map_groups__free_maps_by_name(struct map_groups *mg)
+static void __maps__free_maps_by_name(struct maps *maps)
{
/*
* Free everything so that the next search falls back to the rbtree
*/
- zfree(&mg->maps_by_name);
- mg->nr_maps_allocated = 0;
+ zfree(&maps->maps_by_name);
+ maps->nr_maps_allocated = 0;
}
-void map_groups__insert(struct map_groups *mg, struct map *map)
+void maps__insert(struct maps *maps, struct map *map)
{
- struct maps *maps = &mg->maps;
-
down_write(&maps->lock);
__maps__insert(maps, map);
- ++mg->nr_maps;
+ ++maps->nr_maps;
/*
* If we already performed some search by name, then we need to add the just
* inserted map and resort.
*/
- if (mg->maps_by_name) {
- if (mg->nr_maps > mg->nr_maps_allocated) {
- int nr_allocate = mg->nr_maps * 2;
- struct map **maps_by_name = realloc(mg->maps_by_name, nr_allocate * sizeof(map));
+ if (maps->maps_by_name) {
+ if (maps->nr_maps > maps->nr_maps_allocated) {
+ int nr_allocate = maps->nr_maps * 2;
+ struct map **maps_by_name = realloc(maps->maps_by_name, nr_allocate * sizeof(map));
if (maps_by_name == NULL) {
- __map_groups__free_maps_by_name(mg);
+ __maps__free_maps_by_name(maps);
return;
}
- mg->maps_by_name = maps_by_name;
- mg->nr_maps_allocated = nr_allocate;
+ maps->maps_by_name = maps_by_name;
+ maps->nr_maps_allocated = nr_allocate;
}
- mg->maps_by_name[mg->nr_maps - 1] = map;
- __map_groups__sort_by_name(mg);
+ maps->maps_by_name[maps->nr_maps - 1] = map;
+ __maps__sort_by_name(maps);
}
up_write(&maps->lock);
}
-void map_groups__remove(struct map_groups *mg, struct map *map)
+static void __maps__remove(struct maps *maps, struct map *map)
+{
+ rb_erase_init(&map->rb_node, &maps->entries);
+ map__put(map);
+}
+
+void maps__remove(struct maps *maps, struct map *map)
{
- struct maps *maps = &mg->maps;
down_write(&maps->lock);
- if (mg->last_search_by_name == map)
- mg->last_search_by_name = NULL;
+ if (maps->last_search_by_name == map)
+ maps->last_search_by_name = NULL;
__maps__remove(maps, map);
- --mg->nr_maps;
- if (mg->maps_by_name)
- __map_groups__free_maps_by_name(mg);
+ --maps->nr_maps;
+ if (maps->maps_by_name)
+ __maps__free_maps_by_name(maps);
up_write(&maps->lock);
}
@@ -637,50 +590,44 @@ static void __maps__purge(struct maps *maps)
}
}
-static void maps__exit(struct maps *maps)
+void maps__exit(struct maps *maps)
{
down_write(&maps->lock);
__maps__purge(maps);
up_write(&maps->lock);
}
-void map_groups__exit(struct map_groups *mg)
-{
- maps__exit(&mg->maps);
-}
-
-bool map_groups__empty(struct map_groups *mg)
+bool maps__empty(struct maps *maps)
{
- return !maps__first(&mg->maps);
+ return !maps__first(maps);
}
-struct map_groups *map_groups__new(struct machine *machine)
+struct maps *maps__new(struct machine *machine)
{
- struct map_groups *mg = zalloc(sizeof(*mg));
+ struct maps *maps = zalloc(sizeof(*maps));
- if (mg != NULL)
- map_groups__init(mg, machine);
+ if (maps != NULL)
+ maps__init(maps, machine);
- return mg;
+ return maps;
}
-void map_groups__delete(struct map_groups *mg)
+void maps__delete(struct maps *maps)
{
- map_groups__exit(mg);
- unwind__finish_access(mg);
- free(mg);
+ maps__exit(maps);
+ unwind__finish_access(maps);
+ free(maps);
}
-void map_groups__put(struct map_groups *mg)
+void maps__put(struct maps *maps)
{
- if (mg && refcount_dec_and_test(&mg->refcnt))
- map_groups__delete(mg);
+ if (maps && refcount_dec_and_test(&maps->refcnt))
+ maps__delete(maps);
}
-struct symbol *map_groups__find_symbol(struct map_groups *mg,
- u64 addr, struct map **mapp)
+struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
- struct map *map = map_groups__find(mg, addr);
+ struct map *map = maps__find(maps, addr);
/* Ensure map is loaded before using map->map_ip */
if (map != NULL && map__load(map) >= 0) {
@@ -699,8 +646,7 @@ static bool map__contains_symbol(struct map *map, struct symbol *sym)
return ip >= map->start && ip < map->end;
}
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
- struct map **mapp)
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
struct symbol *sym;
struct map *pos;
@@ -727,19 +673,12 @@ out:
return sym;
}
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
- const char *name,
- struct map **mapp)
-{
- return maps__find_symbol_by_name(&mg->maps, name, mapp);
-}
-
-int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams)
+int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
if (ams->addr < ams->ms.map->start || ams->addr >= ams->ms.map->end) {
- if (mg == NULL)
+ if (maps == NULL)
return -1;
- ams->ms.map = map_groups__find(mg, ams->addr);
+ ams->ms.map = maps__find(maps, ams->addr);
if (ams->ms.map == NULL)
return -1;
}
@@ -750,7 +689,7 @@ int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams)
return ams->ms.sym ? 0 : -1;
}
-static size_t maps__fprintf(struct maps *maps, FILE *fp)
+size_t maps__fprintf(struct maps *maps, FILE *fp)
{
size_t printed = 0;
struct map *pos;
@@ -771,19 +710,8 @@ static size_t maps__fprintf(struct maps *maps, FILE *fp)
return printed;
}
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
-{
- return maps__fprintf(&mg->maps, fp);
-}
-
-static void __map_groups__insert(struct map_groups *mg, struct map *map)
-{
- __maps__insert(&mg->maps, map);
-}
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp)
+int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
- struct maps *maps = &mg->maps;
struct rb_root *root;
struct rb_node *next, *first;
int err = 0;
@@ -848,7 +776,7 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE
}
before->end = map->start;
- __map_groups__insert(mg, before);
+ __maps__insert(maps, before);
if (verbose >= 2 && !use_browser)
map__fprintf(before, fp);
map__put(before);
@@ -865,7 +793,7 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE
after->start = map->end;
after->pgoff += map->end - pos->start;
assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
- __map_groups__insert(mg, after);
+ __maps__insert(maps, after);
if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
map__put(after);
@@ -886,31 +814,30 @@ out:
/*
* XXX This should not really _copy_ the maps, but refcount them.
*/
-int map_groups__clone(struct thread *thread, struct map_groups *parent)
+int maps__clone(struct thread *thread, struct maps *parent)
{
- struct map_groups *mg = thread->mg;
+ struct maps *maps = thread->maps;
int err = -ENOMEM;
struct map *map;
- struct maps *maps = &parent->maps;
- down_read(&maps->lock);
+ down_read(&parent->lock);
- maps__for_each_entry(maps, map) {
+ maps__for_each_entry(parent, map) {
struct map *new = map__clone(map);
if (new == NULL)
goto out_unlock;
- err = unwind__prepare_access(mg, new, NULL);
+ err = unwind__prepare_access(maps, new, NULL);
if (err)
goto out_unlock;
- map_groups__insert(mg, new);
+ maps__insert(maps, new);
map__put(new);
}
err = 0;
out_unlock:
- up_read(&maps->lock);
+ up_read(&parent->lock);
return err;
}
@@ -935,26 +862,6 @@ static void __maps__insert(struct maps *maps, struct map *map)
map__get(map);
}
-void maps__insert(struct maps *maps, struct map *map)
-{
- down_write(&maps->lock);
- __maps__insert(maps, map);
- up_write(&maps->lock);
-}
-
-void __maps__remove(struct maps *maps, struct map *map)
-{
- rb_erase_init(&map->rb_node, &maps->entries);
- map__put(map);
-}
-
-void maps__remove(struct maps *maps, struct map *map)
-{
- down_write(&maps->lock);
- __maps__remove(maps, map);
- up_write(&maps->lock);
-}
-
struct map *maps__find(struct maps *maps, u64 ip)
{
struct rb_node *p;
@@ -1018,7 +925,7 @@ struct kmap *map__kmap(struct map *map)
return kmap;
}
-struct map_groups *map__kmaps(struct map *map)
+struct maps *map__kmaps(struct map *map)
{
struct kmap *kmap = map__kmap(map);
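
[Illustrative sketch, not part of the patch: maps__clone() now takes the parent's struct maps directly instead of reaching through a map_groups wrapper; a fork handler would use it roughly like this, with hypothetical thread names.]

	/* copy (not share) the parent's layout into child->maps */
	if (maps__clone(child, parent->maps) < 0)
		return -ENOMEM;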
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 5e8899883231..067036e8970c 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -12,11 +12,8 @@
#include <linux/types.h>
struct dso;
-struct ip_callchain;
-struct ref_reloc_sym;
-struct map_groups;
+struct maps;
struct machine;
-struct evsel;
struct map {
union {
@@ -45,7 +42,7 @@ struct kmap;
struct kmap *__map__kmap(struct map *map);
struct kmap *map__kmap(struct map *map);
-struct map_groups *map__kmaps(struct map *map);
+struct maps *map__kmaps(struct map *map);
static inline u64 map__map_ip(struct map *map, u64 ip)
{
@@ -138,19 +135,12 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
-struct srccode_state;
-
-int map__fprintf_srccode(struct map *map, u64 addr,
- FILE *fp, struct srccode_state *state);
-
int map__load(struct map *map);
struct symbol *map__find_symbol(struct map *map, u64 addr);
struct symbol *map__find_symbol_by_name(struct map *map, const char *name);
void map__fixup_start(struct map *map);
void map__fixup_end(struct map *map);
-void map__reloc_vmlinux(struct map *map);
-
int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
u64 addr);
diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
deleted file mode 100644
index 63ed211fe241..000000000000
--- a/tools/perf/util/map_groups.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERF_MAP_GROUPS_H
-#define __PERF_MAP_GROUPS_H
-
-#include <linux/refcount.h>
-#include <linux/rbtree.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <linux/types.h>
-#include "rwsem.h"
-
-struct ref_reloc_sym;
-struct machine;
-struct map;
-struct thread;
-
-struct maps {
- struct rb_root entries;
- struct rw_semaphore lock;
-};
-
-void maps__insert(struct maps *maps, struct map *map);
-void maps__remove(struct maps *maps, struct map *map);
-void __maps__remove(struct maps *maps, struct map *map);
-struct map *maps__find(struct maps *maps, u64 addr);
-struct map *maps__first(struct maps *maps);
-struct map *map__next(struct map *map);
-
-#define maps__for_each_entry(maps, map) \
- for (map = maps__first(maps); map; map = map__next(map))
-
-#define maps__for_each_entry_safe(maps, map, next) \
- for (map = maps__first(maps), next = map__next(map); map; map = next, next = map__next(map))
-
-struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
-
-struct map_groups {
- struct maps maps;
- struct machine *machine;
- struct map *last_search_by_name;
- struct map **maps_by_name;
- refcount_t refcnt;
- unsigned int nr_maps;
- unsigned int nr_maps_allocated;
-#ifdef HAVE_LIBUNWIND_SUPPORT
- void *addr_space;
- struct unwind_libunwind_ops *unwind_libunwind_ops;
-#endif
-};
-
-#define KMAP_NAME_LEN 256
-
-struct kmap {
- struct ref_reloc_sym *ref_reloc_sym;
- struct map_groups *kmaps;
- char name[KMAP_NAME_LEN];
-};
-
-struct map_groups *map_groups__new(struct machine *machine);
-void map_groups__delete(struct map_groups *mg);
-bool map_groups__empty(struct map_groups *mg);
-
-static inline struct map_groups *map_groups__get(struct map_groups *mg)
-{
- if (mg)
- refcount_inc(&mg->refcnt);
- return mg;
-}
-
-void map_groups__put(struct map_groups *mg);
-void map_groups__init(struct map_groups *mg, struct machine *machine);
-void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct thread *thread, struct map_groups *parent);
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
-
-void map_groups__insert(struct map_groups *mg, struct map *map);
-
-void map_groups__remove(struct map_groups *mg, struct map *map);
-
-static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
-{
- return maps__find(&mg->maps, addr);
-}
-
-#define map_groups__for_each_entry(mg, map) \
- for (map = maps__first(&mg->maps); map; map = map__next(map))
-
-#define map_groups__for_each_entry_safe(mg, map, next) \
- for (map = maps__first(&mg->maps), next = map__next(map); map; map = next, next = map__next(map))
-
-struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
-struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
-
-struct addr_map_symbol;
-
-int map_groups__find_ams(struct map_groups *mg, struct addr_map_symbol *ams);
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp);
-
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name);
-
-int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map);
-
-void __map_groups__sort_by_name(struct map_groups *mg);
-
-#endif // __PERF_MAP_GROUPS_H
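
[Summary sketch, not part of the patch: with map_groups.h deleted, the renames are mechanical and one-to-one; the pairs below condense the substitutions made throughout this patch.]

	/* old (removed)                     new                        */
	/* struct map_groups *mg;         -> struct maps *maps;         */
	/* map_groups__find(mg, addr)     -> maps__find(maps, addr)     */
	/* map_groups__insert(mg, map)    -> maps__insert(maps, map)    */
	/* map_groups__put(mg)            -> maps__put(maps)            */
	/* thread->mg                     -> thread->maps               */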
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
index 2964d971aeab..5b8ca93798e9 100644
--- a/tools/perf/util/map_symbol.h
+++ b/tools/perf/util/map_symbol.h
@@ -4,12 +4,12 @@
#include <linux/types.h>
-struct map_groups;
+struct maps;
struct map;
struct symbol;
struct map_symbol {
- struct map_groups *mg;
+ struct maps *maps;
struct map *map;
struct symbol *sym;
};
diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h
new file mode 100644
index 000000000000..3dd000ddf925
--- /dev/null
+++ b/tools/perf/util/maps.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MAPS_H
+#define __PERF_MAPS_H
+
+#include <linux/refcount.h>
+#include <linux/rbtree.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include "rwsem.h"
+
+struct ref_reloc_sym;
+struct machine;
+struct map;
+struct maps;
+struct thread;
+
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
+struct map *map__next(struct map *map);
+
+#define maps__for_each_entry(maps, map) \
+ for (map = maps__first(maps); map; map = map__next(map))
+
+#define maps__for_each_entry_safe(maps, map, next) \
+ for (map = maps__first(maps), next = map__next(map); map; map = next, next = map__next(map))
+
+struct maps {
+ struct rb_root entries;
+ struct rw_semaphore lock;
+ struct machine *machine;
+ struct map *last_search_by_name;
+ struct map **maps_by_name;
+ refcount_t refcnt;
+ unsigned int nr_maps;
+ unsigned int nr_maps_allocated;
+#ifdef HAVE_LIBUNWIND_SUPPORT
+ void *addr_space;
+ struct unwind_libunwind_ops *unwind_libunwind_ops;
+#endif
+};
+
+#define KMAP_NAME_LEN 256
+
+struct kmap {
+ struct ref_reloc_sym *ref_reloc_sym;
+ struct maps *kmaps;
+ char name[KMAP_NAME_LEN];
+};
+
+struct maps *maps__new(struct machine *machine);
+void maps__delete(struct maps *maps);
+bool maps__empty(struct maps *maps);
+
+static inline struct maps *maps__get(struct maps *maps)
+{
+ if (maps)
+ refcount_inc(&maps->refcnt);
+ return maps;
+}
+
+void maps__put(struct maps *maps);
+void maps__init(struct maps *maps, struct machine *machine);
+void maps__exit(struct maps *maps);
+int maps__clone(struct thread *thread, struct maps *parent);
+size_t maps__fprintf(struct maps *maps, FILE *fp);
+
+void maps__insert(struct maps *maps, struct map *map);
+
+void maps__remove(struct maps *maps, struct map *map);
+
+struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp);
+struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
+
+struct addr_map_symbol;
+
+int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams);
+
+int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp);
+
+struct map *maps__find_by_name(struct maps *maps, const char *name);
+
+int maps__merge_in(struct maps *kmaps, struct map *new_map);
+
+void __maps__sort_by_name(struct maps *maps);
+
+#endif // __PERF_MAPS_H
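
[Illustrative sketch, not part of the patch: struct maps now carries its own rw_semaphore, so external iteration takes the read side around maps__for_each_entry, the same pattern maps__fprintf and maps__clone use above.]

	struct map *map;

	down_read(&maps->lock);
	maps__for_each_entry(maps, map) {
		/* inspect map->start, map->end, map->dso ... */
	}
	up_read(&maps->lock);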
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index e014c2c038f4..a45499126184 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -41,7 +41,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
static inline const char *perf_reg_name(int id __maybe_unused)
{
- return NULL;
+ return "unknown";
}
static inline int perf_reg_value(u64 *valp __maybe_unused,
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index e8d348988026..8b99fd312aae 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -24,6 +24,7 @@
#include "pmu-events/pmu-events.h"
#include "string2.h"
#include "strbuf.h"
+#include "fncache.h"
struct perf_pmu_format {
char *name;
@@ -82,7 +83,6 @@ int perf_pmu__format_parse(char *dir, struct list_head *head)
*/
static int pmu_format(const char *name, struct list_head *format)
{
- struct stat st;
char path[PATH_MAX];
const char *sysfs = sysfs__mountpoint();
@@ -92,8 +92,8 @@ static int pmu_format(const char *name, struct list_head *format)
snprintf(path, PATH_MAX,
"%s" EVENT_SOURCE_DEVICE_PATH "%s/format", sysfs, name);
- if (stat(path, &st) < 0)
- return 0; /* no error if format does not exist */
+ if (!file_available(path))
+ return 0;
if (perf_pmu__format_parse(path, format))
return -1;
@@ -475,7 +475,6 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
*/
static int pmu_aliases(const char *name, struct list_head *head)
{
- struct stat st;
char path[PATH_MAX];
const char *sysfs = sysfs__mountpoint();
@@ -485,8 +484,8 @@ static int pmu_aliases(const char *name, struct list_head *head)
snprintf(path, PATH_MAX,
"%s/bus/event_source/devices/%s/events", sysfs, name);
- if (stat(path, &st) < 0)
- return 0; /* no error if 'events' does not exist */
+ if (!file_available(path))
+ return 0;
if (pmu_aliases_parse(path, head))
return -1;
@@ -525,7 +524,6 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
*/
static int pmu_type(const char *name, __u32 *type)
{
- struct stat st;
char path[PATH_MAX];
FILE *file;
int ret = 0;
@@ -537,7 +535,7 @@ static int pmu_type(const char *name, __u32 *type)
snprintf(path, PATH_MAX,
"%s" EVENT_SOURCE_DEVICE_PATH "%s/type", sysfs, name);
- if (stat(path, &st) < 0)
+ if (access(path, R_OK) < 0)
return -1;
file = fopen(path, "r");
@@ -628,14 +626,11 @@ static struct perf_cpu_map *pmu_cpumask(const char *name)
static bool pmu_is_uncore(const char *name)
{
char path[PATH_MAX];
- struct perf_cpu_map *cpus;
- const char *sysfs = sysfs__mountpoint();
+ const char *sysfs;
+ sysfs = sysfs__mountpoint();
snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
- cpus = __pmu_cpumask(path);
- perf_cpu_map__put(cpus);
-
- return !!cpus;
+ return file_available(path);
}
/*
@@ -645,7 +640,6 @@ static bool pmu_is_uncore(const char *name)
*/
static int is_arm_pmu_core(const char *name)
{
- struct stat st;
char path[PATH_MAX];
const char *sysfs = sysfs__mountpoint();
@@ -655,10 +649,7 @@ static int is_arm_pmu_core(const char *name)
/* Look for cpu sysfs (specific to arm) */
scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
sysfs, name);
- if (stat(path, &st) == 0)
- return 1;
-
- return 0;
+ return file_available(path);
}
static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
@@ -1544,7 +1535,6 @@ bool pmu_have_event(const char *pname, const char *name)
static FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
{
- struct stat st;
char path[PATH_MAX];
const char *sysfs;
@@ -1554,10 +1544,8 @@ static FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
snprintf(path, PATH_MAX,
"%s" EVENT_SOURCE_DEVICE_PATH "%s/%s", sysfs, pmu->name, name);
-
- if (stat(path, &st) < 0)
+ if (!file_available(path))
return NULL;
-
return fopen(path, "r");
}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 52b2d165453a..eea132f512b0 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -28,7 +28,7 @@
#include "dso.h"
#include "color.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "symbol.h"
#include <api/fs/fs.h>
#include "trace-event.h" /* For __maybe_unused */
@@ -321,7 +321,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
char module_name[128];
snprintf(module_name, sizeof(module_name), "[%s]", module);
- map = map_groups__find_by_name(&host_machine->kmaps, module_name);
+ map = maps__find_by_name(&host_machine->kmaps, module_name);
if (map) {
dso = map->dso;
goto found;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 9af183860fbd..e7279ea6043a 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -33,3 +33,4 @@ util/trace-event.c
util/string.c
util/symbol_fprintf.c
util/units.c
+util/affinity.c
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9581a904af29..80ca5d0ab7fe 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1127,7 +1127,7 @@ static void python_export_sample_table(struct db_export *dbe,
tuple_set_u64(t, 0, es->db_id);
tuple_set_u64(t, 1, es->evsel->db_id);
- tuple_set_u64(t, 2, es->al->mg->machine->db_id);
+ tuple_set_u64(t, 2, es->al->maps->machine->db_id);
tuple_set_u64(t, 3, es->al->thread->db_id);
tuple_set_u64(t, 4, es->comm_db_id);
tuple_set_u64(t, 5, es->dso_db_id);
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index d84ed8b6caaa..c29edaaca863 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -16,6 +16,7 @@
#include "srccode.h"
#include "debug.h"
#include <internal/lib.h> // page_size
+#include "fncache.h"
#define MAXSRCCACHE (32*1024*1024)
#define MAXSRCFILES 64
@@ -36,14 +37,6 @@ static LIST_HEAD(srcfile_list);
static long map_total_sz;
static int num_srcfiles;
-static unsigned shash(unsigned char *s)
-{
- unsigned h = 0;
- while (*s)
- h = 65599 * h + *s++;
- return h ^ (h >> 16);
-}
-
static int countlines(char *map, int maplen)
{
int numl;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 16776d5fbaea..6658fbf196e6 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -9,7 +9,7 @@
#include "dso.h"
#include "map.h"
-#include "map_groups.h"
+#include "maps.h"
#include "symbol.h"
#include "symsrc.h"
#include "demangle-java.h"
@@ -844,7 +844,7 @@ void __weak arch__sym_update(struct symbol *s __maybe_unused,
static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
GElf_Sym *sym, GElf_Shdr *shdr,
- struct map_groups *kmaps, struct kmap *kmap,
+ struct maps *kmaps, struct kmap *kmap,
struct dso **curr_dsop, struct map **curr_mapp,
const char *section_name,
bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
@@ -876,8 +876,8 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
/* Ensure maps are correctly ordered */
if (kmaps) {
map__get(map);
- map_groups__remove(kmaps, map);
- map_groups__insert(kmaps, map);
+ maps__remove(kmaps, map);
+ maps__insert(kmaps, map);
map__put(map);
}
}
@@ -902,7 +902,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
- curr_map = map_groups__find_by_name(kmaps, dso_name);
+ curr_map = maps__find_by_name(kmaps, dso_name);
if (curr_map == NULL) {
u64 start = sym->st_value;
@@ -928,7 +928,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
}
curr_dso->symtab_type = dso->symtab_type;
- map_groups__insert(kmaps, curr_map);
+ maps__insert(kmaps, curr_map);
/*
* Add it before we drop the reference to curr_map, i.e. while
* we still are sure to have a reference to this DSO via
@@ -950,7 +950,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, int kmodule)
{
struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
- struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
+ struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
struct map *curr_map = map;
struct dso *curr_dso = dso;
Elf_Data *symstrs, *secstrs;
@@ -1162,7 +1162,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
* We need to fixup this here too because we create new
* maps here, for things like vsyscall sections.
*/
- map_groups__fixup_end(kmaps);
+ maps__fixup_end(kmaps);
}
}
err = nr;
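The map__get()/maps__remove()/maps__insert()/map__put() sequence above is the re-sort idiom for the maps tree: when a map's address range changes it is pulled out and re-added so the ordering holds, with a temporary reference held so the removal cannot free it. A toy, runnable model of that pattern (hypothetical container, not the perf API):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int refcnt; int key; };

	static struct node *node_get(struct node *n) { n->refcnt++; return n; }

	static void node_put(struct node *n)
	{
		if (--n->refcnt == 0)
			free(n);
	}

	/* Stand-ins for maps__insert()/maps__remove(): the container owns
	 * one reference while the node sits inside it. */
	static void container_insert(struct node *n) { node_get(n); }
	static void container_remove(struct node *n) { node_put(n); }

	int main(void)
	{
		struct node *n = calloc(1, sizeof(*n));

		n->key = 42;
		container_insert(n);

		node_get(n);		/* keep n alive across the remove */
		container_remove(n);
		container_insert(n);	/* re-add at the new position */
		node_put(n);

		printf("refcnt=%d key=%d\n", n->refcnt, n->key);
		container_remove(n);	/* final put frees n */
		return 0;
	}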
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index db9667aacb88..3b379b1296f1 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -239,9 +239,8 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
curr->end = roundup(curr->start, 4096) + 4096;
}
-void map_groups__fixup_end(struct map_groups *mg)
+void maps__fixup_end(struct maps *maps)
{
- struct maps *maps = &mg->maps;
struct map *prev = NULL, *curr;
down_write(&maps->lock);
@@ -698,7 +697,7 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}
-static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
+static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
struct map *curr_map;
struct symbol *pos;
@@ -724,7 +723,7 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
if (module)
*module = '\0';
- curr_map = map_groups__find(kmaps, pos->start);
+ curr_map = maps__find(kmaps, pos->start);
if (!curr_map) {
symbol__delete(pos);
@@ -751,8 +750,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
* kernel range is broken in several maps, named [kernel].N, as we don't have
 * the original ELF section names that vmlinux has.
*/
-static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
- struct map *initial_map)
+static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
+ struct map *initial_map)
{
struct machine *machine;
struct map *curr_map = initial_map;
@@ -797,7 +796,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
dso__set_loaded(curr_map->dso);
}
- curr_map = map_groups__find_by_name(kmaps, module);
+ curr_map = maps__find_by_name(kmaps, module);
if (curr_map == NULL) {
pr_debug("%s/proc/{kallsyms,modules} "
"inconsistency while looking "
@@ -864,7 +863,7 @@ static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso,
}
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
- map_groups__insert(kmaps, curr_map);
+ maps__insert(kmaps, curr_map);
++kernel_range;
} else if (delta) {
/* Kernel was relocated at boot time */
@@ -1049,8 +1048,7 @@ out_delete_from:
return ret;
}
-static int do_validate_kcore_modules(const char *filename,
- struct map_groups *kmaps)
+static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
struct rb_root modules = RB_ROOT;
struct map *old_map;
@@ -1060,7 +1058,7 @@ static int do_validate_kcore_modules(const char *filename,
if (err)
return err;
- map_groups__for_each_entry(kmaps, old_map) {
+ maps__for_each_entry(kmaps, old_map) {
struct module_info *mi;
if (!__map__is_kmodule(old_map)) {
@@ -1107,7 +1105,7 @@ static bool filename_from_kallsyms_filename(char *filename,
static int validate_kcore_modules(const char *kallsyms_filename,
struct map *map)
{
- struct map_groups *kmaps = map__kmaps(map);
+ struct maps *kmaps = map__kmaps(map);
char modules_filename[PATH_MAX];
if (!kmaps)
@@ -1167,15 +1165,15 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
}
/*
- * Merges map into map_groups by splitting the new map
- * within the existing map regions.
+ * Merges map into maps by splitting the new map within the existing map
+ * regions.
*/
-int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
+int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
struct map *old_map;
LIST_HEAD(merged);
- map_groups__for_each_entry(kmaps, old_map) {
+ maps__for_each_entry(kmaps, old_map) {
		/* no overlap with this one */
if (new_map->end < old_map->start ||
new_map->start >= old_map->end)
@@ -1232,12 +1230,12 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
while (!list_empty(&merged)) {
old_map = list_entry(merged.next, struct map, node);
list_del_init(&old_map->node);
- map_groups__insert(kmaps, old_map);
+ maps__insert(kmaps, old_map);
map__put(old_map);
}
if (new_map) {
- map_groups__insert(kmaps, new_map);
+ maps__insert(kmaps, new_map);
map__put(new_map);
}
return 0;
@@ -1246,7 +1244,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
static int dso__load_kcore(struct dso *dso, struct map *map,
const char *kallsyms_filename)
{
- struct map_groups *kmaps = map__kmaps(map);
+ struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *old_map, *new_map, *replacement_map = NULL, *next;
struct machine *machine;
@@ -1295,14 +1293,14 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
- map_groups__for_each_entry_safe(kmaps, old_map, next) {
+ maps__for_each_entry_safe(kmaps, old_map, next) {
/*
* We need to preserve eBPF maps even if they are
* covered by kcore, because we need to access
* eBPF dso for source data.
*/
if (old_map != map && !__map__is_bpf_prog(old_map))
- map_groups__remove(kmaps, old_map);
+ maps__remove(kmaps, old_map);
}
machine->trampolines_mapped = false;
@@ -1331,8 +1329,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
map->unmap_ip = new_map->unmap_ip;
/* Ensure maps are correctly ordered */
map__get(map);
- map_groups__remove(kmaps, map);
- map_groups__insert(kmaps, map);
+ maps__remove(kmaps, map);
+ maps__insert(kmaps, map);
map__put(map);
map__put(new_map);
} else {
@@ -1341,7 +1339,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
* and ensure that current maps (eBPF)
* stay intact.
*/
- if (map_groups__merge_in(kmaps, new_map))
+ if (maps__merge_in(kmaps, new_map))
goto out_err;
}
}
@@ -1433,9 +1431,9 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
if (!no_kcore && !dso__load_kcore(dso, map, filename))
- return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
+ return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
else
- return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
+ return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
}
int dso__load_kallsyms(struct dso *dso, const char *filename,
@@ -1772,68 +1770,67 @@ static int map__strcmp_name(const void *name, const void *b)
return strcmp(name, map->dso->short_name);
}
-void __map_groups__sort_by_name(struct map_groups *mg)
+void __maps__sort_by_name(struct maps *maps)
{
- qsort(mg->maps_by_name, mg->nr_maps, sizeof(struct map *), map__strcmp);
+ qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp);
}
-static int map__groups__sort_by_name_from_rbtree(struct map_groups *mg)
+static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
struct map *map;
- struct map **maps_by_name = realloc(mg->maps_by_name, mg->nr_maps * sizeof(map));
+ struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map));
int i = 0;
if (maps_by_name == NULL)
return -1;
- mg->maps_by_name = maps_by_name;
- mg->nr_maps_allocated = mg->nr_maps;
+ maps->maps_by_name = maps_by_name;
+ maps->nr_maps_allocated = maps->nr_maps;
- maps__for_each_entry(&mg->maps, map)
+ maps__for_each_entry(maps, map)
maps_by_name[i++] = map;
- __map_groups__sort_by_name(mg);
+ __maps__sort_by_name(maps);
return 0;
}
-static struct map *__map_groups__find_by_name(struct map_groups *mg, const char *name)
+static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
struct map **mapp;
- if (mg->maps_by_name == NULL &&
- map__groups__sort_by_name_from_rbtree(mg))
+ if (maps->maps_by_name == NULL &&
+ map__groups__sort_by_name_from_rbtree(maps))
return NULL;
- mapp = bsearch(name, mg->maps_by_name, mg->nr_maps, sizeof(*mapp), map__strcmp_name);
+ mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name);
if (mapp)
return *mapp;
return NULL;
}
-struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
+struct map *maps__find_by_name(struct maps *maps, const char *name)
{
- struct maps *maps = &mg->maps;
struct map *map;
down_read(&maps->lock);
- if (mg->last_search_by_name && strcmp(mg->last_search_by_name->dso->short_name, name) == 0) {
- map = mg->last_search_by_name;
+ if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) {
+ map = maps->last_search_by_name;
goto out_unlock;
}
/*
- * If we have mg->maps_by_name, then the name isn't in the rbtree,
- * as mg->maps_by_name mirrors the rbtree when lookups by name are
+ * If we have maps->maps_by_name, then the name isn't in the rbtree,
+ * as maps->maps_by_name mirrors the rbtree when lookups by name are
* made.
*/
- map = __map_groups__find_by_name(mg, name);
- if (map || mg->maps_by_name != NULL)
+ map = __maps__find_by_name(maps, name);
+ if (map || maps->maps_by_name != NULL)
goto out_unlock;
/* Fallback to traversing the rbtree... */
maps__for_each_entry(maps, map)
if (strcmp(map->dso->short_name, name) == 0) {
- mg->last_search_by_name = map;
+ maps->last_search_by_name = map;
goto out_unlock;
}
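maps__find_by_name() above layers three lookups: a one-entry cache of the last hit, a bsearch over the lazily built maps_by_name array, and a linear rbtree walk as the fallback that primes the cache. A toy of the middle tier with invented data and plain libc qsort/bsearch:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static const char *names[] = { "libc", "vmlinux", "[vdso]" };
	#define N (sizeof(names) / sizeof(names[0]))

	static int cmp_name(const void *a, const void *b)
	{
		return strcmp(*(const char * const *)a, *(const char * const *)b);
	}

	static int cmp_key(const void *key, const void *elem)
	{
		return strcmp(key, *(const char * const *)elem);
	}

	int main(void)
	{
		const char *by_name[N];
		const char **hit;

		memcpy(by_name, names, sizeof(names));		/* lazy build */
		qsort(by_name, N, sizeof(*by_name), cmp_name);	/* __maps__sort_by_name() */

		hit = bsearch("vmlinux", by_name, N, sizeof(*hit), cmp_key);
		printf("%s\n", hit ? *hit : "(miss: fall back to the rbtree walk)");
		return 0;
	}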
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 0b718cc9fb28..93fc43db1be3 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -21,7 +21,7 @@
struct dso;
struct map;
-struct map_groups;
+struct maps;
struct option;
/*
@@ -108,7 +108,7 @@ struct ref_reloc_sym {
struct addr_location {
struct thread *thread;
- struct map_groups *mg;
+ struct maps *maps;
struct map *map;
struct symbol *sym;
const char *srcline;
@@ -186,7 +186,7 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root_cached *symbols);
void symbols__fixup_end(struct rb_root_cached *symbols);
-void map_groups__fixup_end(struct map_groups *mg);
+void maps__fixup_end(struct maps *maps);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 48c3f8b9c852..c423298fe62d 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -493,7 +493,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
/*
* send mmap only for thread group leader
- * see thread__init_map_groups
+ * see thread__init_maps()
*/
if (pid == tgid &&
perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index cd8a948d03ec..0885967d5bc3 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -134,8 +134,8 @@ static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
if (err)
return err;
- if (thread->mg && thread->mg->machine) {
- struct machine *machine = thread->mg->machine;
+ if (thread->maps && thread->maps->machine) {
+ struct machine *machine = thread->maps->machine;
const char *arch = perf_env__arch(machine->env);
ts->kernel_start = machine__kernel_start(machine);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0a277a920970..28b719388028 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -19,21 +19,21 @@
#include <api/fs/fs.h>
-int thread__init_map_groups(struct thread *thread, struct machine *machine)
+int thread__init_maps(struct thread *thread, struct machine *machine)
{
pid_t pid = thread->pid_;
if (pid == thread->tid || pid == -1) {
- thread->mg = map_groups__new(machine);
+ thread->maps = maps__new(machine);
} else {
struct thread *leader = __machine__findnew_thread(machine, pid, pid);
if (leader) {
- thread->mg = map_groups__get(leader->mg);
+ thread->maps = maps__get(leader->maps);
thread__put(leader);
}
}
- return thread->mg ? 0 : -1;
+ return thread->maps ? 0 : -1;
}
struct thread *thread__new(pid_t pid, pid_t tid)
@@ -86,9 +86,9 @@ void thread__delete(struct thread *thread)
thread_stack__free(thread);
- if (thread->mg) {
- map_groups__put(thread->mg);
- thread->mg = NULL;
+ if (thread->maps) {
+ maps__put(thread->maps);
+ thread->maps = NULL;
}
down_write(&thread->namespaces_lock);
list_for_each_entry_safe(namespaces, tmp_namespaces,
@@ -251,7 +251,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
list_add(&new->list, &thread->comm_list);
if (exec)
- unwind__flush_access(thread->mg);
+ unwind__flush_access(thread->maps);
}
thread->comm_set = true;
@@ -324,19 +324,19 @@ int thread__comm_len(struct thread *thread)
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
- map_groups__fprintf(thread->mg, fp);
+ maps__fprintf(thread->maps, fp);
}
int thread__insert_map(struct thread *thread, struct map *map)
{
int ret;
- ret = unwind__prepare_access(thread->mg, map, NULL);
+ ret = unwind__prepare_access(thread->maps, map, NULL);
if (ret)
return ret;
- map_groups__fixup_overlappings(thread->mg, map, stderr);
- map_groups__insert(thread->mg, map);
+ maps__fixup_overlappings(thread->maps, map, stderr);
+ maps__insert(thread->maps, map);
return 0;
}
@@ -345,13 +345,13 @@ static int __thread__prepare_access(struct thread *thread)
{
bool initialized = false;
int err = 0;
- struct maps *maps = &thread->mg->maps;
+ struct maps *maps = thread->maps;
struct map *map;
down_read(&maps->lock);
maps__for_each_entry(maps, map) {
- err = unwind__prepare_access(thread->mg, map, &initialized);
+ err = unwind__prepare_access(thread->maps, map, &initialized);
if (err || initialized)
break;
}
@@ -371,21 +371,19 @@ static int thread__prepare_access(struct thread *thread)
return err;
}
-static int thread__clone_map_groups(struct thread *thread,
- struct thread *parent,
- bool do_maps_clone)
+static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread, we share maps for the process. */
if (thread->pid_ == parent->pid_)
return thread__prepare_access(thread);
- if (thread->mg == parent->mg) {
+ if (thread->maps == parent->maps) {
pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
thread->pid_, thread->tid, parent->pid_, parent->tid);
return 0;
}
/* But this one is new process, copy maps. */
- return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0;
+ return do_maps_clone ? maps__clone(thread, parent->maps) : 0;
}
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
@@ -401,7 +399,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bo
}
thread->ppid = parent->tid;
- return thread__clone_map_groups(thread, parent, do_maps_clone);
+ return thread__clone_maps(thread, parent, do_maps_clone);
}
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
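thread__init_maps() above encodes the sharing rule this rename makes explicit: the thread-group leader (pid == tid) allocates a fresh maps, and every other thread in the group merely takes a reference to the leader's. A toy model of that rule (hypothetical refcount, not the perf API):

	#include <stdio.h>

	struct maps { int refcnt; };

	static struct maps *maps_get(struct maps *m) { m->refcnt++; return m; }

	int main(void)
	{
		struct maps leader = { .refcnt = 1 };	/* maps__new() for the leader */
		struct maps *t1 = maps_get(&leader);	/* sibling threads share it */
		struct maps *t2 = maps_get(&leader);

		printf("shared=%d refcnt=%d\n", t1 == t2, leader.refcnt);
		return 0;
	}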
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 51bdb9a7af7f..20b96b5d1f15 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -25,7 +25,7 @@ struct thread {
struct rb_node rb_node;
struct list_head node;
};
- struct map_groups *mg;
+ struct maps *maps;
pid_t pid_; /* Not all tools update this */
pid_t tid;
pid_t ppid;
@@ -53,7 +53,7 @@ struct namespaces;
struct comm;
struct thread *thread__new(pid_t pid, pid_t tid);
-int thread__init_map_groups(struct thread *thread, struct machine *machine);
+int thread__init_maps(struct thread *thread, struct machine *machine);
void thread__delete(struct thread *thread);
struct thread *thread__get(struct thread *thread);
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index d2a8df01c4a7..7a3dbc259cec 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -81,7 +81,7 @@ static int entry(u64 ip, struct unwind_info *ui)
return -1;
e->ip = ip;
- e->ms.mg = al.mg;
+ e->ms.maps = al.maps;
e->ms.map = al.map;
e->ms.sym = al.sym;
@@ -200,7 +200,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct unwind_info *ui, ui_buf = {
.sample = data,
.thread = thread,
- .machine = thread->mg->machine,
+ .machine = thread->maps->machine,
.cb = cb,
.arg = arg,
.max_stack = max_stack,
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 6d53347d6744..b4649f5a0c2f 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -578,7 +578,7 @@ static int entry(u64 ip, struct thread *thread,
e.ms.sym = thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
e.ip = ip;
e.ms.map = al.map;
- e.ms.mg = al.mg;
+ e.ms.maps = al.maps;
pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
al.sym ? al.sym->name : "''",
@@ -616,26 +616,26 @@ static unw_accessors_t accessors = {
.get_proc_name = get_proc_name,
};
-static int _unwind__prepare_access(struct map_groups *mg)
+static int _unwind__prepare_access(struct maps *maps)
{
- mg->addr_space = unw_create_addr_space(&accessors, 0);
- if (!mg->addr_space) {
+ maps->addr_space = unw_create_addr_space(&accessors, 0);
+ if (!maps->addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
return -ENOMEM;
}
- unw_set_caching_policy(mg->addr_space, UNW_CACHE_GLOBAL);
+ unw_set_caching_policy(maps->addr_space, UNW_CACHE_GLOBAL);
return 0;
}
-static void _unwind__flush_access(struct map_groups *mg)
+static void _unwind__flush_access(struct maps *maps)
{
- unw_flush_cache(mg->addr_space, 0, 0);
+ unw_flush_cache(maps->addr_space, 0, 0);
}
-static void _unwind__finish_access(struct map_groups *mg)
+static void _unwind__finish_access(struct maps *maps)
{
- unw_destroy_addr_space(mg->addr_space);
+ unw_destroy_addr_space(maps->addr_space);
}
static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
@@ -660,7 +660,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
*/
if (max_stack - 1 > 0) {
WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
- addr_space = ui->thread->mg->addr_space;
+ addr_space = ui->thread->maps->addr_space;
if (addr_space == NULL)
return -1;
@@ -709,7 +709,7 @@ static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct unwind_info ui = {
.sample = data,
.thread = thread,
- .machine = thread->mg->machine,
+ .machine = thread->maps->machine,
};
if (!data->user_regs.regs)
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index a24fb57c9b2c..e89a5479b361 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -12,14 +12,12 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
-static void unwind__register_ops(struct map_groups *mg,
- struct unwind_libunwind_ops *ops)
+static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
{
- mg->unwind_libunwind_ops = ops;
+ maps->unwind_libunwind_ops = ops;
}
-int unwind__prepare_access(struct map_groups *mg, struct map *map,
- bool *initialized)
+int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
const char *arch;
enum dso_type dso_type;
@@ -29,7 +27,7 @@ int unwind__prepare_access(struct map_groups *mg, struct map *map,
if (!dwarf_callchain_users)
return 0;
- if (mg->addr_space) {
+ if (maps->addr_space) {
pr_debug("unwind: thread map already set, dso=%s\n",
map->dso->name);
if (initialized)
@@ -38,14 +36,14 @@ int unwind__prepare_access(struct map_groups *mg, struct map *map,
}
/* env->arch is NULL for live-mode (i.e. perf top) */
- if (!mg->machine->env || !mg->machine->env->arch)
+ if (!maps->machine->env || !maps->machine->env->arch)
goto out_register;
- dso_type = dso__type(map->dso, mg->machine);
+ dso_type = dso__type(map->dso, maps->machine);
if (dso_type == DSO__TYPE_UNKNOWN)
return 0;
- arch = perf_env__arch(mg->machine->env);
+ arch = perf_env__arch(maps->machine->env);
if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT)
@@ -60,31 +58,31 @@ int unwind__prepare_access(struct map_groups *mg, struct map *map,
return 0;
}
out_register:
- unwind__register_ops(mg, ops);
+ unwind__register_ops(maps, ops);
- err = mg->unwind_libunwind_ops->prepare_access(mg);
+ err = maps->unwind_libunwind_ops->prepare_access(maps);
if (initialized)
*initialized = err ? false : true;
return err;
}
-void unwind__flush_access(struct map_groups *mg)
+void unwind__flush_access(struct maps *maps)
{
- if (mg->unwind_libunwind_ops)
- mg->unwind_libunwind_ops->flush_access(mg);
+ if (maps->unwind_libunwind_ops)
+ maps->unwind_libunwind_ops->flush_access(maps);
}
-void unwind__finish_access(struct map_groups *mg)
+void unwind__finish_access(struct maps *maps)
{
- if (mg->unwind_libunwind_ops)
- mg->unwind_libunwind_ops->finish_access(mg);
+ if (maps->unwind_libunwind_ops)
+ maps->unwind_libunwind_ops->finish_access(maps);
}
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
struct thread *thread,
struct perf_sample *data, int max_stack)
{
- if (thread->mg->unwind_libunwind_ops)
- return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+ if (thread->maps->unwind_libunwind_ops)
+ return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
return 0;
}
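The pattern in this file is a NULL-guarded per-maps ops table: unwind__prepare_access() registers local, x86_32, or arm64 libunwind ops (or none), and every later entry point checks the pointer first, so threads that never need remote unwinding cost nothing. A runnable toy of that shape (invented ops, not the perf definitions):

	#include <stdio.h>

	struct maps;

	struct unwind_ops {
		void (*flush)(struct maps *maps);
	};

	struct maps {
		const struct unwind_ops *ops;
	};

	static void flush_impl(struct maps *maps) { (void)maps; puts("flushed"); }

	static const struct unwind_ops local_ops = { .flush = flush_impl };

	static void unwind_flush(struct maps *maps)
	{
		if (maps->ops)		/* same guard as unwind__flush_access() */
			maps->ops->flush(maps);
	}

	int main(void)
	{
		struct maps with = { .ops = &local_ops }, without = { 0 };

		unwind_flush(&with);	/* prints "flushed" */
		unwind_flush(&without);	/* no-op */
		return 0;
	}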
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index 50337c966979..ab8ad469c8de 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
#include "util/map_symbol.h"
-struct map_groups;
+struct maps;
struct perf_sample;
struct thread;
@@ -18,9 +18,9 @@ struct unwind_entry {
typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
struct unwind_libunwind_ops {
- int (*prepare_access)(struct map_groups *mg);
- void (*flush_access)(struct map_groups *mg);
- void (*finish_access)(struct map_groups *mg);
+ int (*prepare_access)(struct maps *maps);
+ void (*flush_access)(struct maps *maps);
+ void (*finish_access)(struct maps *maps);
int (*get_entries)(unwind_entry_cb_t cb, void *arg,
struct thread *thread,
struct perf_sample *data, int max_stack);
@@ -45,20 +45,19 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
#endif
int LIBUNWIND__ARCH_REG_ID(int regnum);
-int unwind__prepare_access(struct map_groups *mg, struct map *map,
- bool *initialized);
-void unwind__flush_access(struct map_groups *mg);
-void unwind__finish_access(struct map_groups *mg);
+int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
+void unwind__flush_access(struct maps *maps);
+void unwind__finish_access(struct maps *maps);
#else
-static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
+static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
struct map *map __maybe_unused,
bool *initialized __maybe_unused)
{
return 0;
}
-static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
-static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
+static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
+static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
#endif
#else
static inline int
@@ -71,14 +70,14 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
return 0;
}
-static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
+static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
struct map *map __maybe_unused,
bool *initialized __maybe_unused)
{
return 0;
}
-static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
-static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
+static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
+static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 6e00793c10ee..3cc91ad048ea 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -144,7 +144,7 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
enum dso_type dso_type = DSO__TYPE_UNKNOWN;
struct map *map;
- map_groups__for_each_entry(thread->mg, map) {
+ maps__for_each_entry(thread->maps, map) {
struct dso *dso = map->dso;
if (!dso || dso->long_name[0] != '/')
continue;
diff --git a/tools/power/cpupower/ToDo b/tools/power/cpupower/ToDo
index 6e8b89f282e6..b196a139a3e4 100644
--- a/tools/power/cpupower/ToDo
+++ b/tools/power/cpupower/ToDo
@@ -8,3 +8,17 @@ ToDos sorted by priority:
- Add another c1e debug idle monitor
-> Is by design racy with BIOS, but could be added
with a --force option and some "be careful" messages
+- Add cpu_start()/cpu_stop() callbacks for monitor
+ -> This is to move the per_cpu logic from inside the
+ monitor to outside it. This can be given higher
+ priority in fork_it.
+- Fork as many processes as there are CPUs in case the
+ per_cpu_schedule flag is set.
+ -> Bind forked process to each cpu.
+ -> Execute start measures via the forked processes on
+ each cpu.
+ -> Run test executable in a forked process.
+ -> Execute stop measures via the forked processes on
+ each cpu.
+ This would be ideal as it will not introduce noise in the
+ tested executable.
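A hypothetical sketch of the fork-per-CPU scheme this ToDo item describes, using fork(2) plus sched_setaffinity(2); the names here are invented for illustration, though the mperf monitor below already pins with a bind_cpu() helper:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <unistd.h>
	#include <sys/wait.h>

	/* Pin the calling process to one CPU. */
	static int bind_to_cpu(int cpu)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		return sched_setaffinity(0, sizeof(set), &set);
	}

	/* One measuring child per CPU, as the item above sketches. */
	static void fork_per_cpu_monitors(int ncpus)
	{
		for (int cpu = 0; cpu < ncpus; cpu++) {
			if (fork() == 0) {	/* child: pin, measure, exit */
				bind_to_cpu(cpu);
				/* start measures, run, stop measures ... */
				_exit(0);
			}
		}
		while (wait(NULL) > 0)
			;
	}

	int main(void)
	{
		fork_per_cpu_monitors((int)sysconf(_SC_NPROCESSORS_ONLN));
		return 0;
	}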
diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c
index 4c9d342b70ff..d3755ea70d4d 100644
--- a/tools/power/cpupower/utils/cpupower-info.c
+++ b/tools/power/cpupower/utils/cpupower-info.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <string.h>
#include <getopt.h>
+#include <sys/utsname.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -30,6 +31,7 @@ int cmd_info(int argc, char **argv)
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
+ struct utsname uts;
union {
struct {
@@ -39,6 +41,13 @@ int cmd_info(int argc, char **argv)
} params = {};
int ret = 0;
+ ret = uname(&uts);
+ if (!ret && (!strcmp(uts.machine, "ppc64le") ||
+ !strcmp(uts.machine, "ppc64"))) {
+ fprintf(stderr, _("Subcommand not supported on POWER.\n"));
+ return ret;
+ }
+
setlocale(LC_ALL, "");
textdomain(PACKAGE);
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 3cd95c6cb974..3cca6f715dd9 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -10,6 +10,7 @@
#include <errno.h>
#include <string.h>
#include <getopt.h>
+#include <sys/utsname.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
@@ -31,6 +32,7 @@ int cmd_set(int argc, char **argv)
extern char *optarg;
extern int optind, opterr, optopt;
unsigned int cpu;
+ struct utsname uts;
union {
struct {
@@ -41,6 +43,13 @@ int cmd_set(int argc, char **argv)
int perf_bias = 0;
int ret = 0;
+ ret = uname(&uts);
+ if (!ret && (!strcmp(uts.machine, "ppc64le") ||
+ !strcmp(uts.machine, "ppc64"))) {
+ fprintf(stderr, _("Subcommand not supported on POWER.\n"));
+ return ret;
+ }
+
setlocale(LC_ALL, "");
textdomain(PACKAGE);
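The uname() guard added here is duplicated verbatim between cpupower-info.c and cpupower-set.c; a shared helper could look like the following (hypothetical, not part of this patch):

	#include <stdio.h>
	#include <string.h>
	#include <sys/utsname.h>

	static int is_power_platform(void)
	{
		struct utsname uts;

		return !uname(&uts) &&
		       (!strcmp(uts.machine, "ppc64le") || !strcmp(uts.machine, "ppc64"));
	}

	int main(void)
	{
		printf("POWER: %s\n", is_power_platform() ? "yes" : "no");
		return 0;
	}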
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 5cc39d4e23ed..73bfafc60e9b 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -131,6 +131,10 @@ out:
if (ext_cpuid_level >= 0x80000007 &&
(cpuid_edx(0x80000007) & (1 << 9)))
cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
+
+ if (ext_cpuid_level >= 0x80000008 &&
+ cpuid_ebx(0x80000008) & (1 << 4))
+ cpu_info->caps |= CPUPOWER_CAP_AMD_RDPRU;
}
if (cpu_info->vendor == X86_VENDOR_INTEL) {
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 357b19bb136e..c258eeccd05f 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -69,6 +69,7 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010
#define CPUPOWER_CAP_IS_SNB 0x00000020
#define CPUPOWER_CAP_INTEL_IDA 0x00000040
+#define CPUPOWER_CAP_AMD_RDPRU 0x00000080
#define CPUPOWER_AMD_CPBDIS 0x02000000
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
index 3f893b99b337..33dc34db4f3c 100644
--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -328,7 +328,7 @@ struct cpuidle_monitor amd_fam14h_monitor = {
.stop = amd_fam14h_stop,
.do_register = amd_fam14h_register,
.unregister = amd_fam14h_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = OVERFLOW_MS / 1000,
};
#endif /* #if defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
index f634aeb65c5f..3c4cee160b0e 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
@@ -207,6 +207,6 @@ struct cpuidle_monitor cpuidle_sysfs_monitor = {
.stop = cpuidle_stop,
.do_register = cpuidle_register,
.unregister = cpuidle_unregister,
- .needs_root = 0,
+ .flags.needs_root = 0,
.overflow_s = UINT_MAX,
};
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index d3c3e6e7aa26..6d44fec55ad5 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -408,7 +408,7 @@ int cmd_monitor(int argc, char **argv)
dprint("Try to register: %s\n", all_monitors[num]->name);
test_mon = all_monitors[num]->do_register();
if (test_mon) {
- if (test_mon->needs_root && !run_as_root) {
+ if (test_mon->flags.needs_root && !run_as_root) {
fprintf(stderr, _("Available monitor %s needs "
"root access\n"), test_mon->name);
continue;
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
index a2d901d3bfaf..5b5eb1da0cce 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
@@ -60,7 +60,10 @@ struct cpuidle_monitor {
struct cpuidle_monitor* (*do_register) (void);
void (*unregister)(void);
unsigned int overflow_s;
- int needs_root;
+ struct {
+ unsigned int needs_root:1;
+ unsigned int per_cpu_schedule:1;
+ } flags;
};
extern long long timespec_diff_us(struct timespec start, struct timespec end);
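Folding needs_root into a nested flags struct keeps the call sites readable (designated initializers still say '.flags.needs_root = 1') while leaving room for per_cpu_schedule and future bits without widening the monitor struct. A minimal usage sketch with hypothetical data:

	#include <stdio.h>

	struct monitor {
		const char *name;
		struct {
			unsigned int needs_root:1;
			unsigned int per_cpu_schedule:1;
		} flags;
	};

	int main(void)
	{
		struct monitor m = {
			.name = "mperf",
			.flags.needs_root = 1,
			.flags.per_cpu_schedule = 1,
		};

		if (m.flags.needs_root)
			printf("%s needs root\n", m.name);
		return 0;
	}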
diff --git a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
index 7c7451d3f494..97ad3233a521 100644
--- a/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c
@@ -39,7 +39,6 @@ static cstate_t hsw_ext_cstates[HSW_EXT_CSTATE_COUNT] = {
{
.name = "PC9",
.desc = N_("Processor Package C9"),
- .desc = N_("Processor Package C2"),
.id = PC9,
.range = RANGE_PACKAGE,
.get_count_percent = hsw_ext_get_count_percent,
@@ -188,7 +187,7 @@ struct cpuidle_monitor intel_hsw_ext_monitor = {
.stop = hsw_ext_stop,
.do_register = hsw_ext_register,
.unregister = hsw_ext_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index 44806a6dae11..e7d48cb563c0 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -19,6 +19,10 @@
#define MSR_APERF 0xE8
#define MSR_MPERF 0xE7
+#define RDPRU ".byte 0x0f, 0x01, 0xfd"
+#define RDPRU_ECX_MPERF 0
+#define RDPRU_ECX_APERF 1
+
#define MSR_TSC 0x10
#define MSR_AMD_HWCR 0xc0010015
@@ -86,15 +90,51 @@ static int mperf_get_tsc(unsigned long long *tsc)
return ret;
}
+static int get_aperf_mperf(int cpu, unsigned long long *aval,
+ unsigned long long *mval)
+{
+ unsigned long low_a, high_a;
+ unsigned long low_m, high_m;
+ int ret;
+
+ /*
+ * Running on the cpu from which we read the registers will
+ * prevent APERF/MPERF from going out of sync because of IPI
+ * latency introduced by read_msr()s.
+ */
+ if (mperf_monitor.flags.per_cpu_schedule) {
+ if (bind_cpu(cpu))
+ return 1;
+ }
+
+ if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_RDPRU) {
+ asm volatile(RDPRU
+ : "=a" (low_a), "=d" (high_a)
+ : "c" (RDPRU_ECX_APERF));
+ asm volatile(RDPRU
+ : "=a" (low_m), "=d" (high_m)
+ : "c" (RDPRU_ECX_MPERF));
+
+ *aval = ((low_a) | (high_a) << 32);
+ *mval = ((low_m) | (high_m) << 32);
+
+ return 0;
+ }
+
+ ret = read_msr(cpu, MSR_APERF, aval);
+ ret |= read_msr(cpu, MSR_MPERF, mval);
+
+ return ret;
+}
+
static int mperf_init_stats(unsigned int cpu)
{
- unsigned long long val;
+ unsigned long long aval, mval;
int ret;
- ret = read_msr(cpu, MSR_APERF, &val);
- aperf_previous_count[cpu] = val;
- ret |= read_msr(cpu, MSR_MPERF, &val);
- mperf_previous_count[cpu] = val;
+ ret = get_aperf_mperf(cpu, &aval, &mval);
+ aperf_previous_count[cpu] = aval;
+ mperf_previous_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -102,13 +142,12 @@ static int mperf_init_stats(unsigned int cpu)
static int mperf_measure_stats(unsigned int cpu)
{
- unsigned long long val;
+ unsigned long long aval, mval;
int ret;
- ret = read_msr(cpu, MSR_APERF, &val);
- aperf_current_count[cpu] = val;
- ret |= read_msr(cpu, MSR_MPERF, &val);
- mperf_current_count[cpu] = val;
+ ret = get_aperf_mperf(cpu, &aval, &mval);
+ aperf_current_count[cpu] = aval;
+ mperf_current_count[cpu] = mval;
is_valid[cpu] = !ret;
return 0;
@@ -305,6 +344,9 @@ struct cpuidle_monitor *mperf_register(void)
if (init_maxfreq_mode())
return NULL;
+ if (cpupower_cpu_info.vendor == X86_VENDOR_AMD)
+ mperf_monitor.flags.per_cpu_schedule = 1;
+
/* Free this at program termination */
is_valid = calloc(cpu_count, sizeof(int));
mperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
@@ -333,7 +375,7 @@ struct cpuidle_monitor mperf_monitor = {
.stop = mperf_stop,
.do_register = mperf_register,
.unregister = mperf_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
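The aval/mval deltas gathered above feed the usual APERF/MPERF estimate: the average frequency over a measurement interval is the maximum frequency scaled by delta-APERF over delta-MPERF. A sketch of that arithmetic, with the real monitor's max-frequency plumbing simplified away:

	#include <stdio.h>

	static unsigned long long avg_freq_khz(unsigned long long aperf_diff,
					       unsigned long long mperf_diff,
					       unsigned long long max_freq_khz)
	{
		if (!mperf_diff)
			return 0;	/* avoid dividing by zero on bogus counts */
		return max_freq_khz * aperf_diff / mperf_diff;
	}

	int main(void)
	{
		/* e.g. a 3.6 GHz part that ran at ~50% effective frequency */
		printf("%llu kHz\n", avg_freq_khz(1000, 2000, 3600000ULL));
		return 0;
	}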
diff --git a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
index be7256696a37..114271165182 100644
--- a/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/nhm_idle.c
@@ -208,7 +208,7 @@ struct cpuidle_monitor intel_nhm_monitor = {
.stop = nhm_stop,
.do_register = intel_nhm_register,
.unregister = intel_nhm_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/power/cpupower/utils/idle_monitor/snb_idle.c b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
index 968333571cad..df8b223cc096 100644
--- a/tools/power/cpupower/utils/idle_monitor/snb_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/snb_idle.c
@@ -192,7 +192,7 @@ struct cpuidle_monitor intel_snb_monitor = {
.stop = snb_stop,
.do_register = snb_register,
.unregister = snb_unregister,
- .needs_root = 1,
+ .flags.needs_root = 1,
.overflow_s = 922000000 /* 922337203 seconds TSC overflow
at 20GHz */
};
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index 2a9890c8395a..944183f9ed5a 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -11,10 +11,11 @@
struct process_cmd_struct {
char *feature;
char *command;
- void (*process_fn)(void);
+ void (*process_fn)(int arg);
+ int arg;
};
-static const char *version_str = "v1.0";
+static const char *version_str = "v1.1";
static const int supported_api_ver = 1;
static struct isst_if_platform_info isst_platform_info;
static char *progname;
@@ -22,6 +23,7 @@ static int debug_flag;
static FILE *outf;
static int cpu_model;
+static int cpu_stepping;
#define MAX_CPUS_IN_ONE_REQ 64
static short max_target_cpus;
@@ -39,6 +41,7 @@ static unsigned long long fact_trl;
static int out_format_json;
static int cmd_help;
static int force_online_offline;
+static int auto_mode;
/* clos related */
static int current_clos = -1;
@@ -70,7 +73,16 @@ void debug_printf(const char *format, ...)
va_end(args);
}
-static void update_cpu_model(void)
+
+int is_clx_n_platform(void)
+{
+ if (cpu_model == 0x55)
+ if (cpu_stepping == 0x6 || cpu_stepping == 0x7)
+ return 1;
+ return 0;
+}
+
+static int update_cpu_model(void)
{
unsigned int ebx, ecx, edx;
unsigned int fms, family;
@@ -80,6 +92,33 @@ static void update_cpu_model(void)
cpu_model = (fms >> 4) & 0xf;
if (family == 6 || family == 0xf)
cpu_model += ((fms >> 16) & 0xf) << 4;
+
+ cpu_stepping = fms & 0xf;
+ /* only three CascadeLake-N models are supported */
+ if (is_clx_n_platform()) {
+ FILE *fp;
+ size_t n = 0;
+ char *line = NULL;
+ int ret = 1;
+
+ fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ err(-1, "cannot open /proc/cpuinfo\n");
+
+ while (getline(&line, &n, fp) > 0) {
+ if (strstr(line, "model name")) {
+ if (strstr(line, "6252N") ||
+ strstr(line, "6230N") ||
+ strstr(line, "5218N"))
+ ret = 0;
+ break;
+ }
+ }
+ free(line);
+ fclose(fp);
+ return ret;
+ }
+ return 0;
}
/* Open a file, and exit on failure */
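A worked example of the decoding above, using the standard leaf-1 EAX layout and a representative Cascade Lake value (0x50656: family 6, model 0x55, stepping 6, one of the steppings is_clx_n_platform() tests for):

	#include <stdio.h>

	int main(void)
	{
		unsigned int fms = 0x50656, family, model, stepping;

		family = (fms >> 8) & 0xf;
		model = (fms >> 4) & 0xf;
		if (family == 6 || family == 0xf)
			model += ((fms >> 16) & 0xf) << 4;
		stepping = fms & 0xf;

		printf("family %u model 0x%x stepping %u\n", family, model, stepping);
		return 0;
	}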
@@ -161,6 +200,11 @@ int get_physical_die_id(int cpu)
return ret;
}
+int get_cpufreq_base_freq(int cpu)
+{
+ return parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency", cpu);
+}
+
int get_topo_max_cpus(void)
{
return topo_max_cpus;
@@ -169,7 +213,7 @@ int get_topo_max_cpus(void)
static void set_cpu_online_offline(int cpu, int state)
{
char buffer[128];
- int fd;
+ int fd, ret;
snprintf(buffer, sizeof(buffer),
"/sys/devices/system/cpu/cpu%d/online", cpu);
@@ -179,9 +223,12 @@ static void set_cpu_online_offline(int cpu, int state)
err(-1, "%s open failed", buffer);
if (state)
- write(fd, "1\n", 2);
+ ret = write(fd, "1\n", 2);
else
- write(fd, "0\n", 2);
+ ret = write(fd, "0\n", 2);
+
+ if (ret == -1)
+ perror("Online/Offline: Operation failed\n");
close(fd);
}
@@ -291,6 +338,7 @@ void free_cpu_set(cpu_set_t *cpu_set)
}
static int cpu_cnt[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE];
+static long long core_mask[MAX_PACKAGE_COUNT][MAX_DIE_PER_PACKAGE];
static void set_cpu_present_cpu_mask(void)
{
size_t size;
@@ -315,13 +363,33 @@ static void set_cpu_present_cpu_mask(void)
pkg_id = get_physical_package_id(i);
if (pkg_id < MAX_PACKAGE_COUNT &&
- die_id < MAX_DIE_PER_PACKAGE)
+ die_id < MAX_DIE_PER_PACKAGE) {
+ int core_id = get_physical_core_id(i);
+
cpu_cnt[pkg_id][die_id]++;
+ core_mask[pkg_id][die_id] |= (1ULL << core_id);
+ }
}
closedir(dir);
}
}
+int get_core_count(int pkg_id, int die_id)
+{
+ int cnt = 0;
+
+ if (pkg_id < MAX_PACKAGE_COUNT && die_id < MAX_DIE_PER_PACKAGE) {
+ int i;
+
+ for (i = 0; i < sizeof(long long) * 8; ++i) {
+ if (core_mask[pkg_id][die_id] & (1ULL << i))
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
int get_cpu_count(int pkg_id, int die_id)
{
if (pkg_id < MAX_PACKAGE_COUNT && die_id < MAX_DIE_PER_PACKAGE)
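get_core_count() above is a plain population count over the per-die core_mask; with GCC or Clang the loop is equivalent to a single builtin, shown here as a simplification rather than what the patch uses:

	#include <stdio.h>

	static int core_count(unsigned long long mask)
	{
		return __builtin_popcountll(mask);	/* count set bits */
	}

	int main(void)
	{
		printf("%d\n", core_count(0xf0fULL));	/* 8 cores */
		return 0;
	}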
@@ -532,12 +600,6 @@ int isst_send_mbox_command(unsigned int cpu, unsigned char command,
if (!ret && !write)
*resp = value;
break;
- case CLOS_PM_QOS_CONFIG:
- ret = isst_send_mmio_command(cpu, PM_QOS_CONFIG_OFFSET,
- write, &value);
- if (!ret && !write)
- *resp = value;
- break;
case CLOS_STATUS:
break;
default:
@@ -562,6 +624,7 @@ int isst_send_mbox_command(unsigned int cpu, unsigned char command,
fprintf(outf,
"Error: mbox_cmd cpu:%d command:%x sub_command:%x parameter:%x req_data:%x\n",
cpu, command, sub_command, parameter, req_data);
+ return -1;
} else {
*resp = mbox_cmds.mbox_cmd[0].resp_data;
debug_printf(
@@ -678,7 +741,7 @@ static void exec_on_get_ctdp_cpu(int cpu, void *arg1, void *arg2, void *arg3,
}
#define _get_tdp_level(desc, suffix, object, help) \
- static void get_tdp_##object(void) \
+ static void get_tdp_##object(int arg) \
{ \
struct isst_pkg_ctdp ctdp; \
\
@@ -708,6 +771,152 @@ _get_tdp_level("get-config-current_level", levels, current_level,
"Current TDP Level");
_get_tdp_level("get-lock-status", levels, locked, "TDP lock status");
+struct isst_pkg_ctdp clx_n_pkg_dev;
+
+static int clx_n_get_base_ratio(void)
+{
+ FILE *fp;
+ char *begin, *end, *line = NULL;
+ char number[5];
+ float value = 0;
+ size_t n = 0;
+
+ fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ err(-1, "cannot open /proc/cpuinfo\n");
+
+ while (getline(&line, &n, fp) > 0) {
+ if (strstr(line, "model name")) {
+ /* this is true for CascadeLake-N */
+ begin = strstr(line, "@ ") + 2;
+ end = strstr(line, "GHz");
+ strncpy(number, begin, end - begin);
+ value = atof(number) * 10;
+ break;
+ }
+ }
+ free(line);
+ fclose(fp);
+
+ return (int)(value);
+}
+
+static int clx_n_config(int cpu)
+{
+ int i, ret, pkg_id, die_id;
+ unsigned long cpu_bf;
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+ struct isst_pbf_info *pbf_info;
+
+ ctdp_level = &clx_n_pkg_dev.ctdp_level[0];
+ pbf_info = &ctdp_level->pbf_info;
+ ctdp_level->core_cpumask_size =
+ alloc_cpu_set(&ctdp_level->core_cpumask);
+
+ /* find the frequency base ratio */
+ ctdp_level->tdp_ratio = clx_n_get_base_ratio();
+ if (ctdp_level->tdp_ratio == 0) {
+ debug_printf("CLX: cn base ratio is zero\n");
+ ret = -1;
+ goto error_ret;
+ }
+
+ /* find the high and low priority frequencies */
+ pbf_info->p1_high = 0;
+ pbf_info->p1_low = ~0;
+
+ pkg_id = get_physical_package_id(cpu);
+ die_id = get_physical_die_id(cpu);
+
+ for (i = 0; i < topo_max_cpus; i++) {
+ if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
+ continue;
+
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ CPU_SET_S(i, ctdp_level->core_cpumask_size,
+ ctdp_level->core_cpumask);
+
+ cpu_bf = parse_int_file(1,
+ "/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency",
+ i);
+ if (cpu_bf > pbf_info->p1_high)
+ pbf_info->p1_high = cpu_bf;
+ if (cpu_bf < pbf_info->p1_low)
+ pbf_info->p1_low = cpu_bf;
+ }
+
+ if (pbf_info->p1_high == ~0UL) {
+ debug_printf("CLX: maximum base frequency not set\n");
+ ret = -1;
+ goto error_ret;
+ }
+
+ if (pbf_info->p1_low == 0) {
+ debug_printf("CLX: minimum base frequency not set\n");
+ ret = -1;
+ goto error_ret;
+ }
+
+ /* convert frequencies back to ratios */
+ pbf_info->p1_high = pbf_info->p1_high / 100000;
+ pbf_info->p1_low = pbf_info->p1_low / 100000;
+
+ /* create high priority cpu mask */
+ pbf_info->core_cpumask_size = alloc_cpu_set(&pbf_info->core_cpumask);
+ for (i = 0; i < topo_max_cpus; i++) {
+ if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
+ continue;
+
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ cpu_bf = parse_int_file(1,
+ "/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency",
+ i);
+ cpu_bf = cpu_bf / 100000;
+ if (cpu_bf == pbf_info->p1_high)
+ CPU_SET_S(i, pbf_info->core_cpumask_size,
+ pbf_info->core_cpumask);
+ }
+
+ /* extra ctdp & pbf struct parameters */
+ ctdp_level->processed = 1;
+ ctdp_level->pbf_support = 1; /* PBF is always supported and enabled */
+ ctdp_level->pbf_enabled = 1;
+ ctdp_level->fact_support = 0; /* FACT is never supported */
+ ctdp_level->fact_enabled = 0;
+
+ return 0;
+
+error_ret:
+ free_cpu_set(ctdp_level->core_cpumask);
+ return ret;
+}
+
+static void dump_clx_n_config_for_cpu(int cpu, void *arg1, void *arg2,
+ void *arg3, void *arg4)
+{
+ int ret;
+
+ ret = clx_n_config(cpu);
+ if (ret) {
+ perror("isst_get_process_ctdp");
+ } else {
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+ struct isst_pbf_info *pbf_info;
+
+ ctdp_level = &clx_n_pkg_dev.ctdp_level[0];
+ pbf_info = &ctdp_level->pbf_info;
+ isst_ctdp_display_information(cpu, outf, tdp_level, &clx_n_pkg_dev);
+ free_cpu_set(ctdp_level->core_cpumask);
+ free_cpu_set(pbf_info->core_cpumask);
+ }
+}
+
static void dump_isst_config_for_cpu(int cpu, void *arg1, void *arg2,
void *arg3, void *arg4)
{
@@ -724,8 +933,10 @@ static void dump_isst_config_for_cpu(int cpu, void *arg1, void *arg2,
}
}
-static void dump_isst_config(void)
+static void dump_isst_config(int arg)
{
+ void *fn;
+
if (cmd_help) {
fprintf(stderr,
"Print Intel(R) Speed Select Technology Performance profile configuration\n");
@@ -737,14 +948,17 @@ static void dump_isst_config(void)
exit(0);
}
+ if (!is_clx_n_platform())
+ fn = dump_isst_config_for_cpu;
+ else
+ fn = dump_clx_n_config_for_cpu;
+
isst_ctdp_display_information_start(outf);
if (max_target_cpus)
- for_each_online_target_cpu_in_set(dump_isst_config_for_cpu,
- NULL, NULL, NULL, NULL);
+ for_each_online_target_cpu_in_set(fn, NULL, NULL, NULL, NULL);
else
- for_each_online_package_in_set(dump_isst_config_for_cpu, NULL,
- NULL, NULL, NULL);
+ for_each_online_package_in_set(fn, NULL, NULL, NULL, NULL);
isst_ctdp_display_information_end(outf);
}
@@ -787,7 +1001,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
}
}
-static void set_tdp_level(void)
+static void set_tdp_level(int arg)
{
if (cmd_help) {
fprintf(stderr, "Set Config TDP level\n");
@@ -812,6 +1026,26 @@ static void set_tdp_level(void)
isst_ctdp_display_information_end(outf);
}
+static void clx_n_dump_pbf_config_for_cpu(int cpu, void *arg1, void *arg2,
+ void *arg3, void *arg4)
+{
+ int ret;
+
+ ret = clx_n_config(cpu);
+ if (ret) {
+ perror("isst_get_process_ctdp");
+ } else {
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+ struct isst_pbf_info *pbf_info;
+
+ ctdp_level = &clx_n_pkg_dev.ctdp_level[0];
+ pbf_info = &ctdp_level->pbf_info;
+ isst_pbf_display_information(cpu, outf, tdp_level, pbf_info);
+ free_cpu_set(ctdp_level->core_cpumask);
+ free_cpu_set(pbf_info->core_cpumask);
+ }
+}
+
static void dump_pbf_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
void *arg4)
{
@@ -827,8 +1061,10 @@ static void dump_pbf_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
}
}
-static void dump_pbf_config(void)
+static void dump_pbf_config(int arg)
{
+ void *fn;
+
if (cmd_help) {
fprintf(stderr,
"Print Intel(R) Speed Select Technology base frequency configuration for a TDP level\n");
@@ -842,72 +1078,372 @@ static void dump_pbf_config(void)
exit(1);
}
+ if (!is_clx_n_platform())
+ fn = dump_pbf_config_for_cpu;
+ else
+ fn = clx_n_dump_pbf_config_for_cpu;
+
isst_ctdp_display_information_start(outf);
+
if (max_target_cpus)
- for_each_online_target_cpu_in_set(dump_pbf_config_for_cpu, NULL,
- NULL, NULL, NULL);
+ for_each_online_target_cpu_in_set(fn, NULL, NULL, NULL, NULL);
else
- for_each_online_package_in_set(dump_pbf_config_for_cpu, NULL,
- NULL, NULL, NULL);
+ for_each_online_package_in_set(fn, NULL, NULL, NULL, NULL);
+
isst_ctdp_display_information_end(outf);
}
-static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
- void *arg4)
+static int set_clos_param(int cpu, int clos, int epp, int wt, int min, int max)
{
+ struct isst_clos_config clos_config;
int ret;
- int status = *(int *)arg4;
- ret = isst_set_pbf_fact_status(cpu, 1, status);
+ ret = isst_pm_get_clos(cpu, clos, &clos_config);
if (ret) {
- perror("isst_set_pbf");
- } else {
- if (status)
- isst_display_result(cpu, outf, "base-freq", "enable",
- ret);
+ perror("isst_pm_get_clos");
+ return ret;
+ }
+ clos_config.clos_min = min;
+ clos_config.clos_max = max;
+ clos_config.epp = epp;
+ clos_config.clos_prop_prio = wt;
+ ret = isst_set_clos(cpu, clos, &clos_config);
+ if (ret) {
+ perror("isst_pm_set_clos");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
+{
+ char buffer[128], freq_str[16];
+ int fd, ret, len;
+
+ if (max)
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+ else
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+
+ fd = open(buffer, O_WRONLY);
+ if (fd < 0)
+ return fd;
+
+ snprintf(freq_str, sizeof(freq_str), "%d", freq);
+ len = strlen(freq_str);
+ ret = write(fd, freq_str, len);
+ if (ret == -1) {
+ close(fd);
+ return ret;
+ }
+ close(fd);
+
+ return 0;
+}
+
+static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
+{
+ struct isst_pkg_ctdp_level_info *ctdp_level;
+ struct isst_pbf_info *pbf_info;
+ int i, pkg_id, die_id, freq, freq_high, freq_low;
+ int ret;
+
+ ret = clx_n_config(cpu);
+ if (ret) {
+ perror("set_clx_pbf_cpufreq_scaling_min_max");
+ return ret;
+ }
+
+ ctdp_level = &clx_n_pkg_dev.ctdp_level[0];
+ pbf_info = &ctdp_level->pbf_info;
+ freq_high = pbf_info->p1_high * 100000;
+ freq_low = pbf_info->p1_low * 100000;
+
+ pkg_id = get_physical_package_id(cpu);
+ die_id = get_physical_die_id(cpu);
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ if (CPU_ISSET_S(i, pbf_info->core_cpumask_size,
+ pbf_info->core_cpumask))
+ freq = freq_high;
else
- isst_display_result(cpu, outf, "base-freq", "disable",
- ret);
+ freq = freq_low;
+
+ set_cpufreq_scaling_min_max(i, 1, freq);
+ set_cpufreq_scaling_min_max(i, 0, freq);
}
+
+ return 0;
}
-static void set_pbf_enable(void)
+static int set_cpufreq_scaling_min_max_from_cpuinfo(int cpu, int cpuinfo_max, int scaling_max)
{
- int status = 1;
+ char buffer[128], min_freq[16];
+ int fd, ret, len;
- if (cmd_help) {
- fprintf(stderr,
- "Enable Intel Speed Select Technology base frequency feature [No command arguments are required]\n");
- exit(0);
+ if (!CPU_ISSET_S(cpu, present_cpumask_size, present_cpumask))
+ return -1;
+
+ if (cpuinfo_max)
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpu);
+ else
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_min_freq", cpu);
+
+ fd = open(buffer, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ len = read(fd, min_freq, sizeof(min_freq));
+ close(fd);
+
+ if (len < 0)
+ return len;
+
+ if (scaling_max)
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+ else
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+
+ fd = open(buffer, O_WRONLY);
+ if (fd < 0)
+ return fd;
+
+ len = strlen(min_freq);
+ ret = write(fd, min_freq, len);
+ if (ret == -1) {
+ close(fd);
+ return ret;
}
+ close(fd);
- isst_ctdp_display_information_start(outf);
- if (max_target_cpus)
- for_each_online_target_cpu_in_set(set_pbf_for_cpu, NULL, NULL,
- NULL, &status);
+ return 0;
+}
+
+static void set_scaling_min_to_cpuinfo_max(int cpu)
+{
+ int i, pkg_id, die_id;
+
+ pkg_id = get_physical_package_id(cpu);
+ die_id = get_physical_die_id(cpu);
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
+ }
+}
+
+static void set_scaling_min_to_cpuinfo_min(int cpu)
+{
+ int i, pkg_id, die_id;
+
+ pkg_id = get_physical_package_id(cpu);
+ die_id = get_physical_die_id(cpu);
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ set_cpufreq_scaling_min_max_from_cpuinfo(i, 0, 0);
+ }
+}
+
+static void set_scaling_max_to_cpuinfo_max(int cpu)
+{
+ int i, pkg_id, die_id;
+
+ pkg_id = get_physical_package_id(cpu);
+ die_id = get_physical_die_id(cpu);
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 1);
+ }
+}
+
+static int set_core_priority_and_min(int cpu, int mask_size,
+ cpu_set_t *cpu_mask, int min_high,
+ int min_low)
+{
+ int pkg_id, die_id, ret, i;
+
+ if (!CPU_COUNT_S(mask_size, cpu_mask))
+ return -1;
+
+ ret = set_clos_param(cpu, 0, 0, 0, min_high, 0xff);
+ if (ret)
+ return ret;
+
+ ret = set_clos_param(cpu, 1, 15, 15, min_low, 0xff);
+ if (ret)
+ return ret;
+
+ ret = set_clos_param(cpu, 2, 15, 15, min_low, 0xff);
+ if (ret)
+ return ret;
+
+ ret = set_clos_param(cpu, 3, 15, 15, min_low, 0xff);
+ if (ret)
+ return ret;
+
+ pkg_id = get_physical_package_id(cpu);
+ die_id = get_physical_die_id(cpu);
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ int clos;
+
+ if (pkg_id != get_physical_package_id(i) ||
+ die_id != get_physical_die_id(i))
+ continue;
+
+ if (CPU_ISSET_S(i, mask_size, cpu_mask))
+ clos = 0;
+ else
+ clos = 3;
+
+ debug_printf("Associate cpu: %d clos: %d\n", i, clos);
+ ret = isst_clos_associate(i, clos);
+ if (ret) {
+ perror("isst_clos_associate");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int set_pbf_core_power(int cpu)
+{
+ struct isst_pbf_info pbf_info;
+ struct isst_pkg_ctdp pkg_dev;
+ int ret;
+
+ ret = isst_get_ctdp_levels(cpu, &pkg_dev);
+ if (ret) {
+ perror("isst_get_ctdp_levels");
+ return ret;
+ }
+ debug_printf("Current_level: %d\n", pkg_dev.current_level);
+
+ ret = isst_get_pbf_info(cpu, pkg_dev.current_level, &pbf_info);
+ if (ret) {
+ perror("isst_get_pbf_info");
+ return ret;
+ }
+ debug_printf("p1_high: %d p1_low: %d\n", pbf_info.p1_high,
+ pbf_info.p1_low);
+
+ ret = set_core_priority_and_min(cpu, pbf_info.core_cpumask_size,
+ pbf_info.core_cpumask,
+ pbf_info.p1_high, pbf_info.p1_low);
+ if (ret) {
+ perror("set_core_priority_and_min");
+ return ret;
+ }
+
+ ret = isst_pm_qos_config(cpu, 1, 1);
+ if (ret) {
+ perror("isst_pm_qos_config");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void set_pbf_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
+ void *arg4)
+{
+ int ret;
+ int status = *(int *)arg4;
+
+ if (is_clx_n_platform()) {
+ if (status) {
+ ret = 0;
+ if (auto_mode)
+ set_clx_pbf_cpufreq_scaling_min_max(cpu);
+
+ } else {
+ ret = -1;
+ if (auto_mode) {
+ set_scaling_max_to_cpuinfo_max(cpu);
+ set_scaling_min_to_cpuinfo_min(cpu);
+ }
+ }
+ goto disp_result;
+ }
+
+ if (auto_mode) {
+ if (status) {
+ ret = set_pbf_core_power(cpu);
+ if (ret)
+ goto disp_result;
+ } else {
+ isst_pm_qos_config(cpu, 0, 0);
+ }
+ }
+
+ ret = isst_set_pbf_fact_status(cpu, 1, status);
+ if (ret) {
+ perror("isst_set_pbf");
+ if (auto_mode)
+ isst_pm_qos_config(cpu, 0, 0);
+ } else {
+ if (auto_mode) {
+ if (status)
+ set_scaling_min_to_cpuinfo_max(cpu);
+ else
+ set_scaling_min_to_cpuinfo_min(cpu);
+ }
+ }
+
+disp_result:
+ if (status)
+ isst_display_result(cpu, outf, "base-freq", "enable",
+ ret);
else
- for_each_online_package_in_set(set_pbf_for_cpu, NULL, NULL,
- NULL, &status);
- isst_ctdp_display_information_end(outf);
+ isst_display_result(cpu, outf, "base-freq", "disable",
+ ret);
}
-static void set_pbf_disable(void)
+static void set_pbf_enable(int arg)
{
- int status = 0;
+ int enable = arg;
if (cmd_help) {
- fprintf(stderr,
- "Disable Intel Speed Select Technology base frequency feature [No command arguments are required]\n");
+ if (enable) {
+ fprintf(stderr,
+ "Enable Intel Speed Select Technology base frequency feature\n");
+ fprintf(stderr,
+ "\tOptional Arguments: -a|--auto : Use priority of cores to set core-power associations\n");
+ } else {
+
+ fprintf(stderr,
+ "Disable Intel Speed Select Technology base frequency feature\n");
+ fprintf(stderr,
+ "\tOptional Arguments: -a|--auto : Also disable core-power associations\n");
+ }
exit(0);
}
isst_ctdp_display_information_start(outf);
if (max_target_cpus)
for_each_online_target_cpu_in_set(set_pbf_for_cpu, NULL, NULL,
- NULL, &status);
+ NULL, &enable);
else
for_each_online_package_in_set(set_pbf_for_cpu, NULL, NULL,
- NULL, &status);
+ NULL, &enable);
isst_ctdp_display_information_end(outf);
}
@@ -925,7 +1461,7 @@ static void dump_fact_config_for_cpu(int cpu, void *arg1, void *arg2,
fact_avx, &fact_info);
}
-static void dump_fact_config(void)
+static void dump_fact_config(int arg)
{
if (cmd_help) {
fprintf(stderr,
@@ -960,73 +1496,156 @@ static void set_fact_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
int ret;
int status = *(int *)arg4;
+ if (auto_mode) {
+ if (status) {
+ ret = isst_pm_qos_config(cpu, 1, 1);
+ if (ret)
+ goto disp_results;
+ } else {
+ isst_pm_qos_config(cpu, 0, 0);
+ }
+ }
+
ret = isst_set_pbf_fact_status(cpu, 0, status);
- if (ret)
+ if (ret) {
perror("isst_set_fact");
- else {
- if (status) {
- struct isst_pkg_ctdp pkg_dev;
+ if (auto_mode)
+ isst_pm_qos_config(cpu, 0, 0);
- ret = isst_get_ctdp_levels(cpu, &pkg_dev);
- if (ret) {
- isst_display_result(cpu, outf, "turbo-freq",
- "enable", ret);
- return;
- }
+ goto disp_results;
+ }
+
+ /* Set TRL */
+ if (status) {
+ struct isst_pkg_ctdp pkg_dev;
+
+ ret = isst_get_ctdp_levels(cpu, &pkg_dev);
+ if (!ret)
ret = isst_set_trl(cpu, fact_trl);
- isst_display_result(cpu, outf, "turbo-freq", "enable",
- ret);
- } else {
- /* Since we modified TRL during Fact enable, restore it */
- isst_set_trl_from_current_tdp(cpu, fact_trl);
- isst_display_result(cpu, outf, "turbo-freq", "disable",
- ret);
- }
+ if (ret && auto_mode)
+ isst_pm_qos_config(cpu, 0, 0);
+ }
+
+disp_results:
+ if (status) {
+ isst_display_result(cpu, outf, "turbo-freq", "enable", ret);
+ } else {
+ /* Since we modified TRL during Fact enable, restore it */
+ isst_set_trl_from_current_tdp(cpu, fact_trl);
+ isst_display_result(cpu, outf, "turbo-freq", "disable", ret);
}
}
-static void set_fact_enable(void)
+static void set_fact_enable(int arg)
{
- int status = 1;
+ int i, ret, enable = arg;
if (cmd_help) {
- fprintf(stderr,
- "Enable Intel Speed Select Technology Turbo frequency feature\n");
- fprintf(stderr,
- "Optional: -t|--trl : Specify turbo ratio limit\n");
+ if (enable) {
+ fprintf(stderr,
+ "Enable Intel Speed Select Technology Turbo frequency feature\n");
+ fprintf(stderr,
+ "Optional: -t|--trl : Specify turbo ratio limit\n");
+ fprintf(stderr,
+ "\tOptional Arguments: -a|--auto : Designate specified target CPUs with");
+ fprintf(stderr,
+ "-C|--cpu option as as high priority using core-power feature\n");
+ } else {
+ fprintf(stderr,
+ "Disable Intel Speed Select Technology turbo frequency feature\n");
+ fprintf(stderr,
+ "Optional: -t|--trl : Specify turbo ratio limit\n");
+ fprintf(stderr,
+ "\tOptional Arguments: -a|--auto : Also disable core-power associations\n");
+ }
exit(0);
}
isst_ctdp_display_information_start(outf);
if (max_target_cpus)
for_each_online_target_cpu_in_set(set_fact_for_cpu, NULL, NULL,
- NULL, &status);
+ NULL, &enable);
else
for_each_online_package_in_set(set_fact_for_cpu, NULL, NULL,
- NULL, &status);
+ NULL, &enable);
isst_ctdp_display_information_end(outf);
-}
-static void set_fact_disable(void)
-{
- int status = 0;
+ if (enable && auto_mode) {
+ /*
+ * When we adjust CLOS param, we have to set for siblings also.
+ * So for the each user specified CPU, also add the sibling
+ * in the present_cpu_mask.
+ */
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ char buffer[128], sibling_list[128], *cpu_str;
+ int fd, len;
- if (cmd_help) {
- fprintf(stderr,
- "Disable Intel Speed Select Technology turbo frequency feature\n");
- fprintf(stderr,
- "Optional: -t|--trl : Specify turbo ratio limit\n");
- exit(0);
+ if (!CPU_ISSET_S(i, target_cpumask_size, target_cpumask))
+ continue;
+
+ snprintf(buffer, sizeof(buffer),
+ "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", i);
+
+ fd = open(buffer, O_RDONLY);
+ if (fd < 0)
+ continue;
+
+ len = read(fd, sibling_list, sizeof(sibling_list) - 1);
+ close(fd);
+
+ if (len <= 0)
+ continue;
+
+ sibling_list[len] = '\0'; /* terminate before strtok() */
+
+ cpu_str = strtok(sibling_list, ",");
+ while (cpu_str != NULL) {
+ int cpu;
+
+ sscanf(cpu_str, "%d", &cpu);
+ CPU_SET_S(cpu, target_cpumask_size, target_cpumask);
+ cpu_str = strtok(NULL, ",");
+ }
+ }
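
The sibling walk above splits thread_siblings_list on commas, which matches the plain "a,b" form x86 SMT pairs expose; sysfs cpulists can in general also contain ranges such as "0-3". A range-aware variant, as a sketch (parse_cpulist is a hypothetical helper, not part of the tool):

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical helper: walk a sysfs cpulist like "0,2-4,7" and
	 * invoke cb for every CPU it names. */
	static void parse_cpulist(const char *list,
				  void (*cb)(int cpu, void *priv), void *priv)
	{
		char buf[128], *tok, *save;
		int a, b, cpu;

		snprintf(buf, sizeof(buf), "%s", list);
		for (tok = strtok_r(buf, ",\n", &save); tok;
		     tok = strtok_r(NULL, ",\n", &save)) {
			if (sscanf(tok, "%d-%d", &a, &b) == 2) {
				for (cpu = a; cpu <= b; cpu++)
					cb(cpu, priv);
			} else if (sscanf(tok, "%d", &a) == 1) {
				cb(a, priv);
			}
		}
	}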
+
+ for (i = 0; i < get_topo_max_cpus(); ++i) {
+ int clos;
+
+ if (!CPU_ISSET_S(i, present_cpumask_size, present_cpumask))
+ continue;
+
+ ret = set_clos_param(i, 0, 0, 0, 0, 0xff);
+ if (ret)
+ goto error_disp;
+
+ ret = set_clos_param(i, 1, 15, 15, 0, 0xff);
+ if (ret)
+ goto error_disp;
+
+ ret = set_clos_param(i, 2, 15, 15, 0, 0xff);
+ if (ret)
+ goto error_disp;
+
+ ret = set_clos_param(i, 3, 15, 15, 0, 0xff);
+ if (ret)
+ goto error_disp;
+
+ if (CPU_ISSET_S(i, target_cpumask_size, target_cpumask))
+ clos = 0;
+ else
+ clos = 3;
+
+ debug_printf("Associate cpu: %d clos: %d\n", i, clos);
+ ret = isst_clos_associate(i, clos);
+ if (ret)
+ goto error_disp;
+ }
+ isst_display_result(i, outf, "turbo-freq --auto", "enable", 0);
}
- isst_ctdp_display_information_start(outf);
- if (max_target_cpus)
- for_each_online_target_cpu_in_set(set_fact_for_cpu, NULL, NULL,
- NULL, &status);
- else
- for_each_online_package_in_set(set_fact_for_cpu, NULL, NULL,
- NULL, &status);
- isst_ctdp_display_information_end(outf);
+ return;
+
+error_disp:
+ isst_display_result(i, outf, "turbo-freq --auto", "enable", ret);
+
}
static void enable_clos_qos_config(int cpu, void *arg1, void *arg2, void *arg3,
@@ -1036,31 +1655,36 @@ static void enable_clos_qos_config(int cpu, void *arg1, void *arg2, void *arg3,
int status = *(int *)arg4;
ret = isst_pm_qos_config(cpu, status, clos_priority_type);
- if (ret) {
+ if (ret)
perror("isst_pm_qos_config");
- } else {
- if (status)
- isst_display_result(cpu, outf, "core-power", "enable",
- ret);
- else
- isst_display_result(cpu, outf, "core-power", "disable",
- ret);
- }
+
+ if (status)
+ isst_display_result(cpu, outf, "core-power", "enable",
+ ret);
+ else
+ isst_display_result(cpu, outf, "core-power", "disable",
+ ret);
}
-static void set_clos_enable(void)
+static void set_clos_enable(int arg)
{
- int status = 1;
+ int enable = arg;
if (cmd_help) {
- fprintf(stderr, "Enable core-power for a package/die\n");
- fprintf(stderr,
- "\tClos Enable: Specify priority type with [--priority|-p]\n");
- fprintf(stderr, "\t\t 0: Proportional, 1: Ordered\n");
+ if (enable) {
+ fprintf(stderr,
+ "Enable core-power for a package/die\n");
+ fprintf(stderr,
+ "\tClos Enable: Specify priority type with [--priority|-p]\n");
+ fprintf(stderr, "\t\t 0: Proportional, 1: Ordered\n");
+ } else {
+ fprintf(stderr,
+ "Disable core-power: [No command arguments are required]\n");
+ }
exit(0);
}
- if (cpufreq_sysfs_present()) {
+ if (enable && cpufreq_sysfs_present()) {
fprintf(stderr,
"cpufreq subsystem and core-power enable will interfere with each other!\n");
}
@@ -1068,30 +1692,10 @@ static void set_clos_enable(void)
isst_ctdp_display_information_start(outf);
if (max_target_cpus)
for_each_online_target_cpu_in_set(enable_clos_qos_config, NULL,
- NULL, NULL, &status);
+ NULL, NULL, &enable);
else
for_each_online_package_in_set(enable_clos_qos_config, NULL,
- NULL, NULL, &status);
- isst_ctdp_display_information_end(outf);
-}
-
-static void set_clos_disable(void)
-{
- int status = 0;
-
- if (cmd_help) {
- fprintf(stderr,
- "Disable core-power: [No command arguments are required]\n");
- exit(0);
- }
-
- isst_ctdp_display_information_start(outf);
- if (max_target_cpus)
- for_each_online_target_cpu_in_set(enable_clos_qos_config, NULL,
- NULL, NULL, &status);
- else
- for_each_online_package_in_set(enable_clos_qos_config, NULL,
- NULL, NULL, &status);
+ NULL, NULL, &enable);
isst_ctdp_display_information_end(outf);
}
@@ -1109,7 +1713,7 @@ static void dump_clos_config_for_cpu(int cpu, void *arg1, void *arg2,
&clos_config);
}
-static void dump_clos_config(void)
+static void dump_clos_config(int arg)
{
if (cmd_help) {
fprintf(stderr,
@@ -1145,7 +1749,7 @@ static void get_clos_info_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
isst_clos_display_clos_information(cpu, outf, enable, prio_type);
}
-static void dump_clos_info(void)
+static void dump_clos_info(int arg)
{
if (cmd_help) {
fprintf(stderr,
@@ -1188,7 +1792,7 @@ static void set_clos_config_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
isst_display_result(cpu, outf, "core-power", "config", ret);
}
-static void set_clos_config(void)
+static void set_clos_config(int arg)
{
if (cmd_help) {
fprintf(stderr,
@@ -1198,9 +1802,9 @@ static void set_clos_config(void)
fprintf(stderr, "\tSpecify clos EPP with [--epp|-e]\n");
fprintf(stderr,
"\tSpecify clos Proportional Priority [--weight|-w]\n");
- fprintf(stderr, "\tSpecify clos min with [--min|-n]\n");
- fprintf(stderr, "\tSpecify clos max with [--max|-m]\n");
- fprintf(stderr, "\tSpecify clos desired with [--desired|-d]\n");
+ fprintf(stderr, "\tSpecify clos min in MHz with [--min|-n]\n");
+ fprintf(stderr, "\tSpecify clos max in MHz with [--max|-m]\n");
+ fprintf(stderr, "\tSpecify clos desired in MHz with [--desired|-d]\n");
exit(0);
}
@@ -1222,7 +1826,7 @@ static void set_clos_config(void)
clos_min = 0;
}
if (clos_max < 0) {
- fprintf(stderr, "clos max is not specified, default: 0xff\n");
+ fprintf(stderr, "clos max is not specified, default: 25500 MHz\n");
clos_max = 0xff;
}
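
The 0xff fallback and the "25500 MHz" text agree because CLOS min/max/desired are stored as ratios in units of DISP_FREQ_MULTIPLIER (100 MHz), and the option parser below divides user MHz input by the same constant. The conversion, spelled out as a sketch:

	#define DISP_FREQ_MULTIPLIER 100	/* MHz per ratio step */

	static int mhz_to_ratio(int mhz)   { return mhz / DISP_FREQ_MULTIPLIER; }
	static int ratio_to_mhz(int ratio) { return ratio * DISP_FREQ_MULTIPLIER; }

	/* mhz_to_ratio(25500) == 0xff; ratio_to_mhz(0xff) == 25500 */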
if (clos_desired < 0) {
@@ -1252,7 +1856,7 @@ static void set_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
isst_display_result(cpu, outf, "core-power", "assoc", ret);
}
-static void set_clos_assoc(void)
+static void set_clos_assoc(int arg)
{
if (cmd_help) {
fprintf(stderr, "Associate a clos id to a CPU\n");
@@ -1286,7 +1890,7 @@ static void get_clos_assoc_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
isst_clos_display_assoc_information(cpu, outf, clos);
}
-static void get_clos_assoc(void)
+static void get_clos_assoc(int arg)
{
if (cmd_help) {
fprintf(stderr, "Get associate clos id to a CPU\n");
@@ -1306,27 +1910,36 @@ static void get_clos_assoc(void)
isst_ctdp_display_information_end(outf);
}
+static struct process_cmd_struct clx_n_cmds[] = {
+ { "perf-profile", "info", dump_isst_config, 0 },
+ { "base-freq", "info", dump_pbf_config, 0 },
+ { "base-freq", "enable", set_pbf_enable, 1 },
+ { "base-freq", "disable", set_pbf_enable, 0 },
+ { NULL, NULL, NULL, 0 }
+};
+
static struct process_cmd_struct isst_cmds[] = {
- { "perf-profile", "get-lock-status", get_tdp_locked },
- { "perf-profile", "get-config-levels", get_tdp_levels },
- { "perf-profile", "get-config-version", get_tdp_version },
- { "perf-profile", "get-config-enabled", get_tdp_enabled },
- { "perf-profile", "get-config-current-level", get_tdp_current_level },
- { "perf-profile", "set-config-level", set_tdp_level },
- { "perf-profile", "info", dump_isst_config },
- { "base-freq", "info", dump_pbf_config },
- { "base-freq", "enable", set_pbf_enable },
- { "base-freq", "disable", set_pbf_disable },
- { "turbo-freq", "info", dump_fact_config },
- { "turbo-freq", "enable", set_fact_enable },
- { "turbo-freq", "disable", set_fact_disable },
- { "core-power", "info", dump_clos_info },
- { "core-power", "enable", set_clos_enable },
- { "core-power", "disable", set_clos_disable },
- { "core-power", "config", set_clos_config },
- { "core-power", "get-config", dump_clos_config },
- { "core-power", "assoc", set_clos_assoc },
- { "core-power", "get-assoc", get_clos_assoc },
+ { "perf-profile", "get-lock-status", get_tdp_locked, 0 },
+ { "perf-profile", "get-config-levels", get_tdp_levels, 0 },
+ { "perf-profile", "get-config-version", get_tdp_version, 0 },
+ { "perf-profile", "get-config-enabled", get_tdp_enabled, 0 },
+ { "perf-profile", "get-config-current-level", get_tdp_current_level,
+ 0 },
+ { "perf-profile", "set-config-level", set_tdp_level, 0 },
+ { "perf-profile", "info", dump_isst_config, 0 },
+ { "base-freq", "info", dump_pbf_config, 0 },
+ { "base-freq", "enable", set_pbf_enable, 1 },
+ { "base-freq", "disable", set_pbf_enable, 0 },
+ { "turbo-freq", "info", dump_fact_config, 0 },
+ { "turbo-freq", "enable", set_fact_enable, 1 },
+ { "turbo-freq", "disable", set_fact_enable, 0 },
+ { "core-power", "info", dump_clos_info, 0 },
+ { "core-power", "enable", set_clos_enable, 1 },
+ { "core-power", "disable", set_clos_enable, 0 },
+ { "core-power", "config", set_clos_config, 0 },
+ { "core-power", "get-config", dump_clos_config, 0 },
+ { "core-power", "assoc", set_clos_assoc, 0 },
+ { "core-power", "get-assoc", get_clos_assoc, 0 },
{ NULL, NULL, NULL, 0 }
};
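
The new int member lets one handler serve both the enable and disable rows, which is what retires the old set_pbf_disable()/set_fact_disable()/set_clos_disable() twins. A self-contained sketch of the dispatch pattern (names illustrative):

	#include <stdio.h>
	#include <string.h>

	struct cmd {
		const char *feature, *command;
		void (*fn)(int arg);
		int arg;
	};

	static void set_enable(int arg)
	{
		printf("%s\n", arg ? "enable" : "disable");
	}

	static const struct cmd cmds[] = {
		{ "base-freq", "enable",  set_enable, 1 },
		{ "base-freq", "disable", set_enable, 0 },
		{ NULL, NULL, NULL, 0 }
	};

	static void dispatch(const char *feature, const char *command)
	{
		int i;

		for (i = 0; cmds[i].feature; i++) {
			if (!strcmp(cmds[i].feature, feature) &&
			    !strcmp(cmds[i].command, command)) {
				cmds[i].fn(cmds[i].arg);
				return;
			}
		}
	}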
@@ -1417,15 +2030,19 @@ static void parse_cmd_args(int argc, int start, char **argv)
{ "max", required_argument, 0, 'm' },
{ "priority", required_argument, 0, 'p' },
{ "weight", required_argument, 0, 'w' },
+ { "auto", no_argument, 0, 'a' },
{ 0, 0, 0, 0 }
};
option_index = start;
optind = start + 1;
- while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:ho",
+ while ((opt = getopt_long(argc, argv, "b:l:t:c:d:e:n:m:p:w:hoa",
long_options, &option_index)) != -1) {
switch (opt) {
+ case 'a':
+ auto_mode = 1;
+ break;
case 'b':
fact_bucket = atoi(optarg);
break;
@@ -1459,15 +2076,18 @@ static void parse_cmd_args(int argc, int start, char **argv)
break;
case 'd':
clos_desired = atoi(optarg);
+ clos_desired /= DISP_FREQ_MULTIPLIER;
break;
case 'e':
clos_epp = atoi(optarg);
break;
case 'n':
clos_min = atoi(optarg);
+ clos_min /= DISP_FREQ_MULTIPLIER;
break;
case 'm':
clos_max = atoi(optarg);
+ clos_max /= DISP_FREQ_MULTIPLIER;
break;
case 'p':
clos_priority_type = atoi(optarg);
@@ -1489,12 +2109,15 @@ static void isst_help(void)
TDP, etc.\n");
printf("\nCommands : For feature=perf-profile\n");
printf("\tinfo\n");
- printf("\tget-lock-status\n");
- printf("\tget-config-levels\n");
- printf("\tget-config-version\n");
- printf("\tget-config-enabled\n");
- printf("\tget-config-current-level\n");
- printf("\tset-config-level\n");
+
+ if (!is_clx_n_platform()) {
+ printf("\tget-lock-status\n");
+ printf("\tget-config-levels\n");
+ printf("\tget-config-version\n");
+ printf("\tget-config-enabled\n");
+ printf("\tget-config-current-level\n");
+ printf("\tset-config-level\n");
+ }
}
static void pbf_help(void)
@@ -1544,7 +2167,15 @@ static struct process_cmd_help_struct isst_help_cmds[] = {
{ NULL, NULL }
};
-void process_command(int argc, char **argv)
+static struct process_cmd_help_struct clx_n_help_cmds[] = {
+ { "perf-profile", isst_help },
+ { "base-freq", pbf_help },
+ { NULL, NULL }
+};
+
+void process_command(int argc, char **argv,
+ struct process_cmd_help_struct *help_cmds,
+ struct process_cmd_struct *cmds)
{
int i = 0, matched = 0;
char *feature = argv[optind];
@@ -1555,23 +2186,24 @@ void process_command(int argc, char **argv)
debug_printf("feature name [%s] command [%s]\n", feature, cmd);
if (!strcmp(cmd, "-h") || !strcmp(cmd, "--help")) {
- while (isst_help_cmds[i].feature) {
- if (!strcmp(isst_help_cmds[i].feature, feature)) {
- isst_help_cmds[i].process_fn();
+ while (help_cmds[i].feature) {
+ if (!strcmp(help_cmds[i].feature, feature)) {
+ help_cmds[i].process_fn();
exit(0);
}
++i;
}
}
- create_cpu_map();
+ if (!is_clx_n_platform())
+ create_cpu_map();
i = 0;
- while (isst_cmds[i].feature) {
- if (!strcmp(isst_cmds[i].feature, feature) &&
- !strcmp(isst_cmds[i].command, cmd)) {
+ while (cmds[i].feature) {
+ if (!strcmp(cmds[i].feature, feature) &&
+ !strcmp(cmds[i].command, cmd)) {
parse_cmd_args(argc, optind + 1, argv);
- isst_cmds[i].process_fn();
+ cmds[i].process_fn(cmds[i].arg);
matched = 1;
break;
}
@@ -1682,17 +2314,23 @@ static void cmdline(int argc, char **argv)
fprintf(stderr, "Feature name and|or command not specified\n");
exit(0);
}
- update_cpu_model();
+ ret = update_cpu_model();
+ if (ret)
+ err(-1, "Invalid CPU model (%d)\n", cpu_model);
printf("Intel(R) Speed Select Technology\n");
printf("Executing on CPU model:%d[0x%x]\n", cpu_model, cpu_model);
set_max_cpu_num();
set_cpu_present_cpu_mask();
set_cpu_target_cpu_mask();
- ret = isst_fill_platform_info();
- if (ret)
- goto out;
- process_command(argc, argv);
+ if (!is_clx_n_platform()) {
+ ret = isst_fill_platform_info();
+ if (ret)
+ goto out;
+ process_command(argc, argv, isst_help_cmds, isst_cmds);
+ } else {
+ process_command(argc, argv, clx_n_help_cmds, clx_n_cmds);
+ }
out:
free_cpu_set(present_cpumask);
free_cpu_set(target_cpumask);
diff --git a/tools/power/x86/intel-speed-select/isst-core.c b/tools/power/x86/intel-speed-select/isst-core.c
index 6dee5332c9d3..d14c7bcd327a 100644
--- a/tools/power/x86/intel-speed-select/isst-core.c
+++ b/tools/power/x86/intel-speed-select/isst-core.c
@@ -13,8 +13,14 @@ int isst_get_ctdp_levels(int cpu, struct isst_pkg_ctdp *pkg_dev)
ret = isst_send_mbox_command(cpu, CONFIG_TDP,
CONFIG_TDP_GET_LEVELS_INFO, 0, 0, &resp);
- if (ret)
- return ret;
+ if (ret) {
+ pkg_dev->levels = 0;
+ pkg_dev->locked = 1;
+ pkg_dev->current_level = 0;
+ pkg_dev->version = 0;
+ pkg_dev->enabled = 0;
+ return 0;
+ }
debug_printf("cpu:%d CONFIG_TDP_GET_LEVELS_INFO resp:%x\n", cpu, resp);
@@ -95,6 +101,69 @@ int isst_get_pwr_info(int cpu, int config_index,
return 0;
}
+void isst_get_uncore_p0_p1_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP,
+ CONFIG_TDP_GET_UNCORE_P0_P1_INFO, 0,
+ config_index, &resp);
+ if (ret) {
+ ctdp_level->uncore_p0 = 0;
+ ctdp_level->uncore_p1 = 0;
+ return;
+ }
+
+ ctdp_level->uncore_p0 = resp & GENMASK(7, 0);
+ ctdp_level->uncore_p1 = (resp & GENMASK(15, 8)) >> 8;
+ debug_printf(
+ "cpu:%d ctdp:%d CONFIG_TDP_GET_UNCORE_P0_P1_INFO resp:%x uncore p0:%d uncore p1:%d\n",
+ cpu, config_index, resp, ctdp_level->uncore_p0,
+ ctdp_level->uncore_p1);
+}
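
GENMASK(h, l) above is the kernel-style contiguous bit mask covering bits l through h, presumably supplied by the tool's shared headers rather than libc (an assumption here). A self-contained equivalent, as a sketch:

	/* Mask with bits h..l (inclusive) set; GENMASK(15, 8) == 0xff00 */
	#define GENMASK(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	/* field extraction as done above:
	 * uncore_p1 = (resp & GENMASK(15, 8)) >> 8;
	 */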
+
+void isst_get_p1_info(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_P1_INFO, 0,
+ config_index, &resp);
+ if (ret) {
+ ctdp_level->sse_p1 = 0;
+ ctdp_level->avx2_p1 = 0;
+ ctdp_level->avx512_p1 = 0;
+ return;
+ }
+
+ ctdp_level->sse_p1 = resp & GENMASK(7, 0);
+ ctdp_level->avx2_p1 = (resp & GENMASK(15, 8)) >> 8;
+ ctdp_level->avx512_p1 = (resp & GENMASK(23, 16)) >> 16;
+ debug_printf(
+ "cpu:%d ctdp:%d CONFIG_TDP_GET_P1_INFO resp:%x sse_p1:%d avx2_p1:%d avx512_p1:%d\n",
+ cpu, config_index, resp, ctdp_level->sse_p1,
+ ctdp_level->avx2_p1, ctdp_level->avx512_p1);
+}
+
+void isst_get_uncore_mem_freq(int cpu, int config_index,
+ struct isst_pkg_ctdp_level_info *ctdp_level)
+{
+ unsigned int resp;
+ int ret;
+ ret = isst_send_mbox_command(cpu, CONFIG_TDP, CONFIG_TDP_GET_MEM_FREQ,
+ 0, config_index, &resp);
+ if (ret) {
+ ctdp_level->mem_freq = 0;
+ return;
+ }
+
+ ctdp_level->mem_freq = resp & GENMASK(7, 0);
+ debug_printf(
+ "cpu:%d ctdp:%d CONFIG_TDP_GET_MEM_FREQ resp:%x uncore mem_freq:%d\n",
+ cpu, config_index, resp, ctdp_level->mem_freq);
+}
+
int isst_get_tjmax_info(int cpu, int config_index,
struct isst_pkg_ctdp_level_info *ctdp_level)
{
@@ -149,6 +218,27 @@ int isst_get_coremask_info(int cpu, int config_index,
return 0;
}
+int isst_get_get_trl_from_msr(int cpu, int *trl)
+{
+ unsigned long long msr_trl;
+ int ret;
+
+ ret = isst_send_msr_command(cpu, 0x1AD, 0, &msr_trl);
+ if (ret)
+ return ret;
+
+ trl[0] = msr_trl & GENMASK(7, 0);
+ trl[1] = (msr_trl & GENMASK(15, 8)) >> 8;
+ trl[2] = (msr_trl & GENMASK(23, 16)) >> 16;
+ trl[3] = (msr_trl & GENMASK(31, 24)) >> 24;
+ trl[4] = (msr_trl & GENMASK(39, 32)) >> 32;
+ trl[5] = (msr_trl & GENMASK(47, 40)) >> 40;
+ trl[6] = (msr_trl & GENMASK(55, 48)) >> 48;
+ trl[7] = (msr_trl & GENMASK(63, 56)) >> 56;
+
+ return 0;
+}
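
MSR 0x1AD (MSR_TURBO_RATIO_LIMIT) packs eight ratio limits one byte each, so the unrolled GENMASK extractions are equivalent to a byte loop, sketched here:

	/* Equivalent loop form of the extraction above */
	static void trl_from_msr(unsigned long long msr_trl, int *trl)
	{
		int i;

		for (i = 0; i < 8; i++)
			trl[i] = (msr_trl >> (i * 8)) & 0xff;
	}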
+
int isst_get_get_trl(int cpu, int level, int avx_level, int *trl)
{
unsigned int req, resp;
@@ -245,12 +335,15 @@ int isst_set_tdp_level(int cpu, int tdp_level)
int isst_get_pbf_info(int cpu, int level, struct isst_pbf_info *pbf_info)
{
+ int i, ret, core_cnt, max;
unsigned int req, resp;
- int i, ret;
pbf_info->core_cpumask_size = alloc_cpu_set(&pbf_info->core_cpumask);
- for (i = 0; i < 2; ++i) {
+ core_cnt = get_core_count(get_physical_package_id(cpu), get_physical_die_id(cpu));
+ max = core_cnt > 32 ? 2 : 1;
+
+ for (i = 0; i < max; ++i) {
unsigned long long mask;
int count;
@@ -258,7 +351,7 @@ int isst_get_pbf_info(int cpu, int level, struct isst_pbf_info *pbf_info)
CONFIG_TDP_PBF_GET_CORE_MASK_INFO,
0, (i << 8) | level, &resp);
if (ret)
- return ret;
+ break;
debug_printf(
"cpu:%d CONFIG_TDP_PBF_GET_CORE_MASK_INFO resp:%x\n",
@@ -323,7 +416,7 @@ int isst_set_pbf_fact_status(int cpu, int pbf, int enable)
ret = isst_get_ctdp_levels(cpu, &pkg_dev);
if (ret)
- return ret;
+ debug_printf("cpu:%d No support for dynamic ISST\n", cpu);
current_level = pkg_dev.current_level;
@@ -553,7 +646,6 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
i);
ctdp_level = &pkg_dev->ctdp_level[i];
- ctdp_level->processed = 1;
ctdp_level->level = i;
ctdp_level->control_cpu = cpu;
ctdp_level->pkg_id = get_physical_package_id(cpu);
@@ -561,7 +653,37 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
ret = isst_get_ctdp_control(cpu, i, ctdp_level);
if (ret)
- return ret;
+ continue;
+
+ pkg_dev->processed = 1;
+ ctdp_level->processed = 1;
+
+ if (ctdp_level->pbf_support) {
+ ret = isst_get_pbf_info(cpu, i, &ctdp_level->pbf_info);
+ if (!ret)
+ ctdp_level->pbf_found = 1;
+ }
+
+ if (ctdp_level->fact_support) {
+ ret = isst_get_fact_info(cpu, i,
+ &ctdp_level->fact_info);
+ if (ret)
+ return ret;
+ }
+
+ if (!pkg_dev->enabled) {
+ int freq;
+
+ freq = get_cpufreq_base_freq(cpu);
+ if (freq > 0) {
+ ctdp_level->sse_p1 = freq / 100000;
+ ctdp_level->tdp_ratio = ctdp_level->sse_p1;
+ }
+
+ isst_get_get_trl_from_msr(cpu, ctdp_level->trl_sse_active_cores);
+ isst_get_trl_bucket_info(cpu, &ctdp_level->buckets_info);
+ continue;
+ }
ret = isst_get_tdp_info(cpu, i, ctdp_level);
if (ret)
@@ -600,22 +722,11 @@ int isst_get_process_ctdp(int cpu, int tdp_level, struct isst_pkg_ctdp *pkg_dev)
if (ret)
return ret;
- if (ctdp_level->pbf_support) {
- ret = isst_get_pbf_info(cpu, i, &ctdp_level->pbf_info);
- if (!ret)
- ctdp_level->pbf_found = 1;
- }
-
- if (ctdp_level->fact_support) {
- ret = isst_get_fact_info(cpu, i,
- &ctdp_level->fact_info);
- if (ret)
- return ret;
- }
+ isst_get_uncore_p0_p1_info(cpu, i, ctdp_level);
+ isst_get_p1_info(cpu, i, ctdp_level);
+ isst_get_uncore_mem_freq(cpu, i, ctdp_level);
}
- pkg_dev->processed = 1;
-
return 0;
}
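
When the levels mailbox is unavailable (pkg_dev->enabled is false), sse_p1 is derived from the cpufreq base frequency: kHz divided by 100000 yields the same 100 MHz ratio units used elsewhere. A sketch of what get_cpufreq_base_freq() can look like, assuming it is backed by the per-CPU cpufreq base_frequency attribute (that sysfs source is an assumption here):

	#include <stdio.h>

	/* Sketch: base frequency in kHz, or -1 on failure */
	static int get_cpufreq_base_freq_sketch(int cpu)
	{
		char path[128];
		FILE *fp;
		int khz = -1;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency",
			 cpu);
		fp = fopen(path, "r");
		if (!fp)
			return -1;
		if (fscanf(fp, "%d", &khz) != 1)
			khz = -1;
		fclose(fp);
		return khz;
	}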
@@ -649,6 +760,27 @@ int isst_pm_qos_config(int cpu, int enable_clos, int priority_type)
unsigned int req, resp;
int ret;
+ if (!enable_clos) {
+ struct isst_pkg_ctdp pkg_dev;
+ struct isst_pkg_ctdp_level_info ctdp_level;
+
+ ret = isst_get_ctdp_levels(cpu, &pkg_dev);
+ if (ret) {
+ debug_printf("isst_get_ctdp_levels\n");
+ return ret;
+ }
+
+ ret = isst_get_ctdp_control(cpu, pkg_dev.current_level,
+ &ctdp_level);
+ if (ret)
+ return ret;
+
+ if (ctdp_level.fact_enabled) {
+ debug_printf("Turbo-freq feature must be disabled first\n");
+ return -EINVAL;
+ }
+ }
+
ret = isst_send_mbox_command(cpu, CONFIG_CLOS, CLOS_PM_QOS_CONFIG, 0, 0,
&resp);
if (ret)
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index 40346d534f78..040dd09d5eee 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -6,8 +6,6 @@
#include "isst.h"
-#define DISP_FREQ_MULTIPLIER 100
-
static void printcpulist(int str_len, char *str, int mask_size,
cpu_set_t *cpu_mask)
{
@@ -204,6 +202,9 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
pbf_info->p1_low * DISP_FREQ_MULTIPLIER);
format_and_print(outf, disp_level + 1, header, value);
+ if (is_clx_n_platform())
+ return;
+
snprintf(header, sizeof(header), "tjunction-temperature(C)");
snprintf(value, sizeof(value), "%d", pbf_info->t_prochot);
format_and_print(outf, disp_level + 1, header, value);
@@ -314,7 +315,8 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
char value[256];
int i, base_level = 1;
- print_package_info(cpu, outf);
+ if (pkg_dev->processed)
+ print_package_info(cpu, outf);
for (i = 0; i <= pkg_dev->levels; ++i) {
struct isst_pkg_ctdp_level_info *ctdp_level;
@@ -334,27 +336,66 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
snprintf(value, sizeof(value), "%d", j);
format_and_print(outf, base_level + 4, header, value);
- snprintf(header, sizeof(header), "enable-cpu-mask");
- printcpumask(sizeof(value), value,
- ctdp_level->core_cpumask_size,
- ctdp_level->core_cpumask);
- format_and_print(outf, base_level + 4, header, value);
-
- snprintf(header, sizeof(header), "enable-cpu-list");
- printcpulist(sizeof(value), value,
- ctdp_level->core_cpumask_size,
- ctdp_level->core_cpumask);
- format_and_print(outf, base_level + 4, header, value);
+ if (ctdp_level->core_cpumask_size) {
+ snprintf(header, sizeof(header), "enable-cpu-mask");
+ printcpumask(sizeof(value), value,
+ ctdp_level->core_cpumask_size,
+ ctdp_level->core_cpumask);
+ format_and_print(outf, base_level + 4, header, value);
+
+ snprintf(header, sizeof(header), "enable-cpu-list");
+ printcpulist(sizeof(value), value,
+ ctdp_level->core_cpumask_size,
+ ctdp_level->core_cpumask);
+ format_and_print(outf, base_level + 4, header, value);
+ }
snprintf(header, sizeof(header), "thermal-design-power-ratio");
snprintf(value, sizeof(value), "%d", ctdp_level->tdp_ratio);
format_and_print(outf, base_level + 4, header, value);
snprintf(header, sizeof(header), "base-frequency(MHz)");
+ if (!ctdp_level->sse_p1)
+ ctdp_level->sse_p1 = ctdp_level->tdp_ratio;
snprintf(value, sizeof(value), "%d",
- ctdp_level->tdp_ratio * DISP_FREQ_MULTIPLIER);
+ ctdp_level->sse_p1 * DISP_FREQ_MULTIPLIER);
format_and_print(outf, base_level + 4, header, value);
+ if (ctdp_level->avx2_p1) {
+ snprintf(header, sizeof(header), "base-frequency-avx2(MHz)");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->avx2_p1 * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 4, header, value);
+ }
+
+ if (ctdp_level->avx512_p1) {
+ snprintf(header, sizeof(header), "base-frequency-avx512(MHz)");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->avx512_p1 * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 4, header, value);
+ }
+
+ if (ctdp_level->uncore_p1) {
+ snprintf(header, sizeof(header), "uncore-frequency-min(MHz)");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->uncore_p1 * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 4, header, value);
+ }
+
+ if (ctdp_level->uncore_p0) {
+ snprintf(header, sizeof(header), "uncore-frequency-max(MHz)");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->uncore_p0 * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 4, header, value);
+ }
+
+ if (ctdp_level->mem_freq) {
+ snprintf(header, sizeof(header), "mem-frequency(MHz)");
+ snprintf(value, sizeof(value), "%d",
+ ctdp_level->mem_freq * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 4, header, value);
+ }
+
snprintf(header, sizeof(header),
"speed-select-turbo-freq");
if (ctdp_level->fact_support) {
@@ -377,13 +418,26 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
snprintf(value, sizeof(value), "unsupported");
format_and_print(outf, base_level + 4, header, value);
- snprintf(header, sizeof(header), "thermal-design-power(W)");
- snprintf(value, sizeof(value), "%d", ctdp_level->pkg_tdp);
- format_and_print(outf, base_level + 4, header, value);
+ if (is_clx_n_platform()) {
+ if (ctdp_level->pbf_support)
+ _isst_pbf_display_information(cpu, outf,
+ tdp_level,
+ &ctdp_level->pbf_info,
+ base_level + 4);
+ continue;
+ }
- snprintf(header, sizeof(header), "tjunction-max(C)");
- snprintf(value, sizeof(value), "%d", ctdp_level->t_proc_hot);
- format_and_print(outf, base_level + 4, header, value);
+ if (ctdp_level->pkg_tdp) {
+ snprintf(header, sizeof(header), "thermal-design-power(W)");
+ snprintf(value, sizeof(value), "%d", ctdp_level->pkg_tdp);
+ format_and_print(outf, base_level + 4, header, value);
+ }
+
+ if (ctdp_level->t_proc_hot) {
+ snprintf(header, sizeof(header), "tjunction-max(C)");
+ snprintf(value, sizeof(value), "%d", ctdp_level->t_proc_hot);
+ format_and_print(outf, base_level + 4, header, value);
+ }
snprintf(header, sizeof(header), "turbo-ratio-limits-sse");
format_and_print(outf, base_level + 4, header, NULL);
@@ -402,41 +456,41 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
DISP_FREQ_MULTIPLIER);
format_and_print(outf, base_level + 6, header, value);
}
- snprintf(header, sizeof(header), "turbo-ratio-limits-avx");
- format_and_print(outf, base_level + 4, header, NULL);
- for (j = 0; j < 8; ++j) {
- snprintf(header, sizeof(header), "bucket-%d", j);
- format_and_print(outf, base_level + 5, header, NULL);
- snprintf(header, sizeof(header), "core-count");
- snprintf(value, sizeof(value), "%llu", (ctdp_level->buckets_info >> (j * 8)) & 0xff);
- format_and_print(outf, base_level + 6, header, value);
+ if (ctdp_level->trl_avx_active_cores[0]) {
+ snprintf(header, sizeof(header), "turbo-ratio-limits-avx2");
+ format_and_print(outf, base_level + 4, header, NULL);
+ for (j = 0; j < 8; ++j) {
+ snprintf(header, sizeof(header), "bucket-%d", j);
+ format_and_print(outf, base_level + 5, header, NULL);
- snprintf(header, sizeof(header),
- "max-turbo-frequency(MHz)");
- snprintf(value, sizeof(value), "%d",
- ctdp_level->trl_avx_active_cores[j] *
- DISP_FREQ_MULTIPLIER);
- format_and_print(outf, base_level + 6, header, value);
+ snprintf(header, sizeof(header), "core-count");
+ snprintf(value, sizeof(value), "%llu", (ctdp_level->buckets_info >> (j * 8)) & 0xff);
+ format_and_print(outf, base_level + 6, header, value);
+
+ snprintf(header, sizeof(header), "max-turbo-frequency(MHz)");
+ snprintf(value, sizeof(value), "%d", ctdp_level->trl_avx_active_cores[j] * DISP_FREQ_MULTIPLIER);
+ format_and_print(outf, base_level + 6, header, value);
+ }
}
- snprintf(header, sizeof(header), "turbo-ratio-limits-avx512");
- format_and_print(outf, base_level + 4, header, NULL);
- for (j = 0; j < 8; ++j) {
- snprintf(header, sizeof(header), "bucket-%d", j);
- format_and_print(outf, base_level + 5, header, NULL);
+ if (ctdp_level->trl_avx_512_active_cores[0]) {
+ snprintf(header, sizeof(header), "turbo-ratio-limits-avx512");
+ format_and_print(outf, base_level + 4, header, NULL);
+ for (j = 0; j < 8; ++j) {
+ snprintf(header, sizeof(header), "bucket-%d", j);
+ format_and_print(outf, base_level + 5, header, NULL);
- snprintf(header, sizeof(header), "core-count");
- snprintf(value, sizeof(value), "%llu", (ctdp_level->buckets_info >> (j * 8)) & 0xff);
- format_and_print(outf, base_level + 6, header, value);
+ snprintf(header, sizeof(header), "core-count");
+ snprintf(value, sizeof(value), "%llu", (ctdp_level->buckets_info >> (j * 8)) & 0xff);
+ format_and_print(outf, base_level + 6, header, value);
- snprintf(header, sizeof(header),
- "max-turbo-frequency(MHz)");
- snprintf(value, sizeof(value), "%d",
- ctdp_level->trl_avx_512_active_cores[j] *
- DISP_FREQ_MULTIPLIER);
+ snprintf(header, sizeof(header), "max-turbo-frequency(MHz)");
+ snprintf(value, sizeof(value), "%d", ctdp_level->trl_avx_512_active_cores[j] * DISP_FREQ_MULTIPLIER);
format_and_print(outf, base_level + 6, header, value);
+ }
}
+
if (ctdp_level->pbf_support)
_isst_pbf_display_information(cpu, outf, i,
&ctdp_level->pbf_info,
@@ -509,15 +563,15 @@ void isst_clos_display_information(int cpu, FILE *outf, int clos,
format_and_print(outf, 5, header, value);
snprintf(header, sizeof(header), "clos-min");
- snprintf(value, sizeof(value), "%d", clos_config->clos_min);
+ snprintf(value, sizeof(value), "%d MHz", clos_config->clos_min * DISP_FREQ_MULTIPLIER);
format_and_print(outf, 5, header, value);
snprintf(header, sizeof(header), "clos-max");
- snprintf(value, sizeof(value), "%d", clos_config->clos_max);
+ snprintf(value, sizeof(value), "%d MHz", clos_config->clos_max * DISP_FREQ_MULTIPLIER);
format_and_print(outf, 5, header, value);
snprintf(header, sizeof(header), "clos-desired");
- snprintf(value, sizeof(value), "%d", clos_config->clos_desired);
+ snprintf(value, sizeof(value), "%d MHz", clos_config->clos_desired * DISP_FREQ_MULTIPLIER);
format_and_print(outf, 5, header, value);
format_and_print(outf, 1, NULL, NULL);
diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
index d280b27d600d..cdf0f8a6dbbf 100644
--- a/tools/power/x86/intel-speed-select/isst.h
+++ b/tools/power/x86/intel-speed-select/isst.h
@@ -69,6 +69,8 @@
#define PM_CLOS_OFFSET 0x08
#define PQR_ASSOC_OFFSET 0x20
+#define DISP_FREQ_MULTIPLIER 100
+
struct isst_clos_config {
int pkg_id;
int die_id;
@@ -161,6 +163,7 @@ struct isst_pkg_ctdp {
extern int get_topo_max_cpus(void);
extern int get_cpu_count(int pkg_id, int die_id);
+extern int get_core_count(int pkg_id, int die_id);
/* Common interfaces */
extern void debug_printf(const char *format, ...);
@@ -237,4 +240,6 @@ extern void isst_display_result(int cpu, FILE *outf, char *feature, char *cmd,
extern int isst_clos_get_clos_information(int cpu, int *enable, int *type);
extern void isst_clos_display_clos_information(int cpu, FILE *outf,
int clos_enable, int type);
+extern int is_clx_n_platform(void);
+extern int get_cpufreq_base_freq(int cpu);
#endif
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 0fb95f25944d..d67f968eac21 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -88,10 +88,10 @@ override LDFLAGS =
endif
ifneq ($(O),)
- BUILD := $(abs_objtree)
+ BUILD := $(O)
else
ifneq ($(KBUILD_OUTPUT),)
- BUILD := $(abs_objtree)/kselftest
+ BUILD := $(KBUILD_OUTPUT)/kselftest
else
BUILD := $(shell pwd)
DEFAULT_INSTALL_HDR_PATH := 1
@@ -104,7 +104,6 @@ include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
export KSFT_KHDR_INSTALL_DONE := 1
export BUILD
-#$(info abd_objtree = $(abs_objtree) BUILD = $(BUILD))
# build and run gpio when output directory is the src dir.
# gpio has dependency on tools/gpio and builds tools/gpio
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 3845144e2c91..4a851513c842 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -240,14 +240,14 @@ static int sockmap_init_sockets(int verbose)
addr.sin_port = htons(S1_PORT);
err = bind(s1, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0) {
- perror("bind s1 failed()\n");
+ perror("bind s1 failed()");
return errno;
}
addr.sin_port = htons(S2_PORT);
err = bind(s2, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0) {
- perror("bind s2 failed()\n");
+ perror("bind s2 failed()");
return errno;
}
@@ -255,14 +255,14 @@ static int sockmap_init_sockets(int verbose)
addr.sin_port = htons(S1_PORT);
err = listen(s1, 32);
if (err < 0) {
- perror("listen s1 failed()\n");
+ perror("listen s1 failed()");
return errno;
}
addr.sin_port = htons(S2_PORT);
err = listen(s2, 32);
if (err < 0) {
- perror("listen s1 failed()\n");
+ perror("listen s1 failed()");
return errno;
}
@@ -270,14 +270,14 @@ static int sockmap_init_sockets(int verbose)
addr.sin_port = htons(S1_PORT);
err = connect(c1, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0 && errno != EINPROGRESS) {
- perror("connect c1 failed()\n");
+ perror("connect c1 failed()");
return errno;
}
addr.sin_port = htons(S2_PORT);
err = connect(c2, (struct sockaddr *)&addr, sizeof(addr));
if (err < 0 && errno != EINPROGRESS) {
- perror("connect c2 failed()\n");
+ perror("connect c2 failed()");
return errno;
} else if (err < 0) {
err = 0;
@@ -286,13 +286,13 @@ static int sockmap_init_sockets(int verbose)
/* Accept Connections */
p1 = accept(s1, NULL, NULL);
if (p1 < 0) {
- perror("accept s1 failed()\n");
+ perror("accept s1 failed()");
return errno;
}
p2 = accept(s2, NULL, NULL);
if (p2 < 0) {
- perror("accept s1 failed()\n");
+ perror("accept s1 failed()");
return errno;
}
@@ -332,6 +332,10 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt,
int i, fp;
file = fopen(".sendpage_tst.tmp", "w+");
+ if (!file) {
+ perror("create file for sendpage");
+ return 1;
+ }
for (i = 0; i < iov_length * cnt; i++, k++)
fwrite(&k, sizeof(char), 1, file);
fflush(file);
@@ -339,12 +343,17 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt,
fclose(file);
fp = open(".sendpage_tst.tmp", O_RDONLY);
+ if (fp < 0) {
+ perror("reopen file for sendpage");
+ return 1;
+ }
+
clock_gettime(CLOCK_MONOTONIC, &s->start);
for (i = 0; i < cnt; i++) {
int sent = sendfile(fd, fp, NULL, iov_length);
if (!drop && sent < 0) {
- perror("send loop error:");
+ perror("send loop error");
close(fp);
return sent;
} else if (drop && sent >= 0) {
@@ -463,7 +472,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
int sent = sendmsg(fd, &msg, flags);
if (!drop && sent < 0) {
- perror("send loop error:");
+ perror("send loop error");
goto out_errno;
} else if (drop && sent >= 0) {
printf("send loop error expected: %i\n", sent);
@@ -499,7 +508,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
total_bytes -= txmsg_pop_total;
err = clock_gettime(CLOCK_MONOTONIC, &s->start);
if (err < 0)
- perror("recv start time: ");
+ perror("recv start time");
while (s->bytes_recvd < total_bytes) {
if (txmsg_cork) {
timeout.tv_sec = 0;
@@ -543,7 +552,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
if (recv < 0) {
if (errno != EWOULDBLOCK) {
clock_gettime(CLOCK_MONOTONIC, &s->end);
- perror("recv failed()\n");
+ perror("recv failed()");
goto out_errno;
}
}
@@ -557,7 +566,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
errno = msg_verify_data(&msg, recv, chunk_sz);
if (errno) {
- perror("data verify msg failed\n");
+ perror("data verify msg failed");
goto out_errno;
}
if (recvp) {
@@ -565,7 +574,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
recvp,
chunk_sz);
if (errno) {
- perror("data verify msg_peek failed\n");
+ perror("data verify msg_peek failed");
goto out_errno;
}
}
@@ -654,7 +663,7 @@ static int sendmsg_test(struct sockmap_options *opt)
err = 0;
exit(err ? 1 : 0);
} else if (rxpid == -1) {
- perror("msg_loop_rx: ");
+ perror("msg_loop_rx");
return errno;
}
@@ -681,7 +690,7 @@ static int sendmsg_test(struct sockmap_options *opt)
s.bytes_recvd, recvd_Bps, recvd_Bps/giga);
exit(err ? 1 : 0);
} else if (txpid == -1) {
- perror("msg_loop_tx: ");
+ perror("msg_loop_tx");
return errno;
}
@@ -715,7 +724,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt)
/* Ping/Pong data from client to server */
sc = send(c1, buf, sizeof(buf), 0);
if (sc < 0) {
- perror("send failed()\n");
+ perror("send failed()");
return sc;
}
@@ -748,7 +757,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt)
rc = recv(i, buf, sizeof(buf), 0);
if (rc < 0) {
if (errno != EWOULDBLOCK) {
- perror("recv failed()\n");
+ perror("recv failed()");
return rc;
}
}
@@ -760,7 +769,7 @@ static int forever_ping_pong(int rate, struct sockmap_options *opt)
sc = send(i, buf, rc, 0);
if (sc < 0) {
- perror("send failed()\n");
+ perror("send failed()");
return sc;
}
}
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index d60a343b1371..842d9155d36c 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -45,7 +45,7 @@ static int get_stats(int fd, __u16 count, __u32 raddr)
printf("\nXDP RTT data:\n");
if (bpf_map_lookup_elem(fd, &raddr, &pinginfo)) {
- perror("bpf_map_lookup elem: ");
+ perror("bpf_map_lookup elem");
return 1;
}
diff --git a/tools/testing/selftests/ftrace/settings b/tools/testing/selftests/ftrace/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/ftrace/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/ftrace/test.d/direct/ftrace-direct.tc b/tools/testing/selftests/ftrace/test.d/direct/ftrace-direct.tc
new file mode 100644
index 000000000000..d75a8695bc21
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/direct/ftrace-direct.tc
@@ -0,0 +1,69 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test ftrace direct functions against tracers
+
+rmmod ftrace-direct ||:
+if ! modprobe ftrace-direct ; then
+ echo "No ftrace-direct sample module - please make CONFIG_SAMPLE_FTRACE_DIRECT=m"
+ exit_unresolved;
+fi
+
+echo "Let the module run a little"
+sleep 1
+
+grep -q "my_direct_func: waking up" trace
+
+rmmod ftrace-direct
+
+test_tracer() {
+ tracer=$1
+
+ # tracer -> direct -> no direct -> no tracer
+ echo $tracer > current_tracer
+ modprobe ftrace-direct
+ rmmod ftrace-direct
+ echo nop > current_tracer
+
+ # tracer -> direct -> no tracer -> no direct
+ echo $tracer > current_tracer
+ modprobe ftrace-direct
+ echo nop > current_tracer
+ rmmod ftrace-direct
+
+ # direct -> tracer -> no tracer -> no direct
+ modprobe ftrace-direct
+ echo $tracer > current_tracer
+ echo nop > current_tracer
+ rmmod ftrace-direct
+
+ # direct -> tracer -> no direct -> no tracer
+ modprobe ftrace-direct
+ echo $tracer > current_tracer
+ rmmod ftrace-direct
+ echo nop > current_tracer
+}
+
+for t in `cat available_tracers`; do
+ if [ "$t" != "nop" ]; then
+ test_tracer $t
+ fi
+done
+
+echo nop > current_tracer
+rmmod ftrace-direct ||:
+
+# Now do the same thing with another direct function registered
+echo "Running with another ftrace direct function"
+
+rmmod ftrace-direct-too ||:
+modprobe ftrace-direct-too
+
+for t in `cat available_tracers`; do
+ if [ "$t" != "nop" ]; then
+ test_tracer $t
+ fi
+done
+
+echo nop > current_tracer
+rmmod ftrace-direct ||:
+rmmod ftrace-direct-too ||:
diff --git a/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc b/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc
new file mode 100644
index 000000000000..801ecb63e84c
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc
@@ -0,0 +1,84 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test ftrace direct functions against kprobes
+
+rmmod ftrace-direct ||:
+if ! modprobe ftrace-direct ; then
+ echo "No ftrace-direct sample module - please build with CONFIG_SAMPLE_FTRACE_DIRECT=m"
+ exit_unresolved;
+fi
+
+if [ ! -f kprobe_events ]; then
+ echo "No kprobe_events file -please build CONFIG_KPROBE_EVENTS"
+ exit_unresolved;
+fi
+
+echo "Let the module run a little"
+sleep 1
+
+grep -q "my_direct_func: waking up" trace
+
+rmmod ftrace-direct
+
+echo 'p:kwake wake_up_process task=$arg1' > kprobe_events
+
+start_direct() {
+ echo > trace
+ modprobe ftrace-direct
+ sleep 1
+ grep -q "my_direct_func: waking up" trace
+}
+
+stop_direct() {
+ rmmod ftrace-direct
+}
+
+enable_probe() {
+ echo > trace
+ echo 1 > events/kprobes/kwake/enable
+ sleep 1
+ grep -q "kwake:" trace
+}
+
+disable_probe() {
+ echo 0 > events/kprobes/kwake/enable
+}
+
+test_kprobes() {
+ # probe -> direct -> no direct -> no probe
+ enable_probe
+ start_direct
+ stop_direct
+ disable_probe
+
+ # probe -> direct -> no probe -> no direct
+ enable_probe
+ start_direct
+ disable_probe
+ stop_direct
+
+ # direct -> probe -> no probe -> no direct
+ start_direct
+ enable_probe
+ disable_probe
+ stop_direct
+
+ # direct -> probe -> no direct -> no probe
+ start_direct
+ enable_probe
+ stop_direct
+ disable_probe
+}
+
+test_kprobes
+
+# Now do this with a second registered direct function
+echo "Running with another ftrace direct function"
+
+modprobe ftrace-direct-too
+
+test_kprobes
+
+rmmod ftrace-direct-too
+
+echo > kprobe_events
diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
index 1cf40a9e7185..3876d8d62494 100644
--- a/tools/testing/selftests/livepatch/Makefile
+++ b/tools/testing/selftests/livepatch/Makefile
@@ -5,6 +5,7 @@ TEST_PROGS := \
test-livepatch.sh \
test-callbacks.sh \
test-shadow-vars.sh \
- test-state.sh
+ test-state.sh \
+ test-ftrace.sh
include ../lib.mk
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 79b0affd21fb..31eb09e38729 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -29,29 +29,45 @@ function die() {
exit 1
}
-function push_dynamic_debug() {
- DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
- awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
+function push_config() {
+ DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
+ awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
+ FTRACE_ENABLED=$(sysctl --values kernel.ftrace_enabled)
}
-function pop_dynamic_debug() {
+function pop_config() {
if [[ -n "$DYNAMIC_DEBUG" ]]; then
echo -n "$DYNAMIC_DEBUG" > /sys/kernel/debug/dynamic_debug/control
fi
+ if [[ -n "$FTRACE_ENABLED" ]]; then
+ sysctl kernel.ftrace_enabled="$FTRACE_ENABLED" &> /dev/null
+ fi
}
-# set_dynamic_debug() - save the current dynamic debug config and tweak
-# it for the self-tests. Set a script exit trap
-# that restores the original config.
function set_dynamic_debug() {
- push_dynamic_debug
- trap pop_dynamic_debug EXIT INT TERM HUP
cat <<-EOF > /sys/kernel/debug/dynamic_debug/control
file kernel/livepatch/* +p
func klp_try_switch_task -p
EOF
}
+function set_ftrace_enabled() {
+ local sysctl="$1"
+ result=$(sysctl kernel.ftrace_enabled="$sysctl" 2>&1 | paste --serial --delimiters=' ')
+ echo "livepatch: $result" > /dev/kmsg
+}
+
+# setup_config - save the current config and set a script exit trap that
+# restores the original config. Set up dynamic debug
+# for verbose livepatching output and turn on
+# the ftrace_enabled sysctl.
+function setup_config() {
+ push_config
+ set_dynamic_debug
+ set_ftrace_enabled 1
+ trap pop_config EXIT INT TERM HUP
+}
+
# loop_until(cmd) - loop a command until it is successful or $MAX_RETRIES,
# sleep $RETRY_INTERVAL between attempts
# cmd - command and its arguments to run
diff --git a/tools/testing/selftests/livepatch/test-callbacks.sh b/tools/testing/selftests/livepatch/test-callbacks.sh
index e97a9dcb73c7..a35289b13c9c 100755
--- a/tools/testing/selftests/livepatch/test-callbacks.sh
+++ b/tools/testing/selftests/livepatch/test-callbacks.sh
@@ -9,7 +9,7 @@ MOD_LIVEPATCH2=test_klp_callbacks_demo2
MOD_TARGET=test_klp_callbacks_mod
MOD_TARGET_BUSY=test_klp_callbacks_busy
-set_dynamic_debug
+setup_config
# TEST: target module before livepatch
diff --git a/tools/testing/selftests/livepatch/test-ftrace.sh b/tools/testing/selftests/livepatch/test-ftrace.sh
new file mode 100755
index 000000000000..e2a76887f40a
--- /dev/null
+++ b/tools/testing/selftests/livepatch/test-ftrace.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2019 Joe Lawrence <joe.lawrence@redhat.com>
+
+. $(dirname $0)/functions.sh
+
+MOD_LIVEPATCH=test_klp_livepatch
+
+setup_config
+
+
+# TEST: livepatch interaction with ftrace_enabled sysctl
+# - turn ftrace_enabled OFF and verify livepatches can't load
+# - turn ftrace_enabled ON and verify livepatch can load
+# - verify that ftrace_enabled can't be turned OFF while a livepatch is loaded
+
+echo -n "TEST: livepatch interaction with ftrace_enabled sysctl ... "
+dmesg -C
+
+set_ftrace_enabled 0
+load_failing_mod $MOD_LIVEPATCH
+
+set_ftrace_enabled 1
+load_lp $MOD_LIVEPATCH
+if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+ echo -e "FAIL\n\n"
+ die "livepatch kselftest(s) failed"
+fi
+
+set_ftrace_enabled 0
+if [[ "$(cat /proc/cmdline)" != "$MOD_LIVEPATCH: this has been live patched" ]] ; then
+ echo -e "FAIL\n\n"
+ die "livepatch kselftest(s) failed"
+fi
+disable_lp $MOD_LIVEPATCH
+unload_lp $MOD_LIVEPATCH
+
+check_result "livepatch: kernel.ftrace_enabled = 0
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: failed to register ftrace handler for function 'cmdline_proc_show' (-16)
+livepatch: failed to patch object 'vmlinux'
+livepatch: failed to enable patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': canceling patching transition, going to unpatch
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+modprobe: ERROR: could not insert '$MOD_LIVEPATCH': Device or resource busy
+livepatch: kernel.ftrace_enabled = 1
+% modprobe $MOD_LIVEPATCH
+livepatch: enabling patch '$MOD_LIVEPATCH'
+livepatch: '$MOD_LIVEPATCH': initializing patching transition
+livepatch: '$MOD_LIVEPATCH': starting patching transition
+livepatch: '$MOD_LIVEPATCH': completing patching transition
+livepatch: '$MOD_LIVEPATCH': patching complete
+livepatch: sysctl: setting key \"kernel.ftrace_enabled\": Device or resource busy kernel.ftrace_enabled = 0
+% echo 0 > /sys/kernel/livepatch/$MOD_LIVEPATCH/enabled
+livepatch: '$MOD_LIVEPATCH': initializing unpatching transition
+livepatch: '$MOD_LIVEPATCH': starting unpatching transition
+livepatch: '$MOD_LIVEPATCH': completing unpatching transition
+livepatch: '$MOD_LIVEPATCH': unpatching complete
+% rmmod $MOD_LIVEPATCH"
+
+
+exit 0
diff --git a/tools/testing/selftests/livepatch/test-livepatch.sh b/tools/testing/selftests/livepatch/test-livepatch.sh
index f05268aea859..493e3df415a1 100755
--- a/tools/testing/selftests/livepatch/test-livepatch.sh
+++ b/tools/testing/selftests/livepatch/test-livepatch.sh
@@ -7,7 +7,7 @@
MOD_LIVEPATCH=test_klp_livepatch
MOD_REPLACE=test_klp_atomic_replace
-set_dynamic_debug
+setup_config
# TEST: basic function patching
diff --git a/tools/testing/selftests/livepatch/test-shadow-vars.sh b/tools/testing/selftests/livepatch/test-shadow-vars.sh
index 04a37831e204..1aae73299114 100755
--- a/tools/testing/selftests/livepatch/test-shadow-vars.sh
+++ b/tools/testing/selftests/livepatch/test-shadow-vars.sh
@@ -6,7 +6,7 @@
MOD_TEST=test_klp_shadow_vars
-set_dynamic_debug
+setup_config
# TEST: basic shadow variable API
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index c67d32eeb668..334a7eea2004 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -290,6 +290,40 @@ static void mfd_assert_read_shared(int fd)
munmap(p, mfd_def_size);
}
+static void mfd_assert_fork_private_write(int fd)
+{
+ int *p;
+ pid_t pid;
+
+ p = mmap(NULL,
+ mfd_def_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ fd,
+ 0);
+ if (p == MAP_FAILED) {
+ printf("mmap() failed: %m\n");
+ abort();
+ }
+
+ p[0] = 22;
+
+ pid = fork();
+ if (pid == 0) {
+ p[0] = 33;
+ exit(0);
+ } else {
+ waitpid(pid, NULL, 0);
+
+ if (p[0] != 22) {
+ printf("MAP_PRIVATE copy-on-write failed: %m\n");
+ abort();
+ }
+ }
+
+ munmap(p, mfd_def_size);
+}
+
static void mfd_assert_write(int fd)
{
ssize_t l;
@@ -760,6 +794,8 @@ static void test_seal_future_write(void)
mfd_assert_read_shared(fd2);
mfd_fail_write(fd2);
+ mfd_assert_fork_private_write(fd);
+
munmap(p, mfd_def_size);
close(fd2);
close(fd);
diff --git a/tools/testing/selftests/net/forwarding/tc_common.sh b/tools/testing/selftests/net/forwarding/tc_common.sh
index d93589bd4d1d..64f652633585 100644
--- a/tools/testing/selftests/net/forwarding/tc_common.sh
+++ b/tools/testing/selftests/net/forwarding/tc_common.sh
@@ -3,16 +3,42 @@
CHECK_TC="yes"
+# Can be overridden by the configuration file. See lib.sh
+TC_HIT_TIMEOUT=${TC_HIT_TIMEOUT:=1000} # ms
+
+__tc_check_packets()
+{
+ local id=$1
+ local handle=$2
+ local count=$3
+ local operator=$4
+
+ start_time="$(date -u +%s%3N)"
+ while true
+ do
+ cmd_jq "tc -j -s filter show $id" \
+ ".[] | select(.options.handle == $handle) | \
+ select(.options.actions[0].stats.packets $operator $count)" \
+ &> /dev/null
+ ret=$?
+ if [[ $ret -eq 0 ]]; then
+ return $ret
+ fi
+ current_time="$(date -u +%s%3N)"
+ diff=$(expr $current_time - $start_time)
+ if [ "$diff" -gt "$TC_HIT_TIMEOUT" ]; then
+ return 1
+ fi
+ done
+}
+
tc_check_packets()
{
local id=$1
local handle=$2
local count=$3
- cmd_jq "tc -j -s filter show $id" \
- ".[] | select(.options.handle == $handle) | \
- select(.options.actions[0].stats.packets == $count)" \
- &> /dev/null
+ __tc_check_packets "$id" "$handle" "$count" "=="
}
tc_check_packets_hitting()
@@ -20,8 +46,5 @@ tc_check_packets_hitting()
local id=$1
local handle=$2
- cmd_jq "tc -j -s filter show $id" \
- ".[] | select(.options.handle == $handle) | \
- select(.options.actions[0].stats.packets > 0)" \
- &> /dev/null
+ __tc_check_packets "$id" "$handle" 0 ">"
}
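
Both checkers now share one poll-until-deadline loop instead of a single-shot jq query, bounded by TC_HIT_TIMEOUT milliseconds. The same pattern in C, as a generic sketch (predicate and names illustrative):

	#include <stdbool.h>
	#include <time.h>

	static long long now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
	}

	/* Retry pred() until it succeeds or timeout_ms elapses; like the
	 * shell helper, this spins without sleeping between attempts. */
	static bool poll_until(bool (*pred)(void *), void *priv,
			       long long timeout_ms)
	{
		long long start = now_ms();

		while (!pred(priv)) {
			if (now_ms() - start > timeout_ms)
				return false;
		}
		return true;
	}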
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index ab367e75f095..d697815d2785 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -1249,8 +1249,7 @@ test_list_flush_ipv4_exception() {
done
run_cmd ${ns_a} ping -q -M want -i 0.1 -c 2 -s 1800 "${dst2}"
- # Each exception is printed as two lines
- if [ "$(${ns_a} ip route list cache | wc -l)" -ne 202 ]; then
+ if [ "$(${ns_a} ip -oneline route list cache | wc -l)" -ne 101 ]; then
err " can't list cached exceptions"
fail=1
fi
@@ -1300,7 +1299,7 @@ test_list_flush_ipv6_exception() {
run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst_prefix1}${i}"
done
run_cmd ${ns_a} ping -q -M want -i 0.1 -w 1 -s 1800 "${dst2}"
- if [ "$(${ns_a} ip -6 route list cache | wc -l)" -ne 101 ]; then
+ if [ "$(${ns_a} ip -oneline -6 route list cache | wc -l)" -ne 101 ]; then
err " can't list cached exceptions"
fail=1
fi
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 1c8f194d6556..46abcae47dee 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -268,6 +268,38 @@ TEST_F(tls, sendmsg_single)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+#define MAX_FRAGS 64
+#define SEND_LEN 13
+TEST_F(tls, sendmsg_fragmented)
+{
+ char const *test_str = "test_sendmsg";
+ char buf[SEND_LEN * MAX_FRAGS];
+ struct iovec vec[MAX_FRAGS];
+ struct msghdr msg;
+ int i, frags;
+
+ for (frags = 1; frags <= MAX_FRAGS; frags++) {
+ for (i = 0; i < frags; i++) {
+ vec[i].iov_base = (char *)test_str;
+ vec[i].iov_len = SEND_LEN;
+ }
+
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = vec;
+ msg.msg_iovlen = frags;
+
+ EXPECT_EQ(sendmsg(self->fd, &msg, 0), SEND_LEN * frags);
+ EXPECT_EQ(recv(self->cfd, buf, SEND_LEN * frags, MSG_WAITALL),
+ SEND_LEN * frags);
+
+ for (i = 0; i < frags; i++)
+ EXPECT_EQ(memcmp(buf + SEND_LEN * i,
+ test_str, SEND_LEN), 0);
+ }
+}
+#undef MAX_FRAGS
+#undef SEND_LEN
+
TEST_F(tls, sendmsg_large)
{
void *mem = malloc(16384);
@@ -694,6 +726,34 @@ TEST_F(tls, recv_lowat)
EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
}
+TEST_F(tls, recv_rcvbuf)
+{
+ char send_mem[4096];
+ char recv_mem[4096];
+ int rcv_buf = 1024;
+
+ memset(send_mem, 0x1c, sizeof(send_mem));
+
+ EXPECT_EQ(setsockopt(self->cfd, SOL_SOCKET, SO_RCVBUF,
+ &rcv_buf, sizeof(rcv_buf)), 0);
+
+ EXPECT_EQ(send(self->fd, send_mem, 512, 0), 512);
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), 512);
+ EXPECT_EQ(memcmp(send_mem, recv_mem, 512), 0);
+
+ if (self->notls)
+ return;
+
+ EXPECT_EQ(send(self->fd, send_mem, 4096, 0), 4096);
+ memset(recv_mem, 0, sizeof(recv_mem));
+ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
+ EXPECT_EQ(errno, EMSGSIZE);
+
+ EXPECT_EQ(recv(self->cfd, recv_mem, sizeof(recv_mem), 0), -1);
+ EXPECT_EQ(errno, EMSGSIZE);
+}
+
TEST_F(tls, bidir)
{
char const *test_str = "test_read";
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index 0e2b2e6284ac..e089a0c30d9a 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -34,6 +34,7 @@ int pick_online_cpu(void);
int read_debugfs_file(char *debugfs_file, int *result);
int write_debugfs_file(char *debugfs_file, int result);
+int read_sysfs_file(char *sysfs_file, char *result, size_t result_size);
void set_dscr(unsigned long val);
int perf_event_open_counter(unsigned int type,
unsigned long config, int group_fd);
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 23f4caf48ffc..417306353e07 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+include ../../../../../../scripts/Kbuild.include
+
noarg:
$(MAKE) -C ../../
@@ -6,7 +8,10 @@ noarg:
CFLAGS += -m64
# Toolchains may build PIE by default which breaks the assembly
-LDFLAGS += -no-pie
+no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
+ $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+
+LDFLAGS += $(no-pie-option)
TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
cycles_with_freeze_test pmc56_overflow_test \
diff --git a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
index 200337daec42..c1f324afdbf3 100644
--- a/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c
@@ -148,6 +148,121 @@ static int runtestsingle(int readwriteflag, int exclude_user, int arraytest)
return 0;
}
+static int runtest_dar_outside(void)
+{
+ void *target;
+ volatile __u16 temp16;
+ volatile __u64 temp64;
+ struct perf_event_attr attr;
+ int break_fd;
+ unsigned long long breaks;
+ int fail = 0;
+ size_t res;
+
+ target = malloc(8);
+ if (!target) {
+ perror("malloc failed");
+ exit(EXIT_FAILURE);
+ }
+
+ /* setup counters */
+ memset(&attr, 0, sizeof(attr));
+ attr.disabled = 1;
+ attr.type = PERF_TYPE_BREAKPOINT;
+ attr.exclude_kernel = 1;
+ attr.exclude_hv = 1;
+ attr.exclude_guest = 1;
+ attr.bp_type = HW_BREAKPOINT_RW;
+ /* watch middle half of target array */
+ attr.bp_addr = (__u64)(target + 2);
+ attr.bp_len = 4;
+ break_fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ if (break_fd < 0) {
+ free(target);
+ perror("sys_perf_event_open");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Shouldn't hit. */
+ ioctl(break_fd, PERF_EVENT_IOC_RESET);
+ ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
+ temp16 = *((__u16 *)target);
+ *((__u16 *)target) = temp16;
+ ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
+ res = read(break_fd, &breaks, sizeof(unsigned long long));
+ assert(res == sizeof(unsigned long long));
+ if (breaks == 0) {
+ printf("TESTED: No overlap\n");
+ } else {
+ printf("FAILED: No overlap: %lld != 0\n", breaks);
+ fail = 1;
+ }
+
+ /* Hit */
+ ioctl(break_fd, PERF_EVENT_IOC_RESET);
+ ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
+ temp16 = *((__u16 *)(target + 1));
+ *((__u16 *)(target + 1)) = temp16;
+ ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
+ res = read(break_fd, &breaks, sizeof(unsigned long long));
+ assert(res == sizeof(unsigned long long));
+ if (breaks == 2) {
+ printf("TESTED: Partial overlap\n");
+ } else {
+ printf("FAILED: Partial overlap: %lld != 2\n", breaks);
+ fail = 1;
+ }
+
+ /* Hit */
+ ioctl(break_fd, PERF_EVENT_IOC_RESET);
+ ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
+ temp16 = *((__u16 *)(target + 5));
+ *((__u16 *)(target + 5)) = temp16;
+ ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
+ res = read(break_fd, &breaks, sizeof(unsigned long long));
+ assert(res == sizeof(unsigned long long));
+ if (breaks == 2) {
+ printf("TESTED: Partial overlap\n");
+ } else {
+ printf("FAILED: Partial overlap: %lld != 2\n", breaks);
+ fail = 1;
+ }
+
+ /* Shouldn't Hit */
+ ioctl(break_fd, PERF_EVENT_IOC_RESET);
+ ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
+ temp16 = *((__u16 *)(target + 6));
+ *((__u16 *)(target + 6)) = temp16;
+ ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
+ res = read(break_fd, &breaks, sizeof(unsigned long long));
+ assert(res == sizeof(unsigned long long));
+ if (breaks == 0) {
+ printf("TESTED: No overlap\n");
+ } else {
+ printf("FAILED: No overlap: %lld != 0\n", breaks);
+ fail = 1;
+ }
+
+ /* Hit */
+ ioctl(break_fd, PERF_EVENT_IOC_RESET);
+ ioctl(break_fd, PERF_EVENT_IOC_ENABLE);
+ temp64 = *((__u64 *)target);
+ *((__u64 *)target) = temp64;
+ ioctl(break_fd, PERF_EVENT_IOC_DISABLE);
+ res = read(break_fd, &breaks, sizeof(unsigned long long));
+ assert(res == sizeof(unsigned long long));
+ if (breaks == 2) {
+ printf("TESTED: Full overlap\n");
+ } else {
+ printf("FAILED: Full overlap: %lld != 2\n", breaks);
+ fail = 1;
+ }
+
+ free(target);
+ close(break_fd);
+ return fail;
+}
+
static int runtest(void)
{
int rwflag;
@@ -172,7 +287,9 @@ static int runtest(void)
return ret;
}
}
- return 0;
+
+ ret = runtest_dar_outside();
+ return ret;
}
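
The expected hit counts above follow from simple range overlap: the breakpoint watches [target+2, target+6), and an access hits iff its byte range intersects that window, regardless of where the DAR itself lands. A sketch of the predicate (names are illustrative, not from the test):

#include <stdbool.h>

/* Does an access of 'len' bytes at 'addr' touch the watched window? */
static bool bp_overlaps(unsigned long addr, int len,
			unsigned long bp_addr, int bp_len)
{
	return addr < bp_addr + bp_len && bp_addr < addr + len;
}

With bp_addr = target+2 and bp_len = 4, a 2-byte access at target+6 misses while one at target+5 hits, matching the counts checked above (each hit is counted twice, once for the load and once for the store).
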
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
index 3066d310f32b..7deedbc16b0b 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
@@ -22,321 +22,486 @@
#include <sys/wait.h>
#include "ptrace.h"
-/* Breakpoint access modes */
-enum {
- BP_X = 1,
- BP_RW = 2,
- BP_W = 4,
-};
-
-static pid_t child_pid;
-static struct ppc_debug_info dbginfo;
-
-static void get_dbginfo(void)
-{
- int ret;
+#define SPRN_PVR 0x11F
+#define PVR_8xx 0x00500000
- ret = ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo);
- if (ret) {
- perror("Can't get breakpoint info\n");
- exit(-1);
- }
-}
+bool is_8xx;
-static bool hwbreak_present(void)
-{
- return (dbginfo.num_data_bps != 0);
-}
+/*
+ * Use volatile on all global variables so that the compiler
+ * doesn't optimise away their loads/stores. Otherwise the
+ * selftest can fail.
+ */
+static volatile __u64 glvar;
-static bool dawr_present(void)
-{
- return !!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_DAWR);
-}
+#define DAWR_MAX_LEN 512
+static volatile __u8 big_var[DAWR_MAX_LEN] __attribute__((aligned(512)));
-static void set_breakpoint_addr(void *addr)
-{
- int ret;
+#define A_LEN 6
+#define B_LEN 6
+struct gstruct {
+ __u8 a[A_LEN]; /* double word aligned */
+ __u8 b[B_LEN]; /* double word unaligned */
+};
+static volatile struct gstruct gstruct __attribute__((aligned(512)));
- ret = ptrace(PTRACE_SET_DEBUGREG, child_pid, 0, addr);
- if (ret) {
- perror("Can't set breakpoint addr\n");
- exit(-1);
- }
-}
-static int set_hwbreakpoint_addr(void *addr, int range)
+static void get_dbginfo(pid_t child_pid, struct ppc_debug_info *dbginfo)
{
- int ret;
-
- struct ppc_hw_breakpoint info;
-
- info.version = 1;
- info.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
- info.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
- if (range > 0)
- info.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
- info.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
- info.addr = (__u64)addr;
- info.addr2 = (__u64)addr + range;
- info.condition_value = 0;
-
- ret = ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &info);
- if (ret < 0) {
- perror("Can't set breakpoint\n");
+ if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, dbginfo)) {
+ perror("Can't get breakpoint info");
exit(-1);
}
- return ret;
}
-static int del_hwbreakpoint_addr(int watchpoint_handle)
+static bool dawr_present(struct ppc_debug_info *dbginfo)
{
- int ret;
-
- ret = ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, watchpoint_handle);
- if (ret < 0) {
- perror("Can't delete hw breakpoint\n");
- exit(-1);
- }
- return ret;
+ return !!(dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_DAWR);
}
-#define DAWR_LENGTH_MAX 512
-
-/* Dummy variables to test read/write accesses */
-static unsigned long long
- dummy_array[DAWR_LENGTH_MAX / sizeof(unsigned long long)]
- __attribute__((aligned(512)));
-static unsigned long long *dummy_var = dummy_array;
-
static void write_var(int len)
{
- long long *plval;
- char *pcval;
- short *psval;
- int *pival;
+ __u8 *pcvar;
+ __u16 *psvar;
+ __u32 *pivar;
+ __u64 *plvar;
switch (len) {
case 1:
- pcval = (char *)dummy_var;
- *pcval = 0xff;
+ pcvar = (__u8 *)&glvar;
+ *pcvar = 0xff;
break;
case 2:
- psval = (short *)dummy_var;
- *psval = 0xffff;
+ psvar = (__u16 *)&glvar;
+ *psvar = 0xffff;
break;
case 4:
- pival = (int *)dummy_var;
- *pival = 0xffffffff;
+ pivar = (__u32 *)&glvar;
+ *pivar = 0xffffffff;
break;
case 8:
- plval = (long long *)dummy_var;
- *plval = 0xffffffffffffffffLL;
+ plvar = (__u64 *)&glvar;
+ *plvar = 0xffffffffffffffffLL;
break;
}
}
static void read_var(int len)
{
- char cval __attribute__((unused));
- short sval __attribute__((unused));
- int ival __attribute__((unused));
- long long lval __attribute__((unused));
+ __u8 cvar __attribute__((unused));
+ __u16 svar __attribute__((unused));
+ __u32 ivar __attribute__((unused));
+ __u64 lvar __attribute__((unused));
switch (len) {
case 1:
- cval = *(char *)dummy_var;
+ cvar = (__u8)glvar;
break;
case 2:
- sval = *(short *)dummy_var;
+ svar = (__u16)glvar;
break;
case 4:
- ival = *(int *)dummy_var;
+ ivar = (__u32)glvar;
break;
case 8:
- lval = *(long long *)dummy_var;
+ lvar = (__u64)glvar;
break;
}
}
-/*
- * Do the r/w accesses to trigger the breakpoints. And run
- * the usual traps.
- */
-static void trigger_tests(void)
+static void test_workload(void)
{
- int len, ret;
+ __u8 cvar __attribute__((unused));
+ __u32 ivar __attribute__((unused));
+ int len = 0;
- ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
- if (ret) {
- perror("Can't be traced?\n");
- return;
+ if (ptrace(PTRACE_TRACEME, 0, NULL, 0)) {
+ perror("Child can't be traced?");
+ exit(-1);
}
/* Wake up father so that it sets up the first test */
kill(getpid(), SIGUSR1);
- /* Test write watchpoints */
- for (len = 1; len <= sizeof(long); len <<= 1)
+ /* PTRACE_SET_DEBUGREG, WO test */
+ for (len = 1; len <= sizeof(glvar); len <<= 1)
write_var(len);
- /* Test read/write watchpoints (on read accesses) */
- for (len = 1; len <= sizeof(long); len <<= 1)
+ /* PTRACE_SET_DEBUGREG, RO test */
+ for (len = 1; len <= sizeof(glvar); len <<= 1)
read_var(len);
- /* Test when breakpoint is unset */
-
- /* Test write watchpoints */
- for (len = 1; len <= sizeof(long); len <<= 1)
- write_var(len);
+ /* PTRACE_SET_DEBUGREG, RW test */
+ for (len = 1; len <= sizeof(glvar); len <<= 1) {
+ if (rand() % 2)
+ read_var(len);
+ else
+ write_var(len);
+ }
- /* Test read/write watchpoints (on read accesses) */
- for (len = 1; len <= sizeof(long); len <<= 1)
- read_var(len);
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, WO test */
+ write_var(1);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RO test */
+ read_var(1);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RW test */
+ if (rand() % 2)
+ write_var(1);
+ else
+ read_var(1);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */
+ gstruct.a[rand() % A_LEN] = 'a';
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RO test */
+ cvar = gstruct.a[rand() % A_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RW test */
+ if (rand() % 2)
+ gstruct.a[rand() % A_LEN] = 'a';
+ else
+ cvar = gstruct.a[rand() % A_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, WO test */
+ gstruct.b[rand() % B_LEN] = 'b';
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RO test */
+ cvar = gstruct.b[rand() % B_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RW test */
+ if (rand() % 2)
+ gstruct.b[rand() % B_LEN] = 'b';
+ else
+ cvar = gstruct.b[rand() % B_LEN];
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, DAR OUTSIDE, RW test */
+ if (rand() % 2)
+ *((int *)(gstruct.a + 4)) = 10;
+ else
+ ivar = *((int *)(gstruct.a + 4));
+
+ /* PPC_PTRACE_SETHWDEBUG. DAWR_MAX_LEN. RW test */
+ if (rand() % 2)
+ big_var[rand() % DAWR_MAX_LEN] = 'a';
+ else
+ cvar = big_var[rand() % DAWR_MAX_LEN];
}
-static void check_success(const char *msg)
+static void check_success(pid_t child_pid, const char *name, const char *type,
+ unsigned long saddr, int len)
{
- const char *msg2;
int status;
+ siginfo_t siginfo;
+ unsigned long eaddr = (saddr + len - 1) | 0x7;
+
+ saddr &= ~0x7;
/* Wait for the child to SIGTRAP */
wait(&status);
- msg2 = "Failed";
+ ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &siginfo);
- if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
- msg2 = "Child process hit the breakpoint";
+ if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
+ (unsigned long)siginfo.si_addr < saddr ||
+ (unsigned long)siginfo.si_addr > eaddr) {
+ printf("%s, %s, len: %d: Fail\n", name, type, len);
+ exit(-1);
}
- printf("%s Result: [%s]\n", msg, msg2);
+ printf("%s, %s, len: %d: Ok\n", name, type, len);
+
+ if (!is_8xx) {
+ /*
+ * For a ptrace-registered watchpoint, the signal is generated
+ * before the load/store executes. Single-step the instruction
+ * and then continue the test.
+ */
+ ptrace(PTRACE_SINGLESTEP, child_pid, NULL, 0);
+ wait(NULL);
+ }
}
-static void launch_watchpoints(char *buf, int mode, int len,
- struct ppc_debug_info *dbginfo, bool dawr)
+static void ptrace_set_debugreg(pid_t child_pid, unsigned long wp_addr)
{
- const char *mode_str;
- unsigned long data = (unsigned long)(dummy_var);
- int wh, range;
-
- data &= ~0x7UL;
-
- if (mode == BP_W) {
- data |= (1UL << 1);
- mode_str = "write";
- } else {
- data |= (1UL << 0);
- data |= (1UL << 1);
- mode_str = "read";
+ if (ptrace(PTRACE_SET_DEBUGREG, child_pid, 0, wp_addr)) {
+ perror("PTRACE_SET_DEBUGREG failed");
+ exit(-1);
}
+}
- /* Set DABR_TRANSLATION bit */
- data |= (1UL << 2);
-
- /* use PTRACE_SET_DEBUGREG breakpoints */
- set_breakpoint_addr((void *)data);
- ptrace(PTRACE_CONT, child_pid, NULL, 0);
- sprintf(buf, "Test %s watchpoint with len: %d ", mode_str, len);
- check_success(buf);
- /* Unregister hw brkpoint */
- set_breakpoint_addr(NULL);
+static int ptrace_sethwdebug(pid_t child_pid, struct ppc_hw_breakpoint *info)
+{
+ int wh = ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, info);
- data = (data & ~7); /* remove dabr control bits */
+ if (wh <= 0) {
+ perror("PPC_PTRACE_SETHWDEBUG failed");
+ exit(-1);
+ }
+ return wh;
+}
- /* use PPC_PTRACE_SETHWDEBUG breakpoint */
- if (!(dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE))
- return; /* not supported */
- wh = set_hwbreakpoint_addr((void *)data, 0);
- ptrace(PTRACE_CONT, child_pid, NULL, 0);
- sprintf(buf, "Test %s watchpoint with len: %d ", mode_str, len);
- check_success(buf);
- /* Unregister hw brkpoint */
- del_hwbreakpoint_addr(wh);
-
- /* try a wider range */
- range = 8;
- if (dawr)
- range = 512 - ((int)data & (DAWR_LENGTH_MAX - 1));
- wh = set_hwbreakpoint_addr((void *)data, range);
- ptrace(PTRACE_CONT, child_pid, NULL, 0);
- sprintf(buf, "Test %s watchpoint with len: %d ", mode_str, len);
- check_success(buf);
- /* Unregister hw brkpoint */
- del_hwbreakpoint_addr(wh);
+static void ptrace_delhwdebug(pid_t child_pid, int wh)
+{
+ if (ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, wh) < 0) {
+ perror("PPC_PTRACE_DELHWDEBUG failed");
+ exit(-1);
+ }
}
-/* Set the breakpoints and check the child successfully trigger them */
-static int launch_tests(bool dawr)
+#define DABR_READ_SHIFT 0
+#define DABR_WRITE_SHIFT 1
+#define DABR_TRANSLATION_SHIFT 2
+
+static int test_set_debugreg(pid_t child_pid)
{
- char buf[1024];
- int len, i, status;
+ unsigned long wp_addr = (unsigned long)&glvar;
+ char *name = "PTRACE_SET_DEBUGREG";
+ int len;
+
+ /* PTRACE_SET_DEBUGREG, WO test*/
+ wp_addr &= ~0x7UL;
+ wp_addr |= (1UL << DABR_WRITE_SHIFT);
+ wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
+ for (len = 1; len <= sizeof(glvar); len <<= 1) {
+ ptrace_set_debugreg(child_pid, wp_addr);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "WO", wp_addr, len);
+ }
- struct ppc_debug_info dbginfo;
+ /* PTRACE_SET_DEBUGREG, RO test */
+ wp_addr &= ~0x7UL;
+ wp_addr |= (1UL << DABR_READ_SHIFT);
+ wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
+ for (len = 1; len <= sizeof(glvar); len <<= 1) {
+ ptrace_set_debugreg(child_pid, wp_addr);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RO", wp_addr, len);
+ }
- i = ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo);
- if (i) {
- perror("Can't set breakpoint info\n");
- exit(-1);
+ /* PTRACE_SET_DEBUGREG, RW test */
+ wp_addr &= ~0x7UL;
+ wp_addr |= (1UL << DABR_READ_SHIFT);
+ wp_addr |= (1UL << DABR_WRITE_SHIFT);
+ wp_addr |= (1UL << DABR_TRANSLATION_SHIFT);
+ for (len = 1; len <= sizeof(glvar); len <<= 1) {
+ ptrace_set_debugreg(child_pid, wp_addr);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RW", wp_addr, len);
}
- if (!(dbginfo.features & PPC_DEBUG_FEATURE_DATA_BP_RANGE))
- printf("WARNING: Kernel doesn't support PPC_PTRACE_SETHWDEBUG\n");
- /* Write watchpoint */
- for (len = 1; len <= sizeof(long); len <<= 1)
- launch_watchpoints(buf, BP_W, len, &dbginfo, dawr);
+ ptrace_set_debugreg(child_pid, 0);
+ return 0;
+}
- /* Read-Write watchpoint */
- for (len = 1; len <= sizeof(long); len <<= 1)
- launch_watchpoints(buf, BP_RW, len, &dbginfo, dawr);
+static void get_ppc_hw_breakpoint(struct ppc_hw_breakpoint *info, int type,
+ unsigned long addr, int len)
+{
+ info->version = 1;
+ info->trigger_type = type;
+ info->condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ info->addr = (__u64)addr;
+ info->addr2 = (__u64)addr + len;
+ info->condition_value = 0;
+ if (!len)
+ info->addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+ else
+ info->addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+}
+static void test_sethwdebug_exact(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info;
+ unsigned long wp_addr = (unsigned long)&glvar;
+ char *name = "PPC_PTRACE_SETHWDEBUG, MODE_EXACT";
+ int len = 1; /* hardcoded in kernel */
+ int wh;
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, WO test */
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, 0);
+ wh = ptrace_sethwdebug(child_pid, &info);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "WO", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
- /*
- * Now we have unregistered the breakpoint, access by child
- * should not cause SIGTRAP.
- */
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RO test */
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_READ, wp_addr, 0);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RO", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
- wait(&status);
+ /* PPC_PTRACE_SETHWDEBUG, MODE_EXACT, RW test */
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, 0);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RW", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+}
- if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
- printf("FAIL: Child process hit the breakpoint, which is not expected\n");
- ptrace(PTRACE_CONT, child_pid, NULL, 0);
- return TEST_FAIL;
- }
+static void test_sethwdebug_range_aligned(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info;
+ unsigned long wp_addr;
+ char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED";
+ int len;
+ int wh;
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */
+ wp_addr = (unsigned long)&gstruct.a;
+ len = A_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "WO", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RO test */
+ wp_addr = (unsigned long)&gstruct.a;
+ len = A_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_READ, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RO", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RW test */
+ wp_addr = (unsigned long)&gstruct.a;
+ len = A_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RW", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+}
- if (WIFEXITED(status))
- printf("Child exited normally\n");
+static void test_sethwdebug_range_unaligned(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info;
+ unsigned long wp_addr;
+ char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED";
+ int len;
+ int wh;
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, WO test */
+ wp_addr = (unsigned long)&gstruct.b;
+ len = B_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "WO", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RO test */
+ wp_addr = (unsigned long)&gstruct.b;
+ len = B_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_READ, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RO", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, RW test */
+ wp_addr = (unsigned long)&gstruct.b;
+ len = B_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RW", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
- return TEST_PASS;
+}
+
+static void test_sethwdebug_range_unaligned_dar(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info;
+ unsigned long wp_addr;
+ char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, DAR OUTSIDE";
+ int len;
+ int wh;
+
+ /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW UNALIGNED, DAR OUTSIDE, RW test */
+ wp_addr = (unsigned long)&gstruct.b;
+ len = B_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_WRITE, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RW", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+}
+
+static void test_sethwdebug_dawr_max_range(pid_t child_pid)
+{
+ struct ppc_hw_breakpoint info;
+ unsigned long wp_addr;
+ char *name = "PPC_PTRACE_SETHWDEBUG, DAWR_MAX_LEN";
+ int len;
+ int wh;
+
+ /* PPC_PTRACE_SETHWDEBUG, DAWR_MAX_LEN, RW test */
+ wp_addr = (unsigned long)big_var;
+ len = DAWR_MAX_LEN;
+ get_ppc_hw_breakpoint(&info, PPC_BREAKPOINT_TRIGGER_RW, wp_addr, len);
+ wh = ptrace_sethwdebug(child_pid, &info);
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
+ check_success(child_pid, name, "RW", wp_addr, len);
+ ptrace_delhwdebug(child_pid, wh);
+}
+
+/* Set the breakpoints and check the child successfully trigger them */
+static void
+run_tests(pid_t child_pid, struct ppc_debug_info *dbginfo, bool dawr)
+{
+ test_set_debugreg(child_pid);
+ if (dbginfo->features & PPC_DEBUG_FEATURE_DATA_BP_RANGE) {
+ test_sethwdebug_exact(child_pid);
+
+ if (!is_8xx)
+ test_sethwdebug_range_aligned(child_pid);
+ if (dawr && !is_8xx) {
+ test_sethwdebug_range_unaligned(child_pid);
+ test_sethwdebug_range_unaligned_dar(child_pid);
+ test_sethwdebug_dawr_max_range(child_pid);
+ }
+ }
}
static int ptrace_hwbreak(void)
{
- pid_t pid;
- int ret;
+ pid_t child_pid;
+ struct ppc_debug_info dbginfo;
bool dawr;
- pid = fork();
- if (!pid) {
- trigger_tests();
+ child_pid = fork();
+ if (!child_pid) {
+ test_workload();
return 0;
}
wait(NULL);
- child_pid = pid;
+ get_dbginfo(child_pid, &dbginfo);
+ SKIP_IF(dbginfo.num_data_bps == 0);
- get_dbginfo();
- SKIP_IF(!hwbreak_present());
- dawr = dawr_present();
-
- ret = launch_tests(dawr);
+ dawr = dawr_present(&dbginfo);
+ run_tests(child_pid, &dbginfo, dawr);
+ /* Let the child exit first. */
+ ptrace(PTRACE_CONT, child_pid, NULL, 0);
wait(NULL);
- return ret;
+ /*
+ * The testcase exits immediately with -1 on any failure. If
+ * we have reached here, all tests were successful.
+ */
+ return TEST_PASS;
}
int main(int argc, char **argv, char **envp)
{
+ int pvr = 0;
+ asm __volatile__ ("mfspr %0,%1" : "=r"(pvr) : "i"(SPRN_PVR));
+ if (pvr == PVR_8xx)
+ is_8xx = true;
+
return test_harness(ptrace_hwbreak, "ptrace-hwbreak");
}
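
The rewritten test follows the classic tracer/tracee shape: the child calls PTRACE_TRACEME and raises a signal to hand control to the parent, which arms a watchpoint and PTRACE_CONTs the child into the next access. A stripped-down sketch of that control flow (error handling elided; the ppc-specific requests are as used above):

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int trace_one_access(void (*workload)(void))
{
	pid_t pid = fork();

	if (!pid) {
		ptrace(PTRACE_TRACEME, 0, NULL, 0);
		kill(getpid(), SIGUSR1);	/* hand control to the parent */
		workload();			/* should trap on the watched access */
		_exit(0);
	}
	wait(NULL);				/* child stopped on SIGUSR1 */
	/* ... arm the watchpoint here (PTRACE_SET_DEBUGREG et al.) ... */
	ptrace(PTRACE_CONT, pid, NULL, 0);
	wait(NULL);				/* expect a SIGTRAP stop */
	return 0;
}
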
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
index 25e23e73c72e..2ecfa1158e2b 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -73,7 +73,7 @@ trans:
[sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
[dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
[tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
- : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+ : "memory", "r0", "r3", "r4", "r5", "r6", "lr"
);
/* TM failed, analyse */
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
index f603fe5a445b..6f7fb51f0809 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -74,8 +74,8 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
: [sprn_texasr] "i" (SPRN_TEXASR)
- : "memory", "r0", "r1", "r3", "r4",
- "r7", "r8", "r9", "r10", "r11"
+ : "memory", "r0", "r3", "r4",
+ "r7", "r8", "r9", "r10", "r11", "lr"
);
if (result) {
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
index e0d37f07bdeb..46ef378a15ec 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -62,7 +62,7 @@ trans:
[sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
[tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
[dscr_2]"i"(DSCR_2), [cptr1] "b" (&cptr[1])
- : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+ : "memory", "r0", "r3", "r4", "r5", "r6"
);
/* TM failed, analyse */
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
index 8027457b97b7..70ca01234f79 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -62,8 +62,8 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
: [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "b" (&cptr[1])
- : "memory", "r0", "r1", "r3", "r4",
- "r7", "r8", "r9", "r10", "r11"
+ : "memory", "r0", "r3", "r4",
+ "r7", "r8", "r9", "r10", "r11", "lr"
);
if (result) {
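
These four hunks share one theme: newer GCC rejects listing the stack pointer (r1) in an inline-asm clobber list, so "r1" is dropped everywhere, and "lr" is added where the asm body uses branch-and-link sequences that trash the link register. A hedged illustration of the shape involved (the body is a placeholder, not the TM sequence from the tests):

static inline unsigned long read_spr_placeholder(void)
{
	unsigned long val;

	/* Clobber "lr" if the asm body branches-and-links; never list "r1". */
	asm volatile("mfspr %0, 130"	/* placeholder SPR read */
		     : "=r"(val)
		     :
		     : "memory", "lr");
	return val;
}
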
diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
index 85861c46b445..eadbbff50be6 100644
--- a/tools/testing/selftests/powerpc/security/Makefile
+++ b/tools/testing/selftests/powerpc/security/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
-TEST_GEN_PROGS := rfi_flush
+TEST_GEN_PROGS := rfi_flush spectre_v2
top_srcdir = ../../../../..
CFLAGS += -I../../../../../usr/include
@@ -8,3 +8,6 @@ CFLAGS += -I../../../../../usr/include
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c ../utils.c
+
+$(OUTPUT)/spectre_v2: CFLAGS += -m64
+$(OUTPUT)/spectre_v2: ../pmu/event.c branch_loops.S
diff --git a/tools/testing/selftests/powerpc/security/branch_loops.S b/tools/testing/selftests/powerpc/security/branch_loops.S
new file mode 100644
index 000000000000..22e9204e3421
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/branch_loops.S
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2019, Michael Ellerman, IBM Corp.
+ */
+
+#include <ppc-asm.h>
+
+ .data
+
+jump_table:
+ .long 0x0
+ .long (.Lstate_1 - .Lstate_0)
+ .long (.Lstate_2 - .Lstate_0)
+ .long (.Lstate_3 - .Lstate_0)
+ .long (.Lstate_4 - .Lstate_0)
+ .long (.Lstate_5 - .Lstate_0)
+ .long (.Lstate_6 - .Lstate_0)
+ .long (.Lstate_7 - .Lstate_0)
+
+ .text
+
+#define ITER_SHIFT 31
+
+.macro state number
+ .balign 32
+.Lstate_\number:
+ .if \number==7
+ li r3, 0
+ .else
+ li r3, \number+1
+ .endif
+ b .Lloop
+.endm
+
+FUNC_START(pattern_cache_loop)
+ li r3, 0
+ li r4, 1
+ sldi r4, r4, ITER_SHIFT
+
+.Lloop: cmpdi r4, 0
+ beqlr
+
+ addi r4, r4, -1
+
+ ld r6, jump_table@got(%r2)
+ sldi r5, r3, 2
+ lwax r6, r5, r6
+ ld r7, .Lstate_0@got(%r2)
+ add r6, r6, r7
+ mtctr r6
+ bctr
+
+ state 0
+ state 1
+ state 2
+ state 3
+ state 4
+ state 5
+ state 6
+ state 7
+
+FUNC_END(pattern_cache_loop)
+
+
+FUNC_START(indirect_branch_loop)
+ li r3, 1
+ sldi r3, r3, ITER_SHIFT
+
+1: cmpdi r3, 0
+ beqlr
+
+ addi r3, r3, -1
+
+ ld r4, 2f@got(%r2)
+ mtctr r4
+ bctr
+
+ .balign 32
+2: b 1b
+
+FUNC_END(indirect_branch_loop)
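
pattern_cache_loop walks an 8-entry jump table so the branch predictor sees a trainable indirect-branch pattern. A rough C analogue using GNU computed goto (purely illustrative; the selftest keeps this in assembly to control alignment and iteration count):

/* Cycle through 8 states via an indirect branch, as the .S file does. */
static void pattern_loop_c(unsigned long iters)
{
	static void *const table[8] = {
		&&s0, &&s1, &&s2, &&s3, &&s4, &&s5, &&s6, &&s7
	};
	unsigned int state = 0;

loop:
	if (!iters--)
		return;
	goto *table[state];		/* the trainable indirect branch */
s0:	state = 1; goto loop;
s1:	state = 2; goto loop;
s2:	state = 3; goto loop;
s3:	state = 4; goto loop;
s4:	state = 5; goto loop;
s5:	state = 6; goto loop;
s6:	state = 7; goto loop;
s7:	state = 0; goto loop;
}
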
diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c
new file mode 100644
index 000000000000..8c6b982af2a8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/spectre_v2.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2018-2019 IBM Corporation.
+ */
+
+#define __SANE_USERSPACE_TYPES__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <sys/prctl.h>
+#include "utils.h"
+
+#include "../pmu/event.h"
+
+
+extern void pattern_cache_loop(void);
+extern void indirect_branch_loop(void);
+
+static int do_count_loop(struct event *events, bool is_p9, s64 *miss_percent)
+{
+ u64 pred, mpred;
+
+ prctl(PR_TASK_PERF_EVENTS_ENABLE);
+
+ if (is_p9)
+ pattern_cache_loop();
+ else
+ indirect_branch_loop();
+
+ prctl(PR_TASK_PERF_EVENTS_DISABLE);
+
+ event_read(&events[0]);
+ event_read(&events[1]);
+
+ // We could scale all the events by running/enabled but we're lazy
+ // As long as the PMU is uncontended they should all run
+ FAIL_IF(events[0].result.running != events[0].result.enabled);
+ FAIL_IF(events[1].result.running != events[1].result.enabled);
+
+ pred = events[0].result.value;
+ mpred = events[1].result.value;
+
+ if (is_p9) {
+ event_read(&events[2]);
+ event_read(&events[3]);
+ FAIL_IF(events[2].result.running != events[2].result.enabled);
+ FAIL_IF(events[3].result.running != events[3].result.enabled);
+
+ pred += events[2].result.value;
+ mpred += events[3].result.value;
+ }
+
+ *miss_percent = 100 * mpred / pred;
+
+ return 0;
+}
+
+static void setup_event(struct event *e, u64 config, char *name)
+{
+ event_init_named(e, config, name);
+
+ e->attr.disabled = 1;
+ e->attr.exclude_kernel = 1;
+ e->attr.exclude_hv = 1;
+ e->attr.exclude_idle = 1;
+}
+
+enum spectre_v2_state {
+ VULNERABLE = 0,
+ UNKNOWN = 1, // Works with FAIL_IF()
+ NOT_AFFECTED,
+ BRANCH_SERIALISATION,
+ COUNT_CACHE_DISABLED,
+ COUNT_CACHE_FLUSH_SW,
+ COUNT_CACHE_FLUSH_HW,
+ BTB_FLUSH,
+};
+
+static enum spectre_v2_state get_sysfs_state(void)
+{
+ enum spectre_v2_state state = UNKNOWN;
+ char buf[256];
+ int len;
+
+ memset(buf, 0, sizeof(buf));
+ FAIL_IF(read_sysfs_file("devices/system/cpu/vulnerabilities/spectre_v2", buf, sizeof(buf)));
+
+ // Make sure it's NUL-terminated
+ buf[sizeof(buf) - 1] = '\0';
+
+ // Trim the trailing newline
+ len = strlen(buf);
+ FAIL_IF(len < 1);
+ buf[len - 1] = '\0';
+
+ printf("sysfs reports: '%s'\n", buf);
+
+ // Order matters
+ if (strstr(buf, "Vulnerable"))
+ state = VULNERABLE;
+ else if (strstr(buf, "Not affected"))
+ state = NOT_AFFECTED;
+ else if (strstr(buf, "Indirect branch serialisation (kernel only)"))
+ state = BRANCH_SERIALISATION;
+ else if (strstr(buf, "Indirect branch cache disabled"))
+ state = COUNT_CACHE_DISABLED;
+ else if (strstr(buf, "Software count cache flush (hardware accelerated)"))
+ state = COUNT_CACHE_FLUSH_HW;
+ else if (strstr(buf, "Software count cache flush"))
+ state = COUNT_CACHE_FLUSH_SW;
+ else if (strstr(buf, "Branch predictor state flush"))
+ state = BTB_FLUSH;
+
+ return state;
+}
+
+#define PM_BR_PRED_CCACHE 0x040a4 // P8 + P9
+#define PM_BR_MPRED_CCACHE 0x040ac // P8 + P9
+#define PM_BR_PRED_PCACHE 0x048a0 // P9 only
+#define PM_BR_MPRED_PCACHE 0x048b0 // P9 only
+
+#define SPRN_PVR 287
+
+int spectre_v2_test(void)
+{
+ enum spectre_v2_state state;
+ struct event events[4];
+ s64 miss_percent;
+ bool is_p9;
+
+ state = get_sysfs_state();
+ if (state == UNKNOWN) {
+ printf("Error: couldn't determine spectre_v2 mitigation state?\n");
+ return -1;
+ }
+
+ memset(events, 0, sizeof(events));
+
+ setup_event(&events[0], PM_BR_PRED_CCACHE, "PM_BR_PRED_CCACHE");
+ setup_event(&events[1], PM_BR_MPRED_CCACHE, "PM_BR_MPRED_CCACHE");
+ FAIL_IF(event_open(&events[0]));
+ FAIL_IF(event_open_with_group(&events[1], events[0].fd) == -1);
+
+ is_p9 = ((mfspr(SPRN_PVR) >> 16) & 0xFFFF) == 0x4e;
+
+ if (is_p9) {
+ // Count pattern cache too
+ setup_event(&events[2], PM_BR_PRED_PCACHE, "PM_BR_PRED_PCACHE");
+ setup_event(&events[3], PM_BR_MPRED_PCACHE, "PM_BR_MPRED_PCACHE");
+
+ FAIL_IF(event_open_with_group(&events[2], events[0].fd) == -1);
+ FAIL_IF(event_open_with_group(&events[3], events[0].fd) == -1);
+ }
+
+ FAIL_IF(do_count_loop(events, is_p9, &miss_percent));
+
+ event_report_justified(&events[0], 18, 10);
+ event_report_justified(&events[1], 18, 10);
+ event_close(&events[0]);
+ event_close(&events[1]);
+
+ if (is_p9) {
+ event_report_justified(&events[2], 18, 10);
+ event_report_justified(&events[3], 18, 10);
+ event_close(&events[2]);
+ event_close(&events[3]);
+ }
+
+ printf("Miss percent %lld %%\n", miss_percent);
+
+ switch (state) {
+ case VULNERABLE:
+ case NOT_AFFECTED:
+ case COUNT_CACHE_FLUSH_SW:
+ case COUNT_CACHE_FLUSH_HW:
+ // These should all not affect userspace branch prediction
+ if (miss_percent > 15) {
+ printf("Branch misses > 15%% unexpected in this configuration!\n");
+ printf("Possible mis-match between reported & actual mitigation\n");
+ return 1;
+ }
+ break;
+ case BRANCH_SERIALISATION:
+ // This seems to affect userspace branch prediction a bit?
+ if (miss_percent > 25) {
+ printf("Branch misses > 25%% unexpected in this configuration!\n");
+ printf("Possible mis-match between reported & actual mitigation\n");
+ return 1;
+ }
+ break;
+ case COUNT_CACHE_DISABLED:
+ if (miss_percent < 95) {
+ printf("Branch misses < 20%% unexpected in this configuration!\n");
+ printf("Possible mis-match between reported & actual mitigation\n");
+ return 1;
+ }
+ break;
+ case UNKNOWN:
+ case BTB_FLUSH:
+ printf("Not sure!\n");
+ return 1;
+ }
+
+ printf("OK - Measured branch prediction rates match reported spectre v2 mitigation.\n");
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(spectre_v2_test, "spectre_v2");
+}
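
The pass/fail thresholds rest on one measurement: misses as a percentage of predicted branches, taken from raw POWER PMU events. For reference, the same ratio can be read portably with the generic hardware events; a minimal sketch (no grouping or running/enabled scaling, unlike the test above):

#include <linux/perf_event.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_hw_counter(uint64_t config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = config;
	attr.exclude_kernel = 1;
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

/*
 * Usage: open PERF_COUNT_HW_BRANCH_INSTRUCTIONS and
 * PERF_COUNT_HW_BRANCH_MISSES, run the workload, read() both fds,
 * and compute 100 * misses / branches.
 */
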
diff --git a/tools/testing/selftests/powerpc/signal/sigfuz.c b/tools/testing/selftests/powerpc/signal/sigfuz.c
index dade00c698c2..08f9afe3b95c 100644
--- a/tools/testing/selftests/powerpc/signal/sigfuz.c
+++ b/tools/testing/selftests/powerpc/signal/sigfuz.c
@@ -42,7 +42,7 @@
#include "utils.h"
/* Selftest defaults */
-#define COUNT_MAX 4000 /* Number of interactions */
+#define COUNT_MAX 600 /* Number of interactions */
#define THREADS 16 /* Number of threads */
/* Arguments options */
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
index 56fbf9f6bbf3..07c388147b75 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
@@ -10,10 +10,12 @@
*/
#define _GNU_SOURCE
+#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include "utils.h"
+#include "tm.h"
void trap_signal_handler(int signo, siginfo_t *si, void *uc)
{
@@ -29,6 +31,8 @@ int tm_signal_sigreturn_nt(void)
{
struct sigaction trap_sa;
+ SKIP_IF(!have_htm());
+
trap_sa.sa_flags = SA_SIGINFO;
trap_sa.sa_sigaction = trap_signal_handler;
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index c02d24835db4..5ee0e98c4896 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -127,6 +127,26 @@ bool is_ppc64le(void)
return strcmp(uts.machine, "ppc64le") == 0;
}
+int read_sysfs_file(char *fpath, char *result, size_t result_size)
+{
+ char path[PATH_MAX] = "/sys/";
+ int rc = -1, fd;
+
+ strncat(path, fpath, PATH_MAX - strlen(path) - 1);
+
+ if ((fd = open(path, O_RDONLY)) < 0)
+ return rc;
+
+ rc = read(fd, result, result_size);
+
+ close(fd);
+
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
int read_debugfs_file(char *debugfs_file, int *result)
{
int rc = -1, fd;
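
The new helper mirrors read_debugfs_file(): callers pass a path relative to /sys plus a buffer, and get back 0 on success. Note that it does not NUL-terminate the buffer itself, which is why spectre_v2.c above zeroes buf and forces a terminator. A typical call might look like this (illustrative only):

char buf[64] = { 0 };

/* Leave room for a forced '\0', as spectre_v2.c does. */
if (read_sysfs_file("devices/system/cpu/possible", buf, sizeof(buf) - 1))
	return -1;
printf("possible cpus: %s", buf);
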
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 7f8b5c8982e3..6944b898bb53 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -35,6 +35,7 @@
#include <stdbool.h>
#include <string.h>
#include <time.h>
+#include <limits.h>
#include <linux/elf.h>
#include <sys/uio.h>
#include <sys/utsname.h>
@@ -43,6 +44,7 @@
#include <sys/times.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
+#include <linux/kcmp.h>
#include <unistd.h>
#include <sys/syscall.h>
@@ -112,6 +114,8 @@ struct seccomp_data {
# define __NR_seccomp 383
# elif defined(__aarch64__)
# define __NR_seccomp 277
+# elif defined(__riscv)
+# define __NR_seccomp 277
# elif defined(__hppa__)
# define __NR_seccomp 338
# elif defined(__powerpc__)
@@ -204,6 +208,10 @@ struct seccomp_notif_sizes {
#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
#endif
+#ifndef SECCOMP_USER_NOTIF_FLAG_CONTINUE
+#define SECCOMP_USER_NOTIF_FLAG_CONTINUE 0x00000001
+#endif
+
#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
@@ -1587,6 +1595,10 @@ TEST_F(TRACE_poke, getpid_runs_normally)
# define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM regs[8]
# define SYSCALL_RET regs[0]
+#elif defined(__riscv) && __riscv_xlen == 64
+# define ARCH_REGS struct user_regs_struct
+# define SYSCALL_NUM a7
+# define SYSCALL_RET a0
#elif defined(__hppa__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM gr[20]
@@ -1676,7 +1688,7 @@ void change_syscall(struct __test_metadata *_metadata,
EXPECT_EQ(0, ret) {}
#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
- defined(__s390__) || defined(__hppa__)
+ defined(__s390__) || defined(__hppa__) || defined(__riscv)
{
regs.SYSCALL_NUM = syscall;
}
@@ -3077,7 +3089,7 @@ static int user_trap_syscall(int nr, unsigned int flags)
return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
}
-#define USER_NOTIF_MAGIC 116983961184613L
+#define USER_NOTIF_MAGIC INT_MAX
TEST(user_notification_basic)
{
pid_t pid;
@@ -3485,6 +3497,108 @@ TEST(seccomp_get_notif_sizes)
EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
}
+static int filecmp(pid_t pid1, pid_t pid2, int fd1, int fd2)
+{
+#ifdef __NR_kcmp
+ return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+TEST(user_notification_continue)
+{
+ pid_t pid;
+ long ret;
+ int status, listener;
+ struct seccomp_notif req = {};
+ struct seccomp_notif_resp resp = {};
+ struct pollfd pollfd;
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ listener = user_trap_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ ASSERT_GE(listener, 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int dup_fd, pipe_fds[2];
+ pid_t self;
+
+ ret = pipe(pipe_fds);
+ if (ret < 0)
+ exit(1);
+
+ dup_fd = dup(pipe_fds[0]);
+ if (dup_fd < 0)
+ exit(1);
+
+ self = getpid();
+
+ ret = filecmp(self, self, pipe_fds[0], dup_fd);
+ if (ret)
+ exit(2);
+
+ exit(0);
+ }
+
+ pollfd.fd = listener;
+ pollfd.events = POLLIN | POLLOUT;
+
+ EXPECT_GT(poll(&pollfd, 1, -1), 0);
+ EXPECT_EQ(pollfd.revents, POLLIN);
+
+ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+
+ pollfd.fd = listener;
+ pollfd.events = POLLIN | POLLOUT;
+
+ EXPECT_GT(poll(&pollfd, 1, -1), 0);
+ EXPECT_EQ(pollfd.revents, POLLOUT);
+
+ EXPECT_EQ(req.data.nr, __NR_dup);
+
+ resp.id = req.id;
+ resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
+
+ /*
+ * Verify that setting SECCOMP_USER_NOTIF_FLAG_CONTINUE requires
+ * the other response fields (error and val) to be zero.
+ */
+ resp.error = 0;
+ resp.val = USER_NOTIF_MAGIC;
+ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ resp.error = USER_NOTIF_MAGIC;
+ resp.val = 0;
+ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ resp.error = 0;
+ resp.val = 0;
+ EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0) {
+ if (errno == EINVAL)
+ XFAIL(goto skip, "Kernel does not support SECCOMP_USER_NOTIF_FLAG_CONTINUE");
+ }
+
+skip:
+ EXPECT_EQ(waitpid(pid, &status, 0), pid);
+ EXPECT_EQ(true, WIFEXITED(status));
+ EXPECT_EQ(0, WEXITSTATUS(status)) {
+ if (WEXITSTATUS(status) == 2) {
+ XFAIL(return, "Kernel does not support kcmp() syscall");
+ return;
+ }
+ }
+}
+
/*
* TODO:
* - add microbenchmarks
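
The CONTINUE test exercises the standard user-notification round trip: the supervisor polls the listener fd, RECVs a request, fills a response keyed by req.id, and SENDs it back; SECCOMP_USER_NOTIF_FLAG_CONTINUE simply tells the kernel to let the original syscall proceed. Condensed to its essentials (error handling elided, structures as used above):

struct seccomp_notif req = {};
struct seccomp_notif_resp resp = {};

ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);

resp.id = req.id;
resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;
/* error and val must both stay 0 with CONTINUE, or SEND fails with EINVAL */
ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
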
diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
index 1c0d76cb5adf..93b90a9b1eeb 100644
--- a/tools/testing/selftests/vm/config
+++ b/tools/testing/selftests/vm/config
@@ -1,2 +1,3 @@
CONFIG_SYSVIPC=y
CONFIG_USERFAULTFD=y
+CONFIG_TEST_VMALLOC=m
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 50ce6c3dd904..1063328e275c 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -43,7 +43,19 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
err(1, "sigaction");
}
-static volatile sig_atomic_t sig_traps;
+static void clearhandler(int sig)
+{
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+ err(1, "sigaction");
+}
+
+static volatile sig_atomic_t sig_traps, sig_eflags;
+sigjmp_buf jmpbuf;
+static unsigned char altstack_data[SIGSTKSZ];
#ifdef __x86_64__
# define REG_IP REG_RIP
@@ -90,6 +102,25 @@ static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
}
}
+static char const * const signames[] = {
+ [SIGSEGV] = "SIGSEGV",
+ [SIGBUS] = "SIBGUS",
+ [SIGTRAP] = "SIGTRAP",
+ [SIGILL] = "SIGILL",
+};
+
+static void print_and_longjmp(int sig, siginfo_t *si, void *ctx_void)
+{
+ ucontext_t *ctx = ctx_void;
+
+ printf("\tGot %s with RIP=%lx, TF=%ld\n", signames[sig],
+ (unsigned long)ctx->uc_mcontext.gregs[REG_IP],
+ (unsigned long)ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_TF);
+
+ sig_eflags = (unsigned long)ctx->uc_mcontext.gregs[REG_EFL];
+ siglongjmp(jmpbuf, 1);
+}
+
static void check_result(void)
{
unsigned long new_eflags = get_eflags();
@@ -109,6 +140,22 @@ static void check_result(void)
sig_traps = 0;
}
+static void fast_syscall_no_tf(void)
+{
+ sig_traps = 0;
+ printf("[RUN]\tFast syscall with TF cleared\n");
+ fflush(stdout); /* Force a syscall */
+ if (get_eflags() & X86_EFLAGS_TF) {
+ printf("[FAIL]\tTF is now set\n");
+ exit(1);
+ }
+ if (sig_traps) {
+ printf("[FAIL]\tGot SIGTRAP\n");
+ exit(1);
+ }
+ printf("[OK]\tNothing unexpected happened\n");
+}
+
int main()
{
#ifdef CAN_BUILD_32
@@ -163,17 +210,46 @@ int main()
check_result();
/* Now make sure that another fast syscall doesn't set TF again. */
- printf("[RUN]\tFast syscall with TF cleared\n");
- fflush(stdout); /* Force a syscall */
- if (get_eflags() & X86_EFLAGS_TF) {
- printf("[FAIL]\tTF is now set\n");
- exit(1);
+ fast_syscall_no_tf();
+
+ /*
+ * And do a forced SYSENTER to make sure that this works even if
+ * fast syscalls don't use SYSENTER.
+ *
+ * Invoking SYSENTER directly breaks all the rules. Just handle
+ * the SIGSEGV.
+ */
+ if (sigsetjmp(jmpbuf, 1) == 0) {
+ unsigned long nr = SYS_getpid;
+ printf("[RUN]\tSet TF and check SYSENTER\n");
+ stack_t stack = {
+ .ss_sp = altstack_data,
+ .ss_size = SIGSTKSZ,
+ };
+ if (sigaltstack(&stack, NULL) != 0)
+ err(1, "sigaltstack");
+ sethandler(SIGSEGV, print_and_longjmp,
+ SA_RESETHAND | SA_ONSTACK);
+ sethandler(SIGILL, print_and_longjmp, SA_RESETHAND);
+ set_eflags(get_eflags() | X86_EFLAGS_TF);
+ /* Clear EBP first to make sure we segfault cleanly. */
+ asm volatile ("xorl %%ebp, %%ebp; SYSENTER" : "+a" (nr) :: "flags", "rcx"
+#ifdef __x86_64__
+ , "r11"
+#endif
+ );
+
+ /* We're unreachable here. SYSENTER forgets RIP. */
}
- if (sig_traps) {
- printf("[FAIL]\tGot SIGTRAP\n");
+ clearhandler(SIGSEGV);
+ clearhandler(SIGILL);
+ if (!(sig_eflags & X86_EFLAGS_TF)) {
+ printf("[FAIL]\tTF was cleared\n");
exit(1);
}
- printf("[OK]\tNothing unexpected happened\n");
+
+ /* Now make sure that another fast syscall doesn't set TF again. */
+ fast_syscall_no_tf();
return 0;
}
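
Recovering from a raw SYSENTER relies on the sigsetjmp()/siglongjmp() idiom: arm a one-shot handler (SA_RESETHAND) on an alternate stack, take the fault, record what you need from the ucontext, and jump back to the saved point. Reduced to the bare pattern (the fault source below is a hypothetical stand-in):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

static void bounce(int sig, siginfo_t *si, void *ctx)
{
	siglongjmp(env, 1);	/* unwind out of the fault */
}

static void poke_and_recover(void)
{
	struct sigaction sa = { .sa_sigaction = bounce,
				.sa_flags = SA_SIGINFO | SA_RESETHAND };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	if (sigsetjmp(env, 1) == 0)
		*(volatile int *)0 = 0;	/* stand-in for the raw SYSENTER */
	else
		printf("recovered from SIGSEGV\n");
}
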
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index 2813aa821c82..d1d8ba2a4a40 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -57,7 +57,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
}
value = atoi(status);
-
+ close(fd);
return value;
}
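
The usbip fix is the classic read-attribute fd leak: the error paths closed the descriptor, but the success path returned the parsed value with fd still open, leaking one descriptor per queried device. The safe shape funnels every exit through the close; a sketch under that assumption (helper name and buffer size are illustrative):

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int read_attr_value(const char *path)
{
	char status[16] = { 0 };
	int fd, value = -1;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (read(fd, status, sizeof(status) - 1) > 0)
		value = atoi(status);
	close(fd);		/* single exit: no path leaks the fd */
	return value;
}
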
diff --git a/usr/include/Makefile b/usr/include/Makefile
index 57b20f7b6729..24543a30b9f0 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -26,8 +26,6 @@ header-test- += drm/vmwgfx_drm.h
header-test- += linux/am437x-vpfe.h
header-test- += linux/android/binder.h
header-test- += linux/android/binderfs.h
-header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h
-header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h
header-test- += linux/coda.h
header-test- += linux/elfcore.h
header-test- += linux/errqueue.h
@@ -36,8 +34,6 @@ header-test- += linux/hdlc/ioctl.h
header-test- += linux/ivtv.h
header-test- += linux/kexec.h
header-test- += linux/matroxfb.h
-header-test- += linux/netfilter_ipv4/ipt_LOG.h
-header-test- += linux/netfilter_ipv6/ip6t_LOG.h
header-test- += linux/nfc.h
header-test- += linux/omap3isp.h
header-test- += linux/omapfb.h
@@ -99,9 +95,16 @@ endif
# asm-generic/*.h is used by asm/*.h, and should not be included directly
header-test- += asm-generic/%
-# The rest are compile-tested
-header-test-y += $(filter-out $(header-test-), \
- $(patsubst $(obj)/%,%, $(wildcard \
- $(addprefix $(obj)/, *.h */*.h */*/*.h */*/*/*.h))))
+extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h'))
+
+quiet_cmd_hdrtest = HDRTEST $<
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -S -o /dev/null -x c /dev/null \
+ $(if $(filter-out $(header-test-), $*.h), -include $<); \
+ $(PERL) $(srctree)/scripts/headers_check.pl $(obj) $(SRCARCH) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(obj)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
clean-files += $(filter-out Makefile, $(notdir $(wildcard $(obj)/*)))